From 0fa901b2aaf0dc71beb7ea793d675fae1b436e96 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Thu, 6 Mar 2025 11:03:41 -0500 Subject: [PATCH 01/17] Fix `add_epoch_root` call on decide (#2715) --- hotshot-task-impls/src/helpers.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hotshot-task-impls/src/helpers.rs b/hotshot-task-impls/src/helpers.rs index b56a6e736d..44b3ead429 100644 --- a/hotshot-task-impls/src/helpers.rs +++ b/hotshot-task-impls/src/helpers.rs @@ -183,7 +183,7 @@ async fn decide_epoch_root( // Skip if this is not the expected block. if epoch_height != 0 && is_epoch_root(decided_block_number, epoch_height) { let next_epoch_number = - TYPES::Epoch::new(epoch_from_block_number(decided_block_number, epoch_height) + 1); + TYPES::Epoch::new(epoch_from_block_number(decided_block_number, epoch_height) + 2); let write_callback = { tracing::debug!("Calling add_epoch_root for epoch {:?}", next_epoch_number); From 41df9930d90e668b9e2c2343975663361f9c0355 Mon Sep 17 00:00:00 2001 From: Abdul Basit <45506001+imabdulbasit@users.noreply.github.com> Date: Fri, 7 Mar 2025 01:02:51 +0500 Subject: [PATCH 02/17] types migration (#2478) * consensus storage migration * fix migrations and commit transaction after batch insert * sqlite migration fixes and fix deserialization errors * test * fs migration * fix queries and tests * sequencer sqlite lock file * fix postgres epoch migration * fix undecided_state2 migration * bincode deserialize for migrated hashset * query service leaf2 migration * fix quorum proposal migration and storage * fix fetching * fix leaf from proposal * fix import * fix test_fetching_providers * use TestVersions from hotshot-example-types * fix migration completed check * call v2 methods * fix VID errors * lint * cargo sort * fix recursion * v0 and v1 availability modules * cargo sort * fix tests * lockfile * clippy * sqlite lockfile * fix: vid2 in tests * fix tests * remove field * migrate vid * add logs * more logging * use vid2 table * test for vid migration * replace from trait impl with a function for leaf2querydata to leaf1querydata * fix logging * address comments * use info logging level for progress * increase batch size to 10k --- Cargo.lock | 3 + Cargo.toml | 1 + .../src/testing/basic.rs | 6 +- hotshot-builder-core/src/service.rs | 17 +- .../src/testing/basic_test.rs | 7 +- .../src/testing/finalization_test.rs | 7 +- hotshot-builder-core/src/testing/mod.rs | 15 +- hotshot-example-types/src/storage_types.rs | 10 +- hotshot-query-service/Cargo.toml | 1 + .../postgres/V500__types_migration.sql | 23 + .../sqlite/V300__types_migration.sql | 22 + hotshot-query-service/src/availability.rs | 696 ++++++----- .../src/availability/data_source.rs | 7 +- .../src/availability/query_data.rs | 33 +- hotshot-query-service/src/data_source.rs | 32 +- .../src/data_source/extension.rs | 3 +- .../src/data_source/fetching.rs | 23 +- .../src/data_source/fetching/vid.rs | 7 +- .../src/data_source/storage.rs | 4 +- .../src/data_source/storage/fail_storage.rs | 15 +- .../src/data_source/storage/fs.rs | 17 +- .../src/data_source/storage/sql.rs | 336 +++++- .../src/data_source/storage/sql/queries.rs | 17 +- .../storage/sql/queries/availability.rs | 14 +- .../data_source/storage/sql/queries/node.rs | 15 +- .../data_source/storage/sql/transaction.rs | 21 +- .../src/data_source/update.rs | 57 +- hotshot-query-service/src/explorer.rs | 1 + .../src/fetching/provider/any.rs | 7 +- .../src/fetching/provider/query_service.rs | 124 +- 
hotshot-query-service/src/fetching/request.rs | 4 +- hotshot-query-service/src/lib.rs | 38 +- hotshot-query-service/src/node.rs | 4 +- hotshot-query-service/src/node/data_source.rs | 4 +- hotshot-types/src/data.rs | 2 +- hotshot-types/src/data/ns_table.rs | 2 +- hotshot-types/src/traits/storage.rs | 8 +- hotshot/src/lib.rs | 12 +- .../src/testing/consensus.rs | 15 +- marketplace-builder/src/builder.rs | 3 +- node-metrics/Cargo.toml | 1 + .../v0/create_node_validator_api.rs | 13 +- node-metrics/src/api/node_validator/v0/mod.rs | 22 +- node-metrics/src/service/client_state/mod.rs | 10 +- node-metrics/src/service/data_state/mod.rs | 17 +- sequencer-sqlite/Cargo.lock | 2 + sequencer/Cargo.toml | 1 + .../postgres/V501__epoch_tables.sql | 54 + .../migrations/sqlite/V301__epoch_tables.sql | 54 + sequencer/src/api.rs | 72 +- sequencer/src/api/endpoints.rs | 2 + sequencer/src/api/options.rs | 20 +- sequencer/src/api/sql.rs | 15 +- sequencer/src/block/full_payload.rs | 9 - sequencer/src/block/full_payload/ns_proof.rs | 172 --- sequencer/src/block/full_payload/ns_table.rs | 467 -------- .../src/block/full_payload/ns_table/test.rs | 251 ---- sequencer/src/block/full_payload/payload.rs | 313 ----- sequencer/src/block/namespace_payload.rs | 12 - sequencer/src/block/namespace_payload/iter.rs | 81 -- .../src/block/namespace_payload/ns_payload.rs | 137 --- .../namespace_payload/ns_payload_range.rs | 34 - .../src/block/namespace_payload/tx_proof.rs | 253 ---- .../src/block/namespace_payload/types.rs | 429 ------- sequencer/src/block/test.rs | 207 ---- sequencer/src/block/uint_bytes.rs | 231 ---- sequencer/src/persistence.rs | 154 +-- sequencer/src/persistence/fs.rs | 735 ++++++++++-- sequencer/src/persistence/no_storage.rs | 58 +- sequencer/src/persistence/sql.rs | 1028 ++++++++++++++--- sequencer/src/proposal_fetcher.rs | 2 +- sequencer/src/state.rs | 4 +- .../v0/impls/block/full_payload/ns_proof.rs | 2 +- .../impls/block/full_payload/ns_proof/test.rs | 4 +- .../v0/impls/block/full_payload/ns_table.rs | 6 +- .../v0/impls/block/full_payload/payload.rs | 4 +- types/src/v0/impls/block/test.rs | 4 +- types/src/v0/impls/header.rs | 7 +- types/src/v0/traits.rs | 97 +- types/src/v0/utils.rs | 32 +- 80 files changed, 3082 insertions(+), 3567 deletions(-) create mode 100644 hotshot-query-service/migrations/postgres/V500__types_migration.sql create mode 100644 hotshot-query-service/migrations/sqlite/V300__types_migration.sql create mode 100644 sequencer/api/migrations/postgres/V501__epoch_tables.sql create mode 100644 sequencer/api/migrations/sqlite/V301__epoch_tables.sql delete mode 100644 sequencer/src/block/full_payload.rs delete mode 100644 sequencer/src/block/full_payload/ns_proof.rs delete mode 100644 sequencer/src/block/full_payload/ns_table.rs delete mode 100644 sequencer/src/block/full_payload/ns_table/test.rs delete mode 100644 sequencer/src/block/full_payload/payload.rs delete mode 100644 sequencer/src/block/namespace_payload.rs delete mode 100644 sequencer/src/block/namespace_payload/iter.rs delete mode 100644 sequencer/src/block/namespace_payload/ns_payload.rs delete mode 100644 sequencer/src/block/namespace_payload/ns_payload_range.rs delete mode 100644 sequencer/src/block/namespace_payload/tx_proof.rs delete mode 100644 sequencer/src/block/namespace_payload/types.rs delete mode 100644 sequencer/src/block/test.rs delete mode 100644 sequencer/src/block/uint_bytes.rs diff --git a/Cargo.lock b/Cargo.lock index f026ffb8d4..1cb7767d15 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5411,6 +5411,7 @@ 
dependencies = [ "refinery", "refinery-core", "reqwest 0.12.12", + "semver 1.0.25", "serde", "serde_json", "snafu 0.8.5", @@ -7983,6 +7984,7 @@ dependencies = [ "espresso-types", "futures", "hotshot", + "hotshot-example-types", "hotshot-query-service", "hotshot-stake-table", "hotshot-types", @@ -10226,6 +10228,7 @@ dependencies = [ "rand_distr", "request-response", "reqwest 0.12.12", + "semver 1.0.25", "sequencer", "sequencer-utils", "serde", diff --git a/Cargo.toml b/Cargo.toml index 2e50ee989e..ce2648d08f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -118,6 +118,7 @@ libp2p-swarm-derive = { version = "0.35" } typenum = "1" cbor4ii = { version = "1.0", features = ["serde1"] } serde_bytes = { version = "0.11" } +semver = "1" num_cpus = "1" dashmap = "6" memoize = { version = "0.4", features = ["full"] } diff --git a/hotshot-builder-core-refactored/src/testing/basic.rs b/hotshot-builder-core-refactored/src/testing/basic.rs index 53fb9f9055..f353774c0a 100644 --- a/hotshot-builder-core-refactored/src/testing/basic.rs +++ b/hotshot-builder-core-refactored/src/testing/basic.rs @@ -8,7 +8,7 @@ use hotshot_example_types::state_types::{TestInstanceState, TestValidatedState}; use hotshot_types::data::VidCommitment; use hotshot_types::data::{Leaf2, QuorumProposal2, QuorumProposalWrapper, ViewNumber}; use hotshot_types::event::LeafInfo; -use hotshot_types::simple_certificate::QuorumCertificate; +use hotshot_types::simple_certificate::QuorumCertificate2; use hotshot_types::traits::block_contents::BlockHeader; use hotshot_types::traits::node_implementation::{ConsensusTime, NodeType}; use hotshot_types::utils::BuilderCommitment; @@ -186,9 +186,7 @@ async fn test_pruning() { // everything else is boilerplate. let mock_qc = - QuorumCertificate::genesis::(&Default::default(), &Default::default()) - .await - .to_qc2(); + QuorumCertificate2::genesis::(&Default::default(), &Default::default()).await; let leaf = Leaf2::from_quorum_proposal(&QuorumProposalWrapper { proposal: QuorumProposal2 { block_header: >::genesis( diff --git a/hotshot-builder-core/src/service.rs b/hotshot-builder-core/src/service.rs index ba58ba0abd..a45c4039a0 100644 --- a/hotshot-builder-core/src/service.rs +++ b/hotshot-builder-core/src/service.rs @@ -1552,12 +1552,12 @@ mod test { use hotshot_types::data::EpochNumber; use hotshot_types::data::Leaf2; use hotshot_types::data::{QuorumProposal2, QuorumProposalWrapper}; + use hotshot_types::simple_certificate::QuorumCertificate2; use hotshot_types::traits::block_contents::Transaction; use hotshot_types::traits::node_implementation::Versions; use hotshot_types::{ data::{vid_commitment, Leaf, ViewNumber}, message::Proposal, - simple_certificate::QuorumCertificate, traits::{node_implementation::ConsensusTime, signature_key::BuilderSignatureKey}, utils::BuilderCommitment, }; @@ -4099,12 +4099,11 @@ mod test { proposal: QuorumProposal2:: { block_header: leaf.block_header().clone(), view_number, - justify_qc: QuorumCertificate::genesis::( + justify_qc: QuorumCertificate2::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_epoch_justify_qc: None, @@ -4175,12 +4174,11 @@ mod test { proposal: QuorumProposal2:: { block_header: leaf.block_header().clone(), view_number, - justify_qc: QuorumCertificate::genesis::( + justify_qc: QuorumCertificate2::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, 
view_change_evidence: None, next_epoch_justify_qc: None, @@ -4242,12 +4240,11 @@ mod test { proposal: QuorumProposal2:: { block_header: leaf.block_header().clone(), view_number, - justify_qc: QuorumCertificate::genesis::( + justify_qc: QuorumCertificate2::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_epoch_justify_qc: None, diff --git a/hotshot-builder-core/src/testing/basic_test.rs b/hotshot-builder-core/src/testing/basic_test.rs index 0825a9e887..867706c213 100644 --- a/hotshot-builder-core/src/testing/basic_test.rs +++ b/hotshot-builder-core/src/testing/basic_test.rs @@ -5,7 +5,7 @@ pub use hotshot_types::{ data::{EpochNumber, Leaf, ViewNumber}, message::Proposal, signature_key::BLSPubKey, - simple_certificate::{QuorumCertificate, SimpleCertificate, SuccessThreshold}, + simple_certificate::{QuorumCertificate2, SimpleCertificate, SuccessThreshold}, traits::{ block_contents::BlockPayload, node_implementation::{ConsensusTime, NodeType}, @@ -172,12 +172,11 @@ mod tests { let mut previous_commitment = initial_commitment; let mut previous_view = ViewNumber::new(0); let mut previous_quorum_proposal = { - let previous_jc = QuorumCertificate::::genesis::( + let previous_jc = QuorumCertificate2::::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) - .await - .to_qc2(); + .await; QuorumProposalWrapper:: { proposal: QuorumProposal2:: { diff --git a/hotshot-builder-core/src/testing/finalization_test.rs b/hotshot-builder-core/src/testing/finalization_test.rs index 0a1846fa95..a671cbbdf8 100644 --- a/hotshot-builder-core/src/testing/finalization_test.rs +++ b/hotshot-builder-core/src/testing/finalization_test.rs @@ -21,10 +21,10 @@ use hotshot_example_types::{ node_types::{TestTypes, TestVersions}, state_types::{TestInstanceState, TestValidatedState}, }; +use hotshot_types::simple_certificate::QuorumCertificate2; use hotshot_types::{ data::{vid_commitment, DaProposal2, QuorumProposal2, QuorumProposalWrapper, ViewNumber}, message::Proposal, - simple_certificate::QuorumCertificate, traits::{ block_contents::BlockHeader, node_implementation::{ConsensusTime, Versions}, @@ -332,12 +332,11 @@ async fn progress_round_with_transactions( proposal: QuorumProposal2:: { block_header, view_number: next_view, - justify_qc: QuorumCertificate::::genesis::( + justify_qc: QuorumCertificate2::::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_epoch_justify_qc: None, diff --git a/hotshot-builder-core/src/testing/mod.rs b/hotshot-builder-core/src/testing/mod.rs index 8c8ea676e6..279e3ac84c 100644 --- a/hotshot-builder-core/src/testing/mod.rs +++ b/hotshot-builder-core/src/testing/mod.rs @@ -17,7 +17,7 @@ use hotshot_types::{ vid_commitment, DaProposal2, Leaf2, QuorumProposal2, QuorumProposalWrapper, ViewNumber, }, message::Proposal, - simple_certificate::{QuorumCertificate, SimpleCertificate, SuccessThreshold}, + simple_certificate::{QuorumCertificate2, SimpleCertificate, SuccessThreshold}, simple_vote::QuorumData2, traits::{ node_implementation::{ConsensusTime, Versions}, @@ -186,12 +186,13 @@ pub async fn calc_proposal_msg( }; let justify_qc = match prev_quorum_proposal.as_ref() { - None => QuorumCertificate::::genesis::( - &TestValidatedState::default(), - &TestInstanceState::default(), - ) - .await - .to_qc2(), + None => { + QuorumCertificate2::::genesis::( 
+ &TestValidatedState::default(), + &TestInstanceState::default(), + ) + .await + } Some(prev_proposal) => { let prev_justify_qc = prev_proposal.justify_qc(); let quorum_data = QuorumData2:: { diff --git a/hotshot-example-types/src/storage_types.rs b/hotshot-example-types/src/storage_types.rs index a20a8a631e..3d7885973d 100644 --- a/hotshot-example-types/src/storage_types.rs +++ b/hotshot-example-types/src/storage_types.rs @@ -20,7 +20,7 @@ use hotshot_types::{ QuorumProposalWrapper, VidCommitment, }, event::HotShotAction, - message::Proposal, + message::{convert_proposal, Proposal}, simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate2, UpgradeCertificate}, traits::{ node_implementation::{ConsensusTime, NodeType}, @@ -362,13 +362,7 @@ impl Storage for TestStorage { Ok(()) } - async fn migrate_consensus( - &self, - _convert_leaf: fn(Leaf) -> Leaf2, - convert_proposal: fn( - Proposal>, - ) -> Proposal>, - ) -> Result<()> { + async fn migrate_consensus(&self) -> Result<()> { let mut storage_writer = self.inner.write().await; for (view, proposal) in storage_writer.proposals.clone().iter() { diff --git a/hotshot-query-service/Cargo.toml b/hotshot-query-service/Cargo.toml index 8a8d2a830b..c2d5c6d5c9 100644 --- a/hotshot-query-service/Cargo.toml +++ b/hotshot-query-service/Cargo.toml @@ -79,6 +79,7 @@ jf-vid = { version = "0.1.0", git = "https://github.com/EspressoSystems/jellyfis ] } lazy_static = "1" prometheus = "0.13" +semver = { workspace = true } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" snafu = "0.8" diff --git a/hotshot-query-service/migrations/postgres/V500__types_migration.sql b/hotshot-query-service/migrations/postgres/V500__types_migration.sql new file mode 100644 index 0000000000..838bce59a1 --- /dev/null +++ b/hotshot-query-service/migrations/postgres/V500__types_migration.sql @@ -0,0 +1,23 @@ +CREATE TABLE leaf2 +( + height BIGINT PRIMARY KEY REFERENCES header (height) ON DELETE CASCADE, + hash VARCHAR NOT NULL UNIQUE, + block_hash VARCHAR NOT NULL REFERENCES header (hash) ON DELETE CASCADE, + leaf JSONB NOT NULL, + qc JSONB NOT NULL +); + +CREATE TABLE types_migration ( + id SERIAL PRIMARY KEY, + completed bool NOT NULL DEFAULT false +); + +INSERT INTO types_migration ("completed") VALUES (false); + + +CREATE TABLE vid2 +( + height BIGINT PRIMARY KEY REFERENCES header (height) ON DELETE CASCADE, + common BYTEA NOT NULL, + share BYTEA +); diff --git a/hotshot-query-service/migrations/sqlite/V300__types_migration.sql b/hotshot-query-service/migrations/sqlite/V300__types_migration.sql new file mode 100644 index 0000000000..1b598c5d5a --- /dev/null +++ b/hotshot-query-service/migrations/sqlite/V300__types_migration.sql @@ -0,0 +1,22 @@ +CREATE TABLE leaf2 +( + height BIGINT PRIMARY KEY REFERENCES header (height) ON DELETE CASCADE, + hash VARCHAR NOT NULL UNIQUE, + block_hash VARCHAR NOT NULL REFERENCES header (hash) ON DELETE CASCADE, + leaf JSONB NOT NULL, + qc JSONB NOT NULL +); + +CREATE TABLE types_migration ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + completed bool NOT NULL DEFAULT false +); + +INSERT INTO types_migration ("completed") VALUES (false); + +CREATE TABLE vid2 +( + height BIGINT PRIMARY KEY REFERENCES header (height) ON DELETE CASCADE, + common BYTEA NOT NULL, + share BYTEA +); diff --git a/hotshot-query-service/src/availability.rs b/hotshot-query-service/src/availability.rs index 63122094ea..d1dbcd249f 100644 --- a/hotshot-query-service/src/availability.rs +++ b/hotshot-query-service/src/availability.rs @@ -29,7 +29,12 @@ 
use crate::{api::load_api, Payload, QueryError};
use derive_more::From;
use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt};
-use hotshot_types::traits::node_implementation::NodeType;
+
+use hotshot_types::{
+    data::{Leaf, Leaf2, QuorumProposal},
+    simple_certificate::QuorumCertificate,
+    traits::node_implementation::NodeType,
+};
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, Snafu};
use std::{fmt::Display, path::PathBuf, time::Duration};
@@ -161,9 +166,95 @@ impl Error {
    }
}

+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
+#[serde(bound = "")]
+pub struct Leaf1QueryData<Types: NodeType> {
+    pub(crate) leaf: Leaf<Types>,
+    pub(crate) qc: QuorumCertificate<Types>,
+}
+
+fn downgrade_leaf<Types: NodeType>(leaf2: Leaf2<Types>) -> Leaf<Types> {
+    // TODO do we still need some check here?
+    // `drb_seed` no longer exists on `Leaf2`
+    // if leaf2.drb_seed != [0; 32] && leaf2.drb_result != [0; 32] {
+    //     panic!("Downgrade of Leaf2 to Leaf will lose DRB information!");
+    // }
+    let quorum_proposal = QuorumProposal {
+        block_header: leaf2.block_header().clone(),
+        view_number: leaf2.view_number(),
+        justify_qc: leaf2.justify_qc().to_qc(),
+        upgrade_certificate: leaf2.upgrade_certificate(),
+        proposal_certificate: None,
+    };
+    let mut leaf = Leaf::from_quorum_proposal(&quorum_proposal);
+    if let Some(payload) = leaf2.block_payload() {
+        leaf.fill_block_payload_unchecked(payload);
+    }
+    leaf
+}
+
+fn downgrade_leaf_query_data<Types: NodeType>(leaf: LeafQueryData<Types>) -> Leaf1QueryData<Types> {
+    Leaf1QueryData {
+        leaf: downgrade_leaf(leaf.leaf),
+        qc: leaf.qc.to_qc(),
+    }
+}
+
+async fn get_leaf_handler<State, Types>(
+    req: tide_disco::RequestParams,
+    state: &State,
+    timeout: Duration,
+) -> Result<LeafQueryData<Types>, Error>
+where
+    State: 'static + Send + Sync + ReadState,
+    <State as ReadState>::State: Send + Sync + AvailabilityDataSource<Types>,
+    Types: NodeType,
+    Payload<Types>: QueryablePayload<Types>,
+{
+    let id = match req.opt_integer_param("height")? {
+        Some(height) => LeafId::Number(height),
+        None => LeafId::Hash(req.blob_param("hash")?),
+    };
+    let fetch = state.read(|state| state.get_leaf(id).boxed()).await;
+    fetch.with_timeout(timeout).await.context(FetchLeafSnafu {
+        resource: id.to_string(),
+    })
+}
+
+async fn get_leaf_range_handler<State, Types>(
+    req: tide_disco::RequestParams,
+    state: &State,
+    timeout: Duration,
+    small_object_range_limit: usize,
+) -> Result<Vec<LeafQueryData<Types>>, Error>
+where
+    State: 'static + Send + Sync + ReadState,
+    <State as ReadState>::State: Send + Sync + AvailabilityDataSource<Types>,
+    Types: NodeType,
+    Payload<Types>: QueryablePayload<Types>,
+{
+    let from = req.integer_param::<_, usize>("from")?;
+    let until = req.integer_param("until")?;
+    enforce_range_limit(from, until, small_object_range_limit)?;
+
+    let leaves = state
+        .read(|state| state.get_leaf_range(from..until).boxed())
+        .await;
+    leaves
+        .enumerate()
+        .then(|(index, fetch)| async move {
+            fetch.with_timeout(timeout).await.context(FetchLeafSnafu {
+                resource: (index + from).to_string(),
+            })
+        })
+        .try_collect::<Vec<_>>()
+        .await
+}
+
pub fn define_api<State, Types, Ver: StaticVersionType + 'static>(
    options: &Options,
    _: Ver,
+    api_ver: semver::Version,
) -> Result<Api<State, Error, Ver>, ApiError>
where
    State: 'static + Send + Sync + ReadState,
    <State as ReadState>::State: Send + Sync + AvailabilityDataSource<Types>,
    Types: NodeType,
    Payload<Types>: QueryablePayload<Types>,
@@ -179,310 +270,332 @@ where
    let small_object_range_limit = options.small_object_range_limit;
    let large_object_range_limit = options.large_object_range_limit;
-    api.with_version("0.0.1".parse().unwrap())
-        .at("get_leaf", move |req, state| {
-            async move {
-                let id = match req.opt_integer_param("height")?
{ - Some(height) => LeafId::Number(height), - None => LeafId::Hash(req.blob_param("hash")?), - }; - let fetch = state.read(|state| state.get_leaf(id).boxed()).await; - fetch.with_timeout(timeout).await.context(FetchLeafSnafu { - resource: id.to_string(), - }) - } - .boxed() - })? - .at("get_leaf_range", move |req, state| { - async move { - let from = req.integer_param::<_, usize>("from")?; - let until = req.integer_param("until")?; - enforce_range_limit(from, until, small_object_range_limit)?; - - let leaves = state - .read(|state| state.get_leaf_range(from..until).boxed()) - .await; - leaves - .enumerate() - .then(|(index, fetch)| async move { - fetch.with_timeout(timeout).await.context(FetchLeafSnafu { - resource: (index + from).to_string(), - }) + api.with_version(api_ver.clone()); + + // `LeafQueryData` now contains `Leaf2` and `QC2``, which is a breaking change. + // On node startup, all leaves are migrated to `Leaf2`. + // + // To maintain compatibility with nodes running an older version + // (which expect `LeafQueryData` with `Leaf1` and `QC1`), + // we downgrade `Leaf2` to `Leaf1` and `QC2` to `QC1` if the API version is V0. + // Otherwise, we return the new types. + if api_ver.major == 0 { + api.at("get_leaf", move |req, state| { + get_leaf_handler(req, state, timeout) + .map(|res| res.map(downgrade_leaf_query_data)) + .boxed() + })?; + + api.at("get_leaf_range", move |req, state| { + get_leaf_range_handler(req, state, timeout, small_object_range_limit) + .map(|res| { + res.map(|r| { + r.into_iter() + .map(downgrade_leaf_query_data) + .collect::>>() }) - .try_collect::>() - .await - } - .boxed() - })? - .stream("stream_leaves", move |req, state| { + }) + .boxed() + })?; + + api.stream("stream_leaves", move |req, state| { async move { let height = req.integer_param("height")?; state .read(|state| { - async move { Ok(state.subscribe_leaves(height).await.map(Ok)) }.boxed() + async move { + Ok(state + .subscribe_leaves(height) + .await + .map(|leaf| Ok(downgrade_leaf_query_data(leaf)))) + } + .boxed() }) .await } .try_flatten_stream() .boxed() - })? - .at("get_header", move |req, state| { - async move { - let id = if let Some(height) = req.opt_integer_param("height")? { - BlockId::Number(height) - } else if let Some(hash) = req.opt_blob_param("hash")? { - BlockId::Hash(hash) - } else { - BlockId::PayloadHash(req.blob_param("payload-hash")?) - }; - let fetch = state.read(|state| state.get_header(id).boxed()).await; - fetch.with_timeout(timeout).await.context(FetchHeaderSnafu { - resource: id.to_string(), - }) - } - .boxed() - })? - .at("get_header_range", move |req, state| { - async move { - let from = req.integer_param::<_, usize>("from")?; - let until = req.integer_param::<_, usize>("until")?; - enforce_range_limit(from, until, large_object_range_limit)?; - - let headers = state - .read(|state| state.get_header_range(from..until).boxed()) - .await; - headers - .enumerate() - .then(|(index, fetch)| async move { - fetch.with_timeout(timeout).await.context(FetchHeaderSnafu { - resource: (index + from).to_string(), - }) - }) - .try_collect::>() - .await - } - .boxed() - })? 
- .stream("stream_headers", move |req, state| { + })?; + } else { + api.at("get_leaf", move |req, state| { + get_leaf_handler(req, state, timeout).boxed() + })?; + + api.at("get_leaf_range", move |req, state| { + get_leaf_range_handler(req, state, timeout, small_object_range_limit).boxed() + })?; + + api.stream("stream_leaves", move |req, state| { async move { let height = req.integer_param("height")?; state .read(|state| { - async move { Ok(state.subscribe_headers(height).await.map(Ok)) }.boxed() + async move { Ok(state.subscribe_leaves(height).await.map(Ok)) }.boxed() }) .await } .try_flatten_stream() .boxed() - })? - .at("get_block", move |req, state| { - async move { - let id = if let Some(height) = req.opt_integer_param("height")? { - BlockId::Number(height) - } else if let Some(hash) = req.opt_blob_param("hash")? { - BlockId::Hash(hash) - } else { - BlockId::PayloadHash(req.blob_param("payload-hash")?) - }; - let fetch = state.read(|state| state.get_block(id).boxed()).await; - fetch.with_timeout(timeout).await.context(FetchBlockSnafu { - resource: id.to_string(), + })?; + } + + api.at("get_header", move |req, state| { + async move { + let id = if let Some(height) = req.opt_integer_param("height")? { + BlockId::Number(height) + } else if let Some(hash) = req.opt_blob_param("hash")? { + BlockId::Hash(hash) + } else { + BlockId::PayloadHash(req.blob_param("payload-hash")?) + }; + let fetch = state.read(|state| state.get_header(id).boxed()).await; + fetch.with_timeout(timeout).await.context(FetchHeaderSnafu { + resource: id.to_string(), + }) + } + .boxed() + })? + .at("get_header_range", move |req, state| { + async move { + let from = req.integer_param::<_, usize>("from")?; + let until = req.integer_param::<_, usize>("until")?; + enforce_range_limit(from, until, large_object_range_limit)?; + + let headers = state + .read(|state| state.get_header_range(from..until).boxed()) + .await; + headers + .enumerate() + .then(|(index, fetch)| async move { + fetch.with_timeout(timeout).await.context(FetchHeaderSnafu { + resource: (index + from).to_string(), + }) }) - } - .boxed() - })? - .at("get_block_range", move |req, state| { - async move { - let from = req.integer_param::<_, usize>("from")?; - let until = req.integer_param("until")?; - enforce_range_limit(from, until, large_object_range_limit)?; - - let blocks = state - .read(|state| state.get_block_range(from..until).boxed()) - .await; - blocks - .enumerate() - .then(|(index, fetch)| async move { - fetch.with_timeout(timeout).await.context(FetchBlockSnafu { - resource: (index + from).to_string(), - }) + .try_collect::>() + .await + } + .boxed() + })? + .stream("stream_headers", move |req, state| { + async move { + let height = req.integer_param("height")?; + state + .read(|state| { + async move { Ok(state.subscribe_headers(height).await.map(Ok)) }.boxed() + }) + .await + } + .try_flatten_stream() + .boxed() + })? + .at("get_block", move |req, state| { + async move { + let id = if let Some(height) = req.opt_integer_param("height")? { + BlockId::Number(height) + } else if let Some(hash) = req.opt_blob_param("hash")? { + BlockId::Hash(hash) + } else { + BlockId::PayloadHash(req.blob_param("payload-hash")?) + }; + let fetch = state.read(|state| state.get_block(id).boxed()).await; + fetch.with_timeout(timeout).await.context(FetchBlockSnafu { + resource: id.to_string(), + }) + } + .boxed() + })? 
+ .at("get_block_range", move |req, state| { + async move { + let from = req.integer_param::<_, usize>("from")?; + let until = req.integer_param("until")?; + enforce_range_limit(from, until, large_object_range_limit)?; + + let blocks = state + .read(|state| state.get_block_range(from..until).boxed()) + .await; + blocks + .enumerate() + .then(|(index, fetch)| async move { + fetch.with_timeout(timeout).await.context(FetchBlockSnafu { + resource: (index + from).to_string(), }) - .try_collect::>() - .await - } - .boxed() - })? - .stream("stream_blocks", move |req, state| { - async move { - let height = req.integer_param("height")?; - state - .read(|state| { - async move { Ok(state.subscribe_blocks(height).await.map(Ok)) }.boxed() + }) + .try_collect::>() + .await + } + .boxed() + })? + .stream("stream_blocks", move |req, state| { + async move { + let height = req.integer_param("height")?; + state + .read(|state| { + async move { Ok(state.subscribe_blocks(height).await.map(Ok)) }.boxed() + }) + .await + } + .try_flatten_stream() + .boxed() + })? + .at("get_payload", move |req, state| { + async move { + let id = if let Some(height) = req.opt_integer_param("height")? { + BlockId::Number(height) + } else if let Some(hash) = req.opt_blob_param("hash")? { + BlockId::PayloadHash(hash) + } else { + BlockId::Hash(req.blob_param("block-hash")?) + }; + let fetch = state.read(|state| state.get_payload(id).boxed()).await; + fetch.with_timeout(timeout).await.context(FetchBlockSnafu { + resource: id.to_string(), + }) + } + .boxed() + })? + .at("get_payload_range", move |req, state| { + async move { + let from = req.integer_param::<_, usize>("from")?; + let until = req.integer_param("until")?; + enforce_range_limit(from, until, large_object_range_limit)?; + + let payloads = state + .read(|state| state.get_payload_range(from..until).boxed()) + .await; + payloads + .enumerate() + .then(|(index, fetch)| async move { + fetch.with_timeout(timeout).await.context(FetchBlockSnafu { + resource: (index + from).to_string(), }) - .await - } - .try_flatten_stream() - .boxed() - })? - .at("get_payload", move |req, state| { - async move { - let id = if let Some(height) = req.opt_integer_param("height")? { - BlockId::Number(height) - } else if let Some(hash) = req.opt_blob_param("hash")? { - BlockId::PayloadHash(hash) - } else { - BlockId::Hash(req.blob_param("block-hash")?) - }; - let fetch = state.read(|state| state.get_payload(id).boxed()).await; - fetch.with_timeout(timeout).await.context(FetchBlockSnafu { - resource: id.to_string(), }) - } - .boxed() - })? - .at("get_payload_range", move |req, state| { - async move { - let from = req.integer_param::<_, usize>("from")?; - let until = req.integer_param("until")?; - enforce_range_limit(from, until, large_object_range_limit)?; - - let payloads = state - .read(|state| state.get_payload_range(from..until).boxed()) - .await; - payloads - .enumerate() - .then(|(index, fetch)| async move { - fetch.with_timeout(timeout).await.context(FetchBlockSnafu { - resource: (index + from).to_string(), + .try_collect::>() + .await + } + .boxed() + })? + .stream("stream_payloads", move |req, state| { + async move { + let height = req.integer_param("height")?; + state + .read(|state| { + async move { Ok(state.subscribe_payloads(height).await.map(Ok)) }.boxed() + }) + .await + } + .try_flatten_stream() + .boxed() + })? + .at("get_vid_common", move |req, state| { + async move { + let id = if let Some(height) = req.opt_integer_param("height")? 
{ + BlockId::Number(height) + } else if let Some(hash) = req.opt_blob_param("hash")? { + BlockId::Hash(hash) + } else { + BlockId::PayloadHash(req.blob_param("payload-hash")?) + }; + let fetch = state.read(|state| state.get_vid_common(id).boxed()).await; + fetch.with_timeout(timeout).await.context(FetchBlockSnafu { + resource: id.to_string(), + }) + } + .boxed() + })? + .stream("stream_vid_common", move |req, state| { + async move { + let height = req.integer_param("height")?; + state + .read(|state| { + async move { Ok(state.subscribe_vid_common(height).await.map(Ok)) }.boxed() + }) + .await + } + .try_flatten_stream() + .boxed() + })? + .at("get_transaction", move |req, state| { + async move { + match req.opt_blob_param("hash")? { + Some(hash) => { + let fetch = state + .read(|state| state.get_transaction(hash).boxed()) + .await; + fetch + .with_timeout(timeout) + .await + .context(FetchTransactionSnafu { + resource: hash.to_string(), }) - }) - .try_collect::>() - .await - } - .boxed() - })? - .stream("stream_payloads", move |req, state| { - async move { - let height = req.integer_param("height")?; - state - .read(|state| { - async move { Ok(state.subscribe_payloads(height).await.map(Ok)) }.boxed() - }) - .await + } + None => { + let height: u64 = req.integer_param("height")?; + let fetch = state + .read(|state| state.get_block(height as usize).boxed()) + .await; + let block = fetch.with_timeout(timeout).await.context(FetchBlockSnafu { + resource: height.to_string(), + })?; + let i: u64 = req.integer_param("index")?; + let index = block + .payload() + .nth(block.metadata(), i as usize) + .context(InvalidTransactionIndexSnafu { height, index: i })?; + TransactionQueryData::new(&block, index, i) + .context(InvalidTransactionIndexSnafu { height, index: i }) + } } - .try_flatten_stream() - .boxed() - })? - .at("get_vid_common", move |req, state| { - async move { - let id = if let Some(height) = req.opt_integer_param("height")? { - BlockId::Number(height) - } else if let Some(hash) = req.opt_blob_param("hash")? { - BlockId::Hash(hash) - } else { - BlockId::PayloadHash(req.blob_param("payload-hash")?) - }; - let fetch = state.read(|state| state.get_vid_common(id).boxed()).await; - fetch.with_timeout(timeout).await.context(FetchBlockSnafu { + } + .boxed() + })? + .at("get_block_summary", move |req, state| { + async move { + let id: usize = req.integer_param("height")?; + + let fetch = state.read(|state| state.get_block(id).boxed()).await; + fetch + .with_timeout(timeout) + .await + .context(FetchBlockSnafu { resource: id.to_string(), }) - } - .boxed() - })? - .stream("stream_vid_common", move |req, state| { - async move { - let height = req.integer_param("height")?; - state - .read(|state| { - async move { Ok(state.subscribe_vid_common(height).await.map(Ok)) }.boxed() - }) - .await - } - .try_flatten_stream() - .boxed() - })? - .at("get_transaction", move |req, state| { - async move { - match req.opt_blob_param("hash")? 
{ - Some(hash) => { - let fetch = state - .read(|state| state.get_transaction(hash).boxed()) - .await; - fetch - .with_timeout(timeout) - .await - .context(FetchTransactionSnafu { - resource: hash.to_string(), - }) - } - None => { - let height: u64 = req.integer_param("height")?; - let fetch = state - .read(|state| state.get_block(height as usize).boxed()) - .await; - let block = fetch.with_timeout(timeout).await.context(FetchBlockSnafu { - resource: height.to_string(), - })?; - let i: u64 = req.integer_param("index")?; - let index = block - .payload() - .nth(block.metadata(), i as usize) - .context(InvalidTransactionIndexSnafu { height, index: i })?; - TransactionQueryData::new(&block, index, i) - .context(InvalidTransactionIndexSnafu { height, index: i }) - } - } - } - .boxed() - })? - .at("get_block_summary", move |req, state| { - async move { - let id: usize = req.integer_param("height")?; - - let fetch = state.read(|state| state.get_block(id).boxed()).await; - fetch - .with_timeout(timeout) - .await - .context(FetchBlockSnafu { - resource: id.to_string(), - }) - .map(BlockSummaryQueryData::from) - } - .boxed() - })? - .at("get_block_summary_range", move |req, state| { - async move { - let from: usize = req.integer_param("from")?; - let until: usize = req.integer_param("until")?; - enforce_range_limit(from, until, large_object_range_limit)?; - - let blocks = state - .read(|state| state.get_block_range(from..until).boxed()) - .await; - let result: Vec> = blocks - .enumerate() - .then(|(index, fetch)| async move { - fetch.with_timeout(timeout).await.context(FetchBlockSnafu { - resource: (index + from).to_string(), - }) + .map(BlockSummaryQueryData::from) + } + .boxed() + })? + .at("get_block_summary_range", move |req, state| { + async move { + let from: usize = req.integer_param("from")?; + let until: usize = req.integer_param("until")?; + enforce_range_limit(from, until, large_object_range_limit)?; + + let blocks = state + .read(|state| state.get_block_range(from..until).boxed()) + .await; + let result: Vec> = blocks + .enumerate() + .then(|(index, fetch)| async move { + fetch.with_timeout(timeout).await.context(FetchBlockSnafu { + resource: (index + from).to_string(), }) - .map(|result| result.map(BlockSummaryQueryData::from)) - .try_collect() - .await?; - - Ok(result) - } - .boxed() - })? - .at("get_limits", move |_req, _state| { - async move { - Ok(Limits { - small_object_range_limit, - large_object_range_limit, }) - } - .boxed() - })?; + .map(|result| result.map(BlockSummaryQueryData::from)) + .try_collect() + .await?; + + Ok(result) + } + .boxed() + })? 
+ .at("get_limits", move |_req, _state| { + async move { + Ok(Limits { + small_object_range_limit, + large_object_range_limit, + }) + } + .boxed() + })?; Ok(api) } @@ -498,6 +611,7 @@ mod test { use super::*; use crate::data_source::storage::AvailabilityStorage; use crate::data_source::VersionedDataSource; + use crate::testing::mocks::MockVersions; use crate::{ data_source::ExtensibleDataSource, status::StatusDataSource, @@ -513,7 +627,8 @@ mod test { use async_lock::RwLock; use committable::Committable; use futures::future::FutureExt; - use hotshot_types::{data::Leaf, simple_certificate::QuorumCertificate}; + use hotshot_types::data::Leaf2; + use hotshot_types::simple_certificate::QuorumCertificate2; use portpicker::pick_unused_port; use serde::de::DeserializeOwned; use std::{fmt::Debug, time::Duration}; @@ -788,7 +903,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -884,10 +1004,10 @@ mod test { // mock up some consensus data. let leaf = - Leaf::::genesis::(&Default::default(), &Default::default()) + Leaf2::::genesis::(&Default::default(), &Default::default()) .await; let qc = - QuorumCertificate::genesis::(&Default::default(), &Default::default()) + QuorumCertificate2::genesis::(&Default::default(), &Default::default()) .await; let leaf = LeafQueryData::new(leaf, qc).unwrap(); let block = BlockQueryData::new(leaf.header().clone(), MockPayload::genesis()); @@ -924,6 +1044,7 @@ mod test { ..Default::default() }, MockBase::instance(), + "1.0.0".parse().unwrap(), ) .unwrap(); api.get("get_ext", |_, state| { @@ -994,6 +1115,7 @@ mod test { ..Default::default() }, MockBase::instance(), + "1.0.0".parse().unwrap(), ) .unwrap(), ) @@ -1078,7 +1200,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1115,7 +1242,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( diff --git a/hotshot-query-service/src/availability/data_source.rs b/hotshot-query-service/src/availability/data_source.rs index f13a9a7b1d..9747b7814b 100644 --- a/hotshot-query-service/src/availability/data_source.rs +++ b/hotshot-query-service/src/availability/data_source.rs @@ -18,7 +18,7 @@ use super::{ VidCommonQueryData, }, }; -use crate::{types::HeightIndexed, Header, Payload, VidCommitment, VidShare}; +use crate::{types::HeightIndexed, Header, Payload}; use async_trait::async_trait; use derivative::Derivative; use derive_more::{Display, From}; @@ -26,7 +26,10 @@ use futures::{ future::Future, stream::{BoxStream, StreamExt}, }; -use hotshot_types::traits::node_implementation::NodeType; +use hotshot_types::{ + data::{VidCommitment, VidShare}, + traits::node_implementation::NodeType, +}; use std::{ cmp::Ordering, ops::{Bound, RangeBounds}, diff --git 
a/hotshot-query-service/src/availability/query_data.rs b/hotshot-query-service/src/availability/query_data.rs
index 6232f7ed70..6e1b1eb1f7 100644
--- a/hotshot-query-service/src/availability/query_data.rs
+++ b/hotshot-query-service/src/availability/query_data.rs
@@ -10,12 +10,11 @@
// You should have received a copy of the GNU General Public License along with this program. If not,
// see <https://www.gnu.org/licenses/>.

-use crate::{types::HeightIndexed, Header, Metadata, Payload, Transaction, VidCommon, VidShare};
+use crate::{types::HeightIndexed, Header, Metadata, Payload, Transaction, VidCommon};
use committable::{Commitment, Committable};
use hotshot_types::{
-    data::Leaf,
-    data::VidCommitment,
-    simple_certificate::QuorumCertificate,
+    data::{Leaf, Leaf2, VidCommitment, VidShare},
+    simple_certificate::QuorumCertificate2,
    traits::{
        self,
        block_contents::{BlockHeader, GENESIS_VID_NUM_STORAGE_NODES},
@@ -29,8 +28,8 @@ use serde::{de::DeserializeOwned, Deserialize, Serialize};
use snafu::{ensure, Snafu};
use std::fmt::Debug;

-pub type LeafHash<Types> = Commitment<Leaf<Types>>;
-pub type QcHash<Types> = Commitment<QuorumCertificate<Types>>;
+pub type LeafHash<Types> = Commitment<Leaf2<Types>>;
+pub type QcHash<Types> = Commitment<QuorumCertificate2<Types>>;

/// A block hash is the hash of the block header.
///
@@ -193,8 +192,8 @@ pub trait QueryablePayload<Types: NodeType>: traits::BlockPayload<Types> {
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
#[serde(bound = "")]
pub struct LeafQueryData<Types: NodeType> {
-    pub(crate) leaf: Leaf<Types>,
-    pub(crate) qc: QuorumCertificate<Types>,
+    pub(crate) leaf: Leaf2<Types>,
+    pub(crate) qc: QuorumCertificate2<Types>,
}

#[derive(Clone, Debug, Snafu)]
@@ -213,13 +212,13 @@ impl<Types: NodeType> LeafQueryData<Types> {
    ///
    /// Fails with an [`InconsistentLeafError`] if `qc` does not reference `leaf`.
    pub fn new(
-        mut leaf: Leaf<Types>,
-        qc: QuorumCertificate<Types>,
+        mut leaf: Leaf2<Types>,
+        qc: QuorumCertificate2<Types>,
    ) -> Result<Self, InconsistentLeafError<Types>> {
        // TODO: Replace with the new `commit` function in HotShot. Add an `upgrade_lock` parameter
        // and a `HsVer: Versions` bound, then call `leaf.commit(upgrade_lock).await`. This will
        // require updates in callers and relevant types as well.
-        let leaf_commit = <Leaf<Types> as Committable>::commit(&leaf);
+        let leaf_commit = <Leaf2<Types> as Committable>::commit(&leaf);
        ensure!(
            qc.data.leaf_commit == leaf_commit,
            InconsistentLeafSnafu {
@@ -240,16 +239,16 @@ impl<Types: NodeType> LeafQueryData<Types> {
        instance_state: &Types::InstanceState,
    ) -> Self {
        Self {
-            leaf: Leaf::genesis::<HsVer>(validated_state, instance_state).await,
-            qc: QuorumCertificate::genesis::<HsVer>(validated_state, instance_state).await,
+            leaf: Leaf2::genesis::<HsVer>(validated_state, instance_state).await,
+            qc: QuorumCertificate2::genesis::<HsVer>(validated_state, instance_state).await,
        }
    }

-    pub fn leaf(&self) -> &Leaf<Types> {
+    pub fn leaf(&self) -> &Leaf2<Types> {
        &self.leaf
    }

-    pub fn qc(&self) -> &QuorumCertificate<Types> {
+    pub fn qc(&self) -> &QuorumCertificate2<Types> {
        &self.qc
    }

@@ -261,7 +260,7 @@ impl<Types: NodeType> LeafQueryData<Types> {
        // TODO: Replace with the new `commit` function in HotShot. Add an `upgrade_lock` parameter
        // and a `HsVer: Versions` bound, then call `leaf.commit(upgrade_lock).await`. This will
        // require updates in callers and relevant types as well.
- as Committable>::commit(&self.leaf) + as Committable>::commit(&self.leaf) } pub fn block_hash(&self) -> BlockHash { @@ -326,7 +325,7 @@ impl BlockQueryData { where Payload: QueryablePayload, { - let leaf = Leaf::::genesis::(validated_state, instance_state).await; + let leaf = Leaf2::::genesis::(validated_state, instance_state).await; Self::new(leaf.block_header().clone(), leaf.block_payload().unwrap()) } diff --git a/hotshot-query-service/src/data_source.rs b/hotshot-query-service/src/data_source.rs index b6c707665d..3d45b8cd00 100644 --- a/hotshot-query-service/src/data_source.rs +++ b/hotshot-query-service/src/data_source.rs @@ -133,7 +133,7 @@ pub mod availability_tests { }; use committable::Committable; use futures::stream::StreamExt; - use hotshot_types::data::Leaf; + use hotshot_types::data::Leaf2; use std::collections::HashMap; use std::fmt::Debug; use std::ops::{Bound, RangeBounds}; @@ -148,7 +148,7 @@ pub mod availability_tests { assert_eq!(leaf.height(), i as u64); assert_eq!( leaf.hash(), - as Committable>::commit(&leaf.leaf) + as Committable>::commit(&leaf.leaf) ); // Check indices. @@ -550,11 +550,11 @@ pub mod persistence_tests { setup_test, }, types::HeightIndexed, - Leaf, + Leaf2, }; use committable::Committable; use hotshot_example_types::state_types::{TestInstanceState, TestValidatedState}; - use hotshot_types::simple_certificate::QuorumCertificate; + use hotshot_types::simple_certificate::QuorumCertificate2; #[tokio::test(flavor = "multi_thread")] pub async fn test_revert() @@ -571,12 +571,12 @@ pub mod persistence_tests { let ds = D::connect(&storage).await; // Mock up some consensus data. - let mut qc = QuorumCertificate::::genesis::( + let mut qc = QuorumCertificate2::::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) .await; - let mut leaf = Leaf::::genesis::( + let mut leaf = Leaf2::::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) @@ -584,7 +584,7 @@ pub mod persistence_tests { // Increment the block number, to distinguish this block from the genesis block, which // already exists. leaf.block_header_mut().block_number += 1; - qc.data.leaf_commit = as Committable>::commit(&leaf); + qc.data.leaf_commit = as Committable>::commit(&leaf); let block = BlockQueryData::new(leaf.block_header().clone(), MockPayload::genesis()); let leaf = LeafQueryData::new(leaf, qc).unwrap(); @@ -623,12 +623,12 @@ pub mod persistence_tests { let ds = D::connect(&storage).await; // Mock up some consensus data. - let mut qc = QuorumCertificate::::genesis::( + let mut qc = QuorumCertificate2::::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) .await; - let mut leaf = Leaf::::genesis::( + let mut leaf = Leaf2::::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) @@ -636,7 +636,7 @@ pub mod persistence_tests { // Increment the block number, to distinguish this block from the genesis block, which // already exists. leaf.block_header_mut().block_number += 1; - qc.data.leaf_commit = as Committable>::commit(&leaf); + qc.data.leaf_commit = as Committable>::commit(&leaf); let block = BlockQueryData::new(leaf.block_header().clone(), MockPayload::genesis()); let leaf = LeafQueryData::new(leaf, qc).unwrap(); @@ -686,12 +686,12 @@ pub mod persistence_tests { let ds = D::connect(&storage).await; // Mock up some consensus data. 
- let mut mock_qc = QuorumCertificate::::genesis::( + let mut mock_qc = QuorumCertificate2::::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) .await; - let mut mock_leaf = Leaf::::genesis::( + let mut mock_leaf = Leaf2::::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) @@ -699,7 +699,7 @@ pub mod persistence_tests { // Increment the block number, to distinguish this block from the genesis block, which // already exists. mock_leaf.block_header_mut().block_number += 1; - mock_qc.data.leaf_commit = as Committable>::commit(&mock_leaf); + mock_qc.data.leaf_commit = as Committable>::commit(&mock_leaf); let block = BlockQueryData::new(mock_leaf.block_header().clone(), MockPayload::genesis()); let leaf = LeafQueryData::new(mock_leaf.clone(), mock_qc.clone()).unwrap(); @@ -725,7 +725,7 @@ pub mod persistence_tests { // Get a mutable transaction again, insert different data. mock_leaf.block_header_mut().block_number += 1; - mock_qc.data.leaf_commit = as Committable>::commit(&mock_leaf); + mock_qc.data.leaf_commit = as Committable>::commit(&mock_leaf); let block = BlockQueryData::new(mock_leaf.block_header().clone(), MockPayload::genesis()); let leaf = LeafQueryData::new(mock_leaf, mock_qc).unwrap(); @@ -771,7 +771,7 @@ pub mod node_tests { setup_test, sleep, }, types::HeightIndexed, - Header, VidCommitment, VidShare, + Header, }; use committable::Committable; use futures::{future::join_all, stream::StreamExt}; @@ -784,7 +784,7 @@ pub mod node_tests { state_types::TestInstanceState, }; use hotshot_types::{ - data::vid_commitment, + data::{vid_commitment, VidCommitment, VidShare}, traits::{block_contents::EncodeBytes, node_implementation::Versions}, vid::advz::{advz_scheme, ADVZScheme}, }; diff --git a/hotshot-query-service/src/data_source/extension.rs b/hotshot-query-service/src/data_source/extension.rs index 81de4a4293..8eb38d4668 100644 --- a/hotshot-query-service/src/data_source/extension.rs +++ b/hotshot-query-service/src/data_source/extension.rs @@ -27,9 +27,10 @@ use crate::{ metrics::PrometheusMetrics, node::{NodeDataSource, SyncStatus, TimeWindowQueryData, WindowStart}, status::{HasMetrics, StatusDataSource}, - Header, Payload, QueryResult, Transaction, VidShare, + Header, Payload, QueryResult, Transaction, }; use async_trait::async_trait; +use hotshot_types::data::VidShare; use hotshot_types::traits::node_implementation::NodeType; use jf_merkle_tree::prelude::MerkleProof; use std::ops::{Bound, RangeBounds}; diff --git a/hotshot-query-service/src/data_source/fetching.rs b/hotshot-query-service/src/data_source/fetching.rs index d6a7ce8012..e98fa236df 100644 --- a/hotshot-query-service/src/data_source/fetching.rs +++ b/hotshot-query-service/src/data_source/fetching.rs @@ -77,6 +77,7 @@ use super::{ notifier::Notifier, storage::{ pruning::{PruneStorage, PrunedHeightDataSource, PrunedHeightStorage}, + sql::MigrateTypes, Aggregate, AggregatesStorage, AvailabilityStorage, ExplorerStorage, MerklizedStateHeightStorage, MerklizedStateStorage, NodeStorage, UpdateAggregatesStorage, UpdateAvailabilityStorage, @@ -101,7 +102,7 @@ use crate::{ status::{HasMetrics, StatusDataSource}, task::BackgroundTask, types::HeightIndexed, - Header, Payload, QueryError, QueryResult, VidShare, + Header, Payload, QueryError, QueryResult, }; use anyhow::{bail, Context}; use async_lock::Semaphore; @@ -113,9 +114,12 @@ use futures::{ future::{self, join_all, BoxFuture, Either, Future, FutureExt}, stream::{self, BoxStream, StreamExt}, }; -use hotshot_types::traits::{ - 
metrics::{Gauge, Metrics}, - node_implementation::NodeType, +use hotshot_types::{ + data::VidShare, + traits::{ + metrics::{Gauge, Metrics}, + node_implementation::NodeType, + }, }; use jf_merkle_tree::{prelude::MerkleProof, MerkleTreeScheme}; use std::sync::Arc; @@ -369,7 +373,7 @@ where Types: NodeType, Payload: QueryablePayload, Header: QueryableHeader, - S: PruneStorage + VersionedDataSource + HasMetrics + 'static, + S: PruneStorage + VersionedDataSource + HasMetrics + MigrateTypes + 'static, for<'a> S::ReadOnly<'a>: AvailabilityStorage + PrunedHeightStorage + NodeStorage + AggregatesStorage, for<'a> S::Transaction<'a>: UpdateAvailabilityStorage + UpdateAggregatesStorage, @@ -482,7 +486,7 @@ where Types: NodeType, Payload: QueryablePayload, Header: QueryableHeader, - S: VersionedDataSource + PruneStorage + HasMetrics + 'static, + S: VersionedDataSource + PruneStorage + HasMetrics + MigrateTypes + 'static, for<'a> S::Transaction<'a>: UpdateAvailabilityStorage + UpdateAggregatesStorage, for<'a> S::ReadOnly<'a>: AvailabilityStorage + NodeStorage + PrunedHeightStorage + AggregatesStorage, @@ -510,6 +514,13 @@ where let aggregator_metrics = AggregatorMetrics::new(builder.storage.metrics()); let fetcher = Arc::new(Fetcher::new(builder).await?); + + // Migrate the old types to new PoS types + // This is a one-time operation that should be done before starting the data source + // It migrates leaf1 storage to leaf2 + // and vid to vid2 + fetcher.storage.migrate_types().await?; + let scanner = if proactive_fetching && !leaf_only { Some(BackgroundTask::spawn( "proactive scanner", diff --git a/hotshot-query-service/src/data_source/fetching/vid.rs b/hotshot-query-service/src/data_source/fetching/vid.rs index 7849ba2a77..51c948c79f 100644 --- a/hotshot-query-service/src/data_source/fetching/vid.rs +++ b/hotshot-query-service/src/data_source/fetching/vid.rs @@ -28,13 +28,16 @@ use crate::{ }, fetching::{self, request, Callback}, types::HeightIndexed, - Header, Payload, QueryResult, VidShare, + Header, Payload, QueryResult, }; use async_trait::async_trait; use derivative::Derivative; use derive_more::From; use futures::future::{BoxFuture, FutureExt}; -use hotshot_types::traits::{block_contents::BlockHeader, node_implementation::NodeType}; +use hotshot_types::{ + data::VidShare, + traits::{block_contents::BlockHeader, node_implementation::NodeType}, +}; use std::sync::Arc; use std::{cmp::Ordering, future::IntoFuture, iter::once, ops::RangeBounds}; diff --git a/hotshot-query-service/src/data_source/storage.rs b/hotshot-query-service/src/data_source/storage.rs index 027b6e964f..227950d9e7 100644 --- a/hotshot-query-service/src/data_source/storage.rs +++ b/hotshot-query-service/src/data_source/storage.rs @@ -74,11 +74,11 @@ use crate::{ }, merklized_state::{MerklizedState, Snapshot}, node::{SyncStatus, TimeWindowQueryData, WindowStart}, - Header, Payload, QueryResult, Transaction, VidShare, + Header, Payload, QueryResult, Transaction, }; use async_trait::async_trait; use futures::future::Future; -use hotshot_types::traits::node_implementation::NodeType; +use hotshot_types::{data::VidShare, traits::node_implementation::NodeType}; use jf_merkle_tree::prelude::MerkleProof; use std::ops::RangeBounds; use tagged_base64::TaggedBase64; diff --git a/hotshot-query-service/src/data_source/storage/fail_storage.rs b/hotshot-query-service/src/data_source/storage/fail_storage.rs index 1b366bd6b2..8398e8c303 100644 --- a/hotshot-query-service/src/data_source/storage/fail_storage.rs +++ 
b/hotshot-query-service/src/data_source/storage/fail_storage.rs @@ -14,6 +14,7 @@ use super::{ pruning::{PruneStorage, PrunedHeightStorage, PrunerCfg, PrunerConfig}, + sql::MigrateTypes, Aggregate, AggregatesStorage, AvailabilityStorage, NodeStorage, UpdateAggregatesStorage, UpdateAvailabilityStorage, }; @@ -29,12 +30,12 @@ use crate::{ metrics::PrometheusMetrics, node::{SyncStatus, TimeWindowQueryData, WindowStart}, status::HasMetrics, - Header, Payload, QueryError, QueryResult, VidShare, + Header, Payload, QueryError, QueryResult, }; use async_lock::Mutex; use async_trait::async_trait; use futures::future::Future; -use hotshot_types::traits::node_implementation::NodeType; +use hotshot_types::{data::VidShare, traits::node_implementation::NodeType}; use std::ops::RangeBounds; use std::sync::Arc; @@ -253,6 +254,16 @@ where } } +#[async_trait] +impl MigrateTypes for FailStorage +where + S: MigrateTypes + Sync, +{ + async fn migrate_types(&self) -> anyhow::Result<()> { + Ok(()) + } +} + #[async_trait] impl PruneStorage for FailStorage where diff --git a/hotshot-query-service/src/data_source/storage/fs.rs b/hotshot-query-service/src/data_source/storage/fs.rs index 296261666b..3e58a04911 100644 --- a/hotshot-query-service/src/data_source/storage/fs.rs +++ b/hotshot-query-service/src/data_source/storage/fs.rs @@ -15,6 +15,7 @@ use super::{ ledger_log::{Iter, LedgerLog}, pruning::{PruneStorage, PrunedHeightStorage, PrunerConfig}, + sql::MigrateTypes, Aggregate, AggregatesStorage, AvailabilityStorage, NodeStorage, PayloadMetadata, UpdateAggregatesStorage, UpdateAvailabilityStorage, VidCommonMetadata, }; @@ -33,14 +34,16 @@ use crate::{ status::HasMetrics, types::HeightIndexed, ErrorSnafu, Header, MissingSnafu, NotFoundSnafu, Payload, QueryError, QueryResult, - VidCommitment, VidShare, }; use async_lock::{RwLock, RwLockReadGuard, RwLockWriteGuard}; use async_trait::async_trait; use atomic_store::{AtomicStore, AtomicStoreLoader, PersistenceError}; use committable::Committable; use futures::future::Future; -use hotshot_types::traits::{block_contents::BlockHeader, node_implementation::NodeType}; +use hotshot_types::{ + data::{VidCommitment, VidShare}, + traits::{block_contents::BlockHeader, node_implementation::NodeType}, +}; use serde::{de::DeserializeOwned, Serialize}; use snafu::OptionExt; use std::collections::{ @@ -133,6 +136,16 @@ where type Pruner = (); } +#[async_trait] +impl MigrateTypes for FileSystemStorage +where + Payload: QueryablePayload, +{ + async fn migrate_types(&self) -> anyhow::Result<()> { + Ok(()) + } +} + impl FileSystemStorage where Payload: QueryablePayload, diff --git a/hotshot-query-service/src/data_source/storage/sql.rs b/hotshot-query-service/src/data_source/storage/sql.rs index 3bb20aa98a..b12cb5efbb 100644 --- a/hotshot-query-service/src/data_source/storage/sql.rs +++ b/hotshot-query-service/src/data_source/storage/sql.rs @@ -11,7 +11,6 @@ // see . 
#![cfg(feature = "sql-data-source")]
-
use crate::{
    data_source::{
        storage::pruning::{PruneStorage, PrunerCfg, PrunerConfig},
        VersionedDataSource,
    },
@@ -22,10 +21,17 @@ use crate::{
    status::HasMetrics,
    QueryError, QueryResult,
};
+use anyhow::Context;
use async_trait::async_trait;
use chrono::Utc;
+use committable::Committable;
+use hotshot_types::{
+    data::{Leaf, Leaf2, VidShare},
+    simple_certificate::{QuorumCertificate, QuorumCertificate2},
+    traits::{metrics::Metrics, node_implementation::NodeType},
+    vid::advz::ADVZShare,
+};
-use hotshot_types::traits::metrics::Metrics;
use itertools::Itertools;
use log::LevelFilter;
@@ -810,6 +816,159 @@ impl VersionedDataSource for SqlStorage {
    }
}

+#[async_trait]
+pub trait MigrateTypes<Types: NodeType> {
+    async fn migrate_types(&self) -> anyhow::Result<()>;
+}
+
+#[async_trait]
+impl<Types: NodeType> MigrateTypes<Types> for SqlStorage {
+    async fn migrate_types(&self) -> anyhow::Result<()> {
+        let mut offset = 0;
+        let limit = 10000;
+        let mut tx = self.read().await.map_err(|err| QueryError::Error {
+            message: err.to_string(),
+        })?;
+
+        let (is_migration_completed,) =
+            query_as::<(bool,)>("SELECT completed from types_migration LIMIT 1 ")
+                .fetch_one(tx.as_mut())
+                .await?;
+
+        if is_migration_completed {
+            tracing::info!("types migration already completed");
+            return Ok(());
+        }
+
+        tracing::warn!("migrating query service types storage");
+
+        loop {
+            let mut tx = self.read().await.map_err(|err| QueryError::Error {
+                message: err.to_string(),
+            })?;
+
+            let rows = QueryBuilder::default()
+                .query(&format!(
+                    "SELECT leaf, qc, common as vid_common, share as vid_share FROM leaf INNER JOIN vid on leaf.height = vid.height ORDER BY leaf.height LIMIT {} OFFSET {}",
+                    limit, offset
+                ))
+                .fetch_all(tx.as_mut())
+                .await?;
+
+            drop(tx);
+
+            if rows.is_empty() {
+                break;
+            }
+
+            let mut leaf_rows = Vec::new();
+            let mut vid_rows = Vec::new();
+
+            for row in rows.iter() {
+                let leaf1 = row.try_get("leaf")?;
+                let qc = row.try_get("qc")?;
+                let leaf1: Leaf<Types> = serde_json::from_value(leaf1)?;
+                let qc: QuorumCertificate<Types> = serde_json::from_value(qc)?;
+
+                let leaf2: Leaf2<Types> = leaf1.into();
+                let qc2: QuorumCertificate2<Types> = qc.to_qc2();
+
+                let commit = leaf2.commit();
+
+                let leaf2_json =
+                    serde_json::to_value(leaf2.clone()).context("failed to serialize leaf2")?;
+                let qc2_json = serde_json::to_value(qc2).context("failed to serialize QC2")?;
+
+                // TODO (abdul): revisit after V1 VID has common field
+                let vid_common_bytes: Vec<u8> = row.try_get("vid_common")?;
+                let vid_share_bytes: Vec<u8> = row.try_get("vid_share")?;
+
+                let vid_share: ADVZShare = bincode::deserialize(&vid_share_bytes)
+                    .context("failed to deserialize vid_share")?;
+
+                let new_vid_share_bytes = bincode::serialize(&VidShare::V0(vid_share))
+                    .context("failed to serialize vid_share")?;
+
+                vid_rows.push((leaf2.height() as i64, vid_common_bytes, new_vid_share_bytes));
+                leaf_rows.push((
+                    leaf2.height() as i64,
+                    commit.to_string(),
+                    leaf2.block_header().commit().to_string(),
+                    leaf2_json,
+                    qc2_json,
+                ));
+            }
+
+            // migrate leaf2
+            let mut query_builder: sqlx::QueryBuilder<Db> =
+                sqlx::QueryBuilder::new("INSERT INTO leaf2 (height, hash, block_hash, leaf, qc) ");
+
+            query_builder.push_values(leaf_rows.into_iter(), |mut b, row| {
+                b.push_bind(row.0)
+                    .push_bind(row.1)
+                    .push_bind(row.2)
+                    .push_bind(row.3)
+                    .push_bind(row.4);
+            });
+
+            let query = query_builder.build();
+
+            let mut tx = self.write().await.map_err(|err| QueryError::Error {
+                message: err.to_string(),
+            })?;
+
+            query.execute(tx.as_mut()).await?;
+
+            tx.commit().await?;
+            tracing::warn!("inserted
 // These tests run the `postgres` Docker image, which doesn't work on Windows.
 #[cfg(all(any(test, feature = "testing"), not(target_os = "windows")))]
 pub mod testing {
@@ -827,8 +986,8 @@ pub mod testing {
     use portpicker::pick_unused_port;
 
     use super::Config;
+    use crate::availability::query_data::QueryableHeader;
     use crate::testing::sleep;
-
     #[derive(Debug)]
     pub struct TmpDb {
         #[cfg(not(feature = "embedded-db"))]
@@ -1116,23 +1275,38 @@ pub mod testing {
 // These tests run the `postgres` Docker image, which doesn't work on Windows.
 #[cfg(all(test, not(target_os = "windows")))]
 mod test {
+    use committable::{Commitment, CommitmentBoundsArkless, Committable};
+    use hotshot::traits::BlockPayload;
     use hotshot_example_types::{
         node_types::TestVersions,
         state_types::{TestInstanceState, TestValidatedState},
     };
+    use jf_vid::VidScheme;
+
+    use hotshot_types::{
+        data::vid_commitment,
+        traits::{node_implementation::Versions, EncodeBytes},
+        vid::advz::advz_scheme,
+    };
+    use hotshot_types::{
+        data::{QuorumProposal, ViewNumber},
+        simple_vote::QuorumData,
+        traits::{block_contents::BlockHeader, node_implementation::ConsensusTime},
+    };
     use jf_merkle_tree::{
         prelude::UniversalMerkleTree, MerkleTreeScheme, ToTraversalPath,
         UniversalMerkleTreeScheme,
     };
     use std::time::Duration;
     use tokio::time::sleep;
+    use vbs::version::StaticVersionType;
 
     use super::{testing::TmpDb, *};
     use crate::{
-        availability::LeafQueryData,
+        availability::{LeafQueryData, QueryableHeader},
         data_source::storage::{pruning::PrunedHeightStorage, UpdateAvailabilityStorage},
         merklized_state::{MerklizedState, UpdateStateData},
         testing::{
-            mocks::{MockMerkleTree, MockTypes},
+            mocks::{MockHeader, MockMerkleTree, MockPayload, MockTypes, MockVersions},
             setup_test,
         },
     };
@@ -1509,4 +1683,156 @@ mod test {
         );
     }
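+
+    // End-to-end coverage for the migration: the test below seeds `num_rows`
+    // legacy rows into the old `leaf`/`vid` tables (Leaf1 JSON plus
+    // bincode-encoded ADVZ VID data), runs `migrate_types`, then verifies that
+    // every row shows up in the new `leaf2`/`vid2` tables.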
+    #[tokio::test(flavor = "multi_thread")]
+    async fn test_types_migration() {
+        setup_test();
+
+        let num_rows = 200;
+        let db = TmpDb::init().await;
+
+        let storage = SqlStorage::connect(db.config()).await.unwrap();
+
+        for i in 0..num_rows {
+            let view = ViewNumber::new(i);
+            let validated_state = TestValidatedState::default();
+            let instance_state = TestInstanceState::default();
+
+            let (payload, metadata) = <MockPayload as BlockPayload<MockTypes>>::from_transactions(
+                [],
+                &validated_state,
+                &instance_state,
+            )
+            .await
+            .unwrap();
+            let builder_commitment =
+                <MockPayload as BlockPayload<MockTypes>>::builder_commitment(&payload, &metadata);
+            let payload_bytes = payload.encode();
+
+            let payload_commitment = vid_commitment::<TestVersions>(
+                &payload_bytes,
+                &metadata.encode(),
+                4,
+                <TestVersions as Versions>::Base::VERSION,
+            );
+
+            let mut block_header = <MockHeader as BlockHeader<MockTypes>>::genesis(
+                &instance_state,
+                payload_commitment,
+                builder_commitment,
+                metadata,
+            );
+
+            block_header.block_number = i;
+
+            let null_quorum_data = QuorumData::<MockTypes> {
+                leaf_commit: Commitment::<Leaf<MockTypes>>::default_commitment_no_preimage(),
+            };
+
+            let mut qc = QuorumCertificate::new(
+                null_quorum_data.clone(),
+                null_quorum_data.commit(),
+                view,
+                None,
+                std::marker::PhantomData,
+            );
+
+            let quorum_proposal = QuorumProposal {
+                block_header,
+                view_number: view,
+                justify_qc: qc.clone(),
+                upgrade_certificate: None,
+                proposal_certificate: None,
+            };
+
+            let mut leaf = Leaf::from_quorum_proposal(&quorum_proposal);
+            leaf.fill_block_payload::<TestVersions>(
+                payload.clone(),
+                4,
+                <TestVersions as Versions>::Base::VERSION,
+            )
+            .unwrap();
+            qc.data.leaf_commit = <Leaf<MockTypes> as Committable>::commit(&leaf);
+
+            let height = leaf.height() as i64;
+            let hash = <Leaf<MockTypes> as Committable>::commit(&leaf).to_string();
+            let header = leaf.block_header();
+
+            let header_json = serde_json::to_value(header)
+                .context("failed to serialize header")
+                .unwrap();
+
+            let payload_commitment =
+                <MockHeader as BlockHeader<MockTypes>>::payload_commitment(header);
+            let mut tx = storage.write().await.unwrap();
+
+            tx.upsert(
+                "header",
+                ["height", "hash", "payload_hash", "data", "timestamp"],
+                ["height"],
+                [(
+                    height,
+                    leaf.block_header().commit().to_string(),
+                    payload_commitment.to_string(),
+                    header_json,
+                    leaf.block_header().timestamp() as i64,
+                )],
+            )
+            .await
+            .unwrap();
+
+            let leaf_json = serde_json::to_value(leaf.clone()).expect("failed to serialize leaf");
+            let qc_json = serde_json::to_value(qc).expect("failed to serialize QC");
+            tx.upsert(
+                "leaf",
+                ["height", "hash", "block_hash", "leaf", "qc"],
+                ["height"],
+                [(
+                    height,
+                    hash,
+                    header.commit().to_string(),
+                    leaf_json,
+                    qc_json,
+                )],
+            )
+            .await
+            .unwrap();
+
+            let mut vid = advz_scheme(2);
+            let disperse = vid.disperse(payload.encode()).unwrap();
+            let common = Some(disperse.common);
+            let share = disperse.shares[0].clone();
+
+            let common_bytes = bincode::serialize(&common).unwrap();
+            let share_bytes = bincode::serialize(&share).unwrap();
+
+            tx.upsert(
+                "vid",
+                ["height", "common", "share"],
+                ["height"],
+                [(height, common_bytes, share_bytes)],
+            )
+            .await
+            .unwrap();
+            tx.commit().await.unwrap();
+        }
+
+        <SqlStorage as MigrateTypes<MockTypes>>::migrate_types(&storage)
+            .await
+            .expect("failed to migrate");
+
+        let mut tx = storage.read().await.unwrap();
+        let (leaf_count,) = query_as::<(i64,)>("SELECT COUNT(*) from leaf2")
+            .fetch_one(tx.as_mut())
+            .await
+            .unwrap();
+
+        let (vid_count,) = query_as::<(i64,)>("SELECT COUNT(*) from vid2")
+            .fetch_one(tx.as_mut())
+            .await
+            .unwrap();
+
+        assert_eq!(leaf_count as u64, num_rows, "not all leaves migrated");
+        assert_eq!(vid_count as u64, num_rows, "not all vid migrated");
+    }
 }
diff --git a/hotshot-query-service/src/data_source/storage/sql/queries.rs b/hotshot-query-service/src/data_source/storage/sql/queries.rs
index b8c227b752..696aca5ba3 100644
--- a/hotshot-query-service/src/data_source/storage/sql/queries.rs
+++ b/hotshot-query-service/src/data_source/storage/sql/queries.rs
@@ -14,22 +14,21 @@
 //! Immutable query functionality of a SQL database.
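+//!
+//! With the types migration in place, leaves are decoded from the `leaf2`
+//! table as [`Leaf2`] with [`QuorumCertificate2`], and VID data is read from
+//! `vid2`; the queries below reflect that.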
use super::{Database, Db, Query, QueryAs, Transaction}; +use crate::Leaf2; use crate::{ availability::{ BlockId, BlockQueryData, LeafQueryData, PayloadQueryData, QueryablePayload, VidCommonQueryData, }, data_source::storage::{PayloadMetadata, VidCommonMetadata}, - Header, Leaf, Payload, QueryError, QueryResult, + Header, Payload, QueryError, QueryResult, }; use anyhow::Context; use derivative::Derivative; -use hotshot_types::{ - simple_certificate::QuorumCertificate, - traits::{ - block_contents::{BlockHeader, BlockPayload}, - node_implementation::NodeType, - }, +use hotshot_types::simple_certificate::QuorumCertificate2; +use hotshot_types::traits::{ + block_contents::{BlockHeader, BlockPayload}, + node_implementation::NodeType, }; use sqlx::{Arguments, FromRow, Row}; use std::{ @@ -171,10 +170,10 @@ where { fn from_row(row: &'r ::Row) -> sqlx::Result { let leaf = row.try_get("leaf")?; - let leaf: Leaf = serde_json::from_value(leaf).decode_error("malformed leaf")?; + let leaf: Leaf2 = serde_json::from_value(leaf).decode_error("malformed leaf")?; let qc = row.try_get("qc")?; - let qc: QuorumCertificate = + let qc: QuorumCertificate2 = serde_json::from_value(qc).decode_error("malformed QC")?; Ok(Self { leaf, qc }) diff --git a/hotshot-query-service/src/data_source/storage/sql/queries/availability.rs b/hotshot-query-service/src/data_source/storage/sql/queries/availability.rs index 759296fb87..0a51d28bee 100644 --- a/hotshot-query-service/src/data_source/storage/sql/queries/availability.rs +++ b/hotshot-query-service/src/data_source/storage/sql/queries/availability.rs @@ -50,7 +50,7 @@ where }; let row = query .query(&format!( - "SELECT {LEAF_COLUMNS} FROM leaf WHERE {where_clause}" + "SELECT {LEAF_COLUMNS} FROM leaf2 WHERE {where_clause}" )) .fetch_one(self.as_mut()) .await?; @@ -134,7 +134,7 @@ where let sql = format!( "SELECT {VID_COMMON_COLUMNS} FROM header AS h - JOIN vid AS v ON h.height = v.height + JOIN vid2 AS v ON h.height = v.height WHERE {where_clause} ORDER BY h.height LIMIT 1" @@ -155,7 +155,7 @@ where let sql = format!( "SELECT {VID_COMMON_METADATA_COLUMNS} FROM header AS h - JOIN vid AS v ON h.height = v.height + JOIN vid2 AS v ON h.height = v.height WHERE {where_clause} ORDER BY h.height ASC LIMIT 1" @@ -174,7 +174,7 @@ where { let mut query = QueryBuilder::default(); let where_clause = query.bounds_to_where_clause(range, "height")?; - let sql = format!("SELECT {LEAF_COLUMNS} FROM leaf {where_clause} ORDER BY height"); + let sql = format!("SELECT {LEAF_COLUMNS} FROM leaf2 {where_clause} ORDER BY height"); Ok(query .query(&sql) .fetch(self.as_mut()) @@ -296,7 +296,7 @@ where let sql = format!( "SELECT {VID_COMMON_COLUMNS} FROM header AS h - JOIN vid AS v ON h.height = v.height + JOIN vid2 AS v ON h.height = v.height {where_clause} ORDER BY h.height" ); @@ -321,7 +321,7 @@ where let sql = format!( "SELECT {VID_COMMON_METADATA_COLUMNS} FROM header AS h - JOIN vid AS v ON h.height = v.height + JOIN vid2 AS v ON h.height = v.height {where_clause} ORDER BY h.height ASC" ); @@ -367,7 +367,7 @@ where async fn first_available_leaf(&mut self, from: u64) -> QueryResult> { let row = query(&format!( - "SELECT {LEAF_COLUMNS} FROM leaf WHERE height >= $1 ORDER BY height LIMIT 1" + "SELECT {LEAF_COLUMNS} FROM leaf2 WHERE height >= $1 ORDER BY height LIMIT 1" )) .bind(from as i64) .fetch_one(self.as_mut()) diff --git a/hotshot-query-service/src/data_source/storage/sql/queries/node.rs b/hotshot-query-service/src/data_source/storage/sql/queries/node.rs index ca8874c179..326d29e695 100644 --- 
a/hotshot-query-service/src/data_source/storage/sql/queries/node.rs +++ b/hotshot-query-service/src/data_source/storage/sql/queries/node.rs @@ -22,12 +22,15 @@ use crate::{ }, node::{BlockId, SyncStatus, TimeWindowQueryData, WindowStart}, types::HeightIndexed, - Header, MissingSnafu, NotFoundSnafu, QueryError, QueryResult, VidShare, + Header, MissingSnafu, NotFoundSnafu, QueryError, QueryResult, }; use anyhow::anyhow; use async_trait::async_trait; use futures::stream::{StreamExt, TryStreamExt}; -use hotshot_types::traits::{block_contents::BlockHeader, node_implementation::NodeType}; +use hotshot_types::{ + data::VidShare, + traits::{block_contents::BlockHeader, node_implementation::NodeType}, +}; use snafu::OptionExt; use sqlx::Row; use std::ops::{Bound, RangeBounds}; @@ -115,7 +118,7 @@ where // ORDER BY h.height ASC ensures that if there are duplicate blocks (this can happen when // selecting by payload ID, as payloads are not unique), we return the first one. let sql = format!( - "SELECT v.share AS share FROM vid AS v + "SELECT v.share AS share FROM vid2 AS v JOIN header AS h ON v.height = h.height WHERE {where_clause} ORDER BY h.height @@ -155,10 +158,10 @@ where // need to select the total number of VID rows and the number of present VID rows with a // NULL share. let sql = "SELECT l.max_height, l.total_leaves, p.null_payloads, v.total_vid, vn.null_vid, pruned_height FROM - (SELECT max(leaf.height) AS max_height, count(*) AS total_leaves FROM leaf) AS l, + (SELECT max(leaf2.height) AS max_height, count(*) AS total_leaves FROM leaf2) AS l, (SELECT count(*) AS null_payloads FROM payload WHERE data IS NULL) AS p, - (SELECT count(*) AS total_vid FROM vid) AS v, - (SELECT count(*) AS null_vid FROM vid WHERE share IS NULL) AS vn, + (SELECT count(*) AS total_vid FROM vid2) AS v, + (SELECT count(*) AS null_vid FROM vid2 WHERE share IS NULL) AS vn, (SELECT(SELECT last_height FROM pruned_height ORDER BY id DESC LIMIT 1) as pruned_height) "; let row = query(sql) diff --git a/hotshot-query-service/src/data_source/storage/sql/transaction.rs b/hotshot-query-service/src/data_source/storage/sql/transaction.rs index 105df2586a..f7443e36a7 100644 --- a/hotshot-query-service/src/data_source/storage/sql/transaction.rs +++ b/hotshot-query-service/src/data_source/storage/sql/transaction.rs @@ -36,7 +36,7 @@ use crate::{ }, merklized_state::{MerklizedState, UpdateStateData}, types::HeightIndexed, - Header, Payload, QueryError, QueryResult, VidShare, + Header, Payload, QueryError, QueryResult, }; use anyhow::{bail, Context}; use ark_serialize::CanonicalSerialize; @@ -44,11 +44,14 @@ use async_trait::async_trait; use committable::Committable; use derive_more::{Deref, DerefMut}; use futures::{future::Future, stream::TryStreamExt}; -use hotshot_types::traits::{ - block_contents::BlockHeader, - metrics::{Counter, Gauge, Histogram, Metrics}, - node_implementation::NodeType, - EncodeBytes, +use hotshot_types::{ + data::VidShare, + traits::{ + block_contents::BlockHeader, + metrics::{Counter, Gauge, Histogram, Metrics}, + node_implementation::NodeType, + EncodeBytes, + }, }; use itertools::Itertools; use jf_merkle_tree::prelude::{MerkleNode, MerkleProof}; @@ -509,7 +512,7 @@ where let leaf_json = serde_json::to_value(leaf.leaf()).context("failed to serialize leaf")?; let qc_json = serde_json::to_value(leaf.qc()).context("failed to serialize QC")?; self.upsert( - "leaf", + "leaf2", ["height", "hash", "block_hash", "leaf", "qc"], ["height"], [( @@ -603,7 +606,7 @@ where if let Some(share) = share { let share_data = 
bincode::serialize(&share).context("failed to serialize VID share")?; self.upsert( - "vid", + "vid2", ["height", "common", "share"], ["height"], [(height as i64, common_data, share_data)], @@ -614,7 +617,7 @@ where // possible that this column already exists, and we are just upserting the common data, // in which case we don't want to overwrite the share with NULL. self.upsert( - "vid", + "vid2", ["height", "common"], ["height"], [(height as i64, common_data)], diff --git a/hotshot-query-service/src/data_source/update.rs b/hotshot-query-service/src/data_source/update.rs index b1ddd13033..0f2da97126 100644 --- a/hotshot-query-service/src/data_source/update.rs +++ b/hotshot-query-service/src/data_source/update.rs @@ -16,47 +16,25 @@ use crate::{ BlockInfo, BlockQueryData, LeafQueryData, QueryablePayload, UpdateAvailabilityData, VidCommonQueryData, }, - Payload, VidShare, + Payload, }; use anyhow::{ensure, Context}; use async_trait::async_trait; use futures::future::Future; use hotshot::types::{Event, EventType}; +use hotshot_types::data::{VidDisperseShare, VidShare}; use hotshot_types::{ - data::{Leaf, Leaf2, QuorumProposal}, + data::Leaf2, traits::{ block_contents::{BlockHeader, BlockPayload, EncodeBytes, GENESIS_VID_NUM_STORAGE_NODES}, node_implementation::{ConsensusTime, NodeType}, }, vid::advz::advz_scheme, }; -use hotshot_types::{ - data::{VidCommitment, VidDisperseShare}, - event::LeafInfo, -}; +use hotshot_types::{data::VidCommitment, event::LeafInfo}; use jf_vid::VidScheme; use std::iter::once; -fn downgrade_leaf(leaf2: Leaf2) -> Leaf { - // TODO do we still need some check here? - // `drb_seed` no longer exists on `Leaf2` - // if leaf2.drb_seed != [0; 32] && leaf2.drb_result != [0; 32] { - // panic!("Downgrade of Leaf2 to Leaf will lose DRB information!"); - // } - let quorum_proposal = QuorumProposal { - block_header: leaf2.block_header().clone(), - view_number: leaf2.view_number(), - justify_qc: leaf2.justify_qc().to_qc(), - upgrade_certificate: leaf2.upgrade_certificate(), - proposal_certificate: None, - }; - let mut leaf = Leaf::from_quorum_proposal(&quorum_proposal); - if let Some(payload) = leaf2.block_payload() { - leaf.fill_block_payload_unchecked(payload); - } - leaf -} - /// An extension trait for types which implement the update trait for each API module. 
 ///
 /// If a type implements [UpdateAvailabilityData] and
@@ -116,24 +94,23 @@ where
             },
         ) in qcs.zip(leaf_chain.iter().rev())
         {
-            let leaf = downgrade_leaf(leaf2.clone());
-            let qc = qc2.to_qc();
-            let height = leaf.block_header().block_number();
-            let leaf_data = match LeafQueryData::new(leaf.clone(), qc.clone()) {
+            let height = leaf2.block_header().block_number();
+
+            let leaf_data = match LeafQueryData::new(leaf2.clone(), qc2.clone()) {
                 Ok(leaf) => leaf,
                 Err(err) => {
                     tracing::error!(
                         height,
-                        ?leaf,
-                        ?qc,
+                        ?leaf2,
+                        ?qc2,
                         "inconsistent leaf; cannot append leaf information: {err:#}"
                     );
-                    return Err(leaf.block_header().block_number());
+                    return Err(leaf2.block_header().block_number());
                 }
             };
-            let block_data = leaf
+            let block_data = leaf2
                 .block_payload()
-                .map(|payload| BlockQueryData::new(leaf.block_header().clone(), payload));
+                .map(|payload| BlockQueryData::new(leaf2.block_header().clone(), payload));
             if block_data.is_none() {
                 tracing::info!(height, "block not available at decide");
             }
@@ -141,22 +118,22 @@ where
             let (vid_common, vid_share) = match vid_share {
                 Some(VidDisperseShare::V0(share)) => (
                     Some(VidCommonQueryData::new(
-                        leaf.block_header().clone(),
+                        leaf2.block_header().clone(),
                         Some(share.common.clone()),
                     )),
                     Some(VidShare::V0(share.share.clone())),
                 ),
                 Some(VidDisperseShare::V1(share)) => (
-                    Some(VidCommonQueryData::new(leaf.block_header().clone(), None)),
+                    Some(VidCommonQueryData::new(leaf2.block_header().clone(), None)),
                     Some(VidShare::V1(share.share.clone())),
                 ),
                 None => {
-                    if leaf.view_number().u64() == 0 {
+                    if leaf2.view_number().u64() == 0 {
                         // HotShot does not run VID in consensus for the genesis block. In this case,
                         // the block payload is guaranteed to always be empty, so VID isn't really
                         // necessary. But for consistency, we will still store the VID dispersal data,
                         // computing it ourselves based on the well-known genesis VID commitment.
- match genesis_vid(&leaf) { + match genesis_vid(leaf2) { Ok((common, share)) => (Some(common), Some(share)), Err(err) => { tracing::warn!("failed to compute genesis VID: {err:#}"); @@ -178,7 +155,7 @@ where .await { tracing::error!(height, "failed to append leaf information: {err:#}"); - return Err(leaf.block_header().block_number()); + return Err(leaf2.block_header().block_number()); } } } @@ -187,7 +164,7 @@ where } fn genesis_vid( - leaf: &Leaf, + leaf: &Leaf2, ) -> anyhow::Result<(VidCommonQueryData, VidShare)> { let payload = Payload::::empty().0; let bytes = payload.encode(); diff --git a/hotshot-query-service/src/explorer.rs b/hotshot-query-service/src/explorer.rs index 03ca26323c..ef259a83e5 100644 --- a/hotshot-query-service/src/explorer.rs +++ b/hotshot-query-service/src/explorer.rs @@ -878,6 +878,7 @@ mod test { ..Default::default() }, MockBase::instance(), + "0.0.1".parse().unwrap(), ) .unwrap(), ) diff --git a/hotshot-query-service/src/fetching/provider/any.rs b/hotshot-query-service/src/fetching/provider/any.rs index c1112cefe2..0896abdce4 100644 --- a/hotshot-query-service/src/fetching/provider/any.rs +++ b/hotshot-query-service/src/fetching/provider/any.rs @@ -234,7 +234,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); let _server = BackgroundTask::spawn( diff --git a/hotshot-query-service/src/fetching/provider/query_service.rs b/hotshot-query-service/src/fetching/provider/query_service.rs index 8e42bce3fd..dbf09aa900 100644 --- a/hotshot-query-service/src/fetching/provider/query_service.rs +++ b/hotshot-query-service/src/fetching/provider/query_service.rs @@ -16,12 +16,13 @@ use crate::{ availability::{LeafQueryData, PayloadQueryData, VidCommonQueryData}, fetching::request::{LeafRequest, PayloadRequest, VidCommonRequest}, types::HeightIndexed, - Error, Payload, VidCommitment, VidCommon, + Error, Payload, VidCommon, }; use async_trait::async_trait; use committable::Committable; use futures::try_join; use hotshot_types::{ + data::VidCommitment, traits::{node_implementation::NodeType, EncodeBytes}, vid::advz::{advz_scheme, ADVZScheme}, }; @@ -216,7 +217,7 @@ mod test { setup_test, sleep, }, types::HeightIndexed, - ApiState, VidCommitment, + ApiState, }; use committable::Committable; use futures::{ @@ -268,7 +269,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -491,7 +497,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -549,7 +560,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) 
.unwrap(); network.spawn( @@ -611,7 +627,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -670,7 +691,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -726,7 +752,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -797,7 +828,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -942,7 +978,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1110,7 +1151,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1209,7 +1255,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1301,7 +1352,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1365,7 +1421,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1423,7 +1484,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ 
-1499,7 +1565,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1589,7 +1660,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1656,7 +1732,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1728,7 +1809,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( diff --git a/hotshot-query-service/src/fetching/request.rs b/hotshot-query-service/src/fetching/request.rs index d0dd2710f2..be3d1de434 100644 --- a/hotshot-query-service/src/fetching/request.rs +++ b/hotshot-query-service/src/fetching/request.rs @@ -14,10 +14,10 @@ use crate::{ availability::{LeafHash, LeafQueryData, QcHash}, - Payload, VidCommitment, + Payload, }; use derive_more::{From, Into}; -use hotshot_types::traits::node_implementation::NodeType; +use hotshot_types::{data::VidCommitment, traits::node_implementation::NodeType}; use std::fmt::Debug; use std::hash::Hash; diff --git a/hotshot-query-service/src/lib.rs b/hotshot-query-service/src/lib.rs index 20a9e92846..5fafb197dd 100644 --- a/hotshot-query-service/src/lib.rs +++ b/hotshot-query-service/src/lib.rs @@ -446,11 +446,7 @@ use task::BackgroundTask; use tide_disco::{method::ReadState, App, StatusCode}; use vbs::version::StaticVersionType; -pub use hotshot_types::{ - data::Leaf, - data::{VidCommitment, VidShare}, - simple_certificate::QuorumCertificate, -}; +pub use hotshot_types::{data::Leaf2, simple_certificate::QuorumCertificate}; pub type VidCommon = Option; @@ -543,15 +539,28 @@ where ApiVer: StaticVersionType + 'static, { // Create API modules. - let availability_api = - availability::define_api(&options.availability, bind_version).map_err(Error::internal)?; + let availability_api_v0 = availability::define_api( + &options.availability, + bind_version, + "0.0.1".parse().unwrap(), + ) + .map_err(Error::internal)?; + + let availability_api_v1 = availability::define_api( + &options.availability, + bind_version, + "1.0.0".parse().unwrap(), + ) + .map_err(Error::internal)?; let node_api = node::define_api(&options.node, bind_version).map_err(Error::internal)?; let status_api = status::define_api(&options.status, bind_version).map_err(Error::internal)?; // Create app. let data_source = Arc::new(data_source); let mut app = App::<_, Error>::with_state(ApiState(data_source.clone())); - app.register_module("availability", availability_api) + app.register_module("availability", availability_api_v0) + .map_err(Error::internal)? 
+ .register_module("availability", availability_api_v1) .map_err(Error::internal)? .register_module("node", node_api) .map_err(Error::internal)? @@ -599,7 +608,7 @@ mod test { use async_trait::async_trait; use atomic_store::{load_store::BincodeLoadStore, AtomicStore, AtomicStoreLoader, RollingLog}; use futures::future::FutureExt; - use hotshot_types::simple_certificate::QuorumCertificate; + use hotshot_types::{data::VidShare, simple_certificate::QuorumCertificate2}; use portpicker::pick_unused_port; use std::ops::{Bound, RangeBounds}; use std::time::Duration; @@ -828,10 +837,10 @@ mod test { // Mock up some data and add a block to the store. let leaf = - Leaf::::genesis::(&Default::default(), &Default::default()) + Leaf2::::genesis::(&Default::default(), &Default::default()) .await; let qc = - QuorumCertificate::genesis::(&Default::default(), &Default::default()) + QuorumCertificate2::genesis::(&Default::default(), &Default::default()) .await; let leaf = LeafQueryData::new(leaf, qc).unwrap(); let block = BlockQueryData::new(leaf.header().clone(), MockPayload::genesis()); @@ -862,7 +871,12 @@ mod test { let mut app = App::<_, Error>::with_state(RwLock::new(state)); app.register_module( "availability", - availability::define_api(&Default::default(), MockBase::instance()).unwrap(), + availability::define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap() .register_module( diff --git a/hotshot-query-service/src/node.rs b/hotshot-query-service/src/node.rs index 91d3d7ef1b..6499cc3e36 100644 --- a/hotshot-query-service/src/node.rs +++ b/hotshot-query-service/src/node.rs @@ -210,13 +210,13 @@ mod test { mocks::{mock_transaction, MockBase, MockTypes}, setup_test, }, - ApiState, Error, Header, VidShare, + ApiState, Error, Header, }; use async_lock::RwLock; use committable::Committable; use futures::{FutureExt, StreamExt}; use hotshot_types::{ - data::VidDisperseShare, + data::{VidDisperseShare, VidShare}, event::{EventType, LeafInfo}, traits::{ block_contents::{BlockHeader, BlockPayload}, diff --git a/hotshot-query-service/src/node/data_source.rs b/hotshot-query-service/src/node/data_source.rs index 952a366196..a256b209bc 100644 --- a/hotshot-query-service/src/node/data_source.rs +++ b/hotshot-query-service/src/node/data_source.rs @@ -25,11 +25,11 @@ //! trait](crate::availability::UpdateAvailabilityData). use super::query_data::{BlockHash, BlockId, SyncStatus, TimeWindowQueryData}; -use crate::{Header, QueryResult, VidShare}; +use crate::{Header, QueryResult}; use async_trait::async_trait; use derivative::Derivative; use derive_more::From; -use hotshot_types::traits::node_implementation::NodeType; +use hotshot_types::{data::VidShare, traits::node_implementation::NodeType}; use std::ops::RangeBounds; #[derive(Derivative, From)] diff --git a/hotshot-types/src/data.rs b/hotshot-types/src/data.rs index a57bec5069..e61063227f 100644 --- a/hotshot-types/src/data.rs +++ b/hotshot-types/src/data.rs @@ -372,7 +372,7 @@ impl From for VidShare { } } -mod ns_table; +pub mod ns_table; pub mod vid_disperse; /// VID dispersal data diff --git a/hotshot-types/src/data/ns_table.rs b/hotshot-types/src/data/ns_table.rs index 2a53c1e272..c645c36957 100644 --- a/hotshot-types/src/data/ns_table.rs +++ b/hotshot-types/src/data/ns_table.rs @@ -17,7 +17,7 @@ const NS_ID_BYTE_LEN: usize = 4; /// If the namespace table is invalid, it returns a default single entry namespace table. 
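+/// Each returned range delimits one namespace's bytes within the payload, in
+/// table order.
+/// (This helper is now `pub` so downstream crates, e.g. the sequencer API,
+/// can parse namespace tables directly.)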
/// For details, please refer to `block/full_payload/ns_table.rs` in the `sequencer` crate. #[allow(clippy::single_range_in_vec_init)] -pub(crate) fn parse_ns_table(payload_byte_len: usize, bytes: &[u8]) -> Vec> { +pub fn parse_ns_table(payload_byte_len: usize, bytes: &[u8]) -> Vec> { let mut result = vec![]; if bytes.len() < NUM_NSS_BYTE_LEN || (bytes.len() - NUM_NSS_BYTE_LEN) % (NS_OFFSET_BYTE_LEN + NS_ID_BYTE_LEN) != 0 diff --git a/hotshot-types/src/traits/storage.rs b/hotshot-types/src/traits/storage.rs index 1b66b4d512..991ba8282f 100644 --- a/hotshot-types/src/traits/storage.rs +++ b/hotshot-types/src/traits/storage.rs @@ -155,13 +155,7 @@ pub trait Storage: Send + Sync + Clone { decided_upgrade_certificate: Option>, ) -> Result<()>; /// Migrate leaves from `Leaf` to `Leaf2`, and proposals from `QuorumProposal` to `QuorumProposal2` - async fn migrate_consensus( - &self, - _convert_leaf: fn(Leaf) -> Leaf2, - _convert_proposal: fn( - Proposal>, - ) -> Proposal>, - ) -> Result<()> { + async fn migrate_consensus(&self) -> Result<()> { Ok(()) } } diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 6b0e77b1cb..032c82edc5 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -54,9 +54,9 @@ use hotshot_types::{ ViewInner, }, constants::{EVENT_CHANNEL_SIZE, EXTERNAL_EVENT_CHANNEL_SIZE}, - data::{Leaf2, QuorumProposal, QuorumProposal2}, + data::Leaf2, event::{EventType, LeafInfo}, - message::{convert_proposal, DataMessage, Message, MessageKind, Proposal}, + message::{DataMessage, Message, MessageKind, Proposal}, simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate2, UpgradeCertificate}, traits::{ consensus_api::ConsensusApi, @@ -211,13 +211,7 @@ impl, V: Versions> SystemContext, ) -> Arc { #[allow(clippy::panic)] - match storage - .migrate_consensus( - Into::>::into, - convert_proposal::, QuorumProposal2>, - ) - .await - { + match storage.migrate_consensus().await { Ok(()) => {} Err(e) => { panic!("Failed to migrate consensus storage: {e}"); diff --git a/marketplace-builder-shared/src/testing/consensus.rs b/marketplace-builder-shared/src/testing/consensus.rs index 93845e7113..f4ee61cd00 100644 --- a/marketplace-builder-shared/src/testing/consensus.rs +++ b/marketplace-builder-shared/src/testing/consensus.rs @@ -20,7 +20,7 @@ use hotshot_types::{ data::vid_commitment, data::{DaProposal2, EpochNumber, Leaf2, QuorumProposal2, QuorumProposalWrapper, ViewNumber}, message::Proposal, - simple_certificate::{QuorumCertificate, SimpleCertificate, SuccessThreshold}, + simple_certificate::{QuorumCertificate2, SimpleCertificate, SuccessThreshold}, simple_vote::QuorumData2, traits::{ node_implementation::{ConsensusTime, Versions}, @@ -102,12 +102,13 @@ impl SimulatedChainState { }; let justify_qc = match self.previous_quorum_proposal.as_ref() { - None => QuorumCertificate::::genesis::( - &TestValidatedState::default(), - &TestInstanceState::default(), - ) - .await - .to_qc2(), + None => { + QuorumCertificate2::::genesis::( + &TestValidatedState::default(), + &TestInstanceState::default(), + ) + .await + } Some(prev_proposal) => { let prev_justify_qc = &prev_proposal.justify_qc(); let quorum_data = QuorumData2:: { diff --git a/marketplace-builder/src/builder.rs b/marketplace-builder/src/builder.rs index 8de45bf4d3..cd334f9c0e 100644 --- a/marketplace-builder/src/builder.rs +++ b/marketplace-builder/src/builder.rs @@ -231,9 +231,10 @@ mod test { events::{Error as EventStreamApiError, Options as EventStreamingApiOptions}, events_source::{EventConsumer, EventsStreamer}, }; - use 
hotshot_query_service::{availability::LeafQueryData, VidCommitment}; + use hotshot_query_service::availability::LeafQueryData; use hotshot_types::{ bundle::Bundle, + data::VidCommitment, event::LeafInfo, light_client::StateKeyPair, signature_key::BLSPubKey, diff --git a/node-metrics/Cargo.toml b/node-metrics/Cargo.toml index 0092500753..a325e748c6 100644 --- a/node-metrics/Cargo.toml +++ b/node-metrics/Cargo.toml @@ -21,6 +21,7 @@ clap = { workspace = true } espresso-types = { path = "../types" } futures = { workspace = true } hotshot = { workspace = true } +hotshot-example-types = { workspace = true } hotshot-query-service = { workspace = true } hotshot-stake-table = { workspace = true } tokio = { workspace = true } diff --git a/node-metrics/src/api/node_validator/v0/create_node_validator_api.rs b/node-metrics/src/api/node_validator/v0/create_node_validator_api.rs index eb05938302..c60d089e62 100644 --- a/node-metrics/src/api/node_validator/v0/create_node_validator_api.rs +++ b/node-metrics/src/api/node_validator/v0/create_node_validator_api.rs @@ -13,12 +13,12 @@ use crate::service::{ server_message::ServerMessage, }; use async_lock::RwLock; -use espresso_types::{downgrade_leaf, PubKey, SeqTypes}; +use espresso_types::{PubKey, SeqTypes}; use futures::{ channel::mpsc::{self, Receiver, SendError, Sender}, Sink, SinkExt, Stream, StreamExt, }; -use hotshot_query_service::Leaf; +use hotshot_query_service::Leaf2; use hotshot_types::event::{Event, EventType}; use serde::{Deserialize, Serialize}; use tokio::{spawn, task::JoinHandle}; @@ -88,7 +88,7 @@ impl HotShotEventProcessingTask { where S: Stream> + Send + Unpin + 'static, K1: Sink + Send + Unpin + 'static, - K2: Sink, Error = SendError> + Send + Unpin + 'static, + K2: Sink, Error = SendError> + Send + Unpin + 'static, { let task_handle = spawn(Self::process_messages( event_stream, @@ -107,7 +107,7 @@ impl HotShotEventProcessingTask { where S: Stream> + Send + Unpin + 'static, K1: Sink + Unpin, - K2: Sink, Error = SendError> + Unpin, + K2: Sink, Error = SendError> + Unpin, { let mut event_stream = event_receiver; let mut url_sender = url_sender; @@ -128,9 +128,8 @@ impl HotShotEventProcessingTask { EventType::Decide { leaf_chain, .. 
} => { for leaf_info in leaf_chain.iter().rev() { let leaf2 = leaf_info.leaf.clone(); - let leaf = downgrade_leaf(leaf2); - let send_result = leaf_sender.send(leaf).await; + let send_result = leaf_sender.send(leaf2).await; if let Err(err) = send_result { tracing::error!("leaf sender closed: {}", err); panic!("HotShotEventProcessingTask leaf sender is closed, unrecoverable, the block state will stagnate."); @@ -280,7 +279,7 @@ impl Drop for ProcessExternalMessageHandlingTask { pub async fn create_node_validator_processing( config: NodeValidatorConfig, internal_client_message_receiver: Receiver>>, - leaf_receiver: Receiver>, + leaf_receiver: Receiver>, ) -> Result>, CreateNodeValidatorProcessingError> { let client_thread_state = ClientThreadState::>::new( Default::default(), diff --git a/node-metrics/src/api/node_validator/v0/mod.rs b/node-metrics/src/api/node_validator/v0/mod.rs index 90d1867474..b364c105ee 100644 --- a/node-metrics/src/api/node_validator/v0/mod.rs +++ b/node-metrics/src/api/node_validator/v0/mod.rs @@ -11,7 +11,7 @@ use futures::{ channel::mpsc::{self, Sender}, FutureExt, Sink, SinkExt, Stream, StreamExt, }; -use hotshot_query_service::Leaf; +use hotshot_query_service::Leaf2; use hotshot_stake_table::vec_based::StakeTable; use hotshot_types::light_client::{CircuitField, StateVerKey}; use hotshot_types::signature_key::BLSPubKey; @@ -461,11 +461,11 @@ impl HotshotQueryServiceLeafStreamRetriever { } impl LeafStreamRetriever for HotshotQueryServiceLeafStreamRetriever { - type Item = Leaf; + type Item = Leaf2; type ItemError = hotshot_query_service::Error; type Error = hotshot_query_service::Error; type Stream = surf_disco::socket::Connection< - Leaf, + Leaf2, surf_disco::socket::Unsupported, Self::ItemError, Version01, @@ -496,7 +496,7 @@ impl LeafStreamRetriever for HotshotQueryServiceLeafStreamRetriever { "availability/stream/leaves/{}", start_block_height )) - .subscribe::() + .subscribe::() .await; let leaves_stream = match leaves_stream_result { @@ -540,8 +540,8 @@ impl ProcessProduceLeafStreamTask { /// returned state. pub fn new(leaf_stream_retriever: R, leaf_sender: K) -> Self where - R: LeafStreamRetriever> + Send + Sync + 'static, - K: Sink, Error = SendError> + Clone + Send + Sync + Unpin + 'static, + R: LeafStreamRetriever> + Send + Sync + 'static, + K: Sink, Error = SendError> + Clone + Send + Sync + Unpin + 'static, { // let future = Self::process_consume_leaf_stream(leaf_stream_retriever, leaf_sender); let task_handle = spawn(Self::connect_and_process_leaves( @@ -556,8 +556,8 @@ impl ProcessProduceLeafStreamTask { async fn connect_and_process_leaves(leaf_stream_retriever: R, leaf_sender: K) where - R: LeafStreamRetriever>, - K: Sink, Error = SendError> + Clone + Send + Sync + Unpin + 'static, + R: LeafStreamRetriever>, + K: Sink, Error = SendError> + Clone + Send + Sync + Unpin + 'static, { // We want to try and ensure that we are connected to the HotShot Query // Service, and are consuming leaves. @@ -596,7 +596,7 @@ impl ProcessProduceLeafStreamTask { leaf_stream_receiver: &R, ) -> Result where - R: LeafStreamRetriever>, + R: LeafStreamRetriever>, { let backoff_params = BackoffParams::default(); let mut delay = Duration::ZERO; @@ -639,8 +639,8 @@ impl ProcessProduceLeafStreamTask { /// will return. 
async fn process_consume_leaf_stream(leaves_stream: R::Stream, leaf_sender: K) where - R: LeafStreamRetriever>, - K: Sink, Error = SendError> + Clone + Send + Sync + Unpin + 'static, + R: LeafStreamRetriever>, + K: Sink, Error = SendError> + Clone + Send + Sync + Unpin + 'static, { let mut leaf_sender = leaf_sender; let mut leaves_stream = leaves_stream; diff --git a/node-metrics/src/service/client_state/mod.rs b/node-metrics/src/service/client_state/mod.rs index 97a06fac6f..1ca1415d57 100644 --- a/node-metrics/src/service/client_state/mod.rs +++ b/node-metrics/src/service/client_state/mod.rs @@ -1196,12 +1196,12 @@ pub mod tests { }; use async_lock::RwLock; use bitvec::vec::BitVec; - use espresso_types::{Leaf, NodeState, ValidatedState}; + use espresso_types::{Leaf2, NodeState, ValidatedState}; use futures::{ channel::mpsc::{self, Sender}, SinkExt, StreamExt, }; - use hotshot_query_service::testing::mocks::MockVersions; + use hotshot_example_types::node_types::TestVersions; use hotshot_types::{signature_key::BLSPubKey, traits::signature_key::SignatureKey}; use std::{sync::Arc, time::Duration}; use tokio::{ @@ -1370,7 +1370,7 @@ pub mod tests { #[tokio::test(flavor = "multi_thread")] #[cfg(feature = "testing")] async fn test_process_client_handling_stream_request_latest_blocks_snapshot() { - use hotshot_query_service::testing::mocks::MockVersions; + use hotshot_example_types::node_types::TestVersions; use super::clone_block_detail; use crate::service::data_state::create_block_detail_from_leaf; @@ -1378,7 +1378,7 @@ pub mod tests { let (_, _, _, mut data_state) = create_test_data_state(); let client_thread_state = Arc::new(RwLock::new(create_test_client_thread_state())); let leaf_1 = - Leaf::genesis::(&ValidatedState::default(), &NodeState::mock()).await; + Leaf2::genesis::(&ValidatedState::default(), &NodeState::mock()).await; let block_1 = create_block_detail_from_leaf(&leaf_1); data_state.add_latest_block(clone_block_detail(&block_1)); @@ -1619,7 +1619,7 @@ pub mod tests { // send a new leaf let leaf = - Leaf::genesis::(&ValidatedState::default(), &NodeState::mock()).await; + Leaf2::genesis::(&ValidatedState::default(), &NodeState::mock()).await; let expected_block = create_block_detail_from_leaf(&leaf); let arc_expected_block = Arc::new(expected_block); diff --git a/node-metrics/src/service/data_state/mod.rs b/node-metrics/src/service/data_state/mod.rs index 6325a69b83..ae820f4e5d 100644 --- a/node-metrics/src/service/data_state/mod.rs +++ b/node-metrics/src/service/data_state/mod.rs @@ -9,7 +9,7 @@ use futures::{channel::mpsc::SendError, Sink, SinkExt, Stream, StreamExt}; use hotshot_query_service::{ availability::{QueryableHeader, QueryablePayload}, explorer::{BlockDetail, ExplorerHeader, Timestamp}, - Leaf, Resolvable, + Leaf2, Resolvable, }; use hotshot_stake_table::vec_based::StakeTable; use hotshot_types::{ @@ -151,7 +151,7 @@ impl DataState { /// [create_block_detail_from_leaf] is a helper function that will build a /// [BlockDetail] from the reference to [Leaf]. -pub fn create_block_detail_from_leaf(leaf: &Leaf) -> BlockDetail { +pub fn create_block_detail_from_leaf(leaf: &Leaf2) -> BlockDetail { let block_header = leaf.block_header(); let block_payload = &leaf.block_payload().unwrap_or(Payload::empty().0); @@ -223,7 +223,7 @@ impl std::error::Error for ProcessLeafError { /// computed into a [BlockDetail] and sent to the [Sink] so that it can be /// processed for real-time considerations. 
async fn process_incoming_leaf( - leaf: Leaf, + leaf: Leaf2, data_state: Arc>, mut block_sender: BDSink, mut voters_sender: BVSink, @@ -339,7 +339,7 @@ impl ProcessLeafStreamTask { voters_sender: K2, ) -> Self where - S: Stream> + Send + Sync + Unpin + 'static, + S: Stream> + Send + Sync + Unpin + 'static, K1: Sink, Error = SendError> + Clone + Send + Sync + Unpin + 'static, K2: Sink, Error = SendError> + Clone + Send + Sync + Unpin + 'static, { @@ -363,7 +363,7 @@ impl ProcessLeafStreamTask { block_sender: BDSink, voters_senders: BVSink, ) where - S: Stream> + Unpin, + S: Stream> + Unpin, Header: BlockHeader + QueryableHeader + ExplorerHeader, Payload: BlockPayload, BDSink: Sink, Error = SendError> + Clone + Unpin, @@ -569,10 +569,11 @@ mod tests { }; use async_lock::RwLock; use espresso_types::{ - v0_99::ChainConfig, BlockMerkleTree, FeeMerkleTree, Leaf, NodeState, ValidatedState, + v0_99::ChainConfig, BlockMerkleTree, FeeMerkleTree, Leaf2, NodeState, ValidatedState, }; use futures::{channel::mpsc, SinkExt, StreamExt}; - use hotshot_query_service::testing::mocks::MockVersions; + + use hotshot_example_types::node_types::TestVersions; use hotshot_types::{signature_key::BLSPubKey, traits::signature_key::SignatureKey}; use std::{sync::Arc, time::Duration}; use tokio::time::timeout; @@ -628,7 +629,7 @@ mod tests { }; let instance_state = NodeState::mock(); - let sample_leaf = Leaf::genesis::(&validated_state, &instance_state).await; + let sample_leaf = Leaf2::genesis::(&validated_state, &instance_state).await; let mut leaf_sender = leaf_sender; // We should be able to send a leaf without issue diff --git a/sequencer-sqlite/Cargo.lock b/sequencer-sqlite/Cargo.lock index 1d9cb79e6d..b975a9d8fe 100644 --- a/sequencer-sqlite/Cargo.lock +++ b/sequencer-sqlite/Cargo.lock @@ -4986,6 +4986,7 @@ dependencies = [ "prometheus", "refinery", "refinery-core", + "semver 1.0.25", "serde", "serde_json", "snafu 0.8.5", @@ -9514,6 +9515,7 @@ dependencies = [ "rand_chacha 0.3.1", "rand_distr", "request-response", + "semver 1.0.25", "sequencer-utils", "serde", "serde_json", diff --git a/sequencer/Cargo.toml b/sequencer/Cargo.toml index 1c5d9b5347..f5982e981f 100644 --- a/sequencer/Cargo.toml +++ b/sequencer/Cargo.toml @@ -103,6 +103,7 @@ rand = { workspace = true } rand_chacha = { workspace = true } rand_distr = { workspace = true } request-response = { path = "../request-response" } +semver = { workspace = true } sequencer-utils = { path = "../utils" } serde = { workspace = true } serde_json = { workspace = true } diff --git a/sequencer/api/migrations/postgres/V501__epoch_tables.sql b/sequencer/api/migrations/postgres/V501__epoch_tables.sql new file mode 100644 index 0000000000..9a25f42c20 --- /dev/null +++ b/sequencer/api/migrations/postgres/V501__epoch_tables.sql @@ -0,0 +1,54 @@ +CREATE TABLE anchor_leaf2 ( + view BIGINT PRIMARY KEY, + leaf BYTEA, + qc BYTEA +); + + +CREATE TABLE da_proposal2 ( + view BIGINT PRIMARY KEY, + payload_hash VARCHAR, + data BYTEA +); + +CREATE TABLE vid_share2 ( + view BIGINT PRIMARY KEY, + payload_hash VARCHAR, + data BYTEA +); + + +CREATE TABLE undecided_state2 ( + -- The ID is always set to 0. Setting it explicitly allows us to enforce with every insert or + -- update that there is only a single entry in this table: the latest known state. 
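+    -- (Writers are expected to upsert by this fixed ID, e.g.
+    --  INSERT INTO undecided_state2 (id, leaves, state) VALUES (0, $1, $2)
+    --      ON CONFLICT (id) DO UPDATE SET leaves = excluded.leaves, state = excluded.state;)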
+    id INT PRIMARY KEY,
+
+    leaves BYTEA NOT NULL,
+    state BYTEA NOT NULL
+);
+
+
+CREATE TABLE quorum_proposals2 (
+    view BIGINT PRIMARY KEY,
+    leaf_hash VARCHAR,
+    data BYTEA
+);
+
+CREATE UNIQUE INDEX quorum_proposals2_leaf_hash_idx ON quorum_proposals2 (leaf_hash);
+CREATE INDEX da_proposal2_payload_hash_idx ON da_proposal2 (payload_hash);
+CREATE INDEX vid_share2_payload_hash_idx ON vid_share2 (payload_hash);
+
+CREATE TABLE quorum_certificate2 (
+    view BIGINT PRIMARY KEY,
+    leaf_hash VARCHAR NOT NULL,
+    data BYTEA NOT NULL
+);
+
+CREATE INDEX quorum_certificate2_leaf_hash_idx ON quorum_certificate2 (leaf_hash);
+
+CREATE TABLE epoch_migration (
+    table_name TEXT PRIMARY KEY,
+    completed bool DEFAULT FALSE
+);
+
+INSERT INTO epoch_migration (table_name) VALUES ('anchor_leaf'), ('da_proposal'), ('vid_share'), ('undecided_state'), ('quorum_proposals'), ('quorum_certificate');
\ No newline at end of file
diff --git a/sequencer/api/migrations/sqlite/V301__epoch_tables.sql b/sequencer/api/migrations/sqlite/V301__epoch_tables.sql
new file mode 100644
index 0000000000..c11ec79b0a
--- /dev/null
+++ b/sequencer/api/migrations/sqlite/V301__epoch_tables.sql
@@ -0,0 +1,54 @@
+CREATE TABLE anchor_leaf2 (
+    view BIGINT PRIMARY KEY,
+    leaf BLOB,
+    qc BLOB
+);
+
+
+CREATE TABLE da_proposal2 (
+    view BIGINT PRIMARY KEY,
+    payload_hash VARCHAR,
+    data BLOB
+);
+
+CREATE TABLE vid_share2 (
+    view BIGINT PRIMARY KEY,
+    payload_hash VARCHAR,
+    data BLOB
+);
+
+
+CREATE TABLE undecided_state2 (
+    -- The ID is always set to 0. Setting it explicitly allows us to enforce with every insert or
+    -- update that there is only a single entry in this table: the latest known state.
+    id INT PRIMARY KEY,
+
+    leaves BLOB NOT NULL,
+    state BLOB NOT NULL
+);
+
+
+CREATE TABLE quorum_proposals2 (
+    view BIGINT PRIMARY KEY,
+    leaf_hash VARCHAR,
+    data BLOB
+);
+
+CREATE UNIQUE INDEX quorum_proposals2_leaf_hash_idx ON quorum_proposals2 (leaf_hash);
+CREATE INDEX da_proposal2_payload_hash_idx ON da_proposal2 (payload_hash);
+CREATE INDEX vid_share2_payload_hash_idx ON vid_share2 (payload_hash);
+
+CREATE TABLE quorum_certificate2 (
+    view BIGINT PRIMARY KEY,
+    leaf_hash VARCHAR NOT NULL,
+    data BLOB NOT NULL
+);
+
+CREATE INDEX quorum_certificate2_leaf_hash_idx ON quorum_certificate2 (leaf_hash);
+
+CREATE TABLE epoch_migration (
+    table_name TEXT PRIMARY KEY,
+    completed bool NOT NULL DEFAULT FALSE
+);
+
+INSERT INTO epoch_migration (table_name) VALUES ('anchor_leaf'), ('da_proposal'), ('vid_share'), ('undecided_state'), ('quorum_proposals'), ('quorum_certificate');
\ No newline at end of file
diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs
index 0faaa03514..a2eaf1ea35 100644
--- a/sequencer/src/api.rs
+++ b/sequencer/src/api.rs
@@ -1095,7 +1095,7 @@ mod api_tests {
     use espresso_types::MockSequencerVersions;
     use espresso_types::{
         traits::{EventConsumer, PersistenceOptions},
-        Header, Leaf, Leaf2, NamespaceId,
+        Header, Leaf2, NamespaceId,
     };
     use ethers::utils::Anvil;
     use futures::{future, stream::StreamExt};
@@ -1104,18 +1104,19 @@ mod api_tests {
         AvailabilityDataSource, BlockQueryData, VidCommonQueryData,
     };
-    use hotshot_query_service::VidCommitment;
-    use hotshot_types::data::vid_disperse::ADVZDisperseShare;
-    use hotshot_types::vid::advz::advz_scheme;
+    use hotshot_types::data::ns_table::parse_ns_table;
+    use hotshot_types::data::vid_disperse::VidDisperseShare2;
+    use hotshot_types::data::{DaProposal2, EpochNumber, VidCommitment};
+    use hotshot_types::simple_certificate::QuorumCertificate2;
+
+    use
hotshot_types::vid::avidm::{init_avidm_param, AvidMScheme}; use hotshot_types::{ - data::{DaProposal, QuorumProposal2, QuorumProposalWrapper}, + data::{QuorumProposal2, QuorumProposalWrapper}, event::LeafInfo, message::Proposal, - simple_certificate::QuorumCertificate, traits::{node_implementation::ConsensusTime, signature_key::SignatureKey, EncodeBytes}, }; - use jf_vid::VidScheme; use portpicker::pick_unused_port; use sequencer_utils::test_utils::setup_test; use std::fmt::Debug; @@ -1287,21 +1288,26 @@ mod api_tests { // Create two non-consecutive leaf chains. let mut chain1 = vec![]; - let genesis = Leaf::genesis::(&Default::default(), &NodeState::mock()).await; + let genesis = Leaf2::genesis::(&Default::default(), &NodeState::mock()).await; let payload = genesis.block_payload().unwrap(); let payload_bytes_arc = payload.encode(); - let disperse = advz_scheme(2).disperse(payload_bytes_arc.clone()).unwrap(); - let payload_commitment = disperse.commit; + + let avidm_param = init_avidm_param(2).unwrap(); + let weights = vec![1u32; 2]; + + let ns_table = parse_ns_table(payload.byte_len().as_usize(), &payload.encode()); + let (payload_commitment, shares) = + AvidMScheme::ns_disperse(&avidm_param, &weights, &payload_bytes_arc, ns_table).unwrap(); + let mut quorum_proposal = QuorumProposalWrapper:: { proposal: QuorumProposal2:: { block_header: genesis.block_header().clone(), view_number: ViewNumber::genesis(), - justify_qc: QuorumCertificate::genesis::( + justify_qc: QuorumCertificate2::genesis::( &ValidatedState::default(), &NodeState::mock(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_drb_result: None, @@ -1309,12 +1315,11 @@ mod api_tests { epoch: None, }, }; - let mut qc = QuorumCertificate::genesis::( + let mut qc = QuorumCertificate2::genesis::( &ValidatedState::default(), &NodeState::mock(), ) - .await - .to_qc2(); + .await; let mut justify_qc = qc.clone(); for i in 0..5 { @@ -1332,7 +1337,7 @@ mod api_tests { PubKey::sign(&privkey, &bincode::serialize(&quorum_proposal).unwrap()) .expect("Failed to sign quorum_proposal"); persistence - .append_quorum_proposal(&Proposal { + .append_quorum_proposal2(&Proposal { data: quorum_proposal.clone(), signature: quorum_proposal_signature, _pd: Default::default(), @@ -1341,25 +1346,27 @@ mod api_tests { .unwrap(); // Include VID information for each leaf. - let share = ADVZDisperseShare:: { + let share = VidDisperseShare2:: { view_number: leaf.view_number(), payload_commitment, - share: disperse.shares[0].clone(), - common: disperse.common.clone(), + share: shares[0].clone(), recipient_key: pubkey, + epoch: Some(EpochNumber::new(0)), + target_epoch: Some(EpochNumber::new(0)), }; persistence - .append_vid(&share.to_proposal(&privkey).unwrap()) + .append_vid2(&share.to_proposal(&privkey).unwrap()) .await .unwrap(); // Include payload information for each leaf. 
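+            // (Unlike the legacy DA/VID types, DaProposal2 and VidDisperseShare2
+            // are epoch-aware; this test pins everything to epoch 0.)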
let block_payload_signature = PubKey::sign(&privkey, &payload_bytes_arc).expect("Failed to sign block payload"); - let da_proposal_inner = DaProposal:: { + let da_proposal_inner = DaProposal2:: { encoded_transactions: payload_bytes_arc.clone(), metadata: payload.ns_table().clone(), view_number: leaf.view_number(), + epoch: Some(EpochNumber::new(0)), }; let da_proposal = Proposal { data: da_proposal_inner, @@ -1367,7 +1374,7 @@ mod api_tests { _pd: Default::default(), }; persistence - .append_da(&da_proposal, VidCommitment::V0(payload_commitment)) + .append_da2(&da_proposal, VidCommitment::V1(payload_commitment)) .await .unwrap(); } @@ -1413,8 +1420,8 @@ mod api_tests { for (leaf, qc) in chain1.iter().chain(&chain2) { tracing::info!(height = leaf.height(), "check archive"); let qd = data_source.get_leaf(leaf.height() as usize).await.await; - let stored_leaf: Leaf2 = qd.leaf().clone().into(); - let stored_qc = qd.qc().clone().to_qc2(); + let stored_leaf: Leaf2 = qd.leaf().clone(); + let stored_qc = qd.qc().clone(); assert_eq!(&stored_leaf, leaf); assert_eq!(&stored_qc, qc); @@ -1483,15 +1490,13 @@ mod api_tests { )); let consumer = ApiEventConsumer::from(data_source.clone()); - let mut qc = QuorumCertificate::genesis::( + let mut qc = QuorumCertificate2::genesis::( &ValidatedState::default(), &NodeState::mock(), ) - .await - .to_qc2(); + .await; let leaf = - Leaf::genesis::(&ValidatedState::default(), &NodeState::mock()) - .await; + Leaf2::genesis::(&ValidatedState::default(), &NodeState::mock()).await; // Append the genesis leaf. We don't use this for the test, because the update function will // automatically fill in the missing data for genesis. We just append this to get into a @@ -1500,7 +1505,7 @@ mod api_tests { persistence .append_decided_leaves( leaf.view_number(), - [(&leaf_info(leaf.clone().into()), qc.clone())], + [(&leaf_info(leaf.clone()), qc.clone())], &consumer, ) .await @@ -1538,10 +1543,7 @@ mod api_tests { .unwrap(); // Check that we still processed the leaf. - assert_eq!( - leaf, - data_source.get_leaf(1).await.await.leaf().clone().into() - ); + assert_eq!(leaf, data_source.get_leaf(1).await.await.leaf().clone()); assert!(data_source.get_vid_common(1).await.is_pending()); assert!(data_source.get_block(1).await.is_pending()); } diff --git a/sequencer/src/api/endpoints.rs b/sequencer/src/api/endpoints.rs index 651a38e796..91cecf6743 100644 --- a/sequencer/src/api/endpoints.rs +++ b/sequencer/src/api/endpoints.rs @@ -91,6 +91,7 @@ type AvailabilityApi = Api, availabil // Snafu has been replaced by `this_error` everywhere. // However, the query service still uses snafu pub(super) fn availability( + api_ver: semver::Version, ) -> Result> where N: ConnectedNetwork, @@ -105,6 +106,7 @@ where let mut api = availability::define_api::, SeqTypes, _>( &options, SequencerApiVersion::instance(), + api_ver, )?; api.get("getnamespaceproof", move |req, state| { diff --git a/sequencer/src/api/options.rs b/sequencer/src/api/options.rs index 794c3b6fd5..2db7fd4ea3 100644 --- a/sequencer/src/api/options.rs +++ b/sequencer/src/api/options.rs @@ -274,7 +274,25 @@ impl Options { app.register_module("status", status_api)?; // Initialize availability and node APIs (these both use the same data source). - app.register_module("availability", endpoints::availability()?)?; + + // Note: We initialize two versions of the availability module: `availability/v0` and `availability/v1`. + // - `availability/v0/leaf/0` returns the old `Leaf1` type for backward compatibility. 
+ // - `availability/v1/leaf/0` returns the new `Leaf2` type + + // initialize the availability module for API version V0. + // This ensures compatibility for nodes that expect `Leaf1` for leaf endpoints + app.register_module( + "availability", + endpoints::availability("0.0.1".parse().unwrap())?, + )?; + + // initialize the availability module for API version V1. + // This enables support for the new `Leaf2` type + app.register_module( + "availability", + endpoints::availability("1.0.0".parse().unwrap())?, + )?; + app.register_module("node", endpoints::node()?)?; // Initialize submit API diff --git a/sequencer/src/api/sql.rs b/sequencer/src/api/sql.rs index 7106f9fa52..8af85c7b3b 100644 --- a/sequencer/src/api/sql.rs +++ b/sequencer/src/api/sql.rs @@ -4,7 +4,7 @@ use committable::{Commitment, Committable}; use espresso_types::{ get_l1_deposits, v0_99::{ChainConfig, IterableFeeInfo}, - BlockMerkleTree, FeeAccount, FeeMerkleTree, Leaf, Leaf2, NodeState, ValidatedState, + BlockMerkleTree, FeeAccount, FeeMerkleTree, Leaf2, NodeState, ValidatedState, }; use hotshot::traits::ValidatedState as _; use hotshot_query_service::{ @@ -21,7 +21,7 @@ use hotshot_query_service::{ Resolvable, }; use hotshot_types::{ - data::{QuorumProposal, ViewNumber}, + data::{QuorumProposalWrapper, ViewNumber}, message::Proposal, traits::node_implementation::ConsensusTime, }; @@ -261,7 +261,7 @@ async fn load_accounts( } } - Ok((snapshot, leaf.leaf().clone().into())) + Ok((snapshot, leaf.leaf().clone())) } async fn load_chain_config( @@ -290,7 +290,7 @@ async fn reconstruct_state( .get_leaf((from_height as usize).into()) .await .context(format!("leaf {from_height} not available"))?; - let from_leaf: Leaf2 = from_leaf.leaf().clone().into(); + let from_leaf: Leaf2 = from_leaf.leaf().clone(); ensure!( from_leaf.view_number() < to_view, "state reconstruction: starting state {:?} must be before ending state {to_view:?}", @@ -444,13 +444,14 @@ where P: Type + for<'q> Encode<'q, Db>, { let (data,) = query_as::<(Vec,)>(&format!( - "SELECT data FROM quorum_proposals WHERE {where_clause} LIMIT 1", + "SELECT data FROM quorum_proposals2 WHERE {where_clause} LIMIT 1", )) .bind(param) .fetch_one(tx.as_mut()) .await?; - let proposal: Proposal> = bincode::deserialize(&data)?; - Ok(Leaf::from_quorum_proposal(&proposal.data).into()) + let proposal: Proposal> = + bincode::deserialize(&data)?; + Ok(Leaf2::from_quorum_proposal(&proposal.data)) } #[cfg(any(test, feature = "testing"))] diff --git a/sequencer/src/block/full_payload.rs b/sequencer/src/block/full_payload.rs deleted file mode 100644 index 61247ec87e..0000000000 --- a/sequencer/src/block/full_payload.rs +++ /dev/null @@ -1,9 +0,0 @@ -mod ns_proof; -mod ns_table; -mod payload; - -pub use ns_proof::NsProof; -pub use ns_table::{NsIndex, NsTable, NsTableValidationError}; -pub use payload::{Payload, PayloadByteLen}; - -pub(in crate::block) use ns_table::NsIter; diff --git a/sequencer/src/block/full_payload/ns_proof.rs b/sequencer/src/block/full_payload/ns_proof.rs deleted file mode 100644 index 104ca45f4f..0000000000 --- a/sequencer/src/block/full_payload/ns_proof.rs +++ /dev/null @@ -1,172 +0,0 @@ -use crate::{ - block::{ - full_payload::{NsIndex, NsTable, Payload, PayloadByteLen}, - namespace_payload::NsPayloadOwned, - }, - NamespaceId, Transaction, -}; -use hotshot_types::{ - traits::EncodeBytes, - vid::{vid_scheme, LargeRangeProofType, VidCommitment, VidCommon, VidSchemeType}, -}; -use jf_vid::{ - payload_prover::{PayloadProver, Statement}, - VidScheme, -}; -use 
serde::{Deserialize, Serialize}; - -/// Proof of correctness for namespace payload bytes in a block. -#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] -pub struct NsProof { - ns_index: NsIndex, - ns_payload: NsPayloadOwned, - ns_proof: Option, // `None` if ns_payload is empty -} - -impl NsProof { - /// Returns the payload bytes for the `index`th namespace, along with a - /// proof of correctness for those bytes. Returns `None` on error. - /// - /// The namespace payload [`NsPayloadOwned`] is included as a hidden field - /// in the returned [`NsProof`]. A conventional API would instead return - /// `(NsPayload, NsProof)` and [`NsProof`] would not contain the namespace - /// payload. - /// ([`TxProof::new`](crate::block::namespace_payload::TxProof::new) - /// conforms to this convention.) In the future we should change this API to - /// conform to convention. But that would require a change to our RPC - /// endpoint API at [`endpoints`](crate::api::endpoints), which is a hassle. - pub fn new(payload: &Payload, index: &NsIndex, common: &VidCommon) -> Option { - let payload_byte_len = payload.byte_len(); - if !payload_byte_len.is_consistent(common) { - tracing::warn!( - "payload byte len {} inconsistent with common {}", - payload_byte_len, - VidSchemeType::get_payload_byte_len(common) - ); - return None; // error: payload byte len inconsistent with common - } - if !payload.ns_table().in_bounds(index) { - tracing::warn!("ns_index {:?} out of bounds", index); - return None; // error: index out of bounds - } - let ns_payload_range = payload.ns_table().ns_range(index, &payload_byte_len); - - // TODO vid_scheme() arg should be u32 to match get_num_storage_nodes - // https://github.com/EspressoSystems/HotShot/issues/3298 - let vid = vid_scheme( - VidSchemeType::get_num_storage_nodes(common) - .try_into() - .ok()?, // error: failure to convert u32 to usize - ); - - let ns_proof = if ns_payload_range.as_block_range().is_empty() { - None - } else { - Some( - vid.payload_proof(payload.encode(), ns_payload_range.as_block_range()) - .ok()?, // error: internal to payload_proof() - ) - }; - - Some(NsProof { - ns_index: index.clone(), - ns_payload: payload.read_ns_payload(&ns_payload_range).to_owned(), - ns_proof, - }) - } - - /// Verify a [`NsProof`] against a payload commitment. Returns `None` on - /// error or if verification fails. - /// - /// There is no [`NsPayload`](crate::block::namespace_payload::NsPayload) - /// arg because this data is already included in the [`NsProof`]. See - /// [`NsProof::new`] for discussion. - /// - /// If verification is successful then return `(Vec, - /// NamespaceId)` obtained by post-processing the underlying - /// [`NsPayload`](crate::block::namespace_payload::NsPayload). Why? This - /// method might be run by a client in a WASM environment who might be - /// running non-Rust code, in which case the client is unable to perform - /// this post-processing himself. 
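The deleted doc comments above describe how `NsProof::new` and `NsProof::verify` fit together: `new` embeds the namespace payload in the proof, and `verify` both checks it and exports the transactions. As a hedged illustration only — this API is removed by this very patch, and the helper name below is invented — a round trip with the old types looks roughly like:

```rust
// Hypothetical helper (not part of the crate) showing a round trip with the
// legacy API deleted in this patch. Assumes the old `sequencer` types
// `Payload`, `NsProof`, `NamespaceId`, `Transaction`, plus the pre-AvidM
// `VidCommitment`/`VidCommon` from `hotshot_types::vid`, exactly as imported
// by the deleted module above.
fn prove_and_verify_ns(
    payload: &Payload,
    ns_id: NamespaceId,
    commit: &VidCommitment,
    common: &VidCommon,
) -> Option<Vec<Transaction>> {
    // Locate the namespace in the table; `None` if the id is unknown.
    let index = payload.ns_table().find_ns_id(&ns_id)?;
    // Build the proof; the namespace payload rides along as a hidden field.
    let proof = NsProof::new(payload, &index, common)?;
    // On success, verification exports the namespace's transactions.
    let (txs, verified_id) = proof.verify(payload.ns_table(), commit, common)?;
    debug_assert_eq!(verified_id, ns_id);
    Some(txs)
}
```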
- pub fn verify( - &self, - ns_table: &NsTable, - commit: &VidCommitment, - common: &VidCommon, - ) -> Option<(Vec, NamespaceId)> { - VidSchemeType::is_consistent(commit, common).ok()?; - if !ns_table.in_bounds(&self.ns_index) { - return None; // error: index out of bounds - } - - let range = ns_table - .ns_range(&self.ns_index, &PayloadByteLen::from_vid_common(common)) - .as_block_range(); - - match (&self.ns_proof, range.is_empty()) { - (Some(proof), false) => { - // TODO vid_scheme() arg should be u32 to match get_num_storage_nodes - // https://github.com/EspressoSystems/HotShot/issues/3298 - let vid = vid_scheme( - VidSchemeType::get_num_storage_nodes(common) - .try_into() - .ok()?, // error: failure to convert u32 to usize - ); - - vid.payload_verify( - Statement { - payload_subslice: self.ns_payload.as_bytes_slice(), - range, - commit, - common, - }, - proof, - ) - .ok()? // error: internal to payload_verify() - .ok()?; // verification failure - } - (None, true) => {} // 0-length namespace, nothing to verify - (None, false) => { - tracing::error!( - "ns verify: missing proof for nonempty ns payload range {:?}", - range - ); - return None; - } - (Some(_), true) => { - tracing::error!("ns verify: unexpected proof for empty ns payload range"); - return None; - } - } - - // verification succeeded, return some data - let ns_id = ns_table.read_ns_id_unchecked(&self.ns_index); - Some((self.ns_payload.export_all_txs(&ns_id), ns_id)) - } - - /// Return all transactions in the namespace whose payload is proven by - /// `self`. The namespace ID for each returned [`Transaction`] is set to - /// `ns_id`. - /// - /// # Design warning - /// - /// This method relies on a promise that a [`NsProof`] stores the entire - /// namespace payload. If in the future we wish to remove the payload from a - /// [`NsProof`] then this method can no longer be supported. - /// - /// In that case, use the following a workaround: - /// - Given a [`NamespaceId`], get a [`NsIndex`] `i` via - /// [`NsTable::find_ns_id`]. - /// - Use `i` to get a - /// [`NsPayload`](crate::block::namespace_payload::NsPayload) `p` via - /// [`Payload::ns_payload`]. - /// - Use `p` to get the desired [`Vec`] via - /// [`NsPayload::export_all_txs`](crate::block::namespace_payload::NsPayload::export_all_txs). - /// - /// This workaround duplicates the work done in [`NsProof::new`]. If you - /// don't like that then you could instead hack [`NsProof::new`] to return a - /// pair `(NsProof, Vec)`. - pub fn export_all_txs(&self, ns_id: &NamespaceId) -> Vec { - self.ns_payload.export_all_txs(ns_id) - } -} diff --git a/sequencer/src/block/full_payload/ns_table.rs b/sequencer/src/block/full_payload/ns_table.rs deleted file mode 100644 index d2d2290ef1..0000000000 --- a/sequencer/src/block/full_payload/ns_table.rs +++ /dev/null @@ -1,467 +0,0 @@ -//! Types related to a namespace table. -//! -//! All code that needs to know the binary format of a namespace table is -//! restricted to this file. -//! -//! See [`NsTable`] for a full specification of the binary format of a namespace -//! table. 
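The wire format specified in the deleted doc comment below (a 4-byte little-endian entry count, then per entry a 4-byte LE namespace id and a 4-byte LE end-offset) is simple enough to restate as standalone code. The following std-only sketch decodes a raw table and applies the documented count and range-clamping rules; all names are illustrative restatements of the spec, not crate APIs:

```rust
use std::collections::HashSet;
use std::ops::Range;

// Decode a raw namespace table per the spec below, returning
// (ns_id, byte range in the block payload) for each unique namespace.
fn decode_ns_table(bytes: &[u8], payload_byte_len: usize) -> Vec<(u32, Range<usize>)> {
    const HEADER: usize = 4; // NUM_NSS_BYTE_LEN
    const ENTRY: usize = 8; // NS_ID_BYTE_LEN + NS_OFFSET_BYTE_LEN

    // Zero-padded little-endian read of the declared entry count.
    let mut header = [0u8; HEADER];
    let n = HEADER.min(bytes.len());
    header[..n].copy_from_slice(&bytes[..n]);
    let declared_count = u32::from_le_bytes(header) as usize;
    // Actual count: the minimum of the declared count and the number of
    // whole entries that physically fit in the table bytes.
    let count = declared_count.min(bytes.len().saturating_sub(HEADER) / ENTRY);

    let mut seen = HashSet::new();
    let mut prev_declared_end = 0usize; // start of entry 0 is implicitly 0
    let mut out = Vec::new();
    for i in 0..count {
        let at = HEADER + i * ENTRY;
        let id = u32::from_le_bytes(bytes[at..at + 4].try_into().unwrap());
        let declared_end =
            u32::from_le_bytes(bytes[at + 4..at + 8].try_into().unwrap()) as usize;
        // Clamp so the range is well-defined and in-bounds:
        //   end   = min(declared_end, payload_byte_len)
        //   start = min(previous declared end, end)
        let end = declared_end.min(payload_byte_len);
        let start = prev_declared_end.min(end);
        // A duplicate id is ignored, but its end still seeds the next start.
        if seen.insert(id) {
            out.push((id, start..end));
        }
        prev_declared_end = declared_end;
    }
    out
}
```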
-use crate::{ - block::{ - full_payload::payload::PayloadByteLen, - namespace_payload::NsPayloadRange, - uint_bytes::{ - bytes_serde_impl, u32_from_bytes, u32_to_bytes, usize_from_bytes, usize_to_bytes, - }, - }, - NamespaceId, -}; -use committable::{Commitment, Committable, RawCommitmentBuilder}; -use derive_more::Display; -use hotshot_types::traits::EncodeBytes; -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; -use std::{collections::HashSet, ops::Range, sync::Arc}; -use thiserror::Error; - -/// Byte lengths for the different items that could appear in a namespace table. -const NUM_NSS_BYTE_LEN: usize = 4; -const NS_OFFSET_BYTE_LEN: usize = 4; - -// TODO prefer [`NS_ID_BYTE_LEN`] set to `8` because [`NamespaceId`] is a `u64` -// but we need to maintain serialization compatibility. -// https://github.com/EspressoSystems/espresso-sequencer/issues/1574 -const NS_ID_BYTE_LEN: usize = 4; - -/// Raw binary data for a namespace table. -/// -/// Any sequence of bytes is a valid [`NsTable`]. -/// -/// # Binary format of a namespace table -/// -/// Byte lengths for the different items that could appear in a namespace table -/// are specified in local private constants [`NUM_NSS_BYTE_LEN`], -/// [`NS_OFFSET_BYTE_LEN`], [`NS_ID_BYTE_LEN`]. -/// -/// ## Number of entries in the namespace table -/// -/// The first [`NUM_NSS_BYTE_LEN`] bytes of the namespace table indicate the -/// number `n` of entries in the table as a little-endian unsigned integer. If -/// the entire table length is smaller than [`NUM_NSS_BYTE_LEN`] then the -/// missing bytes are zero-padded. -/// -/// The bytes in the namespace table beyond the first [`NUM_NSS_BYTE_LEN`] bytes -/// encode table entries. Each entry consumes exactly [`NS_ID_BYTE_LEN`] `+` -/// [`NS_OFFSET_BYTE_LEN`] bytes. -/// -/// The number `n` could be anything, including a number much larger than the -/// number of entries that could fit in the namespace table. As such, the actual -/// number of entries in the table is defined as the minimum of `n` and the -/// maximum number of whole entries that could fit in the table. -/// -/// See [`Self::in_bounds`] for clarification. -/// -/// ## Namespace table entry -/// -/// ### Namespace ID -/// -/// The first [`NS_ID_BYTE_LEN`] bytes of each table entry indicate the -/// [`NamespaceId`] for this namespace. Any table entry whose [`NamespaceId`] is -/// a duplicate of a previous entry is ignored. A correct count of the number of -/// *unique* (non-ignored) entries is given by `NsTable::iter().count()`. -/// -/// ### Namespace offset -/// -/// The next [`NS_OFFSET_BYTE_LEN`] bytes of each table entry indicate the -/// end-index of a namespace in the block payload bytes -/// [`Payload`](super::payload::Payload). This end-index is a little-endian -/// unsigned integer. -/// -/// # How to deduce a namespace's byte range -/// -/// In order to extract the payload bytes of a single namespace `N` from the -/// block payload one needs both the start- and end-indices for `N`. -/// -/// See [`Self::ns_range`] for clarification. What follows is a description of -/// what's implemented in [`Self::ns_range`]. -/// -/// If `N` occupies the `i`th entry in the namespace table for `i>0` then the -/// start-index for `N` is defined as the end-index of the `(i-1)`th entry in -/// the table. -/// -/// Even if the `(i-1)`the entry would otherwise be ignored (due to a duplicate -/// [`NamespaceId`] or any other reason), that entry's end-index still defines -/// the start-index of `N`. 
This rule guarantees that both start- and -/// end-indices for any namespace `N` can be read from a constant-size byte -/// range in the namespace table, and it eliminates the need to traverse an -/// unbounded number of previous entries of the namespace table looking for a -/// previous non-ignored entry. -/// -/// The start-index of the 0th entry in the table is implicitly defined to be -/// `0`. -/// -/// The start- and end-indices `(declared_start, declared_end)` declared in the -/// namespace table could be anything. As such, the actual start- and -/// end-indices `(start, end)` are defined so as to ensure that the byte range -/// is well-defined and in-bounds for the block payload: -/// ```ignore -/// end = min(declared_end, block_payload_byte_length) -/// start = min(declared_start, end) -/// ``` -/// -/// In a "honestly-prepared" namespace table the end-index of the final -/// namespace equals the byte length of the block payload. (Otherwise the block -/// payload might have bytes that are not included in any namespace.) -/// -/// It is possible that a namespace table could indicate two distinct namespaces -/// whose byte ranges overlap, though no "honestly-prepared" namespace table -/// would do this. -/// -/// TODO prefer [`NsTable`] to be a newtype like this -/// ```ignore -/// #[repr(transparent)] -/// #[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] -/// #[serde(transparent)] -/// pub struct NsTable(#[serde(with = "base64_bytes")] Vec); -/// ``` -/// but we need to maintain serialization compatibility. -/// -#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] -// Boilerplate: `#[serde(remote = "Self")]` needed to check invariants on -// deserialization. See -// https://github.com/serde-rs/serde/issues/1220#issuecomment-382589140 -#[serde(remote = "Self")] -pub struct NsTable { - #[serde(with = "base64_bytes")] - bytes: Vec, -} - -// Boilerplate: `#[serde(remote = "Self")]` allows invariant checking on -// deserialization via re-implementation of `Deserialize` in terms of default -// derivation. See -// https://github.com/serde-rs/serde/issues/1220#issuecomment-382589140 -impl<'de> Deserialize<'de> for NsTable { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let unchecked = NsTable::deserialize(deserializer)?; - unchecked - .validate_deserialization_invariants() - .map_err(de::Error::custom)?; - Ok(unchecked) - } -} - -// Boilerplate: use of `#[serde(remote = "Self")]` must include a trivial -// `Serialize` impl. See -// https://github.com/serde-rs/serde/issues/1220#issuecomment-382589140 -impl Serialize for NsTable { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - NsTable::serialize(self, serializer) - } -} - -impl NsTable { - /// Search the namespace table for the ns_index belonging to `ns_id`. - pub fn find_ns_id(&self, ns_id: &NamespaceId) -> Option { - self.iter() - .find(|index| self.read_ns_id_unchecked(index) == *ns_id) - } - - /// Number of entries in the namespace table. - /// - /// Defined as the maximum number of entries that could fit in the namespace - /// table, ignoring what's declared in the table header. - pub fn len(&self) -> NumNss { - NumNss( - self.bytes.len().saturating_sub(NUM_NSS_BYTE_LEN) - / NS_ID_BYTE_LEN.saturating_add(NS_OFFSET_BYTE_LEN), - ) - } - - /// Iterator over all unique namespaces in the namespace table. 
- pub fn iter(&self) -> impl Iterator + '_ { - NsIter::new(&self.len()) - } - - /// Read the namespace id from the `index`th entry from the namespace table. - /// Returns `None` if `index` is out of bounds. - /// - /// TODO I want to restrict visibility to `pub(crate)` or lower but this - /// method is currently used in `nasty-client`. - pub fn read_ns_id(&self, index: &NsIndex) -> Option { - if !self.in_bounds(index) { - None - } else { - Some(self.read_ns_id_unchecked(index)) - } - } - - /// Like [`Self::read_ns_id`] except `index` is not checked. Use [`Self::in_bounds`] as needed. - pub fn read_ns_id_unchecked(&self, index: &NsIndex) -> NamespaceId { - let start = index.0 * (NS_ID_BYTE_LEN + NS_OFFSET_BYTE_LEN) + NUM_NSS_BYTE_LEN; - - // TODO hack to deserialize `NamespaceId` from `NS_ID_BYTE_LEN` bytes - // https://github.com/EspressoSystems/espresso-sequencer/issues/1574 - NamespaceId::from(u32_from_bytes::( - &self.bytes[start..start + NS_ID_BYTE_LEN], - )) - } - - /// Does the `index`th entry exist in the namespace table? - pub fn in_bounds(&self, index: &NsIndex) -> bool { - self.len().in_bounds(index) - } - - /// Are the bytes of this [`NsTable`] uncorrupted? - /// - /// # Checks - /// 1. Byte length must hold a whole number of entries. - /// 2. All offsets must increase monotonically. Offsets - /// must be nonzero. Namespace IDs must be unique. - /// 3. Header consistent with byte length. (Obsolete after - /// .) - /// 4. Final offset must equal `payload_byte_len`. (Obsolete after - /// .) - /// If the namespace table is empty then `payload_byte_len` must be 0. - pub fn validate( - &self, - payload_byte_len: &PayloadByteLen, - ) -> Result<(), NsTableValidationError> { - use NsTableValidationError::*; - - // conditions 1-3 - self.validate_deserialization_invariants()?; - - // condition 4 - let len = self.len().0; - if len > 0 { - let final_ns_index = NsIndex(len - 1); - let final_offset = self.read_ns_offset_unchecked(&final_ns_index); - if final_offset != payload_byte_len.as_usize() { - return Err(InvalidFinalOffset); - } - } else if payload_byte_len.as_usize() != 0 { - return Err(ExpectNonemptyNsTable); - } - - Ok(()) - } - - // CRATE-VISIBLE HELPERS START HERE - - /// Read subslice range for the `index`th namespace from the namespace - /// table. - pub(in crate::block) fn ns_range( - &self, - index: &NsIndex, - payload_byte_len: &PayloadByteLen, - ) -> NsPayloadRange { - let end = self - .read_ns_offset_unchecked(index) - .min(payload_byte_len.as_usize()); - let start = if index.0 == 0 { - 0 - } else { - self.read_ns_offset_unchecked(&NsIndex(index.0 - 1)) - } - .min(end); - NsPayloadRange::new(start, end) - } - - // PRIVATE HELPERS START HERE - - /// Read the number of namespaces declared in the namespace table. THIS - /// QUANTITY IS NEVER USED. Instead use [`NsTable::len`]. - /// - /// TODO Delete this method after - /// - fn read_num_nss(&self) -> usize { - let num_nss_byte_len = NUM_NSS_BYTE_LEN.min(self.bytes.len()); - usize_from_bytes::(&self.bytes[..num_nss_byte_len]) - } - - /// Read the namespace offset from the `index`th entry from the namespace table. - fn read_ns_offset_unchecked(&self, index: &NsIndex) -> usize { - let start = - index.0 * (NS_ID_BYTE_LEN + NS_OFFSET_BYTE_LEN) + NUM_NSS_BYTE_LEN + NS_ID_BYTE_LEN; - usize_from_bytes::(&self.bytes[start..start + NS_OFFSET_BYTE_LEN]) - } - - /// Helper for [`NsTable::validate`], used in our custom [`serde`] - /// implementation. - /// - /// Checks conditions 1-3 of [`NsTable::validate`]. 
Those conditions can be - /// checked by looking only at the contents of the [`NsTable`]. - fn validate_deserialization_invariants(&self) -> Result<(), NsTableValidationError> { - use NsTableValidationError::*; - - // Byte length for a table with `x` entries must be exactly `x * - // NsTableBuilder::entry_byte_len() + - // NsTableBuilder::header_byte_len()`. - // - // Explanation for the following `if` condition: - // - // The above condition is equivalent to `[byte length] - - // header_byte_len` equals 0 modulo `entry_byte_len`. In order to - // compute `[byte length] - header_byte_len` we must first check that - // `[byte length]` is not exceeded by `header_byte_len` - if self.bytes.len() < NsTableBuilder::header_byte_len() - || (self.bytes.len() - NsTableBuilder::header_byte_len()) - % NsTableBuilder::entry_byte_len() - != 0 - { - return Err(InvalidByteLen); - } - - // Header must declare the correct number of namespaces - // - // TODO this check obsolete after - // https://github.com/EspressoSystems/espresso-sequencer/issues/1604 - if self.len().0 != self.read_num_nss() { - return Err(InvalidHeader); - } - - // Offsets must increase monotonically. Offsets must - // be nonzero. Namespace IDs must be unique - { - let mut prev_offset = 0; - let mut repeat_ns_ids = HashSet::::new(); - for (ns_id, offset) in self.iter().map(|i| { - ( - self.read_ns_id_unchecked(&i), - self.read_ns_offset_unchecked(&i), - ) - }) { - if !repeat_ns_ids.insert(ns_id) { - return Err(DuplicateNamespaceId); - } - if offset <= prev_offset { - return Err(NonIncreasingEntries); - } - prev_offset = offset; - } - } - - Ok(()) - } -} - -impl EncodeBytes for NsTable { - fn encode(&self) -> Arc<[u8]> { - Arc::from(self.bytes.as_ref()) - } -} - -impl Committable for NsTable { - fn commit(&self) -> Commitment { - RawCommitmentBuilder::new(&Self::tag()) - .var_size_bytes(&self.bytes) - .finalize() - } - - fn tag() -> String { - "NSTABLE".into() - } -} - -/// Return type for [`NsTable::validate`]. -#[derive(Error, Debug, Display, Eq, PartialEq)] -pub enum NsTableValidationError { - InvalidByteLen, - NonIncreasingEntries, - DuplicateNamespaceId, - InvalidHeader, // TODO this variant obsolete after https://github.com/EspressoSystems/espresso-sequencer/issues/1604 - InvalidFinalOffset, // TODO this variant obsolete after https://github.com/EspressoSystems/espresso-sequencer/issues/1604 - ExpectNonemptyNsTable, -} - -pub struct NsTableBuilder { - bytes: Vec, - num_entries: usize, -} - -impl NsTableBuilder { - pub fn new() -> Self { - // pre-allocate space for the ns table header - Self { - bytes: Vec::from([0; NUM_NSS_BYTE_LEN]), - num_entries: 0, - } - } - - /// Add an entry to the namespace table. - pub fn append_entry(&mut self, ns_id: NamespaceId, offset: usize) { - // hack to serialize `NamespaceId` to `NS_ID_BYTE_LEN` bytes - self.bytes - .extend(u32_to_bytes::(u32::from(ns_id))); - self.bytes - .extend(usize_to_bytes::(offset)); - self.num_entries += 1; - } - - /// Serialize to bytes and consume self. - pub fn into_ns_table(self) -> NsTable { - let mut bytes = self.bytes; - // write the number of entries to the ns table header - bytes[..NUM_NSS_BYTE_LEN] - .copy_from_slice(&usize_to_bytes::(self.num_entries)); - NsTable { bytes } - } - - /// Byte length of a namespace table header. - pub const fn header_byte_len() -> usize { - NUM_NSS_BYTE_LEN - } - - /// Byte length of a single namespace table entry. 
- pub const fn entry_byte_len() -> usize { - NS_ID_BYTE_LEN + NS_OFFSET_BYTE_LEN - } -} - -/// Index for an entry in a ns table. -#[derive(Clone, Debug, Display, Eq, Hash, PartialEq)] -pub struct NsIndex(usize); -bytes_serde_impl!(NsIndex, to_bytes, [u8; NUM_NSS_BYTE_LEN], from_bytes); - -impl NsIndex { - pub fn to_bytes(&self) -> [u8; NUM_NSS_BYTE_LEN] { - usize_to_bytes::(self.0) - } - fn from_bytes(bytes: &[u8]) -> Self { - Self(usize_from_bytes::(bytes)) - } -} - -/// Number of entries in a namespace table. -pub struct NumNss(usize); - -impl NumNss { - pub fn in_bounds(&self, index: &NsIndex) -> bool { - index.0 < self.0 - } -} - -/// Return type for [`Payload::ns_iter`]. -pub(in crate::block) struct NsIter(Range); - -impl NsIter { - pub fn new(num_nss: &NumNss) -> Self { - Self(0..num_nss.0) - } -} - -// Simple `impl Iterator` delegates to `Range`. -impl Iterator for NsIter { - type Item = NsIndex; - - fn next(&mut self) -> Option { - self.0.next().map(NsIndex) - } -} - -#[cfg(test)] -mod test; diff --git a/sequencer/src/block/full_payload/ns_table/test.rs b/sequencer/src/block/full_payload/ns_table/test.rs deleted file mode 100644 index d0499ce6ef..0000000000 --- a/sequencer/src/block/full_payload/ns_table/test.rs +++ /dev/null @@ -1,251 +0,0 @@ -use super::{ - NsTable, NsTableBuilder, NsTableValidationError, NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN, - NUM_NSS_BYTE_LEN, -}; -use crate::{ - block::{ - test::ValidTest, - uint_bytes::{u32_max_from_byte_len, usize_max_from_byte_len, usize_to_bytes}, - }, - NamespaceId, Payload, -}; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; -use hotshot::traits::BlockPayload; -use rand::{Rng, RngCore}; -use NsTableValidationError::*; - -#[test] -fn random_valid() { - setup_logging(); - setup_backtrace(); - let mut rng = jf_utils::test_rng(); - - for num_entries in 0..20 { - expect_valid(&random_valid_ns_table(num_entries, &mut rng)); - } -} - -#[test] -fn ns_table_byte_len() { - setup_logging(); - setup_backtrace(); - let mut rng = jf_utils::test_rng(); - - // Extremely small byte lengths should get rejected. - { - let mut ns_table = NsTable { bytes: Vec::new() }; - expect_invalid(&ns_table, InvalidByteLen); - expect_num_bytes_invalid(&mut ns_table, NsTableBuilder::header_byte_len(), &mut rng); - } - - // Add enough bytes for a new entry. - { - let mut ns_table = random_valid_ns_table(20, &mut rng); - expect_num_bytes_invalid(&mut ns_table, NsTableBuilder::entry_byte_len(), &mut rng); - } - - // Helper fn: add 1 byte to the `ns_table` `num_bytes` times. Expect - // invalidity in all but the final time. 
- fn expect_num_bytes_invalid(ns_table: &mut NsTable, num_bytes: usize, rng: &mut R) - where - R: RngCore, - { - for i in 0..num_bytes { - ns_table.bytes.push(rng.gen()); - if i == num_bytes - 1 { - break; // final iteration: no error expected - } - expect_invalid(ns_table, InvalidByteLen); - } - expect_invalid(ns_table, InvalidHeader); - } -} - -#[tokio::test(flavor = "multi_thread")] -async fn payload_byte_len() { - setup_logging(); - setup_backtrace(); - let test_case = vec![vec![5, 8, 8], vec![7, 9, 11], vec![10, 5, 8]]; - let mut rng = jf_utils::test_rng(); - let test = ValidTest::from_tx_lengths(test_case, &mut rng); - let mut block = - Payload::from_transactions(test.all_txs(), &Default::default(), &Default::default()) - .await - .unwrap() - .0; - let payload_byte_len = block.byte_len(); - let final_offset = block - .ns_table() - .read_ns_offset_unchecked(&block.ns_table().iter().last().unwrap()); - - // final offset matches payload byte len - block.ns_table().validate(&payload_byte_len).unwrap(); - - // Helper closure fn: modify the final offset of `block`'s namespace table - // by adding `diff` to it. Assert failure. - let mut modify_final_offset = |diff: isize| { - let ns_table_byte_len = block.ns_table().bytes.len(); - let old_final_offset: isize = final_offset.try_into().unwrap(); - let new_final_offset: usize = (old_final_offset + diff).try_into().unwrap(); - - block.ns_table_mut().bytes[ns_table_byte_len - NS_OFFSET_BYTE_LEN..] - .copy_from_slice(&usize_to_bytes::(new_final_offset)); - assert_eq!( - block.ns_table().validate(&payload_byte_len).unwrap_err(), - InvalidFinalOffset - ); - }; - - // final offset exceeds payload byte len - modify_final_offset(1); - - // final offset less than payload byte len - modify_final_offset(-1); - - // zero-length payload - let empty_block = Payload::from_transactions([], &Default::default(), &Default::default()) - .await - .unwrap() - .0; - assert_eq!(empty_block.ns_table().len().0, 0); - assert_eq!( - empty_block.ns_table().bytes, - usize_to_bytes::(0) - ); - empty_block - .ns_table() - .validate(&empty_block.byte_len()) - .unwrap(); - - // empty namespace table with nonempty payload - *block.ns_table_mut() = empty_block.ns_table().clone(); - assert_eq!( - block.ns_table().validate(&payload_byte_len).unwrap_err(), - ExpectNonemptyNsTable - ); -} - -#[test] -fn monotonic_increase() { - setup_logging(); - setup_backtrace(); - - // Duplicate namespace ID - two_entries_ns_table((5, 5), (5, 6), Some(DuplicateNamespaceId)); - - // Decreasing namespace ID - two_entries_ns_table((5, 5), (4, 6), None); - - // Duplicate offset - two_entries_ns_table((5, 5), (6, 5), Some(NonIncreasingEntries)); - - // Decreasing offset - two_entries_ns_table((5, 5), (6, 4), Some(NonIncreasingEntries)); - - // Zero namespace ID - two_entries_ns_table((0, 5), (6, 6), None); - - // Zero offset - two_entries_ns_table((5, 0), (6, 6), Some(NonIncreasingEntries)); - - // Helper fn: build a 2-entry NsTable, assert failure - fn two_entries_ns_table( - entry1: (u32, usize), - entry2: (u32, usize), - expect_err: Option, - ) { - let mut ns_table_builder = NsTableBuilder::new(); - ns_table_builder.append_entry(NamespaceId::from(entry1.0), entry1.1); - ns_table_builder.append_entry(NamespaceId::from(entry2.0), entry2.1); - let ns_table = ns_table_builder.into_ns_table(); - if let Some(err) = expect_err { - expect_invalid(&ns_table, err); - } else { - expect_valid(&ns_table); - } - } -} - -// TODO this test obsolete after -// 
https://github.com/EspressoSystems/espresso-sequencer/issues/1604 -#[test] -fn header() { - setup_logging(); - setup_backtrace(); - let mut rng = jf_utils::test_rng(); - - for num_entries in 0..20 { - let mut ns_table = random_valid_ns_table(num_entries, &mut rng); - if num_entries != 0 { - set_header(&mut ns_table, 0); - set_header(&mut ns_table, num_entries - 1); - } - set_header(&mut ns_table, num_entries + 1); - set_header(&mut ns_table, usize_max_from_byte_len(NUM_NSS_BYTE_LEN)); - } - - // Helper fn: set the header of `ns_table` to declare `num_nss` entries, - // assert failure. - fn set_header(ns_table: &mut NsTable, num_nss: usize) { - ns_table.bytes[..NUM_NSS_BYTE_LEN] - .copy_from_slice(&usize_to_bytes::(num_nss)); - expect_invalid(ns_table, InvalidHeader); - } -} - -fn random_valid_ns_table(num_entries: usize, rng: &mut R) -> NsTable -where - R: RngCore, -{ - let (offset_max_increment, ns_id_max_increment) = if num_entries == 0 { - (0, 0) - } else { - let num_entries_u32: u32 = num_entries.try_into().unwrap(); - ( - usize_max_from_byte_len(NS_OFFSET_BYTE_LEN) / num_entries, - u32_max_from_byte_len(NS_ID_BYTE_LEN) / num_entries_u32, - ) - }; - - let mut ns_id = 0; - let mut offset = 0; - let mut ns_table_builder = NsTableBuilder::new(); - for _ in 0..num_entries { - // ns_id, offset must increase monotonically - ns_id += rng.gen_range(1..=ns_id_max_increment); - offset += rng.gen_range(1..=offset_max_increment); - ns_table_builder.append_entry(NamespaceId::from(ns_id), offset); - } - ns_table_builder.into_ns_table() -} - -fn expect_valid(ns_table: &NsTable) { - // `validate` should succeed - ns_table.validate_deserialization_invariants().unwrap(); - - // serde round-trip should succeed - let serde_bytes = bincode::serialize(ns_table).unwrap(); - let ns_table_serde: NsTable = bincode::deserialize(&serde_bytes).unwrap(); - assert_eq!(&ns_table_serde, ns_table); -} - -fn expect_invalid(ns_table: &NsTable, err: NsTableValidationError) { - use serde::de::Error; - - // `validate` should fail - assert_eq!( - ns_table.validate_deserialization_invariants().unwrap_err(), - err - ); - - // serde round-trip should fail - // - // need to use `to_string` because `bincode::Error`` is not `Eq` - let serde_bytes = bincode::serialize(ns_table).unwrap(); - assert_eq!( - bincode::deserialize::(&serde_bytes) - .unwrap_err() - .to_string(), - bincode::Error::custom(err).to_string(), - ); -} diff --git a/sequencer/src/block/full_payload/payload.rs b/sequencer/src/block/full_payload/payload.rs deleted file mode 100644 index bf2398c656..0000000000 --- a/sequencer/src/block/full_payload/payload.rs +++ /dev/null @@ -1,313 +0,0 @@ -use crate::{ - block::{ - full_payload::ns_table::{NsIndex, NsTable, NsTableBuilder}, - namespace_payload::{Index, Iter, NsPayload, NsPayloadBuilder, NsPayloadRange, TxProof}, - }, - ChainConfig, NamespaceId, NodeState, SeqTypes, Transaction, ValidatedState, -}; - -use async_trait::async_trait; -use committable::Committable; -use derive_more::Display; -use hotshot_query_service::availability::QueryablePayload; -use hotshot_types::{ - traits::{BlockPayload, EncodeBytes}, - utils::BuilderCommitment, - vid::{VidCommon, VidSchemeType}, -}; -use jf_vid::VidScheme; -use serde::{Deserialize, Serialize}; -use sha2::Digest; -use std::{collections::BTreeMap, sync::Arc}; - -/// Raw payload data for an entire block. 
-/// -/// A block consists of two sequences of arbitrary bytes: -/// - `ns_table`: namespace table -/// - `ns_payloads`: namespace payloads -/// -/// Any sequence of bytes is a valid `ns_table`. Any sequence of bytes is a -/// valid `ns_payloads`. The contents of `ns_table` determine how to interpret -/// `ns_payload`. -/// -/// # Namespace table -/// -/// See [`NsTable`] for the format of a namespace table. -/// -/// # Namespace payloads -/// -/// A concatenation of payload bytes for multiple individual namespaces. -/// Namespace boundaries are dictated by `ns_table`. See [`NsPayload`] for the -/// format of a namespace payload. -#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] -pub struct Payload { - // Concatenated payload bytes for each namespace - // - // TODO want to rename thisfield to `ns_payloads`, but can't due to - // serialization compatibility. - #[serde(with = "base64_bytes")] - raw_payload: Vec, - - ns_table: NsTable, -} - -impl Payload { - pub fn ns_table(&self) -> &NsTable { - &self.ns_table - } - - /// Like [`QueryablePayload::transaction_with_proof`] except without the - /// proof. - pub fn transaction(&self, index: &Index) -> Option { - let ns_id = self.ns_table.read_ns_id(index.ns())?; - let ns_payload = self.ns_payload(index.ns()); - ns_payload.export_tx(&ns_id, index.tx()) - } - - // CRATE-VISIBLE HELPERS START HERE - - pub(in crate::block) fn read_ns_payload(&self, range: &NsPayloadRange) -> &NsPayload { - NsPayload::from_bytes_slice(&self.raw_payload[range.as_block_range()]) - } - - /// Convenience wrapper for [`Self::read_ns_payload`]. - /// - /// `index` is not checked. Use `self.ns_table().in_bounds()` as needed. - pub(in crate::block) fn ns_payload(&self, index: &NsIndex) -> &NsPayload { - let ns_payload_range = self.ns_table().ns_range(index, &self.byte_len()); - self.read_ns_payload(&ns_payload_range) - } - - pub(in crate::block) fn byte_len(&self) -> PayloadByteLen { - PayloadByteLen(self.raw_payload.len()) - } - - // PRIVATE HELPERS START HERE - - /// Need a sync version of [`BlockPayload::from_transactions`] in order to impl [`BlockPayload::empty`]. 
- fn from_transactions_sync( - transactions: impl IntoIterator>::Transaction> + Send, - chain_config: ChainConfig, - ) -> Result< - (Self, >::Metadata), - >::Error, - > { - // accounting for block byte length limit - let max_block_byte_len: usize = u64::from(chain_config.max_block_size) - .try_into() - .map_err(|_| >::Error::BlockBuilding)?; - let mut block_byte_len = NsTableBuilder::header_byte_len(); - - // add each tx to its namespace - let mut ns_builders = BTreeMap::::new(); - for tx in transactions.into_iter() { - // accounting for block byte length limit - block_byte_len += tx.size_in_block(!ns_builders.contains_key(&tx.namespace())); - if block_byte_len > max_block_byte_len { - tracing::warn!("transactions truncated to fit in maximum block byte length {max_block_byte_len}"); - break; - } - - let ns_builder = ns_builders.entry(tx.namespace()).or_default(); - ns_builder.append_tx(tx); - } - - // build block payload and namespace table - let mut payload = Vec::new(); - let mut ns_table_builder = NsTableBuilder::new(); - for (ns_id, ns_builder) in ns_builders { - payload.extend(ns_builder.into_bytes()); - ns_table_builder.append_entry(ns_id, payload.len()); - } - let ns_table = ns_table_builder.into_ns_table(); - let metadata = ns_table.clone(); - Ok(( - Self { - raw_payload: payload, - ns_table, - }, - metadata, - )) - } -} - -#[async_trait] -impl BlockPayload for Payload { - // TODO BlockPayload trait eliminate unneeded args, return vals of type - // `Self::Metadata` https://github.com/EspressoSystems/HotShot/issues/3300 - type Error = crate::Error; - type Transaction = Transaction; - type Instance = NodeState; - type Metadata = NsTable; - type ValidatedState = ValidatedState; - - async fn from_transactions( - transactions: impl IntoIterator + Send, - validated_state: &Self::ValidatedState, - instance_state: &Self::Instance, - ) -> Result<(Self, Self::Metadata), Self::Error> { - let validated_state_cf = validated_state.chain_config; - let instance_state_cf = instance_state.chain_config; - - let chain_config = if validated_state_cf.commit() == instance_state_cf.commit() { - instance_state_cf - } else { - match validated_state_cf.resolve() { - Some(cf) => cf, - None => { - instance_state - .peers - .as_ref() - .fetch_chain_config(validated_state_cf.commit()) - .await? - } - } - }; - - Self::from_transactions_sync(transactions, chain_config, instance_state) - } - - // TODO avoid cloning the entire payload here? - fn from_bytes(block_payload_bytes: &[u8], ns_table: &Self::Metadata) -> Self { - Self { - raw_payload: block_payload_bytes.to_vec(), - ns_table: ns_table.clone(), - } - } - - fn empty() -> (Self, Self::Metadata) { - let payload = Self::from_transactions_sync(vec![], Default::default(), &Default::default()) - .unwrap() - .0; - let ns_table = payload.ns_table().clone(); - (payload, ns_table) - } - - fn builder_commitment(&self, metadata: &Self::Metadata) -> BuilderCommitment { - let ns_table_bytes = self.ns_table.encode(); - - // TODO `metadata_bytes` equals `ns_table_bytes`, so we are - // double-hashing the ns_table. Why? To maintain serialization - // compatibility. 
- // https://github.com/EspressoSystems/espresso-sequencer/issues/1576 - let metadata_bytes = metadata.encode(); - - let mut digest = sha2::Sha256::new(); - digest.update((self.raw_payload.len() as u64).to_le_bytes()); - digest.update((ns_table_bytes.len() as u64).to_le_bytes()); - digest.update((metadata_bytes.len() as u64).to_le_bytes()); // https://github.com/EspressoSystems/espresso-sequencer/issues/1576 - digest.update(&self.raw_payload); - digest.update(ns_table_bytes); - digest.update(metadata_bytes); // https://github.com/EspressoSystems/espresso-sequencer/issues/1576 - BuilderCommitment::from_raw_digest(digest.finalize()) - } - - fn transactions<'a>( - &'a self, - metadata: &'a Self::Metadata, - ) -> impl 'a + Iterator { - self.enumerate(metadata).map(|(_, t)| t) - } -} - -impl QueryablePayload for Payload { - // TODO changes to QueryablePayload trait: - // https://github.com/EspressoSystems/hotshot-query-service/issues/639 - type TransactionIndex = Index; - type Iter<'a> = Iter<'a>; - type InclusionProof = TxProof; - - fn len(&self, _meta: &Self::Metadata) -> usize { - // Counting txs is nontrivial. The easiest solution is to consume an - // iterator. If performance is a concern then we could cache this count - // on construction of `Payload`. - self.iter(_meta).count() - } - - fn iter<'a>(&'a self, _meta: &'a Self::Metadata) -> Self::Iter<'a> { - Iter::new(self) - } - - fn transaction_with_proof( - &self, - _meta: &Self::Metadata, - index: &Self::TransactionIndex, - ) -> Option<(Self::Transaction, Self::InclusionProof)> { - // TODO HACK! THE RETURNED PROOF MIGHT FAIL VERIFICATION. - // https://github.com/EspressoSystems/hotshot-query-service/issues/639 - // - // Need a `VidCommon` to proceed. Need to modify `QueryablePayload` - // trait to add a `VidCommon` arg. In the meantime tests fail if I leave - // it `todo!()`, so this hack allows tests to pass. - let common = hotshot_types::vid::vid_scheme(10) - .disperse(&self.raw_payload) - .unwrap() - .common; - - TxProof::new(index, self, &common) - } -} - -impl std::fmt::Display for Payload { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{self:#?}") - } -} - -impl EncodeBytes for Payload { - fn encode(&self) -> Arc<[u8]> { - Arc::from(self.raw_payload.as_ref()) - } -} - -/// Byte length of a block payload, which includes all namespaces but *not* the -/// namespace table. -#[derive(Clone, Debug, Display, Eq, Hash, PartialEq)] -pub struct PayloadByteLen(usize); - -impl PayloadByteLen { - /// Extract payload byte length from a [`VidCommon`] and construct a new [`Self`] from it. - pub fn from_vid_common(common: &VidCommon) -> Self { - Self(usize::try_from(VidSchemeType::get_payload_byte_len(common)).unwrap()) - } - - /// Is the payload byte length declared in a [`VidCommon`] equal [`Self`]? - pub fn is_consistent(&self, common: &VidCommon) -> bool { - // failure to convert to usize implies that `common` cannot be - // consistent with `self`. 
- let expected = match usize::try_from(VidSchemeType::get_payload_byte_len(common)) { - Ok(n) => n, - Err(_) => { - tracing::warn!( - "VidCommon byte len u32 {} should convert to usize", - VidSchemeType::get_payload_byte_len(common) - ); - return false; - } - }; - - self.0 == expected - } - - pub(in crate::block::full_payload) fn as_usize(&self) -> usize { - self.0 - } -} - -#[cfg(any(test, feature = "testing"))] -impl hotshot_types::traits::block_contents::TestableBlock for Payload { - fn genesis() -> Self { - BlockPayload::empty().0 - } - - fn txn_count(&self) -> u64 { - self.len(&self.ns_table) as u64 - } -} - -#[cfg(any(test, feature = "testing"))] -impl Payload { - pub fn ns_table_mut(&mut self) -> &mut NsTable { - &mut self.ns_table - } -} diff --git a/sequencer/src/block/namespace_payload.rs b/sequencer/src/block/namespace_payload.rs deleted file mode 100644 index ecd894f86e..0000000000 --- a/sequencer/src/block/namespace_payload.rs +++ /dev/null @@ -1,12 +0,0 @@ -mod iter; -mod ns_payload; -mod ns_payload_range; -mod tx_proof; -mod types; - -pub use iter::{Index, Iter}; -pub use tx_proof::TxProof; - -pub(in crate::block) use ns_payload::{NsPayload, NsPayloadOwned}; -pub(in crate::block) use ns_payload_range::NsPayloadRange; -pub(in crate::block) use types::NsPayloadBuilder; diff --git a/sequencer/src/block/namespace_payload/iter.rs b/sequencer/src/block/namespace_payload/iter.rs deleted file mode 100644 index cf136f76eb..0000000000 --- a/sequencer/src/block/namespace_payload/iter.rs +++ /dev/null @@ -1,81 +0,0 @@ -use crate::block::{ - full_payload::{NsIndex, NsIter, Payload}, - namespace_payload::types::{TxIndex, TxIter}, -}; -use serde::{Deserialize, Serialize}; -use std::iter::Peekable; - -#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] -pub struct Index { - ns_index: NsIndex, - tx_index: TxIndex, -} - -impl Index { - pub fn ns(&self) -> &NsIndex { - &self.ns_index - } - pub(in crate::block) fn tx(&self) -> &TxIndex { - &self.tx_index - } -} - -// TODO don't impl `PartialOrd` -// It's needed only for `QueryablePayload` trait: -// https://github.com/EspressoSystems/hotshot-query-service/issues/639 -impl PartialOrd for Index { - fn partial_cmp(&self, _other: &Self) -> Option { - Some(self.cmp(_other)) - } -} -// TODO don't impl `Ord` -// It's needed only for `QueryablePayload` trait: -// https://github.com/EspressoSystems/hotshot-query-service/issues/639 -impl Ord for Index { - fn cmp(&self, _other: &Self) -> std::cmp::Ordering { - unimplemented!() - } -} - -/// Cartesian product of [`NsIter`], [`TxIter`]. 
-pub struct Iter<'a> { - ns_iter: Peekable, - tx_iter: Option, - block: &'a Payload, -} - -impl<'a> Iter<'a> { - pub fn new(block: &'a Payload) -> Self { - Self { - ns_iter: NsIter::new(&block.ns_table().len()).peekable(), - tx_iter: None, - block, - } - } -} - -impl Iterator for Iter<'_> { - type Item = Index; - - fn next(&mut self) -> Option { - loop { - let Some(ns_index) = self.ns_iter.peek() else { - break None; // ns_iter consumed - }; - - if let Some(tx_index) = self - .tx_iter - .get_or_insert_with(|| self.block.ns_payload(ns_index).iter()) - .next() - { - break Some(Index { - ns_index: ns_index.clone(), - tx_index, - }); - } - - self.tx_iter = None; // unset `tx_iter`; it's consumed for this namespace - self.ns_iter.next(); - } - } -} diff --git a/sequencer/src/block/namespace_payload/ns_payload.rs b/sequencer/src/block/namespace_payload/ns_payload.rs deleted file mode 100644 index f2997839df..0000000000 --- a/sequencer/src/block/namespace_payload/ns_payload.rs +++ /dev/null @@ -1,137 +0,0 @@ -use crate::{ - block::namespace_payload::types::{ - FromNsPayloadBytes, NsPayloadByteLen, NsPayloadBytesRange, NumTxs, NumTxsRange, - NumTxsUnchecked, TxIndex, TxIter, TxPayloadRange, TxTableEntriesRange, - }, - NamespaceId, Transaction, -}; -use serde::{Deserialize, Serialize}; - -/// Raw binary data for a single namespace's payload. -/// -/// Any sequence of bytes is a valid [`NsPayload`]. -/// -/// See module-level documentation [`types`](super::types) for a full -/// specification of the binary format of a namespace. -pub(in crate::block) struct NsPayload([u8]); - -impl NsPayload { - pub fn from_bytes_slice(bytes: &[u8]) -> &NsPayload { - NsPayload::new_private(bytes) - } - pub fn as_bytes_slice(&self) -> &[u8] { - &self.0 - } - pub fn byte_len(&self) -> NsPayloadByteLen { - NsPayloadByteLen::from_usize(self.0.len()) - } - - /// Read and parse bytes from the ns payload. - /// - /// Arg `range: &R` is convertible into a `Range` via - /// [`NsPayloadBytesRange`]. The payload bytes are parsed into a `R::Output` - /// via [`FromNsPayloadBytes`]. - pub fn read<'a, R>(&'a self, range: &R) -> R::Output - where - R: NsPayloadBytesRange<'a>, - { - >::from_payload_bytes(&self.0[range.ns_payload_range()]) - } - - /// Iterator over all transactions in this namespace. - pub fn iter(&self) -> TxIter { - self.iter_from_num_txs(&self.read_num_txs()) - } - - /// Return all transactions in this namespace. The namespace ID for each - /// returned [`Transaction`] is set to `ns_id`. - pub fn export_all_txs(&self, ns_id: &NamespaceId) -> Vec { - let num_txs = self.read_num_txs(); - self.iter_from_num_txs(&num_txs) - .map(|i| self.tx_from_num_txs(ns_id, &i, &num_txs)) - .collect() - } - - /// Return a transaction from this namespace. Set its namespace ID to - /// `ns_id`. - /// - /// Return `None` if `index` is out of bounds. - pub fn export_tx(&self, ns_id: &NamespaceId, index: &TxIndex) -> Option { - let num_txs_unchecked = self.read_num_txs(); - let num_txs = NumTxs::new(&num_txs_unchecked, &self.byte_len()); - if !num_txs.in_bounds(index) { - return None; // error: tx index out of bounds - } - Some(self.tx_from_num_txs(ns_id, index, &num_txs_unchecked)) - } - - /// Private helper. (Could be pub if desired.) 
- fn read_num_txs(&self) -> NumTxsUnchecked { - self.read(&NumTxsRange::new(&self.byte_len())) - } - - /// Private helper - fn iter_from_num_txs(&self, num_txs: &NumTxsUnchecked) -> TxIter { - let num_txs = NumTxs::new(num_txs, &self.byte_len()); - TxIter::new(&num_txs) - } - - /// Private helper - fn tx_from_num_txs( - &self, - ns_id: &NamespaceId, - index: &TxIndex, - num_txs_unchecked: &NumTxsUnchecked, - ) -> Transaction { - let tx_table_entries = self.read(&TxTableEntriesRange::new(index)); - let tx_range = TxPayloadRange::new(num_txs_unchecked, &tx_table_entries, &self.byte_len()); - - // TODO don't copy the tx bytes into the return value - // https://github.com/EspressoSystems/hotshot-query-service/issues/267 - let tx_payload = self.read(&tx_range).to_payload_bytes().to_vec(); - Transaction::new(*ns_id, tx_payload) - } -} - -#[repr(transparent)] -#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] -#[serde(transparent)] -pub(in crate::block) struct NsPayloadOwned(#[serde(with = "base64_bytes")] Vec); - -/// Crazy boilerplate code to make it so that [`NsPayloadOwned`] is to -/// [`NsPayload`] as [`Vec`] is to `[T]`. See [How can I create newtypes for -/// an unsized type and its owned counterpart (like `str` and `String`) in safe -/// Rust? - Stack Overflow](https://stackoverflow.com/q/64977525) -mod ns_payload_owned { - use super::{NsPayload, NsPayloadOwned}; - use std::borrow::Borrow; - use std::ops::Deref; - - impl NsPayload { - // pub(super) because I want it visible everywhere in this file but I - // also want this boilerplate code quarrantined in `ns_payload_owned`. - pub(super) fn new_private(p: &[u8]) -> &NsPayload { - unsafe { &*(p as *const [u8] as *const NsPayload) } - } - } - - impl Deref for NsPayloadOwned { - type Target = NsPayload; - fn deref(&self) -> &NsPayload { - NsPayload::new_private(&self.0) - } - } - - impl Borrow for NsPayloadOwned { - fn borrow(&self) -> &NsPayload { - self.deref() - } - } - - impl ToOwned for NsPayload { - type Owned = NsPayloadOwned; - fn to_owned(&self) -> NsPayloadOwned { - NsPayloadOwned(self.0.to_owned()) - } - } -} diff --git a/sequencer/src/block/namespace_payload/ns_payload_range.rs b/sequencer/src/block/namespace_payload/ns_payload_range.rs deleted file mode 100644 index f2812f6fd9..0000000000 --- a/sequencer/src/block/namespace_payload/ns_payload_range.rs +++ /dev/null @@ -1,34 +0,0 @@ -use super::types::{NsPayloadByteLen, NsPayloadBytesRange}; -use std::ops::Range; - -/// Index range for a namespace payload inside a block payload. -#[derive(Clone, Debug, Eq, Hash, PartialEq)] -pub(in crate::block) struct NsPayloadRange(Range); - -impl NsPayloadRange { - /// TODO restrict visibility? - pub fn new(start: usize, end: usize) -> Self { - Self(start..end) - } - - /// Access the underlying index range for this namespace inside a block - /// payload. - pub fn as_block_range(&self) -> Range { - self.0.clone() - } - - /// Return the byte length of this namespace. - pub fn byte_len(&self) -> NsPayloadByteLen { - NsPayloadByteLen::from_usize(self.0.len()) - } - - /// Convert a [`NsPayloadBytesRange`] into a range that's relative to the - /// entire block payload. 
- pub fn block_range<'a, R>(&self, range: &R) -> Range - where - R: NsPayloadBytesRange<'a>, - { - let range = range.ns_payload_range(); - range.start + self.0.start..range.end + self.0.start - } -} diff --git a/sequencer/src/block/namespace_payload/tx_proof.rs b/sequencer/src/block/namespace_payload/tx_proof.rs deleted file mode 100644 index ee025c0f4b..0000000000 --- a/sequencer/src/block/namespace_payload/tx_proof.rs +++ /dev/null @@ -1,253 +0,0 @@ -use crate::{ - block::{ - full_payload::{ - NsTable, {Payload, PayloadByteLen}, - }, - namespace_payload::{ - iter::Index, - types::{ - NumTxs, NumTxsRange, NumTxsUnchecked, TxIndex, TxPayloadRange, TxTableEntries, - TxTableEntriesRange, - }, - }, - }, - Transaction, -}; -use hotshot_query_service::{VidCommitment, VidCommon}; -use hotshot_types::{ - traits::EncodeBytes, - vid::{vid_scheme, SmallRangeProofType, VidSchemeType}, -}; -use jf_vid::{ - payload_prover::{PayloadProver, Statement}, - VidScheme, -}; -use serde::{Deserialize, Serialize}; - -/// Proof of correctness for transaction bytes in a block. -#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] -pub struct TxProof { - // Naming conventions for this struct's fields: - // - `payload_x`: bytes from the payload - // - `payload_proof_x`: a proof of those bytes from the payload - tx_index: TxIndex, - - // Number of txs declared in the tx table - payload_num_txs: NumTxsUnchecked, - payload_proof_num_txs: SmallRangeProofType, - - // Tx table entries for this tx - payload_tx_table_entries: TxTableEntries, - payload_proof_tx_table_entries: SmallRangeProofType, - - // This tx's payload bytes. - // `None` if this tx has zero length. - payload_proof_tx: Option, -} - -impl TxProof { - /// Returns the [`Transaction`] indicated by `index`, along with a proof of - /// correctness for that transaction. Returns `None` on error. - pub fn new( - index: &Index, - payload: &Payload, - common: &VidCommon, - ) -> Option<(Transaction, Self)> { - let payload_byte_len = payload.byte_len(); - if !payload_byte_len.is_consistent(common) { - tracing::warn!( - "payload byte len {} inconsistent with common {}", - payload_byte_len, - VidSchemeType::get_payload_byte_len(common) - ); - return None; // error: payload byte len inconsistent with common - } - if !payload.ns_table().in_bounds(index.ns()) { - tracing::warn!("ns_index {:?} out of bounds", index.ns()); - return None; // error: ns index out of bounds - } - // check tx index below - - let payload_bytes_arc = payload.encode(); // pacify borrow checker - let payload_bytes = payload_bytes_arc.as_ref(); - let ns_range = payload.ns_table().ns_range(index.ns(), &payload_byte_len); - let ns_byte_len = ns_range.byte_len(); - let ns_payload = payload.read_ns_payload(&ns_range); - let vid = vid_scheme( - VidSchemeType::get_num_storage_nodes(common) - .try_into() - .unwrap(), - ); - - // Read the tx table len from this namespace's tx table and compute a - // proof of correctness. - let num_txs_range = NumTxsRange::new(&ns_byte_len); - let payload_num_txs = ns_payload.read(&num_txs_range); - - // Check tx index. - // - // TODO the next line of code (and other code) could be easier to read - // if we make a helpers that repeat computation we've already done. 
- if !NumTxs::new(&payload_num_txs, &ns_byte_len).in_bounds(index.tx()) { - return None; // error: tx index out of bounds - } - - let payload_proof_num_txs = vid - .payload_proof(payload_bytes, ns_range.block_range(&num_txs_range)) - .ok()?; - - // Read the tx table entries for this tx and compute a proof of - // correctness. - let tx_table_entries_range = TxTableEntriesRange::new(index.tx()); - let payload_tx_table_entries = ns_payload.read(&tx_table_entries_range); - let payload_proof_tx_table_entries = { - vid.payload_proof(payload_bytes, ns_range.block_range(&tx_table_entries_range)) - .ok()? - }; - - // Read the tx payload and compute a proof of correctness. - let tx_payload_range = - TxPayloadRange::new(&payload_num_txs, &payload_tx_table_entries, &ns_byte_len); - let payload_proof_tx = { - let range = ns_range.block_range(&tx_payload_range); - if range.is_empty() { - None - } else { - Some(vid.payload_proof(payload_bytes, range).ok()?) - } - }; - - let tx = { - let ns_id = payload.ns_table().read_ns_id_unchecked(index.ns()); - let tx_payload = ns_payload - .read(&tx_payload_range) - .to_payload_bytes() - .to_vec(); - Transaction::new(ns_id, tx_payload) - }; - - Some(( - tx, - TxProof { - tx_index: index.tx().clone(), - payload_num_txs, - payload_proof_num_txs, - payload_tx_table_entries, - payload_proof_tx_table_entries, - payload_proof_tx, - }, - )) - } - - /// Verify a [`TxProof`] for `tx` against a payload commitment. Returns - /// `None` on error. - pub fn verify( - &self, - ns_table: &NsTable, - tx: &Transaction, - commit: &VidCommitment, - common: &VidCommon, - ) -> Option { - VidSchemeType::is_consistent(commit, common).ok()?; - let Some(ns_index) = ns_table.find_ns_id(&tx.namespace()) else { - tracing::info!("ns id {} does not exist", tx.namespace()); - return None; // error: ns id does not exist - }; - let ns_range = ns_table.ns_range(&ns_index, &PayloadByteLen::from_vid_common(common)); - let ns_byte_len = ns_range.byte_len(); - - if !NumTxs::new(&self.payload_num_txs, &ns_byte_len).in_bounds(&self.tx_index) { - tracing::info!("tx index {:?} out of bounds", self.tx_index); - return None; // error: tx index out of bounds - } - - let vid = vid_scheme( - VidSchemeType::get_num_storage_nodes(common) - .try_into() - .unwrap(), - ); - - // Verify proof for tx table len - { - let range = ns_range.block_range(&NumTxsRange::new(&ns_byte_len)); - if vid - .payload_verify( - Statement { - payload_subslice: &self.payload_num_txs.to_payload_bytes(), - range, - commit, - common, - }, - &self.payload_proof_num_txs, - ) - .ok()? - .is_err() - { - return Some(false); - } - } - - // Verify proof for tx table entries - { - let range = ns_range.block_range(&TxTableEntriesRange::new(&self.tx_index)); - if vid - .payload_verify( - Statement { - payload_subslice: &self.payload_tx_table_entries.to_payload_bytes(), - range, - commit, - common, - }, - &self.payload_proof_tx_table_entries, - ) - .ok()? - .is_err() - { - return Some(false); - } - } - - // Verify proof for tx payload - { - let range = ns_range.block_range(&TxPayloadRange::new( - &self.payload_num_txs, - &self.payload_tx_table_entries, - &ns_byte_len, - )); - - match (&self.payload_proof_tx, range.is_empty()) { - (Some(proof), false) => { - if vid - .payload_verify( - Statement { - payload_subslice: tx.payload(), - range, - commit, - common, - }, - proof, - ) - .ok()? 
- .is_err() - { - return Some(false); - } - } - (None, true) => {} // 0-length tx, nothing to verify - (None, false) => { - tracing::error!( - "tx verify: missing proof for nonempty tx payload range {:?}", - range - ); - return None; - } - (Some(_), true) => { - tracing::error!("tx verify: unexpected proof for empty tx payload range"); - return None; - } - } - } - - Some(true) - } -} diff --git a/sequencer/src/block/namespace_payload/types.rs b/sequencer/src/block/namespace_payload/types.rs deleted file mode 100644 index 09860f80bd..0000000000 --- a/sequencer/src/block/namespace_payload/types.rs +++ /dev/null @@ -1,429 +0,0 @@ -//! Types related to a namespace payload and its transaction table. -//! -//! All code that needs to know the binary format of a namespace payload and its -//! transaction table is restricted to this file. -//! -//! There are many newtypes in this file to facilitate transaction proofs. -//! -//! # Binary format of a namespace payload -//! -//! Any sequence of bytes is a valid [`NsPayload`]. -//! -//! A namespace payload consists of two concatenated byte sequences: -//! - `tx_table`: transaction table -//! - `tx_payloads`: transaction payloads -//! -//! # Transaction table -//! -//! Byte lengths for the different items that could appear in a `tx_table` are -//! specified in local private constants [`NUM_TXS_BYTE_LEN`], -//! [`TX_OFFSET_BYTE_LEN`]. -//! -//! ## Number of entries in the transaction table -//! -//! The first [`NUM_TXS_BYTE_LEN`] bytes of the `tx_table` indicate the number -//! `n` of entries in the table as a little-endian unsigned integer. If the -//! entire namespace payload byte length is smaller than [`NUM_TXS_BYTE_LEN`] -//! then the missing bytes are zero-padded. -//! -//! The bytes in the namespace payload beyond the first [`NUM_TXS_BYTE_LEN`] -//! bytes encode entries in the `tx_table`. Each entry consumes exactly -//! [`TX_OFFSET_BYTE_LEN`] bytes. -//! -//! The number `n` could be anything, including a number much larger than the -//! number of entries that could fit in the namespace payload. As such, the -//! actual number of entries in the `tx_table` is defined as the minimum of `n` -//! and the maximum number of whole `tx_table` entries that could fit in the -//! namespace payload. -//! -//! The `tx_payloads` consist of any bytes in the namespace payload beyond the -//! `tx_table`. -//! -//! ## Transaction table entry -//! -//! Each entry in the `tx_table` is exactly [`TX_OFFSET_BYTE_LEN`] bytes. These -//! bytes indicate the end-index of a transaction in the namespace payload -//! bytes. This end-index is a little-endian unsigned integer. -//! -//! This offset is relative to the end of the `tx_table` within the current -//! namespace. -//! -//! ### Example -//! -//! Suppose a block payload has 3000 bytes and 3 namespaces of 1000 bytes each. -//! Suppose the `tx_table` for final namespace in the block has byte length 100, -//! and suppose an entry in that `tx_table` indicates an end-index of `10`. The -//! actual end-index of that transaction relative to the current namespace is -//! `110`: `10` bytes for the offset plus `100` bytes for the `tx_table`. -//! Relative to the entire block payload, the end-index of that transaction is -//! `2110`: `10` bytes for the offset plus `100` bytes for the `tx_table` plus -//! `2000` bytes for this namespace. -//! -//! # How to deduce a transaction's byte range -//! -//! In order to extract the payload bytes of a single transaction `T` from the -//! 
namespace payload one needs both the start- and end-indices for `T`. -//! -//! See [`TxPayloadRange::new`] for clarification. What follows is a description -//! of what's implemented in [`TxPayloadRange::new`]. -//! -//! If `T` occupies the `i`th entry in the `tx_table` for `i>0` then the -//! start-index for `T` is defined as the end-index of the `(i-1)`th entry in -//! the table. -//! -//! Thus, both start- and end-indices for any transaction `T` can be read from a -//! contiguous, constant-size byte range in the `tx_table`. This property -//! facilitates transaction proofs. -//! -//! The start-index of the 0th entry in the table is implicitly defined to be -//! `0`. -//! -//! The start- and end-indices `(declared_start, declared_end)` declared in the -//! `tx_table` could be anything. As such, the actual start- and end-indices -//! `(start, end)` are defined so as to ensure that the byte range is -//! well-defined and in-bounds for the namespace payload: -//! ```ignore -//! end = min(declared_end, namespace_payload_byte_length) -//! start = min(declared_start, end) -//! ``` -//! -//! To get the byte range for `T` relative to the current namespace, the above -//! range is translated by the byte length of the `tx_table` *as declared in the -//! `tx_table` itself*, suitably truncated to fit within the current namespace. -//! -//! In particular, if the `tx_table` declares a huge number `n` of entries that -//! cannot fit into the namespace payload then all transactions in this -//! namespace have a zero-length byte range whose start- and end-indices are -//! both `namespace_payload_byte_length`. -//! -//! In a "honestly-prepared" `tx_table` the end-index of the final transaction -//! equals the byte length of the namespace payload minus the byte length of the -//! `tx_table`. (Otherwise the namespace payload might have bytes that are not -//! included in any transaction.) -//! -//! It is possible that a `tx_table` table could indicate two distinct -//! transactions whose byte ranges overlap, though no "honestly-prepared" -//! `tx_table` would do this. -use crate::block::uint_bytes::{bytes_serde_impl, usize_from_bytes, usize_to_bytes}; -use crate::Transaction; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use std::ops::Range; - -/// Byte lengths for the different items that could appear in a tx table. -const NUM_TXS_BYTE_LEN: usize = 4; -const TX_OFFSET_BYTE_LEN: usize = 4; - -/// Data that can be deserialized from a subslice of namespace payload bytes. -/// -/// Companion trait for [`NsPayloadBytesRange`], which specifies the subslice of -/// namespace payload bytes to read. -pub trait FromNsPayloadBytes<'a> { - /// Deserialize `Self` from namespace payload bytes. - fn from_payload_bytes(bytes: &'a [u8]) -> Self; -} - -/// Specifies a subslice of namespace payload bytes to read. -/// -/// Companion trait for [`FromNsPayloadBytes`], which holds data that can be -/// deserialized from that subslice of bytes. -pub trait NsPayloadBytesRange<'a> { - type Output: FromNsPayloadBytes<'a>; - - /// Range relative to this ns payload - fn ns_payload_range(&self) -> Range; -} - -/// Number of txs in a namespace. -/// -/// Like [`NumTxsUnchecked`] but checked against a [`NsPayloadByteLen`]. -pub struct NumTxs(usize); - -impl NumTxs { - /// Returns the minimum of: - /// - `num_txs` - /// - The maximum number of tx table entries that could fit in a namespace - /// whose byte length is `byte_len`. 
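To make the clamp concrete: with the 4-byte header and 4-byte entries defined in this file, a 14-byte namespace payload has room for floor((14 - 4) / 4) = 2 whole table entries, so any declared count above 2 is clamped to 2. A minimal standalone sketch of the rule implemented in `new` just below; the function name and figures are illustrative only:

```rust
const NUM_TXS_BYTE_LEN: usize = 4;
const TX_OFFSET_BYTE_LEN: usize = 4;

/// Declared tx count, clamped to the number of whole tx-table entries that
/// physically fit after the header in a namespace payload of `byte_len` bytes.
fn clamped_num_txs(declared: usize, byte_len: usize) -> usize {
    declared.min(byte_len.saturating_sub(NUM_TXS_BYTE_LEN) / TX_OFFSET_BYTE_LEN)
}

#[test]
fn clamp_examples() {
    assert_eq!(clamped_num_txs(2, 14), 2); // fits exactly
    assert_eq!(clamped_num_txs(1_000_000, 14), 2); // huge declared count clamped
    assert_eq!(clamped_num_txs(5, 3), 0); // payload shorter than the header
}
```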
- pub fn new(num_txs: &NumTxsUnchecked, byte_len: &NsPayloadByteLen) -> Self { - Self(std::cmp::min( - // Number of txs declared in the tx table - num_txs.0, - // Max number of tx table entries that could fit in the namespace payload - byte_len.0.saturating_sub(NUM_TXS_BYTE_LEN) / TX_OFFSET_BYTE_LEN, - )) - } - - pub fn in_bounds(&self, index: &TxIndex) -> bool { - index.0 < self.0 - } -} - -/// Byte length of a namespace payload. -pub struct NsPayloadByteLen(usize); - -impl NsPayloadByteLen { - // TODO restrict visibility? - pub fn from_usize(n: usize) -> Self { - Self(n) - } -} - -/// The part of a tx table that declares the number of txs in the payload. -/// -/// "Unchecked" because this quantity might exceed the number of tx table -/// entries that could fit into the namespace that contains it. -/// -/// Use [`NumTxs`] for the actual number of txs in this namespace. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct NumTxsUnchecked(usize); -bytes_serde_impl!( - NumTxsUnchecked, - to_payload_bytes, - [u8; NUM_TXS_BYTE_LEN], - from_payload_bytes -); - -impl NumTxsUnchecked { - pub fn to_payload_bytes(&self) -> [u8; NUM_TXS_BYTE_LEN] { - usize_to_bytes::(self.0) - } -} - -impl FromNsPayloadBytes<'_> for NumTxsUnchecked { - fn from_payload_bytes(bytes: &[u8]) -> Self { - Self(usize_from_bytes::(bytes)) - } -} - -/// Byte range for the part of a tx table that declares the number of txs in the -/// payload. -pub struct NumTxsRange(Range); - -impl NumTxsRange { - pub fn new(byte_len: &NsPayloadByteLen) -> Self { - Self(0..NUM_TXS_BYTE_LEN.min(byte_len.0)) - } -} - -impl NsPayloadBytesRange<'_> for NumTxsRange { - type Output = NumTxsUnchecked; - - fn ns_payload_range(&self) -> Range { - self.0.clone() - } -} - -/// Entries from a tx table in a namespace for use in a transaction proof. -/// -/// Contains either one or two entries according to whether it was derived from -/// the first transaction in the namespace. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct TxTableEntries { - cur: usize, - prev: Option, // `None` if derived from the first transaction -} - -// This serde impl uses Vec. We could save space by using an array of -// length `TWO_ENTRIES_BYTE_LEN`, but then we need a way to distinguish -// `prev=Some(0)` from `prev=None`. -bytes_serde_impl!( - TxTableEntries, - to_payload_bytes, - Vec, - from_payload_bytes -); - -impl TxTableEntries { - const TWO_ENTRIES_BYTE_LEN: usize = 2 * TX_OFFSET_BYTE_LEN; - - pub fn to_payload_bytes(&self) -> Vec { - let mut bytes = Vec::with_capacity(Self::TWO_ENTRIES_BYTE_LEN); - if let Some(prev) = self.prev { - bytes.extend(usize_to_bytes::(prev)); - } - bytes.extend(usize_to_bytes::(self.cur)); - bytes - } -} - -impl FromNsPayloadBytes<'_> for TxTableEntries { - fn from_payload_bytes(bytes: &[u8]) -> Self { - match bytes.len() { - TX_OFFSET_BYTE_LEN => Self { - cur: usize_from_bytes::(bytes), - prev: None, - }, - Self::TWO_ENTRIES_BYTE_LEN => Self { - cur: usize_from_bytes::(&bytes[TX_OFFSET_BYTE_LEN..]), - prev: Some(usize_from_bytes::( - &bytes[..TX_OFFSET_BYTE_LEN], - )), - }, - len => panic!( - "unexpected bytes len {} should be either {} or {}", - len, - TX_OFFSET_BYTE_LEN, - Self::TWO_ENTRIES_BYTE_LEN - ), - } - } -} - -/// Byte range for entries from a tx table for use in a transaction proof. -/// -/// This range covers either one or two entries from a tx table according to -/// whether it was derived from the first transaction in the namespace. 
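In byte terms, with 4-byte fields, entry `i` of the tx table occupies bytes `4 + 4*i .. 8 + 4*i` of the namespace payload, so the range defined below spans one entry for the first transaction and two consecutive entries otherwise. A sketch of that arithmetic, using plain operators for clarity where the implementation below uses saturating arithmetic to stay panic-free on adversarial indices:

```rust
use std::ops::Range;

const NUM_TXS_BYTE_LEN: usize = 4;
const TX_OFFSET_BYTE_LEN: usize = 4;

/// Byte range covering the tx-table entries needed to prove tx `index`:
/// entry 0 alone for the first tx, entries `index - 1` and `index` otherwise.
fn entries_range(index: usize) -> Range<usize> {
    let start = if index == 0 {
        NUM_TXS_BYTE_LEN
    } else {
        NUM_TXS_BYTE_LEN + (index - 1) * TX_OFFSET_BYTE_LEN
    };
    start..NUM_TXS_BYTE_LEN + (index + 1) * TX_OFFSET_BYTE_LEN
}

#[test]
fn entries_range_examples() {
    assert_eq!(entries_range(0), 4..8); // one entry: this tx's end offset
    assert_eq!(entries_range(3), 12..20); // two entries: prev end, then this end
}
```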
-pub struct TxTableEntriesRange(Range); - -impl TxTableEntriesRange { - pub fn new(index: &TxIndex) -> Self { - let start = if index.0 == 0 { - // Special case: the desired range includes only one entry from - // the tx table: the first entry. This entry starts immediately - // following the bytes that encode the tx table length. - NUM_TXS_BYTE_LEN - } else { - // The desired range starts at the beginning of the previous tx - // table entry. - (index.0 - 1) - .saturating_mul(TX_OFFSET_BYTE_LEN) - .saturating_add(NUM_TXS_BYTE_LEN) - }; - // The desired range ends at the end of this transaction's tx table entry - let end = index - .0 - .saturating_add(1) - .saturating_mul(TX_OFFSET_BYTE_LEN) - .saturating_add(NUM_TXS_BYTE_LEN); - Self(start..end) - } -} - -impl NsPayloadBytesRange<'_> for TxTableEntriesRange { - type Output = TxTableEntries; - - fn ns_payload_range(&self) -> Range { - self.0.clone() - } -} - -/// A transaction's payload data. -pub struct TxPayload<'a>(&'a [u8]); - -impl<'a> TxPayload<'a> { - pub fn to_payload_bytes(&self) -> &'a [u8] { - self.0 - } -} - -impl<'a> FromNsPayloadBytes<'a> for TxPayload<'a> { - fn from_payload_bytes(bytes: &'a [u8]) -> Self { - Self(bytes) - } -} - -/// Byte range for a transaction's payload data. -pub struct TxPayloadRange(Range); - -impl TxPayloadRange { - pub fn new( - num_txs: &NumTxsUnchecked, - tx_table_entries: &TxTableEntries, - byte_len: &NsPayloadByteLen, - ) -> Self { - let tx_table_byte_len = num_txs - .0 - .saturating_mul(TX_OFFSET_BYTE_LEN) - .saturating_add(NUM_TXS_BYTE_LEN); - let end = tx_table_entries - .cur - .saturating_add(tx_table_byte_len) - .min(byte_len.0); - let start = tx_table_entries - .prev - .unwrap_or(0) - .saturating_add(tx_table_byte_len) - .min(end); - Self(start..end) - } -} - -impl<'a> NsPayloadBytesRange<'a> for TxPayloadRange { - type Output = TxPayload<'a>; - - fn ns_payload_range(&self) -> Range { - self.0.clone() - } -} - -/// Index for an entry in a tx table. -#[derive(Clone, Debug, Eq, Hash, PartialEq)] -pub(in crate::block) struct TxIndex(usize); -bytes_serde_impl!(TxIndex, to_bytes, [u8; NUM_TXS_BYTE_LEN], from_bytes); - -impl TxIndex { - pub fn to_bytes(&self) -> [u8; NUM_TXS_BYTE_LEN] { - usize_to_bytes::(self.0) - } - fn from_bytes(bytes: &[u8]) -> Self { - Self(usize_from_bytes::(bytes)) - } -} - -pub(in crate::block) struct TxIter(Range); - -impl TxIter { - pub fn new(num_txs: &NumTxs) -> Self { - Self(0..num_txs.0) - } -} - -// Simple `impl Iterator` delegates to `Range`. -impl Iterator for TxIter { - type Item = TxIndex; - - fn next(&mut self) -> Option { - self.0.next().map(TxIndex) - } -} - -/// Build an individual namespace payload one transaction at a time. -/// -/// Use [`Self::append_tx`] to add each transaction. Use [`Self::into_bytes`] -/// when you're done. The returned bytes include a well-formed tx table and all -/// tx payloads. -#[derive(Default)] -pub(in crate::block) struct NsPayloadBuilder { - tx_table_entries: Vec, - tx_bodies: Vec, -} - -impl NsPayloadBuilder { - /// Add a transaction's payload to this namespace - pub fn append_tx(&mut self, tx: Transaction) { - self.tx_bodies.extend(tx.into_payload()); - self.tx_table_entries - .extend(usize_to_bytes::(self.tx_bodies.len())); - } - - /// Serialize to bytes and consume self. 
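For a byte-level picture of what `into_bytes` below produces, here is a hypothetical trace with two transactions of 3 and 5 payload bytes; the helper and test are illustrative, mirroring the cumulative offsets that `append_tx` records:

```rust
/// Minimal re-creation of the builder's output layout: a little-endian u32
/// count, one cumulative end-offset per tx, then the concatenated bodies.
fn build_ns_payload(txs: &[&[u8]]) -> Vec<u8> {
    let mut entries = Vec::new();
    let mut bodies = Vec::new();
    for tx in txs {
        bodies.extend_from_slice(tx);
        entries.extend((bodies.len() as u32).to_le_bytes()); // cumulative end
    }
    let mut out = (txs.len() as u32).to_le_bytes().to_vec();
    out.extend(entries);
    out.extend(bodies);
    out
}

#[test]
fn layout_example() {
    let bytes = build_ns_payload(&[b"abc", b"defgh"]);
    assert_eq!(&bytes[..4], &2u32.to_le_bytes()); // num_txs = 2
    assert_eq!(&bytes[4..8], &3u32.to_le_bytes()); // tx 0 ends at offset 3
    assert_eq!(&bytes[8..12], &8u32.to_le_bytes()); // tx 1 ends at offset 8
    assert_eq!(&bytes[12..], b"abcdefgh"); // bodies
}
```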
- pub fn into_bytes(self) -> Vec { - let mut result = Vec::with_capacity( - NUM_TXS_BYTE_LEN + self.tx_table_entries.len() + self.tx_bodies.len(), - ); - let num_txs = NumTxsUnchecked(self.tx_table_entries.len() / TX_OFFSET_BYTE_LEN); - result.extend(num_txs.to_payload_bytes()); - result.extend(self.tx_table_entries); - result.extend(self.tx_bodies); - result - } - - /// Byte length of a tx table header. - pub const fn tx_table_header_byte_len() -> usize { - NUM_TXS_BYTE_LEN - } - - /// Byte length of a single tx table entry. - pub const fn tx_table_entry_byte_len() -> usize { - TX_OFFSET_BYTE_LEN - } -} diff --git a/sequencer/src/block/test.rs b/sequencer/src/block/test.rs deleted file mode 100644 index fe8f77b417..0000000000 --- a/sequencer/src/block/test.rs +++ /dev/null @@ -1,207 +0,0 @@ -use crate::{ - block::{ - full_payload::{NsProof, Payload}, - namespace_payload::TxProof, - }, - chain_config::BlockSize, - ChainConfig, NamespaceId, NodeState, Transaction, ValidatedState, -}; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; -use hotshot::traits::BlockPayload; -use hotshot_query_service::availability::QueryablePayload; -use hotshot_types::{traits::EncodeBytes, vid::vid_scheme}; -use jf_vid::VidScheme; -use rand::RngCore; -use std::collections::HashMap; - -#[tokio::test(flavor = "multi_thread")] -async fn basic_correctness() { - // play with this - let test_cases = vec![ - vec![vec![5, 8, 8], vec![7, 9, 11], vec![10, 5, 8]], // 3 non-empty namespaces - ]; - - setup_logging(); - setup_backtrace(); - let mut rng = jf_utils::test_rng(); - let valid_tests = ValidTest::many_from_tx_lengths(test_cases, &mut rng); - - let mut vid = vid_scheme(10); - - for mut test in valid_tests { - let mut all_txs = test.all_txs(); - tracing::info!("test case {} nss {} txs", test.nss.len(), all_txs.len()); - - let block = - Payload::from_transactions(test.all_txs(), &Default::default(), &Default::default()) - .await - .unwrap() - .0; - tracing::info!( - "ns_table {:?}, payload {:?}", - block.ns_table().encode(), - block.encode() - ); - - // test correct number of nss, txs - assert_eq!(block.ns_table().iter().count(), test.nss.len()); - assert_eq!(block.len(block.ns_table()), all_txs.len()); - assert_eq!(block.iter(block.ns_table()).count(), all_txs.len()); - - tracing::info!("all_txs {:?}", all_txs); - - let (vid_commit, vid_common) = { - let disperse_data = vid.disperse(block.encode()).unwrap(); - (disperse_data.commit, disperse_data.common) - }; - - // test iterate over all txs - for tx_index in block.iter(block.ns_table()) { - let tx = block.transaction(&tx_index).unwrap(); - tracing::info!("tx {:?}, {:?}", tx_index, tx); - - // warning: linear search for a tx - let test_tx = all_txs.remove(all_txs.iter().position(|t| t == &tx).unwrap()); - assert_eq!(tx, test_tx); - - let tx_proof2 = { - let (tx2, tx_proof) = TxProof::new(&tx_index, &block, &vid_common).unwrap(); - assert_eq!(tx, tx2); - tx_proof - }; - assert!(tx_proof2 - .verify(block.ns_table(), &tx, &vid_commit, &vid_common) - .unwrap()); - } - assert!( - all_txs.is_empty(), - "not all test txs consumed by block.iter" - ); - - // test iterate over all namespaces - for ns_index in block.ns_table().iter() { - let ns_id = block.ns_table().read_ns_id(&ns_index).unwrap(); - tracing::info!("test ns_id {ns_id}"); - - let txs = test - .nss - .remove(&ns_id) - .expect("block ns_id missing from test"); - - let ns_proof = NsProof::new(&block, &ns_index, &vid_common) - .expect("namespace_with_proof should succeed"); - - let 
(ns_proof_txs, ns_proof_ns_id) = ns_proof - .verify(block.ns_table(), &vid_commit, &vid_common) - .unwrap_or_else(|| panic!("namespace {} proof verification failure", ns_id)); - - assert_eq!(ns_proof_ns_id, ns_id); - assert_eq!(ns_proof_txs, txs); - } - assert!( - test.nss.is_empty(), - "not all test namespaces consumed by ns_iter" - ); - } -} - -#[tokio::test(flavor = "multi_thread")] -async fn enforce_max_block_size() { - setup_logging(); - setup_backtrace(); - let test_case = vec![vec![5, 8, 8], vec![7, 9, 11], vec![10, 5, 8]]; - let payload_byte_len_expected: usize = 119; - let ns_table_byte_len_expected: usize = 28; - - let mut rng = jf_utils::test_rng(); - let test = ValidTest::from_tx_lengths(test_case, &mut rng); - let tx_count_expected = test.all_txs().len(); - - let chain_config = ChainConfig { - max_block_size: BlockSize::from( - (payload_byte_len_expected + ns_table_byte_len_expected) as u64, - ), - ..Default::default() - }; - - // test: actual block size equals max block size - let instance_state = NodeState::default().with_chain_config(chain_config); - - let validated_state = ValidatedState { - chain_config: chain_config.into(), - ..Default::default() - }; - let block = Payload::from_transactions(test.all_txs(), &validated_state, &instance_state) - .await - .unwrap() - .0; - assert_eq!(block.encode().len(), payload_byte_len_expected); - assert_eq!(block.ns_table().encode().len(), ns_table_byte_len_expected); - assert_eq!(block.len(block.ns_table()), tx_count_expected); - - // test: actual block size exceeds max block size, so 1 tx is dropped - // WARN log should be emitted - - let chain_config = ChainConfig { - max_block_size: BlockSize::from( - (payload_byte_len_expected + ns_table_byte_len_expected - 1) as u64, - ), - ..Default::default() - }; - let instance_state = NodeState::default().with_chain_config(chain_config); - - let validated_state = ValidatedState { - chain_config: chain_config.into(), - ..Default::default() - }; - - let block = Payload::from_transactions(test.all_txs(), &validated_state, &instance_state) - .await - .unwrap() - .0; - assert!(block.encode().len() < payload_byte_len_expected); - assert_eq!(block.ns_table().encode().len(), ns_table_byte_len_expected); - assert_eq!(block.len(block.ns_table()), tx_count_expected - 1); -} - -// TODO lots of infra here that could be reused in other tests. -pub struct ValidTest { - nss: HashMap>, -} - -impl ValidTest { - pub fn from_tx_lengths(tx_lengths: Vec>, rng: &mut R) -> Self - where - R: RngCore, - { - let mut nss = HashMap::new(); - for tx_lens in tx_lengths.into_iter() { - let ns_id = NamespaceId::random(rng); - for len in tx_lens { - let ns: &mut Vec<_> = nss.entry(ns_id).or_default(); - ns.push(Transaction::new(ns_id, random_bytes(len, rng))); - } - } - Self { nss } - } - - pub fn many_from_tx_lengths(test_cases: Vec>>, rng: &mut R) -> Vec - where - R: RngCore, - { - test_cases - .into_iter() - .map(|t| Self::from_tx_lengths(t, rng)) - .collect() - } - - pub fn all_txs(&self) -> Vec { - self.nss.iter().flat_map(|(_, txs)| txs.clone()).collect() - } -} - -fn random_bytes(len: usize, rng: &mut R) -> Vec { - let mut result = vec![0; len]; - rng.fill_bytes(&mut result); - result -} diff --git a/sequencer/src/block/uint_bytes.rs b/sequencer/src/block/uint_bytes.rs deleted file mode 100644 index 2296a8182a..0000000000 --- a/sequencer/src/block/uint_bytes.rs +++ /dev/null @@ -1,231 +0,0 @@ -//! Serialization (and deserialization) of primitive unsigned integer types to -//! 
(and from) an arbitrary fixed-length byte array.
-//!
-use paste::paste;
-use std::mem::size_of;
-
-// Use an ugly macro because it's difficult or impossible to be generic over
-// primitive types such as `usize`, `u64`.
-macro_rules! uint_bytes_impl {
-    ($T:ty) => {
-        paste! {
-            /// Serialize `n` into `BYTE_LEN` bytes in little-endian form, padding with
-            /// 0 as needed.
-            ///
-            /// # Panics
-            /// If `n` cannot fit into `BYTE_LEN` bytes.
-            pub fn [<$T _to_bytes>]<const BYTE_LEN: usize>(n: $T) -> [u8; BYTE_LEN] {
-                if size_of::<$T>() > BYTE_LEN {
-                    assert!(
-                        [<$T _fits>](n, BYTE_LEN),
-                        "n {n} cannot fit into {BYTE_LEN} bytes"
-                    );
-                    n.to_le_bytes()[..BYTE_LEN].try_into().unwrap() // panic is impossible
-                } else {
-                    // convert `n` to bytes and pad with 0
-                    let mut result = [0; BYTE_LEN];
-                    result[..size_of::<$T>()].copy_from_slice(&n.to_le_bytes()[..]);
-                    result
-                }
-            }
-
-            /// Deserialize `bytes` in little-endian form into a `$T`, padding with 0
-            /// as needed.
-            ///
-            /// # Panics
-            /// If `bytes.len()` is too large to fit into a `$T`.
-            pub fn [<$T _from_bytes>]<const BYTE_LEN: usize>(bytes: &[u8]) -> $T {
-                assert!(bytes.len() <= BYTE_LEN, "bytes len {} exceeds BYTE_LEN {BYTE_LEN}", bytes.len());
-                assert!(
-                    BYTE_LEN <= size_of::<$T>(),
-                    "BYTE_LEN {BYTE_LEN} cannot fit into {}",
-                    stringify!($T)
-                );
-                let mut [<$T _bytes>] = [0; size_of::<$T>()];
-                [<$T _bytes>][..bytes.len()].copy_from_slice(bytes);
-                $T::from_le_bytes([<$T _bytes>])
-            }
-
-            /// Return the largest `$T` value that can fit into `byte_len` bytes.
-            pub const fn [<$T _max_from_byte_len>](byte_len: usize) -> $T {
-                if byte_len >= size_of::<$T>() {
-                    $T::MAX
-                } else {
-                    // overflow cannot occur because `byte_len < size_of::<$T>()`
-                    (1 << (byte_len * 8)) - 1
-                }
-            }
-
-            /// Can `n` fit into `byte_len` bytes?
-            pub const fn [<$T _fits>](n: $T, byte_len: usize) -> bool {
-                n <= [<$T _max_from_byte_len>](byte_len)
-            }
-        }
-    };
-}
-
-uint_bytes_impl!(usize);
-uint_bytes_impl!(u32);
-
-/// Impl [`serde`] for type `$T` with methods named `$to_bytes`, `$from_bytes`
-/// of the form
-/// ```ignore
-/// $T::$to_bytes(&self) -> $B
-/// $T::$from_bytes(bytes: &[u8]) -> Self
-/// ```
-/// where `$B` is any type that impls [`serde::Deserialize`] and has a method
-/// `as_ref` of the form
-/// ```ignore
-/// $B::as_ref(&self) -> &[u8]
-/// ```
-/// Typical examples of `$B` include array `[u8; N]`, slice `&[u8]`, or
-/// `Vec<u8>`.
-macro_rules! bytes_serde_impl {
-    ($T:ty, $to_bytes:ident, $B:ty, $from_bytes:ident) => {
-        impl Serialize for $T {
-            fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-            where
-                S: Serializer,
-            {
-                self.$to_bytes().serialize(serializer)
-            }
-        }
-
-        impl<'de> Deserialize<'de> for $T {
-            fn deserialize<D>(deserializer: D) -> Result<$T, D::Error>
-            where
-                D: Deserializer<'de>,
-            {
-                <$B as Deserialize>::deserialize(deserializer)
-                    .map(|bytes| <$T>::$from_bytes(bytes.as_ref()))
-            }
-        }
-    };
-}
-
-pub(super) use bytes_serde_impl;
-
-#[cfg(test)]
-mod test {
-    use fluent_asserter::prelude::*;
-    use paste::paste;
-    use std::mem::size_of;
-
-    macro_rules! uint_bytes_test_impl {
-        ($T:ty) => {
-            paste! 
{ - use super::{[<$T _max_from_byte_len>], [<$T _to_bytes>], [<$T _from_bytes>]}; - - #[test] - fn [<$T _max_from_byte_len_correctness>]() { - // test byte lengths 0 to size_of::<$T>() - let mut bytes = [0; size_of::<$T>()]; - assert_eq!([<$T _max_from_byte_len>](0), 0); - for i in 0..bytes.len() { - bytes[i] = 0xff; - assert_eq!([<$T _max_from_byte_len>](i + 1).to_le_bytes(), bytes); - } - - // test byte lengths size_of::<$T>() to twice that length - for i in size_of::<$T>()..2 * size_of::<$T>() { - assert_eq!([<$T _max_from_byte_len>](i + 1), $T::MAX); - } - } - - #[test] - fn [<$T _to_bytes_correctness>]() { - // byte length 0 - assert_eq!([<$T _to_bytes>](0), [0; 0]); - assert_that_code!(|| [<$T _to_bytes>]::<0>(1)).panics(); - - // byte length 1 - assert_eq!([<$T _to_bytes>](0), [0; 1]); - assert_eq!([<$T _to_bytes>](255), [255; 1]); - assert_that_code!(|| [<$T _to_bytes>]::<1>(256)).panics(); - - // byte length 2 - assert_eq!([<$T _to_bytes>](0), [0; 2]); - assert_eq!([<$T _to_bytes>](65535), [255; 2]); - assert_that_code!(|| [<$T _to_bytes>]::<2>(65536)).panics(); - - // byte length size_of::<$T>() - assert_eq!([<$T _to_bytes>](0), [0; size_of::<$T>()]); - assert_eq!([<$T _to_bytes>]($T::MAX), [255; size_of::<$T>()]); - - // byte length size_of::<$T>() + 1 - assert_eq!([<$T _to_bytes>](0), [0; size_of::<$T>() + 1]); - let [<$T _max_bytes>] = { - let mut bytes = [255; size_of::<$T>() + 1]; - bytes[bytes.len() - 1] = 0; - bytes - }; - assert_eq!([<$T _to_bytes>]($T::MAX), [<$T _max_bytes>]); - } - - #[test] - fn [<$T _from_bytes_correctness>]() { - let bytes = [255; size_of::<$T>() + 1]; - - // It would be nice to iterate through - // `0..size_of::<$T>()` but this is not possible with - // const generics for `[<$T _from_bytes>]`. We could - // use `seq-macro` crate but it requires an integer - // literal whereas our range includes `size_of::<$T>()`. - // - // Instead we just hard code four constants: - // `0`, `1`, `size_of::<$T>() - 1`, `size_of::<$T>()`. - assert_eq!( - [<$T _from_bytes>]::<0>(&bytes[..0]), - [<$T _max_from_byte_len>](0) - ); - assert_eq!( - [<$T _from_bytes>]::<1>(&bytes[..1]), - [<$T _max_from_byte_len>](1) - ); - assert_eq!( - [<$T _from_bytes>]::<{size_of::<$T>() - 1}>(&bytes[..size_of::<$T>() - 1]), - [<$T _max_from_byte_len>](size_of::<$T>() - 1) - ); - assert_eq!( - [<$T _from_bytes>]::<{size_of::<$T>()}>(&bytes[..size_of::<$T>()]), - [<$T _max_from_byte_len>](size_of::<$T>()) - ); - - assert_that_code!(|| [<$T _from_bytes>]::<{size_of::<$T>() + 1}>(&bytes[..])).panics(); - } - - #[test] - fn [<$T _from_bytes_allows_smaller_byte_lens>]() { - // This test same as `xxx_from_bytes_correctness` except - // we set the const param `BYTE_LEN` to - // `size_of::<$T>()` in all cases. Why? To ensure that - // `xxx_from_bytes` allows its arg to have length - // smaller than `BYTE_LEN`. 
- let bytes = [255; size_of::<$T>() + 1]; - - assert_eq!( - [<$T _from_bytes>]::<{size_of::<$T>()}>(&bytes[..0]), - [<$T _max_from_byte_len>](0) - ); - assert_eq!( - [<$T _from_bytes>]::<{size_of::<$T>()}>(&bytes[..1]), - [<$T _max_from_byte_len>](1) - ); - assert_eq!( - [<$T _from_bytes>]::<{size_of::<$T>()}>(&bytes[..size_of::<$T>() - 1]), - [<$T _max_from_byte_len>](size_of::<$T>() - 1) - ); - assert_eq!( - [<$T _from_bytes>]::<{size_of::<$T>()}>(&bytes[..size_of::<$T>()]), - [<$T _max_from_byte_len>](size_of::<$T>()) - ); - - assert_that_code!(|| [<$T _from_bytes>]::<{size_of::<$T>()}>(&bytes[..])).panics(); - } - } - }; - } - - uint_bytes_test_impl!(usize); - uint_bytes_test_impl!(u32); -} diff --git a/sequencer/src/persistence.rs b/sequencer/src/persistence.rs index 39aa46bd5c..8dff3c7448 100644 --- a/sequencer/src/persistence.rs +++ b/sequencer/src/persistence.rs @@ -58,21 +58,25 @@ mod persistence_tests { use hotshot_query_service::testing::mocks::MockVersions; use hotshot_types::{ data::{ - vid_commitment, vid_disperse::ADVZDisperseShare, DaProposal, EpochNumber, - QuorumProposal2, QuorumProposalWrapper, VidDisperseShare, ViewNumber, + ns_table::parse_ns_table, vid_commitment, vid_disperse::VidDisperseShare2, DaProposal2, + EpochNumber, QuorumProposal2, QuorumProposalWrapper, VidCommitment, VidDisperseShare, + ViewNumber, }, event::{EventType, HotShotAction, LeafInfo}, - message::{Proposal, UpgradeLock}, - simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate, UpgradeCertificate}, + message::{convert_proposal, Proposal, UpgradeLock}, + simple_certificate::{ + NextEpochQuorumCertificate2, QuorumCertificate, QuorumCertificate2, UpgradeCertificate, + }, simple_vote::{NextEpochQuorumData2, QuorumData2, UpgradeProposalData, VersionedVoteData}, traits::{ block_contents::BlockHeader, node_implementation::{ConsensusTime, Versions}, EncodeBytes, }, - vid::advz::advz_scheme, + vid::avidm::{init_avidm_param, AvidMScheme}, + vote::HasViewNumber, }; - use jf_vid::VidScheme; + use sequencer_utils::test_utils::setup_test; use std::sync::Arc; use testing::TestablePersistence; @@ -175,22 +179,27 @@ mod persistence_tests { ); let leaf: Leaf2 = - Leaf::genesis::(&ValidatedState::default(), &NodeState::mock()) - .await - .into(); + Leaf2::genesis::(&ValidatedState::default(), &NodeState::mock()).await; let leaf_payload = leaf.block_payload().unwrap(); let leaf_payload_bytes_arc = leaf_payload.encode(); - let disperse = advz_scheme(2) - .disperse(leaf_payload_bytes_arc.clone()) - .unwrap(); + + let avidm_param = init_avidm_param(2).unwrap(); + let weights = vec![1u32; 2]; + + let ns_table = parse_ns_table(leaf_payload.byte_len().as_usize(), &leaf_payload.encode()); + let (payload_commitment, shares) = + AvidMScheme::ns_disperse(&avidm_param, &weights, &leaf_payload_bytes_arc, ns_table) + .unwrap(); + let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], 1); let signature = PubKey::sign(&privkey, &[]).unwrap(); - let mut vid = ADVZDisperseShare:: { + let mut vid = VidDisperseShare2:: { view_number: ViewNumber::new(0), - payload_commitment: Default::default(), - share: disperse.shares[0].clone(), - common: disperse.common, + payload_commitment, + share: shares[0].clone(), recipient_key: pubkey, + epoch: Some(EpochNumber::new(0)), + target_epoch: Some(EpochNumber::new(0)), }; let mut quorum_proposal = Proposal { data: QuorumProposalWrapper:: { @@ -198,12 +207,11 @@ mod persistence_tests { epoch: None, block_header: leaf.block_header().clone(), view_number: 
ViewNumber::genesis(), - justify_qc: QuorumCertificate::genesis::( + justify_qc: QuorumCertificate2::genesis::( &ValidatedState::default(), &NodeState::mock(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_drb_result: None, @@ -216,50 +224,51 @@ mod persistence_tests { let vid_share0 = vid.clone().to_proposal(&privkey).unwrap().clone(); - storage.append_vid(&vid_share0).await.unwrap(); + storage.append_vid2(&vid_share0).await.unwrap(); assert_eq!( storage.load_vid_share(ViewNumber::new(0)).await.unwrap(), - Some(vid_share0.clone()) + Some(convert_proposal(vid_share0.clone())) ); vid.view_number = ViewNumber::new(1); let vid_share1 = vid.clone().to_proposal(&privkey).unwrap().clone(); - storage.append_vid(&vid_share1).await.unwrap(); + storage.append_vid2(&vid_share1).await.unwrap(); assert_eq!( - storage.load_vid_share(vid.view_number).await.unwrap(), - Some(vid_share1.clone()) + storage.load_vid_share(vid.view_number()).await.unwrap(), + Some(convert_proposal(vid_share1.clone())) ); vid.view_number = ViewNumber::new(2); let vid_share2 = vid.clone().to_proposal(&privkey).unwrap().clone(); - storage.append_vid(&vid_share2).await.unwrap(); + storage.append_vid2(&vid_share2).await.unwrap(); assert_eq!( - storage.load_vid_share(vid.view_number).await.unwrap(), - Some(vid_share2.clone()) + storage.load_vid_share(vid.view_number()).await.unwrap(), + Some(convert_proposal(vid_share2.clone())) ); vid.view_number = ViewNumber::new(3); let vid_share3 = vid.clone().to_proposal(&privkey).unwrap().clone(); - storage.append_vid(&vid_share3).await.unwrap(); + storage.append_vid2(&vid_share3).await.unwrap(); assert_eq!( - storage.load_vid_share(vid.view_number).await.unwrap(), - Some(vid_share3.clone()) + storage.load_vid_share(vid.view_number()).await.unwrap(), + Some(convert_proposal(vid_share3.clone())) ); let block_payload_signature = BLSPubKey::sign(&privkey, &leaf_payload_bytes_arc) .expect("Failed to sign block payload"); - let da_proposal_inner = DaProposal:: { + let da_proposal_inner = DaProposal2:: { encoded_transactions: leaf_payload_bytes_arc.clone(), metadata: leaf_payload.ns_table().clone(), view_number: ViewNumber::new(0), + epoch: None, }; let da_proposal = Proposal { @@ -276,7 +285,7 @@ mod persistence_tests { ); storage - .append_da(&da_proposal, vid_commitment) + .append_da2(&da_proposal, vid_commitment) .await .unwrap(); @@ -288,7 +297,7 @@ mod persistence_tests { let mut da_proposal1 = da_proposal.clone(); da_proposal1.data.view_number = ViewNumber::new(1); storage - .append_da(&da_proposal1.clone(), vid_commitment) + .append_da2(&da_proposal1.clone(), vid_commitment) .await .unwrap(); @@ -303,7 +312,7 @@ mod persistence_tests { let mut da_proposal2 = da_proposal1.clone(); da_proposal2.data.view_number = ViewNumber::new(2); storage - .append_da(&da_proposal2.clone(), vid_commitment) + .append_da2(&da_proposal2.clone(), vid_commitment) .await .unwrap(); @@ -318,7 +327,7 @@ mod persistence_tests { let mut da_proposal3 = da_proposal2.clone(); da_proposal3.data.view_number = ViewNumber::new(3); storage - .append_da(&da_proposal3.clone(), vid_commitment) + .append_da2(&da_proposal3.clone(), vid_commitment) .await .unwrap(); @@ -331,8 +340,9 @@ mod persistence_tests { ); let quorum_proposal1 = quorum_proposal.clone(); + storage - .append_quorum_proposal(&quorum_proposal1) + .append_quorum_proposal2(&quorum_proposal1) .await .unwrap(); @@ -344,7 +354,7 @@ mod persistence_tests { quorum_proposal.data.proposal.view_number = ViewNumber::new(1); 
let quorum_proposal2 = quorum_proposal.clone(); storage - .append_quorum_proposal(&quorum_proposal2) + .append_quorum_proposal2(&quorum_proposal2) .await .unwrap(); @@ -360,7 +370,7 @@ mod persistence_tests { quorum_proposal.data.proposal.justify_qc.view_number = ViewNumber::new(1); let quorum_proposal3 = quorum_proposal.clone(); storage - .append_quorum_proposal(&quorum_proposal3) + .append_quorum_proposal2(&quorum_proposal3) .await .unwrap(); @@ -379,7 +389,7 @@ mod persistence_tests { // This one should stick around after GC runs. let quorum_proposal4 = quorum_proposal.clone(); storage - .append_quorum_proposal(&quorum_proposal4) + .append_quorum_proposal2(&quorum_proposal4) .await .unwrap(); @@ -456,7 +466,7 @@ mod persistence_tests { assert_eq!( storage.load_vid_share(ViewNumber::new(3)).await.unwrap(), - Some(vid_share3.clone()) + Some(convert_proposal(vid_share3.clone())) ); let proposals = storage.load_quorum_proposals().await.unwrap(); @@ -669,16 +679,21 @@ mod persistence_tests { .into(); let leaf_payload = leaf.block_payload().unwrap(); let leaf_payload_bytes_arc = leaf_payload.encode(); - let disperse = advz_scheme(2) - .disperse(leaf_payload_bytes_arc.clone()) - .unwrap(); + let avidm_param = init_avidm_param(2).unwrap(); + let weights = vec![1u32; 2]; + let ns_table = parse_ns_table(leaf_payload.byte_len().as_usize(), &leaf_payload.encode()); + let (payload_commitment, shares) = + AvidMScheme::ns_disperse(&avidm_param, &weights, &leaf_payload_bytes_arc, ns_table) + .unwrap(); + let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], 1); - let mut vid = ADVZDisperseShare:: { + let mut vid = VidDisperseShare2:: { view_number: ViewNumber::new(0), - payload_commitment: Default::default(), - share: disperse.shares[0].clone(), - common: disperse.common, + payload_commitment, + share: shares[0].clone(), recipient_key: pubkey, + epoch: Some(EpochNumber::new(0)), + target_epoch: Some(EpochNumber::new(0)), } .to_proposal(&privkey) .unwrap() @@ -700,20 +715,20 @@ mod persistence_tests { epoch: None, }, }; - let mut qc = QuorumCertificate::genesis::( + let mut qc = QuorumCertificate2::genesis::( &ValidatedState::default(), &NodeState::mock(), ) - .await - .to_qc2(); + .await; let block_payload_signature = BLSPubKey::sign(&privkey, &leaf_payload_bytes_arc) .expect("Failed to sign block payload"); let mut da_proposal = Proposal { - data: DaProposal:: { + data: DaProposal2:: { encoded_transactions: leaf_payload_bytes_arc.clone(), metadata: leaf_payload.ns_table().clone(), view_number: ViewNumber::new(0), + epoch: Some(EpochNumber::new(0)), }, signature: block_payload_signature, _pd: Default::default(), @@ -739,8 +754,8 @@ mod persistence_tests { // Add proposals. for (_, _, vid, da) in &chain { tracing::info!(?da, ?vid, "insert proposal"); - storage.append_da(da, vid_commitment).await.unwrap(); - storage.append_vid(vid).await.unwrap(); + storage.append_da2(da, vid_commitment).await.unwrap(); + storage.append_vid2(vid).await.unwrap(); } // Decide 2 leaves, but fail in event processing. 
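The assertions in these tests compare data written through the old-format (`ADVZDisperseShare`, `DaProposal`) and new-format (`VidDisperseShare2`, `DaProposal2`) paths via `convert_proposal`. A minimal sketch of what such a conversion has to do, assuming the v2 payload type implements `From` for its v1 counterpart; the envelope type here is a simplified stand-in, not the real `hotshot_types` definition:

```rust
use std::marker::PhantomData;

/// Simplified stand-in for the signed proposal envelope.
struct Proposal<D> {
    data: D,
    signature: Vec<u8>, // the real field is a typed signature
    _pd: PhantomData<()>,
}

/// Re-wrap the envelope around the upgraded payload type; the signature is
/// carried over as-is, so the conversion never re-signs anything.
fn convert_proposal<D1, D2: From<D1>>(p: Proposal<D1>) -> Proposal<D2> {
    Proposal {
        data: p.data.into(),
        signature: p.signature,
        _pd: PhantomData,
    }
}
```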
@@ -866,17 +881,22 @@ mod persistence_tests { Leaf::genesis::(&ValidatedState::default(), &NodeState::mock()).await; let leaf_payload = leaf.block_payload().unwrap(); let leaf_payload_bytes_arc = leaf_payload.encode(); - let disperse = advz_scheme(2) - .disperse(leaf_payload_bytes_arc.clone()) - .unwrap(); - let payload_commitment = disperse.commit; + let avidm_param = init_avidm_param(2).unwrap(); + let weights = vec![1u32; 2]; + + let ns_table = parse_ns_table(leaf_payload.byte_len().as_usize(), &leaf_payload.encode()); + let (payload_commitment, shares) = + AvidMScheme::ns_disperse(&avidm_param, &weights, &leaf_payload_bytes_arc, ns_table) + .unwrap(); + let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], 1); - let vid_share = ADVZDisperseShare:: { + let vid_share = VidDisperseShare2:: { view_number: ViewNumber::new(0), payload_commitment, - share: disperse.shares[0].clone(), - common: disperse.common, + share: shares[0].clone(), recipient_key: pubkey, + epoch: None, + target_epoch: None, } .to_proposal(&privkey) .unwrap() @@ -911,25 +931,23 @@ mod persistence_tests { let block_payload_signature = BLSPubKey::sign(&privkey, &leaf_payload_bytes_arc) .expect("Failed to sign block payload"); let da_proposal = Proposal { - data: DaProposal:: { + data: DaProposal2:: { encoded_transactions: leaf_payload_bytes_arc, metadata: leaf_payload.ns_table().clone(), view_number: ViewNumber::new(0), + epoch: None, }, signature: block_payload_signature, _pd: Default::default(), }; storage - .append_da( - &da_proposal, - hotshot_query_service::VidCommitment::V0(payload_commitment), - ) + .append_da2(&da_proposal, VidCommitment::V1(payload_commitment)) .await .unwrap(); - storage.append_vid(&vid_share).await.unwrap(); + storage.append_vid2(&vid_share).await.unwrap(); storage - .append_quorum_proposal(&quorum_proposal) + .append_quorum_proposal2(&quorum_proposal) .await .unwrap(); @@ -955,7 +973,7 @@ mod persistence_tests { .await .unwrap() .unwrap(), - vid_share + convert_proposal(vid_share) ); assert_eq!( storage diff --git a/sequencer/src/persistence/fs.rs b/sequencer/src/persistence/fs.rs index c3b0abf7a9..853142fa9a 100644 --- a/sequencer/src/persistence/fs.rs +++ b/sequencer/src/persistence/fs.rs @@ -6,12 +6,12 @@ use espresso_types::{ v0::traits::{EventConsumer, PersistenceOptions, SequencerPersistence}, Leaf, Leaf2, NetworkConfig, Payload, SeqTypes, }; -use hotshot_query_service::VidCommitment; use hotshot_types::{ consensus::CommitmentMap, data::{ vid_disperse::{ADVZDisperseShare, VidDisperseShare2}, - DaProposal, EpochNumber, QuorumProposal, QuorumProposal2, QuorumProposalWrapper, + DaProposal, DaProposal2, EpochNumber, QuorumProposal, QuorumProposal2, + QuorumProposalWrapper, VidCommitment, VidDisperseShare, }, event::{Event, EventType, HotShotAction, LeafInfo}, message::{convert_proposal, Proposal}, @@ -27,7 +27,7 @@ use hotshot_types::{ }; use std::sync::Arc; use std::{ - collections::BTreeMap, + collections::{BTreeMap, HashSet}, fs::{self, File, OpenOptions}, io::{Read, Seek, SeekFrom, Write}, ops::RangeInclusive, @@ -36,7 +36,7 @@ use std::{ use crate::ViewNumber; -use espresso_types::{downgrade_commitment_map, downgrade_leaf, upgrade_commitment_map}; +use espresso_types::upgrade_commitment_map; /// Options for file system backed persistence. 
#[derive(Parser, Clone, Debug)] @@ -100,10 +100,20 @@ impl PersistenceOptions for Options { let store_undecided_state = self.store_undecided_state; let view_retention = self.consensus_view_retention; + let migration_path = path.join("migration"); + let migrated = if migration_path.is_file() { + let bytes = fs::read(&path) + .context(format!("unable to read migration from {}", path.display()))?; + bincode::deserialize(&bytes).context("malformed migration file")? + } else { + HashSet::new() + }; + Ok(Persistence { store_undecided_state, inner: Arc::new(RwLock::new(Inner { path, + migrated, view_retention, })), }) @@ -129,6 +139,7 @@ pub struct Persistence { struct Inner { path: PathBuf, view_retention: u64, + migrated: HashSet, } impl Inner { @@ -136,6 +147,10 @@ impl Inner { self.path.join("hotshot.cfg") } + fn migration(&self) -> PathBuf { + self.path.join("migration") + } + fn voted_view_path(&self) -> PathBuf { self.path.join("highest_voted_view") } @@ -145,6 +160,10 @@ impl Inner { self.path.join("decided_leaves") } + fn decided_leaf2_path(&self) -> PathBuf { + self.path.join("decided_leaves2") + } + /// The path from previous versions where there was only a single file for anchor leaves. fn legacy_anchor_leaf_path(&self) -> PathBuf { self.path.join("anchor_leaf") @@ -154,18 +173,34 @@ impl Inner { self.path.join("vid") } + fn vid2_dir_path(&self) -> PathBuf { + self.path.join("vid2") + } + fn da_dir_path(&self) -> PathBuf { self.path.join("da") } + fn da2_dir_path(&self) -> PathBuf { + self.path.join("da2") + } + fn undecided_state_path(&self) -> PathBuf { self.path.join("undecided_state") } + fn undecided2_state_path(&self) -> PathBuf { + self.path.join("undecided_state2") + } + fn quorum_proposals_dir_path(&self) -> PathBuf { self.path.join("quorum_proposals") } + fn quorum_proposals2_dir_path(&self) -> PathBuf { + self.path.join("quorum_proposals2") + } + fn upgrade_certificate_dir_path(&self) -> PathBuf { self.path.join("upgrade_certificate") } @@ -174,6 +209,20 @@ impl Inner { self.path.join("next_epoch_quorum_certificate") } + fn update_migration(&mut self) -> anyhow::Result<()> { + let path = self.migration(); + let bytes = bincode::serialize(&self.migrated)?; + + self.replace( + &path, + |_| Ok(true), + |mut file| { + file.write_all(&bytes)?; + Ok(()) + }, + ) + } + /// Overwrite a file if a condition is met. /// /// The file at `path`, if it exists, is opened in read mode and passed to `pred`. If `pred` @@ -225,10 +274,10 @@ impl Inner { ) -> anyhow::Result<()> { let prune_view = ViewNumber::new(decided_view.saturating_sub(self.view_retention)); - self.prune_files(self.da_dir_path(), prune_view, None, prune_intervals)?; - self.prune_files(self.vid_dir_path(), prune_view, None, prune_intervals)?; + self.prune_files(self.da2_dir_path(), prune_view, None, prune_intervals)?; + self.prune_files(self.vid2_dir_path(), prune_view, None, prune_intervals)?; self.prune_files( - self.quorum_proposals_dir_path(), + self.quorum_proposals2_dir_path(), prune_view, None, prune_intervals, @@ -236,7 +285,7 @@ impl Inner { // Save the most recent leaf as it will be our anchor point if the node restarts. self.prune_files( - self.decided_leaf_path(), + self.decided_leaf2_path(), prune_view, Some(decided_view), prune_intervals, @@ -287,7 +336,7 @@ impl Inner { // separate event for each leaf because it is possible we have non-consecutive leaves in our // storage, which would not be valid as a single decide with a single leaf chain. 
let mut leaves = BTreeMap::new(); - for (v, path) in view_files(self.decided_leaf_path())? { + for (v, path) in view_files(self.decided_leaf2_path())? { if v > view { continue; } @@ -295,7 +344,7 @@ impl Inner { let bytes = fs::read(&path).context(format!("reading decided leaf {}", path.display()))?; let (mut leaf, qc) = - bincode::deserialize::<(Leaf, QuorumCertificate)>(&bytes) + bincode::deserialize::<(Leaf2, QuorumCertificate2)>(&bytes) .context(format!("parsing decided leaf {}", path.display()))?; // Include the VID share if available. @@ -316,9 +365,8 @@ impl Inner { } let info = LeafInfo { - leaf: leaf.into(), - vid_share: vid_share.map(Into::into), - + leaf, + vid_share, // Note: the following fields are not used in Decide event processing, and should be // removed. For now, we just default them. state: Default::default(), @@ -347,7 +395,7 @@ impl Inner { .handle_event(&Event { view_number: view, event: EventType::Decide { - qc: Arc::new(qc.to_qc2()), + qc: Arc::new(qc), leaf_chain: Arc::new(vec![leaf]), block_size: None, }, @@ -379,8 +427,8 @@ impl Inner { fn load_da_proposal( &self, view: ViewNumber, - ) -> anyhow::Result>>> { - let dir_path = self.da_dir_path(); + ) -> anyhow::Result>>> { + let dir_path = self.da2_dir_path(); let file_path = dir_path.join(view.u64().to_string()).with_extension("txt"); @@ -390,7 +438,7 @@ impl Inner { let da_bytes = fs::read(file_path)?; - let da_proposal: Proposal> = + let da_proposal: Proposal> = bincode::deserialize(&da_bytes)?; Ok(Some(da_proposal)) } @@ -398,8 +446,8 @@ impl Inner { fn load_vid_share( &self, view: ViewNumber, - ) -> anyhow::Result>>> { - let dir_path = self.vid_dir_path(); + ) -> anyhow::Result>>> { + let dir_path = self.vid2_dir_path(); let file_path = dir_path.join(view.u64().to_string()).with_extension("txt"); @@ -408,31 +456,27 @@ impl Inner { } let vid_share_bytes = fs::read(file_path)?; - let vid_share: Proposal> = + let vid_share: Proposal> = bincode::deserialize(&vid_share_bytes)?; Ok(Some(vid_share)) } fn load_anchor_leaf(&self) -> anyhow::Result)>> { - if self.decided_leaf_path().is_dir() { + if self.decided_leaf2_path().is_dir() { let mut anchor: Option<(Leaf2, QuorumCertificate2)> = None; // Return the latest decided leaf. - for (_, path) in view_files(self.decided_leaf_path())? { + for (_, path) in view_files(self.decided_leaf2_path())? { let bytes = fs::read(&path).context(format!("reading decided leaf {}", path.display()))?; - let (leaf, qc) = - bincode::deserialize::<(Leaf, QuorumCertificate)>(&bytes) + let (leaf2, qc2) = + bincode::deserialize::<(Leaf2, QuorumCertificate2)>(&bytes) .context(format!("parsing decided leaf {}", path.display()))?; if let Some((anchor_leaf, _)) = &anchor { - if leaf.view_number() > anchor_leaf.view_number() { - let leaf2 = leaf.into(); - let qc2 = qc.to_qc2(); + if leaf2.view_number() > anchor_leaf.view_number() { anchor = Some((leaf2, qc2)); } } else { - let leaf2 = leaf.into(); - let qc2 = qc.to_qc2(); anchor = Some((leaf2, qc2)); } } @@ -503,7 +547,7 @@ impl SequencerPersistence for Persistence { consumer: &impl EventConsumer, ) -> anyhow::Result<()> { let mut inner = self.inner.write().await; - let path = inner.decided_leaf_path(); + let path = inner.decided_leaf2_path(); // Ensure the anchor leaf directory exists. 
fs::create_dir_all(&path).context("creating anchor leaf directory")?; @@ -539,9 +583,7 @@ impl SequencerPersistence for Persistence { Ok(false) }, |mut file| { - let leaf = downgrade_leaf(info.leaf.clone()); - let qc = qc2.to_qc(); - let bytes = bincode::serialize(&(&leaf, qc))?; + let bytes = bincode::serialize(&(&info.leaf.clone(), qc2))?; file.write_all(&bytes)?; Ok(()) }, @@ -578,27 +620,27 @@ impl SequencerPersistence for Persistence { &self, ) -> anyhow::Result, BTreeMap>)>> { let inner = self.inner.read().await; - let path = inner.undecided_state_path(); + let path = inner.undecided2_state_path(); if !path.is_file() { return Ok(None); } let bytes = fs::read(&path).context("read")?; - let value: (CommitmentMap, _) = + let value: (CommitmentMap, _) = bincode::deserialize(&bytes).context("deserialize")?; - Ok(Some((upgrade_commitment_map(value.0), value.1))) + Ok(Some((value.0, value.1))) } async fn load_da_proposal( &self, view: ViewNumber, - ) -> anyhow::Result>>> { + ) -> anyhow::Result>>> { self.inner.read().await.load_da_proposal(view) } async fn load_vid_share( &self, view: ViewNumber, - ) -> anyhow::Result>>> { + ) -> anyhow::Result>>> { self.inner.read().await.load_vid_share(view) } @@ -634,11 +676,13 @@ impl SequencerPersistence for Persistence { ) -> anyhow::Result<()> { let mut inner = self.inner.write().await; let view_number = proposal.data.view_number().u64(); - let dir_path = inner.vid_dir_path(); + + let dir_path = inner.vid2_dir_path(); fs::create_dir_all(dir_path.clone()).context("failed to create vid dir")?; let file_path = dir_path.join(view_number.to_string()).with_extension("txt"); + inner.replace( &file_path, |_| { @@ -648,6 +692,8 @@ impl SequencerPersistence for Persistence { Ok(false) }, |mut file| { + let proposal: Proposal> = + convert_proposal(proposal.clone()); let proposal_bytes = bincode::serialize(&proposal).context("serialize proposal")?; file.write_all(&proposal_bytes)?; Ok(()) @@ -712,19 +758,17 @@ impl SequencerPersistence for Persistence { }, ) } - async fn update_undecided_state( + async fn update_undecided_state2( &self, leaves: CommitmentMap, state: BTreeMap>, ) -> anyhow::Result<()> { - let leaves = downgrade_commitment_map(leaves); - if !self.store_undecided_state { return Ok(()); } let mut inner = self.inner.write().await; - let path = &inner.undecided_state_path(); + let path = &inner.undecided2_state_path(); inner.replace( path, |_| { @@ -739,15 +783,13 @@ impl SequencerPersistence for Persistence { }, ) } - async fn append_quorum_proposal( + async fn append_quorum_proposal2( &self, proposal: &Proposal>, ) -> anyhow::Result<()> { - let proposal: Proposal> = - convert_proposal(proposal.clone()); let mut inner = self.inner.write().await; let view_number = proposal.data.view_number().u64(); - let dir_path = inner.quorum_proposals_dir_path(); + let dir_path = inner.quorum_proposals2_dir_path(); fs::create_dir_all(dir_path.clone()).context("failed to create proposals dir")?; @@ -773,7 +815,7 @@ impl SequencerPersistence for Persistence { let inner = self.inner.read().await; // First, get the proposal directory. - let dir_path = inner.quorum_proposals_dir_path(); + let dir_path = inner.quorum_proposals2_dir_path(); if !dir_path.is_dir() { return Ok(Default::default()); } @@ -782,7 +824,7 @@ impl SequencerPersistence for Persistence { let mut map = BTreeMap::new(); for (view, path) in view_files(&dir_path)? 
{ let proposal_bytes = fs::read(path)?; - let proposal: Proposal> = + let proposal: Proposal> = match bincode::deserialize(&proposal_bytes) { Ok(proposal) => proposal, Err(err) => { @@ -810,13 +852,12 @@ impl SequencerPersistence for Persistence { view: ViewNumber, ) -> anyhow::Result>> { let inner = self.inner.read().await; - let dir_path = inner.quorum_proposals_dir_path(); + let dir_path = inner.quorum_proposals2_dir_path(); let file_path = dir_path.join(view.to_string()).with_extension("txt"); let bytes = fs::read(file_path)?; - let proposal: Proposal> = bincode::deserialize(&bytes)?; - // TODO: rather than converting, we should store the value of QuorumProposalWrapper::with_epoch - let proposal_wrapper = convert_proposal(proposal); - Ok(proposal_wrapper) + let proposal = bincode::deserialize(&bytes)?; + + Ok(proposal) } async fn load_upgrade_certificate( @@ -893,14 +934,338 @@ impl SequencerPersistence for Persistence { )) } - async fn migrate_consensus( + async fn append_da2( &self, - _migrate_leaf: fn(Leaf) -> Leaf2, - _migrate_proposal: fn( - Proposal>, - ) -> Proposal>, + proposal: &Proposal>, + _vid_commit: VidCommitment, ) -> anyhow::Result<()> { - // TODO: https://github.com/EspressoSystems/espresso-sequencer/issues/2357 + let mut inner = self.inner.write().await; + let view_number = proposal.data.view_number().u64(); + let dir_path = inner.da2_dir_path(); + + fs::create_dir_all(dir_path.clone()).context("failed to create da dir")?; + + let file_path = dir_path.join(view_number.to_string()).with_extension("txt"); + inner.replace( + &file_path, + |_| { + // Don't overwrite an existing proposal, but warn about it as this is likely not + // intended behavior from HotShot. + tracing::warn!(view_number, "duplicate DA proposal"); + Ok(false) + }, + |mut file| { + let proposal_bytes = bincode::serialize(&proposal).context("serialize proposal")?; + file.write_all(&proposal_bytes)?; + Ok(()) + }, + ) + } + + async fn append_proposal2( + &self, + proposal: &Proposal>, + ) -> anyhow::Result<()> { + self.append_quorum_proposal2(proposal).await + } + + async fn migrate_anchor_leaf(&self) -> anyhow::Result<()> { + let mut inner = self.inner.write().await; + + if inner.migrated.contains("anchor_leaf") { + tracing::info!("decided leaves already migrated"); + return Ok(()); + } + + let new_leaf_dir = inner.decided_leaf2_path(); + + fs::create_dir_all(new_leaf_dir.clone()).context("failed to create anchor leaf 2 dir")?; + + let old_leaf_dir = inner.decided_leaf_path(); + if !old_leaf_dir.is_dir() { + return Ok(()); + } + + tracing::warn!("migrating decided leaves.."); + for entry in fs::read_dir(old_leaf_dir)? 
{ + let entry = entry?; + let path = entry.path(); + + let Some(file) = path.file_stem().and_then(|n| n.to_str()) else { + continue; + }; + let Ok(view) = file.parse::() else { + continue; + }; + + let bytes = + fs::read(&path).context(format!("reading decided leaf {}", path.display()))?; + let (leaf, qc) = bincode::deserialize::<(Leaf, QuorumCertificate)>(&bytes) + .context(format!("parsing decided leaf {}", path.display()))?; + + let leaf2: Leaf2 = leaf.into(); + let qc2 = qc.to_qc2(); + + let new_leaf_path = new_leaf_dir.join(view.to_string()).with_extension("txt"); + + inner.replace( + &new_leaf_path, + |_| { + tracing::warn!(view, "duplicate decided leaf"); + Ok(false) + }, + |mut file| { + let bytes = bincode::serialize(&(&leaf2.clone(), qc2))?; + file.write_all(&bytes)?; + Ok(()) + }, + )?; + + if view % 100 == 0 { + tracing::info!(view, "decided leaves migration progress"); + } + } + + inner.migrated.insert("anchor_leaf".to_string()); + inner.update_migration()?; + tracing::warn!("successfully migrated decided leaves"); + Ok(()) + } + async fn migrate_da_proposals(&self) -> anyhow::Result<()> { + let mut inner = self.inner.write().await; + + if inner.migrated.contains("da_proposal") { + tracing::info!("da proposals already migrated"); + return Ok(()); + } + + let new_da_dir = inner.da2_dir_path(); + + fs::create_dir_all(new_da_dir.clone()).context("failed to create da proposals 2 dir")?; + + let old_da_dir = inner.da_dir_path(); + if !old_da_dir.is_dir() { + return Ok(()); + } + + tracing::warn!("migrating da proposals.."); + + for entry in fs::read_dir(old_da_dir)? { + let entry = entry?; + let path = entry.path(); + + let Some(file) = path.file_stem().and_then(|n| n.to_str()) else { + continue; + }; + let Ok(view) = file.parse::() else { + continue; + }; + + let bytes = + fs::read(&path).context(format!("reading da proposal {}", path.display()))?; + let proposal = bincode::deserialize::>>(&bytes) + .context(format!("parsing da proposal {}", path.display()))?; + + let new_da_path = new_da_dir.join(view.to_string()).with_extension("txt"); + + let proposal2: Proposal> = convert_proposal(proposal); + + inner.replace( + &new_da_path, + |_| { + tracing::warn!(view, "duplicate DA proposal 2"); + Ok(false) + }, + |mut file| { + let bytes = bincode::serialize(&proposal2)?; + file.write_all(&bytes)?; + Ok(()) + }, + )?; + + if view % 100 == 0 { + tracing::info!(view, "DA proposals migration progress"); + } + } + + inner.migrated.insert("da_proposal".to_string()); + inner.update_migration()?; + tracing::warn!("successfully migrated da proposals"); + Ok(()) + } + async fn migrate_vid_shares(&self) -> anyhow::Result<()> { + let mut inner = self.inner.write().await; + + if inner.migrated.contains("vid_share") { + tracing::info!("vid shares already migrated"); + return Ok(()); + } + + let new_vid_dir = inner.vid2_dir_path(); + + fs::create_dir_all(new_vid_dir.clone()).context("failed to create vid shares 2 dir")?; + + let old_vid_dir = inner.vid_dir_path(); + if !old_vid_dir.is_dir() { + return Ok(()); + } + + tracing::warn!("migrating vid shares.."); + + for entry in fs::read_dir(old_vid_dir)? 
{ + let entry = entry?; + let path = entry.path(); + + let Some(file) = path.file_stem().and_then(|n| n.to_str()) else { + continue; + }; + let Ok(view) = file.parse::() else { + continue; + }; + + let bytes = fs::read(&path).context(format!("reading vid share {}", path.display()))?; + let proposal = + bincode::deserialize::>>(&bytes) + .context(format!("parsing vid share {}", path.display()))?; + + let new_vid_path = new_vid_dir.join(view.to_string()).with_extension("txt"); + + let proposal2: Proposal> = + convert_proposal(proposal); + + inner.replace( + &new_vid_path, + |_| { + tracing::warn!(view, "duplicate VID share "); + Ok(false) + }, + |mut file| { + let bytes = bincode::serialize(&proposal2)?; + file.write_all(&bytes)?; + Ok(()) + }, + )?; + + if view % 100 == 0 { + tracing::info!(view, "VID shares migration progress"); + } + } + + inner.migrated.insert("vid_share".to_string()); + inner.update_migration()?; + tracing::warn!("successfully migrated vid shares"); + Ok(()) + } + async fn migrate_undecided_state(&self) -> anyhow::Result<()> { + let mut inner = self.inner.write().await; + if inner.migrated.contains("undecided_state") { + tracing::info!("undecided state already migrated"); + return Ok(()); + } + + let new_undecided_state_path = &inner.undecided2_state_path(); + + let old_undecided_state_path = inner.undecided_state_path(); + + if !old_undecided_state_path.is_file() { + return Ok(()); + } + + let bytes = fs::read(&old_undecided_state_path).context("read")?; + let (leaves, state): (CommitmentMap, QuorumCertificate) = + bincode::deserialize(&bytes).context("deserialize")?; + + let leaves2 = upgrade_commitment_map(leaves); + let state2 = state.to_qc2(); + + tracing::warn!("migrating undecided state.."); + inner.replace( + new_undecided_state_path, + |_| { + // Always overwrite the previous file. + Ok(true) + }, + |mut file| { + let bytes = bincode::serialize(&(leaves2, state2)) + .context("serializing undecided state2")?; + file.write_all(&bytes)?; + Ok(()) + }, + )?; + + inner.migrated.insert("undecided_state".to_string()); + inner.update_migration()?; + tracing::warn!("successfully migrated undecided state"); + Ok(()) + } + async fn migrate_quorum_proposals(&self) -> anyhow::Result<()> { + let mut inner = self.inner.write().await; + + if inner.migrated.contains("quorum_proposals") { + tracing::info!("quorum proposals already migrated"); + return Ok(()); + } + + let new_quorum_proposals_dir = inner.quorum_proposals2_dir_path(); + + fs::create_dir_all(new_quorum_proposals_dir.clone()) + .context("failed to create quorum proposals 2 dir")?; + + let old_quorum_proposals_dir = inner.quorum_proposals_dir_path(); + if !old_quorum_proposals_dir.is_dir() { + tracing::info!("no existing quorum proposals found for migration"); + return Ok(()); + } + + tracing::warn!("migrating quorum proposals.."); + for entry in fs::read_dir(old_quorum_proposals_dir)? 
{
+            let entry = entry?;
+            let path = entry.path();
+
+            let Some(file) = path.file_stem().and_then(|n| n.to_str()) else {
+                continue;
+            };
+            let Ok(view) = file.parse::<u64>() else {
+                continue;
+            };
+
+            let bytes =
+                fs::read(&path).context(format!("reading quorum proposal {}", path.display()))?;
+            let proposal =
+                bincode::deserialize::<Proposal<QuorumProposal<SeqTypes>>>(&bytes)
+                    .context(format!("parsing quorum proposal {}", path.display()))?;
+
+            let new_file_path = new_quorum_proposals_dir
+                .join(view.to_string())
+                .with_extension("txt");
+
+            let proposal2: Proposal<QuorumProposalWrapper<SeqTypes>> =
+                convert_proposal(proposal);
+
+            inner.replace(
+                &new_file_path,
+                |_| {
+                    tracing::warn!(view, "duplicate quorum proposal2");
+                    Ok(false)
+                },
+                |mut file| {
+                    let bytes = bincode::serialize(&proposal2)?;
+                    file.write_all(&bytes)?;
+                    Ok(())
+                },
+            )?;
+
+            if view % 100 == 0 {
+                tracing::info!(view, "Quorum proposals migration progress");
+            }
+        }
+
+        inner.migrated.insert("quorum_proposals".to_string());
+        inner.update_migration()?;
+        tracing::warn!("successfully migrated quorum proposals");
+        Ok(())
+    }
+    async fn migrate_quorum_certificates(&self) -> anyhow::Result<()> {
+        Ok(())
+    }
 }
@@ -1026,12 +1391,29 @@ mod test {
    use hotshot::types::SignatureKey;
    use hotshot_example_types::node_types::TestVersions;
    use hotshot_query_service::testing::mocks::MockVersions;
+   use hotshot_types::data::{vid_commitment, QuorumProposal2};
+   use hotshot_types::traits::node_implementation::Versions;
+
+   use hotshot_types::vid::advz::advz_scheme;
    use sequencer_utils::test_utils::setup_test;
+   use vbs::version::StaticVersionType;
+   use serde_json::json;
+   use std::marker::PhantomData;

    use super::*;
    use crate::persistence::testing::TestablePersistence;
+   use crate::BLSPubKey;
+   use committable::Committable;
+   use committable::{Commitment, CommitmentBoundsArkless};
+   use espresso_types::{Header, Leaf, ValidatedState};
+
+   use hotshot_types::{
+       simple_certificate::QuorumCertificate, simple_vote::QuorumData, traits::EncodeBytes,
+   };
+   use jf_vid::VidScheme;
+
    #[test]
    fn test_config_migrations_add_builder_urls() {
        let before = json!({
@@ -1130,6 +1512,211 @@ mod test {
        assert_eq!(migrate_network_config(before.clone()).unwrap(), before);
    }

+   #[tokio::test(flavor = "multi_thread")]
+   pub async fn test_consensus_migration() {
+       setup_test();
+       let rows = 300;
+       let tmp = Persistence::tmp_storage().await;
+       let mut opt = Persistence::options(&tmp);
+       let storage = opt.create().await.unwrap();
+
+       let inner = storage.inner.read().await;
+
+       let decided_leaves_path = inner.decided_leaf_path();
+       fs::create_dir_all(decided_leaves_path.clone())
+           .expect("failed to create decided leaves dir");
+
+       let qp_dir_path = inner.quorum_proposals_dir_path();
+       fs::create_dir_all(qp_dir_path.clone()).expect("failed to create proposals dir");
+       drop(inner);
+
+       for i in 0..rows {
+           let view = ViewNumber::new(i);
+           let validated_state = ValidatedState::default();
+           let instance_state = NodeState::default();
+
+           let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], i);
+           let (payload, metadata) =
+               Payload::from_transactions([], &validated_state, &instance_state)
+                   .await
+                   .unwrap();
+           let builder_commitment = payload.builder_commitment(&metadata);
+           let payload_bytes = payload.encode();
+
+           let payload_commitment = vid_commitment::<TestVersions>(
+               &payload_bytes,
+               &metadata.encode(),
+               4,
+               <TestVersions as Versions>::Base::VERSION,
+           );
+
+           let block_header = Header::genesis(
+               &instance_state,
+               payload_commitment,
+               builder_commitment,
+               metadata,
+           );
+
+           let null_quorum_data = QuorumData {
+               leaf_commit: Commitment::<Leaf>::default_commitment_no_preimage(),
+           };
+
+           let justify_qc = QuorumCertificate::new(
+               null_quorum_data.clone(),
+               null_quorum_data.commit(),
+               view,
+               None,
+               PhantomData,
+           );
+
+           let quorum_proposal = QuorumProposal {
+               block_header,
+               view_number: view,
+               justify_qc: justify_qc.clone(),
+               upgrade_certificate: None,
+               proposal_certificate: None,
+           };
+
+           let quorum_proposal_signature =
+               BLSPubKey::sign(&privkey, &bincode::serialize(&quorum_proposal).unwrap())
+                   .expect("Failed to sign quorum proposal");
+
+           let proposal = Proposal {
+               data: quorum_proposal.clone(),
+               signature: quorum_proposal_signature,
+               _pd: PhantomData,
+           };
+
+           let mut leaf = Leaf::from_quorum_proposal(&quorum_proposal);
+           leaf.fill_block_payload::<TestVersions>(
+               payload,
+               4,
+               <TestVersions as Versions>::Base::VERSION,
+           )
+           .unwrap();
+
+           let mut inner = storage.inner.write().await;
+
+           let file_path = decided_leaves_path
+               .join(view.to_string())
+               .with_extension("txt");
+
+           tracing::debug!("inserting decided leaves");
+
+           inner
+               .replace(
+                   &file_path,
+                   |_| Ok(true),
+                   |mut file| {
+                       let bytes = bincode::serialize(&(&leaf.clone(), justify_qc))?;
+                       file.write_all(&bytes)?;
+                       Ok(())
+                   },
+               )
+               .expect("replace decided leaves");
+
+           let file_path = qp_dir_path.join(view.to_string()).with_extension("txt");
+
+           tracing::debug!("inserting quorum proposal for {view}");
+
+           inner
+               .replace(
+                   &file_path,
+                   |_| Ok(true),
+                   |mut file| {
+                       let proposal_bytes =
+                           bincode::serialize(&proposal).context("serialize proposal")?;
+
+                       file.write_all(&proposal_bytes)?;
+                       Ok(())
+                   },
+               )
+               .unwrap();
+
+           drop(inner);
+           let disperse = advz_scheme(4).disperse(payload_bytes.clone()).unwrap();
+
+           let vid = ADVZDisperseShare::<SeqTypes> {
+               view_number: ViewNumber::new(i),
+               payload_commitment: Default::default(),
+               share: disperse.shares[0].clone(),
+               common: disperse.common,
+               recipient_key: pubkey,
+           };
+
+           let (payload, metadata) =
+               Payload::from_transactions([], &ValidatedState::default(), &NodeState::default())
+                   .await
+                   .unwrap();
+
+           let da = DaProposal::<SeqTypes> {
+               encoded_transactions: payload.encode(),
+               metadata,
+               view_number: ViewNumber::new(i),
+           };
+
+           let block_payload_signature =
+               BLSPubKey::sign(&privkey, &payload_bytes).expect("Failed to sign block payload");
+
+           let da_proposal = Proposal {
+               data: da,
+               signature: block_payload_signature,
+               _pd: Default::default(),
+           };
+
+           tracing::debug!("inserting vid for {view}");
+           storage
+               .append_vid(&vid.to_proposal(&privkey).unwrap())
+               .await
+               .unwrap();
+
+           tracing::debug!("inserting da for {view}");
+           storage
+               .append_da(&da_proposal, VidCommitment::V0(disperse.commit))
+               .await
+               .unwrap();
+       }
+
+       storage.migrate_consensus().await.unwrap();
+       let inner = storage.inner.read().await;
+       let decided_leaves = fs::read_dir(inner.decided_leaf2_path()).unwrap();
+       let decided_leaves_count = decided_leaves
+           .filter_map(Result::ok)
+           .filter(|e| e.path().is_file())
+           .count();
+       assert_eq!(
+           decided_leaves_count, rows as usize,
+           "decided leaves count does not match",
+       );
+
+       let da_proposals = fs::read_dir(inner.da2_dir_path()).unwrap();
+       let da_proposals_count = da_proposals
+           .filter_map(Result::ok)
+           .filter(|e| e.path().is_file())
+           .count();
+       assert_eq!(
+           da_proposals_count, rows as usize,
+           "da proposals count does not match",
+       );
+
+       let vids = fs::read_dir(inner.vid2_dir_path()).unwrap();
+       let vids_count = vids
+           .filter_map(Result::ok)
+           .filter(|e| e.path().is_file())
+           .count();
+       assert_eq!(vids_count, rows as usize, "vid shares count does not match");
+
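// The directory-count checks in this test all repeat the same
// read_dir/filter/count chain. A small nested helper along these lines could
// factor that out (a sketch only; `count_files` is a hypothetical name, not
// part of this patch):
//
//     fn count_files(dir: &std::path::Path) -> usize {
//         std::fs::read_dir(dir)
//             .unwrap()
//             .filter_map(Result::ok)
//             .filter(|e| e.path().is_file())
//             .count()
//     }
//
//     assert_eq!(count_files(&inner.vid2_dir_path()), rows as usize);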
let qps = fs::read_dir(inner.quorum_proposals2_dir_path()).unwrap(); + let qps_count = qps + .filter_map(Result::ok) + .filter(|e| e.path().is_file()) + .count(); + assert_eq!( + qps_count, rows as usize, + "quorum proposals count does not match", + ); + } + #[tokio::test(flavor = "multi_thread")] async fn test_load_quorum_proposals_invalid_extension() { setup_test(); @@ -1138,9 +1725,7 @@ mod test { let storage = Persistence::connect(&tmp).await; // Generate a couple of valid quorum proposals. - let leaf: Leaf2 = Leaf::genesis::(&Default::default(), &NodeState::mock()) - .await - .into(); + let leaf = Leaf2::genesis::(&Default::default(), &NodeState::mock()).await; let privkey = PubKey::generated_from_seed_indexed([0; 32], 1).1; let signature = PubKey::sign(&privkey, &[]).unwrap(); let mut quorum_proposal = Proposal { @@ -1149,12 +1734,11 @@ mod test { epoch: None, block_header: leaf.block_header().clone(), view_number: ViewNumber::genesis(), - justify_qc: QuorumCertificate::genesis::( + justify_qc: QuorumCertificate2::genesis::( &Default::default(), &NodeState::mock(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_drb_result: None, @@ -1168,21 +1752,21 @@ mod test { // Store quorum proposals. let quorum_proposal1 = quorum_proposal.clone(); storage - .append_quorum_proposal(&quorum_proposal1) + .append_quorum_proposal2(&quorum_proposal1) .await .unwrap(); quorum_proposal.data.proposal.view_number = ViewNumber::new(1); let quorum_proposal2 = quorum_proposal.clone(); storage - .append_quorum_proposal(&quorum_proposal2) + .append_quorum_proposal2(&quorum_proposal2) .await .unwrap(); // Change one of the file extensions. It can happen that we end up with files with the wrong // extension if, for example, the node is killed before cleaning up a swap file. fs::rename( - tmp.path().join("quorum_proposals/1.txt"), - tmp.path().join("quorum_proposals/1.swp"), + tmp.path().join("quorum_proposals2/1.txt"), + tmp.path().join("quorum_proposals2/1.swp"), ) .unwrap(); @@ -1214,12 +1798,11 @@ mod test { epoch: None, block_header: leaf.block_header().clone(), view_number: ViewNumber::new(1), - justify_qc: QuorumCertificate::genesis::( + justify_qc: QuorumCertificate2::genesis::( &Default::default(), &NodeState::mock(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_drb_result: None, @@ -1231,16 +1814,16 @@ mod test { }; // First store an invalid quorum proposal. - fs::create_dir_all(tmp.path().join("quorum_proposals")).unwrap(); + fs::create_dir_all(tmp.path().join("quorum_proposals2")).unwrap(); fs::write( - tmp.path().join("quorum_proposals/0.txt"), + tmp.path().join("quorum_proposals2/0.txt"), "invalid data".as_bytes(), ) .unwrap(); // Store valid quorum proposal. 
storage - .append_quorum_proposal(&quorum_proposal) + .append_quorum_proposal2(&quorum_proposal) .await .unwrap(); diff --git a/sequencer/src/persistence/no_storage.rs b/sequencer/src/persistence/no_storage.rs index 56b69bb938..c47701a3ea 100644 --- a/sequencer/src/persistence/no_storage.rs +++ b/sequencer/src/persistence/no_storage.rs @@ -5,14 +5,14 @@ use anyhow::bail; use async_trait::async_trait; use espresso_types::{ v0::traits::{EventConsumer, PersistenceOptions, SequencerPersistence}, - Leaf, Leaf2, NetworkConfig, + Leaf2, NetworkConfig, }; -use hotshot_query_service::VidCommitment; use hotshot_types::{ consensus::CommitmentMap, data::{ vid_disperse::{ADVZDisperseShare, VidDisperseShare2}, - DaProposal, EpochNumber, QuorumProposal, QuorumProposal2, QuorumProposalWrapper, + DaProposal, DaProposal2, EpochNumber, QuorumProposalWrapper, VidCommitment, + VidDisperseShare, }, event::{Event, EventType, HotShotAction, LeafInfo}, message::Proposal, @@ -99,14 +99,14 @@ impl SequencerPersistence for NoStorage { async fn load_da_proposal( &self, _view: ViewNumber, - ) -> anyhow::Result>>> { + ) -> anyhow::Result>>> { Ok(None) } async fn load_vid_share( &self, _view: ViewNumber, - ) -> anyhow::Result>>> { + ) -> anyhow::Result>>> { Ok(None) } @@ -155,14 +155,14 @@ impl SequencerPersistence for NoStorage { ) -> anyhow::Result<()> { Ok(()) } - async fn update_undecided_state( + async fn update_undecided_state2( &self, _leaves: CommitmentMap, _state: BTreeMap>, ) -> anyhow::Result<()> { Ok(()) } - async fn append_quorum_proposal( + async fn append_quorum_proposal2( &self, _proposal: &Proposal>, ) -> anyhow::Result<()> { @@ -175,16 +175,6 @@ impl SequencerPersistence for NoStorage { Ok(()) } - async fn migrate_consensus( - &self, - _: fn(Leaf) -> Leaf2, - _: fn( - Proposal>, - ) -> Proposal>, - ) -> anyhow::Result<()> { - Ok(()) - } - async fn store_next_epoch_quorum_certificate( &self, _high_qc: NextEpochQuorumCertificate2, @@ -197,4 +187,38 @@ impl SequencerPersistence for NoStorage { ) -> anyhow::Result>> { Ok(None) } + + async fn append_da2( + &self, + _proposal: &Proposal>, + _vid_commit: VidCommitment, + ) -> anyhow::Result<()> { + Ok(()) + } + + async fn append_proposal2( + &self, + _proposal: &Proposal>, + ) -> anyhow::Result<()> { + Ok(()) + } + + async fn migrate_anchor_leaf(&self) -> anyhow::Result<()> { + Ok(()) + } + async fn migrate_da_proposals(&self) -> anyhow::Result<()> { + Ok(()) + } + async fn migrate_vid_shares(&self) -> anyhow::Result<()> { + Ok(()) + } + async fn migrate_undecided_state(&self) -> anyhow::Result<()> { + Ok(()) + } + async fn migrate_quorum_proposals(&self) -> anyhow::Result<()> { + Ok(()) + } + async fn migrate_quorum_certificates(&self) -> anyhow::Result<()> { + Ok(()) + } } diff --git a/sequencer/src/persistence/sql.rs b/sequencer/src/persistence/sql.rs index 0ed4716cdd..e2383a81ca 100644 --- a/sequencer/src/persistence/sql.rs +++ b/sequencer/src/persistence/sql.rs @@ -5,7 +5,7 @@ use committable::Committable; use derivative::Derivative; use derive_more::derive::{From, Into}; use espresso_types::{ - downgrade_commitment_map, downgrade_leaf, parse_duration, parse_size, upgrade_commitment_map, + parse_duration, parse_size, upgrade_commitment_map, v0::traits::{EventConsumer, PersistenceOptions, SequencerPersistence, StateCatchup}, BackoffParams, BlockMerkleTree, FeeMerkleTree, Leaf, Leaf2, NetworkConfig, Payload, }; @@ -27,13 +27,14 @@ use hotshot_query_service::{ Provider, }, merklized_state::MerklizedState, - VidCommitment, VidCommon, + VidCommon, }; use 
hotshot_types::{ consensus::CommitmentMap, data::{ vid_disperse::{ADVZDisperseShare, VidDisperseShare2}, - DaProposal, EpochNumber, QuorumProposal, QuorumProposal2, QuorumProposalWrapper, + DaProposal, DaProposal2, EpochNumber, QuorumProposal, QuorumProposalWrapper, VidCommitment, + VidDisperseShare, }, event::{Event, EventType, HotShotAction, LeafInfo}, message::{convert_proposal, Proposal}, @@ -646,9 +647,10 @@ impl Persistence { }; let mut parent = None; - let mut rows = query("SELECT leaf, qc FROM anchor_leaf WHERE view >= $1 ORDER BY view") - .bind(from_view) - .fetch(tx.as_mut()); + let mut rows = + query("SELECT leaf, qc FROM anchor_leaf2 WHERE view >= $1 ORDER BY view") + .bind(from_view) + .fetch(tx.as_mut()); let mut leaves = vec![]; let mut final_qc = None; while let Some(row) = rows.next().await { @@ -663,9 +665,9 @@ impl Persistence { }; let leaf_data: Vec = row.get("leaf"); - let leaf = bincode::deserialize::(&leaf_data)?; + let leaf = bincode::deserialize::(&leaf_data)?; let qc_data: Vec = row.get("qc"); - let qc = bincode::deserialize::>(&qc_data)?; + let qc = bincode::deserialize::>(&qc_data)?; let height = leaf.block_header().block_number(); // Ensure we are only dealing with a consecutive chain of leaves. We don't want to @@ -701,7 +703,7 @@ impl Persistence { // Collect VID shares for the decide event. let mut vid_shares = tx .fetch_all( - query("SELECT view, data FROM vid_share where view >= $1 AND view <= $2") + query("SELECT view, data FROM vid_share2 where view >= $1 AND view <= $2") .bind(from_view.u64() as i64) .bind(to_view.u64() as i64), ) @@ -711,7 +713,7 @@ impl Persistence { let view: i64 = row.get("view"); let data: Vec = row.get("data"); let vid_proposal = bincode::deserialize::< - Proposal>, + Proposal>, >(&data)?; Ok((view as u64, vid_proposal.data)) }) @@ -720,7 +722,7 @@ impl Persistence { // Collect DA proposals for the decide event. let mut da_proposals = tx .fetch_all( - query("SELECT view, data FROM da_proposal where view >= $1 AND view <= $2") + query("SELECT view, data FROM da_proposal2 where view >= $1 AND view <= $2") .bind(from_view.u64() as i64) .bind(to_view.u64() as i64), ) @@ -730,7 +732,7 @@ impl Persistence { let view: i64 = row.get("view"); let data: Vec = row.get("data"); let da_proposal = - bincode::deserialize::>>(&data)?; + bincode::deserialize::>>(&data)?; Ok((view as u64, da_proposal.data)) }) .collect::>>()?; @@ -765,8 +767,8 @@ impl Persistence { } LeafInfo { - leaf: leaf.into(), - vid_share: vid_share.map(Into::into), + leaf, + vid_share, // Note: the following fields are not used in Decide event processing, and // should be removed. For now, we just default them. state: Default::default(), @@ -782,7 +784,7 @@ impl Persistence { view_number: to_view, event: EventType::Decide { leaf_chain: Arc::new(leaf_chain), - qc: Arc::new(final_qc.to_qc2()), + qc: Arc::new(final_qc), block_size: None, }, }) @@ -805,25 +807,25 @@ impl Persistence { // Delete the data that has been fully processed. 
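// (The four deletes that follow prune vid_share2, da_proposal2,
// quorum_proposals2, and quorum_certificate2 over the decided view range;
// anchor_leaf2 is handled separately below so the latest decided leaf
// survives for restart recovery.)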
tx.execute( - query("DELETE FROM vid_share where view >= $1 AND view <= $2") + query("DELETE FROM vid_share2 where view >= $1 AND view <= $2") .bind(from_view.u64() as i64) .bind(to_view.u64() as i64), ) .await?; tx.execute( - query("DELETE FROM da_proposal where view >= $1 AND view <= $2") + query("DELETE FROM da_proposal2 where view >= $1 AND view <= $2") .bind(from_view.u64() as i64) .bind(to_view.u64() as i64), ) .await?; tx.execute( - query("DELETE FROM quorum_proposals where view >= $1 AND view <= $2") + query("DELETE FROM quorum_proposals2 where view >= $1 AND view <= $2") .bind(from_view.u64() as i64) .bind(to_view.u64() as i64), ) .await?; tx.execute( - query("DELETE FROM quorum_certificate where view >= $1 AND view <= $2") + query("DELETE FROM quorum_certificate2 where view >= $1 AND view <= $2") .bind(from_view.u64() as i64) .bind(to_view.u64() as i64), ) @@ -833,7 +835,7 @@ impl Persistence { // less than the given value). This is necessary to ensure that, in case of a restart, // we can resume from the last decided leaf. tx.execute( - query("DELETE FROM anchor_leaf WHERE view >= $1 AND view < $2") + query("DELETE FROM anchor_leaf2 WHERE view >= $1 AND view < $2") .bind(from_view.u64() as i64) .bind(to_view.u64() as i64), ) @@ -896,11 +898,11 @@ impl Persistence { } const PRUNE_TABLES: &[&str] = &[ - "anchor_leaf", - "vid_share", - "da_proposal", - "quorum_proposals", - "quorum_certificate", + "anchor_leaf2", + "vid_share2", + "da_proposal2", + "quorum_proposals2", + "quorum_certificate2", ]; async fn prune_to_view(tx: &mut Transaction, view: u64) -> anyhow::Result<()> { @@ -977,14 +979,12 @@ impl SequencerPersistence for Persistence { // because we already store it separately, as part of the DA proposal. Storing it // here contributes to load on the DB for no reason, so we remove it before // serializing the leaf. - let mut leaf = downgrade_leaf(info.leaf.clone()); + let mut leaf = info.leaf.clone(); leaf.unfill_block_payload(); - let qc = qc2.to_qc(); - - let view = qc.view_number.u64() as i64; + let view = qc2.view_number.u64() as i64; let leaf_bytes = bincode::serialize(&leaf)?; - let qc_bytes = bincode::serialize(&qc)?; + let qc_bytes = bincode::serialize(&qc2)?; Ok((view, leaf_bytes, qc_bytes)) }) .collect::>>()?; @@ -993,7 +993,7 @@ impl SequencerPersistence for Persistence { // event consumer later fails, there is no need to abort the storage of the leaves. let mut tx = self.db.write().await?; - tx.upsert("anchor_leaf", ["view", "leaf", "qc"], ["view"], values) + tx.upsert("anchor_leaf2", ["view", "leaf", "qc"], ["view"], values) .await?; tx.commit().await?; @@ -1036,26 +1036,24 @@ impl SequencerPersistence for Persistence { .db .read() .await? - .fetch_optional("SELECT leaf, qc FROM anchor_leaf ORDER BY view DESC LIMIT 1") + .fetch_optional("SELECT leaf, qc FROM anchor_leaf2 ORDER BY view DESC LIMIT 1") .await? 
else { return Ok(None); }; let leaf_bytes: Vec = row.get("leaf"); - let leaf: Leaf = bincode::deserialize(&leaf_bytes)?; - let leaf2: Leaf2 = leaf.into(); + let leaf2: Leaf2 = bincode::deserialize(&leaf_bytes)?; let qc_bytes: Vec = row.get("qc"); - let qc: QuorumCertificate = bincode::deserialize(&qc_bytes)?; - let qc2 = qc.to_qc2(); + let qc2: QuorumCertificate2 = bincode::deserialize(&qc_bytes)?; Ok(Some((leaf2, qc2))) } async fn load_anchor_view(&self) -> anyhow::Result { let mut tx = self.db.read().await?; - let (view,) = query_as::<(i64,)>("SELECT coalesce(max(view), 0) FROM anchor_leaf") + let (view,) = query_as::<(i64,)>("SELECT coalesce(max(view), 0) FROM anchor_leaf2") .fetch_one(tx.as_mut()) .await?; Ok(ViewNumber::new(view as u64)) @@ -1068,15 +1066,14 @@ impl SequencerPersistence for Persistence { .db .read() .await? - .fetch_optional("SELECT leaves, state FROM undecided_state WHERE id = 0") + .fetch_optional("SELECT leaves, state FROM undecided_state2 WHERE id = 0") .await? else { return Ok(None); }; let leaves_bytes: Vec = row.get("leaves"); - let leaves: CommitmentMap = bincode::deserialize(&leaves_bytes)?; - let leaves2 = upgrade_commitment_map(leaves); + let leaves2: CommitmentMap = bincode::deserialize(&leaves_bytes)?; let state_bytes: Vec = row.get("state"); let state = bincode::deserialize(&state_bytes)?; @@ -1087,13 +1084,13 @@ impl SequencerPersistence for Persistence { async fn load_da_proposal( &self, view: ViewNumber, - ) -> anyhow::Result>>> { + ) -> anyhow::Result>>> { let result = self .db .read() .await? .fetch_optional( - query("SELECT data FROM da_proposal where view = $1").bind(view.u64() as i64), + query("SELECT data FROM da_proposal2 where view = $1").bind(view.u64() as i64), ) .await?; @@ -1108,13 +1105,13 @@ impl SequencerPersistence for Persistence { async fn load_vid_share( &self, view: ViewNumber, - ) -> anyhow::Result>>> { + ) -> anyhow::Result>>> { let result = self .db .read() .await? .fetch_optional( - query("SELECT data FROM vid_share where view = $1").bind(view.u64() as i64), + query("SELECT data FROM vid_share2 where view = $1").bind(view.u64() as i64), ) .await?; @@ -1134,7 +1131,7 @@ impl SequencerPersistence for Persistence { .db .read() .await? 
- .fetch_all("SELECT * FROM quorum_proposals") + .fetch_all("SELECT * FROM quorum_proposals2") .await?; Ok(BTreeMap::from_iter( @@ -1143,9 +1140,8 @@ impl SequencerPersistence for Persistence { let view: i64 = row.get("view"); let view_number: ViewNumber = ViewNumber::new(view.try_into()?); let bytes: Vec = row.get("data"); - let proposal: Proposal> = - bincode::deserialize(&bytes)?; - Ok((view_number, convert_proposal(proposal))) + let proposal = bincode::deserialize(&bytes)?; + Ok((view_number, proposal)) }) .collect::>>()?, )) @@ -1157,12 +1153,12 @@ impl SequencerPersistence for Persistence { ) -> anyhow::Result>> { let mut tx = self.db.read().await?; let (data,) = - query_as::<(Vec,)>("SELECT data FROM quorum_proposals WHERE view = $1 LIMIT 1") + query_as::<(Vec,)>("SELECT data FROM quorum_proposals2 WHERE view = $1 LIMIT 1") .bind(view.u64() as i64) .fetch_one(tx.as_mut()) .await?; - let proposal: Proposal> = bincode::deserialize(&data)?; - let proposal = convert_proposal(proposal); + let proposal = bincode::deserialize(&data)?; + Ok(proposal) } @@ -1191,11 +1187,13 @@ impl SequencerPersistence for Persistence { ) -> anyhow::Result<()> { let view = proposal.data.view_number.u64(); let payload_hash = proposal.data.payload_commitment; - let data_bytes = bincode::serialize(proposal).unwrap(); + let proposal: Proposal> = + convert_proposal(proposal.clone()); + let data_bytes = bincode::serialize(&proposal).unwrap(); let mut tx = self.db.write().await?; tx.upsert( - "vid_share", + "vid_share2", ["view", "data", "payload_hash"], ["view"], [(view as i64, data_bytes, payload_hash.to_string())], @@ -1244,42 +1242,18 @@ impl SequencerPersistence for Persistence { tx.execute(query(&stmt).bind(view.u64() as i64)).await?; tx.commit().await } - async fn update_undecided_state( - &self, - leaves: CommitmentMap, - state: BTreeMap>, - ) -> anyhow::Result<()> { - let leaves = downgrade_commitment_map(leaves); - - if !self.store_undecided_state { - return Ok(()); - } - let leaves_bytes = bincode::serialize(&leaves).context("serializing leaves")?; - let state_bytes = bincode::serialize(&state).context("serializing state")?; - - let mut tx = self.db.write().await?; - tx.upsert( - "undecided_state", - ["id", "leaves", "state"], - ["id"], - [(0_i32, leaves_bytes, state_bytes)], - ) - .await?; - tx.commit().await - } - async fn append_quorum_proposal( + async fn append_quorum_proposal2( &self, proposal: &Proposal>, ) -> anyhow::Result<()> { - let proposal: Proposal> = - convert_proposal(proposal.clone()); let view_number = proposal.data.view_number().u64(); + let proposal_bytes = bincode::serialize(&proposal).context("serializing proposal")?; - let leaf_hash = Committable::commit(&Leaf::from_quorum_proposal(&proposal.data)); + let leaf_hash = Committable::commit(&Leaf2::from_quorum_proposal(&proposal.data)); let mut tx = self.db.write().await?; tx.upsert( - "quorum_proposals", + "quorum_proposals2", ["view", "leaf_hash", "data"], ["view"], [(view_number as i64, leaf_hash.to_string(), proposal_bytes)], @@ -1287,10 +1261,10 @@ impl SequencerPersistence for Persistence { .await?; // We also keep track of any QC we see in case we need it to recover our archival storage. 
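// (The QC is stored under the leaf hash it certifies, so when the archive asks
// for a leaf, `fetch_leaf_from_proposals` later in this file can look up both
// the proposal that produced that leaf and a QC certifying it, and rebuild a
// `Leaf2` plus `QuorumCertificate2` pair.)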
- let justify_qc = &proposal.data.justify_qc; + let justify_qc = proposal.data.justify_qc(); let justify_qc_bytes = bincode::serialize(&justify_qc).context("serializing QC")?; tx.upsert( - "quorum_certificate", + "quorum_certificate2", ["view", "leaf_hash", "data"], ["view"], [( @@ -1343,14 +1317,483 @@ impl SequencerPersistence for Persistence { tx.commit().await } - async fn migrate_consensus( - &self, - _migrate_leaf: fn(Leaf) -> Leaf2, - _migrate_proposal: fn( - Proposal>, - ) -> Proposal>, - ) -> anyhow::Result<()> { - // TODO: https://github.com/EspressoSystems/espresso-sequencer/issues/2357 + async fn migrate_anchor_leaf(&self) -> anyhow::Result<()> { + let batch_size: i64 = 10000; + let mut offset: i64 = 0; + let mut tx = self.db.read().await?; + + let (is_completed,) = query_as::<(bool,)>( + "SELECT completed from epoch_migration WHERE table_name = 'anchor_leaf'", + ) + .fetch_one(tx.as_mut()) + .await?; + + if is_completed { + tracing::info!("decided leaves already migrated"); + return Ok(()); + } + + tracing::warn!("migrating decided leaves.."); + loop { + let mut tx = self.db.read().await?; + let rows = + query("SELECT view, leaf, qc FROM anchor_leaf ORDER BY view LIMIT $1 OFFSET $2") + .bind(batch_size) + .bind(offset) + .fetch_all(tx.as_mut()) + .await?; + + drop(tx); + if rows.is_empty() { + break; + } + let mut values = Vec::new(); + + for row in rows.iter() { + let leaf: Vec = row.try_get("leaf")?; + let qc: Vec = row.try_get("qc")?; + let leaf1: Leaf = bincode::deserialize(&leaf)?; + let qc1: QuorumCertificate = bincode::deserialize(&qc)?; + let view: i64 = row.try_get("view")?; + + let leaf2: Leaf2 = leaf1.into(); + let qc2: QuorumCertificate2 = qc1.to_qc2(); + + let leaf2_bytes = bincode::serialize(&leaf2)?; + let qc2_bytes = bincode::serialize(&qc2)?; + + values.push((view, leaf2_bytes, qc2_bytes)); + } + + let mut query_builder: sqlx::QueryBuilder = + sqlx::QueryBuilder::new("INSERT INTO anchor_leaf2 (view, leaf, qc) "); + + query_builder.push_values(values.into_iter(), |mut b, (view, leaf, qc)| { + b.push_bind(view).push_bind(leaf).push_bind(qc); + }); + + let query = query_builder.build(); + + let mut tx = self.db.write().await?; + query.execute(tx.as_mut()).await?; + tx.commit().await?; + offset += batch_size; + tracing::info!("anchor leaf migration progress: {} rows", offset); + + if rows.len() < batch_size as usize { + break; + } + } + + tracing::warn!("migrated decided leaves"); + + let mut tx = self.db.write().await?; + tx.upsert( + "epoch_migration", + ["table_name", "completed"], + ["table_name"], + [("anchor_leaf".to_string(), true)], + ) + .await?; + tx.commit().await?; + + tracing::info!("updated epoch_migration table for anchor_leaf"); + + Ok(()) + } + + async fn migrate_da_proposals(&self) -> anyhow::Result<()> { + let batch_size: i64 = 10000; + let mut offset: i64 = 0; + let mut tx = self.db.read().await?; + + let (is_completed,) = query_as::<(bool,)>( + "SELECT completed from epoch_migration WHERE table_name = 'da_proposal'", + ) + .fetch_one(tx.as_mut()) + .await?; + + if is_completed { + tracing::info!("da proposals migration already done"); + return Ok(()); + } + + tracing::warn!("migrating da proposals.."); + + loop { + let mut tx = self.db.read().await?; + let rows = query( + "SELECT payload_hash, data FROM da_proposal ORDER BY view LIMIT $1 OFFSET $2", + ) + .bind(batch_size) + .bind(offset) + .fetch_all(tx.as_mut()) + .await?; + + drop(tx); + if rows.is_empty() { + break; + } + let mut values = Vec::new(); + + for row in rows.iter() { + let 
data: Vec = row.try_get("data")?; + let payload_hash: String = row.try_get("payload_hash")?; + + let da_proposal: DaProposal = bincode::deserialize(&data)?; + let da_proposal2: DaProposal2 = da_proposal.into(); + + let view = da_proposal2.view_number.u64() as i64; + let data = bincode::serialize(&da_proposal2)?; + + values.push((view, payload_hash, data)); + } + + let mut query_builder: sqlx::QueryBuilder = + sqlx::QueryBuilder::new("INSERT INTO da_proposal2 (view, payload_hash, data) "); + + query_builder.push_values(values.into_iter(), |mut b, (view, payload_hash, data)| { + b.push_bind(view).push_bind(payload_hash).push_bind(data); + }); + + let query = query_builder.build(); + + let mut tx = self.db.write().await?; + query.execute(tx.as_mut()).await?; + + tx.commit().await?; + + tracing::info!("DA proposals migration progress: {} rows", offset); + offset += batch_size; + + if rows.len() < batch_size as usize { + break; + } + } + + tracing::warn!("migrated da proposals"); + + let mut tx = self.db.write().await?; + tx.upsert( + "epoch_migration", + ["table_name", "completed"], + ["table_name"], + [("da_proposal".to_string(), true)], + ) + .await?; + tx.commit().await?; + + tracing::info!("updated epoch_migration table for da_proposal"); + + Ok(()) + } + + async fn migrate_vid_shares(&self) -> anyhow::Result<()> { + let batch_size: i64 = 10000; + let mut offset: i64 = 0; + let mut tx = self.db.read().await?; + + let (is_completed,) = query_as::<(bool,)>( + "SELECT completed from epoch_migration WHERE table_name = 'vid_share'", + ) + .fetch_one(tx.as_mut()) + .await?; + + if is_completed { + tracing::info!("vid_share migration already done"); + return Ok(()); + } + + tracing::warn!("migrating vid shares.."); + loop { + let mut tx = self.db.read().await?; + let rows = + query("SELECT payload_hash, data FROM vid_share ORDER BY view LIMIT $1 OFFSET $2") + .bind(batch_size) + .bind(offset) + .fetch_all(tx.as_mut()) + .await?; + + drop(tx); + if rows.is_empty() { + break; + } + let mut values = Vec::new(); + + for row in rows.iter() { + let data: Vec = row.try_get("data")?; + let payload_hash: String = row.try_get("payload_hash")?; + + let vid_share: ADVZDisperseShare = bincode::deserialize(&data)?; + let vid_share2: VidDisperseShare = vid_share.into(); + + let view = vid_share2.view_number().u64() as i64; + let data = bincode::serialize(&vid_share2)?; + + values.push((view, payload_hash, data)); + } + + let mut query_builder: sqlx::QueryBuilder = + sqlx::QueryBuilder::new("INSERT INTO vid_share2 (view, payload_hash, data) "); + + query_builder.push_values(values.into_iter(), |mut b, (view, payload_hash, data)| { + b.push_bind(view).push_bind(payload_hash).push_bind(data); + }); + + let query = query_builder.build(); + + let mut tx = self.db.write().await?; + query.execute(tx.as_mut()).await?; + tx.commit().await?; + tracing::info!("VID shares migration progress: {} rows", offset); + offset += batch_size; + + if rows.len() < batch_size as usize { + break; + } + } + + tracing::warn!("migrated vid shares"); + + let mut tx = self.db.write().await?; + tx.upsert( + "epoch_migration", + ["table_name", "completed"], + ["table_name"], + [("vid_share".to_string(), true)], + ) + .await?; + tx.commit().await?; + + tracing::info!("updated epoch_migration table for vid_share"); + + Ok(()) + } + + async fn migrate_undecided_state(&self) -> anyhow::Result<()> { + let mut tx = self.db.read().await?; + + let row = tx + .fetch_optional("SELECT leaves, state FROM undecided_state WHERE id = 0") + .await?; + + let 
(is_completed,) = query_as::<(bool,)>( + "SELECT completed from epoch_migration WHERE table_name = 'undecided_state'", + ) + .fetch_one(tx.as_mut()) + .await?; + + if is_completed { + tracing::info!("undecided state migration already done"); + + return Ok(()); + } + + tracing::warn!("migrating undecided state.."); + + if let Some(row) = row { + let leaves_bytes: Vec = row.try_get("leaves")?; + let leaves: CommitmentMap = bincode::deserialize(&leaves_bytes)?; + + let leaves2 = upgrade_commitment_map(leaves); + let leaves2_bytes = bincode::serialize(&leaves2)?; + let state_bytes: Vec = row.try_get("state")?; + + let mut tx = self.db.write().await?; + tx.upsert( + "undecided_state2", + ["id", "leaves", "state"], + ["id"], + [(0_i32, leaves2_bytes, state_bytes)], + ) + .await?; + tx.commit().await?; + }; + + tracing::warn!("migrated undecided state"); + + let mut tx = self.db.write().await?; + tx.upsert( + "epoch_migration", + ["table_name", "completed"], + ["table_name"], + [("undecided_state".to_string(), true)], + ) + .await?; + tx.commit().await?; + + tracing::info!("updated epoch_migration table for undecided_state"); + + Ok(()) + } + + async fn migrate_quorum_proposals(&self) -> anyhow::Result<()> { + let batch_size: i64 = 10000; + let mut offset: i64 = 0; + let mut tx = self.db.read().await?; + + let (is_completed,) = query_as::<(bool,)>( + "SELECT completed from epoch_migration WHERE table_name = 'quorum_proposals'", + ) + .fetch_one(tx.as_mut()) + .await?; + + if is_completed { + tracing::info!("quorum proposals migration already done"); + return Ok(()); + } + + tracing::warn!("migrating quorum proposals.."); + + loop { + let mut tx = self.db.read().await?; + let rows = + query("SELECT view, leaf_hash, data FROM quorum_proposals ORDER BY view LIMIT $1 OFFSET $2") + .bind(batch_size) + .bind(offset) + .fetch_all(tx.as_mut()) + .await?; + + drop(tx); + + if rows.is_empty() { + break; + } + + let mut values = Vec::new(); + + for row in rows.iter() { + let leaf_hash: String = row.try_get("leaf_hash")?; + let data: Vec = row.try_get("data")?; + + let quorum_proposal: Proposal> = + bincode::deserialize(&data)?; + let quorum_proposal2: Proposal> = + convert_proposal(quorum_proposal); + + let view = quorum_proposal2.data.view_number().u64() as i64; + let data = bincode::serialize(&quorum_proposal2)?; + + values.push((view, leaf_hash, data)); + } + + let mut query_builder: sqlx::QueryBuilder = + sqlx::QueryBuilder::new("INSERT INTO quorum_proposals2 (view, leaf_hash, data) "); + + query_builder.push_values(values.into_iter(), |mut b, (view, leaf_hash, data)| { + b.push_bind(view).push_bind(leaf_hash).push_bind(data); + }); + + let query = query_builder.build(); + + let mut tx = self.db.write().await?; + query.execute(tx.as_mut()).await?; + tx.commit().await?; + + offset += batch_size; + tracing::info!("quorum proposals migration progress: {} rows", offset); + + if rows.len() < batch_size as usize { + break; + } + } + + tracing::warn!("migrated quorum proposals"); + + let mut tx = self.db.write().await?; + tx.upsert( + "epoch_migration", + ["table_name", "completed"], + ["table_name"], + [("quorum_proposals".to_string(), true)], + ) + .await?; + tx.commit().await?; + + tracing::info!("updated epoch_migration table for quorum_proposals"); + + Ok(()) + } + + async fn migrate_quorum_certificates(&self) -> anyhow::Result<()> { + let batch_size: i64 = 10000; + let mut offset: i64 = 0; + let mut tx = self.db.read().await?; + + let (is_completed,) = query_as::<(bool,)>( + "SELECT completed from 
epoch_migration WHERE table_name = 'quorum_certificate'", + ) + .fetch_one(tx.as_mut()) + .await?; + + if is_completed { + tracing::info!("quorum certificates migration already done"); + return Ok(()); + } + + tracing::warn!("migrating quorum certificates.."); + loop { + let mut tx = self.db.read().await?; + let rows = + query("SELECT view, leaf_hash, data FROM quorum_certificate ORDER BY view LIMIT $1 OFFSET $2") + .bind(batch_size) + .bind(offset) + .fetch_all(tx.as_mut()) + .await?; + + drop(tx); + if rows.is_empty() { + break; + } + let mut values = Vec::new(); + + for row in rows.iter() { + let leaf_hash: String = row.try_get("leaf_hash")?; + let data: Vec = row.try_get("data")?; + + let qc: QuorumCertificate = bincode::deserialize(&data)?; + let qc2: QuorumCertificate2 = qc.to_qc2(); + + let view = qc2.view_number().u64() as i64; + let data = bincode::serialize(&qc2)?; + + values.push((view, leaf_hash, data)); + } + + let mut query_builder: sqlx::QueryBuilder = + sqlx::QueryBuilder::new("INSERT INTO quorum_certificate2 (view, leaf_hash, data) "); + + query_builder.push_values(values.into_iter(), |mut b, (view, leaf_hash, data)| { + b.push_bind(view).push_bind(leaf_hash).push_bind(data); + }); + + let query = query_builder.build(); + + let mut tx = self.db.write().await?; + query.execute(tx.as_mut()).await?; + tx.commit().await?; + offset += batch_size; + + tracing::info!("Quorum certificates migration progress: {} rows", offset); + + if rows.len() < batch_size as usize { + break; + } + } + + tracing::warn!("migrated quorum certificates"); + + let mut tx = self.db.write().await?; + tx.upsert( + "epoch_migration", + ["table_name", "completed"], + ["table_name"], + [("quorum_certificate".to_string(), true)], + ) + .await?; + tx.commit().await?; + tracing::info!("updated epoch_migration table for quorum_certificate"); + Ok(()) } @@ -1387,6 +1830,49 @@ impl SequencerPersistence for Persistence { }) .transpose() } + + async fn append_da2( + &self, + proposal: &Proposal>, + vid_commit: VidCommitment, + ) -> anyhow::Result<()> { + let data = &proposal.data; + let view = data.view_number().u64(); + let data_bytes = bincode::serialize(proposal).unwrap(); + + let mut tx = self.db.write().await?; + tx.upsert( + "da_proposal2", + ["view", "data", "payload_hash"], + ["view"], + [(view as i64, data_bytes, vid_commit.to_string())], + ) + .await?; + tx.commit().await + } + + async fn update_undecided_state2( + &self, + leaves: CommitmentMap, + state: BTreeMap>, + ) -> anyhow::Result<()> { + if !self.store_undecided_state { + return Ok(()); + } + + let leaves_bytes = bincode::serialize(&leaves).context("serializing leaves")?; + let state_bytes = bincode::serialize(&state).context("serializing state")?; + + let mut tx = self.db.write().await?; + tx.upsert( + "undecided_state2", + ["id", "leaves", "state"], + ["id"], + [(0_i32, leaves_bytes, state_bytes)], + ) + .await?; + tx.commit().await + } } #[async_trait] @@ -1402,7 +1888,7 @@ impl Provider for Persistence { }; let bytes = match query_as::<(Vec,)>( - "SELECT data FROM vid_share WHERE payload_hash = $1 LIMIT 1", + "SELECT data FROM vid_share2 WHERE payload_hash = $1 LIMIT 1", ) .bind(req.0.to_string()) .fetch_optional(tx.as_mut()) @@ -1411,12 +1897,12 @@ impl Provider for Persistence { Ok(Some((bytes,))) => bytes, Ok(None) => return None, Err(err) => { - tracing::warn!("error loading VID share: {err:#}"); + tracing::error!("error loading VID share: {err:#}"); return None; } }; - let share: Proposal> = + let share: Proposal> = match 
bincode::deserialize(&bytes) { Ok(share) => share, Err(err) => { @@ -1425,7 +1911,11 @@ impl Provider for Persistence { } }; - Some(share.data.common) + match share.data { + VidDisperseShare::V0(vid) => Some(vid.common), + // TODO (abdul): V1 VID does not have common field + _ => None, + } } } @@ -1442,7 +1932,7 @@ impl Provider for Persistence { }; let bytes = match query_as::<(Vec,)>( - "SELECT data FROM da_proposal WHERE payload_hash = $1 LIMIT 1", + "SELECT data FROM da_proposal2 WHERE payload_hash = $1 LIMIT 1", ) .bind(req.0.to_string()) .fetch_optional(tx.as_mut()) @@ -1456,11 +1946,11 @@ impl Provider for Persistence { } }; - let proposal: Proposal> = match bincode::deserialize(&bytes) + let proposal: Proposal> = match bincode::deserialize(&bytes) { Ok(proposal) => proposal, Err(err) => { - tracing::warn!("error decoding DA proposal: {err:#}"); + tracing::error!("error decoding DA proposal: {err:#}"); return None; } }; @@ -1505,10 +1995,10 @@ impl Provider> for Persistence { async fn fetch_leaf_from_proposals( tx: &mut Transaction, req: LeafRequest, -) -> anyhow::Result)>> { +) -> anyhow::Result)>> { // Look for a quorum proposal corresponding to this leaf. let Some((proposal_bytes,)) = - query_as::<(Vec,)>("SELECT data FROM quorum_proposals WHERE leaf_hash = $1 LIMIT 1") + query_as::<(Vec,)>("SELECT data FROM quorum_proposals2 WHERE leaf_hash = $1 LIMIT 1") .bind(req.expected_leaf.to_string()) .fetch_optional(tx.as_mut()) .await @@ -1519,7 +2009,7 @@ async fn fetch_leaf_from_proposals( // Look for a QC corresponding to this leaf. let Some((qc_bytes,)) = - query_as::<(Vec,)>("SELECT data FROM quorum_certificate WHERE leaf_hash = $1 LIMIT 1") + query_as::<(Vec,)>("SELECT data FROM quorum_certificate2 WHERE leaf_hash = $1 LIMIT 1") .bind(req.expected_leaf.to_string()) .fetch_optional(tx.as_mut()) .await @@ -1528,12 +2018,12 @@ async fn fetch_leaf_from_proposals( return Ok(None); }; - let proposal: Proposal> = + let proposal: Proposal> = bincode::deserialize(&proposal_bytes).context("deserializing quorum proposal")?; - let qc: QuorumCertificate = + let qc: QuorumCertificate2 = bincode::deserialize(&qc_bytes).context("deserializing quorum certificate")?; - let leaf = Leaf::from_quorum_proposal(&proposal.data); + let leaf = Leaf2::from_quorum_proposal(&proposal.data); Ok(Some((leaf, qc))) } @@ -1588,21 +2078,29 @@ mod generic_tests { #[cfg(test)] mod test { + use super::*; use crate::{persistence::testing::TestablePersistence, BLSPubKey, PubKey}; - use espresso_types::{ - traits::NullEventConsumer, Leaf, MockSequencerVersions, NodeState, ValidatedState, - }; + use committable::{Commitment, CommitmentBoundsArkless}; + use espresso_types::{traits::NullEventConsumer, Header, Leaf, NodeState, ValidatedState}; use futures::stream::TryStreamExt; use hotshot_example_types::node_types::TestVersions; use hotshot_types::{ - data::vid_commitment, + data::{ + ns_table::parse_ns_table, vid_commitment, vid_disperse::VidDisperseShare2, EpochNumber, + QuorumProposal2, + }, + message::convert_proposal, simple_certificate::QuorumCertificate, + simple_vote::QuorumData, traits::{ block_contents::BlockHeader, node_implementation::Versions, signature_key::SignatureKey, EncodeBytes, }, - vid::advz::advz_scheme, + vid::{ + advz::advz_scheme, + avidm::{init_avidm_param, AvidMScheme}, + }, }; use jf_vid::VidScheme; use sequencer_utils::test_utils::setup_test; @@ -1697,20 +2195,25 @@ mod test { // Mock up some data. 
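// (With the migrated types, mocking now means dispersing the genesis payload
// with the AvidM scheme rather than ADVZ, wrapping the share in a
// `VidDisperseShare2`, and storing a `DaProposal2` and `QuorumProposalWrapper`,
// since those are the shapes the `*2` tables hold.)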
let leaf = - Leaf::genesis::(&ValidatedState::default(), &NodeState::mock()).await; + Leaf2::genesis::(&ValidatedState::default(), &NodeState::mock()).await; let leaf_payload = leaf.block_payload().unwrap(); let leaf_payload_bytes_arc = leaf_payload.encode(); - let disperse = advz_scheme(2) - .disperse(leaf_payload_bytes_arc.clone()) - .unwrap(); - let payload_commitment = disperse.commit; + + let avidm_param = init_avidm_param(2).unwrap(); + let weights = vec![1u32; 2]; + + let ns_table = parse_ns_table(leaf_payload.byte_len().as_usize(), &leaf_payload.encode()); + let (payload_commitment, shares) = + AvidMScheme::ns_disperse(&avidm_param, &weights, &leaf_payload_bytes_arc, ns_table) + .unwrap(); let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], 1); - let vid_share = ADVZDisperseShare:: { + let vid_share = VidDisperseShare2:: { view_number: ViewNumber::new(0), payload_commitment, - share: disperse.shares[0].clone(), - common: disperse.common, + share: shares[0].clone(), recipient_key: pubkey, + epoch: None, + target_epoch: None, } .to_proposal(&privkey) .unwrap() @@ -1718,14 +2221,14 @@ mod test { let quorum_proposal = QuorumProposalWrapper:: { proposal: QuorumProposal2:: { - epoch: None, block_header: leaf.block_header().clone(), view_number: leaf.view_number(), - justify_qc: leaf.justify_qc().to_qc2(), + justify_qc: leaf.justify_qc(), upgrade_certificate: None, view_change_evidence: None, next_drb_result: None, next_epoch_justify_qc: None, + epoch: None, }, }; let quorum_proposal_signature = @@ -1740,10 +2243,11 @@ mod test { let block_payload_signature = BLSPubKey::sign(&privkey, &leaf_payload_bytes_arc) .expect("Failed to sign block payload"); let da_proposal = Proposal { - data: DaProposal:: { + data: DaProposal2:: { encoded_transactions: leaf_payload_bytes_arc, metadata: leaf_payload.ns_table().clone(), view_number: ViewNumber::new(0), + epoch: None, }, signature: block_payload_signature, _pd: Default::default(), @@ -1757,52 +2261,54 @@ mod test { .proposal .justify_qc .data - .leaf_commit = Committable::commit(&leaf.clone().into()); + .leaf_commit = Committable::commit(&leaf.clone()); let qc = next_quorum_proposal.data.justify_qc(); // Add to database. storage - .append_da(&da_proposal, VidCommitment::V0(payload_commitment)) + .append_da2(&da_proposal, VidCommitment::V1(payload_commitment)) .await .unwrap(); - storage.append_vid(&vid_share).await.unwrap(); storage - .append_quorum_proposal(&quorum_proposal) + .append_vid2(&convert_proposal(vid_share.clone())) + .await + .unwrap(); + storage + .append_quorum_proposal2(&quorum_proposal) .await .unwrap(); // Add an extra quorum proposal so we have a QC pointing back at `leaf`. storage - .append_quorum_proposal(&next_quorum_proposal) + .append_quorum_proposal2(&next_quorum_proposal) .await .unwrap(); // Fetch it as if we were rebuilding an archive. 
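// (The first assertion below now expects `None`: the AvidM (V1) shares stored
// above carry no `common` data, so the `Provider` impl for `VidCommonRequest`
// has nothing to return for a `VidCommitment::V1`; see the TODO on the V1 arm
// of that impl.)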
assert_eq!( - vid_share.data.common, + None, storage - .fetch(VidCommonRequest(VidCommitment::V0( + .fetch(VidCommonRequest(VidCommitment::V1( vid_share.data.payload_commitment ))) .await - .unwrap() ); assert_eq!( leaf_payload, storage - .fetch(PayloadRequest(VidCommitment::V0( + .fetch(PayloadRequest(VidCommitment::V1( vid_share.data.payload_commitment ))) .await .unwrap() ); assert_eq!( - LeafQueryData::new(leaf.clone(), qc.clone().to_qc()).unwrap(), + LeafQueryData::new(leaf.clone(), qc.clone()).unwrap(), storage .fetch(LeafRequest::new( leaf.block_header().block_number(), Committable::commit(&leaf), - qc.clone().to_qc().commit() + qc.clone().commit() )) .await .unwrap() @@ -1828,28 +2334,26 @@ mod test { // Populate some data. let leaf = - Leaf::genesis::(&ValidatedState::default(), &NodeState::mock()) - .await; + Leaf2::genesis::(&ValidatedState::default(), &NodeState::mock()).await; let leaf_payload = leaf.block_payload().unwrap(); let leaf_payload_bytes_arc = leaf_payload.encode(); - let disperse = advz_scheme(2) - .disperse(leaf_payload_bytes_arc.clone()) - .unwrap(); - let payload_commitment = vid_commitment::( - &leaf_payload_bytes_arc, - &leaf.block_header().metadata().encode(), - 2, - ::Base::VERSION, - ) - .unwrap_v0(); + let avidm_param = init_avidm_param(2).unwrap(); + let weights = vec![1u32; 2]; + + let ns_table = parse_ns_table(leaf_payload.byte_len().as_usize(), &leaf_payload.encode()); + let (payload_commitment, shares) = + AvidMScheme::ns_disperse(&avidm_param, &weights, &leaf_payload_bytes_arc, ns_table) + .unwrap(); + let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], 1); - let vid = ADVZDisperseShare:: { + let vid = VidDisperseShare2:: { view_number: data_view, payload_commitment, - share: disperse.shares[0].clone(), - common: disperse.common, + share: shares[0].clone(), recipient_key: pubkey, + epoch: None, + target_epoch: None, } .to_proposal(&privkey) .unwrap() @@ -1859,12 +2363,11 @@ mod test { epoch: None, block_header: leaf.block_header().clone(), view_number: data_view, - justify_qc: QuorumCertificate::genesis::( + justify_qc: QuorumCertificate2::genesis::( &ValidatedState::default(), &NodeState::mock(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_drb_result: None, @@ -1883,23 +2386,24 @@ mod test { let block_payload_signature = BLSPubKey::sign(&privkey, &leaf_payload_bytes_arc) .expect("Failed to sign block payload"); let da_proposal = Proposal { - data: DaProposal:: { + data: DaProposal2:: { encoded_transactions: leaf_payload_bytes_arc.clone(), metadata: leaf_payload.ns_table().clone(), view_number: data_view, + epoch: Some(EpochNumber::new(0)), }, signature: block_payload_signature, _pd: Default::default(), }; tracing::info!(?vid, ?da_proposal, ?quorum_proposal, "append data"); - storage.append_vid(&vid).await.unwrap(); + storage.append_vid2(&vid).await.unwrap(); storage - .append_da(&da_proposal, VidCommitment::V0(payload_commitment)) + .append_da2(&da_proposal, VidCommitment::V1(payload_commitment)) .await .unwrap(); storage - .append_quorum_proposal(&quorum_proposal) + .append_quorum_proposal2(&quorum_proposal) .await .unwrap(); @@ -1912,7 +2416,7 @@ mod test { .unwrap(); assert_eq!( storage.load_vid_share(data_view).await.unwrap().unwrap(), - vid + convert_proposal(vid) ); assert_eq!( storage.load_da_proposal(data_view).await.unwrap().unwrap(), @@ -1962,4 +2466,220 @@ mod test { }) .await } + + #[tokio::test(flavor = "multi_thread")] + async fn test_consensus_migration() { + 
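// Test outline, for orientation: seed 300 views of legacy data (Leaf/QC rows
// into `anchor_leaf`, ADVZ shares via `append_vid`, DA proposals via
// `append_da`, plus raw `quorum_proposals`/`quorum_certificate` rows), run
// `migrate_consensus`, then assert that every `*2` table ends up with one row
// per view. Since the migrations page through the source tables in batches of
// 10_000, 300 rows exercises only the single-batch exit of each pagination
// loop, not a multi-batch run.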
setup_test(); + + let tmp = Persistence::tmp_storage().await; + let mut opt = Persistence::options(&tmp); + + let storage = opt.create().await.unwrap(); + + let rows = 300; + + for i in 0..rows { + let view = ViewNumber::new(i); + let validated_state = ValidatedState::default(); + let instance_state = NodeState::default(); + + let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], i); + let (payload, metadata) = + Payload::from_transactions([], &validated_state, &instance_state) + .await + .unwrap(); + let builder_commitment = payload.builder_commitment(&metadata); + let payload_bytes = payload.encode(); + + let payload_commitment = vid_commitment::( + &payload_bytes, + &metadata.encode(), + 4, + ::Base::VERSION, + ); + + let block_header = Header::genesis( + &instance_state, + payload_commitment, + builder_commitment, + metadata, + ); + + let null_quorum_data = QuorumData { + leaf_commit: Commitment::::default_commitment_no_preimage(), + }; + + let justify_qc = QuorumCertificate::new( + null_quorum_data.clone(), + null_quorum_data.commit(), + view, + None, + std::marker::PhantomData, + ); + + let quorum_proposal = QuorumProposal { + block_header, + view_number: view, + justify_qc: justify_qc.clone(), + upgrade_certificate: None, + proposal_certificate: None, + }; + + let quorum_proposal_signature = + BLSPubKey::sign(&privkey, &bincode::serialize(&quorum_proposal).unwrap()) + .expect("Failed to sign quorum proposal"); + + let proposal = Proposal { + data: quorum_proposal.clone(), + signature: quorum_proposal_signature, + _pd: std::marker::PhantomData, + }; + + let proposal_bytes = bincode::serialize(&proposal) + .context("serializing proposal") + .unwrap(); + + let mut leaf = Leaf::from_quorum_proposal(&quorum_proposal); + leaf.fill_block_payload::( + payload, + 4, + ::Base::VERSION, + ) + .unwrap(); + + let mut tx = storage.db.write().await.unwrap(); + + let qc_bytes = bincode::serialize(&justify_qc).unwrap(); + let leaf_bytes = bincode::serialize(&leaf).unwrap(); + + tx.upsert( + "anchor_leaf", + ["view", "leaf", "qc"], + ["view"], + [(i as i64, leaf_bytes, qc_bytes)], + ) + .await + .unwrap(); + tx.commit().await.unwrap(); + + let disperse = advz_scheme(4).disperse(payload_bytes.clone()).unwrap(); + + let vid = ADVZDisperseShare:: { + view_number: ViewNumber::new(i), + payload_commitment: Default::default(), + share: disperse.shares[0].clone(), + common: disperse.common, + recipient_key: pubkey, + }; + + let (payload, metadata) = + Payload::from_transactions([], &ValidatedState::default(), &NodeState::default()) + .await + .unwrap(); + + let da = DaProposal:: { + encoded_transactions: payload.encode(), + metadata, + view_number: ViewNumber::new(i), + }; + + let block_payload_signature = + BLSPubKey::sign(&privkey, &payload_bytes).expect("Failed to sign block payload"); + + let da_proposal = Proposal { + data: da, + signature: block_payload_signature, + _pd: Default::default(), + }; + + storage + .append_vid(&vid.to_proposal(&privkey).unwrap()) + .await + .unwrap(); + storage + .append_da(&da_proposal, VidCommitment::V0(disperse.commit)) + .await + .unwrap(); + + let leaf_hash = Committable::commit(&leaf); + let mut tx = storage.db.write().await.expect("failed to start write tx"); + tx.upsert( + "quorum_proposals", + ["view", "leaf_hash", "data"], + ["view"], + [(i as i64, leaf_hash.to_string(), proposal_bytes)], + ) + .await + .expect("failed to upsert quorum proposal"); + + let justify_qc = &proposal.data.justify_qc; + let justify_qc_bytes = 
bincode::serialize(&justify_qc) + .context("serializing QC") + .unwrap(); + tx.upsert( + "quorum_certificate", + ["view", "leaf_hash", "data"], + ["view"], + [( + justify_qc.view_number.u64() as i64, + justify_qc.data.leaf_commit.to_string(), + &justify_qc_bytes, + )], + ) + .await + .expect("failed to upsert qc"); + + tx.commit().await.expect("failed to commit"); + } + + storage.migrate_consensus().await.unwrap(); + + let mut tx = storage.db.read().await.unwrap(); + let (anchor_leaf2_count,) = query_as::<(i64,)>("SELECT COUNT(*) from anchor_leaf2") + .fetch_one(tx.as_mut()) + .await + .unwrap(); + assert_eq!( + anchor_leaf2_count, rows as i64, + "anchor leaf count does not match rows", + ); + + let (da_proposal_count,) = query_as::<(i64,)>("SELECT COUNT(*) from da_proposal2") + .fetch_one(tx.as_mut()) + .await + .unwrap(); + assert_eq!( + da_proposal_count, rows as i64, + "da proposal count does not match rows", + ); + + let (vid_share_count,) = query_as::<(i64,)>("SELECT COUNT(*) from vid_share2") + .fetch_one(tx.as_mut()) + .await + .unwrap(); + assert_eq!( + vid_share_count, rows as i64, + "vid share count does not match rows" + ); + + let (quorum_proposals_count,) = + query_as::<(i64,)>("SELECT COUNT(*) from quorum_proposals2") + .fetch_one(tx.as_mut()) + .await + .unwrap(); + assert_eq!( + quorum_proposals_count, rows as i64, + "quorum proposals count does not match rows", + ); + + let (quorum_certificates_count,) = + query_as::<(i64,)>("SELECT COUNT(*) from quorum_certificate2") + .fetch_one(tx.as_mut()) + .await + .unwrap(); + assert_eq!( + quorum_certificates_count, rows as i64, + "quorum certificates count does not match rows", + ); + } } diff --git a/sequencer/src/proposal_fetcher.rs b/sequencer/src/proposal_fetcher.rs index d9232bc5cd..a5d143188a 100644 --- a/sequencer/src/proposal_fetcher.rs +++ b/sequencer/src/proposal_fetcher.rs @@ -196,7 +196,7 @@ where .context("timed out fetching proposal")? 
.context("error fetching proposal")?; self.persistence - .append_quorum_proposal(&proposal) + .append_quorum_proposal2(&proposal) .await .context("error saving fetched proposal")?; diff --git a/sequencer/src/state.rs b/sequencer/src/state.rs index 50eed617ac..e7f9160e41 100644 --- a/sequencer/src/state.rs +++ b/sequencer/src/state.rs @@ -155,8 +155,8 @@ where parent_state, instance, peers, - &parent_leaf.leaf().clone().into(), - &proposed_leaf.leaf().clone().into(), + &parent_leaf.leaf().clone(), + &proposed_leaf.leaf().clone(), ) .await .context("computing state update")?; diff --git a/types/src/v0/impls/block/full_payload/ns_proof.rs b/types/src/v0/impls/block/full_payload/ns_proof.rs index 4d463658d4..837e81885e 100644 --- a/types/src/v0/impls/block/full_payload/ns_proof.rs +++ b/types/src/v0/impls/block/full_payload/ns_proof.rs @@ -1,5 +1,5 @@ -use hotshot_query_service::VidCommitment; use hotshot_types::{ + data::VidCommitment, traits::EncodeBytes, vid::advz::{advz_scheme, ADVZCommon, ADVZScheme}, }; diff --git a/types/src/v0/impls/block/full_payload/ns_proof/test.rs b/types/src/v0/impls/block/full_payload/ns_proof/test.rs index 6bd3849241..6b12ff2fb5 100644 --- a/types/src/v0/impls/block/full_payload/ns_proof/test.rs +++ b/types/src/v0/impls/block/full_payload/ns_proof/test.rs @@ -1,8 +1,8 @@ use futures::future; use hotshot::helpers::initialize_logging; use hotshot::traits::BlockPayload; -use hotshot_query_service::VidCommitment; use hotshot_types::{ + data::VidCommitment, traits::EncodeBytes, vid::advz::{advz_scheme, ADVZScheme}, }; @@ -87,7 +87,7 @@ async fn ns_proof() { let (ns_proof_txs, ns_proof_ns_id) = ns_proof .verify( block.ns_table(), - &hotshot_query_service::VidCommitment::V0(vid.commit), + &VidCommitment::V0(vid.commit), &vid.common, ) .unwrap_or_else(|| panic!("namespace {} proof verification failure", ns_id)); diff --git a/types/src/v0/impls/block/full_payload/ns_table.rs b/types/src/v0/impls/block/full_payload/ns_table.rs index bbab22eb85..574cc46854 100644 --- a/types/src/v0/impls/block/full_payload/ns_table.rs +++ b/types/src/v0/impls/block/full_payload/ns_table.rs @@ -146,11 +146,7 @@ impl NsTable { /// Read subslice range for the `index`th namespace from the namespace /// table. 
- pub(crate) fn ns_range( - &self, - index: &NsIndex, - payload_byte_len: &PayloadByteLen, - ) -> NsPayloadRange { + pub fn ns_range(&self, index: &NsIndex, payload_byte_len: &PayloadByteLen) -> NsPayloadRange { let end = self .read_ns_offset_unchecked(index) .min(payload_byte_len.as_usize()); diff --git a/types/src/v0/impls/block/full_payload/payload.rs b/types/src/v0/impls/block/full_payload/payload.rs index dc328273ea..3707ab3a57 100644 --- a/types/src/v0/impls/block/full_payload/payload.rs +++ b/types/src/v0/impls/block/full_payload/payload.rs @@ -64,7 +64,7 @@ impl Payload { self.read_ns_payload(&ns_payload_range) } - pub(crate) fn byte_len(&self) -> PayloadByteLen { + pub fn byte_len(&self) -> PayloadByteLen { PayloadByteLen(self.raw_payload.len()) } @@ -287,7 +287,7 @@ impl PayloadByteLen { self.0 == expected } - pub(in crate::v0::impls::block::full_payload) fn as_usize(&self) -> usize { + pub fn as_usize(&self) -> usize { self.0 } } diff --git a/types/src/v0/impls/block/test.rs b/types/src/v0/impls/block/test.rs index 1000b6ca18..225332dd51 100755 --- a/types/src/v0/impls/block/test.rs +++ b/types/src/v0/impls/block/test.rs @@ -2,8 +2,8 @@ use std::collections::BTreeMap; use hotshot::traits::BlockPayload; -use hotshot_query_service::{availability::QueryablePayload, VidCommitment}; -use hotshot_types::{traits::EncodeBytes, vid::advz::advz_scheme}; +use hotshot_query_service::availability::QueryablePayload; +use hotshot_types::{data::VidCommitment, traits::EncodeBytes, vid::advz::advz_scheme}; use jf_vid::VidScheme; use rand::RngCore; use sequencer_utils::test_utils::setup_test; diff --git a/types/src/v0/impls/header.rs b/types/src/v0/impls/header.rs index 13940947e6..bc3e982bf7 100644 --- a/types/src/v0/impls/header.rs +++ b/types/src/v0/impls/header.rs @@ -2,10 +2,9 @@ use anyhow::{ensure, Context}; use ark_serialize::CanonicalSerialize; use committable::{Commitment, Committable, RawCommitmentBuilder}; use ethers_conv::ToAlloy; -use hotshot_query_service::{ - availability::QueryableHeader, explorer::ExplorerHeader, VidCommitment, -}; +use hotshot_query_service::{availability::QueryableHeader, explorer::ExplorerHeader}; use hotshot_types::{ + data::VidCommitment, traits::{ block_contents::{BlockHeader, BuilderFee}, node_implementation::NodeType, @@ -13,10 +12,8 @@ use hotshot_types::{ BlockPayload, ValidatedState as _, }, utils::BuilderCommitment, - // vid::advz::{ADVZCommon, ADVZScheme}, }; use jf_merkle_tree::{AppendableMerkleTreeScheme, MerkleTreeScheme}; -// use jf_vid::VidScheme; use serde::{ de::{self, MapAccess, SeqAccess, Visitor}, Deserialize, Deserializer, Serialize, Serializer, diff --git a/types/src/v0/traits.rs b/types/src/v0/traits.rs index b5f5bd5d83..b29d8e76bb 100644 --- a/types/src/v0/traits.rs +++ b/types/src/v0/traits.rs @@ -11,8 +11,8 @@ use hotshot_types::{ consensus::CommitmentMap, data::{ vid_disperse::{ADVZDisperseShare, VidDisperseShare2}, - DaProposal, EpochNumber, QuorumProposal, QuorumProposal2, QuorumProposalWrapper, - VidCommitment, ViewNumber, + DaProposal, DaProposal2, EpochNumber, QuorumProposal, QuorumProposal2, + QuorumProposalWrapper, VidCommitment, VidDisperseShare, ViewNumber, }, event::{HotShotAction, LeafInfo}, message::{convert_proposal, Proposal}, @@ -460,11 +460,11 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { async fn load_vid_share( &self, view: ViewNumber, - ) -> anyhow::Result>>>; + ) -> anyhow::Result>>>; async fn load_da_proposal( &self, view: ViewNumber, - ) -> anyhow::Result>>>; + ) -> 
anyhow::Result>>>; async fn load_upgrade_certificate( &self, ) -> anyhow::Result>>; @@ -521,12 +521,9 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { None => { tracing::info!("no saved leaf, starting from genesis leaf"); ( - hotshot_types::data::Leaf::genesis::(&genesis_validated_state, &state) - .await - .into(), - QuorumCertificate::genesis::(&genesis_validated_state, &state) - .await - .to_qc2(), + hotshot_types::data::Leaf2::genesis::(&genesis_validated_state, &state) + .await, + QuorumCertificate2::genesis::(&genesis_validated_state, &state).await, None, ) } @@ -688,12 +685,13 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { epoch: Option, action: HotShotAction, ) -> anyhow::Result<()>; - async fn update_undecided_state( + + async fn update_undecided_state2( &self, leaves: CommitmentMap, state: BTreeMap>, ) -> anyhow::Result<()>; - async fn append_quorum_proposal( + async fn append_quorum_proposal2( &self, proposal: &Proposal>, ) -> anyhow::Result<()>; @@ -701,13 +699,27 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { &self, decided_upgrade_certificate: Option>, ) -> anyhow::Result<()>; - async fn migrate_consensus( - &self, - migrate_leaf: fn(Leaf) -> Leaf2, - migrate_proposal: fn( - Proposal>, - ) -> Proposal>, - ) -> anyhow::Result<()>; + async fn migrate_consensus(&self) -> anyhow::Result<()> { + tracing::warn!("migrating consensus data..."); + + self.migrate_anchor_leaf().await?; + self.migrate_da_proposals().await?; + self.migrate_vid_shares().await?; + self.migrate_undecided_state().await?; + self.migrate_quorum_proposals().await?; + self.migrate_quorum_certificates().await?; + + tracing::warn!("consensus storage has been migrated to new types"); + + Ok(()) + } + + async fn migrate_anchor_leaf(&self) -> anyhow::Result<()>; + async fn migrate_da_proposals(&self) -> anyhow::Result<()>; + async fn migrate_vid_shares(&self) -> anyhow::Result<()>; + async fn migrate_undecided_state(&self) -> anyhow::Result<()>; + async fn migrate_quorum_proposals(&self) -> anyhow::Result<()>; + async fn migrate_quorum_certificates(&self) -> anyhow::Result<()>; async fn load_anchor_view(&self) -> anyhow::Result { match self.load_anchor_leaf().await? { @@ -724,6 +736,19 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { async fn load_next_epoch_quorum_certificate( &self, ) -> anyhow::Result>>; + + async fn append_da2( + &self, + proposal: &Proposal>, + vid_commit: VidCommitment, + ) -> anyhow::Result<()>; + + async fn append_proposal2( + &self, + proposal: &Proposal>, + ) -> anyhow::Result<()> { + self.append_quorum_proposal2(proposal).await + } } #[async_trait] @@ -775,6 +800,14 @@ impl Storage for Arc

{ (**self).append_da(proposal, vid_commit).await } + async fn append_da2( + &self, + proposal: &Proposal>, + vid_commit: VidCommitment, + ) -> anyhow::Result<()> { + (**self).append_da2(proposal, vid_commit).await + } + async fn record_action( &self, view: ViewNumber, @@ -794,7 +827,7 @@ impl Storage for Arc

{ state: BTreeMap>, ) -> anyhow::Result<()> { (**self) - .update_undecided_state( + .update_undecided_state2( leaves .into_values() .map(|leaf| { @@ -806,16 +839,28 @@ impl Storage for Arc

{ ) .await } - async fn append_proposal( &self, proposal: &Proposal>, ) -> anyhow::Result<()> { (**self) - .append_quorum_proposal(&convert_proposal(proposal.clone())) + .append_quorum_proposal2(&convert_proposal(proposal.clone())) .await } + async fn append_proposal2( + &self, + proposal: &Proposal>, + ) -> anyhow::Result<()> { + let proposal_qp_wrapper: Proposal> = + convert_proposal(proposal.clone()); + (**self).append_quorum_proposal2(&proposal_qp_wrapper).await + } + + async fn update_high_qc2(&self, _high_qc: QuorumCertificate2) -> anyhow::Result<()> { + Ok(()) + } + async fn update_decided_upgrade_certificate( &self, decided_upgrade_certificate: Option>, @@ -824,6 +869,14 @@ impl Storage for Arc

{ .store_upgrade_certificate(decided_upgrade_certificate) .await } + + async fn update_undecided_state2( + &self, + leaves: CommitmentMap, + state: BTreeMap>, + ) -> anyhow::Result<()> { + (**self).update_undecided_state2(leaves, state).await + } } /// Data that can be deserialized from a subslice of namespace payload bytes. diff --git a/types/src/v0/utils.rs b/types/src/v0/utils.rs index fdac8c80e0..d2c32e598f 100644 --- a/types/src/v0/utils.rs +++ b/types/src/v0/utils.rs @@ -6,7 +6,7 @@ use derive_more::{From, Into}; use futures::future::BoxFuture; use hotshot_types::{ consensus::CommitmentMap, - data::{Leaf, Leaf2, QuorumProposal}, + data::{Leaf, Leaf2}, traits::node_implementation::NodeType, }; use rand::Rng; @@ -25,25 +25,6 @@ use time::{ }; use tokio::time::sleep; -pub fn downgrade_leaf(leaf2: Leaf2) -> Leaf { - // TODO verify removal. It doesn't seem we need this check, but lets double check. - // if leaf2.drb_seed != INITIAL_DRB_SEED_INPUT && leaf2.drb_result != INITIAL_DRB_RESULT { - // panic!("Downgrade of Leaf2 to Leaf will lose DRB information!"); - // } - let quorum_proposal = QuorumProposal { - block_header: leaf2.block_header().clone(), - view_number: leaf2.view_number(), - justify_qc: leaf2.justify_qc().to_qc(), - upgrade_certificate: leaf2.upgrade_certificate(), - proposal_certificate: None, - }; - let mut leaf = Leaf::from_quorum_proposal(&quorum_proposal); - if let Some(payload) = leaf2.block_payload() { - leaf.fill_block_payload_unchecked(payload); - } - leaf -} - pub fn upgrade_commitment_map( map: CommitmentMap>, ) -> CommitmentMap> { @@ -55,17 +36,6 @@ pub fn upgrade_commitment_map( .collect() } -pub fn downgrade_commitment_map( - map: CommitmentMap>, -) -> CommitmentMap> { - map.into_values() - .map(|leaf2| { - let leaf = downgrade_leaf(leaf2); - ( as Committable>::commit(&leaf), leaf) - }) - .collect() -} - #[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize)] pub enum Update { #[default] From b08ddc0ece9a5b9397440aa7322638367a5ee12b Mon Sep 17 00:00:00 2001 From: Mathis Date: Thu, 6 Mar 2025 21:05:05 +0100 Subject: [PATCH 03/17] Use latest ubuntu LTS version as base for images (#2724) * Use latest ubuntu LTS version as base for images We build on ubuntu-latest on the CI which will upgrade automatically so I think it makes sense to do so as well for our docker images. I'm seeing errors of this style /bin/orchestrator: /lib/x86_64-linux-gnu/libc.so.6: version `GLIBC_2.39' not found (required by /bin/orchestrator) to reproduce docker pull ghcr.io/espressosystems/espresso-sequencer/orchestrator:main docker run ghcr.io/espressosystems/espresso-sequencer/orchestrator:main * fix: local docker build script If we run with `--all-features` we build CUDA support for icicle which is not something we actually want. 
* dev env: use openssl 3.0 * dev-node: use sqlite instead of postgres --- docker/cdn-broker.Dockerfile | 2 +- docker/cdn-marshal.Dockerfile | 2 +- docker/cdn-whitelist.Dockerfile | 2 +- docker/deploy.Dockerfile | 2 +- docker/dev-rollup.Dockerfile | 2 +- docker/espresso-bridge.Dockerfile | 2 +- docker/espresso-dev-node.Dockerfile | 7 ++---- docker/marketplace-builder.Dockerfile | 2 +- docker/marketplace-solver.Dockerfile | 2 +- docker/nasty-client.Dockerfile | 2 +- docker/node-validator.Dockerfile | 2 +- docker/orchestrator.Dockerfile | 2 +- docker/permissionless-builder.Dockerfile | 2 +- docker/prover-service.Dockerfile | 2 +- docker/sequencer.Dockerfile | 2 +- docker/state-relay-server.Dockerfile | 2 +- docker/submit-transactions.Dockerfile | 2 +- flake.nix | 8 +++---- scripts/build-docker-images-native | 4 ++-- scripts/build-docker-images-static | 4 ++-- scripts/launch-dev-node-with-postgres | 29 ------------------------ 21 files changed, 26 insertions(+), 58 deletions(-) delete mode 100644 scripts/launch-dev-node-with-postgres diff --git a/docker/cdn-broker.Dockerfile b/docker/cdn-broker.Dockerfile index 608e25b4b4..18e4eb1ba4 100644 --- a/docker/cdn-broker.Dockerfile +++ b/docker/cdn-broker.Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:jammy +FROM ubuntu:latest ARG TARGETARCH diff --git a/docker/cdn-marshal.Dockerfile b/docker/cdn-marshal.Dockerfile index b21b5fa116..a3d7027e15 100644 --- a/docker/cdn-marshal.Dockerfile +++ b/docker/cdn-marshal.Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:jammy +FROM ubuntu:latest ARG TARGETARCH diff --git a/docker/cdn-whitelist.Dockerfile b/docker/cdn-whitelist.Dockerfile index 551de1cbf0..dcaa952563 100644 --- a/docker/cdn-whitelist.Dockerfile +++ b/docker/cdn-whitelist.Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:jammy +FROM ubuntu:latest ARG TARGETARCH diff --git a/docker/deploy.Dockerfile b/docker/deploy.Dockerfile index 09d53c3301..79dea6896d 100644 --- a/docker/deploy.Dockerfile +++ b/docker/deploy.Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:jammy +FROM ubuntu:latest ARG TARGETARCH diff --git a/docker/dev-rollup.Dockerfile b/docker/dev-rollup.Dockerfile index 337c482bcf..dcd54f0979 100644 --- a/docker/dev-rollup.Dockerfile +++ b/docker/dev-rollup.Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:jammy +FROM ubuntu:latest ARG TARGETARCH diff --git a/docker/espresso-bridge.Dockerfile b/docker/espresso-bridge.Dockerfile index ad7db1839f..48d7ca793a 100644 --- a/docker/espresso-bridge.Dockerfile +++ b/docker/espresso-bridge.Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:jammy +FROM ubuntu:latest ARG TARGETARCH diff --git a/docker/espresso-dev-node.Dockerfile b/docker/espresso-dev-node.Dockerfile index e9a366aa12..d4f9efcf78 100644 --- a/docker/espresso-dev-node.Dockerfile +++ b/docker/espresso-dev-node.Dockerfile @@ -1,4 +1,4 @@ -FROM postgres +FROM ubuntu:latest ARG TARGETARCH @@ -17,9 +17,6 @@ RUN chmod +x /bin/espresso-dev-node # Download the anvil binary RUN curl -L https://github.com/foundry-rs/foundry/releases/download/nightly/foundry_nightly_linux_${TARGETARCH}.tar.gz --output -| tar -xzvf - -C /bin/ anvil -COPY scripts/launch-dev-node-with-postgres /bin/launch-dev-node-with-postgres -RUN chmod +x /bin/launch-dev-node-with-postgres - # When running as a Docker service, we always want a healthcheck endpoint, so set a default for the # port that the HTTP server will run on. This can be overridden in any given deployment environment. 
ENV ESPRESSO_SEQUENCER_API_PORT=8770 @@ -33,4 +30,4 @@ EXPOSE 8770 EXPOSE 8771 EXPOSE 8772 -CMD [ "/bin/launch-dev-node-with-postgres"] +CMD [ "/bin/espresso-dev-node" ] diff --git a/docker/marketplace-builder.Dockerfile b/docker/marketplace-builder.Dockerfile index 04242b76df..2d08cfa485 100644 --- a/docker/marketplace-builder.Dockerfile +++ b/docker/marketplace-builder.Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:jammy +FROM ubuntu:latest ARG TARGETARCH diff --git a/docker/marketplace-solver.Dockerfile b/docker/marketplace-solver.Dockerfile index c244479ec6..c29ea9e200 100644 --- a/docker/marketplace-solver.Dockerfile +++ b/docker/marketplace-solver.Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:jammy +FROM ubuntu:latest ARG TARGETARCH diff --git a/docker/nasty-client.Dockerfile b/docker/nasty-client.Dockerfile index 3b58f18145..827c7ad8d4 100644 --- a/docker/nasty-client.Dockerfile +++ b/docker/nasty-client.Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:jammy +FROM ubuntu:latest ARG TARGETARCH diff --git a/docker/node-validator.Dockerfile b/docker/node-validator.Dockerfile index 5a7690f656..17ee300c84 100644 --- a/docker/node-validator.Dockerfile +++ b/docker/node-validator.Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:jammy +FROM ubuntu:latest ARG TARGETARCH diff --git a/docker/orchestrator.Dockerfile b/docker/orchestrator.Dockerfile index 83180d016e..7ae2fa90f2 100644 --- a/docker/orchestrator.Dockerfile +++ b/docker/orchestrator.Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:jammy +FROM ubuntu:latest ARG TARGETARCH diff --git a/docker/permissionless-builder.Dockerfile b/docker/permissionless-builder.Dockerfile index 82424dd6ee..a41fb984f6 100644 --- a/docker/permissionless-builder.Dockerfile +++ b/docker/permissionless-builder.Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:jammy +FROM ubuntu:latest ARG TARGETARCH diff --git a/docker/prover-service.Dockerfile b/docker/prover-service.Dockerfile index 8e329c54e4..1652ea3c93 100644 --- a/docker/prover-service.Dockerfile +++ b/docker/prover-service.Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:jammy +FROM ubuntu:latest ARG TARGETARCH diff --git a/docker/sequencer.Dockerfile b/docker/sequencer.Dockerfile index 320a29868c..ee2e7a6491 100644 --- a/docker/sequencer.Dockerfile +++ b/docker/sequencer.Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:jammy +FROM ubuntu:latest ARG TARGETARCH diff --git a/docker/state-relay-server.Dockerfile b/docker/state-relay-server.Dockerfile index 9d3bd5137c..7393ca447b 100644 --- a/docker/state-relay-server.Dockerfile +++ b/docker/state-relay-server.Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:jammy +FROM ubuntu:latest ARG TARGETARCH diff --git a/docker/submit-transactions.Dockerfile b/docker/submit-transactions.Dockerfile index b0d239a295..d41a606802 100644 --- a/docker/submit-transactions.Dockerfile +++ b/docker/submit-transactions.Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:jammy +FROM ubuntu:latest ARG TARGETARCH diff --git a/flake.nix b/flake.nix index a3ea27c3cd..f6f5f83242 100644 --- a/flake.nix +++ b/flake.nix @@ -186,7 +186,7 @@ buildInputs = [ # Rust dependencies pkg-config - openssl + openssl_3 # match ubuntu 24.04 that we use on CI and as base image in docker curl protobuf # to compile libp2p-autonat stableToolchain @@ -272,7 +272,7 @@ buildInputs = [ # Rust dependencies pkg-config - openssl + openssl_3 curl protobuf # to compile libp2p-autonat toolchain @@ -286,7 +286,7 @@ buildInputs = [ # Rust dependencies pkg-config - openssl + openssl_3 curl protobuf # to compile libp2p-autonat toolchain @@ -309,7 +309,7 @@ buildInputs = [ # Rust dependencies pkg-config 
- openssl + openssl_3 curl protobuf # to compile libp2p-autonat stableToolchain diff --git a/scripts/build-docker-images-native b/scripts/build-docker-images-native index e047d6c98d..e6f4f964d5 100755 --- a/scripts/build-docker-images-native +++ b/scripts/build-docker-images-native @@ -51,7 +51,7 @@ case $KERNEL in CARGO_TARGET_DIR=target cargo build --release # espresso-dev-node requires embedded-db feature to build so we build it separately - cargo build --bin espresso-dev-node --release --all-features + cargo build --bin espresso-dev-node --release --features 'testing embedded-db' # building sequencer-sqlite binary cargo build --release --manifest-path ./sequencer-sqlite/Cargo.toml ;; @@ -131,4 +131,4 @@ docker build --platform $PLATFORM -t ghcr.io/espressosystems/espresso-sequencer/ docker build --platform $PLATFORM -t ghcr.io/espressosystems/espresso-sequencer/marketplace-solver:main -f docker/marketplace-solver.Dockerfile ${WORKDIR} docker build --platform $PLATFORM -t ghcr.io/espressosystems/espresso-sequencer/marketplace-builder:main -f docker/marketplace-builder.Dockerfile ${WORKDIR} docker build --platform $PLATFORM -t ghcr.io/espressosystems/espresso-sequencer/node-validator:main -f docker/node-validator.Dockerfile ${WORKDIR} -docker build --platform $PLATFORM -t ghcr.io/espressosystems/espresso-sequencer/dev-rollup:main -f docker/dev-rollup.Dockerfile ${WORKDIR} \ No newline at end of file +docker build --platform $PLATFORM -t ghcr.io/espressosystems/espresso-sequencer/dev-rollup:main -f docker/dev-rollup.Dockerfile ${WORKDIR} diff --git a/scripts/build-docker-images-static b/scripts/build-docker-images-static index 9f892328ab..9197255e88 100755 --- a/scripts/build-docker-images-static +++ b/scripts/build-docker-images-static @@ -9,11 +9,11 @@ set -euxo pipefail nix develop .#crossShell --ignore-environment --command bash -c "cargo build --release \ && cargo build --release --manifest-path ./sequencer-sqlite/Cargo.toml \ -&& cargo build --bin espresso-dev-node --release --all-features" +&& cargo build --bin espresso-dev-node --release --features='testing embedded-db'" nix develop .#armCrossShell --ignore-environment --command bash -c "cargo build --release \ && cargo build --release --manifest-path ./sequencer-sqlite/Cargo.toml \ -&& cargo build --bin espresso-dev-node --release --all-features" +&& cargo build --bin espresso-dev-node --release --features='testing embedded-db'" # The rest of the script doesn't run in a nix shell but we need to know where # the binaries are. 
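Both build scripts now enable only the named `testing` and `embedded-db` features, because `--all-features` would also switch on the unwanted CUDA support for icicle noted in the commit message above. For orientation, a minimal sketch of how a Cargo feature gate like `embedded-db` behaves (the function below is invented for illustration, not taken from this repo):

// Compiled only when built with `--features embedded-db` (or `--all-features`,
// which is exactly why the scripts avoid the latter).
#[cfg(feature = "embedded-db")]
pub fn storage_backend() -> &'static str {
    "sqlite"
}

// Fallback used when the feature is off, e.g. a plain `cargo build --release`.
#[cfg(not(feature = "embedded-db"))]
pub fn storage_backend() -> &'static str {
    "postgres"
}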
diff --git a/scripts/launch-dev-node-with-postgres b/scripts/launch-dev-node-with-postgres deleted file mode 100644 index 97156754e9..0000000000 --- a/scripts/launch-dev-node-with-postgres +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env bash -set -euxo pipefail - -export ESPRESSO_SEQUENCER_POSTGRES_HOST=${ESPRESSO_SEQUENCER_POSTGRES_HOST:-localhost} -export ESPRESSO_SEQUENCER_POSTGRES_PORT=${ESPRESSO_SEQUENCER_POSTGRES_PORT:-5432} -export ESPRESSO_SEQUENCER_POSTGRES_USER=${ESPRESSO_SEQUENCER_POSTGRES_USER:-root} -export ESPRESSO_SEQUENCER_POSTGRES_PASSWORD=${ESPRESSO_SEQUENCER_POSTGRES_PASSWORD:-password} - -export POSTGRES_USER=$ESPRESSO_SEQUENCER_POSTGRES_USER -export POSTGRES_PASSWORD=$ESPRESSO_SEQUENCER_POSTGRES_PASSWORD - -export RUST_LOG=${RUST_LOG:-info} - -# Trap SIGTERM and SIGINT signals and send them to the process group -trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM EXIT - -# Start postgres in the background -docker-entrypoint.sh postgres & - -# Wait (twice) for postgres to be ready -# Postgres can be falsely "ready" once before running init scripts. -until pg_isready && sleep 1 && pg_isready; do - echo "Waiting for postgres..." - sleep 1 -done - -# Start the dev node -espresso-dev-node & -wait From 3019354ff5916172d83d54fa8114494ce0dad347 Mon Sep 17 00:00:00 2001 From: Mathis Date: Thu, 6 Mar 2025 21:07:24 +0100 Subject: [PATCH 04/17] CI: use public runners for cargo features check (#2727) * CI: use public runners for cargo features check This isn't a very important job and it's not required to merge so it's IMO fine if it's a bit slower. This frees up buildjet runners for other jobs. * CI: somewhat strangely, we run out of disk space * CI: delete less Currently seems to delete something we need. --- stderr sqlite3/sqlite3.h:35:10: fatal error: 'stdarg.h' file not found --- .github/workflows/cargo-features.yml | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/.github/workflows/cargo-features.yml b/.github/workflows/cargo-features.yml index 0561d93bea..208ba94fe8 100644 --- a/.github/workflows/cargo-features.yml +++ b/.github/workflows/cargo-features.yml @@ -19,13 +19,24 @@ concurrency: jobs: cargo-features: - runs-on: buildjet-8vcpu-ubuntu-2204 + runs-on: ubuntu-latest steps: - uses: taiki-e/install-action@cargo-hack - name: Checkout Repository uses: actions/checkout@v4 + - name: Free Disk Space (Ubuntu) + uses: jlumbroso/free-disk-space@main + with: + tool-cache: false + android: true + dotnet: true + haskell: true + large-packages: false + docker-images: false + swap-storage: false + # Note: this job doesn't use a cache on purpose because it mostly compiles # the crates in this repo over and over again with different feature # combinations. 
Adding caching would not speed it up much and further From 7608376b014080653b596e1c99186ed02ad19594 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 6 Mar 2025 17:58:24 -0500 Subject: [PATCH 05/17] Stake Table Catchup in Hotshot (#2623) Adds a catchup mechanism to Hotshot via a wrapper around `Membership` --- Cargo.lock | 1 + hotshot-examples/infra/mod.rs | 16 +- .../examples/simple-server.rs | 7 +- .../src/testing/consensus.rs | 5 +- hotshot-task-impls/src/consensus/handlers.rs | 86 ++-- hotshot-task-impls/src/consensus/mod.rs | 6 +- hotshot-task-impls/src/da.rs | 69 +-- hotshot-task-impls/src/helpers.rs | 68 +-- hotshot-task-impls/src/network.rs | 87 +++- .../src/quorum_proposal/handlers.rs | 30 +- hotshot-task-impls/src/quorum_proposal/mod.rs | 47 +- .../src/quorum_proposal_recv/handlers.rs | 7 +- .../src/quorum_proposal_recv/mod.rs | 23 +- .../src/quorum_vote/handlers.rs | 84 ++-- hotshot-task-impls/src/quorum_vote/mod.rs | 55 ++- hotshot-task-impls/src/request.rs | 39 +- hotshot-task-impls/src/response.rs | 16 +- hotshot-task-impls/src/transactions.rs | 12 +- hotshot-task-impls/src/upgrade.rs | 52 ++- hotshot-task-impls/src/vid.rs | 17 +- hotshot-task-impls/src/view_sync.rs | 109 ++--- hotshot-task-impls/src/vote_collection.rs | 155 +++---- .../src/byzantine/byzantine_behaviour.rs | 3 +- hotshot-testing/src/helpers.rs | 82 ++-- hotshot-testing/src/spinning_task.rs | 4 +- hotshot-testing/src/test_builder.rs | 8 +- hotshot-testing/src/test_runner.rs | 7 +- hotshot-testing/src/view_generator.rs | 92 ++-- hotshot-testing/tests/tests_1/da_task.rs | 24 +- hotshot-testing/tests/tests_1/message.rs | 14 +- hotshot-testing/tests/tests_1/network_task.rs | 12 +- .../tests_1/quorum_proposal_recv_task.rs | 4 +- .../tests/tests_1/quorum_proposal_task.rs | 80 ++-- .../tests/tests_1/quorum_vote_task.rs | 6 +- .../tests_1/upgrade_task_with_proposal.rs | 17 +- .../tests/tests_1/upgrade_task_with_vote.rs | 2 +- hotshot-testing/tests/tests_1/vid_task.rs | 10 +- .../tests/tests_1/vote_dependency_handle.rs | 5 +- hotshot-types/Cargo.toml | 1 + hotshot-types/src/consensus.rs | 5 +- hotshot-types/src/data.rs | 3 +- hotshot-types/src/data/vid_disperse.rs | 37 +- hotshot-types/src/epoch_membership.rs | 425 ++++++++++++++++++ hotshot-types/src/lib.rs | 2 + hotshot-types/src/message.rs | 17 +- hotshot-types/src/simple_certificate.rs | 122 ++--- hotshot-types/src/traits/election.rs | 18 +- hotshot-types/src/traits/network.rs | 8 +- hotshot-types/src/utils.rs | 11 +- hotshot-types/src/vote.rs | 42 +- hotshot/src/lib.rs | 46 +- hotshot/src/tasks/mod.rs | 25 +- hotshot/src/tasks/task_state.rs | 20 +- .../traits/election/randomized_committee.rs | 15 +- .../election/randomized_committee_members.rs | 10 + .../src/traits/election/static_committee.rs | 10 + .../static_committee_leader_two_views.rs | 11 + .../traits/election/two_static_committees.rs | 10 + .../src/traits/networking/combined_network.rs | 3 +- .../src/traits/networking/libp2p_network.rs | 41 +- hotshot/src/types/handle.rs | 38 +- sequencer-sqlite/Cargo.lock | 1 + sequencer/src/api.rs | 13 +- sequencer/src/context.rs | 11 +- sequencer/src/message_compat_tests.rs | 14 +- types/src/v0/impls/stake_table.rs | 14 +- 66 files changed, 1468 insertions(+), 866 deletions(-) create mode 100644 hotshot-types/src/epoch_membership.rs diff --git a/Cargo.lock b/Cargo.lock index 1cb7767d15..7b0358f5c0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5594,6 +5594,7 @@ dependencies = [ "ark-serialize 0.4.2", "ark-srs", "ark-std 0.4.0", + "async-broadcast", "async-lock
3.4.0", "async-trait", "bincode", diff --git a/hotshot-examples/infra/mod.rs b/hotshot-examples/infra/mod.rs index c125bd371f..875d849678 100755 --- a/hotshot-examples/infra/mod.rs +++ b/hotshot-examples/infra/mod.rs @@ -54,6 +54,7 @@ use hotshot_testing::block_builder::{ use hotshot_types::{ consensus::ConsensusMetricsValue, data::{Leaf, TestableLeaf}, + epoch_membership::EpochMembershipCoordinator, event::{Event, EventType}, network::{BuilderType, NetworkConfig, NetworkConfigFile, NetworkConfigSource}, traits::{ @@ -388,13 +389,14 @@ pub trait RunDa< // TODO: we need to pass a valid fallback builder url here somehow fallback_builder_url: config.config.builder_urls.first().clone(), }; + let epoch_height = config.config.epoch_height; SystemContext::init( pk, sk, config.node_index, config.config, - membership, + EpochMembershipCoordinator::new(membership, epoch_height), Arc::from(network), initializer, ConsensusMetricsValue::default(), @@ -524,15 +526,15 @@ pub trait RunDa< } } } + // Panic if we don't have the genesis epoch, there is no recovery from that let num_eligible_leaders = context .hotshot - .memberships - .read() + .membership_coordinator + .membership_for_epoch(genesis_epoch_from_version::()) + .await + .unwrap() + .committee_leaders(TYPES::View::genesis()) .await - .committee_leaders( - TYPES::View::genesis(), - genesis_epoch_from_version::(), - ) .len(); let consensus_lock = context.hotshot.consensus(); let consensus_reader = consensus_lock.read().await; diff --git a/hotshot-query-service/examples/simple-server.rs b/hotshot-query-service/examples/simple-server.rs index a4e2ebd36c..847f8c60bc 100644 --- a/hotshot-query-service/examples/simple-server.rs +++ b/hotshot-query-service/examples/simple-server.rs @@ -42,6 +42,7 @@ use hotshot_query_service::{ use hotshot_testing::block_builder::{SimpleBuilderImplementation, TestBuilderImplementation}; use hotshot_types::{ consensus::ConsensusMetricsValue, + epoch_membership::EpochMembershipCoordinator, light_client::StateKeyPair, signature_key::BLSPubKey, traits::{election::Membership, network::Topic}, @@ -236,13 +237,17 @@ async fn init_consensus( )); let storage: TestStorage = TestStorage::default(); + let coordinator = EpochMembershipCoordinator::new( + Arc::new(RwLock::new(membership)), + config.epoch_height, + ); SystemContext::init( pub_keys[node_id], priv_key, node_id as u64, config, - Arc::new(RwLock::new(membership)), + coordinator, network, HotShotInitializer::from_genesis::( TestInstanceState::default(), diff --git a/hotshot-query-service/src/testing/consensus.rs b/hotshot-query-service/src/testing/consensus.rs index 88a146aa79..28236ad6bf 100644 --- a/hotshot-query-service/src/testing/consensus.rs +++ b/hotshot-query-service/src/testing/consensus.rs @@ -38,6 +38,7 @@ use hotshot_example_types::{ use hotshot_testing::block_builder::{SimpleBuilderImplementation, TestBuilderImplementation}; use hotshot_types::{ consensus::ConsensusMetricsValue, + epoch_membership::EpochMembershipCoordinator, light_client::StateKeyPair, signature_key::BLSPubKey, traits::{election::Membership, network::Topic, signature_key::SignatureKey as _}, @@ -181,13 +182,15 @@ impl MockNetwork { )); let hs_storage: TestStorage = TestStorage::default(); + let memberships = + EpochMembershipCoordinator::new(membership, config.epoch_height); let hotshot = SystemContext::init( pub_keys[node_id], priv_key, node_id as u64, config, - membership, + memberships, network, HotShotInitializer::from_genesis::( TestInstanceState::default(), diff --git 
a/hotshot-task-impls/src/consensus/handlers.rs b/hotshot-task-impls/src/consensus/handlers.rs index 64b8e19960..e33b2596ee 100644 --- a/hotshot-task-impls/src/consensus/handlers.rs +++ b/hotshot-task-impls/src/consensus/handlers.rs @@ -11,10 +11,7 @@ use chrono::Utc; use hotshot_types::{ event::{Event, EventType}, simple_vote::{HasEpoch, QuorumVote2, TimeoutData2, TimeoutVote2}, - traits::{ - election::Membership, - node_implementation::{ConsensusTime, NodeImplementation, NodeType}, - }, + traits::node_implementation::{ConsensusTime, NodeImplementation, NodeType}, utils::EpochTransitionIndicator, vote::{HasViewNumber, Vote}, }; @@ -47,12 +44,14 @@ pub(crate) async fn handle_quorum_vote_recv< .read() .await .is_high_qc_for_last_block(); - let we_are_leader = task_state - .membership - .read() + let epoch_membership = task_state + .membership_coordinator + .membership_for_epoch(vote.data.epoch) .await - .leader(vote.view_number() + 1, vote.data.epoch)? - == task_state.public_key; + .context(warn!("No stake table for epoch"))?; + + let we_are_leader = + epoch_membership.leader(vote.view_number() + 1).await? == task_state.public_key; ensure!( in_transition || we_are_leader, info!( @@ -70,8 +69,7 @@ pub(crate) async fn handle_quorum_vote_recv< &mut task_state.vote_collectors, vote, task_state.public_key.clone(), - &task_state.membership, - vote.data.epoch, + &epoch_membership, task_state.id, &event, sender, @@ -80,20 +78,19 @@ pub(crate) async fn handle_quorum_vote_recv< ) .await?; - if let Some(vote_epoch) = vote.epoch() { + if vote.epoch().is_some() { // If the vote sender belongs to the next epoch, collect it separately to form the second QC - let has_stake = task_state - .membership - .read() - .await - .has_stake(&vote.signing_key(), Some(vote_epoch + 1)); + let has_stake = epoch_membership + .next_epoch() + .await? + .has_stake(&vote.signing_key()) + .await; if has_stake { handle_vote( &mut task_state.next_epoch_vote_collectors, &vote.clone().into(), task_state.public_key.clone(), - &task_state.membership, - vote.data.epoch, + &epoch_membership.next_epoch().await?.clone(), task_state.id, &event, sender, @@ -118,14 +115,14 @@ pub(crate) async fn handle_timeout_vote_recv< sender: &Sender>>, task_state: &mut ConsensusTaskState, ) -> Result<()> { + let epoch_membership = task_state + .membership_coordinator + .membership_for_epoch(task_state.cur_epoch) + .await + .context(warn!("No stake table for epoch"))?; // Are we the leader for this view? ensure!( - task_state - .membership - .read() - .await - .leader(vote.view_number() + 1, task_state.cur_epoch)? - == task_state.public_key, + epoch_membership.leader(vote.view_number() + 1).await? == task_state.public_key, info!( "We are not the leader for view {:?}", vote.view_number() + 1 @@ -136,8 +133,10 @@ pub(crate) async fn handle_timeout_vote_recv< &mut task_state.timeout_vote_collectors, vote, task_state.public_key.clone(), - &task_state.membership, - vote.data.epoch, + &task_state + .membership_coordinator + .membership_for_epoch(vote.data.epoch) + .await?, task_state.id, &event, sender, @@ -201,10 +200,11 @@ pub async fn send_high_qc ensure!( task_state - .membership - .read() + .membership_coordinator + .membership_for_epoch(epoch) .await - .has_stake(&task_state.public_key, epoch), + .context(warn!("No stake table for epoch"))? 
+ .has_stake(&task_state.public_key) + .await, debug!( "We were not chosen for the consensus committee for view {:?}", view_number @@ -416,10 +420,12 @@ pub(crate) async fn handle_timeout .await; let leader = task_state - .membership - .read() + .membership_coordinator + .membership_for_epoch(task_state.cur_epoch) .await - .leader(view_number, task_state.cur_epoch); + .context(warn!("No stake table for epoch"))? + .leader(view_number) + .await; let consensus_reader = task_state.consensus.read().await; consensus_reader.metrics.number_of_timeouts.add(1); diff --git a/hotshot-task-impls/src/consensus/mod.rs b/hotshot-task-impls/src/consensus/mod.rs index 202e174152..6e303ee4fd 100644 --- a/hotshot-task-impls/src/consensus/mod.rs +++ b/hotshot-task-impls/src/consensus/mod.rs @@ -5,11 +5,11 @@ // along with the HotShot repository. If not, see . use async_broadcast::{Receiver, Sender}; -use async_lock::RwLock; use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::OuterConsensus, + epoch_membership::EpochMembershipCoordinator, event::Event, message::UpgradeLock, simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate2, TimeoutCertificate2}, @@ -53,7 +53,7 @@ pub struct ConsensusTaskState, V: pub network: Arc, /// Membership for Quorum Certs/votes - pub membership: Arc>, + pub membership_coordinator: EpochMembershipCoordinator, /// A map of `QuorumVote` collector tasks. pub vote_collectors: VoteCollectorsMap, QuorumCertificate2, V>, @@ -183,7 +183,7 @@ impl, V: Versions> ConsensusTaskSt high_qc, Some(next_epoch_high_qc), &self.consensus, - &self.membership, + &self.membership_coordinator, &self.upgrade_lock, ) .await diff --git a/hotshot-task-impls/src/da.rs b/hotshot-task-impls/src/da.rs index 9ce863d193..f8bf3add36 100644 --- a/hotshot-task-impls/src/da.rs +++ b/hotshot-task-impls/src/da.rs @@ -10,15 +10,15 @@ use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; use async_trait::async_trait; use hotshot_task::task::TaskState; +use hotshot_types::epoch_membership::EpochMembershipCoordinator; use hotshot_types::{ consensus::{Consensus, OuterConsensus, PayloadWithMetadata}, data::{vid_commitment, DaProposal2, PackedBundle}, event::{Event, EventType}, message::{Proposal, UpgradeLock}, simple_certificate::DaCertificate2, - simple_vote::{DaData2, DaVote2, HasEpoch}, + simple_vote::{DaData2, DaVote2}, traits::{ - election::Membership, network::ConnectedNetwork, node_implementation::{NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, @@ -56,7 +56,7 @@ pub struct DaTaskState, V: Version /// Membership for the DA committee and quorum committee. /// We need the latter only for calculating the proper VID scheme /// from the number of nodes in the quorum. 
- pub membership: Arc>, + pub membership_coordinator: EpochMembershipCoordinator, /// The underlying network pub network: Arc, @@ -116,10 +116,12 @@ impl, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState( OuterConsensus::new(Arc::clone(&consensus.inner_consensus)), view_number, target_epoch, - membership, + membership.coordinator.clone(), &pk, &upgrade_lock, ) @@ -316,24 +321,26 @@ impl, V: Versions> DaTaskState, V: Versions> DaTaskState( view_number: TYPES::View, event_sender: Sender>>, event_receiver: Receiver>>, - membership: Arc>, + membership_coordinator: EpochMembershipCoordinator, consensus: OuterConsensus, sender_public_key: TYPES::SignatureKey, sender_private_key: ::PrivateKey, @@ -78,7 +79,7 @@ pub(crate) async fn fetch_proposal( ) .await; - let mem = Arc::clone(&membership); + let mem_coordinator = membership_coordinator.clone(); // Make a background task to await the arrival of the event data. let Ok(Some(proposal)) = // We want to explicitly timeout here so we aren't waiting around for the data. @@ -109,9 +110,14 @@ pub(crate) async fn fetch_proposal( if let HotShotEvent::QuorumProposalResponseRecv(quorum_proposal) = hs_event.as_ref() { + let proposal_epoch = option_epoch_from_block_number::( + quorum_proposal.data.proposal.epoch().is_some(), + quorum_proposal.data.block_header().block_number(), + epoch_height, + ); + let epoch_membership = mem_coordinator.membership_for_epoch(proposal_epoch).await.ok()?; // Make sure that the quorum_proposal is valid - let mem_reader = mem.read().await; - if quorum_proposal.validate_signature(&mem_reader, epoch_height).is_ok() { + if quorum_proposal.validate_signature(&epoch_membership).await.is_ok() { proposal = Some(quorum_proposal.clone()); } @@ -133,10 +139,11 @@ pub(crate) async fn fetch_proposal( let justify_qc_epoch = justify_qc.data.epoch(); - let membership_reader = membership.read().await; - let membership_stake_table = membership_reader.stake_table(justify_qc_epoch); - let membership_success_threshold = membership_reader.success_threshold(justify_qc_epoch); - drop(membership_reader); + let epoch_membership = membership_coordinator + .membership_for_epoch(justify_qc_epoch) + .await?; + let membership_stake_table = epoch_membership.stake_table().await; + let membership_success_threshold = epoch_membership.success_threshold().await; justify_qc .is_valid_cert( @@ -498,7 +505,7 @@ pub async fn decide_from_proposal( pub(crate) async fn parent_leaf_and_state( event_sender: &Sender>>, event_receiver: &Receiver>>, - membership: Arc>, + membership: EpochMembershipCoordinator, public_key: TYPES::SignatureKey, private_key: ::PrivateKey, consensus: OuterConsensus, @@ -745,9 +752,8 @@ pub(crate) async fn validate_proposal_view_and_certs< ); // Validate the proposal's signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment - let membership_reader = validation_info.membership.read().await; - proposal.validate_signature(&membership_reader, validation_info.epoch_height)?; - drop(membership_reader); + let mut membership = validation_info.membership.clone(); + proposal.validate_signature(&membership).await?; // Verify a timeout certificate OR a view sync certificate exists and is valid. 
if proposal.data.justify_qc().view_number() != view_number - 1 { @@ -765,12 +771,10 @@ pub(crate) async fn validate_proposal_view_and_certs< *view_number ); let timeout_cert_epoch = timeout_cert.data().epoch(); + membership = membership.get_new_epoch(timeout_cert_epoch).await?; - let membership_reader = validation_info.membership.read().await; - let membership_stake_table = membership_reader.stake_table(timeout_cert_epoch); - let membership_success_threshold = - membership_reader.success_threshold(timeout_cert_epoch); - drop(membership_reader); + let membership_stake_table = membership.stake_table().await; + let membership_success_threshold = membership.success_threshold().await; timeout_cert .is_valid_cert( @@ -795,12 +799,10 @@ pub(crate) async fn validate_proposal_view_and_certs< ); let view_sync_cert_epoch = view_sync_cert.data().epoch(); + membership = membership.get_new_epoch(view_sync_cert_epoch).await?; - let membership_reader = validation_info.membership.read().await; - let membership_stake_table = membership_reader.stake_table(view_sync_cert_epoch); - let membership_success_threshold = - membership_reader.success_threshold(view_sync_cert_epoch); - drop(membership_reader); + let membership_stake_table = membership.stake_table().await; + let membership_success_threshold = membership.success_threshold().await; // View sync certs must also be valid. view_sync_cert @@ -922,13 +924,15 @@ pub async fn validate_qc_and_next_epoch_qc( qc: &QuorumCertificate2, maybe_next_epoch_qc: Option<&NextEpochQuorumCertificate2>, consensus: &OuterConsensus, - membership: &Arc>, + membership_coordinator: &EpochMembershipCoordinator, upgrade_lock: &UpgradeLock, ) -> Result<()> { - let membership_reader = membership.read().await; - let membership_stake_table = membership_reader.stake_table(qc.data.epoch); - let membership_success_threshold = membership_reader.success_threshold(qc.data.epoch); - drop(membership_reader); + let mut epoch_membership = membership_coordinator + .membership_for_epoch(qc.data.epoch) + .await?; + + let membership_stake_table = epoch_membership.stake_table().await; + let membership_success_threshold = epoch_membership.success_threshold().await; { let consensus_reader = consensus.read().await; @@ -950,13 +954,9 @@ pub async fn validate_qc_and_next_epoch_qc( if qc.view_number() != next_epoch_qc.view_number() || qc.data != *next_epoch_qc.data { bail!("Next epoch qc exists but it's not equal with qc."); } - - let membership_reader = membership.read().await; - let membership_next_stake_table = - membership_reader.stake_table(qc.data.epoch.map(|x| x + 1)); - let membership_next_success_threshold = - membership_reader.success_threshold(qc.data.epoch.map(|x| x + 1)); - drop(membership_reader); + epoch_membership = epoch_membership.next_epoch().await?; + let membership_next_stake_table = epoch_membership.stake_table().await; + let membership_next_success_threshold = epoch_membership.success_threshold().await; // Validate the next epoch qc as well next_epoch_qc diff --git a/hotshot-task-impls/src/network.rs b/hotshot-task-impls/src/network.rs index 03004f5077..8994a594fb 100644 --- a/hotshot-task-impls/src/network.rs +++ b/hotshot-task-impls/src/network.rs @@ -17,6 +17,7 @@ use hotshot_task::task::TaskState; use hotshot_types::{ consensus::OuterConsensus, data::{VidDisperse, VidDisperseShare}, + epoch_membership::EpochMembershipCoordinator, event::{Event, EventType, HotShotAction}, message::{ convert_proposal, DaConsensusMessage, DataMessage, GeneralConsensusMessage, Message, @@ -24,7 +25,6 
@@ use hotshot_types::{ }, simple_vote::HasEpoch, traits::{ - election::Membership, network::{ BroadcastDelay, ConnectedNetwork, RequestKind, ResponseMessage, Topic, TransmitType, ViewMessage, @@ -481,7 +481,7 @@ pub struct NetworkEventTaskState< pub epoch: Option, /// network memberships - pub membership: Arc>, + pub membership_coordinator: EpochMembershipCoordinator, /// Storage to store actionable events pub storage: Arc>, @@ -725,10 +725,12 @@ impl< *maybe_action = Some(HotShotAction::Vote); let view_number = vote.view_number() + 1; let leader = match self - .membership - .read() + .membership_coordinator + .membership_for_epoch(vote.epoch()) + .await + .ok()? + .leader(view_number) .await - .leader(view_number, vote.epoch()) { Ok(l) => l, Err(e) => { @@ -821,8 +823,14 @@ impl< HotShotEvent::DaVoteSend(vote) => { *maybe_action = Some(HotShotAction::DaVote); let view_number = vote.view_number(); - let epoch = vote.data.epoch; - let leader = match self.membership.read().await.leader(view_number, epoch) { + let leader = match self + .membership_coordinator + .membership_for_epoch(vote.epoch()) + .await + .ok()? + .leader(view_number) + .await + { Ok(l) => l, Err(e) => { tracing::warn!( @@ -866,7 +874,14 @@ impl< } HotShotEvent::ViewSyncPreCommitVoteSend(vote) => { let view_number = vote.view_number() + vote.date().relay; - let leader = match self.membership.read().await.leader(view_number, self.epoch) { + let leader = match self + .membership_coordinator + .membership_for_epoch(self.epoch) + .await + .ok()? + .leader(view_number) + .await + { Ok(l) => l, Err(e) => { tracing::warn!( @@ -892,7 +907,14 @@ impl< HotShotEvent::ViewSyncCommitVoteSend(vote) => { *maybe_action = Some(HotShotAction::ViewSyncVote); let view_number = vote.view_number() + vote.date().relay; - let leader = match self.membership.read().await.leader(view_number, self.epoch) { + let leader = match self + .membership_coordinator + .membership_for_epoch(self.epoch) + .await + .ok()? + .leader(view_number) + .await + { Ok(l) => l, Err(e) => { tracing::warn!( @@ -918,7 +940,14 @@ impl< HotShotEvent::ViewSyncFinalizeVoteSend(vote) => { *maybe_action = Some(HotShotAction::ViewSyncVote); let view_number = vote.view_number() + vote.date().relay; - let leader = match self.membership.read().await.leader(view_number, self.epoch) { + let leader = match self + .membership_coordinator + .membership_for_epoch(self.epoch) + .await + .ok()? + .leader(view_number) + .await + { Ok(l) => l, Err(e) => { tracing::warn!( @@ -986,7 +1015,14 @@ impl< HotShotEvent::TimeoutVoteSend(vote) => { *maybe_action = Some(HotShotAction::Vote); let view_number = vote.view_number() + 1; - let leader = match self.membership.read().await.leader(view_number, self.epoch) { + let leader = match self + .membership_coordinator + .membership_for_epoch(self.epoch) + .await + .ok()? + .leader(view_number) + .await + { Ok(l) => l, Err(e) => { tracing::warn!( @@ -1019,7 +1055,14 @@ impl< HotShotEvent::UpgradeVoteSend(vote) => { tracing::error!("Sending upgrade vote!"); let view_number = vote.view_number(); - let leader = match self.membership.read().await.leader(view_number, self.epoch) { + let leader = match self + .membership_coordinator + .membership_for_epoch(self.epoch) + .await + .ok()? 
+ .leader(view_number) + .await + { Ok(l) => l, Err(e) => { tracing::warn!( @@ -1047,9 +1090,10 @@ impl< self.cancel_tasks(keep_view); let net = Arc::clone(&self.network); let epoch = self.epoch.map(|x| x.u64()); - let mem = Arc::clone(&self.membership); + let membership_coordinator = self.membership_coordinator.clone(); spawn(async move { - net.update_view::(*keep_view, epoch, mem).await; + net.update_view::(*keep_view, epoch, membership_coordinator) + .await; }); None } @@ -1145,11 +1189,14 @@ impl< let view_number = message.kind.view_number(); let epoch = message.kind.epoch(); let committee_topic = Topic::Global; - let da_committee = self - .membership - .read() + let Ok(mem) = self + .membership_coordinator + .membership_for_epoch(self.epoch) .await - .da_committee_members(view_number, self.epoch); + else { + return; + }; + let da_committee = mem.da_committee_members(view_number).await; let network = Arc::clone(&self.network); let storage = Arc::clone(&self.storage); let consensus = OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)); @@ -1271,14 +1318,12 @@ pub mod test { self.parse_event(event, &mut maybe_action).await { // Modify the values acquired by parsing the event. - let membership_reader = self.membership.read().await; (self.modifier)( &mut sender, &mut message_kind, &mut transmit, - &membership_reader, + &*self.membership_coordinator.membership().read().await, ); - drop(membership_reader); self.spawn_transmit_task(message_kind, maybe_action, transmit, sender) .await; diff --git a/hotshot-task-impls/src/quorum_proposal/handlers.rs b/hotshot-task-impls/src/quorum_proposal/handlers.rs index fc5577c912..2203370d3d 100644 --- a/hotshot-task-impls/src/quorum_proposal/handlers.rs +++ b/hotshot-task-impls/src/quorum_proposal/handlers.rs @@ -26,11 +26,11 @@ use hotshot_task::dependency_task::HandleDepOutput; use hotshot_types::{ consensus::{CommitmentAndMetadata, OuterConsensus}, data::{Leaf2, QuorumProposal2, QuorumProposalWrapper, VidDisperse, ViewChangeEvidence2}, + epoch_membership::EpochMembership, message::Proposal, simple_certificate::{QuorumCertificate2, UpgradeCertificate}, traits::{ block_contents::BlockHeader, - election::Membership, node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, }, @@ -82,7 +82,7 @@ pub struct ProposalDependencyHandle { pub instance_state: Arc, /// Membership for Quorum Certs/votes - pub membership: Arc>, + pub membership: EpochMembership, /// Our public key pub public_key: TYPES::SignatureKey, @@ -128,11 +128,10 @@ impl ProposalDependencyHandle { ) -> Option> { while let Ok(event) = rx.recv_direct().await { if let HotShotEvent::HighQcRecv(qc, _sender) = event.as_ref() { - let membership_reader = self.membership.read().await; - let membership_stake_table = membership_reader.stake_table(qc.data.epoch); - let membership_success_threshold = - membership_reader.success_threshold(qc.data.epoch); - drop(membership_reader); + let prev_epoch = qc.data.epoch; + let epoch_membership = self.membership.get_new_epoch(prev_epoch).await.ok()?; + let membership_stake_table = epoch_membership.stake_table().await; + let membership_success_threshold = epoch_membership.success_threshold().await; if qc .is_valid_cert( @@ -208,7 +207,7 @@ impl ProposalDependencyHandle { let (parent_leaf, state) = parent_leaf_and_state( &self.sender, &self.receiver, - Arc::clone(&self.membership), + self.membership.coordinator.clone(), self.public_key.clone(), self.private_key.clone(), 
OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), @@ -309,16 +308,15 @@ impl ProposalDependencyHandle { self.epoch_height, ); + let epoch_membership = self + .membership + .coordinator + .membership_for_epoch(epoch) + .await?; // Make sure we are the leader for the view and epoch. // We might have ended up here because we were in the epoch transition. - if self - .membership - .read() - .await - .leader(self.view_number, epoch)? - != self.public_key - { - tracing::debug!( + if epoch_membership.leader(self.view_number).await? != self.public_key { + tracing::warn!( "We are not the leader in the epoch for which we are about to propose. Do not send the quorum proposal." ); return Ok(()); diff --git a/hotshot-task-impls/src/quorum_proposal/mod.rs b/hotshot-task-impls/src/quorum_proposal/mod.rs index bfbebe5a57..88c09de09e 100644 --- a/hotshot-task-impls/src/quorum_proposal/mod.rs +++ b/hotshot-task-impls/src/quorum_proposal/mod.rs @@ -18,10 +18,10 @@ use hotshot_task::{ use hotshot_types::StakeTableEntries; use hotshot_types::{ consensus::OuterConsensus, + epoch_membership::EpochMembershipCoordinator, message::UpgradeLock, simple_certificate::{QuorumCertificate2, UpgradeCertificate}, traits::{ - election::Membership, node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, storage::Storage, @@ -54,7 +54,7 @@ pub struct QuorumProposalTaskState pub instance_state: Arc, /// Membership for Quorum Certs/votes - pub membership: Arc>, + pub membership_coordinator: EpochMembershipCoordinator, /// Our public key pub public_key: TYPES::SignatureKey, @@ -282,9 +282,12 @@ impl, V: Versions> event: Arc>, epoch_transition_indicator: EpochTransitionIndicator, ) -> Result<()> { - let membership_reader = self.membership.read().await; + let epoch_membership = self + .membership_coordinator + .membership_for_epoch(epoch_number) + .await?; let leader_in_current_epoch = - membership_reader.leader(view_number, epoch_number)? == self.public_key; + epoch_membership.leader(view_number).await? == self.public_key; // If we are in the epoch transition and we are the leader in the next epoch, // we might want to start collecting dependencies for our next epoch proposal. @@ -293,9 +296,16 @@ impl, V: Versions> epoch_transition_indicator, EpochTransitionIndicator::InTransition ) - && membership_reader.leader(view_number, epoch_number.map(|x| x + 1))? + && epoch_membership + .next_epoch() + .await + .context(warn!( + "No Stake Table for Epoch = {:?}", + epoch_number.unwrap() + 1 + ))? + .leader(view_number) + .await? == self.public_key; - drop(membership_reader); // Don't even bother making the task if we are not entitled to propose anyway. 
ensure!( @@ -328,7 +338,7 @@ impl, V: Versions> view_number, sender: event_sender, receiver: event_receiver, - membership: Arc::clone(&self.membership), + membership: epoch_membership, public_key: self.public_key.clone(), private_key: self.private_key.clone(), instance_state: Arc::clone(&self.instance_state), @@ -476,12 +486,14 @@ impl, V: Versions> } HotShotEvent::ViewSyncFinalizeCertificateRecv(certificate) => { let epoch_number = certificate.data.epoch; + let epoch_membership = self + .membership_coordinator + .membership_for_epoch(epoch_number) + .await + .context(warn!("No Stake Table for Epoch = {:?}", epoch_number))?; - let membership_reader = self.membership.read().await; - let membership_stake_table = membership_reader.stake_table(epoch_number); - let membership_success_threshold = - membership_reader.success_threshold(epoch_number); - drop(membership_reader); + let membership_stake_table = epoch_membership.stake_table().await; + let membership_success_threshold = epoch_membership.success_threshold().await; certificate .is_valid_cert( @@ -562,11 +574,12 @@ impl, V: Versions> ensure!(qc.view_number() > self.highest_qc.view_number()); let cert_epoch_number = qc.data.epoch; - let membership_reader = self.membership.read().await; - let membership_stake_table = membership_reader.stake_table(cert_epoch_number); - let membership_success_threshold = - membership_reader.success_threshold(cert_epoch_number); - drop(membership_reader); + let epoch_membership = self + .membership_coordinator + .membership_for_epoch(cert_epoch_number) + .await?; + let membership_stake_table = epoch_membership.stake_table().await; + let membership_success_threshold = epoch_membership.success_threshold().await; qc.is_valid_cert( StakeTableEntries::::from(membership_stake_table).0, diff --git a/hotshot-task-impls/src/quorum_proposal_recv/handlers.rs b/hotshot-task-impls/src/quorum_proposal_recv/handlers.rs index a70fc30c96..f965bbfada 100644 --- a/hotshot-task-impls/src/quorum_proposal_recv/handlers.rs +++ b/hotshot-task-impls/src/quorum_proposal_recv/handlers.rs @@ -14,6 +14,7 @@ use committable::Committable; use hotshot_types::{ consensus::OuterConsensus, data::{Leaf2, QuorumProposal, QuorumProposalWrapper}, + epoch_membership::EpochMembershipCoordinator, message::Proposal, simple_certificate::QuorumCertificate, simple_vote::HasEpoch, @@ -102,7 +103,7 @@ fn spawn_fetch_proposal( view: TYPES::View, event_sender: Sender>>, event_receiver: Receiver>>, - membership: Arc>, + membership: EpochMembershipCoordinator, consensus: OuterConsensus, sender_public_key: TYPES::SignatureKey, sender_private_key: ::PrivateKey, @@ -173,7 +174,7 @@ pub(crate) async fn handle_quorum_proposal_recv< &justify_qc, maybe_next_epoch_justify_qc.as_ref(), &validation_info.consensus, - &validation_info.membership, + &validation_info.membership.coordinator, &validation_info.upgrade_lock, ) .await?; @@ -200,7 +201,7 @@ pub(crate) async fn handle_quorum_proposal_recv< justify_qc.view_number(), event_sender.clone(), event_receiver.clone(), - Arc::clone(&validation_info.membership), + validation_info.membership.coordinator.clone(), OuterConsensus::new(Arc::clone(&validation_info.consensus.inner_consensus)), // Note that we explicitly use the node key here instead of the provided key in the signature. 
// This is because the key that we receive is for the prior leader, so the payload would be routed diff --git a/hotshot-task-impls/src/quorum_proposal_recv/mod.rs b/hotshot-task-impls/src/quorum_proposal_recv/mod.rs index a7ab61cd50..b56130f3b3 100644 --- a/hotshot-task-impls/src/quorum_proposal_recv/mod.rs +++ b/hotshot-task-impls/src/quorum_proposal_recv/mod.rs @@ -17,13 +17,17 @@ use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ consensus::{Consensus, OuterConsensus}, data::{EpochNumber, Leaf, ViewChangeEvidence2}, + epoch_membership::{self, EpochMembership, EpochMembershipCoordinator}, event::Event, message::UpgradeLock, simple_certificate::UpgradeCertificate, + simple_vote::HasEpoch, + traits::block_contents::BlockHeader, traits::{ node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, }, + utils::option_epoch_from_block_number, vote::{Certificate, HasViewNumber}, }; use hotshot_utils::anytrace::{bail, Result}; @@ -58,7 +62,7 @@ pub struct QuorumProposalRecvTaskState, /// Membership for Quorum Certs/votes - pub membership: Arc>, + pub membership: EpochMembershipCoordinator, /// View timeout from config. pub timeout: u64, @@ -99,7 +103,7 @@ pub(crate) struct ValidationInfo, pub(crate) consensus: OuterConsensus, /// Membership for Quorum Certs/votes - pub membership: Arc>, + pub membership: EpochMembership, /// Output events to application pub output_event_stream: async_broadcast::Sender>, @@ -145,12 +149,23 @@ impl, V: Versions> tracing::error!("Throwing away old proposal"); return; } + let proposal_epoch = option_epoch_from_block_number::( + proposal.data.proposal.epoch().is_some(), + proposal.data.block_header().block_number(), + self.epoch_height, + ); + let Ok(epoch_membership) = + self.membership.membership_for_epoch(proposal_epoch).await + else { + tracing::warn!("No Stake table for epoch = {:?}", proposal_epoch); + return; + }; let validation_info = ValidationInfo:: { id: self.id, public_key: self.public_key.clone(), private_key: self.private_key.clone(), consensus: self.consensus.clone(), - membership: Arc::clone(&self.membership), + membership: epoch_membership, output_event_stream: self.output_event_stream.clone(), storage: Arc::clone(&self.storage), upgrade_lock: self.upgrade_lock.clone(), @@ -166,7 +181,7 @@ impl, V: Versions> .await { Ok(()) => {} - Err(e) => debug!(?e, "Failed to validate the proposal"), + Err(e) => error!(?e, "Failed to validate the proposal"), } } HotShotEvent::ViewChange(view, epoch) => { diff --git a/hotshot-task-impls/src/quorum_vote/handlers.rs b/hotshot-task-impls/src/quorum_vote/handlers.rs index 768d6a162c..19a926a0fd 100644 --- a/hotshot-task-impls/src/quorum_vote/handlers.rs +++ b/hotshot-task-impls/src/quorum_vote/handlers.rs @@ -10,10 +10,12 @@ use async_broadcast::{InactiveReceiver, Sender}; use async_lock::RwLock; use chrono::Utc; use committable::Committable; +use hotshot_types::epoch_membership::EpochMembership; use hotshot_types::{ consensus::OuterConsensus, data::{Leaf2, QuorumProposalWrapper, VidDisperseShare}, drb::{compute_drb_result, DrbResult, INITIAL_DRB_RESULT}, + epoch_membership::EpochMembershipCoordinator, event::{Event, EventType}, message::{Proposal, UpgradeLock}, simple_vote::{HasEpoch, QuorumData2, QuorumVote2}, @@ -47,12 +49,11 @@ use crate::{ }; async fn notify_membership_of_drb_result( - membership: &Arc>, - epoch: ::Epoch, + membership: &EpochMembership, drb_result: DrbResult, ) { - tracing::debug!("Calling add_drb_result for epoch {:?}", epoch); - 
membership.write().await.add_drb_result(epoch, drb_result); + tracing::debug!("Calling add_drb_result for epoch {:?}", membership.epoch()); + membership.add_drb_result(drb_result).await; } /// Store the DRB result from the computation task to the shared `results` table. @@ -100,8 +101,14 @@ async fn store_and_get_computed_drb_result< .insert(epoch_number, result); drop(consensus_writer); - notify_membership_of_drb_result::(&task_state.membership, epoch_number, result) - .await; + notify_membership_of_drb_result::( + &task_state + .membership + .membership_for_epoch(Some(epoch_number)) + .await?, + result, + ) + .await; task_state.drb_computation = None; Ok(result) } @@ -146,13 +153,14 @@ async fn verify_drb_result, V: Ver .next_drb_result() .context(info!("Proposal is missing the DRB result."))?; - let membership_reader = task_state.membership.read().await; - if let Some(epoch_val) = epoch { - let has_stake_current_epoch = - membership_reader.has_stake(&task_state.public_key, Some(epoch_val)); - - drop(membership_reader); + let has_stake_current_epoch = task_state + .membership + .membership_for_epoch(epoch) + .await + .context(warn!("No stake table for epoch"))? + .has_stake(&task_state.public_key) + .await; if has_stake_current_epoch { let computed_result = @@ -184,13 +192,17 @@ async fn start_drb_task, V: Versio task_state.epoch_height, )); - // Start the new task if we're in the committee for this epoch - if task_state + let Ok(epoch_membership) = task_state .membership - .read() + .membership_for_epoch(Some(current_epoch_number)) .await - .has_stake(&task_state.public_key, Some(current_epoch_number)) - { + else { + tracing::warn!("No Stake Table for Epoch = {:?}", current_epoch_number); + return; + }; + + // Start the new task if we're in the committee for this epoch + if epoch_membership.has_stake(&task_state.public_key).await { let new_epoch_number = current_epoch_number + 1; // If a task is currently live AND has finished, join it and save the result. 
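This hunk shows the pattern that recurs throughout the patch: instead of reading a shared `RwLock`-wrapped membership and passing an epoch into every call, a task asks the `EpochMembershipCoordinator` for an epoch-scoped handle and awaits its methods, giving the coordinator a chance to catch the stake table up for that epoch first. A minimal sketch of that flow, assuming only the signatures visible in these hunks (`membership_for_epoch`, `leader`, `has_stake`); the helper function itself is invented for illustration:

use hotshot_types::{
    epoch_membership::EpochMembershipCoordinator,
    traits::node_implementation::NodeType,
};
use hotshot_utils::anytrace::Result;

// Invented helper, not part of the patch: resolve an epoch-scoped membership
// handle and check whether `my_key` both has stake and leads `view`.
async fn is_stake_holding_leader<TYPES: NodeType>(
    coordinator: &EpochMembershipCoordinator<TYPES>,
    view: TYPES::View,
    epoch: Option<TYPES::Epoch>,
    my_key: &TYPES::SignatureKey,
) -> Result<bool> {
    // Resolving the handle is async because it may trigger stake-table
    // catchup for `epoch` before any query can be answered.
    let membership = coordinator.membership_for_epoch(epoch).await?;
    // The epoch is carried by the handle, so `leader` takes only the view.
    Ok(membership.has_stake(my_key).await && membership.leader(view).await? == *my_key)
}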
@@ -209,12 +221,7 @@ async fn start_drb_task, V: Versio .drb_seeds_and_results .results .insert(*task_epoch, result); - notify_membership_of_drb_result::( - &task_state.membership, - *task_epoch, - result, - ) - .await; + notify_membership_of_drb_result::(&epoch_membership, result).await; task_state.drb_computation = None; } Err(e) => { @@ -329,8 +336,10 @@ async fn store_drb_seed_and_result .results .insert(current_epoch_number + 1, result); notify_membership_of_drb_result::( - &task_state.membership, - current_epoch_number + 1, + &task_state + .membership + .membership_for_epoch(Some(current_epoch_number + 1)) + .await?, result, ) .await; @@ -376,7 +385,7 @@ pub(crate) async fn handle_quorum_proposal_validated< Arc::clone(&task_state.upgrade_lock.decided_upgrade_certificate), &task_state.public_key, version >= V::Epochs::VERSION, - &task_state.membership, + task_state.membership.membership(), ) .await } else { @@ -386,7 +395,7 @@ pub(crate) async fn handle_quorum_proposal_validated< Arc::clone(&task_state.upgrade_lock.decided_upgrade_certificate), &task_state.public_key, version >= V::Epochs::VERSION, - &task_state.membership, + task_state.membership.membership(), ) .await }; @@ -424,6 +433,7 @@ pub(crate) async fn handle_quorum_proposal_validated< tracing::debug!("Calling set_first_epoch for epoch {:?}", first_epoch_number); task_state .membership + .membership() .write() .await .set_first_epoch(first_epoch_number, INITIAL_DRB_RESULT); @@ -521,7 +531,7 @@ pub(crate) async fn update_shared_state< consensus: OuterConsensus, sender: Sender>>, receiver: InactiveReceiver>>, - membership: Arc>, + membership: EpochMembershipCoordinator, public_key: TYPES::SignatureKey, private_key: ::PrivateKey, upgrade_lock: UpgradeLock, @@ -560,7 +570,7 @@ pub(crate) async fn update_shared_state< justify_qc.view_number(), sender.clone(), receiver.activate_cloned(), - Arc::clone(&membership), + membership.clone(), OuterConsensus::new(Arc::clone(&consensus.inner_consensus)), public_key.clone(), private_key.clone(), @@ -656,7 +666,7 @@ pub(crate) async fn update_shared_state< #[allow(clippy::too_many_arguments)] pub(crate) async fn submit_vote, V: Versions>( sender: Sender>>, - membership: Arc>, + membership: EpochMembership, public_key: TYPES::SignatureKey, private_key: ::PrivateKey, upgrade_lock: UpgradeLock, @@ -667,20 +677,12 @@ pub(crate) async fn submit_vote, V extended_vote: bool, epoch_height: u64, ) -> Result<()> { - let epoch_number = option_epoch_from_block_number::( - leaf.with_epoch, - leaf.block_header().block_number(), - epoch_height, - ); - - let membership_reader = membership.read().await; - let committee_member_in_current_epoch = membership_reader.has_stake(&public_key, epoch_number); + let committee_member_in_current_epoch = membership.has_stake(&public_key).await; // If the proposed leaf is for the last block in the epoch and the node is part of the quorum committee // in the next epoch, the node should vote to achieve the double quorum. 
     let committee_member_in_next_epoch = leaf.with_epoch
         && is_last_block_in_epoch(leaf.height(), epoch_height)
-        && membership_reader.has_stake(&public_key, epoch_number.map(|x| x + 1));
-    drop(membership_reader);
+        && membership.next_epoch().await?.has_stake(&public_key).await;
 
     ensure!(
         committee_member_in_current_epoch || committee_member_in_next_epoch,
@@ -694,7 +696,7 @@ pub(crate) async fn submit_vote<TYPES: NodeType, I: NodeImplementation<TYPES>, V
     let vote = QuorumVote2::<TYPES>::create_signed_vote(
         QuorumData2 {
             leaf_commit: leaf.commit(),
-            epoch: epoch_number,
+            epoch: membership.epoch(),
         },
         view_number,
         &public_key,
diff --git a/hotshot-task-impls/src/quorum_vote/mod.rs b/hotshot-task-impls/src/quorum_vote/mod.rs
index 8b26cbb39a..e3d7af2771 100644
--- a/hotshot-task-impls/src/quorum_vote/mod.rs
+++ b/hotshot-task-impls/src/quorum_vote/mod.rs
@@ -20,13 +20,13 @@ use hotshot_types::{
     consensus::{ConsensusMetricsValue, OuterConsensus},
     data::{Leaf2, QuorumProposalWrapper},
     drb::DrbComputation,
+    epoch_membership::EpochMembershipCoordinator,
     event::Event,
     message::{Proposal, UpgradeLock},
     simple_certificate::UpgradeCertificate,
     simple_vote::HasEpoch,
     traits::{
         block_contents::BlockHeader,
-        election::Membership,
         node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions},
         signature_key::SignatureKey,
         storage::Storage,
@@ -74,7 +74,7 @@ pub struct VoteDependencyHandle<TYPES: NodeType, I: NodeImplementation<TYPES>, V
     pub instance_state: Arc<TYPES::InstanceState>,
 
     /// Membership for Quorum certs/votes.
-    pub membership: Arc<RwLock<TYPES::Membership>>,
+    pub membership_coordinator: EpochMembershipCoordinator<TYPES>,
 
     /// Reference to the storage.
     pub storage: Arc<RwLock<I::Storage>>,
@@ -237,7 +237,7 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES> + 'static, V: Versions> Handl
             OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)),
             self.sender.clone(),
             self.receiver.clone(),
-            Arc::clone(&self.membership),
+            self.membership_coordinator.clone(),
             self.public_key.clone(),
             self.private_key.clone(),
             self.upgrade_lock.clone(),
@@ -254,12 +254,24 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES> + 'static, V: Versions> Handl
             tracing::error!("Failed to update shared consensus state; error = {e:#}");
             return;
         }
-
         let cur_epoch = option_epoch_from_block_number::<TYPES>(
             leaf.with_epoch,
             leaf.height(),
             self.epoch_height,
         );
+
+        let epoch_membership = match self
+            .membership_coordinator
+            .membership_for_epoch(cur_epoch)
+            .await
+        {
+            Ok(epoch_membership) => epoch_membership,
+            Err(e) => {
+                tracing::warn!("{:?}", e);
+                return;
+            }
+        };
+
         tracing::trace!(
             "Sending ViewChange for view {} and epoch {:?}",
             self.view_number + 1,
@@ -273,7 +285,7 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES> + 'static, V: Versions> Handl
         if let Err(e) = submit_vote::<TYPES, I, V>(
             self.sender.clone(),
-            Arc::clone(&self.membership),
+            epoch_membership,
             self.public_key.clone(),
             self.private_key.clone(),
             self.upgrade_lock.clone(),
@@ -317,7 +329,7 @@ pub struct QuorumVoteTaskState<TYPES: NodeType, I: NodeImplementation<TYPES>, V:
     pub network: Arc<I::Network>,
 
     /// Membership for Quorum certs/votes and DA committee certs/votes.
-    pub membership: Arc<RwLock<TYPES::Membership>>,
+    pub membership: EpochMembershipCoordinator<TYPES>,
 
     /// In-progress DRB computation task.
pub drb_computation: DrbComputation, @@ -441,7 +453,7 @@ impl, V: Versions> QuorumVoteTaskS private_key: self.private_key.clone(), consensus: OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), instance_state: Arc::clone(&self.instance_state), - membership: Arc::clone(&self.membership), + membership_coordinator: self.membership.clone(), storage: Arc::clone(&self.storage), view_number, sender: event_sender.clone(), @@ -553,11 +565,9 @@ impl, V: Versions> QuorumVoteTaskS let cert_epoch = cert.data.epoch; - let membership_reader = self.membership.read().await; - let membership_da_stake_table = membership_reader.da_stake_table(cert_epoch); - let membership_da_success_threshold = - membership_reader.da_success_threshold(cert_epoch); - drop(membership_reader); + let epoch_membership = self.membership.membership_for_epoch(cert_epoch).await?; + let membership_da_stake_table = epoch_membership.da_stake_table().await; + let membership_da_success_threshold = epoch_membership.da_success_threshold().await; // Validate the DAC. cert.is_valid_cert( @@ -606,18 +616,23 @@ impl, V: Versions> QuorumVoteTaskS let vid_epoch = share.data.epoch(); let target_epoch = share.data.target_epoch(); - let membership_reader = self.membership.read().await; + let membership_reader = self.membership.membership_for_epoch(vid_epoch).await?; // ensure that the VID share was sent by a DA member OR the view leader ensure!( membership_reader - .da_committee_members(view, vid_epoch) + .da_committee_members(view) + .await .contains(sender) - || *sender == membership_reader.leader(view, vid_epoch)?, + || *sender == membership_reader.leader(view).await?, "VID share was not sent by a DA member or the view leader." ); - let membership_total_nodes = membership_reader.total_nodes(target_epoch); - drop(membership_reader); + let membership_total_nodes = self + .membership + .membership_for_epoch(target_epoch) + .await? 
+ .total_nodes() + .await; if let Err(()) = share.data.verify_share(membership_total_nodes) { bail!("Failed to verify VID share"); @@ -734,7 +749,7 @@ impl, V: Versions> QuorumVoteTaskS OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), event_sender.clone(), event_receiver.clone().deactivate(), - Arc::clone(&self.membership), + self.membership.clone(), self.public_key.clone(), self.private_key.clone(), self.upgrade_lock.clone(), @@ -781,7 +796,9 @@ impl, V: Versions> QuorumVoteTaskS submit_vote::( event_sender.clone(), - Arc::clone(&self.membership), + self.membership + .membership_for_epoch(Some(current_epoch)) + .await?, self.public_key.clone(), self.private_key.clone(), self.upgrade_lock.clone(), diff --git a/hotshot-task-impls/src/request.rs b/hotshot-task-impls/src/request.rs index 02d10a284d..1bf327a98f 100644 --- a/hotshot-task-impls/src/request.rs +++ b/hotshot-task-impls/src/request.rs @@ -14,7 +14,6 @@ use std::{ }; use async_broadcast::{Receiver, Sender}; -use async_lock::RwLock; use async_trait::async_trait; use hotshot_task::{ dependency::{Dependency, EventDependency}, @@ -22,10 +21,10 @@ use hotshot_task::{ }; use hotshot_types::{ consensus::OuterConsensus, + epoch_membership::EpochMembershipCoordinator, simple_vote::HasEpoch, traits::{ block_contents::BlockHeader, - election::Membership, network::{ConnectedNetwork, DataRequest, RequestKind}, node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, @@ -68,7 +67,7 @@ pub struct NetworkRequestState> { pub delay: Duration, /// Membership (Used here only for DA) - pub membership: Arc>, + pub membership_coordinator: EpochMembershipCoordinator, /// This nodes public key pub public_key: TYPES::SignatureKey, @@ -114,14 +113,20 @@ impl> TaskState for NetworkRequest HotShotEvent::QuorumProposalValidated(proposal, _) => { let prop_view = proposal.data.view_number(); let prop_epoch = proposal.data.epoch(); - let next_epoch = prop_epoch.map(|epoch| epoch + 1); // Request VID share only if: // 1. we are part of the current epoch or // 2. we are part of the next epoch and this is a proposal for the last block. - let membership_reader = self.membership.read().await; - if !membership_reader.has_stake(&self.public_key, prop_epoch) - && (!membership_reader.has_stake(&self.public_key, next_epoch) + let membership_reader = self + .membership_coordinator + .membership_for_epoch(prop_epoch) + .await?; + if !membership_reader.has_stake(&self.public_key).await + && (!membership_reader + .next_epoch() + .await? 
+ .has_stake(&self.public_key) + .await || !is_last_block_in_epoch( proposal.data.block_header().block_number(), self.epoch_height, @@ -129,7 +134,6 @@ impl> TaskState for NetworkRequest { return Ok(()); } - drop(membership_reader); let consensus_reader = self.consensus.read().await; let maybe_vid_share = consensus_reader @@ -213,15 +217,26 @@ impl> NetworkRequestState m, + Err(e) => { + tracing::warn!(e.message); + return; + } + }; + let mut da_committee_for_view = membership_reader.da_committee_members(view).await; + if let Ok(leader) = membership_reader.leader(view).await { da_committee_for_view.insert(leader); } // Get committee members for view let mut recipients: Vec = membership_reader - .da_committee_members(view, epoch) + .da_committee_members(view) + .await .into_iter() .collect(); drop(membership_reader); diff --git a/hotshot-task-impls/src/response.rs b/hotshot-task-impls/src/response.rs index f0000a7c14..1ea66cc667 100644 --- a/hotshot-task-impls/src/response.rs +++ b/hotshot-task-impls/src/response.rs @@ -7,14 +7,13 @@ use std::{sync::Arc, time::Duration}; use async_broadcast::{Receiver, Sender}; -use async_lock::RwLock; use committable::Committable; use hotshot_types::{ consensus::{Consensus, LockedConsensusState, OuterConsensus}, data::VidDisperseShare, + epoch_membership::EpochMembershipCoordinator, message::{Proposal, UpgradeLock}, traits::{ - election::Membership, network::DataRequest, node_implementation::{NodeType, Versions}, signature_key::SignatureKey, @@ -36,7 +35,7 @@ pub struct NetworkResponseState { consensus: LockedConsensusState, /// Quorum membership for checking if requesters have state - membership: Arc>, + membership: EpochMembershipCoordinator, /// This replicas public key pub_key: TYPES::SignatureKey, @@ -55,7 +54,7 @@ impl NetworkResponseState { /// Create the network request state with the info it needs pub fn new( consensus: LockedConsensusState, - membership: Arc>, + membership: EpochMembershipCoordinator, pub_key: TYPES::SignatureKey, private_key: ::PrivateKey, id: u64, @@ -175,7 +174,7 @@ impl NetworkResponseState { OuterConsensus::new(Arc::clone(&self.consensus)), view, target_epoch, - Arc::clone(&self.membership), + self.membership.clone(), &self.private_key, &self.upgrade_lock, ) @@ -188,7 +187,7 @@ impl NetworkResponseState { OuterConsensus::new(Arc::clone(&self.consensus)), view, target_epoch, - Arc::clone(&self.membership), + self.membership.clone(), &self.private_key, &self.upgrade_lock, ) @@ -210,7 +209,10 @@ impl NetworkResponseState { sender: &TYPES::SignatureKey, epoch: Option, ) -> bool { - self.membership.read().await.has_stake(sender, epoch) + let Ok(memb) = self.membership.membership_for_epoch(epoch).await else { + return false; + }; + memb.has_stake(sender).await } } diff --git a/hotshot-task-impls/src/transactions.rs b/hotshot-task-impls/src/transactions.rs index 4437597c56..06a65f8b63 100644 --- a/hotshot-task-impls/src/transactions.rs +++ b/hotshot-task-impls/src/transactions.rs @@ -10,7 +10,6 @@ use std::{ }; use async_broadcast::{Receiver, Sender}; -use async_lock::RwLock; use async_trait::async_trait; use futures::{future::join_all, stream::FuturesUnordered, StreamExt}; use hotshot_builder_api::v0_1::block_info::AvailableBlockInfo; @@ -19,12 +18,12 @@ use hotshot_types::{ consensus::OuterConsensus, data::VidCommitment, data::{null_block, PackedBundle}, + epoch_membership::EpochMembershipCoordinator, event::{Event, EventType}, message::UpgradeLock, traits::{ auction_results_provider::AuctionResultsProvider, 
block_contents::{BuilderFee, EncodeBytes}, - election::Membership, node_implementation::{ConsensusTime, HasUrls, NodeImplementation, NodeType, Versions}, signature_key::{BuilderSignatureKey, SignatureKey}, BlockPayload, @@ -92,7 +91,7 @@ pub struct TransactionTaskState, V pub consensus: OuterConsensus, /// Membership for the quorum - pub membership: Arc>, + pub membership_coordinator: EpochMembershipCoordinator, /// Builder 0.1 API clients pub builder_clients: Vec>, @@ -482,7 +481,12 @@ impl, V: Versions> TransactionTask self.cur_view = view; self.cur_epoch = epoch; - let leader = self.membership.read().await.leader(view, epoch)?; + let leader = self + .membership_coordinator + .membership_for_epoch(epoch) + .await? + .leader(view) + .await?; if leader == self.public_key { self.handle_view_change(&event_stream, view, epoch).await; return Ok(()); diff --git a/hotshot-task-impls/src/upgrade.rs b/hotshot-task-impls/src/upgrade.rs index c98cd611e9..4eaffb5dd8 100644 --- a/hotshot-task-impls/src/upgrade.rs +++ b/hotshot-task-impls/src/upgrade.rs @@ -7,18 +7,17 @@ use std::{marker::PhantomData, sync::Arc, time::SystemTime}; use async_broadcast::{Receiver, Sender}; -use async_lock::RwLock; use async_trait::async_trait; use committable::Committable; use hotshot_task::task::TaskState; use hotshot_types::{ data::UpgradeProposal, + epoch_membership::EpochMembershipCoordinator, event::{Event, EventType}, message::{Proposal, UpgradeLock}, simple_certificate::UpgradeCertificate, simple_vote::{UpgradeProposalData, UpgradeVote}, traits::{ - election::Membership, node_implementation::{ConsensusTime, NodeType, Versions}, signature_key::SignatureKey, }, @@ -47,7 +46,7 @@ pub struct UpgradeTaskState { pub cur_epoch: Option, /// Membership for Quorum Certs/votes - pub membership: Arc>, + pub membership_coordinator: EpochMembershipCoordinator, /// A map of `UpgradeVote` collector tasks pub vote_collectors: VoteCollectorsMap, UpgradeCertificate, V>, @@ -177,7 +176,12 @@ impl UpgradeTaskState { ); // We then validate that the proposal was issued by the leader for the view. - let view_leader_key = self.membership.read().await.leader(view, self.cur_epoch)?; + let view_leader_key = self + .membership_coordinator + .membership_for_epoch(self.cur_epoch) + .await? + .leader(view) + .await?; ensure!( view_leader_key == *sender, info!( @@ -218,25 +222,25 @@ impl UpgradeTaskState { tracing::debug!("Upgrade vote recv, Main Task {:?}", vote.view_number()); // Check if we are the leader. - { - let view = vote.view_number(); - let membership_reader = self.membership.read().await; - ensure!( - membership_reader.leader(view, self.cur_epoch)? == self.public_key, - debug!( - "We are not the leader for view {} are we leader for next view? {}", - *view, - membership_reader.leader(view + 1, self.cur_epoch)? == self.public_key - ) - ); - } + let view = vote.view_number(); + let epoch_membership = self + .membership_coordinator + .membership_for_epoch(self.cur_epoch) + .await?; + ensure!( + epoch_membership.leader(view).await? == self.public_key, + debug!( + "We are not the leader for view {} are we leader for next view? {}", + *view, + epoch_membership.leader(view + 1).await? == self.public_key + ) + ); handle_vote( &mut self.vote_collectors, vote, self.public_key.clone(), - &self.membership, - self.cur_epoch, + &epoch_membership, self.id, &event, &tx, @@ -262,10 +266,14 @@ impl UpgradeTaskState { ))? 
.as_secs(); - let leader = self.membership.read().await.leader( - TYPES::View::new(view + TYPES::UPGRADE_CONSTANTS.propose_offset), - self.cur_epoch, - )?; + let leader = self + .membership_coordinator + .membership_for_epoch(self.cur_epoch) + .await? + .leader(TYPES::View::new( + view + TYPES::UPGRADE_CONSTANTS.propose_offset, + )) + .await?; // We try to form a certificate 5 views before we're leader. if view >= self.start_proposing_view diff --git a/hotshot-task-impls/src/vid.rs b/hotshot-task-impls/src/vid.rs index 21ffe46eed..8f98e018a7 100644 --- a/hotshot-task-impls/src/vid.rs +++ b/hotshot-task-impls/src/vid.rs @@ -7,17 +7,16 @@ use std::{marker::PhantomData, sync::Arc}; use async_broadcast::{Receiver, Sender}; -use async_lock::RwLock; use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::OuterConsensus, data::{PackedBundle, VidDisperse, VidDisperseShare}, + epoch_membership::EpochMembershipCoordinator, message::{Proposal, UpgradeLock}, simple_vote::HasEpoch, traits::{ block_contents::BlockHeader, - election::Membership, node_implementation::{NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, BlockPayload, @@ -47,7 +46,7 @@ pub struct VidTaskState, V: Versio pub network: Arc, /// Membership for the quorum - pub membership: Arc>, + pub membership_coordinator: EpochMembershipCoordinator, /// This Nodes Public Key pub public_key: TYPES::SignatureKey, @@ -88,10 +87,12 @@ impl, V: Versions> VidTaskState, V: Versions> VidTaskState( &payload, - &Arc::clone(&self.membership), + &self.membership_coordinator, *view_number, epoch, epoch, @@ -210,7 +211,7 @@ impl, V: Versions> VidTaskState( &payload.payload, - &Arc::clone(&self.membership), + &self.membership_coordinator, proposal_view_number, target_epoch, sender_epoch, diff --git a/hotshot-task-impls/src/view_sync.rs b/hotshot-task-impls/src/view_sync.rs index f984bc4dff..8dcc296cfe 100644 --- a/hotshot-task-impls/src/view_sync.rs +++ b/hotshot-task-impls/src/view_sync.rs @@ -16,6 +16,7 @@ use async_lock::RwLock; use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ + epoch_membership::{EpochMembership, EpochMembershipCoordinator}, message::UpgradeLock, simple_certificate::{ ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, @@ -25,7 +26,6 @@ use hotshot_types::{ ViewSyncPreCommitData2, ViewSyncPreCommitVote2, }, traits::{ - election::Membership, node_implementation::{ConsensusTime, NodeType, Versions}, signature_key::SignatureKey, }, @@ -75,7 +75,7 @@ pub struct ViewSyncTaskState { pub cur_epoch: Option, /// Membership for the quorum - pub membership: Arc>, + pub membership_coordinator: EpochMembershipCoordinator, /// This Nodes Public Key pub public_key: TYPES::SignatureKey, @@ -143,9 +143,6 @@ pub struct ViewSyncReplicaTaskState { /// Round HotShot wishes to be in pub next_view: TYPES::View, - /// Current epoch HotShot is in - pub cur_epoch: Option, - /// The relay index we are currently on pub relay: u64, @@ -162,7 +159,7 @@ pub struct ViewSyncReplicaTaskState { pub id: u64, /// Membership for the quorum - pub membership: Arc>, + pub membership: EpochMembership, /// This Nodes Public Key pub public_key: TYPES::SignatureKey, @@ -227,16 +224,27 @@ impl ViewSyncTaskState { return; } + let membership = match self + .membership_coordinator + .membership_for_epoch(self.cur_epoch) + .await + { + Ok(m) => m, + Err(e) => { + tracing::warn!(e.message); + return; + } + }; + // We do not have a replica task already 
running, so start one let mut replica_state: ViewSyncReplicaTaskState = ViewSyncReplicaTaskState { cur_view: view, next_view: view, - cur_epoch: self.cur_epoch, relay: 0, finalized: false, sent_view_change_event: false, timeout_task: None, - membership: Arc::clone(&self.membership), + membership, public_key: self.public_key.clone(), private_key: self.private_key.clone(), view_sync_timeout: self.view_sync_timeout, @@ -310,22 +318,21 @@ impl ViewSyncTaskState { return Ok(()); } + let epoch_mem = self + .membership_coordinator + .membership_for_epoch(self.cur_epoch) + .await?; // We do not have a relay task already running, so start one ensure!( - self.membership - .read() - .await - .leader(vote_view + relay, self.cur_epoch)? - == self.public_key, + epoch_mem.leader(vote_view + relay).await? == self.public_key, "View sync vote sent to wrong leader" ); let info = AccumulatorInfo { public_key: self.public_key.clone(), - membership: Arc::clone(&self.membership), + membership: epoch_mem, view: vote_view, id: self.id, - epoch: vote.data.epoch, }; let vote_collector = create_vote_accumulator( &info, @@ -360,21 +367,20 @@ impl ViewSyncTaskState { } // We do not have a relay task already running, so start one + let epoch_mem = self + .membership_coordinator + .membership_for_epoch(self.cur_epoch) + .await?; ensure!( - self.membership - .read() - .await - .leader(vote_view + relay, self.cur_epoch)? - == self.public_key, + epoch_mem.leader(vote_view + relay).await? == self.public_key, debug!("View sync vote sent to wrong leader") ); let info = AccumulatorInfo { public_key: self.public_key.clone(), - membership: Arc::clone(&self.membership), + membership: epoch_mem, view: vote_view, id: self.id, - epoch: vote.data.epoch, }; let vote_collector = create_vote_accumulator( @@ -408,22 +414,21 @@ impl ViewSyncTaskState { return Ok(()); } + let epoch_mem = self + .membership_coordinator + .membership_for_epoch(self.cur_epoch) + .await?; // We do not have a relay task already running, so start one ensure!( - self.membership - .read() - .await - .leader(vote_view + relay, self.cur_epoch)? - == self.public_key, + epoch_mem.leader(vote_view + relay).await? == self.public_key, debug!("View sync vote sent to wrong leader") ); let info = AccumulatorInfo { public_key: self.public_key.clone(), - membership: Arc::clone(&self.membership), + membership: epoch_mem, view: vote_view, id: self.id, - epoch: vote.data.epoch, }; let vote_collector = create_vote_accumulator( &info, @@ -488,10 +493,11 @@ impl ViewSyncTaskState { self.num_timeouts_tracked += 1; let leader = self - .membership - .read() - .await - .leader(view_number, self.cur_epoch)?; + .membership_coordinator + .membership_for_epoch(self.cur_epoch) + .await? 
+ .leader(view_number) + .await?; tracing::warn!( %leader, leader_mnemonic = hotshot_types::utils::mnemonic(&leader), @@ -531,7 +537,7 @@ impl ViewSyncTaskState { } impl ViewSyncReplicaTaskState { - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = self.cur_epoch.map(|x| *x)), name = "View Sync Replica Task", level = "error")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = self.membership.epoch().map(|x| *x)), name = "View Sync Replica Task", level = "error")] /// Handle incoming events for the view sync replica task pub async fn handle( &mut self, @@ -549,11 +555,8 @@ impl ViewSyncReplicaTaskState { return None; } - let membership_reader = self.membership.read().await; - let membership_stake_table = membership_reader.stake_table(self.cur_epoch); - let membership_failure_threshold = - membership_reader.failure_threshold(self.cur_epoch); - drop(membership_reader); + let membership_stake_table = self.membership.stake_table().await; + let membership_failure_threshold = self.membership.failure_threshold().await; // If certificate is not valid, return current state if let Err(e) = certificate @@ -643,11 +646,8 @@ impl ViewSyncReplicaTaskState { return None; } - let membership_reader = self.membership.read().await; - let membership_stake_table = membership_reader.stake_table(self.cur_epoch); - let membership_success_threshold = - membership_reader.success_threshold(self.cur_epoch); - drop(membership_reader); + let membership_stake_table = self.membership.stake_table().await; + let membership_success_threshold = self.membership.success_threshold().await; // If certificate is not valid, return current state if let Err(e) = certificate @@ -707,7 +707,10 @@ impl ViewSyncReplicaTaskState { // TODO: Figure out the correct way to view sync across epochs if needed broadcast_event( - Arc::new(HotShotEvent::ViewChange(self.next_view, self.cur_epoch)), + Arc::new(HotShotEvent::ViewChange( + self.next_view, + self.membership.epoch(), + )), &event_stream, ) .await; @@ -748,11 +751,8 @@ impl ViewSyncReplicaTaskState { return None; } - let membership_reader = self.membership.read().await; - let membership_stake_table = membership_reader.stake_table(self.cur_epoch); - let membership_success_threshold = - membership_reader.success_threshold(self.cur_epoch); - drop(membership_reader); + let membership_stake_table = self.membership.stake_table().await; + let membership_success_threshold = self.membership.success_threshold().await; // If certificate is not valid, return current state if let Err(e) = certificate @@ -788,7 +788,10 @@ impl ViewSyncReplicaTaskState { // TODO: Figure out the correct way to view sync across epochs if needed broadcast_event( - Arc::new(HotShotEvent::ViewChange(self.next_view, self.cur_epoch)), + Arc::new(HotShotEvent::ViewChange( + self.next_view, + self.membership.epoch(), + )), &event_stream, ) .await; @@ -802,7 +805,7 @@ impl ViewSyncReplicaTaskState { return None; } - let epoch = self.cur_epoch; + let epoch = self.membership.epoch(); let Ok(vote) = ViewSyncPreCommitVote2::::create_signed_vote( ViewSyncPreCommitData2 { relay: 0, @@ -863,7 +866,7 @@ impl ViewSyncReplicaTaskState { ViewSyncPreCommitData2 { relay: self.relay, round: self.next_view, - epoch: self.cur_epoch, + epoch: self.membership.epoch(), }, self.next_view, &self.public_key, diff --git a/hotshot-task-impls/src/vote_collection.rs b/hotshot-task-impls/src/vote_collection.rs index ede632a973..eb8a4cd0aa 100644 --- a/hotshot-task-impls/src/vote_collection.rs +++ 
b/hotshot-task-impls/src/vote_collection.rs @@ -7,15 +7,16 @@ use std::{ collections::{btree_map::Entry, BTreeMap, HashMap}, fmt::Debug, + future::Future, marker::PhantomData, sync::Arc, }; use async_broadcast::Sender; -use async_lock::RwLock; use async_trait::async_trait; use either::Either::{Left, Right}; use hotshot_types::{ + epoch_membership::EpochMembership, message::UpgradeLock, simple_certificate::{ DaCertificate2, NextEpochQuorumCertificate2, QuorumCertificate, QuorumCertificate2, @@ -26,10 +27,7 @@ use hotshot_types::{ DaVote2, NextEpochQuorumVote2, QuorumVote, QuorumVote2, TimeoutVote2, UpgradeVote, ViewSyncCommitVote2, ViewSyncFinalizeVote2, ViewSyncPreCommitVote2, }, - traits::{ - election::Membership, - node_implementation::{NodeType, Versions}, - }, + traits::node_implementation::{ConsensusTime, NodeType, Versions}, utils::EpochTransitionIndicator, vote::{Certificate, HasViewNumber, Vote, VoteAccumulator}, }; @@ -52,7 +50,7 @@ pub struct VoteCollectionTaskState< pub public_key: TYPES::SignatureKey, /// Membership for voting - pub membership: Arc>, + pub membership: EpochMembership, /// accumulator handles aggregating the votes pub accumulator: Option>, @@ -60,9 +58,6 @@ pub struct VoteCollectionTaskState< /// The view which we are collecting votes for pub view: TYPES::View, - /// The epoch which we are collecting votes for - pub epoch: Option, - /// Node id pub id: u64, @@ -83,9 +78,8 @@ pub trait AggregatableVote< /// if the leader cannot be calculated fn leader( &self, - membership: &TYPES::Membership, - epoch: Option, - ) -> Result; + membership: &EpochMembership, + ) -> impl Future>; /// return the Hotshot event for the completion of this CERT fn make_cert_event(certificate: CERT, key: &TYPES::SignatureKey) -> HotShotEvent; @@ -107,14 +101,14 @@ impl< pub async fn accumulate_vote( &mut self, vote: &VOTE, - sender_epoch: Option, event_stream: &Sender>>, ) -> Result> { + // TODO create this only once ensure!( matches!( self.transition_indicator, EpochTransitionIndicator::InTransition - ) || vote.leader(&*self.membership.read().await, self.epoch)? == self.public_key, + ) || vote.leader(&self.membership).await? == self.public_key, info!("Received vote for a view in which we were not the leader.") ); @@ -131,10 +125,7 @@ impl< "No accumulator to handle vote with. This shouldn't happen." ))?; - match accumulator - .accumulate(vote, &self.membership, sender_epoch) - .await - { + match accumulator.accumulate(vote, self.membership.clone()).await { None => Ok(None), Some(cert) => { tracing::debug!("Certificate Formed! 
{:?}", cert); @@ -180,14 +171,11 @@ pub struct AccumulatorInfo { pub public_key: TYPES::SignatureKey, /// Membership we are accumulation votes for - pub membership: Arc>, + pub membership: EpochMembership, /// View of the votes we are collecting pub view: TYPES::View, - /// Epoch of the votes we are collecting - pub epoch: Option, - /// This nodes id pub id: u64, } @@ -229,11 +217,10 @@ where }; let mut state = VoteCollectionTaskState:: { - membership: Arc::clone(&info.membership), + membership: info.membership.clone(), public_key: info.public_key.clone(), accumulator: Some(new_accumulator), view: info.view, - epoch: info.epoch, id: info.id, transition_indicator, }; @@ -261,8 +248,7 @@ pub async fn handle_vote< collectors: &mut VoteCollectorsMap, vote: &VOTE, public_key: TYPES::SignatureKey, - membership: &Arc>, - epoch: Option, + membership: &EpochMembership, id: u64, event: &Arc>, event_stream: &Sender>>, @@ -277,9 +263,8 @@ where tracing::debug!("Starting vote handle for view {:?}", vote.view_number()); let info = AccumulatorInfo { public_key, - membership: Arc::clone(membership), + membership: membership.clone(), view: vote.view_number(), - epoch, id, }; let collector = create_vote_accumulator( @@ -356,12 +341,8 @@ type ViewSyncFinalizeVoteState = VoteCollectionTaskState< impl AggregatableVote, QuorumCertificate> for QuorumVote { - fn leader( - &self, - membership: &TYPES::Membership, - epoch: Option, - ) -> Result { - membership.leader(self.view_number() + 1, epoch) + async fn leader(&self, membership: &EpochMembership) -> Result { + membership.leader(self.view_number() + 1).await } fn make_cert_event( certificate: QuorumCertificate, @@ -374,12 +355,8 @@ impl AggregatableVote, QuorumCertifica impl AggregatableVote, QuorumCertificate2> for QuorumVote2 { - fn leader( - &self, - membership: &TYPES::Membership, - epoch: Option, - ) -> Result { - membership.leader(self.view_number() + 1, epoch) + async fn leader(&self, membership: &EpochMembership) -> Result { + membership.leader(self.view_number() + 1).await } fn make_cert_event( certificate: QuorumCertificate2, @@ -393,12 +370,15 @@ impl AggregatableVote, NextEpochQuorumCertificate2> for NextEpochQuorumVote2 { - fn leader( - &self, - membership: &TYPES::Membership, - epoch: Option, - ) -> Result { - membership.leader(self.view_number() + 1, epoch) + async fn leader(&self, membership: &EpochMembership) -> Result { + let epoch = membership + .epoch + .map(|e| TYPES::Epoch::new(e.saturating_sub(1))); + membership + .get_new_epoch(epoch) + .await? 
+ .leader(self.view_number() + 1) + .await } fn make_cert_event( certificate: NextEpochQuorumCertificate2, @@ -411,12 +391,8 @@ impl impl AggregatableVote, UpgradeCertificate> for UpgradeVote { - fn leader( - &self, - membership: &TYPES::Membership, - epoch: Option, - ) -> Result { - membership.leader(self.view_number(), epoch) + async fn leader(&self, membership: &EpochMembership) -> Result { + membership.leader(self.view_number()).await } fn make_cert_event( certificate: UpgradeCertificate, @@ -429,12 +405,8 @@ impl AggregatableVote, UpgradeCertifi impl AggregatableVote, DaCertificate2> for DaVote2 { - fn leader( - &self, - membership: &TYPES::Membership, - epoch: Option, - ) -> Result { - membership.leader(self.view_number(), epoch) + async fn leader(&self, membership: &EpochMembership) -> Result { + membership.leader(self.view_number()).await } fn make_cert_event( certificate: DaCertificate2, @@ -447,12 +419,8 @@ impl AggregatableVote, DaCertificate2 AggregatableVote, TimeoutCertificate2> for TimeoutVote2 { - fn leader( - &self, - membership: &TYPES::Membership, - epoch: Option, - ) -> Result { - membership.leader(self.view_number() + 1, epoch) + async fn leader(&self, membership: &EpochMembership) -> Result { + membership.leader(self.view_number() + 1).await } fn make_cert_event( certificate: TimeoutCertificate2, @@ -466,12 +434,10 @@ impl AggregatableVote, ViewSyncCommitCertificate2> for ViewSyncCommitVote2 { - fn leader( - &self, - membership: &TYPES::Membership, - epoch: Option, - ) -> Result { - membership.leader(self.date().round + self.date().relay, epoch) + async fn leader(&self, membership: &EpochMembership) -> Result { + membership + .leader(self.date().round + self.date().relay) + .await } fn make_cert_event( certificate: ViewSyncCommitCertificate2, @@ -485,12 +451,10 @@ impl AggregatableVote, ViewSyncPreCommitCertificate2> for ViewSyncPreCommitVote2 { - fn leader( - &self, - membership: &TYPES::Membership, - epoch: Option, - ) -> Result { - membership.leader(self.date().round + self.date().relay, epoch) + async fn leader(&self, membership: &EpochMembership) -> Result { + membership + .leader(self.date().round + self.date().relay) + .await } fn make_cert_event( certificate: ViewSyncPreCommitCertificate2, @@ -504,12 +468,10 @@ impl AggregatableVote, ViewSyncFinalizeCertificate2> for ViewSyncFinalizeVote2 { - fn leader( - &self, - membership: &TYPES::Membership, - epoch: Option, - ) -> Result { - membership.leader(self.date().round + self.date().relay, epoch) + async fn leader(&self, membership: &EpochMembership) -> Result { + membership + .leader(self.date().round + self.date().relay) + .await } fn make_cert_event( certificate: ViewSyncFinalizeCertificate2, @@ -531,9 +493,7 @@ impl sender: &Sender>>, ) -> Result>> { match event.as_ref() { - HotShotEvent::QuorumVoteRecv(vote) => { - self.accumulate_vote(vote, self.epoch, sender).await - } + HotShotEvent::QuorumVoteRecv(vote) => self.accumulate_vote(vote, sender).await, _ => Ok(None), } } @@ -556,12 +516,7 @@ impl match event.as_ref() { HotShotEvent::QuorumVoteRecv(vote) => { // #3967 REVIEW NOTE: Should we error if self.epoch is None? 
- let next_epoch = self - .epoch - .map(|x| x + 1) - .ok_or_else(|| error!("epoch should not be none in handle_vote_event"))?; - self.accumulate_vote(&vote.clone().into(), Some(next_epoch), sender) - .await + self.accumulate_vote(&vote.clone().into(), sender).await } _ => Ok(None), } @@ -583,9 +538,7 @@ impl sender: &Sender>>, ) -> Result>> { match event.as_ref() { - HotShotEvent::UpgradeVoteRecv(vote) => { - self.accumulate_vote(vote, self.epoch, sender).await - } + HotShotEvent::UpgradeVoteRecv(vote) => self.accumulate_vote(vote, sender).await, _ => Ok(None), } } @@ -604,7 +557,7 @@ impl HandleVoteEvent, DaCert sender: &Sender>>, ) -> Result>> { match event.as_ref() { - HotShotEvent::DaVoteRecv(vote) => self.accumulate_vote(vote, self.epoch, sender).await, + HotShotEvent::DaVoteRecv(vote) => self.accumulate_vote(vote, sender).await, _ => Ok(None), } } @@ -624,9 +577,7 @@ impl sender: &Sender>>, ) -> Result>> { match event.as_ref() { - HotShotEvent::TimeoutVoteRecv(vote) => { - self.accumulate_vote(vote, self.epoch, sender).await - } + HotShotEvent::TimeoutVoteRecv(vote) => self.accumulate_vote(vote, sender).await, _ => Ok(None), } } @@ -647,7 +598,7 @@ impl ) -> Result>> { match event.as_ref() { HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => { - self.accumulate_vote(vote, self.epoch, sender).await + self.accumulate_vote(vote, sender).await } _ => Ok(None), } @@ -668,9 +619,7 @@ impl sender: &Sender>>, ) -> Result>> { match event.as_ref() { - HotShotEvent::ViewSyncCommitVoteRecv(vote) => { - self.accumulate_vote(vote, self.epoch, sender).await - } + HotShotEvent::ViewSyncCommitVoteRecv(vote) => self.accumulate_vote(vote, sender).await, _ => Ok(None), } } @@ -691,7 +640,7 @@ impl ) -> Result>> { match event.as_ref() { HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { - self.accumulate_vote(vote, self.epoch, sender).await + self.accumulate_vote(vote, sender).await } _ => Ok(None), } diff --git a/hotshot-testing/src/byzantine/byzantine_behaviour.rs b/hotshot-testing/src/byzantine/byzantine_behaviour.rs index 8fab295bd4..7816b3e354 100644 --- a/hotshot-testing/src/byzantine/byzantine_behaviour.rs +++ b/hotshot-testing/src/byzantine/byzantine_behaviour.rs @@ -339,13 +339,12 @@ impl + std::fmt::Debug, V: Version &self, handle: &mut SystemContextHandle, network: Arc<>::Network>, - membership: Arc>, ) { let network_state: NetworkEventTaskState<_, V, _, _> = NetworkEventTaskState { network, view: TYPES::View::genesis(), epoch: None, - membership, + membership_coordinator: handle.membership_coordinator.clone(), storage: Arc::clone(&handle.storage()), consensus: OuterConsensus::new(handle.consensus()), upgrade_lock: handle.hotshot.upgrade_lock.clone(), diff --git a/hotshot-testing/src/helpers.rs b/hotshot-testing/src/helpers.rs index 082e965b83..7e331a9d65 100644 --- a/hotshot-testing/src/helpers.rs +++ b/hotshot-testing/src/helpers.rs @@ -27,6 +27,7 @@ use hotshot_types::{ consensus::ConsensusMetricsValue, data::{vid_commitment, Leaf2, VidCommitment, VidDisperse, VidDisperseShare}, drb::INITIAL_DRB_RESULT, + epoch_membership::{EpochMembership, EpochMembershipCoordinator}, message::{Proposal, UpgradeLock}, simple_certificate::DaCertificate2, simple_vote::{DaData2, DaVote2, SimpleVote, VersionedVoteData}, @@ -137,6 +138,7 @@ pub async fn build_system_handle_from_launcher< hotshot_config.known_da_nodes.clone(), ))); + let coordinator = EpochMembershipCoordinator::new(memberships, hotshot_config.epoch_height); let node_key_map = launcher.metadata.build_node_key_map(); let (c, s, r) = 
SystemContext::init( @@ -144,7 +146,7 @@ pub async fn build_system_handle_from_launcher< private_key, node_id, hotshot_config, - memberships, + coordinator, network, initializer, ConsensusMetricsValue::default(), @@ -168,18 +170,16 @@ pub async fn build_cert< CERT: Certificate, >( data: DATAType, - membership: &Arc>, + epoch_membership: &EpochMembership, view: TYPES::View, - epoch: Option, public_key: &TYPES::SignatureKey, private_key: &::PrivateKey, upgrade_lock: &UpgradeLock, ) -> CERT { let real_qc_sig = build_assembled_sig::( &data, - membership, + epoch_membership, view, - epoch, upgrade_lock, ) .await; @@ -234,19 +234,16 @@ pub async fn build_assembled_sig< DATAType: Committable + Clone + Eq + Hash + Serialize + Debug + 'static, >( data: &DATAType, - membership: &Arc>, + epoch_membership: &EpochMembership, view: TYPES::View, - epoch: Option, upgrade_lock: &UpgradeLock, ) -> ::QcType { - let membership_reader = membership.read().await; - let stake_table = CERT::stake_table(&*membership_reader, epoch); + let stake_table = CERT::stake_table(epoch_membership).await; let real_qc_pp: ::QcParams = ::public_parameter( StakeTableEntries::::from(stake_table.clone()).0, - U256::from(CERT::threshold(&*membership_reader, epoch)), + U256::from(CERT::threshold(epoch_membership).await), ); - drop(membership_reader); let total_nodes = stake_table.len(); let signers = bitvec![1; total_nodes]; @@ -292,10 +289,9 @@ pub fn key_pair_for_id( } pub async fn da_payload_commitment( - membership: &Arc::Membership>>, + membership: &EpochMembership, transactions: Vec, metadata: &>::Metadata, - epoch_number: Option, version: Version, ) -> VidCommitment { let encoded_transactions = TestTransaction::encode(&transactions); @@ -303,26 +299,25 @@ pub async fn da_payload_commitment( vid_commitment::( &encoded_transactions, &metadata.encode(), - membership.read().await.total_nodes(epoch_number), + membership.total_nodes().await, version, ) } pub async fn build_payload_commitment( - membership: &Arc::Membership>>, + membership: &EpochMembership, view: TYPES::View, - epoch: Option, version: Version, ) -> VidCommitment { // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. 
let encoded_transactions = Vec::new(); - let num_storage_nodes = membership.read().await.committee_members(view, epoch).len(); + let num_storage_nodes = membership.committee_members(view).await.len(); vid_commitment::(&encoded_transactions, &[], num_storage_nodes, version) } pub async fn build_vid_proposal( - membership: &Arc::Membership>>, + membership: &EpochMembership, view_number: TYPES::View, epoch_number: Option, payload: &TYPES::BlockPayload, @@ -335,7 +330,7 @@ pub async fn build_vid_proposal( ) { let vid_disperse = VidDisperse::calculate_vid_disperse::( payload, - membership, + &membership.coordinator, view_number, epoch_number, epoch_number, @@ -369,7 +364,7 @@ pub async fn build_vid_proposal( #[allow(clippy::too_many_arguments)] pub async fn build_da_certificate( - membership: &Arc::Membership>>, + membership: &EpochMembership, view_number: TYPES::View, epoch_number: Option, transactions: Vec, @@ -377,29 +372,27 @@ pub async fn build_da_certificate( public_key: &TYPES::SignatureKey, private_key: &::PrivateKey, upgrade_lock: &UpgradeLock, -) -> DaCertificate2 { +) -> anyhow::Result> { let encoded_transactions = TestTransaction::encode(&transactions); let da_payload_commitment = vid_commitment::( &encoded_transactions, &metadata.encode(), - membership.read().await.total_nodes(epoch_number), + membership.total_nodes().await, upgrade_lock.version_infallible(view_number).await, ); - let next_epoch_da_payload_commitment = if upgrade_lock.epochs_enabled(view_number).await { - Some(vid_commitment::( - &encoded_transactions, - &metadata.encode(), - membership - .read() - .await - .total_nodes(epoch_number.map(|e| e + 1)), - upgrade_lock.version_infallible(view_number).await, - )) - } else { - None - }; + let next_epoch_da_payload_commitment = + if upgrade_lock.epochs_enabled(view_number).await && membership.epoch().is_some() { + Some(vid_commitment::( + &encoded_transactions, + &metadata.encode(), + membership.next_epoch().await?.total_nodes().await, + upgrade_lock.version_infallible(view_number).await, + )) + } else { + None + }; let da_data = DaData2 { payload_commit: da_payload_commitment, @@ -407,16 +400,17 @@ pub async fn build_da_certificate( epoch: epoch_number, }; - build_cert::, DaVote2, DaCertificate2>( - da_data, - membership, - view_number, - epoch_number, - public_key, - private_key, - upgrade_lock, + anyhow::Ok( + build_cert::, DaVote2, DaCertificate2>( + da_data, + membership, + view_number, + public_key, + private_key, + upgrade_lock, + ) + .await, ) - .await } /// This function permutes the provided input vector `inputs`, given some order provided within the diff --git a/hotshot-testing/src/spinning_task.rs b/hotshot-testing/src/spinning_task.rs index 55b51949f9..c13fe23d6b 100644 --- a/hotshot-testing/src/spinning_task.rs +++ b/hotshot-testing/src/spinning_task.rs @@ -243,7 +243,7 @@ where }; let storage = node.handle.storage().clone(); - let memberships = Arc::clone(&node.handle.memberships); + let memberships = node.handle.membership_coordinator.clone(); let config = node.handle.hotshot.config.clone(); let marketplace_config = node.handle.hotshot.marketplace_config.clone(); @@ -297,7 +297,7 @@ where TestRunner::::add_node_with_config_and_channels( node_id, generated_network.clone(), - memberships, + Arc::clone(memberships.membership()), initializer, config, validator_config, diff --git a/hotshot-testing/src/test_builder.rs b/hotshot-testing/src/test_builder.rs index 0d9d5d861f..408c259360 100644 --- a/hotshot-testing/src/test_builder.rs +++ 
b/hotshot-testing/src/test_builder.rs @@ -19,6 +19,7 @@ use hotshot_types::traits::node_implementation::ConsensusTime; use hotshot_types::{ consensus::ConsensusMetricsValue, drb::INITIAL_DRB_RESULT, + epoch_membership::EpochMembershipCoordinator, traits::node_implementation::{NodeType, Versions}, HotShotConfig, PeerConfig, ValidatorConfig, }; @@ -260,6 +261,7 @@ pub async fn create_test_handle< // Get key pair for certificate aggregation let private_key = validator_config.private_key.clone(); let public_key = validator_config.public_key.clone(); + let membership_coordinator = EpochMembershipCoordinator::new(memberships, config.epoch_height); let behaviour = (metadata.behaviour)(node_id); match behaviour { @@ -271,7 +273,7 @@ pub async fn create_test_handle< private_key, node_id, config, - memberships, + membership_coordinator, network, initializer, ConsensusMetricsValue::default(), @@ -290,7 +292,7 @@ pub async fn create_test_handle< private_key, node_id, config, - memberships, + membership_coordinator, network, initializer, ConsensusMetricsValue::default(), @@ -305,7 +307,7 @@ pub async fn create_test_handle< private_key, node_id, config, - memberships, + membership_coordinator, network, initializer, ConsensusMetricsValue::default(), diff --git a/hotshot-testing/src/test_runner.rs b/hotshot-testing/src/test_runner.rs index dd6cf62661..a07cb509c8 100644 --- a/hotshot-testing/src/test_runner.rs +++ b/hotshot-testing/src/test_runner.rs @@ -27,6 +27,7 @@ use hotshot_types::{ consensus::ConsensusMetricsValue, constants::EVENT_CHANNEL_SIZE, data::Leaf2, + epoch_membership::EpochMembershipCoordinator, simple_certificate::QuorumCertificate2, traits::{ election::Membership, @@ -596,13 +597,14 @@ where // Get key pair for certificate aggregation let private_key = validator_config.private_key.clone(); let public_key = validator_config.public_key.clone(); + let epoch_height = config.epoch_height; SystemContext::new( public_key, private_key, node_id, config, - Arc::new(RwLock::new(memberships)), + EpochMembershipCoordinator::new(Arc::new(RwLock::new(memberships)), epoch_height), network, initializer, ConsensusMetricsValue::default(), @@ -634,13 +636,14 @@ where // Get key pair for certificate aggregation let private_key = validator_config.private_key.clone(); let public_key = validator_config.public_key.clone(); + let epoch_height = config.epoch_height; SystemContext::new_from_channels( public_key, private_key, node_id, config, - memberships, + EpochMembershipCoordinator::new(memberships, epoch_height), network, initializer, ConsensusMetricsValue::default(), diff --git a/hotshot-testing/src/view_generator.rs b/hotshot-testing/src/view_generator.rs index 5b55391461..f46d4b1101 100644 --- a/hotshot-testing/src/view_generator.rs +++ b/hotshot-testing/src/view_generator.rs @@ -12,7 +12,6 @@ use std::{ task::{Context, Poll}, }; -use async_lock::RwLock; use committable::Committable; use futures::{FutureExt, Stream}; use hotshot::types::{BLSPubKey, SignatureKey, SystemContextHandle}; @@ -26,6 +25,7 @@ use hotshot_types::{ DaProposal2, EpochNumber, Leaf2, QuorumProposal2, QuorumProposalWrapper, VidDisperse, VidDisperseShare, ViewChangeEvidence2, ViewNumber, }, + epoch_membership::{EpochMembership, EpochMembershipCoordinator}, message::{Proposal, UpgradeLock}, simple_certificate::{ DaCertificate2, QuorumCertificate2, TimeoutCertificate2, UpgradeCertificate, @@ -37,7 +37,6 @@ use hotshot_types::{ }, traits::{ consensus_api::ConsensusApi, - election::Membership, node_implementation::{ConsensusTime, NodeType, 
Versions}, BlockPayload, }, @@ -57,7 +56,7 @@ pub struct TestView { pub leaf: Leaf2, pub view_number: ViewNumber, pub epoch_number: Option, - pub membership: Arc::Membership>>, + pub membership: EpochMembershipCoordinator, pub node_key_map: Arc, pub vid_disperse: Proposal>, pub vid_proposal: ( @@ -76,19 +75,17 @@ pub struct TestView { impl TestView { async fn find_leader_key_pair( - membership: &Arc::Membership>>, + membership: &EpochMembership, node_key_map: &Arc, view_number: ::View, - epoch: Option<::Epoch>, ) -> ( <::SignatureKey as SignatureKey>::PrivateKey, ::SignatureKey, ) { - let membership_reader = membership.read().await; - let leader = membership_reader - .leader(view_number, epoch) + let leader = membership + .leader(view_number) + .await .expect("expected Membership::leader to succeed"); - drop(membership_reader); let sk = node_key_map .get(&leader) @@ -98,7 +95,7 @@ impl TestView { } pub async fn genesis( - membership: &Arc::Membership>>, + membership: &EpochMembershipCoordinator, node_key_map: Arc, ) -> Self { let genesis_view = ViewNumber::new(1); @@ -120,27 +117,28 @@ impl TestView { &block_payload, &metadata, ); - + let epoch_membership = membership + .membership_for_epoch(genesis_epoch) + .await + .unwrap(); //let (private_key, public_key) = key_pair_for_id::(*genesis_view); let (private_key, public_key) = - Self::find_leader_key_pair(membership, &node_key_map, genesis_view, genesis_epoch) - .await; + Self::find_leader_key_pair(&epoch_membership, &node_key_map, genesis_view).await; let leader_public_key = public_key; let genesis_version = upgrade_lock.version_infallible(genesis_view).await; let payload_commitment = da_payload_commitment::( - membership, + &epoch_membership, transactions.clone(), &metadata, - genesis_epoch, genesis_version, ) .await; let (vid_disperse, vid_proposal) = build_vid_proposal::( - membership, + &epoch_membership, genesis_view, genesis_epoch, &block_payload, @@ -151,7 +149,7 @@ impl TestView { .await; let da_certificate = build_da_certificate( - membership, + &epoch_membership, genesis_view, genesis_epoch, transactions.clone(), @@ -160,7 +158,8 @@ impl TestView { &private_key, &upgrade_lock, ) - .await; + .await + .unwrap(); let block_header = TestBlockHeader::new( &Leaf2::::genesis::( @@ -258,8 +257,6 @@ impl TestView { // test view here. 
let next_view = max(old_view, self.view_number) + 1; - let membership = &self.membership; - let transactions = &self.transactions; let quorum_data = QuorumData2 { @@ -268,16 +265,26 @@ impl TestView { }; //let (old_private_key, old_public_key) = key_pair_for_id::(*old_view); - let (old_private_key, old_public_key) = - Self::find_leader_key_pair(&self.membership, &self.node_key_map, old_view, old_epoch) - .await; + let (old_private_key, old_public_key) = Self::find_leader_key_pair( + &self + .membership + .membership_for_epoch(old_epoch) + .await + .unwrap(), + &self.node_key_map, + old_view, + ) + .await; //let (private_key, public_key) = key_pair_for_id::(*next_view); let (private_key, public_key) = Self::find_leader_key_pair( - &self.membership, + &self + .membership + .membership_for_epoch(self.epoch_number) + .await + .unwrap(), &self.node_key_map, next_view, - self.epoch_number, ) .await; @@ -297,17 +304,21 @@ impl TestView { ); let version = self.upgrade_lock.version_infallible(next_view).await; + let membership = self + .membership + .membership_for_epoch(self.epoch_number) + .await + .unwrap(); let payload_commitment = da_payload_commitment::( - membership, + &membership, transactions.clone(), &metadata, - self.epoch_number, version, ) .await; let (vid_disperse, vid_proposal) = build_vid_proposal::( - membership, + &membership, next_view, self.epoch_number, &block_payload, @@ -318,7 +329,7 @@ impl TestView { .await; let da_certificate = build_da_certificate::( - membership, + &membership, next_view, self.epoch_number, transactions.clone(), @@ -327,7 +338,8 @@ impl TestView { &private_key, &self.upgrade_lock, ) - .await; + .await + .unwrap(); let quorum_certificate = build_cert::< TestTypes, @@ -337,9 +349,8 @@ impl TestView { QuorumCertificate2, >( quorum_data, - membership, + &membership, old_view, - self.epoch_number, &old_public_key, &old_private_key, &self.upgrade_lock, @@ -355,9 +366,8 @@ impl TestView { UpgradeCertificate, >( data.clone(), - membership, + &membership, next_view, - self.epoch_number, &public_key, &private_key, &self.upgrade_lock, @@ -378,9 +388,8 @@ impl TestView { ViewSyncFinalizeCertificate2, >( data.clone(), - membership, + &membership, next_view, - self.epoch_number, &public_key, &private_key, &self.upgrade_lock, @@ -401,9 +410,8 @@ impl TestView { TimeoutCertificate2, >( data.clone(), - membership, + &membership, next_view, - self.epoch_number, &public_key, &private_key, &self.upgrade_lock, @@ -562,14 +570,14 @@ impl TestView { pub struct TestViewGenerator { pub current_view: Option, - pub membership: Arc::Membership>>, + pub membership: EpochMembershipCoordinator, pub node_key_map: Arc, pub _pd: PhantomData, } impl TestViewGenerator { pub fn generate( - membership: Arc::Membership>>, + membership: EpochMembershipCoordinator, node_key_map: Arc, ) -> Self { TestViewGenerator { @@ -653,14 +661,14 @@ impl Stream for TestViewGenerator { type Item = TestView; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mem = Arc::clone(&self.membership); + let epoch_membership = self.membership.clone(); let nkm = Arc::clone(&self.node_key_map); let curr_view = &self.current_view.clone(); let mut fut = if let Some(ref view) = curr_view { async move { TestView::next_view(view).await }.boxed() } else { - async move { TestView::genesis::(&mem, nkm).await }.boxed() + async move { TestView::genesis::(&epoch_membership, nkm).await }.boxed() }; match fut.as_mut().poll(cx) { diff --git a/hotshot-testing/tests/tests_1/da_task.rs 
b/hotshot-testing/tests/tests_1/da_task.rs index e01a2f0f7c..28eb3edc58 100644 --- a/hotshot-testing/tests/tests_1/da_task.rs +++ b/hotshot-testing/tests/tests_1/da_task.rs @@ -24,10 +24,7 @@ use hotshot_testing::{ use hotshot_types::{ data::{null_block, PackedBundle, ViewNumber}, simple_vote::DaData2, - traits::{ - election::Membership, - node_implementation::{ConsensusTime, Versions}, - }, + traits::node_implementation::{ConsensusTime, Versions}, }; use vbs::version::{StaticVersionType, Version}; @@ -38,7 +35,7 @@ async fn test_da_task() { let (handle, _, _, node_key_map) = build_system_handle::(2).await; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); let default_version = Version { major: 0, minor: 0 }; // Make some empty encoded transactions, we just care about having a commitment handy for the @@ -48,7 +45,11 @@ async fn test_da_task() { let payload_commit = hotshot_types::data::vid_commitment::( &encoded_transactions, &[], - handle.hotshot.memberships.read().await.total_nodes(None), + membership + .membership_for_epoch(None) + .await.unwrap() + .total_nodes() + .await, default_version, ); @@ -149,7 +150,7 @@ async fn test_da_task_storage_failure() { // Set the error flag here for the system handle. This causes it to emit an error on append. handle.storage().write().await.should_return_err = true; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); let default_version = Version { major: 0, minor: 0 }; // Make some empty encoded transactions, we just care about having a commitment handy for the @@ -159,12 +160,15 @@ async fn test_da_task_storage_failure() { let payload_commit = hotshot_types::data::vid_commitment::( &encoded_transactions, &[], - handle.hotshot.memberships.read().await.total_nodes(None), + membership + .membership_for_epoch(None) + .await.unwrap() + .total_nodes() + .await, default_version, ); - let mut generator = - TestViewGenerator::::generate(Arc::clone(&membership), node_key_map); + let mut generator = TestViewGenerator::::generate(membership.clone(), node_key_map); let mut proposals = Vec::new(); let mut leaders = Vec::new(); diff --git a/hotshot-testing/tests/tests_1/message.rs b/hotshot-testing/tests/tests_1/message.rs index 568b2906f4..5adc897336 100644 --- a/hotshot-testing/tests/tests_1/message.rs +++ b/hotshot-testing/tests/tests_1/message.rs @@ -6,7 +6,6 @@ #[cfg(test)] use std::marker::PhantomData; -use std::sync::Arc; use committable::Committable; use hotshot_example_types::node_types::TestTypes; @@ -69,19 +68,19 @@ async fn test_certificate2_validity() { use hotshot_testing::{helpers::build_system_handle, view_generator::TestViewGenerator}; use hotshot_types::{ data::{Leaf, Leaf2}, - traits::election::Membership, vote::Certificate, }; hotshot::helpers::initialize_logging(); let node_id = 1; + let (handle, _, _, node_key_map) = build_system_handle::(node_id).await; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); let mut generator = - TestViewGenerator::::generate(Arc::clone(&membership), node_key_map); + TestViewGenerator::::generate(membership.clone(), node_key_map); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -104,10 +103,9 @@ async fn test_certificate2_validity() { let qc2 = proposal.data.justify_qc().clone(); let qc = qc2.clone().to_qc(); - let membership_reader = membership.read().await; - let 
membership_stake_table = membership_reader.stake_table(None); - let membership_success_threshold = membership_reader.success_threshold(None); - drop(membership_reader); + let epoch_mem = membership.membership_for_epoch(None).await.unwrap(); + let membership_stake_table = epoch_mem.stake_table().await; + let membership_success_threshold = epoch_mem.success_threshold().await; assert!(qc .is_valid_cert( diff --git a/hotshot-testing/tests/tests_1/network_task.rs b/hotshot-testing/tests/tests_1/network_task.rs index ef3584a55e..78abf7cf57 100644 --- a/hotshot-testing/tests/tests_1/network_task.rs +++ b/hotshot-testing/tests/tests_1/network_task.rs @@ -36,6 +36,7 @@ async fn test_network_task() { use std::collections::BTreeMap; use futures::StreamExt; + use hotshot_types::epoch_membership::EpochMembershipCoordinator; hotshot::helpers::initialize_logging(); @@ -61,12 +62,13 @@ async fn test_network_task() { all_nodes.clone(), all_nodes, ))); + let coordinator = EpochMembershipCoordinator::new(membership, config.epoch_height); let network_state: NetworkEventTaskState, _> = NetworkEventTaskState { network: network.clone(), view: ViewNumber::new(0), epoch: None, - membership: Arc::clone(&membership), + membership_coordinator: coordinator.clone(), upgrade_lock: upgrade_lock.clone(), storage, consensus, @@ -79,7 +81,7 @@ async fn test_network_task() { let task = Task::new(network_state, tx.clone(), rx); task_reg.run_task(task); - let mut generator = TestViewGenerator::::generate(membership, node_key_map); + let mut generator = TestViewGenerator::::generate(coordinator, node_key_map); let view = generator.next().await.unwrap(); let (out_tx_internal, mut out_rx_internal) = async_broadcast::broadcast(10); @@ -208,6 +210,7 @@ async fn test_network_storage_fail() { use std::collections::BTreeMap; use futures::StreamExt; + use hotshot_types::epoch_membership::EpochMembershipCoordinator; hotshot::helpers::initialize_logging(); @@ -233,12 +236,13 @@ async fn test_network_storage_fail() { all_nodes.clone(), all_nodes, ))); + let coordinator = EpochMembershipCoordinator::new(membership, config.epoch_height); let network_state: NetworkEventTaskState, _> = NetworkEventTaskState { network: network.clone(), view: ViewNumber::new(0), epoch: None, - membership: Arc::clone(&membership), + membership_coordinator: coordinator.clone(), upgrade_lock: upgrade_lock.clone(), storage, consensus, @@ -251,7 +255,7 @@ async fn test_network_storage_fail() { let task = Task::new(network_state, tx.clone(), rx); task_reg.run_task(task); - let mut generator = TestViewGenerator::::generate(membership, node_key_map); + let mut generator = TestViewGenerator::::generate(coordinator, node_key_map); let view = generator.next().await.unwrap(); let (out_tx_internal, mut out_rx_internal): (Sender>>, _) = diff --git a/hotshot-testing/tests/tests_1/quorum_proposal_recv_task.rs b/hotshot-testing/tests/tests_1/quorum_proposal_recv_task.rs index d7cd877ddc..73642e0d44 100644 --- a/hotshot-testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/hotshot-testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -53,7 +53,7 @@ async fn test_quorum_proposal_recv_task() { let (handle, _, _, node_key_map) = build_system_handle::(2).await; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); let consensus = handle.hotshot.consensus(); let mut consensus_writer = consensus.write().await; @@ -127,7 +127,7 @@ async fn test_quorum_proposal_recv_task_liveness_check() { let (handle, _, _, 
node_key_map) = build_system_handle::(4).await; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); let consensus = handle.hotshot.consensus(); let mut consensus_writer = consensus.write().await; diff --git a/hotshot-testing/tests/tests_1/quorum_proposal_task.rs b/hotshot-testing/tests/tests_1/quorum_proposal_task.rs index 58a8c6f568..55c79c0c25 100644 --- a/hotshot-testing/tests/tests_1/quorum_proposal_task.rs +++ b/hotshot-testing/tests/tests_1/quorum_proposal_task.rs @@ -47,7 +47,10 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { let (handle, _, _, node_key_map) = build_system_handle::(node_id).await; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); + let epoch_1_mem = membership + .membership_for_epoch(Some(EpochNumber::new(1))) + .await.unwrap(); let version = handle .hotshot .upgrade_lock @@ -55,15 +58,13 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { .await; let payload_commitment = build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(node_id), - None, version, ) .await; - let mut generator = - TestViewGenerator::::generate(Arc::clone(&membership), node_key_map); + let mut generator = TestViewGenerator::::generate(membership.clone(), node_key_map); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -147,7 +148,10 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { let (handle, _, _, node_key_map) = build_system_handle::(node_id).await; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); + let epoch_1_mem = membership + .membership_for_epoch(Some(EpochNumber::new(1))) + .await.unwrap(); let mut generator = TestViewGenerator::::generate(membership.clone(), node_key_map); @@ -202,9 +206,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { Qc2Formed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(1), - None, version_1, ) .await, @@ -223,9 +226,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { Qc2Formed(either::Left(proposals[1].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(2), - None, version_2, ) .await, @@ -242,9 +244,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { Qc2Formed(either::Left(proposals[2].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(3), - None, version_3, ) .await, @@ -261,9 +262,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { Qc2Formed(either::Left(proposals[3].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(4), - None, version_4, ) .await, @@ -280,9 +280,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { Qc2Formed(either::Left(proposals[4].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(5), - None, version_5, ) .await, @@ -324,9 +323,13 @@ async fn test_quorum_proposal_task_qc_timeout() { hotshot::helpers::initialize_logging(); let node_id = 3; + let (handle, _, _, node_key_map) = 
build_system_handle::(node_id).await; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); + let epoch_1_mem = membership + .membership_for_epoch(Some(EpochNumber::new(1))) + .await.unwrap(); let version = handle .hotshot .upgrade_lock @@ -334,16 +337,14 @@ async fn test_quorum_proposal_task_qc_timeout() { .await; let payload_commitment = build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(node_id), - None, version, ) .await; let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - let mut generator = - TestViewGenerator::::generate(Arc::clone(&membership), node_key_map); + let mut generator = TestViewGenerator::::generate(membership.clone(), node_key_map); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -421,7 +422,10 @@ async fn test_quorum_proposal_task_view_sync() { let (handle, _, _, node_key_map) = build_system_handle::(node_id).await; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); + let epoch_1_mem = membership + .membership_for_epoch(Some(EpochNumber::new(1))) + .await.unwrap(); let version = handle .hotshot .upgrade_lock @@ -429,16 +433,14 @@ async fn test_quorum_proposal_task_view_sync() { .await; let payload_commitment = build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(node_id), - None, version, ) .await; let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - let mut generator = - TestViewGenerator::::generate(Arc::clone(&membership), node_key_map); + let mut generator = TestViewGenerator::::generate(membership.clone(), node_key_map); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -516,10 +518,12 @@ async fn test_quorum_proposal_task_liveness_check() { let (handle, _, _, node_key_map) = build_system_handle::(node_id).await; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); + let epoch_1_mem = membership + .membership_for_epoch(Some(EpochNumber::new(1))) + .await.unwrap(); - let mut generator = - TestViewGenerator::::generate(Arc::clone(&membership), node_key_map); + let mut generator = TestViewGenerator::::generate(membership.clone(), node_key_map); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -570,9 +574,8 @@ async fn test_quorum_proposal_task_liveness_check() { Qc2Formed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(1), - None, version_1, ) .await, @@ -591,9 +594,8 @@ async fn test_quorum_proposal_task_liveness_check() { Qc2Formed(either::Left(proposals[1].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(2), - None, version_2, ) .await, @@ -610,9 +612,8 @@ async fn test_quorum_proposal_task_liveness_check() { Qc2Formed(either::Left(proposals[2].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(3), - None, version_3, ) .await, @@ -629,9 +630,8 @@ async fn test_quorum_proposal_task_liveness_check() { Qc2Formed(either::Left(proposals[3].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(4), - None, version_4, ) .await, 
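These hunks all repeat one migration: resolve an `EpochMembership` for epoch 1 up front via the coordinator, then pass it where a shared `&membership` plus a separate `None`/`Some(epoch)` argument used to go. A minimal sketch of that step, written against the `EpochMembershipCoordinator` API this patch introduces (the helper function and its name are illustrative assumptions, not part of the patch):

use hotshot_types::epoch_membership::{EpochMembership, EpochMembershipCoordinator};
use hotshot_types::traits::node_implementation::{ConsensusTime, NodeType};
use hotshot_utils::anytrace::Result;

// Resolve the epoch-1 membership once and reuse it for every
// `build_payload_commitment` call, as the hunks above do.
async fn epoch_1_membership<TYPES: NodeType>(
    coordinator: &EpochMembershipCoordinator<TYPES>,
) -> Result<EpochMembership<TYPES>> {
    // `membership_for_epoch` errs (and kicks off catchup in the background)
    // when the stake table for the requested epoch is not yet available,
    // which is why the test code above simply calls `.unwrap()`.
    coordinator
        .membership_for_epoch(Some(TYPES::Epoch::new(1)))
        .await
}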
@@ -648,9 +648,8 @@ async fn test_quorum_proposal_task_liveness_check() { Qc2Formed(either::Left(proposals[4].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(5), - None, version_5, ) .await, @@ -690,8 +689,7 @@ async fn test_quorum_proposal_task_with_incomplete_events() { let (handle, _, _, node_key_map) = build_system_handle::(2).await; - let membership = Arc::clone(&handle.hotshot.memberships); - + let membership = handle.hotshot.membership_coordinator.clone(); let mut generator = TestViewGenerator::::generate(membership, node_key_map); let mut proposals = Vec::new(); diff --git a/hotshot-testing/tests/tests_1/quorum_vote_task.rs b/hotshot-testing/tests/tests_1/quorum_vote_task.rs index ae8c2bc9ad..170eaa363c 100644 --- a/hotshot-testing/tests/tests_1/quorum_vote_task.rs +++ b/hotshot-testing/tests/tests_1/quorum_vote_task.rs @@ -44,7 +44,7 @@ async fn test_quorum_vote_task_success() { let (handle, _, _, node_key_map) = build_system_handle::(2).await; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); let mut generator = TestViewGenerator::::generate(membership, node_key_map); @@ -110,7 +110,7 @@ async fn test_quorum_vote_task_miss_dependency() { let (handle, _, _, node_key_map) = build_system_handle::(2).await; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); let mut generator = TestViewGenerator::::generate(membership, node_key_map); @@ -193,7 +193,7 @@ async fn test_quorum_vote_task_incorrect_dependency() { let (handle, _, _, node_key_map) = build_system_handle::(2).await; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); let mut generator = TestViewGenerator::::generate(membership, node_key_map); diff --git a/hotshot-testing/tests/tests_1/upgrade_task_with_proposal.rs b/hotshot-testing/tests/tests_1/upgrade_task_with_proposal.rs index 00f970a40b..d8533acee2 100644 --- a/hotshot-testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/hotshot-testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -82,10 +82,12 @@ async fn test_upgrade_task_with_proposal() { let consensus = handle.hotshot.consensus(); let mut consensus_writer = consensus.write().await; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); + let epoch_1_mem = membership + .membership_for_epoch(Some(EpochNumber::new(1))) + .await.unwrap(); - let mut generator = - TestViewGenerator::::generate(Arc::clone(&membership), node_key_map); + let mut generator = TestViewGenerator::::generate(membership.clone(), node_key_map); for view in (&mut generator).take(1).collect::>().await { proposals.push(view.quorum_proposal.clone()); @@ -157,9 +159,8 @@ async fn test_upgrade_task_with_proposal() { Qc2Formed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(1), - None, version_1, ) .await, @@ -178,9 +179,8 @@ async fn test_upgrade_task_with_proposal() { Qc2Formed(either::Left(proposals[1].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(2), - None, version_2, ) .await, @@ -198,9 +198,8 @@ async fn test_upgrade_task_with_proposal() { 
Qc2Formed(either::Left(proposals[2].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(3), - None, version_3, ) .await, diff --git a/hotshot-testing/tests/tests_1/upgrade_task_with_vote.rs b/hotshot-testing/tests/tests_1/upgrade_task_with_vote.rs index 4047f95607..2597ba3690 100644 --- a/hotshot-testing/tests/tests_1/upgrade_task_with_vote.rs +++ b/hotshot-testing/tests/tests_1/upgrade_task_with_vote.rs @@ -69,7 +69,7 @@ async fn test_upgrade_task_with_vote() { let consensus = handle.hotshot.consensus().clone(); let mut consensus_writer = consensus.write().await; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); let mut generator = TestViewGenerator::::generate(membership, node_key_map); for view in (&mut generator).take(2).collect::>().await { diff --git a/hotshot-testing/tests/tests_1/vid_task.rs b/hotshot-testing/tests/tests_1/vid_task.rs index cfde747d7d..b5f7520888 100644 --- a/hotshot-testing/tests/tests_1/vid_task.rs +++ b/hotshot-testing/tests/tests_1/vid_task.rs @@ -44,8 +44,16 @@ async fn test_vid_task() { .0; let pub_key = handle.public_key(); - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); + let default_version = Version { major: 0, minor: 0 }; + + let mut vid = vid_scheme_from_view_number::( + &membership.membership_for_epoch(None).await.unwrap(), + ViewNumber::new(0), + default_version, + ) + .await; let upgrade_lock = UpgradeLock::::new(); let transactions = vec![TestTransaction::new(vec![0])]; diff --git a/hotshot-testing/tests/tests_1/vote_dependency_handle.rs b/hotshot-testing/tests/tests_1/vote_dependency_handle.rs index e6e40f2b8c..447639c3da 100644 --- a/hotshot-testing/tests/tests_1/vote_dependency_handle.rs +++ b/hotshot-testing/tests/tests_1/vote_dependency_handle.rs @@ -35,8 +35,7 @@ async fn test_vote_dependency_handle() { // Construct the system handle for the node ID to build all of the state objects. 
let (handle, _, _, node_key_map) = build_system_handle::(node_id).await; - let membership = Arc::clone(&handle.hotshot.memberships); - + let membership = handle.hotshot.membership_coordinator.clone(); let mut generator = TestViewGenerator::::generate(membership, node_key_map); // Generate our state for the test @@ -89,7 +88,7 @@ async fn test_vote_dependency_handle() { consensus: OuterConsensus::new(consensus.clone()), consensus_metrics: Arc::clone(&consensus.read().await.metrics), instance_state: handle.hotshot.instance_state(), - membership: Arc::clone(&handle.hotshot.memberships), + membership_coordinator: handle.hotshot.membership_coordinator.clone(), storage: Arc::clone(&handle.storage()), view_number, sender: event_sender.clone(), diff --git a/hotshot-types/Cargo.toml b/hotshot-types/Cargo.toml index e236b4acc3..e65ea80623 100644 --- a/hotshot-types/Cargo.toml +++ b/hotshot-types/Cargo.toml @@ -14,6 +14,7 @@ ark-ff = { workspace = true } ark-serialize = { workspace = true } ark-srs = { version = "0.3.1" } ark-std = { workspace = true } +async-broadcast = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } bincode = { workspace = true } diff --git a/hotshot-types/src/consensus.rs b/hotshot-types/src/consensus.rs index c775880206..0849b4d4d9 100644 --- a/hotshot-types/src/consensus.rs +++ b/hotshot-types/src/consensus.rs @@ -23,6 +23,7 @@ pub use crate::utils::{View, ViewInner}; use crate::{ data::{Leaf2, QuorumProposalWrapper, VidCommitment, VidDisperse, VidDisperseShare}, drb::DrbSeedsAndResults, + epoch_membership::EpochMembershipCoordinator, error::HotShotError, event::{HotShotAction, LeafInfo}, message::{Proposal, UpgradeLock}, @@ -962,7 +963,7 @@ impl Consensus { consensus: OuterConsensus, view: ::View, target_epoch: Option<::Epoch>, - membership: Arc>, + membership_coordinator: EpochMembershipCoordinator, private_key: &::PrivateKey, upgrade_lock: &UpgradeLock, ) -> Option<()> { @@ -977,7 +978,7 @@ impl Consensus { let vid = VidDisperse::calculate_vid_disperse::( &payload_with_metadata.payload, - &membership, + &membership_coordinator, view, target_epoch, epoch, diff --git a/hotshot-types/src/data.rs b/hotshot-types/src/data.rs index e61063227f..3deb60f5dd 100644 --- a/hotshot-types/src/data.rs +++ b/hotshot-types/src/data.rs @@ -31,6 +31,7 @@ use vid_disperse::{ADVZDisperse, ADVZDisperseShare, AvidMDisperse, VidDisperseSh use crate::{ drb::DrbResult, + epoch_membership::EpochMembershipCoordinator, impl_has_epoch, impl_has_none_epoch, message::{convert_proposal, Proposal, UpgradeLock}, simple_certificate::{ @@ -428,7 +429,7 @@ impl VidDisperse { #[allow(clippy::panic)] pub async fn calculate_vid_disperse( payload: &TYPES::BlockPayload, - membership: &Arc>, + membership: &EpochMembershipCoordinator, view: TYPES::View, target_epoch: Option, data_epoch: Option, diff --git a/hotshot-types/src/data/vid_disperse.rs b/hotshot-types/src/data/vid_disperse.rs index 0e21767906..f7c90e5c68 100644 --- a/hotshot-types/src/data/vid_disperse.rs +++ b/hotshot-types/src/data/vid_disperse.rs @@ -6,21 +6,21 @@ //! This module provides types for VID disperse related data structures. 
-use std::{collections::BTreeMap, fmt::Debug, hash::Hash, marker::PhantomData, sync::Arc}; +use std::{collections::BTreeMap, fmt::Debug, hash::Hash, marker::PhantomData}; -use async_lock::RwLock; use hotshot_utils::anytrace::*; use jf_vid::{VidDisperse as JfVidDisperse, VidScheme}; use serde::{Deserialize, Serialize}; use tokio::task::spawn_blocking; use crate::{ + epoch_membership::{EpochMembership, EpochMembershipCoordinator}, impl_has_epoch, message::Proposal, simple_vote::HasEpoch, traits::{ - block_contents::EncodeBytes, election::Membership, node_implementation::NodeType, - signature_key::SignatureKey, BlockPayload, + block_contents::EncodeBytes, node_implementation::NodeType, signature_key::SignatureKey, + BlockPayload, }, vid::{ advz::{advz_scheme, ADVZCommitment, ADVZCommon, ADVZScheme, ADVZShare}, @@ -67,14 +67,16 @@ impl ADVZDisperse { async fn from_membership( view_number: TYPES::View, mut vid_disperse: JfVidDisperse, - membership: &Arc>, + membership: &EpochMembershipCoordinator, target_epoch: Option, data_epoch: Option, ) -> Self { let shares = membership - .read() + .membership_for_epoch(target_epoch) + .await + .unwrap() + .committee_members(view_number) .await - .committee_members(view_number, target_epoch) .iter() .map(|node| (node.clone(), vid_disperse.shares.remove(0))) .collect(); @@ -97,12 +99,17 @@ impl ADVZDisperse { #[allow(clippy::panic)] pub async fn calculate_vid_disperse( payload: &TYPES::BlockPayload, - membership: &Arc>, + membership: &EpochMembershipCoordinator, view: TYPES::View, target_epoch: Option, data_epoch: Option, ) -> Result { - let num_nodes = membership.read().await.total_nodes(target_epoch); + let num_nodes = membership + .membership_for_epoch(target_epoch) + .await? + .total_nodes() + .await; + let txns = payload.encode(); let vid_disperse = spawn_blocking(move || advz_scheme(num_nodes).disperse(&txns)) @@ -274,15 +281,14 @@ impl AvidMDisperse { view_number: TYPES::View, commit: AvidMCommitment, shares: &[AvidMShare], - membership: &Arc>, + membership: &EpochMembership, target_epoch: Option, data_epoch: Option, ) -> Self { let payload_byte_len = shares[0].payload_byte_len(); let shares = membership - .read() + .committee_members(view_number) .await - .committee_members(view_number, target_epoch) .iter() .zip(shares) .map(|(node, share)| (node.clone(), share.clone())) @@ -307,13 +313,14 @@ impl AvidMDisperse { #[allow(clippy::single_range_in_vec_init)] pub async fn calculate_vid_disperse( payload: &TYPES::BlockPayload, - membership: &Arc>, + membership: &EpochMembershipCoordinator, view: TYPES::View, target_epoch: Option, data_epoch: Option, metadata: &>::Metadata, ) -> Result { - let num_nodes = membership.read().await.total_nodes(target_epoch); + let target_mem = membership.membership_for_epoch(target_epoch).await?; + let num_nodes = target_mem.total_nodes().await; let txns = payload.encode(); let num_txns = txns.len(); @@ -333,7 +340,7 @@ impl AvidMDisperse { .context(|err| error!("Failed to calculate VID disperse. 
Error: {}", err))?;
         Ok(
-            Self::from_membership(view, commit, &shares, membership, target_epoch, data_epoch)
+            Self::from_membership(view, commit, &shares, &target_mem, target_epoch, data_epoch)
                 .await,
         )
     }
diff --git a/hotshot-types/src/epoch_membership.rs b/hotshot-types/src/epoch_membership.rs
new file mode 100644
index 0000000000..00001fcb8f
--- /dev/null
+++ b/hotshot-types/src/epoch_membership.rs
@@ -0,0 +1,425 @@
+use std::collections::BTreeSet;
+use std::num::NonZeroU64;
+use std::{collections::HashMap, sync::Arc};
+
+use async_broadcast::{broadcast, InactiveReceiver};
+use async_lock::{Mutex, RwLock};
+use hotshot_utils::anytrace::{self, Error, Level, Result, DEFAULT_LOG_LEVEL};
+use hotshot_utils::{ensure, line_info, log, warn};
+
+use crate::drb::DrbResult;
+use crate::traits::election::Membership;
+use crate::traits::node_implementation::{ConsensusTime, NodeType};
+use crate::utils::root_block_in_epoch;
+use crate::PeerConfig;
+
+type EpochMap<TYPES> =
+    HashMap<<TYPES as NodeType>::Epoch, InactiveReceiver<Result<EpochMembership<TYPES>>>>;
+
+/// Struct to coordinate membership catchup
+pub struct EpochMembershipCoordinator<TYPES: NodeType> {
+    /// The underlying membership
+    membership: Arc<RwLock<TYPES::Membership>>,
+
+    /// Any in-progress attempts at catching up are stored in this map.
+    /// Any new callers wanting an `EpochMembership` will await on the signal
+    /// alerting them the membership is ready. The first caller for an epoch will
+    /// wait for the actual catchup and alert future callers when it's done.
+    catchup_map: Arc<Mutex<EpochMap<TYPES>>>,
+
+    /// Number of blocks in an epoch
+    pub epoch_height: u64,
+}
+
+impl<TYPES: NodeType> Clone for EpochMembershipCoordinator<TYPES> {
+    fn clone(&self) -> Self {
+        Self {
+            membership: Arc::clone(&self.membership),
+            catchup_map: Arc::clone(&self.catchup_map),
+            epoch_height: self.epoch_height,
+        }
+    }
+}
+// async fn catchup_membership(coordinator: EpochMembershipCoordinator) {
+
+// }
+
+impl<TYPES: NodeType> EpochMembershipCoordinator<TYPES>
+where
+    Self: Send,
+{
+    /// Create an EpochMembershipCoordinator
+    pub fn new(membership: Arc<RwLock<TYPES::Membership>>, epoch_height: u64) -> Self {
+        Self {
+            membership,
+            catchup_map: Arc::default(),
+            epoch_height,
+        }
+    }
+
+    /// Get a reference to the membership
+    #[must_use]
+    pub fn membership(&self) -> &Arc<RwLock<TYPES::Membership>> {
+        &self.membership
+    }
+
+    /// Get a Membership for a given Epoch, which is guaranteed to have a stake
+    /// table for the given Epoch
+    pub async fn membership_for_epoch(
+        &self,
+        maybe_epoch: Option<TYPES::Epoch>,
+    ) -> Result<EpochMembership<TYPES>> {
+        let ret_val = EpochMembership {
+            epoch: maybe_epoch,
+            coordinator: self.clone(),
+        };
+        let Some(epoch) = maybe_epoch else {
+            return Ok(ret_val);
+        };
+        if self.membership.read().await.has_epoch(epoch) {
+            return Ok(ret_val);
+        }
+        if self.catchup_map.lock().await.contains_key(&epoch) {
+            return Err(warn!(
+                "Stake table for Epoch {:?} Unavailable. Catch up already in Progress",
+                epoch
+            ));
+        }
+        let coordinator = self.clone();
+        spawn_catchup(coordinator, epoch);
+
+        Err(warn!(
+            "Stake table for Epoch {:?} Unavailable. Starting catchup",
+            epoch
+        ))
+    }
+
+    /// Catches the membership up to the epoch passed as an argument.
+    /// To do this, try to get the stake table for the epoch containing this epoch's root;
+    /// if the root does not exist, recursively catch up until you've found it.
+    ///
+    /// If there is another catchup in progress this will not duplicate efforts,
+    /// e.g. if we start with only the epoch 0 stake table and call catchup for epoch 10,
+    /// then call catchup for epoch 20: the first caller will actually do the work
+    /// to catch up to epoch 10, then the second caller will continue catching up
+    /// to epoch 20.
+    async fn catchup(self, epoch: TYPES::Epoch) -> Result<EpochMembership<TYPES>> {
+        // recursively catchup until we have a stake table for the epoch containing our root
+        ensure!(
+            *epoch != 0 && *epoch != 1,
+            "We are trying to catchup to epoch 0! This means the initial stake table is missing!"
+        );
+        let root_epoch = TYPES::Epoch::new(*epoch - 2);
+
+        let root_membership = if self.membership.read().await.has_epoch(root_epoch) {
+            EpochMembership {
+                epoch: Some(root_epoch),
+                coordinator: self.clone(),
+            }
+        } else {
+            Box::pin(self.wait_for_catchup(root_epoch)).await?
+        };
+
+        // Get the epoch root headers and update our membership with them, finally sync them
+        // Verification of the root is handled in get_epoch_root
+        let (next_epoch, header) = root_membership
+            .get_epoch_root(root_block_in_epoch(*root_epoch, self.epoch_height))
+            .await
+            .ok_or(anytrace::warn!("get epoch root failed"))?;
+        let updater = self
+            .membership
+            .read()
+            .await
+            .add_epoch_root(next_epoch, header)
+            .await
+            .ok_or(anytrace::warn!("add epoch root failed"))?;
+        updater(&mut *(self.membership.write().await));
+
+        Ok(EpochMembership {
+            epoch: Some(epoch),
+            coordinator: self.clone(),
+        })
+    }
+
+    /// Wait for an in-progress catchup for `epoch` to finish, starting one if
+    /// none is currently running.
+    pub async fn wait_for_catchup(&self, epoch: TYPES::Epoch) -> Result<EpochMembership<TYPES>> {
+        let Some(mut rx) = self
+            .catchup_map
+            .lock()
+            .await
+            .get(&epoch)
+            .map(InactiveReceiver::activate_cloned)
+        else {
+            return self.clone().catchup(epoch).await;
+        };
+        let Ok(Ok(mem)) = rx.recv_direct().await else {
+            return self.clone().catchup(epoch).await;
+        };
+        Ok(mem)
+    }
+}
+
+fn spawn_catchup<T: NodeType>(coordinator: EpochMembershipCoordinator<T>, epoch: T::Epoch) {
+    tokio::spawn(async move {
+        let tx = {
+            let mut map = coordinator.catchup_map.lock().await;
+            if map.contains_key(&epoch) {
+                return;
+            }
+            let (tx, rx) = broadcast(1);
+            map.insert(epoch, rx.deactivate());
+            tx
+        };
+        // do catchup
+        let ret = coordinator.catchup(epoch).await;
+        let _ = tx.broadcast_direct(ret).await;
+    });
+}
+/// Wrapper around a membership that guarantees that the epoch
+/// has a stake table
+pub struct EpochMembership<TYPES: NodeType> {
+    /// Epoch the `membership` is guaranteed to have a stake table for
+    pub epoch: Option<TYPES::Epoch>,
+    /// Underlying membership coordinator
+    pub coordinator: EpochMembershipCoordinator<TYPES>,
+}
+
+impl<TYPES: NodeType> Clone for EpochMembership<TYPES> {
+    fn clone(&self) -> Self {
+        Self {
+            coordinator: self.coordinator.clone(),
+            epoch: self.epoch,
+        }
+    }
+}
+
+impl<TYPES: NodeType> EpochMembership<TYPES> {
+    /// Get the epoch this membership is good for
+    pub fn epoch(&self) -> Option<TYPES::Epoch> {
+        self.epoch
+    }
+
+    /// Get a membership for the next epoch
+    pub async fn next_epoch(&self) -> Result<Self> {
+        ensure!(
+            self.epoch().is_some(),
+            "No next epoch because epoch is None"
+        );
+        self.coordinator
+            .membership_for_epoch(self.epoch.map(|e| e + 1))
+            .await
+    }
+
+    /// Get a membership for an arbitrary epoch via the coordinator
+    pub async fn get_new_epoch(&self, epoch: Option<TYPES::Epoch>) -> Result<Self> {
+        self.coordinator.membership_for_epoch(epoch).await
+    }
+
+    /// Wraps the same-named Membership trait fn
+    async fn get_epoch_root(
+        &self,
+        block_height: u64,
+    ) -> Option<(TYPES::Epoch, TYPES::BlockHeader)> {
+        self.coordinator
+            .membership
+            .read()
+            .await
+            .get_epoch_root(block_height)
+            .await
+    }
+
+    /// Get all participants in the committee (including their stake) for a specific epoch
+    pub async fn stake_table(&self) -> Vec<PeerConfig<TYPES::SignatureKey>> {
+        self.coordinator
+            .membership
+ .read() + .await + .stake_table(self.epoch) + } + + /// Get all participants in the committee (including their stake) for a specific epoch + pub async fn da_stake_table(&self) -> Vec> { + self.coordinator + .membership + .read() + .await + .da_stake_table(self.epoch) + } + + /// Get all participants in the committee for a specific view for a specific epoch + pub async fn committee_members( + &self, + view_number: TYPES::View, + ) -> BTreeSet { + self.coordinator + .membership + .read() + .await + .committee_members(view_number, self.epoch) + } + + /// Get all participants in the committee for a specific view for a specific epoch + pub async fn da_committee_members( + &self, + view_number: TYPES::View, + ) -> BTreeSet { + self.coordinator + .membership + .read() + .await + .da_committee_members(view_number, self.epoch) + } + + /// Get all leaders in the committee for a specific view for a specific epoch + pub async fn committee_leaders( + &self, + view_number: TYPES::View, + ) -> BTreeSet { + self.coordinator + .membership + .read() + .await + .committee_leaders(view_number, self.epoch) + } + + /// Get the stake table entry for a public key, returns `None` if the + /// key is not in the table for a specific epoch + pub async fn stake( + &self, + pub_key: &TYPES::SignatureKey, + ) -> Option> { + self.coordinator + .membership + .read() + .await + .stake(pub_key, self.epoch) + } + + /// Get the DA stake table entry for a public key, returns `None` if the + /// key is not in the table for a specific epoch + pub async fn da_stake( + &self, + pub_key: &TYPES::SignatureKey, + ) -> Option> { + self.coordinator + .membership + .read() + .await + .da_stake(pub_key, self.epoch) + } + + /// See if a node has stake in the committee in a specific epoch + pub async fn has_stake(&self, pub_key: &TYPES::SignatureKey) -> bool { + self.coordinator + .membership + .read() + .await + .has_stake(pub_key, self.epoch) + } + + /// See if a node has stake in the committee in a specific epoch + pub async fn has_da_stake(&self, pub_key: &TYPES::SignatureKey) -> bool { + self.coordinator + .membership + .read() + .await + .has_da_stake(pub_key, self.epoch) + } + + /// The leader of the committee for view `view_number` in `epoch`. + /// + /// Note: this function uses a HotShot-internal error type. + /// You should implement `lookup_leader`, rather than implementing this function directly. + /// + /// # Errors + /// Returns an error if the leader cannot be calculated. + pub async fn leader(&self, view: TYPES::View) -> Result { + self.coordinator + .membership + .read() + .await + .leader(view, self.epoch) + } + + /// The leader of the committee for view `view_number` in `epoch`. + /// + /// Note: There is no such thing as a DA leader, so any consumer + /// requiring a leader should call this. 
+ /// + /// # Errors + /// Returns an error if the leader cannot be calculated + pub async fn lookup_leader( + &self, + view: TYPES::View, + ) -> std::result::Result< + TYPES::SignatureKey, + <::Membership as Membership>::Error, + > { + self.coordinator + .membership + .read() + .await + .lookup_leader(view, self.epoch) + } + + /// Returns the number of total nodes in the committee in an epoch `epoch` + pub async fn total_nodes(&self) -> usize { + self.coordinator + .membership + .read() + .await + .total_nodes(self.epoch) + } + + /// Returns the number of total DA nodes in the committee in an epoch `epoch` + pub async fn da_total_nodes(&self) -> usize { + self.coordinator + .membership + .read() + .await + .da_total_nodes(self.epoch) + } + + /// Returns the threshold for a specific `Membership` implementation + pub async fn success_threshold(&self) -> NonZeroU64 { + self.coordinator + .membership + .read() + .await + .success_threshold(self.epoch) + } + + /// Returns the DA threshold for a specific `Membership` implementation + pub async fn da_success_threshold(&self) -> NonZeroU64 { + self.coordinator + .membership + .read() + .await + .da_success_threshold(self.epoch) + } + + /// Returns the threshold for a specific `Membership` implementation + pub async fn failure_threshold(&self) -> NonZeroU64 { + self.coordinator + .membership + .read() + .await + .failure_threshold(self.epoch) + } + + /// Returns the threshold required to upgrade the network protocol + pub async fn upgrade_threshold(&self) -> NonZeroU64 { + self.coordinator + .membership + .read() + .await + .upgrade_threshold(self.epoch) + } + + /// Add the epoch result to the membership + pub async fn add_drb_result(&self, drb_result: DrbResult) { + if let Some(epoch) = self.epoch() { + self.coordinator + .membership + .write() + .await + .add_drb_result(epoch, drb_result) + } + } +} diff --git a/hotshot-types/src/lib.rs b/hotshot-types/src/lib.rs index 9066f14e08..eac8950a5e 100644 --- a/hotshot-types/src/lib.rs +++ b/hotshot-types/src/lib.rs @@ -22,6 +22,8 @@ pub mod constants; pub mod data; /// Holds the types and functions for DRB computation. pub mod drb; +/// Epoch Membership wrappers +pub mod epoch_membership; pub mod error; pub mod event; /// Holds the configuration file specification for a HotShot node. diff --git a/hotshot-types/src/message.rs b/hotshot-types/src/message.rs index b2ecca7840..12e07efd87 100644 --- a/hotshot-types/src/message.rs +++ b/hotshot-types/src/message.rs @@ -30,6 +30,7 @@ use crate::{ DaProposal, DaProposal2, Leaf, Leaf2, QuorumProposal, QuorumProposal2, QuorumProposalWrapper, UpgradeProposal, }, + epoch_membership::EpochMembership, request_response::ProposalRequestPayload, simple_certificate::{ DaCertificate, DaCertificate2, NextEpochQuorumCertificate2, QuorumCertificate2, @@ -43,13 +44,12 @@ use crate::{ ViewSyncPreCommitVote, ViewSyncPreCommitVote2, }, traits::{ - block_contents::BlockHeader, election::Membership, network::{DataRequest, ResponseMessage, ViewMessage}, node_implementation::{ConsensusTime, NodeType, Versions}, signature_key::SignatureKey, }, - utils::{mnemonic, option_epoch_from_block_number}, + utils::mnemonic, vote::HasViewNumber, }; @@ -588,18 +588,9 @@ where /// Checks that the signature of the quorum proposal is valid. /// # Errors /// Returns an error when the proposal signature is invalid. 
- pub fn validate_signature( - &self, - membership: &TYPES::Membership, - epoch_height: u64, - ) -> Result<()> { + pub async fn validate_signature(&self, membership: &EpochMembership) -> Result<()> { let view_number = self.data.proposal.view_number(); - let proposal_epoch = option_epoch_from_block_number::( - self.data.proposal.epoch().is_some(), - self.data.block_header().block_number(), - epoch_height, - ); - let view_leader_key = membership.leader(view_number, proposal_epoch)?; + let view_leader_key = membership.leader(view_number).await?; let proposed_leaf = Leaf2::from_quorum_proposal(&self.data); ensure!( diff --git a/hotshot-types/src/simple_certificate.rs b/hotshot-types/src/simple_certificate.rs index 82092027b5..2c9dbeaa63 100644 --- a/hotshot-types/src/simple_certificate.rs +++ b/hotshot-types/src/simple_certificate.rs @@ -8,6 +8,7 @@ use std::{ fmt::{self, Debug, Display, Formatter}, + future::Future, hash::Hash, marker::PhantomData, num::NonZeroU64, @@ -22,6 +23,7 @@ use serde::{Deserialize, Serialize}; use crate::{ data::serialize_signature2, + epoch_membership::EpochMembership, message::UpgradeLock, simple_vote::{ DaData, DaData2, HasEpoch, NextEpochQuorumData2, QuorumData, QuorumData2, QuorumMarker, @@ -30,7 +32,6 @@ use crate::{ ViewSyncPreCommitData2, Voteable, }, traits::{ - election::Membership, node_implementation::{ConsensusTime, NodeType, Versions}, signature_key::SignatureKey, }, @@ -41,10 +42,7 @@ use crate::{ /// Trait which allows use to inject different threshold calculations into a Certificate type pub trait Threshold { /// Calculate a threshold based on the membership - fn threshold>( - membership: &MEMBERSHIP, - epoch: Option, - ) -> u64; + fn threshold(membership: &EpochMembership) -> impl Future + Send; } /// Defines a threshold which is 2f + 1 (Amount needed for Quorum) @@ -52,11 +50,8 @@ pub trait Threshold { pub struct SuccessThreshold {} impl Threshold for SuccessThreshold { - fn threshold>( - membership: &MEMBERSHIP, - epoch: Option, - ) -> u64 { - membership.success_threshold(epoch).into() + async fn threshold(membership: &EpochMembership) -> u64 { + membership.success_threshold().await.into() } } @@ -65,11 +60,8 @@ impl Threshold for SuccessThreshold { pub struct OneHonestThreshold {} impl Threshold for OneHonestThreshold { - fn threshold>( - membership: &MEMBERSHIP, - epoch: Option, - ) -> u64 { - membership.failure_threshold(epoch).into() + async fn threshold(membership: &EpochMembership) -> u64 { + membership.failure_threshold().await.into() } } @@ -78,11 +70,8 @@ impl Threshold for OneHonestThreshold { pub struct UpgradeThreshold {} impl Threshold for UpgradeThreshold { - fn threshold>( - membership: &MEMBERSHIP, - epoch: Option, - ) -> u64 { - membership.upgrade_threshold(epoch).into() + async fn threshold(membership: &EpochMembership) -> u64 { + membership.upgrade_threshold().await.into() } } @@ -189,33 +178,25 @@ impl> Certificate .context(|e| warn!("Signature check failed: {}", e)) } /// Proxy's to `Membership.stake` - fn stake_table_entry>( - membership: &MEMBERSHIP, + async fn stake_table_entry( + membership: &EpochMembership, pub_key: &TYPES::SignatureKey, - epoch: Option, ) -> Option> { - membership.da_stake(pub_key, epoch) + membership.da_stake(pub_key).await } /// Proxy's to `Membership.da_stake_table` - fn stake_table>( - membership: &MEMBERSHIP, - epoch: Option, + async fn stake_table( + membership: &EpochMembership, ) -> Vec> { - membership.da_stake_table(epoch) + membership.da_stake_table().await } /// Proxy's to 
`Membership.da_total_nodes` - fn total_nodes>( - membership: &MEMBERSHIP, - epoch: Option, - ) -> usize { - membership.da_total_nodes(epoch) + async fn total_nodes(membership: &EpochMembership) -> usize { + membership.da_total_nodes().await } - fn threshold>( - membership: &MEMBERSHIP, - epoch: Option, - ) -> u64 { - membership.da_success_threshold(epoch).into() + async fn threshold(membership: &EpochMembership) -> u64 { + membership.da_success_threshold().await.into() } fn data(&self) -> &Self::Voteable { &self.data @@ -278,33 +259,25 @@ impl> Certificate>( - membership: &MEMBERSHIP, + async fn stake_table_entry( + membership: &EpochMembership, pub_key: &TYPES::SignatureKey, - epoch: Option, ) -> Option> { - membership.da_stake(pub_key, epoch) + membership.da_stake(pub_key).await } /// Proxy's to `Membership.da_stake_table` - fn stake_table>( - membership: &MEMBERSHIP, - epoch: Option, + async fn stake_table( + membership: &EpochMembership, ) -> Vec> { - membership.da_stake_table(epoch) + membership.da_stake_table().await } /// Proxy's to `Membership.da_total_nodes` - fn total_nodes>( - membership: &MEMBERSHIP, - epoch: Option, - ) -> usize { - membership.da_total_nodes(epoch) + async fn total_nodes(membership: &EpochMembership) -> usize { + membership.da_total_nodes().await } - fn threshold>( - membership: &MEMBERSHIP, - epoch: Option, - ) -> u64 { - membership.da_success_threshold(epoch).into() + async fn threshold(membership: &EpochMembership) -> u64 { + membership.da_success_threshold().await.into() } fn data(&self) -> &Self::Voteable { &self.data @@ -369,34 +342,26 @@ impl< .wrap() .context(|e| warn!("Signature check failed: {}", e)) } - fn threshold>( - membership: &MEMBERSHIP, - epoch: Option, - ) -> u64 { - THRESHOLD::threshold(membership, epoch) + async fn threshold(membership: &EpochMembership) -> u64 { + THRESHOLD::threshold(membership).await } - fn stake_table_entry>( - membership: &MEMBERSHIP, + async fn stake_table_entry( + membership: &EpochMembership, pub_key: &TYPES::SignatureKey, - epoch: Option, ) -> Option> { - membership.stake(pub_key, epoch) + membership.stake(pub_key).await } - fn stake_table>( - membership: &MEMBERSHIP, - epoch: Option, + async fn stake_table( + membership: &EpochMembership, ) -> Vec> { - membership.stake_table(epoch) + membership.stake_table().await } /// Proxy's to `Membership.total_nodes` - fn total_nodes>( - membership: &MEMBERSHIP, - epoch: Option, - ) -> usize { - membership.total_nodes(epoch) + async fn total_nodes(membership: &EpochMembership) -> usize { + membership.total_nodes().await } fn data(&self) -> &Self::Voteable { @@ -467,15 +432,14 @@ impl UpgradeCertificate { /// Returns an error when the upgrade certificate is invalid. 
pub async fn validate( upgrade_certificate: &Option, - membership: &RwLock, + membership: &EpochMembership, epoch: Option, upgrade_lock: &UpgradeLock, ) -> Result<()> { + ensure!(epoch == membership.epoch(), "Epochs don't match!"); if let Some(ref cert) = upgrade_certificate { - let membership_reader = membership.read().await; - let membership_stake_table = membership_reader.stake_table(epoch); - let membership_upgrade_threshold = membership_reader.upgrade_threshold(epoch); - drop(membership_reader); + let membership_stake_table = membership.stake_table().await; + let membership_upgrade_threshold = membership.upgrade_threshold().await; cert.is_valid_cert( StakeTableEntries::::from(membership_stake_table).0, diff --git a/hotshot-types/src/traits/election.rs b/hotshot-types/src/traits/election.rs index b57065f435..b0f162f170 100644 --- a/hotshot-types/src/traits/election.rs +++ b/hotshot-types/src/traits/election.rs @@ -7,13 +7,11 @@ //! The election trait, used to decide which node is the leader and determine if a vote is valid. use std::{collections::BTreeSet, fmt::Debug, num::NonZeroU64}; -use async_trait::async_trait; use hotshot_utils::anytrace::Result; use super::node_implementation::NodeType; use crate::{drb::DrbResult, PeerConfig}; -#[async_trait] /// A protocol for determining membership in and participating in a committee. pub trait Membership: Debug + Send + Sync { /// The error type returned by methods like `lookup_leader`. @@ -125,18 +123,28 @@ pub trait Membership: Debug + Send + Sync { /// Returns the threshold required to upgrade the network protocol fn upgrade_threshold(&self, epoch: Option) -> NonZeroU64; + /// Returns if the stake table is available for the current Epoch + fn has_epoch(&self, epoch: TYPES::Epoch) -> bool; + + /// Gets the validated block header and epoch number of the epoch root + /// at the given block height + fn get_epoch_root( + &self, + block_height: u64, + ) -> impl std::future::Future> + Send; + #[allow(clippy::type_complexity)] /// Handles notifications that a new epoch root has been created /// Is called under a read lock to the Membership. Return a callback /// with Some to have that callback invoked under a write lock. /// /// #3967 REVIEW NOTE: this is only called if epoch is Some. Is there any reason to do otherwise? - async fn add_epoch_root( + fn add_epoch_root( &self, _epoch: TYPES::Epoch, _block_header: TYPES::BlockHeader, - ) -> Option> { - None + ) -> impl std::future::Future>> + Send { + async { None } } /// Called to notify the Membership when a new DRB result has been calculated. 
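Taken together with the coordinator, the intended flow for these new trait methods is: fetch the validated root header with `get_epoch_root`, check `has_epoch` for a fast path, then apply the callback returned by `add_epoch_root` under a write lock so the read lock can be released first. A condensed sketch of that sequence, using only the trait surface defined in this patch (the free function and its name are illustrative, and error handling is collapsed to `Option`):

use std::sync::Arc;

use async_lock::RwLock;
use hotshot_types::traits::{election::Membership, node_implementation::NodeType};

// Mirrors the shape of `EpochMembershipCoordinator::catchup` from earlier in
// this patch, reduced to the three new `Membership` trait calls.
async fn install_epoch_root<TYPES: NodeType>(
    membership: &Arc<RwLock<TYPES::Membership>>,
    root_block_height: u64,
) -> Option<TYPES::Epoch> {
    // The membership reports which epoch the root header at this height seeds.
    let (next_epoch, header) = membership
        .read()
        .await
        .get_epoch_root(root_block_height)
        .await?;
    // Fast path: the stake table for that epoch is already installed.
    if membership.read().await.has_epoch(next_epoch) {
        return Some(next_epoch);
    }
    // `add_epoch_root` runs under the read lock and hands back a closure;
    // only applying that closure requires the write lock.
    let updater = membership
        .read()
        .await
        .add_epoch_root(next_epoch, header)
        .await?;
    updater(&mut *membership.write().await);
    Some(next_epoch)
}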
diff --git a/hotshot-types/src/traits/network.rs b/hotshot-types/src/traits/network.rs index 4d85ee82dc..fc9086106f 100644 --- a/hotshot-types/src/traits/network.rs +++ b/hotshot-types/src/traits/network.rs @@ -17,7 +17,6 @@ use std::{ time::Duration, }; -use async_lock::RwLock; use async_trait::async_trait; use dyn_clone::DynClone; use futures::{future::join_all, Future}; @@ -30,7 +29,10 @@ use thiserror::Error; use tokio::{sync::mpsc::error::TrySendError, time::sleep}; use super::{node_implementation::NodeType, signature_key::SignatureKey}; -use crate::{data::ViewNumber, message::SequencingMessage, BoxSyncFuture}; +use crate::{ + data::ViewNumber, epoch_membership::EpochMembershipCoordinator, message::SequencingMessage, + BoxSyncFuture, +}; /// Centralized server specific errors #[derive(Debug, Error, Serialize, Deserialize)] @@ -263,7 +265,7 @@ pub trait ConnectedNetwork: Clone + Send + Sync + 'st &'a self, _view: u64, _epoch: Option, - _membership: Arc>, + _membership_coordinator: EpochMembershipCoordinator, ) where TYPES: NodeType + 'a, { diff --git a/hotshot-types/src/utils.rs b/hotshot-types/src/utils.rs index 5f4031e77b..28b9b72aea 100644 --- a/hotshot-types/src/utils.rs +++ b/hotshot-types/src/utils.rs @@ -6,12 +6,6 @@ //! Utility functions, type aliases, helper structs and enum definitions. -use std::{ - hash::{Hash, Hasher}, - ops::Deref, - sync::Arc, -}; - use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use bincode::{ config::{ @@ -24,6 +18,11 @@ use committable::Commitment; use digest::OutputSizeUser; use serde::{Deserialize, Serialize}; use sha2::Digest; +use std::{ + hash::{Hash, Hasher}, + ops::Deref, + sync::Arc, +}; use tagged_base64::tagged; use typenum::Unsigned; use vbs::version::StaticVersionType; diff --git a/hotshot-types/src/vote.rs b/hotshot-types/src/vote.rs index 789615ab80..e33e66f57f 100644 --- a/hotshot-types/src/vote.rs +++ b/hotshot-types/src/vote.rs @@ -8,12 +8,11 @@ use std::{ collections::{BTreeMap, HashMap}, + future::Future, marker::PhantomData, num::NonZeroU64, - sync::Arc, }; -use async_lock::RwLock; use bitvec::{bitvec, vec::BitVec}; use committable::{Commitment, Committable}; use hotshot_utils::anytrace::*; @@ -21,11 +20,11 @@ use primitive_types::U256; use tracing::error; use crate::{ + epoch_membership::EpochMembership, message::UpgradeLock, simple_certificate::Threshold, simple_vote::{VersionedVoteData, Voteable}, traits::{ - election::Membership, node_implementation::{NodeType, Versions}, signature_key::{SignatureKey, StakeTableEntryType}, }, @@ -83,29 +82,21 @@ pub trait Certificate: HasViewNumber { ) -> impl std::future::Future>; /// Returns the amount of stake needed to create this certificate // TODO: Make this a static ratio of the total stake of `Membership` - fn threshold>( - membership: &MEMBERSHIP, - epoch: Option, - ) -> u64; + fn threshold(membership: &EpochMembership) -> impl Future + Send; /// Get Stake Table from Membership implementation. - fn stake_table>( - membership: &MEMBERSHIP, - epoch: Option, - ) -> Vec>; + fn stake_table( + membership: &EpochMembership, + ) -> impl Future>> + Send; /// Get Total Nodes from Membership implementation. - fn total_nodes>( - membership: &MEMBERSHIP, - epoch: Option, - ) -> usize; + fn total_nodes(membership: &EpochMembership) -> impl Future + Send; /// Get `StakeTableEntry` from Membership implementation. 
- fn stake_table_entry>( - membership: &MEMBERSHIP, + fn stake_table_entry( + membership: &EpochMembership, pub_key: &TYPES::SignatureKey, - epoch: Option, - ) -> Option>; + ) -> impl Future>> + Send; /// Get the commitment which was voted on fn data(&self) -> &Self::Voteable; @@ -164,8 +155,7 @@ impl< pub async fn accumulate( &mut self, vote: &VOTE, - membership: &Arc>, - epoch: Option, + membership: EpochMembership, ) -> Option { let key = vote.signing_key(); @@ -188,12 +178,10 @@ impl< return None; } - let membership_reader = membership.read().await; - let stake_table_entry = CERT::stake_table_entry(&*membership_reader, &key, epoch)?; - let stake_table = CERT::stake_table(&*membership_reader, epoch); - let total_nodes = CERT::total_nodes(&*membership_reader, epoch); - let threshold = CERT::threshold(&*membership_reader, epoch); - drop(membership_reader); + let stake_table_entry = CERT::stake_table_entry(&membership, &key).await?; + let stake_table = CERT::stake_table(&membership).await; + let total_nodes = CERT::total_nodes(&membership).await; + let threshold = CERT::threshold(&membership).await; let vote_node_id = stake_table .iter() diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 032c82edc5..e1ffbae935 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -15,8 +15,12 @@ use committable::Committable; use futures::future::{select, Either}; use hotshot_types::{ drb::{DrbResult, INITIAL_DRB_RESULT}, + epoch_membership::EpochMembershipCoordinator, message::UpgradeLock, - traits::{block_contents::BlockHeader, network::BroadcastDelay, node_implementation::Versions}, + traits::{ + block_contents::BlockHeader, election::Membership, network::BroadcastDelay, + node_implementation::Versions, + }, }; use rand::Rng; use url::Url; @@ -60,7 +64,6 @@ use hotshot_types::{ simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate2, UpgradeCertificate}, traits::{ consensus_api::ConsensusApi, - election::Membership, network::ConnectedNetwork, node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, @@ -112,7 +115,7 @@ pub struct SystemContext, V: Versi pub network: Arc, /// Memberships used by consensus - pub memberships: Arc>, + pub membership_coordinator: EpochMembershipCoordinator, /// the metrics that the implementor is using. 
metrics: Arc, @@ -167,7 +170,7 @@ impl, V: Versions> Clone private_key: self.private_key.clone(), config: self.config.clone(), network: Arc::clone(&self.network), - memberships: Arc::clone(&self.memberships), + membership_coordinator: self.membership_coordinator.clone(), metrics: Arc::clone(&self.metrics), consensus: self.consensus.clone(), instance_state: Arc::clone(&self.instance_state), @@ -203,7 +206,7 @@ impl, V: Versions> SystemContext::PrivateKey, nonce: u64, config: HotShotConfig, - memberships: Arc>, + memberships: EpochMembershipCoordinator, network: Arc, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, @@ -251,7 +254,7 @@ impl, V: Versions> SystemContext::PrivateKey, nonce: u64, config: HotShotConfig, - membership: Arc>, + membership_coordinator: EpochMembershipCoordinator, network: Arc, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, @@ -295,7 +298,11 @@ impl, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> SystemContext m, + Err(e) => return Err(HotShotError::InvalidState(e.message)), + }; + spawn(async move { - let memberships_da_committee_members = api - .memberships - .read() + let memberships_da_committee_members = membership + .da_committee_members(view_number) .await - .da_committee_members(view_number, epoch) .iter() .cloned() .collect(); @@ -617,7 +627,7 @@ impl, V: Versions> SystemContext::PrivateKey, node_id: u64, config: HotShotConfig, - memberships: Arc>, + memberships: EpochMembershipCoordinator, network: Arc, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, @@ -675,7 +685,7 @@ impl, V: Versions> SystemContext::PrivateKey, nonce: u64, config: HotShotConfig, - memberships: Arc>, + memberships: EpochMembershipCoordinator, network: Arc, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, @@ -796,7 +806,7 @@ where private_key.clone(), nonce, config.clone(), - Arc::clone(&memberships), + memberships.clone(), Arc::clone(&network), initializer.clone(), metrics.clone(), @@ -857,7 +867,7 @@ where hotshot: Arc::clone(&left_system_context), storage: Arc::clone(&left_system_context.storage), network: Arc::clone(&left_system_context.network), - memberships: Arc::clone(&left_system_context.memberships), + membership_coordinator: left_system_context.membership_coordinator.clone(), epoch_height, }; @@ -869,7 +879,7 @@ where hotshot: Arc::clone(&right_system_context), storage: Arc::clone(&right_system_context.storage), network: Arc::clone(&right_system_context.network), - memberships: Arc::clone(&right_system_context.memberships), + membership_coordinator: right_system_context.membership_coordinator.clone(), epoch_height, }; diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index e69ed4aaa2..544154a2cd 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -10,6 +10,7 @@ pub mod task_state; use std::{collections::BTreeMap, fmt::Debug, num::NonZeroUsize, sync::Arc, time::Duration}; +use crate::EpochMembershipCoordinator; use async_broadcast::{broadcast, RecvError}; use async_lock::RwLock; use async_trait::async_trait; @@ -82,7 +83,7 @@ pub fn add_response_task, V: Versi ) { let state = NetworkResponseState::::new( handle.hotshot.consensus(), - Arc::clone(&handle.memberships), + handle.membership_coordinator.clone(), handle.public_key().clone(), handle.private_key().clone(), handle.hotshot.id, @@ -190,13 +191,12 @@ pub fn add_network_event_task< >( handle: &mut SystemContextHandle, network: Arc, - membership: Arc>, ) { let network_state: NetworkEventTaskState<_, V, _, 
_> = NetworkEventTaskState { network, view: TYPES::View::genesis(), epoch: genesis_epoch_from_version::(), - membership, + membership_coordinator: handle.membership_coordinator.clone(), storage: Arc::clone(&handle.storage()), consensus: OuterConsensus::new(handle.consensus()), upgrade_lock: handle.hotshot.upgrade_lock.clone(), @@ -322,7 +322,7 @@ where private_key: ::PrivateKey, nonce: u64, config: HotShotConfig, - memberships: Arc>, + memberships: EpochMembershipCoordinator, network: Arc, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, @@ -330,12 +330,13 @@ where marketplace_config: MarketplaceConfig, ) -> SystemContextHandle { let epoch_height = config.epoch_height; + let hotshot = SystemContext::new( public_key, private_key, nonce, config, - memberships, + memberships.clone(), network, initializer, metrics, @@ -357,7 +358,7 @@ where hotshot: Arc::clone(&hotshot), storage: Arc::clone(&hotshot.storage), network: Arc::clone(&hotshot.network), - memberships: Arc::clone(&hotshot.memberships), + membership_coordinator: memberships.clone(), epoch_height, }; @@ -517,9 +518,8 @@ where /// Adds the `NetworkEventTaskState` tasks possibly modifying them as well. fn add_network_event_tasks(&self, handle: &mut SystemContextHandle) { let network = Arc::clone(&handle.network); - let memberships = Arc::clone(&handle.memberships); - self.add_network_event_task(handle, network, memberships); + self.add_network_event_task(handle, network); } /// Adds a `NetworkEventTaskState` task. Can be reimplemented to modify its behaviour. @@ -527,9 +527,8 @@ where &self, handle: &mut SystemContextHandle, channel: Arc<>::Network>, - membership: Arc>, ) { - add_network_event_task(handle, channel, membership); + add_network_event_task(handle, channel); } } @@ -562,9 +561,5 @@ pub async fn add_network_message_and_request_receiver_tasks< pub fn add_network_event_tasks, V: Versions>( handle: &mut SystemContextHandle, ) { - add_network_event_task( - handle, - Arc::clone(&handle.network), - Arc::clone(&handle.memberships), - ); + add_network_event_task(handle, Arc::clone(&handle.network)); } diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index d574ac91c6..b793d08930 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -52,7 +52,7 @@ impl, V: Versions> CreateTaskState consensus: OuterConsensus::new(handle.hotshot.consensus()), view: handle.cur_view().await, delay: handle.hotshot.config.data_request_delay, - membership: Arc::clone(&handle.hotshot.memberships), + membership_coordinator: handle.hotshot.membership_coordinator.clone(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), id: handle.hotshot.id, @@ -73,7 +73,7 @@ impl, V: Versions> CreateTaskState output_event_stream: handle.hotshot.external_event_stream.0.clone(), cur_view: handle.cur_view().await, cur_epoch: handle.cur_epoch().await, - membership: Arc::clone(&handle.hotshot.memberships), + membership_coordinator: handle.hotshot.membership_coordinator.clone(), vote_collectors: BTreeMap::default(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), @@ -123,7 +123,7 @@ impl, V: Versions> CreateTaskState cur_view: handle.cur_view().await, cur_epoch: handle.cur_epoch().await, network: Arc::clone(&handle.hotshot.network), - membership: Arc::clone(&handle.hotshot.memberships), + membership_coordinator: handle.hotshot.membership_coordinator.clone(), public_key: handle.public_key().clone(), private_key: 
handle.private_key().clone(), id: handle.hotshot.id, @@ -141,7 +141,7 @@ impl, V: Versions> CreateTaskState Self { consensus: OuterConsensus::new(handle.hotshot.consensus()), output_event_stream: handle.hotshot.external_event_stream.0.clone(), - membership: Arc::clone(&handle.hotshot.memberships), + membership_coordinator: handle.hotshot.membership_coordinator.clone(), network: Arc::clone(&handle.hotshot.network), cur_view: handle.cur_view().await, cur_epoch: handle.cur_epoch().await, @@ -166,7 +166,7 @@ impl, V: Versions> CreateTaskState cur_view, next_view: cur_view, cur_epoch: handle.cur_epoch().await, - membership: Arc::clone(&handle.hotshot.memberships), + membership_coordinator: handle.hotshot.membership_coordinator.clone(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), num_timeouts_tracked: 0, @@ -193,7 +193,7 @@ impl, V: Versions> CreateTaskState consensus: OuterConsensus::new(handle.hotshot.consensus()), cur_view: handle.cur_view().await, cur_epoch: handle.cur_epoch().await, - membership: Arc::clone(&handle.hotshot.memberships), + membership_coordinator: handle.hotshot.membership_coordinator.clone(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), instance_state: handle.hotshot.instance_state(), @@ -238,7 +238,7 @@ impl, V: Versions> CreateTaskState latest_voted_view: handle.cur_view().await, vote_dependencies: BTreeMap::new(), network: Arc::clone(&handle.hotshot.network), - membership: Arc::clone(&handle.hotshot.memberships), + membership: handle.hotshot.membership_coordinator.clone(), drb_computation: None, output_event_stream: handle.hotshot.external_event_stream.0.clone(), id: handle.hotshot.id, @@ -265,7 +265,7 @@ impl, V: Versions> CreateTaskState proposal_dependencies: BTreeMap::new(), consensus: OuterConsensus::new(consensus), instance_state: handle.hotshot.instance_state(), - membership: Arc::clone(&handle.hotshot.memberships), + membership_coordinator: handle.hotshot.membership_coordinator.clone(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), storage: Arc::clone(&handle.storage), @@ -292,7 +292,7 @@ impl, V: Versions> CreateTaskState consensus: OuterConsensus::new(consensus), cur_view: handle.cur_view().await, cur_epoch: handle.cur_epoch().await, - membership: Arc::clone(&handle.hotshot.memberships), + membership: handle.hotshot.membership_coordinator.clone(), timeout: handle.hotshot.config.next_view_timeout, output_event_stream: handle.hotshot.external_event_stream.0.clone(), storage: Arc::clone(&handle.storage), @@ -316,7 +316,7 @@ impl, V: Versions> CreateTaskState private_key: handle.private_key().clone(), instance_state: handle.hotshot.instance_state(), network: Arc::clone(&handle.hotshot.network), - membership: Arc::clone(&handle.hotshot.memberships), + membership_coordinator: handle.hotshot.membership_coordinator.clone(), vote_collectors: BTreeMap::default(), next_epoch_vote_collectors: BTreeMap::default(), timeout_vote_collectors: BTreeMap::default(), diff --git a/hotshot/src/traits/election/randomized_committee.rs b/hotshot/src/traits/election/randomized_committee.rs index cd3838fa05..b420723642 100644 --- a/hotshot/src/traits/election/randomized_committee.rs +++ b/hotshot/src/traits/election/randomized_committee.rs @@ -217,11 +217,6 @@ impl Membership for Committee { .is_some_and(|x| x.stake_table_entry.stake() > U256::zero()) } - // /// Get the network topic for the committee - // fn committee_topic(&self) -> Topic { - // 
self.committee_topic.clone() - // } - /// Index the vector of public keys with the current view number fn lookup_leader( &self, @@ -265,6 +260,16 @@ impl Membership for Committee { .unwrap() } + fn has_epoch(&self, _epoch: TYPES::Epoch) -> bool { + true + } + + async fn get_epoch_root( + &self, + _block_height: u64, + ) -> Option<(TYPES::Epoch, TYPES::BlockHeader)> { + None + } fn add_drb_result(&mut self, _epoch: ::Epoch, _drb_result: DrbResult) {} fn set_first_epoch(&mut self, _epoch: TYPES::Epoch, _initial_drb_result: DrbResult) {} diff --git a/hotshot/src/traits/election/randomized_committee_members.rs b/hotshot/src/traits/election/randomized_committee_members.rs index 7a66573096..f1f386be0d 100644 --- a/hotshot/src/traits/election/randomized_committee_members.rs +++ b/hotshot/src/traits/election/randomized_committee_members.rs @@ -446,6 +446,16 @@ impl Membership let len = self.total_nodes(epoch); NonZeroU64::new(max((len as u64 * 9) / 10, ((len as u64 * 2) / 3) + 1)).unwrap() } + fn has_epoch(&self, _epoch: TYPES::Epoch) -> bool { + true + } + + async fn get_epoch_root( + &self, + _block_height: u64, + ) -> Option<(TYPES::Epoch, TYPES::BlockHeader)> { + None + } fn add_drb_result(&mut self, _epoch: ::Epoch, _drb_result: DrbResult) {} diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 603fc45d44..35bd896f17 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -270,6 +270,16 @@ impl Membership for StaticCommittee { let len = self.stake_table.len(); NonZeroU64::new(max((len as u64 * 9) / 10, ((len as u64 * 2) / 3) + 1)).unwrap() } + fn has_epoch(&self, _epoch: TYPES::Epoch) -> bool { + true + } + + async fn get_epoch_root( + &self, + _block_height: u64, + ) -> Option<(TYPES::Epoch, TYPES::BlockHeader)> { + None + } fn add_drb_result(&mut self, _epoch: ::Epoch, _drb_result: DrbResult) {} diff --git a/hotshot/src/traits/election/static_committee_leader_two_views.rs b/hotshot/src/traits/election/static_committee_leader_two_views.rs index 64b79112e8..ededbe507a 100644 --- a/hotshot/src/traits/election/static_committee_leader_two_views.rs +++ b/hotshot/src/traits/election/static_committee_leader_two_views.rs @@ -239,6 +239,17 @@ impl Membership for StaticCommitteeLeaderForTwoViews::Epoch>) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64 * 9) / 10) + 1).unwrap() } + fn has_epoch(&self, _epoch: TYPES::Epoch) -> bool { + true + } + + async fn get_epoch_root( + &self, + _block_height: u64, + ) -> Option<(TYPES::Epoch, TYPES::BlockHeader)> { + None + } + fn add_drb_result(&mut self, _epoch: ::Epoch, _drb_result: DrbResult) {} fn set_first_epoch(&mut self, _epoch: TYPES::Epoch, _initial_drb_result: DrbResult) {} diff --git a/hotshot/src/traits/election/two_static_committees.rs b/hotshot/src/traits/election/two_static_committees.rs index c13200c75e..7e80335a1b 100644 --- a/hotshot/src/traits/election/two_static_committees.rs +++ b/hotshot/src/traits/election/two_static_committees.rs @@ -427,6 +427,16 @@ impl Membership for TwoStaticCommittees { .unwrap() } } + fn has_epoch(&self, _epoch: TYPES::Epoch) -> bool { + true + } + + async fn get_epoch_root( + &self, + _block_height: u64, + ) -> Option<(TYPES::Epoch, TYPES::BlockHeader)> { + None + } fn add_drb_result(&mut self, _epoch: ::Epoch, _drb_result: DrbResult) {} diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 
438f790d68..87b4a873c2 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -32,6 +32,7 @@ use hotshot_types::{ COMBINED_NETWORK_MIN_PRIMARY_FAILURES, COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL, }, data::ViewNumber, + epoch_membership::EpochMembershipCoordinator, traits::{ network::{BroadcastDelay, ConnectedNetwork, Topic}, node_implementation::NodeType, @@ -471,7 +472,7 @@ impl ConnectedNetwork for CombinedNetworks &'a self, view: u64, epoch: Option, - membership: Arc>, + membership: EpochMembershipCoordinator, ) where T: NodeType + 'a, { diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 24ae0d9138..93e9c5ef21 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -7,21 +7,7 @@ //! Libp2p based/production networking implementation //! This module provides a libp2p based networking implementation where each node in the //! network forms a tcp or udp connection to a subset of other nodes in the network -#[cfg(feature = "hotshot-testing")] -use std::str::FromStr; -use std::{ - cmp::min, - collections::{BTreeSet, HashSet}, - fmt::Debug, - net::{IpAddr, ToSocketAddrs}, - num::NonZeroUsize, - sync::{ - atomic::{AtomicBool, AtomicU64, Ordering}, - Arc, - }, - time::Duration, -}; - +use crate::EpochMembershipCoordinator; use anyhow::{anyhow, Context}; use async_lock::RwLock; use async_trait::async_trait; @@ -52,7 +38,6 @@ use hotshot_types::{ data::ViewNumber, network::NetworkConfig, traits::{ - election::Membership, metrics::{Counter, Gauge, Metrics, NoMetrics}, network::{ConnectedNetwork, NetworkError, Topic}, node_implementation::{ConsensusTime, NodeType}, @@ -66,6 +51,20 @@ use libp2p_identity::{ }; use rand::{rngs::StdRng, seq::IteratorRandom, SeedableRng}; use serde::Serialize; +#[cfg(feature = "hotshot-testing")] +use std::str::FromStr; +use std::{ + cmp::min, + collections::{BTreeSet, HashSet}, + fmt::Debug, + net::{IpAddr, ToSocketAddrs}, + num::NonZeroUsize, + sync::{ + atomic::{AtomicBool, AtomicU64, Ordering}, + Arc, + }, + time::Duration, +}; use tokio::{ select, spawn, sync::{ @@ -992,14 +991,20 @@ impl ConnectedNetwork for Libp2pNetwork { &'a self, view: u64, epoch: Option, - membership: Arc>, + membership_coordinator: EpochMembershipCoordinator, ) where TYPES: NodeType + 'a, { let future_view = ::View::new(view) + LOOK_AHEAD; let epoch = epoch.map(::Epoch::new); - let future_leader = match membership.read().await.leader(future_view, epoch) { + let membership = match membership_coordinator.membership_for_epoch(epoch).await { + Ok(m) => m, + Err(e) => { + return tracing::warn!(e.message); + } + }; + let future_leader = match membership.leader(future_view).await { Ok(l) => l, Err(e) => { return tracing::info!( diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index f8c82dfad6..07e6bab901 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -6,8 +6,6 @@ //! 
Provides an event-streaming handle for a [`SystemContext`] running in the background -use std::sync::Arc; - use anyhow::{anyhow, Context, Ok, Result}; use async_broadcast::{InactiveReceiver, Receiver, Sender}; use async_lock::RwLock; @@ -21,17 +19,20 @@ use hotshot_task_impls::{events::HotShotEvent, helpers::broadcast_event}; use hotshot_types::{ consensus::Consensus, data::{Leaf2, QuorumProposalWrapper}, + epoch_membership::EpochMembershipCoordinator, error::HotShotError, message::{Message, MessageKind, Proposal, RecipientList}, request_response::ProposalRequestPayload, traits::{ + block_contents::BlockHeader, consensus_api::ConsensusApi, - election::Membership, network::{BroadcastDelay, ConnectedNetwork, Topic}, node_implementation::NodeType, signature_key::SignatureKey, }, + utils::option_epoch_from_block_number, }; +use std::sync::Arc; use tracing::instrument; use crate::{traits::NodeImplementation, types::Event, SystemContext, Versions}; @@ -68,7 +69,7 @@ pub struct SystemContextHandle, V: pub network: Arc, /// Memberships used by consensus - pub memberships: Arc>, + pub membership_coordinator: EpochMembershipCoordinator, /// Number of blocks in an epoch, zero means there are no epochs pub epoch_height: u64, @@ -155,7 +156,7 @@ impl + 'static, V: Versions> signed_proposal_request.commit().as_ref(), )?; - let mem = Arc::clone(&self.memberships); + let membership_coordinator = self.membership_coordinator.clone(); let receiver = self.internal_event_stream.1.activate_cloned(); let sender = self.internal_event_stream.0.clone(); let epoch_height = self.epoch_height; @@ -185,14 +186,26 @@ impl + 'static, V: Versions> // Then, if it's `Some`, make sure that the data is correct if let HotShotEvent::QuorumProposalResponseRecv(quorum_proposal) = hs_event.as_ref() { - // Make sure that the quorum_proposal is valid - let mem_reader = mem.read().await; - if let Err(err) = quorum_proposal.validate_signature(&mem_reader, epoch_height) + let maybe_epoch = option_epoch_from_block_number::( + quorum_proposal.data.proposal.epoch.is_some(), + quorum_proposal.data.block_header().block_number(), + epoch_height, + ); + let membership = match membership_coordinator + .membership_for_epoch(maybe_epoch) + .await { + Result::Ok(m) => m, + Err(e) => { + tracing::warn!(e.message); + continue; + } + }; + // Make sure that the quorum_proposal is valid + if let Err(err) = quorum_proposal.validate_signature(&membership).await { tracing::warn!("Invalid Proposal Received after Request. Err {:?}", err); continue; } - drop(mem_reader); let proposed_leaf = Leaf2::from_quorum_proposal(&quorum_proposal.data); let commit = proposed_leaf.commit(); if commit == leaf_commitment { @@ -327,10 +340,11 @@ impl + 'static, V: Versions> epoch_number: Option, ) -> Result { self.hotshot - .memberships - .read() + .membership_coordinator + .membership_for_epoch(epoch_number) + .await? 
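+            // First resolve the membership for the requested epoch; the leader lookup itself is now an async call on that membership.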
+ .leader(view_number) .await - .leader(view_number, epoch_number) .context("Failed to lookup leader") } diff --git a/sequencer-sqlite/Cargo.lock b/sequencer-sqlite/Cargo.lock index b975a9d8fe..96be4a2167 100644 --- a/sequencer-sqlite/Cargo.lock +++ b/sequencer-sqlite/Cargo.lock @@ -5167,6 +5167,7 @@ dependencies = [ "ark-serialize 0.4.2", "ark-srs", "ark-std 0.4.0", + "async-broadcast", "async-lock 3.4.0", "async-trait", "bincode", diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index a2eaf1ea35..559f692070 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -18,7 +18,6 @@ use hotshot_events_service::events_source::{ EventFilterSet, EventsSource, EventsStreamer, StartupInfo, }; use hotshot_query_service::data_source::ExtensibleDataSource; -use hotshot_types::traits::election::Membership; use hotshot_types::{ data::ViewNumber, event::Event, @@ -185,14 +184,18 @@ impl, V: Versions, P: SequencerPersistence> &self, epoch: Option<::Epoch>, ) -> Vec::SignatureKey>> { - self.consensus() + let Ok(mem) = self + .consensus() .await .read() .await - .memberships - .read() + .membership_coordinator + .membership_for_epoch(epoch) .await - .stake_table(epoch) + else { + return vec![]; + }; + mem.stake_table().await } /// Get the stake table for the current epoch if not provided diff --git a/sequencer/src/context.rs b/sequencer/src/context.rs index 9b69947f73..25c8fec67b 100644 --- a/sequencer/src/context.rs +++ b/sequencer/src/context.rs @@ -18,6 +18,7 @@ use hotshot_orchestrator::client::OrchestratorClient; use hotshot_types::{ consensus::ConsensusMetricsValue, data::{Leaf2, ViewNumber}, + epoch_membership::EpochMembershipCoordinator, network::NetworkConfig, traits::{ metrics::Metrics, @@ -144,14 +145,16 @@ impl, P: SequencerPersistence, V: Versions> Sequence ))); let persistence = Arc::new(persistence); - let memberships = Arc::new(async_lock::RwLock::new(membership)); + let coordinator = + EpochMembershipCoordinator::new(Arc::new(RwLock::new(membership)), config.epoch_height); + let membership = coordinator.membership().clone(); let handle = SystemContext::init( validator_config.public_key, validator_config.private_key.clone(), instance_state.node_id, config.clone(), - memberships.clone(), + coordinator, network.clone(), initializer, ConsensusMetricsValue::new(metrics), @@ -186,7 +189,9 @@ impl, P: SequencerPersistence, V: Versions> Sequence request_response_config, RequestResponseSender::new(outbound_message_sender), request_response_receiver, - RecipientSource { memberships }, + RecipientSource { + memberships: membership, + }, DataSource {}, ); diff --git a/sequencer/src/message_compat_tests.rs b/sequencer/src/message_compat_tests.rs index 157e2df2d1..08438f08c2 100755 --- a/sequencer/src/message_compat_tests.rs +++ b/sequencer/src/message_compat_tests.rs @@ -48,6 +48,7 @@ async fn test_message_compat(_ver: Ver) { use hotshot_example_types::node_types::TestVersions; use hotshot_types::{ data::vid_disperse::{ADVZDisperse, ADVZDisperseShare}, + epoch_membership::EpochMembershipCoordinator, simple_certificate::{ TimeoutCertificate, ViewSyncCommitCertificate, ViewSyncFinalizeCertificate, ViewSyncPreCommitCertificate, @@ -62,12 +63,15 @@ async fn test_message_compat(_ver: Ver) { let (sender, priv_key) = PubKey::generated_from_seed_indexed(Default::default(), 0); let signature = PubKey::sign(&priv_key, &[]).unwrap(); let committee = vec![PeerConfig::default()]; /* one committee member, necessary to generate a VID share */ - let membership = 
Arc::new(RwLock::new(EpochCommittees::new_stake( - committee.clone(), - committee, - &NodeState::default(), + let membership = EpochMembershipCoordinator::new( + Arc::new(RwLock::new(EpochCommittees::new_stake( + committee.clone(), + committee, + &NodeState::default(), + 10, + ))), 10, - ))); + ); let upgrade_data = UpgradeProposalData { old_version: Version { major: 0, minor: 1 }, new_version: Version { major: 1, minor: 0 }, diff --git a/types/src/v0/impls/stake_table.rs b/types/src/v0/impls/stake_table.rs index 52b015a1b1..89d5ef2d25 100644 --- a/types/src/v0/impls/stake_table.rs +++ b/types/src/v0/impls/stake_table.rs @@ -4,7 +4,7 @@ use std::{ num::NonZeroU64, }; -use async_trait::async_trait; +// use async_trait::async_trait; use contract_bindings_alloy::permissionedstaketable::PermissionedStakeTable::StakersUpdated; use ethers::types::{Address, U256}; use ethers_conv::ToAlloy; @@ -24,6 +24,7 @@ use hotshot_types::{ }, PeerConfig, }; + use itertools::Itertools; use thiserror::Error; @@ -298,7 +299,7 @@ impl EpochCommittees { #[error("Could not lookup leader")] // TODO error variants? message? pub struct LeaderLookupError; -#[async_trait] +// #[async_trait] impl Membership for EpochCommittees { type Error = LeaderLookupError; // DO NOT USE. Dummy constructor to comply w/ trait. @@ -468,6 +469,7 @@ impl Membership for EpochCommittees { .unwrap() } + #[allow(refining_impl_trait)] async fn add_epoch_root( &self, epoch: Epoch, @@ -485,6 +487,14 @@ impl Membership for EpochCommittees { }) } + fn has_epoch(&self, epoch: Epoch) -> bool { + self.state.contains_key(&epoch) + } + + async fn get_epoch_root(&self, _block_height: u64) -> Option<(Epoch, Header)> { + None + } + fn add_drb_result(&mut self, epoch: Epoch, drb: DrbResult) { let Some(raw_stake_table) = self.state.get(&epoch) else { tracing::error!("add_drb_result({}, {:?}) was called, but we do not yet have the stake table for epoch {}", epoch, drb, epoch); From a31db38027eee48915d2bbe9e6ef46595e102f4f Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 7 Mar 2025 08:55:51 -0500 Subject: [PATCH 06/17] Fetch Epoch Root via Leaves for Stake Table Catchup (#2650) * Bring over changes * return option wip * Refactor to return result and fixed the build * fix recursion * delete file accidently added * remove accidently added file * fix * fix typos * fmt * cargo sort * fix * some more slight improvements * fixes for wrong epoch when verifying qc * fmt * Add helper to verify a decide chain for an epoch root * fix again * save epoch root verification for later pr * lint * adding param for epoch height * add fn back * wip * fixes * fmt * fix typo * Fix up everything * Hooking up leaf catchup maybe? * typos, lots of typos * hook up the catchup provider * fmt * Fix tests * Address comments * fmt * Don't fail DA on None epoch * typo * fix merge * revert changes to eqc validation, check for None * fmt * fix for many tests * fmt * fixes/fmt * add to toml * fix params * fmt * typo * address comments * bump todos... 
* properly retry * Add basic test for having epoch in membership * add catchup full integration test and fix HS one * no drb check for now * Apply suggestions from code review Co-authored-by: Abdul Basit <45506001+imabdulbasit@users.noreply.github.com> * fetch leafs as a chain from DB * better log message * fmt * version for epochs * ignore epoch test for now * Add DRB catchup and make get_epoch_root_and_drb have default impl * lint * revert version change for now --------- Co-authored-by: Abdul Basit <45506001+imabdulbasit@users.noreply.github.com> --- hotshot-example-types/src/block_types.rs | 16 ++ hotshot-example-types/src/node_types.rs | 34 ++- hotshot-testing/tests/tests_1/da_task.rs | 12 +- .../tests/tests_1/quorum_proposal_task.rs | 31 ++- .../tests_1/upgrade_task_with_proposal.rs | 6 +- hotshot-testing/tests/tests_6/test_epochs.rs | 3 +- hotshot-types/src/consensus.rs | 4 + hotshot-types/src/epoch_membership.rs | 23 +- hotshot-types/src/traits/election.rs | 13 +- hotshot-types/src/utils.rs | 90 +++++++- .../election/dummy_catchup_membership.rs | 204 ++++++++++++++++++ hotshot/src/traits/election/mod.rs | 2 + .../traits/election/randomized_committee.rs | 6 - .../election/randomized_committee_members.rs | 7 - .../src/traits/election/static_committee.rs | 7 - .../static_committee_leader_two_views.rs | 7 - .../traits/election/two_static_committees.rs | 7 - sequencer/api/catchup.toml | 11 + sequencer/src/api.rs | 179 ++++++++++++++- sequencer/src/api/data_source.rs | 7 +- sequencer/src/api/endpoints.rs | 12 ++ sequencer/src/api/sql.rs | 24 +++ sequencer/src/catchup.rs | 28 +++ sequencer/src/lib.rs | 5 + types/src/v0/impls/instance_state.rs | 10 +- types/src/v0/impls/stake_table.rs | 39 +++- types/src/v0/traits.rs | 76 ++++++- 27 files changed, 782 insertions(+), 81 deletions(-) create mode 100644 hotshot/src/traits/election/dummy_catchup_membership.rs diff --git a/hotshot-example-types/src/block_types.rs b/hotshot-example-types/src/block_types.rs index 7d4ba68c4d..6bdcca4fed 100644 --- a/hotshot-example-types/src/block_types.rs +++ b/hotshot-example-types/src/block_types.rs @@ -298,6 +298,22 @@ impl TestBlockHeader { } } +impl Default for TestBlockHeader { + fn default() -> Self { + let metadata = TestMetadata { + num_transactions: 0, + }; + Self { + block_number: 0, + payload_commitment: Default::default(), + builder_commitment: Default::default(), + metadata, + timestamp: 0, + random: 0, + } + } +} + impl< TYPES: NodeType< BlockHeader = Self, diff --git a/hotshot-example-types/src/node_types.rs b/hotshot-example-types/src/node_types.rs index dd3d08f48b..0c8dc1750b 100644 --- a/hotshot-example-types/src/node_types.rs +++ b/hotshot-example-types/src/node_types.rs @@ -11,8 +11,8 @@ pub use hotshot::traits::election::helpers::{ }; use hotshot::traits::{ election::{ - helpers::QuorumFilterConfig, randomized_committee::Committee, - randomized_committee_members::RandomizedCommitteeMembers, + dummy_catchup_membership::DummyCatchupCommittee, helpers::QuorumFilterConfig, + randomized_committee::Committee, randomized_committee_members::RandomizedCommitteeMembers, static_committee::StaticCommittee, static_committee_leader_two_views::StaticCommitteeLeaderForTwoViews, two_static_committees::TwoStaticCommittees, @@ -101,6 +101,36 @@ impl NodeType for TestTypesRandomizedLeader { type BuilderSignatureKey = BuilderKey; } +#[derive( + Copy, + Clone, + Debug, + Default, + Hash, + PartialEq, + Eq, + PartialOrd, + Ord, + serde::Serialize, + serde::Deserialize, +)] +pub struct TestTypesEpochCatchupTypes; 
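+// Test-only node type: its membership (`DummyCatchupCommittee`) asserts that each epoch was explicitly added (i.e. that catchup ran) before any stake-table lookup may use it.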
+impl NodeType for TestTypesEpochCatchupTypes { + const UPGRADE_CONSTANTS: UpgradeConstants = TEST_UPGRADE_CONSTANTS; + + type AuctionResult = TestAuctionResult; + type View = ViewNumber; + type Epoch = EpochNumber; + type BlockHeader = TestBlockHeader; + type BlockPayload = TestBlockPayload; + type SignatureKey = BLSPubKey; + type Transaction = TestTransaction; + type ValidatedState = TestValidatedState; + type InstanceState = TestInstanceState; + type Membership = DummyCatchupCommittee; + type BuilderSignatureKey = BuilderKey; +} + #[derive( Copy, Clone, diff --git a/hotshot-testing/tests/tests_1/da_task.rs b/hotshot-testing/tests/tests_1/da_task.rs index 28eb3edc58..2c160161c9 100644 --- a/hotshot-testing/tests/tests_1/da_task.rs +++ b/hotshot-testing/tests/tests_1/da_task.rs @@ -47,7 +47,8 @@ async fn test_da_task() { &[], membership .membership_for_epoch(None) - .await.unwrap() + .await + .unwrap() .total_nodes() .await, default_version, @@ -162,17 +163,16 @@ async fn test_da_task_storage_failure() { &[], membership .membership_for_epoch(None) - .await.unwrap() + .await + .unwrap() .total_nodes() .await, default_version, ); - let mut generator = TestViewGenerator::::generate(membership.clone(), node_key_map); + let mut generator = + TestViewGenerator::::generate(membership.clone(), node_key_map); - let mut proposals = Vec::new(); - let mut leaders = Vec::new(); - let mut votes = Vec::new(); let mut dacs = Vec::new(); let mut vids = Vec::new(); diff --git a/hotshot-testing/tests/tests_1/quorum_proposal_task.rs b/hotshot-testing/tests/tests_1/quorum_proposal_task.rs index 55c79c0c25..8a403c93c7 100644 --- a/hotshot-testing/tests/tests_1/quorum_proposal_task.rs +++ b/hotshot-testing/tests/tests_1/quorum_proposal_task.rs @@ -50,7 +50,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { let membership = handle.hotshot.membership_coordinator.clone(); let epoch_1_mem = membership .membership_for_epoch(Some(EpochNumber::new(1))) - .await.unwrap(); + .await + .unwrap(); let version = handle .hotshot .upgrade_lock @@ -64,7 +65,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { ) .await; - let mut generator = TestViewGenerator::::generate(membership.clone(), node_key_map); + let mut generator = + TestViewGenerator::::generate(membership.clone(), node_key_map); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -151,7 +153,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { let membership = handle.hotshot.membership_coordinator.clone(); let epoch_1_mem = membership .membership_for_epoch(Some(EpochNumber::new(1))) - .await.unwrap(); + .await + .unwrap(); let mut generator = TestViewGenerator::::generate(membership.clone(), node_key_map); @@ -328,8 +331,9 @@ async fn test_quorum_proposal_task_qc_timeout() { build_system_handle::(node_id).await; let membership = handle.hotshot.membership_coordinator.clone(); let epoch_1_mem = membership - .membership_for_epoch(Some(EpochNumber::new(1))) - .await.unwrap(); + .membership_for_epoch(Some(EpochNumber::new(1))) + .await + .unwrap(); let version = handle .hotshot .upgrade_lock @@ -337,14 +341,15 @@ async fn test_quorum_proposal_task_qc_timeout() { .await; let payload_commitment = build_payload_commitment::( - &epoch_1_mem, + &epoch_1_mem, ViewNumber::new(node_id), version, ) .await; let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - let mut generator = TestViewGenerator::::generate(membership.clone(), node_key_map); + let mut generator = + 
TestViewGenerator::::generate(membership.clone(), node_key_map); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -425,7 +430,8 @@ async fn test_quorum_proposal_task_view_sync() { let membership = handle.hotshot.membership_coordinator.clone(); let epoch_1_mem = membership .membership_for_epoch(Some(EpochNumber::new(1))) - .await.unwrap(); + .await + .unwrap(); let version = handle .hotshot .upgrade_lock @@ -440,7 +446,8 @@ async fn test_quorum_proposal_task_view_sync() { .await; let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - let mut generator = TestViewGenerator::::generate(membership.clone(), node_key_map); + let mut generator = + TestViewGenerator::::generate(membership.clone(), node_key_map); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -521,9 +528,11 @@ async fn test_quorum_proposal_task_liveness_check() { let membership = handle.hotshot.membership_coordinator.clone(); let epoch_1_mem = membership .membership_for_epoch(Some(EpochNumber::new(1))) - .await.unwrap(); + .await + .unwrap(); - let mut generator = TestViewGenerator::::generate(membership.clone(), node_key_map); + let mut generator = + TestViewGenerator::::generate(membership.clone(), node_key_map); let mut proposals = Vec::new(); let mut leaders = Vec::new(); diff --git a/hotshot-testing/tests/tests_1/upgrade_task_with_proposal.rs b/hotshot-testing/tests/tests_1/upgrade_task_with_proposal.rs index d8533acee2..3628b6adef 100644 --- a/hotshot-testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/hotshot-testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -85,9 +85,11 @@ async fn test_upgrade_task_with_proposal() { let membership = handle.hotshot.membership_coordinator.clone(); let epoch_1_mem = membership .membership_for_epoch(Some(EpochNumber::new(1))) - .await.unwrap(); + .await + .unwrap(); - let mut generator = TestViewGenerator::::generate(membership.clone(), node_key_map); + let mut generator = + TestViewGenerator::::generate(membership.clone(), node_key_map); for view in (&mut generator).take(1).collect::>().await { proposals.push(view.quorum_proposal.clone()); diff --git a/hotshot-testing/tests/tests_6/test_epochs.rs b/hotshot-testing/tests/tests_6/test_epochs.rs index 718ab21310..bed8d185ac 100644 --- a/hotshot-testing/tests/tests_6/test_epochs.rs +++ b/hotshot-testing/tests/tests_6/test_epochs.rs @@ -9,7 +9,7 @@ use hotshot_example_types::{ CombinedImpl, EpochUpgradeTestVersions, EpochsTestVersions, Libp2pImpl, MemoryImpl, PushCdnImpl, RandomOverlapQuorumFilterConfig, StableQuorumFilterConfig, TestConsecutiveLeaderTypes, TestTwoStakeTablesTypes, TestTypes, - TestTypesRandomizedCommitteeMembers, TestTypesRandomizedLeader, + TestTypesRandomizedCommitteeMembers, TestTypesRandomizedLeader, TestTypesEpochCatchupTypes }, testable_delay::{DelayConfig, DelayOptions, DelaySettings, SupportedTraitTypesForAsyncDelay}, }; @@ -52,6 +52,7 @@ cross_tests!( Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], Types: [ TestTypes, + TestTypesEpochCatchupTypes, TestTypesRandomizedLeader, TestTypesRandomizedCommitteeMembers>, // Overlap = F TestTypesRandomizedCommitteeMembers>, // Overlap = F+1 diff --git a/hotshot-types/src/consensus.rs b/hotshot-types/src/consensus.rs index 0849b4d4d9..66ef1db9bf 100644 --- a/hotshot-types/src/consensus.rs +++ b/hotshot-types/src/consensus.rs @@ -922,6 +922,10 @@ impl Consensus { self.saved_leaves.get(&leaf).unwrap().clone() } + pub fn undecided_leaves(&self) -> Vec> { + self.saved_leaves.values().cloned().collect::>() 
+ } + /// Gets the validated state with the given view number, if in the state map. #[must_use] pub fn state(&self, view_number: TYPES::View) -> Option<&Arc> { diff --git a/hotshot-types/src/epoch_membership.rs b/hotshot-types/src/epoch_membership.rs index 00001fcb8f..0494a0c76a 100644 --- a/hotshot-types/src/epoch_membership.rs +++ b/hotshot-types/src/epoch_membership.rs @@ -120,20 +120,24 @@ where }; // Get the epoch root headers and update our membership with them, finally sync them - // Verification of the root is handled in get_epoch_root - let (next_epoch, header) = root_membership - .get_epoch_root(root_block_in_epoch(*root_epoch, self.epoch_height)) + // Verification of the root is handled in get_epoch_root_and_drb + let Ok((header, drb)) = root_membership + .get_epoch_root_and_drb(root_block_in_epoch(*root_epoch, self.epoch_height)) .await - .ok_or(anytrace::warn!("get epoch root failed"))?; + else { + anytrace::bail!("get epoch root failed for epoch {:?}", root_epoch); + }; let updater = self .membership .read() .await - .add_epoch_root(next_epoch, header) + .add_epoch_root(epoch, header) .await .ok_or(anytrace::warn!("add epoch root failed"))?; updater(&mut *(self.membership.write().await)); + self.membership.write().await.add_drb_result(epoch, drb); + Ok(EpochMembership { epoch: Some(epoch), coordinator: self.clone(), @@ -212,15 +216,18 @@ impl EpochMembership { } /// Wraps the same named Membership trait fn - async fn get_epoch_root( + async fn get_epoch_root_and_drb( &self, block_height: u64, - ) -> Option<(TYPES::Epoch, TYPES::BlockHeader)> { + ) -> anyhow::Result<(TYPES::BlockHeader, DrbResult)> { + let Some(epoch) = self.epoch else { + anyhow::bail!("Cannot get root for None epoch"); + }; self.coordinator .membership .read() .await - .get_epoch_root(block_height) + .get_epoch_root_and_drb(block_height, self.coordinator.epoch_height, epoch) .await } diff --git a/hotshot-types/src/traits/election.rs b/hotshot-types/src/traits/election.rs index b0f162f170..382034ba3d 100644 --- a/hotshot-types/src/traits/election.rs +++ b/hotshot-types/src/traits/election.rs @@ -128,10 +128,17 @@ pub trait Membership: Debug + Send + Sync { /// Gets the validated block header and epoch number of the epoch root /// at the given block height - fn get_epoch_root( + fn get_epoch_root_and_drb( &self, - block_height: u64, - ) -> impl std::future::Future> + Send; + _block_height: u64, + _epoch_height: u64, + _epoch: TYPES::Epoch, + ) -> impl std::future::Future> + Send + { + async { + anyhow::bail!("Not implemented"); + } + } #[allow(clippy::type_complexity)] /// Handles notifications that a new epoch root has been created diff --git a/hotshot-types/src/utils.rs b/hotshot-types/src/utils.rs index 28b9b72aea..864bd4c54a 100644 --- a/hotshot-types/src/utils.rs +++ b/hotshot-types/src/utils.rs @@ -6,6 +6,7 @@ //! Utility functions, type aliases, helper structs and enum definitions. 
+use anyhow::{anyhow, ensure}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use bincode::{ config::{ @@ -14,7 +15,7 @@ use bincode::{ }, DefaultOptions, Options, }; -use committable::Commitment; +use committable::{Commitment, Committable}; use digest::OutputSizeUser; use serde::{Deserialize, Serialize}; use sha2::Digest; @@ -30,9 +31,12 @@ use vbs::version::StaticVersionType; use crate::{ data::{Leaf2, VidCommitment}, traits::{ + election::Membership, node_implementation::{ConsensusTime, NodeType, Versions}, ValidatedState, }, + vote::{Certificate, HasViewNumber}, + StakeTableEntries, }; /// A view's state @@ -98,6 +102,88 @@ pub type StateAndDelta = ( Option::ValidatedState as ValidatedState>::Delta>>, ); +pub async fn verify_epoch_root_chain( + leaf_chain: Vec>, + membership: &T::Membership, + epoch: T::Epoch, + epoch_height: u64, + upgrade_lock: &crate::message::UpgradeLock, +) -> anyhow::Result> { + // Check we actually have a chain long enough for deciding + if leaf_chain.len() < 3 { + return Err(anyhow!("Leaf chain is not long enough for a decide")); + } + + let newest_leaf = leaf_chain.first().unwrap(); + let parent = &leaf_chain[1]; + let grand_parent = &leaf_chain[2]; + + // Check if the leaves form a decide + if newest_leaf.justify_qc().view_number() != parent.view_number() + || parent.justify_qc().view_number() != grand_parent.view_number() + { + return Err(anyhow!("Leaf views do not chain")); + } + if newest_leaf.justify_qc().data.leaf_commit != parent.commit() + || parent.justify_qc().data().leaf_commit != grand_parent.commit() + { + return Err(anyhow!("Leaf commits do not chain")); + } + if parent.view_number() != grand_parent.view_number() + 1 { + return Err(anyhow::anyhow!( + "Decide rule failed, parent does not directly extend grandparent" + )); + } + + // verify all QCs are valid + let stake_table = membership.stake_table(Some(epoch)); + let threshold = membership.success_threshold(Some(epoch)); + newest_leaf + .justify_qc() + .is_valid_cert( + StakeTableEntries::::from(stake_table.clone()).0, + threshold, + upgrade_lock, + ) + .await?; + parent + .justify_qc() + .is_valid_cert( + StakeTableEntries::::from(stake_table.clone()).0, + threshold, + upgrade_lock, + ) + .await?; + grand_parent + .justify_qc() + .is_valid_cert( + StakeTableEntries::::from(stake_table.clone()).0, + threshold, + upgrade_lock, + ) + .await?; + + // Verify the rest of the chain link by link, walking back until we reach the epoch root + let root_height_interval = epoch_height - 3; + let mut last_leaf = parent; + for leaf in leaf_chain.iter().skip(2) { + ensure!(last_leaf.justify_qc().view_number() == leaf.view_number()); + ensure!(last_leaf.justify_qc().data().leaf_commit == leaf.commit()); + leaf.justify_qc() + .is_valid_cert( + StakeTableEntries::::from(stake_table.clone()).0, + threshold, + upgrade_lock, + ) + .await?; + if leaf.height() % root_height_interval == 0 { + return Ok(leaf.clone()); + } + last_leaf = leaf; + } + Err(anyhow!("Epoch Root was not found in the decided chain")) +} + impl ViewInner { /// Return the underlying undecide leaf commitment and validated state if they exist. #[must_use] @@ -198,7 +284,7 @@ pub enum Terminator { type Sha256Digest = [u8; ::OutputSize::USIZE]; #[tagged("BUILDER_COMMITMENT")] -#[derive(Clone, Debug, Hash, PartialEq, Eq, CanonicalSerialize, CanonicalDeserialize)] +#[derive(Clone, Debug, Default, Hash, PartialEq, Eq, CanonicalSerialize, CanonicalDeserialize)] /// Commitment that builders use to sign block options. /// A thin wrapper around a Sha256 digest.
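+/// `Default` is derived here so test headers like `TestBlockHeader::default()` can fill in an all-zero placeholder commitment.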
pub struct BuilderCommitment(Sha256Digest); diff --git a/hotshot/src/traits/election/dummy_catchup_membership.rs b/hotshot/src/traits/election/dummy_catchup_membership.rs new file mode 100644 index 0000000000..9d1347f944 --- /dev/null +++ b/hotshot/src/traits/election/dummy_catchup_membership.rs @@ -0,0 +1,204 @@ +use std::{collections::HashSet, time::Duration}; + +use anyhow::Ok; +use hotshot_types::{ + drb::DrbResult, + traits::{election::Membership, node_implementation::NodeType}, +}; + +use super::static_committee::StaticCommittee; + +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct DummyCatchupCommittee { + inner: StaticCommittee, + epochs: HashSet, + drbs: HashSet, +} + +impl DummyCatchupCommittee { + fn assert_has_epoch(&self, epoch: Option) { + let Some(epoch) = epoch else { + return; + }; + let pass = self.epochs.contains(&epoch); + //&& self.drbs.contains(&epoch); + if !pass { + tracing::error!("FAILED EPOCH CHECK {epoch}"); + } + assert!(pass); + } +} + +impl Membership for DummyCatchupCommittee +where + TYPES::BlockHeader: Default, +{ + type Error = hotshot_utils::anytrace::Error; + + fn new( + // Note: eligible_leaders is currently a hack because the DA leader == the quorum leader + // but they should not have voting power. + stake_committee_members: Vec>, + da_committee_members: Vec>, + ) -> Self { + Self { + inner: StaticCommittee::new(stake_committee_members, da_committee_members), + epochs: HashSet::new(), + drbs: HashSet::new(), + } + } + + fn stake_table( + &self, + epoch: Option, + ) -> Vec> { + self.assert_has_epoch(epoch); + self.inner.stake_table(epoch) + } + + fn da_stake_table( + &self, + epoch: Option, + ) -> Vec> { + self.assert_has_epoch(epoch); + self.inner.da_stake_table(epoch) + } + + fn committee_members( + &self, + view_number: TYPES::View, + epoch: Option, + ) -> std::collections::BTreeSet { + self.assert_has_epoch(epoch); + self.inner.committee_members(view_number, epoch) + } + + fn da_committee_members( + &self, + view_number: TYPES::View, + epoch: Option, + ) -> std::collections::BTreeSet { + self.assert_has_epoch(epoch); + self.inner.da_committee_members(view_number, epoch) + } + + fn committee_leaders( + &self, + view_number: TYPES::View, + epoch: Option, + ) -> std::collections::BTreeSet { + self.assert_has_epoch(epoch); + self.inner.committee_leaders(view_number, epoch) + } + + fn stake( + &self, + pub_key: &TYPES::SignatureKey, + epoch: Option, + ) -> Option> { + self.assert_has_epoch(epoch); + self.inner.stake(pub_key, epoch) + } + + fn da_stake( + &self, + pub_key: &TYPES::SignatureKey, + epoch: Option, + ) -> Option> { + self.assert_has_epoch(epoch); + self.inner.da_stake(pub_key, epoch) + } + + fn has_stake(&self, pub_key: &TYPES::SignatureKey, epoch: Option) -> bool { + self.assert_has_epoch(epoch); + self.inner.has_stake(pub_key, epoch) + } + + fn has_da_stake(&self, pub_key: &TYPES::SignatureKey, epoch: Option) -> bool { + self.assert_has_epoch(epoch); + self.inner.has_da_stake(pub_key, epoch) + } + + fn lookup_leader( + &self, + view: TYPES::View, + epoch: Option, + ) -> std::result::Result { + self.assert_has_epoch(epoch); + self.inner.lookup_leader(view, epoch) + } + + fn total_nodes(&self, epoch: Option) -> usize { + self.assert_has_epoch(epoch); + self.inner.total_nodes(epoch) + } + + fn da_total_nodes(&self, epoch: Option) -> usize { + self.assert_has_epoch(epoch); + self.inner.da_total_nodes(epoch) + } + + fn success_threshold(&self, epoch: Option) -> std::num::NonZeroU64 { + self.assert_has_epoch(epoch);
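+        // Epoch check passed; delegate the actual threshold computation to the inner static committee.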
self.inner.success_threshold(epoch) + } + + fn da_success_threshold(&self, epoch: Option) -> std::num::NonZeroU64 { + self.assert_has_epoch(epoch); + self.inner.da_success_threshold(epoch) + } + + fn failure_threshold(&self, epoch: Option) -> std::num::NonZeroU64 { + self.assert_has_epoch(epoch); + self.inner.failure_threshold(epoch) + } + + fn upgrade_threshold(&self, epoch: Option) -> std::num::NonZeroU64 { + self.assert_has_epoch(epoch); + self.inner.upgrade_threshold(epoch) + } + + fn has_epoch(&self, epoch: TYPES::Epoch) -> bool { + self.assert_has_epoch(Some(epoch)); + self.epochs.contains(&epoch) + } + + async fn get_epoch_root_and_drb( + &self, + _block_height: u64, + _epoch_height: u64, + _epoch: TYPES::Epoch, + ) -> anyhow::Result<(TYPES::BlockHeader, DrbResult)> { + tokio::time::sleep(Duration::from_secs(1)).await; + Ok((TYPES::BlockHeader::default(), DrbResult::default())) + } + + fn add_drb_result(&mut self, epoch: TYPES::Epoch, drb_result: hotshot_types::drb::DrbResult) { + self.drbs.insert(epoch); + self.inner.add_drb_result(epoch, drb_result); + } + + fn set_first_epoch( + &mut self, + epoch: TYPES::Epoch, + initial_drb_result: hotshot_types::drb::DrbResult, + ) { + self.epochs.insert(epoch); + self.epochs.insert(epoch + 1); + self.drbs.insert(epoch); + self.drbs.insert(epoch + 1); + self.inner.set_first_epoch(epoch, initial_drb_result); + } + + #[allow(refining_impl_trait)] + async fn add_epoch_root( + &self, + epoch: TYPES::Epoch, + _block_header: TYPES::BlockHeader, + ) -> Option> { + Some(Box::new(move |mem: &mut Self| { + tracing::error!("Adding epoch root for {epoch}"); + mem.epochs.insert(epoch); + })) + } +} diff --git a/hotshot/src/traits/election/mod.rs b/hotshot/src/traits/election/mod.rs index 5cc908a6d6..e3ebf22e13 100644 --- a/hotshot/src/traits/election/mod.rs +++ b/hotshot/src/traits/election/mod.rs @@ -6,6 +6,8 @@ //! 
elections used for consensus +/// Dummy membership which enforces that we must be caught up before it can be used +pub mod dummy_catchup_membership; /// leader completely randomized every view pub mod randomized_committee; diff --git a/hotshot/src/traits/election/randomized_committee.rs b/hotshot/src/traits/election/randomized_committee.rs index b420723642..1564d7647a 100644 --- a/hotshot/src/traits/election/randomized_committee.rs +++ b/hotshot/src/traits/election/randomized_committee.rs @@ -264,12 +264,6 @@ impl Membership for Committee { true } - async fn get_epoch_root( - &self, - _block_height: u64, - ) -> Option<(TYPES::Epoch, TYPES::BlockHeader)> { - None - } fn add_drb_result(&mut self, _epoch: ::Epoch, _drb_result: DrbResult) {} fn set_first_epoch(&mut self, _epoch: TYPES::Epoch, _initial_drb_result: DrbResult) {} diff --git a/hotshot/src/traits/election/randomized_committee_members.rs b/hotshot/src/traits/election/randomized_committee_members.rs index f1f386be0d..cc11e78e83 100644 --- a/hotshot/src/traits/election/randomized_committee_members.rs +++ b/hotshot/src/traits/election/randomized_committee_members.rs @@ -450,13 +450,6 @@ impl Membership true } - async fn get_epoch_root( - &self, - _block_height: u64, - ) -> Option<(TYPES::Epoch, TYPES::BlockHeader)> { - None - } - fn add_drb_result(&mut self, _epoch: ::Epoch, _drb_result: DrbResult) {} fn set_first_epoch(&mut self, _epoch: TYPES::Epoch, _initial_drb_result: DrbResult) {} diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 35bd896f17..adcb4c220b 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -274,13 +274,6 @@ impl Membership for StaticCommittee { true } - async fn get_epoch_root( - &self, - _block_height: u64, - ) -> Option<(TYPES::Epoch, TYPES::BlockHeader)> { - None - } - fn add_drb_result(&mut self, _epoch: ::Epoch, _drb_result: DrbResult) {} fn set_first_epoch(&mut self, epoch: TYPES::Epoch, _initial_drb_result: DrbResult) { diff --git a/hotshot/src/traits/election/static_committee_leader_two_views.rs b/hotshot/src/traits/election/static_committee_leader_two_views.rs index ededbe507a..8b3e828f71 100644 --- a/hotshot/src/traits/election/static_committee_leader_two_views.rs +++ b/hotshot/src/traits/election/static_committee_leader_two_views.rs @@ -243,13 +243,6 @@ impl Membership for StaticCommitteeLeaderForTwoViews Option<(TYPES::Epoch, TYPES::BlockHeader)> { - None - } - fn add_drb_result(&mut self, _epoch: ::Epoch, _drb_result: DrbResult) {} fn set_first_epoch(&mut self, _epoch: TYPES::Epoch, _initial_drb_result: DrbResult) {} diff --git a/hotshot/src/traits/election/two_static_committees.rs b/hotshot/src/traits/election/two_static_committees.rs index 7e80335a1b..2b2834710c 100644 --- a/hotshot/src/traits/election/two_static_committees.rs +++ b/hotshot/src/traits/election/two_static_committees.rs @@ -431,13 +431,6 @@ impl Membership for TwoStaticCommittees { true } - async fn get_epoch_root( - &self, - _block_height: u64, - ) -> Option<(TYPES::Epoch, TYPES::BlockHeader)> { - None - } - fn add_drb_result(&mut self, _epoch: ::Epoch, _drb_result: DrbResult) {} fn set_first_epoch(&mut self, _epoch: TYPES::Epoch, _initial_drb_result: DrbResult) {} diff --git a/sequencer/api/catchup.toml b/sequencer/api/catchup.toml index 8de2f5eda7..77166a83f5 100644 --- a/sequencer/api/catchup.toml +++ b/sequencer/api/catchup.toml @@ -69,4 +69,15 @@ This is only called when the state does not have full chain config which is diff
which is diff This can happen if the node missed a protocol upgrade. Returns the chain config -- this includes parameters such as `max_block_size`, `chain_id`, `base_fee`, and `fee_recipient`. +""" + +[route.leafchain] +PATH = ["/:height/leafchain"] +":height" = "Integer" +DOC = """ + +This Endpoint fetches a leaf chain which decides a specified block height. This endpoint should be used +For catching up the stake table, where `:height` is the block height of the epoch root you want to catchup to + +Returns a list of leaves which includes `:height` as the last leaf and should prove the block with `:height` was decided. """ \ No newline at end of file diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index 559f692070..3a8ebff8da 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -8,7 +8,7 @@ use derivative::Derivative; use espresso_types::{ config::PublicNetworkConfig, retain_accounts, v0::traits::SequencerPersistence, v0_99::ChainConfig, AccountQueryData, BlockMerkleTree, FeeAccount, FeeAccountProof, - FeeMerkleTree, NodeState, PubKey, Transaction, ValidatedState, + FeeMerkleTree, Leaf2, NodeState, PubKey, Transaction, ValidatedState, }; use futures::{ future::{BoxFuture, Future, FutureExt}, @@ -18,6 +18,7 @@ use hotshot_events_service::events_source::{ EventFilterSet, EventsSource, EventsStreamer, StartupInfo, }; use hotshot_query_service::data_source::ExtensibleDataSource; +use hotshot_types::vote::HasViewNumber; use hotshot_types::{ data::ViewNumber, event::Event, @@ -31,6 +32,7 @@ use hotshot_types::{ utils::{View, ViewInner}, PeerConfig, }; +use itertools::Itertools; use jf_merkle_tree::MerkleTreeScheme; use std::pin::Pin; use std::sync::Arc; @@ -372,6 +374,18 @@ impl< // Try storage. self.inner().get_chain_config(commitment).await } + async fn get_leaf_chain(&self, height: u64) -> anyhow::Result> { + // Check if we have the desired state in memory. + match self.as_ref().get_leaf_chain(height).await { + Ok(cf) => return Ok(cf), + Err(err) => { + tracing::info!("chain config is not in memory, trying storage: {err:#}"); + } + } + + // Try storage. + self.inner().get_leaf_chain(height).await + } } // #[async_trait] @@ -463,6 +477,48 @@ impl, V: Versions, P: SequencerPersistence> CatchupD bail!("chain config not found") } } + + async fn get_leaf_chain(&self, height: u64) -> anyhow::Result> { + let mut leaves = self + .consensus() + .await + .read() + .await + .consensus() + .read() + .await + .undecided_leaves(); + leaves.sort_by_key(|l| l.height()); + let (position, mut last_leaf) = leaves + .iter() + .find_position(|l| l.height() == height) + .context(format!("leaf chain not available for {height}"))?; + let mut chain = vec![last_leaf.clone()]; + for leaf in leaves.iter().skip(position + 1) { + if leaf.justify_qc().view_number() == last_leaf.view_number() { + chain.push(leaf.clone()); + } else { + continue; + } + if leaf.view_number() == last_leaf.view_number() + 1 { + // one away from decide + last_leaf = leaf; + break; + } + last_leaf = leaf; + } + // Make sure we got one more leaf to confirm the decide + for leaf in leaves + .iter() + .skip_while(|l| l.height() <= last_leaf.height()) + { + if leaf.justify_qc().view_number() == last_leaf.view_number() { + chain.push(leaf.clone()); + return Ok(chain); + } + } + bail!(format!("leaf chain not available for {height}")) + } } impl, D: Sync, V: Versions, P: SequencerPersistence> @@ -1068,6 +1124,15 @@ pub mod test_helpers { }) } .boxed() + })? 
+ .get("leafchain", |_req, _state| { + async move { + Result::, _>::Err(hotshot_query_service::Error::catch_all( + StatusCode::BAD_REQUEST, + "No leafchain found".to_string(), + )) + } + .boxed() })?; let mut app = App::<_, hotshot_query_service::Error>::with_state(()); @@ -1571,7 +1636,7 @@ mod test { config::PublicHotShotConfig, traits::NullEventConsumer, v0_1::{UpgradeMode, ViewBasedUpgrade}, - BackoffParams, FeeAccount, FeeAmount, FeeVersion, Header, MarketplaceVersion, + BackoffParams, EpochVersion, FeeAccount, FeeAmount, FeeVersion, Header, MarketplaceVersion, MockSequencerVersions, SequencerVersions, TimeBasedUpgrade, Timestamp, Upgrade, UpgradeType, ValidatedState, }; @@ -1926,6 +1991,116 @@ mod test { } } + #[ignore] + #[tokio::test(flavor = "multi_thread")] + async fn test_catchup_epochs() { + setup_test(); + + // Start a sequencer network, using the query service for catchup. + let port = pick_unused_port().expect("No ports free"); + let anvil = Anvil::new().spawn(); + let l1 = anvil.endpoint().parse().unwrap(); + const EPOCH_HEIGHT: u64 = 5; + let network_config = TestConfigBuilder::default() + .l1_url(l1) + .epoch_height(EPOCH_HEIGHT) + .build(); + const NUM_NODES: usize = 5; + let config = TestNetworkConfigBuilder::::with_num_nodes() + .api_config(Options::with_port(port)) + .network_config(network_config) + .catchups(std::array::from_fn(|_| { + StatePeers::>::from_urls( + vec![format!("http://localhost:{port}").parse().unwrap()], + Default::default(), + &NoMetrics, + ) + })) + .build(); + let mut network = TestNetwork::new( + config, + SequencerVersions::::new(), + ) + .await; + + // Wait for replica 0 to decide in the third epoch. + let mut events = network.peers[0].event_stream().await; + loop { + let event = events.next().await.unwrap(); + let EventType::Decide { leaf_chain, .. } = event.event else { + continue; + }; + if leaf_chain[0].leaf.height() > EPOCH_HEIGHT * 3 { + break; + } + } + + // Shut down and restart replica 0. We don't just stop consensus and restart it; we fully + // drop the node and recreate it so it loses all of its temporary state and starts off from + // genesis. It should be able to catch up by listening to proposals and then rebuild its + // state from its peers. + tracing::info!("shutting down node"); + network.peers.remove(0); + + // Wait for a few blocks to pass while the node is down, so it falls behind. + network + .server + .event_stream() + .await + .filter(|event| future::ready(matches!(event.event, EventType::Decide { .. }))) + .take(3) + .collect::>() + .await; + + tracing::info!("restarting node"); + let node = network + .cfg + .init_node( + 1, + ValidatedState::default(), + no_storage::Options, + StatePeers::>::from_urls( + vec![format!("http://localhost:{port}").parse().unwrap()], + Default::default(), + &NoMetrics, + ), + &NoMetrics, + test_helpers::STAKE_TABLE_CAPACITY_FOR_TEST, + NullEventConsumer, + MockSequencerVersions::new(), + Default::default(), + "http://localhost".parse().unwrap(), + ) + .await; + let mut events = node.event_stream().await; + + // Wait for a (non-genesis) block proposed by each node, to prove that the lagging node has + // caught up and all nodes are in sync. + let mut proposers = [false; NUM_NODES]; + loop { + let event = events.next().await.unwrap(); + let EventType::Decide { leaf_chain, .. } = event.event else { + continue; + }; + for LeafInfo { leaf, .. 
} in leaf_chain.iter().rev() { + let height = leaf.height(); + let leaf_builder = (leaf.view_number().u64() as usize) % NUM_NODES; + if height == 0 { + continue; + } + + tracing::info!( + "waiting for blocks from {proposers:?}, block {height} is from {leaf_builder}", + ); + proposers[leaf_builder] = true; + } + + if proposers.iter().all(|has_proposed| *has_proposed) { + break; + } + } + } + #[tokio::test(flavor = "multi_thread")] async fn test_chain_config_from_instance() { // This test uses a ValidatedState which only has the default chain config commitment. diff --git a/sequencer/src/api/data_source.rs b/sequencer/src/api/data_source.rs index 349ce3f073..daa3518b40 100644 --- a/sequencer/src/api/data_source.rs +++ b/sequencer/src/api/data_source.rs @@ -5,7 +5,7 @@ use espresso_types::{ config::PublicNetworkConfig, v0::traits::{PersistenceOptions, SequencerPersistence}, v0_99::ChainConfig, - FeeAccount, FeeAccountProof, FeeMerkleTree, NodeState, PubKey, Transaction, + FeeAccount, FeeAccountProof, FeeMerkleTree, Leaf2, NodeState, PubKey, Transaction, }; use futures::future::Future; use hotshot_query_service::{ @@ -177,6 +177,11 @@ pub(crate) trait CatchupDataSource: Sync { &self, commitment: Commitment, ) -> impl Send + Future>; + + fn get_leaf_chain( + &self, + height: u64, + ) -> impl Send + Future>>; } #[cfg(any(test, feature = "testing"))] diff --git a/sequencer/src/api/endpoints.rs b/sequencer/src/api/endpoints.rs index 91cecf6743..622d50a229 100644 --- a/sequencer/src/api/endpoints.rs +++ b/sequencer/src/api/endpoints.rs @@ -383,6 +383,18 @@ where .map_err(|err| Error::catch_all(StatusCode::NOT_FOUND, format!("{err:#}"))) } .boxed() + })? + .get("leafchain", |req, state| { + async move { + let height = req + .integer_param("height") + .map_err(Error::from_request_error)?; + state + .get_leaf_chain(height) + .await + .map_err(|err| Error::catch_all(StatusCode::NOT_FOUND, format!("{err:#}"))) + } + .boxed() + })?; Ok(api) diff --git a/sequencer/src/api/sql.rs b/sequencer/src/api/sql.rs index 8af85c7b3b..b19fdc83f1 100644 --- a/sequencer/src/api/sql.rs +++ b/sequencer/src/api/sql.rs @@ -159,6 +159,27 @@ impl CatchupStorage for SqlStorage { ))?; load_chain_config(&mut tx, commitment).await } + + async fn get_leaf_chain(&self, height: u64) -> anyhow::Result> { + let mut tx = self + .read() + .await + .context(format!("opening transaction to fetch leaf at {height}"))?; + let h = usize::try_from(height)?; + let query_leaf_chain = tx + .get_leaf_range(h..=(h + 2)) + .await + .context(format!("leaf chain {height} not available"))?; + let mut chain = vec![]; + + for query_result in query_leaf_chain { + let Ok(leaf_query) = query_result else { + bail!(format!("leaf chain {height} not available")); + }; + chain.push(leaf_query.leaf().clone()); + } + Ok(chain) + } } impl CatchupStorage for DataSource { @@ -189,6 +210,9 @@ impl CatchupStorage for DataSource { ) -> anyhow::Result { self.as_ref().get_chain_config(commitment).await } + async fn get_leaf_chain(&self, height: u64) -> anyhow::Result> { + self.as_ref().get_leaf_chain(height).await + } } #[async_trait] diff --git a/sequencer/src/catchup.rs b/sequencer/src/catchup.rs index 7e64d2a4ca..facfb02bf7 100644 --- a/sequencer/src/catchup.rs +++ b/sequencer/src/catchup.rs @@ -327,6 +327,16 @@ impl StateCatchup for StatePeers { }) .await } + async fn try_fetch_leaves(&self, retry: usize, height: u64) -> anyhow::Result> { + self.fetch(retry, |client| async move { + let leaf = client + .get::>(&format!("catchup/{}/leafchain", height)) + .send()
.await?; + anyhow::Ok(leaf) + }) + .await + } fn backoff(&self) -> &BackoffParams { &self.backoff @@ -399,6 +409,15 @@ pub(crate) trait CatchupStorage: Sync { bail!("chain config catchup is not supported for this data source"); } } + + fn get_leaf_chain( + &self, + _height: u64, + ) -> impl Send + Future>> { + async { + bail!("leaf chain catchup is not supported for this data source"); + } + } } impl CatchupStorage for hotshot_query_service::data_source::MetricsDataSource {} @@ -435,6 +454,9 @@ where ) -> anyhow::Result { self.inner().get_chain_config(commitment).await } + async fn get_leaf_chain(&self, height: u64) -> anyhow::Result> { + self.inner().get_leaf_chain(height).await + } } #[derive(Debug)] @@ -454,6 +476,9 @@ impl StateCatchup for SqlStateCatchup where T: CatchupStorage + Send + Sync, { + async fn try_fetch_leaves(&self, _retry: usize, height: u64) -> anyhow::Result> { + self.db.get_leaf_chain(height).await + } // TODO: add a test for the account proof validation // issue # 2102 (https://github.com/EspressoSystems/espresso-sequencer/issues/2102) #[tracing::instrument(skip(self, _retry, instance))] @@ -560,6 +585,9 @@ impl NullStateCatchup { #[async_trait] impl StateCatchup for NullStateCatchup { + async fn try_fetch_leaves(&self, _retry: usize, _height: u64) -> anyhow::Result> { + bail!("state catchup is disabled") + } async fn try_fetch_accounts( &self, _retry: usize, diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs index 8f165ec825..2d9c0a352e 100644 --- a/sequencer/src/lib.rs +++ b/sequencer/src/lib.rs @@ -745,6 +745,11 @@ pub mod testing { self } + pub fn epoch_height(mut self, epoch_height: u64) -> Self { + self.config.epoch_height = epoch_height; + self + } + pub fn build(self) -> TestConfig { TestConfig { config: self.config, diff --git a/types/src/v0/impls/instance_state.rs b/types/src/v0/impls/instance_state.rs index 545b3722ce..0330695769 100644 --- a/types/src/v0/impls/instance_state.rs +++ b/types/src/v0/impls/instance_state.rs @@ -204,7 +204,7 @@ pub mod mock { use super::*; use crate::{ retain_accounts, BackoffParams, BlockMerkleTree, FeeAccount, FeeMerkleCommitment, - FeeMerkleTree, + FeeMerkleTree, Leaf2, }; #[derive(Debug, Clone, Default)] @@ -224,6 +224,14 @@ pub mod mock { #[async_trait] impl StateCatchup for MockStateCatchup { + async fn try_fetch_leaves( + &self, + _retry: usize, + _height: u64, + ) -> anyhow::Result> { + Err(anyhow::anyhow!("todo")) + } + async fn try_fetch_accounts( &self, _retry: usize, diff --git a/types/src/v0/impls/stake_table.rs b/types/src/v0/impls/stake_table.rs index 89d5ef2d25..7823fc04ec 100644 --- a/types/src/v0/impls/stake_table.rs +++ b/types/src/v0/impls/stake_table.rs @@ -4,7 +4,7 @@ use std::{ num::NonZeroU64, }; -// use async_trait::async_trait; +use anyhow::Context; use contract_bindings_alloy::permissionedstaketable::PermissionedStakeTable::StakersUpdated; use ethers::types::{Address, U256}; use ethers_conv::ToAlloy; @@ -26,11 +26,13 @@ use hotshot_types::{ }; use itertools::Itertools; +use std::{fmt::Debug, sync::Arc}; use thiserror::Error; use super::{ + traits::StateCatchup, v0_3::{DAMembers, StakeTable, StakeTables}, - Header, L1Client, NodeState, PubKey, SeqTypes, + Header, L1Client, Leaf2, NodeState, PubKey, SeqTypes, }; type Epoch = ::Epoch; @@ -91,7 +93,7 @@ impl StakeTables { } } -#[derive(Clone, Debug)] +/// Type to describe DA and Stake memberships pub struct
EpochCommittees { /// Randomized committees, filled when we receive the DrbResult randomized_committees: BTreeMap>>, + /// Peers for catching up the stake table + #[debug(skip)] + peers: Option>, /// Contains the epoch after which initial_drb_result will not be used (set_first_epoch.epoch + 2) /// And the DrbResult to use before that epoch initial_drb_result: Option<(Epoch, DrbResult)>, @@ -282,6 +287,7 @@ impl EpochCommittees { l1_client: instance_state.l1_client.clone(), contract_address: instance_state.chain_config.stake_table_contract, randomized_committees: BTreeMap::new(), + peers: Some(instance_state.peers.clone()), initial_drb_result: None, } } @@ -491,8 +497,31 @@ impl Membership for EpochCommittees { self.state.contains_key(&epoch) } - async fn get_epoch_root(&self, _block_height: u64) -> Option<(Epoch, Header)> { - None + async fn get_epoch_root_and_drb( + &self, + block_height: u64, + epoch_height: u64, + epoch: Epoch, + ) -> anyhow::Result<(Header, DrbResult)> { + let Some(ref peers) = self.peers else { + anyhow::bail!("No Peers Configured for Catchup"); + }; + // Fetch leaves from peers + let leaf: Leaf2 = peers + .fetch_leaf(block_height, self, epoch, epoch_height) + .await?; + //DRB height is decided in the next epoch's last block + let drb_height = block_height + epoch_height + 3; + let drb_leaf = peers + .fetch_leaf(drb_height, self, epoch, epoch_height) + .await?; + + Ok(( + leaf.block_header().clone(), + drb_leaf + .next_drb_result + .context(format!("No DRB result on decided leaf at {drb_height}"))?, + )) } fn add_drb_result(&mut self, epoch: Epoch, drb: DrbResult) { diff --git a/types/src/v0/traits.rs b/types/src/v0/traits.rs index b29d8e76bb..610fc19e3f 100644 --- a/types/src/v0/traits.rs +++ b/types/src/v0/traits.rs @@ -15,7 +15,7 @@ use hotshot_types::{ QuorumProposalWrapper, VidCommitment, VidDisperseShare, ViewNumber, }, event::{HotShotAction, LeafInfo}, - message::{convert_proposal, Proposal}, + message::{convert_proposal, Proposal, UpgradeLock}, simple_certificate::{ NextEpochQuorumCertificate2, QuorumCertificate, QuorumCertificate2, UpgradeCertificate, }, @@ -24,7 +24,7 @@ use hotshot_types::{ storage::Storage, ValidatedState as HotShotState, }, - utils::{genesis_epoch_from_version, View}, + utils::{genesis_epoch_from_version, verify_epoch_root_chain, View}, }; use itertools::Itertools; use serde::{de::DeserializeOwned, Serialize}; @@ -34,10 +34,35 @@ use crate::{ FeeAccountProof, FeeMerkleCommitment, FeeMerkleTree, Leaf2, NetworkConfig, SeqTypes, }; -use super::{impls::NodeState, utils::BackoffParams, Leaf}; +use super::{ + impls::NodeState, utils::BackoffParams, EpochCommittees, EpochVersion, Leaf, SequencerVersions, +}; #[async_trait] pub trait StateCatchup: Send + Sync { + async fn try_fetch_leaves(&self, retry: usize, height: u64) -> anyhow::Result>; + + async fn fetch_leaf( + &self, + height: u64, + membership: &EpochCommittees, + epoch: EpochNumber, + epoch_height: u64, + ) -> anyhow::Result { + self.backoff().retry( + self, |provider, retry| { + async move { + let chain = provider.try_fetch_leaves(retry, height).await?; + verify_epoch_root_chain( + chain, + membership, + epoch, + epoch_height, + &UpgradeLock::>::new()).await + }.boxed() + }).await + } + /// Try to fetch the given accounts state, failing without retrying if unable. 
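/// Note: retries are handled by the `fetch_*` wrappers via [`Self::backoff`] (see the `fetch_leaf` default above); each `try_fetch_*` method should make a single attempt.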
/// Try to fetch the given accounts state, failing without retrying if unable. async fn try_fetch_accounts( &self, @@ -144,6 +169,21 @@ pub trait StateCatchup: Send + Sync { #[async_trait] impl<T: StateCatchup + ?Sized> StateCatchup for Box<T> { + async fn try_fetch_leaves(&self, retry: usize, height: u64) -> anyhow::Result<Vec<Leaf2>> { + (**self).try_fetch_leaves(retry, height).await + } + + async fn fetch_leaf( + &self, + height: u64, + membership: &EpochCommittees, + epoch: EpochNumber, + epoch_height: u64, + ) -> anyhow::Result<Leaf2> { + (**self) + .fetch_leaf(height, membership, epoch, epoch_height) + .await + } async fn try_fetch_accounts( &self, retry: usize, @@ -229,6 +269,21 @@ impl<T: StateCatchup + ?Sized> StateCatchup for Box<T> { #[async_trait] impl<T: StateCatchup + ?Sized> StateCatchup for Arc<T> { + async fn try_fetch_leaves(&self, retry: usize, height: u64) -> anyhow::Result<Vec<Leaf2>> { + (**self).try_fetch_leaves(retry, height).await + } + + async fn fetch_leaf( + &self, + height: u64, + membership: &EpochCommittees, + epoch: EpochNumber, + epoch_height: u64, + ) -> anyhow::Result<Leaf2> { + (**self) + .fetch_leaf(height, membership, epoch, epoch_height) + .await + } async fn try_fetch_accounts( &self, retry: usize, @@ -315,6 +370,21 @@ impl<T: StateCatchup + ?Sized> StateCatchup for Arc<T> { /// Catchup from multiple providers tries each provider in a round robin fashion until it succeeds. #[async_trait] impl<T: StateCatchup> StateCatchup for Vec<T> { + async fn try_fetch_leaves(&self, retry: usize, height: u64) -> anyhow::Result<Vec<Leaf2>> { + for provider in self { + match provider.try_fetch_leaves(retry, height).await { + Ok(leaves) => return Ok(leaves), + Err(err) => { + tracing::info!( + provider = provider.name(), + "failed to fetch leaves: {err:#}" + ); + } + } + } + + bail!("could not fetch leaves from any provider"); + } #[tracing::instrument(skip(self, instance))] async fn try_fetch_accounts( &self, From b904f76a67c79d639482c6be896a4740864d10de Mon Sep 17 00:00:00 2001 From: Abdul Basit <45506001+imabdulbasit@users.noreply.github.com> Date: Fri, 7 Mar 2025 19:53:08 +0500 Subject: [PATCH 07/17] fix genesis vid for Proof of stake version (#2732) * fix genesis vid for Proof of stake version * fix weight * lint --- .../src/data_source/update.rs | 61 ++++++++++++++----- 1 file changed, 45 insertions(+), 16 deletions(-) diff --git a/hotshot-query-service/src/data_source/update.rs b/hotshot-query-service/src/data_source/update.rs index 0f2da97126..86ec016938 100644 --- a/hotshot-query-service/src/data_source/update.rs +++ b/hotshot-query-service/src/data_source/update.rs @@ -22,16 +22,19 @@ use anyhow::{ensure, Context}; use async_trait::async_trait; use futures::future::Future; use hotshot::types::{Event, EventType}; -use hotshot_types::data::{VidDisperseShare, VidShare}; +use hotshot_types::{data::VidCommitment, event::LeafInfo}; use hotshot_types::{ - data::Leaf2, + data::{ns_table::parse_ns_table, Leaf2}, traits::{ block_contents::{BlockHeader, BlockPayload, EncodeBytes, GENESIS_VID_NUM_STORAGE_NODES}, node_implementation::{ConsensusTime, NodeType}, }, vid::advz::advz_scheme, }; -use hotshot_types::{data::VidCommitment, event::LeafInfo} +use hotshot_types::{ + data::{VidDisperseShare, VidShare}, + vid::avidm::{init_avidm_param, AvidMScheme}, +}; use jf_vid::VidScheme; use std::iter::once; @@ -168,19 +171,45 @@ fn genesis_vid( ) -> anyhow::Result<(VidCommonQueryData<Types>, VidShare)> { let payload = Payload::<Types>::empty().0; let bytes = payload.encode(); let mut disperse = advz_scheme(GENESIS_VID_NUM_STORAGE_NODES) .disperse(bytes) .context("unable to compute VID dispersal for genesis block")?; ensure!( VidCommitment::V0(disperse.commit) == leaf.block_header().payload_commitment(), "computed VID commit {} for 
genesis block does not match header commit {}", - disperse.commit, - leaf.block_header().payload_commitment() - ); - Ok(( - VidCommonQueryData::new(leaf.block_header().clone(), Some(disperse.common)), - VidShare::V0(disperse.shares.remove(0)), - )) + + match leaf.block_header().payload_commitment() { + VidCommitment::V0(commit) => { + let mut disperse = advz_scheme(GENESIS_VID_NUM_STORAGE_NODES) + .disperse(bytes) + .context("unable to compute VID dispersal for genesis block")?; + + ensure!( + disperse.commit == commit, + "computed VID commit {} for genesis block does not match header commit {}", + disperse.commit, + commit + ); + Ok(( + VidCommonQueryData::new(leaf.block_header().clone(), Some(disperse.common)), + VidShare::V0(disperse.shares.remove(0)), + )) + } + VidCommitment::V1(commit) => { + let avidm_param = init_avidm_param(GENESIS_VID_NUM_STORAGE_NODES)?; + let weights = vec![1; GENESIS_VID_NUM_STORAGE_NODES]; + let ns_table = parse_ns_table(bytes.len(), &leaf.block_header().metadata().encode()); + + let (calculated_commit, mut shares) = + AvidMScheme::ns_disperse(&avidm_param, &weights, &bytes, ns_table).unwrap(); + + ensure!( + calculated_commit == commit, + "computed VID commit {} for genesis block does not match header commit {}", + calculated_commit, + commit + ); + + Ok(( + VidCommonQueryData::new(leaf.block_header().clone(), None), + VidShare::V1(shares.remove(0)), + )) + } + } } /// A data source with an atomic transaction-based synchronization interface.
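[Editor's note] The fix above dispatches on the version of the header's payload commitment instead of always assuming ADVZ: a V0 commitment is checked against an ADVZ dispersal and keeps the VID common data, while a V1 commitment is checked against an AvidM namespaced dispersal with unit weights and stores no common data. A minimal standalone sketch of that dispatch shape, using simplified stand-in types rather than the crate's real API (`Commit`, `Share`, and `fake_disperse` are hypothetical):

    // Stand-ins for VidCommitment / VidShare; a u64 replaces the real commitments.
    enum Commit { V0(u64), V1(u64) }
    enum Share { V0(u64), V1(u64) }

    // Hypothetical checksum standing in for both dispersal schemes.
    fn fake_disperse(bytes: &[u8]) -> u64 {
        bytes.iter().map(|&b| u64::from(b)).sum()
    }

    // Returns (optional common data, share), mirroring the patch's return shape.
    fn genesis_vid_sketch(commit: Commit, bytes: &[u8]) -> Result<(Option<&'static str>, Share), String> {
        match commit {
            Commit::V0(expected) => {
                // Real code: advz_scheme(GENESIS_VID_NUM_STORAGE_NODES).disperse(bytes).
                let computed = fake_disperse(bytes);
                if computed != expected {
                    return Err(format!("V0 commit mismatch: {computed} vs {expected}"));
                }
                // ADVZ keeps the common data next to the share.
                Ok((Some("advz common data"), Share::V0(computed)))
            }
            Commit::V1(expected) => {
                // Real code: AvidMScheme::ns_disperse with weights vec![1; n].
                let computed = fake_disperse(bytes);
                if computed != expected {
                    return Err(format!("V1 commit mismatch: {computed} vs {expected}"));
                }
                // The patch stores no common data for AvidM.
                Ok((None, Share::V1(computed)))
            }
        }
    }

    // Usage: an empty genesis payload with a matching V1 commitment succeeds.
    // assert!(genesis_vid_sketch(Commit::V1(0), &[]).is_ok());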
From 79e80bc2767bac131a0cdf936aac7972e3a01fea Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Fri, 7 Mar 2025 08:28:14 -0700 Subject: [PATCH 08/17] Renaming Sequencer in Docs (#2720) * init * update link * fix docs link * better link * fix other docs link --- README.md | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/README.md b/README.md index 4dbe498290..f9a1c30984 100644 --- a/README.md +++ b/README.md @@ -1,21 +1,18 @@ -# Espresso Sequencer +# Espresso Network -[![Build](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/build.yml/badge.svg)](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/build.yml) -[![Test](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/test.yml/badge.svg)](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/test.yml) -[![Docs rust](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/doc-rust.yml/badge.svg)](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/doc-rust.yml) -[![Contracts](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/contracts.yml/badge.svg)](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/contracts.yml) -[![Lint](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/lint.yml/badge.svg)](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/lint.yml) -[![Audit](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/audit.yml/badge.svg)](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/audit.yml) -[![Ubuntu](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/ubuntu-install-without-nix.yml/badge.svg)](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/ubuntu-install-without-nix.yml) -[![Build without lockfile](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/build-without-lockfile.yml/badge.svg)](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/build-without-lockfile.yml) -[![Coverage Status](https://coveralls.io/repos/github/EspressoSystems/espresso-sequencer/badge.svg?branch=main)](https://coveralls.io/github/EspressoSystems/espresso-sequencer?branch=main) +[![Build](https://github.com/EspressoSystems/espresso-network/actions/workflows/build.yml/badge.svg)](https://github.com/EspressoSystems/espresso-network/actions/workflows/build.yml) +[![Test](https://github.com/EspressoSystems/espresso-network/actions/workflows/test.yml/badge.svg)](https://github.com/EspressoSystems/espresso-network/actions/workflows/test.yml) +[![Docs rust](https://github.com/EspressoSystems/espresso-network/actions/workflows/doc-rust.yml/badge.svg)](https://github.com/EspressoSystems/espresso-network/actions/workflows/doc-rust.yml) +[![Contracts](https://github.com/EspressoSystems/espresso-network/actions/workflows/contracts.yml/badge.svg)](https://github.com/EspressoSystems/espresso-network/actions/workflows/contracts.yml) +[![Lint](https://github.com/EspressoSystems/espresso-network/actions/workflows/lint.yml/badge.svg)](https://github.com/EspressoSystems/espresso-network/actions/workflows/lint.yml) +[![Audit](https://github.com/EspressoSystems/espresso-network/actions/workflows/audit.yml/badge.svg)](https://github.com/EspressoSystems/espresso-network/actions/workflows/audit.yml) +[![Ubuntu](https://github.com/EspressoSystems/espresso-network/actions/workflows/ubuntu-install-without-nix.yml/badge.svg)](https://github.com/EspressoSystems/espresso-network/actions/workflows/ubuntu-install-without-nix.yml) +[![Build without lockfile](https://github.com/EspressoSystems/espresso-network/actions/workflows/build-without-lockfile.yml/badge.svg)](https://github.com/EspressoSystems/espresso-network/actions/workflows/build-without-lockfile.yml) +[![Coverage Status](https://coveralls.io/repos/github/EspressoSystems/espresso-network/badge.svg?branch=main)](https://coveralls.io/github/EspressoSystems/espresso-network?branch=main) -The Espresso Sequencer offers rollups credible neutrality and enhanced interoperability, without compromising on scale. -Consisting of a data availability solution and a decentralized network of nodes that sequences transactions, layer-2 -rollups can leverage the Espresso Sequencer to give developers and end users fast confirmations, low (and fair) fees, -and robust infrastructure. +The Espresso Network is the global confirmation layer for rollups in the Ethereum ecosystem. Espresso's [global confirmation layer (GCL)](https://docs.espressosys.com/network) provides agreement on inputs to a collection of composable blockchains, offering a high-trust, fast, and verifiable way to process inputs on any chain with fast confirmations in return. -[Official Documentation](https://docs.espressosys.com/sequencer/espresso-sequencer-architecture/readme) +[Official Documentation](https://docs.espressosys.com/network/) ### Architecture @@ -36,7 +33,7 @@ a dockerized Espresso Sequencer network with an example Layer 2 rollup applicati # Development -- Obtain code: `git clone git@github.com:EspressoSystems/espresso-sequencer`. +- Obtain code: `git clone git@github.com:EspressoSystems/espresso-network`. - Make sure [nix](https://nixos.org/download.html) is installed.
- Activate the environment with `nix-shell`, or `nix develop`, or `direnv allow` if using [direnv](https://direnv.net/). - For installation without nix please see [ubuntu.md](./doc/ubuntu.md). From 1161ba2fa9255be6b983f8a7e0703e5aa0dcd03f Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Fri, 7 Mar 2025 08:28:27 -0700 Subject: [PATCH 09/17] Update CODEOWNERS (#2726) * update CODEOWNERS * add more hotshot members --- CODEOWNERS | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/CODEOWNERS b/CODEOWNERS index 14c5ed8923..ebe3f7d986 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -2,8 +2,4 @@ # later match takes precedence, they will be requested for review when someone # opens a pull request. -* @nomaxg @philippecamacho @ImJeremyHe @sveitser @jbearer @tbro @imabdulbasit - -# Dependabot PRs -*.toml @nomaxg @philippecamacho @ImJeremyHe @sveitser -*.lock @nomaxg @philippecamacho @ImJeremyHe @sveitser +* @sveitser @jbearer @tbro @imabdulbasit @ss-es @pls148 @bfish713 @rob-maron @lukaszrzasik From 2ee151328da8753aece506dad42782529b083e8e Mon Sep 17 00:00:00 2001 From: Chengyu Lin Date: Fri, 7 Mar 2025 10:39:16 -0500 Subject: [PATCH 10/17] Fix new vid panic on empty payload (#2733) * fix panic on empty payload * better fix * typo --- hotshot-types/src/data/ns_table.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/hotshot-types/src/data/ns_table.rs b/hotshot-types/src/data/ns_table.rs index c645c36957..1f9ac59b6f 100644 --- a/hotshot-types/src/data/ns_table.rs +++ b/hotshot-types/src/data/ns_table.rs @@ -29,10 +29,15 @@ pub fn parse_ns_table(payload_byte_len: usize, bytes: &[u8]) -> Vec<Range<usize>> if num_entries != bytes.len().saturating_sub(NUM_NSS_BYTE_LEN) / NS_ID_BYTE_LEN.saturating_add(NS_OFFSET_BYTE_LEN) + || (num_entries == 0 && payload_byte_len != 0) { tracing::warn!("Failed to parse the metadata as namespace table. Use a single namespace table instead."); return vec![(0..payload_byte_len)]; } + // Early return for empty payload and namespace table + if num_entries == 0 { + return vec![(0..payload_byte_len)]; + } let mut l = 0; for i in 0..num_entries { let offset = NUM_NSS_BYTE_LEN + i * (NS_ID_BYTE_LEN + NS_OFFSET_BYTE_LEN) + NS_ID_BYTE_LEN; From c4cca68b163b12e8a76535f5d0ddfdba2666dabe Mon Sep 17 00:00:00 2001 From: Mathis Date: Fri, 7 Mar 2025 17:07:01 +0100 Subject: [PATCH 11/17] CI: features cleanup (#2735) * CI: features cleanup - Move commands to justfile - Use a matrix: run with and without `--tests` - Only check pairs of features. This tests that each feature works with each other feature. This should cover most problems while hopefully keeping runtime reasonable. - Fix sqlite-unbundled feature in nix-shell.
* fix typo --- .github/workflows/cargo-features.yml | 27 +++++++++++++-------------- flake.nix | 3 +++ justfile | 25 +++++++++++++++++++++++++ 3 files changed, 41 insertions(+), 14 deletions(-) diff --git a/.github/workflows/cargo-features.yml b/.github/workflows/cargo-features.yml index 208ba94fe8..9f6f115717 100644 --- a/.github/workflows/cargo-features.yml +++ b/.github/workflows/cargo-features.yml @@ -20,8 +20,14 @@ concurrency: jobs: cargo-features: runs-on: ubuntu-latest + strategy: + matrix: + command: + - just check-features-ci + - just check-features-ci --tests steps: - uses: taiki-e/install-action@cargo-hack + - uses: taiki-e/install-action@just - name: Checkout Repository uses: actions/checkout@v4 @@ -29,25 +35,18 @@ jobs: - name: Free Disk Space (Ubuntu) uses: jlumbroso/free-disk-space@main with: - tool-cache: false android: true - dotnet: true - haskell: true + tool-cache: false + dotnet: false + haskell: false large-packages: false docker-images: false swap-storage: false # Note: this job doesn't use a cache on purpose because it mostly compiles # the crates in this repo over and over again with different feature - # combinations. Adding caching would not speed it up much and further - # contribute to our cache usage. - - # Includes checks for `--no-default-features` and `--all-features` as well - # as each individual feature enabled. - - name: Check compilation for feature combinations - run: | - cargo hack check --feature-powerset --exclude hotshot --exclude hotshot-builder-api --exclude hotshot-task-impls --exclude hotshot-macros --exclude hotshot-events-service --exclude hotshot-utils --exclude hotshot-orchestrator --exclude hotshot-query-service --exclude hotshot-stake-table --exclude hotshot-state-prover --exclude hotshot-task --exclude hotshot-testing --exclude hotshot-types --exclude hotshot-libp2p-networking --exclude hotshot-contract-adapter --exclude hotshot-example-types --exclude vid - - - name: Check compilation for feature combinations (--tests) + # combinations. The target directory gets really large. Adding caching + # would not speed it up much and further contribute to our cache usage. 
+ - name: Check compilation feature combinations run: | - cargo hack check --feature-powerset --tests --exclude hotshot --exclude hotshot-builder-api --exclude hotshot-task-impls --exclude hotshot-macros --exclude hotshot-events-service --exclude hotshot-utils --exclude hotshot-orchestrator --exclude hotshot-query-service --exclude hotshot-stake-table --exclude hotshot-state-prover --exclude hotshot-task --exclude hotshot-testing --exclude hotshot-types --exclude hotshot-libp2p-networking --exclude hotshot-contract-adapter --exclude hotshot-example-types --exclude vid + ${{ matrix.command }} diff --git a/flake.nix b/flake.nix index f6f5f83242..32619e684a 100644 --- a/flake.nix +++ b/flake.nix @@ -254,6 +254,9 @@ # Add rust binaries to PATH for native demo export PATH="$PWD/$CARGO_TARGET_DIR/debug:$PATH" + + # Needed to compile with the sqlite-unbundled feature + export LIBCLANG_PATH="${pkgs.llvmPackages.libclang.lib}/lib"; '' + self.checks.${system}.pre-commit-check.shellHook; RUST_SRC_PATH = "${stableToolchain}/lib/rustlib/src/rust/library"; FOUNDRY_SOLC = "${solc}/bin/solc"; diff --git a/justfile b/justfile index 8ecf0302c4..739b4b544f 100644 --- a/justfile +++ b/justfile @@ -89,6 +89,31 @@ clippy: check-features *args: cargo hack check --each-feature {{args}} +check-features-ci *args: + # check each pair of features plus `default` and `--no-default-features` + cargo hack check --feature-powerset \ + --depth 2 \ + --exclude contract-bindings-alloy \ + --exclude contract-bindings-ethers \ + --exclude hotshot \ + --exclude hotshot-builder-api \ + --exclude hotshot-contract-adapter \ + --exclude hotshot-events-service \ + --exclude hotshot-example-types \ + --exclude hotshot-libp2p-networking \ + --exclude hotshot-macros \ + --exclude hotshot-orchestrator \ + --exclude hotshot-query-service \ + --exclude hotshot-stake-table \ + --exclude hotshot-state-prover \ + --exclude hotshot-task \ + --exclude hotshot-task-impls \ + --exclude hotshot-testing \ + --exclude hotshot-types \ + --exclude hotshot-utils \ + --exclude vid \ + {{args}} + # Helpful shortcuts for local development dev-orchestrator: target/release/orchestrator -p 8080 -n 1 From ccde27cf5753eb486bc58861b9f3c0795460c7b5 Mon Sep 17 00:00:00 2001 From: Mathis Date: Fri, 7 Mar 2025 17:24:51 +0100 Subject: [PATCH 12/17] Remove foundry patch (#2736) * Remove foundry patch The patch in alloy is now included in foundry. - Create a separate dev shell with the legacy version of foundry that we need to generate ethers bindings. - Switch to foundry.nix overlay because the nixpkgs version is likely to lag a lot because foundry doesn't properly version their code. - Enable the optimizer. This used to be the default but this was changed in foundry recently. * CI: nuke contracts rust cache if nix files change I think this sometimes leads to weird compilation errors. * use glob * revert change to use openssl 3.0 in dev env This causes some issues when linking against libcurl which is needed by some of our rust code. So we need to do something smarter. This will break the script to build docker containers locally with nix but shouldn't break anything else. We could use openssl_3 in the nix dev env everywhere but this would lead to a huge number of nix derivations missing the upstream binary cache and therefore would be really annoying. I think this will unblock the CI and we can figure out what to do from here. 
* foundry: allow internal reverts This is described here: https://book.getfoundry.sh/cheatcodes/expect-revert#error --- .github/workflows/contracts.yml | 2 +- contract-bindings-alloy/src/erc1967proxy.rs | 9 ++ contract-bindings-alloy/src/feecontract.rs | 53 +++++++ contract-bindings-alloy/src/iplonkverifier.rs | 52 +++++++ contract-bindings-alloy/src/lib.rs | 18 +-- contract-bindings-alloy/src/lightclient.rs | 132 ++++++++++++++++ .../src/lightclientarbitrum.rs | 132 ++++++++++++++++ .../src/lightclientmock.rs | 146 ++++++++++++++++++ .../src/permissionedstaketable.rs | 40 +++++ contract-bindings-alloy/src/plonkverifier.rs | 55 +++++++ contract-bindings-alloy/src/plonkverifier2.rs | 57 +++++++ contracts/test/PlonkVerifier.t.sol | 6 +- contracts/test/PolynomialEval.t.sol | 1 + flake.lock | 103 +++++++++--- flake.nix | 50 +++--- foundry.toml | 3 + justfile | 2 +- 17 files changed, 807 insertions(+), 54 deletions(-) diff --git a/.github/workflows/contracts.yml b/.github/workflows/contracts.yml index 29f9684974..241c570f00 100644 --- a/.github/workflows/contracts.yml +++ b/.github/workflows/contracts.yml @@ -45,7 +45,7 @@ jobs: - uses: Swatinem/rust-cache@v2 name: Enable Rust Caching with: - prefix-key: v2-rust + prefix-key: v2-rust-${{ hashFiles('flake.*') }} - name: Check toolchain versions run: nix develop --accept-flake-config -c ./scripts/show-toolchain-versions diff --git a/contract-bindings-alloy/src/erc1967proxy.rs b/contract-bindings-alloy/src/erc1967proxy.rs index 2f9998b31e..e4b265f08a 100644 --- a/contract-bindings-alloy/src/erc1967proxy.rs +++ b/contract-bindings-alloy/src/erc1967proxy.rs @@ -123,6 +123,7 @@ pub mod ERC1967Proxy { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct AddressEmptyCode { + #[allow(missing_docs)] pub target: alloy::sol_types::private::Address, } #[allow( @@ -189,6 +190,7 @@ pub mod ERC1967Proxy { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct ERC1967InvalidImplementation { + #[allow(missing_docs)] pub implementation: alloy::sol_types::private::Address, } #[allow( @@ -479,7 +481,9 @@ pub mod ERC1967Proxy { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct constructorCall { + #[allow(missing_docs)] pub implementation: alloy::sol_types::private::Address, + #[allow(missing_docs)] pub _data: alloy::sol_types::private::Bytes, } const _: () = { @@ -550,9 +554,13 @@ pub mod ERC1967Proxy { }; ///Container for all the [`ERC1967Proxy`](self) custom errors. pub enum ERC1967ProxyErrors { + #[allow(missing_docs)] AddressEmptyCode(AddressEmptyCode), + #[allow(missing_docs)] ERC1967InvalidImplementation(ERC1967InvalidImplementation), + #[allow(missing_docs)] ERC1967NonPayable(ERC1967NonPayable), + #[allow(missing_docs)] FailedInnerCall(FailedInnerCall), } #[automatically_derived] @@ -710,6 +718,7 @@ pub mod ERC1967Proxy { } ///Container for all the [`ERC1967Proxy`](self) events. 
pub enum ERC1967ProxyEvents { + #[allow(missing_docs)] Upgraded(Upgraded), } #[automatically_derived] diff --git a/contract-bindings-alloy/src/feecontract.rs b/contract-bindings-alloy/src/feecontract.rs index 0eedfa5680..87689f8538 100644 --- a/contract-bindings-alloy/src/feecontract.rs +++ b/contract-bindings-alloy/src/feecontract.rs @@ -474,6 +474,7 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct AddressEmptyCode { + #[allow(missing_docs)] pub target: alloy::sol_types::private::Address, } #[allow( @@ -660,6 +661,7 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct ERC1967InvalidImplementation { + #[allow(missing_docs)] pub implementation: alloy::sol_types::private::Address, } #[allow( @@ -1148,6 +1150,7 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct OwnableInvalidOwner { + #[allow(missing_docs)] pub owner: alloy::sol_types::private::Address, } #[allow( @@ -1214,6 +1217,7 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct OwnableUnauthorizedAccount { + #[allow(missing_docs)] pub account: alloy::sol_types::private::Address, } #[allow( @@ -1340,6 +1344,7 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct UUPSUnsupportedProxiableUUID { + #[allow(missing_docs)] pub slot: alloy::sol_types::private::FixedBytes<32>, } #[allow( @@ -2095,6 +2100,7 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct UPGRADE_INTERFACE_VERSIONReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::String, } #[allow( @@ -2201,12 +2207,14 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct balancesCall { + #[allow(missing_docs)] pub user: alloy::sol_types::private::Address, } ///Container type for the return parameters of the [`balances(address)`](balancesCall) function. #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct balancesReturn { + #[allow(missing_docs)] pub amount: alloy::sol_types::private::primitives::aliases::U256, } #[allow( @@ -2317,6 +2325,7 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct depositCall { + #[allow(missing_docs)] pub user: alloy::sol_types::private::Address, } ///Container type for the return parameters of the [`deposit(address)`](depositCall) function. @@ -2435,8 +2444,11 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct getVersionReturn { + #[allow(missing_docs)] pub majorVersion: u8, + #[allow(missing_docs)] pub minorVersion: u8, + #[allow(missing_docs)] pub patchVersion: u8, } #[allow( @@ -2555,6 +2567,7 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct initializeCall { + #[allow(missing_docs)] pub multisig: alloy::sol_types::private::Address, } ///Container type for the return parameters of the [`initialize(address)`](initializeCall) function. 
@@ -2673,6 +2686,7 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct maxDepositAmountReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::primitives::aliases::U256, } #[allow( @@ -2783,6 +2797,7 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct minDepositAmountReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::primitives::aliases::U256, } #[allow( @@ -2893,6 +2908,7 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct ownerReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::Address, } #[allow( @@ -3003,6 +3019,7 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct proxiableUUIDReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::FixedBytes<32>, } #[allow( @@ -3217,6 +3234,7 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct transferOwnershipCall { + #[allow(missing_docs)] pub newOwner: alloy::sol_types::private::Address, } ///Container type for the return parameters of the [`transferOwnership(address)`](transferOwnershipCall) function. @@ -3331,7 +3349,9 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct upgradeToAndCallCall { + #[allow(missing_docs)] pub newImplementation: alloy::sol_types::private::Address, + #[allow(missing_docs)] pub data: alloy::sol_types::private::Bytes, } ///Container type for the return parameters of the [`upgradeToAndCall(address,bytes)`](upgradeToAndCallCall) function. @@ -3456,17 +3476,29 @@ pub mod FeeContract { }; ///Container for all the [`FeeContract`](self) function calls. pub enum FeeContractCalls { + #[allow(missing_docs)] UPGRADE_INTERFACE_VERSION(UPGRADE_INTERFACE_VERSIONCall), + #[allow(missing_docs)] balances(balancesCall), + #[allow(missing_docs)] deposit(depositCall), + #[allow(missing_docs)] getVersion(getVersionCall), + #[allow(missing_docs)] initialize(initializeCall), + #[allow(missing_docs)] maxDepositAmount(maxDepositAmountCall), + #[allow(missing_docs)] minDepositAmount(minDepositAmountCall), + #[allow(missing_docs)] owner(ownerCall), + #[allow(missing_docs)] proxiableUUID(proxiableUUIDCall), + #[allow(missing_docs)] renounceOwnership(renounceOwnershipCall), + #[allow(missing_docs)] transferOwnership(transferOwnershipCall), + #[allow(missing_docs)] upgradeToAndCall(upgradeToAndCallCall), } #[automatically_derived] @@ -3784,20 +3816,35 @@ pub mod FeeContract { } ///Container for all the [`FeeContract`](self) custom errors. 
pub enum FeeContractErrors { + #[allow(missing_docs)] AddressEmptyCode(AddressEmptyCode), + #[allow(missing_docs)] DepositTooLarge(DepositTooLarge), + #[allow(missing_docs)] DepositTooSmall(DepositTooSmall), + #[allow(missing_docs)] ERC1967InvalidImplementation(ERC1967InvalidImplementation), + #[allow(missing_docs)] ERC1967NonPayable(ERC1967NonPayable), + #[allow(missing_docs)] FailedInnerCall(FailedInnerCall), + #[allow(missing_docs)] FunctionDoesNotExist(FunctionDoesNotExist), + #[allow(missing_docs)] InvalidInitialization(InvalidInitialization), + #[allow(missing_docs)] InvalidUserAddress(InvalidUserAddress), + #[allow(missing_docs)] NoFunctionCalled(NoFunctionCalled), + #[allow(missing_docs)] NotInitializing(NotInitializing), + #[allow(missing_docs)] OwnableInvalidOwner(OwnableInvalidOwner), + #[allow(missing_docs)] OwnableUnauthorizedAccount(OwnableUnauthorizedAccount), + #[allow(missing_docs)] UUPSUnauthorizedCallContext(UUPSUnauthorizedCallContext), + #[allow(missing_docs)] UUPSUnsupportedProxiableUUID(UUPSUnsupportedProxiableUUID), } #[automatically_derived] @@ -4209,11 +4256,17 @@ pub mod FeeContract { } ///Container for all the [`FeeContract`](self) events. pub enum FeeContractEvents { + #[allow(missing_docs)] Deposit(Deposit), + #[allow(missing_docs)] Initialized(Initialized), + #[allow(missing_docs)] Log(Log), + #[allow(missing_docs)] OwnershipTransferred(OwnershipTransferred), + #[allow(missing_docs)] Upgrade(Upgrade), + #[allow(missing_docs)] Upgraded(Upgraded), } #[automatically_derived] diff --git a/contract-bindings-alloy/src/iplonkverifier.rs b/contract-bindings-alloy/src/iplonkverifier.rs index d4af84cd19..39e61d1e74 100644 --- a/contract-bindings-alloy/src/iplonkverifier.rs +++ b/contract-bindings-alloy/src/iplonkverifier.rs @@ -256,7 +256,9 @@ pub mod BN254 { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct G1Point { + #[allow(missing_docs)] pub x: ::RustType, + #[allow(missing_docs)] pub y: ::RustType, } #[allow( @@ -1283,28 +1285,51 @@ pub mod IPlonkVerifier { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct PlonkProof { + #[allow(missing_docs)] pub wire0: ::RustType, + #[allow(missing_docs)] pub wire1: ::RustType, + #[allow(missing_docs)] pub wire2: ::RustType, + #[allow(missing_docs)] pub wire3: ::RustType, + #[allow(missing_docs)] pub wire4: ::RustType, + #[allow(missing_docs)] pub prodPerm: ::RustType, + #[allow(missing_docs)] pub split0: ::RustType, + #[allow(missing_docs)] pub split1: ::RustType, + #[allow(missing_docs)] pub split2: ::RustType, + #[allow(missing_docs)] pub split3: ::RustType, + #[allow(missing_docs)] pub split4: ::RustType, + #[allow(missing_docs)] pub zeta: ::RustType, + #[allow(missing_docs)] pub zetaOmega: ::RustType, + #[allow(missing_docs)] pub wireEval0: ::RustType, + #[allow(missing_docs)] pub wireEval1: ::RustType, + #[allow(missing_docs)] pub wireEval2: ::RustType, + #[allow(missing_docs)] pub wireEval3: ::RustType, + #[allow(missing_docs)] pub wireEval4: ::RustType, + #[allow(missing_docs)] pub sigmaEval0: ::RustType, + #[allow(missing_docs)] pub sigmaEval1: ::RustType, + #[allow(missing_docs)] pub sigmaEval2: ::RustType, + #[allow(missing_docs)] pub sigmaEval3: ::RustType, + #[allow(missing_docs)] pub prodPermZetaOmegaEval: ::RustType, } #[allow( @@ -1830,27 +1855,49 @@ pub mod IPlonkVerifier { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct VerifyingKey { + 
#[allow(missing_docs)] pub domainSize: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub numInputs: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub sigma0: ::RustType, + #[allow(missing_docs)] pub sigma1: ::RustType, + #[allow(missing_docs)] pub sigma2: ::RustType, + #[allow(missing_docs)] pub sigma3: ::RustType, + #[allow(missing_docs)] pub sigma4: ::RustType, + #[allow(missing_docs)] pub q1: ::RustType, + #[allow(missing_docs)] pub q2: ::RustType, + #[allow(missing_docs)] pub q3: ::RustType, + #[allow(missing_docs)] pub q4: ::RustType, + #[allow(missing_docs)] pub qM12: ::RustType, + #[allow(missing_docs)] pub qM34: ::RustType, + #[allow(missing_docs)] pub qO: ::RustType, + #[allow(missing_docs)] pub qC: ::RustType, + #[allow(missing_docs)] pub qH1: ::RustType, + #[allow(missing_docs)] pub qH2: ::RustType, + #[allow(missing_docs)] pub qH3: ::RustType, + #[allow(missing_docs)] pub qH4: ::RustType, + #[allow(missing_docs)] pub qEcc: ::RustType, + #[allow(missing_docs)] pub g2LSB: alloy::sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] pub g2MSB: alloy::sol_types::private::FixedBytes<32>, } #[allow( @@ -2419,14 +2466,18 @@ pub mod IPlonkVerifier { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct verifyCall { + #[allow(missing_docs)] pub verifyingKey: ::RustType, + #[allow(missing_docs)] pub publicInput: [alloy::sol_types::private::primitives::aliases::U256; 8usize], + #[allow(missing_docs)] pub proof: ::RustType, } ///Container type for the return parameters of the [`verify((uint256,uint256,(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),bytes32,bytes32),uint256[8],((uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256))`](verifyCall) function. #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct verifyReturn { + #[allow(missing_docs)] pub _0: bool, } #[allow( @@ -2557,6 +2608,7 @@ pub mod IPlonkVerifier { }; ///Container for all the [`IPlonkVerifier`](self) function calls. pub enum IPlonkVerifierCalls { + #[allow(missing_docs)] verify(verifyCall), } #[automatically_derived] diff --git a/contract-bindings-alloy/src/lib.rs b/contract-bindings-alloy/src/lib.rs index ec4b26153a..256939ee1c 100644 --- a/contract-bindings-alloy/src/lib.rs +++ b/contract-bindings-alloy/src/lib.rs @@ -3,12 +3,12 @@ //! This is autogenerated code. //! Do not manually edit these files. //! These files may be overwritten by the codegen system at any time. 
-pub mod erc1967proxy; -pub mod feecontract; -pub mod iplonkverifier; -pub mod lightclient; -pub mod lightclientarbitrum; -pub mod lightclientmock; -pub mod permissionedstaketable; -pub mod plonkverifier; -pub mod plonkverifier2; +pub mod r#erc1967proxy; +pub mod r#feecontract; +pub mod r#iplonkverifier; +pub mod r#lightclient; +pub mod r#lightclientarbitrum; +pub mod r#lightclientmock; +pub mod r#permissionedstaketable; +pub mod r#plonkverifier; +pub mod r#plonkverifier2; diff --git a/contract-bindings-alloy/src/lightclient.rs b/contract-bindings-alloy/src/lightclient.rs index a2e27b0672..c9f47e3da6 100644 --- a/contract-bindings-alloy/src/lightclient.rs +++ b/contract-bindings-alloy/src/lightclient.rs @@ -256,7 +256,9 @@ pub mod BN254 { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct G1Point { + #[allow(missing_docs)] pub x: ::RustType, + #[allow(missing_docs)] pub y: ::RustType, } #[allow( @@ -568,28 +570,51 @@ pub mod IPlonkVerifier { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct PlonkProof { + #[allow(missing_docs)] pub wire0: ::RustType, + #[allow(missing_docs)] pub wire1: ::RustType, + #[allow(missing_docs)] pub wire2: ::RustType, + #[allow(missing_docs)] pub wire3: ::RustType, + #[allow(missing_docs)] pub wire4: ::RustType, + #[allow(missing_docs)] pub prodPerm: ::RustType, + #[allow(missing_docs)] pub split0: ::RustType, + #[allow(missing_docs)] pub split1: ::RustType, + #[allow(missing_docs)] pub split2: ::RustType, + #[allow(missing_docs)] pub split3: ::RustType, + #[allow(missing_docs)] pub split4: ::RustType, + #[allow(missing_docs)] pub zeta: ::RustType, + #[allow(missing_docs)] pub zetaOmega: ::RustType, + #[allow(missing_docs)] pub wireEval0: ::RustType, + #[allow(missing_docs)] pub wireEval1: ::RustType, + #[allow(missing_docs)] pub wireEval2: ::RustType, + #[allow(missing_docs)] pub wireEval3: ::RustType, + #[allow(missing_docs)] pub wireEval4: ::RustType, + #[allow(missing_docs)] pub sigmaEval0: ::RustType, + #[allow(missing_docs)] pub sigmaEval1: ::RustType, + #[allow(missing_docs)] pub sigmaEval2: ::RustType, + #[allow(missing_docs)] pub sigmaEval3: ::RustType, + #[allow(missing_docs)] pub prodPermZetaOmegaEval: ::RustType, } #[allow( @@ -2371,8 +2396,11 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct LightClientState { + #[allow(missing_docs)] pub viewNum: u64, + #[allow(missing_docs)] pub blockHeight: u64, + #[allow(missing_docs)] pub blockCommRoot: ::RustType, } #[allow( @@ -2586,9 +2614,13 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct StakeTableState { + #[allow(missing_docs)] pub threshold: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub blsKeyComm: ::RustType, + #[allow(missing_docs)] pub schnorrKeyComm: ::RustType, + #[allow(missing_docs)] pub amountComm: ::RustType, } #[allow( @@ -2819,6 +2851,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct AddressEmptyCode { + #[allow(missing_docs)] pub target: alloy::sol_types::private::Address, } #[allow( @@ -2885,6 +2918,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct ERC1967InvalidImplementation { + #[allow(missing_docs)] pub implementation: 
alloy::sol_types::private::Address, } #[allow( @@ -3673,6 +3707,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct OwnableInvalidOwner { + #[allow(missing_docs)] pub owner: alloy::sol_types::private::Address, } #[allow( @@ -3739,6 +3774,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct OwnableUnauthorizedAccount { + #[allow(missing_docs)] pub account: alloy::sol_types::private::Address, } #[allow( @@ -3925,6 +3961,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct UUPSUnsupportedProxiableUUID { + #[allow(missing_docs)] pub slot: alloy::sol_types::private::FixedBytes<32>, } #[allow( @@ -4831,6 +4868,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct UPGRADE_INTERFACE_VERSIONReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::String, } #[allow( @@ -4941,6 +4979,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct currentBlockNumberReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::primitives::aliases::U256, } #[allow( @@ -5159,8 +5198,11 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct finalizedStateReturn { + #[allow(missing_docs)] pub viewNum: u64, + #[allow(missing_docs)] pub blockHeight: u64, + #[allow(missing_docs)] pub blockCommRoot: ::RustType, } #[allow( @@ -5287,9 +5329,13 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct genesisStakeTableStateReturn { + #[allow(missing_docs)] pub threshold: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub blsKeyComm: ::RustType, + #[allow(missing_docs)] pub schnorrKeyComm: ::RustType, + #[allow(missing_docs)] pub amountComm: ::RustType, } #[allow( @@ -5425,8 +5471,11 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct genesisStateReturn { + #[allow(missing_docs)] pub viewNum: u64, + #[allow(missing_docs)] pub blockHeight: u64, + #[allow(missing_docs)] pub blockCommRoot: ::RustType, } #[allow( @@ -5549,13 +5598,16 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct getHotShotCommitmentCall { + #[allow(missing_docs)] pub hotShotBlockHeight: alloy::sol_types::private::primitives::aliases::U256, } ///Container type for the return parameters of the [`getHotShotCommitment(uint256)`](getHotShotCommitmentCall) function. 
#[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct getHotShotCommitmentReturn { + #[allow(missing_docs)] pub hotShotBlockCommRoot: ::RustType, + #[allow(missing_docs)] pub hotshotBlockHeight: u64, } #[allow( @@ -5679,6 +5731,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct getStateHistoryCountReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::primitives::aliases::U256, } #[allow( @@ -5789,8 +5842,11 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct getVersionReturn { + #[allow(missing_docs)] pub majorVersion: u8, + #[allow(missing_docs)] pub minorVersion: u8, + #[allow(missing_docs)] pub patchVersion: u8, } #[allow( @@ -5909,9 +5965,13 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct initializeCall { + #[allow(missing_docs)] pub _genesis: ::RustType, + #[allow(missing_docs)] pub _genesisStakeTableState: ::RustType, + #[allow(missing_docs)] pub _stateHistoryRetentionPeriod: u32, + #[allow(missing_docs)] pub owner: alloy::sol_types::private::Address, } ///Container type for the return parameters of the [`initialize((uint64,uint64,uint256),(uint256,uint256,uint256,uint256),uint32,address)`](initializeCall) function. @@ -6062,6 +6122,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct isPermissionedProverEnabledReturn { + #[allow(missing_docs)] pub _0: bool, } #[allow( @@ -6168,13 +6229,16 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct lagOverEscapeHatchThresholdCall { + #[allow(missing_docs)] pub blockNumber: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub blockThreshold: alloy::sol_types::private::primitives::aliases::U256, } ///Container type for the return parameters of the [`lagOverEscapeHatchThreshold(uint256,uint256)`](lagOverEscapeHatchThresholdCall) function. #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct lagOverEscapeHatchThresholdReturn { + #[allow(missing_docs)] pub _0: bool, } #[allow( @@ -6300,7 +6364,9 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct newFinalizedStateCall { + #[allow(missing_docs)] pub newState: ::RustType, + #[allow(missing_docs)] pub proof: ::RustType, } ///Container type for the return parameters of the [`newFinalizedState((uint64,uint64,uint256),((uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256))`](newFinalizedStateCall) function. 
@@ -6424,6 +6490,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct ownerReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::Address, } #[allow( @@ -6534,6 +6601,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct permissionedProverReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::Address, } #[allow( @@ -6644,6 +6712,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct proxiableUUIDReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::FixedBytes<32>, } #[allow( @@ -6858,6 +6927,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct setPermissionedProverCall { + #[allow(missing_docs)] pub prover: alloy::sol_types::private::Address, } ///Container type for the return parameters of the [`setPermissionedProver(address)`](setPermissionedProverCall) function. @@ -6972,6 +7042,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct setstateHistoryRetentionPeriodCall { + #[allow(missing_docs)] pub historySeconds: u32, } ///Container type for the return parameters of the [`setstateHistoryRetentionPeriod(uint32)`](setstateHistoryRetentionPeriodCall) function. @@ -7088,15 +7159,20 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct stateHistoryCommitmentsCall { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::primitives::aliases::U256, } ///Container type for the return parameters of the [`stateHistoryCommitments(uint256)`](stateHistoryCommitmentsCall) function. #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct stateHistoryCommitmentsReturn { + #[allow(missing_docs)] pub l1BlockHeight: u64, + #[allow(missing_docs)] pub l1BlockTimestamp: u64, + #[allow(missing_docs)] pub hotShotBlockHeight: u64, + #[allow(missing_docs)] pub hotShotBlockCommRoot: ::RustType, } #[allow( @@ -7236,6 +7312,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct stateHistoryFirstIndexReturn { + #[allow(missing_docs)] pub _0: u64, } #[allow( @@ -7346,6 +7423,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct stateHistoryRetentionPeriodReturn { + #[allow(missing_docs)] pub _0: u32, } #[allow( @@ -7452,6 +7530,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct transferOwnershipCall { + #[allow(missing_docs)] pub newOwner: alloy::sol_types::private::Address, } ///Container type for the return parameters of the [`transferOwnership(address)`](transferOwnershipCall) function. 
@@ -7566,7 +7645,9 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct upgradeToAndCallCall { + #[allow(missing_docs)] pub newImplementation: alloy::sol_types::private::Address, + #[allow(missing_docs)] pub data: alloy::sol_types::private::Bytes, } ///Container type for the return parameters of the [`upgradeToAndCall(address,bytes)`](upgradeToAndCallCall) function. @@ -7691,29 +7772,53 @@ pub mod LightClient { }; ///Container for all the [`LightClient`](self) function calls. pub enum LightClientCalls { + #[allow(missing_docs)] UPGRADE_INTERFACE_VERSION(UPGRADE_INTERFACE_VERSIONCall), + #[allow(missing_docs)] currentBlockNumber(currentBlockNumberCall), + #[allow(missing_docs)] disablePermissionedProverMode(disablePermissionedProverModeCall), + #[allow(missing_docs)] finalizedState(finalizedStateCall), + #[allow(missing_docs)] genesisStakeTableState(genesisStakeTableStateCall), + #[allow(missing_docs)] genesisState(genesisStateCall), + #[allow(missing_docs)] getHotShotCommitment(getHotShotCommitmentCall), + #[allow(missing_docs)] getStateHistoryCount(getStateHistoryCountCall), + #[allow(missing_docs)] getVersion(getVersionCall), + #[allow(missing_docs)] initialize(initializeCall), + #[allow(missing_docs)] isPermissionedProverEnabled(isPermissionedProverEnabledCall), + #[allow(missing_docs)] lagOverEscapeHatchThreshold(lagOverEscapeHatchThresholdCall), + #[allow(missing_docs)] newFinalizedState(newFinalizedStateCall), + #[allow(missing_docs)] owner(ownerCall), + #[allow(missing_docs)] permissionedProver(permissionedProverCall), + #[allow(missing_docs)] proxiableUUID(proxiableUUIDCall), + #[allow(missing_docs)] renounceOwnership(renounceOwnershipCall), + #[allow(missing_docs)] setPermissionedProver(setPermissionedProverCall), + #[allow(missing_docs)] setstateHistoryRetentionPeriod(setstateHistoryRetentionPeriodCall), + #[allow(missing_docs)] stateHistoryCommitments(stateHistoryCommitmentsCall), + #[allow(missing_docs)] stateHistoryFirstIndex(stateHistoryFirstIndexCall), + #[allow(missing_docs)] stateHistoryRetentionPeriod(stateHistoryRetentionPeriodCall), + #[allow(missing_docs)] transferOwnership(transferOwnershipCall), + #[allow(missing_docs)] upgradeToAndCall(upgradeToAndCallCall), } #[automatically_derived] @@ -8367,25 +8472,45 @@ pub mod LightClient { } ///Container for all the [`LightClient`](self) custom errors. 
pub enum LightClientErrors { + #[allow(missing_docs)] AddressEmptyCode(AddressEmptyCode), + #[allow(missing_docs)] ERC1967InvalidImplementation(ERC1967InvalidImplementation), + #[allow(missing_docs)] ERC1967NonPayable(ERC1967NonPayable), + #[allow(missing_docs)] FailedInnerCall(FailedInnerCall), + #[allow(missing_docs)] InsufficientSnapshotHistory(InsufficientSnapshotHistory), + #[allow(missing_docs)] InvalidAddress(InvalidAddress), + #[allow(missing_docs)] InvalidArgs(InvalidArgs), + #[allow(missing_docs)] InvalidHotShotBlockForCommitmentCheck(InvalidHotShotBlockForCommitmentCheck), + #[allow(missing_docs)] InvalidInitialization(InvalidInitialization), + #[allow(missing_docs)] InvalidMaxStateHistory(InvalidMaxStateHistory), + #[allow(missing_docs)] InvalidProof(InvalidProof), + #[allow(missing_docs)] NoChangeRequired(NoChangeRequired), + #[allow(missing_docs)] NotInitializing(NotInitializing), + #[allow(missing_docs)] OutdatedState(OutdatedState), + #[allow(missing_docs)] OwnableInvalidOwner(OwnableInvalidOwner), + #[allow(missing_docs)] OwnableUnauthorizedAccount(OwnableUnauthorizedAccount), + #[allow(missing_docs)] ProverNotPermissioned(ProverNotPermissioned), + #[allow(missing_docs)] UUPSUnauthorizedCallContext(UUPSUnauthorizedCallContext), + #[allow(missing_docs)] UUPSUnsupportedProxiableUUID(UUPSUnsupportedProxiableUUID), + #[allow(missing_docs)] WrongStakeTableUsed(WrongStakeTableUsed), } #[automatically_derived] @@ -8974,12 +9099,19 @@ pub mod LightClient { } ///Container for all the [`LightClient`](self) events. pub enum LightClientEvents { + #[allow(missing_docs)] Initialized(Initialized), + #[allow(missing_docs)] NewState(NewState), + #[allow(missing_docs)] OwnershipTransferred(OwnershipTransferred), + #[allow(missing_docs)] PermissionedProverNotRequired(PermissionedProverNotRequired), + #[allow(missing_docs)] PermissionedProverRequired(PermissionedProverRequired), + #[allow(missing_docs)] Upgrade(Upgrade), + #[allow(missing_docs)] Upgraded(Upgraded), } #[automatically_derived] diff --git a/contract-bindings-alloy/src/lightclientarbitrum.rs b/contract-bindings-alloy/src/lightclientarbitrum.rs index 1edab7451d..63a5ce6fb1 100644 --- a/contract-bindings-alloy/src/lightclientarbitrum.rs +++ b/contract-bindings-alloy/src/lightclientarbitrum.rs @@ -256,7 +256,9 @@ pub mod BN254 { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct G1Point { + #[allow(missing_docs)] pub x: ::RustType, + #[allow(missing_docs)] pub y: ::RustType, } #[allow( @@ -568,28 +570,51 @@ pub mod IPlonkVerifier { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct PlonkProof { + #[allow(missing_docs)] pub wire0: ::RustType, + #[allow(missing_docs)] pub wire1: ::RustType, + #[allow(missing_docs)] pub wire2: ::RustType, + #[allow(missing_docs)] pub wire3: ::RustType, + #[allow(missing_docs)] pub wire4: ::RustType, + #[allow(missing_docs)] pub prodPerm: ::RustType, + #[allow(missing_docs)] pub split0: ::RustType, + #[allow(missing_docs)] pub split1: ::RustType, + #[allow(missing_docs)] pub split2: ::RustType, + #[allow(missing_docs)] pub split3: ::RustType, + #[allow(missing_docs)] pub split4: ::RustType, + #[allow(missing_docs)] pub zeta: ::RustType, + #[allow(missing_docs)] pub zetaOmega: ::RustType, + #[allow(missing_docs)] pub wireEval0: ::RustType, + #[allow(missing_docs)] pub wireEval1: ::RustType, + #[allow(missing_docs)] pub wireEval2: ::RustType, + #[allow(missing_docs)] pub wireEval3: ::RustType, + 
#[allow(missing_docs)] pub wireEval4: ::RustType, + #[allow(missing_docs)] pub sigmaEval0: ::RustType, + #[allow(missing_docs)] pub sigmaEval1: ::RustType, + #[allow(missing_docs)] pub sigmaEval2: ::RustType, + #[allow(missing_docs)] pub sigmaEval3: ::RustType, + #[allow(missing_docs)] pub prodPermZetaOmegaEval: ::RustType, } #[allow( @@ -1264,8 +1289,11 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct LightClientState { + #[allow(missing_docs)] pub viewNum: u64, + #[allow(missing_docs)] pub blockHeight: u64, + #[allow(missing_docs)] pub blockCommRoot: ::RustType, } #[allow( @@ -1479,9 +1507,13 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct StakeTableState { + #[allow(missing_docs)] pub threshold: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub blsKeyComm: ::RustType, + #[allow(missing_docs)] pub schnorrKeyComm: ::RustType, + #[allow(missing_docs)] pub amountComm: ::RustType, } #[allow( @@ -2963,6 +2995,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct AddressEmptyCode { + #[allow(missing_docs)] pub target: alloy::sol_types::private::Address, } #[allow( @@ -3029,6 +3062,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct ERC1967InvalidImplementation { + #[allow(missing_docs)] pub implementation: alloy::sol_types::private::Address, } #[allow( @@ -3817,6 +3851,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct OwnableInvalidOwner { + #[allow(missing_docs)] pub owner: alloy::sol_types::private::Address, } #[allow( @@ -3883,6 +3918,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct OwnableUnauthorizedAccount { + #[allow(missing_docs)] pub account: alloy::sol_types::private::Address, } #[allow( @@ -4069,6 +4105,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct UUPSUnsupportedProxiableUUID { + #[allow(missing_docs)] pub slot: alloy::sol_types::private::FixedBytes<32>, } #[allow( @@ -4921,6 +4958,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct UPGRADE_INTERFACE_VERSIONReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::String, } #[allow( @@ -5031,6 +5069,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct currentBlockNumberReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::primitives::aliases::U256, } #[allow( @@ -5249,8 +5288,11 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct finalizedStateReturn { + #[allow(missing_docs)] pub viewNum: u64, + #[allow(missing_docs)] pub blockHeight: u64, + #[allow(missing_docs)] pub blockCommRoot: ::RustType, } #[allow( @@ -5377,9 +5419,13 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct genesisStakeTableStateReturn { + 
#[allow(missing_docs)] pub threshold: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub blsKeyComm: ::RustType, + #[allow(missing_docs)] pub schnorrKeyComm: ::RustType, + #[allow(missing_docs)] pub amountComm: ::RustType, } #[allow( @@ -5515,8 +5561,11 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct genesisStateReturn { + #[allow(missing_docs)] pub viewNum: u64, + #[allow(missing_docs)] pub blockHeight: u64, + #[allow(missing_docs)] pub blockCommRoot: ::RustType, } #[allow( @@ -5639,13 +5688,16 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct getHotShotCommitmentCall { + #[allow(missing_docs)] pub hotShotBlockHeight: alloy::sol_types::private::primitives::aliases::U256, } ///Container type for the return parameters of the [`getHotShotCommitment(uint256)`](getHotShotCommitmentCall) function. #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct getHotShotCommitmentReturn { + #[allow(missing_docs)] pub hotShotBlockCommRoot: ::RustType, + #[allow(missing_docs)] pub hotshotBlockHeight: u64, } #[allow( @@ -5769,6 +5821,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct getStateHistoryCountReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::primitives::aliases::U256, } #[allow( @@ -5879,8 +5932,11 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct getVersionReturn { + #[allow(missing_docs)] pub majorVersion: u8, + #[allow(missing_docs)] pub minorVersion: u8, + #[allow(missing_docs)] pub patchVersion: u8, } #[allow( @@ -5999,10 +6055,14 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct initializeCall { + #[allow(missing_docs)] pub _genesis: ::RustType, + #[allow(missing_docs)] pub _genesisStakeTableState: ::RustType, + #[allow(missing_docs)] pub _stateHistoryRetentionPeriod: u32, + #[allow(missing_docs)] pub owner: alloy::sol_types::private::Address, } ///Container type for the return parameters of the [`initialize((uint64,uint64,uint256),(uint256,uint256,uint256,uint256),uint32,address)`](initializeCall) function. @@ -6155,6 +6215,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct isPermissionedProverEnabledReturn { + #[allow(missing_docs)] pub _0: bool, } #[allow( @@ -6261,13 +6322,16 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct lagOverEscapeHatchThresholdCall { + #[allow(missing_docs)] pub blockNumber: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub blockThreshold: alloy::sol_types::private::primitives::aliases::U256, } ///Container type for the return parameters of the [`lagOverEscapeHatchThreshold(uint256,uint256)`](lagOverEscapeHatchThresholdCall) function. 
#[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct lagOverEscapeHatchThresholdReturn { + #[allow(missing_docs)] pub _0: bool, } #[allow( @@ -6393,7 +6457,9 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct newFinalizedStateCall { + #[allow(missing_docs)] pub newState: ::RustType, + #[allow(missing_docs)] pub proof: ::RustType, } ///Container type for the return parameters of the [`newFinalizedState((uint64,uint64,uint256),((uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256))`](newFinalizedStateCall) function. @@ -6520,6 +6586,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct ownerReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::Address, } #[allow( @@ -6630,6 +6697,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct permissionedProverReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::Address, } #[allow( @@ -6740,6 +6808,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct proxiableUUIDReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::FixedBytes<32>, } #[allow( @@ -6954,6 +7023,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct setPermissionedProverCall { + #[allow(missing_docs)] pub prover: alloy::sol_types::private::Address, } ///Container type for the return parameters of the [`setPermissionedProver(address)`](setPermissionedProverCall) function. @@ -7068,6 +7138,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct setstateHistoryRetentionPeriodCall { + #[allow(missing_docs)] pub historySeconds: u32, } ///Container type for the return parameters of the [`setstateHistoryRetentionPeriod(uint32)`](setstateHistoryRetentionPeriodCall) function. @@ -7184,15 +7255,20 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct stateHistoryCommitmentsCall { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::primitives::aliases::U256, } ///Container type for the return parameters of the [`stateHistoryCommitments(uint256)`](stateHistoryCommitmentsCall) function. 
#[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct stateHistoryCommitmentsReturn { + #[allow(missing_docs)] pub l1BlockHeight: u64, + #[allow(missing_docs)] pub l1BlockTimestamp: u64, + #[allow(missing_docs)] pub hotShotBlockHeight: u64, + #[allow(missing_docs)] pub hotShotBlockCommRoot: ::RustType, } #[allow( @@ -7332,6 +7408,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct stateHistoryFirstIndexReturn { + #[allow(missing_docs)] pub _0: u64, } #[allow( @@ -7442,6 +7519,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct stateHistoryRetentionPeriodReturn { + #[allow(missing_docs)] pub _0: u32, } #[allow( @@ -7548,6 +7626,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct transferOwnershipCall { + #[allow(missing_docs)] pub newOwner: alloy::sol_types::private::Address, } ///Container type for the return parameters of the [`transferOwnership(address)`](transferOwnershipCall) function. @@ -7662,7 +7741,9 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct upgradeToAndCallCall { + #[allow(missing_docs)] pub newImplementation: alloy::sol_types::private::Address, + #[allow(missing_docs)] pub data: alloy::sol_types::private::Bytes, } ///Container type for the return parameters of the [`upgradeToAndCall(address,bytes)`](upgradeToAndCallCall) function. @@ -7787,29 +7868,53 @@ pub mod LightClientArbitrum { }; ///Container for all the [`LightClientArbitrum`](self) function calls. 
pub enum LightClientArbitrumCalls { + #[allow(missing_docs)] UPGRADE_INTERFACE_VERSION(UPGRADE_INTERFACE_VERSIONCall), + #[allow(missing_docs)] currentBlockNumber(currentBlockNumberCall), + #[allow(missing_docs)] disablePermissionedProverMode(disablePermissionedProverModeCall), + #[allow(missing_docs)] finalizedState(finalizedStateCall), + #[allow(missing_docs)] genesisStakeTableState(genesisStakeTableStateCall), + #[allow(missing_docs)] genesisState(genesisStateCall), + #[allow(missing_docs)] getHotShotCommitment(getHotShotCommitmentCall), + #[allow(missing_docs)] getStateHistoryCount(getStateHistoryCountCall), + #[allow(missing_docs)] getVersion(getVersionCall), + #[allow(missing_docs)] initialize(initializeCall), + #[allow(missing_docs)] isPermissionedProverEnabled(isPermissionedProverEnabledCall), + #[allow(missing_docs)] lagOverEscapeHatchThreshold(lagOverEscapeHatchThresholdCall), + #[allow(missing_docs)] newFinalizedState(newFinalizedStateCall), + #[allow(missing_docs)] owner(ownerCall), + #[allow(missing_docs)] permissionedProver(permissionedProverCall), + #[allow(missing_docs)] proxiableUUID(proxiableUUIDCall), + #[allow(missing_docs)] renounceOwnership(renounceOwnershipCall), + #[allow(missing_docs)] setPermissionedProver(setPermissionedProverCall), + #[allow(missing_docs)] setstateHistoryRetentionPeriod(setstateHistoryRetentionPeriodCall), + #[allow(missing_docs)] stateHistoryCommitments(stateHistoryCommitmentsCall), + #[allow(missing_docs)] stateHistoryFirstIndex(stateHistoryFirstIndexCall), + #[allow(missing_docs)] stateHistoryRetentionPeriod(stateHistoryRetentionPeriodCall), + #[allow(missing_docs)] transferOwnership(transferOwnershipCall), + #[allow(missing_docs)] upgradeToAndCall(upgradeToAndCallCall), } #[automatically_derived] @@ -8460,25 +8565,45 @@ pub mod LightClientArbitrum { } ///Container for all the [`LightClientArbitrum`](self) custom errors. pub enum LightClientArbitrumErrors { + #[allow(missing_docs)] AddressEmptyCode(AddressEmptyCode), + #[allow(missing_docs)] ERC1967InvalidImplementation(ERC1967InvalidImplementation), + #[allow(missing_docs)] ERC1967NonPayable(ERC1967NonPayable), + #[allow(missing_docs)] FailedInnerCall(FailedInnerCall), + #[allow(missing_docs)] InsufficientSnapshotHistory(InsufficientSnapshotHistory), + #[allow(missing_docs)] InvalidAddress(InvalidAddress), + #[allow(missing_docs)] InvalidArgs(InvalidArgs), + #[allow(missing_docs)] InvalidHotShotBlockForCommitmentCheck(InvalidHotShotBlockForCommitmentCheck), + #[allow(missing_docs)] InvalidInitialization(InvalidInitialization), + #[allow(missing_docs)] InvalidMaxStateHistory(InvalidMaxStateHistory), + #[allow(missing_docs)] InvalidProof(InvalidProof), + #[allow(missing_docs)] NoChangeRequired(NoChangeRequired), + #[allow(missing_docs)] NotInitializing(NotInitializing), + #[allow(missing_docs)] OutdatedState(OutdatedState), + #[allow(missing_docs)] OwnableInvalidOwner(OwnableInvalidOwner), + #[allow(missing_docs)] OwnableUnauthorizedAccount(OwnableUnauthorizedAccount), + #[allow(missing_docs)] ProverNotPermissioned(ProverNotPermissioned), + #[allow(missing_docs)] UUPSUnauthorizedCallContext(UUPSUnauthorizedCallContext), + #[allow(missing_docs)] UUPSUnsupportedProxiableUUID(UUPSUnsupportedProxiableUUID), + #[allow(missing_docs)] WrongStakeTableUsed(WrongStakeTableUsed), } #[automatically_derived] @@ -9067,12 +9192,19 @@ pub mod LightClientArbitrum { } ///Container for all the [`LightClientArbitrum`](self) events. 
pub enum LightClientArbitrumEvents { + #[allow(missing_docs)] Initialized(Initialized), + #[allow(missing_docs)] NewState(NewState), + #[allow(missing_docs)] OwnershipTransferred(OwnershipTransferred), + #[allow(missing_docs)] PermissionedProverNotRequired(PermissionedProverNotRequired), + #[allow(missing_docs)] PermissionedProverRequired(PermissionedProverRequired), + #[allow(missing_docs)] Upgrade(Upgrade), + #[allow(missing_docs)] Upgraded(Upgraded), } #[automatically_derived] diff --git a/contract-bindings-alloy/src/lightclientmock.rs b/contract-bindings-alloy/src/lightclientmock.rs index 6d30b708d7..6b3fd406cd 100644 --- a/contract-bindings-alloy/src/lightclientmock.rs +++ b/contract-bindings-alloy/src/lightclientmock.rs @@ -256,7 +256,9 @@ pub mod BN254 { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct G1Point { + #[allow(missing_docs)] pub x: ::RustType, + #[allow(missing_docs)] pub y: ::RustType, } #[allow( @@ -568,28 +570,51 @@ pub mod IPlonkVerifier { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct PlonkProof { + #[allow(missing_docs)] pub wire0: ::RustType, + #[allow(missing_docs)] pub wire1: ::RustType, + #[allow(missing_docs)] pub wire2: ::RustType, + #[allow(missing_docs)] pub wire3: ::RustType, + #[allow(missing_docs)] pub wire4: ::RustType, + #[allow(missing_docs)] pub prodPerm: ::RustType, + #[allow(missing_docs)] pub split0: ::RustType, + #[allow(missing_docs)] pub split1: ::RustType, + #[allow(missing_docs)] pub split2: ::RustType, + #[allow(missing_docs)] pub split3: ::RustType, + #[allow(missing_docs)] pub split4: ::RustType, + #[allow(missing_docs)] pub zeta: ::RustType, + #[allow(missing_docs)] pub zetaOmega: ::RustType, + #[allow(missing_docs)] pub wireEval0: ::RustType, + #[allow(missing_docs)] pub wireEval1: ::RustType, + #[allow(missing_docs)] pub wireEval2: ::RustType, + #[allow(missing_docs)] pub wireEval3: ::RustType, + #[allow(missing_docs)] pub wireEval4: ::RustType, + #[allow(missing_docs)] pub sigmaEval0: ::RustType, + #[allow(missing_docs)] pub sigmaEval1: ::RustType, + #[allow(missing_docs)] pub sigmaEval2: ::RustType, + #[allow(missing_docs)] pub sigmaEval3: ::RustType, + #[allow(missing_docs)] pub prodPermZetaOmegaEval: ::RustType, } #[allow( @@ -1265,8 +1290,11 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct LightClientState { + #[allow(missing_docs)] pub viewNum: u64, + #[allow(missing_docs)] pub blockHeight: u64, + #[allow(missing_docs)] pub blockCommRoot: ::RustType, } #[allow( @@ -1480,9 +1508,13 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct StakeTableState { + #[allow(missing_docs)] pub threshold: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub blsKeyComm: ::RustType, + #[allow(missing_docs)] pub schnorrKeyComm: ::RustType, + #[allow(missing_docs)] pub amountComm: ::RustType, } #[allow( @@ -1712,9 +1744,13 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct StateHistoryCommitment { + #[allow(missing_docs)] pub l1BlockHeight: u64, + #[allow(missing_docs)] pub l1BlockTimestamp: u64, + #[allow(missing_docs)] pub hotShotBlockHeight: u64, + #[allow(missing_docs)] pub hotShotBlockCommRoot: ::RustType, } #[allow( @@ -3369,6 +3405,7 @@ pub mod LightClientMock { 
#[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct AddressEmptyCode { + #[allow(missing_docs)] pub target: alloy::sol_types::private::Address, } #[allow( @@ -3435,6 +3472,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct ERC1967InvalidImplementation { + #[allow(missing_docs)] pub implementation: alloy::sol_types::private::Address, } #[allow( @@ -4223,6 +4261,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct OwnableInvalidOwner { + #[allow(missing_docs)] pub owner: alloy::sol_types::private::Address, } #[allow( @@ -4289,6 +4328,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct OwnableUnauthorizedAccount { + #[allow(missing_docs)] pub account: alloy::sol_types::private::Address, } #[allow( @@ -4475,6 +4515,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct UUPSUnsupportedProxiableUUID { + #[allow(missing_docs)] pub slot: alloy::sol_types::private::FixedBytes<32>, } #[allow( @@ -5323,9 +5364,12 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct constructorCall { + #[allow(missing_docs)] pub genesis: ::RustType, + #[allow(missing_docs)] pub genesisStakeTableState: ::RustType, + #[allow(missing_docs)] pub maxHistorySeconds: u32, } const _: () = { @@ -5416,6 +5460,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct UPGRADE_INTERFACE_VERSIONReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::String, } #[allow( @@ -5526,6 +5571,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct currentBlockNumberReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::primitives::aliases::U256, } #[allow( @@ -5744,8 +5790,11 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct finalizedStateReturn { + #[allow(missing_docs)] pub viewNum: u64, + #[allow(missing_docs)] pub blockHeight: u64, + #[allow(missing_docs)] pub blockCommRoot: ::RustType, } #[allow( @@ -5872,9 +5921,13 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct genesisStakeTableStateReturn { + #[allow(missing_docs)] pub threshold: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub blsKeyComm: ::RustType, + #[allow(missing_docs)] pub schnorrKeyComm: ::RustType, + #[allow(missing_docs)] pub amountComm: ::RustType, } #[allow( @@ -6010,8 +6063,11 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct genesisStateReturn { + #[allow(missing_docs)] pub viewNum: u64, + #[allow(missing_docs)] pub blockHeight: u64, + #[allow(missing_docs)] pub blockCommRoot: ::RustType, } #[allow( @@ -6134,13 +6190,16 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct getHotShotCommitmentCall { + #[allow(missing_docs)] pub hotShotBlockHeight: 
alloy::sol_types::private::primitives::aliases::U256, } ///Container type for the return parameters of the [`getHotShotCommitment(uint256)`](getHotShotCommitmentCall) function. #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct getHotShotCommitmentReturn { + #[allow(missing_docs)] pub hotShotBlockCommRoot: ::RustType, + #[allow(missing_docs)] pub hotshotBlockHeight: u64, } #[allow( @@ -6264,6 +6323,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct getStateHistoryCountReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::primitives::aliases::U256, } #[allow( @@ -6374,8 +6434,11 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct getVersionReturn { + #[allow(missing_docs)] pub majorVersion: u8, + #[allow(missing_docs)] pub minorVersion: u8, + #[allow(missing_docs)] pub patchVersion: u8, } #[allow( @@ -6494,10 +6557,14 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct initializeCall { + #[allow(missing_docs)] pub _genesis: ::RustType, + #[allow(missing_docs)] pub _genesisStakeTableState: ::RustType, + #[allow(missing_docs)] pub _stateHistoryRetentionPeriod: u32, + #[allow(missing_docs)] pub owner: alloy::sol_types::private::Address, } ///Container type for the return parameters of the [`initialize((uint64,uint64,uint256),(uint256,uint256,uint256,uint256),uint32,address)`](initializeCall) function. @@ -6650,6 +6717,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct isPermissionedProverEnabledReturn { + #[allow(missing_docs)] pub _0: bool, } #[allow( @@ -6756,13 +6824,16 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct lagOverEscapeHatchThresholdCall { + #[allow(missing_docs)] pub blockNumber: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub threshold: alloy::sol_types::private::primitives::aliases::U256, } ///Container type for the return parameters of the [`lagOverEscapeHatchThreshold(uint256,uint256)`](lagOverEscapeHatchThresholdCall) function. #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct lagOverEscapeHatchThresholdReturn { + #[allow(missing_docs)] pub _0: bool, } #[allow( @@ -6888,7 +6959,9 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct newFinalizedStateCall { + #[allow(missing_docs)] pub newState: ::RustType, + #[allow(missing_docs)] pub proof: ::RustType, } ///Container type for the return parameters of the [`newFinalizedState((uint64,uint64,uint256),((uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256))`](newFinalizedStateCall) function. 
@@ -7015,6 +7088,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct ownerReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::Address, } #[allow( @@ -7125,6 +7199,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct permissionedProverReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::Address, } #[allow( @@ -7235,6 +7310,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct proxiableUUIDReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::FixedBytes<32>, } #[allow( @@ -7449,6 +7525,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct setFinalizedStateCall { + #[allow(missing_docs)] pub state: ::RustType, } ///Container type for the return parameters of the [`setFinalizedState((uint64,uint64,uint256))`](setFinalizedStateCall) function. @@ -7564,6 +7641,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct setHotShotDownSinceCall { + #[allow(missing_docs)] pub l1Height: alloy::sol_types::private::primitives::aliases::U256, } ///Container type for the return parameters of the [`setHotShotDownSince(uint256)`](setHotShotDownSinceCall) function. @@ -7786,6 +7864,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct setPermissionedProverCall { + #[allow(missing_docs)] pub prover: alloy::sol_types::private::Address, } ///Container type for the return parameters of the [`setPermissionedProver(address)`](setPermissionedProverCall) function. @@ -7900,6 +7979,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct setStateHistoryCall { + #[allow(missing_docs)] pub _stateHistoryCommitments: alloy::sol_types::private::Vec< ::RustType, >, @@ -8024,6 +8104,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct setstateHistoryRetentionPeriodCall { + #[allow(missing_docs)] pub historySeconds: u32, } ///Container type for the return parameters of the [`setstateHistoryRetentionPeriod(uint32)`](setstateHistoryRetentionPeriodCall) function. @@ -8140,15 +8221,20 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct stateHistoryCommitmentsCall { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::primitives::aliases::U256, } ///Container type for the return parameters of the [`stateHistoryCommitments(uint256)`](stateHistoryCommitmentsCall) function. 
#[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct stateHistoryCommitmentsReturn { + #[allow(missing_docs)] pub l1BlockHeight: u64, + #[allow(missing_docs)] pub l1BlockTimestamp: u64, + #[allow(missing_docs)] pub hotShotBlockHeight: u64, + #[allow(missing_docs)] pub hotShotBlockCommRoot: ::RustType, } #[allow( @@ -8288,6 +8374,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct stateHistoryFirstIndexReturn { + #[allow(missing_docs)] pub _0: u64, } #[allow( @@ -8398,6 +8485,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct stateHistoryRetentionPeriodReturn { + #[allow(missing_docs)] pub _0: u32, } #[allow( @@ -8504,6 +8592,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct transferOwnershipCall { + #[allow(missing_docs)] pub newOwner: alloy::sol_types::private::Address, } ///Container type for the return parameters of the [`transferOwnership(address)`](transferOwnershipCall) function. @@ -8618,7 +8707,9 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct upgradeToAndCallCall { + #[allow(missing_docs)] pub newImplementation: alloy::sol_types::private::Address, + #[allow(missing_docs)] pub data: alloy::sol_types::private::Bytes, } ///Container type for the return parameters of the [`upgradeToAndCall(address,bytes)`](upgradeToAndCallCall) function. @@ -8743,33 +8834,61 @@ pub mod LightClientMock { }; ///Container for all the [`LightClientMock`](self) function calls. 
pub enum LightClientMockCalls { + #[allow(missing_docs)] UPGRADE_INTERFACE_VERSION(UPGRADE_INTERFACE_VERSIONCall), + #[allow(missing_docs)] currentBlockNumber(currentBlockNumberCall), + #[allow(missing_docs)] disablePermissionedProverMode(disablePermissionedProverModeCall), + #[allow(missing_docs)] finalizedState(finalizedStateCall), + #[allow(missing_docs)] genesisStakeTableState(genesisStakeTableStateCall), + #[allow(missing_docs)] genesisState(genesisStateCall), + #[allow(missing_docs)] getHotShotCommitment(getHotShotCommitmentCall), + #[allow(missing_docs)] getStateHistoryCount(getStateHistoryCountCall), + #[allow(missing_docs)] getVersion(getVersionCall), + #[allow(missing_docs)] initialize(initializeCall), + #[allow(missing_docs)] isPermissionedProverEnabled(isPermissionedProverEnabledCall), + #[allow(missing_docs)] lagOverEscapeHatchThreshold(lagOverEscapeHatchThresholdCall), + #[allow(missing_docs)] newFinalizedState(newFinalizedStateCall), + #[allow(missing_docs)] owner(ownerCall), + #[allow(missing_docs)] permissionedProver(permissionedProverCall), + #[allow(missing_docs)] proxiableUUID(proxiableUUIDCall), + #[allow(missing_docs)] renounceOwnership(renounceOwnershipCall), + #[allow(missing_docs)] setFinalizedState(setFinalizedStateCall), + #[allow(missing_docs)] setHotShotDownSince(setHotShotDownSinceCall), + #[allow(missing_docs)] setHotShotUp(setHotShotUpCall), + #[allow(missing_docs)] setPermissionedProver(setPermissionedProverCall), + #[allow(missing_docs)] setStateHistory(setStateHistoryCall), + #[allow(missing_docs)] setstateHistoryRetentionPeriod(setstateHistoryRetentionPeriodCall), + #[allow(missing_docs)] stateHistoryCommitments(stateHistoryCommitmentsCall), + #[allow(missing_docs)] stateHistoryFirstIndex(stateHistoryFirstIndexCall), + #[allow(missing_docs)] stateHistoryRetentionPeriod(stateHistoryRetentionPeriodCall), + #[allow(missing_docs)] transferOwnership(transferOwnershipCall), + #[allow(missing_docs)] upgradeToAndCall(upgradeToAndCallCall), } #[automatically_derived] @@ -9514,25 +9633,45 @@ pub mod LightClientMock { } ///Container for all the [`LightClientMock`](self) custom errors. 
pub enum LightClientMockErrors { + #[allow(missing_docs)] AddressEmptyCode(AddressEmptyCode), + #[allow(missing_docs)] ERC1967InvalidImplementation(ERC1967InvalidImplementation), + #[allow(missing_docs)] ERC1967NonPayable(ERC1967NonPayable), + #[allow(missing_docs)] FailedInnerCall(FailedInnerCall), + #[allow(missing_docs)] InsufficientSnapshotHistory(InsufficientSnapshotHistory), + #[allow(missing_docs)] InvalidAddress(InvalidAddress), + #[allow(missing_docs)] InvalidArgs(InvalidArgs), + #[allow(missing_docs)] InvalidHotShotBlockForCommitmentCheck(InvalidHotShotBlockForCommitmentCheck), + #[allow(missing_docs)] InvalidInitialization(InvalidInitialization), + #[allow(missing_docs)] InvalidMaxStateHistory(InvalidMaxStateHistory), + #[allow(missing_docs)] InvalidProof(InvalidProof), + #[allow(missing_docs)] NoChangeRequired(NoChangeRequired), + #[allow(missing_docs)] NotInitializing(NotInitializing), + #[allow(missing_docs)] OutdatedState(OutdatedState), + #[allow(missing_docs)] OwnableInvalidOwner(OwnableInvalidOwner), + #[allow(missing_docs)] OwnableUnauthorizedAccount(OwnableUnauthorizedAccount), + #[allow(missing_docs)] ProverNotPermissioned(ProverNotPermissioned), + #[allow(missing_docs)] UUPSUnauthorizedCallContext(UUPSUnauthorizedCallContext), + #[allow(missing_docs)] UUPSUnsupportedProxiableUUID(UUPSUnsupportedProxiableUUID), + #[allow(missing_docs)] WrongStakeTableUsed(WrongStakeTableUsed), } #[automatically_derived] @@ -10121,12 +10260,19 @@ pub mod LightClientMock { } ///Container for all the [`LightClientMock`](self) events. pub enum LightClientMockEvents { + #[allow(missing_docs)] Initialized(Initialized), + #[allow(missing_docs)] NewState(NewState), + #[allow(missing_docs)] OwnershipTransferred(OwnershipTransferred), + #[allow(missing_docs)] PermissionedProverNotRequired(PermissionedProverNotRequired), + #[allow(missing_docs)] PermissionedProverRequired(PermissionedProverRequired), + #[allow(missing_docs)] Upgrade(Upgrade), + #[allow(missing_docs)] Upgraded(Upgraded), } #[automatically_derived] diff --git a/contract-bindings-alloy/src/permissionedstaketable.rs b/contract-bindings-alloy/src/permissionedstaketable.rs index fd58dff88e..d8aa26a865 100644 --- a/contract-bindings-alloy/src/permissionedstaketable.rs +++ b/contract-bindings-alloy/src/permissionedstaketable.rs @@ -139,9 +139,13 @@ pub mod BN254 { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct G2Point { + #[allow(missing_docs)] pub x0: ::RustType, + #[allow(missing_docs)] pub x1: ::RustType, + #[allow(missing_docs)] pub y0: ::RustType, + #[allow(missing_docs)] pub y1: ::RustType, } #[allow( @@ -467,7 +471,9 @@ pub mod EdOnBN254 { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct EdOnBN254Point { + #[allow(missing_docs)] pub x: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub y: alloy::sol_types::private::primitives::aliases::U256, } #[allow( @@ -1381,8 +1387,11 @@ pub mod PermissionedStakeTable { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct NodeInfo { + #[allow(missing_docs)] pub blsVK: ::RustType, + #[allow(missing_docs)] pub schnorrVK: ::RustType, + #[allow(missing_docs)] pub isDA: bool, } #[allow( @@ -1715,6 +1724,7 @@ pub mod PermissionedStakeTable { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct OwnableInvalidOwner { + #[allow(missing_docs)] pub owner: 
alloy::sol_types::private::Address, } #[allow( @@ -1781,6 +1791,7 @@ pub mod PermissionedStakeTable { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct OwnableUnauthorizedAccount { + #[allow(missing_docs)] pub account: alloy::sol_types::private::Address, } #[allow( @@ -1847,6 +1858,7 @@ pub mod PermissionedStakeTable { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct StakerAlreadyExists { + #[allow(missing_docs)] pub _0: ::RustType, } #[allow( @@ -1912,6 +1924,7 @@ pub mod PermissionedStakeTable { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct StakerNotFound { + #[allow(missing_docs)] pub _0: ::RustType, } #[allow( @@ -2298,6 +2311,7 @@ pub mod PermissionedStakeTable { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct constructorCall { + #[allow(missing_docs)] pub initialStakers: alloy::sol_types::private::Vec<::RustType>, } @@ -2363,12 +2377,14 @@ pub mod PermissionedStakeTable { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct _hashBlsKeyCall { + #[allow(missing_docs)] pub blsVK: ::RustType, } ///Container type for the return parameters of the [`_hashBlsKey((uint256,uint256,uint256,uint256))`](_hashBlsKeyCall) function. #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct _hashBlsKeyReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::FixedBytes<32>, } #[allow( @@ -2590,6 +2606,7 @@ pub mod PermissionedStakeTable { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct initializedAtBlockReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::primitives::aliases::U256, } #[allow( @@ -2696,12 +2713,14 @@ pub mod PermissionedStakeTable { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct isStakerCall { + #[allow(missing_docs)] pub staker: ::RustType, } ///Container type for the return parameters of the [`isStaker((uint256,uint256,uint256,uint256))`](isStakerCall) function. #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct isStakerReturn { + #[allow(missing_docs)] pub _0: bool, } #[allow( @@ -2815,6 +2834,7 @@ pub mod PermissionedStakeTable { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct ownerReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::Address, } #[allow( @@ -3029,6 +3049,7 @@ pub mod PermissionedStakeTable { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct transferOwnershipCall { + #[allow(missing_docs)] pub newOwner: alloy::sol_types::private::Address, } ///Container type for the return parameters of the [`transferOwnership(address)`](transferOwnershipCall) function. 
@@ -3143,8 +3164,10 @@ pub mod PermissionedStakeTable { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct updateCall { + #[allow(missing_docs)] pub stakersToRemove: alloy::sol_types::private::Vec<::RustType>, + #[allow(missing_docs)] pub newStakers: alloy::sol_types::private::Vec<::RustType>, } @@ -3272,13 +3295,21 @@ pub mod PermissionedStakeTable { }; ///Container for all the [`PermissionedStakeTable`](self) function calls. pub enum PermissionedStakeTableCalls { + #[allow(missing_docs)] _hashBlsKey(_hashBlsKeyCall), + #[allow(missing_docs)] initialize(initializeCall), + #[allow(missing_docs)] initializedAtBlock(initializedAtBlockCall), + #[allow(missing_docs)] isStaker(isStakerCall), + #[allow(missing_docs)] owner(ownerCall), + #[allow(missing_docs)] renounceOwnership(renounceOwnershipCall), + #[allow(missing_docs)] transferOwnership(transferOwnershipCall), + #[allow(missing_docs)] update(updateCall), } #[automatically_derived] @@ -3502,11 +3533,17 @@ pub mod PermissionedStakeTable { } ///Container for all the [`PermissionedStakeTable`](self) custom errors. pub enum PermissionedStakeTableErrors { + #[allow(missing_docs)] InvalidInitialization(InvalidInitialization), + #[allow(missing_docs)] NotInitializing(NotInitializing), + #[allow(missing_docs)] OwnableInvalidOwner(OwnableInvalidOwner), + #[allow(missing_docs)] OwnableUnauthorizedAccount(OwnableUnauthorizedAccount), + #[allow(missing_docs)] StakerAlreadyExists(StakerAlreadyExists), + #[allow(missing_docs)] StakerNotFound(StakerNotFound), } #[automatically_derived] @@ -3706,8 +3743,11 @@ pub mod PermissionedStakeTable { } ///Container for all the [`PermissionedStakeTable`](self) events. pub enum PermissionedStakeTableEvents { + #[allow(missing_docs)] Initialized(Initialized), + #[allow(missing_docs)] OwnershipTransferred(OwnershipTransferred), + #[allow(missing_docs)] StakersUpdated(StakersUpdated), } #[automatically_derived] diff --git a/contract-bindings-alloy/src/plonkverifier.rs b/contract-bindings-alloy/src/plonkverifier.rs index 363d08a96a..edc97722de 100644 --- a/contract-bindings-alloy/src/plonkverifier.rs +++ b/contract-bindings-alloy/src/plonkverifier.rs @@ -256,7 +256,9 @@ pub mod BN254 { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct G1Point { + #[allow(missing_docs)] pub x: ::RustType, + #[allow(missing_docs)] pub y: ::RustType, } #[allow( @@ -569,28 +571,51 @@ pub mod IPlonkVerifier { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct PlonkProof { + #[allow(missing_docs)] pub wire0: ::RustType, + #[allow(missing_docs)] pub wire1: ::RustType, + #[allow(missing_docs)] pub wire2: ::RustType, + #[allow(missing_docs)] pub wire3: ::RustType, + #[allow(missing_docs)] pub wire4: ::RustType, + #[allow(missing_docs)] pub prodPerm: ::RustType, + #[allow(missing_docs)] pub split0: ::RustType, + #[allow(missing_docs)] pub split1: ::RustType, + #[allow(missing_docs)] pub split2: ::RustType, + #[allow(missing_docs)] pub split3: ::RustType, + #[allow(missing_docs)] pub split4: ::RustType, + #[allow(missing_docs)] pub zeta: ::RustType, + #[allow(missing_docs)] pub zetaOmega: ::RustType, + #[allow(missing_docs)] pub wireEval0: ::RustType, + #[allow(missing_docs)] pub wireEval1: ::RustType, + #[allow(missing_docs)] pub wireEval2: ::RustType, + #[allow(missing_docs)] pub wireEval3: ::RustType, + #[allow(missing_docs)] pub wireEval4: ::RustType, + #[allow(missing_docs)] pub 
sigmaEval0: ::RustType, + #[allow(missing_docs)] pub sigmaEval1: ::RustType, + #[allow(missing_docs)] pub sigmaEval2: ::RustType, + #[allow(missing_docs)] pub sigmaEval3: ::RustType, + #[allow(missing_docs)] pub prodPermZetaOmegaEval: ::RustType, } #[allow( @@ -1116,27 +1141,49 @@ pub mod IPlonkVerifier { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct VerifyingKey { + #[allow(missing_docs)] pub domainSize: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub numInputs: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub sigma0: ::RustType, + #[allow(missing_docs)] pub sigma1: ::RustType, + #[allow(missing_docs)] pub sigma2: ::RustType, + #[allow(missing_docs)] pub sigma3: ::RustType, + #[allow(missing_docs)] pub sigma4: ::RustType, + #[allow(missing_docs)] pub q1: ::RustType, + #[allow(missing_docs)] pub q2: ::RustType, + #[allow(missing_docs)] pub q3: ::RustType, + #[allow(missing_docs)] pub q4: ::RustType, + #[allow(missing_docs)] pub qM12: ::RustType, + #[allow(missing_docs)] pub qM34: ::RustType, + #[allow(missing_docs)] pub qO: ::RustType, + #[allow(missing_docs)] pub qC: ::RustType, + #[allow(missing_docs)] pub qH1: ::RustType, + #[allow(missing_docs)] pub qH2: ::RustType, + #[allow(missing_docs)] pub qH3: ::RustType, + #[allow(missing_docs)] pub qH4: ::RustType, + #[allow(missing_docs)] pub qEcc: ::RustType, + #[allow(missing_docs)] pub g2LSB: alloy::sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] pub g2MSB: alloy::sol_types::private::FixedBytes<32>, } #[allow( @@ -2769,14 +2816,18 @@ pub mod PlonkVerifier { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct verifyCall { + #[allow(missing_docs)] pub verifyingKey: ::RustType, + #[allow(missing_docs)] pub publicInput: [alloy::sol_types::private::primitives::aliases::U256; 7usize], + #[allow(missing_docs)] pub proof: ::RustType, } ///Container type for the return parameters of the [`verify((uint256,uint256,(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),bytes32,bytes32),uint256[7],((uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256))`](verifyCall) function. #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct verifyReturn { + #[allow(missing_docs)] pub _0: bool, } #[allow( @@ -2909,6 +2960,7 @@ pub mod PlonkVerifier { }; ///Container for all the [`PlonkVerifier`](self) function calls. pub enum PlonkVerifierCalls { + #[allow(missing_docs)] verify(verifyCall), } #[automatically_derived] @@ -2988,8 +3040,11 @@ pub mod PlonkVerifier { } ///Container for all the [`PlonkVerifier`](self) custom errors. 
pub enum PlonkVerifierErrors { + #[allow(missing_docs)] InvalidPlonkArgs(InvalidPlonkArgs), + #[allow(missing_docs)] UnsupportedDegree(UnsupportedDegree), + #[allow(missing_docs)] WrongPlonkVK(WrongPlonkVK), } #[automatically_derived] diff --git a/contract-bindings-alloy/src/plonkverifier2.rs b/contract-bindings-alloy/src/plonkverifier2.rs index 12cc4014ac..c7ab4bb3cc 100644 --- a/contract-bindings-alloy/src/plonkverifier2.rs +++ b/contract-bindings-alloy/src/plonkverifier2.rs @@ -256,7 +256,9 @@ pub mod BN254 { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct G1Point { + #[allow(missing_docs)] pub x: ::RustType, + #[allow(missing_docs)] pub y: ::RustType, } #[allow( @@ -569,28 +571,51 @@ pub mod IPlonkVerifier { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct PlonkProof { + #[allow(missing_docs)] pub wire0: ::RustType, + #[allow(missing_docs)] pub wire1: ::RustType, + #[allow(missing_docs)] pub wire2: ::RustType, + #[allow(missing_docs)] pub wire3: ::RustType, + #[allow(missing_docs)] pub wire4: ::RustType, + #[allow(missing_docs)] pub prodPerm: ::RustType, + #[allow(missing_docs)] pub split0: ::RustType, + #[allow(missing_docs)] pub split1: ::RustType, + #[allow(missing_docs)] pub split2: ::RustType, + #[allow(missing_docs)] pub split3: ::RustType, + #[allow(missing_docs)] pub split4: ::RustType, + #[allow(missing_docs)] pub zeta: ::RustType, + #[allow(missing_docs)] pub zetaOmega: ::RustType, + #[allow(missing_docs)] pub wireEval0: ::RustType, + #[allow(missing_docs)] pub wireEval1: ::RustType, + #[allow(missing_docs)] pub wireEval2: ::RustType, + #[allow(missing_docs)] pub wireEval3: ::RustType, + #[allow(missing_docs)] pub wireEval4: ::RustType, + #[allow(missing_docs)] pub sigmaEval0: ::RustType, + #[allow(missing_docs)] pub sigmaEval1: ::RustType, + #[allow(missing_docs)] pub sigmaEval2: ::RustType, + #[allow(missing_docs)] pub sigmaEval3: ::RustType, + #[allow(missing_docs)] pub prodPermZetaOmegaEval: ::RustType, } #[allow( @@ -1116,27 +1141,49 @@ pub mod IPlonkVerifier { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct VerifyingKey { + #[allow(missing_docs)] pub domainSize: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub numInputs: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub sigma0: ::RustType, + #[allow(missing_docs)] pub sigma1: ::RustType, + #[allow(missing_docs)] pub sigma2: ::RustType, + #[allow(missing_docs)] pub sigma3: ::RustType, + #[allow(missing_docs)] pub sigma4: ::RustType, + #[allow(missing_docs)] pub q1: ::RustType, + #[allow(missing_docs)] pub q2: ::RustType, + #[allow(missing_docs)] pub q3: ::RustType, + #[allow(missing_docs)] pub q4: ::RustType, + #[allow(missing_docs)] pub qM12: ::RustType, + #[allow(missing_docs)] pub qM34: ::RustType, + #[allow(missing_docs)] pub qO: ::RustType, + #[allow(missing_docs)] pub qC: ::RustType, + #[allow(missing_docs)] pub qH1: ::RustType, + #[allow(missing_docs)] pub qH2: ::RustType, + #[allow(missing_docs)] pub qH3: ::RustType, + #[allow(missing_docs)] pub qH4: ::RustType, + #[allow(missing_docs)] pub qEcc: ::RustType, + #[allow(missing_docs)] pub g2LSB: alloy::sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] pub g2MSB: alloy::sol_types::private::FixedBytes<32>, } #[allow( @@ -2669,6 +2716,7 @@ pub mod PlonkVerifier2 { #[allow(non_camel_case_types, non_snake_case, 
clippy::pub_underscore_fields)] #[derive(Clone)] pub struct P_MODReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::primitives::aliases::U256, } #[allow( @@ -2779,6 +2827,7 @@ pub mod PlonkVerifier2 { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct R_MODReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::primitives::aliases::U256, } #[allow( @@ -2885,14 +2934,18 @@ pub mod PlonkVerifier2 { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct verifyCall { + #[allow(missing_docs)] pub vk: ::RustType, + #[allow(missing_docs)] pub publicInput: [alloy::sol_types::private::primitives::aliases::U256; 7usize], + #[allow(missing_docs)] pub proof: ::RustType, } ///Container type for the return parameters of the [`verify((uint256,uint256,(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),bytes32,bytes32),uint256[7],((uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256))`](verifyCall) function. #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct verifyReturn { + #[allow(missing_docs)] pub success: bool, } #[allow( @@ -3023,8 +3076,11 @@ pub mod PlonkVerifier2 { }; ///Container for all the [`PlonkVerifier2`](self) function calls. pub enum PlonkVerifier2Calls { + #[allow(missing_docs)] P_MOD(P_MODCall), + #[allow(missing_docs)] R_MOD(R_MODCall), + #[allow(missing_docs)] verify(verifyCall), } #[automatically_derived] @@ -3144,6 +3200,7 @@ pub mod PlonkVerifier2 { } ///Container for all the [`PlonkVerifier2`](self) custom errors. pub enum PlonkVerifier2Errors { + #[allow(missing_docs)] UnsupportedDegree(UnsupportedDegree), } #[automatically_derived] diff --git a/contracts/test/PlonkVerifier.t.sol b/contracts/test/PlonkVerifier.t.sol index c541471c40..07da9808e7 100644 --- a/contracts/test/PlonkVerifier.t.sol +++ b/contracts/test/PlonkVerifier.t.sol @@ -230,6 +230,7 @@ contract PlonkVerifier_validateProof_Test is PlonkVerifierCommonTest { /// @dev Randomly pick a coordinate of a point among points in a proof /// mutate it to another value so that the point is no longer valid, /// test if our check will revert. + /// forge-config: default.allow_internal_expect_revert = true function testFuzz_RevertIfProofContainsInvalidGroup(uint256 nthPoint, bool testX) external { // a valid proof IPlonkVerifier.PlonkProof memory proof = dummyProof(42); @@ -251,12 +252,13 @@ contract PlonkVerifier_validateProof_Test is PlonkVerifierCommonTest { } } - vm.expectRevert(); + vm.expectRevert("Bn254: invalid G1 point"); V._validateProof(proof); } /// @dev Randomly pick field in a proof mutate it to invalid value /// test if our check will revert. 
+ /// forge-config: default.allow_internal_expect_revert = true function testFuzz_RevertIfProofContainsInvalidField(uint256 nthField) external { // a valid proof IPlonkVerifier.PlonkProof memory proof = dummyProof(42); @@ -271,7 +273,7 @@ contract PlonkVerifier_validateProof_Test is PlonkVerifierCommonTest { mstore(add(start, mul(nthField, 0x20)), invalidField) } - vm.expectRevert(); + vm.expectRevert(bytes("Bn254: invalid scalar field")); V._validateProof(proof); } } diff --git a/contracts/test/PolynomialEval.t.sol b/contracts/test/PolynomialEval.t.sol index e4ebba1523..2a7c3c8898 100644 --- a/contracts/test/PolynomialEval.t.sol +++ b/contracts/test/PolynomialEval.t.sol @@ -31,6 +31,7 @@ contract PolynomialEval_newEvalDomain_Test is Test { } /// @dev Test revert if domainSize is not among {2^16 ~ 2^20, 2^5} + /// forge-config: default.allow_internal_expect_revert = true function testFuzz_unsupportedDomainSize_reverts(uint256 domainSize) external { vm.assume( domainSize != 2 ** 16 && domainSize != 2 ** 17 && domainSize != 2 ** 18 diff --git a/flake.lock b/flake.lock index 5648cdfb01..9c6891b0fe 100644 --- a/flake.lock +++ b/flake.lock @@ -51,6 +51,21 @@ } }, "flake-utils_2": { + "locked": { + "lastModified": 1644229661, + "narHash": "sha256-1YdnJAsNy69bpcjuoKdOYQX0YxZBiCYZo4Twxerqv7k=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "3cecb5b042f7f209c56ffd8371b2711a290ec797", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "flake-utils_3": { "inputs": { "systems": "systems_2" }, @@ -68,7 +83,7 @@ "type": "github" } }, - "flake-utils_3": { + "flake-utils_4": { "inputs": { "systems": "systems_3" }, @@ -86,6 +101,26 @@ "type": "github" } }, + "foundry-nix": { + "inputs": { + "flake-utils": "flake-utils_2", + "nixpkgs": "nixpkgs" + }, + "locked": { + "lastModified": 1740993113, + "narHash": "sha256-XY6CUZft7wjB/cbLyi/xeOZHh2mizSAT0EaYo9wuRXI=", + "owner": "shazow", + "repo": "foundry.nix", + "rev": "ed2a08376f14c0caf2b97418c91a66872e5ab3e2", + "type": "github" + }, + "original": { + "owner": "shazow", + "ref": "monthly", + "repo": "foundry.nix", + "type": "github" + } + }, "gitignore": { "inputs": { "nixpkgs": [ @@ -109,24 +144,22 @@ }, "nixpkgs": { "locked": { - "lastModified": 1736798957, - "narHash": "sha256-qwpCtZhSsSNQtK4xYGzMiyEDhkNzOCz/Vfu4oL2ETsQ=", + "lastModified": 1666753130, + "narHash": "sha256-Wff1dGPFSneXJLI2c0kkdWTgxnQ416KE6X4KnFkgPYQ=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "9abb87b552b7f55ac8916b6fc9e5cb486656a2f3", + "rev": "f540aeda6f677354f1e7144ab04352f61aaa0118", "type": "github" }, "original": { - "owner": "NixOS", - "ref": "nixos-unstable", - "repo": "nixpkgs", - "type": "github" + "id": "nixpkgs", + "type": "indirect" } }, "nixpkgs-cross-overlay": { "inputs": { - "flake-utils": "flake-utils_2", - "nixpkgs": "nixpkgs_2", + "flake-utils": "flake-utils_3", + "nixpkgs": "nixpkgs_3", "rust-overlay": "rust-overlay", "treefmt-nix": "treefmt-nix" }, @@ -144,7 +177,39 @@ "type": "github" } }, + "nixpkgs-legacy-foundry": { + "locked": { + "lastModified": 1736798957, + "narHash": "sha256-qwpCtZhSsSNQtK4xYGzMiyEDhkNzOCz/Vfu4oL2ETsQ=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "9abb87b552b7f55ac8916b6fc9e5cb486656a2f3", + "type": "github" + }, + "original": { + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "9abb87b552b7f55ac8916b6fc9e5cb486656a2f3", + "type": "github" + } + }, "nixpkgs_2": { + "locked": { + "lastModified": 1741246872, + "narHash": 
"sha256-Q6pMP4a9ed636qilcYX8XUguvKl/0/LGXhHcRI91p0U=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "10069ef4cf863633f57238f179a0297de84bd8d3", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_3": { "locked": { "lastModified": 1733550349, "narHash": "sha256-NcGumB4Lr6KSDq+nIqXtNA8QwAQKDSZT7N9OTGWbTrs=", @@ -160,7 +225,7 @@ "type": "github" } }, - "nixpkgs_3": { + "nixpkgs_4": { "locked": { "lastModified": 1730768919, "narHash": "sha256-8AKquNnnSaJRXZxc5YmF/WfmxiHX6MMZZasRP6RRQkE=", @@ -176,7 +241,7 @@ "type": "github" } }, - "nixpkgs_4": { + "nixpkgs_5": { "locked": { "lastModified": 1736320768, "narHash": "sha256-nIYdTAiKIGnFNugbomgBJR+Xv5F1ZQU+HfaBqJKroC0=", @@ -192,7 +257,7 @@ "type": "github" } }, - "nixpkgs_5": { + "nixpkgs_6": { "locked": { "lastModified": 1682516527, "narHash": "sha256-1joLG1A4mwhMrj4XVp0mBTNIHphVQSEMIlZ50t0Udxk=", @@ -211,7 +276,7 @@ "inputs": { "flake-compat": "flake-compat_2", "gitignore": "gitignore", - "nixpkgs": "nixpkgs_3" + "nixpkgs": "nixpkgs_4" }, "locked": { "lastModified": 1735882644, @@ -231,8 +296,10 @@ "inputs": { "flake-compat": "flake-compat", "flake-utils": "flake-utils", - "nixpkgs": "nixpkgs", + "foundry-nix": "foundry-nix", + "nixpkgs": "nixpkgs_2", "nixpkgs-cross-overlay": "nixpkgs-cross-overlay", + "nixpkgs-legacy-foundry": "nixpkgs-legacy-foundry", "pre-commit-hooks": "pre-commit-hooks", "rust-overlay": "rust-overlay_2", "solc-bin": "solc-bin" @@ -261,7 +328,7 @@ }, "rust-overlay_2": { "inputs": { - "nixpkgs": "nixpkgs_4" + "nixpkgs": "nixpkgs_5" }, "locked": { "lastModified": 1740104932, @@ -279,8 +346,8 @@ }, "solc-bin": { "inputs": { - "flake-utils": "flake-utils_3", - "nixpkgs": "nixpkgs_5" + "flake-utils": "flake-utils_4", + "nixpkgs": "nixpkgs_6" }, "locked": { "lastModified": 1733347147, diff --git a/flake.nix b/flake.nix index 32619e684a..5f856b271f 100644 --- a/flake.nix +++ b/flake.nix @@ -13,6 +13,10 @@ }; inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; + inputs.nixpkgs-legacy-foundry.url = "github:NixOS/nixpkgs/9abb87b552b7f55ac8916b6fc9e5cb486656a2f3"; + + inputs.foundry-nix.url = "github:shazow/foundry.nix/monthly"; # Use monthly branch for permanent releases + inputs.rust-overlay.url = "github:oxalica/rust-overlay"; inputs.nixpkgs-cross-overlay.url = @@ -29,6 +33,8 @@ outputs = { self , nixpkgs + , nixpkgs-legacy-foundry + , foundry-nix , rust-overlay , nixpkgs-cross-overlay , flake-utils @@ -61,6 +67,7 @@ overlays = [ (import rust-overlay) + foundry-nix.overlay solc-bin.overlays.default (final: prev: { solhint = @@ -186,7 +193,7 @@ buildInputs = [ # Rust dependencies pkg-config - openssl_3 # match ubuntu 24.04 that we use on CI and as base image in docker + openssl curl protobuf # to compile libp2p-autonat stableToolchain @@ -216,25 +223,7 @@ coreutils # Ethereum contracts, solidity, ... - # TODO: remove alloy patch when forge includes this fix: https://github.com/alloy-rs/core/pull/864 - # foundry - (foundry.overrideAttrs { - # Set the resolve limit to 128 by replacing the value in the vendored dependencies. 
- postPatch = '' - pushd $cargoDepsCopy/alloy-sol-macro-expander - - oldHash=$(sha256sum src/expand/mod.rs | cut -d " " -f 1) - - substituteInPlace src/expand/mod.rs \ - --replace-warn \ - 'const RESOLVE_LIMIT: usize = 32;' 'const RESOLVE_LIMIT: usize = 128;' - - substituteInPlace .cargo-checksum.json \ - --replace-warn $oldHash $(sha256sum src/expand/mod.rs | cut -d " " -f 1) - - popd - ''; - }) + foundry-bin solc nodePackages.prettier solhint @@ -261,6 +250,21 @@ RUST_SRC_PATH = "${stableToolchain}/lib/rustlib/src/rust/library"; FOUNDRY_SOLC = "${solc}/bin/solc"; }); + # A shell with foundry v0.3.0 which can still build ethers-rs bindings. + # Can be removed when we are no longer using the ethers-rs bindings. + devShells.legacyFoundry = + let + overlays = [ + solc-bin.overlays.default + ]; + pkgs = import nixpkgs-legacy-foundry { inherit system overlays; }; + in + mkShell { + packages = with pkgs; [ + solc + foundry + ]; + }; devShells.crossShell = crossShell { config = "x86_64-unknown-linux-musl"; }; devShells.armCrossShell = @@ -275,7 +279,7 @@ buildInputs = [ # Rust dependencies pkg-config - openssl_3 + openssl curl protobuf # to compile libp2p-autonat toolchain @@ -289,7 +293,7 @@ buildInputs = [ # Rust dependencies pkg-config - openssl_3 + openssl curl protobuf # to compile libp2p-autonat toolchain @@ -312,7 +316,7 @@ buildInputs = [ # Rust dependencies pkg-config - openssl_3 + openssl curl protobuf # to compile libp2p-autonat stableToolchain diff --git a/foundry.toml b/foundry.toml index e80e9a402e..9d3bfc1697 100644 --- a/foundry.toml +++ b/foundry.toml @@ -22,6 +22,9 @@ extra_output = ["storageLayout"] fs_permissions = [{ access = "read-write", path = "./contracts/script/"}, { access = "read-write", path = "contracts/out"}] ignored_warnings_from = ['contracts/lib'] +# Without the optimizer we hit stack too deep errors. +optimizer = true + # See more config options https://github.com/foundry-rs/foundry/tree/master/config [rpc_endpoints] diff --git a/justfile b/justfile index 739b4b544f..b53f445648 100644 --- a/justfile +++ b/justfile @@ -141,7 +141,7 @@ gen-bindings: git submodule update --init --recursive # Generate the ethers bindings - forge bind --contracts ./contracts/src/ --ethers --crate-name contract-bindings-ethers --bindings-path contract-bindings-ethers --select "{{REGEXP}}" --overwrite --force + nix develop .#legacyFoundry -c forge bind --contracts ./contracts/src/ --ethers --crate-name contract-bindings-ethers --bindings-path contract-bindings-ethers --select "{{REGEXP}}" --overwrite --force # Foundry doesn't include bytecode in the bindings for LightClient.sol, since it links with # libraries. However, this bytecode is still needed to link and deploy the contract. Copy it to From 01b5c08cfb69057e89b49a358f5d978f9b83cbd0 Mon Sep 17 00:00:00 2001 From: Abdul Basit <45506001+imabdulbasit@users.noreply.github.com> Date: Fri, 7 Mar 2025 23:13:40 +0500 Subject: [PATCH 13/17] fix process compose logs (#2544) * Fix process compose logs on CI * CI: maybe fix process compose logs Remove -D detach when running process compose. Currently we only get the process compose internal logs which aren't useful. We would like to have the logs from the native demo. 
--------- Co-authored-by: Mathis --- .github/workflows/test.yml | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 394b9fcc43..caebddf4d9 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -212,10 +212,10 @@ jobs: version: [02,99] include: - version: 02 - compose: "-f process-compose.yaml -D" + compose: "-f process-compose.yaml" - version: 99 - compose: "-f process-compose.yaml -f process-compose-mp.yml -D" + compose: "-f process-compose.yaml -f process-compose-mp.yml" fail-fast: false runs-on: ubuntu-latest steps: @@ -255,6 +255,11 @@ jobs: --workspace-remap $PWD $(if [ "${{ matrix.version }}" == "2" ]; then echo " smoke"; fi) timeout-minutes: 10 + - name: Show end of logs + if: always() + run: | + tail -n 1000 ${{ env.PC_LOGS }} + - name: Upload process compose logs if: always() uses: actions/upload-artifact@v4 @@ -262,6 +267,7 @@ jobs: name: process-compose-logs-integration-v${{ matrix.version }} path: ${{ env.PC_LOGS }} + demo-native: needs: build-test-bins runs-on: ubuntu-latest From 0e574eb6902348d18de860bad946d15fd9cd9cf4 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 7 Mar 2025 16:22:39 -0500 Subject: [PATCH 14/17] Fix decide logic in response to audit feedback (#2728) --- hotshot-task-impls/src/helpers.rs | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/hotshot-task-impls/src/helpers.rs b/hotshot-task-impls/src/helpers.rs index 5fc78869db..5524f1b25d 100644 --- a/hotshot-task-impls/src/helpers.rs +++ b/hotshot-task-impls/src/helpers.rs @@ -331,10 +331,8 @@ pub async fn decide_from_proposal_2( let epoch_height = consensus_reader.epoch_height; drop(consensus_reader); - if let Some(decided_leaf_info) = res.leaf_views.last() { + for decided_leaf_info in &res.leaf_views { decide_epoch_root(&decided_leaf_info.leaf, epoch_height, membership).await; - } else { - tracing::info!("No decided leaf while a view has been decided."); } } @@ -489,10 +487,8 @@ pub async fn decide_from_proposal( drop(consensus_reader); if with_epochs && res.new_decided_view_number.is_some() { - if let Some(decided_leaf_info) = res.leaf_views.last() { + for decided_leaf_info in &res.leaf_views { decide_epoch_root(&decided_leaf_info.leaf, epoch_height, membership).await; - } else { - tracing::info!("No decided leaf while a view has been decided."); } } From 9f8ea8c17963573e2684dd421967148d3be3cf91 Mon Sep 17 00:00:00 2001 From: Phil <184445976+pls148@users.noreply.github.com> Date: Sat, 8 Mar 2025 13:19:58 -0800 Subject: [PATCH 15/17] Store epochs and drbs from HotShot into Storage (#2734) store epochs and drbs --- hotshot-example-types/src/storage_types.rs | 25 ++++ hotshot-task-impls/src/helpers.rs | 39 +++++- .../src/quorum_vote/handlers.rs | 35 ++++- hotshot-testing/tests/tests_6/test_epochs.rs | 14 +- hotshot-types/src/traits/storage.rs | 9 ++ hotshot/src/lib.rs | 2 +- .../postgres/V502__epoch_drb_and_root.sql | 5 + .../sqlite/V302__epoch_drb_and_root.sql | 5 + sequencer/src/persistence.rs | 47 +++++++ sequencer/src/persistence/fs.rs | 123 +++++++++++++++++- sequencer/src/persistence/no_storage.rs | 24 +++- sequencer/src/persistence/sql.rs | 82 +++++++++++- types/src/v0/traits.rs | 53 +++++++- 13 files changed, 435 insertions(+), 28 deletions(-) create mode 100644 sequencer/api/migrations/postgres/V502__epoch_drb_and_root.sql create mode 100644 sequencer/api/migrations/sqlite/V302__epoch_drb_and_root.sql diff --git 
a/hotshot-example-types/src/storage_types.rs b/hotshot-example-types/src/storage_types.rs index 3d7885973d..9084eb463d 100644 --- a/hotshot-example-types/src/storage_types.rs +++ b/hotshot-example-types/src/storage_types.rs @@ -12,6 +12,7 @@ use std::{ use anyhow::{bail, Result}; use async_lock::RwLock; use async_trait::async_trait; +use hotshot_types::drb::DrbResult; use hotshot_types::{ consensus::CommitmentMap, data::{ @@ -56,6 +57,8 @@ pub struct TestStorageState { Option>, action: TYPES::View, epoch: Option, + drb_results: BTreeMap, + epoch_roots: BTreeMap, } impl Default for TestStorageState { @@ -73,6 +76,8 @@ impl Default for TestStorageState { high_qc2: None, action: TYPES::View::genesis(), epoch: None, + drb_results: BTreeMap::new(), + epoch_roots: BTreeMap::new(), } } } @@ -373,4 +378,24 @@ impl Storage for TestStorage { Ok(()) } + + async fn add_drb_result(&self, epoch: TYPES::Epoch, drb_result: DrbResult) -> Result<()> { + let mut inner = self.inner.write().await; + + inner.drb_results.insert(epoch, drb_result); + + Ok(()) + } + + async fn add_epoch_root( + &self, + epoch: TYPES::Epoch, + block_header: TYPES::BlockHeader, + ) -> Result<()> { + let mut inner = self.inner.write().await; + + inner.epoch_roots.insert(epoch, block_header); + + Ok(()) + } } diff --git a/hotshot-task-impls/src/helpers.rs b/hotshot-task-impls/src/helpers.rs index 5524f1b25d..61f40340ac 100644 --- a/hotshot-task-impls/src/helpers.rs +++ b/hotshot-task-impls/src/helpers.rs @@ -9,6 +9,7 @@ use async_lock::RwLock; use committable::{Commitment, Committable}; use either::Either; use hotshot_task::dependency::{Dependency, EventDependency}; +use hotshot_types::traits::storage::Storage; use hotshot_types::{ consensus::OuterConsensus, data::{Leaf2, QuorumProposalWrapper, ViewChangeEvidence2}, @@ -180,10 +181,11 @@ pub(crate) async fn fetch_proposal( } /// Handles calling add_epoch_root and sync_l1 on Membership if necessary. -async fn decide_epoch_root( +async fn decide_epoch_root>( decided_leaf: &Leaf2, epoch_height: u64, membership: &Arc>, + storage: &Arc>, ) { let decided_block_number = decided_leaf.block_header().block_number(); @@ -192,6 +194,19 @@ async fn decide_epoch_root( let next_epoch_number = TYPES::Epoch::new(epoch_from_block_number(decided_block_number, epoch_height) + 2); + if let Err(e) = storage + .write() + .await + .add_epoch_root(next_epoch_number, decided_leaf.block_header().clone()) + .await + { + tracing::error!( + "Failed to store epoch root for epoch {:?}: {}", + next_epoch_number, + e + ); + } + let write_callback = { tracing::debug!("Calling add_epoch_root for epoch {:?}", next_epoch_number); let membership_reader = membership.read().await; @@ -251,13 +266,14 @@ impl Default for LeafChainTraversalOutcome { /// # Panics /// If the leaf chain contains no decided leaf while reaching a decided view, which should be /// impossible. 
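Taken together with the previous patch (which made the decide logic call `decide_epoch_root` for every decided leaf view instead of only the last one), the flow this hunk introduces is: persist the epoch root through `Storage::add_epoch_root` first, then register it with the membership. A minimal, self-contained sketch of that ordering, using hypothetical stand-in types rather than HotShot's actual `Storage` and membership traits:

use std::collections::BTreeMap;

type Epoch = u64;
type BlockHeader = Vec<u8>;

#[derive(Default)]
struct MockStorage {
    epoch_roots: BTreeMap<Epoch, BlockHeader>,
}

#[derive(Default)]
struct MockMembership {
    roots: BTreeMap<Epoch, BlockHeader>,
}

// Persist first, then notify: a node that crashes between the two steps can
// still recover the root from storage on restart (in the sequencer this is
// what `load_start_epoch_info` is for).
fn on_epoch_root_decided(
    storage: &mut MockStorage,
    membership: &mut MockMembership,
    next_epoch: Epoch,
    root: BlockHeader,
) {
    // In the real task a storage failure is only logged, not fatal.
    storage.epoch_roots.insert(next_epoch, root.clone());
    membership.roots.insert(next_epoch, root);
}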
-pub async fn decide_from_proposal_2( +pub async fn decide_from_proposal_2>( proposal: &QuorumProposalWrapper, consensus: OuterConsensus, existing_upgrade_cert: Arc>>>, public_key: &TYPES::SignatureKey, with_epochs: bool, membership: &Arc>, + storage: &Arc>, ) -> LeafChainTraversalOutcome { let mut res = LeafChainTraversalOutcome::default(); let consensus_reader = consensus.read().await; @@ -332,7 +348,13 @@ pub async fn decide_from_proposal_2( drop(consensus_reader); for decided_leaf_info in &res.leaf_views { - decide_epoch_root(&decided_leaf_info.leaf, epoch_height, membership).await; + decide_epoch_root::( + &decided_leaf_info.leaf, + epoch_height, + membership, + storage, + ) + .await; } } @@ -370,13 +392,14 @@ pub async fn decide_from_proposal_2( /// # Panics /// If the leaf chain contains no decided leaf while reaching a decided view, which should be /// impossible. -pub async fn decide_from_proposal( +pub async fn decide_from_proposal, V: Versions>( proposal: &QuorumProposalWrapper, consensus: OuterConsensus, existing_upgrade_cert: Arc>>>, public_key: &TYPES::SignatureKey, with_epochs: bool, membership: &Arc>, + storage: &Arc>, ) -> LeafChainTraversalOutcome { let consensus_reader = consensus.read().await; let existing_upgrade_cert_reader = existing_upgrade_cert.read().await; @@ -488,7 +511,13 @@ pub async fn decide_from_proposal( if with_epochs && res.new_decided_view_number.is_some() { for decided_leaf_info in &res.leaf_views { - decide_epoch_root(&decided_leaf_info.leaf, epoch_height, membership).await; + decide_epoch_root::( + &decided_leaf_info.leaf, + epoch_height, + membership, + storage, + ) + .await; } } diff --git a/hotshot-task-impls/src/quorum_vote/handlers.rs b/hotshot-task-impls/src/quorum_vote/handlers.rs index 19a926a0fd..7fd2a190f6 100644 --- a/hotshot-task-impls/src/quorum_vote/handlers.rs +++ b/hotshot-task-impls/src/quorum_vote/handlers.rs @@ -48,11 +48,25 @@ use crate::{ quorum_vote::Versions, }; -async fn notify_membership_of_drb_result( +async fn handle_drb_result>( membership: &EpochMembership, + storage: &Arc>, drb_result: DrbResult, ) { tracing::debug!("Calling add_drb_result for epoch {:?}", membership.epoch()); + + // membership.epoch should always be Some + if let Some(epoch) = membership.epoch() { + if let Err(e) = storage + .write() + .await + .add_drb_result(epoch, drb_result) + .await + { + tracing::error!("Failed to store drb result for epoch {:?}: {}", epoch, e); + } + } + membership.add_drb_result(drb_result).await; } @@ -101,11 +115,12 @@ async fn store_and_get_computed_drb_result< .insert(epoch_number, result); drop(consensus_writer); - notify_membership_of_drb_result::( + handle_drb_result::( &task_state .membership .membership_for_epoch(Some(epoch_number)) .await?, + &task_state.storage, result, ) .await; @@ -221,7 +236,12 @@ async fn start_drb_task, V: Versio .drb_seeds_and_results .results .insert(*task_epoch, result); - notify_membership_of_drb_result::(&epoch_membership, result).await; + handle_drb_result::( + &epoch_membership, + &task_state.storage, + result, + ) + .await; task_state.drb_computation = None; } Err(e) => { @@ -335,11 +355,12 @@ async fn store_drb_seed_and_result .drb_seeds_and_results .results .insert(current_epoch_number + 1, result); - notify_membership_of_drb_result::( + handle_drb_result::( &task_state .membership .membership_for_epoch(Some(current_epoch_number + 1)) .await?, + &task_state.storage, result, ) .await; @@ -379,23 +400,25 @@ pub(crate) async fn handle_quorum_proposal_validated< included_txns, 
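// Note: both decide paths below now also receive `&task_state.storage`, so
// `decide_epoch_root` can persist the epoch root block header while handling
// the decide.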
decided_upgrade_cert, } = if version >= V::Epochs::VERSION { - decide_from_proposal_2( + decide_from_proposal_2::( proposal, OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), Arc::clone(&task_state.upgrade_lock.decided_upgrade_certificate), &task_state.public_key, version >= V::Epochs::VERSION, task_state.membership.membership(), + &task_state.storage, ) .await } else { - decide_from_proposal::( + decide_from_proposal::( proposal, OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), Arc::clone(&task_state.upgrade_lock.decided_upgrade_certificate), &task_state.public_key, version >= V::Epochs::VERSION, task_state.membership.membership(), + &task_state.storage, ) .await }; diff --git a/hotshot-testing/tests/tests_6/test_epochs.rs b/hotshot-testing/tests/tests_6/test_epochs.rs index bed8d185ac..cd4031b534 100644 --- a/hotshot-testing/tests/tests_6/test_epochs.rs +++ b/hotshot-testing/tests/tests_6/test_epochs.rs @@ -8,8 +8,8 @@ use hotshot_example_types::{ node_types::{ CombinedImpl, EpochUpgradeTestVersions, EpochsTestVersions, Libp2pImpl, MemoryImpl, PushCdnImpl, RandomOverlapQuorumFilterConfig, StableQuorumFilterConfig, - TestConsecutiveLeaderTypes, TestTwoStakeTablesTypes, TestTypes, - TestTypesRandomizedCommitteeMembers, TestTypesRandomizedLeader, TestTypesEpochCatchupTypes + TestConsecutiveLeaderTypes, TestTwoStakeTablesTypes, TestTypes, TestTypesEpochCatchupTypes, + TestTypesRandomizedCommitteeMembers, TestTypesRandomizedLeader, }, testable_delay::{DelayConfig, DelayOptions, DelaySettings, SupportedTraitTypesForAsyncDelay}, }; @@ -505,23 +505,23 @@ cross_tests!( // }; // let mut metadata = TestDescription::default().set_num_nodes(20,20); // let mut catchup_nodes = vec![]; -// +// // for i in 0..20 { // catchup_nodes.push(ChangeNode { // idx: i, // updown: NodeAction::RestartDown(0), // }) // } -// +// // metadata.timing_data = timing_data; -// +// // metadata.spinning_properties = SpinningTaskDescription { // // Restart all the nodes in view 10 // node_changes: vec![(10, catchup_nodes)], // }; // metadata.view_sync_properties = // hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 20); -// +// // metadata.completion_task_description = // CompletionTaskDescription::TimeBasedCompletionTaskBuilder( // TimeBasedCompletionTaskDescription { @@ -536,7 +536,7 @@ cross_tests!( // decide_timeout: Duration::from_secs(20), // ..Default::default() // }; -// +// // metadata // }, // ); diff --git a/hotshot-types/src/traits/storage.rs b/hotshot-types/src/traits/storage.rs index 991ba8282f..c2ac50fcf2 100644 --- a/hotshot-types/src/traits/storage.rs +++ b/hotshot-types/src/traits/storage.rs @@ -24,6 +24,7 @@ use crate::{ DaProposal, DaProposal2, Leaf, Leaf2, QuorumProposal, QuorumProposal2, QuorumProposalWrapper, VidDisperseShare, }, + drb::DrbResult, event::HotShotAction, message::{convert_proposal, Proposal}, simple_certificate::{ @@ -158,4 +159,12 @@ pub trait Storage: Send + Sync + Clone { async fn migrate_consensus(&self) -> Result<()> { Ok(()) } + /// Add a drb result + async fn add_drb_result(&self, epoch: TYPES::Epoch, drb_result: DrbResult) -> Result<()>; + /// Add an epoch block header + async fn add_epoch_root( + &self, + epoch: TYPES::Epoch, + block_header: TYPES::BlockHeader, + ) -> Result<()>; } diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index e1ffbae935..da4afdcd4c 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -993,7 +993,7 @@ impl, V: Versions> ConsensusApi { pub epoch: TYPES::Epoch, pub drb_result: 
DrbResult, diff --git a/sequencer/api/migrations/postgres/V502__epoch_drb_and_root.sql b/sequencer/api/migrations/postgres/V502__epoch_drb_and_root.sql new file mode 100644 index 0000000000..6079c41482 --- /dev/null +++ b/sequencer/api/migrations/postgres/V502__epoch_drb_and_root.sql @@ -0,0 +1,5 @@ +CREATE TABLE epoch_drb_and_root ( + epoch BIGINT PRIMARY KEY, + drb_result BYTEA, + block_header BYTEA +); \ No newline at end of file diff --git a/sequencer/api/migrations/sqlite/V302__epoch_drb_and_root.sql b/sequencer/api/migrations/sqlite/V302__epoch_drb_and_root.sql new file mode 100644 index 0000000000..09ab017e37 --- /dev/null +++ b/sequencer/api/migrations/sqlite/V302__epoch_drb_and_root.sql @@ -0,0 +1,5 @@ +CREATE TABLE epoch_drb_and_root ( + epoch BIGINT PRIMARY KEY, + drb_result BLOB, + block_header BLOB +); \ No newline at end of file diff --git a/sequencer/src/persistence.rs b/sequencer/src/persistence.rs index 8dff3c7448..7ef5a3c7b4 100644 --- a/sequencer/src/persistence.rs +++ b/sequencer/src/persistence.rs @@ -54,6 +54,7 @@ mod persistence_tests { Event, Leaf, Leaf2, NodeState, PubKey, SeqTypes, ValidatedState, }; use hotshot::types::{BLSPubKey, SignatureKey}; + use hotshot::InitializerEpochInfo; use hotshot_example_types::node_types::TestVersions; use hotshot_query_service::testing::mocks::MockVersions; use hotshot_types::{ @@ -156,6 +157,52 @@ mod persistence_tests { ); } + #[tokio::test(flavor = "multi_thread")] + pub async fn test_epoch_info() { + setup_test(); + + let tmp = P::tmp_storage().await; + let storage = P::connect(&tmp).await; + + // Initially, there is no saved info. + assert_eq!(storage.load_start_epoch_info().await.unwrap(), Vec::new()); + + // Store a drb result. + storage + .add_drb_result(EpochNumber::new(1), [1; 32]) + .await + .unwrap(); + assert_eq!( + storage.load_start_epoch_info().await.unwrap(), + vec![InitializerEpochInfo:: { + epoch: EpochNumber::new(1), + drb_result: [1; 32], + block_header: None, + }] + ); + + // Store a second DRB result + storage + .add_drb_result(EpochNumber::new(2), [3; 32]) + .await + .unwrap(); + assert_eq!( + storage.load_start_epoch_info().await.unwrap(), + vec![ + InitializerEpochInfo:: { + epoch: EpochNumber::new(1), + drb_result: [1; 32], + block_header: None, + }, + InitializerEpochInfo:: { + epoch: EpochNumber::new(2), + drb_result: [3; 32], + block_header: None, + } + ] + ); + } + fn leaf_info(leaf: Leaf2) -> LeafInfo { LeafInfo { leaf, diff --git a/sequencer/src/persistence/fs.rs b/sequencer/src/persistence/fs.rs index 853142fa9a..9ee7110d9a 100644 --- a/sequencer/src/persistence/fs.rs +++ b/sequencer/src/persistence/fs.rs @@ -6,6 +6,7 @@ use espresso_types::{ v0::traits::{EventConsumer, PersistenceOptions, SequencerPersistence}, Leaf, Leaf2, NetworkConfig, Payload, SeqTypes, }; +use hotshot::InitializerEpochInfo; use hotshot_types::{ consensus::CommitmentMap, data::{ @@ -13,6 +14,7 @@ use hotshot_types::{ DaProposal, DaProposal2, EpochNumber, QuorumProposal, QuorumProposal2, QuorumProposalWrapper, VidCommitment, VidDisperseShare, }, + drb::DrbResult, event::{Event, EventType, HotShotAction, LeafInfo}, message::{convert_proposal, Proposal}, simple_certificate::{ @@ -20,7 +22,7 @@ use hotshot_types::{ }, traits::{ block_contents::{BlockHeader, BlockPayload}, - node_implementation::ConsensusTime, + node_implementation::{ConsensusTime, NodeType}, }, utils::View, vote::HasViewNumber, @@ -209,6 +211,14 @@ impl Inner { self.path.join("next_epoch_quorum_certificate") } + fn epoch_drb_result_dir_path(&self) -> PathBuf { + 
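// One file per epoch: DRB results are written into this directory as
// `<epoch>.txt` (see `add_drb_result` below), and epoch root block headers
// use the same naming under `epoch_root_block_header`; `epoch_files` later
// parses the file stem back into an epoch number.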
self.path.join("epoch_drb_result") + } + + fn epoch_root_block_header_dir_path(&self) -> PathBuf { + self.path.join("epoch_root_block_header") + } + fn update_migration(&mut self) -> anyhow::Result<()> { let path = self.migration(); let bytes = bincode::serialize(&self.migrated)?; @@ -1268,6 +1278,91 @@ impl SequencerPersistence for Persistence { async fn migrate_quorum_certificates(&self) -> anyhow::Result<()> { Ok(()) } + + async fn add_drb_result( + &self, + epoch: EpochNumber, + drb_result: DrbResult, + ) -> anyhow::Result<()> { + let inner = self.inner.write().await; + let dir_path = inner.epoch_drb_result_dir_path(); + + fs::create_dir_all(dir_path.clone()).context("failed to create epoch drb result dir")?; + + let drb_result_bytes = bincode::serialize(&drb_result).context("serialize drb result")?; + + let file_path = dir_path.join(epoch.to_string()).with_extension("txt"); + fs::write(file_path, drb_result_bytes) + .context(format!("writing epoch drb result file for epoch {epoch:?}"))?; + + Ok(()) + } + + async fn add_epoch_root( + &self, + epoch: EpochNumber, + block_header: <SeqTypes as NodeType>::BlockHeader, + ) -> anyhow::Result<()> { + let inner = self.inner.write().await; + let dir_path = inner.epoch_root_block_header_dir_path(); + + fs::create_dir_all(dir_path.clone()) + .context("failed to create epoch root block header dir")?; + + let block_header_bytes = + bincode::serialize(&block_header).context("serialize block header")?; + + let file_path = dir_path.join(epoch.to_string()).with_extension("txt"); + fs::write(file_path, block_header_bytes).context(format!( + "writing epoch root block header file for epoch {epoch:?}" + ))?; + + Ok(()) + } + + async fn load_start_epoch_info(&self) -> anyhow::Result<Vec<InitializerEpochInfo<SeqTypes>>> { + let inner = self.inner.read().await; + let drb_dir_path = inner.epoch_drb_result_dir_path(); + let block_header_dir_path = inner.epoch_root_block_header_dir_path(); + + let mut result = Vec::new(); + + if drb_dir_path.is_dir() { + for (epoch, path) in epoch_files(drb_dir_path)? { + let bytes = fs::read(&path) + .context(format!("reading epoch drb result {}", path.display()))?; + let drb_result = bincode::deserialize::<DrbResult>(&bytes) + .context(format!("parsing epoch drb result {}", path.display()))?; + + let block_header_path = block_header_dir_path + .join(epoch.to_string()) + .with_extension("txt"); + let block_header = if block_header_path.is_file() { + let bytes = fs::read(&block_header_path).context(format!( + "reading epoch root block header {}", + block_header_path.display() + ))?; + Some( + bincode::deserialize::<<SeqTypes as NodeType>::BlockHeader>(&bytes) + .context(format!( + "parsing epoch root block header {}", + block_header_path.display() + ))?, + ) + } else { + None + }; + + result.push(InitializerEpochInfo::<SeqTypes> { + epoch, + drb_result, + block_header, + }); + } + } + + Ok(result) + } } /// Update a `NetworkConfig` that may have originally been persisted with an old version. @@ -1355,6 +1450,32 @@ fn view_files( })) } + +/// Get all paths under `dir` whose name is of the form <epoch number>.txt. +/// Should probably be made generic and merged with view_files. +fn epoch_files( + dir: impl AsRef<Path>, +) -> anyhow::Result<impl Iterator<Item = (EpochNumber, PathBuf)>> { + Ok(fs::read_dir(dir.as_ref())?.filter_map(move |entry| { + let dir = dir.as_ref().display(); + let entry = entry.ok()?; + if !entry.file_type().ok()?.is_file() { + tracing::debug!(%dir, ?entry, "ignoring non-file in data directory"); + return None; + } + let path = entry.path(); + if path.extension()?
!= "txt" { + tracing::debug!(%dir, ?entry, "ignoring non-text file in data directory"); + return None; + } + let file_name = path.file_stem()?; + let Ok(epoch_number) = file_name.to_string_lossy().parse::() else { + tracing::debug!(%dir, ?file_name, "ignoring extraneous file in data directory"); + return None; + }; + Some((EpochNumber::new(epoch_number), entry.path().to_owned())) + })) +} + #[cfg(test)] mod testing { use tempfile::TempDir; diff --git a/sequencer/src/persistence/no_storage.rs b/sequencer/src/persistence/no_storage.rs index c47701a3ea..c35d9e6266 100644 --- a/sequencer/src/persistence/no_storage.rs +++ b/sequencer/src/persistence/no_storage.rs @@ -7,6 +7,7 @@ use espresso_types::{ v0::traits::{EventConsumer, PersistenceOptions, SequencerPersistence}, Leaf2, NetworkConfig, }; +use hotshot::InitializerEpochInfo; use hotshot_types::{ consensus::CommitmentMap, data::{ @@ -14,6 +15,7 @@ use hotshot_types::{ DaProposal, DaProposal2, EpochNumber, QuorumProposalWrapper, VidCommitment, VidDisperseShare, }, + drb::DrbResult, event::{Event, EventType, HotShotAction, LeafInfo}, message::Proposal, simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate2, UpgradeCertificate}, @@ -22,7 +24,7 @@ use hotshot_types::{ use std::collections::BTreeMap; use std::sync::Arc; -use crate::{SeqTypes, ViewNumber}; +use crate::{NodeType, SeqTypes, ViewNumber}; #[derive(Clone, Copy, Debug)] pub struct Options; @@ -221,4 +223,24 @@ impl SequencerPersistence for NoStorage { async fn migrate_quorum_certificates(&self) -> anyhow::Result<()> { Ok(()) } + + async fn add_drb_result( + &self, + _epoch: EpochNumber, + _drb_result: DrbResult, + ) -> anyhow::Result<()> { + Ok(()) + } + + async fn add_epoch_root( + &self, + _epoch: EpochNumber, + _block_header: ::BlockHeader, + ) -> anyhow::Result<()> { + Ok(()) + } + + async fn load_start_epoch_info(&self) -> anyhow::Result>> { + Ok(Vec::new()) + } } diff --git a/sequencer/src/persistence/sql.rs b/sequencer/src/persistence/sql.rs index e2383a81ca..59d834caaf 100644 --- a/sequencer/src/persistence/sql.rs +++ b/sequencer/src/persistence/sql.rs @@ -1,4 +1,5 @@ -use anyhow::Context; +use crate::{catchup::SqlStateCatchup, NodeType, SeqTypes, ViewNumber}; +use anyhow::{bail, Context}; use async_trait::async_trait; use clap::Parser; use committable::Committable; @@ -10,6 +11,7 @@ use espresso_types::{ BackoffParams, BlockMerkleTree, FeeMerkleTree, Leaf, Leaf2, NetworkConfig, Payload, }; use futures::stream::StreamExt; +use hotshot::InitializerEpochInfo; use hotshot_query_service::{ availability::LeafQueryData, data_source::{ @@ -29,6 +31,7 @@ use hotshot_query_service::{ merklized_state::MerklizedState, VidCommon, }; +use hotshot_types::drb::DrbResult; use hotshot_types::{ consensus::CommitmentMap, data::{ @@ -53,8 +56,6 @@ use sqlx::Row; use sqlx::{query, Executor}; use std::{collections::BTreeMap, path::PathBuf, str::FromStr, sync::Arc, time::Duration}; -use crate::{catchup::SqlStateCatchup, SeqTypes, ViewNumber}; - /// Options for Postgres-backed persistence. 
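For the SQL backend that follows, the same two hooks write to the `epoch_drb_and_root` table created by the V502 (Postgres) and V302 (SQLite) migrations above: one row per epoch, with `add_drb_result` and `add_epoch_root` each upserting only their own column, so either column may still be NULL when `load_start_epoch_info` reads the table back. A small in-memory model of that merge-then-load behavior (hypothetical names, not the sequencer's API):

use std::collections::BTreeMap;

// Stand-in for the `epoch_drb_and_root` table: one row per epoch holding
// (drb_result, block_header), either of which may be absent until the
// corresponding upsert has run.
#[derive(Default)]
struct EpochDrbAndRoot {
    rows: BTreeMap<i64, (Option<Vec<u8>>, Option<Vec<u8>>)>,
}

impl EpochDrbAndRoot {
    // Mirrors `add_drb_result`: upsert only the drb_result column.
    fn upsert_drb(&mut self, epoch: i64, drb: Vec<u8>) {
        self.rows.entry(epoch).or_default().0 = Some(drb);
    }

    // Mirrors `add_epoch_root`: upsert only the block_header column.
    fn upsert_root(&mut self, epoch: i64, header: Vec<u8>) {
        self.rows.entry(epoch).or_default().1 = Some(header);
    }

    // Mirrors `load_start_epoch_info`: rows with no DRB result yet are
    // skipped; a missing block header is allowed and comes back as `None`.
    fn load(&self) -> Vec<(i64, Vec<u8>, Option<Vec<u8>>)> {
        self.rows
            .iter()
            .filter_map(|(epoch, (drb, header))| {
                drb.clone().map(|d| (*epoch, d, header.clone()))
            })
            .collect()
    }
}

This is why the SQL loader below filters out rows whose `drb_result` is still NULL instead of treating them as an error.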
#[derive(Parser, Clone, Derivative)] #[derivative(Debug)] @@ -1873,6 +1874,81 @@ impl SequencerPersistence for Persistence { .await?; tx.commit().await } + + async fn add_drb_result( + &self, + epoch: EpochNumber, + drb_result: DrbResult, + ) -> anyhow::Result<()> { + let drb_result_vec = Vec::from(drb_result); + let mut tx = self.db.write().await?; + tx.upsert( + "epoch_drb_and_root", + ["epoch", "drb_result"], + ["epoch"], + [(epoch.u64() as i64, drb_result_vec)], + ) + .await?; + tx.commit().await + } + + async fn add_epoch_root( + &self, + epoch: EpochNumber, + block_header: ::BlockHeader, + ) -> anyhow::Result<()> { + let block_header_bytes = + bincode::serialize(&block_header).context("serializing block header")?; + + let mut tx = self.db.write().await?; + tx.upsert( + "epoch_drb_and_root", + ["epoch", "block_header"], + ["epoch"], + [(epoch.u64() as i64, block_header_bytes)], + ) + .await?; + tx.commit().await + } + + async fn load_start_epoch_info(&self) -> anyhow::Result>> { + let rows = self + .db + .read() + .await? + .fetch_all("SELECT * from epoch_drb_and_root ORDER BY epoch ASC") + .await?; + + rows.into_iter() + .map(|row| { + let epoch: i64 = row.get("epoch"); + let drb_result: Option> = row.get("drb_result"); + let block_header: Option> = row.get("block_header"); + if let Some(drb_result) = drb_result { + let drb_result_array = drb_result + .try_into() + .or_else(|_| bail!("invalid drb result"))?; + let block_header: Option<::BlockHeader> = block_header + .map(|data| bincode::deserialize(&data)) + .transpose()?; + Ok(Some(InitializerEpochInfo:: { + epoch: ::Epoch::new(epoch as u64), + drb_result: drb_result_array, + block_header, + })) + } else { + // Right now we skip the epoch_drb_and_root row if there is no drb result. + // This seems reasonable based on the expected order of events, but please double check! + Ok(None) + } + }) + .filter_map(|e| match e { + Err(v) => Some(Err(v)), + Ok(Some(v)) => Some(Ok(v)), + Ok(None) => None, + }) + .collect() + } } #[async_trait] diff --git a/types/src/v0/traits.rs b/types/src/v0/traits.rs index 610fc19e3f..4f628eeb63 100644 --- a/types/src/v0/traits.rs +++ b/types/src/v0/traits.rs @@ -6,7 +6,9 @@ use anyhow::{bail, ensure, Context}; use async_trait::async_trait; use committable::{Commitment, Committable}; use futures::{FutureExt, TryFutureExt}; -use hotshot::{types::EventType, HotShotInitializer}; +use hotshot::{types::EventType, HotShotInitializer, InitializerEpochInfo}; +use hotshot_types::drb::DrbResult; +use hotshot_types::traits::node_implementation::NodeType; use hotshot_types::{ consensus::CommitmentMap, data::{ @@ -538,6 +540,7 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { async fn load_upgrade_certificate( &self, ) -> anyhow::Result>>; + async fn load_start_epoch_info(&self) -> anyhow::Result>>; /// Load the latest known consensus state. 
/// @@ -615,6 +618,16 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { // TODO: let epoch = genesis_epoch_from_version::(); + let config = self.load_config().await.context("loading config")?; + let epoch_height = config + .as_ref() + .map(|c| c.config.epoch_height) + .unwrap_or_default(); + let epoch_start_block = config + .as_ref() + .map(|c| c.config.epoch_start_block) + .unwrap_or_default(); + let (undecided_leaves, undecided_state) = self .load_undecided_state() .await @@ -631,6 +644,11 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { .await .context("loading upgrade certificate")?; + let start_epoch_info = self + .load_start_epoch_info() + .await + .context("loading start epoch info")?; + tracing::info!( ?leaf, ?view, @@ -647,8 +665,8 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { Ok(( HotShotInitializer { instance_state: state, - epoch_height: 0, - epoch_start_block: 0, + epoch_height, + epoch_start_block, anchor_leaf: leaf, anchor_state: validated_state.unwrap_or_default(), anchor_state_delta: None, @@ -665,7 +683,7 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { .collect(), undecided_state, saved_vid_shares: Default::default(), // TODO: implement saved_vid_shares - start_epoch_info: Default::default(), // TODO: implement start_epoch_info + start_epoch_info, }, anchor_view, )) @@ -819,6 +837,17 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { ) -> anyhow::Result<()> { self.append_quorum_proposal2(proposal).await } + + async fn add_drb_result( + &self, + epoch: ::Epoch, + drb_result: DrbResult, + ) -> anyhow::Result<()>; + async fn add_epoch_root( + &self, + epoch: ::Epoch, + block_header: ::BlockHeader, + ) -> anyhow::Result<()>; } #[async_trait] @@ -947,6 +976,22 @@ impl Storage for Arc

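// Blanket impl: each `Storage` method here, including the new `add_drb_result`
// and `add_epoch_root`, forwards to the wrapped `SequencerPersistence` via
// `(**self)`.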
{ ) -> anyhow::Result<()> { (**self).update_undecided_state2(leaves, state).await } + + async fn add_drb_result( + &self, + epoch: ::Epoch, + drb_result: DrbResult, + ) -> anyhow::Result<()> { + (**self).add_drb_result(epoch, drb_result).await + } + + async fn add_epoch_root( + &self, + epoch: ::Epoch, + block_header: ::BlockHeader, + ) -> anyhow::Result<()> { + (**self).add_epoch_root(epoch, block_header).await + } } /// Data that can be deserialized from a subslice of namespace payload bytes. From 984bcf51735fc4cfcf1cf520218ebd7186a757df Mon Sep 17 00:00:00 2001 From: Phil <184445976+pls148@users.noreply.github.com> Date: Sat, 8 Mar 2025 14:50:46 -0800 Subject: [PATCH 16/17] Pull in nightly rust-fmt, use vid rustfmt.toml (#2700) Pull in nightly rust-fmt, use vid rustfmt --- builder/src/bin/permissionless-builder.rs | 4 +- builder/src/lib.rs | 21 +- builder/src/non_permissioned.rs | 6 +- client/src/lib.rs | 5 +- contract-bindings-alloy/src/erc1967proxy.rs | 41 +-- contract-bindings-alloy/src/feecontract.rs | 255 ++++++------- contract-bindings-alloy/src/iplonkverifier.rs | 20 +- contract-bindings-alloy/src/lightclient.rs | 305 ++++++++-------- .../src/lightclientarbitrum.rs | 306 ++++++++-------- .../src/lightclientmock.rs | 340 +++++++++--------- .../src/permissionedstaketable.rs | 151 ++++---- contract-bindings-alloy/src/plonkverifier.rs | 45 +-- contract-bindings-alloy/src/plonkverifier2.rs | 47 +-- contract-bindings-ethers/src/erc1967_proxy.rs | 12 +- contract-bindings-ethers/src/fee_contract.rs | 52 +-- contract-bindings-ethers/src/light_client.rs | 64 ++-- .../src/light_client_arbitrum.rs | 64 ++-- .../src/light_client_mock.rs | 66 ++-- .../src/permissioned_stake_table.rs | 22 +- .../src/plonk_verifier.rs | 4 +- contracts/rust/adapter/src/jellyfish.rs | 10 +- contracts/rust/adapter/src/light_client.rs | 4 +- contracts/rust/adapter/src/stake_table.rs | 6 +- contracts/rust/diff-test/src/main.rs | 62 ++-- contracts/rust/gen-vk-contract/src/main.rs | 3 +- flake.nix | 58 ++- hotshot-builder-api/src/api.rs | 2 +- .../src/block_size_limits.rs | 3 +- .../src/block_store.rs | 13 +- .../src/service.rs | 89 ++--- .../src/testing/basic.rs | 46 ++- .../src/testing/block_size.rs | 33 +- .../src/testing/finalization.rs | 19 +- .../src/testing/integration.rs | 17 +- .../src/testing/mod.rs | 36 +- hotshot-builder-core/src/builder_state.rs | 61 ++-- hotshot-builder-core/src/service.rs | 320 ++++++++--------- .../src/testing/basic_test.rs | 63 ++-- .../src/testing/finalization_test.rs | 34 +- hotshot-builder-core/src/testing/mod.rs | 45 ++- hotshot-events-service/src/api.rs | 2 +- hotshot-events-service/src/events.rs | 3 +- hotshot-events-service/src/events_source.rs | 2 +- hotshot-example-types/src/block_types.rs | 3 +- hotshot-example-types/src/storage_types.rs | 2 +- hotshot-example-types/src/testable_delay.rs | 8 +- hotshot-examples/infra/mod.rs | 22 +- hotshot-fakeapi/src/fake_solver.rs | 4 +- .../src/network/behaviours/dht/bootstrap.rs | 16 +- .../src/network/behaviours/dht/mod.rs | 40 +-- .../behaviours/dht/store/persistent.rs | 8 +- .../src/network/behaviours/direct_message.rs | 10 +- hotshot-libp2p-networking/src/network/cbor.rs | 10 +- hotshot-libp2p-networking/src/network/node.rs | 80 ++--- .../src/network/transport.rs | 4 +- hotshot-macros/src/lib.rs | 8 +- hotshot-orchestrator/src/client.rs | 2 +- .../examples/simple-server.rs | 3 +- hotshot-query-service/src/api.rs | 6 +- hotshot-query-service/src/availability.rs | 47 ++- .../src/availability/data_source.rs | 24 +- 
.../src/availability/fetch.rs | 3 +- .../src/availability/query_data.rs | 6 +- hotshot-query-service/src/data_source.rs | 74 ++-- .../src/data_source/extension.rs | 19 +- .../src/data_source/fetching.rs | 113 +++--- .../src/data_source/fetching/block.rs | 14 +- .../src/data_source/fetching/header.rs | 43 +-- .../src/data_source/fetching/leaf.rs | 41 +-- .../src/data_source/fetching/transaction.rs | 12 +- .../src/data_source/fetching/vid.rs | 21 +- hotshot-query-service/src/data_source/fs.rs | 21 +- .../src/data_source/metrics.rs | 10 +- .../src/data_source/notifier.rs | 20 +- hotshot-query-service/src/data_source/sql.rs | 28 +- .../src/data_source/storage.rs | 14 +- .../src/data_source/storage/fail_storage.rs | 17 +- .../src/data_source/storage/fs.rs | 55 +-- .../src/data_source/storage/ledger_log.rs | 9 +- .../src/data_source/storage/pruning.rs | 3 +- .../src/data_source/storage/sql.rs | 72 ++-- .../src/data_source/storage/sql/migrate.rs | 3 +- .../src/data_source/storage/sql/queries.rs | 43 +-- .../storage/sql/queries/availability.rs | 19 +- .../storage/sql/queries/explorer.rs | 32 +- .../data_source/storage/sql/queries/node.rs | 34 +- .../data_source/storage/sql/queries/state.rs | 47 +-- .../data_source/storage/sql/transaction.rs | 67 ++-- .../src/data_source/update.rs | 41 +-- hotshot-query-service/src/error.rs | 6 +- hotshot-query-service/src/explorer.rs | 33 +- .../src/explorer/currency.rs | 7 +- .../src/explorer/data_source.rs | 7 +- hotshot-query-service/src/explorer/errors.rs | 6 +- .../src/explorer/monetary_value.rs | 21 +- .../src/explorer/query_data.rs | 25 +- hotshot-query-service/src/explorer/traits.rs | 3 +- hotshot-query-service/src/fetching.rs | 12 +- .../src/fetching/provider.rs | 6 +- .../src/fetching/provider/any.rs | 22 +- .../src/fetching/provider/query_service.rs | 69 ++-- .../src/fetching/provider/testing.rs | 18 +- hotshot-query-service/src/fetching/request.rs | 10 +- hotshot-query-service/src/lib.rs | 39 +- hotshot-query-service/src/merklized_state.rs | 6 +- .../src/merklized_state/data_source.rs | 6 +- hotshot-query-service/src/metrics.rs | 12 +- hotshot-query-service/src/node.rs | 32 +- hotshot-query-service/src/node/data_source.rs | 8 +- hotshot-query-service/src/node/query_data.rs | 2 +- hotshot-query-service/src/status.rs | 29 +- .../src/status/data_source.rs | 8 +- hotshot-query-service/src/task.rs | 4 +- .../src/testing/consensus.rs | 27 +- hotshot-query-service/src/testing/mocks.rs | 20 +- hotshot-stake-table/src/mt_based.rs | 8 +- hotshot-stake-table/src/mt_based/internal.rs | 18 +- hotshot-stake-table/src/vec_based.rs | 4 +- hotshot-stake-table/src/vec_based/config.rs | 2 +- hotshot-state-prover/src/circuit.rs | 6 +- hotshot-state-prover/src/service.rs | 25 +- hotshot-task-impls/src/builder.rs | 4 +- hotshot-task-impls/src/consensus/mod.rs | 17 +- hotshot-task-impls/src/da.rs | 14 +- hotshot-task-impls/src/events.rs | 117 +++--- hotshot-task-impls/src/helpers.rs | 21 +- hotshot-task-impls/src/network.rs | 150 ++++---- .../src/quorum_proposal/handlers.rs | 23 +- hotshot-task-impls/src/quorum_proposal/mod.rs | 59 ++- .../src/quorum_proposal_recv/handlers.rs | 2 +- .../src/quorum_proposal_recv/mod.rs | 10 +- .../src/quorum_vote/handlers.rs | 13 +- hotshot-task-impls/src/quorum_vote/mod.rs | 32 +- hotshot-task-impls/src/request.rs | 6 +- hotshot-task-impls/src/response.rs | 12 +- hotshot-task-impls/src/rewind.rs | 2 +- hotshot-task-impls/src/transactions.rs | 45 ++- hotshot-task-impls/src/upgrade.rs | 8 +- hotshot-task-impls/src/vid.rs | 10 +- 
hotshot-task-impls/src/view_sync.rs | 38 +- hotshot-task-impls/src/vote_collection.rs | 12 +- hotshot-task/src/dependency.rs | 6 +- hotshot-task/src/task.rs | 6 +- hotshot-testing/src/block_builder/mod.rs | 12 +- hotshot-testing/src/block_builder/random.rs | 10 +- hotshot-testing/src/block_builder/simple.rs | 16 +- .../src/byzantine/byzantine_behaviour.rs | 20 +- hotshot-testing/src/consistency_task.rs | 12 +- hotshot-testing/src/helpers.rs | 6 +- hotshot-testing/src/predicates/event.rs | 2 +- hotshot-testing/src/script.rs | 2 +- hotshot-testing/src/spinning_task.rs | 16 +- hotshot-testing/src/test_builder.rs | 12 +- hotshot-testing/src/test_runner.rs | 26 +- hotshot-testing/src/test_task.rs | 10 +- hotshot-testing/src/txn_task.rs | 4 +- hotshot-testing/src/view_generator.rs | 2 +- hotshot-testing/src/view_sync_task.rs | 12 +- hotshot-testing/tests/tests_6/test_epochs.rs | 3 +- hotshot-types/src/consensus.rs | 2 +- hotshot-types/src/data.rs | 24 +- hotshot-types/src/data/vid_disperse.rs | 3 +- hotshot-types/src/epoch_membership.rs | 30 +- hotshot-types/src/lib.rs | 4 +- hotshot-types/src/message.rs | 68 ++-- hotshot-types/src/network.rs | 4 +- hotshot-types/src/simple_vote.rs | 3 +- hotshot-types/src/traits/storage.rs | 7 +- hotshot-types/src/utils.rs | 11 +- hotshot-types/src/vote.rs | 2 +- hotshot-utils/src/anytrace.rs | 26 +- hotshot-utils/src/anytrace/macros.rs | 12 +- hotshot/src/lib.rs | 8 +- hotshot/src/tasks/mod.rs | 12 +- .../static_committee_leader_two_views.rs | 9 +- .../traits/election/two_static_committees.rs | 11 +- .../src/traits/networking/combined_network.rs | 8 +- .../src/traits/networking/libp2p_network.rs | 54 +-- .../src/traits/networking/memory_network.rs | 10 +- .../src/traits/networking/push_cdn_network.rs | 2 +- hotshot/src/types/handle.rs | 11 +- marketplace-builder-core/src/service.rs | 48 +-- .../src/testing/basic_test.rs | 13 +- .../src/testing/integration.rs | 17 +- .../src/testing/order_test.rs | 18 +- marketplace-builder-shared/src/block.rs | 9 +- .../src/coordinator/mod.rs | 17 +- .../src/coordinator/tiered_view_map.rs | 10 +- marketplace-builder-shared/src/error.rs | 6 +- marketplace-builder-shared/src/state.rs | 15 +- .../src/testing/consensus.rs | 14 +- .../src/testing/generation.rs | 14 +- .../src/testing/mock.rs | 37 +- .../src/testing/validation.rs | 9 +- .../src/utils/event_service_wrapper.rs | 26 +- .../src/utils/rotating_set.rs | 3 +- .../src/bin/marketplace-builder.rs | 4 +- marketplace-builder/src/builder.rs | 39 +- marketplace-builder/src/hooks.rs | 50 +-- marketplace-builder/src/lib.rs | 8 +- marketplace-solver/src/api.rs | 2 +- marketplace-solver/src/database.rs | 5 +- marketplace-solver/src/events.rs | 8 +- marketplace-solver/src/testing.rs | 15 +- .../src/api/node_validator/v0/cdn/mod.rs | 57 +-- .../v0/create_node_validator_api.rs | 56 +-- node-metrics/src/api/node_validator/v0/mod.rs | 63 ++-- node-metrics/src/lib.rs | 27 +- node-metrics/src/service/client_id/mod.rs | 9 +- .../src/service/client_message/mod.rs | 14 +- node-metrics/src/service/client_state/mod.rs | 89 ++--- node-metrics/src/service/data_state/mod.rs | 34 +- .../src/service/data_state/node_identity.rs | 10 +- .../src/service/server_message/mod.rs | 3 +- request-response/src/lib.rs | 10 +- request-response/src/message.rs | 14 +- rust-toolchain.toml | 2 +- vid/rustfmt.toml => rustfmt.toml | 5 +- sequencer/src/api.rs | 64 ++-- sequencer/src/api/data_source.rs | 6 +- sequencer/src/api/endpoints.rs | 8 +- sequencer/src/api/options.rs | 3 +- sequencer/src/api/sql.rs | 13 +- 
sequencer/src/api/update.rs | 4 +- sequencer/src/bin/cdn-whitelist.rs | 6 +- sequencer/src/bin/dev-rollup.rs | 1 - sequencer/src/bin/espresso-bridge.rs | 5 +- sequencer/src/bin/espresso-dev-node.rs | 4 +- sequencer/src/bin/keygen.rs | 6 +- sequencer/src/bin/nasty-client.rs | 53 +-- sequencer/src/bin/pub-key.rs | 6 +- sequencer/src/bin/reset-storage.rs | 4 +- sequencer/src/bin/submit-transactions.rs | 15 +- .../bin/update-permissioned-stake-table.rs | 3 +- sequencer/src/bin/utils/keygen.rs | 6 +- sequencer/src/bin/utils/main.rs | 3 +- sequencer/src/bin/utils/pubkey.rs | 15 +- sequencer/src/bin/utils/reset_storage.rs | 9 +- sequencer/src/bin/verify-headers.rs | 9 +- sequencer/src/catchup.rs | 24 +- sequencer/src/context.rs | 8 +- sequencer/src/external_event_handler.rs | 16 +- sequencer/src/genesis.rs | 35 +- sequencer/src/lib.rs | 47 ++- sequencer/src/options.rs | 20 +- sequencer/src/persistence.rs | 19 +- sequencer/src/persistence/fs.rs | 59 ++- sequencer/src/persistence/no_storage.rs | 4 +- sequencer/src/persistence/sql.rs | 36 +- sequencer/src/proposal_fetcher.rs | 7 +- sequencer/src/request_response/data_source.rs | 3 +- sequencer/src/request_response/network.rs | 10 +- .../src/request_response/recipient_source.rs | 2 +- sequencer/src/restart_tests.rs | 27 +- sequencer/src/run.rs | 40 +-- sequencer/src/state.rs | 7 +- sequencer/src/state_signature.rs | 4 +- sequencer/src/state_signature/relay_server.rs | 6 +- tests/common/mod.rs | 3 +- tests/smoke.rs | 6 +- tests/upgrades.rs | 3 +- types/src/eth_signature_key.rs | 3 +- types/src/v0/config.rs | 14 +- types/src/v0/impls/auction.rs | 16 +- .../v0/impls/block/full_payload/ns_proof.rs | 10 +- .../impls/block/full_payload/ns_proof/test.rs | 3 +- .../v0/impls/block/full_payload/payload.rs | 7 +- .../impls/block/namespace_payload/tx_proof.rs | 8 +- types/src/v0/impls/chain_config.rs | 8 +- types/src/v0/impls/fee_info.rs | 20 +- types/src/v0/impls/header.rs | 16 +- types/src/v0/impls/instance_state.rs | 16 +- types/src/v0/impls/l1.rs | 30 +- types/src/v0/impls/mod.rs | 5 +- types/src/v0/impls/solver.rs | 6 +- types/src/v0/impls/stake_table.rs | 4 +- types/src/v0/impls/state.rs | 7 +- types/src/v0/impls/transaction.rs | 3 +- types/src/v0/mod.rs | 8 +- types/src/v0/traits.rs | 28 +- types/src/v0/utils.rs | 19 +- utils/src/deployer.rs | 9 +- utils/src/lib.rs | 20 +- utils/src/stake_table.rs | 11 +- vid/src/avid_m.rs | 15 +- vid/src/avid_m/namespaced.rs | 8 +- vid/src/utils/bytes_to_field.rs | 6 +- 287 files changed, 3677 insertions(+), 3565 deletions(-) rename vid/rustfmt.toml => rustfmt.toml (73%) diff --git a/builder/src/bin/permissionless-builder.rs b/builder/src/bin/permissionless-builder.rs index 1ff329399e..4df79fb223 100644 --- a/builder/src/bin/permissionless-builder.rs +++ b/builder/src/bin/permissionless-builder.rs @@ -122,11 +122,11 @@ async fn main() -> anyhow::Result<()> { match (base, upgrade) { (FeeVersion::VERSION, MarketplaceVersion::VERSION) => { run::>(genesis, opt).await - } + }, (FeeVersion::VERSION, _) => run::>(genesis, opt).await, (MarketplaceVersion::VERSION, _) => { run::>(genesis, opt).await - } + }, _ => panic!( "Invalid base ({base}) and upgrade ({upgrade}) versions specified in the toml file." 
), diff --git a/builder/src/lib.rs b/builder/src/lib.rs index 2a16ff89e5..627fcfccba 100755 --- a/builder/src/lib.rs +++ b/builder/src/lib.rs @@ -40,6 +40,12 @@ pub fn run_builder_api_service(url: Url, source: ProxyGlobalState) { #[cfg(test)] pub mod testing { + use std::{ + num::NonZeroUsize, + sync::Arc, + time::{Duration, Instant}, + }; + use async_lock::RwLock; use committable::Committable; use espresso_types::{ @@ -74,18 +80,13 @@ pub mod testing { }, HotShotConfig, PeerConfig, ValidatorConfig, }; + use jf_signature::bls_over_bn254::VerKey; use sequencer::{context::Consensus, network, SequencerApiVersion}; - use std::{ - num::NonZeroUsize, - sync::Arc, - time::{Duration, Instant}, - }; use surf_disco::Client; use vbs::version::StaticVersion; use super::*; use crate::non_permissioned::BuilderConfig; - use jf_signature::bls_over_bn254::VerKey; #[derive(Clone)] pub struct HotShotTestConfig { @@ -414,10 +415,10 @@ pub mod testing { { Ok(response) => { tracing::info!("Received txn submitted response : {:?}", response); - } + }, Err(e) => { panic!("Error submitting private transaction {:?}", e); - } + }, } let seed = [207_u8; 32]; @@ -514,10 +515,10 @@ pub mod testing { Ok(response) => { tracing::info!("Received Builder Key : {:?}", response); assert_eq!(response, builder_pub_key); - } + }, Err(e) => { panic!("Error getting builder key {:?}", e); - } + }, } } } diff --git a/builder/src/non_permissioned.rs b/builder/src/non_permissioned.rs index fd8d3bada1..d0d6224abe 100644 --- a/builder/src/non_permissioned.rs +++ b/builder/src/non_permissioned.rs @@ -1,4 +1,4 @@ -use std::{collections::VecDeque, num::NonZeroUsize, time::Duration}; +use std::{collections::VecDeque, num::NonZeroUsize, sync::Arc, time::Duration}; use anyhow::Context; use async_broadcast::broadcast; @@ -22,10 +22,8 @@ use hotshot_types::{ node_implementation::Versions, EncodeBytes, }, }; -use marketplace_builder_shared::block::ParentBlockReferences; -use marketplace_builder_shared::utils::EventServiceStream; +use marketplace_builder_shared::{block::ParentBlockReferences, utils::EventServiceStream}; use sequencer::{catchup::StatePeers, L1Params, SequencerApiVersion}; -use std::sync::Arc; use tide_disco::Url; use tokio::spawn; use vbs::version::StaticVersionType; diff --git a/client/src/lib.rs b/client/src/lib.rs index a9d5cc995f..88d921afc7 100644 --- a/client/src/lib.rs +++ b/client/src/lib.rs @@ -1,3 +1,5 @@ +use std::time::Duration; + use anyhow::Context; use espresso_types::{FeeAccount, FeeAmount, FeeMerkleTree, Header}; use ethers::types::Address; @@ -6,7 +8,6 @@ use jf_merkle_tree::{ prelude::{MerkleProof, Sha3Node}, MerkleTreeScheme, }; -use std::time::Duration; use surf_disco::{ error::ClientError, socket::{Connection, Unsupported}, @@ -110,7 +111,7 @@ impl SequencerClient { } else { sleep(Duration::from_millis(200)).await; } - } + }, } }; diff --git a/contract-bindings-alloy/src/erc1967proxy.rs b/contract-bindings-alloy/src/erc1967proxy.rs index e4b265f08a..d8c58c39fd 100644 --- a/contract-bindings-alloy/src/erc1967proxy.rs +++ b/contract-bindings-alloy/src/erc1967proxy.rs @@ -94,8 +94,9 @@ interface ERC1967Proxy { clippy::empty_structs_with_brackets )] pub mod ERC1967Proxy { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /// The creation / init bytecode of the contract. 
/// /// ```text @@ -144,7 +145,7 @@ pub mod ERC1967Proxy { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -211,7 +212,7 @@ pub mod ERC1967Proxy { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -277,7 +278,7 @@ pub mod ERC1967Proxy { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -337,7 +338,7 @@ pub mod ERC1967Proxy { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -505,7 +506,7 @@ pub mod ERC1967Proxy { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -588,16 +589,16 @@ pub mod ERC1967Proxy { match self { Self::AddressEmptyCode(_) => { ::SELECTOR - } + }, Self::ERC1967InvalidImplementation(_) => { ::SELECTOR - } + }, Self::ERC1967NonPayable(_) => { ::SELECTOR - } + }, Self::FailedInnerCall(_) => { ::SELECTOR - } + }, } } #[inline] @@ -682,18 +683,18 @@ pub mod ERC1967Proxy { match self { Self::AddressEmptyCode(inner) => { ::abi_encoded_size(inner) - } + }, Self::ERC1967InvalidImplementation(inner) => { ::abi_encoded_size( inner, ) - } + }, Self::ERC1967NonPayable(inner) => { ::abi_encoded_size(inner) - } + }, Self::FailedInnerCall(inner) => { ::abi_encoded_size(inner) - } + }, } } #[inline] @@ -701,18 +702,18 @@ pub mod ERC1967Proxy { match self { Self::AddressEmptyCode(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::ERC1967InvalidImplementation(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::ERC1967NonPayable(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::FailedInnerCall(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } @@ -748,7 +749,7 @@ pub mod ERC1967Proxy { Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Upgraded) - } + }, _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { name: ::NAME, log: alloy_sol_types::private::Box::new( @@ -772,7 +773,7 @@ pub mod ERC1967Proxy { match self { Self::Upgraded(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, } } } diff --git a/contract-bindings-alloy/src/feecontract.rs b/contract-bindings-alloy/src/feecontract.rs index 87689f8538..19aba5e2fd 100644 --- a/contract-bindings-alloy/src/feecontract.rs +++ b/contract-bindings-alloy/src/feecontract.rs @@ -445,8 +445,9 @@ interface FeeContract { clippy::empty_structs_with_brackets )] pub mod FeeContract { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /// The creation / init bytecode of the contract. 
/// /// ```text @@ -495,7 +496,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -559,7 +560,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -619,7 +620,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -682,7 +683,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -748,7 +749,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -808,7 +809,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -868,7 +869,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -928,7 +929,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -988,7 +989,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1048,7 +1049,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1108,7 +1109,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1171,7 +1172,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1238,7 +1239,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1302,7 +1303,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1365,7 +1366,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2055,7 +2056,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2122,7 +2123,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2151,7 +2152,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2236,7 +2237,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2265,7 +2266,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2351,7 +2352,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2380,7 +2381,7 @@ pub mod FeeContract { match _t { 
alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2470,7 +2471,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2503,7 +2504,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2593,7 +2594,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2622,7 +2623,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2708,7 +2709,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2737,7 +2738,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2819,7 +2820,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2848,7 +2849,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2930,7 +2931,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2959,7 +2960,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3041,7 +3042,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3070,7 +3071,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3149,7 +3150,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3178,7 +3179,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3260,7 +3261,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3289,7 +3290,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3383,7 +3384,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3415,7 +3416,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3534,28 +3535,28 @@ pub mod FeeContract { match self { Self::UPGRADE_INTERFACE_VERSION(_) => { ::SELECTOR - } + }, Self::balances(_) => ::SELECTOR, Self::deposit(_) => ::SELECTOR, Self::getVersion(_) => ::SELECTOR, Self::initialize(_) => ::SELECTOR, Self::maxDepositAmount(_) => { ::SELECTOR - } + }, Self::minDepositAmount(_) => { ::SELECTOR - } + }, Self::owner(_) => ::SELECTOR, Self::proxiableUUID(_) => ::SELECTOR, Self::renounceOwnership(_) => { ::SELECTOR - } + 
}, Self::transferOwnership(_) => { ::SELECTOR - } + }, Self::upgradeToAndCall(_) => { ::SELECTOR - } + }, } } #[inline] @@ -3734,40 +3735,40 @@ pub mod FeeContract { ::abi_encoded_size( inner, ) - } + }, Self::balances(inner) => { ::abi_encoded_size(inner) - } + }, Self::deposit(inner) => { ::abi_encoded_size(inner) - } + }, Self::getVersion(inner) => { ::abi_encoded_size(inner) - } + }, Self::initialize(inner) => { ::abi_encoded_size(inner) - } + }, Self::maxDepositAmount(inner) => { ::abi_encoded_size(inner) - } + }, Self::minDepositAmount(inner) => { ::abi_encoded_size(inner) - } + }, Self::owner(inner) => { ::abi_encoded_size(inner) - } + }, Self::proxiableUUID(inner) => { ::abi_encoded_size(inner) - } + }, Self::renounceOwnership(inner) => { ::abi_encoded_size(inner) - } + }, Self::transferOwnership(inner) => { ::abi_encoded_size(inner) - } + }, Self::upgradeToAndCall(inner) => { ::abi_encoded_size(inner) - } + }, } } #[inline] @@ -3777,40 +3778,40 @@ pub mod FeeContract { ::abi_encode_raw( inner, out, ) - } + }, Self::balances(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::deposit(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::getVersion(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::initialize(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::maxDepositAmount(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::minDepositAmount(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::owner(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::proxiableUUID(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::renounceOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::transferOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::upgradeToAndCall(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } @@ -3883,49 +3884,49 @@ pub mod FeeContract { match self { Self::AddressEmptyCode(_) => { ::SELECTOR - } + }, Self::DepositTooLarge(_) => { ::SELECTOR - } + }, Self::DepositTooSmall(_) => { ::SELECTOR - } + }, Self::ERC1967InvalidImplementation(_) => { ::SELECTOR - } + }, Self::ERC1967NonPayable(_) => { ::SELECTOR - } + }, Self::FailedInnerCall(_) => { ::SELECTOR - } + }, Self::FunctionDoesNotExist(_) => { ::SELECTOR - } + }, Self::InvalidInitialization(_) => { ::SELECTOR - } + }, Self::InvalidUserAddress(_) => { ::SELECTOR - } + }, Self::NoFunctionCalled(_) => { ::SELECTOR - } + }, Self::NotInitializing(_) => { ::SELECTOR - } + }, Self::OwnableInvalidOwner(_) => { ::SELECTOR - } + }, Self::OwnableUnauthorizedAccount(_) => { ::SELECTOR - } + }, Self::UUPSUnauthorizedCallContext(_) => { ::SELECTOR - } + }, Self::UUPSUnsupportedProxiableUUID(_) => { ::SELECTOR - } + }, } } #[inline] @@ -4142,57 +4143,57 @@ pub mod FeeContract { match self { Self::AddressEmptyCode(inner) => { ::abi_encoded_size(inner) - } + }, Self::DepositTooLarge(inner) => { ::abi_encoded_size(inner) - } + }, Self::DepositTooSmall(inner) => { ::abi_encoded_size(inner) - } + }, Self::ERC1967InvalidImplementation(inner) => { ::abi_encoded_size( inner, ) - } + }, Self::ERC1967NonPayable(inner) => { ::abi_encoded_size(inner) - } + }, Self::FailedInnerCall(inner) => { ::abi_encoded_size(inner) - } + }, Self::FunctionDoesNotExist(inner) => { ::abi_encoded_size(inner) - } + }, Self::InvalidInitialization(inner) => { ::abi_encoded_size(inner) - } + }, Self::InvalidUserAddress(inner) => { ::abi_encoded_size(inner) - } + }, Self::NoFunctionCalled(inner) => { ::abi_encoded_size(inner) - } + }, Self::NotInitializing(inner) => { ::abi_encoded_size(inner) - } + }, 
Self::OwnableInvalidOwner(inner) => { ::abi_encoded_size(inner) - } + }, Self::OwnableUnauthorizedAccount(inner) => { ::abi_encoded_size( inner, ) - } + }, Self::UUPSUnauthorizedCallContext(inner) => { ::abi_encoded_size( inner, ) - } + }, Self::UUPSUnsupportedProxiableUUID(inner) => { ::abi_encoded_size( inner, ) - } + }, } } #[inline] @@ -4200,57 +4201,57 @@ pub mod FeeContract { match self { Self::AddressEmptyCode(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::DepositTooLarge(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::DepositTooSmall(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::ERC1967InvalidImplementation(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::ERC1967NonPayable(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::FailedInnerCall(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::FunctionDoesNotExist(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::InvalidInitialization(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::InvalidUserAddress(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::NoFunctionCalled(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::NotInitializing(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::OwnableInvalidOwner(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::OwnableUnauthorizedAccount(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::UUPSUnauthorizedCallContext(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::UUPSUnsupportedProxiableUUID(inner) => { ::abi_encode_raw( inner, out, ) - } + }, } } } @@ -4323,31 +4324,31 @@ pub mod FeeContract { Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Deposit) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log( topics, data, validate, ) .map(Self::Initialized) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Log) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log( topics, data, validate, ) .map(Self::OwnershipTransferred) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Upgrade) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Upgraded) - } + }, _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { name: ::NAME, log: alloy_sol_types::private::Box::new( @@ -4367,11 +4368,11 @@ pub mod FeeContract { Self::Deposit(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::Log(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::Upgrade(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), Self::Upgraded(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), } @@ -4381,15 +4382,15 @@ pub mod FeeContract { Self::Deposit(inner) => alloy_sol_types::private::IntoLogData::into_log_data(inner), Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::Log(inner) => alloy_sol_types::private::IntoLogData::into_log_data(inner), Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::Upgrade(inner) => alloy_sol_types::private::IntoLogData::into_log_data(inner), Self::Upgraded(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, } } } diff --git 
a/contract-bindings-alloy/src/iplonkverifier.rs b/contract-bindings-alloy/src/iplonkverifier.rs index 39e61d1e74..6d334f24a6 100644 --- a/contract-bindings-alloy/src/iplonkverifier.rs +++ b/contract-bindings-alloy/src/iplonkverifier.rs @@ -16,8 +16,9 @@ library BN254 { clippy::empty_structs_with_brackets )] pub mod BN254 { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct BaseField(alloy::sol_types::private::primitives::aliases::U256); @@ -282,7 +283,7 @@ pub mod BN254 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1257,8 +1258,9 @@ interface IPlonkVerifier { clippy::empty_structs_with_brackets )] pub mod IPlonkVerifier { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /// The creation / init bytecode of the contract. /// /// ```text @@ -1398,7 +1400,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1964,7 +1966,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2510,7 +2512,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2543,7 +2545,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2674,7 +2676,7 @@ pub mod IPlonkVerifier { match self { Self::verify(inner) => { ::abi_encoded_size(inner) - } + }, } } #[inline] @@ -2682,7 +2684,7 @@ pub mod IPlonkVerifier { match self { Self::verify(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } diff --git a/contract-bindings-alloy/src/lightclient.rs b/contract-bindings-alloy/src/lightclient.rs index c9f47e3da6..c5f11bd7cc 100644 --- a/contract-bindings-alloy/src/lightclient.rs +++ b/contract-bindings-alloy/src/lightclient.rs @@ -16,8 +16,9 @@ library BN254 { clippy::empty_structs_with_brackets )] pub mod BN254 { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct BaseField(alloy::sol_types::private::primitives::aliases::U256); @@ -282,7 +283,7 @@ pub mod BN254 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -562,8 +563,9 @@ library IPlonkVerifier { clippy::empty_structs_with_brackets )] pub mod IPlonkVerifier { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /**```solidity struct PlonkProof { BN254.G1Point wire0; BN254.G1Point wire1; BN254.G1Point wire2; BN254.G1Point wire3; BN254.G1Point wire4; BN254.G1Point prodPerm; BN254.G1Point split0; BN254.G1Point split1; BN254.G1Point split2; BN254.G1Point split3; BN254.G1Point split4; BN254.G1Point zeta; BN254.G1Point zetaOmega; BN254.ScalarField wireEval0; BN254.ScalarField wireEval1; BN254.ScalarField wireEval2; BN254.ScalarField wireEval3; BN254.ScalarField wireEval4; BN254.ScalarField sigmaEval0; BN254.ScalarField sigmaEval1; BN254.ScalarField sigmaEval2; BN254.ScalarField sigmaEval3; BN254.ScalarField prodPermZetaOmegaEval; } ```*/ @@ -683,7 +685,7 @@ pub mod IPlonkVerifier { match _t { 
alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2368,8 +2370,9 @@ interface LightClient { clippy::empty_structs_with_brackets )] pub mod LightClient { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /// The creation / init bytecode of the contract. /// /// ```text @@ -2429,7 +2432,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2651,7 +2654,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2872,7 +2875,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2939,7 +2942,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3005,7 +3008,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3065,7 +3068,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3125,7 +3128,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3185,7 +3188,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3245,7 +3248,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3305,7 +3308,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3365,7 +3368,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3425,7 +3428,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3485,7 +3488,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3545,7 +3548,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3605,7 +3608,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3665,7 +3668,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3728,7 +3731,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3795,7 +3798,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3859,7 +3862,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3919,7 +3922,7 @@ pub mod LightClient { match _t { 
alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3982,7 +3985,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4046,7 +4049,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4823,7 +4826,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4890,7 +4893,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4919,7 +4922,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5001,7 +5004,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5030,7 +5033,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5109,7 +5112,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5138,7 +5141,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5224,7 +5227,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5261,7 +5264,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5357,7 +5360,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5396,7 +5399,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5497,7 +5500,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5534,7 +5537,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5629,7 +5632,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5664,7 +5667,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5753,7 +5756,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5782,7 +5785,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5868,7 +5871,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5901,7 +5904,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } 
#[automatically_derived] @@ -6007,7 +6010,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6046,7 +6049,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6144,7 +6147,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6173,7 +6176,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6266,7 +6269,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6298,7 +6301,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6395,7 +6398,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6427,7 +6430,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6512,7 +6515,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6541,7 +6544,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6623,7 +6626,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6652,7 +6655,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6734,7 +6737,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6763,7 +6766,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6842,7 +6845,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6871,7 +6874,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6953,7 +6956,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6982,7 +6985,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7068,7 +7071,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7099,7 +7102,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7194,7 +7197,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7233,7 +7236,7 @@ pub mod LightClient { match _t { 
alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7334,7 +7337,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7363,7 +7366,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7445,7 +7448,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7474,7 +7477,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7556,7 +7559,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7585,7 +7588,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7679,7 +7682,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7711,7 +7714,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7866,66 +7869,66 @@ pub mod LightClient { match self { Self::UPGRADE_INTERFACE_VERSION(_) => { ::SELECTOR - } + }, Self::currentBlockNumber(_) => { ::SELECTOR - } + }, Self::disablePermissionedProverMode(_) => { ::SELECTOR - } + }, Self::finalizedState(_) => { ::SELECTOR - } + }, Self::genesisStakeTableState(_) => { ::SELECTOR - } + }, Self::genesisState(_) => ::SELECTOR, Self::getHotShotCommitment(_) => { ::SELECTOR - } + }, Self::getStateHistoryCount(_) => { ::SELECTOR - } + }, Self::getVersion(_) => ::SELECTOR, Self::initialize(_) => ::SELECTOR, Self::isPermissionedProverEnabled(_) => { ::SELECTOR - } + }, Self::lagOverEscapeHatchThreshold(_) => { ::SELECTOR - } + }, Self::newFinalizedState(_) => { ::SELECTOR - } + }, Self::owner(_) => ::SELECTOR, Self::permissionedProver(_) => { ::SELECTOR - } + }, Self::proxiableUUID(_) => ::SELECTOR, Self::renounceOwnership(_) => { ::SELECTOR - } + }, Self::setPermissionedProver(_) => { ::SELECTOR - } + }, Self::setstateHistoryRetentionPeriod(_) => { ::SELECTOR - } + }, Self::stateHistoryCommitments(_) => { ::SELECTOR - } + }, Self::stateHistoryFirstIndex(_) => { ::SELECTOR - } + }, Self::stateHistoryRetentionPeriod(_) => { ::SELECTOR - } + }, Self::transferOwnership(_) => { ::SELECTOR - } + }, Self::upgradeToAndCall(_) => { ::SELECTOR - } + }, } } #[inline] @@ -8375,98 +8378,98 @@ pub mod LightClient { ::abi_encode_raw( inner, out, ) - } + }, Self::currentBlockNumber(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::disablePermissionedProverMode(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::finalizedState(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::genesisStakeTableState(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::genesisState(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::getHotShotCommitment(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::getStateHistoryCount(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::getVersion(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::initialize(inner) => { ::abi_encode_raw(inner, out) - } + }, 
Self::isPermissionedProverEnabled(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::lagOverEscapeHatchThreshold(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::newFinalizedState(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::owner(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::permissionedProver(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::proxiableUUID(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::renounceOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::setPermissionedProver(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::setstateHistoryRetentionPeriod(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::stateHistoryCommitments(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::stateHistoryFirstIndex(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::stateHistoryRetentionPeriod(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::transferOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::upgradeToAndCall(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } @@ -8554,56 +8557,56 @@ pub mod LightClient { match self { Self::AddressEmptyCode(_) => { ::SELECTOR - } + }, Self::ERC1967InvalidImplementation(_) => { ::SELECTOR - } + }, Self::ERC1967NonPayable(_) => { ::SELECTOR - } + }, Self::FailedInnerCall(_) => { ::SELECTOR - } + }, Self::InsufficientSnapshotHistory(_) => { ::SELECTOR - } + }, Self::InvalidAddress(_) => ::SELECTOR, Self::InvalidArgs(_) => ::SELECTOR, Self::InvalidHotShotBlockForCommitmentCheck(_) => { ::SELECTOR - } + }, Self::InvalidInitialization(_) => { ::SELECTOR - } + }, Self::InvalidMaxStateHistory(_) => { ::SELECTOR - } + }, Self::InvalidProof(_) => ::SELECTOR, Self::NoChangeRequired(_) => { ::SELECTOR - } + }, Self::NotInitializing(_) => { ::SELECTOR - } + }, Self::OutdatedState(_) => ::SELECTOR, Self::OwnableInvalidOwner(_) => { ::SELECTOR - } + }, Self::OwnableUnauthorizedAccount(_) => { ::SELECTOR - } + }, Self::ProverNotPermissioned(_) => { ::SELECTOR - } + }, Self::UUPSUnauthorizedCallContext(_) => { ::SELECTOR - } + }, Self::UUPSUnsupportedProxiableUUID(_) => { ::SELECTOR - } + }, Self::WrongStakeTableUsed(_) => { ::SELECTOR - } + }, } } #[inline] @@ -9175,17 +9178,17 @@ pub mod LightClient { topics, data, validate, ) .map(Self::Initialized) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::NewState) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log( topics, data, validate, ) .map(Self::OwnershipTransferred) - } + }, Some( ::SIGNATURE_HASH, ) => ::decode_raw_log( @@ -9197,15 +9200,15 @@ pub mod LightClient { topics, data, validate, ) .map(Self::PermissionedProverRequired) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Upgrade) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Upgraded) - } + }, _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { name: ::NAME, log: alloy_sol_types::private::Box::new( @@ -9224,17 +9227,17 @@ pub mod LightClient { match self { Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::NewState(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::PermissionedProverNotRequired(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::PermissionedProverRequired(inner) => 
{ alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::Upgrade(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), Self::Upgraded(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), } @@ -9243,23 +9246,23 @@ pub mod LightClient { match self { Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::NewState(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::PermissionedProverNotRequired(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::PermissionedProverRequired(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::Upgrade(inner) => alloy_sol_types::private::IntoLogData::into_log_data(inner), Self::Upgraded(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, } } } diff --git a/contract-bindings-alloy/src/lightclientarbitrum.rs b/contract-bindings-alloy/src/lightclientarbitrum.rs index 63a5ce6fb1..105a3c7d38 100644 --- a/contract-bindings-alloy/src/lightclientarbitrum.rs +++ b/contract-bindings-alloy/src/lightclientarbitrum.rs @@ -16,8 +16,9 @@ library BN254 { clippy::empty_structs_with_brackets )] pub mod BN254 { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct BaseField(alloy::sol_types::private::primitives::aliases::U256); @@ -282,7 +283,7 @@ pub mod BN254 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -562,8 +563,9 @@ library IPlonkVerifier { clippy::empty_structs_with_brackets )] pub mod IPlonkVerifier { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /**```solidity struct PlonkProof { BN254.G1Point wire0; BN254.G1Point wire1; BN254.G1Point wire2; BN254.G1Point wire3; BN254.G1Point wire4; BN254.G1Point prodPerm; BN254.G1Point split0; BN254.G1Point split1; BN254.G1Point split2; BN254.G1Point split3; BN254.G1Point split4; BN254.G1Point zeta; BN254.G1Point zetaOmega; BN254.ScalarField wireEval0; BN254.ScalarField wireEval1; BN254.ScalarField wireEval2; BN254.ScalarField wireEval3; BN254.ScalarField wireEval4; BN254.ScalarField sigmaEval0; BN254.ScalarField sigmaEval1; BN254.ScalarField sigmaEval2; BN254.ScalarField sigmaEval3; BN254.ScalarField prodPermZetaOmegaEval; } ```*/ @@ -683,7 +685,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1281,8 +1283,9 @@ library LightClient { clippy::empty_structs_with_brackets )] pub mod LightClient { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /**```solidity struct LightClientState { uint64 viewNum; uint64 blockHeight; BN254.ScalarField blockCommRoot; } ```*/ @@ -1322,7 +1325,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1544,7 +1547,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2966,8 +2969,9 @@ interface LightClientArbitrum { clippy::empty_structs_with_brackets )] pub mod LightClientArbitrum { - use super::*; use alloy::sol_types as 
alloy_sol_types; + + use super::*; /// The creation / init bytecode of the contract. /// /// ```text @@ -3016,7 +3020,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3083,7 +3087,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3149,7 +3153,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3209,7 +3213,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3269,7 +3273,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3329,7 +3333,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3389,7 +3393,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3449,7 +3453,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3509,7 +3513,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3569,7 +3573,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3629,7 +3633,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3689,7 +3693,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3749,7 +3753,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3809,7 +3813,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3872,7 +3876,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3939,7 +3943,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4003,7 +4007,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4063,7 +4067,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4126,7 +4130,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4190,7 +4194,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } 
#[automatically_derived] @@ -4980,7 +4984,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5009,7 +5013,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5091,7 +5095,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5120,7 +5124,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5199,7 +5203,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5228,7 +5232,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5314,7 +5318,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5351,7 +5355,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5447,7 +5451,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5486,7 +5490,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5587,7 +5591,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5624,7 +5628,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5719,7 +5723,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5754,7 +5758,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5843,7 +5847,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5872,7 +5876,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5958,7 +5962,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5991,7 +5995,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6098,7 +6102,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6137,7 +6141,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6237,7 +6241,7 @@ pub mod LightClientArbitrum { match _t { 
alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6266,7 +6270,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6359,7 +6363,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6391,7 +6395,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6489,7 +6493,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6521,7 +6525,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6608,7 +6612,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6637,7 +6641,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6719,7 +6723,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6748,7 +6752,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6830,7 +6834,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6859,7 +6863,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6938,7 +6942,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6967,7 +6971,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7049,7 +7053,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7078,7 +7082,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7164,7 +7168,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7195,7 +7199,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7290,7 +7294,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7329,7 +7333,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7430,7 +7434,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } 
#[automatically_derived] @@ -7459,7 +7463,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7541,7 +7545,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7570,7 +7574,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7652,7 +7656,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7681,7 +7685,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7775,7 +7779,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7807,7 +7811,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7962,66 +7966,66 @@ pub mod LightClientArbitrum { match self { Self::UPGRADE_INTERFACE_VERSION(_) => { ::SELECTOR - } + }, Self::currentBlockNumber(_) => { ::SELECTOR - } + }, Self::disablePermissionedProverMode(_) => { ::SELECTOR - } + }, Self::finalizedState(_) => { ::SELECTOR - } + }, Self::genesisStakeTableState(_) => { ::SELECTOR - } + }, Self::genesisState(_) => ::SELECTOR, Self::getHotShotCommitment(_) => { ::SELECTOR - } + }, Self::getStateHistoryCount(_) => { ::SELECTOR - } + }, Self::getVersion(_) => ::SELECTOR, Self::initialize(_) => ::SELECTOR, Self::isPermissionedProverEnabled(_) => { ::SELECTOR - } + }, Self::lagOverEscapeHatchThreshold(_) => { ::SELECTOR - } + }, Self::newFinalizedState(_) => { ::SELECTOR - } + }, Self::owner(_) => ::SELECTOR, Self::permissionedProver(_) => { ::SELECTOR - } + }, Self::proxiableUUID(_) => ::SELECTOR, Self::renounceOwnership(_) => { ::SELECTOR - } + }, Self::setPermissionedProver(_) => { ::SELECTOR - } + }, Self::setstateHistoryRetentionPeriod(_) => { ::SELECTOR - } + }, Self::stateHistoryCommitments(_) => { ::SELECTOR - } + }, Self::stateHistoryFirstIndex(_) => { ::SELECTOR - } + }, Self::stateHistoryRetentionPeriod(_) => { ::SELECTOR - } + }, Self::transferOwnership(_) => { ::SELECTOR - } + }, Self::upgradeToAndCall(_) => { ::SELECTOR - } + }, } } #[inline] @@ -8468,98 +8472,98 @@ pub mod LightClientArbitrum { ::abi_encode_raw( inner, out, ) - } + }, Self::currentBlockNumber(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::disablePermissionedProverMode(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::finalizedState(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::genesisStakeTableState(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::genesisState(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::getHotShotCommitment(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::getStateHistoryCount(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::getVersion(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::initialize(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::isPermissionedProverEnabled(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::lagOverEscapeHatchThreshold(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::newFinalizedState(inner) => { 
::abi_encode_raw(inner, out) - } + }, Self::owner(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::permissionedProver(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::proxiableUUID(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::renounceOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::setPermissionedProver(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::setstateHistoryRetentionPeriod(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::stateHistoryCommitments(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::stateHistoryFirstIndex(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::stateHistoryRetentionPeriod(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::transferOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::upgradeToAndCall(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } @@ -8647,56 +8651,56 @@ pub mod LightClientArbitrum { match self { Self::AddressEmptyCode(_) => { ::SELECTOR - } + }, Self::ERC1967InvalidImplementation(_) => { ::SELECTOR - } + }, Self::ERC1967NonPayable(_) => { ::SELECTOR - } + }, Self::FailedInnerCall(_) => { ::SELECTOR - } + }, Self::InsufficientSnapshotHistory(_) => { ::SELECTOR - } + }, Self::InvalidAddress(_) => ::SELECTOR, Self::InvalidArgs(_) => ::SELECTOR, Self::InvalidHotShotBlockForCommitmentCheck(_) => { ::SELECTOR - } + }, Self::InvalidInitialization(_) => { ::SELECTOR - } + }, Self::InvalidMaxStateHistory(_) => { ::SELECTOR - } + }, Self::InvalidProof(_) => ::SELECTOR, Self::NoChangeRequired(_) => { ::SELECTOR - } + }, Self::NotInitializing(_) => { ::SELECTOR - } + }, Self::OutdatedState(_) => ::SELECTOR, Self::OwnableInvalidOwner(_) => { ::SELECTOR - } + }, Self::OwnableUnauthorizedAccount(_) => { ::SELECTOR - } + }, Self::ProverNotPermissioned(_) => { ::SELECTOR - } + }, Self::UUPSUnauthorizedCallContext(_) => { ::SELECTOR - } + }, Self::UUPSUnsupportedProxiableUUID(_) => { ::SELECTOR - } + }, Self::WrongStakeTableUsed(_) => { ::SELECTOR - } + }, } } #[inline] @@ -9268,17 +9272,17 @@ pub mod LightClientArbitrum { topics, data, validate, ) .map(Self::Initialized) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::NewState) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log( topics, data, validate, ) .map(Self::OwnershipTransferred) - } + }, Some( ::SIGNATURE_HASH, ) => ::decode_raw_log( @@ -9290,15 +9294,15 @@ pub mod LightClientArbitrum { topics, data, validate, ) .map(Self::PermissionedProverRequired) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Upgrade) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Upgraded) - } + }, _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { name: ::NAME, log: alloy_sol_types::private::Box::new( @@ -9317,17 +9321,17 @@ pub mod LightClientArbitrum { match self { Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::NewState(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::PermissionedProverNotRequired(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::PermissionedProverRequired(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::Upgrade(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), Self::Upgraded(inner) => 
alloy_sol_types::private::IntoLogData::to_log_data(inner), } @@ -9336,23 +9340,23 @@ pub mod LightClientArbitrum { match self { Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::NewState(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::PermissionedProverNotRequired(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::PermissionedProverRequired(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::Upgrade(inner) => alloy_sol_types::private::IntoLogData::into_log_data(inner), Self::Upgraded(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, } } } diff --git a/contract-bindings-alloy/src/lightclientmock.rs b/contract-bindings-alloy/src/lightclientmock.rs index 6b3fd406cd..7feb65d5ef 100644 --- a/contract-bindings-alloy/src/lightclientmock.rs +++ b/contract-bindings-alloy/src/lightclientmock.rs @@ -16,8 +16,9 @@ library BN254 { clippy::empty_structs_with_brackets )] pub mod BN254 { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct BaseField(alloy::sol_types::private::primitives::aliases::U256); @@ -282,7 +283,7 @@ pub mod BN254 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -562,8 +563,9 @@ library IPlonkVerifier { clippy::empty_structs_with_brackets )] pub mod IPlonkVerifier { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /**```solidity struct PlonkProof { BN254.G1Point wire0; BN254.G1Point wire1; BN254.G1Point wire2; BN254.G1Point wire3; BN254.G1Point wire4; BN254.G1Point prodPerm; BN254.G1Point split0; BN254.G1Point split1; BN254.G1Point split2; BN254.G1Point split3; BN254.G1Point split4; BN254.G1Point zeta; BN254.G1Point zetaOmega; BN254.ScalarField wireEval0; BN254.ScalarField wireEval1; BN254.ScalarField wireEval2; BN254.ScalarField wireEval3; BN254.ScalarField wireEval4; BN254.ScalarField sigmaEval0; BN254.ScalarField sigmaEval1; BN254.ScalarField sigmaEval2; BN254.ScalarField sigmaEval3; BN254.ScalarField prodPermZetaOmegaEval; } ```*/ @@ -683,7 +685,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1282,8 +1284,9 @@ library LightClient { clippy::empty_structs_with_brackets )] pub mod LightClient { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /**```solidity struct LightClientState { uint64 viewNum; uint64 blockHeight; BN254.ScalarField blockCommRoot; } ```*/ @@ -1323,7 +1326,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1545,7 +1548,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1781,7 +1784,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3376,8 +3379,9 @@ interface LightClientMock { clippy::empty_structs_with_brackets )] pub mod LightClientMock { - use super::*; use alloy::sol_types as alloy_sol_types; + + use 
super::*; /// The creation / init bytecode of the contract. /// /// ```text @@ -3426,7 +3430,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3493,7 +3497,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3559,7 +3563,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3619,7 +3623,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3679,7 +3683,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3739,7 +3743,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3799,7 +3803,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3859,7 +3863,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3919,7 +3923,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3979,7 +3983,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4039,7 +4043,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4099,7 +4103,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4159,7 +4163,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4219,7 +4223,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4282,7 +4286,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4349,7 +4353,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4413,7 +4417,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4473,7 +4477,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4536,7 +4540,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4600,7 +4604,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5393,7 +5397,7 @@ pub mod LightClientMock { match _t { 
alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5482,7 +5486,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5511,7 +5515,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5593,7 +5597,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5622,7 +5626,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5701,7 +5705,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5730,7 +5734,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5816,7 +5820,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5853,7 +5857,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5949,7 +5953,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5988,7 +5992,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6089,7 +6093,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6126,7 +6130,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6221,7 +6225,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6256,7 +6260,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6345,7 +6349,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6374,7 +6378,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6460,7 +6464,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6493,7 +6497,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6600,7 +6604,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6639,7 +6643,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6739,7 +6743,7 @@ pub mod LightClientMock { match _t { 
alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6768,7 +6772,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6861,7 +6865,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6893,7 +6897,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6991,7 +6995,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7023,7 +7027,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7110,7 +7114,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7139,7 +7143,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7221,7 +7225,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7250,7 +7254,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7332,7 +7336,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7361,7 +7365,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7440,7 +7444,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7469,7 +7473,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7552,7 +7556,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7581,7 +7585,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7667,7 +7671,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7696,7 +7700,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7779,7 +7783,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7808,7 +7812,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7890,7 +7894,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7919,7 +7923,7 @@ pub mod LightClientMock { match _t { 
alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8012,7 +8016,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8043,7 +8047,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8130,7 +8134,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8161,7 +8165,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8256,7 +8260,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8295,7 +8299,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8396,7 +8400,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8425,7 +8429,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8507,7 +8511,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8536,7 +8540,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8618,7 +8622,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8647,7 +8651,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8741,7 +8745,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8773,7 +8777,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8940,76 +8944,76 @@ pub mod LightClientMock { match self { Self::UPGRADE_INTERFACE_VERSION(_) => { ::SELECTOR - } + }, Self::currentBlockNumber(_) => { ::SELECTOR - } + }, Self::disablePermissionedProverMode(_) => { ::SELECTOR - } + }, Self::finalizedState(_) => { ::SELECTOR - } + }, Self::genesisStakeTableState(_) => { ::SELECTOR - } + }, Self::genesisState(_) => ::SELECTOR, Self::getHotShotCommitment(_) => { ::SELECTOR - } + }, Self::getStateHistoryCount(_) => { ::SELECTOR - } + }, Self::getVersion(_) => ::SELECTOR, Self::initialize(_) => ::SELECTOR, Self::isPermissionedProverEnabled(_) => { ::SELECTOR - } + }, Self::lagOverEscapeHatchThreshold(_) => { ::SELECTOR - } + }, Self::newFinalizedState(_) => { ::SELECTOR - } + }, Self::owner(_) => ::SELECTOR, Self::permissionedProver(_) => { ::SELECTOR - } + }, Self::proxiableUUID(_) => ::SELECTOR, Self::renounceOwnership(_) => { ::SELECTOR - } + }, Self::setFinalizedState(_) => { ::SELECTOR - } + }, Self::setHotShotDownSince(_) => { ::SELECTOR - } + }, Self::setHotShotUp(_) => ::SELECTOR, 
Self::setPermissionedProver(_) => { ::SELECTOR - } + }, Self::setStateHistory(_) => { ::SELECTOR - } + }, Self::setstateHistoryRetentionPeriod(_) => { ::SELECTOR - } + }, Self::stateHistoryCommitments(_) => { ::SELECTOR - } + }, Self::stateHistoryFirstIndex(_) => { ::SELECTOR - } + }, Self::stateHistoryRetentionPeriod(_) => { ::SELECTOR - } + }, Self::transferOwnership(_) => { ::SELECTOR - } + }, Self::upgradeToAndCall(_) => { ::SELECTOR - } + }, } } #[inline] @@ -9522,112 +9526,112 @@ pub mod LightClientMock { ::abi_encode_raw( inner, out, ) - } + }, Self::currentBlockNumber(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::disablePermissionedProverMode(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::finalizedState(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::genesisStakeTableState(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::genesisState(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::getHotShotCommitment(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::getStateHistoryCount(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::getVersion(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::initialize(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::isPermissionedProverEnabled(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::lagOverEscapeHatchThreshold(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::newFinalizedState(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::owner(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::permissionedProver(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::proxiableUUID(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::renounceOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::setFinalizedState(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::setHotShotDownSince(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::setHotShotUp(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::setPermissionedProver(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::setStateHistory(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::setstateHistoryRetentionPeriod(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::stateHistoryCommitments(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::stateHistoryFirstIndex(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::stateHistoryRetentionPeriod(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::transferOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::upgradeToAndCall(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } @@ -9715,56 +9719,56 @@ pub mod LightClientMock { match self { Self::AddressEmptyCode(_) => { ::SELECTOR - } + }, Self::ERC1967InvalidImplementation(_) => { ::SELECTOR - } + }, Self::ERC1967NonPayable(_) => { ::SELECTOR - } + }, Self::FailedInnerCall(_) => { ::SELECTOR - } + }, Self::InsufficientSnapshotHistory(_) => { ::SELECTOR - } + }, Self::InvalidAddress(_) => ::SELECTOR, Self::InvalidArgs(_) => ::SELECTOR, Self::InvalidHotShotBlockForCommitmentCheck(_) => { ::SELECTOR - } + }, Self::InvalidInitialization(_) => { ::SELECTOR - } + }, Self::InvalidMaxStateHistory(_) => { ::SELECTOR - } + }, Self::InvalidProof(_) => ::SELECTOR, Self::NoChangeRequired(_) => { ::SELECTOR - } + }, Self::NotInitializing(_) => { ::SELECTOR - } + }, Self::OutdatedState(_) => ::SELECTOR, Self::OwnableInvalidOwner(_) => { ::SELECTOR - } + }, Self::OwnableUnauthorizedAccount(_) => { ::SELECTOR - } + }, 
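The repetitive churn in these generated-binding hunks is one mechanical formatting change: match arms whose body is a block now end with a trailing comma (the `- }` / `+ },` pairs above and below). A minimal sketch of the pattern, assuming it comes from rustfmt's unstable `match_block_trailing_comma = true` option (an inference from the diff; the patch itself does not name the setting):

```rust
// Hypothetical reduction of the arm shape rewritten throughout these hunks.
fn selector_for(kind: u8) -> [u8; 4] {
    match kind {
        // Expression-bodied arms always required a comma; they are untouched.
        0 => [0x00; 4],
        // Block-bodied arms used to end with `}` alone; the reformat appends
        // a comma, which Rust accepts after a block arm.
        _ => {
            [0xff; 4]
        }, // <- the `- }` / `+ },` change repeated across these files
    }
}
```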
Self::ProverNotPermissioned(_) => { ::SELECTOR - } + }, Self::UUPSUnauthorizedCallContext(_) => { ::SELECTOR - } + }, Self::UUPSUnsupportedProxiableUUID(_) => { ::SELECTOR - } + }, Self::WrongStakeTableUsed(_) => { ::SELECTOR - } + }, } } #[inline] @@ -10336,17 +10340,17 @@ pub mod LightClientMock { topics, data, validate, ) .map(Self::Initialized) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::NewState) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log( topics, data, validate, ) .map(Self::OwnershipTransferred) - } + }, Some( ::SIGNATURE_HASH, ) => ::decode_raw_log( @@ -10358,15 +10362,15 @@ pub mod LightClientMock { topics, data, validate, ) .map(Self::PermissionedProverRequired) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Upgrade) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Upgraded) - } + }, _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { name: ::NAME, log: alloy_sol_types::private::Box::new( @@ -10385,17 +10389,17 @@ pub mod LightClientMock { match self { Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::NewState(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::PermissionedProverNotRequired(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::PermissionedProverRequired(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::Upgrade(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), Self::Upgraded(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), } @@ -10404,23 +10408,23 @@ pub mod LightClientMock { match self { Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::NewState(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::PermissionedProverNotRequired(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::PermissionedProverRequired(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::Upgrade(inner) => alloy_sol_types::private::IntoLogData::into_log_data(inner), Self::Upgraded(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, } } } diff --git a/contract-bindings-alloy/src/permissionedstaketable.rs b/contract-bindings-alloy/src/permissionedstaketable.rs index d8aa26a865..14d1f72146 100644 --- a/contract-bindings-alloy/src/permissionedstaketable.rs +++ b/contract-bindings-alloy/src/permissionedstaketable.rs @@ -15,8 +15,9 @@ library BN254 { clippy::empty_structs_with_brackets )] pub mod BN254 { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct BaseField(alloy::sol_types::private::primitives::aliases::U256); @@ -171,7 +172,7 @@ pub mod BN254 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -463,8 +464,9 @@ library EdOnBN254 { clippy::empty_structs_with_brackets )] pub mod EdOnBN254 { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; 
/**```solidity struct EdOnBN254Point { uint256 x; uint256 y; } ```*/ @@ -500,7 +502,7 @@ pub mod EdOnBN254 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1359,8 +1361,9 @@ interface PermissionedStakeTable { clippy::empty_structs_with_brackets )] pub mod PermissionedStakeTable { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /// The creation / init bytecode of the contract. /// /// ```text @@ -1420,7 +1423,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1622,7 +1625,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1682,7 +1685,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1745,7 +1748,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1812,7 +1815,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1879,7 +1882,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1945,7 +1948,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2330,7 +2333,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2407,7 +2410,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2436,7 +2439,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2517,7 +2520,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2546,7 +2549,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2628,7 +2631,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2657,7 +2660,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2743,7 +2746,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2772,7 +2775,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2856,7 +2859,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2885,7 +2888,7 @@ pub mod PermissionedStakeTable { match 
_t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2964,7 +2967,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2993,7 +2996,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3075,7 +3078,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3104,7 +3107,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3202,7 +3205,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3234,7 +3237,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3343,15 +3346,15 @@ pub mod PermissionedStakeTable { Self::initialize(_) => ::SELECTOR, Self::initializedAtBlock(_) => { ::SELECTOR - } + }, Self::isStaker(_) => ::SELECTOR, Self::owner(_) => ::SELECTOR, Self::renounceOwnership(_) => { ::SELECTOR - } + }, Self::transferOwnership(_) => { ::SELECTOR - } + }, Self::update(_) => ::SELECTOR, } } @@ -3477,28 +3480,28 @@ pub mod PermissionedStakeTable { match self { Self::_hashBlsKey(inner) => { <_hashBlsKeyCall as alloy_sol_types::SolCall>::abi_encoded_size(inner) - } + }, Self::initialize(inner) => { ::abi_encoded_size(inner) - } + }, Self::initializedAtBlock(inner) => { ::abi_encoded_size(inner) - } + }, Self::isStaker(inner) => { ::abi_encoded_size(inner) - } + }, Self::owner(inner) => { ::abi_encoded_size(inner) - } + }, Self::renounceOwnership(inner) => { ::abi_encoded_size(inner) - } + }, Self::transferOwnership(inner) => { ::abi_encoded_size(inner) - } + }, Self::update(inner) => { ::abi_encoded_size(inner) - } + }, } } #[inline] @@ -3506,28 +3509,28 @@ pub mod PermissionedStakeTable { match self { Self::_hashBlsKey(inner) => { <_hashBlsKeyCall as alloy_sol_types::SolCall>::abi_encode_raw(inner, out) - } + }, Self::initialize(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::initializedAtBlock(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::isStaker(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::owner(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::renounceOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::transferOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::update(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } @@ -3573,19 +3576,19 @@ pub mod PermissionedStakeTable { match self { Self::InvalidInitialization(_) => { ::SELECTOR - } + }, Self::NotInitializing(_) => { ::SELECTOR - } + }, Self::OwnableInvalidOwner(_) => { ::SELECTOR - } + }, Self::OwnableUnauthorizedAccount(_) => { ::SELECTOR - } + }, Self::StakerAlreadyExists(_) => { ::SELECTOR - } + }, Self::StakerNotFound(_) => ::SELECTOR, } } @@ -3695,24 +3698,24 @@ pub mod PermissionedStakeTable { match self { Self::InvalidInitialization(inner) => { ::abi_encoded_size(inner) - } + }, Self::NotInitializing(inner) => { ::abi_encoded_size(inner) - } + }, Self::OwnableInvalidOwner(inner) => { ::abi_encoded_size(inner) - } + }, Self::OwnableUnauthorizedAccount(inner) 
=> { ::abi_encoded_size( inner, ) - } + }, Self::StakerAlreadyExists(inner) => { ::abi_encoded_size(inner) - } + }, Self::StakerNotFound(inner) => { ::abi_encoded_size(inner) - } + }, } } #[inline] @@ -3720,24 +3723,24 @@ pub mod PermissionedStakeTable { match self { Self::InvalidInitialization(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::NotInitializing(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::OwnableInvalidOwner(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::OwnableUnauthorizedAccount(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::StakerAlreadyExists(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::StakerNotFound(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } @@ -3791,19 +3794,19 @@ pub mod PermissionedStakeTable { topics, data, validate, ) .map(Self::Initialized) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log( topics, data, validate, ) .map(Self::OwnershipTransferred) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log( topics, data, validate, ) .map(Self::StakersUpdated) - } + }, _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { name: ::NAME, log: alloy_sol_types::private::Box::new( @@ -3822,26 +3825,26 @@ pub mod PermissionedStakeTable { match self { Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::StakersUpdated(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, } } fn into_log_data(self) -> alloy_sol_types::private::LogData { match self { Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::StakersUpdated(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, } } } diff --git a/contract-bindings-alloy/src/plonkverifier.rs b/contract-bindings-alloy/src/plonkverifier.rs index edc97722de..060d8dccac 100644 --- a/contract-bindings-alloy/src/plonkverifier.rs +++ b/contract-bindings-alloy/src/plonkverifier.rs @@ -16,8 +16,9 @@ library BN254 { clippy::empty_structs_with_brackets )] pub mod BN254 { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct BaseField(alloy::sol_types::private::primitives::aliases::U256); @@ -282,7 +283,7 @@ pub mod BN254 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -563,8 +564,9 @@ library IPlonkVerifier { clippy::empty_structs_with_brackets )] pub mod IPlonkVerifier { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /**```solidity struct PlonkProof { BN254.G1Point wire0; BN254.G1Point wire1; BN254.G1Point wire2; BN254.G1Point wire3; BN254.G1Point wire4; BN254.G1Point prodPerm; BN254.G1Point split0; BN254.G1Point split1; BN254.G1Point split2; BN254.G1Point split3; BN254.G1Point split4; BN254.G1Point zeta; BN254.G1Point zetaOmega; BN254.ScalarField wireEval0; BN254.ScalarField wireEval1; BN254.ScalarField wireEval2; BN254.ScalarField wireEval3; BN254.ScalarField wireEval4; BN254.ScalarField sigmaEval0; BN254.ScalarField sigmaEval1; BN254.ScalarField sigmaEval2; BN254.ScalarField sigmaEval3; BN254.ScalarField prodPermZetaOmegaEval; } ```*/ @@ -684,7 +686,7 @@ pub mod 
IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1250,7 +1252,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2607,8 +2609,9 @@ interface PlonkVerifier { clippy::empty_structs_with_brackets )] pub mod PlonkVerifier { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /// The creation / init bytecode of the contract. /// /// ```text @@ -2654,7 +2657,7 @@ pub mod PlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2714,7 +2717,7 @@ pub mod PlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2774,7 +2777,7 @@ pub mod PlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2860,7 +2863,7 @@ pub mod PlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2893,7 +2896,7 @@ pub mod PlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3026,7 +3029,7 @@ pub mod PlonkVerifier { match self { Self::verify(inner) => { ::abi_encoded_size(inner) - } + }, } } #[inline] @@ -3034,7 +3037,7 @@ pub mod PlonkVerifier { match self { Self::verify(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } @@ -3071,10 +3074,10 @@ pub mod PlonkVerifier { match self { Self::InvalidPlonkArgs(_) => { ::SELECTOR - } + }, Self::UnsupportedDegree(_) => { ::SELECTOR - } + }, Self::WrongPlonkVK(_) => ::SELECTOR, } } @@ -3146,13 +3149,13 @@ pub mod PlonkVerifier { match self { Self::InvalidPlonkArgs(inner) => { ::abi_encoded_size(inner) - } + }, Self::UnsupportedDegree(inner) => { ::abi_encoded_size(inner) - } + }, Self::WrongPlonkVK(inner) => { ::abi_encoded_size(inner) - } + }, } } #[inline] @@ -3160,13 +3163,13 @@ pub mod PlonkVerifier { match self { Self::InvalidPlonkArgs(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::UnsupportedDegree(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::WrongPlonkVK(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } diff --git a/contract-bindings-alloy/src/plonkverifier2.rs b/contract-bindings-alloy/src/plonkverifier2.rs index c7ab4bb3cc..1997d2f3bb 100644 --- a/contract-bindings-alloy/src/plonkverifier2.rs +++ b/contract-bindings-alloy/src/plonkverifier2.rs @@ -16,8 +16,9 @@ library BN254 { clippy::empty_structs_with_brackets )] pub mod BN254 { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct BaseField(alloy::sol_types::private::primitives::aliases::U256); @@ -282,7 +283,7 @@ pub mod BN254 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -563,8 +564,9 @@ library IPlonkVerifier { clippy::empty_structs_with_brackets )] pub mod IPlonkVerifier { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /**```solidity struct PlonkProof { BN254.G1Point wire0; BN254.G1Point wire1; BN254.G1Point wire2; BN254.G1Point wire3; BN254.G1Point wire4; BN254.G1Point prodPerm; BN254.G1Point split0; BN254.G1Point split1; 
BN254.G1Point split2; BN254.G1Point split3; BN254.G1Point split4; BN254.G1Point zeta; BN254.G1Point zetaOmega; BN254.ScalarField wireEval0; BN254.ScalarField wireEval1; BN254.ScalarField wireEval2; BN254.ScalarField wireEval3; BN254.ScalarField wireEval4; BN254.ScalarField sigmaEval0; BN254.ScalarField sigmaEval1; BN254.ScalarField sigmaEval2; BN254.ScalarField sigmaEval3; BN254.ScalarField prodPermZetaOmegaEval; } ```*/ @@ -684,7 +686,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1250,7 +1252,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2623,8 +2625,9 @@ interface PlonkVerifier2 { clippy::empty_structs_with_brackets )] pub mod PlonkVerifier2 { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /// The creation / init bytecode of the contract. /// /// ```text @@ -2670,7 +2673,7 @@ pub mod PlonkVerifier2 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2738,7 +2741,7 @@ pub mod PlonkVerifier2 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2767,7 +2770,7 @@ pub mod PlonkVerifier2 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2849,7 +2852,7 @@ pub mod PlonkVerifier2 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2878,7 +2881,7 @@ pub mod PlonkVerifier2 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2978,7 +2981,7 @@ pub mod PlonkVerifier2 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3011,7 +3014,7 @@ pub mod PlonkVerifier2 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3174,13 +3177,13 @@ pub mod PlonkVerifier2 { match self { Self::P_MOD(inner) => { ::abi_encoded_size(inner) - } + }, Self::R_MOD(inner) => { ::abi_encoded_size(inner) - } + }, Self::verify(inner) => { ::abi_encoded_size(inner) - } + }, } } #[inline] @@ -3188,13 +3191,13 @@ pub mod PlonkVerifier2 { match self { Self::P_MOD(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::R_MOD(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::verify(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } @@ -3223,7 +3226,7 @@ pub mod PlonkVerifier2 { match self { Self::UnsupportedDegree(_) => { ::SELECTOR - } + }, } } #[inline] @@ -3268,7 +3271,7 @@ pub mod PlonkVerifier2 { match self { Self::UnsupportedDegree(inner) => { ::abi_encoded_size(inner) - } + }, } } #[inline] @@ -3276,7 +3279,7 @@ pub mod PlonkVerifier2 { match self { Self::UnsupportedDegree(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } diff --git a/contract-bindings-ethers/src/erc1967_proxy.rs b/contract-bindings-ethers/src/erc1967_proxy.rs index cff5203cba..453bf08c61 100644 --- a/contract-bindings-ethers/src/erc1967_proxy.rs +++ b/contract-bindings-ethers/src/erc1967_proxy.rs @@ -320,7 +320,7 @@ pub mod erc1967_proxy { Self::AddressEmptyCode(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::ERC1967InvalidImplementation(element) => { 
::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ERC1967NonPayable(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::FailedInnerCall(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::RevertString(s) => ::ethers::core::abi::AbiEncode::encode(s), @@ -333,21 +333,21 @@ pub mod erc1967_proxy { [0x08, 0xc3, 0x79, 0xa0] => true, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector( ) => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ => false, } } @@ -358,7 +358,7 @@ pub mod erc1967_proxy { Self::AddressEmptyCode(element) => ::core::fmt::Display::fmt(element, f), Self::ERC1967InvalidImplementation(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::ERC1967NonPayable(element) => ::core::fmt::Display::fmt(element, f), Self::FailedInnerCall(element) => ::core::fmt::Display::fmt(element, f), Self::RevertString(s) => ::core::fmt::Display::fmt(s, f), diff --git a/contract-bindings-ethers/src/fee_contract.rs b/contract-bindings-ethers/src/fee_contract.rs index 06cb0aa83d..38f00a01f0 100644 --- a/contract-bindings-ethers/src/fee_contract.rs +++ b/contract-bindings-ethers/src/fee_contract.rs @@ -1071,32 +1071,32 @@ pub mod fee_contract { Self::DepositTooSmall(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::ERC1967InvalidImplementation(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ERC1967NonPayable(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::FailedInnerCall(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::FunctionDoesNotExist(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidInitialization(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidUserAddress(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::NoFunctionCalled(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::NotInitializing(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::OwnableInvalidOwner(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::OwnableUnauthorizedAccount(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::UUPSUnauthorizedCallContext(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::UUPSUnsupportedProxiableUUID(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::RevertString(s) => ::ethers::core::abi::AbiEncode::encode(s), } } @@ -1107,70 +1107,70 @@ pub mod fee_contract { [0x08, 0xc3, 0x79, 0xa0] => true, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector( ) => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector( ) => { true - } + }, _ if selector == ::selector( ) => { true - } + }, _ => false, } } @@ -1183,7 +1183,7 @@ pub mod fee_contract { Self::DepositTooSmall(element) => ::core::fmt::Display::fmt(element, f), 
Self::ERC1967InvalidImplementation(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::ERC1967NonPayable(element) => ::core::fmt::Display::fmt(element, f), Self::FailedInnerCall(element) => ::core::fmt::Display::fmt(element, f), Self::FunctionDoesNotExist(element) => ::core::fmt::Display::fmt(element, f), @@ -1196,7 +1196,7 @@ pub mod fee_contract { Self::UUPSUnauthorizedCallContext(element) => ::core::fmt::Display::fmt(element, f), Self::UUPSUnsupportedProxiableUUID(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::RevertString(s) => ::core::fmt::Display::fmt(s, f), } } @@ -1754,7 +1754,7 @@ pub mod fee_contract { match self { Self::UpgradeInterfaceVersion(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::Balances(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::Deposit(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::GetVersion(element) => ::ethers::core::abi::AbiEncode::encode(element), diff --git a/contract-bindings-ethers/src/light_client.rs b/contract-bindings-ethers/src/light_client.rs index 32daa6fc12..09aa7f02bd 100644 --- a/contract-bindings-ethers/src/light_client.rs +++ b/contract-bindings-ethers/src/light_client.rs @@ -1726,45 +1726,45 @@ pub mod light_client { Self::AddressEmptyCode(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::ERC1967InvalidImplementation(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ERC1967NonPayable(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::FailedInnerCall(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InsufficientSnapshotHistory(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidAddress(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InvalidArgs(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InvalidHotShotBlockForCommitmentCheck(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidInitialization(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidMaxStateHistory(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidProof(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::NoChangeRequired(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::NotInitializing(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::OutdatedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::OwnableInvalidOwner(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::OwnableUnauthorizedAccount(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ProverNotPermissioned(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::UUPSUnauthorizedCallContext(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::UUPSUnsupportedProxiableUUID(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::WrongStakeTableUsed(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::RevertString(s) => ::ethers::core::abi::AbiEncode::encode(s), } } @@ -1859,7 +1859,7 @@ pub mod light_client { Self::AddressEmptyCode(element) => ::core::fmt::Display::fmt(element, f), Self::ERC1967InvalidImplementation(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::ERC1967NonPayable(element) => ::core::fmt::Display::fmt(element, f), Self::FailedInnerCall(element) 
=> ::core::fmt::Display::fmt(element, f), Self::InsufficientSnapshotHistory(element) => ::core::fmt::Display::fmt(element, f), @@ -1867,7 +1867,7 @@ pub mod light_client { Self::InvalidArgs(element) => ::core::fmt::Display::fmt(element, f), Self::InvalidHotShotBlockForCommitmentCheck(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::InvalidInitialization(element) => ::core::fmt::Display::fmt(element, f), Self::InvalidMaxStateHistory(element) => ::core::fmt::Display::fmt(element, f), Self::InvalidProof(element) => ::core::fmt::Display::fmt(element, f), @@ -1880,7 +1880,7 @@ pub mod light_client { Self::UUPSUnauthorizedCallContext(element) => ::core::fmt::Display::fmt(element, f), Self::UUPSUnsupportedProxiableUUID(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::WrongStakeTableUsed(element) => ::core::fmt::Display::fmt(element, f), Self::RevertString(s) => ::core::fmt::Display::fmt(s, f), } @@ -2176,10 +2176,10 @@ pub mod light_client { Self::OwnershipTransferredFilter(element) => ::core::fmt::Display::fmt(element, f), Self::PermissionedProverNotRequiredFilter(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::PermissionedProverRequiredFilter(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::UpgradeFilter(element) => ::core::fmt::Display::fmt(element, f), Self::UpgradedFilter(element) => ::core::fmt::Display::fmt(element, f), } @@ -2777,54 +2777,54 @@ pub mod light_client { match self { Self::UpgradeInterfaceVersion(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::CurrentBlockNumber(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::DisablePermissionedProverMode(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::FinalizedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::GenesisStakeTableState(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::GenesisState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::GetHotShotCommitment(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::GetStateHistoryCount(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::GetVersion(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::Initialize(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::IsPermissionedProverEnabled(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::LagOverEscapeHatchThreshold(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::NewFinalizedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::Owner(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::PermissionedProver(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ProxiableUUID(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::RenounceOwnership(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::SetPermissionedProver(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::SetstateHistoryRetentionPeriod(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StateHistoryCommitments(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StateHistoryFirstIndex(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StateHistoryRetentionPeriod(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, 
Self::TransferOwnership(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::UpgradeToAndCall(element) => ::ethers::core::abi::AbiEncode::encode(element), } @@ -2837,7 +2837,7 @@ pub mod light_client { Self::CurrentBlockNumber(element) => ::core::fmt::Display::fmt(element, f), Self::DisablePermissionedProverMode(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::FinalizedState(element) => ::core::fmt::Display::fmt(element, f), Self::GenesisStakeTableState(element) => ::core::fmt::Display::fmt(element, f), Self::GenesisState(element) => ::core::fmt::Display::fmt(element, f), @@ -2855,7 +2855,7 @@ pub mod light_client { Self::SetPermissionedProver(element) => ::core::fmt::Display::fmt(element, f), Self::SetstateHistoryRetentionPeriod(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::StateHistoryCommitments(element) => ::core::fmt::Display::fmt(element, f), Self::StateHistoryFirstIndex(element) => ::core::fmt::Display::fmt(element, f), Self::StateHistoryRetentionPeriod(element) => ::core::fmt::Display::fmt(element, f), diff --git a/contract-bindings-ethers/src/light_client_arbitrum.rs b/contract-bindings-ethers/src/light_client_arbitrum.rs index df441f0a6d..13fa3c65d5 100644 --- a/contract-bindings-ethers/src/light_client_arbitrum.rs +++ b/contract-bindings-ethers/src/light_client_arbitrum.rs @@ -1726,45 +1726,45 @@ pub mod light_client_arbitrum { Self::AddressEmptyCode(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::ERC1967InvalidImplementation(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ERC1967NonPayable(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::FailedInnerCall(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InsufficientSnapshotHistory(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidAddress(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InvalidArgs(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InvalidHotShotBlockForCommitmentCheck(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidInitialization(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidMaxStateHistory(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidProof(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::NoChangeRequired(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::NotInitializing(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::OutdatedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::OwnableInvalidOwner(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::OwnableUnauthorizedAccount(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ProverNotPermissioned(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::UUPSUnauthorizedCallContext(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::UUPSUnsupportedProxiableUUID(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::WrongStakeTableUsed(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::RevertString(s) => ::ethers::core::abi::AbiEncode::encode(s), } } @@ -1859,7 +1859,7 @@ pub mod light_client_arbitrum { Self::AddressEmptyCode(element) => ::core::fmt::Display::fmt(element, f), Self::ERC1967InvalidImplementation(element) => { 
::core::fmt::Display::fmt(element, f) - } + }, Self::ERC1967NonPayable(element) => ::core::fmt::Display::fmt(element, f), Self::FailedInnerCall(element) => ::core::fmt::Display::fmt(element, f), Self::InsufficientSnapshotHistory(element) => ::core::fmt::Display::fmt(element, f), @@ -1867,7 +1867,7 @@ pub mod light_client_arbitrum { Self::InvalidArgs(element) => ::core::fmt::Display::fmt(element, f), Self::InvalidHotShotBlockForCommitmentCheck(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::InvalidInitialization(element) => ::core::fmt::Display::fmt(element, f), Self::InvalidMaxStateHistory(element) => ::core::fmt::Display::fmt(element, f), Self::InvalidProof(element) => ::core::fmt::Display::fmt(element, f), @@ -1880,7 +1880,7 @@ pub mod light_client_arbitrum { Self::UUPSUnauthorizedCallContext(element) => ::core::fmt::Display::fmt(element, f), Self::UUPSUnsupportedProxiableUUID(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::WrongStakeTableUsed(element) => ::core::fmt::Display::fmt(element, f), Self::RevertString(s) => ::core::fmt::Display::fmt(s, f), } @@ -2178,10 +2178,10 @@ pub mod light_client_arbitrum { Self::OwnershipTransferredFilter(element) => ::core::fmt::Display::fmt(element, f), Self::PermissionedProverNotRequiredFilter(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::PermissionedProverRequiredFilter(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::UpgradeFilter(element) => ::core::fmt::Display::fmt(element, f), Self::UpgradedFilter(element) => ::core::fmt::Display::fmt(element, f), } @@ -2779,54 +2779,54 @@ pub mod light_client_arbitrum { match self { Self::UpgradeInterfaceVersion(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::CurrentBlockNumber(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::DisablePermissionedProverMode(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::FinalizedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::GenesisStakeTableState(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::GenesisState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::GetHotShotCommitment(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::GetStateHistoryCount(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::GetVersion(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::Initialize(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::IsPermissionedProverEnabled(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::LagOverEscapeHatchThreshold(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::NewFinalizedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::Owner(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::PermissionedProver(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ProxiableUUID(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::RenounceOwnership(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::SetPermissionedProver(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::SetstateHistoryRetentionPeriod(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StateHistoryCommitments(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, 
Self::StateHistoryFirstIndex(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StateHistoryRetentionPeriod(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::TransferOwnership(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::UpgradeToAndCall(element) => ::ethers::core::abi::AbiEncode::encode(element), } @@ -2839,7 +2839,7 @@ pub mod light_client_arbitrum { Self::CurrentBlockNumber(element) => ::core::fmt::Display::fmt(element, f), Self::DisablePermissionedProverMode(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::FinalizedState(element) => ::core::fmt::Display::fmt(element, f), Self::GenesisStakeTableState(element) => ::core::fmt::Display::fmt(element, f), Self::GenesisState(element) => ::core::fmt::Display::fmt(element, f), @@ -2857,7 +2857,7 @@ pub mod light_client_arbitrum { Self::SetPermissionedProver(element) => ::core::fmt::Display::fmt(element, f), Self::SetstateHistoryRetentionPeriod(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::StateHistoryCommitments(element) => ::core::fmt::Display::fmt(element, f), Self::StateHistoryFirstIndex(element) => ::core::fmt::Display::fmt(element, f), Self::StateHistoryRetentionPeriod(element) => ::core::fmt::Display::fmt(element, f), diff --git a/contract-bindings-ethers/src/light_client_mock.rs b/contract-bindings-ethers/src/light_client_mock.rs index 951f036d38..3132c0d7e7 100644 --- a/contract-bindings-ethers/src/light_client_mock.rs +++ b/contract-bindings-ethers/src/light_client_mock.rs @@ -1867,45 +1867,45 @@ pub mod light_client_mock { Self::AddressEmptyCode(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::ERC1967InvalidImplementation(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ERC1967NonPayable(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::FailedInnerCall(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InsufficientSnapshotHistory(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidAddress(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InvalidArgs(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InvalidHotShotBlockForCommitmentCheck(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidInitialization(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidMaxStateHistory(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidProof(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::NoChangeRequired(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::NotInitializing(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::OutdatedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::OwnableInvalidOwner(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::OwnableUnauthorizedAccount(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ProverNotPermissioned(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::UUPSUnauthorizedCallContext(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::UUPSUnsupportedProxiableUUID(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::WrongStakeTableUsed(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::RevertString(s) => 
::ethers::core::abi::AbiEncode::encode(s), } } @@ -2000,7 +2000,7 @@ pub mod light_client_mock { Self::AddressEmptyCode(element) => ::core::fmt::Display::fmt(element, f), Self::ERC1967InvalidImplementation(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::ERC1967NonPayable(element) => ::core::fmt::Display::fmt(element, f), Self::FailedInnerCall(element) => ::core::fmt::Display::fmt(element, f), Self::InsufficientSnapshotHistory(element) => ::core::fmt::Display::fmt(element, f), @@ -2008,7 +2008,7 @@ pub mod light_client_mock { Self::InvalidArgs(element) => ::core::fmt::Display::fmt(element, f), Self::InvalidHotShotBlockForCommitmentCheck(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::InvalidInitialization(element) => ::core::fmt::Display::fmt(element, f), Self::InvalidMaxStateHistory(element) => ::core::fmt::Display::fmt(element, f), Self::InvalidProof(element) => ::core::fmt::Display::fmt(element, f), @@ -2021,7 +2021,7 @@ pub mod light_client_mock { Self::UUPSUnauthorizedCallContext(element) => ::core::fmt::Display::fmt(element, f), Self::UUPSUnsupportedProxiableUUID(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::WrongStakeTableUsed(element) => ::core::fmt::Display::fmt(element, f), Self::RevertString(s) => ::core::fmt::Display::fmt(s, f), } @@ -2319,10 +2319,10 @@ pub mod light_client_mock { Self::OwnershipTransferredFilter(element) => ::core::fmt::Display::fmt(element, f), Self::PermissionedProverNotRequiredFilter(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::PermissionedProverRequiredFilter(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::UpgradeFilter(element) => ::core::fmt::Display::fmt(element, f), Self::UpgradedFilter(element) => ::core::fmt::Display::fmt(element, f), } @@ -3015,60 +3015,60 @@ pub mod light_client_mock { match self { Self::UpgradeInterfaceVersion(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::CurrentBlockNumber(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::DisablePermissionedProverMode(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::FinalizedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::GenesisStakeTableState(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::GenesisState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::GetHotShotCommitment(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::GetStateHistoryCount(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::GetVersion(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::Initialize(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::IsPermissionedProverEnabled(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::LagOverEscapeHatchThreshold(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::NewFinalizedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::Owner(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::PermissionedProver(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ProxiableUUID(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::RenounceOwnership(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::SetFinalizedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::SetHotShotDownSince(element) => { 
::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::SetHotShotUp(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::SetPermissionedProver(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::SetStateHistory(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::SetstateHistoryRetentionPeriod(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StateHistoryCommitments(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StateHistoryFirstIndex(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StateHistoryRetentionPeriod(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::TransferOwnership(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::UpgradeToAndCall(element) => ::ethers::core::abi::AbiEncode::encode(element), } @@ -3081,7 +3081,7 @@ pub mod light_client_mock { Self::CurrentBlockNumber(element) => ::core::fmt::Display::fmt(element, f), Self::DisablePermissionedProverMode(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::FinalizedState(element) => ::core::fmt::Display::fmt(element, f), Self::GenesisStakeTableState(element) => ::core::fmt::Display::fmt(element, f), Self::GenesisState(element) => ::core::fmt::Display::fmt(element, f), @@ -3103,7 +3103,7 @@ pub mod light_client_mock { Self::SetStateHistory(element) => ::core::fmt::Display::fmt(element, f), Self::SetstateHistoryRetentionPeriod(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::StateHistoryCommitments(element) => ::core::fmt::Display::fmt(element, f), Self::StateHistoryFirstIndex(element) => ::core::fmt::Display::fmt(element, f), Self::StateHistoryRetentionPeriod(element) => ::core::fmt::Display::fmt(element, f), diff --git a/contract-bindings-ethers/src/permissioned_stake_table.rs b/contract-bindings-ethers/src/permissioned_stake_table.rs index dfabb2739a..aa4e50abbb 100644 --- a/contract-bindings-ethers/src/permissioned_stake_table.rs +++ b/contract-bindings-ethers/src/permissioned_stake_table.rs @@ -763,17 +763,17 @@ pub mod permissioned_stake_table { match self { Self::InvalidInitialization(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::NotInitializing(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::OwnableInvalidOwner(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::OwnableUnauthorizedAccount(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StakerAlreadyExists(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StakerNotFound(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::RevertString(s) => ::ethers::core::abi::AbiEncode::encode(s), } @@ -787,28 +787,28 @@ pub mod permissioned_stake_table { == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ => false, } } @@ -1178,7 +1178,7 @@ pub mod permissioned_stake_table { Self::Initialize(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InitializedAtBlock(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::IsStaker(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::Owner(element) => ::ethers::core::abi::AbiEncode::encode(element), 
Self::RenounceOwnership(element) => ::ethers::core::abi::AbiEncode::encode(element), diff --git a/contract-bindings-ethers/src/plonk_verifier.rs b/contract-bindings-ethers/src/plonk_verifier.rs index 9f5ae4cd72..48b9775bd4 100644 --- a/contract-bindings-ethers/src/plonk_verifier.rs +++ b/contract-bindings-ethers/src/plonk_verifier.rs @@ -442,12 +442,12 @@ pub mod plonk_verifier { [0x08, 0xc3, 0x79, 0xa0] => true, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => true, _ => false, } diff --git a/contracts/rust/adapter/src/jellyfish.rs b/contracts/rust/adapter/src/jellyfish.rs index afdd1a59d5..8b55b99983 100644 --- a/contracts/rust/adapter/src/jellyfish.rs +++ b/contracts/rust/adapter/src/jellyfish.rs @@ -13,10 +13,12 @@ use ethers::{ utils::hex::ToHex, }; use jf_pcs::prelude::Commitment; -use jf_plonk::constants::KECCAK256_STATE_SIZE; -use jf_plonk::proof_system::structs::{OpenKey, Proof, ProofEvaluations, VerifyingKey}; -use jf_plonk::testing_apis::Challenges; -use jf_plonk::transcript::SolidityTranscript; +use jf_plonk::{ + constants::KECCAK256_STATE_SIZE, + proof_system::structs::{OpenKey, Proof, ProofEvaluations, VerifyingKey}, + testing_apis::Challenges, + transcript::SolidityTranscript, +}; use jf_utils::to_bytes; use num_bigint::BigUint; use num_traits::Num; diff --git a/contracts/rust/adapter/src/light_client.rs b/contracts/rust/adapter/src/light_client.rs index fdf8d95a3c..4bca08f08a 100644 --- a/contracts/rust/adapter/src/light_client.rs +++ b/contracts/rust/adapter/src/light_client.rs @@ -4,9 +4,7 @@ use ark_ff::PrimeField; use ark_std::str::FromStr; use diff_test_bn254::{field_to_u256, u256_to_field}; use ethers::{ - abi::AbiDecode, - abi::Token, - abi::Tokenize, + abi::{AbiDecode, Token, Tokenize}, prelude::{AbiError, EthAbiCodec, EthAbiType}, types::U256, }; diff --git a/contracts/rust/adapter/src/stake_table.rs b/contracts/rust/adapter/src/stake_table.rs index 1ea6e98077..5e853ad24f 100644 --- a/contracts/rust/adapter/src/stake_table.rs +++ b/contracts/rust/adapter/src/stake_table.rs @@ -1,4 +1,5 @@ -use crate::jellyfish::u256_to_field; +use std::str::FromStr; + use ark_ec::{ short_weierstrass, twisted_edwards::{self, Affine, TECurveConfig}, @@ -29,7 +30,8 @@ use hotshot_types::{ PeerConfig, }; use serde::{Deserialize, Serialize}; -use std::str::FromStr; + +use crate::jellyfish::u256_to_field; // TODO: (alex) maybe move these commonly shared util to a crate /// convert a field element to U256, panic if field size is larger than 256 bit diff --git a/contracts/rust/diff-test/src/main.rs b/contracts/rust/diff-test/src/main.rs index a07382ff11..d87e221cb8 100644 --- a/contracts/rust/diff-test/src/main.rs +++ b/contracts/rust/diff-test/src/main.rs @@ -2,12 +2,10 @@ use ark_bn254::{Bn254, Fq, Fr, G1Affine, G2Affine}; use ark_ec::{AffineRepr, CurveGroup}; use ark_ed_on_bn254::{EdwardsConfig as EdOnBn254Config, Fq as FqEd254}; use ark_ff::field_hashers::{DefaultFieldHasher, HashToField}; -use ark_poly::domain::radix2::Radix2EvaluationDomain; -use ark_poly::EvaluationDomain; +use ark_poly::{domain::radix2::Radix2EvaluationDomain, EvaluationDomain}; use ark_std::rand::{rngs::StdRng, Rng, SeedableRng}; use clap::{Parser, ValueEnum}; use diff_test_bn254::ParsedG2Point; - use ethers::{ abi::{AbiDecode, AbiEncode, Address}, types::{Bytes, U256}, @@ -17,15 +15,19 @@ use hotshot_state_prover::mock_ledger::{ gen_plonk_proof_for_test, MockLedger, MockSystemParam, STAKE_TABLE_CAPACITY, }; use 
jf_pcs::prelude::Commitment; -use jf_plonk::proof_system::structs::{Proof, VerifyingKey}; -use jf_plonk::proof_system::PlonkKzgSnark; use jf_plonk::{ + proof_system::{ + structs::{Proof, VerifyingKey}, + PlonkKzgSnark, + }, testing_apis::Verifier, transcript::{PlonkTranscript, SolidityTranscript}, }; -use jf_signature::bls_over_bn254::{hash_to_curve, KeyPair as BLSKeyPair, Signature}; -use jf_signature::constants::CS_ID_BLS_BN254; -use jf_signature::schnorr::KeyPair as SchnorrKeyPair; +use jf_signature::{ + bls_over_bn254::{hash_to_curve, KeyPair as BLSKeyPair, Signature}, + constants::CS_ID_BLS_BN254, + schnorr::KeyPair as SchnorrKeyPair, +}; use sha3::Keccak256; #[derive(Parser)] @@ -102,7 +104,7 @@ fn main() { field_to_u256(domain.group_gen), ); println!("{}", res.encode_hex()); - } + }, Action::EvalDomainElements => { if cli.args.len() != 2 { panic!("Should provide arg1=logSize, arg2=length"); @@ -117,7 +119,7 @@ fn main() { .map(field_to_u256) .collect::>(); println!("{}", res.encode_hex()); - } + }, Action::EvalDataGen => { if cli.args.len() != 3 { panic!("Should provide arg1=logSize, arg2=zeta, arg3=publicInput"); @@ -138,7 +140,7 @@ fn main() { field_to_u256(pi_eval), ); println!("{}", res.encode_hex()); - } + }, Action::TranscriptAppendMsg => { if cli.args.len() != 2 { panic!("Should provide arg1=transcript, arg2=message"); @@ -153,7 +155,7 @@ fn main() { >::append_message(&mut t, &[], &msg).unwrap(); let res: ParsedTranscript = t.into(); println!("{}", (res,).encode_hex()); - } + }, Action::TranscriptAppendField => { if cli.args.len() != 2 { panic!("Should provide arg1=transcript, arg2=fieldElement"); @@ -165,7 +167,7 @@ fn main() { t.append_field_elem::(&[], &field).unwrap(); let res: ParsedTranscript = t.into(); println!("{}", (res,).encode_hex()); - } + }, Action::TranscriptAppendGroup => { if cli.args.len() != 2 { panic!("Should provide arg1=transcript, arg2=groupElement"); @@ -179,7 +181,7 @@ fn main() { .unwrap(); let res: ParsedTranscript = t.into(); println!("{}", (res,).encode_hex()); - } + }, Action::TranscriptGetChal => { if cli.args.len() != 1 { panic!("Should provide arg1=transcript"); @@ -193,7 +195,7 @@ fn main() { let updated_t: ParsedTranscript = t.into(); let res = (updated_t, field_to_u256(chal)); println!("{}", res.encode_hex()); - } + }, Action::TranscriptAppendVkAndPi => { if cli.args.len() != 3 { panic!("Should provide arg1=transcript, arg2=verifyingKey, arg3=publicInput"); @@ -210,7 +212,7 @@ fn main() { let res: ParsedTranscript = t.into(); println!("{}", (res,).encode_hex()); - } + }, Action::TranscriptAppendProofEvals => { if cli.args.len() != 1 { panic!("Should provide arg1=transcript"); @@ -232,7 +234,7 @@ fn main() { let t_updated: ParsedTranscript = t.into(); let res = (t_updated, proof_parsed); println!("{}", res.encode_hex()); - } + }, Action::PlonkConstants => { let coset_k = coset_k(); let open_key = open_key(); @@ -250,7 +252,7 @@ fn main() { field_to_u256::(open_key.beta_h.y().unwrap().c0), ); println!("{}", res.encode_hex()); - } + }, Action::PlonkComputeChal => { if cli.args.len() != 4 { panic!("Should provide arg1=verifyingKey, arg2=publicInput, arg3=proof, arg4=extraTranscriptInitMsg"); @@ -275,9 +277,9 @@ fn main() { .unwrap() .into(); println!("{}", (chal,).encode_hex()); - } + }, Action::PlonkVerify => { - let (proof, vk, public_input, _, _): ( + let (proof, vk, public_input, ..): ( Proof, VerifyingKey, Vec, @@ -304,7 +306,7 @@ fn main() { let res = (vk_parsed, pi_parsed, proof_parsed); println!("{}", res.encode_hex()); - } + }, 
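Buried among the formatting-only hunks in `contracts/rust/diff-test/src/main.rs` is one small code cleanup: the `PlonkVerify` destructuring drops its trailing `_, _` placeholders in favor of a rest pattern. A standalone illustration with simplified types (the real tuple carries a Plonk proof, verifying key, and public input):

```rust
fn main() {
    // Five-element tuple standing in for the proof-generation output.
    let output = (b"proof".to_vec(), "vk", vec![1u64, 2, 3], true, 0usize);

    // `..` ignores any number of trailing fields, replacing the older
    // `(proof, vk, public_input, _, _)` spelling.
    let (proof, vk, public_input, ..) = output;

    println!("{} proof bytes, {}, inputs {:?}", proof.len(), vk, public_input);
}
```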
Action::DummyProof => { let mut rng = jf_utils::test_rng(); if !cli.args.is_empty() { @@ -313,10 +315,10 @@ fn main() { } let proof = ParsedPlonkProof::dummy(&mut rng); println!("{}", (proof,).encode_hex()); - } + }, Action::TestOnly => { println!("args: {:?}", cli.args); - } + }, Action::GenClientWallet => { if cli.args.len() != 2 { panic!("Should provide arg1=senderAddress arg2=seed"); @@ -358,7 +360,7 @@ fn main() { sender_address, ); println!("{}", res.encode_hex()); - } + }, Action::GenRandomG2Point => { if cli.args.len() != 1 { panic!("Should provide arg1=exponent"); @@ -370,7 +372,7 @@ fn main() { let point_parsed: ParsedG2Point = point.into(); let res = point_parsed; println!("{}", (res.encode_hex())); - } + }, Action::MockGenesis => { if cli.args.len() != 1 { panic!("Should provide arg1=numInitValidators"); @@ -382,7 +384,7 @@ fn main() { let res = (ledger.get_state(), ledger.get_stake_table_state()); println!("{}", res.encode_hex()); - } + }, Action::MockConsecutiveFinalizedStates => { if cli.args.len() != 1 { panic!("Should provide arg1=numInitValidators"); @@ -413,7 +415,7 @@ fn main() { let res = (new_states, proofs); println!("{}", res.encode_hex()); - } + }, Action::MockSkipBlocks => { if cli.args.is_empty() || cli.args.len() > 2 { panic!("Should provide arg1=numBlockSkipped,arg2(opt)=requireValidProof"); @@ -444,7 +446,7 @@ fn main() { (state_parsed, proof_parsed) }; println!("{}", res.encode_hex()); - } + }, Action::GenBLSHashes => { if cli.args.len() != 1 { panic!("Should provide arg1=message"); @@ -464,7 +466,7 @@ fn main() { let res = (fq_u256, hash_to_curve_elem_parsed); println!("{}", res.encode_hex()); - } + }, Action::GenBLSSig => { let mut rng = jf_utils::test_rng(); @@ -486,6 +488,6 @@ fn main() { let res = (vk_parsed, sig_parsed); println!("{}", res.encode_hex()); - } + }, }; } diff --git a/contracts/rust/gen-vk-contract/src/main.rs b/contracts/rust/gen-vk-contract/src/main.rs index e402297254..5617d34527 100644 --- a/contracts/rust/gen-vk-contract/src/main.rs +++ b/contracts/rust/gen-vk-contract/src/main.rs @@ -5,13 +5,12 @@ use std::{fs::OpenOptions, io::Write, path::PathBuf, process::Command}; +use clap::Parser; use ethers::core::abi::AbiEncode; use hotshot_contract_adapter::jellyfish::ParsedVerifyingKey; use hotshot_stake_table::config::STAKE_TABLE_CAPACITY; use jf_pcs::prelude::UnivariateUniversalParams; -use clap::Parser; - #[derive(Parser)] struct Cli { /// indicate if it's for the mock verification key diff --git a/flake.nix b/flake.nix index 5f856b271f..c735c6e7b9 100644 --- a/flake.nix +++ b/flake.nix @@ -13,10 +13,6 @@ }; inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; - inputs.nixpkgs-legacy-foundry.url = "github:NixOS/nixpkgs/9abb87b552b7f55ac8916b6fc9e5cb486656a2f3"; - - inputs.foundry-nix.url = "github:shazow/foundry.nix/monthly"; # Use monthly branch for permanent releases - inputs.rust-overlay.url = "github:oxalica/rust-overlay"; inputs.nixpkgs-cross-overlay.url = @@ -33,8 +29,6 @@ outputs = { self , nixpkgs - , nixpkgs-legacy-foundry - , foundry-nix , rust-overlay , nixpkgs-cross-overlay , flake-utils @@ -67,7 +61,6 @@ overlays = [ (import rust-overlay) - foundry-nix.overlay solc-bin.overlays.default (final: prev: { solhint = @@ -116,7 +109,7 @@ cargo-fmt = { enable = true; description = "Enforce rustfmt"; - entry = "cargo fmt --all"; + entry = "just fmt"; types_or = [ "rust" "toml" ]; pass_filenames = false; }; @@ -185,7 +178,7 @@ let stableToolchain = pkgs.rust-bin.fromRustupToolchainFile ./rust-toolchain.toml; nightlyToolchain = 
pkgs.rust-bin.selectLatestNightlyWith (toolchain: toolchain.minimal.override { - extensions = [ "rust-analyzer" ]; + extensions = [ "rust-analyzer" "rustfmt" ]; }); solc = pkgs.solc-bin."0.8.23"; in @@ -193,7 +186,7 @@ buildInputs = [ # Rust dependencies pkg-config - openssl + openssl_3 # match ubuntu 24.04 that we use on CI and as base image in docker curl protobuf # to compile libp2p-autonat stableToolchain @@ -208,6 +201,7 @@ typos just nightlyToolchain.passthru.availableComponents.rust-analyzer + nightlyToolchain.passthru.availableComponents.rustfmt # Tools nixpkgs-fmt @@ -223,7 +217,25 @@ coreutils # Ethereum contracts, solidity, ... - foundry-bin + # TODO: remove alloy patch when forge includes this fix: https://github.com/alloy-rs/core/pull/864 + # foundry + (foundry.overrideAttrs { + # Set the resolve limit to 128 by replacing the value in the vendored dependencies. + postPatch = '' + pushd $cargoDepsCopy/alloy-sol-macro-expander + + oldHash=$(sha256sum src/expand/mod.rs | cut -d " " -f 1) + + substituteInPlace src/expand/mod.rs \ + --replace-warn \ + 'const RESOLVE_LIMIT: usize = 32;' 'const RESOLVE_LIMIT: usize = 128;' + + substituteInPlace .cargo-checksum.json \ + --replace-warn $oldHash $(sha256sum src/expand/mod.rs | cut -d " " -f 1) + + popd + ''; + }) solc nodePackages.prettier solhint @@ -243,28 +255,10 @@ # Add rust binaries to PATH for native demo export PATH="$PWD/$CARGO_TARGET_DIR/debug:$PATH" - - # Needed to compile with the sqlite-unbundled feature - export LIBCLANG_PATH="${pkgs.llvmPackages.libclang.lib}/lib"; '' + self.checks.${system}.pre-commit-check.shellHook; RUST_SRC_PATH = "${stableToolchain}/lib/rustlib/src/rust/library"; FOUNDRY_SOLC = "${solc}/bin/solc"; }); - # A shell with foundry v0.3.0 which can still build ethers-rs bindings. - # Can be removed when we are no longer using the ethers-rs bindings. 
- devShells.legacyFoundry = - let - overlays = [ - solc-bin.overlays.default - ]; - pkgs = import nixpkgs-legacy-foundry { inherit system overlays; }; - in - mkShell { - packages = with pkgs; [ - solc - foundry - ]; - }; devShells.crossShell = crossShell { config = "x86_64-unknown-linux-musl"; }; devShells.armCrossShell = @@ -279,7 +273,7 @@ buildInputs = [ # Rust dependencies pkg-config - openssl + openssl_3 curl protobuf # to compile libp2p-autonat toolchain @@ -293,7 +287,7 @@ buildInputs = [ # Rust dependencies pkg-config - openssl + openssl_3 curl protobuf # to compile libp2p-autonat toolchain @@ -316,7 +310,7 @@ buildInputs = [ # Rust dependencies pkg-config - openssl + openssl_3 curl protobuf # to compile libp2p-autonat stableToolchain diff --git a/hotshot-builder-api/src/api.rs b/hotshot-builder-api/src/api.rs index 04042630c1..5250d1320a 100644 --- a/hotshot-builder-api/src/api.rs +++ b/hotshot-builder-api/src/api.rs @@ -34,7 +34,7 @@ fn merge_toml(into: &mut Value, from: Value) { Entry::Occupied(mut entry) => merge_toml(entry.get_mut(), value), Entry::Vacant(entry) => { entry.insert(value); - } + }, } } } diff --git a/hotshot-builder-core-refactored/src/block_size_limits.rs b/hotshot-builder-core-refactored/src/block_size_limits.rs index 47cca3aca2..b33cc656ae 100644 --- a/hotshot-builder-core-refactored/src/block_size_limits.rs +++ b/hotshot-builder-core-refactored/src/block_size_limits.rs @@ -1,6 +1,7 @@ +use std::sync::atomic::Ordering; + use atomic::Atomic; use coarsetime::{Duration, Instant}; -use std::sync::atomic::Ordering; #[derive(Debug, Clone, Copy, bytemuck::NoUninit)] #[repr(C)] diff --git a/hotshot-builder-core-refactored/src/block_store.rs b/hotshot-builder-core-refactored/src/block_store.rs index 26e5dcaf2b..6119ae7eda 100644 --- a/hotshot-builder-core-refactored/src/block_store.rs +++ b/hotshot-builder-core-refactored/src/block_store.rs @@ -2,17 +2,14 @@ use std::marker::PhantomData; use hotshot::traits::BlockPayload; use hotshot_builder_api::v0_1::block_info::AvailableBlockInfo; -use hotshot_types::traits::signature_key::BuilderSignatureKey; -use marketplace_builder_shared::error::Error; -use marketplace_builder_shared::utils::BuilderKeys; +use hotshot_types::traits::{node_implementation::NodeType, signature_key::BuilderSignatureKey}; use marketplace_builder_shared::{ - block::BuilderStateId, coordinator::tiered_view_map::TieredViewMap, + block::{BlockId, BuilderStateId}, + coordinator::tiered_view_map::TieredViewMap, + error::Error, + utils::BuilderKeys, }; -use marketplace_builder_shared::block::BlockId; - -use hotshot_types::traits::node_implementation::NodeType; - // It holds all the necessary information for a block #[derive(Debug, Clone)] pub struct BlockInfo { diff --git a/hotshot-builder-core-refactored/src/service.rs b/hotshot-builder-core-refactored/src/service.rs index d9831565d7..d3c17a7464 100644 --- a/hotshot-builder-core-refactored/src/service.rs +++ b/hotshot-builder-core-refactored/src/service.rs @@ -1,3 +1,21 @@ +use std::{ + fmt::Display, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::{Duration, Instant}, +}; + +pub use async_broadcast::{broadcast, RecvError, TryRecvError}; +use async_lock::RwLock; +use async_trait::async_trait; +use committable::Commitment; +use futures::{ + future::BoxFuture, + stream::{FuturesOrdered, FuturesUnordered, StreamExt}, + Stream, TryStreamExt, +}; use hotshot::types::Event; use hotshot_builder_api::{ v0_1::{ @@ -9,51 +27,38 @@ use hotshot_builder_api::{ }, 
v0_2::block_info::AvailableBlockHeaderInputV1, }; -use hotshot_types::traits::block_contents::Transaction; -use hotshot_types::traits::EncodeBytes; use hotshot_types::{ data::VidCommitment, event::EventType, traits::{ - block_contents::BlockPayload, + block_contents::{BlockPayload, Transaction}, node_implementation::{ConsensusTime, NodeType}, signature_key::{BuilderSignatureKey, SignatureKey}, + EncodeBytes, }, utils::BuilderCommitment, }; -use marketplace_builder_shared::coordinator::BuilderStateLookup; -use marketplace_builder_shared::error::Error; -use marketplace_builder_shared::state::BuilderState; -use marketplace_builder_shared::utils::BuilderKeys; use marketplace_builder_shared::{ block::{BlockId, BuilderStateId, ReceivedTransaction, TransactionSource}, - coordinator::BuilderStateCoordinator, + coordinator::{BuilderStateCoordinator, BuilderStateLookup}, + error::Error, + state::BuilderState, + utils::BuilderKeys, +}; +use tagged_base64::TaggedBase64; +use tide_disco::{app::AppError, method::ReadState, App}; +use tokio::{ + spawn, + task::JoinHandle, + time::{sleep, timeout}, }; -use tide_disco::app::AppError; -use tokio::spawn; -use tokio::time::{sleep, timeout}; use tracing::{error, info, instrument, trace, warn}; use vbs::version::StaticVersion; -use crate::block_size_limits::BlockSizeLimits; -use crate::block_store::{BlockInfo, BlockStore}; -pub use async_broadcast::{broadcast, RecvError, TryRecvError}; -use async_lock::RwLock; -use async_trait::async_trait; -use committable::Commitment; -use futures::{future::BoxFuture, Stream}; -use futures::{ - stream::{FuturesOrdered, FuturesUnordered, StreamExt}, - TryStreamExt, +use crate::{ + block_size_limits::BlockSizeLimits, + block_store::{BlockInfo, BlockStore}, }; -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering; -use std::sync::Arc; -use std::time::Duration; -use std::{fmt::Display, time::Instant}; -use tagged_base64::TaggedBase64; -use tide_disco::{method::ReadState, App}; -use tokio::task::JoinHandle; /// Proportion of overall allotted time to wait for optimal builder state /// to appear before resorting to highest view builder state @@ -201,7 +206,7 @@ where match event.event { EventType::Error { error } => { error!("Error event in HotShot: {:?}", error); - } + }, EventType::Transactions { transactions } => { let this = Arc::clone(&self); spawn(async move { @@ -217,7 +222,7 @@ where .collect::>() .await; }); - } + }, EventType::Decide { leaf_chain, .. } => { let prune_cutoff = leaf_chain[0].leaf.view_number(); @@ -226,16 +231,16 @@ where let this = Arc::clone(&self); spawn(async move { this.block_store.write().await.prune(prune_cutoff) }); - } + }, EventType::DaProposal { proposal, .. } => { let coordinator = Arc::clone(&self.coordinator); spawn(async move { coordinator.handle_da_proposal(proposal.data).await }); - } + }, EventType::QuorumProposal { proposal, .. 
} => { let coordinator = Arc::clone(&self.coordinator); spawn(async move { coordinator.handle_quorum_proposal(proposal.data).await }); - } - _ => {} + }, + _ => {}, } } } @@ -287,10 +292,10 @@ where BuilderStateLookup::Found(builder) => break Ok(builder), BuilderStateLookup::Decided => { return Err(Error::AlreadyDecided); - } + }, BuilderStateLookup::NotFound => { sleep(check_period).await; - } + }, }; } } @@ -374,7 +379,7 @@ where Err(error) => { warn!(?error, "Failed to build block payload"); return Err(Error::BuildBlock(error)); - } + }, }; // count the number of txns @@ -442,7 +447,7 @@ where // Timeout waiting for ideal state, get the highest view builder instead warn!("Couldn't find the ideal builder state"); self.coordinator.highest_view_builder().await - } + }, Ok(Err(e)) => { // State already decided let lowest_view = self.coordinator.lowest_view().await; @@ -451,7 +456,7 @@ where "get_available_blocks request for decided view" ); return Err(e); - } + }, }; let Some(builder) = builder else { @@ -485,7 +490,7 @@ where } Ok(vec![response]) - } + }, // Success, but no block: we don't have transactions and aren't prioritizing finalization Ok(Ok(None)) => Ok(vec![]), // Error building block, try to respond with a cached one as last-ditch attempt @@ -495,7 +500,7 @@ where } else { Err(e) } - } + }, } } diff --git a/hotshot-builder-core-refactored/src/testing/basic.rs b/hotshot-builder-core-refactored/src/testing/basic.rs index f353774c0a..24ace84754 100644 --- a/hotshot-builder-core-refactored/src/testing/basic.rs +++ b/hotshot-builder-core-refactored/src/testing/basic.rs @@ -1,29 +1,37 @@ +use std::{sync::Arc, time::Duration}; + use async_broadcast::broadcast; use hotshot::types::{EventType, SignatureKey}; - use hotshot_builder_api::v0_1::data_source::BuilderDataSource; -use hotshot_example_types::block_types::{TestBlockHeader, TestMetadata, TestTransaction}; -use hotshot_example_types::node_types::{TestTypes, TestVersions}; -use hotshot_example_types::state_types::{TestInstanceState, TestValidatedState}; -use hotshot_types::data::VidCommitment; -use hotshot_types::data::{Leaf2, QuorumProposal2, QuorumProposalWrapper, ViewNumber}; -use hotshot_types::event::LeafInfo; -use hotshot_types::simple_certificate::QuorumCertificate2; -use hotshot_types::traits::block_contents::BlockHeader; -use hotshot_types::traits::node_implementation::{ConsensusTime, NodeType}; -use hotshot_types::utils::BuilderCommitment; -use marketplace_builder_shared::error::Error; -use marketplace_builder_shared::testing::consensus::SimulatedChainState; -use marketplace_builder_shared::testing::constants::{ - TEST_NUM_NODES_IN_VID_COMPUTATION, TEST_PROTOCOL_MAX_BLOCK_SIZE, +use hotshot_example_types::{ + block_types::{TestBlockHeader, TestMetadata, TestTransaction}, + node_types::{TestTypes, TestVersions}, + state_types::{TestInstanceState, TestValidatedState}, +}; +use hotshot_types::{ + data::{Leaf2, QuorumProposal2, QuorumProposalWrapper, VidCommitment, ViewNumber}, + event::LeafInfo, + simple_certificate::QuorumCertificate2, + traits::{ + block_contents::BlockHeader, + node_implementation::{ConsensusTime, NodeType}, + }, + utils::BuilderCommitment, +}; +use marketplace_builder_shared::{ + error::Error, + testing::{ + consensus::SimulatedChainState, + constants::{TEST_NUM_NODES_IN_VID_COMPUTATION, TEST_PROTOCOL_MAX_BLOCK_SIZE}, + }, }; use tokio::time::sleep; use tracing_test::traced_test; -use crate::service::{BuilderConfig, GlobalState, ProxyGlobalState}; -use crate::testing::{assert_eq_generic_err, sign, 
TestServiceWrapper, MOCK_LEADER_KEYS}; -use std::sync::Arc; -use std::time::Duration; +use crate::{ + service::{BuilderConfig, GlobalState, ProxyGlobalState}, + testing::{assert_eq_generic_err, sign, TestServiceWrapper, MOCK_LEADER_KEYS}, +}; /// This test simulates consensus performing as expected and builder processing a number /// of transactions diff --git a/hotshot-builder-core-refactored/src/testing/block_size.rs b/hotshot-builder-core-refactored/src/testing/block_size.rs index 7ba6c672c3..85620896fe 100644 --- a/hotshot-builder-core-refactored/src/testing/block_size.rs +++ b/hotshot-builder-core-refactored/src/testing/block_size.rs @@ -1,22 +1,27 @@ +use std::{ + sync::{atomic::Ordering, Arc}, + time::Duration, +}; + use async_broadcast::broadcast; use committable::Committable; use hotshot_builder_api::v0_1::builder::TransactionStatus; -use hotshot_example_types::block_types::TestTransaction; -use hotshot_example_types::state_types::TestInstanceState; -use hotshot_types::data::VidCommitment; -use hotshot_types::data::ViewNumber; -use hotshot_types::traits::node_implementation::ConsensusTime; -use marketplace_builder_shared::block::{BlockId, BuilderStateId}; -use marketplace_builder_shared::testing::consensus::SimulatedChainState; -use marketplace_builder_shared::testing::constants::TEST_NUM_NODES_IN_VID_COMPUTATION; +use hotshot_example_types::{block_types::TestTransaction, state_types::TestInstanceState}; +use hotshot_types::{ + data::{VidCommitment, ViewNumber}, + traits::node_implementation::ConsensusTime, +}; +use marketplace_builder_shared::{ + block::{BlockId, BuilderStateId}, + testing::{consensus::SimulatedChainState, constants::TEST_NUM_NODES_IN_VID_COMPUTATION}, +}; use tracing_test::traced_test; -use crate::block_size_limits::BlockSizeLimits; -use crate::service::{BuilderConfig, GlobalState}; -use crate::testing::TestServiceWrapper; -use std::sync::atomic::Ordering; -use std::sync::Arc; -use std::time::Duration; +use crate::{ + block_size_limits::BlockSizeLimits, + service::{BuilderConfig, GlobalState}, + testing::TestServiceWrapper, +}; /// This tests simulates size limits being decreased lower than our capacity /// and then checks that size limits return to protocol maximum over time diff --git a/hotshot-builder-core-refactored/src/testing/finalization.rs b/hotshot-builder-core-refactored/src/testing/finalization.rs index 13b1d21678..40fd3833fb 100644 --- a/hotshot-builder-core-refactored/src/testing/finalization.rs +++ b/hotshot-builder-core-refactored/src/testing/finalization.rs @@ -1,17 +1,18 @@ +use std::sync::Arc; + use async_broadcast::broadcast; +use hotshot_example_types::{block_types::TestTransaction, state_types::TestInstanceState}; +use marketplace_builder_shared::testing::{ + consensus::SimulatedChainState, + constants::{TEST_NUM_NODES_IN_VID_COMPUTATION, TEST_PROTOCOL_MAX_BLOCK_SIZE}, +}; use tracing_test::traced_test; -use hotshot_example_types::block_types::TestTransaction; -use hotshot_example_types::state_types::TestInstanceState; -use marketplace_builder_shared::testing::consensus::SimulatedChainState; -use marketplace_builder_shared::testing::constants::{ - TEST_NUM_NODES_IN_VID_COMPUTATION, TEST_PROTOCOL_MAX_BLOCK_SIZE, +use crate::{ + service::{BuilderConfig, GlobalState, ALLOW_EMPTY_BLOCK_PERIOD}, + testing::TestServiceWrapper, }; -use crate::service::{BuilderConfig, GlobalState, ALLOW_EMPTY_BLOCK_PERIOD}; -use crate::testing::TestServiceWrapper; -use std::sync::Arc; - // How many times consensus will re-try getting available blocks const 
NUM_RETRIES: usize = 5; diff --git a/hotshot-builder-core-refactored/src/testing/integration.rs b/hotshot-builder-core-refactored/src/testing/integration.rs index f8df6e0a6a..41c6a84270 100644 --- a/hotshot-builder-core-refactored/src/testing/integration.rs +++ b/hotshot-builder-core-refactored/src/testing/integration.rs @@ -118,21 +118,20 @@ where mod tests { use std::time::Duration; - use crate::testing::integration::LegacyBuilderImpl; - use marketplace_builder_shared::testing::{ - generation::{self, TransactionGenerationConfig}, - run_test, - validation::BuilderValidationConfig, - }; - - use hotshot_example_types::node_types::TestVersions; - use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; + use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; use hotshot_macros::cross_tests; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, test_builder::TestDescription, }; + use marketplace_builder_shared::testing::{ + generation::{self, TransactionGenerationConfig}, + run_test, + validation::BuilderValidationConfig, + }; + + use crate::testing::integration::LegacyBuilderImpl; #[tokio::test(flavor = "multi_thread")] #[tracing::instrument] diff --git a/hotshot-builder-core-refactored/src/testing/mod.rs b/hotshot-builder-core-refactored/src/testing/mod.rs index 08207ed994..e3b8c9ecbb 100644 --- a/hotshot-builder-core-refactored/src/testing/mod.rs +++ b/hotshot-builder-core-refactored/src/testing/mod.rs @@ -2,26 +2,30 @@ #![allow(clippy::declare_interior_mutable_const)] #![allow(clippy::borrow_interior_mutable_const)] -use std::cell::LazyCell; -use std::sync::Arc; -use std::time::Duration; +use std::{cell::LazyCell, sync::Arc, time::Duration}; use async_broadcast::Sender; use committable::Commitment; -use hotshot::rand::{thread_rng, Rng}; -use hotshot::types::{BLSPubKey, Event, EventType, SignatureKey}; -use hotshot_builder_api::v0_1::block_info::AvailableBlockHeaderInputV1; -use hotshot_builder_api::v0_1::builder::BuildError; -use hotshot_builder_api::v0_1::data_source::AcceptsTxnSubmits; -use hotshot_builder_api::v0_1::{block_info::AvailableBlockInfo, data_source::BuilderDataSource}; -use hotshot_example_types::block_types::TestTransaction; -use hotshot_example_types::node_types::TestTypes; +use hotshot::{ + rand::{thread_rng, Rng}, + types::{BLSPubKey, Event, EventType, SignatureKey}, +}; +use hotshot_builder_api::v0_1::{ + block_info::{AvailableBlockHeaderInputV1, AvailableBlockInfo}, + builder::BuildError, + data_source::{AcceptsTxnSubmits, BuilderDataSource}, +}; +use hotshot_example_types::{block_types::TestTransaction, node_types::TestTypes}; use hotshot_task_impls::builder::v0_1::BuilderClient; -use hotshot_types::data::ViewNumber; -use hotshot_types::traits::node_implementation::{ConsensusTime, NodeType}; -use marketplace_builder_shared::block::{BlockId, BuilderStateId}; -use marketplace_builder_shared::error::Error; -use marketplace_builder_shared::utils::BuilderKeys; +use hotshot_types::{ + data::ViewNumber, + traits::node_implementation::{ConsensusTime, NodeType}, +}; +use marketplace_builder_shared::{ + block::{BlockId, BuilderStateId}, + error::Error, + utils::BuilderKeys, +}; use tokio::spawn; use url::Url; use vbs::version::StaticVersion; diff --git a/hotshot-builder-core/src/builder_state.rs b/hotshot-builder-core/src/builder_state.rs index 558d0d8767..0f156370e0 100644 --- a/hotshot-builder-core/src/builder_state.rs +++ 
b/hotshot-builder-core/src/builder_state.rs @@ -1,3 +1,17 @@ +use core::panic; +use std::{ + cmp::PartialEq, + collections::{hash_map::Entry, HashMap, HashSet, VecDeque}, + fmt::Debug, + marker::PhantomData, + sync::Arc, + time::{Duration, Instant}, +}; + +use async_broadcast::{broadcast, Receiver as BroadcastReceiver, Sender as BroadcastSender}; +use async_lock::RwLock; +use committable::{Commitment, Committable}; +use futures::StreamExt; use hotshot_types::{ data::{DaProposal2, Leaf2, QuorumProposalWrapper}, message::Proposal, @@ -9,29 +23,13 @@ use hotshot_types::{ utils::BuilderCommitment, }; use marketplace_builder_shared::block::{BlockId, BuilderStateId, ParentBlockReferences}; - -use committable::{Commitment, Committable}; - -use crate::service::{GlobalState, ReceivedTransaction}; -use async_broadcast::broadcast; -use async_broadcast::Receiver as BroadcastReceiver; -use async_broadcast::Sender as BroadcastSender; -use async_lock::RwLock; -use core::panic; -use futures::StreamExt; - use tokio::{ spawn, sync::{mpsc::UnboundedSender, oneshot}, time::sleep, }; -use std::collections::{HashMap, HashSet, VecDeque}; -use std::fmt::Debug; -use std::sync::Arc; -use std::time::Instant; -use std::{cmp::PartialEq, marker::PhantomData}; -use std::{collections::hash_map::Entry, time::Duration}; +use crate::service::{GlobalState, ReceivedTransaction}; pub type TxTimeStamp = u128; @@ -295,7 +293,7 @@ async fn best_builder_states_to_extend( Some(parent_block_references) => { parent_block_references.leaf_commit == justify_qc.data.leaf_commit && parent_block_references.view_number == justify_qc.view_number - } + }, }, ) .map(|(builder_state_id, _)| builder_state_id.clone()) @@ -1102,15 +1100,15 @@ impl BuilderState { } self.txns_in_queue.insert(tx.commit); self.tx_queue.push_back(tx); - } + }, Err(async_broadcast::TryRecvError::Empty) | Err(async_broadcast::TryRecvError::Closed) => { break; - } + }, Err(async_broadcast::TryRecvError::Overflowed(lost)) => { tracing::warn!("Missed {lost} transactions due to backlog"); continue; - } + }, } } } @@ -1122,20 +1120,19 @@ mod test { use async_broadcast::broadcast; use committable::RawCommitmentBuilder; - use hotshot_example_types::block_types::TestTransaction; - use hotshot_example_types::node_types::TestTypes; - use hotshot_example_types::node_types::TestVersions; - use hotshot_types::data::Leaf2; - use hotshot_types::data::QuorumProposalWrapper; - use hotshot_types::data::ViewNumber; - use hotshot_types::traits::node_implementation::{ConsensusTime, NodeType}; - use hotshot_types::utils::BuilderCommitment; + use hotshot_example_types::{ + block_types::TestTransaction, + node_types::{TestTypes, TestVersions}, + }; + use hotshot_types::{ + data::{Leaf2, QuorumProposalWrapper, ViewNumber}, + traits::node_implementation::{ConsensusTime, NodeType}, + utils::BuilderCommitment, + }; use marketplace_builder_shared::testing::constants::TEST_NUM_NODES_IN_VID_COMPUTATION; use tracing_subscriber::EnvFilter; - use super::DAProposalInfo; - use super::MessageType; - use super::ParentBlockReferences; + use super::{DAProposalInfo, MessageType, ParentBlockReferences}; use crate::testing::{calc_builder_commitment, calc_proposal_msg, create_builder_state}; /// This test the function `process_da_proposal`. 
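The builder_state.rs hunk above reshapes the non-blocking drain loop around `async_broadcast`'s `try_recv`, distinguishing an empty or closed channel (stop draining) from an overflowed one (log the lost count and keep going). A minimal, self-contained sketch of that pattern follows, assuming only the `async-broadcast` crate; the `u64` payload, channel capacity, and the `drain_pending` name are placeholders for illustration, not code from this patch:

    use async_broadcast::{broadcast, TryRecvError};

    /// Drain every message currently buffered in `rx` without awaiting.
    fn drain_pending(rx: &mut async_broadcast::Receiver<u64>) -> Vec<u64> {
        let mut drained = Vec::new();
        loop {
            match rx.try_recv() {
                // A message was already buffered; take it and keep going.
                Ok(msg) => drained.push(msg),
                // Nothing left (or all senders dropped): stop draining.
                Err(TryRecvError::Empty) | Err(TryRecvError::Closed) => break,
                // The bounded channel overflowed; older messages were dropped.
                Err(TryRecvError::Overflowed(lost)) => {
                    eprintln!("missed {lost} messages due to backlog");
                    continue;
                },
            }
        }
        drained
    }

    fn main() {
        let (tx, mut rx) = broadcast(8);
        tx.try_broadcast(1u64).unwrap();
        tx.try_broadcast(2u64).unwrap();
        assert_eq!(drain_pending(&mut rx), vec![1, 2]);
    }

Note the trailing commas after the block-bodied match arms: the same `match_block_trailing_comma` rustfmt style that this patch series applies mechanically throughout the diffs above and below.
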
diff --git a/hotshot-builder-core/src/service.rs b/hotshot-builder-core/src/service.rs index a45c4039a0..06176a217b 100644 --- a/hotshot-builder-core/src/service.rs +++ b/hotshot-builder-core/src/service.rs @@ -1,3 +1,17 @@ +use std::{ + collections::HashMap, + fmt::Display, + num::NonZeroUsize, + sync::Arc, + time::{Duration, Instant}, +}; + +pub use async_broadcast::{broadcast, RecvError, TryRecvError}; +use async_broadcast::{Sender as BroadcastSender, TrySendError}; +use async_lock::RwLock; +use async_trait::async_trait; +use committable::{Commitment, Committable}; +use futures::{future::BoxFuture, stream::StreamExt, Stream}; use hotshot::types::Event; use hotshot_builder_api::{ v0_1::{ @@ -8,8 +22,7 @@ use hotshot_builder_api::{ v0_2::builder::TransactionStatus, }; use hotshot_types::{ - data::VidCommitment, - data::{DaProposal2, Leaf2, QuorumProposalWrapper}, + data::{DaProposal2, Leaf2, QuorumProposalWrapper, VidCommitment}, event::EventType, message::Proposal, traits::{ @@ -20,33 +33,20 @@ use hotshot_types::{ utils::BuilderCommitment, }; use lru::LruCache; -use vbs::version::StaticVersionType; - -use crate::builder_state::{ - BuildBlockInfo, DaProposalMessage, DecideMessage, QuorumProposalMessage, TransactionSource, - TriggerStatus, -}; -use crate::builder_state::{MessageType, RequestMessage, ResponseMessage}; -pub use async_broadcast::{broadcast, RecvError, TryRecvError}; -use async_broadcast::{Sender as BroadcastSender, TrySendError}; -use async_lock::RwLock; -use async_trait::async_trait; -use committable::{Commitment, Committable}; -use futures::stream::StreamExt; -use futures::{future::BoxFuture, Stream}; use marketplace_builder_shared::block::{BlockId, BuilderStateId, ParentBlockReferences}; use sha2::{Digest, Sha256}; -use std::collections::HashMap; -use std::num::NonZeroUsize; -use std::sync::Arc; -use std::time::Duration; -use std::{fmt::Display, time::Instant}; use tagged_base64::TaggedBase64; use tide_disco::method::ReadState; use tokio::{ sync::{mpsc::unbounded_channel, oneshot}, time::{sleep, timeout}, }; +use vbs::version::StaticVersionType; + +use crate::builder_state::{ + BuildBlockInfo, DaProposalMessage, DecideMessage, MessageType, QuorumProposalMessage, + RequestMessage, ResponseMessage, TransactionSource, TriggerStatus, +}; // It holds all the necessary information for a block #[derive(Debug)] @@ -409,19 +409,19 @@ impl GlobalState { match old_status { Some(TransactionStatus::Rejected { reason }) => { tracing::debug!("Changing the status of a rejected transaction to status {:?}! The reason it is previously rejected is {:?}", txn_status, reason); - } + }, Some(TransactionStatus::Sequenced { leaf }) => { let e = format!("Changing the status of a sequenced transaction to status {:?} is not allowed! The transaction is sequenced in leaf {:?}", txn_status, leaf); tracing::error!(e); return Err(BuildError::Error(e)); - } + }, _ => { tracing::debug!( "change status of transaction {txn_hash} from {:?} to {:?}", old_status, txn_status ); - } + }, } } else { tracing::debug!( @@ -540,23 +540,23 @@ impl From> for BuildError { match error { AvailableBlocksError::SignatureValidationFailed => { BuildError::Error("Signature validation failed in get_available_blocks".to_string()) - } + }, AvailableBlocksError::RequestForAvailableViewThatHasAlreadyBeenDecided => { BuildError::Error( "Request for available blocks for a view that has already been decided." 
.to_string(), ) - } + }, AvailableBlocksError::SigningBlockFailed(e) => { BuildError::Error(format!("Signing over block info failed: {:?}", e)) - } + }, AvailableBlocksError::GetChannelForMatchingBuilderError(e) => e.into(), AvailableBlocksError::NoBlocksAvailable => { BuildError::Error("No blocks available".to_string()) - } + }, AvailableBlocksError::ChannelUnexpectedlyClosed => { BuildError::Error("Channel unexpectedly closed".to_string()) - } + }, } } } @@ -580,13 +580,13 @@ impl From> for BuildError { match error { ClaimBlockError::SignatureValidationFailed => { BuildError::Error("Signature validation failed in claim block".to_string()) - } + }, ClaimBlockError::SigningCommitmentFailed(e) => { BuildError::Error(format!("Signing over builder commitment failed: {:?}", e)) - } + }, ClaimBlockError::BlockDataNotFound => { BuildError::Error("Block data not found".to_string()) - } + }, } } } @@ -608,10 +608,10 @@ impl From> for BuildError { ), ClaimBlockHeaderInputError::BlockHeaderNotFound => { BuildError::Error("Block header not found".to_string()) - } + }, ClaimBlockHeaderInputError::FailedToSignFeeInfo(e) => { BuildError::Error(format!("Failed to sign fee info: {:?}", e)) - } + }, } } } @@ -743,7 +743,7 @@ impl ProxyGlobalState { break Err(AvailableBlocksError::NoBlocksAvailable); } continue; - } + }, Ok(recv_attempt) => { if recv_attempt.is_none() { tracing::error!( @@ -752,7 +752,7 @@ impl ProxyGlobalState { } break recv_attempt .ok_or_else(|| AvailableBlocksError::ChannelUnexpectedlyClosed); - } + }, } }; @@ -783,13 +783,13 @@ impl ProxyGlobalState { response.builder_hash ); Ok(vec![initial_block_info]) - } + }, // We failed to get available blocks Err(e) => { tracing::debug!("Failed to get available blocks for parent {state_id}",); Err(e) - } + }, } } @@ -1111,7 +1111,7 @@ pub async fn run_non_permissioned_standalone_builder_service< match event.event { EventType::Error { error } => { tracing::error!("Error event in HotShot: {:?}", error); - } + }, // tx event EventType::Transactions { transactions } => { let max_block_size = { @@ -1151,7 +1151,7 @@ pub async fn run_non_permissioned_standalone_builder_service< .await?; } } - } + }, // decide event EventType::Decide { block_size: _, @@ -1160,19 +1160,19 @@ pub async fn run_non_permissioned_standalone_builder_service< } => { let latest_decide_view_num = leaf_chain[0].leaf.view_number(); handle_decide_event(&decide_sender, latest_decide_view_num).await; - } + }, // DA proposal event EventType::DaProposal { proposal, sender } => { handle_da_event(&da_sender, Arc::new(proposal), sender).await; - } + }, // QC proposal event EventType::QuorumProposal { proposal, sender } => { // get the leader for current view handle_quorum_event(&quorum_sender, Arc::new(proposal), sender).await; - } + }, _ => { tracing::debug!("Unhandled event from Builder"); - } + }, } } } @@ -1533,32 +1533,33 @@ mod test { use std::{sync::Arc, time::Duration}; use async_lock::RwLock; - use committable::Commitment; - use committable::Committable; + use committable::{Commitment, Committable}; use futures::StreamExt; use hotshot::{ traits::BlockPayload, types::{BLSPubKey, SignatureKey}, }; - use hotshot_builder_api::v0_1::data_source::AcceptsTxnSubmits; - use hotshot_builder_api::v0_2::block_info::AvailableBlockInfo; - use hotshot_builder_api::v0_2::builder::TransactionStatus; + use hotshot_builder_api::{ + v0_1::data_source::AcceptsTxnSubmits, + v0_2::{block_info::AvailableBlockInfo, builder::TransactionStatus}, + }; use hotshot_example_types::{ 
block_types::{TestBlockPayload, TestMetadata, TestTransaction}, node_types::{TestTypes, TestVersions}, state_types::{TestInstanceState, TestValidatedState}, }; - use hotshot_types::data::DaProposal2; - use hotshot_types::data::EpochNumber; - use hotshot_types::data::Leaf2; - use hotshot_types::data::{QuorumProposal2, QuorumProposalWrapper}; - use hotshot_types::simple_certificate::QuorumCertificate2; - use hotshot_types::traits::block_contents::Transaction; - use hotshot_types::traits::node_implementation::Versions; use hotshot_types::{ - data::{vid_commitment, Leaf, ViewNumber}, + data::{ + vid_commitment, DaProposal2, EpochNumber, Leaf, Leaf2, QuorumProposal2, + QuorumProposalWrapper, ViewNumber, + }, message::Proposal, - traits::{node_implementation::ConsensusTime, signature_key::BuilderSignatureKey}, + simple_certificate::QuorumCertificate2, + traits::{ + block_contents::Transaction, + node_implementation::{ConsensusTime, Versions}, + signature_key::BuilderSignatureKey, + }, utils::BuilderCommitment, }; use marketplace_builder_shared::{ @@ -1575,6 +1576,11 @@ mod test { }; use vbs::version::StaticVersionType; + use super::{ + handle_da_event_implementation, handle_quorum_event_implementation, AvailableBlocksError, + BlockInfo, ClaimBlockError, ClaimBlockHeaderInputError, GlobalState, HandleDaEventError, + HandleQuorumEventError, HandleReceivedTxns, ProxyGlobalState, + }; use crate::{ builder_state::{ BuildBlockInfo, MessageType, RequestMessage, ResponseMessage, TransactionSource, @@ -1587,12 +1593,6 @@ mod test { }, }; - use super::{ - handle_da_event_implementation, handle_quorum_event_implementation, AvailableBlocksError, - BlockInfo, ClaimBlockError, ClaimBlockHeaderInputError, GlobalState, HandleDaEventError, - HandleQuorumEventError, HandleReceivedTxns, ProxyGlobalState, - }; - /// A const number on `max_tx_len` to be used consistently spanning all the tests /// It is set to 1 as current estimation on `TestTransaction` is 1 const TEST_MAX_TX_LEN: u64 = 1; @@ -2141,10 +2141,10 @@ mod test { match vid_trigger_receiver.await { Ok(TriggerStatus::Start) => { // This is expected - } + }, _ => { panic!("did not receive TriggerStatus::Start from vid_trigger_receiver as expected"); - } + }, } } @@ -2366,10 +2366,10 @@ mod test { match vid_trigger_receiver_2.await { Ok(TriggerStatus::Start) => { // This is expected - } + }, _ => { panic!("did not receive TriggerStatus::Start from vid_trigger_receiver as expected"); - } + }, } assert!( @@ -2960,13 +2960,13 @@ mod test { Err(AvailableBlocksError::NoBlocksAvailable) => { // This is what we expect. // This message *should* indicate that no blocks were available. - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, } } @@ -3032,13 +3032,13 @@ mod test { // This is what we expect. // This message *should* indicate that the signature passed // did not match the given public key. - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, } } @@ -3103,13 +3103,13 @@ mod test { // This is what we expect. // This message *should* indicate that the signature passed // did not match the given public key. - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, } } @@ -3172,13 +3172,13 @@ mod test { Err(AvailableBlocksError::GetChannelForMatchingBuilderError(_)) => { // This is what we expect. 
// This message *should* indicate that the response channel was closed. - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, } } @@ -3285,17 +3285,17 @@ mod test { let response_channel = match response_receiver.next().await { None => { panic!("Expected a request for available blocks, but didn't get one"); - } + }, Some(MessageType::RequestMessage(req_msg)) => { assert_eq!(req_msg.state_id, expected_builder_state_id); req_msg.response_channel - } + }, Some(message) => { panic!( "Expected a request for available blocks, but got a different message: {:?}", message ); - } + }, }; // We want to send a ResponseMessage to the channel @@ -3316,7 +3316,7 @@ mod test { match result { Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(result) => { assert_eq!( result, @@ -3336,7 +3336,7 @@ mod test { }], "get_available_blocks response matches expectation" ); - } + }, } } @@ -3431,17 +3431,17 @@ mod test { let response_channel = match response_receiver.next().await { None => { panic!("Expected a request for available blocks, but didn't get one"); - } + }, Some(MessageType::RequestMessage(req_msg)) => { assert_eq!(req_msg.state_id, expected_builder_state_id); req_msg.response_channel - } + }, Some(message) => { panic!( "Expected a request for available blocks, but got a different message: {:?}", message ); - } + }, }; // We want to send a ResponseMessage to the channel @@ -3462,7 +3462,7 @@ mod test { match result { Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(result) => { assert_eq!( result, @@ -3482,7 +3482,7 @@ mod test { }], "get_available_blocks response matches expectation" ); - } + }, } } @@ -3539,13 +3539,13 @@ mod test { // This is what we expect. // This message *should* indicate that the signature passed // did not match the given public key. - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, } } @@ -3600,13 +3600,13 @@ mod test { // This is what we expect. // This message *should* indicate that the signature passed // did not match the given public key. - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, } } @@ -3687,10 +3687,10 @@ mod test { match vid_trigger_receiver.await { Ok(TriggerStatus::Start) => { // This is what we expect. - } + }, _ => { panic!("Expected a TriggerStatus::Start event"); - } + }, } let result = claim_block_join_handle.await; @@ -3698,10 +3698,10 @@ mod test { match result { Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { // This is expected - } + }, } } @@ -3759,13 +3759,13 @@ mod test { // This is what we expect. // This message *should* indicate that the signature passed // did not match the given public key. - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, } } @@ -3820,13 +3820,13 @@ mod test { // This is what we expect. // This message *should* indicate that the signature passed // did not match the given public key. - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, } } @@ -3887,10 +3887,10 @@ mod test { match result { Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { // This is expected. 
- } + }, } } @@ -3943,13 +3943,13 @@ mod test { match result { Err(HandleDaEventError::SignatureValidationFailed) => { // This is expected. - } + }, Ok(_) => { panic!("expected an error, but received a successful attempt instead") - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, } } @@ -4001,13 +4001,13 @@ mod test { match result { Err(HandleDaEventError::BroadcastFailed(_)) => { // This error is expected - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, } } @@ -4050,20 +4050,20 @@ mod test { match result { Ok(_) => { // This is expected. - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, } let mut da_channel_receiver = da_channel_receiver; match da_channel_receiver.next().await { Some(MessageType::DaProposalMessage(da_proposal_message)) => { assert_eq!(da_proposal_message.proposal, signed_da_proposal); - } + }, _ => { panic!("Expected a DaProposalMessage, but got something else"); - } + }, } } @@ -4134,13 +4134,13 @@ mod test { match result { Err(HandleQuorumEventError::SignatureValidationFailed) => { // This is expected. - } + }, Ok(_) => { panic!("expected an error, but received a successful attempt instead"); - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, } } @@ -4209,13 +4209,13 @@ mod test { match result { Err(HandleQuorumEventError::BroadcastFailed(_)) => { // This is expected. - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, } } @@ -4275,20 +4275,20 @@ mod test { match result { Ok(_) => { // This is expected. - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, } let mut quorum_channel_receiver = quorum_channel_receiver; match quorum_channel_receiver.next().await { Some(MessageType::QuorumProposalMessage(da_proposal_message)) => { assert_eq!(da_proposal_message.proposal, signed_quorum_proposal); - } + }, _ => { panic!("Expected a QuorumProposalMessage, but got something else"); - } + }, } } @@ -4323,16 +4323,16 @@ mod test { match handle_received_txns_iter.next() { Some(Err(HandleReceivedTxnsError::TooManyTransactions)) => { // This is expected, - } + }, Some(Err(err)) => { panic!("Unexpected error: {:?}", err); - } + }, Some(Ok(_)) => { panic!("Expected an error, but got a result"); - } + }, None => { panic!("Expected an error, but got a result"); - } + }, } } @@ -4376,16 +4376,16 @@ mod test { // This is expected, assert!(estimated_length >= 256); assert_eq!(max_txn_len, TEST_MAX_TX_LEN); - } + }, Some(Err(err)) => { panic!("Unexpected error: {:?}", err); - } + }, Some(Ok(_)) => { panic!("Expected an error, but got a result"); - } + }, None => { panic!("Expected an error, but got a result"); - } + }, } } @@ -4431,21 +4431,21 @@ mod test { match err { async_broadcast::TrySendError::Closed(_) => { // This is expected. - } + }, _ => { panic!("Unexpected error: {:?}", err); - } + }, } - } + }, Some(Err(err)) => { panic!("Unexpected error: {:?}", err); - } + }, Some(Ok(_)) => { panic!("Expected an error, but got a result"); - } + }, None => { panic!("Expected an error, but got a result"); - } + }, } } } @@ -4473,10 +4473,10 @@ mod test { match iteration { Ok(_) => { // This is expected. 
- } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, } } @@ -4485,10 +4485,10 @@ mod test { match tx_receiver.next().await { Some(received_txn) => { assert_eq!(received_txn.tx, tx); - } + }, _ => { panic!("Expected a TransactionMessage, but got something else"); - } + }, } } } @@ -4553,10 +4553,10 @@ mod test { match proxy_global_state.txn_status(tx.commit()).await { Ok(txn_status) => { assert_eq!(txn_status, TransactionStatus::Pending); - } + }, e => { panic!("transaction status should be Pending instead of {:?}", e); - } + }, } } @@ -4594,10 +4594,10 @@ mod test { match proxy_global_state.txn_status(tx.commit()).await { Ok(txn_status) => { assert_eq!(txn_status, TransactionStatus::Pending); - } + }, e => { panic!("transaction status should be Pending instead of {:?}", e); - } + }, } } @@ -4624,13 +4624,13 @@ mod test { } else { assert_eq!(txn_status, TransactionStatus::Pending); } - } + }, e => { panic!( "transaction status should be a valid status instead of {:?}", e ); - } + }, } } @@ -4644,22 +4644,22 @@ mod test { { Err(err) => { panic!("Expected a result, but got a error {:?}", err); - } + }, _ => { // This is expected - } + }, } match write_guard.txn_status(tx.commit()).await { Ok(txn_status) => { assert_eq!(txn_status, TransactionStatus::Pending); - } + }, e => { panic!( "transaction status should be a valid status instead of {:?}", e ); - } + }, } } } @@ -4682,10 +4682,10 @@ mod test { { Err(_err) => { // This is expected - } + }, _ => { panic!("Expected an error, but got a result"); - } + }, } } @@ -4695,10 +4695,10 @@ mod test { match proxy_global_state.txn_status(unknown_tx.commit()).await { Ok(txn_status) => { assert_eq!(txn_status, TransactionStatus::Unknown); - } + }, e => { panic!("transaction status should be Unknown instead of {:?}", e); - } + }, } } } diff --git a/hotshot-builder-core/src/testing/basic_test.rs b/hotshot-builder-core/src/testing/basic_test.rs index 867706c213..bd6cbb5635 100644 --- a/hotshot-builder-core/src/testing/basic_test.rs +++ b/hotshot-builder-core/src/testing/basic_test.rs @@ -1,4 +1,3 @@ -pub use crate::builder_state::{BuilderState, MessageType}; pub use async_broadcast::broadcast; pub use hotshot::traits::election::static_committee::StaticCommittee; pub use hotshot_types::{ @@ -12,51 +11,49 @@ pub use hotshot_types::{ }, }; use vbs::version::StaticVersionType; + +pub use crate::builder_state::{BuilderState, MessageType}; /// The following tests are performed: #[cfg(test)] mod tests { - use super::*; - use std::collections::VecDeque; - use std::{hash::Hash, marker::PhantomData}; + use std::{collections::VecDeque, hash::Hash, marker::PhantomData, sync::Arc, time::Duration}; + use async_lock::RwLock; + use committable::{Commitment, CommitmentBoundsArkless, Committable}; use hotshot::types::SignatureKey; use hotshot_builder_api::v0_2::data_source::BuilderDataSource; - use hotshot_example_types::auction_results_provider_types::TestAuctionResult; - use hotshot_example_types::node_types::TestVersions; - use hotshot_types::data::{DaProposal2, Leaf2, QuorumProposal2, QuorumProposalWrapper}; - use hotshot_types::simple_vote::QuorumData2; - use hotshot_types::traits::node_implementation::Versions; - use hotshot_types::{ - data::vid_commitment, signature_key::BuilderKey, traits::block_contents::BlockHeader, - traits::EncodeBytes, utils::BuilderCommitment, - }; - use hotshot_example_types::{ + auction_results_provider_types::TestAuctionResult, block_types::{TestBlockHeader, TestBlockPayload, TestMetadata, TestTransaction}, + 
node_types::TestVersions, state_types::{TestInstanceState, TestValidatedState}, }; - use marketplace_builder_shared::block::ParentBlockReferences; - use marketplace_builder_shared::testing::constants::{ - TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, TEST_MAX_TX_NUM, TEST_NUM_NODES_IN_VID_COMPUTATION, - TEST_PROTOCOL_MAX_BLOCK_SIZE, - }; - use tokio::time::error::Elapsed; - use tokio::time::timeout; - use tracing_subscriber::EnvFilter; - - use crate::builder_state::{ - DaProposalMessage, DecideMessage, QuorumProposalMessage, TransactionSource, + use hotshot_types::{ + data::{vid_commitment, DaProposal2, Leaf2, QuorumProposal2, QuorumProposalWrapper}, + signature_key::BuilderKey, + simple_vote::QuorumData2, + traits::{block_contents::BlockHeader, node_implementation::Versions, EncodeBytes}, + utils::BuilderCommitment, }; - use crate::service::{ - handle_received_txns, GlobalState, ProxyGlobalState, ReceivedTransaction, + use marketplace_builder_shared::{ + block::ParentBlockReferences, + testing::constants::{ + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, TEST_MAX_TX_NUM, + TEST_NUM_NODES_IN_VID_COMPUTATION, TEST_PROTOCOL_MAX_BLOCK_SIZE, + }, }; - use async_lock::RwLock; - use committable::{Commitment, CommitmentBoundsArkless, Committable}; + use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; - use std::sync::Arc; - use std::time::Duration; + use tokio::time::{error::Elapsed, timeout}; + use tracing_subscriber::EnvFilter; - use serde::{Deserialize, Serialize}; + use super::*; + use crate::{ + builder_state::{ + DaProposalMessage, DecideMessage, QuorumProposalMessage, TransactionSource, + }, + service::{handle_received_txns, GlobalState, ProxyGlobalState, ReceivedTransaction}, + }; /// This test simulates multiple builder states receiving messages from the channels and processing them #[tokio::test] //#[instrument] @@ -461,7 +458,7 @@ mod tests { ) .unwrap(); current_leaf - } + }, }; DecideMessage:: { diff --git a/hotshot-builder-core/src/testing/finalization_test.rs b/hotshot-builder-core/src/testing/finalization_test.rs index a671cbbdf8..52ac28a84c 100644 --- a/hotshot-builder-core/src/testing/finalization_test.rs +++ b/hotshot-builder-core/src/testing/finalization_test.rs @@ -1,10 +1,5 @@ use std::{sync::Arc, time::Duration}; -use super::basic_test::{BuilderState, MessageType}; -use crate::{ - builder_state::{DaProposalMessage, QuorumProposalMessage, ALLOW_EMPTY_BLOCK_PERIOD}, - service::{GlobalState, ProxyGlobalState, ReceivedTransaction}, -}; use async_broadcast::{broadcast, Sender}; use async_lock::RwLock; use committable::Commitment; @@ -12,19 +7,20 @@ use hotshot::{ traits::BlockPayload, types::{BLSPubKey, SignatureKey}, }; -use hotshot_builder_api::{ - v0_1::{block_info::AvailableBlockInfo, data_source::BuilderDataSource}, - v0_1::{builder::BuildError, data_source::AcceptsTxnSubmits}, +use hotshot_builder_api::v0_1::{ + block_info::AvailableBlockInfo, + builder::BuildError, + data_source::{AcceptsTxnSubmits, BuilderDataSource}, }; use hotshot_example_types::{ block_types::{TestBlockHeader, TestBlockPayload, TestMetadata, TestTransaction}, node_types::{TestTypes, TestVersions}, state_types::{TestInstanceState, TestValidatedState}, }; -use hotshot_types::simple_certificate::QuorumCertificate2; use hotshot_types::{ data::{vid_commitment, DaProposal2, QuorumProposal2, QuorumProposalWrapper, ViewNumber}, message::Proposal, + simple_certificate::QuorumCertificate2, traits::{ block_contents::BlockHeader, node_implementation::{ConsensusTime, Versions}, @@ -32,19 +28,23 @@ use 
hotshot_types::{ }, utils::BuilderCommitment, }; -use marketplace_builder_shared::testing::constants::{ - TEST_CHANNEL_BUFFER_SIZE, TEST_MAX_TX_NUM, TEST_NUM_CONSENSUS_RETRIES, - TEST_NUM_NODES_IN_VID_COMPUTATION, -}; -use marketplace_builder_shared::{ - block::BuilderStateId, testing::constants::TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, -}; use marketplace_builder_shared::{ - block::ParentBlockReferences, testing::constants::TEST_PROTOCOL_MAX_BLOCK_SIZE, + block::{BuilderStateId, ParentBlockReferences}, + testing::constants::{ + TEST_CHANNEL_BUFFER_SIZE, TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, TEST_MAX_TX_NUM, + TEST_NUM_CONSENSUS_RETRIES, TEST_NUM_NODES_IN_VID_COMPUTATION, + TEST_PROTOCOL_MAX_BLOCK_SIZE, + }, }; use sha2::{Digest, Sha256}; use vbs::version::StaticVersionType; +use super::basic_test::{BuilderState, MessageType}; +use crate::{ + builder_state::{DaProposalMessage, QuorumProposalMessage, ALLOW_EMPTY_BLOCK_PERIOD}, + service::{GlobalState, ProxyGlobalState, ReceivedTransaction}, +}; + type TestSetup = ( ProxyGlobalState, async_broadcast::Sender>, diff --git a/hotshot-builder-core/src/testing/mod.rs b/hotshot-builder-core/src/testing/mod.rs index 279e3ac84c..9f16ee5b5a 100644 --- a/hotshot-builder-core/src/testing/mod.rs +++ b/hotshot-builder-core/src/testing/mod.rs @@ -1,17 +1,17 @@ -use std::{collections::VecDeque, marker::PhantomData}; +use std::{collections::VecDeque, marker::PhantomData, sync::Arc, time::Duration}; -use crate::{ - builder_state::{ - BuilderState, DAProposalInfo, DaProposalMessage, MessageType, QuorumProposalMessage, - }, - service::ReceivedTransaction, -}; -use async_broadcast::broadcast; -use async_broadcast::Sender as BroadcastSender; +use async_broadcast::{broadcast, Sender as BroadcastSender}; +use async_lock::RwLock; +use committable::{Commitment, CommitmentBoundsArkless, Committable}; use hotshot::{ traits::BlockPayload, types::{BLSPubKey, SignatureKey}, }; +use hotshot_example_types::{ + block_types::{TestBlockHeader, TestBlockPayload, TestMetadata, TestTransaction}, + node_types::{TestTypes, TestVersions}, + state_types::{TestInstanceState, TestValidatedState}, +}; use hotshot_types::{ data::{ vid_commitment, DaProposal2, Leaf2, QuorumProposal2, QuorumProposalWrapper, ViewNumber, @@ -25,26 +25,21 @@ use hotshot_types::{ }, utils::BuilderCommitment, }; -use vbs::version::StaticVersionType; - -use hotshot_example_types::{ - block_types::{TestBlockHeader, TestBlockPayload, TestMetadata, TestTransaction}, - node_types::{TestTypes, TestVersions}, - state_types::{TestInstanceState, TestValidatedState}, -}; -use sha2::{Digest, Sha256}; - -use crate::service::GlobalState; -use async_lock::RwLock; -use committable::{Commitment, CommitmentBoundsArkless, Committable}; use marketplace_builder_shared::{ block::{BuilderStateId, ParentBlockReferences}, testing::constants::{ TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, TEST_MAX_TX_NUM, TEST_PROTOCOL_MAX_BLOCK_SIZE, }, }; -use std::sync::Arc; -use std::time::Duration; +use sha2::{Digest, Sha256}; +use vbs::version::StaticVersionType; + +use crate::{ + builder_state::{ + BuilderState, DAProposalInfo, DaProposalMessage, MessageType, QuorumProposalMessage, + }, + service::{GlobalState, ReceivedTransaction}, +}; mod basic_test; pub mod finalization_test; @@ -192,7 +187,7 @@ pub async fn calc_proposal_msg( &TestInstanceState::default(), ) .await - } + }, Some(prev_proposal) => { let prev_justify_qc = prev_proposal.justify_qc(); let quorum_data = QuorumData2:: { @@ -208,7 +203,7 @@ pub async fn calc_proposal_msg( 
prev_justify_qc.signatures.clone(), PhantomData, ) - } + }, }; tracing::debug!("Iteration: {} justify_qc: {:?}", round, justify_qc); diff --git a/hotshot-events-service/src/api.rs b/hotshot-events-service/src/api.rs index 215d1f28e4..dd4579cef8 100644 --- a/hotshot-events-service/src/api.rs +++ b/hotshot-events-service/src/api.rs @@ -40,7 +40,7 @@ fn merge_toml(into: &mut Value, from: Value) { Entry::Occupied(mut entry) => merge_toml(entry.get_mut(), value), Entry::Vacant(entry) => { entry.insert(value); - } + }, } } } diff --git a/hotshot-events-service/src/events.rs b/hotshot-events-service/src/events.rs index f3fc9cad5b..594ffd0a31 100644 --- a/hotshot-events-service/src/events.rs +++ b/hotshot-events-service/src/events.rs @@ -1,10 +1,11 @@ +use std::path::PathBuf; + use clap::Args; use derive_more::From; use futures::{FutureExt, StreamExt, TryFutureExt}; use hotshot_types::traits::node_implementation::NodeType; use serde::{Deserialize, Serialize}; use snafu::Snafu; -use std::path::PathBuf; use tide_disco::{api::ApiError, method::ReadState, Api, RequestError, StatusCode}; use vbs::version::StaticVersionType; diff --git a/hotshot-events-service/src/events_source.rs b/hotshot-events-service/src/events_source.rs index 4f905bca63..22e6793384 100644 --- a/hotshot-events-service/src/events_source.rs +++ b/hotshot-events-service/src/events_source.rs @@ -99,7 +99,7 @@ impl EventFilterSet { EventType::Decide { .. } => filter.contains(&EventFilter::Decide), EventType::ReplicaViewTimeout { .. } => { filter.contains(&EventFilter::ReplicaViewTimeout) - } + }, EventType::ViewFinished { .. } => filter.contains(&EventFilter::ViewFinished), EventType::ViewTimeout { .. } => filter.contains(&EventFilter::ViewTimeout), EventType::Transactions { .. } => filter.contains(&EventFilter::Transactions), diff --git a/hotshot-example-types/src/block_types.rs b/hotshot-example-types/src/block_types.rs index 6bdcca4fed..c96e1c7ce3 100644 --- a/hotshot-example-types/src/block_types.rs +++ b/hotshot-example-types/src/block_types.rs @@ -13,8 +13,7 @@ use std::{ use async_trait::async_trait; use committable::{Commitment, Committable, RawCommitmentBuilder}; use hotshot_types::{ - data::VidCommitment, - data::{BlockError, Leaf2}, + data::{BlockError, Leaf2, VidCommitment}, traits::{ block_contents::{BlockHeader, BuilderFee, EncodeBytes, TestableBlock, Transaction}, node_implementation::NodeType, diff --git a/hotshot-example-types/src/storage_types.rs b/hotshot-example-types/src/storage_types.rs index 9084eb463d..26156b53d5 100644 --- a/hotshot-example-types/src/storage_types.rs +++ b/hotshot-example-types/src/storage_types.rs @@ -12,7 +12,6 @@ use std::{ use anyhow::{bail, Result}; use async_lock::RwLock; use async_trait::async_trait; -use hotshot_types::drb::DrbResult; use hotshot_types::{ consensus::CommitmentMap, data::{ @@ -20,6 +19,7 @@ use hotshot_types::{ DaProposal, DaProposal2, Leaf, Leaf2, QuorumProposal, QuorumProposal2, QuorumProposalWrapper, VidCommitment, }, + drb::DrbResult, event::HotShotAction, message::{convert_proposal, Proposal}, simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate2, UpgradeCertificate}, diff --git a/hotshot-example-types/src/testable_delay.rs b/hotshot-example-types/src/testable_delay.rs index 07f460eaf3..ea16b4b3f3 100644 --- a/hotshot-example-types/src/testable_delay.rs +++ b/hotshot-example-types/src/testable_delay.rs @@ -85,16 +85,16 @@ pub trait TestableDelay { /// Add a delay from settings async fn handle_async_delay(settings: &DelaySettings) { match 
settings.delay_option { - DelayOptions::None => {} + DelayOptions::None => {}, DelayOptions::Fixed => { sleep(Duration::from_millis(settings.fixed_time_in_milliseconds)).await; - } + }, DelayOptions::Random => { let sleep_in_millis = rand::thread_rng().gen_range( settings.min_time_in_milliseconds..=settings.max_time_in_milliseconds, ); sleep(Duration::from_millis(sleep_in_millis)).await; - } + }, } } @@ -124,7 +124,7 @@ impl Iterator for SupportedTraitTypesForAsyncDelayIterator { _ => { assert_eq!(self.index, 3, "Need to ensure that newly added or removed `SupportedTraitTypesForAsyncDelay` enum is handled in iterator"); return None; - } + }, }; self.index += 1; supported_type diff --git a/hotshot-examples/infra/mod.rs b/hotshot-examples/infra/mod.rs index 875d849678..01e9d6223f 100755 --- a/hotshot-examples/infra/mod.rs +++ b/hotshot-examples/infra/mod.rs @@ -441,13 +441,13 @@ pub trait RunDa< match event_stream.next().await { None => { panic!("Error! Event stream completed before consensus ended."); - } + }, Some(Event { event, .. }) => { match event { EventType::Error { error } => { error!("Error in consensus: {:?}", error); // TODO what to do here - } + }, EventType::Decide { leaf_chain, qc: _, @@ -514,16 +514,16 @@ pub trait RunDa< warn!("Leaf chain is greater than 1 with len {}", leaf_chain.len()); } // when we make progress, submit new events - } + }, EventType::ReplicaViewTimeout { view_number } => { warn!("Timed out as a replicas in view {:?}", view_number); - } + }, EventType::ViewTimeout { view_number } => { warn!("Timed out in view {:?}", view_number); - } - _ => {} // mostly DA proposal + }, + _ => {}, // mostly DA proposal } - } + }, } } // Panic if we don't have the genesis epoch, there is no recovery from that @@ -1092,11 +1092,11 @@ where }) .collect(); bind_address = Url::parse(&format!("http://0.0.0.0:{port}")).unwrap(); - } + }, Some(ref addr) => { bind_address = Url::parse(&format!("http://{addr}")).expect("Valid URL"); advertise_urls = vec![bind_address.clone()]; - } + }, } match run_config.builder { @@ -1116,7 +1116,7 @@ where .await; Some(builder_task) - } + }, BuilderType::Simple => { let builder_task = >::start( @@ -1132,7 +1132,7 @@ where .await; Some(builder_task) - } + }, } } diff --git a/hotshot-fakeapi/src/fake_solver.rs b/hotshot-fakeapi/src/fake_solver.rs index b52418cc9b..f2b81175b5 100644 --- a/hotshot-fakeapi/src/fake_solver.rs +++ b/hotshot-fakeapi/src/fake_solver.rs @@ -91,11 +91,11 @@ impl FakeSolverState { status: tide_disco::StatusCode::INTERNAL_SERVER_ERROR, message: "Internal Server Error".to_string(), }); - } + }, FakeSolverFaultType::TimeoutFault => { // Sleep for the preconfigured 1 second timeout interval tokio::time::sleep(SOLVER_MAX_TIMEOUT_S).await; - } + }, } } diff --git a/hotshot-libp2p-networking/src/network/behaviours/dht/bootstrap.rs b/hotshot-libp2p-networking/src/network/behaviours/dht/bootstrap.rs index 566db12d4c..cf07181249 100644 --- a/hotshot-libp2p-networking/src/network/behaviours/dht/bootstrap.rs +++ b/hotshot-libp2p-networking/src/network/behaviours/dht/bootstrap.rs @@ -50,19 +50,19 @@ impl DHTBootstrapTask { Some(InputEvent::BootstrapFinished) => { tracing::debug!("Bootstrap finished"); self.in_progress = false; - } + }, Some(InputEvent::ShutdownBootstrap) => { tracing::info!("ShutdownBootstrap received, shutting down"); break; - } + }, Some(InputEvent::StartBootstrap) => { tracing::warn!("Trying to start bootstrap that's already in progress"); continue; - } + }, None => { tracing::debug!("Bootstrap channel closed, exiting 
loop"); break; - } + }, } } else if let Ok(maybe_event) = timeout(Duration::from_secs(120), self.rx.next()).await { @@ -70,18 +70,18 @@ impl DHTBootstrapTask { Some(InputEvent::StartBootstrap) => { tracing::debug!("Start bootstrap in bootstrap task"); self.bootstrap(); - } + }, Some(InputEvent::ShutdownBootstrap) => { tracing::debug!("ShutdownBootstrap received, shutting down"); break; - } + }, Some(InputEvent::BootstrapFinished) => { tracing::debug!("not in progress got bootstrap finished"); - } + }, None => { tracing::debug!("Bootstrap channel closed, exiting loop"); break; - } + }, } } else { tracing::debug!("Start bootstrap in bootstrap task after timeout"); diff --git a/hotshot-libp2p-networking/src/network/behaviours/dht/mod.rs b/hotshot-libp2p-networking/src/network/behaviours/dht/mod.rs index 7ef41e1192..950450aecc 100644 --- a/hotshot-libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/hotshot-libp2p-networking/src/network/behaviours/dht/mod.rs @@ -274,31 +274,31 @@ impl DHTBehaviour { let num_entries = o.get_mut(); *num_entries += 1; *num_entries - } + }, std::collections::hash_map::Entry::Vacant(v) => { v.insert(1); 1 - } + }, } - } + }, GetRecordOk::FinishedWithNoAdditionalRecord { cache_candidates: _, } => { tracing::debug!("GetRecord Finished with No Additional Record"); last = true; 0 - } + }, }, Err(err) => { warn!("Error in Kademlia query: {:?}", err); 0 - } + }, }, None => { // We already finished the query (or it's been cancelled). Do nothing and exit the // function. return; - } + }, }; // if the query has completed and we need to retry @@ -398,7 +398,7 @@ impl DHTBehaviour { if query.notify.send(()).is_err() { warn!("Put DHT: client channel closed before put record request could be sent"); } - } + }, Err(e) => { query.progress = DHTProgress::NotStarted; query.backoff.start_next(false); @@ -409,7 +409,7 @@ impl DHTBehaviour { ); // push back onto the queue self.retry_put(query); - } + }, } } else { warn!("Put DHT: completed DHT query that is no longer tracked."); @@ -439,7 +439,7 @@ impl DHTBehaviour { if last { self.handle_put_query(record_results, id); } - } + }, KademliaEvent::OutboundQueryProgressed { result: QueryResult::GetClosestPeers(r), id: query_id, @@ -454,13 +454,13 @@ impl DHTBehaviour { }; }; debug!("Successfully got closest peers for key {:?}", key); - } + }, Err(e) => { if let Some(chan) = self.in_progress_get_closest_peers.remove(&query_id) { let _: Result<_, _> = chan.send(()); }; warn!("Failed to get closest peers: {:?}", e); - } + }, }, KademliaEvent::OutboundQueryProgressed { result: QueryResult::GetRecord(record_results), @@ -469,7 +469,7 @@ impl DHTBehaviour { .. } => { self.handle_get_query(store, record_results, id, last); - } + }, KademliaEvent::OutboundQueryProgressed { result: QueryResult::Bootstrap(Ok(BootstrapOk { @@ -485,7 +485,7 @@ impl DHTBehaviour { debug!("Bootstrap in progress, {} nodes remaining", num_remaining); } return Some(NetworkEvent::IsBootstrapped); - } + }, KademliaEvent::OutboundQueryProgressed { result: QueryResult::Bootstrap(Err(e)), .. 
@@ -495,16 +495,16 @@ impl DHTBehaviour { error!("Failed to bootstrap: {:?}", e); } self.finish_bootstrap(); - } + }, KademliaEvent::RoutablePeer { peer, address: _ } => { debug!("Found routable peer {:?}", peer); - } + }, KademliaEvent::PendingRoutablePeer { peer, address: _ } => { debug!("Found pending routable peer {:?}", peer); - } + }, KademliaEvent::UnroutablePeer { peer } => { debug!("Found unroutable peer {:?}", peer); - } + }, KademliaEvent::RoutingUpdated { peer: _, is_new_peer: _, @@ -513,13 +513,13 @@ impl DHTBehaviour { old_peer: _, } => { debug!("Routing table updated"); - } + }, e @ KademliaEvent::OutboundQueryProgressed { .. } => { debug!("Not handling dht event {:?}", e); - } + }, e => { debug!("New unhandled swarm event: {e:?}"); - } + }, } None } diff --git a/hotshot-libp2p-networking/src/network/behaviours/dht/store/persistent.rs b/hotshot-libp2p-networking/src/network/behaviours/dht/store/persistent.rs index cd927c2470..2c89cc741a 100644 --- a/hotshot-libp2p-networking/src/network/behaviours/dht/store/persistent.rs +++ b/hotshot-libp2p-networking/src/network/behaviours/dht/store/persistent.rs @@ -281,10 +281,10 @@ impl PersistentStore { .await .map_err(|_| anyhow::anyhow!("save operation timed out")) { - Ok(Ok(())) => {} + Ok(Ok(())) => {}, Ok(Err(error)) | Err(error) => { warn!("Failed to save DHT to persistent storage: {error}"); - } + }, }; // Reset the record delta @@ -324,10 +324,10 @@ impl PersistentStore { err ); } - } + }, Err(err) => { warn!("Failed to parse record from persistent storage: {:?}", err); - } + }, }; } diff --git a/hotshot-libp2p-networking/src/network/behaviours/direct_message.rs b/hotshot-libp2p-networking/src/network/behaviours/direct_message.rs index 72d378a587..dfd8e5ca4f 100644 --- a/hotshot-libp2p-networking/src/network/behaviours/direct_message.rs +++ b/hotshot-libp2p-networking/src/network/behaviours/direct_message.rs @@ -59,7 +59,7 @@ impl DMBehaviour { } => { error!("Inbound message failure from {:?}: {:?}", peer, error); None - } + }, Event::OutboundFailure { peer, request_id, @@ -83,7 +83,7 @@ impl DMBehaviour { } } None - } + }, Event::Message { message, peer, .. } => match message { Message::Request { request: msg, @@ -94,7 +94,7 @@ impl DMBehaviour { // receiver, not initiator. // don't track. If we are disconnected, sender will reinitiate Some(NetworkEvent::DirectRequest(msg, peer, channel)) - } + }, Message::Response { request_id, response: msg, @@ -107,12 +107,12 @@ impl DMBehaviour { warn!("Received response for unknown request id {:?}", request_id); None } - } + }, }, e @ Event::ResponseSent { .. } => { debug!("Response sent {:?}", e); None - } + }, } } } diff --git a/hotshot-libp2p-networking/src/network/cbor.rs b/hotshot-libp2p-networking/src/network/cbor.rs index a8ca6afedf..71f19281e7 100644 --- a/hotshot-libp2p-networking/src/network/cbor.rs +++ b/hotshot-libp2p-networking/src/network/cbor.rs @@ -126,19 +126,19 @@ fn decode_into_io_error(err: cbor4ii::serde::DecodeError) -> io::Err match err { cbor4ii::serde::DecodeError::Core(DecodeError::Read(e)) => { io::Error::new(io::ErrorKind::Other, e.to_string()) - } + }, cbor4ii::serde::DecodeError::Core(e @ DecodeError::Unsupported { .. }) => { io::Error::new(io::ErrorKind::Unsupported, e.to_string()) - } + }, cbor4ii::serde::DecodeError::Core(e @ DecodeError::Eof { .. 
}) => { io::Error::new(io::ErrorKind::UnexpectedEof, e.to_string()) - } + }, cbor4ii::serde::DecodeError::Core(e) => { io::Error::new(io::ErrorKind::InvalidData, e.to_string()) - } + }, cbor4ii::serde::DecodeError::Custom(e) => { io::Error::new(io::ErrorKind::Other, e.to_string()) - } + }, } } diff --git a/hotshot-libp2p-networking/src/network/node.rs b/hotshot-libp2p-networking/src/network/node.rs index 28d009f846..d5e1703a0e 100644 --- a/hotshot-libp2p-networking/src/network/node.rs +++ b/hotshot-libp2p-networking/src/network/node.rs @@ -360,7 +360,7 @@ impl NetworkNode { query.progress = DHTProgress::NotStarted; query.backoff.start_next(false); error!("Error publishing to DHT: {e:?} for peer {:?}", self.peer_id); - } + }, Ok(qid) => { debug!("Published record to DHT with qid {:?}", qid); let query = KadPutQuery { @@ -368,7 +368,7 @@ impl NetworkNode { ..query }; self.dht_handler.put_record(qid, query); - } + }, } } @@ -392,20 +392,20 @@ impl NetworkNode { ClientRequest::BeginBootstrap => { debug!("Beginning Libp2p bootstrap"); let _ = self.swarm.behaviour_mut().dht.bootstrap(); - } + }, ClientRequest::LookupPeer(pid, chan) => { let id = self.swarm.behaviour_mut().dht.get_closest_peers(pid); self.dht_handler .in_progress_get_closest_peers .insert(id, chan); - } + }, ClientRequest::GetRoutingTable(chan) => { self.dht_handler .print_routing_table(&mut self.swarm.behaviour_mut().dht); if chan.send(()).is_err() { warn!("Tried to notify client but client not tracking anymore"); } - } + }, ClientRequest::PutDHT { key, value, notify } => { let query = KadPutQuery { progress: DHTProgress::NotStarted, @@ -415,17 +415,17 @@ impl NetworkNode { backoff: ExponentialBackoff::default(), }; self.put_record(query); - } + }, ClientRequest::GetConnectedPeerNum(s) => { if s.send(self.num_connected()).is_err() { error!("error sending peer number to client"); } - } + }, ClientRequest::GetConnectedPeers(s) => { if s.send(self.connected_pids()).is_err() { error!("error sending peer set to client"); } - } + }, ClientRequest::GetDHT { key, notify, @@ -439,20 +439,20 @@ impl NetworkNode { retry_count, &mut self.swarm.behaviour_mut().dht, ); - } + }, ClientRequest::IgnorePeers(_peers) => { // NOTE used by test with conductor only - } + }, ClientRequest::Shutdown => { if let Some(listener_id) = self.listener_id { self.swarm.remove_listener(listener_id); } return Ok(true); - } + }, ClientRequest::GossipMsg(topic, contents) => { behaviour.publish_gossip(Topic::new(topic.clone()), contents.clone()); - } + }, ClientRequest::Subscribe(t, chan) => { behaviour.subscribe_gossip(&t); if let Some(chan) = chan { @@ -460,7 +460,7 @@ impl NetworkNode { error!("finished subscribing but response channel dropped"); } } - } + }, ClientRequest::Unsubscribe(t, chan) => { behaviour.unsubscribe_gossip(&t); if let Some(chan) = chan { @@ -468,7 +468,7 @@ impl NetworkNode { error!("finished unsubscribing but response channel dropped"); } } - } + }, ClientRequest::DirectRequest { pid, contents, @@ -483,23 +483,23 @@ impl NetworkNode { retry_count, }; self.direct_message_state.add_direct_request(req, id); - } + }, ClientRequest::DirectResponse(chan, msg) => { behaviour.add_direct_response(chan, msg); - } + }, ClientRequest::AddKnownPeers(peers) => { self.add_known_peers(&peers); - } + }, ClientRequest::Prune(pid) => { if self.swarm.disconnect_peer_id(pid).is_err() { warn!("Could not disconnect from {:?}", pid); } - } + }, } - } + }, None => { error!("Error receiving msg in main behaviour loop: channel closed"); - } + }, } Ok(false) } @@ 
-541,7 +541,7 @@ impl NetworkNode { send_to_client .send(NetworkEvent::ConnectedPeersUpdate(self.num_connected())) .map_err(|err| NetworkError::ChannelSendError(err.to_string()))?; - } + }, SwarmEvent::ConnectionClosed { connection_id: _, peer_id, @@ -565,13 +565,13 @@ impl NetworkNode { send_to_client .send(NetworkEvent::ConnectedPeersUpdate(self.num_connected())) .map_err(|err| NetworkError::ChannelSendError(err.to_string()))?; - } + }, SwarmEvent::Dialing { peer_id, connection_id: _, } => { debug!("Attempting to dial {:?}", peer_id); - } + }, SwarmEvent::ListenerClosed { listener_id: _, addresses: _, @@ -591,7 +591,7 @@ impl NetworkNode { connection_id: _, local_addr: _, send_back_addr: _, - } => {} + } => {}, SwarmEvent::Behaviour(b) => { let maybe_event = match b { NetworkEventInternal::DHTEvent(e) => self @@ -621,7 +621,7 @@ impl NetworkNode { } } None - } + }, NetworkEventInternal::GossipEvent(e) => match *e { GossipEvent::Message { propagation_source: _peer_id, @@ -631,25 +631,25 @@ impl NetworkNode { GossipEvent::Subscribed { peer_id, topic } => { debug!("Peer {:?} subscribed to topic {:?}", peer_id, topic); None - } + }, GossipEvent::Unsubscribed { peer_id, topic } => { debug!("Peer {:?} unsubscribed from topic {:?}", peer_id, topic); None - } + }, GossipEvent::GossipsubNotSupported { peer_id } => { warn!("Peer {:?} does not support gossipsub", peer_id); None - } + }, }, NetworkEventInternal::DMEvent(e) => self .direct_message_state .handle_dm_event(e, self.resend_tx.clone()), NetworkEventInternal::AutonatEvent(e) => { match e { - autonat::Event::InboundProbe(_) => {} + autonat::Event::InboundProbe(_) => {}, autonat::Event::OutboundProbe(e) => match e { autonat::OutboundProbeEvent::Request { .. } - | autonat::OutboundProbeEvent::Response { .. } => {} + | autonat::OutboundProbeEvent::Response { .. } => {}, autonat::OutboundProbeEvent::Error { probe_id: _, peer, @@ -659,14 +659,14 @@ impl NetworkNode { "AutoNAT Probe failed to peer {:?} with error: {:?}", peer, error ); - } + }, }, autonat::Event::StatusChanged { old, new } => { debug!("AutoNAT Status changed. 
Old: {:?}, New: {:?}", old, new); - } + }, }; None - } + }, }; if let Some(event) = maybe_event { @@ -675,14 +675,14 @@ impl NetworkNode { .send(event) .map_err(|err| NetworkError::ChannelSendError(err.to_string()))?; } - } + }, SwarmEvent::OutgoingConnectionError { connection_id: _, peer_id, error, } => { warn!("Outgoing connection error to {:?}: {:?}", peer_id, error); - } + }, SwarmEvent::IncomingConnectionError { connection_id: _, local_addr: _, @@ -690,29 +690,29 @@ impl NetworkNode { error, } => { warn!("Incoming connection error: {:?}", error); - } + }, SwarmEvent::ListenerError { listener_id: _, error, } => { warn!("Listener error: {:?}", error); - } + }, SwarmEvent::ExternalAddrConfirmed { address } => { let my_id = *self.swarm.local_peer_id(); self.swarm .behaviour_mut() .dht .add_address(&my_id, address.clone()); - } + }, SwarmEvent::NewExternalAddrOfPeer { peer_id, address } => { self.swarm .behaviour_mut() .dht .add_address(&peer_id, address.clone()); - } + }, _ => { debug!("Unhandled swarm event {:?}", event); - } + }, } Ok(()) } diff --git a/hotshot-libp2p-networking/src/network/transport.rs b/hotshot-libp2p-networking/src/network/transport.rs index 01e94e6b90..b69c2c9019 100644 --- a/hotshot-libp2p-networking/src/network/transport.rs +++ b/hotshot-libp2p-networking/src/network/transport.rs @@ -358,7 +358,7 @@ where local_addr, send_back_addr, } - } + }, // We need to re-map the other events because we changed the type of the upgrade TransportEvent::AddressExpired { @@ -377,7 +377,7 @@ where }, TransportEvent::ListenerError { listener_id, error } => { TransportEvent::ListenerError { listener_id, error } - } + }, TransportEvent::NewAddress { listener_id, listen_addr, diff --git a/hotshot-macros/src/lib.rs b/hotshot-macros/src/lib.rs index 3608ef6da1..f318d8e1b0 100644 --- a/hotshot-macros/src/lib.rs +++ b/hotshot-macros/src/lib.rs @@ -118,7 +118,7 @@ impl ToLowerSnakeStr for syn::GenericArgument { syn::Type::Path(p) => p.to_lower_snake_str(), _ => { panic!("Unexpected type for GenericArgument::Type: {t:?}"); - } + }, }, syn::GenericArgument::Const(c) => match c { syn::Expr::Lit(l) => match &l.lit { @@ -126,15 +126,15 @@ impl ToLowerSnakeStr for syn::GenericArgument { syn::Lit::Int(v) => format!("{}_", v.base10_digits()), _ => { panic!("Unexpected type for GenericArgument::Const::Lit: {l:?}"); - } + }, }, _ => { panic!("Unexpected type for GenericArgument::Const: {c:?}"); - } + }, }, _ => { panic!("Unexpected type for GenericArgument: {self:?}"); - } + }, } } } diff --git a/hotshot-orchestrator/src/client.rs b/hotshot-orchestrator/src/client.rs index de167ff505..3c1d8e0884 100644 --- a/hotshot-orchestrator/src/client.rs +++ b/hotshot-orchestrator/src/client.rs @@ -515,7 +515,7 @@ impl OrchestratorClient { Err(err) => { tracing::info!("{err}"); sleep(Duration::from_millis(250)).await; - } + }, } } } diff --git a/hotshot-query-service/examples/simple-server.rs b/hotshot-query-service/examples/simple-server.rs index 847f8c60bc..916e8a5c7d 100644 --- a/hotshot-query-service/examples/simple-server.rs +++ b/hotshot-query-service/examples/simple-server.rs @@ -16,6 +16,8 @@ //! consensus network with two nodes and connects a query service to each node. It runs each query //! server on local host. The program continues until it is manually killed. 
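
// [Editor's sketch, not part of the patch] The simple-server example above
// serves the query API on localhost; a client can poll it the same way the
// crate's own tests do. The port (18080) and the "availability/leaf/0" route
// are illustrative assumptions, as is mounting the availability module under
// the "availability" prefix.
use hotshot_query_service::{
    availability::LeafQueryData,
    testing::mocks::{MockBase, MockTypes},
    Error,
};
use surf_disco::Client;

async fn first_leaf() -> LeafQueryData<MockTypes> {
    let client: Client<Error, MockBase> =
        Client::new("http://localhost:18080".parse().unwrap());
    // Block until the server is up and answering requests.
    client.connect(None).await;
    client
        .get("availability/leaf/0")
        .send()
        .await
        .expect("genesis leaf should be available once consensus starts")
}
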
+use std::{num::NonZeroUsize, str::FromStr, sync::Arc, time::Duration}; + use async_lock::RwLock; use clap::Parser; use futures::future::{join_all, try_join_all}; @@ -48,7 +50,6 @@ use hotshot_types::{ traits::{election::Membership, network::Topic}, HotShotConfig, PeerConfig, }; -use std::{num::NonZeroUsize, str::FromStr, sync::Arc, time::Duration}; use tracing_subscriber::EnvFilter; use url::Url; use vbs::version::StaticVersionType; diff --git a/hotshot-query-service/src/api.rs b/hotshot-query-service/src/api.rs index 5f447fbb53..dd4579cef8 100644 --- a/hotshot-query-service/src/api.rs +++ b/hotshot-query-service/src/api.rs @@ -10,8 +10,8 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . -use std::fs; -use std::path::Path; +use std::{fs, path::Path}; + use tide_disco::api::{Api, ApiError}; use toml::{map::Entry, Value}; use vbs::version::StaticVersionType; @@ -40,7 +40,7 @@ fn merge_toml(into: &mut Value, from: Value) { Entry::Occupied(mut entry) => merge_toml(entry.get_mut(), value), Entry::Vacant(entry) => { entry.insert(value); - } + }, } } } diff --git a/hotshot-query-service/src/availability.rs b/hotshot-query-service/src/availability.rs index d1dbcd249f..1737aab585 100644 --- a/hotshot-query-service/src/availability.rs +++ b/hotshot-query-service/src/availability.rs @@ -26,10 +26,10 @@ //! chain which is tabulated by this specific node and not subject to full consensus agreement, try //! the [node](crate::node) API. -use crate::{api::load_api, Payload, QueryError}; +use std::{fmt::Display, path::PathBuf, time::Duration}; + use derive_more::From; use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt}; - use hotshot_types::{ data::{Leaf, Leaf2, QuorumProposal}, simple_certificate::QuorumCertificate, @@ -37,10 +37,11 @@ use hotshot_types::{ }; use serde::{Deserialize, Serialize}; use snafu::{OptionExt, Snafu}; -use std::{fmt::Display, path::PathBuf, time::Duration}; use tide_disco::{api::ApiError, method::ReadState, Api, RequestError, StatusCode}; use vbs::version::StaticVersionType; +use crate::{api::load_api, Payload, QueryError}; + pub(crate) mod data_source; mod fetch; pub(crate) mod query_data; @@ -527,7 +528,7 @@ where .context(FetchTransactionSnafu { resource: hash.to_string(), }) - } + }, None => { let height: u64 = req.integer_param("height")?; let fetch = state @@ -543,7 +544,7 @@ where .context(InvalidTransactionIndexSnafu { height, index: i })?; TransactionQueryData::new(&block, index, i) .context(InvalidTransactionIndexSnafu { height, index: i }) - } + }, } } .boxed() @@ -608,34 +609,32 @@ fn enforce_range_limit(from: usize, until: usize, limit: usize) -> Result<(), Er #[cfg(test)] mod test { + use std::{fmt::Debug, time::Duration}; + + use async_lock::RwLock; + use committable::Committable; + use futures::future::FutureExt; + use hotshot_types::{data::Leaf2, simple_certificate::QuorumCertificate2}; + use portpicker::pick_unused_port; + use serde::de::DeserializeOwned; + use surf_disco::{Client, Error as _}; + use tempfile::TempDir; + use tide_disco::App; + use toml::toml; + use super::*; - use crate::data_source::storage::AvailabilityStorage; - use crate::data_source::VersionedDataSource; - use crate::testing::mocks::MockVersions; use crate::{ - data_source::ExtensibleDataSource, + data_source::{storage::AvailabilityStorage, ExtensibleDataSource, VersionedDataSource}, status::StatusDataSource, task::BackgroundTask, testing::{ consensus::{MockDataSource, MockNetwork, MockSqlDataSource}, - 
mocks::{mock_transaction, MockBase, MockHeader, MockPayload, MockTypes}, + mocks::{mock_transaction, MockBase, MockHeader, MockPayload, MockTypes, MockVersions}, setup_test, }, types::HeightIndexed, ApiState, Error, Header, }; - use async_lock::RwLock; - use committable::Committable; - use futures::future::FutureExt; - use hotshot_types::data::Leaf2; - use hotshot_types::simple_certificate::QuorumCertificate2; - use portpicker::pick_unused_port; - use serde::de::DeserializeOwned; - use std::{fmt::Debug, time::Duration}; - use surf_disco::{Client, Error as _}; - use tempfile::TempDir; - use tide_disco::App; - use toml::toml; /// Get the current ledger height and a list of non-empty leaf/block pairs. async fn get_non_empty_blocks( @@ -657,7 +656,7 @@ mod test { let leaf = client.get(&format!("leaf/{}", i)).send().await.unwrap(); blocks.push((leaf, block)); } - } + }, Err(Error::Availability { source: super::Error::FetchBlock { .. }, }) => { @@ -665,7 +664,7 @@ mod test { "found end of ledger at height {i}, non-empty blocks are {blocks:?}", ); return (i, blocks); - } + }, Err(err) => panic!("unexpected error {}", err), } } diff --git a/hotshot-query-service/src/availability/data_source.rs b/hotshot-query-service/src/availability/data_source.rs index 9747b7814b..c35caafdc2 100644 --- a/hotshot-query-service/src/availability/data_source.rs +++ b/hotshot-query-service/src/availability/data_source.rs @@ -10,15 +10,11 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . -use super::{ - fetch::Fetch, - query_data::{ - BlockHash, BlockQueryData, LeafHash, LeafQueryData, PayloadMetadata, PayloadQueryData, - QueryablePayload, TransactionHash, TransactionQueryData, VidCommonMetadata, - VidCommonQueryData, - }, +use std::{ + cmp::Ordering, + ops::{Bound, RangeBounds}, }; -use crate::{types::HeightIndexed, Header, Payload}; + use async_trait::async_trait; use derivative::Derivative; use derive_more::{Display, From}; @@ -30,10 +26,16 @@ use hotshot_types::{ data::{VidCommitment, VidShare}, traits::node_implementation::NodeType, }; -use std::{ - cmp::Ordering, - ops::{Bound, RangeBounds}, + +use super::{ + fetch::Fetch, + query_data::{ + BlockHash, BlockQueryData, LeafHash, LeafQueryData, PayloadMetadata, PayloadQueryData, + QueryablePayload, TransactionHash, TransactionQueryData, VidCommonMetadata, + VidCommonQueryData, + }, }; +use crate::{types::HeightIndexed, Header, Payload}; #[derive(Derivative, From, Display)] #[derivative(Ord = "feature_allow_slow_enum")] diff --git a/hotshot-query-service/src/availability/fetch.rs b/hotshot-query-service/src/availability/fetch.rs index ca48252b5d..d3175549c4 100644 --- a/hotshot-query-service/src/availability/fetch.rs +++ b/hotshot-query-service/src/availability/fetch.rs @@ -10,9 +10,10 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . +use std::{future::IntoFuture, time::Duration}; + use futures::future::{BoxFuture, FutureExt}; use snafu::{Error, ErrorCompat, IntoError, NoneError, OptionExt}; -use std::{future::IntoFuture, time::Duration}; use tokio::time::timeout; /// An in-progress request to fetch some data. 
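
// [Editor's sketch, not part of the patch] The fetch.rs hunk above is why the
// module now imports IntoFuture and tokio::time::timeout: a Fetch<T> resolves
// only once the object actually exists, so callers usually bound the wait.
// That Fetch implements IntoFuture with Output = T is taken from this module;
// the exact trait bounds used here are an assumption.
use std::{future::IntoFuture, time::Duration};

use hotshot_query_service::availability::Fetch;
use tokio::time::timeout;

async fn wait_bounded<T: Clone + Send + Sync + 'static>(fetch: Fetch<T>) -> Option<T> {
    // Give up after five seconds. The underlying fetch keeps running in the
    // background, so a later request for the same object can still succeed.
    timeout(Duration::from_secs(5), fetch.into_future()).await.ok()
}
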
diff --git a/hotshot-query-service/src/availability/query_data.rs b/hotshot-query-service/src/availability/query_data.rs index 6e1b1eb1f7..174db36101 100644 --- a/hotshot-query-service/src/availability/query_data.rs +++ b/hotshot-query-service/src/availability/query_data.rs @@ -10,7 +10,8 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . -use crate::{types::HeightIndexed, Header, Metadata, Payload, Transaction, VidCommon}; +use std::fmt::Debug; + use committable::{Commitment, Committable}; use hotshot_types::{ data::{Leaf, Leaf2, VidCommitment, VidShare}, @@ -26,7 +27,8 @@ use hotshot_types::{ use jf_vid::VidScheme; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use snafu::{ensure, Snafu}; -use std::fmt::Debug; + +use crate::{types::HeightIndexed, Header, Metadata, Payload, Transaction, VidCommon}; pub type LeafHash = Commitment>; pub type QcHash = Commitment>; diff --git a/hotshot-query-service/src/data_source.rs b/hotshot-query-service/src/data_source.rs index 3d45b8cd00..30881aa5c3 100644 --- a/hotshot-query-service/src/data_source.rs +++ b/hotshot-query-service/src/data_source.rs @@ -47,16 +47,18 @@ pub use update::{Transaction, UpdateDataSource, VersionedDataSource}; #[cfg(any(test, feature = "testing"))] mod test_helpers { + use std::ops::{Bound, RangeBounds}; + + use futures::{ + future, + stream::{BoxStream, StreamExt}, + }; + use crate::{ availability::{BlockQueryData, Fetch, LeafQueryData}, node::NodeDataSource, testing::{consensus::TestableDataSource, mocks::MockTypes}, }; - use futures::{ - future, - stream::{BoxStream, StreamExt}, - }; - use std::ops::{Bound, RangeBounds}; /// Apply an upper bound to a range based on the currently available block height. async fn bound_range(ds: &D, range: R) -> impl RangeBounds @@ -119,6 +121,16 @@ mod test_helpers { #[cfg(any(test, feature = "testing"))] #[espresso_macros::generic_tests] pub mod availability_tests { + use std::{ + collections::HashMap, + fmt::Debug, + ops::{Bound, RangeBounds}, + }; + + use committable::Committable; + use futures::stream::StreamExt; + use hotshot_types::data::Leaf2; + use super::test_helpers::*; use crate::{ availability::{payload_size, BlockId}, @@ -131,12 +143,6 @@ pub mod availability_tests { }, types::HeightIndexed, }; - use committable::Committable; - use futures::stream::StreamExt; - use hotshot_types::data::Leaf2; - use std::collections::HashMap; - use std::fmt::Debug; - use std::ops::{Bound, RangeBounds}; async fn validate(ds: &impl TestableDataSource) { // Check the consistency of every block/leaf pair. 
Keep track of payloads and transactions @@ -537,6 +543,10 @@ pub mod availability_tests { #[cfg(any(test, feature = "testing"))] #[espresso_macros::generic_tests] pub mod persistence_tests { + use committable::Committable; + use hotshot_example_types::state_types::{TestInstanceState, TestValidatedState}; + use hotshot_types::simple_certificate::QuorumCertificate2; + use crate::{ availability::{BlockQueryData, LeafQueryData}, data_source::{ @@ -552,9 +562,6 @@ pub mod persistence_tests { types::HeightIndexed, Leaf2, }; - use committable::Committable; - use hotshot_example_types::state_types::{TestInstanceState, TestValidatedState}; - use hotshot_types::simple_certificate::QuorumCertificate2; #[tokio::test(flavor = "multi_thread")] pub async fn test_revert() @@ -756,6 +763,24 @@ pub mod persistence_tests { #[cfg(any(test, feature = "testing"))] #[espresso_macros::generic_tests] pub mod node_tests { + use std::time::Duration; + + use committable::Committable; + use futures::{future::join_all, stream::StreamExt}; + use hotshot::traits::BlockPayload; + use hotshot_example_types::{ + block_types::{TestBlockHeader, TestBlockPayload, TestMetadata}, + node_types::TestTypes, + state_types::{TestInstanceState, TestValidatedState}, + }; + use hotshot_types::{ + data::{vid_commitment, VidCommitment, VidShare}, + traits::{block_contents::EncodeBytes, node_implementation::Versions}, + vid::advz::{advz_scheme, ADVZScheme}, + }; + use jf_vid::VidScheme; + use vbs::version::StaticVersionType; + use crate::{ availability::{ BlockInfo, BlockQueryData, LeafQueryData, QueryableHeader, VidCommonQueryData, @@ -773,24 +798,6 @@ pub mod node_tests { types::HeightIndexed, Header, }; - use committable::Committable; - use futures::{future::join_all, stream::StreamExt}; - use hotshot::traits::BlockPayload; - use hotshot_example_types::{ - block_types::TestBlockPayload, node_types::TestTypes, state_types::TestValidatedState, - }; - use hotshot_example_types::{ - block_types::{TestBlockHeader, TestMetadata}, - state_types::TestInstanceState, - }; - use hotshot_types::{ - data::{vid_commitment, VidCommitment, VidShare}, - traits::{block_contents::EncodeBytes, node_implementation::Versions}, - vid::advz::{advz_scheme, ADVZScheme}, - }; - use jf_vid::VidScheme; - use std::time::Duration; - use vbs::version::StaticVersionType; #[tokio::test(flavor = "multi_thread")] pub async fn test_sync_status() @@ -1387,6 +1394,8 @@ pub mod node_tests { #[cfg(any(test, feature = "testing"))] #[espresso_macros::generic_tests] pub mod status_tests { + use std::time::Duration; + use crate::{ status::StatusDataSource, testing::{ @@ -1395,7 +1404,6 @@ pub mod status_tests { setup_test, sleep, }, }; - use std::time::Duration; #[tokio::test(flavor = "multi_thread")] pub async fn test_metrics() { diff --git a/hotshot-query-service/src/data_source/extension.rs b/hotshot-query-service/src/data_source/extension.rs index 8eb38d4668..467cda0891 100644 --- a/hotshot-query-service/src/data_source/extension.rs +++ b/hotshot-query-service/src/data_source/extension.rs @@ -10,8 +10,14 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . 
+use std::ops::{Bound, RangeBounds}; + +use async_trait::async_trait; +use hotshot_types::{data::VidShare, traits::node_implementation::NodeType}; +use jf_merkle_tree::prelude::MerkleProof; +use tagged_base64::TaggedBase64; + use super::VersionedDataSource; -use crate::data_source::storage::pruning::PrunedHeightDataSource; use crate::{ availability::{ AvailabilityDataSource, BlockId, BlockInfo, BlockQueryData, Fetch, FetchStream, LeafId, @@ -19,6 +25,7 @@ use crate::{ TransactionHash, TransactionQueryData, UpdateAvailabilityData, VidCommonMetadata, VidCommonQueryData, }, + data_source::storage::pruning::PrunedHeightDataSource, explorer::{self, ExplorerDataSource, ExplorerHeader, ExplorerTransaction}, merklized_state::{ MerklizedState, MerklizedStateDataSource, MerklizedStateHeightPersistence, Snapshot, @@ -29,12 +36,6 @@ use crate::{ status::{HasMetrics, StatusDataSource}, Header, Payload, QueryResult, Transaction, }; -use async_trait::async_trait; -use hotshot_types::data::VidShare; -use hotshot_types::traits::node_implementation::NodeType; -use jf_merkle_tree::prelude::MerkleProof; -use std::ops::{Bound, RangeBounds}; -use tagged_base64::TaggedBase64; /// Wrapper to add extensibility to an existing data source. /// /// [`ExtensibleDataSource`] adds app-specific data to any existing data source. It implements all @@ -500,6 +501,8 @@ where #[cfg(any(test, feature = "testing"))] mod impl_testable_data_source { + use hotshot::types::Event; + use super::*; use crate::{ data_source::UpdateDataSource, @@ -508,7 +511,6 @@ mod impl_testable_data_source { mocks::MockTypes, }, }; - use hotshot::types::Event; #[async_trait] impl DataSourceLifeCycle for ExtensibleDataSource @@ -540,7 +542,6 @@ mod impl_testable_data_source { mod test { use super::ExtensibleDataSource; use crate::testing::consensus::MockDataSource; - // For some reason this is the only way to import the macro defined in another module of this // crate. use crate::*; diff --git a/hotshot-query-service/src/data_source/fetching.rs b/hotshot-query-service/src/data_source/fetching.rs index e98fa236df..cec480bf65 100644 --- a/hotshot-query-service/src/data_source/fetching.rs +++ b/hotshot-query-service/src/data_source/fetching.rs @@ -73,6 +73,38 @@ //! different request for the same object, one that permitted an active fetch. Or it may have been //! fetched [proactively](#proactive-fetching). 
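
// [Editor's sketch, not part of the patch] A condensed version of the retry
// discipline this module's docs describe: storage reads are retried with
// exponential backoff instead of failing the request outright. `load` is a
// hypothetical stand-in for a local-storage read; only the `backoff` crate
// calls mirror the imports this hunk adds.
use std::time::Duration;

use backoff::{backoff::Backoff, ExponentialBackoffBuilder};

async fn load_with_retries<T>(
    mut load: impl FnMut() -> anyhow::Result<Option<T>>,
) -> Option<T> {
    let mut backoff = ExponentialBackoffBuilder::new()
        .with_initial_interval(Duration::from_millis(100))
        .with_max_interval(Duration::from_secs(5))
        .with_max_elapsed_time(Some(Duration::from_secs(30)))
        .build();
    loop {
        match load() {
            // Some(obj): present locally; None: genuinely missing, so the
            // caller should fall back to fetching from a peer.
            Ok(found) => return found,
            Err(err) => tracing::warn!("storage read failed, will retry: {err:#}"),
        }
        match backoff.next_backoff() {
            Some(delay) => tokio::time::sleep(delay).await,
            // Backoff exhausted (max elapsed time reached); give up.
            None => return None,
        }
    }
}
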
+use std::{ + cmp::{max, min}, + fmt::{Debug, Display}, + iter::repeat_with, + marker::PhantomData, + ops::{Bound, Range, RangeBounds}, + sync::Arc, + time::Duration, +}; + +use anyhow::{bail, Context}; +use async_lock::Semaphore; +use async_trait::async_trait; +use backoff::{backoff::Backoff, ExponentialBackoff, ExponentialBackoffBuilder}; +use derivative::Derivative; +use futures::{ + channel::oneshot, + future::{self, join_all, BoxFuture, Either, Future, FutureExt}, + stream::{self, BoxStream, StreamExt}, +}; +use hotshot_types::{ + data::VidShare, + traits::{ + metrics::{Gauge, Metrics}, + node_implementation::NodeType, + }, +}; +use jf_merkle_tree::{prelude::MerkleProof, MerkleTreeScheme}; +use tagged_base64::TaggedBase64; +use tokio::{spawn, time::sleep}; +use tracing::Instrument; + use super::{ notifier::Notifier, storage::{ @@ -84,13 +116,12 @@ use super::{ }, Transaction, VersionedDataSource, }; -use crate::availability::HeaderQueryData; use crate::{ availability::{ - AvailabilityDataSource, BlockId, BlockInfo, BlockQueryData, Fetch, FetchStream, LeafId, - LeafQueryData, PayloadMetadata, PayloadQueryData, QueryableHeader, QueryablePayload, - TransactionHash, TransactionQueryData, UpdateAvailabilityData, VidCommonMetadata, - VidCommonQueryData, + AvailabilityDataSource, BlockId, BlockInfo, BlockQueryData, Fetch, FetchStream, + HeaderQueryData, LeafId, LeafQueryData, PayloadMetadata, PayloadQueryData, QueryableHeader, + QueryablePayload, TransactionHash, TransactionQueryData, UpdateAvailabilityData, + VidCommonMetadata, VidCommonQueryData, }, explorer::{self, ExplorerDataSource}, fetching::{self, request, Provider}, @@ -104,36 +135,6 @@ use crate::{ types::HeightIndexed, Header, Payload, QueryError, QueryResult, }; -use anyhow::{bail, Context}; -use async_lock::Semaphore; -use async_trait::async_trait; -use backoff::{backoff::Backoff, ExponentialBackoff, ExponentialBackoffBuilder}; -use derivative::Derivative; -use futures::{ - channel::oneshot, - future::{self, join_all, BoxFuture, Either, Future, FutureExt}, - stream::{self, BoxStream, StreamExt}, -}; -use hotshot_types::{ - data::VidShare, - traits::{ - metrics::{Gauge, Metrics}, - node_implementation::NodeType, - }, -}; -use jf_merkle_tree::{prelude::MerkleProof, MerkleTreeScheme}; -use std::sync::Arc; -use std::{ - cmp::{max, min}, - fmt::{Debug, Display}, - iter::repeat_with, - marker::PhantomData, - ops::{Bound, Range, RangeBounds}, - time::Duration, -}; -use tagged_base64::TaggedBase64; -use tokio::{spawn, time::sleep}; -use tracing::Instrument; mod block; mod header; @@ -467,15 +468,15 @@ where match storage.prune(&mut pruner).await { Ok(Some(height)) => { tracing::warn!("Pruned to height {height}"); - } + }, Ok(None) => { tracing::warn!("pruner run complete."); break; - } + }, Err(e) => { tracing::error!("pruner run failed: {e:?}"); break; - } + }, } } } @@ -977,7 +978,7 @@ where ?req, "unable to fetch object; spawning a task to retry: {err:#}" ); - } + }, } // We'll use this channel to get the object back if we successfully load it on retry. @@ -1005,14 +1006,14 @@ where tracing::info!(?req, "object was ready after retries"); send.send(obj).ok(); break; - } + }, Ok(None) => { // The object was not immediately available after all, but we have // successfully spawned a fetch for it if possible. The spawned fetch // will notify the original request once it completes. 
tracing::info!(?req, "spawned fetch after retries"); break; - } + }, Err(err) => { tracing::warn!( ?req, @@ -1023,7 +1024,7 @@ where if let Some(next_delay) = backoff.next_backoff() { delay = next_delay; } - } + }, } } } @@ -1058,12 +1059,12 @@ where tracing::debug!(?req, "object missing from local storage, will try to fetch"); self.fetch::(&mut tx, req).await?; Ok(None) - } + }, Err(err) => { // An error occurred while querying the database. We don't know if we need to fetch // the object or not. Return an error so we can try again. bail!("failed to fetch resource {req:?} from local storage: {err:#}"); - } + }, } } @@ -1224,13 +1225,13 @@ where None => passive(T::Request::from(chunk.start + i), passive_fetch), }) .collect(); - } + }, Err(err) => { tracing::warn!( ?chunk, "unable to fetch chunk; spawning a task to retry: {err:#}" ); - } + }, } // We'll use these channels to get the objects back that we successfully load on retry. @@ -1272,7 +1273,7 @@ where } } break; - } + }, Err(err) => { tracing::warn!( ?chunk, @@ -1283,7 +1284,7 @@ where if let Some(next_delay) = backoff.next_backoff() { delay = next_delay; } - } + }, } } } @@ -1432,7 +1433,7 @@ where backoff = min(2 * backoff, max_backoff); metrics.backoff.set(backoff.as_secs() as usize); continue; - } + }, }; let heights = match Heights::load(&mut tx).await { Ok(heights) => heights, @@ -1443,7 +1444,7 @@ where backoff = min(2 * backoff, max_backoff); metrics.backoff.set(backoff.as_secs() as usize); continue; - } + }, }; metrics.retries.set(0); break heights; @@ -1577,7 +1578,7 @@ where tracing::error!("unable to open read tx: {err:#}"); sleep(Duration::from_secs(5)).await; continue; - } + }, }; match tx.load_prev_aggregate().await { Ok(agg) => break agg, @@ -1585,7 +1586,7 @@ where tracing::error!("unable to load previous aggregate: {err:#}"); sleep(Duration::from_secs(5)).await; continue; - } + }, } }; @@ -1629,7 +1630,7 @@ where match res { Ok(()) => { break; - } + }, Err(err) => { tracing::warn!( num_blocks, @@ -1637,7 +1638,7 @@ where "failed to update aggregates for chunk: {err:#}" ); sleep(Duration::from_secs(1)).await; - } + }, } } metrics.height.set(height as usize); @@ -2201,7 +2202,7 @@ impl ResultExt for Result { "error loading resource from local storage, will try to fetch: {err:#}" ); None - } + }, } } } @@ -2320,7 +2321,7 @@ where // dropped. If this happens, things are very broken in any case, and it is // better to panic loudly than simply block forever. panic!("notifier dropped without satisfying request {req:?}"); - } + }, } }) .boxed(), diff --git a/hotshot-query-service/src/data_source/fetching/block.rs b/hotshot-query-service/src/data_source/fetching/block.rs index 980bb5a032..c0a255ef6f 100644 --- a/hotshot-query-service/src/data_source/fetching/block.rs +++ b/hotshot-query-service/src/data_source/fetching/block.rs @@ -12,6 +12,14 @@ //! [`Fetchable`] implementation for [`BlockQueryData`] and [`PayloadQueryData`]. 
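
// [Editor's sketch, not part of the patch] Blocks, payloads, and VID common
// data are fetched header-first: once the header is known, a callback decides
// what to request next, as the HeaderCallback dispatch in header.rs below
// shows. This is a stripped-down stand-in with simplified types, not the
// crate's real HeaderCallback.
enum HeaderCallback {
    Block,
    VidCommon,
}

fn on_header_fetched(height: u64, callback: HeaderCallback) {
    match callback {
        // With the header in hand we know the payload commitment, so the
        // payload itself can now be requested from peers.
        HeaderCallback::Block => println!("fetched header {height}, will now fetch block"),
        HeaderCallback::VidCommon => {
            println!("fetched header {height}, will now fetch VID common")
        },
    }
}
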
+use std::{cmp::Ordering, future::IntoFuture, iter::once, ops::RangeBounds, sync::Arc}; + +use async_trait::async_trait; +use derivative::Derivative; +use derive_more::From; +use futures::future::{BoxFuture, FutureExt}; +use hotshot_types::traits::{block_contents::BlockHeader, node_implementation::NodeType}; + use super::{ header::{fetch_header_and_then, HeaderCallback}, AvailabilityProvider, FetchRequest, Fetchable, Fetcher, Heights, Notifiers, RangedFetchable, @@ -34,12 +42,6 @@ use crate::{ types::HeightIndexed, Header, Payload, QueryResult, }; -use async_trait::async_trait; -use derivative::Derivative; -use derive_more::From; -use futures::future::{BoxFuture, FutureExt}; -use hotshot_types::traits::{block_contents::BlockHeader, node_implementation::NodeType}; -use std::{cmp::Ordering, future::IntoFuture, iter::once, ops::RangeBounds, sync::Arc}; pub(super) type PayloadFetcher = fetching::Fetcher>; diff --git a/hotshot-query-service/src/data_source/fetching/header.rs b/hotshot-query-service/src/data_source/fetching/header.rs index 6782fb5d37..02a8f7c629 100644 --- a/hotshot-query-service/src/data_source/fetching/header.rs +++ b/hotshot-query-service/src/data_source/fetching/header.rs @@ -12,36 +12,31 @@ //! Header fetching. +use std::{cmp::Ordering, future::IntoFuture, sync::Arc}; + +use anyhow::bail; +use async_trait::async_trait; +use committable::Committable; +use derivative::Derivative; +use futures::{future::BoxFuture, FutureExt}; +use hotshot_types::traits::{block_contents::BlockHeader, node_implementation::NodeType}; + use super::{ block::fetch_block_with_header, leaf::fetch_leaf_with_callbacks, vid::fetch_vid_common_with_header, AvailabilityProvider, Fetcher, }; -use crate::data_source::fetching::Fetchable; -use crate::data_source::fetching::HeaderQueryData; -use crate::data_source::fetching::LeafQueryData; -use crate::data_source::fetching::Notifiers; -use crate::QueryResult; use crate::{ availability::{BlockId, QueryablePayload}, data_source::{ + fetching::{Fetchable, HeaderQueryData, LeafQueryData, Notifiers}, storage::{ pruning::PrunedHeightStorage, AvailabilityStorage, NodeStorage, UpdateAvailabilityStorage, }, update::VersionedDataSource, }, - Header, Payload, QueryError, + Header, Payload, QueryError, QueryResult, }; -use anyhow::bail; -use async_trait::async_trait; -use committable::Committable; -use derivative::Derivative; -use futures::future::BoxFuture; -use futures::FutureExt; -use hotshot_types::traits::{block_contents::BlockHeader, node_implementation::NodeType}; -use std::cmp::Ordering; -use std::future::IntoFuture; -use std::sync::Arc; impl From> for HeaderQueryData { fn from(leaf: LeafQueryData) -> Self { @@ -188,14 +183,14 @@ where header.block_number() ); fetch_block_with_header(fetcher, header); - } + }, Self::VidCommon { fetcher } => { tracing::info!( "fetched leaf {}, will now fetch VID common", header.block_number() ); fetch_vid_common_with_header(fetcher, header); - } + }, } } } @@ -225,17 +220,17 @@ where Ok(header) => { callback.run(header); return Ok(()); - } + }, Err(QueryError::Missing | QueryError::NotFound) => { // We successfully queried the database, but the header wasn't there. Fall through to // fetching it. tracing::debug!(?req, "header not available locally; trying fetch"); - } + }, Err(QueryError::Error { message }) => { // An error occurred while querying the database. We don't know if we need to fetch the // header or not. Return an error so we can try again. 
bail!("failed to fetch header for block {req:?}: {message}"); - } + }, } // If the header is _not_ present, we may still be able to fetch the request, but we need to @@ -245,16 +240,16 @@ where match req { BlockId::Number(n) => { fetch_leaf_with_callbacks(tx, callback.fetcher(), n.into(), [callback.into()]).await?; - } + }, BlockId::Hash(h) => { // Given only the hash, we cannot tell if the corresponding leaf actually exists, since // we don't have a corresponding header. Therefore, we will not spawn an active fetch. tracing::debug!("not fetching unknown block {h}"); - } + }, BlockId::PayloadHash(h) => { // Same as above, we don't fetch a block with a payload that is not known to exist. tracing::debug!("not fetching block with unknown payload {h}"); - } + }, } Ok(()) diff --git a/hotshot-query-service/src/data_source/fetching/leaf.rs b/hotshot-query-service/src/data_source/fetching/leaf.rs index 3692d01851..0f2b37097d 100644 --- a/hotshot-query-service/src/data_source/fetching/leaf.rs +++ b/hotshot-query-service/src/data_source/fetching/leaf.rs @@ -12,6 +12,18 @@ //! [`Fetchable`] implementation for [`LeafQueryData`]. +use std::{cmp::Ordering, future::IntoFuture, iter::once, ops::RangeBounds, sync::Arc}; + +use anyhow::bail; +use async_trait::async_trait; +use committable::Committable; +use derivative::Derivative; +use derive_more::From; +use futures::future::{BoxFuture, FutureExt}; +use hotshot_types::traits::node_implementation::NodeType; +use tokio::spawn; +use tracing::Instrument; + use super::{ header::HeaderCallback, AvailabilityProvider, FetchRequest, Fetchable, Fetcher, Heights, Notifiers, RangedFetchable, Storable, @@ -29,17 +41,6 @@ use crate::{ types::HeightIndexed, Payload, QueryError, QueryResult, }; -use anyhow::bail; -use async_trait::async_trait; -use committable::Committable; -use derivative::Derivative; -use derive_more::From; -use futures::future::{BoxFuture, FutureExt}; -use hotshot_types::traits::node_implementation::NodeType; -use std::sync::Arc; -use std::{cmp::Ordering, future::IntoFuture, iter::once, ops::RangeBounds}; -use tokio::spawn; -use tracing::Instrument; pub(super) type LeafFetcher = fetching::Fetcher, LeafCallback>; @@ -172,19 +173,19 @@ where callbacks, ); return Ok(()); - } + }, Err(QueryError::Missing | QueryError::NotFound) => { // We successfully queried the database, but the next leaf wasn't there. We // know for sure that based on the current state of the DB, we cannot fetch this // leaf. tracing::debug!(n, "not fetching leaf with unknown successor"); return Ok(()); - } + }, Err(QueryError::Error { message }) => { // An error occurred while querying the database. We don't know if we need to // fetch the leaf or not. Return an error so we can try again. bail!("failed to fetch successor for leaf {n}: {message}"); - } + }, }; let fetcher = fetcher.clone(); @@ -197,13 +198,13 @@ where fetcher.provider.clone(), once(LeafCallback::Leaf { fetcher }).chain(callbacks), ); - } + }, LeafId::Hash(h) => { // We don't actively fetch leaves when requested by hash, because we have no way of // knowing whether a leaf with such a hash actually exists, and we don't want to bother // peers with requests for non-existent leaves. 
tracing::debug!("not fetching unknown leaf {h}"); - } + }, } Ok(()) @@ -262,7 +263,7 @@ pub(super) fn trigger_fetch_for_parent( if tx.get_leaf(((height - 1) as usize).into()).await.is_ok() { return; } - } + }, Err(err) => { // If we can't open a transaction, we can't be sure that we already have the // parent, so we fall through to fetching it just to be safe. @@ -271,7 +272,7 @@ pub(super) fn trigger_fetch_for_parent( %parent, "error opening transaction to check for parent leaf: {err:#}", ); - } + }, } tracing::info!(height, %parent, "received new leaf; fetching missing parent"); @@ -369,7 +370,7 @@ impl Ord for LeafCallback { (Self::Continuation { callback: cb1 }, Self::Continuation { callback: cb2 }) => { cb1.cmp(cb2) - } + }, _ => Ordering::Equal, } } @@ -396,7 +397,7 @@ where // Trigger a fetch of the parent leaf, if we don't already have it. trigger_fetch_for_parent(&fetcher, &leaf); fetcher.store_and_notify(leaf).await; - } + }, Self::Continuation { callback } => callback.run(leaf.leaf.block_header().clone()), } } diff --git a/hotshot-query-service/src/data_source/fetching/transaction.rs b/hotshot-query-service/src/data_source/fetching/transaction.rs index f19681b24d..baf581166b 100644 --- a/hotshot-query-service/src/data_source/fetching/transaction.rs +++ b/hotshot-query-service/src/data_source/fetching/transaction.rs @@ -12,6 +12,13 @@ //! Transaction fetching. +use std::sync::Arc; + +use async_trait::async_trait; +use derive_more::From; +use futures::future::{BoxFuture, FutureExt}; +use hotshot_types::traits::node_implementation::NodeType; + use super::{AvailabilityProvider, FetchRequest, Fetchable, Fetcher, Notifiers}; use crate::{ availability::{QueryablePayload, TransactionHash, TransactionQueryData}, @@ -24,11 +31,6 @@ use crate::{ }, Payload, QueryResult, }; -use async_trait::async_trait; -use derive_more::From; -use futures::future::{BoxFuture, FutureExt}; -use hotshot_types::traits::node_implementation::NodeType; -use std::sync::Arc; #[derive(Clone, Copy, Debug, From)] pub(super) struct TransactionRequest(TransactionHash); diff --git a/hotshot-query-service/src/data_source/fetching/vid.rs b/hotshot-query-service/src/data_source/fetching/vid.rs index 51c948c79f..618c11e077 100644 --- a/hotshot-query-service/src/data_source/fetching/vid.rs +++ b/hotshot-query-service/src/data_source/fetching/vid.rs @@ -12,6 +12,17 @@ //! [`Fetchable`] implementation for [`VidCommonQueryData`]. 
+use std::{cmp::Ordering, future::IntoFuture, iter::once, ops::RangeBounds, sync::Arc}; + +use async_trait::async_trait; +use derivative::Derivative; +use derive_more::From; +use futures::future::{BoxFuture, FutureExt}; +use hotshot_types::{ + data::VidShare, + traits::{block_contents::BlockHeader, node_implementation::NodeType}, +}; + use super::{ header::{fetch_header_and_then, HeaderCallback}, AvailabilityProvider, FetchRequest, Fetchable, Fetcher, Heights, Notifiers, RangedFetchable, @@ -30,16 +41,6 @@ use crate::{ types::HeightIndexed, Header, Payload, QueryResult, }; -use async_trait::async_trait; -use derivative::Derivative; -use derive_more::From; -use futures::future::{BoxFuture, FutureExt}; -use hotshot_types::{ - data::VidShare, - traits::{block_contents::BlockHeader, node_implementation::NodeType}, -}; -use std::sync::Arc; -use std::{cmp::Ordering, future::IntoFuture, iter::once, ops::RangeBounds}; pub(super) type VidCommonFetcher = fetching::Fetcher>; diff --git a/hotshot-query-service/src/data_source/fs.rs b/hotshot-query-service/src/data_source/fs.rs index c77664a06d..83f6ee7bd8 100644 --- a/hotshot-query-service/src/data_source/fs.rs +++ b/hotshot-query-service/src/data_source/fs.rs @@ -12,16 +12,17 @@ #![cfg(feature = "file-system-data-source")] +use std::path::Path; + +use atomic_store::AtomicStoreLoader; +use hotshot_types::traits::node_implementation::NodeType; + +pub use super::storage::fs::Transaction; use super::{storage::FileSystemStorage, AvailabilityProvider, FetchingDataSource}; use crate::{ availability::{query_data::QueryablePayload, QueryableHeader}, Header, Payload, }; -use atomic_store::AtomicStoreLoader; -use hotshot_types::traits::node_implementation::NodeType; -use std::path::Path; - -pub use super::storage::fs::Transaction; /// A data source for the APIs provided in this crate, backed by the local file system. /// @@ -239,14 +240,15 @@ where #[cfg(any(test, feature = "testing"))] mod impl_testable_data_source { + use async_trait::async_trait; + use hotshot::types::Event; + use tempfile::TempDir; + use super::*; use crate::{ data_source::UpdateDataSource, testing::{consensus::DataSourceLifeCycle, mocks::MockTypes}, }; - use async_trait::async_trait; - use hotshot::types::Event; - use tempfile::TempDir; #[async_trait] impl + Default> DataSourceLifeCycle @@ -279,11 +281,10 @@ mod impl_testable_data_source { #[cfg(test)] mod test { use super::FileSystemDataSource; - use crate::{fetching::provider::NoFetching, testing::mocks::MockTypes}; - // For some reason this is the only way to import the macro defined in another module of this // crate. use crate::*; + use crate::{fetching::provider::NoFetching, testing::mocks::MockTypes}; instantiate_data_source_tests!(FileSystemDataSource); } diff --git a/hotshot-query-service/src/data_source/metrics.rs b/hotshot-query-service/src/data_source/metrics.rs index a539673a10..e59171ce36 100644 --- a/hotshot-query-service/src/data_source/metrics.rs +++ b/hotshot-query-service/src/data_source/metrics.rs @@ -12,12 +12,13 @@ #![cfg(feature = "metrics-data-source")] +use async_trait::async_trait; + use crate::{ metrics::PrometheusMetrics, status::{HasMetrics, StatusDataSource}, QueryError, QueryResult, }; -use async_trait::async_trait; /// A minimal data source for the status API provided in this crate, with no persistent storage. 
/// @@ -82,9 +83,10 @@ impl StatusDataSource for MetricsDataSource { #[cfg(any(test, feature = "testing"))] mod impl_testable_data_source { + use hotshot::types::Event; + use super::*; use crate::testing::{consensus::DataSourceLifeCycle, mocks::MockTypes}; - use hotshot::types::Event; #[async_trait] impl DataSourceLifeCycle for MetricsDataSource { @@ -112,9 +114,7 @@ mod impl_testable_data_source { #[cfg(test)] mod test { - use super::super::status_tests; - use super::MetricsDataSource; - + use super::{super::status_tests, MetricsDataSource}; // For some reason this is the only way to import the macro defined in another module of this // crate. use crate::*; diff --git a/hotshot-query-service/src/data_source/notifier.rs b/hotshot-query-service/src/data_source/notifier.rs index a21e497c9f..53227c726d 100644 --- a/hotshot-query-service/src/data_source/notifier.rs +++ b/hotshot-query-service/src/data_source/notifier.rs @@ -70,16 +70,21 @@ //! spawned to fetch missing resources and send them through the [`Notifier`], but these should be //! relatively few and rare. +use std::{ + future::IntoFuture, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, +}; + use async_lock::Mutex; use derivative::Derivative; use futures::future::{BoxFuture, FutureExt}; -use std::sync::Arc; -use std::{ - future::IntoFuture, - sync::atomic::{AtomicBool, Ordering}, +use tokio::sync::{ + mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}, + oneshot, }; -use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}; -use tokio::sync::oneshot; use tracing::warn; /// A predicate on a type ``. @@ -286,11 +291,12 @@ where #[cfg(test)] mod test { + use std::time::Duration; + use tokio::time::timeout; use super::*; use crate::testing::setup_test; - use std::time::Duration; #[tokio::test(flavor = "multi_thread")] async fn test_notify_drop() { diff --git a/hotshot-query-service/src/data_source/sql.rs b/hotshot-query-service/src/data_source/sql.rs index b667b66862..21c09b3b42 100644 --- a/hotshot-query-service/src/data_source/sql.rs +++ b/hotshot-query-service/src/data_source/sql.rs @@ -12,6 +12,11 @@ #![cfg(feature = "sql-data-source")] +pub use anyhow::Error; +use hotshot_types::traits::node_implementation::NodeType; +pub use refinery::Migration; +pub use sql::Transaction; + use super::{ fetching::{self}, storage::sql::{self, SqlStorage}, @@ -22,11 +27,6 @@ use crate::{ availability::{QueryableHeader, QueryablePayload}, Header, Payload, }; -pub use anyhow::Error; -use hotshot_types::traits::node_implementation::NodeType; -pub use refinery::Migration; - -pub use sql::Transaction; pub type Builder = fetching::Builder; @@ -318,15 +318,15 @@ where // These tests run the `postgres` Docker image, which doesn't work on Windows. #[cfg(all(any(test, feature = "testing"), not(target_os = "windows")))] pub mod testing { + use async_trait::async_trait; + use hotshot::types::Event; + pub use sql::testing::TmpDb; + use super::*; use crate::{ data_source::UpdateDataSource, testing::{consensus::DataSourceLifeCycle, mocks::MockTypes}, }; - use async_trait::async_trait; - use hotshot::types::Event; - - pub use sql::testing::TmpDb; #[async_trait] impl + Default> DataSourceLifeCycle @@ -372,17 +372,20 @@ pub mod testing { #[cfg(all(test, not(target_os = "windows")))] mod generic_test { use super::SqlDataSource; - use crate::{fetching::provider::NoFetching, testing::mocks::MockTypes}; - // For some reason this is the only way to import the macro defined in another module of this // crate. 
use crate::*; + use crate::{fetching::provider::NoFetching, testing::mocks::MockTypes}; instantiate_data_source_tests!(SqlDataSource); } #[cfg(all(test, not(target_os = "windows")))] mod test { + use hotshot_example_types::state_types::{TestInstanceState, TestValidatedState}; + use hotshot_types::{data::VidShare, vid::advz::advz_scheme}; + use jf_vid::VidScheme; + use super::*; use crate::{ availability::{ @@ -396,9 +399,6 @@ mod test { fetching::provider::NoFetching, testing::{consensus::DataSourceLifeCycle, mocks::MockTypes, setup_test}, }; - use hotshot_example_types::state_types::{TestInstanceState, TestValidatedState}; - use hotshot_types::{data::VidShare, vid::advz::advz_scheme}; - use jf_vid::VidScheme; type D = SqlDataSource; diff --git a/hotshot-query-service/src/data_source/storage.rs b/hotshot-query-service/src/data_source/storage.rs index 227950d9e7..46e66dba00 100644 --- a/hotshot-query-service/src/data_source/storage.rs +++ b/hotshot-query-service/src/data_source/storage.rs @@ -56,6 +56,14 @@ //! [`AvailabilityDataSource`](crate::availability::AvailabilityDataSource) in fallibility. //! +use std::ops::RangeBounds; + +use async_trait::async_trait; +use futures::future::Future; +use hotshot_types::{data::VidShare, traits::node_implementation::NodeType}; +use jf_merkle_tree::prelude::MerkleProof; +use tagged_base64::TaggedBase64; + use crate::{ availability::{ BlockId, BlockQueryData, LeafId, LeafQueryData, PayloadMetadata, PayloadQueryData, @@ -76,12 +84,6 @@ use crate::{ node::{SyncStatus, TimeWindowQueryData, WindowStart}, Header, Payload, QueryResult, Transaction, }; -use async_trait::async_trait; -use futures::future::Future; -use hotshot_types::{data::VidShare, traits::node_implementation::NodeType}; -use jf_merkle_tree::prelude::MerkleProof; -use std::ops::RangeBounds; -use tagged_base64::TaggedBase64; pub mod fail_storage; pub mod fs; diff --git a/hotshot-query-service/src/data_source/storage/fail_storage.rs b/hotshot-query-service/src/data_source/storage/fail_storage.rs index 8398e8c303..d090bbfa5e 100644 --- a/hotshot-query-service/src/data_source/storage/fail_storage.rs +++ b/hotshot-query-service/src/data_source/storage/fail_storage.rs @@ -12,6 +12,13 @@ #![cfg(any(test, feature = "testing"))] +use std::{ops::RangeBounds, sync::Arc}; + +use async_lock::Mutex; +use async_trait::async_trait; +use futures::future::Future; +use hotshot_types::{data::VidShare, traits::node_implementation::NodeType}; + use super::{ pruning::{PruneStorage, PrunedHeightStorage, PrunerCfg, PrunerConfig}, sql::MigrateTypes, @@ -32,12 +39,6 @@ use crate::{ status::HasMetrics, Header, Payload, QueryError, QueryResult, }; -use async_lock::Mutex; -use async_trait::async_trait; -use futures::future::Future; -use hotshot_types::{data::VidShare, traits::node_implementation::NodeType}; -use std::ops::RangeBounds; -use std::sync::Arc; /// A specific action that can be targeted to inject an error. 
#[derive(Clone, Copy, Debug, PartialEq, Eq)] @@ -87,8 +88,8 @@ impl FailureMode { match self { Self::Once(fail_action) if fail_action.matches(action) => { *self = Self::Never; - } - Self::Always(fail_action) if fail_action.matches(action) => {} + }, + Self::Always(fail_action) if fail_action.matches(action) => {}, _ => return Ok(()), } diff --git a/hotshot-query-service/src/data_source/storage/fs.rs b/hotshot-query-service/src/data_source/storage/fs.rs index 3e58a04911..a0e3ff1aa7 100644 --- a/hotshot-query-service/src/data_source/storage/fs.rs +++ b/hotshot-query-service/src/data_source/storage/fs.rs @@ -12,6 +12,28 @@ #![cfg(feature = "file-system-data-source")] +use std::{ + collections::{ + hash_map::{Entry, HashMap}, + BTreeMap, + }, + hash::Hash, + ops::{Bound, Deref, RangeBounds}, + path::Path, +}; + +use async_lock::{RwLock, RwLockReadGuard, RwLockWriteGuard}; +use async_trait::async_trait; +use atomic_store::{AtomicStore, AtomicStoreLoader, PersistenceError}; +use committable::Committable; +use futures::future::Future; +use hotshot_types::{ + data::{VidCommitment, VidShare}, + traits::{block_contents::BlockHeader, node_implementation::NodeType}, +}; +use serde::{de::DeserializeOwned, Serialize}; +use snafu::OptionExt; + use super::{ ledger_log::{Iter, LedgerLog}, pruning::{PruneStorage, PrunedHeightStorage, PrunerConfig}, @@ -19,7 +41,6 @@ use super::{ Aggregate, AggregatesStorage, AvailabilityStorage, NodeStorage, PayloadMetadata, UpdateAggregatesStorage, UpdateAvailabilityStorage, VidCommonMetadata, }; - use crate::{ availability::{ data_source::{BlockId, LeafId}, @@ -35,24 +56,6 @@ use crate::{ types::HeightIndexed, ErrorSnafu, Header, MissingSnafu, NotFoundSnafu, Payload, QueryError, QueryResult, }; -use async_lock::{RwLock, RwLockReadGuard, RwLockWriteGuard}; -use async_trait::async_trait; -use atomic_store::{AtomicStore, AtomicStoreLoader, PersistenceError}; -use committable::Committable; -use futures::future::Future; -use hotshot_types::{ - data::{VidCommitment, VidShare}, - traits::{block_contents::BlockHeader, node_implementation::NodeType}, -}; -use serde::{de::DeserializeOwned, Serialize}; -use snafu::OptionExt; -use std::collections::{ - hash_map::{Entry, HashMap}, - BTreeMap, -}; -use std::hash::Hash; -use std::ops::{Bound, Deref, RangeBounds}; -use std::path::Path; const CACHED_LEAVES_COUNT: usize = 100; const CACHED_BLOCKS_COUNT: usize = 100; @@ -88,10 +91,10 @@ where BlockId::Number(n) => Ok(n), BlockId::Hash(h) => { Ok(*self.index_by_block_hash.get(&h).context(NotFoundSnafu)? as usize) - } + }, BlockId::PayloadHash(h) => { Ok(*self.index_by_payload_hash.get(&h).context(NotFoundSnafu)? as usize) - } + }, } } @@ -405,11 +408,11 @@ where iter.nth(n - 1); } n - } + }, Bound::Excluded(n) => { iter.nth(n); n + 1 - } + }, Bound::Unbounded => 0, }; @@ -662,10 +665,10 @@ fn update_index_by_hash(index: &mut HashMap, hash: H // Overwrite the existing entry if the new object was sequenced first. e.insert(pos); } - } + }, Entry::Vacant(e) => { e.insert(pos); - } + }, } } @@ -772,7 +775,7 @@ where // entry in `index_by_time` has a non-empty list associated with it, so this // indexing is safe. 
blocks[0] - } + }, } as usize; let mut res = TimeWindowQueryData::default(); diff --git a/hotshot-query-service/src/data_source/storage/ledger_log.rs b/hotshot-query-service/src/data_source/storage/ledger_log.rs index 4ff798ee52..e18523c7b5 100644 --- a/hotshot-query-service/src/data_source/storage/ledger_log.rs +++ b/hotshot-query-service/src/data_source/storage/ledger_log.rs @@ -12,12 +12,12 @@ #![cfg(feature = "file-system-data-source")] +use std::{collections::VecDeque, fmt::Debug}; + use atomic_store::{ append_log, load_store::BincodeLoadStore, AppendLog, AtomicStoreLoader, PersistenceError, }; use serde::{de::DeserializeOwned, Serialize}; -use std::collections::VecDeque; -use std::fmt::Debug; use tracing::{debug, warn}; /// A caching append log for ledger objects. @@ -262,11 +262,12 @@ impl ExactSizeIterator for Iter<'_, T> #[cfg(test)] mod test { - use super::*; - use crate::testing::setup_test; use atomic_store::AtomicStore; use tempfile::TempDir; + use super::*; + use crate::testing::setup_test; + #[tokio::test(flavor = "multi_thread")] async fn test_ledger_log_creation() { setup_test(); diff --git a/hotshot-query-service/src/data_source/storage/pruning.rs b/hotshot-query-service/src/data_source/storage/pruning.rs index 26e1243729..081b3a1fbf 100644 --- a/hotshot-query-service/src/data_source/storage/pruning.rs +++ b/hotshot-query-service/src/data_source/storage/pruning.rs @@ -10,9 +10,10 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . +use std::{fmt::Debug, time::Duration}; + use anyhow::bail; use async_trait::async_trait; -use std::{fmt::Debug, time::Duration}; #[derive(Clone, Debug)] pub struct PrunerCfg { diff --git a/hotshot-query-service/src/data_source/storage/sql.rs b/hotshot-query-service/src/data_source/storage/sql.rs index b12cb5efbb..2cbd0b20d6 100644 --- a/hotshot-query-service/src/data_source/storage/sql.rs +++ b/hotshot-query-service/src/data_source/storage/sql.rs @@ -11,32 +11,22 @@ // see . 
#![cfg(feature = "sql-data-source")] -use crate::{ - data_source::{ - storage::pruning::{PruneStorage, PrunerCfg, PrunerConfig}, - update::Transaction as _, - VersionedDataSource, - }, - metrics::PrometheusMetrics, - status::HasMetrics, - QueryError, QueryResult, -}; +use std::{cmp::min, fmt::Debug, str::FromStr, time::Duration}; + use anyhow::Context; use async_trait::async_trait; use chrono::Utc; use committable::Committable; +#[cfg(not(feature = "embedded-db"))] +use futures::future::FutureExt; use hotshot_types::{ data::{Leaf, Leaf2, VidShare}, simple_certificate::{QuorumCertificate, QuorumCertificate2}, traits::{metrics::Metrics, node_implementation::NodeType}, vid::advz::ADVZShare, }; - use itertools::Itertools; use log::LevelFilter; - -#[cfg(not(feature = "embedded-db"))] -use futures::future::FutureExt; #[cfg(not(feature = "embedded-db"))] use sqlx::postgres::{PgConnectOptions, PgSslMode}; #[cfg(feature = "embedded-db")] @@ -45,7 +35,17 @@ use sqlx::{ pool::{Pool, PoolOptions}, ConnectOptions, Row, }; -use std::{cmp::min, fmt::Debug, str::FromStr, time::Duration}; + +use crate::{ + data_source::{ + storage::pruning::{PruneStorage, PrunerCfg, PrunerConfig}, + update::Transaction as _, + VersionedDataSource, + }, + metrics::PrometheusMetrics, + status::HasMetrics, + QueryError, QueryResult, +}; pub extern crate sqlx; pub use sqlx::{Database, Sqlite}; @@ -55,10 +55,6 @@ mod queries; mod transaction; pub use anyhow::Error; -// This needs to be reexported so that we can reference it by absolute path relative to this crate -// in the expansion of `include_migrations`, even when `include_migrations` is invoked from another -// crate which doesn't have `include_dir` as a dependency. -pub use crate::include_migrations; pub use db::*; pub use include_dir::include_dir; pub use queries::QueryBuilder; @@ -66,6 +62,10 @@ pub use refinery::Migration; pub use transaction::*; use self::{migrate::Migrator, transaction::PoolMetrics}; +// This needs to be reexported so that we can reference it by absolute path relative to this crate +// in the expansion of `include_migrations`, even when `include_migrations` is invoked from another +// crate which doesn't have `include_dir` as a dependency. +pub use crate::include_migrations; /// Embed migrations from the given directory into the current binary for PostgreSQL or SQLite. 
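/// A usage sketch: the path argument follows the `include_dir` convention
/// reexported above, so an environment-variable prefix is allowed (the exact
/// directory here is illustrative, not prescriptive):
///
/// ```ignore
/// let migrations: Vec<Migration> =
///     include_migrations!("$CARGO_MANIFEST_DIR/migrations/postgres").collect();
/// ```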
/// @@ -577,11 +577,11 @@ impl SqlStorage { match runner.run_async(&mut Migrator::from(&mut conn)).await { Ok(report) => { tracing::info!("ran DB migrations: {report:?}"); - } + }, Err(err) => { tracing::error!("DB migrations failed: {:?}", err.report()); Err(err)?; - } + }, } } @@ -709,7 +709,7 @@ impl PruneStorage for SqlStorage { }; height - } + }, }; // Prune data exceeding target retention in batches @@ -973,21 +973,19 @@ impl MigrateTypes for SqlStorage { #[cfg(all(any(test, feature = "testing"), not(target_os = "windows")))] pub mod testing { #![allow(unused_imports)] - use refinery::Migration; use std::{ env, process::{Command, Stdio}, str::{self, FromStr}, time::Duration, }; - use tokio::net::TcpStream; - use tokio::time::timeout; use portpicker::pick_unused_port; + use refinery::Migration; + use tokio::{net::TcpStream, time::timeout}; use super::Config; - use crate::availability::query_data::QueryableHeader; - use crate::testing::sleep; + use crate::{availability::query_data::QueryableHeader, testing::sleep}; #[derive(Debug)] pub struct TmpDb { #[cfg(not(feature = "embedded-db"))] @@ -1275,28 +1273,28 @@ pub mod testing { // These tests run the `postgres` Docker image, which doesn't work on Windows. #[cfg(all(test, not(target_os = "windows")))] mod test { + use std::time::Duration; + use committable::{Commitment, CommitmentBoundsArkless, Committable}; use hotshot::traits::BlockPayload; use hotshot_example_types::{ node_types::TestVersions, state_types::{TestInstanceState, TestValidatedState}, }; - use jf_vid::VidScheme; - use hotshot_types::{ - data::vid_commitment, - traits::{node_implementation::Versions, EncodeBytes}, - vid::advz::advz_scheme, - }; - use hotshot_types::{ - data::{QuorumProposal, ViewNumber}, + data::{vid_commitment, QuorumProposal, ViewNumber}, simple_vote::QuorumData, - traits::{block_contents::BlockHeader, node_implementation::ConsensusTime}, + traits::{ + block_contents::BlockHeader, + node_implementation::{ConsensusTime, Versions}, + EncodeBytes, + }, + vid::advz::advz_scheme, }; use jf_merkle_tree::{ prelude::UniversalMerkleTree, MerkleTreeScheme, ToTraversalPath, UniversalMerkleTreeScheme, }; - use std::time::Duration; + use jf_vid::VidScheme; use tokio::time::sleep; use vbs::version::StaticVersionType; diff --git a/hotshot-query-service/src/data_source/storage/sql/migrate.rs b/hotshot-query-service/src/data_source/storage/sql/migrate.rs index c86c0fb34c..2ae4056a82 100644 --- a/hotshot-query-service/src/data_source/storage/sql/migrate.rs +++ b/hotshot-query-service/src/data_source/storage/sql/migrate.rs @@ -1,4 +1,3 @@ -use super::{queries::DecodeError, Db}; use async_trait::async_trait; use derive_more::From; use futures::stream::StreamExt; @@ -9,6 +8,8 @@ use refinery_core::{ use sqlx::{pool::PoolConnection, Acquire, Executor, Row}; use time::{format_description::well_known::Rfc3339, OffsetDateTime}; +use super::{queries::DecodeError, Db}; + /// Run migrations using a sqlx connection. /// /// While SQLx has its own built-in migration functionality, we use Refinery, and alas we must diff --git a/hotshot-query-service/src/data_source/storage/sql/queries.rs b/hotshot-query-service/src/data_source/storage/sql/queries.rs index 696aca5ba3..0f3db6dcc1 100644 --- a/hotshot-query-service/src/data_source/storage/sql/queries.rs +++ b/hotshot-query-service/src/data_source/storage/sql/queries.rs @@ -13,27 +13,30 @@ //! Immutable query functionality of a SQL database. 
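//!
//! As a standalone sketch of one piece of this module reflowed below: how
//! `QueryBuilder` turns Rust range bounds into SQL `WHERE` constraints. This
//! simplified version inlines values instead of binding parameters:
//!
//! ```
//! use std::ops::Bound;
//!
//! fn bounds_to_where(column: &str, start: Bound<usize>, end: Bound<usize>) -> Vec<String> {
//!     let mut constraints = Vec::new();
//!     match start {
//!         Bound::Included(n) => constraints.push(format!("{column} >= {n}")),
//!         Bound::Excluded(n) => constraints.push(format!("{column} > {n}")),
//!         Bound::Unbounded => {},
//!     }
//!     match end {
//!         Bound::Included(n) => constraints.push(format!("{column} <= {n}")),
//!         Bound::Excluded(n) => constraints.push(format!("{column} < {n}")),
//!         Bound::Unbounded => {},
//!     }
//!     constraints // the real code joins these with " AND "
//! }
//! ```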
+use std::{ + fmt::Display, + ops::{Bound, RangeBounds}, +}; + +use anyhow::Context; +use derivative::Derivative; +use hotshot_types::{ + simple_certificate::QuorumCertificate2, + traits::{ + block_contents::{BlockHeader, BlockPayload}, + node_implementation::NodeType, + }, +}; +use sqlx::{Arguments, FromRow, Row}; + use super::{Database, Db, Query, QueryAs, Transaction}; -use crate::Leaf2; use crate::{ availability::{ BlockId, BlockQueryData, LeafQueryData, PayloadQueryData, QueryablePayload, VidCommonQueryData, }, data_source::storage::{PayloadMetadata, VidCommonMetadata}, - Header, Payload, QueryError, QueryResult, -}; -use anyhow::Context; -use derivative::Derivative; -use hotshot_types::simple_certificate::QuorumCertificate2; -use hotshot_types::traits::{ - block_contents::{BlockHeader, BlockPayload}, - node_implementation::NodeType, -}; -use sqlx::{Arguments, FromRow, Row}; -use std::{ - fmt::Display, - ops::{Bound, RangeBounds}, + Header, Leaf2, Payload, QueryError, QueryResult, }; pub(super) mod availability; @@ -137,20 +140,20 @@ impl QueryBuilder<'_> { match range.start_bound() { Bound::Included(n) => { bounds.push(format!("{column} >= {}", self.bind(*n as i64)?)); - } + }, Bound::Excluded(n) => { bounds.push(format!("{column} > {}", self.bind(*n as i64)?)); - } - Bound::Unbounded => {} + }, + Bound::Unbounded => {}, } match range.end_bound() { Bound::Included(n) => { bounds.push(format!("{column} <= {}", self.bind(*n as i64)?)); - } + }, Bound::Excluded(n) => { bounds.push(format!("{column} < {}", self.bind(*n as i64)?)); - } - Bound::Unbounded => {} + }, + Bound::Unbounded => {}, } let mut where_clause = bounds.join(" AND "); diff --git a/hotshot-query-service/src/data_source/storage/sql/queries/availability.rs b/hotshot-query-service/src/data_source/storage/sql/queries/availability.rs index 0a51d28bee..140b0cafdb 100644 --- a/hotshot-query-service/src/data_source/storage/sql/queries/availability.rs +++ b/hotshot-query-service/src/data_source/storage/sql/queries/availability.rs @@ -12,27 +12,30 @@ //! Availability storage implementation for a database query engine. 
+use std::ops::RangeBounds; + +use async_trait::async_trait; +use futures::stream::{StreamExt, TryStreamExt}; +use hotshot_types::traits::node_implementation::NodeType; +use snafu::OptionExt; +use sqlx::FromRow; + use super::{ super::transaction::{query, Transaction, TransactionMode}, QueryBuilder, BLOCK_COLUMNS, LEAF_COLUMNS, PAYLOAD_COLUMNS, PAYLOAD_METADATA_COLUMNS, VID_COMMON_COLUMNS, VID_COMMON_METADATA_COLUMNS, }; -use crate::data_source::storage::sql::sqlx::Row; use crate::{ availability::{ BlockId, BlockQueryData, LeafId, LeafQueryData, PayloadQueryData, QueryableHeader, QueryablePayload, TransactionHash, TransactionQueryData, VidCommonQueryData, }, - data_source::storage::{AvailabilityStorage, PayloadMetadata, VidCommonMetadata}, + data_source::storage::{ + sql::sqlx::Row, AvailabilityStorage, PayloadMetadata, VidCommonMetadata, + }, types::HeightIndexed, ErrorSnafu, Header, MissingSnafu, Payload, QueryError, QueryResult, }; -use async_trait::async_trait; -use futures::stream::{StreamExt, TryStreamExt}; -use hotshot_types::traits::node_implementation::NodeType; -use snafu::OptionExt; -use sqlx::FromRow; -use std::ops::RangeBounds; #[async_trait] impl AvailabilityStorage for Transaction diff --git a/hotshot-query-service/src/data_source/storage/sql/queries/explorer.rs b/hotshot-query-service/src/data_source/storage/sql/queries/explorer.rs index 14fb489a00..a769e5f4ee 100644 --- a/hotshot-query-service/src/data_source/storage/sql/queries/explorer.rs +++ b/hotshot-query-service/src/data_source/storage/sql/queries/explorer.rs @@ -12,6 +12,16 @@ //! Explorer storage implementation for a database query engine. +use std::{collections::VecDeque, num::NonZeroUsize}; + +use async_trait::async_trait; +use committable::{Commitment, Committable}; +use futures::stream::{self, StreamExt, TryStreamExt}; +use hotshot_types::traits::node_implementation::NodeType; +use itertools::Itertools; +use sqlx::{types::Json, FromRow, Row}; +use tagged_base64::{Tagged, TaggedBase64}; + use super::{ super::transaction::{query, Transaction, TransactionMode}, Database, Db, DecodeError, BLOCK_COLUMNS, @@ -33,14 +43,6 @@ use crate::{ }, Header, Payload, QueryError, QueryResult, Transaction as HotshotTransaction, }; -use async_trait::async_trait; -use committable::{Commitment, Committable}; -use futures::stream::{self, StreamExt, TryStreamExt}; -use hotshot_types::traits::node_implementation::NodeType; -use itertools::Itertools; -use sqlx::{types::Json, FromRow, Row}; -use std::{collections::VecDeque, num::NonZeroUsize}; -use tagged_base64::{Tagged, TaggedBase64}; impl From for GetExplorerSummaryError { fn from(err: sqlx::Error) -> Self { @@ -282,7 +284,7 @@ where let query_stmt = match request.target { BlockIdentifier::Latest => { query(&GET_BLOCK_SUMMARIES_QUERY_FOR_LATEST).bind(request.num_blocks.get() as i64) - } + }, BlockIdentifier::Height(height) => query(&GET_BLOCK_SUMMARIES_QUERY_FOR_HEIGHT) .bind(height as i64) .bind(request.num_blocks.get() as i64), @@ -305,10 +307,10 @@ where BlockIdentifier::Latest => query(&GET_BLOCK_DETAIL_QUERY_FOR_LATEST), BlockIdentifier::Height(height) => { query(&GET_BLOCK_DETAIL_QUERY_FOR_HEIGHT).bind(height as i64) - } + }, BlockIdentifier::Hash(hash) => { query(&GET_BLOCK_DETAIL_QUERY_FOR_HASH).bind(hash.to_string()) - } + }, }; let query_result = query_stmt.fetch_one(self.as_mut()).await?; @@ -375,7 +377,7 @@ where TransactionSummaryFilter::Block(block) => { query(&GET_TRANSACTION_SUMMARIES_QUERY_FOR_BLOCK).bind(*block as i64) - } + }, }; let block_stream = query_stmt @@ 
-432,10 +434,10 @@ where query(&GET_TRANSACTION_DETAIL_QUERY_FOR_HEIGHT_AND_OFFSET) .bind(height as i64) .bind(offset as i64) - } + }, TransactionIdentifier::Hash(hash) => { query(&GET_TRANSACTION_DETAIL_QUERY_FOR_HASH).bind(hash.to_string()) - } + }, }; let query_row = query_stmt.fetch_one(self.as_mut()).await?; @@ -455,7 +457,7 @@ where key: format!("at {height} and {offset}"), }), ) - } + }, TransactionIdentifier::Hash(hash) => txns .into_iter() .enumerate() diff --git a/hotshot-query-service/src/data_source/storage/sql/queries/node.rs b/hotshot-query-service/src/data_source/storage/sql/queries/node.rs index 326d29e695..dfd99bfc9f 100644 --- a/hotshot-query-service/src/data_source/storage/sql/queries/node.rs +++ b/hotshot-query-service/src/data_source/storage/sql/queries/node.rs @@ -12,6 +12,18 @@ //! Node storage implementation for a database query engine. +use std::ops::{Bound, RangeBounds}; + +use anyhow::anyhow; +use async_trait::async_trait; +use futures::stream::{StreamExt, TryStreamExt}; +use hotshot_types::{ + data::VidShare, + traits::{block_contents::BlockHeader, node_implementation::NodeType}, +}; +use snafu::OptionExt; +use sqlx::Row; + use super::{ super::transaction::{query, query_as, Transaction, TransactionMode, Write}, parse_header, DecodeError, QueryBuilder, HEADER_COLUMNS, @@ -24,16 +36,6 @@ use crate::{ types::HeightIndexed, Header, MissingSnafu, NotFoundSnafu, QueryError, QueryResult, }; -use anyhow::anyhow; -use async_trait::async_trait; -use futures::stream::{StreamExt, TryStreamExt}; -use hotshot_types::{ - data::VidShare, - traits::{block_contents::BlockHeader, node_implementation::NodeType}, -}; -use snafu::OptionExt; -use sqlx::Row; -use std::ops::{Bound, RangeBounds}; #[async_trait] impl NodeStorage for Transaction @@ -50,11 +52,11 @@ where // The height of the block is the number of blocks below it, so the total number of // blocks is one more than the height of the highest block. Ok(height as usize + 1) - } + }, (None,) => { // If there are no blocks yet, the height is 0. Ok(0) - } + }, } } @@ -174,11 +176,11 @@ where // The height of the block is the number of blocks below it, so the total number of // blocks is one more than the height of the highest block. height as usize + 1 - } + }, None => { // If there are no blocks yet, the height is 0. 0 - } + }, }; let total_leaves = row.get::("total_leaves") as usize; let null_payloads = row.get::("null_payloads") as usize; @@ -216,7 +218,7 @@ where // sufficient data to answer the query is not as simple as just trying `load_header` // for a specific block ID. return self.time_window::(t, end, limit).await; - } + }, WindowStart::Height(h) => h, WindowStart::Hash(h) => self.load_header::(h).await?.block_number(), }; @@ -479,7 +481,7 @@ async fn aggregate_range_bounds( return Ok(None); } height - 1 - } + }, }; Ok(Some((from, to))) } diff --git a/hotshot-query-service/src/data_source/storage/sql/queries/state.rs b/hotshot-query-service/src/data_source/storage/sql/queries/state.rs index 4e3d41d141..328d538ab5 100644 --- a/hotshot-query-service/src/data_source/storage/sql/queries/state.rs +++ b/hotshot-query-service/src/data_source/storage/sql/queries/state.rs @@ -12,17 +12,11 @@ //! Merklized state storage implementation for a database query engine. 
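//!
//! The proof-loading code below decodes each database row into a Merkle node
//! according to which columns are non-NULL. Restated as a standalone sketch
//! (the branch arm is inferred from surrounding code this excerpt truncates):
//!
//! ```
//! enum NodeKind { Branch, Leaf, Empty }
//!
//! // row = (children, children_bitvec, index, entry)
//! fn classify<C, B, I, E>(row: (Option<C>, Option<B>, Option<I>, Option<E>)) -> Result<NodeKind, String> {
//!     match row {
//!         (Some(_), Some(_), _, _) => Ok(NodeKind::Branch),     // assumed: child data present
//!         (None, None, Some(_), Some(_)) => Ok(NodeKind::Leaf), // an entry makes it a leaf
//!         (None, None, Some(_), None) => Ok(NodeKind::Empty),   // indexed but empty
//!         _ => Err("Invalid type of merkle node found".to_string()),
//!     }
//! }
//! ```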
-use super::{ - super::transaction::{query_as, Transaction, TransactionMode, Write}, - DecodeError, QueryBuilder, -}; -use crate::data_source::storage::sql::sqlx::Row; -use crate::data_source::storage::{pruning::PrunedHeightStorage, sql::build_where_in}; -use crate::{ - data_source::storage::{MerklizedStateHeightStorage, MerklizedStateStorage}, - merklized_state::{MerklizedState, Snapshot}, - QueryError, QueryResult, +use std::{ + collections::{HashMap, HashSet, VecDeque}, + sync::Arc, }; + use ark_serialize::CanonicalDeserialize; use async_trait::async_trait; use futures::stream::TryStreamExt; @@ -31,10 +25,21 @@ use jf_merkle_tree::{ prelude::{MerkleNode, MerkleProof}, DigestAlgorithm, MerkleCommitment, ToTraversalPath, }; -use sqlx::types::BitVec; -use sqlx::types::JsonValue; -use std::collections::{HashMap, HashSet, VecDeque}; -use std::sync::Arc; +use sqlx::types::{BitVec, JsonValue}; + +use super::{ + super::transaction::{query_as, Transaction, TransactionMode, Write}, + DecodeError, QueryBuilder, +}; +use crate::{ + data_source::storage::{ + pruning::PrunedHeightStorage, + sql::{build_where_in, sqlx::Row}, + MerklizedStateHeightStorage, MerklizedStateStorage, + }, + merklized_state::{MerklizedState, Snapshot}, + QueryError, QueryResult, +}; #[async_trait] impl MerklizedStateStorage @@ -148,7 +153,7 @@ where .decode_error("malformed merkle node value")?, children: child_nodes, }); - } + }, // If it has an entry, it's a leaf (None, None, Some(index), Some(entry)) => { proof_path.push_back(MerkleNode::Leaf { @@ -159,16 +164,16 @@ where elem: serde_json::from_value(entry.clone()) .decode_error("malformed merkle element")?, }); - } + }, // Otherwise, it's empty. (None, None, Some(_), None) => { proof_path.push_back(MerkleNode::Empty); - } + }, _ => { return Err(QueryError::Error { message: "Invalid type of merkle node found".to_string(), }); - } + }, } } } @@ -223,7 +228,7 @@ where State::Digest::digest(&data).map_err(|err| QueryError::Error { message: format!("failed to update digest: {err:#}"), }) - } + }, MerkleNode::Empty => Ok(init), _ => Err(QueryError::Error { message: "Invalid type of Node in the proof".to_string(), @@ -292,7 +297,7 @@ impl Transaction { .await?; (height, commit) - } + }, Snapshot::Index(created) => { let created = created as i64; let (commit,) = query_as::<(String,)>(&format!( @@ -307,7 +312,7 @@ impl Transaction { let commit = serde_json::from_value(commit.into()) .decode_error("malformed state commitment")?; (created, commit) - } + }, }; // Make sure the requested snapshot is up to date. diff --git a/hotshot-query-service/src/data_source/storage/sql/transaction.rs b/hotshot-query-service/src/data_source/storage/sql/transaction.rs index f7443e36a7..850f8006f8 100644 --- a/hotshot-query-service/src/data_source/storage/sql/transaction.rs +++ b/hotshot-query-service/src/data_source/storage/sql/transaction.rs @@ -18,26 +18,12 @@ //! database connection, so that the updated state of the database can be queried midway through a //! transaction. 
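//!
//! In sketch form (assuming a data source `ds` whose write transaction
//! implements `UpdateAvailabilityStorage`, as elsewhere in this crate):
//!
//! ```ignore
//! let mut tx = ds.write().await?; // open a read-write transaction
//! tx.insert_leaf(leaf).await?;    // stage an update
//! // queries through `tx` already observe the staged leaf ...
//! tx.commit().await?;             // ... and commit publishes it atomically
//! ```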
-use super::{ - queries::{ - self, - state::{build_hash_batch_insert, Node}, - DecodeError, - }, - Database, Db, -}; -use crate::{ - availability::{ - BlockQueryData, LeafQueryData, QueryableHeader, QueryablePayload, VidCommonQueryData, - }, - data_source::{ - storage::{pruning::PrunedHeightStorage, UpdateAvailabilityStorage}, - update, - }, - merklized_state::{MerklizedState, UpdateStateData}, - types::HeightIndexed, - Header, Payload, QueryError, QueryResult, +use std::{ + collections::{HashMap, HashSet}, + marker::PhantomData, + time::Instant, }; + use anyhow::{bail, Context}; use ark_serialize::CanonicalSerialize; use async_trait::async_trait; @@ -55,13 +41,30 @@ use hotshot_types::{ }; use itertools::Itertools; use jf_merkle_tree::prelude::{MerkleNode, MerkleProof}; -use sqlx::types::BitVec; pub use sqlx::Executor; -use sqlx::{pool::Pool, query_builder::Separated, Encode, FromRow, QueryBuilder, Type}; -use std::{ - collections::{HashMap, HashSet}, - marker::PhantomData, - time::Instant, +use sqlx::{ + pool::Pool, query_builder::Separated, types::BitVec, Encode, FromRow, QueryBuilder, Type, +}; + +use super::{ + queries::{ + self, + state::{build_hash_batch_insert, Node}, + DecodeError, + }, + Database, Db, +}; +use crate::{ + availability::{ + BlockQueryData, LeafQueryData, QueryableHeader, QueryablePayload, VidCommonQueryData, + }, + data_source::{ + storage::{pruning::PrunedHeightStorage, UpdateAvailabilityStorage}, + update, + }, + merklized_state::{MerklizedState, UpdateStateData}, + types::HeightIndexed, + Header, Payload, QueryError, QueryResult, }; pub type Query<'q> = sqlx::query::Query<'q, Db, ::Arguments<'q>>; @@ -681,10 +684,10 @@ impl, const ARITY: usize> [0_u8; 32].to_vec(), )); hashset.insert([0_u8; 32].to_vec()); - } + }, MerkleNode::ForgettenSubtree { .. } => { bail!("Node in the Merkle path contains a forgetten subtree"); - } + }, MerkleNode::Leaf { value, pos, elem } => { let mut leaf_commit = Vec::new(); // Serialize the leaf node hash value into a vector @@ -711,7 +714,7 @@ impl, const ARITY: usize> )); hashset.insert(leaf_commit); - } + }, MerkleNode::Branch { value, children } => { // Get hash let mut branch_hash = Vec::new(); @@ -728,7 +731,7 @@ impl, const ARITY: usize> match child { MerkleNode::Empty => { children_bitvec.push(false); - } + }, MerkleNode::Branch { value, .. } | MerkleNode::Leaf { value, .. } | MerkleNode::ForgettenSubtree { value } => { @@ -740,7 +743,7 @@ impl, const ARITY: usize> children_values.push(hash); // Mark the entry as 1 in bitvec to indicate a non-empty child children_bitvec.push(true); - } + }, } } @@ -758,7 +761,7 @@ impl, const ARITY: usize> )); hashset.insert(branch_hash); hashset.extend(children_values); - } + }, } // advance the traversal path for the internal nodes at each iteration @@ -798,7 +801,7 @@ impl, const ARITY: usize> } } - Node::upsert(name, nodes.into_iter().map(|(n, _, _)| n), self).await?; + Node::upsert(name, nodes.into_iter().map(|(n, ..)| n), self).await?; Ok(()) } diff --git a/hotshot-query-service/src/data_source/update.rs b/hotshot-query-service/src/data_source/update.rs index 86ec016938..c3b832846f 100644 --- a/hotshot-query-service/src/data_source/update.rs +++ b/hotshot-query-service/src/data_source/update.rs @@ -11,32 +11,33 @@ // see . //! A generic algorithm for updating a HotShot Query Service data source with new data. 
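//!
//! Schematically, each decided leaf is processed as follows (a simplified
//! restatement of the code below, with error handling elided):
//!
//! ```ignore
//! if let EventType::Decide { leaf_chain, qc, .. } = event.event {
//!     for LeafInfo { leaf, vid_share, .. } in leaf_chain.iter().rev() {
//!         // the leaf and its QC become the canonical leaf record
//!         let leaf_data = LeafQueryData::new(leaf.clone(), qc.clone())?;
//!         // the payload, when the leaf carries one, becomes the block record
//!         let block_data = leaf
//!             .block_payload()
//!             .map(|payload| BlockQueryData::new(leaf.block_header().clone(), payload));
//!         // VID data comes from consensus, except for the genesis block,
//!         // whose VID is recomputed locally (see `genesis_vid` below);
//!         // everything is then appended to storage in one atomic batch.
//!     }
//! }
//! ```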
-use crate::{ - availability::{ - BlockInfo, BlockQueryData, LeafQueryData, QueryablePayload, UpdateAvailabilityData, - VidCommonQueryData, - }, - Payload, -}; +use std::iter::once; + use anyhow::{ensure, Context}; use async_trait::async_trait; use futures::future::Future; use hotshot::types::{Event, EventType}; -use hotshot_types::{data::VidCommitment, event::LeafInfo}; use hotshot_types::{ - data::{ns_table::parse_ns_table, Leaf2}, + data::{ns_table::parse_ns_table, Leaf2, VidCommitment, VidDisperseShare, VidShare}, + event::LeafInfo, traits::{ block_contents::{BlockHeader, BlockPayload, EncodeBytes, GENESIS_VID_NUM_STORAGE_NODES}, node_implementation::{ConsensusTime, NodeType}, }, - vid::advz::advz_scheme, -}; -use hotshot_types::{ - data::{VidDisperseShare, VidShare}, - vid::avidm::{init_avidm_param, AvidMScheme}, + vid::{ + advz::advz_scheme, + avidm::{init_avidm_param, AvidMScheme}, + }, }; use jf_vid::VidScheme; -use std::iter::once; + +use crate::{ + availability::{ + BlockInfo, BlockQueryData, LeafQueryData, QueryablePayload, UpdateAvailabilityData, + VidCommonQueryData, + }, + Payload, +}; /// An extension trait for types which implement the update trait for each API module. /// @@ -109,7 +110,7 @@ where "inconsistent leaf; cannot append leaf information: {err:#}" ); return Err(leaf2.block_header().block_number()); - } + }, }; let block_data = leaf2 .block_payload() @@ -141,12 +142,12 @@ where Err(err) => { tracing::warn!("failed to compute genesis VID: {err:#}"); (None, None) - } + }, } } else { (None, None) } - } + }, }; if vid_common.is_none() { @@ -188,7 +189,7 @@ fn genesis_vid( VidCommonQueryData::new(leaf.block_header().clone(), Some(disperse.common)), VidShare::V0(disperse.shares.remove(0)), )) - } + }, VidCommitment::V1(commit) => { let avidm_param = init_avidm_param(GENESIS_VID_NUM_STORAGE_NODES)?; let weights = vec![1; GENESIS_VID_NUM_STORAGE_NODES]; @@ -208,7 +209,7 @@ fn genesis_vid( VidCommonQueryData::new(leaf.block_header().clone(), None), VidShare::V1(shares.remove(0)), )) - } + }, } } diff --git a/hotshot-query-service/src/error.rs b/hotshot-query-service/src/error.rs index 123ac11db9..30f5d7c529 100644 --- a/hotshot-query-service/src/error.rs +++ b/hotshot-query-service/src/error.rs @@ -10,13 +10,15 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . 
-use crate::{availability, explorer, merklized_state, node, status}; +use std::fmt::Display; + use derive_more::From; use serde::{Deserialize, Serialize}; use snafu::Snafu; -use std::fmt::Display; use tide_disco::StatusCode; +use crate::{availability, explorer, merklized_state, node, status}; + #[derive(Clone, Debug, From, Snafu, Deserialize, Serialize)] pub enum Error { #[snafu(display("{source}"))] diff --git a/hotshot-query-service/src/explorer.rs b/hotshot-query-service/src/explorer.rs index ef259a83e5..714aecaef3 100644 --- a/hotshot-query-service/src/explorer.rs +++ b/hotshot-query-service/src/explorer.rs @@ -17,9 +17,7 @@ pub(crate) mod monetary_value; pub(crate) mod query_data; pub(crate) mod traits; -use self::errors::InvalidLimit; -use crate::availability::{QueryableHeader, QueryablePayload}; -use crate::{api::load_api, Header, Payload, Transaction}; +use std::{fmt::Display, num::NonZeroUsize, path::Path}; pub use currency::*; pub use data_source::*; @@ -28,14 +26,17 @@ use hotshot_types::traits::node_implementation::NodeType; pub use monetary_value::*; pub use query_data::*; use serde::{Deserialize, Serialize}; -use std::fmt::Display; -use std::num::NonZeroUsize; -use std::path::Path; -use tide_disco::StatusCode; -use tide_disco::{api::ApiError, method::ReadState, Api}; +use tide_disco::{api::ApiError, method::ReadState, Api, StatusCode}; pub use traits::*; use vbs::version::StaticVersionType; +use self::errors::InvalidLimit; +use crate::{ + api::load_api, + availability::{QueryableHeader, QueryablePayload}, + Header, Payload, Transaction, +}; + /// [Error] is an enum that represents the various errors that can be returned /// from the Explorer API. #[derive(Debug, Clone, Serialize, Deserialize)] @@ -308,7 +309,7 @@ where ) { (Ok(Some(height)), Ok(Some(offset)), _) => { TransactionIdentifier::HeightAndOffset(height, offset) - } + }, (_, _, Ok(Some(hash))) => TransactionIdentifier::Hash(hash), _ => TransactionIdentifier::Latest, }, @@ -341,7 +342,7 @@ where ) { (Ok(Some(height)), Ok(Some(offset)), _) => { TransactionIdentifier::HeightAndOffset(height, offset) - } + }, (_, _, Ok(Some(hash))) => TransactionIdentifier::Hash(hash), _ => TransactionIdentifier::Latest, }; @@ -393,6 +394,13 @@ where #[cfg(test)] mod test { + use std::{cmp::min, time::Duration}; + + use futures::StreamExt; + use portpicker::pick_unused_port; + use surf_disco::Client; + use tide_disco::App; + use super::*; use crate::{ availability, @@ -403,11 +411,6 @@ mod test { }, ApiState, Error, }; - use futures::StreamExt; - use portpicker::pick_unused_port; - use std::{cmp::min, time::Duration}; - use surf_disco::Client; - use tide_disco::App; async fn validate(client: &Client) { let explorer_summary_response: ExplorerSummaryResponse = diff --git a/hotshot-query-service/src/explorer/currency.rs b/hotshot-query-service/src/explorer/currency.rs index 2e128d17e9..0fc3a03d1f 100644 --- a/hotshot-query-service/src/explorer/currency.rs +++ b/hotshot-query-service/src/explorer/currency.rs @@ -10,11 +10,12 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . 
-use super::errors::ExplorerAPIError;
-use serde::ser::SerializeStruct;
-use serde::{Deserialize, Serialize, Serializer};
 use std::fmt::Display;
 
+use serde::{ser::SerializeStruct, Deserialize, Serialize, Serializer};
+
+use super::errors::ExplorerAPIError;
+
 /// CurrencyMismatchError is an error that occurs when two different currencies
 /// are attempted to be combined in any way that would result in an invalid
 /// state.
diff --git a/hotshot-query-service/src/explorer/data_source.rs b/hotshot-query-service/src/explorer/data_source.rs
index aa5613e9b3..088a75f272 100644
--- a/hotshot-query-service/src/explorer/data_source.rs
+++ b/hotshot-query-service/src/explorer/data_source.rs
@@ -10,6 +10,10 @@
 // You should have received a copy of the GNU General Public License along with this program. If not,
 // see .
 
+use async_trait::async_trait;
+use hotshot_types::traits::node_implementation::NodeType;
+use tagged_base64::TaggedBase64;
+
 use super::{
     query_data::{
         BlockDetail, BlockIdentifier, BlockSummary, ExplorerSummary, GetBlockDetailError,
@@ -24,9 +28,6 @@ use crate::{
     availability::{QueryableHeader, QueryablePayload},
     Header, Payload, Transaction,
 };
-use async_trait::async_trait;
-use hotshot_types::traits::node_implementation::NodeType;
-use tagged_base64::TaggedBase64;
 
 /// An interface for querying Data and Statistics from the HotShot Blockchain.
 ///
diff --git a/hotshot-query-service/src/explorer/errors.rs b/hotshot-query-service/src/explorer/errors.rs
index 90c6a0ac1d..eab824d8be 100644
--- a/hotshot-query-service/src/explorer/errors.rs
+++ b/hotshot-query-service/src/explorer/errors.rs
@@ -10,9 +10,9 @@
 // You should have received a copy of the GNU General Public License along with this program. If not,
 // see .
 
-use serde::ser::SerializeStruct;
-use serde::{Deserialize, Serialize, Serializer};
 use std::fmt::{Debug, Display};
+
+use serde::{ser::SerializeStruct, Deserialize, Serialize, Serializer};
 use tide_disco::StatusCode;
 
 /// [ExplorerAPIError] is a trait that represents an error that can be returned
@@ -417,7 +417,7 @@ mod test {
         let want = query_error;
 
         match &have.error {
-            crate::QueryError::NotFound => {}
+            crate::QueryError::NotFound => {},
             _ => panic!("deserialized QueryError mismatch: have: {have}, want: {want}"),
         }
     }
diff --git a/hotshot-query-service/src/explorer/monetary_value.rs b/hotshot-query-service/src/explorer/monetary_value.rs
index 399e18e021..45354e59ee 100644
--- a/hotshot-query-service/src/explorer/monetary_value.rs
+++ b/hotshot-query-service/src/explorer/monetary_value.rs
@@ -10,15 +10,16 @@
 // You should have received a copy of the GNU General Public License along with this program. If not,
 // see .
 
-use super::currency::{CurrencyCode, CurrencyMismatchError};
-use itertools::Itertools;
-use serde::{Deserialize, Serialize, Serializer};
-use std::fmt::Display;
 use std::{
-    fmt::Debug,
+    fmt::{Debug, Display},
     ops::{Add, Sub},
 };
 
+use itertools::Itertools;
+use serde::{Deserialize, Serialize, Serializer};
+
+use super::currency::{CurrencyCode, CurrencyMismatchError};
+
 #[derive(Debug, Clone, PartialEq, Eq)]
 /// [MonetaryValue]s is a struct that pairs a [CurrencyCode] with a value.
 /// This structure is able to represent both positive and negative currencies.
@@ -195,7 +196,7 @@ where return Err(E::custom( "no non-breaking space found in expected MonetaryValue", )) - } + }, }; let first: String = value.chars().take(index).collect(); @@ -244,7 +245,7 @@ fn determine_pre_and_post_decimal_strings(value: &str) -> (String, Option { panic!("{} failed to parse: {}", value, err); - } + }, Ok(result) => result, }; @@ -436,7 +437,7 @@ mod test { let result = match result { Err(err) => { panic!("{} failed to parse: {}", value, err); - } + }, Ok(result) => result, }; diff --git a/hotshot-query-service/src/explorer/query_data.rs b/hotshot-query-service/src/explorer/query_data.rs index 7b4f0b3de7..4bb05062b2 100644 --- a/hotshot-query-service/src/explorer/query_data.rs +++ b/hotshot-query-service/src/explorer/query_data.rs @@ -10,6 +10,17 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . +use std::{ + collections::VecDeque, + fmt::{Debug, Display}, + num::{NonZeroUsize, TryFromIntError}, +}; + +use hotshot_types::traits::node_implementation::NodeType; +use serde::{Deserialize, Serialize}; +use tide_disco::StatusCode; +use time::format_description::well_known::Rfc3339; + use super::{ errors::{BadQuery, ExplorerAPIError, InvalidLimit, NotFound, QueryError, Unimplemented}, monetary_value::MonetaryValue, @@ -17,18 +28,10 @@ use super::{ }; use crate::{ availability::{BlockQueryData, QueryableHeader, QueryablePayload, TransactionHash}, + node::BlockHash, + types::HeightIndexed, Header, Payload, Resolvable, Transaction, }; -use crate::{node::BlockHash, types::HeightIndexed}; -use hotshot_types::traits::node_implementation::NodeType; -use serde::{Deserialize, Serialize}; -use std::{ - collections::VecDeque, - fmt::{Debug, Display}, - num::{NonZeroUsize, TryFromIntError}, -}; -use tide_disco::StatusCode; -use time::format_description::well_known::Rfc3339; /// BlockIdentifier is an enum that represents multiple ways of referring to /// a specific Block. These use cases are specific to a Block Explorer and @@ -79,7 +82,7 @@ impl Display for TransactionIdentifier { TransactionIdentifier::Latest => write!(f, "latest"), TransactionIdentifier::HeightAndOffset(height, offset) => { write!(f, "{} {}", height, offset) - } + }, TransactionIdentifier::Hash(hash) => write!(f, "{}", hash), } } diff --git a/hotshot-query-service/src/explorer/traits.rs b/hotshot-query-service/src/explorer/traits.rs index 7b44e9a0ad..ebd59c53a4 100644 --- a/hotshot-query-service/src/explorer/traits.rs +++ b/hotshot-query-service/src/explorer/traits.rs @@ -10,9 +10,10 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . +use std::fmt::Debug; + use hotshot_types::traits::{block_contents::BlockHeader, node_implementation::NodeType}; use serde::{de::DeserializeOwned, Serialize}; -use std::fmt::Debug; /// [ExplorerHeader] is a trait that represents certain extensions to the /// [BlockHeader] that are specific to the Block Explorer API. This trait diff --git a/hotshot-query-service/src/fetching.rs b/hotshot-query-service/src/fetching.rs index 427f482553..22ea7e8b6f 100644 --- a/hotshot-query-service/src/fetching.rs +++ b/hotshot-query-service/src/fetching.rs @@ -21,16 +21,16 @@ //! implementations of [`Provider`] for various data availability sources. //! 
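//! One invariant worth noting in the hunk below: `Fetcher` deduplicates
//! concurrent requests for the same resource, so at most one fetch per request
//! is ever in flight. In outline:
//!
//! ```ignore
//! match in_progress.entry(req) {
//!     // already being fetched: just piggyback our callbacks on that task
//!     Entry::Occupied(mut e) => e.get_mut().extend(callbacks),
//!     // first requester: register callbacks and start the fetch ourselves
//!     Entry::Vacant(e) => { e.insert(callbacks.into_iter().collect()); },
//! }
//! ```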
-use async_lock::Mutex; -use async_lock::Semaphore; -use backoff::{backoff::Backoff, ExponentialBackoff}; -use derivative::Derivative; use std::{ collections::{hash_map::Entry, BTreeSet, HashMap}, fmt::Debug, sync::Arc, time::Duration, }; + +use async_lock::{Mutex, Semaphore}; +use backoff::{backoff::Backoff, ExponentialBackoff}; +use derivative::Derivative; use tokio::{spawn, time::sleep}; pub mod provider; @@ -122,12 +122,12 @@ impl Fetcher { e.get_mut().extend(callbacks); tracing::info!(?req, callbacks = ?e.get(), "resource is already being fetched"); return; - } + }, Entry::Vacant(e) => { // If the object is not being fetched, we will register our own callback and // then fetch it ourselves. e.insert(callbacks.into_iter().collect()); - } + }, } } diff --git a/hotshot-query-service/src/fetching/provider.rs b/hotshot-query-service/src/fetching/provider.rs index d4f5eb4d05..7a38b4b539 100644 --- a/hotshot-query-service/src/fetching/provider.rs +++ b/hotshot-query-service/src/fetching/provider.rs @@ -35,10 +35,12 @@ //! * [`TestProvider`] //! -use super::Request; -use async_trait::async_trait; use std::sync::Arc; +use async_trait::async_trait; + +use super::Request; + mod any; mod query_service; mod testing; diff --git a/hotshot-query-service/src/fetching/provider/any.rs b/hotshot-query-service/src/fetching/provider/any.rs index 0896abdce4..9a67cdf01b 100644 --- a/hotshot-query-service/src/fetching/provider/any.rs +++ b/hotshot-query-service/src/fetching/provider/any.rs @@ -10,6 +10,12 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . +use std::{fmt::Debug, sync::Arc}; + +use async_trait::async_trait; +use derivative::Derivative; +use hotshot_types::traits::node_implementation::NodeType; + use super::{Provider, Request}; use crate::{ availability::LeafQueryData, @@ -17,11 +23,6 @@ use crate::{ fetching::request::{LeafRequest, PayloadRequest, VidCommonRequest}, Payload, VidCommon, }; -use async_trait::async_trait; -use derivative::Derivative; -use hotshot_types::traits::node_implementation::NodeType; -use std::fmt::Debug; -use std::sync::Arc; /// Blanket trait combining [`Debug`] and [`Provider`]. /// @@ -191,7 +192,7 @@ where providers.len() ); continue; - } + }, } } @@ -201,6 +202,11 @@ where // These tests run the `postgres` Docker image, which doesn't work on Windows. #[cfg(all(test, not(target_os = "windows")))] mod test { + use futures::stream::StreamExt; + use portpicker::pick_unused_port; + use tide_disco::App; + use vbs::version::StaticVersionType; + use super::*; use crate::{ availability::{define_api, AvailabilityDataSource, UpdateAvailabilityData}, @@ -215,10 +221,6 @@ mod test { types::HeightIndexed, ApiState, Error, }; - use futures::stream::StreamExt; - use portpicker::pick_unused_port; - use tide_disco::App; - use vbs::version::StaticVersionType; type Provider = AnyProvider; diff --git a/hotshot-query-service/src/fetching/provider/query_service.rs b/hotshot-query-service/src/fetching/provider/query_service.rs index dbf09aa900..5efca08c99 100644 --- a/hotshot-query-service/src/fetching/provider/query_service.rs +++ b/hotshot-query-service/src/fetching/provider/query_service.rs @@ -10,14 +10,6 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . 
-use super::Provider; - -use crate::{ - availability::{LeafQueryData, PayloadQueryData, VidCommonQueryData}, - fetching::request::{LeafRequest, PayloadRequest, VidCommonRequest}, - types::HeightIndexed, - Error, Payload, VidCommon, -}; use async_trait::async_trait; use committable::Committable; use futures::try_join; @@ -30,6 +22,14 @@ use jf_vid::VidScheme; use surf_disco::{Client, Url}; use vbs::version::StaticVersionType; +use super::Provider; +use crate::{ + availability::{LeafQueryData, PayloadQueryData, VidCommonQueryData}, + fetching::request::{LeafRequest, PayloadRequest, VidCommonRequest}, + types::HeightIndexed, + Error, Payload, VidCommon, +}; + /// Data availability provider backed by another instance of this query service. /// /// This fetcher implements the [`Provider`] interface by querying the REST API provided by another @@ -79,7 +79,7 @@ where Err(err) => { tracing::error!(%err, "unable to compute VID commitment"); return None; - } + }, }, ); if commit != req.0 { @@ -91,11 +91,11 @@ where } Some(payload.data) - } + }, Err(err) => { tracing::error!("failed to fetch payload {req:?}: {err}"); None - } + }, } } } @@ -134,11 +134,11 @@ where leaf.leaf.unfill_block_payload(); Some(leaf) - } + }, Err(err) => { tracing::error!("failed to fetch leaf {req:?}: {err}"); None - } + }, } } } @@ -171,18 +171,18 @@ where tracing::error!(?req, ?res, "Expect VID common data but found None"); None } - } + }, VidCommitment::V1(_) => { if res.common.is_some() { tracing::warn!(?req, ?res, "Expect no VID common data but found some.") } None - } + }, }, Err(err) => { tracing::error!("failed to fetch VID common {req:?}: {err}"); None - } + }, } } } @@ -190,8 +190,20 @@ where // These tests run the `postgres` Docker image, which doesn't work on Windows. #[cfg(all(test, not(target_os = "windows")))] mod test { - use super::*; + use std::{future::IntoFuture, time::Duration}; + use committable::Committable; + use futures::{ + future::{join, FutureExt}, + stream::StreamExt, + }; + use generic_array::GenericArray; + use hotshot_example_types::node_types::TestVersions; + use portpicker::pick_unused_port; + use rand::RngCore; + use tide_disco::{error::ServerError, App}; + + use super::*; use crate::{ api::load_api, availability::{ @@ -219,17 +231,6 @@ mod test { types::HeightIndexed, ApiState, }; - use committable::Committable; - use futures::{ - future::{join, FutureExt}, - stream::StreamExt, - }; - use generic_array::GenericArray; - use hotshot_example_types::node_types::TestVersions; - use portpicker::pick_unused_port; - use rand::RngCore; - use std::{future::IntoFuture, time::Duration}; - use tide_disco::{error::ServerError, App}; type Provider = TestProvider>; @@ -1201,7 +1202,7 @@ mod test { .as_ref() .fail_begins_writable(FailableAction::Any) .await - } + }, FailureType::Write => data_source.as_ref().fail_writes(FailableAction::Any).await, FailureType::Commit => data_source.as_ref().fail_commits(FailableAction::Any).await, } @@ -1304,19 +1305,19 @@ mod test { .as_ref() .fail_one_begin_writable(FailableAction::Any) .await - } + }, FailureType::Write => { data_source .as_ref() .fail_one_write(FailableAction::Any) .await - } + }, FailureType::Commit => { data_source .as_ref() .fail_one_commit(FailableAction::Any) .await - } + }, } assert_eq!(leaves[0], data_source.get_leaf(1).await.await); @@ -1882,7 +1883,7 @@ mod test { for (leaf, payload) in leaves.iter().zip(payloads) { assert_eq!(payload.block_hash, leaf.block_hash()); } - } + }, MetadataType::Vid => { let vids = 
data_source.subscribe_vid_common_metadata(1).await.take(3); @@ -1895,7 +1896,7 @@ mod test { for (leaf, vid) in leaves.iter().zip(vids) { assert_eq!(vid.block_hash, leaf.block_hash()); } - } + }, } } diff --git a/hotshot-query-service/src/fetching/provider/testing.rs b/hotshot-query-service/src/fetching/provider/testing.rs index 22803d7d19..8ebcc904b2 100644 --- a/hotshot-query-service/src/fetching/provider/testing.rs +++ b/hotshot-query-service/src/fetching/provider/testing.rs @@ -12,19 +12,23 @@ #![cfg(any(test, feature = "testing"))] -use super::Provider; -use crate::fetching::Request; +use std::{ + fmt::Debug, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, +}; + use async_lock::RwLock; use async_trait::async_trait; use derivative::Derivative; use hotshot_types::traits::node_implementation::NodeType; -use std::sync::Arc; -use std::{ - fmt::Debug, - sync::atomic::{AtomicBool, Ordering}, -}; use tokio::sync::broadcast; +use super::Provider; +use crate::fetching::Request; + /// Adaptor to add test-only functionality to an existing [`Provider`]. /// /// [`TestProvider`] wraps an existing provider `P` and adds some additional functionality which can diff --git a/hotshot-query-service/src/fetching/request.rs b/hotshot-query-service/src/fetching/request.rs index be3d1de434..b04838d5f2 100644 --- a/hotshot-query-service/src/fetching/request.rs +++ b/hotshot-query-service/src/fetching/request.rs @@ -12,15 +12,15 @@ //! Requests for fetching resources. +use std::{fmt::Debug, hash::Hash}; + +use derive_more::{From, Into}; +use hotshot_types::{data::VidCommitment, traits::node_implementation::NodeType}; + use crate::{ availability::{LeafHash, LeafQueryData, QcHash}, Payload, }; -use derive_more::{From, Into}; -use hotshot_types::{data::VidCommitment, traits::node_implementation::NodeType}; - -use std::fmt::Debug; -use std::hash::Hash; /// A request for a resource. 
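/// For example, payloads are requested by their VID commitment, and the
/// newtype wrappers in this module derive `From`, so either spelling works (a
/// sketch; `provider` is any matching [`Provider`](crate::fetching::Provider)):
///
/// ```ignore
/// let payload = provider.fetch(PayloadRequest(commitment)).await;
/// let payload = provider.fetch(commitment.into()).await;
/// ```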
pub trait Request: Copy + Debug + Eq + Hash + Send { diff --git a/hotshot-query-service/src/lib.rs b/hotshot-query-service/src/lib.rs index 5fafb197dd..eabb0e129f 100644 --- a/hotshot-query-service/src/lib.rs +++ b/hotshot-query-service/src/lib.rs @@ -428,26 +428,25 @@ pub mod task; pub mod testing; pub mod types; -pub use error::Error; -pub use resolvable::Resolvable; +use std::sync::Arc; use async_trait::async_trait; use derive_more::{Deref, From, Into}; +pub use error::Error; use futures::{future::BoxFuture, stream::StreamExt}; use hotshot::types::SystemContextHandle; use hotshot_types::traits::{ node_implementation::{NodeImplementation, NodeType, Versions}, BlockPayload, }; +pub use hotshot_types::{data::Leaf2, simple_certificate::QuorumCertificate}; +pub use resolvable::Resolvable; use serde::{Deserialize, Serialize}; use snafu::Snafu; -use std::sync::Arc; use task::BackgroundTask; use tide_disco::{method::ReadState, App, StatusCode}; use vbs::version::StaticVersionType; -pub use hotshot_types::{data::Leaf2, simple_certificate::QuorumCertificate}; - pub type VidCommon = Option; pub type Payload = ::BlockPayload; @@ -589,6 +588,23 @@ where #[cfg(test)] mod test { + use std::{ + ops::{Bound, RangeBounds}, + time::Duration, + }; + + use async_lock::RwLock; + use async_trait::async_trait; + use atomic_store::{load_store::BincodeLoadStore, AtomicStore, AtomicStoreLoader, RollingLog}; + use futures::future::FutureExt; + use hotshot_types::{data::VidShare, simple_certificate::QuorumCertificate2}; + use portpicker::pick_unused_port; + use surf_disco::Client; + use tempfile::TempDir; + use testing::mocks::MockBase; + use tide_disco::App; + use toml::toml; + use super::*; use crate::{ availability::{ @@ -604,19 +620,6 @@ mod test { mocks::{MockHeader, MockPayload, MockTypes}, }, }; - use async_lock::RwLock; - use async_trait::async_trait; - use atomic_store::{load_store::BincodeLoadStore, AtomicStore, AtomicStoreLoader, RollingLog}; - use futures::future::FutureExt; - use hotshot_types::{data::VidShare, simple_certificate::QuorumCertificate2}; - use portpicker::pick_unused_port; - use std::ops::{Bound, RangeBounds}; - use std::time::Duration; - use surf_disco::Client; - use tempfile::TempDir; - use testing::mocks::MockBase; - use tide_disco::App; - use toml::toml; struct CompositeState { store: AtomicStore, diff --git a/hotshot-query-service/src/merklized_state.rs b/hotshot-query-service/src/merklized_state.rs index 453a34151c..4cbc244270 100644 --- a/hotshot-query-service/src/merklized_state.rs +++ b/hotshot-query-service/src/merklized_state.rs @@ -15,14 +15,16 @@ //! The state API provides an interface for serving queries against arbitrarily old snapshots of the state. //! This allows a full Merkle tree to be reconstructed from storage. //! If any parent state is missing then the partial snapshot can not be queried. 
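//!
//! Snapshots can be addressed either by position in the chain or by tree
//! commitment; a query sketch (assuming a data source implementing this
//! module's `MerklizedStateDataSource`):
//!
//! ```ignore
//! let proof = ds.get_path(Snapshot::Index(block_height), key.clone()).await?;
//! let proof = ds.get_path(Snapshot::Commit(commitment), key).await?;
//! ```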
-use std::{fmt::Display, path::PathBuf}; +use std::{ + fmt::{Debug, Display}, + path::PathBuf, +}; use derive_more::From; use futures::FutureExt; use hotshot_types::traits::node_implementation::NodeType; use serde::{Deserialize, Serialize}; use snafu::{ResultExt, Snafu}; -use std::fmt::Debug; use tagged_base64::TaggedBase64; use tide_disco::{api::ApiError, method::ReadState, Api, RequestError, StatusCode}; use vbs::version::StaticVersionType; diff --git a/hotshot-query-service/src/merklized_state/data_source.rs b/hotshot-query-service/src/merklized_state/data_source.rs index 562071413b..f5b77238ad 100644 --- a/hotshot-query-service/src/merklized_state/data_source.rs +++ b/hotshot-query-service/src/merklized_state/data_source.rs @@ -16,22 +16,20 @@ //! and provides methods for querying and reconstructing the snapshot. //! +use std::{cmp::Ordering, fmt::Debug, str::FromStr}; + use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use async_trait::async_trait; use derivative::Derivative; use derive_more::Display; use hotshot_types::traits::node_implementation::NodeType; - use jf_merkle_tree::{ prelude::MerkleProof, DigestAlgorithm, Element, ForgetableMerkleTreeScheme, Index, MerkleCommitment, NodeValue, ToTraversalPath, }; use serde::{de::DeserializeOwned, Serialize}; -use std::{fmt::Debug, str::FromStr}; use tagged_base64::TaggedBase64; -use std::cmp::Ordering; - use crate::QueryResult; /// This trait defines methods that a data source should implement diff --git a/hotshot-query-service/src/metrics.rs b/hotshot-query-service/src/metrics.rs index 9216fb0a30..ceb365a15a 100644 --- a/hotshot-query-service/src/metrics.rs +++ b/hotshot-query-service/src/metrics.rs @@ -12,6 +12,11 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . +use std::{ + collections::HashMap, + sync::{Arc, RwLock}, +}; + use hotshot_types::traits::metrics; use itertools::Itertools; use prometheus::{ @@ -19,8 +24,6 @@ use prometheus::{ Encoder, HistogramVec, Opts, Registry, TextEncoder, }; use snafu::Snafu; -use std::collections::HashMap; -use std::sync::{Arc, RwLock}; #[derive(Debug, Snafu)] pub enum MetricsError { @@ -444,11 +447,12 @@ impl metrics::MetricsFamily<()> for TextFamily { #[cfg(test)] mod test { - use super::*; - use crate::testing::setup_test; use metrics::Metrics; use tide_disco::metrics::Metrics as _; + use super::*; + use crate::testing::setup_test; + #[test] fn test_prometheus_metrics() { setup_test(); diff --git a/hotshot-query-service/src/node.rs b/hotshot-query-service/src/node.rs index 6499cc3e36..7129d2f307 100644 --- a/hotshot-query-service/src/node.rs +++ b/hotshot-query-service/src/node.rs @@ -20,16 +20,18 @@ //! fully synced with the entire history of the chain. However, the node will _eventually_ sync and //! return the expected counts. 
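//!
//! A client-side sketch of that caveat (route names are abbreviated and the
//! client setup is elided): consult the sync status before trusting counts.
//!
//! ```ignore
//! let status: SyncStatus = client.get("sync-status").send().await?;
//! if status.is_fully_synced() {
//!     // only now does this reflect the full history
//!     let count: usize = client.get("transactions/count").send().await?;
//! }
//! ```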
-use crate::{api::load_api, QueryError}; +use std::{fmt::Display, ops::Bound, path::PathBuf}; + use derive_more::From; use futures::FutureExt; use hotshot_types::traits::node_implementation::NodeType; use serde::{Deserialize, Serialize}; use snafu::{ResultExt, Snafu}; -use std::{fmt::Display, ops::Bound, path::PathBuf}; use tide_disco::{api::ApiError, method::ReadState, Api, RequestError, StatusCode}; use vbs::version::StaticVersionType; +use crate::{api::load_api, QueryError}; + pub(crate) mod data_source; pub(crate) mod query_data; pub use data_source::*; @@ -201,17 +203,8 @@ where #[cfg(test)] mod test { - use super::*; - use crate::{ - data_source::ExtensibleDataSource, - task::BackgroundTask, - testing::{ - consensus::{MockDataSource, MockNetwork, MockSqlDataSource}, - mocks::{mock_transaction, MockBase, MockTypes}, - setup_test, - }, - ApiState, Error, Header, - }; + use std::time::Duration; + use async_lock::RwLock; use committable::Committable; use futures::{FutureExt, StreamExt}; @@ -224,13 +217,24 @@ mod test { }, }; use portpicker::pick_unused_port; - use std::time::Duration; use surf_disco::Client; use tempfile::TempDir; use tide_disco::{App, Error as _}; use tokio::time::sleep; use toml::toml; + use super::*; + use crate::{ + data_source::ExtensibleDataSource, + task::BackgroundTask, + testing::{ + consensus::{MockDataSource, MockNetwork, MockSqlDataSource}, + mocks::{mock_transaction, MockBase, MockTypes}, + setup_test, + }, + ApiState, Error, Header, + }; + #[tokio::test(flavor = "multi_thread")] async fn test_api() { setup_test(); diff --git a/hotshot-query-service/src/node/data_source.rs b/hotshot-query-service/src/node/data_source.rs index a256b209bc..3a0b124fef 100644 --- a/hotshot-query-service/src/node/data_source.rs +++ b/hotshot-query-service/src/node/data_source.rs @@ -24,13 +24,15 @@ //! updated implicitly via the [availability API update //! trait](crate::availability::UpdateAvailabilityData). -use super::query_data::{BlockHash, BlockId, SyncStatus, TimeWindowQueryData}; -use crate::{Header, QueryResult}; +use std::ops::RangeBounds; + use async_trait::async_trait; use derivative::Derivative; use derive_more::From; use hotshot_types::{data::VidShare, traits::node_implementation::NodeType}; -use std::ops::RangeBounds; + +use super::query_data::{BlockHash, BlockId, SyncStatus, TimeWindowQueryData}; +use crate::{Header, QueryResult}; #[derive(Derivative, From)] #[derivative(Copy(bound = ""), Debug(bound = ""))] diff --git a/hotshot-query-service/src/node/query_data.rs b/hotshot-query-service/src/node/query_data.rs index f1805bc1f0..eff49e6a33 100644 --- a/hotshot-query-service/src/node/query_data.rs +++ b/hotshot-query-service/src/node/query_data.rs @@ -10,11 +10,11 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . -use crate::types::HeightIndexed; use derivative::Derivative; use serde::{Deserialize, Serialize}; pub use crate::availability::{BlockHash, BlockId}; +use crate::types::HeightIndexed; #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Deserialize, Serialize)] pub struct SyncStatus { diff --git a/hotshot-query-service/src/status.rs b/hotshot-query-service/src/status.rs index abe973f10a..2c2d4379eb 100644 --- a/hotshot-query-service/src/status.rs +++ b/hotshot-query-service/src/status.rs @@ -23,17 +23,17 @@ //! * snapshots of the state right now, with no way to query historical snapshots //! 
* summary statistics -use crate::api::load_api; +use std::{borrow::Cow, fmt::Display, path::PathBuf}; + use derive_more::From; use futures::FutureExt; use serde::{Deserialize, Serialize}; use snafu::Snafu; -use std::borrow::Cow; -use std::fmt::Display; -use std::path::PathBuf; use tide_disco::{api::ApiError, method::ReadState, Api, RequestError, StatusCode}; use vbs::version::StaticVersionType; +use crate::api::load_api; + pub(crate) mod data_source; pub use data_source::*; @@ -107,6 +107,17 @@ where #[cfg(test)] mod test { + use std::{str::FromStr, time::Duration}; + + use async_lock::RwLock; + use futures::FutureExt; + use portpicker::pick_unused_port; + use reqwest::redirect::Policy; + use surf_disco::Client; + use tempfile::TempDir; + use tide_disco::{App, Url}; + use toml::toml; + use super::*; use crate::{ data_source::ExtensibleDataSource, @@ -118,16 +129,6 @@ mod test { }, ApiState, Error, }; - use async_lock::RwLock; - use futures::FutureExt; - use portpicker::pick_unused_port; - use reqwest::redirect::Policy; - use std::str::FromStr; - use std::time::Duration; - use surf_disco::Client; - use tempfile::TempDir; - use tide_disco::{App, Url}; - use toml::toml; #[tokio::test(flavor = "multi_thread")] async fn test_api() { diff --git a/hotshot-query-service/src/status/data_source.rs b/hotshot-query-service/src/status/data_source.rs index 59af95fd23..5857c8aaa0 100644 --- a/hotshot-query-service/src/status/data_source.rs +++ b/hotshot-query-service/src/status/data_source.rs @@ -10,15 +10,15 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . +use async_trait::async_trait; +use chrono::Utc; +use hotshot_types::traits::metrics::Metrics; + use crate::{ metrics::{MetricsError, PrometheusMetrics}, QueryError, QueryResult, }; -use async_trait::async_trait; -use chrono::Utc; -use hotshot_types::traits::metrics::Metrics; - pub trait HasMetrics { fn metrics(&self) -> &PrometheusMetrics; } diff --git a/hotshot-query-service/src/task.rs b/hotshot-query-service/src/task.rs index 5b816b6ae6..fe3f3e2f20 100644 --- a/hotshot-query-service/src/task.rs +++ b/hotshot-query-service/src/task.rs @@ -12,10 +12,10 @@ //! Async task utilities. +use std::{fmt::Display, sync::Arc}; + use derivative::Derivative; use futures::future::Future; -use std::fmt::Display; -use std::sync::Arc; use tokio::{ spawn, task::{JoinError, JoinHandle}, diff --git a/hotshot-query-service/src/testing/consensus.rs b/hotshot-query-service/src/testing/consensus.rs index 28236ad6bf..ce65187d9c 100644 --- a/hotshot-query-service/src/testing/consensus.rs +++ b/hotshot-query-service/src/testing/consensus.rs @@ -10,16 +10,8 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . 
-use super::mocks::{MockMembership, MockNodeImpl, MockTransaction, MockTypes, MockVersions}; -use crate::{ - availability::{AvailabilityDataSource, UpdateAvailabilityData}, - data_source::{FileSystemDataSource, SqlDataSource, VersionedDataSource}, - fetching::provider::NoFetching, - node::NodeDataSource, - status::{StatusDataSource, UpdateStatusData}, - task::BackgroundTask, - SignatureKey, -}; +use std::{fmt::Display, num::NonZeroUsize, str::FromStr, sync::Arc, time::Duration}; + use async_lock::RwLock; use async_trait::async_trait; use futures::{ @@ -44,10 +36,6 @@ use hotshot_types::{ traits::{election::Membership, network::Topic, signature_key::SignatureKey as _}, HotShotConfig, PeerConfig, }; -use std::num::NonZeroUsize; -use std::sync::Arc; -use std::time::Duration; -use std::{fmt::Display, str::FromStr}; use tokio::{ runtime::Handle, task::{block_in_place, yield_now}, @@ -55,6 +43,17 @@ use tokio::{ use tracing::{info_span, Instrument}; use url::Url; +use super::mocks::{MockMembership, MockNodeImpl, MockTransaction, MockTypes, MockVersions}; +use crate::{ + availability::{AvailabilityDataSource, UpdateAvailabilityData}, + data_source::{FileSystemDataSource, SqlDataSource, VersionedDataSource}, + fetching::provider::NoFetching, + node::NodeDataSource, + status::{StatusDataSource, UpdateStatusData}, + task::BackgroundTask, + SignatureKey, +}; + struct MockNode { hotshot: SystemContextHandle, data_source: D, diff --git a/hotshot-query-service/src/testing/mocks.rs b/hotshot-query-service/src/testing/mocks.rs index 5c271ca1ff..8883294f79 100644 --- a/hotshot-query-service/src/testing/mocks.rs +++ b/hotshot-query-service/src/testing/mocks.rs @@ -10,12 +10,8 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . 
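
Beyond import grouping, the other mechanical change repeated below (starting with the hotshot-stake-table hunks) is a trailing comma after every block-bodied match arm. A self-contained before/after sketch of that style, assuming rustfmt's `match_block_trailing_comma = true` is what drives it:

    fn lookup(index: Option<usize>) -> Result<usize, &'static str> {
        match index {
            Some(i) => {
                let doubled = i * 2; // block-bodied arm...
                Ok(doubled)
            }, // ...now ends with a trailing comma
            None => Err("key not found"),
        }
    }

    fn main() {
        assert_eq!(lookup(Some(2)), Ok(4));
        assert_eq!(lookup(None), Err("key not found"));
    }
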
-use crate::explorer::traits::{ExplorerHeader, ExplorerTransaction}; -use crate::merklized_state::MerklizedState; -use crate::{ - availability::{QueryableHeader, QueryablePayload}, - types::HeightIndexed, -}; +use std::ops::Range; + use hotshot::traits::{ election::static_committee::StaticCommittee, implementations::MemoryNetwork, NodeImplementation, }; @@ -25,22 +21,26 @@ use hotshot_example_types::{ state_types::{TestInstanceState, TestValidatedState}, storage_types::TestStorage, }; -use hotshot_types::traits::node_implementation::Versions; use hotshot_types::{ data::{QuorumProposal, ViewNumber}, signature_key::BLSPubKey, - traits::node_implementation::NodeType, + traits::node_implementation::{NodeType, Versions}, }; - use jf_merkle_tree::{ prelude::{MerkleProof, Sha3Digest, Sha3Node}, universal_merkle_tree::UniversalMerkleTree, ForgetableMerkleTreeScheme, ForgetableUniversalMerkleTreeScheme, }; use serde::{Deserialize, Serialize}; -use std::ops::Range; use vbs::version::StaticVersion; +use crate::{ + availability::{QueryableHeader, QueryablePayload}, + explorer::traits::{ExplorerHeader, ExplorerTransaction}, + merklized_state::MerklizedState, + types::HeightIndexed, +}; + pub type MockHeader = TestBlockHeader; pub type MockPayload = TestBlockPayload; pub type MockTransaction = TestTransaction; diff --git a/hotshot-stake-table/src/mt_based.rs b/hotshot-stake-table/src/mt_based.rs index a6804662be..f96cf53a58 100644 --- a/hotshot-stake-table/src/mt_based.rs +++ b/hotshot-stake-table/src/mt_based.rs @@ -100,7 +100,7 @@ impl StakeTableScheme for StakeTable { Some(index) => { let branches = to_merkle_path(*index, self.height); root.simple_lookup(self.height, &branches) - } + }, None => Err(StakeTableError::KeyNotFound), } } @@ -116,7 +116,7 @@ impl StakeTableScheme for StakeTable { Some(index) => { let branches = to_merkle_path(*index, self.height); root.lookup(self.height, &branches) - } + }, None => Err(StakeTableError::KeyNotFound), }?; let amount = *proof.value().ok_or(StakeTableError::KeyNotFound)?; @@ -149,7 +149,7 @@ impl StakeTableScheme for StakeTable { negative, )?; Ok(value) - } + }, None => Err(StakeTableError::KeyNotFound), } } @@ -221,7 +221,7 @@ impl StakeTable { value, )?; Ok(old_value) - } + }, None => Err(StakeTableError::KeyNotFound), } } diff --git a/hotshot-stake-table/src/mt_based/internal.rs b/hotshot-stake-table/src/mt_based/internal.rs index b08301ee30..d28233f2e7 100644 --- a/hotshot-stake-table/src/mt_based/internal.rs +++ b/hotshot-stake-table/src/mt_based/internal.rs @@ -155,10 +155,10 @@ impl MerkleProof { let comm = Digest::evaluate(input) .map_err(|_| StakeTableError::RescueError)?[0]; Ok(comm) - } + }, MerklePathEntry::Leaf { .. 
} => Err(StakeTableError::MalformedProof), }) - } + }, _ => Err(StakeTableError::MalformedProof), } } @@ -305,7 +305,7 @@ impl PersistentMerkleNode { siblings: siblings.try_into().unwrap(), }); Ok(proof) - } + }, PersistentMerkleNode::Leaf { comm: _, key, @@ -341,7 +341,7 @@ impl PersistentMerkleNode { ptr += 1; } children[ptr].key_by_stake(stake_number) - } + }, PersistentMerkleNode::Leaf { comm: _, key, @@ -441,7 +441,7 @@ impl PersistentMerkleNode { }), value, )) - } + }, PersistentMerkleNode::Leaf { comm: _, key: node_key, @@ -473,7 +473,7 @@ impl PersistentMerkleNode { } else { Err(StakeTableError::MismatchedKey) } - } + }, } } @@ -518,7 +518,7 @@ impl PersistentMerkleNode { old_value, )) } - } + }, PersistentMerkleNode::Leaf { comm: _, key: cur_key, @@ -541,7 +541,7 @@ impl PersistentMerkleNode { } else { Err(StakeTableError::MismatchedKey) } - } + }, } } } @@ -584,7 +584,7 @@ impl Iterator for IntoIter { // put the left-most child to the last, so it is visited first. self.unvisited.extend(children.into_iter().rev()); self.next() - } + }, PersistentMerkleNode::Leaf { comm: _, key, diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs index 6267dd21ca..0117e302d5 100644 --- a/hotshot-stake-table/src/vec_based.rs +++ b/hotshot-stake-table/src/vec_based.rs @@ -126,7 +126,7 @@ where self.head_total_stake -= self.head.stake_amount[*pos]; self.head.stake_amount[*pos] = U256::zero(); Ok(()) - } + }, None => Err(StakeTableError::KeyNotFound), } } @@ -306,7 +306,7 @@ where self.head_total_stake -= old_value; self.head_total_stake += value; Ok(old_value) - } + }, None => Err(StakeTableError::KeyNotFound), } } diff --git a/hotshot-stake-table/src/vec_based/config.rs b/hotshot-stake-table/src/vec_based/config.rs index 4415555142..a752088765 100644 --- a/hotshot-stake-table/src/vec_based/config.rs +++ b/hotshot-stake-table/src/vec_based/config.rs @@ -41,7 +41,7 @@ impl ToFields for QCVerKey { FieldType::from_le_bytes_mod_order(&bytes[31..62]), FieldType::from_le_bytes_mod_order(&bytes[62..]), ] - } + }, Err(_) => unreachable!(), } } diff --git a/hotshot-state-prover/src/circuit.rs b/hotshot-state-prover/src/circuit.rs index baf5c8af38..c45f517c9a 100644 --- a/hotshot-state-prover/src/circuit.rs +++ b/hotshot-state-prover/src/circuit.rs @@ -331,8 +331,10 @@ where #[cfg(test)] mod tests { use ark_ed_on_bn254::EdwardsConfig as Config; - use hotshot_types::light_client::LightClientState; - use hotshot_types::traits::stake_table::{SnapshotVersion, StakeTableScheme}; + use hotshot_types::{ + light_client::LightClientState, + traits::stake_table::{SnapshotVersion, StakeTableScheme}, + }; use jf_crhf::CRHF; use jf_relation::Circuit; use jf_rescue::crhf::VariableLengthRescueCRHF; diff --git a/hotshot-state-prover/src/service.rs b/hotshot-state-prover/src/service.rs index 60a31acc31..d03dfd51d1 100644 --- a/hotshot-state-prover/src/service.rs +++ b/hotshot-state-prover/src/service.rs @@ -9,13 +9,13 @@ use std::{ use anyhow::{anyhow, Context, Result}; use contract_bindings_ethers::light_client::{LightClient, LightClientErrors}; use displaydoc::Display; -use ethers::middleware::{ - gas_oracle::{GasCategory, GasOracle}, - signer::SignerMiddlewareError, -}; use ethers::{ core::k256::ecdsa::SigningKey, - middleware::SignerMiddleware, + middleware::{ + gas_oracle::{GasCategory, GasOracle}, + signer::SignerMiddlewareError, + SignerMiddleware, + }, providers::{Http, Middleware, Provider, ProviderError}, signers::{LocalWallet, Signer, Wallet}, 
types::{transaction::eip2718::TypedTransaction, Address, U256}, @@ -42,8 +42,7 @@ use jf_pcs::prelude::UnivariateUniversalParams; use jf_plonk::errors::PlonkError; use jf_relation::Circuit as _; use jf_signature::constants::CS_ID_SCHNORR; -use sequencer_utils::blocknative::BlockNative; -use sequencer_utils::deployer::is_proxy_contract; +use sequencer_utils::{blocknative::BlockNative, deployer::is_proxy_contract}; use serde::Deserialize; use surf_disco::Client; use tide_disco::{error::ServerError, Api}; @@ -155,12 +154,12 @@ async fn init_stake_table_from_sequencer( Err(e) => { tracing::error!("Failed to parse the network config: {e}"); sleep(Duration::from_secs(5)).await; - } + }, }, Err(e) => { tracing::error!("Failed to fetch the network config: {e}"); sleep(Duration::from_secs(5)).await; - } + }, } }; @@ -288,7 +287,7 @@ pub async fn read_contract_state( Err(e) => { tracing::error!("unable to read finalized_state from contract: {}", e); return Err(ProverError::ContractError(e.into())); - } + }, }; let st_state: ParsedStakeTableState = match contract.genesis_stake_table_state().call().await { Ok(s) => s.into(), @@ -298,7 +297,7 @@ pub async fn read_contract_state( e ); return Err(ProverError::ContractError(e.into())); - } + }, }; Ok((state.into(), st_state.into())) @@ -330,10 +329,10 @@ pub async fn submit_state_and_proof( priority_fee ); } - } + }, Err(e) => { tracing::warn!("!! BlockNative Price Oracle failed: {}", e); - } + }, } } diff --git a/hotshot-task-impls/src/builder.rs b/hotshot-task-impls/src/builder.rs index d40d041e6d..ca5acd10ef 100644 --- a/hotshot-task-impls/src/builder.rs +++ b/hotshot-task-impls/src/builder.rs @@ -43,10 +43,10 @@ impl From for BuilderClientError { match value { BuilderApiError::Request(source) | BuilderApiError::TxnUnpack(source) => { Self::Api(source.to_string()) - } + }, BuilderApiError::TxnSubmit(source) | BuilderApiError::BuilderAddress(source) => { Self::Api(source.to_string()) - } + }, BuilderApiError::Custom { message, .. } => Self::Api(message), BuilderApiError::BlockAvailable { source, .. } | BuilderApiError::BlockClaim { source, .. } => match source { diff --git a/hotshot-task-impls/src/consensus/mod.rs b/hotshot-task-impls/src/consensus/mod.rs index 6e303ee4fd..98560947f1 100644 --- a/hotshot-task-impls/src/consensus/mod.rs +++ b/hotshot-task-impls/src/consensus/mod.rs @@ -4,6 +4,8 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . 
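
The prover-service hunks above keep reformatting the same control structure: a loop that retries initialization until the sequencer responds. A minimal sketch of that retry-until-ready pattern, with the 5-second backoff used in `init_stake_table_from_sequencer` (`fetch_config` is a hypothetical stand-in for the real client call, and tokio's "time", "rt", and "macros" features are assumed):

    use std::time::Duration;
    use tokio::time::sleep;

    // On any fetch or parse error, log it and retry after a fixed delay.
    async fn wait_for_config() -> String {
        loop {
            match fetch_config().await {
                Ok(config) => break config,
                Err(e) => {
                    eprintln!("Failed to fetch the network config: {e}");
                    sleep(Duration::from_secs(5)).await;
                },
            }
        }
    }

    // Hypothetical stand-in so the sketch compiles on its own.
    async fn fetch_config() -> Result<String, String> {
        Ok("network config".to_string())
    }

    #[tokio::main(flavor = "current_thread")]
    async fn main() {
        let config = wait_for_config().await;
        println!("initialized with: {config}");
    }
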
+use std::{sync::Arc, time::Instant}; + use async_broadcast::{Receiver, Sender}; use async_trait::async_trait; use hotshot_task::task::TaskState; @@ -22,7 +24,6 @@ use hotshot_types::{ vote::HasViewNumber, }; use hotshot_utils::anytrace::*; -use std::{sync::Arc, time::Instant}; use tokio::task::JoinHandle; use tracing::instrument; @@ -120,14 +121,14 @@ impl, V: Versions> ConsensusTaskSt { tracing::debug!("Failed to handle QuorumVoteRecv event; error = {e}"); } - } + }, HotShotEvent::TimeoutVoteRecv(ref vote) => { if let Err(e) = handle_timeout_vote_recv(vote, Arc::clone(&event), &sender, self).await { tracing::debug!("Failed to handle TimeoutVoteRecv event; error = {e}"); } - } + }, HotShotEvent::ViewChange(new_view_number, epoch_number) => { if let Err(e) = handle_view_change(*new_view_number, *epoch_number, &sender, &receiver, self) @@ -136,12 +137,12 @@ impl, V: Versions> ConsensusTaskSt tracing::trace!("Failed to handle ViewChange event; error = {e}"); } self.view_start_time = Instant::now(); - } + }, HotShotEvent::Timeout(view_number, epoch) => { if let Err(e) = handle_timeout(*view_number, *epoch, &sender, self).await { tracing::debug!("Failed to handle Timeout event; error = {e}"); } - } + }, HotShotEvent::ExtendedQc2Formed(eqc) => { let cert_view = eqc.view_number(); let cert_block_number = self @@ -168,7 +169,7 @@ impl, V: Versions> ConsensusTaskSt &sender, ) .await; - } + }, HotShotEvent::ExtendedQcRecv(high_qc, next_epoch_high_qc, _) => { if !self .consensus @@ -217,8 +218,8 @@ impl, V: Versions> ConsensusTaskSt ) .await; } - } - _ => {} + }, + _ => {}, } Ok(()) diff --git a/hotshot-task-impls/src/da.rs b/hotshot-task-impls/src/da.rs index f8bf3add36..25fa5ec14d 100644 --- a/hotshot-task-impls/src/da.rs +++ b/hotshot-task-impls/src/da.rs @@ -10,10 +10,10 @@ use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; use async_trait::async_trait; use hotshot_task::task::TaskState; -use hotshot_types::epoch_membership::EpochMembershipCoordinator; use hotshot_types::{ consensus::{Consensus, OuterConsensus, PayloadWithMetadata}, data::{vid_commitment, DaProposal2, PackedBundle}, + epoch_membership::EpochMembershipCoordinator, event::{Event, EventType}, message::{Proposal, UpgradeLock}, simple_certificate::DaCertificate2, @@ -141,7 +141,7 @@ impl, V: Versions> DaTaskState { let cur_view = self.consensus.read().await.cur_view(); let view_number = proposal.data.view_number(); @@ -315,7 +315,7 @@ impl, V: Versions> DaTaskState { tracing::debug!("DA vote recv, Main Task {:?}", vote.view_number()); // Check if we are the leader and the vote is from the sender. 
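
The consensus-task hunks above all touch the same dispatch shape: every `HotShotEvent` variant maps to a fallible handler, and failures are logged rather than propagated, so one bad event cannot take down the long-running task. A self-contained sketch of that shape (the `Event` enum and handlers here are illustrative, not the real types):

    enum Event {
        VoteRecv(u64),
        Timeout(u64),
    }

    fn handle_event(event: &Event) {
        match event {
            Event::VoteRecv(view) => {
                if let Err(e) = handle_vote(*view) {
                    eprintln!("Failed to handle VoteRecv event; error = {e}");
                }
            },
            Event::Timeout(view) => {
                if let Err(e) = handle_timeout(*view) {
                    eprintln!("Failed to handle Timeout event; error = {e}");
                }
            },
        }
    }

    fn handle_vote(_view: u64) -> Result<(), String> {
        Ok(())
    }

    fn handle_timeout(_view: u64) -> Result<(), String> {
        Err("no timeout certificate formed".to_string())
    }

    fn main() {
        handle_event(&Event::VoteRecv(1));
        handle_event(&Event::Timeout(2)); // logs the error and continues
    }
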
@@ -348,7 +348,7 @@ impl, V: Versions> DaTaskState { if *epoch > self.cur_epoch { self.cur_epoch = *epoch; @@ -364,7 +364,7 @@ impl, V: Versions> DaTaskState { let PackedBundle:: { encoded_transactions, @@ -434,8 +434,8 @@ impl, V: Versions> DaTaskState {} + }, + _ => {}, } Ok(()) } diff --git a/hotshot-task-impls/src/events.rs b/hotshot-task-impls/src/events.rs index a1e828f27a..c543265ef3 100644 --- a/hotshot-task-impls/src/events.rs +++ b/hotshot-task-impls/src/events.rs @@ -10,10 +10,9 @@ use async_broadcast::Sender; use either::Either; use hotshot_task::task::TaskEvent; use hotshot_types::{ - data::VidCommitment, data::{ DaProposal2, Leaf2, PackedBundle, QuorumProposal2, QuorumProposalWrapper, UpgradeProposal, - VidDisperse, VidDisperseShare, + VidCommitment, VidDisperse, VidDisperseShare, }, message::Proposal, request_response::ProposalRequestPayload, @@ -284,7 +283,7 @@ impl HotShotEvent { HotShotEvent::QuorumVoteRecv(v) => Some(v.view_number()), HotShotEvent::TimeoutVoteRecv(v) | HotShotEvent::TimeoutVoteSend(v) => { Some(v.view_number()) - } + }, HotShotEvent::QuorumProposalRecv(proposal, _) | HotShotEvent::QuorumProposalSend(proposal, _) | HotShotEvent::QuorumProposalValidated(proposal, _) @@ -292,16 +291,16 @@ impl HotShotEvent { | HotShotEvent::QuorumProposalResponseSend(_, proposal) | HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) => { Some(proposal.data.view_number()) - } + }, HotShotEvent::QuorumVoteSend(vote) | HotShotEvent::ExtendedQuorumVoteSend(vote) => { Some(vote.view_number()) - } + }, HotShotEvent::DaProposalRecv(proposal, _) | HotShotEvent::DaProposalValidated(proposal, _) | HotShotEvent::DaProposalSend(proposal, _) => Some(proposal.data.view_number()), HotShotEvent::DaVoteRecv(vote) | HotShotEvent::DaVoteSend(vote) => { Some(vote.view_number()) - } + }, HotShotEvent::QcFormed(cert) => match cert { either::Left(qc) => Some(qc.view_number()), either::Right(tc) => Some(tc.view_number()), @@ -327,41 +326,41 @@ impl HotShotEvent { | HotShotEvent::ViewSyncCommitCertificateSend(cert, _) => Some(cert.view_number()), HotShotEvent::ViewSyncFinalizeCertificateRecv(cert) | HotShotEvent::ViewSyncFinalizeCertificateSend(cert, _) => Some(cert.view_number()), - HotShotEvent::SendPayloadCommitmentAndMetadata(_, _, _, view_number, _, _) => { + HotShotEvent::SendPayloadCommitmentAndMetadata(_, _, _, view_number, ..) => { Some(*view_number) - } + }, HotShotEvent::BlockRecv(packed_bundle) => Some(packed_bundle.view_number), HotShotEvent::Shutdown - | HotShotEvent::TransactionSend(_, _) + | HotShotEvent::TransactionSend(..) | HotShotEvent::TransactionsRecv(_) => None, HotShotEvent::VidDisperseSend(proposal, _) => Some(proposal.data.view_number()), HotShotEvent::VidShareRecv(_, proposal) | HotShotEvent::VidShareValidated(proposal) => { Some(proposal.data.view_number()) - } + }, HotShotEvent::UpgradeProposalRecv(proposal, _) | HotShotEvent::UpgradeProposalSend(proposal, _) => Some(proposal.data.view_number()), HotShotEvent::UpgradeVoteRecv(vote) | HotShotEvent::UpgradeVoteSend(vote) => { Some(vote.view_number()) - } + }, HotShotEvent::QuorumProposalRequestSend(req, _) | HotShotEvent::QuorumProposalRequestRecv(req, _) => Some(req.view_number), HotShotEvent::ViewChange(view_number, _) - | HotShotEvent::ViewSyncTimeout(view_number, _, _) + | HotShotEvent::ViewSyncTimeout(view_number, ..) | HotShotEvent::ViewSyncTrigger(view_number) | HotShotEvent::Timeout(view_number, ..) 
=> Some(*view_number), HotShotEvent::DaCertificateRecv(cert) | HotShotEvent::DacSend(cert, _) => { Some(cert.view_number()) - } + }, HotShotEvent::DaCertificateValidated(cert) => Some(cert.view_number), HotShotEvent::UpgradeCertificateFormed(cert) => Some(cert.view_number()), - HotShotEvent::VidRequestSend(request, _, _) + HotShotEvent::VidRequestSend(request, ..) | HotShotEvent::VidRequestRecv(request, _) => Some(request.view), HotShotEvent::VidResponseSend(_, _, proposal) | HotShotEvent::VidResponseRecv(_, proposal) => Some(proposal.data.view_number()), HotShotEvent::HighQcRecv(qc, _) | HotShotEvent::HighQcSend(qc, ..) - | HotShotEvent::ExtendedQcRecv(qc, _, _) - | HotShotEvent::ExtendedQcSend(qc, _, _) => Some(qc.view_number()), + | HotShotEvent::ExtendedQcRecv(qc, ..) + | HotShotEvent::ExtendedQcSend(qc, ..) => Some(qc.view_number()), } } } @@ -378,20 +377,20 @@ impl Display for HotShotEvent { ), HotShotEvent::QuorumVoteRecv(v) => { write!(f, "QuorumVoteRecv(view_number={:?})", v.view_number()) - } + }, HotShotEvent::ExtendedQuorumVoteSend(v) => { write!( f, "ExtendedQuorumVoteSend(view_number={:?})", v.view_number() ) - } + }, HotShotEvent::TimeoutVoteRecv(v) => { write!(f, "TimeoutVoteRecv(view_number={:?})", v.view_number()) - } + }, HotShotEvent::TimeoutVoteSend(v) => { write!(f, "TimeoutVoteSend(view_number={:?})", v.view_number()) - } + }, HotShotEvent::DaProposalRecv(proposal, _) => write!( f, "DaProposalRecv(view_number={:?})", @@ -404,10 +403,10 @@ impl Display for HotShotEvent { ), HotShotEvent::DaVoteRecv(vote) => { write!(f, "DaVoteRecv(view_number={:?})", vote.view_number()) - } + }, HotShotEvent::DaCertificateRecv(cert) => { write!(f, "DaCertificateRecv(view_number={:?})", cert.view_number()) - } + }, HotShotEvent::DaCertificateValidated(cert) => write!( f, "DaCertificateValidated(view_number={:?})", @@ -420,7 +419,7 @@ impl Display for HotShotEvent { ), HotShotEvent::QuorumVoteSend(vote) => { write!(f, "QuorumVoteSend(view_number={:?})", vote.view_number()) - } + }, HotShotEvent::QuorumProposalValidated(proposal, _) => write!( f, "QuorumProposalValidated(view_number={:?})", @@ -433,7 +432,7 @@ impl Display for HotShotEvent { ), HotShotEvent::DaVoteSend(vote) => { write!(f, "DaVoteSend(view_number={:?})", vote.view_number()) - } + }, HotShotEvent::QcFormed(cert) => match cert { either::Left(qc) => write!(f, "QcFormed(view_number={:?})", qc.view_number()), either::Right(tc) => write!(f, "QcFormed(view_number={:?})", tc.view_number()), @@ -445,26 +444,26 @@ impl Display for HotShotEvent { HotShotEvent::NextEpochQc2Formed(cert) => match cert { either::Left(qc) => { write!(f, "NextEpochQc2Formed(view_number={:?})", qc.view_number()) - } + }, either::Right(tc) => { write!(f, "NextEpochQc2Formed(view_number={:?})", tc.view_number()) - } + }, }, HotShotEvent::ExtendedQc2Formed(cert) => { write!(f, "ExtendedQc2Formed(view_number={:?})", cert.view_number()) - } + }, HotShotEvent::DacSend(cert, _) => { write!(f, "DacSend(view_number={:?})", cert.view_number()) - } + }, HotShotEvent::ViewChange(view_number, epoch_number) => { write!( f, "ViewChange(view_number={view_number:?}, epoch_number={epoch_number:?})" ) - } - HotShotEvent::ViewSyncTimeout(view_number, _, _) => { + }, + HotShotEvent::ViewSyncTimeout(view_number, ..) 
=> { write!(f, "ViewSyncTimeout(view_number={view_number:?})") - } + }, HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => write!( f, "ViewSyncPreCommitVoteRecv(view_number={:?})", @@ -501,59 +500,59 @@ impl Display for HotShotEvent { "ViewSyncPreCommitCertificateRecv(view_number={:?})", cert.view_number() ) - } + }, HotShotEvent::ViewSyncCommitCertificateRecv(cert) => { write!( f, "ViewSyncCommitCertificateRecv(view_number={:?})", cert.view_number() ) - } + }, HotShotEvent::ViewSyncFinalizeCertificateRecv(cert) => { write!( f, "ViewSyncFinalizeCertificateRecv(view_number={:?})", cert.view_number() ) - } + }, HotShotEvent::ViewSyncPreCommitCertificateSend(cert, _) => { write!( f, "ViewSyncPreCommitCertificateSend(view_number={:?})", cert.view_number() ) - } + }, HotShotEvent::ViewSyncCommitCertificateSend(cert, _) => { write!( f, "ViewSyncCommitCertificateSend(view_number={:?})", cert.view_number() ) - } + }, HotShotEvent::ViewSyncFinalizeCertificateSend(cert, _) => { write!( f, "ViewSyncFinalizeCertificateSend(view_number={:?})", cert.view_number() ) - } + }, HotShotEvent::ViewSyncTrigger(view_number) => { write!(f, "ViewSyncTrigger(view_number={view_number:?})") - } + }, HotShotEvent::Timeout(view_number, epoch) => { write!(f, "Timeout(view_number={view_number:?}, epoch={epoch:?})") - } + }, HotShotEvent::TransactionsRecv(_) => write!(f, "TransactionsRecv"), - HotShotEvent::TransactionSend(_, _) => write!(f, "TransactionSend"), - HotShotEvent::SendPayloadCommitmentAndMetadata(_, _, _, view_number, _, _) => { + HotShotEvent::TransactionSend(..) => write!(f, "TransactionSend"), + HotShotEvent::SendPayloadCommitmentAndMetadata(_, _, _, view_number, ..) => { write!( f, "SendPayloadCommitmentAndMetadata(view_number={view_number:?})" ) - } + }, HotShotEvent::BlockRecv(packed_bundle) => { write!(f, "BlockRecv(view_number={:?})", packed_bundle.view_number) - } + }, HotShotEvent::VidDisperseSend(proposal, _) => write!( f, "VidDisperseSend(view_number={:?})", @@ -581,10 +580,10 @@ impl Display for HotShotEvent { ), HotShotEvent::UpgradeVoteRecv(vote) => { write!(f, "UpgradeVoteRecv(view_number={:?})", vote.view_number()) - } + }, HotShotEvent::UpgradeVoteSend(vote) => { write!(f, "UpgradeVoteSend(view_number={:?})", vote.view_number()) - } + }, HotShotEvent::UpgradeCertificateFormed(cert) => write!( f, "UpgradeCertificateFormed(view_number={:?})", @@ -592,63 +591,63 @@ impl Display for HotShotEvent { ), HotShotEvent::QuorumProposalRequestSend(view_number, _) => { write!(f, "QuorumProposalRequestSend(view_number={view_number:?})") - } + }, HotShotEvent::QuorumProposalRequestRecv(view_number, _) => { write!(f, "QuorumProposalRequestRecv(view_number={view_number:?})") - } + }, HotShotEvent::QuorumProposalResponseSend(_, proposal) => { write!( f, "QuorumProposalResponseSend(view_number={:?})", proposal.data.view_number() ) - } + }, HotShotEvent::QuorumProposalResponseRecv(proposal) => { write!( f, "QuorumProposalResponseRecv(view_number={:?})", proposal.data.view_number() ) - } + }, HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) => { write!( f, "QuorumProposalPreliminarilyValidated(view_number={:?}", proposal.data.view_number() ) - } - HotShotEvent::VidRequestSend(request, _, _) => { + }, + HotShotEvent::VidRequestSend(request, ..) 
=> { write!(f, "VidRequestSend(view_number={:?}", request.view) - } + }, HotShotEvent::VidRequestRecv(request, _) => { write!(f, "VidRequestRecv(view_number={:?}", request.view) - } + }, HotShotEvent::VidResponseSend(_, _, proposal) => { write!( f, "VidResponseSend(view_number={:?}", proposal.data.view_number() ) - } + }, HotShotEvent::VidResponseRecv(_, proposal) => { write!( f, "VidResponseRecv(view_number={:?}", proposal.data.view_number() ) - } + }, HotShotEvent::HighQcRecv(qc, _) => { write!(f, "HighQcRecv(view_number={:?}", qc.view_number()) - } + }, HotShotEvent::HighQcSend(qc, ..) => { write!(f, "HighQcSend(view_number={:?}", qc.view_number()) - } + }, HotShotEvent::ExtendedQcRecv(qc, ..) => { write!(f, "ExtendedQcRecv(view_number={:?}", qc.view_number()) - } + }, HotShotEvent::ExtendedQcSend(qc, ..) => { write!(f, "ExtendedQcSend(view_number={:?}", qc.view_number()) - } + }, } } } diff --git a/hotshot-task-impls/src/helpers.rs b/hotshot-task-impls/src/helpers.rs index 61f40340ac..6884bc0169 100644 --- a/hotshot-task-impls/src/helpers.rs +++ b/hotshot-task-impls/src/helpers.rs @@ -4,12 +4,17 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, + time::{Duration, Instant}, +}; + use async_broadcast::{Receiver, SendError, Sender}; use async_lock::RwLock; use committable::{Commitment, Committable}; use either::Either; use hotshot_task::dependency::{Dependency, EventDependency}; -use hotshot_types::traits::storage::Storage; use hotshot_types::{ consensus::OuterConsensus, data::{Leaf2, QuorumProposalWrapper, ViewChangeEvidence2}, @@ -24,6 +29,7 @@ use hotshot_types::{ election::Membership, node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, + storage::Storage, BlockPayload, ValidatedState, }, utils::{ @@ -34,11 +40,6 @@ use hotshot_types::{ StakeTableEntries, }; use hotshot_utils::anytrace::*; -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, - time::{Duration, Instant}, -}; use tokio::time::timeout; use tracing::instrument; @@ -814,7 +815,7 @@ pub(crate) async fn validate_proposal_view_and_certs< *view_number, e ) })?; - } + }, ViewChangeEvidence2::ViewSync(view_sync_cert) => { ensure!( view_sync_cert.view_number == view_number, @@ -838,7 +839,7 @@ pub(crate) async fn validate_proposal_view_and_certs< ) .await .context(|e| warn!("Invalid view sync finalize cert provided: {}", e))?; - } + }, } } @@ -871,13 +872,13 @@ pub async fn broadcast_event(event: E, sender: &Send "Event sender queue overflow, Oldest event removed form queue: {:?}", overflowed ); - } + }, Err(SendError(e)) => { tracing::warn!( "Event: {:?}\n Sending failed, event stream probably shutdown", e ); - } + }, } } diff --git a/hotshot-task-impls/src/network.rs b/hotshot-task-impls/src/network.rs index 8994a594fb..a7ebb87c0a 100644 --- a/hotshot-task-impls/src/network.rs +++ b/hotshot-task-impls/src/network.rs @@ -85,7 +85,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::QuorumProposalRecv(convert_proposal(proposal), sender) - } + }, GeneralConsensusMessage::Proposal2(proposal) => { if !self .upgrade_lock @@ -96,10 +96,10 @@ impl NetworkMessageTaskState { return; } HotShotEvent::QuorumProposalRecv(convert_proposal(proposal), sender) - } + }, GeneralConsensusMessage::ProposalRequested(req, sig) => { HotShotEvent::QuorumProposalRequestRecv(req, sig) - } + }, GeneralConsensusMessage::ProposalResponse(proposal) => { if self .upgrade_lock 
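
Every arm in the network-task hunks above and below applies the same version gate: a pre-epoch message variant is rejected once epochs are enabled for its view, and a `...2` variant is rejected before then. The whole table collapses to one boolean comparison, sketched here with illustrative names:

    // A message is accepted only when its wire version matches whether
    // epochs are enabled for the view it refers to.
    fn accept_message(epochs_enabled_for_view: bool, is_v2_variant: bool) -> bool {
        is_v2_variant == epochs_enabled_for_view
    }

    fn main() {
        assert!(accept_message(true, true)); // v2 message after the upgrade
        assert!(accept_message(false, false)); // legacy message before it
        assert!(!accept_message(false, true)); // v2 message too early: dropped
        assert!(!accept_message(true, false)); // legacy message too late: dropped
    }
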
@@ -110,7 +110,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::QuorumProposalResponseRecv(convert_proposal(proposal)) - } + }, GeneralConsensusMessage::ProposalResponse2(proposal) => { if !self .upgrade_lock @@ -121,21 +121,21 @@ impl NetworkMessageTaskState { return; } HotShotEvent::QuorumProposalResponseRecv(convert_proposal(proposal)) - } + }, GeneralConsensusMessage::Vote(vote) => { if self.upgrade_lock.epochs_enabled(vote.view_number()).await { tracing::warn!("received GeneralConsensusMessage::Vote for view {} but epochs are enabled for that view", vote.view_number()); return; } HotShotEvent::QuorumVoteRecv(vote.to_vote2()) - } + }, GeneralConsensusMessage::Vote2(vote) => { if !self.upgrade_lock.epochs_enabled(vote.view_number()).await { tracing::warn!("received GeneralConsensusMessage::Vote2 for view {} but epochs are not enabled for that view", vote.view_number()); return; } HotShotEvent::QuorumVoteRecv(vote) - } + }, GeneralConsensusMessage::ViewSyncPreCommitVote(view_sync_message) => { if self .upgrade_lock @@ -146,7 +146,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncPreCommitVoteRecv(view_sync_message.to_vote2()) - } + }, GeneralConsensusMessage::ViewSyncPreCommitVote2(view_sync_message) => { if !self .upgrade_lock @@ -157,7 +157,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncPreCommitVoteRecv(view_sync_message) - } + }, GeneralConsensusMessage::ViewSyncPreCommitCertificate( view_sync_message, ) => { @@ -172,7 +172,7 @@ impl NetworkMessageTaskState { HotShotEvent::ViewSyncPreCommitCertificateRecv( view_sync_message.to_vsc2(), ) - } + }, GeneralConsensusMessage::ViewSyncPreCommitCertificate2( view_sync_message, ) => { @@ -185,7 +185,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncPreCommitCertificateRecv(view_sync_message) - } + }, GeneralConsensusMessage::ViewSyncCommitVote(view_sync_message) => { if self .upgrade_lock @@ -196,7 +196,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncCommitVoteRecv(view_sync_message.to_vote2()) - } + }, GeneralConsensusMessage::ViewSyncCommitVote2(view_sync_message) => { if !self .upgrade_lock @@ -207,7 +207,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncCommitVoteRecv(view_sync_message) - } + }, GeneralConsensusMessage::ViewSyncCommitCertificate(view_sync_message) => { if self .upgrade_lock @@ -218,7 +218,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncCommitCertificateRecv(view_sync_message.to_vsc2()) - } + }, GeneralConsensusMessage::ViewSyncCommitCertificate2(view_sync_message) => { if !self .upgrade_lock @@ -229,7 +229,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncCommitCertificateRecv(view_sync_message) - } + }, GeneralConsensusMessage::ViewSyncFinalizeVote(view_sync_message) => { if self .upgrade_lock @@ -240,7 +240,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncFinalizeVoteRecv(view_sync_message.to_vote2()) - } + }, GeneralConsensusMessage::ViewSyncFinalizeVote2(view_sync_message) => { if !self .upgrade_lock @@ -251,7 +251,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncFinalizeVoteRecv(view_sync_message) - } + }, GeneralConsensusMessage::ViewSyncFinalizeCertificate(view_sync_message) => { if self .upgrade_lock @@ -264,7 +264,7 @@ impl NetworkMessageTaskState { HotShotEvent::ViewSyncFinalizeCertificateRecv( view_sync_message.to_vsc2(), ) - } + }, GeneralConsensusMessage::ViewSyncFinalizeCertificate2( view_sync_message, ) => { @@ -277,7 +277,7 @@ 
impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncFinalizeCertificateRecv(view_sync_message) - } + }, GeneralConsensusMessage::TimeoutVote(message) => { if self .upgrade_lock @@ -288,7 +288,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::TimeoutVoteRecv(message.to_vote2()) - } + }, GeneralConsensusMessage::TimeoutVote2(message) => { if !self .upgrade_lock @@ -299,18 +299,18 @@ impl NetworkMessageTaskState { return; } HotShotEvent::TimeoutVoteRecv(message) - } + }, GeneralConsensusMessage::UpgradeProposal(message) => { HotShotEvent::UpgradeProposalRecv(message, sender) - } + }, GeneralConsensusMessage::UpgradeVote(message) => { tracing::error!("Received upgrade vote!"); HotShotEvent::UpgradeVoteRecv(message) - } + }, GeneralConsensusMessage::HighQc(qc) => HotShotEvent::HighQcRecv(qc, sender), GeneralConsensusMessage::ExtendedQc(qc, next_epoch_qc) => { HotShotEvent::ExtendedQcRecv(qc, next_epoch_qc, sender) - } + }, }, SequencingMessage::Da(da_message) => match da_message { DaConsensusMessage::DaProposal(proposal) => { @@ -323,7 +323,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::DaProposalRecv(convert_proposal(proposal), sender) - } + }, DaConsensusMessage::DaProposal2(proposal) => { if !self .upgrade_lock @@ -334,35 +334,35 @@ impl NetworkMessageTaskState { return; } HotShotEvent::DaProposalRecv(proposal, sender) - } + }, DaConsensusMessage::DaVote(vote) => { if self.upgrade_lock.epochs_enabled(vote.view_number()).await { tracing::warn!("received DaConsensusMessage::DaVote for view {} but epochs are enabled for that view", vote.view_number()); return; } HotShotEvent::DaVoteRecv(vote.clone().to_vote2()) - } + }, DaConsensusMessage::DaVote2(vote) => { if !self.upgrade_lock.epochs_enabled(vote.view_number()).await { tracing::warn!("received DaConsensusMessage::DaVote2 for view {} but epochs are not enabled for that view", vote.view_number()); return; } HotShotEvent::DaVoteRecv(vote.clone()) - } + }, DaConsensusMessage::DaCertificate(cert) => { if self.upgrade_lock.epochs_enabled(cert.view_number()).await { tracing::warn!("received DaConsensusMessage::DaCertificate for view {} but epochs are enabled for that view", cert.view_number()); return; } HotShotEvent::DaCertificateRecv(cert.to_dac2()) - } + }, DaConsensusMessage::DaCertificate2(cert) => { if !self.upgrade_lock.epochs_enabled(cert.view_number()).await { tracing::warn!("received DaConsensusMessage::DaCertificate2 for view {} but epochs are not enabled for that view", cert.view_number()); return; } HotShotEvent::DaCertificateRecv(cert) - } + }, DaConsensusMessage::VidDisperseMsg(proposal) => { if self .upgrade_lock @@ -373,7 +373,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::VidShareRecv(sender, convert_proposal(proposal)) - } + }, DaConsensusMessage::VidDisperseMsg2(proposal) => { if !self .upgrade_lock @@ -384,11 +384,11 @@ impl NetworkMessageTaskState { return; } HotShotEvent::VidShareRecv(sender, convert_proposal(proposal)) - } + }, }, }; broadcast_event(Arc::new(event), &self.internal_event_stream).await; - } + }, // Handle data messages MessageKind::Data(message) => match message { @@ -403,7 +403,7 @@ impl NetworkMessageTaskState { &self.internal_event_stream, ) .await; - } + }, DataMessage::DataResponse(response) => { if let ResponseMessage::Found(message) = response { match message { @@ -416,7 +416,7 @@ impl NetworkMessageTaskState { &self.internal_event_stream, ) .await; - } + }, SequencingMessage::Da(DaConsensusMessage::VidDisperseMsg2( proposal, )) => { @@ -428,11 +428,11 @@ 
impl NetworkMessageTaskState { &self.internal_event_stream, ) .await; - } - _ => {} + }, + _ => {}, } } - } + }, DataMessage::RequestData(data) => { let req_data = data.clone(); if let RequestKind::Vid(_view_number, _key) = req_data.request { @@ -442,7 +442,7 @@ impl NetworkMessageTaskState { ) .await; } - } + }, }, // Handle external messages @@ -459,7 +459,7 @@ impl NetworkMessageTaskState { &self.external_event_stream, ) .await; - } + }, } } } @@ -607,7 +607,7 @@ impl< Err(e) => { tracing::error!("Failed to serialize message: {}", e); continue; - } + }, }; messages.insert(recipient, serialized_message); @@ -630,7 +630,7 @@ impl< return; } match net.vid_broadcast_message(messages).await { - Ok(()) => {} + Ok(()) => {}, Err(e) => tracing::warn!("Failed to send message from network task: {:?}", e), } }); @@ -665,7 +665,7 @@ impl< Err(e) => { tracing::warn!("Not Sending {:?} because of storage error: {:?}", action, e); Err(()) - } + }, } } else { Ok(()) @@ -718,7 +718,7 @@ impl< }; Some((sender, message, TransmitType::Broadcast)) - } + }, // ED Each network task is subscribed to all these message types. Need filters per network task HotShotEvent::QuorumVoteSend(vote) => { @@ -740,7 +740,7 @@ impl< e ); return None; - } + }, }; let message = if self.upgrade_lock.epochs_enabled(vote.view_number()).await { @@ -754,7 +754,7 @@ impl< }; Some((vote.signing_key(), message, TransmitType::Direct(leader))) - } + }, HotShotEvent::ExtendedQuorumVoteSend(vote) => { *maybe_action = Some(HotShotAction::Vote); let message = if self.upgrade_lock.epochs_enabled(vote.view_number()).await { @@ -768,7 +768,7 @@ impl< }; Some((vote.signing_key(), message, TransmitType::Broadcast)) - } + }, HotShotEvent::QuorumProposalRequestSend(req, signature) => Some(( req.key.clone(), MessageKind::::from_consensus_message(SequencingMessage::General( @@ -796,11 +796,11 @@ impl< message, TransmitType::Direct(sender_key), )) - } + }, HotShotEvent::VidDisperseSend(proposal, sender) => { self.handle_vid_disperse_proposal(proposal, &sender).await; None - } + }, HotShotEvent::DaProposalSend(proposal, sender) => { *maybe_action = Some(HotShotAction::DaPropose); @@ -819,7 +819,7 @@ impl< }; Some((sender, message, TransmitType::DaCommitteeBroadcast)) - } + }, HotShotEvent::DaVoteSend(vote) => { *maybe_action = Some(HotShotAction::DaVote); let view_number = vote.view_number(); @@ -839,7 +839,7 @@ impl< e ); return None; - } + }, }; let message = if self.upgrade_lock.epochs_enabled(view_number).await { @@ -853,7 +853,7 @@ impl< }; Some((vote.signing_key(), message, TransmitType::Direct(leader))) - } + }, HotShotEvent::DacSend(certificate, sender) => { *maybe_action = Some(HotShotAction::DaCert); let message = if self @@ -871,7 +871,7 @@ impl< }; Some((sender, message, TransmitType::Broadcast)) - } + }, HotShotEvent::ViewSyncPreCommitVoteSend(vote) => { let view_number = vote.view_number() + vote.date().relay; let leader = match self @@ -890,7 +890,7 @@ impl< e ); return None; - } + }, }; let message = if self.upgrade_lock.epochs_enabled(vote.view_number()).await { MessageKind::::from_consensus_message(SequencingMessage::General( @@ -903,7 +903,7 @@ impl< }; Some((vote.signing_key(), message, TransmitType::Direct(leader))) - } + }, HotShotEvent::ViewSyncCommitVoteSend(vote) => { *maybe_action = Some(HotShotAction::ViewSyncVote); let view_number = vote.view_number() + vote.date().relay; @@ -923,7 +923,7 @@ impl< e ); return None; - } + }, }; let message = if self.upgrade_lock.epochs_enabled(vote.view_number()).await { 
MessageKind::::from_consensus_message(SequencingMessage::General( @@ -936,7 +936,7 @@ impl< }; Some((vote.signing_key(), message, TransmitType::Direct(leader))) - } + }, HotShotEvent::ViewSyncFinalizeVoteSend(vote) => { *maybe_action = Some(HotShotAction::ViewSyncVote); let view_number = vote.view_number() + vote.date().relay; @@ -956,7 +956,7 @@ impl< e ); return None; - } + }, }; let message = if self.upgrade_lock.epochs_enabled(vote.view_number()).await { MessageKind::::from_consensus_message(SequencingMessage::General( @@ -969,7 +969,7 @@ impl< }; Some((vote.signing_key(), message, TransmitType::Direct(leader))) - } + }, HotShotEvent::ViewSyncPreCommitCertificateSend(certificate, sender) => { let view_number = certificate.view_number(); let message = if self.upgrade_lock.epochs_enabled(view_number).await { @@ -983,7 +983,7 @@ impl< }; Some((sender, message, TransmitType::Broadcast)) - } + }, HotShotEvent::ViewSyncCommitCertificateSend(certificate, sender) => { let view_number = certificate.view_number(); let message = if self.upgrade_lock.epochs_enabled(view_number).await { @@ -997,7 +997,7 @@ impl< }; Some((sender, message, TransmitType::Broadcast)) - } + }, HotShotEvent::ViewSyncFinalizeCertificateSend(certificate, sender) => { let view_number = certificate.view_number(); let message = if self.upgrade_lock.epochs_enabled(view_number).await { @@ -1011,7 +1011,7 @@ impl< }; Some((sender, message, TransmitType::Broadcast)) - } + }, HotShotEvent::TimeoutVoteSend(vote) => { *maybe_action = Some(HotShotAction::Vote); let view_number = vote.view_number() + 1; @@ -1031,7 +1031,7 @@ impl< e ); return None; - } + }, }; let message = if self.upgrade_lock.epochs_enabled(vote.view_number()).await { MessageKind::::from_consensus_message(SequencingMessage::General( @@ -1044,7 +1044,7 @@ impl< }; Some((vote.signing_key(), message, TransmitType::Direct(leader))) - } + }, HotShotEvent::UpgradeProposalSend(proposal, sender) => Some(( sender, MessageKind::::from_consensus_message(SequencingMessage::General( @@ -1071,7 +1071,7 @@ impl< e ); return None; - } + }, }; Some(( vote.signing_key(), @@ -1080,7 +1080,7 @@ impl< )), TransmitType::Direct(leader), )) - } + }, HotShotEvent::ViewChange(view, epoch) => { self.view = view; if epoch > self.epoch { @@ -1096,7 +1096,7 @@ impl< .await; }); None - } + }, HotShotEvent::VidRequestSend(req, sender, to) => Some(( sender, MessageKind::Data(DataMessage::RequestData(req)), @@ -1126,7 +1126,7 @@ impl< vid_share_proposal, )), ))) - } + }, VidDisperseShare::V1(data) => { if !epochs_enabled { tracing::warn!( @@ -1145,10 +1145,10 @@ impl< vid_share_proposal, )), ))) - } + }, }; Some((sender, message, TransmitType::Direct(to))) - } + }, HotShotEvent::HighQcSend(quorum_cert, leader, sender) => Some(( sender, MessageKind::Consensus(SequencingMessage::General( @@ -1234,18 +1234,18 @@ impl< Err(e) => { tracing::error!("Failed to serialize message: {}", e); return; - } + }, }; let transmit_result = match transmit { TransmitType::Direct(recipient) => { network.direct_message(serialized_message, recipient).await - } + }, TransmitType::Broadcast => { network .broadcast_message(serialized_message, committee_topic, broadcast_delay) .await - } + }, TransmitType::DaCommitteeBroadcast => { network .da_broadcast_message( @@ -1254,11 +1254,11 @@ impl< broadcast_delay, ) .await - } + }, }; match transmit_result { - Ok(()) => {} + Ok(()) => {}, Err(e) => tracing::warn!("Failed to send message task: {:?}", e), } }); diff --git a/hotshot-task-impls/src/quorum_proposal/handlers.rs 
b/hotshot-task-impls/src/quorum_proposal/handlers.rs index 2203370d3d..16f50edec8 100644 --- a/hotshot-task-impls/src/quorum_proposal/handlers.rs +++ b/hotshot-task-impls/src/quorum_proposal/handlers.rs @@ -13,11 +13,6 @@ use std::{ time::{Duration, Instant}, }; -use crate::{ - events::HotShotEvent, - helpers::{broadcast_event, parent_leaf_and_state, wait_for_next_epoch_qc}, - quorum_proposal::{QuorumProposalTaskState, UpgradeLock, Versions}, -}; use anyhow::{ensure, Context, Result}; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; @@ -42,6 +37,12 @@ use hotshot_utils::anytrace::*; use tracing::instrument; use vbs::version::StaticVersionType; +use crate::{ + events::HotShotEvent, + helpers::{broadcast_event, parent_leaf_and_state, wait_for_next_epoch_qc}, + quorum_proposal::{QuorumProposalTaskState, UpgradeLock, Versions}, +}; + /// Proposal dependency types. These types represent events that precipitate a proposal. #[derive(PartialEq, Debug)] pub(crate) enum ProposalDependency { @@ -431,22 +432,22 @@ impl HandleDepOutput for ProposalDependencyHandle< block_view: *view, auction_result: auction_result.clone(), }); - } + }, HotShotEvent::Qc2Formed(cert) => match cert { either::Right(timeout) => { timeout_certificate = Some(timeout.clone()); - } + }, either::Left(qc) => { parent_qc = Some(qc.clone()); - } + }, }, HotShotEvent::ViewSyncFinalizeCertificateRecv(cert) => { view_sync_finalize_cert = Some(cert.clone()); - } + }, HotShotEvent::VidDisperseSend(share, _) => { vid_share = Some(share.clone()); - } - _ => {} + }, + _ => {}, } } diff --git a/hotshot-task-impls/src/quorum_proposal/mod.rs b/hotshot-task-impls/src/quorum_proposal/mod.rs index 88c09de09e..1b933f8374 100644 --- a/hotshot-task-impls/src/quorum_proposal/mod.rs +++ b/hotshot-task-impls/src/quorum_proposal/mod.rs @@ -15,7 +15,6 @@ use hotshot_task::{ dependency_task::DependencyTask, task::TaskState, }; -use hotshot_types::StakeTableEntries; use hotshot_types::{ consensus::OuterConsensus, epoch_membership::EpochMembershipCoordinator, @@ -28,14 +27,14 @@ use hotshot_types::{ }, utils::EpochTransitionIndicator, vote::{Certificate, HasViewNumber}, + StakeTableEntries, }; use hotshot_utils::anytrace::*; use tokio::task::JoinHandle; use tracing::instrument; use self::handlers::{ProposalDependency, ProposalDependencyHandle}; -use crate::events::HotShotEvent; -use crate::quorum_proposal::handlers::handle_eqc_formed; +use crate::{events::HotShotEvent, quorum_proposal::handlers::handle_eqc_formed}; mod handlers; @@ -115,14 +114,14 @@ impl, V: Versions> } else { return false; } - } + }, ProposalDependency::TimeoutCert => { if let HotShotEvent::Qc2Formed(either::Right(timeout)) = event { timeout.view_number() + 1 } else { return false; } - } + }, ProposalDependency::ViewSyncCert => { if let HotShotEvent::ViewSyncFinalizeCertificateRecv(view_sync_cert) = event { @@ -130,7 +129,7 @@ impl, V: Versions> } else { return false; } - } + }, ProposalDependency::Proposal => { if let HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) = event { @@ -138,7 +137,7 @@ impl, V: Versions> } else { return false; } - } + }, ProposalDependency::PayloadAndMetadata => { if let HotShotEvent::SendPayloadCommitmentAndMetadata( _payload_commitment, @@ -153,14 +152,14 @@ impl, V: Versions> } else { return false; } - } + }, ProposalDependency::VidShare => { if let HotShotEvent::VidDisperseSend(vid_disperse, _) = event { vid_disperse.data.view_number() } else { return false; } - } + }, }; let valid = event_view == view_number; if valid { @@ 
-219,25 +218,25 @@ impl, V: Versions> match event.as_ref() { HotShotEvent::SendPayloadCommitmentAndMetadata(..) => { payload_commitment_dependency.mark_as_completed(Arc::clone(&event)); - } + }, HotShotEvent::QuorumProposalPreliminarilyValidated(..) => { proposal_dependency.mark_as_completed(event); - } + }, HotShotEvent::Qc2Formed(quorum_certificate) => match quorum_certificate { Either::Right(_) => { timeout_dependency.mark_as_completed(event); - } + }, Either::Left(_) => { qc_dependency.mark_as_completed(event); - } + }, }, HotShotEvent::ViewSyncFinalizeCertificateRecv(_) => { view_sync_dependency.mark_as_completed(event); - } - HotShotEvent::VidDisperseSend(_, _) => { + }, + HotShotEvent::VidDisperseSend(..) => { vid_share_dependency.mark_as_completed(event); - } - _ => {} + }, + _ => {}, }; // We have three cases to consider: @@ -410,7 +409,7 @@ impl, V: Versions> self.formed_upgrade_certificate = Some(cert.clone()); } - } + }, HotShotEvent::Qc2Formed(cert) => match cert.clone() { either::Right(timeout_cert) => { let view_number = timeout_cert.view_number + 1; @@ -423,7 +422,7 @@ impl, V: Versions> epoch_transition_indicator, ) .await?; - } + }, either::Left(qc) => { // Only update if the qc is from a newer view if qc.view_number <= self.consensus.read().await.high_qc().view_number { @@ -462,7 +461,7 @@ impl, V: Versions> epoch_transition_indicator, ) .await?; - } + }, }, HotShotEvent::SendPayloadCommitmentAndMetadata( _payload_commitment, @@ -483,7 +482,7 @@ impl, V: Versions> EpochTransitionIndicator::NotInTransition, ) .await?; - } + }, HotShotEvent::ViewSyncFinalizeCertificateRecv(certificate) => { let epoch_number = certificate.data.epoch; let epoch_membership = self @@ -521,7 +520,7 @@ impl, V: Versions> EpochTransitionIndicator::NotInTransition, ) .await?; - } + }, HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) => { let view_number = proposal.data.view_number(); // All nodes get the latest proposed view as a proxy of `cur_view` of old. @@ -538,7 +537,7 @@ impl, V: Versions> epoch_transition_indicator, ) .await?; - } + }, HotShotEvent::QuorumProposalSend(proposal, _) => { let view = proposal.data.view_number(); @@ -546,7 +545,7 @@ impl, V: Versions> self.update_latest_proposed_view(view).await, "Failed to update latest proposed view" ); - } + }, HotShotEvent::VidDisperseSend(vid_disperse, _) => { let view_number = vid_disperse.data.view_number(); self.create_dependency_task_if_new( @@ -558,18 +557,18 @@ impl, V: Versions> EpochTransitionIndicator::NotInTransition, ) .await?; - } + }, HotShotEvent::ViewChange(view, epoch) => { if epoch > &self.cur_epoch { self.cur_epoch = *epoch; } let keep_view = TYPES::View::new(view.saturating_sub(1)); self.cancel_tasks(keep_view); - } + }, HotShotEvent::Timeout(view, ..) => { let keep_view = TYPES::View::new(view.saturating_sub(1)); self.cancel_tasks(keep_view); - } + }, HotShotEvent::HighQcSend(qc, ..) | HotShotEvent::ExtendedQcSend(qc, ..) 
=> { ensure!(qc.view_number() > self.highest_qc.view_number()); let cert_epoch_number = qc.data.epoch; @@ -590,7 +589,7 @@ impl, V: Versions> .context(|e| warn!("Quorum certificate {:?} was invalid: {}", qc.data(), e))?; self.highest_qc = qc.clone(); - } + }, HotShotEvent::NextEpochQc2Formed(Either::Left(next_epoch_qc)) => { // Only update if the qc is from a newer view let current_next_epoch_qc = @@ -624,8 +623,8 @@ impl, V: Versions> &event_sender, ) .await; - } - _ => {} + }, + _ => {}, } Ok(()) } diff --git a/hotshot-task-impls/src/quorum_proposal_recv/handlers.rs b/hotshot-task-impls/src/quorum_proposal_recv/handlers.rs index f965bbfada..a5dc3a8428 100644 --- a/hotshot-task-impls/src/quorum_proposal_recv/handlers.rs +++ b/hotshot-task-impls/src/quorum_proposal_recv/handlers.rs @@ -221,7 +221,7 @@ pub(crate) async fn handle_quorum_proposal_recv< } else { bail!("Parent state not found! Consensus internally inconsistent"); } - } + }, None => None, }; diff --git a/hotshot-task-impls/src/quorum_proposal_recv/mod.rs b/hotshot-task-impls/src/quorum_proposal_recv/mod.rs index b56130f3b3..3238252fc4 100644 --- a/hotshot-task-impls/src/quorum_proposal_recv/mod.rs +++ b/hotshot-task-impls/src/quorum_proposal_recv/mod.rs @@ -22,8 +22,8 @@ use hotshot_types::{ message::UpgradeLock, simple_certificate::UpgradeCertificate, simple_vote::HasEpoch, - traits::block_contents::BlockHeader, traits::{ + block_contents::BlockHeader, node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, }, @@ -180,10 +180,10 @@ impl, V: Versions> ) .await { - Ok(()) => {} + Ok(()) => {}, Err(e) => error!(?e, "Failed to validate the proposal"), } - } + }, HotShotEvent::ViewChange(view, epoch) => { if *epoch > self.cur_epoch { self.cur_epoch = *epoch; @@ -198,8 +198,8 @@ impl, V: Versions> // to enter view V + 1. 
let oldest_view_to_keep = TYPES::View::new(view.saturating_sub(1)); self.cancel_tasks(oldest_view_to_keep); - } - _ => {} + }, + _ => {}, } } } diff --git a/hotshot-task-impls/src/quorum_vote/handlers.rs b/hotshot-task-impls/src/quorum_vote/handlers.rs index 7fd2a190f6..eb47f7c3e4 100644 --- a/hotshot-task-impls/src/quorum_vote/handlers.rs +++ b/hotshot-task-impls/src/quorum_vote/handlers.rs @@ -10,12 +10,11 @@ use async_broadcast::{InactiveReceiver, Sender}; use async_lock::RwLock; use chrono::Utc; use committable::Committable; -use hotshot_types::epoch_membership::EpochMembership; use hotshot_types::{ consensus::OuterConsensus, data::{Leaf2, QuorumProposalWrapper, VidDisperseShare}, drb::{compute_drb_result, DrbResult, INITIAL_DRB_RESULT}, - epoch_membership::EpochMembershipCoordinator, + epoch_membership::{EpochMembership, EpochMembershipCoordinator}, event::{Event, EventType}, message::{Proposal, UpgradeLock}, simple_vote::{HasEpoch, QuorumData2, QuorumVote2}, @@ -126,7 +125,7 @@ async fn store_and_get_computed_drb_result< .await; task_state.drb_computation = None; Ok(result) - } + }, Err(e) => Err(warn!("Error in DRB calculation: {:?}.", e)), } } @@ -243,10 +242,10 @@ async fn start_drb_task, V: Versio ) .await; task_state.drb_computation = None; - } + }, Err(e) => { tracing::error!("error joining DRB computation task: {e:?}"); - } + }, } } else if *task_epoch == new_epoch_number { return; @@ -606,10 +605,10 @@ pub(crate) async fn update_shared_state< Some((leaf, view)) => { maybe_validated_view = Some(view); Some(leaf) - } + }, None => None, } - } + }, }; let parent = maybe_parent.context(info!( diff --git a/hotshot-task-impls/src/quorum_vote/mod.rs b/hotshot-task-impls/src/quorum_vote/mod.rs index e3d7af2771..e27b3b7cd1 100644 --- a/hotshot-task-impls/src/quorum_vote/mod.rs +++ b/hotshot-task-impls/src/quorum_vote/mod.rs @@ -15,7 +15,6 @@ use hotshot_task::{ dependency_task::{DependencyTask, HandleDepOutput}, task::TaskState, }; -use hotshot_types::StakeTableEntries; use hotshot_types::{ consensus::{ConsensusMetricsValue, OuterConsensus}, data::{Leaf2, QuorumProposalWrapper}, @@ -33,6 +32,7 @@ use hotshot_types::{ }, utils::{epoch_from_block_number, option_epoch_from_block_number}, vote::{Certificate, HasViewNumber}, + StakeTableEntries, }; use hotshot_utils::anytrace::*; use tokio::task::JoinHandle; @@ -123,7 +123,7 @@ impl + 'static, V: Versions> Handl Err(e) => { tracing::error!("{e:#}"); return; - } + }, }; let proposal_payload_comm = proposal.data.block_header().payload_commitment(); let parent_commitment = parent_leaf.commit(); @@ -165,7 +165,7 @@ impl + 'static, V: Versions> Handl } leaf = Some(proposed_leaf); parent_view_number = Some(parent_leaf.view_number()); - } + }, HotShotEvent::DaCertificateValidated(cert) => { let cert_payload_comm = &cert.data().payload_commit; let next_epoch_cert_payload_comm = cert.data().next_epoch_payload_commit; @@ -187,7 +187,7 @@ impl + 'static, V: Versions> Handl } else { next_epoch_payload_commitment = next_epoch_cert_payload_comm; } - } + }, HotShotEvent::VidShareValidated(share) => { let vid_payload_commitment = &share.data.payload_commitment(); vid_share = Some(share.clone()); @@ -211,8 +211,8 @@ impl + 'static, V: Versions> Handl } else { payload_commitment = Some(*vid_payload_commitment); } - } - _ => {} + }, + _ => {}, } } @@ -269,7 +269,7 @@ impl + 'static, V: Versions> Handl Err(e) => { tracing::warn!("{:?}", e); return; - } + }, }; tracing::trace!( @@ -380,21 +380,21 @@ impl, V: Versions> QuorumVoteTaskS } else { return false; } - 
} + }, VoteDependency::Dac => { if let HotShotEvent::DaCertificateValidated(cert) = event { cert.view_number } else { return false; } - } + }, VoteDependency::Vid => { if let HotShotEvent::VidShareValidated(disperse) = event { disperse.data.view_number() } else { return false; } - } + }, }; if event_view == view_number { tracing::trace!( @@ -552,7 +552,7 @@ impl, V: Versions> QuorumVoteTaskS Arc::clone(&event), ); } - } + }, HotShotEvent::DaCertificateRecv(cert) => { let view = cert.view_number; @@ -595,7 +595,7 @@ impl, V: Versions> QuorumVoteTaskS &event_sender, Arc::clone(&event), ); - } + }, HotShotEvent::VidShareRecv(sender, share) => { let view = share.data.view_number(); // Do nothing if the VID share is old @@ -659,7 +659,7 @@ impl, V: Versions> QuorumVoteTaskS &event_sender, Arc::clone(&event), ); - } + }, HotShotEvent::Timeout(view, ..) => { let view = TYPES::View::new(view.saturating_sub(1)); // cancel old tasks @@ -668,7 +668,7 @@ impl, V: Versions> QuorumVoteTaskS task.abort(); } self.vote_dependencies = current_tasks; - } + }, HotShotEvent::ViewChange(mut view, _) => { view = TYPES::View::new(view.saturating_sub(1)); if !self.update_latest_voted_view(view).await { @@ -680,8 +680,8 @@ impl, V: Versions> QuorumVoteTaskS task.abort(); } self.vote_dependencies = current_tasks; - } - _ => {} + }, + _ => {}, } Ok(()) } diff --git a/hotshot-task-impls/src/request.rs b/hotshot-task-impls/src/request.rs index 1bf327a98f..c0320105a8 100644 --- a/hotshot-task-impls/src/request.rs +++ b/hotshot-task-impls/src/request.rs @@ -147,14 +147,14 @@ impl> TaskState for NetworkRequest .await; } Ok(()) - } + }, HotShotEvent::ViewChange(view, _) => { let view = *view; if view > self.view { self.view = view; } Ok(()) - } + }, _ => Ok(()), } } @@ -226,7 +226,7 @@ impl> NetworkRequestState { tracing::warn!(e.message); return; - } + }, }; let mut da_committee_for_view = membership_reader.da_committee_members(view).await; if let Ok(leader) = membership_reader.leader(view).await { diff --git a/hotshot-task-impls/src/response.rs b/hotshot-task-impls/src/response.rs index 1ea66cc667..4f15a3a343 100644 --- a/hotshot-task-impls/src/response.rs +++ b/hotshot-task-impls/src/response.rs @@ -111,7 +111,7 @@ impl NetworkResponseState { ) .await; } - } + }, HotShotEvent::QuorumProposalRequestRecv(req, signature) => { // Make sure that this request came from who we think it did if !req.key.validate(signature, req.commit().as_ref()) { @@ -137,16 +137,16 @@ impl NetworkResponseState { ) .await; } - } + }, HotShotEvent::Shutdown => { return; - } - _ => {} + }, + _ => {}, } - } + }, Err(e) => { tracing::error!("Failed to receive event. 
{:?}", e); - } + }, } } } diff --git a/hotshot-task-impls/src/rewind.rs b/hotshot-task-impls/src/rewind.rs index 82e267dfb3..d4a9bcb58e 100644 --- a/hotshot-task-impls/src/rewind.rs +++ b/hotshot-task-impls/src/rewind.rs @@ -58,7 +58,7 @@ impl TaskState for RewindTaskState { Err(e) => { tracing::error!("Failed to write file {}; error = {}", filename, e); return; - } + }, }; for (event_number, event) in self.events.iter().enumerate() { diff --git a/hotshot-task-impls/src/transactions.rs b/hotshot-task-impls/src/transactions.rs index 06a65f8b63..c3ad41e5f0 100644 --- a/hotshot-task-impls/src/transactions.rs +++ b/hotshot-task-impls/src/transactions.rs @@ -16,8 +16,7 @@ use hotshot_builder_api::v0_1::block_info::AvailableBlockInfo; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::OuterConsensus, - data::VidCommitment, - data::{null_block, PackedBundle}, + data::{null_block, PackedBundle, VidCommitment}, epoch_membership::EpochMembershipCoordinator, event::{Event, EventType}, message::UpgradeLock, @@ -134,7 +133,7 @@ impl, V: Versions> TransactionTask Err(e) => { tracing::error!("Failed to calculate version: {:?}", e); return None; - } + }, }; if version < V::Marketplace::VERSION { @@ -159,7 +158,7 @@ impl, V: Versions> TransactionTask Err(err) => { tracing::error!("Upgrade certificate requires unsupported version, refusing to request blocks: {}", err); return None; - } + }, }; // Request a block from the builder unless we are between versions. @@ -303,11 +302,11 @@ impl, V: Versions> TransactionTask Ok(Err(e)) => { tracing::debug!("Failed to retrieve bundle: {e}"); continue; - } + }, Err(e) => { tracing::debug!("Failed to retrieve bundle: {e}"); continue; - } + }, } } @@ -384,7 +383,7 @@ impl, V: Versions> TransactionTask Err(err) => { tracing::error!("Upgrade certificate requires unsupported version, refusing to request blocks: {}", err); return None; - } + }, }; let packed_bundle = match self @@ -410,7 +409,7 @@ impl, V: Versions> TransactionTask .add(1); null_block - } + }, }; broadcast_event( @@ -458,7 +457,7 @@ impl, V: Versions> TransactionTask &self.output_event_stream, ) .await; - } + }, HotShotEvent::ViewChange(view, epoch) => { let view = TYPES::View::new(std::cmp::max(1, **view)); let epoch = if self.upgrade_lock.epochs_enabled(view).await { @@ -491,8 +490,8 @@ impl, V: Versions> TransactionTask self.handle_view_change(&event_stream, view, epoch).await; return Ok(()); } - } - _ => {} + }, + _ => {}, } Ok(()) } @@ -513,7 +512,7 @@ impl, V: Versions> TransactionTask // We still have time, will re-try in a bit sleep(RETRY_DELAY).await; continue; - } + }, } } } @@ -547,13 +546,13 @@ impl, V: Versions> TransactionTask let leaf = consensus_reader.saved_leaves().get(leaf_commitment).context (info!("Missing leaf with commitment {leaf_commitment} for view {target_view} in saved_leaves"))?; return Ok((target_view, leaf.payload_commitment())); - } + }, ViewInner::Failed => { // For failed views, backtrack target_view = TYPES::View::new(target_view.checked_sub(1).context(warn!("Reached genesis. 
Something is wrong -- have we not decided any blocks since genesis?"))?); continue; - } + }, } } } @@ -571,7 +570,7 @@ impl, V: Versions> TransactionTask Err(e) => { tracing::warn!("Failed to find last vid commitment in time: {e}"); return None; - } + }, }; let parent_comm_sig = match <::SignatureKey as SignatureKey>::sign( @@ -582,7 +581,7 @@ impl, V: Versions> TransactionTask Err(err) => { tracing::error!(%err, "Failed to sign block hash"); return None; - } + }, }; while task_start_time.elapsed() < self.builder_timeout { @@ -596,7 +595,7 @@ impl, V: Versions> TransactionTask // We got a block Ok(Ok(block)) => { return Some(block); - } + }, // We failed to get a block Ok(Err(err)) => { @@ -604,13 +603,13 @@ impl, V: Versions> TransactionTask // pause a bit sleep(RETRY_DELAY).await; continue; - } + }, // We timed out while getting available blocks Err(err) => { tracing::info!(%err, "Timeout while getting available blocks"); return None; - } + }, } } @@ -675,7 +674,7 @@ impl, V: Versions> TransactionTask Err(err) => { tracing::warn!(%err,"Error getting available blocks"); None - } + }, }) .flatten() .collect::>() @@ -735,7 +734,7 @@ impl, V: Versions> TransactionTask Err(err) => { tracing::error!(%err, "Failed to sign block hash"); continue; - } + }, }; let response = { @@ -751,7 +750,7 @@ impl, V: Versions> TransactionTask Err(err) => { tracing::warn!(%err, "Error claiming block data"); continue; - } + }, }; let header_input = match header_input { @@ -759,7 +758,7 @@ impl, V: Versions> TransactionTask Err(err) => { tracing::warn!(%err, "Error claiming header input"); continue; - } + }, }; // verify the signature over the message diff --git a/hotshot-task-impls/src/upgrade.rs b/hotshot-task-impls/src/upgrade.rs index 4eaffb5dd8..d45917076e 100644 --- a/hotshot-task-impls/src/upgrade.rs +++ b/hotshot-task-impls/src/upgrade.rs @@ -217,7 +217,7 @@ impl UpgradeTaskState { tracing::debug!("Sending upgrade vote {:?}", vote.view_number()); broadcast_event(Arc::new(HotShotEvent::UpgradeVoteSend(vote)), &tx).await; - } + }, HotShotEvent::UpgradeVoteRecv(ref vote) => { tracing::debug!("Upgrade vote recv, Main Task {:?}", vote.view_number()); @@ -248,7 +248,7 @@ impl UpgradeTaskState { EpochTransitionIndicator::NotInTransition, ) .await?; - } + }, HotShotEvent::ViewChange(new_view, epoch_number) => { if *epoch_number > self.cur_epoch { self.cur_epoch = *epoch_number; @@ -328,8 +328,8 @@ impl UpgradeTaskState { ) .await; } - } - _ => {} + }, + _ => {}, } Ok(()) } diff --git a/hotshot-task-impls/src/vid.rs b/hotshot-task-impls/src/vid.rs index 8f98e018a7..0767288ac9 100644 --- a/hotshot-task-impls/src/vid.rs +++ b/hotshot-task-impls/src/vid.rs @@ -160,7 +160,7 @@ impl, V: Versions> VidTaskState { if *epoch > self.cur_epoch { @@ -178,7 +178,7 @@ impl, V: Versions> VidTaskState { let proposed_block_number = proposal.data.block_header().block_number(); @@ -243,11 +243,11 @@ impl, V: Versions> VidTaskState { return Some(HotShotTaskCompleted); - } - _ => {} + }, + _ => {}, } None } diff --git a/hotshot-task-impls/src/view_sync.rs b/hotshot-task-impls/src/view_sync.rs index 8dcc296cfe..a87ed3aac7 100644 --- a/hotshot-task-impls/src/view_sync.rs +++ b/hotshot-task-impls/src/view_sync.rs @@ -233,7 +233,7 @@ impl ViewSyncTaskState { Err(e) => { tracing::warn!(e.message); return; - } + }, }; // We do not have a replica task already running, so start one @@ -278,25 +278,25 @@ impl ViewSyncTaskState { let view = certificate.view_number(); self.send_to_or_create_replica(event, view, &event_stream) .await; - } + }, 
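// The view-sync certificate arms here (PreCommit above, Commit and Finalize
// just below) all follow one dispatch pattern: read the certificate's view
// number and hand the event to the replica task that owns that view, creating
// the task if none is running yet. A minimal sketch of that pattern, with a
// hypothetical `ReplicaTask` standing in for the real ViewSyncReplicaTaskState:
use std::collections::BTreeMap;

#[derive(Default)]
struct ReplicaTask; // stand-in for the per-view replica task state

fn send_to_or_create_replica(
    tasks: &mut BTreeMap<u64, ReplicaTask>,
    view: u64,
) -> &mut ReplicaTask {
    // Reuse the running task for this view, or lazily create one.
    tasks.entry(view).or_default()
}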
HotShotEvent::ViewSyncCommitCertificateRecv(certificate) => { tracing::debug!("Received view sync cert for phase {:?}", certificate); let view = certificate.view_number(); self.send_to_or_create_replica(event, view, &event_stream) .await; - } + }, HotShotEvent::ViewSyncFinalizeCertificateRecv(certificate) => { tracing::debug!("Received view sync cert for phase {:?}", certificate); let view = certificate.view_number(); self.send_to_or_create_replica(event, view, &event_stream) .await; - } - HotShotEvent::ViewSyncTimeout(view, _, _) => { + }, + HotShotEvent::ViewSyncTimeout(view, ..) => { tracing::debug!("view sync timeout in main task {:?}", view); let view = *view; self.send_to_or_create_replica(event, view, &event_stream) .await; - } + }, HotShotEvent::ViewSyncPreCommitVoteRecv(ref vote) => { let mut map = self.pre_commit_relay_map.write().await; @@ -344,7 +344,7 @@ impl ViewSyncTaskState { .await?; relay_map.insert(relay, vote_collector); - } + }, HotShotEvent::ViewSyncCommitVoteRecv(ref vote) => { let mut map = self.commit_relay_map.write().await; @@ -392,7 +392,7 @@ impl ViewSyncTaskState { ) .await?; relay_map.insert(relay, vote_collector); - } + }, HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { let mut map = self.finalize_relay_map.write().await; @@ -441,7 +441,7 @@ impl ViewSyncTaskState { if let Ok(vote_task) = vote_collector { relay_map.insert(relay, vote_task); } - } + }, &HotShotEvent::ViewChange(new_view, epoch) => { if epoch > self.cur_epoch { @@ -483,7 +483,7 @@ impl ViewSyncTaskState { self.last_garbage_collected_view = self.cur_view - 1; } - } + }, &HotShotEvent::Timeout(view_number, ..) => { // This is an old timeout and we can ignore it ensure!( @@ -528,9 +528,9 @@ impl ViewSyncTaskState { ) .await; } - } + }, - _ => {} + _ => {}, } Ok(()) } @@ -634,7 +634,7 @@ impl ViewSyncReplicaTaskState { .await; } })); - } + }, HotShotEvent::ViewSyncCommitCertificateRecv(certificate) => { let last_seen_certificate = ViewSyncPhase::Commit; @@ -741,7 +741,7 @@ impl ViewSyncReplicaTaskState { .await; } })); - } + }, HotShotEvent::ViewSyncFinalizeCertificateRecv(certificate) => { // Ignore certificate if it is for an older round @@ -796,7 +796,7 @@ impl ViewSyncReplicaTaskState { ) .await; return Some(HotShotTaskCompleted); - } + }, HotShotEvent::ViewSyncTrigger(view_number) => { let view_number = *view_number; @@ -850,7 +850,7 @@ impl ViewSyncReplicaTaskState { })); return None; - } + }, HotShotEvent::ViewSyncTimeout(round, relay, last_seen_certificate) => { let round = *round; @@ -884,11 +884,11 @@ impl ViewSyncReplicaTaskState { &event_stream, ) .await; - } + }, ViewSyncPhase::Finalize => { // This should never occur unimplemented!() - } + }, } self.timeout_task = Some(spawn({ @@ -917,7 +917,7 @@ impl ViewSyncReplicaTaskState { return None; } - } + }, _ => return None, } None diff --git a/hotshot-task-impls/src/vote_collection.rs b/hotshot-task-impls/src/vote_collection.rs index eb8a4cd0aa..9dc3ebc477 100644 --- a/hotshot-task-impls/src/vote_collection.rs +++ b/hotshot-task-impls/src/vote_collection.rs @@ -138,7 +138,7 @@ impl< self.accumulator = None; Ok(Some(cert)) - } + }, } } } @@ -279,7 +279,7 @@ where entry.insert(collector); Ok(()) - } + }, Entry::Occupied(mut entry) => { // handle the vote, and garbage collect if the vote collector is finished if entry @@ -293,7 +293,7 @@ where } Ok(()) - } + }, } } @@ -517,7 +517,7 @@ impl HotShotEvent::QuorumVoteRecv(vote) => { // #3967 REVIEW NOTE: Should we error if self.epoch is None? 
self.accumulate_vote(&vote.clone().into(), sender).await - } + }, _ => Ok(None), } } @@ -599,7 +599,7 @@ impl match event.as_ref() { HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => { self.accumulate_vote(vote, sender).await - } + }, _ => Ok(None), } } @@ -641,7 +641,7 @@ impl match event.as_ref() { HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { self.accumulate_vote(vote, sender).await - } + }, _ => Ok(None), } } diff --git a/hotshot-task/src/dependency.rs b/hotshot-task/src/dependency.rs index 7b3d7dfa0b..d4a0e2eed8 100644 --- a/hotshot-task/src/dependency.rs +++ b/hotshot-task/src/dependency.rs @@ -163,13 +163,13 @@ impl Dependency for EventDependency { if (self.match_fn)(&event) { return Some(event); } - } + }, Err(RecvError::Overflowed(n)) => { tracing::error!("Dependency Task overloaded, skipping {} events", n); - } + }, Err(RecvError::Closed) => { return None; - } + }, } } } diff --git a/hotshot-task/src/task.rs b/hotshot-task/src/task.rs index 170d7dbc02..c0e4ec5b07 100644 --- a/hotshot-task/src/task.rs +++ b/hotshot-task/src/task.rs @@ -86,13 +86,13 @@ impl Task { S::handle_event(&mut self.state, input, &self.sender, &self.receiver) .await .inspect_err(|e| tracing::debug!("{e}")); - } + }, Err(RecvError::Closed) => { break self.boxed_state(); - } + }, Err(e) => { tracing::error!("Failed to receive from event stream Error: {}", e); - } + }, } } }) diff --git a/hotshot-testing/src/block_builder/mod.rs b/hotshot-testing/src/block_builder/mod.rs index 103b59c14d..534e6167ef 100644 --- a/hotshot-testing/src/block_builder/mod.rs +++ b/hotshot-testing/src/block_builder/mod.rs @@ -109,13 +109,13 @@ pub fn run_builder_source( match event { BuilderChange::Up if handle.is_none() => { handle = Some(start_builder(url.clone(), source.clone())); - } + }, BuilderChange::Down => { if let Some(handle) = handle.take() { handle.abort(); } - } - _ => {} + }, + _ => {}, } } }); @@ -153,13 +153,13 @@ pub fn run_builder_source_0_1( match event { BuilderChange::Up if handle.is_none() => { handle = Some(start_builder(url.clone(), source.clone())); - } + }, BuilderChange::Down => { if let Some(handle) = handle.take() { handle.abort(); } - } - _ => {} + }, + _ => {}, } } }); diff --git a/hotshot-testing/src/block_builder/random.rs b/hotshot-testing/src/block_builder/random.rs index 7a8723b22f..ec7d6129e2 100644 --- a/hotshot-testing/src/block_builder/random.rs +++ b/hotshot-testing/src/block_builder/random.rs @@ -178,7 +178,7 @@ where match stream.next().await { None => { break; - } + }, Some(evt) => { if let EventType::ViewFinished { view_number } = evt.event { if let Some(change) = self.changes.remove(&view_number) { @@ -192,18 +192,18 @@ where self.blocks.clone(), ))) } - } + }, BuilderChange::Down => { if let Some(handle) = task.take() { handle.abort(); } - } - BuilderChange::FailClaims(_) => {} + }, + BuilderChange::FailClaims(_) => {}, } let _ = self.change_sender.broadcast(change).await; } } - } + }, } } }); diff --git a/hotshot-testing/src/block_builder/simple.rs b/hotshot-testing/src/block_builder/simple.rs index bc098033a8..c29cf8666e 100644 --- a/hotshot-testing/src/block_builder/simple.rs +++ b/hotshot-testing/src/block_builder/simple.rs @@ -382,7 +382,7 @@ impl BuilderTask for SimpleBuilderTask { match stream.next().await { None => { break; - } + }, Some(evt) => match evt.event { EventType::ViewFinished { view_number } => { if let Some(change) = self.changes.remove(&view_number) { @@ -392,14 +392,14 @@ impl BuilderTask for SimpleBuilderTask { should_build_blocks = false; 
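// On `BuilderChange::Down` the simple builder flips `should_build_blocks` off
// and, in the lines that follow, clears its queued transactions and cached
// blocks so that a later `Up` starts from a clean slate.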
self.transactions.write().await.clear(); self.blocks.write().await.clear(); - } + }, BuilderChange::FailClaims(value) => { self.should_fail_claims.store(value, Ordering::Relaxed); - } + }, } let _ = self.change_sender.broadcast(change).await; } - } + }, EventType::Decide { leaf_chain, .. } if should_build_blocks => { let mut queue = self.transactions.write().await; for leaf_info in leaf_chain.iter() { @@ -413,7 +413,7 @@ impl BuilderTask for SimpleBuilderTask { } } self.blocks.write().await.clear(); - } + }, EventType::DaProposal { proposal, .. } if should_build_blocks => { let payload = TYPES::BlockPayload::from_bytes( &proposal.data.encoded_transactions, @@ -429,7 +429,7 @@ impl BuilderTask for SimpleBuilderTask { txn.claimed = Some(now); } } - } + }, EventType::Transactions { transactions } if should_build_blocks => { let mut queue = self.transactions.write().await; for transaction in transactions { @@ -443,8 +443,8 @@ impl BuilderTask for SimpleBuilderTask { ); } } - } - _ => {} + }, + _ => {}, }, } } diff --git a/hotshot-testing/src/byzantine/byzantine_behaviour.rs b/hotshot-testing/src/byzantine/byzantine_behaviour.rs index 7816b3e354..d5c6647fa9 100644 --- a/hotshot-testing/src/byzantine/byzantine_behaviour.rs +++ b/hotshot-testing/src/byzantine/byzantine_behaviour.rs @@ -67,7 +67,7 @@ impl, V: Versions> EventTransforme consensus.write().await.reset_actions(); result - } + }, _ => vec![event.clone()], } } @@ -94,9 +94,9 @@ impl, V: Versions> EventTransforme _consensus: Arc>>, ) -> Vec> { match event { - HotShotEvent::QuorumProposalSend(_, _) | HotShotEvent::QuorumVoteSend(_) => { + HotShotEvent::QuorumProposalSend(..) | HotShotEvent::QuorumVoteSend(_) => { vec![event.clone(), event.clone()] - } + }, _ => vec![event.clone()], } } @@ -182,11 +182,11 @@ impl + std::fmt::Debug, V: Version self.handle_proposal_send_event(event, proposal, sender) .await, ]; - } + }, HotShotEvent::QuorumProposalValidated(proposal, _) => { self.validated_proposals.push(proposal.data.clone()); - } - _ => {} + }, + _ => {}, } vec![event.clone()] } @@ -412,7 +412,7 @@ impl + std::fmt::Debug, V: Version .unwrap(); return vec![HotShotEvent::QuorumVoteSend(vote)]; } - } + }, HotShotEvent::TimeoutVoteSend(vote) => { // Check if this view was a dishonest proposal view, if true dont send timeout let dishonest_proposals = self.dishonest_proposal_view_numbers.read().await; @@ -421,11 +421,11 @@ impl + std::fmt::Debug, V: Version // So, dont send the timeout to the next leader from this byzantine replica return vec![]; } - } + }, HotShotEvent::QuorumVoteSend(vote) => { self.votes_sent.push(vote.clone()); - } - _ => {} + }, + _ => {}, } vec![event.clone()] } diff --git a/hotshot-testing/src/consistency_task.rs b/hotshot-testing/src/consistency_task.rs index 8813725db9..9caac9c975 100644 --- a/hotshot-testing/src/consistency_task.rs +++ b/hotshot-testing/src/consistency_task.rs @@ -44,15 +44,15 @@ fn sanitize_node_map( reduced.dedup(); match reduced.len() { - 0 => {} + 0 => {}, 1 => { result.insert(*view, reduced[0].clone()); - } + }, _ => { bail!( "We have received inconsistent leaves for view {view:?}. 
Leaves:\n\n{leaves:?}" ); - } + }, } } @@ -300,12 +300,12 @@ impl, V: Versions> ConsistencyTas match result { Ok(TestProgress::Finished) => { let _ = self.test_sender.broadcast(TestEvent::Shutdown).await; - } + }, Err(e) => { self.add_error(e); let _ = self.test_sender.broadcast(TestEvent::Shutdown).await; - } - Ok(TestProgress::Incomplete) => {} + }, + Ok(TestProgress::Incomplete) => {}, } } diff --git a/hotshot-testing/src/helpers.rs b/hotshot-testing/src/helpers.rs index 7e331a9d65..c28cc276b3 100644 --- a/hotshot-testing/src/helpers.rs +++ b/hotshot-testing/src/helpers.rs @@ -5,6 +5,8 @@ // along with the HotShot repository. If not, see . #![allow(clippy::panic)] +use std::{collections::BTreeMap, fmt::Debug, hash::Hash, marker::PhantomData, sync::Arc}; + use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; use bitvec::bitvec; @@ -22,7 +24,6 @@ use hotshot_example_types::{ storage_types::TestStorage, }; use hotshot_task_impls::events::HotShotEvent; -use hotshot_types::traits::node_implementation::ConsensusTime; use hotshot_types::{ consensus::ConsensusMetricsValue, data::{vid_commitment, Leaf2, VidCommitment, VidDisperse, VidDisperseShare}, @@ -33,7 +34,7 @@ use hotshot_types::{ simple_vote::{DaData2, DaVote2, SimpleVote, VersionedVoteData}, traits::{ election::Membership, - node_implementation::{NodeType, Versions}, + node_implementation::{ConsensusTime, NodeType, Versions}, EncodeBytes, }, utils::{option_epoch_from_block_number, View, ViewInner}, @@ -42,7 +43,6 @@ use hotshot_types::{ }; use primitive_types::U256; use serde::Serialize; -use std::{collections::BTreeMap, fmt::Debug, hash::Hash, marker::PhantomData, sync::Arc}; use vbs::version::Version; use crate::{test_builder::TestDescription, test_launcher::TestLauncher}; diff --git a/hotshot-testing/src/predicates/event.rs b/hotshot-testing/src/predicates/event.rs index 6ee78fb2f8..4e1139f09a 100644 --- a/hotshot-testing/src/predicates/event.rs +++ b/hotshot-testing/src/predicates/event.rs @@ -217,7 +217,7 @@ where QuorumProposalSend(proposal, _) => { Some(proposal.data.block_header().payload_commitment()) == null_block::commitment::(num_storage_nodes) - } + }, _ => false, }); Box::new(EventPredicate { check, info }) diff --git a/hotshot-testing/src/script.rs b/hotshot-testing/src/script.rs index b25e286f9f..29a853c6ba 100644 --- a/hotshot-testing/src/script.rs +++ b/hotshot-testing/src/script.rs @@ -121,6 +121,6 @@ pub async fn validate_output_or_panic_in_script( "Stage {} | Output in {} failed to satisfy: {:?}.\n\nReceived:\n\n{:?}", stage_number, script_name, assert, output ) - } + }, } } diff --git a/hotshot-testing/src/spinning_task.rs b/hotshot-testing/src/spinning_task.rs index c13fe23d6b..ac92dc2287 100644 --- a/hotshot-testing/src/spinning_task.rs +++ b/hotshot-testing/src/spinning_task.rs @@ -199,10 +199,10 @@ where marketplace_config, ) .await - } + }, LateNodeContext::Restart => { panic!("Cannot spin up a node with Restart context") - } + }, }; let handle = context.run_tasks().await; @@ -219,13 +219,13 @@ where self.handles.write().await.push(node); } - } + }, NodeAction::Down => { if let Some(node) = self.handles.write().await.get_mut(idx) { tracing::error!("Node {} shutting down", idx); node.handle.shut_down().await; } - } + }, NodeAction::RestartDown(delay_views) => { let node_id = idx.try_into().unwrap(); if let Some(node) = self.handles.write().await.get_mut(idx) { @@ -327,25 +327,25 @@ where self.restart_contexts.insert(idx, new_ctx); } } - } + }, NodeAction::RestartUp => { if let Some(ctx) = 
self.restart_contexts.remove(&idx) { new_nodes.push((ctx.context, idx)); new_networks.push(ctx.network.clone()); } - } + }, NodeAction::NetworkUp => { if let Some(handle) = self.handles.write().await.get(idx) { tracing::error!("Node {} networks resuming", idx); handle.network.resume(); } - } + }, NodeAction::NetworkDown => { if let Some(handle) = self.handles.write().await.get(idx) { tracing::error!("Node {} networks pausing", idx); handle.network.pause(); } - } + }, } } } diff --git a/hotshot-testing/src/test_builder.rs b/hotshot-testing/src/test_builder.rs index 408c259360..31c69768e7 100644 --- a/hotshot-testing/src/test_builder.rs +++ b/hotshot-testing/src/test_builder.rs @@ -4,6 +4,8 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . +use std::{collections::HashMap, num::NonZeroUsize, rc::Rc, sync::Arc, time::Duration}; + use async_lock::RwLock; use hotshot::{ tasks::EventTransformerState, @@ -15,16 +17,14 @@ use hotshot_example_types::{ auction_results_provider_types::TestAuctionResultsProvider, node_types::TestTypes, state_types::TestInstanceState, storage_types::TestStorage, testable_delay::DelayConfig, }; -use hotshot_types::traits::node_implementation::ConsensusTime; use hotshot_types::{ consensus::ConsensusMetricsValue, drb::INITIAL_DRB_RESULT, epoch_membership::EpochMembershipCoordinator, - traits::node_implementation::{NodeType, Versions}, + traits::node_implementation::{ConsensusTime, NodeType, Versions}, HotShotConfig, PeerConfig, ValidatorConfig, }; use hotshot_utils::anytrace::*; -use std::{collections::HashMap, num::NonZeroUsize, rc::Rc, sync::Arc, time::Duration}; use tide_disco::Url; use vec1::Vec1; @@ -283,7 +283,7 @@ pub async fn create_test_handle< .await; left_handle - } + }, Behaviour::Byzantine(state) => { let state = Box::leak(state); state @@ -300,7 +300,7 @@ pub async fn create_test_handle< marketplace_config, ) .await - } + }, Behaviour::Standard => { let hotshot = SystemContext::::new( public_key, @@ -317,7 +317,7 @@ pub async fn create_test_handle< .await; hotshot.run_tasks().await - } + }, } } diff --git a/hotshot-testing/src/test_runner.rs b/hotshot-testing/src/test_runner.rs index a07cb509c8..d640050dae 100644 --- a/hotshot-testing/src/test_runner.rs +++ b/hotshot-testing/src/test_runner.rs @@ -5,14 +5,19 @@ // along with the HotShot repository. If not, see . 
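// The import churn in these test hunks follows one mechanical rule: std
// imports first, then external crates, each merged into a single `use` tree
// per crate. This is consistent with rustfmt's `group_imports =
// "StdExternalCrate"` plus `imports_granularity = "Crate"` settings (an
// assumption: the repository's rustfmt.toml is not shown in this patch). For
// example, the test_builder.rs hunk above folds the standalone
//
//     use hotshot_types::traits::node_implementation::ConsensusTime;
//
// into the existing `use hotshot_types::{...}` tree, importing `ConsensusTime`
// alongside `NodeType` and `Versions`.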
#![allow(clippy::panic)] +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + marker::PhantomData, + sync::Arc, +}; + use async_broadcast::{broadcast, Receiver, Sender}; use async_lock::RwLock; use futures::future::join_all; -use hotshot::InitializerEpochInfo; use hotshot::{ traits::TestableNodeImplementation, types::{Event, SystemContextHandle}, - HotShotInitializer, MarketplaceConfig, SystemContext, + HotShotInitializer, InitializerEpochInfo, MarketplaceConfig, SystemContext, }; use hotshot_example_types::{ auction_results_provider_types::TestAuctionResultsProvider, @@ -22,11 +27,11 @@ use hotshot_example_types::{ }; use hotshot_fakeapi::fake_solver::FakeSolverState; use hotshot_task_impls::events::HotShotEvent; -use hotshot_types::drb::INITIAL_DRB_RESULT; use hotshot_types::{ consensus::ConsensusMetricsValue, constants::EVENT_CHANNEL_SIZE, data::Leaf2, + drb::INITIAL_DRB_RESULT, epoch_membership::EpochMembershipCoordinator, simple_certificate::QuorumCertificate2, traits::{ @@ -36,11 +41,6 @@ use hotshot_types::{ }, HotShotConfig, ValidatorConfig, }; -use std::{ - collections::{BTreeMap, HashMap, HashSet}, - marker::PhantomData, - sync::Arc, -}; use tide_disco::Url; use tokio::{spawn, task::JoinHandle}; #[allow(deprecated)] @@ -281,12 +281,12 @@ where Ok(res) => match res { TestResult::Pass => { info!("Task shut down successfully"); - } + }, TestResult::Fail(e) => error_list.push(e), }, Err(e) => { tracing::error!("Error Joining the test task {:?}", e); - } + }, } } @@ -560,14 +560,14 @@ where if let Some(task) = builder_tasks.pop() { task.start(Box::new(handle.event_stream())) } - } + }, std::cmp::Ordering::Equal => { // If we have more builder tasks than DA nodes, pin them all on the last node. while let Some(task) = builder_tasks.pop() { task.start(Box::new(handle.event_stream())) } - } - std::cmp::Ordering::Greater => {} + }, + std::cmp::Ordering::Greater => {}, } self.nodes.push(Node { diff --git a/hotshot-testing/src/test_task.rs b/hotshot-testing/src/test_task.rs index 14520c6d13..890fd2ddf9 100644 --- a/hotshot-testing/src/test_task.rs +++ b/hotshot-testing/src/test_task.rs @@ -158,12 +158,12 @@ impl TestTask { let _ = S::handle_event(&mut self.state, (input, id)) .await .inspect_err(|e| tracing::error!("{e}")); - } + }, Ok((Err(e), _id, _)) => { error!("Error from one channel in test task {:?}", e); sleep(Duration::from_millis(4000)).await; - } - _ => {} + }, + _ => {}, }; } }) @@ -202,7 +202,7 @@ pub async fn add_network_message_test_task< Err(e) => { error!("Failed to receive message: {:?}", e); continue; - } + }, }; // Deserialize the message @@ -212,7 +212,7 @@ pub async fn add_network_message_test_task< Err(e) => { tracing::error!("Failed to deserialize message: {:?}", e); continue; - } + }, }; // Handle the message diff --git a/hotshot-testing/src/txn_task.rs b/hotshot-testing/src/txn_task.rs index 41b5ec3b14..f20d1524f3 100644 --- a/hotshot-testing/src/txn_task.rs +++ b/hotshot-testing/src/txn_task.rs @@ -52,7 +52,7 @@ impl, V: Versions> TxnTask match handles.get(idx) { None => { tracing::error!("couldn't get node in txn task"); - } + }, Some(node) => { // use rand::seq::IteratorRandom; // we're assuming all nodes have the same leaf. 
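// Between these two txn_task.rs hunks, the task's flow is: look up the node
// handle chosen for this round, then submit one generated transaction through
// it. A minimal sketch of that flow, with hypothetical `NodeHandle` and `Tx`
// types standing in for the real handle and transaction:

struct Tx;

struct NodeHandle;

impl NodeHandle {
    async fn submit_transaction(&self, _txn: Tx) -> Result<(), &'static str> {
        Ok(()) // stand-in for the real async submission path
    }
}

async fn submit_to(handles: &[NodeHandle], idx: usize, txn: Tx) {
    match handles.get(idx) {
        None => eprintln!("couldn't get node in txn task"),
        Some(node) => node
            .submit_transaction(txn)
            .await
            .expect("Could not send transaction"),
    }
}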
@@ -64,7 +64,7 @@ impl, V: Versions> TxnTask .submit_transaction(txn.clone()) .await .expect("Could not send transaction"); - } + }, } } } diff --git a/hotshot-testing/src/view_generator.rs b/hotshot-testing/src/view_generator.rs index f46d4b1101..3209b92c25 100644 --- a/hotshot-testing/src/view_generator.rs +++ b/hotshot-testing/src/view_generator.rs @@ -675,7 +675,7 @@ impl Stream for TestViewGenerator { Poll::Ready(test_view) => { self.current_view = Some(test_view.clone()); Poll::Ready(Some(test_view)) - } + }, Poll::Pending => Poll::Pending, } } diff --git a/hotshot-testing/src/view_sync_task.rs b/hotshot-testing/src/view_sync_task.rs index 9ad967c74b..e41bfc0842 100644 --- a/hotshot-testing/src/view_sync_task.rs +++ b/hotshot-testing/src/view_sync_task.rs @@ -42,7 +42,7 @@ impl> TestTaskState async fn handle_event(&mut self, (event, id): (Self::Event, usize)) -> Result<()> { match event.as_ref() { // all the view sync events - HotShotEvent::ViewSyncTimeout(_, _, _) + HotShotEvent::ViewSyncTimeout(..) | HotShotEvent::ViewSyncPreCommitVoteRecv(_) | HotShotEvent::ViewSyncCommitVoteRecv(_) | HotShotEvent::ViewSyncFinalizeVoteRecv(_) @@ -52,12 +52,12 @@ impl> TestTaskState | HotShotEvent::ViewSyncPreCommitCertificateRecv(_) | HotShotEvent::ViewSyncCommitCertificateRecv(_) | HotShotEvent::ViewSyncFinalizeCertificateRecv(_) - | HotShotEvent::ViewSyncPreCommitCertificateSend(_, _) - | HotShotEvent::ViewSyncCommitCertificateSend(_, _) - | HotShotEvent::ViewSyncFinalizeCertificateSend(_, _) + | HotShotEvent::ViewSyncPreCommitCertificateSend(..) + | HotShotEvent::ViewSyncCommitCertificateSend(..) + | HotShotEvent::ViewSyncFinalizeCertificateSend(..) | HotShotEvent::ViewSyncTrigger(_) => { self.hit_view_sync.insert(id); - } + }, _ => (), } @@ -75,7 +75,7 @@ impl> TestTaskState hit_view_sync: self.hit_view_sync.clone(), })) } - } + }, } } } diff --git a/hotshot-testing/tests/tests_6/test_epochs.rs b/hotshot-testing/tests/tests_6/test_epochs.rs index cd4031b534..3a161ec1dd 100644 --- a/hotshot-testing/tests/tests_6/test_epochs.rs +++ b/hotshot-testing/tests/tests_6/test_epochs.rs @@ -4,6 +4,8 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . +use std::time::Duration; + use hotshot_example_types::{ node_types::{ CombinedImpl, EpochUpgradeTestVersions, EpochsTestVersions, Libp2pImpl, MemoryImpl, @@ -22,7 +24,6 @@ use hotshot_testing::{ test_builder::{TestDescription, TimingData}, view_sync_task::ViewSyncTaskDescription, }; -use std::time::Duration; cross_tests!( TestName: test_success_with_epochs, diff --git a/hotshot-types/src/consensus.rs b/hotshot-types/src/consensus.rs index 66ef1db9bf..c85fce8b7c 100644 --- a/hotshot-types/src/consensus.rs +++ b/hotshot-types/src/consensus.rs @@ -589,7 +589,7 @@ impl Consensus { // because the leader of view n + 1 may propose to the DA (and we would vote) // before the leader of view n. 
return true; - } + }, _ => return true, }; if view > *old_view { diff --git a/hotshot-types/src/data.rs b/hotshot-types/src/data.rs index 3deb60f5dd..ad3ae8c9e7 100644 --- a/hotshot-types/src/data.rs +++ b/hotshot-types/src/data.rs @@ -505,13 +505,13 @@ impl VidDisperseShare { .into_iter() .map(|share| Self::V0(share)) .collect() - } + }, VidDisperse::V1(vid_disperse) => { VidDisperseShare2::::from_vid_disperse(vid_disperse) .into_iter() .map(|share| Self::V1(share)) .collect() - } + }, } } @@ -672,10 +672,10 @@ impl ViewChangeEvidence { match self { ViewChangeEvidence::Timeout(timeout_cert) => { ViewChangeEvidence2::Timeout(timeout_cert.to_tc2()) - } + }, ViewChangeEvidence::ViewSync(view_sync_cert) => { ViewChangeEvidence2::ViewSync(view_sync_cert.to_vsc2()) - } + }, } } } @@ -705,10 +705,10 @@ impl ViewChangeEvidence2 { match self { ViewChangeEvidence2::Timeout(timeout_cert) => { ViewChangeEvidence::Timeout(timeout_cert.to_tc()) - } + }, ViewChangeEvidence2::ViewSync(view_sync_cert) => { ViewChangeEvidence::ViewSync(view_sync_cert.to_vsc()) - } + }, } } } @@ -1242,7 +1242,7 @@ impl Leaf2 { // Easiest cases are: // - no upgrade certificate on either: this is the most common case, and is always fine. // - if the parent didn't have a certificate, but we see one now, it just means that we have begun an upgrade: again, this is always fine. - (None | Some(_), None) => {} + (None | Some(_), None) => {}, // If we no longer see a cert, we have to make sure that we either: // - no longer care because we have passed new_version_first_view, or // - no longer care because we have passed `decide_by` without deciding the certificate. @@ -1252,13 +1252,13 @@ impl Leaf2 { || (self.view_number() > parent_cert.data.decide_by && decided_upgrade_certificate_read.is_none()), "The new leaf is missing an upgrade certificate that was present in its parent, and should still be live." ); - } + }, // If we both have a certificate, they should be identical. // Technically, this prevents us from initiating a new upgrade in the view immediately following an upgrade. // I think this is a fairly lax restriction. (Some(cert), Some(parent_cert)) => { ensure!(cert == parent_cert, "The new leaf does not extend the parent leaf, because it has attached a different upgrade certificate."); - } + }, } // This check should be added once we sort out the genesis leaf/justify_qc issue. @@ -1621,7 +1621,7 @@ impl Leaf { // Easiest cases are: // - no upgrade certificate on either: this is the most common case, and is always fine. // - if the parent didn't have a certificate, but we see one now, it just means that we have begun an upgrade: again, this is always fine. - (None | Some(_), None) => {} + (None | Some(_), None) => {}, // If we no longer see a cert, we have to make sure that we either: // - no longer care because we have passed new_version_first_view, or // - no longer care because we have passed `decide_by` without deciding the certificate. @@ -1631,13 +1631,13 @@ impl Leaf { || (self.view_number() > parent_cert.data.decide_by && decided_upgrade_certificate_read.is_none()), "The new leaf is missing an upgrade certificate that was present in its parent, and should still be live." ); - } + }, // If we both have a certificate, they should be identical. // Technically, this prevents us from initiating a new upgrade in the view immediately following an upgrade. // I think this is a fairly lax restriction. 
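// Taken together, the arms of this match encode a small decision table over
// the (child cert, parent cert) pair:
//
//     (anything, None)     always fine: no upgrade in flight, or one is just
//                          beginning;
//     (None,     Some(p))  fine only if the certificate is no longer live,
//                          i.e. we are past new_version_first_view, or past
//                          p.decide_by without a decided certificate;
//     (Some(c),  Some(p))  fine only if c == p, as checked in the arm below.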
(Some(cert), Some(parent_cert)) => { ensure!(cert == parent_cert, "The new leaf does not extend the parent leaf, because it has attached a different upgrade certificate."); - } + }, } // This check should be added once we sort out the genesis leaf/justify_qc issue. diff --git a/hotshot-types/src/data/vid_disperse.rs b/hotshot-types/src/data/vid_disperse.rs index f7c90e5c68..892aed79f9 100644 --- a/hotshot-types/src/data/vid_disperse.rs +++ b/hotshot-types/src/data/vid_disperse.rs @@ -13,6 +13,7 @@ use jf_vid::{VidDisperse as JfVidDisperse, VidScheme}; use serde::{Deserialize, Serialize}; use tokio::task::spawn_blocking; +use super::ns_table::parse_ns_table; use crate::{ epoch_membership::{EpochMembership, EpochMembershipCoordinator}, impl_has_epoch, @@ -29,8 +30,6 @@ use crate::{ vote::HasViewNumber, }; -use super::ns_table::parse_ns_table; - impl_has_epoch!( ADVZDisperse, AvidMDisperse, diff --git a/hotshot-types/src/epoch_membership.rs b/hotshot-types/src/epoch_membership.rs index 0494a0c76a..3a316cf033 100644 --- a/hotshot-types/src/epoch_membership.rs +++ b/hotshot-types/src/epoch_membership.rs @@ -1,17 +1,25 @@ -use std::collections::BTreeSet; -use std::num::NonZeroU64; -use std::{collections::HashMap, sync::Arc}; +use std::{ + collections::{BTreeSet, HashMap}, + num::NonZeroU64, + sync::Arc, +}; use async_broadcast::{broadcast, InactiveReceiver}; use async_lock::{Mutex, RwLock}; -use hotshot_utils::anytrace::{self, Error, Level, Result, DEFAULT_LOG_LEVEL}; -use hotshot_utils::{ensure, line_info, log, warn}; - -use crate::drb::DrbResult; -use crate::traits::election::Membership; -use crate::traits::node_implementation::{ConsensusTime, NodeType}; -use crate::utils::root_block_in_epoch; -use crate::PeerConfig; +use hotshot_utils::{ + anytrace::{self, Error, Level, Result, DEFAULT_LOG_LEVEL}, + ensure, line_info, log, warn, +}; + +use crate::{ + drb::DrbResult, + traits::{ + election::Membership, + node_implementation::{ConsensusTime, NodeType}, + }, + utils::root_block_in_epoch, + PeerConfig, +}; type EpochMap = HashMap<::Epoch, InactiveReceiver>>>; diff --git a/hotshot-types/src/lib.rs b/hotshot-types/src/lib.rs index eac8950a5e..0d9bf701fc 100644 --- a/hotshot-types/src/lib.rs +++ b/hotshot-types/src/lib.rs @@ -134,7 +134,7 @@ impl PeerConfig { Err(e) => { error!(?e, "Failed to serialize public key"); vec![] - } + }, } } @@ -148,7 +148,7 @@ impl PeerConfig { Err(e) => { error!(?e, "Failed to deserialize public key"); None - } + }, } } } diff --git a/hotshot-types/src/message.rs b/hotshot-types/src/message.rs index 12e07efd87..8f7ac65386 100644 --- a/hotshot-types/src/message.rs +++ b/hotshot-types/src/message.rs @@ -173,9 +173,7 @@ impl HasEpoch for MessageKind { fn epoch(&self) -> Option { match &self { MessageKind::Consensus(message) => message.epoch_number(), - MessageKind::Data( - DataMessage::SubmitTransaction(_, _) | DataMessage::RequestData(_), - ) + MessageKind::Data(DataMessage::SubmitTransaction(..) 
| DataMessage::RequestData(_)) | MessageKind::External(_) => None, MessageKind::Data(DataMessage::DataResponse(msg)) => match msg { ResponseMessage::Found(m) => m.epoch_number(), @@ -325,66 +323,66 @@ impl SequencingMessage { // view of leader in the leaf when proposal // this should match replica upon receipt p.data.view_number() - } + }, GeneralConsensusMessage::Proposal2(p) => { // view of leader in the leaf when proposal // this should match replica upon receipt p.data.view_number() - } + }, GeneralConsensusMessage::ProposalRequested(req, _) => req.view_number, GeneralConsensusMessage::ProposalResponse(proposal) => { proposal.data.view_number() - } + }, GeneralConsensusMessage::ProposalResponse2(proposal) => { proposal.data.view_number() - } + }, GeneralConsensusMessage::Vote(vote_message) => vote_message.view_number(), GeneralConsensusMessage::Vote2(vote_message) => vote_message.view_number(), GeneralConsensusMessage::TimeoutVote(message) => message.view_number(), GeneralConsensusMessage::ViewSyncPreCommitVote(message) => { message.view_number() - } + }, GeneralConsensusMessage::ViewSyncCommitVote(message) => message.view_number(), GeneralConsensusMessage::ViewSyncFinalizeVote(message) => message.view_number(), GeneralConsensusMessage::ViewSyncPreCommitCertificate(message) => { message.view_number() - } + }, GeneralConsensusMessage::ViewSyncCommitCertificate(message) => { message.view_number() - } + }, GeneralConsensusMessage::ViewSyncFinalizeCertificate(message) => { message.view_number() - } + }, GeneralConsensusMessage::TimeoutVote2(message) => message.view_number(), GeneralConsensusMessage::ViewSyncPreCommitVote2(message) => { message.view_number() - } + }, GeneralConsensusMessage::ViewSyncCommitVote2(message) => message.view_number(), GeneralConsensusMessage::ViewSyncFinalizeVote2(message) => { message.view_number() - } + }, GeneralConsensusMessage::ViewSyncPreCommitCertificate2(message) => { message.view_number() - } + }, GeneralConsensusMessage::ViewSyncCommitCertificate2(message) => { message.view_number() - } + }, GeneralConsensusMessage::ViewSyncFinalizeCertificate2(message) => { message.view_number() - } + }, GeneralConsensusMessage::UpgradeProposal(message) => message.data.view_number(), GeneralConsensusMessage::UpgradeVote(message) => message.view_number(), GeneralConsensusMessage::HighQc(qc) | GeneralConsensusMessage::ExtendedQc(qc, _) => qc.view_number(), } - } + }, SequencingMessage::Da(da_message) => { match da_message { DaConsensusMessage::DaProposal(p) => { // view of leader in the leaf when proposal // this should match replica upon receipt p.data.view_number() - } + }, DaConsensusMessage::DaVote(vote_message) => vote_message.view_number(), DaConsensusMessage::DaCertificate(cert) => cert.view_number, DaConsensusMessage::VidDisperseMsg(disperse) => disperse.data.view_number(), @@ -392,12 +390,12 @@ impl SequencingMessage { // view of leader in the leaf when proposal // this should match replica upon receipt p.data.view_number() - } + }, DaConsensusMessage::DaVote2(vote_message) => vote_message.view_number(), DaConsensusMessage::DaCertificate2(cert) => cert.view_number, DaConsensusMessage::VidDisperseMsg2(disperse) => disperse.data.view_number(), } - } + }, } } @@ -410,13 +408,13 @@ impl SequencingMessage { // view of leader in the leaf when proposal // this should match replica upon receipt p.data.epoch() - } + }, GeneralConsensusMessage::Proposal2(p) => { // view of leader in the leaf when proposal // this should match replica upon receipt p.data.epoch() - } - 
GeneralConsensusMessage::ProposalRequested(_, _) => None, + }, + GeneralConsensusMessage::ProposalRequested(..) => None, GeneralConsensusMessage::ProposalResponse(proposal) => proposal.data.epoch(), GeneralConsensusMessage::ProposalResponse2(proposal) => proposal.data.epoch(), GeneralConsensusMessage::Vote(vote_message) => vote_message.epoch(), @@ -427,35 +425,35 @@ impl SequencingMessage { GeneralConsensusMessage::ViewSyncFinalizeVote(message) => message.epoch(), GeneralConsensusMessage::ViewSyncPreCommitCertificate(message) => { message.epoch() - } + }, GeneralConsensusMessage::ViewSyncCommitCertificate(message) => message.epoch(), GeneralConsensusMessage::ViewSyncFinalizeCertificate(message) => { message.epoch() - } + }, GeneralConsensusMessage::TimeoutVote2(message) => message.epoch(), GeneralConsensusMessage::ViewSyncPreCommitVote2(message) => message.epoch(), GeneralConsensusMessage::ViewSyncCommitVote2(message) => message.epoch(), GeneralConsensusMessage::ViewSyncFinalizeVote2(message) => message.epoch(), GeneralConsensusMessage::ViewSyncPreCommitCertificate2(message) => { message.epoch() - } + }, GeneralConsensusMessage::ViewSyncCommitCertificate2(message) => message.epoch(), GeneralConsensusMessage::ViewSyncFinalizeCertificate2(message) => { message.epoch() - } + }, GeneralConsensusMessage::UpgradeProposal(message) => message.data.epoch(), GeneralConsensusMessage::UpgradeVote(message) => message.epoch(), GeneralConsensusMessage::HighQc(qc) | GeneralConsensusMessage::ExtendedQc(qc, _) => qc.epoch(), } - } + }, SequencingMessage::Da(da_message) => { match da_message { DaConsensusMessage::DaProposal(p) => { // view of leader in the leaf when proposal // this should match replica upon receipt p.data.epoch() - } + }, DaConsensusMessage::DaVote(vote_message) => vote_message.epoch(), DaConsensusMessage::DaCertificate(cert) => cert.epoch(), DaConsensusMessage::VidDisperseMsg(disperse) => disperse.data.epoch(), @@ -464,11 +462,11 @@ impl SequencingMessage { // view of leader in the leaf when proposal // this should match replica upon receipt p.data.epoch() - } + }, DaConsensusMessage::DaVote2(vote_message) => vote_message.epoch(), DaConsensusMessage::DaCertificate2(cert) => cert.epoch(), } - } + }, } } } @@ -649,7 +647,7 @@ impl UpgradeLock { } else { V::Base::VERSION } - } + }, None => V::Base::VERSION, }; @@ -669,7 +667,7 @@ impl UpgradeLock { } else { cert.data.old_version } - } + }, None => V::Base::VERSION, } } @@ -698,7 +696,7 @@ impl UpgradeLock { v if v == V::Upgrade::VERSION => Serializer::::serialize(&message), v => { bail!("Attempted to serialize with version {}, which is incompatible. 
This should be impossible.", v); - } + }, }; serialized_message @@ -725,7 +723,7 @@ impl UpgradeLock { v if v == V::Upgrade::VERSION => Serializer::::deserialize(message), v => { bail!("Cannot deserialize message with stated version {}", v); - } + }, } .wrap() .context(info!("Failed to deserialize message!"))?; diff --git a/hotshot-types/src/network.rs b/hotshot-types/src/network.rs index f2d35b6984..3a5dd4fe31 100644 --- a/hotshot-types/src/network.rs +++ b/hotshot-types/src/network.rs @@ -209,7 +209,7 @@ impl NetworkConfig { Ok(data) => data, Err(e) => { return Err(NetworkConfigError::ReadFromFileError(e)); - } + }, }; // deserialize @@ -256,7 +256,7 @@ impl NetworkConfig { Ok(data) => data, Err(e) => { return Err(NetworkConfigError::SerializeError(e)); - } + }, }; // write to file diff --git a/hotshot-types/src/simple_vote.rs b/hotshot-types/src/simple_vote.rs index 547db100a6..1ae78c4f20 100644 --- a/hotshot-types/src/simple_vote.rs +++ b/hotshot-types/src/simple_vote.rs @@ -19,8 +19,7 @@ use serde::{de::DeserializeOwned, Deserialize, Serialize}; use vbs::version::Version; use crate::{ - data::VidCommitment, - data::{Leaf, Leaf2}, + data::{Leaf, Leaf2, VidCommitment}, message::UpgradeLock, traits::{ node_implementation::{NodeType, Versions}, diff --git a/hotshot-types/src/traits/storage.rs b/hotshot-types/src/traits/storage.rs index c2ac50fcf2..7ad83f58ed 100644 --- a/hotshot-types/src/traits/storage.rs +++ b/hotshot-types/src/traits/storage.rs @@ -18,11 +18,10 @@ use committable::Commitment; use super::node_implementation::NodeType; use crate::{ consensus::{CommitmentMap, View}, - data::VidCommitment, data::{ vid_disperse::{ADVZDisperseShare, VidDisperseShare2}, DaProposal, DaProposal2, Leaf, Leaf2, QuorumProposal, QuorumProposal2, - QuorumProposalWrapper, VidDisperseShare, + QuorumProposalWrapper, VidCommitment, VidDisperseShare, }, drb::DrbResult, event::HotShotAction, @@ -55,7 +54,7 @@ pub trait Storage: Send + Sync + Clone { _pd: std::marker::PhantomData, }) .await - } + }, VidDisperseShare::V1(share) => { self.append_vid2(&Proposal { data: share.clone(), @@ -63,7 +62,7 @@ pub trait Storage: Send + Sync + Clone { _pd: std::marker::PhantomData, }) .await - } + }, } } /// Add a proposal to the stored DA proposals. diff --git a/hotshot-types/src/utils.rs b/hotshot-types/src/utils.rs index 864bd4c54a..09c0817ce8 100644 --- a/hotshot-types/src/utils.rs +++ b/hotshot-types/src/utils.rs @@ -6,6 +6,12 @@ //! Utility functions, type aliases, helper structs and enum definitions. 
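// Stepping back to the message.rs hunks above: the UpgradeLock arms gate
// (de)serialization on a version tag, accepting exactly the base and upgrade
// versions and rejecting everything else with bail!. A minimal sketch of that
// gate, with hypothetical BASE and UPGRADE constants standing in for
// V::Base::VERSION and V::Upgrade::VERSION:
const BASE: u16 = 1;
const UPGRADE: u16 = 2;

fn check_version(v: u16) -> Result<(), String> {
    match v {
        v if v == BASE || v == UPGRADE => Ok(()),
        other => Err(format!(
            "Cannot deserialize message with stated version {other}"
        )),
    }
}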
+use std::{ + hash::{Hash, Hasher}, + ops::Deref, + sync::Arc, +}; + use anyhow::{anyhow, ensure}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use bincode::{ @@ -19,11 +25,6 @@ use committable::{Commitment, Committable}; use digest::OutputSizeUser; use serde::{Deserialize, Serialize}; use sha2::Digest; -use std::{ - hash::{Hash, Hasher}, - ops::Deref, - sync::Arc, -}; use tagged_base64::tagged; use typenum::Unsigned; use vbs::version::StaticVersionType; diff --git a/hotshot-types/src/vote.rs b/hotshot-types/src/vote.rs index e33e66f57f..1631896652 100644 --- a/hotshot-types/src/vote.rs +++ b/hotshot-types/src/vote.rs @@ -170,7 +170,7 @@ impl< Err(e) => { tracing::warn!("Failed to generate versioned vote data: {e}"); return None; - } + }, }; if !key.validate(&vote.signature(), vote_commitment.as_ref()) { diff --git a/hotshot-utils/src/anytrace.rs b/hotshot-utils/src/anytrace.rs index 12e129ca01..b9aec49221 100644 --- a/hotshot-utils/src/anytrace.rs +++ b/hotshot-utils/src/anytrace.rs @@ -24,21 +24,21 @@ impl Log for Error { match error_level { Level::Trace => { tracing::trace!("{}", self.message); - } + }, Level::Debug => { tracing::debug!("{}", self.message); - } + }, Level::Info => { tracing::info!("{}", self.message); - } + }, Level::Warn => { tracing::warn!("{}", self.message); - } + }, Level::Error => { tracing::error!("{}", self.message); - } + }, // impossible - Level::Unspecified => {} + Level::Unspecified => {}, } } } @@ -48,7 +48,7 @@ impl Log for Result { let error = match self { Ok(_) => { return; - } + }, Err(e) => e, }; @@ -60,21 +60,21 @@ impl Log for Result { match error_level { Level::Trace => { tracing::trace!("{}", error.message); - } + }, Level::Debug => { tracing::debug!("{}", error.message); - } + }, Level::Info => { tracing::info!("{}", error.message); - } + }, Level::Warn => { tracing::warn!("{}", error.message); - } + }, Level::Error => { tracing::error!("{}", error.message); - } + }, // impossible - Level::Unspecified => {} + Level::Unspecified => {}, } } } diff --git a/hotshot-utils/src/anytrace/macros.rs b/hotshot-utils/src/anytrace/macros.rs index 29c5178b07..b9f6b7db56 100644 --- a/hotshot-utils/src/anytrace/macros.rs +++ b/hotshot-utils/src/anytrace/macros.rs @@ -167,21 +167,21 @@ macro_rules! 
log { match error_level { Level::Trace => { tracing::trace!("{}", error.message); - } + }, Level::Debug => { tracing::debug!("{}", error.message); - } + }, Level::Info => { tracing::info!("{}", error.message); - } + }, Level::Warn => { tracing::warn!("{}", error.message); - } + }, Level::Error => { tracing::error!("{}", error.message); - } + }, // impossible - Level::Unspecified => {} + Level::Unspecified => {}, } } }; diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index da4afdcd4c..660984de2d 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -215,10 +215,10 @@ impl, V: Versions> SystemContext Arc { #[allow(clippy::panic)] match storage.migrate_consensus().await { - Ok(()) => {} + Ok(()) => {}, Err(e) => { panic!("Failed to migrate consensus storage: {e}"); - } + }, } let internal_chan = broadcast(EVENT_CHANNEL_SIZE); @@ -767,10 +767,10 @@ where match event { Either::Left(msg) => { let _ = left_sender.broadcast(msg.into()).await; - } + }, Either::Right(msg) => { let _ = right_sender.broadcast(msg.into()).await; - } + }, } } } diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 544154a2cd..743327ea4b 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -10,7 +10,6 @@ pub mod task_state; use std::{collections::BTreeMap, fmt::Debug, num::NonZeroUsize, sync::Arc, time::Duration}; -use crate::EpochMembershipCoordinator; use async_broadcast::{broadcast, RecvError}; use async_lock::RwLock; use async_trait::async_trait; @@ -46,8 +45,9 @@ use vbs::version::StaticVersionType; use crate::{ genesis_epoch_from_version, tasks::task_state::CreateTaskState, types::SystemContextHandle, - ConsensusApi, ConsensusMetricsValue, ConsensusTaskRegistry, HotShotConfig, HotShotInitializer, - MarketplaceConfig, NetworkTaskRegistry, SignatureKey, SystemContext, Versions, + ConsensusApi, ConsensusMetricsValue, ConsensusTaskRegistry, EpochMembershipCoordinator, + HotShotConfig, HotShotInitializer, MarketplaceConfig, NetworkTaskRegistry, SignatureKey, + SystemContext, Versions, }; /// event for global event stream @@ -280,13 +280,13 @@ pub fn create_shutdown_event_monitor { return; - } + }, Err(e) => { tracing::error!("Shutdown event monitor channel recv error: {}", e); - } + }, } } } diff --git a/hotshot/src/traits/election/static_committee_leader_two_views.rs b/hotshot/src/traits/election/static_committee_leader_two_views.rs index 8b3e828f71..85a467057a 100644 --- a/hotshot/src/traits/election/static_committee_leader_two_views.rs +++ b/hotshot/src/traits/election/static_committee_leader_two_views.rs @@ -4,6 +4,11 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . +use std::{ + collections::{BTreeMap, BTreeSet}, + num::NonZeroU64, +}; + use hotshot_types::{ drb::DrbResult, traits::{ @@ -15,10 +20,6 @@ use hotshot_types::{ }; use hotshot_utils::anytrace::Result; use primitive_types::U256; -use std::{ - collections::{BTreeMap, BTreeSet}, - num::NonZeroU64, -}; #[derive(Clone, Debug, Eq, PartialEq, Hash)] diff --git a/hotshot/src/traits/election/two_static_committees.rs b/hotshot/src/traits/election/two_static_committees.rs index 2b2834710c..3825c4ed47 100644 --- a/hotshot/src/traits/election/two_static_committees.rs +++ b/hotshot/src/traits/election/two_static_committees.rs @@ -4,6 +4,12 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . 
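// The anytrace hunks above all reduce to one fan-out: map a library-level
// `Level` onto the matching `tracing` macro. A minimal sketch of that dispatch
// (assuming the `tracing` crate as a dependency; the real Level also has an
// Unspecified variant that is never logged):
enum Level {
    Trace,
    Debug,
    Info,
    Warn,
    Error,
}

fn log_at(level: Level, message: &str) {
    match level {
        Level::Trace => tracing::trace!("{message}"),
        Level::Debug => tracing::debug!("{message}"),
        Level::Info => tracing::info!("{message}"),
        Level::Warn => tracing::warn!("{message}"),
        Level::Error => tracing::error!("{message}"),
    }
}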
+use std::{ + cmp::max, + collections::{BTreeMap, BTreeSet}, + num::NonZeroU64, +}; + use hotshot_types::{ drb::DrbResult, traits::{ @@ -15,11 +21,6 @@ use hotshot_types::{ }; use hotshot_utils::anytrace::Result; use primitive_types::U256; -use std::{ - cmp::max, - collections::{BTreeMap, BTreeSet}, - num::NonZeroU64, -}; /// Tuple type for eligible leaders type EligibleLeaders = ( diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 87b4a873c2..dc6af286cc 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -185,12 +185,12 @@ impl CombinedNetworks { // The primary fail counter reached 0, the primary is now considered up primary_down.store(false, Ordering::Relaxed); debug!("primary_fail_counter reached zero, primary_down set to false"); - } + }, c => { // Decrement the primary fail counter primary_fail_counter.store(c - 1, Ordering::Relaxed); debug!("primary_fail_counter set to {:?}", c - 1); - } + }, } return Ok(()); } @@ -211,7 +211,7 @@ impl CombinedNetworks { c if c < COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL => { // Just increment the 'no delay counter' self.no_delay_counter.store(c + 1, Ordering::Relaxed); - } + }, _ => { // The 'no delay counter' reached the threshold debug!( @@ -226,7 +226,7 @@ impl CombinedNetworks { // The primary fail counter is set just below the threshold to delay the next message self.primary_fail_counter .store(COMBINED_NETWORK_MIN_PRIMARY_FAILURES, Ordering::Relaxed); - } + }, } } // Send the message diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 93e9c5ef21..52ce406350 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -7,7 +7,21 @@ //! Libp2p based/production networking implementation //! This module provides a libp2p based networking implementation where each node in the //! 
network forms a tcp or udp connection to a subset of other nodes in the network -use crate::EpochMembershipCoordinator; +#[cfg(feature = "hotshot-testing")] +use std::str::FromStr; +use std::{ + cmp::min, + collections::{BTreeSet, HashSet}, + fmt::Debug, + net::{IpAddr, ToSocketAddrs}, + num::NonZeroUsize, + sync::{ + atomic::{AtomicBool, AtomicU64, Ordering}, + Arc, + }, + time::Duration, +}; + use anyhow::{anyhow, Context}; use async_lock::RwLock; use async_trait::async_trait; @@ -51,20 +65,6 @@ use libp2p_identity::{ }; use rand::{rngs::StdRng, seq::IteratorRandom, SeedableRng}; use serde::Serialize; -#[cfg(feature = "hotshot-testing")] -use std::str::FromStr; -use std::{ - cmp::min, - collections::{BTreeSet, HashSet}, - fmt::Debug, - net::{IpAddr, ToSocketAddrs}, - num::NonZeroUsize, - sync::{ - atomic::{AtomicBool, AtomicU64, Ordering}, - Arc, - }, - time::Duration, -}; use tokio::{ select, spawn, sync::{ @@ -75,7 +75,7 @@ use tokio::{ }; use tracing::{error, info, instrument, trace, warn}; -use crate::BroadcastDelay; +use crate::{BroadcastDelay, EpochMembershipCoordinator}; /// Libp2p-specific metrics #[derive(Clone, Debug)] @@ -289,7 +289,7 @@ impl TestableNetworkingImplementation for Libp2pNetwork { Ok(network) => network, Err(err) => { panic!("Failed to create libp2p network: {err:?}"); - } + }, }, ) }) @@ -372,7 +372,7 @@ pub fn derive_libp2p_multiaddr(addr: &String) -> anyhow::Result { } format!("/dns/{host}/udp/{port}/quic-v1") - } + }, }; // Convert the multiaddr string to a `Multiaddr` @@ -680,7 +680,7 @@ impl Libp2pNetwork { sender.try_send(msg).map_err(|err| { NetworkError::ChannelSendError(format!("failed to send gossip message: {err}")) })?; - } + }, DirectRequest(msg, _pid, chan) => { sender.try_send(msg).map_err(|err| { NetworkError::ChannelSendError(format!( @@ -702,12 +702,12 @@ impl Libp2pNetwork { { error!("failed to ack!"); }; - } - DirectResponse(_msg, _) => {} + }, + DirectResponse(_msg, _) => {}, NetworkEvent::IsBootstrapped => { error!("handle_recvd_events received `NetworkEvent::IsBootstrapped`, which should be impossible."); - } - NetworkEvent::ConnectedPeersUpdate(_) => {} + }, + NetworkEvent::ConnectedPeersUpdate(_) => {}, } Ok::<(), NetworkError>(()) } @@ -909,7 +909,7 @@ impl ConnectedNetwork for Libp2pNetwork { return Err(NetworkError::LookupError(format!( "failed to look up node for direct message: {err}" ))); - } + }, }; #[cfg(feature = "hotshot-testing")] @@ -941,7 +941,7 @@ impl ConnectedNetwork for Libp2pNetwork { Err(e) => { self.inner.metrics.num_failed_messages.add(1); Err(e) - } + }, } } @@ -1002,7 +1002,7 @@ impl ConnectedNetwork for Libp2pNetwork { Ok(m) => m, Err(e) => { return tracing::warn!(e.message); - } + }, }; let future_leader = match membership.leader(future_view).await { Ok(l) => l, @@ -1011,7 +1011,7 @@ impl ConnectedNetwork for Libp2pNetwork { "Failed to calculate leader for view {:?}: {e}", future_view ); - } + }, }; let _ = self diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 9aa8adfef2..4a0663d246 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -282,10 +282,10 @@ impl ConnectedNetwork for MemoryNetwork { match res { Ok(()) => { trace!(?key, "Delivered message to remote"); - } + }, Err(e) => { warn!(?e, ?key, "Error sending broadcast message to node"); - } + }, } } } @@ -336,10 +336,10 @@ impl ConnectedNetwork for MemoryNetwork { match res { Ok(()) => { trace!(?key, "Delivered message to 
remote"); - } + }, Err(e) => { warn!(?e, ?key, "Error sending broadcast message to node"); - } + }, } } } @@ -375,7 +375,7 @@ impl ConnectedNetwork for MemoryNetwork { Ok(()) => { trace!(?recipient, "Delivered message to remote"); Ok(()) - } + }, Err(e) => Err(NetworkError::MessageSendError(format!( "error sending direct message to node: {e}", ))), diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index a742e5f857..0f222f2729 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -591,7 +591,7 @@ impl ConnectedNetwork for PushCdnNetwork { return Err(NetworkError::MessageReceiveError(format!( "failed to receive message: {error}" ))); - } + }, }; // Extract the underlying message diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 07e6bab901..f9d1c40eeb 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -6,6 +6,8 @@ //! Provides an event-streaming handle for a [`SystemContext`] running in the background +use std::sync::Arc; + use anyhow::{anyhow, Context, Ok, Result}; use async_broadcast::{InactiveReceiver, Receiver, Sender}; use async_lock::RwLock; @@ -32,7 +34,6 @@ use hotshot_types::{ }, utils::option_epoch_from_block_number, }; -use std::sync::Arc; use tracing::instrument; use crate::{traits::NodeImplementation, types::Event, SystemContext, Versions}; @@ -116,17 +117,17 @@ impl + 'static, V: Versions> self.network .broadcast_message(serialized_message, Topic::Global, BroadcastDelay::None) .await?; - } + }, RecipientList::Direct(recipient) => { self.network .direct_message(serialized_message, recipient) .await?; - } + }, RecipientList::Many(recipients) => { self.network .da_broadcast_message(serialized_message, recipients, BroadcastDelay::None) .await?; - } + }, } Ok(()) } @@ -199,7 +200,7 @@ impl + 'static, V: Versions> Err(e) => { tracing::warn!(e.message); continue; - } + }, }; // Make sure that the quorum_proposal is valid if let Err(err) = quorum_proposal.validate_signature(&membership).await { diff --git a/marketplace-builder-core/src/service.rs b/marketplace-builder-core/src/service.rs index 29fff93c46..bb5af4bc70 100644 --- a/marketplace-builder-core/src/service.rs +++ b/marketplace-builder-core/src/service.rs @@ -1,19 +1,16 @@ -use std::time::Duration; - -use marketplace_builder_shared::{ - block::{BuilderStateId, ReceivedTransaction, TransactionSource}, - coordinator::{BuilderStateCoordinator, BuilderStateLookup}, - state::BuilderState, - utils::BuilderKeys, +use std::{ + fmt::Display, + sync::Arc, + time::{Duration, Instant}, }; pub use async_broadcast::{broadcast, RecvError, TryRecvError}; use async_trait::async_trait; use committable::{Commitment, Committable}; -use futures::{future::BoxFuture, stream::FuturesUnordered, Stream}; use futures::{ - stream::{FuturesOrdered, StreamExt}, - TryStreamExt, + future::BoxFuture, + stream::{FuturesOrdered, FuturesUnordered, StreamExt}, + Stream, TryStreamExt, }; use hotshot::types::Event; use hotshot_builder_api::{ @@ -23,26 +20,29 @@ use hotshot_builder_api::{ data_source::{AcceptsTxnSubmits, BuilderDataSource}, }, }; -use hotshot_types::bundle::Bundle; -use hotshot_types::traits::block_contents::{BuilderFee, Transaction}; use hotshot_types::{ + bundle::Bundle, data::VidCommitment, event::EventType, traits::{ + block_contents::{BuilderFee, Transaction}, node_implementation::{ConsensusTime, NodeType}, signature_key::{BuilderSignatureKey, SignatureKey}, }, 
}; -use std::sync::Arc; -use std::{fmt::Display, time::Instant}; +pub use marketplace_builder_shared::utils::EventServiceStream; +use marketplace_builder_shared::{ + block::{BuilderStateId, ReceivedTransaction, TransactionSource}, + coordinator::{BuilderStateCoordinator, BuilderStateLookup}, + state::BuilderState, + utils::BuilderKeys, +}; use tagged_base64::TaggedBase64; use tide_disco::{app::AppError, method::ReadState, App}; use tokio::{spawn, task::JoinHandle, time::sleep}; use tracing::Level; use vbs::version::StaticVersion; -pub use marketplace_builder_shared::utils::EventServiceStream; - use crate::hooks::BuilderHooks; /// Configuration to initialize the builder @@ -189,7 +189,7 @@ where match event.event { EventType::Error { error } => { tracing::error!("Error event in HotShot: {:?}", error); - } + }, EventType::Transactions { transactions } => { let hooks = Arc::clone(&hooks); let coordinator = Arc::clone(&coordinator); @@ -208,20 +208,20 @@ where .collect::>() .await; }); - } + }, EventType::Decide { leaf_chain, .. } => { let coordinator = Arc::clone(&coordinator); spawn(async move { coordinator.handle_decide(leaf_chain).await }); - } + }, EventType::DaProposal { proposal, .. } => { let coordinator = Arc::clone(&coordinator); spawn(async move { coordinator.handle_da_proposal(proposal.data).await }); - } + }, EventType::QuorumProposal { proposal, .. } => { let coordinator = Arc::clone(&coordinator); spawn(async move { coordinator.handle_quorum_proposal(proposal.data).await }); - } - _ => {} + }, + _ => {}, } } } @@ -356,14 +356,14 @@ where // If we couldn't find the state because it hasn't yet been created, try again sleep(self.api_timeout / 10).await; continue; - } + }, BuilderStateLookup::Decided => { // If we couldn't find the state because the view has already been decided, we can just return an error tracing::warn!("Requested a bundle for view we already GCd as decided",); return Err(BuildError::Error( "Request for a bundle for a view that has already been decided.".to_owned(), )); - } + }, }; tracing::info!( diff --git a/marketplace-builder-core/src/testing/basic_test.rs b/marketplace-builder-core/src/testing/basic_test.rs index 1d4cf42c5f..6919fd3ce8 100644 --- a/marketplace-builder-core/src/testing/basic_test.rs +++ b/marketplace-builder-core/src/testing/basic_test.rs @@ -1,14 +1,15 @@ +use std::{marker::PhantomData, sync::Arc}; + use async_broadcast::broadcast; use hotshot_builder_api::v0_99::data_source::{AcceptsTxnSubmits, BuilderDataSource}; - use hotshot_example_types::block_types::TestTransaction; +use marketplace_builder_shared::testing::consensus::SimulatedChainState; use tracing_test::traced_test; -use crate::hooks::NoHooks; -use crate::service::{BuilderConfig, GlobalState, ProxyGlobalState}; -use marketplace_builder_shared::testing::consensus::SimulatedChainState; -use std::marker::PhantomData; -use std::sync::Arc; +use crate::{ + hooks::NoHooks, + service::{BuilderConfig, GlobalState, ProxyGlobalState}, +}; /// This test simulates multiple builder states receiving messages from the channels and processing them #[tokio::test] diff --git a/marketplace-builder-core/src/testing/integration.rs b/marketplace-builder-core/src/testing/integration.rs index 1b986aa549..3cb98cd7c8 100644 --- a/marketplace-builder-core/src/testing/integration.rs +++ b/marketplace-builder-core/src/testing/integration.rs @@ -118,21 +118,20 @@ where mod tests { use std::time::Duration; - use crate::testing::integration::MarketplaceBuilderImpl; - use marketplace_builder_shared::testing::{ - 
generation::{self, TransactionGenerationConfig}, - run_test, - validation::BuilderValidationConfig, - }; - - use hotshot_example_types::node_types::MarketplaceTestVersions; - use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; + use hotshot_example_types::node_types::{MarketplaceTestVersions, MemoryImpl, TestTypes}; use hotshot_macros::cross_tests; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, test_builder::TestDescription, }; + use marketplace_builder_shared::testing::{ + generation::{self, TransactionGenerationConfig}, + run_test, + validation::BuilderValidationConfig, + }; + + use crate::testing::integration::MarketplaceBuilderImpl; #[tokio::test(flavor = "multi_thread")] #[tracing::instrument] diff --git a/marketplace-builder-core/src/testing/order_test.rs b/marketplace-builder-core/src/testing/order_test.rs index 9de0297172..b95416d6ba 100644 --- a/marketplace-builder-core/src/testing/order_test.rs +++ b/marketplace-builder-core/src/testing/order_test.rs @@ -1,5 +1,9 @@ +use std::{fmt::Debug, marker::PhantomData, sync::Arc}; + use async_broadcast::broadcast; +use hotshot::rand::{self, seq::SliceRandom, thread_rng}; use hotshot_builder_api::v0_99::data_source::{AcceptsTxnSubmits, BuilderDataSource}; +use hotshot_example_types::block_types::TestTransaction; use hotshot_types::{bundle::Bundle, traits::node_implementation::ConsensusTime}; use marketplace_builder_shared::{block::BuilderStateId, testing::consensus::SimulatedChainState}; use tracing_test::traced_test; @@ -9,12 +13,6 @@ use crate::{ service::{BuilderConfig, GlobalState, ProxyGlobalState}, }; -use std::{fmt::Debug, marker::PhantomData, sync::Arc}; - -use hotshot_example_types::block_types::TestTransaction; - -use hotshot::rand::{self, seq::SliceRandom, thread_rng}; - /// [`RoundTransactionBehavior`] is an enum that is used to represent different /// behaviors that we may want to simulate during a round. 
This applies to /// determining which transactions are included in the block, and how their @@ -64,12 +62,12 @@ impl RoundTransactionBehavior { ]), ); transactions - } + }, RoundTransactionBehavior::AdjustRemoveTail => { let mut transactions = transactions.clone(); transactions.pop(); transactions - } + }, RoundTransactionBehavior::ProposeInAdvance(propose_in_advance_round) => { let mut transactions = transactions.clone(); transactions.push(TestTransaction::new(vec![ @@ -77,12 +75,12 @@ impl RoundTransactionBehavior { 0_u8, ])); transactions - } + }, RoundTransactionBehavior::AdjustRemove => { let mut transactions = transactions.clone(); transactions.remove(rand::random::<usize>() % (transactions.len() - 1)); transactions - } + }, } } } diff --git a/marketplace-builder-shared/src/block.rs b/marketplace-builder-shared/src/block.rs index 1f458f2e62..b9775b2aaa 100644 --- a/marketplace-builder-shared/src/block.rs +++ b/marketplace-builder-shared/src/block.rs @@ -3,11 +3,12 @@ use std::time::Instant; use committable::{Commitment, Committable}; -use hotshot_types::data::{fake_commitment, Leaf2}; -use hotshot_types::traits::node_implementation::ConsensusTime; use hotshot_types::{ - data::VidCommitment, - traits::{block_contents::Transaction, node_implementation::NodeType}, + data::{fake_commitment, Leaf2, VidCommitment}, + traits::{ + block_contents::Transaction, + node_implementation::{ConsensusTime, NodeType}, + }, utils::BuilderCommitment, }; diff --git a/marketplace-builder-shared/src/coordinator/mod.rs b/marketplace-builder-shared/src/coordinator/mod.rs index e546aa3f23..5d8c04a480 100644 --- a/marketplace-builder-shared/src/coordinator/mod.rs +++ b/marketplace-builder-shared/src/coordinator/mod.rs @@ -195,7 +195,7 @@ where }, ); return Err(Error::TxnSender(err)); - } + }, }; self.update_txn_status(&commit, TransactionStatus::Pending); @@ -251,15 +251,15 @@ where (Either::Right(da_proposal), Either::Left(quorum_proposal)) | (Either::Left(quorum_proposal), Either::Right(da_proposal)) => { self.spawn_builder_state(quorum_proposal, da_proposal).await - } + }, _ => { unreachable!() - } + }, } - } + }, Entry::Vacant(entry) => { entry.insert(proposal); - } + }, } } @@ -499,10 +499,10 @@ where "Not changing status of rejected/sequenced transaction", ); return; - } + }, _ => { tracing::debug!(?old_status, ?new_status, "Changing status of transaction",); - } + }, } } self.tx_status.insert(*txn_hash, new_status); @@ -525,6 +525,7 @@ mod tests { use hotshot_types::data::ViewNumber; use tracing_test::traced_test; + use super::*; use crate::{ block::TransactionSource, testing::{ @@ -535,8 +536,6 @@ mod tests { }, }; - use super::*; - type BuilderStateCoordinator = super::BuilderStateCoordinator<TestTypes>; #[tokio::test] diff --git a/marketplace-builder-shared/src/coordinator/tiered_view_map.rs b/marketplace-builder-shared/src/coordinator/tiered_view_map.rs index 6d52f3d955..385d7ec6c1 100644 --- a/marketplace-builder-shared/src/coordinator/tiered_view_map.rs +++ b/marketplace-builder-shared/src/coordinator/tiered_view_map.rs @@ -143,10 +143,10 @@ where match self.0.entry(*key.view()) { Entry::Vacant(entry) => { entry.insert(nem![key.into_subkey() => value]); - } + }, Entry::Occupied(mut entry) => { entry.get_mut().insert(key.into_subkey(), value); - } + }, } } @@ -181,14 +181,14 @@ mod tests { use std::{cmp::Ordering, ops::Bound, sync::Arc}; - use crate::{state::BuilderState, testing::mock}; - - use super::*; use hotshot_example_types::node_types::TestTypes; use hotshot_types::{data::ViewNumber,
traits::node_implementation::ConsensusTime}; use rand::{distributions::Standard, thread_rng, Rng}; use tracing_test::traced_test; + use super::*; + use crate::{state::BuilderState, testing::mock}; + type View = ViewNumber; type BuilderStateMap = super::TieredViewMap<BuilderStateId<TestTypes>, Arc<BuilderState<TestTypes>>>; diff --git a/marketplace-builder-shared/src/error.rs b/marketplace-builder-shared/src/error.rs index ac2d3b155e..9200c49c06 100644 --- a/marketplace-builder-shared/src/error.rs +++ b/marketplace-builder-shared/src/error.rs @@ -33,18 +33,18 @@ impl<Types: NodeType> From<Error<Types>> for BuildError { match value { Error::SignatureValidation => { BuildError::Error("Signature validation failed".to_owned()) - } + }, Error::Signing(_) => BuildError::Error("Failed to sign response".to_owned()), Error::ApiTimeout => BuildError::Error("Timeout".to_owned()), Error::NotFound => BuildError::NotFound, Error::AlreadyDecided => { BuildError::Error("Request for an already decided view".to_owned()) - } + }, Error::BuildBlock(_) => BuildError::Error("Failed to build block".to_owned()), Error::TxnSender(_) => BuildError::Error("Transaction channel error".to_owned()), Error::TxTooBig { len, max_tx_len } => { BuildError::Error(format!("Transaction too big ({len}/{max_tx_len})")) - } + }, } } } diff --git a/marketplace-builder-shared/src/state.rs b/marketplace-builder-shared/src/state.rs index 9666d17337..2ad7756fd8 100644 --- a/marketplace-builder-shared/src/state.rs +++ b/marketplace-builder-shared/src/state.rs @@ -4,10 +4,6 @@ use std::{ time::{Duration, Instant}, }; -use crate::{ - block::{BuilderStateId, ParentBlockReferences, ReceivedTransaction}, - utils::RotatingSet, -}; use async_broadcast::Receiver; use async_lock::{Mutex, RwLock}; use committable::{Commitment, Committable}; @@ -17,6 +13,11 @@ use hotshot_types::{ traits::{block_contents::BlockHeader, node_implementation::NodeType}, }; +use crate::{ + block::{BuilderStateId, ParentBlockReferences, ReceivedTransaction}, + utils::RotatingSet, +}; + #[derive(derive_more::Debug, Clone)] pub struct TransactionQueue<Types> where @@ -208,7 +209,7 @@ where self.txn_queue.write().await.insert(txn); queue_empty = false; - } + }, Err(async_broadcast::TryRecvError::Empty) | Err(async_broadcast::TryRecvError::Closed) => { // If it's closed that's a big problem and we should // probably indicate it as such.
break; - } + }, Err(async_broadcast::TryRecvError::Overflowed(lost)) => { tracing::warn!("Missed {lost} transactions due to backlog"); continue; - } + }, } } queue_empty diff --git a/marketplace-builder-shared/src/testing/consensus.rs b/marketplace-builder-shared/src/testing/consensus.rs index f4ee61cd00..13d658f8f8 100644 --- a/marketplace-builder-shared/src/testing/consensus.rs +++ b/marketplace-builder-shared/src/testing/consensus.rs @@ -2,8 +2,6 @@ use std::marker::PhantomData; -use crate::block::BuilderStateId; -use crate::testing::constants::TEST_NUM_NODES_IN_VID_COMPUTATION; use async_broadcast::Sender; use committable::Committable; use hotshot::{ @@ -17,8 +15,10 @@ use hotshot_example_types::{ state_types::{TestInstanceState, TestValidatedState}, }; use hotshot_types::{ - data::vid_commitment, - data::{DaProposal2, EpochNumber, Leaf2, QuorumProposal2, QuorumProposalWrapper, ViewNumber}, + data::{ + vid_commitment, DaProposal2, EpochNumber, Leaf2, QuorumProposal2, QuorumProposalWrapper, + ViewNumber, + }, message::Proposal, simple_certificate::{QuorumCertificate2, SimpleCertificate, SuccessThreshold}, simple_vote::QuorumData2, @@ -30,6 +30,8 @@ use hotshot_types::{ use sha2::{Digest, Sha256}; use vbs::version::StaticVersionType; +use crate::{block::BuilderStateId, testing::constants::TEST_NUM_NODES_IN_VID_COMPUTATION}; + pub struct SimulatedChainState { epoch: Option, round: ViewNumber, @@ -108,7 +110,7 @@ impl SimulatedChainState { &TestInstanceState::default(), ) .await - } + }, Some(prev_proposal) => { let prev_justify_qc = &prev_proposal.justify_qc(); let quorum_data = QuorumData2:: { @@ -124,7 +126,7 @@ impl SimulatedChainState { prev_justify_qc.signatures.clone(), PhantomData, ) - } + }, }; tracing::debug!("Iteration: {} justify_qc: {:?}", self.round, justify_qc); diff --git a/marketplace-builder-shared/src/testing/generation.rs b/marketplace-builder-shared/src/testing/generation.rs index d7667a24e8..e16293a384 100644 --- a/marketplace-builder-shared/src/testing/generation.rs +++ b/marketplace-builder-shared/src/testing/generation.rs @@ -137,7 +137,7 @@ where .map(Result::unwrap), ); } - } + }, GenerationStrategy::Random { min_per_view, max_per_view, @@ -164,7 +164,7 @@ where self.txn_nonce += 1; } - } + }, GenerationStrategy::Flood { min_tx_size, max_tx_size, @@ -188,7 +188,7 @@ where self.txn_nonce += 1; } - } + }, }; } } @@ -235,7 +235,7 @@ where .publish_transaction_async(txn) .await .expect("Failed to submit transaction to public mempool"); - } + }, SubmissionEndpoint::Private => { if let Err(e) = private_mempool_client .post::<()>("submit") @@ -248,17 +248,17 @@ where // If we can't reach the builder altogether, test should fail builder::Error::Request(request_error) => { panic!("Builder API not available: {request_error}") - } + }, // If the builder returns an error, we will re-submit this transaction // on the next view, so we return it to the queue and break error => { tracing::warn!(?error, "Builder API error"); self.txn_queue.push_front(txn); break; - } + }, }; } - } + }, } } } diff --git a/marketplace-builder-shared/src/testing/mock.rs b/marketplace-builder-shared/src/testing/mock.rs index 31d8c4fbf6..fbcb8ba176 100644 --- a/marketplace-builder-shared/src/testing/mock.rs +++ b/marketplace-builder-shared/src/testing/mock.rs @@ -2,40 +2,35 @@ use std::{marker::PhantomData, sync::Arc, time::Duration}; use async_broadcast::broadcast; -use committable::Commitment; -use committable::Committable; -use hotshot_example_types::block_types::{TestBlockHeader, TestBlockPayload, 
TestTransaction}; +use committable::{Commitment, Committable}; use hotshot_example_types::{ + block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, node_types::{TestTypes, TestVersions}, state_types::{TestInstanceState, TestValidatedState}, }; -use hotshot_types::data::DaProposal2; -use hotshot_types::data::ViewNumber; -use hotshot_types::data::{QuorumProposal2, QuorumProposalWrapper}; -use hotshot_types::event::LeafInfo; -use hotshot_types::simple_certificate::QuorumCertificate2; -use hotshot_types::simple_vote::QuorumData2; -use hotshot_types::traits::block_contents::GENESIS_VID_NUM_STORAGE_NODES; -use hotshot_types::traits::node_implementation::Versions; -use hotshot_types::traits::EncodeBytes; -use hotshot_types::vid::advz::advz_scheme; use hotshot_types::{ - data::{random_commitment, vid_commitment, Leaf, Leaf2}, + data::{ + random_commitment, vid_commitment, DaProposal2, Leaf, Leaf2, QuorumProposal2, + QuorumProposalWrapper, ViewNumber, + }, + event::LeafInfo, message::UpgradeLock, - simple_certificate::QuorumCertificate, - simple_vote::VersionedVoteData, - traits::node_implementation::{ConsensusTime, NodeType}, - traits::BlockPayload, + simple_certificate::{QuorumCertificate, QuorumCertificate2}, + simple_vote::{QuorumData2, VersionedVoteData}, + traits::{ + block_contents::GENESIS_VID_NUM_STORAGE_NODES, + node_implementation::{ConsensusTime, NodeType, Versions}, + BlockPayload, EncodeBytes, + }, utils::BuilderCommitment, + vid::advz::advz_scheme, }; use jf_vid::VidScheme; use rand::{distributions::Standard, thread_rng, Rng}; use vbs::version::StaticVersionType; -use crate::block::ParentBlockReferences; -use crate::state::BuilderState; - use super::constants::{TEST_CHANNEL_BUFFER_SIZE, TEST_NUM_NODES_IN_VID_COMPUTATION}; +use crate::{block::ParentBlockReferences, state::BuilderState}; pub fn transaction() -> TestTransaction { TestTransaction::new( diff --git a/marketplace-builder-shared/src/testing/validation.rs b/marketplace-builder-shared/src/testing/validation.rs index cc9a4b434a..c8d058f4cb 100644 --- a/marketplace-builder-shared/src/testing/validation.rs +++ b/marketplace-builder-shared/src/testing/validation.rs @@ -1,8 +1,9 @@ use std::sync::Arc; -use super::TransactionPayload; - +use anyhow::{bail, Error}; use async_lock::RwLock; +use async_trait::async_trait; +use chrono::{DateTime, Local}; use hotshot::{ traits::{BlockPayload, TestableNodeImplementation}, types::{Event, EventType}, @@ -17,9 +18,7 @@ use hotshot_types::traits::{ node_implementation::{NodeType, Versions}, }; -use anyhow::{bail, Error}; -use async_trait::async_trait; -use chrono::{DateTime, Local}; +use super::TransactionPayload; #[derive(Clone, Debug)] pub struct IncludedTransaction { diff --git a/marketplace-builder-shared/src/utils/event_service_wrapper.rs b/marketplace-builder-shared/src/utils/event_service_wrapper.rs index ece95e072d..59050f660c 100644 --- a/marketplace-builder-shared/src/utils/event_service_wrapper.rs +++ b/marketplace-builder-shared/src/utils/event_service_wrapper.rs @@ -1,16 +1,12 @@ -use std::{future::Future, pin::Pin}; - -use std::time::Duration; +use std::{future::Future, pin::Pin, time::Duration}; use anyhow::Context; use either::Either::{self, Left, Right}; -use futures::stream::unfold; -use futures::{Stream, StreamExt}; +use futures::{stream::unfold, Stream, StreamExt}; use hotshot::types::Event; use hotshot_events_service::events::Error as EventStreamError; use hotshot_types::traits::node_implementation::NodeType; -use surf_disco::client::HealthStatus; -use 
surf_disco::Client; +use surf_disco::{client::HealthStatus, Client}; use tokio::time::{sleep, timeout}; use tracing::{error, warn}; use url::Url; @@ -58,7 +54,7 @@ impl EventServiceStream break, Err(err) => { tracing::debug!(?err, "Healthcheck failed, retrying"); - } + }, } sleep(Self::RETRY_PERIOD).await; } @@ -90,18 +86,18 @@ impl EventServiceStream { return Some((event, this)); - } + }, Ok(Some(Err(err))) => { warn!(?err, "Error in event stream"); continue; - } + }, Ok(None) => { warn!("Event stream ended, attempting reconnection"); let fut = Self::connect_inner(this.api_url.clone()); let _ = std::mem::replace(&mut this.connection, Right(Box::pin(fut))); continue; - } + }, Err(_) => { // Timeout occurred, reconnect warn!("Timeout waiting for next event; reconnecting"); @@ -109,21 +105,21 @@ impl EventServiceStream match reconnection.await { Ok(connection) => { let _ = std::mem::replace(&mut this.connection, Left(connection)); continue; - } + }, Err(err) => { error!(?err, "Error while reconnecting, will retry in a while"); sleep(Self::RETRY_PERIOD).await; let fut = Self::connect_inner(this.api_url.clone()); let _ = std::mem::replace(&mut this.connection, Right(Box::pin(fut))); continue; - } + }, }, } } diff --git a/marketplace-builder-shared/src/utils/rotating_set.rs b/marketplace-builder-shared/src/utils/rotating_set.rs index 86ca9e3ed8..9e626db846 100644 --- a/marketplace-builder-shared/src/utils/rotating_set.rs +++ b/marketplace-builder-shared/src/utils/rotating_set.rs @@ -76,10 +76,11 @@ where #[cfg(test)] mod tests { + use std::thread::sleep; + use tracing_test::traced_test; use super::*; - use std::thread::sleep; #[test] #[traced_test] diff --git a/marketplace-builder/src/bin/marketplace-builder.rs b/marketplace-builder/src/bin/marketplace-builder.rs index c23cf6a1b3..d6aa8d16bb 100644 --- a/marketplace-builder/src/bin/marketplace-builder.rs +++ b/marketplace-builder/src/bin/marketplace-builder.rs @@ -129,11 +129,11 @@ async fn main() -> anyhow::Result<()> { match (base, upgrade) { (FeeVersion::VERSION, MarketplaceVersion::VERSION) => { run::>(genesis, opt).await - } + }, (FeeVersion::VERSION, _) => run::>(genesis, opt).await, (MarketplaceVersion::VERSION, _) => { run::>(genesis, opt).await - } + }, _ => panic!( "Invalid base ({base}) and upgrade ({upgrade}) versions specified in the toml file." 
), diff --git a/marketplace-builder/src/builder.rs b/marketplace-builder/src/builder.rs index cd334f9c0e..b824dbd91d 100644 --- a/marketplace-builder/src/builder.rs +++ b/marketplace-builder/src/builder.rs @@ -1,10 +1,9 @@ -use std::{arch::global_asm, collections::HashSet, num::NonZeroUsize, time::Duration}; +use std::{arch::global_asm, collections::HashSet, num::NonZeroUsize, sync::Arc, time::Duration}; use anyhow::Context; use async_broadcast::{ broadcast, Receiver as BroadcastReceiver, RecvError, Sender as BroadcastSender, TryRecvError, }; - use async_lock::RwLock; use espresso_types::{ eth_signature_key::EthKeyPair, @@ -43,7 +42,6 @@ use marketplace_builder_core::{ use marketplace_builder_shared::block::ParentBlockReferences; use marketplace_solver::SolverError; use sequencer::{catchup::StatePeers, L1Params, NetworkParams, SequencerApiVersion}; -use std::sync::Arc; use surf::http::headers::ACCEPT; use surf_disco::Client; use tide_disco::{app, method::ReadState, App, Url}; @@ -209,8 +207,7 @@ mod test { use anyhow::Error; use async_lock::RwLock; - use committable::Commitment; - use committable::Committable; + use committable::{Commitment, Committable}; use espresso_types::{ mock::MockStateCatchup, v0_99::{RollupRegistration, RollupRegistrationBody}, @@ -220,12 +217,14 @@ mod test { use ethers::{core::k256::elliptic_curve::rand_core::block, utils::Anvil}; use futures::{Stream, StreamExt}; use hooks::connect_to_solver; - use hotshot::helpers::initialize_logging; - use hotshot::types::{ - BLSPrivKey, - EventType::{Decide, *}, + use hotshot::{ + helpers::initialize_logging, + rand, + types::{ + BLSPrivKey, EventType, + EventType::{Decide, *}, + }, }; - use hotshot::{rand, types::EventType}; use hotshot_builder_api::v0_99::builder::BuildError; use hotshot_events_service::{ events::{Error as EventStreamApiError, Options as EventStreamingApiOptions}, @@ -248,15 +247,17 @@ mod test { use marketplace_solver::{testing::MockSolver, SolverError}; use portpicker::pick_unused_port; use sequencer::{ - api::test_helpers::TestNetworkConfigBuilder, + api::{ + fs::DataSource, + options::HotshotEvents, + test_helpers::{TestNetwork, TestNetworkConfigBuilder}, + Options, + }, + persistence, persistence::no_storage::{self, NoStorage}, testing::TestConfigBuilder, SequencerApiVersion, }; - use sequencer::{ - api::{fs::DataSource, options::HotshotEvents, test_helpers::TestNetwork, Options}, - persistence, - }; use sequencer_utils::test_utils::setup_test; use surf_disco::{ socket::{Connection, Unsupported}, @@ -608,7 +609,7 @@ mod test { get_bundle(builder_client, parent_view_number, parent_commitment).await, parent_view_number, ) - } + }, Mempool::Private => { submit_and_get_bundle_with_private_mempool( builder_client, @@ -616,7 +617,7 @@ mod test { urls, ) .await - } + }, }; assert_eq!(bundle.transactions, vec![registered_transaction.clone()]); @@ -728,7 +729,7 @@ mod test { get_bundle(builder_client, parent_view_number, parent_commitment).await, parent_view_number, ) - } + }, Mempool::Private => { submit_and_get_bundle_with_private_mempool( builder_client, @@ -736,7 +737,7 @@ mod test { urls, ) .await - } + }, }; assert_eq!(bundle.transactions, vec![unregistered_transaction.clone()]); diff --git a/marketplace-builder/src/hooks.rs b/marketplace-builder/src/hooks.rs index ede7452152..e96ec8ca1b 100644 --- a/marketplace-builder/src/hooks.rs +++ b/marketplace-builder/src/hooks.rs @@ -1,39 +1,21 @@ -use std::collections::HashSet; -use std::sync::Arc; -use std::time::Duration; +use std::{collections::HashSet, 
sync::Arc, time::Duration}; use async_lock::RwLock; use async_trait::async_trait; -use espresso_types::v0_99::BidTxBody; -use tokio::{spawn, time::sleep}; - -use espresso_types::v0_99::RollupRegistration; - -use espresso_types::MarketplaceVersion; -use espresso_types::SeqTypes; -use hotshot::types::EventType; - -use hotshot::types::Event; - -use hotshot_types::traits::node_implementation::Versions; +use espresso_types::{ + eth_signature_key::EthKeyPair, + v0_99::{BidTxBody, RollupRegistration}, + FeeAmount, MarketplaceVersion, NamespaceId, SeqTypes, +}; +use hotshot::types::{Event, EventType}; +use hotshot_types::traits::node_implementation::{NodeType, Versions}; use marketplace_builder_core::hooks::BuilderHooks; - -use espresso_types::FeeAmount; - -use espresso_types::eth_signature_key::EthKeyPair; - -use espresso_types::NamespaceId; - -use hotshot_types::traits::node_implementation::NodeType; - -use marketplace_solver::SolverError; -use marketplace_solver::SOLVER_API_PATH; +use marketplace_solver::{SolverError, SOLVER_API_PATH}; use sequencer::SequencerApiVersion; use surf_disco::Client; - use tide_disco::Url; -use tracing::error; -use tracing::info; +use tokio::{spawn, time::sleep}; +use tracing::{error, info}; /// Configurations for bid submission. pub struct BidConfig { @@ -67,11 +49,11 @@ pub async fn fetch_namespaces_to_skip(solver_base_url: Url) -> Option { error!("Failed to get the registered rollups: {:?}.", e); None - } + }, } } @@ -130,7 +112,7 @@ impl BuilderHooks for EspressoReserveHooks { Err(e) => { error!("Failed to sign the bid txn: {:?}.", e); return; - } + }, }; let solver_client = connect_to_solver(solver_base_url); @@ -172,12 +154,12 @@ impl BuilderHooks for EspressoFallbackHooks { Some(namespaces_to_skip) => { transactions.retain(|txn| !namespaces_to_skip.contains(&txn.namespace())); transactions - } + }, // Solver connection has failed and we don't have up-to-date information on this None => { error!("Not accepting transactions due to outdated information"); Vec::new() - } + }, } } diff --git a/marketplace-builder/src/lib.rs b/marketplace-builder/src/lib.rs index 18616cea5b..b58f7788d8 100755 --- a/marketplace-builder/src/lib.rs +++ b/marketplace-builder/src/lib.rs @@ -6,6 +6,7 @@ use std::{ marker::PhantomData, mem, net::{IpAddr, Ipv4Addr}, + sync::Arc, thread::Builder, }; @@ -35,16 +36,13 @@ use hotshot_builder_api::v0_99::builder::{ BuildError, Error as BuilderApiError, Options as HotshotBuilderApiOptions, }; use hotshot_orchestrator::client::{OrchestratorClient, ValidatorArgs}; -use hotshot_types::network::NetworkConfig; -use marketplace_builder_core::service::{GlobalState, ProxyGlobalState}; -use std::sync::Arc; -use tokio::{spawn, task::JoinHandle}; // Should move `STAKE_TABLE_CAPACITY` in the sequencer repo when we have variate stake table support use hotshot_stake_table::config::STAKE_TABLE_CAPACITY; use hotshot_types::{ consensus::ConsensusMetricsValue, event::LeafInfo, light_client::StateKeyPair, + network::NetworkConfig, signature_key::{BLSPrivKey, BLSPubKey}, traits::{ block_contents::{BlockHeader, BlockPayload, EncodeBytes, GENESIS_VID_NUM_STORAGE_NODES}, @@ -57,6 +55,7 @@ use hotshot_types::{ }; use jf_merkle_tree::{namespaced_merkle_tree::NamespacedMerkleTreeScheme, MerkleTreeScheme}; use jf_signature::bls_over_bn254::VerKey; +use marketplace_builder_core::service::{GlobalState, ProxyGlobalState}; use sequencer::{ catchup::StatePeers, context::{Consensus, SequencerContext}, @@ -66,6 +65,7 @@ use sequencer::{ }; use surf_disco::Client; use 
tide_disco::{app, method::ReadState, App, Url}; +use tokio::{spawn, task::JoinHandle}; use tracing::error; use vbs::version::{StaticVersion, StaticVersionType}; diff --git a/marketplace-solver/src/api.rs b/marketplace-solver/src/api.rs index 5f0d9c8c30..11ed573a9a 100644 --- a/marketplace-solver/src/api.rs +++ b/marketplace-solver/src/api.rs @@ -164,7 +164,7 @@ fn merge_toml(into: &mut Value, from: Value) { Entry::Occupied(mut entry) => merge_toml(entry.get_mut(), value), Entry::Vacant(entry) => { entry.insert(value); - } + }, } } } diff --git a/marketplace-solver/src/database.rs b/marketplace-solver/src/database.rs index c2d09f84af..06986fb6c3 100644 --- a/marketplace-solver/src/database.rs +++ b/marketplace-solver/src/database.rs @@ -60,7 +60,7 @@ impl PostgresClient { } connect_opts.to_url_lossy() - } + }, }; if let Some(max_connections) = max_connections { @@ -149,9 +149,10 @@ pub mod mock { #[cfg(all(test, not(target_os = "windows"), not(feature = "embedded-db")))] mod test { - use crate::database::mock::setup_mock_database; use hotshot::helpers::initialize_logging; + use crate::database::mock::setup_mock_database; + #[tokio::test(flavor = "multi_thread")] async fn test_database_connection() { initialize_logging(); diff --git a/marketplace-solver/src/events.rs b/marketplace-solver/src/events.rs index 913fbe3151..85a49ca1dc 100644 --- a/marketplace-solver/src/events.rs +++ b/marketplace-solver/src/events.rs @@ -59,7 +59,7 @@ pub async fn handle_events( match event.event { hotshot::types::EventType::ViewFinished { view_number } => { tracing::debug!("received view finished event {view_number:?}") - } + }, _ => (), } } @@ -69,6 +69,8 @@ pub async fn handle_events( #[cfg(any(test, feature = "testing"))] pub mod mock { + use std::{sync::Arc, time::Duration}; + use async_lock::RwLock; use espresso_types::SeqTypes; use hotshot::rand::{self}; @@ -83,7 +85,6 @@ pub mod mock { }; use portpicker::pick_unused_port; use rand::{rngs::OsRng, RngCore}; - use std::{sync::Arc, time::Duration}; use tide_disco::{App, Url}; use tokio::{spawn, task::JoinHandle, time::sleep}; use vbs::version::{StaticVersion, StaticVersionType}; @@ -184,8 +185,7 @@ pub mod mock { mod test { use espresso_types::SeqTypes; use futures::StreamExt as _; - use hotshot::helpers::initialize_logging; - use hotshot::types::Event; + use hotshot::{helpers::initialize_logging, types::Event}; use hotshot_events_service::events_source::StartupInfo; use surf_disco::Client; diff --git a/marketplace-solver/src/testing.rs b/marketplace-solver/src/testing.rs index b6745fa86f..70b392fa18 100755 --- a/marketplace-solver/src/testing.rs +++ b/marketplace-solver/src/testing.rs @@ -122,6 +122,8 @@ impl MockSolver { #[cfg(all(test, not(feature = "embedded-db")))] mod test { + use std::{str::FromStr, time::Duration}; + use committable::Committable; use espresso_types::{ v0_99::{ @@ -132,7 +134,6 @@ mod test { }; use hotshot::types::{BLSPubKey, SignatureKey}; use hotshot_types::traits::node_implementation::NodeType; - use std::{str::FromStr, time::Duration}; use tide_disco::Url; use crate::{testing::MockSolver, SolverError}; @@ -192,7 +193,7 @@ mod test { let client = surf_disco::Client::::new(solver_api); client.connect(None).await; - let (reg_ns_1, _, _) = + let (reg_ns_1, ..) = register_rollup_helper(1, Some("http://localhost"), 200, true, "test").await; // registering a rollup @@ -230,7 +231,7 @@ mod test { let client = surf_disco::Client::::new(solver_api); client.connect(None).await; - let (reg_ns_1, _, _) = + let (reg_ns_1, ..) 
= register_rollup_helper(1, Some("http://localhost"), 200, true, "test").await; // registering a rollup @@ -268,7 +269,7 @@ mod test { // Ensure the error indicates an invalid signature match err { SolverError::InvalidSignature(signature) - if reg_ns_2.signature.to_string() == signature => {} + if reg_ns_2.signature.to_string() == signature => {}, _ => panic!("err {err:?}"), } } @@ -375,7 +376,7 @@ mod test { .unwrap_err(); match err { - SolverError::Database(_) => {} + SolverError::Database(_) => {}, _ => panic!("err {err:?}"), } } @@ -532,7 +533,7 @@ mod test { client.connect(Some(Duration::from_secs(5))).await; // Register the first rollup (ns = 1) - let (reg_ns_1, _, _) = + let (reg_ns_1, ..) = register_rollup_helper(1, Some("http://localhost"), 200, true, "test").await; let _: RollupRegistration = client .post("register_rollup") @@ -543,7 +544,7 @@ mod test { .unwrap(); // Register the second rollup (ns = 2) - let (reg_ns_2, _, _) = + let (reg_ns_2, ..) = register_rollup_helper(2, Some("http://localhost"), 200, true, "test").await; let _: RollupRegistration = client .post("register_rollup") diff --git a/node-metrics/src/api/node_validator/v0/cdn/mod.rs b/node-metrics/src/api/node_validator/v0/cdn/mod.rs index 9359afec01..a1a5c01b8a 100644 --- a/node-metrics/src/api/node_validator/v0/cdn/mod.rs +++ b/node-metrics/src/api/node_validator/v0/cdn/mod.rs @@ -1,4 +1,3 @@ -use crate::api::node_validator::v0::create_node_validator_api::ExternalMessage; use espresso_types::{PubKey, SeqTypes}; use futures::{channel::mpsc::SendError, Sink, SinkExt}; use hotshot::{ @@ -15,6 +14,8 @@ use hotshot_types::{ use tokio::{spawn, task::JoinHandle}; use url::Url; +use crate::api::node_validator::v0::create_node_validator_api::ExternalMessage; + /// ConnectedNetworkConsumer represents a trait that splits up a portion of /// the ConnectedNetwork trait, so that the consumer only needs to be aware of /// the `wait_for_ready` and `recv_message` functions. @@ -95,7 +96,7 @@ impl CdnReceiveMessagesTask { Err(err) => { tracing::error!("error receiving message: {:?}", err); continue; - } + }, }; // We want to try and decode this message. 
@@ -106,17 +107,17 @@ impl CdnReceiveMessagesTask { Err(err) => { tracing::error!("error deserializing message: {:?}", err); continue; - } + }, }; let external_message_deserialize_result = match message.kind { MessageKind::External(external_message) => { bincode::deserialize::<ExternalMessage>(&external_message) - } + }, _ => { tracing::error!("unexpected message kind: {:?}", message); continue; - } + }, }; let external_message = match external_message_deserialize_result { @@ -124,7 +125,7 @@ impl CdnReceiveMessagesTask { Err(err) => { tracing::error!("error deserializing message: {:?}", err); continue; - } + }, }; match external_message { @@ -137,11 +138,11 @@ impl CdnReceiveMessagesTask { tracing::error!("error sending public api url: {:?}", err); return; } - } + }, _ => { // We're not concerned about other message types - } + }, } } } @@ -237,7 +238,7 @@ impl BroadcastRollCallTask { Err(err) => { tracing::error!("error serializing rollcall request: {:?}", err); return; - } + }, }; let hotshot_message = Message:: { @@ -250,7 +251,7 @@ impl BroadcastRollCallTask { Err(err) => { tracing::error!("error serializing hotshot message: {:?}", err); return; - } + }, }; let broadcast_result = network @@ -278,31 +279,33 @@ impl Drop for BroadcastRollCallTask { #[cfg(test)] mod test { use core::panic; + use std::time::Duration; + use espresso_types::SeqTypes; - use futures::channel::mpsc::Sender; - use futures::SinkExt; use futures::{ - channel::mpsc::{self}, - StreamExt, + channel::mpsc::{ + Sender, {self}, + }, + SinkExt, StreamExt, }; - use hotshot::types::SignatureKey; use hotshot::{ traits::NetworkError, - types::{BLSPubKey, Message}, + types::{BLSPubKey, Message, SignatureKey}, }; - use hotshot_types::message::{DataMessage, MessageKind}; - use hotshot_types::traits::network::{BroadcastDelay, ResponseMessage}; - use std::time::Duration; - use tokio::time::error::Elapsed; - use tokio::time::{sleep, timeout}; + use hotshot_types::{ + message::{DataMessage, MessageKind}, + traits::network::{BroadcastDelay, ResponseMessage}, + }; + use tokio::time::{error::Elapsed, sleep, timeout}; use url::Url; + use super::{BroadcastRollCallTask, ConnectedNetworkConsumer, ConnectedNetworkPublisher}; + use crate::api::node_validator::v0::{ + cdn::CdnReceiveMessagesTask, + create_node_validator_api::{ExternalMessage, RollCallInfo}, + }; + /// [TestConnectedNetworkConsumer] is a test implementation of the /// [ConnectedNetworkConsumer] trait that allows for the simulation of /// network messages being received.
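The repeated `- }` / `+ },` hunks in this file, as in most files in this series, are mechanical rustfmt output: the series moves `vid/rustfmt.toml` to the repository root, so its pre-existing `match_block_trailing_comma = true` now applies to the whole workspace and appends a comma to every block-bodied match arm. A minimal before/after sketch of that option (hypothetical code, not from this repo):

    // rustfmt default: no trailing comma after a block-bodied arm
    fn before(result: Result<u32, String>) {
        match result {
            Ok(v) => {
                println!("ok: {v}");
            }
            Err(e) => println!("err: {e}"),
        }
    }

    // match_block_trailing_comma = true: the arm's closing brace gains a comma,
    // which is exactly the `- }` / `+ },` churn in the surrounding hunks
    fn after(result: Result<u32, String>) {
        match result {
            Ok(v) => {
                println!("ok: {v}");
            },
            Err(e) => println!("err: {e}"),
        }
    }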
@@ -564,7 +567,7 @@ mod test { public_key, BLSPubKey::generated_from_seed_indexed([0; 32], 0).0 ); - } + }, _ => panic!("unexpected external message"), } diff --git a/node-metrics/src/api/node_validator/v0/create_node_validator_api.rs b/node-metrics/src/api/node_validator/v0/create_node_validator_api.rs index c60d089e62..d13b6ed950 100644 --- a/node-metrics/src/api/node_validator/v0/create_node_validator_api.rs +++ b/node-metrics/src/api/node_validator/v0/create_node_validator_api.rs @@ -1,5 +1,17 @@ use std::sync::Arc; +use async_lock::RwLock; +use espresso_types::{PubKey, SeqTypes}; +use futures::{ + channel::mpsc::{self, Receiver, SendError, Sender}, + Sink, SinkExt, Stream, StreamExt, +}; +use hotshot_query_service::Leaf2; +use hotshot_types::event::{Event, EventType}; +use serde::{Deserialize, Serialize}; +use tokio::{spawn, task::JoinHandle}; +use url::Url; + use super::{get_stake_table_from_sequencer, ProcessNodeIdentityUrlStreamTask}; use crate::service::{ client_id::ClientId, @@ -12,17 +24,6 @@ use crate::service::{ data_state::{DataState, ProcessLeafStreamTask, ProcessNodeIdentityStreamTask}, server_message::ServerMessage, }; -use async_lock::RwLock; -use espresso_types::{PubKey, SeqTypes}; -use futures::{ - channel::mpsc::{self, Receiver, SendError, Sender}, - Sink, SinkExt, Stream, StreamExt, -}; -use hotshot_query_service::Leaf2; -use hotshot_types::event::{Event, EventType}; -use serde::{Deserialize, Serialize}; -use tokio::{spawn, task::JoinHandle}; -use url::Url; pub struct NodeValidatorAPI { pub process_internal_client_message_handle: Option, @@ -119,7 +120,7 @@ impl HotShotEventProcessingTask { None => { tracing::info!("event stream closed"); break; - } + }, }; let Event { event, .. } = event; @@ -135,7 +136,7 @@ impl HotShotEventProcessingTask { panic!("HotShotEventProcessingTask leaf sender is closed, unrecoverable, the block state will stagnate."); } } - } + }, EventType::ExternalMessageReceived { data, .. 
} => { let roll_call_info = match bincode::deserialize(&data) { @@ -147,12 +148,12 @@ impl HotShotEventProcessingTask { err ); continue; - } + }, _ => { // Ignore any other potentially recognized messages continue; - } + }, }; let public_api_url = roll_call_info.public_api_url; @@ -163,11 +164,11 @@ impl HotShotEventProcessingTask { tracing::error!("url sender closed: {}", err); panic!("HotShotEventProcessingTask url sender is closed, unrecoverable, the node state will stagnate."); } - } + }, _ => { // Ignore all other events continue; - } + }, } } } @@ -236,7 +237,7 @@ impl ProcessExternalMessageHandlingTask { None => { tracing::error!("external message receiver closed"); break; - } + }, }; match external_message { @@ -248,12 +249,12 @@ impl ProcessExternalMessageHandlingTask { tracing::error!("url sender closed: {}", err); break; } - } + }, _ => { // Ignore all other messages continue; - } + }, } } } @@ -368,6 +369,10 @@ pub async fn create_node_validator_processing( #[cfg(test)] mod test { + use futures::channel::mpsc::{self, Sender}; + use tide_disco::App; + use tokio::spawn; + use crate::{ api::node_validator::v0::{ HotshotQueryServiceLeafStreamRetriever, ProcessProduceLeafStreamTask, @@ -375,9 +380,6 @@ mod test { }, service::{client_message::InternalClientMessage, server_message::ServerMessage}, }; - use futures::channel::mpsc::{self, Sender}; - use tide_disco::App; - use tokio::spawn; struct TestState(Sender>>); @@ -399,14 +401,14 @@ mod test { Ok(node_validator_api) => node_validator_api, Err(err) => { panic!("error defining node validator api: {:?}", err); - } + }, }; match app.register_module("node-validator", node_validator_api) { - Ok(_) => {} + Ok(_) => {}, Err(err) => { panic!("error registering node validator api: {:?}", err); - } + }, } let (leaf_sender, leaf_receiver) = mpsc::channel(10); @@ -449,7 +451,7 @@ mod test { Err(err) => { panic!("error defining node validator api: {:?}", err); - } + }, }; // We would like to wait until being signaled diff --git a/node-metrics/src/api/node_validator/v0/mod.rs b/node-metrics/src/api/node_validator/v0/mod.rs index b364c105ee..c152a8e81b 100644 --- a/node-metrics/src/api/node_validator/v0/mod.rs +++ b/node-metrics/src/api/node_validator/v0/mod.rs @@ -1,38 +1,35 @@ pub mod cdn; pub mod create_node_validator_api; -use crate::service::client_message::{ClientMessage, InternalClientMessage}; -use crate::service::data_state::{LocationDetails, NodeIdentity}; -use crate::service::server_message::ServerMessage; +use std::{fmt, future::Future, io::BufRead, pin::Pin, str::FromStr, time::Duration}; + use espresso_types::{BackoffParams, SeqTypes}; -use futures::channel::mpsc::SendError; -use futures::future::Either; use futures::{ - channel::mpsc::{self, Sender}, + channel::mpsc::{self, SendError, Sender}, + future::Either, FutureExt, Sink, SinkExt, Stream, StreamExt, }; use hotshot_query_service::Leaf2; use hotshot_stake_table::vec_based::StakeTable; -use hotshot_types::light_client::{CircuitField, StateVerKey}; -use hotshot_types::signature_key::BLSPubKey; -use hotshot_types::traits::{signature_key::StakeTableEntryType, stake_table::StakeTableScheme}; -use hotshot_types::PeerConfig; +use hotshot_types::{ + light_client::{CircuitField, StateVerKey}, + signature_key::BLSPubKey, + traits::{signature_key::StakeTableEntryType, stake_table::StakeTableScheme}, + PeerConfig, +}; use prometheus_parse::{Sample, Scrape}; use serde::{Deserialize, Serialize}; -use std::fmt; -use std::future::Future; -use std::io::BufRead; -use std::pin::Pin; -use 
std::str::FromStr; -use std::time::Duration; -use tide_disco::socket::Connection; -use tide_disco::{api::ApiError, Api}; -use tokio::spawn; -use tokio::task::JoinHandle; -use tokio::time::sleep; +use tide_disco::{api::ApiError, socket::Connection, Api}; +use tokio::{spawn, task::JoinHandle, time::sleep}; use url::Url; use vbs::version::{StaticVersion, StaticVersionType, Version}; +use crate::service::{ + client_message::{ClientMessage, InternalClientMessage}, + data_state::{LocationDetails, NodeIdentity}, + server_message::ServerMessage, +}; + /// CONSTANT for protocol major version pub const VERSION_MAJ: u16 = 0; @@ -64,11 +61,11 @@ impl fmt::Display for Error { match self { Self::UnhandledSurfDisco(status, msg) => { write!(f, "Unhandled Surf Disco Error: {} - {}", status, msg) - } + }, Self::UnhandledTideDisco(status, msg) => { write!(f, "Unhandled Tide Disco Error: {} - {}", status, msg) - } + }, } } } @@ -255,7 +252,7 @@ where // let's queue up the next client message to receive next_client_message = socket_stream.next(); next_server_message = remaining_server_message; - } + }, Either::Right((server_message, remaining_client_message)) => { // Alright, we have a server message, we want to forward it // to the down-stream client. @@ -277,7 +274,7 @@ where // let's queue up the next server message to receive next_server_message = server_message_receiver.next(); next_client_message = remaining_client_message; - } + }, } } @@ -327,7 +324,7 @@ pub async fn get_stake_table_from_sequencer( Err(err) => { tracing::info!("retrieve stake table request failed: {}", err); return Err(err); - } + }, }; let public_hot_shot_config = sequencer_config.config; @@ -481,7 +478,7 @@ impl LeafStreamRetriever for HotshotQueryServiceLeafStreamRetriever { Err(err) => { tracing::info!("retrieve block height request failed: {}", err); return Err(err); - } + }, }; let latest_block_start = block_height.saturating_sub(50); @@ -504,7 +501,7 @@ impl LeafStreamRetriever for HotshotQueryServiceLeafStreamRetriever { Err(err) => { tracing::info!("retrieve leaves stream failed: {}", err); return Err(err); - } + }, }; Ok(leaves_stream) @@ -621,7 +618,7 @@ impl ProcessProduceLeafStreamTask { delay = backoff_params.backoff(delay); sleep(delay).await; continue; - } + }, Ok(leaves_stream) => leaves_stream, }; @@ -795,7 +792,7 @@ pub fn populate_node_identity_from_scrape(node_identity: &mut NodeIdentity, scra // We couldn't parse the public key, so we can't create a NodeIdentity. tracing::info!("parsing public key failed: {}", err); return; - } + }, } } else { // We were unable to find the public key in the scrape result. @@ -878,7 +875,7 @@ pub fn node_identity_from_scrape(scrape: Scrape) -> Option { Err(err) => { tracing::info!("parsing public key failed: {}", err); return None; - } + }, }; let mut node_identity = NodeIdentity::from_public_key(public_key); @@ -937,7 +934,7 @@ impl ProcessNodeIdentityUrlStreamTask { None => { tracing::info!("node identity url stream closed"); return; - } + }, }; // Alright we have a new Url to try and scrape for a Node Identity. @@ -949,7 +946,7 @@ impl ProcessNodeIdentityUrlStreamTask { Err(err) => { tracing::warn!("get node identity from url failed. 
bad base url?: {}", err); continue; - } + }, }; let send_result = node_identity_sender.send(node_identity).await; diff --git a/node-metrics/src/lib.rs b/node-metrics/src/lib.rs index cc1d7c8e90..024e29c0a5 100644 --- a/node-metrics/src/lib.rs +++ b/node-metrics/src/lib.rs @@ -99,15 +99,6 @@ pub mod api; pub mod service; -use crate::{ - api::node_validator::v0::{ - cdn::{BroadcastRollCallTask, CdnReceiveMessagesTask}, - create_node_validator_api::{create_node_validator_processing, NodeValidatorConfig}, - HotshotQueryServiceLeafStreamRetriever, ProcessProduceLeafStreamTask, - StateClientMessageSender, STATIC_VER_0_1, - }, - service::{client_message::InternalClientMessage, server_message::ServerMessage}, -}; use clap::Parser; use espresso_types::{PubKey, SeqTypes}; use futures::channel::mpsc::{self, Sender}; @@ -120,6 +111,16 @@ use tide_disco::App; use tokio::spawn; use url::Url; +use crate::{ + api::node_validator::v0::{ + cdn::{BroadcastRollCallTask, CdnReceiveMessagesTask}, + create_node_validator_api::{create_node_validator_processing, NodeValidatorConfig}, + HotshotQueryServiceLeafStreamRetriever, ProcessProduceLeafStreamTask, + StateClientMessageSender, STATIC_VER_0_1, + }, + service::{client_message::InternalClientMessage, server_message::ServerMessage}, +}; + /// Options represents the configuration options that are available for running /// the node validator service via the [run_standalone_service] function. /// These options are configurable via command line arguments or environment @@ -233,10 +234,10 @@ pub async fn run_standalone_service(options: Options) { api::node_validator::v0::define_api().expect("error defining node validator api"); match app.register_module("node-validator", node_validator_api) { - Ok(_) => {} + Ok(_) => {}, Err(err) => { panic!("error registering node validator api: {:?}", err); - } + }, } let (leaf_sender, leaf_receiver) = mpsc::channel(10); @@ -260,7 +261,7 @@ pub async fn run_standalone_service(options: Options) { Err(err) => { panic!("error defining node validator api: {:?}", err); - } + }, }; let _cdn_tasks = if let Some(cdn_broker_url_string) = options.cdn_marshal_endpoint() { @@ -278,7 +279,7 @@ pub async fn run_standalone_service(options: Options) { Ok(cdn_network) => cdn_network, Err(err) => { panic!("error creating cdn network: {:?}", err); - } + }, }; let url_sender = node_validator_task_state.url_sender.clone(); diff --git a/node-metrics/src/service/client_id/mod.rs b/node-metrics/src/service/client_id/mod.rs index 11353b6e5d..65213982eb 100644 --- a/node-metrics/src/service/client_id/mod.rs +++ b/node-metrics/src/service/client_id/mod.rs @@ -1,6 +1,7 @@ -use serde::{Deserialize, Serialize}; use std::ops::{Add, AddAssign}; +use serde::{Deserialize, Serialize}; + /// [ClientId] represents the unique identifier for a client that is connected /// to the server. 
/// @@ -108,8 +109,10 @@ mod tests { #[test] fn test_hash() { - use std::collections::hash_map::DefaultHasher; - use std::hash::{Hash, Hasher}; + use std::{ + collections::hash_map::DefaultHasher, + hash::{Hash, Hasher}, + }; let hash_1 = { let client_id = ClientId::from_count(1); diff --git a/node-metrics/src/service/client_message/mod.rs b/node-metrics/src/service/client_message/mod.rs index d19881430f..e4eba18b0d 100644 --- a/node-metrics/src/service/client_message/mod.rs +++ b/node-metrics/src/service/client_message/mod.rs @@ -1,6 +1,7 @@ -use super::client_id::ClientId; use serde::{Deserialize, Serialize}; +use super::client_id::ClientId; + /// [ClientMessage] represents the messages that the client can send to the /// server for a request. /// @@ -38,12 +39,13 @@ impl ClientMessage { #[cfg(test)] mod tests { - use super::InternalClientMessage; - use super::*; - use crate::service::server_message::ServerMessage; - use futures::channel::mpsc::Sender; use std::iter::zip; + use futures::channel::mpsc::Sender; + + use super::{InternalClientMessage, *}; + use crate::service::server_message::ServerMessage; + impl PartialEq for InternalClientMessage { fn eq(&self, other: &Self) -> bool { match (self, other) { @@ -141,7 +143,7 @@ mod tests { match internal_client_message { InternalClientMessage::Request(id, _) => { assert_eq!(id, client_id); - } + }, _ => panic!("Unexpected InternalClientMessage"), } } diff --git a/node-metrics/src/service/client_state/mod.rs b/node-metrics/src/service/client_state/mod.rs index 1ca1415d57..7bc798db33 100644 --- a/node-metrics/src/service/client_state/mod.rs +++ b/node-metrics/src/service/client_state/mod.rs @@ -1,20 +1,21 @@ -use super::{ - client_id::ClientId, - client_message::{ClientMessage, InternalClientMessage}, - data_state::{DataState, NodeIdentity}, - server_message::ServerMessage, +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, }; + +use async_lock::{RwLock, RwLockWriteGuard}; use bitvec::vec::BitVec; use espresso_types::SeqTypes; use futures::{channel::mpsc::SendError, Sink, SinkExt, Stream, StreamExt}; use hotshot_query_service::explorer::{BlockDetail, ExplorerHistograms}; -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, -}; use tokio::{spawn, task::JoinHandle}; -use async_lock::{RwLock, RwLockWriteGuard}; +use super::{ + client_id::ClientId, + client_message::{ClientMessage, InternalClientMessage}, + data_state::{DataState, NodeIdentity}, + server_message::ServerMessage, +}; /// ClientState represents the service state of the connected clients. /// It maintains and represents the connected clients, and their subscriptions. 
@@ -112,7 +113,7 @@ impl std::fmt::Display for HandleConnectedError { match self { HandleConnectedError::ClientSendError(err) => { write!(f, "handle connected error: client send error: {}", err) - } + }, } } } @@ -235,7 +236,7 @@ impl std::fmt::Display for HandleRequestBlocksSnapshotsError { "handle request blocks snapshot error: client send error:: {}", err ) - } + }, } } } @@ -306,7 +307,7 @@ impl std::fmt::Display for HandleRequestNodeIdentitySnapshotError { "handle request node identity snapshot error: client send error: {}", err ) - } + }, } } } @@ -374,7 +375,7 @@ impl std::fmt::Display for HandleRequestHistogramSnapshotError { "handle request histogram snapshot error: client send error: {}", err ) - } + }, } } } @@ -461,7 +462,7 @@ impl std::fmt::Display for HandleRequestVotersSnapshotError { "handle request voters snapshot error: client send error: {}", err ) - } + }, } } } @@ -557,27 +558,27 @@ impl std::fmt::Display for ProcessClientMessageError { match self { ProcessClientMessageError::Connected(err) => { write!(f, "process client message error: connected: {}", err) - } + }, ProcessClientMessageError::BlocksSnapshot(err) => { write!(f, "process client message error: blocks snapshot: {}", err) - } + }, ProcessClientMessageError::NodeIdentitySnapshot(err) => { write!( f, "process client message error: node identity snapshot: {}", err ) - } + }, ProcessClientMessageError::HistogramSnapshot(err) => { write!( f, "process client message error: histogram snapshot: {}", err ) - } + }, ProcessClientMessageError::VotersSnapshot(err) => { write!(f, "process client message error: voters snapshot: {}", err) - } + }, } } } @@ -615,27 +616,27 @@ where InternalClientMessage::Connected(sender) => { handle_client_message_connected(sender, client_thread_state).await?; Ok(()) - } + }, InternalClientMessage::Disconnected(client_id) => { handle_client_message_disconnected(client_id, client_thread_state).await; Ok(()) - } + }, InternalClientMessage::Request(client_id, ClientMessage::SubscribeLatestBlock) => { handle_client_message_subscribe_latest_block(client_id, client_thread_state).await; Ok(()) - } + }, InternalClientMessage::Request(client_id, ClientMessage::SubscribeNodeIdentity) => { handle_client_message_subscribe_node_identity(client_id, client_thread_state).await; Ok(()) - } + }, InternalClientMessage::Request(client_id, ClientMessage::SubscribeVoters) => { handle_client_message_subscribe_voters(client_id, client_thread_state).await; Ok(()) - } + }, InternalClientMessage::Request(client_id, ClientMessage::RequestBlocksSnapshot) => { handle_client_message_request_blocks_snapshot( @@ -645,7 +646,7 @@ where ) .await?; Ok(()) - } + }, InternalClientMessage::Request(client_id, ClientMessage::RequestNodeIdentitySnapshot) => { handle_client_message_request_node_identity_snapshot( @@ -655,7 +656,7 @@ where ) .await?; Ok(()) - } + }, InternalClientMessage::Request(client_id, ClientMessage::RequestHistogramSnapshot) => { handle_client_message_request_histogram_snapshot( @@ -665,7 +666,7 @@ where ) .await?; Ok(()) - } + }, InternalClientMessage::Request(client_id, ClientMessage::RequestVotersSnapshot) => { handle_client_message_request_voters_snapshot( @@ -675,7 +676,7 @@ where ) .await?; Ok(()) - } + }, } } @@ -1180,6 +1181,22 @@ impl Drop for ProcessDistributeVotersHandlingTask { #[cfg(test)] pub mod tests { + use std::{sync::Arc, time::Duration}; + + use async_lock::RwLock; + use bitvec::vec::BitVec; + use espresso_types::{Leaf2, NodeState, ValidatedState}; + use futures::{ + channel::mpsc::{self, 
Sender}, + SinkExt, StreamExt, + }; + use hotshot_example_types::node_types::TestVersions; + use hotshot_types::{signature_key::BLSPubKey, traits::signature_key::SignatureKey}; + use tokio::{ + spawn, + time::{sleep, timeout}, + }; + use super::{ClientThreadState, InternalClientMessageProcessingTask}; use crate::service::{ client_id::ClientId, @@ -1194,20 +1211,6 @@ pub mod tests { }, server_message::ServerMessage, }; - use async_lock::RwLock; - use bitvec::vec::BitVec; - use espresso_types::{Leaf2, NodeState, ValidatedState}; - use futures::{ - channel::mpsc::{self, Sender}, - SinkExt, StreamExt, - }; - use hotshot_example_types::node_types::TestVersions; - use hotshot_types::{signature_key::BLSPubKey, traits::signature_key::SignatureKey}; - use std::{sync::Arc, time::Duration}; - use tokio::{ - spawn, - time::{sleep, timeout}, - }; pub fn create_test_client_thread_state() -> ClientThreadState> { ClientThreadState { diff --git a/node-metrics/src/service/data_state/mod.rs b/node-metrics/src/service/data_state/mod.rs index ae820f4e5d..82309f129b 100644 --- a/node-metrics/src/service/data_state/mod.rs +++ b/node-metrics/src/service/data_state/mod.rs @@ -1,6 +1,8 @@ pub mod location_details; pub mod node_identity; +use std::{collections::HashSet, iter::zip, sync::Arc}; + use async_lock::RwLock; use bitvec::vec::BitVec; use circular_buffer::CircularBuffer; @@ -23,7 +25,6 @@ use hotshot_types::{ }; pub use location_details::LocationDetails; pub use node_identity::NodeIdentity; -use std::{collections::HashSet, iter::zip, sync::Arc}; use time::OffsetDateTime; use tokio::{spawn, task::JoinHandle}; @@ -52,7 +53,7 @@ impl DataState { let stake_table_iter_result = stake_table.try_iter(SnapshotVersion::Head); match stake_table_iter_result { Ok(into_iter) => into_iter - .map(|(key, _, _)| NodeIdentity::from_public_key(key)) + .map(|(key, ..)| NodeIdentity::from_public_key(key)) .collect(), Err(_) => vec![], } @@ -106,10 +107,10 @@ impl DataState { }; let missing_node_identity_entries = - stake_table_iter.filter(|(key, _, _)| !current_identity_set.contains(key)); + stake_table_iter.filter(|(key, ..)| !current_identity_set.contains(key)); self.node_identity.extend( - missing_node_identity_entries.map(|(key, _, _)| NodeIdentity::from_public_key(key)), + missing_node_identity_entries.map(|(key, ..)| NodeIdentity::from_public_key(key)), ); } @@ -200,10 +201,10 @@ impl std::fmt::Display for ProcessLeafError { match self { ProcessLeafError::BlockSendError(err) => { write!(f, "error sending block detail to sender: {}", err) - } + }, ProcessLeafError::VotersSendError(err) => { write!(f, "error sending voters to sender: {}", err) - } + }, } } } @@ -283,7 +284,7 @@ where // In this case, we just want to determine who voted for this // Leaf. 
- let (key, _, _): (BLSPubKey, _, _) = entry; + let (key, ..): (BLSPubKey, _, _) = entry; key }); @@ -396,10 +397,10 @@ impl ProcessLeafStreamTask { match err { ProcessLeafError::BlockSendError(_) => { panic!("ProcessLeafStreamTask: process_incoming_leaf failed, underlying sink is closed, blocks will stagnate: {}", err) - } + }, ProcessLeafError::VotersSendError(_) => { panic!("ProcessLeafStreamTask: process_incoming_leaf failed, underlying sink is closed, voters will stagnate: {}", err) - } + }, } } } @@ -429,7 +430,7 @@ impl std::fmt::Display for ProcessNodeIdentityError { match self { ProcessNodeIdentityError::SendError(err) => { write!(f, "error sending node identity to sender: {}", err) - } + }, } } } @@ -563,22 +564,23 @@ impl Drop for ProcessNodeIdentityStreamTask { #[cfg(test)] mod tests { - use super::{DataState, ProcessLeafStreamTask}; - use crate::service::data_state::{ - LocationDetails, NodeIdentity, ProcessNodeIdentityStreamTask, - }; + use std::{sync::Arc, time::Duration}; + use async_lock::RwLock; use espresso_types::{ v0_99::ChainConfig, BlockMerkleTree, FeeMerkleTree, Leaf2, NodeState, ValidatedState, }; use futures::{channel::mpsc, SinkExt, StreamExt}; - use hotshot_example_types::node_types::TestVersions; use hotshot_types::{signature_key::BLSPubKey, traits::signature_key::SignatureKey}; - use std::{sync::Arc, time::Duration}; use tokio::time::timeout; use url::Url; + use super::{DataState, ProcessLeafStreamTask}; + use crate::service::data_state::{ + LocationDetails, NodeIdentity, ProcessNodeIdentityStreamTask, + }; + #[tokio::test(flavor = "multi_thread")] async fn test_process_leaf_error_debug() { let (mut sender, receiver) = mpsc::channel(1); diff --git a/node-metrics/src/service/data_state/node_identity.rs b/node-metrics/src/service/data_state/node_identity.rs index 8396a81340..96bdc6ab5a 100644 --- a/node-metrics/src/service/data_state/node_identity.rs +++ b/node-metrics/src/service/data_state/node_identity.rs @@ -1,8 +1,9 @@ -use super::LocationDetails; use hotshot_types::signature_key::BLSPubKey; use serde::{Deserialize, Serialize}; use surf_disco::Url; +use super::LocationDetails; + /// [NodeIdentity] represents the identity of the node that is participating /// in the network. #[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] @@ -126,10 +127,9 @@ impl NodeIdentity { #[cfg(test)] pub mod tests { - use super::LocationDetails; - use super::NodeIdentity; - use hotshot_types::signature_key::BLSPubKey; - use hotshot_types::traits::signature_key::SignatureKey; + use hotshot_types::{signature_key::BLSPubKey, traits::signature_key::SignatureKey}; + + use super::{LocationDetails, NodeIdentity}; pub fn create_test_node(index: u64) -> NodeIdentity { let (pub_key, _) = BLSPubKey::generated_from_seed_indexed([0; 32], index); diff --git a/node-metrics/src/service/server_message/mod.rs b/node-metrics/src/service/server_message/mod.rs index 0620de3ecb..da8b51c0fd 100644 --- a/node-metrics/src/service/server_message/mod.rs +++ b/node-metrics/src/service/server_message/mod.rs @@ -1,11 +1,12 @@ use std::sync::Arc; -use super::{client_id::ClientId, data_state::NodeIdentity}; use bitvec::vec::BitVec; use espresso_types::SeqTypes; use hotshot_query_service::explorer::{BlockDetail, ExplorerHistograms}; use serde::{Deserialize, Serialize}; +use super::{client_id::ClientId, data_state::NodeIdentity}; + /// [ServerMessage] represents the messages that the server can send to the /// client for a response. 
#[derive(Debug, Serialize, Deserialize)] diff --git a/request-response/src/lib.rs b/request-response/src/lib.rs index d2cd58221b..211261cf94 100644 --- a/request-response/src/lib.rs +++ b/request-response/src/lib.rs @@ -406,24 +406,24 @@ impl< Err(e) => { warn!("Received invalid message: {e}"); continue; - } + }, }; // Handle the message based on its type match message { Message::Request(request_message) => { self.handle_request(request_message, &mut outgoing_responses); - } + }, Message::Response(response_message) => { self.handle_response(response_message, &mut incoming_responses); - } + }, } - } + }, // An error here means the receiver will _NEVER_ receive any more messages Err(e) => { error!("Request/response receive task exited: {e}"); return; - } + }, } } } diff --git a/request-response/src/message.rs b/request-response/src/message.rs index 622be26f22..a704e1c454 100644 --- a/request-response/src/message.rs +++ b/request-response/src/message.rs @@ -140,14 +140,14 @@ impl Serializable for Message { // Write the request content bytes.extend_from_slice(request_message.to_bytes()?.as_slice()); - } + }, Message::Response(response_message) => { // Write the type (response) bytes.push(1); // Write the response content bytes.extend_from_slice(response_message.to_bytes()?.as_slice()); - } + }, }; Ok(bytes) @@ -168,13 +168,13 @@ impl Serializable for Message { Ok(Message::Request(RequestMessage::from_bytes(&read_to_end( &mut bytes, )?)?)) - } + }, 1 => { // Read the `ResponseMessage` Ok(Message::Response(ResponseMessage::from_bytes( &read_to_end(&mut bytes)?, )?)) - } + }, _ => Err(anyhow::anyhow!("invalid message type")), } } @@ -353,7 +353,7 @@ mod tests { // It should not be valid anymore (false, Duration::from_secs(1)) - } + }, 2 => { // Alter the timestamp @@ -361,13 +361,13 @@ mod tests { // It should not be valid anymore (false, Duration::from_secs(1)) - } + }, 3 => { // Change the request ttl to be 0. 
This should make the request // invalid immediately (true, Duration::from_secs(0)) - } + }, _ => unreachable!(), }; diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 6dc822914a..0b80460114 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] channel = "stable" -components = ["rustfmt", "llvm-tools-preview", "rust-src", "clippy"] +components = ["llvm-tools-preview", "rust-src", "clippy"] profile = "minimal" diff --git a/vid/rustfmt.toml b/rustfmt.toml similarity index 73% rename from vid/rustfmt.toml rename to rustfmt.toml index b288fc82e4..e4468267ac 100644 --- a/vid/rustfmt.toml +++ b/rustfmt.toml @@ -1,9 +1,8 @@ reorder_imports = true -wrap_comments = true -normalize_comments = true use_try_shorthand = true match_block_trailing_comma = true use_field_init_shorthand = true -edition = "2018" +edition = "2021" condense_wildcard_suffixes = true imports_granularity = "Crate" +group_imports = "StdExternalCrate" \ No newline at end of file diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index 3a8ebff8da..0433e9f03e 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -1,3 +1,5 @@ +use std::{pin::Pin, sync::Arc}; + use anyhow::{bail, Context}; use async_lock::RwLock; use async_once_cell::Lazy; @@ -18,7 +20,6 @@ use hotshot_events_service::events_source::{ EventFilterSet, EventsSource, EventsStreamer, StartupInfo, }; use hotshot_query_service::data_source::ExtensibleDataSource; -use hotshot_types::vote::HasViewNumber; use hotshot_types::{ data::ViewNumber, event::Event, @@ -30,12 +31,11 @@ use hotshot_types::{ ValidatedState as _, }, utils::{View, ViewInner}, + vote::HasViewNumber, PeerConfig, }; use itertools::Itertools; use jf_merkle_tree::MerkleTreeScheme; -use std::pin::Pin; -use std::sync::Arc; use self::data_source::{HotShotConfigDataSource, NodeStateDataSource, StateSignatureDataSource}; use crate::{ @@ -284,7 +284,7 @@ impl< Ok(accounts) => return Ok(accounts), Err(err) => { tracing::info!("accounts not in memory, trying storage: {err:#}"); - } + }, } // Try storage. @@ -321,7 +321,7 @@ impl< } (Arc::new(state), delta.clone()) - } + }, _ => { // If we don't already have a leaf for this view, or if we don't have the view // at all, we can create a new view based on the recovered leaf and add it to @@ -330,7 +330,7 @@ impl< let mut state = ValidatedState::from_header(leaf.block_header()); state.fee_merkle_tree = tree.clone(); (Arc::new(state), None) - } + }, }; if let Err(err) = consensus.update_leaf(leaf, Arc::clone(&state), delta) { tracing::warn!(?view, "cannot update fetched account state: {err:#}"); @@ -352,7 +352,7 @@ impl< Ok(frontier) => return Ok(frontier), Err(err) => { tracing::info!("frontier is not in memory, trying storage: {err:#}"); - } + }, } // Try storage. @@ -368,7 +368,7 @@ impl< Ok(cf) => return Ok(cf), Err(err) => { tracing::info!("chain config is not in memory, trying storage: {err:#}"); - } + }, } // Try storage. @@ -380,7 +380,7 @@ impl< Ok(cf) => return Ok(cf), Err(err) => { tracing::info!("chain config is not in memory, trying storage: {err:#}"); - } + }, } // Try storage. 
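The rustfmt.toml change above is what drives the import reshuffling seen in almost every hunk of this series: `group_imports = "StdExternalCrate"` makes rustfmt emit `use` declarations in three blank-line-separated groups (std/core/alloc first, then external crates, then `crate`/`self`/`super` paths), while the pre-existing `imports_granularity = "Crate"` merges all imports from one crate into a single `use`. A minimal before/after sketch, using illustrative paths rather than ones taken from any particular hunk:

// Before formatting: one `use` per item, in arbitrary order.
use std::sync::Arc;
use anyhow::Context;
use crate::context::TaskList;
use std::time::Duration;

// After `imports_granularity = "Crate"` and `group_imports = "StdExternalCrate"`:
use std::{sync::Arc, time::Duration};

use anyhow::Context;

use crate::context::TaskList;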
@@ -557,17 +557,12 @@ impl, V: Versions, P: SequencerPersistence> StateSig #[cfg(any(test, feature = "testing"))] pub mod test_helpers { - use committable::Committable; - use hotshot_state_prover::service::light_client_genesis_from_stake_table; use std::time::Duration; - use tempfile::TempDir; - use tokio::{spawn, time::sleep}; - use crate::network; - use espresso_types::MockSequencerVersions; + use committable::Committable; use espresso_types::{ v0::traits::{NullEventConsumer, PersistenceOptions, StateCatchup}, - MarketplaceVersion, NamespaceId, ValidatedState, + MarketplaceVersion, MockSequencerVersions, NamespaceId, ValidatedState, }; use ethers::{prelude::Address, utils::Anvil}; use futures::{ @@ -576,6 +571,7 @@ pub mod test_helpers { }; use hotshot::types::{Event, EventType}; use hotshot_contract_adapter::light_client::{ParsedLightClientState, ParsedStakeTableState}; + use hotshot_state_prover::service::light_client_genesis_from_stake_table; use hotshot_types::{ event::LeafInfo, traits::{metrics::NoMetrics, node_implementation::ConsensusTime}, @@ -585,15 +581,16 @@ pub mod test_helpers { use portpicker::pick_unused_port; use sequencer_utils::test_utils::setup_test; use surf_disco::Client; - use tide_disco::error::ServerError; - use tide_disco::{Api, App, Error, StatusCode}; - use tokio::task::JoinHandle; + use tempfile::TempDir; + use tide_disco::{error::ServerError, Api, App, Error, StatusCode}; + use tokio::{spawn, task::JoinHandle, time::sleep}; use url::Url; use vbs::version::{StaticVersion, StaticVersionType}; use super::*; use crate::{ catchup::NullStateCatchup, + network, persistence::no_storage, testing::{ run_marketplace_builder, run_test_builder, wait_for_decide_on_handle, TestConfig, @@ -1157,13 +1154,14 @@ pub mod test_helpers { #[cfg(test)] #[espresso_macros::generic_tests] mod api_tests { + use std::fmt::Debug; + use committable::Committable; use data_source::testing::TestableSequencerDataSource; use endpoints::NamespaceProofQueryData; - use espresso_types::MockSequencerVersions; use espresso_types::{ traits::{EventConsumer, PersistenceOptions}, - Header, Leaf2, NamespaceId, + Header, Leaf2, MockSequencerVersions, NamespaceId, }; use ethers::utils::Anvil; use futures::{future, stream::StreamExt}; @@ -1171,23 +1169,19 @@ mod api_tests { use hotshot_query_service::availability::{ AvailabilityDataSource, BlockQueryData, VidCommonQueryData, }; - - use hotshot_types::data::ns_table::parse_ns_table; - use hotshot_types::data::vid_disperse::VidDisperseShare2; - use hotshot_types::data::{DaProposal2, EpochNumber, VidCommitment}; - use hotshot_types::simple_certificate::QuorumCertificate2; - - use hotshot_types::vid::avidm::{init_avidm_param, AvidMScheme}; use hotshot_types::{ - data::{QuorumProposal2, QuorumProposalWrapper}, + data::{ + ns_table::parse_ns_table, vid_disperse::VidDisperseShare2, DaProposal2, EpochNumber, + QuorumProposal2, QuorumProposalWrapper, VidCommitment, + }, event::LeafInfo, message::Proposal, + simple_certificate::QuorumCertificate2, traits::{node_implementation::ConsensusTime, signature_key::SignatureKey, EncodeBytes}, + vid::avidm::{init_avidm_param, AvidMScheme}, }; - use portpicker::pick_unused_port; use sequencer_utils::test_utils::setup_test; - use std::fmt::Debug; use surf_disco::Client; use test_helpers::{ catchup_test_helper, state_signature_test_helper, status_test_helper, submit_test_helper, @@ -1197,8 +1191,8 @@ mod api_tests { use vbs::version::StaticVersion; use super::{update::ApiEventConsumer, *}; - use crate::network; use crate::{ + 
network, persistence::no_storage::NoStorage, testing::{wait_for_decide_on_handle, TestConfigBuilder}, }; @@ -1628,10 +1622,9 @@ mod api_tests { #[cfg(test)] mod test { - use committable::{Commitment, Committable}; use std::{collections::BTreeMap, time::Duration}; - use tokio::time::sleep; + use committable::{Commitment, Committable}; use espresso_types::{ config::PublicHotShotConfig, traits::NullEventConsumer, @@ -1665,6 +1658,7 @@ mod test { }; use tide_disco::{app::AppHealth, error::ServerError, healthcheck::HealthStatus}; use time::OffsetDateTime; + use tokio::time::sleep; use vbs::version::{StaticVersion, StaticVersionType, Version}; use self::{ @@ -2479,7 +2473,7 @@ mod test { let new_version = upgrade.new_version; assert_eq!(new_version, ::Upgrade::VERSION); break upgrade.new_version_first_view; - } + }, _ => continue, } }; diff --git a/sequencer/src/api/data_source.rs b/sequencer/src/api/data_source.rs index daa3518b40..a88ad528be 100644 --- a/sequencer/src/api/data_source.rs +++ b/sequencer/src/api/data_source.rs @@ -15,11 +15,13 @@ use hotshot_query_service::{ node::NodeDataSource, status::StatusDataSource, }; -use hotshot_types::traits::node_implementation::NodeType; use hotshot_types::{ data::ViewNumber, light_client::StateSignatureRequestBody, - traits::{network::ConnectedNetwork, node_implementation::Versions}, + traits::{ + network::ConnectedNetwork, + node_implementation::{NodeType, Versions}, + }, PeerConfig, }; use tide_disco::Url; diff --git a/sequencer/src/api/endpoints.rs b/sequencer/src/api/endpoints.rs index 622d50a229..cfa4a0c440 100644 --- a/sequencer/src/api/endpoints.rs +++ b/sequencer/src/api/endpoints.rs @@ -13,11 +13,12 @@ use hotshot_query_service::{ availability::{self, AvailabilityDataSource, CustomSnafu, FetchBlockSnafu}, explorer::{self, ExplorerDataSource}, merklized_state::{ - self, MerklizedState, MerklizedStateDataSource, MerklizedStateHeightPersistence, + self, MerklizedState, MerklizedStateDataSource, MerklizedStateHeightPersistence, Snapshot, }, - node, ApiState, Error, + node, + node::NodeDataSource, + ApiState, Error, }; -use hotshot_query_service::{merklized_state::Snapshot, node::NodeDataSource}; use hotshot_types::{ data::{EpochNumber, ViewNumber}, traits::{ @@ -28,7 +29,6 @@ use hotshot_types::{ use jf_merkle_tree::MerkleTreeScheme; use serde::{de::Error as _, Deserialize, Serialize}; use snafu::OptionExt; - use tagged_base64::TaggedBase64; use tide_disco::{method::ReadState, Api, Error as _, StatusCode}; use vbs::version::{StaticVersion, StaticVersionType}; diff --git a/sequencer/src/api/options.rs b/sequencer/src/api/options.rs index 2db7fd4ea3..0f867e633f 100644 --- a/sequencer/src/api/options.rs +++ b/sequencer/src/api/options.rs @@ -1,5 +1,7 @@ //! Sequencer-specific API options and initialization. 
+use std::sync::Arc; + use anyhow::{bail, Context}; use clap::Parser; use espresso_types::{ @@ -22,7 +24,6 @@ use hotshot_types::traits::{ network::ConnectedNetwork, node_implementation::Versions, }; -use std::sync::Arc; use tide_disco::{listener::RateLimitListener, method::ReadState, App, Url}; use vbs::version::StaticVersionType; diff --git a/sequencer/src/api/sql.rs b/sequencer/src/api/sql.rs index b19fdc83f1..fb8132151a 100644 --- a/sequencer/src/api/sql.rs +++ b/sequencer/src/api/sql.rs @@ -1,3 +1,5 @@ +use std::collections::{HashSet, VecDeque}; + use anyhow::{bail, ensure, Context}; use async_trait::async_trait; use committable::{Commitment, Committable}; @@ -30,7 +32,6 @@ use jf_merkle_tree::{ LookupResult, MerkleTreeScheme, }; use sqlx::{Encode, Type}; -use std::collections::{HashSet, VecDeque}; use super::{ data_source::{Provider, SequencerDataSource}, @@ -145,7 +146,7 @@ impl CatchupStorage for SqlStorage { LookupResult::Ok(_, proof) => Ok(proof), _ => { bail!("state snapshot {view:?},{height} was found but does not contain frontier at height {}; this should not be possible", height - 1); - } + }, } } } @@ -275,13 +276,13 @@ async fn load_accounts( ))? { MerkleNode::Leaf { pos, elem, .. } => { snapshot.remember(*pos, *elem, proof)?; - } + }, MerkleNode::Empty => { snapshot.non_membership_remember(*account, proof)?; - } + }, _ => { bail!("Invalid proof"); - } + }, } } @@ -442,7 +443,7 @@ async fn header_dependencies( // so the STF will be able to look it up later. catchup.add_chain_config(cf); cf - } + }, } }; diff --git a/sequencer/src/api/update.rs b/sequencer/src/api/update.rs index 5f49fde095..bf9d821eba 100644 --- a/sequencer/src/api/update.rs +++ b/sequencer/src/api/update.rs @@ -1,5 +1,7 @@ //! Update loop for query API state. +use std::{fmt::Debug, sync::Arc}; + use anyhow::bail; use async_trait::async_trait; use derivative::Derivative; @@ -8,8 +10,6 @@ use espresso_types::{v0::traits::SequencerPersistence, PubKey}; use hotshot::types::Event; use hotshot_query_service::data_source::UpdateDataSource; use hotshot_types::traits::{network::ConnectedNetwork, node_implementation::Versions}; -use std::fmt::Debug; -use std::sync::Arc; use super::{data_source::SequencerDataSource, StorageState}; use crate::{EventConsumer, SeqTypes}; diff --git a/sequencer/src/bin/cdn-whitelist.rs b/sequencer/src/bin/cdn-whitelist.rs index 23273f9d8d..2e47b3d049 100644 --- a/sequencer/src/bin/cdn-whitelist.rs +++ b/sequencer/src/bin/cdn-whitelist.rs @@ -9,8 +9,10 @@ use cdn_broker::reexports::discovery::{DiscoveryClient, Embedded, Redis}; use clap::Parser; use espresso_types::SeqTypes; use hotshot_orchestrator::client::OrchestratorClient; -use hotshot_types::network::NetworkConfig; -use hotshot_types::traits::{node_implementation::NodeType, signature_key::SignatureKey}; +use hotshot_types::{ + network::NetworkConfig, + traits::{node_implementation::NodeType, signature_key::SignatureKey}, +}; use surf_disco::Url; #[derive(Parser, Debug)] diff --git a/sequencer/src/bin/dev-rollup.rs b/sequencer/src/bin/dev-rollup.rs index 19736da728..ad33ab2c17 100644 --- a/sequencer/src/bin/dev-rollup.rs +++ b/sequencer/src/bin/dev-rollup.rs @@ -10,7 +10,6 @@ use espresso_types::{ use hotshot::types::BLSPubKey; use hotshot_types::traits::{node_implementation::NodeType, signature_key::SignatureKey}; use marketplace_solver::{SolverError, SOLVER_API_PATH}; - use sequencer_utils::logging; use tagged_base64::TaggedBase64; use url::Url; diff --git a/sequencer/src/bin/espresso-bridge.rs 
b/sequencer/src/bin/espresso-bridge.rs index 8201904e45..2ab78fde16 100644 --- a/sequencer/src/bin/espresso-bridge.rs +++ b/sequencer/src/bin/espresso-bridge.rs @@ -1,3 +1,5 @@ +use std::{sync::Arc, time::Duration}; + use anyhow::{bail, ensure, Context}; use clap::{Parser, Subcommand}; use client::SequencerClient; @@ -10,7 +12,6 @@ use ethers::{ }; use futures::stream::StreamExt; use sequencer_utils::logging; -use std::{sync::Arc, time::Duration}; use surf_disco::Url; /// Command-line utility for working with the Espresso bridge. @@ -213,7 +214,7 @@ async fn deposit(opt: Deposit) -> anyhow::Result<()> { Err(err) => { tracing::warn!("error in header stream: {err:#}"); continue; - } + }, }; let Some(l1_finalized) = header.l1_finalized() else { continue; diff --git a/sequencer/src/bin/espresso-dev-node.rs b/sequencer/src/bin/espresso-dev-node.rs index 7fed6cec03..fdff16e3b2 100644 --- a/sequencer/src/bin/espresso-dev-node.rs +++ b/sequencer/src/bin/espresso-dev-node.rs @@ -559,7 +559,6 @@ struct SetHotshotUpReqBody { mod tests { use std::{process::Child, sync::Arc, time::Duration}; - use crate::AltChainInfo; use committable::{Commitment, Committable}; use contract_bindings_ethers::light_client::LightClient; use escargot::CargoBuild; @@ -577,11 +576,10 @@ mod tests { use surf_disco::Client; use tide_disco::error::ServerError; use tokio::time::sleep; - use url::Url; use vbs::version::StaticVersion; - use crate::{DevInfo, SetHotshotDownReqBody, SetHotshotUpReqBody}; + use crate::{AltChainInfo, DevInfo, SetHotshotDownReqBody, SetHotshotUpReqBody}; const TEST_MNEMONIC: &str = "test test test test test test test test test test test junk"; const NUM_ALT_CHAIN_PROVIDERS: usize = 1; diff --git a/sequencer/src/bin/keygen.rs b/sequencer/src/bin/keygen.rs index 188179f982..f4ac7d7d83 100644 --- a/sequencer/src/bin/keygen.rs +++ b/sequencer/src/bin/keygen.rs @@ -33,7 +33,7 @@ impl Scheme { Self::All => { Self::Bls.gen(seed, index, env_file)?; Self::Schnorr.gen(seed, index, env_file)?; - } + }, Self::Bls => { let (pub_key, priv_key) = BLSPubKey::generated_from_seed_indexed(seed, index); let priv_key = priv_key.to_tagged_base64()?; @@ -43,7 +43,7 @@ impl Scheme { "ESPRESSO_SEQUENCER_PRIVATE_STAKING_KEY={priv_key}" )?; tracing::info!(%pub_key, "generated staking key") - } + }, Self::Schnorr => { let key_pair = StateKeyPair::generate_from_seed_indexed(seed, index); let priv_key = key_pair.sign_key_ref().to_tagged_base64()?; @@ -54,7 +54,7 @@ impl Scheme { )?; writeln!(env_file, "ESPRESSO_SEQUENCER_PRIVATE_STATE_KEY={priv_key}")?; tracing::info!(pub_key = %key_pair.ver_key(), "generated state key"); - } + }, } Ok(()) } diff --git a/sequencer/src/bin/nasty-client.rs b/sequencer/src/bin/nasty-client.rs index b3741582d8..f478e8323c 100644 --- a/sequencer/src/bin/nasty-client.rs +++ b/sequencer/src/bin/nasty-client.rs @@ -12,6 +12,16 @@ //! provides a healthcheck endpoint as well as a prometheus endpoint which provides metrics like the //! count of various types of actions performed and the number of open streams. 
+use std::{ + borrow::Cow, + cmp::max, + collections::{BTreeMap, HashMap}, + fmt::Debug, + pin::Pin, + sync::Arc, + time::{Duration, Instant}, +}; + use anyhow::{bail, ensure, Context}; use async_lock::RwLock; use clap::Parser; @@ -41,15 +51,6 @@ use rand::{seq::SliceRandom, RngCore}; use sequencer::{api::endpoints::NamespaceProofQueryData, SequencerApiVersion}; use sequencer_utils::logging; use serde::de::DeserializeOwned; -use std::{ - borrow::Cow, - cmp::max, - collections::{BTreeMap, HashMap}, - fmt::Debug, - pin::Pin, - sync::Arc, - time::{Duration, Instant}, -}; use strum::{EnumDiscriminants, VariantArray}; use surf_disco::{error::ClientError, socket, Error, StatusCode, Url}; use tide_disco::{error::ServerError, App}; @@ -520,7 +521,7 @@ impl ResourceManager { Ok(res) if i == 0 => { // Succeeded on the first try, get on with it. return Ok(res); - } + }, Ok(res) => { // Succeeded after at least one failure; retry a number of additional times to // be sure the endpoint is healed. @@ -531,14 +532,14 @@ impl ResourceManager { )?; } return Ok(res); - } + }, Err(err) if i < self.cfg.max_retries => { tracing::warn!("failed, will retry: {err:#}"); i += 1; - } + }, Err(err) => { return Err(err).context("failed too many times"); - } + }, } } } @@ -674,7 +675,7 @@ impl ResourceManager { obj.height() ); } - } + }, Err(_) if to - from > limit => { tracing::info!( limit, @@ -682,10 +683,10 @@ impl ResourceManager { to, "range query exceeding limit failed as expected" ); - } + }, Err(err) => { return Err(err).context("error in range query"); - } + }, } self.metrics.query_range_actions[&T::RESOURCE].add(1); @@ -800,7 +801,7 @@ impl ResourceManager { ); } break obj; - } + }, Err(err) if refreshed.elapsed() >= self.cfg.web_socket_timeout => { // Streams are allowed to fail if the connection is too old. Warn about it, // but refresh the connection and try again. @@ -818,7 +819,7 @@ impl ResourceManager { "{} stream refreshed due to connection reset", Self::singular(), ); - } + }, Err(err) => { // Errors on a relatively fresh connection are not allowed. Close the stream // since it is apparently in a bad state, and return an error. @@ -830,7 +831,7 @@ impl ResourceManager { Self::singular(), refreshed.elapsed() )); - } + }, } }; @@ -944,11 +945,11 @@ impl ResourceManager
<Header>
{ // The block state at height 0 is empty, so to have a valid query just adjust to // querying at height 1. At height 1, the only valid index to query is 0. (1, 0) - } + }, block => { // At any other height, all indices between 0 and `block - 1` are valid to query. (block, index % (block - 1)) - } + }, }; // Get the header of the state snapshot we're going to query and the block commitment we're @@ -1344,7 +1345,7 @@ impl Client { Resource::Payloads => self.payloads.close_stream(id).await, }; Ok(()) - } + }, Action::PollStream { resource, id, @@ -1357,16 +1358,16 @@ impl Client { }, Action::QueryWindow { from, duration } => { self.headers.query_window(from, duration).await - } + }, Action::QueryNamespace { block, namespace } => { self.blocks.query_namespace(block, namespace).await - } + }, Action::QueryBlockState { block, index } => { self.headers.query_block_state(block, index).await - } + }, Action::QueryFeeState { block, builder } => { self.headers.query_fee_state(block, builder).await - } + }, } } } diff --git a/sequencer/src/bin/pub-key.rs b/sequencer/src/bin/pub-key.rs index 0c2bbc99cc..38efb49a86 100644 --- a/sequencer/src/bin/pub-key.rs +++ b/sequencer/src/bin/pub-key.rs @@ -49,7 +49,7 @@ fn main() { (false, PrivateKey::Bls(key)) => println!("{}", PubKey::from_private(&key)), (false, PrivateKey::Schnorr(key)) => { println!("{}", StateKeyPair::from_sign_key(key).ver_key()) - } + }, // Libp2p (true, PrivateKey::Bls(key)) => { @@ -57,9 +57,9 @@ fn main() { "{}", derive_libp2p_peer_id::(&key).expect("Failed to derive libp2p peer ID") ); - } + }, (true, _) => { eprintln!("Key type unsupported for libp2p peer ID derivation"); - } + }, } } diff --git a/sequencer/src/bin/reset-storage.rs b/sequencer/src/bin/reset-storage.rs index a635cda1b7..88d83fd1fd 100644 --- a/sequencer/src/bin/reset-storage.rs +++ b/sequencer/src/bin/reset-storage.rs @@ -35,11 +35,11 @@ async fn main() -> anyhow::Result<()> { Command::Fs(opt) => { tracing::warn!("resetting file system storage {opt:?}"); reset_storage(opt).await - } + }, Command::Sql(opt) => { tracing::warn!("resetting SQL storage {opt:?}"); reset_storage(*opt).await - } + }, } } diff --git a/sequencer/src/bin/submit-transactions.rs b/sequencer/src/bin/submit-transactions.rs index 87795b4761..1422141213 100644 --- a/sequencer/src/bin/submit-transactions.rs +++ b/sequencer/src/bin/submit-transactions.rs @@ -1,5 +1,9 @@ //! Utility program to submit random transactions to an Espresso Sequencer. +#[cfg(feature = "benchmarking")] +use std::fs::OpenOptions; +#[cfg(feature = "benchmarking")] +use std::num::NonZeroUsize; use std::{ collections::HashMap, time::{Duration, Instant}, @@ -7,6 +11,8 @@ use std::{ use clap::Parser; use committable::{Commitment, Committable}; +#[cfg(feature = "benchmarking")] +use csv::Writer; use espresso_types::{parse_duration, parse_size, SeqTypes, Transaction}; use futures::{ channel::mpsc::{self, Sender}, @@ -24,13 +30,6 @@ use tide_disco::{error::ServerError, App}; use tokio::{task::spawn, time::sleep}; use vbs::version::StaticVersionType; -#[cfg(feature = "benchmarking")] -use csv::Writer; -#[cfg(feature = "benchmarking")] -use std::fs::OpenOptions; -#[cfg(feature = "benchmarking")] -use std::num::NonZeroUsize; - /// Submit random transactions to an Espresso Sequencer. 
#[derive(Clone, Debug, Parser)] struct Options { @@ -229,7 +228,7 @@ async fn main() { Err(err) => { tracing::warn!("error getting block: {err}"); continue; - } + }, }; let received_at = Instant::now(); tracing::debug!("got block {}", block.height()); diff --git a/sequencer/src/bin/update-permissioned-stake-table.rs b/sequencer/src/bin/update-permissioned-stake-table.rs index 28e30767d6..37534bc66c 100644 --- a/sequencer/src/bin/update-permissioned-stake-table.rs +++ b/sequencer/src/bin/update-permissioned-stake-table.rs @@ -1,3 +1,5 @@ +use std::{path::PathBuf, time::Duration}; + use anyhow::Result; use clap::Parser; use espresso_types::parse_duration; @@ -6,7 +8,6 @@ use sequencer_utils::{ logging, stake_table::{update_stake_table, PermissionedStakeTableUpdate}, }; -use std::{path::PathBuf, time::Duration}; use url::Url; #[derive(Debug, Clone, Parser)] diff --git a/sequencer/src/bin/utils/keygen.rs b/sequencer/src/bin/utils/keygen.rs index a381dc0d25..5240493f16 100644 --- a/sequencer/src/bin/utils/keygen.rs +++ b/sequencer/src/bin/utils/keygen.rs @@ -32,7 +32,7 @@ impl Scheme { Self::All => { Self::Bls.gen(seed, index, env_file)?; Self::Schnorr.gen(seed, index, env_file)?; - } + }, Self::Bls => { let (pub_key, priv_key) = BLSPubKey::generated_from_seed_indexed(seed, index); let priv_key = priv_key.to_tagged_base64()?; @@ -42,7 +42,7 @@ impl Scheme { "ESPRESSO_SEQUENCER_PRIVATE_STAKING_KEY={priv_key}" )?; tracing::info!(%pub_key, "generated staking key") - } + }, Self::Schnorr => { let key_pair = StateKeyPair::generate_from_seed_indexed(seed, index); let priv_key = key_pair.sign_key_ref().to_tagged_base64()?; @@ -53,7 +53,7 @@ impl Scheme { )?; writeln!(env_file, "ESPRESSO_SEQUENCER_PRIVATE_STATE_KEY={priv_key}")?; tracing::info!(pub_key = %key_pair.ver_key(), "generated state key"); - } + }, } Ok(()) } diff --git a/sequencer/src/bin/utils/main.rs b/sequencer/src/bin/utils/main.rs index 97ac6486ba..c3c803c204 100644 --- a/sequencer/src/bin/utils/main.rs +++ b/sequencer/src/bin/utils/main.rs @@ -1,7 +1,6 @@ //! 
sequencer utility programs use clap::{Parser, Subcommand}; - use sequencer_utils::logging; mod keygen; mod pubkey; @@ -34,7 +33,7 @@ async fn main() -> anyhow::Result<()> { Command::Pubkey(opt) => { pubkey::run(opt); Ok(()) - } + }, Command::ResetStorage(opt) => reset_storage::run(opt).await, } } diff --git a/sequencer/src/bin/utils/pubkey.rs b/sequencer/src/bin/utils/pubkey.rs index 84b65c5042..bd4156df8b 100644 --- a/sequencer/src/bin/utils/pubkey.rs +++ b/sequencer/src/bin/utils/pubkey.rs @@ -3,10 +3,11 @@ use std::str::FromStr; use anyhow::bail; use clap::Parser; use espresso_types::{PrivKey, PubKey}; -use hotshot::traits::implementations::derive_libp2p_peer_id; -use hotshot::types::SignatureKey; -use hotshot_types::light_client::StateSignKey; -use hotshot_types::{light_client::StateKeyPair, signature_key::BLSPubKey}; +use hotshot::{traits::implementations::derive_libp2p_peer_id, types::SignatureKey}; +use hotshot_types::{ + light_client::{StateKeyPair, StateSignKey}, + signature_key::BLSPubKey, +}; use tagged_base64::TaggedBase64; #[derive(Clone, Debug)] @@ -47,7 +48,7 @@ pub fn run(opt: Options) { (false, PrivateKey::Bls(key)) => println!("{}", PubKey::from_private(&key)), (false, PrivateKey::Schnorr(key)) => { println!("{}", StateKeyPair::from_sign_key(key).ver_key()) - } + }, // Libp2p (true, PrivateKey::Bls(key)) => { @@ -55,9 +56,9 @@ pub fn run(opt: Options) { "{}", derive_libp2p_peer_id::(&key).expect("Failed to derive libp2p peer ID") ); - } + }, (true, _) => { eprintln!("Key type unsupported for libp2p peer ID derivation"); - } + }, } } diff --git a/sequencer/src/bin/utils/reset_storage.rs b/sequencer/src/bin/utils/reset_storage.rs index b25c8a3c11..1938b5857e 100644 --- a/sequencer/src/bin/utils/reset_storage.rs +++ b/sequencer/src/bin/utils/reset_storage.rs @@ -1,10 +1,9 @@ +use clap::Subcommand; use sequencer::{ api::data_source::{DataSourceOptions, SequencerDataSource}, persistence, }; -use clap::Subcommand; - /// Options for resetting persistent storage. /// /// This will remove all the persistent storage of a sequencer node or marketplace solver, effectively resetting it to @@ -32,11 +31,11 @@ pub async fn run(opt: Commands) -> anyhow::Result<()> { SequencerStorage::Fs(opt) => { tracing::warn!("resetting sequencer file system storage {opt:?}"); reset_storage(opt).await - } + }, SequencerStorage::Sql(opt) => { tracing::warn!("resetting sequencer SQL storage {opt:?}"); reset_storage(*opt).await - } + }, }, Commands::Solver(opt) => { @@ -45,7 +44,7 @@ pub async fn run(opt: Commands) -> anyhow::Result<()> { opts.connect().await?; Ok(()) - } + }, } } diff --git a/sequencer/src/bin/verify-headers.rs b/sequencer/src/bin/verify-headers.rs index 4b999070a3..fb1c6096fe 100644 --- a/sequencer/src/bin/verify-headers.rs +++ b/sequencer/src/bin/verify-headers.rs @@ -1,6 +1,6 @@ //! Utility program to verify properties of headers sequenced by HotShot. -use std::{cmp::max, process::exit, time::Duration}; +use std::{cmp::max, process::exit, sync::Arc, time::Duration}; use clap::Parser; use espresso_types::{Header, L1BlockInfo}; @@ -9,7 +9,6 @@ use futures::future::join_all; use itertools::Itertools; use sequencer::SequencerApiVersion; use sequencer_utils::logging; -use std::sync::Arc; use surf_disco::Url; use tokio::time::sleep; use vbs::version::StaticVersionType; @@ -134,7 +133,7 @@ async fn get_header( // Back off a bit and then retry. 
sleep(Duration::from_millis(100)).await; - } + }, } } } @@ -147,12 +146,12 @@ async fn get_l1_block(l1: &Provider, height: u64) -> L1BlockInfo { tracing::warn!("L1 block {height} not yet available"); sleep(Duration::from_secs(1)).await; continue; - } + }, Err(err) => { tracing::warn!("error fetching L1 block {height}: {err}"); sleep(Duration::from_millis(100)).await; continue; - } + }, }; let Some(hash) = block.hash else { diff --git a/sequencer/src/catchup.rs b/sequencer/src/catchup.rs index facfb02bf7..f779a32f59 100644 --- a/sequencer/src/catchup.rs +++ b/sequencer/src/catchup.rs @@ -1,15 +1,13 @@ -use std::sync::Arc; +use std::{cmp::Ordering, collections::HashMap, fmt::Display, sync::Arc, time::Duration}; use anyhow::{anyhow, bail, ensure, Context}; use async_lock::RwLock; use async_trait::async_trait; -use committable::Commitment; -use committable::Committable; -use espresso_types::config::PublicNetworkConfig; -use espresso_types::traits::SequencerPersistence; +use committable::{Commitment, Committable}; use espresso_types::{ - v0::traits::StateCatchup, v0_99::ChainConfig, BackoffParams, BlockMerkleTree, FeeAccount, - FeeAccountProof, FeeMerkleCommitment, FeeMerkleTree, Leaf2, NodeState, + config::PublicNetworkConfig, traits::SequencerPersistence, v0::traits::StateCatchup, + v0_99::ChainConfig, BackoffParams, BlockMerkleTree, FeeAccount, FeeAccountProof, + FeeMerkleCommitment, FeeMerkleTree, Leaf2, NodeState, }; use futures::future::{Future, FutureExt, TryFuture, TryFutureExt}; use hotshot_types::{ @@ -25,15 +23,13 @@ use itertools::Itertools; use jf_merkle_tree::{prelude::MerkleNode, ForgetableMerkleTreeScheme, MerkleTreeScheme}; use priority_queue::PriorityQueue; use serde::de::DeserializeOwned; -use std::{cmp::Ordering, collections::HashMap, fmt::Display, time::Duration}; use surf_disco::Request; use tide_disco::error::ServerError; use tokio::time::timeout; use url::Url; use vbs::version::StaticVersionType; -use crate::api::BlocksFrontier; -use crate::PubKey; +use crate::{api::BlocksFrontier, PubKey}; // This newtype is probably not worth having. It's only used to be able to log // URLs before doing requests. @@ -75,7 +71,7 @@ pub(crate) async fn local_and_remote( Err(err) => { tracing::warn!("not using local catchup: {err:#}"); Arc::new(remote) - } + }, } } @@ -164,15 +160,15 @@ impl StatePeers { requests.insert(id, true); res = Ok(t); break; - } + }, Ok(Err(err)) => { tracing::warn!(id, ?score, peer = %client.url, "error from peer: {err:#}"); requests.insert(id, false); - } + }, Err(_) => { tracing::warn!(id, ?score, peer = %client.url, ?timeout_dur, "request timed out"); requests.insert(id, false); - } + }, } } diff --git a/sequencer/src/context.rs b/sequencer/src/context.rs index 25c8fec67b..1ad2d1a2da 100644 --- a/sequencer/src/context.rs +++ b/sequencer/src/context.rs @@ -1,3 +1,9 @@ +use std::{ + fmt::{Debug, Display}, + sync::Arc, + time::Duration, +}; + use anyhow::Context; use async_lock::RwLock; use derivative::Derivative; @@ -29,8 +35,6 @@ use hotshot_types::{ }; use parking_lot::Mutex; use request_response::{network::Bytes, RequestResponse, RequestResponseConfig}; -use std::{fmt::Debug, time::Duration}; -use std::{fmt::Display, sync::Arc}; use tokio::{ spawn, sync::mpsc::{channel, Receiver}, diff --git a/sequencer/src/external_event_handler.rs b/sequencer/src/external_event_handler.rs index 86659f946f..5b7e8175e2 100644 --- a/sequencer/src/external_event_handler.rs +++ b/sequencer/src/external_event_handler.rs @@ -1,6 +1,7 @@ //! 
Should probably rename this to "external" or something -use crate::context::TaskList; +use std::{marker::PhantomData, sync::Arc}; + use anyhow::{Context, Result}; use espresso_types::{PubKey, SeqTypes}; use hotshot::types::Message; @@ -13,9 +14,10 @@ use hotshot_types::{ }; use request_response::network::Bytes; use serde::{Deserialize, Serialize}; -use std::{marker::PhantomData, sync::Arc}; use tokio::sync::mpsc::{Receiver, Sender}; +use crate::context::TaskList; + /// An external message that can be sent to or received from a node #[derive(Debug, Serialize, Deserialize, Clone)] pub enum ExternalMessage { @@ -83,7 +85,7 @@ impl ExternalEventHandler { self.request_response_sender .send(request_response.into()) .await?; - } + }, } Ok(()) } @@ -111,14 +113,14 @@ impl ExternalEventHandler { Err(err) => { tracing::warn!("Failed to serialize direct message: {}", err); continue; - } + }, }; // Send the message to the recipient if let Err(err) = network.direct_message(message_bytes, recipient).await { tracing::error!("Failed to send message: {:?}", err); }; - } + }, OutboundMessage::Broadcast(message) => { // Wrap it in the real message type @@ -133,7 +135,7 @@ impl ExternalEventHandler { Err(err) => { tracing::warn!("Failed to serialize broadcast message: {}", err); continue; - } + }, }; // Broadcast the message to the global topic @@ -143,7 +145,7 @@ impl ExternalEventHandler { { tracing::error!("Failed to broadcast message: {:?}", err); }; - } + }, } } } diff --git a/sequencer/src/genesis.rs b/sequencer/src/genesis.rs index 2e6235fc54..99431c8f6d 100644 --- a/sequencer/src/genesis.rs +++ b/sequencer/src/genesis.rs @@ -132,9 +132,8 @@ impl Genesis { mod version_ser { - use vbs::version::Version; - use serde::{de, Deserialize, Deserializer, Serializer}; + use vbs::version::Version; pub fn serialize(ver: &Version, serializer: S) -> Result where @@ -254,12 +253,12 @@ mod upgrade_ser { return Err(de::Error::custom( "both view and time mode parameters are set", )) - } + }, (None, None) => { return Err(de::Error::custom( "no view or time mode parameters provided", )) - } + }, (None, Some(v)) => { if v.start_proposing_view > v.stop_proposing_view { return Err(de::Error::custom( @@ -274,7 +273,7 @@ mod upgrade_ser { upgrade_type: fields.upgrade_type, }, ); - } + }, (Some(t), None) => { if t.start_proposing_time.unix_timestamp() > t.stop_proposing_time.unix_timestamp() @@ -291,7 +290,7 @@ mod upgrade_ser { upgrade_type: fields.upgrade_type.clone(), }, ); - } + }, } } @@ -321,25 +320,25 @@ impl Genesis { #[cfg(test)] mod test { - use ethers::middleware::Middleware; - use ethers::prelude::*; - use ethers::signers::Signer; - use ethers::utils::{Anvil, AnvilInstance}; - use sequencer_utils::deployer::test_helpers::{ - deploy_fee_contract, deploy_fee_contract_as_proxy, - }; use std::sync::Arc; use anyhow::Result; - use contract_bindings_ethers::fee_contract::FeeContract; use espresso_types::{ L1BlockInfo, TimeBasedUpgrade, Timestamp, UpgradeMode, UpgradeType, ViewBasedUpgrade, }; - - use sequencer_utils::deployer; - use sequencer_utils::ser::FromStringOrInteger; - use sequencer_utils::test_utils::setup_test; + use ethers::{ + middleware::Middleware, + prelude::*, + signers::Signer, + utils::{Anvil, AnvilInstance}, + }; + use sequencer_utils::{ + deployer, + deployer::test_helpers::{deploy_fee_contract, deploy_fee_contract_as_proxy}, + ser::FromStringOrInteger, + test_utils::setup_test, + }; use toml::toml; use super::*; diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs index 2d9c0a352e..1ebd47d0a7 
100644 --- a/sequencer/src/lib.rs +++ b/sequencer/src/lib.rs @@ -13,45 +13,44 @@ mod restart_tests; mod message_compat_tests; +use std::sync::Arc; + use anyhow::Context; use catchup::StatePeers; use context::SequencerContext; -use espresso_types::EpochCommittees; use espresso_types::{ - traits::EventConsumer, BackoffParams, L1ClientOptions, NodeState, PubKey, SeqTypes, - SolverAuctionResultsProvider, ValidatedState, + traits::EventConsumer, BackoffParams, EpochCommittees, L1ClientOptions, NodeState, PubKey, + SeqTypes, SolverAuctionResultsProvider, ValidatedState, }; use ethers_conv::ToAlloy; use genesis::L1Finalized; -use proposal_fetcher::ProposalFetcherConfig; -use std::sync::Arc; -use tokio::select; // Should move `STAKE_TABLE_CAPACITY` in the sequencer repo when we have variate stake table support use hotshot_libp2p_networking::network::behaviours::dht::store::persistent::DhtNoPersistence; use libp2p::Multiaddr; use network::libp2p::split_off_peer_id; use options::Identity; +use proposal_fetcher::ProposalFetcherConfig; use state_signature::static_stake_table_commitment; +use tokio::select; use tracing::info; use url::Url; pub mod persistence; pub mod state; +use std::{fmt::Debug, marker::PhantomData, time::Duration}; + use derivative::Derivative; use espresso_types::v0::traits::SequencerPersistence; pub use genesis::Genesis; -use hotshot::traits::implementations::{ - derive_libp2p_multiaddr, CombinedNetworks, GossipConfig, Libp2pNetwork, RequestResponseConfig, -}; use hotshot::{ traits::implementations::{ - derive_libp2p_peer_id, CdnMetricsValue, CdnTopic, KeyPair, MemoryNetwork, PushCdnNetwork, - WrappedSignatureKey, + derive_libp2p_multiaddr, derive_libp2p_peer_id, CdnMetricsValue, CdnTopic, + CombinedNetworks, GossipConfig, KeyPair, Libp2pNetwork, MemoryNetwork, PushCdnNetwork, + RequestResponseConfig, WrappedSignatureKey, }, types::SignatureKey, MarketplaceConfig, }; -use hotshot_orchestrator::client::get_complete_config; -use hotshot_orchestrator::client::OrchestratorClient; +use hotshot_orchestrator::client::{get_complete_config, OrchestratorClient}; use hotshot_types::{ data::ViewNumber, light_client::{StateKeyPair, StateSignKey}, @@ -66,8 +65,6 @@ use hotshot_types::{ }; pub use options::Options; use serde::{Deserialize, Serialize}; -use std::time::Duration; -use std::{fmt::Debug, marker::PhantomData}; use vbs::version::{StaticVersion, StaticVersionType}; pub mod network; @@ -312,7 +309,7 @@ pub async fn init_node( (Some(config), _) => { tracing::info!("loaded network config from storage, rejoining existing network"); (config, false) - } + }, // If we were told to fetch the config from an already-started peer, do so. (None, Some(peers)) => { tracing::info!(?peers, "loading network config from peers"); @@ -330,7 +327,7 @@ pub async fn init_node( ); persistence.save_config(&config).await?; (config, false) - } + }, // Otherwise, this is a fresh network; load from the orchestrator. 
(None, None) => { tracing::info!("loading network config from orchestrator"); @@ -356,7 +353,7 @@ pub async fn init_node( persistence.save_config(&config).await?; tracing::error!("all nodes connected"); (config, true) - } + }, }; if let Some(upgrade) = genesis.upgrades.get(&V::Upgrade::VERSION) { @@ -451,7 +448,7 @@ pub async fn init_node( ethers::types::U256::from(timestamp.unix_timestamp()).to_alloy(), ) .await - } + }, }; let mut genesis_state = ValidatedState { @@ -590,20 +587,22 @@ pub mod testing { use hotshot_testing::block_builder::{ BuilderTask, SimpleBuilderImplementation, TestBuilderImplementation, }; - use hotshot_types::traits::network::Topic; - use hotshot_types::traits::signature_key::StakeTableEntryType; use hotshot_types::{ event::LeafInfo, light_client::{CircuitField, StateKeyPair, StateVerKey}, - traits::signature_key::BuilderSignatureKey, - traits::{block_contents::BlockHeader, metrics::NoMetrics, stake_table::StakeTableScheme}, + traits::{ + block_contents::BlockHeader, + metrics::NoMetrics, + network::Topic, + signature_key::{BuilderSignatureKey, StakeTableEntryType}, + stake_table::StakeTableScheme, + }, HotShotConfig, PeerConfig, }; use marketplace_builder_core::{ hooks::NoHooks, service::{BuilderConfig, GlobalState}, }; - use portpicker::pick_unused_port; use tokio::spawn; use vbs::version::Version; diff --git a/sequencer/src/options.rs b/sequencer/src/options.rs index 93d52f3463..5cd5a8c008 100644 --- a/sequencer/src/options.rs +++ b/sequencer/src/options.rs @@ -1,8 +1,6 @@ #![allow(clippy::needless_lifetimes)] use core::fmt::Display; -use jf_signature::{bls_over_bn254, schnorr}; -use sequencer_utils::logging; use std::{ cmp::Ordering, collections::{HashMap, HashSet}, @@ -11,14 +9,16 @@ use std::{ path::PathBuf, time::Duration, }; -use tagged_base64::TaggedBase64; use anyhow::{bail, Context}; use clap::{error::ErrorKind, Args, FromArgMatches, Parser}; use derivative::Derivative; use espresso_types::{parse_duration, BackoffParams, L1ClientOptions}; use hotshot_types::{light_client::StateSignKey, signature_key::BLSPrivKey}; +use jf_signature::{bls_over_bn254, schnorr}; use libp2p::Multiaddr; +use sequencer_utils::logging; +use tagged_base64::TaggedBase64; use url::Url; use crate::{api, persistence, proposal_fetcher::ProposalFetcherConfig}; @@ -472,10 +472,10 @@ fn fmt_opt_urls( write!(fmt, "Some(")?; fmt_urls(urls, fmt)?; write!(fmt, ")")?; - } + }, None => { write!(fmt, "None")?; - } + }, } Ok(()) } @@ -536,13 +536,13 @@ impl ModuleArgs { match module { SequencerModule::Storage(m) => { curr = m.add(&mut modules.storage_fs, &mut provided)? - } + }, SequencerModule::StorageFs(m) => { curr = m.add(&mut modules.storage_fs, &mut provided)? - } + }, SequencerModule::StorageSql(m) => { curr = m.add(&mut modules.storage_sql, &mut provided)? - } + }, SequencerModule::Http(m) => curr = m.add(&mut modules.http, &mut provided)?, SequencerModule::Query(m) => curr = m.add(&mut modules.query, &mut provided)?, SequencerModule::Submit(m) => curr = m.add(&mut modules.submit, &mut provided)?, @@ -551,10 +551,10 @@ impl ModuleArgs { SequencerModule::Config(m) => curr = m.add(&mut modules.config, &mut provided)?, SequencerModule::HotshotEvents(m) => { curr = m.add(&mut modules.hotshot_events, &mut provided)? - } + }, SequencerModule::Explorer(m) => { curr = m.add(&mut modules.explorer, &mut provided)? 
- } + }, } } diff --git a/sequencer/src/persistence.rs b/sequencer/src/persistence.rs index 7ef5a3c7b4..5c39941223 100644 --- a/sequencer/src/persistence.rs +++ b/sequencer/src/persistence.rs @@ -43,8 +43,7 @@ mod testing { #[cfg(test)] #[espresso_macros::generic_tests] mod persistence_tests { - use std::{collections::BTreeMap, marker::PhantomData}; - use vbs::version::StaticVersionType; + use std::{collections::BTreeMap, marker::PhantomData, sync::Arc}; use anyhow::bail; use async_lock::RwLock; @@ -53,8 +52,10 @@ mod persistence_tests { traits::{EventConsumer, NullEventConsumer, PersistenceOptions}, Event, Leaf, Leaf2, NodeState, PubKey, SeqTypes, ValidatedState, }; - use hotshot::types::{BLSPubKey, SignatureKey}; - use hotshot::InitializerEpochInfo; + use hotshot::{ + types::{BLSPubKey, SignatureKey}, + InitializerEpochInfo, + }; use hotshot_example_types::node_types::TestVersions; use hotshot_query_service::testing::mocks::MockVersions; use hotshot_types::{ @@ -77,11 +78,9 @@ mod persistence_tests { vid::avidm::{init_avidm_param, AvidMScheme}, vote::HasViewNumber, }; - use sequencer_utils::test_utils::setup_test; - use std::sync::Arc; use testing::TestablePersistence; - use vbs::version::Version; + use vbs::version::{StaticVersionType, Version}; use super::*; @@ -809,7 +808,7 @@ mod persistence_tests { let leaf_chain = chain .iter() .take(2) - .map(|(leaf, qc, _, _)| (leaf_info(leaf.clone()), qc.clone())) + .map(|(leaf, qc, ..)| (leaf_info(leaf.clone()), qc.clone())) .collect::>(); tracing::info!("decide with event handling failure"); storage @@ -856,7 +855,7 @@ mod persistence_tests { let leaf_chain = chain .iter() .skip(2) - .map(|(leaf, qc, _, _)| (leaf_info(leaf.clone()), qc.clone())) + .map(|(leaf, qc, ..)| (leaf_info(leaf.clone()), qc.clone())) .collect::>(); tracing::info!("decide successfully"); storage @@ -901,7 +900,7 @@ mod persistence_tests { tracing::info!("check decide event"); let leaf_chain = consumer.leaf_chain().await; assert_eq!(leaf_chain.len(), 4, "{leaf_chain:#?}"); - for ((leaf, _, _, _), info) in chain.iter().zip(leaf_chain.iter()) { + for ((leaf, ..), info) in chain.iter().zip(leaf_chain.iter()) { assert_eq!(info.leaf, *leaf); let decided_vid_share = info.vid_share.as_ref().unwrap(); let view_number = match decided_vid_share { diff --git a/sequencer/src/persistence/fs.rs b/sequencer/src/persistence/fs.rs index 9ee7110d9a..21c25850a9 100644 --- a/sequencer/src/persistence/fs.rs +++ b/sequencer/src/persistence/fs.rs @@ -1,8 +1,18 @@ +use std::{ + collections::{BTreeMap, HashSet}, + fs::{self, File, OpenOptions}, + io::{Read, Seek, SeekFrom, Write}, + ops::RangeInclusive, + path::{Path, PathBuf}, + sync::Arc, +}; + use anyhow::{anyhow, Context}; use async_lock::RwLock; use async_trait::async_trait; use clap::Parser; use espresso_types::{ + upgrade_commitment_map, v0::traits::{EventConsumer, PersistenceOptions, SequencerPersistence}, Leaf, Leaf2, NetworkConfig, Payload, SeqTypes, }; @@ -27,19 +37,9 @@ use hotshot_types::{ utils::View, vote::HasViewNumber, }; -use std::sync::Arc; -use std::{ - collections::{BTreeMap, HashSet}, - fs::{self, File, OpenOptions}, - io::{Read, Seek, SeekFrom, Write}, - ops::RangeInclusive, - path::{Path, PathBuf}, -}; use crate::ViewNumber; -use espresso_types::upgrade_commitment_map; - /// Options for file system backed persistence. 
#[derive(Parser, Clone, Debug)] pub struct Options { @@ -606,7 +606,7 @@ impl SequencerPersistence for Persistence { // managed to persist the decided leaves successfully, and the event processing will // just run again at the next decide. tracing::warn!(?view, "event processing failed: {err:#}"); - } + }, Ok(intervals) => { if let Err(err) = inner.collect_garbage(view, &intervals) { // Similarly, garbage collection is not an error. We have done everything we @@ -614,7 +614,7 @@ impl SequencerPersistence for Persistence { // error but do not return it. tracing::warn!(?view, "GC failed: {err:#}"); } - } + }, } Ok(()) @@ -846,7 +846,7 @@ impl SequencerPersistence for Persistence { // some unintended file whose name happened to match the naming convention. tracing::warn!(?view, "ignoring malformed quorum proposal file: {err:#}"); continue; - } + }, }; let proposal2 = convert_proposal(proposal); @@ -1508,32 +1508,27 @@ mod generic_tests { #[cfg(test)] mod test { - use espresso_types::{NodeState, PubKey}; + use std::marker::PhantomData; + + use committable::{Commitment, CommitmentBoundsArkless, Committable}; + use espresso_types::{Header, Leaf, NodeState, PubKey, ValidatedState}; use hotshot::types::SignatureKey; use hotshot_example_types::node_types::TestVersions; use hotshot_query_service::testing::mocks::MockVersions; - use hotshot_types::data::{vid_commitment, QuorumProposal2}; - use hotshot_types::traits::node_implementation::Versions; - - use hotshot_types::vid::advz::advz_scheme; + use hotshot_types::{ + data::{vid_commitment, QuorumProposal2}, + simple_certificate::QuorumCertificate, + simple_vote::QuorumData, + traits::{node_implementation::Versions, EncodeBytes}, + vid::advz::advz_scheme, + }; + use jf_vid::VidScheme; use sequencer_utils::test_utils::setup_test; - use vbs::version::StaticVersionType; - use serde_json::json; - use std::marker::PhantomData; + use vbs::version::StaticVersionType; use super::*; - use crate::persistence::testing::TestablePersistence; - - use crate::BLSPubKey; - use committable::Committable; - use committable::{Commitment, CommitmentBoundsArkless}; - use espresso_types::{Header, Leaf, ValidatedState}; - - use hotshot_types::{ - simple_certificate::QuorumCertificate, simple_vote::QuorumData, traits::EncodeBytes, - }; - use jf_vid::VidScheme; + use crate::{persistence::testing::TestablePersistence, BLSPubKey}; #[test] fn test_config_migrations_add_builder_urls() { diff --git a/sequencer/src/persistence/no_storage.rs b/sequencer/src/persistence/no_storage.rs index c35d9e6266..49f00bb46e 100644 --- a/sequencer/src/persistence/no_storage.rs +++ b/sequencer/src/persistence/no_storage.rs @@ -1,6 +1,8 @@ //! Mock implementation of persistence, for testing. 
#![cfg(any(test, feature = "testing"))] +use std::{collections::BTreeMap, sync::Arc}; + use anyhow::bail; use async_trait::async_trait; use espresso_types::{ @@ -21,8 +23,6 @@ use hotshot_types::{ simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate2, UpgradeCertificate}, utils::View, }; -use std::collections::BTreeMap; -use std::sync::Arc; use crate::{NodeType, SeqTypes, ViewNumber}; diff --git a/sequencer/src/persistence/sql.rs b/sequencer/src/persistence/sql.rs index 59d834caaf..a49c744313 100644 --- a/sequencer/src/persistence/sql.rs +++ b/sequencer/src/persistence/sql.rs @@ -1,4 +1,5 @@ -use crate::{catchup::SqlStateCatchup, NodeType, SeqTypes, ViewNumber}; +use std::{collections::BTreeMap, path::PathBuf, str::FromStr, sync::Arc, time::Duration}; + use anyhow::{bail, Context}; use async_trait::async_trait; use clap::Parser; @@ -31,7 +32,6 @@ use hotshot_query_service::{ merklized_state::MerklizedState, VidCommon, }; -use hotshot_types::drb::DrbResult; use hotshot_types::{ consensus::CommitmentMap, data::{ @@ -39,6 +39,7 @@ use hotshot_types::{ DaProposal, DaProposal2, EpochNumber, QuorumProposal, QuorumProposalWrapper, VidCommitment, VidDisperseShare, }, + drb::DrbResult, event::{Event, EventType, HotShotAction, LeafInfo}, message::{convert_proposal, Proposal}, simple_certificate::{ @@ -52,9 +53,9 @@ use hotshot_types::{ vote::HasViewNumber, }; use itertools::Itertools; -use sqlx::Row; -use sqlx::{query, Executor}; -use std::{collections::BTreeMap, path::PathBuf, str::FromStr, sync::Arc, time::Duration}; +use sqlx::{query, Executor, Row}; + +use crate::{catchup::SqlStateCatchup, NodeType, SeqTypes, ViewNumber}; /// Options for Postgres-backed persistence. #[derive(Parser, Clone, Derivative)] @@ -662,7 +663,7 @@ impl Persistence { // we do have. 
tracing::warn!("error loading row: {err:#}"); break; - } + }, }; let leaf_data: Vec = row.get("leaf"); @@ -1960,7 +1961,7 @@ impl Provider for Persistence { Err(err) => { tracing::warn!("could not open transaction: {err:#}"); return None; - } + }, }; let bytes = match query_as::<(Vec,)>( @@ -1975,7 +1976,7 @@ impl Provider for Persistence { Err(err) => { tracing::error!("error loading VID share: {err:#}"); return None; - } + }, }; let share: Proposal> = @@ -1984,7 +1985,7 @@ impl Provider for Persistence { Err(err) => { tracing::warn!("error decoding VID share: {err:#}"); return None; - } + }, }; match share.data { @@ -2004,7 +2005,7 @@ impl Provider for Persistence { Err(err) => { tracing::warn!("could not open transaction: {err:#}"); return None; - } + }, }; let bytes = match query_as::<(Vec,)>( @@ -2019,7 +2020,7 @@ impl Provider for Persistence { Err(err) => { tracing::warn!("error loading DA proposal: {err:#}"); return None; - } + }, }; let proposal: Proposal> = match bincode::deserialize(&bytes) @@ -2028,7 +2029,7 @@ impl Provider for Persistence { Err(err) => { tracing::error!("error decoding DA proposal: {err:#}"); return None; - } + }, }; Some(Payload::from_bytes( @@ -2047,7 +2048,7 @@ impl Provider> for Persistence { Err(err) => { tracing::warn!("could not open transaction: {err:#}"); return None; - } + }, }; let (leaf, qc) = match fetch_leaf_from_proposals(&mut tx, req).await { @@ -2055,7 +2056,7 @@ impl Provider> for Persistence { Err(err) => { tracing::info!("requested leaf not found in undecided proposals: {err:#}"); return None; - } + }, }; match LeafQueryData::new(leaf, qc) { @@ -2063,7 +2064,7 @@ impl Provider> for Persistence { Err(err) => { tracing::warn!("fetched invalid leaf: {err:#}"); None - } + }, } } } @@ -2155,8 +2156,6 @@ mod generic_tests { #[cfg(test)] mod test { - use super::*; - use crate::{persistence::testing::TestablePersistence, BLSPubKey, PubKey}; use committable::{Commitment, CommitmentBoundsArkless}; use espresso_types::{traits::NullEventConsumer, Header, Leaf, NodeState, ValidatedState}; use futures::stream::TryStreamExt; @@ -2182,6 +2181,9 @@ mod test { use sequencer_utils::test_utils::setup_test; use vbs::version::StaticVersionType; + use super::*; + use crate::{persistence::testing::TestablePersistence, BLSPubKey, PubKey}; + #[tokio::test(flavor = "multi_thread")] async fn test_quorum_proposals_leaf_hash_migration() { setup_test(); diff --git a/sequencer/src/proposal_fetcher.rs b/sequencer/src/proposal_fetcher.rs index a5d143188a..2a783914fb 100644 --- a/sequencer/src/proposal_fetcher.rs +++ b/sequencer/src/proposal_fetcher.rs @@ -1,4 +1,4 @@ -use std::sync::Arc; +use std::{sync::Arc, time::Duration}; use anyhow::Context; use async_channel::{Receiver, Sender}; @@ -19,7 +19,6 @@ use hotshot_types::{ }, utils::{View, ViewInner}, }; -use std::time::Duration; use tokio::time::{sleep, timeout}; use tracing::Instrument; @@ -184,10 +183,10 @@ where let leaf = proposal.data.justify_qc().data.leaf_commit; self.request((view, leaf)).await; return Ok(()); - } + }, Err(err) => { tracing::info!("proposal missing from storage; fetching from network: {err:#}"); - } + }, } let future = self.consensus.read().await.request_proposal(view, leaf)?; diff --git a/sequencer/src/request_response/data_source.rs b/sequencer/src/request_response/data_source.rs index df7476677e..e4b86193b5 100644 --- a/sequencer/src/request_response/data_source.rs +++ b/sequencer/src/request_response/data_source.rs @@ -2,11 +2,12 @@ //! to calculate/derive a response for a specific request. 
In the confirmation layer the implementer //! would be something like a [`FeeMerkleTree`] for fee catchup -use super::request::{Request, Response}; use anyhow::Result; use async_trait::async_trait; use request_response::data_source::DataSource as DataSourceTrait; +use super::request::{Request, Response}; + #[derive(Clone, Debug)] pub struct DataSource {} diff --git a/sequencer/src/request_response/network.rs b/sequencer/src/request_response/network.rs index 38a80b2621..d05487d308 100644 --- a/sequencer/src/request_response/network.rs +++ b/sequencer/src/request_response/network.rs @@ -1,14 +1,12 @@ -use crate::external_event_handler::ExternalMessage; -use crate::external_event_handler::OutboundMessage; use anyhow::{Context, Result}; use async_trait::async_trait; -use espresso_types::PubKey; -use espresso_types::SeqTypes; +use espresso_types::{PubKey, SeqTypes}; use hotshot_types::message::MessageKind; -use request_response::network::Bytes; -use request_response::network::Sender as SenderTrait; +use request_response::network::{Bytes, Sender as SenderTrait}; use tokio::sync::mpsc; +use crate::external_event_handler::{ExternalMessage, OutboundMessage}; + /// A wrapper type that we will implement the `Sender` trait for #[derive(Clone)] pub struct Sender(mpsc::Sender); diff --git a/sequencer/src/request_response/recipient_source.rs b/sequencer/src/request_response/recipient_source.rs index a0dcbbd69d..ce9b5819d5 100644 --- a/sequencer/src/request_response/recipient_source.rs +++ b/sequencer/src/request_response/recipient_source.rs @@ -38,7 +38,7 @@ impl RecipientSourceTrait for RecipientSource { .iter() .map(|entry| entry.stake_table_entry.stake_key) .collect() - } + }, } } } diff --git a/sequencer/src/restart_tests.rs b/sequencer/src/restart_tests.rs index 7631120994..b7a365cd49 100755 --- a/sequencer/src/restart_tests.rs +++ b/sequencer/src/restart_tests.rs @@ -1,13 +1,7 @@ #![cfg(test)] -use super::*; -use crate::{ - api::{self, data_source::testing::TestableSequencerDataSource, options::Query}, - genesis::{L1Finalized, StakeTableConfig}, - network::cdn::{TestingDef, WrappedSignatureKey}, - testing::wait_for_decide_on_handle, - SequencerApiVersion, -}; +use std::{collections::HashSet, path::Path, time::Duration}; + use anyhow::bail; use cdn_broker::{ reexports::{crypto::signature::KeyPair, def::hook::NoMessageHook}, @@ -31,10 +25,10 @@ use hotshot_testing::{ block_builder::{SimpleBuilderImplementation, TestBuilderImplementation}, test_builder::BuilderChange, }; -use hotshot_types::network::{Libp2pConfig, NetworkConfig}; use hotshot_types::{ event::{Event, EventType}, light_client::StateKeyPair, + network::{Libp2pConfig, NetworkConfig}, traits::{node_implementation::ConsensusTime, signature_key::SignatureKey}, }; use itertools::Itertools; @@ -42,17 +36,24 @@ use options::Modules; use portpicker::pick_unused_port; use run::init_with_storage; use sequencer_utils::test_utils::setup_test; -use std::{collections::HashSet, path::Path, time::Duration}; use surf_disco::{error::ClientError, Url}; use tempfile::TempDir; -use tokio::time::timeout; use tokio::{ task::{spawn, JoinHandle}, - time::sleep, + time::{sleep, timeout}, }; use vbs::version::Version; use vec1::vec1; +use super::*; +use crate::{ + api::{self, data_source::testing::TestableSequencerDataSource, options::Query}, + genesis::{L1Finalized, StakeTableConfig}, + network::cdn::{TestingDef, WrappedSignatureKey}, + testing::wait_for_decide_on_handle, + SequencerApiVersion, +}; + async fn test_restart_helper(network: (usize, usize), restart: 
(usize, usize), cdn: bool) { setup_test(); @@ -358,7 +359,7 @@ impl TestNode { sleep(delay).await; delay *= 2; retries -= 1; - } + }, } }; diff --git a/sequencer/src/run.rs b/sequencer/src/run.rs index 73c17561ea..baf104ffed 100644 --- a/sequencer/src/run.rs +++ b/sequencer/src/run.rs @@ -1,12 +1,5 @@ use std::sync::Arc; -use super::{ - api::{self, data_source::DataSourceOptions}, - context::SequencerContext, - init_node, network, - options::{Modules, Options}, - persistence, Genesis, L1Params, NetworkParams, -}; use clap::Parser; #[allow(unused_imports)] use espresso_types::{ @@ -18,6 +11,14 @@ use hotshot::MarketplaceConfig; use hotshot_types::traits::{metrics::NoMetrics, node_implementation::Versions}; use vbs::version::StaticVersionType; +use super::{ + api::{self, data_source::DataSourceOptions}, + context::SequencerContext, + init_node, network, + options::{Modules, Options}, + persistence, Genesis, L1Params, NetworkParams, +}; + pub async fn main() -> anyhow::Result<()> { let opt = Options::parse(); opt.logging.init(); @@ -48,7 +49,7 @@ pub async fn main() -> anyhow::Result<()> { SequencerVersions::::new(), ) .await - } + }, #[cfg(feature = "fee")] (FeeVersion::VERSION, _) => { run( @@ -58,7 +59,7 @@ pub async fn main() -> anyhow::Result<()> { SequencerVersions::::new(), ) .await - } + }, #[cfg(feature = "marketplace")] (MarketplaceVersion::VERSION, _) => { run( @@ -68,7 +69,7 @@ pub async fn main() -> anyhow::Result<()> { SequencerVersions::::new(), ) .await - } + }, _ => panic!( "Invalid base ({base}) and upgrade ({upgrade}) versions specified in the toml file." ), @@ -237,7 +238,7 @@ where .boxed() }) .await? - } + }, None => { init_node( genesis, @@ -253,7 +254,7 @@ where proposal_fetcher_config, ) .await? - } + }, }; Ok(ctx) @@ -263,23 +264,22 @@ where mod test { use std::time::Duration; - use tokio::spawn; - - use crate::{ - api::options::Http, - genesis::{L1Finalized, StakeTableConfig}, - persistence::fs, - SequencerApiVersion, - }; use espresso_types::{MockSequencerVersions, PubKey}; use hotshot_types::{light_client::StateKeyPair, traits::signature_key::SignatureKey}; use portpicker::pick_unused_port; use sequencer_utils::test_utils::setup_test; use surf_disco::{error::ClientError, Client, Url}; use tempfile::TempDir; + use tokio::spawn; use vbs::version::Version; use super::*; + use crate::{ + api::options::Http, + genesis::{L1Finalized, StakeTableConfig}, + persistence::fs, + SequencerApiVersion, + }; #[tokio::test(flavor = "multi_thread")] async fn test_startup_before_orchestrator() { diff --git a/sequencer/src/state.rs b/sequencer/src/state.rs index e7f9160e41..4f5cb7ac25 100644 --- a/sequencer/src/state.rs +++ b/sequencer/src/state.rs @@ -6,8 +6,7 @@ use espresso_types::{ traits::StateCatchup, v0_99::ChainConfig, BlockMerkleTree, Delta, FeeAccount, FeeMerkleTree, Leaf2, ValidatedState, }; -use futures::future::Future; -use futures::StreamExt; +use futures::{future::Future, StreamExt}; use hotshot::traits::ValidatedState as HotShotState; use hotshot_query_service::{ availability::{AvailabilityDataSource, LeafQueryData}, @@ -300,12 +299,12 @@ where parent_leaf = leaf; parent_state = state; break; - } + }, Err(err) => { tracing::error!(height = leaf.height(), "failed to updated state: {err:#}"); // If we fail, delay for a second and retry. 
sleep(Duration::from_secs(1)).await; - } + }, } } } diff --git a/sequencer/src/state_signature.rs b/sequencer/src/state_signature.rs index 87ff5b1761..9eeb1bf798 100644 --- a/sequencer/src/state_signature.rs +++ b/sequencer/src/state_signature.rs @@ -97,10 +97,10 @@ impl StateSigner { tracing::warn!("Error posting signature to the relay server: {:?}", error); } } - } + }, Err(err) => { tracing::error!("Error generating light client state: {:?}", err) - } + }, } } diff --git a/sequencer/src/state_signature/relay_server.rs b/sequencer/src/state_signature/relay_server.rs index fcfda46374..c718d8a185 100644 --- a/sequencer/src/state_signature/relay_server.rs +++ b/sequencer/src/state_signature/relay_server.rs @@ -149,11 +149,11 @@ impl StateRelayServerDataSource for StateRelayServerState { StatusCode::BAD_REQUEST, "A signature of this light client state is already posted at this block height for this key.".to_owned(), )); - } + }, std::collections::hash_map::Entry::Vacant(entry) => { entry.insert(signature); bundle.accumulated_weight += *weight; - } + }, } if bundle.accumulated_weight >= self.threshold { @@ -204,7 +204,7 @@ where reason: err.to_string(), })?; Api::::new(toml)? - } + }, }; api.get("getlateststate", |_req, state| { diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 22b206cfbe..03ee163a1b 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1,9 +1,10 @@ +use std::{fmt, str::FromStr, time::Duration}; + use anyhow::{anyhow, Result}; use client::SequencerClient; use espresso_types::{FeeAmount, FeeVersion, MarketplaceVersion}; use ethers::prelude::*; use futures::future::join_all; -use std::{fmt, str::FromStr, time::Duration}; use surf_disco::Url; use tokio::time::{sleep, timeout}; use vbs::version::StaticVersionType; diff --git a/tests/smoke.rs b/tests/smoke.rs index d154fc03dd..bbb258ca9e 100644 --- a/tests/smoke.rs +++ b/tests/smoke.rs @@ -1,7 +1,9 @@ -use crate::common::TestConfig; +use std::time::Instant; + use anyhow::Result; use futures::StreamExt; -use std::time::Instant; + +use crate::common::TestConfig; /// We allow for no change in state across this many consecutive iterations. 
const MAX_STATE_NOT_INCREMENTING: u8 = 1; diff --git a/tests/upgrades.rs b/tests/upgrades.rs index e680d904be..1f0c8e2df1 100644 --- a/tests/upgrades.rs +++ b/tests/upgrades.rs @@ -1,9 +1,10 @@ -use crate::common::TestConfig; use anyhow::Result; use espresso_types::{FeeVersion, MarketplaceVersion}; use futures::{future::join_all, StreamExt}; use vbs::version::StaticVersionType; +use crate::common::TestConfig; + const SEQUENCER_BLOCKS_TIMEOUT: u64 = 200; #[tokio::test(flavor = "multi_thread")] diff --git a/types/src/eth_signature_key.rs b/types/src/eth_signature_key.rs index e5c3de4554..be0de6abbe 100644 --- a/types/src/eth_signature_key.rs +++ b/types/src/eth_signature_key.rs @@ -12,8 +12,7 @@ use ethers::{ types::{Address, Signature}, utils::public_key_to_address, }; -use hotshot_types::traits::signature_key::BuilderSignatureKey; -use hotshot_types::traits::signature_key::PrivateSignatureKey; +use hotshot_types::traits::signature_key::{BuilderSignatureKey, PrivateSignatureKey}; use serde::{Deserialize, Serialize}; use thiserror::Error; diff --git a/types/src/v0/config.rs b/types/src/v0/config.rs index 54f634ee86..860871f0ca 100644 --- a/types/src/v0/config.rs +++ b/types/src/v0/config.rs @@ -1,15 +1,17 @@ use std::{num::NonZeroUsize, time::Duration}; use anyhow::Context; -use vec1::Vec1; - -use crate::PubKey; -use hotshot_types::network::{ - BuilderType, CombinedNetworkConfig, Libp2pConfig, RandomBuilderConfig, +use hotshot_types::{ + network::{ + BuilderType, CombinedNetworkConfig, Libp2pConfig, NetworkConfig, RandomBuilderConfig, + }, + HotShotConfig, PeerConfig, ValidatorConfig, }; -use hotshot_types::{network::NetworkConfig, HotShotConfig, PeerConfig, ValidatorConfig}; use serde::{Deserialize, Serialize}; use tide_disco::Url; +use vec1::Vec1; + +use crate::PubKey; /// This struct defines the public Hotshot validator configuration. /// Private key and state key pairs are excluded for security reasons. diff --git a/types/src/v0/impls/auction.rs b/types/src/v0/impls/auction.rs index 71b5a5592d..53a7812fff 100644 --- a/types/src/v0/impls/auction.rs +++ b/types/src/v0/impls/auction.rs @@ -1,9 +1,5 @@ -use super::{state::ValidatedState, MarketplaceVersion}; -use crate::{ - eth_signature_key::{EthKeyPair, SigningError}, - v0_99::{BidTx, BidTxBody, FullNetworkTx, SolverAuctionResults}, - FeeAccount, FeeAmount, FeeError, FeeInfo, NamespaceId, -}; +use std::str::FromStr; + use anyhow::Context; use async_trait::async_trait; use committable::{Commitment, Committable}; @@ -15,11 +11,17 @@ use hotshot_types::{ signature_key::BuilderSignatureKey, }, }; -use std::str::FromStr; use thiserror::Error; use tide_disco::error::ServerError; use url::Url; +use super::{state::ValidatedState, MarketplaceVersion}; +use crate::{ + eth_signature_key::{EthKeyPair, SigningError}, + v0_99::{BidTx, BidTxBody, FullNetworkTx, SolverAuctionResults}, + FeeAccount, FeeAmount, FeeError, FeeInfo, NamespaceId, +}; + impl FullNetworkTx { /// Proxy for `execute` method of each transaction variant. pub fn execute(&self, state: &mut ValidatedState) -> Result<(), ExecutionError> { diff --git a/types/src/v0/impls/block/full_payload/ns_proof.rs b/types/src/v0/impls/block/full_payload/ns_proof.rs index 837e81885e..2b8eeaf567 100644 --- a/types/src/v0/impls/block/full_payload/ns_proof.rs +++ b/types/src/v0/impls/block/full_payload/ns_proof.rs @@ -109,25 +109,25 @@ impl NsProof { ) .ok()? 
// error: internal to payload_verify() .ok()?; // verification failure - } - (None, true) => {} // 0-length namespace, nothing to verify + }, + (None, true) => {}, // 0-length namespace, nothing to verify (None, false) => { tracing::error!( "ns verify: missing proof for nonempty ns payload range {:?}", range ); return None; - } + }, (Some(_), true) => { tracing::error!("ns verify: unexpected proof for empty ns payload range"); return None; - } + }, } // verification succeeded, return some data let ns_id = ns_table.read_ns_id_unchecked(&self.ns_index); Some((self.ns_payload.export_all_txs(&ns_id), ns_id)) - } + }, VidCommitment::V1(_) => None, } } diff --git a/types/src/v0/impls/block/full_payload/ns_proof/test.rs b/types/src/v0/impls/block/full_payload/ns_proof/test.rs index 6b12ff2fb5..7e917f91ff 100644 --- a/types/src/v0/impls/block/full_payload/ns_proof/test.rs +++ b/types/src/v0/impls/block/full_payload/ns_proof/test.rs @@ -1,6 +1,5 @@ use futures::future; -use hotshot::helpers::initialize_logging; -use hotshot::traits::BlockPayload; +use hotshot::{helpers::initialize_logging, traits::BlockPayload}; use hotshot_types::{ data::VidCommitment, traits::EncodeBytes, diff --git a/types/src/v0/impls/block/full_payload/payload.rs b/types/src/v0/impls/block/full_payload/payload.rs index 3707ab3a57..523aea7270 100644 --- a/types/src/v0/impls/block/full_payload/payload.rs +++ b/types/src/v0/impls/block/full_payload/payload.rs @@ -3,8 +3,8 @@ use std::{collections::BTreeMap, sync::Arc}; use async_trait::async_trait; use committable::Committable; use hotshot_query_service::availability::QueryablePayload; -use hotshot_types::data::ViewNumber; use hotshot_types::{ + data::ViewNumber, traits::{BlockPayload, EncodeBytes}, utils::BuilderCommitment, vid::advz::{ADVZCommon, ADVZScheme}, @@ -13,12 +13,11 @@ use jf_vid::VidScheme; use sha2::Digest; use thiserror::Error; -use crate::Transaction; use crate::{ v0::impls::{NodeState, ValidatedState}, v0_1::ChainConfig, Index, Iter, NamespaceId, NsIndex, NsPayload, NsPayloadBuilder, NsPayloadRange, NsTable, - NsTableBuilder, Payload, PayloadByteLen, SeqTypes, TxProof, + NsTableBuilder, Payload, PayloadByteLen, SeqTypes, Transaction, TxProof, }; #[derive(serde::Deserialize, serde::Serialize, Error, Debug, Eq, PartialEq)] @@ -281,7 +280,7 @@ impl PayloadByteLen { ADVZScheme::get_payload_byte_len(common) ); return false; - } + }, }; self.0 == expected diff --git a/types/src/v0/impls/block/namespace_payload/tx_proof.rs b/types/src/v0/impls/block/namespace_payload/tx_proof.rs index 5c2026088a..370e9da08b 100644 --- a/types/src/v0/impls/block/namespace_payload/tx_proof.rs +++ b/types/src/v0/impls/block/namespace_payload/tx_proof.rs @@ -199,19 +199,19 @@ impl TxProof { { return Some(false); } - } - (None, true) => {} // 0-length tx, nothing to verify + }, + (None, true) => {}, // 0-length tx, nothing to verify (None, false) => { tracing::error!( "tx verify: missing proof for nonempty tx payload range {:?}", range ); return None; - } + }, (Some(_), true) => { tracing::error!("tx verify: unexpected proof for empty tx payload range"); return None; - } + }, } } diff --git a/types/src/v0/impls/chain_config.rs b/types/src/v0/impls/chain_config.rs index d29ad6bf2b..32fa4d8b0c 100644 --- a/types/src/v0/impls/chain_config.rs +++ b/types/src/v0/impls/chain_config.rs @@ -1,11 +1,12 @@ -use crate::{BlockSize, ChainId}; +use std::str::FromStr; + use ethers::types::U256; use sequencer_utils::{ impl_serde_from_string_or_integer, impl_to_fixed_bytes, ser::FromStringOrInteger, }; -use 
std::str::FromStr; use super::parse_size; +use crate::{BlockSize, ChainId}; impl_serde_from_string_or_integer!(ChainId); impl_to_fixed_bytes!(ChainId, U256); @@ -74,9 +75,8 @@ impl FromStringOrInteger for BlockSize { #[cfg(test)] mod tests { - use crate::v0_99::{ChainConfig, ResolvableChainConfig}; - use super::*; + use crate::v0_99::{ChainConfig, ResolvableChainConfig}; #[test] fn test_chainid_serde_json_as_decimal() { diff --git a/types/src/v0/impls/fee_info.rs b/types/src/v0/impls/fee_info.rs index d1d61dd42e..be5ad34330 100644 --- a/types/src/v0/impls/fee_info.rs +++ b/types/src/v0/impls/fee_info.rs @@ -1,3 +1,5 @@ +use std::str::FromStr; + use anyhow::{bail, ensure, Context}; use ark_serialize::{ CanonicalDeserialize, CanonicalSerialize, Compress, Read, SerializationError, Valid, Validate, @@ -22,7 +24,6 @@ use num_traits::CheckedSub; use sequencer_utils::{ impl_serde_from_string_or_integer, impl_to_fixed_bytes, ser::FromStringOrInteger, }; -use std::str::FromStr; use thiserror::Error; use crate::{ @@ -390,7 +391,7 @@ impl FeeAccountProof { .elem() .context("presence proof is missing account balance")? .0) - } + }, FeeMerkleProof::Absence(proof) => { let tree = FeeMerkleTree::from_commitment(comm); ensure!( @@ -398,7 +399,7 @@ impl FeeAccountProof { "invalid proof" ); Ok(0.into()) - } + }, } } @@ -413,11 +414,11 @@ impl FeeAccountProof { proof, )?; Ok(()) - } + }, FeeMerkleProof::Absence(proof) => { tree.non_membership_remember(FeeAccount(self.account), proof)?; Ok(()) - } + }, } } } @@ -442,14 +443,14 @@ pub fn retain_accounts( // This remember cannot fail, since we just constructed a valid proof, and are // remembering into a tree with the same commitment. snapshot.remember(account, *elem, proof).unwrap(); - } + }, LookupResult::NotFound(proof) => { // Likewise this cannot fail. snapshot.non_membership_remember(account, proof).unwrap() - } + }, LookupResult::NotInMemory => { bail!("missing account {account}"); - } + }, } } @@ -460,9 +461,8 @@ pub fn retain_accounts( mod test { use ethers::abi::Address; - use crate::{FeeAccount, FeeAmount, FeeInfo}; - use super::IterableFeeInfo; + use crate::{FeeAccount, FeeAmount, FeeInfo}; #[test] fn test_iterable_fee_info() { diff --git a/types/src/v0/impls/header.rs b/types/src/v0/impls/header.rs index bc3e982bf7..3f765bca55 100644 --- a/types/src/v0/impls/header.rs +++ b/types/src/v0/impls/header.rs @@ -1,3 +1,5 @@ +use std::fmt; + use anyhow::{ensure, Context}; use ark_serialize::CanonicalSerialize; use committable::{Commitment, Committable, RawCommitmentBuilder}; @@ -19,11 +21,11 @@ use serde::{ Deserialize, Deserializer, Serialize, Serializer, }; use serde_json::{Map, Value}; -use std::fmt; use thiserror::Error; use time::OffsetDateTime; use vbs::version::{StaticVersionType, Version}; +use super::{instance_state::NodeState, state::ValidatedState}; use crate::{ v0::{ header::{EitherOrVersion, VersionedHeader}, @@ -35,8 +37,6 @@ use crate::{ Header, L1BlockInfo, L1Snapshot, Leaf2, NamespaceId, NsTable, SeqTypes, UpgradeType, }; -use super::{instance_state::NodeState, state::ValidatedState}; - impl v0_1::Header { pub(crate) fn commit(&self) -> Commitment
{ let mut bmt_bytes = vec![]; @@ -174,7 +174,7 @@ impl<'de> Deserialize<'de> for Header { )), EitherOrVersion::Version(v) => { Err(serde::de::Error::custom(format!("invalid version {v:?}"))) - } + }, } } @@ -211,7 +211,7 @@ impl<'de> Deserialize<'de> for Header { )), EitherOrVersion::Version(v) => { Err(de::Error::custom(format!("invalid version {v:?}"))) - } + }, chain_config => Err(de::Error::custom(format!( "expected version, found chain_config {chain_config:?}" ))), @@ -604,7 +604,7 @@ impl Header { .as_ref() .fetch_chain_config(validated_cf.commit()) .await - } + }, } } } @@ -1176,14 +1176,12 @@ mod test_headers { use ethers::{types::Address, utils::Anvil}; use hotshot_query_service::testing::mocks::MockVersions; use hotshot_types::traits::signature_key::BuilderSignatureKey; - use sequencer_utils::test_utils::setup_test; use v0_1::{BlockMerkleTree, FeeMerkleTree, L1Client}; use vbs::{bincode_serializer::BincodeSerializer, version::StaticVersion, BinarySerializer}; - use crate::{eth_signature_key::EthKeyPair, mock::MockStateCatchup, Leaf}; - use super::*; + use crate::{eth_signature_key::EthKeyPair, mock::MockStateCatchup, Leaf}; #[derive(Debug, Default)] #[must_use] diff --git a/types/src/v0/impls/instance_state.rs b/types/src/v0/impls/instance_state.rs index 0330695769..cf5c2dbeaf 100644 --- a/types/src/v0/impls/instance_state.rs +++ b/types/src/v0/impls/instance_state.rs @@ -1,15 +1,15 @@ -use crate::v0::{ - traits::StateCatchup, v0_99::ChainConfig, GenesisHeader, L1BlockInfo, L1Client, PubKey, - Timestamp, Upgrade, UpgradeMode, -}; -use hotshot_types::traits::states::InstanceState; -use hotshot_types::HotShotConfig; use std::{collections::BTreeMap, sync::Arc}; + +use hotshot_types::{traits::states::InstanceState, HotShotConfig}; use vbs::version::Version; #[cfg(any(test, feature = "testing"))] use vbs::version::{StaticVersion, StaticVersionType}; use super::state::ValidatedState; +use crate::v0::{ + traits::StateCatchup, v0_99::ChainConfig, GenesisHeader, L1BlockInfo, L1Client, PubKey, + Timestamp, Upgrade, UpgradeMode, +}; /// Represents the immutable state of a node. 
/// @@ -174,7 +174,7 @@ impl Upgrade { config.stop_proposing_time = u64::MAX; config.start_voting_time = 0; config.stop_voting_time = u64::MAX; - } + }, UpgradeMode::Time(t) => { config.start_proposing_time = t.start_proposing_time.unix_timestamp(); config.stop_proposing_time = t.stop_proposing_time.unix_timestamp(); @@ -187,7 +187,7 @@ impl Upgrade { config.stop_proposing_view = u64::MAX; config.start_voting_view = 0; config.stop_voting_view = u64::MAX; - } + }, } } } diff --git a/types/src/v0/impls/l1.rs b/types/src/v0/impls/l1.rs index 9661dd47ec..3aa0eb930d 100644 --- a/types/src/v0/impls/l1.rs +++ b/types/src/v0/impls/l1.rs @@ -1,3 +1,12 @@ +use std::{ + cmp::{min, Ordering}, + num::NonZeroUsize, + pin::Pin, + result::Result as StdResult, + sync::Arc, + time::Instant, +}; + use alloy::{ eips::BlockId, hex, @@ -28,14 +37,6 @@ use futures::{ use hotshot_types::traits::metrics::Metrics; use lru::LruCache; use parking_lot::RwLock; -use std::result::Result as StdResult; -use std::{ - cmp::{min, Ordering}, - num::NonZeroUsize, - pin::Pin, - sync::Arc, - time::Instant, -}; use tokio::{ spawn, sync::{Mutex, MutexGuard, Notify}, @@ -312,7 +313,7 @@ impl Service for SwitchingTransport { // If it's okay, log the success to the status current_transport.status.write().log_success(); Ok(res) - } + }, Err(err) => { // Increment the failure metric if let Some(f) = self_clone @@ -364,7 +365,7 @@ impl Service for SwitchingTransport { } Err(err) - } + }, } }) } @@ -737,12 +738,12 @@ impl L1Client { ); self.retry_delay().await; continue; - } + }, Err(err) => { tracing::warn!(number, "failed to get finalized L1 block: {err:#}"); self.retry_delay().await; continue; - } + }, }; break L1BlockInfo { number: block.header.number, @@ -815,7 +816,7 @@ impl L1Client { Err(err) => { tracing::warn!(from, to, %err, "Fee L1Event Error"); sleep(retry_delay).await; - } + }, } } } @@ -935,7 +936,7 @@ async fn get_finalized_block( #[cfg(test)] mod test { - use std::ops::Add; + use std::{ops::Add, time::Duration}; use ethers::{ middleware::SignerMiddleware, @@ -948,7 +949,6 @@ mod test { use hotshot_contract_adapter::stake_table::NodeInfoJf; use portpicker::pick_unused_port; use sequencer_utils::test_utils::setup_test; - use std::time::Duration; use time::OffsetDateTime; use super::*; diff --git a/types/src/v0/impls/mod.rs b/types/src/v0/impls/mod.rs index cc1f2fe1c9..8c58f6bcfe 100644 --- a/types/src/v0/impls/mod.rs +++ b/types/src/v0/impls/mod.rs @@ -14,12 +14,11 @@ mod transaction; pub use auction::SolverAuctionResultsProvider; pub use fee_info::{retain_accounts, FeeError}; +#[cfg(any(test, feature = "testing"))] +pub use instance_state::mock; pub use instance_state::NodeState; pub use stake_table::*; pub use state::{ get_l1_deposits, BuilderValidationError, ProposalValidationError, StateValidationError, ValidatedState, }; - -#[cfg(any(test, feature = "testing"))] -pub use instance_state::mock; diff --git a/types/src/v0/impls/solver.rs b/types/src/v0/impls/solver.rs index e16fbc7f9f..da6f525e25 100644 --- a/types/src/v0/impls/solver.rs +++ b/types/src/v0/impls/solver.rs @@ -1,10 +1,8 @@ use committable::{Commitment, Committable}; use hotshot::types::SignatureKey; -use crate::v0::utils::Update; - use super::v0_99::{RollupRegistrationBody, RollupUpdatebody}; -use crate::v0::utils::Update::Set; +use crate::v0::utils::{Update, Update::Set}; impl Committable for RollupRegistrationBody { fn tag() -> String { @@ -54,7 +52,7 @@ impl Committable for RollupUpdatebody { comm = comm .u64_field("reserve_url", 2) 
.var_size_bytes(url.as_str().as_ref()) - } + }, Set(None) => comm = comm.u64_field("reserve_url", 1), Update::Skip => comm = comm.u64_field("reserve_url", 0), } diff --git a/types/src/v0/impls/stake_table.rs b/types/src/v0/impls/stake_table.rs index 7823fc04ec..d65d6f8335 100644 --- a/types/src/v0/impls/stake_table.rs +++ b/types/src/v0/impls/stake_table.rs @@ -1,7 +1,9 @@ use std::{ cmp::max, collections::{BTreeMap, BTreeSet, HashMap}, + fmt::Debug, num::NonZeroU64, + sync::Arc, }; use anyhow::Context; @@ -24,9 +26,7 @@ use hotshot_types::{ }, PeerConfig, }; - use itertools::Itertools; -use std::{fmt::Debug, sync::Arc}; use thiserror::Error; use super::{ diff --git a/types/src/v0/impls/state.rs b/types/src/v0/impls/state.rs index 8f0439d21e..b13a48fb17 100644 --- a/types/src/v0/impls/state.rs +++ b/types/src/v0/impls/state.rs @@ -1,3 +1,5 @@ +use std::ops::Add; + use anyhow::bail; use committable::{Commitment, Committable}; use ethers::types::Address; @@ -19,7 +21,6 @@ use jf_merkle_tree::{ }; use num_traits::CheckedSub; use serde::{Deserialize, Serialize}; -use std::ops::Add; use thiserror::Error; use time::OffsetDateTime; use vbs::version::Version; @@ -1118,7 +1119,7 @@ mod test { }), Header::V99(_) => { panic!("You called `Header.next()` on unimplemented version (v3)") - } + }, } } /// Replaces builder signature w/ invalid one. @@ -1147,7 +1148,7 @@ mod test { }), Header::V99(_) => { panic!("You called `Header.sign()` on unimplemented version (v3)") - } + }, } } diff --git a/types/src/v0/impls/transaction.rs b/types/src/v0/impls/transaction.rs index cd473d0398..ca09a1533a 100644 --- a/types/src/v0/impls/transaction.rs +++ b/types/src/v0/impls/transaction.rs @@ -3,9 +3,8 @@ use hotshot_query_service::explorer::ExplorerTransaction; use hotshot_types::traits::block_contents::Transaction as HotShotTransaction; use serde::{de::Error, Deserialize, Deserializer}; -use crate::{NamespaceId, Transaction}; - use super::{NsPayloadBuilder, NsTableBuilder}; +use crate::{NamespaceId, Transaction}; impl From for NamespaceId { fn from(value: u32) -> Self { diff --git a/types/src/v0/mod.rs b/types/src/v0/mod.rs index e5dbe0d015..578f0c363a 100644 --- a/types/src/v0/mod.rs +++ b/types/src/v0/mod.rs @@ -1,3 +1,5 @@ +use std::marker::PhantomData; + use hotshot_types::{ data::{EpochNumber, ViewNumber}, signature_key::BLSPubKey, @@ -7,7 +9,6 @@ use hotshot_types::{ }, }; use serde::{Deserialize, Serialize}; -use std::marker::PhantomData; pub mod config; mod header; @@ -15,6 +16,8 @@ mod impls; pub mod traits; mod utils; pub use header::Header; +#[cfg(any(test, feature = "testing"))] +pub use impls::mock; pub use impls::{ get_l1_deposits, retain_accounts, BuilderValidationError, EpochCommittees, FeeError, ProposalValidationError, StateValidationError, @@ -22,9 +25,6 @@ pub use impls::{ pub use utils::*; use vbs::version::{StaticVersion, StaticVersionType}; -#[cfg(any(test, feature = "testing"))] -pub use impls::mock; - // This is the single source of truth for minor versions supported by this major version. 
// // It is written as a higher-level macro which takes a macro invocation as an argument and appends diff --git a/types/src/v0/traits.rs b/types/src/v0/traits.rs index 4f628eeb63..e4f1389077 100644 --- a/types/src/v0/traits.rs +++ b/types/src/v0/traits.rs @@ -7,8 +7,6 @@ use async_trait::async_trait; use committable::{Commitment, Committable}; use futures::{FutureExt, TryFutureExt}; use hotshot::{types::EventType, HotShotInitializer, InitializerEpochInfo}; -use hotshot_types::drb::DrbResult; -use hotshot_types::traits::node_implementation::NodeType; use hotshot_types::{ consensus::CommitmentMap, data::{ @@ -16,13 +14,14 @@ use hotshot_types::{ DaProposal, DaProposal2, EpochNumber, QuorumProposal, QuorumProposal2, QuorumProposalWrapper, VidCommitment, VidDisperseShare, ViewNumber, }, + drb::DrbResult, event::{HotShotAction, LeafInfo}, message::{convert_proposal, Proposal, UpgradeLock}, simple_certificate::{ NextEpochQuorumCertificate2, QuorumCertificate, QuorumCertificate2, UpgradeCertificate, }, traits::{ - node_implementation::{ConsensusTime, Versions}, + node_implementation::{ConsensusTime, NodeType, Versions}, storage::Storage, ValidatedState as HotShotState, }, @@ -31,15 +30,14 @@ use hotshot_types::{ use itertools::Itertools; use serde::{de::DeserializeOwned, Serialize}; +use super::{ + impls::NodeState, utils::BackoffParams, EpochCommittees, EpochVersion, Leaf, SequencerVersions, +}; use crate::{ v0::impls::ValidatedState, v0_99::ChainConfig, BlockMerkleTree, Event, FeeAccount, FeeAccountProof, FeeMerkleCommitment, FeeMerkleTree, Leaf2, NetworkConfig, SeqTypes, }; -use super::{ - impls::NodeState, utils::BackoffParams, EpochCommittees, EpochVersion, Leaf, SequencerVersions, -}; - #[async_trait] pub trait StateCatchup: Send + Sync { async fn try_fetch_leaves(&self, retry: usize, height: u64) -> anyhow::Result>; @@ -381,7 +379,7 @@ impl StateCatchup for Vec { provider = provider.name(), "failed to fetch leaves: {err:#}" ); - } + }, } } @@ -416,7 +414,7 @@ impl StateCatchup for Vec { provider = provider.name(), "failed to fetch accounts: {err:#}" ); - } + }, } } @@ -443,7 +441,7 @@ impl StateCatchup for Vec { provider = provider.name(), "failed to fetch frontier: {err:#}" ); - } + }, } } @@ -463,7 +461,7 @@ impl StateCatchup for Vec { provider = provider.name(), "failed to fetch chain config: {err:#}" ); - } + }, } } @@ -561,11 +559,11 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { Some(view) => { tracing::info!(?view, "starting from saved view"); view - } + }, None => { tracing::info!("no saved view, starting from genesis"); ViewNumber::genesis() - } + }, }; let next_epoch_high_qc = self @@ -590,7 +588,7 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { let anchor_view = leaf.view_number(); (leaf, high_qc, Some(anchor_view)) - } + }, None => { tracing::info!("no saved leaf, starting from genesis leaf"); ( @@ -599,7 +597,7 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { QuorumCertificate2::genesis::(&genesis_validated_state, &state).await, None, ) - } + }, }; let validated_state = if leaf.block_header().height() == 0 { // If we are starting from genesis, we can provide the full state. 
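The `StateCatchup` hunks above only reorder imports and add trailing commas; the failover behavior is unchanged: each fetch method walks the provider list in order, logs a warning on failure, and gives up only once every provider has failed. A minimal sketch of that pattern, using illustrative names (`FetchProvider`, `fetch_from_any`) rather than the crate's actual trait:

use anyhow::{bail, Result};

/// Illustrative stand-in for a catchup provider; not the crate's real trait.
trait FetchProvider {
    fn name(&self) -> String;
    fn try_fetch_leaves(&self, height: u64) -> Result<Vec<u8>>;
}

/// Try each provider in order, skipping failures -- the same shape as the
/// `for provider in self { ... }` loops in the hunk above.
fn fetch_from_any(providers: &[Box<dyn FetchProvider>], height: u64) -> Result<Vec<u8>> {
    for provider in providers {
        match provider.try_fetch_leaves(height) {
            Ok(leaves) => return Ok(leaves),
            Err(err) => {
                // Warn and fall through to the next provider instead of
                // aborting the whole catchup attempt.
                tracing::warn!(provider = %provider.name(), "failed to fetch leaves: {err:#}");
            },
        }
    }
    bail!("could not fetch leaves at height {height} from any provider")
}

Failing only after the loop exhausts every provider is what lets an individual provider be flaky without failing catchup as a whole; the trailing commas on the `Err` arms match the nightly rustfmt style this patch series applies throughout.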
diff --git a/types/src/v0/utils.rs b/types/src/v0/utils.rs index d2c32e598f..af751f99fd 100644 --- a/types/src/v0/utils.rs +++ b/types/src/v0/utils.rs @@ -1,3 +1,11 @@ +use std::{ + cmp::{min, Ordering}, + fmt::{self, Debug, Display, Formatter}, + num::ParseIntError, + str::FromStr, + time::Duration, +}; + use anyhow::Context; use bytesize::ByteSize; use clap::Parser; @@ -12,13 +20,6 @@ use hotshot_types::{ use rand::Rng; use sequencer_utils::{impl_serde_from_string_or_integer, ser::FromStringOrInteger}; use serde::{Deserialize, Serialize}; -use std::{ - cmp::{min, Ordering}, - fmt::{self, Debug, Display, Formatter}, - num::ParseIntError, - str::FromStr, - time::Duration, -}; use thiserror::Error; use time::{ format_description::well_known::Rfc3339 as TimestampFormat, macros::time, Date, OffsetDateTime, @@ -264,14 +265,14 @@ impl BackoffParams { Ok(res) => return Ok(res), Err(err) if self.disable => { return Err(err.context("Retryable operation failed; retries disabled")); - } + }, Err(err) => { tracing::warn!( "Retryable operation failed, will retry after {delay:?}: {err:#}" ); sleep(delay).await; delay = self.backoff(delay); - } + }, } } unreachable!() diff --git a/utils/src/deployer.rs b/utils/src/deployer.rs index b19f249d6b..6bb4702413 100644 --- a/utils/src/deployer.rs +++ b/utils/src/deployer.rs @@ -1,3 +1,5 @@ +use std::{collections::HashMap, io::Write, ops::Deref, sync::Arc, time::Duration}; + use alloy::{ contract::RawCallBuilder, network::{Ethereum, EthereumWallet}, @@ -24,7 +26,6 @@ use futures::future::{BoxFuture, FutureExt}; use hotshot_contract_adapter::light_client::{ LightClientConstructorArgs, ParsedLightClientState, ParsedStakeTableState, }; -use std::{collections::HashMap, io::Write, ops::Deref, sync::Arc, time::Duration}; use url::Url; /// Set of predeployed contracts. 
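The `BackoffParams::retry` hunk in the utils.rs diff above likewise changes only formatting; the loop itself is a plain exponential backoff with an escape hatch when retries are disabled. A hedged sketch of that loop under assumed names (`retry_with_backoff` and the `cap` parameter are illustrative, not the crate's actual API):

use std::time::Duration;

use tokio::time::sleep;

/// Double the delay up to a cap, standing in for the `delay = self.backoff(delay)`
/// step above (the cap is an assumption; the real backoff policy may differ).
fn next_delay(delay: Duration, cap: Duration) -> Duration {
    std::cmp::min(delay.saturating_mul(2), cap)
}

/// Retry a fallible async operation, sleeping between attempts. Mirrors the
/// shape of the hunk above: bail out immediately if retries are disabled,
/// otherwise warn, sleep, and grow the delay.
async fn retry_with_backoff<T, E, F, Fut>(
    mut op: F,
    mut delay: Duration,
    cap: Duration,
    disable: bool,
) -> Result<T, E>
where
    E: std::fmt::Display,
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<T, E>>,
{
    loop {
        match op().await {
            Ok(res) => return Ok(res),
            Err(err) if disable => return Err(err),
            Err(err) => {
                tracing::warn!("retryable operation failed, will retry after {delay:?}: {err}");
                sleep(delay).await;
                delay = next_delay(delay, cap);
            },
        }
    }
}

A call site would look like `retry_with_backoff(|| client.fetch(), Duration::from_millis(500), Duration::from_secs(5), false).await` under a tokio runtime; `client.fetch()` here is hypothetical.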
@@ -548,6 +549,8 @@ fn link_light_client_contract( #[cfg(any(test, feature = "testing"))] pub mod test_helpers { + use std::sync::Arc; + use anyhow::Context; use contract_bindings_ethers::{ erc1967_proxy::ERC1967Proxy, @@ -557,11 +560,9 @@ pub mod test_helpers { }; use ethers::prelude::*; use hotshot_contract_adapter::light_client::LightClientConstructorArgs; - use std::sync::Arc; - - use crate::deployer::link_light_client_contract; use super::{Contract, Contracts}; + use crate::deployer::link_light_client_contract; /// Deployment `LightClientMock.sol` as proxy for testing pub async fn deploy_light_client_contract_as_proxy_for_test( diff --git a/utils/src/lib.rs b/utils/src/lib.rs index 014cecec6d..b76ecf50d5 100644 --- a/utils/src/lib.rs +++ b/utils/src/lib.rs @@ -257,14 +257,14 @@ pub async fn init_signer(provider: &Url, mnemonic: &str, index: u32) -> Option { tracing::error!("error connecting to RPC {}: {}", provider, err); return None; - } + }, }; let chain_id = match provider.get_chainid().await { Ok(id) => id.as_u64(), Err(err) => { tracing::error!("error getting chain ID: {}", err); return None; - } + }, }; let mnemonic = match MnemonicBuilder::::default() .phrase(mnemonic) @@ -274,14 +274,14 @@ pub async fn init_signer(provider: &Url, mnemonic: &str, index: u32) -> Option { tracing::error!("error building wallet: {}", err); return None; - } + }, }; let wallet = match mnemonic.build() { Ok(wallet) => wallet, Err(err) => { tracing::error!("error opening wallet: {}", err); return None; - } + }, }; let wallet = wallet.with_chain_id(chain_id); Some(SignerMiddleware::new(provider, wallet)) @@ -367,7 +367,7 @@ where tracing::error!("contract revert: {:?}", e); } return Err(anyhow!("error sending transaction: {:?}", err)); - } + }, }; let hash = pending.tx_hash(); @@ -382,12 +382,12 @@ where Ok(Some(receipt)) => receipt, Ok(None) => { return Err(anyhow!("contract call {hash:x}: no receipt")); - } + }, Err(err) => { return Err(anyhow!( "contract call {hash:x}: error getting transaction receipt: {err}" )) - } + }, }; if receipt.status != Some(1.into()) { return Err(anyhow!("contract call {hash:x}: transaction reverted")); @@ -418,19 +418,19 @@ async fn wait_for_transaction_to_be_mined( if i >= log_retries { tracing::warn!("contract call {hash:?} (retry {i}/{retries}): error getting transaction status: {err}"); } - } + }, Ok(None) => { if i >= log_retries { tracing::warn!( "contract call {hash:?} (retry {i}/{retries}): missing from mempool" ); } - } + }, Ok(Some(tx)) if tx.block_number.is_none() => { if i >= log_retries { tracing::warn!("contract call {hash:?} (retry {i}/{retries}): pending"); } - } + }, Ok(Some(_)) => return true, } diff --git a/utils/src/stake_table.rs b/utils/src/stake_table.rs index 943c53f808..f82d6ac66c 100644 --- a/utils/src/stake_table.rs +++ b/utils/src/stake_table.rs @@ -1,3 +1,5 @@ +use std::{fs, path::Path, sync::Arc, time::Duration}; + /// Utilities for loading an initial permissioned stake table from a toml file. 
/// /// The initial stake table is passed to the permissioned stake table contract @@ -17,8 +19,6 @@ use hotshot_contract_adapter::stake_table::{bls_jf_to_sol, NodeInfoJf}; use hotshot_types::network::PeerConfigKeys; use url::Url; -use std::{fs, path::Path, sync::Arc, time::Duration}; - /// A stake table config stored in a file #[derive(serde::Serialize, serde::Deserialize, Debug, Clone)] #[serde(bound(deserialize = ""))] @@ -144,12 +144,15 @@ pub async fn update_stake_table( #[cfg(test)] mod test { - use crate::stake_table::{PermissionedStakeTableConfig, PermissionedStakeTableUpdate}; - use crate::test_utils::setup_test; use hotshot::types::{BLSPubKey, SignatureKey}; use hotshot_types::{light_client::StateKeyPair, network::PeerConfigKeys}; use toml::toml; + use crate::{ + stake_table::{PermissionedStakeTableConfig, PermissionedStakeTableUpdate}, + test_utils::setup_test, + }; + fn assert_peer_config_eq(p1: &PeerConfigKeys, p2: &PeerConfigKeys) { assert_eq!(p1.stake_table_key, p2.stake_table_key); assert_eq!(p1.state_ver_key, p2.state_ver_key); diff --git a/vid/src/avid_m.rs b/vid/src/avid_m.rs index 29cc31308f..6e7f5be281 100644 --- a/vid/src/avid_m.rs +++ b/vid/src/avid_m.rs @@ -11,10 +11,8 @@ //! vectors. And for dispersal, each storage node gets some vectors and their //! Merkle proofs according to its weight. -use crate::{ - utils::bytes_to_field::{self, bytes_to_field, field_to_bytes}, - VidError, VidResult, VidScheme, -}; +use std::ops::Range; + use ark_ff::PrimeField; use ark_poly::{EvaluationDomain, Radix2EvaluationDomain}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; @@ -26,9 +24,13 @@ use p3_maybe_rayon::prelude::{ IntoParallelIterator, IntoParallelRefIterator, ParallelIterator, ParallelSlice, }; use serde::{Deserialize, Serialize}; -use std::ops::Range; use tagged_base64::tagged; +use crate::{ + utils::bytes_to_field::{self, bytes_to_field, field_to_bytes}, + VidError, VidResult, VidScheme, +}; + mod config; pub mod namespaced; @@ -401,9 +403,10 @@ impl VidScheme for AvidMScheme { /// Unit tests #[cfg(test)] pub mod tests { - use crate::{avid_m::AvidMScheme, VidScheme}; use rand::{seq::SliceRandom, RngCore}; + use crate::{avid_m::AvidMScheme, VidScheme}; + #[test] fn round_trip() { // play with these items diff --git a/vid/src/avid_m/namespaced.rs b/vid/src/avid_m/namespaced.rs index aeda112d39..d49cb679fd 100644 --- a/vid/src/avid_m/namespaced.rs +++ b/vid/src/avid_m/namespaced.rs @@ -1,13 +1,15 @@ //! This file implements the namespaced AvidM scheme. +use std::ops::Range; + +use jf_merkle_tree::MerkleTreeScheme; +use serde::{Deserialize, Serialize}; + use super::{AvidMCommit, AvidMShare, RawAvidMShare}; use crate::{ avid_m::{AvidMScheme, MerkleTree}, VidError, VidResult, VidScheme, }; -use jf_merkle_tree::MerkleTreeScheme; -use serde::{Deserialize, Serialize}; -use std::ops::Range; /// Dummy struct for namespaced AvidM scheme pub struct NsAvidMScheme; diff --git a/vid/src/utils/bytes_to_field.rs b/vid/src/utils/bytes_to_field.rs index 5808c3e701..1b22ed81ea 100644 --- a/vid/src/utils/bytes_to_field.rs +++ b/vid/src/utils/bytes_to_field.rs @@ -1,4 +1,3 @@ -use ark_ff::{BigInteger, PrimeField}; use std::{ borrow::Borrow, iter::Take, @@ -6,6 +5,8 @@ use std::{ vec::{IntoIter, Vec}, }; +use ark_ff::{BigInteger, PrimeField}; + /// Deterministic, infallible, invertible iterator adaptor to convert from /// arbitrary bytes to field elements. 
/// @@ -178,11 +179,12 @@ pub fn elem_byte_capacity() -> usize { #[cfg(test)] mod tests { - use super::{bytes_to_field, field_to_bytes, PrimeField, Vec}; use ark_bls12_381::Fr as Fr381; use ark_bn254::Fr as Fr254; use rand::RngCore; + use super::{bytes_to_field, field_to_bytes, PrimeField, Vec}; + fn bytes_to_field_iter() { let byte_lens = [0, 1, 2, 16, 31, 32, 33, 48, 65, 100, 200, 5000]; From f9060fd4aced86eb5bc913a7cc155a025fd487aa Mon Sep 17 00:00:00 2001 From: Mathis Date: Mon, 10 Mar 2025 10:22:58 +0100 Subject: [PATCH 17/17] Re-apply removal of foundry patch, accidentally reverted on main (#2748) * Revert "Pull in nightly rust-fmt, use vid rustfmt.toml (#2700)" This reverts commit 984bcf51735fc4cfcf1cf520218ebd7186a757df. * Re-apply rustfmt nightly changes This re-applies 984bcf51735fc4cfcf1cf520218ebd7186a757df but without the errors in there that undid previous commits on main. * run nightly rustfmt on CI too * CI: install nightly rustfmt * fix: nightly toolchain version --- .github/workflows/lint.yml | 7 ++++- .typos.toml | 3 ++- flake.nix | 53 +++++++++++++++++++++----------------- 3 files changed, 38 insertions(+), 25 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 66fb4aecad..099fec055d 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -26,11 +26,16 @@ jobs: - uses: actions/checkout@v4 name: Checkout Repository + - uses: dtolnay/rust-toolchain@master + with: + toolchain: nightly + components: rustfmt + - uses: Swatinem/rust-cache@v2 name: Enable Rust Caching - name: Format Check - run: cargo fmt -- --check + run: cargo +nightly fmt -- --check - name: Check (embedded-db) run: | diff --git a/.typos.toml b/.typos.toml index bd8457462f..eee48b9695 100644 --- a/.typos.toml +++ b/.typos.toml @@ -11,5 +11,6 @@ extend-exclude = [ "contract-bindings-alloy", "contract-bindings-ethers", "node-metrics/src/api/node_validator/v0/example_prometheus_metrics_output.txt", - "hotshot-orchestrator/run-config.toml" + "hotshot-orchestrator/run-config.toml", + "hotshot-macros/src/lib.rs" ] diff --git a/flake.nix b/flake.nix index c735c6e7b9..1e6a605157 100644 --- a/flake.nix +++ b/flake.nix @@ -13,6 +13,10 @@ }; inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; + inputs.nixpkgs-legacy-foundry.url = "github:NixOS/nixpkgs/9abb87b552b7f55ac8916b6fc9e5cb486656a2f3"; + + inputs.foundry-nix.url = "github:shazow/foundry.nix/monthly"; # Use monthly branch for permanent releases + inputs.rust-overlay.url = "github:oxalica/rust-overlay"; inputs.nixpkgs-cross-overlay.url = @@ -29,6 +33,8 @@ outputs = { self , nixpkgs + , nixpkgs-legacy-foundry + , foundry-nix , rust-overlay , nixpkgs-cross-overlay , flake-utils @@ -61,6 +67,7 @@ overlays = [ (import rust-overlay) + foundry-nix.overlay solc-bin.overlays.default (final: prev: { solhint = @@ -186,7 +193,7 @@ buildInputs = [ # Rust dependencies pkg-config - openssl_3 # match ubuntu 24.04 that we use on CI and as base image in docker + openssl curl protobuf # to compile libp2p-autonat stableToolchain @@ -217,25 +224,7 @@ coreutils # Ethereum contracts, solidity, ... - # TODO: remove alloy patch when forge includes this fix: https://github.com/alloy-rs/core/pull/864 - # foundry - (foundry.overrideAttrs { - # Set the resolve limit to 128 by replacing the value in the vendored dependencies. 
- postPatch = '' - pushd $cargoDepsCopy/alloy-sol-macro-expander - - oldHash=$(sha256sum src/expand/mod.rs | cut -d " " -f 1) - - substituteInPlace src/expand/mod.rs \ - --replace-warn \ - 'const RESOLVE_LIMIT: usize = 32;' 'const RESOLVE_LIMIT: usize = 128;' - - substituteInPlace .cargo-checksum.json \ - --replace-warn $oldHash $(sha256sum src/expand/mod.rs | cut -d " " -f 1) - - popd - ''; - }) + foundry-bin solc nodePackages.prettier solhint @@ -255,10 +244,28 @@ # Add rust binaries to PATH for native demo export PATH="$PWD/$CARGO_TARGET_DIR/debug:$PATH" + + # Needed to compile with the sqlite-unbundled feature + export LIBCLANG_PATH="${pkgs.llvmPackages.libclang.lib}/lib"; '' + self.checks.${system}.pre-commit-check.shellHook; RUST_SRC_PATH = "${stableToolchain}/lib/rustlib/src/rust/library"; FOUNDRY_SOLC = "${solc}/bin/solc"; }); + # A shell with foundry v0.3.0 which can still build ethers-rs bindings. + # Can be removed when we are no longer using the ethers-rs bindings. + devShells.legacyFoundry = + let + overlays = [ + solc-bin.overlays.default + ]; + pkgs = import nixpkgs-legacy-foundry { inherit system overlays; }; + in + mkShell { + packages = with pkgs; [ + solc + foundry + ]; + }; devShells.crossShell = crossShell { config = "x86_64-unknown-linux-musl"; }; devShells.armCrossShell = @@ -273,7 +280,7 @@ buildInputs = [ # Rust dependencies pkg-config - openssl_3 + openssl curl protobuf # to compile libp2p-autonat toolchain @@ -287,7 +294,7 @@ buildInputs = [ # Rust dependencies pkg-config - openssl_3 + openssl curl protobuf # to compile libp2p-autonat toolchain @@ -310,7 +317,7 @@ buildInputs = [ # Rust dependencies pkg-config - openssl_3 + openssl curl protobuf # to compile libp2p-autonat stableToolchain