diff --git a/.github/workflows/cargo-features.yml b/.github/workflows/cargo-features.yml index 208ba94fe8..9f6f115717 100644 --- a/.github/workflows/cargo-features.yml +++ b/.github/workflows/cargo-features.yml @@ -20,8 +20,14 @@ concurrency: jobs: cargo-features: runs-on: ubuntu-latest + strategy: + matrix: + command: + - just check-features-ci + - just check-features-ci --tests steps: - uses: taiki-e/install-action@cargo-hack + - uses: taiki-e/install-action@just - name: Checkout Repository uses: actions/checkout@v4 @@ -29,25 +35,18 @@ jobs: - name: Free Disk Space (Ubuntu) uses: jlumbroso/free-disk-space@main with: - tool-cache: false android: true - dotnet: true - haskell: true + tool-cache: false + dotnet: false + haskell: false large-packages: false docker-images: false swap-storage: false # Note: this job doesn't use a cache on purpose because it mostly compiles # the crates in this repo over and over again with different feature - # combinations. Adding caching would not speed it up much and further - # contribute to our cache usage. - - # Includes checks for `--no-default-features` and `--all-features` as well - # as each individual feature enabled. - - name: Check compilation for feature combinations - run: | - cargo hack check --feature-powerset --exclude hotshot --exclude hotshot-builder-api --exclude hotshot-task-impls --exclude hotshot-macros --exclude hotshot-events-service --exclude hotshot-utils --exclude hotshot-orchestrator --exclude hotshot-query-service --exclude hotshot-stake-table --exclude hotshot-state-prover --exclude hotshot-task --exclude hotshot-testing --exclude hotshot-types --exclude hotshot-libp2p-networking --exclude hotshot-contract-adapter --exclude hotshot-example-types --exclude vid - - - name: Check compilation for feature combinations (--tests) + # combinations. The target directory gets really large. Adding caching + # would not speed it up much and further contribute to our cache usage. 
+ - name: Check compilation feature combinations run: | - cargo hack check --feature-powerset --tests --exclude hotshot --exclude hotshot-builder-api --exclude hotshot-task-impls --exclude hotshot-macros --exclude hotshot-events-service --exclude hotshot-utils --exclude hotshot-orchestrator --exclude hotshot-query-service --exclude hotshot-stake-table --exclude hotshot-state-prover --exclude hotshot-task --exclude hotshot-testing --exclude hotshot-types --exclude hotshot-libp2p-networking --exclude hotshot-contract-adapter --exclude hotshot-example-types --exclude vid + ${{ matrix.command }} diff --git a/.github/workflows/contracts.yml b/.github/workflows/contracts.yml index 29f9684974..241c570f00 100644 --- a/.github/workflows/contracts.yml +++ b/.github/workflows/contracts.yml @@ -45,7 +45,7 @@ jobs: - uses: Swatinem/rust-cache@v2 name: Enable Rust Caching with: - prefix-key: v2-rust + prefix-key: v2-rust-${{ hashFiles('flake.*') }} - name: Check toolchain versions run: nix develop --accept-flake-config -c ./scripts/show-toolchain-versions diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 66fb4aecad..099fec055d 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -26,11 +26,16 @@ jobs: - uses: actions/checkout@v4 name: Checkout Repository + - uses: dtolnay/rust-toolchain@master + with: + toolchain: nightly + components: rustfmt + - uses: Swatinem/rust-cache@v2 name: Enable Rust Caching - name: Format Check - run: cargo fmt -- --check + run: cargo +nightly fmt -- --check - name: Check (embedded-db) run: | diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 394b9fcc43..a516fcd8ac 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -212,10 +212,11 @@ jobs: version: [02,99] include: - version: 02 - compose: "-f process-compose.yaml -D" + test-name: test_native_demo_basic - version: 99 - compose: "-f process-compose.yaml -f process-compose-mp.yml -D" + test-name: 
test_native_demo_upgrade + fail-fast: false runs-on: ubuntu-latest steps: @@ -242,9 +243,6 @@ jobs: - name: Install process-compose run: nix profile install nixpkgs#process-compose - - name: Run Demo-Native ${{matrix.version}} - run: bash -x scripts/demo-native ${{matrix.compose}} --tui=false > ${{ env.PC_LOGS }} 2>&1 & - - name: Test Integration env: RUST_LOG: debug @@ -252,9 +250,14 @@ jobs: INTEGRATION_TEST_SEQUENCER_VERSION: ${{ matrix.version }} run: | cargo nextest run --archive-file nextest-archive-postgres.tar.zst --verbose --no-fail-fast --nocapture \ - --workspace-remap $PWD $(if [ "${{ matrix.version }}" == "2" ]; then echo " smoke"; fi) + --workspace-remap $PWD ${{ matrix.test-name }} timeout-minutes: 10 + - name: Show end of logs + if: always() + run: | + tail -n 1000 ${{ env.PC_LOGS }} + - name: Upload process compose logs if: always() uses: actions/upload-artifact@v4 @@ -262,6 +265,7 @@ jobs: name: process-compose-logs-integration-v${{ matrix.version }} path: ${{ env.PC_LOGS }} + demo-native: needs: build-test-bins runs-on: ubuntu-latest diff --git a/.typos.toml b/.typos.toml index bd8457462f..eee48b9695 100644 --- a/.typos.toml +++ b/.typos.toml @@ -11,5 +11,6 @@ extend-exclude = [ "contract-bindings-alloy", "contract-bindings-ethers", "node-metrics/src/api/node_validator/v0/example_prometheus_metrics_output.txt", - "hotshot-orchestrator/run-config.toml" + "hotshot-orchestrator/run-config.toml", + "hotshot-macros/src/lib.rs" ] diff --git a/CODEOWNERS b/CODEOWNERS index 14c5ed8923..ebe3f7d986 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -2,8 +2,4 @@ # later match takes precedence, they will be requested for review when someone # opens a pull request. 
-* @nomaxg @philippecamacho @ImJeremyHe @sveitser @jbearer @tbro @imabdulbasit - -# Dependabot PRs -*.toml @nomaxg @philippecamacho @ImJeremyHe @sveitser -*.lock @nomaxg @philippecamacho @ImJeremyHe @sveitser +* @sveitser @jbearer @tbro @imabdulbasit @ss-es @pls148 @bfish713 @rob-maron @lukaszrzasik diff --git a/Cargo.lock b/Cargo.lock index 7b0358f5c0..f59f544b13 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11398,6 +11398,7 @@ dependencies = [ "futures", "reqwest 0.12.12", "surf-disco", + "tempfile", "tokio", "vbs", ] diff --git a/README.md b/README.md index 4dbe498290..f9a1c30984 100644 --- a/README.md +++ b/README.md @@ -1,21 +1,18 @@ -# Espresso Sequencer +# Espresso Network -[![Build](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/build.yml/badge.svg)](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/build.yml) -[![Test](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/test.yml/badge.svg)](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/test.yml) -[![Docs rust](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/doc-rust.yml/badge.svg)](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/doc-rust.yml) -[![Contracts](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/contracts.yml/badge.svg)](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/contracts.yml) -[![Lint](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/lint.yml/badge.svg)](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/lint.yml) -[![Audit](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/audit.yml/badge.svg)](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/audit.yml) 
-[![Ubuntu](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/ubuntu-install-without-nix.yml/badge.svg)](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/ubuntu-install-without-nix.yml) -[![Build without lockfile](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/build-without-lockfile.yml/badge.svg)](https://github.com/EspressoSystems/espresso-sequencer/actions/workflows/build-without-lockfile.yml) -[![Coverage Status](https://coveralls.io/repos/github/EspressoSystems/espresso-sequencer/badge.svg?branch=main)](https://coveralls.io/github/EspressoSystems/espresso-sequencer?branch=main) +[![Build](https://github.com/EspressoSystems/espresso-network/actions/workflows/build.yml/badge.svg)](https://github.com/EspressoSystems/espresso-network/actions/workflows/build.yml) +[![Test](https://github.com/EspressoSystems/espresso-network/actions/workflows/test.yml/badge.svg)](https://github.com/EspressoSystems/espresso-network/actions/workflows/test.yml) +[![Docs rust](https://github.com/EspressoSystems/espresso-network/actions/workflows/doc-rust.yml/badge.svg)](https://github.com/EspressoSystems/espresso-network/actions/workflows/doc-rust.yml) +[![Contracts](https://github.com/EspressoSystems/espresso-network/actions/workflows/contracts.yml/badge.svg)](https://github.com/EspressoSystems/espresso-network/actions/workflows/contracts.yml) +[![Lint](https://github.com/EspressoSystems/espresso-network/actions/workflows/lint.yml/badge.svg)](https://github.com/EspressoSystems/espresso-network/actions/workflows/lint.yml) +[![Audit](https://github.com/EspressoSystems/espresso-network/actions/workflows/audit.yml/badge.svg)](https://github.com/EspressoSystems/espresso-network/actions/workflows/audit.yml) 
+[![Ubuntu](https://github.com/EspressoSystems/espresso-network/actions/workflows/ubuntu-install-without-nix.yml/badge.svg)](https://github.com/EspressoSystems/espresso-network/actions/workflows/ubuntu-install-without-nix.yml) +[![Build without lockfile](https://github.com/EspressoSystems/espresso-network/actions/workflows/build-without-lockfile.yml/badge.svg)](https://github.com/EspressoSystems/espresso-network/actions/workflows/build-without-lockfile.yml) +[![Coverage Status](https://coveralls.io/repos/github/EspressoSystems/espresso-network/badge.svg?branch=main)](https://coveralls.io/github/EspressoSystems/espresso-network?branch=main) -The Espresso Sequencer offers rollups credible neutrality and enhanced interoperability, without compromising on scale. -Consisting of a data availability solution and a decentralized network of nodes that sequences transactions, layer-2 -rollups can leverage the Espresso Sequencer to give developers and end users fast confirmations, low (and fair) fees, -and robust infrastructure. +The Espresso Network is the global confirmation layer for rollups in the Ethereum ecosystem. Espresso's [global confirmation layer (GCL)](https://docs.espressosys.com/network) provides agreement on inputs to a collection of composable blockchains, providing a high-trust, fast, and verifiable way to process inputs on any chain, with fast confirmations in return. -[Official Documentation](https://docs.espressosys.com/sequencer/espresso-sequencer-architecture/readme) +[Official Documentation](https://docs.espressosys.com/network/) ### Architecture @@ -36,7 +33,7 @@ a dockerized Espresso Sequencer network with an example Layer 2 rollup applicati # Development -- Obtain code: `git clone git@github.com:EspressoSystems/espresso-network`. - Make sure [nix](https://nixos.org/download.html) is installed.
- Activate the environment with `nix-shell`, or `nix develop`, or `direnv allow` if using [direnv](https://direnv.net/). - For installation without nix please see [ubuntu.md](./doc/ubuntu.md). diff --git a/builder/src/bin/permissionless-builder.rs b/builder/src/bin/permissionless-builder.rs index 1ff329399e..4df79fb223 100644 --- a/builder/src/bin/permissionless-builder.rs +++ b/builder/src/bin/permissionless-builder.rs @@ -122,11 +122,11 @@ async fn main() -> anyhow::Result<()> { match (base, upgrade) { (FeeVersion::VERSION, MarketplaceVersion::VERSION) => { run::>(genesis, opt).await - } + }, (FeeVersion::VERSION, _) => run::>(genesis, opt).await, (MarketplaceVersion::VERSION, _) => { run::>(genesis, opt).await - } + }, _ => panic!( "Invalid base ({base}) and upgrade ({upgrade}) versions specified in the toml file." ), diff --git a/builder/src/lib.rs b/builder/src/lib.rs index 2a16ff89e5..627fcfccba 100755 --- a/builder/src/lib.rs +++ b/builder/src/lib.rs @@ -40,6 +40,12 @@ pub fn run_builder_api_service(url: Url, source: ProxyGlobalState) { #[cfg(test)] pub mod testing { + use std::{ + num::NonZeroUsize, + sync::Arc, + time::{Duration, Instant}, + }; + use async_lock::RwLock; use committable::Committable; use espresso_types::{ @@ -74,18 +80,13 @@ pub mod testing { }, HotShotConfig, PeerConfig, ValidatorConfig, }; + use jf_signature::bls_over_bn254::VerKey; use sequencer::{context::Consensus, network, SequencerApiVersion}; - use std::{ - num::NonZeroUsize, - sync::Arc, - time::{Duration, Instant}, - }; use surf_disco::Client; use vbs::version::StaticVersion; use super::*; use crate::non_permissioned::BuilderConfig; - use jf_signature::bls_over_bn254::VerKey; #[derive(Clone)] pub struct HotShotTestConfig { @@ -414,10 +415,10 @@ pub mod testing { { Ok(response) => { tracing::info!("Received txn submitted response : {:?}", response); - } + }, Err(e) => { panic!("Error submitting private transaction {:?}", e); - } + }, } let seed = [207_u8; 32]; @@ -514,10 +515,10 @@ 
pub mod testing { Ok(response) => { tracing::info!("Received Builder Key : {:?}", response); assert_eq!(response, builder_pub_key); - } + }, Err(e) => { panic!("Error getting builder key {:?}", e); - } + }, } } } diff --git a/builder/src/non_permissioned.rs b/builder/src/non_permissioned.rs index fd8d3bada1..d0d6224abe 100644 --- a/builder/src/non_permissioned.rs +++ b/builder/src/non_permissioned.rs @@ -1,4 +1,4 @@ -use std::{collections::VecDeque, num::NonZeroUsize, time::Duration}; +use std::{collections::VecDeque, num::NonZeroUsize, sync::Arc, time::Duration}; use anyhow::Context; use async_broadcast::broadcast; @@ -22,10 +22,8 @@ use hotshot_types::{ node_implementation::Versions, EncodeBytes, }, }; -use marketplace_builder_shared::block::ParentBlockReferences; -use marketplace_builder_shared::utils::EventServiceStream; +use marketplace_builder_shared::{block::ParentBlockReferences, utils::EventServiceStream}; use sequencer::{catchup::StatePeers, L1Params, SequencerApiVersion}; -use std::sync::Arc; use tide_disco::Url; use tokio::spawn; use vbs::version::StaticVersionType; diff --git a/client/src/lib.rs b/client/src/lib.rs index a9d5cc995f..88d921afc7 100644 --- a/client/src/lib.rs +++ b/client/src/lib.rs @@ -1,3 +1,5 @@ +use std::time::Duration; + use anyhow::Context; use espresso_types::{FeeAccount, FeeAmount, FeeMerkleTree, Header}; use ethers::types::Address; @@ -6,7 +8,6 @@ use jf_merkle_tree::{ prelude::{MerkleProof, Sha3Node}, MerkleTreeScheme, }; -use std::time::Duration; use surf_disco::{ error::ClientError, socket::{Connection, Unsupported}, @@ -110,7 +111,7 @@ impl SequencerClient { } else { sleep(Duration::from_millis(200)).await; } - } + }, } }; diff --git a/contract-bindings-alloy/src/erc1967proxy.rs b/contract-bindings-alloy/src/erc1967proxy.rs index 2f9998b31e..d8c58c39fd 100644 --- a/contract-bindings-alloy/src/erc1967proxy.rs +++ b/contract-bindings-alloy/src/erc1967proxy.rs @@ -94,8 +94,9 @@ interface ERC1967Proxy { 
clippy::empty_structs_with_brackets )] pub mod ERC1967Proxy { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /// The creation / init bytecode of the contract. /// /// ```text @@ -123,6 +124,7 @@ pub mod ERC1967Proxy { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct AddressEmptyCode { + #[allow(missing_docs)] pub target: alloy::sol_types::private::Address, } #[allow( @@ -143,7 +145,7 @@ pub mod ERC1967Proxy { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -189,6 +191,7 @@ pub mod ERC1967Proxy { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct ERC1967InvalidImplementation { + #[allow(missing_docs)] pub implementation: alloy::sol_types::private::Address, } #[allow( @@ -209,7 +212,7 @@ pub mod ERC1967Proxy { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -275,7 +278,7 @@ pub mod ERC1967Proxy { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -335,7 +338,7 @@ pub mod ERC1967Proxy { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -479,7 +482,9 @@ pub mod ERC1967Proxy { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct constructorCall { + #[allow(missing_docs)] pub implementation: alloy::sol_types::private::Address, + #[allow(missing_docs)] pub _data: alloy::sol_types::private::Bytes, } const _: () = { @@ -501,7 +506,7 @@ pub mod ERC1967Proxy { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -550,9 +555,13 @@ pub mod ERC1967Proxy { }; ///Container for all the [`ERC1967Proxy`](self) custom errors. 
pub enum ERC1967ProxyErrors { + #[allow(missing_docs)] AddressEmptyCode(AddressEmptyCode), + #[allow(missing_docs)] ERC1967InvalidImplementation(ERC1967InvalidImplementation), + #[allow(missing_docs)] ERC1967NonPayable(ERC1967NonPayable), + #[allow(missing_docs)] FailedInnerCall(FailedInnerCall), } #[automatically_derived] @@ -580,16 +589,16 @@ pub mod ERC1967Proxy { match self { Self::AddressEmptyCode(_) => { ::SELECTOR - } + }, Self::ERC1967InvalidImplementation(_) => { ::SELECTOR - } + }, Self::ERC1967NonPayable(_) => { ::SELECTOR - } + }, Self::FailedInnerCall(_) => { ::SELECTOR - } + }, } } #[inline] @@ -674,18 +683,18 @@ pub mod ERC1967Proxy { match self { Self::AddressEmptyCode(inner) => { ::abi_encoded_size(inner) - } + }, Self::ERC1967InvalidImplementation(inner) => { ::abi_encoded_size( inner, ) - } + }, Self::ERC1967NonPayable(inner) => { ::abi_encoded_size(inner) - } + }, Self::FailedInnerCall(inner) => { ::abi_encoded_size(inner) - } + }, } } #[inline] @@ -693,23 +702,24 @@ pub mod ERC1967Proxy { match self { Self::AddressEmptyCode(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::ERC1967InvalidImplementation(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::ERC1967NonPayable(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::FailedInnerCall(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } ///Container for all the [`ERC1967Proxy`](self) events. 
pub enum ERC1967ProxyEvents { + #[allow(missing_docs)] Upgraded(Upgraded), } #[automatically_derived] @@ -739,7 +749,7 @@ pub mod ERC1967Proxy { Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Upgraded) - } + }, _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { name: ::NAME, log: alloy_sol_types::private::Box::new( @@ -763,7 +773,7 @@ pub mod ERC1967Proxy { match self { Self::Upgraded(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, } } } diff --git a/contract-bindings-alloy/src/feecontract.rs b/contract-bindings-alloy/src/feecontract.rs index 0eedfa5680..19aba5e2fd 100644 --- a/contract-bindings-alloy/src/feecontract.rs +++ b/contract-bindings-alloy/src/feecontract.rs @@ -445,8 +445,9 @@ interface FeeContract { clippy::empty_structs_with_brackets )] pub mod FeeContract { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /// The creation / init bytecode of the contract. /// /// ```text @@ -474,6 +475,7 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct AddressEmptyCode { + #[allow(missing_docs)] pub target: alloy::sol_types::private::Address, } #[allow( @@ -494,7 +496,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -558,7 +560,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -618,7 +620,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -660,6 +662,7 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct ERC1967InvalidImplementation { + #[allow(missing_docs)] pub implementation: 
alloy::sol_types::private::Address, } #[allow( @@ -680,7 +683,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -746,7 +749,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -806,7 +809,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -866,7 +869,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -926,7 +929,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -986,7 +989,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1046,7 +1049,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1106,7 +1109,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1148,6 +1151,7 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct OwnableInvalidOwner { + #[allow(missing_docs)] pub owner: alloy::sol_types::private::Address, } #[allow( @@ -1168,7 +1172,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1214,6 +1218,7 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct OwnableUnauthorizedAccount { + #[allow(missing_docs)] pub account: 
alloy::sol_types::private::Address, } #[allow( @@ -1234,7 +1239,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1298,7 +1303,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1340,6 +1345,7 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct UUPSUnsupportedProxiableUUID { + #[allow(missing_docs)] pub slot: alloy::sol_types::private::FixedBytes<32>, } #[allow( @@ -1360,7 +1366,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2050,7 +2056,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2095,6 +2101,7 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct UPGRADE_INTERFACE_VERSIONReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::String, } #[allow( @@ -2116,7 +2123,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2145,7 +2152,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2201,12 +2208,14 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct balancesCall { + #[allow(missing_docs)] pub user: alloy::sol_types::private::Address, } ///Container type for the return parameters of the [`balances(address)`](balancesCall) function. 
#[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct balancesReturn { + #[allow(missing_docs)] pub amount: alloy::sol_types::private::primitives::aliases::U256, } #[allow( @@ -2228,7 +2237,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2257,7 +2266,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2317,6 +2326,7 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct depositCall { + #[allow(missing_docs)] pub user: alloy::sol_types::private::Address, } ///Container type for the return parameters of the [`deposit(address)`](depositCall) function. @@ -2342,7 +2352,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2371,7 +2381,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2435,8 +2445,11 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct getVersionReturn { + #[allow(missing_docs)] pub majorVersion: u8, + #[allow(missing_docs)] pub minorVersion: u8, + #[allow(missing_docs)] pub patchVersion: u8, } #[allow( @@ -2458,7 +2471,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2491,7 +2504,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2555,6 +2568,7 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] 
#[derive(Clone)] pub struct initializeCall { + #[allow(missing_docs)] pub multisig: alloy::sol_types::private::Address, } ///Container type for the return parameters of the [`initialize(address)`](initializeCall) function. @@ -2580,7 +2594,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2609,7 +2623,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2673,6 +2687,7 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct maxDepositAmountReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::primitives::aliases::U256, } #[allow( @@ -2694,7 +2709,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2723,7 +2738,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2783,6 +2798,7 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct minDepositAmountReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::primitives::aliases::U256, } #[allow( @@ -2804,7 +2820,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2833,7 +2849,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2893,6 +2909,7 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct ownerReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::Address, } 
#[allow( @@ -2914,7 +2931,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2943,7 +2960,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3003,6 +3020,7 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct proxiableUUIDReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::FixedBytes<32>, } #[allow( @@ -3024,7 +3042,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3053,7 +3071,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3132,7 +3150,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3161,7 +3179,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3217,6 +3235,7 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct transferOwnershipCall { + #[allow(missing_docs)] pub newOwner: alloy::sol_types::private::Address, } ///Container type for the return parameters of the [`transferOwnership(address)`](transferOwnershipCall) function. 
@@ -3242,7 +3261,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3271,7 +3290,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3331,7 +3350,9 @@ pub mod FeeContract { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct upgradeToAndCallCall { + #[allow(missing_docs)] pub newImplementation: alloy::sol_types::private::Address, + #[allow(missing_docs)] pub data: alloy::sol_types::private::Bytes, } ///Container type for the return parameters of the [`upgradeToAndCall(address,bytes)`](upgradeToAndCallCall) function. @@ -3363,7 +3384,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3395,7 +3416,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3456,17 +3477,29 @@ pub mod FeeContract { }; ///Container for all the [`FeeContract`](self) function calls. 
pub enum FeeContractCalls { + #[allow(missing_docs)] UPGRADE_INTERFACE_VERSION(UPGRADE_INTERFACE_VERSIONCall), + #[allow(missing_docs)] balances(balancesCall), + #[allow(missing_docs)] deposit(depositCall), + #[allow(missing_docs)] getVersion(getVersionCall), + #[allow(missing_docs)] initialize(initializeCall), + #[allow(missing_docs)] maxDepositAmount(maxDepositAmountCall), + #[allow(missing_docs)] minDepositAmount(minDepositAmountCall), + #[allow(missing_docs)] owner(ownerCall), + #[allow(missing_docs)] proxiableUUID(proxiableUUIDCall), + #[allow(missing_docs)] renounceOwnership(renounceOwnershipCall), + #[allow(missing_docs)] transferOwnership(transferOwnershipCall), + #[allow(missing_docs)] upgradeToAndCall(upgradeToAndCallCall), } #[automatically_derived] @@ -3502,28 +3535,28 @@ pub mod FeeContract { match self { Self::UPGRADE_INTERFACE_VERSION(_) => { ::SELECTOR - } + }, Self::balances(_) => ::SELECTOR, Self::deposit(_) => ::SELECTOR, Self::getVersion(_) => ::SELECTOR, Self::initialize(_) => ::SELECTOR, Self::maxDepositAmount(_) => { ::SELECTOR - } + }, Self::minDepositAmount(_) => { ::SELECTOR - } + }, Self::owner(_) => ::SELECTOR, Self::proxiableUUID(_) => ::SELECTOR, Self::renounceOwnership(_) => { ::SELECTOR - } + }, Self::transferOwnership(_) => { ::SELECTOR - } + }, Self::upgradeToAndCall(_) => { ::SELECTOR - } + }, } } #[inline] @@ -3702,40 +3735,40 @@ pub mod FeeContract { ::abi_encoded_size( inner, ) - } + }, Self::balances(inner) => { ::abi_encoded_size(inner) - } + }, Self::deposit(inner) => { ::abi_encoded_size(inner) - } + }, Self::getVersion(inner) => { ::abi_encoded_size(inner) - } + }, Self::initialize(inner) => { ::abi_encoded_size(inner) - } + }, Self::maxDepositAmount(inner) => { ::abi_encoded_size(inner) - } + }, Self::minDepositAmount(inner) => { ::abi_encoded_size(inner) - } + }, Self::owner(inner) => { ::abi_encoded_size(inner) - } + }, Self::proxiableUUID(inner) => { ::abi_encoded_size(inner) - } + }, Self::renounceOwnership(inner) => 
{ ::abi_encoded_size(inner) - } + }, Self::transferOwnership(inner) => { ::abi_encoded_size(inner) - } + }, Self::upgradeToAndCall(inner) => { ::abi_encoded_size(inner) - } + }, } } #[inline] @@ -3745,59 +3778,74 @@ pub mod FeeContract { ::abi_encode_raw( inner, out, ) - } + }, Self::balances(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::deposit(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::getVersion(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::initialize(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::maxDepositAmount(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::minDepositAmount(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::owner(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::proxiableUUID(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::renounceOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::transferOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::upgradeToAndCall(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } ///Container for all the [`FeeContract`](self) custom errors. 
pub enum FeeContractErrors { + #[allow(missing_docs)] AddressEmptyCode(AddressEmptyCode), + #[allow(missing_docs)] DepositTooLarge(DepositTooLarge), + #[allow(missing_docs)] DepositTooSmall(DepositTooSmall), + #[allow(missing_docs)] ERC1967InvalidImplementation(ERC1967InvalidImplementation), + #[allow(missing_docs)] ERC1967NonPayable(ERC1967NonPayable), + #[allow(missing_docs)] FailedInnerCall(FailedInnerCall), + #[allow(missing_docs)] FunctionDoesNotExist(FunctionDoesNotExist), + #[allow(missing_docs)] InvalidInitialization(InvalidInitialization), + #[allow(missing_docs)] InvalidUserAddress(InvalidUserAddress), + #[allow(missing_docs)] NoFunctionCalled(NoFunctionCalled), + #[allow(missing_docs)] NotInitializing(NotInitializing), + #[allow(missing_docs)] OwnableInvalidOwner(OwnableInvalidOwner), + #[allow(missing_docs)] OwnableUnauthorizedAccount(OwnableUnauthorizedAccount), + #[allow(missing_docs)] UUPSUnauthorizedCallContext(UUPSUnauthorizedCallContext), + #[allow(missing_docs)] UUPSUnsupportedProxiableUUID(UUPSUnsupportedProxiableUUID), } #[automatically_derived] @@ -3836,49 +3884,49 @@ pub mod FeeContract { match self { Self::AddressEmptyCode(_) => { ::SELECTOR - } + }, Self::DepositTooLarge(_) => { ::SELECTOR - } + }, Self::DepositTooSmall(_) => { ::SELECTOR - } + }, Self::ERC1967InvalidImplementation(_) => { ::SELECTOR - } + }, Self::ERC1967NonPayable(_) => { ::SELECTOR - } + }, Self::FailedInnerCall(_) => { ::SELECTOR - } + }, Self::FunctionDoesNotExist(_) => { ::SELECTOR - } + }, Self::InvalidInitialization(_) => { ::SELECTOR - } + }, Self::InvalidUserAddress(_) => { ::SELECTOR - } + }, Self::NoFunctionCalled(_) => { ::SELECTOR - } + }, Self::NotInitializing(_) => { ::SELECTOR - } + }, Self::OwnableInvalidOwner(_) => { ::SELECTOR - } + }, Self::OwnableUnauthorizedAccount(_) => { ::SELECTOR - } + }, Self::UUPSUnauthorizedCallContext(_) => { ::SELECTOR - } + }, Self::UUPSUnsupportedProxiableUUID(_) => { ::SELECTOR - } + }, } } #[inline] @@ -4095,57 +4143,57 
@@ pub mod FeeContract { match self { Self::AddressEmptyCode(inner) => { ::abi_encoded_size(inner) - } + }, Self::DepositTooLarge(inner) => { ::abi_encoded_size(inner) - } + }, Self::DepositTooSmall(inner) => { ::abi_encoded_size(inner) - } + }, Self::ERC1967InvalidImplementation(inner) => { ::abi_encoded_size( inner, ) - } + }, Self::ERC1967NonPayable(inner) => { ::abi_encoded_size(inner) - } + }, Self::FailedInnerCall(inner) => { ::abi_encoded_size(inner) - } + }, Self::FunctionDoesNotExist(inner) => { ::abi_encoded_size(inner) - } + }, Self::InvalidInitialization(inner) => { ::abi_encoded_size(inner) - } + }, Self::InvalidUserAddress(inner) => { ::abi_encoded_size(inner) - } + }, Self::NoFunctionCalled(inner) => { ::abi_encoded_size(inner) - } + }, Self::NotInitializing(inner) => { ::abi_encoded_size(inner) - } + }, Self::OwnableInvalidOwner(inner) => { ::abi_encoded_size(inner) - } + }, Self::OwnableUnauthorizedAccount(inner) => { ::abi_encoded_size( inner, ) - } + }, Self::UUPSUnauthorizedCallContext(inner) => { ::abi_encoded_size( inner, ) - } + }, Self::UUPSUnsupportedProxiableUUID(inner) => { ::abi_encoded_size( inner, ) - } + }, } } #[inline] @@ -4153,67 +4201,73 @@ pub mod FeeContract { match self { Self::AddressEmptyCode(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::DepositTooLarge(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::DepositTooSmall(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::ERC1967InvalidImplementation(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::ERC1967NonPayable(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::FailedInnerCall(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::FunctionDoesNotExist(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::InvalidInitialization(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::InvalidUserAddress(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::NoFunctionCalled(inner) => { ::abi_encode_raw(inner, out) - } + }, 
Self::NotInitializing(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::OwnableInvalidOwner(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::OwnableUnauthorizedAccount(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::UUPSUnauthorizedCallContext(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::UUPSUnsupportedProxiableUUID(inner) => { ::abi_encode_raw( inner, out, ) - } + }, } } } ///Container for all the [`FeeContract`](self) events. pub enum FeeContractEvents { + #[allow(missing_docs)] Deposit(Deposit), + #[allow(missing_docs)] Initialized(Initialized), + #[allow(missing_docs)] Log(Log), + #[allow(missing_docs)] OwnershipTransferred(OwnershipTransferred), + #[allow(missing_docs)] Upgrade(Upgrade), + #[allow(missing_docs)] Upgraded(Upgraded), } #[automatically_derived] @@ -4270,31 +4324,31 @@ pub mod FeeContract { Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Deposit) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log( topics, data, validate, ) .map(Self::Initialized) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Log) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log( topics, data, validate, ) .map(Self::OwnershipTransferred) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Upgrade) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Upgraded) - } + }, _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { name: ::NAME, log: alloy_sol_types::private::Box::new( @@ -4314,11 +4368,11 @@ pub mod FeeContract { Self::Deposit(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::Log(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } 
+ }, Self::Upgrade(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), Self::Upgraded(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), } @@ -4328,15 +4382,15 @@ pub mod FeeContract { Self::Deposit(inner) => alloy_sol_types::private::IntoLogData::into_log_data(inner), Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::Log(inner) => alloy_sol_types::private::IntoLogData::into_log_data(inner), Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::Upgrade(inner) => alloy_sol_types::private::IntoLogData::into_log_data(inner), Self::Upgraded(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, } } } diff --git a/contract-bindings-alloy/src/iplonkverifier.rs b/contract-bindings-alloy/src/iplonkverifier.rs index d4af84cd19..6d334f24a6 100644 --- a/contract-bindings-alloy/src/iplonkverifier.rs +++ b/contract-bindings-alloy/src/iplonkverifier.rs @@ -16,8 +16,9 @@ library BN254 { clippy::empty_structs_with_brackets )] pub mod BN254 { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct BaseField(alloy::sol_types::private::primitives::aliases::U256); @@ -256,7 +257,9 @@ pub mod BN254 { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct G1Point { + #[allow(missing_docs)] pub x: ::RustType, + #[allow(missing_docs)] pub y: ::RustType, } #[allow( @@ -280,7 +283,7 @@ pub mod BN254 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1255,8 +1258,9 @@ interface IPlonkVerifier { clippy::empty_structs_with_brackets )] pub mod IPlonkVerifier { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /// The creation / init bytecode of the 
contract. /// /// ```text @@ -1283,28 +1287,51 @@ pub mod IPlonkVerifier { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct PlonkProof { + #[allow(missing_docs)] pub wire0: ::RustType, + #[allow(missing_docs)] pub wire1: ::RustType, + #[allow(missing_docs)] pub wire2: ::RustType, + #[allow(missing_docs)] pub wire3: ::RustType, + #[allow(missing_docs)] pub wire4: ::RustType, + #[allow(missing_docs)] pub prodPerm: ::RustType, + #[allow(missing_docs)] pub split0: ::RustType, + #[allow(missing_docs)] pub split1: ::RustType, + #[allow(missing_docs)] pub split2: ::RustType, + #[allow(missing_docs)] pub split3: ::RustType, + #[allow(missing_docs)] pub split4: ::RustType, + #[allow(missing_docs)] pub zeta: ::RustType, + #[allow(missing_docs)] pub zetaOmega: ::RustType, + #[allow(missing_docs)] pub wireEval0: ::RustType, + #[allow(missing_docs)] pub wireEval1: ::RustType, + #[allow(missing_docs)] pub wireEval2: ::RustType, + #[allow(missing_docs)] pub wireEval3: ::RustType, + #[allow(missing_docs)] pub wireEval4: ::RustType, + #[allow(missing_docs)] pub sigmaEval0: ::RustType, + #[allow(missing_docs)] pub sigmaEval1: ::RustType, + #[allow(missing_docs)] pub sigmaEval2: ::RustType, + #[allow(missing_docs)] pub sigmaEval3: ::RustType, + #[allow(missing_docs)] pub prodPermZetaOmegaEval: ::RustType, } #[allow( @@ -1373,7 +1400,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1830,27 +1857,49 @@ pub mod IPlonkVerifier { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct VerifyingKey { + #[allow(missing_docs)] pub domainSize: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub numInputs: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub sigma0: ::RustType, + #[allow(missing_docs)] pub sigma1: 
::RustType, + #[allow(missing_docs)] pub sigma2: ::RustType, + #[allow(missing_docs)] pub sigma3: ::RustType, + #[allow(missing_docs)] pub sigma4: ::RustType, + #[allow(missing_docs)] pub q1: ::RustType, + #[allow(missing_docs)] pub q2: ::RustType, + #[allow(missing_docs)] pub q3: ::RustType, + #[allow(missing_docs)] pub q4: ::RustType, + #[allow(missing_docs)] pub qM12: ::RustType, + #[allow(missing_docs)] pub qM34: ::RustType, + #[allow(missing_docs)] pub qO: ::RustType, + #[allow(missing_docs)] pub qC: ::RustType, + #[allow(missing_docs)] pub qH1: ::RustType, + #[allow(missing_docs)] pub qH2: ::RustType, + #[allow(missing_docs)] pub qH3: ::RustType, + #[allow(missing_docs)] pub qH4: ::RustType, + #[allow(missing_docs)] pub qEcc: ::RustType, + #[allow(missing_docs)] pub g2LSB: alloy::sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] pub g2MSB: alloy::sol_types::private::FixedBytes<32>, } #[allow( @@ -1917,7 +1966,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2419,14 +2468,18 @@ pub mod IPlonkVerifier { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct verifyCall { + #[allow(missing_docs)] pub verifyingKey: ::RustType, + #[allow(missing_docs)] pub publicInput: [alloy::sol_types::private::primitives::aliases::U256; 8usize], + #[allow(missing_docs)] pub proof: ::RustType, } ///Container type for the return parameters of the 
[`verify((uint256,uint256,(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),bytes32,bytes32),uint256[8],((uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256))`](verifyCall) function. #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct verifyReturn { + #[allow(missing_docs)] pub _0: bool, } #[allow( @@ -2459,7 +2512,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2492,7 +2545,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2557,6 +2610,7 @@ pub mod IPlonkVerifier { }; ///Container for all the [`IPlonkVerifier`](self) function calls. pub enum IPlonkVerifierCalls { + #[allow(missing_docs)] verify(verifyCall), } #[automatically_derived] @@ -2622,7 +2676,7 @@ pub mod IPlonkVerifier { match self { Self::verify(inner) => { ::abi_encoded_size(inner) - } + }, } } #[inline] @@ -2630,7 +2684,7 @@ pub mod IPlonkVerifier { match self { Self::verify(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } diff --git a/contract-bindings-alloy/src/lib.rs b/contract-bindings-alloy/src/lib.rs index ec4b26153a..256939ee1c 100644 --- a/contract-bindings-alloy/src/lib.rs +++ b/contract-bindings-alloy/src/lib.rs @@ -3,12 +3,12 @@ //! This is autogenerated code. //! Do not manually edit these files. 
//! These files may be overwritten by the codegen system at any time. -pub mod erc1967proxy; -pub mod feecontract; -pub mod iplonkverifier; -pub mod lightclient; -pub mod lightclientarbitrum; -pub mod lightclientmock; -pub mod permissionedstaketable; -pub mod plonkverifier; -pub mod plonkverifier2; +pub mod r#erc1967proxy; +pub mod r#feecontract; +pub mod r#iplonkverifier; +pub mod r#lightclient; +pub mod r#lightclientarbitrum; +pub mod r#lightclientmock; +pub mod r#permissionedstaketable; +pub mod r#plonkverifier; +pub mod r#plonkverifier2; diff --git a/contract-bindings-alloy/src/lightclient.rs b/contract-bindings-alloy/src/lightclient.rs index a2e27b0672..c5f11bd7cc 100644 --- a/contract-bindings-alloy/src/lightclient.rs +++ b/contract-bindings-alloy/src/lightclient.rs @@ -16,8 +16,9 @@ library BN254 { clippy::empty_structs_with_brackets )] pub mod BN254 { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct BaseField(alloy::sol_types::private::primitives::aliases::U256); @@ -256,7 +257,9 @@ pub mod BN254 { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct G1Point { + #[allow(missing_docs)] pub x: ::RustType, + #[allow(missing_docs)] pub y: ::RustType, } #[allow( @@ -280,7 +283,7 @@ pub mod BN254 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -560,36 +563,60 @@ library IPlonkVerifier { clippy::empty_structs_with_brackets )] pub mod IPlonkVerifier { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /**```solidity struct PlonkProof { BN254.G1Point wire0; BN254.G1Point wire1; BN254.G1Point wire2; BN254.G1Point wire3; BN254.G1Point wire4; BN254.G1Point prodPerm; BN254.G1Point split0; BN254.G1Point split1; BN254.G1Point split2; BN254.G1Point split3; BN254.G1Point split4; BN254.G1Point 
zeta; BN254.G1Point zetaOmega; BN254.ScalarField wireEval0; BN254.ScalarField wireEval1; BN254.ScalarField wireEval2; BN254.ScalarField wireEval3; BN254.ScalarField wireEval4; BN254.ScalarField sigmaEval0; BN254.ScalarField sigmaEval1; BN254.ScalarField sigmaEval2; BN254.ScalarField sigmaEval3; BN254.ScalarField prodPermZetaOmegaEval; } ```*/ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct PlonkProof { + #[allow(missing_docs)] pub wire0: ::RustType, + #[allow(missing_docs)] pub wire1: ::RustType, + #[allow(missing_docs)] pub wire2: ::RustType, + #[allow(missing_docs)] pub wire3: ::RustType, + #[allow(missing_docs)] pub wire4: ::RustType, + #[allow(missing_docs)] pub prodPerm: ::RustType, + #[allow(missing_docs)] pub split0: ::RustType, + #[allow(missing_docs)] pub split1: ::RustType, + #[allow(missing_docs)] pub split2: ::RustType, + #[allow(missing_docs)] pub split3: ::RustType, + #[allow(missing_docs)] pub split4: ::RustType, + #[allow(missing_docs)] pub zeta: ::RustType, + #[allow(missing_docs)] pub zetaOmega: ::RustType, + #[allow(missing_docs)] pub wireEval0: ::RustType, + #[allow(missing_docs)] pub wireEval1: ::RustType, + #[allow(missing_docs)] pub wireEval2: ::RustType, + #[allow(missing_docs)] pub wireEval3: ::RustType, + #[allow(missing_docs)] pub wireEval4: ::RustType, + #[allow(missing_docs)] pub sigmaEval0: ::RustType, + #[allow(missing_docs)] pub sigmaEval1: ::RustType, + #[allow(missing_docs)] pub sigmaEval2: ::RustType, + #[allow(missing_docs)] pub sigmaEval3: ::RustType, + #[allow(missing_docs)] pub prodPermZetaOmegaEval: ::RustType, } #[allow( @@ -658,7 +685,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2343,8 +2370,9 @@ interface LightClient { clippy::empty_structs_with_brackets )] pub mod LightClient { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /// 
The creation / init bytecode of the contract. /// /// ```text @@ -2371,8 +2399,11 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct LightClientState { + #[allow(missing_docs)] pub viewNum: u64, + #[allow(missing_docs)] pub blockHeight: u64, + #[allow(missing_docs)] pub blockCommRoot: ::RustType, } #[allow( @@ -2401,7 +2432,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2586,9 +2617,13 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct StakeTableState { + #[allow(missing_docs)] pub threshold: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub blsKeyComm: ::RustType, + #[allow(missing_docs)] pub schnorrKeyComm: ::RustType, + #[allow(missing_docs)] pub amountComm: ::RustType, } #[allow( @@ -2619,7 +2654,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2819,6 +2854,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct AddressEmptyCode { + #[allow(missing_docs)] pub target: alloy::sol_types::private::Address, } #[allow( @@ -2839,7 +2875,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2885,6 +2921,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct ERC1967InvalidImplementation { + #[allow(missing_docs)] pub implementation: alloy::sol_types::private::Address, } #[allow( @@ -2905,7 +2942,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } 
#[automatically_derived] @@ -2971,7 +3008,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3031,7 +3068,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3091,7 +3128,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3151,7 +3188,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3211,7 +3248,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3271,7 +3308,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3331,7 +3368,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3391,7 +3428,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3451,7 +3488,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3511,7 +3548,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3571,7 +3608,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3631,7 +3668,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } 
#[automatically_derived] @@ -3673,6 +3710,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct OwnableInvalidOwner { + #[allow(missing_docs)] pub owner: alloy::sol_types::private::Address, } #[allow( @@ -3693,7 +3731,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3739,6 +3777,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct OwnableUnauthorizedAccount { + #[allow(missing_docs)] pub account: alloy::sol_types::private::Address, } #[allow( @@ -3759,7 +3798,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3823,7 +3862,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3883,7 +3922,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3925,6 +3964,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct UUPSUnsupportedProxiableUUID { + #[allow(missing_docs)] pub slot: alloy::sol_types::private::FixedBytes<32>, } #[allow( @@ -3945,7 +3985,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4009,7 +4049,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4786,7 +4826,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4831,6 +4871,7 @@ pub mod 
LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct UPGRADE_INTERFACE_VERSIONReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::String, } #[allow( @@ -4852,7 +4893,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4881,7 +4922,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4941,6 +4982,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct currentBlockNumberReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::primitives::aliases::U256, } #[allow( @@ -4962,7 +5004,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4991,7 +5033,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5070,7 +5112,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5099,7 +5141,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5159,8 +5201,11 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct finalizedStateReturn { + #[allow(missing_docs)] pub viewNum: u64, + #[allow(missing_docs)] pub blockHeight: u64, + #[allow(missing_docs)] pub blockCommRoot: ::RustType, } #[allow( @@ -5182,7 +5227,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ 
-5219,7 +5264,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5287,9 +5332,13 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct genesisStakeTableStateReturn { + #[allow(missing_docs)] pub threshold: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub blsKeyComm: ::RustType, + #[allow(missing_docs)] pub schnorrKeyComm: ::RustType, + #[allow(missing_docs)] pub amountComm: ::RustType, } #[allow( @@ -5311,7 +5360,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5350,7 +5399,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5425,8 +5474,11 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct genesisStateReturn { + #[allow(missing_docs)] pub viewNum: u64, + #[allow(missing_docs)] pub blockHeight: u64, + #[allow(missing_docs)] pub blockCommRoot: ::RustType, } #[allow( @@ -5448,7 +5500,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5485,7 +5537,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5549,13 +5601,16 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct getHotShotCommitmentCall { + #[allow(missing_docs)] pub hotShotBlockHeight: alloy::sol_types::private::primitives::aliases::U256, } ///Container type for the return parameters of the [`getHotShotCommitment(uint256)`](getHotShotCommitmentCall) 
function. #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct getHotShotCommitmentReturn { + #[allow(missing_docs)] pub hotShotBlockCommRoot: ::RustType, + #[allow(missing_docs)] pub hotshotBlockHeight: u64, } #[allow( @@ -5577,7 +5632,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5612,7 +5667,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5679,6 +5734,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct getStateHistoryCountReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::primitives::aliases::U256, } #[allow( @@ -5700,7 +5756,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5729,7 +5785,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5789,8 +5845,11 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct getVersionReturn { + #[allow(missing_docs)] pub majorVersion: u8, + #[allow(missing_docs)] pub minorVersion: u8, + #[allow(missing_docs)] pub patchVersion: u8, } #[allow( @@ -5812,7 +5871,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5845,7 +5904,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5909,9 +5968,13 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] 
#[derive(Clone)] pub struct initializeCall { + #[allow(missing_docs)] pub _genesis: ::RustType, + #[allow(missing_docs)] pub _genesisStakeTableState: ::RustType, + #[allow(missing_docs)] pub _stateHistoryRetentionPeriod: u32, + #[allow(missing_docs)] pub owner: alloy::sol_types::private::Address, } ///Container type for the return parameters of the [`initialize((uint64,uint64,uint256),(uint256,uint256,uint256,uint256),uint32,address)`](initializeCall) function. @@ -5947,7 +6010,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5986,7 +6049,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6062,6 +6125,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct isPermissionedProverEnabledReturn { + #[allow(missing_docs)] pub _0: bool, } #[allow( @@ -6083,7 +6147,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6112,7 +6176,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6168,13 +6232,16 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct lagOverEscapeHatchThresholdCall { + #[allow(missing_docs)] pub blockNumber: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub blockThreshold: alloy::sol_types::private::primitives::aliases::U256, } ///Container type for the return parameters of the [`lagOverEscapeHatchThreshold(uint256,uint256)`](lagOverEscapeHatchThresholdCall) function. 
#[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct lagOverEscapeHatchThresholdReturn { + #[allow(missing_docs)] pub _0: bool, } #[allow( @@ -6202,7 +6269,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6234,7 +6301,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6300,7 +6367,9 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct newFinalizedStateCall { + #[allow(missing_docs)] pub newState: ::RustType, + #[allow(missing_docs)] pub proof: ::RustType, } ///Container type for the return parameters of the [`newFinalizedState((uint64,uint64,uint256),((uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256))`](newFinalizedStateCall) function. 
@@ -6329,7 +6398,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6361,7 +6430,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6424,6 +6493,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct ownerReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::Address, } #[allow( @@ -6445,7 +6515,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6474,7 +6544,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6534,6 +6604,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct permissionedProverReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::Address, } #[allow( @@ -6555,7 +6626,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6584,7 +6655,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6644,6 +6715,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct proxiableUUIDReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::FixedBytes<32>, } #[allow( @@ -6665,7 +6737,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6694,7 +6766,7 @@ pub mod LightClient { match _t { 
alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6773,7 +6845,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6802,7 +6874,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6858,6 +6930,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct setPermissionedProverCall { + #[allow(missing_docs)] pub prover: alloy::sol_types::private::Address, } ///Container type for the return parameters of the [`setPermissionedProver(address)`](setPermissionedProverCall) function. @@ -6883,7 +6956,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6912,7 +6985,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6972,6 +7045,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct setstateHistoryRetentionPeriodCall { + #[allow(missing_docs)] pub historySeconds: u32, } ///Container type for the return parameters of the [`setstateHistoryRetentionPeriod(uint32)`](setstateHistoryRetentionPeriodCall) function. 
@@ -6997,7 +7071,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7028,7 +7102,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7088,15 +7162,20 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct stateHistoryCommitmentsCall { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::primitives::aliases::U256, } ///Container type for the return parameters of the [`stateHistoryCommitments(uint256)`](stateHistoryCommitmentsCall) function. #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct stateHistoryCommitmentsReturn { + #[allow(missing_docs)] pub l1BlockHeight: u64, + #[allow(missing_docs)] pub l1BlockTimestamp: u64, + #[allow(missing_docs)] pub hotShotBlockHeight: u64, + #[allow(missing_docs)] pub hotShotBlockCommRoot: ::RustType, } #[allow( @@ -7118,7 +7197,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7157,7 +7236,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7236,6 +7315,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct stateHistoryFirstIndexReturn { + #[allow(missing_docs)] pub _0: u64, } #[allow( @@ -7257,7 +7337,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7286,7 +7366,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7346,6 
+7426,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct stateHistoryRetentionPeriodReturn { + #[allow(missing_docs)] pub _0: u32, } #[allow( @@ -7367,7 +7448,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7396,7 +7477,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7452,6 +7533,7 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct transferOwnershipCall { + #[allow(missing_docs)] pub newOwner: alloy::sol_types::private::Address, } ///Container type for the return parameters of the [`transferOwnership(address)`](transferOwnershipCall) function. @@ -7477,7 +7559,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7506,7 +7588,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7566,7 +7648,9 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct upgradeToAndCallCall { + #[allow(missing_docs)] pub newImplementation: alloy::sol_types::private::Address, + #[allow(missing_docs)] pub data: alloy::sol_types::private::Bytes, } ///Container type for the return parameters of the [`upgradeToAndCall(address,bytes)`](upgradeToAndCallCall) function. 
@@ -7598,7 +7682,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7630,7 +7714,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7691,29 +7775,53 @@ pub mod LightClient { }; ///Container for all the [`LightClient`](self) function calls. pub enum LightClientCalls { + #[allow(missing_docs)] UPGRADE_INTERFACE_VERSION(UPGRADE_INTERFACE_VERSIONCall), + #[allow(missing_docs)] currentBlockNumber(currentBlockNumberCall), + #[allow(missing_docs)] disablePermissionedProverMode(disablePermissionedProverModeCall), + #[allow(missing_docs)] finalizedState(finalizedStateCall), + #[allow(missing_docs)] genesisStakeTableState(genesisStakeTableStateCall), + #[allow(missing_docs)] genesisState(genesisStateCall), + #[allow(missing_docs)] getHotShotCommitment(getHotShotCommitmentCall), + #[allow(missing_docs)] getStateHistoryCount(getStateHistoryCountCall), + #[allow(missing_docs)] getVersion(getVersionCall), + #[allow(missing_docs)] initialize(initializeCall), + #[allow(missing_docs)] isPermissionedProverEnabled(isPermissionedProverEnabledCall), + #[allow(missing_docs)] lagOverEscapeHatchThreshold(lagOverEscapeHatchThresholdCall), + #[allow(missing_docs)] newFinalizedState(newFinalizedStateCall), + #[allow(missing_docs)] owner(ownerCall), + #[allow(missing_docs)] permissionedProver(permissionedProverCall), + #[allow(missing_docs)] proxiableUUID(proxiableUUIDCall), + #[allow(missing_docs)] renounceOwnership(renounceOwnershipCall), + #[allow(missing_docs)] setPermissionedProver(setPermissionedProverCall), + #[allow(missing_docs)] setstateHistoryRetentionPeriod(setstateHistoryRetentionPeriodCall), + #[allow(missing_docs)] stateHistoryCommitments(stateHistoryCommitmentsCall), + #[allow(missing_docs)] stateHistoryFirstIndex(stateHistoryFirstIndexCall), + #[allow(missing_docs)] 
stateHistoryRetentionPeriod(stateHistoryRetentionPeriodCall), + #[allow(missing_docs)] transferOwnership(transferOwnershipCall), + #[allow(missing_docs)] upgradeToAndCall(upgradeToAndCallCall), } #[automatically_derived] @@ -7761,66 +7869,66 @@ pub mod LightClient { match self { Self::UPGRADE_INTERFACE_VERSION(_) => { ::SELECTOR - } + }, Self::currentBlockNumber(_) => { ::SELECTOR - } + }, Self::disablePermissionedProverMode(_) => { ::SELECTOR - } + }, Self::finalizedState(_) => { ::SELECTOR - } + }, Self::genesisStakeTableState(_) => { ::SELECTOR - } + }, Self::genesisState(_) => ::SELECTOR, Self::getHotShotCommitment(_) => { ::SELECTOR - } + }, Self::getStateHistoryCount(_) => { ::SELECTOR - } + }, Self::getVersion(_) => ::SELECTOR, Self::initialize(_) => ::SELECTOR, Self::isPermissionedProverEnabled(_) => { ::SELECTOR - } + }, Self::lagOverEscapeHatchThreshold(_) => { ::SELECTOR - } + }, Self::newFinalizedState(_) => { ::SELECTOR - } + }, Self::owner(_) => ::SELECTOR, Self::permissionedProver(_) => { ::SELECTOR - } + }, Self::proxiableUUID(_) => ::SELECTOR, Self::renounceOwnership(_) => { ::SELECTOR - } + }, Self::setPermissionedProver(_) => { ::SELECTOR - } + }, Self::setstateHistoryRetentionPeriod(_) => { ::SELECTOR - } + }, Self::stateHistoryCommitments(_) => { ::SELECTOR - } + }, Self::stateHistoryFirstIndex(_) => { ::SELECTOR - } + }, Self::stateHistoryRetentionPeriod(_) => { ::SELECTOR - } + }, Self::transferOwnership(_) => { ::SELECTOR - } + }, Self::upgradeToAndCall(_) => { ::SELECTOR - } + }, } } #[inline] @@ -8270,122 +8378,142 @@ pub mod LightClient { ::abi_encode_raw( inner, out, ) - } + }, Self::currentBlockNumber(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::disablePermissionedProverMode(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::finalizedState(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::genesisStakeTableState(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::genesisState(inner) => { 
::abi_encode_raw(inner, out) - } + }, Self::getHotShotCommitment(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::getStateHistoryCount(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::getVersion(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::initialize(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::isPermissionedProverEnabled(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::lagOverEscapeHatchThreshold(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::newFinalizedState(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::owner(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::permissionedProver(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::proxiableUUID(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::renounceOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::setPermissionedProver(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::setstateHistoryRetentionPeriod(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::stateHistoryCommitments(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::stateHistoryFirstIndex(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::stateHistoryRetentionPeriod(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::transferOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::upgradeToAndCall(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } ///Container for all the [`LightClient`](self) custom errors. 
pub enum LightClientErrors { + #[allow(missing_docs)] AddressEmptyCode(AddressEmptyCode), + #[allow(missing_docs)] ERC1967InvalidImplementation(ERC1967InvalidImplementation), + #[allow(missing_docs)] ERC1967NonPayable(ERC1967NonPayable), + #[allow(missing_docs)] FailedInnerCall(FailedInnerCall), + #[allow(missing_docs)] InsufficientSnapshotHistory(InsufficientSnapshotHistory), + #[allow(missing_docs)] InvalidAddress(InvalidAddress), + #[allow(missing_docs)] InvalidArgs(InvalidArgs), + #[allow(missing_docs)] InvalidHotShotBlockForCommitmentCheck(InvalidHotShotBlockForCommitmentCheck), + #[allow(missing_docs)] InvalidInitialization(InvalidInitialization), + #[allow(missing_docs)] InvalidMaxStateHistory(InvalidMaxStateHistory), + #[allow(missing_docs)] InvalidProof(InvalidProof), + #[allow(missing_docs)] NoChangeRequired(NoChangeRequired), + #[allow(missing_docs)] NotInitializing(NotInitializing), + #[allow(missing_docs)] OutdatedState(OutdatedState), + #[allow(missing_docs)] OwnableInvalidOwner(OwnableInvalidOwner), + #[allow(missing_docs)] OwnableUnauthorizedAccount(OwnableUnauthorizedAccount), + #[allow(missing_docs)] ProverNotPermissioned(ProverNotPermissioned), + #[allow(missing_docs)] UUPSUnauthorizedCallContext(UUPSUnauthorizedCallContext), + #[allow(missing_docs)] UUPSUnsupportedProxiableUUID(UUPSUnsupportedProxiableUUID), + #[allow(missing_docs)] WrongStakeTableUsed(WrongStakeTableUsed), } #[automatically_derived] @@ -8429,56 +8557,56 @@ pub mod LightClient { match self { Self::AddressEmptyCode(_) => { ::SELECTOR - } + }, Self::ERC1967InvalidImplementation(_) => { ::SELECTOR - } + }, Self::ERC1967NonPayable(_) => { ::SELECTOR - } + }, Self::FailedInnerCall(_) => { ::SELECTOR - } + }, Self::InsufficientSnapshotHistory(_) => { ::SELECTOR - } + }, Self::InvalidAddress(_) => ::SELECTOR, Self::InvalidArgs(_) => ::SELECTOR, Self::InvalidHotShotBlockForCommitmentCheck(_) => { ::SELECTOR - } + }, Self::InvalidInitialization(_) => { ::SELECTOR - } + }, 
Self::InvalidMaxStateHistory(_) => { ::SELECTOR - } + }, Self::InvalidProof(_) => ::SELECTOR, Self::NoChangeRequired(_) => { ::SELECTOR - } + }, Self::NotInitializing(_) => { ::SELECTOR - } + }, Self::OutdatedState(_) => ::SELECTOR, Self::OwnableInvalidOwner(_) => { ::SELECTOR - } + }, Self::OwnableUnauthorizedAccount(_) => { ::SELECTOR - } + }, Self::ProverNotPermissioned(_) => { ::SELECTOR - } + }, Self::UUPSUnauthorizedCallContext(_) => { ::SELECTOR - } + }, Self::UUPSUnsupportedProxiableUUID(_) => { ::SELECTOR - } + }, Self::WrongStakeTableUsed(_) => { ::SELECTOR - } + }, } } #[inline] @@ -8974,12 +9102,19 @@ pub mod LightClient { } ///Container for all the [`LightClient`](self) events. pub enum LightClientEvents { + #[allow(missing_docs)] Initialized(Initialized), + #[allow(missing_docs)] NewState(NewState), + #[allow(missing_docs)] OwnershipTransferred(OwnershipTransferred), + #[allow(missing_docs)] PermissionedProverNotRequired(PermissionedProverNotRequired), + #[allow(missing_docs)] PermissionedProverRequired(PermissionedProverRequired), + #[allow(missing_docs)] Upgrade(Upgrade), + #[allow(missing_docs)] Upgraded(Upgraded), } #[automatically_derived] @@ -9043,17 +9178,17 @@ pub mod LightClient { topics, data, validate, ) .map(Self::Initialized) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::NewState) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log( topics, data, validate, ) .map(Self::OwnershipTransferred) - } + }, Some( ::SIGNATURE_HASH, ) => ::decode_raw_log( @@ -9065,15 +9200,15 @@ pub mod LightClient { topics, data, validate, ) .map(Self::PermissionedProverRequired) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Upgrade) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Upgraded) - } + }, _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { name: ::NAME, log: alloy_sol_types::private::Box::new( @@ -9092,17 
+9227,17 @@ pub mod LightClient { match self { Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::NewState(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::PermissionedProverNotRequired(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::PermissionedProverRequired(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::Upgrade(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), Self::Upgraded(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), } @@ -9111,23 +9246,23 @@ pub mod LightClient { match self { Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::NewState(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::PermissionedProverNotRequired(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::PermissionedProverRequired(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::Upgrade(inner) => alloy_sol_types::private::IntoLogData::into_log_data(inner), Self::Upgraded(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, } } } diff --git a/contract-bindings-alloy/src/lightclientarbitrum.rs b/contract-bindings-alloy/src/lightclientarbitrum.rs index 1edab7451d..105a3c7d38 100644 --- a/contract-bindings-alloy/src/lightclientarbitrum.rs +++ b/contract-bindings-alloy/src/lightclientarbitrum.rs @@ -16,8 +16,9 @@ library BN254 { clippy::empty_structs_with_brackets )] pub mod BN254 { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; #[allow(non_camel_case_types, non_snake_case, 
clippy::pub_underscore_fields)] #[derive(Clone)] pub struct BaseField(alloy::sol_types::private::primitives::aliases::U256); @@ -256,7 +257,9 @@ pub mod BN254 { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct G1Point { + #[allow(missing_docs)] pub x: ::RustType, + #[allow(missing_docs)] pub y: ::RustType, } #[allow( @@ -280,7 +283,7 @@ pub mod BN254 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -560,36 +563,60 @@ library IPlonkVerifier { clippy::empty_structs_with_brackets )] pub mod IPlonkVerifier { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /**```solidity struct PlonkProof { BN254.G1Point wire0; BN254.G1Point wire1; BN254.G1Point wire2; BN254.G1Point wire3; BN254.G1Point wire4; BN254.G1Point prodPerm; BN254.G1Point split0; BN254.G1Point split1; BN254.G1Point split2; BN254.G1Point split3; BN254.G1Point split4; BN254.G1Point zeta; BN254.G1Point zetaOmega; BN254.ScalarField wireEval0; BN254.ScalarField wireEval1; BN254.ScalarField wireEval2; BN254.ScalarField wireEval3; BN254.ScalarField wireEval4; BN254.ScalarField sigmaEval0; BN254.ScalarField sigmaEval1; BN254.ScalarField sigmaEval2; BN254.ScalarField sigmaEval3; BN254.ScalarField prodPermZetaOmegaEval; } ```*/ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct PlonkProof { + #[allow(missing_docs)] pub wire0: ::RustType, + #[allow(missing_docs)] pub wire1: ::RustType, + #[allow(missing_docs)] pub wire2: ::RustType, + #[allow(missing_docs)] pub wire3: ::RustType, + #[allow(missing_docs)] pub wire4: ::RustType, + #[allow(missing_docs)] pub prodPerm: ::RustType, + #[allow(missing_docs)] pub split0: ::RustType, + #[allow(missing_docs)] pub split1: ::RustType, + #[allow(missing_docs)] pub split2: ::RustType, + #[allow(missing_docs)] pub split3: ::RustType, + #[allow(missing_docs)] pub split4: 
::RustType, + #[allow(missing_docs)] pub zeta: ::RustType, + #[allow(missing_docs)] pub zetaOmega: ::RustType, + #[allow(missing_docs)] pub wireEval0: ::RustType, + #[allow(missing_docs)] pub wireEval1: ::RustType, + #[allow(missing_docs)] pub wireEval2: ::RustType, + #[allow(missing_docs)] pub wireEval3: ::RustType, + #[allow(missing_docs)] pub wireEval4: ::RustType, + #[allow(missing_docs)] pub sigmaEval0: ::RustType, + #[allow(missing_docs)] pub sigmaEval1: ::RustType, + #[allow(missing_docs)] pub sigmaEval2: ::RustType, + #[allow(missing_docs)] pub sigmaEval3: ::RustType, + #[allow(missing_docs)] pub prodPermZetaOmegaEval: ::RustType, } #[allow( @@ -658,7 +685,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1256,16 +1283,20 @@ library LightClient { clippy::empty_structs_with_brackets )] pub mod LightClient { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /**```solidity struct LightClientState { uint64 viewNum; uint64 blockHeight; BN254.ScalarField blockCommRoot; } ```*/ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct LightClientState { + #[allow(missing_docs)] pub viewNum: u64, + #[allow(missing_docs)] pub blockHeight: u64, + #[allow(missing_docs)] pub blockCommRoot: ::RustType, } #[allow( @@ -1294,7 +1325,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1479,9 +1510,13 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct StakeTableState { + #[allow(missing_docs)] pub threshold: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub blsKeyComm: ::RustType, + #[allow(missing_docs)] pub schnorrKeyComm: ::RustType, + #[allow(missing_docs)] pub amountComm: ::RustType, } 
#[allow( @@ -1512,7 +1547,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2934,8 +2969,9 @@ interface LightClientArbitrum { clippy::empty_structs_with_brackets )] pub mod LightClientArbitrum { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /// The creation / init bytecode of the contract. /// /// ```text @@ -2963,6 +2999,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct AddressEmptyCode { + #[allow(missing_docs)] pub target: alloy::sol_types::private::Address, } #[allow( @@ -2983,7 +3020,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3029,6 +3066,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct ERC1967InvalidImplementation { + #[allow(missing_docs)] pub implementation: alloy::sol_types::private::Address, } #[allow( @@ -3049,7 +3087,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3115,7 +3153,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3175,7 +3213,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3235,7 +3273,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3295,7 +3333,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } 
#[automatically_derived] @@ -3355,7 +3393,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3415,7 +3453,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3475,7 +3513,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3535,7 +3573,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3595,7 +3633,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3655,7 +3693,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3715,7 +3753,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3775,7 +3813,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3817,6 +3855,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct OwnableInvalidOwner { + #[allow(missing_docs)] pub owner: alloy::sol_types::private::Address, } #[allow( @@ -3837,7 +3876,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3883,6 +3922,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub 
struct OwnableUnauthorizedAccount { + #[allow(missing_docs)] pub account: alloy::sol_types::private::Address, } #[allow( @@ -3903,7 +3943,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3967,7 +4007,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4027,7 +4067,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4069,6 +4109,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct UUPSUnsupportedProxiableUUID { + #[allow(missing_docs)] pub slot: alloy::sol_types::private::FixedBytes<32>, } #[allow( @@ -4089,7 +4130,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4153,7 +4194,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4921,6 +4962,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct UPGRADE_INTERFACE_VERSIONReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::String, } #[allow( @@ -4942,7 +4984,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4971,7 +5013,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5031,6 +5073,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, 
clippy::pub_underscore_fields)] #[derive(Clone)] pub struct currentBlockNumberReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::primitives::aliases::U256, } #[allow( @@ -5052,7 +5095,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5081,7 +5124,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5160,7 +5203,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5189,7 +5232,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5249,8 +5292,11 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct finalizedStateReturn { + #[allow(missing_docs)] pub viewNum: u64, + #[allow(missing_docs)] pub blockHeight: u64, + #[allow(missing_docs)] pub blockCommRoot: ::RustType, } #[allow( @@ -5272,7 +5318,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5309,7 +5355,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5377,9 +5423,13 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct genesisStakeTableStateReturn { + #[allow(missing_docs)] pub threshold: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub blsKeyComm: ::RustType, + #[allow(missing_docs)] pub schnorrKeyComm: ::RustType, + #[allow(missing_docs)] pub amountComm: 
::RustType, } #[allow( @@ -5401,7 +5451,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5440,7 +5490,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5515,8 +5565,11 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct genesisStateReturn { + #[allow(missing_docs)] pub viewNum: u64, + #[allow(missing_docs)] pub blockHeight: u64, + #[allow(missing_docs)] pub blockCommRoot: ::RustType, } #[allow( @@ -5538,7 +5591,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5575,7 +5628,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5639,13 +5692,16 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct getHotShotCommitmentCall { + #[allow(missing_docs)] pub hotShotBlockHeight: alloy::sol_types::private::primitives::aliases::U256, } ///Container type for the return parameters of the [`getHotShotCommitment(uint256)`](getHotShotCommitmentCall) function. 
#[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct getHotShotCommitmentReturn { + #[allow(missing_docs)] pub hotShotBlockCommRoot: ::RustType, + #[allow(missing_docs)] pub hotshotBlockHeight: u64, } #[allow( @@ -5667,7 +5723,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5702,7 +5758,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5769,6 +5825,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct getStateHistoryCountReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::primitives::aliases::U256, } #[allow( @@ -5790,7 +5847,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5819,7 +5876,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5879,8 +5936,11 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct getVersionReturn { + #[allow(missing_docs)] pub majorVersion: u8, + #[allow(missing_docs)] pub minorVersion: u8, + #[allow(missing_docs)] pub patchVersion: u8, } #[allow( @@ -5902,7 +5962,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5935,7 +5995,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5999,10 +6059,14 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, 
non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct initializeCall { + #[allow(missing_docs)] pub _genesis: ::RustType, + #[allow(missing_docs)] pub _genesisStakeTableState: ::RustType, + #[allow(missing_docs)] pub _stateHistoryRetentionPeriod: u32, + #[allow(missing_docs)] pub owner: alloy::sol_types::private::Address, } ///Container type for the return parameters of the [`initialize((uint64,uint64,uint256),(uint256,uint256,uint256,uint256),uint32,address)`](initializeCall) function. @@ -6038,7 +6102,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6077,7 +6141,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6155,6 +6219,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct isPermissionedProverEnabledReturn { + #[allow(missing_docs)] pub _0: bool, } #[allow( @@ -6176,7 +6241,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6205,7 +6270,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6261,13 +6326,16 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct lagOverEscapeHatchThresholdCall { + #[allow(missing_docs)] pub blockNumber: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub blockThreshold: alloy::sol_types::private::primitives::aliases::U256, } ///Container type for the return parameters of the [`lagOverEscapeHatchThreshold(uint256,uint256)`](lagOverEscapeHatchThresholdCall) function. 
#[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct lagOverEscapeHatchThresholdReturn { + #[allow(missing_docs)] pub _0: bool, } #[allow( @@ -6295,7 +6363,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6327,7 +6395,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6393,7 +6461,9 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct newFinalizedStateCall { + #[allow(missing_docs)] pub newState: ::RustType, + #[allow(missing_docs)] pub proof: ::RustType, } ///Container type for the return parameters of the [`newFinalizedState((uint64,uint64,uint256),((uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256))`](newFinalizedStateCall) function. 
@@ -6423,7 +6493,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6455,7 +6525,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6520,6 +6590,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct ownerReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::Address, } #[allow( @@ -6541,7 +6612,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6570,7 +6641,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6630,6 +6701,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct permissionedProverReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::Address, } #[allow( @@ -6651,7 +6723,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6680,7 +6752,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6740,6 +6812,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct proxiableUUIDReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::FixedBytes<32>, } #[allow( @@ -6761,7 +6834,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ 
-6790,7 +6863,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6869,7 +6942,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6898,7 +6971,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6954,6 +7027,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct setPermissionedProverCall { + #[allow(missing_docs)] pub prover: alloy::sol_types::private::Address, } ///Container type for the return parameters of the [`setPermissionedProver(address)`](setPermissionedProverCall) function. @@ -6979,7 +7053,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7008,7 +7082,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7068,6 +7142,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct setstateHistoryRetentionPeriodCall { + #[allow(missing_docs)] pub historySeconds: u32, } ///Container type for the return parameters of the [`setstateHistoryRetentionPeriod(uint32)`](setstateHistoryRetentionPeriodCall) function. 
@@ -7093,7 +7168,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7124,7 +7199,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7184,15 +7259,20 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct stateHistoryCommitmentsCall { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::primitives::aliases::U256, } ///Container type for the return parameters of the [`stateHistoryCommitments(uint256)`](stateHistoryCommitmentsCall) function. #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct stateHistoryCommitmentsReturn { + #[allow(missing_docs)] pub l1BlockHeight: u64, + #[allow(missing_docs)] pub l1BlockTimestamp: u64, + #[allow(missing_docs)] pub hotShotBlockHeight: u64, + #[allow(missing_docs)] pub hotShotBlockCommRoot: ::RustType, } #[allow( @@ -7214,7 +7294,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7253,7 +7333,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7332,6 +7412,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct stateHistoryFirstIndexReturn { + #[allow(missing_docs)] pub _0: u64, } #[allow( @@ -7353,7 +7434,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7382,7 +7463,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) 
=> {} + >(_) => {}, } } #[automatically_derived] @@ -7442,6 +7523,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct stateHistoryRetentionPeriodReturn { + #[allow(missing_docs)] pub _0: u32, } #[allow( @@ -7463,7 +7545,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7492,7 +7574,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7548,6 +7630,7 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct transferOwnershipCall { + #[allow(missing_docs)] pub newOwner: alloy::sol_types::private::Address, } ///Container type for the return parameters of the [`transferOwnership(address)`](transferOwnershipCall) function. @@ -7573,7 +7656,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7602,7 +7685,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7662,7 +7745,9 @@ pub mod LightClientArbitrum { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct upgradeToAndCallCall { + #[allow(missing_docs)] pub newImplementation: alloy::sol_types::private::Address, + #[allow(missing_docs)] pub data: alloy::sol_types::private::Bytes, } ///Container type for the return parameters of the [`upgradeToAndCall(address,bytes)`](upgradeToAndCallCall) function. 
@@ -7694,7 +7779,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7726,7 +7811,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7787,29 +7872,53 @@ pub mod LightClientArbitrum { }; ///Container for all the [`LightClientArbitrum`](self) function calls. pub enum LightClientArbitrumCalls { + #[allow(missing_docs)] UPGRADE_INTERFACE_VERSION(UPGRADE_INTERFACE_VERSIONCall), + #[allow(missing_docs)] currentBlockNumber(currentBlockNumberCall), + #[allow(missing_docs)] disablePermissionedProverMode(disablePermissionedProverModeCall), + #[allow(missing_docs)] finalizedState(finalizedStateCall), + #[allow(missing_docs)] genesisStakeTableState(genesisStakeTableStateCall), + #[allow(missing_docs)] genesisState(genesisStateCall), + #[allow(missing_docs)] getHotShotCommitment(getHotShotCommitmentCall), + #[allow(missing_docs)] getStateHistoryCount(getStateHistoryCountCall), + #[allow(missing_docs)] getVersion(getVersionCall), + #[allow(missing_docs)] initialize(initializeCall), + #[allow(missing_docs)] isPermissionedProverEnabled(isPermissionedProverEnabledCall), + #[allow(missing_docs)] lagOverEscapeHatchThreshold(lagOverEscapeHatchThresholdCall), + #[allow(missing_docs)] newFinalizedState(newFinalizedStateCall), + #[allow(missing_docs)] owner(ownerCall), + #[allow(missing_docs)] permissionedProver(permissionedProverCall), + #[allow(missing_docs)] proxiableUUID(proxiableUUIDCall), + #[allow(missing_docs)] renounceOwnership(renounceOwnershipCall), + #[allow(missing_docs)] setPermissionedProver(setPermissionedProverCall), + #[allow(missing_docs)] setstateHistoryRetentionPeriod(setstateHistoryRetentionPeriodCall), + #[allow(missing_docs)] stateHistoryCommitments(stateHistoryCommitmentsCall), + #[allow(missing_docs)] 
stateHistoryFirstIndex(stateHistoryFirstIndexCall), + #[allow(missing_docs)] stateHistoryRetentionPeriod(stateHistoryRetentionPeriodCall), + #[allow(missing_docs)] transferOwnership(transferOwnershipCall), + #[allow(missing_docs)] upgradeToAndCall(upgradeToAndCallCall), } #[automatically_derived] @@ -7857,66 +7966,66 @@ pub mod LightClientArbitrum { match self { Self::UPGRADE_INTERFACE_VERSION(_) => { ::SELECTOR - } + }, Self::currentBlockNumber(_) => { ::SELECTOR - } + }, Self::disablePermissionedProverMode(_) => { ::SELECTOR - } + }, Self::finalizedState(_) => { ::SELECTOR - } + }, Self::genesisStakeTableState(_) => { ::SELECTOR - } + }, Self::genesisState(_) => ::SELECTOR, Self::getHotShotCommitment(_) => { ::SELECTOR - } + }, Self::getStateHistoryCount(_) => { ::SELECTOR - } + }, Self::getVersion(_) => ::SELECTOR, Self::initialize(_) => ::SELECTOR, Self::isPermissionedProverEnabled(_) => { ::SELECTOR - } + }, Self::lagOverEscapeHatchThreshold(_) => { ::SELECTOR - } + }, Self::newFinalizedState(_) => { ::SELECTOR - } + }, Self::owner(_) => ::SELECTOR, Self::permissionedProver(_) => { ::SELECTOR - } + }, Self::proxiableUUID(_) => ::SELECTOR, Self::renounceOwnership(_) => { ::SELECTOR - } + }, Self::setPermissionedProver(_) => { ::SELECTOR - } + }, Self::setstateHistoryRetentionPeriod(_) => { ::SELECTOR - } + }, Self::stateHistoryCommitments(_) => { ::SELECTOR - } + }, Self::stateHistoryFirstIndex(_) => { ::SELECTOR - } + }, Self::stateHistoryRetentionPeriod(_) => { ::SELECTOR - } + }, Self::transferOwnership(_) => { ::SELECTOR - } + }, Self::upgradeToAndCall(_) => { ::SELECTOR - } + }, } } #[inline] @@ -8363,122 +8472,142 @@ pub mod LightClientArbitrum { ::abi_encode_raw( inner, out, ) - } + }, Self::currentBlockNumber(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::disablePermissionedProverMode(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::finalizedState(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::genesisStakeTableState(inner) => 
{ ::abi_encode_raw( inner, out, ) - } + }, Self::genesisState(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::getHotShotCommitment(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::getStateHistoryCount(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::getVersion(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::initialize(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::isPermissionedProverEnabled(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::lagOverEscapeHatchThreshold(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::newFinalizedState(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::owner(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::permissionedProver(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::proxiableUUID(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::renounceOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::setPermissionedProver(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::setstateHistoryRetentionPeriod(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::stateHistoryCommitments(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::stateHistoryFirstIndex(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::stateHistoryRetentionPeriod(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::transferOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::upgradeToAndCall(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } ///Container for all the [`LightClientArbitrum`](self) custom errors. 
pub enum LightClientArbitrumErrors { + #[allow(missing_docs)] AddressEmptyCode(AddressEmptyCode), + #[allow(missing_docs)] ERC1967InvalidImplementation(ERC1967InvalidImplementation), + #[allow(missing_docs)] ERC1967NonPayable(ERC1967NonPayable), + #[allow(missing_docs)] FailedInnerCall(FailedInnerCall), + #[allow(missing_docs)] InsufficientSnapshotHistory(InsufficientSnapshotHistory), + #[allow(missing_docs)] InvalidAddress(InvalidAddress), + #[allow(missing_docs)] InvalidArgs(InvalidArgs), + #[allow(missing_docs)] InvalidHotShotBlockForCommitmentCheck(InvalidHotShotBlockForCommitmentCheck), + #[allow(missing_docs)] InvalidInitialization(InvalidInitialization), + #[allow(missing_docs)] InvalidMaxStateHistory(InvalidMaxStateHistory), + #[allow(missing_docs)] InvalidProof(InvalidProof), + #[allow(missing_docs)] NoChangeRequired(NoChangeRequired), + #[allow(missing_docs)] NotInitializing(NotInitializing), + #[allow(missing_docs)] OutdatedState(OutdatedState), + #[allow(missing_docs)] OwnableInvalidOwner(OwnableInvalidOwner), + #[allow(missing_docs)] OwnableUnauthorizedAccount(OwnableUnauthorizedAccount), + #[allow(missing_docs)] ProverNotPermissioned(ProverNotPermissioned), + #[allow(missing_docs)] UUPSUnauthorizedCallContext(UUPSUnauthorizedCallContext), + #[allow(missing_docs)] UUPSUnsupportedProxiableUUID(UUPSUnsupportedProxiableUUID), + #[allow(missing_docs)] WrongStakeTableUsed(WrongStakeTableUsed), } #[automatically_derived] @@ -8522,56 +8651,56 @@ pub mod LightClientArbitrum { match self { Self::AddressEmptyCode(_) => { ::SELECTOR - } + }, Self::ERC1967InvalidImplementation(_) => { ::SELECTOR - } + }, Self::ERC1967NonPayable(_) => { ::SELECTOR - } + }, Self::FailedInnerCall(_) => { ::SELECTOR - } + }, Self::InsufficientSnapshotHistory(_) => { ::SELECTOR - } + }, Self::InvalidAddress(_) => ::SELECTOR, Self::InvalidArgs(_) => ::SELECTOR, Self::InvalidHotShotBlockForCommitmentCheck(_) => { ::SELECTOR - } + }, Self::InvalidInitialization(_) => { ::SELECTOR - } + }, 
Self::InvalidMaxStateHistory(_) => { ::SELECTOR - } + }, Self::InvalidProof(_) => ::SELECTOR, Self::NoChangeRequired(_) => { ::SELECTOR - } + }, Self::NotInitializing(_) => { ::SELECTOR - } + }, Self::OutdatedState(_) => ::SELECTOR, Self::OwnableInvalidOwner(_) => { ::SELECTOR - } + }, Self::OwnableUnauthorizedAccount(_) => { ::SELECTOR - } + }, Self::ProverNotPermissioned(_) => { ::SELECTOR - } + }, Self::UUPSUnauthorizedCallContext(_) => { ::SELECTOR - } + }, Self::UUPSUnsupportedProxiableUUID(_) => { ::SELECTOR - } + }, Self::WrongStakeTableUsed(_) => { ::SELECTOR - } + }, } } #[inline] @@ -9067,12 +9196,19 @@ pub mod LightClientArbitrum { } ///Container for all the [`LightClientArbitrum`](self) events. pub enum LightClientArbitrumEvents { + #[allow(missing_docs)] Initialized(Initialized), + #[allow(missing_docs)] NewState(NewState), + #[allow(missing_docs)] OwnershipTransferred(OwnershipTransferred), + #[allow(missing_docs)] PermissionedProverNotRequired(PermissionedProverNotRequired), + #[allow(missing_docs)] PermissionedProverRequired(PermissionedProverRequired), + #[allow(missing_docs)] Upgrade(Upgrade), + #[allow(missing_docs)] Upgraded(Upgraded), } #[automatically_derived] @@ -9136,17 +9272,17 @@ pub mod LightClientArbitrum { topics, data, validate, ) .map(Self::Initialized) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::NewState) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log( topics, data, validate, ) .map(Self::OwnershipTransferred) - } + }, Some( ::SIGNATURE_HASH, ) => ::decode_raw_log( @@ -9158,15 +9294,15 @@ pub mod LightClientArbitrum { topics, data, validate, ) .map(Self::PermissionedProverRequired) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Upgrade) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Upgraded) - } + }, _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { name: ::NAME, log: 
alloy_sol_types::private::Box::new( @@ -9185,17 +9321,17 @@ pub mod LightClientArbitrum { match self { Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::NewState(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::PermissionedProverNotRequired(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::PermissionedProverRequired(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::Upgrade(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), Self::Upgraded(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), } @@ -9204,23 +9340,23 @@ pub mod LightClientArbitrum { match self { Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::NewState(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::PermissionedProverNotRequired(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::PermissionedProverRequired(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::Upgrade(inner) => alloy_sol_types::private::IntoLogData::into_log_data(inner), Self::Upgraded(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, } } } diff --git a/contract-bindings-alloy/src/lightclientmock.rs b/contract-bindings-alloy/src/lightclientmock.rs index 6d30b708d7..7feb65d5ef 100644 --- a/contract-bindings-alloy/src/lightclientmock.rs +++ b/contract-bindings-alloy/src/lightclientmock.rs @@ -16,8 +16,9 @@ library BN254 { clippy::empty_structs_with_brackets )] pub mod BN254 { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; 
#[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct BaseField(alloy::sol_types::private::primitives::aliases::U256); @@ -256,7 +257,9 @@ pub mod BN254 { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct G1Point { + #[allow(missing_docs)] pub x: ::RustType, + #[allow(missing_docs)] pub y: ::RustType, } #[allow( @@ -280,7 +283,7 @@ pub mod BN254 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -560,36 +563,60 @@ library IPlonkVerifier { clippy::empty_structs_with_brackets )] pub mod IPlonkVerifier { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /**```solidity struct PlonkProof { BN254.G1Point wire0; BN254.G1Point wire1; BN254.G1Point wire2; BN254.G1Point wire3; BN254.G1Point wire4; BN254.G1Point prodPerm; BN254.G1Point split0; BN254.G1Point split1; BN254.G1Point split2; BN254.G1Point split3; BN254.G1Point split4; BN254.G1Point zeta; BN254.G1Point zetaOmega; BN254.ScalarField wireEval0; BN254.ScalarField wireEval1; BN254.ScalarField wireEval2; BN254.ScalarField wireEval3; BN254.ScalarField wireEval4; BN254.ScalarField sigmaEval0; BN254.ScalarField sigmaEval1; BN254.ScalarField sigmaEval2; BN254.ScalarField sigmaEval3; BN254.ScalarField prodPermZetaOmegaEval; } ```*/ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct PlonkProof { + #[allow(missing_docs)] pub wire0: ::RustType, + #[allow(missing_docs)] pub wire1: ::RustType, + #[allow(missing_docs)] pub wire2: ::RustType, + #[allow(missing_docs)] pub wire3: ::RustType, + #[allow(missing_docs)] pub wire4: ::RustType, + #[allow(missing_docs)] pub prodPerm: ::RustType, + #[allow(missing_docs)] pub split0: ::RustType, + #[allow(missing_docs)] pub split1: ::RustType, + #[allow(missing_docs)] pub split2: ::RustType, + #[allow(missing_docs)] pub split3: 
::RustType, + #[allow(missing_docs)] pub split4: ::RustType, + #[allow(missing_docs)] pub zeta: ::RustType, + #[allow(missing_docs)] pub zetaOmega: ::RustType, + #[allow(missing_docs)] pub wireEval0: ::RustType, + #[allow(missing_docs)] pub wireEval1: ::RustType, + #[allow(missing_docs)] pub wireEval2: ::RustType, + #[allow(missing_docs)] pub wireEval3: ::RustType, + #[allow(missing_docs)] pub wireEval4: ::RustType, + #[allow(missing_docs)] pub sigmaEval0: ::RustType, + #[allow(missing_docs)] pub sigmaEval1: ::RustType, + #[allow(missing_docs)] pub sigmaEval2: ::RustType, + #[allow(missing_docs)] pub sigmaEval3: ::RustType, + #[allow(missing_docs)] pub prodPermZetaOmegaEval: ::RustType, } #[allow( @@ -658,7 +685,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1257,16 +1284,20 @@ library LightClient { clippy::empty_structs_with_brackets )] pub mod LightClient { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /**```solidity struct LightClientState { uint64 viewNum; uint64 blockHeight; BN254.ScalarField blockCommRoot; } ```*/ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct LightClientState { + #[allow(missing_docs)] pub viewNum: u64, + #[allow(missing_docs)] pub blockHeight: u64, + #[allow(missing_docs)] pub blockCommRoot: ::RustType, } #[allow( @@ -1295,7 +1326,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1480,9 +1511,13 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct StakeTableState { + #[allow(missing_docs)] pub threshold: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub blsKeyComm: ::RustType, + #[allow(missing_docs)] pub schnorrKeyComm: ::RustType, + 
#[allow(missing_docs)] pub amountComm: ::RustType, } #[allow( @@ -1513,7 +1548,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1712,9 +1747,13 @@ pub mod LightClient { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct StateHistoryCommitment { + #[allow(missing_docs)] pub l1BlockHeight: u64, + #[allow(missing_docs)] pub l1BlockTimestamp: u64, + #[allow(missing_docs)] pub hotShotBlockHeight: u64, + #[allow(missing_docs)] pub hotShotBlockCommRoot: ::RustType, } #[allow( @@ -1745,7 +1784,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3340,8 +3379,9 @@ interface LightClientMock { clippy::empty_structs_with_brackets )] pub mod LightClientMock { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /// The creation / init bytecode of the contract. 
/// /// ```text @@ -3369,6 +3409,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct AddressEmptyCode { + #[allow(missing_docs)] pub target: alloy::sol_types::private::Address, } #[allow( @@ -3389,7 +3430,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3435,6 +3476,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct ERC1967InvalidImplementation { + #[allow(missing_docs)] pub implementation: alloy::sol_types::private::Address, } #[allow( @@ -3455,7 +3497,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3521,7 +3563,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3581,7 +3623,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3641,7 +3683,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3701,7 +3743,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3761,7 +3803,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3821,7 +3863,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3881,7 +3923,7 @@ pub mod LightClientMock { match _t { 
alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3941,7 +3983,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4001,7 +4043,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4061,7 +4103,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4121,7 +4163,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4181,7 +4223,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4223,6 +4265,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct OwnableInvalidOwner { + #[allow(missing_docs)] pub owner: alloy::sol_types::private::Address, } #[allow( @@ -4243,7 +4286,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4289,6 +4332,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct OwnableUnauthorizedAccount { + #[allow(missing_docs)] pub account: alloy::sol_types::private::Address, } #[allow( @@ -4309,7 +4353,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4373,7 +4417,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4433,7 
+4477,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4475,6 +4519,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct UUPSUnsupportedProxiableUUID { + #[allow(missing_docs)] pub slot: alloy::sol_types::private::FixedBytes<32>, } #[allow( @@ -4495,7 +4540,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4559,7 +4604,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5323,9 +5368,12 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct constructorCall { + #[allow(missing_docs)] pub genesis: ::RustType, + #[allow(missing_docs)] pub genesisStakeTableState: ::RustType, + #[allow(missing_docs)] pub maxHistorySeconds: u32, } const _: () = { @@ -5349,7 +5397,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5416,6 +5464,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct UPGRADE_INTERFACE_VERSIONReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::String, } #[allow( @@ -5437,7 +5486,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5466,7 +5515,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5526,6 +5575,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, 
clippy::pub_underscore_fields)] #[derive(Clone)] pub struct currentBlockNumberReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::primitives::aliases::U256, } #[allow( @@ -5547,7 +5597,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5576,7 +5626,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5655,7 +5705,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5684,7 +5734,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5744,8 +5794,11 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct finalizedStateReturn { + #[allow(missing_docs)] pub viewNum: u64, + #[allow(missing_docs)] pub blockHeight: u64, + #[allow(missing_docs)] pub blockCommRoot: ::RustType, } #[allow( @@ -5767,7 +5820,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5804,7 +5857,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5872,9 +5925,13 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct genesisStakeTableStateReturn { + #[allow(missing_docs)] pub threshold: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub blsKeyComm: ::RustType, + #[allow(missing_docs)] pub schnorrKeyComm: ::RustType, + #[allow(missing_docs)] pub amountComm: ::RustType, } #[allow( @@ -5896,7 
+5953,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5935,7 +5992,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6010,8 +6067,11 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct genesisStateReturn { + #[allow(missing_docs)] pub viewNum: u64, + #[allow(missing_docs)] pub blockHeight: u64, + #[allow(missing_docs)] pub blockCommRoot: ::RustType, } #[allow( @@ -6033,7 +6093,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6070,7 +6130,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6134,13 +6194,16 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct getHotShotCommitmentCall { + #[allow(missing_docs)] pub hotShotBlockHeight: alloy::sol_types::private::primitives::aliases::U256, } ///Container type for the return parameters of the [`getHotShotCommitment(uint256)`](getHotShotCommitmentCall) function. 
#[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct getHotShotCommitmentReturn { + #[allow(missing_docs)] pub hotShotBlockCommRoot: ::RustType, + #[allow(missing_docs)] pub hotshotBlockHeight: u64, } #[allow( @@ -6162,7 +6225,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6197,7 +6260,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6264,6 +6327,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct getStateHistoryCountReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::primitives::aliases::U256, } #[allow( @@ -6285,7 +6349,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6314,7 +6378,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6374,8 +6438,11 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct getVersionReturn { + #[allow(missing_docs)] pub majorVersion: u8, + #[allow(missing_docs)] pub minorVersion: u8, + #[allow(missing_docs)] pub patchVersion: u8, } #[allow( @@ -6397,7 +6464,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6430,7 +6497,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6494,10 +6561,14 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, 
clippy::pub_underscore_fields)] #[derive(Clone)] pub struct initializeCall { + #[allow(missing_docs)] pub _genesis: ::RustType, + #[allow(missing_docs)] pub _genesisStakeTableState: ::RustType, + #[allow(missing_docs)] pub _stateHistoryRetentionPeriod: u32, + #[allow(missing_docs)] pub owner: alloy::sol_types::private::Address, } ///Container type for the return parameters of the [`initialize((uint64,uint64,uint256),(uint256,uint256,uint256,uint256),uint32,address)`](initializeCall) function. @@ -6533,7 +6604,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6572,7 +6643,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6650,6 +6721,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct isPermissionedProverEnabledReturn { + #[allow(missing_docs)] pub _0: bool, } #[allow( @@ -6671,7 +6743,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6700,7 +6772,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6756,13 +6828,16 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct lagOverEscapeHatchThresholdCall { + #[allow(missing_docs)] pub blockNumber: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub threshold: alloy::sol_types::private::primitives::aliases::U256, } ///Container type for the return parameters of the [`lagOverEscapeHatchThreshold(uint256,uint256)`](lagOverEscapeHatchThresholdCall) function. 
#[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct lagOverEscapeHatchThresholdReturn { + #[allow(missing_docs)] pub _0: bool, } #[allow( @@ -6790,7 +6865,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6822,7 +6897,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6888,7 +6963,9 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct newFinalizedStateCall { + #[allow(missing_docs)] pub newState: ::RustType, + #[allow(missing_docs)] pub proof: ::RustType, } ///Container type for the return parameters of the [`newFinalizedState((uint64,uint64,uint256),((uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256))`](newFinalizedStateCall) function. 
@@ -6918,7 +6995,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6950,7 +7027,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7015,6 +7092,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct ownerReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::Address, } #[allow( @@ -7036,7 +7114,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7065,7 +7143,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7125,6 +7203,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct permissionedProverReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::Address, } #[allow( @@ -7146,7 +7225,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7175,7 +7254,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7235,6 +7314,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct proxiableUUIDReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::FixedBytes<32>, } #[allow( @@ -7256,7 +7336,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7285,7 +7365,7 @@ pub mod 
LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7364,7 +7444,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7393,7 +7473,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7449,6 +7529,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct setFinalizedStateCall { + #[allow(missing_docs)] pub state: ::RustType, } ///Container type for the return parameters of the [`setFinalizedState((uint64,uint64,uint256))`](setFinalizedStateCall) function. @@ -7475,7 +7556,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7504,7 +7585,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7564,6 +7645,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct setHotShotDownSinceCall { + #[allow(missing_docs)] pub l1Height: alloy::sol_types::private::primitives::aliases::U256, } ///Container type for the return parameters of the [`setHotShotDownSince(uint256)`](setHotShotDownSinceCall) function. 
@@ -7589,7 +7671,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7618,7 +7700,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7701,7 +7783,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7730,7 +7812,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7786,6 +7868,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct setPermissionedProverCall { + #[allow(missing_docs)] pub prover: alloy::sol_types::private::Address, } ///Container type for the return parameters of the [`setPermissionedProver(address)`](setPermissionedProverCall) function. 
@@ -7811,7 +7894,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7840,7 +7923,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7900,6 +7983,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct setStateHistoryCall { + #[allow(missing_docs)] pub _stateHistoryCommitments: alloy::sol_types::private::Vec< ::RustType, >, @@ -7932,7 +8016,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7963,7 +8047,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8024,6 +8108,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct setstateHistoryRetentionPeriodCall { + #[allow(missing_docs)] pub historySeconds: u32, } ///Container type for the return parameters of the [`setstateHistoryRetentionPeriod(uint32)`](setstateHistoryRetentionPeriodCall) function. 
@@ -8049,7 +8134,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8080,7 +8165,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8140,15 +8225,20 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct stateHistoryCommitmentsCall { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::primitives::aliases::U256, } ///Container type for the return parameters of the [`stateHistoryCommitments(uint256)`](stateHistoryCommitmentsCall) function. #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct stateHistoryCommitmentsReturn { + #[allow(missing_docs)] pub l1BlockHeight: u64, + #[allow(missing_docs)] pub l1BlockTimestamp: u64, + #[allow(missing_docs)] pub hotShotBlockHeight: u64, + #[allow(missing_docs)] pub hotShotBlockCommRoot: ::RustType, } #[allow( @@ -8170,7 +8260,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8209,7 +8299,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8288,6 +8378,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct stateHistoryFirstIndexReturn { + #[allow(missing_docs)] pub _0: u64, } #[allow( @@ -8309,7 +8400,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8338,7 +8429,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } 
#[automatically_derived] @@ -8398,6 +8489,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct stateHistoryRetentionPeriodReturn { + #[allow(missing_docs)] pub _0: u32, } #[allow( @@ -8419,7 +8511,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8448,7 +8540,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8504,6 +8596,7 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct transferOwnershipCall { + #[allow(missing_docs)] pub newOwner: alloy::sol_types::private::Address, } ///Container type for the return parameters of the [`transferOwnership(address)`](transferOwnershipCall) function. @@ -8529,7 +8622,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8558,7 +8651,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8618,7 +8711,9 @@ pub mod LightClientMock { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct upgradeToAndCallCall { + #[allow(missing_docs)] pub newImplementation: alloy::sol_types::private::Address, + #[allow(missing_docs)] pub data: alloy::sol_types::private::Bytes, } ///Container type for the return parameters of the [`upgradeToAndCall(address,bytes)`](upgradeToAndCallCall) function. 
@@ -8650,7 +8745,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8682,7 +8777,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8743,33 +8838,61 @@ pub mod LightClientMock { }; ///Container for all the [`LightClientMock`](self) function calls. pub enum LightClientMockCalls { + #[allow(missing_docs)] UPGRADE_INTERFACE_VERSION(UPGRADE_INTERFACE_VERSIONCall), + #[allow(missing_docs)] currentBlockNumber(currentBlockNumberCall), + #[allow(missing_docs)] disablePermissionedProverMode(disablePermissionedProverModeCall), + #[allow(missing_docs)] finalizedState(finalizedStateCall), + #[allow(missing_docs)] genesisStakeTableState(genesisStakeTableStateCall), + #[allow(missing_docs)] genesisState(genesisStateCall), + #[allow(missing_docs)] getHotShotCommitment(getHotShotCommitmentCall), + #[allow(missing_docs)] getStateHistoryCount(getStateHistoryCountCall), + #[allow(missing_docs)] getVersion(getVersionCall), + #[allow(missing_docs)] initialize(initializeCall), + #[allow(missing_docs)] isPermissionedProverEnabled(isPermissionedProverEnabledCall), + #[allow(missing_docs)] lagOverEscapeHatchThreshold(lagOverEscapeHatchThresholdCall), + #[allow(missing_docs)] newFinalizedState(newFinalizedStateCall), + #[allow(missing_docs)] owner(ownerCall), + #[allow(missing_docs)] permissionedProver(permissionedProverCall), + #[allow(missing_docs)] proxiableUUID(proxiableUUIDCall), + #[allow(missing_docs)] renounceOwnership(renounceOwnershipCall), + #[allow(missing_docs)] setFinalizedState(setFinalizedStateCall), + #[allow(missing_docs)] setHotShotDownSince(setHotShotDownSinceCall), + #[allow(missing_docs)] setHotShotUp(setHotShotUpCall), + #[allow(missing_docs)] setPermissionedProver(setPermissionedProverCall), + #[allow(missing_docs)] setStateHistory(setStateHistoryCall), + 
#[allow(missing_docs)] setstateHistoryRetentionPeriod(setstateHistoryRetentionPeriodCall), + #[allow(missing_docs)] stateHistoryCommitments(stateHistoryCommitmentsCall), + #[allow(missing_docs)] stateHistoryFirstIndex(stateHistoryFirstIndexCall), + #[allow(missing_docs)] stateHistoryRetentionPeriod(stateHistoryRetentionPeriodCall), + #[allow(missing_docs)] transferOwnership(transferOwnershipCall), + #[allow(missing_docs)] upgradeToAndCall(upgradeToAndCallCall), } #[automatically_derived] @@ -8821,76 +8944,76 @@ pub mod LightClientMock { match self { Self::UPGRADE_INTERFACE_VERSION(_) => { ::SELECTOR - } + }, Self::currentBlockNumber(_) => { ::SELECTOR - } + }, Self::disablePermissionedProverMode(_) => { ::SELECTOR - } + }, Self::finalizedState(_) => { ::SELECTOR - } + }, Self::genesisStakeTableState(_) => { ::SELECTOR - } + }, Self::genesisState(_) => ::SELECTOR, Self::getHotShotCommitment(_) => { ::SELECTOR - } + }, Self::getStateHistoryCount(_) => { ::SELECTOR - } + }, Self::getVersion(_) => ::SELECTOR, Self::initialize(_) => ::SELECTOR, Self::isPermissionedProverEnabled(_) => { ::SELECTOR - } + }, Self::lagOverEscapeHatchThreshold(_) => { ::SELECTOR - } + }, Self::newFinalizedState(_) => { ::SELECTOR - } + }, Self::owner(_) => ::SELECTOR, Self::permissionedProver(_) => { ::SELECTOR - } + }, Self::proxiableUUID(_) => ::SELECTOR, Self::renounceOwnership(_) => { ::SELECTOR - } + }, Self::setFinalizedState(_) => { ::SELECTOR - } + }, Self::setHotShotDownSince(_) => { ::SELECTOR - } + }, Self::setHotShotUp(_) => ::SELECTOR, Self::setPermissionedProver(_) => { ::SELECTOR - } + }, Self::setStateHistory(_) => { ::SELECTOR - } + }, Self::setstateHistoryRetentionPeriod(_) => { ::SELECTOR - } + }, Self::stateHistoryCommitments(_) => { ::SELECTOR - } + }, Self::stateHistoryFirstIndex(_) => { ::SELECTOR - } + }, Self::stateHistoryRetentionPeriod(_) => { ::SELECTOR - } + }, Self::transferOwnership(_) => { ::SELECTOR - } + }, Self::upgradeToAndCall(_) => { ::SELECTOR - } + }, 
} } #[inline] @@ -9403,136 +9526,156 @@ pub mod LightClientMock { ::abi_encode_raw( inner, out, ) - } + }, Self::currentBlockNumber(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::disablePermissionedProverMode(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::finalizedState(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::genesisStakeTableState(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::genesisState(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::getHotShotCommitment(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::getStateHistoryCount(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::getVersion(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::initialize(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::isPermissionedProverEnabled(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::lagOverEscapeHatchThreshold(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::newFinalizedState(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::owner(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::permissionedProver(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::proxiableUUID(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::renounceOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::setFinalizedState(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::setHotShotDownSince(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::setHotShotUp(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::setPermissionedProver(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::setStateHistory(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::setstateHistoryRetentionPeriod(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::stateHistoryCommitments(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::stateHistoryFirstIndex(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::stateHistoryRetentionPeriod(inner) => { 
::abi_encode_raw( inner, out, ) - } + }, Self::transferOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::upgradeToAndCall(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } ///Container for all the [`LightClientMock`](self) custom errors. pub enum LightClientMockErrors { + #[allow(missing_docs)] AddressEmptyCode(AddressEmptyCode), + #[allow(missing_docs)] ERC1967InvalidImplementation(ERC1967InvalidImplementation), + #[allow(missing_docs)] ERC1967NonPayable(ERC1967NonPayable), + #[allow(missing_docs)] FailedInnerCall(FailedInnerCall), + #[allow(missing_docs)] InsufficientSnapshotHistory(InsufficientSnapshotHistory), + #[allow(missing_docs)] InvalidAddress(InvalidAddress), + #[allow(missing_docs)] InvalidArgs(InvalidArgs), + #[allow(missing_docs)] InvalidHotShotBlockForCommitmentCheck(InvalidHotShotBlockForCommitmentCheck), + #[allow(missing_docs)] InvalidInitialization(InvalidInitialization), + #[allow(missing_docs)] InvalidMaxStateHistory(InvalidMaxStateHistory), + #[allow(missing_docs)] InvalidProof(InvalidProof), + #[allow(missing_docs)] NoChangeRequired(NoChangeRequired), + #[allow(missing_docs)] NotInitializing(NotInitializing), + #[allow(missing_docs)] OutdatedState(OutdatedState), + #[allow(missing_docs)] OwnableInvalidOwner(OwnableInvalidOwner), + #[allow(missing_docs)] OwnableUnauthorizedAccount(OwnableUnauthorizedAccount), + #[allow(missing_docs)] ProverNotPermissioned(ProverNotPermissioned), + #[allow(missing_docs)] UUPSUnauthorizedCallContext(UUPSUnauthorizedCallContext), + #[allow(missing_docs)] UUPSUnsupportedProxiableUUID(UUPSUnsupportedProxiableUUID), + #[allow(missing_docs)] WrongStakeTableUsed(WrongStakeTableUsed), } #[automatically_derived] @@ -9576,56 +9719,56 @@ pub mod LightClientMock { match self { Self::AddressEmptyCode(_) => { ::SELECTOR - } + }, Self::ERC1967InvalidImplementation(_) => { ::SELECTOR - } + }, Self::ERC1967NonPayable(_) => { ::SELECTOR - } + }, Self::FailedInnerCall(_) => { ::SELECTOR - } + }, 
Self::InsufficientSnapshotHistory(_) => { ::SELECTOR - } + }, Self::InvalidAddress(_) => ::SELECTOR, Self::InvalidArgs(_) => ::SELECTOR, Self::InvalidHotShotBlockForCommitmentCheck(_) => { ::SELECTOR - } + }, Self::InvalidInitialization(_) => { ::SELECTOR - } + }, Self::InvalidMaxStateHistory(_) => { ::SELECTOR - } + }, Self::InvalidProof(_) => ::SELECTOR, Self::NoChangeRequired(_) => { ::SELECTOR - } + }, Self::NotInitializing(_) => { ::SELECTOR - } + }, Self::OutdatedState(_) => ::SELECTOR, Self::OwnableInvalidOwner(_) => { ::SELECTOR - } + }, Self::OwnableUnauthorizedAccount(_) => { ::SELECTOR - } + }, Self::ProverNotPermissioned(_) => { ::SELECTOR - } + }, Self::UUPSUnauthorizedCallContext(_) => { ::SELECTOR - } + }, Self::UUPSUnsupportedProxiableUUID(_) => { ::SELECTOR - } + }, Self::WrongStakeTableUsed(_) => { ::SELECTOR - } + }, } } #[inline] @@ -10121,12 +10264,19 @@ pub mod LightClientMock { } ///Container for all the [`LightClientMock`](self) events. pub enum LightClientMockEvents { + #[allow(missing_docs)] Initialized(Initialized), + #[allow(missing_docs)] NewState(NewState), + #[allow(missing_docs)] OwnershipTransferred(OwnershipTransferred), + #[allow(missing_docs)] PermissionedProverNotRequired(PermissionedProverNotRequired), + #[allow(missing_docs)] PermissionedProverRequired(PermissionedProverRequired), + #[allow(missing_docs)] Upgrade(Upgrade), + #[allow(missing_docs)] Upgraded(Upgraded), } #[automatically_derived] @@ -10190,17 +10340,17 @@ pub mod LightClientMock { topics, data, validate, ) .map(Self::Initialized) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::NewState) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log( topics, data, validate, ) .map(Self::OwnershipTransferred) - } + }, Some( ::SIGNATURE_HASH, ) => ::decode_raw_log( @@ -10212,15 +10362,15 @@ pub mod LightClientMock { topics, data, validate, ) .map(Self::PermissionedProverRequired) - } + }, Some(::SIGNATURE_HASH) => { 
::decode_raw_log(topics, data, validate) .map(Self::Upgrade) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Upgraded) - } + }, _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { name: ::NAME, log: alloy_sol_types::private::Box::new( @@ -10239,17 +10389,17 @@ pub mod LightClientMock { match self { Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::NewState(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::PermissionedProverNotRequired(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::PermissionedProverRequired(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::Upgrade(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), Self::Upgraded(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), } @@ -10258,23 +10408,23 @@ pub mod LightClientMock { match self { Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::NewState(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::PermissionedProverNotRequired(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::PermissionedProverRequired(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::Upgrade(inner) => alloy_sol_types::private::IntoLogData::into_log_data(inner), Self::Upgraded(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, } } } diff --git a/contract-bindings-alloy/src/permissionedstaketable.rs b/contract-bindings-alloy/src/permissionedstaketable.rs index fd58dff88e..14d1f72146 100644 --- 
a/contract-bindings-alloy/src/permissionedstaketable.rs +++ b/contract-bindings-alloy/src/permissionedstaketable.rs @@ -15,8 +15,9 @@ library BN254 { clippy::empty_structs_with_brackets )] pub mod BN254 { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct BaseField(alloy::sol_types::private::primitives::aliases::U256); @@ -139,9 +140,13 @@ pub mod BN254 { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct G2Point { + #[allow(missing_docs)] pub x0: ::RustType, + #[allow(missing_docs)] pub x1: ::RustType, + #[allow(missing_docs)] pub y0: ::RustType, + #[allow(missing_docs)] pub y1: ::RustType, } #[allow( @@ -167,7 +172,7 @@ pub mod BN254 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -459,15 +464,18 @@ library EdOnBN254 { clippy::empty_structs_with_brackets )] pub mod EdOnBN254 { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /**```solidity struct EdOnBN254Point { uint256 x; uint256 y; } ```*/ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct EdOnBN254Point { + #[allow(missing_docs)] pub x: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub y: alloy::sol_types::private::primitives::aliases::U256, } #[allow( @@ -494,7 +502,7 @@ pub mod EdOnBN254 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1353,8 +1361,9 @@ interface PermissionedStakeTable { clippy::empty_structs_with_brackets )] pub mod PermissionedStakeTable { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /// The creation / init bytecode of the contract. 
/// /// ```text @@ -1381,8 +1390,11 @@ pub mod PermissionedStakeTable { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct NodeInfo { + #[allow(missing_docs)] pub blsVK: ::RustType, + #[allow(missing_docs)] pub schnorrVK: ::RustType, + #[allow(missing_docs)] pub isDA: bool, } #[allow( @@ -1411,7 +1423,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1613,7 +1625,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1673,7 +1685,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1715,6 +1727,7 @@ pub mod PermissionedStakeTable { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct OwnableInvalidOwner { + #[allow(missing_docs)] pub owner: alloy::sol_types::private::Address, } #[allow( @@ -1735,7 +1748,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1781,6 +1794,7 @@ pub mod PermissionedStakeTable { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct OwnableUnauthorizedAccount { + #[allow(missing_docs)] pub account: alloy::sol_types::private::Address, } #[allow( @@ -1801,7 +1815,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1847,6 +1861,7 @@ pub mod PermissionedStakeTable { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct StakerAlreadyExists { + #[allow(missing_docs)] pub _0: ::RustType, } 
#[allow( @@ -1867,7 +1882,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1912,6 +1927,7 @@ pub mod PermissionedStakeTable { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct StakerNotFound { + #[allow(missing_docs)] pub _0: ::RustType, } #[allow( @@ -1932,7 +1948,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2298,6 +2314,7 @@ pub mod PermissionedStakeTable { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct constructorCall { + #[allow(missing_docs)] pub initialStakers: alloy::sol_types::private::Vec<::RustType>, } @@ -2316,7 +2333,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2363,12 +2380,14 @@ pub mod PermissionedStakeTable { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct _hashBlsKeyCall { + #[allow(missing_docs)] pub blsVK: ::RustType, } ///Container type for the return parameters of the [`_hashBlsKey((uint256,uint256,uint256,uint256))`](_hashBlsKeyCall) function. 
#[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct _hashBlsKeyReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::FixedBytes<32>, } #[allow( @@ -2391,7 +2410,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2420,7 +2439,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2501,7 +2520,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2530,7 +2549,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2590,6 +2609,7 @@ pub mod PermissionedStakeTable { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct initializedAtBlockReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::primitives::aliases::U256, } #[allow( @@ -2611,7 +2631,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2640,7 +2660,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2696,12 +2716,14 @@ pub mod PermissionedStakeTable { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct isStakerCall { + #[allow(missing_docs)] pub staker: ::RustType, } ///Container type for the return parameters of the [`isStaker((uint256,uint256,uint256,uint256))`](isStakerCall) function. 
#[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct isStakerReturn { + #[allow(missing_docs)] pub _0: bool, } #[allow( @@ -2724,7 +2746,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2753,7 +2775,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2815,6 +2837,7 @@ pub mod PermissionedStakeTable { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct ownerReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::Address, } #[allow( @@ -2836,7 +2859,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2865,7 +2888,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2944,7 +2967,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2973,7 +2996,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3029,6 +3052,7 @@ pub mod PermissionedStakeTable { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct transferOwnershipCall { + #[allow(missing_docs)] pub newOwner: alloy::sol_types::private::Address, } ///Container type for the return parameters of the [`transferOwnership(address)`](transferOwnershipCall) function. 
@@ -3054,7 +3078,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3083,7 +3107,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3143,8 +3167,10 @@ pub mod PermissionedStakeTable { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct updateCall { + #[allow(missing_docs)] pub stakersToRemove: alloy::sol_types::private::Vec<::RustType>, + #[allow(missing_docs)] pub newStakers: alloy::sol_types::private::Vec<::RustType>, } @@ -3179,7 +3205,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3211,7 +3237,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3272,13 +3298,21 @@ pub mod PermissionedStakeTable { }; ///Container for all the [`PermissionedStakeTable`](self) function calls. 
pub enum PermissionedStakeTableCalls { + #[allow(missing_docs)] _hashBlsKey(_hashBlsKeyCall), + #[allow(missing_docs)] initialize(initializeCall), + #[allow(missing_docs)] initializedAtBlock(initializedAtBlockCall), + #[allow(missing_docs)] isStaker(isStakerCall), + #[allow(missing_docs)] owner(ownerCall), + #[allow(missing_docs)] renounceOwnership(renounceOwnershipCall), + #[allow(missing_docs)] transferOwnership(transferOwnershipCall), + #[allow(missing_docs)] update(updateCall), } #[automatically_derived] @@ -3312,15 +3346,15 @@ pub mod PermissionedStakeTable { Self::initialize(_) => ::SELECTOR, Self::initializedAtBlock(_) => { ::SELECTOR - } + }, Self::isStaker(_) => ::SELECTOR, Self::owner(_) => ::SELECTOR, Self::renounceOwnership(_) => { ::SELECTOR - } + }, Self::transferOwnership(_) => { ::SELECTOR - } + }, Self::update(_) => ::SELECTOR, } } @@ -3446,28 +3480,28 @@ pub mod PermissionedStakeTable { match self { Self::_hashBlsKey(inner) => { <_hashBlsKeyCall as alloy_sol_types::SolCall>::abi_encoded_size(inner) - } + }, Self::initialize(inner) => { ::abi_encoded_size(inner) - } + }, Self::initializedAtBlock(inner) => { ::abi_encoded_size(inner) - } + }, Self::isStaker(inner) => { ::abi_encoded_size(inner) - } + }, Self::owner(inner) => { ::abi_encoded_size(inner) - } + }, Self::renounceOwnership(inner) => { ::abi_encoded_size(inner) - } + }, Self::transferOwnership(inner) => { ::abi_encoded_size(inner) - } + }, Self::update(inner) => { ::abi_encoded_size(inner) - } + }, } } #[inline] @@ -3475,38 +3509,44 @@ pub mod PermissionedStakeTable { match self { Self::_hashBlsKey(inner) => { <_hashBlsKeyCall as alloy_sol_types::SolCall>::abi_encode_raw(inner, out) - } + }, Self::initialize(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::initializedAtBlock(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::isStaker(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::owner(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::renounceOwnership(inner) => 
{ ::abi_encode_raw(inner, out) - } + }, Self::transferOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::update(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } ///Container for all the [`PermissionedStakeTable`](self) custom errors. pub enum PermissionedStakeTableErrors { + #[allow(missing_docs)] InvalidInitialization(InvalidInitialization), + #[allow(missing_docs)] NotInitializing(NotInitializing), + #[allow(missing_docs)] OwnableInvalidOwner(OwnableInvalidOwner), + #[allow(missing_docs)] OwnableUnauthorizedAccount(OwnableUnauthorizedAccount), + #[allow(missing_docs)] StakerAlreadyExists(StakerAlreadyExists), + #[allow(missing_docs)] StakerNotFound(StakerNotFound), } #[automatically_derived] @@ -3536,19 +3576,19 @@ pub mod PermissionedStakeTable { match self { Self::InvalidInitialization(_) => { ::SELECTOR - } + }, Self::NotInitializing(_) => { ::SELECTOR - } + }, Self::OwnableInvalidOwner(_) => { ::SELECTOR - } + }, Self::OwnableUnauthorizedAccount(_) => { ::SELECTOR - } + }, Self::StakerAlreadyExists(_) => { ::SELECTOR - } + }, Self::StakerNotFound(_) => ::SELECTOR, } } @@ -3658,24 +3698,24 @@ pub mod PermissionedStakeTable { match self { Self::InvalidInitialization(inner) => { ::abi_encoded_size(inner) - } + }, Self::NotInitializing(inner) => { ::abi_encoded_size(inner) - } + }, Self::OwnableInvalidOwner(inner) => { ::abi_encoded_size(inner) - } + }, Self::OwnableUnauthorizedAccount(inner) => { ::abi_encoded_size( inner, ) - } + }, Self::StakerAlreadyExists(inner) => { ::abi_encoded_size(inner) - } + }, Self::StakerNotFound(inner) => { ::abi_encoded_size(inner) - } + }, } } #[inline] @@ -3683,31 +3723,34 @@ pub mod PermissionedStakeTable { match self { Self::InvalidInitialization(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::NotInitializing(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::OwnableInvalidOwner(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::OwnableUnauthorizedAccount(inner) => { ::abi_encode_raw( 
inner, out, ) - } + }, Self::StakerAlreadyExists(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::StakerNotFound(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } ///Container for all the [`PermissionedStakeTable`](self) events. pub enum PermissionedStakeTableEvents { + #[allow(missing_docs)] Initialized(Initialized), + #[allow(missing_docs)] OwnershipTransferred(OwnershipTransferred), + #[allow(missing_docs)] StakersUpdated(StakersUpdated), } #[automatically_derived] @@ -3751,19 +3794,19 @@ pub mod PermissionedStakeTable { topics, data, validate, ) .map(Self::Initialized) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log( topics, data, validate, ) .map(Self::OwnershipTransferred) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log( topics, data, validate, ) .map(Self::StakersUpdated) - } + }, _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { name: ::NAME, log: alloy_sol_types::private::Box::new( @@ -3782,26 +3825,26 @@ pub mod PermissionedStakeTable { match self { Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::StakersUpdated(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, } } fn into_log_data(self) -> alloy_sol_types::private::LogData { match self { Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::StakersUpdated(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, } } } diff --git a/contract-bindings-alloy/src/plonkverifier.rs b/contract-bindings-alloy/src/plonkverifier.rs index 363d08a96a..060d8dccac 100644 --- a/contract-bindings-alloy/src/plonkverifier.rs +++ b/contract-bindings-alloy/src/plonkverifier.rs @@ -16,8 +16,9 @@ library BN254 { 
clippy::empty_structs_with_brackets )] pub mod BN254 { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct BaseField(alloy::sol_types::private::primitives::aliases::U256); @@ -256,7 +257,9 @@ pub mod BN254 { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct G1Point { + #[allow(missing_docs)] pub x: ::RustType, + #[allow(missing_docs)] pub y: ::RustType, } #[allow( @@ -280,7 +283,7 @@ pub mod BN254 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -561,36 +564,60 @@ library IPlonkVerifier { clippy::empty_structs_with_brackets )] pub mod IPlonkVerifier { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /**```solidity struct PlonkProof { BN254.G1Point wire0; BN254.G1Point wire1; BN254.G1Point wire2; BN254.G1Point wire3; BN254.G1Point wire4; BN254.G1Point prodPerm; BN254.G1Point split0; BN254.G1Point split1; BN254.G1Point split2; BN254.G1Point split3; BN254.G1Point split4; BN254.G1Point zeta; BN254.G1Point zetaOmega; BN254.ScalarField wireEval0; BN254.ScalarField wireEval1; BN254.ScalarField wireEval2; BN254.ScalarField wireEval3; BN254.ScalarField wireEval4; BN254.ScalarField sigmaEval0; BN254.ScalarField sigmaEval1; BN254.ScalarField sigmaEval2; BN254.ScalarField sigmaEval3; BN254.ScalarField prodPermZetaOmegaEval; } ```*/ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct PlonkProof { + #[allow(missing_docs)] pub wire0: ::RustType, + #[allow(missing_docs)] pub wire1: ::RustType, + #[allow(missing_docs)] pub wire2: ::RustType, + #[allow(missing_docs)] pub wire3: ::RustType, + #[allow(missing_docs)] pub wire4: ::RustType, + #[allow(missing_docs)] pub prodPerm: ::RustType, + #[allow(missing_docs)] pub split0: ::RustType, + 
#[allow(missing_docs)] pub split1: ::RustType, + #[allow(missing_docs)] pub split2: ::RustType, + #[allow(missing_docs)] pub split3: ::RustType, + #[allow(missing_docs)] pub split4: ::RustType, + #[allow(missing_docs)] pub zeta: ::RustType, + #[allow(missing_docs)] pub zetaOmega: ::RustType, + #[allow(missing_docs)] pub wireEval0: ::RustType, + #[allow(missing_docs)] pub wireEval1: ::RustType, + #[allow(missing_docs)] pub wireEval2: ::RustType, + #[allow(missing_docs)] pub wireEval3: ::RustType, + #[allow(missing_docs)] pub wireEval4: ::RustType, + #[allow(missing_docs)] pub sigmaEval0: ::RustType, + #[allow(missing_docs)] pub sigmaEval1: ::RustType, + #[allow(missing_docs)] pub sigmaEval2: ::RustType, + #[allow(missing_docs)] pub sigmaEval3: ::RustType, + #[allow(missing_docs)] pub prodPermZetaOmegaEval: ::RustType, } #[allow( @@ -659,7 +686,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1116,27 +1143,49 @@ pub mod IPlonkVerifier { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct VerifyingKey { + #[allow(missing_docs)] pub domainSize: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub numInputs: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub sigma0: ::RustType, + #[allow(missing_docs)] pub sigma1: ::RustType, + #[allow(missing_docs)] pub sigma2: ::RustType, + #[allow(missing_docs)] pub sigma3: ::RustType, + #[allow(missing_docs)] pub sigma4: ::RustType, + #[allow(missing_docs)] pub q1: ::RustType, + #[allow(missing_docs)] pub q2: ::RustType, + #[allow(missing_docs)] pub q3: ::RustType, + #[allow(missing_docs)] pub q4: ::RustType, + #[allow(missing_docs)] pub qM12: ::RustType, + #[allow(missing_docs)] pub qM34: ::RustType, + #[allow(missing_docs)] pub qO: ::RustType, + #[allow(missing_docs)] pub qC: ::RustType, + #[allow(missing_docs)] 
pub qH1: ::RustType, + #[allow(missing_docs)] pub qH2: ::RustType, + #[allow(missing_docs)] pub qH3: ::RustType, + #[allow(missing_docs)] pub qH4: ::RustType, + #[allow(missing_docs)] pub qEcc: ::RustType, + #[allow(missing_docs)] pub g2LSB: alloy::sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] pub g2MSB: alloy::sol_types::private::FixedBytes<32>, } #[allow( @@ -1203,7 +1252,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2560,8 +2609,9 @@ interface PlonkVerifier { clippy::empty_structs_with_brackets )] pub mod PlonkVerifier { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /// The creation / init bytecode of the contract. /// /// ```text @@ -2607,7 +2657,7 @@ pub mod PlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2667,7 +2717,7 @@ pub mod PlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2727,7 +2777,7 @@ pub mod PlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2769,14 +2819,18 @@ pub mod PlonkVerifier { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct verifyCall { + #[allow(missing_docs)] pub verifyingKey: ::RustType, + #[allow(missing_docs)] pub publicInput: [alloy::sol_types::private::primitives::aliases::U256; 7usize], + #[allow(missing_docs)] pub proof: ::RustType, } ///Container type for the return parameters of the 
[`verify((uint256,uint256,(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),bytes32,bytes32),uint256[7],((uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256))`](verifyCall) function. #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct verifyReturn { + #[allow(missing_docs)] pub _0: bool, } #[allow( @@ -2809,7 +2863,7 @@ pub mod PlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2842,7 +2896,7 @@ pub mod PlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2909,6 +2963,7 @@ pub mod PlonkVerifier { }; ///Container for all the [`PlonkVerifier`](self) function calls. pub enum PlonkVerifierCalls { + #[allow(missing_docs)] verify(verifyCall), } #[automatically_derived] @@ -2974,7 +3029,7 @@ pub mod PlonkVerifier { match self { Self::verify(inner) => { ::abi_encoded_size(inner) - } + }, } } #[inline] @@ -2982,14 +3037,17 @@ pub mod PlonkVerifier { match self { Self::verify(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } ///Container for all the [`PlonkVerifier`](self) custom errors. 
pub enum PlonkVerifierErrors { + #[allow(missing_docs)] InvalidPlonkArgs(InvalidPlonkArgs), + #[allow(missing_docs)] UnsupportedDegree(UnsupportedDegree), + #[allow(missing_docs)] WrongPlonkVK(WrongPlonkVK), } #[automatically_derived] @@ -3016,10 +3074,10 @@ pub mod PlonkVerifier { match self { Self::InvalidPlonkArgs(_) => { ::SELECTOR - } + }, Self::UnsupportedDegree(_) => { ::SELECTOR - } + }, Self::WrongPlonkVK(_) => ::SELECTOR, } } @@ -3091,13 +3149,13 @@ pub mod PlonkVerifier { match self { Self::InvalidPlonkArgs(inner) => { ::abi_encoded_size(inner) - } + }, Self::UnsupportedDegree(inner) => { ::abi_encoded_size(inner) - } + }, Self::WrongPlonkVK(inner) => { ::abi_encoded_size(inner) - } + }, } } #[inline] @@ -3105,13 +3163,13 @@ pub mod PlonkVerifier { match self { Self::InvalidPlonkArgs(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::UnsupportedDegree(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::WrongPlonkVK(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } diff --git a/contract-bindings-alloy/src/plonkverifier2.rs b/contract-bindings-alloy/src/plonkverifier2.rs index 12cc4014ac..1997d2f3bb 100644 --- a/contract-bindings-alloy/src/plonkverifier2.rs +++ b/contract-bindings-alloy/src/plonkverifier2.rs @@ -16,8 +16,9 @@ library BN254 { clippy::empty_structs_with_brackets )] pub mod BN254 { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct BaseField(alloy::sol_types::private::primitives::aliases::U256); @@ -256,7 +257,9 @@ pub mod BN254 { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct G1Point { + #[allow(missing_docs)] pub x: ::RustType, + #[allow(missing_docs)] pub y: ::RustType, } #[allow( @@ -280,7 +283,7 @@ pub mod BN254 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ 
-561,36 +564,60 @@ library IPlonkVerifier { clippy::empty_structs_with_brackets )] pub mod IPlonkVerifier { - use super::*; use alloy::sol_types as alloy_sol_types; + + use super::*; /**```solidity struct PlonkProof { BN254.G1Point wire0; BN254.G1Point wire1; BN254.G1Point wire2; BN254.G1Point wire3; BN254.G1Point wire4; BN254.G1Point prodPerm; BN254.G1Point split0; BN254.G1Point split1; BN254.G1Point split2; BN254.G1Point split3; BN254.G1Point split4; BN254.G1Point zeta; BN254.G1Point zetaOmega; BN254.ScalarField wireEval0; BN254.ScalarField wireEval1; BN254.ScalarField wireEval2; BN254.ScalarField wireEval3; BN254.ScalarField wireEval4; BN254.ScalarField sigmaEval0; BN254.ScalarField sigmaEval1; BN254.ScalarField sigmaEval2; BN254.ScalarField sigmaEval3; BN254.ScalarField prodPermZetaOmegaEval; } ```*/ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct PlonkProof { + #[allow(missing_docs)] pub wire0: ::RustType, + #[allow(missing_docs)] pub wire1: ::RustType, + #[allow(missing_docs)] pub wire2: ::RustType, + #[allow(missing_docs)] pub wire3: ::RustType, + #[allow(missing_docs)] pub wire4: ::RustType, + #[allow(missing_docs)] pub prodPerm: ::RustType, + #[allow(missing_docs)] pub split0: ::RustType, + #[allow(missing_docs)] pub split1: ::RustType, + #[allow(missing_docs)] pub split2: ::RustType, + #[allow(missing_docs)] pub split3: ::RustType, + #[allow(missing_docs)] pub split4: ::RustType, + #[allow(missing_docs)] pub zeta: ::RustType, + #[allow(missing_docs)] pub zetaOmega: ::RustType, + #[allow(missing_docs)] pub wireEval0: ::RustType, + #[allow(missing_docs)] pub wireEval1: ::RustType, + #[allow(missing_docs)] pub wireEval2: ::RustType, + #[allow(missing_docs)] pub wireEval3: ::RustType, + #[allow(missing_docs)] pub wireEval4: ::RustType, + #[allow(missing_docs)] pub sigmaEval0: ::RustType, + #[allow(missing_docs)] pub sigmaEval1: ::RustType, + #[allow(missing_docs)] pub sigmaEval2: ::RustType, + 
#[allow(missing_docs)] pub sigmaEval3: ::RustType, + #[allow(missing_docs)] pub prodPermZetaOmegaEval: ::RustType, } #[allow( @@ -659,7 +686,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1116,27 +1143,49 @@ pub mod IPlonkVerifier { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct VerifyingKey { + #[allow(missing_docs)] pub domainSize: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub numInputs: alloy::sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] pub sigma0: ::RustType, + #[allow(missing_docs)] pub sigma1: ::RustType, + #[allow(missing_docs)] pub sigma2: ::RustType, + #[allow(missing_docs)] pub sigma3: ::RustType, + #[allow(missing_docs)] pub sigma4: ::RustType, + #[allow(missing_docs)] pub q1: ::RustType, + #[allow(missing_docs)] pub q2: ::RustType, + #[allow(missing_docs)] pub q3: ::RustType, + #[allow(missing_docs)] pub q4: ::RustType, + #[allow(missing_docs)] pub qM12: ::RustType, + #[allow(missing_docs)] pub qM34: ::RustType, + #[allow(missing_docs)] pub qO: ::RustType, + #[allow(missing_docs)] pub qC: ::RustType, + #[allow(missing_docs)] pub qH1: ::RustType, + #[allow(missing_docs)] pub qH2: ::RustType, + #[allow(missing_docs)] pub qH3: ::RustType, + #[allow(missing_docs)] pub qH4: ::RustType, + #[allow(missing_docs)] pub qEcc: ::RustType, + #[allow(missing_docs)] pub g2LSB: alloy::sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] pub g2MSB: alloy::sol_types::private::FixedBytes<32>, } #[allow( @@ -1203,7 +1252,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2576,8 +2625,9 @@ interface PlonkVerifier2 { clippy::empty_structs_with_brackets )] pub mod PlonkVerifier2 { - use super::*; use alloy::sol_types as alloy_sol_types; + 
+ use super::*; /// The creation / init bytecode of the contract. /// /// ```text @@ -2623,7 +2673,7 @@ pub mod PlonkVerifier2 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2669,6 +2719,7 @@ pub mod PlonkVerifier2 { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct P_MODReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::primitives::aliases::U256, } #[allow( @@ -2690,7 +2741,7 @@ pub mod PlonkVerifier2 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2719,7 +2770,7 @@ pub mod PlonkVerifier2 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2779,6 +2830,7 @@ pub mod PlonkVerifier2 { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct R_MODReturn { + #[allow(missing_docs)] pub _0: alloy::sol_types::private::primitives::aliases::U256, } #[allow( @@ -2800,7 +2852,7 @@ pub mod PlonkVerifier2 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2829,7 +2881,7 @@ pub mod PlonkVerifier2 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2885,14 +2937,18 @@ pub mod PlonkVerifier2 { #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct verifyCall { + #[allow(missing_docs)] pub vk: ::RustType, + #[allow(missing_docs)] pub publicInput: [alloy::sol_types::private::primitives::aliases::U256; 7usize], + #[allow(missing_docs)] pub proof: ::RustType, } ///Container type for the return parameters of the 
[`verify((uint256,uint256,(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),bytes32,bytes32),uint256[7],((uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),(uint256,uint256),uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256))`](verifyCall) function. #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] #[derive(Clone)] pub struct verifyReturn { + #[allow(missing_docs)] pub success: bool, } #[allow( @@ -2925,7 +2981,7 @@ pub mod PlonkVerifier2 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2958,7 +3014,7 @@ pub mod PlonkVerifier2 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3023,8 +3079,11 @@ pub mod PlonkVerifier2 { }; ///Container for all the [`PlonkVerifier2`](self) function calls. 
pub enum PlonkVerifier2Calls { + #[allow(missing_docs)] P_MOD(P_MODCall), + #[allow(missing_docs)] R_MOD(R_MODCall), + #[allow(missing_docs)] verify(verifyCall), } #[automatically_derived] @@ -3118,13 +3177,13 @@ pub mod PlonkVerifier2 { match self { Self::P_MOD(inner) => { ::abi_encoded_size(inner) - } + }, Self::R_MOD(inner) => { ::abi_encoded_size(inner) - } + }, Self::verify(inner) => { ::abi_encoded_size(inner) - } + }, } } #[inline] @@ -3132,18 +3191,19 @@ pub mod PlonkVerifier2 { match self { Self::P_MOD(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::R_MOD(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::verify(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } ///Container for all the [`PlonkVerifier2`](self) custom errors. pub enum PlonkVerifier2Errors { + #[allow(missing_docs)] UnsupportedDegree(UnsupportedDegree), } #[automatically_derived] @@ -3166,7 +3226,7 @@ pub mod PlonkVerifier2 { match self { Self::UnsupportedDegree(_) => { ::SELECTOR - } + }, } } #[inline] @@ -3211,7 +3271,7 @@ pub mod PlonkVerifier2 { match self { Self::UnsupportedDegree(inner) => { ::abi_encoded_size(inner) - } + }, } } #[inline] @@ -3219,7 +3279,7 @@ pub mod PlonkVerifier2 { match self { Self::UnsupportedDegree(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } diff --git a/contract-bindings-ethers/src/erc1967_proxy.rs b/contract-bindings-ethers/src/erc1967_proxy.rs index cff5203cba..453bf08c61 100644 --- a/contract-bindings-ethers/src/erc1967_proxy.rs +++ b/contract-bindings-ethers/src/erc1967_proxy.rs @@ -320,7 +320,7 @@ pub mod erc1967_proxy { Self::AddressEmptyCode(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::ERC1967InvalidImplementation(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ERC1967NonPayable(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::FailedInnerCall(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::RevertString(s) => 
::ethers::core::abi::AbiEncode::encode(s), @@ -333,21 +333,21 @@ pub mod erc1967_proxy { [0x08, 0xc3, 0x79, 0xa0] => true, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector( ) => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ => false, } } @@ -358,7 +358,7 @@ pub mod erc1967_proxy { Self::AddressEmptyCode(element) => ::core::fmt::Display::fmt(element, f), Self::ERC1967InvalidImplementation(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::ERC1967NonPayable(element) => ::core::fmt::Display::fmt(element, f), Self::FailedInnerCall(element) => ::core::fmt::Display::fmt(element, f), Self::RevertString(s) => ::core::fmt::Display::fmt(s, f), diff --git a/contract-bindings-ethers/src/fee_contract.rs b/contract-bindings-ethers/src/fee_contract.rs index 06cb0aa83d..38f00a01f0 100644 --- a/contract-bindings-ethers/src/fee_contract.rs +++ b/contract-bindings-ethers/src/fee_contract.rs @@ -1071,32 +1071,32 @@ pub mod fee_contract { Self::DepositTooSmall(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::ERC1967InvalidImplementation(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ERC1967NonPayable(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::FailedInnerCall(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::FunctionDoesNotExist(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidInitialization(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidUserAddress(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::NoFunctionCalled(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::NotInitializing(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::OwnableInvalidOwner(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, 
Self::OwnableUnauthorizedAccount(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::UUPSUnauthorizedCallContext(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::UUPSUnsupportedProxiableUUID(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::RevertString(s) => ::ethers::core::abi::AbiEncode::encode(s), } } @@ -1107,70 +1107,70 @@ pub mod fee_contract { [0x08, 0xc3, 0x79, 0xa0] => true, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector( ) => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector( ) => { true - } + }, _ if selector == ::selector( ) => { true - } + }, _ => false, } } @@ -1183,7 +1183,7 @@ pub mod fee_contract { Self::DepositTooSmall(element) => ::core::fmt::Display::fmt(element, f), Self::ERC1967InvalidImplementation(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::ERC1967NonPayable(element) => ::core::fmt::Display::fmt(element, f), Self::FailedInnerCall(element) => ::core::fmt::Display::fmt(element, f), Self::FunctionDoesNotExist(element) => ::core::fmt::Display::fmt(element, f), @@ -1196,7 +1196,7 @@ pub mod fee_contract { Self::UUPSUnauthorizedCallContext(element) => ::core::fmt::Display::fmt(element, f), Self::UUPSUnsupportedProxiableUUID(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::RevertString(s) => ::core::fmt::Display::fmt(s, f), } } @@ -1754,7 +1754,7 @@ pub mod fee_contract { 
match self { Self::UpgradeInterfaceVersion(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::Balances(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::Deposit(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::GetVersion(element) => ::ethers::core::abi::AbiEncode::encode(element), diff --git a/contract-bindings-ethers/src/light_client.rs b/contract-bindings-ethers/src/light_client.rs index 32daa6fc12..09aa7f02bd 100644 --- a/contract-bindings-ethers/src/light_client.rs +++ b/contract-bindings-ethers/src/light_client.rs @@ -1726,45 +1726,45 @@ pub mod light_client { Self::AddressEmptyCode(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::ERC1967InvalidImplementation(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ERC1967NonPayable(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::FailedInnerCall(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InsufficientSnapshotHistory(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidAddress(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InvalidArgs(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InvalidHotShotBlockForCommitmentCheck(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidInitialization(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidMaxStateHistory(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidProof(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::NoChangeRequired(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::NotInitializing(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::OutdatedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::OwnableInvalidOwner(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, 
Self::OwnableUnauthorizedAccount(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ProverNotPermissioned(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::UUPSUnauthorizedCallContext(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::UUPSUnsupportedProxiableUUID(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::WrongStakeTableUsed(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::RevertString(s) => ::ethers::core::abi::AbiEncode::encode(s), } } @@ -1859,7 +1859,7 @@ pub mod light_client { Self::AddressEmptyCode(element) => ::core::fmt::Display::fmt(element, f), Self::ERC1967InvalidImplementation(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::ERC1967NonPayable(element) => ::core::fmt::Display::fmt(element, f), Self::FailedInnerCall(element) => ::core::fmt::Display::fmt(element, f), Self::InsufficientSnapshotHistory(element) => ::core::fmt::Display::fmt(element, f), @@ -1867,7 +1867,7 @@ pub mod light_client { Self::InvalidArgs(element) => ::core::fmt::Display::fmt(element, f), Self::InvalidHotShotBlockForCommitmentCheck(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::InvalidInitialization(element) => ::core::fmt::Display::fmt(element, f), Self::InvalidMaxStateHistory(element) => ::core::fmt::Display::fmt(element, f), Self::InvalidProof(element) => ::core::fmt::Display::fmt(element, f), @@ -1880,7 +1880,7 @@ pub mod light_client { Self::UUPSUnauthorizedCallContext(element) => ::core::fmt::Display::fmt(element, f), Self::UUPSUnsupportedProxiableUUID(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::WrongStakeTableUsed(element) => ::core::fmt::Display::fmt(element, f), Self::RevertString(s) => ::core::fmt::Display::fmt(s, f), } @@ -2176,10 +2176,10 @@ pub mod light_client { Self::OwnershipTransferredFilter(element) => ::core::fmt::Display::fmt(element, f), 
Self::PermissionedProverNotRequiredFilter(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::PermissionedProverRequiredFilter(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::UpgradeFilter(element) => ::core::fmt::Display::fmt(element, f), Self::UpgradedFilter(element) => ::core::fmt::Display::fmt(element, f), } @@ -2777,54 +2777,54 @@ pub mod light_client { match self { Self::UpgradeInterfaceVersion(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::CurrentBlockNumber(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::DisablePermissionedProverMode(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::FinalizedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::GenesisStakeTableState(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::GenesisState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::GetHotShotCommitment(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::GetStateHistoryCount(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::GetVersion(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::Initialize(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::IsPermissionedProverEnabled(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::LagOverEscapeHatchThreshold(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::NewFinalizedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::Owner(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::PermissionedProver(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ProxiableUUID(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::RenounceOwnership(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::SetPermissionedProver(element) => { 
::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::SetstateHistoryRetentionPeriod(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StateHistoryCommitments(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StateHistoryFirstIndex(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StateHistoryRetentionPeriod(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::TransferOwnership(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::UpgradeToAndCall(element) => ::ethers::core::abi::AbiEncode::encode(element), } @@ -2837,7 +2837,7 @@ pub mod light_client { Self::CurrentBlockNumber(element) => ::core::fmt::Display::fmt(element, f), Self::DisablePermissionedProverMode(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::FinalizedState(element) => ::core::fmt::Display::fmt(element, f), Self::GenesisStakeTableState(element) => ::core::fmt::Display::fmt(element, f), Self::GenesisState(element) => ::core::fmt::Display::fmt(element, f), @@ -2855,7 +2855,7 @@ pub mod light_client { Self::SetPermissionedProver(element) => ::core::fmt::Display::fmt(element, f), Self::SetstateHistoryRetentionPeriod(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::StateHistoryCommitments(element) => ::core::fmt::Display::fmt(element, f), Self::StateHistoryFirstIndex(element) => ::core::fmt::Display::fmt(element, f), Self::StateHistoryRetentionPeriod(element) => ::core::fmt::Display::fmt(element, f), diff --git a/contract-bindings-ethers/src/light_client_arbitrum.rs b/contract-bindings-ethers/src/light_client_arbitrum.rs index df441f0a6d..13fa3c65d5 100644 --- a/contract-bindings-ethers/src/light_client_arbitrum.rs +++ b/contract-bindings-ethers/src/light_client_arbitrum.rs @@ -1726,45 +1726,45 @@ pub mod light_client_arbitrum { Self::AddressEmptyCode(element) => ::ethers::core::abi::AbiEncode::encode(element), 
Self::ERC1967InvalidImplementation(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ERC1967NonPayable(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::FailedInnerCall(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InsufficientSnapshotHistory(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidAddress(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InvalidArgs(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InvalidHotShotBlockForCommitmentCheck(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidInitialization(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidMaxStateHistory(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidProof(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::NoChangeRequired(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::NotInitializing(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::OutdatedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::OwnableInvalidOwner(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::OwnableUnauthorizedAccount(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ProverNotPermissioned(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::UUPSUnauthorizedCallContext(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::UUPSUnsupportedProxiableUUID(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::WrongStakeTableUsed(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::RevertString(s) => ::ethers::core::abi::AbiEncode::encode(s), } } @@ -1859,7 +1859,7 @@ pub mod light_client_arbitrum { Self::AddressEmptyCode(element) => ::core::fmt::Display::fmt(element, f), 
Self::ERC1967InvalidImplementation(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::ERC1967NonPayable(element) => ::core::fmt::Display::fmt(element, f), Self::FailedInnerCall(element) => ::core::fmt::Display::fmt(element, f), Self::InsufficientSnapshotHistory(element) => ::core::fmt::Display::fmt(element, f), @@ -1867,7 +1867,7 @@ pub mod light_client_arbitrum { Self::InvalidArgs(element) => ::core::fmt::Display::fmt(element, f), Self::InvalidHotShotBlockForCommitmentCheck(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::InvalidInitialization(element) => ::core::fmt::Display::fmt(element, f), Self::InvalidMaxStateHistory(element) => ::core::fmt::Display::fmt(element, f), Self::InvalidProof(element) => ::core::fmt::Display::fmt(element, f), @@ -1880,7 +1880,7 @@ pub mod light_client_arbitrum { Self::UUPSUnauthorizedCallContext(element) => ::core::fmt::Display::fmt(element, f), Self::UUPSUnsupportedProxiableUUID(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::WrongStakeTableUsed(element) => ::core::fmt::Display::fmt(element, f), Self::RevertString(s) => ::core::fmt::Display::fmt(s, f), } @@ -2178,10 +2178,10 @@ pub mod light_client_arbitrum { Self::OwnershipTransferredFilter(element) => ::core::fmt::Display::fmt(element, f), Self::PermissionedProverNotRequiredFilter(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::PermissionedProverRequiredFilter(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::UpgradeFilter(element) => ::core::fmt::Display::fmt(element, f), Self::UpgradedFilter(element) => ::core::fmt::Display::fmt(element, f), } @@ -2779,54 +2779,54 @@ pub mod light_client_arbitrum { match self { Self::UpgradeInterfaceVersion(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::CurrentBlockNumber(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::DisablePermissionedProverMode(element) => { 
::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::FinalizedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::GenesisStakeTableState(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::GenesisState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::GetHotShotCommitment(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::GetStateHistoryCount(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::GetVersion(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::Initialize(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::IsPermissionedProverEnabled(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::LagOverEscapeHatchThreshold(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::NewFinalizedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::Owner(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::PermissionedProver(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ProxiableUUID(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::RenounceOwnership(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::SetPermissionedProver(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::SetstateHistoryRetentionPeriod(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StateHistoryCommitments(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StateHistoryFirstIndex(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StateHistoryRetentionPeriod(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::TransferOwnership(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::UpgradeToAndCall(element) => ::ethers::core::abi::AbiEncode::encode(element), } @@ -2839,7 
+2839,7 @@ pub mod light_client_arbitrum { Self::CurrentBlockNumber(element) => ::core::fmt::Display::fmt(element, f), Self::DisablePermissionedProverMode(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::FinalizedState(element) => ::core::fmt::Display::fmt(element, f), Self::GenesisStakeTableState(element) => ::core::fmt::Display::fmt(element, f), Self::GenesisState(element) => ::core::fmt::Display::fmt(element, f), @@ -2857,7 +2857,7 @@ pub mod light_client_arbitrum { Self::SetPermissionedProver(element) => ::core::fmt::Display::fmt(element, f), Self::SetstateHistoryRetentionPeriod(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::StateHistoryCommitments(element) => ::core::fmt::Display::fmt(element, f), Self::StateHistoryFirstIndex(element) => ::core::fmt::Display::fmt(element, f), Self::StateHistoryRetentionPeriod(element) => ::core::fmt::Display::fmt(element, f), diff --git a/contract-bindings-ethers/src/light_client_mock.rs b/contract-bindings-ethers/src/light_client_mock.rs index 951f036d38..3132c0d7e7 100644 --- a/contract-bindings-ethers/src/light_client_mock.rs +++ b/contract-bindings-ethers/src/light_client_mock.rs @@ -1867,45 +1867,45 @@ pub mod light_client_mock { Self::AddressEmptyCode(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::ERC1967InvalidImplementation(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ERC1967NonPayable(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::FailedInnerCall(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InsufficientSnapshotHistory(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidAddress(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InvalidArgs(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InvalidHotShotBlockForCommitmentCheck(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, 
Self::InvalidInitialization(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidMaxStateHistory(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidProof(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::NoChangeRequired(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::NotInitializing(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::OutdatedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::OwnableInvalidOwner(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::OwnableUnauthorizedAccount(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ProverNotPermissioned(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::UUPSUnauthorizedCallContext(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::UUPSUnsupportedProxiableUUID(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::WrongStakeTableUsed(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::RevertString(s) => ::ethers::core::abi::AbiEncode::encode(s), } } @@ -2000,7 +2000,7 @@ pub mod light_client_mock { Self::AddressEmptyCode(element) => ::core::fmt::Display::fmt(element, f), Self::ERC1967InvalidImplementation(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::ERC1967NonPayable(element) => ::core::fmt::Display::fmt(element, f), Self::FailedInnerCall(element) => ::core::fmt::Display::fmt(element, f), Self::InsufficientSnapshotHistory(element) => ::core::fmt::Display::fmt(element, f), @@ -2008,7 +2008,7 @@ pub mod light_client_mock { Self::InvalidArgs(element) => ::core::fmt::Display::fmt(element, f), Self::InvalidHotShotBlockForCommitmentCheck(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::InvalidInitialization(element) => ::core::fmt::Display::fmt(element, f), 
Self::InvalidMaxStateHistory(element) => ::core::fmt::Display::fmt(element, f), Self::InvalidProof(element) => ::core::fmt::Display::fmt(element, f), @@ -2021,7 +2021,7 @@ pub mod light_client_mock { Self::UUPSUnauthorizedCallContext(element) => ::core::fmt::Display::fmt(element, f), Self::UUPSUnsupportedProxiableUUID(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::WrongStakeTableUsed(element) => ::core::fmt::Display::fmt(element, f), Self::RevertString(s) => ::core::fmt::Display::fmt(s, f), } @@ -2319,10 +2319,10 @@ pub mod light_client_mock { Self::OwnershipTransferredFilter(element) => ::core::fmt::Display::fmt(element, f), Self::PermissionedProverNotRequiredFilter(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::PermissionedProverRequiredFilter(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::UpgradeFilter(element) => ::core::fmt::Display::fmt(element, f), Self::UpgradedFilter(element) => ::core::fmt::Display::fmt(element, f), } @@ -3015,60 +3015,60 @@ pub mod light_client_mock { match self { Self::UpgradeInterfaceVersion(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::CurrentBlockNumber(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::DisablePermissionedProverMode(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::FinalizedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::GenesisStakeTableState(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::GenesisState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::GetHotShotCommitment(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::GetStateHistoryCount(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::GetVersion(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::Initialize(element) => ::ethers::core::abi::AbiEncode::encode(element), 
Self::IsPermissionedProverEnabled(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::LagOverEscapeHatchThreshold(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::NewFinalizedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::Owner(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::PermissionedProver(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ProxiableUUID(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::RenounceOwnership(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::SetFinalizedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::SetHotShotDownSince(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::SetHotShotUp(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::SetPermissionedProver(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::SetStateHistory(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::SetstateHistoryRetentionPeriod(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StateHistoryCommitments(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StateHistoryFirstIndex(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StateHistoryRetentionPeriod(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::TransferOwnership(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::UpgradeToAndCall(element) => ::ethers::core::abi::AbiEncode::encode(element), } @@ -3081,7 +3081,7 @@ pub mod light_client_mock { Self::CurrentBlockNumber(element) => ::core::fmt::Display::fmt(element, f), Self::DisablePermissionedProverMode(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::FinalizedState(element) => ::core::fmt::Display::fmt(element, f), Self::GenesisStakeTableState(element) => 
::core::fmt::Display::fmt(element, f), Self::GenesisState(element) => ::core::fmt::Display::fmt(element, f), @@ -3103,7 +3103,7 @@ pub mod light_client_mock { Self::SetStateHistory(element) => ::core::fmt::Display::fmt(element, f), Self::SetstateHistoryRetentionPeriod(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::StateHistoryCommitments(element) => ::core::fmt::Display::fmt(element, f), Self::StateHistoryFirstIndex(element) => ::core::fmt::Display::fmt(element, f), Self::StateHistoryRetentionPeriod(element) => ::core::fmt::Display::fmt(element, f), diff --git a/contract-bindings-ethers/src/permissioned_stake_table.rs b/contract-bindings-ethers/src/permissioned_stake_table.rs index dfabb2739a..aa4e50abbb 100644 --- a/contract-bindings-ethers/src/permissioned_stake_table.rs +++ b/contract-bindings-ethers/src/permissioned_stake_table.rs @@ -763,17 +763,17 @@ pub mod permissioned_stake_table { match self { Self::InvalidInitialization(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::NotInitializing(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::OwnableInvalidOwner(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::OwnableUnauthorizedAccount(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StakerAlreadyExists(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StakerNotFound(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::RevertString(s) => ::ethers::core::abi::AbiEncode::encode(s), } @@ -787,28 +787,28 @@ pub mod permissioned_stake_table { == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ => false, } } @@ -1178,7 +1178,7 @@ pub mod permissioned_stake_table { 
Self::Initialize(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InitializedAtBlock(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::IsStaker(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::Owner(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::RenounceOwnership(element) => ::ethers::core::abi::AbiEncode::encode(element), diff --git a/contract-bindings-ethers/src/plonk_verifier.rs b/contract-bindings-ethers/src/plonk_verifier.rs index 9f5ae4cd72..48b9775bd4 100644 --- a/contract-bindings-ethers/src/plonk_verifier.rs +++ b/contract-bindings-ethers/src/plonk_verifier.rs @@ -442,12 +442,12 @@ pub mod plonk_verifier { [0x08, 0xc3, 0x79, 0xa0] => true, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => true, _ => false, } diff --git a/contracts/rust/adapter/src/jellyfish.rs b/contracts/rust/adapter/src/jellyfish.rs index afdd1a59d5..8b55b99983 100644 --- a/contracts/rust/adapter/src/jellyfish.rs +++ b/contracts/rust/adapter/src/jellyfish.rs @@ -13,10 +13,12 @@ use ethers::{ utils::hex::ToHex, }; use jf_pcs::prelude::Commitment; -use jf_plonk::constants::KECCAK256_STATE_SIZE; -use jf_plonk::proof_system::structs::{OpenKey, Proof, ProofEvaluations, VerifyingKey}; -use jf_plonk::testing_apis::Challenges; -use jf_plonk::transcript::SolidityTranscript; +use jf_plonk::{ + constants::KECCAK256_STATE_SIZE, + proof_system::structs::{OpenKey, Proof, ProofEvaluations, VerifyingKey}, + testing_apis::Challenges, + transcript::SolidityTranscript, +}; use jf_utils::to_bytes; use num_bigint::BigUint; use num_traits::Num; diff --git a/contracts/rust/adapter/src/light_client.rs b/contracts/rust/adapter/src/light_client.rs index fdf8d95a3c..4bca08f08a 100644 --- a/contracts/rust/adapter/src/light_client.rs +++ b/contracts/rust/adapter/src/light_client.rs @@ -4,9 +4,7 @@ use ark_ff::PrimeField; use ark_std::str::FromStr; use 
diff_test_bn254::{field_to_u256, u256_to_field}; use ethers::{ - abi::AbiDecode, - abi::Token, - abi::Tokenize, + abi::{AbiDecode, Token, Tokenize}, prelude::{AbiError, EthAbiCodec, EthAbiType}, types::U256, }; diff --git a/contracts/rust/adapter/src/stake_table.rs b/contracts/rust/adapter/src/stake_table.rs index 1ea6e98077..5e853ad24f 100644 --- a/contracts/rust/adapter/src/stake_table.rs +++ b/contracts/rust/adapter/src/stake_table.rs @@ -1,4 +1,5 @@ -use crate::jellyfish::u256_to_field; +use std::str::FromStr; + use ark_ec::{ short_weierstrass, twisted_edwards::{self, Affine, TECurveConfig}, @@ -29,7 +30,8 @@ use hotshot_types::{ PeerConfig, }; use serde::{Deserialize, Serialize}; -use std::str::FromStr; + +use crate::jellyfish::u256_to_field; // TODO: (alex) maybe move these commonly shared util to a crate /// convert a field element to U256, panic if field size is larger than 256 bit diff --git a/contracts/rust/diff-test/src/main.rs b/contracts/rust/diff-test/src/main.rs index a07382ff11..d87e221cb8 100644 --- a/contracts/rust/diff-test/src/main.rs +++ b/contracts/rust/diff-test/src/main.rs @@ -2,12 +2,10 @@ use ark_bn254::{Bn254, Fq, Fr, G1Affine, G2Affine}; use ark_ec::{AffineRepr, CurveGroup}; use ark_ed_on_bn254::{EdwardsConfig as EdOnBn254Config, Fq as FqEd254}; use ark_ff::field_hashers::{DefaultFieldHasher, HashToField}; -use ark_poly::domain::radix2::Radix2EvaluationDomain; -use ark_poly::EvaluationDomain; +use ark_poly::{domain::radix2::Radix2EvaluationDomain, EvaluationDomain}; use ark_std::rand::{rngs::StdRng, Rng, SeedableRng}; use clap::{Parser, ValueEnum}; use diff_test_bn254::ParsedG2Point; - use ethers::{ abi::{AbiDecode, AbiEncode, Address}, types::{Bytes, U256}, @@ -17,15 +15,19 @@ use hotshot_state_prover::mock_ledger::{ gen_plonk_proof_for_test, MockLedger, MockSystemParam, STAKE_TABLE_CAPACITY, }; use jf_pcs::prelude::Commitment; -use jf_plonk::proof_system::structs::{Proof, VerifyingKey}; -use jf_plonk::proof_system::PlonkKzgSnark; use 
jf_plonk::{ + proof_system::{ + structs::{Proof, VerifyingKey}, + PlonkKzgSnark, + }, testing_apis::Verifier, transcript::{PlonkTranscript, SolidityTranscript}, }; -use jf_signature::bls_over_bn254::{hash_to_curve, KeyPair as BLSKeyPair, Signature}; -use jf_signature::constants::CS_ID_BLS_BN254; -use jf_signature::schnorr::KeyPair as SchnorrKeyPair; +use jf_signature::{ + bls_over_bn254::{hash_to_curve, KeyPair as BLSKeyPair, Signature}, + constants::CS_ID_BLS_BN254, + schnorr::KeyPair as SchnorrKeyPair, +}; use sha3::Keccak256; #[derive(Parser)] @@ -102,7 +104,7 @@ fn main() { field_to_u256(domain.group_gen), ); println!("{}", res.encode_hex()); - } + }, Action::EvalDomainElements => { if cli.args.len() != 2 { panic!("Should provide arg1=logSize, arg2=length"); @@ -117,7 +119,7 @@ fn main() { .map(field_to_u256) .collect::>(); println!("{}", res.encode_hex()); - } + }, Action::EvalDataGen => { if cli.args.len() != 3 { panic!("Should provide arg1=logSize, arg2=zeta, arg3=publicInput"); @@ -138,7 +140,7 @@ fn main() { field_to_u256(pi_eval), ); println!("{}", res.encode_hex()); - } + }, Action::TranscriptAppendMsg => { if cli.args.len() != 2 { panic!("Should provide arg1=transcript, arg2=message"); @@ -153,7 +155,7 @@ fn main() { >::append_message(&mut t, &[], &msg).unwrap(); let res: ParsedTranscript = t.into(); println!("{}", (res,).encode_hex()); - } + }, Action::TranscriptAppendField => { if cli.args.len() != 2 { panic!("Should provide arg1=transcript, arg2=fieldElement"); @@ -165,7 +167,7 @@ fn main() { t.append_field_elem::(&[], &field).unwrap(); let res: ParsedTranscript = t.into(); println!("{}", (res,).encode_hex()); - } + }, Action::TranscriptAppendGroup => { if cli.args.len() != 2 { panic!("Should provide arg1=transcript, arg2=groupElement"); @@ -179,7 +181,7 @@ fn main() { .unwrap(); let res: ParsedTranscript = t.into(); println!("{}", (res,).encode_hex()); - } + }, Action::TranscriptGetChal => { if cli.args.len() != 1 { panic!("Should provide 
arg1=transcript"); @@ -193,7 +195,7 @@ fn main() { let updated_t: ParsedTranscript = t.into(); let res = (updated_t, field_to_u256(chal)); println!("{}", res.encode_hex()); - } + }, Action::TranscriptAppendVkAndPi => { if cli.args.len() != 3 { panic!("Should provide arg1=transcript, arg2=verifyingKey, arg3=publicInput"); @@ -210,7 +212,7 @@ fn main() { let res: ParsedTranscript = t.into(); println!("{}", (res,).encode_hex()); - } + }, Action::TranscriptAppendProofEvals => { if cli.args.len() != 1 { panic!("Should provide arg1=transcript"); @@ -232,7 +234,7 @@ fn main() { let t_updated: ParsedTranscript = t.into(); let res = (t_updated, proof_parsed); println!("{}", res.encode_hex()); - } + }, Action::PlonkConstants => { let coset_k = coset_k(); let open_key = open_key(); @@ -250,7 +252,7 @@ fn main() { field_to_u256::(open_key.beta_h.y().unwrap().c0), ); println!("{}", res.encode_hex()); - } + }, Action::PlonkComputeChal => { if cli.args.len() != 4 { panic!("Should provide arg1=verifyingKey, arg2=publicInput, arg3=proof, arg4=extraTranscriptInitMsg"); @@ -275,9 +277,9 @@ fn main() { .unwrap() .into(); println!("{}", (chal,).encode_hex()); - } + }, Action::PlonkVerify => { - let (proof, vk, public_input, _, _): ( + let (proof, vk, public_input, ..): ( Proof, VerifyingKey, Vec, @@ -304,7 +306,7 @@ fn main() { let res = (vk_parsed, pi_parsed, proof_parsed); println!("{}", res.encode_hex()); - } + }, Action::DummyProof => { let mut rng = jf_utils::test_rng(); if !cli.args.is_empty() { @@ -313,10 +315,10 @@ fn main() { } let proof = ParsedPlonkProof::dummy(&mut rng); println!("{}", (proof,).encode_hex()); - } + }, Action::TestOnly => { println!("args: {:?}", cli.args); - } + }, Action::GenClientWallet => { if cli.args.len() != 2 { panic!("Should provide arg1=senderAddress arg2=seed"); @@ -358,7 +360,7 @@ fn main() { sender_address, ); println!("{}", res.encode_hex()); - } + }, Action::GenRandomG2Point => { if cli.args.len() != 1 { panic!("Should provide arg1=exponent"); 
@@ -370,7 +372,7 @@ fn main() { let point_parsed: ParsedG2Point = point.into(); let res = point_parsed; println!("{}", (res.encode_hex())); - } + }, Action::MockGenesis => { if cli.args.len() != 1 { panic!("Should provide arg1=numInitValidators"); @@ -382,7 +384,7 @@ fn main() { let res = (ledger.get_state(), ledger.get_stake_table_state()); println!("{}", res.encode_hex()); - } + }, Action::MockConsecutiveFinalizedStates => { if cli.args.len() != 1 { panic!("Should provide arg1=numInitValidators"); @@ -413,7 +415,7 @@ fn main() { let res = (new_states, proofs); println!("{}", res.encode_hex()); - } + }, Action::MockSkipBlocks => { if cli.args.is_empty() || cli.args.len() > 2 { panic!("Should provide arg1=numBlockSkipped,arg2(opt)=requireValidProof"); @@ -444,7 +446,7 @@ fn main() { (state_parsed, proof_parsed) }; println!("{}", res.encode_hex()); - } + }, Action::GenBLSHashes => { if cli.args.len() != 1 { panic!("Should provide arg1=message"); @@ -464,7 +466,7 @@ fn main() { let res = (fq_u256, hash_to_curve_elem_parsed); println!("{}", res.encode_hex()); - } + }, Action::GenBLSSig => { let mut rng = jf_utils::test_rng(); @@ -486,6 +488,6 @@ fn main() { let res = (vk_parsed, sig_parsed); println!("{}", res.encode_hex()); - } + }, }; } diff --git a/contracts/rust/gen-vk-contract/src/main.rs b/contracts/rust/gen-vk-contract/src/main.rs index e402297254..5617d34527 100644 --- a/contracts/rust/gen-vk-contract/src/main.rs +++ b/contracts/rust/gen-vk-contract/src/main.rs @@ -5,13 +5,12 @@ use std::{fs::OpenOptions, io::Write, path::PathBuf, process::Command}; +use clap::Parser; use ethers::core::abi::AbiEncode; use hotshot_contract_adapter::jellyfish::ParsedVerifyingKey; use hotshot_stake_table::config::STAKE_TABLE_CAPACITY; use jf_pcs::prelude::UnivariateUniversalParams; -use clap::Parser; - #[derive(Parser)] struct Cli { /// indicate if it's for the mock verification key diff --git a/contracts/test/PlonkVerifier.t.sol b/contracts/test/PlonkVerifier.t.sol index 
c541471c40..07da9808e7 100644 --- a/contracts/test/PlonkVerifier.t.sol +++ b/contracts/test/PlonkVerifier.t.sol @@ -230,6 +230,7 @@ contract PlonkVerifier_validateProof_Test is PlonkVerifierCommonTest { /// @dev Randomly pick a coordinate of a point among points in a proof /// mutate it to another value so that the point is no longer valid, /// test if our check will revert. + /// forge-config: default.allow_internal_expect_revert = true function testFuzz_RevertIfProofContainsInvalidGroup(uint256 nthPoint, bool testX) external { // a valid proof IPlonkVerifier.PlonkProof memory proof = dummyProof(42); @@ -251,12 +252,13 @@ contract PlonkVerifier_validateProof_Test is PlonkVerifierCommonTest { } } - vm.expectRevert(); + vm.expectRevert("Bn254: invalid G1 point"); V._validateProof(proof); } /// @dev Randomly pick field in a proof mutate it to invalid value /// test if our check will revert. + /// forge-config: default.allow_internal_expect_revert = true function testFuzz_RevertIfProofContainsInvalidField(uint256 nthField) external { // a valid proof IPlonkVerifier.PlonkProof memory proof = dummyProof(42); @@ -271,7 +273,7 @@ contract PlonkVerifier_validateProof_Test is PlonkVerifierCommonTest { mstore(add(start, mul(nthField, 0x20)), invalidField) } - vm.expectRevert(); + vm.expectRevert(bytes("Bn254: invalid scalar field")); V._validateProof(proof); } } diff --git a/contracts/test/PolynomialEval.t.sol b/contracts/test/PolynomialEval.t.sol index e4ebba1523..2a7c3c8898 100644 --- a/contracts/test/PolynomialEval.t.sol +++ b/contracts/test/PolynomialEval.t.sol @@ -31,6 +31,7 @@ contract PolynomialEval_newEvalDomain_Test is Test { } /// @dev Test revert if domainSize is not among {2^16 ~ 2^20, 2^5} + /// forge-config: default.allow_internal_expect_revert = true function testFuzz_unsupportedDomainSize_reverts(uint256 domainSize) external { vm.assume( domainSize != 2 ** 16 && domainSize != 2 ** 17 && domainSize != 2 ** 18 diff --git a/flake.lock b/flake.lock index 
5648cdfb01..9c6891b0fe 100644 --- a/flake.lock +++ b/flake.lock @@ -51,6 +51,21 @@ } }, "flake-utils_2": { + "locked": { + "lastModified": 1644229661, + "narHash": "sha256-1YdnJAsNy69bpcjuoKdOYQX0YxZBiCYZo4Twxerqv7k=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "3cecb5b042f7f209c56ffd8371b2711a290ec797", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "flake-utils_3": { "inputs": { "systems": "systems_2" }, @@ -68,7 +83,7 @@ "type": "github" } }, - "flake-utils_3": { + "flake-utils_4": { "inputs": { "systems": "systems_3" }, @@ -86,6 +101,26 @@ "type": "github" } }, + "foundry-nix": { + "inputs": { + "flake-utils": "flake-utils_2", + "nixpkgs": "nixpkgs" + }, + "locked": { + "lastModified": 1740993113, + "narHash": "sha256-XY6CUZft7wjB/cbLyi/xeOZHh2mizSAT0EaYo9wuRXI=", + "owner": "shazow", + "repo": "foundry.nix", + "rev": "ed2a08376f14c0caf2b97418c91a66872e5ab3e2", + "type": "github" + }, + "original": { + "owner": "shazow", + "ref": "monthly", + "repo": "foundry.nix", + "type": "github" + } + }, "gitignore": { "inputs": { "nixpkgs": [ @@ -109,24 +144,22 @@ }, "nixpkgs": { "locked": { - "lastModified": 1736798957, - "narHash": "sha256-qwpCtZhSsSNQtK4xYGzMiyEDhkNzOCz/Vfu4oL2ETsQ=", + "lastModified": 1666753130, + "narHash": "sha256-Wff1dGPFSneXJLI2c0kkdWTgxnQ416KE6X4KnFkgPYQ=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "9abb87b552b7f55ac8916b6fc9e5cb486656a2f3", + "rev": "f540aeda6f677354f1e7144ab04352f61aaa0118", "type": "github" }, "original": { - "owner": "NixOS", - "ref": "nixos-unstable", - "repo": "nixpkgs", - "type": "github" + "id": "nixpkgs", + "type": "indirect" } }, "nixpkgs-cross-overlay": { "inputs": { - "flake-utils": "flake-utils_2", - "nixpkgs": "nixpkgs_2", + "flake-utils": "flake-utils_3", + "nixpkgs": "nixpkgs_3", "rust-overlay": "rust-overlay", "treefmt-nix": "treefmt-nix" }, @@ -144,7 +177,39 @@ "type": "github" } }, + "nixpkgs-legacy-foundry": { + "locked": { 
+ "lastModified": 1736798957, + "narHash": "sha256-qwpCtZhSsSNQtK4xYGzMiyEDhkNzOCz/Vfu4oL2ETsQ=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "9abb87b552b7f55ac8916b6fc9e5cb486656a2f3", + "type": "github" + }, + "original": { + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "9abb87b552b7f55ac8916b6fc9e5cb486656a2f3", + "type": "github" + } + }, "nixpkgs_2": { + "locked": { + "lastModified": 1741246872, + "narHash": "sha256-Q6pMP4a9ed636qilcYX8XUguvKl/0/LGXhHcRI91p0U=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "10069ef4cf863633f57238f179a0297de84bd8d3", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_3": { "locked": { "lastModified": 1733550349, "narHash": "sha256-NcGumB4Lr6KSDq+nIqXtNA8QwAQKDSZT7N9OTGWbTrs=", @@ -160,7 +225,7 @@ "type": "github" } }, - "nixpkgs_3": { + "nixpkgs_4": { "locked": { "lastModified": 1730768919, "narHash": "sha256-8AKquNnnSaJRXZxc5YmF/WfmxiHX6MMZZasRP6RRQkE=", @@ -176,7 +241,7 @@ "type": "github" } }, - "nixpkgs_4": { + "nixpkgs_5": { "locked": { "lastModified": 1736320768, "narHash": "sha256-nIYdTAiKIGnFNugbomgBJR+Xv5F1ZQU+HfaBqJKroC0=", @@ -192,7 +257,7 @@ "type": "github" } }, - "nixpkgs_5": { + "nixpkgs_6": { "locked": { "lastModified": 1682516527, "narHash": "sha256-1joLG1A4mwhMrj4XVp0mBTNIHphVQSEMIlZ50t0Udxk=", @@ -211,7 +276,7 @@ "inputs": { "flake-compat": "flake-compat_2", "gitignore": "gitignore", - "nixpkgs": "nixpkgs_3" + "nixpkgs": "nixpkgs_4" }, "locked": { "lastModified": 1735882644, @@ -231,8 +296,10 @@ "inputs": { "flake-compat": "flake-compat", "flake-utils": "flake-utils", - "nixpkgs": "nixpkgs", + "foundry-nix": "foundry-nix", + "nixpkgs": "nixpkgs_2", "nixpkgs-cross-overlay": "nixpkgs-cross-overlay", + "nixpkgs-legacy-foundry": "nixpkgs-legacy-foundry", "pre-commit-hooks": "pre-commit-hooks", "rust-overlay": "rust-overlay_2", "solc-bin": "solc-bin" @@ -261,7 +328,7 @@ }, "rust-overlay_2": { "inputs": { - 
"nixpkgs": "nixpkgs_4" + "nixpkgs": "nixpkgs_5" }, "locked": { "lastModified": 1740104932, @@ -279,8 +346,8 @@ }, "solc-bin": { "inputs": { - "flake-utils": "flake-utils_3", - "nixpkgs": "nixpkgs_5" + "flake-utils": "flake-utils_4", + "nixpkgs": "nixpkgs_6" }, "locked": { "lastModified": 1733347147, diff --git a/flake.nix b/flake.nix index f6f5f83242..1e6a605157 100644 --- a/flake.nix +++ b/flake.nix @@ -13,6 +13,10 @@ }; inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; + inputs.nixpkgs-legacy-foundry.url = "github:NixOS/nixpkgs/9abb87b552b7f55ac8916b6fc9e5cb486656a2f3"; + + inputs.foundry-nix.url = "github:shazow/foundry.nix/monthly"; # Use monthly branch for permanent releases + inputs.rust-overlay.url = "github:oxalica/rust-overlay"; inputs.nixpkgs-cross-overlay.url = @@ -29,6 +33,8 @@ outputs = { self , nixpkgs + , nixpkgs-legacy-foundry + , foundry-nix , rust-overlay , nixpkgs-cross-overlay , flake-utils @@ -61,6 +67,7 @@ overlays = [ (import rust-overlay) + foundry-nix.overlay solc-bin.overlays.default (final: prev: { solhint = @@ -109,7 +116,7 @@ cargo-fmt = { enable = true; description = "Enforce rustfmt"; - entry = "cargo fmt --all"; + entry = "just fmt"; types_or = [ "rust" "toml" ]; pass_filenames = false; }; @@ -178,7 +185,7 @@ let stableToolchain = pkgs.rust-bin.fromRustupToolchainFile ./rust-toolchain.toml; nightlyToolchain = pkgs.rust-bin.selectLatestNightlyWith (toolchain: toolchain.minimal.override { - extensions = [ "rust-analyzer" ]; + extensions = [ "rust-analyzer" "rustfmt" ]; }); solc = pkgs.solc-bin."0.8.23"; in @@ -186,7 +193,7 @@ buildInputs = [ # Rust dependencies pkg-config - openssl_3 # match ubuntu 24.04 that we use on CI and as base image in docker + openssl curl protobuf # to compile libp2p-autonat stableToolchain @@ -201,6 +208,7 @@ typos just nightlyToolchain.passthru.availableComponents.rust-analyzer + nightlyToolchain.passthru.availableComponents.rustfmt # Tools nixpkgs-fmt @@ -216,25 +224,7 @@ coreutils # Ethereum 
contracts, solidity, ... - # TODO: remove alloy patch when forge includes this fix: https://github.com/alloy-rs/core/pull/864 - # foundry - (foundry.overrideAttrs { - # Set the resolve limit to 128 by replacing the value in the vendored dependencies. - postPatch = '' - pushd $cargoDepsCopy/alloy-sol-macro-expander - - oldHash=$(sha256sum src/expand/mod.rs | cut -d " " -f 1) - - substituteInPlace src/expand/mod.rs \ - --replace-warn \ - 'const RESOLVE_LIMIT: usize = 32;' 'const RESOLVE_LIMIT: usize = 128;' - - substituteInPlace .cargo-checksum.json \ - --replace-warn $oldHash $(sha256sum src/expand/mod.rs | cut -d " " -f 1) - - popd - ''; - }) + foundry-bin solc nodePackages.prettier solhint @@ -254,10 +244,28 @@ # Add rust binaries to PATH for native demo export PATH="$PWD/$CARGO_TARGET_DIR/debug:$PATH" + + # Needed to compile with the sqlite-unbundled feature + export LIBCLANG_PATH="${pkgs.llvmPackages.libclang.lib}/lib"; '' + self.checks.${system}.pre-commit-check.shellHook; RUST_SRC_PATH = "${stableToolchain}/lib/rustlib/src/rust/library"; FOUNDRY_SOLC = "${solc}/bin/solc"; }); + # A shell with foundry v0.3.0 which can still build ethers-rs bindings. + # Can be removed when we are no longer using the ethers-rs bindings. 
+ devShells.legacyFoundry = + let + overlays = [ + solc-bin.overlays.default + ]; + pkgs = import nixpkgs-legacy-foundry { inherit system overlays; }; + in + mkShell { + packages = with pkgs; [ + solc + foundry + ]; + }; devShells.crossShell = crossShell { config = "x86_64-unknown-linux-musl"; }; devShells.armCrossShell = @@ -272,7 +280,7 @@ buildInputs = [ # Rust dependencies pkg-config - openssl_3 + openssl curl protobuf # to compile libp2p-autonat toolchain @@ -286,7 +294,7 @@ buildInputs = [ # Rust dependencies pkg-config - openssl_3 + openssl curl protobuf # to compile libp2p-autonat toolchain @@ -309,7 +317,7 @@ buildInputs = [ # Rust dependencies pkg-config - openssl_3 + openssl curl protobuf # to compile libp2p-autonat stableToolchain diff --git a/foundry.toml b/foundry.toml index e80e9a402e..9d3bfc1697 100644 --- a/foundry.toml +++ b/foundry.toml @@ -22,6 +22,9 @@ extra_output = ["storageLayout"] fs_permissions = [{ access = "read-write", path = "./contracts/script/"}, { access = "read-write", path = "contracts/out"}] ignored_warnings_from = ['contracts/lib'] +# Without the optimizer we hit stack too deep errors. 
+optimizer = true + # See more config options https://github.com/foundry-rs/foundry/tree/master/config [rpc_endpoints] diff --git a/hotshot-builder-api/src/api.rs b/hotshot-builder-api/src/api.rs index 04042630c1..5250d1320a 100644 --- a/hotshot-builder-api/src/api.rs +++ b/hotshot-builder-api/src/api.rs @@ -34,7 +34,7 @@ fn merge_toml(into: &mut Value, from: Value) { Entry::Occupied(mut entry) => merge_toml(entry.get_mut(), value), Entry::Vacant(entry) => { entry.insert(value); - } + }, } } } diff --git a/hotshot-builder-core-refactored/src/block_size_limits.rs b/hotshot-builder-core-refactored/src/block_size_limits.rs index 47cca3aca2..b33cc656ae 100644 --- a/hotshot-builder-core-refactored/src/block_size_limits.rs +++ b/hotshot-builder-core-refactored/src/block_size_limits.rs @@ -1,6 +1,7 @@ +use std::sync::atomic::Ordering; + use atomic::Atomic; use coarsetime::{Duration, Instant}; -use std::sync::atomic::Ordering; #[derive(Debug, Clone, Copy, bytemuck::NoUninit)] #[repr(C)] diff --git a/hotshot-builder-core-refactored/src/block_store.rs b/hotshot-builder-core-refactored/src/block_store.rs index 26e5dcaf2b..6119ae7eda 100644 --- a/hotshot-builder-core-refactored/src/block_store.rs +++ b/hotshot-builder-core-refactored/src/block_store.rs @@ -2,17 +2,14 @@ use std::marker::PhantomData; use hotshot::traits::BlockPayload; use hotshot_builder_api::v0_1::block_info::AvailableBlockInfo; -use hotshot_types::traits::signature_key::BuilderSignatureKey; -use marketplace_builder_shared::error::Error; -use marketplace_builder_shared::utils::BuilderKeys; +use hotshot_types::traits::{node_implementation::NodeType, signature_key::BuilderSignatureKey}; use marketplace_builder_shared::{ - block::BuilderStateId, coordinator::tiered_view_map::TieredViewMap, + block::{BlockId, BuilderStateId}, + coordinator::tiered_view_map::TieredViewMap, + error::Error, + utils::BuilderKeys, }; -use marketplace_builder_shared::block::BlockId; - -use 
hotshot_types::traits::node_implementation::NodeType; - // It holds all the necessary information for a block #[derive(Debug, Clone)] pub struct BlockInfo { diff --git a/hotshot-builder-core-refactored/src/service.rs b/hotshot-builder-core-refactored/src/service.rs index d9831565d7..d3c17a7464 100644 --- a/hotshot-builder-core-refactored/src/service.rs +++ b/hotshot-builder-core-refactored/src/service.rs @@ -1,3 +1,21 @@ +use std::{ + fmt::Display, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::{Duration, Instant}, +}; + +pub use async_broadcast::{broadcast, RecvError, TryRecvError}; +use async_lock::RwLock; +use async_trait::async_trait; +use committable::Commitment; +use futures::{ + future::BoxFuture, + stream::{FuturesOrdered, FuturesUnordered, StreamExt}, + Stream, TryStreamExt, +}; use hotshot::types::Event; use hotshot_builder_api::{ v0_1::{ @@ -9,51 +27,38 @@ use hotshot_builder_api::{ }, v0_2::block_info::AvailableBlockHeaderInputV1, }; -use hotshot_types::traits::block_contents::Transaction; -use hotshot_types::traits::EncodeBytes; use hotshot_types::{ data::VidCommitment, event::EventType, traits::{ - block_contents::BlockPayload, + block_contents::{BlockPayload, Transaction}, node_implementation::{ConsensusTime, NodeType}, signature_key::{BuilderSignatureKey, SignatureKey}, + EncodeBytes, }, utils::BuilderCommitment, }; -use marketplace_builder_shared::coordinator::BuilderStateLookup; -use marketplace_builder_shared::error::Error; -use marketplace_builder_shared::state::BuilderState; -use marketplace_builder_shared::utils::BuilderKeys; use marketplace_builder_shared::{ block::{BlockId, BuilderStateId, ReceivedTransaction, TransactionSource}, - coordinator::BuilderStateCoordinator, + coordinator::{BuilderStateCoordinator, BuilderStateLookup}, + error::Error, + state::BuilderState, + utils::BuilderKeys, +}; +use tagged_base64::TaggedBase64; +use tide_disco::{app::AppError, method::ReadState, App}; +use tokio::{ + spawn, + 
task::JoinHandle, + time::{sleep, timeout}, }; -use tide_disco::app::AppError; -use tokio::spawn; -use tokio::time::{sleep, timeout}; use tracing::{error, info, instrument, trace, warn}; use vbs::version::StaticVersion; -use crate::block_size_limits::BlockSizeLimits; -use crate::block_store::{BlockInfo, BlockStore}; -pub use async_broadcast::{broadcast, RecvError, TryRecvError}; -use async_lock::RwLock; -use async_trait::async_trait; -use committable::Commitment; -use futures::{future::BoxFuture, Stream}; -use futures::{ - stream::{FuturesOrdered, FuturesUnordered, StreamExt}, - TryStreamExt, +use crate::{ + block_size_limits::BlockSizeLimits, + block_store::{BlockInfo, BlockStore}, }; -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering; -use std::sync::Arc; -use std::time::Duration; -use std::{fmt::Display, time::Instant}; -use tagged_base64::TaggedBase64; -use tide_disco::{method::ReadState, App}; -use tokio::task::JoinHandle; /// Proportion of overall allotted time to wait for optimal builder state /// to appear before resorting to highest view builder state @@ -201,7 +206,7 @@ where match event.event { EventType::Error { error } => { error!("Error event in HotShot: {:?}", error); - } + }, EventType::Transactions { transactions } => { let this = Arc::clone(&self); spawn(async move { @@ -217,7 +222,7 @@ where .collect::>() .await; }); - } + }, EventType::Decide { leaf_chain, .. } => { let prune_cutoff = leaf_chain[0].leaf.view_number(); @@ -226,16 +231,16 @@ where let this = Arc::clone(&self); spawn(async move { this.block_store.write().await.prune(prune_cutoff) }); - } + }, EventType::DaProposal { proposal, .. } => { let coordinator = Arc::clone(&self.coordinator); spawn(async move { coordinator.handle_da_proposal(proposal.data).await }); - } + }, EventType::QuorumProposal { proposal, .. 
} => { let coordinator = Arc::clone(&self.coordinator); spawn(async move { coordinator.handle_quorum_proposal(proposal.data).await }); - } - _ => {} + }, + _ => {}, } } } @@ -287,10 +292,10 @@ where BuilderStateLookup::Found(builder) => break Ok(builder), BuilderStateLookup::Decided => { return Err(Error::AlreadyDecided); - } + }, BuilderStateLookup::NotFound => { sleep(check_period).await; - } + }, }; } } @@ -374,7 +379,7 @@ where Err(error) => { warn!(?error, "Failed to build block payload"); return Err(Error::BuildBlock(error)); - } + }, }; // count the number of txns @@ -442,7 +447,7 @@ where // Timeout waiting for ideal state, get the highest view builder instead warn!("Couldn't find the ideal builder state"); self.coordinator.highest_view_builder().await - } + }, Ok(Err(e)) => { // State already decided let lowest_view = self.coordinator.lowest_view().await; @@ -451,7 +456,7 @@ where "get_available_blocks request for decided view" ); return Err(e); - } + }, }; let Some(builder) = builder else { @@ -485,7 +490,7 @@ where } Ok(vec![response]) - } + }, // Success, but no block: we don't have transactions and aren't prioritizing finalization Ok(Ok(None)) => Ok(vec![]), // Error building block, try to respond with a cached one as last-ditch attempt @@ -495,7 +500,7 @@ where } else { Err(e) } - } + }, } } diff --git a/hotshot-builder-core-refactored/src/testing/basic.rs b/hotshot-builder-core-refactored/src/testing/basic.rs index f353774c0a..24ace84754 100644 --- a/hotshot-builder-core-refactored/src/testing/basic.rs +++ b/hotshot-builder-core-refactored/src/testing/basic.rs @@ -1,29 +1,37 @@ +use std::{sync::Arc, time::Duration}; + use async_broadcast::broadcast; use hotshot::types::{EventType, SignatureKey}; - use hotshot_builder_api::v0_1::data_source::BuilderDataSource; -use hotshot_example_types::block_types::{TestBlockHeader, TestMetadata, TestTransaction}; -use hotshot_example_types::node_types::{TestTypes, TestVersions}; -use 
hotshot_example_types::state_types::{TestInstanceState, TestValidatedState}; -use hotshot_types::data::VidCommitment; -use hotshot_types::data::{Leaf2, QuorumProposal2, QuorumProposalWrapper, ViewNumber}; -use hotshot_types::event::LeafInfo; -use hotshot_types::simple_certificate::QuorumCertificate2; -use hotshot_types::traits::block_contents::BlockHeader; -use hotshot_types::traits::node_implementation::{ConsensusTime, NodeType}; -use hotshot_types::utils::BuilderCommitment; -use marketplace_builder_shared::error::Error; -use marketplace_builder_shared::testing::consensus::SimulatedChainState; -use marketplace_builder_shared::testing::constants::{ - TEST_NUM_NODES_IN_VID_COMPUTATION, TEST_PROTOCOL_MAX_BLOCK_SIZE, +use hotshot_example_types::{ + block_types::{TestBlockHeader, TestMetadata, TestTransaction}, + node_types::{TestTypes, TestVersions}, + state_types::{TestInstanceState, TestValidatedState}, +}; +use hotshot_types::{ + data::{Leaf2, QuorumProposal2, QuorumProposalWrapper, VidCommitment, ViewNumber}, + event::LeafInfo, + simple_certificate::QuorumCertificate2, + traits::{ + block_contents::BlockHeader, + node_implementation::{ConsensusTime, NodeType}, + }, + utils::BuilderCommitment, +}; +use marketplace_builder_shared::{ + error::Error, + testing::{ + consensus::SimulatedChainState, + constants::{TEST_NUM_NODES_IN_VID_COMPUTATION, TEST_PROTOCOL_MAX_BLOCK_SIZE}, + }, }; use tokio::time::sleep; use tracing_test::traced_test; -use crate::service::{BuilderConfig, GlobalState, ProxyGlobalState}; -use crate::testing::{assert_eq_generic_err, sign, TestServiceWrapper, MOCK_LEADER_KEYS}; -use std::sync::Arc; -use std::time::Duration; +use crate::{ + service::{BuilderConfig, GlobalState, ProxyGlobalState}, + testing::{assert_eq_generic_err, sign, TestServiceWrapper, MOCK_LEADER_KEYS}, +}; /// This test simulates consensus performing as expected and builder processing a number /// of transactions diff --git 
a/hotshot-builder-core-refactored/src/testing/block_size.rs b/hotshot-builder-core-refactored/src/testing/block_size.rs index 7ba6c672c3..85620896fe 100644 --- a/hotshot-builder-core-refactored/src/testing/block_size.rs +++ b/hotshot-builder-core-refactored/src/testing/block_size.rs @@ -1,22 +1,27 @@ +use std::{ + sync::{atomic::Ordering, Arc}, + time::Duration, +}; + use async_broadcast::broadcast; use committable::Committable; use hotshot_builder_api::v0_1::builder::TransactionStatus; -use hotshot_example_types::block_types::TestTransaction; -use hotshot_example_types::state_types::TestInstanceState; -use hotshot_types::data::VidCommitment; -use hotshot_types::data::ViewNumber; -use hotshot_types::traits::node_implementation::ConsensusTime; -use marketplace_builder_shared::block::{BlockId, BuilderStateId}; -use marketplace_builder_shared::testing::consensus::SimulatedChainState; -use marketplace_builder_shared::testing::constants::TEST_NUM_NODES_IN_VID_COMPUTATION; +use hotshot_example_types::{block_types::TestTransaction, state_types::TestInstanceState}; +use hotshot_types::{ + data::{VidCommitment, ViewNumber}, + traits::node_implementation::ConsensusTime, +}; +use marketplace_builder_shared::{ + block::{BlockId, BuilderStateId}, + testing::{consensus::SimulatedChainState, constants::TEST_NUM_NODES_IN_VID_COMPUTATION}, +}; use tracing_test::traced_test; -use crate::block_size_limits::BlockSizeLimits; -use crate::service::{BuilderConfig, GlobalState}; -use crate::testing::TestServiceWrapper; -use std::sync::atomic::Ordering; -use std::sync::Arc; -use std::time::Duration; +use crate::{ + block_size_limits::BlockSizeLimits, + service::{BuilderConfig, GlobalState}, + testing::TestServiceWrapper, +}; /// This tests simulates size limits being decreased lower than our capacity /// and then checks that size limits return to protocol maximum over time diff --git a/hotshot-builder-core-refactored/src/testing/finalization.rs 
b/hotshot-builder-core-refactored/src/testing/finalization.rs index 13b1d21678..40fd3833fb 100644 --- a/hotshot-builder-core-refactored/src/testing/finalization.rs +++ b/hotshot-builder-core-refactored/src/testing/finalization.rs @@ -1,17 +1,18 @@ +use std::sync::Arc; + use async_broadcast::broadcast; +use hotshot_example_types::{block_types::TestTransaction, state_types::TestInstanceState}; +use marketplace_builder_shared::testing::{ + consensus::SimulatedChainState, + constants::{TEST_NUM_NODES_IN_VID_COMPUTATION, TEST_PROTOCOL_MAX_BLOCK_SIZE}, +}; use tracing_test::traced_test; -use hotshot_example_types::block_types::TestTransaction; -use hotshot_example_types::state_types::TestInstanceState; -use marketplace_builder_shared::testing::consensus::SimulatedChainState; -use marketplace_builder_shared::testing::constants::{ - TEST_NUM_NODES_IN_VID_COMPUTATION, TEST_PROTOCOL_MAX_BLOCK_SIZE, +use crate::{ + service::{BuilderConfig, GlobalState, ALLOW_EMPTY_BLOCK_PERIOD}, + testing::TestServiceWrapper, }; -use crate::service::{BuilderConfig, GlobalState, ALLOW_EMPTY_BLOCK_PERIOD}; -use crate::testing::TestServiceWrapper; -use std::sync::Arc; - // How many times consensus will re-try getting available blocks const NUM_RETRIES: usize = 5; diff --git a/hotshot-builder-core-refactored/src/testing/integration.rs b/hotshot-builder-core-refactored/src/testing/integration.rs index f8df6e0a6a..41c6a84270 100644 --- a/hotshot-builder-core-refactored/src/testing/integration.rs +++ b/hotshot-builder-core-refactored/src/testing/integration.rs @@ -118,21 +118,20 @@ where mod tests { use std::time::Duration; - use crate::testing::integration::LegacyBuilderImpl; - use marketplace_builder_shared::testing::{ - generation::{self, TransactionGenerationConfig}, - run_test, - validation::BuilderValidationConfig, - }; - - use hotshot_example_types::node_types::TestVersions; - use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; + use hotshot_example_types::node_types::{MemoryImpl, 
TestTypes, TestVersions}; use hotshot_macros::cross_tests; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, test_builder::TestDescription, }; + use marketplace_builder_shared::testing::{ + generation::{self, TransactionGenerationConfig}, + run_test, + validation::BuilderValidationConfig, + }; + + use crate::testing::integration::LegacyBuilderImpl; #[tokio::test(flavor = "multi_thread")] #[tracing::instrument] diff --git a/hotshot-builder-core-refactored/src/testing/mod.rs b/hotshot-builder-core-refactored/src/testing/mod.rs index 08207ed994..e3b8c9ecbb 100644 --- a/hotshot-builder-core-refactored/src/testing/mod.rs +++ b/hotshot-builder-core-refactored/src/testing/mod.rs @@ -2,26 +2,30 @@ #![allow(clippy::declare_interior_mutable_const)] #![allow(clippy::borrow_interior_mutable_const)] -use std::cell::LazyCell; -use std::sync::Arc; -use std::time::Duration; +use std::{cell::LazyCell, sync::Arc, time::Duration}; use async_broadcast::Sender; use committable::Commitment; -use hotshot::rand::{thread_rng, Rng}; -use hotshot::types::{BLSPubKey, Event, EventType, SignatureKey}; -use hotshot_builder_api::v0_1::block_info::AvailableBlockHeaderInputV1; -use hotshot_builder_api::v0_1::builder::BuildError; -use hotshot_builder_api::v0_1::data_source::AcceptsTxnSubmits; -use hotshot_builder_api::v0_1::{block_info::AvailableBlockInfo, data_source::BuilderDataSource}; -use hotshot_example_types::block_types::TestTransaction; -use hotshot_example_types::node_types::TestTypes; +use hotshot::{ + rand::{thread_rng, Rng}, + types::{BLSPubKey, Event, EventType, SignatureKey}, +}; +use hotshot_builder_api::v0_1::{ + block_info::{AvailableBlockHeaderInputV1, AvailableBlockInfo}, + builder::BuildError, + data_source::{AcceptsTxnSubmits, BuilderDataSource}, +}; +use hotshot_example_types::{block_types::TestTransaction, node_types::TestTypes}; use 
hotshot_task_impls::builder::v0_1::BuilderClient; -use hotshot_types::data::ViewNumber; -use hotshot_types::traits::node_implementation::{ConsensusTime, NodeType}; -use marketplace_builder_shared::block::{BlockId, BuilderStateId}; -use marketplace_builder_shared::error::Error; -use marketplace_builder_shared::utils::BuilderKeys; +use hotshot_types::{ + data::ViewNumber, + traits::node_implementation::{ConsensusTime, NodeType}, +}; +use marketplace_builder_shared::{ + block::{BlockId, BuilderStateId}, + error::Error, + utils::BuilderKeys, +}; use tokio::spawn; use url::Url; use vbs::version::StaticVersion; diff --git a/hotshot-builder-core/src/builder_state.rs b/hotshot-builder-core/src/builder_state.rs index 558d0d8767..0f156370e0 100644 --- a/hotshot-builder-core/src/builder_state.rs +++ b/hotshot-builder-core/src/builder_state.rs @@ -1,3 +1,17 @@ +use core::panic; +use std::{ + cmp::PartialEq, + collections::{hash_map::Entry, HashMap, HashSet, VecDeque}, + fmt::Debug, + marker::PhantomData, + sync::Arc, + time::{Duration, Instant}, +}; + +use async_broadcast::{broadcast, Receiver as BroadcastReceiver, Sender as BroadcastSender}; +use async_lock::RwLock; +use committable::{Commitment, Committable}; +use futures::StreamExt; use hotshot_types::{ data::{DaProposal2, Leaf2, QuorumProposalWrapper}, message::Proposal, @@ -9,29 +23,13 @@ use hotshot_types::{ utils::BuilderCommitment, }; use marketplace_builder_shared::block::{BlockId, BuilderStateId, ParentBlockReferences}; - -use committable::{Commitment, Committable}; - -use crate::service::{GlobalState, ReceivedTransaction}; -use async_broadcast::broadcast; -use async_broadcast::Receiver as BroadcastReceiver; -use async_broadcast::Sender as BroadcastSender; -use async_lock::RwLock; -use core::panic; -use futures::StreamExt; - use tokio::{ spawn, sync::{mpsc::UnboundedSender, oneshot}, time::sleep, }; -use std::collections::{HashMap, HashSet, VecDeque}; -use std::fmt::Debug; -use std::sync::Arc; -use 
std::time::Instant; -use std::{cmp::PartialEq, marker::PhantomData}; -use std::{collections::hash_map::Entry, time::Duration}; +use crate::service::{GlobalState, ReceivedTransaction}; pub type TxTimeStamp = u128; @@ -295,7 +293,7 @@ async fn best_builder_states_to_extend( Some(parent_block_references) => { parent_block_references.leaf_commit == justify_qc.data.leaf_commit && parent_block_references.view_number == justify_qc.view_number - } + }, }, ) .map(|(builder_state_id, _)| builder_state_id.clone()) @@ -1102,15 +1100,15 @@ impl BuilderState { } self.txns_in_queue.insert(tx.commit); self.tx_queue.push_back(tx); - } + }, Err(async_broadcast::TryRecvError::Empty) | Err(async_broadcast::TryRecvError::Closed) => { break; - } + }, Err(async_broadcast::TryRecvError::Overflowed(lost)) => { tracing::warn!("Missed {lost} transactions due to backlog"); continue; - } + }, } } } @@ -1122,20 +1120,19 @@ mod test { use async_broadcast::broadcast; use committable::RawCommitmentBuilder; - use hotshot_example_types::block_types::TestTransaction; - use hotshot_example_types::node_types::TestTypes; - use hotshot_example_types::node_types::TestVersions; - use hotshot_types::data::Leaf2; - use hotshot_types::data::QuorumProposalWrapper; - use hotshot_types::data::ViewNumber; - use hotshot_types::traits::node_implementation::{ConsensusTime, NodeType}; - use hotshot_types::utils::BuilderCommitment; + use hotshot_example_types::{ + block_types::TestTransaction, + node_types::{TestTypes, TestVersions}, + }; + use hotshot_types::{ + data::{Leaf2, QuorumProposalWrapper, ViewNumber}, + traits::node_implementation::{ConsensusTime, NodeType}, + utils::BuilderCommitment, + }; use marketplace_builder_shared::testing::constants::TEST_NUM_NODES_IN_VID_COMPUTATION; use tracing_subscriber::EnvFilter; - use super::DAProposalInfo; - use super::MessageType; - use super::ParentBlockReferences; + use super::{DAProposalInfo, MessageType, ParentBlockReferences}; use 
crate::testing::{calc_builder_commitment, calc_proposal_msg, create_builder_state}; /// This test the function `process_da_proposal`. diff --git a/hotshot-builder-core/src/service.rs b/hotshot-builder-core/src/service.rs index a45c4039a0..06176a217b 100644 --- a/hotshot-builder-core/src/service.rs +++ b/hotshot-builder-core/src/service.rs @@ -1,3 +1,17 @@ +use std::{ + collections::HashMap, + fmt::Display, + num::NonZeroUsize, + sync::Arc, + time::{Duration, Instant}, +}; + +pub use async_broadcast::{broadcast, RecvError, TryRecvError}; +use async_broadcast::{Sender as BroadcastSender, TrySendError}; +use async_lock::RwLock; +use async_trait::async_trait; +use committable::{Commitment, Committable}; +use futures::{future::BoxFuture, stream::StreamExt, Stream}; use hotshot::types::Event; use hotshot_builder_api::{ v0_1::{ @@ -8,8 +22,7 @@ use hotshot_builder_api::{ v0_2::builder::TransactionStatus, }; use hotshot_types::{ - data::VidCommitment, - data::{DaProposal2, Leaf2, QuorumProposalWrapper}, + data::{DaProposal2, Leaf2, QuorumProposalWrapper, VidCommitment}, event::EventType, message::Proposal, traits::{ @@ -20,33 +33,20 @@ use hotshot_types::{ utils::BuilderCommitment, }; use lru::LruCache; -use vbs::version::StaticVersionType; - -use crate::builder_state::{ - BuildBlockInfo, DaProposalMessage, DecideMessage, QuorumProposalMessage, TransactionSource, - TriggerStatus, -}; -use crate::builder_state::{MessageType, RequestMessage, ResponseMessage}; -pub use async_broadcast::{broadcast, RecvError, TryRecvError}; -use async_broadcast::{Sender as BroadcastSender, TrySendError}; -use async_lock::RwLock; -use async_trait::async_trait; -use committable::{Commitment, Committable}; -use futures::stream::StreamExt; -use futures::{future::BoxFuture, Stream}; use marketplace_builder_shared::block::{BlockId, BuilderStateId, ParentBlockReferences}; use sha2::{Digest, Sha256}; -use std::collections::HashMap; -use std::num::NonZeroUsize; -use std::sync::Arc; -use 
std::time::Duration; -use std::{fmt::Display, time::Instant}; use tagged_base64::TaggedBase64; use tide_disco::method::ReadState; use tokio::{ sync::{mpsc::unbounded_channel, oneshot}, time::{sleep, timeout}, }; +use vbs::version::StaticVersionType; + +use crate::builder_state::{ + BuildBlockInfo, DaProposalMessage, DecideMessage, MessageType, QuorumProposalMessage, + RequestMessage, ResponseMessage, TransactionSource, TriggerStatus, +}; // It holds all the necessary information for a block #[derive(Debug)] @@ -409,19 +409,19 @@ impl GlobalState { match old_status { Some(TransactionStatus::Rejected { reason }) => { tracing::debug!("Changing the status of a rejected transaction to status {:?}! The reason it is previously rejected is {:?}", txn_status, reason); - } + }, Some(TransactionStatus::Sequenced { leaf }) => { let e = format!("Changing the status of a sequenced transaction to status {:?} is not allowed! The transaction is sequenced in leaf {:?}", txn_status, leaf); tracing::error!(e); return Err(BuildError::Error(e)); - } + }, _ => { tracing::debug!( "change status of transaction {txn_hash} from {:?} to {:?}", old_status, txn_status ); - } + }, } } else { tracing::debug!( @@ -540,23 +540,23 @@ impl From> for BuildError { match error { AvailableBlocksError::SignatureValidationFailed => { BuildError::Error("Signature validation failed in get_available_blocks".to_string()) - } + }, AvailableBlocksError::RequestForAvailableViewThatHasAlreadyBeenDecided => { BuildError::Error( "Request for available blocks for a view that has already been decided." 
.to_string(), ) - } + }, AvailableBlocksError::SigningBlockFailed(e) => { BuildError::Error(format!("Signing over block info failed: {:?}", e)) - } + }, AvailableBlocksError::GetChannelForMatchingBuilderError(e) => e.into(), AvailableBlocksError::NoBlocksAvailable => { BuildError::Error("No blocks available".to_string()) - } + }, AvailableBlocksError::ChannelUnexpectedlyClosed => { BuildError::Error("Channel unexpectedly closed".to_string()) - } + }, } } } @@ -580,13 +580,13 @@ impl From> for BuildError { match error { ClaimBlockError::SignatureValidationFailed => { BuildError::Error("Signature validation failed in claim block".to_string()) - } + }, ClaimBlockError::SigningCommitmentFailed(e) => { BuildError::Error(format!("Signing over builder commitment failed: {:?}", e)) - } + }, ClaimBlockError::BlockDataNotFound => { BuildError::Error("Block data not found".to_string()) - } + }, } } } @@ -608,10 +608,10 @@ impl From> for BuildError { ), ClaimBlockHeaderInputError::BlockHeaderNotFound => { BuildError::Error("Block header not found".to_string()) - } + }, ClaimBlockHeaderInputError::FailedToSignFeeInfo(e) => { BuildError::Error(format!("Failed to sign fee info: {:?}", e)) - } + }, } } } @@ -743,7 +743,7 @@ impl ProxyGlobalState { break Err(AvailableBlocksError::NoBlocksAvailable); } continue; - } + }, Ok(recv_attempt) => { if recv_attempt.is_none() { tracing::error!( @@ -752,7 +752,7 @@ impl ProxyGlobalState { } break recv_attempt .ok_or_else(|| AvailableBlocksError::ChannelUnexpectedlyClosed); - } + }, } }; @@ -783,13 +783,13 @@ impl ProxyGlobalState { response.builder_hash ); Ok(vec![initial_block_info]) - } + }, // We failed to get available blocks Err(e) => { tracing::debug!("Failed to get available blocks for parent {state_id}",); Err(e) - } + }, } } @@ -1111,7 +1111,7 @@ pub async fn run_non_permissioned_standalone_builder_service< match event.event { EventType::Error { error } => { tracing::error!("Error event in HotShot: {:?}", error); - } + }, // tx 
event EventType::Transactions { transactions } => { let max_block_size = { @@ -1151,7 +1151,7 @@ pub async fn run_non_permissioned_standalone_builder_service< .await?; } } - } + }, // decide event EventType::Decide { block_size: _, @@ -1160,19 +1160,19 @@ pub async fn run_non_permissioned_standalone_builder_service< } => { let latest_decide_view_num = leaf_chain[0].leaf.view_number(); handle_decide_event(&decide_sender, latest_decide_view_num).await; - } + }, // DA proposal event EventType::DaProposal { proposal, sender } => { handle_da_event(&da_sender, Arc::new(proposal), sender).await; - } + }, // QC proposal event EventType::QuorumProposal { proposal, sender } => { // get the leader for current view handle_quorum_event(&quorum_sender, Arc::new(proposal), sender).await; - } + }, _ => { tracing::debug!("Unhandled event from Builder"); - } + }, } } } @@ -1533,32 +1533,33 @@ mod test { use std::{sync::Arc, time::Duration}; use async_lock::RwLock; - use committable::Commitment; - use committable::Committable; + use committable::{Commitment, Committable}; use futures::StreamExt; use hotshot::{ traits::BlockPayload, types::{BLSPubKey, SignatureKey}, }; - use hotshot_builder_api::v0_1::data_source::AcceptsTxnSubmits; - use hotshot_builder_api::v0_2::block_info::AvailableBlockInfo; - use hotshot_builder_api::v0_2::builder::TransactionStatus; + use hotshot_builder_api::{ + v0_1::data_source::AcceptsTxnSubmits, + v0_2::{block_info::AvailableBlockInfo, builder::TransactionStatus}, + }; use hotshot_example_types::{ block_types::{TestBlockPayload, TestMetadata, TestTransaction}, node_types::{TestTypes, TestVersions}, state_types::{TestInstanceState, TestValidatedState}, }; - use hotshot_types::data::DaProposal2; - use hotshot_types::data::EpochNumber; - use hotshot_types::data::Leaf2; - use hotshot_types::data::{QuorumProposal2, QuorumProposalWrapper}; - use hotshot_types::simple_certificate::QuorumCertificate2; - use hotshot_types::traits::block_contents::Transaction; - use 
hotshot_types::traits::node_implementation::Versions; use hotshot_types::{ - data::{vid_commitment, Leaf, ViewNumber}, + data::{ + vid_commitment, DaProposal2, EpochNumber, Leaf, Leaf2, QuorumProposal2, + QuorumProposalWrapper, ViewNumber, + }, message::Proposal, - traits::{node_implementation::ConsensusTime, signature_key::BuilderSignatureKey}, + simple_certificate::QuorumCertificate2, + traits::{ + block_contents::Transaction, + node_implementation::{ConsensusTime, Versions}, + signature_key::BuilderSignatureKey, + }, utils::BuilderCommitment, }; use marketplace_builder_shared::{ @@ -1575,6 +1576,11 @@ mod test { }; use vbs::version::StaticVersionType; + use super::{ + handle_da_event_implementation, handle_quorum_event_implementation, AvailableBlocksError, + BlockInfo, ClaimBlockError, ClaimBlockHeaderInputError, GlobalState, HandleDaEventError, + HandleQuorumEventError, HandleReceivedTxns, ProxyGlobalState, + }; use crate::{ builder_state::{ BuildBlockInfo, MessageType, RequestMessage, ResponseMessage, TransactionSource, @@ -1587,12 +1593,6 @@ mod test { }, }; - use super::{ - handle_da_event_implementation, handle_quorum_event_implementation, AvailableBlocksError, - BlockInfo, ClaimBlockError, ClaimBlockHeaderInputError, GlobalState, HandleDaEventError, - HandleQuorumEventError, HandleReceivedTxns, ProxyGlobalState, - }; - /// A const number on `max_tx_len` to be used consistently spanning all the tests /// It is set to 1 as current estimation on `TestTransaction` is 1 const TEST_MAX_TX_LEN: u64 = 1; @@ -2141,10 +2141,10 @@ mod test { match vid_trigger_receiver.await { Ok(TriggerStatus::Start) => { // This is expected - } + }, _ => { panic!("did not receive TriggerStatus::Start from vid_trigger_receiver as expected"); - } + }, } } @@ -2366,10 +2366,10 @@ mod test { match vid_trigger_receiver_2.await { Ok(TriggerStatus::Start) => { // This is expected - } + }, _ => { panic!("did not receive TriggerStatus::Start from vid_trigger_receiver as expected"); - } + }, 
} assert!( @@ -2960,13 +2960,13 @@ mod test { Err(AvailableBlocksError::NoBlocksAvailable) => { // This is what we expect. // This message *should* indicate that no blocks were available. - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, } } @@ -3032,13 +3032,13 @@ mod test { // This is what we expect. // This message *should* indicate that the signature passed // did not match the given public key. - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, } } @@ -3103,13 +3103,13 @@ mod test { // This is what we expect. // This message *should* indicate that the signature passed // did not match the given public key. - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, } } @@ -3172,13 +3172,13 @@ mod test { Err(AvailableBlocksError::GetChannelForMatchingBuilderError(_)) => { // This is what we expect. // This message *should* indicate that the response channel was closed. 
- } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, } } @@ -3285,17 +3285,17 @@ mod test { let response_channel = match response_receiver.next().await { None => { panic!("Expected a request for available blocks, but didn't get one"); - } + }, Some(MessageType::RequestMessage(req_msg)) => { assert_eq!(req_msg.state_id, expected_builder_state_id); req_msg.response_channel - } + }, Some(message) => { panic!( "Expected a request for available blocks, but got a different message: {:?}", message ); - } + }, }; // We want to send a ResponseMessage to the channel @@ -3316,7 +3316,7 @@ mod test { match result { Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(result) => { assert_eq!( result, @@ -3336,7 +3336,7 @@ mod test { }], "get_available_blocks response matches expectation" ); - } + }, } } @@ -3431,17 +3431,17 @@ mod test { let response_channel = match response_receiver.next().await { None => { panic!("Expected a request for available blocks, but didn't get one"); - } + }, Some(MessageType::RequestMessage(req_msg)) => { assert_eq!(req_msg.state_id, expected_builder_state_id); req_msg.response_channel - } + }, Some(message) => { panic!( "Expected a request for available blocks, but got a different message: {:?}", message ); - } + }, }; // We want to send a ResponseMessage to the channel @@ -3462,7 +3462,7 @@ mod test { match result { Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(result) => { assert_eq!( result, @@ -3482,7 +3482,7 @@ mod test { }], "get_available_blocks response matches expectation" ); - } + }, } } @@ -3539,13 +3539,13 @@ mod test { // This is what we expect. // This message *should* indicate that the signature passed // did not match the given public key. 
- } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, } } @@ -3600,13 +3600,13 @@ mod test { // This is what we expect. // This message *should* indicate that the signature passed // did not match the given public key. - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, } } @@ -3687,10 +3687,10 @@ mod test { match vid_trigger_receiver.await { Ok(TriggerStatus::Start) => { // This is what we expect. - } + }, _ => { panic!("Expected a TriggerStatus::Start event"); - } + }, } let result = claim_block_join_handle.await; @@ -3698,10 +3698,10 @@ mod test { match result { Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { // This is expected - } + }, } } @@ -3759,13 +3759,13 @@ mod test { // This is what we expect. // This message *should* indicate that the signature passed // did not match the given public key. - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, } } @@ -3820,13 +3820,13 @@ mod test { // This is what we expect. // This message *should* indicate that the signature passed // did not match the given public key. - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, } } @@ -3887,10 +3887,10 @@ mod test { match result { Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { // This is expected. - } + }, } } @@ -3943,13 +3943,13 @@ mod test { match result { Err(HandleDaEventError::SignatureValidationFailed) => { // This is expected. 
- } + }, Ok(_) => { panic!("expected an error, but received a successful attempt instead") - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, } } @@ -4001,13 +4001,13 @@ mod test { match result { Err(HandleDaEventError::BroadcastFailed(_)) => { // This error is expected - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, } } @@ -4050,20 +4050,20 @@ mod test { match result { Ok(_) => { // This is expected. - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, } let mut da_channel_receiver = da_channel_receiver; match da_channel_receiver.next().await { Some(MessageType::DaProposalMessage(da_proposal_message)) => { assert_eq!(da_proposal_message.proposal, signed_da_proposal); - } + }, _ => { panic!("Expected a DaProposalMessage, but got something else"); - } + }, } } @@ -4134,13 +4134,13 @@ mod test { match result { Err(HandleQuorumEventError::SignatureValidationFailed) => { // This is expected. - } + }, Ok(_) => { panic!("expected an error, but received a successful attempt instead"); - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, } } @@ -4209,13 +4209,13 @@ mod test { match result { Err(HandleQuorumEventError::BroadcastFailed(_)) => { // This is expected. - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, } } @@ -4275,20 +4275,20 @@ mod test { match result { Ok(_) => { // This is expected. 
- } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, } let mut quorum_channel_receiver = quorum_channel_receiver; match quorum_channel_receiver.next().await { Some(MessageType::QuorumProposalMessage(da_proposal_message)) => { assert_eq!(da_proposal_message.proposal, signed_quorum_proposal); - } + }, _ => { panic!("Expected a QuorumProposalMessage, but got something else"); - } + }, } } @@ -4323,16 +4323,16 @@ mod test { match handle_received_txns_iter.next() { Some(Err(HandleReceivedTxnsError::TooManyTransactions)) => { // This is expected, - } + }, Some(Err(err)) => { panic!("Unexpected error: {:?}", err); - } + }, Some(Ok(_)) => { panic!("Expected an error, but got a result"); - } + }, None => { panic!("Expected an error, but got a result"); - } + }, } } @@ -4376,16 +4376,16 @@ mod test { // This is expected, assert!(estimated_length >= 256); assert_eq!(max_txn_len, TEST_MAX_TX_LEN); - } + }, Some(Err(err)) => { panic!("Unexpected error: {:?}", err); - } + }, Some(Ok(_)) => { panic!("Expected an error, but got a result"); - } + }, None => { panic!("Expected an error, but got a result"); - } + }, } } @@ -4431,21 +4431,21 @@ mod test { match err { async_broadcast::TrySendError::Closed(_) => { // This is expected. - } + }, _ => { panic!("Unexpected error: {:?}", err); - } + }, } - } + }, Some(Err(err)) => { panic!("Unexpected error: {:?}", err); - } + }, Some(Ok(_)) => { panic!("Expected an error, but got a result"); - } + }, None => { panic!("Expected an error, but got a result"); - } + }, } } } @@ -4473,10 +4473,10 @@ mod test { match iteration { Ok(_) => { // This is expected. 
- } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, } } @@ -4485,10 +4485,10 @@ mod test { match tx_receiver.next().await { Some(received_txn) => { assert_eq!(received_txn.tx, tx); - } + }, _ => { panic!("Expected a TransactionMessage, but got something else"); - } + }, } } } @@ -4553,10 +4553,10 @@ mod test { match proxy_global_state.txn_status(tx.commit()).await { Ok(txn_status) => { assert_eq!(txn_status, TransactionStatus::Pending); - } + }, e => { panic!("transaction status should be Pending instead of {:?}", e); - } + }, } } @@ -4594,10 +4594,10 @@ mod test { match proxy_global_state.txn_status(tx.commit()).await { Ok(txn_status) => { assert_eq!(txn_status, TransactionStatus::Pending); - } + }, e => { panic!("transaction status should be Pending instead of {:?}", e); - } + }, } } @@ -4624,13 +4624,13 @@ mod test { } else { assert_eq!(txn_status, TransactionStatus::Pending); } - } + }, e => { panic!( "transaction status should be a valid status instead of {:?}", e ); - } + }, } } @@ -4644,22 +4644,22 @@ mod test { { Err(err) => { panic!("Expected a result, but got a error {:?}", err); - } + }, _ => { // This is expected - } + }, } match write_guard.txn_status(tx.commit()).await { Ok(txn_status) => { assert_eq!(txn_status, TransactionStatus::Pending); - } + }, e => { panic!( "transaction status should be a valid status instead of {:?}", e ); - } + }, } } } @@ -4682,10 +4682,10 @@ mod test { { Err(_err) => { // This is expected - } + }, _ => { panic!("Expected an error, but got a result"); - } + }, } } @@ -4695,10 +4695,10 @@ mod test { match proxy_global_state.txn_status(unknown_tx.commit()).await { Ok(txn_status) => { assert_eq!(txn_status, TransactionStatus::Unknown); - } + }, e => { panic!("transaction status should be Unknown instead of {:?}", e); - } + }, } } } diff --git a/hotshot-builder-core/src/testing/basic_test.rs b/hotshot-builder-core/src/testing/basic_test.rs index 867706c213..bd6cbb5635 100644 --- 
a/hotshot-builder-core/src/testing/basic_test.rs +++ b/hotshot-builder-core/src/testing/basic_test.rs @@ -1,4 +1,3 @@ -pub use crate::builder_state::{BuilderState, MessageType}; pub use async_broadcast::broadcast; pub use hotshot::traits::election::static_committee::StaticCommittee; pub use hotshot_types::{ @@ -12,51 +11,49 @@ pub use hotshot_types::{ }, }; use vbs::version::StaticVersionType; + +pub use crate::builder_state::{BuilderState, MessageType}; /// The following tests are performed: #[cfg(test)] mod tests { - use super::*; - use std::collections::VecDeque; - use std::{hash::Hash, marker::PhantomData}; + use std::{collections::VecDeque, hash::Hash, marker::PhantomData, sync::Arc, time::Duration}; + use async_lock::RwLock; + use committable::{Commitment, CommitmentBoundsArkless, Committable}; use hotshot::types::SignatureKey; use hotshot_builder_api::v0_2::data_source::BuilderDataSource; - use hotshot_example_types::auction_results_provider_types::TestAuctionResult; - use hotshot_example_types::node_types::TestVersions; - use hotshot_types::data::{DaProposal2, Leaf2, QuorumProposal2, QuorumProposalWrapper}; - use hotshot_types::simple_vote::QuorumData2; - use hotshot_types::traits::node_implementation::Versions; - use hotshot_types::{ - data::vid_commitment, signature_key::BuilderKey, traits::block_contents::BlockHeader, - traits::EncodeBytes, utils::BuilderCommitment, - }; - use hotshot_example_types::{ + auction_results_provider_types::TestAuctionResult, block_types::{TestBlockHeader, TestBlockPayload, TestMetadata, TestTransaction}, + node_types::TestVersions, state_types::{TestInstanceState, TestValidatedState}, }; - use marketplace_builder_shared::block::ParentBlockReferences; - use marketplace_builder_shared::testing::constants::{ - TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, TEST_MAX_TX_NUM, TEST_NUM_NODES_IN_VID_COMPUTATION, - TEST_PROTOCOL_MAX_BLOCK_SIZE, - }; - use tokio::time::error::Elapsed; - use tokio::time::timeout; - use 
tracing_subscriber::EnvFilter; - - use crate::builder_state::{ - DaProposalMessage, DecideMessage, QuorumProposalMessage, TransactionSource, + use hotshot_types::{ + data::{vid_commitment, DaProposal2, Leaf2, QuorumProposal2, QuorumProposalWrapper}, + signature_key::BuilderKey, + simple_vote::QuorumData2, + traits::{block_contents::BlockHeader, node_implementation::Versions, EncodeBytes}, + utils::BuilderCommitment, }; - use crate::service::{ - handle_received_txns, GlobalState, ProxyGlobalState, ReceivedTransaction, + use marketplace_builder_shared::{ + block::ParentBlockReferences, + testing::constants::{ + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, TEST_MAX_TX_NUM, + TEST_NUM_NODES_IN_VID_COMPUTATION, TEST_PROTOCOL_MAX_BLOCK_SIZE, + }, }; - use async_lock::RwLock; - use committable::{Commitment, CommitmentBoundsArkless, Committable}; + use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; - use std::sync::Arc; - use std::time::Duration; + use tokio::time::{error::Elapsed, timeout}; + use tracing_subscriber::EnvFilter; - use serde::{Deserialize, Serialize}; + use super::*; + use crate::{ + builder_state::{ + DaProposalMessage, DecideMessage, QuorumProposalMessage, TransactionSource, + }, + service::{handle_received_txns, GlobalState, ProxyGlobalState, ReceivedTransaction}, + }; /// This test simulates multiple builder states receiving messages from the channels and processing them #[tokio::test] //#[instrument] @@ -461,7 +458,7 @@ mod tests { ) .unwrap(); current_leaf - } + }, }; DecideMessage:: { diff --git a/hotshot-builder-core/src/testing/finalization_test.rs b/hotshot-builder-core/src/testing/finalization_test.rs index a671cbbdf8..52ac28a84c 100644 --- a/hotshot-builder-core/src/testing/finalization_test.rs +++ b/hotshot-builder-core/src/testing/finalization_test.rs @@ -1,10 +1,5 @@ use std::{sync::Arc, time::Duration}; -use super::basic_test::{BuilderState, MessageType}; -use crate::{ - builder_state::{DaProposalMessage, QuorumProposalMessage, 
ALLOW_EMPTY_BLOCK_PERIOD}, - service::{GlobalState, ProxyGlobalState, ReceivedTransaction}, -}; use async_broadcast::{broadcast, Sender}; use async_lock::RwLock; use committable::Commitment; @@ -12,19 +7,20 @@ use hotshot::{ traits::BlockPayload, types::{BLSPubKey, SignatureKey}, }; -use hotshot_builder_api::{ - v0_1::{block_info::AvailableBlockInfo, data_source::BuilderDataSource}, - v0_1::{builder::BuildError, data_source::AcceptsTxnSubmits}, +use hotshot_builder_api::v0_1::{ + block_info::AvailableBlockInfo, + builder::BuildError, + data_source::{AcceptsTxnSubmits, BuilderDataSource}, }; use hotshot_example_types::{ block_types::{TestBlockHeader, TestBlockPayload, TestMetadata, TestTransaction}, node_types::{TestTypes, TestVersions}, state_types::{TestInstanceState, TestValidatedState}, }; -use hotshot_types::simple_certificate::QuorumCertificate2; use hotshot_types::{ data::{vid_commitment, DaProposal2, QuorumProposal2, QuorumProposalWrapper, ViewNumber}, message::Proposal, + simple_certificate::QuorumCertificate2, traits::{ block_contents::BlockHeader, node_implementation::{ConsensusTime, Versions}, @@ -32,19 +28,23 @@ use hotshot_types::{ }, utils::BuilderCommitment, }; -use marketplace_builder_shared::testing::constants::{ - TEST_CHANNEL_BUFFER_SIZE, TEST_MAX_TX_NUM, TEST_NUM_CONSENSUS_RETRIES, - TEST_NUM_NODES_IN_VID_COMPUTATION, -}; -use marketplace_builder_shared::{ - block::BuilderStateId, testing::constants::TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, -}; use marketplace_builder_shared::{ - block::ParentBlockReferences, testing::constants::TEST_PROTOCOL_MAX_BLOCK_SIZE, + block::{BuilderStateId, ParentBlockReferences}, + testing::constants::{ + TEST_CHANNEL_BUFFER_SIZE, TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, TEST_MAX_TX_NUM, + TEST_NUM_CONSENSUS_RETRIES, TEST_NUM_NODES_IN_VID_COMPUTATION, + TEST_PROTOCOL_MAX_BLOCK_SIZE, + }, }; use sha2::{Digest, Sha256}; use vbs::version::StaticVersionType; +use super::basic_test::{BuilderState, MessageType}; +use crate::{ + 
builder_state::{DaProposalMessage, QuorumProposalMessage, ALLOW_EMPTY_BLOCK_PERIOD}, + service::{GlobalState, ProxyGlobalState, ReceivedTransaction}, +}; + type TestSetup = ( ProxyGlobalState, async_broadcast::Sender>, diff --git a/hotshot-builder-core/src/testing/mod.rs b/hotshot-builder-core/src/testing/mod.rs index 279e3ac84c..9f16ee5b5a 100644 --- a/hotshot-builder-core/src/testing/mod.rs +++ b/hotshot-builder-core/src/testing/mod.rs @@ -1,17 +1,17 @@ -use std::{collections::VecDeque, marker::PhantomData}; +use std::{collections::VecDeque, marker::PhantomData, sync::Arc, time::Duration}; -use crate::{ - builder_state::{ - BuilderState, DAProposalInfo, DaProposalMessage, MessageType, QuorumProposalMessage, - }, - service::ReceivedTransaction, -}; -use async_broadcast::broadcast; -use async_broadcast::Sender as BroadcastSender; +use async_broadcast::{broadcast, Sender as BroadcastSender}; +use async_lock::RwLock; +use committable::{Commitment, CommitmentBoundsArkless, Committable}; use hotshot::{ traits::BlockPayload, types::{BLSPubKey, SignatureKey}, }; +use hotshot_example_types::{ + block_types::{TestBlockHeader, TestBlockPayload, TestMetadata, TestTransaction}, + node_types::{TestTypes, TestVersions}, + state_types::{TestInstanceState, TestValidatedState}, +}; use hotshot_types::{ data::{ vid_commitment, DaProposal2, Leaf2, QuorumProposal2, QuorumProposalWrapper, ViewNumber, @@ -25,26 +25,21 @@ use hotshot_types::{ }, utils::BuilderCommitment, }; -use vbs::version::StaticVersionType; - -use hotshot_example_types::{ - block_types::{TestBlockHeader, TestBlockPayload, TestMetadata, TestTransaction}, - node_types::{TestTypes, TestVersions}, - state_types::{TestInstanceState, TestValidatedState}, -}; -use sha2::{Digest, Sha256}; - -use crate::service::GlobalState; -use async_lock::RwLock; -use committable::{Commitment, CommitmentBoundsArkless, Committable}; use marketplace_builder_shared::{ block::{BuilderStateId, ParentBlockReferences}, testing::constants::{ 
TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, TEST_MAX_TX_NUM, TEST_PROTOCOL_MAX_BLOCK_SIZE, }, }; -use std::sync::Arc; -use std::time::Duration; +use sha2::{Digest, Sha256}; +use vbs::version::StaticVersionType; + +use crate::{ + builder_state::{ + BuilderState, DAProposalInfo, DaProposalMessage, MessageType, QuorumProposalMessage, + }, + service::{GlobalState, ReceivedTransaction}, +}; mod basic_test; pub mod finalization_test; @@ -192,7 +187,7 @@ pub async fn calc_proposal_msg( &TestInstanceState::default(), ) .await - } + }, Some(prev_proposal) => { let prev_justify_qc = prev_proposal.justify_qc(); let quorum_data = QuorumData2:: { @@ -208,7 +203,7 @@ pub async fn calc_proposal_msg( prev_justify_qc.signatures.clone(), PhantomData, ) - } + }, }; tracing::debug!("Iteration: {} justify_qc: {:?}", round, justify_qc); diff --git a/hotshot-events-service/src/api.rs b/hotshot-events-service/src/api.rs index 215d1f28e4..dd4579cef8 100644 --- a/hotshot-events-service/src/api.rs +++ b/hotshot-events-service/src/api.rs @@ -40,7 +40,7 @@ fn merge_toml(into: &mut Value, from: Value) { Entry::Occupied(mut entry) => merge_toml(entry.get_mut(), value), Entry::Vacant(entry) => { entry.insert(value); - } + }, } } } diff --git a/hotshot-events-service/src/events.rs b/hotshot-events-service/src/events.rs index f3fc9cad5b..594ffd0a31 100644 --- a/hotshot-events-service/src/events.rs +++ b/hotshot-events-service/src/events.rs @@ -1,10 +1,11 @@ +use std::path::PathBuf; + use clap::Args; use derive_more::From; use futures::{FutureExt, StreamExt, TryFutureExt}; use hotshot_types::traits::node_implementation::NodeType; use serde::{Deserialize, Serialize}; use snafu::Snafu; -use std::path::PathBuf; use tide_disco::{api::ApiError, method::ReadState, Api, RequestError, StatusCode}; use vbs::version::StaticVersionType; diff --git a/hotshot-events-service/src/events_source.rs b/hotshot-events-service/src/events_source.rs index 4f905bca63..22e6793384 100644 --- 
a/hotshot-events-service/src/events_source.rs +++ b/hotshot-events-service/src/events_source.rs @@ -99,7 +99,7 @@ impl EventFilterSet { EventType::Decide { .. } => filter.contains(&EventFilter::Decide), EventType::ReplicaViewTimeout { .. } => { filter.contains(&EventFilter::ReplicaViewTimeout) - } + }, EventType::ViewFinished { .. } => filter.contains(&EventFilter::ViewFinished), EventType::ViewTimeout { .. } => filter.contains(&EventFilter::ViewTimeout), EventType::Transactions { .. } => filter.contains(&EventFilter::Transactions), diff --git a/hotshot-example-types/src/block_types.rs b/hotshot-example-types/src/block_types.rs index 6bdcca4fed..c96e1c7ce3 100644 --- a/hotshot-example-types/src/block_types.rs +++ b/hotshot-example-types/src/block_types.rs @@ -13,8 +13,7 @@ use std::{ use async_trait::async_trait; use committable::{Commitment, Committable, RawCommitmentBuilder}; use hotshot_types::{ - data::VidCommitment, - data::{BlockError, Leaf2}, + data::{BlockError, Leaf2, VidCommitment}, traits::{ block_contents::{BlockHeader, BuilderFee, EncodeBytes, TestableBlock, Transaction}, node_implementation::NodeType, diff --git a/hotshot-example-types/src/storage_types.rs b/hotshot-example-types/src/storage_types.rs index 3d7885973d..26156b53d5 100644 --- a/hotshot-example-types/src/storage_types.rs +++ b/hotshot-example-types/src/storage_types.rs @@ -19,6 +19,7 @@ use hotshot_types::{ DaProposal, DaProposal2, Leaf, Leaf2, QuorumProposal, QuorumProposal2, QuorumProposalWrapper, VidCommitment, }, + drb::DrbResult, event::HotShotAction, message::{convert_proposal, Proposal}, simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate2, UpgradeCertificate}, @@ -56,6 +57,8 @@ pub struct TestStorageState { Option>, action: TYPES::View, epoch: Option, + drb_results: BTreeMap, + epoch_roots: BTreeMap, } impl Default for TestStorageState { @@ -73,6 +76,8 @@ impl Default for TestStorageState { high_qc2: None, action: TYPES::View::genesis(), epoch: None, + drb_results: 
BTreeMap::new(), + epoch_roots: BTreeMap::new(), } } } @@ -373,4 +378,24 @@ impl Storage for TestStorage { Ok(()) } + + async fn add_drb_result(&self, epoch: TYPES::Epoch, drb_result: DrbResult) -> Result<()> { + let mut inner = self.inner.write().await; + + inner.drb_results.insert(epoch, drb_result); + + Ok(()) + } + + async fn add_epoch_root( + &self, + epoch: TYPES::Epoch, + block_header: TYPES::BlockHeader, + ) -> Result<()> { + let mut inner = self.inner.write().await; + + inner.epoch_roots.insert(epoch, block_header); + + Ok(()) + } } diff --git a/hotshot-example-types/src/testable_delay.rs b/hotshot-example-types/src/testable_delay.rs index 07f460eaf3..ea16b4b3f3 100644 --- a/hotshot-example-types/src/testable_delay.rs +++ b/hotshot-example-types/src/testable_delay.rs @@ -85,16 +85,16 @@ pub trait TestableDelay { /// Add a delay from settings async fn handle_async_delay(settings: &DelaySettings) { match settings.delay_option { - DelayOptions::None => {} + DelayOptions::None => {}, DelayOptions::Fixed => { sleep(Duration::from_millis(settings.fixed_time_in_milliseconds)).await; - } + }, DelayOptions::Random => { let sleep_in_millis = rand::thread_rng().gen_range( settings.min_time_in_milliseconds..=settings.max_time_in_milliseconds, ); sleep(Duration::from_millis(sleep_in_millis)).await; - } + }, } } @@ -124,7 +124,7 @@ impl Iterator for SupportedTraitTypesForAsyncDelayIterator { _ => { assert_eq!(self.index, 3, "Need to ensure that newly added or removed `SupportedTraitTypesForAsyncDelay` enum is handled in iterator"); return None; - } + }, }; self.index += 1; supported_type diff --git a/hotshot-examples/infra/mod.rs b/hotshot-examples/infra/mod.rs index 875d849678..01e9d6223f 100755 --- a/hotshot-examples/infra/mod.rs +++ b/hotshot-examples/infra/mod.rs @@ -441,13 +441,13 @@ pub trait RunDa< match event_stream.next().await { None => { panic!("Error! Event stream completed before consensus ended."); - } + }, Some(Event { event, .. 
}) => { match event { EventType::Error { error } => { error!("Error in consensus: {:?}", error); // TODO what to do here - } + }, EventType::Decide { leaf_chain, qc: _, @@ -514,16 +514,16 @@ pub trait RunDa< warn!("Leaf chain is greater than 1 with len {}", leaf_chain.len()); } // when we make progress, submit new events - } + }, EventType::ReplicaViewTimeout { view_number } => { warn!("Timed out as a replicas in view {:?}", view_number); - } + }, EventType::ViewTimeout { view_number } => { warn!("Timed out in view {:?}", view_number); - } - _ => {} // mostly DA proposal + }, + _ => {}, // mostly DA proposal } - } + }, } } // Panic if we don't have the genesis epoch, there is no recovery from that @@ -1092,11 +1092,11 @@ where }) .collect(); bind_address = Url::parse(&format!("http://0.0.0.0:{port}")).unwrap(); - } + }, Some(ref addr) => { bind_address = Url::parse(&format!("http://{addr}")).expect("Valid URL"); advertise_urls = vec![bind_address.clone()]; - } + }, } match run_config.builder { @@ -1116,7 +1116,7 @@ where .await; Some(builder_task) - } + }, BuilderType::Simple => { let builder_task = >::start( @@ -1132,7 +1132,7 @@ where .await; Some(builder_task) - } + }, } } diff --git a/hotshot-fakeapi/src/fake_solver.rs b/hotshot-fakeapi/src/fake_solver.rs index b52418cc9b..f2b81175b5 100644 --- a/hotshot-fakeapi/src/fake_solver.rs +++ b/hotshot-fakeapi/src/fake_solver.rs @@ -91,11 +91,11 @@ impl FakeSolverState { status: tide_disco::StatusCode::INTERNAL_SERVER_ERROR, message: "Internal Server Error".to_string(), }); - } + }, FakeSolverFaultType::TimeoutFault => { // Sleep for the preconfigured 1 second timeout interval tokio::time::sleep(SOLVER_MAX_TIMEOUT_S).await; - } + }, } } diff --git a/hotshot-libp2p-networking/src/network/behaviours/dht/bootstrap.rs b/hotshot-libp2p-networking/src/network/behaviours/dht/bootstrap.rs index 566db12d4c..cf07181249 100644 --- a/hotshot-libp2p-networking/src/network/behaviours/dht/bootstrap.rs +++ 
b/hotshot-libp2p-networking/src/network/behaviours/dht/bootstrap.rs @@ -50,19 +50,19 @@ impl DHTBootstrapTask { Some(InputEvent::BootstrapFinished) => { tracing::debug!("Bootstrap finished"); self.in_progress = false; - } + }, Some(InputEvent::ShutdownBootstrap) => { tracing::info!("ShutdownBootstrap received, shutting down"); break; - } + }, Some(InputEvent::StartBootstrap) => { tracing::warn!("Trying to start bootstrap that's already in progress"); continue; - } + }, None => { tracing::debug!("Bootstrap channel closed, exiting loop"); break; - } + }, } } else if let Ok(maybe_event) = timeout(Duration::from_secs(120), self.rx.next()).await { @@ -70,18 +70,18 @@ impl DHTBootstrapTask { Some(InputEvent::StartBootstrap) => { tracing::debug!("Start bootstrap in bootstrap task"); self.bootstrap(); - } + }, Some(InputEvent::ShutdownBootstrap) => { tracing::debug!("ShutdownBootstrap received, shutting down"); break; - } + }, Some(InputEvent::BootstrapFinished) => { tracing::debug!("not in progress got bootstrap finished"); - } + }, None => { tracing::debug!("Bootstrap channel closed, exiting loop"); break; - } + }, } } else { tracing::debug!("Start bootstrap in bootstrap task after timeout"); diff --git a/hotshot-libp2p-networking/src/network/behaviours/dht/mod.rs b/hotshot-libp2p-networking/src/network/behaviours/dht/mod.rs index 7ef41e1192..950450aecc 100644 --- a/hotshot-libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/hotshot-libp2p-networking/src/network/behaviours/dht/mod.rs @@ -274,31 +274,31 @@ impl DHTBehaviour { let num_entries = o.get_mut(); *num_entries += 1; *num_entries - } + }, std::collections::hash_map::Entry::Vacant(v) => { v.insert(1); 1 - } + }, } - } + }, GetRecordOk::FinishedWithNoAdditionalRecord { cache_candidates: _, } => { tracing::debug!("GetRecord Finished with No Additional Record"); last = true; 0 - } + }, }, Err(err) => { warn!("Error in Kademlia query: {:?}", err); 0 - } + }, }, None => { // We already finished the query (or it's 
been cancelled). Do nothing and exit the // function. return; - } + }, }; // if the query has completed and we need to retry @@ -398,7 +398,7 @@ impl DHTBehaviour { if query.notify.send(()).is_err() { warn!("Put DHT: client channel closed before put record request could be sent"); } - } + }, Err(e) => { query.progress = DHTProgress::NotStarted; query.backoff.start_next(false); @@ -409,7 +409,7 @@ impl DHTBehaviour { ); // push back onto the queue self.retry_put(query); - } + }, } } else { warn!("Put DHT: completed DHT query that is no longer tracked."); @@ -439,7 +439,7 @@ impl DHTBehaviour { if last { self.handle_put_query(record_results, id); } - } + }, KademliaEvent::OutboundQueryProgressed { result: QueryResult::GetClosestPeers(r), id: query_id, @@ -454,13 +454,13 @@ impl DHTBehaviour { }; }; debug!("Successfully got closest peers for key {:?}", key); - } + }, Err(e) => { if let Some(chan) = self.in_progress_get_closest_peers.remove(&query_id) { let _: Result<_, _> = chan.send(()); }; warn!("Failed to get closest peers: {:?}", e); - } + }, }, KademliaEvent::OutboundQueryProgressed { result: QueryResult::GetRecord(record_results), @@ -469,7 +469,7 @@ impl DHTBehaviour { .. } => { self.handle_get_query(store, record_results, id, last); - } + }, KademliaEvent::OutboundQueryProgressed { result: QueryResult::Bootstrap(Ok(BootstrapOk { @@ -485,7 +485,7 @@ impl DHTBehaviour { debug!("Bootstrap in progress, {} nodes remaining", num_remaining); } return Some(NetworkEvent::IsBootstrapped); - } + }, KademliaEvent::OutboundQueryProgressed { result: QueryResult::Bootstrap(Err(e)), .. 
@@ -495,16 +495,16 @@ impl DHTBehaviour { error!("Failed to bootstrap: {:?}", e); } self.finish_bootstrap(); - } + }, KademliaEvent::RoutablePeer { peer, address: _ } => { debug!("Found routable peer {:?}", peer); - } + }, KademliaEvent::PendingRoutablePeer { peer, address: _ } => { debug!("Found pending routable peer {:?}", peer); - } + }, KademliaEvent::UnroutablePeer { peer } => { debug!("Found unroutable peer {:?}", peer); - } + }, KademliaEvent::RoutingUpdated { peer: _, is_new_peer: _, @@ -513,13 +513,13 @@ impl DHTBehaviour { old_peer: _, } => { debug!("Routing table updated"); - } + }, e @ KademliaEvent::OutboundQueryProgressed { .. } => { debug!("Not handling dht event {:?}", e); - } + }, e => { debug!("New unhandled swarm event: {e:?}"); - } + }, } None } diff --git a/hotshot-libp2p-networking/src/network/behaviours/dht/store/persistent.rs b/hotshot-libp2p-networking/src/network/behaviours/dht/store/persistent.rs index cd927c2470..2c89cc741a 100644 --- a/hotshot-libp2p-networking/src/network/behaviours/dht/store/persistent.rs +++ b/hotshot-libp2p-networking/src/network/behaviours/dht/store/persistent.rs @@ -281,10 +281,10 @@ impl PersistentStore { .await .map_err(|_| anyhow::anyhow!("save operation timed out")) { - Ok(Ok(())) => {} + Ok(Ok(())) => {}, Ok(Err(error)) | Err(error) => { warn!("Failed to save DHT to persistent storage: {error}"); - } + }, }; // Reset the record delta @@ -324,10 +324,10 @@ impl PersistentStore { err ); } - } + }, Err(err) => { warn!("Failed to parse record from persistent storage: {:?}", err); - } + }, }; } diff --git a/hotshot-libp2p-networking/src/network/behaviours/direct_message.rs b/hotshot-libp2p-networking/src/network/behaviours/direct_message.rs index 72d378a587..dfd8e5ca4f 100644 --- a/hotshot-libp2p-networking/src/network/behaviours/direct_message.rs +++ b/hotshot-libp2p-networking/src/network/behaviours/direct_message.rs @@ -59,7 +59,7 @@ impl DMBehaviour { } => { error!("Inbound message failure from {:?}: {:?}", 
peer, error); None - } + }, Event::OutboundFailure { peer, request_id, @@ -83,7 +83,7 @@ impl DMBehaviour { } } None - } + }, Event::Message { message, peer, .. } => match message { Message::Request { request: msg, @@ -94,7 +94,7 @@ impl DMBehaviour { // receiver, not initiator. // don't track. If we are disconnected, sender will reinitiate Some(NetworkEvent::DirectRequest(msg, peer, channel)) - } + }, Message::Response { request_id, response: msg, @@ -107,12 +107,12 @@ impl DMBehaviour { warn!("Received response for unknown request id {:?}", request_id); None } - } + }, }, e @ Event::ResponseSent { .. } => { debug!("Response sent {:?}", e); None - } + }, } } } diff --git a/hotshot-libp2p-networking/src/network/cbor.rs b/hotshot-libp2p-networking/src/network/cbor.rs index a8ca6afedf..71f19281e7 100644 --- a/hotshot-libp2p-networking/src/network/cbor.rs +++ b/hotshot-libp2p-networking/src/network/cbor.rs @@ -126,19 +126,19 @@ fn decode_into_io_error(err: cbor4ii::serde::DecodeError) -> io::Err match err { cbor4ii::serde::DecodeError::Core(DecodeError::Read(e)) => { io::Error::new(io::ErrorKind::Other, e.to_string()) - } + }, cbor4ii::serde::DecodeError::Core(e @ DecodeError::Unsupported { .. }) => { io::Error::new(io::ErrorKind::Unsupported, e.to_string()) - } + }, cbor4ii::serde::DecodeError::Core(e @ DecodeError::Eof { .. 
}) => { io::Error::new(io::ErrorKind::UnexpectedEof, e.to_string()) - } + }, cbor4ii::serde::DecodeError::Core(e) => { io::Error::new(io::ErrorKind::InvalidData, e.to_string()) - } + }, cbor4ii::serde::DecodeError::Custom(e) => { io::Error::new(io::ErrorKind::Other, e.to_string()) - } + }, } } diff --git a/hotshot-libp2p-networking/src/network/node.rs b/hotshot-libp2p-networking/src/network/node.rs index 28d009f846..d5e1703a0e 100644 --- a/hotshot-libp2p-networking/src/network/node.rs +++ b/hotshot-libp2p-networking/src/network/node.rs @@ -360,7 +360,7 @@ impl NetworkNode { query.progress = DHTProgress::NotStarted; query.backoff.start_next(false); error!("Error publishing to DHT: {e:?} for peer {:?}", self.peer_id); - } + }, Ok(qid) => { debug!("Published record to DHT with qid {:?}", qid); let query = KadPutQuery { @@ -368,7 +368,7 @@ impl NetworkNode { ..query }; self.dht_handler.put_record(qid, query); - } + }, } } @@ -392,20 +392,20 @@ impl NetworkNode { ClientRequest::BeginBootstrap => { debug!("Beginning Libp2p bootstrap"); let _ = self.swarm.behaviour_mut().dht.bootstrap(); - } + }, ClientRequest::LookupPeer(pid, chan) => { let id = self.swarm.behaviour_mut().dht.get_closest_peers(pid); self.dht_handler .in_progress_get_closest_peers .insert(id, chan); - } + }, ClientRequest::GetRoutingTable(chan) => { self.dht_handler .print_routing_table(&mut self.swarm.behaviour_mut().dht); if chan.send(()).is_err() { warn!("Tried to notify client but client not tracking anymore"); } - } + }, ClientRequest::PutDHT { key, value, notify } => { let query = KadPutQuery { progress: DHTProgress::NotStarted, @@ -415,17 +415,17 @@ impl NetworkNode { backoff: ExponentialBackoff::default(), }; self.put_record(query); - } + }, ClientRequest::GetConnectedPeerNum(s) => { if s.send(self.num_connected()).is_err() { error!("error sending peer number to client"); } - } + }, ClientRequest::GetConnectedPeers(s) => { if s.send(self.connected_pids()).is_err() { error!("error sending peer set 
to client"); } - } + }, ClientRequest::GetDHT { key, notify, @@ -439,20 +439,20 @@ impl NetworkNode { retry_count, &mut self.swarm.behaviour_mut().dht, ); - } + }, ClientRequest::IgnorePeers(_peers) => { // NOTE used by test with conductor only - } + }, ClientRequest::Shutdown => { if let Some(listener_id) = self.listener_id { self.swarm.remove_listener(listener_id); } return Ok(true); - } + }, ClientRequest::GossipMsg(topic, contents) => { behaviour.publish_gossip(Topic::new(topic.clone()), contents.clone()); - } + }, ClientRequest::Subscribe(t, chan) => { behaviour.subscribe_gossip(&t); if let Some(chan) = chan { @@ -460,7 +460,7 @@ impl NetworkNode { error!("finished subscribing but response channel dropped"); } } - } + }, ClientRequest::Unsubscribe(t, chan) => { behaviour.unsubscribe_gossip(&t); if let Some(chan) = chan { @@ -468,7 +468,7 @@ impl NetworkNode { error!("finished unsubscribing but response channel dropped"); } } - } + }, ClientRequest::DirectRequest { pid, contents, @@ -483,23 +483,23 @@ impl NetworkNode { retry_count, }; self.direct_message_state.add_direct_request(req, id); - } + }, ClientRequest::DirectResponse(chan, msg) => { behaviour.add_direct_response(chan, msg); - } + }, ClientRequest::AddKnownPeers(peers) => { self.add_known_peers(&peers); - } + }, ClientRequest::Prune(pid) => { if self.swarm.disconnect_peer_id(pid).is_err() { warn!("Could not disconnect from {:?}", pid); } - } + }, } - } + }, None => { error!("Error receiving msg in main behaviour loop: channel closed"); - } + }, } Ok(false) } @@ -541,7 +541,7 @@ impl NetworkNode { send_to_client .send(NetworkEvent::ConnectedPeersUpdate(self.num_connected())) .map_err(|err| NetworkError::ChannelSendError(err.to_string()))?; - } + }, SwarmEvent::ConnectionClosed { connection_id: _, peer_id, @@ -565,13 +565,13 @@ impl NetworkNode { send_to_client .send(NetworkEvent::ConnectedPeersUpdate(self.num_connected())) .map_err(|err| NetworkError::ChannelSendError(err.to_string()))?; - } + }, 
SwarmEvent::Dialing { peer_id, connection_id: _, } => { debug!("Attempting to dial {:?}", peer_id); - } + }, SwarmEvent::ListenerClosed { listener_id: _, addresses: _, @@ -591,7 +591,7 @@ impl NetworkNode { connection_id: _, local_addr: _, send_back_addr: _, - } => {} + } => {}, SwarmEvent::Behaviour(b) => { let maybe_event = match b { NetworkEventInternal::DHTEvent(e) => self @@ -621,7 +621,7 @@ impl NetworkNode { } } None - } + }, NetworkEventInternal::GossipEvent(e) => match *e { GossipEvent::Message { propagation_source: _peer_id, @@ -631,25 +631,25 @@ impl NetworkNode { GossipEvent::Subscribed { peer_id, topic } => { debug!("Peer {:?} subscribed to topic {:?}", peer_id, topic); None - } + }, GossipEvent::Unsubscribed { peer_id, topic } => { debug!("Peer {:?} unsubscribed from topic {:?}", peer_id, topic); None - } + }, GossipEvent::GossipsubNotSupported { peer_id } => { warn!("Peer {:?} does not support gossipsub", peer_id); None - } + }, }, NetworkEventInternal::DMEvent(e) => self .direct_message_state .handle_dm_event(e, self.resend_tx.clone()), NetworkEventInternal::AutonatEvent(e) => { match e { - autonat::Event::InboundProbe(_) => {} + autonat::Event::InboundProbe(_) => {}, autonat::Event::OutboundProbe(e) => match e { autonat::OutboundProbeEvent::Request { .. } - | autonat::OutboundProbeEvent::Response { .. } => {} + | autonat::OutboundProbeEvent::Response { .. } => {}, autonat::OutboundProbeEvent::Error { probe_id: _, peer, @@ -659,14 +659,14 @@ impl NetworkNode { "AutoNAT Probe failed to peer {:?} with error: {:?}", peer, error ); - } + }, }, autonat::Event::StatusChanged { old, new } => { debug!("AutoNAT Status changed. 
Old: {:?}, New: {:?}", old, new); - } + }, }; None - } + }, }; if let Some(event) = maybe_event { @@ -675,14 +675,14 @@ impl NetworkNode { .send(event) .map_err(|err| NetworkError::ChannelSendError(err.to_string()))?; } - } + }, SwarmEvent::OutgoingConnectionError { connection_id: _, peer_id, error, } => { warn!("Outgoing connection error to {:?}: {:?}", peer_id, error); - } + }, SwarmEvent::IncomingConnectionError { connection_id: _, local_addr: _, @@ -690,29 +690,29 @@ impl NetworkNode { error, } => { warn!("Incoming connection error: {:?}", error); - } + }, SwarmEvent::ListenerError { listener_id: _, error, } => { warn!("Listener error: {:?}", error); - } + }, SwarmEvent::ExternalAddrConfirmed { address } => { let my_id = *self.swarm.local_peer_id(); self.swarm .behaviour_mut() .dht .add_address(&my_id, address.clone()); - } + }, SwarmEvent::NewExternalAddrOfPeer { peer_id, address } => { self.swarm .behaviour_mut() .dht .add_address(&peer_id, address.clone()); - } + }, _ => { debug!("Unhandled swarm event {:?}", event); - } + }, } Ok(()) } diff --git a/hotshot-libp2p-networking/src/network/transport.rs b/hotshot-libp2p-networking/src/network/transport.rs index 01e94e6b90..b69c2c9019 100644 --- a/hotshot-libp2p-networking/src/network/transport.rs +++ b/hotshot-libp2p-networking/src/network/transport.rs @@ -358,7 +358,7 @@ where local_addr, send_back_addr, } - } + }, // We need to re-map the other events because we changed the type of the upgrade TransportEvent::AddressExpired { @@ -377,7 +377,7 @@ where }, TransportEvent::ListenerError { listener_id, error } => { TransportEvent::ListenerError { listener_id, error } - } + }, TransportEvent::NewAddress { listener_id, listen_addr, diff --git a/hotshot-macros/src/lib.rs b/hotshot-macros/src/lib.rs index 3608ef6da1..f318d8e1b0 100644 --- a/hotshot-macros/src/lib.rs +++ b/hotshot-macros/src/lib.rs @@ -118,7 +118,7 @@ impl ToLowerSnakeStr for syn::GenericArgument { syn::Type::Path(p) => p.to_lower_snake_str(), _ => { 
panic!("Unexpected type for GenericArgument::Type: {t:?}"); - } + }, }, syn::GenericArgument::Const(c) => match c { syn::Expr::Lit(l) => match &l.lit { @@ -126,15 +126,15 @@ impl ToLowerSnakeStr for syn::GenericArgument { syn::Lit::Int(v) => format!("{}_", v.base10_digits()), _ => { panic!("Unexpected type for GenericArgument::Const::Lit: {l:?}"); - } + }, }, _ => { panic!("Unexpected type for GenericArgument::Const: {c:?}"); - } + }, }, _ => { panic!("Unexpected type for GenericArgument: {self:?}"); - } + }, } } } diff --git a/hotshot-orchestrator/src/client.rs b/hotshot-orchestrator/src/client.rs index de167ff505..3c1d8e0884 100644 --- a/hotshot-orchestrator/src/client.rs +++ b/hotshot-orchestrator/src/client.rs @@ -515,7 +515,7 @@ impl OrchestratorClient { Err(err) => { tracing::info!("{err}"); sleep(Duration::from_millis(250)).await; - } + }, } } } diff --git a/hotshot-query-service/examples/simple-server.rs b/hotshot-query-service/examples/simple-server.rs index 847f8c60bc..916e8a5c7d 100644 --- a/hotshot-query-service/examples/simple-server.rs +++ b/hotshot-query-service/examples/simple-server.rs @@ -16,6 +16,8 @@ //! consensus network with two nodes and connects a query service to each node. It runs each query //! server on local host. The program continues until it is manually killed. 
+use std::{num::NonZeroUsize, str::FromStr, sync::Arc, time::Duration}; + use async_lock::RwLock; use clap::Parser; use futures::future::{join_all, try_join_all}; @@ -48,7 +50,6 @@ use hotshot_types::{ traits::{election::Membership, network::Topic}, HotShotConfig, PeerConfig, }; -use std::{num::NonZeroUsize, str::FromStr, sync::Arc, time::Duration}; use tracing_subscriber::EnvFilter; use url::Url; use vbs::version::StaticVersionType; diff --git a/hotshot-query-service/src/api.rs b/hotshot-query-service/src/api.rs index 5f447fbb53..dd4579cef8 100644 --- a/hotshot-query-service/src/api.rs +++ b/hotshot-query-service/src/api.rs @@ -10,8 +10,8 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . -use std::fs; -use std::path::Path; +use std::{fs, path::Path}; + use tide_disco::api::{Api, ApiError}; use toml::{map::Entry, Value}; use vbs::version::StaticVersionType; @@ -40,7 +40,7 @@ fn merge_toml(into: &mut Value, from: Value) { Entry::Occupied(mut entry) => merge_toml(entry.get_mut(), value), Entry::Vacant(entry) => { entry.insert(value); - } + }, } } } diff --git a/hotshot-query-service/src/availability.rs b/hotshot-query-service/src/availability.rs index d1dbcd249f..1737aab585 100644 --- a/hotshot-query-service/src/availability.rs +++ b/hotshot-query-service/src/availability.rs @@ -26,10 +26,10 @@ //! chain which is tabulated by this specific node and not subject to full consensus agreement, try //! the [node](crate::node) API. 
-use crate::{api::load_api, Payload, QueryError}; +use std::{fmt::Display, path::PathBuf, time::Duration}; + use derive_more::From; use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt}; - use hotshot_types::{ data::{Leaf, Leaf2, QuorumProposal}, simple_certificate::QuorumCertificate, @@ -37,10 +37,11 @@ use hotshot_types::{ }; use serde::{Deserialize, Serialize}; use snafu::{OptionExt, Snafu}; -use std::{fmt::Display, path::PathBuf, time::Duration}; use tide_disco::{api::ApiError, method::ReadState, Api, RequestError, StatusCode}; use vbs::version::StaticVersionType; +use crate::{api::load_api, Payload, QueryError}; + pub(crate) mod data_source; mod fetch; pub(crate) mod query_data; @@ -527,7 +528,7 @@ where .context(FetchTransactionSnafu { resource: hash.to_string(), }) - } + }, None => { let height: u64 = req.integer_param("height")?; let fetch = state @@ -543,7 +544,7 @@ where .context(InvalidTransactionIndexSnafu { height, index: i })?; TransactionQueryData::new(&block, index, i) .context(InvalidTransactionIndexSnafu { height, index: i }) - } + }, } } .boxed() @@ -608,34 +609,32 @@ fn enforce_range_limit(from: usize, until: usize, limit: usize) -> Result<(), Er #[cfg(test)] mod test { + use std::{fmt::Debug, time::Duration}; + + use async_lock::RwLock; + use committable::Committable; + use futures::future::FutureExt; + use hotshot_types::{data::Leaf2, simple_certificate::QuorumCertificate2}; + use portpicker::pick_unused_port; + use serde::de::DeserializeOwned; + use surf_disco::{Client, Error as _}; + use tempfile::TempDir; + use tide_disco::App; + use toml::toml; + use super::*; - use crate::data_source::storage::AvailabilityStorage; - use crate::data_source::VersionedDataSource; - use crate::testing::mocks::MockVersions; use crate::{ - data_source::ExtensibleDataSource, + data_source::{storage::AvailabilityStorage, ExtensibleDataSource, VersionedDataSource}, status::StatusDataSource, task::BackgroundTask, testing::{ consensus::{MockDataSource, 
MockNetwork, MockSqlDataSource}, - mocks::{mock_transaction, MockBase, MockHeader, MockPayload, MockTypes}, + mocks::{mock_transaction, MockBase, MockHeader, MockPayload, MockTypes, MockVersions}, setup_test, }, types::HeightIndexed, ApiState, Error, Header, }; - use async_lock::RwLock; - use committable::Committable; - use futures::future::FutureExt; - use hotshot_types::data::Leaf2; - use hotshot_types::simple_certificate::QuorumCertificate2; - use portpicker::pick_unused_port; - use serde::de::DeserializeOwned; - use std::{fmt::Debug, time::Duration}; - use surf_disco::{Client, Error as _}; - use tempfile::TempDir; - use tide_disco::App; - use toml::toml; /// Get the current ledger height and a list of non-empty leaf/block pairs. async fn get_non_empty_blocks( @@ -657,7 +656,7 @@ mod test { let leaf = client.get(&format!("leaf/{}", i)).send().await.unwrap(); blocks.push((leaf, block)); } - } + }, Err(Error::Availability { source: super::Error::FetchBlock { .. }, }) => { @@ -665,7 +664,7 @@ mod test { "found end of ledger at height {i}, non-empty blocks are {blocks:?}", ); return (i, blocks); - } + }, Err(err) => panic!("unexpected error {}", err), } } diff --git a/hotshot-query-service/src/availability/data_source.rs b/hotshot-query-service/src/availability/data_source.rs index 9747b7814b..c35caafdc2 100644 --- a/hotshot-query-service/src/availability/data_source.rs +++ b/hotshot-query-service/src/availability/data_source.rs @@ -10,15 +10,11 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . 
-use super::{ - fetch::Fetch, - query_data::{ - BlockHash, BlockQueryData, LeafHash, LeafQueryData, PayloadMetadata, PayloadQueryData, - QueryablePayload, TransactionHash, TransactionQueryData, VidCommonMetadata, - VidCommonQueryData, - }, +use std::{ + cmp::Ordering, + ops::{Bound, RangeBounds}, }; -use crate::{types::HeightIndexed, Header, Payload}; + use async_trait::async_trait; use derivative::Derivative; use derive_more::{Display, From}; @@ -30,10 +26,16 @@ use hotshot_types::{ data::{VidCommitment, VidShare}, traits::node_implementation::NodeType, }; -use std::{ - cmp::Ordering, - ops::{Bound, RangeBounds}, + +use super::{ + fetch::Fetch, + query_data::{ + BlockHash, BlockQueryData, LeafHash, LeafQueryData, PayloadMetadata, PayloadQueryData, + QueryablePayload, TransactionHash, TransactionQueryData, VidCommonMetadata, + VidCommonQueryData, + }, }; +use crate::{types::HeightIndexed, Header, Payload}; #[derive(Derivative, From, Display)] #[derivative(Ord = "feature_allow_slow_enum")] diff --git a/hotshot-query-service/src/availability/fetch.rs b/hotshot-query-service/src/availability/fetch.rs index ca48252b5d..d3175549c4 100644 --- a/hotshot-query-service/src/availability/fetch.rs +++ b/hotshot-query-service/src/availability/fetch.rs @@ -10,9 +10,10 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . +use std::{future::IntoFuture, time::Duration}; + use futures::future::{BoxFuture, FutureExt}; use snafu::{Error, ErrorCompat, IntoError, NoneError, OptionExt}; -use std::{future::IntoFuture, time::Duration}; use tokio::time::timeout; /// An in-progress request to fetch some data. 
diff --git a/hotshot-query-service/src/availability/query_data.rs b/hotshot-query-service/src/availability/query_data.rs index 6e1b1eb1f7..174db36101 100644 --- a/hotshot-query-service/src/availability/query_data.rs +++ b/hotshot-query-service/src/availability/query_data.rs @@ -10,7 +10,8 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . -use crate::{types::HeightIndexed, Header, Metadata, Payload, Transaction, VidCommon}; +use std::fmt::Debug; + use committable::{Commitment, Committable}; use hotshot_types::{ data::{Leaf, Leaf2, VidCommitment, VidShare}, @@ -26,7 +27,8 @@ use hotshot_types::{ use jf_vid::VidScheme; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use snafu::{ensure, Snafu}; -use std::fmt::Debug; + +use crate::{types::HeightIndexed, Header, Metadata, Payload, Transaction, VidCommon}; pub type LeafHash = Commitment>; pub type QcHash = Commitment>; diff --git a/hotshot-query-service/src/data_source.rs b/hotshot-query-service/src/data_source.rs index 3d45b8cd00..30881aa5c3 100644 --- a/hotshot-query-service/src/data_source.rs +++ b/hotshot-query-service/src/data_source.rs @@ -47,16 +47,18 @@ pub use update::{Transaction, UpdateDataSource, VersionedDataSource}; #[cfg(any(test, feature = "testing"))] mod test_helpers { + use std::ops::{Bound, RangeBounds}; + + use futures::{ + future, + stream::{BoxStream, StreamExt}, + }; + use crate::{ availability::{BlockQueryData, Fetch, LeafQueryData}, node::NodeDataSource, testing::{consensus::TestableDataSource, mocks::MockTypes}, }; - use futures::{ - future, - stream::{BoxStream, StreamExt}, - }; - use std::ops::{Bound, RangeBounds}; /// Apply an upper bound to a range based on the currently available block height. 
async fn bound_range(ds: &D, range: R) -> impl RangeBounds @@ -119,6 +121,16 @@ mod test_helpers { #[cfg(any(test, feature = "testing"))] #[espresso_macros::generic_tests] pub mod availability_tests { + use std::{ + collections::HashMap, + fmt::Debug, + ops::{Bound, RangeBounds}, + }; + + use committable::Committable; + use futures::stream::StreamExt; + use hotshot_types::data::Leaf2; + use super::test_helpers::*; use crate::{ availability::{payload_size, BlockId}, @@ -131,12 +143,6 @@ pub mod availability_tests { }, types::HeightIndexed, }; - use committable::Committable; - use futures::stream::StreamExt; - use hotshot_types::data::Leaf2; - use std::collections::HashMap; - use std::fmt::Debug; - use std::ops::{Bound, RangeBounds}; async fn validate(ds: &impl TestableDataSource) { // Check the consistency of every block/leaf pair. Keep track of payloads and transactions @@ -537,6 +543,10 @@ pub mod availability_tests { #[cfg(any(test, feature = "testing"))] #[espresso_macros::generic_tests] pub mod persistence_tests { + use committable::Committable; + use hotshot_example_types::state_types::{TestInstanceState, TestValidatedState}; + use hotshot_types::simple_certificate::QuorumCertificate2; + use crate::{ availability::{BlockQueryData, LeafQueryData}, data_source::{ @@ -552,9 +562,6 @@ pub mod persistence_tests { types::HeightIndexed, Leaf2, }; - use committable::Committable; - use hotshot_example_types::state_types::{TestInstanceState, TestValidatedState}; - use hotshot_types::simple_certificate::QuorumCertificate2; #[tokio::test(flavor = "multi_thread")] pub async fn test_revert() @@ -756,6 +763,24 @@ pub mod persistence_tests { #[cfg(any(test, feature = "testing"))] #[espresso_macros::generic_tests] pub mod node_tests { + use std::time::Duration; + + use committable::Committable; + use futures::{future::join_all, stream::StreamExt}; + use hotshot::traits::BlockPayload; + use hotshot_example_types::{ + block_types::{TestBlockHeader, TestBlockPayload, 
TestMetadata}, + node_types::TestTypes, + state_types::{TestInstanceState, TestValidatedState}, + }; + use hotshot_types::{ + data::{vid_commitment, VidCommitment, VidShare}, + traits::{block_contents::EncodeBytes, node_implementation::Versions}, + vid::advz::{advz_scheme, ADVZScheme}, + }; + use jf_vid::VidScheme; + use vbs::version::StaticVersionType; + use crate::{ availability::{ BlockInfo, BlockQueryData, LeafQueryData, QueryableHeader, VidCommonQueryData, @@ -773,24 +798,6 @@ pub mod node_tests { types::HeightIndexed, Header, }; - use committable::Committable; - use futures::{future::join_all, stream::StreamExt}; - use hotshot::traits::BlockPayload; - use hotshot_example_types::{ - block_types::TestBlockPayload, node_types::TestTypes, state_types::TestValidatedState, - }; - use hotshot_example_types::{ - block_types::{TestBlockHeader, TestMetadata}, - state_types::TestInstanceState, - }; - use hotshot_types::{ - data::{vid_commitment, VidCommitment, VidShare}, - traits::{block_contents::EncodeBytes, node_implementation::Versions}, - vid::advz::{advz_scheme, ADVZScheme}, - }; - use jf_vid::VidScheme; - use std::time::Duration; - use vbs::version::StaticVersionType; #[tokio::test(flavor = "multi_thread")] pub async fn test_sync_status() @@ -1387,6 +1394,8 @@ pub mod node_tests { #[cfg(any(test, feature = "testing"))] #[espresso_macros::generic_tests] pub mod status_tests { + use std::time::Duration; + use crate::{ status::StatusDataSource, testing::{ @@ -1395,7 +1404,6 @@ pub mod status_tests { setup_test, sleep, }, }; - use std::time::Duration; #[tokio::test(flavor = "multi_thread")] pub async fn test_metrics() { diff --git a/hotshot-query-service/src/data_source/extension.rs b/hotshot-query-service/src/data_source/extension.rs index 8eb38d4668..467cda0891 100644 --- a/hotshot-query-service/src/data_source/extension.rs +++ b/hotshot-query-service/src/data_source/extension.rs @@ -10,8 +10,14 @@ // You should have received a copy of the GNU General Public 
License along with this program. If not, // see . +use std::ops::{Bound, RangeBounds}; + +use async_trait::async_trait; +use hotshot_types::{data::VidShare, traits::node_implementation::NodeType}; +use jf_merkle_tree::prelude::MerkleProof; +use tagged_base64::TaggedBase64; + use super::VersionedDataSource; -use crate::data_source::storage::pruning::PrunedHeightDataSource; use crate::{ availability::{ AvailabilityDataSource, BlockId, BlockInfo, BlockQueryData, Fetch, FetchStream, LeafId, @@ -19,6 +25,7 @@ use crate::{ TransactionHash, TransactionQueryData, UpdateAvailabilityData, VidCommonMetadata, VidCommonQueryData, }, + data_source::storage::pruning::PrunedHeightDataSource, explorer::{self, ExplorerDataSource, ExplorerHeader, ExplorerTransaction}, merklized_state::{ MerklizedState, MerklizedStateDataSource, MerklizedStateHeightPersistence, Snapshot, @@ -29,12 +36,6 @@ use crate::{ status::{HasMetrics, StatusDataSource}, Header, Payload, QueryResult, Transaction, }; -use async_trait::async_trait; -use hotshot_types::data::VidShare; -use hotshot_types::traits::node_implementation::NodeType; -use jf_merkle_tree::prelude::MerkleProof; -use std::ops::{Bound, RangeBounds}; -use tagged_base64::TaggedBase64; /// Wrapper to add extensibility to an existing data source. /// /// [`ExtensibleDataSource`] adds app-specific data to any existing data source. It implements all @@ -500,6 +501,8 @@ where #[cfg(any(test, feature = "testing"))] mod impl_testable_data_source { + use hotshot::types::Event; + use super::*; use crate::{ data_source::UpdateDataSource, @@ -508,7 +511,6 @@ mod impl_testable_data_source { mocks::MockTypes, }, }; - use hotshot::types::Event; #[async_trait] impl DataSourceLifeCycle for ExtensibleDataSource @@ -540,7 +542,6 @@ mod impl_testable_data_source { mod test { use super::ExtensibleDataSource; use crate::testing::consensus::MockDataSource; - // For some reason this is the only way to import the macro defined in another module of this // crate. 
use crate::*; diff --git a/hotshot-query-service/src/data_source/fetching.rs b/hotshot-query-service/src/data_source/fetching.rs index e98fa236df..cec480bf65 100644 --- a/hotshot-query-service/src/data_source/fetching.rs +++ b/hotshot-query-service/src/data_source/fetching.rs @@ -73,6 +73,38 @@ //! different request for the same object, one that permitted an active fetch. Or it may have been //! fetched [proactively](#proactive-fetching). +use std::{ + cmp::{max, min}, + fmt::{Debug, Display}, + iter::repeat_with, + marker::PhantomData, + ops::{Bound, Range, RangeBounds}, + sync::Arc, + time::Duration, +}; + +use anyhow::{bail, Context}; +use async_lock::Semaphore; +use async_trait::async_trait; +use backoff::{backoff::Backoff, ExponentialBackoff, ExponentialBackoffBuilder}; +use derivative::Derivative; +use futures::{ + channel::oneshot, + future::{self, join_all, BoxFuture, Either, Future, FutureExt}, + stream::{self, BoxStream, StreamExt}, +}; +use hotshot_types::{ + data::VidShare, + traits::{ + metrics::{Gauge, Metrics}, + node_implementation::NodeType, + }, +}; +use jf_merkle_tree::{prelude::MerkleProof, MerkleTreeScheme}; +use tagged_base64::TaggedBase64; +use tokio::{spawn, time::sleep}; +use tracing::Instrument; + use super::{ notifier::Notifier, storage::{ @@ -84,13 +116,12 @@ use super::{ }, Transaction, VersionedDataSource, }; -use crate::availability::HeaderQueryData; use crate::{ availability::{ - AvailabilityDataSource, BlockId, BlockInfo, BlockQueryData, Fetch, FetchStream, LeafId, - LeafQueryData, PayloadMetadata, PayloadQueryData, QueryableHeader, QueryablePayload, - TransactionHash, TransactionQueryData, UpdateAvailabilityData, VidCommonMetadata, - VidCommonQueryData, + AvailabilityDataSource, BlockId, BlockInfo, BlockQueryData, Fetch, FetchStream, + HeaderQueryData, LeafId, LeafQueryData, PayloadMetadata, PayloadQueryData, QueryableHeader, + QueryablePayload, TransactionHash, TransactionQueryData, UpdateAvailabilityData, + VidCommonMetadata, 
VidCommonQueryData, }, explorer::{self, ExplorerDataSource}, fetching::{self, request, Provider}, @@ -104,36 +135,6 @@ use crate::{ types::HeightIndexed, Header, Payload, QueryError, QueryResult, }; -use anyhow::{bail, Context}; -use async_lock::Semaphore; -use async_trait::async_trait; -use backoff::{backoff::Backoff, ExponentialBackoff, ExponentialBackoffBuilder}; -use derivative::Derivative; -use futures::{ - channel::oneshot, - future::{self, join_all, BoxFuture, Either, Future, FutureExt}, - stream::{self, BoxStream, StreamExt}, -}; -use hotshot_types::{ - data::VidShare, - traits::{ - metrics::{Gauge, Metrics}, - node_implementation::NodeType, - }, -}; -use jf_merkle_tree::{prelude::MerkleProof, MerkleTreeScheme}; -use std::sync::Arc; -use std::{ - cmp::{max, min}, - fmt::{Debug, Display}, - iter::repeat_with, - marker::PhantomData, - ops::{Bound, Range, RangeBounds}, - time::Duration, -}; -use tagged_base64::TaggedBase64; -use tokio::{spawn, time::sleep}; -use tracing::Instrument; mod block; mod header; @@ -467,15 +468,15 @@ where match storage.prune(&mut pruner).await { Ok(Some(height)) => { tracing::warn!("Pruned to height {height}"); - } + }, Ok(None) => { tracing::warn!("pruner run complete."); break; - } + }, Err(e) => { tracing::error!("pruner run failed: {e:?}"); break; - } + }, } } } @@ -977,7 +978,7 @@ where ?req, "unable to fetch object; spawning a task to retry: {err:#}" ); - } + }, } // We'll use this channel to get the object back if we successfully load it on retry. @@ -1005,14 +1006,14 @@ where tracing::info!(?req, "object was ready after retries"); send.send(obj).ok(); break; - } + }, Ok(None) => { // The object was not immediately available after all, but we have // successfully spawned a fetch for it if possible. The spawned fetch // will notify the original request once it completes. 
tracing::info!(?req, "spawned fetch after retries"); break; - } + }, Err(err) => { tracing::warn!( ?req, @@ -1023,7 +1024,7 @@ where if let Some(next_delay) = backoff.next_backoff() { delay = next_delay; } - } + }, } } } @@ -1058,12 +1059,12 @@ where tracing::debug!(?req, "object missing from local storage, will try to fetch"); self.fetch::(&mut tx, req).await?; Ok(None) - } + }, Err(err) => { // An error occurred while querying the database. We don't know if we need to fetch // the object or not. Return an error so we can try again. bail!("failed to fetch resource {req:?} from local storage: {err:#}"); - } + }, } } @@ -1224,13 +1225,13 @@ where None => passive(T::Request::from(chunk.start + i), passive_fetch), }) .collect(); - } + }, Err(err) => { tracing::warn!( ?chunk, "unable to fetch chunk; spawning a task to retry: {err:#}" ); - } + }, } // We'll use these channels to get the objects back that we successfully load on retry. @@ -1272,7 +1273,7 @@ where } } break; - } + }, Err(err) => { tracing::warn!( ?chunk, @@ -1283,7 +1284,7 @@ where if let Some(next_delay) = backoff.next_backoff() { delay = next_delay; } - } + }, } } } @@ -1432,7 +1433,7 @@ where backoff = min(2 * backoff, max_backoff); metrics.backoff.set(backoff.as_secs() as usize); continue; - } + }, }; let heights = match Heights::load(&mut tx).await { Ok(heights) => heights, @@ -1443,7 +1444,7 @@ where backoff = min(2 * backoff, max_backoff); metrics.backoff.set(backoff.as_secs() as usize); continue; - } + }, }; metrics.retries.set(0); break heights; @@ -1577,7 +1578,7 @@ where tracing::error!("unable to open read tx: {err:#}"); sleep(Duration::from_secs(5)).await; continue; - } + }, }; match tx.load_prev_aggregate().await { Ok(agg) => break agg, @@ -1585,7 +1586,7 @@ where tracing::error!("unable to load previous aggregate: {err:#}"); sleep(Duration::from_secs(5)).await; continue; - } + }, } }; @@ -1629,7 +1630,7 @@ where match res { Ok(()) => { break; - } + }, Err(err) => { tracing::warn!( 
num_blocks, @@ -1637,7 +1638,7 @@ where "failed to update aggregates for chunk: {err:#}" ); sleep(Duration::from_secs(1)).await; - } + }, } } metrics.height.set(height as usize); @@ -2201,7 +2202,7 @@ impl ResultExt for Result { "error loading resource from local storage, will try to fetch: {err:#}" ); None - } + }, } } } @@ -2320,7 +2321,7 @@ where // dropped. If this happens, things are very broken in any case, and it is // better to panic loudly than simply block forever. panic!("notifier dropped without satisfying request {req:?}"); - } + }, } }) .boxed(), diff --git a/hotshot-query-service/src/data_source/fetching/block.rs b/hotshot-query-service/src/data_source/fetching/block.rs index 980bb5a032..c0a255ef6f 100644 --- a/hotshot-query-service/src/data_source/fetching/block.rs +++ b/hotshot-query-service/src/data_source/fetching/block.rs @@ -12,6 +12,14 @@ //! [`Fetchable`] implementation for [`BlockQueryData`] and [`PayloadQueryData`]. +use std::{cmp::Ordering, future::IntoFuture, iter::once, ops::RangeBounds, sync::Arc}; + +use async_trait::async_trait; +use derivative::Derivative; +use derive_more::From; +use futures::future::{BoxFuture, FutureExt}; +use hotshot_types::traits::{block_contents::BlockHeader, node_implementation::NodeType}; + use super::{ header::{fetch_header_and_then, HeaderCallback}, AvailabilityProvider, FetchRequest, Fetchable, Fetcher, Heights, Notifiers, RangedFetchable, @@ -34,12 +42,6 @@ use crate::{ types::HeightIndexed, Header, Payload, QueryResult, }; -use async_trait::async_trait; -use derivative::Derivative; -use derive_more::From; -use futures::future::{BoxFuture, FutureExt}; -use hotshot_types::traits::{block_contents::BlockHeader, node_implementation::NodeType}; -use std::{cmp::Ordering, future::IntoFuture, iter::once, ops::RangeBounds, sync::Arc}; pub(super) type PayloadFetcher = fetching::Fetcher>; diff --git a/hotshot-query-service/src/data_source/fetching/header.rs b/hotshot-query-service/src/data_source/fetching/header.rs 
index 6782fb5d37..02a8f7c629 100644 --- a/hotshot-query-service/src/data_source/fetching/header.rs +++ b/hotshot-query-service/src/data_source/fetching/header.rs @@ -12,36 +12,31 @@ //! Header fetching. +use std::{cmp::Ordering, future::IntoFuture, sync::Arc}; + +use anyhow::bail; +use async_trait::async_trait; +use committable::Committable; +use derivative::Derivative; +use futures::{future::BoxFuture, FutureExt}; +use hotshot_types::traits::{block_contents::BlockHeader, node_implementation::NodeType}; + use super::{ block::fetch_block_with_header, leaf::fetch_leaf_with_callbacks, vid::fetch_vid_common_with_header, AvailabilityProvider, Fetcher, }; -use crate::data_source::fetching::Fetchable; -use crate::data_source::fetching::HeaderQueryData; -use crate::data_source::fetching::LeafQueryData; -use crate::data_source::fetching::Notifiers; -use crate::QueryResult; use crate::{ availability::{BlockId, QueryablePayload}, data_source::{ + fetching::{Fetchable, HeaderQueryData, LeafQueryData, Notifiers}, storage::{ pruning::PrunedHeightStorage, AvailabilityStorage, NodeStorage, UpdateAvailabilityStorage, }, update::VersionedDataSource, }, - Header, Payload, QueryError, + Header, Payload, QueryError, QueryResult, }; -use anyhow::bail; -use async_trait::async_trait; -use committable::Committable; -use derivative::Derivative; -use futures::future::BoxFuture; -use futures::FutureExt; -use hotshot_types::traits::{block_contents::BlockHeader, node_implementation::NodeType}; -use std::cmp::Ordering; -use std::future::IntoFuture; -use std::sync::Arc; impl From> for HeaderQueryData { fn from(leaf: LeafQueryData) -> Self { @@ -188,14 +183,14 @@ where header.block_number() ); fetch_block_with_header(fetcher, header); - } + }, Self::VidCommon { fetcher } => { tracing::info!( "fetched leaf {}, will now fetch VID common", header.block_number() ); fetch_vid_common_with_header(fetcher, header); - } + }, } } } @@ -225,17 +220,17 @@ where Ok(header) => { callback.run(header); return 
Ok(()); - } + }, Err(QueryError::Missing | QueryError::NotFound) => { // We successfully queried the database, but the header wasn't there. Fall through to // fetching it. tracing::debug!(?req, "header not available locally; trying fetch"); - } + }, Err(QueryError::Error { message }) => { // An error occurred while querying the database. We don't know if we need to fetch the // header or not. Return an error so we can try again. bail!("failed to fetch header for block {req:?}: {message}"); - } + }, } // If the header is _not_ present, we may still be able to fetch the request, but we need to @@ -245,16 +240,16 @@ where match req { BlockId::Number(n) => { fetch_leaf_with_callbacks(tx, callback.fetcher(), n.into(), [callback.into()]).await?; - } + }, BlockId::Hash(h) => { // Given only the hash, we cannot tell if the corresponding leaf actually exists, since // we don't have a corresponding header. Therefore, we will not spawn an active fetch. tracing::debug!("not fetching unknown block {h}"); - } + }, BlockId::PayloadHash(h) => { // Same as above, we don't fetch a block with a payload that is not known to exist. tracing::debug!("not fetching block with unknown payload {h}"); - } + }, } Ok(()) diff --git a/hotshot-query-service/src/data_source/fetching/leaf.rs b/hotshot-query-service/src/data_source/fetching/leaf.rs index 3692d01851..0f2b37097d 100644 --- a/hotshot-query-service/src/data_source/fetching/leaf.rs +++ b/hotshot-query-service/src/data_source/fetching/leaf.rs @@ -12,6 +12,18 @@ //! [`Fetchable`] implementation for [`LeafQueryData`]. 
+use std::{cmp::Ordering, future::IntoFuture, iter::once, ops::RangeBounds, sync::Arc}; + +use anyhow::bail; +use async_trait::async_trait; +use committable::Committable; +use derivative::Derivative; +use derive_more::From; +use futures::future::{BoxFuture, FutureExt}; +use hotshot_types::traits::node_implementation::NodeType; +use tokio::spawn; +use tracing::Instrument; + use super::{ header::HeaderCallback, AvailabilityProvider, FetchRequest, Fetchable, Fetcher, Heights, Notifiers, RangedFetchable, Storable, @@ -29,17 +41,6 @@ use crate::{ types::HeightIndexed, Payload, QueryError, QueryResult, }; -use anyhow::bail; -use async_trait::async_trait; -use committable::Committable; -use derivative::Derivative; -use derive_more::From; -use futures::future::{BoxFuture, FutureExt}; -use hotshot_types::traits::node_implementation::NodeType; -use std::sync::Arc; -use std::{cmp::Ordering, future::IntoFuture, iter::once, ops::RangeBounds}; -use tokio::spawn; -use tracing::Instrument; pub(super) type LeafFetcher = fetching::Fetcher, LeafCallback>; @@ -172,19 +173,19 @@ where callbacks, ); return Ok(()); - } + }, Err(QueryError::Missing | QueryError::NotFound) => { // We successfully queried the database, but the next leaf wasn't there. We // know for sure that based on the current state of the DB, we cannot fetch this // leaf. tracing::debug!(n, "not fetching leaf with unknown successor"); return Ok(()); - } + }, Err(QueryError::Error { message }) => { // An error occurred while querying the database. We don't know if we need to // fetch the leaf or not. Return an error so we can try again. 
bail!("failed to fetch successor for leaf {n}: {message}"); - } + }, }; let fetcher = fetcher.clone(); @@ -197,13 +198,13 @@ where fetcher.provider.clone(), once(LeafCallback::Leaf { fetcher }).chain(callbacks), ); - } + }, LeafId::Hash(h) => { // We don't actively fetch leaves when requested by hash, because we have no way of // knowing whether a leaf with such a hash actually exists, and we don't want to bother // peers with requests for non-existent leaves. tracing::debug!("not fetching unknown leaf {h}"); - } + }, } Ok(()) @@ -262,7 +263,7 @@ pub(super) fn trigger_fetch_for_parent( if tx.get_leaf(((height - 1) as usize).into()).await.is_ok() { return; } - } + }, Err(err) => { // If we can't open a transaction, we can't be sure that we already have the // parent, so we fall through to fetching it just to be safe. @@ -271,7 +272,7 @@ pub(super) fn trigger_fetch_for_parent( %parent, "error opening transaction to check for parent leaf: {err:#}", ); - } + }, } tracing::info!(height, %parent, "received new leaf; fetching missing parent"); @@ -369,7 +370,7 @@ impl Ord for LeafCallback { (Self::Continuation { callback: cb1 }, Self::Continuation { callback: cb2 }) => { cb1.cmp(cb2) - } + }, _ => Ordering::Equal, } } @@ -396,7 +397,7 @@ where // Trigger a fetch of the parent leaf, if we don't already have it. trigger_fetch_for_parent(&fetcher, &leaf); fetcher.store_and_notify(leaf).await; - } + }, Self::Continuation { callback } => callback.run(leaf.leaf.block_header().clone()), } } diff --git a/hotshot-query-service/src/data_source/fetching/transaction.rs b/hotshot-query-service/src/data_source/fetching/transaction.rs index f19681b24d..baf581166b 100644 --- a/hotshot-query-service/src/data_source/fetching/transaction.rs +++ b/hotshot-query-service/src/data_source/fetching/transaction.rs @@ -12,6 +12,13 @@ //! Transaction fetching. 
+use std::sync::Arc; + +use async_trait::async_trait; +use derive_more::From; +use futures::future::{BoxFuture, FutureExt}; +use hotshot_types::traits::node_implementation::NodeType; + use super::{AvailabilityProvider, FetchRequest, Fetchable, Fetcher, Notifiers}; use crate::{ availability::{QueryablePayload, TransactionHash, TransactionQueryData}, @@ -24,11 +31,6 @@ use crate::{ }, Payload, QueryResult, }; -use async_trait::async_trait; -use derive_more::From; -use futures::future::{BoxFuture, FutureExt}; -use hotshot_types::traits::node_implementation::NodeType; -use std::sync::Arc; #[derive(Clone, Copy, Debug, From)] pub(super) struct TransactionRequest(TransactionHash); diff --git a/hotshot-query-service/src/data_source/fetching/vid.rs b/hotshot-query-service/src/data_source/fetching/vid.rs index 51c948c79f..618c11e077 100644 --- a/hotshot-query-service/src/data_source/fetching/vid.rs +++ b/hotshot-query-service/src/data_source/fetching/vid.rs @@ -12,6 +12,17 @@ //! [`Fetchable`] implementation for [`VidCommonQueryData`]. 
+use std::{cmp::Ordering, future::IntoFuture, iter::once, ops::RangeBounds, sync::Arc}; + +use async_trait::async_trait; +use derivative::Derivative; +use derive_more::From; +use futures::future::{BoxFuture, FutureExt}; +use hotshot_types::{ + data::VidShare, + traits::{block_contents::BlockHeader, node_implementation::NodeType}, +}; + use super::{ header::{fetch_header_and_then, HeaderCallback}, AvailabilityProvider, FetchRequest, Fetchable, Fetcher, Heights, Notifiers, RangedFetchable, @@ -30,16 +41,6 @@ use crate::{ types::HeightIndexed, Header, Payload, QueryResult, }; -use async_trait::async_trait; -use derivative::Derivative; -use derive_more::From; -use futures::future::{BoxFuture, FutureExt}; -use hotshot_types::{ - data::VidShare, - traits::{block_contents::BlockHeader, node_implementation::NodeType}, -}; -use std::sync::Arc; -use std::{cmp::Ordering, future::IntoFuture, iter::once, ops::RangeBounds}; pub(super) type VidCommonFetcher = fetching::Fetcher>; diff --git a/hotshot-query-service/src/data_source/fs.rs b/hotshot-query-service/src/data_source/fs.rs index c77664a06d..83f6ee7bd8 100644 --- a/hotshot-query-service/src/data_source/fs.rs +++ b/hotshot-query-service/src/data_source/fs.rs @@ -12,16 +12,17 @@ #![cfg(feature = "file-system-data-source")] +use std::path::Path; + +use atomic_store::AtomicStoreLoader; +use hotshot_types::traits::node_implementation::NodeType; + +pub use super::storage::fs::Transaction; use super::{storage::FileSystemStorage, AvailabilityProvider, FetchingDataSource}; use crate::{ availability::{query_data::QueryablePayload, QueryableHeader}, Header, Payload, }; -use atomic_store::AtomicStoreLoader; -use hotshot_types::traits::node_implementation::NodeType; -use std::path::Path; - -pub use super::storage::fs::Transaction; /// A data source for the APIs provided in this crate, backed by the local file system. 
/// @@ -239,14 +240,15 @@ where #[cfg(any(test, feature = "testing"))] mod impl_testable_data_source { + use async_trait::async_trait; + use hotshot::types::Event; + use tempfile::TempDir; + use super::*; use crate::{ data_source::UpdateDataSource, testing::{consensus::DataSourceLifeCycle, mocks::MockTypes}, }; - use async_trait::async_trait; - use hotshot::types::Event; - use tempfile::TempDir; #[async_trait] impl + Default> DataSourceLifeCycle @@ -279,11 +281,10 @@ mod impl_testable_data_source { #[cfg(test)] mod test { use super::FileSystemDataSource; - use crate::{fetching::provider::NoFetching, testing::mocks::MockTypes}; - // For some reason this is the only way to import the macro defined in another module of this // crate. use crate::*; + use crate::{fetching::provider::NoFetching, testing::mocks::MockTypes}; instantiate_data_source_tests!(FileSystemDataSource); } diff --git a/hotshot-query-service/src/data_source/metrics.rs b/hotshot-query-service/src/data_source/metrics.rs index a539673a10..e59171ce36 100644 --- a/hotshot-query-service/src/data_source/metrics.rs +++ b/hotshot-query-service/src/data_source/metrics.rs @@ -12,12 +12,13 @@ #![cfg(feature = "metrics-data-source")] +use async_trait::async_trait; + use crate::{ metrics::PrometheusMetrics, status::{HasMetrics, StatusDataSource}, QueryError, QueryResult, }; -use async_trait::async_trait; /// A minimal data source for the status API provided in this crate, with no persistent storage. 
/// @@ -82,9 +83,10 @@ impl StatusDataSource for MetricsDataSource { #[cfg(any(test, feature = "testing"))] mod impl_testable_data_source { + use hotshot::types::Event; + use super::*; use crate::testing::{consensus::DataSourceLifeCycle, mocks::MockTypes}; - use hotshot::types::Event; #[async_trait] impl DataSourceLifeCycle for MetricsDataSource { @@ -112,9 +114,7 @@ mod impl_testable_data_source { #[cfg(test)] mod test { - use super::super::status_tests; - use super::MetricsDataSource; - + use super::{super::status_tests, MetricsDataSource}; // For some reason this is the only way to import the macro defined in another module of this // crate. use crate::*; diff --git a/hotshot-query-service/src/data_source/notifier.rs b/hotshot-query-service/src/data_source/notifier.rs index a21e497c9f..53227c726d 100644 --- a/hotshot-query-service/src/data_source/notifier.rs +++ b/hotshot-query-service/src/data_source/notifier.rs @@ -70,16 +70,21 @@ //! spawned to fetch missing resources and send them through the [`Notifier`], but these should be //! relatively few and rare. +use std::{ + future::IntoFuture, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, +}; + use async_lock::Mutex; use derivative::Derivative; use futures::future::{BoxFuture, FutureExt}; -use std::sync::Arc; -use std::{ - future::IntoFuture, - sync::atomic::{AtomicBool, Ordering}, +use tokio::sync::{ + mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}, + oneshot, }; -use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}; -use tokio::sync::oneshot; use tracing::warn; /// A predicate on a type ``. 
@@ -286,11 +291,12 @@ where #[cfg(test)] mod test { + use std::time::Duration; + use tokio::time::timeout; use super::*; use crate::testing::setup_test; - use std::time::Duration; #[tokio::test(flavor = "multi_thread")] async fn test_notify_drop() { diff --git a/hotshot-query-service/src/data_source/sql.rs b/hotshot-query-service/src/data_source/sql.rs index b667b66862..21c09b3b42 100644 --- a/hotshot-query-service/src/data_source/sql.rs +++ b/hotshot-query-service/src/data_source/sql.rs @@ -12,6 +12,11 @@ #![cfg(feature = "sql-data-source")] +pub use anyhow::Error; +use hotshot_types::traits::node_implementation::NodeType; +pub use refinery::Migration; +pub use sql::Transaction; + use super::{ fetching::{self}, storage::sql::{self, SqlStorage}, @@ -22,11 +27,6 @@ use crate::{ availability::{QueryableHeader, QueryablePayload}, Header, Payload, }; -pub use anyhow::Error; -use hotshot_types::traits::node_implementation::NodeType; -pub use refinery::Migration; - -pub use sql::Transaction; pub type Builder = fetching::Builder; @@ -318,15 +318,15 @@ where // These tests run the `postgres` Docker image, which doesn't work on Windows. #[cfg(all(any(test, feature = "testing"), not(target_os = "windows")))] pub mod testing { + use async_trait::async_trait; + use hotshot::types::Event; + pub use sql::testing::TmpDb; + use super::*; use crate::{ data_source::UpdateDataSource, testing::{consensus::DataSourceLifeCycle, mocks::MockTypes}, }; - use async_trait::async_trait; - use hotshot::types::Event; - - pub use sql::testing::TmpDb; #[async_trait] impl + Default> DataSourceLifeCycle @@ -372,17 +372,20 @@ pub mod testing { #[cfg(all(test, not(target_os = "windows")))] mod generic_test { use super::SqlDataSource; - use crate::{fetching::provider::NoFetching, testing::mocks::MockTypes}; - // For some reason this is the only way to import the macro defined in another module of this // crate. 
use crate::*; + use crate::{fetching::provider::NoFetching, testing::mocks::MockTypes}; instantiate_data_source_tests!(SqlDataSource); } #[cfg(all(test, not(target_os = "windows")))] mod test { + use hotshot_example_types::state_types::{TestInstanceState, TestValidatedState}; + use hotshot_types::{data::VidShare, vid::advz::advz_scheme}; + use jf_vid::VidScheme; + use super::*; use crate::{ availability::{ @@ -396,9 +399,6 @@ mod test { fetching::provider::NoFetching, testing::{consensus::DataSourceLifeCycle, mocks::MockTypes, setup_test}, }; - use hotshot_example_types::state_types::{TestInstanceState, TestValidatedState}; - use hotshot_types::{data::VidShare, vid::advz::advz_scheme}; - use jf_vid::VidScheme; type D = SqlDataSource; diff --git a/hotshot-query-service/src/data_source/storage.rs b/hotshot-query-service/src/data_source/storage.rs index 227950d9e7..46e66dba00 100644 --- a/hotshot-query-service/src/data_source/storage.rs +++ b/hotshot-query-service/src/data_source/storage.rs @@ -56,6 +56,14 @@ //! [`AvailabilityDataSource`](crate::availability::AvailabilityDataSource) in fallibility. //! 
+use std::ops::RangeBounds; + +use async_trait::async_trait; +use futures::future::Future; +use hotshot_types::{data::VidShare, traits::node_implementation::NodeType}; +use jf_merkle_tree::prelude::MerkleProof; +use tagged_base64::TaggedBase64; + use crate::{ availability::{ BlockId, BlockQueryData, LeafId, LeafQueryData, PayloadMetadata, PayloadQueryData, @@ -76,12 +84,6 @@ use crate::{ node::{SyncStatus, TimeWindowQueryData, WindowStart}, Header, Payload, QueryResult, Transaction, }; -use async_trait::async_trait; -use futures::future::Future; -use hotshot_types::{data::VidShare, traits::node_implementation::NodeType}; -use jf_merkle_tree::prelude::MerkleProof; -use std::ops::RangeBounds; -use tagged_base64::TaggedBase64; pub mod fail_storage; pub mod fs; diff --git a/hotshot-query-service/src/data_source/storage/fail_storage.rs b/hotshot-query-service/src/data_source/storage/fail_storage.rs index 8398e8c303..d090bbfa5e 100644 --- a/hotshot-query-service/src/data_source/storage/fail_storage.rs +++ b/hotshot-query-service/src/data_source/storage/fail_storage.rs @@ -12,6 +12,13 @@ #![cfg(any(test, feature = "testing"))] +use std::{ops::RangeBounds, sync::Arc}; + +use async_lock::Mutex; +use async_trait::async_trait; +use futures::future::Future; +use hotshot_types::{data::VidShare, traits::node_implementation::NodeType}; + use super::{ pruning::{PruneStorage, PrunedHeightStorage, PrunerCfg, PrunerConfig}, sql::MigrateTypes, @@ -32,12 +39,6 @@ use crate::{ status::HasMetrics, Header, Payload, QueryError, QueryResult, }; -use async_lock::Mutex; -use async_trait::async_trait; -use futures::future::Future; -use hotshot_types::{data::VidShare, traits::node_implementation::NodeType}; -use std::ops::RangeBounds; -use std::sync::Arc; /// A specific action that can be targeted to inject an error. 
#[derive(Clone, Copy, Debug, PartialEq, Eq)] @@ -87,8 +88,8 @@ impl FailureMode { match self { Self::Once(fail_action) if fail_action.matches(action) => { *self = Self::Never; - } - Self::Always(fail_action) if fail_action.matches(action) => {} + }, + Self::Always(fail_action) if fail_action.matches(action) => {}, _ => return Ok(()), } diff --git a/hotshot-query-service/src/data_source/storage/fs.rs b/hotshot-query-service/src/data_source/storage/fs.rs index 3e58a04911..a0e3ff1aa7 100644 --- a/hotshot-query-service/src/data_source/storage/fs.rs +++ b/hotshot-query-service/src/data_source/storage/fs.rs @@ -12,6 +12,28 @@ #![cfg(feature = "file-system-data-source")] +use std::{ + collections::{ + hash_map::{Entry, HashMap}, + BTreeMap, + }, + hash::Hash, + ops::{Bound, Deref, RangeBounds}, + path::Path, +}; + +use async_lock::{RwLock, RwLockReadGuard, RwLockWriteGuard}; +use async_trait::async_trait; +use atomic_store::{AtomicStore, AtomicStoreLoader, PersistenceError}; +use committable::Committable; +use futures::future::Future; +use hotshot_types::{ + data::{VidCommitment, VidShare}, + traits::{block_contents::BlockHeader, node_implementation::NodeType}, +}; +use serde::{de::DeserializeOwned, Serialize}; +use snafu::OptionExt; + use super::{ ledger_log::{Iter, LedgerLog}, pruning::{PruneStorage, PrunedHeightStorage, PrunerConfig}, @@ -19,7 +41,6 @@ use super::{ Aggregate, AggregatesStorage, AvailabilityStorage, NodeStorage, PayloadMetadata, UpdateAggregatesStorage, UpdateAvailabilityStorage, VidCommonMetadata, }; - use crate::{ availability::{ data_source::{BlockId, LeafId}, @@ -35,24 +56,6 @@ use crate::{ types::HeightIndexed, ErrorSnafu, Header, MissingSnafu, NotFoundSnafu, Payload, QueryError, QueryResult, }; -use async_lock::{RwLock, RwLockReadGuard, RwLockWriteGuard}; -use async_trait::async_trait; -use atomic_store::{AtomicStore, AtomicStoreLoader, PersistenceError}; -use committable::Committable; -use futures::future::Future; -use hotshot_types::{ - 
data::{VidCommitment, VidShare}, - traits::{block_contents::BlockHeader, node_implementation::NodeType}, -}; -use serde::{de::DeserializeOwned, Serialize}; -use snafu::OptionExt; -use std::collections::{ - hash_map::{Entry, HashMap}, - BTreeMap, -}; -use std::hash::Hash; -use std::ops::{Bound, Deref, RangeBounds}; -use std::path::Path; const CACHED_LEAVES_COUNT: usize = 100; const CACHED_BLOCKS_COUNT: usize = 100; @@ -88,10 +91,10 @@ where BlockId::Number(n) => Ok(n), BlockId::Hash(h) => { Ok(*self.index_by_block_hash.get(&h).context(NotFoundSnafu)? as usize) - } + }, BlockId::PayloadHash(h) => { Ok(*self.index_by_payload_hash.get(&h).context(NotFoundSnafu)? as usize) - } + }, } } @@ -405,11 +408,11 @@ where iter.nth(n - 1); } n - } + }, Bound::Excluded(n) => { iter.nth(n); n + 1 - } + }, Bound::Unbounded => 0, }; @@ -662,10 +665,10 @@ fn update_index_by_hash(index: &mut HashMap, hash: H // Overwrite the existing entry if the new object was sequenced first. e.insert(pos); } - } + }, Entry::Vacant(e) => { e.insert(pos); - } + }, } } @@ -772,7 +775,7 @@ where // entry in `index_by_time` has a non-empty list associated with it, so this // indexing is safe. blocks[0] - } + }, } as usize; let mut res = TimeWindowQueryData::default(); diff --git a/hotshot-query-service/src/data_source/storage/ledger_log.rs b/hotshot-query-service/src/data_source/storage/ledger_log.rs index 4ff798ee52..e18523c7b5 100644 --- a/hotshot-query-service/src/data_source/storage/ledger_log.rs +++ b/hotshot-query-service/src/data_source/storage/ledger_log.rs @@ -12,12 +12,12 @@ #![cfg(feature = "file-system-data-source")] +use std::{collections::VecDeque, fmt::Debug}; + use atomic_store::{ append_log, load_store::BincodeLoadStore, AppendLog, AtomicStoreLoader, PersistenceError, }; use serde::{de::DeserializeOwned, Serialize}; -use std::collections::VecDeque; -use std::fmt::Debug; use tracing::{debug, warn}; /// A caching append log for ledger objects. 
@@ -262,11 +262,12 @@ impl ExactSizeIterator for Iter<'_, T> #[cfg(test)] mod test { - use super::*; - use crate::testing::setup_test; use atomic_store::AtomicStore; use tempfile::TempDir; + use super::*; + use crate::testing::setup_test; + #[tokio::test(flavor = "multi_thread")] async fn test_ledger_log_creation() { setup_test(); diff --git a/hotshot-query-service/src/data_source/storage/pruning.rs b/hotshot-query-service/src/data_source/storage/pruning.rs index 26e1243729..081b3a1fbf 100644 --- a/hotshot-query-service/src/data_source/storage/pruning.rs +++ b/hotshot-query-service/src/data_source/storage/pruning.rs @@ -10,9 +10,10 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . +use std::{fmt::Debug, time::Duration}; + use anyhow::bail; use async_trait::async_trait; -use std::{fmt::Debug, time::Duration}; #[derive(Clone, Debug)] pub struct PrunerCfg { diff --git a/hotshot-query-service/src/data_source/storage/sql.rs b/hotshot-query-service/src/data_source/storage/sql.rs index b12cb5efbb..2cbd0b20d6 100644 --- a/hotshot-query-service/src/data_source/storage/sql.rs +++ b/hotshot-query-service/src/data_source/storage/sql.rs @@ -11,32 +11,22 @@ // see . 
#![cfg(feature = "sql-data-source")] -use crate::{ - data_source::{ - storage::pruning::{PruneStorage, PrunerCfg, PrunerConfig}, - update::Transaction as _, - VersionedDataSource, - }, - metrics::PrometheusMetrics, - status::HasMetrics, - QueryError, QueryResult, -}; +use std::{cmp::min, fmt::Debug, str::FromStr, time::Duration}; + use anyhow::Context; use async_trait::async_trait; use chrono::Utc; use committable::Committable; +#[cfg(not(feature = "embedded-db"))] +use futures::future::FutureExt; use hotshot_types::{ data::{Leaf, Leaf2, VidShare}, simple_certificate::{QuorumCertificate, QuorumCertificate2}, traits::{metrics::Metrics, node_implementation::NodeType}, vid::advz::ADVZShare, }; - use itertools::Itertools; use log::LevelFilter; - -#[cfg(not(feature = "embedded-db"))] -use futures::future::FutureExt; #[cfg(not(feature = "embedded-db"))] use sqlx::postgres::{PgConnectOptions, PgSslMode}; #[cfg(feature = "embedded-db")] @@ -45,7 +35,17 @@ use sqlx::{ pool::{Pool, PoolOptions}, ConnectOptions, Row, }; -use std::{cmp::min, fmt::Debug, str::FromStr, time::Duration}; + +use crate::{ + data_source::{ + storage::pruning::{PruneStorage, PrunerCfg, PrunerConfig}, + update::Transaction as _, + VersionedDataSource, + }, + metrics::PrometheusMetrics, + status::HasMetrics, + QueryError, QueryResult, +}; pub extern crate sqlx; pub use sqlx::{Database, Sqlite}; @@ -55,10 +55,6 @@ mod queries; mod transaction; pub use anyhow::Error; -// This needs to be reexported so that we can reference it by absolute path relative to this crate -// in the expansion of `include_migrations`, even when `include_migrations` is invoked from another -// crate which doesn't have `include_dir` as a dependency. 
-pub use crate::include_migrations; pub use db::*; pub use include_dir::include_dir; pub use queries::QueryBuilder; @@ -66,6 +62,10 @@ pub use refinery::Migration; pub use transaction::*; use self::{migrate::Migrator, transaction::PoolMetrics}; +// This needs to be reexported so that we can reference it by absolute path relative to this crate +// in the expansion of `include_migrations`, even when `include_migrations` is invoked from another +// crate which doesn't have `include_dir` as a dependency. +pub use crate::include_migrations; /// Embed migrations from the given directory into the current binary for PostgreSQL or SQLite. /// @@ -577,11 +577,11 @@ impl SqlStorage { match runner.run_async(&mut Migrator::from(&mut conn)).await { Ok(report) => { tracing::info!("ran DB migrations: {report:?}"); - } + }, Err(err) => { tracing::error!("DB migrations failed: {:?}", err.report()); Err(err)?; - } + }, } } @@ -709,7 +709,7 @@ impl PruneStorage for SqlStorage { }; height - } + }, }; // Prune data exceeding target retention in batches @@ -973,21 +973,19 @@ impl MigrateTypes for SqlStorage { #[cfg(all(any(test, feature = "testing"), not(target_os = "windows")))] pub mod testing { #![allow(unused_imports)] - use refinery::Migration; use std::{ env, process::{Command, Stdio}, str::{self, FromStr}, time::Duration, }; - use tokio::net::TcpStream; - use tokio::time::timeout; use portpicker::pick_unused_port; + use refinery::Migration; + use tokio::{net::TcpStream, time::timeout}; use super::Config; - use crate::availability::query_data::QueryableHeader; - use crate::testing::sleep; + use crate::{availability::query_data::QueryableHeader, testing::sleep}; #[derive(Debug)] pub struct TmpDb { #[cfg(not(feature = "embedded-db"))] @@ -1275,28 +1273,28 @@ pub mod testing { // These tests run the `postgres` Docker image, which doesn't work on Windows. 
#[cfg(all(test, not(target_os = "windows")))] mod test { + use std::time::Duration; + use committable::{Commitment, CommitmentBoundsArkless, Committable}; use hotshot::traits::BlockPayload; use hotshot_example_types::{ node_types::TestVersions, state_types::{TestInstanceState, TestValidatedState}, }; - use jf_vid::VidScheme; - use hotshot_types::{ - data::vid_commitment, - traits::{node_implementation::Versions, EncodeBytes}, - vid::advz::advz_scheme, - }; - use hotshot_types::{ - data::{QuorumProposal, ViewNumber}, + data::{vid_commitment, QuorumProposal, ViewNumber}, simple_vote::QuorumData, - traits::{block_contents::BlockHeader, node_implementation::ConsensusTime}, + traits::{ + block_contents::BlockHeader, + node_implementation::{ConsensusTime, Versions}, + EncodeBytes, + }, + vid::advz::advz_scheme, }; use jf_merkle_tree::{ prelude::UniversalMerkleTree, MerkleTreeScheme, ToTraversalPath, UniversalMerkleTreeScheme, }; - use std::time::Duration; + use jf_vid::VidScheme; use tokio::time::sleep; use vbs::version::StaticVersionType; diff --git a/hotshot-query-service/src/data_source/storage/sql/migrate.rs b/hotshot-query-service/src/data_source/storage/sql/migrate.rs index c86c0fb34c..2ae4056a82 100644 --- a/hotshot-query-service/src/data_source/storage/sql/migrate.rs +++ b/hotshot-query-service/src/data_source/storage/sql/migrate.rs @@ -1,4 +1,3 @@ -use super::{queries::DecodeError, Db}; use async_trait::async_trait; use derive_more::From; use futures::stream::StreamExt; @@ -9,6 +8,8 @@ use refinery_core::{ use sqlx::{pool::PoolConnection, Acquire, Executor, Row}; use time::{format_description::well_known::Rfc3339, OffsetDateTime}; +use super::{queries::DecodeError, Db}; + /// Run migrations using a sqlx connection. 
/// /// While SQLx has its own built-in migration functionality, we use Refinery, and alas we must diff --git a/hotshot-query-service/src/data_source/storage/sql/queries.rs b/hotshot-query-service/src/data_source/storage/sql/queries.rs index 696aca5ba3..0f3db6dcc1 100644 --- a/hotshot-query-service/src/data_source/storage/sql/queries.rs +++ b/hotshot-query-service/src/data_source/storage/sql/queries.rs @@ -13,27 +13,30 @@ //! Immutable query functionality of a SQL database. +use std::{ + fmt::Display, + ops::{Bound, RangeBounds}, +}; + +use anyhow::Context; +use derivative::Derivative; +use hotshot_types::{ + simple_certificate::QuorumCertificate2, + traits::{ + block_contents::{BlockHeader, BlockPayload}, + node_implementation::NodeType, + }, +}; +use sqlx::{Arguments, FromRow, Row}; + use super::{Database, Db, Query, QueryAs, Transaction}; -use crate::Leaf2; use crate::{ availability::{ BlockId, BlockQueryData, LeafQueryData, PayloadQueryData, QueryablePayload, VidCommonQueryData, }, data_source::storage::{PayloadMetadata, VidCommonMetadata}, - Header, Payload, QueryError, QueryResult, -}; -use anyhow::Context; -use derivative::Derivative; -use hotshot_types::simple_certificate::QuorumCertificate2; -use hotshot_types::traits::{ - block_contents::{BlockHeader, BlockPayload}, - node_implementation::NodeType, -}; -use sqlx::{Arguments, FromRow, Row}; -use std::{ - fmt::Display, - ops::{Bound, RangeBounds}, + Header, Leaf2, Payload, QueryError, QueryResult, }; pub(super) mod availability; @@ -137,20 +140,20 @@ impl QueryBuilder<'_> { match range.start_bound() { Bound::Included(n) => { bounds.push(format!("{column} >= {}", self.bind(*n as i64)?)); - } + }, Bound::Excluded(n) => { bounds.push(format!("{column} > {}", self.bind(*n as i64)?)); - } - Bound::Unbounded => {} + }, + Bound::Unbounded => {}, } match range.end_bound() { Bound::Included(n) => { bounds.push(format!("{column} <= {}", self.bind(*n as i64)?)); - } + }, Bound::Excluded(n) => { 
bounds.push(format!("{column} < {}", self.bind(*n as i64)?)); - } - Bound::Unbounded => {} + }, + Bound::Unbounded => {}, } let mut where_clause = bounds.join(" AND "); diff --git a/hotshot-query-service/src/data_source/storage/sql/queries/availability.rs b/hotshot-query-service/src/data_source/storage/sql/queries/availability.rs index 0a51d28bee..140b0cafdb 100644 --- a/hotshot-query-service/src/data_source/storage/sql/queries/availability.rs +++ b/hotshot-query-service/src/data_source/storage/sql/queries/availability.rs @@ -12,27 +12,30 @@ //! Availability storage implementation for a database query engine. +use std::ops::RangeBounds; + +use async_trait::async_trait; +use futures::stream::{StreamExt, TryStreamExt}; +use hotshot_types::traits::node_implementation::NodeType; +use snafu::OptionExt; +use sqlx::FromRow; + use super::{ super::transaction::{query, Transaction, TransactionMode}, QueryBuilder, BLOCK_COLUMNS, LEAF_COLUMNS, PAYLOAD_COLUMNS, PAYLOAD_METADATA_COLUMNS, VID_COMMON_COLUMNS, VID_COMMON_METADATA_COLUMNS, }; -use crate::data_source::storage::sql::sqlx::Row; use crate::{ availability::{ BlockId, BlockQueryData, LeafId, LeafQueryData, PayloadQueryData, QueryableHeader, QueryablePayload, TransactionHash, TransactionQueryData, VidCommonQueryData, }, - data_source::storage::{AvailabilityStorage, PayloadMetadata, VidCommonMetadata}, + data_source::storage::{ + sql::sqlx::Row, AvailabilityStorage, PayloadMetadata, VidCommonMetadata, + }, types::HeightIndexed, ErrorSnafu, Header, MissingSnafu, Payload, QueryError, QueryResult, }; -use async_trait::async_trait; -use futures::stream::{StreamExt, TryStreamExt}; -use hotshot_types::traits::node_implementation::NodeType; -use snafu::OptionExt; -use sqlx::FromRow; -use std::ops::RangeBounds; #[async_trait] impl AvailabilityStorage for Transaction diff --git a/hotshot-query-service/src/data_source/storage/sql/queries/explorer.rs b/hotshot-query-service/src/data_source/storage/sql/queries/explorer.rs index 
14fb489a00..a769e5f4ee 100644 --- a/hotshot-query-service/src/data_source/storage/sql/queries/explorer.rs +++ b/hotshot-query-service/src/data_source/storage/sql/queries/explorer.rs @@ -12,6 +12,16 @@ //! Explorer storage implementation for a database query engine. +use std::{collections::VecDeque, num::NonZeroUsize}; + +use async_trait::async_trait; +use committable::{Commitment, Committable}; +use futures::stream::{self, StreamExt, TryStreamExt}; +use hotshot_types::traits::node_implementation::NodeType; +use itertools::Itertools; +use sqlx::{types::Json, FromRow, Row}; +use tagged_base64::{Tagged, TaggedBase64}; + use super::{ super::transaction::{query, Transaction, TransactionMode}, Database, Db, DecodeError, BLOCK_COLUMNS, @@ -33,14 +43,6 @@ use crate::{ }, Header, Payload, QueryError, QueryResult, Transaction as HotshotTransaction, }; -use async_trait::async_trait; -use committable::{Commitment, Committable}; -use futures::stream::{self, StreamExt, TryStreamExt}; -use hotshot_types::traits::node_implementation::NodeType; -use itertools::Itertools; -use sqlx::{types::Json, FromRow, Row}; -use std::{collections::VecDeque, num::NonZeroUsize}; -use tagged_base64::{Tagged, TaggedBase64}; impl From for GetExplorerSummaryError { fn from(err: sqlx::Error) -> Self { @@ -282,7 +284,7 @@ where let query_stmt = match request.target { BlockIdentifier::Latest => { query(&GET_BLOCK_SUMMARIES_QUERY_FOR_LATEST).bind(request.num_blocks.get() as i64) - } + }, BlockIdentifier::Height(height) => query(&GET_BLOCK_SUMMARIES_QUERY_FOR_HEIGHT) .bind(height as i64) .bind(request.num_blocks.get() as i64), @@ -305,10 +307,10 @@ where BlockIdentifier::Latest => query(&GET_BLOCK_DETAIL_QUERY_FOR_LATEST), BlockIdentifier::Height(height) => { query(&GET_BLOCK_DETAIL_QUERY_FOR_HEIGHT).bind(height as i64) - } + }, BlockIdentifier::Hash(hash) => { query(&GET_BLOCK_DETAIL_QUERY_FOR_HASH).bind(hash.to_string()) - } + }, }; let query_result = query_stmt.fetch_one(self.as_mut()).await?; @@ -375,7 
+377,7 @@ where TransactionSummaryFilter::Block(block) => { query(&GET_TRANSACTION_SUMMARIES_QUERY_FOR_BLOCK).bind(*block as i64) - } + }, }; let block_stream = query_stmt @@ -432,10 +434,10 @@ where query(&GET_TRANSACTION_DETAIL_QUERY_FOR_HEIGHT_AND_OFFSET) .bind(height as i64) .bind(offset as i64) - } + }, TransactionIdentifier::Hash(hash) => { query(&GET_TRANSACTION_DETAIL_QUERY_FOR_HASH).bind(hash.to_string()) - } + }, }; let query_row = query_stmt.fetch_one(self.as_mut()).await?; @@ -455,7 +457,7 @@ where key: format!("at {height} and {offset}"), }), ) - } + }, TransactionIdentifier::Hash(hash) => txns .into_iter() .enumerate() diff --git a/hotshot-query-service/src/data_source/storage/sql/queries/node.rs b/hotshot-query-service/src/data_source/storage/sql/queries/node.rs index 326d29e695..dfd99bfc9f 100644 --- a/hotshot-query-service/src/data_source/storage/sql/queries/node.rs +++ b/hotshot-query-service/src/data_source/storage/sql/queries/node.rs @@ -12,6 +12,18 @@ //! Node storage implementation for a database query engine. 
+use std::ops::{Bound, RangeBounds}; + +use anyhow::anyhow; +use async_trait::async_trait; +use futures::stream::{StreamExt, TryStreamExt}; +use hotshot_types::{ + data::VidShare, + traits::{block_contents::BlockHeader, node_implementation::NodeType}, +}; +use snafu::OptionExt; +use sqlx::Row; + use super::{ super::transaction::{query, query_as, Transaction, TransactionMode, Write}, parse_header, DecodeError, QueryBuilder, HEADER_COLUMNS, @@ -24,16 +36,6 @@ use crate::{ types::HeightIndexed, Header, MissingSnafu, NotFoundSnafu, QueryError, QueryResult, }; -use anyhow::anyhow; -use async_trait::async_trait; -use futures::stream::{StreamExt, TryStreamExt}; -use hotshot_types::{ - data::VidShare, - traits::{block_contents::BlockHeader, node_implementation::NodeType}, -}; -use snafu::OptionExt; -use sqlx::Row; -use std::ops::{Bound, RangeBounds}; #[async_trait] impl NodeStorage for Transaction @@ -50,11 +52,11 @@ where // The height of the block is the number of blocks below it, so the total number of // blocks is one more than the height of the highest block. Ok(height as usize + 1) - } + }, (None,) => { // If there are no blocks yet, the height is 0. Ok(0) - } + }, } } @@ -174,11 +176,11 @@ where // The height of the block is the number of blocks below it, so the total number of // blocks is one more than the height of the highest block. height as usize + 1 - } + }, None => { // If there are no blocks yet, the height is 0. 0 - } + }, }; let total_leaves = row.get::("total_leaves") as usize; let null_payloads = row.get::("null_payloads") as usize; @@ -216,7 +218,7 @@ where // sufficient data to answer the query is not as simple as just trying `load_header` // for a specific block ID. 
return self.time_window::(t, end, limit).await; - } + }, WindowStart::Height(h) => h, WindowStart::Hash(h) => self.load_header::(h).await?.block_number(), }; @@ -479,7 +481,7 @@ async fn aggregate_range_bounds( return Ok(None); } height - 1 - } + }, }; Ok(Some((from, to))) } diff --git a/hotshot-query-service/src/data_source/storage/sql/queries/state.rs b/hotshot-query-service/src/data_source/storage/sql/queries/state.rs index 4e3d41d141..328d538ab5 100644 --- a/hotshot-query-service/src/data_source/storage/sql/queries/state.rs +++ b/hotshot-query-service/src/data_source/storage/sql/queries/state.rs @@ -12,17 +12,11 @@ //! Merklized state storage implementation for a database query engine. -use super::{ - super::transaction::{query_as, Transaction, TransactionMode, Write}, - DecodeError, QueryBuilder, -}; -use crate::data_source::storage::sql::sqlx::Row; -use crate::data_source::storage::{pruning::PrunedHeightStorage, sql::build_where_in}; -use crate::{ - data_source::storage::{MerklizedStateHeightStorage, MerklizedStateStorage}, - merklized_state::{MerklizedState, Snapshot}, - QueryError, QueryResult, +use std::{ + collections::{HashMap, HashSet, VecDeque}, + sync::Arc, }; + use ark_serialize::CanonicalDeserialize; use async_trait::async_trait; use futures::stream::TryStreamExt; @@ -31,10 +25,21 @@ use jf_merkle_tree::{ prelude::{MerkleNode, MerkleProof}, DigestAlgorithm, MerkleCommitment, ToTraversalPath, }; -use sqlx::types::BitVec; -use sqlx::types::JsonValue; -use std::collections::{HashMap, HashSet, VecDeque}; -use std::sync::Arc; +use sqlx::types::{BitVec, JsonValue}; + +use super::{ + super::transaction::{query_as, Transaction, TransactionMode, Write}, + DecodeError, QueryBuilder, +}; +use crate::{ + data_source::storage::{ + pruning::PrunedHeightStorage, + sql::{build_where_in, sqlx::Row}, + MerklizedStateHeightStorage, MerklizedStateStorage, + }, + merklized_state::{MerklizedState, Snapshot}, + QueryError, QueryResult, +}; #[async_trait] impl 
MerklizedStateStorage @@ -148,7 +153,7 @@ where .decode_error("malformed merkle node value")?, children: child_nodes, }); - } + }, // If it has an entry, it's a leaf (None, None, Some(index), Some(entry)) => { proof_path.push_back(MerkleNode::Leaf { @@ -159,16 +164,16 @@ where elem: serde_json::from_value(entry.clone()) .decode_error("malformed merkle element")?, }); - } + }, // Otherwise, it's empty. (None, None, Some(_), None) => { proof_path.push_back(MerkleNode::Empty); - } + }, _ => { return Err(QueryError::Error { message: "Invalid type of merkle node found".to_string(), }); - } + }, } } } @@ -223,7 +228,7 @@ where State::Digest::digest(&data).map_err(|err| QueryError::Error { message: format!("failed to update digest: {err:#}"), }) - } + }, MerkleNode::Empty => Ok(init), _ => Err(QueryError::Error { message: "Invalid type of Node in the proof".to_string(), @@ -292,7 +297,7 @@ impl Transaction { .await?; (height, commit) - } + }, Snapshot::Index(created) => { let created = created as i64; let (commit,) = query_as::<(String,)>(&format!( @@ -307,7 +312,7 @@ impl Transaction { let commit = serde_json::from_value(commit.into()) .decode_error("malformed state commitment")?; (created, commit) - } + }, }; // Make sure the requested snapshot is up to date. diff --git a/hotshot-query-service/src/data_source/storage/sql/transaction.rs b/hotshot-query-service/src/data_source/storage/sql/transaction.rs index f7443e36a7..850f8006f8 100644 --- a/hotshot-query-service/src/data_source/storage/sql/transaction.rs +++ b/hotshot-query-service/src/data_source/storage/sql/transaction.rs @@ -18,26 +18,12 @@ //! database connection, so that the updated state of the database can be queried midway through a //! transaction. 
-use super::{ - queries::{ - self, - state::{build_hash_batch_insert, Node}, - DecodeError, - }, - Database, Db, -}; -use crate::{ - availability::{ - BlockQueryData, LeafQueryData, QueryableHeader, QueryablePayload, VidCommonQueryData, - }, - data_source::{ - storage::{pruning::PrunedHeightStorage, UpdateAvailabilityStorage}, - update, - }, - merklized_state::{MerklizedState, UpdateStateData}, - types::HeightIndexed, - Header, Payload, QueryError, QueryResult, +use std::{ + collections::{HashMap, HashSet}, + marker::PhantomData, + time::Instant, }; + use anyhow::{bail, Context}; use ark_serialize::CanonicalSerialize; use async_trait::async_trait; @@ -55,13 +41,30 @@ use hotshot_types::{ }; use itertools::Itertools; use jf_merkle_tree::prelude::{MerkleNode, MerkleProof}; -use sqlx::types::BitVec; pub use sqlx::Executor; -use sqlx::{pool::Pool, query_builder::Separated, Encode, FromRow, QueryBuilder, Type}; -use std::{ - collections::{HashMap, HashSet}, - marker::PhantomData, - time::Instant, +use sqlx::{ + pool::Pool, query_builder::Separated, types::BitVec, Encode, FromRow, QueryBuilder, Type, +}; + +use super::{ + queries::{ + self, + state::{build_hash_batch_insert, Node}, + DecodeError, + }, + Database, Db, +}; +use crate::{ + availability::{ + BlockQueryData, LeafQueryData, QueryableHeader, QueryablePayload, VidCommonQueryData, + }, + data_source::{ + storage::{pruning::PrunedHeightStorage, UpdateAvailabilityStorage}, + update, + }, + merklized_state::{MerklizedState, UpdateStateData}, + types::HeightIndexed, + Header, Payload, QueryError, QueryResult, }; pub type Query<'q> = sqlx::query::Query<'q, Db, ::Arguments<'q>>; @@ -681,10 +684,10 @@ impl, const ARITY: usize> [0_u8; 32].to_vec(), )); hashset.insert([0_u8; 32].to_vec()); - } + }, MerkleNode::ForgettenSubtree { .. 
} => { bail!("Node in the Merkle path contains a forgetten subtree"); - } + }, MerkleNode::Leaf { value, pos, elem } => { let mut leaf_commit = Vec::new(); // Serialize the leaf node hash value into a vector @@ -711,7 +714,7 @@ impl, const ARITY: usize> )); hashset.insert(leaf_commit); - } + }, MerkleNode::Branch { value, children } => { // Get hash let mut branch_hash = Vec::new(); @@ -728,7 +731,7 @@ impl, const ARITY: usize> match child { MerkleNode::Empty => { children_bitvec.push(false); - } + }, MerkleNode::Branch { value, .. } | MerkleNode::Leaf { value, .. } | MerkleNode::ForgettenSubtree { value } => { @@ -740,7 +743,7 @@ impl, const ARITY: usize> children_values.push(hash); // Mark the entry as 1 in bitvec to indicate a non-empty child children_bitvec.push(true); - } + }, } } @@ -758,7 +761,7 @@ impl, const ARITY: usize> )); hashset.insert(branch_hash); hashset.extend(children_values); - } + }, } // advance the traversal path for the internal nodes at each iteration @@ -798,7 +801,7 @@ impl, const ARITY: usize> } } - Node::upsert(name, nodes.into_iter().map(|(n, _, _)| n), self).await?; + Node::upsert(name, nodes.into_iter().map(|(n, ..)| n), self).await?; Ok(()) } diff --git a/hotshot-query-service/src/data_source/update.rs b/hotshot-query-service/src/data_source/update.rs index 86ec016938..c3b832846f 100644 --- a/hotshot-query-service/src/data_source/update.rs +++ b/hotshot-query-service/src/data_source/update.rs @@ -11,32 +11,33 @@ // see . //! A generic algorithm for updating a HotShot Query Service data source with new data. 
-use crate::{ - availability::{ - BlockInfo, BlockQueryData, LeafQueryData, QueryablePayload, UpdateAvailabilityData, - VidCommonQueryData, - }, - Payload, -}; +use std::iter::once; + use anyhow::{ensure, Context}; use async_trait::async_trait; use futures::future::Future; use hotshot::types::{Event, EventType}; -use hotshot_types::{data::VidCommitment, event::LeafInfo}; use hotshot_types::{ - data::{ns_table::parse_ns_table, Leaf2}, + data::{ns_table::parse_ns_table, Leaf2, VidCommitment, VidDisperseShare, VidShare}, + event::LeafInfo, traits::{ block_contents::{BlockHeader, BlockPayload, EncodeBytes, GENESIS_VID_NUM_STORAGE_NODES}, node_implementation::{ConsensusTime, NodeType}, }, - vid::advz::advz_scheme, -}; -use hotshot_types::{ - data::{VidDisperseShare, VidShare}, - vid::avidm::{init_avidm_param, AvidMScheme}, + vid::{ + advz::advz_scheme, + avidm::{init_avidm_param, AvidMScheme}, + }, }; use jf_vid::VidScheme; -use std::iter::once; + +use crate::{ + availability::{ + BlockInfo, BlockQueryData, LeafQueryData, QueryablePayload, UpdateAvailabilityData, + VidCommonQueryData, + }, + Payload, +}; /// An extension trait for types which implement the update trait for each API module. 
/// @@ -109,7 +110,7 @@ where "inconsistent leaf; cannot append leaf information: {err:#}" ); return Err(leaf2.block_header().block_number()); - } + }, }; let block_data = leaf2 .block_payload() @@ -141,12 +142,12 @@ where Err(err) => { tracing::warn!("failed to compute genesis VID: {err:#}"); (None, None) - } + }, } } else { (None, None) } - } + }, }; if vid_common.is_none() { @@ -188,7 +189,7 @@ fn genesis_vid( VidCommonQueryData::new(leaf.block_header().clone(), Some(disperse.common)), VidShare::V0(disperse.shares.remove(0)), )) - } + }, VidCommitment::V1(commit) => { let avidm_param = init_avidm_param(GENESIS_VID_NUM_STORAGE_NODES)?; let weights = vec![1; GENESIS_VID_NUM_STORAGE_NODES]; @@ -208,7 +209,7 @@ fn genesis_vid( VidCommonQueryData::new(leaf.block_header().clone(), None), VidShare::V1(shares.remove(0)), )) - } + }, } } diff --git a/hotshot-query-service/src/error.rs b/hotshot-query-service/src/error.rs index 123ac11db9..30f5d7c529 100644 --- a/hotshot-query-service/src/error.rs +++ b/hotshot-query-service/src/error.rs @@ -10,13 +10,15 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . 
-use crate::{availability, explorer, merklized_state, node, status}; +use std::fmt::Display; + use derive_more::From; use serde::{Deserialize, Serialize}; use snafu::Snafu; -use std::fmt::Display; use tide_disco::StatusCode; +use crate::{availability, explorer, merklized_state, node, status}; + #[derive(Clone, Debug, From, Snafu, Deserialize, Serialize)] pub enum Error { #[snafu(display("{source}"))] diff --git a/hotshot-query-service/src/explorer.rs b/hotshot-query-service/src/explorer.rs index ef259a83e5..714aecaef3 100644 --- a/hotshot-query-service/src/explorer.rs +++ b/hotshot-query-service/src/explorer.rs @@ -17,9 +17,7 @@ pub(crate) mod monetary_value; pub(crate) mod query_data; pub(crate) mod traits; -use self::errors::InvalidLimit; -use crate::availability::{QueryableHeader, QueryablePayload}; -use crate::{api::load_api, Header, Payload, Transaction}; +use std::{fmt::Display, num::NonZeroUsize, path::Path}; pub use currency::*; pub use data_source::*; @@ -28,14 +26,17 @@ use hotshot_types::traits::node_implementation::NodeType; pub use monetary_value::*; pub use query_data::*; use serde::{Deserialize, Serialize}; -use std::fmt::Display; -use std::num::NonZeroUsize; -use std::path::Path; -use tide_disco::StatusCode; -use tide_disco::{api::ApiError, method::ReadState, Api}; +use tide_disco::{api::ApiError, method::ReadState, Api, StatusCode}; pub use traits::*; use vbs::version::StaticVersionType; +use self::errors::InvalidLimit; +use crate::{ + api::load_api, + availability::{QueryableHeader, QueryablePayload}, + Header, Payload, Transaction, +}; + /// [Error] is an enum that represents the various errors that can be returned /// from the Explorer API. 
#[derive(Debug, Clone, Serialize, Deserialize)] @@ -308,7 +309,7 @@ where ) { (Ok(Some(height)), Ok(Some(offset)), _) => { TransactionIdentifier::HeightAndOffset(height, offset) - } + }, (_, _, Ok(Some(hash))) => TransactionIdentifier::Hash(hash), _ => TransactionIdentifier::Latest, }, @@ -341,7 +342,7 @@ where ) { (Ok(Some(height)), Ok(Some(offset)), _) => { TransactionIdentifier::HeightAndOffset(height, offset) - } + }, (_, _, Ok(Some(hash))) => TransactionIdentifier::Hash(hash), _ => TransactionIdentifier::Latest, }; @@ -393,6 +394,13 @@ where #[cfg(test)] mod test { + use std::{cmp::min, time::Duration}; + + use futures::StreamExt; + use portpicker::pick_unused_port; + use surf_disco::Client; + use tide_disco::App; + use super::*; use crate::{ availability, @@ -403,11 +411,6 @@ mod test { }, ApiState, Error, }; - use futures::StreamExt; - use portpicker::pick_unused_port; - use std::{cmp::min, time::Duration}; - use surf_disco::Client; - use tide_disco::App; async fn validate(client: &Client) { let explorer_summary_response: ExplorerSummaryResponse = diff --git a/hotshot-query-service/src/explorer/currency.rs b/hotshot-query-service/src/explorer/currency.rs index 2e128d17e9..0fc3a03d1f 100644 --- a/hotshot-query-service/src/explorer/currency.rs +++ b/hotshot-query-service/src/explorer/currency.rs @@ -10,11 +10,12 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . -use super::errors::ExplorerAPIError; -use serde::ser::SerializeStruct; -use serde::{Deserialize, Serialize, Serializer}; use std::fmt::Display; +use serde::{ser::SerializeStruct, Deserialize, Serialize, Serializer}; + +use super::errors::ExplorerAPIError; + /// CurrencyMismatchError is an error that occurs when two different currencies /// are attempted to be combined in any way that would result in an invalid /// state. 
diff --git a/hotshot-query-service/src/explorer/data_source.rs b/hotshot-query-service/src/explorer/data_source.rs index aa5613e9b3..088a75f272 100644 --- a/hotshot-query-service/src/explorer/data_source.rs +++ b/hotshot-query-service/src/explorer/data_source.rs @@ -10,6 +10,10 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . +use async_trait::async_trait; +use hotshot_types::traits::node_implementation::NodeType; +use tagged_base64::TaggedBase64; + use super::{ query_data::{ BlockDetail, BlockIdentifier, BlockSummary, ExplorerSummary, GetBlockDetailError, @@ -24,9 +28,6 @@ use crate::{ availability::{QueryableHeader, QueryablePayload}, Header, Payload, Transaction, }; -use async_trait::async_trait; -use hotshot_types::traits::node_implementation::NodeType; -use tagged_base64::TaggedBase64; /// An interface for querying Data and Statistics from the HotShot Blockchain. /// diff --git a/hotshot-query-service/src/explorer/errors.rs b/hotshot-query-service/src/explorer/errors.rs index 90c6a0ac1d..eab824d8be 100644 --- a/hotshot-query-service/src/explorer/errors.rs +++ b/hotshot-query-service/src/explorer/errors.rs @@ -10,9 +10,9 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . 
-use serde::ser::SerializeStruct; -use serde::{Deserialize, Serialize, Serializer}; use std::fmt::{Debug, Display}; + +use serde::{ser::SerializeStruct, Deserialize, Serialize, Serializer}; use tide_disco::StatusCode; /// [ExplorerAPIError] is a trait that represents an error that can be returned @@ -417,7 +417,7 @@ mod test { let want = query_error; match &have.error { - crate::QueryError::NotFound => {} + crate::QueryError::NotFound => {}, _ => panic!("deserialized QueryError mismatch: have: {have}, want: {want}"), } } diff --git a/hotshot-query-service/src/explorer/monetary_value.rs b/hotshot-query-service/src/explorer/monetary_value.rs index 399e18e021..45354e59ee 100644 --- a/hotshot-query-service/src/explorer/monetary_value.rs +++ b/hotshot-query-service/src/explorer/monetary_value.rs @@ -10,15 +10,16 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . -use super::currency::{CurrencyCode, CurrencyMismatchError}; -use itertools::Itertools; -use serde::{Deserialize, Serialize, Serializer}; -use std::fmt::Display; use std::{ - fmt::Debug, + fmt::{Debug, Display}, ops::{Add, Sub}, }; +use itertools::Itertools; +use serde::{Deserialize, Serialize, Serializer}; + +use super::currency::{CurrencyCode, CurrencyMismatchError}; + #[derive(Debug, Clone, PartialEq, Eq)] /// [MonetaryValue]s is a struct that paris a [CurrencyCode] with a value. /// This structure is able to represent both positive and negative currencies. 
@@ -195,7 +196,7 @@ where return Err(E::custom( "no non-breaking space found in expected MonetaryValue", )) - } + }, }; let first: String = value.chars().take(index).collect(); @@ -244,7 +245,7 @@ fn determine_pre_and_post_decimal_strings(value: &str) -> (String, Option { panic!("{} failed to parse: {}", value, err); - } + }, Ok(result) => result, }; @@ -436,7 +437,7 @@ mod test { let result = match result { Err(err) => { panic!("{} failed to parse: {}", value, err); - } + }, Ok(result) => result, }; diff --git a/hotshot-query-service/src/explorer/query_data.rs b/hotshot-query-service/src/explorer/query_data.rs index 7b4f0b3de7..4bb05062b2 100644 --- a/hotshot-query-service/src/explorer/query_data.rs +++ b/hotshot-query-service/src/explorer/query_data.rs @@ -10,6 +10,17 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . +use std::{ + collections::VecDeque, + fmt::{Debug, Display}, + num::{NonZeroUsize, TryFromIntError}, +}; + +use hotshot_types::traits::node_implementation::NodeType; +use serde::{Deserialize, Serialize}; +use tide_disco::StatusCode; +use time::format_description::well_known::Rfc3339; + use super::{ errors::{BadQuery, ExplorerAPIError, InvalidLimit, NotFound, QueryError, Unimplemented}, monetary_value::MonetaryValue, @@ -17,18 +28,10 @@ use super::{ }; use crate::{ availability::{BlockQueryData, QueryableHeader, QueryablePayload, TransactionHash}, + node::BlockHash, + types::HeightIndexed, Header, Payload, Resolvable, Transaction, }; -use crate::{node::BlockHash, types::HeightIndexed}; -use hotshot_types::traits::node_implementation::NodeType; -use serde::{Deserialize, Serialize}; -use std::{ - collections::VecDeque, - fmt::{Debug, Display}, - num::{NonZeroUsize, TryFromIntError}, -}; -use tide_disco::StatusCode; -use time::format_description::well_known::Rfc3339; /// BlockIdentifier is an enum that represents multiple ways of referring to /// a specific Block. 
These use cases are specific to a Block Explorer and @@ -79,7 +82,7 @@ impl Display for TransactionIdentifier { TransactionIdentifier::Latest => write!(f, "latest"), TransactionIdentifier::HeightAndOffset(height, offset) => { write!(f, "{} {}", height, offset) - } + }, TransactionIdentifier::Hash(hash) => write!(f, "{}", hash), } } diff --git a/hotshot-query-service/src/explorer/traits.rs b/hotshot-query-service/src/explorer/traits.rs index 7b44e9a0ad..ebd59c53a4 100644 --- a/hotshot-query-service/src/explorer/traits.rs +++ b/hotshot-query-service/src/explorer/traits.rs @@ -10,9 +10,10 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . +use std::fmt::Debug; + use hotshot_types::traits::{block_contents::BlockHeader, node_implementation::NodeType}; use serde::{de::DeserializeOwned, Serialize}; -use std::fmt::Debug; /// [ExplorerHeader] is a trait that represents certain extensions to the /// [BlockHeader] that are specific to the Block Explorer API. This trait diff --git a/hotshot-query-service/src/fetching.rs b/hotshot-query-service/src/fetching.rs index 427f482553..22ea7e8b6f 100644 --- a/hotshot-query-service/src/fetching.rs +++ b/hotshot-query-service/src/fetching.rs @@ -21,16 +21,16 @@ //! implementations of [`Provider`] for various data availability sources. //! 
-use async_lock::Mutex; -use async_lock::Semaphore; -use backoff::{backoff::Backoff, ExponentialBackoff}; -use derivative::Derivative; use std::{ collections::{hash_map::Entry, BTreeSet, HashMap}, fmt::Debug, sync::Arc, time::Duration, }; + +use async_lock::{Mutex, Semaphore}; +use backoff::{backoff::Backoff, ExponentialBackoff}; +use derivative::Derivative; use tokio::{spawn, time::sleep}; pub mod provider; @@ -122,12 +122,12 @@ impl Fetcher { e.get_mut().extend(callbacks); tracing::info!(?req, callbacks = ?e.get(), "resource is already being fetched"); return; - } + }, Entry::Vacant(e) => { // If the object is not being fetched, we will register our own callback and // then fetch it ourselves. e.insert(callbacks.into_iter().collect()); - } + }, } } diff --git a/hotshot-query-service/src/fetching/provider.rs b/hotshot-query-service/src/fetching/provider.rs index d4f5eb4d05..7a38b4b539 100644 --- a/hotshot-query-service/src/fetching/provider.rs +++ b/hotshot-query-service/src/fetching/provider.rs @@ -35,10 +35,12 @@ //! * [`TestProvider`] //! -use super::Request; -use async_trait::async_trait; use std::sync::Arc; +use async_trait::async_trait; + +use super::Request; + mod any; mod query_service; mod testing; diff --git a/hotshot-query-service/src/fetching/provider/any.rs b/hotshot-query-service/src/fetching/provider/any.rs index 0896abdce4..9a67cdf01b 100644 --- a/hotshot-query-service/src/fetching/provider/any.rs +++ b/hotshot-query-service/src/fetching/provider/any.rs @@ -10,6 +10,12 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . 
+use std::{fmt::Debug, sync::Arc}; + +use async_trait::async_trait; +use derivative::Derivative; +use hotshot_types::traits::node_implementation::NodeType; + use super::{Provider, Request}; use crate::{ availability::LeafQueryData, @@ -17,11 +23,6 @@ use crate::{ fetching::request::{LeafRequest, PayloadRequest, VidCommonRequest}, Payload, VidCommon, }; -use async_trait::async_trait; -use derivative::Derivative; -use hotshot_types::traits::node_implementation::NodeType; -use std::fmt::Debug; -use std::sync::Arc; /// Blanket trait combining [`Debug`] and [`Provider`]. /// @@ -191,7 +192,7 @@ where providers.len() ); continue; - } + }, } } @@ -201,6 +202,11 @@ where // These tests run the `postgres` Docker image, which doesn't work on Windows. #[cfg(all(test, not(target_os = "windows")))] mod test { + use futures::stream::StreamExt; + use portpicker::pick_unused_port; + use tide_disco::App; + use vbs::version::StaticVersionType; + use super::*; use crate::{ availability::{define_api, AvailabilityDataSource, UpdateAvailabilityData}, @@ -215,10 +221,6 @@ mod test { types::HeightIndexed, ApiState, Error, }; - use futures::stream::StreamExt; - use portpicker::pick_unused_port; - use tide_disco::App; - use vbs::version::StaticVersionType; type Provider = AnyProvider; diff --git a/hotshot-query-service/src/fetching/provider/query_service.rs b/hotshot-query-service/src/fetching/provider/query_service.rs index dbf09aa900..5efca08c99 100644 --- a/hotshot-query-service/src/fetching/provider/query_service.rs +++ b/hotshot-query-service/src/fetching/provider/query_service.rs @@ -10,14 +10,6 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . 
-use super::Provider; - -use crate::{ - availability::{LeafQueryData, PayloadQueryData, VidCommonQueryData}, - fetching::request::{LeafRequest, PayloadRequest, VidCommonRequest}, - types::HeightIndexed, - Error, Payload, VidCommon, -}; use async_trait::async_trait; use committable::Committable; use futures::try_join; @@ -30,6 +22,14 @@ use jf_vid::VidScheme; use surf_disco::{Client, Url}; use vbs::version::StaticVersionType; +use super::Provider; +use crate::{ + availability::{LeafQueryData, PayloadQueryData, VidCommonQueryData}, + fetching::request::{LeafRequest, PayloadRequest, VidCommonRequest}, + types::HeightIndexed, + Error, Payload, VidCommon, +}; + /// Data availability provider backed by another instance of this query service. /// /// This fetcher implements the [`Provider`] interface by querying the REST API provided by another @@ -79,7 +79,7 @@ where Err(err) => { tracing::error!(%err, "unable to compute VID commitment"); return None; - } + }, }, ); if commit != req.0 { @@ -91,11 +91,11 @@ where } Some(payload.data) - } + }, Err(err) => { tracing::error!("failed to fetch payload {req:?}: {err}"); None - } + }, } } } @@ -134,11 +134,11 @@ where leaf.leaf.unfill_block_payload(); Some(leaf) - } + }, Err(err) => { tracing::error!("failed to fetch leaf {req:?}: {err}"); None - } + }, } } } @@ -171,18 +171,18 @@ where tracing::error!(?req, ?res, "Expect VID common data but found None"); None } - } + }, VidCommitment::V1(_) => { if res.common.is_some() { tracing::warn!(?req, ?res, "Expect no VID common data but found some.") } None - } + }, }, Err(err) => { tracing::error!("failed to fetch VID common {req:?}: {err}"); None - } + }, } } } @@ -190,8 +190,20 @@ where // These tests run the `postgres` Docker image, which doesn't work on Windows. 
#[cfg(all(test, not(target_os = "windows")))] mod test { - use super::*; + use std::{future::IntoFuture, time::Duration}; + use committable::Committable; + use futures::{ + future::{join, FutureExt}, + stream::StreamExt, + }; + use generic_array::GenericArray; + use hotshot_example_types::node_types::TestVersions; + use portpicker::pick_unused_port; + use rand::RngCore; + use tide_disco::{error::ServerError, App}; + + use super::*; use crate::{ api::load_api, availability::{ @@ -219,17 +231,6 @@ mod test { types::HeightIndexed, ApiState, }; - use committable::Committable; - use futures::{ - future::{join, FutureExt}, - stream::StreamExt, - }; - use generic_array::GenericArray; - use hotshot_example_types::node_types::TestVersions; - use portpicker::pick_unused_port; - use rand::RngCore; - use std::{future::IntoFuture, time::Duration}; - use tide_disco::{error::ServerError, App}; type Provider = TestProvider>; @@ -1201,7 +1202,7 @@ mod test { .as_ref() .fail_begins_writable(FailableAction::Any) .await - } + }, FailureType::Write => data_source.as_ref().fail_writes(FailableAction::Any).await, FailureType::Commit => data_source.as_ref().fail_commits(FailableAction::Any).await, } @@ -1304,19 +1305,19 @@ mod test { .as_ref() .fail_one_begin_writable(FailableAction::Any) .await - } + }, FailureType::Write => { data_source .as_ref() .fail_one_write(FailableAction::Any) .await - } + }, FailureType::Commit => { data_source .as_ref() .fail_one_commit(FailableAction::Any) .await - } + }, } assert_eq!(leaves[0], data_source.get_leaf(1).await.await); @@ -1882,7 +1883,7 @@ mod test { for (leaf, payload) in leaves.iter().zip(payloads) { assert_eq!(payload.block_hash, leaf.block_hash()); } - } + }, MetadataType::Vid => { let vids = data_source.subscribe_vid_common_metadata(1).await.take(3); @@ -1895,7 +1896,7 @@ mod test { for (leaf, vid) in leaves.iter().zip(vids) { assert_eq!(vid.block_hash, leaf.block_hash()); } - } + }, } } diff --git 
a/hotshot-query-service/src/fetching/provider/testing.rs b/hotshot-query-service/src/fetching/provider/testing.rs index 22803d7d19..8ebcc904b2 100644 --- a/hotshot-query-service/src/fetching/provider/testing.rs +++ b/hotshot-query-service/src/fetching/provider/testing.rs @@ -12,19 +12,23 @@ #![cfg(any(test, feature = "testing"))] -use super::Provider; -use crate::fetching::Request; +use std::{ + fmt::Debug, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, +}; + use async_lock::RwLock; use async_trait::async_trait; use derivative::Derivative; use hotshot_types::traits::node_implementation::NodeType; -use std::sync::Arc; -use std::{ - fmt::Debug, - sync::atomic::{AtomicBool, Ordering}, -}; use tokio::sync::broadcast; +use super::Provider; +use crate::fetching::Request; + /// Adaptor to add test-only functionality to an existing [`Provider`]. /// /// [`TestProvider`] wraps an existing provider `P` and adds some additional functionality which can diff --git a/hotshot-query-service/src/fetching/request.rs b/hotshot-query-service/src/fetching/request.rs index be3d1de434..b04838d5f2 100644 --- a/hotshot-query-service/src/fetching/request.rs +++ b/hotshot-query-service/src/fetching/request.rs @@ -12,15 +12,15 @@ //! Requests for fetching resources. +use std::{fmt::Debug, hash::Hash}; + +use derive_more::{From, Into}; +use hotshot_types::{data::VidCommitment, traits::node_implementation::NodeType}; + use crate::{ availability::{LeafHash, LeafQueryData, QcHash}, Payload, }; -use derive_more::{From, Into}; -use hotshot_types::{data::VidCommitment, traits::node_implementation::NodeType}; - -use std::fmt::Debug; -use std::hash::Hash; /// A request for a resource. 
pub trait Request: Copy + Debug + Eq + Hash + Send { diff --git a/hotshot-query-service/src/lib.rs b/hotshot-query-service/src/lib.rs index 5fafb197dd..eabb0e129f 100644 --- a/hotshot-query-service/src/lib.rs +++ b/hotshot-query-service/src/lib.rs @@ -428,26 +428,25 @@ pub mod task; pub mod testing; pub mod types; -pub use error::Error; -pub use resolvable::Resolvable; +use std::sync::Arc; use async_trait::async_trait; use derive_more::{Deref, From, Into}; +pub use error::Error; use futures::{future::BoxFuture, stream::StreamExt}; use hotshot::types::SystemContextHandle; use hotshot_types::traits::{ node_implementation::{NodeImplementation, NodeType, Versions}, BlockPayload, }; +pub use hotshot_types::{data::Leaf2, simple_certificate::QuorumCertificate}; +pub use resolvable::Resolvable; use serde::{Deserialize, Serialize}; use snafu::Snafu; -use std::sync::Arc; use task::BackgroundTask; use tide_disco::{method::ReadState, App, StatusCode}; use vbs::version::StaticVersionType; -pub use hotshot_types::{data::Leaf2, simple_certificate::QuorumCertificate}; - pub type VidCommon = Option; pub type Payload = ::BlockPayload; @@ -589,6 +588,23 @@ where #[cfg(test)] mod test { + use std::{ + ops::{Bound, RangeBounds}, + time::Duration, + }; + + use async_lock::RwLock; + use async_trait::async_trait; + use atomic_store::{load_store::BincodeLoadStore, AtomicStore, AtomicStoreLoader, RollingLog}; + use futures::future::FutureExt; + use hotshot_types::{data::VidShare, simple_certificate::QuorumCertificate2}; + use portpicker::pick_unused_port; + use surf_disco::Client; + use tempfile::TempDir; + use testing::mocks::MockBase; + use tide_disco::App; + use toml::toml; + use super::*; use crate::{ availability::{ @@ -604,19 +620,6 @@ mod test { mocks::{MockHeader, MockPayload, MockTypes}, }, }; - use async_lock::RwLock; - use async_trait::async_trait; - use atomic_store::{load_store::BincodeLoadStore, AtomicStore, AtomicStoreLoader, RollingLog}; - use futures::future::FutureExt; - 
use hotshot_types::{data::VidShare, simple_certificate::QuorumCertificate2}; - use portpicker::pick_unused_port; - use std::ops::{Bound, RangeBounds}; - use std::time::Duration; - use surf_disco::Client; - use tempfile::TempDir; - use testing::mocks::MockBase; - use tide_disco::App; - use toml::toml; struct CompositeState { store: AtomicStore, diff --git a/hotshot-query-service/src/merklized_state.rs b/hotshot-query-service/src/merklized_state.rs index 453a34151c..4cbc244270 100644 --- a/hotshot-query-service/src/merklized_state.rs +++ b/hotshot-query-service/src/merklized_state.rs @@ -15,14 +15,16 @@ //! The state API provides an interface for serving queries against arbitrarily old snapshots of the state. //! This allows a full Merkle tree to be reconstructed from storage. //! If any parent state is missing then the partial snapshot can not be queried. -use std::{fmt::Display, path::PathBuf}; +use std::{ + fmt::{Debug, Display}, + path::PathBuf, +}; use derive_more::From; use futures::FutureExt; use hotshot_types::traits::node_implementation::NodeType; use serde::{Deserialize, Serialize}; use snafu::{ResultExt, Snafu}; -use std::fmt::Debug; use tagged_base64::TaggedBase64; use tide_disco::{api::ApiError, method::ReadState, Api, RequestError, StatusCode}; use vbs::version::StaticVersionType; diff --git a/hotshot-query-service/src/merklized_state/data_source.rs b/hotshot-query-service/src/merklized_state/data_source.rs index 562071413b..f5b77238ad 100644 --- a/hotshot-query-service/src/merklized_state/data_source.rs +++ b/hotshot-query-service/src/merklized_state/data_source.rs @@ -16,22 +16,20 @@ //! and provides methods for querying and reconstructing the snapshot. //! 
+use std::{cmp::Ordering, fmt::Debug, str::FromStr}; + use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use async_trait::async_trait; use derivative::Derivative; use derive_more::Display; use hotshot_types::traits::node_implementation::NodeType; - use jf_merkle_tree::{ prelude::MerkleProof, DigestAlgorithm, Element, ForgetableMerkleTreeScheme, Index, MerkleCommitment, NodeValue, ToTraversalPath, }; use serde::{de::DeserializeOwned, Serialize}; -use std::{fmt::Debug, str::FromStr}; use tagged_base64::TaggedBase64; -use std::cmp::Ordering; - use crate::QueryResult; /// This trait defines methods that a data source should implement diff --git a/hotshot-query-service/src/metrics.rs b/hotshot-query-service/src/metrics.rs index 9216fb0a30..ceb365a15a 100644 --- a/hotshot-query-service/src/metrics.rs +++ b/hotshot-query-service/src/metrics.rs @@ -12,6 +12,11 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . +use std::{ + collections::HashMap, + sync::{Arc, RwLock}, +}; + use hotshot_types::traits::metrics; use itertools::Itertools; use prometheus::{ @@ -19,8 +24,6 @@ use prometheus::{ Encoder, HistogramVec, Opts, Registry, TextEncoder, }; use snafu::Snafu; -use std::collections::HashMap; -use std::sync::{Arc, RwLock}; #[derive(Debug, Snafu)] pub enum MetricsError { @@ -444,11 +447,12 @@ impl metrics::MetricsFamily<()> for TextFamily { #[cfg(test)] mod test { - use super::*; - use crate::testing::setup_test; use metrics::Metrics; use tide_disco::metrics::Metrics as _; + use super::*; + use crate::testing::setup_test; + #[test] fn test_prometheus_metrics() { setup_test(); diff --git a/hotshot-query-service/src/node.rs b/hotshot-query-service/src/node.rs index 6499cc3e36..7129d2f307 100644 --- a/hotshot-query-service/src/node.rs +++ b/hotshot-query-service/src/node.rs @@ -20,16 +20,18 @@ //! fully synced with the entire history of the chain. However, the node will _eventually_ sync and //! 
return the expected counts. -use crate::{api::load_api, QueryError}; +use std::{fmt::Display, ops::Bound, path::PathBuf}; + use derive_more::From; use futures::FutureExt; use hotshot_types::traits::node_implementation::NodeType; use serde::{Deserialize, Serialize}; use snafu::{ResultExt, Snafu}; -use std::{fmt::Display, ops::Bound, path::PathBuf}; use tide_disco::{api::ApiError, method::ReadState, Api, RequestError, StatusCode}; use vbs::version::StaticVersionType; +use crate::{api::load_api, QueryError}; + pub(crate) mod data_source; pub(crate) mod query_data; pub use data_source::*; @@ -201,17 +203,8 @@ where #[cfg(test)] mod test { - use super::*; - use crate::{ - data_source::ExtensibleDataSource, - task::BackgroundTask, - testing::{ - consensus::{MockDataSource, MockNetwork, MockSqlDataSource}, - mocks::{mock_transaction, MockBase, MockTypes}, - setup_test, - }, - ApiState, Error, Header, - }; + use std::time::Duration; + use async_lock::RwLock; use committable::Committable; use futures::{FutureExt, StreamExt}; @@ -224,13 +217,24 @@ mod test { }, }; use portpicker::pick_unused_port; - use std::time::Duration; use surf_disco::Client; use tempfile::TempDir; use tide_disco::{App, Error as _}; use tokio::time::sleep; use toml::toml; + use super::*; + use crate::{ + data_source::ExtensibleDataSource, + task::BackgroundTask, + testing::{ + consensus::{MockDataSource, MockNetwork, MockSqlDataSource}, + mocks::{mock_transaction, MockBase, MockTypes}, + setup_test, + }, + ApiState, Error, Header, + }; + #[tokio::test(flavor = "multi_thread")] async fn test_api() { setup_test(); diff --git a/hotshot-query-service/src/node/data_source.rs b/hotshot-query-service/src/node/data_source.rs index a256b209bc..3a0b124fef 100644 --- a/hotshot-query-service/src/node/data_source.rs +++ b/hotshot-query-service/src/node/data_source.rs @@ -24,13 +24,15 @@ //! updated implicitly via the [availability API update //! trait](crate::availability::UpdateAvailabilityData). 
-use super::query_data::{BlockHash, BlockId, SyncStatus, TimeWindowQueryData}; -use crate::{Header, QueryResult}; +use std::ops::RangeBounds; + use async_trait::async_trait; use derivative::Derivative; use derive_more::From; use hotshot_types::{data::VidShare, traits::node_implementation::NodeType}; -use std::ops::RangeBounds; + +use super::query_data::{BlockHash, BlockId, SyncStatus, TimeWindowQueryData}; +use crate::{Header, QueryResult}; #[derive(Derivative, From)] #[derivative(Copy(bound = ""), Debug(bound = ""))] diff --git a/hotshot-query-service/src/node/query_data.rs b/hotshot-query-service/src/node/query_data.rs index f1805bc1f0..eff49e6a33 100644 --- a/hotshot-query-service/src/node/query_data.rs +++ b/hotshot-query-service/src/node/query_data.rs @@ -10,11 +10,11 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . -use crate::types::HeightIndexed; use derivative::Derivative; use serde::{Deserialize, Serialize}; pub use crate::availability::{BlockHash, BlockId}; +use crate::types::HeightIndexed; #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Deserialize, Serialize)] pub struct SyncStatus { diff --git a/hotshot-query-service/src/status.rs b/hotshot-query-service/src/status.rs index abe973f10a..2c2d4379eb 100644 --- a/hotshot-query-service/src/status.rs +++ b/hotshot-query-service/src/status.rs @@ -23,17 +23,17 @@ //! * snapshots of the state right now, with no way to query historical snapshots //! 
* summary statistics -use crate::api::load_api; +use std::{borrow::Cow, fmt::Display, path::PathBuf}; + use derive_more::From; use futures::FutureExt; use serde::{Deserialize, Serialize}; use snafu::Snafu; -use std::borrow::Cow; -use std::fmt::Display; -use std::path::PathBuf; use tide_disco::{api::ApiError, method::ReadState, Api, RequestError, StatusCode}; use vbs::version::StaticVersionType; +use crate::api::load_api; + pub(crate) mod data_source; pub use data_source::*; @@ -107,6 +107,17 @@ where #[cfg(test)] mod test { + use std::{str::FromStr, time::Duration}; + + use async_lock::RwLock; + use futures::FutureExt; + use portpicker::pick_unused_port; + use reqwest::redirect::Policy; + use surf_disco::Client; + use tempfile::TempDir; + use tide_disco::{App, Url}; + use toml::toml; + use super::*; use crate::{ data_source::ExtensibleDataSource, @@ -118,16 +129,6 @@ mod test { }, ApiState, Error, }; - use async_lock::RwLock; - use futures::FutureExt; - use portpicker::pick_unused_port; - use reqwest::redirect::Policy; - use std::str::FromStr; - use std::time::Duration; - use surf_disco::Client; - use tempfile::TempDir; - use tide_disco::{App, Url}; - use toml::toml; #[tokio::test(flavor = "multi_thread")] async fn test_api() { diff --git a/hotshot-query-service/src/status/data_source.rs b/hotshot-query-service/src/status/data_source.rs index 59af95fd23..5857c8aaa0 100644 --- a/hotshot-query-service/src/status/data_source.rs +++ b/hotshot-query-service/src/status/data_source.rs @@ -10,15 +10,15 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . 
+use async_trait::async_trait; +use chrono::Utc; +use hotshot_types::traits::metrics::Metrics; + use crate::{ metrics::{MetricsError, PrometheusMetrics}, QueryError, QueryResult, }; -use async_trait::async_trait; -use chrono::Utc; -use hotshot_types::traits::metrics::Metrics; - pub trait HasMetrics { fn metrics(&self) -> &PrometheusMetrics; } diff --git a/hotshot-query-service/src/task.rs b/hotshot-query-service/src/task.rs index 5b816b6ae6..fe3f3e2f20 100644 --- a/hotshot-query-service/src/task.rs +++ b/hotshot-query-service/src/task.rs @@ -12,10 +12,10 @@ //! Async task utilities. +use std::{fmt::Display, sync::Arc}; + use derivative::Derivative; use futures::future::Future; -use std::fmt::Display; -use std::sync::Arc; use tokio::{ spawn, task::{JoinError, JoinHandle}, diff --git a/hotshot-query-service/src/testing/consensus.rs b/hotshot-query-service/src/testing/consensus.rs index 28236ad6bf..ce65187d9c 100644 --- a/hotshot-query-service/src/testing/consensus.rs +++ b/hotshot-query-service/src/testing/consensus.rs @@ -10,16 +10,8 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . 
-use super::mocks::{MockMembership, MockNodeImpl, MockTransaction, MockTypes, MockVersions}; -use crate::{ - availability::{AvailabilityDataSource, UpdateAvailabilityData}, - data_source::{FileSystemDataSource, SqlDataSource, VersionedDataSource}, - fetching::provider::NoFetching, - node::NodeDataSource, - status::{StatusDataSource, UpdateStatusData}, - task::BackgroundTask, - SignatureKey, -}; +use std::{fmt::Display, num::NonZeroUsize, str::FromStr, sync::Arc, time::Duration}; + use async_lock::RwLock; use async_trait::async_trait; use futures::{ @@ -44,10 +36,6 @@ use hotshot_types::{ traits::{election::Membership, network::Topic, signature_key::SignatureKey as _}, HotShotConfig, PeerConfig, }; -use std::num::NonZeroUsize; -use std::sync::Arc; -use std::time::Duration; -use std::{fmt::Display, str::FromStr}; use tokio::{ runtime::Handle, task::{block_in_place, yield_now}, @@ -55,6 +43,17 @@ use tokio::{ use tracing::{info_span, Instrument}; use url::Url; +use super::mocks::{MockMembership, MockNodeImpl, MockTransaction, MockTypes, MockVersions}; +use crate::{ + availability::{AvailabilityDataSource, UpdateAvailabilityData}, + data_source::{FileSystemDataSource, SqlDataSource, VersionedDataSource}, + fetching::provider::NoFetching, + node::NodeDataSource, + status::{StatusDataSource, UpdateStatusData}, + task::BackgroundTask, + SignatureKey, +}; + struct MockNode { hotshot: SystemContextHandle, data_source: D, diff --git a/hotshot-query-service/src/testing/mocks.rs b/hotshot-query-service/src/testing/mocks.rs index 5c271ca1ff..8883294f79 100644 --- a/hotshot-query-service/src/testing/mocks.rs +++ b/hotshot-query-service/src/testing/mocks.rs @@ -10,12 +10,8 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . 
-use crate::explorer::traits::{ExplorerHeader, ExplorerTransaction}; -use crate::merklized_state::MerklizedState; -use crate::{ - availability::{QueryableHeader, QueryablePayload}, - types::HeightIndexed, -}; +use std::ops::Range; + use hotshot::traits::{ election::static_committee::StaticCommittee, implementations::MemoryNetwork, NodeImplementation, }; @@ -25,22 +21,26 @@ use hotshot_example_types::{ state_types::{TestInstanceState, TestValidatedState}, storage_types::TestStorage, }; -use hotshot_types::traits::node_implementation::Versions; use hotshot_types::{ data::{QuorumProposal, ViewNumber}, signature_key::BLSPubKey, - traits::node_implementation::NodeType, + traits::node_implementation::{NodeType, Versions}, }; - use jf_merkle_tree::{ prelude::{MerkleProof, Sha3Digest, Sha3Node}, universal_merkle_tree::UniversalMerkleTree, ForgetableMerkleTreeScheme, ForgetableUniversalMerkleTreeScheme, }; use serde::{Deserialize, Serialize}; -use std::ops::Range; use vbs::version::StaticVersion; +use crate::{ + availability::{QueryableHeader, QueryablePayload}, + explorer::traits::{ExplorerHeader, ExplorerTransaction}, + merklized_state::MerklizedState, + types::HeightIndexed, +}; + pub type MockHeader = TestBlockHeader; pub type MockPayload = TestBlockPayload; pub type MockTransaction = TestTransaction; diff --git a/hotshot-stake-table/src/mt_based.rs b/hotshot-stake-table/src/mt_based.rs index a6804662be..f96cf53a58 100644 --- a/hotshot-stake-table/src/mt_based.rs +++ b/hotshot-stake-table/src/mt_based.rs @@ -100,7 +100,7 @@ impl StakeTableScheme for StakeTable { Some(index) => { let branches = to_merkle_path(*index, self.height); root.simple_lookup(self.height, &branches) - } + }, None => Err(StakeTableError::KeyNotFound), } } @@ -116,7 +116,7 @@ impl StakeTableScheme for StakeTable { Some(index) => { let branches = to_merkle_path(*index, self.height); root.lookup(self.height, &branches) - } + }, None => Err(StakeTableError::KeyNotFound), }?; let amount = 
*proof.value().ok_or(StakeTableError::KeyNotFound)?; @@ -149,7 +149,7 @@ impl StakeTableScheme for StakeTable { negative, )?; Ok(value) - } + }, None => Err(StakeTableError::KeyNotFound), } } @@ -221,7 +221,7 @@ impl StakeTable { value, )?; Ok(old_value) - } + }, None => Err(StakeTableError::KeyNotFound), } } diff --git a/hotshot-stake-table/src/mt_based/internal.rs b/hotshot-stake-table/src/mt_based/internal.rs index b08301ee30..d28233f2e7 100644 --- a/hotshot-stake-table/src/mt_based/internal.rs +++ b/hotshot-stake-table/src/mt_based/internal.rs @@ -155,10 +155,10 @@ impl MerkleProof { let comm = Digest::evaluate(input) .map_err(|_| StakeTableError::RescueError)?[0]; Ok(comm) - } + }, MerklePathEntry::Leaf { .. } => Err(StakeTableError::MalformedProof), }) - } + }, _ => Err(StakeTableError::MalformedProof), } } @@ -305,7 +305,7 @@ impl PersistentMerkleNode { siblings: siblings.try_into().unwrap(), }); Ok(proof) - } + }, PersistentMerkleNode::Leaf { comm: _, key, @@ -341,7 +341,7 @@ impl PersistentMerkleNode { ptr += 1; } children[ptr].key_by_stake(stake_number) - } + }, PersistentMerkleNode::Leaf { comm: _, key, @@ -441,7 +441,7 @@ impl PersistentMerkleNode { }), value, )) - } + }, PersistentMerkleNode::Leaf { comm: _, key: node_key, @@ -473,7 +473,7 @@ impl PersistentMerkleNode { } else { Err(StakeTableError::MismatchedKey) } - } + }, } } @@ -518,7 +518,7 @@ impl PersistentMerkleNode { old_value, )) } - } + }, PersistentMerkleNode::Leaf { comm: _, key: cur_key, @@ -541,7 +541,7 @@ impl PersistentMerkleNode { } else { Err(StakeTableError::MismatchedKey) } - } + }, } } } @@ -584,7 +584,7 @@ impl Iterator for IntoIter { // put the left-most child to the last, so it is visited first. 
self.unvisited.extend(children.into_iter().rev()); self.next() - } + }, PersistentMerkleNode::Leaf { comm: _, key, diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs index 6267dd21ca..0117e302d5 100644 --- a/hotshot-stake-table/src/vec_based.rs +++ b/hotshot-stake-table/src/vec_based.rs @@ -126,7 +126,7 @@ where self.head_total_stake -= self.head.stake_amount[*pos]; self.head.stake_amount[*pos] = U256::zero(); Ok(()) - } + }, None => Err(StakeTableError::KeyNotFound), } } @@ -306,7 +306,7 @@ where self.head_total_stake -= old_value; self.head_total_stake += value; Ok(old_value) - } + }, None => Err(StakeTableError::KeyNotFound), } } diff --git a/hotshot-stake-table/src/vec_based/config.rs b/hotshot-stake-table/src/vec_based/config.rs index 4415555142..a752088765 100644 --- a/hotshot-stake-table/src/vec_based/config.rs +++ b/hotshot-stake-table/src/vec_based/config.rs @@ -41,7 +41,7 @@ impl ToFields for QCVerKey { FieldType::from_le_bytes_mod_order(&bytes[31..62]), FieldType::from_le_bytes_mod_order(&bytes[62..]), ] - } + }, Err(_) => unreachable!(), } } diff --git a/hotshot-state-prover/src/circuit.rs b/hotshot-state-prover/src/circuit.rs index baf5c8af38..c45f517c9a 100644 --- a/hotshot-state-prover/src/circuit.rs +++ b/hotshot-state-prover/src/circuit.rs @@ -331,8 +331,10 @@ where #[cfg(test)] mod tests { use ark_ed_on_bn254::EdwardsConfig as Config; - use hotshot_types::light_client::LightClientState; - use hotshot_types::traits::stake_table::{SnapshotVersion, StakeTableScheme}; + use hotshot_types::{ + light_client::LightClientState, + traits::stake_table::{SnapshotVersion, StakeTableScheme}, + }; use jf_crhf::CRHF; use jf_relation::Circuit; use jf_rescue::crhf::VariableLengthRescueCRHF; diff --git a/hotshot-state-prover/src/service.rs b/hotshot-state-prover/src/service.rs index 60a31acc31..d03dfd51d1 100644 --- a/hotshot-state-prover/src/service.rs +++ b/hotshot-state-prover/src/service.rs @@ -9,13 +9,13 @@ use std::{ use 
anyhow::{anyhow, Context, Result}; use contract_bindings_ethers::light_client::{LightClient, LightClientErrors}; use displaydoc::Display; -use ethers::middleware::{ - gas_oracle::{GasCategory, GasOracle}, - signer::SignerMiddlewareError, -}; use ethers::{ core::k256::ecdsa::SigningKey, - middleware::SignerMiddleware, + middleware::{ + gas_oracle::{GasCategory, GasOracle}, + signer::SignerMiddlewareError, + SignerMiddleware, + }, providers::{Http, Middleware, Provider, ProviderError}, signers::{LocalWallet, Signer, Wallet}, types::{transaction::eip2718::TypedTransaction, Address, U256}, @@ -42,8 +42,7 @@ use jf_pcs::prelude::UnivariateUniversalParams; use jf_plonk::errors::PlonkError; use jf_relation::Circuit as _; use jf_signature::constants::CS_ID_SCHNORR; -use sequencer_utils::blocknative::BlockNative; -use sequencer_utils::deployer::is_proxy_contract; +use sequencer_utils::{blocknative::BlockNative, deployer::is_proxy_contract}; use serde::Deserialize; use surf_disco::Client; use tide_disco::{error::ServerError, Api}; @@ -155,12 +154,12 @@ async fn init_stake_table_from_sequencer( Err(e) => { tracing::error!("Failed to parse the network config: {e}"); sleep(Duration::from_secs(5)).await; - } + }, }, Err(e) => { tracing::error!("Failed to fetch the network config: {e}"); sleep(Duration::from_secs(5)).await; - } + }, } }; @@ -288,7 +287,7 @@ pub async fn read_contract_state( Err(e) => { tracing::error!("unable to read finalized_state from contract: {}", e); return Err(ProverError::ContractError(e.into())); - } + }, }; let st_state: ParsedStakeTableState = match contract.genesis_stake_table_state().call().await { Ok(s) => s.into(), @@ -298,7 +297,7 @@ pub async fn read_contract_state( e ); return Err(ProverError::ContractError(e.into())); - } + }, }; Ok((state.into(), st_state.into())) @@ -330,10 +329,10 @@ pub async fn submit_state_and_proof( priority_fee ); } - } + }, Err(e) => { tracing::warn!("!! 
BlockNative Price Oracle failed: {}", e); - } + }, } } diff --git a/hotshot-task-impls/src/builder.rs b/hotshot-task-impls/src/builder.rs index d40d041e6d..ca5acd10ef 100644 --- a/hotshot-task-impls/src/builder.rs +++ b/hotshot-task-impls/src/builder.rs @@ -43,10 +43,10 @@ impl From for BuilderClientError { match value { BuilderApiError::Request(source) | BuilderApiError::TxnUnpack(source) => { Self::Api(source.to_string()) - } + }, BuilderApiError::TxnSubmit(source) | BuilderApiError::BuilderAddress(source) => { Self::Api(source.to_string()) - } + }, BuilderApiError::Custom { message, .. } => Self::Api(message), BuilderApiError::BlockAvailable { source, .. } | BuilderApiError::BlockClaim { source, .. } => match source { diff --git a/hotshot-task-impls/src/consensus/mod.rs b/hotshot-task-impls/src/consensus/mod.rs index 6e303ee4fd..98560947f1 100644 --- a/hotshot-task-impls/src/consensus/mod.rs +++ b/hotshot-task-impls/src/consensus/mod.rs @@ -4,6 +4,8 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . 
+use std::{sync::Arc, time::Instant}; + use async_broadcast::{Receiver, Sender}; use async_trait::async_trait; use hotshot_task::task::TaskState; @@ -22,7 +24,6 @@ use hotshot_types::{ vote::HasViewNumber, }; use hotshot_utils::anytrace::*; -use std::{sync::Arc, time::Instant}; use tokio::task::JoinHandle; use tracing::instrument; @@ -120,14 +121,14 @@ impl, V: Versions> ConsensusTaskSt { tracing::debug!("Failed to handle QuorumVoteRecv event; error = {e}"); } - } + }, HotShotEvent::TimeoutVoteRecv(ref vote) => { if let Err(e) = handle_timeout_vote_recv(vote, Arc::clone(&event), &sender, self).await { tracing::debug!("Failed to handle TimeoutVoteRecv event; error = {e}"); } - } + }, HotShotEvent::ViewChange(new_view_number, epoch_number) => { if let Err(e) = handle_view_change(*new_view_number, *epoch_number, &sender, &receiver, self) @@ -136,12 +137,12 @@ impl, V: Versions> ConsensusTaskSt tracing::trace!("Failed to handle ViewChange event; error = {e}"); } self.view_start_time = Instant::now(); - } + }, HotShotEvent::Timeout(view_number, epoch) => { if let Err(e) = handle_timeout(*view_number, *epoch, &sender, self).await { tracing::debug!("Failed to handle Timeout event; error = {e}"); } - } + }, HotShotEvent::ExtendedQc2Formed(eqc) => { let cert_view = eqc.view_number(); let cert_block_number = self @@ -168,7 +169,7 @@ impl, V: Versions> ConsensusTaskSt &sender, ) .await; - } + }, HotShotEvent::ExtendedQcRecv(high_qc, next_epoch_high_qc, _) => { if !self .consensus @@ -217,8 +218,8 @@ impl, V: Versions> ConsensusTaskSt ) .await; } - } - _ => {} + }, + _ => {}, } Ok(()) diff --git a/hotshot-task-impls/src/da.rs b/hotshot-task-impls/src/da.rs index f8bf3add36..25fa5ec14d 100644 --- a/hotshot-task-impls/src/da.rs +++ b/hotshot-task-impls/src/da.rs @@ -10,10 +10,10 @@ use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; use async_trait::async_trait; use hotshot_task::task::TaskState; -use hotshot_types::epoch_membership::EpochMembershipCoordinator; use 
hotshot_types::{ consensus::{Consensus, OuterConsensus, PayloadWithMetadata}, data::{vid_commitment, DaProposal2, PackedBundle}, + epoch_membership::EpochMembershipCoordinator, event::{Event, EventType}, message::{Proposal, UpgradeLock}, simple_certificate::DaCertificate2, @@ -141,7 +141,7 @@ impl, V: Versions> DaTaskState { let cur_view = self.consensus.read().await.cur_view(); let view_number = proposal.data.view_number(); @@ -315,7 +315,7 @@ impl, V: Versions> DaTaskState { tracing::debug!("DA vote recv, Main Task {:?}", vote.view_number()); // Check if we are the leader and the vote is from the sender. @@ -348,7 +348,7 @@ impl, V: Versions> DaTaskState { if *epoch > self.cur_epoch { self.cur_epoch = *epoch; @@ -364,7 +364,7 @@ impl, V: Versions> DaTaskState { let PackedBundle:: { encoded_transactions, @@ -434,8 +434,8 @@ impl, V: Versions> DaTaskState {} + }, + _ => {}, } Ok(()) } diff --git a/hotshot-task-impls/src/events.rs b/hotshot-task-impls/src/events.rs index a1e828f27a..c543265ef3 100644 --- a/hotshot-task-impls/src/events.rs +++ b/hotshot-task-impls/src/events.rs @@ -10,10 +10,9 @@ use async_broadcast::Sender; use either::Either; use hotshot_task::task::TaskEvent; use hotshot_types::{ - data::VidCommitment, data::{ DaProposal2, Leaf2, PackedBundle, QuorumProposal2, QuorumProposalWrapper, UpgradeProposal, - VidDisperse, VidDisperseShare, + VidCommitment, VidDisperse, VidDisperseShare, }, message::Proposal, request_response::ProposalRequestPayload, @@ -284,7 +283,7 @@ impl HotShotEvent { HotShotEvent::QuorumVoteRecv(v) => Some(v.view_number()), HotShotEvent::TimeoutVoteRecv(v) | HotShotEvent::TimeoutVoteSend(v) => { Some(v.view_number()) - } + }, HotShotEvent::QuorumProposalRecv(proposal, _) | HotShotEvent::QuorumProposalSend(proposal, _) | HotShotEvent::QuorumProposalValidated(proposal, _) @@ -292,16 +291,16 @@ impl HotShotEvent { | HotShotEvent::QuorumProposalResponseSend(_, proposal) | HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) => { 
Some(proposal.data.view_number()) - } + }, HotShotEvent::QuorumVoteSend(vote) | HotShotEvent::ExtendedQuorumVoteSend(vote) => { Some(vote.view_number()) - } + }, HotShotEvent::DaProposalRecv(proposal, _) | HotShotEvent::DaProposalValidated(proposal, _) | HotShotEvent::DaProposalSend(proposal, _) => Some(proposal.data.view_number()), HotShotEvent::DaVoteRecv(vote) | HotShotEvent::DaVoteSend(vote) => { Some(vote.view_number()) - } + }, HotShotEvent::QcFormed(cert) => match cert { either::Left(qc) => Some(qc.view_number()), either::Right(tc) => Some(tc.view_number()), @@ -327,41 +326,41 @@ impl HotShotEvent { | HotShotEvent::ViewSyncCommitCertificateSend(cert, _) => Some(cert.view_number()), HotShotEvent::ViewSyncFinalizeCertificateRecv(cert) | HotShotEvent::ViewSyncFinalizeCertificateSend(cert, _) => Some(cert.view_number()), - HotShotEvent::SendPayloadCommitmentAndMetadata(_, _, _, view_number, _, _) => { + HotShotEvent::SendPayloadCommitmentAndMetadata(_, _, _, view_number, ..) => { Some(*view_number) - } + }, HotShotEvent::BlockRecv(packed_bundle) => Some(packed_bundle.view_number), HotShotEvent::Shutdown - | HotShotEvent::TransactionSend(_, _) + | HotShotEvent::TransactionSend(..) | HotShotEvent::TransactionsRecv(_) => None, HotShotEvent::VidDisperseSend(proposal, _) => Some(proposal.data.view_number()), HotShotEvent::VidShareRecv(_, proposal) | HotShotEvent::VidShareValidated(proposal) => { Some(proposal.data.view_number()) - } + }, HotShotEvent::UpgradeProposalRecv(proposal, _) | HotShotEvent::UpgradeProposalSend(proposal, _) => Some(proposal.data.view_number()), HotShotEvent::UpgradeVoteRecv(vote) | HotShotEvent::UpgradeVoteSend(vote) => { Some(vote.view_number()) - } + }, HotShotEvent::QuorumProposalRequestSend(req, _) | HotShotEvent::QuorumProposalRequestRecv(req, _) => Some(req.view_number), HotShotEvent::ViewChange(view_number, _) - | HotShotEvent::ViewSyncTimeout(view_number, _, _) + | HotShotEvent::ViewSyncTimeout(view_number, ..) 
| HotShotEvent::ViewSyncTrigger(view_number) | HotShotEvent::Timeout(view_number, ..) => Some(*view_number), HotShotEvent::DaCertificateRecv(cert) | HotShotEvent::DacSend(cert, _) => { Some(cert.view_number()) - } + }, HotShotEvent::DaCertificateValidated(cert) => Some(cert.view_number), HotShotEvent::UpgradeCertificateFormed(cert) => Some(cert.view_number()), - HotShotEvent::VidRequestSend(request, _, _) + HotShotEvent::VidRequestSend(request, ..) | HotShotEvent::VidRequestRecv(request, _) => Some(request.view), HotShotEvent::VidResponseSend(_, _, proposal) | HotShotEvent::VidResponseRecv(_, proposal) => Some(proposal.data.view_number()), HotShotEvent::HighQcRecv(qc, _) | HotShotEvent::HighQcSend(qc, ..) - | HotShotEvent::ExtendedQcRecv(qc, _, _) - | HotShotEvent::ExtendedQcSend(qc, _, _) => Some(qc.view_number()), + | HotShotEvent::ExtendedQcRecv(qc, ..) + | HotShotEvent::ExtendedQcSend(qc, ..) => Some(qc.view_number()), } } } @@ -378,20 +377,20 @@ impl Display for HotShotEvent { ), HotShotEvent::QuorumVoteRecv(v) => { write!(f, "QuorumVoteRecv(view_number={:?})", v.view_number()) - } + }, HotShotEvent::ExtendedQuorumVoteSend(v) => { write!( f, "ExtendedQuorumVoteSend(view_number={:?})", v.view_number() ) - } + }, HotShotEvent::TimeoutVoteRecv(v) => { write!(f, "TimeoutVoteRecv(view_number={:?})", v.view_number()) - } + }, HotShotEvent::TimeoutVoteSend(v) => { write!(f, "TimeoutVoteSend(view_number={:?})", v.view_number()) - } + }, HotShotEvent::DaProposalRecv(proposal, _) => write!( f, "DaProposalRecv(view_number={:?})", @@ -404,10 +403,10 @@ impl Display for HotShotEvent { ), HotShotEvent::DaVoteRecv(vote) => { write!(f, "DaVoteRecv(view_number={:?})", vote.view_number()) - } + }, HotShotEvent::DaCertificateRecv(cert) => { write!(f, "DaCertificateRecv(view_number={:?})", cert.view_number()) - } + }, HotShotEvent::DaCertificateValidated(cert) => write!( f, "DaCertificateValidated(view_number={:?})", @@ -420,7 +419,7 @@ impl Display for HotShotEvent { ), 
HotShotEvent::QuorumVoteSend(vote) => { write!(f, "QuorumVoteSend(view_number={:?})", vote.view_number()) - } + }, HotShotEvent::QuorumProposalValidated(proposal, _) => write!( f, "QuorumProposalValidated(view_number={:?})", @@ -433,7 +432,7 @@ impl Display for HotShotEvent { ), HotShotEvent::DaVoteSend(vote) => { write!(f, "DaVoteSend(view_number={:?})", vote.view_number()) - } + }, HotShotEvent::QcFormed(cert) => match cert { either::Left(qc) => write!(f, "QcFormed(view_number={:?})", qc.view_number()), either::Right(tc) => write!(f, "QcFormed(view_number={:?})", tc.view_number()), @@ -445,26 +444,26 @@ impl Display for HotShotEvent { HotShotEvent::NextEpochQc2Formed(cert) => match cert { either::Left(qc) => { write!(f, "NextEpochQc2Formed(view_number={:?})", qc.view_number()) - } + }, either::Right(tc) => { write!(f, "NextEpochQc2Formed(view_number={:?})", tc.view_number()) - } + }, }, HotShotEvent::ExtendedQc2Formed(cert) => { write!(f, "ExtendedQc2Formed(view_number={:?})", cert.view_number()) - } + }, HotShotEvent::DacSend(cert, _) => { write!(f, "DacSend(view_number={:?})", cert.view_number()) - } + }, HotShotEvent::ViewChange(view_number, epoch_number) => { write!( f, "ViewChange(view_number={view_number:?}, epoch_number={epoch_number:?})" ) - } - HotShotEvent::ViewSyncTimeout(view_number, _, _) => { + }, + HotShotEvent::ViewSyncTimeout(view_number, ..) 
=> { write!(f, "ViewSyncTimeout(view_number={view_number:?})") - } + }, HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => write!( f, "ViewSyncPreCommitVoteRecv(view_number={:?})", @@ -501,59 +500,59 @@ impl Display for HotShotEvent { "ViewSyncPreCommitCertificateRecv(view_number={:?})", cert.view_number() ) - } + }, HotShotEvent::ViewSyncCommitCertificateRecv(cert) => { write!( f, "ViewSyncCommitCertificateRecv(view_number={:?})", cert.view_number() ) - } + }, HotShotEvent::ViewSyncFinalizeCertificateRecv(cert) => { write!( f, "ViewSyncFinalizeCertificateRecv(view_number={:?})", cert.view_number() ) - } + }, HotShotEvent::ViewSyncPreCommitCertificateSend(cert, _) => { write!( f, "ViewSyncPreCommitCertificateSend(view_number={:?})", cert.view_number() ) - } + }, HotShotEvent::ViewSyncCommitCertificateSend(cert, _) => { write!( f, "ViewSyncCommitCertificateSend(view_number={:?})", cert.view_number() ) - } + }, HotShotEvent::ViewSyncFinalizeCertificateSend(cert, _) => { write!( f, "ViewSyncFinalizeCertificateSend(view_number={:?})", cert.view_number() ) - } + }, HotShotEvent::ViewSyncTrigger(view_number) => { write!(f, "ViewSyncTrigger(view_number={view_number:?})") - } + }, HotShotEvent::Timeout(view_number, epoch) => { write!(f, "Timeout(view_number={view_number:?}, epoch={epoch:?})") - } + }, HotShotEvent::TransactionsRecv(_) => write!(f, "TransactionsRecv"), - HotShotEvent::TransactionSend(_, _) => write!(f, "TransactionSend"), - HotShotEvent::SendPayloadCommitmentAndMetadata(_, _, _, view_number, _, _) => { + HotShotEvent::TransactionSend(..) => write!(f, "TransactionSend"), + HotShotEvent::SendPayloadCommitmentAndMetadata(_, _, _, view_number, ..) 
=> { write!( f, "SendPayloadCommitmentAndMetadata(view_number={view_number:?})" ) - } + }, HotShotEvent::BlockRecv(packed_bundle) => { write!(f, "BlockRecv(view_number={:?})", packed_bundle.view_number) - } + }, HotShotEvent::VidDisperseSend(proposal, _) => write!( f, "VidDisperseSend(view_number={:?})", @@ -581,10 +580,10 @@ impl Display for HotShotEvent { ), HotShotEvent::UpgradeVoteRecv(vote) => { write!(f, "UpgradeVoteRecv(view_number={:?})", vote.view_number()) - } + }, HotShotEvent::UpgradeVoteSend(vote) => { write!(f, "UpgradeVoteSend(view_number={:?})", vote.view_number()) - } + }, HotShotEvent::UpgradeCertificateFormed(cert) => write!( f, "UpgradeCertificateFormed(view_number={:?})", @@ -592,63 +591,63 @@ impl Display for HotShotEvent { ), HotShotEvent::QuorumProposalRequestSend(view_number, _) => { write!(f, "QuorumProposalRequestSend(view_number={view_number:?})") - } + }, HotShotEvent::QuorumProposalRequestRecv(view_number, _) => { write!(f, "QuorumProposalRequestRecv(view_number={view_number:?})") - } + }, HotShotEvent::QuorumProposalResponseSend(_, proposal) => { write!( f, "QuorumProposalResponseSend(view_number={:?})", proposal.data.view_number() ) - } + }, HotShotEvent::QuorumProposalResponseRecv(proposal) => { write!( f, "QuorumProposalResponseRecv(view_number={:?})", proposal.data.view_number() ) - } + }, HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) => { write!( f, "QuorumProposalPreliminarilyValidated(view_number={:?}", proposal.data.view_number() ) - } - HotShotEvent::VidRequestSend(request, _, _) => { + }, + HotShotEvent::VidRequestSend(request, ..) 
=> { write!(f, "VidRequestSend(view_number={:?}", request.view) - } + }, HotShotEvent::VidRequestRecv(request, _) => { write!(f, "VidRequestRecv(view_number={:?}", request.view) - } + }, HotShotEvent::VidResponseSend(_, _, proposal) => { write!( f, "VidResponseSend(view_number={:?}", proposal.data.view_number() ) - } + }, HotShotEvent::VidResponseRecv(_, proposal) => { write!( f, "VidResponseRecv(view_number={:?}", proposal.data.view_number() ) - } + }, HotShotEvent::HighQcRecv(qc, _) => { write!(f, "HighQcRecv(view_number={:?}", qc.view_number()) - } + }, HotShotEvent::HighQcSend(qc, ..) => { write!(f, "HighQcSend(view_number={:?}", qc.view_number()) - } + }, HotShotEvent::ExtendedQcRecv(qc, ..) => { write!(f, "ExtendedQcRecv(view_number={:?}", qc.view_number()) - } + }, HotShotEvent::ExtendedQcSend(qc, ..) => { write!(f, "ExtendedQcSend(view_number={:?}", qc.view_number()) - } + }, } } } diff --git a/hotshot-task-impls/src/helpers.rs b/hotshot-task-impls/src/helpers.rs index 5fc78869db..6884bc0169 100644 --- a/hotshot-task-impls/src/helpers.rs +++ b/hotshot-task-impls/src/helpers.rs @@ -4,6 +4,12 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . 
+use std::{ + collections::{HashMap, HashSet}, + sync::Arc, + time::{Duration, Instant}, +}; + use async_broadcast::{Receiver, SendError, Sender}; use async_lock::RwLock; use committable::{Commitment, Committable}; @@ -23,6 +29,7 @@ use hotshot_types::{ election::Membership, node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, + storage::Storage, BlockPayload, ValidatedState, }, utils::{ @@ -33,11 +40,6 @@ use hotshot_types::{ StakeTableEntries, }; use hotshot_utils::anytrace::*; -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, - time::{Duration, Instant}, -}; use tokio::time::timeout; use tracing::instrument; @@ -180,10 +182,11 @@ pub(crate) async fn fetch_proposal( } /// Handles calling add_epoch_root and sync_l1 on Membership if necessary. -async fn decide_epoch_root( +async fn decide_epoch_root>( decided_leaf: &Leaf2, epoch_height: u64, membership: &Arc>, + storage: &Arc>, ) { let decided_block_number = decided_leaf.block_header().block_number(); @@ -192,6 +195,19 @@ async fn decide_epoch_root( let next_epoch_number = TYPES::Epoch::new(epoch_from_block_number(decided_block_number, epoch_height) + 2); + if let Err(e) = storage + .write() + .await + .add_epoch_root(next_epoch_number, decided_leaf.block_header().clone()) + .await + { + tracing::error!( + "Failed to store epoch root for epoch {:?}: {}", + next_epoch_number, + e + ); + } + let write_callback = { tracing::debug!("Calling add_epoch_root for epoch {:?}", next_epoch_number); let membership_reader = membership.read().await; @@ -251,13 +267,14 @@ impl Default for LeafChainTraversalOutcome { /// # Panics /// If the leaf chain contains no decided leaf while reaching a decided view, which should be /// impossible. 
-pub async fn decide_from_proposal_2( +pub async fn decide_from_proposal_2>( proposal: &QuorumProposalWrapper, consensus: OuterConsensus, existing_upgrade_cert: Arc>>>, public_key: &TYPES::SignatureKey, with_epochs: bool, membership: &Arc>, + storage: &Arc>, ) -> LeafChainTraversalOutcome { let mut res = LeafChainTraversalOutcome::default(); let consensus_reader = consensus.read().await; @@ -331,10 +348,14 @@ pub async fn decide_from_proposal_2( let epoch_height = consensus_reader.epoch_height; drop(consensus_reader); - if let Some(decided_leaf_info) = res.leaf_views.last() { - decide_epoch_root(&decided_leaf_info.leaf, epoch_height, membership).await; - } else { - tracing::info!("No decided leaf while a view has been decided."); + for decided_leaf_info in &res.leaf_views { + decide_epoch_root::( + &decided_leaf_info.leaf, + epoch_height, + membership, + storage, + ) + .await; } } @@ -372,13 +393,14 @@ pub async fn decide_from_proposal_2( /// # Panics /// If the leaf chain contains no decided leaf while reaching a decided view, which should be /// impossible. 
-pub async fn decide_from_proposal( +pub async fn decide_from_proposal, V: Versions>( proposal: &QuorumProposalWrapper, consensus: OuterConsensus, existing_upgrade_cert: Arc>>>, public_key: &TYPES::SignatureKey, with_epochs: bool, membership: &Arc>, + storage: &Arc>, ) -> LeafChainTraversalOutcome { let consensus_reader = consensus.read().await; let existing_upgrade_cert_reader = existing_upgrade_cert.read().await; @@ -489,10 +511,14 @@ pub async fn decide_from_proposal( drop(consensus_reader); if with_epochs && res.new_decided_view_number.is_some() { - if let Some(decided_leaf_info) = res.leaf_views.last() { - decide_epoch_root(&decided_leaf_info.leaf, epoch_height, membership).await; - } else { - tracing::info!("No decided leaf while a view has been decided."); + for decided_leaf_info in &res.leaf_views { + decide_epoch_root::( + &decided_leaf_info.leaf, + epoch_height, + membership, + storage, + ) + .await; } } @@ -789,7 +815,7 @@ pub(crate) async fn validate_proposal_view_and_certs< *view_number, e ) })?; - } + }, ViewChangeEvidence2::ViewSync(view_sync_cert) => { ensure!( view_sync_cert.view_number == view_number, @@ -813,7 +839,7 @@ pub(crate) async fn validate_proposal_view_and_certs< ) .await .context(|e| warn!("Invalid view sync finalize cert provided: {}", e))?; - } + }, } } @@ -846,13 +872,13 @@ pub async fn broadcast_event(event: E, sender: &Send "Event sender queue overflow, Oldest event removed form queue: {:?}", overflowed ); - } + }, Err(SendError(e)) => { tracing::warn!( "Event: {:?}\n Sending failed, event stream probably shutdown", e ); - } + }, } } diff --git a/hotshot-task-impls/src/network.rs b/hotshot-task-impls/src/network.rs index 8994a594fb..a7ebb87c0a 100644 --- a/hotshot-task-impls/src/network.rs +++ b/hotshot-task-impls/src/network.rs @@ -85,7 +85,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::QuorumProposalRecv(convert_proposal(proposal), sender) - } + }, GeneralConsensusMessage::Proposal2(proposal) => { if !self 
.upgrade_lock @@ -96,10 +96,10 @@ impl NetworkMessageTaskState { return; } HotShotEvent::QuorumProposalRecv(convert_proposal(proposal), sender) - } + }, GeneralConsensusMessage::ProposalRequested(req, sig) => { HotShotEvent::QuorumProposalRequestRecv(req, sig) - } + }, GeneralConsensusMessage::ProposalResponse(proposal) => { if self .upgrade_lock @@ -110,7 +110,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::QuorumProposalResponseRecv(convert_proposal(proposal)) - } + }, GeneralConsensusMessage::ProposalResponse2(proposal) => { if !self .upgrade_lock @@ -121,21 +121,21 @@ impl NetworkMessageTaskState { return; } HotShotEvent::QuorumProposalResponseRecv(convert_proposal(proposal)) - } + }, GeneralConsensusMessage::Vote(vote) => { if self.upgrade_lock.epochs_enabled(vote.view_number()).await { tracing::warn!("received GeneralConsensusMessage::Vote for view {} but epochs are enabled for that view", vote.view_number()); return; } HotShotEvent::QuorumVoteRecv(vote.to_vote2()) - } + }, GeneralConsensusMessage::Vote2(vote) => { if !self.upgrade_lock.epochs_enabled(vote.view_number()).await { tracing::warn!("received GeneralConsensusMessage::Vote2 for view {} but epochs are not enabled for that view", vote.view_number()); return; } HotShotEvent::QuorumVoteRecv(vote) - } + }, GeneralConsensusMessage::ViewSyncPreCommitVote(view_sync_message) => { if self .upgrade_lock @@ -146,7 +146,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncPreCommitVoteRecv(view_sync_message.to_vote2()) - } + }, GeneralConsensusMessage::ViewSyncPreCommitVote2(view_sync_message) => { if !self .upgrade_lock @@ -157,7 +157,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncPreCommitVoteRecv(view_sync_message) - } + }, GeneralConsensusMessage::ViewSyncPreCommitCertificate( view_sync_message, ) => { @@ -172,7 +172,7 @@ impl NetworkMessageTaskState { HotShotEvent::ViewSyncPreCommitCertificateRecv( view_sync_message.to_vsc2(), ) - } + }, 
GeneralConsensusMessage::ViewSyncPreCommitCertificate2( view_sync_message, ) => { @@ -185,7 +185,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncPreCommitCertificateRecv(view_sync_message) - } + }, GeneralConsensusMessage::ViewSyncCommitVote(view_sync_message) => { if self .upgrade_lock @@ -196,7 +196,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncCommitVoteRecv(view_sync_message.to_vote2()) - } + }, GeneralConsensusMessage::ViewSyncCommitVote2(view_sync_message) => { if !self .upgrade_lock @@ -207,7 +207,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncCommitVoteRecv(view_sync_message) - } + }, GeneralConsensusMessage::ViewSyncCommitCertificate(view_sync_message) => { if self .upgrade_lock @@ -218,7 +218,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncCommitCertificateRecv(view_sync_message.to_vsc2()) - } + }, GeneralConsensusMessage::ViewSyncCommitCertificate2(view_sync_message) => { if !self .upgrade_lock @@ -229,7 +229,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncCommitCertificateRecv(view_sync_message) - } + }, GeneralConsensusMessage::ViewSyncFinalizeVote(view_sync_message) => { if self .upgrade_lock @@ -240,7 +240,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncFinalizeVoteRecv(view_sync_message.to_vote2()) - } + }, GeneralConsensusMessage::ViewSyncFinalizeVote2(view_sync_message) => { if !self .upgrade_lock @@ -251,7 +251,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncFinalizeVoteRecv(view_sync_message) - } + }, GeneralConsensusMessage::ViewSyncFinalizeCertificate(view_sync_message) => { if self .upgrade_lock @@ -264,7 +264,7 @@ impl NetworkMessageTaskState { HotShotEvent::ViewSyncFinalizeCertificateRecv( view_sync_message.to_vsc2(), ) - } + }, GeneralConsensusMessage::ViewSyncFinalizeCertificate2( view_sync_message, ) => { @@ -277,7 +277,7 @@ impl NetworkMessageTaskState { return; } 
HotShotEvent::ViewSyncFinalizeCertificateRecv(view_sync_message) - } + }, GeneralConsensusMessage::TimeoutVote(message) => { if self .upgrade_lock @@ -288,7 +288,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::TimeoutVoteRecv(message.to_vote2()) - } + }, GeneralConsensusMessage::TimeoutVote2(message) => { if !self .upgrade_lock @@ -299,18 +299,18 @@ impl NetworkMessageTaskState { return; } HotShotEvent::TimeoutVoteRecv(message) - } + }, GeneralConsensusMessage::UpgradeProposal(message) => { HotShotEvent::UpgradeProposalRecv(message, sender) - } + }, GeneralConsensusMessage::UpgradeVote(message) => { tracing::error!("Received upgrade vote!"); HotShotEvent::UpgradeVoteRecv(message) - } + }, GeneralConsensusMessage::HighQc(qc) => HotShotEvent::HighQcRecv(qc, sender), GeneralConsensusMessage::ExtendedQc(qc, next_epoch_qc) => { HotShotEvent::ExtendedQcRecv(qc, next_epoch_qc, sender) - } + }, }, SequencingMessage::Da(da_message) => match da_message { DaConsensusMessage::DaProposal(proposal) => { @@ -323,7 +323,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::DaProposalRecv(convert_proposal(proposal), sender) - } + }, DaConsensusMessage::DaProposal2(proposal) => { if !self .upgrade_lock @@ -334,35 +334,35 @@ impl NetworkMessageTaskState { return; } HotShotEvent::DaProposalRecv(proposal, sender) - } + }, DaConsensusMessage::DaVote(vote) => { if self.upgrade_lock.epochs_enabled(vote.view_number()).await { tracing::warn!("received DaConsensusMessage::DaVote for view {} but epochs are enabled for that view", vote.view_number()); return; } HotShotEvent::DaVoteRecv(vote.clone().to_vote2()) - } + }, DaConsensusMessage::DaVote2(vote) => { if !self.upgrade_lock.epochs_enabled(vote.view_number()).await { tracing::warn!("received DaConsensusMessage::DaVote2 for view {} but epochs are not enabled for that view", vote.view_number()); return; } HotShotEvent::DaVoteRecv(vote.clone()) - } + }, DaConsensusMessage::DaCertificate(cert) => { if 
self.upgrade_lock.epochs_enabled(cert.view_number()).await { tracing::warn!("received DaConsensusMessage::DaCertificate for view {} but epochs are enabled for that view", cert.view_number()); return; } HotShotEvent::DaCertificateRecv(cert.to_dac2()) - } + }, DaConsensusMessage::DaCertificate2(cert) => { if !self.upgrade_lock.epochs_enabled(cert.view_number()).await { tracing::warn!("received DaConsensusMessage::DaCertificate2 for view {} but epochs are not enabled for that view", cert.view_number()); return; } HotShotEvent::DaCertificateRecv(cert) - } + }, DaConsensusMessage::VidDisperseMsg(proposal) => { if self .upgrade_lock @@ -373,7 +373,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::VidShareRecv(sender, convert_proposal(proposal)) - } + }, DaConsensusMessage::VidDisperseMsg2(proposal) => { if !self .upgrade_lock @@ -384,11 +384,11 @@ impl NetworkMessageTaskState { return; } HotShotEvent::VidShareRecv(sender, convert_proposal(proposal)) - } + }, }, }; broadcast_event(Arc::new(event), &self.internal_event_stream).await; - } + }, // Handle data messages MessageKind::Data(message) => match message { @@ -403,7 +403,7 @@ impl NetworkMessageTaskState { &self.internal_event_stream, ) .await; - } + }, DataMessage::DataResponse(response) => { if let ResponseMessage::Found(message) = response { match message { @@ -416,7 +416,7 @@ impl NetworkMessageTaskState { &self.internal_event_stream, ) .await; - } + }, SequencingMessage::Da(DaConsensusMessage::VidDisperseMsg2( proposal, )) => { @@ -428,11 +428,11 @@ impl NetworkMessageTaskState { &self.internal_event_stream, ) .await; - } - _ => {} + }, + _ => {}, } } - } + }, DataMessage::RequestData(data) => { let req_data = data.clone(); if let RequestKind::Vid(_view_number, _key) = req_data.request { @@ -442,7 +442,7 @@ impl NetworkMessageTaskState { ) .await; } - } + }, }, // Handle external messages @@ -459,7 +459,7 @@ impl NetworkMessageTaskState { &self.external_event_stream, ) .await; - } + }, } } } @@ -607,7 
+607,7 @@ impl< Err(e) => { tracing::error!("Failed to serialize message: {}", e); continue; - } + }, }; messages.insert(recipient, serialized_message); @@ -630,7 +630,7 @@ impl< return; } match net.vid_broadcast_message(messages).await { - Ok(()) => {} + Ok(()) => {}, Err(e) => tracing::warn!("Failed to send message from network task: {:?}", e), } }); @@ -665,7 +665,7 @@ impl< Err(e) => { tracing::warn!("Not Sending {:?} because of storage error: {:?}", action, e); Err(()) - } + }, } } else { Ok(()) @@ -718,7 +718,7 @@ impl< }; Some((sender, message, TransmitType::Broadcast)) - } + }, // ED Each network task is subscribed to all these message types. Need filters per network task HotShotEvent::QuorumVoteSend(vote) => { @@ -740,7 +740,7 @@ impl< e ); return None; - } + }, }; let message = if self.upgrade_lock.epochs_enabled(vote.view_number()).await { @@ -754,7 +754,7 @@ impl< }; Some((vote.signing_key(), message, TransmitType::Direct(leader))) - } + }, HotShotEvent::ExtendedQuorumVoteSend(vote) => { *maybe_action = Some(HotShotAction::Vote); let message = if self.upgrade_lock.epochs_enabled(vote.view_number()).await { @@ -768,7 +768,7 @@ impl< }; Some((vote.signing_key(), message, TransmitType::Broadcast)) - } + }, HotShotEvent::QuorumProposalRequestSend(req, signature) => Some(( req.key.clone(), MessageKind::::from_consensus_message(SequencingMessage::General( @@ -796,11 +796,11 @@ impl< message, TransmitType::Direct(sender_key), )) - } + }, HotShotEvent::VidDisperseSend(proposal, sender) => { self.handle_vid_disperse_proposal(proposal, &sender).await; None - } + }, HotShotEvent::DaProposalSend(proposal, sender) => { *maybe_action = Some(HotShotAction::DaPropose); @@ -819,7 +819,7 @@ impl< }; Some((sender, message, TransmitType::DaCommitteeBroadcast)) - } + }, HotShotEvent::DaVoteSend(vote) => { *maybe_action = Some(HotShotAction::DaVote); let view_number = vote.view_number(); @@ -839,7 +839,7 @@ impl< e ); return None; - } + }, }; let message = if 
self.upgrade_lock.epochs_enabled(view_number).await { @@ -853,7 +853,7 @@ impl< }; Some((vote.signing_key(), message, TransmitType::Direct(leader))) - } + }, HotShotEvent::DacSend(certificate, sender) => { *maybe_action = Some(HotShotAction::DaCert); let message = if self @@ -871,7 +871,7 @@ impl< }; Some((sender, message, TransmitType::Broadcast)) - } + }, HotShotEvent::ViewSyncPreCommitVoteSend(vote) => { let view_number = vote.view_number() + vote.date().relay; let leader = match self @@ -890,7 +890,7 @@ impl< e ); return None; - } + }, }; let message = if self.upgrade_lock.epochs_enabled(vote.view_number()).await { MessageKind::::from_consensus_message(SequencingMessage::General( @@ -903,7 +903,7 @@ impl< }; Some((vote.signing_key(), message, TransmitType::Direct(leader))) - } + }, HotShotEvent::ViewSyncCommitVoteSend(vote) => { *maybe_action = Some(HotShotAction::ViewSyncVote); let view_number = vote.view_number() + vote.date().relay; @@ -923,7 +923,7 @@ impl< e ); return None; - } + }, }; let message = if self.upgrade_lock.epochs_enabled(vote.view_number()).await { MessageKind::::from_consensus_message(SequencingMessage::General( @@ -936,7 +936,7 @@ impl< }; Some((vote.signing_key(), message, TransmitType::Direct(leader))) - } + }, HotShotEvent::ViewSyncFinalizeVoteSend(vote) => { *maybe_action = Some(HotShotAction::ViewSyncVote); let view_number = vote.view_number() + vote.date().relay; @@ -956,7 +956,7 @@ impl< e ); return None; - } + }, }; let message = if self.upgrade_lock.epochs_enabled(vote.view_number()).await { MessageKind::::from_consensus_message(SequencingMessage::General( @@ -969,7 +969,7 @@ impl< }; Some((vote.signing_key(), message, TransmitType::Direct(leader))) - } + }, HotShotEvent::ViewSyncPreCommitCertificateSend(certificate, sender) => { let view_number = certificate.view_number(); let message = if self.upgrade_lock.epochs_enabled(view_number).await { @@ -983,7 +983,7 @@ impl< }; Some((sender, message, TransmitType::Broadcast)) - } + }, 
HotShotEvent::ViewSyncCommitCertificateSend(certificate, sender) => { let view_number = certificate.view_number(); let message = if self.upgrade_lock.epochs_enabled(view_number).await { @@ -997,7 +997,7 @@ impl< }; Some((sender, message, TransmitType::Broadcast)) - } + }, HotShotEvent::ViewSyncFinalizeCertificateSend(certificate, sender) => { let view_number = certificate.view_number(); let message = if self.upgrade_lock.epochs_enabled(view_number).await { @@ -1011,7 +1011,7 @@ impl< }; Some((sender, message, TransmitType::Broadcast)) - } + }, HotShotEvent::TimeoutVoteSend(vote) => { *maybe_action = Some(HotShotAction::Vote); let view_number = vote.view_number() + 1; @@ -1031,7 +1031,7 @@ impl< e ); return None; - } + }, }; let message = if self.upgrade_lock.epochs_enabled(vote.view_number()).await { MessageKind::::from_consensus_message(SequencingMessage::General( @@ -1044,7 +1044,7 @@ impl< }; Some((vote.signing_key(), message, TransmitType::Direct(leader))) - } + }, HotShotEvent::UpgradeProposalSend(proposal, sender) => Some(( sender, MessageKind::::from_consensus_message(SequencingMessage::General( @@ -1071,7 +1071,7 @@ impl< e ); return None; - } + }, }; Some(( vote.signing_key(), @@ -1080,7 +1080,7 @@ impl< )), TransmitType::Direct(leader), )) - } + }, HotShotEvent::ViewChange(view, epoch) => { self.view = view; if epoch > self.epoch { @@ -1096,7 +1096,7 @@ impl< .await; }); None - } + }, HotShotEvent::VidRequestSend(req, sender, to) => Some(( sender, MessageKind::Data(DataMessage::RequestData(req)), @@ -1126,7 +1126,7 @@ impl< vid_share_proposal, )), ))) - } + }, VidDisperseShare::V1(data) => { if !epochs_enabled { tracing::warn!( @@ -1145,10 +1145,10 @@ impl< vid_share_proposal, )), ))) - } + }, }; Some((sender, message, TransmitType::Direct(to))) - } + }, HotShotEvent::HighQcSend(quorum_cert, leader, sender) => Some(( sender, MessageKind::Consensus(SequencingMessage::General( @@ -1234,18 +1234,18 @@ impl< Err(e) => { tracing::error!("Failed to serialize 
message: {}", e); return; - } + }, }; let transmit_result = match transmit { TransmitType::Direct(recipient) => { network.direct_message(serialized_message, recipient).await - } + }, TransmitType::Broadcast => { network .broadcast_message(serialized_message, committee_topic, broadcast_delay) .await - } + }, TransmitType::DaCommitteeBroadcast => { network .da_broadcast_message( @@ -1254,11 +1254,11 @@ impl< broadcast_delay, ) .await - } + }, }; match transmit_result { - Ok(()) => {} + Ok(()) => {}, Err(e) => tracing::warn!("Failed to send message task: {:?}", e), } }); diff --git a/hotshot-task-impls/src/quorum_proposal/handlers.rs b/hotshot-task-impls/src/quorum_proposal/handlers.rs index 2203370d3d..16f50edec8 100644 --- a/hotshot-task-impls/src/quorum_proposal/handlers.rs +++ b/hotshot-task-impls/src/quorum_proposal/handlers.rs @@ -13,11 +13,6 @@ use std::{ time::{Duration, Instant}, }; -use crate::{ - events::HotShotEvent, - helpers::{broadcast_event, parent_leaf_and_state, wait_for_next_epoch_qc}, - quorum_proposal::{QuorumProposalTaskState, UpgradeLock, Versions}, -}; use anyhow::{ensure, Context, Result}; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; @@ -42,6 +37,12 @@ use hotshot_utils::anytrace::*; use tracing::instrument; use vbs::version::StaticVersionType; +use crate::{ + events::HotShotEvent, + helpers::{broadcast_event, parent_leaf_and_state, wait_for_next_epoch_qc}, + quorum_proposal::{QuorumProposalTaskState, UpgradeLock, Versions}, +}; + /// Proposal dependency types. These types represent events that precipitate a proposal. 
#[derive(PartialEq, Debug)] pub(crate) enum ProposalDependency { @@ -431,22 +432,22 @@ impl HandleDepOutput for ProposalDependencyHandle< block_view: *view, auction_result: auction_result.clone(), }); - } + }, HotShotEvent::Qc2Formed(cert) => match cert { either::Right(timeout) => { timeout_certificate = Some(timeout.clone()); - } + }, either::Left(qc) => { parent_qc = Some(qc.clone()); - } + }, }, HotShotEvent::ViewSyncFinalizeCertificateRecv(cert) => { view_sync_finalize_cert = Some(cert.clone()); - } + }, HotShotEvent::VidDisperseSend(share, _) => { vid_share = Some(share.clone()); - } - _ => {} + }, + _ => {}, } } diff --git a/hotshot-task-impls/src/quorum_proposal/mod.rs b/hotshot-task-impls/src/quorum_proposal/mod.rs index 88c09de09e..1b933f8374 100644 --- a/hotshot-task-impls/src/quorum_proposal/mod.rs +++ b/hotshot-task-impls/src/quorum_proposal/mod.rs @@ -15,7 +15,6 @@ use hotshot_task::{ dependency_task::DependencyTask, task::TaskState, }; -use hotshot_types::StakeTableEntries; use hotshot_types::{ consensus::OuterConsensus, epoch_membership::EpochMembershipCoordinator, @@ -28,14 +27,14 @@ use hotshot_types::{ }, utils::EpochTransitionIndicator, vote::{Certificate, HasViewNumber}, + StakeTableEntries, }; use hotshot_utils::anytrace::*; use tokio::task::JoinHandle; use tracing::instrument; use self::handlers::{ProposalDependency, ProposalDependencyHandle}; -use crate::events::HotShotEvent; -use crate::quorum_proposal::handlers::handle_eqc_formed; +use crate::{events::HotShotEvent, quorum_proposal::handlers::handle_eqc_formed}; mod handlers; @@ -115,14 +114,14 @@ impl, V: Versions> } else { return false; } - } + }, ProposalDependency::TimeoutCert => { if let HotShotEvent::Qc2Formed(either::Right(timeout)) = event { timeout.view_number() + 1 } else { return false; } - } + }, ProposalDependency::ViewSyncCert => { if let HotShotEvent::ViewSyncFinalizeCertificateRecv(view_sync_cert) = event { @@ -130,7 +129,7 @@ impl, V: Versions> } else { return false; } - } + 
}, ProposalDependency::Proposal => { if let HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) = event { @@ -138,7 +137,7 @@ impl, V: Versions> } else { return false; } - } + }, ProposalDependency::PayloadAndMetadata => { if let HotShotEvent::SendPayloadCommitmentAndMetadata( _payload_commitment, @@ -153,14 +152,14 @@ impl, V: Versions> } else { return false; } - } + }, ProposalDependency::VidShare => { if let HotShotEvent::VidDisperseSend(vid_disperse, _) = event { vid_disperse.data.view_number() } else { return false; } - } + }, }; let valid = event_view == view_number; if valid { @@ -219,25 +218,25 @@ impl, V: Versions> match event.as_ref() { HotShotEvent::SendPayloadCommitmentAndMetadata(..) => { payload_commitment_dependency.mark_as_completed(Arc::clone(&event)); - } + }, HotShotEvent::QuorumProposalPreliminarilyValidated(..) => { proposal_dependency.mark_as_completed(event); - } + }, HotShotEvent::Qc2Formed(quorum_certificate) => match quorum_certificate { Either::Right(_) => { timeout_dependency.mark_as_completed(event); - } + }, Either::Left(_) => { qc_dependency.mark_as_completed(event); - } + }, }, HotShotEvent::ViewSyncFinalizeCertificateRecv(_) => { view_sync_dependency.mark_as_completed(event); - } - HotShotEvent::VidDisperseSend(_, _) => { + }, + HotShotEvent::VidDisperseSend(..) 
=> { vid_share_dependency.mark_as_completed(event); - } - _ => {} + }, + _ => {}, }; // We have three cases to consider: @@ -410,7 +409,7 @@ impl, V: Versions> self.formed_upgrade_certificate = Some(cert.clone()); } - } + }, HotShotEvent::Qc2Formed(cert) => match cert.clone() { either::Right(timeout_cert) => { let view_number = timeout_cert.view_number + 1; @@ -423,7 +422,7 @@ impl, V: Versions> epoch_transition_indicator, ) .await?; - } + }, either::Left(qc) => { // Only update if the qc is from a newer view if qc.view_number <= self.consensus.read().await.high_qc().view_number { @@ -462,7 +461,7 @@ impl, V: Versions> epoch_transition_indicator, ) .await?; - } + }, }, HotShotEvent::SendPayloadCommitmentAndMetadata( _payload_commitment, @@ -483,7 +482,7 @@ impl, V: Versions> EpochTransitionIndicator::NotInTransition, ) .await?; - } + }, HotShotEvent::ViewSyncFinalizeCertificateRecv(certificate) => { let epoch_number = certificate.data.epoch; let epoch_membership = self @@ -521,7 +520,7 @@ impl, V: Versions> EpochTransitionIndicator::NotInTransition, ) .await?; - } + }, HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) => { let view_number = proposal.data.view_number(); // All nodes get the latest proposed view as a proxy of `cur_view` of old. 
@@ -538,7 +537,7 @@ impl, V: Versions> epoch_transition_indicator, ) .await?; - } + }, HotShotEvent::QuorumProposalSend(proposal, _) => { let view = proposal.data.view_number(); @@ -546,7 +545,7 @@ impl, V: Versions> self.update_latest_proposed_view(view).await, "Failed to update latest proposed view" ); - } + }, HotShotEvent::VidDisperseSend(vid_disperse, _) => { let view_number = vid_disperse.data.view_number(); self.create_dependency_task_if_new( @@ -558,18 +557,18 @@ impl, V: Versions> EpochTransitionIndicator::NotInTransition, ) .await?; - } + }, HotShotEvent::ViewChange(view, epoch) => { if epoch > &self.cur_epoch { self.cur_epoch = *epoch; } let keep_view = TYPES::View::new(view.saturating_sub(1)); self.cancel_tasks(keep_view); - } + }, HotShotEvent::Timeout(view, ..) => { let keep_view = TYPES::View::new(view.saturating_sub(1)); self.cancel_tasks(keep_view); - } + }, HotShotEvent::HighQcSend(qc, ..) | HotShotEvent::ExtendedQcSend(qc, ..) => { ensure!(qc.view_number() > self.highest_qc.view_number()); let cert_epoch_number = qc.data.epoch; @@ -590,7 +589,7 @@ impl, V: Versions> .context(|e| warn!("Quorum certificate {:?} was invalid: {}", qc.data(), e))?; self.highest_qc = qc.clone(); - } + }, HotShotEvent::NextEpochQc2Formed(Either::Left(next_epoch_qc)) => { // Only update if the qc is from a newer view let current_next_epoch_qc = @@ -624,8 +623,8 @@ impl, V: Versions> &event_sender, ) .await; - } - _ => {} + }, + _ => {}, } Ok(()) } diff --git a/hotshot-task-impls/src/quorum_proposal_recv/handlers.rs b/hotshot-task-impls/src/quorum_proposal_recv/handlers.rs index f965bbfada..a5dc3a8428 100644 --- a/hotshot-task-impls/src/quorum_proposal_recv/handlers.rs +++ b/hotshot-task-impls/src/quorum_proposal_recv/handlers.rs @@ -221,7 +221,7 @@ pub(crate) async fn handle_quorum_proposal_recv< } else { bail!("Parent state not found! 
Consensus internally inconsistent"); } - } + }, None => None, }; diff --git a/hotshot-task-impls/src/quorum_proposal_recv/mod.rs b/hotshot-task-impls/src/quorum_proposal_recv/mod.rs index b56130f3b3..3238252fc4 100644 --- a/hotshot-task-impls/src/quorum_proposal_recv/mod.rs +++ b/hotshot-task-impls/src/quorum_proposal_recv/mod.rs @@ -22,8 +22,8 @@ use hotshot_types::{ message::UpgradeLock, simple_certificate::UpgradeCertificate, simple_vote::HasEpoch, - traits::block_contents::BlockHeader, traits::{ + block_contents::BlockHeader, node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, }, @@ -180,10 +180,10 @@ impl, V: Versions> ) .await { - Ok(()) => {} + Ok(()) => {}, Err(e) => error!(?e, "Failed to validate the proposal"), } - } + }, HotShotEvent::ViewChange(view, epoch) => { if *epoch > self.cur_epoch { self.cur_epoch = *epoch; @@ -198,8 +198,8 @@ impl, V: Versions> // to enter view V + 1. let oldest_view_to_keep = TYPES::View::new(view.saturating_sub(1)); self.cancel_tasks(oldest_view_to_keep); - } - _ => {} + }, + _ => {}, } } } diff --git a/hotshot-task-impls/src/quorum_vote/handlers.rs b/hotshot-task-impls/src/quorum_vote/handlers.rs index 19a926a0fd..eb47f7c3e4 100644 --- a/hotshot-task-impls/src/quorum_vote/handlers.rs +++ b/hotshot-task-impls/src/quorum_vote/handlers.rs @@ -10,12 +10,11 @@ use async_broadcast::{InactiveReceiver, Sender}; use async_lock::RwLock; use chrono::Utc; use committable::Committable; -use hotshot_types::epoch_membership::EpochMembership; use hotshot_types::{ consensus::OuterConsensus, data::{Leaf2, QuorumProposalWrapper, VidDisperseShare}, drb::{compute_drb_result, DrbResult, INITIAL_DRB_RESULT}, - epoch_membership::EpochMembershipCoordinator, + epoch_membership::{EpochMembership, EpochMembershipCoordinator}, event::{Event, EventType}, message::{Proposal, UpgradeLock}, simple_vote::{HasEpoch, QuorumData2, QuorumVote2}, @@ -48,11 +47,25 @@ use crate::{ quorum_vote::Versions, }; 
-async fn notify_membership_of_drb_result( +async fn handle_drb_result>( membership: &EpochMembership, + storage: &Arc>, drb_result: DrbResult, ) { tracing::debug!("Calling add_drb_result for epoch {:?}", membership.epoch()); + + // membership.epoch should always be Some + if let Some(epoch) = membership.epoch() { + if let Err(e) = storage + .write() + .await + .add_drb_result(epoch, drb_result) + .await + { + tracing::error!("Failed to store drb result for epoch {:?}: {}", epoch, e); + } + } + membership.add_drb_result(drb_result).await; } @@ -101,17 +114,18 @@ async fn store_and_get_computed_drb_result< .insert(epoch_number, result); drop(consensus_writer); - notify_membership_of_drb_result::( + handle_drb_result::( &task_state .membership .membership_for_epoch(Some(epoch_number)) .await?, + &task_state.storage, result, ) .await; task_state.drb_computation = None; Ok(result) - } + }, Err(e) => Err(warn!("Error in DRB calculation: {:?}.", e)), } } @@ -221,12 +235,17 @@ async fn start_drb_task, V: Versio .drb_seeds_and_results .results .insert(*task_epoch, result); - notify_membership_of_drb_result::(&epoch_membership, result).await; + handle_drb_result::( + &epoch_membership, + &task_state.storage, + result, + ) + .await; task_state.drb_computation = None; - } + }, Err(e) => { tracing::error!("error joining DRB computation task: {e:?}"); - } + }, } } else if *task_epoch == new_epoch_number { return; @@ -335,11 +354,12 @@ async fn store_drb_seed_and_result .drb_seeds_and_results .results .insert(current_epoch_number + 1, result); - notify_membership_of_drb_result::( + handle_drb_result::( &task_state .membership .membership_for_epoch(Some(current_epoch_number + 1)) .await?, + &task_state.storage, result, ) .await; @@ -379,23 +399,25 @@ pub(crate) async fn handle_quorum_proposal_validated< included_txns, decided_upgrade_cert, } = if version >= V::Epochs::VERSION { - decide_from_proposal_2( + decide_from_proposal_2::( proposal, 
OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), Arc::clone(&task_state.upgrade_lock.decided_upgrade_certificate), &task_state.public_key, version >= V::Epochs::VERSION, task_state.membership.membership(), + &task_state.storage, ) .await } else { - decide_from_proposal::( + decide_from_proposal::( proposal, OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), Arc::clone(&task_state.upgrade_lock.decided_upgrade_certificate), &task_state.public_key, version >= V::Epochs::VERSION, task_state.membership.membership(), + &task_state.storage, ) .await }; @@ -583,10 +605,10 @@ pub(crate) async fn update_shared_state< Some((leaf, view)) => { maybe_validated_view = Some(view); Some(leaf) - } + }, None => None, } - } + }, }; let parent = maybe_parent.context(info!( diff --git a/hotshot-task-impls/src/quorum_vote/mod.rs b/hotshot-task-impls/src/quorum_vote/mod.rs index e3d7af2771..e27b3b7cd1 100644 --- a/hotshot-task-impls/src/quorum_vote/mod.rs +++ b/hotshot-task-impls/src/quorum_vote/mod.rs @@ -15,7 +15,6 @@ use hotshot_task::{ dependency_task::{DependencyTask, HandleDepOutput}, task::TaskState, }; -use hotshot_types::StakeTableEntries; use hotshot_types::{ consensus::{ConsensusMetricsValue, OuterConsensus}, data::{Leaf2, QuorumProposalWrapper}, @@ -33,6 +32,7 @@ use hotshot_types::{ }, utils::{epoch_from_block_number, option_epoch_from_block_number}, vote::{Certificate, HasViewNumber}, + StakeTableEntries, }; use hotshot_utils::anytrace::*; use tokio::task::JoinHandle; @@ -123,7 +123,7 @@ impl + 'static, V: Versions> Handl Err(e) => { tracing::error!("{e:#}"); return; - } + }, }; let proposal_payload_comm = proposal.data.block_header().payload_commitment(); let parent_commitment = parent_leaf.commit(); @@ -165,7 +165,7 @@ impl + 'static, V: Versions> Handl } leaf = Some(proposed_leaf); parent_view_number = Some(parent_leaf.view_number()); - } + }, HotShotEvent::DaCertificateValidated(cert) => { let cert_payload_comm = 
&cert.data().payload_commit; let next_epoch_cert_payload_comm = cert.data().next_epoch_payload_commit; @@ -187,7 +187,7 @@ impl + 'static, V: Versions> Handl } else { next_epoch_payload_commitment = next_epoch_cert_payload_comm; } - } + }, HotShotEvent::VidShareValidated(share) => { let vid_payload_commitment = &share.data.payload_commitment(); vid_share = Some(share.clone()); @@ -211,8 +211,8 @@ impl + 'static, V: Versions> Handl } else { payload_commitment = Some(*vid_payload_commitment); } - } - _ => {} + }, + _ => {}, } } @@ -269,7 +269,7 @@ impl + 'static, V: Versions> Handl Err(e) => { tracing::warn!("{:?}", e); return; - } + }, }; tracing::trace!( @@ -380,21 +380,21 @@ impl, V: Versions> QuorumVoteTaskS } else { return false; } - } + }, VoteDependency::Dac => { if let HotShotEvent::DaCertificateValidated(cert) = event { cert.view_number } else { return false; } - } + }, VoteDependency::Vid => { if let HotShotEvent::VidShareValidated(disperse) = event { disperse.data.view_number() } else { return false; } - } + }, }; if event_view == view_number { tracing::trace!( @@ -552,7 +552,7 @@ impl, V: Versions> QuorumVoteTaskS Arc::clone(&event), ); } - } + }, HotShotEvent::DaCertificateRecv(cert) => { let view = cert.view_number; @@ -595,7 +595,7 @@ impl, V: Versions> QuorumVoteTaskS &event_sender, Arc::clone(&event), ); - } + }, HotShotEvent::VidShareRecv(sender, share) => { let view = share.data.view_number(); // Do nothing if the VID share is old @@ -659,7 +659,7 @@ impl, V: Versions> QuorumVoteTaskS &event_sender, Arc::clone(&event), ); - } + }, HotShotEvent::Timeout(view, ..) 
=> { let view = TYPES::View::new(view.saturating_sub(1)); // cancel old tasks @@ -668,7 +668,7 @@ impl, V: Versions> QuorumVoteTaskS task.abort(); } self.vote_dependencies = current_tasks; - } + }, HotShotEvent::ViewChange(mut view, _) => { view = TYPES::View::new(view.saturating_sub(1)); if !self.update_latest_voted_view(view).await { @@ -680,8 +680,8 @@ impl, V: Versions> QuorumVoteTaskS task.abort(); } self.vote_dependencies = current_tasks; - } - _ => {} + }, + _ => {}, } Ok(()) } diff --git a/hotshot-task-impls/src/request.rs b/hotshot-task-impls/src/request.rs index 1bf327a98f..c0320105a8 100644 --- a/hotshot-task-impls/src/request.rs +++ b/hotshot-task-impls/src/request.rs @@ -147,14 +147,14 @@ impl> TaskState for NetworkRequest .await; } Ok(()) - } + }, HotShotEvent::ViewChange(view, _) => { let view = *view; if view > self.view { self.view = view; } Ok(()) - } + }, _ => Ok(()), } } @@ -226,7 +226,7 @@ impl> NetworkRequestState { tracing::warn!(e.message); return; - } + }, }; let mut da_committee_for_view = membership_reader.da_committee_members(view).await; if let Ok(leader) = membership_reader.leader(view).await { diff --git a/hotshot-task-impls/src/response.rs b/hotshot-task-impls/src/response.rs index 1ea66cc667..4f15a3a343 100644 --- a/hotshot-task-impls/src/response.rs +++ b/hotshot-task-impls/src/response.rs @@ -111,7 +111,7 @@ impl NetworkResponseState { ) .await; } - } + }, HotShotEvent::QuorumProposalRequestRecv(req, signature) => { // Make sure that this request came from who we think it did if !req.key.validate(signature, req.commit().as_ref()) { @@ -137,16 +137,16 @@ impl NetworkResponseState { ) .await; } - } + }, HotShotEvent::Shutdown => { return; - } - _ => {} + }, + _ => {}, } - } + }, Err(e) => { tracing::error!("Failed to receive event. 
{:?}", e); - } + }, } } } diff --git a/hotshot-task-impls/src/rewind.rs b/hotshot-task-impls/src/rewind.rs index 82e267dfb3..d4a9bcb58e 100644 --- a/hotshot-task-impls/src/rewind.rs +++ b/hotshot-task-impls/src/rewind.rs @@ -58,7 +58,7 @@ impl TaskState for RewindTaskState { Err(e) => { tracing::error!("Failed to write file {}; error = {}", filename, e); return; - } + }, }; for (event_number, event) in self.events.iter().enumerate() { diff --git a/hotshot-task-impls/src/transactions.rs b/hotshot-task-impls/src/transactions.rs index 06a65f8b63..c3ad41e5f0 100644 --- a/hotshot-task-impls/src/transactions.rs +++ b/hotshot-task-impls/src/transactions.rs @@ -16,8 +16,7 @@ use hotshot_builder_api::v0_1::block_info::AvailableBlockInfo; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::OuterConsensus, - data::VidCommitment, - data::{null_block, PackedBundle}, + data::{null_block, PackedBundle, VidCommitment}, epoch_membership::EpochMembershipCoordinator, event::{Event, EventType}, message::UpgradeLock, @@ -134,7 +133,7 @@ impl, V: Versions> TransactionTask Err(e) => { tracing::error!("Failed to calculate version: {:?}", e); return None; - } + }, }; if version < V::Marketplace::VERSION { @@ -159,7 +158,7 @@ impl, V: Versions> TransactionTask Err(err) => { tracing::error!("Upgrade certificate requires unsupported version, refusing to request blocks: {}", err); return None; - } + }, }; // Request a block from the builder unless we are between versions. 
@@ -303,11 +302,11 @@ impl, V: Versions> TransactionTask Ok(Err(e)) => { tracing::debug!("Failed to retrieve bundle: {e}"); continue; - } + }, Err(e) => { tracing::debug!("Failed to retrieve bundle: {e}"); continue; - } + }, } } @@ -384,7 +383,7 @@ impl, V: Versions> TransactionTask Err(err) => { tracing::error!("Upgrade certificate requires unsupported version, refusing to request blocks: {}", err); return None; - } + }, }; let packed_bundle = match self @@ -410,7 +409,7 @@ impl, V: Versions> TransactionTask .add(1); null_block - } + }, }; broadcast_event( @@ -458,7 +457,7 @@ impl, V: Versions> TransactionTask &self.output_event_stream, ) .await; - } + }, HotShotEvent::ViewChange(view, epoch) => { let view = TYPES::View::new(std::cmp::max(1, **view)); let epoch = if self.upgrade_lock.epochs_enabled(view).await { @@ -491,8 +490,8 @@ impl, V: Versions> TransactionTask self.handle_view_change(&event_stream, view, epoch).await; return Ok(()); } - } - _ => {} + }, + _ => {}, } Ok(()) } @@ -513,7 +512,7 @@ impl, V: Versions> TransactionTask // We still have time, will re-try in a bit sleep(RETRY_DELAY).await; continue; - } + }, } } } @@ -547,13 +546,13 @@ impl, V: Versions> TransactionTask let leaf = consensus_reader.saved_leaves().get(leaf_commitment).context (info!("Missing leaf with commitment {leaf_commitment} for view {target_view} in saved_leaves"))?; return Ok((target_view, leaf.payload_commitment())); - } + }, ViewInner::Failed => { // For failed views, backtrack target_view = TYPES::View::new(target_view.checked_sub(1).context(warn!("Reached genesis. 
Something is wrong -- have we not decided any blocks since genesis?"))?); continue; - } + }, } } } @@ -571,7 +570,7 @@ impl, V: Versions> TransactionTask Err(e) => { tracing::warn!("Failed to find last vid commitment in time: {e}"); return None; - } + }, }; let parent_comm_sig = match <::SignatureKey as SignatureKey>::sign( @@ -582,7 +581,7 @@ impl, V: Versions> TransactionTask Err(err) => { tracing::error!(%err, "Failed to sign block hash"); return None; - } + }, }; while task_start_time.elapsed() < self.builder_timeout { @@ -596,7 +595,7 @@ impl, V: Versions> TransactionTask // We got a block Ok(Ok(block)) => { return Some(block); - } + }, // We failed to get a block Ok(Err(err)) => { @@ -604,13 +603,13 @@ impl, V: Versions> TransactionTask // pause a bit sleep(RETRY_DELAY).await; continue; - } + }, // We timed out while getting available blocks Err(err) => { tracing::info!(%err, "Timeout while getting available blocks"); return None; - } + }, } } @@ -675,7 +674,7 @@ impl, V: Versions> TransactionTask Err(err) => { tracing::warn!(%err,"Error getting available blocks"); None - } + }, }) .flatten() .collect::>() @@ -735,7 +734,7 @@ impl, V: Versions> TransactionTask Err(err) => { tracing::error!(%err, "Failed to sign block hash"); continue; - } + }, }; let response = { @@ -751,7 +750,7 @@ impl, V: Versions> TransactionTask Err(err) => { tracing::warn!(%err, "Error claiming block data"); continue; - } + }, }; let header_input = match header_input { @@ -759,7 +758,7 @@ impl, V: Versions> TransactionTask Err(err) => { tracing::warn!(%err, "Error claiming header input"); continue; - } + }, }; // verify the signature over the message diff --git a/hotshot-task-impls/src/upgrade.rs b/hotshot-task-impls/src/upgrade.rs index 4eaffb5dd8..d45917076e 100644 --- a/hotshot-task-impls/src/upgrade.rs +++ b/hotshot-task-impls/src/upgrade.rs @@ -217,7 +217,7 @@ impl UpgradeTaskState { tracing::debug!("Sending upgrade vote {:?}", vote.view_number()); 
broadcast_event(Arc::new(HotShotEvent::UpgradeVoteSend(vote)), &tx).await; - } + }, HotShotEvent::UpgradeVoteRecv(ref vote) => { tracing::debug!("Upgrade vote recv, Main Task {:?}", vote.view_number()); @@ -248,7 +248,7 @@ impl UpgradeTaskState { EpochTransitionIndicator::NotInTransition, ) .await?; - } + }, HotShotEvent::ViewChange(new_view, epoch_number) => { if *epoch_number > self.cur_epoch { self.cur_epoch = *epoch_number; @@ -328,8 +328,8 @@ impl UpgradeTaskState { ) .await; } - } - _ => {} + }, + _ => {}, } Ok(()) } diff --git a/hotshot-task-impls/src/vid.rs b/hotshot-task-impls/src/vid.rs index 8f98e018a7..0767288ac9 100644 --- a/hotshot-task-impls/src/vid.rs +++ b/hotshot-task-impls/src/vid.rs @@ -160,7 +160,7 @@ impl, V: Versions> VidTaskState { if *epoch > self.cur_epoch { @@ -178,7 +178,7 @@ impl, V: Versions> VidTaskState { let proposed_block_number = proposal.data.block_header().block_number(); @@ -243,11 +243,11 @@ impl, V: Versions> VidTaskState { return Some(HotShotTaskCompleted); - } - _ => {} + }, + _ => {}, } None } diff --git a/hotshot-task-impls/src/view_sync.rs b/hotshot-task-impls/src/view_sync.rs index 8dcc296cfe..a87ed3aac7 100644 --- a/hotshot-task-impls/src/view_sync.rs +++ b/hotshot-task-impls/src/view_sync.rs @@ -233,7 +233,7 @@ impl ViewSyncTaskState { Err(e) => { tracing::warn!(e.message); return; - } + }, }; // We do not have a replica task already running, so start one @@ -278,25 +278,25 @@ impl ViewSyncTaskState { let view = certificate.view_number(); self.send_to_or_create_replica(event, view, &event_stream) .await; - } + }, HotShotEvent::ViewSyncCommitCertificateRecv(certificate) => { tracing::debug!("Received view sync cert for phase {:?}", certificate); let view = certificate.view_number(); self.send_to_or_create_replica(event, view, &event_stream) .await; - } + }, HotShotEvent::ViewSyncFinalizeCertificateRecv(certificate) => { tracing::debug!("Received view sync cert for phase {:?}", certificate); let view = 
certificate.view_number(); self.send_to_or_create_replica(event, view, &event_stream) .await; - } - HotShotEvent::ViewSyncTimeout(view, _, _) => { + }, + HotShotEvent::ViewSyncTimeout(view, ..) => { tracing::debug!("view sync timeout in main task {:?}", view); let view = *view; self.send_to_or_create_replica(event, view, &event_stream) .await; - } + }, HotShotEvent::ViewSyncPreCommitVoteRecv(ref vote) => { let mut map = self.pre_commit_relay_map.write().await; @@ -344,7 +344,7 @@ impl ViewSyncTaskState { .await?; relay_map.insert(relay, vote_collector); - } + }, HotShotEvent::ViewSyncCommitVoteRecv(ref vote) => { let mut map = self.commit_relay_map.write().await; @@ -392,7 +392,7 @@ impl ViewSyncTaskState { ) .await?; relay_map.insert(relay, vote_collector); - } + }, HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { let mut map = self.finalize_relay_map.write().await; @@ -441,7 +441,7 @@ impl ViewSyncTaskState { if let Ok(vote_task) = vote_collector { relay_map.insert(relay, vote_task); } - } + }, &HotShotEvent::ViewChange(new_view, epoch) => { if epoch > self.cur_epoch { @@ -483,7 +483,7 @@ impl ViewSyncTaskState { self.last_garbage_collected_view = self.cur_view - 1; } - } + }, &HotShotEvent::Timeout(view_number, ..) 
=> { // This is an old timeout and we can ignore it ensure!( @@ -528,9 +528,9 @@ impl ViewSyncTaskState { ) .await; } - } + }, - _ => {} + _ => {}, } Ok(()) } @@ -634,7 +634,7 @@ impl ViewSyncReplicaTaskState { .await; } })); - } + }, HotShotEvent::ViewSyncCommitCertificateRecv(certificate) => { let last_seen_certificate = ViewSyncPhase::Commit; @@ -741,7 +741,7 @@ impl ViewSyncReplicaTaskState { .await; } })); - } + }, HotShotEvent::ViewSyncFinalizeCertificateRecv(certificate) => { // Ignore certificate if it is for an older round @@ -796,7 +796,7 @@ impl ViewSyncReplicaTaskState { ) .await; return Some(HotShotTaskCompleted); - } + }, HotShotEvent::ViewSyncTrigger(view_number) => { let view_number = *view_number; @@ -850,7 +850,7 @@ impl ViewSyncReplicaTaskState { })); return None; - } + }, HotShotEvent::ViewSyncTimeout(round, relay, last_seen_certificate) => { let round = *round; @@ -884,11 +884,11 @@ impl ViewSyncReplicaTaskState { &event_stream, ) .await; - } + }, ViewSyncPhase::Finalize => { // This should never occur unimplemented!() - } + }, } self.timeout_task = Some(spawn({ @@ -917,7 +917,7 @@ impl ViewSyncReplicaTaskState { return None; } - } + }, _ => return None, } None diff --git a/hotshot-task-impls/src/vote_collection.rs b/hotshot-task-impls/src/vote_collection.rs index eb8a4cd0aa..9dc3ebc477 100644 --- a/hotshot-task-impls/src/vote_collection.rs +++ b/hotshot-task-impls/src/vote_collection.rs @@ -138,7 +138,7 @@ impl< self.accumulator = None; Ok(Some(cert)) - } + }, } } } @@ -279,7 +279,7 @@ where entry.insert(collector); Ok(()) - } + }, Entry::Occupied(mut entry) => { // handle the vote, and garbage collect if the vote collector is finished if entry @@ -293,7 +293,7 @@ where } Ok(()) - } + }, } } @@ -517,7 +517,7 @@ impl HotShotEvent::QuorumVoteRecv(vote) => { // #3967 REVIEW NOTE: Should we error if self.epoch is None? 
self.accumulate_vote(&vote.clone().into(), sender).await - } + }, _ => Ok(None), } } @@ -599,7 +599,7 @@ impl match event.as_ref() { HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => { self.accumulate_vote(vote, sender).await - } + }, _ => Ok(None), } } @@ -641,7 +641,7 @@ impl match event.as_ref() { HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { self.accumulate_vote(vote, sender).await - } + }, _ => Ok(None), } } diff --git a/hotshot-task/src/dependency.rs b/hotshot-task/src/dependency.rs index 7b3d7dfa0b..d4a0e2eed8 100644 --- a/hotshot-task/src/dependency.rs +++ b/hotshot-task/src/dependency.rs @@ -163,13 +163,13 @@ impl Dependency for EventDependency { if (self.match_fn)(&event) { return Some(event); } - } + }, Err(RecvError::Overflowed(n)) => { tracing::error!("Dependency Task overloaded, skipping {} events", n); - } + }, Err(RecvError::Closed) => { return None; - } + }, } } } diff --git a/hotshot-task/src/task.rs b/hotshot-task/src/task.rs index 170d7dbc02..c0e4ec5b07 100644 --- a/hotshot-task/src/task.rs +++ b/hotshot-task/src/task.rs @@ -86,13 +86,13 @@ impl Task { S::handle_event(&mut self.state, input, &self.sender, &self.receiver) .await .inspect_err(|e| tracing::debug!("{e}")); - } + }, Err(RecvError::Closed) => { break self.boxed_state(); - } + }, Err(e) => { tracing::error!("Failed to receive from event stream Error: {}", e); - } + }, } } }) diff --git a/hotshot-testing/src/block_builder/mod.rs b/hotshot-testing/src/block_builder/mod.rs index 103b59c14d..534e6167ef 100644 --- a/hotshot-testing/src/block_builder/mod.rs +++ b/hotshot-testing/src/block_builder/mod.rs @@ -109,13 +109,13 @@ pub fn run_builder_source( match event { BuilderChange::Up if handle.is_none() => { handle = Some(start_builder(url.clone(), source.clone())); - } + }, BuilderChange::Down => { if let Some(handle) = handle.take() { handle.abort(); } - } - _ => {} + }, + _ => {}, } } }); @@ -153,13 +153,13 @@ pub fn run_builder_source_0_1( match event { BuilderChange::Up if 
handle.is_none() => { handle = Some(start_builder(url.clone(), source.clone())); - } + }, BuilderChange::Down => { if let Some(handle) = handle.take() { handle.abort(); } - } - _ => {} + }, + _ => {}, } } }); diff --git a/hotshot-testing/src/block_builder/random.rs b/hotshot-testing/src/block_builder/random.rs index 7a8723b22f..ec7d6129e2 100644 --- a/hotshot-testing/src/block_builder/random.rs +++ b/hotshot-testing/src/block_builder/random.rs @@ -178,7 +178,7 @@ where match stream.next().await { None => { break; - } + }, Some(evt) => { if let EventType::ViewFinished { view_number } = evt.event { if let Some(change) = self.changes.remove(&view_number) { @@ -192,18 +192,18 @@ where self.blocks.clone(), ))) } - } + }, BuilderChange::Down => { if let Some(handle) = task.take() { handle.abort(); } - } - BuilderChange::FailClaims(_) => {} + }, + BuilderChange::FailClaims(_) => {}, } let _ = self.change_sender.broadcast(change).await; } } - } + }, } } }); diff --git a/hotshot-testing/src/block_builder/simple.rs b/hotshot-testing/src/block_builder/simple.rs index bc098033a8..c29cf8666e 100644 --- a/hotshot-testing/src/block_builder/simple.rs +++ b/hotshot-testing/src/block_builder/simple.rs @@ -382,7 +382,7 @@ impl BuilderTask for SimpleBuilderTask { match stream.next().await { None => { break; - } + }, Some(evt) => match evt.event { EventType::ViewFinished { view_number } => { if let Some(change) = self.changes.remove(&view_number) { @@ -392,14 +392,14 @@ impl BuilderTask for SimpleBuilderTask { should_build_blocks = false; self.transactions.write().await.clear(); self.blocks.write().await.clear(); - } + }, BuilderChange::FailClaims(value) => { self.should_fail_claims.store(value, Ordering::Relaxed); - } + }, } let _ = self.change_sender.broadcast(change).await; } - } + }, EventType::Decide { leaf_chain, .. 
} if should_build_blocks => { let mut queue = self.transactions.write().await; for leaf_info in leaf_chain.iter() { @@ -413,7 +413,7 @@ impl BuilderTask for SimpleBuilderTask { } } self.blocks.write().await.clear(); - } + }, EventType::DaProposal { proposal, .. } if should_build_blocks => { let payload = TYPES::BlockPayload::from_bytes( &proposal.data.encoded_transactions, @@ -429,7 +429,7 @@ impl BuilderTask for SimpleBuilderTask { txn.claimed = Some(now); } } - } + }, EventType::Transactions { transactions } if should_build_blocks => { let mut queue = self.transactions.write().await; for transaction in transactions { @@ -443,8 +443,8 @@ impl BuilderTask for SimpleBuilderTask { ); } } - } - _ => {} + }, + _ => {}, }, } } diff --git a/hotshot-testing/src/byzantine/byzantine_behaviour.rs b/hotshot-testing/src/byzantine/byzantine_behaviour.rs index 7816b3e354..d5c6647fa9 100644 --- a/hotshot-testing/src/byzantine/byzantine_behaviour.rs +++ b/hotshot-testing/src/byzantine/byzantine_behaviour.rs @@ -67,7 +67,7 @@ impl, V: Versions> EventTransforme consensus.write().await.reset_actions(); result - } + }, _ => vec![event.clone()], } } @@ -94,9 +94,9 @@ impl, V: Versions> EventTransforme _consensus: Arc>>, ) -> Vec> { match event { - HotShotEvent::QuorumProposalSend(_, _) | HotShotEvent::QuorumVoteSend(_) => { + HotShotEvent::QuorumProposalSend(..) 
| HotShotEvent::QuorumVoteSend(_) => { vec![event.clone(), event.clone()] - } + }, _ => vec![event.clone()], } } @@ -182,11 +182,11 @@ impl + std::fmt::Debug, V: Version self.handle_proposal_send_event(event, proposal, sender) .await, ]; - } + }, HotShotEvent::QuorumProposalValidated(proposal, _) => { self.validated_proposals.push(proposal.data.clone()); - } - _ => {} + }, + _ => {}, } vec![event.clone()] } @@ -412,7 +412,7 @@ impl + std::fmt::Debug, V: Version .unwrap(); return vec![HotShotEvent::QuorumVoteSend(vote)]; } - } + }, HotShotEvent::TimeoutVoteSend(vote) => { // Check if this view was a dishonest proposal view, if true dont send timeout let dishonest_proposals = self.dishonest_proposal_view_numbers.read().await; @@ -421,11 +421,11 @@ impl + std::fmt::Debug, V: Version // So, dont send the timeout to the next leader from this byzantine replica return vec![]; } - } + }, HotShotEvent::QuorumVoteSend(vote) => { self.votes_sent.push(vote.clone()); - } - _ => {} + }, + _ => {}, } vec![event.clone()] } diff --git a/hotshot-testing/src/consistency_task.rs b/hotshot-testing/src/consistency_task.rs index 8813725db9..9caac9c975 100644 --- a/hotshot-testing/src/consistency_task.rs +++ b/hotshot-testing/src/consistency_task.rs @@ -44,15 +44,15 @@ fn sanitize_node_map( reduced.dedup(); match reduced.len() { - 0 => {} + 0 => {}, 1 => { result.insert(*view, reduced[0].clone()); - } + }, _ => { bail!( "We have received inconsistent leaves for view {view:?}. 
Leaves:\n\n{leaves:?}" ); - } + }, } } @@ -300,12 +300,12 @@ impl, V: Versions> ConsistencyTas match result { Ok(TestProgress::Finished) => { let _ = self.test_sender.broadcast(TestEvent::Shutdown).await; - } + }, Err(e) => { self.add_error(e); let _ = self.test_sender.broadcast(TestEvent::Shutdown).await; - } - Ok(TestProgress::Incomplete) => {} + }, + Ok(TestProgress::Incomplete) => {}, } } diff --git a/hotshot-testing/src/helpers.rs b/hotshot-testing/src/helpers.rs index 7e331a9d65..c28cc276b3 100644 --- a/hotshot-testing/src/helpers.rs +++ b/hotshot-testing/src/helpers.rs @@ -5,6 +5,8 @@ // along with the HotShot repository. If not, see . #![allow(clippy::panic)] +use std::{collections::BTreeMap, fmt::Debug, hash::Hash, marker::PhantomData, sync::Arc}; + use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; use bitvec::bitvec; @@ -22,7 +24,6 @@ use hotshot_example_types::{ storage_types::TestStorage, }; use hotshot_task_impls::events::HotShotEvent; -use hotshot_types::traits::node_implementation::ConsensusTime; use hotshot_types::{ consensus::ConsensusMetricsValue, data::{vid_commitment, Leaf2, VidCommitment, VidDisperse, VidDisperseShare}, @@ -33,7 +34,7 @@ use hotshot_types::{ simple_vote::{DaData2, DaVote2, SimpleVote, VersionedVoteData}, traits::{ election::Membership, - node_implementation::{NodeType, Versions}, + node_implementation::{ConsensusTime, NodeType, Versions}, EncodeBytes, }, utils::{option_epoch_from_block_number, View, ViewInner}, @@ -42,7 +43,6 @@ use hotshot_types::{ }; use primitive_types::U256; use serde::Serialize; -use std::{collections::BTreeMap, fmt::Debug, hash::Hash, marker::PhantomData, sync::Arc}; use vbs::version::Version; use crate::{test_builder::TestDescription, test_launcher::TestLauncher}; diff --git a/hotshot-testing/src/predicates/event.rs b/hotshot-testing/src/predicates/event.rs index 6ee78fb2f8..4e1139f09a 100644 --- a/hotshot-testing/src/predicates/event.rs +++ b/hotshot-testing/src/predicates/event.rs @@ 
-217,7 +217,7 @@ where QuorumProposalSend(proposal, _) => { Some(proposal.data.block_header().payload_commitment()) == null_block::commitment::(num_storage_nodes) - } + }, _ => false, }); Box::new(EventPredicate { check, info }) diff --git a/hotshot-testing/src/script.rs b/hotshot-testing/src/script.rs index b25e286f9f..29a853c6ba 100644 --- a/hotshot-testing/src/script.rs +++ b/hotshot-testing/src/script.rs @@ -121,6 +121,6 @@ pub async fn validate_output_or_panic_in_script( "Stage {} | Output in {} failed to satisfy: {:?}.\n\nReceived:\n\n{:?}", stage_number, script_name, assert, output ) - } + }, } } diff --git a/hotshot-testing/src/spinning_task.rs b/hotshot-testing/src/spinning_task.rs index c13fe23d6b..ac92dc2287 100644 --- a/hotshot-testing/src/spinning_task.rs +++ b/hotshot-testing/src/spinning_task.rs @@ -199,10 +199,10 @@ where marketplace_config, ) .await - } + }, LateNodeContext::Restart => { panic!("Cannot spin up a node with Restart context") - } + }, }; let handle = context.run_tasks().await; @@ -219,13 +219,13 @@ where self.handles.write().await.push(node); } - } + }, NodeAction::Down => { if let Some(node) = self.handles.write().await.get_mut(idx) { tracing::error!("Node {} shutting down", idx); node.handle.shut_down().await; } - } + }, NodeAction::RestartDown(delay_views) => { let node_id = idx.try_into().unwrap(); if let Some(node) = self.handles.write().await.get_mut(idx) { @@ -327,25 +327,25 @@ where self.restart_contexts.insert(idx, new_ctx); } } - } + }, NodeAction::RestartUp => { if let Some(ctx) = self.restart_contexts.remove(&idx) { new_nodes.push((ctx.context, idx)); new_networks.push(ctx.network.clone()); } - } + }, NodeAction::NetworkUp => { if let Some(handle) = self.handles.write().await.get(idx) { tracing::error!("Node {} networks resuming", idx); handle.network.resume(); } - } + }, NodeAction::NetworkDown => { if let Some(handle) = self.handles.write().await.get(idx) { tracing::error!("Node {} networks pausing", idx); 
handle.network.pause(); } - } + }, } } } diff --git a/hotshot-testing/src/test_builder.rs b/hotshot-testing/src/test_builder.rs index 408c259360..31c69768e7 100644 --- a/hotshot-testing/src/test_builder.rs +++ b/hotshot-testing/src/test_builder.rs @@ -4,6 +4,8 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . +use std::{collections::HashMap, num::NonZeroUsize, rc::Rc, sync::Arc, time::Duration}; + use async_lock::RwLock; use hotshot::{ tasks::EventTransformerState, @@ -15,16 +17,14 @@ use hotshot_example_types::{ auction_results_provider_types::TestAuctionResultsProvider, node_types::TestTypes, state_types::TestInstanceState, storage_types::TestStorage, testable_delay::DelayConfig, }; -use hotshot_types::traits::node_implementation::ConsensusTime; use hotshot_types::{ consensus::ConsensusMetricsValue, drb::INITIAL_DRB_RESULT, epoch_membership::EpochMembershipCoordinator, - traits::node_implementation::{NodeType, Versions}, + traits::node_implementation::{ConsensusTime, NodeType, Versions}, HotShotConfig, PeerConfig, ValidatorConfig, }; use hotshot_utils::anytrace::*; -use std::{collections::HashMap, num::NonZeroUsize, rc::Rc, sync::Arc, time::Duration}; use tide_disco::Url; use vec1::Vec1; @@ -283,7 +283,7 @@ pub async fn create_test_handle< .await; left_handle - } + }, Behaviour::Byzantine(state) => { let state = Box::leak(state); state @@ -300,7 +300,7 @@ pub async fn create_test_handle< marketplace_config, ) .await - } + }, Behaviour::Standard => { let hotshot = SystemContext::::new( public_key, @@ -317,7 +317,7 @@ pub async fn create_test_handle< .await; hotshot.run_tasks().await - } + }, } } diff --git a/hotshot-testing/src/test_runner.rs b/hotshot-testing/src/test_runner.rs index a07cb509c8..d640050dae 100644 --- a/hotshot-testing/src/test_runner.rs +++ b/hotshot-testing/src/test_runner.rs @@ -5,14 +5,19 @@ // along with the HotShot repository. If not, see . 
#![allow(clippy::panic)] +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + marker::PhantomData, + sync::Arc, +}; + use async_broadcast::{broadcast, Receiver, Sender}; use async_lock::RwLock; use futures::future::join_all; -use hotshot::InitializerEpochInfo; use hotshot::{ traits::TestableNodeImplementation, types::{Event, SystemContextHandle}, - HotShotInitializer, MarketplaceConfig, SystemContext, + HotShotInitializer, InitializerEpochInfo, MarketplaceConfig, SystemContext, }; use hotshot_example_types::{ auction_results_provider_types::TestAuctionResultsProvider, @@ -22,11 +27,11 @@ use hotshot_example_types::{ }; use hotshot_fakeapi::fake_solver::FakeSolverState; use hotshot_task_impls::events::HotShotEvent; -use hotshot_types::drb::INITIAL_DRB_RESULT; use hotshot_types::{ consensus::ConsensusMetricsValue, constants::EVENT_CHANNEL_SIZE, data::Leaf2, + drb::INITIAL_DRB_RESULT, epoch_membership::EpochMembershipCoordinator, simple_certificate::QuorumCertificate2, traits::{ @@ -36,11 +41,6 @@ use hotshot_types::{ }, HotShotConfig, ValidatorConfig, }; -use std::{ - collections::{BTreeMap, HashMap, HashSet}, - marker::PhantomData, - sync::Arc, -}; use tide_disco::Url; use tokio::{spawn, task::JoinHandle}; #[allow(deprecated)] @@ -281,12 +281,12 @@ where Ok(res) => match res { TestResult::Pass => { info!("Task shut down successfully"); - } + }, TestResult::Fail(e) => error_list.push(e), }, Err(e) => { tracing::error!("Error Joining the test task {:?}", e); - } + }, } } @@ -560,14 +560,14 @@ where if let Some(task) = builder_tasks.pop() { task.start(Box::new(handle.event_stream())) } - } + }, std::cmp::Ordering::Equal => { // If we have more builder tasks than DA nodes, pin them all on the last node. 
while let Some(task) = builder_tasks.pop() { task.start(Box::new(handle.event_stream())) } - } - std::cmp::Ordering::Greater => {} + }, + std::cmp::Ordering::Greater => {}, } self.nodes.push(Node { diff --git a/hotshot-testing/src/test_task.rs b/hotshot-testing/src/test_task.rs index 14520c6d13..890fd2ddf9 100644 --- a/hotshot-testing/src/test_task.rs +++ b/hotshot-testing/src/test_task.rs @@ -158,12 +158,12 @@ impl TestTask { let _ = S::handle_event(&mut self.state, (input, id)) .await .inspect_err(|e| tracing::error!("{e}")); - } + }, Ok((Err(e), _id, _)) => { error!("Error from one channel in test task {:?}", e); sleep(Duration::from_millis(4000)).await; - } - _ => {} + }, + _ => {}, }; } }) @@ -202,7 +202,7 @@ pub async fn add_network_message_test_task< Err(e) => { error!("Failed to receive message: {:?}", e); continue; - } + }, }; // Deserialize the message @@ -212,7 +212,7 @@ pub async fn add_network_message_test_task< Err(e) => { tracing::error!("Failed to deserialize message: {:?}", e); continue; - } + }, }; // Handle the message diff --git a/hotshot-testing/src/txn_task.rs b/hotshot-testing/src/txn_task.rs index 41b5ec3b14..f20d1524f3 100644 --- a/hotshot-testing/src/txn_task.rs +++ b/hotshot-testing/src/txn_task.rs @@ -52,7 +52,7 @@ impl, V: Versions> TxnTask match handles.get(idx) { None => { tracing::error!("couldn't get node in txn task"); - } + }, Some(node) => { // use rand::seq::IteratorRandom; // we're assuming all nodes have the same leaf. 
@@ -64,7 +64,7 @@ impl, V: Versions> TxnTask .submit_transaction(txn.clone()) .await .expect("Could not send transaction"); - } + }, } } } diff --git a/hotshot-testing/src/view_generator.rs b/hotshot-testing/src/view_generator.rs index f46d4b1101..3209b92c25 100644 --- a/hotshot-testing/src/view_generator.rs +++ b/hotshot-testing/src/view_generator.rs @@ -675,7 +675,7 @@ impl Stream for TestViewGenerator { Poll::Ready(test_view) => { self.current_view = Some(test_view.clone()); Poll::Ready(Some(test_view)) - } + }, Poll::Pending => Poll::Pending, } } diff --git a/hotshot-testing/src/view_sync_task.rs b/hotshot-testing/src/view_sync_task.rs index 9ad967c74b..e41bfc0842 100644 --- a/hotshot-testing/src/view_sync_task.rs +++ b/hotshot-testing/src/view_sync_task.rs @@ -42,7 +42,7 @@ impl> TestTaskState async fn handle_event(&mut self, (event, id): (Self::Event, usize)) -> Result<()> { match event.as_ref() { // all the view sync events - HotShotEvent::ViewSyncTimeout(_, _, _) + HotShotEvent::ViewSyncTimeout(..) | HotShotEvent::ViewSyncPreCommitVoteRecv(_) | HotShotEvent::ViewSyncCommitVoteRecv(_) | HotShotEvent::ViewSyncFinalizeVoteRecv(_) @@ -52,12 +52,12 @@ impl> TestTaskState | HotShotEvent::ViewSyncPreCommitCertificateRecv(_) | HotShotEvent::ViewSyncCommitCertificateRecv(_) | HotShotEvent::ViewSyncFinalizeCertificateRecv(_) - | HotShotEvent::ViewSyncPreCommitCertificateSend(_, _) - | HotShotEvent::ViewSyncCommitCertificateSend(_, _) - | HotShotEvent::ViewSyncFinalizeCertificateSend(_, _) + | HotShotEvent::ViewSyncPreCommitCertificateSend(..) + | HotShotEvent::ViewSyncCommitCertificateSend(..) + | HotShotEvent::ViewSyncFinalizeCertificateSend(..) 
| HotShotEvent::ViewSyncTrigger(_) => { self.hit_view_sync.insert(id); - } + }, _ => (), } @@ -75,7 +75,7 @@ impl> TestTaskState hit_view_sync: self.hit_view_sync.clone(), })) } - } + }, } } } diff --git a/hotshot-testing/tests/tests_6/test_epochs.rs b/hotshot-testing/tests/tests_6/test_epochs.rs index bed8d185ac..3a161ec1dd 100644 --- a/hotshot-testing/tests/tests_6/test_epochs.rs +++ b/hotshot-testing/tests/tests_6/test_epochs.rs @@ -4,12 +4,14 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . +use std::time::Duration; + use hotshot_example_types::{ node_types::{ CombinedImpl, EpochUpgradeTestVersions, EpochsTestVersions, Libp2pImpl, MemoryImpl, PushCdnImpl, RandomOverlapQuorumFilterConfig, StableQuorumFilterConfig, - TestConsecutiveLeaderTypes, TestTwoStakeTablesTypes, TestTypes, - TestTypesRandomizedCommitteeMembers, TestTypesRandomizedLeader, TestTypesEpochCatchupTypes + TestConsecutiveLeaderTypes, TestTwoStakeTablesTypes, TestTypes, TestTypesEpochCatchupTypes, + TestTypesRandomizedCommitteeMembers, TestTypesRandomizedLeader, }, testable_delay::{DelayConfig, DelayOptions, DelaySettings, SupportedTraitTypesForAsyncDelay}, }; @@ -22,7 +24,6 @@ use hotshot_testing::{ test_builder::{TestDescription, TimingData}, view_sync_task::ViewSyncTaskDescription, }; -use std::time::Duration; cross_tests!( TestName: test_success_with_epochs, @@ -505,23 +506,23 @@ cross_tests!( // }; // let mut metadata = TestDescription::default().set_num_nodes(20,20); // let mut catchup_nodes = vec![]; -// +// // for i in 0..20 { // catchup_nodes.push(ChangeNode { // idx: i, // updown: NodeAction::RestartDown(0), // }) // } -// +// // metadata.timing_data = timing_data; -// +// // metadata.spinning_properties = SpinningTaskDescription { // // Restart all the nodes in view 10 // node_changes: vec![(10, catchup_nodes)], // }; // metadata.view_sync_properties = // hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 
20); -// +// // metadata.completion_task_description = // CompletionTaskDescription::TimeBasedCompletionTaskBuilder( // TimeBasedCompletionTaskDescription { @@ -536,7 +537,7 @@ cross_tests!( // decide_timeout: Duration::from_secs(20), // ..Default::default() // }; -// +// // metadata // }, // ); diff --git a/hotshot-types/src/consensus.rs b/hotshot-types/src/consensus.rs index 66ef1db9bf..c85fce8b7c 100644 --- a/hotshot-types/src/consensus.rs +++ b/hotshot-types/src/consensus.rs @@ -589,7 +589,7 @@ impl Consensus { // because the leader of view n + 1 may propose to the DA (and we would vote) // before the leader of view n. return true; - } + }, _ => return true, }; if view > *old_view { diff --git a/hotshot-types/src/data.rs b/hotshot-types/src/data.rs index 3deb60f5dd..ad3ae8c9e7 100644 --- a/hotshot-types/src/data.rs +++ b/hotshot-types/src/data.rs @@ -505,13 +505,13 @@ impl VidDisperseShare { .into_iter() .map(|share| Self::V0(share)) .collect() - } + }, VidDisperse::V1(vid_disperse) => { VidDisperseShare2::::from_vid_disperse(vid_disperse) .into_iter() .map(|share| Self::V1(share)) .collect() - } + }, } } @@ -672,10 +672,10 @@ impl ViewChangeEvidence { match self { ViewChangeEvidence::Timeout(timeout_cert) => { ViewChangeEvidence2::Timeout(timeout_cert.to_tc2()) - } + }, ViewChangeEvidence::ViewSync(view_sync_cert) => { ViewChangeEvidence2::ViewSync(view_sync_cert.to_vsc2()) - } + }, } } } @@ -705,10 +705,10 @@ impl ViewChangeEvidence2 { match self { ViewChangeEvidence2::Timeout(timeout_cert) => { ViewChangeEvidence::Timeout(timeout_cert.to_tc()) - } + }, ViewChangeEvidence2::ViewSync(view_sync_cert) => { ViewChangeEvidence::ViewSync(view_sync_cert.to_vsc()) - } + }, } } } @@ -1242,7 +1242,7 @@ impl Leaf2 { // Easiest cases are: // - no upgrade certificate on either: this is the most common case, and is always fine. // - if the parent didn't have a certificate, but we see one now, it just means that we have begun an upgrade: again, this is always fine. 
- (None | Some(_), None) => {} + (None | Some(_), None) => {}, // If we no longer see a cert, we have to make sure that we either: // - no longer care because we have passed new_version_first_view, or // - no longer care because we have passed `decide_by` without deciding the certificate. @@ -1252,13 +1252,13 @@ impl Leaf2 { || (self.view_number() > parent_cert.data.decide_by && decided_upgrade_certificate_read.is_none()), "The new leaf is missing an upgrade certificate that was present in its parent, and should still be live." ); - } + }, // If we both have a certificate, they should be identical. // Technically, this prevents us from initiating a new upgrade in the view immediately following an upgrade. // I think this is a fairly lax restriction. (Some(cert), Some(parent_cert)) => { ensure!(cert == parent_cert, "The new leaf does not extend the parent leaf, because it has attached a different upgrade certificate."); - } + }, } // This check should be added once we sort out the genesis leaf/justify_qc issue. @@ -1621,7 +1621,7 @@ impl Leaf { // Easiest cases are: // - no upgrade certificate on either: this is the most common case, and is always fine. // - if the parent didn't have a certificate, but we see one now, it just means that we have begun an upgrade: again, this is always fine. - (None | Some(_), None) => {} + (None | Some(_), None) => {}, // If we no longer see a cert, we have to make sure that we either: // - no longer care because we have passed new_version_first_view, or // - no longer care because we have passed `decide_by` without deciding the certificate. @@ -1631,13 +1631,13 @@ impl Leaf { || (self.view_number() > parent_cert.data.decide_by && decided_upgrade_certificate_read.is_none()), "The new leaf is missing an upgrade certificate that was present in its parent, and should still be live." ); - } + }, // If we both have a certificate, they should be identical. 
// Technically, this prevents us from initiating a new upgrade in the view immediately following an upgrade. // I think this is a fairly lax restriction. (Some(cert), Some(parent_cert)) => { ensure!(cert == parent_cert, "The new leaf does not extend the parent leaf, because it has attached a different upgrade certificate."); - } + }, } // This check should be added once we sort out the genesis leaf/justify_qc issue. diff --git a/hotshot-types/src/data/ns_table.rs b/hotshot-types/src/data/ns_table.rs index c645c36957..1f9ac59b6f 100644 --- a/hotshot-types/src/data/ns_table.rs +++ b/hotshot-types/src/data/ns_table.rs @@ -29,10 +29,15 @@ pub fn parse_ns_table(payload_byte_len: usize, bytes: &[u8]) -> Vec if num_entries != bytes.len().saturating_sub(NUM_NSS_BYTE_LEN) / NS_ID_BYTE_LEN.saturating_add(NS_OFFSET_BYTE_LEN) + || (num_entries == 0 && payload_byte_len != 0) { tracing::warn!("Failed to parse the metadata as namespace table. Use a single namespace table instead."); return vec![(0..payload_byte_len)]; } + // Early breaks for empty payload and namespace table + if num_entries == 0 { + return vec![(0..payload_byte_len)]; + } let mut l = 0; for i in 0..num_entries { let offset = NUM_NSS_BYTE_LEN + i * (NS_ID_BYTE_LEN + NS_OFFSET_BYTE_LEN) + NS_ID_BYTE_LEN; diff --git a/hotshot-types/src/data/vid_disperse.rs b/hotshot-types/src/data/vid_disperse.rs index f7c90e5c68..892aed79f9 100644 --- a/hotshot-types/src/data/vid_disperse.rs +++ b/hotshot-types/src/data/vid_disperse.rs @@ -13,6 +13,7 @@ use jf_vid::{VidDisperse as JfVidDisperse, VidScheme}; use serde::{Deserialize, Serialize}; use tokio::task::spawn_blocking; +use super::ns_table::parse_ns_table; use crate::{ epoch_membership::{EpochMembership, EpochMembershipCoordinator}, impl_has_epoch, @@ -29,8 +30,6 @@ use crate::{ vote::HasViewNumber, }; -use super::ns_table::parse_ns_table; - impl_has_epoch!( ADVZDisperse, AvidMDisperse, diff --git a/hotshot-types/src/epoch_membership.rs 
b/hotshot-types/src/epoch_membership.rs index 0494a0c76a..3a316cf033 100644 --- a/hotshot-types/src/epoch_membership.rs +++ b/hotshot-types/src/epoch_membership.rs @@ -1,17 +1,25 @@ -use std::collections::BTreeSet; -use std::num::NonZeroU64; -use std::{collections::HashMap, sync::Arc}; +use std::{ + collections::{BTreeSet, HashMap}, + num::NonZeroU64, + sync::Arc, +}; use async_broadcast::{broadcast, InactiveReceiver}; use async_lock::{Mutex, RwLock}; -use hotshot_utils::anytrace::{self, Error, Level, Result, DEFAULT_LOG_LEVEL}; -use hotshot_utils::{ensure, line_info, log, warn}; - -use crate::drb::DrbResult; -use crate::traits::election::Membership; -use crate::traits::node_implementation::{ConsensusTime, NodeType}; -use crate::utils::root_block_in_epoch; -use crate::PeerConfig; +use hotshot_utils::{ + anytrace::{self, Error, Level, Result, DEFAULT_LOG_LEVEL}, + ensure, line_info, log, warn, +}; + +use crate::{ + drb::DrbResult, + traits::{ + election::Membership, + node_implementation::{ConsensusTime, NodeType}, + }, + utils::root_block_in_epoch, + PeerConfig, +}; type EpochMap = HashMap<::Epoch, InactiveReceiver>>>; diff --git a/hotshot-types/src/lib.rs b/hotshot-types/src/lib.rs index eac8950a5e..0d9bf701fc 100644 --- a/hotshot-types/src/lib.rs +++ b/hotshot-types/src/lib.rs @@ -134,7 +134,7 @@ impl PeerConfig { Err(e) => { error!(?e, "Failed to serialize public key"); vec![] - } + }, } } @@ -148,7 +148,7 @@ impl PeerConfig { Err(e) => { error!(?e, "Failed to deserialize public key"); None - } + }, } } } diff --git a/hotshot-types/src/message.rs b/hotshot-types/src/message.rs index 12e07efd87..8f7ac65386 100644 --- a/hotshot-types/src/message.rs +++ b/hotshot-types/src/message.rs @@ -173,9 +173,7 @@ impl HasEpoch for MessageKind { fn epoch(&self) -> Option { match &self { MessageKind::Consensus(message) => message.epoch_number(), - MessageKind::Data( - DataMessage::SubmitTransaction(_, _) | DataMessage::RequestData(_), - ) + 
MessageKind::Data(DataMessage::SubmitTransaction(..) | DataMessage::RequestData(_)) | MessageKind::External(_) => None, MessageKind::Data(DataMessage::DataResponse(msg)) => match msg { ResponseMessage::Found(m) => m.epoch_number(), @@ -325,66 +323,66 @@ impl SequencingMessage { // view of leader in the leaf when proposal // this should match replica upon receipt p.data.view_number() - } + }, GeneralConsensusMessage::Proposal2(p) => { // view of leader in the leaf when proposal // this should match replica upon receipt p.data.view_number() - } + }, GeneralConsensusMessage::ProposalRequested(req, _) => req.view_number, GeneralConsensusMessage::ProposalResponse(proposal) => { proposal.data.view_number() - } + }, GeneralConsensusMessage::ProposalResponse2(proposal) => { proposal.data.view_number() - } + }, GeneralConsensusMessage::Vote(vote_message) => vote_message.view_number(), GeneralConsensusMessage::Vote2(vote_message) => vote_message.view_number(), GeneralConsensusMessage::TimeoutVote(message) => message.view_number(), GeneralConsensusMessage::ViewSyncPreCommitVote(message) => { message.view_number() - } + }, GeneralConsensusMessage::ViewSyncCommitVote(message) => message.view_number(), GeneralConsensusMessage::ViewSyncFinalizeVote(message) => message.view_number(), GeneralConsensusMessage::ViewSyncPreCommitCertificate(message) => { message.view_number() - } + }, GeneralConsensusMessage::ViewSyncCommitCertificate(message) => { message.view_number() - } + }, GeneralConsensusMessage::ViewSyncFinalizeCertificate(message) => { message.view_number() - } + }, GeneralConsensusMessage::TimeoutVote2(message) => message.view_number(), GeneralConsensusMessage::ViewSyncPreCommitVote2(message) => { message.view_number() - } + }, GeneralConsensusMessage::ViewSyncCommitVote2(message) => message.view_number(), GeneralConsensusMessage::ViewSyncFinalizeVote2(message) => { message.view_number() - } + }, GeneralConsensusMessage::ViewSyncPreCommitCertificate2(message) => { 
message.view_number() - } + }, GeneralConsensusMessage::ViewSyncCommitCertificate2(message) => { message.view_number() - } + }, GeneralConsensusMessage::ViewSyncFinalizeCertificate2(message) => { message.view_number() - } + }, GeneralConsensusMessage::UpgradeProposal(message) => message.data.view_number(), GeneralConsensusMessage::UpgradeVote(message) => message.view_number(), GeneralConsensusMessage::HighQc(qc) | GeneralConsensusMessage::ExtendedQc(qc, _) => qc.view_number(), } - } + }, SequencingMessage::Da(da_message) => { match da_message { DaConsensusMessage::DaProposal(p) => { // view of leader in the leaf when proposal // this should match replica upon receipt p.data.view_number() - } + }, DaConsensusMessage::DaVote(vote_message) => vote_message.view_number(), DaConsensusMessage::DaCertificate(cert) => cert.view_number, DaConsensusMessage::VidDisperseMsg(disperse) => disperse.data.view_number(), @@ -392,12 +390,12 @@ impl SequencingMessage { // view of leader in the leaf when proposal // this should match replica upon receipt p.data.view_number() - } + }, DaConsensusMessage::DaVote2(vote_message) => vote_message.view_number(), DaConsensusMessage::DaCertificate2(cert) => cert.view_number, DaConsensusMessage::VidDisperseMsg2(disperse) => disperse.data.view_number(), } - } + }, } } @@ -410,13 +408,13 @@ impl SequencingMessage { // view of leader in the leaf when proposal // this should match replica upon receipt p.data.epoch() - } + }, GeneralConsensusMessage::Proposal2(p) => { // view of leader in the leaf when proposal // this should match replica upon receipt p.data.epoch() - } - GeneralConsensusMessage::ProposalRequested(_, _) => None, + }, + GeneralConsensusMessage::ProposalRequested(..) 
=> None, GeneralConsensusMessage::ProposalResponse(proposal) => proposal.data.epoch(), GeneralConsensusMessage::ProposalResponse2(proposal) => proposal.data.epoch(), GeneralConsensusMessage::Vote(vote_message) => vote_message.epoch(), @@ -427,35 +425,35 @@ impl SequencingMessage { GeneralConsensusMessage::ViewSyncFinalizeVote(message) => message.epoch(), GeneralConsensusMessage::ViewSyncPreCommitCertificate(message) => { message.epoch() - } + }, GeneralConsensusMessage::ViewSyncCommitCertificate(message) => message.epoch(), GeneralConsensusMessage::ViewSyncFinalizeCertificate(message) => { message.epoch() - } + }, GeneralConsensusMessage::TimeoutVote2(message) => message.epoch(), GeneralConsensusMessage::ViewSyncPreCommitVote2(message) => message.epoch(), GeneralConsensusMessage::ViewSyncCommitVote2(message) => message.epoch(), GeneralConsensusMessage::ViewSyncFinalizeVote2(message) => message.epoch(), GeneralConsensusMessage::ViewSyncPreCommitCertificate2(message) => { message.epoch() - } + }, GeneralConsensusMessage::ViewSyncCommitCertificate2(message) => message.epoch(), GeneralConsensusMessage::ViewSyncFinalizeCertificate2(message) => { message.epoch() - } + }, GeneralConsensusMessage::UpgradeProposal(message) => message.data.epoch(), GeneralConsensusMessage::UpgradeVote(message) => message.epoch(), GeneralConsensusMessage::HighQc(qc) | GeneralConsensusMessage::ExtendedQc(qc, _) => qc.epoch(), } - } + }, SequencingMessage::Da(da_message) => { match da_message { DaConsensusMessage::DaProposal(p) => { // view of leader in the leaf when proposal // this should match replica upon receipt p.data.epoch() - } + }, DaConsensusMessage::DaVote(vote_message) => vote_message.epoch(), DaConsensusMessage::DaCertificate(cert) => cert.epoch(), DaConsensusMessage::VidDisperseMsg(disperse) => disperse.data.epoch(), @@ -464,11 +462,11 @@ impl SequencingMessage { // view of leader in the leaf when proposal // this should match replica upon receipt p.data.epoch() - } + }, 
DaConsensusMessage::DaVote2(vote_message) => vote_message.epoch(), DaConsensusMessage::DaCertificate2(cert) => cert.epoch(), } - } + }, } } } @@ -649,7 +647,7 @@ impl UpgradeLock { } else { V::Base::VERSION } - } + }, None => V::Base::VERSION, }; @@ -669,7 +667,7 @@ impl UpgradeLock { } else { cert.data.old_version } - } + }, None => V::Base::VERSION, } } @@ -698,7 +696,7 @@ impl UpgradeLock { v if v == V::Upgrade::VERSION => Serializer::::serialize(&message), v => { bail!("Attempted to serialize with version {}, which is incompatible. This should be impossible.", v); - } + }, }; serialized_message @@ -725,7 +723,7 @@ impl UpgradeLock { v if v == V::Upgrade::VERSION => Serializer::::deserialize(message), v => { bail!("Cannot deserialize message with stated version {}", v); - } + }, } .wrap() .context(info!("Failed to deserialize message!"))?; diff --git a/hotshot-types/src/network.rs b/hotshot-types/src/network.rs index f2d35b6984..3a5dd4fe31 100644 --- a/hotshot-types/src/network.rs +++ b/hotshot-types/src/network.rs @@ -209,7 +209,7 @@ impl NetworkConfig { Ok(data) => data, Err(e) => { return Err(NetworkConfigError::ReadFromFileError(e)); - } + }, }; // deserialize @@ -256,7 +256,7 @@ impl NetworkConfig { Ok(data) => data, Err(e) => { return Err(NetworkConfigError::SerializeError(e)); - } + }, }; // write to file diff --git a/hotshot-types/src/simple_vote.rs b/hotshot-types/src/simple_vote.rs index 547db100a6..1ae78c4f20 100644 --- a/hotshot-types/src/simple_vote.rs +++ b/hotshot-types/src/simple_vote.rs @@ -19,8 +19,7 @@ use serde::{de::DeserializeOwned, Deserialize, Serialize}; use vbs::version::Version; use crate::{ - data::VidCommitment, - data::{Leaf, Leaf2}, + data::{Leaf, Leaf2, VidCommitment}, message::UpgradeLock, traits::{ node_implementation::{NodeType, Versions}, diff --git a/hotshot-types/src/traits/storage.rs b/hotshot-types/src/traits/storage.rs index 991ba8282f..7ad83f58ed 100644 --- a/hotshot-types/src/traits/storage.rs +++ 
b/hotshot-types/src/traits/storage.rs @@ -18,12 +18,12 @@ use committable::Commitment; use super::node_implementation::NodeType; use crate::{ consensus::{CommitmentMap, View}, - data::VidCommitment, data::{ vid_disperse::{ADVZDisperseShare, VidDisperseShare2}, DaProposal, DaProposal2, Leaf, Leaf2, QuorumProposal, QuorumProposal2, - QuorumProposalWrapper, VidDisperseShare, + QuorumProposalWrapper, VidCommitment, VidDisperseShare, }, + drb::DrbResult, event::HotShotAction, message::{convert_proposal, Proposal}, simple_certificate::{ @@ -54,7 +54,7 @@ pub trait Storage: Send + Sync + Clone { _pd: std::marker::PhantomData, }) .await - } + }, VidDisperseShare::V1(share) => { self.append_vid2(&Proposal { data: share.clone(), @@ -62,7 +62,7 @@ pub trait Storage: Send + Sync + Clone { _pd: std::marker::PhantomData, }) .await - } + }, } } /// Add a proposal to the stored DA proposals. @@ -158,4 +158,12 @@ pub trait Storage: Send + Sync + Clone { async fn migrate_consensus(&self) -> Result<()> { Ok(()) } + /// Add a drb result + async fn add_drb_result(&self, epoch: TYPES::Epoch, drb_result: DrbResult) -> Result<()>; + /// Add an epoch block header + async fn add_epoch_root( + &self, + epoch: TYPES::Epoch, + block_header: TYPES::BlockHeader, + ) -> Result<()>; } diff --git a/hotshot-types/src/utils.rs b/hotshot-types/src/utils.rs index 864bd4c54a..09c0817ce8 100644 --- a/hotshot-types/src/utils.rs +++ b/hotshot-types/src/utils.rs @@ -6,6 +6,12 @@ //! Utility functions, type aliases, helper structs and enum definitions. 
+use std::{ + hash::{Hash, Hasher}, + ops::Deref, + sync::Arc, +}; + use anyhow::{anyhow, ensure}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use bincode::{ @@ -19,11 +25,6 @@ use committable::{Commitment, Committable}; use digest::OutputSizeUser; use serde::{Deserialize, Serialize}; use sha2::Digest; -use std::{ - hash::{Hash, Hasher}, - ops::Deref, - sync::Arc, -}; use tagged_base64::tagged; use typenum::Unsigned; use vbs::version::StaticVersionType; diff --git a/hotshot-types/src/vote.rs b/hotshot-types/src/vote.rs index e33e66f57f..1631896652 100644 --- a/hotshot-types/src/vote.rs +++ b/hotshot-types/src/vote.rs @@ -170,7 +170,7 @@ impl< Err(e) => { tracing::warn!("Failed to generate versioned vote data: {e}"); return None; - } + }, }; if !key.validate(&vote.signature(), vote_commitment.as_ref()) { diff --git a/hotshot-utils/src/anytrace.rs b/hotshot-utils/src/anytrace.rs index 12e129ca01..b9aec49221 100644 --- a/hotshot-utils/src/anytrace.rs +++ b/hotshot-utils/src/anytrace.rs @@ -24,21 +24,21 @@ impl Log for Error { match error_level { Level::Trace => { tracing::trace!("{}", self.message); - } + }, Level::Debug => { tracing::debug!("{}", self.message); - } + }, Level::Info => { tracing::info!("{}", self.message); - } + }, Level::Warn => { tracing::warn!("{}", self.message); - } + }, Level::Error => { tracing::error!("{}", self.message); - } + }, // impossible - Level::Unspecified => {} + Level::Unspecified => {}, } } } @@ -48,7 +48,7 @@ impl Log for Result { let error = match self { Ok(_) => { return; - } + }, Err(e) => e, }; @@ -60,21 +60,21 @@ impl Log for Result { match error_level { Level::Trace => { tracing::trace!("{}", error.message); - } + }, Level::Debug => { tracing::debug!("{}", error.message); - } + }, Level::Info => { tracing::info!("{}", error.message); - } + }, Level::Warn => { tracing::warn!("{}", error.message); - } + }, Level::Error => { tracing::error!("{}", error.message); - } + }, // impossible - Level::Unspecified => 
{} + Level::Unspecified => {}, } } } diff --git a/hotshot-utils/src/anytrace/macros.rs b/hotshot-utils/src/anytrace/macros.rs index 29c5178b07..b9f6b7db56 100644 --- a/hotshot-utils/src/anytrace/macros.rs +++ b/hotshot-utils/src/anytrace/macros.rs @@ -167,21 +167,21 @@ macro_rules! log { match error_level { Level::Trace => { tracing::trace!("{}", error.message); - } + }, Level::Debug => { tracing::debug!("{}", error.message); - } + }, Level::Info => { tracing::info!("{}", error.message); - } + }, Level::Warn => { tracing::warn!("{}", error.message); - } + }, Level::Error => { tracing::error!("{}", error.message); - } + }, // impossible - Level::Unspecified => {} + Level::Unspecified => {}, } } }; diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index e1ffbae935..660984de2d 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -215,10 +215,10 @@ impl, V: Versions> SystemContext Arc { #[allow(clippy::panic)] match storage.migrate_consensus().await { - Ok(()) => {} + Ok(()) => {}, Err(e) => { panic!("Failed to migrate consensus storage: {e}"); - } + }, } let internal_chan = broadcast(EVENT_CHANNEL_SIZE); @@ -767,10 +767,10 @@ where match event { Either::Left(msg) => { let _ = left_sender.broadcast(msg.into()).await; - } + }, Either::Right(msg) => { let _ = right_sender.broadcast(msg.into()).await; - } + }, } } } @@ -993,7 +993,7 @@ impl, V: Versions> ConsensusApi { pub epoch: TYPES::Epoch, pub drb_result: DrbResult, diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 544154a2cd..743327ea4b 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -10,7 +10,6 @@ pub mod task_state; use std::{collections::BTreeMap, fmt::Debug, num::NonZeroUsize, sync::Arc, time::Duration}; -use crate::EpochMembershipCoordinator; use async_broadcast::{broadcast, RecvError}; use async_lock::RwLock; use async_trait::async_trait; @@ -46,8 +45,9 @@ use vbs::version::StaticVersionType; use crate::{ genesis_epoch_from_version, 
tasks::task_state::CreateTaskState, types::SystemContextHandle, - ConsensusApi, ConsensusMetricsValue, ConsensusTaskRegistry, HotShotConfig, HotShotInitializer, - MarketplaceConfig, NetworkTaskRegistry, SignatureKey, SystemContext, Versions, + ConsensusApi, ConsensusMetricsValue, ConsensusTaskRegistry, EpochMembershipCoordinator, + HotShotConfig, HotShotInitializer, MarketplaceConfig, NetworkTaskRegistry, SignatureKey, + SystemContext, Versions, }; /// event for global event stream @@ -280,13 +280,13 @@ pub fn create_shutdown_event_monitor { return; - } + }, Err(e) => { tracing::error!("Shutdown event monitor channel recv error: {}", e); - } + }, } } } diff --git a/hotshot/src/traits/election/static_committee_leader_two_views.rs b/hotshot/src/traits/election/static_committee_leader_two_views.rs index 8b3e828f71..85a467057a 100644 --- a/hotshot/src/traits/election/static_committee_leader_two_views.rs +++ b/hotshot/src/traits/election/static_committee_leader_two_views.rs @@ -4,6 +4,11 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . +use std::{ + collections::{BTreeMap, BTreeSet}, + num::NonZeroU64, +}; + use hotshot_types::{ drb::DrbResult, traits::{ @@ -15,10 +20,6 @@ use hotshot_types::{ }; use hotshot_utils::anytrace::Result; use primitive_types::U256; -use std::{ - collections::{BTreeMap, BTreeSet}, - num::NonZeroU64, -}; #[derive(Clone, Debug, Eq, PartialEq, Hash)] diff --git a/hotshot/src/traits/election/two_static_committees.rs b/hotshot/src/traits/election/two_static_committees.rs index 2b2834710c..3825c4ed47 100644 --- a/hotshot/src/traits/election/two_static_committees.rs +++ b/hotshot/src/traits/election/two_static_committees.rs @@ -4,6 +4,12 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . 
+use std::{ + cmp::max, + collections::{BTreeMap, BTreeSet}, + num::NonZeroU64, +}; + use hotshot_types::{ drb::DrbResult, traits::{ @@ -15,11 +21,6 @@ use hotshot_types::{ }; use hotshot_utils::anytrace::Result; use primitive_types::U256; -use std::{ - cmp::max, - collections::{BTreeMap, BTreeSet}, - num::NonZeroU64, -}; /// Tuple type for eligible leaders type EligibleLeaders = ( diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 87b4a873c2..dc6af286cc 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -185,12 +185,12 @@ impl CombinedNetworks { // The primary fail counter reached 0, the primary is now considered up primary_down.store(false, Ordering::Relaxed); debug!("primary_fail_counter reached zero, primary_down set to false"); - } + }, c => { // Decrement the primary fail counter primary_fail_counter.store(c - 1, Ordering::Relaxed); debug!("primary_fail_counter set to {:?}", c - 1); - } + }, } return Ok(()); } @@ -211,7 +211,7 @@ impl CombinedNetworks { c if c < COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL => { // Just increment the 'no delay counter' self.no_delay_counter.store(c + 1, Ordering::Relaxed); - } + }, _ => { // The 'no delay counter' reached the threshold debug!( @@ -226,7 +226,7 @@ impl CombinedNetworks { // The primary fail counter is set just below the threshold to delay the next message self.primary_fail_counter .store(COMBINED_NETWORK_MIN_PRIMARY_FAILURES, Ordering::Relaxed); - } + }, } } // Send the message diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 93e9c5ef21..52ce406350 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -7,7 +7,21 @@ //! Libp2p based/production networking implementation //! 
This module provides a libp2p based networking implementation where each node in the //! network forms a tcp or udp connection to a subset of other nodes in the network -use crate::EpochMembershipCoordinator; +#[cfg(feature = "hotshot-testing")] +use std::str::FromStr; +use std::{ + cmp::min, + collections::{BTreeSet, HashSet}, + fmt::Debug, + net::{IpAddr, ToSocketAddrs}, + num::NonZeroUsize, + sync::{ + atomic::{AtomicBool, AtomicU64, Ordering}, + Arc, + }, + time::Duration, +}; + use anyhow::{anyhow, Context}; use async_lock::RwLock; use async_trait::async_trait; @@ -51,20 +65,6 @@ use libp2p_identity::{ }; use rand::{rngs::StdRng, seq::IteratorRandom, SeedableRng}; use serde::Serialize; -#[cfg(feature = "hotshot-testing")] -use std::str::FromStr; -use std::{ - cmp::min, - collections::{BTreeSet, HashSet}, - fmt::Debug, - net::{IpAddr, ToSocketAddrs}, - num::NonZeroUsize, - sync::{ - atomic::{AtomicBool, AtomicU64, Ordering}, - Arc, - }, - time::Duration, -}; use tokio::{ select, spawn, sync::{ @@ -75,7 +75,7 @@ use tokio::{ }; use tracing::{error, info, instrument, trace, warn}; -use crate::BroadcastDelay; +use crate::{BroadcastDelay, EpochMembershipCoordinator}; /// Libp2p-specific metrics #[derive(Clone, Debug)] @@ -289,7 +289,7 @@ impl TestableNetworkingImplementation for Libp2pNetwork { Ok(network) => network, Err(err) => { panic!("Failed to create libp2p network: {err:?}"); - } + }, }, ) }) @@ -372,7 +372,7 @@ pub fn derive_libp2p_multiaddr(addr: &String) -> anyhow::Result { } format!("/dns/{host}/udp/{port}/quic-v1") - } + }, }; // Convert the multiaddr string to a `Multiaddr` @@ -680,7 +680,7 @@ impl Libp2pNetwork { sender.try_send(msg).map_err(|err| { NetworkError::ChannelSendError(format!("failed to send gossip message: {err}")) })?; - } + }, DirectRequest(msg, _pid, chan) => { sender.try_send(msg).map_err(|err| { NetworkError::ChannelSendError(format!( @@ -702,12 +702,12 @@ impl Libp2pNetwork { { error!("failed to ack!"); }; - } - DirectResponse(_msg, 
_) => {} + }, + DirectResponse(_msg, _) => {}, NetworkEvent::IsBootstrapped => { error!("handle_recvd_events received `NetworkEvent::IsBootstrapped`, which should be impossible."); - } - NetworkEvent::ConnectedPeersUpdate(_) => {} + }, + NetworkEvent::ConnectedPeersUpdate(_) => {}, } Ok::<(), NetworkError>(()) } @@ -909,7 +909,7 @@ impl ConnectedNetwork for Libp2pNetwork { return Err(NetworkError::LookupError(format!( "failed to look up node for direct message: {err}" ))); - } + }, }; #[cfg(feature = "hotshot-testing")] @@ -941,7 +941,7 @@ impl ConnectedNetwork for Libp2pNetwork { Err(e) => { self.inner.metrics.num_failed_messages.add(1); Err(e) - } + }, } } @@ -1002,7 +1002,7 @@ impl ConnectedNetwork for Libp2pNetwork { Ok(m) => m, Err(e) => { return tracing::warn!(e.message); - } + }, }; let future_leader = match membership.leader(future_view).await { Ok(l) => l, @@ -1011,7 +1011,7 @@ impl ConnectedNetwork for Libp2pNetwork { "Failed to calculate leader for view {:?}: {e}", future_view ); - } + }, }; let _ = self diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 9aa8adfef2..4a0663d246 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -282,10 +282,10 @@ impl ConnectedNetwork for MemoryNetwork { match res { Ok(()) => { trace!(?key, "Delivered message to remote"); - } + }, Err(e) => { warn!(?e, ?key, "Error sending broadcast message to node"); - } + }, } } } @@ -336,10 +336,10 @@ impl ConnectedNetwork for MemoryNetwork { match res { Ok(()) => { trace!(?key, "Delivered message to remote"); - } + }, Err(e) => { warn!(?e, ?key, "Error sending broadcast message to node"); - } + }, } } } @@ -375,7 +375,7 @@ impl ConnectedNetwork for MemoryNetwork { Ok(()) => { trace!(?recipient, "Delivered message to remote"); Ok(()) - } + }, Err(e) => Err(NetworkError::MessageSendError(format!( "error sending direct message to node: {e}", ))), diff --git 
a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index a742e5f857..0f222f2729 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -591,7 +591,7 @@ impl ConnectedNetwork for PushCdnNetwork { return Err(NetworkError::MessageReceiveError(format!( "failed to receive message: {error}" ))); - } + }, }; // Extract the underlying message diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 07e6bab901..f9d1c40eeb 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -6,6 +6,8 @@ //! Provides an event-streaming handle for a [`SystemContext`] running in the background +use std::sync::Arc; + use anyhow::{anyhow, Context, Ok, Result}; use async_broadcast::{InactiveReceiver, Receiver, Sender}; use async_lock::RwLock; @@ -32,7 +34,6 @@ use hotshot_types::{ }, utils::option_epoch_from_block_number, }; -use std::sync::Arc; use tracing::instrument; use crate::{traits::NodeImplementation, types::Event, SystemContext, Versions}; @@ -116,17 +117,17 @@ impl + 'static, V: Versions> self.network .broadcast_message(serialized_message, Topic::Global, BroadcastDelay::None) .await?; - } + }, RecipientList::Direct(recipient) => { self.network .direct_message(serialized_message, recipient) .await?; - } + }, RecipientList::Many(recipients) => { self.network .da_broadcast_message(serialized_message, recipients, BroadcastDelay::None) .await?; - } + }, } Ok(()) } @@ -199,7 +200,7 @@ impl + 'static, V: Versions> Err(e) => { tracing::warn!(e.message); continue; - } + }, }; // Make sure that the quorum_proposal is valid if let Err(err) = quorum_proposal.validate_signature(&membership).await { diff --git a/justfile b/justfile index 8ecf0302c4..294d18e4c6 100644 --- a/justfile +++ b/justfile @@ -75,11 +75,10 @@ test-all: cargo nextest run --locked --release --workspace --verbose --profile all test-integration: - @echo 'NOTE that 
demo-native must be running for this test to succeed.' - INTEGRATION_TEST_SEQUENCER_VERSION=2 cargo nextest run --all-features --nocapture --profile integration smoke + INTEGRATION_TEST_SEQUENCER_VERSION=2 cargo nextest run -p tests --nocapture --profile integration test_native_demo_basic + test-integration-mp: - @echo 'NOTE that demo-native-mp must be running for this test to succeed.' - INTEGRATION_TEST_SEQUENCER_VERSION=99 cargo nextest run --all-features --nocapture --profile integration + INTEGRATION_TEST_SEQUENCER_VERSION=99 cargo nextest run -p tests --nocapture --profile integration test_native_demo_upgrade clippy: @echo 'features: "embedded-db"' @@ -89,6 +88,31 @@ clippy: check-features *args: cargo hack check --each-feature {{args}} +check-features-ci *args: + # check each pair of features plus `default` and `--no-default-features` + cargo hack check --feature-powerset \ + --depth 2 \ + --exclude contract-bindings-alloy \ + --exclude contract-bindings-ethers \ + --exclude hotshot \ + --exclude hotshot-builder-api \ + --exclude hotshot-contract-adapter \ + --exclude hotshot-events-service \ + --exclude hotshot-example-types \ + --exclude hotshot-libp2p-networking \ + --exclude hotshot-macros \ + --exclude hotshot-orchestrator \ + --exclude hotshot-query-service \ + --exclude hotshot-stake-table \ + --exclude hotshot-state-prover \ + --exclude hotshot-task \ + --exclude hotshot-task-impls \ + --exclude hotshot-testing \ + --exclude hotshot-types \ + --exclude hotshot-utils \ + --exclude vid \ + {{args}} + # Helpful shortcuts for local development dev-orchestrator: target/release/orchestrator -p 8080 -n 1 @@ -116,7 +140,7 @@ gen-bindings: git submodule update --init --recursive # Generate the ethers bindings - forge bind --contracts ./contracts/src/ --ethers --crate-name contract-bindings-ethers --bindings-path contract-bindings-ethers --select "{{REGEXP}}" --overwrite --force + nix develop .#legacyFoundry -c forge bind --contracts ./contracts/src/ --ethers 
--crate-name contract-bindings-ethers --bindings-path contract-bindings-ethers --select "{{REGEXP}}" --overwrite --force # Foundry doesn't include bytecode in the bindings for LightClient.sol, since it links with # libraries. However, this bytecode is still needed to link and deploy the contract. Copy it to diff --git a/marketplace-builder-core/src/service.rs b/marketplace-builder-core/src/service.rs index 29fff93c46..bb5af4bc70 100644 --- a/marketplace-builder-core/src/service.rs +++ b/marketplace-builder-core/src/service.rs @@ -1,19 +1,16 @@ -use std::time::Duration; - -use marketplace_builder_shared::{ - block::{BuilderStateId, ReceivedTransaction, TransactionSource}, - coordinator::{BuilderStateCoordinator, BuilderStateLookup}, - state::BuilderState, - utils::BuilderKeys, +use std::{ + fmt::Display, + sync::Arc, + time::{Duration, Instant}, }; pub use async_broadcast::{broadcast, RecvError, TryRecvError}; use async_trait::async_trait; use committable::{Commitment, Committable}; -use futures::{future::BoxFuture, stream::FuturesUnordered, Stream}; use futures::{ - stream::{FuturesOrdered, StreamExt}, - TryStreamExt, + future::BoxFuture, + stream::{FuturesOrdered, FuturesUnordered, StreamExt}, + Stream, TryStreamExt, }; use hotshot::types::Event; use hotshot_builder_api::{ @@ -23,26 +20,29 @@ use hotshot_builder_api::{ data_source::{AcceptsTxnSubmits, BuilderDataSource}, }, }; -use hotshot_types::bundle::Bundle; -use hotshot_types::traits::block_contents::{BuilderFee, Transaction}; use hotshot_types::{ + bundle::Bundle, data::VidCommitment, event::EventType, traits::{ + block_contents::{BuilderFee, Transaction}, node_implementation::{ConsensusTime, NodeType}, signature_key::{BuilderSignatureKey, SignatureKey}, }, }; -use std::sync::Arc; -use std::{fmt::Display, time::Instant}; +pub use marketplace_builder_shared::utils::EventServiceStream; +use marketplace_builder_shared::{ + block::{BuilderStateId, ReceivedTransaction, TransactionSource}, + 
coordinator::{BuilderStateCoordinator, BuilderStateLookup}, + state::BuilderState, + utils::BuilderKeys, +}; use tagged_base64::TaggedBase64; use tide_disco::{app::AppError, method::ReadState, App}; use tokio::{spawn, task::JoinHandle, time::sleep}; use tracing::Level; use vbs::version::StaticVersion; -pub use marketplace_builder_shared::utils::EventServiceStream; - use crate::hooks::BuilderHooks; /// Configuration to initialize the builder @@ -189,7 +189,7 @@ where match event.event { EventType::Error { error } => { tracing::error!("Error event in HotShot: {:?}", error); - } + }, EventType::Transactions { transactions } => { let hooks = Arc::clone(&hooks); let coordinator = Arc::clone(&coordinator); @@ -208,20 +208,20 @@ where .collect::>() .await; }); - } + }, EventType::Decide { leaf_chain, .. } => { let coordinator = Arc::clone(&coordinator); spawn(async move { coordinator.handle_decide(leaf_chain).await }); - } + }, EventType::DaProposal { proposal, .. } => { let coordinator = Arc::clone(&coordinator); spawn(async move { coordinator.handle_da_proposal(proposal.data).await }); - } + }, EventType::QuorumProposal { proposal, .. 
} => { let coordinator = Arc::clone(&coordinator); spawn(async move { coordinator.handle_quorum_proposal(proposal.data).await }); - } - _ => {} + }, + _ => {}, } } } @@ -356,14 +356,14 @@ where // If we couldn't find the state because it hasn't yet been created, try again sleep(self.api_timeout / 10).await; continue; - } + }, BuilderStateLookup::Decided => { // If we couldn't find the state because the view has already been decided, we can just return an error tracing::warn!("Requested a bundle for view we already GCd as decided",); return Err(BuildError::Error( "Request for a bundle for a view that has already been decided.".to_owned(), )); - } + }, }; tracing::info!( diff --git a/marketplace-builder-core/src/testing/basic_test.rs b/marketplace-builder-core/src/testing/basic_test.rs index 1d4cf42c5f..6919fd3ce8 100644 --- a/marketplace-builder-core/src/testing/basic_test.rs +++ b/marketplace-builder-core/src/testing/basic_test.rs @@ -1,14 +1,15 @@ +use std::{marker::PhantomData, sync::Arc}; + use async_broadcast::broadcast; use hotshot_builder_api::v0_99::data_source::{AcceptsTxnSubmits, BuilderDataSource}; - use hotshot_example_types::block_types::TestTransaction; +use marketplace_builder_shared::testing::consensus::SimulatedChainState; use tracing_test::traced_test; -use crate::hooks::NoHooks; -use crate::service::{BuilderConfig, GlobalState, ProxyGlobalState}; -use marketplace_builder_shared::testing::consensus::SimulatedChainState; -use std::marker::PhantomData; -use std::sync::Arc; +use crate::{ + hooks::NoHooks, + service::{BuilderConfig, GlobalState, ProxyGlobalState}, +}; /// This test simulates multiple builder states receiving messages from the channels and processing them #[tokio::test] diff --git a/marketplace-builder-core/src/testing/integration.rs b/marketplace-builder-core/src/testing/integration.rs index 1b986aa549..3cb98cd7c8 100644 --- a/marketplace-builder-core/src/testing/integration.rs +++ b/marketplace-builder-core/src/testing/integration.rs 
@@ -118,21 +118,20 @@ where mod tests { use std::time::Duration; - use crate::testing::integration::MarketplaceBuilderImpl; - use marketplace_builder_shared::testing::{ - generation::{self, TransactionGenerationConfig}, - run_test, - validation::BuilderValidationConfig, - }; - - use hotshot_example_types::node_types::MarketplaceTestVersions; - use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; + use hotshot_example_types::node_types::{MarketplaceTestVersions, MemoryImpl, TestTypes}; use hotshot_macros::cross_tests; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, test_builder::TestDescription, }; + use marketplace_builder_shared::testing::{ + generation::{self, TransactionGenerationConfig}, + run_test, + validation::BuilderValidationConfig, + }; + + use crate::testing::integration::MarketplaceBuilderImpl; #[tokio::test(flavor = "multi_thread")] #[tracing::instrument] diff --git a/marketplace-builder-core/src/testing/order_test.rs b/marketplace-builder-core/src/testing/order_test.rs index 9de0297172..b95416d6ba 100644 --- a/marketplace-builder-core/src/testing/order_test.rs +++ b/marketplace-builder-core/src/testing/order_test.rs @@ -1,5 +1,9 @@ +use std::{fmt::Debug, marker::PhantomData, sync::Arc}; + use async_broadcast::broadcast; +use hotshot::rand::{self, seq::SliceRandom, thread_rng}; use hotshot_builder_api::v0_99::data_source::{AcceptsTxnSubmits, BuilderDataSource}; +use hotshot_example_types::block_types::TestTransaction; use hotshot_types::{bundle::Bundle, traits::node_implementation::ConsensusTime}; use marketplace_builder_shared::{block::BuilderStateId, testing::consensus::SimulatedChainState}; use tracing_test::traced_test; @@ -9,12 +13,6 @@ use crate::{ service::{BuilderConfig, GlobalState, ProxyGlobalState}, }; -use std::{fmt::Debug, marker::PhantomData, sync::Arc}; - -use hotshot_example_types::block_types::TestTransaction; - 
-use hotshot::rand::{self, seq::SliceRandom, thread_rng}; - /// [`RoundTransactionBehavior`] is an enum that is used to represent different /// behaviors that we may want to simulate during a round. This applies to /// determining which transactions are included in the block, and how their @@ -64,12 +62,12 @@ impl RoundTransactionBehavior { ]), ); transactions - } + }, RoundTransactionBehavior::AdjustRemoveTail => { let mut transactions = transactions.clone(); transactions.pop(); transactions - } + }, RoundTransactionBehavior::ProposeInAdvance(propose_in_advance_round) => { let mut transactions = transactions.clone(); transactions.push(TestTransaction::new(vec![ @@ -77,12 +75,12 @@ impl RoundTransactionBehavior { 0_u8, ])); transactions - } + }, RoundTransactionBehavior::AdjustRemove => { let mut transactions = transactions.clone(); transactions.remove(rand::random::() % (transactions.len() - 1)); transactions - } + }, } } } diff --git a/marketplace-builder-shared/src/block.rs b/marketplace-builder-shared/src/block.rs index 1f458f2e62..b9775b2aaa 100644 --- a/marketplace-builder-shared/src/block.rs +++ b/marketplace-builder-shared/src/block.rs @@ -3,11 +3,12 @@ use std::time::Instant; use committable::{Commitment, Committable}; -use hotshot_types::data::{fake_commitment, Leaf2}; -use hotshot_types::traits::node_implementation::ConsensusTime; use hotshot_types::{ - data::VidCommitment, - traits::{block_contents::Transaction, node_implementation::NodeType}, + data::{fake_commitment, Leaf2, VidCommitment}, + traits::{ + block_contents::Transaction, + node_implementation::{ConsensusTime, NodeType}, + }, utils::BuilderCommitment, }; diff --git a/marketplace-builder-shared/src/coordinator/mod.rs b/marketplace-builder-shared/src/coordinator/mod.rs index e546aa3f23..5d8c04a480 100644 --- a/marketplace-builder-shared/src/coordinator/mod.rs +++ b/marketplace-builder-shared/src/coordinator/mod.rs @@ -195,7 +195,7 @@ where }, ); return Err(Error::TxnSender(err)); - } + }, }; 
self.update_txn_status(&commit, TransactionStatus::Pending); @@ -251,15 +251,15 @@ where (Either::Right(da_proposal), Either::Left(quorum_proposal)) | (Either::Left(quorum_proposal), Either::Right(da_proposal)) => { self.spawn_builder_state(quorum_proposal, da_proposal).await - } + }, _ => { unreachable!() - } + }, } - } + }, Entry::Vacant(entry) => { entry.insert(proposal); - } + }, } } @@ -499,10 +499,10 @@ where "Not changing status of rejected/sequenced transaction", ); return; - } + }, _ => { tracing::debug!(?old_status, ?new_status, "Changing status of transaction",); - } + }, } } self.tx_status.insert(*txn_hash, new_status); @@ -525,6 +525,7 @@ mod tests { use hotshot_types::data::ViewNumber; use tracing_test::traced_test; + use super::*; use crate::{ block::TransactionSource, testing::{ @@ -535,8 +536,6 @@ mod tests { }, }; - use super::*; - type BuilderStateCoordinator = super::BuilderStateCoordinator; #[tokio::test] diff --git a/marketplace-builder-shared/src/coordinator/tiered_view_map.rs b/marketplace-builder-shared/src/coordinator/tiered_view_map.rs index 6d52f3d955..385d7ec6c1 100644 --- a/marketplace-builder-shared/src/coordinator/tiered_view_map.rs +++ b/marketplace-builder-shared/src/coordinator/tiered_view_map.rs @@ -143,10 +143,10 @@ where match self.0.entry(*key.view()) { Entry::Vacant(entry) => { entry.insert(nem![key.into_subkey() => value]); - } + }, Entry::Occupied(mut entry) => { entry.get_mut().insert(key.into_subkey(), value); - } + }, } } @@ -181,14 +181,14 @@ where mod tests { use std::{cmp::Ordering, ops::Bound, sync::Arc}; - use crate::{state::BuilderState, testing::mock}; - - use super::*; use hotshot_example_types::node_types::TestTypes; use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; use rand::{distributions::Standard, thread_rng, Rng}; use tracing_test::traced_test; + use super::*; + use crate::{state::BuilderState, testing::mock}; + type View = ViewNumber; type BuilderStateMap = 
super::TieredViewMap, Arc>>; diff --git a/marketplace-builder-shared/src/error.rs b/marketplace-builder-shared/src/error.rs index ac2d3b155e..9200c49c06 100644 --- a/marketplace-builder-shared/src/error.rs +++ b/marketplace-builder-shared/src/error.rs @@ -33,18 +33,18 @@ impl From> for BuildError { match value { Error::SignatureValidation => { BuildError::Error("Signature validation failed".to_owned()) - } + }, Error::Signing(_) => BuildError::Error("Failed to sign response".to_owned()), Error::ApiTimeout => BuildError::Error("Timeout".to_owned()), Error::NotFound => BuildError::NotFound, Error::AlreadyDecided => { BuildError::Error("Request for an already decided view".to_owned()) - } + }, Error::BuildBlock(_) => BuildError::Error("Failed to build block".to_owned()), Error::TxnSender(_) => BuildError::Error("Transaction channel error".to_owned()), Error::TxTooBig { len, max_tx_len } => { BuildError::Error(format!("Transaction too big ({len}/{max_tx_len}")) - } + }, } } } diff --git a/marketplace-builder-shared/src/state.rs b/marketplace-builder-shared/src/state.rs index 9666d17337..2ad7756fd8 100644 --- a/marketplace-builder-shared/src/state.rs +++ b/marketplace-builder-shared/src/state.rs @@ -4,10 +4,6 @@ use std::{ time::{Duration, Instant}, }; -use crate::{ - block::{BuilderStateId, ParentBlockReferences, ReceivedTransaction}, - utils::RotatingSet, -}; use async_broadcast::Receiver; use async_lock::{Mutex, RwLock}; use committable::{Commitment, Committable}; @@ -17,6 +13,11 @@ use hotshot_types::{ traits::{block_contents::BlockHeader, node_implementation::NodeType}, }; +use crate::{ + block::{BuilderStateId, ParentBlockReferences, ReceivedTransaction}, + utils::RotatingSet, +}; + #[derive(derive_more::Debug, Clone)] pub struct TransactionQueue where @@ -208,7 +209,7 @@ where self.txn_queue.write().await.insert(txn); queue_empty = false; - } + }, Err(async_broadcast::TryRecvError::Empty) | Err(async_broadcast::TryRecvError::Closed) => { @@ -216,12 +217,12 @@ 
where // If it's closed that's a big problem and we should // probably indicate it as such. break; - } + }, Err(async_broadcast::TryRecvError::Overflowed(lost)) => { tracing::warn!("Missed {lost} transactions due to backlog"); continue; - } + }, } } queue_empty diff --git a/marketplace-builder-shared/src/testing/consensus.rs b/marketplace-builder-shared/src/testing/consensus.rs index f4ee61cd00..13d658f8f8 100644 --- a/marketplace-builder-shared/src/testing/consensus.rs +++ b/marketplace-builder-shared/src/testing/consensus.rs @@ -2,8 +2,6 @@ use std::marker::PhantomData; -use crate::block::BuilderStateId; -use crate::testing::constants::TEST_NUM_NODES_IN_VID_COMPUTATION; use async_broadcast::Sender; use committable::Committable; use hotshot::{ @@ -17,8 +15,10 @@ use hotshot_example_types::{ state_types::{TestInstanceState, TestValidatedState}, }; use hotshot_types::{ - data::vid_commitment, - data::{DaProposal2, EpochNumber, Leaf2, QuorumProposal2, QuorumProposalWrapper, ViewNumber}, + data::{ + vid_commitment, DaProposal2, EpochNumber, Leaf2, QuorumProposal2, QuorumProposalWrapper, + ViewNumber, + }, message::Proposal, simple_certificate::{QuorumCertificate2, SimpleCertificate, SuccessThreshold}, simple_vote::QuorumData2, @@ -30,6 +30,8 @@ use hotshot_types::{ use sha2::{Digest, Sha256}; use vbs::version::StaticVersionType; +use crate::{block::BuilderStateId, testing::constants::TEST_NUM_NODES_IN_VID_COMPUTATION}; + pub struct SimulatedChainState { epoch: Option, round: ViewNumber, @@ -108,7 +110,7 @@ impl SimulatedChainState { &TestInstanceState::default(), ) .await - } + }, Some(prev_proposal) => { let prev_justify_qc = &prev_proposal.justify_qc(); let quorum_data = QuorumData2:: { @@ -124,7 +126,7 @@ impl SimulatedChainState { prev_justify_qc.signatures.clone(), PhantomData, ) - } + }, }; tracing::debug!("Iteration: {} justify_qc: {:?}", self.round, justify_qc); diff --git a/marketplace-builder-shared/src/testing/generation.rs 
b/marketplace-builder-shared/src/testing/generation.rs index d7667a24e8..e16293a384 100644 --- a/marketplace-builder-shared/src/testing/generation.rs +++ b/marketplace-builder-shared/src/testing/generation.rs @@ -137,7 +137,7 @@ where .map(Result::unwrap), ); } - } + }, GenerationStrategy::Random { min_per_view, max_per_view, @@ -164,7 +164,7 @@ where self.txn_nonce += 1; } - } + }, GenerationStrategy::Flood { min_tx_size, max_tx_size, @@ -188,7 +188,7 @@ where self.txn_nonce += 1; } - } + }, }; } } @@ -235,7 +235,7 @@ where .publish_transaction_async(txn) .await .expect("Failed to submit transaction to public mempool"); - } + }, SubmissionEndpoint::Private => { if let Err(e) = private_mempool_client .post::<()>("submit") @@ -248,17 +248,17 @@ where // If we can't reach the builder altogether, test should fail builder::Error::Request(request_error) => { panic!("Builder API not available: {request_error}") - } + }, // If the builder returns an error, we will re-submit this transaction // on the next view, so we return it to the queue and break error => { tracing::warn!(?error, "Builder API error"); self.txn_queue.push_front(txn); break; - } + }, }; } - } + }, } } } diff --git a/marketplace-builder-shared/src/testing/mock.rs b/marketplace-builder-shared/src/testing/mock.rs index 31d8c4fbf6..fbcb8ba176 100644 --- a/marketplace-builder-shared/src/testing/mock.rs +++ b/marketplace-builder-shared/src/testing/mock.rs @@ -2,40 +2,35 @@ use std::{marker::PhantomData, sync::Arc, time::Duration}; use async_broadcast::broadcast; -use committable::Commitment; -use committable::Committable; -use hotshot_example_types::block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}; +use committable::{Commitment, Committable}; use hotshot_example_types::{ + block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, node_types::{TestTypes, TestVersions}, state_types::{TestInstanceState, TestValidatedState}, }; -use hotshot_types::data::DaProposal2; -use 
hotshot_types::data::ViewNumber; -use hotshot_types::data::{QuorumProposal2, QuorumProposalWrapper}; -use hotshot_types::event::LeafInfo; -use hotshot_types::simple_certificate::QuorumCertificate2; -use hotshot_types::simple_vote::QuorumData2; -use hotshot_types::traits::block_contents::GENESIS_VID_NUM_STORAGE_NODES; -use hotshot_types::traits::node_implementation::Versions; -use hotshot_types::traits::EncodeBytes; -use hotshot_types::vid::advz::advz_scheme; use hotshot_types::{ - data::{random_commitment, vid_commitment, Leaf, Leaf2}, + data::{ + random_commitment, vid_commitment, DaProposal2, Leaf, Leaf2, QuorumProposal2, + QuorumProposalWrapper, ViewNumber, + }, + event::LeafInfo, message::UpgradeLock, - simple_certificate::QuorumCertificate, - simple_vote::VersionedVoteData, - traits::node_implementation::{ConsensusTime, NodeType}, - traits::BlockPayload, + simple_certificate::{QuorumCertificate, QuorumCertificate2}, + simple_vote::{QuorumData2, VersionedVoteData}, + traits::{ + block_contents::GENESIS_VID_NUM_STORAGE_NODES, + node_implementation::{ConsensusTime, NodeType, Versions}, + BlockPayload, EncodeBytes, + }, utils::BuilderCommitment, + vid::advz::advz_scheme, }; use jf_vid::VidScheme; use rand::{distributions::Standard, thread_rng, Rng}; use vbs::version::StaticVersionType; -use crate::block::ParentBlockReferences; -use crate::state::BuilderState; - use super::constants::{TEST_CHANNEL_BUFFER_SIZE, TEST_NUM_NODES_IN_VID_COMPUTATION}; +use crate::{block::ParentBlockReferences, state::BuilderState}; pub fn transaction() -> TestTransaction { TestTransaction::new( diff --git a/marketplace-builder-shared/src/testing/validation.rs b/marketplace-builder-shared/src/testing/validation.rs index cc9a4b434a..c8d058f4cb 100644 --- a/marketplace-builder-shared/src/testing/validation.rs +++ b/marketplace-builder-shared/src/testing/validation.rs @@ -1,8 +1,9 @@ use std::sync::Arc; -use super::TransactionPayload; - +use anyhow::{bail, Error}; use async_lock::RwLock; 
+use async_trait::async_trait; +use chrono::{DateTime, Local}; use hotshot::{ traits::{BlockPayload, TestableNodeImplementation}, types::{Event, EventType}, @@ -17,9 +18,7 @@ use hotshot_types::traits::{ node_implementation::{NodeType, Versions}, }; -use anyhow::{bail, Error}; -use async_trait::async_trait; -use chrono::{DateTime, Local}; +use super::TransactionPayload; #[derive(Clone, Debug)] pub struct IncludedTransaction { diff --git a/marketplace-builder-shared/src/utils/event_service_wrapper.rs b/marketplace-builder-shared/src/utils/event_service_wrapper.rs index ece95e072d..59050f660c 100644 --- a/marketplace-builder-shared/src/utils/event_service_wrapper.rs +++ b/marketplace-builder-shared/src/utils/event_service_wrapper.rs @@ -1,16 +1,12 @@ -use std::{future::Future, pin::Pin}; - -use std::time::Duration; +use std::{future::Future, pin::Pin, time::Duration}; use anyhow::Context; use either::Either::{self, Left, Right}; -use futures::stream::unfold; -use futures::{Stream, StreamExt}; +use futures::{stream::unfold, Stream, StreamExt}; use hotshot::types::Event; use hotshot_events_service::events::Error as EventStreamError; use hotshot_types::traits::node_implementation::NodeType; -use surf_disco::client::HealthStatus; -use surf_disco::Client; +use surf_disco::{client::HealthStatus, Client}; use tokio::time::{sleep, timeout}; use tracing::{error, warn}; use url::Url; @@ -58,7 +54,7 @@ impl EventServiceStream break, Err(err) => { tracing::debug!(?err, "Healthcheck failed, retrying"); - } + }, } sleep(Self::RETRY_PERIOD).await; } @@ -90,18 +86,18 @@ impl EventServiceStream { return Some((event, this)); - } + }, Ok(Some(Err(err))) => { warn!(?err, "Error in event stream"); continue; - } + }, Ok(None) => { warn!("Event stream ended, attempting reconnection"); let fut = Self::connect_inner(this.api_url.clone()); let _ = std::mem::replace(&mut this.connection, Right(Box::pin(fut))); continue; - } + }, Err(_) => { // Timeout occurred, reconnect warn!("Timeout waiting 
for next event; reconnecting"); @@ -109,21 +105,21 @@ impl EventServiceStream match reconnection.await { Ok(connection) => { let _ = std::mem::replace(&mut this.connection, Left(connection)); continue; - } + }, Err(err) => { error!(?err, "Error while reconnecting, will retry in a while"); sleep(Self::RETRY_PERIOD).await; let fut = Self::connect_inner(this.api_url.clone()); let _ = std::mem::replace(&mut this.connection, Right(Box::pin(fut))); continue; - } + }, }, } } diff --git a/marketplace-builder-shared/src/utils/rotating_set.rs b/marketplace-builder-shared/src/utils/rotating_set.rs index 86ca9e3ed8..9e626db846 100644 --- a/marketplace-builder-shared/src/utils/rotating_set.rs +++ b/marketplace-builder-shared/src/utils/rotating_set.rs @@ -76,10 +76,11 @@ where #[cfg(test)] mod tests { + use std::thread::sleep; + use tracing_test::traced_test; use super::*; - use std::thread::sleep; #[test] #[traced_test] diff --git a/marketplace-builder/src/bin/marketplace-builder.rs b/marketplace-builder/src/bin/marketplace-builder.rs index c23cf6a1b3..d6aa8d16bb 100644 --- a/marketplace-builder/src/bin/marketplace-builder.rs +++ b/marketplace-builder/src/bin/marketplace-builder.rs @@ -129,11 +129,11 @@ async fn main() -> anyhow::Result<()> { match (base, upgrade) { (FeeVersion::VERSION, MarketplaceVersion::VERSION) => { run::>(genesis, opt).await - } + }, (FeeVersion::VERSION, _) => run::>(genesis, opt).await, (MarketplaceVersion::VERSION, _) => { run::>(genesis, opt).await - } + }, _ => panic!( "Invalid base ({base}) and upgrade ({upgrade}) versions specified in the toml file." 
), diff --git a/marketplace-builder/src/builder.rs b/marketplace-builder/src/builder.rs index cd334f9c0e..b824dbd91d 100644 --- a/marketplace-builder/src/builder.rs +++ b/marketplace-builder/src/builder.rs @@ -1,10 +1,9 @@ -use std::{arch::global_asm, collections::HashSet, num::NonZeroUsize, time::Duration}; +use std::{arch::global_asm, collections::HashSet, num::NonZeroUsize, sync::Arc, time::Duration}; use anyhow::Context; use async_broadcast::{ broadcast, Receiver as BroadcastReceiver, RecvError, Sender as BroadcastSender, TryRecvError, }; - use async_lock::RwLock; use espresso_types::{ eth_signature_key::EthKeyPair, @@ -43,7 +42,6 @@ use marketplace_builder_core::{ use marketplace_builder_shared::block::ParentBlockReferences; use marketplace_solver::SolverError; use sequencer::{catchup::StatePeers, L1Params, NetworkParams, SequencerApiVersion}; -use std::sync::Arc; use surf::http::headers::ACCEPT; use surf_disco::Client; use tide_disco::{app, method::ReadState, App, Url}; @@ -209,8 +207,7 @@ mod test { use anyhow::Error; use async_lock::RwLock; - use committable::Commitment; - use committable::Committable; + use committable::{Commitment, Committable}; use espresso_types::{ mock::MockStateCatchup, v0_99::{RollupRegistration, RollupRegistrationBody}, @@ -220,12 +217,14 @@ mod test { use ethers::{core::k256::elliptic_curve::rand_core::block, utils::Anvil}; use futures::{Stream, StreamExt}; use hooks::connect_to_solver; - use hotshot::helpers::initialize_logging; - use hotshot::types::{ - BLSPrivKey, - EventType::{Decide, *}, + use hotshot::{ + helpers::initialize_logging, + rand, + types::{ + BLSPrivKey, EventType, + EventType::{Decide, *}, + }, }; - use hotshot::{rand, types::EventType}; use hotshot_builder_api::v0_99::builder::BuildError; use hotshot_events_service::{ events::{Error as EventStreamApiError, Options as EventStreamingApiOptions}, @@ -248,15 +247,17 @@ mod test { use marketplace_solver::{testing::MockSolver, SolverError}; use 
portpicker::pick_unused_port; use sequencer::{ - api::test_helpers::TestNetworkConfigBuilder, + api::{ + fs::DataSource, + options::HotshotEvents, + test_helpers::{TestNetwork, TestNetworkConfigBuilder}, + Options, + }, + persistence, persistence::no_storage::{self, NoStorage}, testing::TestConfigBuilder, SequencerApiVersion, }; - use sequencer::{ - api::{fs::DataSource, options::HotshotEvents, test_helpers::TestNetwork, Options}, - persistence, - }; use sequencer_utils::test_utils::setup_test; use surf_disco::{ socket::{Connection, Unsupported}, @@ -608,7 +609,7 @@ mod test { get_bundle(builder_client, parent_view_number, parent_commitment).await, parent_view_number, ) - } + }, Mempool::Private => { submit_and_get_bundle_with_private_mempool( builder_client, @@ -616,7 +617,7 @@ mod test { urls, ) .await - } + }, }; assert_eq!(bundle.transactions, vec![registered_transaction.clone()]); @@ -728,7 +729,7 @@ mod test { get_bundle(builder_client, parent_view_number, parent_commitment).await, parent_view_number, ) - } + }, Mempool::Private => { submit_and_get_bundle_with_private_mempool( builder_client, @@ -736,7 +737,7 @@ mod test { urls, ) .await - } + }, }; assert_eq!(bundle.transactions, vec![unregistered_transaction.clone()]); diff --git a/marketplace-builder/src/hooks.rs b/marketplace-builder/src/hooks.rs index ede7452152..e96ec8ca1b 100644 --- a/marketplace-builder/src/hooks.rs +++ b/marketplace-builder/src/hooks.rs @@ -1,39 +1,21 @@ -use std::collections::HashSet; -use std::sync::Arc; -use std::time::Duration; +use std::{collections::HashSet, sync::Arc, time::Duration}; use async_lock::RwLock; use async_trait::async_trait; -use espresso_types::v0_99::BidTxBody; -use tokio::{spawn, time::sleep}; - -use espresso_types::v0_99::RollupRegistration; - -use espresso_types::MarketplaceVersion; -use espresso_types::SeqTypes; -use hotshot::types::EventType; - -use hotshot::types::Event; - -use hotshot_types::traits::node_implementation::Versions; +use espresso_types::{ + 
eth_signature_key::EthKeyPair, + v0_99::{BidTxBody, RollupRegistration}, + FeeAmount, MarketplaceVersion, NamespaceId, SeqTypes, +}; +use hotshot::types::{Event, EventType}; +use hotshot_types::traits::node_implementation::{NodeType, Versions}; use marketplace_builder_core::hooks::BuilderHooks; - -use espresso_types::FeeAmount; - -use espresso_types::eth_signature_key::EthKeyPair; - -use espresso_types::NamespaceId; - -use hotshot_types::traits::node_implementation::NodeType; - -use marketplace_solver::SolverError; -use marketplace_solver::SOLVER_API_PATH; +use marketplace_solver::{SolverError, SOLVER_API_PATH}; use sequencer::SequencerApiVersion; use surf_disco::Client; - use tide_disco::Url; -use tracing::error; -use tracing::info; +use tokio::{spawn, time::sleep}; +use tracing::{error, info}; /// Configurations for bid submission. pub struct BidConfig { @@ -67,11 +49,11 @@ pub async fn fetch_namespaces_to_skip(solver_base_url: Url) -> Option { error!("Failed to get the registered rollups: {:?}.", e); None - } + }, } } @@ -130,7 +112,7 @@ impl BuilderHooks for EspressoReserveHooks { Err(e) => { error!("Failed to sign the bid txn: {:?}.", e); return; - } + }, }; let solver_client = connect_to_solver(solver_base_url); @@ -172,12 +154,12 @@ impl BuilderHooks for EspressoFallbackHooks { Some(namespaces_to_skip) => { transactions.retain(|txn| !namespaces_to_skip.contains(&txn.namespace())); transactions - } + }, // Solver connection has failed and we don't have up-to-date information on this None => { error!("Not accepting transactions due to outdated information"); Vec::new() - } + }, } } diff --git a/marketplace-builder/src/lib.rs b/marketplace-builder/src/lib.rs index 18616cea5b..b58f7788d8 100755 --- a/marketplace-builder/src/lib.rs +++ b/marketplace-builder/src/lib.rs @@ -6,6 +6,7 @@ use std::{ marker::PhantomData, mem, net::{IpAddr, Ipv4Addr}, + sync::Arc, thread::Builder, }; @@ -35,16 +36,13 @@ use hotshot_builder_api::v0_99::builder::{ BuildError, Error as 
BuilderApiError, Options as HotshotBuilderApiOptions, }; use hotshot_orchestrator::client::{OrchestratorClient, ValidatorArgs}; -use hotshot_types::network::NetworkConfig; -use marketplace_builder_core::service::{GlobalState, ProxyGlobalState}; -use std::sync::Arc; -use tokio::{spawn, task::JoinHandle}; // Should move `STAKE_TABLE_CAPACITY` in the sequencer repo when we have variate stake table support use hotshot_stake_table::config::STAKE_TABLE_CAPACITY; use hotshot_types::{ consensus::ConsensusMetricsValue, event::LeafInfo, light_client::StateKeyPair, + network::NetworkConfig, signature_key::{BLSPrivKey, BLSPubKey}, traits::{ block_contents::{BlockHeader, BlockPayload, EncodeBytes, GENESIS_VID_NUM_STORAGE_NODES}, @@ -57,6 +55,7 @@ use hotshot_types::{ }; use jf_merkle_tree::{namespaced_merkle_tree::NamespacedMerkleTreeScheme, MerkleTreeScheme}; use jf_signature::bls_over_bn254::VerKey; +use marketplace_builder_core::service::{GlobalState, ProxyGlobalState}; use sequencer::{ catchup::StatePeers, context::{Consensus, SequencerContext}, @@ -66,6 +65,7 @@ use sequencer::{ }; use surf_disco::Client; use tide_disco::{app, method::ReadState, App, Url}; +use tokio::{spawn, task::JoinHandle}; use tracing::error; use vbs::version::{StaticVersion, StaticVersionType}; diff --git a/marketplace-solver/src/api.rs b/marketplace-solver/src/api.rs index 5f0d9c8c30..11ed573a9a 100644 --- a/marketplace-solver/src/api.rs +++ b/marketplace-solver/src/api.rs @@ -164,7 +164,7 @@ fn merge_toml(into: &mut Value, from: Value) { Entry::Occupied(mut entry) => merge_toml(entry.get_mut(), value), Entry::Vacant(entry) => { entry.insert(value); - } + }, } } } diff --git a/marketplace-solver/src/database.rs b/marketplace-solver/src/database.rs index c2d09f84af..06986fb6c3 100644 --- a/marketplace-solver/src/database.rs +++ b/marketplace-solver/src/database.rs @@ -60,7 +60,7 @@ impl PostgresClient { } connect_opts.to_url_lossy() - } + }, }; if let Some(max_connections) = max_connections { @@ 
-149,9 +149,10 @@ pub mod mock { #[cfg(all(test, not(target_os = "windows"), not(feature = "embedded-db")))] mod test { - use crate::database::mock::setup_mock_database; use hotshot::helpers::initialize_logging; + use crate::database::mock::setup_mock_database; + #[tokio::test(flavor = "multi_thread")] async fn test_database_connection() { initialize_logging(); diff --git a/marketplace-solver/src/events.rs b/marketplace-solver/src/events.rs index 913fbe3151..85a49ca1dc 100644 --- a/marketplace-solver/src/events.rs +++ b/marketplace-solver/src/events.rs @@ -59,7 +59,7 @@ pub async fn handle_events( match event.event { hotshot::types::EventType::ViewFinished { view_number } => { tracing::debug!("received view finished event {view_number:?}") - } + }, _ => (), } } @@ -69,6 +69,8 @@ pub async fn handle_events( #[cfg(any(test, feature = "testing"))] pub mod mock { + use std::{sync::Arc, time::Duration}; + use async_lock::RwLock; use espresso_types::SeqTypes; use hotshot::rand::{self}; @@ -83,7 +85,6 @@ pub mod mock { }; use portpicker::pick_unused_port; use rand::{rngs::OsRng, RngCore}; - use std::{sync::Arc, time::Duration}; use tide_disco::{App, Url}; use tokio::{spawn, task::JoinHandle, time::sleep}; use vbs::version::{StaticVersion, StaticVersionType}; @@ -184,8 +185,7 @@ pub mod mock { mod test { use espresso_types::SeqTypes; use futures::StreamExt as _; - use hotshot::helpers::initialize_logging; - use hotshot::types::Event; + use hotshot::{helpers::initialize_logging, types::Event}; use hotshot_events_service::events_source::StartupInfo; use surf_disco::Client; diff --git a/marketplace-solver/src/testing.rs b/marketplace-solver/src/testing.rs index b6745fa86f..70b392fa18 100755 --- a/marketplace-solver/src/testing.rs +++ b/marketplace-solver/src/testing.rs @@ -122,6 +122,8 @@ impl MockSolver { #[cfg(all(test, not(feature = "embedded-db")))] mod test { + use std::{str::FromStr, time::Duration}; + use committable::Committable; use espresso_types::{ v0_99::{ @@ 
-132,7 +134,6 @@ mod test { }; use hotshot::types::{BLSPubKey, SignatureKey}; use hotshot_types::traits::node_implementation::NodeType; - use std::{str::FromStr, time::Duration}; use tide_disco::Url; use crate::{testing::MockSolver, SolverError}; @@ -192,7 +193,7 @@ mod test { let client = surf_disco::Client::::new(solver_api); client.connect(None).await; - let (reg_ns_1, _, _) = + let (reg_ns_1, ..) = register_rollup_helper(1, Some("http://localhost"), 200, true, "test").await; // registering a rollup @@ -230,7 +231,7 @@ mod test { let client = surf_disco::Client::::new(solver_api); client.connect(None).await; - let (reg_ns_1, _, _) = + let (reg_ns_1, ..) = register_rollup_helper(1, Some("http://localhost"), 200, true, "test").await; // registering a rollup @@ -268,7 +269,7 @@ mod test { // Ensure the error indicates an invalid signature match err { SolverError::InvalidSignature(signature) - if reg_ns_2.signature.to_string() == signature => {} + if reg_ns_2.signature.to_string() == signature => {}, _ => panic!("err {err:?}"), } } @@ -375,7 +376,7 @@ mod test { .unwrap_err(); match err { - SolverError::Database(_) => {} + SolverError::Database(_) => {}, _ => panic!("err {err:?}"), } } @@ -532,7 +533,7 @@ mod test { client.connect(Some(Duration::from_secs(5))).await; // Register the first rollup (ns = 1) - let (reg_ns_1, _, _) = + let (reg_ns_1, ..) = register_rollup_helper(1, Some("http://localhost"), 200, true, "test").await; let _: RollupRegistration = client .post("register_rollup") @@ -543,7 +544,7 @@ mod test { .unwrap(); // Register the second rollup (ns = 2) - let (reg_ns_2, _, _) = + let (reg_ns_2, ..) 
= register_rollup_helper(2, Some("http://localhost"), 200, true, "test").await; let _: RollupRegistration = client .post("register_rollup") diff --git a/node-metrics/src/api/node_validator/v0/cdn/mod.rs b/node-metrics/src/api/node_validator/v0/cdn/mod.rs index 9359afec01..a1a5c01b8a 100644 --- a/node-metrics/src/api/node_validator/v0/cdn/mod.rs +++ b/node-metrics/src/api/node_validator/v0/cdn/mod.rs @@ -1,4 +1,3 @@ -use crate::api::node_validator::v0::create_node_validator_api::ExternalMessage; use espresso_types::{PubKey, SeqTypes}; use futures::{channel::mpsc::SendError, Sink, SinkExt}; use hotshot::{ @@ -15,6 +14,8 @@ use hotshot_types::{ use tokio::{spawn, task::JoinHandle}; use url::Url; +use crate::api::node_validator::v0::create_node_validator_api::ExternalMessage; + /// ConnectedNetworkConsumer represents a trait that splits up a portion of /// the ConnectedNetwork trait, so that the consumer only needs to be aware of /// the `wait_for_ready` and `recv_message` functions. @@ -95,7 +96,7 @@ impl CdnReceiveMessagesTask { Err(err) => { tracing::error!("error receiving message: {:?}", err); continue; - } + }, }; // We want to try and decode this message. 
@@ -106,17 +107,17 @@ impl CdnReceiveMessagesTask { Err(err) => { tracing::error!("error deserializing message: {:?}", err); continue; - } + }, }; let external_message_deserialize_result = match message.kind { MessageKind::External(external_message) => { bincode::deserialize::(&external_message) - } + }, _ => { tracing::error!("unexpected message kind: {:?}", message); continue; - } + }, }; let external_message = match external_message_deserialize_result { @@ -124,7 +125,7 @@ impl CdnReceiveMessagesTask { Err(err) => { tracing::error!("error deserializing message: {:?}", err); continue; - } + }, }; match external_message { @@ -137,11 +138,11 @@ impl CdnReceiveMessagesTask { tracing::error!("error sending public api url: {:?}", err); return; } - } + }, _ => { // We're not concerned about other message types - } + }, } } } @@ -237,7 +238,7 @@ impl BroadcastRollCallTask { Err(err) => { tracing::error!("error serializing rollcall request: {:?}", err); return; - } + }, }; let hotshot_message = Message:: { @@ -250,7 +251,7 @@ impl BroadcastRollCallTask { Err(err) => { tracing::error!("error serializing hotshot message: {:?}", err); return; - } + }, }; let broadcast_result = network @@ -278,31 +279,33 @@ impl Drop for BroadcastRollCallTask { #[cfg(test)] mod test { - use super::{BroadcastRollCallTask, ConnectedNetworkConsumer, ConnectedNetworkPublisher}; - use crate::api::node_validator::v0::create_node_validator_api::ExternalMessage; - use crate::api::node_validator::v0::{ - cdn::CdnReceiveMessagesTask, create_node_validator_api::RollCallInfo, - }; use core::panic; + use std::time::Duration; + use espresso_types::SeqTypes; - use futures::channel::mpsc::Sender; - use futures::SinkExt; use futures::{ - channel::mpsc::{self}, - StreamExt, + channel::mpsc::{ + Sender, {self}, + }, + SinkExt, StreamExt, }; - use hotshot::types::SignatureKey; use hotshot::{ traits::NetworkError, - types::{BLSPubKey, Message}, + types::{BLSPubKey, Message, SignatureKey}, }; - use 
hotshot_types::message::{DataMessage, MessageKind}; - use hotshot_types::traits::network::{BroadcastDelay, ResponseMessage}; - use std::time::Duration; - use tokio::time::error::Elapsed; - use tokio::time::{sleep, timeout}; + use hotshot_types::{ + message::{DataMessage, MessageKind}, + traits::network::{BroadcastDelay, ResponseMessage}, + }; + use tokio::time::{error::Elapsed, sleep, timeout}; use url::Url; + use super::{BroadcastRollCallTask, ConnectedNetworkConsumer, ConnectedNetworkPublisher}; + use crate::api::node_validator::v0::{ + cdn::CdnReceiveMessagesTask, + create_node_validator_api::{ExternalMessage, RollCallInfo}, + }; + /// [TestConnectedNetworkConsumer] is a test implementation of the /// [ConnectedNetworkConsumer] trait that allows for the simulation of /// network messages being received. @@ -564,7 +567,7 @@ mod test { public_key, BLSPubKey::generated_from_seed_indexed([0; 32], 0).0 ); - } + }, _ => panic!("unexpected external message"), } diff --git a/node-metrics/src/api/node_validator/v0/create_node_validator_api.rs b/node-metrics/src/api/node_validator/v0/create_node_validator_api.rs index c60d089e62..d13b6ed950 100644 --- a/node-metrics/src/api/node_validator/v0/create_node_validator_api.rs +++ b/node-metrics/src/api/node_validator/v0/create_node_validator_api.rs @@ -1,5 +1,17 @@ use std::sync::Arc; +use async_lock::RwLock; +use espresso_types::{PubKey, SeqTypes}; +use futures::{ + channel::mpsc::{self, Receiver, SendError, Sender}, + Sink, SinkExt, Stream, StreamExt, +}; +use hotshot_query_service::Leaf2; +use hotshot_types::event::{Event, EventType}; +use serde::{Deserialize, Serialize}; +use tokio::{spawn, task::JoinHandle}; +use url::Url; + use super::{get_stake_table_from_sequencer, ProcessNodeIdentityUrlStreamTask}; use crate::service::{ client_id::ClientId, @@ -12,17 +24,6 @@ use crate::service::{ data_state::{DataState, ProcessLeafStreamTask, ProcessNodeIdentityStreamTask}, server_message::ServerMessage, }; -use async_lock::RwLock; 
-use espresso_types::{PubKey, SeqTypes}; -use futures::{ - channel::mpsc::{self, Receiver, SendError, Sender}, - Sink, SinkExt, Stream, StreamExt, -}; -use hotshot_query_service::Leaf2; -use hotshot_types::event::{Event, EventType}; -use serde::{Deserialize, Serialize}; -use tokio::{spawn, task::JoinHandle}; -use url::Url; pub struct NodeValidatorAPI { pub process_internal_client_message_handle: Option, @@ -119,7 +120,7 @@ impl HotShotEventProcessingTask { None => { tracing::info!("event stream closed"); break; - } + }, }; let Event { event, .. } = event; @@ -135,7 +136,7 @@ impl HotShotEventProcessingTask { panic!("HotShotEventProcessingTask leaf sender is closed, unrecoverable, the block state will stagnate."); } } - } + }, EventType::ExternalMessageReceived { data, .. } => { let roll_call_info = match bincode::deserialize(&data) { @@ -147,12 +148,12 @@ impl HotShotEventProcessingTask { err ); continue; - } + }, _ => { // Ignore any other potentially recognized messages continue; - } + }, }; let public_api_url = roll_call_info.public_api_url; @@ -163,11 +164,11 @@ impl HotShotEventProcessingTask { tracing::error!("url sender closed: {}", err); panic!("HotShotEventProcessingTask url sender is closed, unrecoverable, the node state will stagnate."); } - } + }, _ => { // Ignore all other events continue; - } + }, } } } @@ -236,7 +237,7 @@ impl ProcessExternalMessageHandlingTask { None => { tracing::error!("external message receiver closed"); break; - } + }, }; match external_message { @@ -248,12 +249,12 @@ impl ProcessExternalMessageHandlingTask { tracing::error!("url sender closed: {}", err); break; } - } + }, _ => { // Ignore all other messages continue; - } + }, } } } @@ -368,6 +369,10 @@ pub async fn create_node_validator_processing( #[cfg(test)] mod test { + use futures::channel::mpsc::{self, Sender}; + use tide_disco::App; + use tokio::spawn; + use crate::{ api::node_validator::v0::{ HotshotQueryServiceLeafStreamRetriever, ProcessProduceLeafStreamTask, @@ 
-375,9 +380,6 @@ mod test { }, service::{client_message::InternalClientMessage, server_message::ServerMessage}, }; - use futures::channel::mpsc::{self, Sender}; - use tide_disco::App; - use tokio::spawn; struct TestState(Sender>>); @@ -399,14 +401,14 @@ mod test { Ok(node_validator_api) => node_validator_api, Err(err) => { panic!("error defining node validator api: {:?}", err); - } + }, }; match app.register_module("node-validator", node_validator_api) { - Ok(_) => {} + Ok(_) => {}, Err(err) => { panic!("error registering node validator api: {:?}", err); - } + }, } let (leaf_sender, leaf_receiver) = mpsc::channel(10); @@ -449,7 +451,7 @@ mod test { Err(err) => { panic!("error defining node validator api: {:?}", err); - } + }, }; // We would like to wait until being signaled diff --git a/node-metrics/src/api/node_validator/v0/mod.rs b/node-metrics/src/api/node_validator/v0/mod.rs index b364c105ee..c152a8e81b 100644 --- a/node-metrics/src/api/node_validator/v0/mod.rs +++ b/node-metrics/src/api/node_validator/v0/mod.rs @@ -1,38 +1,35 @@ pub mod cdn; pub mod create_node_validator_api; -use crate::service::client_message::{ClientMessage, InternalClientMessage}; -use crate::service::data_state::{LocationDetails, NodeIdentity}; -use crate::service::server_message::ServerMessage; +use std::{fmt, future::Future, io::BufRead, pin::Pin, str::FromStr, time::Duration}; + use espresso_types::{BackoffParams, SeqTypes}; -use futures::channel::mpsc::SendError; -use futures::future::Either; use futures::{ - channel::mpsc::{self, Sender}, + channel::mpsc::{self, SendError, Sender}, + future::Either, FutureExt, Sink, SinkExt, Stream, StreamExt, }; use hotshot_query_service::Leaf2; use hotshot_stake_table::vec_based::StakeTable; -use hotshot_types::light_client::{CircuitField, StateVerKey}; -use hotshot_types::signature_key::BLSPubKey; -use hotshot_types::traits::{signature_key::StakeTableEntryType, stake_table::StakeTableScheme}; -use hotshot_types::PeerConfig; +use hotshot_types::{ + 
light_client::{CircuitField, StateVerKey}, + signature_key::BLSPubKey, + traits::{signature_key::StakeTableEntryType, stake_table::StakeTableScheme}, + PeerConfig, +}; use prometheus_parse::{Sample, Scrape}; use serde::{Deserialize, Serialize}; -use std::fmt; -use std::future::Future; -use std::io::BufRead; -use std::pin::Pin; -use std::str::FromStr; -use std::time::Duration; -use tide_disco::socket::Connection; -use tide_disco::{api::ApiError, Api}; -use tokio::spawn; -use tokio::task::JoinHandle; -use tokio::time::sleep; +use tide_disco::{api::ApiError, socket::Connection, Api}; +use tokio::{spawn, task::JoinHandle, time::sleep}; use url::Url; use vbs::version::{StaticVersion, StaticVersionType, Version}; +use crate::service::{ + client_message::{ClientMessage, InternalClientMessage}, + data_state::{LocationDetails, NodeIdentity}, + server_message::ServerMessage, +}; + /// CONSTANT for protocol major version pub const VERSION_MAJ: u16 = 0; @@ -64,11 +61,11 @@ impl fmt::Display for Error { match self { Self::UnhandledSurfDisco(status, msg) => { write!(f, "Unhandled Surf Disco Error: {} - {}", status, msg) - } + }, Self::UnhandledTideDisco(status, msg) => { write!(f, "Unhandled Tide Disco Error: {} - {}", status, msg) - } + }, } } } @@ -255,7 +252,7 @@ where // let's queue up the next client message to receive next_client_message = socket_stream.next(); next_server_message = remaining_server_message; - } + }, Either::Right((server_message, remaining_client_message)) => { // Alright, we have a server message, we want to forward it // to the down-stream client. 
@@ -277,7 +274,7 @@ where // let's queue up the next server message to receive next_server_message = server_message_receiver.next(); next_client_message = remaining_client_message; - } + }, } } @@ -327,7 +324,7 @@ pub async fn get_stake_table_from_sequencer( Err(err) => { tracing::info!("retrieve stake table request failed: {}", err); return Err(err); - } + }, }; let public_hot_shot_config = sequencer_config.config; @@ -481,7 +478,7 @@ impl LeafStreamRetriever for HotshotQueryServiceLeafStreamRetriever { Err(err) => { tracing::info!("retrieve block height request failed: {}", err); return Err(err); - } + }, }; let latest_block_start = block_height.saturating_sub(50); @@ -504,7 +501,7 @@ impl LeafStreamRetriever for HotshotQueryServiceLeafStreamRetriever { Err(err) => { tracing::info!("retrieve leaves stream failed: {}", err); return Err(err); - } + }, }; Ok(leaves_stream) @@ -621,7 +618,7 @@ impl ProcessProduceLeafStreamTask { delay = backoff_params.backoff(delay); sleep(delay).await; continue; - } + }, Ok(leaves_stream) => leaves_stream, }; @@ -795,7 +792,7 @@ pub fn populate_node_identity_from_scrape(node_identity: &mut NodeIdentity, scra // We couldn't parse the public key, so we can't create a NodeIdentity. tracing::info!("parsing public key failed: {}", err); return; - } + }, } } else { // We were unable to find the public key in the scrape result. @@ -878,7 +875,7 @@ pub fn node_identity_from_scrape(scrape: Scrape) -> Option { Err(err) => { tracing::info!("parsing public key failed: {}", err); return None; - } + }, }; let mut node_identity = NodeIdentity::from_public_key(public_key); @@ -937,7 +934,7 @@ impl ProcessNodeIdentityUrlStreamTask { None => { tracing::info!("node identity url stream closed"); return; - } + }, }; // Alright we have a new Url to try and scrape for a Node Identity. @@ -949,7 +946,7 @@ impl ProcessNodeIdentityUrlStreamTask { Err(err) => { tracing::warn!("get node identity from url failed. 
bad base url?: {}", err); continue; - } + }, }; let send_result = node_identity_sender.send(node_identity).await; diff --git a/node-metrics/src/lib.rs b/node-metrics/src/lib.rs index cc1d7c8e90..024e29c0a5 100644 --- a/node-metrics/src/lib.rs +++ b/node-metrics/src/lib.rs @@ -99,15 +99,6 @@ pub mod api; pub mod service; -use crate::{ - api::node_validator::v0::{ - cdn::{BroadcastRollCallTask, CdnReceiveMessagesTask}, - create_node_validator_api::{create_node_validator_processing, NodeValidatorConfig}, - HotshotQueryServiceLeafStreamRetriever, ProcessProduceLeafStreamTask, - StateClientMessageSender, STATIC_VER_0_1, - }, - service::{client_message::InternalClientMessage, server_message::ServerMessage}, -}; use clap::Parser; use espresso_types::{PubKey, SeqTypes}; use futures::channel::mpsc::{self, Sender}; @@ -120,6 +111,16 @@ use tide_disco::App; use tokio::spawn; use url::Url; +use crate::{ + api::node_validator::v0::{ + cdn::{BroadcastRollCallTask, CdnReceiveMessagesTask}, + create_node_validator_api::{create_node_validator_processing, NodeValidatorConfig}, + HotshotQueryServiceLeafStreamRetriever, ProcessProduceLeafStreamTask, + StateClientMessageSender, STATIC_VER_0_1, + }, + service::{client_message::InternalClientMessage, server_message::ServerMessage}, +}; + /// Options represents the configuration options that are available for running /// the node validator service via the [run_standalone_service] function. 
/// These options are configurable via command line arguments or environment @@ -233,10 +234,10 @@ pub async fn run_standalone_service(options: Options) { api::node_validator::v0::define_api().expect("error defining node validator api"); match app.register_module("node-validator", node_validator_api) { - Ok(_) => {} + Ok(_) => {}, Err(err) => { panic!("error registering node validator api: {:?}", err); - } + }, } let (leaf_sender, leaf_receiver) = mpsc::channel(10); @@ -260,7 +261,7 @@ pub async fn run_standalone_service(options: Options) { Err(err) => { panic!("error defining node validator api: {:?}", err); - } + }, }; let _cdn_tasks = if let Some(cdn_broker_url_string) = options.cdn_marshal_endpoint() { @@ -278,7 +279,7 @@ pub async fn run_standalone_service(options: Options) { Ok(cdn_network) => cdn_network, Err(err) => { panic!("error creating cdn network: {:?}", err); - } + }, }; let url_sender = node_validator_task_state.url_sender.clone(); diff --git a/node-metrics/src/service/client_id/mod.rs b/node-metrics/src/service/client_id/mod.rs index 11353b6e5d..65213982eb 100644 --- a/node-metrics/src/service/client_id/mod.rs +++ b/node-metrics/src/service/client_id/mod.rs @@ -1,6 +1,7 @@ -use serde::{Deserialize, Serialize}; use std::ops::{Add, AddAssign}; +use serde::{Deserialize, Serialize}; + /// [ClientId] represents the unique identifier for a client that is connected /// to the server. 
/// @@ -108,8 +109,10 @@ mod tests { #[test] fn test_hash() { - use std::collections::hash_map::DefaultHasher; - use std::hash::{Hash, Hasher}; + use std::{ + collections::hash_map::DefaultHasher, + hash::{Hash, Hasher}, + }; let hash_1 = { let client_id = ClientId::from_count(1); diff --git a/node-metrics/src/service/client_message/mod.rs b/node-metrics/src/service/client_message/mod.rs index d19881430f..e4eba18b0d 100644 --- a/node-metrics/src/service/client_message/mod.rs +++ b/node-metrics/src/service/client_message/mod.rs @@ -1,6 +1,7 @@ -use super::client_id::ClientId; use serde::{Deserialize, Serialize}; +use super::client_id::ClientId; + /// [ClientMessage] represents the messages that the client can send to the /// server for a request. /// @@ -38,12 +39,13 @@ impl ClientMessage { #[cfg(test)] mod tests { - use super::InternalClientMessage; - use super::*; - use crate::service::server_message::ServerMessage; - use futures::channel::mpsc::Sender; use std::iter::zip; + use futures::channel::mpsc::Sender; + + use super::{InternalClientMessage, *}; + use crate::service::server_message::ServerMessage; + impl PartialEq for InternalClientMessage { fn eq(&self, other: &Self) -> bool { match (self, other) { @@ -141,7 +143,7 @@ mod tests { match internal_client_message { InternalClientMessage::Request(id, _) => { assert_eq!(id, client_id); - } + }, _ => panic!("Unexpected InternalClientMessage"), } } diff --git a/node-metrics/src/service/client_state/mod.rs b/node-metrics/src/service/client_state/mod.rs index 1ca1415d57..7bc798db33 100644 --- a/node-metrics/src/service/client_state/mod.rs +++ b/node-metrics/src/service/client_state/mod.rs @@ -1,20 +1,21 @@ -use super::{ - client_id::ClientId, - client_message::{ClientMessage, InternalClientMessage}, - data_state::{DataState, NodeIdentity}, - server_message::ServerMessage, +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, }; + +use async_lock::{RwLock, RwLockWriteGuard}; use bitvec::vec::BitVec; use 
espresso_types::SeqTypes; use futures::{channel::mpsc::SendError, Sink, SinkExt, Stream, StreamExt}; use hotshot_query_service::explorer::{BlockDetail, ExplorerHistograms}; -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, -}; use tokio::{spawn, task::JoinHandle}; -use async_lock::{RwLock, RwLockWriteGuard}; +use super::{ + client_id::ClientId, + client_message::{ClientMessage, InternalClientMessage}, + data_state::{DataState, NodeIdentity}, + server_message::ServerMessage, +}; /// ClientState represents the service state of the connected clients. /// It maintains and represents the connected clients, and their subscriptions. @@ -112,7 +113,7 @@ impl std::fmt::Display for HandleConnectedError { match self { HandleConnectedError::ClientSendError(err) => { write!(f, "handle connected error: client send error: {}", err) - } + }, } } } @@ -235,7 +236,7 @@ impl std::fmt::Display for HandleRequestBlocksSnapshotsError { "handle request blocks snapshot error: client send error:: {}", err ) - } + }, } } } @@ -306,7 +307,7 @@ impl std::fmt::Display for HandleRequestNodeIdentitySnapshotError { "handle request node identity snapshot error: client send error: {}", err ) - } + }, } } } @@ -374,7 +375,7 @@ impl std::fmt::Display for HandleRequestHistogramSnapshotError { "handle request histogram snapshot error: client send error: {}", err ) - } + }, } } } @@ -461,7 +462,7 @@ impl std::fmt::Display for HandleRequestVotersSnapshotError { "handle request voters snapshot error: client send error: {}", err ) - } + }, } } } @@ -557,27 +558,27 @@ impl std::fmt::Display for ProcessClientMessageError { match self { ProcessClientMessageError::Connected(err) => { write!(f, "process client message error: connected: {}", err) - } + }, ProcessClientMessageError::BlocksSnapshot(err) => { write!(f, "process client message error: blocks snapshot: {}", err) - } + }, ProcessClientMessageError::NodeIdentitySnapshot(err) => { write!( f, "process client message error: node identity snapshot: 
{}", err ) - } + }, ProcessClientMessageError::HistogramSnapshot(err) => { write!( f, "process client message error: histogram snapshot: {}", err ) - } + }, ProcessClientMessageError::VotersSnapshot(err) => { write!(f, "process client message error: voters snapshot: {}", err) - } + }, } } } @@ -615,27 +616,27 @@ where InternalClientMessage::Connected(sender) => { handle_client_message_connected(sender, client_thread_state).await?; Ok(()) - } + }, InternalClientMessage::Disconnected(client_id) => { handle_client_message_disconnected(client_id, client_thread_state).await; Ok(()) - } + }, InternalClientMessage::Request(client_id, ClientMessage::SubscribeLatestBlock) => { handle_client_message_subscribe_latest_block(client_id, client_thread_state).await; Ok(()) - } + }, InternalClientMessage::Request(client_id, ClientMessage::SubscribeNodeIdentity) => { handle_client_message_subscribe_node_identity(client_id, client_thread_state).await; Ok(()) - } + }, InternalClientMessage::Request(client_id, ClientMessage::SubscribeVoters) => { handle_client_message_subscribe_voters(client_id, client_thread_state).await; Ok(()) - } + }, InternalClientMessage::Request(client_id, ClientMessage::RequestBlocksSnapshot) => { handle_client_message_request_blocks_snapshot( @@ -645,7 +646,7 @@ where ) .await?; Ok(()) - } + }, InternalClientMessage::Request(client_id, ClientMessage::RequestNodeIdentitySnapshot) => { handle_client_message_request_node_identity_snapshot( @@ -655,7 +656,7 @@ where ) .await?; Ok(()) - } + }, InternalClientMessage::Request(client_id, ClientMessage::RequestHistogramSnapshot) => { handle_client_message_request_histogram_snapshot( @@ -665,7 +666,7 @@ where ) .await?; Ok(()) - } + }, InternalClientMessage::Request(client_id, ClientMessage::RequestVotersSnapshot) => { handle_client_message_request_voters_snapshot( @@ -675,7 +676,7 @@ where ) .await?; Ok(()) - } + }, } } @@ -1180,6 +1181,22 @@ impl Drop for ProcessDistributeVotersHandlingTask { #[cfg(test)] pub mod 
tests { + use std::{sync::Arc, time::Duration}; + + use async_lock::RwLock; + use bitvec::vec::BitVec; + use espresso_types::{Leaf2, NodeState, ValidatedState}; + use futures::{ + channel::mpsc::{self, Sender}, + SinkExt, StreamExt, + }; + use hotshot_example_types::node_types::TestVersions; + use hotshot_types::{signature_key::BLSPubKey, traits::signature_key::SignatureKey}; + use tokio::{ + spawn, + time::{sleep, timeout}, + }; + use super::{ClientThreadState, InternalClientMessageProcessingTask}; use crate::service::{ client_id::ClientId, @@ -1194,20 +1211,6 @@ pub mod tests { }, server_message::ServerMessage, }; - use async_lock::RwLock; - use bitvec::vec::BitVec; - use espresso_types::{Leaf2, NodeState, ValidatedState}; - use futures::{ - channel::mpsc::{self, Sender}, - SinkExt, StreamExt, - }; - use hotshot_example_types::node_types::TestVersions; - use hotshot_types::{signature_key::BLSPubKey, traits::signature_key::SignatureKey}; - use std::{sync::Arc, time::Duration}; - use tokio::{ - spawn, - time::{sleep, timeout}, - }; pub fn create_test_client_thread_state() -> ClientThreadState> { ClientThreadState { diff --git a/node-metrics/src/service/data_state/mod.rs b/node-metrics/src/service/data_state/mod.rs index ae820f4e5d..82309f129b 100644 --- a/node-metrics/src/service/data_state/mod.rs +++ b/node-metrics/src/service/data_state/mod.rs @@ -1,6 +1,8 @@ pub mod location_details; pub mod node_identity; +use std::{collections::HashSet, iter::zip, sync::Arc}; + use async_lock::RwLock; use bitvec::vec::BitVec; use circular_buffer::CircularBuffer; @@ -23,7 +25,6 @@ use hotshot_types::{ }; pub use location_details::LocationDetails; pub use node_identity::NodeIdentity; -use std::{collections::HashSet, iter::zip, sync::Arc}; use time::OffsetDateTime; use tokio::{spawn, task::JoinHandle}; @@ -52,7 +53,7 @@ impl DataState { let stake_table_iter_result = stake_table.try_iter(SnapshotVersion::Head); match stake_table_iter_result { Ok(into_iter) => into_iter - 
.map(|(key, _, _)| NodeIdentity::from_public_key(key)) + .map(|(key, ..)| NodeIdentity::from_public_key(key)) .collect(), Err(_) => vec![], } @@ -106,10 +107,10 @@ impl DataState { }; let missing_node_identity_entries = - stake_table_iter.filter(|(key, _, _)| !current_identity_set.contains(key)); + stake_table_iter.filter(|(key, ..)| !current_identity_set.contains(key)); self.node_identity.extend( - missing_node_identity_entries.map(|(key, _, _)| NodeIdentity::from_public_key(key)), + missing_node_identity_entries.map(|(key, ..)| NodeIdentity::from_public_key(key)), ); } @@ -200,10 +201,10 @@ impl std::fmt::Display for ProcessLeafError { match self { ProcessLeafError::BlockSendError(err) => { write!(f, "error sending block detail to sender: {}", err) - } + }, ProcessLeafError::VotersSendError(err) => { write!(f, "error sending voters to sender: {}", err) - } + }, } } } @@ -283,7 +284,7 @@ where // In this case, we just want to determine who voted for this // Leaf. - let (key, _, _): (BLSPubKey, _, _) = entry; + let (key, ..): (BLSPubKey, _, _) = entry; key }); @@ -396,10 +397,10 @@ impl ProcessLeafStreamTask { match err { ProcessLeafError::BlockSendError(_) => { panic!("ProcessLeafStreamTask: process_incoming_leaf failed, underlying sink is closed, blocks will stagnate: {}", err) - } + }, ProcessLeafError::VotersSendError(_) => { panic!("ProcessLeafStreamTask: process_incoming_leaf failed, underlying sink is closed, voters will stagnate: {}", err) - } + }, } } } @@ -429,7 +430,7 @@ impl std::fmt::Display for ProcessNodeIdentityError { match self { ProcessNodeIdentityError::SendError(err) => { write!(f, "error sending node identity to sender: {}", err) - } + }, } } } @@ -563,22 +564,23 @@ impl Drop for ProcessNodeIdentityStreamTask { #[cfg(test)] mod tests { - use super::{DataState, ProcessLeafStreamTask}; - use crate::service::data_state::{ - LocationDetails, NodeIdentity, ProcessNodeIdentityStreamTask, - }; + use std::{sync::Arc, time::Duration}; + use 
async_lock::RwLock; use espresso_types::{ v0_99::ChainConfig, BlockMerkleTree, FeeMerkleTree, Leaf2, NodeState, ValidatedState, }; use futures::{channel::mpsc, SinkExt, StreamExt}; - use hotshot_example_types::node_types::TestVersions; use hotshot_types::{signature_key::BLSPubKey, traits::signature_key::SignatureKey}; - use std::{sync::Arc, time::Duration}; use tokio::time::timeout; use url::Url; + use super::{DataState, ProcessLeafStreamTask}; + use crate::service::data_state::{ + LocationDetails, NodeIdentity, ProcessNodeIdentityStreamTask, + }; + #[tokio::test(flavor = "multi_thread")] async fn test_process_leaf_error_debug() { let (mut sender, receiver) = mpsc::channel(1); diff --git a/node-metrics/src/service/data_state/node_identity.rs b/node-metrics/src/service/data_state/node_identity.rs index 8396a81340..96bdc6ab5a 100644 --- a/node-metrics/src/service/data_state/node_identity.rs +++ b/node-metrics/src/service/data_state/node_identity.rs @@ -1,8 +1,9 @@ -use super::LocationDetails; use hotshot_types::signature_key::BLSPubKey; use serde::{Deserialize, Serialize}; use surf_disco::Url; +use super::LocationDetails; + /// [NodeIdentity] represents the identity of the node that is participating /// in the network. 
#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] @@ -126,10 +127,9 @@ impl NodeIdentity { #[cfg(test)] pub mod tests { - use super::LocationDetails; - use super::NodeIdentity; - use hotshot_types::signature_key::BLSPubKey; - use hotshot_types::traits::signature_key::SignatureKey; + use hotshot_types::{signature_key::BLSPubKey, traits::signature_key::SignatureKey}; + + use super::{LocationDetails, NodeIdentity}; pub fn create_test_node(index: u64) -> NodeIdentity { let (pub_key, _) = BLSPubKey::generated_from_seed_indexed([0; 32], index); diff --git a/node-metrics/src/service/server_message/mod.rs b/node-metrics/src/service/server_message/mod.rs index 0620de3ecb..da8b51c0fd 100644 --- a/node-metrics/src/service/server_message/mod.rs +++ b/node-metrics/src/service/server_message/mod.rs @@ -1,11 +1,12 @@ use std::sync::Arc; -use super::{client_id::ClientId, data_state::NodeIdentity}; use bitvec::vec::BitVec; use espresso_types::SeqTypes; use hotshot_query_service::explorer::{BlockDetail, ExplorerHistograms}; use serde::{Deserialize, Serialize}; +use super::{client_id::ClientId, data_state::NodeIdentity}; + /// [ServerMessage] represents the messages that the server can send to the /// client for a response. 
#[derive(Debug, Serialize, Deserialize)] diff --git a/request-response/src/lib.rs b/request-response/src/lib.rs index d2cd58221b..211261cf94 100644 --- a/request-response/src/lib.rs +++ b/request-response/src/lib.rs @@ -406,24 +406,24 @@ impl< Err(e) => { warn!("Received invalid message: {e}"); continue; - } + }, }; // Handle the message based on its type match message { Message::Request(request_message) => { self.handle_request(request_message, &mut outgoing_responses); - } + }, Message::Response(response_message) => { self.handle_response(response_message, &mut incoming_responses); - } + }, } - } + }, // An error here means the receiver will _NEVER_ receive any more messages Err(e) => { error!("Request/response receive task exited: {e}"); return; - } + }, } } } diff --git a/request-response/src/message.rs b/request-response/src/message.rs index 622be26f22..a704e1c454 100644 --- a/request-response/src/message.rs +++ b/request-response/src/message.rs @@ -140,14 +140,14 @@ impl Serializable for Message { // Write the request content bytes.extend_from_slice(request_message.to_bytes()?.as_slice()); - } + }, Message::Response(response_message) => { // Write the type (response) bytes.push(1); // Write the response content bytes.extend_from_slice(response_message.to_bytes()?.as_slice()); - } + }, }; Ok(bytes) @@ -168,13 +168,13 @@ impl Serializable for Message { Ok(Message::Request(RequestMessage::from_bytes(&read_to_end( &mut bytes, )?)?)) - } + }, 1 => { // Read the `ResponseMessage` Ok(Message::Response(ResponseMessage::from_bytes( &read_to_end(&mut bytes)?, )?)) - } + }, _ => Err(anyhow::anyhow!("invalid message type")), } } @@ -353,7 +353,7 @@ mod tests { // It should not be valid anymore (false, Duration::from_secs(1)) - } + }, 2 => { // Alter the timestamp @@ -361,13 +361,13 @@ mod tests { // It should not be valid anymore (false, Duration::from_secs(1)) - } + }, 3 => { // Change the request ttl to be 0. 
This should make the request // invalid immediately (true, Duration::from_secs(0)) - } + }, _ => unreachable!(), }; diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 6dc822914a..0b80460114 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] channel = "stable" -components = ["rustfmt", "llvm-tools-preview", "rust-src", "clippy"] +components = ["llvm-tools-preview", "rust-src", "clippy"] profile = "minimal" diff --git a/vid/rustfmt.toml b/rustfmt.toml similarity index 73% rename from vid/rustfmt.toml rename to rustfmt.toml index b288fc82e4..e4468267ac 100644 --- a/vid/rustfmt.toml +++ b/rustfmt.toml @@ -1,9 +1,8 @@ reorder_imports = true -wrap_comments = true -normalize_comments = true use_try_shorthand = true match_block_trailing_comma = true use_field_init_shorthand = true -edition = "2018" +edition = "2021" condense_wildcard_suffixes = true imports_granularity = "Crate" +group_imports = "StdExternalCrate" \ No newline at end of file diff --git a/sequencer/api/migrations/postgres/V502__epoch_drb_and_root.sql b/sequencer/api/migrations/postgres/V502__epoch_drb_and_root.sql new file mode 100644 index 0000000000..6079c41482 --- /dev/null +++ b/sequencer/api/migrations/postgres/V502__epoch_drb_and_root.sql @@ -0,0 +1,5 @@ +CREATE TABLE epoch_drb_and_root ( + epoch BIGINT PRIMARY KEY, + drb_result BYTEA, + block_header BYTEA +); \ No newline at end of file diff --git a/sequencer/api/migrations/sqlite/V302__epoch_drb_and_root.sql b/sequencer/api/migrations/sqlite/V302__epoch_drb_and_root.sql new file mode 100644 index 0000000000..09ab017e37 --- /dev/null +++ b/sequencer/api/migrations/sqlite/V302__epoch_drb_and_root.sql @@ -0,0 +1,5 @@ +CREATE TABLE epoch_drb_and_root ( + epoch BIGINT PRIMARY KEY, + drb_result BLOB, + block_header BLOB +); \ No newline at end of file diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index 3a8ebff8da..0433e9f03e 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -1,3 +1,5 
@@ +use std::{pin::Pin, sync::Arc}; + use anyhow::{bail, Context}; use async_lock::RwLock; use async_once_cell::Lazy; @@ -18,7 +20,6 @@ use hotshot_events_service::events_source::{ EventFilterSet, EventsSource, EventsStreamer, StartupInfo, }; use hotshot_query_service::data_source::ExtensibleDataSource; -use hotshot_types::vote::HasViewNumber; use hotshot_types::{ data::ViewNumber, event::Event, @@ -30,12 +31,11 @@ use hotshot_types::{ ValidatedState as _, }, utils::{View, ViewInner}, + vote::HasViewNumber, PeerConfig, }; use itertools::Itertools; use jf_merkle_tree::MerkleTreeScheme; -use std::pin::Pin; -use std::sync::Arc; use self::data_source::{HotShotConfigDataSource, NodeStateDataSource, StateSignatureDataSource}; use crate::{ @@ -284,7 +284,7 @@ impl< Ok(accounts) => return Ok(accounts), Err(err) => { tracing::info!("accounts not in memory, trying storage: {err:#}"); - } + }, } // Try storage. @@ -321,7 +321,7 @@ impl< } (Arc::new(state), delta.clone()) - } + }, _ => { // If we don't already have a leaf for this view, or if we don't have the view // at all, we can create a new view based on the recovered leaf and add it to @@ -330,7 +330,7 @@ impl< let mut state = ValidatedState::from_header(leaf.block_header()); state.fee_merkle_tree = tree.clone(); (Arc::new(state), None) - } + }, }; if let Err(err) = consensus.update_leaf(leaf, Arc::clone(&state), delta) { tracing::warn!(?view, "cannot update fetched account state: {err:#}"); @@ -352,7 +352,7 @@ impl< Ok(frontier) => return Ok(frontier), Err(err) => { tracing::info!("frontier is not in memory, trying storage: {err:#}"); - } + }, } // Try storage. @@ -368,7 +368,7 @@ impl< Ok(cf) => return Ok(cf), Err(err) => { tracing::info!("chain config is not in memory, trying storage: {err:#}"); - } + }, } // Try storage. @@ -380,7 +380,7 @@ impl< Ok(cf) => return Ok(cf), Err(err) => { tracing::info!("chain config is not in memory, trying storage: {err:#}"); - } + }, } // Try storage. 
@@ -557,17 +557,12 @@ impl, V: Versions, P: SequencerPersistence> StateSig #[cfg(any(test, feature = "testing"))] pub mod test_helpers { - use committable::Committable; - use hotshot_state_prover::service::light_client_genesis_from_stake_table; use std::time::Duration; - use tempfile::TempDir; - use tokio::{spawn, time::sleep}; - use crate::network; - use espresso_types::MockSequencerVersions; + use committable::Committable; use espresso_types::{ v0::traits::{NullEventConsumer, PersistenceOptions, StateCatchup}, - MarketplaceVersion, NamespaceId, ValidatedState, + MarketplaceVersion, MockSequencerVersions, NamespaceId, ValidatedState, }; use ethers::{prelude::Address, utils::Anvil}; use futures::{ @@ -576,6 +571,7 @@ pub mod test_helpers { }; use hotshot::types::{Event, EventType}; use hotshot_contract_adapter::light_client::{ParsedLightClientState, ParsedStakeTableState}; + use hotshot_state_prover::service::light_client_genesis_from_stake_table; use hotshot_types::{ event::LeafInfo, traits::{metrics::NoMetrics, node_implementation::ConsensusTime}, @@ -585,15 +581,16 @@ pub mod test_helpers { use portpicker::pick_unused_port; use sequencer_utils::test_utils::setup_test; use surf_disco::Client; - use tide_disco::error::ServerError; - use tide_disco::{Api, App, Error, StatusCode}; - use tokio::task::JoinHandle; + use tempfile::TempDir; + use tide_disco::{error::ServerError, Api, App, Error, StatusCode}; + use tokio::{spawn, task::JoinHandle, time::sleep}; use url::Url; use vbs::version::{StaticVersion, StaticVersionType}; use super::*; use crate::{ catchup::NullStateCatchup, + network, persistence::no_storage, testing::{ run_marketplace_builder, run_test_builder, wait_for_decide_on_handle, TestConfig, @@ -1157,13 +1154,14 @@ pub mod test_helpers { #[cfg(test)] #[espresso_macros::generic_tests] mod api_tests { + use std::fmt::Debug; + use committable::Committable; use data_source::testing::TestableSequencerDataSource; use endpoints::NamespaceProofQueryData; - use 
espresso_types::MockSequencerVersions; use espresso_types::{ traits::{EventConsumer, PersistenceOptions}, - Header, Leaf2, NamespaceId, + Header, Leaf2, MockSequencerVersions, NamespaceId, }; use ethers::utils::Anvil; use futures::{future, stream::StreamExt}; @@ -1171,23 +1169,19 @@ mod api_tests { use hotshot_query_service::availability::{ AvailabilityDataSource, BlockQueryData, VidCommonQueryData, }; - - use hotshot_types::data::ns_table::parse_ns_table; - use hotshot_types::data::vid_disperse::VidDisperseShare2; - use hotshot_types::data::{DaProposal2, EpochNumber, VidCommitment}; - use hotshot_types::simple_certificate::QuorumCertificate2; - - use hotshot_types::vid::avidm::{init_avidm_param, AvidMScheme}; use hotshot_types::{ - data::{QuorumProposal2, QuorumProposalWrapper}, + data::{ + ns_table::parse_ns_table, vid_disperse::VidDisperseShare2, DaProposal2, EpochNumber, + QuorumProposal2, QuorumProposalWrapper, VidCommitment, + }, event::LeafInfo, message::Proposal, + simple_certificate::QuorumCertificate2, traits::{node_implementation::ConsensusTime, signature_key::SignatureKey, EncodeBytes}, + vid::avidm::{init_avidm_param, AvidMScheme}, }; - use portpicker::pick_unused_port; use sequencer_utils::test_utils::setup_test; - use std::fmt::Debug; use surf_disco::Client; use test_helpers::{ catchup_test_helper, state_signature_test_helper, status_test_helper, submit_test_helper, @@ -1197,8 +1191,8 @@ mod api_tests { use vbs::version::StaticVersion; use super::{update::ApiEventConsumer, *}; - use crate::network; use crate::{ + network, persistence::no_storage::NoStorage, testing::{wait_for_decide_on_handle, TestConfigBuilder}, }; @@ -1628,10 +1622,9 @@ mod api_tests { #[cfg(test)] mod test { - use committable::{Commitment, Committable}; use std::{collections::BTreeMap, time::Duration}; - use tokio::time::sleep; + use committable::{Commitment, Committable}; use espresso_types::{ config::PublicHotShotConfig, traits::NullEventConsumer, @@ -1665,6 +1658,7 @@ mod test 
{ }; use tide_disco::{app::AppHealth, error::ServerError, healthcheck::HealthStatus}; use time::OffsetDateTime; + use tokio::time::sleep; use vbs::version::{StaticVersion, StaticVersionType, Version}; use self::{ @@ -2479,7 +2473,7 @@ mod test { let new_version = upgrade.new_version; assert_eq!(new_version, ::Upgrade::VERSION); break upgrade.new_version_first_view; - } + }, _ => continue, } }; diff --git a/sequencer/src/api/data_source.rs b/sequencer/src/api/data_source.rs index daa3518b40..a88ad528be 100644 --- a/sequencer/src/api/data_source.rs +++ b/sequencer/src/api/data_source.rs @@ -15,11 +15,13 @@ use hotshot_query_service::{ node::NodeDataSource, status::StatusDataSource, }; -use hotshot_types::traits::node_implementation::NodeType; use hotshot_types::{ data::ViewNumber, light_client::StateSignatureRequestBody, - traits::{network::ConnectedNetwork, node_implementation::Versions}, + traits::{ + network::ConnectedNetwork, + node_implementation::{NodeType, Versions}, + }, PeerConfig, }; use tide_disco::Url; diff --git a/sequencer/src/api/endpoints.rs b/sequencer/src/api/endpoints.rs index 622d50a229..cfa4a0c440 100644 --- a/sequencer/src/api/endpoints.rs +++ b/sequencer/src/api/endpoints.rs @@ -13,11 +13,12 @@ use hotshot_query_service::{ availability::{self, AvailabilityDataSource, CustomSnafu, FetchBlockSnafu}, explorer::{self, ExplorerDataSource}, merklized_state::{ - self, MerklizedState, MerklizedStateDataSource, MerklizedStateHeightPersistence, + self, MerklizedState, MerklizedStateDataSource, MerklizedStateHeightPersistence, Snapshot, }, - node, ApiState, Error, + node, + node::NodeDataSource, + ApiState, Error, }; -use hotshot_query_service::{merklized_state::Snapshot, node::NodeDataSource}; use hotshot_types::{ data::{EpochNumber, ViewNumber}, traits::{ @@ -28,7 +29,6 @@ use hotshot_types::{ use jf_merkle_tree::MerkleTreeScheme; use serde::{de::Error as _, Deserialize, Serialize}; use snafu::OptionExt; - use tagged_base64::TaggedBase64; use 
tide_disco::{method::ReadState, Api, Error as _, StatusCode}; use vbs::version::{StaticVersion, StaticVersionType}; diff --git a/sequencer/src/api/options.rs b/sequencer/src/api/options.rs index 2db7fd4ea3..0f867e633f 100644 --- a/sequencer/src/api/options.rs +++ b/sequencer/src/api/options.rs @@ -1,5 +1,7 @@ //! Sequencer-specific API options and initialization. +use std::sync::Arc; + use anyhow::{bail, Context}; use clap::Parser; use espresso_types::{ @@ -22,7 +24,6 @@ use hotshot_types::traits::{ network::ConnectedNetwork, node_implementation::Versions, }; -use std::sync::Arc; use tide_disco::{listener::RateLimitListener, method::ReadState, App, Url}; use vbs::version::StaticVersionType; diff --git a/sequencer/src/api/sql.rs b/sequencer/src/api/sql.rs index b19fdc83f1..fb8132151a 100644 --- a/sequencer/src/api/sql.rs +++ b/sequencer/src/api/sql.rs @@ -1,3 +1,5 @@ +use std::collections::{HashSet, VecDeque}; + use anyhow::{bail, ensure, Context}; use async_trait::async_trait; use committable::{Commitment, Committable}; @@ -30,7 +32,6 @@ use jf_merkle_tree::{ LookupResult, MerkleTreeScheme, }; use sqlx::{Encode, Type}; -use std::collections::{HashSet, VecDeque}; use super::{ data_source::{Provider, SequencerDataSource}, @@ -145,7 +146,7 @@ impl CatchupStorage for SqlStorage { LookupResult::Ok(_, proof) => Ok(proof), _ => { bail!("state snapshot {view:?},{height} was found but does not contain frontier at height {}; this should not be possible", height - 1); - } + }, } } } @@ -275,13 +276,13 @@ async fn load_accounts( ))? { MerkleNode::Leaf { pos, elem, .. } => { snapshot.remember(*pos, *elem, proof)?; - } + }, MerkleNode::Empty => { snapshot.non_membership_remember(*account, proof)?; - } + }, _ => { bail!("Invalid proof"); - } + }, } } @@ -442,7 +443,7 @@ async fn header_dependencies( // so the STF will be able to look it up later. 
catchup.add_chain_config(cf); cf - } + }, } }; diff --git a/sequencer/src/api/update.rs b/sequencer/src/api/update.rs index 5f49fde095..bf9d821eba 100644 --- a/sequencer/src/api/update.rs +++ b/sequencer/src/api/update.rs @@ -1,5 +1,7 @@ //! Update loop for query API state. +use std::{fmt::Debug, sync::Arc}; + use anyhow::bail; use async_trait::async_trait; use derivative::Derivative; @@ -8,8 +10,6 @@ use espresso_types::{v0::traits::SequencerPersistence, PubKey}; use hotshot::types::Event; use hotshot_query_service::data_source::UpdateDataSource; use hotshot_types::traits::{network::ConnectedNetwork, node_implementation::Versions}; -use std::fmt::Debug; -use std::sync::Arc; use super::{data_source::SequencerDataSource, StorageState}; use crate::{EventConsumer, SeqTypes}; diff --git a/sequencer/src/bin/cdn-whitelist.rs b/sequencer/src/bin/cdn-whitelist.rs index 23273f9d8d..2e47b3d049 100644 --- a/sequencer/src/bin/cdn-whitelist.rs +++ b/sequencer/src/bin/cdn-whitelist.rs @@ -9,8 +9,10 @@ use cdn_broker::reexports::discovery::{DiscoveryClient, Embedded, Redis}; use clap::Parser; use espresso_types::SeqTypes; use hotshot_orchestrator::client::OrchestratorClient; -use hotshot_types::network::NetworkConfig; -use hotshot_types::traits::{node_implementation::NodeType, signature_key::SignatureKey}; +use hotshot_types::{ + network::NetworkConfig, + traits::{node_implementation::NodeType, signature_key::SignatureKey}, +}; use surf_disco::Url; #[derive(Parser, Debug)] diff --git a/sequencer/src/bin/dev-rollup.rs b/sequencer/src/bin/dev-rollup.rs index 19736da728..ad33ab2c17 100644 --- a/sequencer/src/bin/dev-rollup.rs +++ b/sequencer/src/bin/dev-rollup.rs @@ -10,7 +10,6 @@ use espresso_types::{ use hotshot::types::BLSPubKey; use hotshot_types::traits::{node_implementation::NodeType, signature_key::SignatureKey}; use marketplace_solver::{SolverError, SOLVER_API_PATH}; - use sequencer_utils::logging; use tagged_base64::TaggedBase64; use url::Url; diff --git 
a/sequencer/src/bin/espresso-bridge.rs b/sequencer/src/bin/espresso-bridge.rs index 8201904e45..2ab78fde16 100644 --- a/sequencer/src/bin/espresso-bridge.rs +++ b/sequencer/src/bin/espresso-bridge.rs @@ -1,3 +1,5 @@ +use std::{sync::Arc, time::Duration}; + use anyhow::{bail, ensure, Context}; use clap::{Parser, Subcommand}; use client::SequencerClient; @@ -10,7 +12,6 @@ use ethers::{ }; use futures::stream::StreamExt; use sequencer_utils::logging; -use std::{sync::Arc, time::Duration}; use surf_disco::Url; /// Command-line utility for working with the Espresso bridge. @@ -213,7 +214,7 @@ async fn deposit(opt: Deposit) -> anyhow::Result<()> { Err(err) => { tracing::warn!("error in header stream: {err:#}"); continue; - } + }, }; let Some(l1_finalized) = header.l1_finalized() else { continue; diff --git a/sequencer/src/bin/espresso-dev-node.rs b/sequencer/src/bin/espresso-dev-node.rs index 7fed6cec03..fdff16e3b2 100644 --- a/sequencer/src/bin/espresso-dev-node.rs +++ b/sequencer/src/bin/espresso-dev-node.rs @@ -559,7 +559,6 @@ struct SetHotshotUpReqBody { mod tests { use std::{process::Child, sync::Arc, time::Duration}; - use crate::AltChainInfo; use committable::{Commitment, Committable}; use contract_bindings_ethers::light_client::LightClient; use escargot::CargoBuild; @@ -577,11 +576,10 @@ mod tests { use surf_disco::Client; use tide_disco::error::ServerError; use tokio::time::sleep; - use url::Url; use vbs::version::StaticVersion; - use crate::{DevInfo, SetHotshotDownReqBody, SetHotshotUpReqBody}; + use crate::{AltChainInfo, DevInfo, SetHotshotDownReqBody, SetHotshotUpReqBody}; const TEST_MNEMONIC: &str = "test test test test test test test test test test test junk"; const NUM_ALT_CHAIN_PROVIDERS: usize = 1; diff --git a/sequencer/src/bin/keygen.rs b/sequencer/src/bin/keygen.rs index 188179f982..f4ac7d7d83 100644 --- a/sequencer/src/bin/keygen.rs +++ b/sequencer/src/bin/keygen.rs @@ -33,7 +33,7 @@ impl Scheme { Self::All => { Self::Bls.gen(seed, index, env_file)?; 
Self::Schnorr.gen(seed, index, env_file)?; - } + }, Self::Bls => { let (pub_key, priv_key) = BLSPubKey::generated_from_seed_indexed(seed, index); let priv_key = priv_key.to_tagged_base64()?; @@ -43,7 +43,7 @@ impl Scheme { "ESPRESSO_SEQUENCER_PRIVATE_STAKING_KEY={priv_key}" )?; tracing::info!(%pub_key, "generated staking key") - } + }, Self::Schnorr => { let key_pair = StateKeyPair::generate_from_seed_indexed(seed, index); let priv_key = key_pair.sign_key_ref().to_tagged_base64()?; @@ -54,7 +54,7 @@ impl Scheme { )?; writeln!(env_file, "ESPRESSO_SEQUENCER_PRIVATE_STATE_KEY={priv_key}")?; tracing::info!(pub_key = %key_pair.ver_key(), "generated state key"); - } + }, } Ok(()) } diff --git a/sequencer/src/bin/nasty-client.rs b/sequencer/src/bin/nasty-client.rs index b3741582d8..f478e8323c 100644 --- a/sequencer/src/bin/nasty-client.rs +++ b/sequencer/src/bin/nasty-client.rs @@ -12,6 +12,16 @@ //! provides a healthcheck endpoint as well as a prometheus endpoint which provides metrics like the //! count of various types of actions performed and the number of open streams. +use std::{ + borrow::Cow, + cmp::max, + collections::{BTreeMap, HashMap}, + fmt::Debug, + pin::Pin, + sync::Arc, + time::{Duration, Instant}, +}; + use anyhow::{bail, ensure, Context}; use async_lock::RwLock; use clap::Parser; @@ -41,15 +51,6 @@ use rand::{seq::SliceRandom, RngCore}; use sequencer::{api::endpoints::NamespaceProofQueryData, SequencerApiVersion}; use sequencer_utils::logging; use serde::de::DeserializeOwned; -use std::{ - borrow::Cow, - cmp::max, - collections::{BTreeMap, HashMap}, - fmt::Debug, - pin::Pin, - sync::Arc, - time::{Duration, Instant}, -}; use strum::{EnumDiscriminants, VariantArray}; use surf_disco::{error::ClientError, socket, Error, StatusCode, Url}; use tide_disco::{error::ServerError, App}; @@ -520,7 +521,7 @@ impl ResourceManager { Ok(res) if i == 0 => { // Succeeded on the first try, get on with it. 
return Ok(res); - } + }, Ok(res) => { // Succeeded after at least one failure; retry a number of additional times to // be sure the endpoint is healed. @@ -531,14 +532,14 @@ impl ResourceManager { )?; } return Ok(res); - } + }, Err(err) if i < self.cfg.max_retries => { tracing::warn!("failed, will retry: {err:#}"); i += 1; - } + }, Err(err) => { return Err(err).context("failed too many times"); - } + }, } } } @@ -674,7 +675,7 @@ impl ResourceManager { obj.height() ); } - } + }, Err(_) if to - from > limit => { tracing::info!( limit, @@ -682,10 +683,10 @@ impl ResourceManager { to, "range query exceeding limit failed as expected" ); - } + }, Err(err) => { return Err(err).context("error in range query"); - } + }, } self.metrics.query_range_actions[&T::RESOURCE].add(1); @@ -800,7 +801,7 @@ impl ResourceManager { ); } break obj; - } + }, Err(err) if refreshed.elapsed() >= self.cfg.web_socket_timeout => { // Streams are allowed to fail if the connection is too old. Warn about it, // but refresh the connection and try again. @@ -818,7 +819,7 @@ impl ResourceManager { "{} stream refreshed due to connection reset", Self::singular(), ); - } + }, Err(err) => { // Errors on a relatively fresh connection are not allowed. Close the stream // since it is apparently in a bad state, and return an error. @@ -830,7 +831,7 @@ impl ResourceManager { Self::singular(), refreshed.elapsed() )); - } + }, } }; @@ -944,11 +945,11 @@ impl ResourceManager
{ // The block state at height 0 is empty, so to have a valid query just adjust to // querying at height 1. At height 1, the only valid index to query is 0. (1, 0) - } + }, block => { // At any other height, all indices between 0 and `block - 1` are valid to query. (block, index % (block - 1)) - } + }, }; // Get the header of the state snapshot we're going to query and the block commitment we're @@ -1344,7 +1345,7 @@ impl Client { Resource::Payloads => self.payloads.close_stream(id).await, }; Ok(()) - } + }, Action::PollStream { resource, id, @@ -1357,16 +1358,16 @@ impl Client { }, Action::QueryWindow { from, duration } => { self.headers.query_window(from, duration).await - } + }, Action::QueryNamespace { block, namespace } => { self.blocks.query_namespace(block, namespace).await - } + }, Action::QueryBlockState { block, index } => { self.headers.query_block_state(block, index).await - } + }, Action::QueryFeeState { block, builder } => { self.headers.query_fee_state(block, builder).await - } + }, } } } diff --git a/sequencer/src/bin/pub-key.rs b/sequencer/src/bin/pub-key.rs index 0c2bbc99cc..38efb49a86 100644 --- a/sequencer/src/bin/pub-key.rs +++ b/sequencer/src/bin/pub-key.rs @@ -49,7 +49,7 @@ fn main() { (false, PrivateKey::Bls(key)) => println!("{}", PubKey::from_private(&key)), (false, PrivateKey::Schnorr(key)) => { println!("{}", StateKeyPair::from_sign_key(key).ver_key()) - } + }, // Libp2p (true, PrivateKey::Bls(key)) => { @@ -57,9 +57,9 @@ fn main() { "{}", derive_libp2p_peer_id::(&key).expect("Failed to derive libp2p peer ID") ); - } + }, (true, _) => { eprintln!("Key type unsupported for libp2p peer ID derivation"); - } + }, } } diff --git a/sequencer/src/bin/reset-storage.rs b/sequencer/src/bin/reset-storage.rs index a635cda1b7..88d83fd1fd 100644 --- a/sequencer/src/bin/reset-storage.rs +++ b/sequencer/src/bin/reset-storage.rs @@ -35,11 +35,11 @@ async fn main() -> anyhow::Result<()> { Command::Fs(opt) => { tracing::warn!("resetting file system storage 
{opt:?}"); reset_storage(opt).await - } + }, Command::Sql(opt) => { tracing::warn!("resetting SQL storage {opt:?}"); reset_storage(*opt).await - } + }, } } diff --git a/sequencer/src/bin/submit-transactions.rs b/sequencer/src/bin/submit-transactions.rs index 87795b4761..1422141213 100644 --- a/sequencer/src/bin/submit-transactions.rs +++ b/sequencer/src/bin/submit-transactions.rs @@ -1,5 +1,9 @@ //! Utility program to submit random transactions to an Espresso Sequencer. +#[cfg(feature = "benchmarking")] +use std::fs::OpenOptions; +#[cfg(feature = "benchmarking")] +use std::num::NonZeroUsize; use std::{ collections::HashMap, time::{Duration, Instant}, @@ -7,6 +11,8 @@ use std::{ use clap::Parser; use committable::{Commitment, Committable}; +#[cfg(feature = "benchmarking")] +use csv::Writer; use espresso_types::{parse_duration, parse_size, SeqTypes, Transaction}; use futures::{ channel::mpsc::{self, Sender}, @@ -24,13 +30,6 @@ use tide_disco::{error::ServerError, App}; use tokio::{task::spawn, time::sleep}; use vbs::version::StaticVersionType; -#[cfg(feature = "benchmarking")] -use csv::Writer; -#[cfg(feature = "benchmarking")] -use std::fs::OpenOptions; -#[cfg(feature = "benchmarking")] -use std::num::NonZeroUsize; - /// Submit random transactions to an Espresso Sequencer. 
#[derive(Clone, Debug, Parser)] struct Options { @@ -229,7 +228,7 @@ async fn main() { Err(err) => { tracing::warn!("error getting block: {err}"); continue; - } + }, }; let received_at = Instant::now(); tracing::debug!("got block {}", block.height()); diff --git a/sequencer/src/bin/update-permissioned-stake-table.rs b/sequencer/src/bin/update-permissioned-stake-table.rs index 28e30767d6..37534bc66c 100644 --- a/sequencer/src/bin/update-permissioned-stake-table.rs +++ b/sequencer/src/bin/update-permissioned-stake-table.rs @@ -1,3 +1,5 @@ +use std::{path::PathBuf, time::Duration}; + use anyhow::Result; use clap::Parser; use espresso_types::parse_duration; @@ -6,7 +8,6 @@ use sequencer_utils::{ logging, stake_table::{update_stake_table, PermissionedStakeTableUpdate}, }; -use std::{path::PathBuf, time::Duration}; use url::Url; #[derive(Debug, Clone, Parser)] diff --git a/sequencer/src/bin/utils/keygen.rs b/sequencer/src/bin/utils/keygen.rs index a381dc0d25..5240493f16 100644 --- a/sequencer/src/bin/utils/keygen.rs +++ b/sequencer/src/bin/utils/keygen.rs @@ -32,7 +32,7 @@ impl Scheme { Self::All => { Self::Bls.gen(seed, index, env_file)?; Self::Schnorr.gen(seed, index, env_file)?; - } + }, Self::Bls => { let (pub_key, priv_key) = BLSPubKey::generated_from_seed_indexed(seed, index); let priv_key = priv_key.to_tagged_base64()?; @@ -42,7 +42,7 @@ impl Scheme { "ESPRESSO_SEQUENCER_PRIVATE_STAKING_KEY={priv_key}" )?; tracing::info!(%pub_key, "generated staking key") - } + }, Self::Schnorr => { let key_pair = StateKeyPair::generate_from_seed_indexed(seed, index); let priv_key = key_pair.sign_key_ref().to_tagged_base64()?; @@ -53,7 +53,7 @@ impl Scheme { )?; writeln!(env_file, "ESPRESSO_SEQUENCER_PRIVATE_STATE_KEY={priv_key}")?; tracing::info!(pub_key = %key_pair.ver_key(), "generated state key"); - } + }, } Ok(()) } diff --git a/sequencer/src/bin/utils/main.rs b/sequencer/src/bin/utils/main.rs index 97ac6486ba..c3c803c204 100644 --- a/sequencer/src/bin/utils/main.rs +++ 
b/sequencer/src/bin/utils/main.rs @@ -1,7 +1,6 @@ //! sequencer utility programs use clap::{Parser, Subcommand}; - use sequencer_utils::logging; mod keygen; mod pubkey; @@ -34,7 +33,7 @@ async fn main() -> anyhow::Result<()> { Command::Pubkey(opt) => { pubkey::run(opt); Ok(()) - } + }, Command::ResetStorage(opt) => reset_storage::run(opt).await, } } diff --git a/sequencer/src/bin/utils/pubkey.rs b/sequencer/src/bin/utils/pubkey.rs index 84b65c5042..bd4156df8b 100644 --- a/sequencer/src/bin/utils/pubkey.rs +++ b/sequencer/src/bin/utils/pubkey.rs @@ -3,10 +3,11 @@ use std::str::FromStr; use anyhow::bail; use clap::Parser; use espresso_types::{PrivKey, PubKey}; -use hotshot::traits::implementations::derive_libp2p_peer_id; -use hotshot::types::SignatureKey; -use hotshot_types::light_client::StateSignKey; -use hotshot_types::{light_client::StateKeyPair, signature_key::BLSPubKey}; +use hotshot::{traits::implementations::derive_libp2p_peer_id, types::SignatureKey}; +use hotshot_types::{ + light_client::{StateKeyPair, StateSignKey}, + signature_key::BLSPubKey, +}; use tagged_base64::TaggedBase64; #[derive(Clone, Debug)] @@ -47,7 +48,7 @@ pub fn run(opt: Options) { (false, PrivateKey::Bls(key)) => println!("{}", PubKey::from_private(&key)), (false, PrivateKey::Schnorr(key)) => { println!("{}", StateKeyPair::from_sign_key(key).ver_key()) - } + }, // Libp2p (true, PrivateKey::Bls(key)) => { @@ -55,9 +56,9 @@ pub fn run(opt: Options) { "{}", derive_libp2p_peer_id::(&key).expect("Failed to derive libp2p peer ID") ); - } + }, (true, _) => { eprintln!("Key type unsupported for libp2p peer ID derivation"); - } + }, } } diff --git a/sequencer/src/bin/utils/reset_storage.rs b/sequencer/src/bin/utils/reset_storage.rs index b25c8a3c11..1938b5857e 100644 --- a/sequencer/src/bin/utils/reset_storage.rs +++ b/sequencer/src/bin/utils/reset_storage.rs @@ -1,10 +1,9 @@ +use clap::Subcommand; use sequencer::{ api::data_source::{DataSourceOptions, SequencerDataSource}, persistence, }; -use 
clap::Subcommand; - /// Options for resetting persistent storage. /// /// This will remove all the persistent storage of a sequencer node or marketplace solver, effectively resetting it to @@ -32,11 +31,11 @@ pub async fn run(opt: Commands) -> anyhow::Result<()> { SequencerStorage::Fs(opt) => { tracing::warn!("resetting sequencer file system storage {opt:?}"); reset_storage(opt).await - } + }, SequencerStorage::Sql(opt) => { tracing::warn!("resetting sequencer SQL storage {opt:?}"); reset_storage(*opt).await - } + }, }, Commands::Solver(opt) => { @@ -45,7 +44,7 @@ pub async fn run(opt: Commands) -> anyhow::Result<()> { opts.connect().await?; Ok(()) - } + }, } } diff --git a/sequencer/src/bin/verify-headers.rs b/sequencer/src/bin/verify-headers.rs index 4b999070a3..fb1c6096fe 100644 --- a/sequencer/src/bin/verify-headers.rs +++ b/sequencer/src/bin/verify-headers.rs @@ -1,6 +1,6 @@ //! Utility program to verify properties of headers sequenced by HotShot. -use std::{cmp::max, process::exit, time::Duration}; +use std::{cmp::max, process::exit, sync::Arc, time::Duration}; use clap::Parser; use espresso_types::{Header, L1BlockInfo}; @@ -9,7 +9,6 @@ use futures::future::join_all; use itertools::Itertools; use sequencer::SequencerApiVersion; use sequencer_utils::logging; -use std::sync::Arc; use surf_disco::Url; use tokio::time::sleep; use vbs::version::StaticVersionType; @@ -134,7 +133,7 @@ async fn get_header( // Back off a bit and then retry. 
sleep(Duration::from_millis(100)).await; - } + }, } } } @@ -147,12 +146,12 @@ async fn get_l1_block(l1: &Provider, height: u64) -> L1BlockInfo { tracing::warn!("L1 block {height} not yet available"); sleep(Duration::from_secs(1)).await; continue; - } + }, Err(err) => { tracing::warn!("error fetching L1 block {height}: {err}"); sleep(Duration::from_millis(100)).await; continue; - } + }, }; let Some(hash) = block.hash else { diff --git a/sequencer/src/catchup.rs b/sequencer/src/catchup.rs index d173f9eace..59068e3f84 100644 --- a/sequencer/src/catchup.rs +++ b/sequencer/src/catchup.rs @@ -1,15 +1,13 @@ -use std::sync::Arc; +use std::{cmp::Ordering, collections::HashMap, fmt::Display, sync::Arc, time::Duration}; use anyhow::{anyhow, bail, ensure, Context}; use async_lock::RwLock; use async_trait::async_trait; -use committable::Commitment; -use committable::Committable; -use espresso_types::config::PublicNetworkConfig; -use espresso_types::traits::SequencerPersistence; +use committable::{Commitment, Committable}; use espresso_types::{ - v0::traits::StateCatchup, v0_99::ChainConfig, BackoffParams, BlockMerkleTree, FeeAccount, - FeeAccountProof, FeeMerkleCommitment, FeeMerkleTree, Leaf2, NodeState, + config::PublicNetworkConfig, traits::SequencerPersistence, v0::traits::StateCatchup, + v0_99::ChainConfig, BackoffParams, BlockMerkleTree, FeeAccount, FeeAccountProof, + FeeMerkleCommitment, FeeMerkleTree, Leaf2, NodeState, }; use futures::future::{Future, FutureExt, TryFuture, TryFutureExt}; use hotshot_types::{ @@ -25,15 +23,13 @@ use itertools::Itertools; use jf_merkle_tree::{prelude::MerkleNode, ForgetableMerkleTreeScheme, MerkleTreeScheme}; use priority_queue::PriorityQueue; use serde::de::DeserializeOwned; -use std::{cmp::Ordering, collections::HashMap, fmt::Display, time::Duration}; use surf_disco::Request; use tide_disco::error::ServerError; use tokio::time::timeout; use url::Url; use vbs::version::StaticVersionType; -use crate::api::BlocksFrontier; -use 
crate::PubKey; +use crate::{api::BlocksFrontier, PubKey}; // This newtype is probably not worth having. It's only used to be able to log // URLs before doing requests. @@ -75,7 +71,7 @@ pub(crate) async fn local_and_remote( Err(err) => { tracing::warn!("not using local catchup: {err:#}"); Arc::new(remote) - } + }, } } @@ -164,15 +160,15 @@ impl StatePeers { requests.insert(id, true); res = Ok(t); break; - } + }, Ok(Err(err)) => { tracing::warn!(id, ?score, peer = %client.url, "error from peer: {err:#}"); requests.insert(id, false); - } + }, Err(_) => { tracing::warn!(id, ?score, peer = %client.url, ?timeout_dur, "request timed out"); requests.insert(id, false); - } + }, } } diff --git a/sequencer/src/context.rs b/sequencer/src/context.rs index 25c8fec67b..1ad2d1a2da 100644 --- a/sequencer/src/context.rs +++ b/sequencer/src/context.rs @@ -1,3 +1,9 @@ +use std::{ + fmt::{Debug, Display}, + sync::Arc, + time::Duration, +}; + use anyhow::Context; use async_lock::RwLock; use derivative::Derivative; @@ -29,8 +35,6 @@ use hotshot_types::{ }; use parking_lot::Mutex; use request_response::{network::Bytes, RequestResponse, RequestResponseConfig}; -use std::{fmt::Debug, time::Duration}; -use std::{fmt::Display, sync::Arc}; use tokio::{ spawn, sync::mpsc::{channel, Receiver}, diff --git a/sequencer/src/external_event_handler.rs b/sequencer/src/external_event_handler.rs index 86659f946f..5b7e8175e2 100644 --- a/sequencer/src/external_event_handler.rs +++ b/sequencer/src/external_event_handler.rs @@ -1,6 +1,7 @@ //! 
Should probably rename this to "external" or something -use crate::context::TaskList; +use std::{marker::PhantomData, sync::Arc}; + use anyhow::{Context, Result}; use espresso_types::{PubKey, SeqTypes}; use hotshot::types::Message; @@ -13,9 +14,10 @@ use hotshot_types::{ }; use request_response::network::Bytes; use serde::{Deserialize, Serialize}; -use std::{marker::PhantomData, sync::Arc}; use tokio::sync::mpsc::{Receiver, Sender}; +use crate::context::TaskList; + /// An external message that can be sent to or received from a node #[derive(Debug, Serialize, Deserialize, Clone)] pub enum ExternalMessage { @@ -83,7 +85,7 @@ impl ExternalEventHandler { self.request_response_sender .send(request_response.into()) .await?; - } + }, } Ok(()) } @@ -111,14 +113,14 @@ impl ExternalEventHandler { Err(err) => { tracing::warn!("Failed to serialize direct message: {}", err); continue; - } + }, }; // Send the message to the recipient if let Err(err) = network.direct_message(message_bytes, recipient).await { tracing::error!("Failed to send message: {:?}", err); }; - } + }, OutboundMessage::Broadcast(message) => { // Wrap it in the real message type @@ -133,7 +135,7 @@ impl ExternalEventHandler { Err(err) => { tracing::warn!("Failed to serialize broadcast message: {}", err); continue; - } + }, }; // Broadcast the message to the global topic @@ -143,7 +145,7 @@ impl ExternalEventHandler { { tracing::error!("Failed to broadcast message: {:?}", err); }; - } + }, } } } diff --git a/sequencer/src/genesis.rs b/sequencer/src/genesis.rs index 2e6235fc54..99431c8f6d 100644 --- a/sequencer/src/genesis.rs +++ b/sequencer/src/genesis.rs @@ -132,9 +132,8 @@ impl Genesis { mod version_ser { - use vbs::version::Version; - use serde::{de, Deserialize, Deserializer, Serializer}; + use vbs::version::Version; pub fn serialize(ver: &Version, serializer: S) -> Result where @@ -254,12 +253,12 @@ mod upgrade_ser { return Err(de::Error::custom( "both view and time mode parameters are set", )) - } + }, 
(None, None) => { return Err(de::Error::custom( "no view or time mode parameters provided", )) - } + }, (None, Some(v)) => { if v.start_proposing_view > v.stop_proposing_view { return Err(de::Error::custom( @@ -274,7 +273,7 @@ mod upgrade_ser { upgrade_type: fields.upgrade_type, }, ); - } + }, (Some(t), None) => { if t.start_proposing_time.unix_timestamp() > t.stop_proposing_time.unix_timestamp() @@ -291,7 +290,7 @@ mod upgrade_ser { upgrade_type: fields.upgrade_type.clone(), }, ); - } + }, } } @@ -321,25 +320,25 @@ impl Genesis { #[cfg(test)] mod test { - use ethers::middleware::Middleware; - use ethers::prelude::*; - use ethers::signers::Signer; - use ethers::utils::{Anvil, AnvilInstance}; - use sequencer_utils::deployer::test_helpers::{ - deploy_fee_contract, deploy_fee_contract_as_proxy, - }; use std::sync::Arc; use anyhow::Result; - use contract_bindings_ethers::fee_contract::FeeContract; use espresso_types::{ L1BlockInfo, TimeBasedUpgrade, Timestamp, UpgradeMode, UpgradeType, ViewBasedUpgrade, }; - - use sequencer_utils::deployer; - use sequencer_utils::ser::FromStringOrInteger; - use sequencer_utils::test_utils::setup_test; + use ethers::{ + middleware::Middleware, + prelude::*, + signers::Signer, + utils::{Anvil, AnvilInstance}, + }; + use sequencer_utils::{ + deployer, + deployer::test_helpers::{deploy_fee_contract, deploy_fee_contract_as_proxy}, + ser::FromStringOrInteger, + test_utils::setup_test, + }; use toml::toml; use super::*; diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs index 2d9c0a352e..1ebd47d0a7 100644 --- a/sequencer/src/lib.rs +++ b/sequencer/src/lib.rs @@ -13,45 +13,44 @@ mod restart_tests; mod message_compat_tests; +use std::sync::Arc; + use anyhow::Context; use catchup::StatePeers; use context::SequencerContext; -use espresso_types::EpochCommittees; use espresso_types::{ - traits::EventConsumer, BackoffParams, L1ClientOptions, NodeState, PubKey, SeqTypes, - SolverAuctionResultsProvider, ValidatedState, + traits::EventConsumer, 
BackoffParams, EpochCommittees, L1ClientOptions, NodeState, PubKey, + SeqTypes, SolverAuctionResultsProvider, ValidatedState, }; use ethers_conv::ToAlloy; use genesis::L1Finalized; -use proposal_fetcher::ProposalFetcherConfig; -use std::sync::Arc; -use tokio::select; // Should move `STAKE_TABLE_CAPACITY` in the sequencer repo when we have variate stake table support use hotshot_libp2p_networking::network::behaviours::dht::store::persistent::DhtNoPersistence; use libp2p::Multiaddr; use network::libp2p::split_off_peer_id; use options::Identity; +use proposal_fetcher::ProposalFetcherConfig; use state_signature::static_stake_table_commitment; +use tokio::select; use tracing::info; use url::Url; pub mod persistence; pub mod state; +use std::{fmt::Debug, marker::PhantomData, time::Duration}; + use derivative::Derivative; use espresso_types::v0::traits::SequencerPersistence; pub use genesis::Genesis; -use hotshot::traits::implementations::{ - derive_libp2p_multiaddr, CombinedNetworks, GossipConfig, Libp2pNetwork, RequestResponseConfig, -}; use hotshot::{ traits::implementations::{ - derive_libp2p_peer_id, CdnMetricsValue, CdnTopic, KeyPair, MemoryNetwork, PushCdnNetwork, - WrappedSignatureKey, + derive_libp2p_multiaddr, derive_libp2p_peer_id, CdnMetricsValue, CdnTopic, + CombinedNetworks, GossipConfig, KeyPair, Libp2pNetwork, MemoryNetwork, PushCdnNetwork, + RequestResponseConfig, WrappedSignatureKey, }, types::SignatureKey, MarketplaceConfig, }; -use hotshot_orchestrator::client::get_complete_config; -use hotshot_orchestrator::client::OrchestratorClient; +use hotshot_orchestrator::client::{get_complete_config, OrchestratorClient}; use hotshot_types::{ data::ViewNumber, light_client::{StateKeyPair, StateSignKey}, @@ -66,8 +65,6 @@ use hotshot_types::{ }; pub use options::Options; use serde::{Deserialize, Serialize}; -use std::time::Duration; -use std::{fmt::Debug, marker::PhantomData}; use vbs::version::{StaticVersion, StaticVersionType}; pub mod network; @@ -312,7 +309,7 
@@ pub async fn init_node( (Some(config), _) => { tracing::info!("loaded network config from storage, rejoining existing network"); (config, false) - } + }, // If we were told to fetch the config from an already-started peer, do so. (None, Some(peers)) => { tracing::info!(?peers, "loading network config from peers"); @@ -330,7 +327,7 @@ pub async fn init_node( ); persistence.save_config(&config).await?; (config, false) - } + }, // Otherwise, this is a fresh network; load from the orchestrator. (None, None) => { tracing::info!("loading network config from orchestrator"); @@ -356,7 +353,7 @@ pub async fn init_node( persistence.save_config(&config).await?; tracing::error!("all nodes connected"); (config, true) - } + }, }; if let Some(upgrade) = genesis.upgrades.get(&V::Upgrade::VERSION) { @@ -451,7 +448,7 @@ pub async fn init_node( ethers::types::U256::from(timestamp.unix_timestamp()).to_alloy(), ) .await - } + }, }; let mut genesis_state = ValidatedState { @@ -590,20 +587,22 @@ pub mod testing { use hotshot_testing::block_builder::{ BuilderTask, SimpleBuilderImplementation, TestBuilderImplementation, }; - use hotshot_types::traits::network::Topic; - use hotshot_types::traits::signature_key::StakeTableEntryType; use hotshot_types::{ event::LeafInfo, light_client::{CircuitField, StateKeyPair, StateVerKey}, - traits::signature_key::BuilderSignatureKey, - traits::{block_contents::BlockHeader, metrics::NoMetrics, stake_table::StakeTableScheme}, + traits::{ + block_contents::BlockHeader, + metrics::NoMetrics, + network::Topic, + signature_key::{BuilderSignatureKey, StakeTableEntryType}, + stake_table::StakeTableScheme, + }, HotShotConfig, PeerConfig, }; use marketplace_builder_core::{ hooks::NoHooks, service::{BuilderConfig, GlobalState}, }; - use portpicker::pick_unused_port; use tokio::spawn; use vbs::version::Version; diff --git a/sequencer/src/options.rs b/sequencer/src/options.rs index 93d52f3463..5cd5a8c008 100644 --- a/sequencer/src/options.rs +++ 
b/sequencer/src/options.rs @@ -1,8 +1,6 @@ #![allow(clippy::needless_lifetimes)] use core::fmt::Display; -use jf_signature::{bls_over_bn254, schnorr}; -use sequencer_utils::logging; use std::{ cmp::Ordering, collections::{HashMap, HashSet}, @@ -11,14 +9,16 @@ use std::{ path::PathBuf, time::Duration, }; -use tagged_base64::TaggedBase64; use anyhow::{bail, Context}; use clap::{error::ErrorKind, Args, FromArgMatches, Parser}; use derivative::Derivative; use espresso_types::{parse_duration, BackoffParams, L1ClientOptions}; use hotshot_types::{light_client::StateSignKey, signature_key::BLSPrivKey}; +use jf_signature::{bls_over_bn254, schnorr}; use libp2p::Multiaddr; +use sequencer_utils::logging; +use tagged_base64::TaggedBase64; use url::Url; use crate::{api, persistence, proposal_fetcher::ProposalFetcherConfig}; @@ -472,10 +472,10 @@ fn fmt_opt_urls( write!(fmt, "Some(")?; fmt_urls(urls, fmt)?; write!(fmt, ")")?; - } + }, None => { write!(fmt, "None")?; - } + }, } Ok(()) } @@ -536,13 +536,13 @@ impl ModuleArgs { match module { SequencerModule::Storage(m) => { curr = m.add(&mut modules.storage_fs, &mut provided)? - } + }, SequencerModule::StorageFs(m) => { curr = m.add(&mut modules.storage_fs, &mut provided)? - } + }, SequencerModule::StorageSql(m) => { curr = m.add(&mut modules.storage_sql, &mut provided)? - } + }, SequencerModule::Http(m) => curr = m.add(&mut modules.http, &mut provided)?, SequencerModule::Query(m) => curr = m.add(&mut modules.query, &mut provided)?, SequencerModule::Submit(m) => curr = m.add(&mut modules.submit, &mut provided)?, @@ -551,10 +551,10 @@ impl ModuleArgs { SequencerModule::Config(m) => curr = m.add(&mut modules.config, &mut provided)?, SequencerModule::HotshotEvents(m) => { curr = m.add(&mut modules.hotshot_events, &mut provided)? - } + }, SequencerModule::Explorer(m) => { curr = m.add(&mut modules.explorer, &mut provided)? 
- } + }, } } diff --git a/sequencer/src/persistence.rs b/sequencer/src/persistence.rs index 8dff3c7448..5c39941223 100644 --- a/sequencer/src/persistence.rs +++ b/sequencer/src/persistence.rs @@ -43,8 +43,7 @@ mod testing { #[cfg(test)] #[espresso_macros::generic_tests] mod persistence_tests { - use std::{collections::BTreeMap, marker::PhantomData}; - use vbs::version::StaticVersionType; + use std::{collections::BTreeMap, marker::PhantomData, sync::Arc}; use anyhow::bail; use async_lock::RwLock; @@ -53,7 +52,10 @@ mod persistence_tests { traits::{EventConsumer, NullEventConsumer, PersistenceOptions}, Event, Leaf, Leaf2, NodeState, PubKey, SeqTypes, ValidatedState, }; - use hotshot::types::{BLSPubKey, SignatureKey}; + use hotshot::{ + types::{BLSPubKey, SignatureKey}, + InitializerEpochInfo, + }; use hotshot_example_types::node_types::TestVersions; use hotshot_query_service::testing::mocks::MockVersions; use hotshot_types::{ @@ -76,11 +78,9 @@ mod persistence_tests { vid::avidm::{init_avidm_param, AvidMScheme}, vote::HasViewNumber, }; - use sequencer_utils::test_utils::setup_test; - use std::sync::Arc; use testing::TestablePersistence; - use vbs::version::Version; + use vbs::version::{StaticVersionType, Version}; use super::*; @@ -156,6 +156,52 @@ mod persistence_tests { ); } + #[tokio::test(flavor = "multi_thread")] + pub async fn test_epoch_info() { + setup_test(); + + let tmp = P::tmp_storage().await; + let storage = P::connect(&tmp).await; + + // Initially, there is no saved info. + assert_eq!(storage.load_start_epoch_info().await.unwrap(), Vec::new()); + + // Store a drb result. 
+ storage + .add_drb_result(EpochNumber::new(1), [1; 32]) + .await + .unwrap(); + assert_eq!( + storage.load_start_epoch_info().await.unwrap(), + vec![InitializerEpochInfo:: { + epoch: EpochNumber::new(1), + drb_result: [1; 32], + block_header: None, + }] + ); + + // Store a second DRB result + storage + .add_drb_result(EpochNumber::new(2), [3; 32]) + .await + .unwrap(); + assert_eq!( + storage.load_start_epoch_info().await.unwrap(), + vec![ + InitializerEpochInfo:: { + epoch: EpochNumber::new(1), + drb_result: [1; 32], + block_header: None, + }, + InitializerEpochInfo:: { + epoch: EpochNumber::new(2), + drb_result: [3; 32], + block_header: None, + } + ] + ); + } + fn leaf_info(leaf: Leaf2) -> LeafInfo { LeafInfo { leaf, @@ -762,7 +808,7 @@ mod persistence_tests { let leaf_chain = chain .iter() .take(2) - .map(|(leaf, qc, _, _)| (leaf_info(leaf.clone()), qc.clone())) + .map(|(leaf, qc, ..)| (leaf_info(leaf.clone()), qc.clone())) .collect::>(); tracing::info!("decide with event handling failure"); storage @@ -809,7 +855,7 @@ mod persistence_tests { let leaf_chain = chain .iter() .skip(2) - .map(|(leaf, qc, _, _)| (leaf_info(leaf.clone()), qc.clone())) + .map(|(leaf, qc, ..)| (leaf_info(leaf.clone()), qc.clone())) .collect::>(); tracing::info!("decide successfully"); storage @@ -854,7 +900,7 @@ mod persistence_tests { tracing::info!("check decide event"); let leaf_chain = consumer.leaf_chain().await; assert_eq!(leaf_chain.len(), 4, "{leaf_chain:#?}"); - for ((leaf, _, _, _), info) in chain.iter().zip(leaf_chain.iter()) { + for ((leaf, ..), info) in chain.iter().zip(leaf_chain.iter()) { assert_eq!(info.leaf, *leaf); let decided_vid_share = info.vid_share.as_ref().unwrap(); let view_number = match decided_vid_share { diff --git a/sequencer/src/persistence/fs.rs b/sequencer/src/persistence/fs.rs index 853142fa9a..21c25850a9 100644 --- a/sequencer/src/persistence/fs.rs +++ b/sequencer/src/persistence/fs.rs @@ -1,11 +1,22 @@ +use std::{ + collections::{BTreeMap, HashSet}, 
+ fs::{self, File, OpenOptions}, + io::{Read, Seek, SeekFrom, Write}, + ops::RangeInclusive, + path::{Path, PathBuf}, + sync::Arc, +}; + use anyhow::{anyhow, Context}; use async_lock::RwLock; use async_trait::async_trait; use clap::Parser; use espresso_types::{ + upgrade_commitment_map, v0::traits::{EventConsumer, PersistenceOptions, SequencerPersistence}, Leaf, Leaf2, NetworkConfig, Payload, SeqTypes, }; +use hotshot::InitializerEpochInfo; use hotshot_types::{ consensus::CommitmentMap, data::{ @@ -13,6 +24,7 @@ use hotshot_types::{ DaProposal, DaProposal2, EpochNumber, QuorumProposal, QuorumProposal2, QuorumProposalWrapper, VidCommitment, VidDisperseShare, }, + drb::DrbResult, event::{Event, EventType, HotShotAction, LeafInfo}, message::{convert_proposal, Proposal}, simple_certificate::{ @@ -20,24 +32,14 @@ use hotshot_types::{ }, traits::{ block_contents::{BlockHeader, BlockPayload}, - node_implementation::ConsensusTime, + node_implementation::{ConsensusTime, NodeType}, }, utils::View, vote::HasViewNumber, }; -use std::sync::Arc; -use std::{ - collections::{BTreeMap, HashSet}, - fs::{self, File, OpenOptions}, - io::{Read, Seek, SeekFrom, Write}, - ops::RangeInclusive, - path::{Path, PathBuf}, -}; use crate::ViewNumber; -use espresso_types::upgrade_commitment_map; - /// Options for file system backed persistence. #[derive(Parser, Clone, Debug)] pub struct Options { @@ -209,6 +211,14 @@ impl Inner { self.path.join("next_epoch_quorum_certificate") } + fn epoch_drb_result_dir_path(&self) -> PathBuf { + self.path.join("epoch_drb_result") + } + + fn epoch_root_block_header_dir_path(&self) -> PathBuf { + self.path.join("epoch_root_block_header") + } + fn update_migration(&mut self) -> anyhow::Result<()> { let path = self.migration(); let bytes = bincode::serialize(&self.migrated)?; @@ -596,7 +606,7 @@ impl SequencerPersistence for Persistence { // managed to persist the decided leaves successfully, and the event processing will // just run again at the next decide. 
tracing::warn!(?view, "event processing failed: {err:#}"); - } + }, Ok(intervals) => { if let Err(err) = inner.collect_garbage(view, &intervals) { // Similarly, garbage collection is not an error. We have done everything we @@ -604,7 +614,7 @@ impl SequencerPersistence for Persistence { // error but do not return it. tracing::warn!(?view, "GC failed: {err:#}"); } - } + }, } Ok(()) @@ -836,7 +846,7 @@ impl SequencerPersistence for Persistence { // some unintended file whose name happened to match the naming convention. tracing::warn!(?view, "ignoring malformed quorum proposal file: {err:#}"); continue; - } + }, }; let proposal2 = convert_proposal(proposal); @@ -1268,6 +1278,91 @@ impl SequencerPersistence for Persistence { async fn migrate_quorum_certificates(&self) -> anyhow::Result<()> { Ok(()) } + + async fn add_drb_result( + &self, + epoch: EpochNumber, + drb_result: DrbResult, + ) -> anyhow::Result<()> { + let inner = self.inner.write().await; + let dir_path = inner.epoch_drb_result_dir_path(); + + fs::create_dir_all(dir_path.clone()).context("failed to create epoch drb result dir")?; + + let drb_result_bytes = bincode::serialize(&drb_result).context("serialize drb result")?; + + let file_path = dir_path.join(epoch.to_string()).with_extension("txt"); + fs::write(file_path, drb_result_bytes) + .context(format!("writing epoch drb result file for epoch {epoch:?}"))?; + + Ok(()) + } + + async fn add_epoch_root( + &self, + epoch: EpochNumber, + block_header: ::BlockHeader, + ) -> anyhow::Result<()> { + let inner = self.inner.write().await; + let dir_path = inner.epoch_root_block_header_dir_path(); + + fs::create_dir_all(dir_path.clone()) + .context("failed to create epoch root block header dir")?; + + let block_header_bytes = + bincode::serialize(&block_header).context("serialize block header")?; + + let file_path = dir_path.join(epoch.to_string()).with_extension("txt"); + fs::write(file_path, block_header_bytes).context(format!( + "writing epoch root block header 
file for epoch {epoch:?}" + ))?; + + Ok(()) + } + + async fn load_start_epoch_info(&self) -> anyhow::Result>> { + let inner = self.inner.read().await; + let drb_dir_path = inner.epoch_drb_result_dir_path(); + let block_header_dir_path = inner.epoch_root_block_header_dir_path(); + + let mut result = Vec::new(); + + if drb_dir_path.is_dir() { + for (epoch, path) in epoch_files(drb_dir_path)? { + let bytes = fs::read(&path) + .context(format!("reading epoch drb result {}", path.display()))?; + let drb_result = bincode::deserialize::(&bytes) + .context(format!("parsing epoch drb result {}", path.display()))?; + + let block_header_path = block_header_dir_path + .join(epoch.to_string()) + .with_extension("txt"); + let block_header = if block_header_path.is_file() { + let bytes = fs::read(&path).context(format!( + "reading epoch root block header {}", + path.display() + ))?; + Some( + bincode::deserialize::<::BlockHeader>(&bytes) + .context(format!( + "parsing epoch root block header {}", + path.display() + ))?, + ) + } else { + None + }; + + result.push(InitializerEpochInfo:: { + epoch, + drb_result, + block_header, + }); + } + } + + Ok(result) + } } /// Update a `NetworkConfig` that may have originally been persisted with an old version. @@ -1355,6 +1450,32 @@ fn view_files( })) } +/// Get all paths under `dir` whose name is of the form .txt. +/// Should probably be made generic and merged with view_files. +fn epoch_files( + dir: impl AsRef, +) -> anyhow::Result> { + Ok(fs::read_dir(dir.as_ref())?.filter_map(move |entry| { + let dir = dir.as_ref().display(); + let entry = entry.ok()?; + if !entry.file_type().ok()?.is_file() { + tracing::debug!(%dir, ?entry, "ignoring non-file in data directory"); + return None; + } + let path = entry.path(); + if path.extension()? 
!= "txt" { + tracing::debug!(%dir, ?entry, "ignoring non-text file in data directory"); + return None; + } + let file_name = path.file_stem()?; + let Ok(epoch_number) = file_name.to_string_lossy().parse::() else { + tracing::debug!(%dir, ?file_name, "ignoring extraneous file in data directory"); + return None; + }; + Some((EpochNumber::new(epoch_number), entry.path().to_owned())) + })) +} + #[cfg(test)] mod testing { use tempfile::TempDir; @@ -1387,32 +1508,27 @@ mod generic_tests { #[cfg(test)] mod test { - use espresso_types::{NodeState, PubKey}; + use std::marker::PhantomData; + + use committable::{Commitment, CommitmentBoundsArkless, Committable}; + use espresso_types::{Header, Leaf, NodeState, PubKey, ValidatedState}; use hotshot::types::SignatureKey; use hotshot_example_types::node_types::TestVersions; use hotshot_query_service::testing::mocks::MockVersions; - use hotshot_types::data::{vid_commitment, QuorumProposal2}; - use hotshot_types::traits::node_implementation::Versions; - - use hotshot_types::vid::advz::advz_scheme; + use hotshot_types::{ + data::{vid_commitment, QuorumProposal2}, + simple_certificate::QuorumCertificate, + simple_vote::QuorumData, + traits::{node_implementation::Versions, EncodeBytes}, + vid::advz::advz_scheme, + }; + use jf_vid::VidScheme; use sequencer_utils::test_utils::setup_test; - use vbs::version::StaticVersionType; - use serde_json::json; - use std::marker::PhantomData; + use vbs::version::StaticVersionType; use super::*; - use crate::persistence::testing::TestablePersistence; - - use crate::BLSPubKey; - use committable::Committable; - use committable::{Commitment, CommitmentBoundsArkless}; - use espresso_types::{Header, Leaf, ValidatedState}; - - use hotshot_types::{ - simple_certificate::QuorumCertificate, simple_vote::QuorumData, traits::EncodeBytes, - }; - use jf_vid::VidScheme; + use crate::{persistence::testing::TestablePersistence, BLSPubKey}; #[test] fn test_config_migrations_add_builder_urls() { diff --git 
a/sequencer/src/persistence/no_storage.rs b/sequencer/src/persistence/no_storage.rs index c47701a3ea..49f00bb46e 100644 --- a/sequencer/src/persistence/no_storage.rs +++ b/sequencer/src/persistence/no_storage.rs @@ -1,12 +1,15 @@ //! Mock implementation of persistence, for testing. #![cfg(any(test, feature = "testing"))] +use std::{collections::BTreeMap, sync::Arc}; + use anyhow::bail; use async_trait::async_trait; use espresso_types::{ v0::traits::{EventConsumer, PersistenceOptions, SequencerPersistence}, Leaf2, NetworkConfig, }; +use hotshot::InitializerEpochInfo; use hotshot_types::{ consensus::CommitmentMap, data::{ @@ -14,15 +17,14 @@ use hotshot_types::{ DaProposal, DaProposal2, EpochNumber, QuorumProposalWrapper, VidCommitment, VidDisperseShare, }, + drb::DrbResult, event::{Event, EventType, HotShotAction, LeafInfo}, message::Proposal, simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate2, UpgradeCertificate}, utils::View, }; -use std::collections::BTreeMap; -use std::sync::Arc; -use crate::{SeqTypes, ViewNumber}; +use crate::{NodeType, SeqTypes, ViewNumber}; #[derive(Clone, Copy, Debug)] pub struct Options; @@ -221,4 +223,24 @@ impl SequencerPersistence for NoStorage { async fn migrate_quorum_certificates(&self) -> anyhow::Result<()> { Ok(()) } + + async fn add_drb_result( + &self, + _epoch: EpochNumber, + _drb_result: DrbResult, + ) -> anyhow::Result<()> { + Ok(()) + } + + async fn add_epoch_root( + &self, + _epoch: EpochNumber, + _block_header: ::BlockHeader, + ) -> anyhow::Result<()> { + Ok(()) + } + + async fn load_start_epoch_info(&self) -> anyhow::Result>> { + Ok(Vec::new()) + } } diff --git a/sequencer/src/persistence/sql.rs b/sequencer/src/persistence/sql.rs index e2383a81ca..a49c744313 100644 --- a/sequencer/src/persistence/sql.rs +++ b/sequencer/src/persistence/sql.rs @@ -1,4 +1,6 @@ -use anyhow::Context; +use std::{collections::BTreeMap, path::PathBuf, str::FromStr, sync::Arc, time::Duration}; + +use anyhow::{bail, Context}; use 
async_trait::async_trait; use clap::Parser; use committable::Committable; @@ -10,6 +12,7 @@ use espresso_types::{ BackoffParams, BlockMerkleTree, FeeMerkleTree, Leaf, Leaf2, NetworkConfig, Payload, }; use futures::stream::StreamExt; +use hotshot::InitializerEpochInfo; use hotshot_query_service::{ availability::LeafQueryData, data_source::{ @@ -36,6 +39,7 @@ use hotshot_types::{ DaProposal, DaProposal2, EpochNumber, QuorumProposal, QuorumProposalWrapper, VidCommitment, VidDisperseShare, }, + drb::DrbResult, event::{Event, EventType, HotShotAction, LeafInfo}, message::{convert_proposal, Proposal}, simple_certificate::{ @@ -49,11 +53,9 @@ use hotshot_types::{ vote::HasViewNumber, }; use itertools::Itertools; -use sqlx::Row; -use sqlx::{query, Executor}; -use std::{collections::BTreeMap, path::PathBuf, str::FromStr, sync::Arc, time::Duration}; +use sqlx::{query, Executor, Row}; -use crate::{catchup::SqlStateCatchup, SeqTypes, ViewNumber}; +use crate::{catchup::SqlStateCatchup, NodeType, SeqTypes, ViewNumber}; /// Options for Postgres-backed persistence. #[derive(Parser, Clone, Derivative)] @@ -661,7 +663,7 @@ impl Persistence { // we do have. 
tracing::warn!("error loading row: {err:#}"); break; - } + }, }; let leaf_data: Vec = row.get("leaf"); @@ -1873,6 +1875,81 @@ impl SequencerPersistence for Persistence { .await?; tx.commit().await } + + async fn add_drb_result( + &self, + epoch: EpochNumber, + drb_result: DrbResult, + ) -> anyhow::Result<()> { + let drb_result_vec = Vec::from(drb_result); + let mut tx = self.db.write().await?; + tx.upsert( + "epoch_drb_and_root", + ["epoch", "drb_result"], + ["epoch"], + [(epoch.u64() as i64, drb_result_vec)], + ) + .await?; + tx.commit().await + } + + async fn add_epoch_root( + &self, + epoch: EpochNumber, + block_header: ::BlockHeader, + ) -> anyhow::Result<()> { + let block_header_bytes = + bincode::serialize(&block_header).context("serializing block header")?; + + let mut tx = self.db.write().await?; + tx.upsert( + "epoch_drb_and_root", + ["epoch", "block_header"], + ["epoch"], + [(epoch.u64() as i64, block_header_bytes)], + ) + .await?; + tx.commit().await + } + + async fn load_start_epoch_info(&self) -> anyhow::Result>> { + let rows = self + .db + .read() + .await? + .fetch_all("SELECT * from epoch_drb_and_root ORDER BY epoch ASC") + .await?; + + rows.into_iter() + .map(|row| { + let epoch: i64 = row.get("epoch"); + let drb_result: Option> = row.get("drb_result"); + let block_header: Option> = row.get("block_header"); + if let Some(drb_result) = drb_result { + let drb_result_array = drb_result + .try_into() + .or_else(|_| bail!("invalid drb result"))?; + let block_header: Option<::BlockHeader> = block_header + .map(|data| bincode::deserialize(&data)) + .transpose()?; + Ok(Some(InitializerEpochInfo:: { + epoch: ::Epoch::new(epoch as u64), + drb_result: drb_result_array, + block_header, + })) + } else { + // Right now we skip the epoch_drb_and_root row if there is no drb result. + // This seems reasonable based on the expected order of events, but please double check! 
+ Ok(None) + } + }) + .filter_map(|e| match e { + Err(v) => Some(Err(v)), + Ok(Some(v)) => Some(Ok(v)), + Ok(None) => None, + }) + .collect() + } } #[async_trait] @@ -1884,7 +1961,7 @@ impl Provider for Persistence { Err(err) => { tracing::warn!("could not open transaction: {err:#}"); return None; - } + }, }; let bytes = match query_as::<(Vec,)>( @@ -1899,7 +1976,7 @@ impl Provider for Persistence { Err(err) => { tracing::error!("error loading VID share: {err:#}"); return None; - } + }, }; let share: Proposal> = @@ -1908,7 +1985,7 @@ impl Provider for Persistence { Err(err) => { tracing::warn!("error decoding VID share: {err:#}"); return None; - } + }, }; match share.data { @@ -1928,7 +2005,7 @@ impl Provider for Persistence { Err(err) => { tracing::warn!("could not open transaction: {err:#}"); return None; - } + }, }; let bytes = match query_as::<(Vec,)>( @@ -1943,7 +2020,7 @@ impl Provider for Persistence { Err(err) => { tracing::warn!("error loading DA proposal: {err:#}"); return None; - } + }, }; let proposal: Proposal> = match bincode::deserialize(&bytes) @@ -1952,7 +2029,7 @@ impl Provider for Persistence { Err(err) => { tracing::error!("error decoding DA proposal: {err:#}"); return None; - } + }, }; Some(Payload::from_bytes( @@ -1971,7 +2048,7 @@ impl Provider> for Persistence { Err(err) => { tracing::warn!("could not open transaction: {err:#}"); return None; - } + }, }; let (leaf, qc) = match fetch_leaf_from_proposals(&mut tx, req).await { @@ -1979,7 +2056,7 @@ impl Provider> for Persistence { Err(err) => { tracing::info!("requested leaf not found in undecided proposals: {err:#}"); return None; - } + }, }; match LeafQueryData::new(leaf, qc) { @@ -1987,7 +2064,7 @@ impl Provider> for Persistence { Err(err) => { tracing::warn!("fetched invalid leaf: {err:#}"); None - } + }, } } } @@ -2079,8 +2156,6 @@ mod generic_tests { #[cfg(test)] mod test { - use super::*; - use crate::{persistence::testing::TestablePersistence, BLSPubKey, PubKey}; use 
committable::{Commitment, CommitmentBoundsArkless}; use espresso_types::{traits::NullEventConsumer, Header, Leaf, NodeState, ValidatedState}; use futures::stream::TryStreamExt; @@ -2106,6 +2181,9 @@ mod test { use sequencer_utils::test_utils::setup_test; use vbs::version::StaticVersionType; + use super::*; + use crate::{persistence::testing::TestablePersistence, BLSPubKey, PubKey}; + #[tokio::test(flavor = "multi_thread")] async fn test_quorum_proposals_leaf_hash_migration() { setup_test(); diff --git a/sequencer/src/proposal_fetcher.rs b/sequencer/src/proposal_fetcher.rs index a5d143188a..2a783914fb 100644 --- a/sequencer/src/proposal_fetcher.rs +++ b/sequencer/src/proposal_fetcher.rs @@ -1,4 +1,4 @@ -use std::sync::Arc; +use std::{sync::Arc, time::Duration}; use anyhow::Context; use async_channel::{Receiver, Sender}; @@ -19,7 +19,6 @@ use hotshot_types::{ }, utils::{View, ViewInner}, }; -use std::time::Duration; use tokio::time::{sleep, timeout}; use tracing::Instrument; @@ -184,10 +183,10 @@ where let leaf = proposal.data.justify_qc().data.leaf_commit; self.request((view, leaf)).await; return Ok(()); - } + }, Err(err) => { tracing::info!("proposal missing from storage; fetching from network: {err:#}"); - } + }, } let future = self.consensus.read().await.request_proposal(view, leaf)?; diff --git a/sequencer/src/request_response/data_source.rs b/sequencer/src/request_response/data_source.rs index df7476677e..e4b86193b5 100644 --- a/sequencer/src/request_response/data_source.rs +++ b/sequencer/src/request_response/data_source.rs @@ -2,11 +2,12 @@ //! to calculate/derive a response for a specific request. In the confirmation layer the implementer //! 
would be something like a [`FeeMerkleTree`] for fee catchup -use super::request::{Request, Response}; use anyhow::Result; use async_trait::async_trait; use request_response::data_source::DataSource as DataSourceTrait; +use super::request::{Request, Response}; + #[derive(Clone, Debug)] pub struct DataSource {} diff --git a/sequencer/src/request_response/network.rs b/sequencer/src/request_response/network.rs index 38a80b2621..d05487d308 100644 --- a/sequencer/src/request_response/network.rs +++ b/sequencer/src/request_response/network.rs @@ -1,14 +1,12 @@ -use crate::external_event_handler::ExternalMessage; -use crate::external_event_handler::OutboundMessage; use anyhow::{Context, Result}; use async_trait::async_trait; -use espresso_types::PubKey; -use espresso_types::SeqTypes; +use espresso_types::{PubKey, SeqTypes}; use hotshot_types::message::MessageKind; -use request_response::network::Bytes; -use request_response::network::Sender as SenderTrait; +use request_response::network::{Bytes, Sender as SenderTrait}; use tokio::sync::mpsc; +use crate::external_event_handler::{ExternalMessage, OutboundMessage}; + /// A wrapper type that we will implement the `Sender` trait for #[derive(Clone)] pub struct Sender(mpsc::Sender); diff --git a/sequencer/src/request_response/recipient_source.rs b/sequencer/src/request_response/recipient_source.rs index a0dcbbd69d..ce9b5819d5 100644 --- a/sequencer/src/request_response/recipient_source.rs +++ b/sequencer/src/request_response/recipient_source.rs @@ -38,7 +38,7 @@ impl RecipientSourceTrait for RecipientSource { .iter() .map(|entry| entry.stake_table_entry.stake_key) .collect() - } + }, } } } diff --git a/sequencer/src/restart_tests.rs b/sequencer/src/restart_tests.rs index 7631120994..b7a365cd49 100755 --- a/sequencer/src/restart_tests.rs +++ b/sequencer/src/restart_tests.rs @@ -1,13 +1,7 @@ #![cfg(test)] -use super::*; -use crate::{ - api::{self, data_source::testing::TestableSequencerDataSource, options::Query}, - 
genesis::{L1Finalized, StakeTableConfig}, - network::cdn::{TestingDef, WrappedSignatureKey}, - testing::wait_for_decide_on_handle, - SequencerApiVersion, -}; +use std::{collections::HashSet, path::Path, time::Duration}; + use anyhow::bail; use cdn_broker::{ reexports::{crypto::signature::KeyPair, def::hook::NoMessageHook}, @@ -31,10 +25,10 @@ use hotshot_testing::{ block_builder::{SimpleBuilderImplementation, TestBuilderImplementation}, test_builder::BuilderChange, }; -use hotshot_types::network::{Libp2pConfig, NetworkConfig}; use hotshot_types::{ event::{Event, EventType}, light_client::StateKeyPair, + network::{Libp2pConfig, NetworkConfig}, traits::{node_implementation::ConsensusTime, signature_key::SignatureKey}, }; use itertools::Itertools; @@ -42,17 +36,24 @@ use options::Modules; use portpicker::pick_unused_port; use run::init_with_storage; use sequencer_utils::test_utils::setup_test; -use std::{collections::HashSet, path::Path, time::Duration}; use surf_disco::{error::ClientError, Url}; use tempfile::TempDir; -use tokio::time::timeout; use tokio::{ task::{spawn, JoinHandle}, - time::sleep, + time::{sleep, timeout}, }; use vbs::version::Version; use vec1::vec1; +use super::*; +use crate::{ + api::{self, data_source::testing::TestableSequencerDataSource, options::Query}, + genesis::{L1Finalized, StakeTableConfig}, + network::cdn::{TestingDef, WrappedSignatureKey}, + testing::wait_for_decide_on_handle, + SequencerApiVersion, +}; + async fn test_restart_helper(network: (usize, usize), restart: (usize, usize), cdn: bool) { setup_test(); @@ -358,7 +359,7 @@ impl TestNode { sleep(delay).await; delay *= 2; retries -= 1; - } + }, } }; diff --git a/sequencer/src/run.rs b/sequencer/src/run.rs index 73c17561ea..baf104ffed 100644 --- a/sequencer/src/run.rs +++ b/sequencer/src/run.rs @@ -1,12 +1,5 @@ use std::sync::Arc; -use super::{ - api::{self, data_source::DataSourceOptions}, - context::SequencerContext, - init_node, network, - options::{Modules, Options}, - 
persistence, Genesis, L1Params, NetworkParams, -}; use clap::Parser; #[allow(unused_imports)] use espresso_types::{ @@ -18,6 +11,14 @@ use hotshot::MarketplaceConfig; use hotshot_types::traits::{metrics::NoMetrics, node_implementation::Versions}; use vbs::version::StaticVersionType; +use super::{ + api::{self, data_source::DataSourceOptions}, + context::SequencerContext, + init_node, network, + options::{Modules, Options}, + persistence, Genesis, L1Params, NetworkParams, +}; + pub async fn main() -> anyhow::Result<()> { let opt = Options::parse(); opt.logging.init(); @@ -48,7 +49,7 @@ pub async fn main() -> anyhow::Result<()> { SequencerVersions::::new(), ) .await - } + }, #[cfg(feature = "fee")] (FeeVersion::VERSION, _) => { run( @@ -58,7 +59,7 @@ pub async fn main() -> anyhow::Result<()> { SequencerVersions::::new(), ) .await - } + }, #[cfg(feature = "marketplace")] (MarketplaceVersion::VERSION, _) => { run( @@ -68,7 +69,7 @@ pub async fn main() -> anyhow::Result<()> { SequencerVersions::::new(), ) .await - } + }, _ => panic!( "Invalid base ({base}) and upgrade ({upgrade}) versions specified in the toml file." ), @@ -237,7 +238,7 @@ where .boxed() }) .await? - } + }, None => { init_node( genesis, @@ -253,7 +254,7 @@ where proposal_fetcher_config, ) .await? 
- } + }, }; Ok(ctx) @@ -263,23 +264,22 @@ where mod test { use std::time::Duration; - use tokio::spawn; - - use crate::{ - api::options::Http, - genesis::{L1Finalized, StakeTableConfig}, - persistence::fs, - SequencerApiVersion, - }; use espresso_types::{MockSequencerVersions, PubKey}; use hotshot_types::{light_client::StateKeyPair, traits::signature_key::SignatureKey}; use portpicker::pick_unused_port; use sequencer_utils::test_utils::setup_test; use surf_disco::{error::ClientError, Client, Url}; use tempfile::TempDir; + use tokio::spawn; use vbs::version::Version; use super::*; + use crate::{ + api::options::Http, + genesis::{L1Finalized, StakeTableConfig}, + persistence::fs, + SequencerApiVersion, + }; #[tokio::test(flavor = "multi_thread")] async fn test_startup_before_orchestrator() { diff --git a/sequencer/src/state.rs b/sequencer/src/state.rs index e7f9160e41..4f5cb7ac25 100644 --- a/sequencer/src/state.rs +++ b/sequencer/src/state.rs @@ -6,8 +6,7 @@ use espresso_types::{ traits::StateCatchup, v0_99::ChainConfig, BlockMerkleTree, Delta, FeeAccount, FeeMerkleTree, Leaf2, ValidatedState, }; -use futures::future::Future; -use futures::StreamExt; +use futures::{future::Future, StreamExt}; use hotshot::traits::ValidatedState as HotShotState; use hotshot_query_service::{ availability::{AvailabilityDataSource, LeafQueryData}, @@ -300,12 +299,12 @@ where parent_leaf = leaf; parent_state = state; break; - } + }, Err(err) => { tracing::error!(height = leaf.height(), "failed to updated state: {err:#}"); // If we fail, delay for a second and retry. 
sleep(Duration::from_secs(1)).await; - } + }, } } } diff --git a/sequencer/src/state_signature.rs b/sequencer/src/state_signature.rs index 87ff5b1761..9eeb1bf798 100644 --- a/sequencer/src/state_signature.rs +++ b/sequencer/src/state_signature.rs @@ -97,10 +97,10 @@ impl StateSigner { tracing::warn!("Error posting signature to the relay server: {:?}", error); } } - } + }, Err(err) => { tracing::error!("Error generating light client state: {:?}", err) - } + }, } } diff --git a/sequencer/src/state_signature/relay_server.rs b/sequencer/src/state_signature/relay_server.rs index fcfda46374..c718d8a185 100644 --- a/sequencer/src/state_signature/relay_server.rs +++ b/sequencer/src/state_signature/relay_server.rs @@ -149,11 +149,11 @@ impl StateRelayServerDataSource for StateRelayServerState { StatusCode::BAD_REQUEST, "A signature of this light client state is already posted at this block height for this key.".to_owned(), )); - } + }, std::collections::hash_map::Entry::Vacant(entry) => { entry.insert(signature); bundle.accumulated_weight += *weight; - } + }, } if bundle.accumulated_weight >= self.threshold { @@ -204,7 +204,7 @@ where reason: err.to_string(), })?; Api::::new(toml)? 
- } + }, }; api.get("getlateststate", |_req, state| { diff --git a/tests/Cargo.toml b/tests/Cargo.toml index eeb91c14a7..10593a612b 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -18,5 +18,6 @@ ethers = { workspace = true } futures = { workspace = true } reqwest = { workspace = true, features = ["json"] } surf-disco = { workspace = true } +tempfile = { workspace = true } tokio = { workspace = true } vbs = { workspace = true } diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 22b206cfbe..a000b7adb2 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1,9 +1,18 @@ -use anyhow::{anyhow, Result}; +use std::{ + fmt, + fs::File, + io::{stderr, stdout}, + path::PathBuf, + process::{Child, Command}, + str::FromStr, + time::Duration, +}; + +use anyhow::{anyhow, Context, Result}; use client::SequencerClient; use espresso_types::{FeeAmount, FeeVersion, MarketplaceVersion}; use ethers::prelude::*; use futures::future::join_all; -use std::{fmt, str::FromStr, time::Duration}; use surf_disco::Url; use tokio::time::{sleep, timeout}; use vbs::version::StaticVersionType; @@ -277,3 +286,77 @@ async fn wait_for_service(url: Url, interval: u64, timeout_duration: u64) -> Res .await .map_err(|e| anyhow!("Wait for service, timeout: ({}) {}", url, e))? } + +pub struct NativeDemo { + _child: Child, +} + +impl Drop for NativeDemo { + fn drop(&mut self) { + // It would be preferable to send a SIGINT or similar to the process that we started + // originally but despite quite some effort this never worked for the process-compose + // process started from within the scripts/demo-native script. + // + // Using `process-compose down` seems to pretty reliably stop the process-compose process + // and all the services it started. 
+ println!("Terminating process compose"); + let res = Command::new("process-compose") + .arg("down") + .stdout(stdout()) + .stderr(stderr()) + .spawn() + .expect("process-compose runs") + .wait() + .unwrap(); + println!("process-compose down exited with: {}", res); + } +} + +impl NativeDemo { + pub(crate) fn run(process_compose_extra_args: Option) -> anyhow::Result { + // Because we use nextest with the archive feature on CI we need to use the **runtime** + // value of CARGO_MANIFEST_DIR. + let crate_dir = PathBuf::from( + std::env::var("CARGO_MANIFEST_DIR") + .expect("CARGO_MANIFEST_DIR is set") + .clone(), + ); + let workspace_dir = crate_dir.parent().expect("crate_dir has a parent"); + + let mut cmd = Command::new("bash"); + cmd.arg("scripts/demo-native") + .current_dir(workspace_dir) + .arg("--tui=false"); + + if let Some(args) = process_compose_extra_args { + cmd.args(args.split(' ')); + } + + // Save output to file if PC_LOGS if that's set. + let log_path = std::env::var("PC_LOGS").unwrap_or_else(|_| { + tempfile::NamedTempFile::new() + .expect("tempfile creation succeeds") + .into_temp_path() + .to_string_lossy() + .to_string() + }); + + println!("Writing native demo logs to file: {}", log_path); + let outputs = File::create(log_path).context("unable to create log file")?; + cmd.stdout(outputs); + + println!("Spawning: {:?}", cmd); + let mut child = cmd.spawn().context("failed to spawn command")?; + + // Wait for three seconds and check if process has already exited so we don't waste time + // waiting for results later. + std::thread::sleep(Duration::from_secs(3)); + if let Some(exit_code) = child.try_wait()? 
{ + return Err(anyhow!("process-compose exited early with: {}", exit_code)); + } + + println!("process-compose started ..."); + + Ok(Self { _child: child }) + } +} diff --git a/tests/smoke.rs b/tests/smoke.rs index d154fc03dd..12831f2cbb 100644 --- a/tests/smoke.rs +++ b/tests/smoke.rs @@ -1,15 +1,16 @@ -use crate::common::TestConfig; +use std::time::Instant; + use anyhow::Result; use futures::StreamExt; -use std::time::Instant; + +use crate::common::{NativeDemo, TestConfig}; /// We allow for no change in state across this many consecutive iterations. const MAX_STATE_NOT_INCREMENTING: u8 = 1; /// We allow for no new transactions across this many consecutive iterations. const MAX_TXNS_NOT_INCREMENTING: u8 = 5; -#[tokio::test(flavor = "multi_thread")] -async fn test_smoke() -> Result<()> { +pub async fn assert_native_demo_works() -> Result<()> { let start = Instant::now(); dotenvy::dotenv()?; @@ -19,7 +20,7 @@ async fn test_smoke() -> Result<()> { let _ = testing.readiness().await?; let initial = testing.test_state().await; - println!("Initial State:{}", initial); + println!("Initial State: {}", initial); let mut sub = testing .espresso @@ -45,7 +46,9 @@ async fn test_smoke() -> Result<()> { } // test that we progress EXPECTED_BLOCK_HEIGHT blocks from where we started - if new.block_height.unwrap() >= testing.expected_block_height() + testing.initial_height { + if new.block_height.unwrap() + >= testing.expected_block_height() + initial.block_height.unwrap() + { println!("Reached {} block(s)!", testing.expected_block_height()); if new.txn_count - initial.txn_count < 1 { panic!("Did not receive transactions"); @@ -79,3 +82,9 @@ async fn test_smoke() -> Result<()> { } Ok(()) } + +#[tokio::test(flavor = "multi_thread")] +async fn test_native_demo_basic() -> Result<()> { + let _child = NativeDemo::run(None); + assert_native_demo_works().await +} diff --git a/tests/upgrades.rs b/tests/upgrades.rs index e680d904be..e2fd7805d1 100644 --- a/tests/upgrades.rs +++ 
b/tests/upgrades.rs @@ -1,13 +1,23 @@ -use crate::common::TestConfig; use anyhow::Result; use espresso_types::{FeeVersion, MarketplaceVersion}; use futures::{future::join_all, StreamExt}; use vbs::version::StaticVersionType; +use crate::{ + common::{NativeDemo, TestConfig}, + smoke::assert_native_demo_works, +}; + const SEQUENCER_BLOCKS_TIMEOUT: u64 = 200; #[tokio::test(flavor = "multi_thread")] -async fn test_upgrade() -> Result<()> { +async fn test_native_demo_upgrade() -> Result<()> { + let _demo = NativeDemo::run(Some( + "-f process-compose.yaml -f process-compose-mp.yml".to_string(), + ))?; + + assert_native_demo_works().await?; + dotenvy::dotenv()?; let testing = TestConfig::new().await.unwrap(); diff --git a/types/src/eth_signature_key.rs b/types/src/eth_signature_key.rs index e5c3de4554..be0de6abbe 100644 --- a/types/src/eth_signature_key.rs +++ b/types/src/eth_signature_key.rs @@ -12,8 +12,7 @@ use ethers::{ types::{Address, Signature}, utils::public_key_to_address, }; -use hotshot_types::traits::signature_key::BuilderSignatureKey; -use hotshot_types::traits::signature_key::PrivateSignatureKey; +use hotshot_types::traits::signature_key::{BuilderSignatureKey, PrivateSignatureKey}; use serde::{Deserialize, Serialize}; use thiserror::Error; diff --git a/types/src/v0/config.rs b/types/src/v0/config.rs index 54f634ee86..860871f0ca 100644 --- a/types/src/v0/config.rs +++ b/types/src/v0/config.rs @@ -1,15 +1,17 @@ use std::{num::NonZeroUsize, time::Duration}; use anyhow::Context; -use vec1::Vec1; - -use crate::PubKey; -use hotshot_types::network::{ - BuilderType, CombinedNetworkConfig, Libp2pConfig, RandomBuilderConfig, +use hotshot_types::{ + network::{ + BuilderType, CombinedNetworkConfig, Libp2pConfig, NetworkConfig, RandomBuilderConfig, + }, + HotShotConfig, PeerConfig, ValidatorConfig, }; -use hotshot_types::{network::NetworkConfig, HotShotConfig, PeerConfig, ValidatorConfig}; use serde::{Deserialize, Serialize}; use tide_disco::Url; +use vec1::Vec1; + +use 
crate::PubKey; /// This struct defines the public Hotshot validator configuration. /// Private key and state key pairs are excluded for security reasons. diff --git a/types/src/v0/impls/auction.rs b/types/src/v0/impls/auction.rs index 71b5a5592d..53a7812fff 100644 --- a/types/src/v0/impls/auction.rs +++ b/types/src/v0/impls/auction.rs @@ -1,9 +1,5 @@ -use super::{state::ValidatedState, MarketplaceVersion}; -use crate::{ - eth_signature_key::{EthKeyPair, SigningError}, - v0_99::{BidTx, BidTxBody, FullNetworkTx, SolverAuctionResults}, - FeeAccount, FeeAmount, FeeError, FeeInfo, NamespaceId, -}; +use std::str::FromStr; + use anyhow::Context; use async_trait::async_trait; use committable::{Commitment, Committable}; @@ -15,11 +11,17 @@ use hotshot_types::{ signature_key::BuilderSignatureKey, }, }; -use std::str::FromStr; use thiserror::Error; use tide_disco::error::ServerError; use url::Url; +use super::{state::ValidatedState, MarketplaceVersion}; +use crate::{ + eth_signature_key::{EthKeyPair, SigningError}, + v0_99::{BidTx, BidTxBody, FullNetworkTx, SolverAuctionResults}, + FeeAccount, FeeAmount, FeeError, FeeInfo, NamespaceId, +}; + impl FullNetworkTx { /// Proxy for `execute` method of each transaction variant. pub fn execute(&self, state: &mut ValidatedState) -> Result<(), ExecutionError> { diff --git a/types/src/v0/impls/block/full_payload/ns_proof.rs b/types/src/v0/impls/block/full_payload/ns_proof.rs index 837e81885e..2b8eeaf567 100644 --- a/types/src/v0/impls/block/full_payload/ns_proof.rs +++ b/types/src/v0/impls/block/full_payload/ns_proof.rs @@ -109,25 +109,25 @@ impl NsProof { ) .ok()? 
// error: internal to payload_verify() .ok()?; // verification failure - } - (None, true) => {} // 0-length namespace, nothing to verify + }, + (None, true) => {}, // 0-length namespace, nothing to verify (None, false) => { tracing::error!( "ns verify: missing proof for nonempty ns payload range {:?}", range ); return None; - } + }, (Some(_), true) => { tracing::error!("ns verify: unexpected proof for empty ns payload range"); return None; - } + }, } // verification succeeded, return some data let ns_id = ns_table.read_ns_id_unchecked(&self.ns_index); Some((self.ns_payload.export_all_txs(&ns_id), ns_id)) - } + }, VidCommitment::V1(_) => None, } } diff --git a/types/src/v0/impls/block/full_payload/ns_proof/test.rs b/types/src/v0/impls/block/full_payload/ns_proof/test.rs index 6b12ff2fb5..7e917f91ff 100644 --- a/types/src/v0/impls/block/full_payload/ns_proof/test.rs +++ b/types/src/v0/impls/block/full_payload/ns_proof/test.rs @@ -1,6 +1,5 @@ use futures::future; -use hotshot::helpers::initialize_logging; -use hotshot::traits::BlockPayload; +use hotshot::{helpers::initialize_logging, traits::BlockPayload}; use hotshot_types::{ data::VidCommitment, traits::EncodeBytes, diff --git a/types/src/v0/impls/block/full_payload/payload.rs b/types/src/v0/impls/block/full_payload/payload.rs index 3707ab3a57..523aea7270 100644 --- a/types/src/v0/impls/block/full_payload/payload.rs +++ b/types/src/v0/impls/block/full_payload/payload.rs @@ -3,8 +3,8 @@ use std::{collections::BTreeMap, sync::Arc}; use async_trait::async_trait; use committable::Committable; use hotshot_query_service::availability::QueryablePayload; -use hotshot_types::data::ViewNumber; use hotshot_types::{ + data::ViewNumber, traits::{BlockPayload, EncodeBytes}, utils::BuilderCommitment, vid::advz::{ADVZCommon, ADVZScheme}, @@ -13,12 +13,11 @@ use jf_vid::VidScheme; use sha2::Digest; use thiserror::Error; -use crate::Transaction; use crate::{ v0::impls::{NodeState, ValidatedState}, v0_1::ChainConfig, Index, Iter, 
NamespaceId, NsIndex, NsPayload, NsPayloadBuilder, NsPayloadRange, NsTable, - NsTableBuilder, Payload, PayloadByteLen, SeqTypes, TxProof, + NsTableBuilder, Payload, PayloadByteLen, SeqTypes, Transaction, TxProof, }; #[derive(serde::Deserialize, serde::Serialize, Error, Debug, Eq, PartialEq)] @@ -281,7 +280,7 @@ impl PayloadByteLen { ADVZScheme::get_payload_byte_len(common) ); return false; - } + }, }; self.0 == expected diff --git a/types/src/v0/impls/block/namespace_payload/tx_proof.rs b/types/src/v0/impls/block/namespace_payload/tx_proof.rs index 5c2026088a..370e9da08b 100644 --- a/types/src/v0/impls/block/namespace_payload/tx_proof.rs +++ b/types/src/v0/impls/block/namespace_payload/tx_proof.rs @@ -199,19 +199,19 @@ impl TxProof { { return Some(false); } - } - (None, true) => {} // 0-length tx, nothing to verify + }, + (None, true) => {}, // 0-length tx, nothing to verify (None, false) => { tracing::error!( "tx verify: missing proof for nonempty tx payload range {:?}", range ); return None; - } + }, (Some(_), true) => { tracing::error!("tx verify: unexpected proof for empty tx payload range"); return None; - } + }, } } diff --git a/types/src/v0/impls/chain_config.rs b/types/src/v0/impls/chain_config.rs index d29ad6bf2b..32fa4d8b0c 100644 --- a/types/src/v0/impls/chain_config.rs +++ b/types/src/v0/impls/chain_config.rs @@ -1,11 +1,12 @@ -use crate::{BlockSize, ChainId}; +use std::str::FromStr; + use ethers::types::U256; use sequencer_utils::{ impl_serde_from_string_or_integer, impl_to_fixed_bytes, ser::FromStringOrInteger, }; -use std::str::FromStr; use super::parse_size; +use crate::{BlockSize, ChainId}; impl_serde_from_string_or_integer!(ChainId); impl_to_fixed_bytes!(ChainId, U256); @@ -74,9 +75,8 @@ impl FromStringOrInteger for BlockSize { #[cfg(test)] mod tests { - use crate::v0_99::{ChainConfig, ResolvableChainConfig}; - use super::*; + use crate::v0_99::{ChainConfig, ResolvableChainConfig}; #[test] fn test_chainid_serde_json_as_decimal() { diff --git 
a/types/src/v0/impls/fee_info.rs b/types/src/v0/impls/fee_info.rs index d1d61dd42e..be5ad34330 100644 --- a/types/src/v0/impls/fee_info.rs +++ b/types/src/v0/impls/fee_info.rs @@ -1,3 +1,5 @@ +use std::str::FromStr; + use anyhow::{bail, ensure, Context}; use ark_serialize::{ CanonicalDeserialize, CanonicalSerialize, Compress, Read, SerializationError, Valid, Validate, @@ -22,7 +24,6 @@ use num_traits::CheckedSub; use sequencer_utils::{ impl_serde_from_string_or_integer, impl_to_fixed_bytes, ser::FromStringOrInteger, }; -use std::str::FromStr; use thiserror::Error; use crate::{ @@ -390,7 +391,7 @@ impl FeeAccountProof { .elem() .context("presence proof is missing account balance")? .0) - } + }, FeeMerkleProof::Absence(proof) => { let tree = FeeMerkleTree::from_commitment(comm); ensure!( @@ -398,7 +399,7 @@ impl FeeAccountProof { "invalid proof" ); Ok(0.into()) - } + }, } } @@ -413,11 +414,11 @@ impl FeeAccountProof { proof, )?; Ok(()) - } + }, FeeMerkleProof::Absence(proof) => { tree.non_membership_remember(FeeAccount(self.account), proof)?; Ok(()) - } + }, } } } @@ -442,14 +443,14 @@ pub fn retain_accounts( // This remember cannot fail, since we just constructed a valid proof, and are // remembering into a tree with the same commitment. snapshot.remember(account, *elem, proof).unwrap(); - } + }, LookupResult::NotFound(proof) => { // Likewise this cannot fail. 
snapshot.non_membership_remember(account, proof).unwrap() - } + }, LookupResult::NotInMemory => { bail!("missing account {account}"); - } + }, } } @@ -460,9 +461,8 @@ pub fn retain_accounts( mod test { use ethers::abi::Address; - use crate::{FeeAccount, FeeAmount, FeeInfo}; - use super::IterableFeeInfo; + use crate::{FeeAccount, FeeAmount, FeeInfo}; #[test] fn test_iterable_fee_info() { diff --git a/types/src/v0/impls/header.rs b/types/src/v0/impls/header.rs index bc3e982bf7..3f765bca55 100644 --- a/types/src/v0/impls/header.rs +++ b/types/src/v0/impls/header.rs @@ -1,3 +1,5 @@ +use std::fmt; + use anyhow::{ensure, Context}; use ark_serialize::CanonicalSerialize; use committable::{Commitment, Committable, RawCommitmentBuilder}; @@ -19,11 +21,11 @@ use serde::{ Deserialize, Deserializer, Serialize, Serializer, }; use serde_json::{Map, Value}; -use std::fmt; use thiserror::Error; use time::OffsetDateTime; use vbs::version::{StaticVersionType, Version}; +use super::{instance_state::NodeState, state::ValidatedState}; use crate::{ v0::{ header::{EitherOrVersion, VersionedHeader}, @@ -35,8 +37,6 @@ use crate::{ Header, L1BlockInfo, L1Snapshot, Leaf2, NamespaceId, NsTable, SeqTypes, UpgradeType, }; -use super::{instance_state::NodeState, state::ValidatedState}; - impl v0_1::Header { pub(crate) fn commit(&self) -> Commitment
{ let mut bmt_bytes = vec![]; @@ -174,7 +174,7 @@ impl<'de> Deserialize<'de> for Header { )), EitherOrVersion::Version(v) => { Err(serde::de::Error::custom(format!("invalid version {v:?}"))) - } + }, } } @@ -211,7 +211,7 @@ impl<'de> Deserialize<'de> for Header { )), EitherOrVersion::Version(v) => { Err(de::Error::custom(format!("invalid version {v:?}"))) - } + }, chain_config => Err(de::Error::custom(format!( "expected version, found chain_config {chain_config:?}" ))), @@ -604,7 +604,7 @@ impl Header { .as_ref() .fetch_chain_config(validated_cf.commit()) .await - } + }, } } } @@ -1176,14 +1176,12 @@ mod test_headers { use ethers::{types::Address, utils::Anvil}; use hotshot_query_service::testing::mocks::MockVersions; use hotshot_types::traits::signature_key::BuilderSignatureKey; - use sequencer_utils::test_utils::setup_test; use v0_1::{BlockMerkleTree, FeeMerkleTree, L1Client}; use vbs::{bincode_serializer::BincodeSerializer, version::StaticVersion, BinarySerializer}; - use crate::{eth_signature_key::EthKeyPair, mock::MockStateCatchup, Leaf}; - use super::*; + use crate::{eth_signature_key::EthKeyPair, mock::MockStateCatchup, Leaf}; #[derive(Debug, Default)] #[must_use] diff --git a/types/src/v0/impls/instance_state.rs b/types/src/v0/impls/instance_state.rs index 0330695769..cf5c2dbeaf 100644 --- a/types/src/v0/impls/instance_state.rs +++ b/types/src/v0/impls/instance_state.rs @@ -1,15 +1,15 @@ -use crate::v0::{ - traits::StateCatchup, v0_99::ChainConfig, GenesisHeader, L1BlockInfo, L1Client, PubKey, - Timestamp, Upgrade, UpgradeMode, -}; -use hotshot_types::traits::states::InstanceState; -use hotshot_types::HotShotConfig; use std::{collections::BTreeMap, sync::Arc}; + +use hotshot_types::{traits::states::InstanceState, HotShotConfig}; use vbs::version::Version; #[cfg(any(test, feature = "testing"))] use vbs::version::{StaticVersion, StaticVersionType}; use super::state::ValidatedState; +use crate::v0::{ + traits::StateCatchup, v0_99::ChainConfig, GenesisHeader, 
L1BlockInfo, L1Client, PubKey, + Timestamp, Upgrade, UpgradeMode, +}; /// Represents the immutable state of a node. /// @@ -174,7 +174,7 @@ impl Upgrade { config.stop_proposing_time = u64::MAX; config.start_voting_time = 0; config.stop_voting_time = u64::MAX; - } + }, UpgradeMode::Time(t) => { config.start_proposing_time = t.start_proposing_time.unix_timestamp(); config.stop_proposing_time = t.stop_proposing_time.unix_timestamp(); @@ -187,7 +187,7 @@ impl Upgrade { config.stop_proposing_view = u64::MAX; config.start_voting_view = 0; config.stop_voting_view = u64::MAX; - } + }, } } } diff --git a/types/src/v0/impls/l1.rs b/types/src/v0/impls/l1.rs index 9661dd47ec..3aa0eb930d 100644 --- a/types/src/v0/impls/l1.rs +++ b/types/src/v0/impls/l1.rs @@ -1,3 +1,12 @@ +use std::{ + cmp::{min, Ordering}, + num::NonZeroUsize, + pin::Pin, + result::Result as StdResult, + sync::Arc, + time::Instant, +}; + use alloy::{ eips::BlockId, hex, @@ -28,14 +37,6 @@ use futures::{ use hotshot_types::traits::metrics::Metrics; use lru::LruCache; use parking_lot::RwLock; -use std::result::Result as StdResult; -use std::{ - cmp::{min, Ordering}, - num::NonZeroUsize, - pin::Pin, - sync::Arc, - time::Instant, -}; use tokio::{ spawn, sync::{Mutex, MutexGuard, Notify}, @@ -312,7 +313,7 @@ impl Service for SwitchingTransport { // If it's okay, log the success to the status current_transport.status.write().log_success(); Ok(res) - } + }, Err(err) => { // Increment the failure metric if let Some(f) = self_clone @@ -364,7 +365,7 @@ impl Service for SwitchingTransport { } Err(err) - } + }, } }) } @@ -737,12 +738,12 @@ impl L1Client { ); self.retry_delay().await; continue; - } + }, Err(err) => { tracing::warn!(number, "failed to get finalized L1 block: {err:#}"); self.retry_delay().await; continue; - } + }, }; break L1BlockInfo { number: block.header.number, @@ -815,7 +816,7 @@ impl L1Client { Err(err) => { tracing::warn!(from, to, %err, "Fee L1Event Error"); sleep(retry_delay).await; - } + }, } } } 
@@ -935,7 +936,7 @@ async fn get_finalized_block( #[cfg(test)] mod test { - use std::ops::Add; + use std::{ops::Add, time::Duration}; use ethers::{ middleware::SignerMiddleware, @@ -948,7 +949,6 @@ mod test { use hotshot_contract_adapter::stake_table::NodeInfoJf; use portpicker::pick_unused_port; use sequencer_utils::test_utils::setup_test; - use std::time::Duration; use time::OffsetDateTime; use super::*; diff --git a/types/src/v0/impls/mod.rs b/types/src/v0/impls/mod.rs index cc1f2fe1c9..8c58f6bcfe 100644 --- a/types/src/v0/impls/mod.rs +++ b/types/src/v0/impls/mod.rs @@ -14,12 +14,11 @@ mod transaction; pub use auction::SolverAuctionResultsProvider; pub use fee_info::{retain_accounts, FeeError}; +#[cfg(any(test, feature = "testing"))] +pub use instance_state::mock; pub use instance_state::NodeState; pub use stake_table::*; pub use state::{ get_l1_deposits, BuilderValidationError, ProposalValidationError, StateValidationError, ValidatedState, }; - -#[cfg(any(test, feature = "testing"))] -pub use instance_state::mock; diff --git a/types/src/v0/impls/solver.rs b/types/src/v0/impls/solver.rs index e16fbc7f9f..da6f525e25 100644 --- a/types/src/v0/impls/solver.rs +++ b/types/src/v0/impls/solver.rs @@ -1,10 +1,8 @@ use committable::{Commitment, Committable}; use hotshot::types::SignatureKey; -use crate::v0::utils::Update; - use super::v0_99::{RollupRegistrationBody, RollupUpdatebody}; -use crate::v0::utils::Update::Set; +use crate::v0::utils::{Update, Update::Set}; impl Committable for RollupRegistrationBody { fn tag() -> String { @@ -54,7 +52,7 @@ impl Committable for RollupUpdatebody { comm = comm .u64_field("reserve_url", 2) .var_size_bytes(url.as_str().as_ref()) - } + }, Set(None) => comm = comm.u64_field("reserve_url", 1), Update::Skip => comm = comm.u64_field("reserve_url", 0), } diff --git a/types/src/v0/impls/stake_table.rs b/types/src/v0/impls/stake_table.rs index 7823fc04ec..d65d6f8335 100644 --- a/types/src/v0/impls/stake_table.rs +++ 
b/types/src/v0/impls/stake_table.rs @@ -1,7 +1,9 @@ use std::{ cmp::max, collections::{BTreeMap, BTreeSet, HashMap}, + fmt::Debug, num::NonZeroU64, + sync::Arc, }; use anyhow::Context; @@ -24,9 +26,7 @@ use hotshot_types::{ }, PeerConfig, }; - use itertools::Itertools; -use std::{fmt::Debug, sync::Arc}; use thiserror::Error; use super::{ diff --git a/types/src/v0/impls/state.rs b/types/src/v0/impls/state.rs index 8f0439d21e..b13a48fb17 100644 --- a/types/src/v0/impls/state.rs +++ b/types/src/v0/impls/state.rs @@ -1,3 +1,5 @@ +use std::ops::Add; + use anyhow::bail; use committable::{Commitment, Committable}; use ethers::types::Address; @@ -19,7 +21,6 @@ use jf_merkle_tree::{ }; use num_traits::CheckedSub; use serde::{Deserialize, Serialize}; -use std::ops::Add; use thiserror::Error; use time::OffsetDateTime; use vbs::version::Version; @@ -1118,7 +1119,7 @@ mod test { }), Header::V99(_) => { panic!("You called `Header.next()` on unimplemented version (v3)") - } + }, } } /// Replaces builder signature w/ invalid one. 
@@ -1147,7 +1148,7 @@ mod test { }), Header::V99(_) => { panic!("You called `Header.sign()` on unimplemented version (v3)") - } + }, } } diff --git a/types/src/v0/impls/transaction.rs b/types/src/v0/impls/transaction.rs index cd473d0398..ca09a1533a 100644 --- a/types/src/v0/impls/transaction.rs +++ b/types/src/v0/impls/transaction.rs @@ -3,9 +3,8 @@ use hotshot_query_service::explorer::ExplorerTransaction; use hotshot_types::traits::block_contents::Transaction as HotShotTransaction; use serde::{de::Error, Deserialize, Deserializer}; -use crate::{NamespaceId, Transaction}; - use super::{NsPayloadBuilder, NsTableBuilder}; +use crate::{NamespaceId, Transaction}; impl From for NamespaceId { fn from(value: u32) -> Self { diff --git a/types/src/v0/mod.rs b/types/src/v0/mod.rs index e5dbe0d015..578f0c363a 100644 --- a/types/src/v0/mod.rs +++ b/types/src/v0/mod.rs @@ -1,3 +1,5 @@ +use std::marker::PhantomData; + use hotshot_types::{ data::{EpochNumber, ViewNumber}, signature_key::BLSPubKey, @@ -7,7 +9,6 @@ use hotshot_types::{ }, }; use serde::{Deserialize, Serialize}; -use std::marker::PhantomData; pub mod config; mod header; @@ -15,6 +16,8 @@ mod impls; pub mod traits; mod utils; pub use header::Header; +#[cfg(any(test, feature = "testing"))] +pub use impls::mock; pub use impls::{ get_l1_deposits, retain_accounts, BuilderValidationError, EpochCommittees, FeeError, ProposalValidationError, StateValidationError, @@ -22,9 +25,6 @@ pub use impls::{ pub use utils::*; use vbs::version::{StaticVersion, StaticVersionType}; -#[cfg(any(test, feature = "testing"))] -pub use impls::mock; - // This is the single source of truth for minor versions supported by this major version. 
// // It is written as a higher-level macro which takes a macro invocation as an argument and appends diff --git a/types/src/v0/traits.rs b/types/src/v0/traits.rs index 610fc19e3f..e4f1389077 100644 --- a/types/src/v0/traits.rs +++ b/types/src/v0/traits.rs @@ -6,7 +6,7 @@ use anyhow::{bail, ensure, Context}; use async_trait::async_trait; use committable::{Commitment, Committable}; use futures::{FutureExt, TryFutureExt}; -use hotshot::{types::EventType, HotShotInitializer}; +use hotshot::{types::EventType, HotShotInitializer, InitializerEpochInfo}; use hotshot_types::{ consensus::CommitmentMap, data::{ @@ -14,13 +14,14 @@ use hotshot_types::{ DaProposal, DaProposal2, EpochNumber, QuorumProposal, QuorumProposal2, QuorumProposalWrapper, VidCommitment, VidDisperseShare, ViewNumber, }, + drb::DrbResult, event::{HotShotAction, LeafInfo}, message::{convert_proposal, Proposal, UpgradeLock}, simple_certificate::{ NextEpochQuorumCertificate2, QuorumCertificate, QuorumCertificate2, UpgradeCertificate, }, traits::{ - node_implementation::{ConsensusTime, Versions}, + node_implementation::{ConsensusTime, NodeType, Versions}, storage::Storage, ValidatedState as HotShotState, }, @@ -29,15 +30,14 @@ use hotshot_types::{ use itertools::Itertools; use serde::{de::DeserializeOwned, Serialize}; +use super::{ + impls::NodeState, utils::BackoffParams, EpochCommittees, EpochVersion, Leaf, SequencerVersions, +}; use crate::{ v0::impls::ValidatedState, v0_99::ChainConfig, BlockMerkleTree, Event, FeeAccount, FeeAccountProof, FeeMerkleCommitment, FeeMerkleTree, Leaf2, NetworkConfig, SeqTypes, }; -use super::{ - impls::NodeState, utils::BackoffParams, EpochCommittees, EpochVersion, Leaf, SequencerVersions, -}; - #[async_trait] pub trait StateCatchup: Send + Sync { async fn try_fetch_leaves(&self, retry: usize, height: u64) -> anyhow::Result>; @@ -379,7 +379,7 @@ impl StateCatchup for Vec { provider = provider.name(), "failed to fetch leaves: {err:#}" ); - } + }, } } @@ -414,7 +414,7 @@ impl 
StateCatchup for Vec { provider = provider.name(), "failed to fetch accounts: {err:#}" ); - } + }, } } @@ -441,7 +441,7 @@ impl StateCatchup for Vec { provider = provider.name(), "failed to fetch frontier: {err:#}" ); - } + }, } } @@ -461,7 +461,7 @@ impl StateCatchup for Vec { provider = provider.name(), "failed to fetch chain config: {err:#}" ); - } + }, } } @@ -538,6 +538,7 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { async fn load_upgrade_certificate( &self, ) -> anyhow::Result>>; + async fn load_start_epoch_info(&self) -> anyhow::Result>>; /// Load the latest known consensus state. /// @@ -558,11 +559,11 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { Some(view) => { tracing::info!(?view, "starting from saved view"); view - } + }, None => { tracing::info!("no saved view, starting from genesis"); ViewNumber::genesis() - } + }, }; let next_epoch_high_qc = self @@ -587,7 +588,7 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { let anchor_view = leaf.view_number(); (leaf, high_qc, Some(anchor_view)) - } + }, None => { tracing::info!("no saved leaf, starting from genesis leaf"); ( @@ -596,7 +597,7 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { QuorumCertificate2::genesis::(&genesis_validated_state, &state).await, None, ) - } + }, }; let validated_state = if leaf.block_header().height() == 0 { // If we are starting from genesis, we can provide the full state. 
@@ -615,6 +616,16 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { // TODO: let epoch = genesis_epoch_from_version::(); + let config = self.load_config().await.context("loading config")?; + let epoch_height = config + .as_ref() + .map(|c| c.config.epoch_height) + .unwrap_or_default(); + let epoch_start_block = config + .as_ref() + .map(|c| c.config.epoch_start_block) + .unwrap_or_default(); + let (undecided_leaves, undecided_state) = self .load_undecided_state() .await @@ -631,6 +642,11 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { .await .context("loading upgrade certificate")?; + let start_epoch_info = self + .load_start_epoch_info() + .await + .context("loading start epoch info")?; + tracing::info!( ?leaf, ?view, @@ -647,8 +663,8 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { Ok(( HotShotInitializer { instance_state: state, - epoch_height: 0, - epoch_start_block: 0, + epoch_height, + epoch_start_block, anchor_leaf: leaf, anchor_state: validated_state.unwrap_or_default(), anchor_state_delta: None, @@ -665,7 +681,7 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { .collect(), undecided_state, saved_vid_shares: Default::default(), // TODO: implement saved_vid_shares - start_epoch_info: Default::default(), // TODO: implement start_epoch_info + start_epoch_info, }, anchor_view, )) @@ -819,6 +835,17 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { ) -> anyhow::Result<()> { self.append_quorum_proposal2(proposal).await } + + async fn add_drb_result( + &self, + epoch: ::Epoch, + drb_result: DrbResult, + ) -> anyhow::Result<()>; + async fn add_epoch_root( + &self, + epoch: ::Epoch, + block_header: ::BlockHeader, + ) -> anyhow::Result<()>; } #[async_trait] @@ -947,6 +974,22 @@ impl Storage for Arc

{ ) -> anyhow::Result<()> { (**self).update_undecided_state2(leaves, state).await } + + async fn add_drb_result( + &self, + epoch: ::Epoch, + drb_result: DrbResult, + ) -> anyhow::Result<()> { + (**self).add_drb_result(epoch, drb_result).await + } + + async fn add_epoch_root( + &self, + epoch: ::Epoch, + block_header: ::BlockHeader, + ) -> anyhow::Result<()> { + (**self).add_epoch_root(epoch, block_header).await + } } /// Data that can be deserialized from a subslice of namespace payload bytes. diff --git a/types/src/v0/utils.rs b/types/src/v0/utils.rs index d2c32e598f..af751f99fd 100644 --- a/types/src/v0/utils.rs +++ b/types/src/v0/utils.rs @@ -1,3 +1,11 @@ +use std::{ + cmp::{min, Ordering}, + fmt::{self, Debug, Display, Formatter}, + num::ParseIntError, + str::FromStr, + time::Duration, +}; + use anyhow::Context; use bytesize::ByteSize; use clap::Parser; @@ -12,13 +20,6 @@ use hotshot_types::{ use rand::Rng; use sequencer_utils::{impl_serde_from_string_or_integer, ser::FromStringOrInteger}; use serde::{Deserialize, Serialize}; -use std::{ - cmp::{min, Ordering}, - fmt::{self, Debug, Display, Formatter}, - num::ParseIntError, - str::FromStr, - time::Duration, -}; use thiserror::Error; use time::{ format_description::well_known::Rfc3339 as TimestampFormat, macros::time, Date, OffsetDateTime, @@ -264,14 +265,14 @@ impl BackoffParams { Ok(res) => return Ok(res), Err(err) if self.disable => { return Err(err.context("Retryable operation failed; retries disabled")); - } + }, Err(err) => { tracing::warn!( "Retryable operation failed, will retry after {delay:?}: {err:#}" ); sleep(delay).await; delay = self.backoff(delay); - } + }, } } unreachable!() diff --git a/utils/src/deployer.rs b/utils/src/deployer.rs index b19f249d6b..6bb4702413 100644 --- a/utils/src/deployer.rs +++ b/utils/src/deployer.rs @@ -1,3 +1,5 @@ +use std::{collections::HashMap, io::Write, ops::Deref, sync::Arc, time::Duration}; + use alloy::{ contract::RawCallBuilder, network::{Ethereum, 
EthereumWallet}, @@ -24,7 +26,6 @@ use futures::future::{BoxFuture, FutureExt}; use hotshot_contract_adapter::light_client::{ LightClientConstructorArgs, ParsedLightClientState, ParsedStakeTableState, }; -use std::{collections::HashMap, io::Write, ops::Deref, sync::Arc, time::Duration}; use url::Url; /// Set of predeployed contracts. @@ -548,6 +549,8 @@ fn link_light_client_contract( #[cfg(any(test, feature = "testing"))] pub mod test_helpers { + use std::sync::Arc; + use anyhow::Context; use contract_bindings_ethers::{ erc1967_proxy::ERC1967Proxy, @@ -557,11 +560,9 @@ pub mod test_helpers { }; use ethers::prelude::*; use hotshot_contract_adapter::light_client::LightClientConstructorArgs; - use std::sync::Arc; - - use crate::deployer::link_light_client_contract; use super::{Contract, Contracts}; + use crate::deployer::link_light_client_contract; /// Deployment `LightClientMock.sol` as proxy for testing pub async fn deploy_light_client_contract_as_proxy_for_test( diff --git a/utils/src/lib.rs b/utils/src/lib.rs index 014cecec6d..b76ecf50d5 100644 --- a/utils/src/lib.rs +++ b/utils/src/lib.rs @@ -257,14 +257,14 @@ pub async fn init_signer(provider: &Url, mnemonic: &str, index: u32) -> Option { tracing::error!("error connecting to RPC {}: {}", provider, err); return None; - } + }, }; let chain_id = match provider.get_chainid().await { Ok(id) => id.as_u64(), Err(err) => { tracing::error!("error getting chain ID: {}", err); return None; - } + }, }; let mnemonic = match MnemonicBuilder::::default() .phrase(mnemonic) @@ -274,14 +274,14 @@ pub async fn init_signer(provider: &Url, mnemonic: &str, index: u32) -> Option { tracing::error!("error building wallet: {}", err); return None; - } + }, }; let wallet = match mnemonic.build() { Ok(wallet) => wallet, Err(err) => { tracing::error!("error opening wallet: {}", err); return None; - } + }, }; let wallet = wallet.with_chain_id(chain_id); Some(SignerMiddleware::new(provider, wallet)) @@ -367,7 +367,7 @@ where 
tracing::error!("contract revert: {:?}", e); } return Err(anyhow!("error sending transaction: {:?}", err)); - } + }, }; let hash = pending.tx_hash(); @@ -382,12 +382,12 @@ where Ok(Some(receipt)) => receipt, Ok(None) => { return Err(anyhow!("contract call {hash:x}: no receipt")); - } + }, Err(err) => { return Err(anyhow!( "contract call {hash:x}: error getting transaction receipt: {err}" )) - } + }, }; if receipt.status != Some(1.into()) { return Err(anyhow!("contract call {hash:x}: transaction reverted")); @@ -418,19 +418,19 @@ async fn wait_for_transaction_to_be_mined( if i >= log_retries { tracing::warn!("contract call {hash:?} (retry {i}/{retries}): error getting transaction status: {err}"); } - } + }, Ok(None) => { if i >= log_retries { tracing::warn!( "contract call {hash:?} (retry {i}/{retries}): missing from mempool" ); } - } + }, Ok(Some(tx)) if tx.block_number.is_none() => { if i >= log_retries { tracing::warn!("contract call {hash:?} (retry {i}/{retries}): pending"); } - } + }, Ok(Some(_)) => return true, } diff --git a/utils/src/stake_table.rs b/utils/src/stake_table.rs index 943c53f808..f82d6ac66c 100644 --- a/utils/src/stake_table.rs +++ b/utils/src/stake_table.rs @@ -1,3 +1,5 @@ +use std::{fs, path::Path, sync::Arc, time::Duration}; + /// Utilities for loading an initial permissioned stake table from a toml file. 
/// /// The initial stake table is passed to the permissioned stake table contract @@ -17,8 +19,6 @@ use hotshot_contract_adapter::stake_table::{bls_jf_to_sol, NodeInfoJf}; use hotshot_types::network::PeerConfigKeys; use url::Url; -use std::{fs, path::Path, sync::Arc, time::Duration}; - /// A stake table config stored in a file #[derive(serde::Serialize, serde::Deserialize, Debug, Clone)] #[serde(bound(deserialize = ""))] @@ -144,12 +144,15 @@ pub async fn update_stake_table( #[cfg(test)] mod test { - use crate::stake_table::{PermissionedStakeTableConfig, PermissionedStakeTableUpdate}; - use crate::test_utils::setup_test; use hotshot::types::{BLSPubKey, SignatureKey}; use hotshot_types::{light_client::StateKeyPair, network::PeerConfigKeys}; use toml::toml; + use crate::{ + stake_table::{PermissionedStakeTableConfig, PermissionedStakeTableUpdate}, + test_utils::setup_test, + }; + fn assert_peer_config_eq(p1: &PeerConfigKeys, p2: &PeerConfigKeys) { assert_eq!(p1.stake_table_key, p2.stake_table_key); assert_eq!(p1.state_ver_key, p2.state_ver_key); diff --git a/vid/src/avid_m.rs b/vid/src/avid_m.rs index 29cc31308f..6e7f5be281 100644 --- a/vid/src/avid_m.rs +++ b/vid/src/avid_m.rs @@ -11,10 +11,8 @@ //! vectors. And for dispersal, each storage node gets some vectors and their //! Merkle proofs according to its weight. 
-use crate::{ - utils::bytes_to_field::{self, bytes_to_field, field_to_bytes}, - VidError, VidResult, VidScheme, -}; +use std::ops::Range; + use ark_ff::PrimeField; use ark_poly::{EvaluationDomain, Radix2EvaluationDomain}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; @@ -26,9 +24,13 @@ use p3_maybe_rayon::prelude::{ IntoParallelIterator, IntoParallelRefIterator, ParallelIterator, ParallelSlice, }; use serde::{Deserialize, Serialize}; -use std::ops::Range; use tagged_base64::tagged; +use crate::{ + utils::bytes_to_field::{self, bytes_to_field, field_to_bytes}, + VidError, VidResult, VidScheme, +}; + mod config; pub mod namespaced; @@ -401,9 +403,10 @@ impl VidScheme for AvidMScheme { /// Unit tests #[cfg(test)] pub mod tests { - use crate::{avid_m::AvidMScheme, VidScheme}; use rand::{seq::SliceRandom, RngCore}; + use crate::{avid_m::AvidMScheme, VidScheme}; + #[test] fn round_trip() { // play with these items diff --git a/vid/src/avid_m/namespaced.rs b/vid/src/avid_m/namespaced.rs index aeda112d39..d49cb679fd 100644 --- a/vid/src/avid_m/namespaced.rs +++ b/vid/src/avid_m/namespaced.rs @@ -1,13 +1,15 @@ //! This file implements the namespaced AvidM scheme. 
+use std::ops::Range; + +use jf_merkle_tree::MerkleTreeScheme; +use serde::{Deserialize, Serialize}; + use super::{AvidMCommit, AvidMShare, RawAvidMShare}; use crate::{ avid_m::{AvidMScheme, MerkleTree}, VidError, VidResult, VidScheme, }; -use jf_merkle_tree::MerkleTreeScheme; -use serde::{Deserialize, Serialize}; -use std::ops::Range; /// Dummy struct for namespaced AvidM scheme pub struct NsAvidMScheme; diff --git a/vid/src/utils/bytes_to_field.rs b/vid/src/utils/bytes_to_field.rs index 5808c3e701..1b22ed81ea 100644 --- a/vid/src/utils/bytes_to_field.rs +++ b/vid/src/utils/bytes_to_field.rs @@ -1,4 +1,3 @@ -use ark_ff::{BigInteger, PrimeField}; use std::{ borrow::Borrow, iter::Take, @@ -6,6 +5,8 @@ use std::{ vec::{IntoIter, Vec}, }; +use ark_ff::{BigInteger, PrimeField}; + /// Deterministic, infallible, invertible iterator adaptor to convert from /// arbitrary bytes to field elements. /// @@ -178,11 +179,12 @@ pub fn elem_byte_capacity() -> usize { #[cfg(test)] mod tests { - use super::{bytes_to_field, field_to_bytes, PrimeField, Vec}; use ark_bls12_381::Fr as Fr381; use ark_bn254::Fr as Fr254; use rand::RngCore; + use super::{bytes_to_field, field_to_bytes, PrimeField, Vec}; + fn bytes_to_field_iter() { let byte_lens = [0, 1, 2, 16, 31, 32, 33, 48, 65, 100, 200, 5000];