diff --git a/Cargo.lock b/Cargo.lock
index 580a302a..eda24333 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3658,7 +3658,7 @@ dependencies = [
 
 [[package]]
 name = "ice-node"
-version = "0.4.50"
+version = "0.4.51"
 dependencies = [
  "arctic-runtime",
  "async-trait",
diff --git a/node/Cargo.toml b/node/Cargo.toml
index e0c2fbf4..ea53debc 100644
--- a/node/Cargo.toml
+++ b/node/Cargo.toml
@@ -7,7 +7,7 @@ homepage = 'https://substrate.dev'
 license = 'Apache-2.0'
 name = 'ice-node'
 repository = 'https://github.com/web3labs/ice-substrate'
-version = '0.4.50'
+version = '0.4.51'
 publish = false
 
 [package.metadata.docs.rs]
diff --git a/node/src/chain_spec/snow.rs b/node/src/chain_spec/snow.rs
index d14ccf44..ad8477c2 100644
--- a/node/src/chain_spec/snow.rs
+++ b/node/src/chain_spec/snow.rs
@@ -124,11 +124,11 @@ pub fn testnet_spec() -> SnowChainSpec {
 	let endowed_accounts = vec![
 		(
 			hex!["10b3ae7ebb7d722c8e8d0d6bf421f6d5dbde8d329f7c905a201539c635d61872"].into(),
-			ICY * 630000000,
+			ICY * 931000000,
 		),
 		(
 			TreasuryPalletId::get().into_account_truncating(),
-			ICY * 1170000000,
+			ICY * 1729000000,
 		),
 		(
 			hex!["6f38cb15a6ec17a68f2aec60d2cd8cd15e58b4e33ee7f705d1cbcde07009d33f"].into(),
@@ -293,3 +293,15 @@ pub fn get_authority_keys_from_seed(seed: &str) -> (AccountId, AuraId) {
 fn session_keys(aura: AuraId) -> SessionKeys {
 	SessionKeys { aura }
 }
+
+pub fn snow_kusama_config() -> Result<SnowChainSpec, String> {
+	sc_chain_spec::GenericChainSpec::from_json_bytes(
+		&include_bytes!("../../../resources/snow-kusama.json")[..],
+	)
+}
+
+pub fn snow_staging_rococo_config() -> Result<SnowChainSpec, String> {
+	sc_chain_spec::GenericChainSpec::from_json_bytes(
+		&include_bytes!("../../../resources/snow-staging-rococo.json")[..],
+	)
+}
diff --git a/node/src/command.rs b/node/src/command.rs
index 78bbf2eb..26168683 100644
--- a/node/src/command.rs
+++ b/node/src/command.rs
@@ -24,6 +24,7 @@ use sp_core::hexdisplay::HexDisplay;
 use sp_runtime::traits::Block as BlockT;
 use std::{io::Write, net::SocketAddr};
 
+use crate::chain_spec::snow::{snow_kusama_config, snow_staging_rococo_config};
 #[cfg(feature = "runtime-benchmarks")]
 use frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE};
 
@@ -64,6 +65,8 @@ fn load_spec(id: &str) -> std::result::Result<Box<dyn sc_service::ChainSpec>, St
 		"arctic" => Box::new(chain_spec::arctic::get_chain_spec()),
 		"snow-dev" => Box::new(chain_spec::snow::get_dev_chain_spec()),
 		"snow-testnet" => Box::new(chain_spec::snow::testnet_spec()),
+		"snow-kusama" => Box::new(snow_kusama_config()?),
+		"snow-staging-rococo" => Box::new(snow_staging_rococo_config()?),
 		path => {
 			let chain_spec = chain_spec::snow::SnowChainSpec::from_json_file(path.into())?;
diff --git a/node/src/service/frost.rs b/node/src/service/frost.rs
deleted file mode 100644
index 1e783518..00000000
--- a/node/src/service/frost.rs
+++ /dev/null
@@ -1,439 +0,0 @@
-#![allow(clippy::type_complexity)]
-
-//! Local Service and ServiceFactory implementation. Specialized wrapper over substrate service.
-
-use fc_consensus::FrontierBlockImport;
-use fc_rpc_core::types::{FeeHistoryCache, FilterPool};
-use futures::StreamExt;
-use pallet_contracts_rpc::ContractsApiServer;
-use sc_client_api::{BlockBackend, BlockchainEvents, ExecutorProvider};
-use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams};
-use sc_executor::NativeElseWasmExecutor;
-use sc_finality_grandpa::SharedVoterState;
-use sc_service::{error::Error as ServiceError, Configuration, TaskManager};
-use sc_telemetry::{Telemetry, TelemetryWorker};
-use sp_consensus_aura::sr25519::AuthorityPair as AuraPair;
-use std::{collections::BTreeMap, sync::Arc, time::Duration};
-
-pub use frost_runtime::RuntimeApi;
-
-use crate::primitives::*;
-
-/// Local runtime native executor.
-pub struct ExecutorDispatch;
-
-impl sc_executor::NativeExecutionDispatch for ExecutorDispatch {
-	#[cfg(not(feature = "runtime-benchmarks"))]
-	type ExtendHostFunctions = ();
-
-	#[cfg(feature = "runtime-benchmarks")]
-	type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions;
-
-	fn dispatch(method: &str, data: &[u8]) -> Option<Vec<u8>> {
-		frost_runtime::api::dispatch(method, data)
-	}
-
-	fn native_version() -> sc_executor::NativeVersion {
-		frost_runtime::native_version()
-	}
-}
-
-type FullClient =
-	sc_service::TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<ExecutorDispatch>>;
-type FullBackend = sc_service::TFullBackend<Block>;
-type FullSelectChain = sc_consensus::LongestChain<FullBackend, Block>;
-
-/// Build a partial chain component config
-pub fn new_partial(
-	config: &Configuration,
-) -> Result<
-	sc_service::PartialComponents<
-		FullClient,
-		FullBackend,
-		FullSelectChain,
-		sc_consensus::DefaultImportQueue<Block, FullClient>,
-		sc_transaction_pool::FullPool<Block, FullClient>,
-		(
-			FrontierBlockImport<
-				Block,
-				sc_finality_grandpa::GrandpaBlockImport<
-					FullBackend,
-					Block,
-					FullClient,
-					FullSelectChain,
-				>,
-				FullClient,
-			>,
-			sc_finality_grandpa::LinkHalf<Block, FullClient, FullSelectChain>,
-			Option<Telemetry>,
-			Arc<fc_db::Backend<Block>>,
-		),
-	>,
-	ServiceError,
-> {
-	if config.keystore_remote.is_some() {
-		return Err(ServiceError::Other(
-			"Remote Keystores are not supported.".to_string(),
-		));
-	}
-
-	let telemetry = config
-		.telemetry_endpoints
-		.clone()
-		.filter(|x| !x.is_empty())
-		.map(|endpoints| -> Result<_, sc_telemetry::Error> {
-			let worker = TelemetryWorker::new(16)?;
-			let telemetry = worker.handle().new_telemetry(endpoints);
-			Ok((worker, telemetry))
-		})
-		.transpose()?;
-
-	let executor = NativeElseWasmExecutor::<ExecutorDispatch>::new(
-		config.wasm_method,
-		config.default_heap_pages,
-		config.max_runtime_instances,
-		config.runtime_cache_size,
-	);
-
-	let (client, backend, keystore_container, task_manager) =
-		sc_service::new_full_parts::<Block, RuntimeApi, _>(
-			config,
-			telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
-			executor,
-		)?;
-	let client = Arc::new(client);
-
-	let telemetry = telemetry.map(|(worker, telemetry)| {
-		task_manager
-			.spawn_handle()
-			.spawn("telemetry", None, worker.run());
-		telemetry
-	});
-
-	let select_chain = sc_consensus::LongestChain::new(backend.clone());
-
-	let transaction_pool = sc_transaction_pool::BasicPool::new_full(
-		config.transaction_pool.clone(),
-		config.role.is_authority().into(),
-		config.prometheus_registry(),
-		task_manager.spawn_essential_handle(),
-		client.clone(),
-	);
-
-	let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import(
-		client.clone(),
-		&(client.clone() as Arc<_>),
-		select_chain.clone(),
-		telemetry.as_ref().map(|x| x.handle()),
-	)?;
-
-	let frontier_backend = crate::rpc::open_frontier_backend(config)?;
-	let frontier_block_import = FrontierBlockImport::new(
-		grandpa_block_import.clone(),
-		client.clone(),
-		frontier_backend.clone(),
-	);
-
-	let slot_duration = sc_consensus_aura::slot_duration(&*client)?;
-
-	let import_queue = sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _, _>(
-		ImportQueueParams {
-			block_import: frontier_block_import.clone(),
-			justification_import: Some(Box::new(grandpa_block_import)),
-			client: client.clone(),
-			create_inherent_data_providers: move |_, ()| async move {
-				let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
-
-				let slot =
-					sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
-						*timestamp,
-						slot_duration,
-					);
-
-				Ok((timestamp, slot))
-			},
-			spawner: &task_manager.spawn_essential_handle(),
-			can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(
-				client.executor().clone(),
-			),
-			registry: config.prometheus_registry(),
-			check_for_equivocation: Default::default(),
-			telemetry: telemetry.as_ref().map(|x| x.handle()),
-		},
-	)?;
-
-	Ok(sc_service::PartialComponents {
-		client,
-		backend,
-		task_manager,
-		import_queue,
-		keystore_container,
-		select_chain,
-		transaction_pool,
-		other: (
-			frontier_block_import,
-			grandpa_link,
-			telemetry,
-			frontier_backend,
-		),
-	})
-}
-
-/// Builds a new service.
-pub fn start_frost_node(config: Configuration) -> Result<TaskManager, ServiceError> {
-	let sc_service::PartialComponents {
-		client,
-		backend,
-		mut task_manager,
-		import_queue,
-		keystore_container,
-		select_chain,
-		transaction_pool,
-		other: (block_import, grandpa_link, mut telemetry, frontier_backend),
-	} = new_partial(&config)?;
-
-	let protocol_name = sc_finality_grandpa::protocol_standard_name(
-		&client
-			.block_hash(0)
-			.ok()
-			.flatten()
-			.expect("Genesis block exists; qed"),
-		&config.chain_spec,
-	);
-
-	let (network, system_rpc_tx, network_starter) =
-		sc_service::build_network(sc_service::BuildNetworkParams {
-			config: &config,
-			client: client.clone(),
-			transaction_pool: transaction_pool.clone(),
-			spawn_handle: task_manager.spawn_handle(),
-			import_queue,
-			block_announce_validator_builder: None,
-			warp_sync: None,
-		})?;
-
-	if config.offchain_worker.enabled {
-		sc_service::build_offchain_workers(
-			&config,
-			task_manager.spawn_handle(),
-			client.clone(),
-			network.clone(),
-		);
-	}
-
-	let filter_pool: FilterPool = Arc::new(std::sync::Mutex::new(BTreeMap::new()));
-	let fee_history_cache: FeeHistoryCache = Arc::new(std::sync::Mutex::new(BTreeMap::new()));
-	let overrides = crate::rpc::overrides_handle(client.clone());
-
-	// Frontier offchain DB task. Essential.
-	// Maps emulated ethereum data to substrate native data.
-	task_manager.spawn_essential_handle().spawn(
-		"frontier-mapping-sync-worker",
-		Some("frontier"),
-		fc_mapping_sync::MappingSyncWorker::new(
-			client.import_notification_stream(),
-			Duration::new(6, 0),
-			client.clone(),
-			backend.clone(),
-			frontier_backend.clone(),
-			3,
-			0,
-			fc_mapping_sync::SyncStrategy::Parachain,
-		)
-		.for_each(|()| futures::future::ready(())),
-	);
-
-	// Frontier `EthFilterApi` maintenance. Manages the pool of user-created Filters.
-	// Each filter is allowed to stay in the pool for 100 blocks.
-	const FILTER_RETAIN_THRESHOLD: u64 = 100;
-	task_manager.spawn_essential_handle().spawn(
-		"frontier-filter-pool",
-		Some("frontier"),
-		fc_rpc::EthTask::filter_pool_task(
-			client.clone(),
-			filter_pool.clone(),
-			FILTER_RETAIN_THRESHOLD,
-		),
-	);
-
-	task_manager.spawn_essential_handle().spawn(
-		"frontier-schema-cache-task",
-		Some("frontier"),
-		fc_rpc::EthTask::ethereum_schema_cache_task(client.clone(), frontier_backend.clone()),
-	);
-
-	const FEE_HISTORY_LIMIT: u64 = 2048;
-	task_manager.spawn_essential_handle().spawn(
-		"frontier-fee-history",
-		Some("frontier"),
-		fc_rpc::EthTask::fee_history_task(
-			client.clone(),
-			overrides.clone(),
-			fee_history_cache.clone(),
-			FEE_HISTORY_LIMIT,
-		),
-	);
-
-	let role = config.role.clone();
-	let force_authoring = config.force_authoring;
-	let backoff_authoring_blocks: Option<()> = None;
-	let name = config.network.node_name.clone();
-	let enable_grandpa = !config.disable_grandpa;
-	let prometheus_registry = config.prometheus_registry().cloned();
-	let is_authority = config.role.is_authority();
-
-	let block_data_cache = Arc::new(fc_rpc::EthBlockDataCacheTask::new(
-		task_manager.spawn_handle(),
-		overrides.clone(),
-		50,
-		50,
-		prometheus_registry.clone(),
-	));
-
-	let rpc_extensions_builder = {
-		let client = client.clone();
-		let network = network.clone();
-		let transaction_pool = transaction_pool.clone();
-
-		Box::new(move |deny_unsafe, subscription| {
-			let deps = crate::rpc::FullDeps {
-				client: client.clone(),
-				pool: transaction_pool.clone(),
-				graph: transaction_pool.pool().clone(),
-				network: network.clone(),
-				is_authority,
-				deny_unsafe,
-				frontier_backend: frontier_backend.clone(),
-				filter_pool: filter_pool.clone(),
-				fee_history_limit: FEE_HISTORY_LIMIT,
-				fee_history_cache: fee_history_cache.clone(),
-				block_data_cache: block_data_cache.clone(),
-				overrides: overrides.clone(),
-			};
-
-			let mut io = crate::rpc::create_full(deps, subscription)
-				.map_err::<ServiceError, _>(Into::into)?;
-
-			// Local node supports WASM contracts
-			io.merge(pallet_contracts_rpc::Contracts::new(Arc::clone(&client)).into_rpc())
-				.map_err(|_| {
-					ServiceError::Other("Failed to register pallet-contracts RPC methods.".into())
-				})?;
-
-			Ok(io)
-		})
-	};
-
-	let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
-		network: network.clone(),
-		client: client.clone(),
-		keystore: keystore_container.sync_keystore(),
-		task_manager: &mut task_manager,
-		transaction_pool: transaction_pool.clone(),
-		rpc_builder: rpc_extensions_builder,
-		backend,
-		system_rpc_tx,
-		config,
-		telemetry: telemetry.as_mut(),
-	})?;
-
-	if role.is_authority() {
-		let proposer_factory = sc_basic_authorship::ProposerFactory::new(
-			task_manager.spawn_handle(),
-			client.clone(),
-			transaction_pool,
-			prometheus_registry.as_ref(),
-			telemetry.as_ref().map(|x| x.handle()),
-		);
-
-		let can_author_with =
-			sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone());
-
-		let slot_duration = sc_consensus_aura::slot_duration(&*client)?;
-
-		let aura = sc_consensus_aura::start_aura::<AuraPair, _, _, _, _, _, _, _, _, _, _, _>(
-			StartAuraParams {
-				slot_duration,
-				client,
-				select_chain,
-				block_import,
-				proposer_factory,
-				create_inherent_data_providers: move |_, ()| async move {
-					let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
-
-					let slot =
-						sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
-							*timestamp,
-							slot_duration,
-						);
-
-					Ok((timestamp, slot))
-				},
-				force_authoring,
-				backoff_authoring_blocks,
-				keystore: keystore_container.sync_keystore(),
-				can_author_with,
-				sync_oracle: network.clone(),
-				justification_sync_link: network.clone(),
-				block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32),
-				max_block_proposal_slot_portion: None,
-				telemetry: telemetry.as_ref().map(|x| x.handle()),
-			},
-		)?;
-
-		// the AURA authoring task is considered essential, i.e. if it
-		// fails we take down the service with it.
-		task_manager
-			.spawn_essential_handle()
-			.spawn_blocking("aura", Some("block-authoring"), aura);
-	}
-
-	// if the node isn't actively participating in consensus then it doesn't
-	// need a keystore, regardless of which protocol we use below.
-	let keystore = if role.is_authority() {
-		Some(keystore_container.sync_keystore())
-	} else {
-		None
-	};
-
-	let grandpa_config = sc_finality_grandpa::Config {
-		// FIXME #1578 make this available through chainspec
-		gossip_duration: Duration::from_millis(333),
-		justification_period: 512,
-		name: Some(name),
-		observer_enabled: false,
-		keystore,
-		local_role: role,
-		telemetry: telemetry.as_ref().map(|x| x.handle()),
-		protocol_name,
-	};
-
-	if enable_grandpa {
-		// start the full GRANDPA voter
-		// NOTE: non-authorities could run the GRANDPA observer protocol, but at
-		// this point the full voter should provide better guarantees of block
-		// and vote data availability than the observer. The observer has not
-		// been tested extensively yet and having most nodes in a network run it
-		// could lead to finality stalls.
-		let grandpa_config = sc_finality_grandpa::GrandpaParams {
-			config: grandpa_config,
-			link: grandpa_link,
-			network,
-			voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(),
-			prometheus_registry,
-			shared_voter_state: SharedVoterState::empty(),
-			telemetry: telemetry.as_ref().map(|x| x.handle()),
-		};
-
-		// the GRANDPA voter task is considered infallible, i.e.
-		// if it fails we take down the service with it.
-		task_manager.spawn_essential_handle().spawn_blocking(
-			"grandpa-voter",
-			None,
-			sc_finality_grandpa::run_grandpa_voter(grandpa_config)?,
-		);
-	}
-
-	network_starter.start_network();
-	Ok(task_manager)
-}
diff --git a/resources/snow-kusama.json b/resources/snow-kusama.json
new file mode 100644
index 00000000..e69de29b
diff --git a/resources/snow-staging-rococo.json b/resources/snow-staging-rococo.json
new file mode 100644
index 00000000..e69de29b