From f8487d5dbd98fd7f58138140d98c5dae77eabc2a Mon Sep 17 00:00:00 2001 From: Reto Achermann Date: Mon, 19 Aug 2024 19:24:00 +0000 Subject: [PATCH] benchmarks: apply formatter Signed-off-by: Reto Achermann --- .../ironsync/benches/ironsync_counter/main.rs | 50 ++++-- .../upstream/benches/nr_counter/main.rs | 18 +- .../upstream/benches/nr_hashmap/main.rs | 6 +- benchmarks/upstream/benches/nr_vspace/main.rs | 122 ++++++------- benchmarks/upstream/src/nr_vspace.rs | 122 ++++++------- .../verified/benches/vnr_counter/main.rs | 11 +- .../verified/benches/vnr_vspace/main.rs | 162 ++++++++---------- benchmarks/verified/src/vnr_vspace.rs | 131 +++++++------- 8 files changed, 297 insertions(+), 325 deletions(-) diff --git a/benchmarks/ironsync/benches/ironsync_counter/main.rs b/benchmarks/ironsync/benches/ironsync_counter/main.rs index 9ceeafb..03bd682 100644 --- a/benchmarks/ironsync/benches/ironsync_counter/main.rs +++ b/benchmarks/ironsync/benches/ironsync_counter/main.rs @@ -3,15 +3,14 @@ //! Defines a hash-map that can be replicated. +use env_logger::Logger; use std::env; use std::fs::{remove_file, OpenOptions}; +use std::io::{self, Write}; use std::path::{Path, PathBuf}; use std::process::Command; -use std::io::{self, Write}; -use env_logger::Logger; fn obtain_dotnet(ironsync_dir: &Path) -> PathBuf { - // TODO: add an exist check! let dotnet_dir = ironsync_dir.join(".dotnet"); @@ -26,7 +25,11 @@ fn obtain_dotnet(ironsync_dir: &Path) -> PathBuf { let output = Command::new("wget") .current_dir(ironsync_dir) - .args(&["https://dot.net/v1/dotnet-install.sh", "-O", "dotnet-install.sh"]) + .args(&[ + "https://dot.net/v1/dotnet-install.sh", + "-O", + "dotnet-install.sh", + ]) .output() .expect("failed to downlaod the dotnet install script"); @@ -39,7 +42,13 @@ fn obtain_dotnet(ironsync_dir: &Path) -> PathBuf { let output = Command::new("bash") .current_dir(ironsync_dir) - .args(&["dotnet-install.sh", "--channel", "5.0" , "--install-dir", ".dotnet"]) + .args(&[ + "dotnet-install.sh", + "--channel", + "5.0", + "--install-dir", + ".dotnet", + ]) .output() .expect("failed to downlaod the dotnet install script"); @@ -56,11 +65,16 @@ fn obtain_dotnet(ironsync_dir: &Path) -> PathBuf { } fn run_linear_dafny() -> PathBuf { - let this_file = file!(); let this_file_path = Path::new(this_file).canonicalize().unwrap(); - let benchmarks_dir = this_file_path.parent().unwrap().parent().unwrap().parent().unwrap(); + let benchmarks_dir = this_file_path + .parent() + .unwrap() + .parent() + .unwrap() + .parent() + .unwrap(); // let cwd = std::env::current_dir().unwrap(); // let benchmarks_dir = cwd.parent().unwrap(); @@ -72,7 +86,7 @@ fn run_linear_dafny() -> PathBuf { let build_script = ironsync_dir.join("run-dafny-in-docker.sh"); if !build_script.is_file() { - let output = Command::new("git") + let output = Command::new("git") .args(&["submodule", "update", "--init"]) .output() .expect("failed to execute process"); @@ -118,8 +132,7 @@ fn run_linear_dafny() -> PathBuf { if !output.status.success() { if dafny_path.exists() { - std::fs::remove_dir_all(dafny_path) - .expect("[dafny] failed to remove .dafny directory"); + std::fs::remove_dir_all(dafny_path).expect("[dafny] failed to remove .dafny directory"); } println!("[dafny] Building LinearDafny..."); @@ -129,10 +142,10 @@ fn run_linear_dafny() -> PathBuf { .output() .expect("[dafny] failed to run `artifact-setup-dafny.sh` command"); if !output.status.success() { - println!("status: {}", output.status); - io::stdout().write_all(&output.stdout).unwrap(); - 
io::stderr().write_all(&output.stderr).unwrap(); - panic!("[dafny] Dafny Build has failed"); + println!("status: {}", output.status); + io::stdout().write_all(&output.stdout).unwrap(); + io::stderr().write_all(&output.stderr).unwrap(); + panic!("[dafny] Dafny Build has failed"); } println!("[dafny] Dafny version ok."); @@ -158,10 +171,11 @@ fn run_linear_dafny() -> PathBuf { println!("[dafny] building nr binaries"); let nr_dir = ironsync_dir.join("concurrency/node-replication"); - let filtered_env : std::collections::HashMap = - env::vars().filter(|&(ref k, _)| + let filtered_env: std::collections::HashMap = env::vars() + .filter(|&(ref k, _)| { !k.starts_with("CARGO") && k != "RUSTUP_TOOLCHAIN" && k != "RUST_RECURSION_COUNT" - ).collect(); + }) + .collect(); let output = Command::new("./compile-bench.sh") .env_clear() @@ -181,7 +195,6 @@ fn run_linear_dafny() -> PathBuf { nr_dir } - fn run_bench(nr_dir: PathBuf) { println!("[run] running benchmark"); let output = Command::new("./bench.py") @@ -212,7 +225,6 @@ pub fn disable_dvfs() { assert!(o.status.success()); } - fn main() { let _r = env_logger::try_init(); diff --git a/benchmarks/upstream/benches/nr_counter/main.rs b/benchmarks/upstream/benches/nr_counter/main.rs index 0312bea..ce1db5f 100644 --- a/benchmarks/upstream/benches/nr_counter/main.rs +++ b/benchmarks/upstream/benches/nr_counter/main.rs @@ -9,16 +9,15 @@ use std::marker::Sync; use std::time::Duration; use logging::warn; -use rand::seq::SliceRandom; use rand::prelude::*; +use rand::seq::SliceRandom; use rand_chacha::ChaCha8Rng; use bench_utils::benchmark::*; use bench_utils::mkbench::{self, DsInterface, NodeReplicated}; use bench_utils::topology::ThreadMapping; use bench_utils::Operation; -use node_replication::{Dispatch}; - +use node_replication::Dispatch; /// The initial amount of entries all Hashmaps are initialized with #[cfg(feature = "smokebench")] @@ -45,7 +44,7 @@ pub const NOP: usize = 25_000_000; #[derive(Debug, Eq, PartialEq, Clone, Copy)] pub enum OpWr { /// Increment the Counter - Inc + Inc, } #[derive(Debug, Eq, PartialEq, Clone, Copy)] @@ -93,9 +92,7 @@ impl Dispatch for NrCounter { /// Implements how we execute operation from the log against our local stack fn dispatch_mut(&mut self, op: Self::WriteOperation) -> Self::Response { match op { - OpWr::Inc => { - Ok(self.inc()) - } + OpWr::Inc => Ok(self.inc()), } } } @@ -107,10 +104,7 @@ impl Dispatch for NrCounter { /// - `write`: true will Put, false will generate Get sequences /// - `span`: Maximum key /// - `distribution`: Supported distribution 'uniform' or 'skewed' -pub fn generate_operations( - nop: usize, - write_ratio: usize, -) -> Vec> { +pub fn generate_operations(nop: usize, write_ratio: usize) -> Vec> { let mut ops = Vec::with_capacity(nop); let mut rng = ChaCha8Rng::seed_from_u64(42); @@ -192,4 +186,4 @@ fn main() { for write_ratio in write_ratios.into_iter() { counter_scale_out::>(&mut harness, " nr-counter", write_ratio); } -} \ No newline at end of file +} diff --git a/benchmarks/upstream/benches/nr_hashmap/main.rs b/benchmarks/upstream/benches/nr_hashmap/main.rs index b5e804f..9924739 100644 --- a/benchmarks/upstream/benches/nr_hashmap/main.rs +++ b/benchmarks/upstream/benches/nr_hashmap/main.rs @@ -20,8 +20,7 @@ use bench_utils::benchmark::*; use bench_utils::mkbench::{self, DsInterface, NodeReplicated}; use bench_utils::topology::ThreadMapping; use bench_utils::Operation; -use node_replication::{Dispatch}; - +use node_replication::Dispatch; /// The initial amount of entries all Hashmaps are 
initialized with #[cfg(feature = "smokebench")] @@ -57,7 +56,6 @@ pub enum OpRd { Get(u64), } - /// Single-threaded implementation of the stack /// /// We just use a vector. @@ -213,4 +211,4 @@ fn main() { for write_ratio in write_ratios.into_iter() { hashmap_scale_out::>(&mut harness, "hashmap", write_ratio); } -} \ No newline at end of file +} diff --git a/benchmarks/upstream/benches/nr_vspace/main.rs b/benchmarks/upstream/benches/nr_vspace/main.rs index 68b967d..dfcfa66 100644 --- a/benchmarks/upstream/benches/nr_vspace/main.rs +++ b/benchmarks/upstream/benches/nr_vspace/main.rs @@ -12,16 +12,15 @@ use std::time::Duration; use logging::warn; use rand::seq::SliceRandom; -use rand::{Rng}; -use rand_chacha::ChaCha8Rng; +use rand::Rng; use rand::SeedableRng; +use rand_chacha::ChaCha8Rng; use bench_utils::benchmark::*; use bench_utils::mkbench::{self, DsInterface, NodeReplicated}; use bench_utils::topology::ThreadMapping; use bench_utils::Operation; - // Number of operation for test-harness. #[cfg(feature = "smokebench")] pub const NOP: usize = 2_500_000; @@ -41,9 +40,8 @@ use std::pin::Pin; use logging::{debug, trace}; use x86::bits64::paging::*; -use node_replication::{Dispatch}; -const VSPACE_RANGE: u64 = 512*1024*1024*1024; - +use node_replication::Dispatch; +const VSPACE_RANGE: u64 = 512 * 1024 * 1024 * 1024; fn kernel_vaddr_to_paddr(v: VAddr) -> PAddr { let vaddr_val: usize = v.into(); @@ -162,7 +160,6 @@ impl fmt::Display for MapAction { } } - pub struct VSpace { pub pml4: Pin>, pub mem_counter: usize, @@ -177,52 +174,49 @@ unsafe impl Send for VSpace {} /// We support a mutable put operation on the hashmap. #[derive(Debug, PartialEq, Clone, Copy)] pub enum Modify { - Map(u64, u64), + Map(u64, u64), } /// We support an immutable read operation to lookup a key from the hashmap. #[derive(Debug, PartialEq, Clone, Copy)] pub enum Access { - Resolve(u64), + Resolve(u64), } /// The Dispatch traits executes `ReadOperation` (our Access enum) /// and `WriteOperation` (our Modify enum) against the replicated /// data-structure. impl Dispatch for VSpace { - type ReadOperation = Access; - type WriteOperation = Modify; - type Response = u64; - - /// The `dispatch` function applies the immutable operations. - fn dispatch(&self, op: Self::ReadOperation) -> Self::Response { - match op { - Access::Resolve(key) => self.resolve_wrapped(key), - } - } - - /// The `dispatch_mut` function applies the mutable operations. - fn dispatch_mut( - &mut self, - op: Self::WriteOperation, - ) -> Self::Response { - match op { - Modify::Map(key, value) => self.map_generic_wrapped(key, value, 0x1000) as u64, - } - } + type ReadOperation = Access; + type WriteOperation = Modify; + type Response = u64; + + /// The `dispatch` function applies the immutable operations. + fn dispatch(&self, op: Self::ReadOperation) -> Self::Response { + match op { + Access::Resolve(key) => self.resolve_wrapped(key), + } + } + + /// The `dispatch_mut` function applies the mutable operations. 
+ fn dispatch_mut(&mut self, op: Self::WriteOperation) -> Self::Response { + match op { + Modify::Map(key, value) => self.map_generic_wrapped(key, value, 0x1000) as u64, + } + } } /* - pub fn map_generic_wrapped( - self: &mut VSpace, - vbase: u64, - pregion: u64, - pregion_len: usize, - //rights: &MapAction, - ) -> bool; + pub fn map_generic_wrapped( + self: &mut VSpace, + vbase: u64, + pregion: u64, + pregion_len: usize, + //rights: &MapAction, + ) -> bool; - pub fn resolve_wrapped(self: &mut VSpace, vbase: u64) -> u64; - */ + pub fn resolve_wrapped(self: &mut VSpace, vbase: u64) -> u64; +*/ impl Drop for VSpace { fn drop(&mut self) { @@ -259,8 +253,7 @@ pub fn alloc(size: usize, ps: usize) -> mmap::MemoryMap { pub const FOUR_KIB: usize = 4 * 1024; const PAGESIZE: u64 = FOUR_KIB as u64; - - assert!(size % FOUR_KIB == 0|| size % TWO_MIB ==0 || size % ONE_GIB ==0); + assert!(size % FOUR_KIB == 0 || size % TWO_MIB == 0 || size % ONE_GIB == 0); let mut non_standard_flags = MAP_SHARED | MAP_ANON | MAP_POPULATE; match ps { @@ -291,12 +284,9 @@ pub fn alloc(size: usize, ps: usize) -> mmap::MemoryMap { // cpp glue fun - - impl Default for VSpace { fn default() -> VSpace { - - let mapping = alloc(3*ONE_GIB, ONE_GIB); + let mapping = alloc(3 * ONE_GIB, ONE_GIB); let mem_ptr = mapping.data(); // make sure the memory for ptable is some contiguous block @@ -311,15 +301,16 @@ impl Default for VSpace { ), mapping, mem_counter: 4096, - mem_ptr - //allocs: Vec::with_capacity(1024), + mem_ptr, //allocs: Vec::with_capacity(1024), }; for i in 0..VSPACE_RANGE / 4096 { - assert!(vs.map_generic( - VAddr::from(i * 4096), - (PAddr::from(i * 4096), 4096), - MapAction::ReadWriteExecuteUser, - ).is_ok()); + assert!(vs + .map_generic( + VAddr::from(i * 4096), + (PAddr::from(i * 4096), 4096), + MapAction::ReadWriteExecuteUser, + ) + .is_ok()); } // logging::error!("vs.mem_counter {}", vs.mem_counter); @@ -527,7 +518,7 @@ impl VSpace { while mapped < psize && pt_idx < 512 { // XXX: allow updates //if !pt[pt_idx].is_present() { - pt[pt_idx] = PTEntry::new(pbase + mapped, PTFlags::P | rights.to_pt_rights()); + pt[pt_idx] = PTEntry::new(pbase + mapped, PTFlags::P | rights.to_pt_rights()); //} else { // return Err(VSpaceError { at: vbase.as_u64() }); //} @@ -567,7 +558,7 @@ impl VSpace { how_many * BASE_PAGE_SIZE, 4096, ))*/ - assert!(self.mem_counter < 3*ONE_GIB); // if this triggers you need to adjust the alloc size of `mem_ptr` + assert!(self.mem_counter < 3 * ONE_GIB); // if this triggers you need to adjust the alloc size of `mem_ptr` self.mem_ptr.offset(self.mem_counter as isize) }; self.mem_counter += how_many * 4096; @@ -614,7 +605,10 @@ impl VSpace { } pub fn resolve_wrapped(&self, addr: u64) -> u64 { - let a = self.resolve_addr(VAddr::from(addr)).map(|pa| pa.as_u64()).unwrap_or(0x0); + let a = self + .resolve_addr(VAddr::from(addr)) + .map(|pa| pa.as_u64()) + .unwrap_or(0x0); //log::error!("{:#x} -> {:#x}", addr, a); a } @@ -653,7 +647,7 @@ impl VSpace { } } } - }else { + } else { // log::error!("pml4 not present {:#x}", addr); unreachable!("dont go here"); } @@ -676,10 +670,6 @@ impl VSpace { } } - - - - /// Generate a random sequence of operations /// /// # Arguments @@ -687,10 +677,7 @@ impl VSpace { /// - `write`: true will Put, false will generate Get sequences /// - `span`: Maximum key /// - `distribution`: Supported distribution 'uniform' or 'skewed' -pub fn generate_operations( - nop: usize, - write_ratio: usize, -) -> Vec> { +pub fn generate_operations(nop: usize, write_ratio: usize) -> Vec> { let mut ops 
= Vec::with_capacity(nop); let mut rng = ChaCha8Rng::seed_from_u64(42); @@ -734,14 +721,17 @@ fn main() { let numa_policy = match args[4].as_str() { "fill" => ThreadMapping::NUMAFill, "interleave" => ThreadMapping::Interleave, - _ => panic!("supply fill or interleave as numa mapping") + _ => panic!("supply fill or interleave as numa mapping"), }; let run_id_num = &args[5]; let mut harness = TestHarness::new(Duration::from_secs(runtime)); let ops = generate_operations(NOP, write_ratio); - let bench_name = format!("nr_vspace-{}-{}-{}-{}", n_threads, write_ratio, numa_policy, run_id_num); + let bench_name = format!( + "nr_vspace-{}-{}-{}-{}", + n_threads, write_ratio, numa_policy, run_id_num + ); mkbench::ScaleBenchBuilder::>::new(ops) .threads(n_threads) @@ -765,4 +755,4 @@ fn main() { } }, ); - } \ No newline at end of file +} diff --git a/benchmarks/upstream/src/nr_vspace.rs b/benchmarks/upstream/src/nr_vspace.rs index 68b967d..dfcfa66 100644 --- a/benchmarks/upstream/src/nr_vspace.rs +++ b/benchmarks/upstream/src/nr_vspace.rs @@ -12,16 +12,15 @@ use std::time::Duration; use logging::warn; use rand::seq::SliceRandom; -use rand::{Rng}; -use rand_chacha::ChaCha8Rng; +use rand::Rng; use rand::SeedableRng; +use rand_chacha::ChaCha8Rng; use bench_utils::benchmark::*; use bench_utils::mkbench::{self, DsInterface, NodeReplicated}; use bench_utils::topology::ThreadMapping; use bench_utils::Operation; - // Number of operation for test-harness. #[cfg(feature = "smokebench")] pub const NOP: usize = 2_500_000; @@ -41,9 +40,8 @@ use std::pin::Pin; use logging::{debug, trace}; use x86::bits64::paging::*; -use node_replication::{Dispatch}; -const VSPACE_RANGE: u64 = 512*1024*1024*1024; - +use node_replication::Dispatch; +const VSPACE_RANGE: u64 = 512 * 1024 * 1024 * 1024; fn kernel_vaddr_to_paddr(v: VAddr) -> PAddr { let vaddr_val: usize = v.into(); @@ -162,7 +160,6 @@ impl fmt::Display for MapAction { } } - pub struct VSpace { pub pml4: Pin>, pub mem_counter: usize, @@ -177,52 +174,49 @@ unsafe impl Send for VSpace {} /// We support a mutable put operation on the hashmap. #[derive(Debug, PartialEq, Clone, Copy)] pub enum Modify { - Map(u64, u64), + Map(u64, u64), } /// We support an immutable read operation to lookup a key from the hashmap. #[derive(Debug, PartialEq, Clone, Copy)] pub enum Access { - Resolve(u64), + Resolve(u64), } /// The Dispatch traits executes `ReadOperation` (our Access enum) /// and `WriteOperation` (our Modify enum) against the replicated /// data-structure. impl Dispatch for VSpace { - type ReadOperation = Access; - type WriteOperation = Modify; - type Response = u64; - - /// The `dispatch` function applies the immutable operations. - fn dispatch(&self, op: Self::ReadOperation) -> Self::Response { - match op { - Access::Resolve(key) => self.resolve_wrapped(key), - } - } - - /// The `dispatch_mut` function applies the mutable operations. - fn dispatch_mut( - &mut self, - op: Self::WriteOperation, - ) -> Self::Response { - match op { - Modify::Map(key, value) => self.map_generic_wrapped(key, value, 0x1000) as u64, - } - } + type ReadOperation = Access; + type WriteOperation = Modify; + type Response = u64; + + /// The `dispatch` function applies the immutable operations. + fn dispatch(&self, op: Self::ReadOperation) -> Self::Response { + match op { + Access::Resolve(key) => self.resolve_wrapped(key), + } + } + + /// The `dispatch_mut` function applies the mutable operations. 
+ fn dispatch_mut(&mut self, op: Self::WriteOperation) -> Self::Response { + match op { + Modify::Map(key, value) => self.map_generic_wrapped(key, value, 0x1000) as u64, + } + } } /* - pub fn map_generic_wrapped( - self: &mut VSpace, - vbase: u64, - pregion: u64, - pregion_len: usize, - //rights: &MapAction, - ) -> bool; + pub fn map_generic_wrapped( + self: &mut VSpace, + vbase: u64, + pregion: u64, + pregion_len: usize, + //rights: &MapAction, + ) -> bool; - pub fn resolve_wrapped(self: &mut VSpace, vbase: u64) -> u64; - */ + pub fn resolve_wrapped(self: &mut VSpace, vbase: u64) -> u64; +*/ impl Drop for VSpace { fn drop(&mut self) { @@ -259,8 +253,7 @@ pub fn alloc(size: usize, ps: usize) -> mmap::MemoryMap { pub const FOUR_KIB: usize = 4 * 1024; const PAGESIZE: u64 = FOUR_KIB as u64; - - assert!(size % FOUR_KIB == 0|| size % TWO_MIB ==0 || size % ONE_GIB ==0); + assert!(size % FOUR_KIB == 0 || size % TWO_MIB == 0 || size % ONE_GIB == 0); let mut non_standard_flags = MAP_SHARED | MAP_ANON | MAP_POPULATE; match ps { @@ -291,12 +284,9 @@ pub fn alloc(size: usize, ps: usize) -> mmap::MemoryMap { // cpp glue fun - - impl Default for VSpace { fn default() -> VSpace { - - let mapping = alloc(3*ONE_GIB, ONE_GIB); + let mapping = alloc(3 * ONE_GIB, ONE_GIB); let mem_ptr = mapping.data(); // make sure the memory for ptable is some contiguous block @@ -311,15 +301,16 @@ impl Default for VSpace { ), mapping, mem_counter: 4096, - mem_ptr - //allocs: Vec::with_capacity(1024), + mem_ptr, //allocs: Vec::with_capacity(1024), }; for i in 0..VSPACE_RANGE / 4096 { - assert!(vs.map_generic( - VAddr::from(i * 4096), - (PAddr::from(i * 4096), 4096), - MapAction::ReadWriteExecuteUser, - ).is_ok()); + assert!(vs + .map_generic( + VAddr::from(i * 4096), + (PAddr::from(i * 4096), 4096), + MapAction::ReadWriteExecuteUser, + ) + .is_ok()); } // logging::error!("vs.mem_counter {}", vs.mem_counter); @@ -527,7 +518,7 @@ impl VSpace { while mapped < psize && pt_idx < 512 { // XXX: allow updates //if !pt[pt_idx].is_present() { - pt[pt_idx] = PTEntry::new(pbase + mapped, PTFlags::P | rights.to_pt_rights()); + pt[pt_idx] = PTEntry::new(pbase + mapped, PTFlags::P | rights.to_pt_rights()); //} else { // return Err(VSpaceError { at: vbase.as_u64() }); //} @@ -567,7 +558,7 @@ impl VSpace { how_many * BASE_PAGE_SIZE, 4096, ))*/ - assert!(self.mem_counter < 3*ONE_GIB); // if this triggers you need to adjust the alloc size of `mem_ptr` + assert!(self.mem_counter < 3 * ONE_GIB); // if this triggers you need to adjust the alloc size of `mem_ptr` self.mem_ptr.offset(self.mem_counter as isize) }; self.mem_counter += how_many * 4096; @@ -614,7 +605,10 @@ impl VSpace { } pub fn resolve_wrapped(&self, addr: u64) -> u64 { - let a = self.resolve_addr(VAddr::from(addr)).map(|pa| pa.as_u64()).unwrap_or(0x0); + let a = self + .resolve_addr(VAddr::from(addr)) + .map(|pa| pa.as_u64()) + .unwrap_or(0x0); //log::error!("{:#x} -> {:#x}", addr, a); a } @@ -653,7 +647,7 @@ impl VSpace { } } } - }else { + } else { // log::error!("pml4 not present {:#x}", addr); unreachable!("dont go here"); } @@ -676,10 +670,6 @@ impl VSpace { } } - - - - /// Generate a random sequence of operations /// /// # Arguments @@ -687,10 +677,7 @@ impl VSpace { /// - `write`: true will Put, false will generate Get sequences /// - `span`: Maximum key /// - `distribution`: Supported distribution 'uniform' or 'skewed' -pub fn generate_operations( - nop: usize, - write_ratio: usize, -) -> Vec> { +pub fn generate_operations(nop: usize, write_ratio: usize) -> Vec> { let mut ops 
= Vec::with_capacity(nop); let mut rng = ChaCha8Rng::seed_from_u64(42); @@ -734,14 +721,17 @@ fn main() { let numa_policy = match args[4].as_str() { "fill" => ThreadMapping::NUMAFill, "interleave" => ThreadMapping::Interleave, - _ => panic!("supply fill or interleave as numa mapping") + _ => panic!("supply fill or interleave as numa mapping"), }; let run_id_num = &args[5]; let mut harness = TestHarness::new(Duration::from_secs(runtime)); let ops = generate_operations(NOP, write_ratio); - let bench_name = format!("nr_vspace-{}-{}-{}-{}", n_threads, write_ratio, numa_policy, run_id_num); + let bench_name = format!( + "nr_vspace-{}-{}-{}-{}", + n_threads, write_ratio, numa_policy, run_id_num + ); mkbench::ScaleBenchBuilder::>::new(ops) .threads(n_threads) @@ -765,4 +755,4 @@ fn main() { } }, ); - } \ No newline at end of file +} diff --git a/benchmarks/verified/benches/vnr_counter/main.rs b/benchmarks/verified/benches/vnr_counter/main.rs index b3c00b2..14797a9 100644 --- a/benchmarks/verified/benches/vnr_counter/main.rs +++ b/benchmarks/verified/benches/vnr_counter/main.rs @@ -10,15 +10,17 @@ use std::num::NonZeroUsize; use std::time::Duration; use logging::warn; -use rand::seq::SliceRandom; use rand::prelude::*; +use rand::seq::SliceRandom; use rand_chacha::ChaCha8Rng; use bench_utils::benchmark::*; use bench_utils::mkbench::{self, DsInterface}; use bench_utils::topology::ThreadMapping; use bench_utils::Operation; -use verified_node_replication::{Dispatch, AffinityFn, NodeReplicated, ReplicaId, ThreadToken, NodeReplicatedT}; +use verified_node_replication::{ + AffinityFn, Dispatch, NodeReplicated, NodeReplicatedT, ReplicaId, ThreadToken, +}; use builtin::Tracked; @@ -131,7 +133,10 @@ impl DsInterface for VNRWrapper { /// - `logs`: How many logs the data-structure should be partitioned over. fn new(replicas: NonZeroUsize, logs: NonZeroUsize, log_size: usize) -> Self { VNRWrapper { - val: NodeReplicatedT::::new(replicas.into(), AffinityFn::new(mkbench::chg_affinity)), + val: NodeReplicatedT::::new( + replicas.into(), + AffinityFn::new(mkbench::chg_affinity), + ), } } diff --git a/benchmarks/verified/benches/vnr_vspace/main.rs b/benchmarks/verified/benches/vnr_vspace/main.rs index f7debbc..46a3554 100644 --- a/benchmarks/verified/benches/vnr_vspace/main.rs +++ b/benchmarks/verified/benches/vnr_vspace/main.rs @@ -14,16 +14,18 @@ use std::time::Duration; use logging::warn; use rand::seq::SliceRandom; +use rand::SeedableRng; use rand::{distributions::Distribution, Rng, RngCore}; use rand_chacha::ChaCha8Rng; -use rand::SeedableRng; use zipf::ZipfDistribution; use bench_utils::benchmark::*; use bench_utils::mkbench::{self, DsInterface}; use bench_utils::topology::ThreadMapping; use bench_utils::Operation; -use verified_node_replication::{Dispatch, AffinityFn, NodeReplicated, ReplicaId, ThreadToken, NodeReplicatedT}; +use verified_node_replication::{ + AffinityFn, Dispatch, NodeReplicated, NodeReplicatedT, ReplicaId, ThreadToken, +}; use builtin::Tracked; @@ -46,8 +48,7 @@ use std::pin::Pin; use logging::{debug, trace}; use x86::bits64::paging::*; -const VSPACE_RANGE: u64 = 512*1024*1024*1024; - +const VSPACE_RANGE: u64 = 512 * 1024 * 1024 * 1024; fn kernel_vaddr_to_paddr(v: VAddr) -> PAddr { let vaddr_val: usize = v.into(); @@ -166,7 +167,6 @@ impl fmt::Display for MapAction { } } - pub struct VSpace { pub pml4: Pin>, pub mem_counter: usize, @@ -181,45 +181,41 @@ unsafe impl Send for VSpace {} /// We support a mutable put operation on the hashmap. 
#[derive(Debug, PartialEq, Clone, Copy)] pub enum Modify { - Map(u64, u64), + Map(u64, u64), } /// We support an immutable read operation to lookup a key from the hashmap. #[derive(Debug, PartialEq, Clone, Copy)] pub enum Access { - Resolve(u64), + Resolve(u64), } /// The Dispatch traits executes `ReadOperation` (our Access enum) /// and `WriteOperation` (our Modify enum) against the replicated /// data-structure. impl Dispatch for VSpace { - type ReadOperation = Access; - type WriteOperation = Modify; - type Response = u64; - type View = VSpace; + type ReadOperation = Access; + type WriteOperation = Modify; + type Response = u64; + type View = VSpace; - fn init() -> Self { + fn init() -> Self { Default::default() } + /// The `dispatch` function applies the immutable operations. + fn dispatch(&self, op: Self::ReadOperation) -> Self::Response { + match op { + Access::Resolve(key) => self.resolveWrapped(key), + } + } - /// The `dispatch` function applies the immutable operations. - fn dispatch(&self, op: Self::ReadOperation) -> Self::Response { - match op { - Access::Resolve(key) => self.resolveWrapped(key), - } - } - - /// The `dispatch_mut` function applies the mutable operations. - fn dispatch_mut( - &mut self, - op: Self::WriteOperation, - ) -> Self::Response { - match op { - Modify::Map(key, value) => self.mapGenericWrapped(key, value, 0x1000) as u64, - } - } + /// The `dispatch_mut` function applies the mutable operations. + fn dispatch_mut(&mut self, op: Self::WriteOperation) -> Self::Response { + match op { + Modify::Map(key, value) => self.mapGenericWrapped(key, value, 0x1000) as u64, + } + } // partial eq also add an exec operation fn clone_write_op(op: &Self::WriteOperation) -> Self::WriteOperation { @@ -229,11 +225,8 @@ impl Dispatch for VSpace { fn clone_response(op: &Self::Response) -> Self::Response { op.clone() } - } - - // impl SomeTrait for T // where T: AnotherTrait struct VNRWrapper { @@ -251,7 +244,10 @@ impl DsInterface for VNRWrapper { /// - `logs`: How many logs the data-structure should be partitioned over. 
fn new(replicas: NonZeroUsize, logs: NonZeroUsize, log_size: usize) -> Self { VNRWrapper { - val: NodeReplicatedT::::new(replicas.into(), AffinityFn::new(mkbench::chg_affinity)), + val: NodeReplicatedT::::new( + replicas.into(), + AffinityFn::new(mkbench::chg_affinity), + ), } } @@ -287,18 +283,17 @@ impl DsInterface for VNRWrapper { } } - /* - pub fn mapGenericWrapped( - self: &mut VSpace, - vbase: u64, - pregion: u64, - pregion_len: usize, - //rights: &MapAction, - ) -> bool; - - pub fn resolveWrapped(self: &mut VSpace, vbase: u64) -> u64; - */ + pub fn mapGenericWrapped( + self: &mut VSpace, + vbase: u64, + pregion: u64, + pregion_len: usize, + //rights: &MapAction, + ) -> bool; + + pub fn resolveWrapped(self: &mut VSpace, vbase: u64) -> u64; +*/ impl Drop for VSpace { fn drop(&mut self) { @@ -335,8 +330,7 @@ pub fn alloc(size: usize, ps: usize) -> mmap::MemoryMap { pub const FOUR_KIB: usize = 4 * 1024; const PAGESIZE: u64 = FOUR_KIB as u64; - - assert!(size % FOUR_KIB == 0|| size % TWO_MIB ==0 || size % ONE_GIB ==0); + assert!(size % FOUR_KIB == 0 || size % TWO_MIB == 0 || size % ONE_GIB == 0); let mut non_standard_flags = MAP_SHARED | MAP_ANON | MAP_POPULATE; match ps { @@ -369,7 +363,7 @@ pub fn alloc(size: usize, ps: usize) -> mmap::MemoryMap { pub fn createVSpace() -> &'static mut VSpace { //env_logger::try_init(); //log::error!("createVSpace"); - let mapping = alloc(3*ONE_GIB, ONE_GIB); + let mapping = alloc(3 * ONE_GIB, ONE_GIB); let mem_ptr = mapping.data(); //unsafe { alloc::alloc::alloc(core::alloc::Layout::from_size_align_unchecked(1075851264, 4096)) }; @@ -381,25 +375,25 @@ pub fn createVSpace() -> &'static mut VSpace { //allocs: Vec::with_capacity(1024), mapping, mem_counter: 4096, - mem_ptr + mem_ptr, })); for i in 0..VSPACE_RANGE / 4096 { - assert!(vs.map_generic( - VAddr::from(i * 4096), - (PAddr::from(i * 4096), 4096), - MapAction::ReadWriteExecuteUser, - ).is_ok()); + assert!(vs + .map_generic( + VAddr::from(i * 4096), + (PAddr::from(i * 4096), 4096), + MapAction::ReadWriteExecuteUser, + ) + .is_ok()); } vs } - impl Default for VSpace { fn default() -> VSpace { - - let mapping = alloc(3*ONE_GIB, ONE_GIB); + let mapping = alloc(3 * ONE_GIB, ONE_GIB); let mem_ptr = mapping.data(); // make sure the memory for ptable is some contiguous block @@ -414,15 +408,16 @@ impl Default for VSpace { ), mapping, mem_counter: 4096, - mem_ptr - //allocs: Vec::with_capacity(1024), + mem_ptr, //allocs: Vec::with_capacity(1024), }; for i in 0..VSPACE_RANGE / 4096 { - assert!(vs.map_generic( - VAddr::from(i * 4096), - (PAddr::from(i * 4096), 4096), - MapAction::ReadWriteExecuteUser, - ).is_ok()); + assert!(vs + .map_generic( + VAddr::from(i * 4096), + (PAddr::from(i * 4096), 4096), + MapAction::ReadWriteExecuteUser, + ) + .is_ok()); } logging::error!("vs.mem_counter {}", vs.mem_counter); @@ -630,7 +625,7 @@ impl VSpace { while mapped < psize && pt_idx < 512 { // XXX: allow updates //if !pt[pt_idx].is_present() { - pt[pt_idx] = PTEntry::new(pbase + mapped, PTFlags::P | rights.to_pt_rights()); + pt[pt_idx] = PTEntry::new(pbase + mapped, PTFlags::P | rights.to_pt_rights()); //} else { // return Err(VSpaceError { at: vbase.as_u64() }); //} @@ -670,7 +665,7 @@ impl VSpace { how_many * BASE_PAGE_SIZE, 4096, ))*/ - assert!(self.mem_counter < 3*ONE_GIB); // if this triggers you need to adjust the alloc size of `mem_ptr` + assert!(self.mem_counter < 3 * ONE_GIB); // if this triggers you need to adjust the alloc size of `mem_ptr` self.mem_ptr.offset(self.mem_counter as isize) }; self.mem_counter += 
how_many * 4096; @@ -717,7 +712,10 @@ impl VSpace { } pub fn resolveWrapped(&self, addr: u64) -> u64 { - let a = self.resolve_addr(VAddr::from(addr)).map(|pa| pa.as_u64()).unwrap_or(0x0); + let a = self + .resolve_addr(VAddr::from(addr)) + .map(|pa| pa.as_u64()) + .unwrap_or(0x0); //log::error!("{:#x} -> {:#x}", addr, a); a } @@ -756,7 +754,7 @@ impl VSpace { } } } - }else { + } else { // log::error!("pml4 not present {:#x}", addr); unreachable!("dont go here"); } @@ -924,21 +922,18 @@ fn silly() { assert!(vs.mapGenericWrapped((VSPACE_RANGE) - 4096, 0xd000, 0x1000)); assert!(vs.resolveWrapped((VSPACE_RANGE) - 4096) == 0xd000); + /* pub fn mapGenericWrapped( + self: &mut VSpace, + vbase: u64, + pregion: u64, + pregion_len: usize, + //rights: &MapAction, + ) -> bool; -/* pub fn mapGenericWrapped( - self: &mut VSpace, - vbase: u64, - pregion: u64, - pregion_len: usize, - //rights: &MapAction, - ) -> bool; - - pub fn resolveWrapped(self: &mut VSpace, vbase: u64) -> u64; -*/ + pub fn resolveWrapped(self: &mut VSpace, vbase: u64) -> u64; + */ } - - /// Generate a random sequence of operations /// /// # Arguments @@ -946,10 +941,7 @@ fn silly() { /// - `write`: true will Put, false will generate Get sequences /// - `span`: Maximum key /// - `distribution`: Supported distribution 'uniform' or 'skewed' -pub fn generate_operations( - nop: usize, - write_ratio: usize, -) -> Vec> { +pub fn generate_operations(nop: usize, write_ratio: usize) -> Vec> { let mut ops = Vec::with_capacity(nop); let mut rng = ChaCha8Rng::seed_from_u64(42); @@ -957,9 +949,7 @@ pub fn generate_operations( const MAP_SIZE_MASK: u64 = !0xffff_ffff_f000_0fff; for _i in 0..nop { match rng.gen::() % 2 { - 0 => ops.push(Operation::ReadOperation(Access::Resolve( - rng.gen::(), - ))), + 0 => ops.push(Operation::ReadOperation(Access::Resolve(rng.gen::()))), 1 => ops.push(Operation::WriteOperation(Modify::Map( rng.gen::() & PAGE_RANGE_MASK, rng.gen::() & PAGE_RANGE_MASK, @@ -1037,4 +1027,4 @@ fn main() { for write_ratio in write_ratios.into_iter() { hashmap_scale_out::(&mut harness, "vnr-vspace", write_ratio); } -} \ No newline at end of file +} diff --git a/benchmarks/verified/src/vnr_vspace.rs b/benchmarks/verified/src/vnr_vspace.rs index bed6d0f..609e5ac 100644 --- a/benchmarks/verified/src/vnr_vspace.rs +++ b/benchmarks/verified/src/vnr_vspace.rs @@ -13,15 +13,17 @@ use std::time::Duration; use logging::warn; use rand::seq::SliceRandom; -use rand::{Rng}; -use rand_chacha::ChaCha8Rng; +use rand::Rng; use rand::SeedableRng; +use rand_chacha::ChaCha8Rng; use bench_utils::benchmark::*; use bench_utils::mkbench::{self, DsInterface}; use bench_utils::topology::ThreadMapping; use bench_utils::Operation; -use verified_node_replication::{Dispatch, AffinityFn, NodeReplicated, ReplicaId, ThreadToken, NodeReplicatedT}; +use verified_node_replication::{ + AffinityFn, Dispatch, NodeReplicated, NodeReplicatedT, ReplicaId, ThreadToken, +}; use builtin::Tracked; @@ -44,8 +46,7 @@ use std::pin::Pin; use logging::{debug, trace}; use x86::bits64::paging::*; -const VSPACE_RANGE: u64 = 512*1024*1024*1024; - +const VSPACE_RANGE: u64 = 512 * 1024 * 1024 * 1024; fn kernel_vaddr_to_paddr(v: VAddr) -> PAddr { let vaddr_val: usize = v.into(); @@ -164,7 +165,6 @@ impl fmt::Display for MapAction { } } - pub struct VSpace { pub pml4: Pin>, pub mem_counter: usize, @@ -179,45 +179,41 @@ unsafe impl Send for VSpace {} /// We support a mutable put operation on the hashmap. 
#[derive(Debug, PartialEq, Clone, Copy)] pub enum Modify { - Map(u64, u64), + Map(u64, u64), } /// We support an immutable read operation to lookup a key from the hashmap. #[derive(Debug, PartialEq, Clone, Copy)] pub enum Access { - Resolve(u64), + Resolve(u64), } /// The Dispatch traits executes `ReadOperation` (our Access enum) /// and `WriteOperation` (our Modify enum) against the replicated /// data-structure. impl Dispatch for VSpace { - type ReadOperation = Access; - type WriteOperation = Modify; - type Response = u64; - type View = VSpace; + type ReadOperation = Access; + type WriteOperation = Modify; + type Response = u64; + type View = VSpace; - fn init() -> Self { + fn init() -> Self { Default::default() } + /// The `dispatch` function applies the immutable operations. + fn dispatch(&self, op: Self::ReadOperation) -> Self::Response { + match op { + Access::Resolve(key) => self.resolve_wrapped(key), + } + } - /// The `dispatch` function applies the immutable operations. - fn dispatch(&self, op: Self::ReadOperation) -> Self::Response { - match op { - Access::Resolve(key) => self.resolve_wrapped(key), - } - } - - /// The `dispatch_mut` function applies the mutable operations. - fn dispatch_mut( - &mut self, - op: Self::WriteOperation, - ) -> Self::Response { - match op { - Modify::Map(key, value) => self.map_generic_wrapped(key, value, 0x1000) as u64, - } - } + /// The `dispatch_mut` function applies the mutable operations. + fn dispatch_mut(&mut self, op: Self::WriteOperation) -> Self::Response { + match op { + Modify::Map(key, value) => self.map_generic_wrapped(key, value, 0x1000) as u64, + } + } // partial eq also add an exec operation fn clone_write_op(op: &Self::WriteOperation) -> Self::WriteOperation { @@ -227,11 +223,8 @@ impl Dispatch for VSpace { fn clone_response(op: &Self::Response) -> Self::Response { op.clone() } - } - - // impl SomeTrait for T // where T: AnotherTrait struct VNRWrapper { @@ -249,7 +242,10 @@ impl DsInterface for VNRWrapper { /// - `logs`: How many logs the data-structure should be partitioned over. 
fn new(replicas: NonZeroUsize, _logs: NonZeroUsize, _log_size: usize) -> Self { VNRWrapper { - val: NodeReplicatedT::::new(replicas.into(), AffinityFn::new(mkbench::chg_affinity)), + val: NodeReplicatedT::::new( + replicas.into(), + AffinityFn::new(mkbench::chg_affinity), + ), } } @@ -285,18 +281,17 @@ impl DsInterface for VNRWrapper { } } - /* - pub fn map_generic_wrapped( - self: &mut VSpace, - vbase: u64, - pregion: u64, - pregion_len: usize, - //rights: &MapAction, - ) -> bool; + pub fn map_generic_wrapped( + self: &mut VSpace, + vbase: u64, + pregion: u64, + pregion_len: usize, + //rights: &MapAction, + ) -> bool; - pub fn resolve_wrapped(self: &mut VSpace, vbase: u64) -> u64; - */ + pub fn resolve_wrapped(self: &mut VSpace, vbase: u64) -> u64; +*/ impl Drop for VSpace { fn drop(&mut self) { @@ -331,8 +326,7 @@ pub fn alloc(size: usize, ps: usize) -> mmap::MemoryMap { pub const FOUR_KIB: usize = 4 * 1024; const PAGESIZE: u64 = FOUR_KIB as u64; - - assert!(size % FOUR_KIB == 0|| size % TWO_MIB ==0 || size % ONE_GIB ==0); + assert!(size % FOUR_KIB == 0 || size % TWO_MIB == 0 || size % ONE_GIB == 0); let mut non_standard_flags = MAP_SHARED | MAP_ANON | MAP_POPULATE; match ps { @@ -361,13 +355,9 @@ pub fn alloc(size: usize, ps: usize) -> mmap::MemoryMap { res } - - - impl Default for VSpace { fn default() -> VSpace { - - let mapping = alloc(3*ONE_GIB, ONE_GIB); + let mapping = alloc(3 * ONE_GIB, ONE_GIB); let mem_ptr = mapping.data(); // make sure the memory for ptable is some contiguous block @@ -382,15 +372,16 @@ impl Default for VSpace { ), mapping, mem_counter: 4096, - mem_ptr - //allocs: Vec::with_capacity(1024), + mem_ptr, //allocs: Vec::with_capacity(1024), }; for i in 0..VSPACE_RANGE / 4096 { - assert!(vs.map_generic( - VAddr::from(i * 4096), - (PAddr::from(i * 4096), 4096), - MapAction::ReadWriteExecuteUser, - ).is_ok()); + assert!(vs + .map_generic( + VAddr::from(i * 4096), + (PAddr::from(i * 4096), 4096), + MapAction::ReadWriteExecuteUser, + ) + .is_ok()); } logging::error!("vs.mem_counter {}", vs.mem_counter); @@ -598,7 +589,7 @@ impl VSpace { while mapped < psize && pt_idx < 512 { // XXX: allow updates //if !pt[pt_idx].is_present() { - pt[pt_idx] = PTEntry::new(pbase + mapped, PTFlags::P | rights.to_pt_rights()); + pt[pt_idx] = PTEntry::new(pbase + mapped, PTFlags::P | rights.to_pt_rights()); //} else { // return Err(VSpaceError { at: vbase.as_u64() }); //} @@ -638,7 +629,7 @@ impl VSpace { how_many * BASE_PAGE_SIZE, 4096, ))*/ - assert!(self.mem_counter < 3*ONE_GIB); // if this triggers you need to adjust the alloc size of `mem_ptr` + assert!(self.mem_counter < 3 * ONE_GIB); // if this triggers you need to adjust the alloc size of `mem_ptr` self.mem_ptr.offset(self.mem_counter as isize) }; self.mem_counter += how_many * 4096; @@ -685,7 +676,10 @@ impl VSpace { } pub fn resolve_wrapped(&self, addr: u64) -> u64 { - let a = self.resolve_addr(VAddr::from(addr)).map(|pa| pa.as_u64()).unwrap_or(0x0); + let a = self + .resolve_addr(VAddr::from(addr)) + .map(|pa| pa.as_u64()) + .unwrap_or(0x0); //log::error!("{:#x} -> {:#x}", addr, a); a } @@ -724,7 +718,7 @@ impl VSpace { } } } - }else { + } else { // log::error!("pml4 not present {:#x}", addr); unreachable!("dont go here"); } @@ -746,7 +740,6 @@ impl VSpace { } } - /// Generate a random sequence of operations /// /// # Arguments @@ -754,10 +747,7 @@ impl VSpace { /// - `write`: true will Put, false will generate Get sequences /// - `span`: Maximum key /// - `distribution`: Supported distribution 'uniform' or 'skewed' -pub fn 
generate_operations( - nop: usize, - write_ratio: usize, -) -> Vec> { +pub fn generate_operations(nop: usize, write_ratio: usize) -> Vec> { let mut ops = Vec::with_capacity(nop); let mut rng = ChaCha8Rng::seed_from_u64(42); @@ -801,14 +791,17 @@ fn main() { let numa_policy = match args[4].as_str() { "fill" => ThreadMapping::NUMAFill, "interleave" => ThreadMapping::Interleave, - _ => panic!("supply fill or interleave as numa mapping") + _ => panic!("supply fill or interleave as numa mapping"), }; let run_id_num = &args[5]; let mut harness = TestHarness::new(Duration::from_secs(runtime)); let ops = generate_operations(NOP, write_ratio); - let bench_name = format!("vnr_vspace-{}-{}-{}-{}", n_threads, write_ratio, numa_policy, run_id_num); + let bench_name = format!( + "vnr_vspace-{}-{}-{}-{}", + n_threads, write_ratio, numa_policy, run_id_num + ); mkbench::ScaleBenchBuilder::::new(ops) .threads(n_threads) @@ -832,4 +825,4 @@ fn main() { }, }, ); - } \ No newline at end of file +}
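-- 
Note: this patch is a mechanical `rustfmt` pass over the benchmark crates; no
functional changes are intended. If keeping the tree rustfmt-clean after this
change is desirable, a small guard test along the following lines could be
added (a sketch: the test name and its placement are assumptions, not part of
this patch). It shells out to `cargo fmt -- --check`, the same way these
benchmarks already drive external tools through `Command::new`:

    // Hypothetical guard (not part of this patch): fail the test suite
    // whenever any source file in the workspace is no longer rustfmt-clean.
    #[test]
    fn benchmarks_are_formatted() {
        let status = std::process::Command::new("cargo")
            .args(&["fmt", "--all", "--", "--check"])
            .status()
            .expect("failed to run `cargo fmt`");
        assert!(status.success(), "run `cargo fmt --all` to fix formatting");
    }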