From aef0bdef998caad082c8f551dcfdd2c55912c79b Mon Sep 17 00:00:00 2001 From: Jay Bosamiya Date: Mon, 26 Feb 2024 15:45:34 -0500 Subject: [PATCH 01/10] Improve output of syntax.rs-unchanged test --- Cargo.toml | 1 + tests/syntax-rs-unchanged.rs | 17 +++++++++++++---- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 7f66690..d0b9f9d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,6 +35,7 @@ tracing-subscriber = { version = "0.3.17" } [dev-dependencies] insta = { version = "1.30.0" } +similar = { version = "2.2.1" } # Spend more time on initial compilation in exchange for faster runs [profile.dev.package.insta] diff --git a/tests/syntax-rs-unchanged.rs b/tests/syntax-rs-unchanged.rs index 048c626..52b0e86 100644 --- a/tests/syntax-rs-unchanged.rs +++ b/tests/syntax-rs-unchanged.rs @@ -1,7 +1,5 @@ //! Testing ../examples/syntax.rs -use verusfmt::parse_and_format; - /// Just an automatic test to make sure that ../examples/syntax.rs is left unchanged by verusfmt. /// /// This is essentially intended to be a snapshot test, like ./snap-tests.rs, but only as a quick @@ -9,6 +7,17 @@ use verusfmt::parse_and_format; /// `syntax.rs` always in sync with the output that would show up from verusfmt. #[test] fn syntax_rs_unchanged() { - let syntax_rs = include_str!("../examples/syntax.rs").to_owned(); - assert_eq!(parse_and_format(&syntax_rs).unwrap(), syntax_rs); + let original = include_str!("../examples/syntax.rs").to_owned(); + let formatted = verusfmt::parse_and_format(&original).unwrap(); + if original != formatted { + let diff = similar::udiff::unified_diff( + similar::Algorithm::Patience, + &original, + &formatted, + 3, + Some(("original", "formatted")), + ); + println!("{diff}"); + panic!("Formatted output does not match"); + } } From 7334eca04abb1b9c4b4512178e1e3cdc2ebfa1f4 Mon Sep 17 00:00:00 2001 From: Jay Bosamiya Date: Mon, 26 Feb 2024 15:59:04 -0500 Subject: [PATCH 02/10] Fix FnSpec parsing. Due to newly added `spec_fn`, `FnSpec` itself is deprecated. Nonetheless, we should support parsing it for older instances of Verus. We used to support parsing it, but in a21b251fbdad060602a4361247c53b7f0a54d8dc, a copy-paste-typo got introduced which broke support for it. 
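For illustration, here is a minimal, hypothetical snippet (not taken from the
test suite) written against the older `FnSpec` closure-type syntax that this
grammar rule is meant to accept. With the copy-paste typo, `FnSpec_str`
matched the literal "FnMut" rather than "FnSpec", so verusfmt rejected input
along these lines:

    verus! {

    // Spec-mode higher-order function using the (now deprecated) FnSpec type.
    pub open spec fn apply_twice(f: FnSpec(int) -> int, x: int) -> int {
        f(f(x))
    }

    } // verus!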
--- src/verus.pest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/verus.pest b/src/verus.pest index e6bd0b7..bc7a460 100644 --- a/src/verus.pest +++ b/src/verus.pest @@ -260,7 +260,7 @@ false_str = ${ "false" ~ !("_" | ASCII_ALPHANUMERIC) } fn_str = ${ "fn" ~ !("_" | ASCII_ALPHANUMERIC) } FnOnce_str = ${ "FnOnce" ~ !("_" | ASCII_ALPHANUMERIC) } FnMut_str = ${ "FnMut" ~ !("_" | ASCII_ALPHANUMERIC) } -FnSpec_str = ${ "FnMut" ~ !("_" | ASCII_ALPHANUMERIC) } +FnSpec_str = ${ "FnSpec" ~ !("_" | ASCII_ALPHANUMERIC) } Fn_str = ${ "Fn" ~ !("_" | ASCII_ALPHANUMERIC) } for_str = ${ "for" ~ !("_" | ASCII_ALPHANUMERIC) } forall_str = ${ "forall" ~ !("_" | ASCII_ALPHANUMERIC) } From 70a47f51c1737d466c0887d35c24d6e48eb48f7b Mon Sep 17 00:00:00 2001 From: Jay Bosamiya Date: Mon, 26 Feb 2024 16:36:48 -0500 Subject: [PATCH 03/10] Run verusfmt on owl-output --- examples/owl-output.rs | 919 +++++++++++++++++++++++++---------------- 1 file changed, 557 insertions(+), 362 deletions(-) diff --git a/examples/owl-output.rs b/examples/owl-output.rs index 0249124..5b40526 100644 --- a/examples/owl-output.rs +++ b/examples/owl-output.rs @@ -7,9 +7,9 @@ pub use vstd::{modes::*, prelude::*, seq::*, slice::*, string::*, *}; pub mod speclib; -pub use crate::speclib::{*, itree::*}; +pub use crate::speclib::{itree::*, *}; pub mod execlib; -pub use crate::execlib::{*}; +pub use crate::execlib::*; pub mod owl_aead; pub mod owl_dhke; pub mod owl_hkdf; @@ -30,31 +30,85 @@ pub use std::time::Duration; pub use std::time::Instant; verus! { -pub open const spec fn CIPHER() -> owl_aead::Mode { crate::owl_aead::Mode::Chacha20Poly1305 } -pub const fn cipher() -> (r:owl_aead::Mode) ensures r == CIPHER() { crate::owl_aead::Mode::Chacha20Poly1305 } -pub open const spec fn KEY_SIZE() -> usize { owl_aead::spec_key_size(CIPHER()) } -pub const fn key_size() -> (r:usize) ensures r == KEY_SIZE() { owl_aead::key_size(cipher()) } -pub open const spec fn TAG_SIZE() -> usize { owl_aead::spec_tag_size(CIPHER()) } -pub const fn tag_size() -> (r:usize) ensures r == TAG_SIZE() { owl_aead::tag_size(cipher()) } -pub open const spec fn NONCE_SIZE() -> usize { owl_aead::spec_nonce_size(CIPHER()) } -pub const fn nonce_size() -> (r:usize) ensures r == NONCE_SIZE() { owl_aead::nonce_size(cipher()) } -pub open const spec fn HMAC_MODE() -> owl_hmac::Mode { crate::owl_hmac::Mode::Sha512 } -pub const fn hmac_mode() -> (r:owl_hmac::Mode) ensures r == HMAC_MODE() { crate::owl_hmac::Mode::Sha512 } + +pub open const spec fn CIPHER() -> owl_aead::Mode { + crate::owl_aead::Mode::Chacha20Poly1305 +} + +pub const fn cipher() -> (r: owl_aead::Mode) + ensures + r == CIPHER(), +{ + crate::owl_aead::Mode::Chacha20Poly1305 +} + +pub open const spec fn KEY_SIZE() -> usize { + owl_aead::spec_key_size(CIPHER()) +} + +pub const fn key_size() -> (r: usize) + ensures + r == KEY_SIZE(), +{ + owl_aead::key_size(cipher()) +} + +pub open const spec fn TAG_SIZE() -> usize { + owl_aead::spec_tag_size(CIPHER()) +} + +pub const fn tag_size() -> (r: usize) + ensures + r == TAG_SIZE(), +{ + owl_aead::tag_size(cipher()) +} + +pub open const spec fn NONCE_SIZE() -> usize { + owl_aead::spec_nonce_size(CIPHER()) +} + +pub const fn nonce_size() -> (r: usize) + ensures + r == NONCE_SIZE(), +{ + owl_aead::nonce_size(cipher()) +} + +pub open const spec fn HMAC_MODE() -> owl_hmac::Mode { + crate::owl_hmac::Mode::Sha512 +} + +pub const fn hmac_mode() -> (r: owl_hmac::Mode) + ensures + r == HMAC_MODE(), +{ + crate::owl_hmac::Mode::Sha512 +} 
#[verifier(external_type_specification)] #[verifier(external_body)] -pub struct TcpListenerWrapper ( std::net::TcpListener ); +pub struct TcpListenerWrapper(std::net::TcpListener); #[verifier(external_type_specification)] -pub struct OwlErrorWrapper ( OwlError ); - +pub struct OwlErrorWrapper(OwlError); #[verifier(external_body)] -pub fn owl_output(Tracked(t): Tracked<&mut ITreeToken>, x: &[u8], dest_addr: &StrSlice, ret_addr: &StrSlice) - requires old(t)@.is_output(x@, endpoint_of_addr(dest_addr.view())) - ensures t@ == old(t)@.give_output() +pub fn owl_output( + Tracked(t): Tracked<&mut ITreeToken>, + x: &[u8], + dest_addr: &StrSlice, + ret_addr: &StrSlice, +) + requires + old(t)@.is_output(x@, endpoint_of_addr(dest_addr.view())), + ensures + t@ == old(t)@.give_output(), { - let msg = msg { ret_addr: std::string::String::from(ret_addr.into_rust_str()), payload: std::vec::Vec::from(x) }; + let msg = msg { + ret_addr: std::string::String::from(ret_addr.into_rust_str()), + payload: std::vec::Vec::from(x), + }; let serialized = serialize_msg(&msg); let mut stream = TcpStream::connect(dest_addr.into_rust_str()).unwrap(); stream.write_all(&serialized).unwrap(); @@ -62,95 +116,118 @@ pub fn owl_output(Tracked(t): Tracked<&mut ITreeToken>, x: &[u8], } #[verifier(external_body)] -pub fn owl_input(Tracked(t): Tracked<&mut ITreeToken>, listener: &TcpListener) -> (ie:(Vec, String)) - requires old(t)@.is_input() - ensures t@ == old(t)@.take_input(ie.0@, endpoint_of_addr(ie.1.view())) +pub fn owl_input( + Tracked(t): Tracked<&mut ITreeToken>, + listener: &TcpListener, +) -> (ie: (Vec, String)) + requires + old(t)@.is_input(), + ensures + t@ == old(t)@.take_input(ie.0@, endpoint_of_addr(ie.1.view())), { let (mut stream, _addr) = listener.accept().unwrap(); let mut reader = io::BufReader::new(&mut stream); let received: std::vec::Vec = reader.fill_buf().unwrap().to_vec(); reader.consume(received.len()); - let msg : msg = deserialize_msg(&received); + let msg: msg = deserialize_msg(&received); (msg.payload, String::from_rust_string(msg.ret_addr)) } #[verifier(external_body)] -pub fn owl_sample(Tracked(t): Tracked<&mut ITreeToken>, n: usize) -> (res:Vec) - requires old(t)@.is_sample(n) - ensures t@ == old(t)@.get_sample(res@) +pub fn owl_sample(Tracked(t): Tracked<&mut ITreeToken>, n: usize) -> (res: Vec) + requires + old(t)@.is_sample(n), + ensures + t@ == old(t)@.get_sample(res@), { owl_util::gen_rand_bytes(n) } } // verus! - - - - verus! 
{ // ------------------------------------ // ---------- SPECIFICATIONS ---------- // ------------------------------------ - -pub struct owlSpec_t{ -pub owlSpec__x : Seq, -pub owlSpec__y : Seq +pub struct owlSpec_t { + pub owlSpec__x: Seq, + pub owlSpec__y: Seq, } -#[verifier(external_body)] pub closed spec fn parse_owlSpec_t(x: Seq) -> Option { -todo!() + +#[verifier(external_body)] +pub closed spec fn parse_owlSpec_t(x: Seq) -> Option { + todo!() } -#[verifier(external_body)] pub closed spec fn serialize_owlSpec_t(x: owlSpec_t) -> Seq { -todo!() + +#[verifier(external_body)] +pub closed spec fn serialize_owlSpec_t(x: owlSpec_t) -> Seq { + todo!() } + pub open spec fn t(arg__x: Seq, arg__y: Seq) -> Seq { -serialize_owlSpec_t(owlSpec_t{owlSpec__x: arg__x, owlSpec__y: arg__y}) + serialize_owlSpec_t(owlSpec_t { owlSpec__x: arg__x, owlSpec__y: arg__y }) } + pub open spec fn _x(arg: Seq) -> Seq { -match parse_owlSpec_t(arg) { -Some(parsed) => parsed.owlSpec__x, -None => seq![] // TODO -} + match parse_owlSpec_t(arg) { + Some(parsed) => parsed.owlSpec__x, + None => seq![] // TODO + , + } } + pub open spec fn _y(arg: Seq) -> Seq { -match parse_owlSpec_t(arg) { -Some(parsed) => parsed.owlSpec__y, -None => seq![] // TODO -} + match parse_owlSpec_t(arg) { + Some(parsed) => parsed.owlSpec__y, + None => seq![] // TODO + , + } } #[is_variant] -pub enum owlSpec_Result{ -owlSpec_SomeResult(Seq), -owlSpec_NoResult() +pub enum owlSpec_Result { + owlSpec_SomeResult(Seq), + owlSpec_NoResult(), } + use crate::owlSpec_Result::*; -#[verifier(external_body)] pub closed spec fn parse_owlSpec_Result(x: Seq) -> Option { -todo!() +#[verifier(external_body)] +pub closed spec fn parse_owlSpec_Result(x: Seq) -> Option { + todo!() } -#[verifier(external_body)] pub closed spec fn serialize_owlSpec_Result(x: owlSpec_Result) -> Seq { -todo!() + +#[verifier(external_body)] +pub closed spec fn serialize_owlSpec_Result(x: owlSpec_Result) -> Seq { + todo!() } + pub open spec fn SomeResult(x: Seq) -> Seq { -serialize_owlSpec_Result(crate::owlSpec_Result::owlSpec_SomeResult(x)) + serialize_owlSpec_Result(crate::owlSpec_Result::owlSpec_SomeResult(x)) } + pub open spec fn NoResult() -> Seq { -serialize_owlSpec_Result(crate::owlSpec_Result::owlSpec_NoResult()) + serialize_owlSpec_Result(crate::owlSpec_Result::owlSpec_NoResult()) } - - #[is_variant] #[derive(Copy, Clone)] pub enum Endpoint { -Loc_alice, -Loc_bob + Loc_alice, + Loc_bob, +} + +#[verifier(external_body)] +pub closed spec fn endpoint_of_addr(addr: Seq) -> Endpoint { + unimplemented!() /* axiomatized */ + } -#[verifier(external_body)] pub closed spec fn endpoint_of_addr(addr: Seq) -> Endpoint { unimplemented!() /* axiomatized */ } -pub open spec fn alice_main_spec(cfg: cfg_alice, mut_state: state_alice) -> (res: ITree<(Seq, state_alice), Endpoint>) { -owl_spec!(mut_state,state_alice, +pub open spec fn alice_main_spec(cfg: cfg_alice, mut_state: state_alice) -> (res: ITree< + (Seq, state_alice), + Endpoint, +>) { + owl_spec!(mut_state,state_alice, let c = ((sample( NONCE_SIZE() , enc((*cfg.owl_shared_key).view(), (*cfg.owl_k_data).view()) ))) in (output (c) to (Endpoint::Loc_bob)) in @@ -164,8 +241,11 @@ None => {(ret (NoResult()))},})) in ) } -pub open spec fn bob_main_spec(cfg: cfg_bob, mut_state: state_bob) -> (res: ITree<((), state_bob), Endpoint>) { -owl_spec!(mut_state,state_bob, +pub open spec fn bob_main_spec(cfg: cfg_bob, mut_state: state_bob) -> (res: ITree< + ((), state_bob), + Endpoint, +>) { + owl_spec!(mut_state,state_bob, (input (i, ev)) in let caseval = 
((ret(dec((*cfg.owl_shared_key).view(), i)))) in (case (caseval) @@ -176,346 +256,461 @@ None => {(ret (()))},}) ) } - - // ------------------------------------ // ---------- IMPLEMENTATIONS --------- // ------------------------------------ - /* TODO this will be generated by parsley */ + pub struct owl_t { -pub owl__x : Vec, -pub owl__y : Vec + pub owl__x: Vec, + pub owl__y: Vec, } -#[verifier(external_body)] // TODO remove once parsley integrated + +#[verifier(external_body)] // TODO remove once parsley integrated pub exec fn parse_owl_t(arg: &[u8]) -> (res: Option) -ensures res.is_Some() ==> parse_owlSpec_t(arg.view()).is_Some(), + ensures + res.is_Some() ==> parse_owlSpec_t(arg.view()).is_Some(), res.is_None() ==> parse_owlSpec_t(arg.view()).is_None(), - res.is_Some() ==> res.get_Some_0().owl__x.view() == parse_owlSpec_t(arg.view()).get_Some_0().owlSpec__x, - res.is_Some() ==> res.get_Some_0().owl__y.view() == parse_owlSpec_t(arg.view()).get_Some_0().owlSpec__y, + res.is_Some() ==> res.get_Some_0().owl__x.view() == parse_owlSpec_t( + arg.view(), + ).get_Some_0().owlSpec__x, + res.is_Some() ==> res.get_Some_0().owl__y.view() == parse_owlSpec_t( + arg.view(), + ).get_Some_0().owlSpec__y, { -todo!() // call parsley exec parser + todo!() // call parsley exec parser + } -#[verifier(external_body)] // TODO remove once parsley integrated +#[verifier(external_body)] // TODO remove once parsley integrated pub exec fn serialize_owl_t(arg: &owl_t) -> (res: Vec) -ensures res.view() == serialize_owlSpec_t(owlSpec_t{owlSpec__x : arg.owl__x.view(), owlSpec__y : arg.owl__y.view()}) + ensures + res.view() == serialize_owlSpec_t( + owlSpec_t { owlSpec__x: arg.owl__x.view(), owlSpec__y: arg.owl__y.view() }, + ), { -todo!() // call parsley exec serializer and unwrap -} + todo!() // call parsley exec serializer and unwrap +} +// owl_NoResult -> 1, owl_SomeResult -> 2, +pub struct owl_Result { + pub data: Rc>, + pub parsing_outcome: owl_Result_ParsingOutcome, +} -// owl_NoResult -> 1, owl_SomeResult -> 2, -pub struct owl_Result { pub data: Rc>, pub parsing_outcome: owl_Result_ParsingOutcome} // #[derive(PartialEq, Eq, Debug)] pub enum owl_Result_ParsingOutcome { -Success, -Failure, -} -#[verifier(external_body)] pub fn len_valid_owl_Result(arg: &[u8]) -> Option { -if arg.len() < 1 { return None; } else -if *slice_index_get(arg, 0) == 1u8 && arg.len() >= 1 { return Some( 1); } else -if *slice_index_get(arg, 0) == 2u8 && arg.len() >= 13 { return Some( 13); } -else { return None; } -} -#[verifier(external_body)] pub fn parse_into_owl_Result(arg: &mut owl_Result) { -match arg.parsing_outcome { -owl_Result_ParsingOutcome::Failure => { -match len_valid_owl_Result(&(*arg.data).as_slice()) { -Some(l) => {arg.parsing_outcome = owl_Result_ParsingOutcome::Success;} -None => {arg.data = rc_new(vec_u8_from_elem(0, 1)); -arg.parsing_outcome = owl_Result_ParsingOutcome::Failure;} -} -}, -_ => {}} -} -#[verifier(external_body)] pub fn construct_owl_Result_owl_NoResult() -> (res: owl_Result) -ensures res.data.view() === NoResult() -{ -let v = vec_u8_from_elem(1u8, 1); -let res = owl_Result { data: rc_new(v), parsing_outcome: owl_Result_ParsingOutcome::Success}; -res + Success, + Failure, } -#[verifier(external_body)] pub fn construct_owl_Result_owl_SomeResult(arg: &[u8]) -> (res: owl_Result) -ensures res.data.view() === SomeResult(arg@) -{ -let mut v = vec_u8_from_elem(2u8, 1); -if arg.len() < 12 {return owl_Result {data: rc_new(vec_u8_from_elem(0, 1)), parsing_outcome: owl_Result_ParsingOutcome::Failure};} 
-extend_vec_u8(&mut v, slice_subrange(arg, 0, 12)); -let res = owl_Result {data: rc_new(v), parsing_outcome: owl_Result_ParsingOutcome::Success}; -res +#[verifier(external_body)] +pub fn len_valid_owl_Result(arg: &[u8]) -> Option { + if arg.len() < 1 { + return None; + } else if *slice_index_get(arg, 0) == 1u8 && arg.len() >= 1 { + return Some(1); + } else if *slice_index_get(arg, 0) == 2u8 && arg.len() >= 13 { + return Some(13); + } else { + return None; + } } +#[verifier(external_body)] +pub fn parse_into_owl_Result(arg: &mut owl_Result) { + match arg.parsing_outcome { + owl_Result_ParsingOutcome::Failure => { + match len_valid_owl_Result(&(*arg.data).as_slice()) { + Some(l) => { + arg.parsing_outcome = owl_Result_ParsingOutcome::Success; + }, + None => { + arg.data = rc_new(vec_u8_from_elem(0, 1)); + arg.parsing_outcome = owl_Result_ParsingOutcome::Failure; + }, + } + }, + _ => {}, + } +} - -#[verifier(external_body)] pub const fn alice_addr() -> (a:StrSlice<'static>) -ensures endpoint_of_addr(a.view()) == Endpoint::Loc_alice +#[verifier(external_body)] +pub fn construct_owl_Result_owl_NoResult() -> (res: owl_Result) + ensures + res.data.view() === NoResult(), { -new_strlit("127.0.0.1:9001") + let v = vec_u8_from_elem(1u8, 1); + let res = owl_Result { data: rc_new(v), parsing_outcome: owl_Result_ParsingOutcome::Success }; + res } -#[verifier(external_body)] pub const fn bob_addr() -> (a:StrSlice<'static>) -ensures endpoint_of_addr(a.view()) == Endpoint::Loc_bob + +#[verifier(external_body)] +pub fn construct_owl_Result_owl_SomeResult(arg: &[u8]) -> (res: owl_Result) + ensures + res.data.view() === SomeResult(arg@), { -new_strlit("127.0.0.1:9002") + let mut v = vec_u8_from_elem(2u8, 1); + if arg.len() < 12 { + return owl_Result { + data: rc_new(vec_u8_from_elem(0, 1)), + parsing_outcome: owl_Result_ParsingOutcome::Failure, + }; + } + extend_vec_u8(&mut v, slice_subrange(arg, 0, 12)); + let res = owl_Result { data: rc_new(v), parsing_outcome: owl_Result_ParsingOutcome::Success }; + res } -pub struct state_alice {} -impl state_alice { -#[verifier(external_body)] pub fn init_state_alice () -> Self { -state_alice {}}}pub struct cfg_alice {pub listener: TcpListener, -pub owl_k_data: Rc>, -pub owl_shared_key: Rc>, -pub salt: Rc>} -impl cfg_alice { -#[verifier(external_body)] pub fn init_cfg_alice (config_path : &StrSlice) -> Self { -let listener = TcpListener::bind(alice_addr().into_rust_str()).unwrap(); -let owl_k_data = owl_aead::gen_rand_key(cipher()); -let config_str = fs::read_to_string(config_path.into_rust_str()).expect("Config file not found"); -let config = deserialize_cfg_alice_config(&config_str); -return cfg_alice {listener, owl_k_data : rc_new(owl_k_data), owl_shared_key : rc_new(config.owl_shared_key), salt : rc_new(config.salt)}} pub fn owl_alice_main(&self, Tracked(itree): Tracked, state_alice), Endpoint>>, mut_state: &mut state_alice) -> (res: Result<( owl_Result -, Tracked, state_alice), Endpoint>> ), OwlError>) -requires itree@ == alice_main_spec(*self, *old(mut_state)) -ensures res.is_Ok() ==> (res.get_Ok_0().1)@@.results_in((res.get_Ok_0().0.data.view(), *mut_state)) - { -let tracked mut itree = itree;let res_inner = { - - - -let temp_owl__x5 = { rc_clone(&self.owl_shared_key) }; -let owl__x5 = rc_clone(&temp_owl__x5); - - -let temp_owl__x7 = { rc_clone(&self.owl_k_data) }; -let owl__x7 = rc_clone(&temp_owl__x7); - - - -let temp_owl__x8 = { let coins = owl_sample::<(Seq, state_alice)>(Tracked(&mut itree), nonce_size()); owl_enc(&(*rc_clone(&owl__x5)).as_slice(), 
&(*rc_clone(&owl__x7)).as_slice(), &coins.as_slice()) }; -let owl__x8 = rc_clone(&temp_owl__x8); - - -let temp_owl__x12 = { rc_clone(&owl__x8) }; -let owl__x12 = rc_clone(&temp_owl__x12); - - -let temp_owl__x13 = { owl_output::<(Seq, state_alice)>(Tracked(&mut itree), &(*rc_clone(&owl__x12)).as_slice(), &bob_addr(), &alice_addr()) }; -let owl__x13 = temp_owl__x13; - -let (temp_owl_i15, owl__14) = owl_input::<(Seq, state_alice)>(Tracked(&mut itree), &self.listener); -let owl_i15 = Rc::new(temp_owl_i15); - - -let temp_owl__x35 = { rc_clone(&self.owl_k_data) }; -let owl__x35 = rc_clone(&temp_owl__x35); - - -let temp_owl__x37 = { rc_clone(&owl_i15) }; -let owl__x37 = rc_clone(&temp_owl__x37); - - - -let temp_owl__x39 = { owl_dec(&(*rc_clone(&owl__x35)).as_slice(), &(*rc_clone(&owl__x37)).as_slice()) }; -let owl__x39 = temp_owl__x39; - - -let temp_owl__x40 = { -let temp_owl_caseval42 = { owl__x39 }; -let owl_caseval42 = temp_owl_caseval42; - - -match owl_caseval42 {Some(temp_owl_j19) => {let owl_j19 = rc_clone(&temp_owl_j19); - - -let temp_owl__x27 = { rc_clone(&owl_j19) }; -let owl__x27 = rc_clone(&temp_owl__x27); - -if let Some(parseval) = parse_owl_t(&(*rc_clone(&owl__x27)).as_slice()) { -let owl_x22 = parseval.owl__x; -let owl_y21 = parseval.owl__y; - -let temp_owl__x25 = { owl_y21 }; -let owl__x25 = rc_new(temp_owl__x25); - - -let temp_owl__x26 = { construct_owl_Result_owl_SomeResult(&(*rc_clone(&owl__x25)).as_slice()) }; -let owl__x26 = temp_owl__x26; - -owl__x26 -} else { - -let temp_owl__x20 = { construct_owl_Result_owl_NoResult() }; -let owl__x20 = temp_owl__x20; - -owl__x20 -}} -None => { - -let temp_owl__x28 = { construct_owl_Result_owl_NoResult() }; -let owl__x28 = temp_owl__x28; - -owl__x28}} }; -let owl__x40 = temp_owl__x40; +#[verifier(external_body)] +pub const fn alice_addr() -> (a: StrSlice<'static>) + ensures + endpoint_of_addr(a.view()) == Endpoint::Loc_alice, +{ + new_strlit("127.0.0.1:9001") +} +#[verifier(external_body)] +pub const fn bob_addr() -> (a: StrSlice<'static>) + ensures + endpoint_of_addr(a.view()) == Endpoint::Loc_bob, +{ + new_strlit("127.0.0.1:9002") +} -let temp_owl__x41 = { owl__x40 }; -let owl__x41 = temp_owl__x41; +pub struct state_alice {} -(owl__x41, Tracked(itree)) +impl state_alice { + #[verifier(external_body)] + pub fn init_state_alice() -> Self { + state_alice { } + } +} +pub struct cfg_alice { + pub listener: TcpListener, + pub owl_k_data: Rc>, + pub owl_shared_key: Rc>, + pub salt: Rc>, +} -}; -Ok(res_inner)} +impl cfg_alice { + #[verifier(external_body)] + pub fn init_cfg_alice(config_path: &StrSlice) -> Self { + let listener = TcpListener::bind(alice_addr().into_rust_str()).unwrap(); + let owl_k_data = owl_aead::gen_rand_key(cipher()); + let config_str = fs::read_to_string(config_path.into_rust_str()).expect( + "Config file not found", + ); + let config = deserialize_cfg_alice_config(&config_str); + return cfg_alice { + listener, + owl_k_data: rc_new(owl_k_data), + owl_shared_key: rc_new(config.owl_shared_key), + salt: rc_new(config.salt), + } + } + + pub fn owl_alice_main( + &self, + Tracked(itree): Tracked, state_alice), Endpoint>>, + mut_state: &mut state_alice, + ) -> (res: Result< + (owl_Result, Tracked, state_alice), Endpoint>>), + OwlError, + >) + requires + itree@ == alice_main_spec(*self, *old(mut_state)), + ensures + res.is_Ok() ==> (res.get_Ok_0().1)@@.results_in( + (res.get_Ok_0().0.data.view(), *mut_state), + ), + { + let tracked mut itree = itree; + let res_inner = { + let temp_owl__x5 = { rc_clone(&self.owl_shared_key) }; + let 
owl__x5 = rc_clone(&temp_owl__x5); + let temp_owl__x7 = { rc_clone(&self.owl_k_data) }; + let owl__x7 = rc_clone(&temp_owl__x7); + let temp_owl__x8 = { + let coins = owl_sample::<(Seq, state_alice)>(Tracked(&mut itree), nonce_size()); + owl_enc( + &(*rc_clone(&owl__x5)).as_slice(), + &(*rc_clone(&owl__x7)).as_slice(), + &coins.as_slice(), + ) + }; + let owl__x8 = rc_clone(&temp_owl__x8); + let temp_owl__x12 = { rc_clone(&owl__x8) }; + let owl__x12 = rc_clone(&temp_owl__x12); + let temp_owl__x13 = { + owl_output::<(Seq, state_alice)>( + Tracked(&mut itree), + &(*rc_clone(&owl__x12)).as_slice(), + &bob_addr(), + &alice_addr(), + ) + }; + let owl__x13 = temp_owl__x13; + let (temp_owl_i15, owl__14) = owl_input::<(Seq, state_alice)>( + Tracked(&mut itree), + &self.listener, + ); + let owl_i15 = Rc::new(temp_owl_i15); + let temp_owl__x35 = { rc_clone(&self.owl_k_data) }; + let owl__x35 = rc_clone(&temp_owl__x35); + let temp_owl__x37 = { rc_clone(&owl_i15) }; + let owl__x37 = rc_clone(&temp_owl__x37); + let temp_owl__x39 = { + owl_dec(&(*rc_clone(&owl__x35)).as_slice(), &(*rc_clone(&owl__x37)).as_slice()) + }; + let owl__x39 = temp_owl__x39; + let temp_owl__x40 = { + let temp_owl_caseval42 = { owl__x39 }; + let owl_caseval42 = temp_owl_caseval42; + match owl_caseval42 { + Some(temp_owl_j19) => { + let owl_j19 = rc_clone(&temp_owl_j19); + let temp_owl__x27 = { rc_clone(&owl_j19) }; + let owl__x27 = rc_clone(&temp_owl__x27); + if let Some(parseval) = parse_owl_t(&(*rc_clone(&owl__x27)).as_slice()) { + let owl_x22 = parseval.owl__x; + let owl_y21 = parseval.owl__y; + let temp_owl__x25 = { owl_y21 }; + let owl__x25 = rc_new(temp_owl__x25); + let temp_owl__x26 = { + construct_owl_Result_owl_SomeResult( + &(*rc_clone(&owl__x25)).as_slice(), + ) + }; + let owl__x26 = temp_owl__x26; + owl__x26 + } else { + let temp_owl__x20 = { construct_owl_Result_owl_NoResult() }; + let owl__x20 = temp_owl__x20; + owl__x20 + } + }, + None => { + let temp_owl__x28 = { construct_owl_Result_owl_NoResult() }; + let owl__x28 = temp_owl__x28; + owl__x28 + }, + } + }; + let owl__x40 = temp_owl__x40; + let temp_owl__x41 = { owl__x40 }; + let owl__x41 = temp_owl__x41; + (owl__x41, Tracked(itree)) + }; + Ok(res_inner) + } + + #[verifier(external_body)] + pub exec fn owl_alice_main_wrapper(&self, s: &mut state_alice) -> (_: owl_Result) { + let tracked dummy_tok: ITreeToken<(), Endpoint> = ITreeToken::< + (), + Endpoint, + >::dummy_itree_token(); + let tracked (Tracked(call_token), _) = split_bind(dummy_tok, alice_main_spec(*self, *s)); + let (res, _): (owl_Result, Tracked, state_alice), Endpoint>>) = + self.owl_alice_main(Tracked(call_token), s /* todo args? */ ).unwrap(); + res + } +} -#[verifier(external_body)] pub exec fn owl_alice_main_wrapper(&self, s: &mut state_alice)->(_: owl_Result){ -let tracked dummy_tok: ITreeToken<(), Endpoint> = ITreeToken::<(), Endpoint>::dummy_itree_token(); -let tracked (Tracked(call_token), _) = split_bind(dummy_tok, alice_main_spec(*self, *s)); -let (res,_): ( owl_Result -, Tracked, state_alice), Endpoint>> ) = self.owl_alice_main(Tracked(call_token), s, /* todo args? 
*/).unwrap(); -res -}} pub struct state_bob {} -impl state_bob { -#[verifier(external_body)] pub fn init_state_bob () -> Self { -state_bob {}}}pub struct cfg_bob {pub listener: TcpListener, -pub owl_y: Rc>, -pub owl_x: Rc>, -pub owl_shared_key: Rc>, -pub salt: Rc>} -impl cfg_bob { -#[verifier(external_body)] pub fn init_cfg_bob (config_path : &StrSlice) -> Self { -let listener = TcpListener::bind(bob_addr().into_rust_str()).unwrap(); -let owl_y = owl_aead::gen_rand_nonce(cipher()); -let owl_x = owl_aead::gen_rand_nonce(cipher()); -let config_str = fs::read_to_string(config_path.into_rust_str()).expect("Config file not found"); -let config = deserialize_cfg_bob_config(&config_str); -return cfg_bob {listener, owl_y : rc_new(owl_y), owl_x : rc_new(owl_x), owl_shared_key : rc_new(config.owl_shared_key), salt : rc_new(config.salt)}} pub fn owl_bob_main(&self, Tracked(itree): Tracked>, mut_state: &mut state_bob) -> (res: Result<( () -, Tracked> ), OwlError>) -requires itree@ == bob_main_spec(*self, *old(mut_state)) -ensures res.is_Ok() ==> (res.get_Ok_0().1)@@.results_in(((), *mut_state)) - { -let tracked mut itree = itree;let res_inner = { - - -let (temp_owl_i45, owl_ev44) = owl_input::<((), state_bob)>(Tracked(&mut itree), &self.listener); -let owl_i45 = Rc::new(temp_owl_i45); - - -let temp_owl__x75 = { rc_clone(&self.owl_shared_key) }; -let owl__x75 = rc_clone(&temp_owl__x75); - - -let temp_owl__x77 = { rc_clone(&owl_i45) }; -let owl__x77 = rc_clone(&temp_owl__x77); - - - -let temp_owl__x78 = { owl_dec(&(*rc_clone(&owl__x75)).as_slice(), &(*rc_clone(&owl__x77)).as_slice()) }; -let owl__x78 = temp_owl__x78; - - -let temp_owl_caseval79 = { owl__x78 }; -let owl_caseval79 = temp_owl_caseval79; - - -match owl_caseval79 {Some(temp_owl_k48) => {let owl_k48 = rc_clone(&temp_owl_k48); - - -let temp_owl__x58 = { rc_clone(&self.owl_x) }; -let owl__x58 = rc_clone(&temp_owl__x58); - - -let temp_owl__x60 = { rc_clone(&self.owl_y) }; -let owl__x60 = rc_clone(&temp_owl__x60); - - -let temp_owl__x62 = { owl_t {owl__x : clone_vec_u8(&*rc_clone(&owl__x58)), owl__y : clone_vec_u8(&*rc_clone(&owl__x60))} }; -let owl__x62 = temp_owl__x62; - - -let temp_owl__x63 = { owl__x62 }; -let owl__x63 = temp_owl__x63; - - -let temp_owl__x68 = { rc_clone(&owl_k48) }; -let owl__x68 = rc_clone(&temp_owl__x68); - - -let temp_owl__x70 = { owl__x63 }; -let owl__x70 = temp_owl__x70; - - - -let temp_owl__x71 = { let coins = owl_sample::<((), state_bob)>(Tracked(&mut itree), nonce_size()); owl_enc(&(*rc_clone(&owl__x68)).as_slice(), &(serialize_owl_t(&owl__x70)).as_slice(), &coins.as_slice()) }; -let owl__x71 = rc_clone(&temp_owl__x71); - - -let temp_owl__x72 = { rc_clone(&owl__x71) }; -let owl__x72 = rc_clone(&temp_owl__x72); - -( owl_output::<((), state_bob)>(Tracked(&mut itree), &(*rc_clone(&owl__x72)).as_slice(), &owl_ev44.as_str(), &bob_addr()) -, Tracked(itree) )} -None => { - -let temp_owl__x73 = { () }; -let owl__x73 = temp_owl__x73; - -(owl__x73, Tracked(itree))}} +impl state_bob { + #[verifier(external_body)] + pub fn init_state_bob() -> Self { + state_bob { } + } +} -}; -Ok(res_inner)} +pub struct cfg_bob { + pub listener: TcpListener, + pub owl_y: Rc>, + pub owl_x: Rc>, + pub owl_shared_key: Rc>, + pub salt: Rc>, +} -#[verifier(external_body)] pub exec fn owl_bob_main_wrapper(&self, s: &mut state_bob)->(_: ()){ -let tracked dummy_tok: ITreeToken<(), Endpoint> = ITreeToken::<(), Endpoint>::dummy_itree_token(); -let tracked (Tracked(call_token), _) = split_bind(dummy_tok, bob_main_spec(*self, *s)); -let (res,_): ( () -, 
Tracked> ) = self.owl_bob_main(Tracked(call_token), s, /* todo args? */).unwrap(); -res -}} +impl cfg_bob { + #[verifier(external_body)] + pub fn init_cfg_bob(config_path: &StrSlice) -> Self { + let listener = TcpListener::bind(bob_addr().into_rust_str()).unwrap(); + let owl_y = owl_aead::gen_rand_nonce(cipher()); + let owl_x = owl_aead::gen_rand_nonce(cipher()); + let config_str = fs::read_to_string(config_path.into_rust_str()).expect( + "Config file not found", + ); + let config = deserialize_cfg_bob_config(&config_str); + return cfg_bob { + listener, + owl_y: rc_new(owl_y), + owl_x: rc_new(owl_x), + owl_shared_key: rc_new(config.owl_shared_key), + salt: rc_new(config.salt), + } + } + + pub fn owl_bob_main( + &self, + Tracked(itree): Tracked>, + mut_state: &mut state_bob, + ) -> (res: Result<((), Tracked>), OwlError>) + requires + itree@ == bob_main_spec(*self, *old(mut_state)), + ensures + res.is_Ok() ==> (res.get_Ok_0().1)@@.results_in(((), *mut_state)), + { + let tracked mut itree = itree; + let res_inner = { + let (temp_owl_i45, owl_ev44) = owl_input::<((), state_bob)>( + Tracked(&mut itree), + &self.listener, + ); + let owl_i45 = Rc::new(temp_owl_i45); + let temp_owl__x75 = { rc_clone(&self.owl_shared_key) }; + let owl__x75 = rc_clone(&temp_owl__x75); + let temp_owl__x77 = { rc_clone(&owl_i45) }; + let owl__x77 = rc_clone(&temp_owl__x77); + let temp_owl__x78 = { + owl_dec(&(*rc_clone(&owl__x75)).as_slice(), &(*rc_clone(&owl__x77)).as_slice()) + }; + let owl__x78 = temp_owl__x78; + let temp_owl_caseval79 = { owl__x78 }; + let owl_caseval79 = temp_owl_caseval79; + match owl_caseval79 { + Some(temp_owl_k48) => { + let owl_k48 = rc_clone(&temp_owl_k48); + let temp_owl__x58 = { rc_clone(&self.owl_x) }; + let owl__x58 = rc_clone(&temp_owl__x58); + let temp_owl__x60 = { rc_clone(&self.owl_y) }; + let owl__x60 = rc_clone(&temp_owl__x60); + let temp_owl__x62 = { + owl_t { + owl__x: clone_vec_u8(&*rc_clone(&owl__x58)), + owl__y: clone_vec_u8(&*rc_clone(&owl__x60)), + } + }; + let owl__x62 = temp_owl__x62; + let temp_owl__x63 = { owl__x62 }; + let owl__x63 = temp_owl__x63; + let temp_owl__x68 = { rc_clone(&owl_k48) }; + let owl__x68 = rc_clone(&temp_owl__x68); + let temp_owl__x70 = { owl__x63 }; + let owl__x70 = temp_owl__x70; + let temp_owl__x71 = { + let coins = owl_sample::<((), state_bob)>( + Tracked(&mut itree), + nonce_size(), + ); + owl_enc( + &(*rc_clone(&owl__x68)).as_slice(), + &(serialize_owl_t(&owl__x70)).as_slice(), + &coins.as_slice(), + ) + }; + let owl__x71 = rc_clone(&temp_owl__x71); + let temp_owl__x72 = { rc_clone(&owl__x71) }; + let owl__x72 = rc_clone(&temp_owl__x72); + ( + owl_output::<((), state_bob)>( + Tracked(&mut itree), + &(*rc_clone(&owl__x72)).as_slice(), + &owl_ev44.as_str(), + &bob_addr(), + ), + Tracked(itree), + ) + }, + None => { + let temp_owl__x73 = { () }; + let owl__x73 = temp_owl__x73; + (owl__x73, Tracked(itree)) + }, + } + }; + Ok(res_inner) + } + + #[verifier(external_body)] + pub exec fn owl_bob_main_wrapper(&self, s: &mut state_bob) -> (_: ()) { + let tracked dummy_tok: ITreeToken<(), Endpoint> = ITreeToken::< + (), + Endpoint, + >::dummy_itree_token(); + let tracked (Tracked(call_token), _) = split_bind(dummy_tok, bob_main_spec(*self, *s)); + let (res, _): ((), Tracked>) = self.owl_bob_main( + Tracked(call_token), + s, /* todo args? 
*/ + ).unwrap(); + res + } +} // ------------------------------------ // ------------ ENTRY POINT ----------- // ------------------------------------ - -#[verifier(external_body)] #[allow(unreachable_code)] #[allow(unused_variables)] +#[verifier(external_body)] +#[allow(unreachable_code)] +#[allow(unused_variables)] fn entrypoint() { -let args: std::vec::Vec = env::args().collect(); -if args.len() >= 4 && args[1] == "run" && args[2] == "alice"{let loc = cfg_alice::init_cfg_alice(&String::from_rust_string(args[3].clone()).as_str()); -let mut mut_state = state_alice::init_state_alice(); -println!("Waiting for 5 seconds to let other parties start..."); -thread::sleep(Duration::new(5, 0)); -println!("Running owl_alice_main ..."); -let now = Instant::now(); -let res = loc.owl_alice_main_wrapper(&mut mut_state); -let elapsed = now.elapsed(); -println!("alice returned "/*, res*/); -println!("Elapsed: {:?}", elapsed);}else -if args.len() >= 4 && args[1] == "run" && args[2] == "bob"{let loc = cfg_bob::init_cfg_bob(&String::from_rust_string(args[3].clone()).as_str()); -let mut mut_state = state_bob::init_state_bob(); -println!("Waiting for 5 seconds to let other parties start..."); -thread::sleep(Duration::new(5, 0)); -println!("Running owl_bob_main ..."); -let now = Instant::now(); -let res = loc.owl_bob_main_wrapper(&mut mut_state); -let elapsed = now.elapsed(); -println!("bob returned "/*, res*/); -println!("Elapsed: {:?}", elapsed);}else -if args.len() >= 3 && args[1] == "config"{let owl_shared_key = owl_aead::gen_rand_key(cipher()); -let salt = owl_util::gen_rand_bytes(64); -let cfg_alice_config: cfg_alice_config = cfg_alice_config {owl_shared_key : owl_shared_key.clone(), salt : salt.clone()}; -let cfg_alice_config_serialized = serialize_cfg_alice_config(&cfg_alice_config); -let mut cfg_alice_f = fs::File::create(format!("{}/{}.owl_config", &args[2], "cfg_alice")).expect("Can't create config file"); -cfg_alice_f.write_all(cfg_alice_config_serialized.as_bytes()).expect("Can't write config file"); -let cfg_bob_config: cfg_bob_config = cfg_bob_config {owl_shared_key : owl_shared_key.clone(), salt : salt.clone()}; -let cfg_bob_config_serialized = serialize_cfg_bob_config(&cfg_bob_config); -let mut cfg_bob_f = fs::File::create(format!("{}/{}.owl_config", &args[2], "cfg_bob")).expect("Can't create config file"); -cfg_bob_f.write_all(cfg_bob_config_serialized.as_bytes()).expect("Can't write config file");}else{println!("Incorrect usage");} + let args: std::vec::Vec = env::args().collect(); + if args.len() >= 4 && args[1] == "run" && args[2] == "alice" { + let loc = cfg_alice::init_cfg_alice(&String::from_rust_string(args[3].clone()).as_str()); + let mut mut_state = state_alice::init_state_alice(); + println!("Waiting for 5 seconds to let other parties start..."); + thread::sleep(Duration::new(5, 0)); + println!("Running owl_alice_main ..."); + let now = Instant::now(); + let res = loc.owl_alice_main_wrapper(&mut mut_state); + let elapsed = now.elapsed(); + println!("alice returned "/*, res*/); + println!("Elapsed: {:?}", elapsed); + } else if args.len() >= 4 && args[1] == "run" && args[2] == "bob" { + let loc = cfg_bob::init_cfg_bob(&String::from_rust_string(args[3].clone()).as_str()); + let mut mut_state = state_bob::init_state_bob(); + println!("Waiting for 5 seconds to let other parties start..."); + thread::sleep(Duration::new(5, 0)); + println!("Running owl_bob_main ..."); + let now = Instant::now(); + let res = loc.owl_bob_main_wrapper(&mut mut_state); + let elapsed = now.elapsed(); + 
println!("bob returned "/*, res*/); + println!("Elapsed: {:?}", elapsed); + } else if args.len() >= 3 && args[1] == "config" { + let owl_shared_key = owl_aead::gen_rand_key(cipher()); + let salt = owl_util::gen_rand_bytes(64); + let cfg_alice_config: cfg_alice_config = cfg_alice_config { + owl_shared_key: owl_shared_key.clone(), + salt: salt.clone(), + }; + let cfg_alice_config_serialized = serialize_cfg_alice_config(&cfg_alice_config); + let mut cfg_alice_f = fs::File::create( + format!("{}/{}.owl_config", &args[2], "cfg_alice"), + ).expect("Can't create config file"); + cfg_alice_f.write_all(cfg_alice_config_serialized.as_bytes()).expect( + "Can't write config file", + ); + let cfg_bob_config: cfg_bob_config = cfg_bob_config { + owl_shared_key: owl_shared_key.clone(), + salt: salt.clone(), + }; + let cfg_bob_config_serialized = serialize_cfg_bob_config(&cfg_bob_config); + let mut cfg_bob_f = fs::File::create( + format!("{}/{}.owl_config", &args[2], "cfg_bob"), + ).expect("Can't create config file"); + cfg_bob_f.write_all(cfg_bob_config_serialized.as_bytes()).expect("Can't write config file"); + } else { + println!("Incorrect usage"); + } } - - } // verus! - -fn main() { entrypoint() } +fn main() { + entrypoint() +} From 9849171a4645986ccd507ff6c2f91b516ca5ba3e Mon Sep 17 00:00:00 2001 From: Jay Bosamiya Date: Mon, 26 Feb 2024 16:37:09 -0500 Subject: [PATCH 04/10] Check more files in examples/ on CI Multiple files are blocked currently on https://github.com/verus-lang/verusfmt/issues/33 --- tests/snapshot-examples.rs | 54 ++++++++++++++++++++++++++++++++++++ tests/syntax-rs-unchanged.rs | 23 --------------- 2 files changed, 54 insertions(+), 23 deletions(-) create mode 100644 tests/snapshot-examples.rs delete mode 100644 tests/syntax-rs-unchanged.rs diff --git a/tests/snapshot-examples.rs b/tests/snapshot-examples.rs new file mode 100644 index 0000000..956a634 --- /dev/null +++ b/tests/snapshot-examples.rs @@ -0,0 +1,54 @@ +//! Automatic tests for various files in ../examples/ +//! +//! This is essentially intended to be a snapshot test, like ./verus-consistency.rs, but only as a +//! quick indicator for whether files in `../examples/` (such as `../examples/syntax.rs`) have been +//! modified by any change. 
+ +fn check_snapshot(original: &str) { + let formatted = verusfmt::parse_and_format(&original).unwrap(); + if original != formatted { + let diff = similar::udiff::unified_diff( + similar::Algorithm::Patience, + &original, + &formatted, + 3, + Some(("original", "formatted")), + ); + println!("{diff}"); + panic!("Formatted output does not match"); + } +} + +#[test] +fn syntax_rs_unchanged() { + check_snapshot(include_str!("../examples/syntax.rs")); +} + +#[test] +#[ignore] // Due to https://github.com/verus-lang/verusfmt/issues/33 +fn ironfleet_rs_unchanged() { + check_snapshot(include_str!("../examples/ironfleet.rs")); +} + +#[test] +#[ignore] // Due to "fatal runtime error: stack overflow" during testing +fn mimalloc_rs_unchanged() { + check_snapshot(include_str!("../examples/mimalloc.rs")); +} + +#[test] +#[ignore] // Due to https://github.com/verus-lang/verusfmt/issues/33 +fn nr_rs_unchanged() { + check_snapshot(include_str!("../examples/nr.rs")); +} + +#[test] +fn owl_output_rs_unchanged() { + check_snapshot(include_str!("../examples/owl-output.rs")); +} + +#[test] +#[ignore] // Due to https://github.com/verus-lang/verusfmt/issues/33 +fn pagetable_rs_unchanged() { + check_snapshot(include_str!("../examples/pagetable.rs")); +} diff --git a/tests/syntax-rs-unchanged.rs b/tests/syntax-rs-unchanged.rs deleted file mode 100644 index 52b0e86..0000000 --- a/tests/syntax-rs-unchanged.rs +++ /dev/null @@ -1,23 +0,0 @@ -//! Testing ../examples/syntax.rs - -/// Just an automatic test to make sure that ../examples/syntax.rs is left unchanged by verusfmt. -/// -/// This is essentially intended to be a snapshot test, like ./snap-tests.rs, but only as a quick -/// indicator for whether `syntax.rs` has been modified by any change, in order to ensure that -/// `syntax.rs` always in sync with the output that would show up from verusfmt. -#[test] -fn syntax_rs_unchanged() { - let original = include_str!("../examples/syntax.rs").to_owned(); - let formatted = verusfmt::parse_and_format(&original).unwrap(); - if original != formatted { - let diff = similar::udiff::unified_diff( - similar::Algorithm::Patience, - &original, - &formatted, - 3, - Some(("original", "formatted")), - ); - println!("{diff}"); - panic!("Formatted output does not match"); - } -} From e5d9e1df2ab48be1702946b7bb49e9977eb96452 Mon Sep 17 00:00:00 2001 From: Jay Bosamiya Date: Mon, 26 Feb 2024 17:07:38 -0500 Subject: [PATCH 05/10] Prevent rustfmt from touching inside `verus!{...}` Fixes #33 --- src/lib.rs | 3 ++- tests/rustfmt-does-not-touch-verus.rs | 18 ++++++++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) create mode 100644 tests/rustfmt-does-not-touch-verus.rs diff --git a/src/lib.rs b/src/lib.rs index 5666a9e..84da34c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1431,11 +1431,12 @@ fn strip_whitespace(s: String) -> String { pub const VERUS_PREFIX: &str = "verus! 
{\n\n"; pub const VERUS_SUFFIX: &str = "\n} // verus!\n"; -/// Run rustfmt +/// Run rustfmt, only on code outside the `verus!` macro pub fn rustfmt(value: &str) -> Option { if let Ok(mut proc) = Command::new("rustfmt") .arg("--emit=stdout") .arg("--edition=2021") + .arg(r#"--config=skip_macro_invocations=["verus"]"#) .stdin(Stdio::piped()) .stdout(Stdio::piped()) .stderr(Stdio::piped()) diff --git a/tests/rustfmt-does-not-touch-verus.rs b/tests/rustfmt-does-not-touch-verus.rs new file mode 100644 index 0000000..53f102a --- /dev/null +++ b/tests/rustfmt-does-not-touch-verus.rs @@ -0,0 +1,18 @@ +#[test] +/// Regression test for https://github.com/verus-lang/verusfmt/issues/33 +fn mod_macro_item_idempotent() { + let file = r#" + mod foo { + verus! { + + bar! { + baz + } + + } + } +"#; + let formatted1 = verusfmt::rustfmt(&verusfmt::parse_and_format(file).unwrap()).unwrap(); + let formatted2 = verusfmt::rustfmt(&verusfmt::parse_and_format(&formatted1).unwrap()).unwrap(); + assert_eq!(formatted1, formatted2); +} From 502b24adff6595f0baf41ca46223d0ae50373f8c Mon Sep 17 00:00:00 2001 From: Jay Bosamiya Date: Mon, 26 Feb 2024 17:11:04 -0500 Subject: [PATCH 06/10] Run verusfmt on ironfleet --- examples/ironfleet.rs | 17036 +++++++++++++++++++---------------- tests/snapshot-examples.rs | 1 - 2 files changed, 9301 insertions(+), 7736 deletions(-) diff --git a/examples/ironfleet.rs b/examples/ironfleet.rs index 47d3568..c9e8cb1 100644 --- a/examples/ironfleet.rs +++ b/examples/ironfleet.rs @@ -13,25 +13,25 @@ mod abstract_end_point_t { verus! { - // This translates ironfleet's NodeIdentity type. - pub struct AbstractEndPoint { - pub id: Seq, - } - - impl AbstractEndPoint { - // Translates Common/Native/Io.s.dfy0 - pub open spec fn valid_physical_address(self) -> bool { - self.id.len() < 0x100000 - } +// This translates ironfleet's NodeIdentity type. +pub struct AbstractEndPoint { + pub id: Seq, +} - pub open spec fn abstractable(self) -> bool { - self.valid_physical_address() - } +impl AbstractEndPoint { + // Translates Common/Native/Io.s.dfy0 + pub open spec fn valid_physical_address(self) -> bool { + self.id.len() < 0x100000 } + pub open spec fn abstractable(self) -> bool { + self.valid_physical_address() } } +} // verus! +} + mod abstract_parameters_t { #![verus::trusted] @@ -41,23 +41,22 @@ mod abstract_parameters_t { verus! { - pub struct AbstractParameters { - pub max_seqno: nat, - pub max_delegations: nat, - } +pub struct AbstractParameters { + pub max_seqno: nat, + pub max_delegations: nat, +} - impl AbstractParameters { - // Translates Impl/SHT/Parameters::StaticParams - pub open spec fn static_params() -> AbstractParameters - { - AbstractParameters { - max_seqno: 0xffff_ffff_ffff_ffff as nat, - max_delegations: 0x7FFF_FFFF_FFFF_FFFF as nat, - } +impl AbstractParameters { + // Translates Impl/SHT/Parameters::StaticParams + pub open spec fn static_params() -> AbstractParameters { + AbstractParameters { + max_seqno: 0xffff_ffff_ffff_ffff as nat, + max_delegations: 0x7FFF_FFFF_FFFF_FFFF as nat, } } +} - } +} // verus! } mod abstract_service_t { @@ -80,18 +79,19 @@ mod abstract_service_t { verus! 
{ - #[is_variant] - pub enum AppRequest { - AppGetRequest{seqno:nat, key:AbstractKey}, - AppSetRequest{seqno:nat, key:AbstractKey, ov:Option}, - } +#[is_variant] +pub enum AppRequest { + AppGetRequest { seqno: nat, key: AbstractKey }, + AppSetRequest { seqno: nat, key: AbstractKey, ov: Option }, +} - #[is_variant] - pub enum AppReply { - AppReply{g_seqno:nat, key:AbstractKey, ov:Option}, - } +#[is_variant] +pub enum AppReply { + AppReply { g_seqno: nat, key: AbstractKey, ov: Option }, +} - } // verus +} // verus! + // verus } mod app_interface_t { @@ -110,29 +110,34 @@ mod app_interface_t { verus! { - pub type AbstractValue = Seq; +pub type AbstractValue = Seq; - pub type Hashtable = Map; +pub type Hashtable = Map; - // Translates Services/SHT/AppInterface.i.dfy :: max_val_len - pub open spec fn max_val_len() -> int { 1024 } - // Translates Services/SHT/AppInterface.i.dfy :: ValidKey - pub open spec fn valid_key(key: AbstractKey) -> bool { true } - // Translates Services/SHT/AppInterface.i.dfy :: ValidValue - pub open spec fn valid_value(value: AbstractValue) -> bool { value.len() < max_val_len() } +// Translates Services/SHT/AppInterface.i.dfy :: max_val_len +pub open spec fn max_val_len() -> int { + 1024 +} +// Translates Services/SHT/AppInterface.i.dfy :: ValidKey +pub open spec fn valid_key(key: AbstractKey) -> bool { + true +} - // Protocol/SHT/Delegations.i.dfy ExtractRange - pub open spec fn extract_range(h: Hashtable, kr: KeyRange) -> Hashtable - { - Map::::new( - |k: AbstractKey| h.dom().contains(k) && kr.contains(k), - |k: AbstractKey| h[k] - ) - } +// Translates Services/SHT/AppInterface.i.dfy :: ValidValue +pub open spec fn valid_value(value: AbstractValue) -> bool { + value.len() < max_val_len() +} +// Protocol/SHT/Delegations.i.dfy ExtractRange +pub open spec fn extract_range(h: Hashtable, kr: KeyRange) -> Hashtable { + Map::::new( + |k: AbstractKey| h.dom().contains(k) && kr.contains(k), + |k: AbstractKey| h[k], + ) +} - } +} // verus! } mod args_t { @@ -144,54 +149,54 @@ mod args_t { verus! { - pub type AbstractArg = Seq; - pub type AbstractArgs = Seq; +pub type AbstractArg = Seq; - pub type Arg = Vec; - pub type Args = Vec; +pub type AbstractArgs = Seq; - /// Clone a Vec. - /// - /// Implemented as a loop, so might not be as efficient as the - /// `std::vec::Vec::clone` method. - // TODO: implemented to avoid depending on (and waiting for) Vec::clone, - // which is made complicated by how it should treat its generic type - // parameter. Here the elements are u8 which are easy to deal with. - pub fn clone_vec_u8(v: &Vec) -> (out: Vec) - ensures - out@ == v@ - { - let mut out: Arg = Vec::with_capacity(v.len()); - let mut i = 0; - while i < v.len() - invariant - i <= v.len(), - i == out.len(), - forall |j| #![auto] 0 <= j < i ==> out@[j] == v@[j], - { - out.push(v[i]); - i = i + 1; - } - proof { - assert_seqs_equal!(out@, v@); - } - out - } +pub type Arg = Vec; +pub type Args = Vec; - pub fn clone_arg(arg: &Arg) -> (out: Arg) +/// Clone a Vec. +/// +/// Implemented as a loop, so might not be as efficient as the +/// `std::vec::Vec::clone` method. +// TODO: implemented to avoid depending on (and waiting for) Vec::clone, +// which is made complicated by how it should treat its generic type +// parameter. Here the elements are u8 which are easy to deal with. 
+pub fn clone_vec_u8(v: &Vec) -> (out: Vec) ensures - out@ == arg@ + out@ == v@, +{ + let mut out: Arg = Vec::with_capacity(v.len()); + let mut i = 0; + while i < v.len() + invariant + i <= v.len(), + i == out.len(), + forall|j| #![auto] 0 <= j < i ==> out@[j] == v@[j], { - clone_vec_u8(arg) + out.push(v[i]); + i = i + 1; } - - pub open spec fn abstractify_args(args: Args) -> AbstractArgs - { - args@.map(|i, arg: Arg| arg@) + proof { + assert_seqs_equal!(out@, v@); } + out +} - } +pub fn clone_arg(arg: &Arg) -> (out: Arg) + ensures + out@ == arg@, +{ + clone_vec_u8(arg) +} + +pub open spec fn abstractify_args(args: Args) -> AbstractArgs { + args@.map(|i, arg: Arg| arg@) +} + +} // verus! } mod cmessage_v { @@ -219,164 +224,198 @@ mod cmessage_v { verus! { - #[is_variant] - pub enum CMessage { - GetRequest{ k: CKey}, - SetRequest{ k: CKey, v: Option::>}, - Reply{ k: CKey, v: Option::> }, - Redirect{ k: CKey, id: EndPoint }, - Shard{ kr: KeyRange::, recipient: EndPoint }, - Delegate{ range: KeyRange::, h: CKeyHashMap}, - } +#[is_variant] +pub enum CMessage { + GetRequest { k: CKey }, + SetRequest { k: CKey, v: Option::> }, + Reply { k: CKey, v: Option::> }, + Redirect { k: CKey, id: EndPoint }, + Shard { kr: KeyRange::, recipient: EndPoint }, + Delegate { range: KeyRange::, h: CKeyHashMap }, +} - pub open spec fn optional_value_view(ov: Option::>) -> Option::> - { - match ov { - Some(v) => Some(v@), - None => None, - } +pub open spec fn optional_value_view(ov: Option::>) -> Option::> { + match ov { + Some(v) => Some(v@), + None => None, } +} - pub fn clone_optional_value(ov: &Option::>) -> (res: Option::>) - ensures optional_value_view(*ov) == optional_value_view(res) - { - match ov.as_ref() { - Some(v) => Some(clone_vec_u8(v)), - None => None, - } +pub fn clone_optional_value(ov: &Option::>) -> (res: Option::>) + ensures + optional_value_view(*ov) == optional_value_view(res), +{ + match ov.as_ref() { + Some(v) => Some(clone_vec_u8(v)), + None => None, } +} - // Translates Impl/SHT/AppInterface.i.dfy :: IsKeyValid - pub fn is_key_valid(key: &CKey) -> (b: bool) - ensures b == valid_key(*key) - { - true - } +// Translates Impl/SHT/AppInterface.i.dfy :: IsKeyValid +pub fn is_key_valid(key: &CKey) -> (b: bool) + ensures + b == valid_key(*key), +{ + true +} - // Translates Impl/SHT/AppInterface.i.dfy :: IsValueValid - pub fn is_value_valid(val: &Vec) -> (b: bool) - ensures b == valid_value(val@) - { - val.len() < 1024 - } +// Translates Impl/SHT/AppInterface.i.dfy :: IsValueValid +pub fn is_value_valid(val: &Vec) -> (b: bool) + ensures + b == valid_value(val@), +{ + val.len() < 1024 +} - impl CMessage { - // CMessageIsAbstractable - pub open spec fn abstractable(self) -> bool { +impl CMessage { + // CMessageIsAbstractable + pub open spec fn abstractable(self) -> bool { match self { CMessage::Redirect { k, id } => id@.abstractable(), CMessage::Shard { kr, recipient } => recipient@.abstractable(), _ => true, } - } + } - pub open spec fn view(self) -> Message { + pub open spec fn view(self) -> Message { match self { CMessage::GetRequest { k } => Message::GetRequest { key: k }, - CMessage::SetRequest { k, v } => Message::SetRequest { key: k, value: optional_value_view(v) }, + CMessage::SetRequest { k, v } => Message::SetRequest { + key: k, + value: optional_value_view(v), + }, CMessage::Reply { k, v } => Message::Reply { key: k, value: optional_value_view(v) }, CMessage::Redirect { k, id } => Message::Redirect { key: k, id: id@ }, - CMessage::Shard { kr, recipient } => Message::Shard { range: kr, 
recipient: recipient@ }, + CMessage::Shard { kr, recipient } => Message::Shard { + range: kr, + recipient: recipient@, + }, CMessage::Delegate { range, h } => Message::Delegate { range: range, h: h@ }, } - } + } - pub proof fn view_equal_spec() - ensures forall |x: &CMessage, y: &CMessage| #[trigger] x.view_equal(y) <==> x@ == y@ - { - assert forall |x: &CMessage, y: &CMessage| - #[trigger] x.view_equal(y) <==> x@ == y@ by - { - match (x, y) { - (CMessage::GetRequest { k: k1 }, CMessage::GetRequest { k: k2 }) => {}, - (CMessage::SetRequest { k: k1, v: v1 }, CMessage::SetRequest { k: k2, v: v2 }) => {}, - (CMessage::Reply { k: k1, v: v1 }, CMessage::Reply { k: k2, v: v2 }) => {}, - (CMessage::Redirect { k: k1, id: id1 }, CMessage::Redirect { k: k2, id: id2 }) => {}, - (CMessage::Shard { kr: kr1, recipient: r1 }, CMessage::Shard { kr: kr2, recipient: r2 }) => {}, - (CMessage::Delegate { range: r1, h: h1 }, CMessage::Delegate { range: r2, h: h2 }) => {}, - _ => { - assert(!x.view_equal(y) && x@ != y@); + pub proof fn view_equal_spec() + ensures + forall|x: &CMessage, y: &CMessage| #[trigger] x.view_equal(y) <==> x@ == y@, + { + assert forall|x: &CMessage, y: &CMessage| #[trigger] x.view_equal(y) <==> x@ == y@ by { + match (x, y) { + (CMessage::GetRequest { k: k1 }, CMessage::GetRequest { k: k2 }) => {}, + ( + CMessage::SetRequest { k: k1, v: v1 }, + CMessage::SetRequest { k: k2, v: v2 }, + ) => {}, + (CMessage::Reply { k: k1, v: v1 }, CMessage::Reply { k: k2, v: v2 }) => {}, + ( + CMessage::Redirect { k: k1, id: id1 }, + CMessage::Redirect { k: k2, id: id2 }, + ) => {}, + ( + CMessage::Shard { kr: kr1, recipient: r1 }, + CMessage::Shard { kr: kr2, recipient: r2 }, + ) => {}, + ( + CMessage::Delegate { range: r1, h: h1 }, + CMessage::Delegate { range: r2, h: h2 }, + ) => {}, + _ => { + assert(!x.view_equal(y) && x@ != y@); + }, } - } } - } - - // TODO this is awful ... because we don't have a View trait yet. 
- pub fn clone_value(value: &Option>) -> (out: Option>) - ensures - match value { - Some(vec) => { - &&& out.is_Some() - &&& out.unwrap()@ == vec@ - } - None => { - &&& out.is_None() - } - } - { - match value { - Some(vec) => Some(clone_vec_u8(vec)), - None => None, - } - } - - pub fn clone_up_to_view(&self) -> (c: Self) - ensures - c@ == self@ - { - match self { - CMessage::GetRequest{ k } => { CMessage::GetRequest{ k: k.clone() } }, - CMessage::SetRequest{ k , v } => { CMessage::SetRequest{ k: k.clone(), v: CMessage::clone_value(v) } }, - CMessage::Reply{ k, v } => { CMessage::Reply{ k: k.clone(), v: CMessage::clone_value(v) } }, - CMessage::Redirect{ k, id } => { CMessage::Redirect{ k: k.clone(), id: id.clone_up_to_view() } }, - CMessage::Shard{ kr, recipient } => { CMessage::Shard{ kr: kr.clone(), recipient: recipient.clone_up_to_view() } }, - CMessage::Delegate{ range, h } => { CMessage::Delegate{ range: range.clone(), h: h.clone_up_to_view() } }, - } - } + } - // Translates Impl/SHT/PacketParsing.i.dfy :: MessageMarshallable - pub open spec fn message_marshallable(&self) -> bool - { - match self { - CMessage::GetRequest{ k } => valid_key(*k), - CMessage::SetRequest{ k , v } => valid_key(*k) && valid_optional_value(optional_value_view(*v)), - CMessage::Reply{ k, v } => valid_key(*k) && valid_optional_value(optional_value_view(*v)), - CMessage::Redirect{ k, id } => valid_key(*k) && id@.valid_physical_address(), - CMessage::Shard{ kr, recipient } => recipient@.valid_physical_address() && !kr.is_empty(), - CMessage::Delegate{ range, h } => !range.is_empty() && valid_hashtable(h@), - } - } + // TODO this is awful ... because we don't have a View trait yet. + pub fn clone_value(value: &Option>) -> (out: Option>) + ensures + match value { + Some(vec) => { + &&& out.is_Some() + &&& out.unwrap()@ == vec@ + }, + None => { &&& out.is_None() }, + }, + { + match value { + Some(vec) => Some(clone_vec_u8(vec)), + None => None, + } + } - // Translates Impl/SHT/PacketParsing.i.dfy :: IsMessageMarshallable - pub fn is_message_marshallable(&self) -> (b: bool) - ensures b == self.message_marshallable() - { - match self { - CMessage::GetRequest{ k } => is_key_valid(k), - CMessage::SetRequest{ k , v } => - is_key_valid(k) && - match v { - Some(v) => is_value_valid(v), - None => true, - }, - CMessage::Reply{ k, v } => - is_key_valid(k) && - match v { - Some(v) => is_value_valid(v), - None => true, - }, - CMessage::Redirect{ k, id } => is_key_valid(k) && id.valid_physical_address(), - CMessage::Shard{ kr, recipient } => recipient.valid_physical_address() && kr.lo.lt(&kr.hi), - CMessage::Delegate{ range, h } => range.lo.lt(&range.hi) && h.valid(), - } - } + pub fn clone_up_to_view(&self) -> (c: Self) + ensures + c@ == self@, + { + match self { + CMessage::GetRequest { k } => { CMessage::GetRequest { k: k.clone() } }, + CMessage::SetRequest { k, v } => { + CMessage::SetRequest { k: k.clone(), v: CMessage::clone_value(v) } + }, + CMessage::Reply { k, v } => { + CMessage::Reply { k: k.clone(), v: CMessage::clone_value(v) } + }, + CMessage::Redirect { k, id } => { + CMessage::Redirect { k: k.clone(), id: id.clone_up_to_view() } + }, + CMessage::Shard { kr, recipient } => { + CMessage::Shard { kr: kr.clone(), recipient: recipient.clone_up_to_view() } + }, + CMessage::Delegate { range, h } => { + CMessage::Delegate { range: range.clone(), h: h.clone_up_to_view() } + }, + } } - pub open spec fn abstractify_cmessage_seq(messages: Seq) -> Seq> { - messages.map_values(|msg: CSingleMessage| msg@) + // Translates 
Impl/SHT/PacketParsing.i.dfy :: MessageMarshallable + pub open spec fn message_marshallable(&self) -> bool { + match self { + CMessage::GetRequest { k } => valid_key(*k), + CMessage::SetRequest { k, v } => valid_key(*k) && valid_optional_value( + optional_value_view(*v), + ), + CMessage::Reply { k, v } => valid_key(*k) && valid_optional_value( + optional_value_view(*v), + ), + CMessage::Redirect { k, id } => valid_key(*k) && id@.valid_physical_address(), + CMessage::Shard { kr, recipient } => recipient@.valid_physical_address() + && !kr.is_empty(), + CMessage::Delegate { range, h } => !range.is_empty() && valid_hashtable(h@), + } + } + + // Translates Impl/SHT/PacketParsing.i.dfy :: IsMessageMarshallable + pub fn is_message_marshallable(&self) -> (b: bool) + ensures + b == self.message_marshallable(), + { + match self { + CMessage::GetRequest { k } => is_key_valid(k), + CMessage::SetRequest { k, v } => is_key_valid(k) && match v { + Some(v) => is_value_valid(v), + None => true, + }, + CMessage::Reply { k, v } => is_key_valid(k) && match v { + Some(v) => is_value_valid(v), + None => true, + }, + CMessage::Redirect { k, id } => is_key_valid(k) && id.valid_physical_address(), + CMessage::Shard { kr, recipient } => recipient.valid_physical_address() && kr.lo.lt( + &kr.hi, + ), + CMessage::Delegate { range, h } => range.lo.lt(&range.hi) && h.valid(), + } } +} - /* $line_count$Proof$ */ define_enum_and_derive_marshalable! { +pub open spec fn abstractify_cmessage_seq(messages: Seq) -> Seq< + SingleMessage, +> { + messages.map_values(|msg: CSingleMessage| msg@) +} + +/* $line_count$Proof$ */ + +define_enum_and_derive_marshalable! { /* $line_count$Exec$ */ #[is_variant] /* $line_count$Exec$ */ pub enum CSingleMessage { /* $line_count$Exec$ */ #[tag = 0] @@ -388,149 +427,171 @@ mod cmessage_v { /* $line_count$Exec$ */ InvalidMessage, /* $line_count$Exec$ */ } } +// Note simplifications from IronFleet: +// - we don't have a runtime Parameters, just a static value supplied by a function. +// - we don't have a separate CParameters, just AbstractParameters. - // Note simplifications from IronFleet: - // - we don't have a runtime Parameters, just a static value supplied by a function. - // - we don't have a separate CParameters, just AbstractParameters. - impl CSingleMessage { - /// translates CSingleMessageIsAbstractable - pub open spec fn abstractable(self) -> bool { +impl CSingleMessage { + /// translates CSingleMessageIsAbstractable + pub open spec fn abstractable(self) -> bool { match self { CSingleMessage::Message { seqno: _, dst, m } => dst@.abstractable() && m.abstractable(), CSingleMessage::Ack { ack_seqno: _ } => true, - CSingleMessage::InvalidMessage {} => true, + CSingleMessage::InvalidMessage { } => true, } - } + } - /// translates CSingleMessageIsValid - // temp_valid to catch old callsites that intended to call abstractable() - pub open spec fn temp_valid(&self) -> bool { + /// translates CSingleMessageIsValid + // temp_valid to catch old callsites that intended to call abstractable() + pub open spec fn temp_valid(&self) -> bool { match self { - CSingleMessage::Message { seqno, .. } => seqno < AbstractParameters::static_params().max_seqno, - CSingleMessage::Ack { ack_seqno } => ack_seqno < AbstractParameters::static_params().max_seqno, - CSingleMessage::InvalidMessage { } => false, + CSingleMessage::Message { seqno, .. 
} => seqno + < AbstractParameters::static_params().max_seqno, + CSingleMessage::Ack { ack_seqno } => ack_seqno + < AbstractParameters::static_params().max_seqno, + CSingleMessage::InvalidMessage { } => false, } - } + } - pub open spec fn view(self) -> SingleMessage { + pub open spec fn view(self) -> SingleMessage { match self { - CSingleMessage::Message { seqno, dst, m } => SingleMessage::Message { seqno: seqno as nat, dst: dst@, m: m@ }, + CSingleMessage::Message { seqno, dst, m } => SingleMessage::Message { + seqno: seqno as nat, + dst: dst@, + m: m@, + }, CSingleMessage::Ack { ack_seqno } => SingleMessage::Ack { ack_seqno: ack_seqno as nat }, - CSingleMessage::InvalidMessage { } => SingleMessage::InvalidMessage { }, + CSingleMessage::InvalidMessage { } => SingleMessage::InvalidMessage { }, } - } + } - pub proof fn view_equal_spec() - ensures forall |x: &CSingleMessage, y: &CSingleMessage| #[trigger] x.view_equal(y) <==> x@ == y@ - { - assert forall |x: &CSingleMessage, y: &CSingleMessage| #[trigger] x.view_equal(y) <==> x@ == y@ by { - match (x, y) { - (CSingleMessage::Message { seqno: seqno1, dst: dst1, m: m1, }, - CSingleMessage::Message { seqno: seqno2, dst: dst2, m: m2, }) => { - CMessage::view_equal_spec(); - assert(seqno1.view_equal(seqno2) <==> seqno1 == seqno2); - assert(dst1.view_equal(dst2) <==> dst1@ == dst2@); - assert(m1.view_equal(m2) <==> m1@ == m2@); - } - (CSingleMessage::InvalidMessage { }, CSingleMessage::InvalidMessage { }) => {} - (CSingleMessage::Ack { ack_seqno: x1 }, CSingleMessage::Ack { ack_seqno: x2 }) => {} - _ => { - assert(!x.view_equal(y) && x@ != y@); + pub proof fn view_equal_spec() + ensures + forall|x: &CSingleMessage, y: &CSingleMessage| #[trigger] x.view_equal(y) <==> x@ == y@, + { + assert forall|x: &CSingleMessage, y: &CSingleMessage| #[trigger] + x.view_equal(y) <==> x@ == y@ by { + match (x, y) { + ( + CSingleMessage::Message { seqno: seqno1, dst: dst1, m: m1 }, + CSingleMessage::Message { seqno: seqno2, dst: dst2, m: m2 }, + ) => { + CMessage::view_equal_spec(); + assert(seqno1.view_equal(seqno2) <==> seqno1 == seqno2); + assert(dst1.view_equal(dst2) <==> dst1@ == dst2@); + assert(m1.view_equal(m2) <==> m1@ == m2@); + }, + (CSingleMessage::InvalidMessage { }, CSingleMessage::InvalidMessage { }) => {}, + ( + CSingleMessage::Ack { ack_seqno: x1 }, + CSingleMessage::Ack { ack_seqno: x2 }, + ) => {}, + _ => { + assert(!x.view_equal(y) && x@ != y@); + }, } - } } - } - - pub fn clone_up_to_view(&self) -> (c: Self) - ensures - c@ == self@ - { - match self { - CSingleMessage::Message{seqno, dst, m} => { - CSingleMessage::Message{seqno: *seqno, dst: dst.clone_up_to_view(), m: m.clone_up_to_view() } - }, - CSingleMessage::Ack{ack_seqno} => { CSingleMessage::Ack{ ack_seqno: *ack_seqno } }, - CSingleMessage::InvalidMessage{} => { CSingleMessage::InvalidMessage{} } - } - } + } - // Translates Impl/SHT/PacketParsing.i.dfy :: CSingleMessageMarshallable - pub open spec fn marshallable(&self) -> bool - { - match self { - CSingleMessage::Ack{..} => true, - CSingleMessage::Message{seqno, dst, m} => dst.valid_public_key() && m.message_marshallable(), - CSingleMessage::InvalidMessage{} => false, - } - } + pub fn clone_up_to_view(&self) -> (c: Self) + ensures + c@ == self@, + { + match self { + CSingleMessage::Message { seqno, dst, m } => { + CSingleMessage::Message { + seqno: *seqno, + dst: dst.clone_up_to_view(), + m: m.clone_up_to_view(), + } + }, + CSingleMessage::Ack { ack_seqno } => { CSingleMessage::Ack { ack_seqno: *ack_seqno } }, + 
CSingleMessage::InvalidMessage { } => { CSingleMessage::InvalidMessage { } }, + } + } - // Translates Impl/SHT/PacketParsing.i.dfy :: IsCSingleMessageMarshallable - pub fn is_marshallable(&self) -> (b: bool) - ensures b == self.marshallable() - { - match self { - CSingleMessage::Ack{ack_seqno} => true, - CSingleMessage::Message{seqno, dst, m} => dst.valid_physical_address() && m.is_message_marshallable(), - CSingleMessage::InvalidMessage{} => false, - } - } + // Translates Impl/SHT/PacketParsing.i.dfy :: CSingleMessageMarshallable + pub open spec fn marshallable(&self) -> bool { + match self { + CSingleMessage::Ack { .. } => true, + CSingleMessage::Message { seqno, dst, m } => dst.valid_public_key() + && m.message_marshallable(), + CSingleMessage::InvalidMessage { } => false, + } } - pub struct CPacket { - pub dst: EndPoint, - pub src: EndPoint, - pub msg: CSingleMessage, + // Translates Impl/SHT/PacketParsing.i.dfy :: IsCSingleMessageMarshallable + pub fn is_marshallable(&self) -> (b: bool) + ensures + b == self.marshallable(), + { + match self { + CSingleMessage::Ack { ack_seqno } => true, + CSingleMessage::Message { seqno, dst, m } => dst.valid_physical_address() + && m.is_message_marshallable(), + CSingleMessage::InvalidMessage { } => false, + } } +} + +pub struct CPacket { + pub dst: EndPoint, + pub src: EndPoint, + pub msg: CSingleMessage, +} - impl CPacket { - pub open spec fn valid(self) -> bool { +impl CPacket { + pub open spec fn valid(self) -> bool { &&& self.msg.temp_valid() - } + } - // Translates Impl/SHT/PacketParsing.i.dfy :: AbstractifyCPacketToShtPacket - pub open spec fn view(self) -> Packet { + // Translates Impl/SHT/PacketParsing.i.dfy :: AbstractifyCPacketToShtPacket + pub open spec fn view(self) -> Packet { Packet { dst: self.dst@, src: self.src@, msg: self.msg@ } - } + } - pub open spec fn abstractable(self) -> bool { + pub open spec fn abstractable(self) -> bool { &&& self.dst.abstractable() &&& self.src.abstractable() &&& self.msg.abstractable() - } } +} - // Translates Impl/SHT/CMessage :: CPacketSeqIsAbstractable - pub open spec fn cpacket_seq_is_abstractable(packets: Seq) -> bool - { - forall |i: int| 0 <= i && i < packets.len() ==> #[trigger] packets[i].abstractable() - } +// Translates Impl/SHT/CMessage :: CPacketSeqIsAbstractable +pub open spec fn cpacket_seq_is_abstractable(packets: Seq) -> bool { + forall|i: int| 0 <= i && i < packets.len() ==> #[trigger] packets[i].abstractable() +} - // Translates Impl/SHT/PacketParsing.i.dfy :: AbstractifyOutboundPacketsToSeqOfLSHTPackets - pub open spec fn abstractify_outbound_packets_to_seq_of_lsht_packets(packets: Seq) -> Seq - recommends cpacket_seq_is_abstractable(packets) - { - packets.map_values(|packet: CPacket| abstractify_cpacket_to_lsht_packet(packet)) - } +// Translates Impl/SHT/PacketParsing.i.dfy :: AbstractifyOutboundPacketsToSeqOfLSHTPackets +pub open spec fn abstractify_outbound_packets_to_seq_of_lsht_packets(packets: Seq) -> Seq< + LSHTPacket, +> + recommends + cpacket_seq_is_abstractable(packets), +{ + packets.map_values(|packet: CPacket| abstractify_cpacket_to_lsht_packet(packet)) +} - // Translates Impl/SHT/CMessage.i.dfy :: AbstractifySeqOfCPacketsToSetOfShtPackets - pub open spec fn abstractify_seq_of_cpackets_to_set_of_sht_packets(cps: Seq) -> Set - recommends cpacket_seq_is_abstractable(cps) - { - cps.map_values(|cp: CPacket| cp@).to_set() - } +// Translates Impl/SHT/CMessage.i.dfy :: AbstractifySeqOfCPacketsToSetOfShtPackets +pub open spec fn 
abstractify_seq_of_cpackets_to_set_of_sht_packets(cps: Seq) -> Set + recommends + cpacket_seq_is_abstractable(cps), +{ + cps.map_values(|cp: CPacket| cp@).to_set() +} - impl CPacket { - fn clone_up_to_view(&self) -> (o: Self) { - CPacket { - dst: self.dst.clone_up_to_view(), - src: self.src.clone_up_to_view(), - msg: self.msg.clone_up_to_view(), - } +impl CPacket { + fn clone_up_to_view(&self) -> (o: Self) { + CPacket { + dst: self.dst.clone_up_to_view(), + src: self.src.clone_up_to_view(), + msg: self.msg.clone_up_to_view(), } } +} - } // verus! +} // verus! } mod delegation_map_t { @@ -553,51 +614,55 @@ mod delegation_map_t { verus! { - #[verifier::ext_equal] // effing INSAASAAAAANNE - pub struct AbstractDelegationMap(pub Map); - - impl AbstractDelegationMap { - pub open spec fn init(root_identity: AbstractEndPoint) -> Self - { - AbstractDelegationMap(Map::total(|k: AbstractKey| root_identity)) - } +#[verifier::ext_equal] // effing INSAASAAAAANNE +pub struct AbstractDelegationMap(pub Map); - #[verifier(inline)] - pub open spec fn view(self) -> Map { - self.0 - } +impl AbstractDelegationMap { + pub open spec fn init(root_identity: AbstractEndPoint) -> Self { + AbstractDelegationMap(Map::total(|k: AbstractKey| root_identity)) + } - #[verifier(inline)] - pub open spec fn spec_index(self, key: AbstractKey) -> AbstractEndPoint - recommends self.0.dom().contains(key) - { - self@.index(key) - } + #[verifier(inline)] + pub open spec fn view(self) -> Map { + self.0 + } - pub open spec fn is_complete(self) -> bool { - self@.dom().is_full() - } + #[verifier(inline)] + pub open spec fn spec_index(self, key: AbstractKey) -> AbstractEndPoint + recommends + self.0.dom().contains(key), + { + self@.index(key) + } - /// Translates Protocol/SHT/Delegations.i.dfy :: UpdateDelegationMap - pub open spec fn update(self, newkr: KeyRange, host: AbstractEndPoint) -> Self - recommends - self.is_complete(), - { - AbstractDelegationMap(self@.union_prefer_right(Map::new(|k| newkr.contains(k), |k| host))) - } + pub open spec fn is_complete(self) -> bool { + self@.dom().is_full() + } - /// Translates Protocol/SHT/Delegations.i.dfy :: DelegateForKeyRangeIsHost - pub open spec fn delegate_for_key_range_is_host(self, kr: KeyRange, id: AbstractEndPoint) -> bool - recommends - self.is_complete(), - { - forall |k: AbstractKey| #[trigger] kr.contains(k) ==> self[k] == id - } + /// Translates Protocol/SHT/Delegations.i.dfy :: UpdateDelegationMap + pub open spec fn update(self, newkr: KeyRange, host: AbstractEndPoint) -> Self + recommends + self.is_complete(), + { + AbstractDelegationMap(self@.union_prefer_right(Map::new(|k| newkr.contains(k), |k| host))) } + /// Translates Protocol/SHT/Delegations.i.dfy :: DelegateForKeyRangeIsHost + pub open spec fn delegate_for_key_range_is_host( + self, + kr: KeyRange, + id: AbstractEndPoint, + ) -> bool + recommends + self.is_complete(), + { + forall|k: AbstractKey| #[trigger] kr.contains(k) ==> self[k] == id } } +} // verus! +} + mod delegation_map_v { //! Translates file Distributed/Impl/SHT/Delegations.i.dfy @@ -617,80 +682,84 @@ mod delegation_map_v { verus! 
{ - impl Ordering { - pub open spec fn eq(self) -> bool { - matches!(self, Ordering::Equal) - } - - pub open spec fn ne(self) -> bool { - !matches!(self, Ordering::Equal) - } - - pub open spec fn lt(self) -> bool { - matches!(self, Ordering::Less) - } - - pub open spec fn gt(self) -> bool { - matches!(self, Ordering::Greater) - } - - pub open spec fn le(self) -> bool { - !matches!(self, Ordering::Greater) - } +impl Ordering { + pub open spec fn eq(self) -> bool { + matches!(self, Ordering::Equal) + } - pub open spec fn ge(self) -> bool { - !matches!(self, Ordering::Less) - } + pub open spec fn ne(self) -> bool { + !matches!(self, Ordering::Equal) + } - pub fn is_eq(self) -> (b:bool) - ensures b == self.eq(), - { - matches!(self, Ordering::Equal) - } + pub open spec fn lt(self) -> bool { + matches!(self, Ordering::Less) + } - pub fn is_ne(self) -> (b:bool) - ensures b == self.ne(), - { - !matches!(self, Ordering::Equal) - } + pub open spec fn gt(self) -> bool { + matches!(self, Ordering::Greater) + } - pub const fn is_lt(self) -> (b:bool) - ensures b == self.lt(), - { - matches!(self, Ordering::Less) - } + pub open spec fn le(self) -> bool { + !matches!(self, Ordering::Greater) + } - pub const fn is_gt(self) -> (b:bool) - ensures b == self.gt(), - { - matches!(self, Ordering::Greater) - } + pub open spec fn ge(self) -> bool { + !matches!(self, Ordering::Less) + } - pub const fn is_le(self) -> (b:bool) - ensures b == self.le(), - { - !matches!(self, Ordering::Greater) - } + pub fn is_eq(self) -> (b: bool) + ensures + b == self.eq(), + { + matches!(self, Ordering::Equal) + } - pub const fn is_ge(self) -> (b:bool) - ensures b == self.ge(), - { - !matches!(self, Ordering::Less) - } + pub fn is_ne(self) -> (b: bool) + ensures + b == self.ne(), + { + !matches!(self, Ordering::Equal) } + pub const fn is_lt(self) -> (b: bool) + ensures + b == self.lt(), + { + matches!(self, Ordering::Less) + } - // Stores the entries from smallest to largest - struct StrictlyOrderedVec { - v: Vec, + pub const fn is_gt(self) -> (b: bool) + ensures + b == self.gt(), + { + matches!(self, Ordering::Greater) } + pub const fn is_le(self) -> (b: bool) + ensures + b == self.le(), + { + !matches!(self, Ordering::Greater) + } - spec fn sorted(s: Seq) -> bool + pub const fn is_ge(self) -> (b: bool) + ensures + b == self.ge(), { - forall |i, j| #![auto] 0 <= i < j < s.len() ==> s[i].cmp_spec(s[j]).lt() + !matches!(self, Ordering::Less) } - /* +} + +// Stores the entries from smallest to largest +struct StrictlyOrderedVec { + v: Vec, +} + +spec fn sorted(s: Seq) -> bool { + forall|i, j| #![auto] 0 <= i < j < s.len() ==> s[i].cmp_spec(s[j]).lt() +} + +/* proof fn sorted_subrange(s: Seq, i: int, j: int) requires 0 <= i <= j <= s.len(), @@ -705,269 +774,295 @@ mod delegation_map_v { } */ - impl StrictlyOrderedVec { - pub closed spec fn view(self) -> Seq { - self.v@ - } +impl StrictlyOrderedVec { + pub closed spec fn view(self) -> Seq { + self.v@ + } - pub closed spec fn valid(self) -> bool { - sorted(self@) && self@.no_duplicates() - } - - proof fn to_set(self) -> (s: Set) - requires self.valid(), - ensures s == self@.to_set(), - s.finite(), - s.len() == self@.len(), - { - seq_to_set_is_finite::(self@); - self@.unique_seq_to_set(); - self@.to_set() - } - - fn new() -> (v: Self) - ensures v@ == Seq::::empty(), - v.valid(), - { - StrictlyOrderedVec { v: Vec::new() } - } + pub closed spec fn valid(self) -> bool { + sorted(self@) && self@.no_duplicates() + } - fn len(&self) -> (len: usize ) - ensures len == self@.len() - { - 
self.v.len() - } + proof fn to_set(self) -> (s: Set) + requires + self.valid(), + ensures + s == self@.to_set(), + s.finite(), + s.len() == self@.len(), + { + seq_to_set_is_finite::(self@); + self@.unique_seq_to_set(); + self@.to_set() + } - // TODO(parno): returning an &K is a bit more Rusty (and faster!) - fn index(&self, i: usize) -> (k: K) - requires i < self@.len(), - ensures k == self@[i as int] - { - (self.v[i]).clone() - } + fn new() -> (v: Self) + ensures + v@ == Seq::::empty(), + v.valid(), + { + StrictlyOrderedVec { v: Vec::new() } + } - fn set(&mut self, i: usize, k: K) - requires old(self).valid(), - i < old(self)@.len(), - i > 0 ==> old(self)@[i as int - 1].cmp_spec(k).lt(), - i < old(self)@.len() - 1 ==> k.cmp_spec(old(self)@[i as int + 1]).lt(), - ensures - self.valid(), - self@ == old(self)@.update(i as int, k), - { - self.v.set(i, k); - assert forall |m, n| 0 <= m < n < self@.len() implies #[trigger](self@[m].cmp_spec(self@[n]).lt()) by { - K::cmp_properties(); - } + fn len(&self) -> (len: usize) + ensures + len == self@.len(), + { + self.v.len() + } - assert forall |i, j| 0 <= i < self@.len() && 0 <= j < self@.len() && i != j implies self@[i] != self@[j] by { - K::cmp_properties(); - } + // TODO(parno): returning an &K is a bit more Rusty (and faster!) + fn index(&self, i: usize) -> (k: K) + requires + i < self@.len(), + ensures + k == self@[i as int], + { + (self.v[i]).clone() + } + fn set(&mut self, i: usize, k: K) + requires + old(self).valid(), + i < old(self)@.len(), + i > 0 ==> old(self)@[i as int - 1].cmp_spec(k).lt(), + i < old(self)@.len() - 1 ==> k.cmp_spec(old(self)@[i as int + 1]).lt(), + ensures + self.valid(), + self@ == old(self)@.update(i as int, k), + { + self.v.set(i, k); + assert forall|m, n| 0 <= m < n < self@.len() implies #[trigger] (self@[m].cmp_spec( + self@[n], + ).lt()) by { + K::cmp_properties(); + } + assert forall|i, j| 0 <= i < self@.len() && 0 <= j < self@.len() && i != j implies self@[i] + != self@[j] by { + K::cmp_properties(); } + } - fn remove(&mut self, i: usize) -> (k: K) - requires - old(self).valid(), - i < old(self)@.len(), - ensures - self.valid(), - k == old(self)@.index(i as int), - self@ == old(self)@.remove(i as int), - self@.to_set() == old(self)@.to_set().remove(k), - { - let k = self.v.remove(i); - proof { - let old_s = old(self)@.to_set().remove(k); - let new_s = self@.to_set(); - assert forall |e| old_s.contains(e) implies new_s.contains(e) by { - assert(old(self)@.to_set().contains(e)); - let n = choose |n: int| 0 <= n < old(self)@.len() && old(self)@[n] == e; - if n < i { - assert(self@[n] == e); // OBSERVE - } else { - assert(self@[n-1] == e); // OBSERVE - } + fn remove(&mut self, i: usize) -> (k: K) + requires + old(self).valid(), + i < old(self)@.len(), + ensures + self.valid(), + k == old(self)@.index(i as int), + self@ == old(self)@.remove(i as int), + self@.to_set() == old(self)@.to_set().remove(k), + { + let k = self.v.remove(i); + proof { + let old_s = old(self)@.to_set().remove(k); + let new_s = self@.to_set(); + assert forall|e| old_s.contains(e) implies new_s.contains(e) by { + assert(old(self)@.to_set().contains(e)); + let n = choose|n: int| 0 <= n < old(self)@.len() && old(self)@[n] == e; + if n < i { + assert(self@[n] == e); // OBSERVE + } else { + assert(self@[n - 1] == e); // OBSERVE } - assert_sets_equal!(self@.to_set(), old(self)@.to_set().remove(k)); } - k + assert_sets_equal!(self@.to_set(), old(self)@.to_set().remove(k)); } + k + } - /// Remove entries in the range [start, end) - fn erase(&mut self, 
start: usize, end: usize) - requires - old(self).valid(), - start <= end <= old(self)@.len(), - ensures - self.valid(), - self@ == old(self)@.subrange(0, start as int) + old(self)@.subrange(end as int, old(self)@.len() as int), - // TODO: We might want to strengthen this further to say that the two sets on the RHS - // are disjoint - old(self)@.to_set() == self@.to_set() + old(self)@.subrange(start as int, end as int).to_set(), - { - let mut deleted = 0; - let ghost mut deleted_set; - proof { - deleted_set = Set::empty(); - assert_seqs_equal!(self@, + /// Remove entries in the range [start, end) + fn erase(&mut self, start: usize, end: usize) + requires + old(self).valid(), + start <= end <= old(self)@.len(), + ensures + self.valid(), + self@ == old(self)@.subrange(0, start as int) + old(self)@.subrange( + end as int, + old(self)@.len() as int, + ), + // TODO: We might want to strengthen this further to say that the two sets on the RHS + // are disjoint + old(self)@.to_set() == self@.to_set() + old(self)@.subrange( + start as int, + end as int, + ).to_set(), + { + let mut deleted = 0; + let ghost mut deleted_set; + proof { + deleted_set = Set::empty(); + assert_seqs_equal!(self@, old(self)@.subrange(0, start as int) + old(self)@.subrange(start as int + deleted as int, old(self)@.len() as int)); - assert_sets_equal!(deleted_set, + assert_sets_equal!(deleted_set, old(self)@.subrange(start as int, start as int + deleted as int).to_set()); - assert_sets_equal!(old(self)@.to_set(), + assert_sets_equal!(old(self)@.to_set(), self@.to_set() + deleted_set); + } + while deleted < end - start + invariant + start <= end <= old(self)@.len(), + self@.len() == old(self)@.len() - deleted, + 0 <= deleted <= end - start, + old(self).valid(), + self.valid(), + self@ == old(self)@.subrange(0, start as int) + old(self)@.subrange( + start as int + deleted as int, + old(self)@.len() as int, + ), + deleted_set == old(self)@.subrange( + start as int, + start as int + deleted as int, + ).to_set(), + deleted_set.len() == deleted, + old(self)@.to_set() == self@.to_set() + deleted_set, + { + let ghost mut old_deleted_set; + let ghost mut old_deleted_seq; + let ghost mut target; + proof { + old_deleted_set = deleted_set; + old_deleted_seq = old(self)@.subrange(start as int, start as int + deleted as int); + target = self@[start as int]; + deleted_set = deleted_set.insert(self@[start as int]); } - while deleted < end - start - invariant - start <= end <= old(self)@.len(), - self@.len() == old(self)@.len() - deleted, - 0 <= deleted <= end - start, - old(self).valid(), - self.valid(), - self@ == old(self)@.subrange(0, start as int) + old(self)@.subrange(start as int + deleted as int, old(self)@.len() as int), - deleted_set == old(self)@.subrange(start as int, start as int + deleted as int).to_set(), - deleted_set.len() == deleted, - old(self)@.to_set() == self@.to_set() + deleted_set, - { - let ghost mut old_deleted_set; - let ghost mut old_deleted_seq; - let ghost mut target; - proof { - old_deleted_set = deleted_set; - old_deleted_seq = old(self)@.subrange(start as int, start as int + deleted as int); - target = self@[start as int]; - deleted_set = deleted_set.insert(self@[start as int]); - } - self.remove(start); - deleted = deleted + 1; - proof { - assert_seqs_equal!(self@, + self.remove(start); + deleted = deleted + 1; + proof { + assert_seqs_equal!(self@, old(self)@.subrange(0, start as int) + old(self)@.subrange(start as int + deleted as int, old(self)@.len() as int)); - let deleted_seq = old(self)@.subrange(start 
as int, - start as int + deleted as int); - seq_to_set_is_finite::(deleted_seq); - deleted_seq.unique_seq_to_set(); - - assert forall |e| #[trigger] deleted_set.contains(e) - implies deleted_seq.to_set().contains(e) by { - if e == target { - assert(deleted_seq[deleted as int - 1] == e); // OBSERVE - } else { - assert(old_deleted_set.contains(e)); - assert(old_deleted_seq.contains(e)); - let i = choose |i| 0 <= i < old_deleted_seq.len() && old_deleted_seq[i] == e; - assert(deleted_seq[i] == e); // OBSERVE - } + let deleted_seq = old(self)@.subrange(start as int, start as int + deleted as int); + seq_to_set_is_finite::(deleted_seq); + deleted_seq.unique_seq_to_set(); + assert forall|e| #[trigger] + deleted_set.contains(e) implies deleted_seq.to_set().contains(e) by { + if e == target { + assert(deleted_seq[deleted as int - 1] == e); // OBSERVE + } else { + assert(old_deleted_set.contains(e)); + assert(old_deleted_seq.contains(e)); + let i = choose|i| 0 <= i < old_deleted_seq.len() && old_deleted_seq[i] == e; + assert(deleted_seq[i] == e); // OBSERVE } - assert forall |e| #[trigger] deleted_seq.to_set().contains(e) - implies deleted_set.contains(e) by { - if e == target { - } else { - let i = choose |i| 0 <= i < deleted_seq.len() && deleted_seq[i] == e; - assert(old_deleted_seq[i] == e); // OBSERVE - } + } + assert forall|e| #[trigger] + deleted_seq.to_set().contains(e) implies deleted_set.contains(e) by { + if e == target { + } else { + let i = choose|i| 0 <= i < deleted_seq.len() && deleted_seq[i] == e; + assert(old_deleted_seq[i] == e); // OBSERVE } - assert_sets_equal!(deleted_set, + } + assert_sets_equal!(deleted_set, deleted_seq.to_set()); - assert_sets_equal!(old(self)@.to_set(), + assert_sets_equal!(old(self)@.to_set(), self@.to_set() + deleted_set); - } } - } + } - fn insert(&mut self, k: K) -> (i: usize) - requires - old(self).valid(), - !old(self)@.contains(k), - ensures self.valid(), - self@.len() == old(self)@.len() + 1, - 0 <= i < self@.len(), - self@ == old(self)@.insert(i as int, k), - self@.to_set() == old(self)@.to_set().insert(k), + fn insert(&mut self, k: K) -> (i: usize) + requires + old(self).valid(), + !old(self)@.contains(k), + ensures + self.valid(), + self@.len() == old(self)@.len() + 1, + 0 <= i < self@.len(), + self@ == old(self)@.insert(i as int, k), + self@.to_set() == old(self)@.to_set().insert(k), + { + // Find the index where we should insert k + let mut index: usize = 0; + while index < self.v.len() && self.v[index].cmp(&k).is_lt() + invariant + 0 <= index <= self@.len(), + forall|i| 0 <= i < index ==> (#[trigger] self@.index(i).cmp_spec(k)).lt(), { - // Find the index where we should insert k - let mut index: usize = 0; - while index < self.v.len() && self.v[index].cmp(&k).is_lt() - invariant - 0 <= index <= self@.len(), - forall |i| 0 <= i < index ==> (#[trigger] self@.index(i).cmp_spec(k)).lt() - { - index = index + 1; - } - self.v.insert(index, k); - assert forall |m, n| 0 <= m < n < self@.len() implies #[trigger](self@[m].cmp_spec(self@[n]).lt()) by { - K::cmp_properties(); - } - assert(self@.to_set() == old(self)@.to_set().insert(k)) by { - let new_s = self@.to_set(); - let old_s = old(self)@.to_set().insert(k); - assert(self@[index as int] == k); // OBSERVE - assert forall |e| old_s.contains(e) implies new_s.contains(e) by { - if e == k { + index = index + 1; + } + self.v.insert(index, k); + assert forall|m, n| 0 <= m < n < self@.len() implies #[trigger] (self@[m].cmp_spec( + self@[n], + ).lt()) by { + K::cmp_properties(); + } + assert(self@.to_set() 
== old(self)@.to_set().insert(k)) by { + let new_s = self@.to_set(); + let old_s = old(self)@.to_set().insert(k); + assert(self@[index as int] == k); // OBSERVE + assert forall|e| old_s.contains(e) implies new_s.contains(e) by { + if e == k { + } else { + let i = choose|i: int| 0 <= i < old(self)@.len() && old(self)@[i] == e; + if i < index { + assert(self@[i] == e); // OBSERVE } else { - let i = choose |i: int| 0 <= i < old(self)@.len() && old(self)@[i] == e; - if i < index { - assert(self@[i] == e); // OBSERVE - } else { - assert(self@[i+1] == e); // OBSERVE - } + assert(self@[i + 1] == e); // OBSERVE } - }; - assert_sets_equal!(new_s, old_s); + } }; - return index; - } + assert_sets_equal!(new_s, old_s); + }; + return index; } +} - impl KeyIterator { - // #[verifier(when_used_as_spec(new_spec))] - pub fn new(k: K) -> (s: Self) - ensures s.k == Some(k) - { - KeyIterator { k: Some(k) } - } +impl KeyIterator { + // #[verifier(when_used_as_spec(new_spec))] + pub fn new(k: K) -> (s: Self) + ensures + s.k == Some(k), + { + KeyIterator { k: Some(k) } + } - pub open spec fn end_spec() -> (s: Self) { - KeyIterator { k: None } - } + pub open spec fn end_spec() -> (s: Self) { + KeyIterator { k: None } + } - #[verifier(when_used_as_spec(end_spec))] - pub fn end() -> (s: Self) - ensures s.k.is_None() - { - KeyIterator { k: None } - } + #[verifier(when_used_as_spec(end_spec))] + pub fn end() -> (s: Self) + ensures + s.k.is_None(), + { + KeyIterator { k: None } + } - pub open spec fn is_end_spec(&self) -> bool { - self.k.is_None() - } + pub open spec fn is_end_spec(&self) -> bool { + self.k.is_None() + } - #[verifier(when_used_as_spec(is_end_spec))] - pub fn is_end(&self) -> (b: bool) - ensures b == self.is_end_spec() - { - matches!(self.k, None) - } + #[verifier(when_used_as_spec(is_end_spec))] + pub fn is_end(&self) -> (b: bool) + ensures + b == self.is_end_spec(), + { + matches!(self.k, None) + } - pub open spec fn get_spec(&self) -> &K - recommends self.k.is_some(), - { - &self.k.get_Some_0() - } + pub open spec fn get_spec(&self) -> &K + recommends + self.k.is_some(), + { + &self.k.get_Some_0() + } - #[verifier(when_used_as_spec(get_spec))] - pub fn get(&self) -> (k: &K) - requires !self.is_end(), - ensures k == self.get_spec(), - { - self.k.as_ref().unwrap() - } + #[verifier(when_used_as_spec(get_spec))] + pub fn get(&self) -> (k: &K) + requires + !self.is_end(), + ensures + k == self.get_spec(), + { + self.k.as_ref().unwrap() + } // fn cmp(&self, other: &Self) -> (o: Ordering) // ensures o == self.cmp_spec(*other), @@ -980,246 +1075,274 @@ mod delegation_map_v { // } // } // - // #[verifier(when_used_as_spec(lt_spec))] - pub fn lt(&self, other: &Self) -> (b: bool) - ensures b == self.lt_spec(*other), - { - (!self.is_end() && other.is_end()) - || (!self.is_end() && !other.is_end() && self.k.as_ref().unwrap().cmp(&other.k.as_ref().unwrap()).is_lt()) - } - - spec fn leq_spec(self, other: Self) -> bool { - self.lt_spec(other) || self == other - } - - spec fn geq_K(self, other: K) -> bool { - !self.lt_spec(KeyIterator::new_spec(other)) - } + // #[verifier(when_used_as_spec(lt_spec))] + pub fn lt(&self, other: &Self) -> (b: bool) + ensures + b == self.lt_spec(*other), + { + (!self.is_end() && other.is_end()) || (!self.is_end() && !other.is_end() + && self.k.as_ref().unwrap().cmp(&other.k.as_ref().unwrap()).is_lt()) + } - // Ivy calls this `done` - spec fn above_spec(&self, k: K) -> bool { - self.k.is_None() || k.cmp_spec(self.k.get_Some_0()).lt() - } + spec fn leq_spec(self, other: Self) -> bool { + 
self.lt_spec(other) || self == other + } - // Is this iterator strictly above the supplied value? - #[verifier(when_used_as_spec(above_spec))] - fn above(&self, k: K) -> (b: bool) - ensures b == self.above_spec(k), - { - self.is_end() || k.cmp(&self.k.as_ref().unwrap().clone()).is_lt() - } + spec fn geq_K(self, other: K) -> bool { + !self.lt_spec(KeyIterator::new_spec(other)) + } - // Is k in the range [lhs, rhs) - pub open spec fn between(lhs: Self, ki: Self, rhs: Self) -> bool { - !ki.lt_spec(lhs) && ki.lt_spec(rhs) - } + // Ivy calls this `done` + spec fn above_spec(&self, k: K) -> bool { + self.k.is_None() || k.cmp_spec(self.k.get_Some_0()).lt() } - pub fn vec_erase(v: &mut Vec, start: usize, end: usize) - requires - start <= end <= old(v).len(), + // Is this iterator strictly above the supplied value? + #[verifier(when_used_as_spec(above_spec))] + fn above(&self, k: K) -> (b: bool) ensures - true, - v@ == old(v)@.subrange(0, start as int) + old(v)@.subrange(end as int, old(v)@.len() as int), + b == self.above_spec(k), { - let mut deleted = 0; - proof { - assert_seqs_equal!(v@, + self.is_end() || k.cmp(&self.k.as_ref().unwrap().clone()).is_lt() + } + + // Is k in the range [lhs, rhs) + pub open spec fn between(lhs: Self, ki: Self, rhs: Self) -> bool { + !ki.lt_spec(lhs) && ki.lt_spec(rhs) + } +} + +pub fn vec_erase(v: &mut Vec, start: usize, end: usize) + requires + start <= end <= old(v).len(), + ensures + true, + v@ == old(v)@.subrange(0, start as int) + old(v)@.subrange( + end as int, + old(v)@.len() as int, + ), +{ + let mut deleted = 0; + proof { + assert_seqs_equal!(v@, old(v)@.subrange(0, start as int) + old(v)@.subrange(start as int + deleted as int, old(v)@.len() as int)); - } - while deleted < end - start - invariant - start <= end <= old(v)@.len(), - v@.len() == old(v)@.len() - deleted, - 0 <= deleted <= end - start, - v@ == old(v)@.subrange(0, start as int) + old(v)@.subrange(start as int + deleted as int, old(v)@.len() as int), - { - v.remove(start); - deleted = deleted + 1; - proof { - assert_seqs_equal!(v@, + } + while deleted < end - start + invariant + start <= end <= old(v)@.len(), + v@.len() == old(v)@.len() - deleted, + 0 <= deleted <= end - start, + v@ == old(v)@.subrange(0, start as int) + old(v)@.subrange( + start as int + deleted as int, + old(v)@.len() as int, + ), + { + v.remove(start); + deleted = deleted + 1; + proof { + assert_seqs_equal!(v@, old(v)@.subrange(0, start as int) + old(v)@.subrange(start as int + deleted as int, old(v)@.len() as int)); - } } } +} + +// TODO: Restore this to be generic over values V +struct StrictlyOrderedMap< + #[verifier(maybe_negative)] + K: KeyTrait + VerusClone, +> { + keys: StrictlyOrderedVec, + vals: Vec, + m: Ghost>, +} - // TODO: Restore this to be generic over values V - struct StrictlyOrderedMap<#[verifier(maybe_negative)] K: KeyTrait + VerusClone> { - keys: StrictlyOrderedVec, - vals: Vec, - m: Ghost>, +impl StrictlyOrderedMap { + pub closed spec fn view(self) -> Map { + self.m@ } - impl StrictlyOrderedMap { - pub closed spec fn view(self) -> Map { - self.m@ - } + pub closed spec fn map_valid( + self, + ) -> bool // recommends self.keys@.len() == self.vals.len() + // error: public function requires cannot refer to private items + { + &&& self.m@.dom().finite() + &&& self.m@.dom() == self.keys@.to_set() + &&& forall|i| + 0 <= i < self.keys@.len() ==> #[trigger] (self.m@[self.keys@.index(i)]) + == self.vals@.index(i) + } - pub closed spec fn map_valid(self) -> bool - // recommends self.keys@.len() == self.vals.len() // 
error: public function requires cannot refer to private items - { - &&& self.m@.dom().finite() - &&& self.m@.dom() == self.keys@.to_set() - &&& forall |i| 0 <= i < self.keys@.len() ==> #[trigger] (self.m@[self.keys@.index(i)]) == self.vals@.index(i) - } + pub closed spec fn valid(self) -> bool { + &&& self.keys.valid() + &&& self.keys@.len() == self.vals.len() + &&& self.map_valid() + } - pub closed spec fn valid(self) -> bool { - &&& self.keys.valid() - &&& self.keys@.len() == self.vals.len() - &&& self.map_valid() - } + /// We hold no keys in the range (lo, hi) + spec fn gap(self, lo: KeyIterator, hi: KeyIterator) -> bool { + forall|ki| lo.lt_spec(ki) && ki.lt_spec(hi) ==> !(#[trigger] self@.contains_key(*ki.get())) + } - /// We hold no keys in the range (lo, hi) - spec fn gap(self, lo: KeyIterator, hi: KeyIterator) -> bool { - forall |ki| lo.lt_spec(ki) && ki.lt_spec(hi) ==> !(#[trigger] self@.contains_key(*ki.get())) - } + proof fn mind_the_gap(self) + ensures + forall|w, x, y, z| + self.gap(w, x) && self.gap(y, z) && #[trigger] y.lt_spec(x) ==> #[trigger] self.gap( + w, + z, + ), + forall|w, x, y: KeyIterator, z| #[trigger] + self.gap(w, x) && y.geq_spec(w) && x.geq_spec(z) ==> #[trigger] self.gap(y, z), + forall|l: KeyIterator, k, m| #[trigger] + self.gap(k, m) ==> !(k.lt_spec(l) && l.lt_spec(m) && #[trigger] self@.contains_key( + *l.get(), + )), + { + K::cmp_properties(); + } - proof fn mind_the_gap(self) - ensures - forall|w, x, y, z| self.gap(w, x) && self.gap(y, z) && #[trigger] y.lt_spec(x) ==> #[trigger] self.gap(w, z), - forall|w, x, y: KeyIterator, z| #[trigger] self.gap(w, x) && y.geq_spec(w) && x.geq_spec(z) ==> #[trigger] self.gap(y, z), - forall|l:KeyIterator, k, m| #[trigger] self.gap(k, m) ==> !(k.lt_spec(l) && l.lt_spec(m) && #[trigger] self@.contains_key(*l.get())) - { - K::cmp_properties(); - } + proof fn gap_means_empty(self, lo: KeyIterator, hi: KeyIterator, k: KeyIterator) + requires + self.gap(lo, hi), + lo.lt_spec(k) && k.lt_spec(hi), + self@.contains_key(*k.get()), + ensures + false, + { + self.mind_the_gap(); + } - proof fn gap_means_empty(self, lo:KeyIterator, hi:KeyIterator, k:KeyIterator) - requires - self.gap(lo, hi), - lo.lt_spec(k) && k.lt_spec(hi), - self@.contains_key(*k.get()), - ensures - false, - { - self.mind_the_gap(); - } + proof fn choose_gap_violator(self, lo: KeyIterator, hi: KeyIterator) -> (r: KeyIterator< + K, + >) + requires + !self.gap(lo, hi), + ensures + lo.lt_spec(r) && r.lt_spec(hi) && self@.contains_key(*r.get()), + { + choose|r| #![auto] lo.lt_spec(r) && r.lt_spec(hi) && self@.contains_key(*r.get_spec()) + } - proof fn choose_gap_violator(self, lo:KeyIterator, hi:KeyIterator) -> (r: KeyIterator) - requires - !self.gap(lo, hi), - ensures - lo.lt_spec(r) && r.lt_spec(hi) && self@.contains_key(*r.get()), - { - choose |r| #![auto] lo.lt_spec(r) && r.lt_spec(hi) && self@.contains_key(*r.get_spec()) + fn new() -> (s: Self) + ensures + s.valid(), + s@ == Map::::empty(), + { + let keys = StrictlyOrderedVec::new(); + let m = Ghost(Map::empty()); + proof { + assert_sets_equal!(m@.dom(), keys@.to_set()); } + StrictlyOrderedMap { keys, vals: Vec::new(), m } + } - - fn new() -> (s: Self) - ensures - s.valid(), - s@ == Map::::empty(), + fn find_key(&self, k: &K) -> (o: Option) + requires + self.valid(), + ensures + match o { + None => !self@.contains_key(*k), + Some(i) => 0 <= i < self.keys@.len() && self.keys@[i as int] == k, + }, + { + let mut i = 0; + while i < self.keys.len() + invariant + forall|j| 0 <= j < i ==> self.keys@[j] != k, { - let 
keys = StrictlyOrderedVec::new(); - let m = Ghost(Map::empty()); - proof { - assert_sets_equal!(m@.dom(), keys@.to_set()); - } - StrictlyOrderedMap { - keys, - vals: Vec::new(), - m, + //println!("Loop {} of find_key", i); + if self.keys.index(i).cmp(&k).is_eq() { + proof { + K::cmp_properties(); + } + return Some(i); + } else { + proof { + K::cmp_properties(); + } } + i = i + 1; } + return None; + } - fn find_key(&self, k: &K) -> (o: Option) - requires self.valid(), - ensures - match o { - None => !self@.contains_key(*k), - Some(i) => 0 <= i < self.keys@.len() && self.keys@[i as int] == k, - }, - { - let mut i = 0; - while i < self.keys.len() - invariant forall |j| 0 <= j < i ==> self.keys@[j] != k, - { - //println!("Loop {} of find_key", i); - if self.keys.index(i).cmp(&k).is_eq() { - proof { - K::cmp_properties(); - } - return Some(i); + // All values in the index range [lo, hi] are `v` + // Second return value says that all values in the index range [lo, hi) are `v`, + // but the value at hi is not `v` + fn values_agree(&self, lo: usize, hi: usize, v: &ID) -> (ret: (bool, bool)) + requires + self.valid(), + 0 <= lo <= hi < self.keys@.len(), + ensures + ret.0 == forall|i| #![auto] lo <= i <= hi ==> self.vals@[i]@ == v@, + !ret.0 ==> (ret.1 == (self.vals@[hi as int]@ != v@ && forall|i| + #![auto] + lo <= i < hi ==> self.vals@[i]@ == v@)), + { + let mut i = lo; + while i <= hi + invariant + lo <= i, + hi < self.keys@.len() as usize == self.vals@.len(), + forall|j| #![auto] lo <= j < i ==> self.vals@[j]@ == v@, + { + let eq = do_end_points_match(&self.vals[i], v); + if !eq { + if i == hi { + return (false, true); } else { - proof { - K::cmp_properties(); - } + return (false, false); } - i = i + 1; - } - return None; - } - - // All values in the index range [lo, hi] are `v` - // Second return value says that all values in the index range [lo, hi) are `v`, - // but the value at hi is not `v` - fn values_agree(&self, lo: usize, hi: usize, v: &ID) -> (ret:(bool, bool)) - requires - self.valid(), - 0 <= lo <= hi < self.keys@.len(), - ensures - ret.0 == forall |i| #![auto] lo <= i <= hi ==> self.vals@[i]@ == v@, - !ret.0 ==> (ret.1 == (self.vals@[hi as int]@ != v@ && forall |i| #![auto] lo <= i < hi ==> self.vals@[i]@ == v@)), - { - let mut i = lo; - while i <= hi - invariant - lo <= i, - hi < self.keys@.len() as usize == self.vals@.len(), - forall |j| #![auto] lo <= j < i ==> self.vals@[j]@ == v@, - { - let eq = do_end_points_match(&self.vals[i], v); - if !eq { - if i == hi { - return (false, true); - } else { - return (false, false); - } - } else { - proof { - //K::cmp_properties(); - } + } else { + proof { + //K::cmp_properties(); } - i = i + 1; } - (true, true) + i = i + 1; } + (true, true) + } - // All keys in the range [keys[lo .. hi]] are `v` - // Second return value says that all keys in the index range [keys[lo, hi)] are `v`, - // but the value at hi is not `v` - fn keys_in_index_range_agree(&self, lo: usize, hi: usize, v: &ID) -> (ret:(bool, bool)) - requires - self.valid(), - 0 <= lo <= hi < self.keys@.len(), - ensures - ret.0 == forall |i| #![auto] lo <= i <= hi ==> self@[self.keys@[i]]@ == v@, - !ret.0 ==> (ret.1 == (self@[self.keys@[hi as int]]@ != v@ && (forall |i| #![auto] lo <= i < hi ==> self@[self.keys@[i]]@ == v@))), - { - let (agree, almost) = self.values_agree(lo, hi, v); - proof { - if agree { + // All keys in the range [keys[lo .. 
hi]] are `v` + // Second return value says that all keys in the index range [keys[lo, hi)] are `v`, + // but the value at hi is not `v` + fn keys_in_index_range_agree(&self, lo: usize, hi: usize, v: &ID) -> (ret: (bool, bool)) + requires + self.valid(), + 0 <= lo <= hi < self.keys@.len(), + ensures + ret.0 == forall|i| #![auto] lo <= i <= hi ==> self@[self.keys@[i]]@ == v@, + !ret.0 ==> (ret.1 == (self@[self.keys@[hi as int]]@ != v@ && (forall|i| + #![auto] + lo <= i < hi ==> self@[self.keys@[i]]@ == v@))), + { + let (agree, almost) = self.values_agree(lo, hi, v); + proof { + if agree { + } else { + assert(!forall|i| #![auto] lo <= i <= hi ==> self.vals@[i]@ == v@); + let i = choose|i| #![auto] !(lo <= i <= hi ==> self.vals@.index(i)@ == v@); + assert(self.vals@.index(i)@ != v@); + assert(self@[self.keys@[i]]@ == self.vals@.index(i)@); + if almost { } else { - assert(!forall |i| #![auto] lo <= i <= hi ==> self.vals@[i]@ == v@); - let i = choose |i| #![auto] !(lo <= i <= hi ==> self.vals@.index(i)@ == v@); - assert(self.vals@.index(i)@ != v@); - assert(self@[self.keys@[i]]@ == self.vals@.index(i)@); - if almost { + assert(!(self.vals@[hi as int]@ != v@ && forall|i| + #![auto] + lo <= i < hi ==> self.vals@[i]@ == v@)); + if self.vals@[hi as int]@ == v@ { } else { - assert(!(self.vals@[hi as int]@ != v@ && forall |i| #![auto] lo <= i < hi ==> self.vals@[i]@ == v@)); - if self.vals@[hi as int]@ == v@ { - } else { - let j = choose |j| #![auto] lo <= j < hi && self.vals@[j]@ != v@; - assert(self@[self.keys@[j]]@ != v@); - } + let j = choose|j| #![auto] lo <= j < hi && self.vals@[j]@ != v@; + assert(self@[self.keys@[j]]@ != v@); } } } - (agree, almost) } + (agree, almost) + } // // All keys present in the range [lo .. hi] map `v` // fn keys_agree(&self, ghost(lo): Ghost<&K>, lo_index: usize, ghost(hi): Ghost<&K>, hi_index: usize, v: &ID) -> (b: bool) @@ -1260,281 +1383,285 @@ mod delegation_map_v { // } // ret // } - - - fn get<'a>(&'a self, k: &K) -> (o: Option<&'a ID>) - requires - self.valid(), - ensures - match o { - None => !self@.contains_key(*k), - Some(v) => self@[*k] == v, - } - { - match self.find_key(k) { - None => None, - Some(i) => Some(&self.vals[i]), - } + fn get<'a>(&'a self, k: &K) -> (o: Option<&'a ID>) + requires + self.valid(), + ensures + match o { + None => !self@.contains_key(*k), + Some(v) => self@[*k] == v, + }, + { + match self.find_key(k) { + None => None, + Some(i) => Some(&self.vals[i]), } + } - fn set(&mut self, k: K, v: ID) - requires - old(self).valid(), - ensures - self.valid(), - self@ == old(self)@.insert(k, v), - forall |lo, hi| self.gap(lo, hi) <==> - old(self).gap(lo, hi) - && !(lo.lt_spec(KeyIterator::new_spec(k)) - && KeyIterator::new_spec(k).lt_spec(hi)), - { - match self.find_key(&k) { - Some(i) => { - self.vals.set(i, v); - self.m = Ghost(self.m@.insert(k, v)); - proof { - assert_sets_equal!(self.m@.dom() == self.keys@.to_set()); - } - }, - None => { - let index = self.keys.insert(k.clone()); - self.vals.insert(index, v); - self.m = Ghost(self.m@.insert(k, v)); + fn set(&mut self, k: K, v: ID) + requires + old(self).valid(), + ensures + self.valid(), + self@ == old(self)@.insert(k, v), + forall|lo, hi| + self.gap(lo, hi) <==> old(self).gap(lo, hi) && !(lo.lt_spec( + KeyIterator::new_spec(k), + ) && KeyIterator::new_spec(k).lt_spec(hi)), + { + match self.find_key(&k) { + Some(i) => { + self.vals.set(i, v); + self.m = Ghost(self.m@.insert(k, v)); + proof { + assert_sets_equal!(self.m@.dom() == self.keys@.to_set()); } - } - assert forall |lo, hi| 
self.gap(lo, hi) <==> - old(self).gap(lo, hi) - && !(lo.lt_spec(KeyIterator::new_spec(k)) - && KeyIterator::new_spec(k).lt_spec(hi)) by { - self.mind_the_gap(); - old(self).mind_the_gap(); - if old(self).gap(lo, hi) && !(lo.lt_spec(KeyIterator::new_spec(k)) && KeyIterator::new_spec(k).lt_spec(hi)) { - assert forall |ki| lo.lt_spec(ki) && ki.lt_spec(hi) implies !(#[trigger] self@.contains_key(*ki.get())) by { - // TODO: This was the previous (flaky) proof: - // K::cmp_properties(); - // - assert_by_contradiction!(!old(self)@.contains_key(*ki.get()), { + }, + None => { + let index = self.keys.insert(k.clone()); + self.vals.insert(index, v); + self.m = Ghost(self.m@.insert(k, v)); + }, + } + assert forall|lo, hi| + self.gap(lo, hi) <==> old(self).gap(lo, hi) && !(lo.lt_spec(KeyIterator::new_spec(k)) + && KeyIterator::new_spec(k).lt_spec(hi)) by { + self.mind_the_gap(); + old(self).mind_the_gap(); + if old(self).gap(lo, hi) && !(lo.lt_spec(KeyIterator::new_spec(k)) + && KeyIterator::new_spec(k).lt_spec(hi)) { + assert forall|ki| lo.lt_spec(ki) && ki.lt_spec(hi) implies !( + #[trigger] self@.contains_key(*ki.get())) by { + // TODO: This was the previous (flaky) proof: + // K::cmp_properties(); + // + assert_by_contradiction!(!old(self)@.contains_key(*ki.get()), { old(self).gap_means_empty(lo, hi, ki); }); - }; - assert(self.gap(lo, hi)); - } - - if self.gap(lo, hi) { - assert forall |ki| lo.lt_spec(ki) && ki.lt_spec(hi) implies !(#[trigger] old(self)@.contains_key(*ki.get())) by { - assert_by_contradiction!(!(old(self)@.contains_key(*ki.get())), { + }; + assert(self.gap(lo, hi)); + } + if self.gap(lo, hi) { + assert forall|ki| lo.lt_spec(ki) && ki.lt_spec(hi) implies !(#[trigger] old( + self, + )@.contains_key(*ki.get())) by { + assert_by_contradiction!(!(old(self)@.contains_key(*ki.get())), { assert(self@.contains_key(*ki.get())); K::cmp_properties(); }); - }; - assert(old(self).gap(lo, hi)); - assert_by_contradiction!(!(lo.lt_spec(KeyIterator::new_spec(k)) && KeyIterator::new_spec(k).lt_spec(hi)), { + }; + assert(old(self).gap(lo, hi)); + assert_by_contradiction!(!(lo.lt_spec(KeyIterator::new_spec(k)) && KeyIterator::new_spec(k).lt_spec(hi)), { assert(self@.contains_key(k)); self.gap_means_empty(lo, hi, KeyIterator::new_spec(k)); }); - } - }; - } + } + }; + } - spec fn greatest_lower_bound_spec(self, iter: KeyIterator, glb: KeyIterator) -> bool { - (glb == iter || glb.lt_spec(iter)) && - (forall|k| KeyIterator::new_spec(k) != glb && #[trigger] self@.contains_key(k) && iter.above(k) ==> glb.above(k)) && - (!iter.is_end_spec() ==> - glb.k.is_Some() && - self@.contains_key(glb.k.get_Some_0()) && - // There are no keys in the interval (glb, hi), and iter falls into that gap - (exists|hi| #[trigger] self.gap(glb, hi) && #[trigger] KeyIterator::between(glb, iter, hi))) - } + spec fn greatest_lower_bound_spec(self, iter: KeyIterator, glb: KeyIterator) -> bool { + (glb == iter || glb.lt_spec(iter)) && (forall|k| + KeyIterator::new_spec(k) != glb && #[trigger] self@.contains_key(k) && iter.above(k) + ==> glb.above(k)) && (!iter.is_end_spec() ==> glb.k.is_Some() && self@.contains_key( + glb.k.get_Some_0(), + ) + && + // There are no keys in the interval (glb, hi), and iter falls into that gap + (exists|hi| #[trigger] self.gap(glb, hi) && #[trigger] KeyIterator::between(glb, iter, hi))) + } - // Finds the index of the largest iterator <= iter - fn greatest_lower_bound_index(&self, iter: &KeyIterator) -> (index: usize) - requires - self.valid(), - self@.contains_key(K::zero_spec()), - ensures - 0 <= 
index < self.keys@.len(), - self.greatest_lower_bound_spec(*iter, KeyIterator::new_spec(self.keys@[index as int])), - { - let mut bound = 0; - let mut i = 1; - - // Prove the initial starting condition - assert forall |j:nat| j < i implies iter.geq_K(#[trigger]self.keys@.index(j as int)) by { - let z = K::zero_spec(); - assert(self.keys@.contains(z)); - let n = choose |n: int| 0 <= n < self.keys@.len() && self.keys@[n] == z; - K::zero_properties(); - assert_by_contradiction!(n == 0, { + // Finds the index of the largest iterator <= iter + fn greatest_lower_bound_index(&self, iter: &KeyIterator) -> (index: usize) + requires + self.valid(), + self@.contains_key(K::zero_spec()), + ensures + 0 <= index < self.keys@.len(), + self.greatest_lower_bound_spec(*iter, KeyIterator::new_spec(self.keys@[index as int])), + { + let mut bound = 0; + let mut i = 1; + // Prove the initial starting condition + assert forall|j: nat| j < i implies iter.geq_K(#[trigger] self.keys@.index(j as int)) by { + let z = K::zero_spec(); + assert(self.keys@.contains(z)); + let n = choose|n: int| 0 <= n < self.keys@.len() && self.keys@[n] == z; + K::zero_properties(); + assert_by_contradiction!(n == 0, { assert(self.keys@[0].cmp_spec(self.keys@[n]).lt()); K::cmp_properties(); }); - assert(self.keys@[0] == z); - K::cmp_properties(); - } - - // Find the glb's index (bound) - while i < self.keys.len() - invariant - 1 <= i <= self.keys@.len(), - bound == i - 1, - forall |j:nat| j < i ==> iter.geq_K(#[trigger]self.keys@.index(j as int)), - ensures - bound == i - 1, - (i == self.keys@.len() && - forall |j:nat| j < i ==> iter.geq_K(#[trigger]self.keys@.index(j as int))) - || (i < self.keys@.len() && - !iter.geq_K(self.keys@.index(i as int)) && - forall |j:nat| j < i ==> iter.geq_K(#[trigger]self.keys@.index(j as int))), - { - if iter.lt(&KeyIterator::new(self.keys.index(i))) { - // Reached a key that's too large - break; - } - bound = i; - i = i + 1; - } - - let glb = KeyIterator::new(self.keys.index(bound)); + assert(self.keys@[0] == z); + K::cmp_properties(); + } + // Find the glb's index (bound) - assert forall |k| - KeyIterator::new_spec(k) != glb - && #[trigger] self@.contains_key(k) - && iter.above(k) - implies glb.above(k) by { - K::cmp_properties(); + while i < self.keys.len() + invariant + 1 <= i <= self.keys@.len(), + bound == i - 1, + forall|j: nat| j < i ==> iter.geq_K(#[trigger] self.keys@.index(j as int)), + ensures + bound == i - 1, + (i == self.keys@.len() && forall|j: nat| + j < i ==> iter.geq_K(#[trigger] self.keys@.index(j as int))) || (i + < self.keys@.len() && !iter.geq_K(self.keys@.index(i as int)) && forall|j: nat| + j < i ==> iter.geq_K(#[trigger] self.keys@.index(j as int))), + { + if iter.lt(&KeyIterator::new(self.keys.index(i))) { + // Reached a key that's too large + break ; } - - proof { - if !iter.is_end_spec() { - if i == self.keys@.len() { - let hi = KeyIterator::end(); - // Prove self.gap(glb, hi) - assert forall |ki| glb.lt_spec(ki) && ki.lt_spec(hi) implies !(#[trigger] self@.contains_key(*ki.get())) by - { - K::cmp_properties(); - } - assert(self.gap(glb, hi)); - assert(KeyIterator::between(glb, *iter, hi)) by { - K::cmp_properties(); - } - } else { - let hi = KeyIterator::new_spec(self.keys@[i as int]); - // Prove self.gap(glb, hi) - assert forall |ki| glb.lt_spec(ki) && ki.lt_spec(hi) implies !(#[trigger] self@.contains_key(*ki.get())) by - { - K::cmp_properties(); - } - assert(self.gap(glb, hi)); - assert(KeyIterator::between(glb, *iter, hi)) by { - assert(iter.lt_spec(hi)); - 
K::cmp_properties(); - } + bound = i; + i = i + 1; + } + let glb = KeyIterator::new(self.keys.index(bound)); + assert forall|k| + KeyIterator::new_spec(k) != glb && #[trigger] self@.contains_key(k) && iter.above( + k, + ) implies glb.above(k) by { + K::cmp_properties(); + } + proof { + if !iter.is_end_spec() { + if i == self.keys@.len() { + let hi = KeyIterator::end(); + // Prove self.gap(glb, hi) + assert forall|ki| glb.lt_spec(ki) && ki.lt_spec(hi) implies !( + #[trigger] self@.contains_key(*ki.get())) by { + K::cmp_properties(); + } + assert(self.gap(glb, hi)); + assert(KeyIterator::between(glb, *iter, hi)) by { + K::cmp_properties(); + } + } else { + let hi = KeyIterator::new_spec(self.keys@[i as int]); + // Prove self.gap(glb, hi) + assert forall|ki| glb.lt_spec(ki) && ki.lt_spec(hi) implies !( + #[trigger] self@.contains_key(*ki.get())) by { + K::cmp_properties(); + } + assert(self.gap(glb, hi)); + assert(KeyIterator::between(glb, *iter, hi)) by { + assert(iter.lt_spec(hi)); + K::cmp_properties(); } } } - - assert (glb == iter || glb.lt_spec(*iter)) by { - K::cmp_properties(); - } - return bound; } - - // Finds the largest iterator <= iter - fn greatest_lower_bound(&self, iter: &KeyIterator) -> (glb: KeyIterator) - requires - self.valid(), - self@.contains_key(K::zero_spec()), - ensures - self.greatest_lower_bound_spec(*iter, glb), - { - let index = self.greatest_lower_bound_index(iter); - let glb = KeyIterator::new(self.keys.index(index)); - glb + assert(glb == iter || glb.lt_spec(*iter)) by { + K::cmp_properties(); } + return bound; + } - // Remove all keys in the range [lo, hi) - fn erase(&mut self, lo: &KeyIterator, hi: &KeyIterator) - requires - old(self).valid(), - ensures - self.valid(), - forall |k| { + // Finds the largest iterator <= iter + fn greatest_lower_bound(&self, iter: &KeyIterator) -> (glb: KeyIterator) + requires + self.valid(), + self@.contains_key(K::zero_spec()), + ensures + self.greatest_lower_bound_spec(*iter, glb), + { + let index = self.greatest_lower_bound_index(iter); + let glb = KeyIterator::new(self.keys.index(index)); + glb + } + + // Remove all keys in the range [lo, hi) + fn erase(&mut self, lo: &KeyIterator, hi: &KeyIterator) + requires + old(self).valid(), + ensures + self.valid(), + forall|k| + { let ki = KeyIterator::new_spec(k); (if ki.geq_spec(*lo) && ki.lt_spec(*hi) { !(#[trigger] self@.contains_key(k)) } else { - (old(self)@.contains_key(k) ==> - self@.contains_key(k) && self@[k] == old(self)@[k]) - && (self@.contains_key(k) ==> old(self)@.contains_key(k)) - })}, - forall |x, y| self.gap(x, y) <==> ({ - ||| old(self).gap(x, y) - ||| (old(self).gap(x, *lo) && - old(self).gap(*hi, y) && - (hi.geq_spec(y) || hi.is_end_spec() || !self@.contains_key(*hi.get()))) - }), + (old(self)@.contains_key(k) ==> self@.contains_key(k) && self@[k] == old( + self, + )@[k]) && (self@.contains_key(k) ==> old(self)@.contains_key(k)) + }) + }, + forall|x, y| + self.gap(x, y) <==> ({ + ||| old(self).gap(x, y) + ||| (old(self).gap(x, *lo) && old(self).gap(*hi, y) && (hi.geq_spec(y) + || hi.is_end_spec() || !self@.contains_key(*hi.get()))) + }), + { + // Find the point where keys are >= lo + let mut start = 0; + while start < self.keys.len() && lo.above(self.keys.index(start)) + invariant + self.valid(), + 0 <= start <= self.keys@.len(), + forall|j| #![auto] 0 <= j < start ==> lo.above(self.keys@.index(j)), { - // Find the point where keys are >= lo - let mut start = 0; - while start < self.keys.len() && lo.above(self.keys.index(start)) - invariant - self.valid(), - 0 
<= start <= self.keys@.len(), - forall |j| #![auto] 0 <= j < start ==> lo.above(self.keys@.index(j)) - { - start = start + 1; - } - - // Find the first point where keys are >= hi - let mut end = start; - while end < self.keys.len() && hi.above(self.keys.index(end)) - invariant - self.valid(), - start <= end <= self.keys@.len(), - forall |j| #![auto] start <= j < end ==> hi.above(self.keys@[j]) - { - end = end + 1; - } - - //assert(forall |i| #![auto] 0 <= i < start ==> lo.above(self.keys@.index(i))); - assert forall |i| start <= i < end implies !lo.above(#[trigger] self.keys@[i]) && hi.above(self.keys@[i]) by { - K::cmp_properties(); - } - - self.keys.erase(start, end); - vec_erase(&mut self.vals, start, end); - self.m = Ghost(Map::new(|k| self.keys@.to_set().contains(k), - |k| { let i = choose |i| 0 <= i < self.keys@.len() && self.keys@[i] == k; - self.vals@[i]})); - proof { - let ks = self.keys.to_set(); - assert(self.keys@.to_set() == ks); - assert_sets_equal!(self.m@.dom(), ks); - } + start = start + 1; + } + // Find the first point where keys are >= hi - assert forall |k| { - let ki = KeyIterator::new_spec(k); - (if ki.geq_spec(*lo) && ki.lt_spec(*hi) { - !(#[trigger] self@.contains_key(k)) - } else { - old(self)@.contains_key(k) ==> - self@.contains_key(k) && self@[k] == old(self)@[k] - })} by { + let mut end = start; + while end < self.keys.len() && hi.above(self.keys.index(end)) + invariant + self.valid(), + start <= end <= self.keys@.len(), + forall|j| #![auto] start <= j < end ==> hi.above(self.keys@[j]), + { + end = end + 1; + } + //assert(forall |i| #![auto] 0 <= i < start ==> lo.above(self.keys@.index(i))); + assert forall|i| start <= i < end implies !lo.above(#[trigger] self.keys@[i]) && hi.above( + self.keys@[i], + ) by { + K::cmp_properties(); + } + self.keys.erase(start, end); + vec_erase(&mut self.vals, start, end); + self.m = Ghost( + Map::new( + |k| self.keys@.to_set().contains(k), + |k| + { + let i = choose|i| 0 <= i < self.keys@.len() && self.keys@[i] == k; + self.vals@[i] + }, + ), + ); + proof { + let ks = self.keys.to_set(); + assert(self.keys@.to_set() == ks); + assert_sets_equal!(self.m@.dom(), ks); + } + assert forall|k| + { let ki = KeyIterator::new_spec(k); - if ki.geq_spec(*lo) && ki.lt_spec(*hi) { - assert_by_contradiction!(!self@.contains_key(k), { + (if ki.geq_spec(*lo) && ki.lt_spec(*hi) { + !(#[trigger] self@.contains_key(k)) + } else { + old(self)@.contains_key(k) ==> self@.contains_key(k) && self@[k] == old( + self, + )@[k] + }) + } by { + let ki = KeyIterator::new_spec(k); + if ki.geq_spec(*lo) && ki.lt_spec(*hi) { + assert_by_contradiction!(!self@.contains_key(k), { K::cmp_properties(); }); - } } - assert forall |x, y| self.gap(x, y) implies ({ - ||| old(self).gap(x, y) - ||| (old(self).gap(x, *lo) && - old(self).gap(*hi, y) && - (hi.geq_spec(y) || hi.is_end_spec() || !self@.contains_key(*hi.get()))) - }) by { - assert_by_contradiction!( + } + assert forall|x, y| self.gap(x, y) implies ({ + ||| old(self).gap(x, y) + ||| (old(self).gap(x, *lo) && old(self).gap(*hi, y) && (hi.geq_spec(y) + || hi.is_end_spec() || !self@.contains_key(*hi.get()))) + }) by { + assert_by_contradiction!( old(self).gap(x, y) || (old(self).gap(x, *lo) && old(self).gap(*hi, y) && @@ -1565,188 +1692,226 @@ mod delegation_map_v { } assert(self@.contains_key(*ki.get())); }); - } - assert forall |x, y| ({ - ||| old(self).gap(x, y) - ||| (old(self).gap(x, *lo) && - old(self).gap(*hi, y) && - (hi.geq_spec(y) || hi.is_end_spec() || !self@.contains_key(*hi.get()))) - }) implies 
#[trigger] self.gap(x, y) by { - if old(self).gap(x, y) { - assert_by_contradiction!(self.gap(x, y), { + } + assert forall|x, y| + ({ + ||| old(self).gap(x, y) + ||| (old(self).gap(x, *lo) && old(self).gap(*hi, y) && (hi.geq_spec(y) + || hi.is_end_spec() || !self@.contains_key(*hi.get()))) + }) implies #[trigger] self.gap(x, y) by { + if old(self).gap(x, y) { + assert_by_contradiction!(self.gap(x, y), { //let ki = self.choose_gap_violator(x, y); // Flaky proof -- sometimes needs this line }); - } - - if old(self).gap(x, *lo) && old(self).gap(*hi, y) && - (hi.geq_spec(y) || hi.is_end_spec() || !self@.contains_key(*hi.get())) { - assert forall |ki| x.lt_spec(ki) && ki.lt_spec(y) implies !(#[trigger] self@.contains_key(*ki.get())) by { - assert(KeyIterator::between(x, ki, y)) by { K::cmp_properties(); }; - K::cmp_properties(); // Flaky - if ki.lt_spec(*lo) { - // flaky without assert_by_contradiction (and maybe still flaky) - assert_by_contradiction!(!(self@.contains_key(*ki.get())), { + } + if old(self).gap(x, *lo) && old(self).gap(*hi, y) && (hi.geq_spec(y) || hi.is_end_spec() + || !self@.contains_key(*hi.get())) { + assert forall|ki| x.lt_spec(ki) && ki.lt_spec(y) implies !( + #[trigger] self@.contains_key(*ki.get())) by { + assert(KeyIterator::between(x, ki, y)) by { + K::cmp_properties(); + }; + K::cmp_properties(); // Flaky + if ki.lt_spec(*lo) { + // flaky without assert_by_contradiction (and maybe still flaky) + assert_by_contradiction!(!(self@.contains_key(*ki.get())), { assert(old(self)@.contains_key(*ki.get())); }); - } else if hi.lt_spec(ki) { - assert_by_contradiction!(!(self@.contains_key(*ki.get())), { + } else if hi.lt_spec(ki) { + assert_by_contradiction!(!(self@.contains_key(*ki.get())), { assert(old(self)@.contains_key(*ki.get())); }); - } else if ki == lo { - assert(!(self@.contains_key(*ki.get()))); - } else if ki == hi { - assert(!(self@.contains_key(*ki.get()))); - } else { - assert(KeyIterator::between(*lo, ki, *hi)); - } - //old(self).mind_the_gap(); - }; - } + } else if ki == lo { + assert(!(self@.contains_key(*ki.get()))); + } else if ki == hi { + assert(!(self@.contains_key(*ki.get()))); + } else { + assert(KeyIterator::between(*lo, ki, *hi)); + } + //old(self).mind_the_gap(); + + }; } } } +} - type ID = EndPoint; // this code was trying to be too generic, but we need to know how to clone IDs. So we specialize. - - pub struct DelegationMap<#[verifier(maybe_negative)] K: KeyTrait + VerusClone> { - // Our efficient implementation based on ranges - lows: StrictlyOrderedMap, - // Our spec version - m: Ghost>, +type ID = EndPoint; + +// this code was trying to be too generic, but we need to know how to clone IDs. So we specialize. 
+pub struct DelegationMap< + #[verifier(maybe_negative)] + K: KeyTrait + VerusClone, +> { + // Our efficient implementation based on ranges + lows: StrictlyOrderedMap, + // Our spec version + m: Ghost>, +} +impl DelegationMap { + pub closed spec fn view(self) -> Map { + self.m@ } - impl DelegationMap { - pub closed spec fn view(self) -> Map { - self.m@ - } - - pub closed spec fn valid(self) -> bool { - &&& self.lows.valid() - &&& self.lows@.contains_key(K::zero_spec()) - &&& self@.dom().is_full() - &&& (forall|k| #[trigger] self@[k].valid_physical_address()) - &&& (forall|k, i, j| - self.lows@.contains_key(i) - && self.lows.gap(KeyIterator::new_spec(i), j) - && #[trigger] KeyIterator::between(KeyIterator::new_spec(i), KeyIterator::new_spec(k), j) - ==> self@[k] == self.lows@[i]@) - } - - pub fn new(k_zero: K, id_zero: ID) -> (s: Self) - requires - k_zero == K::zero_spec(), - id_zero@.valid_physical_address(), - ensures - s.valid(), - s@ == Map::total(|k: K| id_zero@), - { - let mut lows = StrictlyOrderedMap::new(); - lows.set(k_zero, id_zero); - let m = Ghost(Map::total(|k| id_zero@)); - let s = DelegationMap { lows, m }; - s - } + pub closed spec fn valid(self) -> bool { + &&& self.lows.valid() + &&& self.lows@.contains_key(K::zero_spec()) + &&& self@.dom().is_full() + &&& (forall|k| #[trigger] self@[k].valid_physical_address()) + &&& (forall|k, i, j| + self.lows@.contains_key(i) && self.lows.gap(KeyIterator::new_spec(i), j) + && #[trigger] KeyIterator::between( + KeyIterator::new_spec(i), + KeyIterator::new_spec(k), + j, + ) ==> self@[k] == self.lows@[i]@) + } - pub proof fn valid_implies_complete(&self) - requires self.valid() - ensures self@.dom().is_full() - { - } + pub fn new(k_zero: K, id_zero: ID) -> (s: Self) + requires + k_zero == K::zero_spec(), + id_zero@.valid_physical_address(), + ensures + s.valid(), + s@ == Map::total(|k: K| id_zero@), + { + let mut lows = StrictlyOrderedMap::new(); + lows.set(k_zero, id_zero); + let m = Ghost(Map::total(|k| id_zero@)); + let s = DelegationMap { lows, m }; + s + } + pub proof fn valid_implies_complete(&self) + requires + self.valid(), + ensures + self@.dom().is_full(), + { + } - // Returns the greatest_lower_bound as evidence for the proof of correctness for set - fn get_internal(&self, k: &K) -> (res: (ID, Ghost>)) - requires - self.valid(), - ensures ({ + // Returns the greatest_lower_bound as evidence for the proof of correctness for set + fn get_internal(&self, k: &K) -> (res: (ID, Ghost>)) + requires + self.valid(), + ensures + ({ let (id, glb) = res; &&& id@ == self@[*k] &&& self.lows.greatest_lower_bound_spec(KeyIterator::new_spec(*k), glb@) &&& id@.valid_physical_address() }), - { - let ki = KeyIterator::new(k.clone()); - let glb = self.lows.greatest_lower_bound(&ki); - proof { - let glb_k = *glb.get(); - assert(self.lows@.contains_key(glb_k)); // OBSERVE - let hi = choose |hi| self.lows.gap(glb, hi) && #[trigger] KeyIterator::between(glb, ki, hi); // OBSERVE - assert(KeyIterator::between(KeyIterator::new_spec(glb_k), ki, hi)); - // OBSERVE The following is required; unclear why the line above isn't sufficient - assert(self.lows@.contains_key(glb_k) - && self.lows.gap(KeyIterator::new_spec(glb_k), hi) - && KeyIterator::between(KeyIterator::new_spec(glb_k), KeyIterator::new_spec(*k), hi)); - } - let id = (*self.lows.get(glb.get()).unwrap()).clone_up_to_view(); - (id, Ghost(glb)) + { + let ki = KeyIterator::new(k.clone()); + let glb = self.lows.greatest_lower_bound(&ki); + proof { + let glb_k = *glb.get(); + 
assert(self.lows@.contains_key(glb_k)); // OBSERVE + let hi = choose|hi| + self.lows.gap(glb, hi) && #[trigger] KeyIterator::between(glb, ki, hi); // OBSERVE + assert(KeyIterator::between(KeyIterator::new_spec(glb_k), ki, hi)); + // OBSERVE The following is required; unclear why the line above isn't sufficient + assert(self.lows@.contains_key(glb_k) && self.lows.gap(KeyIterator::new_spec(glb_k), hi) + && KeyIterator::between( + KeyIterator::new_spec(glb_k), + KeyIterator::new_spec(*k), + hi, + )); } + let id = (*self.lows.get(glb.get()).unwrap()).clone_up_to_view(); + (id, Ghost(glb)) + } - pub fn get(&self, k: &K) -> (id: ID) - requires - self.valid(), - ensures - id@ == self@[*k], - id@.valid_physical_address(), - { - let (id, glb_ret) = self.get_internal(k); - id - } + pub fn get(&self, k: &K) -> (id: ID) + requires + self.valid(), + ensures + id@ == self@[*k], + id@.valid_physical_address(), + { + let (id, glb_ret) = self.get_internal(k); + id + } - // Maps keys from [lo, hi) to dst - pub fn set(&mut self, lo: &KeyIterator, hi: &KeyIterator, dst: &ID) - requires - old(self).valid(), - dst@.valid_physical_address(), - ensures - self.valid(), - forall |ki:KeyIterator| #[trigger] KeyIterator::between(*lo, ki, *hi) ==> self@[*ki.get()] == dst@, - forall |ki:KeyIterator| !ki.is_end_spec() && !(#[trigger] KeyIterator::between(*lo, ki, *hi)) ==> self@[*ki.get()] == old(self)@[*ki.get()], - { - if lo.lt(&hi) { - let ghost mut glb; - if !hi.is_end() { - // Get the current value of hi - let (id, glb_ret) = self.get_internal(hi.get()); - proof { glb = glb_ret@; } - // Set it explicitly - self.lows.set(hi.get().clone(), id); - } - let ghost mut pre_erase; proof { pre_erase = self.lows@; } - let ghost mut pre_erase_vec; proof { pre_erase_vec = self.lows; } - self.lows.erase(&lo, &hi); - let ghost mut erased; proof { erased = self.lows@; } - let ghost mut erased_vec; proof { erased_vec = self.lows; } - self.lows.set(lo.get().clone(), clone_end_point(dst)); - self.m = Ghost(self.m@.union_prefer_right( - Map::new(|k| KeyIterator::between(*lo, KeyIterator::new_spec(k), *hi), - |k| dst@))); - assert(self@.dom().is_full()) by { - assert_sets_equal!(self@.dom(), Set::full()); + // Maps keys from [lo, hi) to dst + pub fn set(&mut self, lo: &KeyIterator, hi: &KeyIterator, dst: &ID) + requires + old(self).valid(), + dst@.valid_physical_address(), + ensures + self.valid(), + forall|ki: KeyIterator| #[trigger] + KeyIterator::between(*lo, ki, *hi) ==> self@[*ki.get()] == dst@, + forall|ki: KeyIterator| + !ki.is_end_spec() && !(#[trigger] KeyIterator::between(*lo, ki, *hi)) + ==> self@[*ki.get()] == old(self)@[*ki.get()], + { + if lo.lt(&hi) { + let ghost mut glb; + if !hi.is_end() { + // Get the current value of hi + let (id, glb_ret) = self.get_internal(hi.get()); + proof { + glb = glb_ret@; } - assert (self.lows@.contains_key(K::zero_spec())) by { - let ki = KeyIterator::new_spec(K::zero_spec()); - assert_by_contradiction!(!lo.lt_spec(ki), { + // Set it explicitly + self.lows.set(hi.get().clone(), id); + } + let ghost mut pre_erase; + proof { + pre_erase = self.lows@; + } + let ghost mut pre_erase_vec; + proof { + pre_erase_vec = self.lows; + } + self.lows.erase(&lo, &hi); + let ghost mut erased; + proof { + erased = self.lows@; + } + let ghost mut erased_vec; + proof { + erased_vec = self.lows; + } + self.lows.set(lo.get().clone(), clone_end_point(dst)); + self.m = Ghost( + self.m@.union_prefer_right( + Map::new( + |k| KeyIterator::between(*lo, KeyIterator::new_spec(k), *hi), + |k| dst@, + ), + ), + ); + 
assert(self@.dom().is_full()) by { + assert_sets_equal!(self@.dom(), Set::full()); + } + assert(self.lows@.contains_key(K::zero_spec())) by { + let ki = KeyIterator::new_spec(K::zero_spec()); + assert_by_contradiction!(!lo.lt_spec(ki), { K::zero_properties(); K::cmp_properties(); }); - if lo == ki { - } else { - assert(ki.lt_spec(*lo)) by { - K::zero_properties(); - } + if lo == ki { + } else { + assert(ki.lt_spec(*lo)) by { + K::zero_properties(); } - }; - assert forall |k, i, j| - self.lows@.contains_key(i) - && self.lows.gap(KeyIterator::new_spec(i), j) - && #[trigger] KeyIterator::between(KeyIterator::new_spec(i), KeyIterator::new_spec(k), j) - implies self@[k] == self.lows@[i]@ by { - let ii = KeyIterator::new_spec(i); - let ki = KeyIterator::new_spec(k); - if KeyIterator::between(*lo, ki, *hi) { - assert(self@[k] == dst@); - assert_by_contradiction!(ii == lo, { + } + }; + assert forall|k, i, j| + self.lows@.contains_key(i) && self.lows.gap(KeyIterator::new_spec(i), j) + && #[trigger] KeyIterator::between( + KeyIterator::new_spec(i), + KeyIterator::new_spec(k), + j, + ) implies self@[k] == self.lows@[i]@ by { + let ii = KeyIterator::new_spec(i); + let ki = KeyIterator::new_spec(k); + if KeyIterator::between(*lo, ki, *hi) { + assert(self@[k] == dst@); + assert_by_contradiction!(ii == lo, { if lo.lt_spec(ii) { K::cmp_properties(); } else { @@ -1759,50 +1924,52 @@ mod delegation_map_v { assert(!self.lows@.contains_key(*lo.get())); // OBSERVE } }); - assert(self.lows@[i]@ == dst@); - } else if ki.lt_spec(*lo) { - assert(self@[k] == old(self)@[k]); - assert(!(ki.geq_spec(*lo) && ki.lt_spec(*hi))); - assert(erased.contains_key(i)); - assert(ii != hi) by { K::cmp_properties(); }; - assert(old(self).lows@.contains_key(i)); - assert(self.lows@[i] == old(self).lows@[i]); - assert(old(self).lows.gap(ii, j)) by { - assert_by_contradiction!(!lo.lt_spec(j), { + assert(self.lows@[i]@ == dst@); + } else if ki.lt_spec(*lo) { + assert(self@[k] == old(self)@[k]); + assert(!(ki.geq_spec(*lo) && ki.lt_spec(*hi))); + assert(erased.contains_key(i)); + assert(ii != hi) by { + K::cmp_properties(); + }; + assert(old(self).lows@.contains_key(i)); + assert(self.lows@[i] == old(self).lows@[i]); + assert(old(self).lows.gap(ii, j)) by { + assert_by_contradiction!(!lo.lt_spec(j), { K::cmp_properties(); assert(!self.lows@.contains_key(*lo.get())); // OBSERVE }); - // TODO: add a trigger annotation once https://github.com/verus-lang/verus/issues/335 is fixed - assert forall |m| KeyIterator::new_spec(m).lt_spec(*lo) implies - (old(self).lows@.contains_key(m) == - #[trigger] self.lows@.contains_key(m)) by { - K::cmp_properties(); - }; - // TODO: add a trigger annotation once https://github.com/verus-lang/verus/issues/335 is fixed - assert forall |mi| ii.lt_spec(mi) && mi.lt_spec(j) - implies !(#[trigger] old(self).lows@.contains_key(*mi.get())) by { - K::cmp_properties(); - } + // TODO: add a trigger annotation once https://github.com/verus-lang/verus/issues/335 is fixed + assert forall|m| KeyIterator::new_spec(m).lt_spec(*lo) implies (old( + self, + ).lows@.contains_key(m) == #[trigger] self.lows@.contains_key(m)) by { + K::cmp_properties(); }; - assert(old(self)@[k] == old(self).lows@[i]@); - } else { - // We have: - // self.lows@.contains i - // nothing in (i, j) - // i < k < j - // lo < hi <= k < j - assert(ki.geq_spec(*hi)); - assert(self@[k] == old(self)@[k]); - assert(!hi.is_end()); - - assert((ii != hi && old(self)@[k] == old(self).lows@[i]@) || self@[k] == self.lows@[i]@) by { - assert((ii != hi && 
old(self).lows@.contains_key(i)) || ii == hi) by { - assert_by_contradiction!(!ii.lt_spec(*lo), { + // TODO: add a trigger annotation once https://github.com/verus-lang/verus/issues/335 is fixed + assert forall|mi| ii.lt_spec(mi) && mi.lt_spec(j) implies !(#[trigger] old( + self, + ).lows@.contains_key(*mi.get())) by { + K::cmp_properties(); + } + }; + assert(old(self)@[k] == old(self).lows@[i]@); + } else { + // We have: + // self.lows@.contains i + // nothing in (i, j) + // i < k < j + // lo < hi <= k < j + assert(ki.geq_spec(*hi)); + assert(self@[k] == old(self)@[k]); + assert(!hi.is_end()); + assert((ii != hi && old(self)@[k] == old(self).lows@[i]@) || self@[k] + == self.lows@[i]@) by { + assert((ii != hi && old(self).lows@.contains_key(i)) || ii == hi) by { + assert_by_contradiction!(!ii.lt_spec(*lo), { // Flaky proof here K::cmp_properties(); }); - - assert_by_contradiction!(ii != lo, { + assert_by_contradiction!(ii != lo, { // We need the following to prove hi is in self.lows@ assert(!hi.lt_spec(*hi)) by { K::cmp_properties(); }; assert(pre_erase.contains_key(*hi.get())); @@ -1815,518 +1982,707 @@ mod delegation_map_v { // which violates lows.gap(i, j) //assert(false); }); - - assert(lo.lt_spec(ii)) by { K::cmp_properties(); }; - // lo < i ==> - // lo < i <= k < j - // lo < hi <= k < j - assert_by_contradiction!(!ii.lt_spec(*hi), { + assert(lo.lt_spec(ii)) by { + K::cmp_properties(); + }; + // lo < i ==> + // lo < i <= k < j + // lo < hi <= k < j + assert_by_contradiction!(!ii.lt_spec(*hi), { // If this were true, we would have i < hi < j, // which violates gap(i, j) assert(hi.lt_spec(j)) by { K::cmp_properties(); }; //assert(false); }); - // Therefore hi <= i - if ii == hi { - } else { - // hi < i ==> keys from i to j in lows didn't change - assert(erased.contains_key(i)); - assert(pre_erase.contains_key(i)); - assert(old(self).lows@.contains_key(i)); - // assert forall |m| ii.lt_spec(m) && m.lt_spec(j) - // implies !(#[trigger] old(self)@.contains_key(*m.get())) by { - // K::cmp_properties(); - //// assert_by_contradiction!(!old(self)@.contains_key(*m.get()), { - //// K::cmp_properties(); - //// assert(self@.contains_key(*m.get())); - //// assert(KeyIterator::between(ii, m, j)); - //// self.lows.gap_means_empty(ii, j, m); - //// }); - // }; - K::cmp_properties(); // Flaky - assert(old(self).lows.gap(KeyIterator::new_spec(i), j)); - } - }; - - //assert(erased.gap(i, j)); - + // Therefore hi <= i if ii == hi { - // lo < (hi == i) < k < j - assert(pre_erase[*hi.get()]@ == old(self)@[*hi.get()]); - assert(erased[*hi.get()] == pre_erase[*hi.get()]) by { K::cmp_properties(); }; - assert(self@[*hi.get()] == erased[*hi.get()]@); - // Above establishes self@[*hi.get()] == old(self)@[*hi.get()] - assert(erased_vec.gap(ii, j)); - assert(pre_erase_vec.gap(ii, j)); - assert(old(self).lows.gap(ii, j)); - if old(self).lows@.contains_key(i) { - assert(old(self)@[k] == old(self).lows@[i]@); - } else { - // old(self) did not contain hi; instead we added it inside the `if !hi.is_end()` clause - // But we know that glb was the closest bound to hi and glb is in old(self).lows@ - assert(old(self).lows@.contains_key(*glb.get())); - assert(old(self).lows@[*glb.get()]@ == pre_erase[*hi.get()]@); - assert_by_contradiction!(!ii.lt_spec(glb), { + } else { + // hi < i ==> keys from i to j in lows didn't change + assert(erased.contains_key(i)); + assert(pre_erase.contains_key(i)); + assert(old(self).lows@.contains_key(i)); + // assert forall |m| ii.lt_spec(m) && m.lt_spec(j) + // implies !(#[trigger] 
old(self)@.contains_key(*m.get())) by { + // K::cmp_properties(); + //// assert_by_contradiction!(!old(self)@.contains_key(*m.get()), { + //// K::cmp_properties(); + //// assert(self@.contains_key(*m.get())); + //// assert(KeyIterator::between(ii, m, j)); + //// self.lows.gap_means_empty(ii, j, m); + //// }); + // }; + K::cmp_properties(); // Flaky + assert(old(self).lows.gap(KeyIterator::new_spec(i), j)); + } + }; + //assert(erased.gap(i, j)); + if ii == hi { + // lo < (hi == i) < k < j + assert(pre_erase[*hi.get()]@ == old(self)@[*hi.get()]); + assert(erased[*hi.get()] == pre_erase[*hi.get()]) by { + K::cmp_properties(); + }; + assert(self@[*hi.get()] == erased[*hi.get()]@); + // Above establishes self@[*hi.get()] == old(self)@[*hi.get()] + assert(erased_vec.gap(ii, j)); + assert(pre_erase_vec.gap(ii, j)); + assert(old(self).lows.gap(ii, j)); + if old(self).lows@.contains_key(i) { + assert(old(self)@[k] == old(self).lows@[i]@); + } else { + // old(self) did not contain hi; instead we added it inside the `if !hi.is_end()` clause + // But we know that glb was the closest bound to hi and glb is in old(self).lows@ + assert(old(self).lows@.contains_key(*glb.get())); + assert(old(self).lows@[*glb.get()]@ == pre_erase[*hi.get()]@); + assert_by_contradiction!(!ii.lt_spec(glb), { K::cmp_properties(); }); - assert(ii.geq_spec(glb)); - // Establish the preconditions to use @old(self).valid() to relate - // old(self)@[k] to old(self).lows@[glb] - let hi_hi = choose |h| #[trigger] old(self).lows.gap(glb, h) && KeyIterator::between(glb, *hi, h); - assert(old(self).lows.gap(glb, j)) by { old(self).lows.mind_the_gap(); } - assert(KeyIterator::between(glb, ki, j)) by { K::cmp_properties(); }; - assert(old(self)@[k] == old(self).lows@[*glb.get()]@); - - // Directly prove that self@[k] == self.lows@[i] - assert(old(self).lows@[*glb.get()]@ == pre_erase[*hi.get()]@); - assert(old(self).lows@[*glb.get()]@ == self@[*hi.get()]); - assert(old(self)@[k] == self@[*hi.get()]); - assert(self@[k] == self@[*hi.get()]); - assert(*lo.get() != i) by { K::cmp_properties(); }; - assert(self.lows@[i] == erased[i]); - assert(self@[*hi.get()] == self.lows@[i]@); - assert(self@[k] == self.lows@[i]@); + assert(ii.geq_spec(glb)); + // Establish the preconditions to use @old(self).valid() to relate + // old(self)@[k] to old(self).lows@[glb] + let hi_hi = choose|h| #[trigger] + old(self).lows.gap(glb, h) && KeyIterator::between(glb, *hi, h); + assert(old(self).lows.gap(glb, j)) by { + old(self).lows.mind_the_gap(); } - } else { - assert(old(self).lows@.contains_key(i)); - assert(erased_vec.gap(KeyIterator::new_spec(i), j)); - // Prove that we can't be in the second clause of erase's gap - // postcondition - assert_by_contradiction!(!(hi.geq_spec(j) || + assert(KeyIterator::between(glb, ki, j)) by { + K::cmp_properties(); + }; + assert(old(self)@[k] == old(self).lows@[*glb.get()]@); + // Directly prove that self@[k] == self.lows@[i] + assert(old(self).lows@[*glb.get()]@ == pre_erase[*hi.get()]@); + assert(old(self).lows@[*glb.get()]@ == self@[*hi.get()]); + assert(old(self)@[k] == self@[*hi.get()]); + assert(self@[k] == self@[*hi.get()]); + assert(*lo.get() != i) by { + K::cmp_properties(); + }; + assert(self.lows@[i] == erased[i]); + assert(self@[*hi.get()] == self.lows@[i]@); + assert(self@[k] == self.lows@[i]@); + } + } else { + assert(old(self).lows@.contains_key(i)); + assert(erased_vec.gap(KeyIterator::new_spec(i), j)); + // Prove that we can't be in the second clause of erase's gap + // postcondition + 
assert_by_contradiction!(!(hi.geq_spec(j) || hi.is_end_spec() || !erased_vec@.contains_key(*hi.get())), { K::cmp_properties(); }); - // Therefore we must be in the first clause, and hence: - assert(pre_erase_vec.gap(KeyIterator::new_spec(i), j)); - assert(old(self).lows.gap(KeyIterator::new_spec(i), j)); - } - }; - - if ii != hi { - assert(erased.contains_key(i)) by { K::cmp_properties(); }; - assert(self.lows@[i] == erased[i]) by { K::cmp_properties(); }; - assert(pre_erase.contains_key(i)) by { K::cmp_properties(); }; - assert(erased[i] == pre_erase[i]); - assert(old(self).lows@.contains_key(i)); - assert(old(self).lows@[i] == pre_erase[i]); - assert(old(self).lows@[i] == pre_erase[i]); - assert(self.lows@[i] == old(self).lows@[i]); + // Therefore we must be in the first clause, and hence: + assert(pre_erase_vec.gap(KeyIterator::new_spec(i), j)); + assert(old(self).lows.gap(KeyIterator::new_spec(i), j)); } + }; + if ii != hi { + assert(erased.contains_key(i)) by { + K::cmp_properties(); + }; + assert(self.lows@[i] == erased[i]) by { + K::cmp_properties(); + }; + assert(pre_erase.contains_key(i)) by { + K::cmp_properties(); + }; + assert(erased[i] == pre_erase[i]); + assert(old(self).lows@.contains_key(i)); + assert(old(self).lows@[i] == pre_erase[i]); + assert(old(self).lows@[i] == pre_erase[i]); + assert(self.lows@[i] == old(self).lows@[i]); } } } - assert forall |ki:KeyIterator| #[trigger] KeyIterator::between(*lo, ki, *hi) implies self@[*ki.get()] == dst@ by { - K::cmp_properties(); - }; - // TODO: add a trigger annotation once https://github.com/verus-lang/verus/issues/335 is fixed - assert forall |ki:KeyIterator| !ki.is_end_spec() && !(#[trigger] KeyIterator::between(*lo, ki, *hi)) - implies self@[*ki.get()] == old(self)@[*ki.get()] by { - K::cmp_properties(); - }; - } - - pub open spec fn range_consistent(self, lo: &KeyIterator, hi: &KeyIterator, dst: &ID) -> bool { - forall |k| KeyIterator::between(*lo, KeyIterator::new_spec(k), *hi) ==> (#[trigger] self@[k]) == dst@ } + assert forall|ki: KeyIterator| #[trigger] + KeyIterator::between(*lo, ki, *hi) implies self@[*ki.get()] == dst@ by { + K::cmp_properties(); + }; + // TODO: add a trigger annotation once https://github.com/verus-lang/verus/issues/335 is fixed + assert forall|ki: KeyIterator| + !ki.is_end_spec() && !(#[trigger] KeyIterator::between( + *lo, + ki, + *hi, + )) implies self@[*ki.get()] == old(self)@[*ki.get()] by { + K::cmp_properties(); + }; + } - proof fn not_range_consistent(self, lo: &KeyIterator, hi: &KeyIterator, dst: &ID, bad: &KeyIterator) - requires - KeyIterator::between(*lo, *bad, *hi), - self@.contains_key(*bad.get()), - self@[*bad.get()] != dst@, - ensures - !self.range_consistent(lo, hi, dst), - { - } + pub open spec fn range_consistent( + self, + lo: &KeyIterator, + hi: &KeyIterator, + dst: &ID, + ) -> bool { + forall|k| + KeyIterator::between(*lo, KeyIterator::new_spec(k), *hi) ==> (#[trigger] self@[k]) + == dst@ + } + + proof fn not_range_consistent( + self, + lo: &KeyIterator, + hi: &KeyIterator, + dst: &ID, + bad: &KeyIterator, + ) + requires + KeyIterator::between(*lo, *bad, *hi), + self@.contains_key(*bad.get()), + self@[*bad.get()] != dst@, + ensures + !self.range_consistent(lo, hi, dst), + { + } - proof fn extend_range_consistent(self, x: &KeyIterator, y: &KeyIterator, z: &KeyIterator, dst: &ID) - requires - self.range_consistent(x, y, dst), - self.range_consistent(y, z, dst), - ensures - self.range_consistent(x, z, dst), - { - } + proof fn extend_range_consistent( + self, + x: &KeyIterator, + y: 
&KeyIterator, + z: &KeyIterator, + dst: &ID, + ) + requires + self.range_consistent(x, y, dst), + self.range_consistent(y, z, dst), + ensures + self.range_consistent(x, z, dst), + { + } - proof fn range_consistent_subset(self, x: &KeyIterator, y: &KeyIterator, x_inner: &KeyIterator, y_inner: &KeyIterator, dst: &ID) - requires - self.range_consistent(x, y, dst), - x_inner.geq_spec(*x), - !y.lt_spec(*y_inner), - ensures - self.range_consistent(x_inner, y_inner, dst), - { - K::cmp_properties(); - } + proof fn range_consistent_subset( + self, + x: &KeyIterator, + y: &KeyIterator, + x_inner: &KeyIterator, + y_inner: &KeyIterator, + dst: &ID, + ) + requires + self.range_consistent(x, y, dst), + x_inner.geq_spec(*x), + !y.lt_spec(*y_inner), + ensures + self.range_consistent(x_inner, y_inner, dst), + { + K::cmp_properties(); + } - proof fn empty_key_range_is_consistent(&self, lo: &KeyIterator, hi: &KeyIterator, id: &ID) - requires - lo.geq_spec(*hi), - ensures - self.range_consistent(lo, hi, id), - { - K::cmp_properties(); - } + proof fn empty_key_range_is_consistent(&self, lo: &KeyIterator, hi: &KeyIterator, id: &ID) + requires + lo.geq_spec(*hi), + ensures + self.range_consistent(lo, hi, id), + { + K::cmp_properties(); + } - proof fn all_keys_agree(&self, lo: usize, hi: usize, id: &ID) - requires - self.valid(), - 0 <= lo <= hi < self.lows.keys@.len(), - forall |i| #![auto] lo <= i <= hi ==> self.lows@[self.lows.keys@[i]]@ == id@, - ensures - self.range_consistent(&KeyIterator::new_spec(self.lows.keys@[lo as int]), &KeyIterator::new_spec(self.lows.keys@[hi as int]), id), - decreases hi - lo, - { - self.almost_all_keys_agree(lo, hi, id); - } + proof fn all_keys_agree(&self, lo: usize, hi: usize, id: &ID) + requires + self.valid(), + 0 <= lo <= hi < self.lows.keys@.len(), + forall|i| #![auto] lo <= i <= hi ==> self.lows@[self.lows.keys@[i]]@ == id@, + ensures + self.range_consistent( + &KeyIterator::new_spec(self.lows.keys@[lo as int]), + &KeyIterator::new_spec(self.lows.keys@[hi as int]), + id, + ), + decreases hi - lo, + { + self.almost_all_keys_agree(lo, hi, id); + } - proof fn almost_all_keys_agree(&self, lo: usize, hi: usize, id: &ID) - requires - self.valid(), - 0 <= lo <= hi < self.lows.keys@.len(), - forall |i| #![auto] lo <= i < hi ==> self.lows@[self.lows.keys@[i]]@ == id@, - ensures - self.range_consistent(&KeyIterator::new_spec(self.lows.keys@[lo as int]), &KeyIterator::new_spec(self.lows.keys@[hi as int]), id), - decreases hi - lo, - { - let lo_k = self.lows.keys@[lo as int]; - let hi_k = self.lows.keys@[hi as int]; - let lo_ki = KeyIterator::new_spec(lo_k); - let hi_ki = KeyIterator::new_spec(hi_k); - if lo_ki.geq_spec(hi_ki) { - self.empty_key_range_is_consistent(&lo_ki, &hi_ki, id); - } else { - assert(lo_ki.lt_spec(hi_ki) && lo < hi) by { - K::cmp_properties(); - } - let lo_next = (lo + 1) as usize; - let lo_next_k = self.lows.keys@[lo_next as int]; - let lo_next_ki = KeyIterator::new_spec(lo_next_k); - assert(self.lows.gap(lo_ki, lo_next_ki)) by { - K::cmp_properties(); - } - assert(self.range_consistent(&lo_ki, &lo_next_ki, id)); - self.almost_all_keys_agree(lo_next, hi, id); - self.extend_range_consistent(&lo_ki, &lo_next_ki, &hi_ki, id); + proof fn almost_all_keys_agree(&self, lo: usize, hi: usize, id: &ID) + requires + self.valid(), + 0 <= lo <= hi < self.lows.keys@.len(), + forall|i| #![auto] lo <= i < hi ==> self.lows@[self.lows.keys@[i]]@ == id@, + ensures + self.range_consistent( + &KeyIterator::new_spec(self.lows.keys@[lo as int]), + 
&KeyIterator::new_spec(self.lows.keys@[hi as int]), + id, + ), + decreases hi - lo, + { + let lo_k = self.lows.keys@[lo as int]; + let hi_k = self.lows.keys@[hi as int]; + let lo_ki = KeyIterator::new_spec(lo_k); + let hi_ki = KeyIterator::new_spec(hi_k); + if lo_ki.geq_spec(hi_ki) { + self.empty_key_range_is_consistent(&lo_ki, &hi_ki, id); + } else { + assert(lo_ki.lt_spec(hi_ki) && lo < hi) by { + K::cmp_properties(); } - + let lo_next = (lo + 1) as usize; + let lo_next_k = self.lows.keys@[lo_next as int]; + let lo_next_ki = KeyIterator::new_spec(lo_next_k); + assert(self.lows.gap(lo_ki, lo_next_ki)) by { + K::cmp_properties(); + } + assert(self.range_consistent(&lo_ki, &lo_next_ki, id)); + self.almost_all_keys_agree(lo_next, hi, id); + self.extend_range_consistent(&lo_ki, &lo_next_ki, &hi_ki, id); } + } - - pub fn range_consistent_impl(&self, lo: &KeyIterator, hi: &KeyIterator, dst: &ID) -> (b: bool) - requires - self.valid(), - ensures - b == self.range_consistent(lo, hi, dst), - { - if lo.lt(hi) { - let lo_glb_index = self.lows.greatest_lower_bound_index(lo); - let hi_glb_index = self.lows.greatest_lower_bound_index(hi); - assert(lo_glb_index <= hi_glb_index) by { - K::cmp_properties(); - }; - let ghost lo_glb = self.lows.keys@[lo_glb_index as int]; - let hi_glb = self.lows.keys.index(hi_glb_index); - let ghost lo_glb_ki = KeyIterator::new_spec(lo_glb); - let ghost hi_glb_ki = KeyIterator::new_spec(hi_glb); - - //let ret = self.lows.keys_agree(Ghost(&lo_glb), lo_glb_index, Ghost(&hi_glb), hi_glb_index, dst); - let (agree, almost) = self.lows.keys_in_index_range_agree(lo_glb_index, hi_glb_index, dst); - let ret = if agree { - // Simple case where everything agrees - true - } else if !agree && almost && !hi.is_end() && hi_glb.cmp(hi.get()).is_eq() { - // Corner case where almost everything agrees; the one disagreement - // is exactly at the hi key, which isn't included in range_consistent - true - } else { - // Simpler case where disagreement occurs before hi - false - }; - proof { - let end_ki = KeyIterator::end_spec(); - if ret { - if agree { - self.all_keys_agree(lo_glb_index, hi_glb_index, dst); - if hi_glb_index == self.lows.keys@.len() - 1 { - assert forall |k| KeyIterator::between(hi_glb_ki, KeyIterator::new_spec(k), end_ki) implies (#[trigger] self@[k]) == dst@ by { - K::cmp_properties(); - } - assert(self.range_consistent(&hi_glb_ki, &end_ki, dst)); - self.extend_range_consistent(&lo_glb_ki, &hi_glb_ki, &end_ki, dst); - self.range_consistent_subset(&lo_glb_ki, &end_ki, lo, hi, dst); - } else { - let hi_next_index = hi_glb_index + 1; - let hi_next = self.lows.keys@[hi_next_index]; - let hi_next_ki = KeyIterator::new_spec(hi_next); - assert(self.lows.gap(hi_glb_ki, hi_next_ki)) by { - K::cmp_properties(); - } - - assert_by_contradiction!(!hi.above(hi_next), { + pub fn range_consistent_impl(&self, lo: &KeyIterator, hi: &KeyIterator, dst: &ID) -> (b: + bool) + requires + self.valid(), + ensures + b == self.range_consistent(lo, hi, dst), + { + if lo.lt(hi) { + let lo_glb_index = self.lows.greatest_lower_bound_index(lo); + let hi_glb_index = self.lows.greatest_lower_bound_index(hi); + assert(lo_glb_index <= hi_glb_index) by { + K::cmp_properties(); + }; + let ghost lo_glb = self.lows.keys@[lo_glb_index as int]; + let hi_glb = self.lows.keys.index(hi_glb_index); + let ghost lo_glb_ki = KeyIterator::new_spec(lo_glb); + let ghost hi_glb_ki = KeyIterator::new_spec(hi_glb); + //let ret = self.lows.keys_agree(Ghost(&lo_glb), lo_glb_index, Ghost(&hi_glb), hi_glb_index, dst); + let (agree, 
almost) = self.lows.keys_in_index_range_agree( + lo_glb_index, + hi_glb_index, + dst, + ); + let ret = if agree { + // Simple case where everything agrees + true + } else if !agree && almost && !hi.is_end() && hi_glb.cmp(hi.get()).is_eq() { + // Corner case where almost everything agrees; the one disagreement + // is exactly at the hi key, which isn't included in range_consistent + true + } else { + // Simpler case where disagreement occurs before hi + false + }; + proof { + let end_ki = KeyIterator::end_spec(); + if ret { + if agree { + self.all_keys_agree(lo_glb_index, hi_glb_index, dst); + if hi_glb_index == self.lows.keys@.len() - 1 { + assert forall|k| + KeyIterator::between( + hi_glb_ki, + KeyIterator::new_spec(k), + end_ki, + ) implies (#[trigger] self@[k]) == dst@ by { + K::cmp_properties(); + } + assert(self.range_consistent(&hi_glb_ki, &end_ki, dst)); + self.extend_range_consistent(&lo_glb_ki, &hi_glb_ki, &end_ki, dst); + self.range_consistent_subset(&lo_glb_ki, &end_ki, lo, hi, dst); + } else { + let hi_next_index = hi_glb_index + 1; + let hi_next = self.lows.keys@[hi_next_index]; + let hi_next_ki = KeyIterator::new_spec(hi_next); + assert(self.lows.gap(hi_glb_ki, hi_next_ki)) by { + K::cmp_properties(); + } + assert_by_contradiction!(!hi.above(hi_next), { K::cmp_properties(); assert(self.lows@.contains_key(hi_next)); // Trigger conclusion of glb_spec }); - assert(!hi.is_end_spec()) by { - K::cmp_properties(); - } - let upper = choose |u| #[trigger] self.lows.gap(hi_glb_ki, u) && KeyIterator::between(hi_glb_ki, *hi, u); - assert(self.range_consistent(&hi_glb_ki, &upper, dst)); - self.extend_range_consistent(&lo_glb_ki, &hi_glb_ki, &upper, dst); - assert(!upper.lt_spec(*hi)) by { - K::cmp_properties(); - } - self.range_consistent_subset(&lo_glb_ki, &upper, lo, hi, dst); + assert(!hi.is_end_spec()) by { + K::cmp_properties(); } - } else { - assert(!agree && almost && !hi.is_end() && hi_glb.cmp_spec(*hi.get_spec()).eq()); - self.almost_all_keys_agree(lo_glb_index, hi_glb_index, dst); - self.range_consistent(&KeyIterator::new_spec(self.lows.keys@[lo_glb_index as int]), - &KeyIterator::new_spec(self.lows.keys@[hi_glb_index as int]), dst); - assert(lo.geq_spec(lo_glb_ki)); - self.range_consistent_subset(&lo_glb_ki, &hi_glb_ki, lo, hi, dst); + let upper = choose|u| #[trigger] + self.lows.gap(hi_glb_ki, u) && KeyIterator::between( + hi_glb_ki, + *hi, + u, + ); + assert(self.range_consistent(&hi_glb_ki, &upper, dst)); + self.extend_range_consistent(&lo_glb_ki, &hi_glb_ki, &upper, dst); + assert(!upper.lt_spec(*hi)) by { + K::cmp_properties(); + } + self.range_consistent_subset(&lo_glb_ki, &upper, lo, hi, dst); } } else { - assert(!agree); - let bad_index = choose |bad_index| #![auto] lo_glb_index <= bad_index <= hi_glb_index && self.lows@[self.lows.keys@[bad_index]]@ != dst@; - let bad = self.lows.keys@[bad_index]; - let bad_ki = KeyIterator::new_spec(bad); - - if bad_index == lo_glb_index { - let lo_k = *lo.get(); - let upper = choose |u| #[trigger] self.lows.gap(lo_glb_ki, u) && KeyIterator::between(lo_glb_ki, KeyIterator::new_spec(lo_k), u); - assert(self.lows@.contains_key(lo_glb)); - assert(self.lows.gap(KeyIterator::new_spec(lo_glb), upper)); - assert(KeyIterator::between(KeyIterator::new_spec(lo_glb), KeyIterator::new_spec(lo_k), upper)); - assert(self@[lo_k] == self.lows@[lo_glb]@); - assert(self.lows@[lo_glb]@ == self.lows@[self.lows.keys@[bad_index]]@); - assert(self@[lo_k] != dst@); - assert(KeyIterator::between(*lo, *lo, *hi)) by { K::cmp_properties(); } - 
self.not_range_consistent(lo, hi, dst, lo); - } else { - - assert(hi.is_end_spec() ==> hi_glb_ki != hi); - assert(hi_glb_ki.cmp_spec(*hi).eq() == (hi_glb_ki == hi)) by { K::cmp_properties(); }; - - assert(bad_index > lo_glb_index && !bad_ki.lt_spec(*lo)) by { - K::cmp_properties(); - assert(self.lows@.contains_key(bad)); // Trigger conclusion of glb_spec - }; - - // almost == (self@[self.keys@[hi_glb_index as int]]@ != v@ && - // (forall |i| #![auto] lo_glb_index <= i < hi_glb_index ==> self@[self.keys@[i]]@ == v@))) - if almost { - assert(hi_glb_index == bad_index); + assert(!agree && almost && !hi.is_end() && hi_glb.cmp_spec( + *hi.get_spec(), + ).eq()); + self.almost_all_keys_agree(lo_glb_index, hi_glb_index, dst); + self.range_consistent( + &KeyIterator::new_spec(self.lows.keys@[lo_glb_index as int]), + &KeyIterator::new_spec(self.lows.keys@[hi_glb_index as int]), + dst, + ); + assert(lo.geq_spec(lo_glb_ki)); + self.range_consistent_subset(&lo_glb_ki, &hi_glb_ki, lo, hi, dst); + } + } else { + assert(!agree); + let bad_index = choose|bad_index| + #![auto] + lo_glb_index <= bad_index <= hi_glb_index + && self.lows@[self.lows.keys@[bad_index]]@ != dst@; + let bad = self.lows.keys@[bad_index]; + let bad_ki = KeyIterator::new_spec(bad); + if bad_index == lo_glb_index { + let lo_k = *lo.get(); + let upper = choose|u| #[trigger] + self.lows.gap(lo_glb_ki, u) && KeyIterator::between( + lo_glb_ki, + KeyIterator::new_spec(lo_k), + u, + ); + assert(self.lows@.contains_key(lo_glb)); + assert(self.lows.gap(KeyIterator::new_spec(lo_glb), upper)); + assert(KeyIterator::between( + KeyIterator::new_spec(lo_glb), + KeyIterator::new_spec(lo_k), + upper, + )); + assert(self@[lo_k] == self.lows@[lo_glb]@); + assert(self.lows@[lo_glb]@ == self.lows@[self.lows.keys@[bad_index]]@); + assert(self@[lo_k] != dst@); + assert(KeyIterator::between(*lo, *lo, *hi)) by { + K::cmp_properties(); + } + self.not_range_consistent(lo, hi, dst, lo); + } else { + assert(hi.is_end_spec() ==> hi_glb_ki != hi); + assert(hi_glb_ki.cmp_spec(*hi).eq() == (hi_glb_ki == hi)) by { + K::cmp_properties(); + }; + assert(bad_index > lo_glb_index && !bad_ki.lt_spec(*lo)) by { + K::cmp_properties(); + assert(self.lows@.contains_key(bad)); // Trigger conclusion of glb_spec + }; + // almost == (self@[self.keys@[hi_glb_index as int]]@ != v@ && + // (forall |i| #![auto] lo_glb_index <= i < hi_glb_index ==> self@[self.keys@[i]]@ == v@))) + if almost { + assert(hi_glb_index == bad_index); + if !hi.is_end_spec() { + if hi_glb_ki == hi { + assert(ret); + assert(false); + } else { + assert(KeyIterator::between(*lo, bad_ki, *hi)) by { + K::cmp_properties(); + }; + //assert(self.lows.gap(bad_ki, KeyIterator::new_spec(self.lows.keys@[bad_index + 1]))); + let upper = choose|u| + #![auto] + self.lows.gap(hi_glb_ki, u) && KeyIterator::between( + hi_glb_ki, + *hi, + u, + ); + assert(self.lows@.contains_key(bad)); + //assert(self.lows.gap(bad_ki, upper)); + assert(self.lows.gap(bad_ki, *hi)) by { + K::cmp_properties(); + }; + assert(KeyIterator::between(hi_glb_ki, bad_ki, upper)) by { + K::cmp_properties(); + }; + assert(self@[bad] == self.lows@[bad]@); + self.not_range_consistent(lo, hi, dst, &bad_ki); + } + } else { + if hi_glb_ki == hi { + assert(false); + } else { + assert(KeyIterator::between(*lo, bad_ki, *hi)) by { + K::cmp_properties(); + }; + //assert(self.lows.gap(bad_ki, KeyIterator::new_spec(self.lows.keys@[bad_index + 1]))); + //let upper = choose |u| #![auto] self.lows.gap(hi_glb_ki, u) && KeyIterator::between(hi_glb_ki, *hi, u); + 
assert(self.lows@.contains_key(bad)); + //assert(self.lows.gap(bad_ki, upper)); + assert(self.lows.gap(bad_ki, *hi)) by { + K::cmp_properties(); + }; + assert(KeyIterator::between(hi_glb_ki, bad_ki, *hi)) by { + K::cmp_properties(); + }; + assert(self@[bad] == self.lows@[bad]@); + self.not_range_consistent(lo, hi, dst, &bad_ki); + } + } + } else { + assert(self.lows@[self.lows.keys@[hi_glb_index as int]]@ == dst@ || !( + forall|i| + #![auto] + lo_glb_index <= i < hi_glb_index ==> self.lows@[self.lows.keys@[i]]@ + == dst@)); + if self.lows@[self.lows.keys@[hi_glb_index as int]]@ == dst@ { if !hi.is_end_spec() { if hi_glb_ki == hi { - assert(ret); - assert(false); - } else { - assert(KeyIterator::between(*lo, bad_ki, *hi)) by { K::cmp_properties(); }; - //assert(self.lows.gap(bad_ki, KeyIterator::new_spec(self.lows.keys@[bad_index + 1]))); - - let upper = choose |u| #![auto] self.lows.gap(hi_glb_ki, u) && KeyIterator::between(hi_glb_ki, *hi, u); - assert(self.lows@.contains_key(bad)); - //assert(self.lows.gap(bad_ki, upper)); - assert(self.lows.gap(bad_ki, *hi)) by { K::cmp_properties(); }; - assert(KeyIterator::between(hi_glb_ki, bad_ki, upper)) by { K::cmp_properties(); }; - assert(self@[bad] == self.lows@[bad]@); - + assert(bad_index < hi_glb_index); + // Proof X + let bad_next = self.lows.keys@[bad_index + 1]; + let bad_next_ki = KeyIterator::new_spec(bad_next); + assert(KeyIterator::between(*lo, bad_ki, *hi)) by { + K::cmp_properties(); + } + assert(self@[bad] != dst@) by { + // Trigger DelegationMap::valid + assert(self.lows.gap(bad_ki, bad_next_ki)) by { + K::cmp_properties(); + }; + assert(KeyIterator::between( + bad_ki, + bad_ki, + bad_next_ki, + )) by { + K::cmp_properties(); + }; + } self.not_range_consistent(lo, hi, dst, &bad_ki); - } - } else { - if hi_glb_ki == hi { - assert(false); } else { - assert(KeyIterator::between(*lo, bad_ki, *hi)) by { K::cmp_properties(); }; - //assert(self.lows.gap(bad_ki, KeyIterator::new_spec(self.lows.keys@[bad_index + 1]))); - - //let upper = choose |u| #![auto] self.lows.gap(hi_glb_ki, u) && KeyIterator::between(hi_glb_ki, *hi, u); - assert(self.lows@.contains_key(bad)); - //assert(self.lows.gap(bad_ki, upper)); - assert(self.lows.gap(bad_ki, *hi)) by { K::cmp_properties(); }; - assert(KeyIterator::between(hi_glb_ki, bad_ki, *hi)) by { K::cmp_properties(); }; - assert(self@[bad] == self.lows@[bad]@); - - self.not_range_consistent(lo, hi, dst, &bad_ki); - } - } - } else { - assert(self.lows@[self.lows.keys@[hi_glb_index as int]]@ == dst@ || - !(forall |i| #![auto] lo_glb_index <= i < hi_glb_index ==> self.lows@[self.lows.keys@[i]]@ == dst@)); - - if self.lows@[self.lows.keys@[hi_glb_index as int]]@ == dst@ { - if !hi.is_end_spec() { - if hi_glb_ki == hi { - assert(bad_index < hi_glb_index); - // Proof X - let bad_next = self.lows.keys@[bad_index+1]; + // TODO: Duplicates entire Proof Y + if bad_index < hi_glb_index { + // TODO: This duplicates Proof X + assert(bad_index + 1 < self.lows.keys@.len()); + let bad_next = self.lows.keys@[bad_index + 1]; let bad_next_ki = KeyIterator::new_spec(bad_next); - assert(KeyIterator::between(*lo, bad_ki, *hi)) by { K::cmp_properties(); } + assert(KeyIterator::between(*lo, bad_ki, *hi)) by { + K::cmp_properties(); + } assert(self@[bad] != dst@) by { // Trigger DelegationMap::valid - assert(self.lows.gap(bad_ki, bad_next_ki)) by { K::cmp_properties(); }; - assert(KeyIterator::between(bad_ki, bad_ki, bad_next_ki)) by { K::cmp_properties(); }; + assert(self.lows.gap(bad_ki, bad_next_ki)) by { + K::cmp_properties(); 
+ }; + assert(KeyIterator::between( + bad_ki, + bad_ki, + bad_next_ki, + )) by { + K::cmp_properties(); + }; } self.not_range_consistent(lo, hi, dst, &bad_ki); } else { - // TODO: Duplicates entire Proof Y - if bad_index < hi_glb_index { - // TODO: This duplicates Proof X - assert(bad_index+1 < self.lows.keys@.len()); - let bad_next = self.lows.keys@[bad_index+1]; - let bad_next_ki = KeyIterator::new_spec(bad_next); - assert(KeyIterator::between(*lo, bad_ki, *hi)) by { K::cmp_properties(); } - assert(self@[bad] != dst@) by { - // Trigger DelegationMap::valid - assert(self.lows.gap(bad_ki, bad_next_ki)) by { K::cmp_properties(); }; - assert(KeyIterator::between(bad_ki, bad_ki, bad_next_ki)) by { K::cmp_properties(); }; - } - self.not_range_consistent(lo, hi, dst, &bad_ki); - } else { - // From glb_spec: - let upper = choose |u| #![auto] self.lows.gap(hi_glb_ki, u) && KeyIterator::between(hi_glb_ki, *hi, u); - assert(self@[hi_glb] == self.lows@[hi_glb]@) by { - assert(self.lows@.contains_key(hi_glb)); - assert(self.lows.gap(hi_glb_ki, upper) && KeyIterator::between(hi_glb_ki, *hi, upper)); - assert(KeyIterator::between(hi_glb_ki, hi_glb_ki, upper)) by { K::cmp_properties(); }; // Trigger: DelegationMap::valid() - } - self.not_range_consistent(lo, hi, dst, &bad_ki); + // From glb_spec: + let upper = choose|u| + #![auto] + self.lows.gap(hi_glb_ki, u) && KeyIterator::between( + hi_glb_ki, + *hi, + u, + ); + assert(self@[hi_glb] == self.lows@[hi_glb]@) by { + assert(self.lows@.contains_key(hi_glb)); + assert(self.lows.gap(hi_glb_ki, upper) + && KeyIterator::between(hi_glb_ki, *hi, upper)); + assert(KeyIterator::between( + hi_glb_ki, + hi_glb_ki, + upper, + )) by { + K::cmp_properties(); + }; // Trigger: DelegationMap::valid() } - + self.not_range_consistent(lo, hi, dst, &bad_ki); } + } + } else { + if hi_glb_ki == hi { + assert(false); } else { - if hi_glb_ki == hi { - assert(false); + // Proof Y + if bad_index < hi_glb_index { + // TODO: This duplicates Proof X + assert(bad_index + 1 < self.lows.keys@.len()); + let bad_next = self.lows.keys@[bad_index + 1]; + let bad_next_ki = KeyIterator::new_spec(bad_next); + assert(KeyIterator::between(*lo, bad_ki, *hi)) by { + K::cmp_properties(); + } + assert(self@[bad] != dst@) by { + // Trigger DelegationMap::valid + assert(self.lows.gap(bad_ki, bad_next_ki)) by { + K::cmp_properties(); + }; + assert(KeyIterator::between( + bad_ki, + bad_ki, + bad_next_ki, + )) by { + K::cmp_properties(); + }; + } + self.not_range_consistent(lo, hi, dst, &bad_ki); } else { - // Proof Y - if bad_index < hi_glb_index { - // TODO: This duplicates Proof X - assert(bad_index+1 < self.lows.keys@.len()); - let bad_next = self.lows.keys@[bad_index+1]; - let bad_next_ki = KeyIterator::new_spec(bad_next); - assert(KeyIterator::between(*lo, bad_ki, *hi)) by { K::cmp_properties(); } - assert(self@[bad] != dst@) by { - // Trigger DelegationMap::valid - assert(self.lows.gap(bad_ki, bad_next_ki)) by { K::cmp_properties(); }; - assert(KeyIterator::between(bad_ki, bad_ki, bad_next_ki)) by { K::cmp_properties(); }; - } - self.not_range_consistent(lo, hi, dst, &bad_ki); - } else { - // From glb_spec: - let upper = choose |u| #![auto] self.lows.gap(hi_glb_ki, u) && KeyIterator::between(hi_glb_ki, *hi, u); - assert(self@[hi_glb] == self.lows@[hi_glb]@) by { - assert(self.lows@.contains_key(hi_glb)); - assert(self.lows.gap(hi_glb_ki, upper) && KeyIterator::between(hi_glb_ki, *hi, upper)); - assert(KeyIterator::between(hi_glb_ki, hi_glb_ki, upper)) by { K::cmp_properties(); }; // Trigger: 
DelegationMap::valid() - } - self.not_range_consistent(lo, hi, dst, &bad_ki); + // From glb_spec: + let upper = choose|u| + #![auto] + self.lows.gap(hi_glb_ki, u) && KeyIterator::between( + hi_glb_ki, + *hi, + u, + ); + assert(self@[hi_glb] == self.lows@[hi_glb]@) by { + assert(self.lows@.contains_key(hi_glb)); + assert(self.lows.gap(hi_glb_ki, upper) + && KeyIterator::between(hi_glb_ki, *hi, upper)); + assert(KeyIterator::between( + hi_glb_ki, + hi_glb_ki, + upper, + )) by { + K::cmp_properties(); + }; // Trigger: DelegationMap::valid() } + self.not_range_consistent(lo, hi, dst, &bad_ki); } } - } - - if !(forall |i:int| lo_glb_index <= i < hi_glb_index ==> #[trigger](self.lows@[self.lows.keys@[i]]@) == dst@) { - // Choose a badder index - let bad_index = choose |bad_index| #![auto] lo_glb_index <= bad_index < hi_glb_index && self.lows@[self.lows.keys@[bad_index]]@ != dst@; - let bad = self.lows.keys@[bad_index]; - let bad_ki = KeyIterator::new_spec(bad); - - if bad_index == lo_glb_index { - // TODO: Duplicates proof above - let lo_k = *lo.get(); - let upper = choose |u| #[trigger] self.lows.gap(lo_glb_ki, u) && KeyIterator::between(lo_glb_ki, KeyIterator::new_spec(lo_k), u); - assert(self.lows@.contains_key(lo_glb)); - assert(self.lows.gap(KeyIterator::new_spec(lo_glb), upper)); - assert(KeyIterator::between(KeyIterator::new_spec(lo_glb), KeyIterator::new_spec(lo_k), upper)); - assert(self@[lo_k] == self.lows@[lo_glb]@); - assert(self.lows@[lo_glb]@ == self.lows@[self.lows.keys@[bad_index]]@); - assert(self@[lo_k] != dst@); - assert(KeyIterator::between(*lo, *lo, *hi)) by { K::cmp_properties(); } - self.not_range_consistent(lo, hi, dst, lo); - } else { - // TODO: This duplicates Proof X - assert(bad_index+1 < self.lows.keys@.len()); - let bad_next = self.lows.keys@[bad_index+1]; - let bad_next_ki = KeyIterator::new_spec(bad_next); - assert(KeyIterator::between(*lo, bad_ki, *hi)) by { + } + if !(forall|i: int| + lo_glb_index <= i < hi_glb_index ==> #[trigger] ( + self.lows@[self.lows.keys@[i]]@) == dst@) { + // Choose a badder index + let bad_index = choose|bad_index| + #![auto] + lo_glb_index <= bad_index < hi_glb_index + && self.lows@[self.lows.keys@[bad_index]]@ != dst@; + let bad = self.lows.keys@[bad_index]; + let bad_ki = KeyIterator::new_spec(bad); + if bad_index == lo_glb_index { + // TODO: Duplicates proof above + let lo_k = *lo.get(); + let upper = choose|u| #[trigger] + self.lows.gap(lo_glb_ki, u) && KeyIterator::between( + lo_glb_ki, + KeyIterator::new_spec(lo_k), + u, + ); + assert(self.lows@.contains_key(lo_glb)); + assert(self.lows.gap(KeyIterator::new_spec(lo_glb), upper)); + assert(KeyIterator::between( + KeyIterator::new_spec(lo_glb), + KeyIterator::new_spec(lo_k), + upper, + )); + assert(self@[lo_k] == self.lows@[lo_glb]@); + assert(self.lows@[lo_glb]@ + == self.lows@[self.lows.keys@[bad_index]]@); + assert(self@[lo_k] != dst@); + assert(KeyIterator::between(*lo, *lo, *hi)) by { + K::cmp_properties(); + } + self.not_range_consistent(lo, hi, dst, lo); + } else { + // TODO: This duplicates Proof X + assert(bad_index + 1 < self.lows.keys@.len()); + let bad_next = self.lows.keys@[bad_index + 1]; + let bad_next_ki = KeyIterator::new_spec(bad_next); + assert(KeyIterator::between(*lo, bad_ki, *hi)) by { + K::cmp_properties(); + assert(self.lows@.contains_key(bad)); // Trigger conclusion of glb_spec + } + assert(self@[bad] != dst@) by { + // Trigger DelegationMap::valid + assert(self.lows.gap(bad_ki, bad_next_ki)) by { K::cmp_properties(); - assert(self.lows@.contains_key(bad)); 
// Trigger conclusion of glb_spec - } - assert(self@[bad] != dst@) by { - // Trigger DelegationMap::valid - assert(self.lows.gap(bad_ki, bad_next_ki)) by { K::cmp_properties(); }; - assert(KeyIterator::between(bad_ki, bad_ki, bad_next_ki)) by { K::cmp_properties(); }; - } - self.not_range_consistent(lo, hi, dst, &bad_ki); + }; + assert(KeyIterator::between(bad_ki, bad_ki, bad_next_ki)) + by { + K::cmp_properties(); + }; } + self.not_range_consistent(lo, hi, dst, &bad_ki); } - } } } } - ret - } else { - proof { - self.empty_key_range_is_consistent(lo, hi, dst); - } - true } + ret + } else { + proof { + self.empty_key_range_is_consistent(lo, hi, dst); + } + true } } +} - impl DelegationMap { - - pub fn delegate_for_key_range_is_host_impl(&self, lo: &KeyIterator, hi: &KeyIterator, dst: &ID) -> (b: bool) - requires - self.valid(), - ensures - b == AbstractDelegationMap::delegate_for_key_range_is_host(AbstractDelegationMap(self@), KeyRange { lo: *lo, hi: *hi }, dst@), - { - let ret = self.range_consistent_impl(lo, hi, dst); - proof { - let kr = KeyRange { lo: *lo, hi: *hi }; - if ret { - assert forall |k| #[trigger] kr.contains(k) implies self@[k] == dst@ by { - assert(KeyIterator::between(*lo, KeyIterator::new_spec(k), *hi)); // Trigger for range_consistent - } - } else { - let k = choose |k| KeyIterator::between(*lo, KeyIterator::new_spec(k), *hi) && #[trigger] self@[k] != dst@; - assert(kr.contains(k)); +impl DelegationMap { + pub fn delegate_for_key_range_is_host_impl( + &self, + lo: &KeyIterator, + hi: &KeyIterator, + dst: &ID, + ) -> (b: bool) + requires + self.valid(), + ensures + b == AbstractDelegationMap::delegate_for_key_range_is_host( + AbstractDelegationMap(self@), + KeyRange { lo: *lo, hi: *hi }, + dst@, + ), + { + let ret = self.range_consistent_impl(lo, hi, dst); + proof { + let kr = KeyRange { lo: *lo, hi: *hi }; + if ret { + assert forall|k| #[trigger] kr.contains(k) implies self@[k] == dst@ by { + assert(KeyIterator::between(*lo, KeyIterator::new_spec(k), *hi)); // Trigger for range_consistent } + } else { + let k = choose|k| + KeyIterator::between(*lo, KeyIterator::new_spec(k), *hi) && #[trigger] self@[k] + != dst@; + assert(kr.contains(k)); } - ret } - + ret } +} - // Another waste of time because we missed an verifier::ext_equal on a struct. :eyeroll: - impl DelegationMap { - - pub proof fn lemma_set_is_update(pre: Self, post: Self, lo: KeyIterator, hi: KeyIterator, dst: &ID) +// Another waste of time because we missed an verifier::ext_equal on a struct. 
:eyeroll: +impl DelegationMap { + pub proof fn lemma_set_is_update( + pre: Self, + post: Self, + lo: KeyIterator, + hi: KeyIterator, + dst: &ID, + ) requires pre.valid(), dst@.valid_physical_address(), // fn set postconditions post.valid(), - forall |ki:KeyIterator| #[trigger] KeyIterator::between(lo, ki, hi) ==> post@[*ki.get()] == dst@, - forall |ki:KeyIterator| !ki.is_end_spec() && !(#[trigger] KeyIterator::between(lo, ki, hi)) ==> post@[*ki.get()] == pre@[*ki.get()], + forall|ki: KeyIterator| #[trigger] + KeyIterator::between(lo, ki, hi) ==> post@[*ki.get()] == dst@, + forall|ki: KeyIterator| + !ki.is_end_spec() && !(#[trigger] KeyIterator::between(lo, ki, hi)) + ==> post@[*ki.get()] == pre@[*ki.get()], ensures - AbstractDelegationMap(post@) =~= AbstractDelegationMap(pre@).update(KeyRange{lo, hi}, dst@), - { - // let setted = AbstractDelegationMap(post@); - // let updated = AbstractDelegationMap(pre@).update(KeyRange{lo, hi}, dst@); - // assert forall |k| setted.0.contains_key(k) <==> updated.0.contains_key(k) by {} - // assert forall |k| setted.0.contains_key(k) implies setted.0[k] == updated.0[k] by {} - //AbstractDelegationMap(self@.union_prefer_right(Map::new(|k| newkr.contains(k), |k| host))) - // assert( AbstractDelegationMap(post@) =~= AbstractDelegationMap(pre@).update(KeyRange{lo, hi}, dst@) ); - } + AbstractDelegationMap(post@) =~= AbstractDelegationMap(pre@).update( + KeyRange { lo, hi }, + dst@, + ), + { + // let setted = AbstractDelegationMap(post@); + // let updated = AbstractDelegationMap(pre@).update(KeyRange{lo, hi}, dst@); + // assert forall |k| setted.0.contains_key(k) <==> updated.0.contains_key(k) by {} + // assert forall |k| setted.0.contains_key(k) implies setted.0[k] == updated.0[k] by {} + //AbstractDelegationMap(self@.union_prefer_right(Map::new(|k| newkr.contains(k), |k| host))) + // assert( AbstractDelegationMap(post@) =~= AbstractDelegationMap(pre@).update(KeyRange{lo, hi}, dst@) ); } +} - } // end verus! +} // verus! + // end verus! } mod endpoint_hashmap_t { @@ -2344,171 +2700,208 @@ mod endpoint_hashmap_t { verus! { +#[verifier(external_body)] +pub struct HashMap< + #[verifier(strictly_positive)] + V, +> { + m: collections::HashMap, +} + +impl HashMap { + /// The abstract contents of the HashMap. + pub closed spec fn view(self) -> Map; + #[verifier(external_body)] - pub struct HashMap<#[verifier(strictly_positive)] V> { - m: collections::HashMap, + pub fn new() -> (out: Self) + ensures + out@ == Map::::empty(), + { + HashMap { m: collections::HashMap::new() } } - impl HashMap + #[verifier(external_body)] + pub fn insert(&mut self, key: &EndPoint, value: V) + ensures + self@ == old(self)@.insert(key@, value), { - /// The abstract contents of the HashMap. 
- pub closed spec fn view(self) -> Map; - - #[verifier(external_body)] - pub fn new() -> (out: Self) - ensures out@ == Map::::empty() - { - HashMap { m: collections::HashMap::new() } - } - - #[verifier(external_body)] - pub fn insert(&mut self, key: &EndPoint, value: V) - ensures self@ == old(self)@.insert(key@, value) - { - let key_clone: EndPoint = key.clone_up_to_view(); - self.m.insert(key_clone, value); - } + let key_clone: EndPoint = key.clone_up_to_view(); + self.m.insert(key_clone, value); + } - pub open spec fn spec_index(self, key: &EndPoint) -> V + pub open spec fn spec_index(self, key: &EndPoint) -> V recommends self@.contains_key(key@), - { - self@[key@] - } + { + self@[key@] + } - pub open spec fn get_spec(map_v: Map, key: AbstractEndPoint) -> (value: Option) - { - if map_v.dom().contains(key) { - Some(map_v[key]) - } else { - None - } + pub open spec fn get_spec(map_v: Map, key: AbstractEndPoint) -> (value: + Option) { + if map_v.dom().contains(key) { + Some(map_v[key]) + } else { + None } + } - #[verifier(external_body)] - pub fn get<'a>(&'a self, key: &EndPoint) -> (value: Option<&'a V>) + #[verifier(external_body)] + pub fn get<'a>(&'a self, key: &EndPoint) -> (value: Option<&'a V>) ensures - value == match Self::get_spec(self@, key@) { Some(v) => Some(&v), None => None }, - { - match self.m.get(&key) { + value == match Self::get_spec(self@, key@) { + Some(v) => Some(&v), + None => None, + }, + { + match self.m.get(&key) { std::option::Option::Some(v) => Some(v), std::option::Option::None => None, - } } + } - // TODO replace put_spec with insert spec - pub open spec fn put_spec(old_map_v: Map, new_map_v: Map, key: AbstractEndPoint, value: V) -> bool - { - new_map_v == old_map_v.insert(key, value) - // &&& new_map_v.contains_key(key) - // &&& new_map_v[key] == value - // &&& forall |k| /*#![auto]*/ k != key ==> if old_map_v.contains_key(k) { - // (#[trigger] new_map_v.contains_key(k)) && new_map_v[k] == old_map_v[k] - // } else { - // !new_map_v.contains_key(k) - // } - } + // TODO replace put_spec with insert spec + pub open spec fn put_spec( + old_map_v: Map, + new_map_v: Map, + key: AbstractEndPoint, + value: V, + ) -> bool { + new_map_v == old_map_v.insert( + key, + value, + ) + // &&& new_map_v.contains_key(key) + // &&& new_map_v[key] == value + // &&& forall |k| /*#![auto]*/ k != key ==> if old_map_v.contains_key(k) { + // (#[trigger] new_map_v.contains_key(k)) && new_map_v[k] == old_map_v[k] + // } else { + // !new_map_v.contains_key(k) + // } + + } - //#[verifier(external_body)] - //TODO: replace call sites with insert - pub fn put(&mut self, key: &EndPoint, value: V) + //#[verifier(external_body)] + //TODO: replace call sites with insert + pub fn put(&mut self, key: &EndPoint, value: V) ensures Self::put_spec(old(self)@, self@, key@, value), - { - self.insert(key, value); - } + { + self.insert(key, value); + } - pub open spec fn swap_spec(old_map_v: Map, new_map_v: Map, key: AbstractEndPoint, input_value: V, output_value: V, default_value: V) -> bool - { - &&& match Self::get_spec(old_map_v, key) { - Some(v) => output_value == v, - None => output_value == default_value, - } - &&& Self::put_spec(old_map_v, new_map_v, key, input_value) + pub open spec fn swap_spec( + old_map_v: Map, + new_map_v: Map, + key: AbstractEndPoint, + input_value: V, + output_value: V, + default_value: V, + ) -> bool { + &&& match Self::get_spec(old_map_v, key) { + Some(v) => output_value == v, + None => output_value == default_value, } + &&& Self::put_spec(old_map_v, new_map_v, key, 
input_value) + } - #[verifier(external_body)] - pub fn swap<'a>(&'a mut self, key: &EndPoint, updated_value: &'a mut V, default_value: V) + #[verifier(external_body)] + pub fn swap<'a>(&'a mut self, key: &EndPoint, updated_value: &'a mut V, default_value: V) ensures - Self::swap_spec(old(self)@, self@, key@, *old(updated_value), *updated_value, default_value), - { - match self.m.get_mut(key) { - Some(v) => core::mem::swap(v, updated_value), - None => { - let mut swap_tmp = default_value; - core::mem::swap(&mut swap_tmp, updated_value); - self.put(key, swap_tmp); - } - } - } - - #[verifier(external_body)] - pub fn keys(&self) -> (out: Vec) - ensures out@.map_values(|e: EndPoint| e@).to_set() == self@.dom() - { - self.m.keys().map(|k| k.clone_up_to_view()).collect() + Self::swap_spec( + old(self)@, + self@, + key@, + *old(updated_value), + *updated_value, + default_value, + ), + { + match self.m.get_mut(key) { + Some(v) => core::mem::swap(v, updated_value), + None => { + let mut swap_tmp = default_value; + core::mem::swap(&mut swap_tmp, updated_value); + self.put(key, swap_tmp); + }, } } + + #[verifier(external_body)] + pub fn keys(&self) -> (out: Vec) + ensures + out@.map_values(|e: EndPoint| e@).to_set() == self@.dom(), + { + self.m.keys().map(|k| k.clone_up_to_view()).collect() } } +} // verus! +} + mod environment_t { #![verus::trusted] use vstd::prelude::*; verus! { - pub struct LPacket { - pub dst: IdType, - pub src: IdType, - pub msg: MessageType, - } +pub struct LPacket { + pub dst: IdType, + pub src: IdType, + pub msg: MessageType, +} - #[is_variant] - pub enum LIoOp { - Send{s: LPacket}, - Receive{r: LPacket}, - TimeoutReceive{}, - ReadClock{t: int}, - } +#[is_variant] +pub enum LIoOp { + Send { s: LPacket }, + Receive { r: LPacket }, + TimeoutReceive { }, + ReadClock { t: int }, +} - #[is_variant] - pub enum LEnvStep { - HostIos{actor: IdType, ios: Seq>}, - DeliverPacket{p: LPacket}, - AdvanceTime{}, - Stutter{}, - } +#[is_variant] +pub enum LEnvStep { + HostIos { actor: IdType, ios: Seq> }, + DeliverPacket { p: LPacket }, + AdvanceTime { }, + Stutter { }, +} - pub struct LHostInfo { - pub queue: Seq>, - } +pub struct LHostInfo { + pub queue: Seq>, +} - pub struct LEnvironment<#[verifier(maybe_negative)] IdType, #[verifier::maybe_negative] MessageType> { - pub time:int, - pub sent_packets:Set>, - pub host_info:Map>, - pub next_step:LEnvStep, - } +pub struct LEnvironment< + #[verifier(maybe_negative)] + IdType, + #[verifier::maybe_negative] + MessageType, +> { + pub time: int, + pub sent_packets: Set>, + pub host_info: Map>, + pub next_step: LEnvStep, +} - pub open spec fn is_valid_lio_op(io:LIoOp, actor:IdType, e:LEnvironment) -> bool - { - match io { - LIoOp::Send{s} => s.src == actor, - LIoOp::Receive{r} => r.dst == actor, - LIoOp::TimeoutReceive{} => true, - LIoOp::ReadClock{t} => true, - } +pub open spec fn is_valid_lio_op( + io: LIoOp, + actor: IdType, + e: LEnvironment, +) -> bool { + match io { + LIoOp::Send { s } => s.src == actor, + LIoOp::Receive { r } => r.dst == actor, + LIoOp::TimeoutReceive { } => true, + LIoOp::ReadClock { t } => true, } +} - // These Ironfleet predicates go away, replaced by a requires-type check in NetClient receieve and - // send interaces. - // LIoOpOrderingOKForAction - // LIoOpSeqCompatibleWithReduction - - +// These Ironfleet predicates go away, replaced by a requires-type check in NetClient receieve and +// send interaces. +// LIoOpOrderingOKForAction +// LIoOpSeqCompatibleWithReduction - } // verus +} // verus! 
+ // verus } mod hashmap_t { @@ -2534,216 +2927,241 @@ mod hashmap_t { verus! { - #[verifier(external_body)] - pub struct CKeyHashMap { - m: collections::HashMap>, - } +#[verifier(external_body)] +pub struct CKeyHashMap { + m: collections::HashMap>, +} - impl CKeyHashMap { - /// The abstract contents of the CKeyHashMap. - pub closed spec fn view(self) -> Map>; +impl CKeyHashMap { + /// The abstract contents of the CKeyHashMap. + pub closed spec fn view(self) -> Map>; - #[verifier(external_body)] - pub fn new() -> (out: CKeyHashMap) - ensures out@ == Map::>::empty() - { - CKeyHashMap { m: collections::HashMap::new() } - } + #[verifier(external_body)] + pub fn new() -> (out: CKeyHashMap) + ensures + out@ == Map::>::empty(), + { + CKeyHashMap { m: collections::HashMap::new() } + } - #[verifier::external_body] - pub fn len(&self) -> (l: usize) - ensures l as int == self@.len() - { - self.m.len() - } + #[verifier::external_body] + pub fn len(&self) -> (l: usize) + ensures + l as int == self@.len(), + { + self.m.len() + } - #[verifier(external_body)] - pub fn insert(&mut self, key: CKey, value: Vec) - ensures self@ == old(self)@.insert(key, value@) - { - //TODO(parno): think carefully of properties we must demand of Key for this ensures to be correct. - // (If Key has a nondeterministic hash, this ensures will be a lie.) - self.m.insert(key, value); - } + #[verifier(external_body)] + pub fn insert(&mut self, key: CKey, value: Vec) + ensures + self@ == old(self)@.insert(key, value@), + { + //TODO(parno): think carefully of properties we must demand of Key for this ensures to be correct. + // (If Key has a nondeterministic hash, this ensures will be a lie.) + self.m.insert(key, value); + } - #[verifier(external_body)] - pub fn remove(&mut self, key: &CKey) - ensures self@ == old(self)@.remove(*key) - { - panic!() - } + #[verifier(external_body)] + pub fn remove(&mut self, key: &CKey) + ensures + self@ == old(self)@.remove(*key), + { + panic!() + } - #[verifier(external_body)] - pub fn get(&self, key: &CKey) -> (value: Option<&Vec>) - ensures (match value { - Some(v) => self@.dom().contains(*key) && self@[*key] == v@, - None => !self@.dom().contains(*key), - }) - { - //TODO(parno): think carefully of properties we must demand of Key for this ensures to be correct. - // (If Key has a nondeterministic hash, this ensures will be a lie.) - match self.m.get(&key) { + #[verifier(external_body)] + pub fn get(&self, key: &CKey) -> (value: Option<&Vec>) + ensures + (match value { + Some(v) => self@.dom().contains(*key) && self@[*key] == v@, + None => !self@.dom().contains(*key), + }), + { + //TODO(parno): think carefully of properties we must demand of Key for this ensures to be correct. + // (If Key has a nondeterministic hash, this ensures will be a lie.) 
+ match self.m.get(&key) { std::option::Option::Some(v) => Some(v), std::option::Option::None => None, - } } + } - #[verifier(external_body)] - pub fn bulk_update(&mut self, kr: &KeyRange::, other: &Self) - ensures self@ == Map::>::new( - |k: AbstractKey| (old(self)@.dom().contains(k) || other@.dom().contains(k)) - && (kr.contains(k) ==> other@.dom().contains(k)), - |k: AbstractKey| if other@.dom().contains(k) { other@[k] } else { old(self)@[k] } - ) - { - panic!() - } + #[verifier(external_body)] + pub fn bulk_update(&mut self, kr: &KeyRange::, other: &Self) + ensures + self@ == Map::>::new( + |k: AbstractKey| + (old(self)@.dom().contains(k) || other@.dom().contains(k)) && (kr.contains(k) + ==> other@.dom().contains(k)), + |k: AbstractKey| + if other@.dom().contains(k) { + other@[k] + } else { + old(self)@[k] + }, + ), + { + panic!() + } - #[verifier(external_body)] - pub fn bulk_remove(&mut self, kr: &KeyRange::) + #[verifier(external_body)] + pub fn bulk_remove(&mut self, kr: &KeyRange::) ensures self@ == Map::>::new( |k: AbstractKey| old(self)@.dom().contains(k) && !kr.contains(k), - |k: AbstractKey| old(self)@[k]) - { - panic!() - } + |k: AbstractKey| old(self)@[k], + ), + { + panic!() + } - pub closed spec fn spec_to_vec(&self) -> Vec; - pub closed spec fn spec_from_vec(v: Vec) -> Self; - #[verifier(external_body)] - #[verifier(when_used_as_spec(spec_to_vec))] - pub fn to_vec(&self) -> (res: Vec) - ensures res == self.spec_to_vec() - { - let mut v: std::vec::Vec<(u64, std::vec::Vec)> = - self.m.iter().map(|(k, v)| (k.ukey, v.clone())).collect(); - v.sort(); - v.into_iter() - .map(|(k, v)| CKeyKV { k: CKey { ukey: k }, v }) - .collect() - } - #[verifier(external_body)] - #[verifier(when_used_as_spec(spec_from_vec))] - pub fn from_vec(v: Vec) -> (res: Self) - ensures res == Self::spec_from_vec(v) - { - let mut res = CKeyHashMap::new(); - for CKeyKV { k, v } in v { - res.insert(k, v); - } - res + pub closed spec fn spec_to_vec(&self) -> Vec; + + pub closed spec fn spec_from_vec(v: Vec) -> Self; + + #[verifier(external_body)] + #[verifier(when_used_as_spec(spec_to_vec))] + pub fn to_vec(&self) -> (res: Vec) + ensures + res == self.spec_to_vec(), + { + let mut v: std::vec::Vec<(u64, std::vec::Vec)> = self.m.iter().map( + |(k, v)| (k.ukey, v.clone()), + ).collect(); + v.sort(); + v.into_iter().map(|(k, v)| CKeyKV { k: CKey { ukey: k }, v }).collect() + } + + #[verifier(external_body)] + #[verifier(when_used_as_spec(spec_from_vec))] + pub fn from_vec(v: Vec) -> (res: Self) + ensures + res == Self::spec_from_vec(v), + { + let mut res = CKeyHashMap::new(); + for CKeyKV { k, v } in v { + res.insert(k, v); } - #[verifier(external_body)] - #[verifier(broadcast_forall)] - pub proof fn lemma_to_vec(self) - ensures + res + } + + #[verifier(external_body)] + #[verifier(broadcast_forall)] + pub proof fn lemma_to_vec(self) + ensures #[trigger(self.spec_to_vec())] Self::spec_from_vec(self.spec_to_vec()) == self, self.spec_to_vec().len() == self@.dom().len(), spec_sorted_keys(self.spec_to_vec()), - (forall |i: int| - #![trigger(self.spec_to_vec()[i])] - 0 <= i < self.spec_to_vec().len() ==> { - let (k, v) = self.spec_to_vec()[i]@; - self@.contains_pair(k, v) - }); - #[verifier(external_body)] - pub proof fn lemma_to_vec_view(self, other: Self) - ensures - (self@ == other@ <==> self.spec_to_vec()@ == other.spec_to_vec()@) - && (self@ == other@ <==> ( - self.spec_to_vec().len() == other.spec_to_vec().len() && - forall |i: int| #![auto] 0 <= i < self.spec_to_vec().len() ==> - self.spec_to_vec()[i]@ == 
other.spec_to_vec()[i]@ - )); - #[verifier(external_body)] - #[verifier(broadcast_forall)] - pub proof fn lemma_from_vec(v: Vec) - ensures + (forall|i: int| + #![trigger(self.spec_to_vec()[i])] + 0 <= i < self.spec_to_vec().len() ==> { + let (k, v) = self.spec_to_vec()[i]@; + self@.contains_pair(k, v) + }), + ; + + #[verifier(external_body)] + pub proof fn lemma_to_vec_view(self, other: Self) + ensures + (self@ == other@ <==> self.spec_to_vec()@ == other.spec_to_vec()@) && (self@ == other@ + <==> (self.spec_to_vec().len() == other.spec_to_vec().len() && forall|i: int| + #![auto] + 0 <= i < self.spec_to_vec().len() ==> self.spec_to_vec()[i]@ + == other.spec_to_vec()[i]@)), + ; + + #[verifier(external_body)] + #[verifier(broadcast_forall)] + pub proof fn lemma_from_vec(v: Vec) + ensures #[trigger(Self::spec_from_vec(v))] - spec_sorted_keys(v) ==> Self::spec_from_vec(v).spec_to_vec() == v; + spec_sorted_keys(v) ==> Self::spec_from_vec(v).spec_to_vec() == v, + ; - #[verifier(external_body)] - pub fn clone_up_to_view(&self) -> (out: Self) + #[verifier(external_body)] + pub fn clone_up_to_view(&self) -> (out: Self) ensures - out@ == self@ - { - Self::from_vec(self.to_vec()) - } + out@ == self@, + { + Self::from_vec(self.to_vec()) + } - #[verifier(external_body)] - pub fn valid(&self) -> (b: bool) - ensures b == valid_hashtable(self@) - { - panic!() - } + #[verifier(external_body)] + pub fn valid(&self) -> (b: bool) + ensures + b == valid_hashtable(self@), + { + panic!() + } - pub open spec fn filter_spec(self, fs: FnSpec(CKey)->bool) -> Map> - { - Map::>::new( - |k: AbstractKey| self@.dom().contains(k) && fs(k), - |k: AbstractKey| self@[k] - ) - } + pub open spec fn filter_spec(self, fs: FnSpec(CKey) -> bool) -> Map> { + Map::>::new( + |k: AbstractKey| self@.dom().contains(k) && fs(k), + |k: AbstractKey| self@[k], + ) + } - // This thing should be a Verus library - pub open spec fn predicate_modelsbool>( - exec_fn: EF, spec_fn: FnSpec(T)->bool) -> bool - { - &&& forall |t| #![auto] exec_fn.requires((t,)) - &&& forall |t, b| exec_fn.ensures((t,), b) ==> spec_fn(t)==b - } + // This thing should be a Verus library + pub open spec fn predicate_models bool>( + exec_fn: EF, + spec_fn: FnSpec(T) -> bool, + ) -> bool { + &&& forall|t| #![auto] exec_fn.requires((t,)) + &&& forall|t, b| exec_fn.ensures((t,), b) ==> spec_fn(t) == b + } - #[verifier(external_body)] // iter is not supported - pub fn filterbool>(&self, f: F, fs: Ghostbool>) -> (res: Self) + #[verifier(external_body)] // iter is not supported + pub fn filter bool>(&self, f: F, fs: Ghost bool>) -> (res: Self) requires Self::predicate_models(f, fs@), ensures res@ == self.filter_spec(fs@), - { - let mut res = CKeyHashMap::new(); - let mut iter = self.m.iter(); - let cur: Option<(&CKey, &Vec)> = iter.next(); - while cur.is_some() { - let Some((key, val)) = cur else { panic!() /* covered by while condition */ }; - res.insert(key.clone(), val.clone()); - } - res + { + let mut res = CKeyHashMap::new(); + let mut iter = self.m.iter(); + let cur: Option<(&CKey, &Vec)> = iter.next(); + while cur.is_some() { + let Some((key, val)) = cur else { + panic!() /* covered by while condition */ + + }; + res.insert(key.clone(), val.clone()); } + res } +} - // pub struct KeyIterator - // { - // view: Set, - // iter: Keys>, - // } - - pub struct CKeyKV { - pub k: CKey, - pub v: Vec, - } +// pub struct KeyIterator +// { +// view: Set, +// iter: Keys>, +// } +pub struct CKeyKV { + pub k: CKey, + pub v: Vec, +} - impl CKeyKV { - pub open spec fn view(self) -> 
(AbstractKey, Seq) - { - (self.k, self.v@) - } +impl CKeyKV { + pub open spec fn view(self) -> (AbstractKey, Seq) { + (self.k, self.v@) } +} - pub open spec fn ckeykvlt(a: CKeyKV, b: CKeyKV) -> bool { - a.k.ukey < b.k.ukey - } +pub open spec fn ckeykvlt(a: CKeyKV, b: CKeyKV) -> bool { + a.k.ukey < b.k.ukey +} - pub open spec fn spec_sorted_keys(v: Vec) -> bool { - // ckeykvlt ensures that this forall does not create a trigger loop on - // v@[i].k.ukey, v@[i+1].k.ukey, ... - // - // we weren't able to fix this by making the whole < the trigger - forall |i: int, j: int| 0 <= i && i + 1 < v.len() && j == i+1 ==> #[trigger] ckeykvlt(v@[i], v@[j]) - } +pub open spec fn spec_sorted_keys(v: Vec) -> bool { + // ckeykvlt ensures that this forall does not create a trigger loop on + // v@[i].k.ukey, v@[i+1].k.ukey, ... + // + // we weren't able to fix this by making the whole < the trigger + forall|i: int, j: int| + 0 <= i && i + 1 < v.len() && j == i + 1 ==> #[trigger] ckeykvlt(v@[i], v@[j]) +} - } +} // verus! } mod host_impl_t { @@ -2778,131 +3196,133 @@ mod host_impl_t { verus! { - type Ios = Seq; +type Ios = Seq; +// Verus doesn't yet support associated types or trait type parameters, so we can't +// abstract the ConcreteConfiguration type as IronFleet does. Instead, our protocol +// init accepts the Args on the command line. +// +//pub trait ConcreteConfiguration { +// open spec fn init(&self) -> bool; +// +// open spec fn to_servers(&self) -> Set; +//} +pub struct EventResults { + // What netc actually observed: + pub recvs: Seq, + pub clocks: Seq, + pub sends: Seq, + /// What we were trying to make happen: + /// ios may claim an event that doesn't appear in event_seq() if, say, the netc socket broke on + /// send. We already received, so the only way we can refine is by claiming we finished the + /// corresponding send (in ios). In this case, the postcondition of next_ensures gives + /// us the out because !netc.ok allows ios!=event_seq(). + pub ios: Ios, +} - // Verus doesn't yet support associated types or trait type parameters, so we can't - // abstract the ConcreteConfiguration type as IronFleet does. Instead, our protocol - // init accepts the Args on the command line. - // - //pub trait ConcreteConfiguration { - // open spec fn init(&self) -> bool; - // - // open spec fn to_servers(&self) -> Set; - //} - - pub struct EventResults { - // What netc actually observed: - pub recvs: Seq, - pub clocks: Seq, - pub sends: Seq, - - /// What we were trying to make happen: - /// ios may claim an event that doesn't appear in event_seq() if, say, the netc socket broke on - /// send. We already received, so the only way we can refine is by claiming we finished the - /// corresponding send (in ios). In this case, the postcondition of next_ensures gives - /// us the out because !netc.ok allows ios!=event_seq(). 
- pub ios: Ios, +impl EventResults { + pub open spec fn event_seq(self) -> Seq { + self.recvs + self.clocks + self.sends } - impl EventResults { - pub open spec fn event_seq(self) -> Seq { - self.recvs + self.clocks + self.sends - } - - pub open spec fn well_typed_events(self) -> bool { - &&& forall |i| 0 <= i < self.recvs.len() ==> self.recvs[i].is_Receive() - &&& forall |i| 0 <= i < self.clocks.len() ==> self.clocks[i].is_ReadClock() || self.clocks[i].is_TimeoutReceive() - &&& forall |i| 0 <= i < self.sends.len() ==> self.sends[i].is_Send() - &&& self.clocks.len() <= 1 - } + pub open spec fn well_typed_events(self) -> bool { + &&& forall|i| 0 <= i < self.recvs.len() ==> self.recvs[i].is_Receive() + &&& forall|i| + 0 <= i < self.clocks.len() ==> self.clocks[i].is_ReadClock() + || self.clocks[i].is_TimeoutReceive() + &&& forall|i| 0 <= i < self.sends.len() ==> self.sends[i].is_Send() + &&& self.clocks.len() <= 1 + } - pub open spec fn empty() -> Self { - EventResults{ - recvs: seq!(), - clocks: seq!(), - sends: seq!(), - ios: seq!(), - } - } + pub open spec fn empty() -> Self { + EventResults { recvs: seq!(), clocks: seq!(), sends: seq!(), ios: seq!() } } +} - /// Translates Common/Framework/Host.s - /// - /// This changes the way NetClient/HostEnvironment is managed slightly - instead - /// of giving the host a reference to the NetClient to hold on to in init (as in - /// Ironfleet), we only let the host borrow it in each call to next_impl and it - /// is owned by the main loop. - - // Obligations for the implementer's HostState implementation. - // We'd like to do this with a trait, so that the auditor could tell statically - // that this trusted file doesn't depend on any surprises from the verified file. - impl HostState { - pub open spec fn init_ensures(netc: &NetClient, args: Args, rc: Option) -> bool - { - match rc { - None => true, - Some(host_state) => { - &&& netc.ok() // port of env.ok.ok() - &&& host_state.invariants(&netc.my_end_point()) - &&& crate::host_protocol_t::init(host_state@, netc.my_end_point(), abstractify_args(args)) - } - } +/// Translates Common/Framework/Host.s +/// +/// This changes the way NetClient/HostEnvironment is managed slightly - instead +/// of giving the host a reference to the NetClient to hold on to in init (as in +/// Ironfleet), we only let the host borrow it in each call to next_impl and it +/// is owned by the main loop. +// Obligations for the implementer's HostState implementation. +// We'd like to do this with a trait, so that the auditor could tell statically +// that this trusted file doesn't depend on any surprises from the verified file. +impl HostState { + pub open spec fn init_ensures(netc: &NetClient, args: Args, rc: Option) -> bool { + match rc { + None => true, + Some(host_state) => { + &&& netc.ok() // port of env.ok.ok() + + &&& host_state.invariants(&netc.my_end_point()) + &&& crate::host_protocol_t::init( + host_state@, + netc.my_end_point(), + abstractify_args(args), + ) + }, } + } - /// No longer takes a netclient and environment; a netclient is loaned to - /// the HostState only for next_impl. - pub fn init_impl(netc: &NetClient, args: &Args) -> (rc: Option) + /// No longer takes a netclient and environment; a netclient is loaned to + /// the HostState only for next_impl. 
+ pub fn init_impl(netc: &NetClient, args: &Args) -> (rc: Option) requires - netc.valid() - // IronFleet also gives us netc.IsOpen(), but it seems to be rotted, so we're ignoring it + netc.valid(), + // IronFleet also gives us netc.IsOpen(), but it seems to be rotted, so we're ignoring it + ensures Self::init_ensures(netc, *args, rc), - { - Self::real_init_impl(netc, args) - } + { + Self::real_init_impl(netc, args) + } // spec fn parse_command_line_configuration(args:Seq>) -> CC; - // spec fn concrete_config_init(cc: CC) -> bool; // spec fn concrete_config_to_servers(cc: CC) -> Set; + pub open spec fn next_requires(self, netc: NetClient) -> bool { + &&& self.invariants(&netc.my_end_point()) + &&& netc.state().is_Receiving() // new wrt ironfleet because we're encoding reduction rules in NetClient interface instead of by reading the history. - pub open spec fn next_requires(self, netc: NetClient) -> bool - { - &&& self.invariants(&netc.my_end_point()) - &&& netc.state().is_Receiving() // new wrt ironfleet because we're encoding reduction rules in NetClient interface instead of by reading the history. - } + } - pub open spec fn next_ensures(old_self: Self, old_netc: NetClient, new_self: Self, new_netc: NetClient, rc: (bool, Ghost)) -> bool + pub open spec fn next_ensures( + old_self: Self, + old_netc: NetClient, + new_self: Self, + new_netc: NetClient, + rc: (bool, Ghost), + ) -> bool { + let (ok, res) = rc; { - let (ok, res) = rc; { - &&& ok == new_netc.ok() - &&& ok ==> new_self.invariants(&new_netc.my_end_point()) - &&& ok ==> Self::next(old_self.view(), new_self.view(), res@.ios) - &&& ok ==> res@.event_seq() == res@.ios - &&& (ok || res@.sends.len()>0) ==> new_netc.history() == old_netc.history() + res@.event_seq() - &&& res@.well_typed_events() - } + &&& ok == new_netc.ok() + &&& ok ==> new_self.invariants(&new_netc.my_end_point()) + &&& ok ==> Self::next(old_self.view(), new_self.view(), res@.ios) + &&& ok ==> res@.event_seq() == res@.ios + &&& (ok || res@.sends.len() > 0) ==> new_netc.history() == old_netc.history() + + res@.event_seq() + &&& res@.well_typed_events() } + } - // This ports Impl/LiveSHT/Host.i::HostNextImpl, riiiiight? - pub fn next_impl(&mut self, netc: &mut NetClient) -> (rc: (bool, Ghost)) - requires - Self::next_requires(*old(self), *old(netc)), - ensures - Self::next_ensures(*old(self), *old(netc), *self, *netc, rc), - { - self.real_next_impl(netc) - } + // This ports Impl/LiveSHT/Host.i::HostNextImpl, riiiiight? + pub fn next_impl(&mut self, netc: &mut NetClient) -> (rc: (bool, Ghost)) + requires + Self::next_requires(*old(self), *old(netc)), + ensures + Self::next_ensures(*old(self), *old(netc), *self, *netc, rc), + { + self.real_next_impl(netc) + } - // this ports Protocol/SHT/Host.i.dfy ::Host_Next - pub open spec fn next(pre: AbstractHostState, post: AbstractHostState, ios: Ios) -> bool - { - host_protocol_t::next(pre, post, abstractify_raw_log_to_ios(ios)) - } + // this ports Protocol/SHT/Host.i.dfy ::Host_Next + pub open spec fn next(pre: AbstractHostState, post: AbstractHostState, ios: Ios) -> bool { + host_protocol_t::next(pre, post, abstractify_raw_log_to_ios(ios)) } +} - } // verus! +} // verus! } mod host_impl_v { @@ -2948,8 +3368,7 @@ mod host_impl_v { verus! 
{ - - /* +/* This file ports this call stack from Ironfleet Distributed/Common/Framework::IronfleetMain.s (trusted) -> host_impl_t @@ -2961,606 +3380,693 @@ mod host_impl_v { Impl/SHT/HostModel::HostModelNextGetRequest */ +pub struct Constants { + pub root_identity: EndPoint, + pub host_ids: Vec, + pub params: Parameters, + pub me: EndPoint, +} - pub struct Constants { - pub root_identity: EndPoint, - pub host_ids: Vec, - pub params: Parameters, - pub me: EndPoint, - } - - impl Constants { - pub open spec fn view(self) -> AbstractConstants { - AbstractConstants{ - root_identity: self.root_identity@, - host_ids: abstractify_end_points(self.host_ids), - params: self.params@, - me: self.me@, - } - } - - pub closed spec fn abstractable(self) -> bool { - true - } - - pub closed spec fn valid(self) -> bool { - &&& self.params.valid() - &&& seq_is_unique(abstractify_end_points(self.host_ids)) - &&& self.root_identity@.valid_physical_address() +impl Constants { + pub open spec fn view(self) -> AbstractConstants { + AbstractConstants { + root_identity: self.root_identity@, + host_ids: abstractify_end_points(self.host_ids), + params: self.params@, + me: self.me@, } } - pub struct Parameters { - pub max_seqno: u64, - pub max_delegations: u64, + pub closed spec fn abstractable(self) -> bool { + true } - impl Parameters { - pub open spec fn view(self) -> AbstractParameters { - AbstractParameters{ - max_seqno: self.max_seqno as nat, - max_delegations: self.max_delegations as nat, - } - } + pub closed spec fn valid(self) -> bool { + &&& self.params.valid() + &&& seq_is_unique(abstractify_end_points(self.host_ids)) + &&& self.root_identity@.valid_physical_address() + } +} - // Translates Impl/SHT/Parameters::StaticParams - pub fn static_params() -> (out: Parameters) - ensures - out@ == AbstractParameters::static_params(), - { - Parameters { - max_seqno: 0xffff_ffff_ffff_ffff, - max_delegations: 0x7FFF_FFFF_FFFF_FFFF, - } - } +pub struct Parameters { + pub max_seqno: u64, + pub max_delegations: u64, +} - // Translates Impl/SHT/Parameters.i.dfy :: CParametersIsValid - pub open spec fn valid(self) -> bool { - &&& self.max_seqno == 0xffff_ffff_ffff_ffff - &&& 3 < self.max_delegations - &&& self.max_delegations < 0x8000_0000_0000_0000 +impl Parameters { + pub open spec fn view(self) -> AbstractParameters { + AbstractParameters { + max_seqno: self.max_seqno as nat, + max_delegations: self.max_delegations as nat, } } - // Translates Impl/LiveSHT/SchedulerModel.i.dfy :: AllIosAreSends - pub open spec fn all_ios_are_sends(ios: Seq) -> bool + // Translates Impl/SHT/Parameters::StaticParams + pub fn static_params() -> (out: Parameters) + ensures + out@ == AbstractParameters::static_params(), { - forall |i: int| 0 <= i && i < ios.len() ==> ios[i].is_Send() + Parameters { max_seqno: 0xffff_ffff_ffff_ffff, max_delegations: 0x7FFF_FFFF_FFFF_FFFF } } - // Translates Impl/SHT/PacketParsing.i.dfy :: AbstractifyCPacketToLSHTPacket - pub open spec fn abstractify_cpacket_to_lsht_packet(cp: CPacket) -> LSHTPacket - recommends cp.abstractable() - { - LPacket{ dst: cp.dst@, src: cp.src@, msg: cp.msg@ } + // Translates Impl/SHT/Parameters.i.dfy :: CParametersIsValid + pub open spec fn valid(self) -> bool { + &&& self.max_seqno == 0xffff_ffff_ffff_ffff + &&& 3 < self.max_delegations + &&& self.max_delegations < 0x8000_0000_0000_0000 } +} - // Translates Impl/LiveSHT/SchedulerModel.i.dfy :: MapSentPacketSeqToIos - pub open spec fn map_sent_packet_seq_to_ios(sent_packets: Seq) -> Seq - { - sent_packets.map_values(|sent_packet: 
CPacket| - LIoOp::>::Send { - s: abstractify_cpacket_to_lsht_packet(sent_packet) - }) - } +// Translates Impl/LiveSHT/SchedulerModel.i.dfy :: AllIosAreSends +pub open spec fn all_ios_are_sends(ios: Seq) -> bool { + forall|i: int| 0 <= i && i < ios.len() ==> ios[i].is_Send() +} - // Translates Impl/LiveSHT/NetSHT.i.dfy :: AbstractifyRawLogToIos - pub open spec fn abstractify_raw_log_to_ios(rawlog: Seq) -> Seq - { - rawlog.map_values(|evt: NetEvent| abstractify_net_event_to_lsht_io(evt)) - } +// Translates Impl/SHT/PacketParsing.i.dfy :: AbstractifyCPacketToLSHTPacket +pub open spec fn abstractify_cpacket_to_lsht_packet(cp: CPacket) -> LSHTPacket + recommends + cp.abstractable(), +{ + LPacket { dst: cp.dst@, src: cp.src@, msg: cp.msg@ } +} - // Translates Impl/LiveSHT/RawIoConsistentWithSpecIO - pub open spec fn raw_io_consistent_with_spec_io(rawlog: Seq, ios: Seq) -> bool - { - &&& net_event_log_is_abstractable(rawlog) - &&& abstractify_raw_log_to_ios(rawlog) == ios - } +// Translates Impl/LiveSHT/SchedulerModel.i.dfy :: MapSentPacketSeqToIos +pub open spec fn map_sent_packet_seq_to_ios(sent_packets: Seq) -> Seq { + sent_packets.map_values( + |sent_packet: CPacket| + LIoOp::>::Send { + s: abstractify_cpacket_to_lsht_packet(sent_packet), + }, + ) +} - pub fn make_empty_event_results() -> (res: Ghost) - ensures - res@.recvs == Seq::::empty(), - res@.clocks == Seq::::empty(), - res@.sends == Seq::::empty(), - res@.ios == Seq::::empty(), - extract_packets_from_abstract_ios(abstractify_raw_log_to_ios(res@.ios)) == Set::::empty(), - { - let ghost res = EventResults { - recvs: Seq::::empty(), - clocks: Seq::::empty(), - sends: Seq::::empty(), - ios: Seq::::empty(), - }; - proof { - assert_sets_equal!(extract_packets_from_abstract_ios(abstractify_raw_log_to_ios(res.ios)), +// Translates Impl/LiveSHT/NetSHT.i.dfy :: AbstractifyRawLogToIos +pub open spec fn abstractify_raw_log_to_ios(rawlog: Seq) -> Seq { + rawlog.map_values(|evt: NetEvent| abstractify_net_event_to_lsht_io(evt)) +} + +// Translates Impl/LiveSHT/RawIoConsistentWithSpecIO +pub open spec fn raw_io_consistent_with_spec_io(rawlog: Seq, ios: Seq) -> bool { + &&& net_event_log_is_abstractable(rawlog) + &&& abstractify_raw_log_to_ios(rawlog) == ios +} + +pub fn make_empty_event_results() -> (res: Ghost) + ensures + res@.recvs == Seq::::empty(), + res@.clocks == Seq::::empty(), + res@.sends == Seq::::empty(), + res@.ios == Seq::::empty(), + extract_packets_from_abstract_ios(abstractify_raw_log_to_ios(res@.ios)) == Set::< + Packet, + >::empty(), +{ + let ghost res = EventResults { + recvs: Seq::::empty(), + clocks: Seq::::empty(), + sends: Seq::::empty(), + ios: Seq::::empty(), + }; + proof { + assert_sets_equal!(extract_packets_from_abstract_ios(abstractify_raw_log_to_ios(res.ios)), Set::::empty()); - }; - Ghost(res) } + ; + Ghost(res) +} - pub fn make_send_only_event_results(net_events: Ghost>) -> (res: Ghost) - requires - forall |i: int| 0 <= i && i < net_events@.len() ==> net_events@[i].is_Send() - ensures - res@.recvs == Seq::::empty(), - res@.clocks == Seq::::empty(), - res@.sends == net_events@, - res@.ios == net_events@, - res@.event_seq() == net_events@, - res@.well_typed_events(), - { - let ghost res = EventResults { - recvs: Seq::::empty(), - clocks: Seq::::empty(), - sends: net_events@, - ios: net_events@, - }; - assert (forall |i| 0 <= i < res.recvs.len() ==> res.recvs[i].is_Receive()); - assert (forall |i| 0 <= i < res.clocks.len() ==> res.clocks[i].is_ReadClock() || res.clocks[i].is_TimeoutReceive()); - assert (forall |i| 0 <= i 
< res.sends.len() ==> res.sends[i].is_Send()); - assert (res.clocks.len() <= 1); - assert (res.well_typed_events()); - proof { assert_seqs_equal!(res.event_seq(), net_events@); }; - Ghost(res) - } - - pub struct HostState { - // Fields from Impl/LiveSHT/SchedulerImpl::SchedulerImpl - next_action_index: u64, - resend_count: u64, - - // Fields from Impl/SHT/HostState::HostState - constants: Constants, - delegation_map: DelegationMap, - h: CKeyHashMap, - sd: CSingleDelivery, - received_packet: Option, - num_delegations: u64, - received_requests: Ghost>, - } - - /// Translates Distributed/Impl/SHT/HostModel.i ExtractRange - fn extract_range_impl(h: &CKeyHashMap, kr: &KeyRange) -> (ext: CKeyHashMap) +pub fn make_send_only_event_results(net_events: Ghost>) -> (res: Ghost) requires - //h@.valid_key_range() // (See Distributed/Services/SHT/AppInterface.i.dfy: ValidKey() == true) - forall |k| h@.contains_key(k) ==> /*#[trigger] valid_key(k) &&*/ #[trigger] valid_value(h@[k]), + forall|i: int| 0 <= i && i < net_events@.len() ==> net_events@[i].is_Send(), + ensures + res@.recvs == Seq::::empty(), + res@.clocks == Seq::::empty(), + res@.sends == net_events@, + res@.ios == net_events@, + res@.event_seq() == net_events@, + res@.well_typed_events(), +{ + let ghost res = EventResults { + recvs: Seq::::empty(), + clocks: Seq::::empty(), + sends: net_events@, + ios: net_events@, + }; + assert(forall|i| 0 <= i < res.recvs.len() ==> res.recvs[i].is_Receive()); + assert(forall|i| + 0 <= i < res.clocks.len() ==> res.clocks[i].is_ReadClock() + || res.clocks[i].is_TimeoutReceive()); + assert(forall|i| 0 <= i < res.sends.len() ==> res.sends[i].is_Send()); + assert(res.clocks.len() <= 1); + assert(res.well_typed_events()); + proof { + assert_seqs_equal!(res.event_seq(), net_events@); + } + ; + Ghost(res) +} + +pub struct HostState { + // Fields from Impl/LiveSHT/SchedulerImpl::SchedulerImpl + next_action_index: u64, + resend_count: u64, + // Fields from Impl/SHT/HostState::HostState + constants: Constants, + delegation_map: DelegationMap, + h: CKeyHashMap, + sd: CSingleDelivery, + received_packet: Option, + num_delegations: u64, + received_requests: Ghost>, +} + +/// Translates Distributed/Impl/SHT/HostModel.i ExtractRange +fn extract_range_impl(h: &CKeyHashMap, kr: &KeyRange) -> (ext: CKeyHashMap) + requires //h@.valid_key_range() +// (See Distributed/Services/SHT/AppInterface.i.dfy: ValidKey() == true) + + forall|k| + h@.contains_key(k) ==> /*#[trigger] valid_key(k) &&*/ + #[trigger] valid_value(h@[k]), ensures ext@ =~= extract_range(h@, *kr), - { - let exec_lambda = |key| -> (b: bool) ensures b == kr.contains(key) { kr.contains_exec(&key)}; +{ + let exec_lambda = |key| -> (b: bool) + ensures + b == kr.contains(key), + { kr.contains_exec(&key) }; + h.filter(exec_lambda, Ghost(|ak| kr.contains(ak))) +} - h.filter(exec_lambda, Ghost(|ak| kr.contains(ak))) +impl HostState { + // AbstractHostState is the protocol host state + pub closed spec fn view(self) -> AbstractHostState { + AbstractHostState { + constants: self.constants@, + delegation_map: AbstractDelegationMap(self.delegation_map@), + h: self.h@, + sd: self.sd@, + received_packet: match self.received_packet { + None => None, + Some(cpacket) => Some(cpacket@), + // TODO(tej): add map to Verus Option + // received_packet.map(|cpacket| cpacket@), + }, + num_delegations: self.num_delegations as int, + received_requests: self.received_requests@, + } } - impl HostState { - // AbstractHostState is the protocol host state - pub closed spec fn view(self) -> 
AbstractHostState - { - AbstractHostState{ - constants: self.constants@, - delegation_map: AbstractDelegationMap(self.delegation_map@), - h: self.h@, - sd: self.sd@, - received_packet: match self.received_packet { - None => None, - Some(cpacket) => Some(cpacket@), - // TODO(tej): add map to Verus Option - // received_packet.map(|cpacket| cpacket@), - }, - num_delegations: self.num_delegations as int, - received_requests: self.received_requests@, - } - } + pub closed spec fn abstractable(&self) -> bool { + self.constants.abstractable() + } - pub closed spec fn abstractable(&self) -> bool - { - self.constants.abstractable() - } + pub closed spec fn valid(&self) -> bool { + &&& self.abstractable() + &&& self.delegation_map.valid() + // TODO why no valid_key? - pub closed spec fn valid(&self) -> bool - { - &&& self.abstractable() - &&& self.delegation_map.valid() - // TODO why no valid_key? - &&& (forall |k| self.h@.dom().contains(k) ==> #[trigger] valid_value(self.h@[k])) - &&& self.sd.valid() - &&& match &self.received_packet { - Some(v) => v.abstractable() && v.msg.is_Message() && v.dst@ == self.constants.me@, - None => true, - } - &&& self.constants.valid() - &&& self.num_delegations < self.constants.params.max_delegations - // TODO why no delegation_map.lows + &&& (forall|k| self.h@.dom().contains(k) ==> #[trigger] valid_value(self.h@[k])) + &&& self.sd.valid() + &&& match &self.received_packet { + Some(v) => v.abstractable() && v.msg.is_Message() && v.dst@ == self.constants.me@, + None => true, } + &&& self.constants.valid() + &&& self.num_delegations + < self.constants.params.max_delegations + // TODO why no delegation_map.lows - /// Translates Impl/LiveSHT/Host.i.dfy :: HostStateInvariants - /// - /// Still many invariants missing; will be faulted in as proof is completed. - pub closed spec fn invariants(&self, netc_end_point: &AbstractEndPoint) -> bool - { - &&& self.next_action_index <3 - &&& self.delegation_map.valid() - &&& self@.constants.me == netc_end_point - &&& self.valid() - &&& self@.constants.me.abstractable() - &&& self.num_delegations < self.constants.params.max_delegations // why did we move this here? - &&& self.constants.params@ == AbstractParameters::static_params() - &&& self.resend_count < 100000000 - } + } - fn parse_end_point(arg: &Arg) -> (out: EndPoint) + /// Translates Impl/LiveSHT/Host.i.dfy :: HostStateInvariants + /// + /// Still many invariants missing; will be faulted in as proof is completed. + pub closed spec fn invariants(&self, netc_end_point: &AbstractEndPoint) -> bool { + &&& self.next_action_index < 3 + &&& self.delegation_map.valid() + &&& self@.constants.me == netc_end_point + &&& self.valid() + &&& self@.constants.me.abstractable() + &&& self.num_delegations + < self.constants.params.max_delegations // why did we move this here? 
+ + &&& self.constants.params@ == AbstractParameters::static_params() + &&& self.resend_count < 100000000 + } + + fn parse_end_point(arg: &Arg) -> (out: EndPoint) ensures out@ == host_protocol_t::parse_arg_as_end_point(arg@), - { - EndPoint{id: clone_arg(arg)} - } + { + EndPoint { id: clone_arg(arg) } + } - // translates Impl/Common/CmdLineParser parse_end_points - fn parse_end_points(args: &Args) -> (out: Option>) + // translates Impl/Common/CmdLineParser parse_end_points + fn parse_end_points(args: &Args) -> (out: Option>) ensures match out { None => host_protocol_t::parse_args(abstractify_args(*args)).is_None(), Some(vec) => { &&& host_protocol_t::parse_args(abstractify_args(*args)).is_Some() - &&& abstractify_end_points(vec) == host_protocol_t::parse_args(abstractify_args(*args)).unwrap() + &&& abstractify_end_points(vec) == host_protocol_t::parse_args( + abstractify_args(*args), + ).unwrap() }, - } - { - let mut end_points: Vec = Vec::new(); - let mut i: usize = 0; - - while i = Vec::new(); + let mut i: usize = 0; + while i < args.len() invariant i <= args.len(), end_points.len() == i, - forall |j| #![auto] 0 <= j < i ==> parse_arg_as_end_point(abstractify_args(*args)[j]) == end_points@[j]@, - forall |j| #![auto] 0 <= j < i ==> end_points@[j]@.valid_physical_address(), - { - let end_point = Self::parse_end_point(&(*args)[i]); - if !end_point.valid_physical_address() { - assert(!unchecked_parse_args(abstractify_args(*args))[i as int].valid_physical_address()); // witness to !forall - return None; - } - end_points.push(end_point); - i = i + 1; - } - - proof { - assert_seqs_equal!(abstractify_end_points(end_points), unchecked_parse_args(abstractify_args(*args))); + forall|j| + #![auto] + 0 <= j < i ==> parse_arg_as_end_point(abstractify_args(*args)[j]) + == end_points@[j]@, + forall|j| #![auto] 0 <= j < i ==> end_points@[j]@.valid_physical_address(), + { + let end_point = Self::parse_end_point(&(*args)[i]); + if !end_point.valid_physical_address() { + assert(!unchecked_parse_args( + abstractify_args(*args), + )[i as int].valid_physical_address()); // witness to !forall + return None; } - Some(end_points) + end_points.push(end_point); + i = i + 1; } + proof { + assert_seqs_equal!(abstractify_end_points(end_points), unchecked_parse_args(abstractify_args(*args))); + } + Some(end_points) + } - //pub open spec fn parse_command_line_configuration_matches(args: &Args, me: EndPoint, rc: Option) - - // Not sure why it's okay that this is now entirely verified, not part of - // the trusted application spec. 
- fn parse_command_line_configuration(args: &Args, me: EndPoint) -> (rc: Option) - ensures ({ - let abstract_end_points = parse_args(abstractify_args(*args)); - match rc { - None => { - ||| abstract_end_points.is_None() - ||| abstract_end_points.unwrap().len()==0 - ||| !seq_is_unique(abstract_end_points.unwrap()) - }, - Some(c) => { - &&& abstract_end_points.is_some() - &&& abstract_end_points.unwrap().len() > 0 - &&& seq_is_unique(abstract_end_points.unwrap()) - &&& c@ == AbstractConstants{ - root_identity: abstract_end_points.unwrap()[0], - host_ids: abstract_end_points.unwrap(), - params: AbstractParameters::static_params(), - me: me@ } - }}}), - { - let end_points = Self::parse_end_points(args); - if matches!(end_points, None) { return None; } - - let abstract_end_points:Ghost>> = Ghost(parse_args(abstractify_args(*args))); - + //pub open spec fn parse_command_line_configuration_matches(args: &Args, me: EndPoint, rc: Option) + // Not sure why it's okay that this is now entirely verified, not part of + // the trusted application spec. + fn parse_command_line_configuration(args: &Args, me: EndPoint) -> (rc: Option) + ensures + ({ + let abstract_end_points = parse_args(abstractify_args(*args)); + match rc { + None => { + ||| abstract_end_points.is_None() + ||| abstract_end_points.unwrap().len() == 0 + ||| !seq_is_unique(abstract_end_points.unwrap()) + }, + Some(c) => { + &&& abstract_end_points.is_some() + &&& abstract_end_points.unwrap().len() > 0 + &&& seq_is_unique(abstract_end_points.unwrap()) + &&& c@ == AbstractConstants { + root_identity: abstract_end_points.unwrap()[0], + host_ids: abstract_end_points.unwrap(), + params: AbstractParameters::static_params(), + me: me@, + } + }, + } + }), + { + let end_points = Self::parse_end_points(args); + if matches!(end_points, None) { + return None; + } + let abstract_end_points: Ghost>> = Ghost( + parse_args(abstractify_args(*args)), + ); + assert(abstract_end_points@.is_some()); + let end_points: Vec = end_points.unwrap(); + if end_points.len() == 0 { + return None; + } + assert(abstract_end_points@.unwrap().len() > 0); + let unique = test_unique(&end_points); + if !unique { + return None; + } + let constants = Constants { + root_identity: end_points[0].clone_up_to_view(), + host_ids: end_points, + params: Parameters::static_params(), + me: me, + }; + proof { + assert(!(abstract_end_points@.is_None() || abstract_end_points@.unwrap().len() == 0)); assert(abstract_end_points@.is_some()); - - let end_points:Vec = end_points.unwrap(); - if end_points.len()==0 { return None; } - assert(abstract_end_points@.unwrap().len() > 0); - - let unique = test_unique(&end_points); - if !unique { - return None; - } - - let constants = Constants { - root_identity: end_points[0].clone_up_to_view(), - host_ids: end_points, - params: Parameters::static_params(), - me: me, - }; - - proof { - assert(!(abstract_end_points@.is_None() || abstract_end_points@.unwrap().len()==0)); - assert( abstract_end_points@.is_some() ); - assert( abstract_end_points@.unwrap().len() > 0 ); - assert( constants@.root_identity == abstract_end_points@.unwrap()[0] ); - assert( constants@.host_ids == abstract_end_points@.unwrap() ); - assert( constants@.params == AbstractParameters::static_params() ); - assert( constants@.me == me@ ); - - assert( constants@ == AbstractConstants{ - root_identity: abstract_end_points@.unwrap()[0], - host_ids: abstract_end_points@.unwrap(), - params: AbstractParameters::static_params(), - me: me@ } ); - - } - Some(constants) + assert(constants@.root_identity 
== abstract_end_points@.unwrap()[0]); + assert(constants@.host_ids == abstract_end_points@.unwrap()); + assert(constants@.params == AbstractParameters::static_params()); + assert(constants@.me == me@); + assert(constants@ == AbstractConstants { + root_identity: abstract_end_points@.unwrap()[0], + host_ids: abstract_end_points@.unwrap(), + params: AbstractParameters::static_params(), + me: me@, + }); } + Some(constants) + } - // translates Impl/LiveSHT/SchedulerImpl :: Host_Init_Impl - // and Impl/LiveSHT/Host :: - pub fn real_init_impl(netc: &NetClient, args: &Args) -> (rc: Option) + // translates Impl/LiveSHT/SchedulerImpl :: Host_Init_Impl + // and Impl/LiveSHT/Host :: + pub fn real_init_impl(netc: &NetClient, args: &Args) -> (rc: Option) requires netc.valid(), ensures Self::init_ensures(netc, *args, rc), - { - let me = netc.get_my_end_point(); - let constants = /*Self -- Verus unimpl*/ HostState::parse_command_line_configuration(args, me); - if matches!(constants, None) { return None; } - let constants = constants.unwrap(); - let spare_root = constants.root_identity.clone_up_to_view(); - let zero_key = SHTKey::zero(); //SHTKey{ukey: 0}; // for some reason we can't make this call inside the ::new method below - assert(SHTKey::zero_spec() == zero_key); - let host_state = HostState{ - next_action_index: 0, - resend_count: 0, - constants: constants, - delegation_map: DelegationMap::new(/*SHTKey::zero()*/ zero_key, spare_root), - h: CKeyHashMap::new(), - sd: CSingleDelivery::empty(), - received_packet: None, - num_delegations: 1, - received_requests: Ghost(Seq::::empty()), - }; - let rc = Some(host_state); - assert(netc.ok()); - assert(host_state.invariants(&netc.my_end_point())); // would pass some initial env state? - assert(host_state@.delegation_map == AbstractDelegationMap::init(constants.root_identity@)) by { - reveal(HostState::view); - assert_maps_equal!(host_state.delegation_map@, AbstractDelegationMap::init(constants.root_identity@)@); - assert(host_state.delegation_map@ == AbstractDelegationMap::init(constants.root_identity@)@); - } - assert(crate::host_protocol_t::init(host_state@, netc.my_end_point(), abstractify_args(*args))); - rc + { + let me = netc.get_my_end_point(); + let constants = /*Self -- Verus unimpl*/ + HostState::parse_command_line_configuration(args, me); + if matches!(constants, None) { + return None; } - - // Translates Impl/LiveSHT/SchedulerImpl.i.dfy :: DeliverPacketSeq - pub fn deliver_packet_seq(&self, netc: &mut NetClient, packets: &Vec) -> - (rc: (bool, Ghost>, Ghost>)) - requires - old(netc).ok(), - outbound_packet_seq_is_valid(packets@), - outbound_packet_seq_has_correct_srcs(packets@, old(netc).my_end_point()), - ensures - netc.my_end_point() == old(netc).my_end_point(), - ({ - let (ok, Ghost(net_events), Ghost(ios)) = rc; - { - &&& netc.ok() <==> ok - &&& ok ==> { - &&& all_ios_are_sends(ios) - &&& (forall |i: int| 0 <= i && i < net_events.len() ==> net_events[i].is_Send()) - &&& ios == map_sent_packet_seq_to_ios(packets@) - &&& abstractify_outbound_packets_to_seq_of_lsht_packets(packets@) == - extract_sent_packets_from_ios(ios) - &&& abstractify_seq_of_cpackets_to_set_of_sht_packets(packets@) == - extract_packets_from_abstract_ios(ios) - &&& no_invalid_sends(ios) - &&& raw_io_consistent_with_spec_io(net_events, ios) - &&& only_sent_marshalable_data(net_events) - &&& netc.history() == old(netc).history() + net_events - } - } - }) - { - let (ok, events) = send_packet_seq(packets, netc); - if !ok { - (ok, Ghost(Seq::::empty()), Ghost(Seq::::empty())) - } 
- else { - let ghost ios: Seq = map_sent_packet_seq_to_ios(packets@); - proof { - assert (abstractify_seq_of_cpackets_to_set_of_sht_packets(packets@) == - extract_packets_from_abstract_ios(ios)) by { - lemma_if_everything_in_seq_satisfies_filter_then_filter_is_identity(ios, |io: LSHTIo| io.is_Send()); - assert (ios.filter(|io: LSHTIo| io.is_Send()) == ios); - let set1 = abstractify_seq_of_cpackets_to_set_of_sht_packets(packets@); - let set2 = extract_packets_from_abstract_ios(ios); - let seq1 = packets@.map_values(|cp: CPacket| cp@); - let seq2 = extract_sent_packets_from_ios(ios).map_values(|lp: LSHTPacket| extract_packet_from_lsht_packet(lp)); - assert (set1 == seq1.to_set()); - assert (set2 == seq2.to_set()); - assert forall |x| set1.contains(x) implies set2.contains(x) by { - let idx: int = choose |idx: int| 0 <= idx && idx < seq1.len() && #[trigger] seq1[idx] == x; - assert (seq2[idx] == x); - assert (set2.contains(x)); - }; - assert forall |x| set2.contains(x) implies set1.contains(x) by { - let idx: int = choose |idx: int| 0 <= idx && idx < seq2.len() && #[trigger] seq2[idx] == x; - assert (seq1[idx] == x); - assert (set1.contains(x)); - }; - assert_sets_equal!(abstractify_seq_of_cpackets_to_set_of_sht_packets(packets@), - extract_packets_from_abstract_ios(ios)); - } - assert (abstractify_outbound_packets_to_seq_of_lsht_packets(packets@) == - extract_sent_packets_from_ios(ios)) by { - lemma_if_everything_in_seq_satisfies_filter_then_filter_is_identity(ios, |io: LSHTIo| io.is_Send()); - assert (ios.filter(|io: LSHTIo| io.is_Send()) == ios); - assert_seqs_equal!(abstractify_outbound_packets_to_seq_of_lsht_packets(packets@), - extract_sent_packets_from_ios(ios)); + let constants = constants.unwrap(); + let spare_root = constants.root_identity.clone_up_to_view(); + let zero_key = SHTKey::zero(); //SHTKey{ukey: 0}; // for some reason we can't make this call inside the ::new method below + assert(SHTKey::zero_spec() == zero_key); + let host_state = HostState { + next_action_index: 0, + resend_count: 0, + constants: constants, + delegation_map: DelegationMap::new( /*SHTKey::zero()*/ + zero_key, spare_root), + h: CKeyHashMap::new(), + sd: CSingleDelivery::empty(), + received_packet: None, + num_delegations: 1, + received_requests: Ghost(Seq::::empty()), + }; + let rc = Some(host_state); + assert(netc.ok()); + assert(host_state.invariants(&netc.my_end_point())); // would pass some initial env state? 
+ assert(host_state@.delegation_map == AbstractDelegationMap::init(constants.root_identity@)) + by { + reveal(HostState::view); + assert_maps_equal!(host_state.delegation_map@, AbstractDelegationMap::init(constants.root_identity@)@); + assert(host_state.delegation_map@ == AbstractDelegationMap::init( + constants.root_identity@, + )@); + } + assert(crate::host_protocol_t::init( + host_state@, + netc.my_end_point(), + abstractify_args(*args), + )); + rc + } + + // Translates Impl/LiveSHT/SchedulerImpl.i.dfy :: DeliverPacketSeq + pub fn deliver_packet_seq(&self, netc: &mut NetClient, packets: &Vec) -> (rc: ( + bool, + Ghost>, + Ghost>, + )) + requires + old(netc).ok(), + outbound_packet_seq_is_valid(packets@), + outbound_packet_seq_has_correct_srcs(packets@, old(netc).my_end_point()), + ensures + netc.my_end_point() == old(netc).my_end_point(), + ({ + let (ok, Ghost(net_events), Ghost(ios)) = rc; + { + &&& netc.ok() <==> ok + &&& ok ==> { + &&& all_ios_are_sends(ios) + &&& (forall|i: int| + 0 <= i && i < net_events.len() ==> net_events[i].is_Send()) + &&& ios == map_sent_packet_seq_to_ios(packets@) + &&& abstractify_outbound_packets_to_seq_of_lsht_packets(packets@) + == extract_sent_packets_from_ios(ios) + &&& abstractify_seq_of_cpackets_to_set_of_sht_packets(packets@) + == extract_packets_from_abstract_ios(ios) + &&& no_invalid_sends(ios) + &&& raw_io_consistent_with_spec_io(net_events, ios) + &&& only_sent_marshalable_data(net_events) + &&& netc.history() == old(netc).history() + net_events } - assert (abstractify_raw_log_to_ios(events@) == ios) by { - let aios = abstractify_raw_log_to_ios(events@); - assert forall |i: int| 0 <= i && i < ios.len() implies aios[i] == ios[i] by { - assert (send_log_entry_reflects_packet(events@[i], &packets[i])); - } - assert_seqs_equal!(aios, ios); - }; - assert forall |i| 0 <= i < ios.len() && #[trigger] ios[i].is_Send() implies !ios[i].get_Send_s().msg.is_InvalidMessage() by { - let msg = ios[i].get_Send_s().msg; - assert (msg == abstractify_cpacket_to_lsht_packet(packets[i]).msg); - assert (outbound_packet_is_valid(&packets[i])); + } + }), + { + let (ok, events) = send_packet_seq(packets, netc); + if !ok { + (ok, Ghost(Seq::::empty()), Ghost(Seq::::empty())) + } else { + let ghost ios: Seq = map_sent_packet_seq_to_ios(packets@); + proof { + assert(abstractify_seq_of_cpackets_to_set_of_sht_packets(packets@) + == extract_packets_from_abstract_ios(ios)) by { + lemma_if_everything_in_seq_satisfies_filter_then_filter_is_identity( + ios, + |io: LSHTIo| io.is_Send(), + ); + assert(ios.filter(|io: LSHTIo| io.is_Send()) == ios); + let set1 = abstractify_seq_of_cpackets_to_set_of_sht_packets(packets@); + let set2 = extract_packets_from_abstract_ios(ios); + let seq1 = packets@.map_values(|cp: CPacket| cp@); + let seq2 = extract_sent_packets_from_ios(ios).map_values( + |lp: LSHTPacket| extract_packet_from_lsht_packet(lp), + ); + assert(set1 == seq1.to_set()); + assert(set2 == seq2.to_set()); + assert forall|x| set1.contains(x) implies set2.contains(x) by { + let idx: int = choose|idx: int| + 0 <= idx && idx < seq1.len() && #[trigger] seq1[idx] == x; + assert(seq2[idx] == x); + assert(set2.contains(x)); }; - assert forall |i: int| 0 <= i && i < events@.len() implies events@[i].is_Send() by { - assert (send_log_entry_reflects_packet(events@[i], &packets[i])); + assert forall|x| set2.contains(x) implies set1.contains(x) by { + let idx: int = choose|idx: int| + 0 <= idx && idx < seq2.len() && #[trigger] seq2[idx] == x; + assert(seq1[idx] == x); + 
assert(set1.contains(x)); }; + assert_sets_equal!(abstractify_seq_of_cpackets_to_set_of_sht_packets(packets@), + extract_packets_from_abstract_ios(ios)); + } + assert(abstractify_outbound_packets_to_seq_of_lsht_packets(packets@) + == extract_sent_packets_from_ios(ios)) by { + lemma_if_everything_in_seq_satisfies_filter_then_filter_is_identity( + ios, + |io: LSHTIo| io.is_Send(), + ); + assert(ios.filter(|io: LSHTIo| io.is_Send()) == ios); + assert_seqs_equal!(abstractify_outbound_packets_to_seq_of_lsht_packets(packets@), + extract_sent_packets_from_ios(ios)); } - (ok, events, Ghost(ios)) + assert(abstractify_raw_log_to_ios(events@) == ios) by { + let aios = abstractify_raw_log_to_ios(events@); + assert forall|i: int| 0 <= i && i < ios.len() implies aios[i] == ios[i] by { + assert(send_log_entry_reflects_packet(events@[i], &packets[i])); + } + assert_seqs_equal!(aios, ios); + }; + assert forall|i| + 0 <= i < ios.len() + && #[trigger] ios[i].is_Send() implies !ios[i].get_Send_s().msg.is_InvalidMessage() by { + let msg = ios[i].get_Send_s().msg; + assert(msg == abstractify_cpacket_to_lsht_packet(packets[i]).msg); + assert(outbound_packet_is_valid(&packets[i])); + }; + assert forall|i: int| 0 <= i && i < events@.len() implies events@[i].is_Send() by { + assert(send_log_entry_reflects_packet(events@[i], &packets[i])); + }; } + (ok, events, Ghost(ios)) } + } - // Translates Impl/LiveSHT/SchedulerImpl.i.dfy :: DeliverOutboundPackets - pub fn deliver_outbound_packets(&self, netc: &mut NetClient, packets: &Vec) -> - (rc: (bool, Ghost>, Ghost>)) - requires - old(netc).ok(), - outbound_packet_seq_is_valid(packets@), - outbound_packet_seq_has_correct_srcs(packets@, old(netc).my_end_point()), - ensures - netc.my_end_point() == old(netc).my_end_point(), - ({ - let (ok, Ghost(net_events), Ghost(ios)) = rc; - { - &&& netc.ok() <==> ok - &&& ok ==> { - &&& all_ios_are_sends(ios) - &&& (forall |i: int| 0 <= i && i < net_events.len() ==> net_events[i].is_Send()) - &&& ios == map_sent_packet_seq_to_ios(packets@) - &&& abstractify_outbound_packets_to_seq_of_lsht_packets(packets@) == - extract_sent_packets_from_ios(ios) - &&& abstractify_seq_of_cpackets_to_set_of_sht_packets(packets@) == - extract_packets_from_abstract_ios(ios) - &&& no_invalid_sends(ios) - &&& raw_io_consistent_with_spec_io(net_events, ios) - &&& only_sent_marshalable_data(net_events) - &&& netc.history() == old(netc).history() + net_events - } + // Translates Impl/LiveSHT/SchedulerImpl.i.dfy :: DeliverOutboundPackets + pub fn deliver_outbound_packets(&self, netc: &mut NetClient, packets: &Vec) -> (rc: ( + bool, + Ghost>, + Ghost>, + )) + requires + old(netc).ok(), + outbound_packet_seq_is_valid(packets@), + outbound_packet_seq_has_correct_srcs(packets@, old(netc).my_end_point()), + ensures + netc.my_end_point() == old(netc).my_end_point(), + ({ + let (ok, Ghost(net_events), Ghost(ios)) = rc; + { + &&& netc.ok() <==> ok + &&& ok ==> { + &&& all_ios_are_sends(ios) + &&& (forall|i: int| + 0 <= i && i < net_events.len() ==> net_events[i].is_Send()) + &&& ios == map_sent_packet_seq_to_ios(packets@) + &&& abstractify_outbound_packets_to_seq_of_lsht_packets(packets@) + == extract_sent_packets_from_ios(ios) + &&& abstractify_seq_of_cpackets_to_set_of_sht_packets(packets@) + == extract_packets_from_abstract_ios(ios) + &&& no_invalid_sends(ios) + &&& raw_io_consistent_with_spec_io(net_events, ios) + &&& only_sent_marshalable_data(net_events) + &&& netc.history() == old(netc).history() + net_events } - }) - { - self.deliver_packet_seq(netc, packets) - } 
+ } + }), + { + self.deliver_packet_seq(netc, packets) + } - // Impl/LiveSHT/SchedulerImpl.i.dfy Host_ReceivePacket_Next - pub fn receive_packet_next(&mut self, netc: &mut NetClient) -> (rc: (bool, Ghost)) + // Impl/LiveSHT/SchedulerImpl.i.dfy Host_ReceivePacket_Next + pub fn receive_packet_next(&mut self, netc: &mut NetClient) -> (rc: (bool, Ghost)) requires Self::next_requires(*old(self), *old(netc)), ensures Self::next_ensures(*old(self), *old(netc), *self, *netc, rc), - { - let ghost old_self: HostState = *self; - let (rr, net_event) = receive_with_demarshal(netc, &self.constants.me); - match rr { - ReceiveResult::Fail{} => { - return (false, Ghost(EventResults{ recvs: seq![], clocks: seq![], sends: seq![], ios: seq![] })); - } - ReceiveResult::Timeout{} => { - let iop: NetEvent = LIoOp::TimeoutReceive{}; - let ghost res = EventResults{ recvs: seq![], clocks: seq![ iop ], sends: seq![], ios: seq![ iop ] }; - proof { - old_self.delegation_map.valid_implies_complete(); - assert (next_step(old_self@, self@, abstractify_raw_log_to_ios(res.ios), - Step::ReceivePacket)); - } - return (true, Ghost(res)); // iop should also appear as a clock? + { + let ghost old_self: HostState = *self; + let (rr, net_event) = receive_with_demarshal(netc, &self.constants.me); + match rr { + ReceiveResult::Fail { } => { + return ( + false, + Ghost( + EventResults { recvs: seq![], clocks: seq![], sends: seq![], ios: seq![] }, + ), + ); + }, + ReceiveResult::Timeout { } => { + let iop: NetEvent = LIoOp::TimeoutReceive { }; + let ghost res = EventResults { + recvs: seq![], + clocks: seq![ iop ], + sends: seq![], + ios: seq![ iop ], + }; + proof { + old_self.delegation_map.valid_implies_complete(); + assert(next_step( + old_self@, + self@, + abstractify_raw_log_to_ios(res.ios), + Step::ReceivePacket, + )); } - ReceiveResult::Packet{ cpacket } => { - match cpacket.msg { - CSingleMessage::InvalidMessage { } => { - let ghost res = EventResults{ recvs: seq![ net_event@ ], clocks: seq![], sends: seq![], - ios: seq![ net_event@ ] }; - proof { - old_self.delegation_map.valid_implies_complete(); - let ios = abstractify_raw_log_to_ios(res.ios); - let r = ios[0].get_Receive_r(); - let pkt = Packet{dst: r.dst, src: r.src, msg: r.msg}; - let sent_packets = extract_packets_from_abstract_ios(ios); - lemma_if_nothing_in_seq_satisfies_filter_then_filter_result_is_empty( - ios, |io: LSHTIo| io.is_Send()); - assert(extract_sent_packets_from_ios(ios) =~= Seq::::empty()); - assert(sent_packets =~= Set::::empty()); - workaround_dermarshal_not_invertible(); - assert(host_protocol_t::receive_packet(old(self)@, self@, pkt, sent_packets, arbitrary())); - assert(receive_packet_wrapper(old(self)@, self@, pkt, sent_packets)); - assert(receive_packet_without_reading_clock(old(self)@, self@, - abstractify_raw_log_to_ios(res.ios))); - assert(host_protocol_t::next_step(old(self)@, self@, abstractify_raw_log_to_ios(res.ios), - Step::ReceivePacket)); - } - return (true, Ghost(res)); + return (true, Ghost(res)); // iop should also appear as a clock? 
+ }, + ReceiveResult::Packet { cpacket } => { + match cpacket.msg { + CSingleMessage::InvalidMessage { } => { + let ghost res = EventResults { + recvs: seq![ net_event@ ], + clocks: seq![], + sends: seq![], + ios: seq![ net_event@ ], + }; + proof { + old_self.delegation_map.valid_implies_complete(); + let ios = abstractify_raw_log_to_ios(res.ios); + let r = ios[0].get_Receive_r(); + let pkt = Packet { dst: r.dst, src: r.src, msg: r.msg }; + let sent_packets = extract_packets_from_abstract_ios(ios); + lemma_if_nothing_in_seq_satisfies_filter_then_filter_result_is_empty( + ios, + |io: LSHTIo| io.is_Send(), + ); + assert(extract_sent_packets_from_ios(ios) =~= Seq::< + LSHTPacket, + >::empty()); + assert(sent_packets =~= Set::::empty()); + workaround_dermarshal_not_invertible(); + assert(host_protocol_t::receive_packet( + old(self)@, + self@, + pkt, + sent_packets, + arbitrary(), + )); + assert(receive_packet_wrapper(old(self)@, self@, pkt, sent_packets)); + assert(receive_packet_without_reading_clock( + old(self)@, + self@, + abstractify_raw_log_to_ios(res.ios), + )); + assert(host_protocol_t::next_step( + old(self)@, + self@, + abstractify_raw_log_to_ios(res.ios), + Step::ReceivePacket, + )); } - _ => { - assert( *old(self) == *self ); - let ghost mid_netc = *netc; - let (ok, Ghost(event_results), Ghost(ios)) = self.host_next_receive_packet(netc, Ghost(old(netc).history()), cpacket, Ghost(net_event@)); - - if !ok { - return (ok, Ghost(event_results)); - } - - let rc = (ok, Ghost(event_results)); - assert(self.invariants(&netc.my_end_point())); - proof { - old(self).delegation_map.valid_implies_complete(); - } - assert(host_protocol_t::next_step(old(self)@, self@, ios, Step::ReceivePacket)); - assert(Self::next(old(self)@, self@, event_results.ios)); - rc + return (true, Ghost(res)); + }, + _ => { + assert(*old(self) == *self); + let ghost mid_netc = *netc; + let (ok, Ghost(event_results), Ghost(ios)) = self.host_next_receive_packet( + netc, + Ghost(old(netc).history()), + cpacket, + Ghost(net_event@), + ); + if !ok { + return (ok, Ghost(event_results)); } - } + let rc = (ok, Ghost(event_results)); + assert(self.invariants(&netc.my_end_point())); + proof { + old(self).delegation_map.valid_implies_complete(); + } + assert(host_protocol_t::next_step( + old(self)@, + self@, + ios, + Step::ReceivePacket, + )); + assert(Self::next(old(self)@, self@, event_results.ios)); + rc + }, } - } + }, } + } - // Jon and Jay L found a bug in the Ironfleet spec, in Host.s.dfy. It says: - // ``` - // ensures (ok || |sends| > 0) ==> env.net.history() == old(env.net.history()) + (recvs + clocks + sends) - // `` - // but this isn't strong enough. Indeed, in Dafny we were able to unwittingly - // ``trick'' it by just setting the sends to empty. What it should say is that - // if ok was false, then the env history reflects a prefix of the receives, - // clocks, and sends we intended to perform, and the HostNext holds on the - // full list of receives, clocks, and sends we intended to perform. - // - // So here we have to trick the spec in the same way that the Ironfleet Dafny - // code did. - proof fn empty_event_results() -> (event_results: EventResults) + // Jon and Jay L found a bug in the Ironfleet spec, in Host.s.dfy. It says: + // ``` + // ensures (ok || |sends| > 0) ==> env.net.history() == old(env.net.history()) + (recvs + clocks + sends) + // `` + // but this isn't strong enough. Indeed, in Dafny we were able to unwittingly + // ``trick'' it by just setting the sends to empty. 
What it should say is that + // if ok was false, then the env history reflects a prefix of the receives, + // clocks, and sends we intended to perform, and the HostNext holds on the + // full list of receives, clocks, and sends we intended to perform. + // + // So here we have to trick the spec in the same way that the Ironfleet Dafny + // code did. + proof fn empty_event_results() -> (event_results: EventResults) ensures event_results.well_typed_events(), event_results.ios == event_results.event_seq(), event_results.ios == Seq::::empty(), - { - EventResults{ - recvs: Seq::::empty(), - clocks: Seq::::empty(), - sends: Seq::::empty(), - ios: Seq::::empty(), - } + { + EventResults { + recvs: Seq::::empty(), + clocks: Seq::::empty(), + sends: Seq::::empty(), + ios: Seq::::empty(), } + } - /// Impl/LiveSHT/SchedulerImpl.i.dfy HostNextReceivePacket - /// Here we've replaced the Dafny input parameter `rr` with `cpacket`, which represents `rr.cpacket`. - #[verifier(spinoff_prover)] // suddenly this is taking a long time due to an unrelated change elsewhere - fn host_next_receive_packet( - &mut self, - netc: &mut NetClient, - Ghost(old_netc_history): Ghost>, - cpacket: CPacket, - Ghost(receive_event): Ghost - ) -> (rc: (bool, Ghost, Ghost>)) + /// Impl/LiveSHT/SchedulerImpl.i.dfy HostNextReceivePacket + /// Here we've replaced the Dafny input parameter `rr` with `cpacket`, which represents `rr.cpacket`. + #[verifier(spinoff_prover)] // suddenly this is taking a long time due to an unrelated change elsewhere + fn host_next_receive_packet( + &mut self, + netc: &mut NetClient, + Ghost(old_netc_history): Ghost>, + cpacket: CPacket, + Ghost(receive_event): Ghost, + ) -> (rc: (bool, Ghost, Ghost>)) requires old(netc).ok(), old(self).invariants(&old(netc).my_end_point()), @@ -3571,1162 +4077,1452 @@ mod host_impl_v { cpacket.src@.valid_physical_address(), old(netc).history() == old_netc_history.push(receive_event), receive_event is Receive, - abstractify_cpacket_to_lsht_packet(cpacket) == abstractify_net_packet_to_lsht_packet(receive_event.get_Receive_r()), - ensures ({ - let (ok, Ghost(event_results), Ghost(ios)) = rc; - &&& self.invariants(&netc.my_end_point()) - &&& self@.constants == old(self)@.constants - &&& ok == netc.ok() - // Because all `net_events` are sends, the condition "even if ok is false, if we sent at least one - // packet..." is implied by "even if ok is false, if `net_events` has length > 0...". - &&& (ok || event_results.sends.len() > 0) ==> netc.history() == old_netc_history + event_results.ios - // There's supposed to be a distinction between the ios that we intended to do and the - // event_seq that we actually did. (See EventResult definition.) But in the interest of - // mimicking Dafny Ironfleet, we make no such distinction. - &&& event_results.ios == event_results.event_seq() - &&& event_results.well_typed_events() - &&& ok ==> { - &&& host_protocol_t::receive_packet_next(old(self)@, self@, ios) - &&& raw_io_consistent_with_spec_io(event_results.ios, ios) - &&& no_invalid_sends(ios) + abstractify_cpacket_to_lsht_packet(cpacket) == abstractify_net_packet_to_lsht_packet( + receive_event.get_Receive_r(), + ), + ensures + ({ + let (ok, Ghost(event_results), Ghost(ios)) = rc; + &&& self.invariants(&netc.my_end_point()) + &&& self@.constants == old(self)@.constants + &&& ok + == netc.ok() + // Because all `net_events` are sends, the condition "even if ok is false, if we sent at least one + // packet..." is implied by "even if ok is false, if `net_events` has length > 0...". 
+ + &&& (ok || event_results.sends.len() > 0) ==> netc.history() == old_netc_history + + event_results.ios + // There's supposed to be a distinction between the ios that we intended to do and the + // event_seq that we actually did. (See EventResult definition.) But in the interest of + // mimicking Dafny Ironfleet, we make no such distinction. + + &&& event_results.ios == event_results.event_seq() + &&& event_results.well_typed_events() + &&& ok ==> { + &&& host_protocol_t::receive_packet_next(old(self)@, self@, ios) + &&& raw_io_consistent_with_spec_io(event_results.ios, ios) + &&& no_invalid_sends(ios) } }), - { - let (sent_packets, ack) = self.host_model_receive_packet(cpacket); - let ghost io0 = LIoOp::Receive{ r: abstractify_cpacket_to_lsht_packet(cpacket) }; - let ghost ios_head = seq![io0]; - proof { - let sent_packets_v = abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@); - let mapped = sent_packets@.map_values(|cp: CPacket| cp@); - let setted = mapped.to_set(); - if 0 < sent_packets_v.len() { - - // TODO(andrea): wow, .map and .to_set are pretty poorly automated. :vP - assert forall |i| #![auto] 0 <= i < sent_packets@.len() - implies sent_packets@[i].src@ == netc.my_end_point() by { - let cpacket = sent_packets@[i]; - assert( mapped[i] == cpacket@ ); // witness - assert( setted.contains(cpacket@) ); // trigger - } - } else { - assert( 0 == mapped.len() ) by { - if 0 < mapped.len() { - assert( setted.contains(mapped[0]) ); // witness - } - } + { + let (sent_packets, ack) = self.host_model_receive_packet(cpacket); + let ghost io0 = LIoOp::Receive { r: abstractify_cpacket_to_lsht_packet(cpacket) }; + let ghost ios_head = seq![io0]; + proof { + let sent_packets_v = abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@); + let mapped = sent_packets@.map_values(|cp: CPacket| cp@); + let setted = mapped.to_set(); + if 0 < sent_packets_v.len() { + // TODO(andrea): wow, .map and .to_set are pretty poorly automated. 
:vP + assert forall|i| #![auto] 0 <= i < sent_packets@.len() implies sent_packets@[i].src@ + == netc.my_end_point() by { + let cpacket = sent_packets@[i]; + assert(mapped[i] == cpacket@); // witness + assert(setted.contains(cpacket@)); // trigger } - assert( outbound_packet_seq_has_correct_srcs(sent_packets@, netc.my_end_point()) ); - } - assert(netc.history() == old(netc).history()); - let rc = self.deliver_outbound_packets(netc, &sent_packets); - - let (ok, Ghost(net_events), Ghost(ios_tail)) = rc; - assert( ok == netc.ok() ); - if !ok { - proof { - self.delegation_map.valid_implies_complete(); // sorry, valid is opaque now + } else { + assert(0 == mapped.len()) by { + if 0 < mapped.len() { + assert(setted.contains(mapped[0])); // witness + } } - let ghost event_results = Self::empty_event_results(); - return (false, Ghost(event_results), Ghost(Seq::::empty())); } - - let ghost ios = ios_head + ios_tail; - proof { - old(self).delegation_map.valid_implies_complete(); // sorry, valid is opaque now - } - assert(ios_tail =~= ios.skip(1)); - proof { - lemma_filter_skip_rejected(ios, |io: LSHTIo| io.is_Send(), 1); - } - assert(receive_packet(old(self)@, self@, cpacket@, extract_packets_from_abstract_ios(ios), ack@@)); // trigger - - let ghost event_results = EventResults{ - recvs: seq![receive_event], - clocks: seq![], - sends: net_events, - ios: seq![receive_event] + net_events, - }; - assert(abstractify_raw_log_to_ios(event_results.ios) =~= ios); // extensional equality - + assert(outbound_packet_seq_has_correct_srcs(sent_packets@, netc.my_end_point())); + } + assert(netc.history() == old(netc).history()); + let rc = self.deliver_outbound_packets(netc, &sent_packets); + let (ok, Ghost(net_events), Ghost(ios_tail)) = rc; + assert(ok == netc.ok()); + if !ok { proof { - self.delegation_map.valid_implies_complete(); // sorry, valid is opaque now - assert( netc.history() =~= old_netc_history + event_results.ios ); + self.delegation_map.valid_implies_complete(); // sorry, valid is opaque now } - (ok, Ghost(event_results), Ghost(ios)) + let ghost event_results = Self::empty_event_results(); + return (false, Ghost(event_results), Ghost(Seq::::empty())); + } + let ghost ios = ios_head + ios_tail; + proof { + old(self).delegation_map.valid_implies_complete(); // sorry, valid is opaque now + } + assert(ios_tail =~= ios.skip(1)); + proof { + lemma_filter_skip_rejected(ios, |io: LSHTIo| io.is_Send(), 1); + } + assert(receive_packet( + old(self)@, + self@, + cpacket@, + extract_packets_from_abstract_ios(ios), + ack@@, + )); // trigger + let ghost event_results = EventResults { + recvs: seq![receive_event], + clocks: seq![], + sends: net_events, + ios: seq![receive_event] + net_events, + }; + assert(abstractify_raw_log_to_ios(event_results.ios) =~= ios); // extensional equality + proof { + self.delegation_map.valid_implies_complete(); // sorry, valid is opaque now + assert(netc.history() =~= old_netc_history + event_results.ios); } + (ok, Ghost(event_results), Ghost(ios)) + } - /// Impl/SHT/HostModel.i HostModelReceivePacket - /// - /// In Dafny, ack (rc.1) isn't an Option, it is an InvalidPacket that didn't have any ensures - /// obligations. That is confusing (surprising) to read, but changing it to an Option would - /// entail making the corresponding change in the host_protocol so that abstraction stays - /// parallel. That's too big of a change; we're going to stay true to the original lyrics. 
- fn host_model_receive_packet(&mut self, cpacket: CPacket) -> (rc: (Vec, Ghost)) + /// Impl/SHT/HostModel.i HostModelReceivePacket + /// + /// In Dafny, ack (rc.1) isn't an Option, it is an InvalidPacket that didn't have any ensures + /// obligations. That is confusing (surprising) to read, but changing it to an Option would + /// entail making the corresponding change in the host_protocol so that abstraction stays + /// parallel. That's too big of a change; we're going to stay true to the original lyrics. + fn host_model_receive_packet(&mut self, cpacket: CPacket) -> (rc: ( + Vec, + Ghost, + )) requires old(self).valid(), old(self).host_state_packet_preconditions(cpacket), !(cpacket.msg is InvalidMessage), cpacket.dst@ == old(self).constants.me@, - ensures ({ - let (sent_packets, ack) = rc; - &&& outbound_packet_seq_is_valid(sent_packets@) - &&& receive_packet(old(self)@, self@, cpacket@, abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), ack@@) - // The Dafny Ironfleet "common preconditions" take an explicit cpacket, but we need to talk - // about - &&& self.host_state_common_postconditions(*old(self), cpacket, sent_packets@) - }) - { - let mut sent_packets = Vec::new(); - - if self.received_packet.is_none() { - let recv_rr = self.sd.receive_impl(&cpacket); - - if matches!(recv_rr, ReceiveImplResult::AckOrInvalid) { - let ghost g_ack: CPacket = arbitrary(); - proof { - assert( !Set::::empty().contains(g_ack@) ); // trigger - assert( - abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@) =~= - extract_packets_from_lsht_packets(abstractify_outbound_packets_to_seq_of_lsht_packets(sent_packets@)) ); - - // assert( self.host_state_common_postconditions(*old(self), cpacket, sent_packets@) ); - // assert( receive_packet(old(self)@, self@, cpacket@, abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), g_ack@) ); - } - (sent_packets, Ghost(g_ack)) - } else { - match recv_rr { - ReceiveImplResult::FreshPacket{ack} => { - sent_packets.push(ack); - self.received_packet = Some(cpacket); - } - ReceiveImplResult::DuplicatePacket{ack} => { - sent_packets.push(ack); - } - _ => { unreached() } - }; - let ghost g_ack = recv_rr.get_ack(); - - proof { - lemma_map_values_singleton_auto::(); - lemma_to_set_singleton_auto::(); - let abs_seq_lsht = abstractify_outbound_packets_to_seq_of_lsht_packets(sent_packets@); - let ext_seq = abs_seq_lsht.map_values(|lp: LSHTPacket| extract_packet_from_lsht_packet(lp)); - assert( ext_seq =~= seq![g_ack@] ); // trigger auto lemmas - // assert( receive_packet(old(self)@, self@, cpacket@, abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), g_ack@) ); - } - (sent_packets, Ghost(g_ack)) + ensures + ({ + let (sent_packets, ack) = rc; + &&& outbound_packet_seq_is_valid(sent_packets@) + &&& receive_packet( + old(self)@, + self@, + cpacket@, + abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), + ack@@, + ) + // The Dafny Ironfleet "common preconditions" take an explicit cpacket, but we need to talk + // about + + &&& self.host_state_common_postconditions(*old(self), cpacket, sent_packets@) + }), + { + let mut sent_packets = Vec::new(); + if self.received_packet.is_none() { + let recv_rr = self.sd.receive_impl(&cpacket); + if matches!(recv_rr, ReceiveImplResult::AckOrInvalid) { + let ghost g_ack: CPacket = arbitrary(); + proof { + assert(!Set::::empty().contains(g_ack@)); // trigger + assert(abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@) + =~= extract_packets_from_lsht_packets( + 
abstractify_outbound_packets_to_seq_of_lsht_packets(sent_packets@), + )); + // assert( self.host_state_common_postconditions(*old(self), cpacket, sent_packets@) ); + // assert( receive_packet(old(self)@, self@, cpacket@, abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), g_ack@) ); } + (sent_packets, Ghost(g_ack)) } else { - let ack = Ghost(cpacket); // NB cpacket is a garbage value, since rc.0 vec is empty + match recv_rr { + ReceiveImplResult::FreshPacket { ack } => { + sent_packets.push(ack); + self.received_packet = Some(cpacket); + }, + ReceiveImplResult::DuplicatePacket { ack } => { + sent_packets.push(ack); + }, + _ => { unreached() }, + }; + let ghost g_ack = recv_rr.get_ack(); proof { - assert( - abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@) =~= - extract_packets_from_lsht_packets(abstractify_outbound_packets_to_seq_of_lsht_packets(sent_packets@)) ); - - assert( abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@) =~= Set::empty() ); - // assert( receive_packet(old(self)@, self@, cpacket@, abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), ack@@) ); + lemma_map_values_singleton_auto::(); + lemma_to_set_singleton_auto::(); + let abs_seq_lsht = abstractify_outbound_packets_to_seq_of_lsht_packets( + sent_packets@, + ); + let ext_seq = abs_seq_lsht.map_values( + |lp: LSHTPacket| extract_packet_from_lsht_packet(lp), + ); + assert(ext_seq =~= seq![g_ack@]); // trigger auto lemmas + // assert( receive_packet(old(self)@, self@, cpacket@, abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), g_ack@) ); } - (sent_packets, ack) + (sent_packets, Ghost(g_ack)) } - } - - // Implements Impl/SHT/HostModel.i.dfy :: ShouldProcessReceivedMessageImpl - fn should_process_received_message_impl(&self) -> (b: bool) - requires self.num_delegations < self.constants.params.max_delegations // part of invariants() - ensures b == should_process_received_message(self@) - { - match &self.received_packet { - Some(v) => { - match &v.msg { - CSingleMessage::Message{seqno: _seqno, dst: _dst, m: cm} => { - match cm { - CMessage::Delegate{..} | CMessage::Shard{..} => { - // We can't just compare self.num_delegations < - // self.constants.params.max_delegations - 2 because the - // latter quantity might underflow. So we do the following, - // which is equivalent but can't overflow or underflow because - // self.num_delegations < self.constants.params.max_delegations. 
- self.num_delegations + 1 < self.constants.params.max_delegations - 1 - }, - _ => true, - } - }, - _ => false, - } - }, - None => false, + } else { + let ack = Ghost(cpacket); // NB cpacket is a garbage value, since rc.0 vec is empty + proof { + assert(abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@) + =~= extract_packets_from_lsht_packets( + abstractify_outbound_packets_to_seq_of_lsht_packets(sent_packets@), + )); + assert(abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@) + =~= Set::empty()); + // assert( receive_packet(old(self)@, self@, cpacket@, abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), ack@@) ); } + (sent_packets, ack) } + } - // Translates Impl/SHT/HostModel.i.dfy :: HostIgnoringUnParseable - pub closed spec fn host_ignoring_unparseable( - pre: AbstractHostState, - post: AbstractHostState, - packets: Set - ) -> bool - { - &&& packets.len() == 0 - &&& post == AbstractHostState{received_packet: None, ..pre} - &&& pre.received_packet.is_Some() - &&& pre.received_packet.get_Some_0().msg.is_Message() - &&& match pre.received_packet.get_Some_0().msg.get_Message_m() { - Message::Delegate{range: range, h: h} => !({ - // no need to check for valid_key_range(range) - &&& valid_hashtable(h) - &&& !range.is_empty() - &&& pre.received_packet.get_Some_0().msg.get_Message_dst().valid_physical_address() - }), - _ => false, - } - } - - // Forked from host_state_common_preconditions because Verus can't pass a copy - // of cpacket around willy-nilly as Dafny Ironfleet does. - pub closed spec fn host_state_packet_preconditions(&self, cpacket: CPacket) -> bool - { - &&& self.abstractable() - &&& cpacket.abstractable() - &&& self.valid() - &&& cpacket.src@.valid_physical_address() - &&& self.constants.params@ == AbstractParameters::static_params() - &&& self.resend_count < 100000000 - } - - // Translates Impl/SHT/HostState.i.dfy :: HostState_common_preconditions - // These are now only "common" to the processing methods, not the receive a fresh packet and - // record it method. 
- pub closed spec fn host_state_common_preconditions(&self) -> bool - { - match self.received_packet { - Some(cpacket) => self.host_state_packet_preconditions(cpacket), - None => false, - } - } + // Implements Impl/SHT/HostModel.i.dfy :: ShouldProcessReceivedMessageImpl + fn should_process_received_message_impl(&self) -> (b: bool) + requires + self.num_delegations + < self.constants.params.max_delegations, // part of invariants() - // Translates Impl/SHT/HostState.i.dfy :: NextGetRequestPreconditions - pub closed spec fn next_get_request_preconditions(&self) -> bool - { - &&& self.abstractable() - &&& self.received_packet.is_Some() - &&& { let cpacket = self.received_packet.unwrap(); - { &&& cpacket.abstractable() - &&& cpacket.msg.is_Message() - &&& cpacket.msg.get_Message_m().is_GetRequest() - &&& cpacket.src@.valid_physical_address() - } } - &&& self.sd.valid() - &&& self.host_state_common_preconditions() - } - - // Translates Impl/SHT/HostState.i.dfy :: NextGetRequestPostconditions - pub closed spec fn next_get_request_postconditions(&self, pre: Self, sent_packets: Seq) -> bool - { - &&& pre.next_get_request_preconditions() - &&& self.abstractable() - &&& cpacket_seq_is_abstractable(sent_packets) - &&& match pre.received_packet { - Some(cpacket) => next_get_request(pre@, self@, cpacket@, - abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets)), - None => false, + ensures + b == should_process_received_message(self@), + { + match &self.received_packet { + Some(v) => { + match &v.msg { + CSingleMessage::Message { seqno: _seqno, dst: _dst, m: cm } => { + match cm { + CMessage::Delegate { .. } | CMessage::Shard { .. } => { + // We can't just compare self.num_delegations < + // self.constants.params.max_delegations - 2 because the + // latter quantity might underflow. So we do the following, + // which is equivalent but can't overflow or underflow because + // self.num_delegations < self.constants.params.max_delegations. 
+ self.num_delegations + 1 < self.constants.params.max_delegations - 1 + }, + _ => true, + } + }, + _ => false, } - &&& self.host_state_common_postconditions(pre, pre.received_packet.unwrap(), sent_packets) - &&& self.received_packet.is_None() + }, + None => false, } + } - // Translates Impl/SHT/HostState.i.dfy :: NextSetRequestPreconditions - pub closed spec fn next_set_request_preconditions(&self) -> bool - { - &&& self.abstractable() - &&& { let cpacket = self.received_packet.unwrap(); - { &&& cpacket.abstractable() - &&& cpacket.msg.is_Message() - &&& cpacket.msg.get_Message_m().is_SetRequest() - &&& cpacket.src@.valid_physical_address() - } } - &&& self.sd.valid() - &&& self.host_state_common_preconditions() - } - - // Translates Impl/SHT/HostState.i.dfy :: NextSetRequestPostconditions - pub closed spec fn next_set_request_postconditions(&self, pre: Self, sent_packets: Seq) -> bool - { - &&& pre.next_set_request_preconditions() - &&& self.abstractable() - &&& cpacket_seq_is_abstractable(sent_packets) - &&& match pre.received_packet { - Some(cpacket) => next_set_request(pre@, self@, cpacket@, - abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets)), - None => false, - } - &&& self.host_state_common_postconditions(pre, pre.received_packet.unwrap(), sent_packets) - &&& self.received_packet.is_None() + // Translates Impl/SHT/HostModel.i.dfy :: HostIgnoringUnParseable + pub closed spec fn host_ignoring_unparseable( + pre: AbstractHostState, + post: AbstractHostState, + packets: Set, + ) -> bool { + &&& packets.len() == 0 + &&& post == AbstractHostState { received_packet: None, ..pre } + &&& pre.received_packet.is_Some() + &&& pre.received_packet.get_Some_0().msg.is_Message() + &&& match pre.received_packet.get_Some_0().msg.get_Message_m() { + Message::Delegate { range: range, h: h } => !({ + // no need to check for valid_key_range(range) + &&& valid_hashtable(h) + &&& !range.is_empty() + &&& pre.received_packet.get_Some_0().msg.get_Message_dst().valid_physical_address() + }), + _ => false, } + } - // Translates Impl/SHT/HostState.i.dfy :: NextDelegatePreconditions - // This includes the extra condition: - // &&& self.num_delegations < self.constants.params.max_delegations - 2 - // since this is always required alongside NextDelegatePreconditions. - pub closed spec fn next_delegate_preconditions(&self) -> bool - { - &&& self.abstractable() - &&& { let cpacket = self.received_packet.unwrap(); - { &&& cpacket.abstractable() - &&& cpacket.msg.is_Message() - &&& cpacket.msg.get_Message_m().is_Delegate() - &&& cpacket.src@.valid_physical_address() - } } - &&& self.sd.valid() - &&& self.host_state_common_preconditions() - &&& self.constants.me@.valid_physical_address() - &&& self.sd.valid() - &&& self.num_delegations < self.constants.params.max_delegations - 2 - } - - // Translates Impl/SHT/HostState.i.dfy :: NextDelegatePostconditions - // It includes the extra condition next_delegate(...) since that's an - // extra postcondition always included with NextDelegatePostconditions. 
- pub closed spec fn next_delegate_postconditions(&self, pre: Self, sent_packets: Seq) -> bool - { - &&& pre.next_delegate_preconditions() - &&& self.abstractable() - &&& cpacket_seq_is_abstractable(sent_packets) - &&& self.host_state_common_postconditions(pre, pre.received_packet.unwrap(), sent_packets) - &&& self.received_packet.is_None() - &&& { - ||| next_delegate(pre@, self@, pre.received_packet.unwrap()@, - abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets)) - ||| Self::host_ignoring_unparseable(pre@, self@, - abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets)) - } + // Forked from host_state_common_preconditions because Verus can't pass a copy + // of cpacket around willy-nilly as Dafny Ironfleet does. + pub closed spec fn host_state_packet_preconditions(&self, cpacket: CPacket) -> bool { + &&& self.abstractable() + &&& cpacket.abstractable() + &&& self.valid() + &&& cpacket.src@.valid_physical_address() + &&& self.constants.params@ == AbstractParameters::static_params() + &&& self.resend_count < 100000000 + } + // Translates Impl/SHT/HostState.i.dfy :: HostState_common_preconditions + // These are now only "common" to the processing methods, not the receive a fresh packet and + // record it method. + pub closed spec fn host_state_common_preconditions(&self) -> bool { + match self.received_packet { + Some(cpacket) => self.host_state_packet_preconditions(cpacket), + None => false, } + } - // Translates Impl/SHT/HostState.i.dfy :: NextShardPreconditions - // This includes the extra condition: - // &&& self.num_delegations < self.constants.params.max_delegations - 2 - // since this is always required alongside NextShardPreconditions. - pub closed spec fn next_shard_preconditions(&self) -> bool - { - &&& self.abstractable() - &&& { let cpacket = self.received_packet.unwrap(); - { &&& cpacket.abstractable() - &&& cpacket.msg.is_Message() - &&& cpacket.msg.get_Message_m().is_Shard() - &&& cpacket.src@.valid_physical_address() - } } - &&& self.sd.valid() - &&& self.host_state_common_preconditions() - &&& self.num_delegations < self.constants.params.max_delegations - 2 - } - - // Translates Impl/SHT/HostState.i.dfy :: NextShardPostconditions - pub closed spec fn next_shard_postconditions(&self, pre: Self, sent_packets: Seq) -> bool - { - &&& self.abstractable() - &&& cpacket_seq_is_abstractable(sent_packets) - &&& self.host_state_common_postconditions(pre, pre.received_packet.unwrap(), sent_packets) - &&& self.received_packet.is_None() - &&& match pre.received_packet { - Some(cpacket) => - next_shard_wrapper(pre@, self@, cpacket@, - abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets)), - None => false, - } + // Translates Impl/SHT/HostState.i.dfy :: NextGetRequestPreconditions + pub closed spec fn next_get_request_preconditions(&self) -> bool { + &&& self.abstractable() + &&& self.received_packet.is_Some() + &&& { + let cpacket = self.received_packet.unwrap(); + { + &&& cpacket.abstractable() + &&& cpacket.msg.is_Message() + &&& cpacket.msg.get_Message_m().is_GetRequest() + &&& cpacket.src@.valid_physical_address() + } + } + &&& self.sd.valid() + &&& self.host_state_common_preconditions() + } + + // Translates Impl/SHT/HostState.i.dfy :: NextGetRequestPostconditions + pub closed spec fn next_get_request_postconditions( + &self, + pre: Self, + sent_packets: Seq, + ) -> bool { + &&& pre.next_get_request_preconditions() + &&& self.abstractable() + &&& cpacket_seq_is_abstractable(sent_packets) + &&& match pre.received_packet { + Some(cpacket) => 
next_get_request( + pre@, + self@, + cpacket@, + abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets), + ), + None => false, + } + &&& self.host_state_common_postconditions(pre, pre.received_packet.unwrap(), sent_packets) + &&& self.received_packet.is_None() + } + + // Translates Impl/SHT/HostState.i.dfy :: NextSetRequestPreconditions + pub closed spec fn next_set_request_preconditions(&self) -> bool { + &&& self.abstractable() + &&& { + let cpacket = self.received_packet.unwrap(); + { + &&& cpacket.abstractable() + &&& cpacket.msg.is_Message() + &&& cpacket.msg.get_Message_m().is_SetRequest() + &&& cpacket.src@.valid_physical_address() + } + } + &&& self.sd.valid() + &&& self.host_state_common_preconditions() + } + + // Translates Impl/SHT/HostState.i.dfy :: NextSetRequestPostconditions + pub closed spec fn next_set_request_postconditions( + &self, + pre: Self, + sent_packets: Seq, + ) -> bool { + &&& pre.next_set_request_preconditions() + &&& self.abstractable() + &&& cpacket_seq_is_abstractable(sent_packets) + &&& match pre.received_packet { + Some(cpacket) => next_set_request( + pre@, + self@, + cpacket@, + abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets), + ), + None => false, + } + &&& self.host_state_common_postconditions(pre, pre.received_packet.unwrap(), sent_packets) + &&& self.received_packet.is_None() + } + + // Translates Impl/SHT/HostState.i.dfy :: NextDelegatePreconditions + // This includes the extra condition: + // &&& self.num_delegations < self.constants.params.max_delegations - 2 + // since this is always required alongside NextDelegatePreconditions. + pub closed spec fn next_delegate_preconditions(&self) -> bool { + &&& self.abstractable() + &&& { + let cpacket = self.received_packet.unwrap(); + { + &&& cpacket.abstractable() + &&& cpacket.msg.is_Message() + &&& cpacket.msg.get_Message_m().is_Delegate() + &&& cpacket.src@.valid_physical_address() + } + } + &&& self.sd.valid() + &&& self.host_state_common_preconditions() + &&& self.constants.me@.valid_physical_address() + &&& self.sd.valid() + &&& self.num_delegations < self.constants.params.max_delegations - 2 + } + + // Translates Impl/SHT/HostState.i.dfy :: NextDelegatePostconditions + // It includes the extra condition next_delegate(...) since that's an + // extra postcondition always included with NextDelegatePostconditions. + pub closed spec fn next_delegate_postconditions( + &self, + pre: Self, + sent_packets: Seq, + ) -> bool { + &&& pre.next_delegate_preconditions() + &&& self.abstractable() + &&& cpacket_seq_is_abstractable(sent_packets) + &&& self.host_state_common_postconditions(pre, pre.received_packet.unwrap(), sent_packets) + &&& self.received_packet.is_None() + &&& { + ||| next_delegate( + pre@, + self@, + pre.received_packet.unwrap()@, + abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets), + ) + ||| Self::host_ignoring_unparseable( + pre@, + self@, + abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets), + ) } + } - // Translates Impl/SHT/HostModel.i.dfy :: HostState_common_postconditions - pub closed spec fn host_state_common_postconditions( - &self, - pre: Self, - cpacket: CPacket, - sent_packets: Seq - ) -> bool - { - // Removed at Lorch's suggestion: In Dafny, we needed this line to satisfy requires for later - // terms; in Verus we don't because we're living carelessly wrt recommends. 
- // Since we've split off host_state_common_preconditions for the receive_packet case (due to - // not being able to duplicate exec cpacket), we're trying to avoid propagating that change here. - // &&& pre.host_state_common_preconditions() - &&& self.abstractable() - &&& self.constants == pre.constants - &&& cpacket_seq_is_abstractable(sent_packets) - &&& self.valid() - &&& self.next_action_index == pre.next_action_index - &&& outbound_packet_seq_is_valid(sent_packets) - &&& outbound_packet_seq_has_correct_srcs(sent_packets, pre.constants.me@) - &&& (forall |i: int| 0 <= i && i < sent_packets.len() ==> - (#[trigger] sent_packets[i].msg).is_Message() || sent_packets[i].msg.is_Ack()) - &&& abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets) =~= - extract_packets_from_lsht_packets(abstractify_outbound_packets_to_seq_of_lsht_packets(sent_packets)) - &&& self.resend_count < 100000000 - } - - /// Translates Impl/SHT/HostModel.i.dfy HostModelNextGetRequest - /// - /// The cpacket argument is self.received_packet.unwrap() - this is not - /// translated because it is already mutably borrowed in self and thus - /// cannot be passed separately. - fn host_model_next_get_request(&mut self) -> (sent_packets: Vec) - requires old(self).next_get_request_preconditions() - ensures self.next_get_request_postconditions(*old(self), sent_packets@) - { - let cpacket: &CPacket = &self.received_packet.as_ref().unwrap(); - let ghost pkt: Packet = cpacket@; - match &cpacket.msg { - CSingleMessage::Message{m: CMessage::GetRequest{k}, seqno, ..} => { - let owner: EndPoint = self.delegation_map.get(k); - let ghost received_request: AppRequest = AppRequest::AppGetRequest{seqno: seqno@ as nat, key: *k}; - let its_me: bool = do_end_points_match(&owner, &self.constants.me); - let m: CMessage = - if its_me { - let v = self.h.get(k); - // OBSERVE: Need to say `valid_value` to trigger the quantifier saying all values are valid. - assert (v.is_some() ==> valid_value(v.get_Some_0()@)); - CMessage::Reply{k: SHTKey{ukey: k.ukey}, v: clone_option_vec_u8(v)} - } - else { - CMessage::Redirect{k: SHTKey{ukey: k.ukey}, id: clone_end_point(&owner)} - }; - let ghost new_received_requests: Seq = - if its_me { - self.received_requests@.push(received_request) - } - else { - self.received_requests@ + // Translates Impl/SHT/HostState.i.dfy :: NextShardPreconditions + // This includes the extra condition: + // &&& self.num_delegations < self.constants.params.max_delegations - 2 + // since this is always required alongside NextShardPreconditions. 
+ pub closed spec fn next_shard_preconditions(&self) -> bool { + &&& self.abstractable() + &&& { + let cpacket = self.received_packet.unwrap(); + { + &&& cpacket.abstractable() + &&& cpacket.msg.is_Message() + &&& cpacket.msg.get_Message_m().is_Shard() + &&& cpacket.src@.valid_physical_address() + } + } + &&& self.sd.valid() + &&& self.host_state_common_preconditions() + &&& self.num_delegations < self.constants.params.max_delegations - 2 + } + + // Translates Impl/SHT/HostState.i.dfy :: NextShardPostconditions + pub closed spec fn next_shard_postconditions( + &self, + pre: Self, + sent_packets: Seq, + ) -> bool { + &&& self.abstractable() + &&& cpacket_seq_is_abstractable(sent_packets) + &&& self.host_state_common_postconditions(pre, pre.received_packet.unwrap(), sent_packets) + &&& self.received_packet.is_None() + &&& match pre.received_packet { + Some(cpacket) => next_shard_wrapper( + pre@, + self@, + cpacket@, + abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets), + ), + None => false, + } + } + + // Translates Impl/SHT/HostModel.i.dfy :: HostState_common_postconditions + pub closed spec fn host_state_common_postconditions( + &self, + pre: Self, + cpacket: CPacket, + sent_packets: Seq, + ) -> bool { + // Removed at Lorch's suggestion: In Dafny, we needed this line to satisfy requires for later + // terms; in Verus we don't because we're living carelessly wrt recommends. + // Since we've split off host_state_common_preconditions for the receive_packet case (due to + // not being able to duplicate exec cpacket), we're trying to avoid propagating that change here. + // &&& pre.host_state_common_preconditions() + &&& self.abstractable() + &&& self.constants == pre.constants + &&& cpacket_seq_is_abstractable(sent_packets) + &&& self.valid() + &&& self.next_action_index == pre.next_action_index + &&& outbound_packet_seq_is_valid(sent_packets) + &&& outbound_packet_seq_has_correct_srcs(sent_packets, pre.constants.me@) + &&& (forall|i: int| + 0 <= i && i < sent_packets.len() ==> (#[trigger] sent_packets[i].msg).is_Message() + || sent_packets[i].msg.is_Ack()) + &&& abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets) + =~= extract_packets_from_lsht_packets( + abstractify_outbound_packets_to_seq_of_lsht_packets(sent_packets), + ) + &&& self.resend_count < 100000000 + } + + /// Translates Impl/SHT/HostModel.i.dfy HostModelNextGetRequest + /// + /// The cpacket argument is self.received_packet.unwrap() - this is not + /// translated because it is already mutably borrowed in self and thus + /// cannot be passed separately. + fn host_model_next_get_request(&mut self) -> (sent_packets: Vec) + requires + old(self).next_get_request_preconditions(), + ensures + self.next_get_request_postconditions(*old(self), sent_packets@), + { + let cpacket: &CPacket = &self.received_packet.as_ref().unwrap(); + let ghost pkt: Packet = cpacket@; + match &cpacket.msg { + CSingleMessage::Message { m: CMessage::GetRequest { k }, seqno, .. } => { + let owner: EndPoint = self.delegation_map.get(k); + let ghost received_request: AppRequest = AppRequest::AppGetRequest { + seqno: seqno@ as nat, + key: *k, + }; + let its_me: bool = do_end_points_match(&owner, &self.constants.me); + let m: CMessage = if its_me { + let v = self.h.get(k); + // OBSERVE: Need to say `valid_value` to trigger the quantifier saying all values are valid. 
+ assert(v.is_some() ==> valid_value(v.get_Some_0()@)); + CMessage::Reply { k: SHTKey { ukey: k.ukey }, v: clone_option_vec_u8(v) } + } else { + CMessage::Redirect { k: SHTKey { ukey: k.ukey }, id: clone_end_point(&owner) } + }; + let ghost new_received_requests: Seq = if its_me { + self.received_requests@.push(received_request) + } else { + self.received_requests@ + }; + proof { + lemma_auto_spec_u64_to_from_le_bytes(); + } + assert(m.is_marshalable()); + let optional_sm = self.sd.send_single_cmessage(&m, &cpacket.src); + let mut sent_packets = Vec::::new(); + match optional_sm { + Some(sm) => { + let p = CPacket { + dst: clone_end_point(&cpacket.src), + src: clone_end_point(&self.constants.me), + msg: sm, }; - proof { lemma_auto_spec_u64_to_from_le_bytes(); } - assert (m.is_marshalable()); - let optional_sm = self.sd.send_single_cmessage(&m, &cpacket.src); - let mut sent_packets = Vec::::new(); - match optional_sm { - Some(sm) => { - let p = CPacket{ - dst: clone_end_point(&cpacket.src), - src: clone_end_point(&self.constants.me), - msg: sm - }; - self.received_requests = Ghost(new_received_requests); - self.received_packet = None; - sent_packets.push(p); - // TODO replace a bunch of the proof below with these lines: - // proof { - // lemma_map_values_singleton_auto::(); - // to_set_singleton_auto::(); - // } - proof { - let ap = abstractify_cpacket_to_lsht_packet(p); - let bp = Packet{dst: ap.dst, src: ap.src, msg: ap.msg}; - assert_seqs_equal!(Seq::::empty().push(p).map_values(|cp: CPacket| cp@), + self.received_requests = Ghost(new_received_requests); + self.received_packet = None; + sent_packets.push(p); + // TODO replace a bunch of the proof below with these lines: + // proof { + // lemma_map_values_singleton_auto::(); + // to_set_singleton_auto::(); + // } + proof { + let ap = abstractify_cpacket_to_lsht_packet(p); + let bp = Packet { dst: ap.dst, src: ap.src, msg: ap.msg }; + assert_seqs_equal!(Seq::::empty().push(p).map_values(|cp: CPacket| cp@), Seq::::empty().push(p@)); - assert (Seq::::empty().push(p@).index(0) == p@); // needed to show it contains p@ - assert_sets_equal!(abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), + assert(Seq::::empty().push(p@).index(0) == p@); // needed to show it contains p@ + assert_sets_equal!(abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), Seq::::empty().push(p).map_values(|cp: CPacket| cp@).to_set()); - assert_sets_equal!(abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), + assert_sets_equal!(abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), Set::::empty().insert(p@)); - assert_sets_equal!(abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), + assert_sets_equal!(abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), set![ Packet{dst: pkt.src, src: self.constants.me@, msg: sm@} ]); - assert_seqs_equal!(abstractify_outbound_packets_to_seq_of_lsht_packets(sent_packets@), + assert_seqs_equal!(abstractify_outbound_packets_to_seq_of_lsht_packets(sent_packets@), Seq::::empty().push(ap)); - assert_seqs_equal!(abstractify_outbound_packets_to_seq_of_lsht_packets(sent_packets@) + assert_seqs_equal!(abstractify_outbound_packets_to_seq_of_lsht_packets(sent_packets@) .map_values(|lp: LSHTPacket| extract_packet_from_lsht_packet(lp)), Seq::::empty().push(bp)); - assert (Seq::::empty().push(bp).index(0) == bp); // needed to show it contains bp - assert_sets_equal!(Seq::::empty().push(bp).to_set(), + assert(Seq::::empty().push(bp).index(0) == bp); // needed to show it 
contains bp + assert_sets_equal!(Seq::::empty().push(bp).to_set(), Set::::empty().insert(bp)); - assert (next_get_request_reply( - old(self)@, self@, pkt.src, pkt.msg.get_Message_seqno(), - pkt.msg.get_Message_m().get_GetRequest_key(), - sm@, m@, - abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), - true)); - } - return sent_packets; - }, - None => { - self.received_packet = None; - proof { - assert( sent_packets@ =~= Seq::::empty() ); - assert( abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@) =~= Set::::empty() ); - assert (next_get_request_reply( - old(self)@, self@, pkt.src, pkt.msg.get_Message_seqno(), - pkt.msg.get_Message_m().get_GetRequest_key(), - SingleMessage::::InvalidMessage{}, m@, - abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), - false)); - assert_sets_equal!(abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), + assert(next_get_request_reply( + old(self)@, + self@, + pkt.src, + pkt.msg.get_Message_seqno(), + pkt.msg.get_Message_m().get_GetRequest_key(), + sm@, + m@, + abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), + true, + )); + } + return sent_packets; + }, + None => { + self.received_packet = None; + proof { + assert(sent_packets@ =~= Seq::::empty()); + assert(abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@) + =~= Set::::empty()); + assert(next_get_request_reply( + old(self)@, + self@, + pkt.src, + pkt.msg.get_Message_seqno(), + pkt.msg.get_Message_m().get_GetRequest_key(), + SingleMessage::::InvalidMessage { }, + m@, + abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), + false, + )); + assert_sets_equal!(abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), extract_packets_from_lsht_packets( abstractify_outbound_packets_to_seq_of_lsht_packets(sent_packets@))); - } - return sent_packets; } - } - }, - _ => { - assert(false); - unreached() - }, - } + return sent_packets; + }, + } + }, + _ => { + assert(false); + unreached() + }, } + } - // Implements Impl/SHT/HostModel.i.dfy HostModelNextSetRequest - fn host_model_next_set_request(&mut self) -> (sent_packets: Vec) - requires old(self).next_set_request_preconditions() - ensures self.next_set_request_postconditions(*old(self), sent_packets@) - { - proof { self.delegation_map.valid_implies_complete(); } - let cpacket: &CPacket = &self.received_packet.as_ref().unwrap(); - let ghost pkt: Packet = cpacket@; - let ghost pre = *self; - match &cpacket.msg { - CSingleMessage::Message{m, seqno, ..} => { - match m { - CMessage::SetRequest{k, v: ov} => { - let owner: EndPoint = self.delegation_map.get(k); - let marshalable: bool = m.is_message_marshallable(); - if (!marshalable) { - self.received_packet = None; - let sent_packets = Vec::::new(); - let ghost sm = SingleMessage::Ack{ack_seqno: 0}; - proof { - assert (!valid_key(*k) || !valid_optional_value(optional_value_view(*ov))); - assert (sent_packets@ == Seq::::empty()); - assert_seqs_equal!(sent_packets@.map_values(|cp: CPacket| cp@), + // Implements Impl/SHT/HostModel.i.dfy HostModelNextSetRequest + fn host_model_next_set_request(&mut self) -> (sent_packets: Vec) + requires + old(self).next_set_request_preconditions(), + ensures + self.next_set_request_postconditions(*old(self), sent_packets@), + { + proof { + self.delegation_map.valid_implies_complete(); + } + let cpacket: &CPacket = &self.received_packet.as_ref().unwrap(); + let ghost pkt: Packet = cpacket@; + let ghost pre = *self; + match &cpacket.msg { + CSingleMessage::Message { m, seqno, .. 
} => { + match m { + CMessage::SetRequest { k, v: ov } => { + let owner: EndPoint = self.delegation_map.get(k); + let marshalable: bool = m.is_message_marshallable(); + if (!marshalable) { + self.received_packet = None; + let sent_packets = Vec::::new(); + let ghost sm = SingleMessage::Ack { ack_seqno: 0 }; + proof { + assert(!valid_key(*k) || !valid_optional_value( + optional_value_view(*ov), + )); + assert(sent_packets@ == Seq::::empty()); + assert_seqs_equal!(sent_packets@.map_values(|cp: CPacket| cp@), Seq::::empty()); - assert_sets_equal!(abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), + assert_sets_equal!(abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), extract_packets_from_lsht_packets( abstractify_outbound_packets_to_seq_of_lsht_packets( sent_packets@))); - assert_sets_equal!(abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), + assert_sets_equal!(abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), Set::::empty()); - assert (next_set_request_complete(old(self)@, self@, pkt.src, - pkt.msg.get_Message_seqno(), - pkt.msg.get_Message_m(), - sm, - Message::Reply{key: *k, - value: optional_value_view(*ov)}, - Set::::empty(), true)); - assert (next_set_request(old(self)@, self@, cpacket@, - abstractify_seq_of_cpackets_to_set_of_sht_packets( - sent_packets@))); - }; - return sent_packets; + assert(next_set_request_complete( + old(self)@, + self@, + pkt.src, + pkt.msg.get_Message_seqno(), + pkt.msg.get_Message_m(), + sm, + Message::Reply { key: *k, value: optional_value_view(*ov) }, + Set::::empty(), + true, + )); + assert(next_set_request( + old(self)@, + self@, + cpacket@, + abstractify_seq_of_cpackets_to_set_of_sht_packets( + sent_packets@, + ), + )); } - else { - assert (valid_key(*k) && valid_optional_value(optional_value_view(*ov))); - let its_me: bool = do_end_points_match(&owner, &self.constants.me); - let mm: CMessage = + ; + return sent_packets; + } else { + assert(valid_key(*k) && valid_optional_value(optional_value_view(*ov))); + let its_me: bool = do_end_points_match(&owner, &self.constants.me); + let mm: CMessage = if its_me { + CMessage::Reply { k: k.clone(), v: clone_optional_value(ov) } + } else { + CMessage::Redirect { k: k.clone(), id: owner } + }; + assert(mm.is_marshalable()) by { + lemma_auto_spec_u64_to_from_le_bytes(); + } + let optional_sm = self.sd.send_single_cmessage(&mm, &cpacket.src); + let ghost received_request = AppRequest::AppSetRequest { + seqno: seqno@ as nat, + key: *k, + ov: optional_value_view(*ov), + }; + let mut sent_packets = Vec::::new(); + let ghost dst = cpacket.src@; + match optional_sm { + Some(sm) => { + let p = CPacket { + dst: clone_end_point(&cpacket.src), + src: clone_end_point(&self.constants.me), + msg: sm, + }; + assert(p@ == Packet { + dst: cpacket.src@, + src: self.constants.me@, + msg: sm@, + }); + sent_packets.push(p); if its_me { - CMessage::Reply{k: k.clone(), v: clone_optional_value(ov)} + assert(SingleDelivery::send_single_message( + old(self).sd@, + self.sd@, + mm@, + dst, + Some(sm@), + AbstractParameters::static_params(), + )); + self.received_requests = Ghost( + self.received_requests@.push(received_request), + ); + match ov { + Some(v) => self.h.insert(k.clone(), clone_vec_u8(v)), + None => self.h.remove(&k), + }; + self.received_packet = None; + } else { + self.received_packet = None; } - else { - CMessage::Redirect{k: k.clone(), id: owner} - }; - assert (mm.is_marshalable()) by { - lemma_auto_spec_u64_to_from_le_bytes(); - } - let optional_sm = 
self.sd.send_single_cmessage(&mm, &cpacket.src); - let ghost received_request = AppRequest::AppSetRequest{seqno: seqno@ as nat, key: *k, - ov: optional_value_view(*ov)}; - let mut sent_packets = Vec::::new(); - let ghost dst = cpacket.src@; - match optional_sm { - Some(sm) => { - let p = CPacket{dst: clone_end_point(&cpacket.src), - src: clone_end_point(&self.constants.me), - msg: sm}; - assert (p@ == Packet{dst: cpacket.src@, src: self.constants.me@, msg: sm@}); - sent_packets.push(p); - if its_me { - assert (SingleDelivery::send_single_message(old(self).sd@, self.sd@, mm@, dst, Some(sm@), - AbstractParameters::static_params())); - self.received_requests = Ghost(self.received_requests@.push(received_request)); - match ov { - Some(v) => self.h.insert(k.clone(), clone_vec_u8(v)), - None => self.h.remove(&k), - }; - self.received_packet = None; - } - else { - self.received_packet = None; - } - proof { - assert (SingleDelivery::send_single_message(old(self).sd@, self.sd@, mm@, dst, Some(sm@), - AbstractParameters::static_params())); - assert_seqs_equal!(sent_packets@.map_values(|cp: CPacket| cp@), + proof { + assert(SingleDelivery::send_single_message( + old(self).sd@, + self.sd@, + mm@, + dst, + Some(sm@), + AbstractParameters::static_params(), + )); + assert_seqs_equal!(sent_packets@.map_values(|cp: CPacket| cp@), seq![Packet{dst: cpacket.src@, src: self.constants.me@, msg: sm@}]); - singleton_seq_to_set_is_singleton_set(Packet{dst: cpacket.src@, - src: self.constants.me@, - msg: sm@}); - assert_sets_equal!( + singleton_seq_to_set_is_singleton_set( + Packet { + dst: cpacket.src@, + src: self.constants.me@, + msg: sm@, + }, + ); + assert_sets_equal!( abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), set![Packet{dst: pkt.src, src: self.constants.me@, msg: sm@}]); - assert (next_set_request_complete( - old(self)@, self@, pkt.src, - pkt.msg.get_Message_seqno(), - m@, sm@, mm@, - abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), - true)); - assert (sm.is_marshalable()) by { - lemma_auto_spec_u64_to_from_le_bytes(); - } - assert (outbound_packet_is_valid(&p)); - assert (outbound_packet_seq_is_valid(sent_packets@)); - assert (abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@) == - set![Packet{dst: pkt.src, src: self.constants.me@, msg: sm@}]); - assert (sent_packets@.map_values(|packet: CPacket| - abstractify_cpacket_to_lsht_packet(packet))[0] == - LPacket{dst: pkt.src, src: self.constants.me@, msg: sm@}); - singleton_seq_to_set_is_singleton_set( - LPacket{dst: pkt.src, src: self.constants.me@, msg: sm@}); - assert_seqs_equal!( + assert(next_set_request_complete( + old(self)@, + self@, + pkt.src, + pkt.msg.get_Message_seqno(), + m@, + sm@, + mm@, + abstractify_seq_of_cpackets_to_set_of_sht_packets( + sent_packets@, + ), + true, + )); + assert(sm.is_marshalable()) by { + lemma_auto_spec_u64_to_from_le_bytes(); + } + assert(outbound_packet_is_valid(&p)); + assert(outbound_packet_seq_is_valid(sent_packets@)); + assert(abstractify_seq_of_cpackets_to_set_of_sht_packets( + sent_packets@, + ) + == set![Packet{dst: pkt.src, src: self.constants.me@, msg: sm@}]); + assert(sent_packets@.map_values( + |packet: CPacket| + abstractify_cpacket_to_lsht_packet(packet), + )[0] == LPacket { + dst: pkt.src, + src: self.constants.me@, + msg: sm@, + }); + singleton_seq_to_set_is_singleton_set( + LPacket { + dst: pkt.src, + src: self.constants.me@, + msg: sm@, + }, + ); + assert_seqs_equal!( sent_packets@.map_values(|packet: CPacket| 
abstractify_cpacket_to_lsht_packet(packet)), seq![LPacket{dst: pkt.src, src: self.constants.me@, msg: sm@}]); - assert (abstractify_outbound_packets_to_seq_of_lsht_packets(sent_packets@)[0] == - abstractify_cpacket_to_lsht_packet(p)); - assert_seqs_equal!( + assert(abstractify_outbound_packets_to_seq_of_lsht_packets( + sent_packets@, + )[0] == abstractify_cpacket_to_lsht_packet(p)); + assert_seqs_equal!( abstractify_outbound_packets_to_seq_of_lsht_packets(sent_packets@), seq![abstractify_cpacket_to_lsht_packet(p)]); - assert (extract_packets_from_lsht_packets( - abstractify_outbound_packets_to_seq_of_lsht_packets(sent_packets@)) - == extract_packets_from_lsht_packets( - seq![abstractify_cpacket_to_lsht_packet(p)])); - assert (seq![abstractify_cpacket_to_lsht_packet(p)]. - map_values(|lp: LSHTPacket| extract_packet_from_lsht_packet(lp))[0] == - Packet {dst: pkt.src, src: self.constants.me@, msg: sm@} ); - assert_seqs_equal!( + assert(extract_packets_from_lsht_packets( + abstractify_outbound_packets_to_seq_of_lsht_packets( + sent_packets@, + ), + ) == extract_packets_from_lsht_packets( + seq![abstractify_cpacket_to_lsht_packet(p)], + )); + assert(seq![abstractify_cpacket_to_lsht_packet(p)].map_values( + |lp: LSHTPacket| extract_packet_from_lsht_packet(lp))[0] + == Packet { + dst: pkt.src, + src: self.constants.me@, + msg: sm@, + }); + assert_seqs_equal!( seq![abstractify_cpacket_to_lsht_packet(p)]. map_values(|lp: LSHTPacket| extract_packet_from_lsht_packet(lp)), seq![Packet {dst: pkt.src, src: self.constants.me@, msg: sm@}]); - singleton_seq_to_set_is_singleton_set(Packet{dst: pkt.src, - src: self.constants.me@, - msg: sm@}); - assert (extract_packets_from_lsht_packets( - seq![abstractify_cpacket_to_lsht_packet(p)]) == - set![Packet{dst: pkt.src, src: self.constants.me@, msg: sm@}]); - assert (self.host_state_common_postconditions( - pre, pre.received_packet.unwrap(), sent_packets@)); - } - return sent_packets; - }, - None => { - self.received_packet = None; - proof { - let abs_sent_packets = abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@); - assert( abs_sent_packets =~= Set::::empty() ); - assert( abstractify_outbound_packets_to_seq_of_lsht_packets(sent_packets@) =~= Seq::::empty() ); - assert( extract_packets_from_lsht_packets(Seq::::empty()) =~= Set::::empty() ); - - assert( next_set_request_complete(old(self)@, self@, pkt.src, pkt.msg.get_Message_seqno(), pkt.msg.get_Message_m(), arbitrary(), arbitrary(), abs_sent_packets, false) ); // exists witness - } - return sent_packets; + singleton_seq_to_set_is_singleton_set( + Packet { + dst: pkt.src, + src: self.constants.me@, + msg: sm@, + }, + ); + assert(extract_packets_from_lsht_packets( + seq![abstractify_cpacket_to_lsht_packet(p)], + ) + == set![Packet{dst: pkt.src, src: self.constants.me@, msg: sm@}]); + assert(self.host_state_common_postconditions( + pre, + pre.received_packet.unwrap(), + sent_packets@, + )); } - } + return sent_packets; + }, + None => { + self.received_packet = None; + proof { + let abs_sent_packets = + abstractify_seq_of_cpackets_to_set_of_sht_packets( + sent_packets@, + ); + assert(abs_sent_packets =~= Set::::empty()); + assert(abstractify_outbound_packets_to_seq_of_lsht_packets( + sent_packets@, + ) =~= Seq::::empty()); + assert(extract_packets_from_lsht_packets( + Seq::::empty(), + ) =~= Set::::empty()); + assert(next_set_request_complete( + old(self)@, + self@, + pkt.src, + pkt.msg.get_Message_seqno(), + pkt.msg.get_Message_m(), + arbitrary(), + arbitrary(), + abs_sent_packets, + false, + )); // 
exists witness + } + return sent_packets; + }, } - }, - _ => { assert(false); unreached() }, - } - }, - _ => { assert(false); unreached() }, - } + } + }, + _ => { + assert(false); + unreached() + }, + } + }, + _ => { + assert(false); + unreached() + }, } + } - proof fn effect_of_delegation_map_set( - pre: DelegationMap, - post: DelegationMap, - lo: &KeyIterator, - hi: &KeyIterator, - dst: &EndPoint - ) - requires - pre.valid(), - post.valid(), - forall |ki:KeyIterator| #[trigger] KeyIterator::between(*lo, ki, *hi) ==> post@[*ki.get()] == dst@, - forall |ki:KeyIterator| !ki.is_end_spec() && !(#[trigger] KeyIterator::between(*lo, ki, *hi)) ==> post@[*ki.get()] == pre@[*ki.get()], - ensures - AbstractDelegationMap(post@) == AbstractDelegationMap(pre@).update(KeyRange::{ lo: *lo, hi: *hi }, dst@) - { - pre.valid_implies_complete(); - post.valid_implies_complete(); - assert_maps_equal!(AbstractDelegationMap(post@).0, + proof fn effect_of_delegation_map_set( + pre: DelegationMap, + post: DelegationMap, + lo: &KeyIterator, + hi: &KeyIterator, + dst: &EndPoint, + ) + requires + pre.valid(), + post.valid(), + forall|ki: KeyIterator| #[trigger] + KeyIterator::between(*lo, ki, *hi) ==> post@[*ki.get()] == dst@, + forall|ki: KeyIterator| + !ki.is_end_spec() && !(#[trigger] KeyIterator::between(*lo, ki, *hi)) + ==> post@[*ki.get()] == pre@[*ki.get()], + ensures + AbstractDelegationMap(post@) == AbstractDelegationMap(pre@).update( + KeyRange:: { lo: *lo, hi: *hi }, + dst@, + ), + { + pre.valid_implies_complete(); + post.valid_implies_complete(); + assert_maps_equal!(AbstractDelegationMap(post@).0, AbstractDelegationMap(pre@).update(KeyRange::{ lo: *lo, hi: *hi }, dst@).0); - } + } - proof fn effect_of_hashmap_bulk_update( - pre: CKeyHashMap, - post: CKeyHashMap, - kr: &KeyRange::, - other: CKeyHashMap - ) - requires - forall |k| pre@.dom().contains(k) ==> #[trigger] valid_value(pre@[k]), - valid_hashtable(other@), - post@ == Map::>::new( - |k: AbstractKey| (pre@.dom().contains(k) || other@.dom().contains(k)) - && (kr.contains(k) ==> other@.dom().contains(k)), - |k: AbstractKey| if other@.dom().contains(k) { other@[k] } else { pre@[k] } - ), - ensures - post@ == bulk_update_hashtable(pre@, *kr, other@), - forall |k| post@.dom().contains(k) ==> #[trigger] valid_value(post@[k]) - { - assert_maps_equal!(post@, bulk_update_hashtable(pre@, *kr, other@)); - } + proof fn effect_of_hashmap_bulk_update( + pre: CKeyHashMap, + post: CKeyHashMap, + kr: &KeyRange::, + other: CKeyHashMap, + ) + requires + forall|k| pre@.dom().contains(k) ==> #[trigger] valid_value(pre@[k]), + valid_hashtable(other@), + post@ == Map::>::new( + |k: AbstractKey| + (pre@.dom().contains(k) || other@.dom().contains(k)) && (kr.contains(k) + ==> other@.dom().contains(k)), + |k: AbstractKey| + if other@.dom().contains(k) { + other@[k] + } else { + pre@[k] + }, + ), + ensures + post@ == bulk_update_hashtable(pre@, *kr, other@), + forall|k| post@.dom().contains(k) ==> #[trigger] valid_value(post@[k]), + { + assert_maps_equal!(post@, bulk_update_hashtable(pre@, *kr, other@)); + } - // Implements Impl/SHT/HostModel.i.dfy HostModelNextDelegate - fn host_model_next_delegate(&mut self) -> (sent_packets: Vec) - requires - old(self).next_delegate_preconditions(), - ensures - self.next_delegate_postconditions(*old(self), sent_packets@), - { - let sent_packets = vec![]; - proof { self.delegation_map.valid_implies_complete(); }; - let cpacket: &CPacket = &self.received_packet.as_ref().unwrap(); - let ghost pkt: Packet = cpacket@; - let ghost pre = *self; 
- proof { - assert_sets_equal!(abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), + // Implements Impl/SHT/HostModel.i.dfy HostModelNextDelegate + fn host_model_next_delegate(&mut self) -> (sent_packets: Vec) + requires + old(self).next_delegate_preconditions(), + ensures + self.next_delegate_postconditions(*old(self), sent_packets@), + { + let sent_packets = vec![]; + proof { + self.delegation_map.valid_implies_complete(); + } + ; + let cpacket: &CPacket = &self.received_packet.as_ref().unwrap(); + let ghost pkt: Packet = cpacket@; + let ghost pre = *self; + proof { + assert_sets_equal!(abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), Set::::empty()); - assert_sets_equal!(abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), + assert_sets_equal!(abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), extract_packets_from_lsht_packets( abstractify_outbound_packets_to_seq_of_lsht_packets( sent_packets@))); - }; - match &cpacket.msg { - CSingleMessage::Message{m, seqno, ..} => { - match m { - CMessage::Delegate{range, h} => { - let marshalable: bool = m.is_message_marshallable(); - if (!marshalable) { - self.received_packet = None; - assert (Self::host_ignoring_unparseable( - pre@, self@, abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@))); - return sent_packets; + } + ; + match &cpacket.msg { + CSingleMessage::Message { m, seqno, .. } => { + match m { + CMessage::Delegate { range, h } => { + let marshalable: bool = m.is_message_marshallable(); + if (!marshalable) { + self.received_packet = None; + assert(Self::host_ignoring_unparseable( + pre@, + self@, + abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), + )); + return sent_packets; + } else if !endpoints_contain(&self.constants.host_ids, &cpacket.src) { + self.received_packet = None; + assert(next_delegate( + pre@, + self@, + pre.received_packet.unwrap()@, + abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), + )); + return sent_packets; + } else { + self.delegation_map.set(&range.lo, &range.hi, &self.constants.me); + assert(valid_hashtable(h@)); + self.h.bulk_update(&range, &h); + self.received_packet = None; + self.num_delegations = self.num_delegations + 1; + proof { + Self::effect_of_delegation_map_set( + pre.delegation_map, + self.delegation_map, + &range.lo, + &range.hi, + &self.constants.me, + ); + Self::effect_of_hashmap_bulk_update(pre.h, self.h, &range, *h); } - else if !endpoints_contain(&self.constants.host_ids, &cpacket.src) { + assert(next_delegate( + pre@, + self@, + pre.received_packet.unwrap()@, + abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), + )); + return sent_packets; + } + }, + _ => { + assert(false); + unreached() + }, + } + }, + _ => { + assert(false); + unreached() + }, + }; + assert(false); + unreached() + } + + // Implements Impl/SHT/HostModel.i.dfy HostModelNextShard + fn host_model_next_shard(&mut self) -> (sent_packets: Vec) + requires + old(self).next_shard_preconditions(), + ensures + self.next_shard_postconditions(*old(self), sent_packets@), + { + proof { + self.delegation_map.valid_implies_complete(); + } + ; + let cpacket: &CPacket = &self.received_packet.as_ref().unwrap(); + let ghost pkt: Packet = cpacket@; + let ghost pre = *self; + match &cpacket.msg { + CSingleMessage::Message { m, .. } => { + let mut sent_packets: Vec = vec![]; + // Learn this for early return cases. 
+ assert(abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@) =~= Set::< + Packet, + >::empty()); + reveal(abstractify_seq_of_cpackets_to_set_of_sht_packets); + let marshalable: bool = m.is_message_marshallable(); + match m { + CMessage::Shard { ref kr, ref recipient } => { + if { + ||| !marshalable + ||| do_end_points_match(&recipient, &self.constants.me) + ||| !endpoints_contain(&self.constants.host_ids, &recipient) + } { + assert(recipient.abstractable()); + self.received_packet = None; + return sent_packets; + } else { + let this_host_owns_range = + self.delegation_map.delegate_for_key_range_is_host_impl( + &kr.lo, + &kr.hi, + &self.constants.me, + ); + if !this_host_owns_range { self.received_packet = None; - assert (next_delegate(pre@, self@, pre.received_packet.unwrap()@, - abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@))); return sent_packets; } - else { - self.delegation_map.set(&range.lo, &range.hi, &self.constants.me); - assert (valid_hashtable(h@)); - self.h.bulk_update(&range, &h); + let h = extract_range_impl(&self.h, kr); + if h.len() >= 62 { self.received_packet = None; - self.num_delegations = self.num_delegations + 1; - proof { - Self::effect_of_delegation_map_set(pre.delegation_map, self.delegation_map, - &range.lo, &range.hi, &self.constants.me); - Self::effect_of_hashmap_bulk_update(pre.h, self.h, &range, *h); - } - assert (next_delegate(pre@, self@, pre.received_packet.unwrap()@, - abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@))); return sent_packets; } - }, - _ => { assert(false); unreached() }, - } - }, - _ => { assert(false); unreached() }, - }; - assert(false); unreached() - } - - // Implements Impl/SHT/HostModel.i.dfy HostModelNextShard - fn host_model_next_shard(&mut self) -> (sent_packets: Vec) - requires - old(self).next_shard_preconditions(), - ensures - self.next_shard_postconditions(*old(self), sent_packets@), - { - proof { self.delegation_map.valid_implies_complete(); }; - let cpacket: &CPacket = &self.received_packet.as_ref().unwrap(); - let ghost pkt: Packet = cpacket@; - let ghost pre = *self; - match &cpacket.msg { - CSingleMessage::Message{ m, .. } => { - let mut sent_packets: Vec = vec![]; - - // Learn this for early return cases. - assert( abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@) =~= Set::::empty() ); - - reveal(abstractify_seq_of_cpackets_to_set_of_sht_packets); - - let marshalable: bool = m.is_message_marshallable(); - - match m { - CMessage::Shard{ ref kr, ref recipient } => { - if { - ||| !marshalable - ||| do_end_points_match(&recipient, &self.constants.me) - ||| !endpoints_contain(&self.constants.host_ids, &recipient) - } - { - assert(recipient.abstractable()); - self.received_packet = None; - return sent_packets; - } else { - let this_host_owns_range = self.delegation_map.delegate_for_key_range_is_host_impl(&kr.lo, &kr.hi, &self.constants.me); - - if !this_host_owns_range { + // assert( !next_shard_wrapper_must_reject(old(self)@, m@) ); + // Comically, the Dafny code called ExtractRange twice! 
+ + let out_m = CMessage::Delegate { range: kr.clone(), h }; + assert(out_m.is_marshalable()) by { + vstd::bytes::lemma_auto_spec_u64_to_from_le_bytes(); + crate::marshal_ironsht_specific_v::lemma_is_marshalable_CKeyHashMap( + h, + ); + reveal( + crate::marshal_ironsht_specific_v::ckeyhashmap_max_serialized_size, + ); + } + let optional_sm = self.sd.send_single_cmessage(&out_m, &recipient); + match optional_sm { + None => { self.received_packet = None; + self.num_delegations = self.num_delegations + 1; + assert(next_shard( + old(self)@, + self@, + abstractify_seq_of_cpackets_to_set_of_sht_packets( + sent_packets@, + ), + *kr, + recipient@, + arbitrary(), + false, + )); // exists witness return sent_packets; - } - - let h = extract_range_impl(&self.h, kr); - if h.len() >= 62 { + }, + Some(sm) => { + self.delegation_map.set(&kr.lo, &kr.hi, recipient); + proof { + // (jonh/lorch) we couldn't figure out why this lemma proof + // consists entirely of a =~=, yet playing that same + // twiddle here isn't sufficient. + DelegationMap::lemma_set_is_update( + old(self).delegation_map, + self.delegation_map, + kr.lo, + kr.hi, + recipient, + ) + } + ; + self.h.bulk_remove(&kr); + // sure would be nice to not copy-paste this stuff, but + // we're borrowing kr. + let p = CPacket { + dst: clone_end_point(&recipient), + src: clone_end_point(&self.constants.me), + msg: sm, + }; + sent_packets.push(p); self.received_packet = None; - return sent_packets; - } - - // assert( !next_shard_wrapper_must_reject(old(self)@, m@) ); - - // Comically, the Dafny code called ExtractRange twice! - let out_m = CMessage::Delegate{ range: kr.clone(), h }; - assert( out_m.is_marshalable() ) by { - vstd::bytes::lemma_auto_spec_u64_to_from_le_bytes(); - crate::marshal_ironsht_specific_v::lemma_is_marshalable_CKeyHashMap(h); - reveal(crate::marshal_ironsht_specific_v::ckeyhashmap_max_serialized_size); - } - let optional_sm = self.sd.send_single_cmessage(&out_m, &recipient); - match optional_sm { - None => { - self.received_packet = None; - self.num_delegations = self.num_delegations + 1; - assert( next_shard(old(self)@, self@, - abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), - *kr, recipient@, arbitrary(), false) ); // exists witness - return sent_packets; - }, - Some(sm) => { - self.delegation_map.set(&kr.lo, &kr.hi, recipient); - proof { - // (jonh/lorch) we couldn't figure out why this lemma proof - // consists entirely of a =~=, yet playing that same - // twiddle here isn't sufficient. - DelegationMap::lemma_set_is_update( - old(self).delegation_map, self.delegation_map, - kr.lo, kr.hi, recipient) - }; - - self.h.bulk_remove(&kr); - - // sure would be nice to not copy-paste this stuff, but - // we're borrowing kr. 
- let p = CPacket{ - dst: clone_end_point(&recipient), - src: clone_end_point(&self.constants.me), - msg: sm - }; - sent_packets.push(p); - self.received_packet = None; - self.num_delegations = self.num_delegations + 1; - - proof { - lemma_map_values_singleton_auto::(); - lemma_to_set_singleton_auto::(); - - assert( - abstractify_outbound_packets_to_seq_of_lsht_packets(sent_packets@).map_values(|lp: LSHTPacket| extract_packet_from_lsht_packet(lp)) - =~= seq![extract_packet_from_lsht_packet(abstractify_cpacket_to_lsht_packet(p))] ); // twiddle - - assert( next_shard(old(self)@, self@, - abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), - *kr, recipient@, sm@, true) ); // exists witness - - assert( p.msg.is_marshalable() ); - } - return sent_packets; + self.num_delegations = self.num_delegations + 1; + proof { + lemma_map_values_singleton_auto::(); + lemma_to_set_singleton_auto::(); + assert(abstractify_outbound_packets_to_seq_of_lsht_packets( + sent_packets@, + ).map_values( + |lp: LSHTPacket| extract_packet_from_lsht_packet(lp), + ) + =~= seq![extract_packet_from_lsht_packet(abstractify_cpacket_to_lsht_packet(p))]); // twiddle + assert(next_shard( + old(self)@, + self@, + abstractify_seq_of_cpackets_to_set_of_sht_packets( + sent_packets@, + ), + *kr, + recipient@, + sm@, + true, + )); // exists witness + assert(p.msg.is_marshalable()); } - } + return sent_packets; + }, } - }, - _ => assert(false), - } - }, - _ => assert(false) - } - unreached() + } + }, + _ => assert(false), + } + }, + _ => assert(false), } + unreached() + } - // Implements Impl/SHT/HostModel.i.dfy HostModelNextReceiveMessage - fn host_model_next_receive_message(&mut self) -> (sent_packets: Vec) - requires ({ + // Implements Impl/SHT/HostModel.i.dfy HostModelNextReceiveMessage + fn host_model_next_receive_message(&mut self) -> (sent_packets: Vec) + requires + ({ let old_self = *old(self); match old_self.received_packet { Some(cpacket) => { &&& old(self).sd.valid() &&& old(self).host_state_common_preconditions() &&& match cpacket.msg { - CSingleMessage::Message{m: m, ..} => - match m { - CMessage::GetRequest{..} => old_self.next_get_request_preconditions(), - CMessage::SetRequest{..} => old_self.next_set_request_preconditions(), - CMessage::Delegate{..} => old_self.next_delegate_preconditions(), - CMessage::Shard{..} => old_self.next_shard_preconditions(), - _ => true, - } - _ => false, - } + CSingleMessage::Message { m: m, .. } => match m { + CMessage::GetRequest { + .. + } => old_self.next_get_request_preconditions(), + CMessage::SetRequest { + .. + } => old_self.next_set_request_preconditions(), + CMessage::Delegate { .. } => old_self.next_delegate_preconditions(), + CMessage::Shard { .. 
} => old_self.next_shard_preconditions(), + _ => true, + }, + _ => false, + } }, None => false, } - }) - ensures - match old(self).received_packet { - Some(cpacket) => { - &&& cpacket_seq_is_abstractable(sent_packets@) - &&& self.host_state_common_postconditions(*old(self), (*old(self)).received_packet.unwrap(), - sent_packets@) - &&& { - ||| process_message(old(self)@, self@, - abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@)) - ||| Self::host_ignoring_unparseable(old(self)@, self@, - abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@)) - } - }, - None => false, + }), + ensures + match old(self).received_packet { + Some(cpacket) => { + &&& cpacket_seq_is_abstractable(sent_packets@) + &&& self.host_state_common_postconditions( + *old(self), + (*old(self)).received_packet.unwrap(), + sent_packets@, + ) + &&& { + ||| process_message( + old(self)@, + self@, + abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), + ) + ||| Self::host_ignoring_unparseable( + old(self)@, + self@, + abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), + ) + } }, - { - proof { self.delegation_map.valid_implies_complete(); } - let cpacket = self.received_packet.as_ref().unwrap(); - match &cpacket.msg { - CSingleMessage::Message{m, ..} => - match m { - CMessage::GetRequest{..} => self.host_model_next_get_request(), - CMessage::SetRequest{..} => self.host_model_next_set_request(), - CMessage::Delegate{..} => self.host_model_next_delegate(), - CMessage::Shard{..} => self.host_model_next_shard(), - CMessage::Reply{..} | CMessage::Redirect{..} => { - self.received_packet = None; - let sent_packets = vec![]; - proof { - assert_sets_equal!(abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), + None => false, + }, + { + proof { + self.delegation_map.valid_implies_complete(); + } + let cpacket = self.received_packet.as_ref().unwrap(); + match &cpacket.msg { + CSingleMessage::Message { m, .. } => match m { + CMessage::GetRequest { .. } => self.host_model_next_get_request(), + CMessage::SetRequest { .. } => self.host_model_next_set_request(), + CMessage::Delegate { .. } => self.host_model_next_delegate(), + CMessage::Shard { .. } => self.host_model_next_shard(), + CMessage::Reply { .. } | CMessage::Redirect { .. 
} => { + self.received_packet = None; + let sent_packets = vec![]; + proof { + assert_sets_equal!(abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), Set::::empty()); - assert_sets_equal!(abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), + assert_sets_equal!(abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), extract_packets_from_lsht_packets( abstractify_outbound_packets_to_seq_of_lsht_packets(sent_packets@))); - }; - sent_packets - }, - }, - _ => { - assert(false); - unreached() + } + ; + sent_packets }, - } + }, + _ => { + assert(false); + unreached() + }, } + } - // Impl/LiveSHT/SchedulerImpl.i.dfy Host_ProcessReceivedPacket_Next - fn process_received_packet_next_impl(&mut self, netc: &mut NetClient) -> (rc: (bool, Ghost)) + // Impl/LiveSHT/SchedulerImpl.i.dfy Host_ProcessReceivedPacket_Next + fn process_received_packet_next_impl(&mut self, netc: &mut NetClient) -> (rc: ( + bool, + Ghost, + )) requires Self::next_requires(*old(self), *old(netc)), - ensures ({ - let (ok, res) = rc; - &&& ok == netc.ok() - &&& (*self).invariants(&netc.my_end_point()) - &&& ok ==> { - ||| process_received_packet_next((*old(self))@, (*self)@, abstractify_raw_log_to_ios(res@.ios)) - ||| ignore_nonsensical_delegation_packet((*old(self))@, (*self)@, abstractify_raw_log_to_ios(res@.ios)) + ensures + ({ + let (ok, res) = rc; + &&& ok == netc.ok() + &&& (*self).invariants(&netc.my_end_point()) + &&& ok ==> { + ||| process_received_packet_next( + (*old(self))@, + (*self)@, + abstractify_raw_log_to_ios(res@.ios), + ) + ||| ignore_nonsensical_delegation_packet( + (*old(self))@, + (*self)@, + abstractify_raw_log_to_ios(res@.ios), + ) } - &&& self.constants == (*old(self)).constants - &&& ok ==> res@.event_seq() == res@.ios - &&& (ok || res@.sends.len()>0) ==> (*netc).history() == (*old(netc)).history() + res@.event_seq() - &&& res@.well_typed_events() - &&& no_invalid_sends(abstractify_raw_log_to_ios(res@.ios)) - }) - { - let ghost old_self = *self; - - // A lot of the cases below require that we establish that the delegation map was complete at the outset. - // So we prove this first. - + &&& self.constants == (*old(self)).constants + &&& ok ==> res@.event_seq() == res@.ios + &&& (ok || res@.sends.len() > 0) ==> (*netc).history() == (*old(netc)).history() + + res@.event_seq() + &&& res@.well_typed_events() + &&& no_invalid_sends(abstractify_raw_log_to_ios(res@.ios)) + }), + { + let ghost old_self = *self; + // A lot of the cases below require that we establish that the delegation map was complete at the outset. + // So we prove this first. + proof { + old_self.delegation_map.valid_implies_complete(); + } + // First, check if we should process this message. If not, do nothing. It's pretty weird that + // we don't set received_packet to None, but Ironfleet doesn't do it either. I guess the liveness + // proof relies on the fact that these messages are never actually sent. + if !self.should_process_received_message_impl() { + let res = make_empty_event_results(); proof { - old_self.delegation_map.valid_implies_complete(); + // The following assert isn't really necessary, but it may help the solver see that + // we're in the case of process_received_packet_next, not the case of ignore_unparseable_packet. 
+ assert(process_received_packet_next( + old_self@, + (*self)@, + abstractify_raw_log_to_ios(res@.ios), + )); } + return (true, res) + } + // Second, check if this is something other than a CSingleMessage::Message (e.g., an ack) + // If so, process it by just setting self.received_packet = None. - // First, check if we should process this message. If not, do nothing. It's pretty weird that - // we don't set received_packet to None, but Ironfleet doesn't do it either. I guess the liveness - // proof relies on the fact that these messages are never actually sent. - - if !self.should_process_received_message_impl() { + match self.received_packet.as_ref().unwrap().msg { + CSingleMessage::Message { .. } => {}, + _ => { + self.received_packet = None; let res = make_empty_event_results(); proof { // The following assert isn't really necessary, but it may help the solver see that // we're in the case of process_received_packet_next, not the case of ignore_unparseable_packet. - assert (process_received_packet_next(old_self@, (*self)@, abstractify_raw_log_to_ios(res@.ios))); + assert(process_received_packet_next( + old_self@, + (*self)@, + abstractify_raw_log_to_ios(res@.ios), + )); } + ; return (true, res) - } - - // Second, check if this is something other than a CSingleMessage::Message (e.g., an ack) - // If so, process it by just setting self.received_packet = None. - - match self.received_packet.as_ref().unwrap().msg { - CSingleMessage::Message{..} => {}, - _ => { - self.received_packet = None; - let res = make_empty_event_results(); - proof { - // The following assert isn't really necessary, but it may help the solver see that - // we're in the case of process_received_packet_next, not the case of ignore_unparseable_packet. - assert (process_received_packet_next(old_self@, (*self)@, abstractify_raw_log_to_ios(res@.ios))); - }; - return (true, res) - } - } - - assert (self.sd.valid()); - assert (self.received_packet.is_Some()); - assert (self.received_packet.get_Some_0().msg.is_Message()); - assert (self.host_state_common_preconditions()); - let sent_packets = self.host_model_next_receive_message(); - let (ok, net_event_log, ios) = self.deliver_outbound_packets(netc, &sent_packets); - if !ok { - return (false, make_empty_event_results()); - } - else { - proof { - if process_message(old(self)@, self@, - abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@)) { - assert (process_received_packet_next((*old(self))@, (*self)@, ios@)); - } - else { - assert (Self::host_ignoring_unparseable(old(self)@, self@, - abstractify_seq_of_cpackets_to_set_of_sht_packets( - sent_packets@))); - assert_by_contradiction!(sent_packets@.len() == 0, { + }, + } + assert(self.sd.valid()); + assert(self.received_packet.is_Some()); + assert(self.received_packet.get_Some_0().msg.is_Message()); + assert(self.host_state_common_preconditions()); + let sent_packets = self.host_model_next_receive_message(); + let (ok, net_event_log, ios) = self.deliver_outbound_packets(netc, &sent_packets); + if !ok { + return (false, make_empty_event_results()); + } else { + proof { + if process_message( + old(self)@, + self@, + abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), + ) { + assert(process_received_packet_next((*old(self))@, (*self)@, ios@)); + } else { + assert(Self::host_ignoring_unparseable( + old(self)@, + self@, + abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@), + )); + assert_by_contradiction!(sent_packets@.len() == 0, { let p = sent_packets@[0]; let s = 
abstractify_seq_of_cpackets_to_set_of_sht_packets(sent_packets@); //XXX lemma_len0_is_empty(s); assert (sent_packets@.map_values(|cp: CPacket| cp@)[0] == p@); assert (s.contains(p@)); }); - assert (ignore_nonsensical_delegation_packet((*old(self))@, (*self)@, ios@)); - } + assert(ignore_nonsensical_delegation_packet((*old(self))@, (*self)@, ios@)); } - return (true, make_send_only_event_results(net_event_log)); } + return (true, make_send_only_event_results(net_event_log)); } + } - // Distributed/Impl/LiveSHT/SchedulerImpl.i.dfy Host_NoReceive_NoClock_Next? - #[verifier(spinoff_prover)] - pub fn host_noreceive_noclock_next(&mut self, netc: &mut NetClient) -> (rc: (bool, Ghost)) + // Distributed/Impl/LiveSHT/SchedulerImpl.i.dfy Host_NoReceive_NoClock_Next? + #[verifier(spinoff_prover)] + pub fn host_noreceive_noclock_next(&mut self, netc: &mut NetClient) -> (rc: ( + bool, + Ghost, + )) requires Self::next_requires(*old(self), *old(netc)), ensures Self::next_ensures(*old(self), *old(netc), *self, *netc, rc), - { - // HostModel.HostModelSpontaneouslyRetransmit - // SingleDeliveryModel.RetransmitUnAckedPackets - let sent_packets = self.sd.retransmit_un_acked_packets(&self.constants.me); - - // SchedulerImpl.DeliverOutboundPackets (seems to be a no-op wrapper?) - // SchedulerImpl.DeliverPacketSeq - // NetSHT.SendPacketSeq - let (ok, Ghost(send_events)) = send_packet_seq(&sent_packets, netc); - if !ok { - let ghost event_results = Self::empty_event_results(); - let rc = (false, Ghost(event_results)); - assert( Self::next_ensures(*old(self), *old(netc), *self, *netc, rc) ); - // this return path seems unstable - return rc; - } - - let event_results = Ghost(EventResults { - recvs: seq![], - clocks: seq![], - sends: send_events, - ios: send_events, - }); - proof { - let aios = abstractify_raw_log_to_ios(event_results@.ios); - - assert forall |i| #![auto] 0 <= i < aios.len() && aios[i].is_Send() - implies !aios[i].get_Send_s().msg.is_InvalidMessage() by { - assert( send_log_entry_reflects_packet(send_events[i], &sent_packets[i]) ); // trigger - } - - self.delegation_map.valid_implies_complete(); // Needed to get old(self)@.wf() - - // Have to do some =~= to the parts of these definitions before .to_set() - let view_seq = sent_packets@.map_values(|cp: CPacket| cp@); - let extract_seq = extract_sent_packets_from_ios(aios).map_values(|lp: LSHTPacket| extract_packet_from_lsht_packet(lp)); - - // Skip through the filter in extract_sent_packets_from_ios, which is a no-op here - lemma_if_everything_in_seq_satisfies_filter_then_filter_is_identity(aios, |io: LSHTIo| io.is_Send()); - - // Reach into an inconvenient trigger - assert forall |i| 0<=i (rc: (bool, Ghost)) + pub fn real_next_impl(&mut self, netc: &mut NetClient) -> (rc: (bool, Ghost)) requires - Self::next_requires(*old(self), *old(netc)), - ensures - Self::next_ensures(*old(self), *old(netc), *self, *netc, rc), - { - proof { old(self).delegation_map.valid_implies_complete(); } - let cur_action_index = self.next_action_index; - let rc; - if cur_action_index == 0 { - rc = self.receive_packet_next(netc); - } else if cur_action_index == 1 { - let ghost old_self: HostState = *self; - let ghost old_netc: NetClient = *netc; - rc = self.process_received_packet_next_impl(netc); - proof { - let (ok, res) = rc; { - if ok { - if process_received_packet_next(old_self@, self@, abstractify_raw_log_to_ios(res@.ios)) { - assert (next_step(old_self@, self@, abstractify_raw_log_to_ios(res@.ios), - Step::ProcessReceivedPacket{})); // establish exists |step| 
next_step... - } - else { - assert (ignore_nonsensical_delegation_packet(old_self@, self@, - abstractify_raw_log_to_ios(res@.ios))); - // establish exists |step| next_step... - assert (next_step(old_self@, self@, abstractify_raw_log_to_ios(res@.ios), - Step::IgnoreNonsensicalDelegationPacket{})); - } - assert (host_protocol_t::next(old_self@, self@, abstractify_raw_log_to_ios(res@.ios))); + Self::next_requires(*old(self), *old(netc)), + ensures + Self::next_ensures(*old(self), *old(netc), *self, *netc, rc), + { + proof { + old(self).delegation_map.valid_implies_complete(); + } + let cur_action_index = self.next_action_index; + let rc; + if cur_action_index == 0 { + rc = self.receive_packet_next(netc); + } else if cur_action_index == 1 { + let ghost old_self: HostState = *self; + let ghost old_netc: NetClient = *netc; + rc = self.process_received_packet_next_impl(netc); + proof { + let (ok, res) = rc; + { + if ok { + if process_received_packet_next( + old_self@, + self@, + abstractify_raw_log_to_ios(res@.ios), + ) { + assert(next_step( + old_self@, + self@, + abstractify_raw_log_to_ios(res@.ios), + Step::ProcessReceivedPacket { }, + )); // establish exists |step| next_step... + } else { + assert(ignore_nonsensical_delegation_packet( + old_self@, + self@, + abstractify_raw_log_to_ios(res@.ios), + )); + // establish exists |step| next_step... + assert(next_step( + old_self@, + self@, + abstractify_raw_log_to_ios(res@.ios), + Step::IgnoreNonsensicalDelegationPacket { }, + )); } + assert(host_protocol_t::next( + old_self@, + self@, + abstractify_raw_log_to_ios(res@.ios), + )); } } - } else if cur_action_index == 2 { - self.resend_count = (self.resend_count + 1) % 100000000; - if (self.resend_count == 0) { - rc = self.host_noreceive_noclock_next(netc); - assert( rc.0 ==> Self::next(old(self)@, self@, rc.1@.ios) ); - } else { - rc = (true, make_empty_event_results()); - assert( next_step(old(self)@, self@, abstractify_raw_log_to_ios(rc.1@.ios), Step::SpontaneouslyRetransmit{})); // witness - } + } + } else if cur_action_index == 2 { + self.resend_count = (self.resend_count + 1) % 100000000; + if (self.resend_count == 0) { + rc = self.host_noreceive_noclock_next(netc); + assert(rc.0 ==> Self::next(old(self)@, self@, rc.1@.ios)); } else { - assert (false); - rc = unreached() + rc = (true, make_empty_event_results()); + assert(next_step( + old(self)@, + self@, + abstractify_raw_log_to_ios(rc.1@.ios), + Step::SpontaneouslyRetransmit { }, + )); // witness } - - if !rc.0 { return rc; } - - assert(self.invariants(&netc.my_end_point())); - self.next_action_index = (self.next_action_index + 1) % 3; - rc + } else { + assert(false); + rc = unreached() } - + if !rc.0 { + return rc; + } + assert(self.invariants(&netc.my_end_point())); + self.next_action_index = (self.next_action_index + 1) % 3; + rc } +} - } +} // verus! } mod host_protocol_t { @@ -4753,569 +5549,740 @@ mod host_protocol_t { // Protocol/SHT/Host.i.dfy verus! 
{ - // TODO Try translating this into *_state_machine!{} form - // This ports Protocol/LiveSHT/RefinementProof/Environment ::LSHTIo - pub type LSHTIo = LIoOp>; - // This ports Protocol/LiveSHT/RefinementProof/Environment ::LSHTPacket - pub use crate::net_sht_v::LSHTPacket; +// TODO Try translating this into *_state_machine!{} form +// This ports Protocol/LiveSHT/RefinementProof/Environment ::LSHTIo +pub type LSHTIo = LIoOp>; - pub type AbstractIos = Seq; +// This ports Protocol/LiveSHT/RefinementProof/Environment ::LSHTPacket +pub use crate::net_sht_v::LSHTPacket; - pub struct AbstractConstants { - pub root_identity: AbstractEndPoint, - pub host_ids: Seq, - pub params: AbstractParameters, - pub me: AbstractEndPoint, - } +pub type AbstractIos = Seq; - pub struct AbstractHostState { - pub constants: AbstractConstants, - pub delegation_map: AbstractDelegationMap, - pub h: Hashtable, - pub sd: SingleDelivery, - pub received_packet: Option, - pub num_delegations: int, // TODO nat? - pub received_requests: Seq, - // We decided to elide resendCount and nextActionIndex from this translated spec - // because they're only relevant to liveness. - } +pub struct AbstractConstants { + pub root_identity: AbstractEndPoint, + pub host_ids: Seq, + pub params: AbstractParameters, + pub me: AbstractEndPoint, +} - impl AbstractHostState { - pub open spec(checked) fn wf(self) -> bool { - &&& self.delegation_map.is_complete() - } +pub struct AbstractHostState { + pub constants: AbstractConstants, + pub delegation_map: AbstractDelegationMap, + pub h: Hashtable, + pub sd: SingleDelivery, + pub received_packet: Option, + pub num_delegations: int, // TODO nat? + pub received_requests: Seq< + AppRequest, + >, + // We decided to elide resendCount and nextActionIndex from this translated spec + // because they're only relevant to liveness. 
+} + +impl AbstractHostState { + pub open spec(checked) fn wf(self) -> bool { + &&& self.delegation_map.is_complete() } +} - // Protocol/SHT/Host.i.dfy max_hashtable_size() +// Protocol/SHT/Host.i.dfy max_hashtable_size() +pub open spec fn max_hashtable_size() -> int { + 62 +} - pub open spec fn max_hashtable_size() -> int - { - 62 - } +// Ports Impl/SHT/PacketParsing.i.dfy :: ValidHashtable +pub open spec fn valid_hashtable(h: Hashtable) -> bool { + &&& h.dom().len() < max_hashtable_size() + &&& (forall|k| h.dom().contains(k) ==> valid_key(k) && #[trigger] valid_value(h[k])) +} - // Ports Impl/SHT/PacketParsing.i.dfy :: ValidHashtable - pub open spec fn valid_hashtable(h: Hashtable) -> bool - { - &&& h.dom().len() < max_hashtable_size() - &&& (forall |k| h.dom().contains(k) ==> valid_key(k) && #[trigger] valid_value(h[k])) +pub open spec(checked) fn hashtable_lookup(h: Hashtable, k: AbstractKey) -> Option { + if h.dom().contains(k) { + Some(h[k]) + } else { + None } +} - pub open spec(checked) fn hashtable_lookup(h: Hashtable, k: AbstractKey) -> Option - { - if h.dom().contains(k) { Some(h[k]) } else { None } - } +// Protocol/SHT/Delegations.i.dfy BulkUpdateDomain +pub open spec(checked) fn bulk_update_domain( + h: Hashtable, + kr: KeyRange, + u: Hashtable, +) -> Set { + Set::::new( + |k| + (h.dom().contains(k) || u.dom().contains(k)) && (kr.contains(k) ==> u.dom().contains( + k, + )), + ) +} - // Protocol/SHT/Delegations.i.dfy BulkUpdateDomain - pub open spec(checked) fn bulk_update_domain(h: Hashtable, kr: KeyRange, u: Hashtable) -> Set - { - Set::::new(|k| (h.dom().contains(k) || u.dom().contains(k)) - && (kr.contains(k) ==> u.dom().contains(k))) - } +// Protocol/SHT/Delegations.i.dfy BulkUpdateHashtable +pub open spec /*(checked) because lambdas*/ +fn bulk_update_hashtable(h: Hashtable, kr: KeyRange, u: Hashtable) -> Hashtable { + Map::::new( + |k: AbstractKey| bulk_update_domain(h, kr, u).contains(k), + |k: AbstractKey| + if u.dom().contains(k) { + u[k] + } else { + h[k] + }, + ) +} - // Protocol/SHT/Delegations.i.dfy BulkUpdateHashtable - pub open spec /*(checked) because lambdas*/ fn bulk_update_hashtable(h: Hashtable, kr: KeyRange, u: Hashtable) -> Hashtable - { - Map::::new( - |k: AbstractKey| bulk_update_domain(h, kr, u).contains(k), - |k: AbstractKey| if u.dom().contains(k) { u[k] } else { h[k] } - ) - } +// Impl/SHT/HostModel.i.dfy BulkRemoveHashtable +pub open spec /*(checked) because lambdas*/ +fn bulk_remove_hashtable(h: Hashtable, kr: KeyRange) -> Hashtable { + Map::::new( + |k: AbstractKey| h.dom().contains(k) && !kr.contains(k), + |k: AbstractKey| h[k], + ) +} - // Impl/SHT/HostModel.i.dfy BulkRemoveHashtable - pub open spec/*(checked) because lambdas*/ fn bulk_remove_hashtable(h: Hashtable, kr: KeyRange) -> Hashtable - { - Map::::new( - |k: AbstractKey| h.dom().contains(k) && !kr.contains(k), - |k: AbstractKey| h[k] - ) +pub open spec(checked) fn valid_optional_value(ov: Option) -> bool { + match ov { + None => true, + Some(value) => valid_value(value), } +} - pub open spec(checked) fn valid_optional_value(ov: Option) -> bool - { - match ov { - None => true, - Some(value) => valid_value(value), - } - } +// In Ironfleet, proving liveness demands that we not simply willy-nilly reject packets we don't like. +// However, in this port, we used a freshly-written general marshalling library. It's a good +// library, but it didn't happen to provide support for proving that, if demarshal(bytes) fails, +// then no structured message marshals to those bytes. 
Since we're not proving liveness of this +// implementation, we instead provide this placeholder marker for where the implementation is +// unable to prove demarshaling invertibility. +#[verifier::opaque] +pub open spec fn okay_to_ignore_packets() -> bool { + true +} - // In Ironfleet, proving liveness demands that we not simply willy-nilly reject packets we don't like. - // However, in this port, we used a freshly-written general marshalling library. It's a good - // library, but it didn't happen to provide support for proving that, if demarshal(bytes) fails, - // then no structured message marshals to those bytes. Since we're not proving liveness of this - // implementation, we instead provide this placeholder marker for where the implementation is - // unable to prove demarshaling invertibility. - #[verifier::opaque] - pub open spec fn okay_to_ignore_packets() -> bool { - true - } +pub proof fn workaround_dermarshal_not_invertible() + ensures + okay_to_ignore_packets(), +{ + reveal(okay_to_ignore_packets); +} - pub proof fn workaround_dermarshal_not_invertible() - ensures okay_to_ignore_packets() - { - reveal(okay_to_ignore_packets); - } +pub open spec(checked) fn receive_packet( + pre: AbstractHostState, + post: AbstractHostState, + pkt: Packet, + out: Set, + ack: Packet, +) -> bool { + ||| { + &&& pre.received_packet is None // No packet currently waiting to be processed (buffered in my state) + // Record incoming packet in my state and possibly ack it + + &&& SingleDelivery::receive(pre.sd, post.sd, pkt, ack, out) + &&& if SingleDelivery::new_single_message(pre.sd, pkt) { + post.received_packet == Some(pkt) // Enqueue this packet for processing - pub open spec(checked) fn receive_packet(pre: AbstractHostState, post: AbstractHostState, pkt: Packet, out: Set, ack: Packet) -> bool { - ||| { - &&& pre.received_packet is None // No packet currently waiting to be processed (buffered in my state) - // Record incoming packet in my state and possibly ack it - &&& SingleDelivery::receive(pre.sd, post.sd, pkt, ack, out) - &&& if SingleDelivery::new_single_message(pre.sd, pkt) { - post.received_packet == Some(pkt) // Enqueue this packet for processing - } else { - post.received_packet is None - } - &&& post == AbstractHostState {sd: post.sd, received_packet: post.received_packet, ..post} // Nothing else changes - } - ||| { - // internal buffer full or okay to ignore packets; drop this message and wait for it to be retransmitted. - &&& pre.received_packet is Some || okay_to_ignore_packets() - &&& post == pre - &&& out == Set::::empty() - } - } + } else { + post.received_packet is None + } + &&& post == AbstractHostState { + sd: post.sd, + received_packet: post.received_packet, + ..post + } // Nothing else changes - // Translates Protocol/LiveSHT/Scheduler.i.dfy :: ExtractSentPacketsFromIos - pub open spec fn extract_sent_packets_from_ios(ios: Seq) -> Seq - { - ios.filter(|io: LSHTIo| io.is_Send()).map_values(|io: LSHTIo| io.get_Send_s()) } - - // Protocol/SHT/Host.i.dfy :: LSHTPacketToPacket - pub open spec fn extract_packet_from_lsht_packet(lp: LSHTPacket) -> Packet - { - Packet { dst: lp.dst, src: lp.src, msg: lp.msg } + ||| { + // internal buffer full or okay to ignore packets; drop this message and wait for it to be retransmitted. 
+ &&& pre.received_packet is Some || okay_to_ignore_packets() + &&& post == pre + &&& out == Set::::empty() } +} - // Translates Protocol/SHT/Host.i.dfy :: ExtractPacketsFromLSHTPackets - pub open spec fn extract_packets_from_lsht_packets(seq_packets: Seq) -> Set - { - seq_packets.map_values(|lp: LSHTPacket| extract_packet_from_lsht_packet(lp)).to_set() - } +// Translates Protocol/LiveSHT/Scheduler.i.dfy :: ExtractSentPacketsFromIos +pub open spec fn extract_sent_packets_from_ios(ios: Seq) -> Seq { + ios.filter(|io: LSHTIo| io.is_Send()).map_values(|io: LSHTIo| io.get_Send_s()) +} - // Translates ExtractPacketsFromLSHTPackets(ExtractSentPacketsFromIos(ios)) - pub open spec fn extract_packets_from_abstract_ios(ios: AbstractIos) -> Set - { - extract_packets_from_lsht_packets(extract_sent_packets_from_ios(ios)) - } +// Protocol/SHT/Host.i.dfy :: LSHTPacketToPacket +pub open spec fn extract_packet_from_lsht_packet(lp: LSHTPacket) -> Packet { + Packet { dst: lp.dst, src: lp.src, msg: lp.msg } +} - // Protocol/LiveSHT/Scheduler.i.dfy ::ReceivePacket_Wrapper - // nb: Dafny split this out to enable easy triggering of this exists - pub open spec(checked) fn receive_packet_wrapper(pre: AbstractHostState, post: AbstractHostState, pkt: Packet, sent_packets: Set) -> bool - { - exists |ack| receive_packet(pre, post, pkt, sent_packets, ack) - } +// Translates Protocol/SHT/Host.i.dfy :: ExtractPacketsFromLSHTPackets +pub open spec fn extract_packets_from_lsht_packets(seq_packets: Seq) -> Set { + seq_packets.map_values(|lp: LSHTPacket| extract_packet_from_lsht_packet(lp)).to_set() +} + +// Translates ExtractPacketsFromLSHTPackets(ExtractSentPacketsFromIos(ios)) +pub open spec fn extract_packets_from_abstract_ios(ios: AbstractIos) -> Set { + extract_packets_from_lsht_packets(extract_sent_packets_from_ios(ios)) +} + +// Protocol/LiveSHT/Scheduler.i.dfy ::ReceivePacket_Wrapper +// nb: Dafny split this out to enable easy triggering of this exists +pub open spec(checked) fn receive_packet_wrapper( + pre: AbstractHostState, + post: AbstractHostState, + pkt: Packet, + sent_packets: Set, +) -> bool { + exists|ack| receive_packet(pre, post, pkt, sent_packets, ack) +} - // Protocol/LiveSHT/Scheduler.i.dfy ::LHost_ReceivePacketWithoutReadingClock - pub open spec(checked) fn receive_packet_without_reading_clock(pre: AbstractHostState, post: AbstractHostState, ios: AbstractIos) -> bool +// Protocol/LiveSHT/Scheduler.i.dfy ::LHost_ReceivePacketWithoutReadingClock +pub open spec(checked) fn receive_packet_without_reading_clock( + pre: AbstractHostState, + post: AbstractHostState, + ios: AbstractIos, +) -> bool recommends ios.len() >= 1, ios[0].is_Receive(), pre.delegation_map.is_complete(), - { - let r = ios[0].get_Receive_r(); - let pkt = Packet{dst: r.dst, src: r.src, msg: r.msg}; - let sent_packets = extract_packets_from_abstract_ios(ios); - receive_packet_wrapper(pre, post, pkt, sent_packets) - } - - // Protocol/LiveSHT/Scheduler.i.dfy ::LHost_ReceivePacket_Next - pub open spec(checked) fn receive_packet_next(pre: AbstractHostState, post: AbstractHostState, ios: AbstractIos) -> bool { - &&& ios.len() >= 1 - &&& if ios[0].is_TimeoutReceive() { - &&& post == pre - &&& ios.len() == 1 - } else { - &&& pre.delegation_map.is_complete() - &&& ios[0].is_Receive() - &&& forall |i| 1 <= i < ios.len() ==> /*#[trigger]*/ ios[i].is_Send() - &&& receive_packet_without_reading_clock(pre, post, ios) - } +{ + let r = ios[0].get_Receive_r(); + let pkt = Packet { dst: r.dst, src: r.src, msg: r.msg }; + let sent_packets = 
extract_packets_from_abstract_ios(ios); + receive_packet_wrapper(pre, post, pkt, sent_packets) +} + +// Protocol/LiveSHT/Scheduler.i.dfy ::LHost_ReceivePacket_Next +pub open spec(checked) fn receive_packet_next( + pre: AbstractHostState, + post: AbstractHostState, + ios: AbstractIos, +) -> bool { + &&& ios.len() >= 1 + &&& if ios[0].is_TimeoutReceive() { + &&& post == pre + &&& ios.len() == 1 + } else { + &&& pre.delegation_map.is_complete() + &&& ios[0].is_Receive() + &&& forall|i| 1 <= i < ios.len() ==> /*#[trigger]*/ ios[i].is_Send() + &&& receive_packet_without_reading_clock(pre, post, ios) } +} - pub open spec(checked) fn next_get_request_reply(pre: AbstractHostState, post: AbstractHostState, src: AbstractEndPoint, seqno: nat, k: AbstractKey, sm: SingleMessage, m: Message, out: Set, should_send: bool) -> bool - recommends pre.delegation_map.is_complete() - { - let owner = pre.delegation_map[k]; - if should_send && valid_key(k) { - &&& if owner == pre.constants.me { - &&& m == Message::Reply{key: k, value: hashtable_lookup(pre.h, k)} - &&& post.received_requests == pre.received_requests.push(AppRequest::AppGetRequest{seqno, key: k}) - } else { - &&& m == Message::Redirect{key: k, id: owner} - &&& post.received_requests == pre.received_requests - } - &&& SingleDelivery::send_single_message(pre.sd, post.sd, m, src, Some(sm), pre.constants.params) - &&& sm.get_Message_dst() == src - &&& out == set![ Packet{dst: src, src: pre.constants.me, msg: sm} ] +pub open spec(checked) fn next_get_request_reply( + pre: AbstractHostState, + post: AbstractHostState, + src: AbstractEndPoint, + seqno: nat, + k: AbstractKey, + sm: SingleMessage, + m: Message, + out: Set, + should_send: bool, +) -> bool + recommends + pre.delegation_map.is_complete(), +{ + let owner = pre.delegation_map[k]; + if should_send && valid_key(k) { + &&& if owner == pre.constants.me { + &&& m == Message::Reply { key: k, value: hashtable_lookup(pre.h, k) } + &&& post.received_requests == pre.received_requests.push( + AppRequest::AppGetRequest { seqno, key: k }, + ) } else { - &&& post == AbstractHostState { received_packet: post.received_packet, ..pre } - &&& out == Set::::empty() - } + &&& m == Message::Redirect { key: k, id: owner } + &&& post.received_requests == pre.received_requests + } + &&& SingleDelivery::send_single_message( + pre.sd, + post.sd, + m, + src, + Some(sm), + pre.constants.params, + ) + &&& sm.get_Message_dst() == src + &&& out == set![ Packet{dst: src, src: pre.constants.me, msg: sm} ] + } else { + &&& post == AbstractHostState { received_packet: post.received_packet, ..pre } + &&& out == Set::::empty() } +} - pub open spec(checked) fn next_get_request(pre: AbstractHostState, post: AbstractHostState, pkt: Packet, out: Set) -> bool - recommends - pkt.msg.is_Message(), - pre.delegation_map.is_complete(), - { - &&& pkt.msg.get_Message_m().is_GetRequest() - &&& post.delegation_map == pre.delegation_map - &&& post.h == pre.h - &&& post.num_delegations == pre.num_delegations - &&& (exists |sm,m,b| next_get_request_reply(pre, post, pkt.src, pkt.msg.get_Message_seqno(), pkt.msg.get_Message_m().get_GetRequest_key(), sm, m, out, b)) - } +pub open spec(checked) fn next_get_request( + pre: AbstractHostState, + post: AbstractHostState, + pkt: Packet, + out: Set, +) -> bool + recommends + pkt.msg.is_Message(), + pre.delegation_map.is_complete(), +{ + &&& pkt.msg.get_Message_m().is_GetRequest() + &&& post.delegation_map == pre.delegation_map + &&& post.h == pre.h + &&& post.num_delegations == pre.num_delegations + &&& 
(exists|sm, m, b| + next_get_request_reply( + pre, + post, + pkt.src, + pkt.msg.get_Message_seqno(), + pkt.msg.get_Message_m().get_GetRequest_key(), + sm, + m, + out, + b, + )) +} - // Protocol/SHT/Host.i.dfy :: NextSetRequest_Complete - pub open spec(checked) fn next_set_request_complete( - pre: AbstractHostState, - post: AbstractHostState, - src: AbstractEndPoint, - seqno: nat, - reqm: Message, - sm: SingleMessage, - replym: Message, - out: Set, - should_send: bool - ) -> bool - recommends - pre.delegation_map.is_complete(), - reqm.is_SetRequest(), - { - let k = reqm.get_SetRequest_key(); - let ov = reqm.get_SetRequest_value(); - let owner = pre.delegation_map[k]; - if should_send && valid_key(k) && valid_optional_value(ov) { - &&& if owner == pre.constants.me { - &&& post.h == match ov { None => pre.h.remove(k), Some(v) => pre.h.insert(k, v) } - &&& replym == Message::Reply { key: k, value: ov } - &&& post.received_requests == pre.received_requests.push(AppRequest::AppSetRequest { seqno: seqno, key: k, ov: ov }) - } - else { - &&& post.h == pre.h - &&& replym == Message::Redirect { key: k, id: owner } - &&& post.received_requests == pre.received_requests - } - &&& SingleDelivery::send_single_message(pre.sd, post.sd, replym, src, Some(sm), pre.constants.params) - &&& sm.get_Message_dst() == src - &&& out == set![Packet{dst: src, src: pre.constants.me, msg: sm}] - } - else { - &&& post == AbstractHostState { received_packet: post.received_packet, ..pre } - &&& out == Set::::empty() - } +// Protocol/SHT/Host.i.dfy :: NextSetRequest_Complete +pub open spec(checked) fn next_set_request_complete( + pre: AbstractHostState, + post: AbstractHostState, + src: AbstractEndPoint, + seqno: nat, + reqm: Message, + sm: SingleMessage, + replym: Message, + out: Set, + should_send: bool, +) -> bool + recommends + pre.delegation_map.is_complete(), + reqm.is_SetRequest(), +{ + let k = reqm.get_SetRequest_key(); + let ov = reqm.get_SetRequest_value(); + let owner = pre.delegation_map[k]; + if should_send && valid_key(k) && valid_optional_value(ov) { + &&& if owner == pre.constants.me { + &&& post.h == match ov { + None => pre.h.remove(k), + Some(v) => pre.h.insert(k, v), + } + &&& replym == Message::Reply { key: k, value: ov } + &&& post.received_requests == pre.received_requests.push( + AppRequest::AppSetRequest { seqno: seqno, key: k, ov: ov }, + ) + } else { + &&& post.h == pre.h + &&& replym == Message::Redirect { key: k, id: owner } + &&& post.received_requests == pre.received_requests + } + &&& SingleDelivery::send_single_message( + pre.sd, + post.sd, + replym, + src, + Some(sm), + pre.constants.params, + ) + &&& sm.get_Message_dst() == src + &&& out == set![Packet{dst: src, src: pre.constants.me, msg: sm}] + } else { + &&& post == AbstractHostState { received_packet: post.received_packet, ..pre } + &&& out == Set::::empty() } +} - // Protocol/SHT/Host.i.dfy :: NextSetRequest - pub open spec(checked) fn next_set_request( - pre: AbstractHostState, - post: AbstractHostState, - pkt: Packet, - out: Set - ) -> bool - recommends - pkt.msg.is_Message(), - pre.delegation_map.is_complete(), - { - &&& pkt.msg.get_Message_m().is_SetRequest() - &&& exists |sm: SingleMessage, replym: Message, should_send: bool| next_set_request_complete(pre, post, pkt.src, pkt.msg.get_Message_seqno(), pkt.msg.get_Message_m(), sm, replym, out, should_send) +// Protocol/SHT/Host.i.dfy :: NextSetRequest +pub open spec(checked) fn next_set_request( + pre: AbstractHostState, + post: AbstractHostState, + pkt: Packet, + out: Set, +) -> 
bool + recommends + pkt.msg.is_Message(), + pre.delegation_map.is_complete(), +{ + &&& pkt.msg.get_Message_m().is_SetRequest() + &&& exists|sm: SingleMessage, replym: Message, should_send: bool| + next_set_request_complete( + pre, + post, + pkt.src, + pkt.msg.get_Message_seqno(), + pkt.msg.get_Message_m(), + sm, + replym, + out, + should_send, + ) + &&& post.delegation_map == pre.delegation_map + &&& post.num_delegations == pre.num_delegations +} + +// Protocol/SHT/Host.i.dfy :: NextDelegate +pub open spec(checked) fn next_delegate( + pre: AbstractHostState, + post: AbstractHostState, + pkt: Packet, + out: Set, +) -> bool + recommends + pkt.msg.is_Message(), + pre.delegation_map.is_complete(), +{ + &&& pkt.msg.get_Message_m().is_Delegate() + &&& if pre.constants.host_ids.contains(pkt.src) { + let m = pkt.msg.get_Message_m(); + &&& post.delegation_map == pre.delegation_map.update( + m.get_Delegate_range(), + pre.constants.me, + ) + &&& post.h == bulk_update_hashtable(pre.h, m.get_Delegate_range(), m.get_Delegate_h()) + &&& post.num_delegations == pre.num_delegations + 1 + } else { &&& post.delegation_map == pre.delegation_map + &&& post.h == pre.h &&& post.num_delegations == pre.num_delegations } + &&& SingleDelivery::::send_no_message(pre.sd, post.sd) + &&& SingleDelivery::::receive_no_message(pre.sd, post.sd) + &&& out == Set::::empty() + &&& post.received_requests == pre.received_requests +} - // Protocol/SHT/Host.i.dfy :: NextDelegate - pub open spec(checked) fn next_delegate(pre: AbstractHostState, post: AbstractHostState, pkt: Packet, out: Set) -> bool - recommends - pkt.msg.is_Message(), - pre.delegation_map.is_complete(), - { - &&& pkt.msg.get_Message_m().is_Delegate() - &&& if pre.constants.host_ids.contains(pkt.src) { - let m = pkt.msg.get_Message_m(); - &&& post.delegation_map == pre.delegation_map.update(m.get_Delegate_range(), pre.constants.me) - &&& post.h == bulk_update_hashtable(pre.h, m.get_Delegate_range(), m.get_Delegate_h()) - &&& post.num_delegations == pre.num_delegations + 1 - } - else { - &&& post.delegation_map == pre.delegation_map - &&& post.h == pre.h - &&& post.num_delegations == pre.num_delegations - } - &&& SingleDelivery::::send_no_message(pre.sd, post.sd) - &&& SingleDelivery::::receive_no_message(pre.sd, post.sd) +// Protocol/SHT/Host.i.dfy NextShard +pub open spec(checked) fn next_shard( + pre: AbstractHostState, + post: AbstractHostState, + out: Set, + kr: KeyRange, + recipient: AbstractEndPoint, + sm: SingleMessage, + should_send: bool, +) -> bool + recommends + pre.delegation_map.is_complete(), +{ + &&& recipient != pre.constants.me + &&& pre.constants.host_ids.contains(recipient) + &&& pre.delegation_map.delegate_for_key_range_is_host(kr, pre.constants.me) + &&& SingleDelivery::send_single_message( + pre.sd, + post.sd, + Message::Delegate { range: kr, h: extract_range(pre.h, kr) }, + recipient, + if should_send { + Some(sm) + } else { + None + }, + pre.constants.params, + ) + &&& should_send ==> recipient == sm.get_Message_dst() + &&& pre.constants == post.constants + &&& post.num_delegations == pre.num_delegations + 1 + &&& post.received_requests == pre.received_requests + &&& if should_send { + &&& out == set![Packet{dst: recipient, src: pre.constants.me, msg: sm}] + &&& post.delegation_map == pre.delegation_map.update(kr, recipient) + &&& post.h == bulk_remove_hashtable(pre.h, kr) + } else { &&& out == Set::::empty() - &&& post.received_requests == pre.received_requests - } - - // Protocol/SHT/Host.i.dfy NextShard - pub open spec(checked) fn 
next_shard( - pre: AbstractHostState, - post: AbstractHostState, - out: Set, - kr: KeyRange, - recipient: AbstractEndPoint, - sm: SingleMessage, - should_send: bool - ) -> bool - recommends - pre.delegation_map.is_complete(), - { - &&& recipient != pre.constants.me - &&& pre.constants.host_ids.contains(recipient) - &&& pre.delegation_map.delegate_for_key_range_is_host(kr, pre.constants.me) - &&& SingleDelivery::send_single_message(pre.sd, post.sd, Message::Delegate{range: kr, h: extract_range(pre.h, kr)}, recipient, if should_send { Some(sm) } else { None }, pre.constants.params) - &&& should_send ==> recipient == sm.get_Message_dst() - &&& pre.constants == post.constants - - &&& post.num_delegations == pre.num_delegations + 1 - &&& post.received_requests == pre.received_requests - &&& if should_send { - &&& out == set![Packet{dst: recipient, src: pre.constants.me, msg: sm}] - &&& post.delegation_map == pre.delegation_map.update(kr, recipient) - &&& post.h == bulk_remove_hashtable(pre.h, kr) - } - else { - &&& out == Set::::empty() - &&& post.delegation_map == pre.delegation_map - &&& post.h == pre.h - } + &&& post.delegation_map == pre.delegation_map + &&& post.h == pre.h } +} - pub open spec/*(checked)*/ fn next_shard_wrapper_must_reject(pre: AbstractHostState, m: Message) -> bool - { - let recipient = m.get_Shard_recipient(); - let kr = m.get_Shard_range(); - ||| recipient == pre.constants.me - ||| !recipient.valid_physical_address() - ||| kr.is_empty() - ||| !pre.constants.host_ids.contains(recipient) - ||| !pre.delegation_map.delegate_for_key_range_is_host(kr, pre.constants.me) - ||| extract_range(pre.h, kr).dom().len() >= max_hashtable_size() - } +pub open spec /*(checked)*/ +fn next_shard_wrapper_must_reject(pre: AbstractHostState, m: Message) -> bool { + let recipient = m.get_Shard_recipient(); + let kr = m.get_Shard_range(); + ||| recipient == pre.constants.me + ||| !recipient.valid_physical_address() + ||| kr.is_empty() + ||| !pre.constants.host_ids.contains(recipient) + ||| !pre.delegation_map.delegate_for_key_range_is_host(kr, pre.constants.me) + ||| extract_range(pre.h, kr).dom().len() >= max_hashtable_size() +} - // Protocol/SHT/Host.i.dfy NextShard_Wrapper - pub open spec(checked) fn next_shard_wrapper(pre: AbstractHostState, post: AbstractHostState, pkt: Packet, out: Set) -> bool +// Protocol/SHT/Host.i.dfy NextShard_Wrapper +pub open spec(checked) fn next_shard_wrapper( + pre: AbstractHostState, + post: AbstractHostState, + pkt: Packet, + out: Set, +) -> bool recommends pkt.msg.is_Message(), pre.delegation_map.is_complete(), - { - let m: Message = pkt.msg.get_Message_m(); - let recipient = m.get_Shard_recipient(); - let kr = m.get_Shard_range(); - - &&& m.is_Shard() - &&& if next_shard_wrapper_must_reject(pre, m) { - &&& post == AbstractHostState { received_packet: post.received_packet, ..pre } - &&& out == Set::::empty() - } else { - exists |sm: SingleMessage, b: bool| next_shard(pre, post, out, kr, recipient, sm, b) - } +{ + let m: Message = pkt.msg.get_Message_m(); + let recipient = m.get_Shard_recipient(); + let kr = m.get_Shard_range(); + &&& m.is_Shard() + &&& if next_shard_wrapper_must_reject(pre, m) { + &&& post == AbstractHostState { received_packet: post.received_packet, ..pre } + &&& out == Set::::empty() + } else { + exists|sm: SingleMessage, b: bool| next_shard(pre, post, out, kr, recipient, sm, b) } +} - // Protocol/SHT/Host.i.dfy :: NextReply - pub open spec(checked) fn next_reply(pre: AbstractHostState, post: AbstractHostState, pkt: Packet, out: Set) -> 
bool +// Protocol/SHT/Host.i.dfy :: NextReply +pub open spec(checked) fn next_reply( + pre: AbstractHostState, + post: AbstractHostState, + pkt: Packet, + out: Set, +) -> bool recommends pkt.msg.is_Message(), pre.delegation_map.is_complete(), - { - &&& pkt.msg.get_Message_m().is_Reply() - &&& out == Set::::empty() - &&& post == AbstractHostState { received_packet: post.received_packet, ..pre } - } +{ + &&& pkt.msg.get_Message_m().is_Reply() + &&& out == Set::::empty() + &&& post == AbstractHostState { received_packet: post.received_packet, ..pre } +} - // Protocol/SHT/Host.i.dfy :: NextRedirect - pub open spec(checked) fn next_redirect(pre: AbstractHostState, post: AbstractHostState, pkt: Packet, out: Set) -> bool +// Protocol/SHT/Host.i.dfy :: NextRedirect +pub open spec(checked) fn next_redirect( + pre: AbstractHostState, + post: AbstractHostState, + pkt: Packet, + out: Set, +) -> bool recommends pkt.msg.is_Message(), pre.delegation_map.is_complete(), - { - &&& pkt.msg.get_Message_m().is_Redirect() - &&& out == Set::::empty() - &&& post == AbstractHostState { received_packet: post.received_packet, ..pre } - } +{ + &&& pkt.msg.get_Message_m().is_Redirect() + &&& out == Set::::empty() + &&& post == AbstractHostState { received_packet: post.received_packet, ..pre } +} - pub open spec(checked) fn should_process_received_message(pre: AbstractHostState) -> bool { - &&& pre.received_packet.is_some() - &&& pre.received_packet.get_Some_0().msg.is_Message() +pub open spec(checked) fn should_process_received_message(pre: AbstractHostState) -> bool { + &&& pre.received_packet.is_some() + &&& pre.received_packet.get_Some_0().msg.is_Message() + &&& { + ||| pre.received_packet.get_Some_0().msg.get_Message_m().is_Delegate() + ||| pre.received_packet.get_Some_0().msg.get_Message_m().is_Shard() + } ==> pre.num_delegations < pre.constants.params.max_delegations - 2 +} + +pub open spec(checked) fn process_message( + pre: AbstractHostState, + post: AbstractHostState, + out: Set, +) -> bool + recommends + pre.delegation_map.is_complete(), +{ + if should_process_received_message(pre) { + let packet = pre.received_packet.get_Some_0(); &&& { - ||| pre.received_packet.get_Some_0().msg.get_Message_m().is_Delegate() - ||| pre.received_packet.get_Some_0().msg.get_Message_m().is_Shard() - } ==> pre.num_delegations < pre.constants.params.max_delegations - 2 + ||| next_get_request(pre, post, packet, out) + ||| next_set_request(pre, post, packet, out) + ||| next_delegate(pre, post, packet, out) + ||| next_shard_wrapper(pre, post, packet, out) + ||| next_reply(pre, post, packet, out) + ||| next_redirect(pre, post, packet, out) + } + &&& post.received_packet.is_None() + } else { + &&& post == pre + &&& out == Set::::empty() } +} - pub open spec(checked) fn process_message(pre: AbstractHostState, post: AbstractHostState, out: Set) -> bool - recommends - pre.delegation_map.is_complete(), - { - if should_process_received_message(pre) { - let packet = pre.received_packet.get_Some_0(); - &&& { - ||| next_get_request(pre, post, packet, out) - ||| next_set_request(pre, post, packet, out) - ||| next_delegate(pre, post, packet, out) - ||| next_shard_wrapper(pre, post, packet, out) - ||| next_reply(pre, post, packet, out) - ||| next_redirect(pre, post, packet, out) - } - &&& post.received_packet.is_None() - } - else { +pub open spec(checked) fn process_received_packet( + pre: AbstractHostState, + post: AbstractHostState, + out: Set, +) -> bool + recommends + pre.delegation_map.is_complete(), +{ + match pre.received_packet { + 
Some(_) => process_message(pre, post, out), + None => { &&& post == pre &&& out == Set::::empty() - } - } - - pub open spec(checked) fn process_received_packet(pre: AbstractHostState, post: AbstractHostState, out: Set) -> bool - recommends - pre.delegation_map.is_complete(), - { - match pre.received_packet { - Some(_) => process_message(pre, post, out), - None => { - &&& post == pre - &&& out == Set::::empty() - } - } - } - - // Translates Protocol/LiveSHT/Scheduler.i.dfy :: LHost_ProcessReceivedPacket_Next - pub open spec(checked) fn process_received_packet_next(pre: AbstractHostState, post: AbstractHostState, ios: AbstractIos) -> bool - { - &&& pre.delegation_map.is_complete() - &&& forall |i| 0 <= i < ios.len() ==> ios[i].is_Send() - &&& process_received_packet(pre, post, extract_packets_from_abstract_ios(ios)) - } - - pub open spec(checked) fn spontaneously_retransmit(pre: AbstractHostState, post: AbstractHostState, out: Set) -> bool { - &&& out == SingleDelivery::un_acked_messages(pre.sd, pre.constants.me) - &&& post == pre - } - - // Skips LHost_NoReceive_Next_Wrapper, which delays resends, and translates LHost_NoReceive_Next - pub open spec(checked) fn spontaneously_retransmit_next(pre: AbstractHostState, post: AbstractHostState, ios: AbstractIos) -> bool { - &&& pre.delegation_map.is_complete() - &&& { - ||| { - &&& forall |i| 0 <= i < ios.len() ==> ios[i].is_Send() - &&& spontaneously_retransmit(pre, post, extract_packets_from_abstract_ios(ios)) - } - ||| { - &&& post == pre - &&& ios =~= Seq::::empty() - } - } - } - - // Translates Impl/LiveSHT/Unsendable HostNextIgnoreUnsendableReceive (inlining IosReflectIgnoringUnDemarshallable) - pub open spec(checked) fn ignore_unparseable_packet(pre: AbstractHostState, post: AbstractHostState, ios: AbstractIos) -> bool { - &&& ios.len() == 1 - &&& ios[0].is_Receive() - &&& ios[0].get_Receive_r().msg.is_InvalidMessage() - &&& pre == post - } - - // Translates Impl/LiveSHT/Unsendable HostNextIgnoreUnsendableProcess (inlining IosReflectIgnoringUnParseable) - pub open spec(checked) fn ignore_nonsensical_delegation_packet(pre: AbstractHostState, post: AbstractHostState, ios: AbstractIos) -> bool { - &&& ios.len() == 0 - &&& pre.received_packet.is_some() - &&& pre.received_packet.get_Some_0().msg.is_Message() - &&& match pre.received_packet.get_Some_0().msg.get_Message_m() { - Message::Delegate{range: range, h: h} => !({ - // no need to check for valid_key_range(range) - // (See Distributed/Services/SHT/AppInterface.i.dfy: ValidKey() == true) - &&& valid_hashtable(h) - &&& !range.is_empty() - &&& pre.received_packet.get_Some_0().msg.get_Message_dst().valid_physical_address() - }), - _ => false, - } - &&& if should_process_received_message(pre) { - post == AbstractHostState{received_packet: None, ..pre} - } else { - post == pre - } + }, } +} - #[is_variant] - pub enum Step { - ReceivePacket, - ProcessReceivedPacket, - SpontaneouslyRetransmit, - Stutter, // Allowed by LHost_NoReceive_Next_Wrapper when resendCount != 0 - IgnoreUnparseablePacket, - IgnoreNonsensicalDelegationPacket, - } +// Translates Protocol/LiveSHT/Scheduler.i.dfy :: LHost_ProcessReceivedPacket_Next +pub open spec(checked) fn process_received_packet_next( + pre: AbstractHostState, + post: AbstractHostState, + ios: AbstractIos, +) -> bool { + &&& pre.delegation_map.is_complete() + &&& forall|i| 0 <= i < ios.len() ==> ios[i].is_Send() + &&& process_received_packet(pre, post, extract_packets_from_abstract_ios(ios)) +} - pub open spec fn parse_arg_as_end_point(arg: AbstractArg) -> 
AbstractEndPoint - { - AbstractEndPoint{id: arg} +pub open spec(checked) fn spontaneously_retransmit( + pre: AbstractHostState, + post: AbstractHostState, + out: Set, +) -> bool { + &&& out == SingleDelivery::un_acked_messages(pre.sd, pre.constants.me) + &&& post == pre +} + +// Skips LHost_NoReceive_Next_Wrapper, which delays resends, and translates LHost_NoReceive_Next +pub open spec(checked) fn spontaneously_retransmit_next( + pre: AbstractHostState, + post: AbstractHostState, + ios: AbstractIos, +) -> bool { + &&& pre.delegation_map.is_complete() + &&& { + ||| { + &&& forall|i| 0 <= i < ios.len() ==> ios[i].is_Send() + &&& spontaneously_retransmit(pre, post, extract_packets_from_abstract_ios(ios)) + } + ||| { + &&& post == pre + &&& ios =~= Seq::::empty() + } } +} - pub open spec fn unchecked_parse_args(args: AbstractArgs) -> Seq - { - args.map(|idx, arg: AbstractArg| parse_arg_as_end_point(arg)) +// Translates Impl/LiveSHT/Unsendable HostNextIgnoreUnsendableReceive (inlining IosReflectIgnoringUnDemarshallable) +pub open spec(checked) fn ignore_unparseable_packet( + pre: AbstractHostState, + post: AbstractHostState, + ios: AbstractIos, +) -> bool { + &&& ios.len() == 1 + &&& ios[0].is_Receive() + &&& ios[0].get_Receive_r().msg.is_InvalidMessage() + &&& pre == post +} + +// Translates Impl/LiveSHT/Unsendable HostNextIgnoreUnsendableProcess (inlining IosReflectIgnoringUnParseable) +pub open spec(checked) fn ignore_nonsensical_delegation_packet( + pre: AbstractHostState, + post: AbstractHostState, + ios: AbstractIos, +) -> bool { + &&& ios.len() == 0 + &&& pre.received_packet.is_some() + &&& pre.received_packet.get_Some_0().msg.is_Message() + &&& match pre.received_packet.get_Some_0().msg.get_Message_m() { + Message::Delegate { range: range, h: h } => !({ + // no need to check for valid_key_range(range) + // (See Distributed/Services/SHT/AppInterface.i.dfy: ValidKey() == true) + &&& valid_hashtable(h) + &&& !range.is_empty() + &&& pre.received_packet.get_Some_0().msg.get_Message_dst().valid_physical_address() + }), + _ => false, + } + &&& if should_process_received_message(pre) { + post == AbstractHostState { received_packet: None, ..pre } + } else { + post == pre } +} - pub open spec(checked) fn parse_args(args: AbstractArgs) -> Option> - { - let end_points = unchecked_parse_args(args); - if forall |i| #![auto] 0 <= i < end_points.len() ==> end_points[i].valid_physical_address() { - Some(end_points) - } else { - None - } +#[is_variant] +pub enum Step { + ReceivePacket, + ProcessReceivedPacket, + SpontaneouslyRetransmit, + Stutter, // Allowed by LHost_NoReceive_Next_Wrapper when resendCount != 0 + IgnoreUnparseablePacket, + IgnoreNonsensicalDelegationPacket, +} + +pub open spec fn parse_arg_as_end_point(arg: AbstractArg) -> AbstractEndPoint { + AbstractEndPoint { id: arg } +} + +pub open spec fn unchecked_parse_args(args: AbstractArgs) -> Seq { + args.map(|idx, arg: AbstractArg| parse_arg_as_end_point(arg)) +} + +pub open spec(checked) fn parse_args(args: AbstractArgs) -> Option> { + let end_points = unchecked_parse_args(args); + if forall|i| #![auto] 0 <= i < end_points.len() ==> end_points[i].valid_physical_address() { + Some(end_points) + } else { + None } +} - // Ironfleet's trusted spec left ParseCommandLineConfiguration unspecified, which was an auditing - // hole. Here we're going to define the parsing in the trusted domain. 
- pub open spec(checked) fn init(pre: AbstractHostState, id: AbstractEndPoint, args: AbstractArgs) -> bool { - let end_points = parse_args(args); - if end_points.is_None() || end_points.unwrap().len()==0 { - false - } else { - let root_identity = end_points.unwrap()[0]; - let params = AbstractParameters::static_params(); - pre == AbstractHostState{ - constants: AbstractConstants{root_identity, host_ids: end_points.unwrap(), params, me: id }, - delegation_map: AbstractDelegationMap::init(root_identity), - h: Map::empty(), - sd: SingleDelivery::init(), - received_packet: None, - num_delegations: 1, // TODO nat? - received_requests: seq![], - } +// Ironfleet's trusted spec left ParseCommandLineConfiguration unspecified, which was an auditing +// hole. Here we're going to define the parsing in the trusted domain. +pub open spec(checked) fn init( + pre: AbstractHostState, + id: AbstractEndPoint, + args: AbstractArgs, +) -> bool { + let end_points = parse_args(args); + if end_points.is_None() || end_points.unwrap().len() == 0 { + false + } else { + let root_identity = end_points.unwrap()[0]; + let params = AbstractParameters::static_params(); + pre == AbstractHostState { + constants: AbstractConstants { + root_identity, + host_ids: end_points.unwrap(), + params, + me: id, + }, + delegation_map: AbstractDelegationMap::init(root_identity), + h: Map::empty(), + sd: SingleDelivery::init(), + received_packet: None, + num_delegations: 1, // TODO nat? + received_requests: seq![], } } +} - // This translates Distributed/Protocol/SHT/Host.i.dfy - pub open spec(checked) fn next_step(pre: AbstractHostState, post: AbstractHostState, ios: AbstractIos, step: Step) -> bool { - &&& pre.delegation_map.is_complete() - &&& match step { - Step::ReceivePacket => receive_packet_next(pre, post, ios), - Step::ProcessReceivedPacket => process_received_packet_next(pre, post, ios), - Step::SpontaneouslyRetransmit => spontaneously_retransmit_next(pre, post, ios), - Step::Stutter => pre == post && ios.len() == 0, // See LHost_NoReceive_Next_Wrapper when resendCount != 0 - - Step::IgnoreUnparseablePacket => ignore_unparseable_packet(pre, post, ios), - Step::IgnoreNonsensicalDelegationPacket => ignore_nonsensical_delegation_packet(pre, post, ios), - } +// This translates Distributed/Protocol/SHT/Host.i.dfy +pub open spec(checked) fn next_step( + pre: AbstractHostState, + post: AbstractHostState, + ios: AbstractIos, + step: Step, +) -> bool { + &&& pre.delegation_map.is_complete() + &&& match step { + Step::ReceivePacket => receive_packet_next(pre, post, ios), + Step::ProcessReceivedPacket => process_received_packet_next(pre, post, ios), + Step::SpontaneouslyRetransmit => spontaneously_retransmit_next(pre, post, ios), + Step::Stutter => pre == post && ios.len() == 0, // See LHost_NoReceive_Next_Wrapper when resendCount != 0 + Step::IgnoreUnparseablePacket => ignore_unparseable_packet(pre, post, ios), + Step::IgnoreNonsensicalDelegationPacket => ignore_nonsensical_delegation_packet( + pre, + post, + ios, + ), } +} - //pub open no_invalid_messages fn next(pre: AbstractHostState, post: AbstractHostState, recv: Set, out: Set) -> bool { +//pub open no_invalid_messages fn next(pre: AbstractHostState, post: AbstractHostState, recv: Set, out: Set) -> bool { +pub open spec(checked) fn no_invalid_sends(ios: AbstractIos) -> bool { + forall|i| + #![auto] + 0 <= i < ios.len() && ios[i].is_Send() ==> !ios[i].get_Send_s().msg.is_InvalidMessage() +} - pub open spec(checked) fn no_invalid_sends(ios: AbstractIos) -> bool { - forall |i| #![auto] 
0 <= i < ios.len() && ios[i].is_Send() ==> !ios[i].get_Send_s().msg.is_InvalidMessage() - } +pub open spec(checked) fn next( + pre: AbstractHostState, + post: AbstractHostState, + ios: AbstractIos, +) -> bool { + &&& pre.wf() + &&& pre.constants == post.constants + &&& exists|step| next_step(pre, post, ios, step) + &&& no_invalid_sends( + ios, + ) // A double check that our trusted translation of Host satisfies OnlySentMarshallableData - pub open spec(checked) fn next(pre: AbstractHostState, post: AbstractHostState, ios: AbstractIos) -> bool { - &&& pre.wf() - &&& pre.constants == post.constants - &&& exists |step| next_step(pre, post, ios, step) - &&& no_invalid_sends(ios) // A double check that our trusted translation of Host satisfies OnlySentMarshallableData - } +} - } // verus! +} // verus! } mod io_t { @@ -5340,418 +6307,466 @@ mod io_t { verus! { - /// NOTE: no longer need HostEnvironment, its state is inlined in NetClient - /// - /// - OkState is replaced with State - /// - NetState is history - /// - NowState is not (yet) implemented - /// - files was empty in Ironfleet - pub struct HostEnvironment { - } - - //pub struct Environment { - // ok: bool - //} +/// NOTE: no longer need HostEnvironment, its state is inlined in NetClient +/// +/// - OkState is replaced with State +/// - NetState is history +/// - NowState is not (yet) implemented +/// - files was empty in Ironfleet +pub struct HostEnvironment {} + +//pub struct Environment { +// ok: bool +//} +// #[derive(Copy, Clone)] +#[derive(PartialEq, Eq, Hash)] +pub struct EndPoint { + pub id: Vec, +} - // #[derive(Copy, Clone)] - #[derive(PartialEq, Eq, Hash)] - pub struct EndPoint { - pub id: Vec, +//impl Clone for EndPoint { +// fn clone(&self) -> (res: EndPoint) +// ensures res@ == self@ +// { +// EndPoint{id: clone_vec_u8(&self.id)} +// } +//} +impl EndPoint { + // Verus unimpl: Can't call clone through the trait + pub fn clone_up_to_view(&self) -> (res: EndPoint) + ensures + res@ == self@, + { + EndPoint { id: clone_vec_u8(&self.id) } } - //impl Clone for EndPoint { - // fn clone(&self) -> (res: EndPoint) - // ensures res@ == self@ - // { - // EndPoint{id: clone_vec_u8(&self.id)} - // } - //} - - impl EndPoint { - // Verus unimpl: Can't call clone through the trait - pub fn clone_up_to_view(&self) -> (res: EndPoint) - ensures res@ == self@ - { - EndPoint{id: clone_vec_u8(&self.id)} - } - - pub open spec fn view(self) -> AbstractEndPoint { - AbstractEndPoint{id: self.id@} - } + pub open spec fn view(self) -> AbstractEndPoint { + AbstractEndPoint { id: self.id@ } + } - // EndPointIsAbstractable - // (this has generally been unused) - #[verifier(inline)] - pub open spec fn abstractable(self) -> bool { - self@.valid_physical_address() - } + // EndPointIsAbstractable + // (this has generally been unused) + #[verifier(inline)] + pub open spec fn abstractable(self) -> bool { + self@.valid_physical_address() + } - // TODO: actually call this everywhere IronFleet calls it. - pub open spec fn valid_public_key(&self) -> bool { - self@.valid_physical_address() - } + // TODO: actually call this everywhere IronFleet calls it. 
+ pub open spec fn valid_public_key(&self) -> bool { + self@.valid_physical_address() + } - // Translates Common/Native/Io.s.dfy - pub fn valid_physical_address(&self) -> (out: bool) + // Translates Common/Native/Io.s.dfy + pub fn valid_physical_address(&self) -> (out: bool) ensures out == self@.valid_physical_address(), - { - self.id.len() < 0x100000 - } - } - - pub open spec fn abstractify_end_points(end_points: Vec) -> Seq { - end_points@.map(|i, end_point: EndPoint| end_point@) + self.id.len() < 0x100000 } +} - pub type NetPacket = LPacket>; - pub type NetEvent = LIoOp>; - pub type History = Seq; +pub open spec fn abstractify_end_points(end_points: Vec) -> Seq { + end_points@.map(|i, end_point: EndPoint| end_point@) +} - #[is_variant] - pub enum State { - Receiving, - Sending, - Error, - } +pub type NetPacket = LPacket>; - #[is_variant] - pub enum NetcReceiveResult { // Not to be confused with Ironfleet's ReceiveResult type, which contains a parsed message - Received { sender: EndPoint, message: Vec }, - TimedOut, - Error, - } +pub type NetEvent = LIoOp>; - pub struct IronfleetIOError { - pub message: String, - } +pub type History = Seq; - pub closed spec fn from_trusted_code() -> bool { true } +#[is_variant] +pub enum State { + Receiving, + Sending, + Error, +} - #[verifier(external_body)] - pub struct NetClientCPointers { - get_time_func: extern "C" fn() -> u64, - receive_func: extern "C" fn(i32, *mut bool, *mut bool, *mut *mut std::vec::Vec, *mut *mut std::vec::Vec), - send_func: extern "C" fn(u64, *const u8, u64, *const u8) -> bool - } +#[is_variant] +pub enum NetcReceiveResult { // Not to be confused with Ironfleet's ReceiveResult type, which contains a parsed message + Received { sender: EndPoint, message: Vec }, + TimedOut, + Error, +} - #[verifier::external_body] - pub struct DuctTapeProfiler { - last_event: SystemTime, - last_report: SystemTime, - event_counter: HashMap, - } - - impl DuctTapeProfiler { - #[verifier(external)] - fn new() -> Self { - println!("Report-ready"); - DuctTapeProfiler { - last_event: SystemTime::now(), - last_report: SystemTime::now(), - event_counter: HashMap::new(), - } - } +pub struct IronfleetIOError { + pub message: String, +} - #[verifier(external)] - fn duration_as_ns(duration: &Duration) -> u64 - { - duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64 - } +pub closed spec fn from_trusted_code() -> bool { + true +} - #[verifier(external)] - fn mark_duration(&mut self, label: &str) { - let now = SystemTime::now(); - let duration_ns = Self::duration_as_ns(&now.duration_since(self.last_event).expect("arrow of time")); - self.increment_event(label, duration_ns); - self.last_event = now; - self.maybe_report(&now); - } +#[verifier(external_body)] +pub struct NetClientCPointers { + get_time_func: extern "C" fn () -> u64, + receive_func: extern "C" fn ( + i32, + *mut bool, + *mut bool, + *mut *mut std::vec::Vec, + *mut *mut std::vec::Vec, + ), + send_func: extern "C" fn (u64, *const u8, u64, *const u8) -> bool, +} + +#[verifier::external_body] +pub struct DuctTapeProfiler { + last_event: SystemTime, + last_report: SystemTime, + event_counter: HashMap, +} - #[verifier(external)] - fn record_event(&mut self, label: &str) { - self.increment_event(label, 1); +impl DuctTapeProfiler { + #[verifier(external)] + fn new() -> Self { + println!("Report-ready"); + DuctTapeProfiler { + last_event: SystemTime::now(), + last_report: SystemTime::now(), + event_counter: HashMap::new(), } + } - #[verifier(external)] - fn increment_event(&mut self, label: 
&str, incr: u64) { - if let Some(entry) = self.event_counter.get_mut(label) { - *entry += incr; - } else { - self.event_counter.insert(label.to_string(), incr); - } + #[verifier(external)] + fn duration_as_ns(duration: &Duration) -> u64 { + duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64 + } + + #[verifier(external)] + fn mark_duration(&mut self, label: &str) { + let now = SystemTime::now(); + let duration_ns = Self::duration_as_ns( + &now.duration_since(self.last_event).expect("arrow of time"), + ); + self.increment_event(label, duration_ns); + self.last_event = now; + self.maybe_report(&now); + } + + #[verifier(external)] + fn record_event(&mut self, label: &str) { + self.increment_event(label, 1); + } + + #[verifier(external)] + fn increment_event(&mut self, label: &str, incr: u64) { + if let Some(entry) = self.event_counter.get_mut(label) { + *entry += incr; + } else { + self.event_counter.insert(label.to_string(), incr); } + } - #[verifier(external)] - fn maybe_report(&mut self, now: &SystemTime) - { - let report_period = 1 * 1_000_000_000; - let report_duration_ns = Self::duration_as_ns(&now.duration_since(self.last_report).expect("arrow of time")); - if report_duration_ns > report_period { - self.increment_event("report-duration-ns", report_duration_ns); - self.report(); - self.last_report = now.clone(); - self.event_counter = HashMap::new(); - } + #[verifier(external)] + fn maybe_report(&mut self, now: &SystemTime) { + let report_period = 1 * 1_000_000_000; + let report_duration_ns = Self::duration_as_ns( + &now.duration_since(self.last_report).expect("arrow of time"), + ); + if report_duration_ns > report_period { + self.increment_event("report-duration-ns", report_duration_ns); + self.report(); + self.last_report = now.clone(); + self.event_counter = HashMap::new(); } + } - #[verifier(external)] - fn report(&self) - { - for (key, value) in &self.event_counter { - if key.ends_with("-ns") { - let ms = *value as f64 / 1e6; - println!("{key}: {ms} ms"); - } else { - println!("{key}: {value} count"); - } + #[verifier(external)] + fn report(&self) { + for (key, value) in &self.event_counter { + if key.ends_with("-ns") { + let ms = *value as f64 / 1e6; + println!("{key}: {ms} ms"); + } else { + println!("{key}: {value} count"); } - println!(""); } + println!(""); } +} - pub struct NetClient { - state: Ghost, - history: Ghost, +pub struct NetClient { + state: Ghost, + history: Ghost, + end_point: EndPoint, + c_pointers: NetClientCPointers, + profiler: DuctTapeProfiler, +} + +impl NetClient { + ////////////////////////////////////////////////////////////////////////////// + // player-1 accessible interfaces (note requires from_trusted_code()) + ////////////////////////////////////////////////////////////////////////////// + #[verifier(external)] + pub fn new( end_point: EndPoint, - c_pointers: NetClientCPointers, - profiler: DuctTapeProfiler, - } - - impl NetClient { - ////////////////////////////////////////////////////////////////////////////// - // player-1 accessible interfaces (note requires from_trusted_code()) - ////////////////////////////////////////////////////////////////////////////// - - #[verifier(external)] - pub fn new( - end_point: EndPoint, - get_time_func: extern "C" fn() -> u64, - receive_func: extern "C" fn(i32, *mut bool, *mut bool, *mut *mut std::vec::Vec, *mut *mut std::vec::Vec), - send_func: extern "C" fn(u64, *const u8, u64, *const u8) -> bool - ) -> (net_client: Self) - requires from_trusted_code(), - ensures - 
net_client.state().is_Receiving(), - net_client.history() == Seq::::empty(), - net_client.my_end_point() == end_point, - { - //TODO(chris): thread 'rustc' panicked at 'The verifier does not yet support the following Rust feature: non_struct_ctor', rust_verify/src/rust_to_vir_expr.rs:2796:21 - //Self{state: State::Receiving, history: todo!(), end_point} - NetClient{ - state: Ghost(State::Receiving), - history: Ghost(seq![]), - end_point, - c_pointers: NetClientCPointers{get_time_func: get_time_func, receive_func: receive_func, send_func: send_func}, - profiler: DuctTapeProfiler::new(), - } + get_time_func: extern "C" fn () -> u64, + receive_func: extern "C" fn ( + i32, + *mut bool, + *mut bool, + *mut *mut std::vec::Vec, + *mut *mut std::vec::Vec, + ), + send_func: extern "C" fn (u64, *const u8, u64, *const u8) -> bool, + ) -> (net_client: Self) + requires + from_trusted_code(), + ensures + net_client.state().is_Receiving(), + net_client.history() == Seq::::empty(), + net_client.my_end_point() == end_point, + { + //TODO(chris): thread 'rustc' panicked at 'The verifier does not yet support the following Rust feature: non_struct_ctor', rust_verify/src/rust_to_vir_expr.rs:2796:21 + //Self{state: State::Receiving, history: todo!(), end_point} + NetClient { + state: Ghost(State::Receiving), + history: Ghost(seq![]), + end_point, + c_pointers: NetClientCPointers { + get_time_func: get_time_func, + receive_func: receive_func, + send_func: send_func, + }, + profiler: DuctTapeProfiler::new(), } + } - // Main loop (Player 1 audited code) resets the state after having seen Player 2 - // complete a proof of refinement to an atomic protocol step. - pub fn reset(&mut self) - requires - from_trusted_code() - ensures - self.state().is_Receiving(), - self.my_end_point() == old(self).my_end_point() - // TODO: surely something needs to be said about history? - { - self.state = Ghost(State::Receiving); - } + // Main loop (Player 1 audited code) resets the state after having seen Player 2 + // complete a proof of refinement to an atomic protocol step. + pub fn reset(&mut self) + requires + from_trusted_code(), + ensures + self.state().is_Receiving(), + self.my_end_point() == old( + self, + ).my_end_point(), + // TODO: surely something needs to be said about history? - ////////////////////////////////////////////////////////////////////////////// - // player-2 accessible interfaces - ////////////////////////////////////////////////////////////////////////////// + { + self.state = Ghost(State::Receiving); + } - // This state field is how Player 2 proves that it calls receive before send. - pub closed spec fn state(&self) -> State - { - self.state@ - } + ////////////////////////////////////////////////////////////////////////////// + // player-2 accessible interfaces + ////////////////////////////////////////////////////////////////////////////// + // This state field is how Player 2 proves that it calls receive before send. + pub closed spec fn state(&self) -> State { + self.state@ + } - /// Translates calls to env.ok.ok(). - pub open spec fn ok(&self) -> bool - { - !self.state().is_Error() - } + /// Translates calls to env.ok.ok(). 
+ pub open spec fn ok(&self) -> bool { + !self.state().is_Error() + } - /// translates NetClient.NetClientIsValid - pub open spec fn valid(&self) -> bool - { - &&& self.ok() - &&& self.my_end_point().abstractable() - } + /// translates NetClient.NetClientIsValid + pub open spec fn valid(&self) -> bool { + &&& self.ok() + &&& self.my_end_point().abstractable() + } - pub closed spec fn history(&self) -> History - { - self.history@ - } + pub closed spec fn history(&self) -> History { + self.history@ + } - /// Translates MyPublicKey() - pub closed spec fn my_end_point(&self) -> AbstractEndPoint - { - self.end_point@ - } + /// Translates MyPublicKey() + pub closed spec fn my_end_point(&self) -> AbstractEndPoint { + self.end_point@ + } - pub fn get_my_end_point(&self) -> (ep: EndPoint) - ensures - ep@ == self.my_end_point() - { - self.end_point.clone_up_to_view() - } + pub fn get_my_end_point(&self) -> (ep: EndPoint) + ensures + ep@ == self.my_end_point(), + { + self.end_point.clone_up_to_view() + } - #[verifier(external)] - pub fn get_time_internal(&self) -> (time: u64) - requires - from_trusted_code() - { - (self.c_pointers.get_time_func)() - } + #[verifier(external)] + pub fn get_time_internal(&self) -> (time: u64) + requires + from_trusted_code(), + { + (self.c_pointers.get_time_func)() + } - #[verifier(external_body)] - pub fn get_time(&mut self) -> (time: u64) - requires - old(self).state().is_Receiving() - ensures ({ + #[verifier(external_body)] + pub fn get_time(&mut self) -> (time: u64) + requires + old(self).state().is_Receiving(), + ensures + ({ &&& self.state().is_Sending() &&& self.history() == old(self).history() + seq![LIoOp::ReadClock{t: time as int}] - }) - { - let time: u64 = self.get_time_internal(); - self.state = Ghost(State::Sending); - self.history = Ghost(self.history@ + seq![LIoOp::>::ReadClock{t: time as int}]); - time - } + }), + { + let time: u64 = self.get_time_internal(); + self.state = Ghost(State::Sending); + self.history = Ghost( + self.history@ + seq![LIoOp::>::ReadClock{t: time as int}], + ); + time + } - #[verifier(external)] - pub unsafe fn receive_internal(&mut self, time_limit_s: i32) -> (result: NetcReceiveResult) - { - let mut ok: bool = true; - let mut timed_out: bool = true; - let mut remote = std::mem::MaybeUninit::<*mut std::vec::Vec>::uninit(); - let mut buffer = std::mem::MaybeUninit::<*mut std::vec::Vec>::uninit(); - - self.profiler.mark_duration("processing-ns"); - (self.c_pointers.receive_func)(time_limit_s, &mut ok, &mut timed_out, remote.as_mut_ptr(), buffer.as_mut_ptr()); - self.profiler.mark_duration("awaiting-receive-ns"); - - if ok { - if timed_out { - self.profiler.record_event("receive-timedout"); - NetcReceiveResult::TimedOut{} - } - else { - self.profiler.record_event("receive-ok"); - let remote_ptr: *mut std::vec::Vec = remote.assume_init(); - let buffer_ptr: *mut std::vec::Vec = buffer.assume_init(); - let remote_box: Box> = Box::>::from_raw(remote_ptr); - let buffer_box: Box> = Box::>::from_raw(buffer_ptr); - let remote_vec: std::vec::Vec = *remote_box; - let buffer_vec: std::vec::Vec = *buffer_box; - let mut remote_verus_vec: Vec = Vec::new(); - remote_verus_vec = remote_vec; - let mut buffer_verus_vec: Vec = Vec::new(); - buffer_verus_vec = buffer_vec; - NetcReceiveResult::Received{sender: EndPoint{id: remote_verus_vec}, message: buffer_verus_vec} + #[verifier(external)] + pub unsafe fn receive_internal(&mut self, time_limit_s: i32) -> (result: NetcReceiveResult) { + let mut ok: bool = true; + let mut timed_out: bool = true; + let 
mut remote = std::mem::MaybeUninit::<*mut std::vec::Vec>::uninit(); + let mut buffer = std::mem::MaybeUninit::<*mut std::vec::Vec>::uninit(); + self.profiler.mark_duration("processing-ns"); + (self.c_pointers.receive_func)( + time_limit_s, + &mut ok, + &mut timed_out, + remote.as_mut_ptr(), + buffer.as_mut_ptr(), + ); + self.profiler.mark_duration("awaiting-receive-ns"); + if ok { + if timed_out { + self.profiler.record_event("receive-timedout"); + NetcReceiveResult::TimedOut { } + } else { + self.profiler.record_event("receive-ok"); + let remote_ptr: *mut std::vec::Vec = remote.assume_init(); + let buffer_ptr: *mut std::vec::Vec = buffer.assume_init(); + let remote_box: Box> = Box::>::from_raw( + remote_ptr, + ); + let buffer_box: Box> = Box::>::from_raw( + buffer_ptr, + ); + let remote_vec: std::vec::Vec = *remote_box; + let buffer_vec: std::vec::Vec = *buffer_box; + let mut remote_verus_vec: Vec = Vec::new(); + remote_verus_vec = remote_vec; + let mut buffer_verus_vec: Vec = Vec::new(); + buffer_verus_vec = buffer_vec; + NetcReceiveResult::Received { + sender: EndPoint { id: remote_verus_vec }, + message: buffer_verus_vec, } } - else { - self.profiler.record_event("receive-error"); - NetcReceiveResult::Error{} - } + } else { + self.profiler.record_event("receive-error"); + NetcReceiveResult::Error { } } + } - #[verifier(external_body)] - pub fn receive(&mut self, time_limit_s: i32) -> (result: NetcReceiveResult) - requires - // TODO(verus:jonh): start a discussion about demanding old(self) in requires - old(self).state().is_Receiving() - ensures - self.my_end_point() == old(self).my_end_point(), - match result { - NetcReceiveResult::Received{sender, message} => { + #[verifier(external_body)] + pub fn receive(&mut self, time_limit_s: i32) -> (result: NetcReceiveResult) + requires + // TODO(verus:jonh): start a discussion about demanding old(self) in requires + + old(self).state().is_Receiving(), + ensures + self.my_end_point() == old(self).my_end_point(), + match result { + NetcReceiveResult::Received { sender, message } => { &&& self.state().is_Receiving() &&& sender.abstractable() - &&& self.history() == old(self).history() + seq![ + &&& self.history() == old(self).history() + + seq![ LIoOp::Receive{ r: LPacket{ dst: self.my_end_point(), src: sender@, msg: message@} }] - } - NetcReceiveResult::TimedOut{} => { + }, + NetcReceiveResult::TimedOut { } => { &&& self.state().is_Sending() - &&& self.history() == old(self).history() + seq![LIoOp/*TODO(verus) fix name when qpath fix*/::TimeoutReceive{}] - } - NetcReceiveResult::Error{} => { - self.state().is_Error() - } - } - { - let result: NetcReceiveResult = unsafe { self.receive_internal(time_limit_s) }; - match result { - NetcReceiveResult::Received{ref sender, ref message} => { - self.history = Ghost(self.history@ + seq![LIoOp::Receive { r: LPacket::> { dst: self.my_end_point(), src: sender@, msg: message@ } } ]); - } - NetcReceiveResult::TimedOut{} => { - self.history = Ghost(self.history@ + seq![LIoOp::TimeoutReceive{}]); - } - NetcReceiveResult::Error{} => { - self.state = Ghost(State::Error{}); - } - } - result + &&& self.history() == old(self).history() + + seq![LIoOp/*TODO(verus) fix name when qpath fix*/::TimeoutReceive{}] + }, + NetcReceiveResult::Error { } => { self.state().is_Error() }, + }, + { + let result: NetcReceiveResult = unsafe { self.receive_internal(time_limit_s) }; + match result { + NetcReceiveResult::Received { ref sender, ref message } => { + self.history = Ghost( + self.history@ + + seq![LIoOp::Receive { r: 
LPacket::> { dst: self.my_end_point(), src: sender@, msg: message@ } } ], + ); + }, + NetcReceiveResult::TimedOut { } => { + self.history = Ghost(self.history@ + seq![LIoOp::TimeoutReceive{}]); + }, + NetcReceiveResult::Error { } => { + self.state = Ghost(State::Error { }); + }, } - - #[verifier(external)] - pub unsafe fn send_internal(&mut self, remote: &EndPoint, message: &Vec) -> (result: Result<(), IronfleetIOError>) - { - let remote_raw: *const u8 = remote.id.as_ptr(); - let message_raw: *const u8 = message.as_ptr(); - let b: bool = (self.c_pointers.send_func)(remote.id.len() as u64, remote_raw, message.len() as u64, message_raw); - if b { - Ok(()) - } - else { - Err(IronfleetIOError{message: String::from_rust_string("Failed to send".to_string())}) - } + result + } + + #[verifier(external)] + pub unsafe fn send_internal(&mut self, remote: &EndPoint, message: &Vec) -> (result: Result< + (), + IronfleetIOError, + >) { + let remote_raw: *const u8 = remote.id.as_ptr(); + let message_raw: *const u8 = message.as_ptr(); + let b: bool = (self.c_pointers.send_func)( + remote.id.len() as u64, + remote_raw, + message.len() as u64, + message_raw, + ); + if b { + Ok(()) + } else { + Err( + IronfleetIOError { + message: String::from_rust_string("Failed to send".to_string()), + }, + ) } + } - #[verifier(external_body)] - pub fn send_internal_wrapper(&mut self, remote: &EndPoint, message: &Vec) -> (result: Result<(), IronfleetIOError>) + #[verifier(external_body)] + pub fn send_internal_wrapper(&mut self, remote: &EndPoint, message: &Vec) -> (result: + Result<(), IronfleetIOError>) ensures *self == *old(self), - { - unsafe { self.send_internal(remote, message) } - } + { + unsafe { self.send_internal(remote, message) } + } - pub fn send(&mut self, recipient: &EndPoint, message: &Vec) -> (result: Result<(), IronfleetIOError> ) - requires - !old(self).state().is_Error() - ensures - self.my_end_point() == old(self).my_end_point(), - self.state().is_Error() <==> result.is_Err(), - result.is_Ok() ==> self.state().is_Sending(), - result.is_Ok() ==> self.history() == old(self).history() + seq![LIoOp::Send{s: LPacket{dst: recipient@, src: self.my_end_point(), msg: message@}}], - { - let result: Result<(), IronfleetIOError> = self.send_internal_wrapper(recipient, message); - match result { - Ok(_) => { - self.state = Ghost(State::Sending{}); - self.history = Ghost(self.history@ + seq![LIoOp::Send{s: LPacket{dst: recipient@, src: self.my_end_point(), msg: message@}}]); - } - Err(_) => { - self.state = Ghost(State::Error{}); - } - }; - result - } + pub fn send(&mut self, recipient: &EndPoint, message: &Vec) -> (result: Result< + (), + IronfleetIOError, + >) + requires + !old(self).state().is_Error(), + ensures + self.my_end_point() == old(self).my_end_point(), + self.state().is_Error() <==> result.is_Err(), + result.is_Ok() ==> self.state().is_Sending(), + result.is_Ok() ==> self.history() == old(self).history() + + seq![LIoOp::Send{s: LPacket{dst: recipient@, src: self.my_end_point(), msg: message@}}], + { + let result: Result<(), IronfleetIOError> = self.send_internal_wrapper(recipient, message); + match result { + Ok(_) => { + self.state = Ghost(State::Sending { }); + self.history = Ghost( + self.history@ + + seq![LIoOp::Send{s: LPacket{dst: recipient@, src: self.my_end_point(), msg: message@}}], + ); + }, + Err(_) => { + self.state = Ghost(State::Error { }); + }, + }; + result } +} - } // verus! +} // verus! } mod keys_t { @@ -5771,150 +6786,156 @@ mod keys_t { verus! 
{ - // TODO(chris): Want to write KeyTrait : VerusClone, but "The verifier does not yet support the following Rust feature: trait generic bounds" - pub trait KeyTrait : Sized { +// TODO(chris): Want to write KeyTrait : VerusClone, but "The verifier does not yet support the following Rust feature: trait generic bounds" +pub trait KeyTrait: Sized { + spec fn zero_spec() -> Self where Self: std::marker::Sized; - spec fn zero_spec() -> Self where Self: std::marker::Sized; + proof fn zero_properties() + ensures + forall|k: Self| + k != Self::zero_spec() ==> (#[trigger] Self::zero_spec().cmp_spec(k)).lt(), + ; - proof fn zero_properties() - ensures - forall |k:Self| k != Self::zero_spec() ==> (#[trigger] Self::zero_spec().cmp_spec(k)).lt(); + spec fn cmp_spec(self, other: Self) -> Ordering; - spec fn cmp_spec(self, other: Self) -> Ordering; + proof fn cmp_properties() + ensures + // Equality is eq --- TODO: Without this we need to redefine Seq, Set, etc. operators that use == - proof fn cmp_properties() - ensures - // Equality is eq --- TODO: Without this we need to redefine Seq, Set, etc. operators that use == - forall |a:Self, b:Self| #![auto] a == b <==> a.cmp_spec(b).eq(), + forall|a: Self, b: Self| #![auto] a == b <==> a.cmp_spec(b).eq(), // Reflexivity of equality - forall |a:Self| #![auto] a.cmp_spec(a).eq(), + forall|a: Self| #![auto] a.cmp_spec(a).eq(), // Commutativity of equality - forall |a:Self, b:Self| (#[trigger] a.cmp_spec(b)).eq() == b.cmp_spec(a).eq(), + forall|a: Self, b: Self| (#[trigger] a.cmp_spec(b)).eq() == b.cmp_spec(a).eq(), // Transitivity of equality - forall |a:Self, b:Self, c:Self| - #[trigger] a.cmp_spec(b).eq() && #[trigger] b.cmp_spec(c).eq() ==> a.cmp_spec(c).eq(), + forall|a: Self, b: Self, c: Self| #[trigger] + a.cmp_spec(b).eq() && #[trigger] b.cmp_spec(c).eq() ==> a.cmp_spec(c).eq(), // Inequality is asymmetric - forall |a:Self, b:Self| - #[trigger] a.cmp_spec(b).lt() <==> b.cmp_spec(a).gt(), + forall|a: Self, b: Self| #[trigger] a.cmp_spec(b).lt() <==> b.cmp_spec(a).gt(), // Connected - forall |a:Self, b:Self| - #![auto] a.cmp_spec(b).ne() ==> a.cmp_spec(b).lt() || b.cmp_spec(a).lt(), + forall|a: Self, b: Self| + #![auto] + a.cmp_spec(b).ne() ==> a.cmp_spec(b).lt() || b.cmp_spec(a).lt(), // Transitivity of inequality - forall |a:Self, b:Self, c:Self| - #[trigger] a.cmp_spec(b).lt() && #[trigger] b.cmp_spec(c).lt() ==> a.cmp_spec(c).lt(), - forall |a:Self, b:Self, c:Self| - #[trigger] a.cmp_spec(b).lt() && #[trigger] b.cmp_spec(c).le() ==> a.cmp_spec(c).lt(), - forall |a:Self, b:Self, c:Self| - #[trigger] a.cmp_spec(b).le() && #[trigger] b.cmp_spec(c).lt() ==> a.cmp_spec(c).lt(); + forall|a: Self, b: Self, c: Self| #[trigger] + a.cmp_spec(b).lt() && #[trigger] b.cmp_spec(c).lt() ==> a.cmp_spec(c).lt(), + forall|a: Self, b: Self, c: Self| #[trigger] + a.cmp_spec(b).lt() && #[trigger] b.cmp_spec(c).le() ==> a.cmp_spec(c).lt(), + forall|a: Self, b: Self, c: Self| #[trigger] + a.cmp_spec(b).le() && #[trigger] b.cmp_spec(c).lt() ==> a.cmp_spec(c).lt(), + ; + + // zero should be smaller than all other keys + fn zero() -> (z: Self) + ensures + z == Self::zero_spec(), + ; - // zero should be smaller than all other keys - fn zero() -> (z: Self) - ensures z == Self::zero_spec(); + fn cmp(&self, other: &Self) -> (o: Ordering) + requires + true, + ensures + o == self.cmp_spec(*other), + ; +} - fn cmp(&self, other: &Self) -> (o: Ordering) - requires true, - ensures o == self.cmp_spec(*other); - } +// Based on Rust's Ordering +#[derive(Structural, PartialEq, Eq)] +pub 
enum Ordering { + Less, + Equal, + Greater, +} - // Based on Rust's Ordering - #[derive(Structural, PartialEq, Eq)] - pub enum Ordering { - Less, - Equal, - Greater, - } +pub struct KeyIterator { + // None means we hit the end + pub k: Option, +} - pub struct KeyIterator { - // None means we hit the end - pub k: Option, +impl KeyIterator { + pub open spec fn new_spec(k: K) -> Self { + KeyIterator { k: Some(k) } } - impl KeyIterator { - pub open spec fn new_spec(k: K) -> Self { - KeyIterator { k: Some(k) } + pub open spec fn cmp_spec(self, other: Self) -> Ordering { + match (self.k, other.k) { + (None, None) => Ordering::Equal, + (None, Some(_)) => Ordering::Less, + (Some(_), None) => Ordering::Greater, + (Some(i), Some(j)) => { i.cmp_spec(j) }, } + } - pub open spec fn cmp_spec(self, other: Self) -> Ordering { - match (self.k, other.k) { - (None, None) => Ordering::Equal, - (None, Some(_)) => Ordering::Less, - (Some(_), None) => Ordering::Greater, - (Some(i), Some(j)) => { i.cmp_spec(j) } - } - } + pub open spec fn lt_spec(self, other: Self) -> bool { + (!self.k.is_None() && other.k.is_None()) || (!self.k.is_None() && !other.k.is_None() + && self.k.get_Some_0().cmp_spec(other.k.get_Some_0()).lt()) + } - pub open spec fn lt_spec(self, other: Self) -> bool { - (!self.k.is_None() && other.k.is_None()) - || (!self.k.is_None() && !other.k.is_None() && self.k.get_Some_0().cmp_spec(other.k.get_Some_0()).lt()) - } + // TODO: Use the name `spec_ge` instead of `geq_spec` to enable Verus magic overloading + pub open spec fn geq_spec(self, other: Self) -> bool { + !self.lt_spec(other) //|| self == other - // TODO: Use the name `spec_ge` instead of `geq_spec` to enable Verus magic overloading - pub open spec fn geq_spec(self, other: Self) -> bool { - !self.lt_spec(other) //|| self == other - } } +} - pub struct KeyRange { - pub lo: KeyIterator, - pub hi: KeyIterator, - } +pub struct KeyRange { + pub lo: KeyIterator, + pub hi: KeyIterator, +} - impl KeyRange - { - pub open spec fn contains(self, k: K) -> bool - { - KeyIterator::::between(self.lo, KeyIterator::::new_spec(k), self.hi) - } +impl KeyRange { + pub open spec fn contains(self, k: K) -> bool { + KeyIterator::::between(self.lo, KeyIterator::::new_spec(k), self.hi) + } - pub open spec fn is_empty(self) -> bool - { - self.lo.geq_spec(self.hi) - } + pub open spec fn is_empty(self) -> bool { + self.lo.geq_spec(self.hi) } +} - impl KeyRange - { - pub fn contains_exec(&self, k: &K) -> (b: bool) +impl KeyRange { + pub fn contains_exec(&self, k: &K) -> (b: bool) ensures b == self.contains(*k), - { - let ki = KeyIterator { k: Some(k.clone()) }; - !ki.lt(&self.lo) && ki.lt(&self.hi) - } - } - - impl VerusClone for KeyIterator { - fn clone(&self) -> Self { - KeyIterator { - k: match &self.k { - Some(v) => Some(v.clone()), - None => None, - }, - } - } + { + let ki = KeyIterator { k: Some(k.clone()) }; + !ki.lt(&self.lo) && ki.lt(&self.hi) } +} - impl VerusClone for KeyRange { - fn clone(&self) -> Self { - KeyRange { lo: self.lo.clone(), hi: self.hi.clone() } +impl VerusClone for KeyIterator { + fn clone(&self) -> Self { + KeyIterator { + k: match &self.k { + Some(v) => Some(v.clone()), + None => None, + }, } } +} - #[derive(Eq,PartialEq,Hash)] - pub struct SHTKey { - pub // workaround - ukey: u64, +impl VerusClone for KeyRange { + fn clone(&self) -> Self { + KeyRange { lo: self.lo.clone(), hi: self.hi.clone() } } +} - impl SHTKey { - pub fn clone(&self) -> (out: SHTKey) - ensures out == self - { - SHTKey{ ukey: self.ukey } - } +#[derive(Eq,PartialEq,Hash)] 
+pub struct SHTKey { + pub // workaround + ukey: u64, +} + +impl SHTKey { + pub fn clone(&self) -> (out: SHTKey) + ensures + out == self, + { + SHTKey { ukey: self.ukey } } +} - /* +/* impl std::hash::Hash for SHTKey { } @@ -5925,37 +6946,34 @@ mod keys_t { } */ - impl KeyTrait for SHTKey { - fn zero() -> (z: Self) - { - // This assert is necessary due to https://github.com/verus-lang/verus/issues/885 - assert(SHTKey{ukey: 0} == Self::zero_spec()); - SHTKey{ukey: 0} - } +impl KeyTrait for SHTKey { + fn zero() -> (z: Self) { + // This assert is necessary due to https://github.com/verus-lang/verus/issues/885 + assert(SHTKey { ukey: 0 } == Self::zero_spec()); + SHTKey { ukey: 0 } + } - open spec fn zero_spec() -> Self - { - SHTKey{ukey: 0} - } + open spec fn zero_spec() -> Self { + SHTKey { ukey: 0 } + } - proof fn zero_properties() - { - // Maybe this should not be necessary - assert(forall |k:Self| k != Self::zero_spec() ==> (#[trigger] Self::zero_spec().cmp_spec(k)).lt()); - } + proof fn zero_properties() { + // Maybe this should not be necessary + assert(forall|k: Self| + k != Self::zero_spec() ==> (#[trigger] Self::zero_spec().cmp_spec(k)).lt()); + } - open spec fn cmp_spec(self, other: Self) -> Ordering - { - if self.ukey < other.ukey { - Ordering::Less - } else if self.ukey == other.ukey { - Ordering::Equal - } else { - Ordering::Greater - } + open spec fn cmp_spec(self, other: Self) -> Ordering { + if self.ukey < other.ukey { + Ordering::Less + } else if self.ukey == other.ukey { + Ordering::Equal + } else { + Ordering::Greater } + } - proof fn cmp_properties() + proof fn cmp_properties() // ensures // // Equality is eq --- TODO: Without this we need to redefine Seq, Set, etc. operators that use == // forall |a:Self, b:Self| #![auto] a == b <==> a.cmp_spec(b).eq(), @@ -5979,35 +6997,36 @@ mod keys_t { // #[trigger] a.cmp_spec(b).lt() && #[trigger] b.cmp_spec(c).le() ==> a.cmp_spec(c).lt(), // forall |a:Self, b:Self, c:Self| // #[trigger] a.cmp_spec(b).le() && #[trigger] b.cmp_spec(c).lt() ==> a.cmp_spec(c).lt() - { - } + { + } - fn cmp(&self, other: &Self) -> (o: Ordering) + fn cmp(&self, other: &Self) -> (o: Ordering) // requires true, // ensures o == self.cmp_spec(*other) - { - if self.ukey < other.ukey { - Ordering::Less - } else if self.ukey == other.ukey { - Ordering::Equal - } else { - Ordering::Greater - } + { + if self.ukey < other.ukey { + Ordering::Less + } else if self.ukey == other.ukey { + Ordering::Equal + } else { + Ordering::Greater } } +} - impl VerusClone for SHTKey { - fn clone(&self) -> (o: Self) - //ensures o == self - { - SHTKey{ukey: self.ukey} - } +impl VerusClone for SHTKey { + fn clone(&self) -> (o: Self) + //ensures o == self + { + SHTKey { ukey: self.ukey } } +} + +pub type AbstractKey = SHTKey; - pub type AbstractKey = SHTKey; - pub type CKey = SHTKey; +pub type CKey = SHTKey; - } // verus! +} // verus! } mod main_t { @@ -6035,78 +7054,66 @@ mod main_t { verus! { - // io::Error is not supported yet. Placeholder. - pub struct IronError { - } - - // net_impl comes from outside so this function can be verified. - pub fn sht_main(netc: NetClient, args: Args) -> Result<(), IronError> - requires - netc.valid(), - netc.state().is_Receiving(), - crate::io_t::from_trusted_code() - { - let mut netc = netc; // Verus does not support `mut` arguments +// io::Error is not supported yet. Placeholder. +pub struct IronError {} +// net_impl comes from outside so this function can be verified. 
+pub fn sht_main(netc: NetClient, args: Args) -> Result<(), IronError> + requires + netc.valid(), + netc.state().is_Receiving(), + crate::io_t::from_trusted_code(), +{ + let mut netc = netc; // Verus does not support `mut` arguments // let mut host_c: host_protocol_t::Constants; // let mut host: host_protocol_t::Variables; - - let opt_host_state: Option = HostState::init_impl(&netc, &args); - let mut host_state = match opt_host_state { - None => { return Err(IronError{}) }, - Some(thing) => thing, - }; - let mut ok: bool = true; - + let opt_host_state: Option = HostState::init_impl(&netc, &args); + let mut host_state = match opt_host_state { + None => { return Err(IronError { }) }, + Some(thing) => thing, + }; + let mut ok: bool = true; // let config = HostState::parse_command_line_configuration(args); - let end_point = netc.get_my_end_point(); - // This init function in Dafny is in Impl/LiveSHT/Host.i. - // It calls LScheduler_Init, which does some scheduly stuff (which I'm hoping - // we can ignore) and then calls Protocol/SHT/Host.i/Host_Init. - assert(crate::host_protocol_t::init(host_state@, end_point@, abstractify_args(args))); - - while (ok) - invariant - crate::io_t::from_trusted_code(), // this predicate's value cannot change, but has to be explicitly imported into the loop invariant + let end_point = netc.get_my_end_point(); + // This init function in Dafny is in Impl/LiveSHT/Host.i. + // It calls LScheduler_Init, which does some scheduly stuff (which I'm hoping + // we can ignore) and then calls Protocol/SHT/Host.i/Host_Init. + assert(crate::host_protocol_t::init(host_state@, end_point@, abstractify_args(args))); + while (ok) + invariant + crate::io_t::from_trusted_code(), // this predicate's value cannot change, but has to be explicitly imported into the loop invariant ok ==> host_state.invariants(&netc.my_end_point()), ok == netc.ok(), ok ==> netc.state().is_Receiving(), - { - // no need for decreases * because exec functions don't termination-check - - let old_net_history: Ghost = Ghost(netc.history()); - let old_state: Ghost = Ghost(host_state); - - let (shadow_ok, event_results) = host_state.next_impl(&mut netc); - ok = shadow_ok; - - if ok { + { + // no need for decreases * because exec functions don't termination-check + let old_net_history: Ghost = Ghost(netc.history()); + let old_state: Ghost = Ghost(host_state); + let (shadow_ok, event_results) = host_state.next_impl(&mut netc); + ok = shadow_ok; + if ok { assert(host_state.invariants(&netc.my_end_point())); - //NB these assertions are just here to help the spec auditor see we're //doing the right thing. They duplicate the ensures on the next_impl trait method in //host_impl_t. - // Correctly executed one action assert(HostState::next(old_state@@, host_state@, event_results@.ios)); - // Connect the low-level IO events to the spec-level IO events assert(event_results@.event_seq() == event_results@.ios); - // The event_seq obligation enable us to apply reduction. But we shouldn't need to separate these // events out anymore (relative to ironfleet) now that we're enforcing this ordering in the // NetClient interface. assert(netc.history() == old_net_history@ + event_results@.event_seq()); assert(event_results@.well_typed_events()); - // Reset to allow receiving for the next atomic step. netc.reset(); - } } - Ok(()) } + Ok(()) +} - } // verus +} // verus! + // verus } mod marshal_ironsht_specific_v { @@ -6139,40 +7146,48 @@ mod marshal_ironsht_specific_v { verus! 
{ - use crate::keys_t::{SHTKey, CKey, KeyRange, KeyIterator}; - use crate::io_t::EndPoint; - use crate::hashmap_t::{CKeyHashMap, CKeyKV}; - use crate::cmessage_v::CMessage; +use crate::keys_t::{SHTKey, CKey, KeyRange, KeyIterator}; +use crate::io_t::EndPoint; +use crate::hashmap_t::{CKeyHashMap, CKeyKV}; +use crate::cmessage_v::CMessage; + +/* $line_count$Proof$ */ - /* $line_count$Proof$ */ marshalable_by_bijection! { +marshalable_by_bijection! { /* $line_count$Proof$ */ [SHTKey] <-> [u64]; /* $line_count$Proof$ */ forward(self) self.ukey; /* $line_count$Proof$ */ backward(x) SHTKey { ukey: x }; /* $line_count$Proof$ */ } - impl SHTKey { - /// Document that view_equal is definitionally to ==, with no explicit proof required. - pub proof fn view_equal_spec() - ensures forall |x: &SHTKey, y: &SHTKey| #[trigger] x.view_equal(y) <==> x == y - { - } - } +impl SHTKey { + /// Document that view_equal is definitionally to ==, with no explicit proof required. + pub proof fn view_equal_spec() + ensures + forall|x: &SHTKey, y: &SHTKey| #[trigger] x.view_equal(y) <==> x == y, + { + } +} - /* $line_count$Proof$}$ */ marshalable_by_bijection! { +/* $line_count$Proof$}$ */ + +marshalable_by_bijection! { /* $line_count$Proof$}$ */ [EndPoint] <-> [Vec::]; /* $line_count$Proof$}$ */ forward(self) self.id; /* $line_count$Proof$}$ */ backward(x) EndPoint { id: x }; /* $line_count$Proof$}$ */ } - impl EndPoint { - /// Document that view_equal is definitially x@ == y@, with no explicit proof required. - pub proof fn view_equal_spec() - ensures forall |x: &EndPoint, y: &EndPoint| #[trigger] x.view_equal(y) <==> x@ == y@ - { - } - } +impl EndPoint { + /// Document that view_equal is definitially x@ == y@, with no explicit proof required. + pub proof fn view_equal_spec() + ensures + forall|x: &EndPoint, y: &EndPoint| #[trigger] x.view_equal(y) <==> x@ == y@, + { + } +} - /* $line_count$Proof$ */ marshalable_by_bijection! { +/* $line_count$Proof$ */ + +marshalable_by_bijection! { /* $line_count$Proof$ */ [KeyRange::] <-> [(Option::, Option::)]; /* $line_count$Proof$ */ forward(self) { /* $line_count$Proof$ */ ( @@ -6202,231 +7217,251 @@ mod marshal_ironsht_specific_v { /* $line_count$Proof$ */ }, /* $line_count$Proof$ */ } /* $line_count$Proof$ */ }; - /* $line_count$Proof$ */ } + /* $line_count$Proof$ */ }/* $line_count$Proof$ */ - /* $line_count$Proof$ */ derive_marshalable_for_struct! { + +derive_marshalable_for_struct! 
{ /* $line_count$Proof$ */ pub struct CKeyKV { /* $line_count$Proof$ */ pub k: CKey, /* $line_count$Proof$ */ pub v: Vec::, /* $line_count$Proof$ */ } /* $line_count$Proof$ */ } - pub exec fn sorted_keys(v: &Vec) -> (res: bool) - ensures res == crate::hashmap_t::spec_sorted_keys(*v), +pub exec fn sorted_keys(v: &Vec) -> (res: bool) + ensures + res == crate::hashmap_t::spec_sorted_keys(*v), +{ + if v.len() <= 1 { + true + } else { + let mut idx = 1; + while idx < v.len() + invariant + (0 < idx <= v.len()), + (forall|i: int, j: int| + 0 <= i && i + 1 < idx && j == i + 1 ==> #[trigger] ckeykvlt(v@[i], v@[j])), { - if v.len() <= 1 { - true + if v[idx - 1].k.ukey >= v[idx].k.ukey { + assert(!ckeykvlt(v@[idx as int - 1], v@[idx as int])); + return false; } else { - let mut idx = 1; - while idx < v.len() - invariant - (0 < idx <= v.len()), - (forall |i: int, j: int| 0 <= i && i + 1 < idx && j == i+1 ==> #[trigger] ckeykvlt(v@[i], v@[j])), - { - if v[idx - 1].k.ukey >= v[idx].k.ukey { - assert(!ckeykvlt(v@[idx as int-1], v@[idx as int])); - return false; - } else { - idx = idx + 1; - } - } - assert forall |i: int| 0 <= i && i + 1 < v.len() implies #[trigger] v@[i].k.ukey < v@[i + 1].k.ukey by { - assert(ckeykvlt(v@[i], v@[i + 1])); // OBSERVE - // reveal(ckeykvlt); // TODO: this should be illegal since ckeykvlt is open - } - true + idx = idx + 1; } } - - // NOTE: This is an arbitrary upper limit, set up because the hashmap axiomatization isn't - // powerful enough to easily otherwise prove marshalability; the `valid_value` function already - // basically guarantees us this (in fact, it guarantees a smaller size than even this), but - // yeah, placing this arbitrary upper limit allows things to go through for the hash table. - #[verifier::opaque] - pub open spec fn ckeyhashmap_max_serialized_size() -> usize { - 0x100000 - } - pub fn ckeyhashmap_max_serialized_size_exec() -> (r: usize) - ensures r == ckeyhashmap_max_serialized_size() - { - reveal(ckeyhashmap_max_serialized_size); - 0x100000 + assert forall|i: int| 0 <= i && i + 1 < v.len() implies #[trigger] v@[i].k.ukey < v@[i + + 1].k.ukey by { + assert(ckeykvlt(v@[i], v@[i + 1])); // OBSERVE + // reveal(ckeykvlt); // TODO: this should be illegal since ckeykvlt is open } + true + } +} - impl Marshalable for CKeyHashMap { - open spec fn view_equal(&self, other: &Self) -> bool { - self@ === other@ - } - proof fn lemma_view_equal_symmetric(&self, other: &Self) - // req, ens from trait - {} - open spec fn is_marshalable(&self) -> bool { - self.to_vec().is_marshalable() - && crate::hashmap_t::spec_sorted_keys(self.to_vec()) - && self.to_vec().ghost_serialize().len() <= (ckeyhashmap_max_serialized_size() as int) - } - exec fn _is_marshalable(&self) -> (res: bool) - // req, ens from trait - { - let v = self.to_vec(); - let a = sorted_keys(&self.to_vec()); - v._is_marshalable() && a - && self.to_vec().serialized_size() <= ckeyhashmap_max_serialized_size_exec() - } - open spec fn ghost_serialize(&self) -> Seq - // req, ens from trait - { - self.to_vec().ghost_serialize() - } - exec fn serialized_size(&self) -> (res: usize) - // req, ens from trait - { - self.to_vec().serialized_size() - } - exec fn serialize(&self, data: &mut Vec) - // req, ens from trait - { - self.to_vec().serialize(data) - } - exec fn deserialize(data: &Vec, start: usize) -> (res: Option<(Self, usize)>) - // req, ens from trait - { - match >::deserialize(data, start) { - None => { - None - }, - Some((x, end)) => { - if !sorted_keys(&x) { - None - } else { - let res = 
CKeyHashMap::from_vec(x); - if end - start > ckeyhashmap_max_serialized_size_exec() { - None - } else { - Some((res, end)) - } - } - } - } - } - proof fn lemma_serialization_is_not_a_prefix_of(&self, other: &Self) - // req, ens from trait - { - self.lemma_to_vec_view(*other); - assert(self.to_vec()@ != other.to_vec()@); - if self.to_vec().len() != other.to_vec().len() { - self.to_vec().lemma_serialization_is_not_a_prefix_of(&other.to_vec()); - } else { - assert( - exists |i: int| #![auto] 0 <= i < self.spec_to_vec().len() && - self.spec_to_vec()[i]@ != other.spec_to_vec()[i]@ - ); - let i = choose |i: int| #![auto] 0 <= i < self.spec_to_vec().len() && - self.spec_to_vec()[i]@ != other.spec_to_vec()[i]@; - assert(self.to_vec()[i]@ != other.to_vec()[i]@); - assert(!self.to_vec()[i].view_equal(&other.to_vec()[i])); - assert(!self.to_vec().view_equal(&other.to_vec())); - self.to_vec().lemma_serialization_is_not_a_prefix_of(&other.to_vec()); - } - } - proof fn lemma_same_views_serialize_the_same(self: &Self, other: &Self) - // req, ens from trait - { - self.lemma_to_vec_view(*other); - self.to_vec().lemma_same_views_serialize_the_same(&other.to_vec()); - } - proof fn lemma_serialize_injective(self: &Self, other: &Self) - // req, ens from trait - { - if !self.view_equal(other) { - self.lemma_serialization_is_not_a_prefix_of(other); - assert(other.ghost_serialize().subrange(0, self.ghost_serialize().len() as int) - =~= other.ghost_serialize()); // OBSERVE - } - } - } +// NOTE: This is an arbitrary upper limit, set up because the hashmap axiomatization isn't +// powerful enough to easily otherwise prove marshalability; the `valid_value` function already +// basically guarantees us this (in fact, it guarantees a smaller size than even this), but +// yeah, placing this arbitrary upper limit allows things to go through for the hash table. 
+#[verifier::opaque] +pub open spec fn ckeyhashmap_max_serialized_size() -> usize { + 0x100000 +} - #[allow(non_snake_case)] - pub proof fn lemma_is_marshalable_CKeyHashMap(h: CKeyHashMap) - requires - crate::host_protocol_t::valid_hashtable(h@) - ensures - h.is_marshalable() - { - lemma_auto_spec_u64_to_from_le_bytes(); +pub fn ckeyhashmap_max_serialized_size_exec() -> (r: usize) + ensures + r == ckeyhashmap_max_serialized_size(), +{ + reveal(ckeyhashmap_max_serialized_size); + 0x100000 +} + +impl Marshalable for CKeyHashMap { + open spec fn view_equal(&self, other: &Self) -> bool { + self@ === other@ + } - assert(h@.dom().len() < 62); - h.lemma_to_vec(); + proof fn lemma_view_equal_symmetric( + &self, + other: &Self, + ) + // req, ens from trait + { + } - let vec = h.spec_to_vec(); + open spec fn is_marshalable(&self) -> bool { + self.to_vec().is_marshalable() && crate::hashmap_t::spec_sorted_keys(self.to_vec()) + && self.to_vec().ghost_serialize().len() <= (ckeyhashmap_max_serialized_size() as int) + } - assert(vec.len() < 62); + exec fn _is_marshalable(&self) -> (res: bool) + // req, ens from trait + { + let v = self.to_vec(); + let a = sorted_keys(&self.to_vec()); + v._is_marshalable() && a && self.to_vec().serialized_size() + <= ckeyhashmap_max_serialized_size_exec() + } - let max_len : int = 10_000; + open spec fn ghost_serialize(&self) -> Seq + // req, ens from trait + { + self.to_vec().ghost_serialize() + } - assert forall |i:int| 0 <= i < vec.len() implies ( - #[trigger] vec[i].is_marshalable() && vec[i].ghost_serialize().len() < max_len - ) by { - let (k, v) = vec[i]@; - assert(h@.contains_pair(k, v)); - assert(h@.dom().contains(k)); - assert(crate::app_interface_t::valid_key(k)); - assert(crate::app_interface_t::valid_value(h@[k])); - assert(vec[i].is_marshalable()); - assert(vec[i].ghost_serialize().len() < max_len); - } + exec fn serialized_size(&self) -> (res: usize) + // req, ens from trait + { + self.to_vec().serialized_size() + } - reveal(crate::marshal_ironsht_specific_v::ckeyhashmap_max_serialized_size); + exec fn serialize(&self, data: &mut Vec) + // req, ens from trait + { + self.to_vec().serialize(data) + } - assert( - (vec@.len() as usize).ghost_serialize().len() + - vec@.fold_left(0, |acc: int, x: CKeyKV| acc + x.ghost_serialize().len()) <= 0x100000 - ) by { - let f = |x: CKeyKV| x.ghost_serialize().len() as int; - let ag = |acc: int, x: CKeyKV| acc + x.ghost_serialize().len(); - let af = |acc: int, x: CKeyKV| acc + f(x); - assert forall |i:int| 0 <= i < vec@.len() implies f(vec@[i]) <= max_len by { - let (k, v) = vec[i]@; - assert(h@.contains_pair(k, v)); - assert(h@.dom().contains(k)); - assert(crate::app_interface_t::valid_key(k)); - assert(crate::app_interface_t::valid_value(h@[k])); - assert(vec[i].is_marshalable()); - assert(vec[i].ghost_serialize().len() < max_len); + exec fn deserialize(data: &Vec, start: usize) -> (res: Option< + (Self, usize), + >) + // req, ens from trait + { + match >::deserialize(data, start) { + None => { None }, + Some((x, end)) => { + if !sorted_keys(&x) { + None + } else { + let res = CKeyHashMap::from_vec(x); + if end - start > ckeyhashmap_max_serialized_size_exec() { + None + } else { + Some((res, end)) + } } - lemma_seq_fold_left_sum_le(vec@, 0, max_len, f); - fun_ext_2(ag, af); - } - - assert( - (vec@.len() as usize).ghost_serialize().len() - + vec@.fold_left(Seq::::empty(), |acc: Seq, x: CKeyKV| acc + x.ghost_serialize()).len() - <= 0x100000 - ) by { - let emp = Seq::::empty(); - let s = |x: CKeyKV| x.ghost_serialize(); - let 
agl = |acc: int, x: CKeyKV| acc + x.ghost_serialize().len() as int; - let asl = |acc: int, x: CKeyKV| acc + s(x).len() as int; - let sg = |acc: Seq, x: CKeyKV| acc + x.ghost_serialize(); - let sa = |acc: Seq, x: CKeyKV| acc + s(x); - lemma_seq_fold_left_append_len_int(vec@, emp, s); - assert(vec@.fold_left(emp, sa).len() as int == vec@.fold_left(0, asl)); - fun_ext_2(sa, sg); - assert(vec@.fold_left(emp, sg).len() as int == vec@.fold_left(0, asl)); - fun_ext_2(agl, asl); - assert(vec@.fold_left(emp, sg).len() == vec@.fold_left(0, agl)); - } + }, + } + } - assert(vec.is_marshalable()) by { - assert(vec@.len() <= usize::MAX); - assert(forall |x: CKeyKV| vec@.contains(x) ==> #[trigger] x.is_marshalable()); - } - assert(crate::hashmap_t::spec_sorted_keys(vec)); + proof fn lemma_serialization_is_not_a_prefix_of( + &self, + other: &Self, + ) + // req, ens from trait + { + self.lemma_to_vec_view(*other); + assert(self.to_vec()@ != other.to_vec()@); + if self.to_vec().len() != other.to_vec().len() { + self.to_vec().lemma_serialization_is_not_a_prefix_of(&other.to_vec()); + } else { + assert(exists|i: int| + #![auto] + 0 <= i < self.spec_to_vec().len() && self.spec_to_vec()[i]@ + != other.spec_to_vec()[i]@); + let i = choose|i: int| + #![auto] + 0 <= i < self.spec_to_vec().len() && self.spec_to_vec()[i]@ + != other.spec_to_vec()[i]@; + assert(self.to_vec()[i]@ != other.to_vec()[i]@); + assert(!self.to_vec()[i].view_equal(&other.to_vec()[i])); + assert(!self.to_vec().view_equal(&other.to_vec())); + self.to_vec().lemma_serialization_is_not_a_prefix_of(&other.to_vec()); + } + } + + proof fn lemma_same_views_serialize_the_same( + self: &Self, + other: &Self, + ) + // req, ens from trait + { + self.lemma_to_vec_view(*other); + self.to_vec().lemma_same_views_serialize_the_same(&other.to_vec()); + } - assert(h.is_marshalable()); + proof fn lemma_serialize_injective( + self: &Self, + other: &Self, + ) + // req, ens from trait + { + if !self.view_equal(other) { + self.lemma_serialization_is_not_a_prefix_of(other); + assert(other.ghost_serialize().subrange(0, self.ghost_serialize().len() as int) + =~= other.ghost_serialize()); // OBSERVE } + } +} + +#[allow(non_snake_case)] +pub proof fn lemma_is_marshalable_CKeyHashMap(h: CKeyHashMap) + requires + crate::host_protocol_t::valid_hashtable(h@), + ensures + h.is_marshalable(), +{ + lemma_auto_spec_u64_to_from_le_bytes(); + assert(h@.dom().len() < 62); + h.lemma_to_vec(); + let vec = h.spec_to_vec(); + assert(vec.len() < 62); + let max_len: int = 10_000; + assert forall|i: int| 0 <= i < vec.len() implies (#[trigger] vec[i].is_marshalable() + && vec[i].ghost_serialize().len() < max_len) by { + let (k, v) = vec[i]@; + assert(h@.contains_pair(k, v)); + assert(h@.dom().contains(k)); + assert(crate::app_interface_t::valid_key(k)); + assert(crate::app_interface_t::valid_value(h@[k])); + assert(vec[i].is_marshalable()); + assert(vec[i].ghost_serialize().len() < max_len); + } + reveal(crate::marshal_ironsht_specific_v::ckeyhashmap_max_serialized_size); + assert((vec@.len() as usize).ghost_serialize().len() + vec@.fold_left( + 0, + |acc: int, x: CKeyKV| acc + x.ghost_serialize().len(), + ) <= 0x100000) by { + let f = |x: CKeyKV| x.ghost_serialize().len() as int; + let ag = |acc: int, x: CKeyKV| acc + x.ghost_serialize().len(); + let af = |acc: int, x: CKeyKV| acc + f(x); + assert forall|i: int| 0 <= i < vec@.len() implies f(vec@[i]) <= max_len by { + let (k, v) = vec[i]@; + assert(h@.contains_pair(k, v)); + assert(h@.dom().contains(k)); + 
assert(crate::app_interface_t::valid_key(k));
+            assert(crate::app_interface_t::valid_value(h@[k]));
+            assert(vec[i].is_marshalable());
+            assert(vec[i].ghost_serialize().len() < max_len);
+        }
+        lemma_seq_fold_left_sum_le(vec@, 0, max_len, f);
+        fun_ext_2(ag, af);
+    }
+    assert((vec@.len() as usize).ghost_serialize().len() + vec@.fold_left(
+        Seq::<u8>::empty(),
+        |acc: Seq<u8>, x: CKeyKV| acc + x.ghost_serialize(),
+    ).len() <= 0x100000) by {
+        let emp = Seq::<u8>::empty();
+        let s = |x: CKeyKV| x.ghost_serialize();
+        let agl = |acc: int, x: CKeyKV| acc + x.ghost_serialize().len() as int;
+        let asl = |acc: int, x: CKeyKV| acc + s(x).len() as int;
+        let sg = |acc: Seq<u8>, x: CKeyKV| acc + x.ghost_serialize();
+        let sa = |acc: Seq<u8>, x: CKeyKV| acc + s(x);
+        lemma_seq_fold_left_append_len_int(vec@, emp, s);
+        assert(vec@.fold_left(emp, sa).len() as int == vec@.fold_left(0, asl));
+        fun_ext_2(sa, sg);
+        assert(vec@.fold_left(emp, sg).len() as int == vec@.fold_left(0, asl));
+        fun_ext_2(agl, asl);
+        assert(vec@.fold_left(emp, sg).len() == vec@.fold_left(0, agl));
+    }
+    assert(vec.is_marshalable()) by {
+        assert(vec@.len() <= usize::MAX);
+        assert(forall|x: CKeyKV| vec@.contains(x) ==> #[trigger] x.is_marshalable());
+    }
+    assert(crate::hashmap_t::spec_sorted_keys(vec));
+    assert(h.is_marshalable());
+}
 
-    /* $line_count$Proof$ */ derive_marshalable_for_enum! {
+/* $line_count$Proof$ */
+
+derive_marshalable_for_enum! {
 /* $line_count$Proof$ */ pub enum CMessage {
 /* $line_count$Proof$ */   #[tag = 0]
 /* $line_count$Proof$ */   GetRequest{ #[o=o0] k: CKey},
@@ -6444,7 +7479,7 @@ mod marshal_ironsht_specific_v {
 /* $line_count$Proof$ */   [rlimit attr = verifier::rlimit(20)]
 /* $line_count$Proof$ */ }
-    }
+} // verus!
 }
 
 mod marshal_v {
@@ -6487,62 +7522,87 @@ mod marshal_v {
 
 verus!
{
-    pub trait Marshalable : Sized {
-        spec fn is_marshalable(&self) -> bool;
-        exec fn _is_marshalable(&self) -> (res: bool)
-            ensures res == self.is_marshalable();
-        spec fn ghost_serialize(&self) -> Seq<u8>
-            recommends self.is_marshalable();
-        exec fn serialized_size(&self) -> (res: usize)
-            requires self.is_marshalable(),
-            ensures res as int == self.ghost_serialize().len();
-        exec fn serialize(&self, data: &mut Vec<u8>)
-            requires self.is_marshalable()
-            ensures
-                data@.len() >= old(data).len(),
-                data@.subrange(0, old(data)@.len() as int) == old(data)@,
-                data@.subrange(old(data)@.len() as int, data@.len() as int) == self.ghost_serialize();
-        exec fn deserialize(data: &Vec<u8>, start: usize) -> (res: Option<(Self, usize)>)
-            ensures match res {
-                Some((x, end)) => {
-                    &&& x.is_marshalable()
-                    &&& start <= end <= data.len()
-                    &&& data@.subrange(start as int, end as int) == x.ghost_serialize()
-                }
-                None => true,
-            };
+pub trait Marshalable: Sized {
+    spec fn is_marshalable(&self) -> bool;
+
+    exec fn _is_marshalable(&self) -> (res: bool)
+        ensures
+            res == self.is_marshalable(),
+    ;
+
+    spec fn ghost_serialize(&self) -> Seq<u8>
+        recommends
+            self.is_marshalable(),
+    ;
+
+    exec fn serialized_size(&self) -> (res: usize)
+        requires
+            self.is_marshalable(),
+        ensures
+            res as int == self.ghost_serialize().len(),
+    ;
+
+    exec fn serialize(&self, data: &mut Vec<u8>)
+        requires
+            self.is_marshalable(),
+        ensures
+            data@.len() >= old(data).len(),
+            data@.subrange(0, old(data)@.len() as int) == old(data)@,
+            data@.subrange(old(data)@.len() as int, data@.len() as int) == self.ghost_serialize(),
+    ;
+
+    exec fn deserialize(data: &Vec<u8>, start: usize) -> (res: Option<(Self, usize)>)
+        ensures
+            match res {
+                Some((x, end)) => {
+                    &&& x.is_marshalable()
+                    &&& start <= end <= data.len()
+                    &&& data@.subrange(start as int, end as int) == x.ghost_serialize()
+                },
+                None => true,
+            },
+    ;
+
+    // since Verus doesn't have a trait for `View` or such yet, also defining as a separate trait
+    // doesn't work, because Verus doesn't support trait generic bounds, we place `view_equal` within
+    // this trait itself.
+    spec fn view_equal(&self, other: &Self) -> bool;
 
-        // since Verus doesn't have a trait for `View` or such yet, also defining as a separate trait
-        // doesn't work, because Verus doesn't support trait generic bounds, we place `view_equal` within
-        // this trait itself.
- spec fn view_equal(&self, other: &Self) -> bool; - proof fn lemma_view_equal_symmetric(&self, other: &Self) - ensures self.view_equal(other) == other.view_equal(self); + proof fn lemma_view_equal_symmetric(&self, other: &Self) + ensures + self.view_equal(other) == other.view_equal(self), + ; - proof fn lemma_serialization_is_not_a_prefix_of(&self, other: &Self) + proof fn lemma_serialization_is_not_a_prefix_of(&self, other: &Self) requires - !self.view_equal(other), - self.ghost_serialize().len() <= other.ghost_serialize().len(), + !self.view_equal(other), + self.ghost_serialize().len() <= other.ghost_serialize().len(), ensures - self.ghost_serialize() != other.ghost_serialize().subrange(0, self.ghost_serialize().len() as int); + self.ghost_serialize() != other.ghost_serialize().subrange( + 0, + self.ghost_serialize().len() as int, + ), + ; - proof fn lemma_same_views_serialize_the_same(&self, other: &Self) + proof fn lemma_same_views_serialize_the_same(&self, other: &Self) requires - self.view_equal(other), + self.view_equal(other), ensures - self.is_marshalable() == other.is_marshalable(), - self.ghost_serialize() == other.ghost_serialize(); + self.is_marshalable() == other.is_marshalable(), + self.ghost_serialize() == other.ghost_serialize(), + ; - proof fn lemma_serialize_injective(&self, other: &Self) + proof fn lemma_serialize_injective(&self, other: &Self) requires - self.is_marshalable(), - other.is_marshalable(), - self.ghost_serialize() == other.ghost_serialize(), + self.is_marshalable(), + other.is_marshalable(), + self.ghost_serialize() == other.ghost_serialize(), ensures - self.view_equal(other); - } + self.view_equal(other), + ; +} - #[allow(unused)] +#[allow(unused)] macro_rules! ext_equalities_aux { ($s:expr $(,)?) => {}; ($s1:expr, $s2:expr, $($rest:expr,)* $(,)?) => {verus_proof_expr!{{ @@ -6550,1062 +7610,1319 @@ mod marshal_v { ext_equalities_aux!($s2, $($rest,)*); }}}; } - #[allow(unused)] + +#[allow(unused)] macro_rules! 
ext_equalities { ($($tt:tt)*) => { verus_proof_macro_exprs!(ext_equalities_aux!($($tt)*)) }; } - impl Marshalable for u64 { - - open spec fn view_equal(&self, other: &Self) -> bool { +impl Marshalable for u64 { + open spec fn view_equal(&self, other: &Self) -> bool { self@ === other@ - } + } - proof fn lemma_view_equal_symmetric(&self, other: &Self) - // req, ens from trait - {} + proof fn lemma_view_equal_symmetric( + &self, + other: &Self, + ) + // req, ens from trait + { + } - open spec fn is_marshalable(&self) -> bool { + open spec fn is_marshalable(&self) -> bool { true - } + } - exec fn _is_marshalable(&self) -> (res: bool) - // req, ens from trait - { + exec fn _is_marshalable(&self) -> (res: bool) + // req, ens from trait + { true - } + } - open spec fn ghost_serialize(&self) -> Seq { + open spec fn ghost_serialize(&self) -> Seq { spec_u64_to_le_bytes(*self) - } + } - exec fn serialized_size(&self) -> (res: usize) - // req, ens from trait - { + exec fn serialized_size(&self) -> (res: usize) + // req, ens from trait + { proof { - lemma_auto_spec_u64_to_from_le_bytes(); + lemma_auto_spec_u64_to_from_le_bytes(); } 8 - } + } - exec fn serialize(&self, data: &mut Vec) - // req, ens from trait - { + exec fn serialize(&self, data: &mut Vec) + // req, ens from trait + { let s = u64_to_le_bytes(*self); let mut i: usize = 0; - proof { - assert(data@.subrange(0, old(data)@.len() as int) =~= old(data)@); - assert(data@.subrange(old(data)@.len() as int, data@.len() as int) =~= self.ghost_serialize().subrange(0, i as int)); - lemma_auto_spec_u64_to_from_le_bytes(); + assert(data@.subrange(0, old(data)@.len() as int) =~= old(data)@); + assert(data@.subrange(old(data)@.len() as int, data@.len() as int) + =~= self.ghost_serialize().subrange(0, i as int)); + lemma_auto_spec_u64_to_from_le_bytes(); } - while i < 8 - invariant - 0 <= i <= 8, - s.len() == 8, - s@ == self.ghost_serialize(), - data@.subrange(0, old(data)@.len() as int) == old(data)@, - data@.subrange(old(data)@.len() as int, data@.len() as int) == self.ghost_serialize().subrange(0, i as int), - data@.len() == old(data)@.len() + i as int, - { - assert(data@.subrange(old(data)@.len() as int, data@.len() as int) == data@.subrange(old(data)@.len() as int, old(data)@.len() + i as int)); - - let x: u8 = s[i]; - data.push(x); - i = i + 1; - - proof { - assert(data@.subrange(0, old(data)@.len() as int) =~= old(data)@); - assert (data@.subrange(old(data)@.len() as int, data@.len() as int) == self.ghost_serialize().subrange(0, i as int)) by { - assert(self.ghost_serialize().subrange(0, (i - 1) as int).push(x) =~= self.ghost_serialize().subrange(0, i as int)); - assert(data@.subrange(old(data)@.len() as int, data@.len() as int) =~= self.ghost_serialize().subrange(0, (i - 1) as int).push(x)); + invariant + 0 <= i <= 8, + s.len() == 8, + s@ == self.ghost_serialize(), + data@.subrange(0, old(data)@.len() as int) == old(data)@, + data@.subrange(old(data)@.len() as int, data@.len() as int) + == self.ghost_serialize().subrange(0, i as int), + data@.len() == old(data)@.len() + i as int, + { + assert(data@.subrange(old(data)@.len() as int, data@.len() as int) == data@.subrange( + old(data)@.len() as int, + old(data)@.len() + i as int, + )); + let x: u8 = s[i]; + data.push(x); + i = i + 1; + proof { + assert(data@.subrange(0, old(data)@.len() as int) =~= old(data)@); + assert(data@.subrange(old(data)@.len() as int, data@.len() as int) + == self.ghost_serialize().subrange(0, i as int)) by { + assert(self.ghost_serialize().subrange(0, (i - 1) as int).push(x) 
+ =~= self.ghost_serialize().subrange(0, i as int)); + assert(data@.subrange(old(data)@.len() as int, data@.len() as int) + =~= self.ghost_serialize().subrange(0, (i - 1) as int).push(x)); + } } - } } - proof { - assert(self.ghost_serialize().subrange(0, i as int) =~= self.ghost_serialize()); + assert(self.ghost_serialize().subrange(0, i as int) =~= self.ghost_serialize()); } - } + } - exec fn deserialize(data: &Vec, start: usize) -> (res: Option<(Self, usize)>) - // req, ens from trait - { + exec fn deserialize(data: &Vec, start: usize) -> (res: Option< + (Self, usize), + >) + // req, ens from trait + { proof { - lemma_auto_spec_u64_to_from_le_bytes(); + lemma_auto_spec_u64_to_from_le_bytes(); } - if data.len() < 8 { - return None; + return None; } if start > data.len() - 8 { - return None; + return None; } let end = start + 8; - - let v = u64_from_le_bytes( - slice_subrange(data.as_slice(), start, end)); - + let v = u64_from_le_bytes(slice_subrange(data.as_slice(), start, end)); Some((v, end)) - } + } - proof fn lemma_serialization_is_not_a_prefix_of(&self, other: &Self) - // req, ens from trait - { + proof fn lemma_serialization_is_not_a_prefix_of( + &self, + other: &Self, + ) + // req, ens from trait + { lemma_auto_spec_u64_to_from_le_bytes(); - assert(other.ghost_serialize().subrange(0, self.ghost_serialize().len() as int) =~= other.ghost_serialize()); - } + assert(other.ghost_serialize().subrange(0, self.ghost_serialize().len() as int) + =~= other.ghost_serialize()); + } - proof fn lemma_same_views_serialize_the_same(self: &Self, other: &Self) - // req, ens from trait - { + proof fn lemma_same_views_serialize_the_same( + self: &Self, + other: &Self, + ) + // req, ens from trait + { lemma_auto_spec_u64_to_from_le_bytes(); - } + } - proof fn lemma_serialize_injective(self: &Self, other: &Self) - // req, ens from trait - { + proof fn lemma_serialize_injective( + self: &Self, + other: &Self, + ) + // req, ens from trait + { lemma_auto_spec_u64_to_from_le_bytes(); - } } +} - impl Marshalable for usize { - open spec fn view_equal(&self, other: &Self) -> bool { +impl Marshalable for usize { + open spec fn view_equal(&self, other: &Self) -> bool { self@ === other@ - } - proof fn lemma_view_equal_symmetric(&self, other: &Self) - // req, ens from trait - {} - open spec fn is_marshalable(&self) -> bool { + } + + proof fn lemma_view_equal_symmetric( + &self, + other: &Self, + ) + // req, ens from trait + { + } + + open spec fn is_marshalable(&self) -> bool { &&& *self as int <= u64::MAX - } - exec fn _is_marshalable(&self) -> (res: bool) - // req, ens from trait - { + } + + exec fn _is_marshalable(&self) -> (res: bool) + // req, ens from trait + { *self as u64 <= u64::MAX - } - open spec fn ghost_serialize(&self) -> Seq { + } + + open spec fn ghost_serialize(&self) -> Seq { (*self as u64).ghost_serialize() - } - exec fn serialized_size(&self) -> (res: usize) - // req, ens from trait - { + } + + exec fn serialized_size(&self) -> (res: usize) + // req, ens from trait + { (*self as u64).serialized_size() - } - exec fn serialize(&self, data: &mut Vec) - // req, ens from trait - { + } + + exec fn serialize(&self, data: &mut Vec) + // req, ens from trait + { (*self as u64).serialize(data) - } - exec fn deserialize(data: &Vec, start: usize) -> (res: Option<(Self, usize)>) - // req, ens from trait - { + } + + exec fn deserialize(data: &Vec, start: usize) -> (res: Option< + (Self, usize), + >) + // req, ens from trait + { proof { - lemma_auto_spec_u64_to_from_le_bytes(); + 
lemma_auto_spec_u64_to_from_le_bytes(); } - let (r, end) = match u64::deserialize(data, start) { None => { return None; }, Some(x) => x, }; + let (r, end) = match u64::deserialize(data, start) { + None => { + return None; + }, + Some(x) => x, + }; if r <= usize::MAX as u64 { - let res = r as usize; - // assert(res.0 as int <= u64::MAX); - // assert(r.0 as int <= u64::MAX); - // assert(res.0 as int <= usize::MAX); - // assert(r.0 as int <= usize::MAX); - // assert(res.0 as int == r.0 as int); - Some((res, end)) + let res = r as usize; + // assert(res.0 as int <= u64::MAX); + // assert(r.0 as int <= u64::MAX); + // assert(res.0 as int <= usize::MAX); + // assert(r.0 as int <= usize::MAX); + // assert(res.0 as int == r.0 as int); + Some((res, end)) } else { - None + None } - } - proof fn lemma_serialization_is_not_a_prefix_of(&self, other: &Self) - // req, ens from trait - { + } + + proof fn lemma_serialization_is_not_a_prefix_of( + &self, + other: &Self, + ) + // req, ens from trait + { (*self as u64).lemma_serialization_is_not_a_prefix_of(&(*other as u64)); - } - proof fn lemma_same_views_serialize_the_same(self: &Self, other: &Self) - // req, ens from trait - { + } + + proof fn lemma_same_views_serialize_the_same( + self: &Self, + other: &Self, + ) + // req, ens from trait + { (*self as u64).lemma_same_views_serialize_the_same(&(*other as u64)); - } - proof fn lemma_serialize_injective(self: &Self, other: &Self) - // req, ens from trait - { + } + + proof fn lemma_serialize_injective( + self: &Self, + other: &Self, + ) + // req, ens from trait + { (*self as u64).lemma_serialize_injective(&(*other as u64)); - } } +} - impl Marshalable for Vec { - open spec fn view_equal(&self, other: &Self) -> bool { +impl Marshalable for Vec { + open spec fn view_equal(&self, other: &Self) -> bool { self@ === other@ - } - proof fn lemma_view_equal_symmetric(&self, other: &Self) - // req, ens from trait - {} - open spec fn is_marshalable(&self) -> bool { - self@.len() <= usize::MAX && - (self@.len() as usize).ghost_serialize().len() + self@.len() as int <= usize::MAX - } - - exec fn _is_marshalable(&self) -> (res: bool) - // req, ens from trait - { - self.len() <= usize::MAX && - self.len().serialized_size() <= usize::MAX - self.len() - } + } - open spec fn ghost_serialize(&self) -> Seq { - (self@.len() as usize).ghost_serialize() - + self@ - } + proof fn lemma_view_equal_symmetric( + &self, + other: &Self, + ) + // req, ens from trait + { + } - exec fn serialized_size(&self) -> (res: usize) - // req, ens from trait - { + open spec fn is_marshalable(&self) -> bool { + self@.len() <= usize::MAX && (self@.len() as usize).ghost_serialize().len() + + self@.len() as int <= usize::MAX + } + + exec fn _is_marshalable(&self) -> (res: bool) + // req, ens from trait + { + self.len() <= usize::MAX && self.len().serialized_size() <= usize::MAX - self.len() + } + + open spec fn ghost_serialize(&self) -> Seq { + (self@.len() as usize).ghost_serialize() + self@ + } + + exec fn serialized_size(&self) -> (res: usize) + // req, ens from trait + { self.len().serialized_size() + self.len() - } + } - exec fn serialize(&self, data: &mut Vec) - // req, ens from trait - { + exec fn serialize(&self, data: &mut Vec) + // req, ens from trait + { let self_len = self.len(); self_len.serialize(data); let init: Ghost = Ghost(self_len.ghost_serialize().len() as int); - let mut i: usize = 0; - proof { - assert(data@.subrange(0, old(data)@.len() as int) =~= old(data)@); - assert(data@.subrange(old(data)@.len() as int, data@.len() as int) =~= 
self.ghost_serialize().subrange(0, i + init@)); + assert(data@.subrange(0, old(data)@.len() as int) =~= old(data)@); + assert(data@.subrange(old(data)@.len() as int, data@.len() as int) + =~= self.ghost_serialize().subrange(0, i + init@)); } - while i < self.len() - invariant - 0 <= i <= self.len(), - self.is_marshalable(), - self.ghost_serialize().len() == init@ + self.len(), - 0 <= i + init@ <= self.ghost_serialize().len(), - data@.subrange(0, old(data)@.len() as int) == old(data)@, - data@.subrange(old(data)@.len() as int, data@.len() as int) == self.ghost_serialize().subrange(0, i + init@), - data@.len() == old(data)@.len() + i + init@, - { - assert(data@.subrange(old(data)@.len() as int, data@.len() as int) == data@.subrange(old(data)@.len() as int, old(data)@.len() + i + init@)); - - let x: u8 = self[i]; - data.push(x); - i = i + 1; - - proof { - assert(data@.subrange(0, old(data)@.len() as int) =~= old(data)@); - assert (data@.subrange(old(data)@.len() as int, data@.len() as int) == self.ghost_serialize().subrange(0, i + init@)) by { - assert(self.ghost_serialize().subrange(0, (i + init@ - 1) as int).push(x) =~= self.ghost_serialize().subrange(0, i + init@)); - assert(data@.subrange(old(data)@.len() as int, data@.len() as int) =~= self.ghost_serialize().subrange(0, (i + init@ - 1) as int).push(x)); + invariant + 0 <= i <= self.len(), + self.is_marshalable(), + self.ghost_serialize().len() == init@ + self.len(), + 0 <= i + init@ <= self.ghost_serialize().len(), + data@.subrange(0, old(data)@.len() as int) == old(data)@, + data@.subrange(old(data)@.len() as int, data@.len() as int) + == self.ghost_serialize().subrange(0, i + init@), + data@.len() == old(data)@.len() + i + init@, + { + assert(data@.subrange(old(data)@.len() as int, data@.len() as int) == data@.subrange( + old(data)@.len() as int, + old(data)@.len() + i + init@, + )); + let x: u8 = self[i]; + data.push(x); + i = i + 1; + proof { + assert(data@.subrange(0, old(data)@.len() as int) =~= old(data)@); + assert(data@.subrange(old(data)@.len() as int, data@.len() as int) + == self.ghost_serialize().subrange(0, i + init@)) by { + assert(self.ghost_serialize().subrange(0, (i + init@ - 1) as int).push(x) + =~= self.ghost_serialize().subrange(0, i + init@)); + assert(data@.subrange(old(data)@.len() as int, data@.len() as int) + =~= self.ghost_serialize().subrange(0, (i + init@ - 1) as int).push(x)); + } } - } } - proof { - assert(self.ghost_serialize().subrange(0, i + init@) =~= self.ghost_serialize()); + assert(self.ghost_serialize().subrange(0, i + init@) =~= self.ghost_serialize()); } - } + } - exec fn deserialize(data: &Vec, start: usize) -> (res: Option<(Self, usize)>) - // req, ens from trait - { - let (len, mid) = match usize::deserialize(data, start) { None => { - return None; - }, Some(x) => x, }; + exec fn deserialize(data: &Vec, start: usize) -> (res: Option< + (Self, usize), + >) + // req, ens from trait + { + let (len, mid) = match usize::deserialize(data, start) { + None => { + return None; + }, + Some(x) => x, + }; let len = len as usize; - // assert(mid <= data.len()); // assert(data@.subrange(start as int, mid as int) == usize(len).ghost_serialize()); - let end = if usize::MAX - mid >= len { - mid + len + mid + len } else { - return None; + return None; }; if end > data.len() { - return None; + return None; } - // assert(0 <= mid); // assert(len >= 0); // assert(mid <= end); // assert(end <= data.len()); + let res_slice = slice_subrange(data.as_slice(), mid, end); let res = slice_to_vec(res_slice); - // 
assert(res_slice@ == data@.subrange(mid as int, end as int)); // assert(res@ == res_slice@); - // assert(res.ghost_serialize() == usize(len).ghost_serialize() + res@); // assert(data@.subrange(start as int, mid as int) == usize(len).ghost_serialize()); // assert(data@.subrange(mid as int, end as int) == res@); - // assert(0 <= start); // assert(start <= mid); // assert(mid <= end); // assert(end <= data.len()); - proof { - // For performance reasons, we need to split this into a lemma. - // Weirdly, if we inline the lemma, the proof fails. - // - // This is especially weird because the lemma just has a call to the `assert_seqs_equal!` - // macro. which itself is supposed to do things via `assert by` which should not cause a - // blow-up in the solver time. - seq_lib_v::lemma_seq_add_subrange::(data@, start as int, mid as int, end as int); + // For performance reasons, we need to split this into a lemma. + // Weirdly, if we inline the lemma, the proof fails. + // + // This is especially weird because the lemma just has a call to the `assert_seqs_equal!` + // macro. which itself is supposed to do things via `assert by` which should not cause a + // blow-up in the solver time. + seq_lib_v::lemma_seq_add_subrange::(data@, start as int, mid as int, end as int); } - Some((res, end)) - } + } - proof fn lemma_serialization_is_not_a_prefix_of(&self, other: &Self) - // req, ens from trait - { + proof fn lemma_serialization_is_not_a_prefix_of( + &self, + other: &Self, + ) + // req, ens from trait + { lemma_auto_spec_u64_to_from_le_bytes(); assert(self.ghost_serialize().subrange(0, 8) =~= (self@.len() as usize).ghost_serialize()); - assert(other.ghost_serialize().subrange(0, 8) =~= (other@.len() as usize).ghost_serialize()); + assert(other.ghost_serialize().subrange(0, 8) =~= ( + other@.len() as usize).ghost_serialize()); if self.ghost_serialize().len() == other.ghost_serialize().len() { - assert(other.ghost_serialize().subrange(0, self.ghost_serialize().len() as int) =~= other.ghost_serialize()); - assert(self.ghost_serialize().subrange(8, self.ghost_serialize().len() as int) =~= self@); - assert(other.ghost_serialize().subrange(8, self.ghost_serialize().len() as int) =~= other@); + assert(other.ghost_serialize().subrange(0, self.ghost_serialize().len() as int) + =~= other.ghost_serialize()); + assert(self.ghost_serialize().subrange(8, self.ghost_serialize().len() as int) + =~= self@); + assert(other.ghost_serialize().subrange(8, self.ghost_serialize().len() as int) + =~= other@); } else { - assert(other.len() > self.len()); // OBSERVE - assert(other.ghost_serialize().subrange(0, self.ghost_serialize().len() as int).subrange(0, 8) =~= other.ghost_serialize().subrange(0, 8)); + assert(other.len() > self.len()); // OBSERVE + assert(other.ghost_serialize().subrange( + 0, + self.ghost_serialize().len() as int, + ).subrange(0, 8) =~= other.ghost_serialize().subrange(0, 8)); } - } + } - proof fn lemma_same_views_serialize_the_same(self: &Self, other: &Self) - // req, ens from trait - {} + proof fn lemma_same_views_serialize_the_same( + self: &Self, + other: &Self, + ) + // req, ens from trait + { + } - proof fn lemma_serialize_injective(self: &Self, other: &Self) - // req, ens from trait - { + proof fn lemma_serialize_injective( + self: &Self, + other: &Self, + ) + // req, ens from trait + { lemma_auto_spec_u64_to_from_le_bytes(); - assert(self@ =~= self.ghost_serialize().subrange((self@.len() as usize).ghost_serialize().len() as int, self.ghost_serialize().len() as int)); - assert(other@ =~= 
other.ghost_serialize().subrange((other@.len() as usize).ghost_serialize().len() as int, other.ghost_serialize().len() as int)); - } + assert(self@ =~= self.ghost_serialize().subrange( + (self@.len() as usize).ghost_serialize().len() as int, + self.ghost_serialize().len() as int, + )); + assert(other@ =~= other.ghost_serialize().subrange( + (other@.len() as usize).ghost_serialize().len() as int, + other.ghost_serialize().len() as int, + )); } +} - impl Marshalable for Option { - open spec fn view_equal(&self, other: &Self) -> bool { +impl Marshalable for Option { + open spec fn view_equal(&self, other: &Self) -> bool { match (self, other) { - (None, None) => true, - (Some(s), Some(o)) => s.view_equal(o), - _ => false, + (None, None) => true, + (Some(s), Some(o)) => s.view_equal(o), + _ => false, } - } - proof fn lemma_view_equal_symmetric(&self, other: &Self) - // req, ens from trait - { + } + + proof fn lemma_view_equal_symmetric( + &self, + other: &Self, + ) + // req, ens from trait + { match (self, other) { - (None, None) => (), - (Some(s), Some(o)) => s.lemma_view_equal_symmetric(o), - _ => (), + (None, None) => (), + (Some(s), Some(o)) => s.lemma_view_equal_symmetric(o), + _ => (), } - } - open spec fn is_marshalable(&self) -> bool { + } + + open spec fn is_marshalable(&self) -> bool { match self { - None => true, - Some(x) => x.is_marshalable() && 1 + x.ghost_serialize().len() <= usize::MAX, + None => true, + Some(x) => x.is_marshalable() && 1 + x.ghost_serialize().len() <= usize::MAX, } - } - exec fn _is_marshalable(&self) -> (res: bool) - // req, ens from trait - { + } + + exec fn _is_marshalable(&self) -> (res: bool) + // req, ens from trait + { match self { - None => true, - Some(x) => x._is_marshalable() && x.serialized_size() <= usize::MAX - 1, + None => true, + Some(x) => x._is_marshalable() && x.serialized_size() <= usize::MAX - 1, } - } - open spec fn ghost_serialize(&self) -> Seq - // req, ens from trait - { + } + + open spec fn ghost_serialize(&self) -> Seq + // req, ens from trait + { match self { - None => seq![0], - Some(x) => seq![1] + x.ghost_serialize(), + None => seq![0], + Some(x) => seq![1] + x.ghost_serialize(), } - } - exec fn serialized_size(&self) -> (res: usize) - // req, ens from trait - { + } + + exec fn serialized_size(&self) -> (res: usize) + // req, ens from trait + { match self { - None => 1, - Some(x) => 1 + x.serialized_size(), + None => 1, + Some(x) => 1 + x.serialized_size(), } - } - exec fn serialize(&self, data: &mut Vec) - // req, ens from trait - { + } + + exec fn serialize(&self, data: &mut Vec) + // req, ens from trait + { match self { - None => { - data.push(0); - let mid_data_len: Ghost = Ghost(data@.len() as int); - proof { - assert(data@.subrange(old(data)@.len() as int, data@.len() as int) =~= self.ghost_serialize()); - assert(data@.subrange(0, old(data)@.len() as int) =~= old(data)@); - } - } - Some(x) => { - data.push(1); - let mid_data_len: Ghost = Ghost(data@.len() as int); - proof { - assert(data@.subrange(old(data)@.len() as int, mid_data_len@) =~= seq![1]); - assert(data@.subrange(0, old(data)@.len() as int) =~= old(data)@); - } - x.serialize(data); - proof { - assert(data@.subrange(old(data)@.len() as int, data@.len() as int) =~= self.ghost_serialize()); - assert(data@.subrange(0, old(data)@.len() as int) =~= old(data)@); - } - } + None => { + data.push(0); + let mid_data_len: Ghost = Ghost(data@.len() as int); + proof { + assert(data@.subrange(old(data)@.len() as int, data@.len() as int) + =~= self.ghost_serialize()); + 
assert(data@.subrange(0, old(data)@.len() as int) =~= old(data)@); + } + }, + Some(x) => { + data.push(1); + let mid_data_len: Ghost = Ghost(data@.len() as int); + proof { + assert(data@.subrange(old(data)@.len() as int, mid_data_len@) =~= seq![1]); + assert(data@.subrange(0, old(data)@.len() as int) =~= old(data)@); + } + x.serialize(data); + proof { + assert(data@.subrange(old(data)@.len() as int, data@.len() as int) + =~= self.ghost_serialize()); + assert(data@.subrange(0, old(data)@.len() as int) =~= old(data)@); + } + }, } - } - exec fn deserialize(data: &Vec, start: usize) -> (res: Option<(Self, usize)>) - // req, ens from trait - { + } + + exec fn deserialize(data: &Vec, start: usize) -> (res: Option< + (Self, usize), + >) + // req, ens from trait + { if data.len() == 0 || start > data.len() - 1 { - return None; + return None; } let tag = data[start]; let (x, end) = if tag == 0 { - let mid = start + 1; - (None, mid) + let mid = start + 1; + (None, mid) } else if tag == 1 { - let mid = start + 1; - let (x, mid) = match T::deserialize(data, mid) { None => { - return None; - }, Some(x) => x, }; - (Some(x), mid) + let mid = start + 1; + let (x, mid) = match T::deserialize(data, mid) { + None => { + return None; + }, + Some(x) => x, + }; + (Some(x), mid) } else { - return None; + return None; }; proof { - assert(data@.subrange(start as int, end as int) =~= x.ghost_serialize()); + assert(data@.subrange(start as int, end as int) =~= x.ghost_serialize()); } Some((x, end)) - } - proof fn lemma_serialization_is_not_a_prefix_of(&self, other: &Self) - // req, ens from trait - { + } + + proof fn lemma_serialization_is_not_a_prefix_of( + &self, + other: &Self, + ) + // req, ens from trait + { match (self, other) { - (None, None) => {} - (Some(_), None) | (None, Some(_)) => { - assert(self.ghost_serialize()[0] != other.ghost_serialize()[0]); // OBSERVE - } - (Some(s), Some(o)) => { - s.lemma_serialization_is_not_a_prefix_of(o); - assert(s.ghost_serialize() =~= self.ghost_serialize().subrange(1, self.ghost_serialize().len() as int)); - assert(o.ghost_serialize().subrange(0, s.ghost_serialize().len() as int) =~= other.ghost_serialize().subrange(0, self.ghost_serialize().len() as int).subrange(1, self.ghost_serialize().len() as int)); - } + (None, None) => {}, + (Some(_), None) | (None, Some(_)) => { + assert(self.ghost_serialize()[0] != other.ghost_serialize()[0]); // OBSERVE + }, + (Some(s), Some(o)) => { + s.lemma_serialization_is_not_a_prefix_of(o); + assert(s.ghost_serialize() =~= self.ghost_serialize().subrange( + 1, + self.ghost_serialize().len() as int, + )); + assert(o.ghost_serialize().subrange(0, s.ghost_serialize().len() as int) + =~= other.ghost_serialize().subrange( + 0, + self.ghost_serialize().len() as int, + ).subrange(1, self.ghost_serialize().len() as int)); + }, } - } - proof fn lemma_same_views_serialize_the_same(self: &Self, other: &Self) - // req, ens from trait - { + } + + proof fn lemma_same_views_serialize_the_same( + self: &Self, + other: &Self, + ) + // req, ens from trait + { match (self, other) { - (Some(s), Some(o)) => s.lemma_same_views_serialize_the_same(o), - _ => (), + (Some(s), Some(o)) => s.lemma_same_views_serialize_the_same(o), + _ => (), } - } - proof fn lemma_serialize_injective(self: &Self, other: &Self) - // req, ens from trait - { + } + + proof fn lemma_serialize_injective( + self: &Self, + other: &Self, + ) + // req, ens from trait + { match (self, other) { - (Some(s), Some(o)) => { - assert(s.ghost_serialize() =~= self.ghost_serialize().subrange(1, 
self.ghost_serialize().len() as int)); - assert(o.ghost_serialize() =~= other.ghost_serialize().subrange(1, other.ghost_serialize().len() as int)); - s.lemma_serialize_injective(o); - }, - (None, None) => {}, - (Some(s), None) => { - assert(other.ghost_serialize()[0] == 0); // OBSERVE - }, - (None, Some(o)) => { - assert(self.ghost_serialize()[0] == 0); // OBSERVE - } + (Some(s), Some(o)) => { + assert(s.ghost_serialize() =~= self.ghost_serialize().subrange( + 1, + self.ghost_serialize().len() as int, + )); + assert(o.ghost_serialize() =~= other.ghost_serialize().subrange( + 1, + other.ghost_serialize().len() as int, + )); + s.lemma_serialize_injective(o); + }, + (None, None) => {}, + (Some(s), None) => { + assert(other.ghost_serialize()[0] == 0); // OBSERVE + }, + (None, Some(o)) => { + assert(self.ghost_serialize()[0] == 0); // OBSERVE + }, } - } } +} - impl Marshalable for Vec { - open spec fn view_equal(&self, other: &Self) -> bool { +impl Marshalable for Vec { + open spec fn view_equal(&self, other: &Self) -> bool { let s = self@; let o = other@; - s.len() == o.len() && (forall |i: int| 0 <= i < s.len() ==> #[trigger] s[i].view_equal(&o[i])) - } - proof fn lemma_view_equal_symmetric(&self, other: &Self) - // req, ens from trait - { + s.len() == o.len() && (forall|i: int| + 0 <= i < s.len() ==> #[trigger] s[i].view_equal(&o[i])) + } + + proof fn lemma_view_equal_symmetric( + &self, + other: &Self, + ) + // req, ens from trait + { let s = self@; let o = other@; if self.view_equal(other) { - assert forall |i: int| 0 <= i < o.len() implies #[trigger] o[i].view_equal(&s[i]) by { - s[i].lemma_view_equal_symmetric(&o[i]); - } + assert forall|i: int| 0 <= i < o.len() implies #[trigger] o[i].view_equal(&s[i]) by { + s[i].lemma_view_equal_symmetric(&o[i]); + } } else { - if s.len() != o.len() { - // trivial - } else { - let i = choose |i: int| 0 <= i < s.len() && ! 
#[trigger] s[i].view_equal(&o[i]); - s[i].lemma_view_equal_symmetric(&o[i]); - } + if s.len() != o.len() { + // trivial + } else { + let i = choose|i: int| 0 <= i < s.len() && !#[trigger] s[i].view_equal(&o[i]); + s[i].lemma_view_equal_symmetric(&o[i]); + } } - } - open spec fn is_marshalable(&self) -> bool { + } + + open spec fn is_marshalable(&self) -> bool { &&& self@.len() <= usize::MAX - &&& (forall |x: T| self@.contains(x) ==> #[trigger] x.is_marshalable()) - &&& (self@.len() as usize).ghost_serialize().len() + - self@.fold_left(0, |acc: int, x: T| acc + x.ghost_serialize().len()) <= usize::MAX - } + &&& (forall|x: T| self@.contains(x) ==> #[trigger] x.is_marshalable()) + &&& (self@.len() as usize).ghost_serialize().len() + self@.fold_left( + 0, + |acc: int, x: T| acc + x.ghost_serialize().len(), + ) <= usize::MAX + } - exec fn _is_marshalable(&self) -> bool { + exec fn _is_marshalable(&self) -> bool { let mut res = true; let mut i = 0; let mut total_len = self.len().serialized_size(); - proof { - assert(self@ =~= self@.subrange(0, self@.len() as int)); + assert(self@ =~= self@.subrange(0, self@.len() as int)); } - while res && i < self.len() - invariant - 0 <= i <= self.len(), - res ==> total_len as int == (self@.len() as usize).ghost_serialize().len() + - self@.subrange(0, i as int).fold_left(0, |acc: int, x: T| acc + x.ghost_serialize().len()), - res ==> (forall |x: T| self@.subrange(0, i as int).contains(x) ==> #[trigger] x.is_marshalable()), - res ==> total_len as int <= usize::MAX, - !res ==> !self.is_marshalable(), - { - assert(res); - res = res && self[i]._is_marshalable() && (usize::MAX - total_len >= self[i].serialized_size()); - if res { - let old_total_len = total_len; - total_len = total_len + self[i].serialized_size(); - i = i + 1; - proof { - assert forall |x: T| #[trigger] self@.subrange(0, i as int).contains(x) implies x.is_marshalable() by { - if (exists |j:int| 0 <= j < self@.subrange(0, i as int).len() - 1 && self@.subrange(0, i as int)[j] == x) { - let j = choose|j:int| 0 <= j < self@.subrange(0, i as int).len() - 1 && self@.subrange(0, i as int)[j] == x; - assert(self@.subrange(0, i as int - 1)[j] == x); // OBSERVE + invariant + 0 <= i <= self.len(), + res ==> total_len as int == (self@.len() as usize).ghost_serialize().len() + + self@.subrange(0, i as int).fold_left( + 0, + |acc: int, x: T| acc + x.ghost_serialize().len(), + ), + res ==> (forall|x: T| + self@.subrange(0, i as int).contains(x) ==> #[trigger] x.is_marshalable()), + res ==> total_len as int <= usize::MAX, + !res ==> !self.is_marshalable(), + { + assert(res); + res = + res && self[i]._is_marshalable() && (usize::MAX - total_len + >= self[i].serialized_size()); + if res { + let old_total_len = total_len; + total_len = total_len + self[i].serialized_size(); + i = i + 1; + proof { + assert forall|x: T| #[trigger] + self@.subrange(0, i as int).contains(x) implies x.is_marshalable() by { + if (exists|j: int| + 0 <= j < self@.subrange(0, i as int).len() - 1 && self@.subrange( + 0, + i as int, + )[j] == x) { + let j = choose|j: int| + 0 <= j < self@.subrange(0, i as int).len() - 1 && self@.subrange( + 0, + i as int, + )[j] == x; + assert(self@.subrange(0, i as int - 1)[j] == x); // OBSERVE + } + }; + let sl = |x: T| x.ghost_serialize().len() as int; + fun_ext_2::( + |acc: int, x: T| acc + x.ghost_serialize().len() as int, + |acc: int, x: T| acc + sl(x), + ); + let s = self@.subrange(0 as int, i as int); + seq_lib_v::lemma_seq_fold_left_sum_right::(s, 0, sl); + assert(s.subrange(0, s.len() - 1) =~= 
self@.subrange(0 as int, i - 1 as int)); + } + } else { + proof { + if usize::MAX < total_len + self@[i as int].ghost_serialize().len() { + assert(((self@.len() as usize).ghost_serialize().len() + self@.fold_left( + 0, + |acc: int, x: T| acc + x.ghost_serialize().len(), + )) >= (total_len + self@[i as int].ghost_serialize().len())) by { + let f = |x: T| x.ghost_serialize(); + let sl = |x: T| x.ghost_serialize().len() as int; + let s = self@.subrange(0 as int, i as int + 1); + fun_ext_2::( + |acc: int, x: T| acc + x.ghost_serialize().len() as int, + |acc: int, x: T| acc + sl(x), + ); + seq_lib_v::lemma_seq_fold_left_sum_right::(s, 0, sl); + assert(s.subrange(0, s.len() - 1) =~= self@.subrange( + 0 as int, + i as int, + )); + seq_lib_v::lemma_seq_fold_left_append_len_int_le( + self@, + i as int + 1, + 0, + f, + ); + fun_ext_2( + |acc: int, x: T| acc + x.ghost_serialize().len() as int, + |acc: int, x: T| acc + f(x).len(), + ); + }; + } else { + assert(!self@[i as int].is_marshalable()); + } } - }; - let sl = |x: T| x.ghost_serialize().len() as int; - fun_ext_2::(|acc: int, x: T| acc + x.ghost_serialize().len() as int, |acc: int, x: T| acc + sl(x)); - let s = self@.subrange(0 as int, i as int); - seq_lib_v::lemma_seq_fold_left_sum_right::(s, 0, sl); - assert(s.subrange(0, s.len() - 1) =~= self@.subrange(0 as int, i - 1 as int)); - } - } else { - proof { - if usize::MAX < total_len + self@[i as int].ghost_serialize().len() { - assert( - ((self@.len() as usize).ghost_serialize().len() + - self@.fold_left(0, |acc: int, x: T| acc + x.ghost_serialize().len())) - >= - (total_len + self@[i as int].ghost_serialize().len()) - ) by { - let f = |x: T| x.ghost_serialize(); - let sl = |x: T| x.ghost_serialize().len() as int; - let s = self@.subrange(0 as int, i as int + 1); - fun_ext_2::(|acc: int, x: T| acc + x.ghost_serialize().len() as int, |acc: int, x: T| acc + sl(x)); - seq_lib_v::lemma_seq_fold_left_sum_right::(s, 0, sl); - assert(s.subrange(0, s.len() - 1) =~= self@.subrange(0 as int, i as int)); - seq_lib_v::lemma_seq_fold_left_append_len_int_le(self@, i as int + 1, 0, f); - fun_ext_2(|acc: int, x: T| acc + x.ghost_serialize().len() as int, |acc: int, x: T| acc + f(x).len()); - }; - } else { - assert(!self@[i as int].is_marshalable()); - } } - } } - res - } + } - open spec fn ghost_serialize(&self) -> Seq { - (self@.len() as usize).ghost_serialize() - + self@.fold_left(Seq::::empty(), |acc: Seq, x: T| acc + x.ghost_serialize()) - } + open spec fn ghost_serialize(&self) -> Seq { + (self@.len() as usize).ghost_serialize() + self@.fold_left( + Seq::::empty(), + |acc: Seq, x: T| acc + x.ghost_serialize(), + ) + } - exec fn serialized_size(&self) -> (res: usize) - // req, ens from trait - { + exec fn serialized_size(&self) -> (res: usize) + // req, ens from trait + { let mut res = self.len().serialized_size(); let mut i = 0; - proof { - assert(self@ =~= self@.subrange(0, self@.len() as int)); + assert(self@ =~= self@.subrange(0, self@.len() as int)); } - while i < self.len() - invariant - 0 <= i <= self.len(), - (forall |x: T| self@.contains(x) ==> #[trigger] x.is_marshalable()), - (self@.len() as usize).ghost_serialize().len() + - self@.subrange(0 as int, self@.len() as int).fold_left(0, |acc: int, x: T| acc + x.ghost_serialize().len()) <= usize::MAX, - res == (self@.len() as usize).ghost_serialize().len() + - self@.subrange(0 as int, i as int).fold_left(0, |acc: int, x: T| acc + x.ghost_serialize().len()), + invariant + 0 <= i <= self.len(), + (forall|x: T| self@.contains(x) ==> #[trigger] 
x.is_marshalable()), + (self@.len() as usize).ghost_serialize().len() + self@.subrange( + 0 as int, + self@.len() as int, + ).fold_left(0, |acc: int, x: T| acc + x.ghost_serialize().len()) <= usize::MAX, + res == (self@.len() as usize).ghost_serialize().len() + self@.subrange( + 0 as int, + i as int, + ).fold_left(0, |acc: int, x: T| acc + x.ghost_serialize().len()), { - proof { - let f = |x: T| x.ghost_serialize(); - fun_ext_2::(|acc: int, x: T| acc + f(x).len(), |acc: int, x: T| acc + x.ghost_serialize().len()); - seq_lib_v::lemma_seq_fold_left_append_len_int_le::(self@, i + 1 as int, 0, f); - let sl = |x: T| x.ghost_serialize().len() as int; - let accl = |acc: int, x: T| acc + x.ghost_serialize().len() as int; - fun_ext_2::(accl, |acc: int, x: T| acc + sl(x)); - let s = self@.subrange(0 as int, i + 1 as int); - seq_lib_v::lemma_seq_fold_left_sum_right::(s, 0, sl); - assert(s.subrange(0, s.len() - 1 as int) =~= self@.subrange(0 as int, i as int)); - assert(self@.subrange(0 as int, self@.len() as int) =~= self@); - } - let old_res: Ghost = Ghost(res); - res = res + self[i].serialized_size(); - i = i + 1; - proof { - let sl = |x: T| x.ghost_serialize().len() as int; - fun_ext_2::(|acc: int, x: T| acc + x.ghost_serialize().len() as int, |acc: int, x: T| acc + sl(x)); - let s = self@.subrange(0 as int, i as int); - seq_lib_v::lemma_seq_fold_left_sum_right::(s, 0, sl); - assert(s.subrange(0, s.len() - 1) =~= self@.subrange(0 as int, i - 1 as int)); - } + proof { + let f = |x: T| x.ghost_serialize(); + fun_ext_2::( + |acc: int, x: T| acc + f(x).len(), + |acc: int, x: T| acc + x.ghost_serialize().len(), + ); + seq_lib_v::lemma_seq_fold_left_append_len_int_le::( + self@, + i + 1 as int, + 0, + f, + ); + let sl = |x: T| x.ghost_serialize().len() as int; + let accl = |acc: int, x: T| acc + x.ghost_serialize().len() as int; + fun_ext_2::(accl, |acc: int, x: T| acc + sl(x)); + let s = self@.subrange(0 as int, i + 1 as int); + seq_lib_v::lemma_seq_fold_left_sum_right::(s, 0, sl); + assert(s.subrange(0, s.len() - 1 as int) =~= self@.subrange(0 as int, i as int)); + assert(self@.subrange(0 as int, self@.len() as int) =~= self@); + } + let old_res: Ghost = Ghost(res); + res = res + self[i].serialized_size(); + i = i + 1; + proof { + let sl = |x: T| x.ghost_serialize().len() as int; + fun_ext_2::( + |acc: int, x: T| acc + x.ghost_serialize().len() as int, + |acc: int, x: T| acc + sl(x), + ); + let s = self@.subrange(0 as int, i as int); + seq_lib_v::lemma_seq_fold_left_sum_right::(s, 0, sl); + assert(s.subrange(0, s.len() - 1) =~= self@.subrange(0 as int, i - 1 as int)); + } } - proof { - let f = |x: T| x.ghost_serialize(); - seq_lib_v::lemma_seq_fold_left_append_len_int::(self@, Seq::::empty(), f); - fun_ext_2::, T, Seq>(|acc: Seq, x: T| acc + f(x), |acc: Seq, x: T| acc + x.ghost_serialize()); - fun_ext_2::(|acc: int, x: T| acc + f(x).len(), |acc: int, x: T| acc + x.ghost_serialize().len()); - assert(self@.subrange(0 as int, i as int) =~= self@); + let f = |x: T| x.ghost_serialize(); + seq_lib_v::lemma_seq_fold_left_append_len_int::(self@, Seq::::empty(), f); + fun_ext_2::, T, Seq>( + |acc: Seq, x: T| acc + f(x), + |acc: Seq, x: T| acc + x.ghost_serialize(), + ); + fun_ext_2::( + |acc: int, x: T| acc + f(x).len(), + |acc: int, x: T| acc + x.ghost_serialize().len(), + ); + assert(self@.subrange(0 as int, i as int) =~= self@); } - res - } + } - exec fn serialize(&self, data: &mut Vec) - // req, ens from trait - { + exec fn serialize(&self, data: &mut Vec) + // req, ens from trait + { let self_len = 
self.len(); self_len.serialize(data); let init: Ghost = Ghost(self_len.ghost_serialize().len() as int); - let mut i: usize = 0; - proof { - assert( - data@.subrange(old(data)@.len() as int, data@.len() as int) =~= self.len().ghost_serialize() + - self@.subrange(0, i as int).fold_left(Seq::::empty(), |acc: Seq, x: T| acc + x.ghost_serialize()) - ); + assert(data@.subrange(old(data)@.len() as int, data@.len() as int) + =~= self.len().ghost_serialize() + self@.subrange(0, i as int).fold_left( + Seq::::empty(), + |acc: Seq, x: T| acc + x.ghost_serialize(), + )); } - while i < self.len() - invariant - i <= self.len(), - data@.subrange(0, old(data)@.len() as int) == old(data)@, - data@.subrange(old(data)@.len() as int, data@.len() as int) == - self.len().ghost_serialize() + - self@.subrange(0, i as int).fold_left(Seq::::empty(), |acc: Seq, x: T| acc + x.ghost_serialize()), - forall |x: T| self@.contains(x) ==> #[trigger] x.is_marshalable(), - data@.len() >= old(data)@.len(), + invariant + i <= self.len(), + data@.subrange(0, old(data)@.len() as int) == old(data)@, + data@.subrange(old(data)@.len() as int, data@.len() as int) + == self.len().ghost_serialize() + self@.subrange(0, i as int).fold_left( + Seq::::empty(), + |acc: Seq, x: T| acc + x.ghost_serialize(), + ), + forall|x: T| self@.contains(x) ==> #[trigger] x.is_marshalable(), + data@.len() >= old(data)@.len(), { - self[i].serialize(data); - i = i + 1; - - proof { - assert(data@.subrange(0, old(data)@.len() as int) =~= old(data)@); - assert(data@.subrange(old(data)@.len() as int, data@.len() as int) == - self.len().ghost_serialize() + - self@.subrange(0, i as int).fold_left(Seq::::empty(), |acc: Seq, x: T| acc + x.ghost_serialize())) by { - let s = self@; - let emp = Seq::::empty(); - let accf = |acc: Seq, x: T| acc + x.ghost_serialize(); - let f = |x: T| x.ghost_serialize(); - let t = s.subrange(0, i as int); - - fun_ext_2(accf, |acc: Seq, x: T| acc + f(x)); - assert(t.subrange(0, t.len() - 1) =~= s.subrange(0, i - 1)); - seq_lib_v::lemma_seq_fold_left_append_right(t, emp, f); - assert( - data@.subrange(old(data)@.len() as int, data@.len() as int) =~= self.len().ghost_serialize() + - s.subrange(0, (i - 1) as int).fold_left(emp, accf) + - s.index((i - 1) as int).ghost_serialize() - ); - assert( - data@.subrange(old(data)@.len() as int, data@.len() as int) =~= self.len().ghost_serialize() + - t.fold_left(emp, accf) - ); + self[i].serialize(data); + i = i + 1; + proof { + assert(data@.subrange(0, old(data)@.len() as int) =~= old(data)@); + assert(data@.subrange(old(data)@.len() as int, data@.len() as int) + == self.len().ghost_serialize() + self@.subrange(0, i as int).fold_left( + Seq::::empty(), + |acc: Seq, x: T| acc + x.ghost_serialize(), + )) by { + let s = self@; + let emp = Seq::::empty(); + let accf = |acc: Seq, x: T| acc + x.ghost_serialize(); + let f = |x: T| x.ghost_serialize(); + let t = s.subrange(0, i as int); + fun_ext_2(accf, |acc: Seq, x: T| acc + f(x)); + assert(t.subrange(0, t.len() - 1) =~= s.subrange(0, i - 1)); + seq_lib_v::lemma_seq_fold_left_append_right(t, emp, f); + assert(data@.subrange(old(data)@.len() as int, data@.len() as int) + =~= self.len().ghost_serialize() + s.subrange(0, (i - 1) as int).fold_left( + emp, + accf, + ) + s.index((i - 1) as int).ghost_serialize()); + assert(data@.subrange(old(data)@.len() as int, data@.len() as int) + =~= self.len().ghost_serialize() + t.fold_left(emp, accf)); + } } - } } - proof { - assert(self@ =~= self@.subrange(0, self@.len() as int)); + assert(self@ =~= self@.subrange(0, 
self@.len() as int)); } - } + } - exec fn deserialize(data: &Vec, start: usize) -> (res: Option<(Self, usize)>) - // req, ens from trait - { - let (len, mid) = match usize::deserialize(data, start) { None => { - return None; - }, Some(x) => x, }; + exec fn deserialize(data: &Vec, start: usize) -> (res: Option< + (Self, usize), + >) + // req, ens from trait + { + let (len, mid) = match usize::deserialize(data, start) { + None => { + return None; + }, + Some(x) => x, + }; let len = len as usize; - let mut res: Vec = Vec::with_capacity(len); let mut i: usize = 0; let mut end = mid; - let emp: Ghost> = Ghost(Seq::::empty()); - let accf: Ghost, T) -> Seq> = Ghost(|acc: Seq, x: T| acc + x.ghost_serialize()); - + let accf: Ghost, T) -> Seq> = Ghost( + |acc: Seq, x: T| acc + x.ghost_serialize(), + ); proof { - assert(data@.subrange(mid as int, end as int) =~= emp@); - // assert(emp == seq_lib_v::seq_fold_left(res@, emp@, accf@)); - - lemma_auto_spec_u64_to_from_le_bytes(); + assert(data@.subrange(mid as int, end as int) =~= emp@); + // assert(emp == seq_lib_v::seq_fold_left(res@, emp@, accf@)); + lemma_auto_spec_u64_to_from_le_bytes(); } - while i < len - invariant - 0 <= i <= len, - res.is_marshalable(), - start <= mid <= end <= data@.len(), - data@.subrange(mid as int, end as int) == res@.fold_left(emp@, accf@), - res@.len() == i, - len.ghost_serialize().len() + - res@.fold_left(0, |acc: int, x: T| acc + x.ghost_serialize().len()) == end - start, - accf@ == |acc: Seq, x: T| acc + x.ghost_serialize(), - { - let (x, end1) = match T::deserialize(data, end) { None => { - return None; - }, Some(x) => x, }; - - let old_end: Ghost = Ghost(end as int); - let old_res: Ghost> = Ghost(res@); - - res.push(x); - end = end1; - i = i + 1; - - assert(data@.subrange(mid as int, end as int) == res@.fold_left(emp@, accf@)) by { - let f = |x: T| x.ghost_serialize(); - // assert(data@.subrange(mid as int, old_end@) == seq_lib_v::seq_fold_left(old_res@, emp@, accf@)); - seq_lib_v::lemma_seq_add_subrange::(data@, mid as int, old_end@, end as int); - // assert(data@.subrange(mid as int, end as int) == - // seq_lib_v::seq_fold_left(old_res@, emp@, accf@) + data@.subrange(old_end@, end as int)); - // assert(data@.subrange(mid as int, end as int) == - // seq_lib_v::seq_fold_left(old_res@, emp@, accf@) + x.ghost_serialize()); - // assert(f(x) == x.ghost_serialize()); - // assert(data@.subrange(mid as int, end as int) == - // seq_lib_v::seq_fold_left(old_res@, emp@, accf@) + f(x)); - seq_lib_v::lemma_seq_fold_left_append_right(res@, emp@, f); - assert(accf@ == (|acc: Seq, x: T| acc + f(x))) by { - fun_ext_2(accf@, |acc: Seq, x: T| acc + f(x)); + invariant + 0 <= i <= len, + res.is_marshalable(), + start <= mid <= end <= data@.len(), + data@.subrange(mid as int, end as int) == res@.fold_left(emp@, accf@), + res@.len() == i, + len.ghost_serialize().len() + res@.fold_left( + 0, + |acc: int, x: T| acc + x.ghost_serialize().len(), + ) == end - start, + accf@ == |acc: Seq, x: T| acc + x.ghost_serialize(), + { + let (x, end1) = match T::deserialize(data, end) { + None => { + return None; + }, + Some(x) => x, + }; + let old_end: Ghost = Ghost(end as int); + let old_res: Ghost> = Ghost(res@); + res.push(x); + end = end1; + i = i + 1; + assert(data@.subrange(mid as int, end as int) == res@.fold_left(emp@, accf@)) by { + let f = |x: T| x.ghost_serialize(); + // assert(data@.subrange(mid as int, old_end@) == seq_lib_v::seq_fold_left(old_res@, emp@, accf@)); + seq_lib_v::lemma_seq_add_subrange::(data@, mid as int, old_end@, end as int); 
+ // assert(data@.subrange(mid as int, end as int) == + // seq_lib_v::seq_fold_left(old_res@, emp@, accf@) + data@.subrange(old_end@, end as int)); + // assert(data@.subrange(mid as int, end as int) == + // seq_lib_v::seq_fold_left(old_res@, emp@, accf@) + x.ghost_serialize()); + // assert(f(x) == x.ghost_serialize()); + // assert(data@.subrange(mid as int, end as int) == + // seq_lib_v::seq_fold_left(old_res@, emp@, accf@) + f(x)); + seq_lib_v::lemma_seq_fold_left_append_right(res@, emp@, f); + assert(accf@ == (|acc: Seq, x: T| acc + f(x))) by { + fun_ext_2(accf@, |acc: Seq, x: T| acc + f(x)); + } + assert(old_res@ =~= res@.subrange(0, res@.len() - 1)); + // assert(data@.subrange(mid as int, end as int) == seq_lib_v::seq_fold_left(res@, emp@, accf@)); + } + assert(len.ghost_serialize().len() + res@.fold_left( + 0, + |acc: int, x: T| acc + x.ghost_serialize().len(), + ) == end - start) by { + let l = |x: T| x.ghost_serialize().len() as int; + let suml = |acc: int, x: T| acc + l(x); + seq_lib_v::lemma_seq_fold_left_sum_right(res@, 0, l); + fun_ext_2(|acc: int, x: T| acc + x.ghost_serialize().len(), suml); + assert(old_res@ =~= res@.subrange(0, res@.len() - 1)); + } + assert(len.ghost_serialize().len() == (res@.len() as usize).ghost_serialize().len()) + by { + lemma_auto_spec_u64_to_from_le_bytes(); } - assert(old_res@ =~= res@.subrange(0, res@.len() - 1)); - // assert(data@.subrange(mid as int, end as int) == seq_lib_v::seq_fold_left(res@, emp@, accf@)); - } - - assert (len.ghost_serialize().len() + - res@.fold_left(0, |acc: int, x: T| acc + x.ghost_serialize().len()) == end - start) by { - let l = |x: T| x.ghost_serialize().len() as int; - let suml = |acc: int, x: T| acc + l(x); - seq_lib_v::lemma_seq_fold_left_sum_right(res@, 0, l); - fun_ext_2(|acc: int, x: T| acc + x.ghost_serialize().len(), suml); - assert(old_res@ =~= res@.subrange(0, res@.len() - 1)); - } - - assert (len.ghost_serialize().len() == (res@.len() as usize).ghost_serialize().len()) by { - lemma_auto_spec_u64_to_from_le_bytes(); - } } assert(data@.subrange(start as int, end as int) == res.ghost_serialize()) by { - seq_lib_v::lemma_seq_add_subrange::(data@, start as int, mid as int, end as int); + seq_lib_v::lemma_seq_add_subrange::(data@, start as int, mid as int, end as int); } - Some((res, end)) - } - proof fn lemma_serialization_is_not_a_prefix_of(&self, other: &Self) - // req, ens from trait - { + } + + proof fn lemma_serialization_is_not_a_prefix_of( + &self, + other: &Self, + ) + // req, ens from trait + { lemma_auto_spec_u64_to_from_le_bytes(); if self.len() != other.len() { - assert(other.ghost_serialize().subrange(0, self.ghost_serialize().len() as int).subrange(0, 8) =~= other.ghost_serialize().subrange(0, 8)); - assert(self.ghost_serialize().subrange(0, 8) =~= (self.len() as usize).ghost_serialize()); - assert(other.ghost_serialize().subrange(0, 8) =~= (other.len() as usize).ghost_serialize()); + assert(other.ghost_serialize().subrange( + 0, + self.ghost_serialize().len() as int, + ).subrange(0, 8) =~= other.ghost_serialize().subrange(0, 8)); + assert(self.ghost_serialize().subrange(0, 8) =~= ( + self.len() as usize).ghost_serialize()); + assert(other.ghost_serialize().subrange(0, 8) =~= ( + other.len() as usize).ghost_serialize()); } else { - let not_view_equal_at_idx = |i:int| !self@[i].view_equal(&other@[i]); - let idx = { - let temp = choose |i:int| 0 <= i < self@.len() && !#[trigger] self@[i].view_equal(&other@[i]); - assert (not_view_equal_at_idx(temp)); // OBSERVE - choose_smallest(0, self@.len() as int, 
not_view_equal_at_idx) - }; - let emp = Seq::::empty(); - let g = |x: T| x.ghost_serialize(); - let accg = |acc: Seq, x: T| acc + g(x); - let accgs = |acc: Seq, x: T| acc + x.ghost_serialize(); - let gs = |s: Seq, start: int, end: int| s.subrange(start, end).fold_left(emp, accg); - fun_ext_2(accg, accgs); - assert(self.ghost_serialize() =~= ((self@.len() as usize).ghost_serialize() + gs(self@, 0, idx)) + g(self@[idx]) + gs(self@, idx + 1, self.len() as int)) by { - assert(gs(self@, 0, self.len() as int) == gs(self@, 0, idx) + gs(self@, idx, self.len() as int)) by { - let s1 = self@.subrange(0, idx); - let s2 = self@.subrange(idx, self.len() as int); - lemma_fold_left_append_merge(s1, s2, g); - assert(self@.subrange(0, self.len() as int) =~= s1 + s2); - } - assert(gs(self@, idx, self.len() as int) == g(self@[idx]) + gs(self@, idx + 1, self.len() as int)) by { - let s1 = self@.subrange(idx, idx + 1); - let s2 = self@.subrange(idx + 1, self.len() as int); - lemma_fold_left_append_merge(s1, s2, g); - assert(self@.subrange(idx, self.len() as int) =~= s1 + s2); - assert(self@.subrange(idx, idx + 1) =~= seq![self@[idx]]); - reveal_with_fuel(Seq::fold_left, 2); - assert(emp + g(self@[idx]) =~= g(self@[idx])); - } - assert((self@.len() as usize).ghost_serialize() + gs(self@, 0, self.len() as int) == self.ghost_serialize()) by { - assert(self@.subrange(0, self.len() as int) =~= self@); - } - } - assert(other.ghost_serialize() =~= ((other@.len() as usize).ghost_serialize() + gs(other@, 0, idx)) + g(other@[idx]) + gs(other@, idx + 1, other.len() as int)) by { - assert(gs(other@, 0, other.len() as int) == gs(other@, 0, idx) + gs(other@, idx, other.len() as int)) by { - let s1 = other@.subrange(0, idx); - let s2 = other@.subrange(idx, other.len() as int); - lemma_fold_left_append_merge(s1, s2, g); - assert(other@.subrange(0, other.len() as int) =~= s1 + s2); - } - assert(gs(other@, idx, other.len() as int) == g(other@[idx]) + gs(other@, idx + 1, other.len() as int)) by { - let s1 = other@.subrange(idx, idx + 1); - let s2 = other@.subrange(idx + 1, other.len() as int); - lemma_fold_left_append_merge(s1, s2, g); - assert(other@.subrange(idx, other.len() as int) =~= s1 + s2); - assert(other@.subrange(idx, idx + 1) =~= seq![other@[idx]]); - reveal_with_fuel(Seq::fold_left, 2); - assert(emp + g(other@[idx]) =~= g(other@[idx])); + let not_view_equal_at_idx = |i: int| !self@[i].view_equal(&other@[i]); + let idx = { + let temp = choose|i: int| + 0 <= i < self@.len() && !#[trigger] self@[i].view_equal(&other@[i]); + assert(not_view_equal_at_idx(temp)); // OBSERVE + choose_smallest(0, self@.len() as int, not_view_equal_at_idx) + }; + let emp = Seq::::empty(); + let g = |x: T| x.ghost_serialize(); + let accg = |acc: Seq, x: T| acc + g(x); + let accgs = |acc: Seq, x: T| acc + x.ghost_serialize(); + let gs = |s: Seq, start: int, end: int| s.subrange(start, end).fold_left(emp, accg); + fun_ext_2(accg, accgs); + assert(self.ghost_serialize() =~= ((self@.len() as usize).ghost_serialize() + gs( + self@, + 0, + idx, + )) + g(self@[idx]) + gs(self@, idx + 1, self.len() as int)) by { + assert(gs(self@, 0, self.len() as int) == gs(self@, 0, idx) + gs( + self@, + idx, + self.len() as int, + )) by { + let s1 = self@.subrange(0, idx); + let s2 = self@.subrange(idx, self.len() as int); + lemma_fold_left_append_merge(s1, s2, g); + assert(self@.subrange(0, self.len() as int) =~= s1 + s2); + } + assert(gs(self@, idx, self.len() as int) == g(self@[idx]) + gs( + self@, + idx + 1, + self.len() as int, + )) by { + let s1 = 
self@.subrange(idx, idx + 1); + let s2 = self@.subrange(idx + 1, self.len() as int); + lemma_fold_left_append_merge(s1, s2, g); + assert(self@.subrange(idx, self.len() as int) =~= s1 + s2); + assert(self@.subrange(idx, idx + 1) =~= seq![self@[idx]]); + reveal_with_fuel(Seq::fold_left, 2); + assert(emp + g(self@[idx]) =~= g(self@[idx])); + } + assert((self@.len() as usize).ghost_serialize() + gs(self@, 0, self.len() as int) + == self.ghost_serialize()) by { + assert(self@.subrange(0, self.len() as int) =~= self@); + } } - assert((other@.len() as usize).ghost_serialize() + gs(other@, 0, other.len() as int) == other.ghost_serialize()) by { - assert(other@.subrange(0, other.len() as int) =~= other@); + assert(other.ghost_serialize() =~= ((other@.len() as usize).ghost_serialize() + gs( + other@, + 0, + idx, + )) + g(other@[idx]) + gs(other@, idx + 1, other.len() as int)) by { + assert(gs(other@, 0, other.len() as int) == gs(other@, 0, idx) + gs( + other@, + idx, + other.len() as int, + )) by { + let s1 = other@.subrange(0, idx); + let s2 = other@.subrange(idx, other.len() as int); + lemma_fold_left_append_merge(s1, s2, g); + assert(other@.subrange(0, other.len() as int) =~= s1 + s2); + } + assert(gs(other@, idx, other.len() as int) == g(other@[idx]) + gs( + other@, + idx + 1, + other.len() as int, + )) by { + let s1 = other@.subrange(idx, idx + 1); + let s2 = other@.subrange(idx + 1, other.len() as int); + lemma_fold_left_append_merge(s1, s2, g); + assert(other@.subrange(idx, other.len() as int) =~= s1 + s2); + assert(other@.subrange(idx, idx + 1) =~= seq![other@[idx]]); + reveal_with_fuel(Seq::fold_left, 2); + assert(emp + g(other@[idx]) =~= g(other@[idx])); + } + assert((other@.len() as usize).ghost_serialize() + gs(other@, 0, other.len() as int) + == other.ghost_serialize()) by { + assert(other@.subrange(0, other.len() as int) =~= other@); + } } - } - assert((self@.len() as usize).ghost_serialize() == (other@.len() as usize).ghost_serialize()); - assert(gs(self@, 0, idx) == gs(other@, 0, idx)) by { - assert forall |i:int| 0 <= i < idx implies g(self@.subrange(0, idx)[i]) == g(other@.subrange(0, idx)[i]) by { - assert(self@.subrange(0, idx)[i] == self@[i] && other@.subrange(0, idx)[i] == other@[i]); - assert(!not_view_equal_at_idx(i)); - self@[i].lemma_same_views_serialize_the_same(&other@[i]); + assert((self@.len() as usize).ghost_serialize() == ( + other@.len() as usize).ghost_serialize()); + assert(gs(self@, 0, idx) == gs(other@, 0, idx)) by { + assert forall|i: int| 0 <= i < idx implies g(self@.subrange(0, idx)[i]) == g( + other@.subrange(0, idx)[i], + ) by { + assert(self@.subrange(0, idx)[i] == self@[i] && other@.subrange(0, idx)[i] + == other@[i]); + assert(!not_view_equal_at_idx(i)); + self@[i].lemma_same_views_serialize_the_same(&other@[i]); + } + lemma_fold_left_on_equiv_seqs( + self@.subrange(0, idx), + other@.subrange(0, idx), + |x: T, y: T| g(x) == g(y), + emp, + accg, + ); } - lemma_fold_left_on_equiv_seqs(self@.subrange(0, idx), other@.subrange(0, idx), |x: T, y: T| g(x) == g(y), emp, accg); - } - assert( - ((self@.len() as usize).ghost_serialize() + gs(self@, 0, idx)) - == - ((other@.len() as usize).ghost_serialize() + gs(other@, 0, idx)) - ); - let prefix_len = ((self@.len() as usize).ghost_serialize() + gs(self@, 0, idx)).len(); - let i = if g(self@[idx]).len() <= g(other@[idx]).len() { - self@[idx].lemma_serialization_is_not_a_prefix_of(&other@[idx]); - some_differing_index_for_unequal_seqs(g(self@[idx]), g(other@[idx]).subrange(0, g(self@[idx]).len() as int)) - } else { - 
self@[idx].lemma_view_equal_symmetric(&other@[idx]); - other@[idx].lemma_serialization_is_not_a_prefix_of(&self@[idx]); - some_differing_index_for_unequal_seqs(g(other@[idx]), g(self@[idx]).subrange(0, g(other@[idx]).len() as int)) - }; - assert(g(self@[idx])[i] != g(other@[idx])[i]); - assert(self.ghost_serialize()[prefix_len + i] != other.ghost_serialize()[prefix_len + i]); - assert(self.ghost_serialize() != other.ghost_serialize().subrange(0, self.ghost_serialize().len() as int)); - } - } - proof fn lemma_same_views_serialize_the_same(self: &Self, other: &Self) - // req, ens from trait - { + assert(((self@.len() as usize).ghost_serialize() + gs(self@, 0, idx)) == (( + other@.len() as usize).ghost_serialize() + gs(other@, 0, idx))); + let prefix_len = ((self@.len() as usize).ghost_serialize() + gs(self@, 0, idx)).len(); + let i = if g(self@[idx]).len() <= g(other@[idx]).len() { + self@[idx].lemma_serialization_is_not_a_prefix_of(&other@[idx]); + some_differing_index_for_unequal_seqs( + g(self@[idx]), + g(other@[idx]).subrange(0, g(self@[idx]).len() as int), + ) + } else { + self@[idx].lemma_view_equal_symmetric(&other@[idx]); + other@[idx].lemma_serialization_is_not_a_prefix_of(&self@[idx]); + some_differing_index_for_unequal_seqs( + g(other@[idx]), + g(self@[idx]).subrange(0, g(other@[idx]).len() as int), + ) + }; + assert(g(self@[idx])[i] != g(other@[idx])[i]); + assert(self.ghost_serialize()[prefix_len + i] != other.ghost_serialize()[prefix_len + + i]); + assert(self.ghost_serialize() != other.ghost_serialize().subrange( + 0, + self.ghost_serialize().len() as int, + )); + } + } + + proof fn lemma_same_views_serialize_the_same( + self: &Self, + other: &Self, + ) + // req, ens from trait + { lemma_auto_spec_u64_to_from_le_bytes(); assert(self@.len() == other@.len()); - assert forall |i: int| 0 <= i < self@.len() implies - #[trigger] self@[i].is_marshalable() == other@[i].is_marshalable() && - #[trigger] self@[i].ghost_serialize() == other@[i].ghost_serialize() by { + assert forall|i: int| 0 <= i < self@.len() implies #[trigger] self@[i].is_marshalable() + == other@[i].is_marshalable() && #[trigger] self@[i].ghost_serialize() + == other@[i].ghost_serialize() by { self@[i].lemma_same_views_serialize_the_same(&other@[i]); } let veq = |x: T, y: T| x.view_equal(&y); assert(self.is_marshalable() == other.is_marshalable()) by { - assert((self@.len() <= usize::MAX) == (other@.len() <= usize::MAX)); - if (forall |x: T| self@.contains(x) ==> #[trigger] x.is_marshalable()) { - assert forall |y: T| other@.contains(y) implies #[trigger] y.is_marshalable() by { - let i = choose |i:int| 0 <= i < other@.len() && other@[i] == y; - self@[i].lemma_same_views_serialize_the_same(&other@[i]); - } - } else { - let i = choose |i:int| 0 <= i < self@.len() && !(#[trigger] self@[i].is_marshalable()); - self@[i].lemma_same_views_serialize_the_same(&other@[i]); - } - assert((self@.len() as usize).ghost_serialize().len() == - (other@.len() as usize).ghost_serialize().len()); - let f = |acc: int, x: T| acc + x.ghost_serialize().len(); - assert forall |b: int, a1: T, a2: T| #[trigger] veq(a1, a2) implies #[trigger] f(b, a1) == f(b, a2) by { - a1.lemma_same_views_serialize_the_same(&a2); - } - seq_lib_v::lemma_fold_left_on_equiv_seqs(self@, other@, veq, 0, f); - assert(self@.fold_left(0, f) == other@.fold_left(0, f)); + assert((self@.len() <= usize::MAX) == (other@.len() <= usize::MAX)); + if (forall|x: T| self@.contains(x) ==> #[trigger] x.is_marshalable()) { + assert forall|y: T| other@.contains(y) implies #[trigger] 
y.is_marshalable() by { + let i = choose|i: int| 0 <= i < other@.len() && other@[i] == y; + self@[i].lemma_same_views_serialize_the_same(&other@[i]); + } + } else { + let i = choose|i: int| + 0 <= i < self@.len() && !(#[trigger] self@[i].is_marshalable()); + self@[i].lemma_same_views_serialize_the_same(&other@[i]); + } + assert((self@.len() as usize).ghost_serialize().len() == ( + other@.len() as usize).ghost_serialize().len()); + let f = |acc: int, x: T| acc + x.ghost_serialize().len(); + assert forall|b: int, a1: T, a2: T| #[trigger] veq(a1, a2) implies #[trigger] f(b, a1) + == f(b, a2) by { + a1.lemma_same_views_serialize_the_same(&a2); + } + seq_lib_v::lemma_fold_left_on_equiv_seqs(self@, other@, veq, 0, f); + assert(self@.fold_left(0, f) == other@.fold_left(0, f)); }; assert(self.ghost_serialize() == other.ghost_serialize()) by { - let f = |acc: Seq, x: T| acc + x.ghost_serialize(); - assert forall |b: Seq, a1: T, a2: T| #[trigger] veq(a1, a2) implies #[trigger] f(b, a1) == f(b, a2) by { - a1.lemma_same_views_serialize_the_same(&a2); - } - seq_lib_v::lemma_fold_left_on_equiv_seqs(self@, other@, veq, Seq::::empty(), f); + let f = |acc: Seq, x: T| acc + x.ghost_serialize(); + assert forall|b: Seq, a1: T, a2: T| #[trigger] veq(a1, a2) implies #[trigger] f( + b, + a1, + ) == f(b, a2) by { + a1.lemma_same_views_serialize_the_same(&a2); + } + seq_lib_v::lemma_fold_left_on_equiv_seqs(self@, other@, veq, Seq::::empty(), f); } - } - proof fn lemma_serialize_injective(self: &Self, other: &Self) - // req, ens from trait - { + } + + proof fn lemma_serialize_injective( + self: &Self, + other: &Self, + ) + // req, ens from trait + { if !self.view_equal(other) { - self.lemma_serialization_is_not_a_prefix_of(other); - assert(other.ghost_serialize().subrange(0, self.ghost_serialize().len() as int) - =~= other.ghost_serialize()); // OBSERVE + self.lemma_serialization_is_not_a_prefix_of(other); + assert(other.ghost_serialize().subrange(0, self.ghost_serialize().len() as int) + =~= other.ghost_serialize()); // OBSERVE } - } } +} - - // NOTE: This can be replaced with a `define_struct_and_derive_marshalable` invocation - impl Marshalable for (T, U) { - open spec fn view_equal(&self, other: &Self) -> bool { +// NOTE: This can be replaced with a `define_struct_and_derive_marshalable` invocation +impl Marshalable for (T, U) { + open spec fn view_equal(&self, other: &Self) -> bool { self.0.view_equal(&other.0) && self.1.view_equal(&other.1) - } - proof fn lemma_view_equal_symmetric(&self, other: &Self) - // req, ens from trait - { + } + + proof fn lemma_view_equal_symmetric( + &self, + other: &Self, + ) + // req, ens from trait + { self.0.lemma_view_equal_symmetric(&other.0); self.1.lemma_view_equal_symmetric(&other.1); - } - open spec fn is_marshalable(&self) -> bool { + } + + open spec fn is_marshalable(&self) -> bool { &&& self.0.is_marshalable() &&& self.1.is_marshalable() &&& self.0.ghost_serialize().len() + self.1.ghost_serialize().len() <= usize::MAX - } + } - exec fn _is_marshalable(&self) -> bool - // req, ens from trait - { - self.0._is_marshalable() && self.1._is_marshalable() && - self.0.serialized_size() <= usize::MAX - self.1.serialized_size() - } + exec fn _is_marshalable(&self) -> bool + // req, ens from trait + { + self.0._is_marshalable() && self.1._is_marshalable() && self.0.serialized_size() + <= usize::MAX - self.1.serialized_size() + } - open spec fn ghost_serialize(&self) -> Seq { + open spec fn ghost_serialize(&self) -> Seq { self.0.ghost_serialize() + self.1.ghost_serialize() - } + } 
- exec fn serialized_size(&self) -> (res: usize) - // req, ens from trait - { + exec fn serialized_size(&self) -> (res: usize) + // req, ens from trait + { self.0.serialized_size() + self.1.serialized_size() - } + } - exec fn serialize(&self, data: &mut Vec) - // req, ens from trait - { + exec fn serialize(&self, data: &mut Vec) + // req, ens from trait + { self.0.serialize(data); // assert(data@.subrange(0, old(data)@.len() as int) == old(data)@); - let mid_data_len: Ghost = Ghost(data@.len() as int); - self.1.serialize(data); - proof { - assert(data@.subrange(0, old(data)@.len() as int) =~= data@.subrange(0, mid_data_len@).subrange(0, old(data)@.len() as int)); - // assert(data@.subrange(0, old(data)@.len() as int) == old(data)@); - assert(data@.subrange(old(data)@.len() as int, mid_data_len@) =~= data@.subrange(0, mid_data_len@).subrange(old(data)@.len() as int, mid_data_len@)); - // assert(data@.subrange(old(data)@.len() as int, mid_data_len@) == self.0.ghost_serialize()); - // assert(data@.subrange(mid_data_len@, data@.len() as int) == self.1.ghost_serialize()); - assert(data@.subrange(old(data)@.len() as int, data@.len() as int) =~= self.0.ghost_serialize() + self.1.ghost_serialize()); - } - } - - exec fn deserialize(data: &Vec, start: usize) -> (res: Option<(Self, usize)>) - // req, ens from trait - { - let (t, mid) = match T::deserialize(data, start) { None => { - return None; - }, Some(x) => x, }; - let (u, end) = match U::deserialize(data, mid) { None => { - return None; - }, Some(x) => x, }; + assert(data@.subrange(0, old(data)@.len() as int) =~= data@.subrange( + 0, + mid_data_len@, + ).subrange(0, old(data)@.len() as int)); + // assert(data@.subrange(0, old(data)@.len() as int) == old(data)@); + assert(data@.subrange(old(data)@.len() as int, mid_data_len@) =~= data@.subrange( + 0, + mid_data_len@, + ).subrange(old(data)@.len() as int, mid_data_len@)); + // assert(data@.subrange(old(data)@.len() as int, mid_data_len@) == self.0.ghost_serialize()); + // assert(data@.subrange(mid_data_len@, data@.len() as int) == self.1.ghost_serialize()); + assert(data@.subrange(old(data)@.len() as int, data@.len() as int) + =~= self.0.ghost_serialize() + self.1.ghost_serialize()); + } + } + + exec fn deserialize(data: &Vec, start: usize) -> (res: Option< + (Self, usize), + >) + // req, ens from trait + { + let (t, mid) = match T::deserialize(data, start) { + None => { + return None; + }, + Some(x) => x, + }; + let (u, end) = match U::deserialize(data, mid) { + None => { + return None; + }, + Some(x) => x, + }; let p = (t, u); proof { - assert(data@.subrange(start as int, end as int) =~= p.ghost_serialize()); + assert(data@.subrange(start as int, end as int) =~= p.ghost_serialize()); } Some((p, end)) - } + } - proof fn lemma_serialization_is_not_a_prefix_of(&self, other: &Self) - // req, ens from trait - { + proof fn lemma_serialization_is_not_a_prefix_of( + &self, + other: &Self, + ) + // req, ens from trait + { let si = self.ghost_serialize(); let so = other.ghost_serialize(); let mid: int = 0; if !self.0.view_equal(&other.0) { - let (x0, x1) = (self.0, other.0); - let (s0, s1) = (x0.ghost_serialize(), x1.ghost_serialize()); - x0.lemma_view_equal_symmetric(&x1); - let (x0, x1, s0, s1) = if s0.len() <= s1.len() { - (x0, x1, s0, s1) - } else { - (x1, x0, s1, s0) - }; - x0.lemma_serialization_is_not_a_prefix_of(&x1); - assert(!(s0 =~= s1.subrange(0, s0.len() as int))); // OBSERVE - let idx = choose |i:int| 0 <= i < s0.len() as int && s0[i] != s1[i]; - if si == so.subrange(0, si.len() as int) { - 
assert(si[mid + idx] == so[mid + idx]); // OBSERVE - } - return; + let (x0, x1) = (self.0, other.0); + let (s0, s1) = (x0.ghost_serialize(), x1.ghost_serialize()); + x0.lemma_view_equal_symmetric(&x1); + let (x0, x1, s0, s1) = if s0.len() <= s1.len() { + (x0, x1, s0, s1) + } else { + (x1, x0, s1, s0) + }; + x0.lemma_serialization_is_not_a_prefix_of(&x1); + assert(!(s0 =~= s1.subrange(0, s0.len() as int))); // OBSERVE + let idx = choose|i: int| 0 <= i < s0.len() as int && s0[i] != s1[i]; + if si == so.subrange(0, si.len() as int) { + assert(si[mid + idx] == so[mid + idx]); // OBSERVE + } + return ; } else { - self.0.lemma_same_views_serialize_the_same(&other.0); + self.0.lemma_same_views_serialize_the_same(&other.0); } let mid = mid + self.0.ghost_serialize().len(); if !self.1.view_equal(&other.1) { - let (x0, x1) = (self.1, other.1); - let (s0, s1) = (x0.ghost_serialize(), x1.ghost_serialize()); - x0.lemma_view_equal_symmetric(&x1); - let (x0, x1, s0, s1) = if s0.len() <= s1.len() { - (x0, x1, s0, s1) - } else { - (x1, x0, s1, s0) - }; - x0.lemma_serialization_is_not_a_prefix_of(&x1); - assert(!(s0 =~= s1.subrange(0, s0.len() as int))); // OBSERVE - let idx = choose |i:int| 0 <= i < s0.len() as int && s0[i] != s1[i]; - if si == so.subrange(0, si.len() as int) { - assert(si[mid + idx] == so[mid + idx]); // OBSERVE - } - return; + let (x0, x1) = (self.1, other.1); + let (s0, s1) = (x0.ghost_serialize(), x1.ghost_serialize()); + x0.lemma_view_equal_symmetric(&x1); + let (x0, x1, s0, s1) = if s0.len() <= s1.len() { + (x0, x1, s0, s1) + } else { + (x1, x0, s1, s0) + }; + x0.lemma_serialization_is_not_a_prefix_of(&x1); + assert(!(s0 =~= s1.subrange(0, s0.len() as int))); // OBSERVE + let idx = choose|i: int| 0 <= i < s0.len() as int && s0[i] != s1[i]; + if si == so.subrange(0, si.len() as int) { + assert(si[mid + idx] == so[mid + idx]); // OBSERVE + } + return ; } else { - self.1.lemma_same_views_serialize_the_same(&other.1); + self.1.lemma_same_views_serialize_the_same(&other.1); } - } + } - proof fn lemma_same_views_serialize_the_same(self: &Self, other: &Self) - // req, ens from trait - { + proof fn lemma_same_views_serialize_the_same( + self: &Self, + other: &Self, + ) + // req, ens from trait + { self.0.lemma_same_views_serialize_the_same(&other.0); self.1.lemma_same_views_serialize_the_same(&other.1); - } + } - proof fn lemma_serialize_injective(self: &Self, other: &Self) { + proof fn lemma_serialize_injective(self: &Self, other: &Self) { if !self.view_equal(other) { - self.lemma_serialization_is_not_a_prefix_of(other); - assert(other.ghost_serialize().subrange(0, self.ghost_serialize().len() as int) - =~= other.ghost_serialize()); // OBSERVE + self.lemma_serialization_is_not_a_prefix_of(other); + assert(other.ghost_serialize().subrange(0, self.ghost_serialize().len() as int) + =~= other.ghost_serialize()); // OBSERVE } - } } +} - /// A convenience macro to produce the triangle necessary to confirm that there are no overflows - /// that occur when adding up a bunch of different expressions together. - #[allow(unused_macros)] +/// A convenience macro to produce the triangle necessary to confirm that there are no overflows +/// that occur when adding up a bunch of different expressions together. +#[allow(unused_macros)] macro_rules! 
no_usize_overflows { ($e:expr,) => { true @@ -7625,14 +8942,15 @@ mod marshal_v { no_usize_overflows!(@@internal ($total + $a), $($rest),*) }; } - pub(crate) use no_usize_overflows; - /// `derive_marshalable_for_struct` is a macro that implements [`Marshalable`] for a struct. You - /// probably want to use [`define_struct_and_derive_marshalable`] wherever possible instead, since - /// it prevents code duplication. However, if you are (for some reason) unable to define at the - /// struct definition site, then this macro lets you derive the macro by simply (textually) - /// copy-pasting the struct. - #[allow(unused_macros)] +pub(crate) use no_usize_overflows; + +/// `derive_marshalable_for_struct` is a macro that implements [`Marshalable`] for a struct. You +/// probably want to use [`define_struct_and_derive_marshalable`] wherever possible instead, since +/// it prevents code duplication. However, if you are (for some reason) unable to define at the +/// struct definition site, then this macro lets you derive the macro by simply (textually) +/// copy-pasting the struct. +#[allow(unused_macros)] macro_rules! derive_marshalable_for_struct { { $( #[$attr:meta] )* @@ -7754,26 +9072,27 @@ mod marshal_v { } }; } - pub(crate) use derive_marshalable_for_struct; - /// `define_struct_and_derive_marshalable` is a macro that, well, defines an struct, and implements - /// [`Marshalable`] on it. This is intended to make it easier to produce serializers and - /// deserializers for arbitrary types (including polymorphic ones). - /// - /// See also [`define_enum_and_derive_marshalable`] for the equivalent enum-based macro. - /// - /// Example usage: - /// - /// ``` - /// define_struct_and_derive_marshalable! { - /// struct Example { - /// t: T, - /// u: U, - /// v: Vec::, - /// } - /// } - /// ``` - #[allow(unused_macros)] +pub(crate) use derive_marshalable_for_struct; + +/// `define_struct_and_derive_marshalable` is a macro that, well, defines an struct, and implements +/// [`Marshalable`] on it. This is intended to make it easier to produce serializers and +/// deserializers for arbitrary types (including polymorphic ones). +/// +/// See also [`define_enum_and_derive_marshalable`] for the equivalent enum-based macro. +/// +/// Example usage: +/// +/// ``` +/// define_struct_and_derive_marshalable! { +/// struct Example { +/// t: T, +/// u: U, +/// v: Vec::, +/// } +/// } +/// ``` +#[allow(unused_macros)] macro_rules! define_struct_and_derive_marshalable { { $( #[$attr:meta] )* @@ -7806,14 +9125,15 @@ mod marshal_v { }; } - pub(crate) use define_struct_and_derive_marshalable; - /// `derive_marshalable_for_enum` is a macro that implements [`Marshalable`] for a enum. You - /// probably want to use [`define_enum_and_derive_marshalable`] wherever possible instead, since it - /// prevents code duplication. However, if you are (for some reason) unable to define at the enum - /// definition site, then this macro lets you derive the macro by simply (textually) copy-pasting - /// the enum. - macro_rules! derive_marshalable_for_enum { +pub(crate) use define_struct_and_derive_marshalable; + +/// `derive_marshalable_for_enum` is a macro that implements [`Marshalable`] for a enum. You +/// probably want to use [`define_enum_and_derive_marshalable`] wherever possible instead, since it +/// prevents code duplication. However, if you are (for some reason) unable to define at the enum +/// definition site, then this macro lets you derive the macro by simply (textually) copy-pasting +/// the enum. +macro_rules! 
derive_marshalable_for_enum { { $( #[$attr:meta] )* $pub:vis @@ -8021,35 +9341,36 @@ mod marshal_v { } }; } - pub(crate) use derive_marshalable_for_enum; - /// `define_enum_and_derive_marshalable` is a macro that, well, defines an enum, and implements - /// [`Marshalable`] on it. This is intended to make it easier to produce serializers and - /// deserializers for arbitrary types (including polymorphic ones). - /// - /// It currently supports enums that have a maximum of 256 variants, since it uses a 1-byte tag to - /// pick between the variants. - /// - /// See also [`define_struct_and_derive_marshalable`] for the equivalent struct-based macro. - /// - /// Example usage: - /// - /// ``` - /// define_enum_and_derive_marshalable! { - /// enum Example { - /// #[tag = 0] - /// U { u: u64, v: u64, x: Vec::, z: T }, - /// #[tag = 1] - /// V { zz: U }, - /// } - /// } - /// ``` - /// - /// Note: Currently due to https://github.com/verus-lang/verus/issues/444, passingly only a - /// single-variant enum will cause Verus to panic. But if you wanted just one variant, why are you - /// wasting a byte in the serialized output? Just use [`define_struct_and_derive_marshalable`] - /// instead! - #[allow(unused_macros)] +pub(crate) use derive_marshalable_for_enum; + +/// `define_enum_and_derive_marshalable` is a macro that, well, defines an enum, and implements +/// [`Marshalable`] on it. This is intended to make it easier to produce serializers and +/// deserializers for arbitrary types (including polymorphic ones). +/// +/// It currently supports enums that have a maximum of 256 variants, since it uses a 1-byte tag to +/// pick between the variants. +/// +/// See also [`define_struct_and_derive_marshalable`] for the equivalent struct-based macro. +/// +/// Example usage: +/// +/// ``` +/// define_enum_and_derive_marshalable! { +/// enum Example { +/// #[tag = 0] +/// U { u: u64, v: u64, x: Vec::, z: T }, +/// #[tag = 1] +/// V { zz: U }, +/// } +/// } +/// ``` +/// +/// Note: Currently due to https://github.com/verus-lang/verus/issues/444, passingly only a +/// single-variant enum will cause Verus to panic. But if you wanted just one variant, why are you +/// wasting a byte in the serialized output? Just use [`define_struct_and_derive_marshalable`] +/// instead! +#[allow(unused_macros)] macro_rules! define_enum_and_derive_marshalable { { $( #[$attr:meta] )* @@ -8086,15 +9407,16 @@ mod marshal_v { } }; } - pub(crate) use define_enum_and_derive_marshalable; - /// A macro that conveniently lets one produce a marshaler for a type `$type` by showing a - /// bijection to another (already known to be marshalable) type `$marshalable`. - /// - /// This macro only needs to be provided with a forward and backward function (and a few new - /// identifiers, just to keep things distinct from things already known) and it'll produce - /// everything necessary to make the type marshalable. - #[allow(unused_macros)] +pub(crate) use define_enum_and_derive_marshalable; + +/// A macro that conveniently lets one produce a marshaler for a type `$type` by showing a +/// bijection to another (already known to be marshalable) type `$marshalable`. +/// +/// This macro only needs to be provided with a forward and backward function (and a few new +/// identifiers, just to keep things distinct from things already known) and it'll produce +/// everything necessary to make the type marshalable. +#[allow(unused_macros)] macro_rules! 
marshalable_by_bijection { { [$type:ty] <-> [$marshalable:ty]; @@ -8180,9 +9502,10 @@ mod marshal_v { } }; } - pub(crate) use marshalable_by_bijection; - } // verus! +pub(crate) use marshalable_by_bijection; + +} // verus! } mod message_t { @@ -8198,34 +9521,17 @@ mod message_t { verus! { - #[is_variant] - pub enum Message { - GetRequest { - key: AbstractKey, - }, - SetRequest { - key: AbstractKey, - value: Option, - }, - Reply { - key: AbstractKey, - value: Option, - }, - Redirect { - key: AbstractKey, - id: AbstractEndPoint, - }, - Shard { - range: KeyRange, - recipient: AbstractEndPoint, - }, - Delegate { - range: KeyRange, - h: Hashtable, - }, - } +#[is_variant] +pub enum Message { + GetRequest { key: AbstractKey }, + SetRequest { key: AbstractKey, value: Option }, + Reply { key: AbstractKey, value: Option }, + Redirect { key: AbstractKey, id: AbstractEndPoint }, + Shard { range: KeyRange, recipient: AbstractEndPoint }, + Delegate { range: KeyRange, h: Hashtable }, +} - } +} // verus! } mod net_sht_v { @@ -8261,332 +9567,374 @@ mod net_sht_v { verus! { - #[is_variant] - pub enum ReceiveResult { - Fail, - Timeout, - Packet{cpacket: CPacket}, - } - - // zoology ontology: - // - // Packet: Message (parsed payload), AbstractEndPoint - // (seqs) - // CPacket: CMessage (parsed payload), raw endpoint - // (vecs) - // ReceiveResult wraps a CPacket. - // NetEvent: LIoOp> - // NetPacket: LPacket> - // NetcReceiveResult: Vec - - pub type LSHTPacket = LPacket>; +#[is_variant] +pub enum ReceiveResult { + Fail, + Timeout, + Packet { cpacket: CPacket }, +} - // Ports Impl/SHT/PacketParsing.i.dfy :: NetPacketIsAbstractable - pub open spec fn net_packet_is_abstractable(net: NetPacket) -> bool - { - true - } +// zoology ontology: +// +// Packet: Message (parsed payload), AbstractEndPoint +// (seqs) +// CPacket: CMessage (parsed payload), raw endpoint +// (vecs) +// ReceiveResult wraps a CPacket. 
+// NetEvent: LIoOp> +// NetPacket: LPacket> +// NetcReceiveResult: Vec +pub type LSHTPacket = LPacket>; + +// Ports Impl/SHT/PacketParsing.i.dfy :: NetPacketIsAbstractable +pub open spec fn net_packet_is_abstractable(net: NetPacket) -> bool { + true +} - // Translates Impl/LiveSHT/NetSHT.i.dfy :: NetEventIsAbstractable - pub open spec fn net_event_is_abstractable(evt: NetEvent) -> bool - { - match evt { - LIoOp::>::Send{s} => net_packet_is_abstractable(s), - LIoOp::>::Receive{r} => net_packet_is_abstractable(r), - LIoOp::>::TimeoutReceive{} => true, - LIoOp::>::ReadClock{t} => true, - } +// Translates Impl/LiveSHT/NetSHT.i.dfy :: NetEventIsAbstractable +pub open spec fn net_event_is_abstractable(evt: NetEvent) -> bool { + match evt { + LIoOp::>::Send { s } => net_packet_is_abstractable(s), + LIoOp::>::Receive { r } => net_packet_is_abstractable(r), + LIoOp::>::TimeoutReceive { } => true, + LIoOp::>::ReadClock { t } => true, } +} - // Translates Distributed/Impl/SHT/PacketParsing.i.dfy SHTDemarshallData - pub open spec fn sht_demarshal_data(data: Seq) -> CSingleMessage - recommends exists |v: CSingleMessage| v.is_marshalable() && v.ghost_serialize() == data - { - let v = choose |v: CSingleMessage| v.is_marshalable() && v.ghost_serialize() == data; - v - } +// Translates Distributed/Impl/SHT/PacketParsing.i.dfy SHTDemarshallData +pub open spec fn sht_demarshal_data(data: Seq) -> CSingleMessage + recommends + exists|v: CSingleMessage| v.is_marshalable() && v.ghost_serialize() == data, +{ + let v = choose|v: CSingleMessage| v.is_marshalable() && v.ghost_serialize() == data; + v +} - #[verifier(spinoff_prover)] - pub proof fn sht_marshal_data_injective(a: &CSingleMessage, b: &CSingleMessage) +#[verifier(spinoff_prover)] +pub proof fn sht_marshal_data_injective(a: &CSingleMessage, b: &CSingleMessage) requires a.is_marshalable(), b.is_marshalable(), a.ghost_serialize() == b.ghost_serialize(), ensures a@ == b@, - { - a.lemma_serialize_injective(b); - assert(a@ == b@); // OBSERVE; although not entirely sure why this is necessary here, esp since it exactly matches the postcondition. - } +{ + a.lemma_serialize_injective(b); + assert(a@ == b@); // OBSERVE; although not entirely sure why this is necessary here, esp since it exactly matches the postcondition. 
+} - // Ports Impl/SHT/PacketParsing.i.dfy :: AbstractifyNetPacketToLSHTPacket - pub open spec fn abstractify_net_packet_to_lsht_packet(net: NetPacket) -> LSHTPacket - recommends net_packet_is_abstractable(net) - { - LPacket { - dst: net.dst, - src: net.src, - msg: (sht_demarshal_data(net.msg))@ - } - } +// Ports Impl/SHT/PacketParsing.i.dfy :: AbstractifyNetPacketToLSHTPacket +pub open spec fn abstractify_net_packet_to_lsht_packet(net: NetPacket) -> LSHTPacket + recommends + net_packet_is_abstractable(net), +{ + LPacket { dst: net.dst, src: net.src, msg: (sht_demarshal_data(net.msg))@ } +} - // Translates Impl/LiveSHT/NetSHT.i.dfy :: AbstractifyNetEventToLSHTIo - pub open spec fn abstractify_net_event_to_lsht_io(evt: NetEvent) -> LSHTIo - recommends net_event_is_abstractable(evt) - { - match evt { - LIoOp::>::Send{s} => - LIoOp::>::Send{ s: abstractify_net_packet_to_lsht_packet(s) }, - LIoOp::>::Receive{r} => - LIoOp::>::Receive{ r: abstractify_net_packet_to_lsht_packet(r) }, - LIoOp::>::TimeoutReceive{} => - LIoOp::>::TimeoutReceive{}, - LIoOp::>::ReadClock{t} => - LIoOp::>::ReadClock{ t: t as int }, - } +// Translates Impl/LiveSHT/NetSHT.i.dfy :: AbstractifyNetEventToLSHTIo +pub open spec fn abstractify_net_event_to_lsht_io(evt: NetEvent) -> LSHTIo + recommends + net_event_is_abstractable(evt), +{ + match evt { + LIoOp::>::Send { s } => LIoOp::< + AbstractEndPoint, + SingleMessage, + >::Send { s: abstractify_net_packet_to_lsht_packet(s) }, + LIoOp::>::Receive { r } => LIoOp::< + AbstractEndPoint, + SingleMessage, + >::Receive { r: abstractify_net_packet_to_lsht_packet(r) }, + LIoOp::>::TimeoutReceive { } => LIoOp::< + AbstractEndPoint, + SingleMessage, + >::TimeoutReceive { }, + LIoOp::>::ReadClock { t } => LIoOp::< + AbstractEndPoint, + SingleMessage, + >::ReadClock { t: t as int }, } +} - // Ports Impl/SHT/PacketParsing.i.dfy :: AbstractifyNetPacketToShtPacket - pub open spec fn abstractify_net_packet_to_sht_packet(net: NetPacket) -> Packet - recommends net_packet_is_abstractable(net) - { - let lp = abstractify_net_packet_to_lsht_packet(net); - Packet { dst: lp.dst, src: lp.src, msg: lp.msg } - } +// Ports Impl/SHT/PacketParsing.i.dfy :: AbstractifyNetPacketToShtPacket +pub open spec fn abstractify_net_packet_to_sht_packet(net: NetPacket) -> Packet + recommends + net_packet_is_abstractable(net), +{ + let lp = abstractify_net_packet_to_lsht_packet(net); + Packet { dst: lp.dst, src: lp.src, msg: lp.msg } +} - // Translates Impl/LiveSHT/NetSHT.i.dfy :: NetEventLogIsAbstractable - pub open spec fn net_event_log_is_abstractable(rawlog: Seq) -> bool - { - forall |i: int| 0 <= i && i < rawlog.len() ==> #[trigger] net_event_is_abstractable(rawlog[i]) - } +// Translates Impl/LiveSHT/NetSHT.i.dfy :: NetEventLogIsAbstractable +pub open spec fn net_event_log_is_abstractable(rawlog: Seq) -> bool { + forall|i: int| 0 <= i && i < rawlog.len() ==> #[trigger] net_event_is_abstractable(rawlog[i]) +} - // Translates Distributed/Impl/SHT/PacketParsing.i.dfy SHTDemarshallDataMethod - pub fn sht_demarshall_data_method(buffer: &Vec) -> (out: CSingleMessage) +// Translates Distributed/Impl/SHT/PacketParsing.i.dfy SHTDemarshallDataMethod +pub fn sht_demarshall_data_method(buffer: &Vec) -> (out: CSingleMessage) ensures !(out is InvalidMessage) ==> { &&& out.is_marshalable() &&& out@ == sht_demarshal_data(buffer@)@ &&& out.abstractable() }, - { - match CSingleMessage::deserialize(&buffer, 0) { - None => { - CSingleMessage::InvalidMessage - }, - Some((cmessage, count)) => { - if count != buffer.len() { return 
CSingleMessage::InvalidMessage; } - match &cmessage { - CSingleMessage::Message{dst, m, ..} => { - if !dst.valid_physical_address() { return CSingleMessage::InvalidMessage; } - match m { - CMessage::Redirect{id, ..} => { - if !id.valid_physical_address() { return CSingleMessage::InvalidMessage; } - }, - CMessage::Shard{recipient, ..} => { - if !recipient.valid_physical_address() { return CSingleMessage::InvalidMessage; } - }, - _ => {}, - } - }, - _ => {}, - } - - proof { - assert( buffer@.subrange(0, count as int) =~= buffer@ ); - sht_marshal_data_injective(&sht_demarshal_data(buffer@), &cmessage); - } - cmessage +{ + match CSingleMessage::deserialize(&buffer, 0) { + None => { CSingleMessage::InvalidMessage }, + Some((cmessage, count)) => { + if count != buffer.len() { + return CSingleMessage::InvalidMessage; + } + match &cmessage { + CSingleMessage::Message { dst, m, .. } => { + if !dst.valid_physical_address() { + return CSingleMessage::InvalidMessage; + } + match m { + CMessage::Redirect { id, .. } => { + if !id.valid_physical_address() { + return CSingleMessage::InvalidMessage; + } + }, + CMessage::Shard { recipient, .. } => { + if !recipient.valid_physical_address() { + return CSingleMessage::InvalidMessage; + } + }, + _ => {}, + } + }, + _ => {}, } - } + proof { + assert(buffer@.subrange(0, count as int) =~= buffer@); + sht_marshal_data_injective(&sht_demarshal_data(buffer@), &cmessage); + } + cmessage + }, } +} - // ported from Impl/LiveSHT/NetSHT Receive - pub fn receive_with_demarshal(netc: &mut NetClient, local_addr: &EndPoint) -> (rc: (ReceiveResult, Ghost)) +// ported from Impl/LiveSHT/NetSHT Receive +pub fn receive_with_demarshal(netc: &mut NetClient, local_addr: &EndPoint) -> (rc: ( + ReceiveResult, + Ghost, +)) requires old(netc).ok(), old(netc).my_end_point() == local_addr@, old(netc).state().is_Receiving(), local_addr.abstractable(), ensures - ({let (rr, net_event) = rc; + ({ + let (rr, net_event) = rc; &&& netc.my_end_point() == old(netc).my_end_point() &&& netc.ok() == !rr.is_Fail() - &&& !rr.is_Fail() ==> netc.ok() && netc.history() == old(netc).history() + seq!( net_event@ ) + &&& !rr.is_Fail() ==> netc.ok() && netc.history() == old(netc).history() + + seq!( net_event@ ) &&& rr.is_Timeout() ==> net_event@.is_TimeoutReceive() &&& (rr.is_Packet() ==> { &&& net_event@.is_Receive() - &&& true // NetPacketIsAbstractable is true - &&& rr.get_Packet_cpacket().abstractable() // can parse u8s up to NetEvent. + &&& true // NetPacketIsAbstractable is true + + &&& rr.get_Packet_cpacket().abstractable() // can parse u8s up to NetEvent. + &&& true // EndPointIsValidPublicKey + &&& !(rr.get_Packet_cpacket()@.msg is InvalidMessage) ==> { - &&& rr.get_Packet_cpacket()@ == abstractify_net_packet_to_sht_packet(net_event@.get_Receive_r()) - &&& rr.get_Packet_cpacket().msg@ == sht_demarshal_data(net_event@.get_Receive_r().msg)@ + &&& rr.get_Packet_cpacket()@ == abstractify_net_packet_to_sht_packet( + net_event@.get_Receive_r(), + ) + &&& rr.get_Packet_cpacket().msg@ == sht_demarshal_data( + net_event@.get_Receive_r().msg, + )@ } &&& rr.get_Packet_cpacket().dst@ == local_addr@ }) - }) - { - let timeout = 0; - let netr = netc.receive(timeout); - - match netr { - NetcReceiveResult::Error => { - // Dafny IronFleet leaves this unassigned, but we have to make something up. 
- let dummy = NetEvent::TimeoutReceive{}; - (ReceiveResult::Fail, Ghost(dummy)) - }, - NetcReceiveResult::TimedOut{} => { - (ReceiveResult::Timeout, Ghost(NetEvent::TimeoutReceive{})) - }, - NetcReceiveResult::Received{sender, message} => { - let csinglemessage = sht_demarshall_data_method(&message); - assert( csinglemessage is Message ==> csinglemessage@ == sht_demarshal_data(message@)@ ); - let src_ep = sender; - let cpacket = CPacket{dst: local_addr.clone_up_to_view(), src: src_ep, msg: csinglemessage}; - let ghost net_event: NetEvent = LIoOp::Receive{ - r: LPacket{dst: local_addr@, src: src_ep@, msg: message@}}; - assert( cpacket.dst@ == local_addr@ ); - assert( cpacket.src.abstractable() ); - assert( cpacket.abstractable() ); - - proof { - let ghost gsinglemessage = csinglemessage; - if !(gsinglemessage is InvalidMessage) { - let lp = LPacket { - dst: local_addr@, - src: src_ep@, - msg: (sht_demarshal_data(message@))@ - }; - assert( lp == abstractify_net_packet_to_lsht_packet(net_event.get_Receive_r()) ); - let p = Packet { dst: lp.dst, src: lp.src, msg: lp.msg }; - assert( p == abstractify_net_packet_to_sht_packet(net_event.get_Receive_r()) ); - - assert( !(gsinglemessage is InvalidMessage) ); - assert( gsinglemessage@ == (sht_demarshal_data(message@))@ ); - assert( cpacket@.dst =~= p.dst ); - assert( cpacket@.src =~= p.src ); - assert( cpacket@.msg =~= p.msg ); - assert( cpacket@ =~= p ); - assert( cpacket@ == abstractify_net_packet_to_sht_packet(net_event.get_Receive_r()) ); - assert( gsinglemessage is Message ==> cpacket.msg@ == sht_demarshal_data(net_event.get_Receive_r().msg)@ ); - } + }), +{ + let timeout = 0; + let netr = netc.receive(timeout); + match netr { + NetcReceiveResult::Error => { + // Dafny IronFleet leaves this unassigned, but we have to make something up. 
+ let dummy = NetEvent::TimeoutReceive { }; + (ReceiveResult::Fail, Ghost(dummy)) + }, + NetcReceiveResult::TimedOut { } => { + (ReceiveResult::Timeout, Ghost(NetEvent::TimeoutReceive { })) + }, + NetcReceiveResult::Received { sender, message } => { + let csinglemessage = sht_demarshall_data_method(&message); + assert(csinglemessage is Message ==> csinglemessage@ == sht_demarshal_data(message@)@); + let src_ep = sender; + let cpacket = CPacket { + dst: local_addr.clone_up_to_view(), + src: src_ep, + msg: csinglemessage, + }; + let ghost net_event: NetEvent = LIoOp::Receive { + r: LPacket { dst: local_addr@, src: src_ep@, msg: message@ }, + }; + assert(cpacket.dst@ == local_addr@); + assert(cpacket.src.abstractable()); + assert(cpacket.abstractable()); + proof { + let ghost gsinglemessage = csinglemessage; + if !(gsinglemessage is InvalidMessage) { + let lp = LPacket { + dst: local_addr@, + src: src_ep@, + msg: (sht_demarshal_data(message@))@, + }; + assert(lp == abstractify_net_packet_to_lsht_packet(net_event.get_Receive_r())); + let p = Packet { dst: lp.dst, src: lp.src, msg: lp.msg }; + assert(p == abstractify_net_packet_to_sht_packet(net_event.get_Receive_r())); + assert(!(gsinglemessage is InvalidMessage)); + assert(gsinglemessage@ == (sht_demarshal_data(message@))@); + assert(cpacket@.dst =~= p.dst); + assert(cpacket@.src =~= p.src); + assert(cpacket@.msg =~= p.msg); + assert(cpacket@ =~= p); + assert(cpacket@ == abstractify_net_packet_to_sht_packet( + net_event.get_Receive_r(), + )); + assert(gsinglemessage is Message ==> cpacket.msg@ == sht_demarshal_data( + net_event.get_Receive_r().msg, + )@); } - (ReceiveResult::Packet{cpacket}, Ghost(net_event)) } - } + (ReceiveResult::Packet { cpacket }, Ghost(net_event)) + }, } +} - fn take_buf(buf: &mut Vec) {} +fn take_buf(buf: &mut Vec) { +} - /// Impl.SHT.PacketParsing.OutboundPacketsIsValid, which curiously doesn't involve the notion of - /// valid() - pub open spec fn outbound_packet_is_valid(cpacket: &CPacket) -> bool - { - &&& cpacket.abstractable() // CPacketIsAbstractable - &&& cpacket.msg.is_marshalable() // CSingleMessageMarshallable - &&& (!cpacket.msg.is_InvalidMessage()) // (out.msg.CSingleMessage? || out.msg.CAck?) - } +/// Impl.SHT.PacketParsing.OutboundPacketsIsValid, which curiously doesn't involve the notion of +/// valid() +pub open spec fn outbound_packet_is_valid(cpacket: &CPacket) -> bool { + &&& cpacket.abstractable() // CPacketIsAbstractable - pub open spec fn send_log_entry_reflects_packet(event: NetEvent, cpacket: &CPacket) -> bool - { - &&& event.is_Send() - &&& true // NetPacketIsAbstractable == EndPointIsAbstractable == true - &&& cpacket.abstractable() - &&& cpacket@ == abstractify_net_packet_to_sht_packet(event.get_Send_s()) - } + &&& cpacket.msg.is_marshalable() // CSingleMessageMarshallable - //impl EventResults { - // pub open spec fn singleton_send(net_event: NetEvent) -> EventResults - // { - // EventResults{ - // recvs: seq!(), - // clocks: seq!(), - // sends: seq!(net_event), - // ios: seq!(net_event), - // } - // } - //} + &&& ( + !cpacket.msg.is_InvalidMessage()) // (out.msg.CSingleMessage? || out.msg.CAck?) 
+ +} + +pub open spec fn send_log_entry_reflects_packet(event: NetEvent, cpacket: &CPacket) -> bool { + &&& event.is_Send() + &&& true // NetPacketIsAbstractable == EndPointIsAbstractable == true + + &&& cpacket.abstractable() + &&& cpacket@ == abstractify_net_packet_to_sht_packet(event.get_Send_s()) +} - pub fn send_packet(cpacket: &CPacket, netc: &mut NetClient) -> (rc: (bool, Ghost>)) +//impl EventResults { +// pub open spec fn singleton_send(net_event: NetEvent) -> EventResults +// { +// EventResults{ +// recvs: seq!(), +// clocks: seq!(), +// sends: seq!(net_event), +// ios: seq!(net_event), +// } +// } +//} +pub fn send_packet(cpacket: &CPacket, netc: &mut NetClient) -> (rc: (bool, Ghost>)) requires old(netc).ok(), outbound_packet_is_valid(cpacket), - cpacket.src@ == old(netc).my_end_point(), // OutboundPacketsSeqHasCorrectSrc + cpacket.src@ == old(netc).my_end_point(), // OutboundPacketsSeqHasCorrectSrc + ensures netc.my_end_point() == old(netc).my_end_point(), ({ let (ok, Ghost(net_event)) = rc; { - &&& netc.ok() <==> ok - &&& ok ==> net_event.is_Some() - &&& ok ==> netc.history() == old(netc).history() + seq![net_event.unwrap()] - &&& ok ==> rc.1@.is_Some() && send_log_entry_reflects_packet(net_event.unwrap(), &cpacket) - && is_marshalable_data(net_event.unwrap()) - } - }) - { - let mut buf: Vec = Vec::new(); - cpacket.msg.serialize(&mut buf); - // witness that buf@.len() < 2^64 - let _ = buf.len(); - match netc.send(&cpacket.dst, &buf) - { - Ok(_) => { - let ghost lpacket = LPacket::>{ dst: cpacket.dst@, src: cpacket.src@, msg: buf@ }; - let ghost net_event = LIoOp::Send{s: lpacket}; - - proof { - assert_seqs_equal!( buf@ == cpacket.msg.ghost_serialize() ); - assert(net_packet_bound(buf@)); - let purported_cpacket = sht_demarshal_data(buf@); - sht_marshal_data_injective( &cpacket.msg, &purported_cpacket ); - } - (true, Ghost(Some(net_event))) - }, - Err(_) => { - (false, Ghost(None)) + &&& netc.ok() <==> ok + &&& ok ==> net_event.is_Some() + &&& ok ==> netc.history() == old(netc).history() + seq![net_event.unwrap()] + &&& ok ==> rc.1@.is_Some() && send_log_entry_reflects_packet( + net_event.unwrap(), + &cpacket, + ) && is_marshalable_data(net_event.unwrap()) + } + }), +{ + let mut buf: Vec = Vec::new(); + cpacket.msg.serialize(&mut buf); + // witness that buf@.len() < 2^64 + let _ = buf.len(); + match netc.send(&cpacket.dst, &buf) { + Ok(_) => { + let ghost lpacket = LPacket::> { + dst: cpacket.dst@, + src: cpacket.src@, + msg: buf@, + }; + let ghost net_event = LIoOp::Send { s: lpacket }; + proof { + assert_seqs_equal!( buf@ == cpacket.msg.ghost_serialize() ); + assert(net_packet_bound(buf@)); + let purported_cpacket = sht_demarshal_data(buf@); + sht_marshal_data_injective(&cpacket.msg, &purported_cpacket); } - } + (true, Ghost(Some(net_event))) + }, + Err(_) => { (false, Ghost(None)) }, } +} - pub open spec fn outbound_packet_seq_is_valid(cpackets: Seq) -> bool - { - forall |i| 0 <= i < cpackets.len() ==> #[trigger] outbound_packet_is_valid(&cpackets[i]) - } +pub open spec fn outbound_packet_seq_is_valid(cpackets: Seq) -> bool { + forall|i| 0 <= i < cpackets.len() ==> #[trigger] outbound_packet_is_valid(&cpackets[i]) +} - pub open spec fn outbound_packet_seq_has_correct_srcs(cpackets: Seq, end_point: AbstractEndPoint) -> bool - { - // TODO(chris): Why doesn't this trigger attribute satisfy Verus!? 
- forall |i| #![auto] 0 <= i < cpackets.len() ==> cpackets[i].src@ == end_point - } +pub open spec fn outbound_packet_seq_has_correct_srcs( + cpackets: Seq, + end_point: AbstractEndPoint, +) -> bool { + // TODO(chris): Why doesn't this trigger attribute satisfy Verus!? + forall|i| #![auto] 0 <= i < cpackets.len() ==> cpackets[i].src@ == end_point +} - pub open spec fn net_packet_bound(data: Seq) -> bool - { - data.len() <= 0xffff_ffff_ffff_ffff - } +pub open spec fn net_packet_bound(data: Seq) -> bool { + data.len() <= 0xffff_ffff_ffff_ffff +} - pub open spec fn is_marshalable_data(event: NetEvent) -> bool - recommends event.is_Send() - { - &&& net_packet_bound(event.get_Send_s().msg) - &&& sht_demarshal_data(event.get_Send_s().msg).is_marshalable() - } +pub open spec fn is_marshalable_data(event: NetEvent) -> bool + recommends + event.is_Send(), +{ + &&& net_packet_bound(event.get_Send_s().msg) + &&& sht_demarshal_data(event.get_Send_s().msg).is_marshalable() +} - pub open spec fn only_sent_marshalable_data(rawlog:Seq) -> bool - { - forall |i| 0 <= i < rawlog.len() && rawlog[i].is_Send() ==> - #[trigger] is_marshalable_data(rawlog[i]) - } +pub open spec fn only_sent_marshalable_data(rawlog: Seq) -> bool { + forall|i| + 0 <= i < rawlog.len() && rawlog[i].is_Send() ==> #[trigger] is_marshalable_data(rawlog[i]) +} - /// translates SendLogReflectsPacket - pub open spec fn send_log_entries_reflect_packets(net_event_log: Seq, cpackets: Seq) -> bool - { - &&& net_event_log.len() == cpackets.len() - &&& (forall |i| 0 <= i < cpackets.len() ==> #[trigger] send_log_entry_reflects_packet(net_event_log[i], &cpackets[i])) - } +/// translates SendLogReflectsPacket +pub open spec fn send_log_entries_reflect_packets( + net_event_log: Seq, + cpackets: Seq, +) -> bool { + &&& net_event_log.len() == cpackets.len() + &&& (forall|i| + 0 <= i < cpackets.len() ==> #[trigger] send_log_entry_reflects_packet( + net_event_log[i], + &cpackets[i], + )) +} - #[verifier(spinoff_prover)] // suddenly this is taking a long time due to an unrelated change elsewhere - pub fn send_packet_seq(cpackets: &Vec, netc: &mut NetClient) -> (rc: (bool, Ghost>)) +#[verifier(spinoff_prover)] // suddenly this is taking a long time due to an unrelated change elsewhere +pub fn send_packet_seq(cpackets: &Vec, netc: &mut NetClient) -> (rc: ( + bool, + Ghost>, +)) requires old(netc).ok(), outbound_packet_seq_is_valid(cpackets@), @@ -8600,62 +9948,66 @@ mod net_sht_v { &&& ok ==> netc.history() == old(netc).history() + net_events &&& ok ==> send_log_entries_reflect_packets(net_events, cpackets@) &&& ok ==> only_sent_marshalable_data(net_events) - &&& forall |i| 0 <= i < net_events.len() ==> net_events[i] is Send + &&& forall|i| 0 <= i < net_events.len() ==> net_events[i] is Send } - }) + }), +{ + let ghost net_events = Seq::::empty(); + let mut i: usize = 0; + while i < cpackets.len() + invariant + i <= cpackets.len(), + outbound_packet_seq_is_valid(cpackets@), + outbound_packet_seq_has_correct_srcs(cpackets@, old(netc).my_end_point()), + netc.my_end_point() == old(netc).my_end_point(), + netc.ok(), + netc.history() == old(netc).history() + net_events, + send_log_entries_reflect_packets(net_events, cpackets@.subrange(0, i as int)), + only_sent_marshalable_data(net_events), + forall|i| 0 <= i < net_events.len() ==> net_events[i] is Send, { - let ghost net_events = Seq::::empty(); - - let mut i:usize = 0; - while i < cpackets.len() - invariant - i <= cpackets.len(), - outbound_packet_seq_is_valid(cpackets@), - 
outbound_packet_seq_has_correct_srcs(cpackets@, old(netc).my_end_point()), - netc.my_end_point() == old(netc).my_end_point(), - netc.ok(), - netc.history() == old(netc).history() + net_events, - send_log_entries_reflect_packets(net_events, cpackets@.subrange(0, i as int)), - only_sent_marshalable_data(net_events), - forall |i| 0 <= i < net_events.len() ==> net_events[i] is Send, - { - let cpacket: &CPacket = &cpackets[i]; - let (ok, Ghost(net_event)) = send_packet(cpacket, netc); - if !ok { - return (false, Ghost(Seq::::empty())); - } - i = i + 1; - proof { - let net_event = net_event.unwrap(); - let net_events0 = net_events; - net_events = net_events + seq![net_event]; - let cpackets_prefix = cpackets@.subrange(0, i as int); - assert forall |j| 0 <= j < i as int - implies #[trigger] send_log_entry_reflects_packet(net_events[j], &cpackets_prefix[j]) by { - if j == i-1 { - assert(net_events[j] == net_event); - } else { - assert(cpackets_prefix[j] == cpackets@.subrange(0, i-1 as int)[j]); - } + let cpacket: &CPacket = &cpackets[i]; + let (ok, Ghost(net_event)) = send_packet(cpacket, netc); + if !ok { + return (false, Ghost(Seq::::empty())); + } + i = i + 1; + proof { + let net_event = net_event.unwrap(); + let net_events0 = net_events; + net_events = net_events + seq![net_event]; + let cpackets_prefix = cpackets@.subrange(0, i as int); + assert forall|j| 0 <= j < i as int implies #[trigger] send_log_entry_reflects_packet( + net_events[j], + &cpackets_prefix[j], + ) by { + if j == i - 1 { + assert(net_events[j] == net_event); + } else { + assert(cpackets_prefix[j] == cpackets@.subrange(0, i - 1 as int)[j]); } - assert forall |j| 0 <= j < net_events.len() && net_events[j].is_Send() - implies #[trigger] is_marshalable_data(net_events[j]) by { - assert(send_log_entry_reflects_packet(net_events[j], &cpackets_prefix[j])); - if j == i-1 { - assert(net_events[j] == net_event); - } else { - assert(net_events[j] == net_events0[j]); - } + } + assert forall|j| + 0 <= j < net_events.len() + && net_events[j].is_Send() implies #[trigger] is_marshalable_data( + net_events[j], + ) by { + assert(send_log_entry_reflects_packet(net_events[j], &cpackets_prefix[j])); + if j == i - 1 { + assert(net_events[j] == net_event); + } else { + assert(net_events[j] == net_events0[j]); } } } - proof { - assert_seqs_equal!(cpackets@.subrange(0, cpackets@.len() as int), cpackets@); - } - (true, Ghost(net_events)) } - + proof { + assert_seqs_equal!(cpackets@.subrange(0, cpackets@.len() as int), cpackets@); } + (true, Ghost(net_events)) +} + +} // verus! } mod network_t { @@ -8671,18 +10023,20 @@ mod network_t { use crate::single_message_t::*; verus! { - pub type PMsg = SingleMessage; - /// A Packet is an abstract version of a `CPacket`. - /// - /// It's isomorphic to an `LSHTPacket = LPacket>`. - pub struct Packet { - pub dst: AbstractEndPoint, - pub src: AbstractEndPoint, - pub msg: PMsg, - } - } +pub type PMsg = SingleMessage; + +/// A Packet is an abstract version of a `CPacket`. +/// +/// It's isomorphic to an `LSHTPacket = LPacket>`. +pub struct Packet { + pub dst: AbstractEndPoint, + pub src: AbstractEndPoint, + pub msg: PMsg, +} + +} // verus! } mod seq_is_unique_v { @@ -8700,161 +10054,170 @@ mod seq_is_unique_v { verus! 
{ - // Translates Impl/Common/SeqIsUniqueDef.i.dfy :: SeqIsUnique - #[verifier::opaque] - pub open spec fn seq_is_unique(s: Seq) -> bool - { - forall |i: int, j: int| #![trigger s[i], s[j]] 0 <= i && i < s.len() && 0 <= j && j < s.len() && s[i] == s[j] ==> i == j - } - - pub fn do_vec_u8s_match(e1: &Vec, e2: &Vec) -> (eq: bool) - ensures - eq == (e1@ == e2@) - { - if e1.len() != e2.len() { - assert (e1@.len() != e2@.len()); - assert (e1@ != e2@); - return false; - } +// Translates Impl/Common/SeqIsUniqueDef.i.dfy :: SeqIsUnique +#[verifier::opaque] +pub open spec fn seq_is_unique(s: Seq) -> bool { + forall|i: int, j: int| + #![trigger s[i], s[j]] + 0 <= i && i < s.len() && 0 <= j && j < s.len() && s[i] == s[j] ==> i == j +} - let mut i: usize = 0; - while i < e1.len() - invariant - 0 <= i, - i <= e1.len(), - e1.len() == e2.len(), - forall |j: int| 0 <= j && j < i ==> e1@[j] == e2@[j] - { - if e1[i] != e2[i] { - return false; - } - i += 1; - } - proof { - assert_seqs_equal!(e1@, e2@); - } - return true; +pub fn do_vec_u8s_match(e1: &Vec, e2: &Vec) -> (eq: bool) + ensures + eq == (e1@ == e2@), +{ + if e1.len() != e2.len() { + assert(e1@.len() != e2@.len()); + assert(e1@ != e2@); + return false; + } + let mut i: usize = 0; + while i < e1.len() + invariant + 0 <= i, + i <= e1.len(), + e1.len() == e2.len(), + forall|j: int| 0 <= j && j < i ==> e1@[j] == e2@[j], + { + if e1[i] != e2[i] { + return false; } + i += 1; + } + proof { + assert_seqs_equal!(e1@, e2@); + } + return true; +} - pub fn do_end_points_match(e1: &EndPoint, e2: &EndPoint) -> (eq: bool) - ensures - eq == (e1@ == e2@) - { - do_vec_u8s_match(&e1.id, &e2.id) - } +pub fn do_end_points_match(e1: &EndPoint, e2: &EndPoint) -> (eq: bool) + ensures + eq == (e1@ == e2@), +{ + do_vec_u8s_match(&e1.id, &e2.id) +} - // Translates Impl/Common/CmdLineParser.i.dfy :: test_unique - pub fn test_unique(endpoints: &Vec) -> (unique: bool) - ensures - unique == seq_is_unique(abstractify_end_points(*endpoints)), - { - let mut i: usize = 0; - while i < endpoints.len() - invariant - 0 <= i, - i <= endpoints.len(), - forall |j: int, k: int| #![trigger endpoints@[j]@, endpoints@[k]@] - 0 <= j && j < endpoints.len() && 0 <= k && k < i && j != k ==> endpoints@[j]@ != endpoints@[k]@, - { - let mut j: usize = 0; - while j < endpoints.len() - invariant - 0 <= i, - i < endpoints.len(), - forall |j: int, k: int| #![trigger endpoints@[j]@, endpoints@[k]@] - 0 <= j && j < endpoints.len() && 0 <= k && k < i && j != k ==> endpoints@[j]@ != endpoints@[k]@, - 0 <= j, - j <= endpoints.len(), - forall |k: int| #![trigger endpoints@[k]@] 0 <= k && k < j && k != i ==> endpoints@[i as int]@ != endpoints@[k]@, - { - if i != j && do_end_points_match(&endpoints[i], &endpoints[j]) { - assert (!seq_is_unique(abstractify_end_points(*endpoints))) by { - reveal(seq_is_unique::); - let aeps = abstractify_end_points(*endpoints); - assert (aeps[i as int] == endpoints@[i as int]@); - assert (aeps[j as int] == endpoints@[j as int]@); - assert (endpoints@[i as int]@ == endpoints@[j as int]@ && i != j); - } - return false; - } - j = j + 1; +// Translates Impl/Common/CmdLineParser.i.dfy :: test_unique +pub fn test_unique(endpoints: &Vec) -> (unique: bool) + ensures + unique == seq_is_unique(abstractify_end_points(*endpoints)), +{ + let mut i: usize = 0; + while i < endpoints.len() + invariant + 0 <= i, + i <= endpoints.len(), + forall|j: int, k: int| + #![trigger endpoints@[j]@, endpoints@[k]@] + 0 <= j && j < endpoints.len() && 0 <= k && k < i && j != k ==> endpoints@[j]@ + != 
endpoints@[k]@, + { + let mut j: usize = 0; + while j < endpoints.len() + invariant + 0 <= i, + i < endpoints.len(), + forall|j: int, k: int| + #![trigger endpoints@[j]@, endpoints@[k]@] + 0 <= j && j < endpoints.len() && 0 <= k && k < i && j != k ==> endpoints@[j]@ + != endpoints@[k]@, + 0 <= j, + j <= endpoints.len(), + forall|k: int| + #![trigger endpoints@[k]@] + 0 <= k && k < j && k != i ==> endpoints@[i as int]@ != endpoints@[k]@, + { + if i != j && do_end_points_match(&endpoints[i], &endpoints[j]) { + assert(!seq_is_unique(abstractify_end_points(*endpoints))) by { + reveal(seq_is_unique::); + let aeps = abstractify_end_points(*endpoints); + assert(aeps[i as int] == endpoints@[i as int]@); + assert(aeps[j as int] == endpoints@[j as int]@); + assert(endpoints@[i as int]@ == endpoints@[j as int]@ && i != j); } - i = i + 1; - }; - assert (seq_is_unique(abstractify_end_points(*endpoints))) by { - reveal(seq_is_unique::); + return false; } - return true; + j = j + 1; } + i = i + 1; + }; + assert(seq_is_unique(abstractify_end_points(*endpoints))) by { + reveal(seq_is_unique::); + } + return true; +} - pub fn endpoints_contain(endpoints: &Vec, endpoint: &EndPoint) -> (present: bool) - ensures present == abstractify_end_points(*endpoints).contains(endpoint@) - { - let mut j: usize = 0; - while j < endpoints.len() - invariant - 0 <= j && j <= endpoints.len(), - forall |k: int| #![trigger endpoints@[k]@] 0 <= k && k < j ==> endpoint@ != endpoints@[k]@, - { - if do_end_points_match(endpoint, &endpoints[j]) { - assert (abstractify_end_points(*endpoints)[j as int] == endpoint@); - return true; - } - j = j + 1; - } - return false; +pub fn endpoints_contain(endpoints: &Vec, endpoint: &EndPoint) -> (present: bool) + ensures + present == abstractify_end_points(*endpoints).contains(endpoint@), +{ + let mut j: usize = 0; + while j < endpoints.len() + invariant + 0 <= j && j <= endpoints.len(), + forall|k: int| + #![trigger endpoints@[k]@] + 0 <= k && k < j ==> endpoint@ != endpoints@[k]@, + { + if do_end_points_match(endpoint, &endpoints[j]) { + assert(abstractify_end_points(*endpoints)[j as int] == endpoint@); + return true; } + j = j + 1; + } + return false; +} - pub fn clone_option_vec_u8(ov: Option<&Vec>) -> (res: Option>) - ensures - match ov { - Some(e1) => res.is_some() && e1@ == res.get_Some_0()@, - None => res.is_None(), - } - { - match ov { - Some(e1) => Some(clone_vec_u8(e1)), - None => None, - } - } +pub fn clone_option_vec_u8(ov: Option<&Vec>) -> (res: Option>) + ensures + match ov { + Some(e1) => res.is_some() && e1@ == res.get_Some_0()@, + None => res.is_None(), + }, +{ + match ov { + Some(e1) => Some(clone_vec_u8(e1)), + None => None, + } +} - pub fn clone_end_point(ep: &EndPoint) -> (cloned_ep: EndPoint) - ensures - cloned_ep@ == ep@ - { - EndPoint{id: clone_vec_u8(&ep.id)} - } +pub fn clone_end_point(ep: &EndPoint) -> (cloned_ep: EndPoint) + ensures + cloned_ep@ == ep@, +{ + EndPoint { id: clone_vec_u8(&ep.id) } +} - pub fn clone_option_end_point(oep: &Option) -> (cloned_oep: Option) - ensures - match oep { - Some(ep) => cloned_oep.is_some() && ep@ == cloned_oep.get_Some_0()@, - None => cloned_oep.is_None(), - } - { - match oep.as_ref() { - Some(ep) => Some(clone_end_point(ep)), - None => None - } - } +pub fn clone_option_end_point(oep: &Option) -> (cloned_oep: Option) + ensures + match oep { + Some(ep) => cloned_oep.is_some() && ep@ == cloned_oep.get_Some_0()@, + None => cloned_oep.is_None(), + }, +{ + match oep.as_ref() { + Some(ep) => Some(clone_end_point(ep)), + None => None, + } 
+} - pub proof fn singleton_seq_to_set_is_singleton_set(x: T) - ensures seq![x].to_set() == set![x] - { - let seq1 = seq![x]; - let set1 = seq1.to_set(); - let set2 = set![x]; - assert forall |y| set1.contains(y) <==> set2.contains(y) by - { - if y == x { - assert (seq1[0] == y); - assert (set1.contains(y)); - } - } - assert_sets_equal!(seq![x].to_set(), set![x]); +pub proof fn singleton_seq_to_set_is_singleton_set(x: T) + ensures + seq![x].to_set() == set![x], +{ + let seq1 = seq![x]; + let set1 = seq1.to_set(); + let set2 = set![x]; + assert forall|y| set1.contains(y) <==> set2.contains(y) by { + if y == x { + assert(seq1[0] == y); + assert(set1.contains(y)); } - } + assert_sets_equal!(seq![x].to_set(), set![x]); +} + +} // verus! } mod single_delivery_model_v { @@ -8888,79 +10251,86 @@ mod single_delivery_model_v { verus! { - pub proof fn same_view_same_marshalable(x: &CSingleMessage, y: &CSingleMessage) +pub proof fn same_view_same_marshalable(x: &CSingleMessage, y: &CSingleMessage) requires x@ == y@, ensures x.is_marshalable() == y.is_marshalable(), - { - CSingleMessage::view_equal_spec(); - assert(x.view_equal(y)); - x.lemma_same_views_serialize_the_same(y); - } +{ + CSingleMessage::view_equal_spec(); + assert(x.view_equal(y)); + x.lemma_same_views_serialize_the_same(y); +} - // We had to do a refactoring from the original material here. The original - // code called NewSingleMessageImpl twice, apparently for proof expediency, - // since the second call used an *old copy* of the SingleDelivery state. That's - // kind of silly, and more importantly hard to represent with references in Rust. - // So we want receive_impl to communicate back to the caller the extra bit of - // info it learned from NewSingleMessageImpl. - pub enum ReceiveImplResult { - // What does caller need to do? - FreshPacket{ack: CPacket}, // Buffer the receivedPacket, send an ack - DuplicatePacket{ack: CPacket}, // Send another ack - AckOrInvalid, // No obligation - } +// We had to do a refactoring from the original material here. The original +// code called NewSingleMessageImpl twice, apparently for proof expediency, +// since the second call used an *old copy* of the SingleDelivery state. That's +// kind of silly, and more importantly hard to represent with references in Rust. +// So we want receive_impl to communicate back to the caller the extra bit of +// info it learned from NewSingleMessageImpl. +pub enum ReceiveImplResult { + // What does caller need to do? + FreshPacket { ack: CPacket }, // Buffer the receivedPacket, send an ack + DuplicatePacket { ack: CPacket }, // Send another ack + AckOrInvalid, // No obligation +} - pub open spec fn valid_ack(ack: CPacket, original: CPacket) -> bool { - &&& ack.abstractable() - &&& outbound_packet_is_valid(&ack) // how does this relate to abstractable? - &&& ack.src@ == original.dst@ - &&& ack.dst@ == original.src@ - } +pub open spec fn valid_ack(ack: CPacket, original: CPacket) -> bool { + &&& ack.abstractable() + &&& outbound_packet_is_valid(&ack) // how does this relate to abstractable? - impl ReceiveImplResult { - pub open spec fn ok(self) -> bool { - self is FreshPacket || self is DuplicatePacket - } + &&& ack.src@ == original.dst@ + &&& ack.dst@ == original.src@ +} + +impl ReceiveImplResult { + pub open spec fn ok(self) -> bool { + self is FreshPacket || self is DuplicatePacket + } - pub open spec fn get_ack(self) -> CPacket - // we rely on get_ack(AckOrInvalid) returning something about which - // we don't care so we can pass it to SingleDelivery::receive. 
Meh. + pub open spec fn get_ack( + self, + ) -> CPacket + // we rely on get_ack(AckOrInvalid) returning something about which + // we don't care so we can pass it to SingleDelivery::receive. Meh. // recommends // self.ok(), - { - match self { - Self::FreshPacket{ack} => ack, - Self::DuplicatePacket{ack} => ack, - _ => arbitrary(), - } + { + match self { + Self::FreshPacket { ack } => ack, + Self::DuplicatePacket { ack } => ack, + _ => arbitrary(), } + } - pub open spec fn get_abstracted_ack_set(self) -> Set - { - match self { - Self::FreshPacket{ack} => set!{ack@}, - Self::DuplicatePacket{ack} => set!{ack@}, - _ => set!{}, - } + pub open spec fn get_abstracted_ack_set(self) -> Set { + match self { + Self::FreshPacket { ack } => set!{ack@}, + Self::DuplicatePacket { ack } => set!{ack@}, + _ => set!{}, } + } - /// True when, if `self` contains an ack, that ack is a valid response to `pkt`. - pub open spec fn valid_ack(self, pkt: CPacket) -> bool { - self.ok() ==> valid_ack(self.get_ack(), pkt) - } + /// True when, if `self` contains an ack, that ack is a valid response to `pkt`. + pub open spec fn valid_ack(self, pkt: CPacket) -> bool { + self.ok() ==> valid_ack(self.get_ack(), pkt) } +} - /// Impl/SHT/SingleDeliveryModel.RetransmitUnAckedPackets - impl CSingleDelivery { - pub open spec fn packets_are_valid_messages(packets: Seq) -> bool { - forall |i| 0 <= i < packets.len() ==> #[trigger] packets[i].msg is Message - } +/// Impl/SHT/SingleDeliveryModel.RetransmitUnAckedPackets +impl CSingleDelivery { + pub open spec fn packets_are_valid_messages(packets: Seq) -> bool { + forall|i| 0 <= i < packets.len() ==> #[trigger] packets[i].msg is Message + } - // Does not exist in ironfleet; both this method and its caller are a single glorious - // executable set-with-mapping comprehension. - pub fn retransmit_un_acked_packets_for_dst(&self, src: &EndPoint, dst: &EndPoint, packets: &mut Vec) + // Does not exist in ironfleet; both this method and its caller are a single glorious + // executable set-with-mapping comprehension. + pub fn retransmit_un_acked_packets_for_dst( + &self, + src: &EndPoint, + dst: &EndPoint, + packets: &mut Vec, + ) requires self.valid(), src.abstractable(), @@ -8969,147 +10339,141 @@ mod single_delivery_model_v { self.send_state@.contains_key(dst@), Self::packets_are_valid_messages(old(packets)@), ensures - packets@.map_values(|p: CPacket| p@).to_set() == - old(packets)@.map_values(|p: CPacket| p@).to_set() + self@.un_acked_messages_for_dest(src@, dst@), + packets@.map_values(|p: CPacket| p@).to_set() == old(packets)@.map_values( + |p: CPacket| p@, + ).to_set() + self@.un_acked_messages_for_dest(src@, dst@), outbound_packet_seq_is_valid(packets@), outbound_packet_seq_has_correct_srcs(packets@, src@), Self::packets_are_valid_messages(packets@), - { - proof { - assert_sets_equal!( + { + proof { + assert_sets_equal!( packets@.map_values(|p: CPacket| p@).to_set(), old(packets)@.map_values(|p: CPacket| p@).to_set() + self@.un_acked_messages_for_dest_up_to(src@, dst@, 0 as nat), ); - } - - match self.send_state.epmap.get(dst) { - Some(ack_state) => { - let mut i=0; - - while i < ack_state.un_acked.len() + } + match self.send_state.epmap.get(dst) { + Some(ack_state) => { + let mut i = 0; + while i < ack_state.un_acked.len() invariant 0 <= i <= ack_state.un_acked.len(), - self.valid(), // Everybody hates having to carry everything through here. :v( + self.valid(), // Everybody hates having to carry everything through here. 
:v( src.abstractable(), outbound_packet_seq_is_valid(packets@), outbound_packet_seq_has_correct_srcs(packets@, src@), self.send_state@.contains_key(dst@), ack_state == self.send_state.epmap[dst], - packets@.map_values(|p: CPacket| p@).to_set() == - old(packets)@.map_values(|p: CPacket| p@).to_set() + self@.un_acked_messages_for_dest_up_to(src@, dst@, i as nat), + packets@.map_values(|p: CPacket| p@).to_set() == old(packets)@.map_values( + |p: CPacket| p@, + ).to_set() + self@.un_acked_messages_for_dest_up_to(src@, dst@, i as nat), Self::packets_are_valid_messages(packets@), - { - let ghost packets0_view = packets@; - - assert( CAckState::un_acked_valid(&ack_state.un_acked@[i as int]) ); // trigger - - let sm = &ack_state.un_acked[i]; - let dst = match sm { - CSingleMessage::Message{dst, .. } => dst, - _ => { proof {assert(false); } unreached() }, - }; - - let cpacket = CPacket{dst: dst.clone_up_to_view(), src: src.clone_up_to_view(), msg: sm.clone_up_to_view()}; - packets.push(cpacket); - - i = i + 1; - - proof{ - same_view_same_marshalable( &cpacket.msg, &sm ); - - lemma_seq_push_to_set(packets0_view, cpacket); - - assert_seqs_equal!(packets@.map_values(|p: CPacket| p@), + { + let ghost packets0_view = packets@; + assert(CAckState::un_acked_valid(&ack_state.un_acked@[i as int])); // trigger + let sm = &ack_state.un_acked[i]; + let dst = match sm { + CSingleMessage::Message { dst, .. } => dst, + _ => { + proof { + assert(false); + } + unreached() + }, + }; + let cpacket = CPacket { + dst: dst.clone_up_to_view(), + src: src.clone_up_to_view(), + msg: sm.clone_up_to_view(), + }; + packets.push(cpacket); + i = i + 1; + proof { + same_view_same_marshalable(&cpacket.msg, &sm); + lemma_seq_push_to_set(packets0_view, cpacket); + assert_seqs_equal!(packets@.map_values(|p: CPacket| p@), packets0_view.map_values(|p: CPacket| p@).push(cpacket@)); - - lemma_seq_push_to_set(packets0_view.map_values(|p: CPacket| p@), cpacket@); - self.un_acked_messages_extend(src@, dst@, (i-1) as nat); - - assert_sets_equal!( + lemma_seq_push_to_set(packets0_view.map_values(|p: CPacket| p@), cpacket@); + self.un_acked_messages_extend(src@, dst@, (i - 1) as nat); + assert_sets_equal!( packets@.map_values(|p: CPacket| p@).to_set(), old(packets)@.map_values(|p: CPacket| p@).to_set() + self@.un_acked_messages_for_dest_up_to(src@, dst@, i as nat) ); - } } - }, - None => { - proof { assert(false); } } - } + }, + None => { + proof { + assert(false); + } + }, } + } - /// Translates Impl/SHT/SingleDeliveryModel.i.dfy :: NewSingleMessageImpl - pub fn new_impl(&self, pkt: &CPacket) -> (ok: bool) + /// Translates Impl/SHT/SingleDeliveryModel.i.dfy :: NewSingleMessageImpl + pub fn new_impl(&self, pkt: &CPacket) -> (ok: bool) requires self.valid(), self.abstractable(), pkt.abstractable(), ensures ok == SingleDelivery::new_single_message(self@, pkt@), - { - match pkt.msg { - CSingleMessage::Message{seqno, ..} => { - seqno > 0 && seqno - 1 == self.receive_state.lookup(&pkt.src) - }, - _ => false, - } + { + match pkt.msg { + CSingleMessage::Message { seqno, .. 
} => { + seqno > 0 && seqno - 1 == self.receive_state.lookup(&pkt.src) + }, + _ => false, } + } - /// Translates Impl/SHT/SingleDeliveryModel.i.dfy :: ReceiveAckImpl - pub fn receive_ack_impl(&mut self, pkt: &CPacket) + /// Translates Impl/SHT/SingleDeliveryModel.i.dfy :: ReceiveAckImpl + pub fn receive_ack_impl(&mut self, pkt: &CPacket) requires old(self).valid(), - // self.abstractable(), + // self.abstractable(), pkt.abstractable(), pkt.msg is Ack, ensures self.valid(), SingleDelivery::receive_ack(old(self)@, self@, pkt@, set!{}), - { - let num_packets_acked = - match self.send_state.get(&pkt.src) { - Some(ref cack_state) => { cack_state.num_packets_acked } - None => { 0 }, - // In the None case, we have no ack state. To meet our AckState::receive_ack assum-ed - // protocol spec, we are forced to not update the send_state; stuffing an - // CAckState::new in there would make spec unmeetable. - }; - - if let CSingleMessage::Ack{ack_seqno} = pkt.msg { - if ack_seqno <= num_packets_acked { - return; - } - - let mut local_state = CAckState::new(); - let default = CAckState::new(); - - let ghost int_local_state = local_state; // trigger fodder - self.send_state.cack_state_swap(&pkt.src, &mut local_state, default); - - local_state.truncate(ack_seqno, Ghost(pkt.src@)); - self.send_state.put(&pkt.src, local_state); - - assert forall |ep: EndPoint| #[trigger] self.send_state@.contains_key(ep@) - implies { - &&& ep.abstractable() - &&& self.send_state.epmap[&ep].abstractable() - } by { - if ep@ != pkt.src@ { - assert( old(self).send_state@.contains_key(ep@) ); - } + { + let num_packets_acked = match self.send_state.get(&pkt.src) { + Some(ref cack_state) => { cack_state.num_packets_acked }, + None => { 0 }, + // In the None case, we have no ack state. To meet our AckState::receive_ack assum-ed + // protocol spec, we are forced to not update the send_state; stuffing an + // CAckState::new in there would make spec unmeetable. 
+ }; + if let CSingleMessage::Ack { ack_seqno } = pkt.msg { + if ack_seqno <= num_packets_acked { + return ; + } + let mut local_state = CAckState::new(); + let default = CAckState::new(); + let ghost int_local_state = local_state; // trigger fodder + self.send_state.cack_state_swap(&pkt.src, &mut local_state, default); + local_state.truncate(ack_seqno, Ghost(pkt.src@)); + self.send_state.put(&pkt.src, local_state); + assert forall|ep: EndPoint| #[trigger] self.send_state@.contains_key(ep@) implies { + &&& ep.abstractable() + &&& self.send_state.epmap[&ep].abstractable() + } by { + if ep@ != pkt.src@ { + assert(old(self).send_state@.contains_key(ep@)); } - - assert forall |ep: AbstractEndPoint| #[trigger] self.send_state@.contains_key(ep) - implies self.send_state.epmap@[ep].valid(ep) by { - if ep != pkt.src@ { - assert( old(self).send_state@.contains_key(ep) ); - } + } + assert forall|ep: AbstractEndPoint| #[trigger] + self.send_state@.contains_key(ep) implies self.send_state.epmap@[ep].valid(ep) by { + if ep != pkt.src@ { + assert(old(self).send_state@.contains_key(ep)); } } } + } - /// Translates Impl/SHT/SingleDeliveryModel.i.dfy :: ReceiveRealPacketImpl - pub fn receive_real_packet_impl(&mut self, pkt: &CPacket) -> (packet_is_fresh: bool) + /// Translates Impl/SHT/SingleDeliveryModel.i.dfy :: ReceiveRealPacketImpl + pub fn receive_real_packet_impl(&mut self, pkt: &CPacket) -> (packet_is_fresh: bool) requires old(self).valid(), pkt.abstractable(), @@ -9118,89 +10482,98 @@ mod single_delivery_model_v { self.valid(), SingleDelivery::receive_real_packet(old(self)@, self@, pkt@), packet_is_fresh == SingleDelivery::new_single_message(old(self)@, pkt@), - { - // We inlined NewSingleMessageImpl here. - let last_seqno = self.receive_state.lookup(&pkt.src); - match pkt.msg { - CSingleMessage::Message{seqno: pkt_seqno, ..} => { - let packet_is_fresh = pkt_seqno > 0 && pkt_seqno - 1 == last_seqno; - if packet_is_fresh { - self.receive_state.insert(&pkt.src, last_seqno + 1); - } - packet_is_fresh + { + // We inlined NewSingleMessageImpl here. + let last_seqno = self.receive_state.lookup(&pkt.src); + match pkt.msg { + CSingleMessage::Message { seqno: pkt_seqno, .. } => { + let packet_is_fresh = pkt_seqno > 0 && pkt_seqno - 1 == last_seqno; + if packet_is_fresh { + self.receive_state.insert(&pkt.src, last_seqno + 1); } - _ => { assert(false); unreached() } - } - + packet_is_fresh + }, + _ => { + assert(false); + unreached() + }, } + } - /// Translates Impl/SHT/SingleDeliveryModel.i.dfy :: ShouldAckSingleMessageImpl - pub fn should_ack_sigle_message_impl(&self, pkt: &CPacket) -> bool { - match pkt.msg { - CSingleMessage::Message{seqno, ..} => { - seqno <= self.receive_state.lookup(&pkt.src) - }, - _ => false, - } + /// Translates Impl/SHT/SingleDeliveryModel.i.dfy :: ShouldAckSingleMessageImpl + pub fn should_ack_sigle_message_impl(&self, pkt: &CPacket) -> bool { + match pkt.msg { + CSingleMessage::Message { seqno, .. 
} => { seqno <= self.receive_state.lookup(&pkt.src) + }, + _ => false, } + } - pub open spec fn option_cpacket_to_set_packet(opt_pkt: Option) -> Set - { - match opt_pkt { - Some(pkt) => set!{pkt@}, - None => Set::::empty(), - } + pub open spec fn option_cpacket_to_set_packet(opt_pkt: Option) -> Set { + match opt_pkt { + Some(pkt) => set!{pkt@}, + None => Set::::empty(), } + } - /// Translates Impl/SHT/SingleDeliveryModel.i.dfy :: MaybeAckPacketImpl - /// We know coming into this call that pkt satisfies ReceiveRealPacketImpl, so the possible - /// outcomes are: - /// Some -> pkt is fresh or duplicate - /// None -> pkt is from the future; don't ack it! - pub fn maybe_ack_packet_impl(&self, pkt: &CPacket) -> (opt_ack: Option) + /// Translates Impl/SHT/SingleDeliveryModel.i.dfy :: MaybeAckPacketImpl + /// We know coming into this call that pkt satisfies ReceiveRealPacketImpl, so the possible + /// outcomes are: + /// Some -> pkt is fresh or duplicate + /// None -> pkt is from the future; don't ack it! + pub fn maybe_ack_packet_impl(&self, pkt: &CPacket) -> (opt_ack: Option) requires self.valid(), pkt.abstractable(), pkt.msg is Message, ensures - SingleDelivery::maybe_ack_packet(self@, pkt@, opt_ack.unwrap()@, Self::option_cpacket_to_set_packet(opt_ack)), + SingleDelivery::maybe_ack_packet( + self@, + pkt@, + opt_ack.unwrap()@, + Self::option_cpacket_to_set_packet(opt_ack), + ), opt_ack is Some ==> valid_ack(opt_ack.unwrap(), *pkt), - { - // jonh inlined ShouldAckSingleMessageImpl and SendAckImpl. - // I feel like we could inline a LOT of these methods; they're - // very much consequences of the painful Dafny break-everything-into- - // two-line-methods lifestyle. - match pkt.msg { - CSingleMessage::Message{seqno, ..} => { - if seqno <= self.receive_state.lookup(&pkt.src) { - let m_ack = CSingleMessage::Ack{ack_seqno: seqno}; - assert(m_ack.is_marshalable()) by { - vstd::bytes::lemma_auto_spec_u64_to_from_le_bytes(); - } - let p_ack = CPacket{ - dst: pkt.src.clone_up_to_view(), - src: pkt.dst.clone_up_to_view(), - msg: m_ack - }; - Some(p_ack) // Fresh or Duplicate - } else { - None + { + // jonh inlined ShouldAckSingleMessageImpl and SendAckImpl. + // I feel like we could inline a LOT of these methods; they're + // very much consequences of the painful Dafny break-everything-into- + // two-line-methods lifestyle. + match pkt.msg { + CSingleMessage::Message { seqno, .. } => { + if seqno <= self.receive_state.lookup(&pkt.src) { + let m_ack = CSingleMessage::Ack { ack_seqno: seqno }; + assert(m_ack.is_marshalable()) by { + vstd::bytes::lemma_auto_spec_u64_to_from_le_bytes(); } - }, - _ => { assert(false); unreached() } - } + let p_ack = CPacket { + dst: pkt.src.clone_up_to_view(), + src: pkt.dst.clone_up_to_view(), + msg: m_ack, + }; + Some(p_ack) // Fresh or Duplicate - // When ReceiveSingleMessageImpl calls MaybeAckPacketImpl(acct'), the returned b must be true, - // because acct' came from ReceiveRealPacketImpl. - // - // The "weird" case is receiving a duplicate message; here's the call stack: - // HMRP / ReceiveSingleMessageImpl / ReceiveRealPacketImpl / NewSingleMessageImpl returns false - // HMRP / ReceiveSingleMessageImpl / MaybeAckPacketImpl(acct') returns true - // HMRP / NewSingleMessageImpl(acct0) returns false + } else { + None + } + }, + _ => { + assert(false); + unreached() + }, } + // When ReceiveSingleMessageImpl calls MaybeAckPacketImpl(acct'), the returned b must be true, + // because acct' came from ReceiveRealPacketImpl. 
+ // + // The "weird" case is receiving a duplicate message; here's the call stack: + // HMRP / ReceiveSingleMessageImpl / ReceiveRealPacketImpl / NewSingleMessageImpl returns false + // HMRP / ReceiveSingleMessageImpl / MaybeAckPacketImpl(acct') returns true + // HMRP / NewSingleMessageImpl(acct0) returns false - /// Translates Impl/SHT/SingleDeliveryModel.i.dfy :: ReceiveSingleMessageImpl - pub fn receive_impl(&mut self, pkt: &CPacket) -> (rr: ReceiveImplResult) + } + + /// Translates Impl/SHT/SingleDeliveryModel.i.dfy :: ReceiveSingleMessageImpl + pub fn receive_impl(&mut self, pkt: &CPacket) -> (rr: ReceiveImplResult) requires old(self).valid(), old(self).abstractable(), @@ -9208,272 +10581,297 @@ mod single_delivery_model_v { ensures self.valid(), rr.valid_ack(*pkt), - SingleDelivery::receive(old(self)@, self@, pkt@, rr.get_ack()@, rr.get_abstracted_ack_set()), + SingleDelivery::receive( + old(self)@, + self@, + pkt@, + rr.get_ack()@, + rr.get_abstracted_ack_set(), + ), rr is FreshPacket ==> SingleDelivery::new_single_message(old(self)@, pkt@), rr is DuplicatePacket ==> !SingleDelivery::new_single_message(old(self)@, pkt@), - { - match pkt.msg { - CSingleMessage::Ack{..} => { - self.receive_ack_impl(pkt); - let rr = ReceiveImplResult::AckOrInvalid{}; - rr - }, - CSingleMessage::Message{..} => { - let packet_is_fresh = self.receive_real_packet_impl(pkt); - let opt_ack = self.maybe_ack_packet_impl(pkt); - - match opt_ack { - Some(ack) => { - let rr = - if packet_is_fresh { - ReceiveImplResult::FreshPacket{ack} - } else { - ReceiveImplResult::DuplicatePacket{ack} - }; - assert( SingleDelivery::receive(old(self)@, self@, pkt@, rr.get_ack()@, rr.get_abstracted_ack_set()) ); - rr - } - None => { - assert( Self::option_cpacket_to_set_packet(opt_ack).is_empty() ); // this is an unfortunate trigger to need - let rr = ReceiveImplResult::AckOrInvalid{}; - rr - } - } - }, - CSingleMessage::InvalidMessage{..} => { - let rr = ReceiveImplResult::AckOrInvalid{}; - // assert( SingleDelivery::receive(old(self)@, self@, pkt@, rr.get_ack()@, rr.get_abstracted_ack_set()) ); - // assert( self.valid() ); - rr - }, - } - } - - /// Translates Impl/SHT/SingleDeliveryModel.i.dfy :: SendSingleCMessage - /// TODO: Andrea points out that this postcondition is unachievable. So we should probably - /// go back to the Ironfleet Dafny approach of returning a `sm: CSingleMessage` and a `should_send: bool`. - /// The issue is that even in the `!should_send` case we still need a field of the (dummy) `sm`, namely - /// its `dst`. - #[verifier::rlimit(15)] - pub fn send_single_cmessage(&mut self, m: &CMessage, dst: &EndPoint) -> (sm: Option) - requires - old(self).valid(), - old(self).abstractable(), - m.abstractable(), - m.message_marshallable(), - m.is_marshalable(), - dst@.valid_physical_address(), - ensures - self.valid(), - match sm { - Some(sm) => { - &&& sm.abstractable() - &&& sm.is_Message() - &&& sm.get_Message_dst()@ == dst@ - &&& SingleDelivery::send_single_message(old(self)@, self@, m@, dst@, Some(sm@), AbstractParameters::static_params()) - &&& sm.is_marshalable() + { + match pkt.msg { + CSingleMessage::Ack { .. } => { + self.receive_ack_impl(pkt); + let rr = ReceiveImplResult::AckOrInvalid { }; + rr + }, + CSingleMessage::Message { .. 
} => { + let packet_is_fresh = self.receive_real_packet_impl(pkt); + let opt_ack = self.maybe_ack_packet_impl(pkt); + match opt_ack { + Some(ack) => { + let rr = if packet_is_fresh { + ReceiveImplResult::FreshPacket { ack } + } else { + ReceiveImplResult::DuplicatePacket { ack } + }; + assert(SingleDelivery::receive( + old(self)@, + self@, + pkt@, + rr.get_ack()@, + rr.get_abstracted_ack_set(), + )); + rr }, - None => - SingleDelivery::send_single_message(old(self)@, self@, m@, dst@, None, AbstractParameters::static_params()), - }, - // TODO: capture the part of send_single_message when should_send == false - { - let (num_packets_acked, un_acked_len) = - match self.send_state.get(dst) { - Some(ref cack_state) => { - proof { - if cack_state.un_acked.len() > 0 { - // This is necessary to show that appending our new seqno keeps the list sequential - cack_state.lemma_seqno_in_un_acked_list(dst@, (cack_state.un_acked.len() - 1) as int); - } - } - (cack_state.num_packets_acked, cack_state.un_acked.len() as u64) - } - None => { (0, 0) }, - // In the None case, we have no ack state. To meet our AckState::receive_ack assum-ed - // protocol spec, we are forced to not update the send_state; stuffing an - // CAckState::new in there would make spec unmeetable. - }; - - if Parameters::static_params().max_seqno - num_packets_acked == un_acked_len { - // No more seqnos; must give up. - return None; - } - - assert( num_packets_acked + un_acked_len <= AbstractParameters::static_params().max_seqno ); - let new_seqno = num_packets_acked + un_acked_len + 1; - let sm_new = CSingleMessage::Message { - seqno: new_seqno, - dst: dst.clone_up_to_view(), - m: m.clone_up_to_view(), - }; - assert(sm_new.abstractable()); - assert(sm_new.is_marshalable()) by { - vstd::bytes::lemma_auto_spec_u64_to_from_le_bytes(); - match sm_new { - CSingleMessage::Message { seqno, dst: dst_new, m: m_new } => { - dst_new.lemma_same_views_serialize_the_same(&dst); - m_new.lemma_same_views_serialize_the_same(&m); - assert(sm_new.ghost_serialize().len() <= usize::MAX) by { - // assert(seqno.ghost_serialize().len() == 8); - // assert(dst_new.ghost_serialize().len() == dst.ghost_serialize().len()); - // assert(m_new.ghost_serialize().len() == m.ghost_serialize().len()); - // assert(dst_new.ghost_serialize().len() <= 0x100000 + 8); - match m_new { - CMessage::GetRequest { k } => { - // assert(m_new.ghost_serialize().len() <= 0x100000 + 8); - }, - CMessage::SetRequest { k, v } => { - // assert(m_new.ghost_serialize().len() <= 0x100000 + 8); - }, - CMessage::Reply { k, v } => { - // assert(m_new.ghost_serialize().len() <= 0x100000 + 8); - }, - CMessage::Redirect { k, id } => { - // assert(m_new.ghost_serialize().len() <= 0x100000 + 16); - }, - CMessage::Shard { kr, recipient } => { - // assert(recipient.ghost_serialize().len() <= 0x100000 + 24); - // assert(kr.ghost_serialize().len() <= 0x100000 + 24); - // assert(m_new.ghost_serialize().len() <= 0x100000 * 2); - }, - CMessage::Delegate { range, h } => { - // assert(range.ghost_serialize().len() <= 30); - // assert(h.to_vec().len() <= 100); - // assert(h.is_marshalable()); - // assert(h.ghost_serialize().len() <= crate::marshal_ironsht_specific_v::ckeyhashmap_max_serialized_size()); - reveal(crate::marshal_ironsht_specific_v::ckeyhashmap_max_serialized_size); - }, - }; - } + None => { + assert(Self::option_cpacket_to_set_packet(opt_ack).is_empty()); // this is an unfortunate trigger to need + let rr = ReceiveImplResult::AckOrInvalid { }; + rr }, - _ => {}, } - } - assert forall |sm_alt: 
CSingleMessage| sm_alt@ == sm_new@ implies sm_alt.is_marshalable() by { - sm_alt.lemma_same_views_serialize_the_same(&sm_new); - } - - let mut local_state = CAckState::new(); - let default = CAckState::new(); - - let ghost int_local_state = local_state; // trigger fodder - self.send_state.cack_state_swap(&dst, &mut local_state, default); - local_state.un_acked.push(sm_new.clone_up_to_view()); - - let ghost old_ack_state = ack_state_lookup(dst@, old(self)@.send_state); - assert(local_state@.un_acked =~= old_ack_state.un_acked.push(sm_new@)); - self.send_state.put(&dst, local_state); + }, + CSingleMessage::InvalidMessage { .. } => { + let rr = ReceiveImplResult::AckOrInvalid { }; + // assert( SingleDelivery::receive(old(self)@, self@, pkt@, rr.get_ack()@, rr.get_abstracted_ack_set()) ); + // assert( self.valid() ); + rr + }, + } + } - assert forall |ep: EndPoint| #[trigger] self.send_state@.contains_key(ep@) implies - ep.abstractable() && self.send_state.epmap[&ep].abstractable() by { - if ep@ != dst@ { - assert(old(self).send_state@.contains_key(ep@)); - } - } + /// Translates Impl/SHT/SingleDeliveryModel.i.dfy :: SendSingleCMessage + /// TODO: Andrea points out that this postcondition is unachievable. So we should probably + /// go back to the Ironfleet Dafny approach of returning a `sm: CSingleMessage` and a `should_send: bool`. + /// The issue is that even in the `!should_send` case we still need a field of the (dummy) `sm`, namely + /// its `dst`. + #[verifier::rlimit(15)] + pub fn send_single_cmessage(&mut self, m: &CMessage, dst: &EndPoint) -> (sm: Option< + CSingleMessage, + >) + requires + old(self).valid(), + old(self).abstractable(), + m.abstractable(), + m.message_marshallable(), + m.is_marshalable(), + dst@.valid_physical_address(), + ensures + self.valid(), + match sm { + Some(sm) => { + &&& sm.abstractable() + &&& sm.is_Message() + &&& sm.get_Message_dst()@ == dst@ + &&& SingleDelivery::send_single_message( + old(self)@, + self@, + m@, + dst@, + Some(sm@), + AbstractParameters::static_params(), + ) + &&& sm.is_marshalable() + }, + None => SingleDelivery::send_single_message( + old(self)@, + self@, + m@, + dst@, + None, + AbstractParameters::static_params(), + ), + }, + // TODO: capture the part of send_single_message when should_send == false - assert forall |ep: AbstractEndPoint| #[trigger] self.send_state@.contains_key(ep) implies self.send_state.epmap@[ep].valid(ep) by { - if ep != dst@ { - assert(old(self).send_state@.contains_key(ep)); - assert(self.send_state.epmap@[ep].valid(ep)); - } - else { - assert(self.send_state.epmap@[ep] == local_state); - assert(self.send_state.epmap@[ep].valid(ep)); + { + let (num_packets_acked, un_acked_len) = match self.send_state.get(dst) { + Some(ref cack_state) => { + proof { + if cack_state.un_acked.len() > 0 { + // This is necessary to show that appending our new seqno keeps the list sequential + cack_state.lemma_seqno_in_un_acked_list( + dst@, + (cack_state.un_acked.len() - 1) as int, + ); + } } + (cack_state.num_packets_acked, cack_state.un_acked.len() as u64) + }, + None => { (0, 0) }, + // In the None case, we have no ack state. To meet our AckState::receive_ack assum-ed + // protocol spec, we are forced to not update the send_state; stuffing an + // CAckState::new in there would make spec unmeetable. + }; + if Parameters::static_params().max_seqno - num_packets_acked == un_acked_len { + // No more seqnos; must give up. 
+ return None; + } + assert(num_packets_acked + un_acked_len <= AbstractParameters::static_params().max_seqno); + let new_seqno = num_packets_acked + un_acked_len + 1; + let sm_new = CSingleMessage::Message { + seqno: new_seqno, + dst: dst.clone_up_to_view(), + m: m.clone_up_to_view(), + }; + assert(sm_new.abstractable()); + assert(sm_new.is_marshalable()) by { + vstd::bytes::lemma_auto_spec_u64_to_from_le_bytes(); + match sm_new { + CSingleMessage::Message { seqno, dst: dst_new, m: m_new } => { + dst_new.lemma_same_views_serialize_the_same(&dst); + m_new.lemma_same_views_serialize_the_same(&m); + assert(sm_new.ghost_serialize().len() <= usize::MAX) by { + // assert(seqno.ghost_serialize().len() == 8); + // assert(dst_new.ghost_serialize().len() == dst.ghost_serialize().len()); + // assert(m_new.ghost_serialize().len() == m.ghost_serialize().len()); + // assert(dst_new.ghost_serialize().len() <= 0x100000 + 8); + match m_new { + CMessage::GetRequest { k } => { + // assert(m_new.ghost_serialize().len() <= 0x100000 + 8); + }, + CMessage::SetRequest { k, v } => { + // assert(m_new.ghost_serialize().len() <= 0x100000 + 8); + }, + CMessage::Reply { k, v } => { + // assert(m_new.ghost_serialize().len() <= 0x100000 + 8); + }, + CMessage::Redirect { k, id } => { + // assert(m_new.ghost_serialize().len() <= 0x100000 + 16); + }, + CMessage::Shard { kr, recipient } => { + // assert(recipient.ghost_serialize().len() <= 0x100000 + 24); + // assert(kr.ghost_serialize().len() <= 0x100000 + 24); + // assert(m_new.ghost_serialize().len() <= 0x100000 * 2); + }, + CMessage::Delegate { range, h } => { + // assert(range.ghost_serialize().len() <= 30); + // assert(h.to_vec().len() <= 100); + // assert(h.is_marshalable()); + // assert(h.ghost_serialize().len() <= crate::marshal_ironsht_specific_v::ckeyhashmap_max_serialized_size()); + reveal( + crate::marshal_ironsht_specific_v::ckeyhashmap_max_serialized_size, + ); + }, + }; + } + }, + _ => {}, + } + } + assert forall|sm_alt: CSingleMessage| + sm_alt@ == sm_new@ implies sm_alt.is_marshalable() by { + sm_alt.lemma_same_views_serialize_the_same(&sm_new); + } + let mut local_state = CAckState::new(); + let default = CAckState::new(); + let ghost int_local_state = local_state; // trigger fodder + self.send_state.cack_state_swap(&dst, &mut local_state, default); + local_state.un_acked.push(sm_new.clone_up_to_view()); + let ghost old_ack_state = ack_state_lookup(dst@, old(self)@.send_state); + assert(local_state@.un_acked =~= old_ack_state.un_acked.push(sm_new@)); + self.send_state.put(&dst, local_state); + assert forall|ep: EndPoint| #[trigger] + self.send_state@.contains_key(ep@) implies ep.abstractable() + && self.send_state.epmap[&ep].abstractable() by { + if ep@ != dst@ { + assert(old(self).send_state@.contains_key(ep@)); + } + } + assert forall|ep: AbstractEndPoint| #[trigger] + self.send_state@.contains_key(ep) implies self.send_state.epmap@[ep].valid(ep) by { + if ep != dst@ { + assert(old(self).send_state@.contains_key(ep)); + assert(self.send_state.epmap@[ep].valid(ep)); + } else { + assert(self.send_state.epmap@[ep] == local_state); + assert(self.send_state.epmap@[ep].valid(ep)); } - - assert(self@.send_state =~= - old(self)@.send_state.insert(dst@, AckState{ un_acked: old_ack_state.un_acked.push(sm_new@), .. 
old_ack_state })); - Some(sm_new) } + assert(self@.send_state =~= old(self)@.send_state.insert( + dst@, + AckState { un_acked: old_ack_state.un_acked.push(sm_new@), ..old_ack_state }, + )); + Some(sm_new) + } - /// Translates Impl/SHT/SingleDeliveryModel.i.dfy :: RetransmitUnAckedPackets - /// - /// Does not actually retransmit; returns the packets that should be - /// retransmitted because they are unacked - pub fn retransmit_un_acked_packets(&self, src: &EndPoint) -> (packets: Vec) + /// Translates Impl/SHT/SingleDeliveryModel.i.dfy :: RetransmitUnAckedPackets + /// + /// Does not actually retransmit; returns the packets that should be + /// retransmitted because they are unacked + pub fn retransmit_un_acked_packets(&self, src: &EndPoint) -> (packets: Vec) requires self.valid(), src.abstractable(), ensures - abstractify_seq_of_cpackets_to_set_of_sht_packets(packets@) == self@.un_acked_messages(src@), + abstractify_seq_of_cpackets_to_set_of_sht_packets(packets@) == self@.un_acked_messages( + src@, + ), outbound_packet_seq_is_valid(packets@), outbound_packet_seq_has_correct_srcs(packets@, src@), self@.un_acked_messages(src@) == packets@.map_values(|p: CPacket| p@).to_set(), Self::packets_are_valid_messages(packets@), - { - let mut packets = Vec::new(); - - let dests = self.send_state.epmap.keys(); - let mut dst_i = 0; - - proof { - assert_seqs_equal!( dests@.subrange(0, dst_i as int) == seq![] ); - assert_sets_equal!(packets@.map_values(|p: CPacket| p@).to_set(), set![]); - assert_sets_equal!(dests@.subrange(0, dst_i as int).map_values(|ep: EndPoint| ep@).to_set() == set![]); - self@.lemma_un_acked_messages_for_dests_empty(src@, dests@.subrange(0, dst_i as int).map_values(|ep: EndPoint| ep@).to_set()); - } - - while dst_i < dests.len() + { + let mut packets = Vec::new(); + let dests = self.send_state.epmap.keys(); + let mut dst_i = 0; + proof { + assert_seqs_equal!( dests@.subrange(0, dst_i as int) == seq![] ); + assert_sets_equal!(packets@.map_values(|p: CPacket| p@).to_set(), set![]); + assert_sets_equal!(dests@.subrange(0, dst_i as int).map_values(|ep: EndPoint| ep@).to_set() == set![]); + self@.lemma_un_acked_messages_for_dests_empty( + src@, + dests@.subrange(0, dst_i as int).map_values(|ep: EndPoint| ep@).to_set(), + ); + } + while dst_i < dests.len() invariant - self.valid(), // Everybody hates having to carry everything through here. :v( - dests@.map_values(|ep: EndPoint| ep@).to_set() == self.send_state.epmap@.dom(), // NOTE: hard to figure this one out: this comes from postcondition of .keys(). Losing while context extra sad here because it was very painful to reconstruct. + self.valid(), // Everybody hates having to carry everything through here. :v( + dests@.map_values(|ep: EndPoint| ep@).to_set() == self.send_state.epmap@.dom(), // NOTE: hard to figure this one out: this comes from postcondition of .keys(). Losing while context extra sad here because it was very painful to reconstruct. 
src.abstractable(), 0 <= dst_i <= dests.len(), outbound_packet_seq_is_valid(packets@), outbound_packet_seq_has_correct_srcs(packets@, src@), - packets@.map_values(|p: CPacket| p@).to_set() == - self@.un_acked_messages_for_dests(src@, dests@.subrange(0, dst_i as int).map_values(|ep: EndPoint| ep@).to_set()), + packets@.map_values(|p: CPacket| p@).to_set() == self@.un_acked_messages_for_dests( + src@, + dests@.subrange(0, dst_i as int).map_values(|ep: EndPoint| ep@).to_set(), + ), Self::packets_are_valid_messages(packets@), - { - let ghost packets0_view = packets@; - - let dst = &dests[dst_i]; - assert(dests@.map_values(|ep: EndPoint| ep@)[dst_i as int] == dst@); // OBSERVE - // in principle basically need to call `lemma_flatten_sets_insert`, - // but there's a Set::map in the way that will probably break things - - // The presence of this line seems to hide the trigger loop behavior. - // proof { - // lemma_seq_push_to_set(dests@.subrange(0, dst_i as int).map_values(|ep: EndPoint| ep@), dst@); - // } - self.retransmit_un_acked_packets_for_dst(src, dst, &mut packets); - let ghost dst_i0 = dst_i; - dst_i = dst_i + 1; - - proof { - let depa = dests@.subrange(0, dst_i0 as int); - let depb = dests@.subrange(dst_i0 as int, dst_i as int); - let depc = dests@.subrange(0, dst_i as int); - assert_seqs_equal!(depc == depa + depb); - assert_seqs_equal!( depb == seq![*dst] ); - - lemma_to_set_singleton_auto::(); - lemma_to_set_singleton_auto::(); - lemma_map_values_singleton_auto::(); - lemma_map_set_singleton_auto::>(); - lemma_map_seq_singleton_auto::>(); // was involved in a trigger loop when `set_map_union_auto` also required finite maps - lemma_flatten_sets_union_auto::(); - lemma_to_set_union_auto::(); // somehow helps below, so saith Tej - seq_map_values_concat_auto::(); - map_set_finite_auto::>(); - set_map_union_auto::>(); - flatten_sets_singleton_auto::(); - // assert(packets@.map_values(|p: CPacket| p@).to_set() == - // self@.un_acked_messages_for_dests(src@, dests@.subrange(0, dst_i as int).map_values(|ep: EndPoint| ep@).to_set())); - } - } + { + let ghost packets0_view = packets@; + let dst = &dests[dst_i]; + assert(dests@.map_values(|ep: EndPoint| ep@)[dst_i as int] == dst@); // OBSERVE + // in principle basically need to call `lemma_flatten_sets_insert`, + // but there's a Set::map in the way that will probably break things + // The presence of this line seems to hide the trigger loop behavior. 
+ // proof { + // lemma_seq_push_to_set(dests@.subrange(0, dst_i as int).map_values(|ep: EndPoint| ep@), dst@); + // } + self.retransmit_un_acked_packets_for_dst(src, dst, &mut packets); + let ghost dst_i0 = dst_i; + dst_i = dst_i + 1; proof { - assert_seqs_equal!(dests@.subrange(0, dests@.len() as int), dests@); - assert_sets_equal!(self.send_state.epmap@.dom() == self@.send_state.dom()); // OBSERVE + let depa = dests@.subrange(0, dst_i0 as int); + let depb = dests@.subrange(dst_i0 as int, dst_i as int); + let depc = dests@.subrange(0, dst_i as int); + assert_seqs_equal!(depc == depa + depb); + assert_seqs_equal!( depb == seq![*dst] ); + lemma_to_set_singleton_auto::(); + lemma_to_set_singleton_auto::(); + lemma_map_values_singleton_auto::(); + lemma_map_set_singleton_auto::>(); + lemma_map_seq_singleton_auto::>(); // was involved in a trigger loop when `set_map_union_auto` also required finite maps + lemma_flatten_sets_union_auto::(); + lemma_to_set_union_auto::(); // somehow helps below, so saith Tej + seq_map_values_concat_auto::(); + map_set_finite_auto::>(); + set_map_union_auto::>(); + flatten_sets_singleton_auto::(); + // assert(packets@.map_values(|p: CPacket| p@).to_set() == + // self@.un_acked_messages_for_dests(src@, dests@.subrange(0, dst_i as int).map_values(|ep: EndPoint| ep@).to_set())); } - - packets } + proof { + assert_seqs_equal!(dests@.subrange(0, dests@.len() as int), dests@); + assert_sets_equal!(self.send_state.epmap@.dom() == self@.send_state.dom()); // OBSERVE + } + packets } +} - } +} // verus! } mod single_delivery_state_v { @@ -9507,232 +10905,258 @@ mod single_delivery_state_v { verus! { - /// translates `AckState` (that is, we specialize the message type) - #[verifier::ext_equal] // effing INSAASAAAAANNE - pub struct CAckState { - pub num_packets_acked: u64, - pub un_acked: Vec, - } +/// translates `AckState` (that is, we specialize the message type) +#[verifier::ext_equal] // effing INSAASAAAAANNE +pub struct CAckState { + pub num_packets_acked: u64, + pub un_acked: Vec, +} - impl CAckState { - pub fn new() -> (e: CAckState) +impl CAckState { + pub fn new() -> (e: CAckState) ensures e.num_packets_acked == 0, e.un_acked.len() == 0, e@ =~= AckState::new(), - { - CAckState{ num_packets_acked: 0, un_acked: vec![] } - // let e = CAckState{ num_packets_acked: 0, un_acked: vec![] }; - // assert( e@.num_packets_acked == AckState::new().num_packets_acked ); - // assert( e@.un_acked =~= AckState::new().un_acked ); - // e + { + CAckState { + num_packets_acked: 0, + un_acked: vec![], } + // let e = CAckState{ num_packets_acked: 0, un_acked: vec![] }; + // assert( e@.num_packets_acked == AckState::new().num_packets_acked ); + // assert( e@.un_acked =~= AckState::new().un_acked ); + // e - pub open spec fn view(&self) -> AckState { - AckState { - num_packets_acked: self.num_packets_acked as nat, - un_acked: abstractify_cmessage_seq(self.un_acked@), - } + } + + pub open spec fn view(&self) -> AckState { + AckState { + num_packets_acked: self.num_packets_acked as nat, + un_acked: abstractify_cmessage_seq(self.un_acked@), } + } - pub fn clone_up_to_view(&self) -> (o:Self) - ensures o@ == self@ + pub fn clone_up_to_view(&self) -> (o: Self) + ensures + o@ == self@, + { + let mut un_acked: Vec = Vec::new(); + let mut i = 0; + while i < self.un_acked.len() + invariant + i <= self.un_acked.len(), + un_acked@.len() == i as nat, + forall|j: int| + 0 <= j < i as nat ==> #[trigger] (un_acked@[j]@) == self.un_acked@[j]@, { - let mut un_acked: Vec = Vec::new(); - let mut i = 0; - while 
i < self.un_acked.len() - invariant - i <= self.un_acked.len(), - un_acked@.len() == i as nat, - forall |j: int| 0 <= j < i as nat ==> #[trigger] (un_acked@[j]@) == self.un_acked@[j]@ - { - un_acked.push(self.un_acked[i].clone_up_to_view()); - i = i + 1; - } - proof { - assert_seqs_equal!(abstractify_cmessage_seq(un_acked@) == abstractify_cmessage_seq(self.un_acked@)); - } - CAckState { - num_packets_acked: self.num_packets_acked, - un_acked, - } + un_acked.push(self.un_acked[i].clone_up_to_view()); + i = i + 1; } - - pub open spec fn abstractable(&self) -> bool { - forall |i: int| 0 <= i < self.un_acked.len() ==> #[trigger] self.un_acked[i].abstractable() + proof { + assert_seqs_equal!(abstractify_cmessage_seq(un_acked@) == abstractify_cmessage_seq(self.un_acked@)); } + CAckState { num_packets_acked: self.num_packets_acked, un_acked } + } - pub open spec fn no_acks_in_unacked(list: Seq) -> bool { - forall |i: int| 0 <= i < list.len() ==> #[trigger] list[i].is_Message() - } + pub open spec fn abstractable(&self) -> bool { + forall|i: int| 0 <= i < self.un_acked.len() ==> #[trigger] self.un_acked[i].abstractable() + } - pub open spec fn un_acked_list_sequential(list: Seq) -> bool - recommends Self::no_acks_in_unacked(list) - { - forall |i: int, j: int| #![auto] 0 <= i && j == i + 1 && j < list.len() ==> - list[i].get_Message_seqno() as int + 1 == list[j].get_Message_seqno() as int - } + pub open spec fn no_acks_in_unacked(list: Seq) -> bool { + forall|i: int| 0 <= i < list.len() ==> #[trigger] list[i].is_Message() + } - pub open spec fn un_acked_valid(msg: &CSingleMessage) -> bool { - &&& msg.is_Message() - &&& msg.abstractable() - &&& msg.is_marshalable() - } + pub open spec fn un_acked_list_sequential(list: Seq) -> bool + recommends + Self::no_acks_in_unacked(list), + { + forall|i: int, j: int| + #![auto] + 0 <= i && j == i + 1 && j < list.len() ==> list[i].get_Message_seqno() as int + 1 + == list[j].get_Message_seqno() as int + } - pub open spec fn un_acked_list_valid(list: Seq) -> bool { - &&& forall |i:int| 0 <= i < list.len() ==> #[trigger] Self::un_acked_valid(&list[i]) - &&& Self::un_acked_list_sequential(list) - } + pub open spec fn un_acked_valid(msg: &CSingleMessage) -> bool { + &&& msg.is_Message() + &&& msg.abstractable() + &&& msg.is_marshalable() + } - pub open spec fn un_acked_list_valid_for_dst(list: Seq, dst: AbstractEndPoint) -> bool { - &&& Self::un_acked_list_valid(list) - &&& forall |i:int| 0 <= i < list.len() ==> (#[trigger] list[i].get_Message_dst())@ == dst - } + pub open spec fn un_acked_list_valid(list: Seq) -> bool { + &&& forall|i: int| 0 <= i < list.len() ==> #[trigger] Self::un_acked_valid(&list[i]) + &&& Self::un_acked_list_sequential(list) + } - pub open spec fn valid_list(msgs: Seq, num_packets_acked: int, dst: AbstractEndPoint) -> bool { - &&& Self::un_acked_list_valid_for_dst(msgs, dst) - &&& num_packets_acked as int + msgs.len() as int <= AbstractParameters::static_params().max_seqno - &&& (msgs.len() > 0 ==> msgs[0].get_Message_seqno() == num_packets_acked + 1) - } + pub open spec fn un_acked_list_valid_for_dst( + list: Seq, + dst: AbstractEndPoint, + ) -> bool { + &&& Self::un_acked_list_valid(list) + &&& forall|i: int| 0 <= i < list.len() ==> (#[trigger] list[i].get_Message_dst())@ == dst + } - /// Translates CAckStateIsValid - pub open spec fn valid(&self, dst: AbstractEndPoint) -> bool { - &&& self.abstractable() - &&& Self::valid_list(self.un_acked@, self.num_packets_acked as int, dst) - } + pub open spec fn valid_list( + msgs: Seq, + 
num_packets_acked: int, + dst: AbstractEndPoint, + ) -> bool { + &&& Self::un_acked_list_valid_for_dst(msgs, dst) + &&& num_packets_acked as int + msgs.len() as int + <= AbstractParameters::static_params().max_seqno + &&& (msgs.len() > 0 ==> msgs[0].get_Message_seqno() == num_packets_acked + 1) + } - pub proof fn lemma_seqno_in_un_acked_list(&self, dst: AbstractEndPoint, k: int) - requires - self.valid(dst), - 0 <= k < self.un_acked@.len(), - ensures - self.un_acked@[k].get_Message_seqno() == self.num_packets_acked + k + 1 - decreases - k - { - if k > 0 { - self.lemma_seqno_in_un_acked_list(dst, k - 1); - } + /// Translates CAckStateIsValid + pub open spec fn valid(&self, dst: AbstractEndPoint) -> bool { + &&& self.abstractable() + &&& Self::valid_list(self.un_acked@, self.num_packets_acked as int, dst) + } + + pub proof fn lemma_seqno_in_un_acked_list(&self, dst: AbstractEndPoint, k: int) + requires + self.valid(dst), + 0 <= k < self.un_acked@.len(), + ensures + self.un_acked@[k].get_Message_seqno() == self.num_packets_acked + k + 1, + decreases k, + { + if k > 0 { + self.lemma_seqno_in_un_acked_list(dst, k - 1); } + } - proof fn abstractify_distributes_over_skip(cm: Seq, i: int) + proof fn abstractify_distributes_over_skip(cm: Seq, i: int) requires 0 <= i <= cm.len(), ensures abstractify_cmessage_seq(cm.skip(i)) =~= abstractify_cmessage_seq(cm).skip(i), - decreases i - { - if 0 < i { - Self::abstractify_distributes_over_skip(cm.subrange(1, cm.len() as int), i-1); - } + decreases i, + { + if 0 < i { + Self::abstractify_distributes_over_skip(cm.subrange(1, cm.len() as int), i - 1); } + } - pub fn truncate(&mut self, seqno_acked: u64, Ghost(dst): Ghost) + pub fn truncate(&mut self, seqno_acked: u64, Ghost(dst): Ghost) requires old(self).valid(dst), old(self).num_packets_acked <= seqno_acked, ensures self.valid(dst), - abstractify_cmessage_seq(self.un_acked@) == truncate_un_ack_list(abstractify_cmessage_seq(old(self).un_acked@), seqno_acked as nat), + abstractify_cmessage_seq(self.un_acked@) == truncate_un_ack_list( + abstractify_cmessage_seq(old(self).un_acked@), + seqno_acked as nat, + ), self.un_acked@.len() > 0 ==> self.un_acked[0]@.get_Message_seqno() == seqno_acked + 1, self.num_packets_acked == seqno_acked, - { - let mut i: usize = 0; - assert( self.un_acked@.skip(0 as int) =~= self.un_acked@ ); - - while (i < self.un_acked.len() - && match self.un_acked[i] { - CSingleMessage::Message{seqno, ..} => { seqno <= seqno_acked }, - _ => { - assert( Self::un_acked_valid(&self.un_acked[i as int]) ); - assert(false); - true - }, - }) + { + let mut i: usize = 0; + assert(self.un_acked@.skip(0 as int) =~= self.un_acked@); + while (i < self.un_acked.len() && match self.un_acked[i] { + CSingleMessage::Message { seqno, .. 
} => { seqno <= seqno_acked }, + _ => { + assert(Self::un_acked_valid(&self.un_acked[i as int])); + assert(false); + true + }, + }) invariant self.valid(dst), self == old(self), i <= self.un_acked.len(), - i < self.un_acked.len() ==> self.un_acked[i as int].get_Message_seqno() <= seqno_acked + 1, - forall |j: int| #![auto] 0 <= j < i ==> self.un_acked[j].get_Message_seqno() <= seqno_acked, + i < self.un_acked.len() ==> self.un_acked[i as int].get_Message_seqno() + <= seqno_acked + 1, + forall|j: int| + #![auto] + 0 <= j < i ==> self.un_acked[j].get_Message_seqno() <= seqno_acked, Self::valid_list(self.un_acked@.skip(i as int), self.num_packets_acked + i, dst), - truncate_un_ack_list(abstractify_cmessage_seq(self.un_acked@.skip(i as int)), seqno_acked as nat) - == truncate_un_ack_list(abstractify_cmessage_seq(old(self).un_acked@), seqno_acked as nat), + truncate_un_ack_list( + abstractify_cmessage_seq(self.un_acked@.skip(i as int)), + seqno_acked as nat, + ) == truncate_un_ack_list( + abstractify_cmessage_seq(old(self).un_acked@), + seqno_acked as nat, + ), self.num_packets_acked + i <= seqno_acked, - { - assert( self.un_acked@.skip(i as int).skip(1) =~= self.un_acked@.skip((i + 1) as int) ); - i = i + 1; - - proof { Self::abstractify_distributes_over_skip(self.un_acked@.skip(i - 1 as int), 1); } + { + assert(self.un_acked@.skip(i as int).skip(1) =~= self.un_acked@.skip((i + 1) as int)); + i = i + 1; + proof { + Self::abstractify_distributes_over_skip(self.un_acked@.skip(i - 1 as int), 1); } - - self.num_packets_acked = seqno_acked; - self.un_acked = self.un_acked.split_off(i); // snip! } + self.num_packets_acked = seqno_acked; + self.un_acked = self.un_acked.split_off(i); // snip! } +} - pub struct CTombstoneTable { - pub epmap: endpoint_hashmap_t::HashMap, - } +pub struct CTombstoneTable { + pub epmap: endpoint_hashmap_t::HashMap, +} - impl CTombstoneTable { - pub open spec fn abstractable(&self) -> bool { - forall |k: AbstractEndPoint| #[trigger] self@.contains_key(k) ==> k.valid_physical_address() - } +impl CTombstoneTable { + pub open spec fn abstractable(&self) -> bool { + forall|k: AbstractEndPoint| #[trigger] self@.contains_key(k) ==> k.valid_physical_address() + } - /// Since I'm a map, I already have a simple view(), hence the special name. - pub open spec fn view(&self) -> TombstoneTable { - self.epmap@.map_values(|v: u64| v as nat) - } + /// Since I'm a map, I already have a simple view(), hence the special name. 
+ pub open spec fn view(&self) -> TombstoneTable { + self.epmap@.map_values(|v: u64| v as nat) + } - /// Translates Impl/SHT/SingleDeliveryModel.i CTombstoneTableLookup - pub fn lookup(&self, src: &EndPoint) -> (last_seqno: u64) + /// Translates Impl/SHT/SingleDeliveryModel.i CTombstoneTableLookup + pub fn lookup(&self, src: &EndPoint) -> (last_seqno: u64) ensures last_seqno as int == tombstone_table_lookup(src@, self@), - { - match self.epmap.get(src) { - Some(v) => *v, - _ => 0, - } + { + match self.epmap.get(src) { + Some(v) => *v, + _ => 0, } + } - /// - pub fn insert(&mut self, src: &EndPoint, last_seqno: u64) + /// + pub fn insert(&mut self, src: &EndPoint, last_seqno: u64) requires old(self).abstractable(), src@.valid_physical_address(), ensures self@ =~= old(self)@.insert(src@, last_seqno as nat), self.abstractable(), - { - self.epmap.insert(src, last_seqno); - assert( forall |k: AbstractEndPoint| #[trigger] self@.contains_key(k) ==> old(self)@.contains_key(k) || k == src@ ); - } + { + self.epmap.insert(src, last_seqno); + assert(forall|k: AbstractEndPoint| #[trigger] + self@.contains_key(k) ==> old(self)@.contains_key(k) || k == src@); } +} - pub struct CSendState { - pub epmap: endpoint_hashmap_t::HashMap - } +pub struct CSendState { + pub epmap: endpoint_hashmap_t::HashMap, +} - impl CSendState { - /// CSendStateIsAbstractable - pub open spec fn abstractable(&self) -> bool { - forall |ep: EndPoint| #[trigger] self@.contains_key(ep@) ==> ep.abstractable() && self.epmap[&ep].abstractable() - // NB ignoring the "ReverseKey" stuff from GenericRefinement.MapIsAbstractable - } +impl CSendState { + /// CSendStateIsAbstractable + pub open spec fn abstractable(&self) -> bool { + forall|ep: EndPoint| #[trigger] + self@.contains_key(ep@) ==> ep.abstractable() + && self.epmap[&ep].abstractable() + // NB ignoring the "ReverseKey" stuff from GenericRefinement.MapIsAbstractable - // AbstractifyCSendStateToSendState is implied by the second type argument to HashMap. A - // consequence is that you don't get recommends on view. + } - /// CSendStateIsValid - pub open spec fn valid(&self) -> bool { - &&& self.abstractable() - &&& forall |ep: AbstractEndPoint| #[trigger] self@.contains_key(ep) ==> self.epmap@[ep].valid(ep) - } + // AbstractifyCSendStateToSendState is implied by the second type argument to HashMap. A + // consequence is that you don't get recommends on view. 
+ /// CSendStateIsValid + pub open spec fn valid(&self) -> bool { + &&& self.abstractable() + &&& forall|ep: AbstractEndPoint| #[trigger] + self@.contains_key(ep) ==> self.epmap@[ep].valid(ep) + } - pub open spec fn view(&self) -> SendState { - self.epmap@.map_values(|v: CAckState| v@) - } + pub open spec fn view(&self) -> SendState { + self.epmap@.map_values(|v: CAckState| v@) + } // /// Translates CAckStateLookup // pub fn cack_state_lookup(&self, src: &EndPoint) -> (ack_state: CAckState) @@ -9746,123 +11170,134 @@ mod single_delivery_state_v { // None => CAckState { num_packets_acked: 0, un_acked: Vec::new(), } // } // } - - pub fn get(&self, src: &EndPoint) -> (value: Option<&CAckState>) + pub fn get(&self, src: &EndPoint) -> (value: Option<&CAckState>) ensures - value == match HashMap::get_spec(self.epmap@, src@) { Some(v) => Some(&v), None => None }, - value is Some ==> self@.contains_key(src@), // helpfully trigger valid - { - self.epmap.get(src) - } + value == match HashMap::get_spec(self.epmap@, src@) { + Some(v) => Some(&v), + None => None, + }, + value is Some ==> self@.contains_key(src@), // helpfully trigger valid + { + self.epmap.get(src) + } - /// Translates CAckStateLookup. Swappy semantics because we can't return mutable - /// refs in Verus yet. - pub fn cack_state_swap(&mut self, src: &EndPoint, ack_state: &mut CAckState, default: CAckState) + /// Translates CAckStateLookup. Swappy semantics because we can't return mutable + /// refs in Verus yet. + pub fn cack_state_swap(&mut self, src: &EndPoint, ack_state: &mut CAckState, default: CAckState) requires old(self).valid(), src.abstractable(), ensures - HashMap::swap_spec(old(self).epmap@, self.epmap@, src@, *old(ack_state), *ack_state, default), - { - self.epmap.swap(src, ack_state, default) - } + HashMap::swap_spec( + old(self).epmap@, + self.epmap@, + src@, + *old(ack_state), + *ack_state, + default, + ), + { + self.epmap.swap(src, ack_state, default) + } - pub fn put(&mut self, src: &EndPoint, value: CAckState) + pub fn put(&mut self, src: &EndPoint, value: CAckState) ensures HashMap::put_spec(old(self).epmap@, self.epmap@, src@, value), - { - self.epmap.put(src, value) - } + { + self.epmap.put(src, value) } +} - /// Translates CSingleDeliveryAcct - pub struct CSingleDelivery { - pub receive_state: CTombstoneTable, - pub send_state: CSendState, - } +/// Translates CSingleDeliveryAcct +pub struct CSingleDelivery { + pub receive_state: CTombstoneTable, + pub send_state: CSendState, +} - impl CSingleDelivery { - pub fn empty() -> (out:Self) - ensures out@ == SingleDelivery::::init() - { - let result = CSingleDelivery { - receive_state: CTombstoneTable{epmap: HashMap::new()}, - send_state: CSendState{epmap: HashMap::new()}, - }; - proof { - assert_maps_equal!(result.receive_state@, SingleDelivery::::init().receive_state); - assert_maps_equal!(result.send_state@, SingleDelivery::::init().send_state); - } - result +impl CSingleDelivery { + pub fn empty() -> (out: Self) + ensures + out@ == SingleDelivery::::init(), + { + let result = CSingleDelivery { + receive_state: CTombstoneTable { epmap: HashMap::new() }, + send_state: CSendState { epmap: HashMap::new() }, + }; + proof { + assert_maps_equal!(result.receive_state@, SingleDelivery::::init().receive_state); + assert_maps_equal!(result.send_state@, SingleDelivery::::init().send_state); } + result + } - /// Translates CSingleDeliveryAccountIsValid - pub open spec fn abstractable(&self) -> bool { - &&& self.receive_state.abstractable() - &&& self.send_state.abstractable() - } + 
/// Translates CSingleDeliveryAccountIsValid + pub open spec fn abstractable(&self) -> bool { + &&& self.receive_state.abstractable() + &&& self.send_state.abstractable() + } - /// Translates AbstractifyCSingleDeliveryAcctToSingleDeliveryAcct - pub open spec fn view(self) -> SingleDelivery { - SingleDelivery { - receive_state: self.receive_state@, - send_state: self.send_state@, - } - } + /// Translates AbstractifyCSingleDeliveryAcctToSingleDeliveryAcct + pub open spec fn view(self) -> SingleDelivery { + SingleDelivery { receive_state: self.receive_state@, send_state: self.send_state@ } + } - /// Translates CSingleDeliveryAccountIsValid - pub open spec fn valid(&self) -> bool { - &&& self.abstractable() - &&& self.send_state.valid() - } + /// Translates CSingleDeliveryAccountIsValid + pub open spec fn valid(&self) -> bool { + &&& self.abstractable() + &&& self.send_state.valid() + } - /// Extend a un_acked_messages_for_dest_up_to fact from i to i+1. - /// - /// Not a translation - helper lemma to prove retransmit_un_acked_packets_for_dst - pub proof fn un_acked_messages_extend(&self, src: AbstractEndPoint, dst: AbstractEndPoint, i: nat) - requires - self@.send_state.contains_key(dst), - i < self@.send_state[dst].un_acked.len(), - self.send_state.valid() - ensures - self@.un_acked_messages_for_dest_up_to(src, dst, i+1) == - self@.un_acked_messages_for_dest_up_to(src, dst, i).insert( - Packet { - src, - dst, - msg: self@.send_state[dst].un_acked[i as int] - }) - { - let packet = Packet { - src, - dst, - msg: self@.send_state[dst].un_acked[i as int] - }; - assert(self.send_state.epmap@[dst].valid(dst)); - let un_acked: Seq = self.send_state.epmap@[dst].un_acked@; - let un_acked_at: Seq> = self@.send_state[dst].un_acked; - assert(CAckState::un_acked_list_valid(un_acked)); - assert(CAckState::un_acked_valid(&un_acked[i as int])); - // assert(un_acked[i as int]@ == un_acked_at[i as int]); - assert(un_acked_at[i as int].is_Message()); - assert_sets_equal!( + /// Extend a un_acked_messages_for_dest_up_to fact from i to i+1. 
+ /// + /// Not a translation - helper lemma to prove retransmit_un_acked_packets_for_dst + pub proof fn un_acked_messages_extend( + &self, + src: AbstractEndPoint, + dst: AbstractEndPoint, + i: nat, + ) + requires + self@.send_state.contains_key(dst), + i < self@.send_state[dst].un_acked.len(), + self.send_state.valid(), + ensures + self@.un_acked_messages_for_dest_up_to(src, dst, i + 1) + == self@.un_acked_messages_for_dest_up_to(src, dst, i).insert( + Packet { src, dst, msg: self@.send_state[dst].un_acked[i as int] }, + ), + { + let packet = Packet { src, dst, msg: self@.send_state[dst].un_acked[i as int] }; + assert(self.send_state.epmap@[dst].valid(dst)); + let un_acked: Seq = self.send_state.epmap@[dst].un_acked@; + let un_acked_at: Seq> = self@.send_state[dst].un_acked; + assert(CAckState::un_acked_list_valid(un_acked)); + assert(CAckState::un_acked_valid(&un_acked[i as int])); + // assert(un_acked[i as int]@ == un_acked_at[i as int]); + assert(un_acked_at[i as int].is_Message()); + assert_sets_equal!( self@.un_acked_messages_for_dest_up_to(src, dst, i+1) == self@.un_acked_messages_for_dest_up_to(src, dst, i).insert(packet) ); - } } +} - impl SingleDelivery { - pub proof fn lemma_un_acked_messages_for_dests_empty(&self, src: AbstractEndPoint, dests: Set) - requires dests == Set::::empty() - ensures self.un_acked_messages_for_dests(src, dests) == Set::::empty() - { - assert_sets_equal!(dests.map(|dst: AbstractEndPoint| self.un_acked_messages_for_dest(src, dst)) == set![]); - assert_sets_equal!(self.un_acked_messages_for_dests(src, dests) == set![]); - } +impl SingleDelivery { + pub proof fn lemma_un_acked_messages_for_dests_empty( + &self, + src: AbstractEndPoint, + dests: Set, + ) + requires + dests == Set::::empty(), + ensures + self.un_acked_messages_for_dests(src, dests) == Set::::empty(), + { + assert_sets_equal!(dests.map(|dst: AbstractEndPoint| self.un_acked_messages_for_dest(src, dst)) == set![]); + assert_sets_equal!(self.un_acked_messages_for_dests(src, dests) == set![]); } +} - } // verus! +} // verus! } mod single_delivery_t { @@ -9886,234 +11321,285 @@ mod single_delivery_t { verus! 
{ - pub type TombstoneTable = Map; +pub type TombstoneTable = Map; - pub open spec fn tombstone_table_lookup(src: AbstractEndPoint, t: TombstoneTable) -> nat - { - if t.dom().contains(src) { t[src] } else { 0 } +pub open spec fn tombstone_table_lookup(src: AbstractEndPoint, t: TombstoneTable) -> nat { + if t.dom().contains(src) { + t[src] + } else { + 0 } +} - pub type AckList = Seq>; +pub type AckList = Seq>; - pub open spec(checked) fn truncate_un_ack_list(un_acked: AckList, seqno_acked: nat) -> Seq> - decreases un_acked.len() - { - if un_acked.len() > 0 && un_acked[0] is Message && un_acked[0].get_Message_seqno() <= seqno_acked { - truncate_un_ack_list(un_acked.skip(1), seqno_acked) - } else { - un_acked - } +pub open spec(checked) fn truncate_un_ack_list(un_acked: AckList, seqno_acked: nat) -> Seq< + SingleMessage, +> + decreases un_acked.len(), +{ + if un_acked.len() > 0 && un_acked[0] is Message && un_acked[0].get_Message_seqno() + <= seqno_acked { + truncate_un_ack_list(un_acked.skip(1), seqno_acked) + } else { + un_acked } +} - #[verifier::ext_equal] // effing INSAASAAAAANNE - pub struct AckState { - pub num_packets_acked: nat, - pub un_acked: AckList, - } +#[verifier::ext_equal] // effing INSAASAAAAANNE +pub struct AckState { + pub num_packets_acked: nat, + pub un_acked: AckList, +} - impl AckState { - //pub spec fn abstractable - pub open spec fn new() -> Self { - AckState{ num_packets_acked: 0, un_acked: seq![] } - } +impl AckState { + //pub spec fn abstractable + pub open spec fn new() -> Self { + AckState { num_packets_acked: 0, un_acked: seq![] } } +} - pub type SendState = Map>; +pub type SendState = Map>; - pub open spec(checked) fn ack_state_lookup(src: AbstractEndPoint, send_state: SendState) -> AckState { - if send_state.contains_key(src) - { send_state[src] } - else - { AckState{num_packets_acked: 0, un_acked: Seq::empty()} } +pub open spec(checked) fn ack_state_lookup( + src: AbstractEndPoint, + send_state: SendState, +) -> AckState { + if send_state.contains_key(src) { + send_state[src] + } else { + AckState { num_packets_acked: 0, un_acked: Seq::empty() } } +} - // NB we renamed SingleDeliveryAcct to SingleDelivery - #[verifier::ext_equal] // effing INSAASAAAAANNE - pub struct SingleDelivery { - pub receive_state: TombstoneTable, - pub send_state: SendState - } +// NB we renamed SingleDeliveryAcct to SingleDelivery +#[verifier::ext_equal] // effing INSAASAAAAANNE +pub struct SingleDelivery { + pub receive_state: TombstoneTable, + pub send_state: SendState, +} - impl SingleDelivery { - pub open spec fn init() -> Self - { - SingleDelivery{ receive_state: Map::empty(), send_state: Map::empty() } - } +impl SingleDelivery { + pub open spec fn init() -> Self { + SingleDelivery { receive_state: Map::empty(), send_state: Map::empty() } + } - /// Protocol/SHT/SingleDelivery.i.dfy NewSingleMessage - pub open spec(checked) fn new_single_message(self, pkt: Packet) -> bool { - let last_seqno = tombstone_table_lookup(pkt.src, self.receive_state); - &&& pkt.msg is Message - &&& pkt.msg.get_Message_seqno() == last_seqno + 1 - } + /// Protocol/SHT/SingleDelivery.i.dfy NewSingleMessage + pub open spec(checked) fn new_single_message(self, pkt: Packet) -> bool { + let last_seqno = tombstone_table_lookup(pkt.src, self.receive_state); + &&& pkt.msg is Message + &&& pkt.msg.get_Message_seqno() == last_seqno + 1 + } - /// Protocol/SHT/SingleDelivery.i.dfy ReceiveAck - pub open spec(checked) fn receive_ack(pre: Self, post: Self, pkt: Packet, acks:Set) -> bool + /// 
Protocol/SHT/SingleDelivery.i.dfy ReceiveAck + pub open spec(checked) fn receive_ack( + pre: Self, + post: Self, + pkt: Packet, + acks: Set, + ) -> bool recommends pkt.msg is Ack, - { - &&& acks.is_empty() - &&& { - let old_ack_state = ack_state_lookup(pkt.src, pre.send_state); - if pkt.msg.get_Ack_ack_seqno() > old_ack_state.num_packets_acked { - let new_ack_state = AckState{ - num_packets_acked: pkt.msg.get_Ack_ack_seqno(), - un_acked: truncate_un_ack_list(old_ack_state.un_acked, pkt.msg.get_Ack_ack_seqno()), - .. old_ack_state}; - post =~= Self{ send_state: pre.send_state.insert(pkt.src, new_ack_state), ..post } - } else { - post == pre - } + { + &&& acks.is_empty() + &&& { + let old_ack_state = ack_state_lookup(pkt.src, pre.send_state); + if pkt.msg.get_Ack_ack_seqno() > old_ack_state.num_packets_acked { + let new_ack_state = AckState { + num_packets_acked: pkt.msg.get_Ack_ack_seqno(), + un_acked: truncate_un_ack_list( + old_ack_state.un_acked, + pkt.msg.get_Ack_ack_seqno(), + ), + ..old_ack_state + }; + post =~= Self { send_state: pre.send_state.insert(pkt.src, new_ack_state), ..post } + } else { + post == pre } } + } - /// Protocol/SHT/SingleDelivery.i.dfy ReceiveRealPacket - pub open spec(checked) fn receive_real_packet(self, post: Self, pkt: Packet) -> bool { - if self.new_single_message(pkt) { - let last_seqno = tombstone_table_lookup(pkt.src, self.receive_state); - // Mark it received - post == Self{ receive_state: self.receive_state.insert(pkt.src, (last_seqno + 1) as nat), ..self } - } else { - post == self + /// Protocol/SHT/SingleDelivery.i.dfy ReceiveRealPacket + pub open spec(checked) fn receive_real_packet(self, post: Self, pkt: Packet) -> bool { + if self.new_single_message(pkt) { + let last_seqno = tombstone_table_lookup(pkt.src, self.receive_state); + // Mark it received + post == Self { + receive_state: self.receive_state.insert(pkt.src, (last_seqno + 1) as nat), + ..self } + } else { + post == self } + } - /// Protocol/SHT/SingleDelivery.i.dfy ShouldAckSingleMessage - pub open spec(checked) fn should_ack_single_message(self, pkt: Packet) -> bool - { - &&& pkt.msg is Message // Don't want to ack acks - &&& { - let last_seqno = tombstone_table_lookup(pkt.src, self.receive_state); - pkt.msg.get_Message_seqno() <= last_seqno - } + /// Protocol/SHT/SingleDelivery.i.dfy ShouldAckSingleMessage + pub open spec(checked) fn should_ack_single_message(self, pkt: Packet) -> bool { + &&& pkt.msg is Message // Don't want to ack acks + + &&& { + let last_seqno = tombstone_table_lookup(pkt.src, self.receive_state); + pkt.msg.get_Message_seqno() <= last_seqno } + } - /// Protocol/SHT/SingleDelivery.i.dfy SendAck - pub open spec(checked) fn send_ack(self, pkt: Packet, ack: Packet, acks:Set) -> bool + /// Protocol/SHT/SingleDelivery.i.dfy SendAck + pub open spec(checked) fn send_ack(self, pkt: Packet, ack: Packet, acks: Set) -> bool recommends self.should_ack_single_message(pkt), - { - &&& ack.msg is Ack - &&& ack.msg.get_Ack_ack_seqno() == pkt.msg.get_Message_seqno() - &&& ack.src == pkt.dst - &&& ack.dst == pkt.src - &&& acks == set![ ack ] - } + { + &&& ack.msg is Ack + &&& ack.msg.get_Ack_ack_seqno() == pkt.msg.get_Message_seqno() + &&& ack.src == pkt.dst + &&& ack.dst == pkt.src + &&& acks == set![ ack ] + } - /// Protocol/SHT/SingleDelivery.i.dfy MaybeAckPacket - pub open spec(checked) fn maybe_ack_packet(pre: Self, pkt: Packet, ack: Packet, acks:Set) -> bool { - if pre.should_ack_single_message(pkt) { - pre.send_ack(pkt, ack, acks) - } else { - acks.is_empty() - } + /// 
Protocol/SHT/SingleDelivery.i.dfy MaybeAckPacket + pub open spec(checked) fn maybe_ack_packet( + pre: Self, + pkt: Packet, + ack: Packet, + acks: Set, + ) -> bool { + if pre.should_ack_single_message(pkt) { + pre.send_ack(pkt, ack, acks) + } else { + acks.is_empty() } + } - /// Protocol/SHT/SingleDelivery.i.dfy ReceiveSingleMessage - pub open spec(checked) fn receive(pre: Self, post: Self, pkt: Packet, ack: Packet, acks:Set) -> bool { - match pkt.msg { - SingleMessage::Ack{ack_seqno: _} => Self::receive_ack(pre, post, pkt, acks), - SingleMessage::Message{seqno, dst: _, m} => { - &&& Self::receive_real_packet(pre, post, pkt) - &&& Self::maybe_ack_packet(post, pkt, ack, acks) - } - SingleMessage::InvalidMessage{} => { - &&& post === pre - &&& acks === Set::empty() - } - } + /// Protocol/SHT/SingleDelivery.i.dfy ReceiveSingleMessage + pub open spec(checked) fn receive( + pre: Self, + post: Self, + pkt: Packet, + ack: Packet, + acks: Set, + ) -> bool { + match pkt.msg { + SingleMessage::Ack { ack_seqno: _ } => Self::receive_ack(pre, post, pkt, acks), + SingleMessage::Message { seqno, dst: _, m } => { + &&& Self::receive_real_packet(pre, post, pkt) + &&& Self::maybe_ack_packet(post, pkt, ack, acks) + }, + SingleMessage::InvalidMessage { } => { + &&& post === pre + &&& acks === Set::empty() + }, } + } - /// Protocol/SHT/SingleDelivery.i.dfy SendSingleMessage - /// NOTE the Verus port modifies this spec to carry the dst - /// as a separate field, so that we can talk about it even in the - /// !should_send (sm is None) case. In the original Dafny spec, - /// sm was always present, and in the !should_send case, only the - /// dst field was meaningful. - pub open spec(checked) fn send_single_message(pre: Self, post: Self, m: MT, dst: AbstractEndPoint, /*out*/ sm: Option>, params: AbstractParameters) -> bool - { - let old_ack_state = ack_state_lookup(dst, pre.send_state); - let new_seqno = old_ack_state.num_packets_acked + old_ack_state.un_acked.len() + 1; - if new_seqno > params.max_seqno { - // Packet shouldn't be sent if we exceed the maximum sequence number - &&& post == pre - &&& sm is None - } else { - &&& sm == Some(SingleMessage::::Message{ - seqno: new_seqno, - m: m, - dst: dst, - }) - &&& post == SingleDelivery { - send_state: pre.send_state.insert(dst, - AckState{ - un_acked: old_ack_state.un_acked.push(sm.unwrap()), - ..old_ack_state }), - ..pre } + /// Protocol/SHT/SingleDelivery.i.dfy SendSingleMessage + /// NOTE the Verus port modifies this spec to carry the dst + /// as a separate field, so that we can talk about it even in the + /// !should_send (sm is None) case. In the original Dafny spec, + /// sm was always present, and in the !should_send case, only the + /// dst field was meaningful. 
+ pub open spec(checked) fn send_single_message( + pre: Self, + post: Self, + m: MT, + dst: AbstractEndPoint, /*out*/ + sm: Option>, + params: AbstractParameters, + ) -> bool { + let old_ack_state = ack_state_lookup(dst, pre.send_state); + let new_seqno = old_ack_state.num_packets_acked + old_ack_state.un_acked.len() + 1; + if new_seqno > params.max_seqno { + // Packet shouldn't be sent if we exceed the maximum sequence number + &&& post == pre + &&& sm is None + } else { + &&& sm == Some(SingleMessage::::Message { seqno: new_seqno, m: m, dst: dst }) + &&& post == SingleDelivery { + send_state: pre.send_state.insert( + dst, + AckState { + un_acked: old_ack_state.un_acked.push(sm.unwrap()), + ..old_ack_state + }, + ), + ..pre } } + } - // Protocol/SHT/SingleDelivery.i.dfy ReceiveNoMessage - pub open spec(checked) fn receive_no_message(pre: Self, post: Self) -> bool - { - post.receive_state == pre.receive_state - } + // Protocol/SHT/SingleDelivery.i.dfy ReceiveNoMessage + pub open spec(checked) fn receive_no_message(pre: Self, post: Self) -> bool { + post.receive_state == pre.receive_state + } - // Protocol/SHT/SingleDelivery.i.dfy SendNoMessage - pub open spec(checked) fn send_no_message(pre: Self, post: Self) -> bool - { - post.send_state == pre.send_state - } + // Protocol/SHT/SingleDelivery.i.dfy SendNoMessage + pub open spec(checked) fn send_no_message(pre: Self, post: Self) -> bool { + post.send_state == pre.send_state } +} - impl SingleDelivery { - pub open spec(checked) fn un_acked_messages_for_dest_up_to(self, src: AbstractEndPoint, dst: AbstractEndPoint, count: nat) -> Set +impl SingleDelivery { + pub open spec(checked) fn un_acked_messages_for_dest_up_to( + self, + src: AbstractEndPoint, + dst: AbstractEndPoint, + count: nat, + ) -> Set recommends self.send_state.contains_key(dst), - count <= self.send_state[dst].un_acked.len() - { - Set::new(|p: Packet| { + count <= self.send_state[dst].un_acked.len(), + { + Set::new( + |p: Packet| + { &&& p.src == src - &&& exists |i: int| { - &&& 0 <= i < count - &&& self.send_state[dst].un_acked[i].is_Message() - &&& p.msg == self.send_state[dst].un_acked[i] - &&& p.dst == p.msg.get_Message_dst() - } - }) - } + &&& exists|i: int| + { + &&& 0 <= i < count + &&& self.send_state[dst].un_acked[i].is_Message() + &&& p.msg == self.send_state[dst].un_acked[i] + &&& p.dst == p.msg.get_Message_dst() + } + }, + ) + } - pub open spec(checked) fn un_acked_messages_for_dest(self, src: AbstractEndPoint, dst: AbstractEndPoint) -> Set + pub open spec(checked) fn un_acked_messages_for_dest( + self, + src: AbstractEndPoint, + dst: AbstractEndPoint, + ) -> Set recommends - self.send_state.contains_key(dst) - { - self.un_acked_messages_for_dest_up_to(src, dst, self.send_state[dst].un_acked.len()) - } - - // TODO(tchajed): I now think this should avoid mapping over a Set and - // instead take dsts: Seq. Currently we convert a list of destinations to be - // iterated over into a set, map over it, and then take the union; we might - // as well remember the order and make use of it the whole way through. The - // only slight cost is that we will need to implement a - // union-of-seq-of-sets, just like in IronFleet. 
- pub open spec fn un_acked_messages_for_dests(self, src: AbstractEndPoint, dsts: Set) -> Set - recommends dsts.subset_of(self.send_state.dom()) - { - flatten_sets( - dsts.map(|dst: AbstractEndPoint| self.un_acked_messages_for_dest(src, dst)) - ) - } + self.send_state.contains_key(dst), + { + self.un_acked_messages_for_dest_up_to(src, dst, self.send_state[dst].un_acked.len()) + } - /// Re-written Protocol/SHT/SingleDelivery.i.dfy UnAckedMessages - pub open spec fn un_acked_messages(self, src: AbstractEndPoint) -> Set - { - self.un_acked_messages_for_dests(src, self.send_state.dom()) - } + // TODO(tchajed): I now think this should avoid mapping over a Set and + // instead take dsts: Seq. Currently we convert a list of destinations to be + // iterated over into a set, map over it, and then take the union; we might + // as well remember the order and make use of it the whole way through. The + // only slight cost is that we will need to implement a + // union-of-seq-of-sets, just like in IronFleet. + pub open spec fn un_acked_messages_for_dests( + self, + src: AbstractEndPoint, + dsts: Set, + ) -> Set + recommends + dsts.subset_of(self.send_state.dom()), + { + flatten_sets(dsts.map(|dst: AbstractEndPoint| self.un_acked_messages_for_dest(src, dst))) } + /// Re-written Protocol/SHT/SingleDelivery.i.dfy UnAckedMessages + pub open spec fn un_acked_messages(self, src: AbstractEndPoint) -> Set { + self.un_acked_messages_for_dests(src, self.send_state.dom()) } } +} // verus! +} + mod single_message_t { #![verus::trusted] use vstd::map::*; @@ -10130,20 +11616,14 @@ mod single_message_t { verus! { - #[is_variant] - pub enum SingleMessage { - Message { - seqno: nat, - dst: AbstractEndPoint, - m: MT, - }, - Ack { - ack_seqno: nat, - }, // I have received everything up to and including seqno - InvalidMessage {}, // ... what parse returns for raw messages we can't otherwise parse into a valid message above - } +#[is_variant] +pub enum SingleMessage { + Message { seqno: nat, dst: AbstractEndPoint, m: MT }, + Ack { ack_seqno: nat }, // I have received everything up to and including seqno + InvalidMessage {}, // ... what parse returns for raw messages we can't otherwise parse into a valid message above +} - } +} // verus! } mod verus_extra { @@ -10152,26 +11632,25 @@ mod verus_extra { verus! { - /// Equivalent to `choose |i:int| low <= i < high && p(i)` except it guarantees to pick the smallest - /// such value `i` where `p(i)` is true. - pub proof fn choose_smallest(low: int, high: int, p: FnSpec(int)->bool) -> (res:int) - requires - exists |i:int| #![trigger(p(i))] low <= i < high && p(i), - ensures - low <= res < high, - p(res), - forall |i:int| #![trigger(p(i))] low <= i < res ==> !p(i), - decreases - high - low, - { - if p(low) { - low - } else { - choose_smallest(low + 1, high, p) - } - } +/// Equivalent to `choose |i:int| low <= i < high && p(i)` except it guarantees to pick the smallest +/// such value `i` where `p(i)` is true. +pub proof fn choose_smallest(low: int, high: int, p: FnSpec(int) -> bool) -> (res: int) + requires + exists|i: int| #![trigger(p(i))] low <= i < high && p(i), + ensures + low <= res < high, + p(res), + forall|i: int| #![trigger(p(i))] low <= i < res ==> !p(i), + decreases high - low, +{ + if p(low) { + low + } else { + choose_smallest(low + 1, high, p) + } +} - } // verus! +} // verus! } pub mod clone_v { @@ -10179,12 +11658,15 @@ mod verus_extra { verus! 
{ - pub trait VerusClone : Sized { - fn clone(&self) -> (o: Self) - ensures o == self; // this is way too restrictive; it kind of demands Copy. But we don't have a View trait yet. :v( - } +pub trait VerusClone: Sized { + fn clone(&self) -> (o: Self) + ensures + o == self, + ; // this is way too restrictive; it kind of demands Copy. But we don't have a View trait yet. :v( - } +} + +} // verus! } pub mod seq_lib_v { @@ -10196,268 +11678,330 @@ mod verus_extra { verus! { - pub proof fn lemma_subrange_subrange(s: Seq, start: int, midsize: int, endsize: int) - requires - 0 <= start <= s.len(), - 0 <= midsize <= endsize <= s.len() - start, - ensures - s.subrange(start, start + endsize).subrange(0, midsize) == s.subrange(start, start + midsize), - { - assert(s.subrange(start, start + endsize).subrange(0, midsize) =~= s.subrange(start, start + midsize)); - } +pub proof fn lemma_subrange_subrange(s: Seq, start: int, midsize: int, endsize: int) + requires + 0 <= start <= s.len(), + 0 <= midsize <= endsize <= s.len() - start, + ensures + s.subrange(start, start + endsize).subrange(0, midsize) == s.subrange( + start, + start + midsize, + ), +{ + assert(s.subrange(start, start + endsize).subrange(0, midsize) =~= s.subrange( + start, + start + midsize, + )); +} +pub proof fn lemma_seq_add_subrange(s: Seq, i: int, j: int, k: int) + requires + 0 <= i <= j <= k <= s.len(), + ensures + s.subrange(i, j) + s.subrange(j, k) == s.subrange(i, k), +{ + assert_seqs_equal!{s.subrange(i, j) + s.subrange(j, k), s.subrange(i, k)} +} - pub proof fn lemma_seq_add_subrange(s: Seq, i: int, j: int, k: int) - requires 0 <= i <= j <= k <= s.len(), - ensures s.subrange(i, j) + s.subrange(j, k) == s.subrange(i, k), - { - assert_seqs_equal!{s.subrange(i, j) + s.subrange(j, k), s.subrange(i, k)} - } - - pub proof fn lemma_seq_fold_left_merge_right_assoc(s: Seq, init: B, f: FnSpec(A) -> B, g: FnSpec(B, B) -> B) - requires - s.len() > 0, - forall |x, y, z| - #[trigger g(x, y)] - g(g(x, y), z) == g(x, g(y, z)), - ensures - g(s.subrange(0, s.len() - 1).fold_left(init, |b: B, a: A| g(b, f(a))), f(s[s.len() - 1])) - == - s.fold_left(init, |b: B, a: A| g(b, f(a))) - decreases s.len(), - { - let emp = Seq::::empty(); - let len: int = s.len() as int; - let i = len - 1; - let s1 = s.subrange(0, len - 1); - let last = s[len - 1]; - let accf = |b: B, a: A| g(b, f(a)); - - let start = s1.fold_left(init, accf); - let all = s.fold_left(init, accf); - - if s1.len() == 0 { - assert(s.len() == 1); - reveal_with_fuel(Seq::fold_left, 2); - reveal_with_fuel(Seq::fold_left, 2); - } else { - reveal_with_fuel(Seq::fold_left, 2); - let head = s[0]; - let tail = s.subrange(1, len); - let p = accf(init, s[0]); - // assert(tail.len() > 0); - // assert(all == tail.fold_left(p, accf)); - // assert(start == s1.fold_left(init, accf)); - // assert(s1.len() > 0); - // assert(start == s1.subrange(1, s1.len() as int).fold_left(p, accf)); - // assert(start == s1.subrange(1, len - 1).fold_left(p, accf)); - assert_seqs_equal!(tail.subrange(0, len - 2) == s1.subrange(1, len - 1)); - // assert(start == tail.subrange(0, tail.len() - 1).fold_left(p, accf)); - // assert(all == tail.fold_left(p, accf)); - lemma_seq_fold_left_merge_right_assoc::(tail, p, f, g); - // assert(all == g(start, f(last))); - } - } +pub proof fn lemma_seq_fold_left_merge_right_assoc( + s: Seq, + init: B, + f: FnSpec(A) -> B, + g: FnSpec(B, B) -> B, +) + requires + s.len() > 0, + forall|x, y, z| #[trigger g(x, y)] g(g(x, y), z) == g(x, g(y, z)), + ensures + g(s.subrange(0, s.len() - 
1).fold_left(init, |b: B, a: A| g(b, f(a))), f(s[s.len() - 1])) + == s.fold_left(init, |b: B, a: A| g(b, f(a))), + decreases s.len(), +{ + let emp = Seq::::empty(); + let len: int = s.len() as int; + let i = len - 1; + let s1 = s.subrange(0, len - 1); + let last = s[len - 1]; + let accf = |b: B, a: A| g(b, f(a)); + let start = s1.fold_left(init, accf); + let all = s.fold_left(init, accf); + if s1.len() == 0 { + assert(s.len() == 1); + reveal_with_fuel(Seq::fold_left, 2); + reveal_with_fuel(Seq::fold_left, 2); + } else { + reveal_with_fuel(Seq::fold_left, 2); + let head = s[0]; + let tail = s.subrange(1, len); + let p = accf(init, s[0]); + // assert(tail.len() > 0); + // assert(all == tail.fold_left(p, accf)); + // assert(start == s1.fold_left(init, accf)); + // assert(s1.len() > 0); + // assert(start == s1.subrange(1, s1.len() as int).fold_left(p, accf)); + // assert(start == s1.subrange(1, len - 1).fold_left(p, accf)); + assert_seqs_equal!(tail.subrange(0, len - 2) == s1.subrange(1, len - 1)); + // assert(start == tail.subrange(0, tail.len() - 1).fold_left(p, accf)); + // assert(all == tail.fold_left(p, accf)); + lemma_seq_fold_left_merge_right_assoc::(tail, p, f, g); + // assert(all == g(start, f(last))); + } +} - pub proof fn lemma_seq_fold_left_sum_right(s: Seq, low: int, f: FnSpec(A) -> int) - requires - s.len() > 0, - ensures - s.subrange(0, s.len() - 1).fold_left(low, |b: int, a: A| b + f(a)) + f(s[s.len() - 1]) - == - s.fold_left(low, |b: int, a: A| b + f(a)) - { - let g = |x: int, y: int| x + y; - fun_ext_2::(|b: int, a: A| b + f(a), |b: int, a: A| g(b, f(a))); - lemma_seq_fold_left_merge_right_assoc::(s, low, f, g); - } +pub proof fn lemma_seq_fold_left_sum_right(s: Seq, low: int, f: FnSpec(A) -> int) + requires + s.len() > 0, + ensures + s.subrange(0, s.len() - 1).fold_left(low, |b: int, a: A| b + f(a)) + f(s[s.len() - 1]) + == s.fold_left(low, |b: int, a: A| b + f(a)), +{ + let g = |x: int, y: int| x + y; + fun_ext_2::(|b: int, a: A| b + f(a), |b: int, a: A| g(b, f(a))); + lemma_seq_fold_left_merge_right_assoc::(s, low, f, g); +} - pub proof fn lemma_seq_fold_left_append_right(s: Seq, prefix: Seq, f: FnSpec(A) -> Seq) - requires s.len() > 0, - ensures - s.subrange(0, s.len() - 1).fold_left(prefix, |sb: Seq, a: A| sb + f(a)) + f(s[s.len() - 1]) - == - s.fold_left(prefix, |sb: Seq, a: A| sb + f(a)) - { - let g = |x: Seq, y: Seq| x + y; - assert forall |x, y, z| #[trigger g(x,y)] g(g(x, y), z) == g(x, g(y, z)) by { - assert_seqs_equal!(g(g(x, y), z) == g(x, g(y, z))); - }; - fun_ext_2::, A, Seq>(|b: Seq, a: A| b + f(a), |b: Seq, a: A| g(b, f(a))); - lemma_seq_fold_left_merge_right_assoc::>(s, prefix, f, g); - } - - pub proof fn lemma_seq_fold_left_append_len_int(s: Seq, prefix: Seq, f: FnSpec(A) -> Seq) - ensures - s.fold_left(prefix, |sb: Seq, a: A| sb + f(a)).len() as int - == - s.fold_left(prefix.len() as int, |i: int, a: A| i + f(a).len() as int), - decreases s.len(), - { - s.lemma_fold_left_alt(prefix, |sb: Seq, a: A| sb + f(a)); - s.lemma_fold_left_alt(prefix.len() as int, |i: int, a: A| i + f(a).len() as int); - if s.len() != 0 { - lemma_seq_fold_left_append_len_int::(s.subrange(1, s.len() as int), prefix + f(s[0]), f); - s.subrange(1, s.len() as int).lemma_fold_left_alt(prefix + f(s[0]), |sb: Seq, a: A| sb + f(a)); - s.subrange(1, s.len() as int).lemma_fold_left_alt(prefix.len() as int + f(s[0]).len() as int, |i: int, a: A| i + f(a).len() as int); - } - } +pub proof fn lemma_seq_fold_left_append_right( + s: Seq, + prefix: Seq, + f: FnSpec(A) -> Seq, +) + requires + s.len() 
> 0, + ensures + s.subrange(0, s.len() - 1).fold_left(prefix, |sb: Seq, a: A| sb + f(a)) + f( + s[s.len() - 1], + ) == s.fold_left(prefix, |sb: Seq, a: A| sb + f(a)), +{ + let g = |x: Seq, y: Seq| x + y; + assert forall|x, y, z| #[trigger g(x,y)] g(g(x, y), z) == g(x, g(y, z)) by { + assert_seqs_equal!(g(g(x, y), z) == g(x, g(y, z))); + }; + fun_ext_2::, A, Seq>(|b: Seq, a: A| b + f(a), |b: Seq, a: A| g(b, f(a))); + lemma_seq_fold_left_merge_right_assoc::>(s, prefix, f, g); +} - pub proof fn lemma_seq_fold_left_sum_len_int_positive(s: Seq, low: nat, f: FnSpec(A) -> Seq) - ensures - s.fold_left(low as int, |acc: int, x: A| acc + f(x).len()) >= 0, - decreases s.len(), - { - s.lemma_fold_left_alt(low as int, |acc: int, x: A| acc + f(x).len()); - if s.len() != 0 { - lemma_seq_fold_left_sum_len_int_positive::(s.subrange(1, s.len() as int), low + f(s[0]).len(), f); - s.subrange(1, s.len() as int).lemma_fold_left_alt(low + f(s[0]).len() as int, |acc: int, x: A| acc + f(x).len()); - } - } +pub proof fn lemma_seq_fold_left_append_len_int( + s: Seq, + prefix: Seq, + f: FnSpec(A) -> Seq, +) + ensures + s.fold_left(prefix, |sb: Seq, a: A| sb + f(a)).len() as int == s.fold_left( + prefix.len() as int, + |i: int, a: A| i + f(a).len() as int, + ), + decreases s.len(), +{ + s.lemma_fold_left_alt(prefix, |sb: Seq, a: A| sb + f(a)); + s.lemma_fold_left_alt(prefix.len() as int, |i: int, a: A| i + f(a).len() as int); + if s.len() != 0 { + lemma_seq_fold_left_append_len_int::( + s.subrange(1, s.len() as int), + prefix + f(s[0]), + f, + ); + s.subrange(1, s.len() as int).lemma_fold_left_alt( + prefix + f(s[0]), + |sb: Seq, a: A| sb + f(a), + ); + s.subrange(1, s.len() as int).lemma_fold_left_alt( + prefix.len() as int + f(s[0]).len() as int, + |i: int, a: A| i + f(a).len() as int, + ); + } +} - pub proof fn lemma_seq_fold_left_append_len_int_le(s: Seq, i: int, low: int, f: FnSpec(A) -> Seq) - requires - 0 <= i <= s.len() as int, - 0 <= low, - ensures - s.fold_left(low, |acc: int, x: A| acc + f(x).len()) >= 0, - s.subrange(0, i).fold_left(low, |acc: int, x: A| acc + f(x).len()) <= - s.fold_left(low, |acc: int, x: A| acc + f(x).len()), - decreases (2 * s.len() - i), - { - lemma_seq_fold_left_sum_len_int_positive::(s, low as nat, f); - let accfl = |acc: int, x: A| acc + f(x).len(); - if s.len() == 0 { - // done - } else if i == s.len() { - assert_seqs_equal!(s.subrange(0, i) == s); - lemma_seq_fold_left_append_len_int_le::(s.subrange(1, s.len() as int), i - 1, low + f(s[0]).len() as int, f); - } else if i == s.len() - 1 { - let fl = |x| f(x).len() as int; - fun_ext_2::(accfl, |acc: int, x: A| acc + fl(x)); - lemma_seq_fold_left_sum_right::(s, low, fl); - } else { - lemma_seq_fold_left_append_len_int_le::(s.subrange(0, s.len() - 1), i, low, f); - lemma_seq_fold_left_append_len_int_le::(s, s.len() - 1, low, f); - assert_seqs_equal!(s.subrange(0, s.len() - 1).subrange(0, i) == s.subrange(0, i)); - } - } +pub proof fn lemma_seq_fold_left_sum_len_int_positive( + s: Seq, + low: nat, + f: FnSpec(A) -> Seq, +) + ensures + s.fold_left(low as int, |acc: int, x: A| acc + f(x).len()) >= 0, + decreases s.len(), +{ + s.lemma_fold_left_alt(low as int, |acc: int, x: A| acc + f(x).len()); + if s.len() != 0 { + lemma_seq_fold_left_sum_len_int_positive::( + s.subrange(1, s.len() as int), + low + f(s[0]).len(), + f, + ); + s.subrange(1, s.len() as int).lemma_fold_left_alt( + low + f(s[0]).len() as int, + |acc: int, x: A| acc + f(x).len(), + ); + } +} - pub proof fn lemma_seq_fold_left_sum_le(s: Seq, init: int, high: int, f: FnSpec(A) 
-> int) - requires - forall |i:int| 0 <= i < s.len() ==> f(s[i]) <= high, - ensures - s.fold_left(init, |acc: int, x: A| acc + f(x)) <= init + s.len() * high, - decreases s.len(), - { - if s.len() != 0 { - lemma_seq_fold_left_sum_le(s.drop_last(), init, high, f); - assert(init + (s.len() - 1) * high + high <= init + s.len() * high) by (nonlinear_arith); - } - } +pub proof fn lemma_seq_fold_left_append_len_int_le( + s: Seq, + i: int, + low: int, + f: FnSpec(A) -> Seq, +) + requires + 0 <= i <= s.len() as int, + 0 <= low, + ensures + s.fold_left(low, |acc: int, x: A| acc + f(x).len()) >= 0, + s.subrange(0, i).fold_left(low, |acc: int, x: A| acc + f(x).len()) <= s.fold_left( + low, + |acc: int, x: A| acc + f(x).len(), + ), + decreases (2 * s.len() - i), +{ + lemma_seq_fold_left_sum_len_int_positive::(s, low as nat, f); + let accfl = |acc: int, x: A| acc + f(x).len(); + if s.len() == 0 { + // done + } else if i == s.len() { + assert_seqs_equal!(s.subrange(0, i) == s); + lemma_seq_fold_left_append_len_int_le::( + s.subrange(1, s.len() as int), + i - 1, + low + f(s[0]).len() as int, + f, + ); + } else if i == s.len() - 1 { + let fl = |x| f(x).len() as int; + fun_ext_2::(accfl, |acc: int, x: A| acc + fl(x)); + lemma_seq_fold_left_sum_right::(s, low, fl); + } else { + lemma_seq_fold_left_append_len_int_le::(s.subrange(0, s.len() - 1), i, low, f); + lemma_seq_fold_left_append_len_int_le::(s, s.len() - 1, low, f); + assert_seqs_equal!(s.subrange(0, s.len() - 1).subrange(0, i) == s.subrange(0, i)); + } +} - pub proof fn lemma_if_everything_in_seq_satisfies_filter_then_filter_is_identity(s: Seq, pred: FnSpec(A) -> bool) - requires forall |i: int| 0 <= i && i < s.len() ==> pred(s[i]) - ensures s.filter(pred) == s - decreases s.len() - { - reveal(Seq::filter); - if s.len() != 0 { - let subseq = s.drop_last(); - lemma_if_everything_in_seq_satisfies_filter_then_filter_is_identity(subseq, pred); - assert_seqs_equal!(s, subseq.push(s.last())); - } - } +pub proof fn lemma_seq_fold_left_sum_le(s: Seq, init: int, high: int, f: FnSpec(A) -> int) + requires + forall|i: int| 0 <= i < s.len() ==> f(s[i]) <= high, + ensures + s.fold_left(init, |acc: int, x: A| acc + f(x)) <= init + s.len() * high, + decreases s.len(), +{ + if s.len() != 0 { + lemma_seq_fold_left_sum_le(s.drop_last(), init, high, f); + assert(init + (s.len() - 1) * high + high <= init + s.len() * high) by (nonlinear_arith); + } +} - pub proof fn lemma_if_nothing_in_seq_satisfies_filter_then_filter_result_is_empty(s: Seq, pred: FnSpec(A) -> bool) - requires forall |i: int| 0 <= i && i < s.len() ==> !pred(s[i]) - ensures s.filter(pred) =~= Seq::::empty() - decreases s.len() - { - reveal(Seq::filter); - if s.len() != 0 { - let subseq = s.drop_last(); - lemma_if_nothing_in_seq_satisfies_filter_then_filter_result_is_empty(subseq, pred); - assert_seqs_equal!(s, subseq.push(s.last())); - } - } +pub proof fn lemma_if_everything_in_seq_satisfies_filter_then_filter_is_identity( + s: Seq, + pred: FnSpec(A) -> bool, +) + requires + forall|i: int| 0 <= i && i < s.len() ==> pred(s[i]), + ensures + s.filter(pred) == s, + decreases s.len(), +{ + reveal(Seq::filter); + if s.len() != 0 { + let subseq = s.drop_last(); + lemma_if_everything_in_seq_satisfies_filter_then_filter_is_identity(subseq, pred); + assert_seqs_equal!(s, subseq.push(s.last())); + } +} - pub proof fn lemma_filter_skip_rejected(s: Seq, pred: FnSpec(A) -> bool, i: int) - requires - 0 <= i <= s.len(), - forall |j| 0 <= j < i ==> !pred(s[j]), - ensures - s.filter(pred) == s.skip(i).filter(pred) - 
decreases - s.len() - { - reveal(Seq::filter); - if s.len() == 0 { - assert(s.skip(i) =~= s); - } - else if i < s.len() { - assert(s.skip(i).drop_last() =~= s.drop_last().skip(i)); - lemma_filter_skip_rejected(s.drop_last(), pred, i); - } - else { - assert(s.skip(i) =~= s.drop_last().skip(i - 1)); - lemma_filter_skip_rejected(s.drop_last(), pred, i - 1); - } - } +pub proof fn lemma_if_nothing_in_seq_satisfies_filter_then_filter_result_is_empty( + s: Seq, + pred: FnSpec(A) -> bool, +) + requires + forall|i: int| 0 <= i && i < s.len() ==> !pred(s[i]), + ensures + s.filter(pred) =~= Seq::::empty(), + decreases s.len(), +{ + reveal(Seq::filter); + if s.len() != 0 { + let subseq = s.drop_last(); + lemma_if_nothing_in_seq_satisfies_filter_then_filter_result_is_empty(subseq, pred); + assert_seqs_equal!(s, subseq.push(s.last())); + } +} - pub proof fn lemma_fold_left_on_equiv_seqs(s1: Seq, s2: Seq, eq: FnSpec(A, A) -> bool, init: B, f: FnSpec(B, A) -> B) - requires - s1.len() == s2.len(), - (forall |i: int| 0 <= i < s1.len() ==> eq(s1[i], s2[i])), - (forall |b: B, a1: A, a2: A| #[trigger] eq(a1, a2) ==> #[trigger] f(b, a1) == f(b, a2)), - ensures - s1.fold_left(init, f) == s2.fold_left(init, f) - decreases s1.len(), - { - reveal(Seq::fold_left); - if s1.len() != 0 { - lemma_fold_left_on_equiv_seqs(s1.drop_last(), s2.drop_last(), eq, init, f); - } - } +pub proof fn lemma_filter_skip_rejected(s: Seq, pred: FnSpec(A) -> bool, i: int) + requires + 0 <= i <= s.len(), + forall|j| 0 <= j < i ==> !pred(s[j]), + ensures + s.filter(pred) == s.skip(i).filter(pred), + decreases s.len(), +{ + reveal(Seq::filter); + if s.len() == 0 { + assert(s.skip(i) =~= s); + } else if i < s.len() { + assert(s.skip(i).drop_last() =~= s.drop_last().skip(i)); + lemma_filter_skip_rejected(s.drop_last(), pred, i); + } else { + assert(s.skip(i) =~= s.drop_last().skip(i - 1)); + lemma_filter_skip_rejected(s.drop_last(), pred, i - 1); + } +} - pub proof fn lemma_fold_left_append_merge(s1: Seq, s2: Seq, f: FnSpec(A) -> Seq) - ensures - (s1 + s2).fold_left(Seq::empty(), |acc: Seq, a: A| acc + f(a)) - == - s1.fold_left(Seq::empty(), |acc: Seq, a: A| acc + f(a)) - + - s2.fold_left(Seq::empty(), |acc: Seq, a: A| acc + f(a)) - decreases - s1.len() + s2.len() - { - let e = Seq::::empty(); - let af = |acc: Seq, a: A| acc + f(a); - let fl = |s: Seq| s.fold_left(e, af); - if s2.len() == 0 { - assert(s1 + s2 =~= s1); - assert(fl(s1) =~= fl(s1) + e); - } else { - lemma_fold_left_append_merge(s1, s2.drop_last(), f); - assert((s1 + s2).drop_last() =~= s1 + s2.drop_last()); - assert((fl(s1) + fl(s2.drop_last())) + f(s2.last()) =~= fl(s1) + (fl(s2.drop_last()) + f(s2.last()))); - } - } +pub proof fn lemma_fold_left_on_equiv_seqs( + s1: Seq, + s2: Seq, + eq: FnSpec(A, A) -> bool, + init: B, + f: FnSpec(B, A) -> B, +) + requires + s1.len() == s2.len(), + (forall|i: int| 0 <= i < s1.len() ==> eq(s1[i], s2[i])), + (forall|b: B, a1: A, a2: A| #[trigger] eq(a1, a2) ==> #[trigger] f(b, a1) == f(b, a2)), + ensures + s1.fold_left(init, f) == s2.fold_left(init, f), + decreases s1.len(), +{ + reveal(Seq::fold_left); + if s1.len() != 0 { + lemma_fold_left_on_equiv_seqs(s1.drop_last(), s2.drop_last(), eq, init, f); + } +} - pub proof fn some_differing_index_for_unequal_seqs(s1: Seq, s2: Seq) -> (i: int) - requires - s1 != s2, - s1.len() == s2.len(), - ensures - 0 <= i < s1.len(), - s1[i] != s2[i], - { - if forall |i| 0 <= i < s1.len() ==> s1[i] == s2[i] { - assert(s1 =~= s2); - } - choose |i:int| 0 <= i < s1.len() && s1[i] != s2[i] - } +pub proof fn 
lemma_fold_left_append_merge(s1: Seq, s2: Seq, f: FnSpec(A) -> Seq) + ensures + (s1 + s2).fold_left(Seq::empty(), |acc: Seq, a: A| acc + f(a)) == s1.fold_left( + Seq::empty(), + |acc: Seq, a: A| acc + f(a), + ) + s2.fold_left(Seq::empty(), |acc: Seq, a: A| acc + f(a)), + decreases s1.len() + s2.len(), +{ + let e = Seq::::empty(); + let af = |acc: Seq, a: A| acc + f(a); + let fl = |s: Seq| s.fold_left(e, af); + if s2.len() == 0 { + assert(s1 + s2 =~= s1); + assert(fl(s1) =~= fl(s1) + e); + } else { + lemma_fold_left_append_merge(s1, s2.drop_last(), f); + assert((s1 + s2).drop_last() =~= s1 + s2.drop_last()); + assert((fl(s1) + fl(s2.drop_last())) + f(s2.last()) =~= fl(s1) + (fl(s2.drop_last()) + f( + s2.last(), + ))); + } +} + +pub proof fn some_differing_index_for_unequal_seqs(s1: Seq, s2: Seq) -> (i: int) + requires + s1 != s2, + s1.len() == s2.len(), + ensures + 0 <= i < s1.len(), + s1[i] != s2[i], +{ + if forall|i| 0 <= i < s1.len() ==> s1[i] == s2[i] { + assert(s1 =~= s2); + } + choose|i: int| 0 <= i < s1.len() && s1[i] != s2[i] +} - } // verus! +} // verus! } pub mod set_lib_ext_v { @@ -10469,65 +12013,71 @@ mod verus_extra { use vstd::set_lib::*; verus! { - /// This fold uses a fixed zero rather than accumulating results in that - /// argument. This means proofs don't need to generalize over the accumulator, - /// unlike the Set::fold currently in Verus. - pub open spec fn set_fold(s: Set, zero: B, f: FnSpec(B, A) -> B) -> B - recommends s.finite() - decreases s.len() - { - if s.finite() { - if s.len() == 0 { - zero - } else { - let a = s.choose(); - f(set_fold(s.remove(a), zero, f), a) - } - } else { - zero - } - } - pub open spec fn flatten_sets(sets: Set>) -> Set - { - // extra parens are for rust-analyzer - Set::new(|a: A| (exists |s: Set| sets.contains(s) && s.contains(a))) +/// This fold uses a fixed zero rather than accumulating results in that +/// argument. This means proofs don't need to generalize over the accumulator, +/// unlike the Set::fold currently in Verus. 
+pub open spec fn set_fold(s: Set, zero: B, f: FnSpec(B, A) -> B) -> B + recommends + s.finite(), + decreases s.len(), +{ + if s.finite() { + if s.len() == 0 { + zero + } else { + let a = s.choose(); + f(set_fold(s.remove(a), zero, f), a) } + } else { + zero + } +} - pub proof fn flatten_sets_spec(sets: Set>) - ensures - (forall |e| #[trigger] flatten_sets(sets).contains(e) ==> exists |s| sets.contains(s) && s.contains(e)), - (forall |s: Set| #[trigger] sets.contains(s) ==> s.subset_of(flatten_sets(sets))) - { - } +pub open spec fn flatten_sets(sets: Set>) -> Set { + // extra parens are for rust-analyzer + Set::new(|a: A| (exists|s: Set| sets.contains(s) && s.contains(a))) +} - pub proof fn lemma_flatten_sets_insert(sets: Set>, s: Set) - ensures flatten_sets(sets.insert(s)) == flatten_sets(sets).union(s) - { - assert_sets_equal!(flatten_sets(sets.insert(s)) == flatten_sets(sets).union(s)); - } +pub proof fn flatten_sets_spec(sets: Set>) + ensures + (forall|e| #[trigger] + flatten_sets(sets).contains(e) ==> exists|s| sets.contains(s) && s.contains(e)), + (forall|s: Set| #[trigger] sets.contains(s) ==> s.subset_of(flatten_sets(sets))), +{ +} - pub proof fn lemma_flatten_sets_union(sets1: Set>, sets2: Set>) - ensures flatten_sets(sets1.union(sets2)) == flatten_sets(sets1).union(flatten_sets(sets2)) - { - assert_sets_equal!(flatten_sets(sets1.union(sets2)) == +pub proof fn lemma_flatten_sets_insert(sets: Set>, s: Set) + ensures + flatten_sets(sets.insert(s)) == flatten_sets(sets).union(s), +{ + assert_sets_equal!(flatten_sets(sets.insert(s)) == flatten_sets(sets).union(s)); +} + +pub proof fn lemma_flatten_sets_union(sets1: Set>, sets2: Set>) + ensures + flatten_sets(sets1.union(sets2)) == flatten_sets(sets1).union(flatten_sets(sets2)), +{ + assert_sets_equal!(flatten_sets(sets1.union(sets2)) == flatten_sets(sets1).union(flatten_sets(sets2))); - } +} - pub proof fn lemma_flatten_sets_union_auto() - ensures forall |sets1: Set>, sets2: Set>| - #[trigger] flatten_sets(sets1.union(sets2)) == flatten_sets(sets1).union(flatten_sets(sets2)) - { - assert forall |sets1: Set>, sets2: Set>| - #[trigger] flatten_sets(sets1.union(sets2)) == flatten_sets(sets1).union(flatten_sets(sets2)) by { - lemma_flatten_sets_union(sets1, sets2); - } - } +pub proof fn lemma_flatten_sets_union_auto() + ensures + forall|sets1: Set>, sets2: Set>| #[trigger] + flatten_sets(sets1.union(sets2)) == flatten_sets(sets1).union(flatten_sets(sets2)), +{ + assert forall|sets1: Set>, sets2: Set>| #[trigger] + flatten_sets(sets1.union(sets2)) == flatten_sets(sets1).union(flatten_sets(sets2)) by { + lemma_flatten_sets_union(sets1, sets2); + } +} - pub proof fn set_map_union(s1: Set, s2: Set, f: FnSpec(A) -> B) - ensures (s1 + s2).map(f) == s1.map(f) + s2.map(f) - { - assert_sets_equal!((s1 + s2).map(f) == s1.map(f) + s2.map(f), y => { +pub proof fn set_map_union(s1: Set, s2: Set, f: FnSpec(A) -> B) + ensures + (s1 + s2).map(f) == s1.map(f) + s2.map(f), +{ + assert_sets_equal!((s1 + s2).map(f) == s1.map(f) + s2.map(f), y => { if s1.map(f).contains(y) { let x = choose |x| s1.contains(x) && f(x) == y; assert((s1 + s2).contains(x)); @@ -10536,80 +12086,89 @@ mod verus_extra { assert((s1 + s2).contains(x)); } }); - } +} - pub proof fn set_map_union_auto() - ensures forall |s1: Set, s2: Set, f: FnSpec(A) -> B| - #[trigger] (s1 + s2).map(f) == s1.map(f) + s2.map(f) - { - assert forall |s1: Set, s2: Set, f: FnSpec(A) -> B| - #[trigger] ((s1 + s2).map(f)) == s1.map(f) + s2.map(f) by { - set_map_union(s1, s2, f); - } - } +pub proof fn 
set_map_union_auto() + ensures + forall|s1: Set, s2: Set, f: FnSpec(A) -> B| #[trigger] + (s1 + s2).map(f) == s1.map(f) + s2.map(f), +{ + assert forall|s1: Set, s2: Set, f: FnSpec(A) -> B| #[trigger] + ((s1 + s2).map(f)) == s1.map(f) + s2.map(f) by { + set_map_union(s1, s2, f); + } +} - pub proof fn seq_map_values_concat(s1: Seq, s2: Seq, f: FnSpec(A) -> B) - ensures (s1 + s2).map_values(f) == s1.map_values(f) + s2.map_values(f) - { - assert_seqs_equal!((s1 + s2).map_values(f) == s1.map_values(f) + s2.map_values(f), i => { +pub proof fn seq_map_values_concat(s1: Seq, s2: Seq, f: FnSpec(A) -> B) + ensures + (s1 + s2).map_values(f) == s1.map_values(f) + s2.map_values(f), +{ + assert_seqs_equal!((s1 + s2).map_values(f) == s1.map_values(f) + s2.map_values(f), i => { if i < s1.len() { assert((s1+s2)[i] == s1[i]); } else { assert((s1+s2)[i] == s2[i - s1.len()]); } }); - } +} - pub proof fn seq_map_values_concat_auto() - ensures forall |s1: Seq, s2: Seq, f: FnSpec(A) -> B| - #[trigger] (s1 + s2).map_values(f) == s1.map_values(f) + s2.map_values(f) - { - assert forall |s1: Seq, s2: Seq, f: FnSpec(A) -> B| - #[trigger] ((s1 + s2).map_values(f)) == s1.map_values(f) + s2.map_values(f) by { - seq_map_values_concat(s1, s2, f); - } - } +pub proof fn seq_map_values_concat_auto() + ensures + forall|s1: Seq, s2: Seq, f: FnSpec(A) -> B| #[trigger] + (s1 + s2).map_values(f) == s1.map_values(f) + s2.map_values(f), +{ + assert forall|s1: Seq, s2: Seq, f: FnSpec(A) -> B| #[trigger] + ((s1 + s2).map_values(f)) == s1.map_values(f) + s2.map_values(f) by { + seq_map_values_concat(s1, s2, f); + } +} - pub open spec fn flatten_set_seq(sets: Seq>) -> Set - { - sets.fold_left(Set::::empty(), |s1: Set, s2: Set| s1.union(s2)) - } +pub open spec fn flatten_set_seq(sets: Seq>) -> Set { + sets.fold_left(Set::::empty(), |s1: Set, s2: Set| s1.union(s2)) +} - pub proof fn lemma_flatten_set_seq_spec(sets: Seq>) - ensures - (forall |x:A| #[trigger] flatten_set_seq(sets).contains(x) ==> - exists |i: int| 0 <= i < sets.len() && #[trigger] sets[i].contains(x)), - (forall |x:A, i:int| 0 <= i < sets.len() && #[trigger] sets[i].contains(x) ==> - flatten_set_seq(sets).contains(x)) - decreases sets.len() - { - if sets.len() == 0 { +pub proof fn lemma_flatten_set_seq_spec(sets: Seq>) + ensures + (forall|x: A| #[trigger] + flatten_set_seq(sets).contains(x) ==> exists|i: int| + 0 <= i < sets.len() && #[trigger] sets[i].contains(x)), + (forall|x: A, i: int| + 0 <= i < sets.len() && #[trigger] sets[i].contains(x) ==> flatten_set_seq( + sets, + ).contains(x)), + decreases sets.len(), +{ + if sets.len() == 0 { + } else { + lemma_flatten_set_seq_spec(sets.drop_last()); + assert forall|x: A| flatten_set_seq(sets).contains(x) implies exists|i: int| + 0 <= i < sets.len() && #[trigger] sets[i].contains(x) by { + if sets.last().contains(x) { } else { - lemma_flatten_set_seq_spec(sets.drop_last()); - assert forall |x:A| flatten_set_seq(sets).contains(x) implies - exists |i: int| 0 <= i < sets.len() && #[trigger] sets[i].contains(x) by { - if sets.last().contains(x) { - } else { - assert(flatten_set_seq(sets.drop_last()).contains(x)); - } - } - assert forall |x:A, i:int| 0 <= i < sets.len() && #[trigger] sets[i].contains(x) implies - flatten_set_seq(sets).contains(x) by { - if i == sets.len() - 1 { - assert(sets.last().contains(x)); - assert(flatten_set_seq(sets) == flatten_set_seq(sets.drop_last()).union(sets.last())); - } else { - assert(0 <= i < sets.drop_last().len() && sets.drop_last()[i].contains(x)); - } - } + 
assert(flatten_set_seq(sets.drop_last()).contains(x)); + } + } + assert forall|x: A, i: int| + 0 <= i < sets.len() && #[trigger] sets[i].contains(x) implies flatten_set_seq( + sets, + ).contains(x) by { + if i == sets.len() - 1 { + assert(sets.last().contains(x)); + assert(flatten_set_seq(sets) == flatten_set_seq(sets.drop_last()).union( + sets.last(), + )); + } else { + assert(0 <= i < sets.drop_last().len() && sets.drop_last()[i].contains(x)); } } + } +} - - pub proof fn lemma_seq_push_to_set(s: Seq, x: A) - ensures s.push(x).to_set() == s.to_set().insert(x) - { - assert_sets_equal!(s.push(x).to_set() == s.to_set().insert(x), elem => { +pub proof fn lemma_seq_push_to_set(s: Seq, x: A) + ensures + s.push(x).to_set() == s.to_set().insert(x), +{ + assert_sets_equal!(s.push(x).to_set() == s.to_set().insert(x), elem => { if elem == x { assert(s.push(x)[s.len() as int] == x); assert(s.push(x).contains(x)) @@ -10621,12 +12180,13 @@ mod verus_extra { } } }); - } +} - pub proof fn lemma_set_map_insert(s: Set, f: FnSpec(A) -> B, x: A) - ensures s.insert(x).map(f) == s.map(f).insert(f(x)) - { - assert_sets_equal!(s.insert(x).map(f) == s.map(f).insert(f(x)), y => { +pub proof fn lemma_set_map_insert(s: Set, f: FnSpec(A) -> B, x: A) + ensures + s.insert(x).map(f) == s.map(f).insert(f(x)), +{ + assert_sets_equal!(s.insert(x).map(f) == s.map(f).insert(f(x)), y => { if y == f(x) { assert(s.insert(x).contains(x)); // OBSERVE // assert(s.map(f).insert(f(x)).contains(f(x))); @@ -10643,141 +12203,146 @@ mod verus_extra { } } }); - } - - // TODO(verus): This consequence should somehow be broadcast from map_values/map - pub proof fn lemma_seq_map_equiv(f: FnSpec(A) -> B, g: FnSpec(int, A) -> B) - requires - forall |i: int, a: A| #[trigger] g(i, a) == f(a) - ensures - forall |s: Seq| s.map_values(f) == s.map(g) - { - assert forall |s: Seq| s.map_values(f) == s.map(g) by { - assert_seqs_equal!(s.map_values(f), s.map(g)); - } - } - - pub proof fn lemma_to_set_distributes_over_addition(s: Seq, t: Seq) - ensures (s+t).to_set() == s.to_set() + t.to_set() - { - let left = (s+t).to_set(); - let right = s.to_set() + t.to_set(); - assert forall |x| right.contains(x) implies left.contains(x) by { - assert(s.to_set()+t.to_set() == s.to_set().union(t.to_set())); - if s.to_set().contains(x) { - let si = choose |si| 0<=si(f: FnSpec(A) -> B, g: FnSpec(int, A) -> B) + requires + forall|i: int, a: A| #[trigger] g(i, a) == f(a), + ensures + forall|s: Seq| s.map_values(f) == s.map(g), +{ + assert forall|s: Seq| s.map_values(f) == s.map(g) by { + assert_seqs_equal!(s.map_values(f), s.map(g)); + } +} - pub proof fn lemma_to_set_union_auto() - ensures forall |s: Seq, t: Seq| #[trigger] (s+t).to_set() == s.to_set() + t.to_set() - { - assert forall |s: Seq, t: Seq| #[trigger] (s+t).to_set() == s.to_set() + t.to_set() by { - lemma_to_set_distributes_over_addition(s, t); - } +pub proof fn lemma_to_set_distributes_over_addition(s: Seq, t: Seq) + ensures + (s + t).to_set() == s.to_set() + t.to_set(), +{ + let left = (s + t).to_set(); + let right = s.to_set() + t.to_set(); + assert forall|x| right.contains(x) implies left.contains(x) by { + assert(s.to_set() + t.to_set() == s.to_set().union(t.to_set())); + if s.to_set().contains(x) { + let si = choose|si| 0 <= si < s.len() && s[si] == x; + assert((s + t)[si] == x); + } else { + let ti = choose|ti| 0 <= ti < t.len() && t[ti] == x; + assert((s + t)[s.len() + ti] == x); } + } + assert_sets_equal!(left, right); +} - spec fn map_fold(s: Set, f: FnSpec(A) -> B) -> Set - recommends s.finite() - 
{ - set_fold(s, Set::empty(), |s1: Set, a: A| s1.insert(f(a))) - } +pub proof fn lemma_to_set_union_auto() + ensures + forall|s: Seq, t: Seq| #[trigger] (s + t).to_set() == s.to_set() + t.to_set(), +{ + assert forall|s: Seq, t: Seq| #[trigger] (s + t).to_set() == s.to_set() + t.to_set() by { + lemma_to_set_distributes_over_addition(s, t); + } +} - proof fn map_fold_ok(s: Set, f: FnSpec(A) -> B) - requires s.finite() - ensures map_fold(s, f) =~= s.map(f) - decreases s.len() - { - if s.len() == 0 { - return; - } else { - let a = s.choose(); - map_fold_ok(s.remove(a), f); - return; - } - } +spec fn map_fold(s: Set, f: FnSpec(A) -> B) -> Set + recommends + s.finite(), +{ + set_fold(s, Set::empty(), |s1: Set, a: A| s1.insert(f(a))) +} - proof fn map_fold_finite(s: Set, f: FnSpec(A) -> B) - requires s.finite() - ensures map_fold(s, f).finite() - decreases s.len() - { - if s.len() == 0 { - return; - } else { - let a = s.choose(); - map_fold_finite(s.remove(a), f); - return; - } - } +proof fn map_fold_ok(s: Set, f: FnSpec(A) -> B) + requires + s.finite(), + ensures + map_fold(s, f) =~= s.map(f), + decreases s.len(), +{ + if s.len() == 0 { + return ; + } else { + let a = s.choose(); + map_fold_ok(s.remove(a), f); + return ; + } +} - pub proof fn map_finite(s: Set, f: FnSpec(A) -> B) - requires - s.finite(), - ensures - s.map(f).finite(), - { - map_fold_ok(s, f); - map_fold_finite(s, f); - } +proof fn map_fold_finite(s: Set, f: FnSpec(A) -> B) + requires + s.finite(), + ensures + map_fold(s, f).finite(), + decreases s.len(), +{ + if s.len() == 0 { + return ; + } else { + let a = s.choose(); + map_fold_finite(s.remove(a), f); + return ; + } +} - pub proof fn map_set_finite_auto() - ensures - forall |s: Set, f: FnSpec(A) -> B| s.finite() ==> #[trigger] (s.map(f).finite()), - { - assert forall |s: Set, f: FnSpec(A) -> B| s.finite() implies #[trigger] s.map(f).finite() by { - map_finite(s, f); - } - } +pub proof fn map_finite(s: Set, f: FnSpec(A) -> B) + requires + s.finite(), + ensures + s.map(f).finite(), +{ + map_fold_ok(s, f); + map_fold_finite(s, f); +} - pub proof fn lemma_to_set_singleton_auto() - ensures - forall |x: A| #[trigger] seq![x].to_set() == set![x], - { - assert forall |x: A| #[trigger] seq![x].to_set() =~= set![x] by { - assert(seq![x][0] == x); - } - } +pub proof fn map_set_finite_auto() + ensures + forall|s: Set, f: FnSpec(A) -> B| s.finite() ==> #[trigger] (s.map(f).finite()), +{ + assert forall|s: Set, f: FnSpec(A) -> B| s.finite() implies #[trigger] s.map(f).finite() by { + map_finite(s, f); + } +} - pub proof fn lemma_map_values_singleton_auto() - ensures - forall |x: A, f: FnSpec(A) -> B| #[trigger] seq![x].map_values(f) =~= seq![f(x)], - { - } +pub proof fn lemma_to_set_singleton_auto() + ensures + forall|x: A| #[trigger] seq![x].to_set() == set![x], +{ + assert forall|x: A| #[trigger] seq![x].to_set() =~= set![x] by { + assert(seq![x][0] == x); + } +} - pub proof fn lemma_map_set_singleton_auto() - ensures - forall |x: A, f: FnSpec(A) -> B| #[trigger] set![x].map(f) == set![f(x)], - { - assert forall |x: A, f: FnSpec(A) -> B| #[trigger] set![x].map(f) =~= set![f(x)] by { - assert(set![x].contains(x)); - } - } +pub proof fn lemma_map_values_singleton_auto() + ensures + forall|x: A, f: FnSpec(A) -> B| #[trigger] seq![x].map_values(f) =~= seq![f(x)], +{ +} - pub proof fn lemma_map_seq_singleton_auto() - ensures - forall |x: A, f: FnSpec(A) -> B| #[trigger] seq![x].map_values(f) =~= seq![f(x)], - { - } +pub proof fn lemma_map_set_singleton_auto() + ensures + forall|x: A, f: 
FnSpec(A) -> B| #[trigger] set![x].map(f) == set![f(x)], +{ + assert forall|x: A, f: FnSpec(A) -> B| #[trigger] set![x].map(f) =~= set![f(x)] by { + assert(set![x].contains(x)); + } +} +pub proof fn lemma_map_seq_singleton_auto() + ensures + forall|x: A, f: FnSpec(A) -> B| #[trigger] seq![x].map_values(f) =~= seq![f(x)], +{ +} - pub proof fn flatten_sets_singleton_auto() - ensures - forall |x: Set| #[trigger] flatten_sets(set![x]) =~= x, - { - } +pub proof fn flatten_sets_singleton_auto() + ensures + forall|x: Set| #[trigger] flatten_sets(set![x]) =~= x, +{ +} - // TODO(Tej): We strongly suspect there is a trigger loop in these auto - // lemmas somewhere, but it's not easy to see from the profiler yet. +// TODO(Tej): We strongly suspect there is a trigger loop in these auto +// lemmas somewhere, but it's not easy to see from the profiler yet. - } +} // verus! } } // TODO: maybe move into Verus? @@ -10791,21 +12356,22 @@ verus! { // executable and unflattens then into a vector of arguments. C# flattens // the arguments by contatenating them all together, and passing us an array // of their lengths. - #[verifier(external)] #[verus::line_count::ignore] -pub unsafe fn unflatten_args ( +pub unsafe fn unflatten_args( num_args: i32, arg_lengths: *const i32, _total_arg_length: i32, - flattened_args: *const u8 -) -> Vec> -{ + flattened_args: *const u8, +) -> Vec> { let mut offset: isize = 0; let mut args: Vec> = Vec::new(); for i in 0..num_args as isize { let arg_length = *arg_lengths.offset(i as isize); - let arg_array: &[u8] = std::slice::from_raw_parts(flattened_args.offset(offset), arg_length as usize); + let arg_array: &[u8] = std::slice::from_raw_parts( + flattened_args.offset(offset), + arg_length as usize, + ); let arg_vec: std::vec::Vec = arg_array.to_vec(); let mut arg: Vec = Vec::new(); arg = arg_vec; @@ -10819,46 +12385,39 @@ pub unsafe fn unflatten_args ( #[verus::line_count::ignore] pub unsafe fn sht_main_placeholder_to_test_netclient( nc: &mut io_t::NetClient, - args: &Vec> -) -{ - for i in 0..args.len() - { + args: &Vec>, +) { + for i in 0..args.len() { println!("Command-line argument #{}: {:#?}", i+1, args[i]); } - let my_end_point: EndPoint = nc.get_my_end_point(); println!("My end point: {:#?}", my_end_point.id); println!("Current time is {}", nc.get_time()); - let mut message: Vec = Vec::new(); message = "Hello, world!".as_bytes().to_vec(); let _ = nc.send(&my_end_point, &message); - match nc.receive(0) { - NetcReceiveResult::Received{sender, message} => { + NetcReceiveResult::Received { sender, message } => { println!("Received message {:#?}", message); }, - NetcReceiveResult::TimedOut{} => { + NetcReceiveResult::TimedOut { } => { println!("Timed out"); - } - NetcReceiveResult::Error{} => { + }, + NetcReceiveResult::Error { } => { println!("Error"); - } + }, } - std::thread::sleep(std::time::Duration::from_millis(1000)); - match nc.receive(0) { - NetcReceiveResult::Received{sender, message} => { + NetcReceiveResult::Received { sender, message } => { println!("Received message {:#?}", message); }, - NetcReceiveResult::TimedOut{} => { + NetcReceiveResult::TimedOut { } => { println!("Timed out"); - } - NetcReceiveResult::Error{} => { + }, + NetcReceiveResult::Error { } => { println!("Error"); - } + }, } } @@ -10871,26 +12430,21 @@ pub unsafe fn sht_main_placeholder_to_test_netclient( // return to it two things: `buffer_ptr`, a pointer to a region of memory with // length `length`, and `box_vec_ptr`, a pointer that it will return to us when // we ask to receive a message. 
- #[verifier(external)] #[no_mangle] #[verus::line_count::ignore] pub unsafe extern "C" fn allocate_buffer( length: u64, box_vec_ptr: *mut *mut std::vec::Vec, - buffer_ptr: *mut *mut u8 -) -{ + buffer_ptr: *mut *mut u8, +) { // Allocate a std::vec::Vec with the given length. let mut v: std::vec::Vec = std::vec::Vec::::with_capacity(length as usize); v.set_len(length as usize); - // Box the vector. let mut b: Box> = Box::>::new(v); - // Return the raw pointer to the vector's buffer as `*buffer_ptr`. *buffer_ptr = (*b).as_mut_ptr(); - // Return the raw pointer to the Box as `*box_vec_ptr`. *box_vec_ptr = Box::>::into_raw(b); } @@ -10901,14 +12455,10 @@ pub unsafe extern "C" fn allocate_buffer( // us but has now decided it doesn't want to return to us. For instance, // if the I/O framework allocated it to store an incoming packet, but // detected that the connection closed, it needs to free the buffer. - #[verifier(external)] #[verus::line_count::ignore] #[no_mangle] -pub unsafe extern "C" fn free_buffer( - box_vec_ptr: *mut std::vec::Vec -) -{ +pub unsafe extern "C" fn free_buffer(box_vec_ptr: *mut std::vec::Vec) { // Convert back from a raw pointer to a Box so that when the Box // goes out of scope at the end of this function, it will be // freed. @@ -10923,26 +12473,42 @@ pub unsafe extern "C" fn sht_main_wrapper( arg_lengths: *const i32, total_arg_length: i32, flattened_args: *const u8, - get_my_end_point_func: extern "C" fn(*mut *mut std::vec::Vec), - get_time_func: extern "C" fn() -> u64, - receive_func: extern "C" fn(i32, *mut bool, *mut bool, *mut *mut std::vec::Vec, *mut *mut std::vec::Vec), - send_func: extern "C" fn(u64, *const u8, u64, *const u8) -> bool + get_my_end_point_func: extern "C" fn (*mut *mut std::vec::Vec), + get_time_func: extern "C" fn () -> u64, + receive_func: extern "C" fn ( + i32, + *mut bool, + *mut bool, + *mut *mut std::vec::Vec, + *mut *mut std::vec::Vec, + ), + send_func: extern "C" fn (u64, *const u8, u64, *const u8) -> bool, ) -> i32 { - let args: Vec> = unflatten_args(num_args, arg_lengths, total_arg_length, flattened_args); - + let args: Vec> = unflatten_args( + num_args, + arg_lengths, + total_arg_length, + flattened_args, + ); let mut my_end_point_vec_ptr = std::mem::MaybeUninit::<*mut std::vec::Vec>::uninit(); get_my_end_point_func(my_end_point_vec_ptr.as_mut_ptr()); let my_end_point_ptr: *mut std::vec::Vec = my_end_point_vec_ptr.assume_init(); - let my_end_point_box: Box> = Box::>::from_raw(my_end_point_ptr); + let my_end_point_box: Box> = Box::>::from_raw( + my_end_point_ptr, + ); let my_end_point_vec: std::vec::Vec = *my_end_point_box; let mut my_end_point: Vec = Vec::new(); my_end_point = my_end_point_vec; - - let mut nc = crate::io_t::NetClient::new(EndPoint{id: my_end_point}, get_time_func, receive_func, send_func); + let mut nc = crate::io_t::NetClient::new( + EndPoint { id: my_end_point }, + get_time_func, + receive_func, + send_func, + ); match main_t::sht_main(nc, args) { Ok(_) => 0, Err(_) => 1, } } -} +} // verus! 
diff --git a/tests/snapshot-examples.rs b/tests/snapshot-examples.rs index 956a634..c5ba5a5 100644 --- a/tests/snapshot-examples.rs +++ b/tests/snapshot-examples.rs @@ -25,7 +25,6 @@ fn syntax_rs_unchanged() { } #[test] -#[ignore] // Due to https://github.com/verus-lang/verusfmt/issues/33 fn ironfleet_rs_unchanged() { check_snapshot(include_str!("../examples/ironfleet.rs")); } From cb8c7acba239f7a444af3ecaba29387e7985cca1 Mon Sep 17 00:00:00 2001 From: Jay Bosamiya Date: Mon, 26 Feb 2024 17:13:20 -0500 Subject: [PATCH 07/10] Explain why nr & mimalloc are still ignored --- tests/snapshot-examples.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/snapshot-examples.rs b/tests/snapshot-examples.rs index c5ba5a5..2db21c1 100644 --- a/tests/snapshot-examples.rs +++ b/tests/snapshot-examples.rs @@ -30,13 +30,13 @@ fn ironfleet_rs_unchanged() { } #[test] -#[ignore] // Due to "fatal runtime error: stack overflow" during testing +#[ignore] // Due to "fatal runtime error: stack overflow" during `cargo test`, and comment failure during regular execution fn mimalloc_rs_unchanged() { check_snapshot(include_str!("../examples/mimalloc.rs")); } #[test] -#[ignore] // Due to https://github.com/verus-lang/verusfmt/issues/33 +#[ignore] // Due to a version of https://github.com/verus-lang/verusfmt/issues/33 on the `state_machine` macro fn nr_rs_unchanged() { check_snapshot(include_str!("../examples/nr.rs")); } From 168e846709cdb3d7ad83756c0ef434b5cb2995bf Mon Sep 17 00:00:00 2001 From: Jay Bosamiya Date: Mon, 26 Feb 2024 17:13:36 -0500 Subject: [PATCH 08/10] Run verusfmt on pagetable --- examples/pagetable.rs | 17278 +++++++++++++++++++++-------------- tests/snapshot-examples.rs | 1 - 2 files changed, 10280 insertions(+), 6999 deletions(-) diff --git a/examples/pagetable.rs b/examples/pagetable.rs index b75167a..2e602b1 100644 --- a/examples/pagetable.rs +++ b/examples/pagetable.rs @@ -20,295 +20,365 @@ pub mod impl_u { verus! 
{ - #[verifier(nonlinear)] - pub proof fn ambient_arith() - ensures - forall|a: nat, b: nat| a == 0 ==> #[trigger] (a * b) == 0, - forall|a: nat, b: nat| b == 0 ==> #[trigger] (a * b) == 0, - forall|a: nat, b: nat| a > 0 && b > 0 ==> #[trigger] (a * b) > 0, - forall|a: int, b: int| #[trigger] (a * b) == (b * a), - forall|a:nat| a != 0 ==> aligned(0, a) - { - lib::aligned_zero(); - } +#[verifier(nonlinear)] +pub proof fn ambient_arith() + ensures + forall|a: nat, b: nat| a == 0 ==> #[trigger] (a * b) == 0, + forall|a: nat, b: nat| b == 0 ==> #[trigger] (a * b) == 0, + forall|a: nat, b: nat| a > 0 && b > 0 ==> #[trigger] (a * b) > 0, + forall|a: int, b: int| #[trigger] (a * b) == (b * a), + forall|a: nat| a != 0 ==> aligned(0, a), +{ + lib::aligned_zero(); +} - pub proof fn ambient_lemmas1() - ensures - forall|s1: Map, s2: Map| s1.dom().finite() && s2.dom().finite() ==> #[trigger] s1.union_prefer_right(s2).dom().finite(), - forall|a: int, b: int| #[trigger] (a * b) == b * a, - forall|m1: Map, m2: Map, n: nat| - (m1.dom().contains(n) && !m2.dom().contains(n)) - ==> equal(m1.remove(n).union_prefer_right(m2), m1.union_prefer_right(m2).remove(n)), - forall|m1: Map, m2: Map, n: nat| - (m2.dom().contains(n) && !m1.dom().contains(n)) - ==> equal(m1.union_prefer_right(m2.remove(n)), m1.union_prefer_right(m2).remove(n)), - forall|m1: Map, m2: Map, n: nat, v: PageTableEntry| - (!m1.dom().contains(n) && !m2.dom().contains(n)) - ==> equal(m1.insert(n, v).union_prefer_right(m2), m1.union_prefer_right(m2).insert(n, v)), - forall|m1: Map, m2: Map, n: nat, v: PageTableEntry| - (!m1.dom().contains(n) && !m2.dom().contains(n)) - ==> equal(m1.union_prefer_right(m2.insert(n, v)), m1.union_prefer_right(m2).insert(n, v)), - // forall(|d: Directory| d.inv() ==> (#[trigger] d.interp().upper == d.upper_vaddr())), - // forall(|d: Directory| d.inv() ==> (#[trigger] d.interp().lower == d.base_vaddr)), - { - lemma_finite_map_union::(); - // assert_nonlinear_by({ ensures(forall|d: Directory| equal(d.num_entries() * d.entry_size(), d.entry_size() * d.num_entries())); }); - // assert_forall_by(|d: Directory, i: nat| { - // requires(#[auto_trigger] d.inv() && i < d.num_entries() && d.entries.index(i).is_Directory()); - // ensures(#[auto_trigger] d.entries.index(i).get_Directory_0().inv()); - // assert(d.directories_obey_invariant()); - // }); - lemma_map_union_prefer_right_remove_commute::(); - lemma_map_union_prefer_right_insert_commute::(); - assert(forall|a: int, b: int| #[trigger] (a * b) == b * a) by (nonlinear_arith) { }; - } +pub proof fn ambient_lemmas1() + ensures + forall|s1: Map, s2: Map| + s1.dom().finite() && s2.dom().finite() ==> #[trigger] s1.union_prefer_right( + s2, + ).dom().finite(), + forall|a: int, b: int| #[trigger] (a * b) == b * a, + forall|m1: Map, m2: Map, n: nat| + (m1.dom().contains(n) && !m2.dom().contains(n)) ==> equal( + m1.remove(n).union_prefer_right(m2), + m1.union_prefer_right(m2).remove(n), + ), + forall|m1: Map, m2: Map, n: nat| + (m2.dom().contains(n) && !m1.dom().contains(n)) ==> equal( + m1.union_prefer_right(m2.remove(n)), + m1.union_prefer_right(m2).remove(n), + ), + forall| + m1: Map, + m2: Map, + n: nat, + v: PageTableEntry, + | + (!m1.dom().contains(n) && !m2.dom().contains(n)) ==> equal( + m1.insert(n, v).union_prefer_right(m2), + m1.union_prefer_right(m2).insert(n, v), + ), + forall| + m1: Map, + m2: Map, + n: nat, + v: PageTableEntry, + | + (!m1.dom().contains(n) && !m2.dom().contains(n)) ==> equal( + m1.union_prefer_right(m2.insert(n, v)), + 
m1.union_prefer_right(m2).insert(n, v), + ), +// forall(|d: Directory| d.inv() ==> (#[trigger] d.interp().upper == d.upper_vaddr())), +// forall(|d: Directory| d.inv() ==> (#[trigger] d.interp().lower == d.base_vaddr)), + +{ + lemma_finite_map_union::(); + // assert_nonlinear_by({ ensures(forall|d: Directory| equal(d.num_entries() * d.entry_size(), d.entry_size() * d.num_entries())); }); + // assert_forall_by(|d: Directory, i: nat| { + // requires(#[auto_trigger] d.inv() && i < d.num_entries() && d.entries.index(i).is_Directory()); + // ensures(#[auto_trigger] d.entries.index(i).get_Directory_0().inv()); + // assert(d.directories_obey_invariant()); + // }); + lemma_map_union_prefer_right_remove_commute::(); + lemma_map_union_prefer_right_insert_commute::(); + assert(forall|a: int, b: int| #[trigger] (a * b) == b * a) by (nonlinear_arith){}; +} +pub struct PageTableContents { + pub map: Map, + pub arch: Arch, + pub lower: nat, + pub upper: nat, +} - pub struct PageTableContents { - pub map: Map, - pub arch: Arch, - pub lower: nat, - pub upper: nat, - } +impl PageTableContents { + pub open spec(checked) fn inv(&self) -> bool { + &&& self.map.dom().finite() + &&& self.arch.inv() + &&& self.mappings_are_of_valid_size() + &&& self.mappings_are_aligned() + &&& self.mappings_dont_overlap() + &&& self.mappings_in_bounds() + } - impl PageTableContents { - pub open spec(checked) fn inv(&self) -> bool { - &&& self.map.dom().finite() - &&& self.arch.inv() - &&& self.mappings_are_of_valid_size() - &&& self.mappings_are_aligned() - &&& self.mappings_dont_overlap() - &&& self.mappings_in_bounds() - } + pub open spec(checked) fn mappings_are_of_valid_size(self) -> bool { + forall|va: nat| + #![trigger self.map.index(va).frame.size] + #![trigger self.map.index(va).frame.base] + self.map.dom().contains(va) ==> self.arch.contains_entry_size( + self.map.index(va).frame.size, + ) + } - pub open spec(checked) fn mappings_are_of_valid_size(self) -> bool { - forall|va: nat| - #![trigger self.map.index(va).frame.size] #![trigger self.map.index(va).frame.base] - self.map.dom().contains(va) ==> self.arch.contains_entry_size(self.map.index(va).frame.size) - } + pub open spec(checked) fn mappings_are_aligned(self) -> bool { + forall|va: nat| + #![trigger self.map.index(va).frame.size] + #![trigger self.map.index(va).frame.base] + self.map.dom().contains(va) ==> aligned(va, self.map.index(va).frame.size) && aligned( + self.map.index(va).frame.base, + self.map.index(va).frame.size, + ) + } - pub open spec(checked) fn mappings_are_aligned(self) -> bool { - forall|va: nat| - #![trigger self.map.index(va).frame.size] #![trigger self.map.index(va).frame.base] - self.map.dom().contains(va) ==> - aligned(va, self.map.index(va).frame.size) && aligned(self.map.index(va).frame.base, self.map.index(va).frame.size) - } + pub open spec(checked) fn mappings_dont_overlap(self) -> bool { + forall|b1: nat, b2: nat| + #![trigger self.map[b1], self.map[b2]] + #![trigger self.map.dom().contains(b1), self.map.dom().contains(b2)] + self.map.dom().contains(b1) && self.map.dom().contains(b2) ==> ((b1 == b2) || !overlap( + MemRegion { base: b1, size: self.map[b1].frame.size }, + MemRegion { base: b2, size: self.map[b2].frame.size }, + )) + } - pub open spec(checked) fn mappings_dont_overlap(self) -> bool { - forall|b1: nat, b2: nat| - #![trigger self.map[b1], self.map[b2]] - #![trigger self.map.dom().contains(b1), self.map.dom().contains(b2)] - self.map.dom().contains(b1) && self.map.dom().contains(b2) ==> - ((b1 == b2) || !overlap( - 
MemRegion { base: b1, size: self.map[b1].frame.size }, - MemRegion { base: b2, size: self.map[b2].frame.size })) - } + pub open spec(checked) fn candidate_mapping_in_bounds( + self, + base: nat, + pte: PageTableEntry, + ) -> bool { + self.lower <= base && base + pte.frame.size <= self.upper + } - pub open spec(checked) fn candidate_mapping_in_bounds(self, base: nat, pte: PageTableEntry) -> bool { - self.lower <= base && base + pte.frame.size <= self.upper - } + pub open spec(checked) fn mappings_in_bounds(self) -> bool { + forall|b1: nat| + #![trigger self.map[b1]] + #![trigger self.map.dom().contains(b1)] + #![trigger self.candidate_mapping_in_bounds(b1, self.map[b1])] + self.map.dom().contains(b1) ==> self.candidate_mapping_in_bounds(b1, self.map[b1]) + } - pub open spec(checked) fn mappings_in_bounds(self) -> bool { - forall|b1: nat| - #![trigger self.map[b1]] #![trigger self.map.dom().contains(b1)] - #![trigger self.candidate_mapping_in_bounds(b1, self.map[b1])] - self.map.dom().contains(b1) ==> self.candidate_mapping_in_bounds(b1, self.map[b1]) - } + pub open spec(checked) fn accepted_mapping(self, base: nat, pte: PageTableEntry) -> bool { + &&& aligned(base, pte.frame.size) + &&& aligned(pte.frame.base, pte.frame.size) + &&& self.candidate_mapping_in_bounds(base, pte) + &&& self.arch.contains_entry_size(pte.frame.size) + } - pub open spec(checked) fn accepted_mapping(self, base: nat, pte: PageTableEntry) -> bool { - &&& aligned(base, pte.frame.size) - &&& aligned(pte.frame.base, pte.frame.size) - &&& self.candidate_mapping_in_bounds(base, pte) - &&& self.arch.contains_entry_size(pte.frame.size) - } + pub open spec(checked) fn valid_mapping(self, base: nat, pte: PageTableEntry) -> bool { + forall|b: nat| + #![auto] + self.map.dom().contains(b) ==> !overlap( + MemRegion { base: base, size: pte.frame.size }, + MemRegion { base: b, size: self.map.index(b).frame.size }, + ) + } - pub open spec(checked) fn valid_mapping(self, base: nat, pte: PageTableEntry) -> bool { - forall|b: nat| #![auto] - self.map.dom().contains(b) ==> !overlap( - MemRegion { base: base, size: pte.frame.size }, - MemRegion { base: b, size: self.map.index(b).frame.size }) + /// Maps the given `pte` at `base` in the address space + pub open spec(checked) fn map_frame(self, base: nat, pte: PageTableEntry) -> Result< + PageTableContents, + PageTableContents, + > { + if self.accepted_mapping(base, pte) { + if self.valid_mapping(base, pte) { + Ok(PageTableContents { map: self.map.insert(base, pte), ..self }) + } else { + Err(self) } + } else { + arbitrary() + } + } - /// Maps the given `pte` at `base` in the address space - pub open spec(checked) fn map_frame(self, base: nat, pte: PageTableEntry) -> Result { - if self.accepted_mapping(base, pte) { - if self.valid_mapping(base, pte) { - Ok(PageTableContents { - map: self.map.insert(base, pte), - ..self - }) - } else { - Err(self) - } - } else { - arbitrary() - } - } + proof fn map_frame_preserves_inv(self, base: nat, pte: PageTableEntry) + requires + self.inv(), + self.accepted_mapping( + base, + pte, + ), + // self.map_frame(base, frame).is_Ok(), - proof fn map_frame_preserves_inv(self, base: nat, pte: PageTableEntry) - requires - self.inv(), - self.accepted_mapping(base, pte), - // self.map_frame(base, frame).is_Ok(), - ensures - self.map_frame(base, pte).is_Ok() ==> self.map_frame(base, pte).get_Ok_0().inv(), - self.map_frame(base, pte).is_Err() ==> self.map_frame(base, pte).get_Err_0() === self, - { - if self.map_frame(base, pte).is_Ok() { - let nself = 
self.map_frame(base, pte).get_Ok_0(); - assert(nself.mappings_in_bounds()); - } - } + ensures + self.map_frame(base, pte).is_Ok() ==> self.map_frame(base, pte).get_Ok_0().inv(), + self.map_frame(base, pte).is_Err() ==> self.map_frame(base, pte).get_Err_0() === self, + { + if self.map_frame(base, pte).is_Ok() { + let nself = self.map_frame(base, pte).get_Ok_0(); + assert(nself.mappings_in_bounds()); + } + } - pub open spec(checked) fn accepted_resolve(self, vaddr: nat) -> bool { - between(vaddr, self.lower, self.upper) - } + pub open spec(checked) fn accepted_resolve(self, vaddr: nat) -> bool { + between(vaddr, self.lower, self.upper) + } - /// Given a virtual address `vaddr` it returns the corresponding `PAddr` - /// and access rights or an error in case no mapping is found. - pub open spec(checked) fn resolve(self, vaddr: nat) -> Result<(nat, PageTableEntry),()> - recommends self.accepted_resolve(vaddr) - { - if exists|base:nat, pte:PageTableEntry| - self.map.contains_pair(base, pte) && - between(vaddr, base, base + pte.frame.size) - { - let (base, pte) = choose|base:nat, pte:PageTableEntry| - self.map.contains_pair(base, pte) && between(vaddr, base, base + pte.frame.size); - Ok((base, pte)) - } else { - Err(()) - } - } + /// Given a virtual address `vaddr` it returns the corresponding `PAddr` + /// and access rights or an error in case no mapping is found. + pub open spec(checked) fn resolve(self, vaddr: nat) -> Result<(nat, PageTableEntry), ()> + recommends + self.accepted_resolve(vaddr), + { + if exists|base: nat, pte: PageTableEntry| + self.map.contains_pair(base, pte) && between(vaddr, base, base + pte.frame.size) { + let (base, pte) = choose|base: nat, pte: PageTableEntry| + self.map.contains_pair(base, pte) && between(vaddr, base, base + pte.frame.size); + Ok((base, pte)) + } else { + Err(()) + } + } - pub open spec(checked) fn remove(self, n: nat) -> PageTableContents { - PageTableContents { - map: self.map.remove(n), - ..self - } - } + pub open spec(checked) fn remove(self, n: nat) -> PageTableContents { + PageTableContents { map: self.map.remove(n), ..self } + } - pub open spec(checked) fn accepted_unmap(self, base: nat) -> bool { - &&& between(base, self.lower, self.upper) - &&& exists|size: nat| - #![trigger self.arch.contains_entry_size(size)] - #![trigger aligned(base, size)] - self.arch.contains_entry_size(size) && aligned(base, size) - } + pub open spec(checked) fn accepted_unmap(self, base: nat) -> bool { + &&& between(base, self.lower, self.upper) + &&& exists|size: nat| + #![trigger self.arch.contains_entry_size(size)] + #![trigger aligned(base, size)] + self.arch.contains_entry_size(size) && aligned(base, size) + } - /// Removes the frame from the address space that contains `base`. - pub open spec(checked) fn unmap(self, base: nat) -> Result - recommends self.accepted_unmap(base) - { - if self.map.dom().contains(base) { - Ok(self.remove(base)) - } else { - Err(self) - } - } + /// Removes the frame from the address space that contains `base`. 
+ pub open spec(checked) fn unmap(self, base: nat) -> Result + recommends + self.accepted_unmap(base), + { + if self.map.dom().contains(base) { + Ok(self.remove(base)) + } else { + Err(self) + } + } - proof fn lemma_unmap_preserves_inv(self, base: nat) - requires - self.inv(), - ensures - self.unmap(base).is_Ok() ==> self.unmap(base).get_Ok_0().inv(), - self.unmap(base).is_Err() ==> self.unmap(base).get_Err_0() === self; + proof fn lemma_unmap_preserves_inv(self, base: nat) + requires + self.inv(), + ensures + self.unmap(base).is_Ok() ==> self.unmap(base).get_Ok_0().inv(), + self.unmap(base).is_Err() ==> self.unmap(base).get_Err_0() === self, + ; - pub proof fn lemma_unmap_decrements_len(self, base: nat) - requires - self.inv(), - self.unmap(base).is_Ok() - ensures - self.map.dom().len() > 0, - equal(self.unmap(base).get_Ok_0().map.dom(), self.map.dom().remove(base)), - self.unmap(base).get_Ok_0().map.dom().len() == self.map.dom().len() - 1 - { - assert(self.map.dom().contains(base)); - lemma_set_contains_IMP_len_greater_zero::(self.map.dom(), base); - } + pub proof fn lemma_unmap_decrements_len(self, base: nat) + requires + self.inv(), + self.unmap(base).is_Ok(), + ensures + self.map.dom().len() > 0, + equal(self.unmap(base).get_Ok_0().map.dom(), self.map.dom().remove(base)), + self.unmap(base).get_Ok_0().map.dom().len() == self.map.dom().len() - 1, + { + assert(self.map.dom().contains(base)); + lemma_set_contains_IMP_len_greater_zero::(self.map.dom(), base); + } - pub open spec fn ranges_disjoint(self, other: Self) -> bool { - if self.lower <= other.lower { - self.upper <= other.lower - } else { - // other.lower < self.lower - other.upper <= self.lower - } - } + pub open spec fn ranges_disjoint(self, other: Self) -> bool { + if self.lower <= other.lower { + self.upper <= other.lower + } else { + // other.lower < self.lower + other.upper <= self.lower + } + } - pub open spec fn mappings_disjoint(self, other: Self) -> bool { - forall|s: nat, o: nat| self.map.dom().contains(s) && other.map.dom().contains(o) ==> - !overlap(MemRegion { base: s, size: self.map.index(s).frame.size }, MemRegion { base: o, size: other.map.index(o).frame.size }) - } + pub open spec fn mappings_disjoint(self, other: Self) -> bool { + forall|s: nat, o: nat| + self.map.dom().contains(s) && other.map.dom().contains(o) ==> !overlap( + MemRegion { base: s, size: self.map.index(s).frame.size }, + MemRegion { base: o, size: other.map.index(o).frame.size }, + ) + } - pub proof fn lemma_ranges_disjoint_implies_mappings_disjoint(self, other: Self) - requires - self.inv(), - other.inv(), - self.ranges_disjoint(other), - ensures - self.mappings_disjoint(other); + pub proof fn lemma_ranges_disjoint_implies_mappings_disjoint(self, other: Self) + requires + self.inv(), + other.inv(), + self.ranges_disjoint(other), + ensures + self.mappings_disjoint(other), + ; - proof fn lemma_mappings_have_positive_entry_size(self) - requires - self.inv(), - ensures - forall|va: nat| #[trigger] self.map.dom().contains(va) ==> self.map.index(va).frame.size > 0; - } + proof fn lemma_mappings_have_positive_entry_size(self) + requires + self.inv(), + ensures + forall|va: nat| #[trigger] + self.map.dom().contains(va) ==> self.map.index(va).frame.size > 0, + ; +} - // TODO: move - pub proof fn lemma_set_contains_IMP_len_greater_zero(s: Set, a: T) - requires - s.finite(), - s.contains(a), - ensures - s.len() > 0, - { - if s.len() == 0 { - // contradiction - assert(s.remove(a).len() + 1 == 0); - } - } +// TODO: move +pub proof fn 
lemma_set_contains_IMP_len_greater_zero(s: Set, a: T) + requires + s.finite(), + s.contains(a), + ensures + s.len() > 0, +{ + if s.len() == 0 { + // contradiction + assert(s.remove(a).len() + 1 == 0); + } +} - pub proof fn lemma_map_union_prefer_right_insert_commute() - ensures - forall|m1: Map, m2: Map, n: S, v: T| - !m1.dom().contains(n) && !m2.dom().contains(n) - ==> equal(m1.insert(n, v).union_prefer_right(m2), m1.union_prefer_right(m2).insert(n, v)), - forall|m1: Map, m2: Map, n: S, v: T| - !m1.dom().contains(n) && !m2.dom().contains(n) - ==> equal(m1.union_prefer_right(m2.insert(n, v)), m1.union_prefer_right(m2).insert(n, v)), - { - assert_forall_by(|m1: Map, m2: Map, n: S, v: T| { +pub proof fn lemma_map_union_prefer_right_insert_commute() + ensures + forall|m1: Map, m2: Map, n: S, v: T| + !m1.dom().contains(n) && !m2.dom().contains(n) ==> equal( + m1.insert(n, v).union_prefer_right(m2), + m1.union_prefer_right(m2).insert(n, v), + ), + forall|m1: Map, m2: Map, n: S, v: T| + !m1.dom().contains(n) && !m2.dom().contains(n) ==> equal( + m1.union_prefer_right(m2.insert(n, v)), + m1.union_prefer_right(m2).insert(n, v), + ), +{ + assert_forall_by( + |m1: Map, m2: Map, n: S, v: T| + { requires(!m1.dom().contains(n) && !m2.dom().contains(n)); - ensures(equal(m1.insert(n, v).union_prefer_right(m2), m1.union_prefer_right(m2).insert(n, v))); + ensures( + equal( + m1.insert(n, v).union_prefer_right(m2), + m1.union_prefer_right(m2).insert(n, v), + ), + ); let union1 = m1.insert(n, v).union_prefer_right(m2); let union2 = m1.union_prefer_right(m2).insert(n, v); assert_maps_equal!(union1, union2); assert(equal(union1, union2)); - }); - assert_forall_by(|m1: Map, m2: Map, n: S, v: T| { + }, + ); + assert_forall_by( + |m1: Map, m2: Map, n: S, v: T| + { requires(!m1.dom().contains(n) && !m2.dom().contains(n)); - ensures(equal(m1.union_prefer_right(m2.insert(n, v)), m1.union_prefer_right(m2).insert(n, v))); + ensures( + equal( + m1.union_prefer_right(m2.insert(n, v)), + m1.union_prefer_right(m2).insert(n, v), + ), + ); let union1 = m1.union_prefer_right(m2.insert(n, v)); let union2 = m1.union_prefer_right(m2).insert(n, v); assert_maps_equal!(union1, union2); assert(equal(union1, union2)); - }); - } + }, + ); +} - pub proof fn lemma_map_union_prefer_right_remove_commute() - ensures - forall|m1: Map, m2: Map, n: S| - m1.dom().contains(n) && !m2.dom().contains(n) - ==> equal(m1.remove(n).union_prefer_right(m2), m1.union_prefer_right(m2).remove(n)), - forall|m1: Map, m2: Map, n: S| - m2.dom().contains(n) && !m1.dom().contains(n) - ==> equal(m1.union_prefer_right(m2.remove(n)), m1.union_prefer_right(m2).remove(n)), - { - assert_forall_by(|m1: Map, m2: Map, n: S| { +pub proof fn lemma_map_union_prefer_right_remove_commute() + ensures + forall|m1: Map, m2: Map, n: S| + m1.dom().contains(n) && !m2.dom().contains(n) ==> equal( + m1.remove(n).union_prefer_right(m2), + m1.union_prefer_right(m2).remove(n), + ), + forall|m1: Map, m2: Map, n: S| + m2.dom().contains(n) && !m1.dom().contains(n) ==> equal( + m1.union_prefer_right(m2.remove(n)), + m1.union_prefer_right(m2).remove(n), + ), +{ + assert_forall_by( + |m1: Map, m2: Map, n: S| + { requires(m1.dom().contains(n) && !m2.dom().contains(n)); - ensures(equal(m1.remove(n).union_prefer_right(m2), m1.union_prefer_right(m2).remove(n))); + ensures( + equal(m1.remove(n).union_prefer_right(m2), m1.union_prefer_right(m2).remove(n)), + ); let union1 = m1.remove(n).union_prefer_right(m2); let union2 = m1.union_prefer_right(m2).remove(n); assert_maps_equal!(union1, union2); @@ 
-317,40 +387,47 @@ pub mod impl_u { // substituting union1 and/or union2's definition makes the assertion fail: // assert(equal(m1.remove(n).union_prefer_right(m2), union2)); // assert(equal(union1, m1.union_prefer_right(m2).remove(n))); - }); - assert_forall_by(|m1: Map, m2: Map, n: S| { + }, + ); + assert_forall_by( + |m1: Map, m2: Map, n: S| + { requires(m2.dom().contains(n) && !m1.dom().contains(n)); - ensures(equal(m1.union_prefer_right(m2.remove(n)), m1.union_prefer_right(m2).remove(n))); + ensures( + equal(m1.union_prefer_right(m2.remove(n)), m1.union_prefer_right(m2).remove(n)), + ); let union1 = m1.union_prefer_right(m2.remove(n)); let union2 = m1.union_prefer_right(m2).remove(n); assert_maps_equal!(union1, union2); assert(equal(union1, union2)); - }); - } + }, + ); +} - // TODO: should go somewhere else - pub proof fn lemma_finite_map_union() - ensures - forall|s1: Map, s2: Map| s1.dom().finite() && s2.dom().finite() ==> #[trigger] s1.union_prefer_right(s2).dom().finite(), - { - assert_forall_by(|s1: Map, s2: Map| { +// TODO: should go somewhere else +pub proof fn lemma_finite_map_union() + ensures + forall|s1: Map, s2: Map| + s1.dom().finite() && s2.dom().finite() ==> #[trigger] s1.union_prefer_right( + s2, + ).dom().finite(), +{ + assert_forall_by( + |s1: Map, s2: Map| + { requires(s1.dom().finite() && s2.dom().finite()); ensures((#[trigger] s1.union_prefer_right(s2)).dom().finite()); - assert(s1.dom().union(s2.dom()).finite()); - let union_dom = s1.union_prefer_right(s2).dom(); let dom_union = s1.dom().union(s2.dom()); - assert(forall|s: S| union_dom.contains(s) ==> dom_union.contains(s)); assert(forall|s: S| dom_union.contains(s) ==> union_dom.contains(s)); - assert_sets_equal!(union_dom, dom_union); - }); - } - + }, + ); +} - } +} // verus! } pub mod l1 { @@ -377,365 +454,406 @@ pub mod impl_u { verus! 
{ - pub proof fn ambient_lemmas2() - ensures - forall|d: Directory, i: nat| - #![trigger d.inv(), d.entries.index(i as int)] - d.inv() && i < d.num_entries() && d.entries.index(i as int).is_Directory() ==> d.entries.index(i as int).get_Directory_0().inv(), - forall|d: Directory| d.inv() ==> (#[trigger] d.interp()).upper == d.upper_vaddr(), - forall|d: Directory| d.inv() ==> (#[trigger] d.interp()).lower == d.base_vaddr, - { - assert forall |d: Directory, i: nat| #![auto] d.inv() && i < d.num_entries() && d.entries.index(i as int).is_Directory() - implies d.entries.index(i as int).get_Directory_0().inv() by { - assert(d.directories_obey_invariant()); - }; - assert forall |d: Directory| #![auto] d.inv() implies d.interp().upper == d.upper_vaddr() && d.interp().lower == d.base_vaddr by { - d.lemma_inv_implies_interp_inv(); - }; - } +pub proof fn ambient_lemmas2() + ensures + forall|d: Directory, i: nat| + #![trigger d.inv(), d.entries.index(i as int)] + d.inv() && i < d.num_entries() && d.entries.index(i as int).is_Directory() + ==> d.entries.index(i as int).get_Directory_0().inv(), + forall|d: Directory| d.inv() ==> (#[trigger] d.interp()).upper == d.upper_vaddr(), + forall|d: Directory| d.inv() ==> (#[trigger] d.interp()).lower == d.base_vaddr, +{ + assert forall|d: Directory, i: nat| + #![auto] + d.inv() && i < d.num_entries() && d.entries.index( + i as int, + ).is_Directory() implies d.entries.index(i as int).get_Directory_0().inv() by { + assert(d.directories_obey_invariant()); + }; + assert forall|d: Directory| #![auto] d.inv() implies d.interp().upper == d.upper_vaddr() + && d.interp().lower == d.base_vaddr by { + d.lemma_inv_implies_interp_inv(); + }; +} - // Simply uncommenting this thing slows down verification of this file by 2.5x - // #[proof] - // fn ambient_lemmas3() { - // ensures([ - // forall(|d: Directory, base: nat, pte: PageTableEntry| - // d.inv() && #[trigger] d.accepted_mapping(base, pte) ==> - // d.interp().accepted_mapping(base, pte)), - // ]); - // assert_forall_by(|d: Directory, base: nat, pte: PageTableEntry| { - // requires(d.inv() && #[trigger] d.accepted_mapping(base, pte)); - // ensures(d.interp().accepted_mapping(base, pte)); - // d.lemma_accepted_mapping_implies_interp_accepted_mapping_auto(); - // }); - // } - - #[is_variant] - pub enum NodeEntry { - Directory(Directory), - Page(PageTableEntry), - Empty(), - } +// Simply uncommenting this thing slows down verification of this file by 2.5x +// #[proof] +// fn ambient_lemmas3() { +// ensures([ +// forall(|d: Directory, base: nat, pte: PageTableEntry| +// d.inv() && #[trigger] d.accepted_mapping(base, pte) ==> +// d.interp().accepted_mapping(base, pte)), +// ]); +// assert_forall_by(|d: Directory, base: nat, pte: PageTableEntry| { +// requires(d.inv() && #[trigger] d.accepted_mapping(base, pte)); +// ensures(d.interp().accepted_mapping(base, pte)); +// d.lemma_accepted_mapping_implies_interp_accepted_mapping_auto(); +// }); +// } +#[is_variant] +pub enum NodeEntry { + Directory(Directory), + Page(PageTableEntry), + Empty(), +} - pub struct Directory { - pub entries: Seq, - pub layer: nat, // index into layer_sizes - pub base_vaddr: nat, - pub arch: Arch, - pub flags: Flags, - } +pub struct Directory { + pub entries: Seq, + pub layer: nat, // index into layer_sizes + pub base_vaddr: nat, + pub arch: Arch, + pub flags: Flags, +} - // Layer 0: 425 Directory -> - // Layer 1: 47 Directory -> - // Layer 2: 5 Page (1K) +// Layer 0: 425 Directory -> +// Layer 1: 47 Directory -> +// Layer 2: 5 Page (1K) +// Layer 1: 46 
Directory -> (1M) +// Layer 2: 1024 Pages +// Layer 0: 1024 Directories (1T) +// Layer 1: 1024 Directories (1G) +// Layer 2: 1024 Pages +impl Directory { + pub open spec(checked) fn well_formed(&self) -> bool { + &&& self.arch.inv() + &&& self.layer < self.arch.layers.len() + &&& aligned(self.base_vaddr, self.entry_size() * self.num_entries()) + &&& self.entries.len() == self.num_entries() + &&& self.flags == permissive_flags + } - // Layer 1: 46 Directory -> (1M) - // Layer 2: 1024 Pages + pub open spec(checked) fn entry_size(&self) -> nat + recommends + self.layer < self.arch.layers.len(), + { + self.arch.entry_size(self.layer) + } - // Layer 0: 1024 Directories (1T) - // Layer 1: 1024 Directories (1G) - // Layer 2: 1024 Pages + pub open spec(checked) fn num_entries(&self) -> nat // number of entries - impl Directory { - pub open spec(checked) fn well_formed(&self) -> bool { - &&& self.arch.inv() - &&& self.layer < self.arch.layers.len() - &&& aligned(self.base_vaddr, self.entry_size() * self.num_entries()) - &&& self.entries.len() == self.num_entries() - &&& self.flags == permissive_flags - } + recommends + self.layer < self.arch.layers.len(), + { + self.arch.num_entries(self.layer) + } - pub open spec(checked) fn entry_size(&self) -> nat - recommends self.layer < self.arch.layers.len() - { - self.arch.entry_size(self.layer) - } + pub open spec(checked) fn empty(&self) -> bool + recommends + self.well_formed(), + { + forall|i: nat| i < self.num_entries() ==> self.entries.index(i as int).is_Empty() + } - pub open spec(checked) fn num_entries(&self) -> nat // number of entries - recommends self.layer < self.arch.layers.len() - { - self.arch.num_entries(self.layer) - } + pub open spec(checked) fn pages_match_entry_size(&self) -> bool + recommends + self.well_formed(), + { + forall|i: nat| + (i < self.entries.len() && self.entries[i as int].is_Page()) ==> ( + #[trigger] self.entries[i as int].get_Page_0().frame.size) == self.entry_size() + } - pub open spec(checked) fn empty(&self) -> bool - recommends self.well_formed() - { - forall|i: nat| i < self.num_entries() ==> self.entries.index(i as int).is_Empty() + pub open spec(checked) fn directories_are_in_next_layer(&self) -> bool + recommends + self.well_formed(), + { + forall|i: nat| + i < self.entries.len() && self.entries.index(i as int).is_Directory() ==> { + let directory = #[trigger] self.entries[i as int].get_Directory_0(); + &&& directory.layer == self.layer + 1 + &&& directory.base_vaddr == self.base_vaddr + i * self.entry_size() } + } - pub open spec(checked) fn pages_match_entry_size(&self) -> bool - recommends self.well_formed() - { - forall|i: nat| (i < self.entries.len() && self.entries[i as int].is_Page()) - ==> (#[trigger] self.entries[i as int].get_Page_0().frame.size) == self.entry_size() - } + pub open spec(checked) fn directories_obey_invariant(&self) -> bool + recommends + self.well_formed(), + self.directories_are_in_next_layer(), + self.directories_match_arch(), + decreases self.arch.layers.len() - self.layer, 0nat, + { + if self.well_formed() && self.directories_are_in_next_layer() + && self.directories_match_arch() { + forall|i: nat| + (i < self.entries.len() && self.entries[i as int].is_Directory()) ==> ( + #[trigger] self.entries[i as int].get_Directory_0()).inv() + } else { + arbitrary() + } + } - pub open spec(checked) fn directories_are_in_next_layer(&self) -> bool - recommends self.well_formed() - { - forall|i: nat| i < self.entries.len() && self.entries.index(i as int).is_Directory() - ==> { - let directory = 
#[trigger] self.entries[i as int].get_Directory_0(); - &&& directory.layer == self.layer + 1 - &&& directory.base_vaddr == self.base_vaddr + i * self.entry_size() - } - } + pub open spec(checked) fn directories_match_arch(&self) -> bool { + forall|i: nat| + (i < self.entries.len() && self.entries.index(i as int).is_Directory()) ==> ( + #[trigger] self.entries.index(i as int).get_Directory_0().arch) == self.arch + } - pub open spec(checked) fn directories_obey_invariant(&self) -> bool - recommends - self.well_formed(), - self.directories_are_in_next_layer(), - self.directories_match_arch(), - decreases self.arch.layers.len() - self.layer, 0nat - { - if self.well_formed() && self.directories_are_in_next_layer() && self.directories_match_arch() { - forall|i: nat| (i < self.entries.len() && self.entries[i as int].is_Directory()) - ==> (#[trigger] self.entries[i as int].get_Directory_0()).inv() - } else { - arbitrary() - } - } + pub open spec fn directories_are_nonempty(&self) -> bool + recommends + self.well_formed(), + self.directories_are_in_next_layer(), + self.directories_match_arch(), + { + forall|i: nat| + i < self.entries.len() && self.entries.index(i as int).is_Directory() ==> !( + #[trigger] self.entries.index(i as int).get_Directory_0().empty()) + } - pub open spec(checked) fn directories_match_arch(&self) -> bool { - forall|i: nat| (i < self.entries.len() && self.entries.index(i as int).is_Directory()) - ==> (#[trigger] self.entries.index(i as int).get_Directory_0().arch) == self.arch - } + pub open spec(checked) fn frames_aligned(&self) -> bool + recommends + self.well_formed(), + { + forall|i: nat| + i < self.entries.len() && self.entries.index(i as int).is_Page() ==> aligned( + (#[trigger] self.entries.index(i as int).get_Page_0()).frame.base, + self.entry_size(), + ) + } - pub open spec fn directories_are_nonempty(&self) -> bool - recommends - self.well_formed(), - self.directories_are_in_next_layer(), - self.directories_match_arch(), - { - forall|i: nat| i < self.entries.len() && self.entries.index(i as int).is_Directory() - ==> !(#[trigger] self.entries.index(i as int).get_Directory_0().empty()) - } + pub open spec(checked) fn inv(&self) -> bool + decreases self.arch.layers.len() - self.layer, + { + &&& self.well_formed() + &&& self.pages_match_entry_size() + &&& self.directories_are_in_next_layer() + &&& self.directories_match_arch() + &&& self.directories_obey_invariant() + &&& self.directories_are_nonempty() + &&& self.frames_aligned() + } - pub open spec(checked) fn frames_aligned(&self) -> bool - recommends self.well_formed() - { - forall|i: nat| i < self.entries.len() && self.entries.index(i as int).is_Page() ==> - aligned((#[trigger] self.entries.index(i as int).get_Page_0()).frame.base, self.entry_size()) - } + pub open spec(checked) fn interp(self) -> l0::PageTableContents { + self.interp_aux(0) + } - pub open spec(checked) fn inv(&self) -> bool - decreases self.arch.layers.len() - self.layer - { - &&& self.well_formed() - &&& self.pages_match_entry_size() - &&& self.directories_are_in_next_layer() - &&& self.directories_match_arch() - &&& self.directories_obey_invariant() - &&& self.directories_are_nonempty() - &&& self.frames_aligned() - } + pub open spec(checked) fn upper_vaddr(self) -> nat + recommends + self.well_formed(), + { + self.arch.upper_vaddr(self.layer, self.base_vaddr) + } - pub open spec(checked) fn interp(self) -> l0::PageTableContents { - self.interp_aux(0) - } + pub open spec fn index_for_vaddr(self, vaddr: nat) -> nat { + 
self.arch.index_for_vaddr(self.layer, self.base_vaddr, vaddr) + } - pub open spec(checked) fn upper_vaddr(self) -> nat - recommends self.well_formed() - { - self.arch.upper_vaddr(self.layer, self.base_vaddr) - } + pub open spec(checked) fn entry_base(self, idx: nat) -> nat + recommends + self.inv(), + { + self.arch.entry_base(self.layer, self.base_vaddr, idx) + } - pub open spec fn index_for_vaddr(self, vaddr: nat) -> nat { - self.arch.index_for_vaddr(self.layer, self.base_vaddr, vaddr) - } + pub open spec(checked) fn next_entry_base(self, idx: nat) -> nat + recommends + self.inv(), + { + self.arch.next_entry_base(self.layer, self.base_vaddr, idx) + } - pub open spec(checked) fn entry_base(self, idx: nat) -> nat - recommends self.inv() - { - self.arch.entry_base(self.layer, self.base_vaddr, idx) - } + pub open spec fn entry_bounds(self, entry: nat) -> (nat, nat) { + (self.entry_base(entry), self.entry_base(entry + 1)) + } - pub open spec(checked) fn next_entry_base(self, idx: nat) -> nat - recommends self.inv() - { - self.arch.next_entry_base(self.layer, self.base_vaddr, idx) + pub open spec fn interp_of_entry(self, entry: nat) -> l0::PageTableContents + decreases self.arch.layers.len() - self.layer, self.num_entries() - entry, 0nat, + { + if self.inv() && entry < self.entries.len() { + let (lower, upper) = self.entry_bounds(entry); + l0::PageTableContents { + map: match self.entries.index(entry as int) { + NodeEntry::Page(p) => map![self.entry_base(entry) => p], + NodeEntry::Directory(d) => d.interp_aux(0).map, + NodeEntry::Empty() => map![], + }, + arch: self.arch, + lower, + upper, } + } else { + arbitrary() + } + } - pub open spec fn entry_bounds(self, entry: nat) -> (nat, nat) { - (self.entry_base(entry), self.entry_base(entry + 1)) - } + proof fn lemma_interp_of_entry(self) + requires + self.inv(), + ensures + forall|i: nat| + #![auto] + i < self.num_entries() ==> self.interp_of_entry(i).inv() && self.interp_of_entry( + i, + ).lower == self.entry_base(i) && self.interp_of_entry(i).upper == self.entry_base( + i + 1, + ) && (forall|base: nat| + self.interp_of_entry(i).map.dom().contains(base) ==> between( + base, + self.entry_base(i), + self.entry_base(i + 1), + )) && (forall|base: nat, pte: PageTableEntry| + self.interp_of_entry(i).map.contains_pair(base, pte) ==> between( + base, + self.entry_base(i), + self.entry_base(i + 1), + )), + { + assert forall|i: nat| #![auto] i < self.num_entries() implies self.interp_of_entry(i).inv() + && self.interp_of_entry(i).lower == self.entry_base(i) && self.interp_of_entry(i).upper + == self.entry_base(i + 1) by { + self.lemma_inv_implies_interp_of_entry_inv(i); + }; + } - pub open spec fn interp_of_entry(self, entry: nat) -> l0::PageTableContents - decreases self.arch.layers.len() - self.layer, self.num_entries() - entry, 0nat - { - if self.inv() && entry < self.entries.len() { - let (lower, upper) = self.entry_bounds(entry); - l0::PageTableContents { - map: match self.entries.index(entry as int) { - NodeEntry::Page(p) => map![self.entry_base(entry) => p], - NodeEntry::Directory(d) => d.interp_aux(0).map, - NodeEntry::Empty() => map![], - }, - arch: self.arch, - lower, - upper, - } - } else { - arbitrary() - } - } + proof fn lemma_inv_implies_interp_of_entry_inv(self, i: nat) + requires + self.inv(), + i < self.num_entries(), + ensures + self.interp_of_entry(i).inv(), + self.interp_of_entry(i).lower == self.entry_base(i), + self.interp_of_entry(i).upper == self.entry_base(i + 1), + { + indexing::lemma_entry_base_from_index(self.base_vaddr, i, 
self.entry_size()); + indexing::lemma_entry_base_from_index_support(self.base_vaddr, i, self.entry_size()); + if let NodeEntry::Directory(d) = self.entries[i as int] { + d.lemma_inv_implies_interp_inv(); + } + } - proof fn lemma_interp_of_entry(self) - requires - self.inv(), - ensures - forall|i: nat| #![auto] - i < self.num_entries() ==> - self.interp_of_entry(i).inv() && - self.interp_of_entry(i).lower == self.entry_base(i) && - self.interp_of_entry(i).upper == self.entry_base(i+1) && - (forall|base: nat| self.interp_of_entry(i).map.dom().contains(base) ==> between(base, self.entry_base(i), self.entry_base(i+1))) && - (forall|base: nat, pte: PageTableEntry| self.interp_of_entry(i).map.contains_pair(base, pte) ==> between(base, self.entry_base(i), self.entry_base(i+1))), - { - assert forall |i: nat| #![auto] i < self.num_entries() implies - self.interp_of_entry(i).inv() && - self.interp_of_entry(i).lower == self.entry_base(i) && - self.interp_of_entry(i).upper == self.entry_base(i+1) by { - self.lemma_inv_implies_interp_of_entry_inv(i); - }; + proof fn lemma_interp_of_entries_disjoint(self) + requires + self.inv(), + ensures + forall|i: nat, j: nat| + i < self.num_entries() && j < self.num_entries() && i != j ==> self.interp_of_entry( + i, + ).ranges_disjoint(self.interp_of_entry(j)), + { + assert forall|i: nat, j: nat| + i < self.num_entries() && j < self.num_entries() && i != j implies self.interp_of_entry( + i, + ).ranges_disjoint(self.interp_of_entry(j)) by { + if i < j { + assert(self.base_vaddr + i * self.entry_size() <= self.base_vaddr + j + * self.entry_size()) by (nonlinear_arith) + requires + self.inv() && i < j && self.entry_size() > 0, + {}; + assert(self.base_vaddr + (i + 1) * self.entry_size() <= self.base_vaddr + j + * self.entry_size()) by (nonlinear_arith) + requires + self.inv() && i < j && self.entry_size() > 0, + {}; + } else { + assert(self.base_vaddr + j * self.entry_size() < self.base_vaddr + i + * self.entry_size()) by (nonlinear_arith) + requires + self.inv() && j < i && self.entry_size() > 0, + {}; + assert(self.base_vaddr + (j + 1) * self.entry_size() <= self.base_vaddr + i + * self.entry_size()) by (nonlinear_arith) + requires + self.inv() && j < i && self.entry_size() > 0, + {}; } + } + } - proof fn lemma_inv_implies_interp_of_entry_inv(self, i: nat) - requires - self.inv(), - i < self.num_entries(), - ensures - self.interp_of_entry(i).inv(), - self.interp_of_entry(i).lower == self.entry_base(i), - self.interp_of_entry(i).upper == self.entry_base(i+1), - { - indexing::lemma_entry_base_from_index(self.base_vaddr, i, self.entry_size()); - indexing::lemma_entry_base_from_index_support(self.base_vaddr, i, self.entry_size()); - if let NodeEntry::Directory(d) = self.entries[i as int] { - d.lemma_inv_implies_interp_inv(); + pub open spec fn interp_aux(self, i: nat) -> l0::PageTableContents + decreases self.arch.layers.len() - self.layer, self.num_entries() - i, 1nat, + { + if self.inv() { + if i >= self.entries.len() { + l0::PageTableContents { + map: map![], + arch: self.arch, + lower: self.upper_vaddr(), + upper: self.upper_vaddr(), } - } - - proof fn lemma_interp_of_entries_disjoint(self) - requires - self.inv(), - ensures - forall|i: nat, j: nat| - i < self.num_entries() && j < self.num_entries() && i != j - ==> self.interp_of_entry(i).ranges_disjoint(self.interp_of_entry(j)), - { - assert forall|i: nat, j: nat| - i < self.num_entries() && j < self.num_entries() && i != j - implies self.interp_of_entry(i).ranges_disjoint(self.interp_of_entry(j)) - by { - if i < j 
{ - assert(self.base_vaddr + i * self.entry_size() <= self.base_vaddr + j * self.entry_size()) by (nonlinear_arith) - requires self.inv() && i < j && self.entry_size() > 0 {}; - assert(self.base_vaddr + (i+1) * self.entry_size() <= self.base_vaddr + j * self.entry_size()) by (nonlinear_arith) - requires self.inv() && i < j && self.entry_size() > 0 {}; - } else { - assert(self.base_vaddr + j * self.entry_size() < self.base_vaddr + i * self.entry_size()) by (nonlinear_arith) - requires self.inv() && j < i && self.entry_size() > 0 {}; - assert(self.base_vaddr + (j+1) * self.entry_size() <= self.base_vaddr + i * self.entry_size()) by (nonlinear_arith) - requires self.inv() && j < i && self.entry_size() > 0 {}; - } + } else { // i < self.entries.len() + let rem = self.interp_aux(i + 1); + let entry_i = self.interp_of_entry(i); + l0::PageTableContents { + map: rem.map.union_prefer_right(entry_i.map), + arch: self.arch, + lower: entry_i.lower, + upper: rem.upper, } } + } else { + arbitrary() + } + } - pub open spec fn interp_aux(self, i: nat) -> l0::PageTableContents - decreases self.arch.layers.len() - self.layer, self.num_entries() - i, 1nat - { - if self.inv() { - if i >= self.entries.len() { - l0::PageTableContents { - map: map![], - arch: self.arch, - lower: self.upper_vaddr(), - upper: self.upper_vaddr(), - } - } else { // i < self.entries.len() - let rem = self.interp_aux(i + 1); - let entry_i = self.interp_of_entry(i); - l0::PageTableContents { - map: rem.map.union_prefer_right(entry_i.map), - arch: self.arch, - lower: entry_i.lower, - upper: rem.upper, - } - } - } else { - arbitrary() - } - } + pub proof fn lemma_inv_implies_interp_inv(self) + requires + self.inv(), + ensures + self.interp().inv(), + self.interp().upper == self.upper_vaddr(), + self.interp().lower == self.base_vaddr, + { + self.lemma_inv_implies_interp_aux_inv(0); + } - pub proof fn lemma_inv_implies_interp_inv(self) - requires - self.inv(), - ensures - self.interp().inv(), - self.interp().upper == self.upper_vaddr(), - self.interp().lower == self.base_vaddr, - { - self.lemma_inv_implies_interp_aux_inv(0); + pub proof fn lemma_inv_implies_interp_aux_inv(self, i: nat) + requires + self.inv(), + ensures + self.interp_aux(i).inv(), + i <= self.entries.len() ==> self.interp_aux(i).lower == self.entry_base(i), + self.interp_aux(i).upper == self.upper_vaddr(), + i == 0 ==> self.interp_aux(0).lower == self.base_vaddr, + decreases self.arch.layers.len() - self.layer, self.num_entries() - i, + { + ambient_lemmas1(); + let interp = self.interp_aux(i); + if i >= self.entries.len() { + } else { + assert(i < self.entries.len()); + self.lemma_inv_implies_interp_aux_inv(i + 1); + assert(self.directories_obey_invariant()); + let entry = self.entries.index(i as int); + let entry_i = self.interp_of_entry(i); + let rem = self.interp_aux(i + 1); + match entry { + NodeEntry::Page(p) => {}, + NodeEntry::Directory(d) => { + d.lemma_inv_implies_interp_aux_inv(0); + }, + NodeEntry::Empty() => {}, } - - pub proof fn lemma_inv_implies_interp_aux_inv(self, i: nat) - requires - self.inv(), - ensures - self.interp_aux(i).inv(), - i <= self.entries.len() ==> self.interp_aux(i).lower == self.entry_base(i), - self.interp_aux(i).upper == self.upper_vaddr(), - i == 0 ==> self.interp_aux(0).lower == self.base_vaddr, - decreases self.arch.layers.len() - self.layer, self.num_entries() - i - { - ambient_lemmas1(); - - let interp = self.interp_aux(i); - - if i >= self.entries.len() { - } else { - assert(i < self.entries.len()); - - 
self.lemma_inv_implies_interp_aux_inv(i + 1); - - assert(self.directories_obey_invariant()); - - let entry = self.entries.index(i as int); - let entry_i = self.interp_of_entry(i); - let rem = self.interp_aux(i+1); - - match entry { - NodeEntry::Page(p) => {} - NodeEntry::Directory(d) => { - d.lemma_inv_implies_interp_aux_inv(0); - } - NodeEntry::Empty() => { } - } - - assert(interp.mappings_are_of_valid_size()); - - if let NodeEntry::Page(pte) = entry { - indexing::lemma_entry_base_from_index(self.base_vaddr, i, self.entry_size()); - indexing::lemma_entry_base_from_index_support(self.base_vaddr, i, self.entry_size()); - } - - assert(interp.mappings_are_aligned()); - - match entry { - NodeEntry::Page(pte) => { - assert_nonlinear_by({ - requires([ + assert(interp.mappings_are_of_valid_size()); + if let NodeEntry::Page(pte) = entry { + indexing::lemma_entry_base_from_index(self.base_vaddr, i, self.entry_size()); + indexing::lemma_entry_base_from_index_support( + self.base_vaddr, + i, + self.entry_size(), + ); + } + assert(interp.mappings_are_aligned()); + match entry { + NodeEntry::Page(pte) => { + assert_nonlinear_by( + { + requires( + [ self.inv(), equal(entry_i, self.interp_of_entry(i)), self.entry_size() == pte.frame.size, i < self.entries.len(), - ]); - ensures(entry_i.candidate_mapping_in_bounds(self.entry_base(i), pte)); - }); - } - NodeEntry::Directory(d) => { - assert_nonlinear_by({ - requires([ + ], + ); + ensures(entry_i.candidate_mapping_in_bounds(self.entry_base(i), pte)); + }, + ); + }, + NodeEntry::Directory(d) => { + assert_nonlinear_by( + { + requires( + [ self.inv(), equal(entry_i, self.interp_of_entry(i)), d.interp_aux(0).inv(), @@ -745,1399 +863,1703 @@ pub mod impl_u { d.interp_aux(0).upper == d.upper_vaddr(), equal(self.interp_of_entry(i).map, d.interp_aux(0).map), i < self.entries.len(), - ]); - ensures(entry_i.mappings_in_bounds()); - assert(self.well_formed()); - assert(entry_i.lower <= d.interp_aux(0).lower); // proof stability - assert(entry_i.upper >= d.interp_aux(0).upper); // proof stability - }); - } - NodeEntry::Empty() => {} - } - assert(entry_i.mappings_in_bounds()); - - assert(entry_i.inv()); - - - assert(self.interp_aux(i + 1).lower == self.entry_base(i + 1)); - - assert_nonlinear_by({ - requires([ + ], + ); + ensures(entry_i.mappings_in_bounds()); + assert(self.well_formed()); + assert(entry_i.lower <= d.interp_aux(0).lower); // proof stability + assert(entry_i.upper >= d.interp_aux(0).upper); // proof stability + }, + ); + }, + NodeEntry::Empty() => {}, + } + assert(entry_i.mappings_in_bounds()); + assert(entry_i.inv()); + assert(self.interp_aux(i + 1).lower == self.entry_base(i + 1)); + assert_nonlinear_by( + { + requires( + [ self.inv(), equal(rem, self.interp_aux(i + 1)), equal(entry_i, self.interp_of_entry(i)), - self.interp_aux(i + 1).lower == self.entry_base(i + 1) - ]); - ensures(rem.ranges_disjoint(entry_i)); - }); - rem.lemma_ranges_disjoint_implies_mappings_disjoint(entry_i); - - assert(interp.mappings_dont_overlap()); - - assert_nonlinear_by({ - requires([ + self.interp_aux(i + 1).lower == self.entry_base(i + 1), + ], + ); + ensures(rem.ranges_disjoint(entry_i)); + }, + ); + rem.lemma_ranges_disjoint_implies_mappings_disjoint(entry_i); + assert(interp.mappings_dont_overlap()); + assert_nonlinear_by( + { + requires( + [ equal(interp, self.interp_aux(i)), equal(entry_i, self.interp_of_entry(i)), equal(rem, self.interp_aux(i + 1)), self.interp_aux(i + 1).lower == self.entry_base(i + 1), entry_i.upper == self.entry_base(i + 1), interp.upper == 
self.upper_vaddr(), - ]); - ensures([ + ], + ); + ensures( + [ interp.lower <= entry_i.lower, interp.upper >= entry_i.upper, interp.lower <= self.interp_aux(i + 1).lower, interp.upper >= self.interp_aux(i + 1).upper, - ]); - }); - - assert(interp.mappings_in_bounds()); - - assert(interp.map.dom().finite()); - - if i == 0 { - assert_nonlinear_by({ - requires([ + ], + ); + }, + ); + assert(interp.mappings_in_bounds()); + assert(interp.map.dom().finite()); + if i == 0 { + assert_nonlinear_by( + { + requires( + [ equal(entry_i, self.interp_of_entry(i)), entry_i.lower == self.base_vaddr + i * self.entry_size(), i == 0, - ]); - ensures(self.interp_aux(0).lower == self.base_vaddr); - }); - } - } + ], + ); + ensures(self.interp_aux(0).lower == self.base_vaddr); + }, + ); } + } + } - pub proof fn lemma_empty_implies_interp_aux_empty(self, i: nat) - requires - self.inv(), - self.empty(), - ensures - equal(self.interp_aux(i).map, Map::empty()), - equal(self.interp_aux(i).map.dom(), Set::empty()), - decreases self.arch.layers.len() - self.layer, self.num_entries() - i - { - if i >= self.entries.len() { - } else { - let rem = self.interp_aux(i + 1); - let entry_i = self.interp_of_entry(i); - self.lemma_empty_implies_interp_aux_empty(i + 1); - assert_maps_equal!(rem.map.union_prefer_right(entry_i.map), Map::empty()); - } - } + pub proof fn lemma_empty_implies_interp_aux_empty(self, i: nat) + requires + self.inv(), + self.empty(), + ensures + equal(self.interp_aux(i).map, Map::empty()), + equal(self.interp_aux(i).map.dom(), Set::empty()), + decreases self.arch.layers.len() - self.layer, self.num_entries() - i, + { + if i >= self.entries.len() { + } else { + let rem = self.interp_aux(i + 1); + let entry_i = self.interp_of_entry(i); + self.lemma_empty_implies_interp_aux_empty(i + 1); + assert_maps_equal!(rem.map.union_prefer_right(entry_i.map), Map::empty()); + } + } - proof fn lemma_empty_implies_interp_empty(self) - requires - self.inv(), - self.empty() - ensures - equal(self.interp().map, Map::empty()), - equal(self.interp().map.dom(), Set::empty()) - { - self.lemma_empty_implies_interp_aux_empty(0); - } + proof fn lemma_empty_implies_interp_empty(self) + requires + self.inv(), + self.empty(), + ensures + equal(self.interp().map, Map::empty()), + equal(self.interp().map.dom(), Set::empty()), + { + self.lemma_empty_implies_interp_aux_empty(0); + } - proof fn lemma_ranges_disjoint_interp_aux_interp_of_entry(self) - requires - self.inv(), - ensures - forall|i: nat, j: nat| - j < i && i < self.num_entries() - ==> self.interp_aux(i).ranges_disjoint(self.interp_of_entry(j)), - { - assert_forall_by(|i: nat, j: nat| { + proof fn lemma_ranges_disjoint_interp_aux_interp_of_entry(self) + requires + self.inv(), + ensures + forall|i: nat, j: nat| + j < i && i < self.num_entries() ==> self.interp_aux(i).ranges_disjoint( + self.interp_of_entry(j), + ), + { + assert_forall_by( + |i: nat, j: nat| + { requires(j < i && i < self.num_entries()); ensures(self.interp_aux(i).ranges_disjoint(self.interp_of_entry(j))); - let interp = self.interp_aux(i); + let interp = self.interp_aux(i); let entry_j = self.interp_of_entry(j); self.lemma_inv_implies_interp_aux_inv(i); - assert_nonlinear_by({ - requires( - self.entry_size() > 0 && - j < i && - interp.lower == self.entry_base(i) && - entry_j.lower == self.entry_base(j) && - entry_j.upper == self.entry_base(j+1)); - ensures( - entry_j.upper <= interp.lower && - interp.lower > entry_j.lower); - }); - }); + assert_nonlinear_by( + { + requires( + self.entry_size() > 0 && j < i && 
interp.lower == self.entry_base(i) + && entry_j.lower == self.entry_base(j) && entry_j.upper + == self.entry_base(j + 1), + ); + ensures(entry_j.upper <= interp.lower && interp.lower > entry_j.lower); + }, + ); + }, + ); + } + + #[verifier(spinoff_prover)] + proof fn lemma_interp_of_entry_contains_mapping_implies_interp_aux_contains_mapping( + self, + i: nat, + j: nat, + ) + requires + self.inv(), + i <= j, + j < self.entries.len(), + ensures + forall|va: nat, pte: PageTableEntry| + #![auto] + self.interp_of_entry(j).map.contains_pair(va, pte) ==> self.interp_aux( + i, + ).map.contains_pair(va, pte), + forall|va: nat| + #![auto] + self.interp_of_entry(j).map.dom().contains(va) ==> self.interp_aux( + i, + ).map.dom().contains(va), + forall|va: nat| + between(va, self.entry_base(j), self.entry_base(j + 1)) && !self.interp_of_entry( + j, + ).map.dom().contains(va) ==> !self.interp_aux(i).map.dom().contains(va), + decreases self.arch.layers.len() - self.layer, self.num_entries() - i, + { + self.lemma_inv_implies_interp_aux_inv(i + 1); + self.lemma_inv_implies_interp_of_entry_inv(i); + self.lemma_inv_implies_interp_of_entry_inv(j); + let rem = self.interp_aux(i + 1); + let entry_i = self.interp_of_entry(i); + if i != j { + self.lemma_interp_of_entry_contains_mapping_implies_interp_aux_contains_mapping( + i + 1, + j, + ); + if let NodeEntry::Directory(d) = self.entries.index(i as int) { + assert(self.directories_obey_invariant()); + assert(d.inv()); + d.lemma_inv_implies_interp_inv(); + self.lemma_ranges_disjoint_interp_aux_interp_of_entry(); + rem.lemma_ranges_disjoint_implies_mappings_disjoint(entry_i); } + } + indexing::lemma_entry_base_from_index(self.base_vaddr, i, self.entry_size()); + indexing::lemma_entry_base_from_index(self.base_vaddr, j, self.entry_size()); + } - #[verifier(spinoff_prover)] - proof fn lemma_interp_of_entry_contains_mapping_implies_interp_aux_contains_mapping(self, i: nat, j: nat) - requires - self.inv(), - i <= j, - j < self.entries.len(), - ensures - forall|va: nat, pte: PageTableEntry| #![auto] self.interp_of_entry(j).map.contains_pair(va, pte) ==> self.interp_aux(i).map.contains_pair(va, pte), - forall|va: nat| #![auto] self.interp_of_entry(j).map.dom().contains(va) ==> self.interp_aux(i).map.dom().contains(va), - forall|va: nat| - between(va, self.entry_base(j), self.entry_base(j+1)) && !self.interp_of_entry(j).map.dom().contains(va) - ==> !self.interp_aux(i).map.dom().contains(va), - decreases self.arch.layers.len() - self.layer, self.num_entries() - i - { - self.lemma_inv_implies_interp_aux_inv(i+1); - self.lemma_inv_implies_interp_of_entry_inv(i); - self.lemma_inv_implies_interp_of_entry_inv(j); + pub proof fn lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping( + self, + j: nat, + ) + requires + self.inv(), + j < self.entries.len(), + ensures + forall|va: nat| + #![auto] + self.interp_of_entry(j).map.dom().contains(va) ==> self.interp().map.dom().contains( + va, + ), + forall|va: nat, pte: PageTableEntry| + #![auto] + self.interp_of_entry(j).map.contains_pair(va, pte) + ==> self.interp().map.contains_pair(va, pte), + forall|va: nat| + #![auto] + between(va, self.entry_base(j), self.entry_base(j + 1)) && !self.interp_of_entry( + j, + ).map.dom().contains(va) ==> !self.interp().map.dom().contains(va), + { + self.lemma_interp_of_entry_contains_mapping_implies_interp_aux_contains_mapping(0, j); + } - let rem = self.interp_aux(i + 1); - let entry_i = self.interp_of_entry(i); + // TODO restore spec(checked) when recommends_by is fixed + pub open 
spec fn resolve(self, vaddr: nat) -> Result<(nat, PageTableEntry), ()> + recommends + self.inv(), + self.interp().accepted_resolve(vaddr), + decreases self.arch.layers.len() - self.layer, + { + decreases_when(self.inv() && self.interp().accepted_resolve(vaddr)); + decreases_by(Self::check_resolve); + let entry = self.index_for_vaddr(vaddr); + match self.entries.index(entry as int) { + NodeEntry::Page(pte) => { + let offset = vaddr - self.entry_base(entry); + Ok((self.entry_base(entry), pte)) + }, + NodeEntry::Directory(d) => { d.resolve(vaddr) }, + NodeEntry::Empty() => { Err(()) }, + } + } - if i != j { - self.lemma_interp_of_entry_contains_mapping_implies_interp_aux_contains_mapping(i+1, j); + #[verifier(decreases_by)] + proof fn check_resolve(self, vaddr: nat) { + assert(self.inv() && self.interp().accepted_resolve(vaddr)); + ambient_lemmas1(); + ambient_lemmas2(); + self.lemma_inv_implies_interp_inv(); + assert(between(vaddr, self.base_vaddr, self.upper_vaddr())); + let entry = self.index_for_vaddr(vaddr); + indexing::lemma_index_from_base_and_addr( + self.base_vaddr, + vaddr, + self.entry_size(), + self.num_entries(), + ); + // TODO: This makes the recommends failure on the line below go away but not the one in the + // corresponding spec function. wtf + assert(0 <= entry < self.entries.len()); + match self.entries.index(entry as int) { + NodeEntry::Page(p) => {}, + NodeEntry::Directory(d) => { + d.lemma_inv_implies_interp_inv(); + assert(d.inv()); + }, + NodeEntry::Empty() => {}, + } + } - if let NodeEntry::Directory(d) = self.entries.index(i as int) { - assert(self.directories_obey_invariant()); - assert(d.inv()); - d.lemma_inv_implies_interp_inv(); - self.lemma_ranges_disjoint_interp_aux_interp_of_entry(); - rem.lemma_ranges_disjoint_implies_mappings_disjoint(entry_i); - } + proof fn lemma_interp_aux_contains_implies_interp_of_entry_contains(self, j: nat) + requires + self.inv(), + ensures + forall|base: nat, pte: PageTableEntry| + self.interp_aux(j).map.contains_pair(base, pte) ==> exists|i: nat| + #![auto] + i < self.num_entries() && self.interp_of_entry(i).map.contains_pair(base, pte), + forall|base: nat| + self.interp_aux(j).map.dom().contains(base) ==> exists|i: nat| + #![auto] + i < self.num_entries() && self.interp_of_entry(i).map.dom().contains(base), + decreases self.arch.layers.len() - self.layer, self.num_entries() - j, + { + if j >= self.entries.len() { + } else { + let _ = self.interp_of_entry(j); + self.lemma_interp_aux_contains_implies_interp_of_entry_contains(j + 1); + assert forall|base: nat, pte: PageTableEntry| + #![auto] + self.interp_aux(j).map.contains_pair(base, pte) implies exists|i: nat| + #![auto] + i < self.num_entries() && self.interp_of_entry(i).map.contains_pair(base, pte) by { + if self.interp_aux(j + 1).map.contains_pair(base, pte) { + } else { } + }; + assert forall|base: nat| + #![auto] + self.interp_aux(j).map.dom().contains(base) implies exists|i: nat| + #![auto] + i < self.num_entries() && self.interp_of_entry(i).map.dom().contains(base) by { + if self.interp_aux(j + 1).map.dom().contains(base) { + } else { + } + }; + } + } - indexing::lemma_entry_base_from_index(self.base_vaddr, i, self.entry_size()); - indexing::lemma_entry_base_from_index(self.base_vaddr, j, self.entry_size()); - } - - pub proof fn lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping(self, j: nat) - requires - self.inv(), - j < self.entries.len(), - ensures - forall|va: nat| #![auto] self.interp_of_entry(j).map.dom().contains(va) ==> 
self.interp().map.dom().contains(va), - forall|va: nat, pte: PageTableEntry| #![auto] self.interp_of_entry(j).map.contains_pair(va, pte) ==> self.interp().map.contains_pair(va, pte), - forall|va: nat| #![auto] - between(va, self.entry_base(j), self.entry_base(j+1)) && !self.interp_of_entry(j).map.dom().contains(va) - ==> !self.interp().map.dom().contains(va), - { - self.lemma_interp_of_entry_contains_mapping_implies_interp_aux_contains_mapping(0, j); - } + proof fn lemma_interp_contains_implies_interp_of_entry_contains(self) + requires + self.inv(), + ensures + forall|base: nat, pte: PageTableEntry| + self.interp().map.contains_pair(base, pte) ==> exists|i: nat| + #![auto] + i < self.num_entries() && self.interp_of_entry(i).map.contains_pair(base, pte), + forall|base: nat| + self.interp().map.dom().contains(base) ==> exists|i: nat| + #![auto] + i < self.num_entries() && self.interp_of_entry(i).map.dom().contains(base), + { + self.lemma_interp_aux_contains_implies_interp_of_entry_contains(0); + } - // TODO restore spec(checked) when recommends_by is fixed - pub open spec fn resolve(self, vaddr: nat) -> Result<(nat, PageTableEntry),()> - recommends - self.inv(), - self.interp().accepted_resolve(vaddr), - decreases self.arch.layers.len() - self.layer - { - decreases_when(self.inv() && self.interp().accepted_resolve(vaddr)); - decreases_by(Self::check_resolve); - - let entry = self.index_for_vaddr(vaddr); - match self.entries.index(entry as int) { - NodeEntry::Page(pte) => { - let offset = vaddr - self.entry_base(entry); - Ok((self.entry_base(entry), pte)) - }, - NodeEntry::Directory(d) => { - d.resolve(vaddr) - }, - NodeEntry::Empty() => { - Err(()) - }, - } - } + #[verifier(spinoff_prover)] + proof fn lemma_no_mapping_in_interp_of_entry_implies_no_mapping_in_interp( + self, + vaddr: nat, + i: nat, + ) + requires + self.inv(), + i < self.num_entries(), + between(vaddr, self.interp_of_entry(i).lower, self.interp_of_entry(i).upper), + !exists|base: nat| + self.interp_of_entry(i).map.dom().contains(base) && between( + vaddr, + base, + base + (#[trigger] self.interp_of_entry(i).map.index(base)).frame.size, + ), + ensures + !exists|base: nat| + self.interp().map.dom().contains(base) && between( + vaddr, + base, + base + (#[trigger] self.interp().map.index(base)).frame.size, + ), + { + assert(0 < self.arch.entry_size(self.layer)); + assert forall|idx: nat, idx2: nat, base: nat, layer: nat| + layer < self.arch.layers.len() && idx < idx2 implies self.arch.entry_base( + layer, + base, + idx, + ) < self.arch.entry_base(layer, base, idx2) by { + indexing::lemma_entry_base_from_index(base, idx, self.arch.entry_size(layer)); + }; + self.lemma_interp_of_entry(); + self.lemma_interp_contains_implies_interp_of_entry_contains(); + assert(self.directories_obey_invariant()); + if exists|base: nat| + self.interp().map.dom().contains(base) && between( + vaddr, + base, + base + (#[trigger] self.interp().map.index(base)).frame.size, + ) { + let base = choose|base: nat| + self.interp().map.dom().contains(base) && between( + vaddr, + base, + base + (#[trigger] self.interp().map.index(base)).frame.size, + ); + let p = self.interp().map.index(base); + assert(self.interp().map.contains_pair(base, p)); + assert(self.interp().map.dom().contains(base)); + assert(self.interp().map.index(base) == p); + } + } - #[verifier(decreases_by)] - proof fn check_resolve(self, vaddr: nat) { - assert(self.inv() && self.interp().accepted_resolve(vaddr)); - - ambient_lemmas1(); - ambient_lemmas2(); - self.lemma_inv_implies_interp_inv(); - 
- assert(between(vaddr, self.base_vaddr, self.upper_vaddr())); - let entry = self.index_for_vaddr(vaddr); - indexing::lemma_index_from_base_and_addr(self.base_vaddr, vaddr, self.entry_size(), self.num_entries()); - // TODO: This makes the recommends failure on the line below go away but not the one in the - // corresponding spec function. wtf - assert(0 <= entry < self.entries.len()); - match self.entries.index(entry as int) { - NodeEntry::Page(p) => { - }, - NodeEntry::Directory(d) => { - d.lemma_inv_implies_interp_inv(); - assert(d.inv()); - }, - NodeEntry::Empty() => { - }, - } - } + #[verifier(spinoff_prover)] + pub proof fn lemma_resolve_structure_assertions(self, vaddr: nat, idx: nat) + requires + self.inv(), + self.interp().accepted_resolve(vaddr), + idx == self.index_for_vaddr(vaddr), + ensures + self.entries.index(idx as int).is_Directory() ==> { + let d = self.entries.index(idx as int).get_Directory_0(); + &&& d.interp().accepted_resolve(vaddr) + &&& d.inv() + }, + decreases self.arch.layers.len() - self.layer, + { + ambient_lemmas1(); + ambient_lemmas2(); + indexing::lemma_entry_base_from_index(self.base_vaddr, idx, self.entry_size()); + indexing::lemma_index_from_base_and_addr( + self.base_vaddr, + vaddr, + self.entry_size(), + self.num_entries(), + ); + match self.entries.index(idx as int) { + NodeEntry::Page(p) => {}, + NodeEntry::Directory(d) => { + d.lemma_inv_implies_interp_inv(); + assert(d.interp().accepted_resolve(vaddr)); + }, + NodeEntry::Empty() => {}, + } + } - proof fn lemma_interp_aux_contains_implies_interp_of_entry_contains(self, j: nat) - requires - self.inv(), - ensures - forall|base: nat, pte: PageTableEntry| - self.interp_aux(j).map.contains_pair(base, pte) ==> - exists|i: nat| #![auto] i < self.num_entries() && self.interp_of_entry(i).map.contains_pair(base, pte), - forall|base: nat| - self.interp_aux(j).map.dom().contains(base) ==> - exists|i: nat| #![auto] i < self.num_entries() && self.interp_of_entry(i).map.dom().contains(base) - decreases self.arch.layers.len() - self.layer, self.num_entries() - j - { - if j >= self.entries.len() { + #[verifier(spinoff_prover)] + pub proof fn lemma_resolve_refines(self, vaddr: nat) + requires + self.inv(), + self.interp().accepted_resolve(vaddr), + ensures + equal(self.interp().resolve(vaddr), self.resolve(vaddr)), + decreases self.arch.layers.len() - self.layer, + { + ambient_lemmas1(); + ambient_lemmas2(); + self.lemma_inv_implies_interp_inv(); + let entry = self.index_for_vaddr(vaddr); + indexing::lemma_entry_base_from_index(self.base_vaddr, entry, self.entry_size()); + indexing::lemma_index_from_base_and_addr( + self.base_vaddr, + vaddr, + self.entry_size(), + self.num_entries(), + ); + self.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping(entry); + match self.entries.index(entry as int) { + NodeEntry::Page(p) => { + assert(self.resolve(vaddr).is_Ok()); + let p_vaddr = self.entry_base(entry); + assert(self.interp().map.contains_pair(p_vaddr, p)); + assert(vaddr < p_vaddr + self.interp().map.index(p_vaddr).frame.size); + }, + NodeEntry::Directory(d) => { + d.lemma_inv_implies_interp_inv(); + assert(d.interp().accepted_resolve(vaddr)); + d.lemma_resolve_refines(vaddr); + assert(equal(self.interp_of_entry(entry), d.interp())); + assert(equal(d.interp().resolve(vaddr), d.resolve(vaddr))); + if d.resolve(vaddr).is_Ok() { + assert(self.resolve(vaddr).is_Ok()); + assert(exists|base: nat| + d.interp().map.dom().contains(base) && between( + vaddr, + base, + base + (#[trigger] 
d.interp().map.index(base)).frame.size, + )); + let base = choose|base: nat| + d.interp().map.dom().contains(base) && between( + vaddr, + base, + base + (#[trigger] d.interp().map.index(base)).frame.size, + ); + assert(self.interp().map.contains_pair( + base, + self.interp_of_entry(entry).map.index(base), + )); + assert(d.resolve(vaddr).is_Ok()); + assert(d.interp().resolve(vaddr).is_Ok()); + assert(equal(d.interp().resolve(vaddr), self.interp().resolve(vaddr))); } else { - let _ = self.interp_of_entry(j); - self.lemma_interp_aux_contains_implies_interp_of_entry_contains(j+1); - assert forall |base: nat, pte: PageTableEntry| #![auto] - self.interp_aux(j).map.contains_pair(base, pte) implies - exists|i: nat| #![auto] i < self.num_entries() && self.interp_of_entry(i).map.contains_pair(base, pte) by { - if self.interp_aux(j+1).map.contains_pair(base, pte) { } else { } - }; - assert forall |base: nat| #![auto] - self.interp_aux(j).map.dom().contains(base) implies - exists|i: nat| #![auto] i < self.num_entries() && self.interp_of_entry(i).map.dom().contains(base) by { - if self.interp_aux(j+1).map.dom().contains(base) { } else { } + assert(d.resolve(vaddr).is_Err()); + assert(self.resolve(vaddr).is_Err()); + assert(d.interp().resolve(vaddr).is_Err()); + assert(!exists|base: nat| + d.interp().map.dom().contains(base) && between( + vaddr, + base, + base + (#[trigger] d.interp().map.index(base)).frame.size, + )) by { + assert(!exists|base: nat, pte: PageTableEntry| + d.interp().map.contains_pair(base, pte) && between( + vaddr, + base, + base + pte.frame.size, + )); + if exists|base: nat| + d.interp().map.dom().contains(base) && between( + vaddr, + base, + base + (#[trigger] d.interp().map.index(base)).frame.size, + ) { + let base = choose|base: nat| + d.interp().map.dom().contains(base) && between( + vaddr, + base, + base + (#[trigger] d.interp().map.index(base)).frame.size, + ); + let pte = d.interp().map.index(base); + assert(d.interp().map.contains_pair(base, pte)); + } }; + self.lemma_no_mapping_in_interp_of_entry_implies_no_mapping_in_interp( + vaddr, + entry, + ); } - } - - proof fn lemma_interp_contains_implies_interp_of_entry_contains(self) - requires - self.inv(), - ensures - forall|base: nat, pte: PageTableEntry| - self.interp().map.contains_pair(base, pte) ==> - exists|i: nat| #![auto] i < self.num_entries() && self.interp_of_entry(i).map.contains_pair(base, pte), - forall|base: nat| - self.interp().map.dom().contains(base) ==> - exists|i: nat| #![auto] i < self.num_entries() && self.interp_of_entry(i).map.dom().contains(base), - { - self.lemma_interp_aux_contains_implies_interp_of_entry_contains(0); - } - - #[verifier(spinoff_prover)] - proof fn lemma_no_mapping_in_interp_of_entry_implies_no_mapping_in_interp(self, vaddr: nat, i: nat) - requires - self.inv(), - i < self.num_entries(), - between(vaddr, self.interp_of_entry(i).lower, self.interp_of_entry(i).upper), - !exists|base:nat| - self.interp_of_entry(i).map.dom().contains(base) && - between(vaddr, base, base + (#[trigger] self.interp_of_entry(i).map.index(base)).frame.size), - ensures - !exists|base:nat| - self.interp().map.dom().contains(base) && - between(vaddr, base, base + (#[trigger] self.interp().map.index(base)).frame.size), - { - assert(0 < self.arch.entry_size(self.layer)); - assert forall|idx: nat, idx2: nat, base: nat, layer: nat| - layer < self.arch.layers.len() && idx < idx2 - implies self.arch.entry_base(layer, base, idx) < self.arch.entry_base(layer, base, idx2) by - { indexing::lemma_entry_base_from_index(base, 
idx, self.arch.entry_size(layer)); }; - self.lemma_interp_of_entry(); - self.lemma_interp_contains_implies_interp_of_entry_contains(); - - assert(self.directories_obey_invariant()); - if exists|base:nat| - self.interp().map.dom().contains(base) && - between(vaddr, base, base + (#[trigger] self.interp().map.index(base)).frame.size) { - let base = choose|base:nat| - self.interp().map.dom().contains(base) && - between(vaddr, base, base + (#[trigger] self.interp().map.index(base)).frame.size); - let p = self.interp().map.index(base); - assert(self.interp().map.contains_pair(base, p)); - assert(self.interp().map.dom().contains(base)); - assert(self.interp().map.index(base) == p); - } - } - - #[verifier(spinoff_prover)] - pub proof fn lemma_resolve_structure_assertions(self, vaddr: nat, idx: nat) - requires - self.inv(), - self.interp().accepted_resolve(vaddr), - idx == self.index_for_vaddr(vaddr), - ensures - self.entries.index(idx as int).is_Directory() ==> { - let d = self.entries.index(idx as int).get_Directory_0(); - &&& d.interp().accepted_resolve(vaddr) - &&& d.inv() - }, - decreases self.arch.layers.len() - self.layer - { - ambient_lemmas1(); - ambient_lemmas2(); + assert(equal(d.interp().resolve(vaddr), self.interp().resolve(vaddr))); + }, + NodeEntry::Empty() => { + assert(self.resolve(vaddr).is_Err()); + self.lemma_no_mapping_in_interp_of_entry_implies_no_mapping_in_interp(vaddr, entry); + assert(self.interp().resolve(vaddr).is_Err()); + }, + } + } - indexing::lemma_entry_base_from_index(self.base_vaddr, idx, self.entry_size()); - indexing::lemma_index_from_base_and_addr(self.base_vaddr, vaddr, self.entry_size(), self.num_entries()); + pub open spec(checked) fn update(self, n: nat, e: NodeEntry) -> Self + recommends + n < self.entries.len(), + { + Directory { entries: self.entries.update(n as int, e), ..self } + } - match self.entries.index(idx as int) { - NodeEntry::Page(p) => { }, - NodeEntry::Directory(d) => { - d.lemma_inv_implies_interp_inv(); - assert(d.interp().accepted_resolve(vaddr)); - }, - NodeEntry::Empty() => { }, - } - } + pub open spec(checked) fn candidate_mapping_in_bounds( + self, + base: nat, + pte: PageTableEntry, + ) -> bool + recommends + self.inv(), + { + self.base_vaddr <= base && base + pte.frame.size <= self.upper_vaddr() + } - #[verifier(spinoff_prover)] - pub proof fn lemma_resolve_refines(self, vaddr: nat) - requires - self.inv(), - self.interp().accepted_resolve(vaddr), - ensures - equal(self.interp().resolve(vaddr), self.resolve(vaddr)), - decreases self.arch.layers.len() - self.layer - { - ambient_lemmas1(); - ambient_lemmas2(); - self.lemma_inv_implies_interp_inv(); - - let entry = self.index_for_vaddr(vaddr); - indexing::lemma_entry_base_from_index(self.base_vaddr, entry, self.entry_size()); - indexing::lemma_index_from_base_and_addr(self.base_vaddr, vaddr, self.entry_size(), self.num_entries()); - self.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping(entry); - - match self.entries.index(entry as int) { - NodeEntry::Page(p) => { - assert(self.resolve(vaddr).is_Ok()); - let p_vaddr = self.entry_base(entry); - assert(self.interp().map.contains_pair(p_vaddr, p)); - assert(vaddr < p_vaddr + self.interp().map.index(p_vaddr).frame.size); - }, - NodeEntry::Directory(d) => { - d.lemma_inv_implies_interp_inv(); - assert(d.interp().accepted_resolve(vaddr)); - d.lemma_resolve_refines(vaddr); + pub open spec(checked) fn accepted_mapping(self, base: nat, pte: PageTableEntry) -> bool + recommends + self.inv(), + { + &&& aligned(base, 
pte.frame.size) + &&& aligned(pte.frame.base, pte.frame.size) + &&& self.candidate_mapping_in_bounds(base, pte) + &&& self.arch.contains_entry_size_at_index_atleast(pte.frame.size, self.layer) + } - assert(equal(self.interp_of_entry(entry), d.interp())); + pub proof fn lemma_accepted_mapping_implies_interp_accepted_mapping_manual( + self, + base: nat, + pte: PageTableEntry, + ) + requires + self.inv(), + self.accepted_mapping(base, pte), + ensures + self.interp().accepted_mapping(base, pte), + { + self.lemma_inv_implies_interp_inv(); + } - assert(equal(d.interp().resolve(vaddr), d.resolve(vaddr))); + pub proof fn lemma_accepted_mapping_implies_interp_accepted_mapping_auto(self) + ensures + forall|base: nat, pte: PageTableEntry| + self.inv() && #[trigger] self.accepted_mapping(base, pte) + ==> self.interp().accepted_mapping(base, pte), + { + assert_forall_by( + |base: nat, pte: PageTableEntry| + { + requires(self.inv() && #[trigger] self.accepted_mapping(base, pte)); + ensures(self.interp().accepted_mapping(base, pte)); + self.lemma_accepted_mapping_implies_interp_accepted_mapping_manual(base, pte); + }, + ); + } - if d.resolve(vaddr).is_Ok() { - assert(self.resolve(vaddr).is_Ok()); - assert(exists|base: nat| - d.interp().map.dom().contains(base) && - between(vaddr, base, base + (#[trigger] d.interp().map.index(base)).frame.size)); + // Creates new empty directory to map to entry 'entry' + pub open spec fn new_empty_dir(self, entry: nat) -> Self + recommends + self.inv(), + entry < self.num_entries(), + self.layer + 1 < self.arch.layers.len(), + { + Directory { + entries: new_seq(self.arch.num_entries((self.layer + 1) as nat), NodeEntry::Empty()), + layer: self.layer + 1, + base_vaddr: self.entry_base(entry), + arch: self.arch, + flags: permissive_flags, + } + } - let base = choose|base:nat| - d.interp().map.dom().contains(base) && - between(vaddr, base, base + (#[trigger] d.interp().map.index(base)).frame.size); + pub proof fn lemma_new_empty_dir(self, entry: nat) + requires + self.inv(), + entry < self.num_entries(), + self.layer + 1 < self.arch.layers.len(), + ensures + self.new_empty_dir(entry).inv(), + self.new_empty_dir(entry).entries.len() == self.arch.num_entries( + (self.layer + 1) as nat, + ), + forall|j: nat| + j < self.new_empty_dir(entry).num_entries() ==> equal( + self.new_empty_dir(entry).entries.index(j as int), + NodeEntry::Empty(), + ), + { + let new_dir = self.new_empty_dir(entry); + let num_entries = self.arch.num_entries((self.layer + 1) as nat); + indexing::lemma_entry_base_from_index(self.base_vaddr, entry, self.entry_size()); + indexing::lemma_entry_base_from_index_support(self.base_vaddr, entry, self.entry_size()); + lemma_new_seq::(num_entries, NodeEntry::Empty()); + assert(new_dir.directories_obey_invariant()); + assert(new_dir.well_formed()); + assert(new_dir.inv()); + } - assert(self.interp().map.contains_pair(base, self.interp_of_entry(entry).map.index(base))); - - assert(d.resolve(vaddr).is_Ok()); - assert(d.interp().resolve(vaddr).is_Ok()); - assert(equal(d.interp().resolve(vaddr), self.interp().resolve(vaddr))); - } else { - assert(d.resolve(vaddr).is_Err()); - assert(self.resolve(vaddr).is_Err()); - - assert(d.interp().resolve(vaddr).is_Err()); - assert(!exists|base:nat| - d.interp().map.dom().contains(base) && - between(vaddr, base, base + (#[trigger] d.interp().map.index(base)).frame.size)) by - { - assert(!exists|base:nat, pte:PageTableEntry| - d.interp().map.contains_pair(base, pte) && - between(vaddr, base, base + pte.frame.size)); - if 
exists|base:nat| - d.interp().map.dom().contains(base) && - between(vaddr, base, base + (#[trigger] d.interp().map.index(base)).frame.size) { - let base = choose|base:nat| - d.interp().map.dom().contains(base) && - between(vaddr, base, base + (#[trigger] d.interp().map.index(base)).frame.size); - let pte = d.interp().map.index(base); - assert(d.interp().map.contains_pair(base, pte)); - } - }; - self.lemma_no_mapping_in_interp_of_entry_implies_no_mapping_in_interp(vaddr, entry); + pub open spec fn map_frame(self, base: nat, pte: PageTableEntry) -> Result + decreases self.arch.layers.len() - self.layer, + { + decreases_by(Self::check_map_frame); + if self.inv() && self.accepted_mapping(base, pte) { + let entry = self.index_for_vaddr(base); + match self.entries.index(entry as int) { + NodeEntry::Page(p) => { Err(self) }, + NodeEntry::Directory(d) => { + if self.entry_size() == pte.frame.size { + Err(self) + } else { + match d.map_frame(base, pte) { + Ok(d) => Ok(self.update(entry, NodeEntry::Directory(d))), + Err(d) => Err(self.update(entry, NodeEntry::Directory(d))), } - assert(equal(d.interp().resolve(vaddr), self.interp().resolve(vaddr))); - }, - NodeEntry::Empty() => { - assert(self.resolve(vaddr).is_Err()); - self.lemma_no_mapping_in_interp_of_entry_implies_no_mapping_in_interp(vaddr, entry); - assert(self.interp().resolve(vaddr).is_Err()); - }, - } - } - - pub open spec(checked) fn update(self, n: nat, e: NodeEntry) -> Self - recommends n < self.entries.len() - { - Directory { - entries: self.entries.update(n as int, e), - ..self - } - } - - pub open spec(checked) fn candidate_mapping_in_bounds(self, base: nat, pte: PageTableEntry) -> bool - recommends self.inv() - { - self.base_vaddr <= base && base + pte.frame.size <= self.upper_vaddr() - } - - pub open spec(checked) fn accepted_mapping(self, base: nat, pte: PageTableEntry) -> bool - recommends self.inv() - { - &&& aligned(base, pte.frame.size) - &&& aligned(pte.frame.base, pte.frame.size) - &&& self.candidate_mapping_in_bounds(base, pte) - &&& self.arch.contains_entry_size_at_index_atleast(pte.frame.size, self.layer) - } - - pub proof fn lemma_accepted_mapping_implies_interp_accepted_mapping_manual(self, base: nat, pte: PageTableEntry) - requires - self.inv(), - self.accepted_mapping(base, pte) - ensures - self.interp().accepted_mapping(base, pte), - { - self.lemma_inv_implies_interp_inv(); + } + }, + NodeEntry::Empty() => { + if self.entry_size() == pte.frame.size { + Ok(self.update(entry, NodeEntry::Page(pte))) + } else { + // new_empty_dir's recommendation for `self.layer + 1 < self.arch.layers.len()` + // is satisfied because we know the frame size isn't this layer's entrysize + // (i.e. must be on some lower level). 
+ let new_dir = self.new_empty_dir(entry); + // We never fail to insert an accepted mapping into an empty directory + Ok( + self.update( + entry, + NodeEntry::Directory(new_dir.map_frame(base, pte).get_Ok_0()), + ), + ) + } + }, } + } else { + arbitrary() + } + } - pub proof fn lemma_accepted_mapping_implies_interp_accepted_mapping_auto(self) - ensures - forall|base: nat, pte: PageTableEntry| - self.inv() && #[trigger] self.accepted_mapping(base, pte) ==> - self.interp().accepted_mapping(base, pte), - { - assert_forall_by(|base: nat, pte: PageTableEntry| { - requires(self.inv() && #[trigger] self.accepted_mapping(base, pte)); - ensures(self.interp().accepted_mapping(base, pte)); - - self.lemma_accepted_mapping_implies_interp_accepted_mapping_manual(base, pte); - }); - } + #[verifier(decreases_by)] + proof fn check_map_frame(self, base: nat, pte: PageTableEntry) { + ambient_lemmas1(); + ambient_lemmas2(); // TODO: unnecessary? + self.lemma_accepted_mapping_implies_interp_accepted_mapping_auto(); + if self.inv() && self.accepted_mapping(base, pte) { + indexing::lemma_index_from_base_and_addr( + self.base_vaddr, + base, + self.entry_size(), + self.num_entries(), + ); + } + } - // Creates new empty directory to map to entry 'entry' - pub open spec fn new_empty_dir(self, entry: nat) -> Self - recommends - self.inv(), - entry < self.num_entries(), - self.layer + 1 < self.arch.layers.len(), - { - Directory { - entries: new_seq(self.arch.num_entries((self.layer + 1) as nat), NodeEntry::Empty()), - layer: self.layer + 1, - base_vaddr: self.entry_base(entry), - arch: self.arch, - flags: permissive_flags, - } - } + pub proof fn lemma_accepted_mapping_implies_directory_accepted_mapping( + self, + base: nat, + pte: PageTableEntry, + d: Directory, + ) + requires + self.inv(), + self.accepted_mapping(base, pte), + equal(d.arch, self.arch), + d.base_vaddr == self.entry_base(self.index_for_vaddr(base)), + d.upper_vaddr() == self.entry_base(self.index_for_vaddr(base) + 1), + d.inv(), + d.layer == self.layer + 1, + self.entry_size() != pte.frame.size, + ensures + d.accepted_mapping(base, pte), + { + ambient_lemmas1(); + indexing::lemma_index_from_base_and_addr( + self.base_vaddr, + base, + self.entry_size(), + self.num_entries(), + ); + self.lemma_accepted_mapping_implies_interp_accepted_mapping_auto(); + let entry = self.index_for_vaddr(base); + indexing::lemma_entry_base_from_index(self.base_vaddr, entry, self.entry_size()); + indexing::lemma_entry_base_from_index_support(self.base_vaddr, entry, self.entry_size()); + assert(self.directories_obey_invariant()); + assert(d.inv()); + assert(aligned(base, pte.frame.size)); + assert(aligned(pte.frame.base, pte.frame.size)); + assert(d.arch.contains_entry_size_at_index_atleast(pte.frame.size, d.layer)); + assert(self.entry_base(entry) <= base); + assert(aligned(base, pte.frame.size)); + self.arch.lemma_entry_sizes_aligned_auto(); + assert(aligned(self.entry_size(), pte.frame.size)); + lib::aligned_transitive_auto(); + assert(aligned(self.next_entry_base(entry), pte.frame.size)); + lib::leq_add_aligned_less(base, pte.frame.size, self.entry_base(entry + 1)); + assert(base + pte.frame.size <= self.entry_base(entry + 1)); + assert(base + pte.frame.size <= self.entry_base(entry) + self.entry_size()); + assert(base + pte.frame.size <= d.base_vaddr + self.entry_size()); + assert(base + pte.frame.size <= d.base_vaddr + d.num_entries() * d.entry_size()); + assert(base + pte.frame.size <= d.upper_vaddr()); + assert(d.candidate_mapping_in_bounds(base, pte)); + 
assert(aligned(base, pte.frame.size)); + assert(aligned(pte.frame.base, pte.frame.size)); + } - pub proof fn lemma_new_empty_dir(self, entry: nat) - requires - self.inv(), - entry < self.num_entries(), - self.layer + 1 < self.arch.layers.len(), - ensures - self.new_empty_dir(entry).inv(), - self.new_empty_dir(entry).entries.len() == self.arch.num_entries((self.layer + 1) as nat), - forall|j: nat| j < self.new_empty_dir(entry).num_entries() ==> equal(self.new_empty_dir(entry).entries.index(j as int), NodeEntry::Empty()), - { - let new_dir = self.new_empty_dir(entry); - let num_entries = self.arch.num_entries((self.layer + 1) as nat); - indexing::lemma_entry_base_from_index(self.base_vaddr, entry, self.entry_size()); - indexing::lemma_entry_base_from_index_support(self.base_vaddr, entry, self.entry_size()); - lemma_new_seq::(num_entries, NodeEntry::Empty()); - - assert(new_dir.directories_obey_invariant()); - assert(new_dir.well_formed()); - assert(new_dir.inv()); - } + proof fn lemma_map_frame_empty_is_ok(self, base: nat, pte: PageTableEntry) + requires + self.inv(), + self.accepted_mapping(base, pte), + self.entries.index(self.index_for_vaddr(base) as int).is_Empty(), + ensures + self.map_frame( + base, + pte, + ).is_Ok(), + // self.new_empty_dir(self.index_for_vaddr(base)).map_frame(base, pte).is_Ok() - pub open spec fn map_frame(self, base: nat, pte: PageTableEntry) -> Result - decreases self.arch.layers.len() - self.layer - { - decreases_by(Self::check_map_frame); + decreases self.arch.layers.len() - self.layer, + ; - if self.inv() && self.accepted_mapping(base, pte) { - let entry = self.index_for_vaddr(base); - match self.entries.index(entry as int) { - NodeEntry::Page(p) => { - Err(self) - }, - NodeEntry::Directory(d) => { - if self.entry_size() == pte.frame.size { - Err(self) - } else { - match d.map_frame(base, pte) { - Ok(d) => Ok(self.update(entry, NodeEntry::Directory(d))), - Err(d) => Err(self.update(entry, NodeEntry::Directory(d))), - } - } - }, - NodeEntry::Empty() => { - if self.entry_size() == pte.frame.size { - Ok(self.update(entry, NodeEntry::Page(pte))) - } else { - // new_empty_dir's recommendation for `self.layer + 1 < self.arch.layers.len()` - // is satisfied because we know the frame size isn't this layer's entrysize - // (i.e. must be on some lower level). 
- let new_dir = self.new_empty_dir(entry); - // We never fail to insert an accepted mapping into an empty directory - Ok(self.update(entry, NodeEntry::Directory(new_dir.map_frame(base, pte).get_Ok_0()))) - } - }, - } + pub proof fn lemma_map_frame_preserves_inv(self, base: nat, pte: PageTableEntry) + requires + self.inv(), + self.accepted_mapping(base, pte), + self.map_frame(base, pte).is_Ok(), + ensures + self.map_frame(base, pte).get_Ok_0().layer === self.layer, + self.map_frame(base, pte).get_Ok_0().arch === self.arch, + self.map_frame(base, pte).get_Ok_0().base_vaddr === self.base_vaddr, + !self.map_frame(base, pte).get_Ok_0().empty(), + self.map_frame(base, pte).get_Ok_0().inv(), + !exists|b: nat| + true && self.interp().map.dom().contains(b) && between( + base, + b, + b + (#[trigger] self.interp().map.index(b)).frame.size, + ), + decreases self.arch.layers.len() - self.layer, + { + ambient_lemmas1(); + ambient_lemmas2(); + self.lemma_accepted_mapping_implies_interp_accepted_mapping_auto(); + indexing::lemma_index_from_base_and_addr( + self.base_vaddr, + base, + self.entry_size(), + self.num_entries(), + ); + let res = self.map_frame(base, pte).get_Ok_0(); + let entry = self.index_for_vaddr(base); + indexing::lemma_entry_base_from_index(self.base_vaddr, entry, self.entry_size()); + match self.entries.index(entry as int) { + NodeEntry::Page(p) => (), + NodeEntry::Directory(d) => { + if self.entry_size() == pte.frame.size { } else { - arbitrary() + self.lemma_accepted_mapping_implies_directory_accepted_mapping(base, pte, d); + d.lemma_inv_implies_interp_inv(); + assert(d.inv()); + d.lemma_map_frame_preserves_inv(base, pte); + assert(res.well_formed()); + assert(res.pages_match_entry_size()); + assert(res.directories_match_arch()); + // assert_forall_by(|i: nat| { + // requires(i < res.entries.len() && res.entries.index(i as int).is_Directory()); + // ensures(true + // && (#[trigger] res.entries.index(i as int)).get_Directory_0().layer == res.layer + 1 + // && res.entries.index(i as int).get_Directory_0().base_vaddr == res.base_vaddr + i * res.entry_size()); + // if i < res.entries.len() && res.entries.index(i as int).is_Directory() { + // if i == entry { + // } + // } + // }); + assert(res.directories_are_in_next_layer()); + assert(res.directories_obey_invariant()); + assert(res.directories_are_nonempty()); + assert(res.inv()); + assert(equal(self.map_frame(base, pte).get_Ok_0().layer, self.layer)); + assert(res.entries.index(entry as int).is_Directory()); + assert(!res.empty()); + self.lemma_no_mapping_in_interp_of_entry_implies_no_mapping_in_interp( + base, + entry, + ); } - } - - #[verifier(decreases_by)] - proof fn check_map_frame(self, base: nat, pte: PageTableEntry) { - ambient_lemmas1(); - ambient_lemmas2(); // TODO: unnecessary? 
- self.lemma_accepted_mapping_implies_interp_accepted_mapping_auto(); - if self.inv() && self.accepted_mapping(base, pte) { - indexing::lemma_index_from_base_and_addr(self.base_vaddr, base, self.entry_size(), self.num_entries()); + }, + NodeEntry::Empty() => { + self.lemma_no_mapping_in_interp_of_entry_implies_no_mapping_in_interp(base, entry); + if self.entry_size() == pte.frame.size { + assert(equal(res.layer, self.layer)); + assert(res.entries.index(entry as int).is_Page()); + assert(!res.empty()); + assert(res.directories_are_in_next_layer()); + assert(res.directories_obey_invariant()); + assert(res.inv()); + } else { + assert(((self.layer + 1) as nat) < self.arch.layers.len()); + let new_dir = self.new_empty_dir(entry); + self.lemma_new_empty_dir(entry); + self.lemma_accepted_mapping_implies_directory_accepted_mapping( + base, + pte, + new_dir, + ); + new_dir.lemma_accepted_mapping_implies_interp_accepted_mapping_auto(); + assert(new_dir.accepted_mapping(base, pte)); + indexing::lemma_index_from_base_and_addr( + new_dir.base_vaddr, + base, + new_dir.entry_size(), + new_dir.num_entries(), + ); + new_dir.lemma_map_frame_empty_is_ok(base, pte); + new_dir.lemma_map_frame_preserves_inv(base, pte); + assert(res.directories_are_in_next_layer()); + assert(res.directories_obey_invariant()); + assert(res.directories_are_nonempty()); + assert(res.frames_aligned()); + assert(res.inv()); + assert(equal(res.layer, self.layer)); + assert(res.entries.index(entry as int).is_Directory()); + assert(!res.empty()); + assert(new_dir.map_frame(base, pte).is_Ok()); } - } - - pub proof fn lemma_accepted_mapping_implies_directory_accepted_mapping(self, base: nat, pte: PageTableEntry, d: Directory) - requires - self.inv(), - self.accepted_mapping(base, pte), - equal(d.arch, self.arch), - d.base_vaddr == self.entry_base(self.index_for_vaddr(base)), - d.upper_vaddr() == self.entry_base(self.index_for_vaddr(base)+1), - d.inv(), - d.layer == self.layer + 1, - self.entry_size() != pte.frame.size, - ensures - d.accepted_mapping(base, pte), - { - ambient_lemmas1(); - indexing::lemma_index_from_base_and_addr(self.base_vaddr, base, self.entry_size(), self.num_entries()); - self.lemma_accepted_mapping_implies_interp_accepted_mapping_auto(); + }, + } + } - let entry = self.index_for_vaddr(base); - indexing::lemma_entry_base_from_index(self.base_vaddr, entry, self.entry_size()); - indexing::lemma_entry_base_from_index_support(self.base_vaddr, entry, self.entry_size()); + proof fn lemma_insert_interp_of_entry_implies_insert_interp_aux( + self, + i: nat, + j: nat, + base: nat, + n: NodeEntry, + pte: PageTableEntry, + ) + requires + self.inv(), + i <= j, + j < self.num_entries(), + !self.interp_aux(i).map.dom().contains(base), + self.update(j, n).inv(), + equal( + self.interp_of_entry(j).map.insert(base, pte), + match n { + NodeEntry::Page(p) => map![self.entry_base(j) => p], + NodeEntry::Directory(d) => d.interp_aux(0).map, + NodeEntry::Empty() => map![], + }, + ), + ensures + equal(self.interp_aux(i).map.insert(base, pte), self.update(j, n).interp_aux(i).map), + decreases self.arch.layers.len() - self.layer, self.num_entries() - i, + { + ambient_lemmas1(); + ambient_lemmas2(); + self.lemma_inv_implies_interp_aux_inv(i); + self.lemma_inv_implies_interp_aux_inv(i + 1); + self.lemma_inv_implies_interp_of_entry_inv(i); + self.lemma_inv_implies_interp_of_entry_inv(j); + self.lemma_interp_of_entry(); + self.lemma_interp_of_entry_contains_mapping_implies_interp_aux_contains_mapping(i, j); + let nself = self.update(j, n); + if i >= 
self.entries.len() { + } else { + if i == j { + assert(!self.interp_aux(i + 1).map.dom().contains(base)); + assert(equal( + self.interp_aux(i).map, + self.interp_aux(i + 1).map.union_prefer_right(self.interp_of_entry(i).map), + )); + assert(equal( + self.interp_of_entry(i).map.insert(base, pte), + nself.interp_of_entry(i).map, + )); + self.lemma_entries_equal_implies_interp_aux_equal(nself, i + 1); + assert(equal(self.interp_aux(i + 1).map, nself.interp_aux(i + 1).map)); + assert(!self.interp_aux(i + 1).map.union_prefer_right( + self.interp_of_entry(i).map, + ).dom().contains(base)); + assert(equal( + self.interp_aux(i + 1).map.union_prefer_right( + self.interp_of_entry(i).map, + ).insert(base, pte), + self.update(j, n).interp_aux(i + 1).map.union_prefer_right( + nself.interp_of_entry(i).map, + ), + )); + assert(equal( + self.interp_aux(i).map.insert(base, pte), + self.update(j, n).interp_aux(i).map, + )); + } else { + assert(i < j); assert(self.directories_obey_invariant()); - assert(d.inv()); - - assert(aligned(base, pte.frame.size)); - assert(aligned(pte.frame.base, pte.frame.size)); - assert(d.arch.contains_entry_size_at_index_atleast(pte.frame.size, d.layer)); - - assert(self.entry_base(entry) <= base); - assert(aligned(base, pte.frame.size)); - self.arch.lemma_entry_sizes_aligned_auto(); - assert(aligned(self.entry_size(), pte.frame.size)); - - lib::aligned_transitive_auto(); - assert(aligned(self.next_entry_base(entry), pte.frame.size)); - lib::leq_add_aligned_less(base, pte.frame.size, self.entry_base(entry+1)); - assert(base + pte.frame.size <= self.entry_base(entry+1)); - assert(base + pte.frame.size <= self.entry_base(entry) + self.entry_size()); - assert(base + pte.frame.size <= d.base_vaddr + self.entry_size()); - assert(base + pte.frame.size <= d.base_vaddr + d.num_entries() * d.entry_size()); - assert(base + pte.frame.size <= d.upper_vaddr()); - assert(d.candidate_mapping_in_bounds(base, pte)); - assert(aligned(base, pte.frame.size)); - assert(aligned(pte.frame.base, pte.frame.size)); - } - - proof fn lemma_map_frame_empty_is_ok(self, base: nat, pte: PageTableEntry) - requires - self.inv(), - self.accepted_mapping(base, pte), - self.entries.index(self.index_for_vaddr(base) as int).is_Empty(), - ensures - self.map_frame(base, pte).is_Ok(), - // self.new_empty_dir(self.index_for_vaddr(base)).map_frame(base, pte).is_Ok() - decreases self.arch.layers.len() - self.layer; - - pub proof fn lemma_map_frame_preserves_inv(self, base: nat, pte: PageTableEntry) - requires - self.inv(), - self.accepted_mapping(base, pte), - self.map_frame(base, pte).is_Ok(), - ensures - self.map_frame(base, pte).get_Ok_0().layer === self.layer, - self.map_frame(base, pte).get_Ok_0().arch === self.arch, - self.map_frame(base, pte).get_Ok_0().base_vaddr === self.base_vaddr, - !self.map_frame(base, pte).get_Ok_0().empty(), - self.map_frame(base, pte).get_Ok_0().inv(), - !exists|b:nat| true - && self.interp().map.dom().contains(b) - && between(base, b, b + (#[trigger] self.interp().map.index(b)).frame.size), - - decreases self.arch.layers.len() - self.layer - { - - ambient_lemmas1(); - ambient_lemmas2(); - self.lemma_accepted_mapping_implies_interp_accepted_mapping_auto(); - indexing::lemma_index_from_base_and_addr(self.base_vaddr, base, self.entry_size(), self.num_entries()); - - let res = self.map_frame(base, pte).get_Ok_0(); - - let entry = self.index_for_vaddr(base); - indexing::lemma_entry_base_from_index(self.base_vaddr, entry, self.entry_size()); - match self.entries.index(entry as int) { - 
NodeEntry::Page(p) => (), - NodeEntry::Directory(d) => { - if self.entry_size() == pte.frame.size { - } else { - self.lemma_accepted_mapping_implies_directory_accepted_mapping(base, pte, d); - d.lemma_inv_implies_interp_inv(); - assert(d.inv()); - d.lemma_map_frame_preserves_inv(base, pte); - assert(res.well_formed()); - assert(res.pages_match_entry_size()); - assert(res.directories_match_arch()); - // assert_forall_by(|i: nat| { - // requires(i < res.entries.len() && res.entries.index(i as int).is_Directory()); - // ensures(true - // && (#[trigger] res.entries.index(i as int)).get_Directory_0().layer == res.layer + 1 - // && res.entries.index(i as int).get_Directory_0().base_vaddr == res.base_vaddr + i * res.entry_size()); - // if i < res.entries.len() && res.entries.index(i as int).is_Directory() { - // if i == entry { - // } - // } - // }); - assert(res.directories_are_in_next_layer()); - assert(res.directories_obey_invariant()); - assert(res.directories_are_nonempty()); - assert(res.inv()); - assert(equal(self.map_frame(base, pte).get_Ok_0().layer, self.layer)); - - assert(res.entries.index(entry as int).is_Directory()); - assert(!res.empty()); - self.lemma_no_mapping_in_interp_of_entry_implies_no_mapping_in_interp(base, entry); - } - }, - NodeEntry::Empty() => { - self.lemma_no_mapping_in_interp_of_entry_implies_no_mapping_in_interp(base, entry); - if self.entry_size() == pte.frame.size { - assert(equal(res.layer, self.layer)); - assert(res.entries.index(entry as int).is_Page()); - assert(!res.empty()); - assert(res.directories_are_in_next_layer()); - assert(res.directories_obey_invariant()); - assert(res.inv()); - } else { - assert(((self.layer + 1) as nat) < self.arch.layers.len()); - let new_dir = self.new_empty_dir(entry); - self.lemma_new_empty_dir(entry); - - self.lemma_accepted_mapping_implies_directory_accepted_mapping(base, pte, new_dir); - new_dir.lemma_accepted_mapping_implies_interp_accepted_mapping_auto(); - assert(new_dir.accepted_mapping(base, pte)); - indexing::lemma_index_from_base_and_addr(new_dir.base_vaddr, base, new_dir.entry_size(), new_dir.num_entries()); - new_dir.lemma_map_frame_empty_is_ok(base, pte); - new_dir.lemma_map_frame_preserves_inv(base, pte); - - assert(res.directories_are_in_next_layer()); - assert(res.directories_obey_invariant()); - assert(res.directories_are_nonempty()); - assert(res.frames_aligned()); - assert(res.inv()); - assert(equal(res.layer, self.layer)); - assert(res.entries.index(entry as int).is_Directory()); - assert(!res.empty()); - assert(new_dir.map_frame(base, pte).is_Ok()); - } - }, - } + self.lemma_insert_interp_of_entry_implies_insert_interp_aux(i + 1, j, base, n, pte); + self.lemma_interp_of_entry_contains_mapping_implies_interp_aux_contains_mapping( + i + 1, + j, + ); + assert(!self.interp_of_entry(j).map.dom().contains(base)); + assert(!self.interp_aux(i).map.dom().contains(base)); + assert(equal( + self.interp_aux(i + 1).map.insert(base, pte), + self.update(j, n).interp_aux(i + 1).map, + )); + assert(equal( + self.interp_aux(i).map, + self.interp_aux(i + 1).map.union_prefer_right(self.interp_of_entry(i).map), + )); + assert(nself.inv()); + assert(equal( + nself.interp_aux(i).map, + nself.interp_aux(i + 1).map.union_prefer_right(nself.interp_of_entry(i).map), + )); + assert(equal( + self.interp_aux(i).map.insert(base, pte), + self.update(j, n).interp_aux(i).map, + )); } + } + } - proof fn lemma_insert_interp_of_entry_implies_insert_interp_aux(self, i: nat, j: nat, base: nat, n: NodeEntry, pte: PageTableEntry) - requires - 
self.inv(), - i <= j, - j < self.num_entries(), - !self.interp_aux(i).map.dom().contains(base), - self.update(j, n).inv(), - equal( - self.interp_of_entry(j).map.insert(base, pte), - match n { - NodeEntry::Page(p) => map![self.entry_base(j) => p], - NodeEntry::Directory(d) => d.interp_aux(0).map, - NodeEntry::Empty() => map![], - }), - ensures - equal(self.interp_aux(i).map.insert(base, pte), self.update(j, n).interp_aux(i).map), - decreases self.arch.layers.len() - self.layer, self.num_entries() - i - { - ambient_lemmas1(); - ambient_lemmas2(); - - self.lemma_inv_implies_interp_aux_inv(i); - self.lemma_inv_implies_interp_aux_inv(i + 1); - self.lemma_inv_implies_interp_of_entry_inv(i); - self.lemma_inv_implies_interp_of_entry_inv(j); - - self.lemma_interp_of_entry(); - self.lemma_interp_of_entry_contains_mapping_implies_interp_aux_contains_mapping(i, j); - - let nself = self.update(j, n); - - if i >= self.entries.len() { - } else { - if i == j { - assert(!self.interp_aux(i + 1).map.dom().contains(base)); - assert(equal(self.interp_aux(i).map, self.interp_aux(i + 1).map.union_prefer_right(self.interp_of_entry(i).map))); - - assert(equal(self.interp_of_entry(i).map.insert(base, pte), nself.interp_of_entry(i).map)); - self.lemma_entries_equal_implies_interp_aux_equal(nself, i+1); - assert(equal(self.interp_aux(i + 1).map, nself.interp_aux(i + 1).map)); - - - assert(!self.interp_aux(i + 1).map.union_prefer_right(self.interp_of_entry(i).map).dom().contains(base)); + proof fn lemma_insert_interp_of_entry_implies_insert_interp( + self, + j: nat, + base: nat, + n: NodeEntry, + pte: PageTableEntry, + ) + requires + self.inv(), + j < self.num_entries(), + !self.interp().map.dom().contains(base), + self.update(j, n).inv(), + equal( + self.interp_of_entry(j).map.insert(base, pte), + match n { + NodeEntry::Page(p) => map![self.entry_base(j) => p], + NodeEntry::Directory(d) => d.interp_aux(0).map, + NodeEntry::Empty() => map![], + }, + ), + ensures + equal(self.interp().map.insert(base, pte), self.update(j, n).interp().map), + decreases self.arch.layers.len() - self.layer, + { + self.lemma_insert_interp_of_entry_implies_insert_interp_aux(0, j, base, n, pte); + } - assert(equal(self.interp_aux(i + 1).map.union_prefer_right(self.interp_of_entry(i).map).insert(base, pte), - self.update(j, n).interp_aux(i + 1).map.union_prefer_right(nself.interp_of_entry(i).map))); + proof fn lemma_nonempty_implies_exists_interp_dom_contains(self) + requires + self.inv(), + !self.empty(), + ensures + exists|b: nat| self.interp().map.dom().contains(b), + decreases self.arch.layers.len() - self.layer, + { + ambient_lemmas1(); + ambient_lemmas2(); + assert(exists|i: nat| i < self.num_entries() && !self.entries.index(i as int).is_Empty()); + let i = choose|i: nat| i < self.num_entries() && !self.entries.index(i as int).is_Empty(); + assert(i < self.num_entries()); + assert(!self.entries.index(i as int).is_Empty()); + self.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping(i); + match self.entries.index(i as int) { + NodeEntry::Page(p) => { + assert(self.interp().map.dom().contains(self.entry_base(i))); + }, + NodeEntry::Directory(d) => { + d.lemma_nonempty_implies_exists_interp_dom_contains(); + let b = choose|b: nat| d.interp().map.dom().contains(b); + assert(self.interp().map.dom().contains(b)); + }, + NodeEntry::Empty() => (), + } + } - assert(equal(self.interp_aux(i).map.insert(base, pte), self.update(j, n).interp_aux(i).map)); + pub proof fn lemma_map_frame_structure_assertions( + self, + base: nat, + 
pte: PageTableEntry, + idx: nat, + ) + requires + self.inv(), + self.accepted_mapping(base, pte), + idx == self.index_for_vaddr(base), + ensures + match self.entries.index(idx as int) { + NodeEntry::Page(p) => true, + NodeEntry::Directory(d) => { + &&& d.inv() + &&& if self.entry_size() == pte.frame.size { + true } else { - assert(i < j); - assert(self.directories_obey_invariant()); - - self.lemma_insert_interp_of_entry_implies_insert_interp_aux(i + 1, j, base, n, pte); - self.lemma_interp_of_entry_contains_mapping_implies_interp_aux_contains_mapping(i + 1, j); - assert(!self.interp_of_entry(j).map.dom().contains(base)); - - assert(!self.interp_aux(i).map.dom().contains(base)); - - assert(equal(self.interp_aux(i + 1).map.insert(base, pte), self.update(j, n).interp_aux(i + 1).map)); - - assert(equal(self.interp_aux(i).map, self.interp_aux(i + 1).map.union_prefer_right(self.interp_of_entry(i).map))); - - assert(nself.inv()); - assert(equal(nself.interp_aux(i).map, nself.interp_aux(i + 1).map.union_prefer_right(nself.interp_of_entry(i).map))); - - assert(equal(self.interp_aux(i).map.insert(base, pte), self.update(j, n).interp_aux(i).map)); + d.accepted_mapping(base, pte) + } + }, + NodeEntry::Empty() => { + if self.entry_size() == pte.frame.size { + true + } else { + &&& ((self.layer + 1) as nat) < self.arch.layers.len() + &&& self.new_empty_dir(idx).inv() + &&& self.new_empty_dir(idx).accepted_mapping(base, pte) + &&& self.new_empty_dir(idx).map_frame(base, pte).is_Ok() } + }, + }, + decreases self.arch.layers.len() - self.layer, + { + ambient_lemmas1(); + ambient_lemmas2(); + self.lemma_inv_implies_interp_inv(); + self.lemma_accepted_mapping_implies_interp_accepted_mapping_auto(); + indexing::lemma_index_from_base_and_addr( + self.base_vaddr, + base, + self.entry_size(), + self.num_entries(), + ); + let res = self.map_frame(base, pte).get_Ok_0(); + if self.map_frame(base, pte).is_Ok() { + self.lemma_map_frame_preserves_inv(base, pte); + } + let entry = self.index_for_vaddr(base); + indexing::lemma_entry_base_from_index(self.base_vaddr, entry, self.entry_size()); + self.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping(entry); + match self.entries.index(entry as int) { + NodeEntry::Page(p) => {}, + NodeEntry::Directory(d) => { + assert(d.inv()); + if self.entry_size() == pte.frame.size { + } else { + self.lemma_accepted_mapping_implies_directory_accepted_mapping(base, pte, d); + assert(d.accepted_mapping(base, pte)); } - } - - proof fn lemma_insert_interp_of_entry_implies_insert_interp(self, j: nat, base: nat, n: NodeEntry, pte: PageTableEntry) - requires - self.inv(), - j < self.num_entries(), - !self.interp().map.dom().contains(base), - self.update(j, n).inv(), - equal( - self.interp_of_entry(j).map.insert(base, pte), - match n { - NodeEntry::Page(p) => map![self.entry_base(j) => p], - NodeEntry::Directory(d) => d.interp_aux(0).map, - NodeEntry::Empty() => map![], - }), - ensures - equal(self.interp().map.insert(base, pte), self.update(j, n).interp().map), - decreases - self.arch.layers.len() - self.layer, - { - self.lemma_insert_interp_of_entry_implies_insert_interp_aux(0, j, base, n, pte); - } - - proof fn lemma_nonempty_implies_exists_interp_dom_contains(self) - requires - self.inv(), - !self.empty() - ensures - exists|b: nat| self.interp().map.dom().contains(b) - decreases self.arch.layers.len() - self.layer - { - ambient_lemmas1(); - ambient_lemmas2(); - - assert(exists|i: nat| i < self.num_entries() && !self.entries.index(i as int).is_Empty()); - let i = 
choose|i: nat| i < self.num_entries() && !self.entries.index(i as int).is_Empty(); - assert(i < self.num_entries()); - assert(!self.entries.index(i as int).is_Empty()); - self.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping(i); - match self.entries.index(i as int) { - NodeEntry::Page(p) => { - assert(self.interp().map.dom().contains(self.entry_base(i))); - }, - NodeEntry::Directory(d) => { - d.lemma_nonempty_implies_exists_interp_dom_contains(); - let b = choose|b: nat| d.interp().map.dom().contains(b); - assert(self.interp().map.dom().contains(b)); - }, - NodeEntry::Empty() => (), + }, + NodeEntry::Empty() => { + if self.entry_size() == pte.frame.size { + } else { + assert(((self.layer + 1) as nat) < self.arch.layers.len()); + let new_dir = self.new_empty_dir(entry); + self.lemma_new_empty_dir(entry); + assert(new_dir.inv()); + self.lemma_accepted_mapping_implies_directory_accepted_mapping( + base, + pte, + new_dir, + ); + new_dir.lemma_accepted_mapping_implies_interp_accepted_mapping_auto(); + assert(new_dir.accepted_mapping(base, pte)); + indexing::lemma_index_from_base_and_addr( + new_dir.base_vaddr, + base, + new_dir.entry_size(), + new_dir.num_entries(), + ); + new_dir.lemma_map_frame_refines_map_frame(base, pte); + assert(new_dir.interp().map_frame(base, pte).is_Ok()); } - } + }, + } + } - pub proof fn lemma_map_frame_structure_assertions(self, base: nat, pte: PageTableEntry, idx: nat) - requires - self.inv(), - self.accepted_mapping(base, pte), - idx == self.index_for_vaddr(base), - ensures - match self.entries.index(idx as int) { - NodeEntry::Page(p) => true, - NodeEntry::Directory(d) => { - &&& d.inv() - &&& if self.entry_size() == pte.frame.size { - true - } else { - d.accepted_mapping(base, pte) - } + pub proof fn lemma_map_frame_refines_map_frame(self, base: nat, pte: PageTableEntry) + requires + self.inv(), + self.accepted_mapping(base, pte), + ensures + self.map_frame(base, pte).is_Err() ==> self.map_frame(base, pte).get_Err_0() === self, + result_map(self.map_frame(base, pte), |d: Directory| d.interp()) + === self.interp().map_frame(base, pte), + decreases self.arch.layers.len() - self.layer, + { + ambient_lemmas1(); + ambient_lemmas2(); + self.lemma_inv_implies_interp_inv(); + self.lemma_accepted_mapping_implies_interp_accepted_mapping_auto(); + indexing::lemma_index_from_base_and_addr( + self.base_vaddr, + base, + self.entry_size(), + self.num_entries(), + ); + assert(aligned(self.base_vaddr, self.entry_size())) by { + lib::mod_mult_zero_implies_mod_zero( + self.base_vaddr, + self.entry_size(), + self.num_entries(), + ); + }; + let res = self.map_frame(base, pte).get_Ok_0(); + if self.map_frame(base, pte).is_Ok() { + self.lemma_map_frame_preserves_inv(base, pte); + } + let entry = self.index_for_vaddr(base); + indexing::lemma_entry_base_from_index(self.base_vaddr, entry, self.entry_size()); + self.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping(entry); + match self.entries.index(entry as int) { + NodeEntry::Page(p) => { + assert(self.map_frame(base, pte).is_Err()); + assert(self.interp_of_entry(entry).map.contains_pair(self.entry_base(entry), p)); + assert(self.interp().map.contains_pair(self.entry_base(entry), p)); + assert(self.interp().map_frame(base, pte).is_Err()); + }, + NodeEntry::Directory(d) => { + d.lemma_inv_implies_interp_inv(); + assert(d.inv()); + if self.entry_size() == pte.frame.size { + assert(self.map_frame(base, pte).is_Err()); + d.lemma_nonempty_implies_exists_interp_dom_contains(); + let b = choose|b: nat| 
d.interp().map.dom().contains(b); + assert(self.interp().map.dom().contains(b)); + self.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping( + entry, + ); + assert(!self.interp().valid_mapping(base, pte)); + assert(self.interp().map_frame(base, pte).is_Err()); + } else { + self.lemma_accepted_mapping_implies_directory_accepted_mapping(base, pte, d); + assert(d.accepted_mapping(base, pte)); + d.lemma_map_frame_refines_map_frame(base, pte); + assert(equal( + result_map(d.map_frame(base, pte), |d: Directory| d.interp()), + d.interp().map_frame(base, pte), + )); + match d.map_frame(base, pte) { + Ok(nd) => { + assert(d.map_frame(base, pte).is_Ok()); + assert(d.interp().map_frame(base, pte).is_Ok()); + assert(d.interp().accepted_mapping(base, pte)); + assert(d.interp().valid_mapping(base, pte)); + assert(self.interp().accepted_mapping(base, pte)); + assert(self.interp().valid_mapping(base, pte)); + assert(self.map_frame(base, pte).is_Ok()); + self.lemma_insert_interp_of_entry_implies_insert_interp( + entry, + base, + NodeEntry::Directory(nd), + pte, + ); + assert(self.interp().map_frame(base, pte).is_Ok()); + assert(equal( + self.interp().map.insert(base, pte), + self.update(entry, NodeEntry::Directory(nd)).interp().map, + )); + assert(equal( + self.interp().map.insert(base, pte), + self.interp().map_frame(base, pte).get_Ok_0().map, + )); + assert(equal( + self.map_frame(base, pte).get_Ok_0().interp(), + self.interp().map_frame(base, pte).get_Ok_0(), + )); }, - NodeEntry::Empty() => { - if self.entry_size() == pte.frame.size { - true - } else { - &&& ((self.layer + 1) as nat) < self.arch.layers.len() - &&& self.new_empty_dir(idx).inv() - &&& self.new_empty_dir(idx).accepted_mapping(base, pte) - &&& self.new_empty_dir(idx).map_frame(base, pte).is_Ok() - } + Err(e) => { + assert(d.map_frame(base, pte).is_Err()); + assert(d.interp().map_frame(base, pte).is_Err()); + assert(d.interp().accepted_mapping(base, pte)); + assert(!d.interp().valid_mapping(base, pte)); + let b = choose|b: nat| + #![auto] + d.interp().map.dom().contains(b) && overlap( + MemRegion { base: base, size: pte.frame.size }, + MemRegion { base: b, size: d.interp().map.index(b).frame.size }, + ); + let bbase = d.interp().map.index(b).frame.base; + let bsize = d.interp().map.index(b).frame.size; + assert(d.interp().map.contains_pair(b, d.interp().map.index(b))); + assert(overlap( + MemRegion { base: base, size: pte.frame.size }, + MemRegion { base: b, size: bsize }, + )); + self.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping( + entry); + assert(self.interp().map.contains_pair(b, d.interp().map.index(b))); + assert(self.interp().accepted_mapping(base, pte)); + assert(!self.interp().valid_mapping(base, pte)); + assert(self.map_frame(base, pte).is_Err()); + assert(self.interp().map_frame(base, pte).is_Err()); + assert(self.entries.index(entry as int) === NodeEntry::Directory(d)); + assert(self.entries.index(entry as int) === NodeEntry::Directory(e)); + let res = self.update(entry, NodeEntry::Directory(e)).entries; + assert(res.index(entry as int) === self.entries.index(entry as int)); + assert_seqs_equal!(res, self.entries); }, } - decreases self.arch.layers.len() - self.layer - { - ambient_lemmas1(); - ambient_lemmas2(); - self.lemma_inv_implies_interp_inv(); - self.lemma_accepted_mapping_implies_interp_accepted_mapping_auto(); - indexing::lemma_index_from_base_and_addr(self.base_vaddr, base, self.entry_size(), self.num_entries()); - - let res = self.map_frame(base, pte).get_Ok_0(); - if 
self.map_frame(base, pte).is_Ok() { - self.lemma_map_frame_preserves_inv(base, pte); - } + // d.lemma_map_frame_preserves_inv(base, pte); - let entry = self.index_for_vaddr(base); - indexing::lemma_entry_base_from_index(self.base_vaddr, entry, self.entry_size()); - self.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping(entry); - match self.entries.index(entry as int) { - NodeEntry::Page(p) => { }, - NodeEntry::Directory(d) => { - assert(d.inv()); - if self.entry_size() == pte.frame.size { - } else { - self.lemma_accepted_mapping_implies_directory_accepted_mapping(base, pte, d); - assert(d.accepted_mapping(base, pte)); - } - }, - NodeEntry::Empty() => { - if self.entry_size() == pte.frame.size { - } else { - assert(((self.layer + 1) as nat) < self.arch.layers.len()); - let new_dir = self.new_empty_dir(entry); - self.lemma_new_empty_dir(entry); - assert(new_dir.inv()); - - self.lemma_accepted_mapping_implies_directory_accepted_mapping(base, pte, new_dir); - new_dir.lemma_accepted_mapping_implies_interp_accepted_mapping_auto(); - assert(new_dir.accepted_mapping(base, pte)); - indexing::lemma_index_from_base_and_addr(new_dir.base_vaddr, base, new_dir.entry_size(), new_dir.num_entries()); - - new_dir.lemma_map_frame_refines_map_frame(base, pte); - assert(new_dir.interp().map_frame(base, pte).is_Ok()); - } - }, } - } - - pub proof fn lemma_map_frame_refines_map_frame(self, base: nat, pte: PageTableEntry) - requires - self.inv(), - self.accepted_mapping(base, pte), - ensures - self.map_frame(base, pte).is_Err() ==> self.map_frame(base, pte).get_Err_0() === self, - result_map(self.map_frame(base, pte), |d: Directory| d.interp()) === self.interp().map_frame(base, pte), - decreases self.arch.layers.len() - self.layer - { - ambient_lemmas1(); - ambient_lemmas2(); - self.lemma_inv_implies_interp_inv(); - self.lemma_accepted_mapping_implies_interp_accepted_mapping_auto(); - indexing::lemma_index_from_base_and_addr(self.base_vaddr, base, self.entry_size(), self.num_entries()); - assert(aligned(self.base_vaddr, self.entry_size())) by { - lib::mod_mult_zero_implies_mod_zero(self.base_vaddr, self.entry_size(), self.num_entries()); - }; - - let res = self.map_frame(base, pte).get_Ok_0(); - if self.map_frame(base, pte).is_Ok() { - self.lemma_map_frame_preserves_inv(base, pte); + }, + NodeEntry::Empty() => { + if self.entry_size() == pte.frame.size { + self.lemma_insert_interp_of_entry_implies_insert_interp( + entry, + base, + NodeEntry::Page(pte), + pte, + ); + assert(equal( + result_map(self.map_frame(base, pte), |d: Directory| d.interp()), + self.interp().map_frame(base, pte), + )); + } else { + assert(((self.layer + 1) as nat) < self.arch.layers.len()); + let new_dir = self.new_empty_dir(entry); + self.lemma_new_empty_dir(entry); + assert(new_dir.inv()); + self.lemma_accepted_mapping_implies_directory_accepted_mapping( + base, + pte, + new_dir, + ); + new_dir.lemma_accepted_mapping_implies_interp_accepted_mapping_auto(); + assert(new_dir.accepted_mapping(base, pte)); + indexing::lemma_index_from_base_and_addr( + new_dir.base_vaddr, + base, + new_dir.entry_size(), + new_dir.num_entries(), + ); + new_dir.lemma_map_frame_empty_is_ok(base, pte); + new_dir.lemma_map_frame_preserves_inv(base, pte); + let new_dir_mapped = new_dir.map_frame(base, pte).get_Ok_0(); + assert(new_dir.map_frame(base, pte).is_Ok()); + assert(new_dir_mapped.inv()); + new_dir.lemma_map_frame_refines_map_frame(base, pte); + assert(new_dir.interp().map_frame(base, pte).is_Ok()); + assert(equal( + 
new_dir_mapped.interp(), + new_dir.interp().map_frame(base, pte).get_Ok_0(), + )); + new_dir.lemma_empty_implies_interp_empty(); + assert_maps_equal!(new_dir.interp().map, map![]); + assert_maps_equal!(new_dir.interp().map_frame(base, pte).get_Ok_0().map, map![base => pte]); + assert_maps_equal!(self.interp_of_entry(entry).map, map![]); + assert(equal(self.interp_of_entry(entry).map, map![])); + assert(equal(map![].insert(base, pte), new_dir_mapped.interp().map)); + assert(equal( + self.interp_of_entry(entry).map.insert(base, pte), + new_dir_mapped.interp().map, + )); + self.lemma_insert_interp_of_entry_implies_insert_interp( + entry, + base, + NodeEntry::Directory(new_dir_mapped), + pte, + ); + assert(equal( + result_map(self.map_frame(base, pte), |d: Directory| d.interp()), + self.interp().map_frame(base, pte), + )); } + }, + } + } - let entry = self.index_for_vaddr(base); - indexing::lemma_entry_base_from_index(self.base_vaddr, entry, self.entry_size()); - self.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping(entry); - match self.entries.index(entry as int) { - NodeEntry::Page(p) => { - assert(self.map_frame(base, pte).is_Err()); - - assert(self.interp_of_entry(entry).map.contains_pair(self.entry_base(entry), p)); - assert(self.interp().map.contains_pair(self.entry_base(entry), p)); - assert(self.interp().map_frame(base, pte).is_Err()); - }, - NodeEntry::Directory(d) => { - d.lemma_inv_implies_interp_inv(); - assert(d.inv()); - if self.entry_size() == pte.frame.size { - assert(self.map_frame(base, pte).is_Err()); - d.lemma_nonempty_implies_exists_interp_dom_contains(); - let b = choose|b: nat| d.interp().map.dom().contains(b); - assert(self.interp().map.dom().contains(b)); - self.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping(entry); + pub open spec(checked) fn accepted_unmap(self, base: nat) -> bool + recommends + self.well_formed(), + { + self.interp().accepted_unmap(base) + } - assert(!self.interp().valid_mapping(base, pte)); - assert(self.interp().map_frame(base, pte).is_Err()); - } else { - self.lemma_accepted_mapping_implies_directory_accepted_mapping(base, pte, d); - assert(d.accepted_mapping(base, pte)); - d.lemma_map_frame_refines_map_frame(base, pte); - assert(equal(result_map(d.map_frame(base, pte), |d: Directory| d.interp()), d.interp().map_frame(base, pte))); - match d.map_frame(base, pte) { - Ok(nd) => { - assert(d.map_frame(base, pte).is_Ok()); - assert(d.interp().map_frame(base, pte).is_Ok()); - assert(d.interp().accepted_mapping(base, pte)); - assert(d.interp().valid_mapping(base, pte)); - assert(self.interp().accepted_mapping(base, pte)); - assert(self.interp().valid_mapping(base, pte)); - assert(self.map_frame(base, pte).is_Ok()); - self.lemma_insert_interp_of_entry_implies_insert_interp(entry, base, NodeEntry::Directory(nd), pte); - assert(self.interp().map_frame(base, pte).is_Ok()); - - assert(equal(self.interp().map.insert(base, pte), self.update(entry, NodeEntry::Directory(nd)).interp().map)); - assert(equal(self.interp().map.insert(base, pte), self.interp().map_frame(base, pte).get_Ok_0().map)); - - assert(equal(self.map_frame(base, pte).get_Ok_0().interp(), self.interp().map_frame(base, pte).get_Ok_0())); - }, - Err(e) => { - assert(d.map_frame(base, pte).is_Err()); - assert(d.interp().map_frame(base, pte).is_Err()); - assert(d.interp().accepted_mapping(base, pte)); - assert(!d.interp().valid_mapping(base, pte)); - let b = choose|b: nat| #![auto] - d.interp().map.dom().contains(b) && overlap( - MemRegion { base: 
base, size: pte.frame.size }, - MemRegion { base: b, size: d.interp().map.index(b).frame.size } - ); - let bbase = d.interp().map.index(b).frame.base; - let bsize = d.interp().map.index(b).frame.size; - assert(d.interp().map.contains_pair(b, d.interp().map.index(b))); - assert(overlap( - MemRegion { base: base, size: pte.frame.size }, - MemRegion { base: b, size: bsize } - )); - self.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping(entry); - assert(self.interp().map.contains_pair(b, d.interp().map.index(b))); - - assert(self.interp().accepted_mapping(base, pte)); - assert(!self.interp().valid_mapping(base, pte)); - - assert(self.map_frame(base, pte).is_Err()); - assert(self.interp().map_frame(base, pte).is_Err()); - assert(self.entries.index(entry as int) === NodeEntry::Directory(d)); - assert(self.entries.index(entry as int) === NodeEntry::Directory(e)); - let res = self.update(entry, NodeEntry::Directory(e)).entries; - assert(res.index(entry as int) === self.entries.index(entry as int)); - assert_seqs_equal!(res, self.entries); + pub open spec fn unmap(self, base: nat) -> Result + recommends + self.inv(), + self.accepted_unmap(base), + decreases self.arch.layers.len() - self.layer, + via Self::check_unmap + { + if self.inv() && self.accepted_unmap(base) { + let entry = self.index_for_vaddr(base); + match self.entries.index(entry as int) { + NodeEntry::Page(p) => { + if aligned(base, self.entry_size()) { + // This implies: + // base == self.base_vaddr + entry * self.entry_size() + // (i.e. no remainder on division) + // (proved in lemma_index_for_vaddr_bounds) + Ok(self.update(entry, NodeEntry::Empty())) + } else { + Err(self) + } + }, + NodeEntry::Directory(d) => { + match d.unmap(base) { + Ok(new_d) => Ok( + self.update( + entry, + if new_d.empty() { + NodeEntry::Empty() + } else { + NodeEntry::Directory(new_d) }, - } - // d.lemma_map_frame_preserves_inv(base, pte); - } - }, - NodeEntry::Empty() => { - if self.entry_size() == pte.frame.size { - self.lemma_insert_interp_of_entry_implies_insert_interp(entry, base, NodeEntry::Page(pte), pte); - assert(equal(result_map(self.map_frame(base, pte), |d: Directory| d.interp()), self.interp().map_frame(base, pte))); - } else { - assert(((self.layer + 1) as nat) < self.arch.layers.len()); - let new_dir = self.new_empty_dir(entry); - self.lemma_new_empty_dir(entry); - assert(new_dir.inv()); - - self.lemma_accepted_mapping_implies_directory_accepted_mapping(base, pte, new_dir); - new_dir.lemma_accepted_mapping_implies_interp_accepted_mapping_auto(); - assert(new_dir.accepted_mapping(base, pte)); - indexing::lemma_index_from_base_and_addr(new_dir.base_vaddr, base, new_dir.entry_size(), new_dir.num_entries()); - new_dir.lemma_map_frame_empty_is_ok(base, pte); - new_dir.lemma_map_frame_preserves_inv(base, pte); - - let new_dir_mapped = new_dir.map_frame(base, pte).get_Ok_0(); - assert(new_dir.map_frame(base, pte).is_Ok()); - assert(new_dir_mapped.inv()); - new_dir.lemma_map_frame_refines_map_frame(base, pte); - assert(new_dir.interp().map_frame(base, pte).is_Ok()); - assert(equal(new_dir_mapped.interp(), new_dir.interp().map_frame(base, pte).get_Ok_0())); - - new_dir.lemma_empty_implies_interp_empty(); - assert_maps_equal!(new_dir.interp().map, map![]); - assert_maps_equal!(new_dir.interp().map_frame(base, pte).get_Ok_0().map, map![base => pte]); - assert_maps_equal!(self.interp_of_entry(entry).map, map![]); - assert(equal(self.interp_of_entry(entry).map, map![])); - assert(equal(map![].insert(base, pte), 
new_dir_mapped.interp().map)); - assert(equal(self.interp_of_entry(entry).map.insert(base, pte), new_dir_mapped.interp().map)); - self.lemma_insert_interp_of_entry_implies_insert_interp(entry, base, NodeEntry::Directory(new_dir_mapped), pte); - - assert(equal(result_map(self.map_frame(base, pte), |d: Directory| d.interp()), self.interp().map_frame(base, pte))); - } - }, - } - } - - pub open spec(checked) fn accepted_unmap(self, base: nat) -> bool - recommends self.well_formed() - { - self.interp().accepted_unmap(base) - } - - pub open spec fn unmap(self, base: nat) -> Result - recommends - self.inv(), - self.accepted_unmap(base), - decreases self.arch.layers.len() - self.layer via Self::check_unmap - { - if self.inv() && self.accepted_unmap(base) { - let entry = self.index_for_vaddr(base); - match self.entries.index(entry as int) { - NodeEntry::Page(p) => { - if aligned(base, self.entry_size()) { - // This implies: - // base == self.base_vaddr + entry * self.entry_size() - // (i.e. no remainder on division) - // (proved in lemma_index_for_vaddr_bounds) - Ok(self.update(entry, NodeEntry::Empty())) - } else { - Err(self) - } - }, - NodeEntry::Directory(d) => { - match d.unmap(base) { - Ok(new_d) => - Ok(self.update(entry, if new_d.empty() { - NodeEntry::Empty() - } else { - NodeEntry::Directory(new_d) - })), - Err(new_d) => Err(self.update(entry, NodeEntry::Directory(new_d))) - } - }, - NodeEntry::Empty() => Err(self), + ), + ), + Err(new_d) => Err(self.update(entry, NodeEntry::Directory(new_d))), } - } else { - arbitrary() - } + }, + NodeEntry::Empty() => Err(self), } + } else { + arbitrary() + } + } + + #[verifier(decreases_by)] + proof fn check_unmap(self, base: nat) { + if self.inv() && self.accepted_unmap(base) { + ambient_lemmas2(); + indexing::lemma_index_from_base_and_addr( + self.base_vaddr, + base, + self.entry_size(), + self.num_entries(), + ); + } else { + } + } - #[verifier(decreases_by)] - proof fn check_unmap(self, base: nat) { - if self.inv() && self.accepted_unmap(base) { - ambient_lemmas2(); - indexing::lemma_index_from_base_and_addr(self.base_vaddr, base, self.entry_size(), self.num_entries()); + pub proof fn lemma_unmap_preserves_inv(self, base: nat) + requires + self.inv(), + self.accepted_unmap(base), + self.unmap(base).is_Ok(), + ensures + self.unmap(base).get_Ok_0().inv(), + decreases self.arch.layers.len() - self.layer, + { + ambient_lemmas1(); + ambient_lemmas2(); + let res = self.unmap(base).get_Ok_0(); + let entry = self.index_for_vaddr(base); + indexing::lemma_entry_base_from_index(self.base_vaddr, entry, self.entry_size()); + indexing::lemma_index_from_base_and_addr( + self.base_vaddr, + base, + self.entry_size(), + self.num_entries(), + ); + assert(entry < self.num_entries()); + match self.entries.index(entry as int) { + NodeEntry::Page(p) => { + if aligned(base, self.entry_size()) { + assert(res.directories_obey_invariant()); } else { } - } - - - pub proof fn lemma_unmap_preserves_inv(self, base: nat) - requires - self.inv(), - self.accepted_unmap(base), - self.unmap(base).is_Ok(), - ensures - self.unmap(base).get_Ok_0().inv(), - decreases self.arch.layers.len() - self.layer - { - ambient_lemmas1(); - ambient_lemmas2(); - - let res = self.unmap(base).get_Ok_0(); - - let entry = self.index_for_vaddr(base); - indexing::lemma_entry_base_from_index(self.base_vaddr, entry, self.entry_size()); - indexing::lemma_index_from_base_and_addr(self.base_vaddr, base, self.entry_size(), self.num_entries()); - - assert(entry < self.num_entries()); - match 
self.entries.index(entry as int) { - NodeEntry::Page(p) => { - if aligned(base, self.entry_size()) { - assert(res.directories_obey_invariant()); - } else { - } - }, - NodeEntry::Directory(d) => { - d.lemma_inv_implies_interp_inv(); - assert(d.accepted_unmap(base)); - match d.unmap(base) { - Ok(new_d) => { - d.lemma_unmap_preserves_inv(base); - assert(res.directories_obey_invariant()); - } - Err(_) => { } - } + }, + NodeEntry::Directory(d) => { + d.lemma_inv_implies_interp_inv(); + assert(d.accepted_unmap(base)); + match d.unmap(base) { + Ok(new_d) => { + d.lemma_unmap_preserves_inv(base); + assert(res.directories_obey_invariant()); }, - NodeEntry::Empty() => { }, + Err(_) => {}, } - } + }, + NodeEntry::Empty() => {}, + } + } - pub proof fn lemma_unmap_structure_assertions(self, base: nat, idx: nat) - requires - self.inv(), - self.accepted_unmap(base), - idx == self.index_for_vaddr(base), - ensures - match self.entries.index(idx as int) { - NodeEntry::Page(p) => { - if aligned(base, self.entry_size()) { - base == self.base_vaddr + idx * self.entry_size() - } else { - true - } - }, - NodeEntry::Directory(d) => { - &&& d.inv() - &&& d.accepted_unmap(base) - }, - NodeEntry::Empty() => true, + pub proof fn lemma_unmap_structure_assertions(self, base: nat, idx: nat) + requires + self.inv(), + self.accepted_unmap(base), + idx == self.index_for_vaddr(base), + ensures + match self.entries.index(idx as int) { + NodeEntry::Page(p) => { + if aligned(base, self.entry_size()) { + base == self.base_vaddr + idx * self.entry_size() + } else { + true } - decreases self.arch.layers.len() - self.layer - { - ambient_lemmas1(); - ambient_lemmas2(); - self.lemma_inv_implies_interp_inv(); - - indexing::lemma_entry_base_from_index(self.base_vaddr, idx, self.entry_size()); - indexing::lemma_index_from_base_and_addr(self.base_vaddr, base, self.entry_size(), self.num_entries()); - assert(aligned(self.base_vaddr, self.entry_size())) by { - lib::mod_mult_zero_implies_mod_zero(self.base_vaddr, self.entry_size(), self.num_entries()); - }; - - match self.entries.index(self.index_for_vaddr(base) as int) { - NodeEntry::Page(p) => { - if aligned(base, self.entry_size()) { - } else { - } - }, - NodeEntry::Directory(d) => { - assert(d.inv()); - assert(d.accepted_unmap(base)); - d.lemma_unmap_refines_unmap(base); - }, - NodeEntry::Empty() => { }, + }, + NodeEntry::Directory(d) => { + &&& d.inv() + &&& d.accepted_unmap(base) + }, + NodeEntry::Empty() => true, + }, + decreases self.arch.layers.len() - self.layer, + { + ambient_lemmas1(); + ambient_lemmas2(); + self.lemma_inv_implies_interp_inv(); + indexing::lemma_entry_base_from_index(self.base_vaddr, idx, self.entry_size()); + indexing::lemma_index_from_base_and_addr( + self.base_vaddr, + base, + self.entry_size(), + self.num_entries(), + ); + assert(aligned(self.base_vaddr, self.entry_size())) by { + lib::mod_mult_zero_implies_mod_zero( + self.base_vaddr, + self.entry_size(), + self.num_entries(), + ); + }; + match self.entries.index(self.index_for_vaddr(base) as int) { + NodeEntry::Page(p) => { + if aligned(base, self.entry_size()) { + } else { } - } + }, + NodeEntry::Directory(d) => { + assert(d.inv()); + assert(d.accepted_unmap(base)); + d.lemma_unmap_refines_unmap(base); + }, + NodeEntry::Empty() => {}, + } + } - pub proof fn lemma_unmap_refines_unmap(self, base: nat) - requires - self.inv(), - self.accepted_unmap(base), - ensures - self.unmap(base).is_Err() ==> self.unmap(base).get_Err_0() === self, - equal(result_map(self.unmap(base), |d: Directory| d.interp()), 
self.interp().unmap(base)), - decreases self.arch.layers.len() - self.layer - { - ambient_lemmas1(); - ambient_lemmas2(); - self.lemma_inv_implies_interp_inv(); - - if let Ok(nself) = self.unmap(base) { - self.lemma_unmap_preserves_inv(base); - assert(nself.inv()); - nself.lemma_inv_implies_interp_inv(); - assert(nself.interp().inv()); + pub proof fn lemma_unmap_refines_unmap(self, base: nat) + requires + self.inv(), + self.accepted_unmap(base), + ensures + self.unmap(base).is_Err() ==> self.unmap(base).get_Err_0() === self, + equal( + result_map(self.unmap(base), |d: Directory| d.interp()), + self.interp().unmap(base), + ), + decreases self.arch.layers.len() - self.layer, + { + ambient_lemmas1(); + ambient_lemmas2(); + self.lemma_inv_implies_interp_inv(); + if let Ok(nself) = self.unmap(base) { + self.lemma_unmap_preserves_inv(base); + assert(nself.inv()); + nself.lemma_inv_implies_interp_inv(); + assert(nself.interp().inv()); + } + let nself_res = self.unmap(base); + let nself = self.unmap(base).get_Ok_0(); + let i_nself_res = self.interp().unmap(base); + let i_nself = self.interp().unmap(base).get_Ok_0(); + let entry = self.index_for_vaddr(base); + indexing::lemma_entry_base_from_index(self.base_vaddr, entry, self.entry_size()); + indexing::lemma_entry_base_from_index_support(self.base_vaddr, entry, self.entry_size()); + indexing::lemma_index_from_base_and_addr( + self.base_vaddr, + base, + self.entry_size(), + self.num_entries(), + ); + self.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping(entry); + match self.entries.index(entry as int) { + NodeEntry::Page(p) => { + if aligned(base, self.entry_size()) { + assert_maps_equal!(self.interp_of_entry(entry).map.remove(base), map![]); + assert(self.update(entry, NodeEntry::Empty()).inv()); + self.lemma_remove_from_interp_of_entry_implies_remove_from_interp( + entry, + base, + NodeEntry::Empty(), + ); + } else { + indexing::lemma_entry_base_from_index( + self.base_vaddr, + entry, + self.entry_size(), + ); + assert(!self.interp().map.dom().contains(base)); + assert(i_nself_res.is_Err()); } - - let nself_res = self.unmap(base); - let nself = self.unmap(base).get_Ok_0(); - - let i_nself_res = self.interp().unmap(base); - let i_nself = self.interp().unmap(base).get_Ok_0(); - - let entry = self.index_for_vaddr(base); - indexing::lemma_entry_base_from_index(self.base_vaddr, entry, self.entry_size()); - indexing::lemma_entry_base_from_index_support(self.base_vaddr, entry, self.entry_size()); - indexing::lemma_index_from_base_and_addr(self.base_vaddr, base, self.entry_size(), self.num_entries()); - self.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping(entry); - - match self.entries.index(entry as int) { - NodeEntry::Page(p) => { - if aligned(base, self.entry_size()) { + }, + NodeEntry::Directory(d) => { + assert(d.inv()); + d.lemma_inv_implies_interp_inv(); + assert(d.accepted_unmap(base)); + d.lemma_unmap_refines_unmap(base); + match d.unmap(base) { + Ok(new_d) => { + d.lemma_unmap_preserves_inv(base); + assert(new_d.inv()); + assert(d.unmap(base).is_Ok()); + assert(d.interp().unmap(base).is_Ok()); + assert(equal(new_d.interp(), d.interp().unmap(base).get_Ok_0())); + if new_d.empty() { + new_d.lemma_empty_implies_interp_empty(); + d.interp().lemma_unmap_decrements_len(base); + assert(new_d.interp().map.dom().len() == 0); + assert(d.interp().map.dom().len() == 1); + assert(d.interp().map.dom().contains(base)); + assert_sets_equal!(d.interp().map.dom(), set![base]); + assert(nself_res.is_Ok()); + 
assert(equal(self.interp_of_entry(entry).map, d.interp().map)); + assert(equal( + d.interp().unmap(base).get_Ok_0().map, + d.interp().map.remove(base), + )); assert_maps_equal!(self.interp_of_entry(entry).map.remove(base), map![]); assert(self.update(entry, NodeEntry::Empty()).inv()); - self.lemma_remove_from_interp_of_entry_implies_remove_from_interp(entry, base, NodeEntry::Empty()); + self.lemma_remove_from_interp_of_entry_implies_remove_from_interp( + entry, + base, + NodeEntry::Empty(), + ); + assert(equal(nself.interp(), i_nself)); } else { - indexing::lemma_entry_base_from_index(self.base_vaddr, entry, self.entry_size()); - assert(!self.interp().map.dom().contains(base)); - assert(i_nself_res.is_Err()); + assert(self.update(entry, NodeEntry::Directory(new_d)).inv()); + self.lemma_remove_from_interp_of_entry_implies_remove_from_interp( + entry, + base, + NodeEntry::Directory(new_d), + ); } }, - NodeEntry::Directory(d) => { - assert(d.inv()); - d.lemma_inv_implies_interp_inv(); - assert(d.accepted_unmap(base)); - d.lemma_unmap_refines_unmap(base); - match d.unmap(base) { - Ok(new_d) => { - d.lemma_unmap_preserves_inv(base); - assert(new_d.inv()); - assert(d.unmap(base).is_Ok()); - assert(d.interp().unmap(base).is_Ok()); - assert(equal(new_d.interp(), d.interp().unmap(base).get_Ok_0())); - if new_d.empty() { - new_d.lemma_empty_implies_interp_empty(); - d.interp().lemma_unmap_decrements_len(base); - assert(new_d.interp().map.dom().len() == 0); - assert(d.interp().map.dom().len() == 1); - assert(d.interp().map.dom().contains(base)); - assert_sets_equal!(d.interp().map.dom(), set![base]); - assert(nself_res.is_Ok()); - assert(equal(self.interp_of_entry(entry).map, d.interp().map)); - assert(equal(d.interp().unmap(base).get_Ok_0().map, d.interp().map.remove(base))); - assert_maps_equal!(self.interp_of_entry(entry).map.remove(base), map![]); - assert(self.update(entry, NodeEntry::Empty()).inv()); - self.lemma_remove_from_interp_of_entry_implies_remove_from_interp(entry, base, NodeEntry::Empty()); - assert(equal(nself.interp(), i_nself)); - } else { - assert(self.update(entry, NodeEntry::Directory(new_d)).inv()); - self.lemma_remove_from_interp_of_entry_implies_remove_from_interp(entry, base, NodeEntry::Directory(new_d)); - } - } - Err(e) => { - assert(self.entries.index(entry as int) === NodeEntry::Directory(d)); - assert(self.entries.index(entry as int) === NodeEntry::Directory(e)); - let res = self.update(entry, NodeEntry::Directory(e)).entries; - assert(res.index(entry as int) === self.entries.index(entry as int)); - assert_seqs_equal!(res, self.entries); - assert(res === self.entries); - } - } + Err(e) => { + assert(self.entries.index(entry as int) === NodeEntry::Directory(d)); + assert(self.entries.index(entry as int) === NodeEntry::Directory(e)); + let res = self.update(entry, NodeEntry::Directory(e)).entries; + assert(res.index(entry as int) === self.entries.index(entry as int)); + assert_seqs_equal!(res, self.entries); + assert(res === self.entries); }, - NodeEntry::Empty() => { }, - } - } - - proof fn lemma_entries_equal_implies_interp_aux_equal(self, other: Directory, i: nat) - requires - self.inv(), - other.inv(), - equal(self.arch, other.arch), - equal(self.layer, other.layer), - equal(self.base_vaddr, other.base_vaddr), - equal(self.num_entries(), other.num_entries()), - forall|j: int| i <= j && j < self.entries.len() ==> equal(self.entries.index(j), other.entries.index(j)), - ensures - equal(self.interp_aux(i), other.interp_aux(i)), - decreases self.arch.layers.len() - 
self.layer, self.num_entries() - i - { - if i >= self.entries.len() { - } else { - let rem1 = self.interp_aux(i + 1); - let rem2 = other.interp_aux(i + 1); - let entry_i1 = self.interp_of_entry(i); - let entry_i2 = other.interp_of_entry(i); - self.lemma_entries_equal_implies_interp_aux_equal(other, i + 1); - assert_maps_equal!(rem1.map.union_prefer_right(entry_i1.map), rem2.map.union_prefer_right(entry_i2.map)); - } - } - - proof fn lemma_remove_from_interp_of_entry_implies_remove_from_interp_aux(self, j: nat, i: nat, vaddr: nat, n: NodeEntry) - requires - self.inv(), - i <= j, - j < self.num_entries(), - self.interp_of_entry(j).map.dom().contains(vaddr), - self.update(j, n).inv(), - equal( - self.interp_of_entry(j).map.remove(vaddr), - match n { - NodeEntry::Page(p) => map![self.entry_base(j) => p], - NodeEntry::Directory(d) => d.interp_aux(0).map, - NodeEntry::Empty() => map![], - }), - ensures - equal(self.interp_aux(i).map.remove(vaddr), self.update(j, n).interp_aux(i).map), - decreases self.arch.layers.len() - self.layer, self.num_entries() - i - { - - assert(j < self.entries.len()); - ambient_lemmas1(); - self.lemma_inv_implies_interp_aux_inv(i); - self.lemma_inv_implies_interp_aux_inv(i + 1); - self.lemma_inv_implies_interp_of_entry_inv(i); - self.lemma_inv_implies_interp_of_entry_inv(j); - - self.lemma_interp_of_entry(); - self.lemma_interp_of_entry_contains_mapping_implies_interp_aux_contains_mapping(i, j); - - let nself = self.update(j, n); - - if i >= self.entries.len() { - } else { - if i == j { - assert(equal(self.interp_aux(i).map, self.interp_aux(i + 1).map.union_prefer_right(self.interp_of_entry(i).map))); - - assert(equal(self.interp_of_entry(i).map.remove(vaddr), nself.interp_of_entry(i).map)); - self.lemma_entries_equal_implies_interp_aux_equal(nself, i+1); - assert(equal(self.interp_aux(i + 1).map, nself.interp_aux(i + 1).map)); - - assert(equal(self.interp_aux(i + 1).map.union_prefer_right(self.interp_of_entry(i).map).remove(vaddr), - nself.interp_aux(i + 1).map.union_prefer_right(nself.interp_of_entry(i).map))); - - assert(equal(self.interp_aux(i).map.remove(vaddr), self.update(j, n).interp_aux(i).map)); - } else { - assert(i < j); - assert(self.directories_obey_invariant()); - - self.lemma_remove_from_interp_of_entry_implies_remove_from_interp_aux(j, i + 1, vaddr, n); - self.lemma_interp_of_entry_contains_mapping_implies_interp_aux_contains_mapping(i + 1, j); - - assert(self.interp_aux(j).map.dom().contains(vaddr)); - assert(self.interp_aux(i + 1).map.dom().contains(vaddr)); - - assert(equal(self.interp_aux(i + 1).map.remove(vaddr), self.update(j, n).interp_aux(i + 1).map)); - - assert(equal(self.interp_aux(i).map, self.interp_aux(i + 1).map.union_prefer_right(self.interp_of_entry(i).map))); - - - - assert(nself.inv()); - assert(equal(nself.interp_aux(i).map, nself.interp_aux(i + 1).map.union_prefer_right(nself.interp_of_entry(i).map))); - - assert(equal(self.interp_aux(i).map.remove(vaddr), self.update(j, n).interp_aux(i).map)); - } } - } - - proof fn lemma_remove_from_interp_of_entry_implies_remove_from_interp(self, j: nat, vaddr: nat, n: NodeEntry) - requires - self.inv(), - j < self.num_entries(), - self.interp_of_entry(j).map.dom().contains(vaddr), - self.update(j, n).inv(), - equal( - self.interp_of_entry(j).map.remove(vaddr), - match n { - NodeEntry::Page(p) => map![self.entry_base(j) => p], - NodeEntry::Directory(d) => d.interp_aux(0).map, - NodeEntry::Empty() => map![], - }) - ensures - equal(self.interp().map.remove(vaddr), self.update(j, n).interp().map), 
- { - self.lemma_remove_from_interp_of_entry_implies_remove_from_interp_aux(j, 0, vaddr, n); - } + }, + NodeEntry::Empty() => {}, } + } - // FIXME: Something like these functions should probably be added to vstd. One problem with that: - // May want exec versions of the functions but can't give them the same name. - pub open spec(checked) fn result_map_ok(res: Result, f: FnSpec(A) -> C) -> Result { - match res { - Ok(a) => Ok(f(a)), - Err(b) => Err(b), - } + proof fn lemma_entries_equal_implies_interp_aux_equal(self, other: Directory, i: nat) + requires + self.inv(), + other.inv(), + equal(self.arch, other.arch), + equal(self.layer, other.layer), + equal(self.base_vaddr, other.base_vaddr), + equal(self.num_entries(), other.num_entries()), + forall|j: int| + i <= j && j < self.entries.len() ==> equal( + self.entries.index(j), + other.entries.index(j), + ), + ensures + equal(self.interp_aux(i), other.interp_aux(i)), + decreases self.arch.layers.len() - self.layer, self.num_entries() - i, + { + if i >= self.entries.len() { + } else { + let rem1 = self.interp_aux(i + 1); + let rem2 = other.interp_aux(i + 1); + let entry_i1 = self.interp_of_entry(i); + let entry_i2 = other.interp_of_entry(i); + self.lemma_entries_equal_implies_interp_aux_equal(other, i + 1); + assert_maps_equal!(rem1.map.union_prefer_right(entry_i1.map), rem2.map.union_prefer_right(entry_i2.map)); } + } - pub open spec(checked) fn result_map(res: Result, f: FnSpec(A) -> B) -> Result { - match res { - Ok(a) => Ok(f(a)), - Err(a) => Err(f(a)), + proof fn lemma_remove_from_interp_of_entry_implies_remove_from_interp_aux( + self, + j: nat, + i: nat, + vaddr: nat, + n: NodeEntry, + ) + requires + self.inv(), + i <= j, + j < self.num_entries(), + self.interp_of_entry(j).map.dom().contains(vaddr), + self.update(j, n).inv(), + equal( + self.interp_of_entry(j).map.remove(vaddr), + match n { + NodeEntry::Page(p) => map![self.entry_base(j) => p], + NodeEntry::Directory(d) => d.interp_aux(0).map, + NodeEntry::Empty() => map![], + }, + ), + ensures + equal(self.interp_aux(i).map.remove(vaddr), self.update(j, n).interp_aux(i).map), + decreases self.arch.layers.len() - self.layer, self.num_entries() - i, + { + assert(j < self.entries.len()); + ambient_lemmas1(); + self.lemma_inv_implies_interp_aux_inv(i); + self.lemma_inv_implies_interp_aux_inv(i + 1); + self.lemma_inv_implies_interp_of_entry_inv(i); + self.lemma_inv_implies_interp_of_entry_inv(j); + self.lemma_interp_of_entry(); + self.lemma_interp_of_entry_contains_mapping_implies_interp_aux_contains_mapping(i, j); + let nself = self.update(j, n); + if i >= self.entries.len() { + } else { + if i == j { + assert(equal( + self.interp_aux(i).map, + self.interp_aux(i + 1).map.union_prefer_right(self.interp_of_entry(i).map), + )); + assert(equal( + self.interp_of_entry(i).map.remove(vaddr), + nself.interp_of_entry(i).map, + )); + self.lemma_entries_equal_implies_interp_aux_equal(nself, i + 1); + assert(equal(self.interp_aux(i + 1).map, nself.interp_aux(i + 1).map)); + assert(equal( + self.interp_aux(i + 1).map.union_prefer_right( + self.interp_of_entry(i).map, + ).remove(vaddr), + nself.interp_aux(i + 1).map.union_prefer_right(nself.interp_of_entry(i).map), + )); + assert(equal( + self.interp_aux(i).map.remove(vaddr), + self.update(j, n).interp_aux(i).map, + )); + } else { + assert(i < j); + assert(self.directories_obey_invariant()); + self.lemma_remove_from_interp_of_entry_implies_remove_from_interp_aux( + j, + i + 1, + vaddr, + n, + ); + 
self.lemma_interp_of_entry_contains_mapping_implies_interp_aux_contains_mapping( + i + 1, + j, + ); + assert(self.interp_aux(j).map.dom().contains(vaddr)); + assert(self.interp_aux(i + 1).map.dom().contains(vaddr)); + assert(equal( + self.interp_aux(i + 1).map.remove(vaddr), + self.update(j, n).interp_aux(i + 1).map, + )); + assert(equal( + self.interp_aux(i).map, + self.interp_aux(i + 1).map.union_prefer_right(self.interp_of_entry(i).map), + )); + assert(nself.inv()); + assert(equal( + nself.interp_aux(i).map, + nself.interp_aux(i + 1).map.union_prefer_right(nself.interp_of_entry(i).map), + )); + assert(equal( + self.interp_aux(i).map.remove(vaddr), + self.update(j, n).interp_aux(i).map, + )); } } + } - } + proof fn lemma_remove_from_interp_of_entry_implies_remove_from_interp( + self, + j: nat, + vaddr: nat, + n: NodeEntry, + ) + requires + self.inv(), + j < self.num_entries(), + self.interp_of_entry(j).map.dom().contains(vaddr), + self.update(j, n).inv(), + equal( + self.interp_of_entry(j).map.remove(vaddr), + match n { + NodeEntry::Page(p) => map![self.entry_base(j) => p], + NodeEntry::Directory(d) => d.interp_aux(0).map, + NodeEntry::Empty() => map![], + }, + ), + ensures + equal(self.interp().map.remove(vaddr), self.update(j, n).interp().map), + { + self.lemma_remove_from_interp_of_entry_implies_remove_from_interp_aux(j, 0, vaddr, n); + } +} + +// FIXME: Something like these functions should probably be added to vstd. One problem with that: +// May want exec versions of the functions but can't give them the same name. +pub open spec(checked) fn result_map_ok(res: Result, f: FnSpec(A) -> C) -> Result< + C, + B, +> { + match res { + Ok(a) => Ok(f(a)), + Err(b) => Err(b), + } +} + +pub open spec(checked) fn result_map(res: Result, f: FnSpec(A) -> B) -> Result { + match res { + Ok(a) => Ok(f(a)), + Err(a) => Err(f(a)), + } +} + +} // verus! } pub mod l2_impl { @@ -2181,1227 +2603,2166 @@ pub mod impl_u { verus! 
{ - proof fn lemma_page_aligned_implies_mask_dir_addr_is_identity() - ensures forall|addr: u64| addr <= MAX_PHYADDR ==> #[trigger] aligned(addr as nat, PAGE_SIZE as nat) ==> addr & MASK_DIR_ADDR == addr, - { - assert forall|addr: u64| - addr <= MAX_PHYADDR && - #[trigger] aligned(addr as nat, PAGE_SIZE as nat) - implies - addr & MASK_DIR_ADDR == addr - by { - let max_width: u64 = MAX_PHYADDR_WIDTH; - let mask_dir_addr: u64 = MASK_DIR_ADDR; - assert(addr & mask_dir_addr == addr) by (bit_vector) - requires - addr <= sub(1u64 << max_width, 1u64), - addr % 4096u64 == 0, - mask_dir_addr == bitmask_inc!(12u64, max_width - 1); - }; - } - - proof fn lemma_aligned_addr_mask_facts(addr: u64) - ensures - aligned(addr as nat, L1_ENTRY_SIZE as nat) ==> (addr & MASK_L1_PG_ADDR == addr & MASK_ADDR), - aligned(addr as nat, L2_ENTRY_SIZE as nat) ==> (addr & MASK_L2_PG_ADDR == addr & MASK_ADDR), - (addr & MASK_L3_PG_ADDR == addr & MASK_ADDR), - addr <= MAX_PHYADDR && aligned(addr as nat, L1_ENTRY_SIZE as nat) ==> (addr & MASK_ADDR == addr), - addr <= MAX_PHYADDR && aligned(addr as nat, L2_ENTRY_SIZE as nat) ==> (addr & MASK_ADDR == addr), - addr <= MAX_PHYADDR && aligned(addr as nat, L3_ENTRY_SIZE as nat) ==> (addr & MASK_ADDR == addr), - { - axiom_max_phyaddr_width_facts(); - assert(aligned(addr as nat, L1_ENTRY_SIZE as nat) ==> (addr & MASK_L1_PG_ADDR == addr & MASK_ADDR)) by { - if aligned(addr as nat, L1_ENTRY_SIZE as nat) { - let max_width: u64 = MAX_PHYADDR_WIDTH; - assert(addr & bitmask_inc!(30u64, max_width - 1) == addr & bitmask_inc!(12u64, max_width - 1)) by (bit_vector) - requires - addr % 0x40000000u64 == 0, - 32 <= max_width; - } - }; - assert(aligned(addr as nat, L2_ENTRY_SIZE as nat) ==> (addr & MASK_L2_PG_ADDR == addr & MASK_ADDR)) by { - if aligned(addr as nat, L2_ENTRY_SIZE as nat) { - let max_width: u64 = MAX_PHYADDR_WIDTH; - assert(addr & bitmask_inc!(21u64, max_width - 1) == addr & bitmask_inc!(12u64, max_width - 1)) by (bit_vector) - requires - addr % 0x200000u64 == 0, - 32 <= max_width; - } - }; - assert(addr <= MAX_PHYADDR && aligned(addr as nat, L1_ENTRY_SIZE as nat) ==> (addr & MASK_ADDR == addr)) by { - if addr <= MAX_PHYADDR && aligned(addr as nat, L1_ENTRY_SIZE as nat) { - assert(aligned(L1_ENTRY_SIZE as nat, PAGE_SIZE as nat)) by(nonlinear_arith); - lib::aligned_transitive(addr as nat, L1_ENTRY_SIZE as nat, PAGE_SIZE as nat); - lemma_page_aligned_implies_mask_dir_addr_is_identity(); - } - }; - assert(addr <= MAX_PHYADDR && aligned(addr as nat, L2_ENTRY_SIZE as nat) ==> (addr & MASK_ADDR == addr)) by { - if addr <= MAX_PHYADDR && aligned(addr as nat, L2_ENTRY_SIZE as nat) { - assert(aligned(L2_ENTRY_SIZE as nat, PAGE_SIZE as nat)) by(nonlinear_arith); - lib::aligned_transitive(addr as nat, L2_ENTRY_SIZE as nat, PAGE_SIZE as nat); - lemma_page_aligned_implies_mask_dir_addr_is_identity(); - } - }; - assert(addr <= MAX_PHYADDR && aligned(addr as nat, L3_ENTRY_SIZE as nat) ==> (addr & MASK_ADDR == addr)) by { - if addr <= MAX_PHYADDR && aligned(addr as nat, L3_ENTRY_SIZE as nat) { - assert(aligned(L3_ENTRY_SIZE as nat, PAGE_SIZE as nat)) by(nonlinear_arith); - lib::aligned_transitive(addr as nat, L3_ENTRY_SIZE as nat, PAGE_SIZE as nat); - lemma_page_aligned_implies_mask_dir_addr_is_identity(); - } - }; - } +proof fn lemma_page_aligned_implies_mask_dir_addr_is_identity() + ensures + forall|addr: u64| + addr <= MAX_PHYADDR ==> #[trigger] aligned(addr as nat, PAGE_SIZE as nat) ==> addr + & MASK_DIR_ADDR == addr, +{ + assert forall|addr: u64| + addr <= MAX_PHYADDR && #[trigger] 
aligned(addr as nat, PAGE_SIZE as nat) implies addr + & MASK_DIR_ADDR == addr by { + let max_width: u64 = MAX_PHYADDR_WIDTH; + let mask_dir_addr: u64 = MASK_DIR_ADDR; + assert(addr & mask_dir_addr == addr) by (bit_vector) + requires + addr <= sub(1u64 << max_width, 1u64), + addr % 4096u64 == 0, + mask_dir_addr == bitmask_inc!(12u64, max_width - 1), + ; + }; +} - pub open spec fn addr_is_zero_padded(layer: nat, addr: u64, is_page: bool) -> bool { - is_page ==> { - if layer == 1 { - addr & MASK_L1_PG_ADDR == addr - } else if layer == 2 { - addr & MASK_L2_PG_ADDR == addr - } else if layer == 3 { - addr & MASK_L3_PG_ADDR == addr - } else { - arbitrary() - } - } +proof fn lemma_aligned_addr_mask_facts(addr: u64) + ensures + aligned(addr as nat, L1_ENTRY_SIZE as nat) ==> (addr & MASK_L1_PG_ADDR == addr & MASK_ADDR), + aligned(addr as nat, L2_ENTRY_SIZE as nat) ==> (addr & MASK_L2_PG_ADDR == addr & MASK_ADDR), + (addr & MASK_L3_PG_ADDR == addr & MASK_ADDR), + addr <= MAX_PHYADDR && aligned(addr as nat, L1_ENTRY_SIZE as nat) ==> (addr & MASK_ADDR + == addr), + addr <= MAX_PHYADDR && aligned(addr as nat, L2_ENTRY_SIZE as nat) ==> (addr & MASK_ADDR + == addr), + addr <= MAX_PHYADDR && aligned(addr as nat, L3_ENTRY_SIZE as nat) ==> (addr & MASK_ADDR + == addr), +{ + axiom_max_phyaddr_width_facts(); + assert(aligned(addr as nat, L1_ENTRY_SIZE as nat) ==> (addr & MASK_L1_PG_ADDR == addr + & MASK_ADDR)) by { + if aligned(addr as nat, L1_ENTRY_SIZE as nat) { + let max_width: u64 = MAX_PHYADDR_WIDTH; + assert(addr & bitmask_inc!(30u64, max_width - 1) == addr + & bitmask_inc!(12u64, max_width - 1)) by (bit_vector) + requires + addr % 0x40000000u64 == 0, + 32 <= max_width, + ; + } + }; + assert(aligned(addr as nat, L2_ENTRY_SIZE as nat) ==> (addr & MASK_L2_PG_ADDR == addr + & MASK_ADDR)) by { + if aligned(addr as nat, L2_ENTRY_SIZE as nat) { + let max_width: u64 = MAX_PHYADDR_WIDTH; + assert(addr & bitmask_inc!(21u64, max_width - 1) == addr + & bitmask_inc!(12u64, max_width - 1)) by (bit_vector) + requires + addr % 0x200000u64 == 0, + 32 <= max_width, + ; + } + }; + assert(addr <= MAX_PHYADDR && aligned(addr as nat, L1_ENTRY_SIZE as nat) ==> (addr & MASK_ADDR + == addr)) by { + if addr <= MAX_PHYADDR && aligned(addr as nat, L1_ENTRY_SIZE as nat) { + assert(aligned(L1_ENTRY_SIZE as nat, PAGE_SIZE as nat)) by (nonlinear_arith); + lib::aligned_transitive(addr as nat, L1_ENTRY_SIZE as nat, PAGE_SIZE as nat); + lemma_page_aligned_implies_mask_dir_addr_is_identity(); + } + }; + assert(addr <= MAX_PHYADDR && aligned(addr as nat, L2_ENTRY_SIZE as nat) ==> (addr & MASK_ADDR + == addr)) by { + if addr <= MAX_PHYADDR && aligned(addr as nat, L2_ENTRY_SIZE as nat) { + assert(aligned(L2_ENTRY_SIZE as nat, PAGE_SIZE as nat)) by (nonlinear_arith); + lib::aligned_transitive(addr as nat, L2_ENTRY_SIZE as nat, PAGE_SIZE as nat); + lemma_page_aligned_implies_mask_dir_addr_is_identity(); + } + }; + assert(addr <= MAX_PHYADDR && aligned(addr as nat, L3_ENTRY_SIZE as nat) ==> (addr & MASK_ADDR + == addr)) by { + if addr <= MAX_PHYADDR && aligned(addr as nat, L3_ENTRY_SIZE as nat) { + assert(aligned(L3_ENTRY_SIZE as nat, PAGE_SIZE as nat)) by (nonlinear_arith); + lib::aligned_transitive(addr as nat, L3_ENTRY_SIZE as nat, PAGE_SIZE as nat); + lemma_page_aligned_implies_mask_dir_addr_is_identity(); } + }; +} - // PageDirectoryEntry is defined in crate::spec_t::hardware to define the page table walk - // semantics. Here we reuse it for the implementation and add exec functions to it. 
- impl PageDirectoryEntry { - // PAT flag is set to zero for huge pages and super pages - pub open spec fn hp_pat_is_zero(self) -> bool { - &&& self@.is_Page() && self.layer == 1 ==> self.entry & MASK_PG_FLAG_PAT == 0 - &&& self@.is_Page() && self.layer == 2 ==> self.entry & MASK_PG_FLAG_PAT == 0 - } +pub open spec fn addr_is_zero_padded(layer: nat, addr: u64, is_page: bool) -> bool { + is_page ==> { + if layer == 1 { + addr & MASK_L1_PG_ADDR == addr + } else if layer == 2 { + addr & MASK_L2_PG_ADDR == addr + } else if layer == 3 { + addr & MASK_L3_PG_ADDR == addr + } else { + arbitrary() + } + } +} - pub proof fn lemma_addr_mask_when_hp_pat_is_zero(self) - requires self.hp_pat_is_zero() && self.all_mb0_bits_are_zero() && self@.is_Page() - ensures - self.layer == 1 ==> self.entry & MASK_L1_PG_ADDR == self.entry & MASK_ADDR, - self.layer == 2 ==> self.entry & MASK_L2_PG_ADDR == self.entry & MASK_ADDR - { - let e = self.entry; let mw = MAX_PHYADDR_WIDTH; - axiom_max_phyaddr_width_facts(); - reveal(PageDirectoryEntry::all_mb0_bits_are_zero); - if self.layer() == 1 { - assert(e & bitmask_inc!(12u64, mw - 1) == e & bitmask_inc!(30u64, mw - 1)) by (bit_vector) - requires e & bit!(12u64) == 0, e & bitmask_inc!(13u64,29u64) == 0, 32 <= mw <= 52; - } else if self.layer() == 2 { - assert(e & bitmask_inc!(12u64, mw - 1) == e & bitmask_inc!(21u64, mw - 1)) by (bit_vector) - requires e & bit!(12u64) == 0, e & bitmask_inc!(13u64,20u64) == 0, 32 <= mw <= 52; - } - } +// PageDirectoryEntry is defined in crate::spec_t::hardware to define the page table walk +// semantics. Here we reuse it for the implementation and add exec functions to it. +impl PageDirectoryEntry { + // PAT flag is set to zero for huge pages and super pages + pub open spec fn hp_pat_is_zero(self) -> bool { + &&& self@.is_Page() && self.layer == 1 ==> self.entry & MASK_PG_FLAG_PAT == 0 + &&& self@.is_Page() && self.layer == 2 ==> self.entry & MASK_PG_FLAG_PAT == 0 + } - pub proof fn lemma_zero_entry_facts(self) + pub proof fn lemma_addr_mask_when_hp_pat_is_zero(self) + requires + self.hp_pat_is_zero() && self.all_mb0_bits_are_zero() && self@.is_Page(), + ensures + self.layer == 1 ==> self.entry & MASK_L1_PG_ADDR == self.entry & MASK_ADDR, + self.layer == 2 ==> self.entry & MASK_L2_PG_ADDR == self.entry & MASK_ADDR, + { + let e = self.entry; + let mw = MAX_PHYADDR_WIDTH; + axiom_max_phyaddr_width_facts(); + reveal(PageDirectoryEntry::all_mb0_bits_are_zero); + if self.layer() == 1 { + assert(e & bitmask_inc!(12u64, mw - 1) == e & bitmask_inc!(30u64, mw - 1)) + by (bit_vector) requires - self.entry == 0, - self.layer@ <= 3, - ensures - self@.is_Empty(), - self.all_mb0_bits_are_zero(), - { - assert(forall|a: u64| 0 & a == 0) by (bit_vector); - reveal(PageDirectoryEntry::all_mb0_bits_are_zero); - assert(1u64 << 0 == 1) by (bit_vector); - assert(0u64 & 1 == 0) by (bit_vector); - } - - pub proof fn lemma_new_entry_mb0_bits_are_zero( - layer: usize, - address: u64, - is_page: bool, - is_writable: bool, - is_supervisor: bool, - is_writethrough: bool, - disable_cache: bool, - disable_execute: bool, - ) + e & bit!(12u64) == 0, + e & bitmask_inc!(13u64,29u64) == 0, + 32 <= mw <= 52, + ; + } else if self.layer() == 2 { + assert(e & bitmask_inc!(12u64, mw - 1) == e & bitmask_inc!(21u64, mw - 1)) + by (bit_vector) requires - layer <= 3, - if is_page { 0 < layer } else { layer < 3 }, - addr_is_zero_padded(layer as nat, address, is_page), - address & MASK_ADDR == address, - ensures - ({ let e = address - | MASK_FLAG_P - | if is_page && layer != 3 { 
MASK_L1_PG_FLAG_PS } else { 0 } - | if is_writable { MASK_FLAG_RW } else { 0 } - | if is_supervisor { 0 } else { MASK_FLAG_US } - | if is_writethrough { MASK_FLAG_PWT } else { 0 } - | if disable_cache { MASK_FLAG_PCD } else { 0 } - | if disable_execute { MASK_FLAG_XD } else { 0 }; - (PageDirectoryEntry { entry: e, layer: Ghost(layer as nat) }).all_mb0_bits_are_zero() - }), - { - let or1 = MASK_FLAG_P; - let or2 = if is_page && layer != 3 { MASK_L1_PG_FLAG_PS as u64 } else { 0 }; - let or3 = if is_writable { MASK_FLAG_RW as u64 } else { 0 }; - let or4 = if is_supervisor { 0 } else { MASK_FLAG_US as u64 }; - let or5 = if is_writethrough { MASK_FLAG_PWT as u64 } else { 0 }; - let or6 = if disable_cache { MASK_FLAG_PCD as u64 } else { 0 }; - let or7 = if disable_execute { MASK_FLAG_XD as u64 } else { 0 }; - let e = address | or1 | or2 | or3 | or4 | or5 | or6 | or7; - let mw: u64 = MAX_PHYADDR_WIDTH; - assert(forall|a:u64| #![auto] a == a | 0) by (bit_vector); - - axiom_max_phyaddr_width_facts(); - assert(forall|a:u64,i:u64| #![auto] i < 12 ==> a & bitmask_inc!(12u64,sub(mw,1)) == a ==> a & bit!(i) == 0) by (bit_vector) - requires 32 <= mw <= 52; - assert(forall|a:u64,i:u64| #![auto] i != 7 && (a & bit!(7u64) == 0) ==> (a | bit!(i)) & bit!(7u64) == 0) by (bit_vector); - assert(forall|a:u64,i:u64| #![auto] i < 13 && (a & bitmask_inc!(13u64,29u64) == 0) ==> ((a | bit!(i)) & bitmask_inc!(13u64,29u64) == 0)) by (bit_vector); - assert(forall|a:u64,i:u64| #![auto] i > 29 && (a & bitmask_inc!(13u64,29u64) == 0) ==> ((a | bit!(i)) & bitmask_inc!(13u64,29u64) == 0)) by (bit_vector); - assert(forall|a:u64,i:u64| #![auto] i < 13 && (a & bitmask_inc!(13u64,20u64) == 0) ==> ((a | bit!(i)) & bitmask_inc!(13u64,20u64) == 0)) by (bit_vector); - assert(forall|a:u64,i:u64| #![auto] i > 20 && (a & bitmask_inc!(13u64,20u64) == 0) ==> ((a | bit!(i)) & bitmask_inc!(13u64,20u64) == 0)) by (bit_vector); - assert(forall|a:u64,i:u64| #![auto] i < mw && (a & bitmask_inc!(mw,51u64) == 0) ==> ((a | bit!(i)) & bitmask_inc!(mw,51u64) == 0)) by (bit_vector); - assert(forall|a:u64,i:u64| #![auto] i > 51 && (a & bitmask_inc!(mw,51u64) == 0) ==> ((a | bit!(i)) & bitmask_inc!(mw,51u64) == 0)) by (bit_vector) - requires mw <= 52; - assert(address & bitmask_inc!(mw, 51) == 0) by (bit_vector) - requires - address & bitmask_inc!(12u64, mw - 1) == address, - 32 <= mw <= 52; - assert(forall|a:u64,i:u64| #![auto] i < mw && (a & bitmask_inc!(mw,62u64) == 0) ==> ((a | bit!(i)) & bitmask_inc!(mw,62u64) == 0)) by (bit_vector); - assert(forall|a:u64,i:u64| #![auto] i > 62 && (a & bitmask_inc!(mw,62u64) == 0) ==> ((a | bit!(i)) & bitmask_inc!(mw,62u64) == 0)) by (bit_vector) - requires mw <= 52; - assert(address & bitmask_inc!(mw, 62) == 0) by (bit_vector) - requires - address & bitmask_inc!(12u64, mw - 1) == address, - 32 <= mw <= 52; - PageDirectoryEntry::lemma_new_entry_addr_mask_is_address(layer, address, is_page, is_writable, is_supervisor, is_writethrough, disable_cache, disable_execute); - if layer == 0 { - assert(!is_page); - assert(e & bit!(7u64) == 0); - assert(e & bitmask_inc!(MAX_PHYADDR_WIDTH, 51) == 0); - } else if layer == 1 { - if is_page { - assert(address & bitmask_inc!(30u64,sub(mw,1)) == address ==> address & bitmask_inc!(13u64,29u64) == 0) by (bit_vector); - assert(e & bitmask_inc!(13u64,29u64) == 0); - assert(e & bitmask_inc!(MAX_PHYADDR_WIDTH, 51) == 0); - } else { - assert(e & bit!(7u64) == 0); - assert(e & bitmask_inc!(MAX_PHYADDR_WIDTH, 51) == 0); - } - } else if layer == 2 { - if is_page { - assert(address & 
bitmask_inc!(21u64,sub(mw,1)) == address ==> address & bitmask_inc!(13u64,20u64) == 0) by (bit_vector); - assert(e & bitmask_inc!(13u64,20u64) == 0); - assert(e & bitmask_inc!(MAX_PHYADDR_WIDTH, 62) == 0); - } else { - assert(e & bit!(7u64) == 0); - assert(e & bitmask_inc!(MAX_PHYADDR_WIDTH, 62) == 0); - } - } else if layer == 3 { - assert(is_page); - // assert(e & bit!(7u64) == 0); - assert(e & bitmask_inc!(MAX_PHYADDR_WIDTH, 62) == 0); - } else { assert(false); } - - let pde = PageDirectoryEntry { entry: e, layer: Ghost(layer as nat) }; - reveal(PageDirectoryEntry::all_mb0_bits_are_zero); - assert(pde.all_mb0_bits_are_zero()); - } + e & bit!(12u64) == 0, + e & bitmask_inc!(13u64,20u64) == 0, + 32 <= mw <= 52, + ; + } + } + pub proof fn lemma_zero_entry_facts(self) + requires + self.entry == 0, + self.layer@ <= 3, + ensures + self@.is_Empty(), + self.all_mb0_bits_are_zero(), + { + assert(forall|a: u64| 0 & a == 0) by (bit_vector); + reveal(PageDirectoryEntry::all_mb0_bits_are_zero); + assert(1u64 << 0 == 1) by (bit_vector); + assert(0u64 & 1 == 0) by (bit_vector); + } - pub proof fn lemma_new_entry_addr_mask_is_address( - layer: usize, - address: u64, - is_page: bool, - is_writable: bool, - is_supervisor: bool, - is_writethrough: bool, - disable_cache: bool, - disable_execute: bool, - ) - requires - layer <= 3, - if is_page { 0 < layer } else { layer < 3 }, - addr_is_zero_padded(layer as nat, address, is_page), - address & MASK_ADDR == address, - ensures - ({ let e = address - | MASK_FLAG_P - | if is_page && layer != 3 { MASK_L1_PG_FLAG_PS } else { 0 } - | if is_writable { MASK_FLAG_RW } else { 0 } - | if is_supervisor { 0 } else { MASK_FLAG_US } - | if is_writethrough { MASK_FLAG_PWT } else { 0 } - | if disable_cache { MASK_FLAG_PCD } else { 0 } - | if disable_execute { MASK_FLAG_XD } else { 0 }; - &&& e & MASK_ADDR == address - &&& e & MASK_FLAG_P == MASK_FLAG_P - &&& (e & MASK_L1_PG_FLAG_PS == MASK_L1_PG_FLAG_PS) == (is_page && layer != 3) - &&& (e & MASK_FLAG_RW == MASK_FLAG_RW) == is_writable - &&& (e & MASK_FLAG_US == MASK_FLAG_US) == !is_supervisor - &&& (e & MASK_FLAG_PWT == MASK_FLAG_PWT) == is_writethrough - &&& (e & MASK_FLAG_PCD == MASK_FLAG_PCD) == disable_cache - &&& (e & MASK_FLAG_XD == MASK_FLAG_XD) == disable_execute - &&& (is_page && layer == 1 ==> e & MASK_PG_FLAG_PAT == 0) - &&& (is_page && layer == 2 ==> e & MASK_PG_FLAG_PAT == 0) - - }), - { - let or1 = MASK_FLAG_P; - let or2 = if is_page && layer != 3 { MASK_L1_PG_FLAG_PS as u64 } else { 0 }; - let or3 = if is_writable { MASK_FLAG_RW as u64 } else { 0 }; - let or4 = if is_supervisor { 0 } else { MASK_FLAG_US as u64 }; - let or5 = if is_writethrough { MASK_FLAG_PWT as u64 } else { 0 }; - let or6 = if disable_cache { MASK_FLAG_PCD as u64 } else { 0 }; - let or7 = if disable_execute { MASK_FLAG_XD as u64 } else { 0 }; - let e = address | or1 | or2 | or3 | or4 | or5 | or6 | or7; - let mw: u64 = MAX_PHYADDR_WIDTH; - axiom_max_phyaddr_width_facts(); - assert(forall|a:u64,x:u64| x < 64 && (a & bit!(x) == 0) ==> a & bit!(x) != bit!(x)) by (bit_vector); - assert(forall|a:u64| #![auto] a == a | 0) by (bit_vector); - assert(forall|a:u64,i:u64| #![auto] i < 12 ==> a & bitmask_inc!(12u64, sub(mw, 1)) == (a | bit!(i)) & bitmask_inc!(12u64, sub(mw, 1))) by (bit_vector) - requires 32 <= mw <= 52; - assert(forall|a:u64,i:u64| #![auto] i > sub(mw, 1) ==> a & bitmask_inc!(12u64, sub(mw, 1)) == (a | bit!(i)) & bitmask_inc!(12u64, sub(mw, 1))) by (bit_vector) - requires 32 <= mw <= 52; - - assert(forall|a:u64,i:u64| #![auto] i < 12 ==> 
a & bitmask_inc!(12u64, sub(mw, 1)) == a ==> a & bit!(i) == 0) by (bit_vector) - requires 32 <= mw <= 52; - assert(forall|a:u64,i:u64| #![auto] i > sub(mw, 1) ==> a & bitmask_inc!(12u64, sub(mw, 1)) == a ==> a & bit!(i) == 0) by (bit_vector) - requires 32 <= mw <= 52; - assert(forall|a:u64,i:u64| #![auto] i < 64 ==> a & bit!(i) == 0 ==> (a | bit!(i)) & bit!(i) == bit!(i)) by (bit_vector); - assert(forall|a:u64,i:u64,j:u64| #![auto] i != j ==> a & bit!(i) == (a | bit!(j)) & bit!(i)) by (bit_vector); - assert({ - &&& is_page && layer == 1 ==> e & MASK_PG_FLAG_PAT == 0 - &&& is_page && layer == 2 ==> e & MASK_PG_FLAG_PAT == 0 - }) by { - if is_page && layer == 1 { - assert(address & bit!(12u64) == 0) by (bit_vector) - requires address & bitmask_inc!(30u64, sub(mw, 1)) == address; - } - if is_page && layer == 2 { - assert(address & bit!(12u64) == 0) by (bit_vector) - requires address & bitmask_inc!(21u64, sub(mw, 1)) == address; - } + pub proof fn lemma_new_entry_mb0_bits_are_zero( + layer: usize, + address: u64, + is_page: bool, + is_writable: bool, + is_supervisor: bool, + is_writethrough: bool, + disable_cache: bool, + disable_execute: bool, + ) + requires + layer <= 3, + if is_page { + 0 < layer + } else { + layer < 3 + }, + addr_is_zero_padded(layer as nat, address, is_page), + address & MASK_ADDR == address, + ensures + ({ + let e = address | MASK_FLAG_P | if is_page && layer != 3 { + MASK_L1_PG_FLAG_PS + } else { + 0 + } | if is_writable { + MASK_FLAG_RW + } else { + 0 + } | if is_supervisor { + 0 + } else { + MASK_FLAG_US + } | if is_writethrough { + MASK_FLAG_PWT + } else { + 0 + } | if disable_cache { + MASK_FLAG_PCD + } else { + 0 + } | if disable_execute { + MASK_FLAG_XD + } else { + 0 }; + (PageDirectoryEntry { + entry: e, + layer: Ghost(layer as nat), + }).all_mb0_bits_are_zero() + }), + { + let or1 = MASK_FLAG_P; + let or2 = if is_page && layer != 3 { + MASK_L1_PG_FLAG_PS as u64 + } else { + 0 + }; + let or3 = if is_writable { + MASK_FLAG_RW as u64 + } else { + 0 + }; + let or4 = if is_supervisor { + 0 + } else { + MASK_FLAG_US as u64 + }; + let or5 = if is_writethrough { + MASK_FLAG_PWT as u64 + } else { + 0 + }; + let or6 = if disable_cache { + MASK_FLAG_PCD as u64 + } else { + 0 + }; + let or7 = if disable_execute { + MASK_FLAG_XD as u64 + } else { + 0 + }; + let e = address | or1 | or2 | or3 | or4 | or5 | or6 | or7; + let mw: u64 = MAX_PHYADDR_WIDTH; + assert(forall|a: u64| #![auto] a == a | 0) by (bit_vector); + axiom_max_phyaddr_width_facts(); + assert(forall|a: u64, i: u64| + #![auto] + i < 12 ==> a & bitmask_inc!(12u64,sub(mw,1)) == a ==> a & bit!(i) == 0) by (bit_vector) + requires + 32 <= mw <= 52, + ; + assert(forall|a: u64, i: u64| + #![auto] + i != 7 && (a & bit!(7u64) == 0) ==> (a | bit!(i)) & bit!(7u64) == 0) by (bit_vector); + assert(forall|a: u64, i: u64| + #![auto] + i < 13 && (a & bitmask_inc!(13u64,29u64) == 0) ==> ((a | bit!(i)) + & bitmask_inc!(13u64,29u64) == 0)) by (bit_vector); + assert(forall|a: u64, i: u64| + #![auto] + i > 29 && (a & bitmask_inc!(13u64,29u64) == 0) ==> ((a | bit!(i)) + & bitmask_inc!(13u64,29u64) == 0)) by (bit_vector); + assert(forall|a: u64, i: u64| + #![auto] + i < 13 && (a & bitmask_inc!(13u64,20u64) == 0) ==> ((a | bit!(i)) + & bitmask_inc!(13u64,20u64) == 0)) by (bit_vector); + assert(forall|a: u64, i: u64| + #![auto] + i > 20 && (a & bitmask_inc!(13u64,20u64) == 0) ==> ((a | bit!(i)) + & bitmask_inc!(13u64,20u64) == 0)) by (bit_vector); + assert(forall|a: u64, i: u64| + #![auto] + i < mw && (a & bitmask_inc!(mw,51u64) == 0) ==> 
((a | bit!(i)) & bitmask_inc!(mw,51u64) + == 0)) by (bit_vector); + assert(forall|a: u64, i: u64| + #![auto] + i > 51 && (a & bitmask_inc!(mw,51u64) == 0) ==> ((a | bit!(i)) & bitmask_inc!(mw,51u64) + == 0)) by (bit_vector) + requires + mw <= 52, + ; + assert(address & bitmask_inc!(mw, 51) == 0) by (bit_vector) + requires + address & bitmask_inc!(12u64, mw - 1) == address, + 32 <= mw <= 52, + ; + assert(forall|a: u64, i: u64| + #![auto] + i < mw && (a & bitmask_inc!(mw,62u64) == 0) ==> ((a | bit!(i)) & bitmask_inc!(mw,62u64) + == 0)) by (bit_vector); + assert(forall|a: u64, i: u64| + #![auto] + i > 62 && (a & bitmask_inc!(mw,62u64) == 0) ==> ((a | bit!(i)) & bitmask_inc!(mw,62u64) + == 0)) by (bit_vector) + requires + mw <= 52, + ; + assert(address & bitmask_inc!(mw, 62) == 0) by (bit_vector) + requires + address & bitmask_inc!(12u64, mw - 1) == address, + 32 <= mw <= 52, + ; + PageDirectoryEntry::lemma_new_entry_addr_mask_is_address( + layer, + address, + is_page, + is_writable, + is_supervisor, + is_writethrough, + disable_cache, + disable_execute, + ); + if layer == 0 { + assert(!is_page); + assert(e & bit!(7u64) == 0); + assert(e & bitmask_inc!(MAX_PHYADDR_WIDTH, 51) == 0); + } else if layer == 1 { + if is_page { + assert(address & bitmask_inc!(30u64,sub(mw,1)) == address ==> address + & bitmask_inc!(13u64,29u64) == 0) by (bit_vector); + assert(e & bitmask_inc!(13u64,29u64) == 0); + assert(e & bitmask_inc!(MAX_PHYADDR_WIDTH, 51) == 0); + } else { + assert(e & bit!(7u64) == 0); + assert(e & bitmask_inc!(MAX_PHYADDR_WIDTH, 51) == 0); + } + } else if layer == 2 { + if is_page { + assert(address & bitmask_inc!(21u64,sub(mw,1)) == address ==> address + & bitmask_inc!(13u64,20u64) == 0) by (bit_vector); + assert(e & bitmask_inc!(13u64,20u64) == 0); + assert(e & bitmask_inc!(MAX_PHYADDR_WIDTH, 62) == 0); + } else { + assert(e & bit!(7u64) == 0); + assert(e & bitmask_inc!(MAX_PHYADDR_WIDTH, 62) == 0); } + } else if layer == 3 { + assert(is_page); + // assert(e & bit!(7u64) == 0); + assert(e & bitmask_inc!(MAX_PHYADDR_WIDTH, 62) == 0); + } else { + assert(false); + } + let pde = PageDirectoryEntry { entry: e, layer: Ghost(layer as nat) }; + reveal(PageDirectoryEntry::all_mb0_bits_are_zero); + assert(pde.all_mb0_bits_are_zero()); + } - pub fn new_page_entry(layer: usize, pte: PageTableEntryExec) -> (r: Self) - requires - 0 < layer <= 3, - addr_is_zero_padded(layer as nat, pte.frame.base as u64, true), - pte.frame.base as u64 & MASK_ADDR == pte.frame.base as u64, - ensures - r.all_mb0_bits_are_zero(), - r.hp_pat_is_zero(), - r@.is_Page(), - r.layer@ == layer, - r@.get_Page_addr() == pte.frame.base, - r.entry & MASK_ADDR == pte.frame.base, - r.entry & MASK_FLAG_P == MASK_FLAG_P, - (r.entry & MASK_L1_PG_FLAG_PS == MASK_L1_PG_FLAG_PS) == (layer != 3), - (r.entry & MASK_FLAG_RW == MASK_FLAG_RW) == pte.flags.is_writable, - r@.get_Page_flag_RW() == pte.flags.is_writable, - (r.entry & MASK_FLAG_US == MASK_FLAG_US) == !pte.flags.is_supervisor, - r@.get_Page_flag_US() == !pte.flags.is_supervisor, - r.entry & MASK_FLAG_PWT != MASK_FLAG_PWT, - r.entry & MASK_FLAG_PCD != MASK_FLAG_PCD, - (r.entry & MASK_FLAG_XD == MASK_FLAG_XD) == pte.flags.disable_execute, - r@.get_Page_flag_XD() == pte.flags.disable_execute, - { - Self::new_entry(layer, pte.frame.base as u64, true, pte.flags.is_writable, pte.flags.is_supervisor, false, false, pte.flags.disable_execute) - } - - pub fn new_dir_entry(layer: usize, address: u64) -> (r: Self) - requires - layer < 3, - address & MASK_DIR_ADDR == address - ensures - 
r.all_mb0_bits_are_zero(), - r.hp_pat_is_zero(), - r@.is_Directory(), - r.layer@ == layer, - r@.get_Directory_addr() == address, - r@.get_Directory_flag_RW(), - r@.get_Directory_flag_US(), - !r@.get_Directory_flag_XD(), - { - Self::new_entry( - layer, - address, - false, // is_page - true, // is_writable - false, // is_supervisor - false, // is_writethrough - false, // disable_cache - false) // disable_execute - } - - pub fn new_entry( - layer: usize, - address: u64, - is_page: bool, - is_writable: bool, - is_supervisor: bool, - is_writethrough: bool, - disable_cache: bool, - disable_execute: bool, - ) -> (r: PageDirectoryEntry) - requires - layer <= 3, - if is_page { 0 < layer } else { layer < 3 }, - addr_is_zero_padded(layer as nat, address, is_page), - address & MASK_ADDR == address, - ensures - r.all_mb0_bits_are_zero(), - if is_page { r@.is_Page() && r@.get_Page_addr() == address } else { r@.is_Directory() && r@.get_Directory_addr() == address}, - r.hp_pat_is_zero(), - r.layer@ == layer, - r.entry & MASK_ADDR == address, - r.entry & MASK_FLAG_P == MASK_FLAG_P, - (r.entry & MASK_L1_PG_FLAG_PS == MASK_L1_PG_FLAG_PS) == (is_page && layer != 3), - (r.entry & MASK_FLAG_RW == MASK_FLAG_RW) == is_writable, - (r.entry & MASK_FLAG_US == MASK_FLAG_US) == !is_supervisor, - (r.entry & MASK_FLAG_PWT == MASK_FLAG_PWT) == is_writethrough, - (r.entry & MASK_FLAG_PCD == MASK_FLAG_PCD) == disable_cache, - (r.entry & MASK_FLAG_XD == MASK_FLAG_XD) == disable_execute, - { - let e = - PageDirectoryEntry { - entry: { - address - | MASK_FLAG_P - | if is_page && layer != 3 { MASK_L1_PG_FLAG_PS } else { 0 } - | if is_writable { MASK_FLAG_RW } else { 0 } - | if is_supervisor { 0 } else { MASK_FLAG_US } - | if is_writethrough { MASK_FLAG_PWT } else { 0 } - | if disable_cache { MASK_FLAG_PCD } else { 0 } - | if disable_execute { MASK_FLAG_XD } else { 0 } - }, - layer: Ghost(layer as nat), + pub proof fn lemma_new_entry_addr_mask_is_address( + layer: usize, + address: u64, + is_page: bool, + is_writable: bool, + is_supervisor: bool, + is_writethrough: bool, + disable_cache: bool, + disable_execute: bool, + ) + requires + layer <= 3, + if is_page { + 0 < layer + } else { + layer < 3 + }, + addr_is_zero_padded(layer as nat, address, is_page), + address & MASK_ADDR == address, + ensures + ({ + let e = address | MASK_FLAG_P | if is_page && layer != 3 { + MASK_L1_PG_FLAG_PS + } else { + 0 + } | if is_writable { + MASK_FLAG_RW + } else { + 0 + } | if is_supervisor { + 0 + } else { + MASK_FLAG_US + } | if is_writethrough { + MASK_FLAG_PWT + } else { + 0 + } | if disable_cache { + MASK_FLAG_PCD + } else { + 0 + } | if disable_execute { + MASK_FLAG_XD + } else { + 0 }; - - proof { - PageDirectoryEntry::lemma_new_entry_addr_mask_is_address(layer, address, is_page, is_writable, is_supervisor, is_writethrough, disable_cache, disable_execute); - PageDirectoryEntry::lemma_new_entry_mb0_bits_are_zero(layer, address, is_page, is_writable, is_supervisor, is_writethrough, disable_cache, disable_execute); - if is_page { e.lemma_addr_mask_when_hp_pat_is_zero(); } - } - e + &&& e & MASK_ADDR == address + &&& e & MASK_FLAG_P == MASK_FLAG_P + &&& (e & MASK_L1_PG_FLAG_PS == MASK_L1_PG_FLAG_PS) == (is_page && layer != 3) + &&& (e & MASK_FLAG_RW == MASK_FLAG_RW) == is_writable + &&& (e & MASK_FLAG_US == MASK_FLAG_US) == !is_supervisor + &&& (e & MASK_FLAG_PWT == MASK_FLAG_PWT) == is_writethrough + &&& (e & MASK_FLAG_PCD == MASK_FLAG_PCD) == disable_cache + &&& (e & MASK_FLAG_XD == MASK_FLAG_XD) == disable_execute + &&& (is_page && layer == 
1 ==> e & MASK_PG_FLAG_PAT == 0) + &&& (is_page && layer == 2 ==> e & MASK_PG_FLAG_PAT == 0) + }), + { + let or1 = MASK_FLAG_P; + let or2 = if is_page && layer != 3 { + MASK_L1_PG_FLAG_PS as u64 + } else { + 0 + }; + let or3 = if is_writable { + MASK_FLAG_RW as u64 + } else { + 0 + }; + let or4 = if is_supervisor { + 0 + } else { + MASK_FLAG_US as u64 + }; + let or5 = if is_writethrough { + MASK_FLAG_PWT as u64 + } else { + 0 + }; + let or6 = if disable_cache { + MASK_FLAG_PCD as u64 + } else { + 0 + }; + let or7 = if disable_execute { + MASK_FLAG_XD as u64 + } else { + 0 + }; + let e = address | or1 | or2 | or3 | or4 | or5 | or6 | or7; + let mw: u64 = MAX_PHYADDR_WIDTH; + axiom_max_phyaddr_width_facts(); + assert(forall|a: u64, x: u64| x < 64 && (a & bit!(x) == 0) ==> a & bit!(x) != bit!(x)) + by (bit_vector); + assert(forall|a: u64| #![auto] a == a | 0) by (bit_vector); + assert(forall|a: u64, i: u64| + #![auto] + i < 12 ==> a & bitmask_inc!(12u64, sub(mw, 1)) == (a | bit!(i)) + & bitmask_inc!(12u64, sub(mw, 1))) by (bit_vector) + requires + 32 <= mw <= 52, + ; + assert(forall|a: u64, i: u64| + #![auto] + i > sub(mw, 1) ==> a & bitmask_inc!(12u64, sub(mw, 1)) == (a | bit!(i)) + & bitmask_inc!(12u64, sub(mw, 1))) by (bit_vector) + requires + 32 <= mw <= 52, + ; + assert(forall|a: u64, i: u64| + #![auto] + i < 12 ==> a & bitmask_inc!(12u64, sub(mw, 1)) == a ==> a & bit!(i) == 0) + by (bit_vector) + requires + 32 <= mw <= 52, + ; + assert(forall|a: u64, i: u64| + #![auto] + i > sub(mw, 1) ==> a & bitmask_inc!(12u64, sub(mw, 1)) == a ==> a & bit!(i) == 0) + by (bit_vector) + requires + 32 <= mw <= 52, + ; + assert(forall|a: u64, i: u64| + #![auto] + i < 64 ==> a & bit!(i) == 0 ==> (a | bit!(i)) & bit!(i) == bit!(i)) by (bit_vector); + assert(forall|a: u64, i: u64, j: u64| + #![auto] + i != j ==> a & bit!(i) == (a | bit!(j)) & bit!(i)) by (bit_vector); + assert({ + &&& is_page && layer == 1 ==> e & MASK_PG_FLAG_PAT == 0 + &&& is_page && layer == 2 ==> e & MASK_PG_FLAG_PAT == 0 + }) by { + if is_page && layer == 1 { + assert(address & bit!(12u64) == 0) by (bit_vector) + requires + address & bitmask_inc!(30u64, sub(mw, 1)) == address, + ; } - - pub fn flags(&self) -> (res: Flags) - requires - self.layer() <= 3, - self@.is_Page() - ensures - res.is_writable <==> self.entry & MASK_FLAG_RW == MASK_FLAG_RW, - res.is_supervisor <==> self.entry & MASK_FLAG_US != MASK_FLAG_US, - res.disable_execute <==> self.entry & MASK_FLAG_XD == MASK_FLAG_XD, - { - Flags { - is_writable: self.entry & MASK_FLAG_RW == MASK_FLAG_RW, - is_supervisor: self.entry & MASK_FLAG_US != MASK_FLAG_US, - disable_execute: self.entry & MASK_FLAG_XD == MASK_FLAG_XD, - } + if is_page && layer == 2 { + assert(address & bit!(12u64) == 0) by (bit_vector) + requires + address & bitmask_inc!(21u64, sub(mw, 1)) == address, + ; } + }; + } - pub fn address(&self) -> (res: u64) - requires - self.layer() <= 3, - self@.is_Page() ==> 0 < self.layer(), - self.hp_pat_is_zero(), - self.all_mb0_bits_are_zero(), - !self@.is_Empty(), - ensures - res as usize == match self@ { - GhostPageDirectoryEntry::Page { addr, .. } => addr, - GhostPageDirectoryEntry::Directory { addr, .. } => addr, - GhostPageDirectoryEntry::Empty => arbitrary(), - } - { - proof { - match self@ { - GhostPageDirectoryEntry::Page { addr, .. } => self.lemma_addr_mask_when_hp_pat_is_zero(), - GhostPageDirectoryEntry::Directory { addr, .. 
} => { }, - GhostPageDirectoryEntry::Empty => { }, - } - } - self.entry & MASK_ADDR - } + pub fn new_page_entry(layer: usize, pte: PageTableEntryExec) -> (r: Self) + requires + 0 < layer <= 3, + addr_is_zero_padded(layer as nat, pte.frame.base as u64, true), + pte.frame.base as u64 & MASK_ADDR == pte.frame.base as u64, + ensures + r.all_mb0_bits_are_zero(), + r.hp_pat_is_zero(), + r@.is_Page(), + r.layer@ == layer, + r@.get_Page_addr() == pte.frame.base, + r.entry & MASK_ADDR == pte.frame.base, + r.entry & MASK_FLAG_P == MASK_FLAG_P, + (r.entry & MASK_L1_PG_FLAG_PS == MASK_L1_PG_FLAG_PS) == (layer != 3), + (r.entry & MASK_FLAG_RW == MASK_FLAG_RW) == pte.flags.is_writable, + r@.get_Page_flag_RW() == pte.flags.is_writable, + (r.entry & MASK_FLAG_US == MASK_FLAG_US) == !pte.flags.is_supervisor, + r@.get_Page_flag_US() == !pte.flags.is_supervisor, + r.entry & MASK_FLAG_PWT != MASK_FLAG_PWT, + r.entry & MASK_FLAG_PCD != MASK_FLAG_PCD, + (r.entry & MASK_FLAG_XD == MASK_FLAG_XD) == pte.flags.disable_execute, + r@.get_Page_flag_XD() == pte.flags.disable_execute, + { + Self::new_entry( + layer, + pte.frame.base as u64, + true, + pte.flags.is_writable, + pte.flags.is_supervisor, + false, + false, + pte.flags.disable_execute, + ) + } - pub fn is_mapping(&self) -> (r: bool) - requires - self.all_mb0_bits_are_zero(), - self.layer() <= 3 - ensures - r == !self@.is_Empty(), - { - (self.entry & MASK_FLAG_P) == MASK_FLAG_P - } + pub fn new_dir_entry(layer: usize, address: u64) -> (r: Self) + requires + layer < 3, + address & MASK_DIR_ADDR == address, + ensures + r.all_mb0_bits_are_zero(), + r.hp_pat_is_zero(), + r@.is_Directory(), + r.layer@ == layer, + r@.get_Directory_addr() == address, + r@.get_Directory_flag_RW(), + r@.get_Directory_flag_US(), + !r@.get_Directory_flag_XD(), + { + Self::new_entry( + layer, + address, + false, // is_page + true, // is_writable + false, // is_supervisor + false, // is_writethrough + false, // disable_cache + false, + ) // disable_execute - pub fn is_page(&self, layer: usize) -> (r: bool) - requires - !self@.is_Empty(), - layer as nat == self.layer@, - layer <= 3, - ensures - if r { self@.is_Page() } else { self@.is_Directory() }, - { - if layer == 3 { - true - } else if layer == 0 { - false + } + + pub fn new_entry( + layer: usize, + address: u64, + is_page: bool, + is_writable: bool, + is_supervisor: bool, + is_writethrough: bool, + disable_cache: bool, + disable_execute: bool, + ) -> (r: PageDirectoryEntry) + requires + layer <= 3, + if is_page { + 0 < layer + } else { + layer < 3 + }, + addr_is_zero_padded(layer as nat, address, is_page), + address & MASK_ADDR == address, + ensures + r.all_mb0_bits_are_zero(), + if is_page { + r@.is_Page() && r@.get_Page_addr() == address + } else { + r@.is_Directory() && r@.get_Directory_addr() == address + }, + r.hp_pat_is_zero(), + r.layer@ == layer, + r.entry & MASK_ADDR == address, + r.entry & MASK_FLAG_P == MASK_FLAG_P, + (r.entry & MASK_L1_PG_FLAG_PS == MASK_L1_PG_FLAG_PS) == (is_page && layer != 3), + (r.entry & MASK_FLAG_RW == MASK_FLAG_RW) == is_writable, + (r.entry & MASK_FLAG_US == MASK_FLAG_US) == !is_supervisor, + (r.entry & MASK_FLAG_PWT == MASK_FLAG_PWT) == is_writethrough, + (r.entry & MASK_FLAG_PCD == MASK_FLAG_PCD) == disable_cache, + (r.entry & MASK_FLAG_XD == MASK_FLAG_XD) == disable_execute, + { + let e = PageDirectoryEntry { + entry: { + address | MASK_FLAG_P | if is_page && layer != 3 { + MASK_L1_PG_FLAG_PS + } else { + 0 + } | if is_writable { + MASK_FLAG_RW + } else { + 0 + } | if is_supervisor { + 0 } else { 
- (self.entry & MASK_L1_PG_FLAG_PS) == MASK_L1_PG_FLAG_PS + MASK_FLAG_US + } | if is_writethrough { + MASK_FLAG_PWT + } else { + 0 + } | if disable_cache { + MASK_FLAG_PCD + } else { + 0 + } | if disable_execute { + MASK_FLAG_XD + } else { + 0 } - } + }, + layer: Ghost(layer as nat), + }; + proof { + PageDirectoryEntry::lemma_new_entry_addr_mask_is_address( + layer, + address, + is_page, + is_writable, + is_supervisor, + is_writethrough, + disable_cache, + disable_execute, + ); + PageDirectoryEntry::lemma_new_entry_mb0_bits_are_zero( + layer, + address, + is_page, + is_writable, + is_supervisor, + is_writethrough, + disable_cache, + disable_execute, + ); + if is_page { + e.lemma_addr_mask_when_hp_pat_is_zero(); + } + } + e + } - pub fn is_dir(&self, layer: usize) -> (r: bool) - requires - !self@.is_Empty(), - layer as nat == self.layer@, - layer <= 3, - ensures - if r { self@.is_Directory() } else { self@.is_Page() }, - { - !self.is_page(layer) + pub fn flags(&self) -> (res: Flags) + requires + self.layer() <= 3, + self@.is_Page(), + ensures + res.is_writable <==> self.entry & MASK_FLAG_RW == MASK_FLAG_RW, + res.is_supervisor <==> self.entry & MASK_FLAG_US != MASK_FLAG_US, + res.disable_execute <==> self.entry & MASK_FLAG_XD == MASK_FLAG_XD, + { + Flags { + is_writable: self.entry & MASK_FLAG_RW == MASK_FLAG_RW, + is_supervisor: self.entry & MASK_FLAG_US != MASK_FLAG_US, + disable_execute: self.entry & MASK_FLAG_XD == MASK_FLAG_XD, + } + } + + pub fn address(&self) -> (res: u64) + requires + self.layer() <= 3, + self@.is_Page() ==> 0 < self.layer(), + self.hp_pat_is_zero(), + self.all_mb0_bits_are_zero(), + !self@.is_Empty(), + ensures + res as usize == match self@ { + GhostPageDirectoryEntry::Page { addr, .. } => addr, + GhostPageDirectoryEntry::Directory { addr, .. } => addr, + GhostPageDirectoryEntry::Empty => arbitrary(), + }, + { + proof { + match self@ { + GhostPageDirectoryEntry::Page { + addr, + .. + } => self.lemma_addr_mask_when_hp_pat_is_zero(), + GhostPageDirectoryEntry::Directory { addr, .. } => {}, + GhostPageDirectoryEntry::Empty => {}, } } + self.entry & MASK_ADDR + } + + pub fn is_mapping(&self) -> (r: bool) + requires + self.all_mb0_bits_are_zero(), + self.layer() <= 3, + ensures + r == !self@.is_Empty(), + { + (self.entry & MASK_FLAG_P) == MASK_FLAG_P + } - /// PTDir is used in the `ghost_pt` field of the PageTable. It's used to keep track of the memory - /// regions in which the corresponding translation structures are stored. - pub struct PTDir { - /// Region of physical memory in which this PTDir is stored - pub region: MemRegion, - pub entries: Seq>, - /// reflexive-transitive closure of `region` over `entries` - pub used_regions: Set, + pub fn is_page(&self, layer: usize) -> (r: bool) + requires + !self@.is_Empty(), + layer as nat == self.layer@, + layer <= 3, + ensures + if r { + self@.is_Page() + } else { + self@.is_Directory() + }, + { + if layer == 3 { + true + } else if layer == 0 { + false + } else { + (self.entry & MASK_L1_PG_FLAG_PS) == MASK_L1_PG_FLAG_PS } + } - // Page table methods are in a separate module for namespacing, since we can't use a struct + impl - // (To use a struct we'd have to keep a &mut reference to the memory in the struct, which Verus - // doesn't support. Or we keep an owned copy but then can't have an external interface that mutably - // borrows a memory.) 
- pub mod PT { + pub fn is_dir(&self, layer: usize) -> (r: bool) + requires + !self@.is_Empty(), + layer as nat == self.layer@, + layer <= 3, + ensures + if r { + self@.is_Directory() + } else { + self@.is_Page() + }, + { + !self.is_page(layer) + } +} - use super::*; +/// PTDir is used in the `ghost_pt` field of the PageTable. It's used to keep track of the memory +/// regions in which the corresponding translation structures are stored. +pub struct PTDir { + /// Region of physical memory in which this PTDir is stored + pub region: MemRegion, + pub entries: Seq>, + /// reflexive-transitive closure of `region` over `entries` + pub used_regions: Set, +} - pub open spec(checked) fn well_formed(mem: &mem::PageTableMemory, pt: PTDir, ptr: usize) -> bool { - &&& x86_arch_spec.inv() - } +// Page table methods are in a separate module for namespacing, since we can't use a struct + impl +// (To use a struct we'd have to keep a &mut reference to the memory in the struct, which Verus +// doesn't support. Or we keep an owned copy but then can't have an external interface that mutably +// borrows a memory.) +pub mod PT { + use super::*; + + pub open spec(checked) fn well_formed( + mem: &mem::PageTableMemory, + pt: PTDir, + ptr: usize, + ) -> bool { + &&& x86_arch_spec.inv() + } - pub open spec(checked) fn inv(mem: &mem::PageTableMemory, pt: PTDir) -> bool { - &&& pt.region == mem.cr3_spec()@ - &&& inv_at(mem, pt, 0, mem.cr3_spec().base) - } + pub open spec(checked) fn inv(mem: &mem::PageTableMemory, pt: PTDir) -> bool { + &&& pt.region == mem.cr3_spec()@ + &&& inv_at(mem, pt, 0, mem.cr3_spec().base) + } - /// Get the view of the entry at address ptr + i * WORD_SIZE - pub open spec fn entry_at_spec(mem: &mem::PageTableMemory, pt: PTDir, layer: nat, ptr: usize, i: nat) -> PageDirectoryEntry { - PageDirectoryEntry { - entry: mem.spec_read(i, pt.region), - layer: Ghost(layer), - } - } + /// Get the view of the entry at address ptr + i * WORD_SIZE + pub open spec fn entry_at_spec( + mem: &mem::PageTableMemory, + pt: PTDir, + layer: nat, + ptr: usize, + i: nat, + ) -> PageDirectoryEntry { + PageDirectoryEntry { entry: mem.spec_read(i, pt.region), layer: Ghost(layer) } + } - /// Get the view of the entry at address ptr + i * WORD_SIZE - pub open spec fn view_at(mem: &mem::PageTableMemory, pt: PTDir, layer: nat, ptr: usize, i: nat) -> GhostPageDirectoryEntry { - PageDirectoryEntry { - entry: mem.spec_read(i, pt.region), - layer: Ghost(layer), - }@ - } + /// Get the view of the entry at address ptr + i * WORD_SIZE + pub open spec fn view_at( + mem: &mem::PageTableMemory, + pt: PTDir, + layer: nat, + ptr: usize, + i: nat, + ) -> GhostPageDirectoryEntry { + PageDirectoryEntry { entry: mem.spec_read(i, pt.region), layer: Ghost(layer) }@ + } - /// Get the entry at address ptr + i * WORD_SIZE - fn entry_at(mem: &mem::PageTableMemory, Ghost(pt): Ghost, layer: usize, ptr: usize, i: usize) -> (res: PageDirectoryEntry) - requires - i < 512, - inv_at(mem, pt, layer as nat, ptr), - ensures - res.layer@ == layer as nat, - res@ === view_at(mem, pt, layer as nat, ptr, i as nat), - res == entry_at_spec(mem, pt, layer as nat, ptr, i as nat), - res.hp_pat_is_zero(), - (res@.is_Page() ==> 0 < res.layer()), - { - assert(aligned((ptr + i * WORD_SIZE) as nat, 8)) by { - assert(inv_at(mem, pt, layer as nat, ptr)); - assert(well_formed(mem, pt, ptr)); - assert(ptr % PAGE_SIZE == 0); - }; - // triggering - proof { let _ = entry_at_spec(mem, pt, layer as nat, ptr, i as nat); } - PageDirectoryEntry { - entry: mem.read(ptr, i, 
Ghost(pt.region)), - layer: Ghost(layer as nat), - } + /// Get the entry at address ptr + i * WORD_SIZE + fn entry_at( + mem: &mem::PageTableMemory, + Ghost(pt): Ghost, + layer: usize, + ptr: usize, + i: usize, + ) -> (res: PageDirectoryEntry) + requires + i < 512, + inv_at(mem, pt, layer as nat, ptr), + ensures + res.layer@ == layer as nat, + res@ === view_at(mem, pt, layer as nat, ptr, i as nat), + res == entry_at_spec(mem, pt, layer as nat, ptr, i as nat), + res.hp_pat_is_zero(), + (res@.is_Page() ==> 0 < res.layer()), + { + assert(aligned((ptr + i * WORD_SIZE) as nat, 8)) by { + assert(inv_at(mem, pt, layer as nat, ptr)); + assert(well_formed(mem, pt, ptr)); + assert(ptr % PAGE_SIZE == 0); + }; + // triggering + proof { + let _ = entry_at_spec(mem, pt, layer as nat, ptr, i as nat); } + PageDirectoryEntry { entry: mem.read(ptr, i, Ghost(pt.region)), layer: Ghost(layer as nat) } + } - pub open spec fn ghost_pt_matches_structure(mem: &mem::PageTableMemory, pt: PTDir, layer: nat, ptr: usize) -> bool { - forall|i: nat| #![trigger pt.entries[i as int], view_at(mem, pt, layer, ptr, i)] + pub open spec fn ghost_pt_matches_structure( + mem: &mem::PageTableMemory, + pt: PTDir, + layer: nat, + ptr: usize, + ) -> bool { + forall|i: nat| + #![trigger pt.entries[i as int], view_at(mem, pt, layer, ptr, i)] i < X86_NUM_ENTRIES ==> { let entry = view_at(mem, pt, layer, ptr, i); entry.is_Directory() == pt.entries[i as int].is_Some() } - } + } - pub open spec fn directories_obey_invariant_at(mem: &mem::PageTableMemory, pt: PTDir, layer: nat, ptr: usize) -> bool - decreases X86_NUM_LAYERS - layer, 0nat - when well_formed(mem, pt, ptr) && layer_in_range(layer) - { - forall|i: nat| i < X86_NUM_ENTRIES ==> { + pub open spec fn directories_obey_invariant_at( + mem: &mem::PageTableMemory, + pt: PTDir, + layer: nat, + ptr: usize, + ) -> bool + decreases X86_NUM_LAYERS - layer, 0nat, + when well_formed(mem, pt, ptr) && layer_in_range(layer) + { + forall|i: nat| + i < X86_NUM_ENTRIES ==> { let entry = #[trigger] view_at(mem, pt, layer, ptr, i); entry.is_Directory() ==> { - &&& inv_at(mem, pt.entries[i as int].get_Some_0(), layer + 1, entry.get_Directory_addr()) + &&& inv_at( + mem, + pt.entries[i as int].get_Some_0(), + layer + 1, + entry.get_Directory_addr(), + ) } } - } + } - pub open spec fn empty_at(mem: &mem::PageTableMemory, pt: PTDir, layer: nat, ptr: usize) -> bool - recommends well_formed(mem, pt, ptr) - { - forall|i: nat| i < X86_NUM_ENTRIES ==> view_at(mem, pt, layer, ptr, i).is_Empty() - } + pub open spec fn empty_at(mem: &mem::PageTableMemory, pt: PTDir, layer: nat, ptr: usize) -> bool + recommends + well_formed(mem, pt, ptr), + { + forall|i: nat| i < X86_NUM_ENTRIES ==> view_at(mem, pt, layer, ptr, i).is_Empty() + } - pub open spec(checked) fn layer_in_range(layer: nat) -> bool { - layer < X86_NUM_LAYERS - } + pub open spec(checked) fn layer_in_range(layer: nat) -> bool { + layer < X86_NUM_LAYERS + } - pub open spec(checked) fn inv_at(mem: &mem::PageTableMemory, pt: PTDir, layer: nat, ptr: usize) -> bool - decreases X86_NUM_LAYERS - layer - { - &&& ptr % PAGE_SIZE == 0 - &&& well_formed(mem, pt, ptr) - &&& mem.inv() - &&& mem.regions().contains(pt.region) - &&& pt.region.base == ptr - &&& pt.region.size == PAGE_SIZE - &&& mem.region_view(pt.region).len() == pt.entries.len() - &&& layer_in_range(layer) - &&& pt.entries.len() == X86_NUM_ENTRIES - &&& directories_obey_invariant_at(mem, pt, layer, ptr) - &&& directories_have_flags(mem, pt, layer, ptr) - &&& ghost_pt_matches_structure(mem, pt, layer, 
ptr) - &&& ghost_pt_used_regions_rtrancl(mem, pt, layer, ptr) - &&& ghost_pt_used_regions_pairwise_disjoint(mem, pt, layer, ptr) - &&& ghost_pt_region_notin_used_regions(mem, pt, layer, ptr) - &&& pt.used_regions.subset_of(mem.regions()) - &&& hp_pat_is_zero(mem, pt, layer, ptr) - &&& entry_mb0_bits_are_zero(mem, pt, layer, ptr) - } + pub open spec(checked) fn inv_at( + mem: &mem::PageTableMemory, + pt: PTDir, + layer: nat, + ptr: usize, + ) -> bool + decreases X86_NUM_LAYERS - layer, + { + &&& ptr % PAGE_SIZE == 0 + &&& well_formed(mem, pt, ptr) + &&& mem.inv() + &&& mem.regions().contains(pt.region) + &&& pt.region.base == ptr + &&& pt.region.size == PAGE_SIZE + &&& mem.region_view(pt.region).len() == pt.entries.len() + &&& layer_in_range(layer) + &&& pt.entries.len() == X86_NUM_ENTRIES + &&& directories_obey_invariant_at(mem, pt, layer, ptr) + &&& directories_have_flags(mem, pt, layer, ptr) + &&& ghost_pt_matches_structure(mem, pt, layer, ptr) + &&& ghost_pt_used_regions_rtrancl(mem, pt, layer, ptr) + &&& ghost_pt_used_regions_pairwise_disjoint(mem, pt, layer, ptr) + &&& ghost_pt_region_notin_used_regions(mem, pt, layer, ptr) + &&& pt.used_regions.subset_of(mem.regions()) + &&& hp_pat_is_zero(mem, pt, layer, ptr) + &&& entry_mb0_bits_are_zero(mem, pt, layer, ptr) + } - pub open spec fn directories_have_flags(mem: &mem::PageTableMemory, pt: PTDir, layer: nat, ptr: usize) -> bool { - forall|i: nat| i < X86_NUM_ENTRIES ==> { + pub open spec fn directories_have_flags( + mem: &mem::PageTableMemory, + pt: PTDir, + layer: nat, + ptr: usize, + ) -> bool { + forall|i: nat| + i < X86_NUM_ENTRIES ==> { let entry = #[trigger] view_at(mem, pt, layer, ptr, i); - entry.is_Directory() ==> entry.get_Directory_flag_RW() && entry.get_Directory_flag_US() && !entry.get_Directory_flag_XD() + entry.is_Directory() ==> entry.get_Directory_flag_RW() + && entry.get_Directory_flag_US() && !entry.get_Directory_flag_XD() } - } + } - pub open spec fn entry_mb0_bits_are_zero(mem: &mem::PageTableMemory, pt: PTDir, layer: nat, ptr: usize) -> bool { - forall|i: nat| i < X86_NUM_ENTRIES ==> - (#[trigger] entry_at_spec(mem, pt, layer, ptr, i)).all_mb0_bits_are_zero() - } + pub open spec fn entry_mb0_bits_are_zero( + mem: &mem::PageTableMemory, + pt: PTDir, + layer: nat, + ptr: usize, + ) -> bool { + forall|i: nat| + i < X86_NUM_ENTRIES ==> (#[trigger] entry_at_spec( + mem, + pt, + layer, + ptr, + i, + )).all_mb0_bits_are_zero() + } - /// Entries for super pages and huge pages use bit 12 to denote the PAT flag. We always set that - /// flag to zero, which allows us to always use the same mask to get the address. - pub open spec fn hp_pat_is_zero(mem: &mem::PageTableMemory, pt: PTDir, layer: nat, ptr: usize) -> bool { - forall|i: nat| #![auto] i < X86_NUM_ENTRIES ==> entry_at_spec(mem, pt, layer, ptr, i).hp_pat_is_zero() - } + /// Entries for super pages and huge pages use bit 12 to denote the PAT flag. We always set that + /// flag to zero, which allows us to always use the same mask to get the address. 
+ pub open spec fn hp_pat_is_zero( + mem: &mem::PageTableMemory, + pt: PTDir, + layer: nat, + ptr: usize, + ) -> bool { + forall|i: nat| + #![auto] + i < X86_NUM_ENTRIES ==> entry_at_spec(mem, pt, layer, ptr, i).hp_pat_is_zero() + } - pub open spec fn ghost_pt_used_regions_pairwise_disjoint(mem: &mem::PageTableMemory, pt: PTDir, layer: nat, ptr: usize) -> bool { - forall|i: nat, j: nat, r: MemRegion| - i != j && - i < pt.entries.len() && pt.entries[i as int].is_Some() && - #[trigger] pt.entries[i as int].get_Some_0().used_regions.contains(r) && - j < pt.entries.len() && pt.entries[j as int].is_Some() - ==> !(#[trigger] pt.entries[j as int].get_Some_0().used_regions.contains(r)) - } + pub open spec fn ghost_pt_used_regions_pairwise_disjoint( + mem: &mem::PageTableMemory, + pt: PTDir, + layer: nat, + ptr: usize, + ) -> bool { + forall|i: nat, j: nat, r: MemRegion| + i != j && i < pt.entries.len() && pt.entries[i as int].is_Some() + && #[trigger] pt.entries[i as int].get_Some_0().used_regions.contains(r) && j + < pt.entries.len() && pt.entries[j as int].is_Some() ==> !( + #[trigger] pt.entries[j as int].get_Some_0().used_regions.contains(r)) + } - // TODO: this may be implied by the other ones - pub open spec fn ghost_pt_region_notin_used_regions(mem: &mem::PageTableMemory, pt: PTDir, layer: nat, ptr: usize) -> bool { - forall|i: nat| - i < pt.entries.len() && pt.entries[i as int].is_Some() - ==> !(#[trigger] pt.entries[i as int].get_Some_0().used_regions.contains(pt.region)) - } + // TODO: this may be implied by the other ones + pub open spec fn ghost_pt_region_notin_used_regions( + mem: &mem::PageTableMemory, + pt: PTDir, + layer: nat, + ptr: usize, + ) -> bool { + forall|i: nat| + i < pt.entries.len() && pt.entries[i as int].is_Some() ==> !( + #[trigger] pt.entries[i as int].get_Some_0().used_regions.contains(pt.region)) + } - pub open spec fn ghost_pt_used_regions_rtrancl(mem: &mem::PageTableMemory, pt: PTDir, layer: nat, ptr: usize) -> bool { - // reflexive - &&& pt.used_regions.contains(pt.region) - // transitive - &&& forall|i: nat, r: MemRegion| #![trigger pt.entries[i as int].get_Some_0().used_regions.contains(r), pt.used_regions.contains(r)] - i < pt.entries.len() && pt.entries[i as int].is_Some() && - pt.entries[i as int].get_Some_0().used_regions.contains(r) - ==> pt.used_regions.contains(r) - } + pub open spec fn ghost_pt_used_regions_rtrancl( + mem: &mem::PageTableMemory, + pt: PTDir, + layer: nat, + ptr: usize, + ) -> bool { + // reflexive + &&& pt.used_regions.contains(pt.region) + // transitive + + &&& forall|i: nat, r: MemRegion| + #![trigger pt.entries[i as int].get_Some_0().used_regions.contains(r), pt.used_regions.contains(r)] + i < pt.entries.len() && pt.entries[i as int].is_Some() + && pt.entries[i as int].get_Some_0().used_regions.contains(r) + ==> pt.used_regions.contains(r) + } - pub open spec fn interp_at(mem: &mem::PageTableMemory, pt: PTDir, layer: nat, ptr: usize, base_vaddr: nat) -> l1::Directory - decreases X86_NUM_LAYERS - layer, X86_NUM_ENTRIES, 2nat - { - decreases_when(inv_at(mem, pt, layer, ptr)); - l1::Directory { - entries: interp_at_aux(mem, pt, layer, ptr, base_vaddr, seq![]), - layer: layer, - base_vaddr, - arch: x86_arch_spec, - // We don't have to check the flags because we know (from the invariant) that all - // directories have these flags set. 
- flags: permissive_flags, - } + pub open spec fn interp_at( + mem: &mem::PageTableMemory, + pt: PTDir, + layer: nat, + ptr: usize, + base_vaddr: nat, + ) -> l1::Directory + decreases X86_NUM_LAYERS - layer, X86_NUM_ENTRIES, 2nat, + { + decreases_when(inv_at(mem, pt, layer, ptr)); + l1::Directory { + entries: interp_at_aux(mem, pt, layer, ptr, base_vaddr, seq![]), + layer: layer, + base_vaddr, + arch: x86_arch_spec, + // We don't have to check the flags because we know (from the invariant) that all + // directories have these flags set. + flags: permissive_flags, } + } - pub open spec fn interp_at_entry(mem: &mem::PageTableMemory, pt: PTDir, layer: nat, ptr: usize, base_vaddr: nat, idx: nat) -> l1::NodeEntry - decreases X86_NUM_LAYERS - layer, X86_NUM_ENTRIES - idx, 0nat - { - decreases_when(inv_at(mem, pt, layer, ptr)); - match view_at(mem, pt, layer, ptr, idx) { - GhostPageDirectoryEntry::Directory { addr: dir_addr, .. } => { - let entry_base = x86_arch_spec.entry_base(layer, base_vaddr, idx); - l1::NodeEntry::Directory(interp_at(mem, pt.entries[idx as int].get_Some_0(), layer + 1, dir_addr, entry_base)) + pub open spec fn interp_at_entry( + mem: &mem::PageTableMemory, + pt: PTDir, + layer: nat, + ptr: usize, + base_vaddr: nat, + idx: nat, + ) -> l1::NodeEntry + decreases X86_NUM_LAYERS - layer, X86_NUM_ENTRIES - idx, 0nat, + { + decreases_when(inv_at(mem, pt, layer, ptr)); + match view_at(mem, pt, layer, ptr, idx) { + GhostPageDirectoryEntry::Directory { addr: dir_addr, .. } => { + let entry_base = x86_arch_spec.entry_base(layer, base_vaddr, idx); + l1::NodeEntry::Directory( + interp_at( + mem, + pt.entries[idx as int].get_Some_0(), + layer + 1, + dir_addr, + entry_base, + ), + ) + }, + GhostPageDirectoryEntry::Page { + addr, + flag_RW, + flag_US, + flag_XD, + .. + } => l1::NodeEntry::Page( + PageTableEntry { + frame: MemRegion { base: addr as nat, size: x86_arch_spec.entry_size(layer) }, + flags: Flags { + is_writable: flag_RW, + is_supervisor: !flag_US, + disable_execute: flag_XD, + }, }, - GhostPageDirectoryEntry::Page { addr, flag_RW, flag_US, flag_XD, .. 
} => - l1::NodeEntry::Page( - PageTableEntry { - frame: MemRegion { base: addr as nat, size: x86_arch_spec.entry_size(layer) }, - flags: Flags { - is_writable: flag_RW, - is_supervisor: !flag_US, - disable_execute: flag_XD, - }, - }), - GhostPageDirectoryEntry::Empty => - l1::NodeEntry::Empty(), - } + ), + GhostPageDirectoryEntry::Empty => l1::NodeEntry::Empty(), } + } - pub open spec fn interp_at_aux(mem: &mem::PageTableMemory, pt: PTDir, layer: nat, ptr: usize, base_vaddr: nat, init: Seq) -> Seq - decreases X86_NUM_LAYERS - layer, X86_NUM_ENTRIES - init.len(), 1nat - when inv_at(mem, pt, layer, ptr) - { - if init.len() >= X86_NUM_ENTRIES { - init - } else { - let entry = interp_at_entry(mem, pt, layer, ptr, base_vaddr, init.len()); - interp_at_aux(mem, pt, layer, ptr, base_vaddr, init.push(entry)) - } + pub open spec fn interp_at_aux( + mem: &mem::PageTableMemory, + pt: PTDir, + layer: nat, + ptr: usize, + base_vaddr: nat, + init: Seq, + ) -> Seq + decreases X86_NUM_LAYERS - layer, X86_NUM_ENTRIES - init.len(), 1nat, + when inv_at(mem, pt, layer, ptr) + { + if init.len() >= X86_NUM_ENTRIES { + init + } else { + let entry = interp_at_entry(mem, pt, layer, ptr, base_vaddr, init.len()); + interp_at_aux(mem, pt, layer, ptr, base_vaddr, init.push(entry)) } + } - pub open spec fn interp(mem: &mem::PageTableMemory, pt: PTDir) -> l1::Directory { - interp_at(mem, pt, 0, mem.cr3_spec().base, 0) - } + pub open spec fn interp(mem: &mem::PageTableMemory, pt: PTDir) -> l1::Directory { + interp_at(mem, pt, 0, mem.cr3_spec().base, 0) + } - proof fn lemma_inv_at_different_memory(mem1: &mem::PageTableMemory, mem2: &mem::PageTableMemory, pt: PTDir, layer: nat, ptr: usize) - requires - inv_at(mem1, pt, layer, ptr), - forall|r: MemRegion| pt.used_regions.contains(r) - ==> #[trigger] mem1.region_view(r) === mem2.region_view(r), - // Some parts of mem2's invariant that we should already know - mem2.inv(), - mem2.regions().contains(pt.region), - pt.used_regions.subset_of(mem2.regions()), - ensures - inv_at(mem2, pt, layer, ptr), - decreases X86_NUM_LAYERS - layer - { - assert forall|i: nat| i < X86_NUM_ENTRIES implies - view_at(mem2, pt, layer, ptr, i) == view_at(mem1, pt, layer, ptr, i) by { }; - assert forall|i: nat| i < X86_NUM_ENTRIES implies - entry_at_spec(mem2, pt, layer, ptr, i) == entry_at_spec(mem1, pt, layer, ptr, i) by { }; - - // Prove directories_obey_invariant_at(mem2, pt, layer, ptr) - assert forall|i: nat| - i < X86_NUM_ENTRIES implies { - let entry = #[trigger] view_at(mem2, pt, layer, ptr, i); - entry.is_Directory() ==> inv_at(mem2, pt.entries[i as int].get_Some_0(), layer + 1, entry.get_Directory_addr()) - } by { - let entry = view_at(mem2, pt, layer, ptr, i); - if entry.is_Directory() { - assert(directories_obey_invariant_at(mem1, pt, layer, ptr)); - lemma_inv_at_different_memory(mem1, mem2, pt.entries[i as int].get_Some_0(), layer + 1, entry.get_Directory_addr()); - } - }; - } + proof fn lemma_inv_at_different_memory( + mem1: &mem::PageTableMemory, + mem2: &mem::PageTableMemory, + pt: PTDir, + layer: nat, + ptr: usize, + ) + requires + inv_at(mem1, pt, layer, ptr), + forall|r: MemRegion| + pt.used_regions.contains(r) ==> #[trigger] mem1.region_view(r) === mem2.region_view( + r, + ), + // Some parts of mem2's invariant that we should already know + mem2.inv(), + mem2.regions().contains(pt.region), + pt.used_regions.subset_of(mem2.regions()), + ensures + inv_at(mem2, pt, layer, ptr), + decreases X86_NUM_LAYERS - layer, + { + assert forall|i: nat| i < X86_NUM_ENTRIES implies view_at(mem2, pt, 
layer, ptr, i) + == view_at(mem1, pt, layer, ptr, i) by {}; + assert forall|i: nat| i < X86_NUM_ENTRIES implies entry_at_spec(mem2, pt, layer, ptr, i) + == entry_at_spec(mem1, pt, layer, ptr, i) by {}; + // Prove directories_obey_invariant_at(mem2, pt, layer, ptr) + assert forall|i: nat| i < X86_NUM_ENTRIES implies { + let entry = #[trigger] view_at(mem2, pt, layer, ptr, i); + entry.is_Directory() ==> inv_at( + mem2, + pt.entries[i as int].get_Some_0(), + layer + 1, + entry.get_Directory_addr(), + ) + } by { + let entry = view_at(mem2, pt, layer, ptr, i); + if entry.is_Directory() { + assert(directories_obey_invariant_at(mem1, pt, layer, ptr)); + lemma_inv_at_different_memory( + mem1, + mem2, + pt.entries[i as int].get_Some_0(), + layer + 1, + entry.get_Directory_addr(), + ); + } + }; + } - proof fn lemma_interp_at_entry_different_memory(mem1: &mem::PageTableMemory, pt1: PTDir, mem2: &mem::PageTableMemory, pt2: PTDir, layer: nat, ptr: usize, base: nat, idx: nat) - requires - idx < X86_NUM_ENTRIES, - pt2.region == pt1.region, - pt2.entries[idx as int] == pt1.entries[idx as int], - inv_at(mem1, pt1, layer, ptr), - inv_at(mem2, pt2, layer, ptr), - mem1.spec_read(idx, pt1.region) == mem2.spec_read(idx, pt2.region), - pt2.entries[idx as int].is_Some() ==> (forall|r: MemRegion| pt2.entries[idx as int].get_Some_0().used_regions.contains(r) + proof fn lemma_interp_at_entry_different_memory( + mem1: &mem::PageTableMemory, + pt1: PTDir, + mem2: &mem::PageTableMemory, + pt2: PTDir, + layer: nat, + ptr: usize, + base: nat, + idx: nat, + ) + requires + idx < X86_NUM_ENTRIES, + pt2.region == pt1.region, + pt2.entries[idx as int] == pt1.entries[idx as int], + inv_at(mem1, pt1, layer, ptr), + inv_at(mem2, pt2, layer, ptr), + mem1.spec_read(idx, pt1.region) == mem2.spec_read(idx, pt2.region), + pt2.entries[idx as int].is_Some() ==> (forall|r: MemRegion| + pt2.entries[idx as int].get_Some_0().used_regions.contains(r) ==> #[trigger] mem1.region_view(r) == mem2.region_view(r)), - ensures - interp_at_entry(mem1, pt1, layer, ptr, base, idx) == interp_at_entry(mem2, pt2, layer, ptr, base, idx), - decreases X86_NUM_LAYERS - layer - { - match view_at(mem1, pt1, layer, ptr, idx) { - GhostPageDirectoryEntry::Directory { addr: dir_addr, .. 
} => { - let e_base = x86_arch_spec.entry_base(layer, base, idx); - let dir_pt = pt1.entries[idx as int].get_Some_0(); - assert(directories_obey_invariant_at(mem1, pt1, layer, ptr)); - assert(directories_obey_invariant_at(mem2, pt2, layer, ptr)); - lemma_interp_at_aux_facts(mem1, dir_pt, layer + 1, dir_addr, e_base, seq![]); - lemma_interp_at_aux_facts(mem2, dir_pt, layer + 1, dir_addr, e_base, seq![]); - - assert forall|i: nat| i < X86_NUM_ENTRIES implies - interp_at_entry(mem1, dir_pt, layer + 1, dir_addr, e_base, i) - == interp_at_entry(mem2, dir_pt, layer + 1, dir_addr, e_base, i) - && #[trigger] interp_at(mem1, dir_pt, layer + 1, dir_addr, e_base).entries[i as int] - == interp_at(mem2, dir_pt, layer + 1, dir_addr, e_base).entries[i as int] by - { - lemma_interp_at_entry_different_memory(mem1, dir_pt, mem2, dir_pt, layer + 1, dir_addr, e_base, i); - }; - assert(interp_at(mem1, dir_pt, layer + 1, dir_addr, e_base).entries - =~= interp_at(mem2, dir_pt, layer + 1, dir_addr, e_base).entries); - }, - _ => (), - } + ensures + interp_at_entry(mem1, pt1, layer, ptr, base, idx) == interp_at_entry( + mem2, + pt2, + layer, + ptr, + base, + idx, + ), + decreases X86_NUM_LAYERS - layer, + { + match view_at(mem1, pt1, layer, ptr, idx) { + GhostPageDirectoryEntry::Directory { addr: dir_addr, .. } => { + let e_base = x86_arch_spec.entry_base(layer, base, idx); + let dir_pt = pt1.entries[idx as int].get_Some_0(); + assert(directories_obey_invariant_at(mem1, pt1, layer, ptr)); + assert(directories_obey_invariant_at(mem2, pt2, layer, ptr)); + lemma_interp_at_aux_facts(mem1, dir_pt, layer + 1, dir_addr, e_base, seq![]); + lemma_interp_at_aux_facts(mem2, dir_pt, layer + 1, dir_addr, e_base, seq![]); + assert forall|i: nat| i < X86_NUM_ENTRIES implies interp_at_entry( + mem1, + dir_pt, + layer + 1, + dir_addr, + e_base, + i, + ) == interp_at_entry(mem2, dir_pt, layer + 1, dir_addr, e_base, i) + && #[trigger] interp_at( + mem1, + dir_pt, + layer + 1, + dir_addr, + e_base, + ).entries[i as int] == interp_at( + mem2, + dir_pt, + layer + 1, + dir_addr, + e_base, + ).entries[i as int] by { + lemma_interp_at_entry_different_memory( + mem1, + dir_pt, + mem2, + dir_pt, + layer + 1, + dir_addr, + e_base, + i, + ); + }; + assert(interp_at(mem1, dir_pt, layer + 1, dir_addr, e_base).entries =~= interp_at( + mem2, + dir_pt, + layer + 1, + dir_addr, + e_base, + ).entries); + }, + _ => (), } + } - pub proof fn lemma_interp_at_facts(mem: &mem::PageTableMemory, pt: PTDir, layer: nat, ptr: usize, base_vaddr: nat) - requires - inv_at(mem, pt, layer, ptr), - interp_at(mem, pt, layer, ptr, base_vaddr).inv(), - ensures - interp_at(mem, pt, layer, ptr, base_vaddr).base_vaddr == base_vaddr, - interp_at(mem, pt, layer, ptr, base_vaddr).upper_vaddr() == x86_arch_spec.upper_vaddr(layer, base_vaddr), - interp_at(mem, pt, layer, ptr, base_vaddr).interp().lower == base_vaddr, - interp_at(mem, pt, layer, ptr, base_vaddr).interp().upper == x86_arch_spec.upper_vaddr(layer, base_vaddr), - ({ let res = interp_at(mem, pt, layer, ptr, base_vaddr); - forall|j: nat| j < res.entries.len() ==> res.entries[j as int] === #[trigger] interp_at_entry(mem, pt, layer, ptr, base_vaddr, j) - }), - { - lemma_interp_at_aux_facts(mem, pt, layer, ptr, base_vaddr, seq![]); - let res = interp_at(mem, pt, layer, ptr, base_vaddr); - assert(res.pages_match_entry_size()); - assert(res.directories_are_in_next_layer()); - assert(res.directories_match_arch()); - assert(res.directories_obey_invariant()); - assert(res.directories_are_nonempty()); - 
assert(res.frames_aligned()); - res.lemma_inv_implies_interp_inv(); - } + pub proof fn lemma_interp_at_facts( + mem: &mem::PageTableMemory, + pt: PTDir, + layer: nat, + ptr: usize, + base_vaddr: nat, + ) + requires + inv_at(mem, pt, layer, ptr), + interp_at(mem, pt, layer, ptr, base_vaddr).inv(), + ensures + interp_at(mem, pt, layer, ptr, base_vaddr).base_vaddr == base_vaddr, + interp_at(mem, pt, layer, ptr, base_vaddr).upper_vaddr() == x86_arch_spec.upper_vaddr( + layer, + base_vaddr, + ), + interp_at(mem, pt, layer, ptr, base_vaddr).interp().lower == base_vaddr, + interp_at(mem, pt, layer, ptr, base_vaddr).interp().upper == x86_arch_spec.upper_vaddr( + layer, + base_vaddr, + ), + ({ + let res = interp_at(mem, pt, layer, ptr, base_vaddr); + forall|j: nat| + j < res.entries.len() ==> res.entries[j as int] === #[trigger] interp_at_entry( + mem, + pt, + layer, + ptr, + base_vaddr, + j, + ) + }), + { + lemma_interp_at_aux_facts(mem, pt, layer, ptr, base_vaddr, seq![]); + let res = interp_at(mem, pt, layer, ptr, base_vaddr); + assert(res.pages_match_entry_size()); + assert(res.directories_are_in_next_layer()); + assert(res.directories_match_arch()); + assert(res.directories_obey_invariant()); + assert(res.directories_are_nonempty()); + assert(res.frames_aligned()); + res.lemma_inv_implies_interp_inv(); + } - pub proof fn lemma_interp_at_facts_entries(mem: &mem::PageTableMemory, pt: PTDir, layer: nat, ptr: usize, base_vaddr: nat, i: nat) - requires - i < 512, - inv_at(mem, pt, layer, ptr), - interp_at(mem, pt, layer, ptr, base_vaddr).inv(), - ensures - ({ let res = interp_at(mem, pt, layer, ptr, base_vaddr); - match view_at(mem, pt, layer, ptr, i) { - GhostPageDirectoryEntry::Directory { addr: dir_addr, .. } => { - &&& res.entries[i as int].is_Directory() - &&& res.entries[i as int].get_Directory_0() == interp_at(mem, pt.entries[i as int].get_Some_0(), (layer + 1) as nat, dir_addr, x86_arch_spec.entry_base(layer, base_vaddr, i)) - }, - GhostPageDirectoryEntry::Page { addr, .. } => res.entries[i as int].is_Page() && res.entries[i as int].get_Page_0().frame.base == addr, - GhostPageDirectoryEntry::Empty => res.entries[i as int].is_Empty(), - } }) - { lemma_interp_at_aux_facts(mem, pt, layer, ptr, base_vaddr, seq![]); } - - proof fn lemma_interp_at_aux_facts(mem: &mem::PageTableMemory, pt: PTDir, layer: nat, ptr: usize, base_vaddr: nat, init: Seq) - requires inv_at(mem, pt, layer, ptr), - ensures - interp_at_aux(mem, pt, layer, ptr, base_vaddr, init).len() == if init.len() > X86_NUM_ENTRIES { init.len() } else { X86_NUM_ENTRIES as nat }, - forall|j: nat| j < init.len() ==> #[trigger] interp_at_aux(mem, pt, layer, ptr, base_vaddr, init)[j as int] == init[j as int], - ({ let res = interp_at_aux(mem, pt, layer, ptr, base_vaddr, init); - &&& (forall|j: nat| - #![trigger res[j as int]] - init.len() <= j && j < res.len() ==> - match view_at(mem, pt, layer, ptr, j) { - GhostPageDirectoryEntry::Directory { addr: dir_addr, .. } => { - &&& res[j as int].is_Directory() - &&& res[j as int].get_Directory_0() == interp_at(mem, pt.entries[j as int].get_Some_0(), (layer + 1) as nat, dir_addr, x86_arch_spec.entry_base(layer, base_vaddr, j)) - }, - GhostPageDirectoryEntry::Page { addr, .. 
} => res[j as int].is_Page() && res[j as int].get_Page_0().frame.base == addr, - GhostPageDirectoryEntry::Empty => res[j as int].is_Empty(), - }) - &&& (forall|j: nat| init.len() <= j && j < res.len() ==> res[j as int] == #[trigger] interp_at_entry(mem, pt, layer, ptr, base_vaddr, j)) - }), - decreases X86_NUM_LAYERS - layer, X86_NUM_ENTRIES - init.len(), 0nat - { - if init.len() >= X86_NUM_ENTRIES as nat { + pub proof fn lemma_interp_at_facts_entries( + mem: &mem::PageTableMemory, + pt: PTDir, + layer: nat, + ptr: usize, + base_vaddr: nat, + i: nat, + ) + requires + i < 512, + inv_at(mem, pt, layer, ptr), + interp_at(mem, pt, layer, ptr, base_vaddr).inv(), + ensures + ({ + let res = interp_at(mem, pt, layer, ptr, base_vaddr); + match view_at(mem, pt, layer, ptr, i) { + GhostPageDirectoryEntry::Directory { addr: dir_addr, .. } => { + &&& res.entries[i as int].is_Directory() + &&& res.entries[i as int].get_Directory_0() == interp_at( + mem, + pt.entries[i as int].get_Some_0(), + (layer + 1) as nat, + dir_addr, + x86_arch_spec.entry_base(layer, base_vaddr, i), + ) + }, + GhostPageDirectoryEntry::Page { addr, .. } => res.entries[i as int].is_Page() + && res.entries[i as int].get_Page_0().frame.base == addr, + GhostPageDirectoryEntry::Empty => res.entries[i as int].is_Empty(), + } + }), + { + lemma_interp_at_aux_facts(mem, pt, layer, ptr, base_vaddr, seq![]); + } + + proof fn lemma_interp_at_aux_facts( + mem: &mem::PageTableMemory, + pt: PTDir, + layer: nat, + ptr: usize, + base_vaddr: nat, + init: Seq, + ) + requires + inv_at(mem, pt, layer, ptr), + ensures + interp_at_aux(mem, pt, layer, ptr, base_vaddr, init).len() == if init.len() + > X86_NUM_ENTRIES { + init.len() } else { - assert(directories_obey_invariant_at(mem, pt, layer, ptr)); - let entry = interp_at_entry(mem, pt, layer, ptr, base_vaddr, init.len()); - lemma_interp_at_aux_facts(mem, pt, layer, ptr, base_vaddr, init.push(entry)); - } + X86_NUM_ENTRIES as nat + }, + forall|j: nat| + j < init.len() ==> #[trigger] interp_at_aux( + mem, + pt, + layer, + ptr, + base_vaddr, + init, + )[j as int] == init[j as int], + ({ + let res = interp_at_aux(mem, pt, layer, ptr, base_vaddr, init); + &&& (forall|j: nat| + #![trigger res[j as int]] + init.len() <= j && j < res.len() ==> match view_at(mem, pt, layer, ptr, j) { + GhostPageDirectoryEntry::Directory { addr: dir_addr, .. } => { + &&& res[j as int].is_Directory() + &&& res[j as int].get_Directory_0() == interp_at( + mem, + pt.entries[j as int].get_Some_0(), + (layer + 1) as nat, + dir_addr, + x86_arch_spec.entry_base(layer, base_vaddr, j), + ) + }, + GhostPageDirectoryEntry::Page { addr, .. 
} => res[j as int].is_Page() + && res[j as int].get_Page_0().frame.base == addr, + GhostPageDirectoryEntry::Empty => res[j as int].is_Empty(), + }) + &&& (forall|j: nat| + init.len() <= j && j < res.len() ==> res[j as int] + == #[trigger] interp_at_entry(mem, pt, layer, ptr, base_vaddr, j)) + }), + decreases X86_NUM_LAYERS - layer, X86_NUM_ENTRIES - init.len(), 0nat, + { + if init.len() >= X86_NUM_ENTRIES as nat { + } else { + assert(directories_obey_invariant_at(mem, pt, layer, ptr)); + let entry = interp_at_entry(mem, pt, layer, ptr, base_vaddr, init.len()); + lemma_interp_at_aux_facts(mem, pt, layer, ptr, base_vaddr, init.push(entry)); } + } - fn resolve_aux(mem: &mem::PageTableMemory, Ghost(pt): Ghost, layer: usize, ptr: usize, base: usize, vaddr: usize) -> (res: Result<(usize, PageTableEntryExec), ()>) - requires - inv_at(mem, pt, layer as nat, ptr), - interp_at(mem, pt, layer as nat, ptr, base as nat).inv(), - interp_at(mem, pt, layer as nat, ptr, base as nat).interp().accepted_resolve(vaddr as nat), - base <= vaddr < MAX_BASE, - ensures - // Refinement of l1 - l1::result_map_ok(res, |v: (usize, PageTableEntryExec)| (v.0 as nat, v.1@)) === interp_at(mem, pt, layer as nat, ptr, base as nat).resolve(vaddr as nat), - // Refinement of l0 - l1::result_map_ok(res, |v: (usize, PageTableEntryExec)| (v.0 as nat, v.1@)) === interp_at(mem, pt, layer as nat, ptr, base as nat).interp().resolve(vaddr as nat), - // decreases X86_NUM_LAYERS - layer - { - proof { lemma_interp_at_facts(mem, pt, layer as nat, ptr, base as nat); } - let idx: usize = x86_arch_exec().index_for_vaddr(layer, base, vaddr); - proof { indexing::lemma_index_from_base_and_addr(base as nat, vaddr as nat, x86_arch_spec.entry_size(layer as nat), X86_NUM_ENTRIES as nat); } - let entry = entry_at(mem, Ghost(pt), layer, ptr, idx); - let interp: Ghost = Ghost(interp_at(mem, pt, layer as nat, ptr, base as nat)); + fn resolve_aux( + mem: &mem::PageTableMemory, + Ghost(pt): Ghost, + layer: usize, + ptr: usize, + base: usize, + vaddr: usize, + ) -> (res: Result<(usize, PageTableEntryExec), ()>) + requires + inv_at(mem, pt, layer as nat, ptr), + interp_at(mem, pt, layer as nat, ptr, base as nat).inv(), + interp_at(mem, pt, layer as nat, ptr, base as nat).interp().accepted_resolve( + vaddr as nat, + ), + base <= vaddr < MAX_BASE, + ensures + // Refinement of l1 + + l1::result_map_ok(res, |v: (usize, PageTableEntryExec)| (v.0 as nat, v.1@)) + === interp_at(mem, pt, layer as nat, ptr, base as nat).resolve(vaddr as nat), + // Refinement of l0 + l1::result_map_ok(res, |v: (usize, PageTableEntryExec)| (v.0 as nat, v.1@)) + === interp_at(mem, pt, layer as nat, ptr, base as nat).interp().resolve( + vaddr as nat, + ), + // decreases X86_NUM_LAYERS - layer + + { + proof { + lemma_interp_at_facts(mem, pt, layer as nat, ptr, base as nat); + } + let idx: usize = x86_arch_exec().index_for_vaddr(layer, base, vaddr); + proof { + indexing::lemma_index_from_base_and_addr( + base as nat, + vaddr as nat, + x86_arch_spec.entry_size(layer as nat), + X86_NUM_ENTRIES as nat, + ); + } + let entry = entry_at(mem, Ghost(pt), layer, ptr, idx); + let interp: Ghost = Ghost( + interp_at(mem, pt, layer as nat, ptr, base as nat), + ); + proof { + interp@.lemma_resolve_structure_assertions(vaddr as nat, idx as nat); + interp@.lemma_resolve_refines(vaddr as nat); + } + if entry.is_mapping() { + let entry_base: usize = x86_arch_exec().entry_base(layer, base, idx); proof { - interp@.lemma_resolve_structure_assertions(vaddr as nat, idx as nat); - 
interp@.lemma_resolve_refines(vaddr as nat); + indexing::lemma_entry_base_from_index( + base as nat, + idx as nat, + x86_arch_spec.entry_size(layer as nat), + ); + assert(entry_base <= vaddr); } - if entry.is_mapping() { - let entry_base: usize = x86_arch_exec().entry_base(layer, base, idx); + if entry.is_dir(layer) { + assert(entry@.is_Directory()); + let dir_addr = entry.address() as usize; + assert(pt.entries[idx as int].is_Some()); + let dir_pt: Ghost = Ghost(pt.entries.index(idx as int).get_Some_0()); + assert(directories_obey_invariant_at(mem, pt, layer as nat, ptr)); proof { - indexing::lemma_entry_base_from_index(base as nat, idx as nat, x86_arch_spec.entry_size(layer as nat)); - assert(entry_base <= vaddr); + assert(interp@.inv()); + assert(interp@.directories_obey_invariant()); + assert(interp@.entries[idx as int].is_Directory()); + assert(interp@.entries[idx as int].get_Directory_0().inv()); + assert(l1::NodeEntry::Directory( + interp_at(mem, dir_pt@, (layer + 1) as nat, dir_addr, entry_base as nat), + ) === interp@.entries[idx as int]); + assert(inv_at(mem, dir_pt@, (layer + 1) as nat, dir_addr)); } - if entry.is_dir(layer) { - assert(entry@.is_Directory()); - let dir_addr = entry.address() as usize; - assert(pt.entries[idx as int].is_Some()); - let dir_pt: Ghost = Ghost(pt.entries.index(idx as int).get_Some_0()); - assert(directories_obey_invariant_at(mem, pt, layer as nat, ptr)); - proof { - assert(interp@.inv()); - assert(interp@.directories_obey_invariant()); - assert(interp@.entries[idx as int].is_Directory()); - assert(interp@.entries[idx as int].get_Directory_0().inv()); - assert(l1::NodeEntry::Directory(interp_at(mem, dir_pt@, (layer + 1) as nat, dir_addr, entry_base as nat)) === interp@.entries[idx as int]); - assert(inv_at(mem, dir_pt@, (layer + 1) as nat, dir_addr)); - } - let res = resolve_aux(mem, dir_pt, layer + 1, dir_addr, entry_base, vaddr); - assert(l1::result_map_ok(res, |v: (usize, PageTableEntryExec)| (v.0 as nat, v.1@)) === interp@.resolve(vaddr as nat)); - res - } else { - assert(entry@.is_Page()); - assert(interp@.entries[idx as int].is_Page()); - let pte = PageTableEntryExec { - frame: MemRegionExec { base: entry.address() as usize, size: x86_arch_exec().entry_size(layer) }, - flags: entry.flags() - }; - let res = Ok((entry_base, pte)); - proof { + let res = resolve_aux(mem, dir_pt, layer + 1, dir_addr, entry_base, vaddr); + assert(l1::result_map_ok(res, |v: (usize, PageTableEntryExec)| (v.0 as nat, v.1@)) + === interp@.resolve(vaddr as nat)); + res + } else { + assert(entry@.is_Page()); + assert(interp@.entries[idx as int].is_Page()); + let pte = PageTableEntryExec { + frame: MemRegionExec { + base: entry.address() as usize, + size: x86_arch_exec().entry_size(layer), + }, + flags: entry.flags(), + }; + let res = Ok((entry_base, pte)); + proof { if interp@.resolve(vaddr as nat).is_Ok() { - assert(interp@.entries[idx as int].get_Page_0() === interp@.resolve(vaddr as nat).get_Ok_0().1); - assert(interp@.entries[idx as int] === interp_at_entry(mem, pt, layer as nat, ptr, base as nat, idx as nat)); + assert(interp@.entries[idx as int].get_Page_0() === interp@.resolve( + vaddr as nat, + ).get_Ok_0().1); + assert(interp@.entries[idx as int] === interp_at_entry( + mem, + pt, + layer as nat, + ptr, + base as nat, + idx as nat, + )); } - } - assert(l1::result_map_ok(res, |v: (usize, PageTableEntryExec)| (v.0 as nat, v.1@).0) === l1::result_map_ok(interp@.resolve(vaddr as nat), |v: (nat, PageTableEntry)| v.0)); - assert(l1::result_map_ok(res, |v: (usize, 
PageTableEntryExec)| (v.0 as nat, v.1@).1.frame) === l1::result_map_ok(interp@.resolve(vaddr as nat), |v: (nat, PageTableEntry)| v.1.frame)); - assert(l1::result_map_ok(res, |v: (usize, PageTableEntryExec)| (v.0 as nat, v.1@).1.flags) === l1::result_map_ok(interp@.resolve(vaddr as nat), |v: (nat, PageTableEntry)| v.1.flags)); - assert(l1::result_map_ok(res, |v: (usize, PageTableEntryExec)| (v.0 as nat, v.1@)) === interp@.resolve(vaddr as nat)); - res } - } else { - assert(entry@.is_Empty()); - assert(interp@.entries[idx as int].is_Empty()); - assert(l1::result_map_ok(Err(()), |v: (usize, PageTableEntryExec)| (v.0 as nat, v.1@)) === interp@.resolve(vaddr as nat)); - Err(()) + assert(l1::result_map_ok(res, |v: (usize, PageTableEntryExec)| (v.0 as nat, v.1@).0) + === l1::result_map_ok( + interp@.resolve(vaddr as nat), + |v: (nat, PageTableEntry)| v.0, + )); + assert(l1::result_map_ok( + res, + |v: (usize, PageTableEntryExec)| (v.0 as nat, v.1@).1.frame, + ) === l1::result_map_ok( + interp@.resolve(vaddr as nat), + |v: (nat, PageTableEntry)| v.1.frame, + )); + assert(l1::result_map_ok( + res, + |v: (usize, PageTableEntryExec)| (v.0 as nat, v.1@).1.flags, + ) === l1::result_map_ok( + interp@.resolve(vaddr as nat), + |v: (nat, PageTableEntry)| v.1.flags, + )); + assert(l1::result_map_ok(res, |v: (usize, PageTableEntryExec)| (v.0 as nat, v.1@)) + === interp@.resolve(vaddr as nat)); + res } + } else { + assert(entry@.is_Empty()); + assert(interp@.entries[idx as int].is_Empty()); + assert(l1::result_map_ok(Err(()), |v: (usize, PageTableEntryExec)| (v.0 as nat, v.1@)) + === interp@.resolve(vaddr as nat)); + Err(()) } + } - pub fn resolve(mem: &mem::PageTableMemory, Ghost(pt): Ghost, vaddr: usize) -> (res: Result<(usize, PageTableEntryExec),()>) - requires - inv(mem, pt), - interp(mem, pt).inv(), - interp(mem, pt).interp().accepted_resolve(vaddr as nat), - vaddr < MAX_BASE, - ensures - // Refinement of l1 - l1::result_map_ok(res, |v: (usize, PageTableEntryExec)| (v.0 as nat, v.1@)) === interp(mem, pt).resolve(vaddr as nat), - // Refinement of l0 - l1::result_map_ok(res, |v: (usize, PageTableEntryExec)| (v.0 as nat, v.1@)) === interp(mem, pt).interp().resolve(vaddr as nat), - { - proof { ambient_arith(); } - let res = resolve_aux(mem, Ghost(pt), 0, mem.cr3().base, 0, vaddr); - res + pub fn resolve(mem: &mem::PageTableMemory, Ghost(pt): Ghost, vaddr: usize) -> (res: + Result<(usize, PageTableEntryExec), ()>) + requires + inv(mem, pt), + interp(mem, pt).inv(), + interp(mem, pt).interp().accepted_resolve(vaddr as nat), + vaddr < MAX_BASE, + ensures + // Refinement of l1 + + l1::result_map_ok(res, |v: (usize, PageTableEntryExec)| (v.0 as nat, v.1@)) === interp( + mem, + pt, + ).resolve(vaddr as nat), + // Refinement of l0 + l1::result_map_ok(res, |v: (usize, PageTableEntryExec)| (v.0 as nat, v.1@)) === interp( + mem, + pt, + ).interp().resolve(vaddr as nat), + { + proof { + ambient_arith(); } + let res = resolve_aux(mem, Ghost(pt), 0, mem.cr3().base, 0, vaddr); + res + } - pub open spec fn accepted_mapping(vaddr: nat, pte: PageTableEntry) -> bool { - // Can't map pages in PML4, i.e. layer 0 - &&& x86_arch_spec.contains_entry_size_at_index_atleast(pte.frame.size, 1) - &&& pte.frame.base <= MAX_PHYADDR - } + pub open spec fn accepted_mapping(vaddr: nat, pte: PageTableEntry) -> bool { + // Can't map pages in PML4, i.e. 
layer 0 + &&& x86_arch_spec.contains_entry_size_at_index_atleast(pte.frame.size, 1) + &&& pte.frame.base <= MAX_PHYADDR + } - fn map_frame_aux(mem: &mut mem::PageTableMemory, Ghost(pt): Ghost, layer: usize, ptr: usize, base: usize, vaddr: usize, pte: PageTableEntryExec) - -> (res: Result)>,()>) - requires - inv_at(&*old(mem), pt, layer as nat, ptr), - interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).inv(), - old(mem).inv(), - old(mem).alloc_available_pages() >= 3 - layer, - accepted_mapping(vaddr as nat, pte@), - interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).accepted_mapping(vaddr as nat, pte@), - base <= vaddr < MAX_BASE, - ensures - match res { - Ok(resv) => { - let (pt_res, new_regions) = resv@; - // We return the regions that we added - &&& mem.regions() === old(mem).regions().union(new_regions) - &&& pt_res.used_regions === pt.used_regions.union(new_regions) - // and only those we added - &&& new_regions.disjoint(old(mem).regions()) - &&& (forall|r: MemRegion| new_regions.contains(r) ==> !(#[trigger] pt.used_regions.contains(r))) - // Invariant preserved - &&& inv_at(mem, pt_res, layer as nat, ptr) - // We only touch already allocated regions if they're in pt.used_regions - &&& (forall|r: MemRegion| !(#[trigger] pt.used_regions.contains(r)) && !(new_regions.contains(r)) + fn map_frame_aux( + mem: &mut mem::PageTableMemory, + Ghost(pt): Ghost, + layer: usize, + ptr: usize, + base: usize, + vaddr: usize, + pte: PageTableEntryExec, + ) -> (res: Result)>, ()>) + requires + inv_at(&*old(mem), pt, layer as nat, ptr), + interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).inv(), + old(mem).inv(), + old(mem).alloc_available_pages() >= 3 - layer, + accepted_mapping(vaddr as nat, pte@), + interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).accepted_mapping( + vaddr as nat, + pte@, + ), + base <= vaddr < MAX_BASE, + ensures + match res { + Ok(resv) => { + let (pt_res, new_regions) = resv@; + // We return the regions that we added + &&& mem.regions() === old(mem).regions().union(new_regions) + &&& pt_res.used_regions === pt.used_regions.union( + new_regions, + ) + // and only those we added + + &&& new_regions.disjoint(old(mem).regions()) + &&& (forall|r: MemRegion| + new_regions.contains(r) ==> !(#[trigger] pt.used_regions.contains( + r, + ))) + // Invariant preserved + + &&& inv_at( + mem, + pt_res, + layer as nat, + ptr, + ) + // We only touch already allocated regions if they're in pt.used_regions + + &&& (forall|r: MemRegion| + !(#[trigger] pt.used_regions.contains(r)) && !(new_regions.contains(r)) ==> mem.region_view(r) === old(mem).region_view(r)) - &&& pt_res.region === pt.region - }, - Err(e) => { - // If error, unchanged - &&& mem === old(mem) - }, + &&& pt_res.region === pt.region }, - // Refinement of l1 - match res { - Ok(resv) => { - let (pt_res, new_regions) = resv@; - Ok(interp_at(mem, pt_res, layer as nat, ptr, base as nat)) === interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).map_frame(vaddr as nat, pte@) - }, - Err(e) => - Err(interp_at(mem, pt, layer as nat, ptr, base as nat)) === interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).map_frame(vaddr as nat, pte@), + Err(e) => { + // If error, unchanged + &&& mem === old(mem) }, - mem.cr3_spec() == old(mem).cr3_spec(), - // decreases X86_NUM_LAYERS - layer - { - proof { lemma_interp_at_facts(mem, pt, layer as nat, ptr, base as nat); } - let idx: usize = x86_arch_exec().index_for_vaddr(layer, base, vaddr); - proof { - assert({ - &&& between(vaddr as nat, x86_arch_spec.entry_base(layer 
as nat, base as nat, idx as nat), x86_arch_spec.next_entry_base(layer as nat, base as nat, idx as nat)) - &&& aligned(vaddr as nat, x86_arch_spec.entry_size(layer as nat)) ==> vaddr == x86_arch_spec.entry_base(layer as nat, base as nat, idx as nat) - &&& idx < X86_NUM_ENTRIES }) by - { - let es = x86_arch_spec.entry_size(layer as nat); - assert(aligned(base as nat, es)) by { - lib::mod_mult_zero_implies_mod_zero(base as nat, es, X86_NUM_ENTRIES as nat); - }; - indexing::lemma_index_from_base_and_addr(base as nat, vaddr as nat, es, X86_NUM_ENTRIES as nat); + }, + // Refinement of l1 + match res { + Ok(resv) => { + let (pt_res, new_regions) = resv@; + Ok(interp_at(mem, pt_res, layer as nat, ptr, base as nat)) === interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).map_frame(vaddr as nat, pte@) + }, + Err(e) => Err(interp_at(mem, pt, layer as nat, ptr, base as nat)) === interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).map_frame(vaddr as nat, pte@), + }, + mem.cr3_spec() == old( + mem, + ).cr3_spec(), + // decreases X86_NUM_LAYERS - layer + + { + proof { + lemma_interp_at_facts(mem, pt, layer as nat, ptr, base as nat); + } + let idx: usize = x86_arch_exec().index_for_vaddr(layer, base, vaddr); + proof { + assert({ + &&& between( + vaddr as nat, + x86_arch_spec.entry_base(layer as nat, base as nat, idx as nat), + x86_arch_spec.next_entry_base(layer as nat, base as nat, idx as nat), + ) + &&& aligned(vaddr as nat, x86_arch_spec.entry_size(layer as nat)) ==> vaddr + == x86_arch_spec.entry_base(layer as nat, base as nat, idx as nat) + &&& idx < X86_NUM_ENTRIES + }) by { + let es = x86_arch_spec.entry_size(layer as nat); + assert(aligned(base as nat, es)) by { + lib::mod_mult_zero_implies_mod_zero(base as nat, es, X86_NUM_ENTRIES as nat); }; - lemma_interp_at_facts_entries(&*old(mem), pt, layer as nat, ptr, base as nat, idx as nat); - } - let entry = entry_at(mem, Ghost(pt), layer, ptr, idx); - let interp: Ghost = Ghost(interp_at(mem, pt, layer as nat, ptr, base as nat)); - proof { - interp@.lemma_map_frame_structure_assertions(vaddr as nat, pte@, idx as nat); - interp@.lemma_map_frame_refines_map_frame(vaddr as nat, pte@); - } - let entry_base: usize = x86_arch_exec().entry_base(layer, base, idx); - proof { - indexing::lemma_entry_base_from_index(base as nat, idx as nat, x86_arch_spec.entry_size(layer as nat)); - assert(entry_base <= vaddr); - } - if entry.is_mapping() { - if entry.is_dir(layer) { - if x86_arch_exec().entry_size(layer) == pte.frame.size { - assert(Err(interp_at(mem, pt, layer as nat, ptr, base as nat)) === interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).map_frame(vaddr as nat, pte@)); - Err(()) - } else { - let dir_addr = entry.address() as usize; - assert(pt.entries[idx as int].is_Some()); - let dir_pt: Ghost = Ghost(pt.entries.index(idx as int).get_Some_0()); - assert(directories_obey_invariant_at(mem, pt, layer as nat, ptr)); - match map_frame_aux(mem, dir_pt, layer + 1, dir_addr, entry_base, vaddr, pte) { - Ok(rec_res) => { - let dir_pt_res: Ghost = Ghost(rec_res@.0); - let new_regions: Ghost> = Ghost(rec_res@.1); - - assert(dir_pt_res@.used_regions === dir_pt@.used_regions.union(new_regions@)); - assert(inv_at(mem, dir_pt_res@, (layer + 1) as nat, dir_addr)); - assert(Ok(interp_at(mem, dir_pt_res@, (layer + 1) as nat, dir_addr, entry_base as nat)) - === interp_at(&*old(mem), dir_pt@, (layer + 1) as nat, dir_addr, entry_base as nat).map_frame(vaddr as nat, pte@)); - let pt_res: Ghost = Ghost( - PTDir { - region: pt.region, 
- entries: pt.entries.update(idx as int, Some(dir_pt_res@)), - used_regions: pt.used_regions.union(new_regions@), - }); - - assert(idx < pt.entries.len()); - assert(pt_res@.region === pt.region); - assert(!new_regions@.contains(pt_res@.region)); - assert(!dir_pt_res@.used_regions.contains(pt_res@.region)); - - // None of the entries at this level change - assert forall|i: nat| i < X86_NUM_ENTRIES implies - view_at(mem, pt_res@, layer as nat, ptr, i) == view_at(&*old(mem), pt, layer as nat, ptr, i) by { }; - assert forall|i: nat| i < X86_NUM_ENTRIES implies - entry_at_spec(mem, pt_res@, layer as nat, ptr, i) == entry_at_spec(&*old(mem), pt, layer as nat, ptr, i) by { }; - - assert(inv_at(mem, pt_res@, layer as nat, ptr) - && Ok(interp_at(mem, pt_res@, layer as nat, ptr, base as nat)) === interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).map_frame(vaddr as nat, pte@)) by - { - assert forall|i: nat| i < X86_NUM_ENTRIES - implies { - let entry = view_at(mem, pt_res@, layer as nat, ptr, i); - entry.is_Directory() == (#[trigger] pt_res@.entries[i as int]).is_Some() - } - by { - assert(mem.region_view(pt_res@.region) === mem.region_view(pt_res@.region)); - let entry = view_at(mem, pt_res@, layer as nat, ptr, i); - if i == idx { + indexing::lemma_index_from_base_and_addr( + base as nat, + vaddr as nat, + es, + X86_NUM_ENTRIES as nat, + ); + }; + lemma_interp_at_facts_entries( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + idx as nat, + ); + } + let entry = entry_at(mem, Ghost(pt), layer, ptr, idx); + let interp: Ghost = Ghost( + interp_at(mem, pt, layer as nat, ptr, base as nat), + ); + proof { + interp@.lemma_map_frame_structure_assertions(vaddr as nat, pte@, idx as nat); + interp@.lemma_map_frame_refines_map_frame(vaddr as nat, pte@); + } + let entry_base: usize = x86_arch_exec().entry_base(layer, base, idx); + proof { + indexing::lemma_entry_base_from_index( + base as nat, + idx as nat, + x86_arch_spec.entry_size(layer as nat), + ); + assert(entry_base <= vaddr); + } + if entry.is_mapping() { + if entry.is_dir(layer) { + if x86_arch_exec().entry_size(layer) == pte.frame.size { + assert(Err(interp_at(mem, pt, layer as nat, ptr, base as nat)) === interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).map_frame(vaddr as nat, pte@)); + Err(()) + } else { + let dir_addr = entry.address() as usize; + assert(pt.entries[idx as int].is_Some()); + let dir_pt: Ghost = Ghost(pt.entries.index(idx as int).get_Some_0()); + assert(directories_obey_invariant_at(mem, pt, layer as nat, ptr)); + match map_frame_aux(mem, dir_pt, layer + 1, dir_addr, entry_base, vaddr, pte) { + Ok(rec_res) => { + let dir_pt_res: Ghost = Ghost(rec_res@.0); + let new_regions: Ghost> = Ghost(rec_res@.1); + assert(dir_pt_res@.used_regions === dir_pt@.used_regions.union( + new_regions@, + )); + assert(inv_at(mem, dir_pt_res@, (layer + 1) as nat, dir_addr)); + assert(Ok( + interp_at( + mem, + dir_pt_res@, + (layer + 1) as nat, + dir_addr, + entry_base as nat, + ), + ) === interp_at( + &*old(mem), + dir_pt@, + (layer + 1) as nat, + dir_addr, + entry_base as nat, + ).map_frame(vaddr as nat, pte@)); + let pt_res: Ghost = Ghost( + PTDir { + region: pt.region, + entries: pt.entries.update(idx as int, Some(dir_pt_res@)), + used_regions: pt.used_regions.union(new_regions@), + }, + ); + assert(idx < pt.entries.len()); + assert(pt_res@.region === pt.region); + assert(!new_regions@.contains(pt_res@.region)); + assert(!dir_pt_res@.used_regions.contains(pt_res@.region)); + // None of the entries at this level 
change + assert forall|i: nat| i < X86_NUM_ENTRIES implies view_at( + mem, + pt_res@, + layer as nat, + ptr, + i, + ) == view_at(&*old(mem), pt, layer as nat, ptr, i) by {}; + assert forall|i: nat| i < X86_NUM_ENTRIES implies entry_at_spec( + mem, + pt_res@, + layer as nat, + ptr, + i, + ) == entry_at_spec(&*old(mem), pt, layer as nat, ptr, i) by {}; + assert(inv_at(mem, pt_res@, layer as nat, ptr) && Ok( + interp_at(mem, pt_res@, layer as nat, ptr, base as nat), + ) === interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).map_frame(vaddr as nat, pte@)) by { + assert forall|i: nat| i < X86_NUM_ENTRIES implies { + let entry = view_at(mem, pt_res@, layer as nat, ptr, i); + entry.is_Directory() == ( + #[trigger] pt_res@.entries[i as int]).is_Some() + } by { + assert(mem.region_view(pt_res@.region) === mem.region_view( + pt_res@.region, + )); + let entry = view_at(mem, pt_res@, layer as nat, ptr, i); + if i == idx { + } else { + assert(pt.entries[i as int] === pt_res@.entries[i as int]); + assert(entry === view_at( + &*old(mem), + pt, + layer as nat, + ptr, + i, + )); + assert(entry.is_Directory() + == pt_res@.entries[i as int].is_Some()); + } + }; + assert(ghost_pt_matches_structure(mem, pt_res@, layer as nat, ptr)); + assert(ghost_pt_used_regions_rtrancl( + mem, + pt_res@, + layer as nat, + ptr, + )); + assert(ghost_pt_region_notin_used_regions( + mem, + pt_res@, + layer as nat, + ptr, + )); + assert forall|i: nat, j: nat, r: MemRegion| + i != j && i < pt_res@.entries.len() + && pt_res@.entries[i as int].is_Some() + && #[trigger] pt_res@.entries[i as int].get_Some_0().used_regions.contains( + r) && j < pt_res@.entries.len() + && pt_res@.entries[j as int].is_Some() implies !( + #[trigger] pt_res@.entries[j as int].get_Some_0().used_regions.contains( + r)) by { + assert(ghost_pt_used_regions_pairwise_disjoint( + mem, + pt, + layer as nat, + ptr, + )); + if j == idx { + assert(pt_res@.entries[j as int].get_Some_0() + === dir_pt_res@); + assert(pt_res@.entries[i as int] === pt.entries[i as int]); + if new_regions@.contains(r) { + assert(!dir_pt@.used_regions.contains(r)); + assert(!old(mem).regions().contains(r)); + assert(!dir_pt_res@.used_regions.contains(r)); } else { - assert(pt.entries[i as int] === pt_res@.entries[i as int]); - assert(entry === view_at(&*old(mem), pt, layer as nat, ptr, i)); - assert(entry.is_Directory() == pt_res@.entries[i as int].is_Some()); + if dir_pt@.used_regions.contains(r) { + assert(pt.used_regions.contains(r)); + assert(old(mem).regions().contains(r)); + assert(!dir_pt_res@.used_regions.contains(r)); + } } - }; - assert(ghost_pt_matches_structure(mem, pt_res@, layer as nat, ptr)); - - assert(ghost_pt_used_regions_rtrancl(mem, pt_res@, layer as nat, ptr)); - assert(ghost_pt_region_notin_used_regions(mem, pt_res@, layer as nat, ptr)); - assert forall|i: nat, j: nat, r: MemRegion| - i != j && - i < pt_res@.entries.len() && pt_res@.entries[i as int].is_Some() && - #[trigger] pt_res@.entries[i as int].get_Some_0().used_regions.contains(r) && - j < pt_res@.entries.len() && pt_res@.entries[j as int].is_Some() - implies !(#[trigger] pt_res@.entries[j as int].get_Some_0().used_regions.contains(r)) by - { - assert(ghost_pt_used_regions_pairwise_disjoint(mem, pt, layer as nat, ptr)); - if j == idx { - assert(pt_res@.entries[j as int].get_Some_0() === dir_pt_res@); - assert(pt_res@.entries[i as int] === pt.entries[i as int]); + } else { + if i == idx { + assert(pt_res@.entries[i as int].get_Some_0() + === dir_pt_res@); + assert(pt_res@.entries[j as int] 
+ === pt.entries[j as int]); if new_regions@.contains(r) { + assert(dir_pt_res@.used_regions.contains(r)); assert(!dir_pt@.used_regions.contains(r)); assert(!old(mem).regions().contains(r)); - assert(!dir_pt_res@.used_regions.contains(r)); + assert(!pt.entries[j as int].get_Some_0().used_regions.contains( + r)); } else { - if dir_pt@.used_regions.contains(r) { - assert(pt.used_regions.contains(r)); - assert(old(mem).regions().contains(r)); - assert(!dir_pt_res@.used_regions.contains(r)); - } + assert(dir_pt@.used_regions.contains(r)); + assert(!pt.entries[j as int].get_Some_0().used_regions.contains( + r)); } } else { - if i == idx { - assert(pt_res@.entries[i as int].get_Some_0() === dir_pt_res@); - assert(pt_res@.entries[j as int] === pt.entries[j as int]); - if new_regions@.contains(r) { - assert(dir_pt_res@.used_regions.contains(r)); - assert(!dir_pt@.used_regions.contains(r)); - assert(!old(mem).regions().contains(r)); - assert(!pt.entries[j as int].get_Some_0().used_regions.contains(r)); - } else { - assert(dir_pt@.used_regions.contains(r)); - assert(!pt.entries[j as int].get_Some_0().used_regions.contains(r)); - } - } else { - assert(pt_res@.entries[i as int] === pt.entries[i as int]); - assert(pt_res@.entries[j as int] === pt.entries[j as int]); - } - } - }; - assert(ghost_pt_used_regions_pairwise_disjoint(mem, pt_res@, layer as nat, ptr)); - - assert forall|i: nat| i < X86_NUM_ENTRIES implies { - let entry = #[trigger] view_at(mem, pt_res@, layer as nat, ptr, i); - entry.is_Directory() ==> { - &&& inv_at(mem, pt_res@.entries[i as int].get_Some_0(), (layer + 1) as nat, entry.get_Directory_addr()) + assert(pt_res@.entries[i as int] + === pt.entries[i as int]); + assert(pt_res@.entries[j as int] + === pt.entries[j as int]); } } - by { - let entry = #[trigger] view_at(mem, pt_res@, layer as nat, ptr, i); - if i == idx { - assert(pt_res@.entries[i as int].get_Some_0() === dir_pt_res@); - assert(entry.get_Directory_addr() === dir_addr); - assert(inv_at(mem, pt_res@.entries[i as int].get_Some_0(), (layer + 1) as nat, entry.get_Directory_addr())); - } else { - assert(directories_obey_invariant_at(&*old(mem), pt, layer as nat, ptr)); - assert(pt.entries[i as int] === pt_res@.entries[i as int]); - assert(entry === view_at(&*old(mem), pt, layer as nat, ptr, i)); - assert(entry === view_at(&*old(mem), pt_res@, layer as nat, ptr, i)); - if entry.is_Directory() { - let pt_entry = pt_res@.entries[i as int].get_Some_0(); - assert(ghost_pt_used_regions_pairwise_disjoint(mem, pt_res@, layer as nat, ptr)); - assert forall|r: MemRegion| #[trigger] pt_entry.used_regions.contains(r) - implies !new_regions@.contains(r) by - { - assert(pt_entry.used_regions.contains(r)); - assert(old(mem).regions().contains(r)); - }; - assert(forall|r: MemRegion| #[trigger] pt_entry.used_regions.contains(r) - ==> !dir_pt@.used_regions.contains(r)); - assert(forall|r: MemRegion| pt_entry.used_regions.contains(r) - ==> #[trigger] mem.region_view(r) === mem.region_view(r)); - assert(inv_at(&*old(mem), pt.entries[i as int].get_Some_0(), (layer + 1) as nat, entry.get_Directory_addr())); - assert(forall|r: MemRegion| pt_res@.entries[i as int].get_Some_0().used_regions.contains(r) - ==> #[trigger] mem.region_view(r) === old(mem).region_view(r)); - assert(pt_res@.entries[i as int].is_Some()); - assert(pt_res@.entries[i as int].get_Some_0().used_regions === pt.entries[i as int].get_Some_0().used_regions); - lemma_inv_at_different_memory(&*old(mem), mem, pt.entries[i as int].get_Some_0(), (layer + 1) as nat, 
entry.get_Directory_addr()); - assert(inv_at(mem, pt_res@.entries[i as int].get_Some_0(), (layer + 1) as nat, entry.get_Directory_addr())); - } + }; + assert(ghost_pt_used_regions_pairwise_disjoint( + mem, + pt_res@, + layer as nat, + ptr, + )); + assert forall|i: nat| i < X86_NUM_ENTRIES implies { + let entry = #[trigger] view_at( + mem, + pt_res@, + layer as nat, + ptr, + i, + ); + entry.is_Directory() ==> { + &&& inv_at( + mem, + pt_res@.entries[i as int].get_Some_0(), + (layer + 1) as nat, + entry.get_Directory_addr(), + ) + } + } by { + let entry = #[trigger] view_at( + mem, + pt_res@, + layer as nat, + ptr, + i, + ); + if i == idx { + assert(pt_res@.entries[i as int].get_Some_0() + === dir_pt_res@); + assert(entry.get_Directory_addr() === dir_addr); + assert(inv_at( + mem, + pt_res@.entries[i as int].get_Some_0(), + (layer + 1) as nat, + entry.get_Directory_addr(), + )); + } else { + assert(directories_obey_invariant_at( + &*old(mem), + pt, + layer as nat, + ptr, + )); + assert(pt.entries[i as int] === pt_res@.entries[i as int]); + assert(entry === view_at( + &*old(mem), + pt, + layer as nat, + ptr, + i, + )); + assert(entry === view_at( + &*old(mem), + pt_res@, + layer as nat, + ptr, + i, + )); + if entry.is_Directory() { + let pt_entry = pt_res@.entries[i as int].get_Some_0(); + assert(ghost_pt_used_regions_pairwise_disjoint( + mem, + pt_res@, + layer as nat, + ptr, + )); + assert forall|r: MemRegion| #[trigger] + pt_entry.used_regions.contains( + r, + ) implies !new_regions@.contains(r) by { + assert(pt_entry.used_regions.contains(r)); + assert(old(mem).regions().contains(r)); + }; + assert(forall|r: MemRegion| #[trigger] + pt_entry.used_regions.contains(r) + ==> !dir_pt@.used_regions.contains(r)); + assert(forall|r: MemRegion| + pt_entry.used_regions.contains(r) + ==> #[trigger] mem.region_view(r) + === mem.region_view(r)); + assert(inv_at( + &*old(mem), + pt.entries[i as int].get_Some_0(), + (layer + 1) as nat, + entry.get_Directory_addr(), + )); + assert(forall|r: MemRegion| + pt_res@.entries[i as int].get_Some_0().used_regions.contains( + r) ==> #[trigger] mem.region_view(r) === old( + mem, + ).region_view(r)); + assert(pt_res@.entries[i as int].is_Some()); + assert(pt_res@.entries[i as int].get_Some_0().used_regions + === pt.entries[i as int].get_Some_0().used_regions); + lemma_inv_at_different_memory( + &*old(mem), + mem, + pt.entries[i as int].get_Some_0(), + (layer + 1) as nat, + entry.get_Directory_addr(), + ); + assert(inv_at( + mem, + pt_res@.entries[i as int].get_Some_0(), + (layer + 1) as nat, + entry.get_Directory_addr(), + )); } - }; - assert(directories_obey_invariant_at(mem, pt_res@, layer as nat, ptr)); + } + }; + assert(directories_obey_invariant_at( + mem, + pt_res@, + layer as nat, + ptr, + )); + assert(inv_at(mem, pt_res@, layer as nat, ptr)); + assert(Ok(interp_at(mem, pt_res@, layer as nat, ptr, base as nat)) + === interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).map_frame(vaddr as nat, pte@)) by { + lemma_interp_at_aux_facts( + mem, + pt_res@, + layer as nat, + ptr, + base as nat, + seq![], + ); + assert(pt_res@.region === pt.region); + // recursive postcondition: + assert(Ok( + interp_at( + mem, + dir_pt_res@, + (layer + 1) as nat, + dir_addr, + entry_base as nat, + ), + ) === interp_at( + &*old(mem), + dir_pt@, + (layer + 1) as nat, + dir_addr, + entry_base as nat, + ).map_frame(vaddr as nat, pte@)); assert(inv_at(mem, pt_res@, layer as nat, ptr)); - - assert(Ok(interp_at(mem, pt_res@, layer as nat, ptr, base as nat)) === 
interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).map_frame(vaddr as nat, pte@)) by { - lemma_interp_at_aux_facts(mem, pt_res@, layer as nat, ptr, base as nat, seq![]); - assert(pt_res@.region === pt.region); - // recursive postcondition: - assert(Ok(interp_at(mem, dir_pt_res@, (layer + 1) as nat, dir_addr, entry_base as nat)) - === interp_at(&*old(mem), dir_pt@, (layer + 1) as nat, dir_addr, entry_base as nat).map_frame(vaddr as nat, pte@)); - assert(inv_at(mem, pt_res@, layer as nat, ptr)); - assert(inv_at(&*old(mem), pt, layer as nat, ptr)); - assert(pt_res@.entries[idx as int].is_Some()); - assert(pt_res@.entries[idx as int].get_Some_0() === dir_pt_res@); - - assert(forall|i: nat| i < X86_NUM_ENTRIES && i != idx ==> pt.entries[i as int] === pt_res@.entries[i as int]); - - assert forall|i: nat| - i < X86_NUM_ENTRIES && i != idx - implies - interp_at(mem, pt_res@, layer as nat, ptr, base as nat).entries[i as int] - === #[trigger] interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).map_frame(vaddr as nat, pte@).get_Ok_0().entries[i as int] by - { - assert(interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).map_frame(vaddr as nat, pte@).is_Ok()); - assert(interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).map_frame(vaddr as nat, pte@).get_Ok_0().entries[i as int] === interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).entries[i as int]); - assert(interp_at(mem, pt_res@, layer as nat, ptr, base as nat).entries[i as int] === interp_at_entry(mem, pt_res@, layer as nat, ptr, base as nat, i)); - assert(interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).entries[i as int] === interp_at_entry(&*old(mem), pt, layer as nat, ptr, base as nat, i)); - if pt_res@.entries[i as int].is_Some() { - let pt_entry = pt_res@.entries[i as int].get_Some_0(); - assert(ghost_pt_used_regions_pairwise_disjoint(mem, pt_res@, layer as nat, ptr)); - assert forall|r: MemRegion| #[trigger] pt_entry.used_regions.contains(r) - implies !new_regions@.contains(r) by - { - assert(pt_entry.used_regions.contains(r)); - assert(old(mem).regions().contains(r)); - }; - assert(forall|r: MemRegion| #[trigger] pt_entry.used_regions.contains(r) - ==> !dir_pt_res@.used_regions.contains(r)); - assert(forall|r: MemRegion| pt_entry.used_regions.contains(r) - ==> #[trigger] old(mem).region_view(r) === mem.region_view(r)); - } - lemma_interp_at_entry_different_memory(&*old(mem), pt, mem, pt_res@, layer as nat, ptr, base as nat, i); - assert(interp_at_entry(mem, pt_res@, layer as nat, ptr, base as nat, i) === interp_at_entry(&*old(mem), pt, layer as nat, ptr, base as nat, i)); - }; - - assert(interp_at(mem, pt_res@, layer as nat, ptr, base as nat).entries[idx as int] === interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).map_frame(vaddr as nat, pte@).get_Ok_0().entries[idx as int]); - assert_seqs_equal!(interp_at(mem, pt_res@, layer as nat, ptr, base as nat).entries, interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).map_frame(vaddr as nat, pte@).get_Ok_0().entries); - assert(interp_at(mem, pt_res@, layer as nat, ptr, base as nat).entries === interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).map_frame(vaddr as nat, pte@).get_Ok_0().entries); - assert(Ok(interp_at(mem, pt_res@, layer as nat, ptr, base as nat)) === interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).map_frame(vaddr as nat, pte@)); + assert(inv_at(&*old(mem), pt, layer as nat, ptr)); + assert(pt_res@.entries[idx as int].is_Some()); + assert(pt_res@.entries[idx as int].get_Some_0() + === dir_pt_res@); + 
assert(forall|i: nat| + i < X86_NUM_ENTRIES && i != idx ==> pt.entries[i as int] + === pt_res@.entries[i as int]); + assert forall|i: nat| + i < X86_NUM_ENTRIES && i != idx implies interp_at( + mem, + pt_res@, + layer as nat, + ptr, + base as nat, + ).entries[i as int] === #[trigger] interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).map_frame( + vaddr as nat, + pte@, + ).get_Ok_0().entries[i as int] by { + assert(interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).map_frame(vaddr as nat, pte@).is_Ok()); + assert(interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).map_frame(vaddr as nat, pte@).get_Ok_0().entries[i as int] + === interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).entries[i as int]); + assert(interp_at( + mem, + pt_res@, + layer as nat, + ptr, + base as nat, + ).entries[i as int] === interp_at_entry( + mem, + pt_res@, + layer as nat, + ptr, + base as nat, + i, + )); + assert(interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).entries[i as int] === interp_at_entry( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + i, + )); + if pt_res@.entries[i as int].is_Some() { + let pt_entry = pt_res@.entries[i as int].get_Some_0(); + assert(ghost_pt_used_regions_pairwise_disjoint( + mem, + pt_res@, + layer as nat, + ptr, + )); + assert forall|r: MemRegion| #[trigger] + pt_entry.used_regions.contains( + r, + ) implies !new_regions@.contains(r) by { + assert(pt_entry.used_regions.contains(r)); + assert(old(mem).regions().contains(r)); + }; + assert(forall|r: MemRegion| #[trigger] + pt_entry.used_regions.contains(r) + ==> !dir_pt_res@.used_regions.contains(r)); + assert(forall|r: MemRegion| + pt_entry.used_regions.contains(r) + ==> #[trigger] old(mem).region_view(r) + === mem.region_view(r)); + } + lemma_interp_at_entry_different_memory( + &*old(mem), + pt, + mem, + pt_res@, + layer as nat, + ptr, + base as nat, + i, + ); + assert(interp_at_entry( + mem, + pt_res@, + layer as nat, + ptr, + base as nat, + i, + ) === interp_at_entry( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + i, + )); }; + assert(interp_at( + mem, + pt_res@, + layer as nat, + ptr, + base as nat, + ).entries[idx as int] === interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).map_frame(vaddr as nat, pte@).get_Ok_0().entries[idx as int]); + assert_seqs_equal!(interp_at(mem, pt_res@, layer as nat, ptr, base as nat).entries, interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).map_frame(vaddr as nat, pte@).get_Ok_0().entries); + assert(interp_at( + mem, + pt_res@, + layer as nat, + ptr, + base as nat, + ).entries === interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).map_frame(vaddr as nat, pte@).get_Ok_0().entries); + assert(Ok( + interp_at(mem, pt_res@, layer as nat, ptr, base as nat), + ) === interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).map_frame(vaddr as nat, pte@)); }; - - // posts - assert forall|r: MemRegion| !pt.used_regions.contains(r) && !new_regions@.contains(r) - implies #[trigger] mem.region_view(r) === old(mem).region_view(r) by - { assert(!dir_pt@.used_regions.contains(r)); }; - assert(mem.regions() === old(mem).regions().union(new_regions@)); - assert(pt_res@.used_regions === pt.used_regions.union(new_regions@)); - assert(pt_res@.region === pt.region); - - Ok(Ghost((pt_res@,new_regions@))) - }, - Err(e) => { - assert(Err(interp_at(mem, pt, layer as nat, ptr, base as nat)) === interp_at(&*old(mem), pt, layer 
as nat, ptr, base as nat).map_frame(vaddr as nat, pte@)); - Err(e) - }, - } + }; + // posts + assert forall|r: MemRegion| + !pt.used_regions.contains(r) && !new_regions@.contains( + r, + ) implies #[trigger] mem.region_view(r) === old(mem).region_view( + r, + ) by { + assert(!dir_pt@.used_regions.contains(r)); + }; + assert(mem.regions() === old(mem).regions().union(new_regions@)); + assert(pt_res@.used_regions === pt.used_regions.union(new_regions@)); + assert(pt_res@.region === pt.region); + Ok(Ghost((pt_res@, new_regions@))) + }, + Err(e) => { + assert(Err(interp_at(mem, pt, layer as nat, ptr, base as nat)) + === interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).map_frame(vaddr as nat, pte@)); + Err(e) + }, } - } else { - assert(Err(interp_at(mem, pt, layer as nat, ptr, base as nat)) === interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).map_frame(vaddr as nat, pte@)); - Err(()) } } else { - if x86_arch_exec().entry_size(layer) == pte.frame.size { - proof { - assert_by_contradiction!(layer > 0, { + assert(Err(interp_at(mem, pt, layer as nat, ptr, base as nat)) === interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).map_frame(vaddr as nat, pte@)); + Err(()) + } + } else { + if x86_arch_exec().entry_size(layer) == pte.frame.size { + proof { + assert_by_contradiction!(layer > 0, { let iprime = choose|i: nat| 0 < i && i < X86_NUM_LAYERS && #[trigger] x86_arch_spec.entry_size(i) == pte.frame.size; assert(x86_arch_spec.entry_size(0) == pte.frame.size); assert(x86_arch_spec.contains_entry_size_at_index_atleast(pte.frame.size as nat, 1)); @@ -3414,951 +4775,1862 @@ pub mod impl_u { }; assert(false); }); - let frame_base = pte.frame.base as u64; - assert(addr_is_zero_padded(layer as nat, frame_base, true)) by { - assert(x86_arch_spec.contains_entry_size_at_index_atleast(pte.frame.size as nat, 1)); - assert(x86_arch_spec.entry_size(layer as nat) == pte.frame.size); - assert(aligned(pte.frame.base as nat, pte.frame.size as nat)); - lemma_aligned_addr_mask_facts(frame_base); - if layer == 1 { - assert(x86_arch_spec.entry_size(1) == L1_ENTRY_SIZE); - assert(frame_base & MASK_L1_PG_ADDR == frame_base & MASK_ADDR); - } else if layer == 2 { - assert(x86_arch_spec.entry_size(2) == L2_ENTRY_SIZE); - assert(frame_base & MASK_L2_PG_ADDR == frame_base & MASK_ADDR); - } else if layer == 3 { - assert(x86_arch_spec.entry_size(3) == L3_ENTRY_SIZE); - assert(frame_base & MASK_L3_PG_ADDR == frame_base & MASK_ADDR); - } else { - assert(false); - } - }; - assert(frame_base & MASK_ADDR == frame_base) by { - lemma_aligned_addr_mask_facts(frame_base); - }; - } - let new_page_entry = PageDirectoryEntry::new_page_entry(layer, pte); - let pwmem: Ghost = Ghost(*mem); - mem.write(ptr, idx, Ghost(pt.region), new_page_entry.entry); - assert(mem.region_view(pt.region) === pwmem@.region_view(pt.region).update(idx as int, new_page_entry.entry)); - - assert forall|i: nat| i < X86_NUM_ENTRIES implies - #[trigger] view_at(mem, pt, layer as nat, ptr, i) == if i == idx { new_page_entry@ } else { view_at(&*old(mem), pt, layer as nat, ptr, i) } by { }; - assert forall|i: nat| i < X86_NUM_ENTRIES && i != idx implies - entry_at_spec(mem, pt, layer as nat, ptr, i) == entry_at_spec(&*old(mem), pt, layer as nat, ptr, i) by { }; - - assert(directories_obey_invariant_at(mem, pt, layer as nat, ptr)) by { - assert forall|i: nat| i < X86_NUM_ENTRIES implies { - let entry = #[trigger] view_at(mem, pt, layer as nat, ptr, i); - entry.is_Directory() ==> { - &&& inv_at(mem, pt.entries[i as 
int].get_Some_0(), (layer + 1) as nat, entry.get_Directory_addr()) - } - } by { - let entry = view_at(mem, pt, layer as nat, ptr, i); - if i != idx { - assert(directories_obey_invariant_at(&*old(mem), pt, layer as nat, ptr)); - if entry.is_Directory() { - lemma_inv_at_different_memory(&*old(mem), mem, pt.entries[i as int].get_Some_0(), (layer + 1) as nat, entry.get_Directory_addr()); - } - } - }; + let frame_base = pte.frame.base as u64; + assert(addr_is_zero_padded(layer as nat, frame_base, true)) by { + assert(x86_arch_spec.contains_entry_size_at_index_atleast( + pte.frame.size as nat, + 1, + )); + assert(x86_arch_spec.entry_size(layer as nat) == pte.frame.size); + assert(aligned(pte.frame.base as nat, pte.frame.size as nat)); + lemma_aligned_addr_mask_facts(frame_base); + if layer == 1 { + assert(x86_arch_spec.entry_size(1) == L1_ENTRY_SIZE); + assert(frame_base & MASK_L1_PG_ADDR == frame_base & MASK_ADDR); + } else if layer == 2 { + assert(x86_arch_spec.entry_size(2) == L2_ENTRY_SIZE); + assert(frame_base & MASK_L2_PG_ADDR == frame_base & MASK_ADDR); + } else if layer == 3 { + assert(x86_arch_spec.entry_size(3) == L3_ENTRY_SIZE); + assert(frame_base & MASK_L3_PG_ADDR == frame_base & MASK_ADDR); + } else { + assert(false); + } }; - assert(inv_at(mem, pt, layer as nat, ptr)); - - assert(Ok(interp_at(mem, pt, layer as nat, ptr, base as nat)) === interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).map_frame(vaddr as nat, pte@)) by { - lemma_interp_at_aux_facts(mem, pt, layer as nat, ptr, base as nat, seq![]); - assert(inv_at(mem, pt, layer as nat, ptr)); - assert(inv_at(&*old(mem), pt, layer as nat, ptr)); - assert(pt.entries[idx as int].is_None()); - - assert forall|i: nat| - i < X86_NUM_ENTRIES && i != idx - implies - interp_at(mem, pt, layer as nat, ptr, base as nat).entries[i as int] - === #[trigger] interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).map_frame(vaddr as nat, pte@).get_Ok_0().entries[i as int] by - { - assert(interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).map_frame(vaddr as nat, pte@).is_Ok()); - assert(interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).map_frame(vaddr as nat, pte@).get_Ok_0().entries[i as int] === interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).entries[i as int]); - assert(interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).entries[i as int] === interp_at_entry(&*old(mem), pt, layer as nat, ptr, base as nat, i)); - assert(old(mem).spec_read(i, pt.region) === mem.spec_read(i, pt.region)); - lemma_interp_at_entry_different_memory(&*old(mem), pt, mem, pt, layer as nat, ptr, base as nat, i); - assert(interp_at_entry(mem, pt, layer as nat, ptr, base as nat, i) === interp_at_entry(&*old(mem), pt, layer as nat, ptr, base as nat, i)); - }; - - let new_interp = interp_at(mem, pt, layer as nat, ptr, base as nat); - assert(new_interp.entries[idx as int] === interp_at_entry(mem, pt, layer as nat, ptr, base as nat, idx as nat)); - assert(view_at(mem, pt, layer as nat, ptr, idx as nat) === new_page_entry@); - - assert(interp_at_entry(mem, pt, layer as nat, ptr, base as nat, idx as nat) === l1::NodeEntry::Page(pte@)); - - assert(new_interp.entries[idx as int] == interp@.map_frame(vaddr as nat, pte@).get_Ok_0().entries[idx as int]); - assert(new_interp.entries =~= interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).map_frame(vaddr as nat, pte@).get_Ok_0().entries); - assert(Ok(new_interp) === interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).map_frame(vaddr as nat, pte@)); + assert(frame_base & MASK_ADDR == 
frame_base) by { + lemma_aligned_addr_mask_facts(frame_base); }; - - - // posts - assert(forall|r: MemRegion| !pt.used_regions.contains(r) ==> #[trigger] mem.region_view(r) === old(mem).region_view(r)); - proof { - lemma_set_union_empty_equals_set::(mem.regions()); - lemma_set_union_empty_equals_set::(pt.used_regions); - } - assert(forall|r: MemRegion| set![].contains(r) ==> !(#[trigger] old(mem).regions().contains(r))); - assert(forall|r: MemRegion| set![].contains(r) ==> !(#[trigger] pt.used_regions.contains(r))); - assert(pt.region === pt.region); - - Ok(Ghost((pt, set![]))) + } + let new_page_entry = PageDirectoryEntry::new_page_entry(layer, pte); + let pwmem: Ghost = Ghost(*mem); + mem.write(ptr, idx, Ghost(pt.region), new_page_entry.entry); + assert(mem.region_view(pt.region) === pwmem@.region_view(pt.region).update( + idx as int, + new_page_entry.entry, + )); + assert forall|i: nat| i < X86_NUM_ENTRIES implies #[trigger] view_at( + mem, + pt, + layer as nat, + ptr, + i, + ) == if i == idx { + new_page_entry@ } else { - let new_dir_region = mem.alloc_page(); - let new_dir_ptr = new_dir_region.base; - let new_dir_ptr_u64 = new_dir_ptr as u64; - let new_dir_pt: Ghost = Ghost( - PTDir { - region: new_dir_region@, - entries: new_seq::>(X86_NUM_ENTRIES as nat, None), - used_regions: set![new_dir_region@], - }); - assert(new_dir_ptr_u64 & MASK_DIR_ADDR == new_dir_ptr_u64) by { - lemma_page_aligned_implies_mask_dir_addr_is_identity(); + view_at(&*old(mem), pt, layer as nat, ptr, i) + } by {}; + assert forall|i: nat| i < X86_NUM_ENTRIES && i != idx implies entry_at_spec( + mem, + pt, + layer as nat, + ptr, + i, + ) == entry_at_spec(&*old(mem), pt, layer as nat, ptr, i) by {}; + assert(directories_obey_invariant_at(mem, pt, layer as nat, ptr)) by { + assert forall|i: nat| i < X86_NUM_ENTRIES implies { + let entry = #[trigger] view_at(mem, pt, layer as nat, ptr, i); + entry.is_Directory() ==> { + &&& inv_at( + mem, + pt.entries[i as int].get_Some_0(), + (layer + 1) as nat, + entry.get_Directory_addr(), + ) + } + } by { + let entry = view_at(mem, pt, layer as nat, ptr, i); + if i != idx { + assert(directories_obey_invariant_at( + &*old(mem), + pt, + layer as nat, + ptr, + )); + if entry.is_Directory() { + lemma_inv_at_different_memory( + &*old(mem), + mem, + pt.entries[i as int].get_Some_0(), + (layer + 1) as nat, + entry.get_Directory_addr(), + ); + } + } }; - let new_dir_entry = PageDirectoryEntry::new_dir_entry(layer, new_dir_ptr_u64); - mem.write(ptr, idx, Ghost(pt.region), new_dir_entry.entry); - - // After writing the new empty directory entry we prove that the resulting state - // satisfies the invariant and that the interpretation remains unchanged. - let pt_with_empty: Ghost = Ghost( - PTDir { - region: pt.region, - entries: pt.entries.update(idx as int, Some(new_dir_pt@)), - used_regions: pt.used_regions.insert(new_dir_pt@.region), - }); - // For easier reference we take a snapshot of mem here. In the subsequent proofs - // (after the recursive call) we have old(mem), mem_with_empty and mem to refer - // to each relevant state. 
- let mem_with_empty: Ghost<&mem::PageTableMemory> = Ghost(mem); - proof { - assert forall|i: nat| i < X86_NUM_ENTRIES implies - #[trigger] view_at(mem_with_empty@, pt_with_empty@, layer as nat, ptr, i) == if i == idx { new_dir_entry@ } else { view_at(&*old(mem), pt, layer as nat, ptr, i) } by { }; - assert forall|i: nat| i < X86_NUM_ENTRIES implies - #[trigger] entry_at_spec(mem_with_empty@, pt_with_empty@, layer as nat, ptr, i) == if i == idx { new_dir_entry } else { entry_at_spec(&*old(mem), pt, layer as nat, ptr, i) } by { }; - assert(pt_with_empty@.region === pt.region); - lemma_new_seq::(512nat, 0u64); - lemma_new_seq::>(X86_NUM_ENTRIES as nat, None); - assert(new_dir_pt@.entries.len() == 512); - assert(new_dir_region@.contains(new_dir_ptr as nat)); - assert(mem_with_empty@.region_view(new_dir_region@) === new_seq(512nat, 0u64)); - lemma_zeroed_page_implies_empty_at(mem_with_empty@, new_dir_pt@, (layer + 1) as nat, new_dir_ptr); - assert(empty_at(mem_with_empty@, new_dir_pt@, (layer + 1) as nat, new_dir_ptr)); - assert(inv_at(mem_with_empty@, new_dir_pt@, (layer + 1) as nat, new_dir_ptr)); - - assert(forall|r: MemRegion| r !== new_dir_pt@.region && r !== pt_with_empty@.region - ==> mem_with_empty@.region_view(r) === old(mem).region_view(r)); - assert(mem_with_empty@.region_view(pt_with_empty@.region) - === old(mem).region_view(pt_with_empty@.region).update(idx as int, new_dir_entry.entry)); - assert(inv_at(mem_with_empty@, pt_with_empty@, layer as nat, ptr)) by { - assert(ghost_pt_matches_structure(mem_with_empty@, pt_with_empty@, layer as nat, ptr)); - assert(directories_obey_invariant_at(mem_with_empty@, pt_with_empty@, layer as nat, ptr)) by { - assert forall|i: nat| i < X86_NUM_ENTRIES implies { - let entry = #[trigger] view_at(mem_with_empty@, pt_with_empty@, layer as nat, ptr, i); - entry.is_Directory() - ==> inv_at(mem_with_empty@, pt_with_empty@.entries[i as int].get_Some_0(), (layer + 1) as nat, entry.get_Directory_addr()) - } by { - let entry = view_at(mem_with_empty@, pt_with_empty@, layer as nat, ptr, i); - if i == idx { - } else { - if entry.is_Directory() { - let pt_entry = pt.entries[i as int].get_Some_0(); - assert(inv_at(&*old(mem), pt_entry, (layer + 1) as nat, entry.get_Directory_addr())); - assert(pt.entries[i as int] == pt_with_empty@.entries[i as int]); - assert(old(mem).regions().contains(pt_entry.region)); - lemma_inv_at_different_memory(&*old(mem), mem_with_empty@, pt_entry, (layer + 1) as nat, entry.get_Directory_addr()); - assert(inv_at(mem_with_empty@, pt_with_empty@.entries[i as int].get_Some_0(), (layer + 1) as nat, entry.get_Directory_addr())); - } + }; + assert(inv_at(mem, pt, layer as nat, ptr)); + assert(Ok(interp_at(mem, pt, layer as nat, ptr, base as nat)) === interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).map_frame(vaddr as nat, pte@)) by { + lemma_interp_at_aux_facts(mem, pt, layer as nat, ptr, base as nat, seq![]); + assert(inv_at(mem, pt, layer as nat, ptr)); + assert(inv_at(&*old(mem), pt, layer as nat, ptr)); + assert(pt.entries[idx as int].is_None()); + assert forall|i: nat| i < X86_NUM_ENTRIES && i != idx implies interp_at( + mem, + pt, + layer as nat, + ptr, + base as nat, + ).entries[i as int] === #[trigger] interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).map_frame(vaddr as nat, pte@).get_Ok_0().entries[i as int] by { + assert(interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).map_frame( + vaddr as nat, + pte@, + ).is_Ok()); + assert(interp_at(&*old(mem), pt, layer as nat, ptr, 
base as nat).map_frame( + vaddr as nat, + pte@, + ).get_Ok_0().entries[i as int] === interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).entries[i as int]); + assert(interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).entries[i as int] === interp_at_entry( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + i, + )); + assert(old(mem).spec_read(i, pt.region) === mem.spec_read(i, pt.region)); + lemma_interp_at_entry_different_memory( + &*old(mem), + pt, + mem, + pt, + layer as nat, + ptr, + base as nat, + i, + ); + assert(interp_at_entry(mem, pt, layer as nat, ptr, base as nat, i) + === interp_at_entry(&*old(mem), pt, layer as nat, ptr, base as nat, i)); + }; + let new_interp = interp_at(mem, pt, layer as nat, ptr, base as nat); + assert(new_interp.entries[idx as int] === interp_at_entry( + mem, + pt, + layer as nat, + ptr, + base as nat, + idx as nat, + )); + assert(view_at(mem, pt, layer as nat, ptr, idx as nat) === new_page_entry@); + assert(interp_at_entry(mem, pt, layer as nat, ptr, base as nat, idx as nat) + === l1::NodeEntry::Page(pte@)); + assert(new_interp.entries[idx as int] == interp@.map_frame( + vaddr as nat, + pte@, + ).get_Ok_0().entries[idx as int]); + assert(new_interp.entries =~= interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).map_frame(vaddr as nat, pte@).get_Ok_0().entries); + assert(Ok(new_interp) === interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).map_frame(vaddr as nat, pte@)); + }; + // posts + assert(forall|r: MemRegion| + !pt.used_regions.contains(r) ==> #[trigger] mem.region_view(r) === old( + mem, + ).region_view(r)); + proof { + lemma_set_union_empty_equals_set::(mem.regions()); + lemma_set_union_empty_equals_set::(pt.used_regions); + } + assert(forall|r: MemRegion| + set![].contains(r) ==> !(#[trigger] old(mem).regions().contains(r))); + assert(forall|r: MemRegion| + set![].contains(r) ==> !(#[trigger] pt.used_regions.contains(r))); + assert(pt.region === pt.region); + Ok(Ghost((pt, set![]))) + } else { + let new_dir_region = mem.alloc_page(); + let new_dir_ptr = new_dir_region.base; + let new_dir_ptr_u64 = new_dir_ptr as u64; + let new_dir_pt: Ghost = Ghost( + PTDir { + region: new_dir_region@, + entries: new_seq::>(X86_NUM_ENTRIES as nat, None), + used_regions: set![new_dir_region@], + }, + ); + assert(new_dir_ptr_u64 & MASK_DIR_ADDR == new_dir_ptr_u64) by { + lemma_page_aligned_implies_mask_dir_addr_is_identity(); + }; + let new_dir_entry = PageDirectoryEntry::new_dir_entry(layer, new_dir_ptr_u64); + mem.write(ptr, idx, Ghost(pt.region), new_dir_entry.entry); + // After writing the new empty directory entry we prove that the resulting state + // satisfies the invariant and that the interpretation remains unchanged. + let pt_with_empty: Ghost = Ghost( + PTDir { + region: pt.region, + entries: pt.entries.update(idx as int, Some(new_dir_pt@)), + used_regions: pt.used_regions.insert(new_dir_pt@.region), + }, + ); + // For easier reference we take a snapshot of mem here. In the subsequent proofs + // (after the recursive call) we have old(mem), mem_with_empty and mem to refer + // to each relevant state. 
+ let mem_with_empty: Ghost<&mem::PageTableMemory> = Ghost(mem); + proof { + assert forall|i: nat| i < X86_NUM_ENTRIES implies #[trigger] view_at( + mem_with_empty@, + pt_with_empty@, + layer as nat, + ptr, + i, + ) == if i == idx { + new_dir_entry@ + } else { + view_at(&*old(mem), pt, layer as nat, ptr, i) + } by {}; + assert forall|i: nat| i < X86_NUM_ENTRIES implies #[trigger] entry_at_spec( + mem_with_empty@, + pt_with_empty@, + layer as nat, + ptr, + i, + ) == if i == idx { + new_dir_entry + } else { + entry_at_spec(&*old(mem), pt, layer as nat, ptr, i) + } by {}; + assert(pt_with_empty@.region === pt.region); + lemma_new_seq::(512nat, 0u64); + lemma_new_seq::>(X86_NUM_ENTRIES as nat, None); + assert(new_dir_pt@.entries.len() == 512); + assert(new_dir_region@.contains(new_dir_ptr as nat)); + assert(mem_with_empty@.region_view(new_dir_region@) === new_seq(512nat, 0u64)); + lemma_zeroed_page_implies_empty_at( + mem_with_empty@, + new_dir_pt@, + (layer + 1) as nat, + new_dir_ptr, + ); + assert(empty_at(mem_with_empty@, new_dir_pt@, (layer + 1) as nat, new_dir_ptr)); + assert(inv_at(mem_with_empty@, new_dir_pt@, (layer + 1) as nat, new_dir_ptr)); + assert(forall|r: MemRegion| + r !== new_dir_pt@.region && r !== pt_with_empty@.region + ==> mem_with_empty@.region_view(r) === old(mem).region_view(r)); + assert(mem_with_empty@.region_view(pt_with_empty@.region) === old( + mem, + ).region_view(pt_with_empty@.region).update(idx as int, new_dir_entry.entry)); + assert(inv_at(mem_with_empty@, pt_with_empty@, layer as nat, ptr)) by { + assert(ghost_pt_matches_structure( + mem_with_empty@, + pt_with_empty@, + layer as nat, + ptr, + )); + assert(directories_obey_invariant_at( + mem_with_empty@, + pt_with_empty@, + layer as nat, + ptr, + )) by { + assert forall|i: nat| i < X86_NUM_ENTRIES implies { + let entry = #[trigger] view_at( + mem_with_empty@, + pt_with_empty@, + layer as nat, + ptr, + i, + ); + entry.is_Directory() ==> inv_at( + mem_with_empty@, + pt_with_empty@.entries[i as int].get_Some_0(), + (layer + 1) as nat, + entry.get_Directory_addr(), + ) + } by { + let entry = view_at( + mem_with_empty@, + pt_with_empty@, + layer as nat, + ptr, + i, + ); + if i == idx { + } else { + if entry.is_Directory() { + let pt_entry = pt.entries[i as int].get_Some_0(); + assert(inv_at( + &*old(mem), + pt_entry, + (layer + 1) as nat, + entry.get_Directory_addr(), + )); + assert(pt.entries[i as int] + == pt_with_empty@.entries[i as int]); + assert(old(mem).regions().contains(pt_entry.region)); + lemma_inv_at_different_memory( + &*old(mem), + mem_with_empty@, + pt_entry, + (layer + 1) as nat, + entry.get_Directory_addr(), + ); + assert(inv_at( + mem_with_empty@, + pt_with_empty@.entries[i as int].get_Some_0(), + (layer + 1) as nat, + entry.get_Directory_addr(), + )); } - }; + } }; }; - - lemma_empty_at_interp_at_equal_l1_empty_dir(mem_with_empty@, pt_with_empty@, layer as nat, ptr, base as nat, idx as nat); - interp@.lemma_new_empty_dir(idx as nat); - lemma_interp_at_aux_facts(mem_with_empty@, pt_with_empty@, layer as nat, ptr, base as nat, seq![]); - - } - let new_dir_interp = Ghost(interp_at(mem_with_empty@, new_dir_pt@, (layer + 1) as nat, new_dir_ptr, entry_base as nat)); - proof { - assert(forall|i: nat| i < new_dir_interp@.entries.len() ==> new_dir_interp@.entries[i as int] === l1::NodeEntry::Empty()); - assert(new_dir_interp@.entries =~= interp@.new_empty_dir(idx as nat).entries); - assert(new_dir_interp@ == interp@.new_empty_dir(idx as nat)); - } - match map_frame_aux(mem, new_dir_pt, layer + 1, 
new_dir_ptr, entry_base, vaddr, pte) { - Ok(rec_res) => { - let dir_pt_res: Ghost = Ghost(rec_res@.0); - let dir_new_regions: Ghost> = Ghost(rec_res@.1); - let pt_final: Ghost = Ghost( - PTDir { - region: pt_with_empty@.region, - entries: pt_with_empty@.entries.update(idx as int, Some(dir_pt_res@)), - used_regions: pt_with_empty@.used_regions.union(dir_new_regions@), - }); - let new_regions: Ghost> = Ghost(dir_new_regions@.insert(new_dir_region@)); - proof { - assert(idx < pt_with_empty@.entries.len()); - assert(!dir_new_regions@.contains(pt_final@.region)); - assert(!new_dir_pt@.used_regions.contains(pt_final@.region)); - - assert forall|i: nat| i < X86_NUM_ENTRIES implies - #[trigger] view_at(mem, pt_final@, layer as nat, ptr, i) == if i == idx { new_dir_entry@ } else { view_at(mem_with_empty@, pt_with_empty@, layer as nat, ptr, i) } by { }; - assert forall|i: nat| i < X86_NUM_ENTRIES implies - #[trigger] entry_at_spec(mem, pt_final@, layer as nat, ptr, i) == if i == idx { new_dir_entry } else { entry_at_spec(mem_with_empty@, pt_with_empty@, layer as nat, ptr, i) } by { }; - assert(inv_at(mem, pt_final@, layer as nat, ptr)) by { - assert(ghost_pt_matches_structure(mem, pt_final@, layer as nat, ptr)) by { - assert forall|i: nat| - i < X86_NUM_ENTRIES implies { - let entry = #[trigger] view_at(mem, pt_final@, layer as nat, ptr, i); - entry.is_Directory() == pt_final@.entries[i as int].is_Some() - } by { - assert(directories_obey_invariant_at(mem_with_empty@, pt_with_empty@, layer as nat, ptr)); - assert(ghost_pt_matches_structure(mem_with_empty@, pt_with_empty@, layer as nat, ptr)); - if i == idx { } else { } - }; + }; + lemma_empty_at_interp_at_equal_l1_empty_dir( + mem_with_empty@, + pt_with_empty@, + layer as nat, + ptr, + base as nat, + idx as nat, + ); + interp@.lemma_new_empty_dir(idx as nat); + lemma_interp_at_aux_facts( + mem_with_empty@, + pt_with_empty@, + layer as nat, + ptr, + base as nat, + seq![], + ); + } + let new_dir_interp = Ghost( + interp_at( + mem_with_empty@, + new_dir_pt@, + (layer + 1) as nat, + new_dir_ptr, + entry_base as nat, + ), + ); + proof { + assert(forall|i: nat| + i < new_dir_interp@.entries.len() ==> new_dir_interp@.entries[i as int] + === l1::NodeEntry::Empty()); + assert(new_dir_interp@.entries =~= interp@.new_empty_dir(idx as nat).entries); + assert(new_dir_interp@ == interp@.new_empty_dir(idx as nat)); + } + match map_frame_aux( + mem, + new_dir_pt, + layer + 1, + new_dir_ptr, + entry_base, + vaddr, + pte, + ) { + Ok(rec_res) => { + let dir_pt_res: Ghost = Ghost(rec_res@.0); + let dir_new_regions: Ghost> = Ghost(rec_res@.1); + let pt_final: Ghost = Ghost( + PTDir { + region: pt_with_empty@.region, + entries: pt_with_empty@.entries.update( + idx as int, + Some(dir_pt_res@), + ), + used_regions: pt_with_empty@.used_regions.union(dir_new_regions@), + }, + ); + let new_regions: Ghost> = Ghost( + dir_new_regions@.insert(new_dir_region@), + ); + proof { + assert(idx < pt_with_empty@.entries.len()); + assert(!dir_new_regions@.contains(pt_final@.region)); + assert(!new_dir_pt@.used_regions.contains(pt_final@.region)); + assert forall|i: nat| i < X86_NUM_ENTRIES implies #[trigger] view_at( + mem, + pt_final@, + layer as nat, + ptr, + i, + ) == if i == idx { + new_dir_entry@ + } else { + view_at(mem_with_empty@, pt_with_empty@, layer as nat, ptr, i) + } by {}; + assert forall|i: nat| + i < X86_NUM_ENTRIES implies #[trigger] entry_at_spec( + mem, + pt_final@, + layer as nat, + ptr, + i, + ) == if i == idx { + new_dir_entry + } else { + 
entry_at_spec(mem_with_empty@, pt_with_empty@, layer as nat, ptr, i) + } by {}; + assert(inv_at(mem, pt_final@, layer as nat, ptr)) by { + assert(ghost_pt_matches_structure( + mem, + pt_final@, + layer as nat, + ptr, + )) by { + assert forall|i: nat| i < X86_NUM_ENTRIES implies { + let entry = #[trigger] view_at( + mem, + pt_final@, + layer as nat, + ptr, + i, + ); + entry.is_Directory() + == pt_final@.entries[i as int].is_Some() + } by { + assert(directories_obey_invariant_at( + mem_with_empty@, + pt_with_empty@, + layer as nat, + ptr, + )); + assert(ghost_pt_matches_structure( + mem_with_empty@, + pt_with_empty@, + layer as nat, + ptr, + )); + if i == idx { + } else { + } }; - - assert(directories_obey_invariant_at(mem, pt_final@, layer as nat, ptr)) by { - assert forall|i: nat| i < X86_NUM_ENTRIES implies { - let entry = #[trigger] view_at(mem, pt_final@, layer as nat, ptr, i); - entry.is_Directory() - ==> inv_at(mem, pt_final@.entries[i as int].get_Some_0(), (layer + 1) as nat, entry.get_Directory_addr()) - } by { - let entry = view_at(mem, pt_final@, layer as nat, ptr, i); - assert(directories_obey_invariant_at(mem_with_empty@, pt_with_empty@, layer as nat, ptr)); - assert(ghost_pt_matches_structure(mem_with_empty@, pt_with_empty@, layer as nat, ptr)); - assert(ghost_pt_used_regions_rtrancl(mem_with_empty@, pt_with_empty@, layer as nat, ptr)); - - if i == idx { + }; + assert(directories_obey_invariant_at( + mem, + pt_final@, + layer as nat, + ptr, + )) by { + assert forall|i: nat| i < X86_NUM_ENTRIES implies { + let entry = #[trigger] view_at( + mem, + pt_final@, + layer as nat, + ptr, + i, + ); + entry.is_Directory() ==> inv_at( + mem, + pt_final@.entries[i as int].get_Some_0(), + (layer + 1) as nat, + entry.get_Directory_addr(), + ) + } by { + let entry = view_at(mem, pt_final@, layer as nat, ptr, i); + assert(directories_obey_invariant_at( + mem_with_empty@, + pt_with_empty@, + layer as nat, + ptr, + )); + assert(ghost_pt_matches_structure( + mem_with_empty@, + pt_with_empty@, + layer as nat, + ptr, + )); + assert(ghost_pt_used_regions_rtrancl( + mem_with_empty@, + pt_with_empty@, + layer as nat, + ptr, + )); + if i == idx { + } else { + assert(entry == view_at( + mem_with_empty@, + pt_with_empty@, + layer as nat, + ptr, + i, + )); + assert(pt_final@.entries[i as int] + === pt_with_empty@.entries[i as int]); + if entry.is_Directory() { + assert(pt_with_empty@.entries[i as int].is_Some()); + let pt_entry = + pt_with_empty@.entries[i as int].get_Some_0(); + assert(pt_with_empty@.entries[i as int] + === pt_final@.entries[i as int]); + assert(pt_with_empty@.entries[i as int].get_Some_0() + === pt_final@.entries[i as int].get_Some_0()); + assert(forall|r: MemRegion| #[trigger] + pt_entry.used_regions.contains(r) + ==> !dir_new_regions@.contains(r) + && !new_dir_pt@.used_regions.contains(r)); + assert(forall|r: MemRegion| + pt_entry.used_regions.contains(r) + ==> #[trigger] mem_with_empty@.region_view( + r, + ) === mem.region_view(r)); + lemma_inv_at_different_memory( + mem_with_empty@, + mem, + pt_entry, + (layer + 1) as nat, + entry.get_Directory_addr(), + ); + assert(inv_at( + mem, + pt_final@.entries[i as int].get_Some_0(), + (layer + 1) as nat, + entry.get_Directory_addr(), + )); + } + } + }; + }; + assert(directories_have_flags(mem, pt_final@, layer as nat, ptr)); + assert(pt_final@.entries.len() == pt_with_empty@.entries.len()); + assert(forall|i: nat| + i != idx && i < pt_final@.entries.len() + ==> pt_final@.entries[i as int] + === pt_with_empty@.entries[i as int]); + 
assert(ghost_pt_used_regions_rtrancl( + mem, + pt_final@, + layer as nat, + ptr, + )) by { + assert forall|i: nat, r: MemRegion| + i < pt_final@.entries.len() + && pt_final@.entries[i as int].is_Some() + && #[trigger] pt_final@.entries[i as int].get_Some_0().used_regions.contains( + r) implies pt_final@.used_regions.contains(r) by { + if i == idx { + if dir_new_regions@.contains(r) { + assert(pt_final@.used_regions.contains(r)); } else { - assert(entry == view_at(mem_with_empty@, pt_with_empty@, layer as nat, ptr, i)); - assert(pt_final@.entries[i as int] === pt_with_empty@.entries[i as int]); - if entry.is_Directory() { - assert(pt_with_empty@.entries[i as int].is_Some()); - let pt_entry = pt_with_empty@.entries[i as int].get_Some_0(); - assert(pt_with_empty@.entries[i as int] === pt_final@.entries[i as int]); - assert(pt_with_empty@.entries[i as int].get_Some_0() === pt_final@.entries[i as int].get_Some_0()); - assert(forall|r: MemRegion| #[trigger] pt_entry.used_regions.contains(r) - ==> !dir_new_regions@.contains(r) && !new_dir_pt@.used_regions.contains(r)); - assert(forall|r: MemRegion| pt_entry.used_regions.contains(r) - ==> #[trigger] mem_with_empty@.region_view(r) === mem.region_view(r)); - lemma_inv_at_different_memory(mem_with_empty@, mem, pt_entry, (layer + 1) as nat, entry.get_Directory_addr()); - assert(inv_at(mem, pt_final@.entries[i as int].get_Some_0(), (layer + 1) as nat, entry.get_Directory_addr())); - } + assert(pt_with_empty@.entries[i as int].get_Some_0().used_regions.contains( + r)); + assert(pt_with_empty@.used_regions.contains(r)); + assert(pt_final@.used_regions.contains(r)); } - }; + } else { + } }; - - assert(directories_have_flags(mem, pt_final@, layer as nat, ptr)); - - assert(pt_final@.entries.len() == pt_with_empty@.entries.len()); - assert(forall|i: nat| i != idx && i < pt_final@.entries.len() ==> pt_final@.entries[i as int] === pt_with_empty@.entries[i as int]); - assert(ghost_pt_used_regions_rtrancl(mem, pt_final@, layer as nat, ptr)) by { - assert forall|i: nat, r: MemRegion| - i < pt_final@.entries.len() && - pt_final@.entries[i as int].is_Some() && - #[trigger] pt_final@.entries[i as int].get_Some_0().used_regions.contains(r) - implies pt_final@.used_regions.contains(r) - by { - if i == idx { - if dir_new_regions@.contains(r) { - assert(pt_final@.used_regions.contains(r)); - } else { - assert(pt_with_empty@.entries[i as int].get_Some_0().used_regions.contains(r)); - assert(pt_with_empty@.used_regions.contains(r)); - assert(pt_final@.used_regions.contains(r)); + }; + assert(ghost_pt_used_regions_pairwise_disjoint( + mem, + pt_final@, + layer as nat, + ptr, + )) by { + assert forall|i: nat, j: nat, r: MemRegion| + i != j && i < pt_final@.entries.len() + && pt_final@.entries[i as int].is_Some() + && #[trigger] pt_final@.entries[i as int].get_Some_0().used_regions.contains( + r) && j < pt_final@.entries.len() + && pt_final@.entries[j as int].is_Some() implies !( + #[trigger] pt_final@.entries[j as int].get_Some_0().used_regions.contains( + r)) by { + assert(ghost_pt_used_regions_pairwise_disjoint( + mem_with_empty@, + pt_with_empty@, + layer as nat, + ptr, + )); + if j == idx { + assert(pt_final@.entries[j as int].get_Some_0() + === dir_pt_res@); + assert(pt_final@.entries[i as int] + === pt.entries[i as int]); + if dir_new_regions@.contains(r) { + assert(!new_dir_pt@.used_regions.contains(r)); + assert(!mem_with_empty@.regions().contains(r)); + assert(!dir_pt_res@.used_regions.contains(r)); + } else { + if new_dir_pt@.used_regions.contains(r) { + 
assert(pt.used_regions.contains(r)); + assert(mem_with_empty@.regions().contains(r)); + assert(!dir_pt_res@.used_regions.contains(r)); } - } else { } - }; - }; - assert(ghost_pt_used_regions_pairwise_disjoint(mem, pt_final@, layer as nat, ptr)) by { - assert forall|i: nat, j: nat, r: MemRegion| - i != j && - i < pt_final@.entries.len() && pt_final@.entries[i as int].is_Some() && - #[trigger] pt_final@.entries[i as int].get_Some_0().used_regions.contains(r) && - j < pt_final@.entries.len() && pt_final@.entries[j as int].is_Some() - implies !(#[trigger] pt_final@.entries[j as int].get_Some_0().used_regions.contains(r)) - by - { - assert(ghost_pt_used_regions_pairwise_disjoint(mem_with_empty@, pt_with_empty@, layer as nat, ptr)); - if j == idx { - assert(pt_final@.entries[j as int].get_Some_0() === dir_pt_res@); - assert(pt_final@.entries[i as int] === pt.entries[i as int]); + } + } else { + if i == idx { + assert(pt_final@.entries[i as int].get_Some_0() + === dir_pt_res@); + assert(pt_final@.entries[j as int] + === pt.entries[j as int]); if dir_new_regions@.contains(r) { + assert(dir_pt_res@.used_regions.contains(r)); assert(!new_dir_pt@.used_regions.contains(r)); assert(!mem_with_empty@.regions().contains(r)); - assert(!dir_pt_res@.used_regions.contains(r)); + assert(!pt.entries[j as int].get_Some_0().used_regions.contains( + r)); } else { - if new_dir_pt@.used_regions.contains(r) { - assert(pt.used_regions.contains(r)); - assert(mem_with_empty@.regions().contains(r)); - assert(!dir_pt_res@.used_regions.contains(r)); - } + assert(new_dir_pt@.used_regions.contains(r)); + assert(!pt.entries[j as int].get_Some_0().used_regions.contains( + r)); } } else { - if i == idx { - assert(pt_final@.entries[i as int].get_Some_0() === dir_pt_res@); - assert(pt_final@.entries[j as int] === pt.entries[j as int]); - if dir_new_regions@.contains(r) { - assert(dir_pt_res@.used_regions.contains(r)); - assert(!new_dir_pt@.used_regions.contains(r)); - assert(!mem_with_empty@.regions().contains(r)); - assert(!pt.entries[j as int].get_Some_0().used_regions.contains(r)); - } else { - assert(new_dir_pt@.used_regions.contains(r)); - assert(!pt.entries[j as int].get_Some_0().used_regions.contains(r)); - } - } else { - assert(pt_final@.entries[i as int] === pt.entries[i as int]); - assert(pt_final@.entries[j as int] === pt.entries[j as int]); - } + assert(pt_final@.entries[i as int] + === pt.entries[i as int]); + assert(pt_final@.entries[j as int] + === pt.entries[j as int]); } - - }; - }; - assert(ghost_pt_matches_structure(mem, pt_final@, layer as nat, ptr)); - assert(ghost_pt_region_notin_used_regions(mem, pt_final@, layer as nat, ptr)); - assert(entry_mb0_bits_are_zero(mem, pt_final@, layer as nat, ptr)); - assert(hp_pat_is_zero(mem, pt_final@, layer as nat, ptr)); - }; - - assert(Ok(interp_at(mem, pt_final@, layer as nat, ptr, base as nat)) === interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).map_frame(vaddr as nat, pte@)) by { - lemma_interp_at_aux_facts(mem_with_empty@, pt_with_empty@, layer as nat, ptr, base as nat, seq![]); - assert(inv_at(mem, pt_final@, layer as nat, ptr)); - assert(inv_at(mem_with_empty@, pt_with_empty@, layer as nat, ptr)); - lemma_interp_at_aux_facts(mem, pt_final@, layer as nat, ptr, base as nat, seq![]); - - // The original/old interp is `interp@` - let final_interp = interp_at(mem, pt_final@, layer as nat, ptr, base as nat); - let prev_interp = interp_at(mem_with_empty@, pt_with_empty@, layer as nat, ptr, base as nat); - - assert forall|i: nat| i < X86_NUM_ENTRIES && i != idx - 
implies prev_interp.entries[i as int] === #[trigger] interp@.entries[i as int] - by { lemma_interp_at_entry_different_memory(&*old(mem), pt, mem_with_empty@, pt_with_empty@, layer as nat, ptr, base as nat, i); }; - - assert forall|i: nat| - i < X86_NUM_ENTRIES && i != idx - implies final_interp.entries[i as int] === #[trigger] prev_interp.entries[i as int] by - { - if pt_final@.entries[i as int].is_Some() { - let pt_entry = pt_final@.entries[i as int].get_Some_0(); - assert(ghost_pt_used_regions_pairwise_disjoint(mem, pt_final@, layer as nat, ptr)); - assert forall|r: MemRegion| #[trigger] pt_entry.used_regions.contains(r) - implies !new_regions@.contains(r) by - { - assert(pt_entry.used_regions.contains(r)); - assert(mem_with_empty@.regions().contains(r)); - assert(old(mem).regions().contains(r)); - assert(!new_regions@.contains(r)); - }; - assert(forall|r: MemRegion| #[trigger] pt_entry.used_regions.contains(r) - ==> !dir_pt_res@.used_regions.contains(r)); - assert(forall|r: MemRegion| pt_entry.used_regions.contains(r) - ==> #[trigger] old(mem).region_view(r) === mem.region_view(r)); } - lemma_interp_at_entry_different_memory(mem_with_empty@, pt_with_empty@, mem, pt_final@, layer as nat, ptr, base as nat, i); }; - - assert(final_interp.entries[idx as int] === interp@.map_frame(vaddr as nat, pte@).get_Ok_0().entries[idx as int]); - assert(final_interp.entries =~= interp@.map_frame(vaddr as nat, pte@).get_Ok_0().entries); - assert(Ok(interp_at(mem, pt_final@, layer as nat, ptr, base as nat)) === interp@.map_frame(vaddr as nat, pte@)); }; - } - - // posts - proof { - assert(pt_final@.region === pt.region); - assert(pt_final@.used_regions =~= pt.used_regions.union(new_regions@)); - assert(mem.regions() =~= old(mem).regions().union(new_regions@)); - assert forall|r: MemRegion| - !(#[trigger] pt.used_regions.contains(r)) - && !new_regions@.contains(r) - implies mem.region_view(r) === old(mem).region_view(r) by - { - assert(r !== new_dir_region@); - assert(!pt_with_empty@.used_regions.contains(r)); - assert(!new_dir_pt@.used_regions.contains(r)); - assert(!dir_new_regions@.contains(r)); - assert(mem.region_view(r) === mem_with_empty@.region_view(r)); + assert(ghost_pt_matches_structure( + mem, + pt_final@, + layer as nat, + ptr, + )); + assert(ghost_pt_region_notin_used_regions( + mem, + pt_final@, + layer as nat, + ptr, + )); + assert(entry_mb0_bits_are_zero(mem, pt_final@, layer as nat, ptr)); + assert(hp_pat_is_zero(mem, pt_final@, layer as nat, ptr)); + }; + assert(Ok(interp_at(mem, pt_final@, layer as nat, ptr, base as nat)) + === interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).map_frame(vaddr as nat, pte@)) by { + lemma_interp_at_aux_facts( + mem_with_empty@, + pt_with_empty@, + layer as nat, + ptr, + base as nat, + seq![], + ); + assert(inv_at(mem, pt_final@, layer as nat, ptr)); + assert(inv_at(mem_with_empty@, pt_with_empty@, layer as nat, ptr)); + lemma_interp_at_aux_facts( + mem, + pt_final@, + layer as nat, + ptr, + base as nat, + seq![], + ); + // The original/old interp is `interp@` + let final_interp = interp_at( + mem, + pt_final@, + layer as nat, + ptr, + base as nat, + ); + let prev_interp = interp_at( + mem_with_empty@, + pt_with_empty@, + layer as nat, + ptr, + base as nat, + ); + assert forall|i: nat| + i < X86_NUM_ENTRIES && i + != idx implies prev_interp.entries[i as int] + === #[trigger] interp@.entries[i as int] by { + lemma_interp_at_entry_different_memory( + &*old(mem), + pt, + mem_with_empty@, + pt_with_empty@, + layer as nat, + ptr, + base 
as nat, + i, + ); }; - assert forall|r: MemRegion| - new_regions@.contains(r) - implies !(#[trigger] old(mem).regions().contains(r)) by - { - if r === new_dir_region@ { - assert(!old(mem).regions().contains(r)); - } else { - assert(dir_new_regions@.contains(r)); - assert(!mem_with_empty@.regions().contains(r)); - assert(!old(mem).regions().contains(r)); + assert forall|i: nat| + i < X86_NUM_ENTRIES && i + != idx implies final_interp.entries[i as int] + === #[trigger] prev_interp.entries[i as int] by { + if pt_final@.entries[i as int].is_Some() { + let pt_entry = pt_final@.entries[i as int].get_Some_0(); + assert(ghost_pt_used_regions_pairwise_disjoint( + mem, + pt_final@, + layer as nat, + ptr, + )); + assert forall|r: MemRegion| #[trigger] + pt_entry.used_regions.contains( + r, + ) implies !new_regions@.contains(r) by { + assert(pt_entry.used_regions.contains(r)); + assert(mem_with_empty@.regions().contains(r)); + assert(old(mem).regions().contains(r)); + assert(!new_regions@.contains(r)); + }; + assert(forall|r: MemRegion| #[trigger] + pt_entry.used_regions.contains(r) + ==> !dir_pt_res@.used_regions.contains(r)); + assert(forall|r: MemRegion| + pt_entry.used_regions.contains(r) ==> #[trigger] old( + mem, + ).region_view(r) === mem.region_view(r)); } + lemma_interp_at_entry_different_memory( + mem_with_empty@, + pt_with_empty@, + mem, + pt_final@, + layer as nat, + ptr, + base as nat, + i, + ); }; - assert(forall|r: MemRegion| new_regions@.contains(r) ==> !(#[trigger] pt.used_regions.contains(r))); - } - Ok(Ghost((pt_final@, new_regions@))) - }, - Err(e) => { - proof { - indexing::lemma_index_from_base_and_addr(entry_base as nat, vaddr as nat, x86_arch_spec.entry_size((layer + 1) as nat), X86_NUM_ENTRIES as nat); - assert(false); // We always successfully insert into an empty directory - } - Err(e) - }, - } + assert(final_interp.entries[idx as int] === interp@.map_frame( + vaddr as nat, + pte@, + ).get_Ok_0().entries[idx as int]); + assert(final_interp.entries =~= interp@.map_frame( + vaddr as nat, + pte@, + ).get_Ok_0().entries); + assert(Ok(interp_at(mem, pt_final@, layer as nat, ptr, base as nat)) + === interp@.map_frame(vaddr as nat, pte@)); + }; + } + // posts + proof { + assert(pt_final@.region === pt.region); + assert(pt_final@.used_regions =~= pt.used_regions.union(new_regions@)); + assert(mem.regions() =~= old(mem).regions().union(new_regions@)); + assert forall|r: MemRegion| + !(#[trigger] pt.used_regions.contains(r)) && !new_regions@.contains( + r, + ) implies mem.region_view(r) === old(mem).region_view(r) by { + assert(r !== new_dir_region@); + assert(!pt_with_empty@.used_regions.contains(r)); + assert(!new_dir_pt@.used_regions.contains(r)); + assert(!dir_new_regions@.contains(r)); + assert(mem.region_view(r) === mem_with_empty@.region_view(r)); + }; + assert forall|r: MemRegion| new_regions@.contains(r) implies !( + #[trigger] old(mem).regions().contains(r)) by { + if r === new_dir_region@ { + assert(!old(mem).regions().contains(r)); + } else { + assert(dir_new_regions@.contains(r)); + assert(!mem_with_empty@.regions().contains(r)); + assert(!old(mem).regions().contains(r)); + } + }; + assert(forall|r: MemRegion| + new_regions@.contains(r) ==> !(#[trigger] pt.used_regions.contains( + r, + ))); + } + Ok(Ghost((pt_final@, new_regions@))) + }, + Err(e) => { + proof { + indexing::lemma_index_from_base_and_addr( + entry_base as nat, + vaddr as nat, + x86_arch_spec.entry_size((layer + 1) as nat), + X86_NUM_ENTRIES as nat, + ); + assert(false); // We always successfully insert into 
an empty directory + } + Err(e) + }, } } } + } - pub proof fn lemma_zeroed_page_implies_empty_at(mem: &mem::PageTableMemory, pt: PTDir, layer: nat, ptr: usize) - requires - ptr % PAGE_SIZE == 0, - well_formed(mem, pt, ptr), - mem.inv(), - mem.regions().contains(pt.region), - pt.region.base == ptr, - pt.region.size == PAGE_SIZE, - mem.region_view(pt.region).len() == pt.entries.len(), - pt.region.base == ptr, - ptr == pt.region.base, - pt.used_regions === set![pt.region], - layer_in_range(layer), - pt.entries.len() == X86_NUM_ENTRIES, - forall|i: nat| i < X86_NUM_ENTRIES ==> mem.region_view(pt.region)[i as int] == 0u64, - forall|i: nat| i < X86_NUM_ENTRIES ==> pt.entries[i as int].is_None(), - ensures - empty_at(mem, pt, layer, ptr), - inv_at(mem, pt, layer, ptr), - { - assert forall|i: nat| #![auto] i < X86_NUM_ENTRIES implies - entry_at_spec(mem, pt, layer, ptr, i)@.is_Empty() - && entry_at_spec(mem, pt, layer, ptr, i).all_mb0_bits_are_zero() - by { entry_at_spec(mem, pt, layer, ptr, i).lemma_zero_entry_facts(); }; - assert(forall|i: nat| #![auto] entry_at_spec(mem, pt, layer, ptr, i)@ == view_at(mem, pt, layer, ptr, i)); - } + pub proof fn lemma_zeroed_page_implies_empty_at( + mem: &mem::PageTableMemory, + pt: PTDir, + layer: nat, + ptr: usize, + ) + requires + ptr % PAGE_SIZE == 0, + well_formed(mem, pt, ptr), + mem.inv(), + mem.regions().contains(pt.region), + pt.region.base == ptr, + pt.region.size == PAGE_SIZE, + mem.region_view(pt.region).len() == pt.entries.len(), + pt.region.base == ptr, + ptr == pt.region.base, + pt.used_regions === set![pt.region], + layer_in_range(layer), + pt.entries.len() == X86_NUM_ENTRIES, + forall|i: nat| i < X86_NUM_ENTRIES ==> mem.region_view(pt.region)[i as int] == 0u64, + forall|i: nat| i < X86_NUM_ENTRIES ==> pt.entries[i as int].is_None(), + ensures + empty_at(mem, pt, layer, ptr), + inv_at(mem, pt, layer, ptr), + { + assert forall|i: nat| #![auto] i < X86_NUM_ENTRIES implies entry_at_spec( + mem, + pt, + layer, + ptr, + i, + )@.is_Empty() && entry_at_spec(mem, pt, layer, ptr, i).all_mb0_bits_are_zero() by { + entry_at_spec(mem, pt, layer, ptr, i).lemma_zero_entry_facts(); + }; + assert(forall|i: nat| + #![auto] + entry_at_spec(mem, pt, layer, ptr, i)@ == view_at(mem, pt, layer, ptr, i)); + } - proof fn lemma_empty_at_interp_at_aux_equal_l1_empty_dir(mem: &mem::PageTableMemory, pt: PTDir, layer: nat, ptr: usize, base: nat, init: Seq, idx: nat) - requires - inv_at(mem, pt, layer, ptr), - forall|i: nat| i < init.len() ==> init[i as int] === l1::NodeEntry::Empty(), - init.len() <= X86_NUM_ENTRIES, - idx < X86_NUM_ENTRIES, - view_at(mem, pt, layer, ptr, idx).is_Directory(), - empty_at(mem, pt.entries[idx as int].get_Some_0(), (layer + 1) as nat, view_at(mem, pt, layer, ptr, idx).get_Directory_addr()), - ensures - ({ let res = - interp_at_aux( - mem, - pt.entries[idx as int].get_Some_0(), - layer + 1, - view_at(mem, pt, layer, ptr, idx).get_Directory_addr(), - x86_arch_spec.entry_base(layer, base, idx), - init); + proof fn lemma_empty_at_interp_at_aux_equal_l1_empty_dir( + mem: &mem::PageTableMemory, + pt: PTDir, + layer: nat, + ptr: usize, + base: nat, + init: Seq, + idx: nat, + ) + requires + inv_at(mem, pt, layer, ptr), + forall|i: nat| i < init.len() ==> init[i as int] === l1::NodeEntry::Empty(), + init.len() <= X86_NUM_ENTRIES, + idx < X86_NUM_ENTRIES, + view_at(mem, pt, layer, ptr, idx).is_Directory(), + empty_at( + mem, + pt.entries[idx as int].get_Some_0(), + (layer + 1) as nat, + view_at(mem, pt, layer, ptr, idx).get_Directory_addr(), + ), + 
ensures + ({ + let res = interp_at_aux( + mem, + pt.entries[idx as int].get_Some_0(), + layer + 1, + view_at(mem, pt, layer, ptr, idx).get_Directory_addr(), + x86_arch_spec.entry_base(layer, base, idx), + init, + ); &&& res.len() === X86_NUM_ENTRIES as nat &&& forall|i: nat| i < res.len() ==> res[i as int] === l1::NodeEntry::Empty() - }) - decreases X86_NUM_LAYERS - layer, X86_NUM_ENTRIES - init.len(), 0nat - { - let e_ptr = view_at(mem, pt, layer, ptr, idx).get_Directory_addr(); - let e_base = x86_arch_spec.entry_base(layer, base, idx); - let e_pt = pt.entries[idx as int].get_Some_0(); - - if init.len() >= X86_NUM_ENTRIES as nat { - } else { - lemma_empty_at_interp_at_aux_equal_l1_empty_dir( - mem, pt, layer, ptr, base, - init.push(interp_at_entry(mem, e_pt, layer + 1, e_ptr, e_base, init.len())), idx); - } + }), + decreases X86_NUM_LAYERS - layer, X86_NUM_ENTRIES - init.len(), 0nat, + { + let e_ptr = view_at(mem, pt, layer, ptr, idx).get_Directory_addr(); + let e_base = x86_arch_spec.entry_base(layer, base, idx); + let e_pt = pt.entries[idx as int].get_Some_0(); + if init.len() >= X86_NUM_ENTRIES as nat { + } else { + lemma_empty_at_interp_at_aux_equal_l1_empty_dir( + mem, + pt, + layer, + ptr, + base, + init.push(interp_at_entry(mem, e_pt, layer + 1, e_ptr, e_base, init.len())), + idx, + ); } + } - proof fn lemma_empty_at_interp_at_equal_l1_empty_dir(mem: &mem::PageTableMemory, pt: PTDir, layer: nat, ptr: usize, base: nat, idx: nat) - requires - inv_at(mem, pt, layer, ptr), - idx < X86_NUM_ENTRIES, - view_at(mem, pt, layer, ptr, idx).is_Directory(), - empty_at(mem, pt.entries[idx as int].get_Some_0(), (layer + 1) as nat, view_at(mem, pt, layer, ptr, idx).get_Directory_addr()), - ensures - ({ let res = - interp_at( - mem, - pt.entries[idx as int].get_Some_0(), - layer + 1, - view_at(mem, pt, layer, ptr, idx).get_Directory_addr(), - x86_arch_spec.entry_base(layer, base, idx)); + proof fn lemma_empty_at_interp_at_equal_l1_empty_dir( + mem: &mem::PageTableMemory, + pt: PTDir, + layer: nat, + ptr: usize, + base: nat, + idx: nat, + ) + requires + inv_at(mem, pt, layer, ptr), + idx < X86_NUM_ENTRIES, + view_at(mem, pt, layer, ptr, idx).is_Directory(), + empty_at( + mem, + pt.entries[idx as int].get_Some_0(), + (layer + 1) as nat, + view_at(mem, pt, layer, ptr, idx).get_Directory_addr(), + ), + ensures + ({ + let res = interp_at( + mem, + pt.entries[idx as int].get_Some_0(), + layer + 1, + view_at(mem, pt, layer, ptr, idx).get_Directory_addr(), + x86_arch_spec.entry_base(layer, base, idx), + ); &&& res.entries.len() === X86_NUM_ENTRIES as nat - &&& forall|i: nat| i < res.entries.len() ==> res.entries[i as int] === l1::NodeEntry::Empty() - }) - { - lemma_empty_at_interp_at_aux_equal_l1_empty_dir(mem, pt, layer, ptr, base, seq![], idx); - } - - proof fn lemma_not_empty_at_implies_interp_at_aux_not_empty(mem: &mem::PageTableMemory, pt: PTDir, layer: nat, ptr: usize, base: nat, init: Seq, nonempty_idx: nat) - requires - inv_at(mem, pt, layer, ptr), - nonempty_idx < X86_NUM_ENTRIES, - !view_at(mem, pt, layer, ptr, nonempty_idx).is_Empty(), - nonempty_idx < init.len() ==> !init[nonempty_idx as int].is_Empty() - ensures - !interp_at_aux(mem, pt, layer, ptr, base, init)[nonempty_idx as int].is_Empty() - decreases X86_NUM_LAYERS - layer, X86_NUM_ENTRIES - init.len(), 0nat - { - if init.len() >= X86_NUM_ENTRIES as nat { - } else { - let new_init = init.push(interp_at_entry(mem, pt, layer, ptr, base, init.len())); - lemma_not_empty_at_implies_interp_at_aux_not_empty(mem, pt, layer, ptr, base, new_init, 
nonempty_idx); - } - } - - proof fn lemma_empty_at_implies_interp_at_empty(mem: &mem::PageTableMemory, pt: PTDir, layer: nat, ptr: usize, base: nat) - requires - inv_at(mem, pt, layer, ptr), - empty_at(mem, pt, layer, ptr), - ensures - interp_at(mem, pt, layer, ptr, base).empty() - { - lemma_interp_at_aux_facts(mem, pt, layer, ptr, base, seq![]); - } + &&& forall|i: nat| + i < res.entries.len() ==> res.entries[i as int] === l1::NodeEntry::Empty() + }), + { + lemma_empty_at_interp_at_aux_equal_l1_empty_dir(mem, pt, layer, ptr, base, seq![], idx); + } - proof fn lemma_not_empty_at_implies_interp_at_not_empty(mem: &mem::PageTableMemory, pt: PTDir, layer: nat, ptr: usize, base: nat) - requires - inv_at(mem, pt, layer, ptr), - !empty_at(mem, pt, layer, ptr), - ensures - !interp_at(mem, pt, layer, ptr, base).empty() - { - let i = choose|i: nat| i < X86_NUM_ENTRIES && !view_at(mem, pt, layer, ptr, i).is_Empty(); - lemma_not_empty_at_implies_interp_at_aux_not_empty(mem, pt, layer, ptr, base, seq![], i); + proof fn lemma_not_empty_at_implies_interp_at_aux_not_empty( + mem: &mem::PageTableMemory, + pt: PTDir, + layer: nat, + ptr: usize, + base: nat, + init: Seq, + nonempty_idx: nat, + ) + requires + inv_at(mem, pt, layer, ptr), + nonempty_idx < X86_NUM_ENTRIES, + !view_at(mem, pt, layer, ptr, nonempty_idx).is_Empty(), + nonempty_idx < init.len() ==> !init[nonempty_idx as int].is_Empty(), + ensures + !interp_at_aux(mem, pt, layer, ptr, base, init)[nonempty_idx as int].is_Empty(), + decreases X86_NUM_LAYERS - layer, X86_NUM_ENTRIES - init.len(), 0nat, + { + if init.len() >= X86_NUM_ENTRIES as nat { + } else { + let new_init = init.push(interp_at_entry(mem, pt, layer, ptr, base, init.len())); + lemma_not_empty_at_implies_interp_at_aux_not_empty( + mem, + pt, + layer, + ptr, + base, + new_init, + nonempty_idx, + ); } + } - pub fn map_frame(mem: &mut mem::PageTableMemory, pt: &mut Ghost, vaddr: usize, pte: PageTableEntryExec) -> (res: MapResult) - requires - inv(&*old(mem), old(pt)@), - interp(&*old(mem), old(pt)@).inv(), - old(mem).inv(), - old(mem).alloc_available_pages() >= 3, - accepted_mapping(vaddr as nat, pte@), - interp(&*old(mem), old(pt)@).accepted_mapping(vaddr as nat, pte@), - vaddr < MAX_BASE, - ensures - inv(mem, pt@), - interp(mem, pt@).inv(), - // Refinement of l0 - match res { - MapResult::Ok => { - Ok(interp(mem, pt@).interp()) === interp(&*old(mem), old(pt)@).interp().map_frame(vaddr as nat, pte@) - }, - MapResult::ErrOverlap => - Err(interp(mem, pt@).interp()) === interp(&*old(mem), old(pt)@).interp().map_frame(vaddr as nat, pte@), - }, - { - proof { interp(mem, pt@).lemma_map_frame_refines_map_frame(vaddr as nat, pte@); } - match map_frame_aux(mem, *pt, 0, mem.cr3().base, 0, vaddr, pte) { - Ok(res) => { - proof { interp(&*old(mem), pt@).lemma_map_frame_preserves_inv(vaddr as nat, pte@); } - *pt = Ghost(res@.0); - MapResult::Ok + proof fn lemma_empty_at_implies_interp_at_empty( + mem: &mem::PageTableMemory, + pt: PTDir, + layer: nat, + ptr: usize, + base: nat, + ) + requires + inv_at(mem, pt, layer, ptr), + empty_at(mem, pt, layer, ptr), + ensures + interp_at(mem, pt, layer, ptr, base).empty(), + { + lemma_interp_at_aux_facts(mem, pt, layer, ptr, base, seq![]); + } + + proof fn lemma_not_empty_at_implies_interp_at_not_empty( + mem: &mem::PageTableMemory, + pt: PTDir, + layer: nat, + ptr: usize, + base: nat, + ) + requires + inv_at(mem, pt, layer, ptr), + !empty_at(mem, pt, layer, ptr), + ensures + !interp_at(mem, pt, layer, ptr, base).empty(), + { + let i = choose|i: nat| i < 
X86_NUM_ENTRIES && !view_at(mem, pt, layer, ptr, i).is_Empty(); + lemma_not_empty_at_implies_interp_at_aux_not_empty(mem, pt, layer, ptr, base, seq![], i); + } + + pub fn map_frame( + mem: &mut mem::PageTableMemory, + pt: &mut Ghost, + vaddr: usize, + pte: PageTableEntryExec, + ) -> (res: MapResult) + requires + inv(&*old(mem), old(pt)@), + interp(&*old(mem), old(pt)@).inv(), + old(mem).inv(), + old(mem).alloc_available_pages() >= 3, + accepted_mapping(vaddr as nat, pte@), + interp(&*old(mem), old(pt)@).accepted_mapping(vaddr as nat, pte@), + vaddr < MAX_BASE, + ensures + inv(mem, pt@), + interp(mem, pt@).inv(), + // Refinement of l0 + match res { + MapResult::Ok => { + Ok(interp(mem, pt@).interp()) === interp( + &*old(mem), + old(pt)@, + ).interp().map_frame(vaddr as nat, pte@) }, - Err(e) => MapResult::ErrOverlap, - } + MapResult::ErrOverlap => Err(interp(mem, pt@).interp()) === interp( + &*old(mem), + old(pt)@, + ).interp().map_frame(vaddr as nat, pte@), + }, + { + proof { + interp(mem, pt@).lemma_map_frame_refines_map_frame(vaddr as nat, pte@); + } + match map_frame_aux(mem, *pt, 0, mem.cr3().base, 0, vaddr, pte) { + Ok(res) => { + proof { + interp(&*old(mem), pt@).lemma_map_frame_preserves_inv(vaddr as nat, pte@); + } + *pt = Ghost(res@.0); + MapResult::Ok + }, + Err(e) => MapResult::ErrOverlap, } + } - fn is_directory_empty(mem: &mem::PageTableMemory, Ghost(pt): Ghost, layer: usize, ptr: usize) -> (res: bool) - requires + fn is_directory_empty( + mem: &mem::PageTableMemory, + Ghost(pt): Ghost, + layer: usize, + ptr: usize, + ) -> (res: bool) + requires + inv_at(mem, pt, layer as nat, ptr), + ensures + res === empty_at(mem, pt, layer as nat, ptr), + { + assert(directories_obey_invariant_at(mem, pt, layer as nat, ptr)); + let mut idx = 0; + let num_entries = x86_arch_exec().num_entries(layer); + while idx < num_entries + invariant + num_entries == X86_NUM_ENTRIES, inv_at(mem, pt, layer as nat, ptr), - ensures - res === empty_at(mem, pt, layer as nat, ptr) + forall|i: nat| i < idx ==> view_at(mem, pt, layer as nat, ptr, i).is_Empty(), { - assert(directories_obey_invariant_at(mem, pt, layer as nat, ptr)); - let mut idx = 0; - let num_entries = x86_arch_exec().num_entries(layer); - while idx < num_entries - invariant - num_entries == X86_NUM_ENTRIES, - inv_at(mem, pt, layer as nat, ptr), - forall|i: nat| i < idx ==> view_at(mem, pt, layer as nat, ptr, i).is_Empty(), - { - let entry = entry_at(mem, Ghost(pt), layer, ptr, idx); - if entry.is_mapping() { - assert(!view_at(mem, pt, layer as nat, ptr, idx as nat).is_Empty()); - assert(!empty_at(mem, pt, layer as nat, ptr)); - return false; - } - idx = idx + 1; + let entry = entry_at(mem, Ghost(pt), layer, ptr, idx); + if entry.is_mapping() { + assert(!view_at(mem, pt, layer as nat, ptr, idx as nat).is_Empty()); + assert(!empty_at(mem, pt, layer as nat, ptr)); + return false; } - true + idx = idx + 1; } + true + } - fn unmap_aux(mem: &mut mem::PageTableMemory, Ghost(pt): Ghost, layer: usize, ptr: usize, base: usize, vaddr: usize) - -> (res: Result)>,()>) - requires - inv_at(&*old(mem), pt, layer as nat, ptr), - interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).inv(), - old(mem).inv(), - interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).accepted_unmap(vaddr as nat), - base <= vaddr < MAX_BASE, - ensures - match res { - Ok(resv) => { - let (pt_res, removed_regions) = resv@; - // We return the regions that we removed - &&& old(mem).regions() == mem.regions().union(removed_regions) - &&& pt.used_regions == 
pt_res.used_regions.union(removed_regions) - // and only those we removed - &&& (forall|r: MemRegion| removed_regions.contains(r) ==> !(#[trigger] mem.regions().contains(r))) - &&& (forall|r: MemRegion| removed_regions.contains(r) ==> !(#[trigger] pt_res.used_regions.contains(r))) - // Invariant preserved - &&& inv_at(mem, pt_res, layer as nat, ptr) - // We only touch regions in pt.used_regions - &&& (forall|r: MemRegion| - !(#[trigger] pt_res.used_regions.contains(r)) - && !(#[trigger] removed_regions.contains(r)) - ==> mem.region_view(r) === old(mem).region_view(r)) - &&& pt_res.region === pt.region - }, - Err(e) => { - // If error, unchanged - &&& mem === old(mem) - }, + fn unmap_aux( + mem: &mut mem::PageTableMemory, + Ghost(pt): Ghost, + layer: usize, + ptr: usize, + base: usize, + vaddr: usize, + ) -> (res: Result)>, ()>) + requires + inv_at(&*old(mem), pt, layer as nat, ptr), + interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).inv(), + old(mem).inv(), + interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).accepted_unmap(vaddr as nat), + base <= vaddr < MAX_BASE, + ensures + match res { + Ok(resv) => { + let (pt_res, removed_regions) = resv@; + // We return the regions that we removed + &&& old(mem).regions() == mem.regions().union(removed_regions) + &&& pt.used_regions == pt_res.used_regions.union( + removed_regions, + ) + // and only those we removed + + &&& (forall|r: MemRegion| + removed_regions.contains(r) ==> !(#[trigger] mem.regions().contains(r))) + &&& (forall|r: MemRegion| + removed_regions.contains(r) ==> !(#[trigger] pt_res.used_regions.contains( + r, + ))) + // Invariant preserved + + &&& inv_at( + mem, + pt_res, + layer as nat, + ptr, + ) + // We only touch regions in pt.used_regions + + &&& (forall|r: MemRegion| + !(#[trigger] pt_res.used_regions.contains(r)) && !( + #[trigger] removed_regions.contains(r)) ==> mem.region_view(r) === old( + mem, + ).region_view(r)) + &&& pt_res.region === pt.region }, - // Refinement of l1 - match res { - Ok(resv) => { - let (pt_res, removed_regions) = resv@; - Ok(interp_at(mem, pt_res, layer as nat, ptr, base as nat)) === interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).unmap(vaddr as nat) - }, - Err(e) => - Err(interp_at(mem, pt, layer as nat, ptr, base as nat)) === interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).unmap(vaddr as nat), + Err(e) => { + // If error, unchanged + &&& mem === old(mem) }, - mem.cr3_spec() == old(mem).cr3_spec(), - // decreases X86_NUM_LAYERS - layer - { - proof { lemma_interp_at_facts(mem, pt, layer as nat, ptr, base as nat); } - let idx: usize = x86_arch_exec().index_for_vaddr(layer, base, vaddr); - proof { indexing::lemma_index_from_base_and_addr(base as nat, vaddr as nat, x86_arch_spec.entry_size(layer as nat), X86_NUM_ENTRIES as nat); } - let entry = entry_at(mem, Ghost(pt), layer, ptr, idx); - let interp: Ghost = Ghost(interp_at(mem, pt, layer as nat, ptr, base as nat)); - proof { - interp@.lemma_unmap_structure_assertions(vaddr as nat, idx as nat); - interp@.lemma_unmap_refines_unmap(vaddr as nat); - } - let entry_base: usize = x86_arch_exec().entry_base(layer, base, idx); - proof { - indexing::lemma_entry_base_from_index(base as nat, idx as nat, x86_arch_spec.entry_size(layer as nat)); - assert(entry_base <= vaddr); - } - if entry.is_mapping() { - if entry.is_dir(layer) { - let dir_addr = entry.address() as usize; - assert(pt.entries[idx as int].is_Some()); - let dir_pt: Ghost = Ghost(pt.entries.index(idx as int).get_Some_0()); - assert(directories_obey_invariant_at(mem, 
pt, layer as nat, ptr)); - assert(forall|r: MemRegion| #![auto] pt.entries[idx as int].get_Some_0().used_regions.contains(r) ==> pt.used_regions.contains(r)); - match unmap_aux(mem, dir_pt, layer + 1, dir_addr, entry_base, vaddr) { - Ok(rec_res) => { - let dir_pt_res: Ghost = Ghost(rec_res@.0); - let removed_regions: Ghost> = Ghost(rec_res@.1); - - assert(inv_at(mem, dir_pt_res@, (layer + 1) as nat, dir_addr)); - assert(Ok(interp_at(mem, dir_pt_res@, (layer + 1) as nat, dir_addr, entry_base as nat)) - === interp_at(&*old(mem), dir_pt@, (layer + 1) as nat, dir_addr, entry_base as nat).unmap(vaddr as nat)); - assert(idx < pt.entries.len()); - assert(!pt.entries[idx as int].get_Some_0().used_regions.contains(pt.region)); - assert(!removed_regions@.contains(pt.region)); - assert(!dir_pt_res@.used_regions.contains(pt.region)); - assert(old(mem).regions() === mem.regions().union(removed_regions@)); - - if is_directory_empty(mem, dir_pt_res, layer + 1, dir_addr) { - let mem_with_empty: Ghost<&mem::PageTableMemory> = Ghost(mem); - let pt_with_empty: Ghost = Ghost( - PTDir { - region: pt.region, - entries: pt.entries.update(idx as int, Some(dir_pt_res@)), - used_regions: pt.used_regions, - }); - mem.write(ptr, idx, Ghost(pt.region), 0u64); - mem.dealloc_page(MemRegionExec { base: dir_addr, size: PAGE_SIZE, }); - - let removed_regions: Ghost> = Ghost(removed_regions@.insert(dir_pt_res@.region)); - let pt_res: Ghost = Ghost( - PTDir { - region: pt.region, - entries: pt.entries.update(idx as int, None), - used_regions: pt.used_regions.difference(removed_regions@), - }); - let res: Ghost<(PTDir,Set)> = Ghost((pt_res@,removed_regions@)); - proof { - assert(pt_res@.region === pt.region); - assert(forall|i: nat| i < X86_NUM_ENTRIES && i != idx ==> pt_res@.entries[i as int] == pt.entries[i as int]); - assert(forall|i: nat| i < X86_NUM_ENTRIES && i != idx ==> view_at(mem, pt_res@, layer as nat, ptr, i) == view_at(&*old(mem), pt, layer as nat, ptr, i)); - assert(forall|i: nat| i < X86_NUM_ENTRIES && i != idx ==> entry_at_spec(mem, pt_res@, layer as nat, ptr, i) == entry_at_spec(&*old(mem), pt, layer as nat, ptr, i)); - assert(forall|i: nat, r: MemRegion| i < X86_NUM_ENTRIES && i != idx && pt_res@.entries[i as int].is_Some() && pt_res@.entries[i as int].get_Some_0().used_regions.contains(r) ==> !pt.entries[idx as int].get_Some_0().used_regions.contains(r)); - - entry_at_spec(mem, pt_res@, layer as nat, ptr, idx as nat).lemma_zero_entry_facts(); - - assert(inv_at(mem, pt_res@, layer as nat, ptr)) by { - assert(directories_obey_invariant_at(mem, pt_res@, layer as nat, ptr)) by { - assert forall|i: nat| i < X86_NUM_ENTRIES implies { - let entry = #[trigger] view_at(mem, pt_res@, layer as nat, ptr, i); - entry.is_Directory() ==> { - &&& inv_at(mem, pt_res@.entries[i as int].get_Some_0(), layer as nat + 1, entry.get_Directory_addr()) - } - } by { - let entry = view_at(mem, pt_res@, layer as nat, ptr, i); - if i == idx { - } else { - if entry.is_Directory() { - lemma_inv_at_different_memory(&*old(mem), mem, pt_res@.entries[i as int].get_Some_0(), (layer + 1) as nat, entry.get_Directory_addr()); - } - } - }; - }; - }; + }, + // Refinement of l1 + match res { + Ok(resv) => { + let (pt_res, removed_regions) = resv@; + Ok(interp_at(mem, pt_res, layer as nat, ptr, base as nat)) === interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).unmap(vaddr as nat) + }, + Err(e) => Err(interp_at(mem, pt, layer as nat, ptr, base as nat)) === interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + 
).unmap(vaddr as nat), + }, + mem.cr3_spec() == old( + mem, + ).cr3_spec(), + // decreases X86_NUM_LAYERS - layer - // postconditions - assert((forall|r: MemRegion| removed_regions@.contains(r) ==> !(#[trigger] mem.regions().contains(r)))); - assert(old(mem).regions() =~= mem.regions().union(removed_regions@)); - assert(pt.used_regions =~= pt_res@.used_regions.union(removed_regions@)); - assert((forall|r: MemRegion| removed_regions@.contains(r) ==> !(#[trigger] pt_res@.used_regions.contains(r)))); - assert(forall|r: MemRegion| - !(#[trigger] pt_res@.used_regions.contains(r)) - && !(#[trigger] removed_regions@.contains(r)) - ==> mem.region_view(r) === old(mem).region_view(r)); - assert(mem.cr3_spec() == old(mem).cr3_spec()); - - // Refinement - assert(Ok(interp_at(mem, pt_res@, layer as nat, ptr, base as nat)) === interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).unmap(vaddr as nat)) by { - lemma_interp_at_aux_facts(mem, pt_res@, layer as nat, ptr, base as nat, seq![]); - assert forall|i: nat| - i < X86_NUM_ENTRIES - implies - #[trigger] interp_at(mem, pt_res@, layer as nat, ptr, base as nat).entries[i as int] == - interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).unmap(vaddr as nat).get_Ok_0().entries[i as int] by - { + { + proof { + lemma_interp_at_facts(mem, pt, layer as nat, ptr, base as nat); + } + let idx: usize = x86_arch_exec().index_for_vaddr(layer, base, vaddr); + proof { + indexing::lemma_index_from_base_and_addr( + base as nat, + vaddr as nat, + x86_arch_spec.entry_size(layer as nat), + X86_NUM_ENTRIES as nat, + ); + } + let entry = entry_at(mem, Ghost(pt), layer, ptr, idx); + let interp: Ghost = Ghost( + interp_at(mem, pt, layer as nat, ptr, base as nat), + ); + proof { + interp@.lemma_unmap_structure_assertions(vaddr as nat, idx as nat); + interp@.lemma_unmap_refines_unmap(vaddr as nat); + } + let entry_base: usize = x86_arch_exec().entry_base(layer, base, idx); + proof { + indexing::lemma_entry_base_from_index( + base as nat, + idx as nat, + x86_arch_spec.entry_size(layer as nat), + ); + assert(entry_base <= vaddr); + } + if entry.is_mapping() { + if entry.is_dir(layer) { + let dir_addr = entry.address() as usize; + assert(pt.entries[idx as int].is_Some()); + let dir_pt: Ghost = Ghost(pt.entries.index(idx as int).get_Some_0()); + assert(directories_obey_invariant_at(mem, pt, layer as nat, ptr)); + assert(forall|r: MemRegion| + #![auto] + pt.entries[idx as int].get_Some_0().used_regions.contains(r) + ==> pt.used_regions.contains(r)); + match unmap_aux(mem, dir_pt, layer + 1, dir_addr, entry_base, vaddr) { + Ok(rec_res) => { + let dir_pt_res: Ghost = Ghost(rec_res@.0); + let removed_regions: Ghost> = Ghost(rec_res@.1); + assert(inv_at(mem, dir_pt_res@, (layer + 1) as nat, dir_addr)); + assert(Ok( + interp_at( + mem, + dir_pt_res@, + (layer + 1) as nat, + dir_addr, + entry_base as nat, + ), + ) === interp_at( + &*old(mem), + dir_pt@, + (layer + 1) as nat, + dir_addr, + entry_base as nat, + ).unmap(vaddr as nat)); + assert(idx < pt.entries.len()); + assert(!pt.entries[idx as int].get_Some_0().used_regions.contains( + pt.region, + )); + assert(!removed_regions@.contains(pt.region)); + assert(!dir_pt_res@.used_regions.contains(pt.region)); + assert(old(mem).regions() === mem.regions().union(removed_regions@)); + if is_directory_empty(mem, dir_pt_res, layer + 1, dir_addr) { + let mem_with_empty: Ghost<&mem::PageTableMemory> = Ghost(mem); + let pt_with_empty: Ghost = Ghost( + PTDir { + region: pt.region, + entries: pt.entries.update(idx as int, Some(dir_pt_res@)), + 
used_regions: pt.used_regions, + }, + ); + mem.write(ptr, idx, Ghost(pt.region), 0u64); + mem.dealloc_page(MemRegionExec { base: dir_addr, size: PAGE_SIZE }); + let removed_regions: Ghost> = Ghost( + removed_regions@.insert(dir_pt_res@.region), + ); + let pt_res: Ghost = Ghost( + PTDir { + region: pt.region, + entries: pt.entries.update(idx as int, None), + used_regions: pt.used_regions.difference(removed_regions@), + }, + ); + let res: Ghost<(PTDir, Set)> = Ghost( + (pt_res@, removed_regions@), + ); + proof { + assert(pt_res@.region === pt.region); + assert(forall|i: nat| + i < X86_NUM_ENTRIES && i != idx ==> pt_res@.entries[i as int] + == pt.entries[i as int]); + assert(forall|i: nat| + i < X86_NUM_ENTRIES && i != idx ==> view_at( + mem, + pt_res@, + layer as nat, + ptr, + i, + ) == view_at(&*old(mem), pt, layer as nat, ptr, i)); + assert(forall|i: nat| + i < X86_NUM_ENTRIES && i != idx ==> entry_at_spec( + mem, + pt_res@, + layer as nat, + ptr, + i, + ) == entry_at_spec(&*old(mem), pt, layer as nat, ptr, i)); + assert(forall|i: nat, r: MemRegion| + i < X86_NUM_ENTRIES && i != idx + && pt_res@.entries[i as int].is_Some() + && pt_res@.entries[i as int].get_Some_0().used_regions.contains( + r) + ==> !pt.entries[idx as int].get_Some_0().used_regions.contains( + r)); + entry_at_spec( + mem, + pt_res@, + layer as nat, + ptr, + idx as nat, + ).lemma_zero_entry_facts(); + assert(inv_at(mem, pt_res@, layer as nat, ptr)) by { + assert(directories_obey_invariant_at( + mem, + pt_res@, + layer as nat, + ptr, + )) by { + assert forall|i: nat| i < X86_NUM_ENTRIES implies { + let entry = #[trigger] view_at( + mem, + pt_res@, + layer as nat, + ptr, + i, + ); + entry.is_Directory() ==> { + &&& inv_at( + mem, + pt_res@.entries[i as int].get_Some_0(), + layer as nat + 1, + entry.get_Directory_addr(), + ) + } + } by { + let entry = view_at(mem, pt_res@, layer as nat, ptr, i); if i == idx { - lemma_empty_at_implies_interp_at_empty(mem_with_empty@, dir_pt_res@, (layer + 1) as nat, dir_addr, entry_base as nat); - assert(interp_at(mem, pt_res@, layer as nat, ptr, base as nat).entries[idx as int] == - interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).unmap(vaddr as nat).get_Ok_0().entries[idx as int]); } else { - lemma_interp_at_entry_different_memory(&*old(mem), pt, mem, pt_res@, layer as nat, ptr, base as nat, i); - assert(interp_at(mem, pt_res@, layer as nat, ptr, base as nat).entries[i as int] == - interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).unmap(vaddr as nat).get_Ok_0().entries[i as int]); + if entry.is_Directory() { + lemma_inv_at_different_memory( + &*old(mem), + mem, + pt_res@.entries[i as int].get_Some_0(), + (layer + 1) as nat, + entry.get_Directory_addr(), + ); + } } + }; + }; + }; + // postconditions + assert((forall|r: MemRegion| + removed_regions@.contains(r) ==> !( + #[trigger] mem.regions().contains(r)))); + assert(old(mem).regions() =~= mem.regions().union( + removed_regions@, + )); + assert(pt.used_regions =~= pt_res@.used_regions.union( + removed_regions@, + )); + assert((forall|r: MemRegion| + removed_regions@.contains(r) ==> !( + #[trigger] pt_res@.used_regions.contains(r)))); + assert(forall|r: MemRegion| + !(#[trigger] pt_res@.used_regions.contains(r)) && !( + #[trigger] removed_regions@.contains(r)) ==> mem.region_view(r) + === old(mem).region_view(r)); + assert(mem.cr3_spec() == old(mem).cr3_spec()); + // Refinement + assert(Ok(interp_at(mem, pt_res@, layer as nat, ptr, base as nat)) + === interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + 
).unmap(vaddr as nat)) by { + lemma_interp_at_aux_facts( + mem, + pt_res@, + layer as nat, + ptr, + base as nat, + seq![], + ); + assert forall|i: nat| + i < X86_NUM_ENTRIES implies #[trigger] interp_at( + mem, + pt_res@, + layer as nat, + ptr, + base as nat, + ).entries[i as int] == interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).unmap(vaddr as nat).get_Ok_0().entries[i as int] by { + if i == idx { + lemma_empty_at_implies_interp_at_empty( + mem_with_empty@, + dir_pt_res@, + (layer + 1) as nat, + dir_addr, + entry_base as nat, + ); + assert(interp_at( + mem, + pt_res@, + layer as nat, + ptr, + base as nat, + ).entries[idx as int] == interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).unmap(vaddr as nat).get_Ok_0().entries[idx as int]); + } else { + lemma_interp_at_entry_different_memory( + &*old(mem), + pt, + mem, + pt_res@, + layer as nat, + ptr, + base as nat, + i, + ); + assert(interp_at( + mem, + pt_res@, + layer as nat, + ptr, + base as nat, + ).entries[i as int] == interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).unmap(vaddr as nat).get_Ok_0().entries[i as int]); } - - assert_seqs_equal!( + } + assert_seqs_equal!( interp_at(mem, pt_res@, layer as nat, ptr, base as nat).entries, interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).unmap(vaddr as nat).get_Ok_0().entries); - }; - } - Ok(res) - } else { - let pt_res: Ghost = Ghost( - PTDir { - region: pt.region, - entries: pt.entries.update(idx as int, Some(dir_pt_res@)), - used_regions: pt.used_regions.difference(removed_regions@), - }); - let res: Ghost<(PTDir,Set)> = Ghost((pt_res@,removed_regions@)); - - proof { - assert(pt_res@.region === pt.region); - assert(forall|i: nat| i < X86_NUM_ENTRIES && i != idx ==> pt_res@.entries[i as int] == pt.entries[i as int]); - assert(forall|i: nat| i < X86_NUM_ENTRIES && i != idx ==> view_at(mem, pt_res@, layer as nat, ptr, i) == view_at(&*old(mem), pt, layer as nat, ptr, i)); - assert(forall|i: nat| i < X86_NUM_ENTRIES && i != idx ==> entry_at_spec(mem, pt_res@, layer as nat, ptr, i) == entry_at_spec(&*old(mem), pt, layer as nat, ptr, i)); - assert(forall|i: nat, r: MemRegion| i < X86_NUM_ENTRIES && i != idx && pt_res@.entries[i as int].is_Some() && pt_res@.entries[i as int].get_Some_0().used_regions.contains(r) ==> !pt.entries[idx as int].get_Some_0().used_regions.contains(r)); - - assert(inv_at(mem, pt_res@, layer as nat, ptr)) by { - assert(directories_obey_invariant_at(mem, pt_res@, layer as nat, ptr)) by { - assert forall|i: nat| i < X86_NUM_ENTRIES implies { - let entry = #[trigger] view_at(mem, pt_res@, layer as nat, ptr, i); - entry.is_Directory() ==> { - &&& inv_at(mem, pt_res@.entries[i as int].get_Some_0(), layer as nat + 1, entry.get_Directory_addr()) - } - } by { - let entry = view_at(mem, pt_res@, layer as nat, ptr, i); - if i == idx { - } else { - if entry.is_Directory() { - lemma_inv_at_different_memory(&*old(mem), mem, pt_res@.entries[i as int].get_Some_0(), (layer + 1) as nat, entry.get_Directory_addr()); - } - } - }; - }; - }; - - // postconditions - assert(old(mem).regions() =~= mem.regions().union(removed_regions@)); - assert(pt.used_regions =~= pt_res@.used_regions.union(removed_regions@)); - assert(forall|r: MemRegion| - !(#[trigger] pt_res@.used_regions.contains(r)) - && !(#[trigger] removed_regions@.contains(r)) - ==> mem.region_view(r) === old(mem).region_view(r)); - assert(pt_res@.region === pt.region); - assert(mem.cr3_spec() == old(mem).cr3_spec()); - // Refinement - assert(Ok(interp_at(mem, 
pt_res@, layer as nat, ptr, base as nat)) === interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).unmap(vaddr as nat)) by { - lemma_interp_at_aux_facts(mem, pt_res@, layer as nat, ptr, base as nat, seq![]); - assert forall|i: nat| - i < X86_NUM_ENTRIES - implies - #[trigger] interp_at(mem, pt_res@, layer as nat, ptr, base as nat).entries[i as int] == - interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).unmap(vaddr as nat).get_Ok_0().entries[i as int] by - { + }; + } + Ok(res) + } else { + let pt_res: Ghost = Ghost( + PTDir { + region: pt.region, + entries: pt.entries.update(idx as int, Some(dir_pt_res@)), + used_regions: pt.used_regions.difference(removed_regions@), + }, + ); + let res: Ghost<(PTDir, Set)> = Ghost( + (pt_res@, removed_regions@), + ); + proof { + assert(pt_res@.region === pt.region); + assert(forall|i: nat| + i < X86_NUM_ENTRIES && i != idx ==> pt_res@.entries[i as int] + == pt.entries[i as int]); + assert(forall|i: nat| + i < X86_NUM_ENTRIES && i != idx ==> view_at( + mem, + pt_res@, + layer as nat, + ptr, + i, + ) == view_at(&*old(mem), pt, layer as nat, ptr, i)); + assert(forall|i: nat| + i < X86_NUM_ENTRIES && i != idx ==> entry_at_spec( + mem, + pt_res@, + layer as nat, + ptr, + i, + ) == entry_at_spec(&*old(mem), pt, layer as nat, ptr, i)); + assert(forall|i: nat, r: MemRegion| + i < X86_NUM_ENTRIES && i != idx + && pt_res@.entries[i as int].is_Some() + && pt_res@.entries[i as int].get_Some_0().used_regions.contains( + r) + ==> !pt.entries[idx as int].get_Some_0().used_regions.contains( + r)); + assert(inv_at(mem, pt_res@, layer as nat, ptr)) by { + assert(directories_obey_invariant_at( + mem, + pt_res@, + layer as nat, + ptr, + )) by { + assert forall|i: nat| i < X86_NUM_ENTRIES implies { + let entry = #[trigger] view_at( + mem, + pt_res@, + layer as nat, + ptr, + i, + ); + entry.is_Directory() ==> { + &&& inv_at( + mem, + pt_res@.entries[i as int].get_Some_0(), + layer as nat + 1, + entry.get_Directory_addr(), + ) + } + } by { + let entry = view_at(mem, pt_res@, layer as nat, ptr, i); if i == idx { - assert(interp_at(mem, pt_res@, layer as nat, ptr, base as nat).entries[idx as int] - == l1::NodeEntry::Directory(interp_at(mem, dir_pt_res@, (layer + 1) as nat, dir_addr, entry_base as nat))); - assert(interp_at(&*old(mem), dir_pt@, (layer + 1) as nat, dir_addr, entry_base as nat).unmap(vaddr as nat).is_Ok()); - assert(interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).entries[idx as int] == interp_at_entry(&*old(mem), pt, layer as nat, ptr, base as nat, idx as nat)); - - lemma_not_empty_at_implies_interp_at_not_empty(mem, dir_pt_res@, (layer + 1) as nat, dir_addr, entry_base as nat); - assert(interp_at(mem, pt_res@, layer as nat, ptr, base as nat).entries[idx as int] == - interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).unmap(vaddr as nat).get_Ok_0().entries[idx as int]); } else { - lemma_interp_at_entry_different_memory(&*old(mem), pt, mem, pt_res@, layer as nat, ptr, base as nat, i); - assert(interp_at(mem, pt_res@, layer as nat, ptr, base as nat).entries[i as int] == - interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).unmap(vaddr as nat).get_Ok_0().entries[i as int]); + if entry.is_Directory() { + lemma_inv_at_different_memory( + &*old(mem), + mem, + pt_res@.entries[i as int].get_Some_0(), + (layer + 1) as nat, + entry.get_Directory_addr(), + ); + } } - } - - assert_seqs_equal!( - interp_at(mem, pt_res@, layer as nat, ptr, base as nat).entries, - interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).unmap(vaddr as 
nat).get_Ok_0().entries); + }; }; - } - Ok(res) - } - - }, - Err(e) => { - assert(mem === old(mem)); - assert(Err(interp_at(mem, pt, layer as nat, ptr, base as nat)) === interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).unmap(vaddr as nat)); - Err(e) - }, - } - } else { - if aligned_exec(vaddr, x86_arch_exec().entry_size(layer)) { - mem.write(ptr, idx, Ghost(pt.region), 0u64); - - let removed_regions: Ghost> = Ghost(Set::empty()); - let res: Ghost<(PTDir,Set)> = Ghost((pt, removed_regions@)); - - proof { - assert(mem.region_view(pt.region) === old(mem).region_view(pt.region).update(idx as int, 0)); - assert(mem.spec_read(idx as nat, pt.region) == 0); - let new_entry = entry_at_spec(mem, pt, layer as nat, ptr, idx as nat); - new_entry.lemma_zero_entry_facts(); - assert(forall|i: nat| i < X86_NUM_ENTRIES && i != idx ==> entry_at_spec(mem, pt, layer as nat, ptr, i) == entry_at_spec(&*old(mem), pt, layer as nat, ptr, i)); - assert(forall|i: nat| i < X86_NUM_ENTRIES && i != idx ==> view_at(mem, pt, layer as nat, ptr, i) == view_at(&*old(mem), pt, layer as nat, ptr, i)); - - assert(inv_at(mem, pt, layer as nat, ptr)) by { - assert(directories_obey_invariant_at(mem, pt, layer as nat, ptr)) by { - assert forall|i: nat| i < X86_NUM_ENTRIES implies { - let entry = #[trigger] view_at(mem, pt, layer as nat, ptr, i); - entry.is_Directory() ==> { - &&& inv_at(mem, pt.entries[i as int].get_Some_0(), layer as nat + 1, entry.get_Directory_addr()) - } - } by { - let entry = view_at(mem, pt, layer as nat, ptr, i); + }; + // postconditions + assert(old(mem).regions() =~= mem.regions().union( + removed_regions@, + )); + assert(pt.used_regions =~= pt_res@.used_regions.union( + removed_regions@, + )); + assert(forall|r: MemRegion| + !(#[trigger] pt_res@.used_regions.contains(r)) && !( + #[trigger] removed_regions@.contains(r)) ==> mem.region_view(r) + === old(mem).region_view(r)); + assert(pt_res@.region === pt.region); + assert(mem.cr3_spec() == old(mem).cr3_spec()); + // Refinement + assert(Ok(interp_at(mem, pt_res@, layer as nat, ptr, base as nat)) + === interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).unmap(vaddr as nat)) by { + lemma_interp_at_aux_facts( + mem, + pt_res@, + layer as nat, + ptr, + base as nat, + seq![], + ); + assert forall|i: nat| + i < X86_NUM_ENTRIES implies #[trigger] interp_at( + mem, + pt_res@, + layer as nat, + ptr, + base as nat, + ).entries[i as int] == interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).unmap(vaddr as nat).get_Ok_0().entries[i as int] by { if i == idx { + assert(interp_at( + mem, + pt_res@, + layer as nat, + ptr, + base as nat, + ).entries[idx as int] == l1::NodeEntry::Directory( + interp_at( + mem, + dir_pt_res@, + (layer + 1) as nat, + dir_addr, + entry_base as nat, + ), + )); + assert(interp_at( + &*old(mem), + dir_pt@, + (layer + 1) as nat, + dir_addr, + entry_base as nat, + ).unmap(vaddr as nat).is_Ok()); + assert(interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).entries[idx as int] == interp_at_entry( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + idx as nat, + )); + lemma_not_empty_at_implies_interp_at_not_empty( + mem, + dir_pt_res@, + (layer + 1) as nat, + dir_addr, + entry_base as nat, + ); + assert(interp_at( + mem, + pt_res@, + layer as nat, + ptr, + base as nat, + ).entries[idx as int] == interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).unmap(vaddr as nat).get_Ok_0().entries[idx as int]); } else { - if entry.is_Directory() { - 
assert(directories_obey_invariant_at(&*old(mem), pt, layer as nat, ptr)); - lemma_inv_at_different_memory(&*old(mem), mem, pt.entries[i as int].get_Some_0(), (layer + 1) as nat, entry.get_Directory_addr()); - assert(inv_at(mem, pt.entries[i as int].get_Some_0(), layer as nat + 1, entry.get_Directory_addr())); - } + lemma_interp_at_entry_different_memory( + &*old(mem), + pt, + mem, + pt_res@, + layer as nat, + ptr, + base as nat, + i, + ); + assert(interp_at( + mem, + pt_res@, + layer as nat, + ptr, + base as nat, + ).entries[i as int] == interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).unmap(vaddr as nat).get_Ok_0().entries[i as int]); } - }; + } + assert_seqs_equal!( + interp_at(mem, pt_res@, layer as nat, ptr, base as nat).entries, + interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).unmap(vaddr as nat).get_Ok_0().entries); }; - }; - - // postconditions - assert_sets_equal!(old(mem).regions(), mem.regions().union(removed_regions@)); - assert_sets_equal!(pt.used_regions, pt.used_regions.union(removed_regions@)); - - // Refinement - assert(Ok(interp_at(mem, pt, layer as nat, ptr, base as nat)) === interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).unmap(vaddr as nat)) by { - lemma_interp_at_aux_facts(mem, pt, layer as nat, ptr, base as nat, seq![]); - assert(interp_at(mem, pt, layer as nat, ptr, base as nat).entries.len() == X86_NUM_ENTRIES); - assert(interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).unmap(vaddr as nat).get_Ok_0().entries.len() == X86_NUM_ENTRIES); - - assert forall|i: nat| - i < X86_NUM_ENTRIES - implies - #[trigger] interp_at(mem, pt, layer as nat, ptr, base as nat).entries[i as int] == - interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).unmap(vaddr as nat).get_Ok_0().entries[i as int] by - { + } + Ok(res) + } + }, + Err(e) => { + assert(mem === old(mem)); + assert(Err(interp_at(mem, pt, layer as nat, ptr, base as nat)) + === interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).unmap( + vaddr as nat, + )); + Err(e) + }, + } + } else { + if aligned_exec(vaddr, x86_arch_exec().entry_size(layer)) { + mem.write(ptr, idx, Ghost(pt.region), 0u64); + let removed_regions: Ghost> = Ghost(Set::empty()); + let res: Ghost<(PTDir, Set)> = Ghost((pt, removed_regions@)); + proof { + assert(mem.region_view(pt.region) === old(mem).region_view( + pt.region, + ).update(idx as int, 0)); + assert(mem.spec_read(idx as nat, pt.region) == 0); + let new_entry = entry_at_spec(mem, pt, layer as nat, ptr, idx as nat); + new_entry.lemma_zero_entry_facts(); + assert(forall|i: nat| + i < X86_NUM_ENTRIES && i != idx ==> entry_at_spec( + mem, + pt, + layer as nat, + ptr, + i, + ) == entry_at_spec(&*old(mem), pt, layer as nat, ptr, i)); + assert(forall|i: nat| + i < X86_NUM_ENTRIES && i != idx ==> view_at( + mem, + pt, + layer as nat, + ptr, + i, + ) == view_at(&*old(mem), pt, layer as nat, ptr, i)); + assert(inv_at(mem, pt, layer as nat, ptr)) by { + assert(directories_obey_invariant_at(mem, pt, layer as nat, ptr)) by { + assert forall|i: nat| i < X86_NUM_ENTRIES implies { + let entry = #[trigger] view_at(mem, pt, layer as nat, ptr, i); + entry.is_Directory() ==> { + &&& inv_at( + mem, + pt.entries[i as int].get_Some_0(), + layer as nat + 1, + entry.get_Directory_addr(), + ) + } + } by { + let entry = view_at(mem, pt, layer as nat, ptr, i); if i == idx { } else { - lemma_interp_at_entry_different_memory(&*old(mem), pt, mem, pt, layer as nat, ptr, base as nat, i); - assert(interp_at(mem, pt, layer as nat, ptr, base as nat).entries[i as int] == - 
interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).unmap(vaddr as nat).get_Ok_0().entries[i as int]); + if entry.is_Directory() { + assert(directories_obey_invariant_at( + &*old(mem), + pt, + layer as nat, + ptr, + )); + lemma_inv_at_different_memory( + &*old(mem), + mem, + pt.entries[i as int].get_Some_0(), + (layer + 1) as nat, + entry.get_Directory_addr(), + ); + assert(inv_at( + mem, + pt.entries[i as int].get_Some_0(), + layer as nat + 1, + entry.get_Directory_addr(), + )); + } } + }; + }; + }; + // postconditions + assert_sets_equal!(old(mem).regions(), mem.regions().union(removed_regions@)); + assert_sets_equal!(pt.used_regions, pt.used_regions.union(removed_regions@)); + // Refinement + assert(Ok(interp_at(mem, pt, layer as nat, ptr, base as nat)) === interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).unmap(vaddr as nat)) by { + lemma_interp_at_aux_facts( + mem, + pt, + layer as nat, + ptr, + base as nat, + seq![], + ); + assert(interp_at(mem, pt, layer as nat, ptr, base as nat).entries.len() + == X86_NUM_ENTRIES); + assert(interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).unmap( + vaddr as nat, + ).get_Ok_0().entries.len() == X86_NUM_ENTRIES); + assert forall|i: nat| i < X86_NUM_ENTRIES implies #[trigger] interp_at( + mem, + pt, + layer as nat, + ptr, + base as nat, + ).entries[i as int] == interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).unmap(vaddr as nat).get_Ok_0().entries[i as int] by { + if i == idx { + } else { + lemma_interp_at_entry_different_memory( + &*old(mem), + pt, + mem, + pt, + layer as nat, + ptr, + base as nat, + i, + ); + assert(interp_at( + mem, + pt, + layer as nat, + ptr, + base as nat, + ).entries[i as int] == interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).unmap(vaddr as nat).get_Ok_0().entries[i as int]); } - - assert_seqs_equal!( + } + assert_seqs_equal!( interp_at(mem, pt, layer as nat, ptr, base as nat).entries, interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).unmap(vaddr as nat).get_Ok_0().entries); - }; - } - Ok(res) - - } else { - assert(mem === old(mem)); - assert(Err(interp_at(mem, pt, layer as nat, ptr, base as nat)) === interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).unmap(vaddr as nat)); - Err(()) + }; } + Ok(res) + } else { + assert(mem === old(mem)); + assert(Err(interp_at(mem, pt, layer as nat, ptr, base as nat)) === interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).unmap(vaddr as nat)); + Err(()) } - } else { - assert(mem === old(mem)); - assert(Err(interp_at(mem, pt, layer as nat, ptr, base as nat)) === interp_at(&*old(mem), pt, layer as nat, ptr, base as nat).unmap(vaddr as nat)); - Err(()) } + } else { + assert(mem === old(mem)); + assert(Err(interp_at(mem, pt, layer as nat, ptr, base as nat)) === interp_at( + &*old(mem), + pt, + layer as nat, + ptr, + base as nat, + ).unmap(vaddr as nat)); + Err(()) } + } - pub fn unmap(mem: &mut mem::PageTableMemory, pt: &mut Ghost, vaddr: usize) -> (res: UnmapResult) - requires - inv(&*old(mem), old(pt)@), - interp(&*old(mem), old(pt)@).inv(), - old(mem).inv(), - interp(&*old(mem), old(pt)@).accepted_unmap(vaddr as nat), - vaddr < MAX_BASE, - ensures - inv(mem, pt@), - interp(mem, pt@).inv(), - // Refinement of l0 - match res { - UnmapResult::Ok => { - Ok(interp(mem, pt@).interp()) === interp(&*old(mem), old(pt)@).interp().unmap(vaddr as nat) - }, - UnmapResult::ErrNoSuchMapping => - Err(interp(mem, pt@).interp()) === interp(&*old(mem), old(pt)@).interp().unmap(vaddr as nat), 
- }, - { - proof { interp(mem, pt@).lemma_unmap_refines_unmap(vaddr as nat); } - match unmap_aux(mem, *pt, 0, mem.cr3().base, 0, vaddr) { - Ok(res) => { - proof { interp(&*old(mem), pt@).lemma_unmap_preserves_inv(vaddr as nat); } - *pt = Ghost(res@.0); - UnmapResult::Ok + pub fn unmap(mem: &mut mem::PageTableMemory, pt: &mut Ghost, vaddr: usize) -> (res: + UnmapResult) + requires + inv(&*old(mem), old(pt)@), + interp(&*old(mem), old(pt)@).inv(), + old(mem).inv(), + interp(&*old(mem), old(pt)@).accepted_unmap(vaddr as nat), + vaddr < MAX_BASE, + ensures + inv(mem, pt@), + interp(mem, pt@).inv(), + // Refinement of l0 + match res { + UnmapResult::Ok => { + Ok(interp(mem, pt@).interp()) === interp(&*old(mem), old(pt)@).interp().unmap( + vaddr as nat, + ) }, - Err(e) => UnmapResult::ErrNoSuchMapping, - } + UnmapResult::ErrNoSuchMapping => Err(interp(mem, pt@).interp()) === interp( + &*old(mem), + old(pt)@, + ).interp().unmap(vaddr as nat), + }, + { + proof { + interp(mem, pt@).lemma_unmap_refines_unmap(vaddr as nat); } - + match unmap_aux(mem, *pt, 0, mem.cr3().base, 0, vaddr) { + Ok(res) => { + proof { + interp(&*old(mem), pt@).lemma_unmap_preserves_inv(vaddr as nat); + } + *pt = Ghost(res@.0); + UnmapResult::Ok + }, + Err(e) => UnmapResult::ErrNoSuchMapping, } + } - pub proof fn lemma_set_union_empty_equals_set(s: Set) - ensures - s.union(set![]) === s - { - assert_sets_equal!(s.union(set![]), s); - } +} - } +pub proof fn lemma_set_union_empty_equals_set(s: Set) + ensures + s.union(set![]) === s, +{ + assert_sets_equal!(s.union(set![]), s); +} + +} // verus! } pub mod l2_refinement { @@ -4399,286 +6671,488 @@ pub mod impl_u { verus! { - pub proof fn lemma_page_table_walk_interp() - ensures - forall|mem: mem::PageTableMemory, pt: PTDir| #![auto] PT::inv(&mem, pt) && PT::interp(&mem, pt).inv() ==> PT::interp(&mem, pt).interp().map === interp_pt_mem(mem) - { - assert forall|mem: mem::PageTableMemory, pt: PTDir| #![auto] - PT::inv(&mem, pt) && PT::interp(&mem, pt).inv() implies PT::interp(&mem, pt).interp().map === interp_pt_mem(mem) - by { lemma_page_table_walk_interp_aux(mem, pt); } - } +pub proof fn lemma_page_table_walk_interp() + ensures + forall|mem: mem::PageTableMemory, pt: PTDir| + #![auto] + PT::inv(&mem, pt) && PT::interp(&mem, pt).inv() ==> PT::interp(&mem, pt).interp().map + === interp_pt_mem(mem), +{ + assert forall|mem: mem::PageTableMemory, pt: PTDir| + #![auto] + PT::inv(&mem, pt) && PT::interp(&mem, pt).inv() implies PT::interp(&mem, pt).interp().map + === interp_pt_mem(mem) by { + lemma_page_table_walk_interp_aux(mem, pt); + } +} - pub proof fn lemma_page_table_walk_interp_aux(mem: mem::PageTableMemory, pt: PTDir) - requires PT::inv(&mem, pt) && PT::interp(&mem, pt).inv() - ensures PT::interp(&mem, pt).interp().map === interp_pt_mem(mem) - { - let m1 = interp_pt_mem(mem); - let m2 = PT::interp(&mem, pt).interp().map; - PT::interp(&mem, pt).lemma_inv_implies_interp_inv(); - assert(PT::interp(&mem, pt).interp().inv()); - assert forall|addr: nat, pte: PageTableEntry| - m1.contains_pair(addr, pte) implies #[trigger] m2.contains_pair(addr, pte) - by { - let addr: u64 = addr as u64; - assert(addr < MAX_BASE); - let pte = choose|pte: PageTableEntry| valid_pt_walk(mem, addr, pte); - assert(valid_pt_walk(mem, addr as u64, pte)); - PT::lemma_interp_at_facts(&mem, pt, 0, mem.cr3_spec().base, 0); - - let l0_idx_u64: u64 = l0_bits!(addr); - let l0_idx: nat = l0_idx_u64 as nat; - let l1_idx_u64: u64 = l1_bits!(addr); - let l1_idx: nat = l1_idx_u64 as nat; - let l2_idx_u64: u64 = 
l2_bits!(addr); - let l2_idx: nat = l2_idx_u64 as nat; - let l3_idx_u64: u64 = l3_bits!(addr); - let l3_idx: nat = l3_idx_u64 as nat; - assert(forall|a:u64| (a & bitmask_inc!(0u64,8u64) == a) ==> a < 512) by (bit_vector); - assert(l0_idx < 512 && l1_idx < 512 && l2_idx < 512 && l3_idx < 512) by { - assert(((addr & bitmask_inc!(12u64,20u64)) >> 12u64) & bitmask_inc!(0u64,8u64) == ((addr & bitmask_inc!(12u64,20u64)) >> 12u64)) by (bit_vector); - assert(((addr & bitmask_inc!(21u64,29u64)) >> 21u64) & bitmask_inc!(0u64,8u64) == ((addr & bitmask_inc!(21u64,29u64)) >> 21u64)) by (bit_vector); - assert(((addr & bitmask_inc!(30u64,38u64)) >> 30u64) & bitmask_inc!(0u64,8u64) == ((addr & bitmask_inc!(30u64,38u64)) >> 30u64)) by (bit_vector); - assert(((addr & bitmask_inc!(39u64,47u64)) >> 39u64) & bitmask_inc!(0u64,8u64) == ((addr & bitmask_inc!(39u64,47u64)) >> 39u64)) by (bit_vector); - }; - assert(bitmask_inc!(39u64,47u64) == 0xFF80_0000_0000) by (compute); - assert(bitmask_inc!(30u64,38u64) == 0x007F_C000_0000) by (compute); - assert(bitmask_inc!(21u64,29u64) == 0x0000_3FE0_0000) by (compute); - assert(bitmask_inc!(12u64,20u64) == 0x0000_001F_F000) by (compute); - let interp_l0_dir = PT::interp(&mem, pt); - let interp_l0_entry = PT::interp_at_entry(&mem, pt, 0, mem.cr3_spec().base, 0, l0_idx); - interp_l0_dir.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping(l0_idx); - match read_entry(mem, mem.cr3_spec()@.base, 0, l0_idx) { +pub proof fn lemma_page_table_walk_interp_aux(mem: mem::PageTableMemory, pt: PTDir) + requires + PT::inv(&mem, pt) && PT::interp(&mem, pt).inv(), + ensures + PT::interp(&mem, pt).interp().map === interp_pt_mem(mem), +{ + let m1 = interp_pt_mem(mem); + let m2 = PT::interp(&mem, pt).interp().map; + PT::interp(&mem, pt).lemma_inv_implies_interp_inv(); + assert(PT::interp(&mem, pt).interp().inv()); + assert forall|addr: nat, pte: PageTableEntry| + m1.contains_pair(addr, pte) implies #[trigger] m2.contains_pair(addr, pte) by { + let addr: u64 = addr as u64; + assert(addr < MAX_BASE); + let pte = choose|pte: PageTableEntry| valid_pt_walk(mem, addr, pte); + assert(valid_pt_walk(mem, addr as u64, pte)); + PT::lemma_interp_at_facts(&mem, pt, 0, mem.cr3_spec().base, 0); + let l0_idx_u64: u64 = l0_bits!(addr); + let l0_idx: nat = l0_idx_u64 as nat; + let l1_idx_u64: u64 = l1_bits!(addr); + let l1_idx: nat = l1_idx_u64 as nat; + let l2_idx_u64: u64 = l2_bits!(addr); + let l2_idx: nat = l2_idx_u64 as nat; + let l3_idx_u64: u64 = l3_bits!(addr); + let l3_idx: nat = l3_idx_u64 as nat; + assert(forall|a: u64| (a & bitmask_inc!(0u64,8u64) == a) ==> a < 512) by (bit_vector); + assert(l0_idx < 512 && l1_idx < 512 && l2_idx < 512 && l3_idx < 512) by { + assert(((addr & bitmask_inc!(12u64,20u64)) >> 12u64) & bitmask_inc!(0u64,8u64) == ((addr + & bitmask_inc!(12u64,20u64)) >> 12u64)) by (bit_vector); + assert(((addr & bitmask_inc!(21u64,29u64)) >> 21u64) & bitmask_inc!(0u64,8u64) == ((addr + & bitmask_inc!(21u64,29u64)) >> 21u64)) by (bit_vector); + assert(((addr & bitmask_inc!(30u64,38u64)) >> 30u64) & bitmask_inc!(0u64,8u64) == ((addr + & bitmask_inc!(30u64,38u64)) >> 30u64)) by (bit_vector); + assert(((addr & bitmask_inc!(39u64,47u64)) >> 39u64) & bitmask_inc!(0u64,8u64) == ((addr + & bitmask_inc!(39u64,47u64)) >> 39u64)) by (bit_vector); + }; + assert(bitmask_inc!(39u64,47u64) == 0xFF80_0000_0000) by (compute); + assert(bitmask_inc!(30u64,38u64) == 0x007F_C000_0000) by (compute); + assert(bitmask_inc!(21u64,29u64) == 0x0000_3FE0_0000) by (compute); + 
assert(bitmask_inc!(12u64,20u64) == 0x0000_001F_F000) by (compute); + let interp_l0_dir = PT::interp(&mem, pt); + let interp_l0_entry = PT::interp_at_entry(&mem, pt, 0, mem.cr3_spec().base, 0, l0_idx); + interp_l0_dir.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping( + l0_idx, + ); + match read_entry(mem, mem.cr3_spec()@.base, 0, l0_idx) { + GhostPageDirectoryEntry::Directory { + addr: l0_dir_addr, + flag_RW: l0_RW, + flag_US: l0_US, + flag_XD: l0_XD, + .. + } => { + assert(interp_l0_entry.is_Directory()); + let l1_base_vaddr = x86_arch_spec.entry_base(0, 0, l0_idx); + let l0_dir_ghost_pt = pt.entries[l0_idx as int].get_Some_0(); + assert(PT::directories_obey_invariant_at(&mem, pt, 0, mem.cr3_spec().base)); + assert(PT::inv_at(&mem, l0_dir_ghost_pt, 1, l0_dir_addr)); + assert(interp_l0_dir.directories_obey_invariant()); + assert(interp_l0_dir.entries[l0_idx as int].get_Directory_0().inv()); + PT::lemma_interp_at_facts(&mem, l0_dir_ghost_pt, 1, l0_dir_addr, l1_base_vaddr); + let interp_l1_dir = PT::interp_at( + &mem, + l0_dir_ghost_pt, + 1, + l0_dir_addr, + l1_base_vaddr, + ); + let interp_l1_entry = PT::interp_at_entry( + &mem, + l0_dir_ghost_pt, + 1, + l0_dir_addr, + l1_base_vaddr, + l1_idx, + ); + interp_l1_dir.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping( + l1_idx); + match read_entry(mem, l0_dir_addr as nat, 1, l1_idx) { + GhostPageDirectoryEntry::Page { + addr: page_addr, + flag_RW: l1_RW, + flag_US: l1_US, + flag_XD: l1_XD, + .. + } => { + assert(aligned(addr as nat, L1_ENTRY_SIZE as nat)); + assert(pte == PageTableEntry { + frame: MemRegion { base: page_addr as nat, size: L1_ENTRY_SIZE as nat }, + flags: Flags { + is_writable: l0_RW && l1_RW, + is_supervisor: !l0_US || !l1_US, + disable_execute: l0_XD || l1_XD, + }, + }); + assert(addr == ((l0_idx_u64 << 39u64) | (l1_idx_u64 << 30u64))) + by (bit_vector) + requires + l0_idx_u64 == (addr & 0xFF80_0000_0000) >> 39, + l1_idx_u64 == (addr & 0x007F_C000_0000) >> 30, + addr < mul(512u64, mul(512, mul(512, mul(512, 4096)))), + addr % mul(512, mul(512, 4096)) == 0, + ; + assert(add( + mul(l0_idx_u64, mul(512u64, mul(512, mul(512, 4096)))), + mul(l1_idx_u64, mul(512u64, mul(512, 4096))), + ) == l0_idx_u64 << 39u64 | l1_idx_u64 << 30u64) by (bit_vector) + requires + l0_idx_u64 < 512 && l1_idx_u64 < 512, + ; + // Previous assert proves: l0_idx * L0_ENTRY_SIZE + l1_idx * L1_ENTRY_SIZE == (l0_idx as u64) << 39u64 | (l1_idx as u64) << 30u64 + assert(interp_l1_dir.interp_of_entry(l1_idx).map.contains_pair( + addr as nat, + pte, + )); + assert(interp_l1_dir.interp().map.contains_pair(addr as nat, pte)); + assert(interp_l0_dir.interp().map.contains_pair(addr as nat, pte)); + assert(m2.contains_pair(addr as nat, pte)); + }, GhostPageDirectoryEntry::Directory { - addr: l0_dir_addr, flag_RW: l0_RW, flag_US: l0_US, flag_XD: l0_XD, .. + addr: l1_dir_addr, + flag_RW: l1_RW, + flag_US: l1_US, + flag_XD: l1_XD, + .. 
} => { - assert(interp_l0_entry.is_Directory()); - let l1_base_vaddr = x86_arch_spec.entry_base(0, 0, l0_idx); - let l0_dir_ghost_pt = pt.entries[l0_idx as int].get_Some_0(); - assert(PT::directories_obey_invariant_at(&mem, pt, 0, mem.cr3_spec().base)); - assert(PT::inv_at(&mem, l0_dir_ghost_pt, 1, l0_dir_addr)); - assert(interp_l0_dir.directories_obey_invariant()); - assert(interp_l0_dir.entries[l0_idx as int].get_Directory_0().inv()); - PT::lemma_interp_at_facts(&mem, l0_dir_ghost_pt, 1, l0_dir_addr, l1_base_vaddr); - let interp_l1_dir = PT::interp_at(&mem, l0_dir_ghost_pt, 1, l0_dir_addr, l1_base_vaddr); - let interp_l1_entry = PT::interp_at_entry(&mem, l0_dir_ghost_pt, 1, l0_dir_addr, l1_base_vaddr, l1_idx); - interp_l1_dir.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping(l1_idx); - match read_entry(mem, l0_dir_addr as nat, 1, l1_idx) { + assert(interp_l1_entry.is_Directory()); + let l2_base_vaddr = x86_arch_spec.entry_base(1, l1_base_vaddr, l1_idx); + let l1_dir_ghost_pt = l0_dir_ghost_pt.entries[l1_idx as int].get_Some_0(); + assert(PT::directories_obey_invariant_at( + &mem, + l0_dir_ghost_pt, + 1, + l0_dir_addr, + )); + assert(PT::inv_at(&mem, l1_dir_ghost_pt, 2, l1_dir_addr)); + PT::lemma_interp_at_facts( + &mem, + l1_dir_ghost_pt, + 2, + l1_dir_addr, + l2_base_vaddr, + ); + let interp_l2_dir = PT::interp_at( + &mem, + l1_dir_ghost_pt, + 2, + l1_dir_addr, + l2_base_vaddr, + ); + let interp_l2_entry = PT::interp_at_entry( + &mem, + l1_dir_ghost_pt, + 2, + l1_dir_addr, + l2_base_vaddr, + l2_idx, + ); + interp_l2_dir.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping( + l2_idx); + match read_entry(mem, l1_dir_addr as nat, 2, l2_idx) { GhostPageDirectoryEntry::Page { - addr: page_addr, flag_RW: l1_RW, flag_US: l1_US, flag_XD: l1_XD, .. + addr: page_addr, + flag_RW: l2_RW, + flag_US: l2_US, + flag_XD: l2_XD, + .. 
} => { - assert(aligned(addr as nat, L1_ENTRY_SIZE as nat)); + assert(aligned(addr as nat, L2_ENTRY_SIZE as nat)); assert(pte == PageTableEntry { - frame: MemRegion { base: page_addr as nat, size: L1_ENTRY_SIZE as nat }, + frame: MemRegion { + base: page_addr as nat, + size: L2_ENTRY_SIZE as nat, + }, flags: Flags { - is_writable: l0_RW && l1_RW, - is_supervisor: !l0_US || !l1_US, - disable_execute: l0_XD || l1_XD - } + is_writable: l0_RW && l1_RW && l2_RW, + is_supervisor: !l0_US || !l1_US || !l2_US, + disable_execute: l0_XD || l1_XD || l2_XD, + }, }); - - assert(addr == ((l0_idx_u64 << 39u64) | (l1_idx_u64 << 30u64))) by (bit_vector) + assert(addr == ((l0_idx_u64 << 39u64) | (l1_idx_u64 << 30u64) | ( + l2_idx_u64 << 21u64))) by (bit_vector) requires l0_idx_u64 == (addr & 0xFF80_0000_0000) >> 39, l1_idx_u64 == (addr & 0x007F_C000_0000) >> 30, + l2_idx_u64 == (addr & 0x0000_3FE0_0000) >> 21, addr < mul(512u64, mul(512, mul(512, mul(512, 4096)))), - addr % mul(512, mul(512, 4096)) == 0; - - assert(add(mul(l0_idx_u64, mul(512u64, mul(512, mul(512, 4096)))), mul(l1_idx_u64, mul(512u64, mul(512, 4096)))) == l0_idx_u64 << 39u64 | l1_idx_u64 << 30u64) by (bit_vector) - requires l0_idx_u64 < 512 && l1_idx_u64 < 512; - // Previous assert proves: l0_idx * L0_ENTRY_SIZE + l1_idx * L1_ENTRY_SIZE == (l0_idx as u64) << 39u64 | (l1_idx as u64) << 30u64 - - assert(interp_l1_dir.interp_of_entry(l1_idx).map.contains_pair(addr as nat, pte)); + addr % mul(512, 4096) == 0, + ; + assert(add( + add( + mul(l0_idx_u64, mul(512u64, mul(512, mul(512, 4096)))), + mul(l1_idx_u64, mul(512u64, mul(512, 4096))), + ), + mul(l2_idx_u64, mul(512, 4096)), + ) == l0_idx_u64 << 39u64 | l1_idx_u64 << 30u64 | l2_idx_u64 + << 21u64) by (bit_vector) + requires + l0_idx_u64 < 512 && l1_idx_u64 < 512 && l2_idx_u64 < 512, + ; + // Previous assert proves: + // l0_idx * L0_ENTRY_SIZE + l1_idx * L1_ENTRY_SIZE + l2_idx * L2_ENTRY_SIZE + // == (l0_idx as u64) << 39u64 | (l1_idx as u64) << 30u64 | (l2_idx as u64) << 21u64 + assert(interp_l2_dir.interp_of_entry(l2_idx).map.contains_pair( + addr as nat, + pte, + )); + assert(interp_l2_dir.interp().map.contains_pair(addr as nat, pte)); assert(interp_l1_dir.interp().map.contains_pair(addr as nat, pte)); assert(interp_l0_dir.interp().map.contains_pair(addr as nat, pte)); assert(m2.contains_pair(addr as nat, pte)); }, GhostPageDirectoryEntry::Directory { - addr: l1_dir_addr, flag_RW: l1_RW, flag_US: l1_US, flag_XD: l1_XD, .. + addr: l2_dir_addr, + flag_RW: l2_RW, + flag_US: l2_US, + flag_XD: l2_XD, + .. 
} => { - assert(interp_l1_entry.is_Directory()); - let l2_base_vaddr = x86_arch_spec.entry_base(1, l1_base_vaddr, l1_idx); - let l1_dir_ghost_pt = l0_dir_ghost_pt.entries[l1_idx as int].get_Some_0(); - assert(PT::directories_obey_invariant_at(&mem, l0_dir_ghost_pt, 1, l0_dir_addr)); - assert(PT::inv_at(&mem, l1_dir_ghost_pt, 2, l1_dir_addr)); - PT::lemma_interp_at_facts(&mem, l1_dir_ghost_pt, 2, l1_dir_addr, l2_base_vaddr); - let interp_l2_dir = PT::interp_at(&mem, l1_dir_ghost_pt, 2, l1_dir_addr, l2_base_vaddr); - let interp_l2_entry = PT::interp_at_entry(&mem, l1_dir_ghost_pt, 2, l1_dir_addr, l2_base_vaddr, l2_idx); - interp_l2_dir.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping(l2_idx); - match read_entry(mem, l1_dir_addr as nat, 2, l2_idx) { + assert(interp_l2_entry.is_Directory()); + let l3_base_vaddr = x86_arch_spec.entry_base( + 2, + l2_base_vaddr, + l2_idx, + ); + let l2_dir_ghost_pt = + l1_dir_ghost_pt.entries[l2_idx as int].get_Some_0(); + assert(PT::directories_obey_invariant_at( + &mem, + l1_dir_ghost_pt, + 2, + l1_dir_addr, + )); + assert(PT::inv_at(&mem, l2_dir_ghost_pt, 3, l2_dir_addr)); + PT::lemma_interp_at_facts( + &mem, + l2_dir_ghost_pt, + 3, + l2_dir_addr, + l3_base_vaddr, + ); + let interp_l3_dir = PT::interp_at( + &mem, + l2_dir_ghost_pt, + 3, + l2_dir_addr, + l3_base_vaddr, + ); + let interp_l3_entry = PT::interp_at_entry( + &mem, + l2_dir_ghost_pt, + 3, + l2_dir_addr, + l3_base_vaddr, + l3_idx, + ); + interp_l3_dir.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping( + l3_idx); + match read_entry(mem, l2_dir_addr as nat, 3, l3_idx) { GhostPageDirectoryEntry::Page { - addr: page_addr, flag_RW: l2_RW, flag_US: l2_US, flag_XD: l2_XD, .. + addr: page_addr, + flag_RW: l3_RW, + flag_US: l3_US, + flag_XD: l3_XD, + .. 
} => { - assert(aligned(addr as nat, L2_ENTRY_SIZE as nat)); + assert(aligned(addr as nat, L3_ENTRY_SIZE as nat)); assert(pte == PageTableEntry { - frame: MemRegion { base: page_addr as nat, size: L2_ENTRY_SIZE as nat }, + frame: MemRegion { + base: page_addr as nat, + size: L3_ENTRY_SIZE as nat, + }, flags: Flags { - is_writable: l0_RW && l1_RW && l2_RW, - is_supervisor: !l0_US || !l1_US || !l2_US, - disable_execute: l0_XD || l1_XD || l2_XD - } + is_writable: l0_RW && l1_RW && l2_RW && l3_RW, + is_supervisor: !l0_US || !l1_US || !l2_US || !l3_US, + disable_execute: l0_XD || l1_XD || l2_XD || l3_XD, + }, }); - - assert(addr == ((l0_idx_u64 << 39u64) | (l1_idx_u64 << 30u64) | (l2_idx_u64 << 21u64))) by (bit_vector) + assert(addr == ((l0_idx_u64 << 39u64) | (l1_idx_u64 + << 30u64) | (l2_idx_u64 << 21u64) | (l3_idx_u64 + << 12u64))) by (bit_vector) requires l0_idx_u64 == (addr & 0xFF80_0000_0000) >> 39, l1_idx_u64 == (addr & 0x007F_C000_0000) >> 30, l2_idx_u64 == (addr & 0x0000_3FE0_0000) >> 21, - addr < mul(512u64, mul(512, mul(512, mul(512, 4096)))), - addr % mul(512, 4096) == 0; - - assert(add(add( - mul(l0_idx_u64, mul(512u64, mul(512, mul(512, 4096)))), - mul(l1_idx_u64, mul(512u64, mul(512, 4096)))), - mul(l2_idx_u64, mul(512, 4096))) - == l0_idx_u64 << 39u64 | l1_idx_u64 << 30u64 | l2_idx_u64 << 21u64) by (bit_vector) - requires l0_idx_u64 < 512 && l1_idx_u64 < 512 && l2_idx_u64 < 512; + l3_idx_u64 == (addr & 0x0000_001F_F000) >> 12, + addr < mul( + 512u64, + mul(512, mul(512, mul(512, 4096))), + ), + addr % 4096 == 0, + ; + assert(add( + add( + add( + mul( + l0_idx_u64, + mul(512u64, mul(512, mul(512, 4096))), + ), + mul(l1_idx_u64, mul(512u64, mul(512, 4096))), + ), + mul(l2_idx_u64, mul(512, 4096)), + ), + mul(l3_idx_u64, 4096), + ) == l0_idx_u64 << 39u64 | l1_idx_u64 << 30u64 | l2_idx_u64 + << 21u64 | l3_idx_u64 << 12u64) by (bit_vector) + requires + l0_idx_u64 < 512 && l1_idx_u64 < 512 && l2_idx_u64 + < 512 && l3_idx_u64 < 512, + ; // Previous assert proves: - // l0_idx * L0_ENTRY_SIZE + l1_idx * L1_ENTRY_SIZE + l2_idx * L2_ENTRY_SIZE - // == (l0_idx as u64) << 39u64 | (l1_idx as u64) << 30u64 | (l2_idx as u64) << 21u64 - - assert(interp_l2_dir.interp_of_entry(l2_idx).map.contains_pair(addr as nat, pte)); - assert(interp_l2_dir.interp().map.contains_pair(addr as nat, pte)); - assert(interp_l1_dir.interp().map.contains_pair(addr as nat, pte)); - assert(interp_l0_dir.interp().map.contains_pair(addr as nat, pte)); - assert(m2.contains_pair(addr as nat, pte)); - }, - GhostPageDirectoryEntry::Directory { - addr: l2_dir_addr, flag_RW: l2_RW, flag_US: l2_US, flag_XD: l2_XD, .. - } => { - assert(interp_l2_entry.is_Directory()); - let l3_base_vaddr = x86_arch_spec.entry_base(2, l2_base_vaddr, l2_idx); - let l2_dir_ghost_pt = l1_dir_ghost_pt.entries[l2_idx as int].get_Some_0(); - assert(PT::directories_obey_invariant_at(&mem, l1_dir_ghost_pt, 2, l1_dir_addr)); - assert(PT::inv_at(&mem, l2_dir_ghost_pt, 3, l2_dir_addr)); - PT::lemma_interp_at_facts(&mem, l2_dir_ghost_pt, 3, l2_dir_addr, l3_base_vaddr); - let interp_l3_dir = PT::interp_at(&mem, l2_dir_ghost_pt, 3, l2_dir_addr, l3_base_vaddr); - let interp_l3_entry = PT::interp_at_entry(&mem, l2_dir_ghost_pt, 3, l2_dir_addr, l3_base_vaddr, l3_idx); - interp_l3_dir.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping(l3_idx); - match read_entry(mem, l2_dir_addr as nat, 3, l3_idx) { - GhostPageDirectoryEntry::Page { - addr: page_addr, flag_RW: l3_RW, flag_US: l3_US, flag_XD: l3_XD, .. 
- } => { - assert(aligned(addr as nat, L3_ENTRY_SIZE as nat)); - assert(pte == PageTableEntry { - frame: MemRegion { base: page_addr as nat, size: L3_ENTRY_SIZE as nat }, - flags: Flags { - is_writable: l0_RW && l1_RW && l2_RW && l3_RW, - is_supervisor: !l0_US || !l1_US || !l2_US || !l3_US, - disable_execute: l0_XD || l1_XD || l2_XD || l3_XD - } - }); - - assert(addr == ((l0_idx_u64 << 39u64) | (l1_idx_u64 << 30u64) | (l2_idx_u64 << 21u64) | (l3_idx_u64 << 12u64))) by (bit_vector) - requires - l0_idx_u64 == (addr & 0xFF80_0000_0000) >> 39, - l1_idx_u64 == (addr & 0x007F_C000_0000) >> 30, - l2_idx_u64 == (addr & 0x0000_3FE0_0000) >> 21, - l3_idx_u64 == (addr & 0x0000_001F_F000) >> 12, - addr < mul(512u64, mul(512, mul(512, mul(512, 4096)))), - addr % 4096 == 0; - - assert(add(add(add( - mul(l0_idx_u64, mul(512u64, mul(512, mul(512, 4096)))), - mul(l1_idx_u64, mul(512u64, mul(512, 4096)))), - mul(l2_idx_u64, mul(512, 4096))), - mul(l3_idx_u64, 4096)) - == l0_idx_u64 << 39u64 | l1_idx_u64 << 30u64 | l2_idx_u64 << 21u64 | l3_idx_u64 << 12u64) by (bit_vector) - requires l0_idx_u64 < 512 && l1_idx_u64 < 512 && l2_idx_u64 < 512 && l3_idx_u64 < 512; - // Previous assert proves: - // l0_idx * L0_ENTRY_SIZE + l1_idx * L1_ENTRY_SIZE + l2_idx * L2_ENTRY_SIZE + l3_idx * L3_ENTRY_SIZE - // == (l0_idx as u64) << 39u64 | (l1_idx as u64) << 30u64 | (l2_idx as u64) << 21u64 | (l3_idx as u64) << 12u64 - - assert(interp_l3_dir.interp_of_entry(l3_idx).map.contains_pair(addr as nat, pte)); - assert(interp_l3_dir.interp().map.contains_pair(addr as nat, pte)); - assert(interp_l2_dir.interp().map.contains_pair(addr as nat, pte)); - assert(interp_l1_dir.interp().map.contains_pair(addr as nat, pte)); - interp_l0_dir.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping(l0_idx); - assert(interp_l0_dir.interp().map.contains_pair(addr as nat, pte)); - assert(m1.contains_pair(addr as nat, pte)); - }, - GhostPageDirectoryEntry::Directory { .. } => assert(false), - GhostPageDirectoryEntry::Empty => assert(false), - } + // l0_idx * L0_ENTRY_SIZE + l1_idx * L1_ENTRY_SIZE + l2_idx * L2_ENTRY_SIZE + l3_idx * L3_ENTRY_SIZE + // == (l0_idx as u64) << 39u64 | (l1_idx as u64) << 30u64 | (l2_idx as u64) << 21u64 | (l3_idx as u64) << 12u64 + assert(interp_l3_dir.interp_of_entry( + l3_idx, + ).map.contains_pair(addr as nat, pte)); + assert(interp_l3_dir.interp().map.contains_pair( + addr as nat, + pte, + )); + assert(interp_l2_dir.interp().map.contains_pair( + addr as nat, + pte, + )); + assert(interp_l1_dir.interp().map.contains_pair( + addr as nat, + pte, + )); + interp_l0_dir.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping( + l0_idx); + assert(interp_l0_dir.interp().map.contains_pair( + addr as nat, + pte, + )); + assert(m1.contains_pair(addr as nat, pte)); }, + GhostPageDirectoryEntry::Directory { .. 
} => assert(false), GhostPageDirectoryEntry::Empty => assert(false), } }, GhostPageDirectoryEntry::Empty => assert(false), } }, - _ => assert(false), + GhostPageDirectoryEntry::Empty => assert(false), + } + }, + _ => assert(false), + }; + }; + assert forall|addr: nat| !m1.contains_key(addr) ==> !m2.contains_key(addr) by { + PT::lemma_interp_at_facts(&mem, pt, 0, mem.cr3_spec().base, 0); + PT::interp(&mem, pt).lemma_inv_implies_interp_inv(); + if addr < MAX_BASE && (exists|pte: PageTableEntry| + valid_pt_walk(mem, nat_to_u64(addr), pte)) { + } else { + if addr >= MAX_BASE { + } else { + assert(addr < MAX_BASE); + let addr: u64 = addr as u64; + assert(!exists|pte: PageTableEntry| valid_pt_walk(mem, addr, pte)) by { + assert(!exists|pte: PageTableEntry| + valid_pt_walk(mem, nat_to_u64(addr as nat), pte)); }; - }; - assert forall|addr: nat| !m1.contains_key(addr) ==> !m2.contains_key(addr) by { - PT::lemma_interp_at_facts(&mem, pt, 0, mem.cr3_spec().base, 0); - PT::interp(&mem, pt).lemma_inv_implies_interp_inv(); - if addr < MAX_BASE && (exists|pte: PageTableEntry| valid_pt_walk(mem, nat_to_u64(addr), pte)) { - } else { - if addr >= MAX_BASE { - } else { - assert(addr < MAX_BASE); - let addr: u64 = addr as u64; - assert(!exists|pte: PageTableEntry| valid_pt_walk(mem, addr, pte)) by { - assert(!exists|pte: PageTableEntry| valid_pt_walk(mem, nat_to_u64(addr as nat), pte)); - }; - let l0_idx_u64: u64 = l0_bits!(addr); - let l0_idx: nat = l0_idx_u64 as nat; - let l1_idx_u64: u64 = l1_bits!(addr); - let l1_idx: nat = l1_idx_u64 as nat; - let l2_idx_u64: u64 = l2_bits!(addr); - let l2_idx: nat = l2_idx_u64 as nat; - let l3_idx_u64: u64 = l3_bits!(addr); - let l3_idx: nat = l3_idx_u64 as nat; - assert(forall|a:u64| (a & bitmask_inc!(0u64,8u64) == a) ==> a < 512) by (bit_vector); - assert(l0_idx < 512 && l1_idx < 512 && l2_idx < 512 && l3_idx < 512) by { - assert(((addr & bitmask_inc!(12u64,20u64)) >> 12u64) & bitmask_inc!(0u64,8u64) == ((addr & bitmask_inc!(12u64,20u64)) >> 12u64)) by (bit_vector); - assert(((addr & bitmask_inc!(21u64,29u64)) >> 21u64) & bitmask_inc!(0u64,8u64) == ((addr & bitmask_inc!(21u64,29u64)) >> 21u64)) by (bit_vector); - assert(((addr & bitmask_inc!(30u64,38u64)) >> 30u64) & bitmask_inc!(0u64,8u64) == ((addr & bitmask_inc!(30u64,38u64)) >> 30u64)) by (bit_vector); - assert(((addr & bitmask_inc!(39u64,47u64)) >> 39u64) & bitmask_inc!(0u64,8u64) == ((addr & bitmask_inc!(39u64,47u64)) >> 39u64)) by (bit_vector); - }; - assert(bitmask_inc!(39u64,47u64) == 0xFF80_0000_0000) by (compute); - assert(bitmask_inc!(30u64,38u64) == 0x007F_C000_0000) by (compute); - assert(bitmask_inc!(21u64,29u64) == 0x0000_3FE0_0000) by (compute); - assert(bitmask_inc!(12u64,20u64) == 0x0000_001F_F000) by (compute); - let interp_l0_dir = PT::interp(&mem, pt); - let interp_l0_entry = PT::interp_at_entry(&mem, pt, 0, mem.cr3_spec().base, 0, l0_idx); - interp_l0_dir.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping(l0_idx); - match read_entry(mem, mem.cr3_spec()@.base, 0, l0_idx) { - GhostPageDirectoryEntry::Directory { - addr: l0_dir_addr, flag_RW: l0_RW, flag_US: l0_US, flag_XD: l0_XD, .. 
+ let l0_idx_u64: u64 = l0_bits!(addr); + let l0_idx: nat = l0_idx_u64 as nat; + let l1_idx_u64: u64 = l1_bits!(addr); + let l1_idx: nat = l1_idx_u64 as nat; + let l2_idx_u64: u64 = l2_bits!(addr); + let l2_idx: nat = l2_idx_u64 as nat; + let l3_idx_u64: u64 = l3_bits!(addr); + let l3_idx: nat = l3_idx_u64 as nat; + assert(forall|a: u64| (a & bitmask_inc!(0u64,8u64) == a) ==> a < 512) + by (bit_vector); + assert(l0_idx < 512 && l1_idx < 512 && l2_idx < 512 && l3_idx < 512) by { + assert(((addr & bitmask_inc!(12u64,20u64)) >> 12u64) & bitmask_inc!(0u64,8u64) + == ((addr & bitmask_inc!(12u64,20u64)) >> 12u64)) by (bit_vector); + assert(((addr & bitmask_inc!(21u64,29u64)) >> 21u64) & bitmask_inc!(0u64,8u64) + == ((addr & bitmask_inc!(21u64,29u64)) >> 21u64)) by (bit_vector); + assert(((addr & bitmask_inc!(30u64,38u64)) >> 30u64) & bitmask_inc!(0u64,8u64) + == ((addr & bitmask_inc!(30u64,38u64)) >> 30u64)) by (bit_vector); + assert(((addr & bitmask_inc!(39u64,47u64)) >> 39u64) & bitmask_inc!(0u64,8u64) + == ((addr & bitmask_inc!(39u64,47u64)) >> 39u64)) by (bit_vector); + }; + assert(bitmask_inc!(39u64,47u64) == 0xFF80_0000_0000) by (compute); + assert(bitmask_inc!(30u64,38u64) == 0x007F_C000_0000) by (compute); + assert(bitmask_inc!(21u64,29u64) == 0x0000_3FE0_0000) by (compute); + assert(bitmask_inc!(12u64,20u64) == 0x0000_001F_F000) by (compute); + let interp_l0_dir = PT::interp(&mem, pt); + let interp_l0_entry = PT::interp_at_entry( + &mem, + pt, + 0, + mem.cr3_spec().base, + 0, + l0_idx, + ); + interp_l0_dir.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping( + l0_idx); + match read_entry(mem, mem.cr3_spec()@.base, 0, l0_idx) { + GhostPageDirectoryEntry::Directory { + addr: l0_dir_addr, + flag_RW: l0_RW, + flag_US: l0_US, + flag_XD: l0_XD, + .. + } => { + assert(interp_l0_entry.is_Directory()); + let l1_base_vaddr = x86_arch_spec.entry_base(0, 0, l0_idx); + let l0_dir_ghost_pt = pt.entries[l0_idx as int].get_Some_0(); + assert(PT::directories_obey_invariant_at(&mem, pt, 0, mem.cr3_spec().base)); + assert(PT::inv_at(&mem, l0_dir_ghost_pt, 1, l0_dir_addr)); + assert(interp_l0_dir.directories_obey_invariant()); + assert(interp_l0_dir.entries[l0_idx as int].get_Directory_0().inv()); + PT::lemma_interp_at_facts( + &mem, + l0_dir_ghost_pt, + 1, + l0_dir_addr, + l1_base_vaddr, + ); + let interp_l1_dir = PT::interp_at( + &mem, + l0_dir_ghost_pt, + 1, + l0_dir_addr, + l1_base_vaddr, + ); + let interp_l1_entry = PT::interp_at_entry( + &mem, + l0_dir_ghost_pt, + 1, + l0_dir_addr, + l1_base_vaddr, + l1_idx, + ); + interp_l1_dir.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping( + l1_idx); + let low_bits: u64 = addr % (L1_ENTRY_SIZE as u64); + // This assert proves: ... 
== l0_idx_u64 * L0_ENTRY_SIZE + l1_idx_u64 * L1_ENTRY_SIZE + low_bits + assert((l0_idx_u64 << 39u64) | (l1_idx_u64 << 30u64) | low_bits == add( + add( + mul(l0_idx_u64, mul(512, mul(512, mul(512, 4096)))), + mul(l1_idx_u64, mul(512, mul(512, 4096))), + ), + low_bits, + )) by (bit_vector) + requires + l1_idx_u64 == (addr & 0x007F_C000_0000) >> 30, + low_bits == addr % mul(512, mul(512, 4096)), + ; + assert(addr == ((l0_idx_u64 << 39u64) | (l1_idx_u64 << 30u64) | low_bits)) + by (bit_vector) + requires + l0_idx_u64 == (addr & 0xFF80_0000_0000) >> 39, + l1_idx_u64 == (addr & 0x007F_C000_0000) >> 30, + addr < mul(512u64, mul(512, mul(512, mul(512, 4096)))), + low_bits == addr % mul(512, mul(512, 4096)), + ; + match read_entry(mem, l0_dir_addr as nat, 1, l1_idx) { + GhostPageDirectoryEntry::Page { + addr: page_addr, + flag_RW: l1_RW, + flag_US: l1_US, + flag_XD: l1_XD, + .. } => { - assert(interp_l0_entry.is_Directory()); - let l1_base_vaddr = x86_arch_spec.entry_base(0, 0, l0_idx); - let l0_dir_ghost_pt = pt.entries[l0_idx as int].get_Some_0(); - assert(PT::directories_obey_invariant_at(&mem, pt, 0, mem.cr3_spec().base)); - assert(PT::inv_at(&mem, l0_dir_ghost_pt, 1, l0_dir_addr)); - assert(interp_l0_dir.directories_obey_invariant()); - assert(interp_l0_dir.entries[l0_idx as int].get_Directory_0().inv()); - PT::lemma_interp_at_facts(&mem, l0_dir_ghost_pt, 1, l0_dir_addr, l1_base_vaddr); - let interp_l1_dir = PT::interp_at(&mem, l0_dir_ghost_pt, 1, l0_dir_addr, l1_base_vaddr); - let interp_l1_entry = PT::interp_at_entry(&mem, l0_dir_ghost_pt, 1, l0_dir_addr, l1_base_vaddr, l1_idx); - interp_l1_dir.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping(l1_idx); - - let low_bits: u64 = addr % (L1_ENTRY_SIZE as u64); - // This assert proves: ... == l0_idx_u64 * L0_ENTRY_SIZE + l1_idx_u64 * L1_ENTRY_SIZE + low_bits - assert((l0_idx_u64 << 39u64) | (l1_idx_u64 << 30u64) | low_bits - == add(add(mul(l0_idx_u64, mul(512, mul(512, mul(512, 4096)))), - mul(l1_idx_u64, mul(512, mul(512, 4096)))), - low_bits)) by (bit_vector) - requires - l1_idx_u64 == (addr & 0x007F_C000_0000) >> 30, - low_bits == addr % mul(512, mul(512, 4096)); - assert(addr == ((l0_idx_u64 << 39u64) | (l1_idx_u64 << 30u64) | low_bits)) by (bit_vector) - requires - l0_idx_u64 == (addr & 0xFF80_0000_0000) >> 39, - l1_idx_u64 == (addr & 0x007F_C000_0000) >> 30, - addr < mul(512u64, mul(512, mul(512, mul(512, 4096)))), - low_bits == addr % mul(512, mul(512, 4096)); - match read_entry(mem, l0_dir_addr as nat, 1, l1_idx) { - GhostPageDirectoryEntry::Page { - addr: page_addr, flag_RW: l1_RW, flag_US: l1_US, flag_XD: l1_XD, .. - } => { - assert_by_contradiction!(!aligned(addr as nat, L1_ENTRY_SIZE as nat), { + assert_by_contradiction!(!aligned(addr as nat, L1_ENTRY_SIZE as nat), { let pte = PageTableEntry { frame: MemRegion { base: page_addr as nat, size: L1_ENTRY_SIZE as nat }, flags: Flags { @@ -4689,48 +7163,95 @@ pub mod impl_u { }; assert(valid_pt_walk(mem, addr as u64, pte)); }); - assert(!interp_l1_dir.interp_of_entry(l1_idx).map.contains_key(addr as nat)); - assert(!interp_l1_dir.interp().map.contains_key(addr as nat)); - assert(!interp_l0_dir.interp().map.contains_key(addr as nat)); - assert(!m2.contains_key(addr as nat)); - } - GhostPageDirectoryEntry::Directory { - addr: l1_dir_addr, flag_RW: l1_RW, flag_US: l1_US, flag_XD: l1_XD, .. 
+ assert(!interp_l1_dir.interp_of_entry(l1_idx).map.contains_key( + addr as nat, + )); + assert(!interp_l1_dir.interp().map.contains_key(addr as nat)); + assert(!interp_l0_dir.interp().map.contains_key(addr as nat)); + assert(!m2.contains_key(addr as nat)); + }, + GhostPageDirectoryEntry::Directory { + addr: l1_dir_addr, + flag_RW: l1_RW, + flag_US: l1_US, + flag_XD: l1_XD, + .. + } => { + assert(interp_l1_entry.is_Directory()); + let l2_base_vaddr = x86_arch_spec.entry_base( + 1, + l1_base_vaddr, + l1_idx, + ); + let l1_dir_ghost_pt = + l0_dir_ghost_pt.entries[l1_idx as int].get_Some_0(); + assert(PT::directories_obey_invariant_at( + &mem, + l0_dir_ghost_pt, + 1, + l0_dir_addr, + )); + assert(PT::inv_at(&mem, l1_dir_ghost_pt, 2, l1_dir_addr)); + PT::lemma_interp_at_facts( + &mem, + l1_dir_ghost_pt, + 2, + l1_dir_addr, + l2_base_vaddr, + ); + let interp_l2_dir = PT::interp_at( + &mem, + l1_dir_ghost_pt, + 2, + l1_dir_addr, + l2_base_vaddr, + ); + let interp_l2_entry = PT::interp_at_entry( + &mem, + l1_dir_ghost_pt, + 2, + l1_dir_addr, + l2_base_vaddr, + l2_idx, + ); + interp_l2_dir.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping( + l2_idx); + let low_bits: u64 = addr % (L2_ENTRY_SIZE as u64); + // This assert proves: ... == l0_idx_u64 * L0_ENTRY_SIZE + l1_idx_u64 * L1_ENTRY_SIZE + l2_idx_u64 * L2_ENTRY_SIZE + low_bits + assert((l0_idx_u64 << 39u64) | (l1_idx_u64 << 30u64) | (l2_idx_u64 + << 21u64) | low_bits == add( + add( + add( + mul(l0_idx_u64, mul(512, mul(512, mul(512, 4096)))), + mul(l1_idx_u64, mul(512, mul(512, 4096))), + ), + mul(l2_idx_u64, mul(512, 4096)), + ), + low_bits, + )) by (bit_vector) + requires + l1_idx_u64 == (addr & 0x007F_C000_0000) >> 30, + l2_idx_u64 == (addr & 0x0000_3FE0_0000) >> 21, + low_bits == addr % mul(512, 4096), + ; + assert(addr == ((l0_idx_u64 << 39u64) | (l1_idx_u64 << 30u64) | ( + l2_idx_u64 << 21u64) | low_bits)) by (bit_vector) + requires + l0_idx_u64 == (addr & 0xFF80_0000_0000) >> 39, + l1_idx_u64 == (addr & 0x007F_C000_0000) >> 30, + l2_idx_u64 == (addr & 0x0000_3FE0_0000) >> 21, + addr < mul(512u64, mul(512, mul(512, mul(512, 4096)))), + low_bits == addr % mul(512, 4096), + ; + match read_entry(mem, l1_dir_addr as nat, 2, l2_idx) { + GhostPageDirectoryEntry::Page { + addr: page_addr, + flag_RW: l2_RW, + flag_US: l2_US, + flag_XD: l2_XD, + .. } => { - assert(interp_l1_entry.is_Directory()); - let l2_base_vaddr = x86_arch_spec.entry_base(1, l1_base_vaddr, l1_idx); - let l1_dir_ghost_pt = l0_dir_ghost_pt.entries[l1_idx as int].get_Some_0(); - assert(PT::directories_obey_invariant_at(&mem, l0_dir_ghost_pt, 1, l0_dir_addr)); - assert(PT::inv_at(&mem, l1_dir_ghost_pt, 2, l1_dir_addr)); - PT::lemma_interp_at_facts(&mem, l1_dir_ghost_pt, 2, l1_dir_addr, l2_base_vaddr); - let interp_l2_dir = PT::interp_at(&mem, l1_dir_ghost_pt, 2, l1_dir_addr, l2_base_vaddr); - let interp_l2_entry = PT::interp_at_entry(&mem, l1_dir_ghost_pt, 2, l1_dir_addr, l2_base_vaddr, l2_idx); - interp_l2_dir.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping(l2_idx); - - let low_bits: u64 = addr % (L2_ENTRY_SIZE as u64); - // This assert proves: ... 
== l0_idx_u64 * L0_ENTRY_SIZE + l1_idx_u64 * L1_ENTRY_SIZE + l2_idx_u64 * L2_ENTRY_SIZE + low_bits - assert((l0_idx_u64 << 39u64) | (l1_idx_u64 << 30u64) | (l2_idx_u64 << 21u64) | low_bits - == add(add(add( - mul(l0_idx_u64, mul(512, mul(512, mul(512, 4096)))), - mul(l1_idx_u64, mul(512, mul(512, 4096)))), - mul(l2_idx_u64, mul(512, 4096))), - low_bits)) by (bit_vector) - requires - l1_idx_u64 == (addr & 0x007F_C000_0000) >> 30, - l2_idx_u64 == (addr & 0x0000_3FE0_0000) >> 21, - low_bits == addr % mul(512, 4096); - assert(addr == ((l0_idx_u64 << 39u64) | (l1_idx_u64 << 30u64) | (l2_idx_u64 << 21u64) | low_bits)) by (bit_vector) - requires - l0_idx_u64 == (addr & 0xFF80_0000_0000) >> 39, - l1_idx_u64 == (addr & 0x007F_C000_0000) >> 30, - l2_idx_u64 == (addr & 0x0000_3FE0_0000) >> 21, - addr < mul(512u64, mul(512, mul(512, mul(512, 4096)))), - low_bits == addr % mul(512, 4096); - match read_entry(mem, l1_dir_addr as nat, 2, l2_idx) { - GhostPageDirectoryEntry::Page { - addr: page_addr, flag_RW: l2_RW, flag_US: l2_US, flag_XD: l2_XD, .. - } => { - assert_by_contradiction!(!aligned(addr as nat, L2_ENTRY_SIZE as nat), { + assert_by_contradiction!(!aligned(addr as nat, L2_ENTRY_SIZE as nat), { let pte = PageTableEntry { frame: MemRegion { base: page_addr as nat, size: L2_ENTRY_SIZE as nat }, flags: Flags { @@ -4741,53 +7262,118 @@ pub mod impl_u { }; assert(valid_pt_walk(mem, addr as u64, pte)); }); - assert(!interp_l2_dir.interp_of_entry(l2_idx).map.contains_key(addr as nat)); - assert(!interp_l2_dir.interp().map.contains_key(addr as nat)); - assert(!interp_l1_dir.interp_of_entry(l1_idx).map.contains_key(addr as nat)); - assert(!interp_l1_dir.interp().map.contains_key(addr as nat)); - assert(!interp_l0_dir.interp().map.contains_key(addr as nat)); - assert(!m2.contains_key(addr as nat)); - }, - GhostPageDirectoryEntry::Directory { - addr: l2_dir_addr, flag_RW: l2_RW, flag_US: l2_US, flag_XD: l2_XD, .. + assert(!interp_l2_dir.interp_of_entry( + l2_idx, + ).map.contains_key(addr as nat)); + assert(!interp_l2_dir.interp().map.contains_key( + addr as nat, + )); + assert(!interp_l1_dir.interp_of_entry( + l1_idx, + ).map.contains_key(addr as nat)); + assert(!interp_l1_dir.interp().map.contains_key( + addr as nat, + )); + assert(!interp_l0_dir.interp().map.contains_key( + addr as nat, + )); + assert(!m2.contains_key(addr as nat)); + }, + GhostPageDirectoryEntry::Directory { + addr: l2_dir_addr, + flag_RW: l2_RW, + flag_US: l2_US, + flag_XD: l2_XD, + .. + } => { + assert(interp_l2_entry.is_Directory()); + let l3_base_vaddr = x86_arch_spec.entry_base( + 2, + l2_base_vaddr, + l2_idx, + ); + let l2_dir_ghost_pt = + l1_dir_ghost_pt.entries[l2_idx as int].get_Some_0(); + assert(PT::directories_obey_invariant_at( + &mem, + l1_dir_ghost_pt, + 2, + l1_dir_addr, + )); + assert(PT::inv_at(&mem, l2_dir_ghost_pt, 3, l2_dir_addr)); + PT::lemma_interp_at_facts( + &mem, + l2_dir_ghost_pt, + 3, + l2_dir_addr, + l3_base_vaddr, + ); + let interp_l3_dir = PT::interp_at( + &mem, + l2_dir_ghost_pt, + 3, + l2_dir_addr, + l3_base_vaddr, + ); + let interp_l3_entry = PT::interp_at_entry( + &mem, + l2_dir_ghost_pt, + 3, + l2_dir_addr, + l3_base_vaddr, + l3_idx, + ); + interp_l3_dir.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping( + l3_idx); + let low_bits: u64 = addr % (L3_ENTRY_SIZE as u64); + // This assert proves: ... 
== l0_idx_u64 * L0_ENTRY_SIZE + l1_idx_u64 * L1_ENTRY_SIZE + l2_idx_u64 * L2_ENTRY_SIZE + l3_idx_u64 * L3_ENTRY_SIZE + low_bits + assert((l0_idx_u64 << 39u64) | (l1_idx_u64 << 30u64) | ( + l2_idx_u64 << 21u64) | (l3_idx_u64 << 12u64) | low_bits + == add( + add( + add( + add( + mul( + l0_idx_u64, + mul(512, mul(512, mul(512, 4096))), + ), + mul(l1_idx_u64, mul(512, mul(512, 4096))), + ), + mul(l2_idx_u64, mul(512, 4096)), + ), + mul(l3_idx_u64, 4096), + ), + low_bits, + )) by (bit_vector) + requires + l1_idx_u64 == (addr & 0x007F_C000_0000) >> 30, + l2_idx_u64 == (addr & 0x0000_3FE0_0000) >> 21, + l3_idx_u64 == (addr & 0x0000_001F_F000) >> 12, + low_bits == addr % 4096, + ; + assert(addr == ((l0_idx_u64 << 39u64) | (l1_idx_u64 + << 30u64) | (l2_idx_u64 << 21u64) | (l3_idx_u64 + << 12u64) | low_bits)) by (bit_vector) + requires + l0_idx_u64 == (addr & 0xFF80_0000_0000) >> 39, + l1_idx_u64 == (addr & 0x007F_C000_0000) >> 30, + l2_idx_u64 == (addr & 0x0000_3FE0_0000) >> 21, + l3_idx_u64 == (addr & 0x0000_001F_F000) >> 12, + addr < mul( + 512u64, + mul(512, mul(512, mul(512, 4096))), + ), + low_bits == addr % 4096, + ; + match read_entry(mem, l2_dir_addr as nat, 3, l3_idx) { + GhostPageDirectoryEntry::Page { + addr: page_addr, + flag_RW: l3_RW, + flag_US: l3_US, + flag_XD: l3_XD, + .. } => { - assert(interp_l2_entry.is_Directory()); - let l3_base_vaddr = x86_arch_spec.entry_base(2, l2_base_vaddr, l2_idx); - let l2_dir_ghost_pt = l1_dir_ghost_pt.entries[l2_idx as int].get_Some_0(); - assert(PT::directories_obey_invariant_at(&mem, l1_dir_ghost_pt, 2, l1_dir_addr)); - assert(PT::inv_at(&mem, l2_dir_ghost_pt, 3, l2_dir_addr)); - PT::lemma_interp_at_facts(&mem, l2_dir_ghost_pt, 3, l2_dir_addr, l3_base_vaddr); - let interp_l3_dir = PT::interp_at(&mem, l2_dir_ghost_pt, 3, l2_dir_addr, l3_base_vaddr); - let interp_l3_entry = PT::interp_at_entry(&mem, l2_dir_ghost_pt, 3, l2_dir_addr, l3_base_vaddr, l3_idx); - interp_l3_dir.lemma_interp_of_entry_contains_mapping_implies_interp_contains_mapping(l3_idx); - - let low_bits: u64 = addr % (L3_ENTRY_SIZE as u64); - // This assert proves: ... == l0_idx_u64 * L0_ENTRY_SIZE + l1_idx_u64 * L1_ENTRY_SIZE + l2_idx_u64 * L2_ENTRY_SIZE + l3_idx_u64 * L3_ENTRY_SIZE + low_bits - assert((l0_idx_u64 << 39u64) | (l1_idx_u64 << 30u64) | (l2_idx_u64 << 21u64) | (l3_idx_u64 << 12u64) | low_bits - == add(add(add(add( - mul(l0_idx_u64, mul(512, mul(512, mul(512, 4096)))), - mul(l1_idx_u64, mul(512, mul(512, 4096)))), - mul(l2_idx_u64, mul(512, 4096))), - mul(l3_idx_u64, 4096)), - low_bits)) by (bit_vector) - requires - l1_idx_u64 == (addr & 0x007F_C000_0000) >> 30, - l2_idx_u64 == (addr & 0x0000_3FE0_0000) >> 21, - l3_idx_u64 == (addr & 0x0000_001F_F000) >> 12, - low_bits == addr % 4096; - assert(addr == ((l0_idx_u64 << 39u64) | (l1_idx_u64 << 30u64) | (l2_idx_u64 << 21u64) | (l3_idx_u64 << 12u64) | low_bits)) by (bit_vector) - requires - l0_idx_u64 == (addr & 0xFF80_0000_0000) >> 39, - l1_idx_u64 == (addr & 0x007F_C000_0000) >> 30, - l2_idx_u64 == (addr & 0x0000_3FE0_0000) >> 21, - l3_idx_u64 == (addr & 0x0000_001F_F000) >> 12, - addr < mul(512u64, mul(512, mul(512, mul(512, 4096)))), - low_bits == addr % 4096; - match read_entry(mem, l2_dir_addr as nat, 3, l3_idx) { - GhostPageDirectoryEntry::Page { - addr: page_addr, flag_RW: l3_RW, flag_US: l3_US, flag_XD: l3_XD, .. 
- } => { - assert_by_contradiction!(!aligned(addr as nat, L3_ENTRY_SIZE as nat), { + assert_by_contradiction!(!aligned(addr as nat, L3_ENTRY_SIZE as nat), { let pte = PageTableEntry { frame: MemRegion { base: page_addr as nat, size: L3_ENTRY_SIZE as nat }, flags: Flags { @@ -4798,168 +7384,253 @@ pub mod impl_u { }; assert(valid_pt_walk(mem, addr as u64, pte)); }); - assert(!interp_l3_dir.interp_of_entry(l3_idx).map.contains_key(addr as nat)); - assert(!interp_l3_dir.interp().map.contains_key(addr as nat)); - assert(!interp_l2_dir.interp_of_entry(l2_idx).map.contains_key(addr as nat)); - assert(!interp_l2_dir.interp().map.contains_key(addr as nat)); - assert(!interp_l1_dir.interp_of_entry(l1_idx).map.contains_key(addr as nat)); - assert(!interp_l1_dir.interp().map.contains_key(addr as nat)); - assert(!interp_l0_dir.interp().map.contains_key(addr as nat)); - assert(!m2.contains_key(addr as nat)); - }, - GhostPageDirectoryEntry::Directory { .. } => assert(false), - GhostPageDirectoryEntry::Empty => { - assert(!interp_l3_dir.interp_of_entry(l3_idx).map.contains_key(addr as nat)); - assert(!interp_l3_dir.interp().map.contains_key(addr as nat)); - assert(!interp_l2_dir.interp_of_entry(l2_idx).map.contains_key(addr as nat)); - assert(!interp_l2_dir.interp().map.contains_key(addr as nat)); - assert(!interp_l1_dir.interp_of_entry(l1_idx).map.contains_key(addr as nat)); - assert(!interp_l1_dir.interp().map.contains_key(addr as nat)); - assert(!interp_l0_dir.interp().map.contains_key(addr as nat)); - assert(!m2.contains_key(addr as nat)); - } - } + assert(!interp_l3_dir.interp_of_entry( + l3_idx, + ).map.contains_key(addr as nat)); + assert(!interp_l3_dir.interp().map.contains_key( + addr as nat, + )); + assert(!interp_l2_dir.interp_of_entry( + l2_idx, + ).map.contains_key(addr as nat)); + assert(!interp_l2_dir.interp().map.contains_key( + addr as nat, + )); + assert(!interp_l1_dir.interp_of_entry( + l1_idx, + ).map.contains_key(addr as nat)); + assert(!interp_l1_dir.interp().map.contains_key( + addr as nat, + )); + assert(!interp_l0_dir.interp().map.contains_key( + addr as nat, + )); + assert(!m2.contains_key(addr as nat)); }, + GhostPageDirectoryEntry::Directory { + .. 
+ } => assert(false), GhostPageDirectoryEntry::Empty => { - assert(!interp_l2_dir.interp_of_entry(l2_idx).map.contains_key(addr as nat)); - assert(!interp_l2_dir.interp().map.contains_key(addr as nat)); - assert(!interp_l1_dir.interp_of_entry(l1_idx).map.contains_key(addr as nat)); - assert(!interp_l1_dir.interp().map.contains_key(addr as nat)); - assert(!interp_l0_dir.interp().map.contains_key(addr as nat)); + assert(!interp_l3_dir.interp_of_entry( + l3_idx, + ).map.contains_key(addr as nat)); + assert(!interp_l3_dir.interp().map.contains_key( + addr as nat, + )); + assert(!interp_l2_dir.interp_of_entry( + l2_idx, + ).map.contains_key(addr as nat)); + assert(!interp_l2_dir.interp().map.contains_key( + addr as nat, + )); + assert(!interp_l1_dir.interp_of_entry( + l1_idx, + ).map.contains_key(addr as nat)); + assert(!interp_l1_dir.interp().map.contains_key( + addr as nat, + )); + assert(!interp_l0_dir.interp().map.contains_key( + addr as nat, + )); assert(!m2.contains_key(addr as nat)); }, } }, GhostPageDirectoryEntry::Empty => { - assert(!interp_l1_dir.interp_of_entry(l1_idx).map.contains_key(addr as nat)); - assert(!interp_l1_dir.interp().map.contains_key(addr as nat)); - assert(!interp_l0_dir.interp().map.contains_key(addr as nat)); + assert(!interp_l2_dir.interp_of_entry( + l2_idx, + ).map.contains_key(addr as nat)); + assert(!interp_l2_dir.interp().map.contains_key( + addr as nat, + )); + assert(!interp_l1_dir.interp_of_entry( + l1_idx, + ).map.contains_key(addr as nat)); + assert(!interp_l1_dir.interp().map.contains_key( + addr as nat, + )); + assert(!interp_l0_dir.interp().map.contains_key( + addr as nat, + )); assert(!m2.contains_key(addr as nat)); }, } }, - GhostPageDirectoryEntry::Page { .. } => assert(false), GhostPageDirectoryEntry::Empty => { - let low_bits: u64 = addr % (L0_ENTRY_SIZE as u64); - // This assert proves: ... 
== l0_idx_u64 * L0_ENTRY_SIZE + low_bits - assert((l0_idx_u64 << 39u64) | low_bits - == add(mul(l0_idx_u64, mul(512, mul(512, mul(512, 4096)))), - low_bits)) by (bit_vector) - requires - low_bits == addr % mul(512, mul(512, mul(512, 4096))); - assert(addr == ((l0_idx_u64 << 39u64) | low_bits)) by (bit_vector) - requires - l0_idx_u64 == (addr & 0xFF80_0000_0000) >> 39, - addr < mul(512u64, mul(512, mul(512, mul(512, 4096)))), - low_bits == addr % mul(512, mul(512, mul(512, 4096))); + assert(!interp_l1_dir.interp_of_entry(l1_idx).map.contains_key( + addr as nat, + )); + assert(!interp_l1_dir.interp().map.contains_key(addr as nat)); assert(!interp_l0_dir.interp().map.contains_key(addr as nat)); assert(!m2.contains_key(addr as nat)); }, - }; - } - } - }; - assert(m1 =~= m2) by { - assert forall|addr: nat| m1.dom().contains(addr) <==> m2.dom().contains(addr) by { - assert(m1.dom().contains(addr) ==> m2.contains_pair(addr, m1[addr])); - assert(m2.dom().contains(addr) ==> m1.contains_pair(addr, m2[addr])); - }; - assert forall|addr: nat| #[trigger] m1.contains_key(addr) && m2.contains_key(addr) implies m1[addr] == m2[addr] by { - assert(m1.contains_pair(addr, m1[addr])); - assert(m2.contains_pair(addr, m1[addr])); - }; - }; - } - - proof fn lemma_no_entries_implies_interp_at_aux_no_entries(mem: mem::PageTableMemory, pt: PTDir, layer: nat, ptr: usize, base_vaddr: nat, init: Seq) - requires - mem.regions() == set![mem.cr3_spec()@], - (forall|i: nat| i < 512 ==> mem.region_view(mem.cr3_spec()@)[i as int] == 0), - layer == 0, - PT::inv_at(&mem, pt, layer, ptr), - forall|i: nat| i < init.len() ==> init[i as int] == l1::NodeEntry::Empty(), - init.len() <= 512, - ensures - ({ let res = PT::interp_at_aux(&mem, pt, layer, ptr, base_vaddr, init); - &&& res.len() == 512 - &&& forall|i: nat| i < res.len() ==> res[i as int] == l1::NodeEntry::Empty() - }) - decreases 512 - init.len() - { - lemma_new_seq::>(512, Option::None); - let res = PT::interp_at_aux(&mem, pt, layer, ptr, base_vaddr, init); - if init.len() >= 512 { - } else { - let entry = PT::interp_at_entry(&mem, pt, layer, ptr, base_vaddr, init.len()); - assert(PT::ghost_pt_matches_structure(&mem, pt, layer, ptr)); - assert forall|i: nat| i < 512 implies PT::view_at(&mem, pt, layer, ptr, i).is_Empty() by { - let entry = mem.spec_read(i, pt.region); - assert((entry & (1u64 << 0)) != (1u64 << 0)) by (bit_vector) requires entry == 0u64; + } + }, + GhostPageDirectoryEntry::Page { .. } => assert(false), + GhostPageDirectoryEntry::Empty => { + let low_bits: u64 = addr % (L0_ENTRY_SIZE as u64); + // This assert proves: ... 
== l0_idx_u64 * L0_ENTRY_SIZE + low_bits + assert((l0_idx_u64 << 39u64) | low_bits == add( + mul(l0_idx_u64, mul(512, mul(512, mul(512, 4096)))), + low_bits, + )) by (bit_vector) + requires + low_bits == addr % mul(512, mul(512, mul(512, 4096))), + ; + assert(addr == ((l0_idx_u64 << 39u64) | low_bits)) by (bit_vector) + requires + l0_idx_u64 == (addr & 0xFF80_0000_0000) >> 39, + addr < mul(512u64, mul(512, mul(512, mul(512, 4096)))), + low_bits == addr % mul(512, mul(512, mul(512, 4096))), + ; + assert(!interp_l0_dir.interp().map.contains_key(addr as nat)); + assert(!m2.contains_key(addr as nat)); + }, }; - assert(entry == l1::NodeEntry::Empty()); - lemma_no_entries_implies_interp_at_aux_no_entries(mem, pt, layer, ptr, base_vaddr, init.push(entry)); } } + }; + assert(m1 =~= m2) by { + assert forall|addr: nat| m1.dom().contains(addr) <==> m2.dom().contains(addr) by { + assert(m1.dom().contains(addr) ==> m2.contains_pair(addr, m1[addr])); + assert(m2.dom().contains(addr) ==> m1.contains_pair(addr, m2[addr])); + }; + assert forall|addr: nat| #[trigger] + m1.contains_key(addr) && m2.contains_key(addr) implies m1[addr] == m2[addr] by { + assert(m1.contains_pair(addr, m1[addr])); + assert(m2.contains_pair(addr, m1[addr])); + }; + }; +} - impl impl_spec::InterfaceSpec for impl_spec::PageTableImpl { - closed spec fn ispec_inv(&self, mem: &mem::PageTableMemory) -> bool { - exists|pt: PTDir| #[trigger] PT::inv(mem, pt) && PT::interp(mem, pt).inv() - } +proof fn lemma_no_entries_implies_interp_at_aux_no_entries( + mem: mem::PageTableMemory, + pt: PTDir, + layer: nat, + ptr: usize, + base_vaddr: nat, + init: Seq, +) + requires + mem.regions() == set![mem.cr3_spec()@], + (forall|i: nat| i < 512 ==> mem.region_view(mem.cr3_spec()@)[i as int] == 0), + layer == 0, + PT::inv_at(&mem, pt, layer, ptr), + forall|i: nat| i < init.len() ==> init[i as int] == l1::NodeEntry::Empty(), + init.len() <= 512, + ensures + ({ + let res = PT::interp_at_aux(&mem, pt, layer, ptr, base_vaddr, init); + &&& res.len() == 512 + &&& forall|i: nat| i < res.len() ==> res[i as int] == l1::NodeEntry::Empty() + }), + decreases 512 - init.len(), +{ + lemma_new_seq::>(512, Option::None); + let res = PT::interp_at_aux(&mem, pt, layer, ptr, base_vaddr, init); + if init.len() >= 512 { + } else { + let entry = PT::interp_at_entry(&mem, pt, layer, ptr, base_vaddr, init.len()); + assert(PT::ghost_pt_matches_structure(&mem, pt, layer, ptr)); + assert forall|i: nat| i < 512 implies PT::view_at(&mem, pt, layer, ptr, i).is_Empty() by { + let entry = mem.spec_read(i, pt.region); + assert((entry & (1u64 << 0)) != (1u64 << 0)) by (bit_vector) + requires + entry == 0u64, + ; + }; + assert(entry == l1::NodeEntry::Empty()); + lemma_no_entries_implies_interp_at_aux_no_entries( + mem, + pt, + layer, + ptr, + base_vaddr, + init.push(entry), + ); + } +} - proof fn ispec_init_implies_inv(&self, mem: &mem::PageTableMemory) { - let pt = PTDir { - region: mem.cr3_spec()@, - entries: new_seq(512, Option::None), - used_regions: set![mem.cr3_spec()@], - }; - lemma_new_seq::>(512, Option::None); - assert(PT::inv(mem, pt)) by { - x86_arch_inv(); - axiom_x86_arch_exec_spec(); - PT::lemma_zeroed_page_implies_empty_at(mem, pt, 0, mem.cr3_spec().base); - }; - lemma_no_entries_implies_interp_at_aux_no_entries(*mem, pt, 0, mem.cr3_spec().base, 0, seq![]); - } +impl impl_spec::InterfaceSpec for impl_spec::PageTableImpl { + closed spec fn ispec_inv(&self, mem: &mem::PageTableMemory) -> bool { + exists|pt: PTDir| #[trigger] PT::inv(mem, pt) && PT::interp(mem, pt).inv() + } 
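The bit_vector asserts in lemma_page_table_walk_interp_aux above all reduce to one arithmetic fact: for indices below 512, OR-ing the shifted index fields is the same as summing idx * entry_size, because each field occupies a disjoint 9-bit range above the 12 page-offset bits. A minimal standalone Rust spot-check of that identity, in plain u64 arithmetic outside Verus (the entry-size constants and bit masks are written out here rather than imported, so treat them as illustrative assumptions):

// Sanity check: for indices < 512, OR of the shifted index fields equals the
// weighted sum with the entry sizes (2^39, 2^30, 2^21, 2^12), and the l*_bits!
// style mask-and-shift recovers each index, because the fields do not overlap.
fn main() {
    const L0_ENTRY_SIZE: u64 = 512 * 512 * 512 * 4096; // 1 << 39
    const L1_ENTRY_SIZE: u64 = 512 * 512 * 4096;       // 1 << 30
    const L2_ENTRY_SIZE: u64 = 512 * 4096;             // 1 << 21
    const L3_ENTRY_SIZE: u64 = 4096;                    // 1 << 12
    for &(l0, l1, l2, l3) in &[(0u64, 0u64, 0u64, 0u64), (1, 2, 3, 4), (511, 511, 511, 511)] {
        let ored = (l0 << 39) | (l1 << 30) | (l2 << 21) | (l3 << 12);
        let summed = l0 * L0_ENTRY_SIZE + l1 * L1_ENTRY_SIZE + l2 * L2_ENTRY_SIZE + l3 * L3_ENTRY_SIZE;
        assert_eq!(ored, summed);
        // Recovering the indices from the address matches the bitmask_inc! constants above.
        assert_eq!((ored & 0xFF80_0000_0000) >> 39, l0);
        assert_eq!((ored & 0x007F_C000_0000) >> 30, l1);
        assert_eq!((ored & 0x0000_3FE0_0000) >> 21, l2);
        assert_eq!((ored & 0x0000_001F_F000) >> 12, l3);
    }
}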
- fn ispec_map_frame(&self, mem: &mut mem::PageTableMemory, vaddr: usize, pte: PageTableEntryExec) -> (res: MapResult) { - let mut pt: Ghost = Ghost(choose|pt: PTDir| #[trigger] PT::inv(mem, pt) && PT::interp(mem, pt).inv()); - proof { - PT::lemma_interp_at_facts(mem, pt@, 0, mem.cr3_spec().base, 0); - PT::interp(mem, pt@).lemma_inv_implies_interp_inv(); - assert(x86_arch_spec.upper_vaddr(0, 0) == crate::definitions_t::PT_BOUND_HIGH) by (compute_only); - lemma_page_table_walk_interp(); - } - PT::map_frame(mem, &mut pt, vaddr, pte) - } + proof fn ispec_init_implies_inv(&self, mem: &mem::PageTableMemory) { + let pt = PTDir { + region: mem.cr3_spec()@, + entries: new_seq(512, Option::None), + used_regions: set![mem.cr3_spec()@], + }; + lemma_new_seq::>(512, Option::None); + assert(PT::inv(mem, pt)) by { + x86_arch_inv(); + axiom_x86_arch_exec_spec(); + PT::lemma_zeroed_page_implies_empty_at(mem, pt, 0, mem.cr3_spec().base); + }; + lemma_no_entries_implies_interp_at_aux_no_entries( + *mem, + pt, + 0, + mem.cr3_spec().base, + 0, + seq![], + ); + } - fn ispec_unmap(&self, mem: &mut mem::PageTableMemory, vaddr: usize) -> (res: UnmapResult) { - let mut pt: Ghost = Ghost(choose|pt: PTDir| #[trigger] PT::inv(mem, pt) && PT::interp(mem, pt).inv()); - proof { - PT::lemma_interp_at_facts(mem, pt@, 0, mem.cr3_spec().base, 0); - PT::interp(mem, pt@).lemma_inv_implies_interp_inv(); - assert(x86_arch_spec.upper_vaddr(0, 0) == crate::definitions_t::PT_BOUND_HIGH) by (compute_only); - lemma_page_table_walk_interp(); - } - PT::unmap(mem, &mut pt, vaddr) - } + fn ispec_map_frame( + &self, + mem: &mut mem::PageTableMemory, + vaddr: usize, + pte: PageTableEntryExec, + ) -> (res: MapResult) { + let mut pt: Ghost = Ghost( + choose|pt: PTDir| #[trigger] PT::inv(mem, pt) && PT::interp(mem, pt).inv(), + ); + proof { + PT::lemma_interp_at_facts(mem, pt@, 0, mem.cr3_spec().base, 0); + PT::interp(mem, pt@).lemma_inv_implies_interp_inv(); + assert(x86_arch_spec.upper_vaddr(0, 0) == crate::definitions_t::PT_BOUND_HIGH) + by (compute_only); + lemma_page_table_walk_interp(); + } + PT::map_frame(mem, &mut pt, vaddr, pte) + } - fn ispec_resolve(&self, mem: &mem::PageTableMemory, vaddr: usize) -> (res: ResolveResultExec) { - let pt: Ghost = Ghost(choose|pt: PTDir| #[trigger] PT::inv(mem, pt) && PT::interp(mem, pt).inv()); - proof { - PT::lemma_interp_at_facts(mem, pt@, 0, mem.cr3_spec().base, 0); - PT::interp(mem, pt@).lemma_inv_implies_interp_inv(); - assert(x86_arch_spec.upper_vaddr(0, 0) == crate::definitions_t::PT_BOUND_HIGH) by (compute_only); - lemma_page_table_walk_interp(); - } - match PT::resolve(mem, pt, vaddr) { - Ok((v,pte)) => ResolveResultExec::Ok(v,pte), - Err(e) => ResolveResultExec::ErrUnmapped, - } - } + fn ispec_unmap(&self, mem: &mut mem::PageTableMemory, vaddr: usize) -> (res: UnmapResult) { + let mut pt: Ghost = Ghost( + choose|pt: PTDir| #[trigger] PT::inv(mem, pt) && PT::interp(mem, pt).inv(), + ); + proof { + PT::lemma_interp_at_facts(mem, pt@, 0, mem.cr3_spec().base, 0); + PT::interp(mem, pt@).lemma_inv_implies_interp_inv(); + assert(x86_arch_spec.upper_vaddr(0, 0) == crate::definitions_t::PT_BOUND_HIGH) + by (compute_only); + lemma_page_table_walk_interp(); } + PT::unmap(mem, &mut pt, vaddr) + } + fn ispec_resolve(&self, mem: &mem::PageTableMemory, vaddr: usize) -> (res: ResolveResultExec) { + let pt: Ghost = Ghost( + choose|pt: PTDir| #[trigger] PT::inv(mem, pt) && PT::interp(mem, pt).inv(), + ); + proof { + PT::lemma_interp_at_facts(mem, pt@, 0, mem.cr3_spec().base, 0); + PT::interp(mem, 
pt@).lemma_inv_implies_interp_inv(); + assert(x86_arch_spec.upper_vaddr(0, 0) == crate::definitions_t::PT_BOUND_HIGH) + by (compute_only); + lemma_page_table_walk_interp(); + } + match PT::resolve(mem, pt, vaddr) { + Ok((v, pte)) => ResolveResultExec::Ok(v, pte), + Err(e) => ResolveResultExec::ErrUnmapped, } } +} + +} // verus! + } pub mod spec_pt { #![allow(unused_imports)] @@ -4993,105 +7664,129 @@ pub mod impl_u { verus! { - pub struct PageTableVariables { - pub map: Map, - } - - pub enum PageTableStep { - Map { vaddr: nat, pte: PageTableEntry, result: MapResult }, - Unmap { vaddr: nat, result: UnmapResult }, - Resolve { vaddr: nat, result: ResolveResult }, - Stutter, - } +pub struct PageTableVariables { + pub map: Map, +} - pub open spec fn step_Map_enabled(map: Map, vaddr: nat, pte: PageTableEntry) -> bool { - &&& aligned(vaddr, pte.frame.size) - &&& aligned(pte.frame.base, pte.frame.size) - &&& pte.frame.base <= MAX_PHYADDR - &&& candidate_mapping_in_bounds(vaddr, pte) - &&& { // The size of the frame must be the entry_size of a layer that supports page mappings - ||| pte.frame.size == L3_ENTRY_SIZE - ||| pte.frame.size == L2_ENTRY_SIZE - ||| pte.frame.size == L1_ENTRY_SIZE - } - &&& !candidate_mapping_overlaps_existing_pmem(map, vaddr, pte) - } +pub enum PageTableStep { + Map { vaddr: nat, pte: PageTableEntry, result: MapResult }, + Unmap { vaddr: nat, result: UnmapResult }, + Resolve { vaddr: nat, result: ResolveResult }, + Stutter, +} - pub open spec fn step_Map(s1: PageTableVariables, s2: PageTableVariables, vaddr: nat, pte: PageTableEntry, result: MapResult) -> bool { - &&& step_Map_enabled(s1.map, vaddr, pte) - &&& if candidate_mapping_overlaps_existing_vmem(s1.map, vaddr, pte) { - &&& result.is_ErrOverlap() - &&& s2.map === s1.map - } else { - &&& result.is_Ok() - &&& s2.map === s1.map.insert(vaddr, pte) - } - } +pub open spec fn step_Map_enabled( + map: Map, + vaddr: nat, + pte: PageTableEntry, +) -> bool { + &&& aligned(vaddr, pte.frame.size) + &&& aligned(pte.frame.base, pte.frame.size) + &&& pte.frame.base <= MAX_PHYADDR + &&& candidate_mapping_in_bounds(vaddr, pte) + &&& { // The size of the frame must be the entry_size of a layer that supports page mappings + ||| pte.frame.size == L3_ENTRY_SIZE + ||| pte.frame.size == L2_ENTRY_SIZE + ||| pte.frame.size == L1_ENTRY_SIZE + } + &&& !candidate_mapping_overlaps_existing_pmem(map, vaddr, pte) +} - pub open spec fn step_Unmap_enabled(vaddr: nat) -> bool { - &&& between(vaddr, PT_BOUND_LOW, PT_BOUND_HIGH as nat) - &&& { // The given vaddr must be aligned to some valid page size - ||| aligned(vaddr, L3_ENTRY_SIZE as nat) - ||| aligned(vaddr, L2_ENTRY_SIZE as nat) - ||| aligned(vaddr, L1_ENTRY_SIZE as nat) - } - } +pub open spec fn step_Map( + s1: PageTableVariables, + s2: PageTableVariables, + vaddr: nat, + pte: PageTableEntry, + result: MapResult, +) -> bool { + &&& step_Map_enabled(s1.map, vaddr, pte) + &&& if candidate_mapping_overlaps_existing_vmem(s1.map, vaddr, pte) { + &&& result.is_ErrOverlap() + &&& s2.map === s1.map + } else { + &&& result.is_Ok() + &&& s2.map === s1.map.insert(vaddr, pte) + } +} - pub open spec fn step_Unmap(s1: PageTableVariables, s2: PageTableVariables, vaddr: nat, result: UnmapResult) -> bool { - &&& step_Unmap_enabled(vaddr) - &&& if s1.map.dom().contains(vaddr) { - &&& result.is_Ok() - &&& s2.map === s1.map.remove(vaddr) - } else { - &&& result.is_ErrNoSuchMapping() - &&& s2.map === s1.map - } - } +pub open spec fn step_Unmap_enabled(vaddr: nat) -> bool { + &&& between(vaddr, PT_BOUND_LOW, 
PT_BOUND_HIGH as nat) + &&& { // The given vaddr must be aligned to some valid page size + ||| aligned(vaddr, L3_ENTRY_SIZE as nat) + ||| aligned(vaddr, L2_ENTRY_SIZE as nat) + ||| aligned(vaddr, L1_ENTRY_SIZE as nat) + } +} - pub open spec fn step_Resolve_enabled(vaddr: nat) -> bool { - &&& aligned(vaddr, 8) - &&& vaddr < MAX_BASE - } +pub open spec fn step_Unmap( + s1: PageTableVariables, + s2: PageTableVariables, + vaddr: nat, + result: UnmapResult, +) -> bool { + &&& step_Unmap_enabled(vaddr) + &&& if s1.map.dom().contains(vaddr) { + &&& result.is_Ok() + &&& s2.map === s1.map.remove(vaddr) + } else { + &&& result.is_ErrNoSuchMapping() + &&& s2.map === s1.map + } +} - pub open spec fn step_Resolve(s1: PageTableVariables, s2: PageTableVariables, vaddr: nat, result: ResolveResult) -> bool { - &&& step_Resolve_enabled(vaddr) - &&& s2 === s1 - &&& match result { - ResolveResult::Ok(base, pte) => { - // If result is Ok, it's an existing mapping that contains vaddr.. - &&& s1.map.contains_pair(base, pte) - &&& between(vaddr, base, base + pte.frame.size) - }, - ResolveResult::ErrUnmapped => { - // If result is ErrUnmapped, no mapping containing vaddr exists.. - &&& (!exists|base: nat, pte: PageTableEntry| s1.map.contains_pair(base, pte) && between(vaddr, base, base + pte.frame.size)) - }, - } - } +pub open spec fn step_Resolve_enabled(vaddr: nat) -> bool { + &&& aligned(vaddr, 8) + &&& vaddr < MAX_BASE +} +pub open spec fn step_Resolve( + s1: PageTableVariables, + s2: PageTableVariables, + vaddr: nat, + result: ResolveResult, +) -> bool { + &&& step_Resolve_enabled(vaddr) + &&& s2 === s1 + &&& match result { + ResolveResult::Ok(base, pte) => { + // If result is Ok, it's an existing mapping that contains vaddr.. + &&& s1.map.contains_pair(base, pte) + &&& between(vaddr, base, base + pte.frame.size) + }, + ResolveResult::ErrUnmapped => { + // If result is ErrUnmapped, no mapping containing vaddr exists.. 
+ &&& (!exists|base: nat, pte: PageTableEntry| + s1.map.contains_pair(base, pte) && between(vaddr, base, base + pte.frame.size)) + }, + } +} - pub open spec fn step_Stutter(s1: PageTableVariables, s2: PageTableVariables) -> bool { - s1 === s2 - } +pub open spec fn step_Stutter(s1: PageTableVariables, s2: PageTableVariables) -> bool { + s1 === s2 +} - pub open spec fn init(s: PageTableVariables) -> bool { - s.map === Map::empty() - } +pub open spec fn init(s: PageTableVariables) -> bool { + s.map === Map::empty() +} - pub open spec fn next_step(s1: PageTableVariables, s2: PageTableVariables, step: PageTableStep) -> bool { - match step { - PageTableStep::Map { vaddr, pte, result } => step_Map(s1, s2, vaddr, pte, result), - PageTableStep::Unmap { vaddr, result } => step_Unmap(s1, s2, vaddr, result), - PageTableStep::Resolve { vaddr, result } => step_Resolve(s1, s2, vaddr, result), - PageTableStep::Stutter => step_Stutter(s1, s2), - } - } +pub open spec fn next_step( + s1: PageTableVariables, + s2: PageTableVariables, + step: PageTableStep, +) -> bool { + match step { + PageTableStep::Map { vaddr, pte, result } => step_Map(s1, s2, vaddr, pte, result), + PageTableStep::Unmap { vaddr, result } => step_Unmap(s1, s2, vaddr, result), + PageTableStep::Resolve { vaddr, result } => step_Resolve(s1, s2, vaddr, result), + PageTableStep::Stutter => step_Stutter(s1, s2), + } +} - pub open spec fn next(s1: PageTableVariables, s2: PageTableVariables) -> bool { - exists|step: PageTableStep| next_step(s1, s2, step) - } +pub open spec fn next(s1: PageTableVariables, s2: PageTableVariables) -> bool { + exists|step: PageTableStep| next_step(s1, s2, step) +} - } +} // verus! } pub mod indexing { @@ -5115,201 +7810,262 @@ pub mod impl_u { verus! { - ///! This module implements an indexing calculus with corresponding lemmas. It only provides spec - ///! functions, without any exec versions. The (specialized to specific entry_size) exec versions - ///! can be implemented in their own modules and simply assert their equivalence to these spec - ///! functions to make use of the lemmas. This is mainly because the absence of overflows may use - ///! different bounds depending on the exact context. It also has the benefit that trusted exec - ///! functions (e.g. in mem) are fully defined in their own modules - - - pub open spec fn nat_mul(a: nat, b: nat) -> nat { - a * b - } +///! This module implements an indexing calculus with corresponding lemmas. It only provides spec +///! functions, without any exec versions. The (specialized to specific entry_size) exec versions +///! can be implemented in their own modules and simply assert their equivalence to these spec +///! functions to make use of the lemmas. This is mainly because the absence of overflows may use +///! different bounds depending on the exact context. It also has the benefit that trusted exec +///! functions (e.g. in mem) are fully defined in their own modules +pub open spec fn nat_mul(a: nat, b: nat) -> nat { + a * b +} - // This lemma has "support" postconditions for lemma_entry_base_from_index. I.e. postconditions - // that may help proving the lhs of some of that lemma's postconditions which are implications. - // However, one of these postconditions triggers on every multiplication, hence this is separated - // in its own lemma. 
- pub proof fn lemma_entry_base_from_index_support(base: nat, idx: nat, entry_size: nat) - requires entry_size > 0 - ensures - // forall|nested_es: nat, nested_num: nat| - // entry_size == nat_mul(nested_es, nested_num) - // ==> next_entry_base_from_index(base, idx, entry_size) - // == entry_base_from_index(entry_base_from_index(base, idx, entry_size), nested_num, nested_es), - // Support postconditions: - // Ugly, ugly workaround for mixed triggers. - forall|a: nat, b: nat| nat_mul(a, b) == #[trigger] (a * b), - forall|a: nat, b: nat| nat_mul(a, b) == nat_mul(b, a), - forall|a: nat| #[trigger] aligned(base, nat_mul(entry_size, a)) && a > 0 ==> aligned(base, entry_size), - { - assert(forall|a: nat, b: nat| nat_mul(a, b) == #[trigger] (a * b)) by(nonlinear_arith); - assert(forall|a: nat, b: nat| nat_mul(a, b) == nat_mul(b, a)) by(nonlinear_arith); - assert forall|a: nat| - #[trigger] aligned(base, nat_mul(entry_size, a)) && a > 0 - implies - aligned(base, entry_size) by - { - lib::mod_mult_zero_implies_mod_zero(base, entry_size, a); - }; - } +// This lemma has "support" postconditions for lemma_entry_base_from_index. I.e. postconditions +// that may help proving the lhs of some of that lemma's postconditions which are implications. +// However, one of these postconditions triggers on every multiplication, hence this is separated +// in its own lemma. +pub proof fn lemma_entry_base_from_index_support(base: nat, idx: nat, entry_size: nat) + requires + entry_size > 0, + ensures +// forall|nested_es: nat, nested_num: nat| +// entry_size == nat_mul(nested_es, nested_num) +// ==> next_entry_base_from_index(base, idx, entry_size) +// == entry_base_from_index(entry_base_from_index(base, idx, entry_size), nested_num, nested_es), +// Support postconditions: +// Ugly, ugly workaround for mixed triggers. + + forall|a: nat, b: nat| nat_mul(a, b) == #[trigger] (a * b), + forall|a: nat, b: nat| nat_mul(a, b) == nat_mul(b, a), + forall|a: nat| #[trigger] + aligned(base, nat_mul(entry_size, a)) && a > 0 ==> aligned(base, entry_size), +{ + assert(forall|a: nat, b: nat| nat_mul(a, b) == #[trigger] (a * b)) by (nonlinear_arith); + assert(forall|a: nat, b: nat| nat_mul(a, b) == nat_mul(b, a)) by (nonlinear_arith); + assert forall|a: nat| #[trigger] aligned(base, nat_mul(entry_size, a)) && a > 0 implies aligned( + base, + entry_size, + ) by { + lib::mod_mult_zero_implies_mod_zero(base, entry_size, a); + }; +} - pub proof fn lemma_entry_base_from_index(base: nat, idx: nat, entry_size: nat) +pub proof fn lemma_entry_base_from_index(base: nat, idx: nat, entry_size: nat) + requires + 0 < entry_size, + ensures + forall|idx2: nat| + #![trigger entry_base_from_index(base, idx, entry_size), entry_base_from_index(base, idx2, entry_size)] + idx < idx2 ==> entry_base_from_index(base, idx, entry_size) < entry_base_from_index( + base, + idx2, + entry_size, + ), + // // && next_entry_base_from_index(base, idx, entry_size) <= entry_base_from_index(layer, base, j), + // TODO: The line above can't be a separate postcondition because it doesn't have any valid triggers. + // The trigger for it is pretty bad. 
+ forall|idx2: nat| + idx < idx2 ==> next_entry_base_from_index(base, idx, entry_size) + <= entry_base_from_index(base, idx2, entry_size), + next_entry_base_from_index(base, idx, entry_size) == entry_base_from_index( + base, + idx + 1, + entry_size, + ), + next_entry_base_from_index(base, idx, entry_size) == entry_base_from_index( + base, + idx, + entry_size, + ) + entry_size, + next_entry_base_from_index(base, idx, entry_size) == entry_size + entry_base_from_index( + base, + idx, + entry_size, + ), + forall|n: nat| + 0 < n && aligned(base, n) && aligned(entry_size, n) ==> #[trigger] aligned( + entry_base_from_index(base, idx, entry_size), + n, + ), + forall|n: nat| + 0 < n && aligned(base, n) && aligned(entry_size, n) ==> #[trigger] aligned( + next_entry_base_from_index(base, idx, entry_size), + n, + ), + aligned(base, entry_size) ==> aligned( + entry_base_from_index(base, idx, entry_size), + entry_size, + ), + base <= entry_base_from_index( + base, + idx, + entry_size, + ), +// forall|idx: nat, base: nat, layer: nat| +// layer < self.layers.len() && idx < self.num_entries(layer) ==> entry_base_from_index(base, idx, entry_size) < self.upper_vaddr(layer, base), +// forall|idx: nat, base: nat, layer: nat| +// layer < self.layers.len() && idx <= self.num_entries(layer) ==> entry_base_from_index(base, idx, entry_size) <= self.upper_vaddr(layer, base), +// forall|idx: nat, base: nat, layer: nat| +// layer + 1 < self.layers.len() ==> #[trigger] next_entry_base_from_index(base, idx, entry_size) == self.upper_vaddr(layer + 1, entry_base_from_index(base, idx, entry_size)), +// // Support postconditions: +// forall(|base: nat, n: nat| // Used to infer lhs of next postcondition's implication +// aligned(base, #[trigger] (entry_size * n)) ==> aligned(base, entry_size)), +// No valid triggers +// Note for thesis report: +// This is really annoying. No mixed triggers means I can't use this postcondition. In the +// less general case (lemma_entry_base) this worked because n happens to be a specific +// function call there on which we can trigger. In other words: the lack of mixed triggers +// makes it impossible to generalize this postcondition. + +{ + assert forall|idx2: nat| idx < idx2 implies entry_base_from_index(base, idx, entry_size) + < entry_base_from_index(base, idx2, entry_size) by { + assert(entry_base_from_index(base, idx, entry_size) < entry_base_from_index( + base, + idx2, + entry_size, + )) by (nonlinear_arith) requires 0 < entry_size, - ensures - forall|idx2: nat| - #![trigger entry_base_from_index(base, idx, entry_size), entry_base_from_index(base, idx2, entry_size)] - idx < idx2 ==> entry_base_from_index(base, idx, entry_size) < entry_base_from_index(base, idx2, entry_size), - // // && next_entry_base_from_index(base, idx, entry_size) <= entry_base_from_index(layer, base, j), - // TODO: The line above can't be a separate postcondition because it doesn't have any valid triggers. - // The trigger for it is pretty bad. 
- forall|idx2: nat| idx < idx2 - ==> next_entry_base_from_index(base, idx, entry_size) <= entry_base_from_index(base, idx2, entry_size), - next_entry_base_from_index(base, idx, entry_size) == entry_base_from_index(base, idx + 1, entry_size), - next_entry_base_from_index(base, idx, entry_size) == entry_base_from_index(base, idx, entry_size) + entry_size, - next_entry_base_from_index(base, idx, entry_size) == entry_size + entry_base_from_index(base, idx, entry_size), - forall|n: nat| - 0 < n && aligned(base, n) && aligned(entry_size, n) ==> #[trigger] aligned(entry_base_from_index(base, idx, entry_size), n), - forall|n: nat| - 0 < n && aligned(base, n) && aligned(entry_size, n) ==> #[trigger] aligned(next_entry_base_from_index(base, idx, entry_size), n), - aligned(base, entry_size) ==> aligned(entry_base_from_index(base, idx, entry_size), entry_size), - base <= entry_base_from_index(base, idx, entry_size), - // forall|idx: nat, base: nat, layer: nat| - // layer < self.layers.len() && idx < self.num_entries(layer) ==> entry_base_from_index(base, idx, entry_size) < self.upper_vaddr(layer, base), - // forall|idx: nat, base: nat, layer: nat| - // layer < self.layers.len() && idx <= self.num_entries(layer) ==> entry_base_from_index(base, idx, entry_size) <= self.upper_vaddr(layer, base), - // forall|idx: nat, base: nat, layer: nat| - // layer + 1 < self.layers.len() ==> #[trigger] next_entry_base_from_index(base, idx, entry_size) == self.upper_vaddr(layer + 1, entry_base_from_index(base, idx, entry_size)), - // // Support postconditions: - // forall(|base: nat, n: nat| // Used to infer lhs of next postcondition's implication - // aligned(base, #[trigger] (entry_size * n)) ==> aligned(base, entry_size)), - // No valid triggers - // Note for thesis report: - // This is really annoying. No mixed triggers means I can't use this postcondition. In the - // less general case (lemma_entry_base) this worked because n happens to be a specific - // function call there on which we can trigger. In other words: the lack of mixed triggers - // makes it impossible to generalize this postcondition. 
+ idx < idx2, { - assert forall|idx2: nat| - idx < idx2 - implies entry_base_from_index(base, idx, entry_size) < entry_base_from_index(base, idx2, entry_size) by - { - assert(entry_base_from_index(base, idx, entry_size) < entry_base_from_index(base, idx2, entry_size)) - by(nonlinear_arith) - requires - 0 < entry_size, - idx < idx2, - { - lib::mult_less_mono_both1(idx, entry_size, idx2, entry_size); - }; - }; - assert forall|idx2: nat| - idx < idx2 - implies next_entry_base_from_index(base, idx, entry_size) <= entry_base_from_index(base, idx2, entry_size) by - { - assert(next_entry_base_from_index(base, idx, entry_size) <= entry_base_from_index(base, idx2, entry_size)) - by(nonlinear_arith) - requires - idx < idx2 - { - }; - }; - assert(next_entry_base_from_index(base, idx, entry_size) == entry_base_from_index(base, idx + 1, entry_size)); - assert(next_entry_base_from_index(base, idx, entry_size) == entry_base_from_index(base, idx, entry_size) + entry_size) by(nonlinear_arith); - assert(next_entry_base_from_index(base, idx, entry_size) == entry_size + entry_base_from_index(base, idx, entry_size)); - assert forall|n: nat| - 0 < n && aligned(base, n) && aligned(entry_size, n) - implies #[trigger] aligned(entry_base_from_index(base, idx, entry_size), n) by - { - assert(aligned(entry_base_from_index(base, idx, entry_size), n)) - by(nonlinear_arith) - requires - 0 < n, - 0 < entry_size, - aligned(base, n), - aligned(entry_size, n) - { - assert(aligned(idx * entry_size, entry_size)) by { - lib::mod_of_mul(idx, entry_size); - }; - assert(aligned(idx * entry_size, n)) by { - lib::aligned_transitive(idx * entry_size, entry_size, n); - }; - assert(aligned(base + idx * entry_size, n)) by { - lib::mod_add_zero(base, idx * entry_size, n); - }; - }; - }; - assert forall|n: nat| - 0 < n && aligned(base, n) && aligned(entry_size, n) - implies #[trigger] aligned(next_entry_base_from_index(base, idx, entry_size), n) by - { - assert(aligned(next_entry_base_from_index(base, idx, entry_size), n)) - by(nonlinear_arith) - requires - 0 < n, - 0 < entry_size, - aligned(base, n), - aligned(entry_size, n) - { - assert(aligned((idx + 1) * entry_size, entry_size)) by { - lib::mod_of_mul(idx + 1, entry_size); - }; - assert(aligned((idx + 1) * entry_size, n)) by { - lib::aligned_transitive((idx + 1) * entry_size, entry_size, n); - }; - assert(aligned(base + (idx + 1) * entry_size, n)) by { - lib::mod_add_zero(base, (idx + 1) * entry_size, n); - }; - }; - }; - assert(aligned(base, entry_size) ==> aligned(entry_base_from_index(base, idx, entry_size), entry_size)); - assert(base <= entry_base_from_index(base, idx, entry_size)); - } - - pub proof fn lemma_index_from_base_and_addr(base: nat, addr: nat, entry_size: nat, num_entries: nat) + lib::mult_less_mono_both1(idx, entry_size, idx2, entry_size); + }; + }; + assert forall|idx2: nat| idx < idx2 implies next_entry_base_from_index(base, idx, entry_size) + <= entry_base_from_index(base, idx2, entry_size) by { + assert(next_entry_base_from_index(base, idx, entry_size) <= entry_base_from_index( + base, + idx2, + entry_size, + )) by (nonlinear_arith) + requires + idx < idx2, + {}; + }; + assert(next_entry_base_from_index(base, idx, entry_size) == entry_base_from_index( + base, + idx + 1, + entry_size, + )); + assert(next_entry_base_from_index(base, idx, entry_size) == entry_base_from_index( + base, + idx, + entry_size, + ) + entry_size) by (nonlinear_arith); + assert(next_entry_base_from_index(base, idx, entry_size) == entry_size + entry_base_from_index( + base, + idx, + 
entry_size, + )); + assert forall|n: nat| + 0 < n && aligned(base, n) && aligned(entry_size, n) implies #[trigger] aligned( + entry_base_from_index(base, idx, entry_size), + n, + ) by { + assert(aligned(entry_base_from_index(base, idx, entry_size), n)) by (nonlinear_arith) requires - addr >= base, - addr < entry_base_from_index(base, num_entries, entry_size), - entry_size > 0, - ensures - ({ - let idx = index_from_base_and_addr(base, addr, entry_size); - &&& idx < num_entries - &&& between(addr, entry_base_from_index(base, idx, entry_size), next_entry_base_from_index(base, idx, entry_size)) - &&& aligned(base, entry_size) && aligned(addr, entry_size) ==> addr == entry_base_from_index(base, idx, entry_size) - }), + 0 < n, + 0 < entry_size, + aligned(base, n), + aligned(entry_size, n), { - let idx = index_from_base_and_addr(base, addr, entry_size); - assert(idx < num_entries) by(nonlinear_arith) - requires - addr >= base, - addr < entry_base_from_index(base, num_entries, entry_size), - entry_size > 0, - idx == index_from_offset(sub(addr, base), entry_size), - { }; - assert(between(addr, entry_base_from_index(base, idx, entry_size), next_entry_base_from_index(base, idx, entry_size))) by(nonlinear_arith) - requires - addr >= base, - addr < entry_base_from_index(base, num_entries, entry_size), - entry_size > 0, - idx == index_from_offset(sub(addr, base), entry_size), - { }; - assert(aligned(base, entry_size) && aligned(addr, entry_size) ==> addr == entry_base_from_index(base, idx, entry_size)) by(nonlinear_arith) - requires - addr >= base, - entry_size > 0, - idx == index_from_offset(sub(addr, base), entry_size), - { - if aligned(base, entry_size) && aligned(addr, entry_size) { - lib::subtract_mod_eq_zero(base, addr, entry_size); - lib::div_mul_cancel(sub(addr, base), entry_size); - } + assert(aligned(idx * entry_size, entry_size)) by { + lib::mod_of_mul(idx, entry_size); }; + assert(aligned(idx * entry_size, n)) by { + lib::aligned_transitive(idx * entry_size, entry_size, n); + }; + assert(aligned(base + idx * entry_size, n)) by { + lib::mod_add_zero(base, idx * entry_size, n); + }; + }; + }; + assert forall|n: nat| + 0 < n && aligned(base, n) && aligned(entry_size, n) implies #[trigger] aligned( + next_entry_base_from_index(base, idx, entry_size), + n, + ) by { + assert(aligned(next_entry_base_from_index(base, idx, entry_size), n)) by (nonlinear_arith) + requires + 0 < n, + 0 < entry_size, + aligned(base, n), + aligned(entry_size, n), + { + assert(aligned((idx + 1) * entry_size, entry_size)) by { + lib::mod_of_mul(idx + 1, entry_size); + }; + assert(aligned((idx + 1) * entry_size, n)) by { + lib::aligned_transitive((idx + 1) * entry_size, entry_size, n); + }; + assert(aligned(base + (idx + 1) * entry_size, n)) by { + lib::mod_add_zero(base, (idx + 1) * entry_size, n); + }; + }; + }; + assert(aligned(base, entry_size) ==> aligned( + entry_base_from_index(base, idx, entry_size), + entry_size, + )); + assert(base <= entry_base_from_index(base, idx, entry_size)); +} + +pub proof fn lemma_index_from_base_and_addr(base: nat, addr: nat, entry_size: nat, num_entries: nat) + requires + addr >= base, + addr < entry_base_from_index(base, num_entries, entry_size), + entry_size > 0, + ensures + ({ + let idx = index_from_base_and_addr(base, addr, entry_size); + &&& idx < num_entries + &&& between( + addr, + entry_base_from_index(base, idx, entry_size), + next_entry_base_from_index(base, idx, entry_size), + ) + &&& aligned(base, entry_size) && aligned(addr, entry_size) ==> addr + == 
entry_base_from_index(base, idx, entry_size) + }), +{ + let idx = index_from_base_and_addr(base, addr, entry_size); + assert(idx < num_entries) by (nonlinear_arith) + requires + addr >= base, + addr < entry_base_from_index(base, num_entries, entry_size), + entry_size > 0, + idx == index_from_offset(sub(addr, base), entry_size), + {}; + assert(between( + addr, + entry_base_from_index(base, idx, entry_size), + next_entry_base_from_index(base, idx, entry_size), + )) by (nonlinear_arith) + requires + addr >= base, + addr < entry_base_from_index(base, num_entries, entry_size), + entry_size > 0, + idx == index_from_offset(sub(addr, base), entry_size), + {}; + assert(aligned(base, entry_size) && aligned(addr, entry_size) ==> addr == entry_base_from_index( + base, + idx, + entry_size, + )) by (nonlinear_arith) + requires + addr >= base, + entry_size > 0, + idx == index_from_offset(sub(addr, base), entry_size), + { + if aligned(base, entry_size) && aligned(addr, entry_size) { + lib::subtract_mod_eq_zero(base, addr, entry_size); + lib::div_mul_cancel(sub(addr, base), entry_size); } - } + }; +} + +} // verus! } pub mod os_refinement { @@ -5340,701 +8096,832 @@ pub mod impl_u { verus! { - pub proof fn lemma_pt_mappings_dont_overlap_in_pmem(this: OSVariables, other: OSVariables) - requires - this.pt_mappings_dont_overlap_in_pmem(), - this.pt_entry_sizes_are_valid(), - other.pt_entry_sizes_are_valid(), - this.tlb_is_submap_of_pt(), - other.tlb_is_submap_of_pt(), - ensures - forall|base, pte| - !candidate_mapping_overlaps_existing_pmem(this.interp_pt_mem(), base, pte) && - other.interp_pt_mem() === this.interp_pt_mem().insert(base, pte) - ==> other.pt_mappings_dont_overlap_in_pmem(), - forall|base| - other.interp_pt_mem() === this.interp_pt_mem().remove(base) - ==> other.pt_mappings_dont_overlap_in_pmem(), - { - assert forall|base, pte| - !candidate_mapping_overlaps_existing_pmem(this.interp_pt_mem(), base, pte) && - other.interp_pt_mem() === this.interp_pt_mem().insert(base, pte) - implies other.pt_mappings_dont_overlap_in_pmem() by - { - lemma_effective_mappings_equal_interp_pt_mem(this); - lemma_effective_mappings_equal_interp_pt_mem(other); - assert forall|b1: nat, pte1: PageTableEntry, b2: nat, pte2: PageTableEntry| - other.interp_pt_mem().contains_pair(b1, pte1) && other.interp_pt_mem().contains_pair(b2, pte2) - implies - ((b1 == b2) || !overlap(pte1.frame, pte2.frame)) by - { - if b1 == b2 { - } else { - if b1 == base { - assert(!overlap(pte1.frame, pte2.frame)); - } else { - assert(this.interp_pt_mem().dom().contains(b1)); - assert(this.interp_pt_mem().contains_pair(b1, pte1)); - if b2 == base { - assert(pte2 === pte); - assert(!candidate_mapping_overlaps_existing_pmem(this.interp_pt_mem(), base, pte)); - assert(forall|b: nat| { - this.interp_pt_mem().dom().contains(b) - ==> !(#[trigger] overlap(pte.frame, this.interp_pt_mem()[b].frame)) - }); - assert(this.interp_pt_mem()[b1] === pte1); - assert(this.interp_pt_mem().dom().contains(b1)); - assert(!overlap(pte.frame, pte1.frame)); - assert(pte.frame.size > 0); - assert(pte1.frame.size > 0); - assert(!overlap(pte1.frame, pte.frame)); - } else { - assert(this.interp_pt_mem().dom().contains(b2)); - assert(this.interp_pt_mem().contains_pair(b2, pte2)); - assert(!overlap(pte1.frame, pte2.frame)); - } - } - } - }; - }; - assert forall|base| - other.interp_pt_mem() === this.interp_pt_mem().remove(base) - implies - other.pt_mappings_dont_overlap_in_pmem() by - { - lemma_effective_mappings_equal_interp_pt_mem(this); - 
lemma_effective_mappings_equal_interp_pt_mem(other); - assert forall|b1: nat, pte1: PageTableEntry, b2: nat, pte2: PageTableEntry| - other.interp_pt_mem().contains_pair(b1, pte1) && other.interp_pt_mem().contains_pair(b2, pte2) - implies - ((b1 == b2) || !overlap(pte1.frame, pte2.frame)) by - { - if b1 == b2 { +pub proof fn lemma_pt_mappings_dont_overlap_in_pmem(this: OSVariables, other: OSVariables) + requires + this.pt_mappings_dont_overlap_in_pmem(), + this.pt_entry_sizes_are_valid(), + other.pt_entry_sizes_are_valid(), + this.tlb_is_submap_of_pt(), + other.tlb_is_submap_of_pt(), + ensures + forall|base, pte| + !candidate_mapping_overlaps_existing_pmem(this.interp_pt_mem(), base, pte) + && other.interp_pt_mem() === this.interp_pt_mem().insert(base, pte) + ==> other.pt_mappings_dont_overlap_in_pmem(), + forall|base| + other.interp_pt_mem() === this.interp_pt_mem().remove(base) + ==> other.pt_mappings_dont_overlap_in_pmem(), +{ + assert forall|base, pte| + !candidate_mapping_overlaps_existing_pmem(this.interp_pt_mem(), base, pte) + && other.interp_pt_mem() === this.interp_pt_mem().insert( + base, + pte, + ) implies other.pt_mappings_dont_overlap_in_pmem() by { + lemma_effective_mappings_equal_interp_pt_mem(this); + lemma_effective_mappings_equal_interp_pt_mem(other); + assert forall|b1: nat, pte1: PageTableEntry, b2: nat, pte2: PageTableEntry| + other.interp_pt_mem().contains_pair(b1, pte1) && other.interp_pt_mem().contains_pair( + b2, + pte2, + ) implies ((b1 == b2) || !overlap(pte1.frame, pte2.frame)) by { + if b1 == b2 { + } else { + if b1 == base { + assert(!overlap(pte1.frame, pte2.frame)); + } else { + assert(this.interp_pt_mem().dom().contains(b1)); + assert(this.interp_pt_mem().contains_pair(b1, pte1)); + if b2 == base { + assert(pte2 === pte); + assert(!candidate_mapping_overlaps_existing_pmem( + this.interp_pt_mem(), + base, + pte, + )); + assert(forall|b: nat| + { + this.interp_pt_mem().dom().contains(b) ==> !(#[trigger] overlap( + pte.frame, + this.interp_pt_mem()[b].frame, + )) + }); + assert(this.interp_pt_mem()[b1] === pte1); + assert(this.interp_pt_mem().dom().contains(b1)); + assert(!overlap(pte.frame, pte1.frame)); + assert(pte.frame.size > 0); + assert(pte1.frame.size > 0); + assert(!overlap(pte1.frame, pte.frame)); } else { - assert(b2 != base); - if b1 == base { - assert(!overlap(pte1.frame, pte2.frame)); - } else { - assert(this.interp_pt_mem().dom().contains(b1)); - assert(this.interp_pt_mem().contains_pair(b1, pte1)); - assert(this.interp_pt_mem().dom().contains(b2)); - assert(this.interp_pt_mem().contains_pair(b2, pte2)); - assert(!overlap(pte1.frame, pte2.frame)); - } - } - }; - - }; - } - - pub proof fn lemma_effective_mappings_equal_interp_pt_mem(this: OSVariables) - requires - this.tlb_is_submap_of_pt() - ensures - this.effective_mappings() === this.interp_pt_mem(), - { - let eff = this.effective_mappings(); - let pt = this.interp_pt_mem(); - let tlb = this.hw.tlb; - assert forall|base| - eff.dom().contains(base) - implies pt.dom().contains(base) by - { assert(pt.contains_pair(base, eff[base])); }; - assert forall|base| - pt.dom().contains(base) - implies eff.dom().contains(base) by - { - if tlb.dom().contains(base) { - if tlb[base] !== pt[base] { - let pteprime = tlb[base]; - assert(pt.contains_pair(base, pteprime)); - assert(false); + assert(this.interp_pt_mem().dom().contains(b2)); + assert(this.interp_pt_mem().contains_pair(b2, pte2)); + assert(!overlap(pte1.frame, pte2.frame)); } - assert(eff.contains_pair(base, pt[base])); + } + } + }; + }; + assert 
forall|base| + other.interp_pt_mem() === this.interp_pt_mem().remove( + base, + ) implies other.pt_mappings_dont_overlap_in_pmem() by { + lemma_effective_mappings_equal_interp_pt_mem(this); + lemma_effective_mappings_equal_interp_pt_mem(other); + assert forall|b1: nat, pte1: PageTableEntry, b2: nat, pte2: PageTableEntry| + other.interp_pt_mem().contains_pair(b1, pte1) && other.interp_pt_mem().contains_pair( + b2, + pte2, + ) implies ((b1 == b2) || !overlap(pte1.frame, pte2.frame)) by { + if b1 == b2 { + } else { + assert(b2 != base); + if b1 == base { + assert(!overlap(pte1.frame, pte2.frame)); } else { - assert(eff.contains_pair(base, pt[base])); + assert(this.interp_pt_mem().dom().contains(b1)); + assert(this.interp_pt_mem().contains_pair(b1, pte1)); + assert(this.interp_pt_mem().dom().contains(b2)); + assert(this.interp_pt_mem().contains_pair(b2, pte2)); + assert(!overlap(pte1.frame, pte2.frame)); } - }; - assert forall|base| - eff.dom().contains(base) && pt.dom().contains(base) - implies #[trigger] pt[base] === #[trigger] eff[base] by - { - let pte = eff[base]; - assert(eff.contains_pair(base, pte)); - assert(pt.contains_pair(base, pte)); - }; - lib::assert_maps_equal_contains_pair::(eff, pt); + } + }; + }; +} + +pub proof fn lemma_effective_mappings_equal_interp_pt_mem(this: OSVariables) + requires + this.tlb_is_submap_of_pt(), + ensures + this.effective_mappings() === this.interp_pt_mem(), +{ + let eff = this.effective_mappings(); + let pt = this.interp_pt_mem(); + let tlb = this.hw.tlb; + assert forall|base| eff.dom().contains(base) implies pt.dom().contains(base) by { + assert(pt.contains_pair(base, eff[base])); + }; + assert forall|base| pt.dom().contains(base) implies eff.dom().contains(base) by { + if tlb.dom().contains(base) { + if tlb[base] !== pt[base] { + let pteprime = tlb[base]; + assert(pt.contains_pair(base, pteprime)); + assert(false); + } + assert(eff.contains_pair(base, pt[base])); + } else { + assert(eff.contains_pair(base, pt[base])); } + }; + assert forall|base| + eff.dom().contains(base) && pt.dom().contains(base) implies #[trigger] pt[base] + === #[trigger] eff[base] by { + let pte = eff[base]; + assert(eff.contains_pair(base, pte)); + assert(pt.contains_pair(base, pte)); + }; + lib::assert_maps_equal_contains_pair::(eff, pt); +} - pub proof fn lemma_effective_mappings_other(this: OSVariables, other: OSVariables) - requires - this.tlb_is_submap_of_pt(), - other.tlb_is_submap_of_pt(), - this.hw.pt_mem === other.hw.pt_mem, - ensures - this.effective_mappings() === other.effective_mappings(), - { - let eff1 = this.effective_mappings(); - let eff2 = other.effective_mappings(); - let tlb1 = this.hw.tlb; - let tlb2 = other.hw.tlb; - let pt1 = this.interp_pt_mem(); - let pt2 = other.interp_pt_mem(); - assert forall|base, pte| - eff1.contains_pair(base, pte) - implies eff2.contains_pair(base, pte) by - { - assert(pt1.contains_pair(base, pte)); - assert(pt2.contains_pair(base, pte)); - if tlb2.dom().contains(base) { - if tlb2[base] !== pte { - let pteprime = tlb2[base]; - assert(pt2.contains_pair(base, pteprime)); - assert(false); - } - assert(tlb2.contains_pair(base, pte)); - assert(eff2.contains_pair(base, pte)); - } else { - assert(eff2.contains_pair(base, pte)); - } - assert(eff2.contains_pair(base, pte)); - }; - assert forall|base, pte| - eff2.contains_pair(base, pte) - implies eff1.contains_pair(base, pte) by - { - assert(pt1.contains_pair(base, pte)); - assert(pt2.contains_pair(base, pte)); - if tlb1.dom().contains(base) { - if tlb1[base] !== pte { - let pteprime = 
tlb1[base]; - assert(pt1.contains_pair(base, pteprime)); - assert(false); - } - assert(tlb1.contains_pair(base, pte)); - assert(eff1.contains_pair(base, pte)); - } else { - assert(eff1.contains_pair(base, pte)); - } - assert(eff1.contains_pair(base, pte)); - }; - lib::assert_maps_equal_contains_pair::(eff1, eff2); +pub proof fn lemma_effective_mappings_other(this: OSVariables, other: OSVariables) + requires + this.tlb_is_submap_of_pt(), + other.tlb_is_submap_of_pt(), + this.hw.pt_mem === other.hw.pt_mem, + ensures + this.effective_mappings() === other.effective_mappings(), +{ + let eff1 = this.effective_mappings(); + let eff2 = other.effective_mappings(); + let tlb1 = this.hw.tlb; + let tlb2 = other.hw.tlb; + let pt1 = this.interp_pt_mem(); + let pt2 = other.interp_pt_mem(); + assert forall|base, pte| eff1.contains_pair(base, pte) implies eff2.contains_pair( + base, + pte, + ) by { + assert(pt1.contains_pair(base, pte)); + assert(pt2.contains_pair(base, pte)); + if tlb2.dom().contains(base) { + if tlb2[base] !== pte { + let pteprime = tlb2[base]; + assert(pt2.contains_pair(base, pteprime)); + assert(false); + } + assert(tlb2.contains_pair(base, pte)); + assert(eff2.contains_pair(base, pte)); + } else { + assert(eff2.contains_pair(base, pte)); } + assert(eff2.contains_pair(base, pte)); + }; + assert forall|base, pte| eff2.contains_pair(base, pte) implies eff1.contains_pair( + base, + pte, + ) by { + assert(pt1.contains_pair(base, pte)); + assert(pt2.contains_pair(base, pte)); + if tlb1.dom().contains(base) { + if tlb1[base] !== pte { + let pteprime = tlb1[base]; + assert(pt1.contains_pair(base, pteprime)); + assert(false); + } + assert(tlb1.contains_pair(base, pte)); + assert(eff1.contains_pair(base, pte)); + } else { + assert(eff1.contains_pair(base, pte)); + } + assert(eff1.contains_pair(base, pte)); + }; + lib::assert_maps_equal_contains_pair::(eff1, eff2); +} - proof fn lemma_interp(this: OSVariables) - requires - this.inv() - ensures - this.interp().mappings === this.interp_pt_mem(), - this.interp().mappings === this.effective_mappings(), - forall|base: nat, pte: PageTableEntry, vmem_idx: nat| { - let vaddr = vmem_idx * WORD_SIZE as nat; - let paddr = (pte.frame.base + (vaddr - base)) as nat; - let pmem_idx = word_index_spec(paddr); - #[trigger] this.interp_pt_mem().contains_pair(base, pte) && between(vaddr, base, base + pte.frame.size) && pmem_idx < this.hw.mem.len() - ==> this.hw.mem[pmem_idx as int] === #[trigger] this.interp().mem[vmem_idx] - }, - { - lemma_effective_mappings_equal_interp_pt_mem(this); - assert forall|base: nat, pte: PageTableEntry, vmem_idx: nat| { - let vaddr = vmem_idx * WORD_SIZE as nat; - let paddr = (pte.frame.base + (vaddr - base)) as nat; - let pmem_idx = word_index_spec(paddr); - #[trigger] this.interp_pt_mem().contains_pair(base, pte) && between(vaddr, base, base + pte.frame.size) && pmem_idx < this.hw.mem.len() - } implies this.hw.mem[word_index_spec((pte.frame.base + ((vmem_idx * WORD_SIZE as nat) - base)) as nat) as int] === #[trigger] this.interp().mem[vmem_idx] - by { - let pt = this.interp_pt_mem(); - let sys_mem = this.hw.mem; +proof fn lemma_interp(this: OSVariables) + requires + this.inv(), + ensures + this.interp().mappings === this.interp_pt_mem(), + this.interp().mappings === this.effective_mappings(), + forall|base: nat, pte: PageTableEntry, vmem_idx: nat| + { let vaddr = vmem_idx * WORD_SIZE as nat; let paddr = (pte.frame.base + (vaddr - base)) as nat; let pmem_idx = word_index_spec(paddr); - if this.hw.mem[pmem_idx as int] !== 
this.interp().mem[vmem_idx] { - assert(exists|base, pte| pt.contains_pair(base, pte) && between(vaddr, base, base + pte.frame.size)); - let (base2, pte2): (nat, PageTableEntry) = choose|base: nat, pte: PageTableEntry| #![auto] pt.contains_pair(base, pte) && between(vaddr, base, base + pte.frame.size); - if base2 == base { - assert(pte2 === pte); - assert(false); - } else { - assert(overlap( - MemRegion { base: base, size: pte.frame.size }, - MemRegion { base: base2, size: pte2.frame.size })); - assert(false); - } - } + #[trigger] this.interp_pt_mem().contains_pair(base, pte) && between( + vaddr, + base, + base + pte.frame.size, + ) && pmem_idx < this.hw.mem.len() ==> this.hw.mem[pmem_idx as int] + === #[trigger] this.interp().mem[vmem_idx] + }, +{ + lemma_effective_mappings_equal_interp_pt_mem(this); + assert forall|base: nat, pte: PageTableEntry, vmem_idx: nat| + { + let vaddr = vmem_idx * WORD_SIZE as nat; + let paddr = (pte.frame.base + (vaddr - base)) as nat; + let pmem_idx = word_index_spec(paddr); + #[trigger] this.interp_pt_mem().contains_pair(base, pte) && between( + vaddr, + base, + base + pte.frame.size, + ) && pmem_idx < this.hw.mem.len() + } implies this.hw.mem[word_index_spec( + (pte.frame.base + ((vmem_idx * WORD_SIZE as nat) - base)) as nat, + ) as int] === #[trigger] this.interp().mem[vmem_idx] by { + let pt = this.interp_pt_mem(); + let sys_mem = this.hw.mem; + let vaddr = vmem_idx * WORD_SIZE as nat; + let paddr = (pte.frame.base + (vaddr - base)) as nat; + let pmem_idx = word_index_spec(paddr); + if this.hw.mem[pmem_idx as int] !== this.interp().mem[vmem_idx] { + assert(exists|base, pte| + pt.contains_pair(base, pte) && between(vaddr, base, base + pte.frame.size)); + let (base2, pte2): (nat, PageTableEntry) = choose|base: nat, pte: PageTableEntry| + #![auto] + pt.contains_pair(base, pte) && between(vaddr, base, base + pte.frame.size); + if base2 == base { + assert(pte2 === pte); + assert(false); + } else { + assert(overlap( + MemRegion { base: base, size: pte.frame.size }, + MemRegion { base: base2, size: pte2.frame.size }, + )); + assert(false); } } + } +} - proof fn lemma_interp_other(this: OSVariables, other: OSVariables) - requires - other.hw.mem === this.hw.mem, - forall|base, pte| this.effective_mappings().contains_pair(base, pte) ==> other.effective_mappings().contains_pair(base, pte), - this.inv(), - other.inv(), - ensures - forall|word_idx: nat| - this.interp().mem.dom().contains(word_idx) - ==> { - &&& other.interp().mem.dom().contains(word_idx) - &&& #[trigger] other.interp().mem[word_idx] == #[trigger] this.interp().mem[word_idx] - }, - { - assert forall|word_idx: nat| - this.interp().mem.dom().contains(word_idx) - implies { - &&& other.interp().mem.dom().contains(word_idx) - &&& #[trigger] other.interp().mem[word_idx] == #[trigger] this.interp().mem[word_idx] - } by - { - let vaddr = word_idx * WORD_SIZE as nat; - let this_mappings = this.effective_mappings(); - let other_mappings = other.effective_mappings(); - let phys_mem_size = this.interp_constants().phys_mem_size; - assert(hlspec::mem_domain_from_mappings_contains(phys_mem_size, word_idx, this_mappings)); - let (base, pte): (nat, PageTableEntry) = choose|base: nat, pte: PageTableEntry| #![auto] this_mappings.contains_pair(base, pte) && between(vaddr, base, base + pte.frame.size); - assert(this_mappings.contains_pair(base, pte)); - assert(between(vaddr, base, base + pte.frame.size)); - assert(other_mappings.contains_pair(base, pte)); - - assert(other.interp().mem.dom().contains(word_idx)); - if 
other.interp().mem[word_idx] !== this.interp().mem[word_idx] { - let (base2, pte2): (nat, PageTableEntry) = choose|base: nat, pte: PageTableEntry| #![auto] other_mappings.contains_pair(base, pte) && between(vaddr, base, base + pte.frame.size); - assert(other_mappings.contains_pair(base, pte)); - assert(other_mappings.contains_pair(base2, pte2)); - assert(between(vaddr, base2, base2 + pte2.frame.size)); - assert(overlap( - MemRegion { base: base, size: base + pte.frame.size }, - MemRegion { base: base2, size: base2 + pte2.frame.size })); - assert(other.pt_mappings_dont_overlap_in_vmem()); - assert(((base == base2) || !overlap( - MemRegion { base: base, size: pte.frame.size }, - MemRegion { base: base2, size: pte2.frame.size }))); - assert(base != base2); - assert(false); - } - }; +proof fn lemma_interp_other(this: OSVariables, other: OSVariables) + requires + other.hw.mem === this.hw.mem, + forall|base, pte| + this.effective_mappings().contains_pair(base, pte) + ==> other.effective_mappings().contains_pair(base, pte), + this.inv(), + other.inv(), + ensures + forall|word_idx: nat| + this.interp().mem.dom().contains(word_idx) ==> { + &&& other.interp().mem.dom().contains(word_idx) + &&& #[trigger] other.interp().mem[word_idx] + == #[trigger] this.interp().mem[word_idx] + }, +{ + assert forall|word_idx: nat| this.interp().mem.dom().contains(word_idx) implies { + &&& other.interp().mem.dom().contains(word_idx) + &&& #[trigger] other.interp().mem[word_idx] == #[trigger] this.interp().mem[word_idx] + } by { + let vaddr = word_idx * WORD_SIZE as nat; + let this_mappings = this.effective_mappings(); + let other_mappings = other.effective_mappings(); + let phys_mem_size = this.interp_constants().phys_mem_size; + assert(hlspec::mem_domain_from_mappings_contains(phys_mem_size, word_idx, this_mappings)); + let (base, pte): (nat, PageTableEntry) = choose|base: nat, pte: PageTableEntry| + #![auto] + this_mappings.contains_pair(base, pte) && between(vaddr, base, base + pte.frame.size); + assert(this_mappings.contains_pair(base, pte)); + assert(between(vaddr, base, base + pte.frame.size)); + assert(other_mappings.contains_pair(base, pte)); + assert(other.interp().mem.dom().contains(word_idx)); + if other.interp().mem[word_idx] !== this.interp().mem[word_idx] { + let (base2, pte2): (nat, PageTableEntry) = choose|base: nat, pte: PageTableEntry| + #![auto] + other_mappings.contains_pair(base, pte) && between( + vaddr, + base, + base + pte.frame.size, + ); + assert(other_mappings.contains_pair(base, pte)); + assert(other_mappings.contains_pair(base2, pte2)); + assert(between(vaddr, base2, base2 + pte2.frame.size)); + assert(overlap( + MemRegion { base: base, size: base + pte.frame.size }, + MemRegion { base: base2, size: base2 + pte2.frame.size }, + )); + assert(other.pt_mappings_dont_overlap_in_vmem()); + assert(((base == base2) || !overlap( + MemRegion { base: base, size: pte.frame.size }, + MemRegion { base: base2, size: pte2.frame.size }, + ))); + assert(base != base2); + assert(false); } + }; +} - // not technically necessary, i think - proof fn init_implies_pt_init(s: OSVariables) - requires - init(s) - ensures - spec_pt::init(s.pt_variables()); - - proof fn init_implies_inv(s: OSVariables) - requires - init(s) - ensures - s.inv() - { - reveal(OSVariables::pt_entries_aligned); - } +// not technically necessary, i think +proof fn init_implies_pt_init(s: OSVariables) + requires + init(s), + ensures + spec_pt::init(s.pt_variables()), +; + +proof fn init_implies_inv(s: OSVariables) + requires + init(s), + 
ensures + s.inv(), +{ + reveal(OSVariables::pt_entries_aligned); +} - proof fn next_step_preserves_inv(s1: OSVariables, s2: OSVariables, step: OSStep) - requires - s1.inv(), - next_step(s1, s2, step) - ensures - s2.inv(), - { - reveal(OSVariables::pt_entries_aligned); - match step { - OSStep::HW { step: system_step } => { - assert(step_HW(s1, s2, system_step)); - }, - OSStep::Map { vaddr, pte, result } => { - let pt_s1 = s1.pt_variables(); - let pt_s2 = s2.pt_variables(); - assert(step_Map(s1, s2, vaddr, pte, result)); - assert(spec_pt::step_Map(pt_s1, pt_s2, vaddr, pte, result)); - assert(!candidate_mapping_overlaps_existing_pmem(pt_s1.map, vaddr, pte)); - if candidate_mapping_overlaps_existing_vmem(pt_s1.map, vaddr, pte) { - assert(s2.inv()); - } else { - assert(forall|base, pte| s1.interp_pt_mem().contains_pair(base, pte) ==> s2.interp_pt_mem().contains_pair(base, pte)); - assert forall|base, pteprime| s2.interp_pt_mem().contains_pair(base, pteprime) implies { +proof fn next_step_preserves_inv(s1: OSVariables, s2: OSVariables, step: OSStep) + requires + s1.inv(), + next_step(s1, s2, step), + ensures + s2.inv(), +{ + reveal(OSVariables::pt_entries_aligned); + match step { + OSStep::HW { step: system_step } => { + assert(step_HW(s1, s2, system_step)); + }, + OSStep::Map { vaddr, pte, result } => { + let pt_s1 = s1.pt_variables(); + let pt_s2 = s2.pt_variables(); + assert(step_Map(s1, s2, vaddr, pte, result)); + assert(spec_pt::step_Map(pt_s1, pt_s2, vaddr, pte, result)); + assert(!candidate_mapping_overlaps_existing_pmem(pt_s1.map, vaddr, pte)); + if candidate_mapping_overlaps_existing_vmem(pt_s1.map, vaddr, pte) { + assert(s2.inv()); + } else { + assert(forall|base, pte| + s1.interp_pt_mem().contains_pair(base, pte) + ==> s2.interp_pt_mem().contains_pair(base, pte)); + assert forall|base, pteprime| + s2.interp_pt_mem().contains_pair(base, pteprime) implies { + ||| pteprime.frame.size == L3_ENTRY_SIZE + ||| pteprime.frame.size == L2_ENTRY_SIZE + ||| pteprime.frame.size == L1_ENTRY_SIZE + } by { + if vaddr == base { + assert({ ||| pteprime.frame.size == L3_ENTRY_SIZE ||| pteprime.frame.size == L2_ENTRY_SIZE ||| pteprime.frame.size == L1_ENTRY_SIZE - } by - { - if vaddr == base { - assert({ - ||| pteprime.frame.size == L3_ENTRY_SIZE - ||| pteprime.frame.size == L2_ENTRY_SIZE - ||| pteprime.frame.size == L1_ENTRY_SIZE - }); - } else { - assert(s1.pt_entry_sizes_are_valid()); - assert(s1.interp_pt_mem().dom().contains(base)); - assert(s1.interp_pt_mem().contains_pair(base, pteprime)); - assert({ - ||| pteprime.frame.size == L3_ENTRY_SIZE - ||| pteprime.frame.size == L2_ENTRY_SIZE - ||| pteprime.frame.size == L1_ENTRY_SIZE - }); - } - }; - assert(s2.pt_entry_sizes_are_valid()); - assert(s2.tlb_is_submap_of_pt()); - lemma_pt_mappings_dont_overlap_in_pmem(s1, s2); - assert(s2.pt_mappings_dont_overlap_in_pmem()); - assert(s2.pt_entries_aligned()) by { - assert forall|base2, pte2| - s2.interp_pt_mem().contains_pair(base2, pte2) - implies - aligned(base2, 8) && aligned(pte2.frame.base, 8) by - { - if base2 === vaddr { - assert(pte2 === pte); - assert(aligned(vaddr, pte.frame.size)); - assert(aligned(pte.frame.base, pte.frame.size)); - if pte.frame.size == L3_ENTRY_SIZE as nat { - lib::aligned_transitive(pte.frame.base, L3_ENTRY_SIZE as nat, 8); - lib::aligned_transitive(vaddr, L3_ENTRY_SIZE as nat, 8); - assert(aligned(vaddr, 8)); - assert(aligned(pte.frame.base, 8)); - } else if pte.frame.size == L2_ENTRY_SIZE as nat { - lib::aligned_transitive(pte.frame.base, L2_ENTRY_SIZE as nat, 8); - 
lib::aligned_transitive(vaddr, L2_ENTRY_SIZE as nat, 8); - assert(aligned(vaddr, 8)); - assert(aligned(pte.frame.base, 8)); - } else { - assert(pte.frame.size == L1_ENTRY_SIZE as nat); - assert(aligned(L1_ENTRY_SIZE as nat, 8)); - lib::aligned_transitive(pte.frame.base, L1_ENTRY_SIZE as nat, 8); - lib::aligned_transitive(vaddr, L1_ENTRY_SIZE as nat, 8); - assert(aligned(vaddr, 8)); - assert(aligned(pte.frame.base, 8)); - } - } else { - assert(s1.interp_pt_mem().contains_pair(base2, pte2)); - } - }; - }; - assert(s2.inv()); - } - }, - OSStep::Unmap { vaddr, result } => { - let pt_s1 = s1.pt_variables(); - let pt_s2 = s2.pt_variables(); - assert(step_Unmap(s1, s2, vaddr, result)); - assert(spec_pt::step_Unmap(pt_s1, pt_s2, vaddr, result)); - if pt_s1.map.dom().contains(vaddr) { - assert(result.is_Ok()); - assert(pt_s2.map === pt_s1.map.remove(vaddr)); - // assert(s2.pt_mappings_dont_overlap_in_vmem()); - assert forall|base2, pte2| - s2.hw.tlb.contains_pair(base2, pte2) - implies #[trigger] s2.interp_pt_mem().contains_pair(base2, pte2) by - { - assert(s1.hw.tlb.contains_pair(base2, pte2)); - assert(s1.tlb_is_submap_of_pt()); - assert(s1.interp_pt_mem().contains_pair(base2, pte2)); - assert(s2.interp_pt_mem().contains_pair(base2, pte2)); - }; - assert forall|baseprime, pteprime| s2.interp_pt_mem().contains_pair(baseprime, pteprime) implies { + }); + } else { + assert(s1.pt_entry_sizes_are_valid()); + assert(s1.interp_pt_mem().dom().contains(base)); + assert(s1.interp_pt_mem().contains_pair(base, pteprime)); + assert({ ||| pteprime.frame.size == L3_ENTRY_SIZE ||| pteprime.frame.size == L2_ENTRY_SIZE ||| pteprime.frame.size == L1_ENTRY_SIZE - } by - { - assert(s1.pt_entry_sizes_are_valid()); - assert(s1.interp_pt_mem().dom().contains(baseprime)); - assert(s1.interp_pt_mem().contains_pair(baseprime, pteprime)); - assert({ - ||| pteprime.frame.size == L3_ENTRY_SIZE - ||| pteprime.frame.size == L2_ENTRY_SIZE - ||| pteprime.frame.size == L1_ENTRY_SIZE - }); - }; - assert(s2.pt_entry_sizes_are_valid()); - lemma_pt_mappings_dont_overlap_in_pmem(s1, s2); - assert(s2.pt_entries_aligned()) by { - assert forall|base, pte| - s2.interp_pt_mem().contains_pair(base, pte) - implies - aligned(base, 8) && aligned(pte.frame.base, 8) by - { assert(s1.interp_pt_mem().contains_pair(base, pte)); }; - }; - assert(s2.inv()); - } else { - assert(s2.inv()); + }); } - }, - OSStep::Resolve { vaddr, result } => (), + }; + assert(s2.pt_entry_sizes_are_valid()); + assert(s2.tlb_is_submap_of_pt()); + lemma_pt_mappings_dont_overlap_in_pmem(s1, s2); + assert(s2.pt_mappings_dont_overlap_in_pmem()); + assert(s2.pt_entries_aligned()) by { + assert forall|base2, pte2| + s2.interp_pt_mem().contains_pair(base2, pte2) implies aligned(base2, 8) + && aligned(pte2.frame.base, 8) by { + if base2 === vaddr { + assert(pte2 === pte); + assert(aligned(vaddr, pte.frame.size)); + assert(aligned(pte.frame.base, pte.frame.size)); + if pte.frame.size == L3_ENTRY_SIZE as nat { + lib::aligned_transitive(pte.frame.base, L3_ENTRY_SIZE as nat, 8); + lib::aligned_transitive(vaddr, L3_ENTRY_SIZE as nat, 8); + assert(aligned(vaddr, 8)); + assert(aligned(pte.frame.base, 8)); + } else if pte.frame.size == L2_ENTRY_SIZE as nat { + lib::aligned_transitive(pte.frame.base, L2_ENTRY_SIZE as nat, 8); + lib::aligned_transitive(vaddr, L2_ENTRY_SIZE as nat, 8); + assert(aligned(vaddr, 8)); + assert(aligned(pte.frame.base, 8)); + } else { + assert(pte.frame.size == L1_ENTRY_SIZE as nat); + assert(aligned(L1_ENTRY_SIZE as nat, 8)); + 
lib::aligned_transitive(pte.frame.base, L1_ENTRY_SIZE as nat, 8); + lib::aligned_transitive(vaddr, L1_ENTRY_SIZE as nat, 8); + assert(aligned(vaddr, 8)); + assert(aligned(pte.frame.base, 8)); + } + } else { + assert(s1.interp_pt_mem().contains_pair(base2, pte2)); + } + }; + }; + assert(s2.inv()); + } + }, + OSStep::Unmap { vaddr, result } => { + let pt_s1 = s1.pt_variables(); + let pt_s2 = s2.pt_variables(); + assert(step_Unmap(s1, s2, vaddr, result)); + assert(spec_pt::step_Unmap(pt_s1, pt_s2, vaddr, result)); + if pt_s1.map.dom().contains(vaddr) { + assert(result.is_Ok()); + assert(pt_s2.map === pt_s1.map.remove(vaddr)); + // assert(s2.pt_mappings_dont_overlap_in_vmem()); + assert forall|base2, pte2| + s2.hw.tlb.contains_pair( + base2, + pte2, + ) implies #[trigger] s2.interp_pt_mem().contains_pair(base2, pte2) by { + assert(s1.hw.tlb.contains_pair(base2, pte2)); + assert(s1.tlb_is_submap_of_pt()); + assert(s1.interp_pt_mem().contains_pair(base2, pte2)); + assert(s2.interp_pt_mem().contains_pair(base2, pte2)); + }; + assert forall|baseprime, pteprime| + s2.interp_pt_mem().contains_pair(baseprime, pteprime) implies { + ||| pteprime.frame.size == L3_ENTRY_SIZE + ||| pteprime.frame.size == L2_ENTRY_SIZE + ||| pteprime.frame.size == L1_ENTRY_SIZE + } by { + assert(s1.pt_entry_sizes_are_valid()); + assert(s1.interp_pt_mem().dom().contains(baseprime)); + assert(s1.interp_pt_mem().contains_pair(baseprime, pteprime)); + assert({ + ||| pteprime.frame.size == L3_ENTRY_SIZE + ||| pteprime.frame.size == L2_ENTRY_SIZE + ||| pteprime.frame.size == L1_ENTRY_SIZE + }); + }; + assert(s2.pt_entry_sizes_are_valid()); + lemma_pt_mappings_dont_overlap_in_pmem(s1, s2); + assert(s2.pt_entries_aligned()) by { + assert forall|base, pte| + s2.interp_pt_mem().contains_pair(base, pte) implies aligned(base, 8) + && aligned(pte.frame.base, 8) by { + assert(s1.interp_pt_mem().contains_pair(base, pte)); + }; + }; + assert(s2.inv()); + } else { + assert(s2.inv()); } - } + }, + OSStep::Resolve { vaddr, result } => (), + } +} - proof fn init_refines_hl_init(s: OSVariables) - requires - init(s) - ensures - hlspec::init(s.interp()) - { - lemma_effective_mappings_equal_interp_pt_mem(s); - assert_maps_equal!(s.interp().mem, Map::empty()); - } +proof fn init_refines_hl_init(s: OSVariables) + requires + init(s), + ensures + hlspec::init(s.interp()), +{ + lemma_effective_mappings_equal_interp_pt_mem(s); + assert_maps_equal!(s.interp().mem, Map::empty()); +} - proof fn next_step_refines_hl_next_step(s1: OSVariables, s2: OSVariables, step: OSStep) - requires - s1.inv(), - next_step(s1, s2, step) - ensures - hlspec::next_step(s1.interp_constants(), s1.interp(), s2.interp(), step.interp()) - { - next_step_preserves_inv(s1, s2, step); - lemma_effective_mappings_equal_interp_pt_mem(s1); - lemma_effective_mappings_equal_interp_pt_mem(s2); - let abs_s1 = s1.interp(); - let abs_s2 = s2.interp(); - let abs_c = s1.interp_constants(); - let sys_s1 = s1.hw; - let sys_s2 = s2.hw; - let pt1 = s1.interp_pt_mem(); - let pt2 = s2.interp_pt_mem(); - let abs_step = step.interp(); - match step { - OSStep::HW { step: system_step } => { - lemma_effective_mappings_other(s1, s2); - match system_step { - hardware::HWStep::ReadWrite { vaddr, paddr, op, pte } => { - // hlspec::AbstractStep::ReadWrite { vaddr, op, pte } - let pmem_idx = word_index_spec(paddr); - let vmem_idx = word_index_spec(vaddr); - assert(sys_s2.pt_mem === sys_s1.pt_mem); - assert(sys_s2.tlb === sys_s1.tlb); - match pte { - Some((base, pte)) => { - lemma_interp(s1); - lemma_interp(s2); 
- - // hw - assert(sys_s1.tlb.contains_pair(base, pte)); - assert(between(vaddr, base, base + pte.frame.size)); - assert(paddr === (pte.frame.base + (vaddr - base)) as nat); - - // abs - assert(abs_s1.mappings.contains_pair(base, pte)); - match op { - RWOp::Store { new_value, result } => { - if pmem_idx < sys_s1.mem.len() && !pte.flags.is_supervisor && pte.flags.is_writable { - assert(result.is_Ok()); - assert(sys_s2.mem === sys_s1.mem.update(pmem_idx as int, new_value)); - assert(hlspec::mem_domain_from_mappings_contains(abs_c.phys_mem_size, vmem_idx, s1.interp_pt_mem())); - assert(abs_s1.mem.dom() === abs_s2.mem.dom()); - - assert(sys_s1.mem[pmem_idx as int] == abs_s1.mem[vmem_idx]); - - assert(abs_s1.mem.dom().contains(vmem_idx)); - assert(abs_s1.mem.insert(vmem_idx, new_value).dom() === abs_s1.mem.dom().insert(vmem_idx)); - assert_sets_equal!(abs_s1.mem.dom(), abs_s1.mem.dom().insert(vmem_idx)); - assert(abs_s1.mem.insert(vmem_idx, new_value).dom() === abs_s2.mem.dom()); - assert forall|vmem_idx2: nat| - abs_s2.mem.dom().contains(vmem_idx2) && - abs_s1.mem.insert(vmem_idx, new_value).dom().contains(vmem_idx2) - implies - #[trigger] abs_s2.mem[vmem_idx2] == abs_s1.mem.insert(vmem_idx, new_value)[vmem_idx2] by - { - if vmem_idx2 == vmem_idx { - assert(abs_s2.mem[vmem_idx2] == new_value); - } else { - assert(hlspec::mem_domain_from_mappings_contains(abs_c.phys_mem_size, vmem_idx2, pt1)); - let vaddr2 = vmem_idx2 * WORD_SIZE as nat; - let (base2, pte2): (nat, PageTableEntry) = choose|base2: nat, pte2: PageTableEntry| { - let paddr2 = (pte2.frame.base + (vaddr2 - base2)) as nat; - let pmem_idx2 = word_index_spec(paddr2); - &&& #[trigger] pt1.contains_pair(base2, pte2) - &&& between(vaddr2, base2, base2 + pte2.frame.size) - &&& pmem_idx2 < abs_c.phys_mem_size - }; - let paddr2 = (pte2.frame.base + (vaddr2 - base2)) as nat; +proof fn next_step_refines_hl_next_step(s1: OSVariables, s2: OSVariables, step: OSStep) + requires + s1.inv(), + next_step(s1, s2, step), + ensures + hlspec::next_step(s1.interp_constants(), s1.interp(), s2.interp(), step.interp()), +{ + next_step_preserves_inv(s1, s2, step); + lemma_effective_mappings_equal_interp_pt_mem(s1); + lemma_effective_mappings_equal_interp_pt_mem(s2); + let abs_s1 = s1.interp(); + let abs_s2 = s2.interp(); + let abs_c = s1.interp_constants(); + let sys_s1 = s1.hw; + let sys_s2 = s2.hw; + let pt1 = s1.interp_pt_mem(); + let pt2 = s2.interp_pt_mem(); + let abs_step = step.interp(); + match step { + OSStep::HW { step: system_step } => { + lemma_effective_mappings_other(s1, s2); + match system_step { + hardware::HWStep::ReadWrite { vaddr, paddr, op, pte } => { + // hlspec::AbstractStep::ReadWrite { vaddr, op, pte } + let pmem_idx = word_index_spec(paddr); + let vmem_idx = word_index_spec(vaddr); + assert(sys_s2.pt_mem === sys_s1.pt_mem); + assert(sys_s2.tlb === sys_s1.tlb); + match pte { + Some((base, pte)) => { + lemma_interp(s1); + lemma_interp(s2); + // hw + assert(sys_s1.tlb.contains_pair(base, pte)); + assert(between(vaddr, base, base + pte.frame.size)); + assert(paddr === (pte.frame.base + (vaddr - base)) as nat); + // abs + assert(abs_s1.mappings.contains_pair(base, pte)); + match op { + RWOp::Store { new_value, result } => { + if pmem_idx < sys_s1.mem.len() && !pte.flags.is_supervisor + && pte.flags.is_writable { + assert(result.is_Ok()); + assert(sys_s2.mem === sys_s1.mem.update( + pmem_idx as int, + new_value, + )); + assert(hlspec::mem_domain_from_mappings_contains( + abs_c.phys_mem_size, + vmem_idx, + s1.interp_pt_mem(), + )); + 
assert(abs_s1.mem.dom() === abs_s2.mem.dom()); + assert(sys_s1.mem[pmem_idx as int] == abs_s1.mem[vmem_idx]); + assert(abs_s1.mem.dom().contains(vmem_idx)); + assert(abs_s1.mem.insert(vmem_idx, new_value).dom() + === abs_s1.mem.dom().insert(vmem_idx)); + assert_sets_equal!(abs_s1.mem.dom(), abs_s1.mem.dom().insert(vmem_idx)); + assert(abs_s1.mem.insert(vmem_idx, new_value).dom() + === abs_s2.mem.dom()); + assert forall|vmem_idx2: nat| + abs_s2.mem.dom().contains(vmem_idx2) + && abs_s1.mem.insert( + vmem_idx, + new_value, + ).dom().contains( + vmem_idx2, + ) implies #[trigger] abs_s2.mem[vmem_idx2] + == abs_s1.mem.insert( + vmem_idx, + new_value, + )[vmem_idx2] by { + if vmem_idx2 == vmem_idx { + assert(abs_s2.mem[vmem_idx2] == new_value); + } else { + assert(hlspec::mem_domain_from_mappings_contains( + abs_c.phys_mem_size, + vmem_idx2, + pt1, + )); + let vaddr2 = vmem_idx2 * WORD_SIZE as nat; + let (base2, pte2): (nat, PageTableEntry) = choose| + base2: nat, + pte2: PageTableEntry, + | + { + let paddr2 = (pte2.frame.base + (vaddr2 + - base2)) as nat; let pmem_idx2 = word_index_spec(paddr2); - assert(pt1.contains_pair(base2, pte2)); - assert(between(vaddr2, base2, base2 + pte2.frame.size)); - assert(pmem_idx2 < abs_c.phys_mem_size); - assert(abs_s1.mem[vmem_idx2] == s1.hw.mem[pmem_idx2 as int]); - assert(abs_s2.mem[vmem_idx2] == s2.hw.mem[pmem_idx2 as int]); - assert(s2.hw.mem === s1.hw.mem.update(pmem_idx as int, new_value)); - assert(pmem_idx < s1.hw.mem.len()); - assert(pmem_idx2 < s1.hw.mem.len()); - lib::mod_of_mul(vmem_idx2, WORD_SIZE as nat); - assert(aligned(paddr, 8)) by { - reveal(OSVariables::pt_entries_aligned); - assert(aligned(pte.frame.base, 8)); - assert(aligned(base, 8)); - assert(aligned(vaddr, 8)); - lib::subtract_mod_eq_zero(base, vaddr, 8); - lib::mod_add_zero(pte.frame.base, sub(vaddr, base), 8); - }; - assert(aligned(paddr2, 8)) by { - reveal(OSVariables::pt_entries_aligned); - assert(aligned(pte2.frame.base, 8)); - assert(aligned(base2, 8)); - assert(aligned(vaddr2, 8)); - lib::subtract_mod_eq_zero(base2, vaddr2, 8); - lib::mod_add_zero(pte2.frame.base, sub(vaddr2, base2), 8); - }; - if pmem_idx == pmem_idx2 { - assert(vaddr != vaddr2); - assert(pte === pte2); - assert(vaddr - base != vaddr2 - base); - assert(paddr != paddr2); - assert(paddr == (pte.frame.base + (vaddr - base)) as nat); - assert(paddr2 == (pte2.frame.base + (vaddr2 - base2)) as nat); - assert(false); - } - assert(s1.hw.mem[pmem_idx2 as int] == s2.hw.mem[pmem_idx2 as int]); - - assert(abs_s2.mem[vmem_idx2] == abs_s1.mem[vmem_idx2]); - } + &&& #[trigger] pt1.contains_pair( + base2, + pte2, + ) + &&& between( + vaddr2, + base2, + base2 + pte2.frame.size, + ) + &&& pmem_idx2 < abs_c.phys_mem_size + }; + let paddr2 = (pte2.frame.base + (vaddr2 + - base2)) as nat; + let pmem_idx2 = word_index_spec(paddr2); + assert(pt1.contains_pair(base2, pte2)); + assert(between( + vaddr2, + base2, + base2 + pte2.frame.size, + )); + assert(pmem_idx2 < abs_c.phys_mem_size); + assert(abs_s1.mem[vmem_idx2] + == s1.hw.mem[pmem_idx2 as int]); + assert(abs_s2.mem[vmem_idx2] + == s2.hw.mem[pmem_idx2 as int]); + assert(s2.hw.mem === s1.hw.mem.update( + pmem_idx as int, + new_value, + )); + assert(pmem_idx < s1.hw.mem.len()); + assert(pmem_idx2 < s1.hw.mem.len()); + lib::mod_of_mul(vmem_idx2, WORD_SIZE as nat); + assert(aligned(paddr, 8)) by { + reveal(OSVariables::pt_entries_aligned); + assert(aligned(pte.frame.base, 8)); + assert(aligned(base, 8)); + assert(aligned(vaddr, 8)); + lib::subtract_mod_eq_zero(base, vaddr, 8); + 
lib::mod_add_zero( + pte.frame.base, + sub(vaddr, base), + 8, + ); }; - assert_maps_equal!(abs_s2.mem, abs_s1.mem.insert(vmem_idx, new_value)); - assert(hlspec::step_ReadWrite(abs_c, abs_s1, abs_s2, vaddr, op, Some((base, pte)))); - // Generalizing from the previous assert to the - // postcondition seems unstable. Simply assuming the - // statement after the assert somehow fixes it. (As does - // increasing the rlimit to 50, luckily.) - // assume(hlspec::step_ReadWrite(abs_c, abs_s1, abs_s2, vaddr, op, Some((base, pte)))); - } else { - assert(result.is_Pagefault()); - assert(sys_s2.mem === sys_s1.mem); - assert(hlspec::step_ReadWrite(abs_c, abs_s1, abs_s2, vaddr, op, Some((base, pte)))); - } - }, - RWOp::Load { is_exec, result } => { - assert(sys_s2.mem === sys_s1.mem); - if pmem_idx < sys_s1.mem.len() && !pte.flags.is_supervisor && (is_exec ==> !pte.flags.disable_execute) { - assert(result.is_Value()); - assert(result.get_Value_0() == sys_s1.mem[pmem_idx as int]); - assert(hlspec::mem_domain_from_mappings_contains(abs_c.phys_mem_size, vmem_idx, s1.interp_pt_mem())); - assert(sys_s1.mem[pmem_idx as int] == abs_s1.mem[vmem_idx]); - assert(hlspec::step_ReadWrite(abs_c, abs_s1, abs_s2, vaddr, op, Some((base, pte)))); - } else { - assert(result.is_Pagefault()); - assert(hlspec::step_ReadWrite(abs_c, abs_s1, abs_s2, vaddr, op, Some((base, pte)))); + assert(aligned(paddr2, 8)) by { + reveal(OSVariables::pt_entries_aligned); + assert(aligned(pte2.frame.base, 8)); + assert(aligned(base2, 8)); + assert(aligned(vaddr2, 8)); + lib::subtract_mod_eq_zero(base2, vaddr2, 8); + lib::mod_add_zero( + pte2.frame.base, + sub(vaddr2, base2), + 8, + ); + }; + if pmem_idx == pmem_idx2 { + assert(vaddr != vaddr2); + assert(pte === pte2); + assert(vaddr - base != vaddr2 - base); + assert(paddr != paddr2); + assert(paddr == (pte.frame.base + (vaddr + - base)) as nat); + assert(paddr2 == (pte2.frame.base + (vaddr2 + - base2)) as nat); + assert(false); + } + assert(s1.hw.mem[pmem_idx2 as int] + == s2.hw.mem[pmem_idx2 as int]); + assert(abs_s2.mem[vmem_idx2] + == abs_s1.mem[vmem_idx2]); } - }, + }; + assert_maps_equal!(abs_s2.mem, abs_s1.mem.insert(vmem_idx, new_value)); + assert(hlspec::step_ReadWrite( + abs_c, + abs_s1, + abs_s2, + vaddr, + op, + Some((base, pte)), + )); + // Generalizing from the previous assert to the + // postcondition seems unstable. Simply assuming the + // statement after the assert somehow fixes it. (As does + // increasing the rlimit to 50, luckily.) 
+ // assume(hlspec::step_ReadWrite(abs_c, abs_s1, abs_s2, vaddr, op, Some((base, pte)))); + } else { + assert(result.is_Pagefault()); + assert(sys_s2.mem === sys_s1.mem); + assert(hlspec::step_ReadWrite( + abs_c, + abs_s1, + abs_s2, + vaddr, + op, + Some((base, pte)), + )); } }, - None => { - assert(hlspec::step_ReadWrite(abs_c, abs_s1, abs_s2, vaddr, op, pte)); + RWOp::Load { is_exec, result } => { + assert(sys_s2.mem === sys_s1.mem); + if pmem_idx < sys_s1.mem.len() && !pte.flags.is_supervisor && ( + is_exec ==> !pte.flags.disable_execute) { + assert(result.is_Value()); + assert(result.get_Value_0() == sys_s1.mem[pmem_idx as int]); + assert(hlspec::mem_domain_from_mappings_contains( + abs_c.phys_mem_size, + vmem_idx, + s1.interp_pt_mem(), + )); + assert(sys_s1.mem[pmem_idx as int] == abs_s1.mem[vmem_idx]); + assert(hlspec::step_ReadWrite( + abs_c, + abs_s1, + abs_s2, + vaddr, + op, + Some((base, pte)), + )); + } else { + assert(result.is_Pagefault()); + assert(hlspec::step_ReadWrite( + abs_c, + abs_s1, + abs_s2, + vaddr, + op, + Some((base, pte)), + )); + } }, } - assert(hlspec::step_ReadWrite(abs_c, abs_s1, abs_s2, vaddr, op, pte)); - assert(hlspec::next_step(abs_c, abs_s1, abs_s2, abs_step)); - }, - hardware::HWStep::PTMemOp => assert(false), - hardware::HWStep::TLBFill { vaddr, pte } => { - // hlspec::AbstractStep::Stutter - assert(abs_s2 === abs_s1); }, - hardware::HWStep::TLBEvict { vaddr } => { - // hlspec::AbstractStep::Stutter - assert(abs_s2 === abs_s1); + None => { + assert(hlspec::step_ReadWrite(abs_c, abs_s1, abs_s2, vaddr, op, pte)); }, } - }, - OSStep::Map { vaddr, pte, result } => { - // hlspec::AbstractStep::Map { vaddr, pte } - let pt_s1 = s1.pt_variables(); - let pt_s2 = s2.pt_variables(); - assert(abs_step === hlspec::AbstractStep::Map { vaddr, pte, result }); - assert(step_Map(s1, s2, vaddr, pte, result)); - assert(spec_pt::step_Map(pt_s1, pt_s2, vaddr, pte, result)); - assert(hlspec::step_Map_enabled(abs_s1.mappings, vaddr, pte)); - if candidate_mapping_overlaps_existing_vmem(pt_s1.map, vaddr, pte) { - assert(candidate_mapping_overlaps_existing_vmem(abs_s1.mappings, vaddr, pte)); - assert(hlspec::step_Map(abs_c, abs_s1, abs_s2, vaddr, pte, result)); - } else { - assert(!candidate_mapping_overlaps_existing_vmem(abs_s1.mappings, vaddr, pte)); - assert(forall|base, pte| s1.interp_pt_mem().contains_pair(base, pte) ==> s2.interp_pt_mem().contains_pair(base, pte)); - assert(forall|base, pte| s1.interp().mappings.contains_pair(base, pte) ==> s2.interp().mappings.contains_pair(base, pte)); - assert(s1.interp().mappings === s1.interp_pt_mem()); - assert(s2.interp().mappings === s2.interp_pt_mem()); - lemma_interp_other(s1, s2); - assert(result.is_Ok()); - assert(abs_s2.mappings === abs_s1.mappings.insert(vaddr, pte)); - assert forall|word_idx| - #[trigger] abs_s1.mem.dom().contains(word_idx) - implies abs_s2.mem[word_idx] === abs_s1.mem[word_idx] by - { - assert(abs_s2.mem.dom().contains(word_idx)); - assert(abs_s2.mem[word_idx] == abs_s1.mem[word_idx]); - }; - assert(abs_s2.mem.dom() === hlspec::mem_domain_from_mappings(abs_c.phys_mem_size, abs_s2.mappings)); - assert(hlspec::step_Map(abs_c, abs_s1, abs_s2, vaddr, pte, result)); - } - assert(hlspec::step_Map(abs_c, abs_s1, abs_s2, vaddr, pte, result)); + assert(hlspec::step_ReadWrite(abs_c, abs_s1, abs_s2, vaddr, op, pte)); assert(hlspec::next_step(abs_c, abs_s1, abs_s2, abs_step)); }, - OSStep::Unmap { vaddr, result } => { - // hlspec::AbstractStep::Unmap { vaddr } - let pt_s1 = s1.pt_variables(); - let pt_s2 = 
s2.pt_variables(); - assert(abs_step === hlspec::AbstractStep::Unmap { vaddr, result }); - assert(step_Unmap(s1, s2, vaddr, result)); - assert(spec_pt::step_Unmap(pt_s1, pt_s2, vaddr, result)); - assert(hlspec::step_Unmap_enabled(vaddr)); - if pt_s1.map.dom().contains(vaddr) { - assert(abs_s1.mappings.dom().contains(vaddr)); - assert(result.is_Ok()); - assert(pt_s2.map === pt_s1.map.remove(vaddr)); - assert(abs_s2.mappings === abs_s1.mappings.remove(vaddr)); - - assert(abs_s2.mem.dom() === hlspec::mem_domain_from_mappings(abs_c.phys_mem_size, abs_s2.mappings)); - lemma_interp_other(s2, s1); - assert forall|word_idx| - #[trigger] abs_s2.mem.dom().contains(word_idx) - implies abs_s1.mem[word_idx] === abs_s2.mem[word_idx] by - { - assert(abs_s1.mem[word_idx] == abs_s2.mem[word_idx]); - }; - - assert(hlspec::step_Unmap(abs_c, abs_s1, abs_s2, vaddr, result)); - } else { - assert(!abs_s1.mappings.dom().contains(vaddr)); - assert(hlspec::step_Unmap(abs_c, abs_s1, abs_s2, vaddr, result)); - } - assert(hlspec::step_Unmap(abs_c, abs_s1, abs_s2, vaddr, result)); - assert(hlspec::next_step(abs_c, abs_s1, abs_s2, abs_step)); + hardware::HWStep::PTMemOp => assert(false), + hardware::HWStep::TLBFill { vaddr, pte } => { + // hlspec::AbstractStep::Stutter + assert(abs_s2 === abs_s1); }, - OSStep::Resolve { vaddr, result } => { - // hlspec::AbstractStep::Resolve { vaddr, result } - let pt_s1 = s1.pt_variables(); - let pt_s2 = s2.pt_variables(); - assert(abs_step === hlspec::AbstractStep::Resolve { vaddr, result }); - assert(step_Resolve(s1, s2, vaddr, result)); - assert(spec_pt::step_Resolve(pt_s1, pt_s2, vaddr, result)); - match result { - ResolveResult::Ok(base, pte) => { - assert(hlspec::step_Resolve(abs_c, abs_s1, abs_s2, vaddr, ResolveResult::Ok(base, pte))); - }, - ResolveResult::ErrUnmapped => { - let vmem_idx = word_index_spec(vaddr); - assert(vmem_idx * WORD_SIZE == vaddr); - if hlspec::mem_domain_from_mappings(abs_c.phys_mem_size, abs_s1.mappings).contains(vmem_idx) { - assert(hlspec::mem_domain_from_mappings_contains(abs_c.phys_mem_size, vmem_idx, abs_s1.mappings)); - let (base, pte): (nat, PageTableEntry) = choose|base: nat, pte: PageTableEntry| { - let paddr = (pte.frame.base + (vaddr - base)) as nat; - let pmem_idx = word_index_spec(paddr); - &&& #[trigger] abs_s1.mappings.contains_pair(base, pte) - &&& between(vaddr, base, base + pte.frame.size) - &&& pmem_idx < abs_c.phys_mem_size - }; - assert(pt_s1.map.contains_pair(base, pte)); - assert(false); - } - assert(hlspec::step_Resolve(abs_c, abs_s1, abs_s2, vaddr, result)); - }, + hardware::HWStep::TLBEvict { vaddr } => { + // hlspec::AbstractStep::Stutter + assert(abs_s2 === abs_s1); + }, + } + }, + OSStep::Map { vaddr, pte, result } => { + // hlspec::AbstractStep::Map { vaddr, pte } + let pt_s1 = s1.pt_variables(); + let pt_s2 = s2.pt_variables(); + assert(abs_step === hlspec::AbstractStep::Map { vaddr, pte, result }); + assert(step_Map(s1, s2, vaddr, pte, result)); + assert(spec_pt::step_Map(pt_s1, pt_s2, vaddr, pte, result)); + assert(hlspec::step_Map_enabled(abs_s1.mappings, vaddr, pte)); + if candidate_mapping_overlaps_existing_vmem(pt_s1.map, vaddr, pte) { + assert(candidate_mapping_overlaps_existing_vmem(abs_s1.mappings, vaddr, pte)); + assert(hlspec::step_Map(abs_c, abs_s1, abs_s2, vaddr, pte, result)); + } else { + assert(!candidate_mapping_overlaps_existing_vmem(abs_s1.mappings, vaddr, pte)); + assert(forall|base, pte| + s1.interp_pt_mem().contains_pair(base, pte) + ==> s2.interp_pt_mem().contains_pair(base, pte)); + 
assert(forall|base, pte| + s1.interp().mappings.contains_pair(base, pte) + ==> s2.interp().mappings.contains_pair(base, pte)); + assert(s1.interp().mappings === s1.interp_pt_mem()); + assert(s2.interp().mappings === s2.interp_pt_mem()); + lemma_interp_other(s1, s2); + assert(result.is_Ok()); + assert(abs_s2.mappings === abs_s1.mappings.insert(vaddr, pte)); + assert forall|word_idx| #[trigger] + abs_s1.mem.dom().contains(word_idx) implies abs_s2.mem[word_idx] + === abs_s1.mem[word_idx] by { + assert(abs_s2.mem.dom().contains(word_idx)); + assert(abs_s2.mem[word_idx] == abs_s1.mem[word_idx]); + }; + assert(abs_s2.mem.dom() === hlspec::mem_domain_from_mappings( + abs_c.phys_mem_size, + abs_s2.mappings, + )); + assert(hlspec::step_Map(abs_c, abs_s1, abs_s2, vaddr, pte, result)); + } + assert(hlspec::step_Map(abs_c, abs_s1, abs_s2, vaddr, pte, result)); + assert(hlspec::next_step(abs_c, abs_s1, abs_s2, abs_step)); + }, + OSStep::Unmap { vaddr, result } => { + // hlspec::AbstractStep::Unmap { vaddr } + let pt_s1 = s1.pt_variables(); + let pt_s2 = s2.pt_variables(); + assert(abs_step === hlspec::AbstractStep::Unmap { vaddr, result }); + assert(step_Unmap(s1, s2, vaddr, result)); + assert(spec_pt::step_Unmap(pt_s1, pt_s2, vaddr, result)); + assert(hlspec::step_Unmap_enabled(vaddr)); + if pt_s1.map.dom().contains(vaddr) { + assert(abs_s1.mappings.dom().contains(vaddr)); + assert(result.is_Ok()); + assert(pt_s2.map === pt_s1.map.remove(vaddr)); + assert(abs_s2.mappings === abs_s1.mappings.remove(vaddr)); + assert(abs_s2.mem.dom() === hlspec::mem_domain_from_mappings( + abs_c.phys_mem_size, + abs_s2.mappings, + )); + lemma_interp_other(s2, s1); + assert forall|word_idx| #[trigger] + abs_s2.mem.dom().contains(word_idx) implies abs_s1.mem[word_idx] + === abs_s2.mem[word_idx] by { + assert(abs_s1.mem[word_idx] == abs_s2.mem[word_idx]); + }; + assert(hlspec::step_Unmap(abs_c, abs_s1, abs_s2, vaddr, result)); + } else { + assert(!abs_s1.mappings.dom().contains(vaddr)); + assert(hlspec::step_Unmap(abs_c, abs_s1, abs_s2, vaddr, result)); + } + assert(hlspec::step_Unmap(abs_c, abs_s1, abs_s2, vaddr, result)); + assert(hlspec::next_step(abs_c, abs_s1, abs_s2, abs_step)); + }, + OSStep::Resolve { vaddr, result } => { + // hlspec::AbstractStep::Resolve { vaddr, result } + let pt_s1 = s1.pt_variables(); + let pt_s2 = s2.pt_variables(); + assert(abs_step === hlspec::AbstractStep::Resolve { vaddr, result }); + assert(step_Resolve(s1, s2, vaddr, result)); + assert(spec_pt::step_Resolve(pt_s1, pt_s2, vaddr, result)); + match result { + ResolveResult::Ok(base, pte) => { + assert(hlspec::step_Resolve( + abs_c, + abs_s1, + abs_s2, + vaddr, + ResolveResult::Ok(base, pte), + )); + }, + ResolveResult::ErrUnmapped => { + let vmem_idx = word_index_spec(vaddr); + assert(vmem_idx * WORD_SIZE == vaddr); + if hlspec::mem_domain_from_mappings( + abs_c.phys_mem_size, + abs_s1.mappings, + ).contains(vmem_idx) { + assert(hlspec::mem_domain_from_mappings_contains( + abs_c.phys_mem_size, + vmem_idx, + abs_s1.mappings, + )); + let (base, pte): (nat, PageTableEntry) = choose| + base: nat, + pte: PageTableEntry, + | + { + let paddr = (pte.frame.base + (vaddr - base)) as nat; + let pmem_idx = word_index_spec(paddr); + &&& #[trigger] abs_s1.mappings.contains_pair(base, pte) + &&& between(vaddr, base, base + pte.frame.size) + &&& pmem_idx < abs_c.phys_mem_size + }; + assert(pt_s1.map.contains_pair(base, pte)); + assert(false); } assert(hlspec::step_Resolve(abs_c, abs_s1, abs_s2, vaddr, result)); - assert(hlspec::next_step(abs_c, abs_s1, 
abs_s2, abs_step)); }, } - } + assert(hlspec::step_Resolve(abs_c, abs_s1, abs_s2, vaddr, result)); + assert(hlspec::next_step(abs_c, abs_s1, abs_s2, abs_step)); + }, + } +} - } +} // verus! } } @@ -6061,420 +8948,444 @@ pub mod definitions_t { verus! { - macro_rules! bitmask_inc { +macro_rules! bitmask_inc { ($low:expr,$high:expr) => { (!(!0u64 << (($high+1u64)-$low))) << $low } } - pub(crate) use bitmask_inc; - macro_rules! bit { +pub(crate) use bitmask_inc; + +macro_rules! bit { ($v:expr) => { 1u64 << $v } } - pub(crate) use bit; - pub const X86_NUM_LAYERS: usize = 4; - pub const X86_NUM_ENTRIES: usize = 512; +pub(crate) use bit; - // The maximum physical address width is between 32 and 52 bits. - #[verifier(external_body)] - pub const MAX_PHYADDR_WIDTH: u64 = unimplemented!(); +pub const X86_NUM_LAYERS: usize = 4; - #[verifier(external_body)] - pub proof fn axiom_max_phyaddr_width_facts() - ensures 32 <= MAX_PHYADDR_WIDTH <= 52; - - // We cannot use a dual exec/spec constant for MAX_PHYADDR, because for those Verus currently - // doesn't support manually guiding the no-overflow proofs. - pub spec const MAX_PHYADDR_SPEC: u64 = ((1u64 << MAX_PHYADDR_WIDTH) - 1u64) as u64; - #[verifier::when_used_as_spec(MAX_PHYADDR_SPEC)] - pub exec const MAX_PHYADDR: u64 ensures MAX_PHYADDR == MAX_PHYADDR_SPEC { - axiom_max_phyaddr_width_facts(); - assert(1u64 << 32 == 0x100000000) by (compute); - assert(forall|m:u64,n:u64| n < m < 64 ==> 1u64 << n < 1u64 << m) by (bit_vector); - (1u64 << MAX_PHYADDR_WIDTH) - 1u64 - } +pub const X86_NUM_ENTRIES: usize = 512; - pub const WORD_SIZE: usize = 8; - pub const PAGE_SIZE: usize = 4096; +// The maximum physical address width is between 32 and 52 bits. +#[verifier(external_body)] +pub const MAX_PHYADDR_WIDTH: u64 = unimplemented!(); - pub spec const X86_MAX_ENTRY_SIZE: nat = 512 * 512 * 512 * 4096; - pub spec const MAX_BASE: nat = X86_MAX_ENTRY_SIZE * (X86_NUM_ENTRIES as nat); +#[verifier(external_body)] +pub proof fn axiom_max_phyaddr_width_facts() + ensures + 32 <= MAX_PHYADDR_WIDTH <= 52, +; - pub spec const PT_BOUND_LOW: nat = 0; - // Upper bound for x86 4-level paging. - // 512 entries, each mapping 512*1024*1024*1024 bytes - pub const PT_BOUND_HIGH: usize = 512 * 512 * 1024 * 1024 * 1024; - pub const L3_ENTRY_SIZE: usize = PAGE_SIZE; - pub const L2_ENTRY_SIZE: usize = 512 * L3_ENTRY_SIZE; - pub const L1_ENTRY_SIZE: usize = 512 * L2_ENTRY_SIZE; - pub const L0_ENTRY_SIZE: usize = 512 * L1_ENTRY_SIZE; +// We cannot use a dual exec/spec constant for MAX_PHYADDR, because for those Verus currently +// doesn't support manually guiding the no-overflow proofs. 
+pub spec const MAX_PHYADDR_SPEC: u64 = ((1u64 << MAX_PHYADDR_WIDTH) - 1u64) as u64; - pub open spec fn index_from_offset(offset: nat, entry_size: nat) -> (res: nat) - recommends - entry_size > 0, - { - offset / entry_size - } +#[verifier::when_used_as_spec(MAX_PHYADDR_SPEC)] +pub exec const MAX_PHYADDR: u64 + ensures + MAX_PHYADDR == MAX_PHYADDR_SPEC, +{ + axiom_max_phyaddr_width_facts(); + assert(1u64 << 32 == 0x100000000) by (compute); + assert(forall|m: u64, n: u64| n < m < 64 ==> 1u64 << n < 1u64 << m) by (bit_vector); + (1u64 << MAX_PHYADDR_WIDTH) - 1u64 +} - pub open spec fn index_from_base_and_addr(base: nat, addr: nat, entry_size: nat) -> nat - recommends - addr >= base, - entry_size > 0, - { - index_from_offset(sub(addr, base), entry_size) - } +pub const WORD_SIZE: usize = 8; - pub open spec fn entry_base_from_index(base: nat, idx: nat, entry_size: nat) -> nat { - base + idx * entry_size - } +pub const PAGE_SIZE: usize = 4096; - pub open spec fn next_entry_base_from_index(base: nat, idx: nat, entry_size: nat) -> nat { - base + (idx + 1) * entry_size - } +pub spec const X86_MAX_ENTRY_SIZE: nat = 512 * 512 * 512 * 4096; +pub spec const MAX_BASE: nat = X86_MAX_ENTRY_SIZE * (X86_NUM_ENTRIES as nat); - pub open spec fn candidate_mapping_in_bounds(base: nat, pte: PageTableEntry) -> bool { - base + pte.frame.size < x86_arch_spec.upper_vaddr(0, 0) - } +pub spec const PT_BOUND_LOW: nat = 0; + +// Upper bound for x86 4-level paging. +// 512 entries, each mapping 512*1024*1024*1024 bytes +pub const PT_BOUND_HIGH: usize = 512 * 512 * 1024 * 1024 * 1024; + +pub const L3_ENTRY_SIZE: usize = PAGE_SIZE; + +pub const L2_ENTRY_SIZE: usize = 512 * L3_ENTRY_SIZE; + +pub const L1_ENTRY_SIZE: usize = 512 * L2_ENTRY_SIZE; + +pub const L0_ENTRY_SIZE: usize = 512 * L1_ENTRY_SIZE; + +pub open spec fn index_from_offset(offset: nat, entry_size: nat) -> (res: nat) + recommends + entry_size > 0, +{ + offset / entry_size +} + +pub open spec fn index_from_base_and_addr(base: nat, addr: nat, entry_size: nat) -> nat + recommends + addr >= base, + entry_size > 0, +{ + index_from_offset(sub(addr, base), entry_size) +} + +pub open spec fn entry_base_from_index(base: nat, idx: nat, entry_size: nat) -> nat { + base + idx * entry_size +} + +pub open spec fn next_entry_base_from_index(base: nat, idx: nat, entry_size: nat) -> nat { + base + (idx + 1) * entry_size +} + +pub open spec fn candidate_mapping_in_bounds(base: nat, pte: PageTableEntry) -> bool { + base + pte.frame.size < x86_arch_spec.upper_vaddr(0, 0) +} - pub open spec fn candidate_mapping_overlaps_existing_vmem(mappings: Map, base: nat, pte: PageTableEntry) -> bool { - exists|b: nat| #![auto] { +pub open spec fn candidate_mapping_overlaps_existing_vmem( + mappings: Map, + base: nat, + pte: PageTableEntry, +) -> bool { + exists|b: nat| + #![auto] + { &&& mappings.dom().contains(b) &&& overlap( MemRegion { base: base, size: pte.frame.size }, - MemRegion { base: b, size: mappings[b].frame.size }) + MemRegion { base: b, size: mappings[b].frame.size }, + ) } - } +} - pub open spec fn candidate_mapping_overlaps_existing_pmem(mappings: Map, base: nat, pte: PageTableEntry) -> bool { - exists|b: nat| #![auto] { +pub open spec fn candidate_mapping_overlaps_existing_pmem( + mappings: Map, + base: nat, + pte: PageTableEntry, +) -> bool { + exists|b: nat| + #![auto] + { &&& mappings.dom().contains(b) &&& overlap(pte.frame, mappings.index(b).frame) } - } +} + +pub open spec(checked) fn aligned(addr: nat, size: nat) -> bool { + addr % size == 0 +} +pub open spec fn 
between(x: nat, a: nat, b: nat) -> bool { + a <= x && x < b +} - pub open spec(checked) fn aligned(addr: nat, size: nat) -> bool { - addr % size == 0 +pub open spec fn new_seq(i: nat, e: T) -> Seq + decreases i, +{ + if i == 0 { + seq![] + } else { + new_seq((i - 1) as nat, e).push(e) } +} - pub open spec fn between(x: nat, a: nat, b: nat) -> bool { - a <= x && x < b - } +#[is_variant] +pub enum MapResult { + ErrOverlap, + Ok, +} - pub open spec fn new_seq(i: nat, e: T) -> Seq - decreases i - { - if i == 0 { - seq![] - } else { - new_seq((i-1) as nat, e).push(e) +#[is_variant] +pub enum UnmapResult { + ErrNoSuchMapping, + Ok, +} + +#[is_variant] +pub enum ResolveResultExec { + ErrUnmapped, + Ok(usize, PageTableEntryExec), +} + +impl ResolveResultExec { + pub open spec fn view(self) -> ResolveResult { + match self { + ResolveResultExec::ErrUnmapped => ResolveResult::ErrUnmapped, + ResolveResultExec::Ok(base, pte) => ResolveResult::Ok(base as nat, pte@), } } +} - #[is_variant] - pub enum MapResult { - ErrOverlap, - Ok, - } +#[is_variant] +pub enum ResolveResult { + ErrUnmapped, + Ok(nat, PageTableEntry), +} - #[is_variant] - pub enum UnmapResult { - ErrNoSuchMapping, - Ok, - } +#[is_variant] +pub enum LoadResult { + Pagefault, + Value(nat), // word-sized load +} - #[is_variant] - pub enum ResolveResultExec { - ErrUnmapped, - Ok(usize, PageTableEntryExec), - } +#[is_variant] +pub enum StoreResult { + Pagefault, + Ok, +} - impl ResolveResultExec { - pub open spec fn view(self) -> ResolveResult { - match self { - ResolveResultExec::ErrUnmapped => ResolveResult::ErrUnmapped, - ResolveResultExec::Ok(base, pte) => ResolveResult::Ok(base as nat, pte@), - } - } - } +#[is_variant] +pub enum RWOp { + Store { new_value: nat, result: StoreResult }, + Load { is_exec: bool, result: LoadResult }, +} - #[is_variant] - pub enum ResolveResult { - ErrUnmapped, - Ok(nat, PageTableEntry), - } +pub struct MemRegion { + pub base: nat, + pub size: nat, +} - #[is_variant] - pub enum LoadResult { - Pagefault, - Value(nat), // word-sized load +impl MemRegion { + pub open spec fn contains(self, addr: nat) -> bool { + between(addr, self.base, self.base + self.size) } +} - #[is_variant] - pub enum StoreResult { - Pagefault, - Ok, +// only well-defined for sizes > 0 +pub open spec(checked) fn overlap(region1: MemRegion, region2: MemRegion) -> bool { + if region1.base <= region2.base { + region2.base < region1.base + region1.size + } else { + region1.base < region2.base + region2.size } +} - #[is_variant] - pub enum RWOp { - Store { new_value: nat, result: StoreResult }, - Load { is_exec: bool, result: LoadResult }, - } +// hardens spec for overlap +#[verus::line_count::ignore] +proof fn overlap_sanity_check() { + assert(overlap(MemRegion { base: 0, size: 4096 }, MemRegion { base: 0, size: 4096 })); + assert(overlap(MemRegion { base: 0, size: 8192 }, MemRegion { base: 0, size: 4096 })); + assert(overlap(MemRegion { base: 0, size: 4096 }, MemRegion { base: 0, size: 8192 })); + assert(overlap(MemRegion { base: 0, size: 8192 }, MemRegion { base: 4096, size: 4096 })); + assert(!overlap(MemRegion { base: 4096, size: 8192 }, MemRegion { base: 0, size: 4096 })); + assert(!overlap(MemRegion { base: 0, size: 4096 }, MemRegion { base: 8192, size: 16384 })); +} - pub struct MemRegion { pub base: nat, pub size: nat } +pub struct MemRegionExec { + pub base: usize, + pub size: usize, +} - impl MemRegion { - pub open spec fn contains(self, addr: nat) -> bool { - between(addr, self.base, self.base + self.size) - } +impl MemRegionExec { + 
pub open spec fn view(self) -> MemRegion { + MemRegion { base: self.base as nat, size: self.size as nat } } +} - // only well-defined for sizes > 0 - pub open spec(checked) fn overlap(region1: MemRegion, region2: MemRegion) -> bool { - if region1.base <= region2.base { - region2.base < region1.base + region1.size - } else { - region1.base < region2.base + region2.size - } - } +pub struct Flags { + pub is_writable: bool, + pub is_supervisor: bool, + pub disable_execute: bool, +} - // hardens spec for overlap - #[verus::line_count::ignore] - proof fn overlap_sanity_check() { - assert(overlap( - MemRegion { base: 0, size: 4096 }, - MemRegion { base: 0, size: 4096 })); - assert(overlap( - MemRegion { base: 0, size: 8192 }, - MemRegion { base: 0, size: 4096 })); - assert(overlap( - MemRegion { base: 0, size: 4096 }, - MemRegion { base: 0, size: 8192 })); - assert(overlap( - MemRegion { base: 0, size: 8192 }, - MemRegion { base: 4096, size: 4096 })); - assert(!overlap( - MemRegion { base: 4096, size: 8192 }, - MemRegion { base: 0, size: 4096 })); - assert(!overlap( - MemRegion { base: 0, size: 4096 }, - MemRegion { base: 8192, size: 16384 })); - } - - pub struct MemRegionExec { pub base: usize, pub size: usize } - - impl MemRegionExec { - pub open spec fn view(self) -> MemRegion { - MemRegion { - base: self.base as nat, - size: self.size as nat, - } - } +pub struct PageTableEntry { + pub frame: MemRegion, + /// The `flags` field on a `PageTableEntry` denotes the combined flags of the entire + /// translation path to the entry. (See page table walk definition in hardware model, + /// `spec_t::hardware`.) However, because we always set the flags on directories to be + /// permissive these flags also correspond to the flags that we set for the frame mapping + /// corresponding to this `PageTableEntry`. + pub flags: Flags, +} + +pub struct PageTableEntryExec { + pub frame: MemRegionExec, + pub flags: Flags, +} + +impl PageTableEntryExec { + pub open spec fn view(self) -> PageTableEntry { + PageTableEntry { frame: self.frame@, flags: self.flags } } +} + +pub ghost struct ArchLayer { + /// Address space size mapped by a single entry at this layer + pub entry_size: nat, + /// Number of entries at this layer + pub num_entries: nat, +} + +pub ghost struct Arch { + pub layers: Seq, + // [512G, 1G , 2M , 4K ] + // [512 , 512 , 512 , 512 ] +} - pub struct Flags { - pub is_writable: bool, - pub is_supervisor: bool, - pub disable_execute: bool, +impl Arch { + pub open spec(checked) fn entry_size(self, layer: nat) -> nat + recommends + layer < self.layers.len(), + { + self.layers.index(layer as int).entry_size } - pub struct PageTableEntry { - pub frame: MemRegion, - /// The `flags` field on a `PageTableEntry` denotes the combined flags of the entire - /// translation path to the entry. (See page table walk definition in hardware model, - /// `spec_t::hardware`.) However, because we always set the flags on directories to be - /// permissive these flags also correspond to the flags that we set for the frame mapping - /// corresponding to this `PageTableEntry`. 
- pub flags: Flags, + pub open spec(checked) fn num_entries(self, layer: nat) -> nat + recommends + layer < self.layers.len(), + { + self.layers.index(layer as int).num_entries } - pub struct PageTableEntryExec { - pub frame: MemRegionExec, - pub flags: Flags, + pub open spec(checked) fn upper_vaddr(self, layer: nat, base: nat) -> nat + recommends + self.inv(), + layer < self.layers.len(), + { + self.entry_base(layer, base, self.num_entries(layer)) } - impl PageTableEntryExec { - pub open spec fn view(self) -> PageTableEntry { - PageTableEntry { - frame: self.frame@, - flags: self.flags, + pub open spec(checked) fn inv(&self) -> bool { + &&& self.layers.len() <= X86_NUM_LAYERS + &&& forall|i: nat| + #![trigger self.entry_size(i)] + #![trigger self.num_entries(i)] + i < self.layers.len() ==> { + &&& 0 < self.entry_size(i) <= X86_MAX_ENTRY_SIZE + &&& 0 < self.num_entries(i) <= X86_NUM_ENTRIES + &&& self.entry_size_is_next_layer_size(i) } - } } - pub ghost struct ArchLayer { - /// Address space size mapped by a single entry at this layer - pub entry_size: nat, - /// Number of entries at this layer - pub num_entries: nat, + pub open spec(checked) fn entry_size_is_next_layer_size(self, i: nat) -> bool + recommends + i < self.layers.len(), + { + i + 1 < self.layers.len() ==> self.entry_size(i) == self.entry_size((i + 1) as nat) + * self.num_entries((i + 1) as nat) } - pub ghost struct Arch { - pub layers: Seq, - // [512G, 1G , 2M , 4K ] - // [512 , 512 , 512 , 512 ] + pub open spec(checked) fn contains_entry_size_at_index_atleast( + &self, + entry_size: nat, + min_idx: nat, + ) -> bool { + exists|i: nat| + min_idx <= i && i < self.layers.len() && #[trigger] self.entry_size(i) == entry_size } - impl Arch { - pub open spec(checked) fn entry_size(self, layer: nat) -> nat - recommends layer < self.layers.len() - { - self.layers.index(layer as int).entry_size - } - - pub open spec(checked) fn num_entries(self, layer: nat) -> nat - recommends layer < self.layers.len() - { - self.layers.index(layer as int).num_entries - } - - pub open spec(checked) fn upper_vaddr(self, layer: nat, base: nat) -> nat - recommends - self.inv(), - layer < self.layers.len(), - { - self.entry_base(layer, base, self.num_entries(layer)) - } - - pub open spec(checked) fn inv(&self) -> bool { - &&& self.layers.len() <= X86_NUM_LAYERS - &&& forall|i:nat| - #![trigger self.entry_size(i)] - #![trigger self.num_entries(i)] - i < self.layers.len() ==> { - &&& 0 < self.entry_size(i) <= X86_MAX_ENTRY_SIZE - &&& 0 < self.num_entries(i) <= X86_NUM_ENTRIES - &&& self.entry_size_is_next_layer_size(i) - } - } - - pub open spec(checked) fn entry_size_is_next_layer_size(self, i: nat) -> bool - recommends i < self.layers.len() - { - i + 1 < self.layers.len() ==> - self.entry_size(i) == self.entry_size((i + 1) as nat) * self.num_entries((i + 1) as nat) - } - - pub open spec(checked) fn contains_entry_size_at_index_atleast(&self, entry_size: nat, min_idx: nat) -> bool { - exists|i: nat| min_idx <= i && i < self.layers.len() && #[trigger] self.entry_size(i) == entry_size - } - - pub open spec(checked) fn contains_entry_size(&self, entry_size: nat) -> bool { - self.contains_entry_size_at_index_atleast(entry_size, 0) - } + pub open spec(checked) fn contains_entry_size(&self, entry_size: nat) -> bool { + self.contains_entry_size_at_index_atleast(entry_size, 0) + } - #[verifier(inline)] - pub open spec(checked) fn index_for_vaddr(self, layer: nat, base: nat, vaddr: nat) -> nat - recommends - self.inv(), - layer < self.layers.len(), - base <= vaddr, - 
{ - index_from_base_and_addr(base, vaddr, self.entry_size(layer)) - } + #[verifier(inline)] + pub open spec(checked) fn index_for_vaddr(self, layer: nat, base: nat, vaddr: nat) -> nat + recommends + self.inv(), + layer < self.layers.len(), + base <= vaddr, + { + index_from_base_and_addr(base, vaddr, self.entry_size(layer)) + } - #[verifier(inline)] - pub open spec(checked) fn entry_base(self, layer: nat, base: nat, idx: nat) -> nat - recommends - self.inv(), - layer < self.layers.len() - { - // base + idx * self.entry_size(layer) - entry_base_from_index(base, idx, self.entry_size(layer)) - } + #[verifier(inline)] + pub open spec(checked) fn entry_base(self, layer: nat, base: nat, idx: nat) -> nat + recommends + self.inv(), + layer < self.layers.len(), + { + // base + idx * self.entry_size(layer) + entry_base_from_index(base, idx, self.entry_size(layer)) + } - #[verifier(inline)] - pub open spec(checked) fn next_entry_base(self, layer: nat, base: nat, idx: nat) -> nat - recommends - self.inv(), - layer < self.layers.len() - { - // base + (idx + 1) * self.entry_size(layer) - next_entry_base_from_index(base, idx, self.entry_size(layer)) - } + #[verifier(inline)] + pub open spec(checked) fn next_entry_base(self, layer: nat, base: nat, idx: nat) -> nat + recommends + self.inv(), + layer < self.layers.len(), + { + // base + (idx + 1) * self.entry_size(layer) + next_entry_base_from_index(base, idx, self.entry_size(layer)) } +} - pub struct ArchLayerExec { - /// Address space size mapped by a single entry at this layer - pub entry_size: usize, - /// Number of entries of at this layer - pub num_entries: usize, - } - - pub struct ArchExec { - // TODO: This could probably be an array, once we have support for that - pub layers: Vec, - } - - // Why does this exec_spec function even exist: - // - In some places we need to refer to the `Exec` versions of the structs in spec mode. - // - We can't make x86_arch_exec a const because Verus panics if we initialize the vec directly, - // i.e. we need to push to a mut vec instead. (Does rust even support vecs in a const? Otherwise - // would need arrays.) - // - Since x86_arch_exec is a function it has to have a mode, i.e. we need a version for exec usage - // and a version for spec usage. In the spec version we can't initialize the vec (same problem as - // above and can't use mut), i.e. we have to axiomatize their equivalence. - // - We can't even have a proof function axiom because we need to show - // `x86_arch_exec_spec() == x86_arch_exec()`, where the second function call is an exec function. - // Thus the axiom is an assumed postcondition on the exec function itself. - // - In addition to adding the postcondition, we also need a separate axiom to show that the view - // of x86_arch_exec_spec is the same as x86_arch_spec. This is provable but only with the - // postconditions on x86_arch_exec, which is an exec function. Consequently we can't use that - // postcondition in proof mode. - // - All this mess should go away as soon as we can make that exec function the constant it ought - // to be. 
- pub open spec fn x86_arch_exec_spec() -> ArchExec; +pub struct ArchLayerExec { + /// Address space size mapped by a single entry at this layer + pub entry_size: usize, + /// Number of entries of at this layer + pub num_entries: usize, +} - #[verifier(external_body)] - pub proof fn axiom_x86_arch_exec_spec() - ensures - x86_arch_exec_spec()@ == x86_arch_spec; +pub struct ArchExec { + // TODO: This could probably be an array, once we have support for that + pub layers: Vec, +} - pub exec fn x86_arch_exec() -> (res: ArchExec) - ensures - res.layers@ == seq![ +// Why does this exec_spec function even exist: +// - In some places we need to refer to the `Exec` versions of the structs in spec mode. +// - We can't make x86_arch_exec a const because Verus panics if we initialize the vec directly, +// i.e. we need to push to a mut vec instead. (Does rust even support vecs in a const? Otherwise +// would need arrays.) +// - Since x86_arch_exec is a function it has to have a mode, i.e. we need a version for exec usage +// and a version for spec usage. In the spec version we can't initialize the vec (same problem as +// above and can't use mut), i.e. we have to axiomatize their equivalence. +// - We can't even have a proof function axiom because we need to show +// `x86_arch_exec_spec() == x86_arch_exec()`, where the second function call is an exec function. +// Thus the axiom is an assumed postcondition on the exec function itself. +// - In addition to adding the postcondition, we also need a separate axiom to show that the view +// of x86_arch_exec_spec is the same as x86_arch_spec. This is provable but only with the +// postconditions on x86_arch_exec, which is an exec function. Consequently we can't use that +// postcondition in proof mode. +// - All this mess should go away as soon as we can make that exec function the constant it ought +// to be. +pub open spec fn x86_arch_exec_spec() -> ArchExec; + +#[verifier(external_body)] +pub proof fn axiom_x86_arch_exec_spec() + ensures + x86_arch_exec_spec()@ == x86_arch_spec, +; + +pub exec fn x86_arch_exec() -> (res: ArchExec) + ensures + res.layers@ + == seq![ ArchLayerExec { entry_size: L0_ENTRY_SIZE, num_entries: 512 }, ArchLayerExec { entry_size: L1_ENTRY_SIZE, num_entries: 512 }, ArchLayerExec { entry_size: L2_ENTRY_SIZE, num_entries: 512 }, ArchLayerExec { entry_size: L3_ENTRY_SIZE, num_entries: 512 }, ], - res@ === x86_arch_spec, - res === x86_arch_exec_spec(), - { - // Can we somehow just initialize an immutable vec directly? Verus panics when I try do so - // (unless the function is external_body). - let mut v = Vec::new(); - v.push(ArchLayerExec { entry_size: L0_ENTRY_SIZE, num_entries: 512 }); - v.push(ArchLayerExec { entry_size: L1_ENTRY_SIZE, num_entries: 512 }); - v.push(ArchLayerExec { entry_size: L2_ENTRY_SIZE, num_entries: 512 }); - v.push(ArchLayerExec { entry_size: L3_ENTRY_SIZE, num_entries: 512 }); - let res = ArchExec { - layers: v, - }; - proof { - assert_seqs_equal!(res@.layers, x86_arch_spec.layers); - // This is an axiom to establish the equivalence with x86_arch_exec_spec; See comments - // further up for explanation why this workaround is necessary. - assume(res === x86_arch_exec_spec()); - } - res + res@ === x86_arch_spec, + res === x86_arch_exec_spec(), +{ + // Can we somehow just initialize an immutable vec directly? Verus panics when I try do so + // (unless the function is external_body). 
+ let mut v = Vec::new(); + v.push(ArchLayerExec { entry_size: L0_ENTRY_SIZE, num_entries: 512 }); + v.push(ArchLayerExec { entry_size: L1_ENTRY_SIZE, num_entries: 512 }); + v.push(ArchLayerExec { entry_size: L2_ENTRY_SIZE, num_entries: 512 }); + v.push(ArchLayerExec { entry_size: L3_ENTRY_SIZE, num_entries: 512 }); + let res = ArchExec { layers: v }; + proof { + assert_seqs_equal!(res@.layers, x86_arch_spec.layers); + // This is an axiom to establish the equivalence with x86_arch_exec_spec; See comments + // further up for explanation why this workaround is necessary. + assume(res === x86_arch_exec_spec()); } + res +} - pub spec const x86_arch_spec: Arch = Arch { - layers: seq![ +pub spec const x86_arch_spec: Arch = Arch { + layers: + seq![ ArchLayer { entry_size: L0_ENTRY_SIZE as nat, num_entries: 512 }, ArchLayer { entry_size: L1_ENTRY_SIZE as nat, num_entries: 512 }, ArchLayer { entry_size: L2_ENTRY_SIZE as nat, num_entries: 512 }, ArchLayer { entry_size: L3_ENTRY_SIZE as nat, num_entries: 512 }, ], - }; +}; - } +} // verus! } pub mod definitions_u { @@ -6496,265 +9407,284 @@ pub mod definitions_u { }; verus! { - pub proof fn lemma_maxphyaddr_facts() - ensures 0xFFFFFFFF <= MAX_PHYADDR <= 0xFFFFFFFFFFFFF - { - axiom_max_phyaddr_width_facts(); - assert(1u64 << 32 == 0x100000000) by (compute); - assert(1u64 << 52 == 0x10000000000000) by (compute); - assert(forall|m:u64,n:u64| n < m < 64 ==> 1u64 << n < 1u64 << m) by (bit_vector); + +pub proof fn lemma_maxphyaddr_facts() + ensures + 0xFFFFFFFF <= MAX_PHYADDR <= 0xFFFFFFFFFFFFF, +{ + axiom_max_phyaddr_width_facts(); + assert(1u64 << 32 == 0x100000000) by (compute); + assert(1u64 << 52 == 0x10000000000000) by (compute); + assert(forall|m: u64, n: u64| n < m < 64 ==> 1u64 << n < 1u64 << m) by (bit_vector); +} + +pub proof fn lemma_new_seq(i: nat, e: T) + ensures + new_seq(i, e).len() == i, + forall|j: nat| j < i ==> new_seq(i, e).index(j as int) === e, + decreases i, +{ + if i == 0 { + } else { + lemma_new_seq::((i - 1) as nat, e); } +} - pub proof fn lemma_new_seq(i: nat, e: T) - ensures - new_seq(i, e).len() == i, - forall|j: nat| j < i ==> new_seq(i, e).index(j as int) === e, - decreases i - { - if i == 0 { - } else { - lemma_new_seq::((i-1) as nat, e); - } +pub exec fn aligned_exec(addr: usize, size: usize) -> (res: bool) + requires + size > 0, + ensures + res == aligned(addr as nat, size as nat), +{ + addr % size == 0 +} + +/// We always set permissive flags on directories. Restrictions happen on the frame mapping. 
+pub spec const permissive_flags: Flags = Flags { + is_writable: true, + is_supervisor: false, + disable_execute: false, +}; + +// Sometimes z3 needs these concrete bounds to prove the no-overflow VC +pub proof fn overflow_bounds() + ensures + X86_MAX_ENTRY_SIZE * (X86_NUM_ENTRIES + 1) < 0x10000000000000000, + MAX_BASE + X86_MAX_ENTRY_SIZE * (X86_NUM_ENTRIES + 1) < 0x10000000000000000, +{ + assert(X86_MAX_ENTRY_SIZE * (X86_NUM_ENTRIES + 1) < 0x10000000000000000) by (nonlinear_arith); + assert(MAX_BASE + X86_MAX_ENTRY_SIZE * (X86_NUM_ENTRIES + 1) < 0x10000000000000000) + by (nonlinear_arith); +} + +// Architecture +// page_size, next_sizes +// 2**40 , [ 2 ** 30, 2 ** 20 ] +// 2**30 , [ 2 ** 20 ] +// 2**20 , [ ] +// [es0 # es1 , es2 , es3 ] // entry_size +// [1T # 1G , 1M , 1K ] // pages mapped at this level are this size <-- +// [n0 # n1 , n2 , n3 ] // number_of_entries +// [1 # 1024, 1024, 1024] +// es1 == es0 / n1 -- n1 * es1 == es0 +// es2 == es1 / n2 -- n2 * es2 == es1 +// es3 == es2 / n3 -- n3 * es3 == es2 +// [es0 # es1 , es2 , es3 , es4 ] // entry_size +// [256T # 512G, 1G , 2M , 4K ] +// [n0 # n1 , n2 , n3 , n4 ] // number_of_entries +// [ # 512 , 512 , 512 , 512 ] +// [ # 9 , 9 , 9 , 9 , 12 ] +use crate::definitions_t::{ + Arch, + ArchLayer, + MAX_BASE, + X86_MAX_ENTRY_SIZE, + X86_NUM_ENTRIES, + x86_arch_spec, + X86_NUM_LAYERS, +}; + +impl Clone for ArchLayerExec { + fn clone(&self) -> Self { + ArchLayerExec { entry_size: self.entry_size, num_entries: self.num_entries } + } +} + +impl ArchLayerExec { + pub open spec fn view(self) -> ArchLayer { + ArchLayer { entry_size: self.entry_size as nat, num_entries: self.num_entries as nat } + } +} + +impl ArchExec { + pub open spec fn view(self) -> Arch { + Arch { layers: self.layers@.map(|i: int, l: ArchLayerExec| l@) } } - pub exec fn aligned_exec(addr: usize, size: usize) -> (res: bool) + pub fn entry_size(&self, layer: usize) -> (res: usize) requires - size > 0 + layer < self@.layers.len(), ensures - res == aligned(addr as nat, size as nat) + res == self@.entry_size(layer as nat), { - addr % size == 0 + self.layers[layer].entry_size } - /// We always set permissive flags on directories. Restrictions happen on the frame mapping. 
- pub spec const permissive_flags: Flags = Flags { - is_writable: true, - is_supervisor: false, - disable_execute: false, - }; - - // Sometimes z3 needs these concrete bounds to prove the no-overflow VC - pub proof fn overflow_bounds() + pub fn num_entries(&self, layer: usize) -> (res: usize) + requires + layer < self@.layers.len(), ensures - X86_MAX_ENTRY_SIZE * (X86_NUM_ENTRIES + 1) < 0x10000000000000000, - MAX_BASE + X86_MAX_ENTRY_SIZE * (X86_NUM_ENTRIES + 1) < 0x10000000000000000, + res == self@.num_entries(layer as nat), { - assert(X86_MAX_ENTRY_SIZE * (X86_NUM_ENTRIES + 1) < 0x10000000000000000) by (nonlinear_arith); - assert(MAX_BASE + X86_MAX_ENTRY_SIZE * (X86_NUM_ENTRIES + 1) < 0x10000000000000000) by (nonlinear_arith); + self.layers[layer].num_entries } - // Architecture - - // page_size, next_sizes - // 2**40 , [ 2 ** 30, 2 ** 20 ] - // 2**30 , [ 2 ** 20 ] - // 2**20 , [ ] - - // [es0 # es1 , es2 , es3 ] // entry_size - // [1T # 1G , 1M , 1K ] // pages mapped at this level are this size <-- - - // [n0 # n1 , n2 , n3 ] // number_of_entries - // [1 # 1024, 1024, 1024] - - // es1 == es0 / n1 -- n1 * es1 == es0 - // es2 == es1 / n2 -- n2 * es2 == es1 - // es3 == es2 / n3 -- n3 * es3 == es2 - - // [es0 # es1 , es2 , es3 , es4 ] // entry_size - // [256T # 512G, 1G , 2M , 4K ] - // [n0 # n1 , n2 , n3 , n4 ] // number_of_entries - // [ # 512 , 512 , 512 , 512 ] - // [ # 9 , 9 , 9 , 9 , 12 ] - - use crate::definitions_t::{Arch, ArchLayer, MAX_BASE, X86_MAX_ENTRY_SIZE, X86_NUM_ENTRIES, x86_arch_spec, X86_NUM_LAYERS}; - - impl Clone for ArchLayerExec { - fn clone(&self) -> Self { - ArchLayerExec { - entry_size: self.entry_size, - num_entries: self.num_entries, - } - } + pub fn index_for_vaddr(&self, layer: usize, base: usize, vaddr: usize) -> (res: usize) + requires + self@.inv(), + layer < self@.layers.len(), + vaddr >= base, + ensures + res == self@.index_for_vaddr(layer as nat, base as nat, vaddr as nat), + res == crate::definitions_t::index_from_base_and_addr( + base as nat, + vaddr as nat, + self@.entry_size(layer as nat), + ), + { + let es = self.entry_size(layer); + let offset = vaddr - base; + let res = offset / es; + assert(res as nat == offset as nat / es as nat) by (nonlinear_arith) + requires + res == offset / es, + 0 < es as int, + {}; + res } - impl ArchLayerExec { - pub open spec fn view(self) -> ArchLayer { - ArchLayer { - entry_size: self.entry_size as nat, - num_entries: self.num_entries as nat, - } - } + #[verifier(nonlinear)] + pub fn entry_base(&self, layer: usize, base: usize, idx: usize) -> (res: usize) + requires + self@.inv(), + layer < self@.layers.len(), + base <= MAX_BASE, + idx <= X86_NUM_ENTRIES, + ensures + res == self@.entry_base(layer as nat, base as nat, idx as nat), + { + proof { + // FIXME: Weird error message when using the spec const here + // lib::mult_leq_mono_both(idx as nat, self@.entry_size(layer as nat), X86_NUM_ENTRIES as nat, X86_MAX_ENTRY_SIZE); + crate::extra::mult_leq_mono_both( + idx as nat, + self@.entry_size(layer as nat), + X86_NUM_ENTRIES as nat, + 512 * 1024 * 1024 * 1024, + ); + } + base + idx * self.entry_size(layer) } - impl ArchExec { - pub open spec fn view(self) -> Arch { - Arch { - layers: self.layers@.map(|i: int, l: ArchLayerExec| l@), - } - } - - pub fn entry_size(&self, layer: usize) -> (res: usize) - requires layer < self@.layers.len() - ensures res == self@.entry_size(layer as nat) - { - self.layers[layer].entry_size - } - - pub fn num_entries(&self, layer: usize) -> (res: usize) - requires layer < self@.layers.len() - 
ensures res == self@.num_entries(layer as nat) - { - self.layers[layer].num_entries - } - - pub fn index_for_vaddr(&self, layer: usize, base: usize, vaddr: usize) -> (res: usize) - requires - self@.inv(), - layer < self@.layers.len(), - vaddr >= base, - ensures - res == self@.index_for_vaddr(layer as nat, base as nat, vaddr as nat), - res == crate::definitions_t::index_from_base_and_addr(base as nat, vaddr as nat, self@.entry_size(layer as nat)), - { - let es = self.entry_size(layer); - let offset = vaddr - base; - let res = offset / es; - assert(res as nat == offset as nat / es as nat) by (nonlinear_arith) + pub fn next_entry_base(&self, layer: usize, base: usize, idx: usize) -> (res: usize) + requires + self@.inv(), + layer < self@.layers.len(), + base <= MAX_BASE, + idx <= X86_NUM_ENTRIES, + ensures + res == self@.next_entry_base(layer as nat, base as nat, idx as nat), + { + proof { + overflow_bounds(); + let es = self@.entry_size(layer as nat); + assert(0 <= (idx + 1) * es <= X86_MAX_ENTRY_SIZE * (X86_NUM_ENTRIES + 1)) + by (nonlinear_arith) requires - res == offset / es, - 0 < es as int, - { }; - res - } - - #[verifier(nonlinear)] - pub fn entry_base(&self, layer: usize, base: usize, idx: usize) -> (res: usize) - requires - self@.inv(), - layer < self@.layers.len(), - base <= MAX_BASE, - idx <= X86_NUM_ENTRIES, - ensures - res == self@.entry_base(layer as nat, base as nat, idx as nat) - { - proof { - // FIXME: Weird error message when using the spec const here - // lib::mult_leq_mono_both(idx as nat, self@.entry_size(layer as nat), X86_NUM_ENTRIES as nat, X86_MAX_ENTRY_SIZE); - crate::extra::mult_leq_mono_both(idx as nat, self@.entry_size(layer as nat), X86_NUM_ENTRIES as nat, 512 * 1024 * 1024 * 1024); - } - base + idx * self.entry_size(layer) + es <= X86_MAX_ENTRY_SIZE, + idx <= X86_NUM_ENTRIES, + { /* New instability with z3 4.10.1 */ + }; } - - pub fn next_entry_base(&self, layer: usize, base: usize, idx: usize) -> (res: usize) - requires - self@.inv(), - layer < self@.layers.len(), - base <= MAX_BASE, - idx <= X86_NUM_ENTRIES, - ensures - res == self@.next_entry_base(layer as nat, base as nat, idx as nat) - { - proof { - overflow_bounds(); - let es = self@.entry_size(layer as nat); - assert(0 <= (idx + 1) * es <= X86_MAX_ENTRY_SIZE * (X86_NUM_ENTRIES + 1)) by (nonlinear_arith) - requires es <= X86_MAX_ENTRY_SIZE, idx <= X86_NUM_ENTRIES - { /* New instability with z3 4.10.1 */ }; - } - let offset = (idx + 1) * self.entry_size(layer); - proof { - assert(base + offset <= MAX_BASE + X86_MAX_ENTRY_SIZE * (X86_NUM_ENTRIES + 1)) by (nonlinear_arith) - requires - 0 <= offset <= X86_MAX_ENTRY_SIZE * (X86_NUM_ENTRIES + 1), - 0 <= base <= MAX_BASE, - {}; - } - base + offset + let offset = (idx + 1) * self.entry_size(layer); + proof { + assert(base + offset <= MAX_BASE + X86_MAX_ENTRY_SIZE * (X86_NUM_ENTRIES + 1)) + by (nonlinear_arith) + requires + 0 <= offset <= X86_MAX_ENTRY_SIZE * (X86_NUM_ENTRIES + 1), + 0 <= base <= MAX_BASE, + {}; } + base + offset } +} - impl Arch { - pub proof fn lemma_entry_sizes_aligned(self, i: nat, j: nat) - requires - self.inv(), - i <= j, - j < self.layers.len(), - ensures - aligned(self.entry_size(i), self.entry_size(j)) - decreases self.layers.len() - i - { - if i == j { - assert(aligned(self.entry_size(i), self.entry_size(j))) by (nonlinear_arith) - requires i == j, self.entry_size(i) > 0, - { }; - } else { - assert(forall|a: int, b: int| #[trigger] (a * b) == b * a); - self.lemma_entry_sizes_aligned(i+1,j); - crate::extra::mod_of_mul_auto(); - 
crate::extra::aligned_transitive_auto(); - assert(aligned(self.entry_size(i), self.entry_size(j))); - } - } - - pub proof fn lemma_entry_sizes_aligned_auto(self) - ensures - forall|i: nat, j: nat| - self.inv() && i <= j && j < self.layers.len() ==> - aligned(self.entry_size(i), self.entry_size(j)) - { - assert_forall_by(|i: nat, j: nat| { - requires(self.inv() && i <= j && j < self.layers.len()); - ensures(aligned(self.entry_size(i), self.entry_size(j))); - self.lemma_entry_sizes_aligned(i, j); - }); - } - - pub proof fn lemma_entry_sizes_increase(self, i: nat, j: nat) - requires - self.inv(), - i < j, - j < self.layers.len(), - ensures - self.entry_size(i) >= self.entry_size(j), - decreases j - i - { - assert(self.entry_size(i) >= self.entry_size(i + 1)) - by (nonlinear_arith) +impl Arch { + pub proof fn lemma_entry_sizes_aligned(self, i: nat, j: nat) + requires + self.inv(), + i <= j, + j < self.layers.len(), + ensures + aligned(self.entry_size(i), self.entry_size(j)), + decreases self.layers.len() - i, + { + if i == j { + assert(aligned(self.entry_size(i), self.entry_size(j))) by (nonlinear_arith) requires - i + 1 < self.layers.len(), - self.entry_size_is_next_layer_size(i), - self.num_entries(i + 1) > 0, - { }; - if j == i + 1 { - } else { - self.lemma_entry_sizes_increase(i + 1, j); - - } + i == j, + self.entry_size(i) > 0, + {}; + } else { + assert(forall|a: int, b: int| #[trigger] (a * b) == b * a); + self.lemma_entry_sizes_aligned(i + 1, j); + crate::extra::mod_of_mul_auto(); + crate::extra::aligned_transitive_auto(); + assert(aligned(self.entry_size(i), self.entry_size(j))); } } - #[verifier(nonlinear)] - pub proof fn x86_arch_inv() + pub proof fn lemma_entry_sizes_aligned_auto(self) ensures - x86_arch_spec.inv() + forall|i: nat, j: nat| + self.inv() && i <= j && j < self.layers.len() ==> aligned( + self.entry_size(i), + self.entry_size(j), + ), { - assert(x86_arch_spec.entry_size(3) == 4096); - assert(x86_arch_spec.contains_entry_size(4096)); - assert(x86_arch_spec.layers.len() <= X86_NUM_LAYERS); - assert forall|i:nat| i < x86_arch_spec.layers.len() implies { - &&& 0 < #[trigger] x86_arch_spec.entry_size(i) <= X86_MAX_ENTRY_SIZE - &&& 0 < #[trigger] x86_arch_spec.num_entries(i) <= X86_NUM_ENTRIES - &&& x86_arch_spec.entry_size_is_next_layer_size(i) - } by { - assert(0 < #[trigger] x86_arch_spec.entry_size(i) <= X86_MAX_ENTRY_SIZE); - assert(0 < #[trigger] x86_arch_spec.num_entries(i) <= X86_NUM_ENTRIES); - assert(x86_arch_spec.entry_size_is_next_layer_size(i)); - } - assert(x86_arch_spec.inv()); + assert_forall_by( + |i: nat, j: nat| + { + requires(self.inv() && i <= j && j < self.layers.len()); + ensures(aligned(self.entry_size(i), self.entry_size(j))); + self.lemma_entry_sizes_aligned(i, j); + }, + ); } + pub proof fn lemma_entry_sizes_increase(self, i: nat, j: nat) + requires + self.inv(), + i < j, + j < self.layers.len(), + ensures + self.entry_size(i) >= self.entry_size(j), + decreases j - i, + { + assert(self.entry_size(i) >= self.entry_size(i + 1)) by (nonlinear_arith) + requires + i + 1 < self.layers.len(), + self.entry_size_is_next_layer_size(i), + self.num_entries(i + 1) > 0, + {}; + if j == i + 1 { + } else { + self.lemma_entry_sizes_increase(i + 1, j); + } + } +} +#[verifier(nonlinear)] +pub proof fn x86_arch_inv() + ensures + x86_arch_spec.inv(), +{ + assert(x86_arch_spec.entry_size(3) == 4096); + assert(x86_arch_spec.contains_entry_size(4096)); + assert(x86_arch_spec.layers.len() <= X86_NUM_LAYERS); + assert forall|i: nat| i < x86_arch_spec.layers.len() implies { + 
&&& 0 < #[trigger] x86_arch_spec.entry_size(i) <= X86_MAX_ENTRY_SIZE + &&& 0 < #[trigger] x86_arch_spec.num_entries(i) <= X86_NUM_ENTRIES + &&& x86_arch_spec.entry_size_is_next_layer_size(i) + } by { + assert(0 < #[trigger] x86_arch_spec.entry_size(i) <= X86_MAX_ENTRY_SIZE); + assert(0 < #[trigger] x86_arch_spec.num_entries(i) <= X86_NUM_ENTRIES); + assert(x86_arch_spec.entry_size_is_next_layer_size(i)); } + assert(x86_arch_spec.inv()); +} + +} // verus! } pub mod spec_t { @@ -6789,236 +9719,314 @@ pub mod spec_t { verus! { - pub struct AbstractConstants { - pub phys_mem_size: nat, - } +pub struct AbstractConstants { + pub phys_mem_size: nat, +} - pub struct AbstractVariables { - /// Word-indexed virtual memory - pub mem: Map, - /// `mappings` constrains the domain of mem and tracks the flags. We could instead move the - /// flags into `map` as well and write the specification exclusively in terms of `map` but that - /// also makes some of the enabling conditions awkward, e.g. full mappings have the same flags, etc. - pub mappings: Map, - } +pub struct AbstractVariables { + /// Word-indexed virtual memory + pub mem: Map, + /// `mappings` constrains the domain of mem and tracks the flags. We could instead move the + /// flags into `map` as well and write the specification exclusively in terms of `map` but that + /// also makes some of the enabling conditions awkward, e.g. full mappings have the same flags, etc. + pub mappings: Map, +} - pub enum AbstractStep { - ReadWrite { vaddr: nat, op: RWOp, pte: Option<(nat, PageTableEntry)> }, - Map { vaddr: nat, pte: PageTableEntry, result: MapResult }, - Unmap { vaddr: nat, result: UnmapResult }, - Resolve { vaddr: nat, result: ResolveResult }, - Stutter, - } +pub enum AbstractStep { + ReadWrite { vaddr: nat, op: RWOp, pte: Option<(nat, PageTableEntry)> }, + Map { vaddr: nat, pte: PageTableEntry, result: MapResult }, + Unmap { vaddr: nat, result: UnmapResult }, + Resolve { vaddr: nat, result: ResolveResult }, + Stutter, +} + +pub open spec fn init(s: AbstractVariables) -> bool { + &&& s.mem === Map::empty() + &&& s.mappings === Map::empty() +} - pub open spec fn init(s: AbstractVariables) -> bool { - &&& s.mem === Map::empty() - &&& s.mappings === Map::empty() +pub open spec fn mem_domain_from_mappings_contains( + phys_mem_size: nat, + word_idx: nat, + mappings: Map, +) -> bool { + let vaddr = word_idx * WORD_SIZE as nat; + exists|base: nat, pte: PageTableEntry| + { + let paddr = (pte.frame.base + (vaddr - base)) as nat; + let pmem_idx = word_index_spec(paddr); + &&& #[trigger] mappings.contains_pair(base, pte) + &&& between(vaddr, base, base + pte.frame.size) + &&& pmem_idx < phys_mem_size } +} + +pub open spec fn mem_domain_from_mappings( + phys_mem_size: nat, + mappings: Map, +) -> Set { + Set::new(|word_idx: nat| mem_domain_from_mappings_contains(phys_mem_size, word_idx, mappings)) +} - pub open spec fn mem_domain_from_mappings_contains(phys_mem_size: nat, word_idx: nat, mappings: Map) -> bool { - let vaddr = word_idx * WORD_SIZE as nat; - exists|base: nat, pte: PageTableEntry| { +pub proof fn lemma_mem_domain_from_mappings( + phys_mem_size: nat, + mappings: Map, + base: nat, + pte: PageTableEntry, +) + requires + !mappings.dom().contains(base), + ensures + (forall|word_idx: nat| + mem_domain_from_mappings_contains(phys_mem_size, word_idx, mappings) + ==> #[trigger] mem_domain_from_mappings_contains( + phys_mem_size, + word_idx, + mappings.insert(base, pte), + )), + (forall|word_idx: nat| + !mem_domain_from_mappings_contains(phys_mem_size, 
word_idx, mappings) + && #[trigger] mem_domain_from_mappings_contains( + phys_mem_size, + word_idx, + mappings.insert(base, pte), + ) ==> between(word_idx * WORD_SIZE as nat, base, base + pte.frame.size)), +{ + assert forall|word_idx: nat| + mem_domain_from_mappings_contains( + phys_mem_size, + word_idx, + mappings, + ) implies #[trigger] mem_domain_from_mappings_contains( + phys_mem_size, + word_idx, + mappings.insert(base, pte), + ) by { + let vaddr = word_idx * WORD_SIZE as nat; + let (base2, pte2) = choose|base: nat, pte: PageTableEntry| + { let paddr = (pte.frame.base + (vaddr - base)) as nat; let pmem_idx = word_index_spec(paddr); &&& #[trigger] mappings.contains_pair(base, pte) &&& between(vaddr, base, base + pte.frame.size) &&& pmem_idx < phys_mem_size - } - } - - pub open spec fn mem_domain_from_mappings(phys_mem_size: nat, mappings: Map) -> Set { - Set::new(|word_idx: nat| mem_domain_from_mappings_contains(phys_mem_size, word_idx, mappings)) - } - - pub proof fn lemma_mem_domain_from_mappings(phys_mem_size: nat, mappings: Map, base: nat, pte: PageTableEntry) - requires - !mappings.dom().contains(base) - ensures - (forall|word_idx: nat| - mem_domain_from_mappings_contains(phys_mem_size, word_idx, mappings) - ==> #[trigger] mem_domain_from_mappings_contains(phys_mem_size, word_idx, mappings.insert(base, pte))), - (forall|word_idx: nat| - !mem_domain_from_mappings_contains(phys_mem_size, word_idx, mappings) - && #[trigger] mem_domain_from_mappings_contains(phys_mem_size, word_idx, mappings.insert(base, pte)) - ==> between(word_idx * WORD_SIZE as nat, base, base + pte.frame.size)), - { - assert forall|word_idx: nat| - mem_domain_from_mappings_contains(phys_mem_size, word_idx, mappings) - implies #[trigger] mem_domain_from_mappings_contains(phys_mem_size, word_idx, mappings.insert(base, pte)) by - { - let vaddr = word_idx * WORD_SIZE as nat; - let (base2, pte2) = choose|base: nat, pte: PageTableEntry| { - let paddr = (pte.frame.base + (vaddr - base)) as nat; - let pmem_idx = word_index_spec(paddr); - &&& #[trigger] mappings.contains_pair(base, pte) - &&& between(vaddr, base, base + pte.frame.size) - &&& pmem_idx < phys_mem_size - }; - assert(mappings.insert(base, pte).contains_pair(base2, pte2)); }; - assert forall|word_idx: nat| - !mem_domain_from_mappings_contains(phys_mem_size, word_idx, mappings) - && #[trigger] mem_domain_from_mappings_contains(phys_mem_size, word_idx, mappings.insert(base, pte)) - implies between(word_idx * WORD_SIZE as nat, base, base + pte.frame.size) by + assert(mappings.insert(base, pte).contains_pair(base2, pte2)); + }; + assert forall|word_idx: nat| + !mem_domain_from_mappings_contains(phys_mem_size, word_idx, mappings) + && #[trigger] mem_domain_from_mappings_contains( + phys_mem_size, + word_idx, + mappings.insert(base, pte), + ) implies between(word_idx * WORD_SIZE as nat, base, base + pte.frame.size) by { + let vaddr = word_idx * WORD_SIZE as nat; + let (base2, pte2) = choose|base2: nat, pte2: PageTableEntry| { - let vaddr = word_idx * WORD_SIZE as nat; - let (base2, pte2) = choose|base2: nat, pte2: PageTableEntry| { - let paddr = (pte2.frame.base + (vaddr - base2)) as nat; - let pmem_idx = word_index_spec(paddr); - &&& #[trigger] mappings.insert(base, pte).contains_pair(base2, pte2) - &&& between(vaddr, base2, base2 + pte2.frame.size) - &&& pmem_idx < phys_mem_size - }; - assert(mappings.insert(base, pte).contains_pair(base2, pte2)); - assert(between(vaddr, base2, base2 + pte2.frame.size)); - if !between(vaddr, base, base + pte.frame.size) { - 
assert(base2 != base || pte2 !== pte); - if base2 != base { - assert(mappings.contains_pair(base2, pte2)); - assert(mem_domain_from_mappings_contains(phys_mem_size, word_idx, mappings)); - } - assert(false); - } else { - } + let paddr = (pte2.frame.base + (vaddr - base2)) as nat; + let pmem_idx = word_index_spec(paddr); + &&& #[trigger] mappings.insert(base, pte).contains_pair(base2, pte2) + &&& between(vaddr, base2, base2 + pte2.frame.size) + &&& pmem_idx < phys_mem_size }; + assert(mappings.insert(base, pte).contains_pair(base2, pte2)); + assert(between(vaddr, base2, base2 + pte2.frame.size)); + if !between(vaddr, base, base + pte.frame.size) { + assert(base2 != base || pte2 !== pte); + if base2 != base { + assert(mappings.contains_pair(base2, pte2)); + assert(mem_domain_from_mappings_contains(phys_mem_size, word_idx, mappings)); + } + assert(false); + } else { } + }; +} - pub open spec fn step_ReadWrite(c: AbstractConstants, s1: AbstractVariables, s2: AbstractVariables, vaddr: nat, op: RWOp, pte: Option<(nat, PageTableEntry)>) -> bool { - let vmem_idx = word_index_spec(vaddr); - &&& aligned(vaddr, 8) - &&& s2.mappings === s1.mappings - &&& match pte { - Some((base, pte)) => { - let paddr = (pte.frame.base + (vaddr - base)) as nat; - let pmem_idx = word_index_spec(paddr); - // If pte is Some, it's an existing mapping that contains vaddr.. - &&& s1.mappings.contains_pair(base, pte) - &&& between(vaddr, base, base + pte.frame.size) - // .. and the result depends on the flags. - &&& match op { - RWOp::Store { new_value, result } => { - if pmem_idx < c.phys_mem_size && !pte.flags.is_supervisor && pte.flags.is_writable { - &&& result.is_Ok() - &&& s2.mem === s1.mem.insert(vmem_idx, new_value) - } else { - &&& result.is_Pagefault() - &&& s2.mem === s1.mem - } - }, - RWOp::Load { is_exec, result } => { - &&& s2.mem === s1.mem - &&& if pmem_idx < c.phys_mem_size && !pte.flags.is_supervisor && (is_exec ==> !pte.flags.disable_execute) { - &&& result.is_Value() - &&& result.get_Value_0() == s1.mem.index(vmem_idx) - } else { - &&& result.is_Pagefault() - } - }, +pub open spec fn step_ReadWrite( + c: AbstractConstants, + s1: AbstractVariables, + s2: AbstractVariables, + vaddr: nat, + op: RWOp, + pte: Option<(nat, PageTableEntry)>, +) -> bool { + let vmem_idx = word_index_spec(vaddr); + &&& aligned(vaddr, 8) + &&& s2.mappings === s1.mappings + &&& match pte { + Some((base, pte)) => { + let paddr = (pte.frame.base + (vaddr - base)) as nat; + let pmem_idx = word_index_spec(paddr); + // If pte is Some, it's an existing mapping that contains vaddr.. + &&& s1.mappings.contains_pair(base, pte) + &&& between( + vaddr, + base, + base + pte.frame.size, + ) + // .. and the result depends on the flags. + + &&& match op { + RWOp::Store { new_value, result } => { + if pmem_idx < c.phys_mem_size && !pte.flags.is_supervisor + && pte.flags.is_writable { + &&& result.is_Ok() + &&& s2.mem === s1.mem.insert(vmem_idx, new_value) + } else { + &&& result.is_Pagefault() + &&& s2.mem === s1.mem } }, - None => { - // If pte is None, no mapping containing vaddr exists.. - &&& !mem_domain_from_mappings(c.phys_mem_size, s1.mappings).contains(vmem_idx) - // .. and the result is always a pagefault and an unchanged memory. 
+ RWOp::Load { is_exec, result } => { &&& s2.mem === s1.mem - &&& match op { - RWOp::Store { new_value, result } => result.is_Pagefault(), - RWOp::Load { is_exec, result } => result.is_Pagefault(), + &&& if pmem_idx < c.phys_mem_size && !pte.flags.is_supervisor && (is_exec + ==> !pte.flags.disable_execute) { + &&& result.is_Value() + &&& result.get_Value_0() == s1.mem.index(vmem_idx) + } else { + &&& result.is_Pagefault() } }, } - } + }, + None => { + // If pte is None, no mapping containing vaddr exists.. + &&& !mem_domain_from_mappings(c.phys_mem_size, s1.mappings).contains( + vmem_idx, + ) + // .. and the result is always a pagefault and an unchanged memory. - pub open spec fn step_Map_enabled(map: Map, vaddr: nat, pte: PageTableEntry) -> bool { - &&& aligned(vaddr, pte.frame.size) - &&& aligned(pte.frame.base, pte.frame.size) - &&& candidate_mapping_in_bounds(vaddr, pte) - &&& { // The size of the frame must be the entry_size of a layer that supports page mappings - ||| pte.frame.size == L3_ENTRY_SIZE - ||| pte.frame.size == L2_ENTRY_SIZE - ||| pte.frame.size == L1_ENTRY_SIZE + &&& s2.mem === s1.mem + &&& match op { + RWOp::Store { new_value, result } => result.is_Pagefault(), + RWOp::Load { is_exec, result } => result.is_Pagefault(), } - &&& !candidate_mapping_overlaps_existing_pmem(map, vaddr, pte) - } + }, + } +} - pub open spec fn step_Map(c: AbstractConstants, s1: AbstractVariables, s2: AbstractVariables, vaddr: nat, pte: PageTableEntry, result: MapResult) -> bool { - &&& step_Map_enabled(s1.mappings, vaddr, pte) - &&& if candidate_mapping_overlaps_existing_vmem(s1.mappings, vaddr, pte) { - &&& result.is_ErrOverlap() - &&& s2.mappings === s1.mappings - &&& s2.mem === s1.mem - } else { - &&& result.is_Ok() - &&& s2.mappings === s1.mappings.insert(vaddr, pte) - &&& (forall|idx:nat| #![auto] s1.mem.dom().contains(idx) ==> s2.mem[idx] === s1.mem[idx]) - &&& s2.mem.dom() === mem_domain_from_mappings(c.phys_mem_size, s2.mappings) - } - } +pub open spec fn step_Map_enabled( + map: Map, + vaddr: nat, + pte: PageTableEntry, +) -> bool { + &&& aligned(vaddr, pte.frame.size) + &&& aligned(pte.frame.base, pte.frame.size) + &&& candidate_mapping_in_bounds(vaddr, pte) + &&& { // The size of the frame must be the entry_size of a layer that supports page mappings + ||| pte.frame.size == L3_ENTRY_SIZE + ||| pte.frame.size == L2_ENTRY_SIZE + ||| pte.frame.size == L1_ENTRY_SIZE + } + &&& !candidate_mapping_overlaps_existing_pmem(map, vaddr, pte) +} - pub open spec fn step_Unmap_enabled(vaddr: nat) -> bool { - &&& between(vaddr, PT_BOUND_LOW, PT_BOUND_HIGH as nat) - &&& { // The given vaddr must be aligned to some valid page size - ||| aligned(vaddr, L3_ENTRY_SIZE as nat) - ||| aligned(vaddr, L2_ENTRY_SIZE as nat) - ||| aligned(vaddr, L1_ENTRY_SIZE as nat) - } - } +pub open spec fn step_Map( + c: AbstractConstants, + s1: AbstractVariables, + s2: AbstractVariables, + vaddr: nat, + pte: PageTableEntry, + result: MapResult, +) -> bool { + &&& step_Map_enabled(s1.mappings, vaddr, pte) + &&& if candidate_mapping_overlaps_existing_vmem(s1.mappings, vaddr, pte) { + &&& result.is_ErrOverlap() + &&& s2.mappings === s1.mappings + &&& s2.mem === s1.mem + } else { + &&& result.is_Ok() + &&& s2.mappings === s1.mappings.insert(vaddr, pte) + &&& (forall|idx: nat| #![auto] s1.mem.dom().contains(idx) ==> s2.mem[idx] === s1.mem[idx]) + &&& s2.mem.dom() === mem_domain_from_mappings(c.phys_mem_size, s2.mappings) + } +} - pub open spec fn step_Unmap(c: AbstractConstants, s1: AbstractVariables, s2: AbstractVariables, 
vaddr: nat, result: UnmapResult) -> bool { - &&& step_Unmap_enabled(vaddr) - &&& if s1.mappings.dom().contains(vaddr) { - &&& result.is_Ok() - &&& s2.mappings === s1.mappings.remove(vaddr) - &&& s2.mem.dom() === mem_domain_from_mappings(c.phys_mem_size, s2.mappings) - &&& (forall|idx:nat| #![auto] s2.mem.dom().contains(idx) ==> s2.mem[idx] === s1.mem[idx]) - } else { - &&& result.is_ErrNoSuchMapping() - &&& s2.mappings === s1.mappings - &&& s2.mem === s1.mem - } - } +pub open spec fn step_Unmap_enabled(vaddr: nat) -> bool { + &&& between(vaddr, PT_BOUND_LOW, PT_BOUND_HIGH as nat) + &&& { // The given vaddr must be aligned to some valid page size + ||| aligned(vaddr, L3_ENTRY_SIZE as nat) + ||| aligned(vaddr, L2_ENTRY_SIZE as nat) + ||| aligned(vaddr, L1_ENTRY_SIZE as nat) + } +} - pub open spec fn step_Resolve_enabled(vaddr: nat) -> bool { - &&& aligned(vaddr, 8) - } +pub open spec fn step_Unmap( + c: AbstractConstants, + s1: AbstractVariables, + s2: AbstractVariables, + vaddr: nat, + result: UnmapResult, +) -> bool { + &&& step_Unmap_enabled(vaddr) + &&& if s1.mappings.dom().contains(vaddr) { + &&& result.is_Ok() + &&& s2.mappings === s1.mappings.remove(vaddr) + &&& s2.mem.dom() === mem_domain_from_mappings(c.phys_mem_size, s2.mappings) + &&& (forall|idx: nat| #![auto] s2.mem.dom().contains(idx) ==> s2.mem[idx] === s1.mem[idx]) + } else { + &&& result.is_ErrNoSuchMapping() + &&& s2.mappings === s1.mappings + &&& s2.mem === s1.mem + } +} - pub open spec fn step_Resolve(c: AbstractConstants, s1: AbstractVariables, s2: AbstractVariables, vaddr: nat, result: ResolveResult) -> bool { - &&& step_Resolve_enabled(vaddr) - &&& s2 === s1 - &&& match result { - ResolveResult::Ok(base, pte) => { - // If result is Ok, it's an existing mapping that contains vaddr.. - &&& s1.mappings.contains_pair(base, pte) - &&& between(vaddr, base, base + pte.frame.size) - }, - ResolveResult::ErrUnmapped => { - let vmem_idx = word_index_spec(vaddr); - // If result is ErrUnmapped, no mapping containing vaddr exists.. - &&& !mem_domain_from_mappings(c.phys_mem_size, s1.mappings).contains(vmem_idx) - }, - } - } +pub open spec fn step_Resolve_enabled(vaddr: nat) -> bool { + &&& aligned(vaddr, 8) +} +pub open spec fn step_Resolve( + c: AbstractConstants, + s1: AbstractVariables, + s2: AbstractVariables, + vaddr: nat, + result: ResolveResult, +) -> bool { + &&& step_Resolve_enabled(vaddr) + &&& s2 === s1 + &&& match result { + ResolveResult::Ok(base, pte) => { + // If result is Ok, it's an existing mapping that contains vaddr.. + &&& s1.mappings.contains_pair(base, pte) + &&& between(vaddr, base, base + pte.frame.size) + }, + ResolveResult::ErrUnmapped => { + let vmem_idx = word_index_spec(vaddr); + // If result is ErrUnmapped, no mapping containing vaddr exists.. 
+ &&& !mem_domain_from_mappings(c.phys_mem_size, s1.mappings).contains(vmem_idx) + }, + } +} - pub open spec fn step_Stutter(c: AbstractConstants, s1: AbstractVariables, s2: AbstractVariables) -> bool { - s1 === s2 - } +pub open spec fn step_Stutter( + c: AbstractConstants, + s1: AbstractVariables, + s2: AbstractVariables, +) -> bool { + s1 === s2 +} - pub open spec fn next_step(c: AbstractConstants, s1: AbstractVariables, s2: AbstractVariables, step: AbstractStep) -> bool { - match step { - AbstractStep::ReadWrite { vaddr, op, pte } => step_ReadWrite(c, s1, s2, vaddr, op, pte), - AbstractStep::Map { vaddr, pte, result } => step_Map(c, s1, s2, vaddr, pte, result), - AbstractStep::Unmap { vaddr, result } => step_Unmap(c, s1, s2, vaddr, result), - AbstractStep::Resolve { vaddr, result } => step_Resolve(c, s1, s2, vaddr, result), - AbstractStep::Stutter => step_Stutter(c, s1, s2), - } - } +pub open spec fn next_step( + c: AbstractConstants, + s1: AbstractVariables, + s2: AbstractVariables, + step: AbstractStep, +) -> bool { + match step { + AbstractStep::ReadWrite { vaddr, op, pte } => step_ReadWrite(c, s1, s2, vaddr, op, pte), + AbstractStep::Map { vaddr, pte, result } => step_Map(c, s1, s2, vaddr, pte, result), + AbstractStep::Unmap { vaddr, result } => step_Unmap(c, s1, s2, vaddr, result), + AbstractStep::Resolve { vaddr, result } => step_Resolve(c, s1, s2, vaddr, result), + AbstractStep::Stutter => step_Stutter(c, s1, s2), + } +} - pub open spec fn next(c: AbstractConstants, s1: AbstractVariables, s2: AbstractVariables) -> bool { - exists|step: AbstractStep| next_step(c, s1, s2, step) - } +pub open spec fn next(c: AbstractConstants, s1: AbstractVariables, s2: AbstractVariables) -> bool { + exists|step: AbstractStep| next_step(c, s1, s2, step) +} - } +} // verus! } pub mod hardware { @@ -7054,526 +10062,670 @@ pub mod spec_t { verus! 
{ - pub struct HWVariables { - /// Word-indexed physical memory - pub mem: Seq, - pub pt_mem: mem::PageTableMemory, - pub tlb: Map, - } +pub struct HWVariables { + /// Word-indexed physical memory + pub mem: Seq, + pub pt_mem: mem::PageTableMemory, + pub tlb: Map, +} - #[is_variant] - pub enum HWStep { - ReadWrite { vaddr: nat, paddr: nat, op: RWOp, pte: Option<(nat, PageTableEntry)> }, - PTMemOp, - TLBFill { vaddr: nat, pte: PageTableEntry }, - TLBEvict { vaddr: nat}, - } +#[is_variant] +pub enum HWStep { + ReadWrite { vaddr: nat, paddr: nat, op: RWOp, pte: Option<(nat, PageTableEntry)> }, + PTMemOp, + TLBFill { vaddr: nat, pte: PageTableEntry }, + TLBEvict { vaddr: nat }, +} - #[is_variant] - pub ghost enum GhostPageDirectoryEntry { - Directory { - addr: usize, - /// Present; must be 1 to map a page or reference a directory - flag_P: bool, - /// Read/write; if 0, writes may not be allowed to the page controlled by this entry - flag_RW: bool, - /// User/supervisor; user-mode accesses are not allowed to the page controlled by this entry - flag_US: bool, - /// Page-level write-through - flag_PWT: bool, - /// Page-level cache disable - flag_PCD: bool, - /// Accessed; indicates whether software has accessed the page referenced by this entry - flag_A: bool, - /// If IA32_EFER.NXE = 1, execute-disable (if 1, instruction fetches are not allowed from - /// the page controlled by this entry); otherwise, reserved (must be 0) - flag_XD: bool, - }, - Page { - addr: usize, - /// Present; must be 1 to map a page or reference a directory - flag_P: bool, - /// Read/write; if 0, writes may not be allowed to the page controlled by this entry - flag_RW: bool, - /// User/supervisor; if 0, user-mode accesses are not allowed to the page controlled by this entry - flag_US: bool, - /// Page-level write-through - flag_PWT: bool, - /// Page-level cache disable - flag_PCD: bool, - /// Accessed; indicates whether software has accessed the page referenced by this entry - flag_A: bool, - /// Dirty; indicates whether software has written to the page referenced by this entry - flag_D: bool, - // /// Page size; must be 1 (otherwise, this entry references a directory) - // flag_PS: Option, - // PS is entirely determined by the Page variant and the layer - /// Global; if CR4.PGE = 1, determines whether the translation is global; ignored otherwise - flag_G: bool, - /// Indirectly determines the memory type used to access the page referenced by this entry - flag_PAT: bool, - /// If IA32_EFER.NXE = 1, execute-disable (if 1, instruction fetches are not allowed from - /// the page controlled by this entry); otherwise, reserved (must be 0) - flag_XD: bool, - }, - /// An `Empty` entry is an entry that does not contain a valid mapping. I.e. the entry is - /// either empty or has a bit set that the intel manual designates as must-be-zero. Both empty - /// and invalid entries cause a page fault if used during translation. 
- Empty, - } +#[is_variant] +pub ghost enum GhostPageDirectoryEntry { + Directory { + addr: usize, + /// Present; must be 1 to map a page or reference a directory + flag_P: bool, + /// Read/write; if 0, writes may not be allowed to the page controlled by this entry + flag_RW: bool, + /// User/supervisor; user-mode accesses are not allowed to the page controlled by this entry + flag_US: bool, + /// Page-level write-through + flag_PWT: bool, + /// Page-level cache disable + flag_PCD: bool, + /// Accessed; indicates whether software has accessed the page referenced by this entry + flag_A: bool, + /// If IA32_EFER.NXE = 1, execute-disable (if 1, instruction fetches are not allowed from + /// the page controlled by this entry); otherwise, reserved (must be 0) + flag_XD: bool, + }, + Page { + addr: usize, + /// Present; must be 1 to map a page or reference a directory + flag_P: bool, + /// Read/write; if 0, writes may not be allowed to the page controlled by this entry + flag_RW: bool, + /// User/supervisor; if 0, user-mode accesses are not allowed to the page controlled by this entry + flag_US: bool, + /// Page-level write-through + flag_PWT: bool, + /// Page-level cache disable + flag_PCD: bool, + /// Accessed; indicates whether software has accessed the page referenced by this entry + flag_A: bool, + /// Dirty; indicates whether software has written to the page referenced by this entry + flag_D: bool, + // /// Page size; must be 1 (otherwise, this entry references a directory) + // flag_PS: Option, + // PS is entirely determined by the Page variant and the layer + /// Global; if CR4.PGE = 1, determines whether the translation is global; ignored otherwise + flag_G: bool, + /// Indirectly determines the memory type used to access the page referenced by this entry + flag_PAT: bool, + /// If IA32_EFER.NXE = 1, execute-disable (if 1, instruction fetches are not allowed from + /// the page controlled by this entry); otherwise, reserved (must be 0) + flag_XD: bool, + }, + /// An `Empty` entry is an entry that does not contain a valid mapping. I.e. the entry is + /// either empty or has a bit set that the intel manual designates as must-be-zero. Both empty + /// and invalid entries cause a page fault if used during translation. + Empty, +} +// layer: +// 0 -> PML4 +// 1 -> PDPT, Page Directory Pointer Table +// 2 -> PD, Page Directory +// 3 -> PT, Page Table +// MASK_FLAG_* are flags valid for entries at all levels. +pub const MASK_FLAG_P: u64 = bit!(0u64); - // layer: - // 0 -> PML4 - // 1 -> PDPT, Page Directory Pointer Table - // 2 -> PD, Page Directory - // 3 -> PT, Page Table - - - // MASK_FLAG_* are flags valid for entries at all levels. - pub const MASK_FLAG_P: u64 = bit!(0u64); - pub const MASK_FLAG_RW: u64 = bit!(1u64); - pub const MASK_FLAG_US: u64 = bit!(2u64); - pub const MASK_FLAG_PWT: u64 = bit!(3u64); - pub const MASK_FLAG_PCD: u64 = bit!(4u64); - pub const MASK_FLAG_A: u64 = bit!(5u64); - pub const MASK_FLAG_XD: u64 = bit!(63u64); - - // MASK_PG_FLAG_* are flags valid for all page mapping entries, unless a specialized version for that - // layer exists, e.g. for layer 3 MASK_L3_PG_FLAG_PAT is used rather than MASK_PG_FLAG_PAT. 
- pub const MASK_PG_FLAG_D: u64 = bit!(6u64); - pub const MASK_PG_FLAG_G: u64 = bit!(8u64); - pub const MASK_PG_FLAG_PAT: u64 = bit!(12u64); - - pub const MASK_L1_PG_FLAG_PS: u64 = bit!(7u64); - pub const MASK_L2_PG_FLAG_PS: u64 = bit!(7u64); - - pub const MASK_L3_PG_FLAG_PAT: u64 = bit!(7u64); - - // const MASK_DIR_REFC: u64 = bitmask_inc!(52u64,62u64); // Ignored bits for storing refcount in L3 and L2 - // const MASK_DIR_L1_REFC: u64 = bitmask_inc!(8u64,12u64); // Ignored bits for storing refcount in L1 - // const MASK_DIR_REFC_SHIFT: u64 = 52u64; - // const MASK_DIR_L1_REFC_SHIFT: u64 = 8u64; - - // In the implementation we can always use the 12:52 mask as the invariant guarantees that in the - // other cases, the lower bits are already zero anyway. - // We cannot use dual exec/spec constants here because for those Verus currently doesn't support - // manually guiding the no-overflow proofs. - pub spec const MASK_ADDR_SPEC: u64 = bitmask_inc!(12u64, MAX_PHYADDR_WIDTH - 1); - #[verifier::when_used_as_spec(MASK_ADDR_SPEC)] - pub exec const MASK_ADDR: u64 ensures MASK_ADDR == MASK_ADDR_SPEC { - axiom_max_phyaddr_width_facts(); - bitmask_inc!(12u64, MAX_PHYADDR_WIDTH - 1) - } +pub const MASK_FLAG_RW: u64 = bit!(1u64); - pub spec const MASK_L1_PG_ADDR_SPEC: u64 = bitmask_inc!(30u64, MAX_PHYADDR_WIDTH - 1); - #[verifier::when_used_as_spec(MASK_L1_PG_ADDR_SPEC)] - pub exec const MASK_L1_PG_ADDR: u64 ensures MASK_L1_PG_ADDR == MASK_L1_PG_ADDR_SPEC { - axiom_max_phyaddr_width_facts(); - bitmask_inc!(30u64, MAX_PHYADDR_WIDTH - 1) - } +pub const MASK_FLAG_US: u64 = bit!(2u64); - pub spec const MASK_L2_PG_ADDR_SPEC: u64 = bitmask_inc!(21u64, MAX_PHYADDR_WIDTH - 1); - #[verifier::when_used_as_spec(MASK_L2_PG_ADDR_SPEC)] - pub exec const MASK_L2_PG_ADDR: u64 ensures MASK_L2_PG_ADDR == MASK_L2_PG_ADDR_SPEC { - axiom_max_phyaddr_width_facts(); - bitmask_inc!(21u64, MAX_PHYADDR_WIDTH - 1) - } +pub const MASK_FLAG_PWT: u64 = bit!(3u64); - pub spec const MASK_L3_PG_ADDR_SPEC: u64 = bitmask_inc!(12u64, MAX_PHYADDR_WIDTH - 1); - #[verifier::when_used_as_spec(MASK_L3_PG_ADDR_SPEC)] - pub exec const MASK_L3_PG_ADDR: u64 ensures MASK_L3_PG_ADDR == MASK_L3_PG_ADDR_SPEC{ - axiom_max_phyaddr_width_facts(); - bitmask_inc!(12u64, MAX_PHYADDR_WIDTH - 1) - } +pub const MASK_FLAG_PCD: u64 = bit!(4u64); - pub spec const MASK_DIR_ADDR_SPEC: u64 = MASK_ADDR; - #[verifier::when_used_as_spec(MASK_DIR_ADDR_SPEC)] - pub exec const MASK_DIR_ADDR: u64 ensures MASK_DIR_ADDR == MASK_DIR_ADDR_SPEC { - MASK_ADDR - } +pub const MASK_FLAG_A: u64 = bit!(5u64); +pub const MASK_FLAG_XD: u64 = bit!(63u64); - #[allow(repr_transparent_external_private_fields)] - // An entry in any page directory (i.e. in PML4, PDPT, PD or PT) - #[repr(transparent)] - pub struct PageDirectoryEntry { - pub entry: u64, - pub layer: Ghost, - } +// MASK_PG_FLAG_* are flags valid for all page mapping entries, unless a specialized version for that +// layer exists, e.g. for layer 3 MASK_L3_PG_FLAG_PAT is used rather than MASK_PG_FLAG_PAT. +pub const MASK_PG_FLAG_D: u64 = bit!(6u64); +pub const MASK_PG_FLAG_G: u64 = bit!(8u64); - // This impl defines everything necessary for the page table walk semantics. - // PageDirectoryEntry is reused in the implementation, which has an additional impl block for it in - // `impl_u::l2_impl`. 
- impl PageDirectoryEntry { - pub open spec fn view(self) -> GhostPageDirectoryEntry { - let v = self.entry; - let flag_P = v & MASK_FLAG_P == MASK_FLAG_P; - let flag_RW = v & MASK_FLAG_RW == MASK_FLAG_RW; - let flag_US = v & MASK_FLAG_US == MASK_FLAG_US; - let flag_PWT = v & MASK_FLAG_PWT == MASK_FLAG_PWT; - let flag_PCD = v & MASK_FLAG_PCD == MASK_FLAG_PCD; - let flag_A = v & MASK_FLAG_A == MASK_FLAG_A; - let flag_XD = v & MASK_FLAG_XD == MASK_FLAG_XD; - let flag_D = v & MASK_PG_FLAG_D == MASK_PG_FLAG_D; - let flag_G = v & MASK_PG_FLAG_G == MASK_PG_FLAG_G; - if self.layer@ <= 3 { - if v & MASK_FLAG_P == MASK_FLAG_P && self.all_mb0_bits_are_zero() { - if self.layer == 0 { - let addr = (v & MASK_ADDR) as usize; - GhostPageDirectoryEntry::Directory { - addr, flag_P, flag_RW, flag_US, flag_PWT, flag_PCD, flag_A, flag_XD, - } - } else if self.layer == 1 { - if v & MASK_L1_PG_FLAG_PS == MASK_L1_PG_FLAG_PS { - // super page mapping - let addr = (v & MASK_L1_PG_ADDR) as usize; - let flag_PAT = v & MASK_PG_FLAG_PAT == MASK_PG_FLAG_PAT; - GhostPageDirectoryEntry::Page { - addr, - flag_P, flag_RW, flag_US, flag_PWT, flag_PCD, - flag_A, flag_D, flag_G, flag_PAT, flag_XD, - } - } else { - let addr = (v & MASK_ADDR) as usize; - GhostPageDirectoryEntry::Directory { - addr, flag_P, flag_RW, flag_US, flag_PWT, flag_PCD, flag_A, flag_XD, - } - } - } else if self.layer == 2 { - if v & MASK_L2_PG_FLAG_PS == MASK_L2_PG_FLAG_PS { - // huge page mapping - let addr = (v & MASK_L2_PG_ADDR) as usize; - let flag_PAT = v & MASK_PG_FLAG_PAT == MASK_PG_FLAG_PAT; - GhostPageDirectoryEntry::Page { - addr, - flag_P, flag_RW, flag_US, flag_PWT, flag_PCD, - flag_A, flag_D, flag_G, flag_PAT, flag_XD, - } - } else { - let addr = (v & MASK_ADDR) as usize; - GhostPageDirectoryEntry::Directory { - addr, flag_P, flag_RW, flag_US, flag_PWT, flag_PCD, flag_A, flag_XD, - } - } - } else { - // TODO: uncomment when we have inline proofs - // assert(self.layer == 3); - let addr = (v & MASK_L3_PG_ADDR) as usize; - let flag_PAT = v & MASK_L3_PG_FLAG_PAT == MASK_L3_PG_FLAG_PAT; - GhostPageDirectoryEntry::Page { - addr, - flag_P, flag_RW, flag_US, flag_PWT, flag_PCD, - flag_A, flag_D, flag_G, flag_PAT, flag_XD, - } +pub const MASK_PG_FLAG_PAT: u64 = bit!(12u64); + +pub const MASK_L1_PG_FLAG_PS: u64 = bit!(7u64); + +pub const MASK_L2_PG_FLAG_PS: u64 = bit!(7u64); + +pub const MASK_L3_PG_FLAG_PAT: u64 = bit!(7u64); + +// const MASK_DIR_REFC: u64 = bitmask_inc!(52u64,62u64); // Ignored bits for storing refcount in L3 and L2 +// const MASK_DIR_L1_REFC: u64 = bitmask_inc!(8u64,12u64); // Ignored bits for storing refcount in L1 +// const MASK_DIR_REFC_SHIFT: u64 = 52u64; +// const MASK_DIR_L1_REFC_SHIFT: u64 = 8u64; +// In the implementation we can always use the 12:52 mask as the invariant guarantees that in the +// other cases, the lower bits are already zero anyway. +// We cannot use dual exec/spec constants here because for those Verus currently doesn't support +// manually guiding the no-overflow proofs. 
+pub spec const MASK_ADDR_SPEC: u64 = bitmask_inc!(12u64, MAX_PHYADDR_WIDTH - 1); + +#[verifier::when_used_as_spec(MASK_ADDR_SPEC)] +pub exec const MASK_ADDR: u64 + ensures + MASK_ADDR == MASK_ADDR_SPEC, +{ + axiom_max_phyaddr_width_facts(); + bitmask_inc!(12u64, MAX_PHYADDR_WIDTH - 1) +} + +pub spec const MASK_L1_PG_ADDR_SPEC: u64 = bitmask_inc!(30u64, MAX_PHYADDR_WIDTH - 1); + +#[verifier::when_used_as_spec(MASK_L1_PG_ADDR_SPEC)] +pub exec const MASK_L1_PG_ADDR: u64 + ensures + MASK_L1_PG_ADDR == MASK_L1_PG_ADDR_SPEC, +{ + axiom_max_phyaddr_width_facts(); + bitmask_inc!(30u64, MAX_PHYADDR_WIDTH - 1) +} + +pub spec const MASK_L2_PG_ADDR_SPEC: u64 = bitmask_inc!(21u64, MAX_PHYADDR_WIDTH - 1); + +#[verifier::when_used_as_spec(MASK_L2_PG_ADDR_SPEC)] +pub exec const MASK_L2_PG_ADDR: u64 + ensures + MASK_L2_PG_ADDR == MASK_L2_PG_ADDR_SPEC, +{ + axiom_max_phyaddr_width_facts(); + bitmask_inc!(21u64, MAX_PHYADDR_WIDTH - 1) +} + +pub spec const MASK_L3_PG_ADDR_SPEC: u64 = bitmask_inc!(12u64, MAX_PHYADDR_WIDTH - 1); + +#[verifier::when_used_as_spec(MASK_L3_PG_ADDR_SPEC)] +pub exec const MASK_L3_PG_ADDR: u64 + ensures + MASK_L3_PG_ADDR == MASK_L3_PG_ADDR_SPEC, +{ + axiom_max_phyaddr_width_facts(); + bitmask_inc!(12u64, MAX_PHYADDR_WIDTH - 1) +} + +pub spec const MASK_DIR_ADDR_SPEC: u64 = MASK_ADDR; + +#[verifier::when_used_as_spec(MASK_DIR_ADDR_SPEC)] +pub exec const MASK_DIR_ADDR: u64 + ensures + MASK_DIR_ADDR == MASK_DIR_ADDR_SPEC, +{ + MASK_ADDR +} + +#[allow(repr_transparent_external_private_fields)] +// An entry in any page directory (i.e. in PML4, PDPT, PD or PT) +#[repr(transparent)] +pub struct PageDirectoryEntry { + pub entry: u64, + pub layer: Ghost, +} + +// This impl defines everything necessary for the page table walk semantics. +// PageDirectoryEntry is reused in the implementation, which has an additional impl block for it in +// `impl_u::l2_impl`. +impl PageDirectoryEntry { + pub open spec fn view(self) -> GhostPageDirectoryEntry { + let v = self.entry; + let flag_P = v & MASK_FLAG_P == MASK_FLAG_P; + let flag_RW = v & MASK_FLAG_RW == MASK_FLAG_RW; + let flag_US = v & MASK_FLAG_US == MASK_FLAG_US; + let flag_PWT = v & MASK_FLAG_PWT == MASK_FLAG_PWT; + let flag_PCD = v & MASK_FLAG_PCD == MASK_FLAG_PCD; + let flag_A = v & MASK_FLAG_A == MASK_FLAG_A; + let flag_XD = v & MASK_FLAG_XD == MASK_FLAG_XD; + let flag_D = v & MASK_PG_FLAG_D == MASK_PG_FLAG_D; + let flag_G = v & MASK_PG_FLAG_G == MASK_PG_FLAG_G; + if self.layer@ <= 3 { + if v & MASK_FLAG_P == MASK_FLAG_P && self.all_mb0_bits_are_zero() { + if self.layer == 0 { + let addr = (v & MASK_ADDR) as usize; + GhostPageDirectoryEntry::Directory { + addr, + flag_P, + flag_RW, + flag_US, + flag_PWT, + flag_PCD, + flag_A, + flag_XD, + } + } else if self.layer == 1 { + if v & MASK_L1_PG_FLAG_PS == MASK_L1_PG_FLAG_PS { + // super page mapping + let addr = (v & MASK_L1_PG_ADDR) as usize; + let flag_PAT = v & MASK_PG_FLAG_PAT == MASK_PG_FLAG_PAT; + GhostPageDirectoryEntry::Page { + addr, + flag_P, + flag_RW, + flag_US, + flag_PWT, + flag_PCD, + flag_A, + flag_D, + flag_G, + flag_PAT, + flag_XD, } } else { - GhostPageDirectoryEntry::Empty - } - } else { - arbitrary() - } - } - - /// Returns `true` iff all must-be-zero bits for a given entry are zero. 
- #[verifier::opaque] - pub open spec fn all_mb0_bits_are_zero(self) -> bool - recommends self.layer@ <= 3, - { - if self.entry & MASK_FLAG_P == MASK_FLAG_P { - if self.layer == 0 { // PML4, always directory - // 51:M, 7 - &&& self.entry & bitmask_inc!(MAX_PHYADDR_WIDTH, 51) == 0 - &&& self.entry & bit!(7u64) == 0 - } else if self.layer == 1 { // PDPT - if self.entry & MASK_L1_PG_FLAG_PS == MASK_L1_PG_FLAG_PS { - // 51:M, 29:13 - &&& self.entry & bitmask_inc!(MAX_PHYADDR_WIDTH, 51) == 0 - &&& self.entry & bitmask_inc!(13u64,29u64) == 0 - } else { - // 51:M, 7 - &&& self.entry & bitmask_inc!(MAX_PHYADDR_WIDTH, 51) == 0 - &&& self.entry & bit!(7u64) == 0 + let addr = (v & MASK_ADDR) as usize; + GhostPageDirectoryEntry::Directory { + addr, + flag_P, + flag_RW, + flag_US, + flag_PWT, + flag_PCD, + flag_A, + flag_XD, } - } else if self.layer == 2 { // PD - if self.entry & MASK_L2_PG_FLAG_PS == MASK_L2_PG_FLAG_PS { - // 62:M, 20:13 - &&& self.entry & bitmask_inc!(MAX_PHYADDR_WIDTH, 62) == 0 - &&& self.entry & bitmask_inc!(13u64,20u64) == 0 - } else { - // 62:M, 7 - &&& self.entry & bitmask_inc!(MAX_PHYADDR_WIDTH, 62) == 0 - &&& self.entry & bit!(7u64) == 0 + } + } else if self.layer == 2 { + if v & MASK_L2_PG_FLAG_PS == MASK_L2_PG_FLAG_PS { + // huge page mapping + let addr = (v & MASK_L2_PG_ADDR) as usize; + let flag_PAT = v & MASK_PG_FLAG_PAT == MASK_PG_FLAG_PAT; + GhostPageDirectoryEntry::Page { + addr, + flag_P, + flag_RW, + flag_US, + flag_PWT, + flag_PCD, + flag_A, + flag_D, + flag_G, + flag_PAT, + flag_XD, } - } else if self.layer == 3 { // PT, always frame - // 62:M - self.entry & bitmask_inc!(MAX_PHYADDR_WIDTH, 62) == 0 } else { - arbitrary() + let addr = (v & MASK_ADDR) as usize; + GhostPageDirectoryEntry::Directory { + addr, + flag_P, + flag_RW, + flag_US, + flag_PWT, + flag_PCD, + flag_A, + flag_XD, + } } } else { - // No bits are reserved for unused entries - true + // TODO: uncomment when we have inline proofs + // assert(self.layer == 3); + let addr = (v & MASK_L3_PG_ADDR) as usize; + let flag_PAT = v & MASK_L3_PG_FLAG_PAT == MASK_L3_PG_FLAG_PAT; + GhostPageDirectoryEntry::Page { + addr, + flag_P, + flag_RW, + flag_US, + flag_PWT, + flag_PCD, + flag_A, + flag_D, + flag_G, + flag_PAT, + flag_XD, + } } + } else { + GhostPageDirectoryEntry::Empty } + } else { + arbitrary() + } + } - pub open spec fn layer(self) -> nat { - self.layer@ + /// Returns `true` iff all must-be-zero bits for a given entry are zero. 
+ #[verifier::opaque] + pub open spec fn all_mb0_bits_are_zero(self) -> bool + recommends + self.layer@ <= 3, + { + if self.entry & MASK_FLAG_P == MASK_FLAG_P { + if self.layer == 0 { // PML4, always directory + // 51:M, 7 + &&& self.entry & bitmask_inc!(MAX_PHYADDR_WIDTH, 51) == 0 + &&& self.entry & bit!(7u64) == 0 + } else if self.layer == 1 { // PDPT + if self.entry & MASK_L1_PG_FLAG_PS == MASK_L1_PG_FLAG_PS { + // 51:M, 29:13 + &&& self.entry & bitmask_inc!(MAX_PHYADDR_WIDTH, 51) == 0 + &&& self.entry & bitmask_inc!(13u64,29u64) == 0 + } else { + // 51:M, 7 + &&& self.entry & bitmask_inc!(MAX_PHYADDR_WIDTH, 51) == 0 + &&& self.entry & bit!(7u64) == 0 + } + } else if self.layer == 2 { // PD + if self.entry & MASK_L2_PG_FLAG_PS == MASK_L2_PG_FLAG_PS { + // 62:M, 20:13 + &&& self.entry & bitmask_inc!(MAX_PHYADDR_WIDTH, 62) == 0 + &&& self.entry & bitmask_inc!(13u64,20u64) == 0 + } else { + // 62:M, 7 + &&& self.entry & bitmask_inc!(MAX_PHYADDR_WIDTH, 62) == 0 + &&& self.entry & bit!(7u64) == 0 + } + } else if self.layer == 3 { // PT, always frame + // 62:M + self.entry & bitmask_inc!(MAX_PHYADDR_WIDTH, 62) == 0 + } else { + arbitrary() } + } else { + // No bits are reserved for unused entries + true } + } + + pub open spec fn layer(self) -> nat { + self.layer@ + } +} - #[allow(unused_macros)] +#[allow(unused_macros)] macro_rules! l0_bits { ($addr:expr) => { ($addr & bitmask_inc!(39u64,47u64)) >> 39u64 } } - pub(crate) use l0_bits; - #[allow(unused_macros)] +pub(crate) use l0_bits; + +#[allow(unused_macros)] macro_rules! l1_bits { ($addr:expr) => { ($addr & bitmask_inc!(30u64,38u64)) >> 30u64 } } - pub(crate) use l1_bits; - #[allow(unused_macros)] +pub(crate) use l1_bits; + +#[allow(unused_macros)] macro_rules! l2_bits { ($addr:expr) => { ($addr & bitmask_inc!(21u64,29u64)) >> 21u64 } } - pub(crate) use l2_bits; - #[allow(unused_macros)] +pub(crate) use l2_bits; + +#[allow(unused_macros)] macro_rules! l3_bits { ($addr:expr) => { ($addr & bitmask_inc!(12u64,20u64)) >> 12u64 } } - pub(crate) use l3_bits; - pub open spec fn read_entry(pt_mem: mem::PageTableMemory, dir_addr: nat, layer: nat, idx: nat) -> GhostPageDirectoryEntry { - let region = MemRegion { base: dir_addr as nat, size: PAGE_SIZE as nat }; - PageDirectoryEntry { entry: pt_mem.spec_read(idx, region), layer: Ghost(layer) }@ - } +pub(crate) use l3_bits; +pub open spec fn read_entry( + pt_mem: mem::PageTableMemory, + dir_addr: nat, + layer: nat, + idx: nat, +) -> GhostPageDirectoryEntry { + let region = MemRegion { base: dir_addr as nat, size: PAGE_SIZE as nat }; + PageDirectoryEntry { entry: pt_mem.spec_read(idx, region), layer: Ghost(layer) }@ +} - /// TODO: list 4-level paging no HLAT etc. as assumptions (+ the register to enable XD semantics, - /// it's mb0 otherwise) - /// - /// The intended semantics for valid_pt_walk is this: - /// Given a `PageTableMemory` `pt_mem`, the predicate is true for those `addr` and `pte` where the - /// MMU's page table walk arrives at an entry mapping the frame `pte.frame`. The properties in - /// `pte.flags` reflect the properties along the translation path. I.e. `pte.flags.is_writable` is - /// true iff the RW flag is set in all directories along the translation path and in the frame - /// mapping. Similarly, `pte.flags.is_supervisor` is true iff the US flag is unset in all those - /// structures and `pte.flags.disable_execute` is true iff the XD flag is set in at least one of - /// those structures. 
- /// - /// In practice, we always set these flags to their more permissive state in directories and only - /// make more restrictive settings in the frame mappings. (Ensured in the invariant, see conjunct - /// `directories_have_flags` in refinement layers 1 and 2.) But in the hardware model we still - /// define the full, correct semantics to ensure the implementation sets the flags correctly. - pub open spec fn valid_pt_walk(pt_mem: mem::PageTableMemory, addr: u64, pte: PageTableEntry) -> bool { - let l0_idx: nat = l0_bits!(addr) as nat; - let l1_idx: nat = l1_bits!(addr) as nat; - let l2_idx: nat = l2_bits!(addr) as nat; - let l3_idx: nat = l3_bits!(addr) as nat; - match read_entry(pt_mem, pt_mem.cr3_spec()@.base, 0, l0_idx) { +/// TODO: list 4-level paging no HLAT etc. as assumptions (+ the register to enable XD semantics, +/// it's mb0 otherwise) +/// +/// The intended semantics for valid_pt_walk is this: +/// Given a `PageTableMemory` `pt_mem`, the predicate is true for those `addr` and `pte` where the +/// MMU's page table walk arrives at an entry mapping the frame `pte.frame`. The properties in +/// `pte.flags` reflect the properties along the translation path. I.e. `pte.flags.is_writable` is +/// true iff the RW flag is set in all directories along the translation path and in the frame +/// mapping. Similarly, `pte.flags.is_supervisor` is true iff the US flag is unset in all those +/// structures and `pte.flags.disable_execute` is true iff the XD flag is set in at least one of +/// those structures. +/// +/// In practice, we always set these flags to their more permissive state in directories and only +/// make more restrictive settings in the frame mappings. (Ensured in the invariant, see conjunct +/// `directories_have_flags` in refinement layers 1 and 2.) But in the hardware model we still +/// define the full, correct semantics to ensure the implementation sets the flags correctly. +pub open spec fn valid_pt_walk( + pt_mem: mem::PageTableMemory, + addr: u64, + pte: PageTableEntry, +) -> bool { + let l0_idx: nat = l0_bits!(addr) as nat; + let l1_idx: nat = l1_bits!(addr) as nat; + let l2_idx: nat = l2_bits!(addr) as nat; + let l3_idx: nat = l3_bits!(addr) as nat; + match read_entry(pt_mem, pt_mem.cr3_spec()@.base, 0, l0_idx) { + GhostPageDirectoryEntry::Directory { + addr: dir_addr, + flag_RW: l0_RW, + flag_US: l0_US, + flag_XD: l0_XD, + .. + } => { + match read_entry(pt_mem, dir_addr as nat, 1, l1_idx) { + GhostPageDirectoryEntry::Page { + addr: page_addr, + flag_RW: l1_RW, + flag_US: l1_US, + flag_XD: l1_XD, + .. + } => { + aligned(addr as nat, L1_ENTRY_SIZE as nat) && pte == PageTableEntry { + frame: MemRegion { base: page_addr as nat, size: L1_ENTRY_SIZE as nat }, + flags: Flags { + is_writable: l0_RW && l1_RW, + is_supervisor: !l0_US || !l1_US, + disable_execute: l0_XD || l1_XD, + }, + } + }, GhostPageDirectoryEntry::Directory { - addr: dir_addr, flag_RW: l0_RW, flag_US: l0_US, flag_XD: l0_XD, .. + addr: dir_addr, + flag_RW: l1_RW, + flag_US: l1_US, + flag_XD: l1_XD, + .. } => { - match read_entry(pt_mem, dir_addr as nat, 1, l1_idx) { + match read_entry(pt_mem, dir_addr as nat, 2, l2_idx) { GhostPageDirectoryEntry::Page { - addr: page_addr, flag_RW: l1_RW, flag_US: l1_US, flag_XD: l1_XD, .. + addr: page_addr, + flag_RW: l2_RW, + flag_US: l2_US, + flag_XD: l2_XD, + .. 
} => { - aligned(addr as nat, L1_ENTRY_SIZE as nat) && - pte == PageTableEntry { - frame: MemRegion { base: page_addr as nat, size: L1_ENTRY_SIZE as nat }, + aligned(addr as nat, L2_ENTRY_SIZE as nat) && pte == PageTableEntry { + frame: MemRegion { + base: page_addr as nat, + size: L2_ENTRY_SIZE as nat, + }, flags: Flags { - is_writable: l0_RW && l1_RW, - is_supervisor: !l0_US || !l1_US, - disable_execute: l0_XD || l1_XD - } + is_writable: l0_RW && l1_RW && l2_RW, + is_supervisor: !l0_US || !l1_US || !l2_US, + disable_execute: l0_XD || l1_XD || l2_XD, + }, } }, GhostPageDirectoryEntry::Directory { - addr: dir_addr, flag_RW: l1_RW, flag_US: l1_US, flag_XD: l1_XD, .. + addr: dir_addr, + flag_RW: l2_RW, + flag_US: l2_US, + flag_XD: l2_XD, + .. } => { - match read_entry(pt_mem, dir_addr as nat, 2, l2_idx) { + match read_entry(pt_mem, dir_addr as nat, 3, l3_idx) { GhostPageDirectoryEntry::Page { - addr: page_addr, flag_RW: l2_RW, flag_US: l2_US, flag_XD: l2_XD, .. + addr: page_addr, + flag_RW: l3_RW, + flag_US: l3_US, + flag_XD: l3_XD, + .. } => { - aligned(addr as nat, L2_ENTRY_SIZE as nat) && - pte == PageTableEntry { - frame: MemRegion { base: page_addr as nat, size: L2_ENTRY_SIZE as nat }, + aligned(addr as nat, L3_ENTRY_SIZE as nat) && pte + == PageTableEntry { + frame: MemRegion { + base: page_addr as nat, + size: L3_ENTRY_SIZE as nat, + }, flags: Flags { - is_writable: l0_RW && l1_RW && l2_RW, - is_supervisor: !l0_US || !l1_US || !l2_US, - disable_execute: l0_XD || l1_XD || l2_XD - } - } - }, - GhostPageDirectoryEntry::Directory { - addr: dir_addr, flag_RW: l2_RW, flag_US: l2_US, flag_XD: l2_XD, .. - } => { - match read_entry(pt_mem, dir_addr as nat, 3, l3_idx) { - GhostPageDirectoryEntry::Page { - addr: page_addr, flag_RW: l3_RW, flag_US: l3_US, flag_XD: l3_XD, .. - } => { - aligned(addr as nat, L3_ENTRY_SIZE as nat) && - pte == PageTableEntry { - frame: MemRegion { base: page_addr as nat, size: L3_ENTRY_SIZE as nat }, - flags: Flags { - is_writable: l0_RW && l1_RW && l2_RW && l3_RW, - is_supervisor: !l0_US || !l1_US || !l2_US || !l3_US, - disable_execute: l0_XD || l1_XD || l2_XD || l3_XD - } - } + is_writable: l0_RW && l1_RW && l2_RW && l3_RW, + is_supervisor: !l0_US || !l1_US || !l2_US || !l3_US, + disable_execute: l0_XD || l1_XD || l2_XD || l3_XD, }, - GhostPageDirectoryEntry::Directory { .. } => false, - GhostPageDirectoryEntry::Empty => false, } }, + GhostPageDirectoryEntry::Directory { .. 
} => false, GhostPageDirectoryEntry::Empty => false, } }, GhostPageDirectoryEntry::Empty => false, } }, - _ => false, + GhostPageDirectoryEntry::Empty => false, } - } + }, + _ => false, + } +} - // Can't use `n as u64` in triggers because it's an arithmetic expression - pub open spec fn nat_to_u64(n: nat) -> u64 - recommends n <= u64::MAX - { n as u64 } - - /// Page table walker interpretation of the page table memory - pub open spec fn interp_pt_mem(pt_mem: mem::PageTableMemory) -> Map { - Map::new( - |addr: nat| - addr < MAX_BASE - // Casting addr to u64 is okay since addr < MAX_BASE < u64::MAX - && exists|pte: PageTableEntry| valid_pt_walk(pt_mem, nat_to_u64(addr), pte), - |addr: nat| - choose|pte: PageTableEntry| valid_pt_walk(pt_mem, nat_to_u64(addr), pte)) - } +// Can't use `n as u64` in triggers because it's an arithmetic expression +pub open spec fn nat_to_u64(n: nat) -> u64 + recommends + n <= u64::MAX, +{ + n as u64 +} - pub open spec fn init(s: HWVariables) -> bool { - &&& s.tlb.dom() === Set::empty() - &&& interp_pt_mem(s.pt_mem) === Map::empty() - } +/// Page table walker interpretation of the page table memory +pub open spec fn interp_pt_mem(pt_mem: mem::PageTableMemory) -> Map { + Map::new( + |addr: nat| + addr + < MAX_BASE + // Casting addr to u64 is okay since addr < MAX_BASE < u64::MAX + && exists|pte: PageTableEntry| valid_pt_walk(pt_mem, nat_to_u64(addr), pte), + |addr: nat| choose|pte: PageTableEntry| valid_pt_walk(pt_mem, nat_to_u64(addr), pte), + ) +} - // We only allow aligned accesses. Can think of unaligned accesses as two aligned accesses. When we - // get to concurrency we may have to change that. - pub open spec fn step_ReadWrite(s1: HWVariables, s2: HWVariables, vaddr: nat, paddr: nat, op: RWOp, pte: Option<(nat, PageTableEntry)>) -> bool { - &&& aligned(vaddr, 8) - &&& s2.pt_mem === s1.pt_mem - &&& s2.tlb === s1.tlb - &&& match pte { - Some((base, pte)) => { - let pmem_idx = word_index_spec(paddr); - // If pte is Some, it's a cached mapping that maps vaddr to paddr.. - &&& s1.tlb.contains_pair(base, pte) - &&& between(vaddr, base, base + pte.frame.size) - &&& paddr === (pte.frame.base + (vaddr - base)) as nat - // .. and the result depends on the flags. - &&& match op { - RWOp::Store { new_value, result } => { - if pmem_idx < s1.mem.len() && !pte.flags.is_supervisor && pte.flags.is_writable { - &&& result.is_Ok() - &&& s2.mem === s1.mem.update(pmem_idx as int, new_value) - } else { - &&& result.is_Pagefault() - &&& s2.mem === s1.mem - } - }, - RWOp::Load { is_exec, result } => { - &&& s2.mem === s1.mem - &&& if pmem_idx < s1.mem.len() && !pte.flags.is_supervisor && (is_exec ==> !pte.flags.disable_execute) { - &&& result.is_Value() - &&& result.get_Value_0() == s1.mem[pmem_idx as int] - } else { - &&& result.is_Pagefault() - } - }, +pub open spec fn init(s: HWVariables) -> bool { + &&& s.tlb.dom() === Set::empty() + &&& interp_pt_mem(s.pt_mem) === Map::empty() +} + +// We only allow aligned accesses. Can think of unaligned accesses as two aligned accesses. When we +// get to concurrency we may have to change that. +pub open spec fn step_ReadWrite( + s1: HWVariables, + s2: HWVariables, + vaddr: nat, + paddr: nat, + op: RWOp, + pte: Option<(nat, PageTableEntry)>, +) -> bool { + &&& aligned(vaddr, 8) + &&& s2.pt_mem === s1.pt_mem + &&& s2.tlb === s1.tlb + &&& match pte { + Some((base, pte)) => { + let pmem_idx = word_index_spec(paddr); + // If pte is Some, it's a cached mapping that maps vaddr to paddr.. 
+ &&& s1.tlb.contains_pair(base, pte) + &&& between(vaddr, base, base + pte.frame.size) + &&& paddr === (pte.frame.base + (vaddr + - base)) as nat + // .. and the result depends on the flags. + + &&& match op { + RWOp::Store { new_value, result } => { + if pmem_idx < s1.mem.len() && !pte.flags.is_supervisor + && pte.flags.is_writable { + &&& result.is_Ok() + &&& s2.mem === s1.mem.update(pmem_idx as int, new_value) + } else { + &&& result.is_Pagefault() + &&& s2.mem === s1.mem } }, - None => { - // If pte is None, no mapping containing vaddr exists.. - &&& (!exists|base, pte| { - &&& interp_pt_mem(s1.pt_mem).contains_pair(base, pte) - &&& between(vaddr, base, base + pte.frame.size) - }) - // .. and the result is always a pagefault and an unchanged memory. + RWOp::Load { is_exec, result } => { &&& s2.mem === s1.mem - &&& match op { - RWOp::Store { new_value, result } => result.is_Pagefault(), - RWOp::Load { is_exec, result } => result.is_Pagefault(), + &&& if pmem_idx < s1.mem.len() && !pte.flags.is_supervisor && (is_exec + ==> !pte.flags.disable_execute) { + &&& result.is_Value() + &&& result.get_Value_0() == s1.mem[pmem_idx as int] + } else { + &&& result.is_Pagefault() } }, } - } + }, + None => { + // If pte is None, no mapping containing vaddr exists.. + &&& (!exists|base, pte| + { + &&& interp_pt_mem(s1.pt_mem).contains_pair(base, pte) + &&& between(vaddr, base, base + pte.frame.size) + }) + // .. and the result is always a pagefault and an unchanged memory. - pub open spec fn step_PTMemOp(s1: HWVariables, s2: HWVariables) -> bool { &&& s2.mem === s1.mem - // s2.tlb is a submap of s1.tlb - &&& forall|base: nat, pte: PageTableEntry| s2.tlb.contains_pair(base, pte) ==> s1.tlb.contains_pair(base, pte) - // pt_mem may change arbitrarily - } + &&& match op { + RWOp::Store { new_value, result } => result.is_Pagefault(), + RWOp::Load { is_exec, result } => result.is_Pagefault(), + } + }, + } +} - pub open spec fn step_TLBFill(s1: HWVariables, s2: HWVariables, vaddr: nat, pte: PageTableEntry) -> bool { - &&& interp_pt_mem(s1.pt_mem).contains_pair(vaddr, pte) - &&& s2.tlb === s1.tlb.insert(vaddr, pte) - &&& s2.pt_mem === s1.pt_mem - &&& s2.mem === s1.mem - } +pub open spec fn step_PTMemOp(s1: HWVariables, s2: HWVariables) -> bool { + &&& s2.mem === s1.mem + // s2.tlb is a submap of s1.tlb - pub open spec fn step_TLBEvict(s1: HWVariables, s2: HWVariables, vaddr: nat) -> bool { - &&& s1.tlb.dom().contains(vaddr) - &&& s2.tlb === s1.tlb.remove(vaddr) - &&& s2.pt_mem === s1.pt_mem - &&& s2.mem === s1.mem - } + &&& forall|base: nat, pte: PageTableEntry| + s2.tlb.contains_pair(base, pte) ==> s1.tlb.contains_pair( + base, + pte, + ) + // pt_mem may change arbitrarily - pub open spec fn next_step(s1: HWVariables, s2: HWVariables, step: HWStep) -> bool { - match step { - HWStep::ReadWrite { vaddr, paddr, op, pte } => step_ReadWrite(s1, s2, vaddr, paddr, op, pte), - HWStep::PTMemOp => step_PTMemOp(s1, s2), - HWStep::TLBFill { vaddr, pte } => step_TLBFill(s1, s2, vaddr, pte), - HWStep::TLBEvict { vaddr } => step_TLBEvict(s1, s2, vaddr), - } - } +} - pub open spec fn next(s1: HWVariables, s2: HWVariables) -> bool { - exists|step: HWStep| next_step(s1, s2, step) - } +pub open spec fn step_TLBFill( + s1: HWVariables, + s2: HWVariables, + vaddr: nat, + pte: PageTableEntry, +) -> bool { + &&& interp_pt_mem(s1.pt_mem).contains_pair(vaddr, pte) + &&& s2.tlb === s1.tlb.insert(vaddr, pte) + &&& s2.pt_mem === s1.pt_mem + &&& s2.mem === s1.mem +} - // pub closed spec fn inv(s: HWVariables) -> bool { - // true - // } 
- // - // proof fn init_implies_inv(s: HWVariables) - // requires - // init(s), - // ensures - // inv(s) - // { } - // - // proof fn next_preserves_inv(s1: HWVariables, s2: HWVariables) - // requires - // next(s1, s2), - // inv(s1), - // ensures - // inv(s2), - // { - // let step = choose|step: HWStep| next_step(s1, s2, step); - // match step { - // HWStep::ReadWrite { vaddr, paddr, op , pte} => (), - // HWStep::PTMemOp => (), - // HWStep::TLBFill { vaddr, pte } => (), - // HWStep::TLBEvict { vaddr } => (), - // } - // } +pub open spec fn step_TLBEvict(s1: HWVariables, s2: HWVariables, vaddr: nat) -> bool { + &&& s1.tlb.dom().contains(vaddr) + &&& s2.tlb === s1.tlb.remove(vaddr) + &&& s2.pt_mem === s1.pt_mem + &&& s2.mem === s1.mem +} - } +pub open spec fn next_step(s1: HWVariables, s2: HWVariables, step: HWStep) -> bool { + match step { + HWStep::ReadWrite { vaddr, paddr, op, pte } => step_ReadWrite( + s1, + s2, + vaddr, + paddr, + op, + pte, + ), + HWStep::PTMemOp => step_PTMemOp(s1, s2), + HWStep::TLBFill { vaddr, pte } => step_TLBFill(s1, s2, vaddr, pte), + HWStep::TLBEvict { vaddr } => step_TLBEvict(s1, s2, vaddr), + } +} + +pub open spec fn next(s1: HWVariables, s2: HWVariables) -> bool { + exists|step: HWStep| next_step(s1, s2, step) +} + +// pub closed spec fn inv(s: HWVariables) -> bool { +// true +// } +// +// proof fn init_implies_inv(s: HWVariables) +// requires +// init(s), +// ensures +// inv(s) +// { } +// +// proof fn next_preserves_inv(s1: HWVariables, s2: HWVariables) +// requires +// next(s1, s2), +// inv(s1), +// ensures +// inv(s2), +// { +// let step = choose|step: HWStep| next_step(s1, s2, step); +// match step { +// HWStep::ReadWrite { vaddr, paddr, op , pte} => (), +// HWStep::PTMemOp => (), +// HWStep::TLBFill { vaddr, pte } => (), +// HWStep::TLBEvict { vaddr } => (), +// } +// } + +} // verus! } pub mod os { @@ -7608,165 +10760,205 @@ pub mod spec_t { verus! 
{ - pub struct OSVariables { - pub hw: hardware::HWVariables, - } - - impl OSVariables { - pub open spec fn pt_mappings_dont_overlap_in_vmem(self) -> bool { - forall|b1: nat, pte1: PageTableEntry, b2: nat, pte2: PageTableEntry| - self.interp_pt_mem().contains_pair(b1, pte1) && self.interp_pt_mem().contains_pair(b2, pte2) ==> - ((b1 == b2) || !overlap( - MemRegion { base: b1, size: pte1.frame.size }, - MemRegion { base: b2, size: pte2.frame.size })) - } +pub struct OSVariables { + pub hw: hardware::HWVariables, +} - pub open spec fn pt_mappings_dont_overlap_in_pmem(self) -> bool { - forall|b1: nat, pte1: PageTableEntry, b2: nat, pte2: PageTableEntry| - self.interp_pt_mem().contains_pair(b1, pte1) && self.interp_pt_mem().contains_pair(b2, pte2) ==> - ((b1 == b2) || !overlap(pte1.frame, pte2.frame)) - } +impl OSVariables { + pub open spec fn pt_mappings_dont_overlap_in_vmem(self) -> bool { + forall|b1: nat, pte1: PageTableEntry, b2: nat, pte2: PageTableEntry| + self.interp_pt_mem().contains_pair(b1, pte1) && self.interp_pt_mem().contains_pair( + b2, + pte2, + ) ==> ((b1 == b2) || !overlap( + MemRegion { base: b1, size: pte1.frame.size }, + MemRegion { base: b2, size: pte2.frame.size }, + )) + } - pub open spec fn tlb_is_submap_of_pt(self) -> bool { - forall|base, pte| self.hw.tlb.contains_pair(base, pte) ==> #[trigger] self.interp_pt_mem().contains_pair(base, pte) - } + pub open spec fn pt_mappings_dont_overlap_in_pmem(self) -> bool { + forall|b1: nat, pte1: PageTableEntry, b2: nat, pte2: PageTableEntry| + self.interp_pt_mem().contains_pair(b1, pte1) && self.interp_pt_mem().contains_pair( + b2, + pte2, + ) ==> ((b1 == b2) || !overlap(pte1.frame, pte2.frame)) + } - pub open spec fn pt_entry_sizes_are_valid(self) -> bool { - forall|base, pte| self.interp_pt_mem().contains_pair(base, pte) ==> { - ||| pte.frame.size == L3_ENTRY_SIZE - ||| pte.frame.size == L2_ENTRY_SIZE - ||| pte.frame.size == L1_ENTRY_SIZE - } - } + pub open spec fn tlb_is_submap_of_pt(self) -> bool { + forall|base, pte| + self.hw.tlb.contains_pair(base, pte) ==> #[trigger] self.interp_pt_mem().contains_pair( + base, + pte, + ) + } - #[verifier(opaque)] - pub open spec fn pt_entries_aligned(self) -> bool { - forall|base, pte| self.interp_pt_mem().contains_pair(base, pte) - ==> aligned(base, 8) && aligned(pte.frame.base, 8) + pub open spec fn pt_entry_sizes_are_valid(self) -> bool { + forall|base, pte| + self.interp_pt_mem().contains_pair(base, pte) ==> { + ||| pte.frame.size == L3_ENTRY_SIZE + ||| pte.frame.size == L2_ENTRY_SIZE + ||| pte.frame.size == L1_ENTRY_SIZE } + } - pub open spec fn inv(self) -> bool { - &&& self.pt_mappings_dont_overlap_in_vmem() - &&& self.pt_mappings_dont_overlap_in_pmem() - &&& self.pt_entry_sizes_are_valid() - &&& self.pt_entries_aligned() - &&& self.tlb_is_submap_of_pt() - } + #[verifier(opaque)] + pub open spec fn pt_entries_aligned(self) -> bool { + forall|base, pte| + self.interp_pt_mem().contains_pair(base, pte) ==> aligned(base, 8) && aligned( + pte.frame.base, + 8, + ) + } - pub open spec fn pt_variables(self) -> spec_pt::PageTableVariables { - spec_pt::PageTableVariables { - map: self.interp_pt_mem(), - } - } + pub open spec fn inv(self) -> bool { + &&& self.pt_mappings_dont_overlap_in_vmem() + &&& self.pt_mappings_dont_overlap_in_pmem() + &&& self.pt_entry_sizes_are_valid() + &&& self.pt_entries_aligned() + &&& self.tlb_is_submap_of_pt() + } - pub open spec fn interp_pt_mem(self) -> Map { - hardware::interp_pt_mem(self.hw.pt_mem) - } + pub open spec fn pt_variables(self) -> 
spec_pt::PageTableVariables { + spec_pt::PageTableVariables { map: self.interp_pt_mem() } + } - pub open spec fn effective_mappings(self) -> Map { - Map::new( - |base: nat| self.hw.tlb.dom().contains(base) || self.interp_pt_mem().dom().contains(base), - |base: nat| if self.hw.tlb.dom().contains(base) { self.hw.tlb.index(base) } else { self.interp_pt_mem().index(base) }, - ) - } + pub open spec fn interp_pt_mem(self) -> Map { + hardware::interp_pt_mem(self.hw.pt_mem) + } - pub open spec fn interp_vmem(self) -> Map { - let phys_mem_size = self.interp_constants().phys_mem_size; - let mappings: Map = self.effective_mappings(); - Map::new( - |vmem_idx: nat| hlspec::mem_domain_from_mappings_contains(phys_mem_size, vmem_idx, mappings), - |vmem_idx: nat| { - let vaddr = vmem_idx * WORD_SIZE as nat; - let (base, pte): (nat, PageTableEntry) = choose|base: nat, pte: PageTableEntry| #![auto] mappings.contains_pair(base, pte) && between(vaddr, base, base + pte.frame.size); - let paddr = (pte.frame.base + (vaddr - base)) as nat; - let pmem_idx = word_index_spec(paddr); - self.hw.mem[pmem_idx as int] - }) - } + pub open spec fn effective_mappings(self) -> Map { + Map::new( + |base: nat| + self.hw.tlb.dom().contains(base) || self.interp_pt_mem().dom().contains(base), + |base: nat| + if self.hw.tlb.dom().contains(base) { + self.hw.tlb.index(base) + } else { + self.interp_pt_mem().index(base) + }, + ) + } - pub open spec fn interp(self) -> hlspec::AbstractVariables { - let mappings: Map = self.effective_mappings(); - let mem: Map = self.interp_vmem(); - hlspec::AbstractVariables { - mem, - mappings, - } - } + pub open spec fn interp_vmem(self) -> Map { + let phys_mem_size = self.interp_constants().phys_mem_size; + let mappings: Map = self.effective_mappings(); + Map::new( + |vmem_idx: nat| + hlspec::mem_domain_from_mappings_contains(phys_mem_size, vmem_idx, mappings), + |vmem_idx: nat| + { + let vaddr = vmem_idx * WORD_SIZE as nat; + let (base, pte): (nat, PageTableEntry) = choose|base: nat, pte: PageTableEntry| + #![auto] + mappings.contains_pair(base, pte) && between( + vaddr, + base, + base + pte.frame.size, + ); + let paddr = (pte.frame.base + (vaddr - base)) as nat; + let pmem_idx = word_index_spec(paddr); + self.hw.mem[pmem_idx as int] + }, + ) + } - pub open spec fn interp_constants(self) -> hlspec::AbstractConstants { - hlspec::AbstractConstants { - phys_mem_size: self.hw.mem.len(), - } - } - } + pub open spec fn interp(self) -> hlspec::AbstractVariables { + let mappings: Map = self.effective_mappings(); + let mem: Map = self.interp_vmem(); + hlspec::AbstractVariables { mem, mappings } + } - pub open spec fn step_HW(s1: OSVariables, s2: OSVariables, system_step: hardware::HWStep) -> bool { - &&& !system_step.is_PTMemOp() - &&& hardware::next_step(s1.hw, s2.hw, system_step) - &&& spec_pt::step_Stutter(s1.pt_variables(), s2.pt_variables()) - } + pub open spec fn interp_constants(self) -> hlspec::AbstractConstants { + hlspec::AbstractConstants { phys_mem_size: self.hw.mem.len() } + } +} - pub open spec fn step_Map(s1: OSVariables, s2: OSVariables, base: nat, pte: PageTableEntry, result: MapResult) -> bool { - &&& hardware::step_PTMemOp(s1.hw, s2.hw) - &&& spec_pt::step_Map(s1.pt_variables(), s2.pt_variables(), base, pte, result) - } +pub open spec fn step_HW(s1: OSVariables, s2: OSVariables, system_step: hardware::HWStep) -> bool { + &&& !system_step.is_PTMemOp() + &&& hardware::next_step(s1.hw, s2.hw, system_step) + &&& spec_pt::step_Stutter(s1.pt_variables(), s2.pt_variables()) +} - pub open spec fn 
step_Unmap(s1: OSVariables, s2: OSVariables, base: nat, result: UnmapResult) -> bool { - // The hw step tells us that s2.tlb is a submap of s1.tlb, so all we need to specify is - // that s2.tlb doesn't contain this particular entry. - &&& !s2.hw.tlb.dom().contains(base) - &&& hardware::step_PTMemOp(s1.hw, s2.hw) - &&& spec_pt::step_Unmap(s1.pt_variables(), s2.pt_variables(), base, result) - } +pub open spec fn step_Map( + s1: OSVariables, + s2: OSVariables, + base: nat, + pte: PageTableEntry, + result: MapResult, +) -> bool { + &&& hardware::step_PTMemOp(s1.hw, s2.hw) + &&& spec_pt::step_Map(s1.pt_variables(), s2.pt_variables(), base, pte, result) +} - pub open spec fn step_Resolve(s1: OSVariables, s2: OSVariables, base: nat, result: ResolveResult) -> bool { - &&& hardware::step_PTMemOp(s1.hw, s2.hw) - &&& spec_pt::step_Resolve(s1.pt_variables(), s2.pt_variables(), base, result) - } +pub open spec fn step_Unmap( + s1: OSVariables, + s2: OSVariables, + base: nat, + result: UnmapResult, +) -> bool { + // The hw step tells us that s2.tlb is a submap of s1.tlb, so all we need to specify is + // that s2.tlb doesn't contain this particular entry. + &&& !s2.hw.tlb.dom().contains(base) + &&& hardware::step_PTMemOp(s1.hw, s2.hw) + &&& spec_pt::step_Unmap(s1.pt_variables(), s2.pt_variables(), base, result) +} +pub open spec fn step_Resolve( + s1: OSVariables, + s2: OSVariables, + base: nat, + result: ResolveResult, +) -> bool { + &&& hardware::step_PTMemOp(s1.hw, s2.hw) + &&& spec_pt::step_Resolve(s1.pt_variables(), s2.pt_variables(), base, result) +} - pub enum OSStep { - HW { step: hardware::HWStep }, - Map { vaddr: nat, pte: PageTableEntry, result: MapResult }, - Unmap { vaddr: nat, result: UnmapResult }, - Resolve { vaddr: nat, result: ResolveResult }, - } +pub enum OSStep { + HW { step: hardware::HWStep }, + Map { vaddr: nat, pte: PageTableEntry, result: MapResult }, + Unmap { vaddr: nat, result: UnmapResult }, + Resolve { vaddr: nat, result: ResolveResult }, +} - impl OSStep { - pub open spec fn interp(self) -> hlspec::AbstractStep { - match self { - OSStep::HW { step } => - match step { - hardware::HWStep::ReadWrite { vaddr, paddr, op, pte } => hlspec::AbstractStep::ReadWrite { vaddr, op, pte }, - hardware::HWStep::PTMemOp => arbitrary(), - hardware::HWStep::TLBFill { vaddr, pte } => hlspec::AbstractStep::Stutter, - hardware::HWStep::TLBEvict { vaddr } => hlspec::AbstractStep::Stutter, - }, - OSStep::Map { vaddr, pte, result } => hlspec::AbstractStep::Map { vaddr, pte, result }, - OSStep::Unmap { vaddr, result } => hlspec::AbstractStep::Unmap { vaddr, result }, - OSStep::Resolve { vaddr, result } => hlspec::AbstractStep::Resolve { vaddr, result }, - } - } +impl OSStep { + pub open spec fn interp(self) -> hlspec::AbstractStep { + match self { + OSStep::HW { step } => match step { + hardware::HWStep::ReadWrite { + vaddr, + paddr, + op, + pte, + } => hlspec::AbstractStep::ReadWrite { vaddr, op, pte }, + hardware::HWStep::PTMemOp => arbitrary(), + hardware::HWStep::TLBFill { vaddr, pte } => hlspec::AbstractStep::Stutter, + hardware::HWStep::TLBEvict { vaddr } => hlspec::AbstractStep::Stutter, + }, + OSStep::Map { vaddr, pte, result } => hlspec::AbstractStep::Map { vaddr, pte, result }, + OSStep::Unmap { vaddr, result } => hlspec::AbstractStep::Unmap { vaddr, result }, + OSStep::Resolve { vaddr, result } => hlspec::AbstractStep::Resolve { vaddr, result }, } + } +} - pub open spec fn next_step(s1: OSVariables, s2: OSVariables, step: OSStep) -> bool { - match step { - OSStep::HW { step } => 
step_HW(s1, s2, step), - OSStep::Map { vaddr, pte, result } => step_Map(s1, s2, vaddr, pte, result), - OSStep::Unmap { vaddr, result } => step_Unmap(s1, s2, vaddr, result), - OSStep::Resolve { vaddr, result } => step_Resolve(s1, s2, vaddr, result), - } - } +pub open spec fn next_step(s1: OSVariables, s2: OSVariables, step: OSStep) -> bool { + match step { + OSStep::HW { step } => step_HW(s1, s2, step), + OSStep::Map { vaddr, pte, result } => step_Map(s1, s2, vaddr, pte, result), + OSStep::Unmap { vaddr, result } => step_Unmap(s1, s2, vaddr, result), + OSStep::Resolve { vaddr, result } => step_Resolve(s1, s2, vaddr, result), + } +} - pub open spec fn next(s1: OSVariables, s2: OSVariables) -> bool { - exists|step: OSStep| next_step(s1, s2, step) - } +pub open spec fn next(s1: OSVariables, s2: OSVariables) -> bool { + exists|step: OSStep| next_step(s1, s2, step) +} - pub open spec fn init(s: OSVariables) -> bool { - hardware::init(s.hw) - } +pub open spec fn init(s: OSVariables) -> bool { + hardware::init(s.hw) +} - } +} // verus! } pub mod impl_spec { @@ -7790,61 +10982,82 @@ pub mod spec_t { verus! { - pub trait InterfaceSpec { - spec fn ispec_inv(&self, mem: &mem::PageTableMemory) -> bool; - - proof fn ispec_init_implies_inv(&self, mem: &mem::PageTableMemory) - requires - mem.inv(), - mem.regions() === set![mem.cr3_spec()@], - mem.region_view(mem.cr3_spec()@).len() == 512, - (forall|i: nat| i < 512 ==> mem.region_view(mem.cr3_spec()@)[i as int] == 0), - ensures - self.ispec_inv(mem); - - fn ispec_map_frame(&self, mem: &mut mem::PageTableMemory, vaddr: usize, pte: PageTableEntryExec) -> (res: MapResult) - requires - old(mem).alloc_available_pages() >= 3, - spec_pt::step_Map_enabled(interp_pt_mem(*old(mem)), vaddr as nat, pte@), - self.ispec_inv(&*old(mem)), - ensures - self.ispec_inv(mem), - spec_pt::step_Map(spec_pt::PageTableVariables { map: interp_pt_mem(*old(mem)) }, spec_pt::PageTableVariables { map: interp_pt_mem(*mem) }, vaddr as nat, pte@, res); - - fn ispec_unmap(&self, mem: &mut mem::PageTableMemory, vaddr: usize) -> (res: UnmapResult) - requires - spec_pt::step_Unmap_enabled(vaddr as nat), - self.ispec_inv(&*old(mem)), - ensures - self.ispec_inv(mem), - spec_pt::step_Unmap(spec_pt::PageTableVariables { map: interp_pt_mem(*old(mem)) }, spec_pt::PageTableVariables { map: interp_pt_mem(*mem) }, vaddr as nat, res); +pub trait InterfaceSpec { + spec fn ispec_inv(&self, mem: &mem::PageTableMemory) -> bool; - fn ispec_resolve(&self, mem: &mem::PageTableMemory, vaddr: usize) -> (res: ResolveResultExec) - requires - spec_pt::step_Resolve_enabled(vaddr as nat), - self.ispec_inv(mem), - ensures - spec_pt::step_Resolve( - spec_pt::PageTableVariables { map: interp_pt_mem(*mem) }, - spec_pt::PageTableVariables { map: interp_pt_mem(*mem) }, - vaddr as nat, - res@ - ); - } + proof fn ispec_init_implies_inv(&self, mem: &mem::PageTableMemory) + requires + mem.inv(), + mem.regions() === set![mem.cr3_spec()@], + mem.region_view(mem.cr3_spec()@).len() == 512, + (forall|i: nat| i < 512 ==> mem.region_view(mem.cr3_spec()@)[i as int] == 0), + ensures + self.ispec_inv(mem), + ; + + fn ispec_map_frame( + &self, + mem: &mut mem::PageTableMemory, + vaddr: usize, + pte: PageTableEntryExec, + ) -> (res: MapResult) + requires + old(mem).alloc_available_pages() >= 3, + spec_pt::step_Map_enabled(interp_pt_mem(*old(mem)), vaddr as nat, pte@), + self.ispec_inv(&*old(mem)), + ensures + self.ispec_inv(mem), + spec_pt::step_Map( + spec_pt::PageTableVariables { map: interp_pt_mem(*old(mem)) }, + 
spec_pt::PageTableVariables { map: interp_pt_mem(*mem) }, + vaddr as nat, + pte@, + res, + ), + ; + + fn ispec_unmap(&self, mem: &mut mem::PageTableMemory, vaddr: usize) -> (res: UnmapResult) + requires + spec_pt::step_Unmap_enabled(vaddr as nat), + self.ispec_inv(&*old(mem)), + ensures + self.ispec_inv(mem), + spec_pt::step_Unmap( + spec_pt::PageTableVariables { map: interp_pt_mem(*old(mem)) }, + spec_pt::PageTableVariables { map: interp_pt_mem(*mem) }, + vaddr as nat, + res, + ), + ; + + fn ispec_resolve(&self, mem: &mem::PageTableMemory, vaddr: usize) -> (res: ResolveResultExec) + requires + spec_pt::step_Resolve_enabled(vaddr as nat), + self.ispec_inv(mem), + ensures + spec_pt::step_Resolve( + spec_pt::PageTableVariables { map: interp_pt_mem(*mem) }, + spec_pt::PageTableVariables { map: interp_pt_mem(*mem) }, + vaddr as nat, + res@, + ), + ; +} - pub struct PageTableImpl {} +pub struct PageTableImpl {} - pub closed spec fn implements_interface_spec() -> bool { - true - } +pub closed spec fn implements_interface_spec() -> bool { + true +} - // ensure that there's an implementation of the InterfaceSpec trait - pub proof fn theorem() - ensures implements_interface_spec::(), - { - } +// ensure that there's an implementation of the InterfaceSpec trait +pub proof fn theorem() + ensures + implements_interface_spec::(), +{ +} - } +} // verus! } pub mod mem { @@ -7875,216 +11088,234 @@ pub mod spec_t { verus! { - pub fn word_index(addr: usize) -> (res: usize) - requires - aligned(addr as nat, 8), - ensures - res as nat === word_index_spec(addr as nat), - // Prove this equivalence to use the indexing lemmas - res as nat === crate::definitions_t::index_from_offset(addr as nat, WORD_SIZE as nat), - word_index_spec(addr as nat) === crate::definitions_t::index_from_offset(addr as nat, WORD_SIZE as nat), - { - addr / WORD_SIZE - } +pub fn word_index(addr: usize) -> (res: usize) + requires + aligned(addr as nat, 8), + ensures + res as nat === word_index_spec(addr as nat), + // Prove this equivalence to use the indexing lemmas + res as nat === crate::definitions_t::index_from_offset(addr as nat, WORD_SIZE as nat), + word_index_spec(addr as nat) === crate::definitions_t::index_from_offset( + addr as nat, + WORD_SIZE as nat, + ), +{ + addr / WORD_SIZE +} - pub open spec fn word_index_spec(addr: nat) -> nat - recommends aligned(addr, 8) - { - addr / (WORD_SIZE as nat) - } +pub open spec fn word_index_spec(addr: nat) -> nat + recommends + aligned(addr, 8), +{ + addr / (WORD_SIZE as nat) +} - pub struct TLB { - } +pub struct TLB {} - impl TLB { - pub spec fn view(self) -> Map; +impl TLB { + pub spec fn view(self) -> Map; - /// Invalidates any TLB entries containing `vbase`. - #[verifier(external_body)] - pub fn invalidate_entry(&mut self, vbase: usize) - ensures - forall|base, pte| self.view().contains_pair(base, pte) ==> old(self).view().contains_pair(base, pte), - !self.view().dom().contains(vbase as nat), - { - unimplemented!() - } - } + /// Invalidates any TLB entries containing `vbase`. + #[verifier(external_body)] + pub fn invalidate_entry(&mut self, vbase: usize) + ensures + forall|base, pte| + self.view().contains_pair(base, pte) ==> old(self).view().contains_pair(base, pte), + !self.view().dom().contains(vbase as nat), + { + unimplemented!() + } +} - // FIXME: We need to allow the dirty and accessed bits to change in the memory. - // Or maybe we just specify reads to return those bits as arbitrary? 
- #[verifier(external_body)] - pub struct PageTableMemory { - /// `phys_mem_ref` is the starting address of the physical memory linear mapping - phys_mem_ref: *mut u64, - cr3: u64, - } +// FIXME: We need to allow the dirty and accessed bits to change in the memory. +// Or maybe we just specify reads to return those bits as arbitrary? +#[verifier(external_body)] +pub struct PageTableMemory { + /// `phys_mem_ref` is the starting address of the physical memory linear mapping + phys_mem_ref: *mut u64, + cr3: u64, +} - impl PageTableMemory { - pub spec fn alloc_available_pages(self) -> nat; - pub spec fn regions(self) -> Set; - pub spec fn region_view(self, r: MemRegion) -> Seq; +impl PageTableMemory { + pub spec fn alloc_available_pages(self) -> nat; - pub open spec fn inv(self) -> bool { - &&& self.phys_mem_ref_as_usize_spec() <= 0x7FE0_0000_0000_0000 - &&& forall|s1: MemRegion, s2: MemRegion| self.regions().contains(s1) && self.regions().contains(s2) && s1 !== s2 ==> !overlap(s1, s2) - &&& aligned(self.cr3_spec().base as nat, PAGE_SIZE as nat) - &&& self.cr3_spec().size == PAGE_SIZE - } + pub spec fn regions(self) -> Set; - pub open spec fn init(self) -> bool { - &&& self.inv() - } + pub spec fn region_view(self, r: MemRegion) -> Seq; - /// `cr3` returns a MemRegion whose base is the address at which the layer 0 page directory is mapped - #[verifier(external_body)] - pub fn cr3(&self) -> (res: MemRegionExec) - ensures res === self.cr3_spec() - { - MemRegionExec { - base: self.cr3 as usize, - size: PAGE_SIZE, - } - } + pub open spec fn inv(self) -> bool { + &&& self.phys_mem_ref_as_usize_spec() <= 0x7FE0_0000_0000_0000 + &&& forall|s1: MemRegion, s2: MemRegion| + self.regions().contains(s1) && self.regions().contains(s2) && s1 !== s2 ==> !overlap( + s1, + s2, + ) + &&& aligned(self.cr3_spec().base as nat, PAGE_SIZE as nat) + &&& self.cr3_spec().size == PAGE_SIZE + } - pub open spec fn cr3_spec(&self) -> MemRegionExec; + pub open spec fn init(self) -> bool { + &&& self.inv() + } - // We assume that alloc_page never fails. In practice we can just keep a buffer of 3+ pages - // that are allocated before we use map_frame. 
- /// Allocates one page and returns its physical address - #[verifier(external_body)] - pub fn alloc_page(&mut self) -> (r: MemRegionExec) - requires - old(self).inv(), - 0 < old(self).alloc_available_pages(), - ensures - self.alloc_available_pages() == old(self).alloc_available_pages() - 1, - r@.size == PAGE_SIZE, - r@.base + PAGE_SIZE <= MAX_PHYADDR, - aligned(r@.base, PAGE_SIZE as nat), - !old(self).regions().contains(r@), - self.regions() === old(self).regions().insert(r@), - self.region_view(r@) === new_seq::(512nat, 0u64), - forall|r2: MemRegion| r2 !== r@ ==> #[trigger] self.region_view(r2) === old(self).region_view(r2), - self.cr3_spec() == old(self).cr3_spec(), - self.phys_mem_ref_as_usize_spec() == old(self).phys_mem_ref_as_usize_spec(), - self.inv(), - { - unimplemented!() - } + /// `cr3` returns a MemRegion whose base is the address at which the layer 0 page directory is mapped + #[verifier(external_body)] + pub fn cr3(&self) -> (res: MemRegionExec) + ensures + res === self.cr3_spec(), + { + MemRegionExec { base: self.cr3 as usize, size: PAGE_SIZE } + } - /// Deallocates a page - #[verifier(external_body)] - pub fn dealloc_page(&mut self, r: MemRegionExec) - requires - old(self).inv(), - old(self).regions().contains(r@), - ensures - self.regions() === old(self).regions().remove(r@), - forall|r2: MemRegion| r2 !== r@ ==> #[trigger] self.region_view(r2) === old(self).region_view(r2), - self.cr3_spec() == old(self).cr3_spec(), - self.phys_mem_ref_as_usize_spec() == old(self).phys_mem_ref_as_usize_spec(), - self.inv(), - { - unimplemented!() - } + pub open spec fn cr3_spec(&self) -> MemRegionExec; - #[verifier(external_body)] - /// Write value to physical address `pbase + idx * WORD_SIZE` - pub fn write(&mut self, pbase: usize, idx: usize, region: Ghost, value: u64) - requires - pbase == region@.base, - aligned(pbase as nat, WORD_SIZE as nat), - old(self).inv(), - old(self).regions().contains(region@), - idx < 512, - ensures - self.region_view(region@) === old(self).region_view(region@).update(idx as int, value), - forall|r: MemRegion| r !== region@ ==> self.region_view(r) === old(self).region_view(r), - self.regions() === old(self).regions(), - self.alloc_available_pages() == old(self).alloc_available_pages(), - self.cr3_spec() == old(self).cr3_spec(), - self.phys_mem_ref_as_usize_spec() == old(self).phys_mem_ref_as_usize_spec(), - { - let word_offset: isize = (word_index(pbase) + idx) as isize; - unsafe { self.phys_mem_ref.offset(word_offset).write(value); } - } + // We assume that alloc_page never fails. In practice we can just keep a buffer of 3+ pages + // that are allocated before we use map_frame. 
+ /// Allocates one page and returns its physical address + #[verifier(external_body)] + pub fn alloc_page(&mut self) -> (r: MemRegionExec) + requires + old(self).inv(), + 0 < old(self).alloc_available_pages(), + ensures + self.alloc_available_pages() == old(self).alloc_available_pages() - 1, + r@.size == PAGE_SIZE, + r@.base + PAGE_SIZE <= MAX_PHYADDR, + aligned(r@.base, PAGE_SIZE as nat), + !old(self).regions().contains(r@), + self.regions() === old(self).regions().insert(r@), + self.region_view(r@) === new_seq::(512nat, 0u64), + forall|r2: MemRegion| + r2 !== r@ ==> #[trigger] self.region_view(r2) === old(self).region_view(r2), + self.cr3_spec() == old(self).cr3_spec(), + self.phys_mem_ref_as_usize_spec() == old(self).phys_mem_ref_as_usize_spec(), + self.inv(), + { + unimplemented!() + } - #[verifier(external_body)] - /// Read value at physical address `pbase + idx * WORD_SIZE` - pub fn read(&self, pbase: usize, idx: usize, region: Ghost) -> (res: u64) - requires - pbase == region@.base, - aligned(pbase as nat, WORD_SIZE as nat), - self.regions().contains(region@), - idx < 512, - ensures - res == self.spec_read(idx as nat, region@) - { - let word_offset: isize = (word_index(pbase) + idx) as isize; - unsafe { self.phys_mem_ref.offset(word_offset).read() } - } + /// Deallocates a page + #[verifier(external_body)] + pub fn dealloc_page(&mut self, r: MemRegionExec) + requires + old(self).inv(), + old(self).regions().contains(r@), + ensures + self.regions() === old(self).regions().remove(r@), + forall|r2: MemRegion| + r2 !== r@ ==> #[trigger] self.region_view(r2) === old(self).region_view(r2), + self.cr3_spec() == old(self).cr3_spec(), + self.phys_mem_ref_as_usize_spec() == old(self).phys_mem_ref_as_usize_spec(), + self.inv(), + { + unimplemented!() + } - pub open spec fn spec_read(self, idx: nat, region: MemRegion) -> (res: u64) { - self.region_view(region)[idx as int] - } + #[verifier(external_body)] + /// Write value to physical address `pbase + idx * WORD_SIZE` + pub fn write(&mut self, pbase: usize, idx: usize, region: Ghost, value: u64) + requires + pbase == region@.base, + aligned(pbase as nat, WORD_SIZE as nat), + old(self).inv(), + old(self).regions().contains(region@), + idx < 512, + ensures + self.region_view(region@) === old(self).region_view(region@).update(idx as int, value), + forall|r: MemRegion| r !== region@ ==> self.region_view(r) === old(self).region_view(r), + self.regions() === old(self).regions(), + self.alloc_available_pages() == old(self).alloc_available_pages(), + self.cr3_spec() == old(self).cr3_spec(), + self.phys_mem_ref_as_usize_spec() == old(self).phys_mem_ref_as_usize_spec(), + { + let word_offset: isize = (word_index(pbase) + idx) as isize; + unsafe { + self.phys_mem_ref.offset(word_offset).write(value); + } + } - /// This function manually does the address computation which `read` and `write` rely on not - /// overflowing. Since this function is not `external_body`, Verus checks that there's no - /// overflow. The preconditions are those of `read`, which are a subset of the `write` - /// preconditions. - /// (This is an exec function so it generates the normal overflow VCs.) 
- #[verus::line_count::ignore] - fn check_overflow(&self, pbase: usize, idx: usize, region: Ghost) - requires - pbase <= MAX_PHYADDR, - self.phys_mem_ref_as_usize_spec() <= 0x7FE0_0000_0000_0000, - pbase == region@.base, - aligned(pbase as nat, WORD_SIZE as nat), - self.regions().contains(region@), - idx < 512, - { - proof { crate::definitions_u::lemma_maxphyaddr_facts(); } - // https://dev-doc.rust-lang.org/beta/std/primitive.pointer.html#method.offset - // The raw pointer offset computation needs to fit in an isize. - // isize::MAX is 0x7FFF_FFFF_FFFF_FFFF - // - // `pbase` is a physical address, so we know it's <= MAX_PHYADDR (2^52-1). - // The no-overflow assertions below require phys_mem_ref <= 0x7FEFFFFFFFFFF009. - // In the invariant we require the (arbitrarily chosen) nicer number - // 0x7FE0_0000_0000_0000 as an upper bound for phys_mem_ref. - // (In practice the address has to be smaller anyway, because the address space - // isn't that large.) NrOS uses 0x4000_0000_0000. - assert(word_index_spec(pbase as nat) < 0x2_0000_0000_0000) by(nonlinear_arith) - requires - aligned(pbase as nat, WORD_SIZE as nat), - pbase <= MAX_PHYADDR, - MAX_PHYADDR <= 0xFFFFFFFFFFFFF; - let word_offset: isize = (word_index(pbase) + idx) as isize; - assert(word_offset < 0x2_0000_0000_01FF) by(nonlinear_arith) - requires - idx < 512, - word_offset == word_index_spec(pbase as nat) + idx, - word_index_spec(pbase as nat) < 0x2_0000_0000_0000; - let phys_mem_ref: isize = self.phys_mem_ref_as_usize() as isize; - - assert(word_offset * WORD_SIZE < 0x10_0000_0000_0FF8) by(nonlinear_arith) - requires word_offset < 0x2_0000_0000_01FF; - let byte_offset: isize = word_offset * (WORD_SIZE as isize); - let raw_ptr_offset = phys_mem_ref + word_offset * (WORD_SIZE as isize); - } + #[verifier(external_body)] + /// Read value at physical address `pbase + idx * WORD_SIZE` + pub fn read(&self, pbase: usize, idx: usize, region: Ghost) -> (res: u64) + requires + pbase == region@.base, + aligned(pbase as nat, WORD_SIZE as nat), + self.regions().contains(region@), + idx < 512, + ensures + res == self.spec_read(idx as nat, region@), + { + let word_offset: isize = (word_index(pbase) + idx) as isize; + unsafe { self.phys_mem_ref.offset(word_offset).read() } + } - #[verifier(external_body)] - pub spec fn phys_mem_ref_as_usize_spec(&self) -> usize; + pub open spec fn spec_read(self, idx: nat, region: MemRegion) -> (res: u64) { + self.region_view(region)[idx as int] + } - #[verifier(external_body)] - fn phys_mem_ref_as_usize(&self) -> (res: usize) - ensures res == self.phys_mem_ref_as_usize_spec() - { - unsafe { self.phys_mem_ref as usize } - } + /// This function manually does the address computation which `read` and `write` rely on not + /// overflowing. Since this function is not `external_body`, Verus checks that there's no + /// overflow. The preconditions are those of `read`, which are a subset of the `write` + /// preconditions. + /// (This is an exec function so it generates the normal overflow VCs.) + #[verus::line_count::ignore] + fn check_overflow(&self, pbase: usize, idx: usize, region: Ghost) + requires + pbase <= MAX_PHYADDR, + self.phys_mem_ref_as_usize_spec() <= 0x7FE0_0000_0000_0000, + pbase == region@.base, + aligned(pbase as nat, WORD_SIZE as nat), + self.regions().contains(region@), + idx < 512, + { + proof { + crate::definitions_u::lemma_maxphyaddr_facts(); } + // https://dev-doc.rust-lang.org/beta/std/primitive.pointer.html#method.offset + // The raw pointer offset computation needs to fit in an isize. 
+ // isize::MAX is 0x7FFF_FFFF_FFFF_FFFF + // + // `pbase` is a physical address, so we know it's <= MAX_PHYADDR (2^52-1). + // The no-overflow assertions below require phys_mem_ref <= 0x7FEFFFFFFFFFF009. + // In the invariant we require the (arbitrarily chosen) nicer number + // 0x7FE0_0000_0000_0000 as an upper bound for phys_mem_ref. + // (In practice the address has to be smaller anyway, because the address space + // isn't that large.) NrOS uses 0x4000_0000_0000. + assert(word_index_spec(pbase as nat) < 0x2_0000_0000_0000) by (nonlinear_arith) + requires + aligned(pbase as nat, WORD_SIZE as nat), + pbase <= MAX_PHYADDR, + MAX_PHYADDR <= 0xFFFFFFFFFFFFF, + ; + let word_offset: isize = (word_index(pbase) + idx) as isize; + assert(word_offset < 0x2_0000_0000_01FF) by (nonlinear_arith) + requires + idx < 512, + word_offset == word_index_spec(pbase as nat) + idx, + word_index_spec(pbase as nat) < 0x2_0000_0000_0000, + ; + let phys_mem_ref: isize = self.phys_mem_ref_as_usize() as isize; + assert(word_offset * WORD_SIZE < 0x10_0000_0000_0FF8) by (nonlinear_arith) + requires + word_offset < 0x2_0000_0000_01FF, + ; + let byte_offset: isize = word_offset * (WORD_SIZE as isize); + let raw_ptr_offset = phys_mem_ref + word_offset * (WORD_SIZE as isize); + } - } + #[verifier(external_body)] + pub spec fn phys_mem_ref_as_usize_spec(&self) -> usize; + + #[verifier(external_body)] + fn phys_mem_ref_as_usize(&self) -> (res: usize) + ensures + res == self.phys_mem_ref_as_usize_spec(), + { + unsafe { self.phys_mem_ref as usize } + } +} + +} // verus! } } @@ -8098,212 +11329,264 @@ pub mod extra { verus! { - pub proof fn mod_of_mul_integer_ring(a: int, b: int) by (integer_ring) - ensures (a * b) % b == 0 - { } +pub proof fn mod_of_mul_integer_ring(a: int, b: int) + by (integer_ring) + ensures + (a * b) % b == 0, +{ +} - pub proof fn mod_of_mul(a: nat, b: nat) by (nonlinear_arith) - requires b > 0, - ensures aligned(a * b, b), - { - mod_of_mul_integer_ring(a as int, b as int); - assert((a * b) % b == 0); - } +pub proof fn mod_of_mul(a: nat, b: nat) + by (nonlinear_arith) + requires + b > 0, + ensures + aligned(a * b, b), +{ + mod_of_mul_integer_ring(a as int, b as int); + assert((a * b) % b == 0); +} - pub proof fn mod_of_mul_auto() - ensures forall|a: nat, b: nat| b > 0 ==> aligned(#[trigger] (a * b), b), - { - assert forall|a: nat, b: nat| b > 0 implies aligned(#[trigger] (a * b), b) by { - mod_of_mul(a, b); - } +pub proof fn mod_of_mul_auto() + ensures + forall|a: nat, b: nat| b > 0 ==> aligned(#[trigger] (a * b), b), +{ + assert forall|a: nat, b: nat| b > 0 implies aligned(#[trigger] (a * b), b) by { + mod_of_mul(a, b); } +} - pub proof fn mod_add_zero_integer_ring(a: int, b: int, c: int) by (integer_ring) - requires a % c == 0, b % c == 0 - ensures (a + b) % c == 0 - { } +pub proof fn mod_add_zero_integer_ring(a: int, b: int, c: int) + by (integer_ring) + requires + a % c == 0, + b % c == 0, + ensures + (a + b) % c == 0, +{ +} - pub proof fn mod_add_zero(a: nat, b: nat, c: nat) - requires aligned(a, c), aligned(b, c), c > 0 - ensures aligned(a + b, c) - { - mod_add_zero_integer_ring(a as int, b as int, c as int); - } +pub proof fn mod_add_zero(a: nat, b: nat, c: nat) + requires + aligned(a, c), + aligned(b, c), + c > 0, + ensures + aligned(a + b, c), +{ + mod_add_zero_integer_ring(a as int, b as int, c as int); +} - pub proof fn mod_mult_zero_implies_mod_zero_integer_ring(a: int, b: int, c: int) by (integer_ring) - requires a % (b * c) == 0 - ensures a % b == 0 - { } +pub proof fn 
mod_mult_zero_implies_mod_zero_integer_ring(a: int, b: int, c: int) + by (integer_ring) + requires + a % (b * c) == 0, + ensures + a % b == 0, +{ +} - pub proof fn mod_mult_zero_implies_mod_zero(a: nat, b: nat, c: nat) by (nonlinear_arith) - requires aligned(a, b * c), b > 0, c > 0 - ensures aligned(a, b) - { - mod_mult_zero_implies_mod_zero_integer_ring(a as int, b as int, c as int); - } +pub proof fn mod_mult_zero_implies_mod_zero(a: nat, b: nat, c: nat) + by (nonlinear_arith) + requires + aligned(a, b * c), + b > 0, + c > 0, + ensures + aligned(a, b), +{ + mod_mult_zero_implies_mod_zero_integer_ring(a as int, b as int, c as int); +} - pub proof fn subtract_mod_eq_zero_integer_ring(a: int, b: int, c: int) by (integer_ring) - requires a % c == 0, b % c == 0 - ensures (b - a) % c == 0 - { } +pub proof fn subtract_mod_eq_zero_integer_ring(a: int, b: int, c: int) + by (integer_ring) + requires + a % c == 0, + b % c == 0, + ensures + (b - a) % c == 0, +{ +} - pub proof fn subtract_mod_eq_zero(a: nat, b: nat, c: nat) - requires aligned(a, c), aligned(b, c), a <= b, c > 0 - ensures aligned((b - a) as nat, c) - { - subtract_mod_eq_zero_integer_ring(a as int, b as int, c as int) - } +pub proof fn subtract_mod_eq_zero(a: nat, b: nat, c: nat) + requires + aligned(a, c), + aligned(b, c), + a <= b, + c > 0, + ensures + aligned((b - a) as nat, c), +{ + subtract_mod_eq_zero_integer_ring(a as int, b as int, c as int) +} - pub proof fn leq_add_aligned_less(a: nat, b: nat, c: nat) by (nonlinear_arith) - requires 0 < b, a < c, aligned(a, b), aligned(c, b), - ensures a + b <= c, - { - assert(a == b * (a / b) + a % b); - assert(c == b * (c / b) + c % b); - } +pub proof fn leq_add_aligned_less(a: nat, b: nat, c: nat) + by (nonlinear_arith) + requires + 0 < b, + a < c, + aligned(a, b), + aligned(c, b), + ensures + a + b <= c, +{ + assert(a == b * (a / b) + a % b); + assert(c == b * (c / b) + c % b); +} - pub proof fn aligned_transitive_auto() - ensures forall|a: nat, b: nat, c: nat| 0 < b && 0 < c && aligned(a, b) && aligned(b, c) ==> aligned(a, c), - { - assert forall|a: nat, b: nat, c: nat| 0 < b && 0 < c && aligned(a, b) && aligned(b, c) implies aligned(a, c) by { - aligned_transitive(a, b, c); - } +pub proof fn aligned_transitive_auto() + ensures + forall|a: nat, b: nat, c: nat| + 0 < b && 0 < c && aligned(a, b) && aligned(b, c) ==> aligned(a, c), +{ + assert forall|a: nat, b: nat, c: nat| + 0 < b && 0 < c && aligned(a, b) && aligned(b, c) implies aligned(a, c) by { + aligned_transitive(a, b, c); } +} - pub proof fn lemma_aligned_iff_eq_mul_div(a: nat, b: nat) - requires b > 0 - ensures aligned(a, b) <==> a == b * (a / b) - { - assert(a % b == 0 ==> a == b * (a / b)) by (nonlinear_arith) - requires b > 0; - assert(a == b * (a / b) ==> a % b == 0) by (nonlinear_arith) - requires b > 0; - } +pub proof fn lemma_aligned_iff_eq_mul_div(a: nat, b: nat) + requires + b > 0, + ensures + aligned(a, b) <==> a == b * (a / b), +{ + assert(a % b == 0 ==> a == b * (a / b)) by (nonlinear_arith) + requires + b > 0, + ; + assert(a == b * (a / b) ==> a % b == 0) by (nonlinear_arith) + requires + b > 0, + ; +} - pub proof fn aligned_transitive(a: nat, b: nat, c: nat) +pub proof fn aligned_transitive(a: nat, b: nat, c: nat) + requires + 0 < b, + 0 < c, + aligned(a, b), + aligned(b, c), + ensures + aligned(a, c), +{ + lemma_aligned_iff_eq_mul_div(a, b); + lemma_aligned_iff_eq_mul_div(b, c); + lemma_aligned_iff_eq_mul_div(a, c); + let i = a / b; + let j = b / c; + assert((c * j) * i == c * (j * i)) by (nonlinear_arith); + 
assert(a / c == j * i) by (nonlinear_arith) requires - 0 < b, 0 < c, - aligned(a, b), - aligned(b, c), - ensures aligned(a, c) - { - lemma_aligned_iff_eq_mul_div(a, b); - lemma_aligned_iff_eq_mul_div(b, c); - lemma_aligned_iff_eq_mul_div(a, c); - let i = a / b; let j = b / c; - assert((c * j) * i == c * (j * i)) by (nonlinear_arith); - assert(a / c == j * i) by (nonlinear_arith) - requires 0 < c, a == c * (j * i); - } - - #[verifier(nonlinear)] - pub proof fn mod_less_eq(a: nat, b: nat) { - requires(b != 0); - ensures(a % b <= a); - } - - #[verifier(nonlinear)] - pub proof fn aligned_zero() - ensures - forall|a:nat| a != 0 ==> aligned(0, a) - { } - - #[verifier(nonlinear)] - pub proof fn mul_distributive(a: nat, b: nat) { - ensures((a + 1) * b == a * b + b); - } - - #[verifier(nonlinear)] - pub proof fn mul_commute(a: nat, b: nat) { - ensures(a * b == b * a); - } + a == c * (j * i), + ; +} - #[verifier(nonlinear)] - pub proof fn div_mul_cancel(a: nat, b: nat) { - requires([ - aligned(a, b), - b != 0 - ]); - ensures(a / b * b == a); - } +#[verifier(nonlinear)] +pub proof fn mod_less_eq(a: nat, b: nat) { + requires(b != 0); + ensures(a % b <= a); +} - #[verifier(nonlinear)] - pub proof fn less_mul_cancel(a: nat, b: nat, c: nat) { - requires(a * c < b * c); - ensures(a < b); - } +#[verifier(nonlinear)] +pub proof fn aligned_zero() + ensures + forall|a: nat| a != 0 ==> aligned(0, a), +{ +} - #[verifier(nonlinear)] - pub proof fn mult_leq_mono1(a: nat, b: nat, c: nat) { - requires(a <= b); - ensures(a * c <= b * c); - } +#[verifier(nonlinear)] +pub proof fn mul_distributive(a: nat, b: nat) { + ensures((a + 1) * b == a * b + b); +} - #[verifier(nonlinear)] - pub proof fn mult_leq_mono2(a: nat, b: nat, c: nat) { - requires(a <= b); - ensures(c * a <= c * a); - } +#[verifier(nonlinear)] +pub proof fn mul_commute(a: nat, b: nat) { + ensures(a * b == b * a); +} - #[verifier(nonlinear)] - pub proof fn mult_leq_mono_both(a: nat, b: nat, c: nat, d: nat) - requires - a <= c, - b <= d, - ensures - // Including `0 <=` here because it's used in a place where this is part of an overflow VC - // and non-nonlinear z3 can't even deal with that. 
- 0 <= a * b <= c * d; +#[verifier(nonlinear)] +pub proof fn div_mul_cancel(a: nat, b: nat) { + requires([aligned(a, b), b != 0]); + ensures(a / b * b == a); +} - #[verifier(nonlinear)] - pub proof fn mult_less_mono_both1(a: nat, b: nat, c: nat, d: nat) - requires - a < c, - b <= d, - 0 < c, - 0 < d, - ensures - a * b < c * d; +#[verifier(nonlinear)] +pub proof fn less_mul_cancel(a: nat, b: nat, c: nat) { + requires(a * c < b * c); + ensures(a < b); +} - #[verifier(nonlinear)] - pub proof fn mult_less_mono_both2(a: nat, b: nat, c: nat, d: nat) - requires - a <= c, - b < d, - 0 < c, - 0 < d, - ensures - a * b < c * d; +#[verifier(nonlinear)] +pub proof fn mult_leq_mono1(a: nat, b: nat, c: nat) { + requires(a <= b); + ensures(a * c <= b * c); +} +#[verifier(nonlinear)] +pub proof fn mult_leq_mono2(a: nat, b: nat, c: nat) { + requires(a <= b); + ensures(c * a <= c * a); +} - pub proof fn assert_maps_equal_contains_pair(m1: Map, m2: Map) - requires - forall|k:K,v:V| m1.contains_pair(k, v) ==> m2.contains_pair(k, v), - forall|k:K,v:V| m2.contains_pair(k, v) ==> m1.contains_pair(k, v), - ensures - m1 === m2 - { - assert forall|k| - m1.dom().contains(k) - implies m2.dom().contains(k) by - { assert(m2.contains_pair(k, m1.index(k))); }; - assert forall|k| - m2.dom().contains(k) - implies m1.dom().contains(k) by - { assert(m1.contains_pair(k, m2.index(k))); }; - assert forall|k| - m1.dom().contains(k) && m2.dom().contains(k) - implies #[trigger] m2.index(k) === #[trigger] m1.index(k) by - { - let v = m1.index(k); - assert(m1.contains_pair(k, v)); - assert(m2.contains_pair(k, v)); - }; - assert_maps_equal!(m1, m2); - } +#[verifier(nonlinear)] +pub proof fn mult_leq_mono_both(a: nat, b: nat, c: nat, d: nat) + requires + a <= c, + b <= d, + ensures +// Including `0 <=` here because it's used in a place where this is part of an overflow VC +// and non-nonlinear z3 can't even deal with that. + + 0 <= a * b <= c * d, +; + +#[verifier(nonlinear)] +pub proof fn mult_less_mono_both1(a: nat, b: nat, c: nat, d: nat) + requires + a < c, + b <= d, + 0 < c, + 0 < d, + ensures + a * b < c * d, +; + +#[verifier(nonlinear)] +pub proof fn mult_less_mono_both2(a: nat, b: nat, c: nat, d: nat) + requires + a <= c, + b < d, + 0 < c, + 0 < d, + ensures + a * b < c * d, +; + +pub proof fn assert_maps_equal_contains_pair(m1: Map, m2: Map) + requires + forall|k: K, v: V| m1.contains_pair(k, v) ==> m2.contains_pair(k, v), + forall|k: K, v: V| m2.contains_pair(k, v) ==> m1.contains_pair(k, v), + ensures + m1 === m2, +{ + assert forall|k| m1.dom().contains(k) implies m2.dom().contains(k) by { + assert(m2.contains_pair(k, m1.index(k))); + }; + assert forall|k| m2.dom().contains(k) implies m1.dom().contains(k) by { + assert(m1.contains_pair(k, m2.index(k))); + }; + assert forall|k| m1.dom().contains(k) && m2.dom().contains(k) implies #[trigger] m2.index(k) + === #[trigger] m1.index(k) by { + let v = m1.index(k); + assert(m1.contains_pair(k, v)); + assert(m2.contains_pair(k, v)); + }; + assert_maps_equal!(m1, m2); +} - } +} // verus! } use vstd::prelude::verus; @@ -8312,6 +11595,5 @@ verus! { global size_of usize == 8; -} - +} // verus! 
 fn main() {}
diff --git a/tests/snapshot-examples.rs b/tests/snapshot-examples.rs
index 2db21c1..6c7c76b 100644
--- a/tests/snapshot-examples.rs
+++ b/tests/snapshot-examples.rs
@@ -47,7 +47,6 @@ fn owl_output_rs_unchanged() {
 }
 
 #[test]
-#[ignore] // Due to https://github.com/verus-lang/verusfmt/issues/33
 fn pagetable_rs_unchanged() {
     check_snapshot(include_str!("../examples/pagetable.rs"));
 }

From 0ff13c1e0354f223102b18706253a0dbb13a00af Mon Sep 17 00:00:00 2001
From: Jay Bosamiya
Date: Mon, 26 Feb 2024 17:22:40 -0500
Subject: [PATCH 09/10] Ensure tests match the verusfmt binary's defaults

---
 tests/snapshot-examples.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/snapshot-examples.rs b/tests/snapshot-examples.rs
index 6c7c76b..0503059 100644
--- a/tests/snapshot-examples.rs
+++ b/tests/snapshot-examples.rs
@@ -5,7 +5,7 @@
 //! modified by any change.
 
 fn check_snapshot(original: &str) {
-    let formatted = verusfmt::parse_and_format(&original).unwrap();
+    let formatted = verusfmt::rustfmt(&verusfmt::parse_and_format(&original).unwrap()).unwrap();
     if original != formatted {
         let diff = similar::udiff::unified_diff(
             similar::Algorithm::Patience,

From e1b754cfa0a59d2349fb5cb009b8d3d84c26c3d6 Mon Sep 17 00:00:00 2001
From: Jay Bosamiya
Date: Mon, 26 Feb 2024 17:30:41 -0500
Subject: [PATCH 10/10] Update CHANGELOG

---
 CHANGELOG.md | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c895291..d117088 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,9 @@
 # Unreleased
 
+* Fix `FnSpec` parsing
+  - Although Verus has deprecated `FnSpec` in favor of `spec_fn`, verusfmt still supports parsing it for projects on older versions of Verus
+* Fix idempotency issue for macro-items inside `verus!` blocks within in-file `mod`ules
+
 # v0.2.4
 
 * Move verusfmt to the verus-lang organization: https://github.com/verus-lang/verusfmt
@@ -18,7 +22,7 @@
 * Update handling for `fn_trait_types`
 * Add support for `const` params
 * Add support for `opens_invariants`
-* Improve handling of comples self-params (e.g., `tracked '&a self`)
+* Improve handling of complex self-params (e.g., `tracked '&a self`)
 * Introduce `#[verusfmt::skip]` (#31)
 * Add support for new `->` and `matches` expressions (#32)