diff --git a/Cargo.lock b/Cargo.lock index b3d88e4..823c582 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -53,6 +53,12 @@ dependencies = [ "rustc-demangle", ] +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + [[package]] name = "bincode" version = "1.3.3" @@ -88,9 +94,9 @@ checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" [[package]] name = "cc" -version = "1.1.15" +version = "1.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57b6a275aa2903740dc87da01c62040406b8812552e97129a63ea8850a17c6e6" +checksum = "a93fe60e2fc87b6ba2c117f67ae14f66e3fc7d6a1e612a25adb238cc980eadb3" dependencies = [ "shlex", ] @@ -143,6 +149,63 @@ dependencies = [ "memchr", ] +[[package]] +name = "darling" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn", +] + +[[package]] +name = "darling_macro" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" +dependencies = [ + "darling_core", + "quote", + "syn", +] + +[[package]] +name = "deranged" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +dependencies = [ + "powerfmt", + "serde", +] + +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + [[package]] name = "getrandom" version = "0.2.15" @@ -163,6 +226,25 @@ checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" [[package]] name = "graphs" version = "0.1.0" +dependencies = [ + "chrono", + "csv", + "rand", + "rpq", + "tokio", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" [[package]] name = "hermit-abi" @@ -170,6 +252,12 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + [[package]] name = "iana-time-zone" version = "0.1.60" @@ -193,6 +281,34 @@ dependencies = [ "cc", ] +[[package]] +name = "ident_case" +version = "1.0.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", + "serde", +] + +[[package]] +name = "indexmap" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" +dependencies = [ + "equivalent", + "hashbrown 0.14.5", + "serde", +] + [[package]] name = "itoa" version = "1.0.11" @@ -257,6 +373,12 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + [[package]] name = "num-traits" version = "0.2.19" @@ -310,6 +432,12 @@ version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + [[package]] name = "ppv-lite86" version = "0.2.20" @@ -387,7 +515,7 @@ dependencies = [ [[package]] name = "rpq" -version = "0.1.3" +version = "0.2.0" dependencies = [ "bincode", "chrono", @@ -395,6 +523,7 @@ dependencies = [ "rand", "redb", "serde", + "serde_with", "tokio", "uuid", ] @@ -419,24 +548,66 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "serde" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", "syn", ] +[[package]] +name = "serde_json" +version = "1.0.128" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "serde_with" +version = "3.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cecfa94848272156ea67b2b1a53f20fc7bc638c4a46d2f8abde08f05f4b857" +dependencies = [ + "base64", + "chrono", + "hex", + "indexmap 1.9.3", + "indexmap 2.5.0", + "serde", + "serde_derive", + "serde_json", + "serde_with_macros", + "time", +] + +[[package]] +name = "serde_with_macros" +version = "3.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "shlex" version = "1.3.0" @@ -468,6 +639,12 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "strsim" +version = "0.11.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + [[package]] name = "syn" version = "2.0.77" @@ -479,6 +656,37 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "time" +version = "0.3.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +dependencies = [ + "deranged", + "itoa", + "num-conv", + "powerfmt", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" + +[[package]] +name = "time-macros" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +dependencies = [ + "num-conv", + "time-core", +] + [[package]] name = "tokio" version = "1.40.0" diff --git a/Cargo.toml b/Cargo.toml index fea41e4..06909bb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rpq" -version = "0.1.3" +version = "0.2.0" edition = "2021" authors = ["Justin Timperio"] description = "A high performance embeddable double priority queue with complex priority ordering guarantees" @@ -25,6 +25,7 @@ csv = "1.3.0" rand = "0.8.4" redb = "2.1.1" serde = { version = "1.0.208", features = ["derive"] } +serde_with = { version = "3.9.0", features = ["chrono"] } tokio = { version = "1.10.0", features = ["full"] } uuid = { version = "1.10.0", features = ["v4"] } @@ -33,3 +34,4 @@ inherits = "release" [workspace] members = ["graphs"] +name = "rpq-graphs" diff --git a/README.md b/README.md index afa393b..ebd1eb5 100644 --- a/README.md +++ b/README.md @@ -42,10 +42,10 @@ RPQ is a concurrency safe, embeddable priority queue that can be used in a varie ## Benchmarks Due to the fact that most operations are done in constant time O(1) or logarithmic time O(log n), with the exception of the prioritize function which happens in linear time O(n), all RPQ operations are extremely fast. A single RPQ can handle a few million transactions a second and can be tuned depending on your work load. I have included some basic benchmarks using C++, Rust, Zig, and Go to measure RPQ's performance against the standard implementations of other languages that can be found here at: [pq-bench](https://github.com/JustinTimperio/pq-bench). 
-| | | -|-------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------| -| ![Time-Spent](https://github.com/JustinTimperio/pq-bench/blob/master/docs/Time-Spent-vs-Implementation.png) | ![Queue-Speed-WITHOUT-Reprioritize](./docs/Queue-Speed-Without-Prioritize.png) | -| ![TODO: Queue-Speed-WITH-Reprioritize](./docs/Queue-Speed-With-Prioritize.png) | ![Time-to-Send-and-Recive-VS-Bucket-Count](./docs/Time-to-Send-and-Receive-VS-Bucket-Count.png) | +| | | +|-------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------| +| ![Time-Spent](https://github.com/JustinTimperio/pq-bench/blob/master/docs/Time-Spent-vs-Implementation.png) | ![Queue-Speed-WITHOUT-Reprioritize](./docs/Queue-Speed-Without-Prioritize.png) | +| ![TODO: Queue-Speed-WITH-Reprioritize](./docs/Queue-Speed-With-Prioritize.png) | ![Time-to-Send-and-Receive-VS-Bucket-Count](./docs/Time-to-Send-and-Receive-VS-Bucket-Count.png) | ## Usage RPQ is a embeddable priority queue that is meant to be used at the core of critical workloads where complex ordering are required in combination with large volumes of data. The best way to us RPQ is to import the Crate and use the API to interact with the queue. @@ -54,13 +54,15 @@ TODO: Publish to crates.io ```toml [dependencies] -rpq = "0.1.3" +rpq = "0.2.0" ``` ### API Reference - `RPQ::new(options: RPQOptions) -> Result<(RPQ, usize), Error>` - Creates a new RPQ with the given options and returns the number of restored items. - `enqueue(mut item: Item) -> Result<(), Error>` - Enqueues a new item into the RPQ. + - `enqueue_batch(mut items: Vec) -> Result<(), Error>` - Enqueues a batch of items into the RPQ. - `dequeue() -> Result` - Dequeues the next item from the RPQ. + - `dequeue_batch(count: usize) -> Result, Error>` - Dequeues a batch of items from the RPQ. - `prioritize() -> Result<(usize, usize), Error>` - Prioritizes the items in the RPQ and returns the number of timed out and reprioritized items. - `len() -> usize` - Returns the number of items in the RPQ. - `active_buckets() -> usize` - Returns the number of active buckets in the RPQ. 
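A minimal sketch of how the new `enqueue_batch` and `dequeue_batch` methods listed in the API reference above might be used. This is not part of the diff itself: it assumes the batch methods are async like `enqueue`/`dequeue`, that `RPQOptions` and `Item::new` take the same arguments as in the Example Usage hunk that follows, and the database path and the 1_000-item batch size are placeholder values.

```rust
use chrono::Duration;
use rpq::{schema::Item, schema::RPQOptions, RPQ};

#[tokio::main(flavor = "multi_thread")]
async fn main() {
    // Same option shape as the Example Usage below; the path is a placeholder.
    let options = RPQOptions {
        max_priority: 10,
        disk_cache_enabled: true,
        database_path: "/tmp/rpq-batch-example.redb".to_string(),
        lazy_disk_cache: true,
        lazy_disk_write_delay: Duration::seconds(5),
        lazy_disk_cache_batch_size: 10_000,
    };

    let r = RPQ::new(options).await;
    if r.is_err() {
        println!("Error creating RPQ");
        return;
    }
    let (rpq, _restored_items) = r.unwrap();

    // Build a batch of items; Item::new takes the same arguments as in the
    // single-item example (priority, data, escalation and timeout settings).
    let batch: Vec<_> = (0..1_000)
        .map(|i| Item::new(i % 10, i, false, None, false, None))
        .collect();

    // Enqueue the whole batch in one call instead of looping over enqueue().
    if rpq.enqueue_batch(batch).await.is_err() {
        println!("Error enqueueing batch");
        return;
    }

    // Dequeue up to the same number of items back out in a single call.
    if rpq.dequeue_batch(1_000).await.is_err() {
        println!("Error dequeuing batch");
        return;
    }

    rpq.close().await;
}
```

Because the return types of the batch methods are abbreviated in the API reference above, error handling in this sketch is kept to `is_err()` checks rather than assuming the shape of the `Ok` payload.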
@@ -71,23 +73,20 @@ rpq = "0.1.3" #### Example Usage ```rust -use std::sync::Arc; -use std::time::{Duration, Instant}; -use rpq::pq::Item; -use rpq::{RPQOptions, RPQ}; +use chrono::Duration; +use rpq::{schema::RPQOptions, schema::Item, RPQ}; #[tokio::main(flavor = "multi_thread")] async fn main() { - let message_count = 10_000_000; + let message_count = 1_000_000; let options = RPQOptions { - bucket_count: 10, - disk_cache_enabled: false, - database_path: "/tmp/rpq.redb".to_string(), - lazy_disk_cache: false, - lazy_disk_max_delay: Duration::from_secs(5), - lazy_disk_cache_batch_size: 5000, - buffer_size: 1_000_000, + max_priority: 10, + disk_cache_enabled: true, + database_path: "/tmp/rpq-prioritize.redb".to_string(), + lazy_disk_cache: true, + lazy_disk_write_delay: Duration::seconds(5), + lazy_disk_cache_batch_size: 10_000, }; let r = RPQ::new(options).await; @@ -99,10 +98,8 @@ async fn main() { } } - let (rpq, _) = r.unwrap(); + let (rpq, _restored_items) = r.unwrap(); - let timer = Instant::now(); - let send_timer = Instant::now(); for i in 0..message_count { let item = Item::new( i % 10, @@ -110,7 +107,7 @@ async fn main() { false, None, false, - Some(Duration::from_secs(5)), + None, ); let result = rpq.enqueue(item).await; if result.is_err() { @@ -119,9 +116,6 @@ async fn main() { } } - let send_elapsed = send_timer.elapsed().as_secs_f64(); - - let receive_timer = Instant::now(); for _i in 0..message_count { let result = rpq.dequeue().await; if result.is_err() { @@ -130,17 +124,7 @@ async fn main() { } } - let receive_elapsed = receive_timer.elapsed().as_secs_f64(); - - println!( - "Time to insert {} messages: {}s", - message_count, send_elapsed - ); - println!( - "Time to receive {} messages: {}s", - message_count, receive_elapsed - ); - println!("Total Time: {}s", timer.elapsed().as_secs_f64()); + rpq.close().await; } ``` diff --git a/docs/Queue-Speed-With-Prioritize.png b/docs/Queue-Speed-With-Prioritize.png index 97e7f6b..1afde8b 100644 Binary files a/docs/Queue-Speed-With-Prioritize.png and b/docs/Queue-Speed-With-Prioritize.png differ diff --git a/docs/Queue-Speed-Without-Prioritize.png b/docs/Queue-Speed-Without-Prioritize.png index fc07ab8..fb5a862 100644 Binary files a/docs/Queue-Speed-Without-Prioritize.png and b/docs/Queue-Speed-Without-Prioritize.png differ diff --git a/docs/Time-to-Send-and-Receive-VS-Bucket-Count.png b/docs/Time-to-Send-and-Receive-VS-Bucket-Count.png index b1607ec..c6a5981 100644 Binary files a/docs/Time-to-Send-and-Receive-VS-Bucket-Count.png and b/docs/Time-to-Send-and-Receive-VS-Bucket-Count.png differ diff --git a/docs/bench-report-no-repro.csv b/docs/bench-report-no-repro.csv index e20f48b..20fc8dc 100644 --- a/docs/bench-report-no-repro.csv +++ b/docs/bench-report-no-repro.csv @@ -1,201 +1,201 @@ Total Items,Buckets,Removed,Escalated,Time Elapsed,Time to Send,Time to Receive -1000000,5,0,0,0.133308658,0.074854881,0.058453237 -1000000,10,0,0,0.13310412,0.075824955,0.057278365 -1000000,15,0,0,0.139436113,0.08198638,0.057449043 -1000000,20,0,0,0.136789019,0.079211417,0.057576802 -1000000,25,0,0,0.142562566,0.084971813,0.057590033 -1000000,30,0,0,0.150181781,0.092719167,0.057461844 -1000000,35,0,0,0.156051097,0.098667043,0.057383334 -1000000,40,0,0,0.171833585,0.113966073,0.057866772 -1000000,45,0,0,0.172011005,0.114370181,0.057640164 -1000000,50,0,0,0.173376698,0.115681535,0.057694493 -1000000,55,0,0,0.174668441,0.117214646,0.057453165 -1000000,60,0,0,0.175628854,0.117919741,0.057708533 -1000000,65,0,0,0.165365025,0.107641322,0.057723053 
-1000000,70,0,0,0.169935908,0.112338235,0.057597083 -1000000,75,0,0,0.170071431,0.112232507,0.057838204 -1000000,80,0,0,0.176208214,0.118364521,0.057842963 -1000000,85,0,0,0.175014342,0.11698308,0.058030552 -1000000,90,0,0,0.172340007,0.114386364,0.057952953 -1000000,95,0,0,0.177411938,0.119679984,0.057731354 -1000000,100,0,0,0.173190702,0.115671757,0.057518355 -2000000,5,0,0,0.287553231,0.173190215,0.114362386 -2000000,10,0,0,0.281078628,0.165125511,0.115952437 -2000000,15,0,0,0.330861119,0.216055526,0.114804973 -2000000,20,0,0,0.273904833,0.158875219,0.115028954 -2000000,25,0,0,0.314602009,0.189316314,0.125285095 -2000000,30,0,0,0.327853832,0.212677179,0.115175953 -2000000,35,0,0,0.336871653,0.221160031,0.115710982 -2000000,40,0,0,0.362136189,0.247066593,0.115068916 -2000000,45,0,0,0.361528583,0.245880431,0.115647592 -2000000,50,0,0,0.365300955,0.249696861,0.115603354 -2000000,55,0,0,0.371892289,0.256154235,0.115737264 -2000000,60,0,0,0.377380138,0.261274665,0.116104853 -2000000,65,0,0,0.349289864,0.233491539,0.115797755 -2000000,70,0,0,0.361265735,0.244977133,0.116287872 -2000000,75,0,0,0.358302103,0.242216208,0.116085335 -2000000,80,0,0,0.354098462,0.238777322,0.11532048 -2000000,85,0,0,0.357239634,0.242178673,0.115060321 -2000000,90,0,0,0.361058505,0.24471655,0.116341395 -2000000,95,0,0,0.357499268,0.242413165,0.115085533 -2000000,100,0,0,0.353538541,0.237898151,0.11563973 -3000000,5,0,0,0.588874274,0.417870029,0.171003555 -3000000,10,0,0,0.535163023,0.362848573,0.17231377 -3000000,15,0,0,0.459196592,0.287711586,0.171484386 -3000000,20,0,0,0.515607183,0.343355329,0.172251214 -3000000,25,0,0,0.427230939,0.253967422,0.173262937 -3000000,30,0,0,0.475802685,0.303503031,0.172299064 -3000000,35,0,0,0.562853009,0.389596358,0.173256041 -3000000,40,0,0,0.621447582,0.446054271,0.175392541 -3000000,45,0,0,0.656118213,0.479451018,0.176666455 -3000000,50,0,0,0.545518873,0.3716625,0.173855703 -3000000,55,0,0,0.54645116,0.369813522,0.176636958 -3000000,60,0,0,0.557223897,0.383878398,0.173344759 -3000000,65,0,0,0.565184175,0.390710881,0.174472424 -3000000,70,0,0,0.608720887,0.4346921,0.174028137 -3000000,75,0,0,0.600017902,0.42632145,0.173695782 -3000000,80,0,0,0.627075035,0.443287499,0.183786816 -3000000,85,0,0,0.636334662,0.45695962,0.179374282 -3000000,90,0,0,0.638066978,0.459098641,0.178967607 -3000000,95,0,0,0.547406766,0.372362266,0.17504372 -3000000,100,0,0,0.549292791,0.375396003,0.173896008 -4000000,5,0,0,0.776770289,0.545738083,0.231031466 -4000000,10,0,0,0.708482575,0.476639102,0.231842663 -4000000,15,0,0,0.801711641,0.567675728,0.234035193 -4000000,20,0,0,0.694311222,0.462470832,0.23183963 -4000000,25,0,0,0.741830938,0.509914297,0.231915921 -4000000,30,0,0,0.819332903,0.587921286,0.231410877 -4000000,35,0,0,0.753385303,0.504907557,0.248476876 -4000000,40,0,0,0.799525831,0.567257214,0.232267917 -4000000,45,0,0,0.830732254,0.598460813,0.232270731 -4000000,50,0,0,0.862168984,0.627314334,0.2348539 -4000000,55,0,0,0.902179089,0.667946943,0.234231266 -4000000,60,0,0,0.913464832,0.679733679,0.233730313 -4000000,65,0,0,0.7743713,0.542247755,0.232122775 -4000000,70,0,0,0.766458601,0.533356019,0.233101902 -4000000,75,0,0,0.791708534,0.559083878,0.232623996 -4000000,80,0,0,0.802502649,0.571567841,0.230934278 -4000000,85,0,0,0.810647094,0.579260305,0.231386119 -4000000,90,0,0,0.822981329,0.591938185,0.231042364 -4000000,95,0,0,0.844478955,0.612418513,0.232059872 -4000000,100,0,0,0.844933873,0.614224461,0.230708662 -5000000,5,0,0,0.963019492,0.677043557,0.285975215 
-5000000,10,0,0,0.863741288,0.577313082,0.286427476 -5000000,15,0,0,0.948979453,0.660120316,0.288858417 -5000000,20,0,0,0.854053583,0.566084327,0.287968446 -5000000,25,0,0,0.911723333,0.622463741,0.289258822 -5000000,30,0,0,1.006352145,0.71664296,0.289708465 -5000000,35,0,0,1.10110018,0.81141124,0.28968808 -5000000,40,0,0,1.0171051229999999,0.710993693,0.30611072 -5000000,45,0,0,1.022687349,0.734094364,0.288592125 -5000000,50,0,0,1.056614511,0.765006558,0.291607283 -5000000,55,0,0,1.109477737,0.816761435,0.292715272 -5000000,60,0,0,1.108324821,0.818972447,0.289351684 -5000000,65,0,0,1.132979035,0.843191198,0.289787357 -5000000,70,0,0,1.148490705,0.859539958,0.288950137 -5000000,75,0,0,1.166898722,0.877580733,0.289317439 -5000000,80,0,0,0.988138701,0.699823743,0.288314308 -5000000,85,0,0,1.004086785,0.715284745,0.28880155 -5000000,90,0,0,1.016904566,0.727466415,0.289437521 -5000000,95,0,0,1.02116854,0.7325535,0.28861452 -5000000,100,0,0,1.026939047,0.736553032,0.290385445 -6000000,5,0,0,1.150205161,0.804841564,0.345362847 -6000000,10,0,0,1.22335467,0.877619627,0.345734453 -6000000,15,0,0,1.121252755,0.776793501,0.344458424 -6000000,20,0,0,1.254137613,0.908473749,0.345662854 -6000000,25,0,0,1.072295564,0.724811883,0.347483001 -6000000,30,0,0,1.155200638,0.808659894,0.346540094 -6000000,35,0,0,1.256393426,0.908990842,0.347401924 -6000000,40,0,0,1.38248134,1.035283638,0.347197122 -6000000,45,0,0,1.4372678749999999,1.087994326,0.349272869 -6000000,50,0,0,1.235671851,0.888146747,0.347524404 -6000000,55,0,0,1.269010354,0.921285243,0.347724431 -6000000,60,0,0,1.2890554220000001,0.940125201,0.348929451 -6000000,65,0,0,1.316043518,0.968613324,0.347429434 -6000000,70,0,0,1.34206347,0.993040376,0.349022504 -6000000,75,0,0,1.367627534,1.018926794,0.3486998 -6000000,80,0,0,1.3723429409999999,1.025560233,0.346782118 -6000000,85,0,0,1.3962596440000001,1.04971603,0.346542974 -6000000,90,0,0,1.419178439,1.071386421,0.347791498 -6000000,95,0,0,1.204016578,0.855489669,0.348526359 -6000000,100,0,0,1.232556897,0.877838161,0.354718016 -7000000,5,0,0,1.354937799,0.95315794,0.401779109 -7000000,10,0,0,1.42338905,1.01624733,0.4071411 -7000000,15,0,0,1.296805807,0.894871053,0.401934044 -7000000,20,0,0,1.405539186,1.002316413,0.403222083 -7000000,25,0,0,1.5144379159999999,1.106333675,0.408103591 -7000000,30,0,0,1.3474914980000001,0.941457559,0.406033249 -7000000,35,0,0,1.458732465,1.050039041,0.408692654 -7000000,40,0,0,1.5413461499999999,1.136110269,0.405235191 -7000000,45,0,0,1.611412821,1.202144062,0.409268049 -7000000,50,0,0,1.673892403,1.268492267,0.405399536 -7000000,55,0,0,1.455989987,1.051082739,0.404906578 -7000000,60,0,0,1.481833531,1.0755503929999999,0.406282438 -7000000,65,0,0,1.513225526,1.10653938,0.406685396 -7000000,70,0,0,1.522915105,1.11623607,0.406678405 -7000000,75,0,0,1.560444444,1.152851986,0.407591858 -7000000,80,0,0,1.579448727,1.174641227,0.40480684 -7000000,85,0,0,1.617784346,1.211896432,0.405887284 -7000000,90,0,0,1.63662398,1.2302709410000001,0.406352539 -7000000,95,0,0,1.65964924,1.2559602,0.40368843 -7000000,100,0,0,1.70964278,1.301749457,0.407892743 -8000000,5,0,0,1.524643158,1.06531746,0.459324908 -8000000,10,0,0,1.598506452,1.137525534,0.460980268 -8000000,15,0,0,1.688085909,1.225421347,0.462663662 -8000000,20,0,0,1.577839719,1.116043903,0.461795256 -8000000,25,0,0,1.704321417,1.234133574,0.470187153 -8000000,30,0,0,1.848801721,1.375614787,0.473185934 -8000000,35,0,0,1.624956828,1.163848716,0.461107402 -8000000,40,0,0,1.699318104,1.237499486,0.461817758 
-8000000,45,0,0,1.778365666,1.3138266490000001,0.464538367 -8000000,50,0,0,1.84804068,1.38404583,0.46399423 -8000000,55,0,0,1.926150758,1.461997916,0.464152152 -8000000,60,0,0,2.002264129,1.537368387,0.464894972 -8000000,65,0,0,1.689940423,1.2266170459999999,0.463322647 -8000000,70,0,0,1.721141959,1.257777883,0.463363166 -8000000,75,0,0,1.751038482,1.284623436,0.466414236 -8000000,80,0,0,1.770346078,1.30675563,0.463589528 -8000000,85,0,0,1.8097454659999999,1.337137693,0.472607003 -8000000,90,0,0,1.814953837,1.351729857,0.46322323 -8000000,95,0,0,1.839956243,1.373939892,0.466015691 -8000000,100,0,0,1.864874633,1.400203697,0.464670296 -9000000,5,0,0,1.7112399360000001,1.19533996,0.515899266 -9000000,10,0,0,1.75244299,1.23480901,0.51763328 -9000000,15,0,0,1.864467729,1.334448702,0.530018347 -9000000,20,0,0,1.729575514,1.210906874,0.51866764 -9000000,25,0,0,1.846862257,1.328004326,0.518857211 -9000000,30,0,0,2.017080687,1.497764904,0.519314863 -9000000,35,0,0,1.811104293,1.286394157,0.524709406 -9000000,40,0,0,1.886902444,1.365721074,0.52118058 -9000000,45,0,0,1.97639673,1.448813471,0.527582509 -9000000,50,0,0,2.044056742,1.521849872,0.52220612 -9000000,55,0,0,2.16849492,1.6419948020000001,0.526499388 -9000000,60,0,0,2.2015995999999998,1.675772144,0.525826696 -9000000,65,0,0,2.221407107,1.696373617,0.52503263 -9000000,70,0,0,1.916705885,1.39209853,0.524606695 -9000000,75,0,0,1.917945133,1.396912345,0.521032138 -9000000,80,0,0,1.9590850180000001,1.435187857,0.523896551 -9000000,85,0,0,1.978717466,1.45581348,0.522903376 -9000000,90,0,0,1.995038496,1.470494502,0.524543354 -9000000,95,0,0,2.032576648,1.5086458299999999,0.523930248 -9000000,100,0,0,2.059806808,1.537450904,0.522355214 -10000000,5,0,0,1.9025132070000002,1.326093708,0.576418859 -10000000,10,0,0,1.956922141,1.379729952,0.577191469 -10000000,15,0,0,2.066267387,1.487306689,0.578960028 -10000000,20,0,0,1.889202286,1.311144641,0.578056935 -10000000,25,0,0,2.019879988,1.43856076,0.581318458 -10000000,30,0,0,2.180625569,1.599278708,0.581346131 -10000000,35,0,0,2.361957904,1.7795732800000001,0.582383894 -10000000,40,0,0,2.055803275,1.47261512,0.583187375 -10000000,45,0,0,2.169251769,1.585298784,0.583952415 -10000000,50,0,0,2.249806705,1.667782308,0.582023717 -10000000,55,0,0,2.369881869,1.779957842,0.589923367 -10000000,60,0,0,2.41041461,1.820642565,0.589771425 -10000000,65,0,0,2.4407591650000002,1.860547207,0.580211288 -10000000,70,0,0,2.490055933,1.908866194,0.581189009 -10000000,75,0,0,2.5594432620000003,1.972414221,0.587028491 -10000000,80,0,0,2.172982773,1.59263126,0.580350463 -10000000,85,0,0,2.197620781,1.617127303,0.580492868 -10000000,90,0,0,2.219133099,1.639121499,0.58001086 -10000000,95,0,0,2.239221596,1.657891696,0.5813292 -10000000,100,0,0,2.276434615,1.6930042360000002,0.583429609 +1000000,5,0,0,0.086482525,0.055190617,0.031291348 +1000000,10,0,0,0.091694785,0.060531547,0.031162628 +1000000,15,0,0,0.105297342,0.073807693,0.031488539 +1000000,20,0,0,0.097988461,0.066224041,0.03176337 +1000000,25,0,0,0.121323806,0.089763877,0.031558799 +1000000,30,0,0,0.135643895,0.104085606,0.031557289 +1000000,35,0,0,0.116550237,0.085090159,0.031459518 +1000000,40,0,0,0.110782402,0.079218193,0.031563319 +1000000,45,0,0,0.118772686,0.087022956,0.03174894 +1000000,50,0,0,0.137958094,0.106531636,0.031425838 +1000000,55,0,0,0.143415017,0.111989729,0.031424738 +1000000,60,0,0,0.144638571,0.113171292,0.031466639 +1000000,65,0,0,0.139000399,0.10757127,0.031428599 +1000000,70,0,0,0.137512552,0.106003613,0.031508419 
+1000000,75,0,0,0.136820689,0.10533274,0.031487379 +1000000,80,0,0,0.131545248,0.099784218,0.03176025 +1000000,85,0,0,0.135652274,0.104160785,0.031490879 +1000000,90,0,0,0.137816783,0.106189315,0.031626708 +1000000,95,0,0,0.139478931,0.107972222,0.031505969 +1000000,100,0,0,0.1394834,0.107930971,0.031551859 +2000000,5,0,0,0.339020265,0.275996857,0.063022878 +2000000,10,0,0,0.253160584,0.190559968,0.062599926 +2000000,15,0,0,0.293580007,0.230744111,0.062834686 +2000000,20,0,0,0.232089517,0.169238051,0.062850656 +2000000,25,0,0,0.271757638,0.208809791,0.062947267 +2000000,30,0,0,0.324831255,0.261771848,0.063058707 +2000000,35,0,0,0.252956241,0.189993534,0.062961887 +2000000,40,0,0,0.271904417,0.20881258,0.063091187 +2000000,45,0,0,0.295118502,0.232134286,0.062983086 +2000000,50,0,0,0.352331615,0.289394939,0.062935636 +2000000,55,0,0,0.357319764,0.294446028,0.062873176 +2000000,60,0,0,0.371371541,0.308219615,0.063151166 +2000000,65,0,0,0.285201259,0.222122592,0.063077937 +2000000,70,0,0,0.285179469,0.222187623,0.062991186 +2000000,75,0,0,0.29296129,0.229780543,0.063179797 +2000000,80,0,0,0.303835475,0.240850279,0.062984436 +2000000,85,0,0,0.312916411,0.249908534,0.063007067 +2000000,90,0,0,0.318975485,0.255834649,0.063140236 +2000000,95,0,0,0.32758036,0.264356993,0.063222607 +2000000,100,0,0,0.329083335,0.266017108,0.063065397 +3000000,5,0,0,0.489091783,0.395029251,0.094061832 +3000000,10,0,0,0.473871339,0.38000075,0.093869939 +3000000,15,0,0,0.441457678,0.347191007,0.094266031 +3000000,20,0,0,0.482605022,0.388020701,0.094583631 +3000000,25,0,0,0.397696429,0.302879065,0.094816664 +3000000,30,0,0,0.473846805,0.379590504,0.094255501 +3000000,35,0,0,0.476431645,0.381861813,0.094568722 +3000000,40,0,0,0.509967068,0.414725094,0.095240864 +3000000,45,0,0,0.566235185,0.471489073,0.094744882 +3000000,50,0,0,0.484070392,0.389565331,0.094504101 +3000000,55,0,0,0.515552098,0.421152707,0.094398521 +3000000,60,0,0,0.522006012,0.427546782,0.09445795 +3000000,65,0,0,0.532751104,0.438328854,0.09442091 +3000000,70,0,0,0.54682652,0.452262279,0.09456322 +3000000,75,0,0,0.554870729,0.4604436,0.094426099 +3000000,80,0,0,0.56737039,0.472922631,0.094446459 +3000000,85,0,0,0.584969437,0.490608648,0.094359759 +3000000,90,0,0,0.595148247,0.500537897,0.09460948 +3000000,95,0,0,0.46400188,0.369435891,0.094565459 +3000000,100,0,0,0.474701341,0.379824371,0.0948762 +4000000,5,0,0,0.62690303,0.501285087,0.125617253 +4000000,10,0,0,0.616527546,0.490858853,0.125668073 +4000000,15,0,0,0.689377125,0.563442232,0.125934333 +4000000,20,0,0,0.603691441,0.477847519,0.125843162 +4000000,25,0,0,0.684769362,0.558822359,0.125946223 +4000000,30,0,0,0.771194675,0.645258853,0.125935002 +4000000,35,0,0,0.580988794,0.454557081,0.126430953 +4000000,40,0,0,0.61531332,0.489107807,0.126204763 +4000000,45,0,0,0.694222071,0.568471151,0.1257503 +4000000,50,0,0,0.766115995,0.640329925,0.12578525 +4000000,55,0,0,0.815485017,0.688960455,0.126523842 +4000000,60,0,0,0.8421729,0.715567738,0.126604182 +4000000,65,0,0,0.677701035,0.551712197,0.125988168 +4000000,70,0,0,0.693704787,0.567771938,0.125932009 +4000000,75,0,0,0.709085355,0.583092867,0.125991548 +4000000,80,0,0,0.729574803,0.603629905,0.125944108 +4000000,85,0,0,0.7422117,0.616143293,0.126067657 +4000000,90,0,0,0.758361463,0.632292246,0.126068517 +4000000,95,0,0,0.77389219,0.647640143,0.126251047 +4000000,100,0,0,0.784067537,0.65781446,0.126252277 +5000000,5,0,0,0.794806447,0.637441568,0.157363909 +5000000,10,0,0,0.773598699,0.616102331,0.157495188 
+5000000,15,0,0,0.826045183,0.668689475,0.157354948 +5000000,20,0,0,0.724420891,0.566806853,0.157613178 +5000000,25,0,0,0.82429761,0.666789603,0.157506717 +5000000,30,0,0,0.919953051,0.762730496,0.157221355 +5000000,35,0,0,0.94121964,0.784111946,0.157106744 +5000000,40,0,0,0.747834511,0.590179526,0.157654195 +5000000,45,0,0,0.853717092,0.696109517,0.157606205 +5000000,50,0,0,0.933944791,0.776474447,0.157469354 +5000000,55,0,0,0.976339901,0.818729948,0.157608913 +5000000,60,0,0,0.999914939,0.842437277,0.157476642 +5000000,65,0,0,1.021589818,0.864366238,0.15722259 +5000000,70,0,0,1.057951563,0.900439843,0.15751058 +5000000,75,0,0,1.098464575,0.940809415,0.15765404 +5000000,80,0,0,0.885913458,0.728239718,0.15767265 +5000000,85,0,0,0.907278997,0.749051485,0.158226142 +5000000,90,0,0,0.918343925,0.760260885,0.15808194 +5000000,95,0,0,0.930315448,0.772157058,0.15815708 +5000000,100,0,0,0.953084152,0.795385955,0.157697187 +6000000,5,0,0,0.942027775,0.752743627,0.189283178 +6000000,10,0,0,0.975083857,0.786155881,0.188926676 +6000000,15,0,0,1.044115728,0.85443215,0.189682588 +6000000,20,0,0,1.06683752,0.877143293,0.189693287 +6000000,25,0,0,0.939204296,0.750399544,0.188803822 +6000000,30,0,0,1.033794044,0.844874322,0.188918782 +6000000,35,0,0,1.070410228,0.881834718,0.1885742 +6000000,40,0,0,1.136678725,0.947983246,0.188694369 +6000000,45,0,0,1.291909119,1.103104611,0.188803398 +6000000,50,0,0,1.081156381,0.892408624,0.188746837 +6000000,55,0,0,1.132814063,0.943890716,0.188922327 +6000000,60,0,0,1.174083643,0.985349277,0.188733066 +6000000,65,0,0,1.176512266,0.987553411,0.188957715 +6000000,70,0,0,1.219095192,1.029701646,0.189392566 +6000000,75,0,0,1.246010657,1.057202014,0.188807353 +6000000,80,0,0,1.263748696,1.074796064,0.188951272 +6000000,85,0,0,1.2982377889999999,1.109622159,0.18861468 +6000000,90,0,0,1.321491597,1.132596688,0.188893879 +6000000,95,0,0,1.098753624,0.909455404,0.18929689 +6000000,100,0,0,1.099393881,0.910011322,0.189381539 +7000000,5,0,0,1.087561472,0.867139957,0.220420555 +7000000,10,0,0,1.124742826,0.904719524,0.220022132 +7000000,15,0,0,1.199606819,0.979508818,0.220096911 +7000000,20,0,0,1.227475917,1.007304586,0.220169871 +7000000,25,0,0,1.2766389519999999,1.055375219,0.221262863 +7000000,30,0,0,1.185185325,0.965092117,0.220092218 +7000000,35,0,0,1.189659176,0.96959161,0.220066786 +7000000,40,0,0,1.25708569,1.033321511,0.223763169 +7000000,45,0,0,1.392216582,1.167802042,0.22441352 +7000000,50,0,0,1.519737415,1.295540807,0.224195678 +7000000,55,0,0,1.2819905679999999,1.061620216,0.220369042 +7000000,60,0,0,1.312179113,1.091996912,0.220180901 +7000000,65,0,0,1.327781283,1.107730494,0.220049509 +7000000,70,0,0,1.367059609,1.146866221,0.220192288 +7000000,75,0,0,1.405577653,1.185047455,0.220529078 +7000000,80,0,0,1.4278509160000001,1.207839481,0.220010625 +7000000,85,0,0,1.437262781,1.216764477,0.220497094 +7000000,90,0,0,1.475492852,1.255305169,0.220186693 +7000000,95,0,0,1.488981591,1.268348529,0.220632172 +7000000,100,0,0,1.531255536,1.310888906,0.22036562 +8000000,5,0,0,1.22371369,0.972789131,0.250923749 +8000000,10,0,0,1.280783482,1.028384708,0.252397594 +8000000,15,0,0,1.362920911,1.112380715,0.250539146 +8000000,20,0,0,1.371749925,1.12108461,0.250664395 +8000000,25,0,0,1.414347791,1.162867084,0.251479787 +8000000,30,0,0,1.530194638,1.274271767,0.255921811 +8000000,35,0,0,1.320626226,1.069679384,0.250945892 +8000000,40,0,0,1.378282706,1.121966676,0.256315 +8000000,45,0,0,1.5845888860000001,1.3303528359999999,0.25423528 
+8000000,50,0,0,1.6983233119999999,1.445706599,0.252615933 +8000000,55,0,0,1.800137634,1.545539406,0.254597448 +8000000,60,0,0,1.848637794,1.594498399,0.254138345 +8000000,65,0,0,1.4882189989999999,1.2363576649999999,0.251860354 +8000000,70,0,0,1.5029625229999999,1.250931759,0.252029584 +8000000,75,0,0,1.535216758,1.283503027,0.251713101 +8000000,80,0,0,1.572478191,1.320192659,0.252284612 +8000000,85,0,0,1.6244272039999998,1.372588175,0.251838219 +8000000,90,0,0,1.655490453,1.403143224,0.252346539 +8000000,95,0,0,1.682510507,1.430311261,0.252198086 +8000000,100,0,0,1.679958587,1.4279700229999999,0.251987944 +9000000,5,0,0,1.389062327,1.10717561,0.281885557 +9000000,10,0,0,1.416861226,1.135069151,0.281791235 +9000000,15,0,0,1.518059052,1.236050028,0.282008164 +9000000,20,0,0,1.5507419869999999,1.268465133,0.282276084 +9000000,25,0,0,1.616066904,1.333282651,0.282783533 +9000000,30,0,0,1.713593091,1.430842079,0.282750262 +9000000,35,0,0,1.457420083,1.173801111,0.283618132 +9000000,40,0,0,1.512607526,1.229124315,0.283482411 +9000000,45,0,0,1.684572451,1.401569203,0.283002508 +9000000,50,0,0,1.855187358,1.571951662,0.283234656 +9000000,55,0,0,1.934407067,1.6505280409999998,0.283877676 +9000000,60,0,0,1.9593587380000002,1.676022066,0.283335872 +9000000,65,0,0,2.024381956,1.741170267,0.283210939 +9000000,70,0,0,1.668900737,1.385078587,0.2838214 +9000000,75,0,0,1.711984913,1.427484923,0.28449922 +9000000,80,0,0,1.748630017,1.4646257,0.284003777 +9000000,85,0,0,1.7832985639999999,1.498374606,0.284923178 +9000000,90,0,0,1.798267753,1.514342599,0.283924384 +9000000,95,0,0,1.8399620909999999,1.554909367,0.285052004 +9000000,100,0,0,1.859382125,1.575741007,0.283640148 +10000000,5,0,0,1.5193459649999999,1.206180109,0.313164946 +10000000,10,0,0,1.578123616,1.26468397,0.313438856 +10000000,15,0,0,1.685687167,1.370494317,0.315191799 +10000000,20,0,0,1.729291483,1.414461067,0.314829496 +10000000,25,0,0,1.770931351,1.456083527,0.314846884 +10000000,30,0,0,1.859928317,1.544283492,0.315643765 +10000000,35,0,0,1.92777253,1.6121073080000001,0.315664052 +10000000,40,0,0,1.658695042,1.331586564,0.327107488 +10000000,45,0,0,1.8190461340000001,1.504293649,0.314751575 +10000000,50,0,0,1.986230737,1.671785045,0.314444952 +10000000,55,0,0,2.08255887,1.7670837480000001,0.315474322 +10000000,60,0,0,2.171866621,1.857508614,0.314356727 +10000000,65,0,0,2.261070687,1.9465845819999998,0.314484865 +10000000,70,0,0,2.25192901,1.936448604,0.315479756 +10000000,75,0,0,2.308560987,1.992937994,0.315621993 +10000000,80,0,0,1.8805117199999999,1.5652523999999999,0.3152583 +10000000,85,0,0,1.899076559,1.581332323,0.317743216 +10000000,90,0,0,1.964298289,1.64833014,0.315967119 +10000000,95,0,0,1.9669363450000001,1.65168486,0.315250545 +10000000,100,0,0,1.993444717,1.677748925,0.315694872 diff --git a/docs/bench-report-repro.csv b/docs/bench-report-repro.csv index b74ec48..dc56173 100644 --- a/docs/bench-report-repro.csv +++ b/docs/bench-report-repro.csv @@ -1,201 +1,201 @@ Total Items,Buckets,Removed,Escalated,Time Elapsed,Time to Send,Time to Receive -1000000,5,0,0,0.189784303,0.132660383,0.05711994 -1000000,10,0,0,0.233516515,0.176141808,0.057370967 -1000000,15,0,0,0.233891082,0.176742211,0.057145201 -1000000,20,0,0,0.217160909,0.158457746,0.058698963 -1000000,25,0,0,0.227972722,0.170580652,0.05738844 -1000000,30,0,0,0.232389806,0.173230536,0.05915423 -1000000,35,0,0,0.199818358,0.14235436,0.057459908 -1000000,40,0,0,0.209756308,0.151787885,0.057964303 -1000000,45,0,0,0.199243976,0.141735678,0.057504468 
-1000000,50,0,0,0.210216389,0.152735469,0.0574756 -1000000,55,0,0,0.221664361,0.16251186,0.059148761 -1000000,60,0,0,0.225220811,0.167653782,0.057563409 -1000000,65,0,0,0.189910964,0.132080629,0.057825165 -1000000,70,0,0,0.169361267,0.111847316,0.057509791 -1000000,75,0,0,0.198859889,0.140695165,0.058161014 -1000000,80,0,0,0.197056769,0.139609457,0.057443612 -1000000,85,0,0,0.204030402,0.146601431,0.057425022 -1000000,90,0,0,0.207381765,0.14934585,0.058032285 -1000000,95,0,0,0.199866274,0.142152424,0.05770954 -1000000,100,0,0,0.212269626,0.154403458,0.057862858 -2000000,5,0,0,0.463133665,0.347538087,0.115590718 -2000000,10,0,0,0.428248459,0.313433879,0.11480802 -2000000,15,0,0,0.443390205,0.328688714,0.114694711 -2000000,20,0,0,0.394881118,0.278456023,0.116417925 -2000000,25,0,0,0.42129288,0.306418148,0.114868803 -2000000,30,0,0,0.474313491,0.323970007,0.150338264 -2000000,35,0,0,0.383674055,0.26881707,0.114852575 -2000000,40,0,0,0.410969347,0.296139949,0.114822978 -2000000,45,0,0,0.431114934,0.316240227,0.114869717 -2000000,50,0,0,0.452711008,0.33771034,0.114996588 -2000000,55,0,0,0.469223947,0.354212558,0.11500456 -2000000,60,0,0,0.47925181,0.364244269,0.115001501 -2000000,65,0,0,0.416106091,0.301166898,0.114933103 -2000000,70,0,0,0.425083372,0.309794262,0.11528365 -2000000,75,0,0,0.438138011,0.322831989,0.115302102 -2000000,80,0,0,0.439246059,0.323942847,0.115299422 -2000000,85,0,0,0.444206296,0.328885591,0.115316685 -2000000,90,0,0,0.448249861,0.332952456,0.115293405 -2000000,95,0,0,0.439710444,0.324604105,0.11509997 -2000000,100,0,0,0.505268684,0.38866833,0.116596805 -3000000,5,0,0,0.691616426,0.519690098,0.171922598 -3000000,10,0,0,0.742500906,0.570311847,0.172185379 -3000000,15,0,0,0.608189128,0.434744462,0.173441186 -3000000,20,0,0,0.65949574,0.487653972,0.171837778 -3000000,25,0,0,0.570289591,0.397358351,0.17292759 -3000000,30,0,0,0.593475557,0.419683546,0.173788101 -3000000,35,0,0,0.649016586,0.475789804,0.173223692 -3000000,40,0,0,0.681068083,0.508119095,0.172945899 -3000000,45,0,0,0.711773336,0.539175139,0.172594517 -3000000,50,0,0,0.635857059,0.462815647,0.173037852 -3000000,55,0,0,0.665072434,0.486402578,0.178666296 -3000000,60,0,0,0.685683934,0.508045503,0.177634661 -3000000,65,0,0,0.696487201,0.522371387,0.174108964 -3000000,70,0,0,0.717006517,0.543526146,0.173474131 -3000000,75,0,0,0.880341206,0.694974477,0.185359979 -3000000,80,0,0,0.733736333,0.560421512,0.173307751 -3000000,85,0,0,0.835224407,0.661945122,0.173274655 -3000000,90,0,0,0.72902678,0.555688061,0.17333319 -3000000,95,0,0,0.723791197,0.549529917,0.17425739 -3000000,100,0,0,0.658435409,0.484867408,0.173564131 -4000000,5,0,0,0.910467303,0.681572037,0.228890436 -4000000,10,0,0,0.941390301,0.711773866,0.229612495 -4000000,15,0,0,0.960136998,0.729608787,0.230523671 -4000000,20,0,0,0.761404153,0.521368395,0.240030328 -4000000,25,0,0,0.797869381,0.5663841,0.231480841 -4000000,30,0,0,0.864450309,0.633999473,0.230446526 -4000000,35,0,0,0.795733838,0.561888865,0.233840553 -4000000,40,0,0,0.82265524,0.591934968,0.230715732 -4000000,45,0,0,0.863113062,0.632302846,0.230805026 -4000000,50,0,0,0.911357626,0.67903656,0.232316826 -4000000,55,0,0,0.927249154,0.696896232,0.230348692 -4000000,60,0,0,0.940077416,0.709471141,0.230601805 -4000000,65,0,0,0.811232211,0.579969447,0.231255254 -4000000,70,0,0,0.821564892,0.590878587,0.230681585 -4000000,75,0,0,0.828727736,0.59854777,0.230174996 -4000000,80,0,0,0.85387219,0.623360851,0.230506859 -4000000,85,0,0,0.860671689,0.630621611,0.230045078 
-4000000,90,0,0,0.871962455,0.639958723,0.231999912 -4000000,95,0,0,0.882353719,0.651733149,0.23061681 -4000000,100,0,0,0.905503637,0.674886362,0.230612416 -5000000,5,0,0,0.964215828,0.666434432,0.297776556 -5000000,10,0,0,0.94144752,0.655612257,0.285829073 -5000000,15,0,0,0.979999476,0.692189346,0.28780324 -5000000,20,0,0,0.913770348,0.626793563,0.286971735 -5000000,25,0,0,0.965331427,0.678065408,0.287261569 -5000000,30,0,0,1.059350852,0.769285393,0.290061339 -5000000,35,0,0,1.12143884,0.832344019,0.289090501 -5000000,40,0,0,0.991721026,0.704190083,0.287525903 -5000000,45,0,0,1.031740059,0.742231148,0.289504371 -5000000,50,0,0,1.078773986,0.789445917,0.289323399 -5000000,55,0,0,1.115674255,0.827389169,0.288280526 -5000000,60,0,0,1.143400678,0.85335784,0.290039578 -5000000,65,0,0,1.1772477430000001,0.887914977,0.289328176 -5000000,70,0,0,1.348778966,1.060411193,0.288363753 -5000000,75,0,0,1.217029099,0.928754239,0.28827002 -5000000,80,0,0,1.051031902,0.761136496,0.289892126 -5000000,85,0,0,1.054436007,0.766782051,0.287649586 -5000000,90,0,0,1.067362681,0.778888406,0.288469985 -5000000,95,0,0,1.154298407,0.866601459,0.287691818 -5000000,100,0,0,1.079508491,0.790885374,0.288619517 -6000000,5,0,0,1.210280516,0.854958025,0.355317401 -6000000,10,0,0,1.272526105,0.920682999,0.351838606 -6000000,15,0,0,1.265821624,0.916308375,0.349509449 -6000000,20,0,0,1.294652938,0.945362199,0.349286149 -6000000,25,0,0,1.167680554,0.807184401,0.360491693 -6000000,30,0,0,1.227174264,0.87394661,0.353223874 -6000000,35,0,0,1.2908412089999999,0.940868434,0.349967765 -6000000,40,0,0,1.446913624,1.100418974,0.34649085 -6000000,45,0,0,1.453957889,1.097814377,0.356139832 -6000000,50,0,0,1.2458647059999999,0.896248552,0.349611204 -6000000,55,0,0,1.309505108,0.955547313,0.353952915 -6000000,60,0,0,1.3365717990000001,0.977353944,0.359213235 -6000000,65,0,0,1.436328977,1.088716101,0.347608316 -6000000,70,0,0,1.462626226,1.115455845,0.347165121 -6000000,75,0,0,1.466957207,1.119366598,0.347586259 -6000000,80,0,0,1.509914823,1.160376176,0.349533757 -6000000,85,0,0,1.486219705,1.135168825,0.35104702 -6000000,90,0,0,1.479264715,1.13368384,0.345576005 -6000000,95,0,0,1.276724483,0.929857782,0.346862241 -6000000,100,0,0,1.288999437,0.943258681,0.345736636 -7000000,5,0,0,1.431058345,0.99839169,0.432662175 -7000000,10,0,0,1.555162347,1.155255216,0.399903101 -7000000,15,0,0,1.546103267,1.145010318,0.401088659 -7000000,20,0,0,1.592660435,1.191554489,0.401101736 -7000000,25,0,0,1.625113744,1.222570588,0.402537206 -7000000,30,0,0,1.547678303,1.145361791,0.402312202 -7000000,35,0,0,1.5378119319999999,1.13492997,0.402877452 -7000000,40,0,0,1.6163273089999999,1.211114904,0.405206935 -7000000,45,0,0,1.666438831,1.262596541,0.403838071 -7000000,50,0,0,1.918665962,1.514305609,0.404356514 -7000000,55,0,0,1.561535946,1.157067536,0.40446436 -7000000,60,0,0,1.5931406689999998,1.186708343,0.406427666 -7000000,65,0,0,1.605670589,1.202541044,0.403125975 -7000000,70,0,0,1.6405244479999999,1.234761537,0.405759611 -7000000,75,0,0,1.659654038,1.255454668,0.40419501 -7000000,80,0,0,1.692914383,1.2890185650000001,0.403891548 -7000000,85,0,0,1.66206575,1.257899341,0.404162949 -7000000,90,0,0,1.673276099,1.2575200020000001,0.415752217 -7000000,95,0,0,1.6932848169999999,1.289705914,0.403574573 -7000000,100,0,0,1.8162885,1.382712355,0.433572285 -8000000,5,0,0,1.743629505,1.284642788,0.458982557 -8000000,10,0,0,1.7787060110000001,1.3212297880000001,0.457472103 -8000000,15,0,0,1.9346855010000001,1.476186899,0.458494602 
-8000000,20,0,0,1.794325777,1.334528132,0.459793315 -8000000,25,0,0,1.7124312590000001,1.2533599579999999,0.459066841 -8000000,30,0,0,1.9450463390000001,1.485290396,0.459751393 -8000000,35,0,0,1.748071048,1.2889405329999999,0.459126055 -8000000,40,0,0,1.746861262,1.2851799019999999,0.46167769 -8000000,45,0,0,1.788206412,1.324833968,0.463368174 -8000000,50,0,0,1.8418554230000002,1.375432585,0.466418158 -8000000,55,0,0,2.148387635,1.6868159390000002,0.461567596 -8000000,60,0,0,2.048337063,1.5871791929999999,0.46115435 -8000000,65,0,0,1.77284716,1.3061952749999999,0.466647405 -8000000,70,0,0,1.7904199589999998,1.328938438,0.461477141 -8000000,75,0,0,1.810309045,1.346388895,0.46391596 -8000000,80,0,0,1.8206942320000001,1.358599152,0.46209073 -8000000,85,0,0,1.8448267980000002,1.381276503,0.463545755 -8000000,90,0,0,1.815506596,1.35375998,0.461742626 -8000000,95,0,0,1.869966978,1.393391495,0.476571103 -8000000,100,0,0,1.857450671,1.395473412,0.461972859 -9000000,5,0,0,1.939399242,1.423561334,0.515834238 -9000000,10,0,0,1.815579129,1.3009805349999999,0.514594474 -9000000,15,0,0,1.938881822,1.423469496,0.515407867 -9000000,20,0,0,1.833274919,1.317826444,0.515444075 -9000000,25,0,0,1.877344612,1.360875935,0.516465147 -9000000,30,0,0,2.116484109,1.595702414,0.520777065 -9000000,35,0,0,1.911173288,1.389950027,0.521219481 -9000000,40,0,0,1.935746184,1.416965427,0.518776897 -9000000,45,0,0,1.952215281,1.434376219,0.517835412 -9000000,50,0,0,2.010015668,1.489288247,0.520723371 -9000000,55,0,0,2.177753764,1.65835314,0.519396594 -9000000,60,0,0,2.233401238,1.711419778,0.52197716 -9000000,65,0,0,2.299810427,1.78014203,0.519664218 -9000000,70,0,0,1.984796141,1.464529176,0.520262665 -9000000,75,0,0,2.008725572,1.488935033,0.519787269 -9000000,80,0,0,2.023948754,1.502974825,0.520970019 -9000000,85,0,0,2.04030746,1.51978038,0.52052199 -9000000,90,0,0,2.017552351,1.496467126,0.521080845 -9000000,95,0,0,2.046780962,1.5259712539999999,0.520805938 -9000000,100,0,0,2.062938963,1.543056046,0.519878587 -10000000,5,0,0,2.143155546,1.556639691,0.586511975 -10000000,10,0,0,2.071882152,1.499120446,0.572757576 -10000000,15,0,0,2.174352955,1.598936138,0.575412587 -10000000,20,0,0,2.005195706,1.4295251310000001,0.575666685 -10000000,25,0,0,2.071854644,1.495598882,0.576251762 -10000000,30,0,0,2.268355285,1.679054255,0.58929679 -10000000,35,0,0,2.423951337,1.847407809,0.576539478 -10000000,40,0,0,2.172540409,1.592222636,0.580313913 -10000000,45,0,0,2.110944902,1.530738728,0.580201154 -10000000,50,0,0,2.190691453,1.610562233,0.58012548 -10000000,55,0,0,2.329200672,1.751318027,0.577876515 -10000000,60,0,0,2.4070755679999998,1.829102826,0.577969362 -10000000,65,0,0,2.4745701860000002,1.89669855,0.577867786 -10000000,70,0,0,2.514790167,1.9299485779999999,0.584837339 -10000000,75,0,0,2.574490559,1.981489441,0.592996778 -10000000,80,0,0,2.201098419,1.623493908,0.577600291 -10000000,85,0,0,2.302351854,1.723389866,0.578957428 -10000000,90,0,0,2.203668378,1.621751478,0.58191293 -10000000,95,0,0,2.230287925,1.644980015,0.58530387 -10000000,100,0,0,2.438635391,1.858093427,0.580538184 +1000000,5,0,0,0.154461682,0.122934628,0.031520654 +1000000,10,0,0,0.18127478,0.149779896,0.031488803 +1000000,15,0,0,0.239860722,0.207917006,0.031937246 +1000000,20,0,0,0.121865491,0.090241577,0.031617004 +1000000,25,0,0,0.145450802,0.114029029,0.031415443 +1000000,30,0,0,0.176191061,0.144742817,0.031443304 +1000000,35,0,0,0.120915275,0.089466082,0.031442573 +1000000,40,0,0,0.117675758,0.086157244,0.031513424 
+1000000,45,0,0,0.159141796,0.127662632,0.031474404 +1000000,50,0,0,0.168207726,0.136745323,0.031457593 +1000000,55,0,0,0.177595498,0.146117604,0.031472614 +1000000,60,0,0,0.181027267,0.149480734,0.031541703 +1000000,65,0,0,0.13834606,0.106836657,0.031504833 +1000000,70,0,0,0.135014582,0.103552409,0.031458583 +1000000,75,0,0,0.137922398,0.106421815,0.031495603 +1000000,80,0,0,0.137344506,0.105823062,0.031516504 +1000000,85,0,0,0.156988963,0.12551365,0.031471283 +1000000,90,0,0,0.16723791,0.135702067,0.031530973 +1000000,95,0,0,0.167845563,0.136328919,0.031513113 +1000000,100,0,0,0.172060935,0.140427431,0.031628394 +2000000,5,0,0,0.306421262,0.243742648,0.062673434 +2000000,10,0,0,0.359062952,0.296385317,0.062674035 +2000000,15,0,0,0.377638502,0.314918858,0.062714274 +2000000,20,0,0,0.321437772,0.258671798,0.062761404 +2000000,25,0,0,0.34095596,0.278188095,0.062764075 +2000000,30,0,0,0.389581434,0.32675571,0.062821934 +2000000,35,0,0,0.304158615,0.241208681,0.062945664 +2000000,40,0,0,0.347346421,0.284421837,0.062920994 +2000000,45,0,0,0.382406341,0.319460937,0.062940494 +2000000,50,0,0,0.395820545,0.332912961,0.062902684 +2000000,55,0,0,0.409058424,0.345827069,0.063227645 +2000000,60,0,0,0.461783472,0.398940649,0.062837653 +2000000,65,0,0,0.332966717,0.270152855,0.062809932 +2000000,70,0,0,0.368888332,0.306003779,0.062879403 +2000000,75,0,0,0.374601053,0.3117304,0.062866403 +2000000,80,0,0,0.387708702,0.32481109,0.062892402 +2000000,85,0,0,0.362076322,0.299065399,0.063007133 +2000000,90,0,0,0.395889765,0.332871543,0.063014152 +2000000,95,0,0,0.402932112,0.339966719,0.062961102 +2000000,100,0,0,0.410245411,0.347263818,0.062977053 +3000000,5,0,0,0.522881371,0.428562939,0.094314292 +3000000,10,0,0,0.553744168,0.459977049,0.093763049 +3000000,15,0,0,0.526298515,0.432259174,0.094035281 +3000000,20,0,0,0.523379119,0.429380719,0.0939929 +3000000,25,0,0,0.43750164,0.34340408,0.09409213 +3000000,30,0,0,0.50577512,0.410768526,0.095001424 +3000000,35,0,0,0.52479382,0.43057319,0.094216829 +3000000,40,0,0,0.623450341,0.529411173,0.094033708 +3000000,45,0,0,0.584584268,0.490293498,0.09428577 +3000000,50,0,0,0.538793916,0.444563718,0.094226318 +3000000,55,0,0,0.622067916,0.527378845,0.094683581 +3000000,60,0,0,0.625081778,0.53084353,0.094234668 +3000000,65,0,0,0.644021797,0.54976919,0.094248277 +3000000,70,0,0,0.664308984,0.570079938,0.094224106 +3000000,75,0,0,0.666509052,0.571701672,0.09480178 +3000000,80,0,0,0.675315975,0.580864438,0.094447167 +3000000,85,0,0,0.706947054,0.612672918,0.094267956 +3000000,90,0,0,0.728847576,0.63445536,0.094388216 +3000000,95,0,0,0.611813816,0.516414525,0.095394641 +3000000,100,0,0,0.588500668,0.494130464,0.094365454 +4000000,5,0,0,0.68476196,0.559618041,0.125138919 +4000000,10,0,0,0.767426468,0.642360141,0.125060917 +4000000,15,0,0,0.797010051,0.671763303,0.125240098 +4000000,20,0,0,0.65264152,0.527198121,0.125436539 +4000000,25,0,0,0.698052936,0.572654159,0.125391077 +4000000,30,0,0,0.794344955,0.668520157,0.125818938 +4000000,35,0,0,0.707903362,0.582489186,0.125410476 +4000000,40,0,0,0.687976373,0.562198064,0.125769998 +4000000,45,0,0,0.857209515,0.731396838,0.125808697 +4000000,50,0,0,0.930972372,0.805000645,0.125965967 +4000000,55,0,0,0.855811327,0.729780891,0.126026626 +4000000,60,0,0,0.988361681,0.862564147,0.125792584 +4000000,65,0,0,0.860608182,0.734465247,0.126136125 +4000000,70,0,0,0.838071258,0.711782343,0.126282385 +4000000,75,0,0,0.76905297,0.643189868,0.125855492 +4000000,80,0,0,0.871804925,0.745672292,0.126125643 
+4000000,85,0,0,0.886534017,0.760505835,0.126020242 +4000000,90,0,0,0.905878473,0.779967032,0.125904501 +4000000,95,0,0,0.911773738,0.785645657,0.126120761 +4000000,100,0,0,0.936369811,0.809856119,0.126505282 +5000000,5,0,0,0.874504091,0.718106744,0.156389757 +5000000,10,0,0,0.943904998,0.78717711,0.156723588 +5000000,15,0,0,0.989918702,0.833271926,0.156642486 +5000000,20,0,0,0.806668031,0.649847785,0.156816156 +5000000,25,0,0,0.799220778,0.642245462,0.156971096 +5000000,30,0,0,0.887389862,0.730392377,0.156993055 +5000000,35,0,0,0.938923454,0.781573677,0.157343617 +5000000,40,0,0,0.808994414,0.650738324,0.15825045 +5000000,45,0,0,0.923389702,0.766048348,0.157334464 +5000000,50,0,0,1.062518735,0.875444418,0.187068387 +5000000,55,0,0,0.979310888,0.821600014,0.157704164 +5000000,60,0,0,1.22139438,0.959249158,0.262140052 +5000000,65,0,0,1.219010906,0.958058754,0.260948022 +5000000,70,0,0,1.266618051,0.978053349,0.288559322 +5000000,75,0,0,1.296935846,0.999337989,0.297592977 +5000000,80,0,0,0.998040825,0.840565397,0.157467798 +5000000,85,0,0,0.966208875,0.8088971,0.157306045 +5000000,90,0,0,0.966047037,0.808387562,0.157653805 +5000000,95,0,0,0.991945054,0.827716826,0.164220078 +5000000,100,0,0,1.031554869,0.858923789,0.17262502 +6000000,5,0,0,1.052930379,0.839812534,0.213111445 +6000000,10,0,0,0.989721502,0.801631787,0.188081075 +6000000,15,0,0,0.995553785,0.807536391,0.188009734 +6000000,20,0,22,1.236723792,0.936902992,0.29981283 +6000000,25,0,0,0.933204755,0.74458933,0.188606845 +6000000,30,0,0,1.052769493,0.839783547,0.212979246 +6000000,35,0,0,1.193857278,0.913632294,0.280218194 +6000000,40,0,0,1.355443103,0.997928792,0.357506881 +6000000,45,0,86,1.44492684,1.255353227,0.189565693 +6000000,50,0,0,1.349211767,0.993890574,0.355313653 +6000000,55,0,82,1.2855334950000001,0.954968239,0.330559416 +6000000,60,0,0,1.419054682,1.230283458,0.188764584 +6000000,65,0,112,1.433088689,1.244711188,0.188369321 +6000000,70,0,118,1.464549473,1.275810023,0.18873361 +6000000,75,0,47,1.494534057,1.305675218,0.188853279 +6000000,80,0,74,1.5447111329999998,1.354119608,0.190588005 +6000000,85,0,94,1.576538024,1.38778534,0.188748624 +6000000,90,0,158,1.62525284,1.435678114,0.189571046 +6000000,95,0,34,1.39738871,1.207948625,0.189434915 +6000000,100,0,0,1.344574227,0.991754589,0.352814757 +7000000,5,0,0,1.32736454,0.951884814,0.375475916 +7000000,10,0,0,1.38032406,0.980032285,0.400287245 +7000000,15,0,16,1.278961081,0.926870415,0.352084966 +7000000,20,0,38,1.463739414,1.243647837,0.220084387 +7000000,25,0,46,1.475319557,1.255316753,0.219996064 +7000000,30,0,0,1.2421348779999999,0.907835739,0.334294989 +7000000,35,0,42,1.304497391,0.940049748,0.364441163 +7000000,40,0,70,1.454264849,1.234123039,0.22013678 +7000000,45,0,82,1.578216165,1.357821537,0.220391058 +7000000,50,0,88,1.7946538570000001,1.574561993,0.220086364 +7000000,55,0,98,1.525602439,1.29785407,0.227742599 +7000000,60,0,64,1.579894816,1.359775326,0.22011553 +7000000,65,0,22,1.594872474,1.374022992,0.220845492 +7000000,70,0,14,1.62035684,1.400630096,0.219722944 +7000000,75,0,52,1.6647081259999998,1.44390129,0.220801616 +7000000,80,0,42,1.709948826,1.489762145,0.220182931 +7000000,85,0,57,1.746659133,1.526041662,0.220613521 +7000000,90,0,24,1.788473873,1.568028094,0.220441869 +7000000,95,0,67,1.817977914,1.597131656,0.220841968 +7000000,100,0,0,1.839645287,1.619542185,0.220099052 +8000000,5,0,8,1.5880360690000002,1.337391705,0.250640534 +8000000,10,0,18,1.573567305,1.323167885,0.25039537 +8000000,15,0,28,1.638801268,1.387404876,0.251391942 
+8000000,20,0,0,1.661443638,1.410140667,0.251297131 +8000000,25,0,50,1.6583141769999998,1.407503022,0.250803815 +8000000,30,0,58,1.693837027,1.44214537,0.251687347 +8000000,35,0,64,1.518360282,1.266653957,0.251697564 +8000000,40,0,0,1.4935131529999999,1.241497999,0.252007464 +8000000,45,0,84,1.701610847,1.450352198,0.251253199 +8000000,50,0,88,1.952943326,1.701450469,0.251487697 +8000000,55,0,98,1.988048016,1.73608958,0.251951536 +8000000,60,0,104,2.08036586,1.827698524,0.252659336 +8000000,65,0,0,1.729247049,1.4779886819999999,0.251253777 +8000000,70,0,118,1.808575056,1.556585119,0.251982557 +8000000,75,0,132,1.832886859,1.581168886,0.251712313 +8000000,80,0,53,1.871832908,1.6204915579999999,0.25133748 +8000000,85,0,60,1.911996232,1.660423174,0.251567638 +8000000,90,0,75,1.945201892,1.6919735390000001,0.253223443 +8000000,95,0,172,1.833931374,1.581474418,0.252452846 +8000000,100,0,40,2.00256976,1.7491349710000001,0.253428099 +9000000,5,0,8,1.767544344,1.48563304,0.281908774 +9000000,10,0,0,1.736577178,1.454862138,0.28171006 +9000000,15,0,26,1.775987529,1.493308547,0.282673992 +9000000,20,0,36,1.843951728,1.561225258,0.28271961 +9000000,25,0,46,1.832759268,1.5465913850000002,0.286160863 +9000000,30,0,56,1.8652641540000001,1.581707867,0.283548337 +9000000,35,0,0,1.627987238,1.345022066,0.282960212 +9000000,40,0,72,1.6668902669999999,1.381674298,0.285208239 +9000000,45,0,82,1.8038324000000001,1.520639071,0.283185159 +9000000,50,0,90,2.007056143,1.7218645590000001,0.285184974 +9000000,55,0,98,2.097914693,1.814954382,0.282951871 +9000000,60,294912,38,2.325675123,1.954457837,0.371214036 +9000000,65,573440,182,2.451496358,2.015921973,0.435568715 +9000000,70,0,120,1.91870182,1.63577169,0.28292638 +9000000,75,0,23,1.992483264,1.7084928609999999,0.283984873 +9000000,80,0,43,2.036879937,1.753121039,0.283755228 +9000000,85,0,47,2.085537998,1.801994103,0.283539775 +9000000,90,0,41,2.119598572,1.835411688,0.284178524 +9000000,95,0,164,2.148365924,1.860875889,0.287484565 +9000000,100,0,0,2.140099394,1.855689006,0.284406128 +10000000,5,0,8,1.8700826369999999,1.556790787,0.31328628 +10000000,10,0,20,1.8732469040000002,1.559900595,0.313340189 +10000000,15,0,28,1.9292456150000001,1.612286035,0.3169545 +10000000,20,0,36,1.9866805520000002,1.669480144,0.317196638 +10000000,25,0,50,2.009008398,1.691617032,0.317387066 +10000000,30,0,58,2.088136254,1.773839525,0.314290289 +10000000,35,0,0,2.031370034,1.71534082,0.316023044 +10000000,40,0,0,1.757403197,1.442509901,0.314888416 +10000000,45,0,82,1.991531164,1.6767288919999999,0.314798342 +10000000,50,0,92,2.184338041,1.870236475,0.314097146 +10000000,55,507904,160,2.522708948,2.02647558,0.496230218 +10000000,60,327680,42,2.400769038,1.9723764799999999,0.428388208 +10000000,65,809498,240,2.779502305,2.469068647,0.310430268 +10000000,70,1146881,140,2.799326677,2.491492964,0.307830123 +10000000,75,1520361,284,2.777745502,2.473290617,0.304447465 +10000000,80,0,142,2.15693655,1.841179342,0.315752518 +10000000,85,0,94,2.348839173,1.9424992030000001,0.4063328 +10000000,90,144658,246,2.47119717,1.992348866,0.478844124 +10000000,95,224,114,2.535739569,2.019762538,0.515971831 +10000000,100,260,300,2.568621078,2.031456477,0.537159701 diff --git a/graphs/Cargo.toml b/graphs/Cargo.toml index 47aad3b..3bc6943 100644 --- a/graphs/Cargo.toml +++ b/graphs/Cargo.toml @@ -2,3 +2,11 @@ name = "graphs" version = "0.1.0" edition = "2021" + + +[dependencies] +chrono = "0.4.38" +csv = "1.3.0" +rand = "0.8.4" +tokio = { version = "1.10.0", features = ["full"] } +rpq = { path = "../" } diff 
--git a/graphs/src/main.rs b/graphs/src/main.rs index 0937da5..09d90b7 100644 --- a/graphs/src/main.rs +++ b/graphs/src/main.rs @@ -1,10 +1,9 @@ -use core::time; use std::fs::File; use std::sync::atomic::Ordering; use std::sync::Arc; -use rpq::pq::Item; -use rpq::{RPQOptions, RPQ}; +use chrono::Duration; +use rpq::{schema::Item, schema::RPQOptions, RPQ}; #[tokio::main(flavor = "multi_thread")] async fn main() { @@ -74,9 +73,8 @@ async fn bench( disk_cache_enabled: disk_cache_enabled, database_path: "/tmp/rpq.redb".to_string(), lazy_disk_cache: lazy_disk_cache, - lazy_disk_write_delay: std::time::Duration::from_secs(5), + lazy_disk_write_delay: Duration::seconds(5), lazy_disk_cache_batch_size: 5000, - buffer_size: 1_000_000, }; let r = RPQ::new(options).await; @@ -100,7 +98,7 @@ async fn bench( }, _ = async { loop { - tokio::time::sleep(time::Duration::from_secs(1)).await; + tokio::time::sleep(std::time::Duration::from_secs(1)).await; let results = rpq_clone.prioritize().await; if !results.is_err() { @@ -121,9 +119,9 @@ async fn bench( i % bucket_count, i, true, - Some(std::time::Duration::from_secs(1)), + Some(Duration::seconds(1)), true, - Some(std::time::Duration::from_secs(2)), + Some(Duration::seconds(2)), ); let result = rpq.enqueue(item).await; if result.is_err() { diff --git a/src/bpq.rs b/src/bpq.rs deleted file mode 100644 index d572b37..0000000 --- a/src/bpq.rs +++ /dev/null @@ -1,35 +0,0 @@ -use std::collections::BTreeSet; -use std::sync::RwLock; - -pub struct BucketPriorityQueue { - bucket_ids: RwLock>, -} - -impl BucketPriorityQueue { - pub fn new() -> BucketPriorityQueue { - BucketPriorityQueue { - bucket_ids: RwLock::new(BTreeSet::new()), - } - } - - pub fn len(&self) -> usize { - self.bucket_ids.read().unwrap().len() - } - - pub fn peek(&self) -> Option { - self.bucket_ids.read().unwrap().first().cloned() - } - - pub fn add_bucket(&self, bucket_id: usize) { - // If the bucket already exists, return - if self.bucket_ids.read().unwrap().contains(&bucket_id) { - return; - } - - self.bucket_ids.write().unwrap().insert(bucket_id); - } - - pub fn remove_bucket(&self, bucket_id: &usize) { - self.bucket_ids.write().unwrap().remove(&bucket_id); - } -} diff --git a/src/disk.rs b/src/disk.rs new file mode 100644 index 0000000..5dffd2b --- /dev/null +++ b/src/disk.rs @@ -0,0 +1,203 @@ +use std::error::Error; +use std::io::Error as IoError; +use std::io::ErrorKind; +use std::vec::Vec; + +use redb::{Database, ReadableTableMetadata, TableDefinition}; +use serde::de::DeserializeOwned; +use serde::Serialize; + +use crate::schema; + +pub struct DiskCache { + db: Database, + phantom: std::marker::PhantomData, +} + +const DB: TableDefinition<&str, &[u8]> = TableDefinition::new("rpq"); + +impl DiskCache +where + T: Serialize + DeserializeOwned, +{ + pub fn new(path: &str) -> DiskCache { + let db = Database::create(path).unwrap(); + + // Create the initial table + let ctxn = db.begin_write().unwrap(); + ctxn.open_table(DB).unwrap(); + ctxn.commit().unwrap(); + + DiskCache { + db, + phantom: std::marker::PhantomData, + } + } + + pub fn commit_batch(&self, write_cache: &mut Vec>) -> Result<(), Box> + where + T: Serialize + DeserializeOwned, + { + let write_txn = self.db.begin_write().unwrap(); + for item in write_cache.iter() { + let mut table = write_txn.open_table(DB).unwrap(); + let b = item.to_bytes(); + if b.is_err() { + return Err(Box::::from(IoError::new( + ErrorKind::InvalidInput, + "Error converting item to bytes", + ))); + } + + let b = b.unwrap(); + let key = 
item.get_disk_uuid().unwrap(); + + let was_written = table.insert(key.as_str(), &b[..]); + if was_written.is_err() { + return Err(Box::::from(IoError::new( + ErrorKind::InvalidInput, + "Error writing item to disk cache", + ))); + } + } + + write_txn.commit().unwrap(); + write_cache.clear(); + Ok(()) + } + + pub fn delete_batch( + &self, + delete_cache: &mut Vec>, + ) -> Result<(), Box> + where + T: Serialize + DeserializeOwned, + { + let write_txn = self.db.begin_write().unwrap(); + for item in delete_cache.iter() { + let mut table = write_txn.open_table(DB).unwrap(); + let key = item.get_disk_uuid().unwrap(); + let was_deleted = table.remove(key.as_str()); + if was_deleted.is_err() { + return Err(Box::::from(IoError::new( + ErrorKind::InvalidInput, + "Error deleting item from disk cache", + ))); + } + } + write_txn.commit().unwrap(); + + delete_cache.clear(); + Ok(()) + } + + pub fn commit_single(&self, item: schema::Item) -> Result<(), Box> + where + T: Serialize + DeserializeOwned, + { + let write_txn = self.db.begin_write().unwrap(); + { + let mut table = write_txn.open_table(DB).unwrap(); + let b = item.to_bytes(); + + if b.is_err() { + return Err(Box::::from(IoError::new( + ErrorKind::InvalidInput, + "Error converting item to bytes", + ))); + } + let b = b.unwrap(); + + let disk_uuid = item.get_disk_uuid(); + if disk_uuid.is_none() { + return Err(Box::::from(IoError::new( + ErrorKind::InvalidInput, + "Error getting disk uuid", + ))); + } + + let was_written = table.insert(disk_uuid.unwrap().as_str(), &b[..]); + if was_written.is_err() { + return Err(Box::::from(IoError::new( + ErrorKind::InvalidInput, + "Error writing item to disk cache", + ))); + } + } + write_txn.commit().unwrap(); + + Ok(()) + } + + pub fn delete_single(&self, key: &str) -> Result<(), Box> + where + T: Serialize + DeserializeOwned, + { + let write_txn = self.db.begin_write().unwrap(); + { + let mut table = write_txn.open_table(DB).unwrap(); + let was_removed = table.remove(key); + if was_removed.is_err() { + return Err(Box::::from(IoError::new( + ErrorKind::InvalidInput, + "Error deleting item from disk cache", + ))); + } + } + write_txn.commit().unwrap(); + + Ok(()) + } + + pub fn return_items_from_disk(&self) -> Result>, Box> + where + T: Serialize + DeserializeOwned, + { + let mut items = Vec::new(); + let read_txn = self.db.begin_read().unwrap(); + let table = read_txn.open_table(DB).unwrap(); + + let cursor = match table.range::<&str>(..) { + Ok(range) => range, + Err(e) => { + return Err(Box::::from(e)); + } + }; + + // Restore the items from the disk cache + for (_i, entry) in cursor.enumerate() { + match entry { + Ok((_key, value)) => { + let item = schema::Item::from_bytes(value.value()); + + if item.is_err() { + println!("Error reading from disk cache: {:?}", item.err()); + return Err(Box::::from(IoError::new( + ErrorKind::InvalidInput, + "Error reading from disk cache", + ))); + } + + // Mark the item as restored + let mut i = item.unwrap(); + i.set_restored(); + items.push(i); + } + Err(e) => { + return Err(Box::::from(e)); + } + } + } + _ = read_txn.close(); + + Ok(items) + } + + /// Returns the number of items in the database + pub fn items_in_db(&self) -> usize { + let read_txn = self.db.begin_read().unwrap(); + let table = read_txn.open_table(DB).unwrap(); + let count = table.len().unwrap(); + count as usize + } +} diff --git a/src/lib.rs b/src/lib.rs index faf5d99..a29405f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -9,36 +9,70 @@ //! 
The RPQ should always be created with the new function like so: //! //! ```rust -//! use rpq::{RPQ, RPQOptions}; -//! use std::time; +//! use chrono::Duration; +//! use rpq::{schema::RPQOptions, schema::Item, RPQ}; //! -//! #[tokio::main] +//! #[tokio::main(flavor = "multi_thread")] //! async fn main() { +//! let message_count = 1_000; +//! //! let options = RPQOptions { -//! max_priority: 10, -//! disk_cache_enabled: false, -//! database_path: "/tmp/rpq.db".to_string(), -//! lazy_disk_cache: true, -//! lazy_disk_write_delay: time::Duration::from_secs(5), -//! lazy_disk_cache_batch_size: 10_000, -//! buffer_size: 1_000_000, +//! max_priority: 10, +//! disk_cache_enabled: true, +//! database_path: "/tmp/rpq-prioritize.redb".to_string(), +//! lazy_disk_cache: true, +//! lazy_disk_write_delay: Duration::seconds(5), +//! lazy_disk_cache_batch_size: 10_000, //! }; //! -//! let r = RPQ::::new(options).await; -//! if r.is_err() { -//! // handle logic -//! } +//! let r = RPQ::new(options).await; +//! match r { +//! Ok(_) => {} +//! Err(e) => { +//! println!("Error Creating RPQ: {}", e); +//! return; +//! } +//! } +//! +//! let (rpq, _restored_items) = r.unwrap(); +//! +//! for i in 0..message_count { +//! let item = Item::new( +//! i % 10, +//! i, +//! false, +//! None, +//! false, +//! None, +//! ); +//! +//! let result = rpq.enqueue(item).await; +//! if result.is_err() { +//! println!("Error Enqueuing: {}", result.err().unwrap()); +//! return; +//! } +//! } +//! +//! for _i in 0..message_count { +//! let result = rpq.dequeue().await; +//! if result.is_err() { +//! println!("Error Dequeuing: {}", result.err().unwrap()); +//! return; +//! } +//! } +//! +//! rpq.close().await; //! } //! ``` //! //! # Architecture Notes -//! In many ways, RPQ slighty compromises the performance of a traditional priority queue in order to provide +//! In many ways, RPQ slightly compromises the performance of a traditional priority queue in order to provide //! a variety of features that are useful when absorbing distributed load from many down or upstream services. -//! It employs a fairly novel techinique that allows it to lazily write and delete items from a disk cache while +//! It employs a fairly novel technique that allows it to lazily write and delete items from a disk cache while //! still maintaining data in memory. This basically means that a object can be added to the queue and then removed -//! without the disk commit ever blocking the processes sending or reciving the data. In the case that a batch of data +//! without the disk commit ever blocking the processes sending or receiving the data. In the case that a batch of data //! has already been removed from the queue before it is written to disk, the data is simply discarded. This -//! dramaically reduces the amount of time spent doing disk commits and allows for much better performance in the +//! dramatically reduces the amount of time spent doing disk commits and allows for much better performance in the //! case that you need disk caching and still want to maintain a high peak throughput. //! //! ```text @@ -78,93 +112,60 @@ //! │ Item │ //! └──────┘ //! 
``` -use core::time; use std::collections::HashMap; use std::error::Error; use std::io::Error as IoError; use std::io::ErrorKind; use std::result::Result; -use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; -use redb::{Database, ReadableTableMetadata, TableDefinition}; use serde::de::DeserializeOwned; use serde::Serialize; -use tokio::sync::mpsc::{channel, Receiver, Sender}; +use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}; use tokio::sync::watch; use tokio::sync::Mutex; use tokio::task::JoinHandle; use tokio::time::interval; -mod bpq; +mod disk; pub mod pq; +pub mod schema; -const DB: TableDefinition<&str, &[u8]> = TableDefinition::new("rpq"); - -/// RPQ hold private items and configuration for the RPQ. -/// You don't need to interact with the items in this struct directly, -/// but instead via the implementations attched to the RPQ struct. -pub struct RPQ { +/// RPQ holds private items and configuration for the RPQ. +pub struct RPQ { // options is the configuration for the RPQ - options: RPQOptions, - // non_empty_buckets is a binary heap of priorities - non_empty_buckets: bpq::BucketPriorityQueue, - // buckets is a map of priorities to a binary heap of items - buckets: Arc>>, - - // items_in_queues is the number of items across all queues - items_in_queues: AtomicUsize, + options: schema::RPQOptions, + + // queue is the main queue that holds the items + queue: Mutex>, + // disk_cache maintains a cache of items that are in the queue - disk_cache: Option>, + disk_cache: Arc>>, + // lazy_disk_channel is the channel for lazy disk writes - lazy_disk_writer_sender: Arc>>, + lazy_disk_writer_sender: UnboundedSender>, // lazy_disk_reader is the receiver for lazy disk writes - lazy_disk_writer_receiver: Mutex>>, + lazy_disk_writer_receiver: Mutex>>, // lazy_disk_delete_sender is the sender for lazy disk deletes - lazy_disk_delete_sender: Arc>>, + lazy_disk_delete_sender: Arc>>, // lazy_disk_delete_receiver is the receiver for lazy disk deletes - lazy_disk_delete_receiver: Mutex>>, - - // batch_handler is the handler for batches - batch_handler: Mutex, - // batch_counter is the counter for batches - batch_counter: Mutex, - // batch_shutdown_receiver is the receiver for the shutdown signal - batch_shutdown_receiver: watch::Receiver, - // batch_shutdown_sender is the sender for the shutdown signal - batch_shutdown_sender: watch::Sender, - - // shutdown_receiver is the receiver for the shutdown signal - shutdown_receiver: watch::Receiver, - // shutdown_sender is the sender for the shutdown signal - shutdown_sender: watch::Sender, - // sync_handles is a map of priorities to sync handles - sync_handles: Mutex>>, -} - -/// RPQOptions is the configuration for the RPQ -pub struct RPQOptions { - /// Holds the number of priorities(buckets) that this RPQ will accept for this queue. - pub max_priority: usize, - /// Enables or disables the disk cache using redb as the backend to store items - pub disk_cache_enabled: bool, - /// Holds the path to where the disk cache database will be persisted - pub database_path: String, - /// Enables or disables lazy disk writes and deletes. The speed can be quite variable depending - /// on the disk itself and how often you are emptying the queue in combination with the write delay - pub lazy_disk_cache: bool, - /// Sets the delay between lazy disk writes. This delays items from being commited to the disk cache. 
- /// If you are pulling items off the queue faster than this delay, many times can be skip the write to disk, - /// massively increasing the throughput of the queue. - pub lazy_disk_write_delay: time::Duration, - /// Sets the number of items that will be written to the disk cache in a single batch. This can be used to - /// tune the performance of the disk cache depending on your specific workload. - pub lazy_disk_cache_batch_size: usize, - /// Sets the size of the channnel that is used to buffer items before they are written to the disk cache. - /// This can block your queue if the thread pulling items off the channel becomes fully saturated. Typically you - /// should set this value in proportion to your largest write peaks. I.E. if your peak write is 10,000,000 items per second, - /// and your average write is 1,000,000 items per second, you should set this value to 20,000,000 to ensure that no blocking occurs. - pub buffer_size: usize, + lazy_disk_delete_receiver: Mutex>>, + + // lazy_disk_sync_handles is a map of priorities to sync handles + lazy_disk_sync_handles: Mutex>>, + // lazy_disk_batch_handler is the handler for batches + lazy_disk_batch_handler: Mutex, + // lazy_disk_batch_counter is the counter for batches + lazy_disk_batch_counter: Mutex, + + // lazy_disk_batch_shutdown_receiver is the receiver for the shutdown signal + lazy_disk_batch_shutdown_receiver: watch::Receiver, + // lazy_disk_batch_shutdown_sender is the sender for the shutdown signal + lazy_disk_batch_shutdown_sender: watch::Sender, + // lazy_disk_shutdown_receiver is the receiver for the shutdown signal + lazy_disk_shutdown_receiver: watch::Receiver, + // lazy_disk_shutdown_sender is the sender for the shutdown signal + lazy_disk_shutdown_sender: watch::Sender, } struct BatchHandler { @@ -181,15 +182,13 @@ struct BatchCounter { batch_number: usize, } -impl RPQ +impl RPQ where T: Serialize + DeserializeOwned + 'static, { /// Creates a new RPQ with the given options and returns the RPQ and the number of items restored from the disk cache - pub async fn new(options: RPQOptions) -> Result<(Arc>, usize), Box> { + pub async fn new(options: schema::RPQOptions) -> Result<(Arc>, usize), Box> { // Create base structures - let mut buckets = HashMap::new(); - let items_in_queues = AtomicUsize::new(0); let sync_handles = Vec::new(); let (shutdown_sender, shutdown_receiver) = watch::channel(false); let (batch_shutdown_sender, batch_shutdown_receiver) = watch::channel(false); @@ -203,107 +202,69 @@ where }; // Create the lazy disk sync channel - let (lazy_disk_writer_sender, lazy_disk_writer_receiver) = - channel(options.buffer_size as usize); - let (lazy_disk_delete_sender, lazy_disk_delete_receiver) = - channel(options.buffer_size as usize); + let (lazy_disk_writer_sender, lazy_disk_writer_receiver) = unbounded_channel(); + let (lazy_disk_delete_sender, lazy_disk_delete_receiver) = unbounded_channel(); + let lazy_disk_delete_sender = Arc::new(lazy_disk_delete_sender); + let lazy_disk_delete_sender_clone = Arc::clone(&lazy_disk_delete_sender); // Capture some variables let path = options.database_path.clone(); let disk_cache_enabled = options.disk_cache_enabled; let lazy_disk_cache = options.lazy_disk_cache; + let max_priority = options.max_priority; - // Create the buckets - for i in 0..options.max_priority { - buckets.insert(i, pq::PriorityQueue::new()); - } - - let disk_cache: Option>; - if disk_cache_enabled { - let db = Database::create(&path).unwrap(); - let db = Arc::new(db); - disk_cache = Some(db); + let disk_cache 
= if disk_cache_enabled { + Arc::new(Some(disk::DiskCache::new(&path))) } else { - disk_cache = None; - } + Arc::new(None) + }; + + let disk_cache_clone_one = Arc::clone(&disk_cache); + let disk_cache_clone_two = Arc::clone(&disk_cache); // Create the RPQ let rpq = RPQ { options, - non_empty_buckets: bpq::BucketPriorityQueue::new(), - buckets: Arc::new(buckets), - items_in_queues, - disk_cache, - lazy_disk_writer_sender: Arc::new(lazy_disk_writer_sender), + queue: Mutex::new(pq::PriorityQueue::new( + max_priority, + disk_cache_enabled, + lazy_disk_cache, + lazy_disk_delete_sender_clone, + disk_cache_clone_one, + )), + + disk_cache: disk_cache_clone_two, + + lazy_disk_sync_handles: Mutex::new(sync_handles), + lazy_disk_writer_sender: lazy_disk_writer_sender, lazy_disk_writer_receiver: Mutex::new(lazy_disk_writer_receiver), - lazy_disk_delete_sender: Arc::new(lazy_disk_delete_sender), + lazy_disk_delete_sender: lazy_disk_delete_sender, lazy_disk_delete_receiver: Mutex::new(lazy_disk_delete_receiver), - sync_handles: Mutex::new(sync_handles), - shutdown_receiver, - shutdown_sender, - batch_handler: Mutex::new(batch_handler), - batch_shutdown_sender: batch_shutdown_sender, - batch_shutdown_receiver: batch_shutdown_receiver, - batch_counter: Mutex::new(batch_counter), + lazy_disk_shutdown_receiver: shutdown_receiver, + lazy_disk_shutdown_sender: shutdown_sender, + lazy_disk_batch_handler: Mutex::new(batch_handler), + lazy_disk_batch_counter: Mutex::new(batch_counter), + lazy_disk_batch_shutdown_sender: batch_shutdown_sender, + lazy_disk_batch_shutdown_receiver: batch_shutdown_receiver, }; let rpq = Arc::new(rpq); // Restore the items from the disk cache let mut restored_items: usize = 0; if disk_cache_enabled { - // Create a the initial table - let ctxn = rpq.disk_cache.as_ref().unwrap().begin_write().unwrap(); - ctxn.open_table(DB).unwrap(); - ctxn.commit().unwrap(); - - let read_txn = rpq.disk_cache.as_ref().unwrap().begin_read().unwrap(); - let table = read_txn.open_table(DB).unwrap(); - - let cursor = match table.range::<&str>(..) 
{ - Ok(range) => range, - Err(e) => { - return Err(Box::::from(e)); - } - }; - - for (_i, entry) in cursor.enumerate() { - match entry { - Ok((_key, value)) => { - let item = pq::Item::from_bytes(value.value()); - - if item.is_err() { - return Err(Box::::from(IoError::new( - ErrorKind::InvalidInput, - "Error reading from disk cache", - ))); - } - - // Mark the item as restored - let mut i = item.unwrap(); - i.set_restored(); - let result = rpq.enqueue(i).await; - if result.is_err() { - return Err(Box::::from(IoError::new( - ErrorKind::InvalidInput, - "Error enqueueing item from the disk cache", - ))); - } - restored_items += 1; - } - Err(e) => { - return Err(Box::::from(e)); - } - } + let result = rpq.restore_from_disk().await; + if result.is_err() { + return Err(Box::::from(result.err().unwrap())); } - _ = read_txn.close(); + restored_items = result?; if lazy_disk_cache { - let mut handles = rpq.sync_handles.lock().await; + let mut handles = rpq.lazy_disk_sync_handles.lock().await; let rpq_clone = Arc::clone(&rpq); handles.push(tokio::spawn(async move { let result = rpq_clone.lazy_disk_writer().await; if result.is_err() { - println!("Error in lazy disk writer: {:?}", result.err().unwrap()); + panic!("Error in lazy disk writer: {:?}", result.err().unwrap()); } })); @@ -311,7 +272,7 @@ where handles.push(tokio::spawn(async move { let result = rpq_clone.lazy_disk_deleter().await; if result.is_err() { - println!("Error in lazy disk deleter: {:?}", result.err().unwrap()); + panic!("Error in lazy disk deleter: {:?}", result.err().unwrap()); } })); } @@ -320,30 +281,11 @@ where } /// Adds an item to the RPQ and returns an error if one occurs otherwise it returns () - pub async fn enqueue(&self, mut item: pq::Item) -> Result<(), Box> { - // Check if the item priority is greater than the bucket count - if item.priority >= self.options.max_priority { - return Result::Err(Box::::from(IoError::new( - ErrorKind::InvalidInput, - "Priority is greater than bucket count", - ))); - } - let priority = item.priority; - - // Get the bucket and enqueue the item - let bucket = self.buckets.get(&item.priority); - - if bucket.is_none() { - return Result::Err(Box::::from(IoError::new( - ErrorKind::InvalidInput, - "Bucket does not exist", - ))); - } - + pub async fn enqueue(&self, mut item: schema::Item) -> Result<(), Box> { // If the disk cache is enabled, send the item to the lazy disk writer if self.options.disk_cache_enabled { // Increment the batch number - let mut batch_counter = self.batch_counter.lock().await; + let mut batch_counter = self.lazy_disk_batch_counter.lock().await; batch_counter.message_counter += 1; if batch_counter.message_counter % self.options.lazy_disk_cache_batch_size == 0 { batch_counter.batch_number += 1; @@ -356,7 +298,7 @@ where item.set_disk_uuid(); if self.options.lazy_disk_cache { let lazy_disk_writer_sender = &self.lazy_disk_writer_sender; - let was_sent = lazy_disk_writer_sender.send(item.clone()).await; + let was_sent = lazy_disk_writer_sender.send(item.clone()); match was_sent { Ok(_) => {} Err(e) => { @@ -364,65 +306,104 @@ where } } } else { - let result = self.commit_single(item.clone()); - match result { - Ok(_) => {} - Err(e) => { - return Result::Err(e); + let db = self.disk_cache.as_ref(); + match db { + None => { + return Err(Box::::from(IoError::new( + ErrorKind::InvalidInput, + "Error getting disk cache", + ))); + } + Some(db) => { + let result = db.commit_single(item.clone()); + match result { + Ok(_) => {} + Err(e) => { + return Err(Box::::from(e)); + } + } } } } } 
} - // Enqueue the item and update - bucket.unwrap().enqueue(item); - self.non_empty_buckets.add_bucket(priority); + // Enqueue the item + self.queue.lock().await.enqueue(item); Ok(()) } - /// Returns a Result with the next item in the RPQ or an error if one occurs - pub async fn dequeue(&self) -> Result>, Box> { - // Fetch the bucket - let bucket_id = self.non_empty_buckets.peek(); - if bucket_id.is_none() { - return Result::Err(Box::::from(IoError::new( - ErrorKind::InvalidInput, - "No items in queue", - ))); - } - let bucket_id = bucket_id.unwrap(); - - // Fetch the queue - let queue = self.buckets.get(&bucket_id); - if queue.is_none() { - return Result::Err(Box::::from(IoError::new( - ErrorKind::InvalidInput, - "No items in queue", - ))); + /// Adds a batch of items to the RPQ and returns an error if one occurs otherwise it returns () + pub async fn enqueue_batch( + &self, + mut items: Vec>, + ) -> Result<(), Box> { + let mut queue = self.queue.lock().await; + for item in items.iter_mut() { + // If the disk cache is enabled, send the item to the lazy disk writer + if self.options.disk_cache_enabled { + // Increment the batch number + let mut batch_counter = self.lazy_disk_batch_counter.lock().await; + batch_counter.message_counter += 1; + if batch_counter.message_counter % self.options.lazy_disk_cache_batch_size == 0 { + batch_counter.batch_number += 1; + } + let bn = batch_counter.batch_number; + drop(batch_counter); + + item.set_batch_id(bn); + if !item.was_restored() { + item.set_disk_uuid(); + if self.options.lazy_disk_cache { + let lazy_disk_writer_sender = &self.lazy_disk_writer_sender; + let was_sent = lazy_disk_writer_sender.send(item.clone()); + match was_sent { + Ok(_) => {} + Err(e) => { + return Err(Box::::from(e)); + } + } + } else { + let db = self.disk_cache.as_ref(); + match db { + None => { + return Err(Box::::from(IoError::new( + ErrorKind::InvalidInput, + "Error getting disk cache", + ))); + } + Some(db) => { + let result = db.commit_single(item.clone()); + match result { + Ok(_) => {} + Err(e) => { + return Err(Box::::from(e)); + } + } + } + } + } + } + } + + // Enqueue the item + queue.enqueue(item.clone()); } + Ok(()) + } - // Fetch the item from the bucket - let item = queue.unwrap().dequeue(); + /// Returns a Result with the next item in the RPQ or an error if one occurs + pub async fn dequeue(&self) -> Result>, Box> { + let item = self.queue.lock().await.dequeue(); if item.is_none() { - return Result::Err(Box::::from(IoError::new( - ErrorKind::InvalidInput, - "No items in queue", - ))); + return Ok(None); } - self.items_in_queues.fetch_sub(1, Ordering::SeqCst); let item = item.unwrap(); - // If the bucket is empty, remove it from the non_empty_buckets - if queue.unwrap().len() == 0 { - self.non_empty_buckets.remove_bucket(&bucket_id); - } - if self.options.disk_cache_enabled { - let item_clone = item.clone(); if self.options.lazy_disk_cache { let lazy_disk_delete_sender = &self.lazy_disk_delete_sender; - let was_sent = lazy_disk_delete_sender.send(item_clone).await; + let was_sent = lazy_disk_delete_sender.send(item.clone()); match was_sent { Ok(_) => {} Err(e) => { @@ -430,9 +411,30 @@ where } } } else { - let result = self.delete_single(item_clone.get_disk_uuid().unwrap().as_ref()); - if result.is_err() { - return Result::Err(result.err().unwrap()); + let id = match item.get_disk_uuid() { + Some(id) => id, + None => { + return Result::Err(Box::new(IoError::new( + ErrorKind::InvalidInput, + "Error getting disk uuid", + ))); + } + }; + + let db = 
self.disk_cache.as_ref(); + match db { + None => { + return Result::Err(Box::new(IoError::new( + ErrorKind::InvalidInput, + "Error getting disk cache", + ))); + } + Some(db) => { + let result = db.delete_single(&id); + if result.is_err() { + return Result::Err(result.err().unwrap()); + } + } } } } @@ -440,50 +442,151 @@ where Ok(Some(item)) } - /// Prioritize reorders the items in each bucket based on the values spesified in the item. - /// It returns a tuple with the number of items removed and the number of items escalated or and error if one occurs. - pub async fn prioritize(&self) -> Result<(usize, usize), Box> { - let mut removed: usize = 0; - let mut escalated: usize = 0; - - for (_, active_bucket) in self.buckets.iter() { - match active_bucket.prioritize() { - Ok((r, e)) => { - removed += r; - escalated += e; - } - Err(err) => { - return Err(Box::::from(err)); + /// Returns a Result with a batch of items in the RPQ or an error if one occurs + pub async fn dequeue_batch( + &self, + count: usize, + ) -> Result>>, Box> { + let mut items = Vec::new(); + let mut queue = self.queue.lock().await; + for _ in 0..count { + let item = queue.dequeue(); + if item.is_none() { + break; + } + let item = item.unwrap(); + + if self.options.disk_cache_enabled { + if self.options.lazy_disk_cache { + let lazy_disk_delete_sender = &self.lazy_disk_delete_sender; + let was_sent = lazy_disk_delete_sender.send(item.clone()); + match was_sent { + Ok(_) => {} + Err(e) => { + return Result::Err(Box::new(e)); + } + } + } else { + let id = match item.get_disk_uuid() { + Some(id) => id, + None => { + return Result::Err(Box::new(IoError::new( + ErrorKind::InvalidInput, + "Error getting disk uuid", + ))); + } + }; + + let db = self.disk_cache.as_ref(); + match db { + None => { + return Result::Err(Box::new(IoError::new( + ErrorKind::InvalidInput, + "Error getting disk cache", + ))); + } + Some(db) => { + let result = db.delete_single(&id); + if result.is_err() { + return Result::Err(result.err().unwrap()); + } + } + } } } + items.push(item); + } + if items.is_empty() { + return Ok(None); + } + Ok(Some(items)) + } + + /// Prioritize reorders the items in each bucket based on the values specified in the item. + /// It returns a tuple with the number of items removed and the number of items escalated or and error if one occurs. 
+ pub async fn prioritize(&self) -> Result<(usize, usize), Box> { + self.queue.lock().await.prioritize() + } + + /// Returns the number of items in the RPQ across all buckets + pub async fn len(&self) -> usize { + self.queue.lock().await.items_in_queue() + } + + /// Returns the number of active buckets in the RPQ (buckets with items) + pub async fn active_buckets(&self) -> usize { + self.queue.lock().await.active_buckets() + } + + /// Returns the number of pending batches in the RPQ for both the writer or the deleter + pub async fn unsynced_batches(&self) -> usize { + let batch_handler = self.lazy_disk_batch_handler.lock().await; + batch_handler + .synced_batches + .iter() + .chain(batch_handler.deleted_batches.iter()) + .filter(|&(_, synced_or_deleted)| !*synced_or_deleted) + .count() + } + + /// Returns the number of items in the disk cache which can be helpful for debugging or monitoring + pub fn items_in_db(&self) -> usize { + if self.disk_cache.is_none() { + return 0; + } + + let db = self.disk_cache.as_ref(); + match db { + None => 0, + Some(db) => db.items_in_db(), + } + } + + /// Closes the RPQ and waits for all the async tasks to finish + pub async fn close(&self) { + self.lazy_disk_shutdown_sender.send(true).unwrap(); + + let mut handles = self.lazy_disk_sync_handles.lock().await; + while let Some(handle) = handles.pop() { + handle.await.unwrap(); } - self.items_in_queues.fetch_sub(removed, Ordering::SeqCst); - Ok((removed, escalated)) } async fn lazy_disk_writer(&self) -> Result<(), Box> { - let mut awaiting_batches = HashMap::>>::new(); - let mut ticker = interval(self.options.lazy_disk_write_delay); + let mut awaiting_batches = HashMap::>>::new(); + let mut ticker = interval(self.options.lazy_disk_write_delay.to_std().unwrap()); let mut receiver = self.lazy_disk_writer_receiver.lock().await; - let mut shutdown_receiver = self.shutdown_receiver.clone(); + let mut shutdown_receiver = self.lazy_disk_shutdown_receiver.clone(); loop { // Check if the write cache is full or the ticker has ticked tokio::select! 
{ // Flush the cache if the ticker has ticked _ = ticker.tick() => { - let mut batch_handler = self.batch_handler.lock().await; + let mut batch_handler = self.lazy_disk_batch_handler.lock().await; for (id, batch) in awaiting_batches.iter_mut() { if batch.len() >= self.options.lazy_disk_cache_batch_size { if *batch_handler.deleted_batches.get(id).unwrap_or(&false) { batch.clear(); } else { - let result = self.commit_batch(batch); - if result.is_err() { - return Err(Box::::from(result.err().unwrap())); + let db = self.disk_cache.as_ref(); + match db { + None => { + return Result::Err(Box::new(IoError::new( + ErrorKind::InvalidInput, + "Error getting disk cache", + ))); + } + Some(db) => { + let result = db.commit_batch(batch); + if result.is_err() { + return Err(Box::::from(result.err().unwrap())); + } + } } } + batch_handler.synced_batches.insert(*id, true); batch_handler.deleted_batches.insert(*id, false); } @@ -512,7 +615,7 @@ where // Commit the remaining batches for (id, batch) in awaiting_batches.iter_mut() { - let mut batch_handler = self.batch_handler.lock().await; + let mut batch_handler = self.lazy_disk_batch_handler.lock().await; if *batch_handler.deleted_batches.get(id).unwrap_or(&false) { batch.clear(); @@ -521,12 +624,24 @@ where batch_handler.synced_batches.insert(*id, true); batch_handler.deleted_batches.insert(*id, false); - let result = self.commit_batch(batch); - if result.is_err() { - return Err(Box::::from(result.err().unwrap())); + + let db = self.disk_cache.as_ref(); + match db { + None => { + return Result::Err(Box::new(IoError::new( + ErrorKind::InvalidInput, + "Error getting disk cache", + ))); + } + Some(db) => { + let result = db.commit_batch(batch); + if result.is_err() { + return Err(Box::::from(result.err().unwrap())); + } + } } } - self.batch_shutdown_sender.send(true).unwrap(); + self.lazy_disk_batch_shutdown_sender.send(true).unwrap(); break Ok(()); } @@ -535,13 +650,12 @@ where } async fn lazy_disk_deleter(&self) -> Result<(), Box> { - let mut awaiting_batches = HashMap::>>::new(); - let mut restored_items: Vec> = Vec::new(); + let mut awaiting_batches = HashMap::>>::new(); + let mut restored_items: Vec> = Vec::new(); let mut receiver = self.lazy_disk_delete_receiver.lock().await; - let mut shutdown_receiver = self.batch_shutdown_receiver.clone(); + let mut shutdown_receiver = self.lazy_disk_batch_shutdown_receiver.clone(); loop { - // Check if the write cache is full or the ticker has ticked tokio::select! 
{ item = receiver.recv() => { // Check if the item was restored @@ -550,29 +664,51 @@ where restored_items.push(item); if restored_items.len() >= self.options.lazy_disk_cache_batch_size { - let result = self.delete_batch(&mut restored_items); - if result.is_err() { - return Err(Box::::from(result.err().unwrap())); + let db = self.disk_cache.as_ref(); + match db { + None => { + return Result::Err(Box::new(IoError::new( + ErrorKind::InvalidInput, + "Error getting disk cache", + ))); + } + Some(db) => { + let result = db.delete_batch(&mut restored_items); + if result.is_err() { + return Err(Box::::from(result.err().unwrap())); + } + } } - restored_items.clear(); } continue; } // If the item was not restored, add it to the batch let batch_bucket = item.get_batch_id(); - let batch = awaiting_batches.entry(batch_bucket).or_insert(Vec::new()); + let mut batch = awaiting_batches.entry(batch_bucket).or_insert(Vec::new()); batch.push(item); // Check if the batch is full if batch.len() >= self.options.lazy_disk_cache_batch_size { - let mut batch_handler = self.batch_handler.lock().await; + let mut batch_handler = self.lazy_disk_batch_handler.lock().await; let was_synced = batch_handler.synced_batches.get(&batch_bucket).unwrap_or(&false); if *was_synced { - let result = self.delete_batch(batch); - if result.is_err() { - return Err(Box::::from(result.err().unwrap())); + let db = self.disk_cache.as_ref(); + match db { + None => { + return Result::Err(Box::new(IoError::new( + ErrorKind::InvalidInput, + "Error getting disk cache", + ))); + } + Some(db) => { + let result = db.delete_batch(&mut batch); + if result.is_err() { + return Err(Box::::from(result.err().unwrap())); + } + } } + awaiting_batches.remove(&batch_bucket); } else { batch.clear(); @@ -604,20 +740,41 @@ where // Commit the remaining batches if !restored_items.is_empty() { - let result = self.delete_batch(&mut restored_items); - if result.is_err() { - return Err(Box::::from(result.err().unwrap())); - } - restored_items.clear(); + let db = self.disk_cache.as_ref(); + match db { + None => { + return Result::Err(Box::new(IoError::new( + ErrorKind::InvalidInput, + "Error getting disk cache", + ))); + } + Some(db) => { + let result = db.delete_batch(&mut restored_items); + if result.is_err() { + return Err(Box::::from(result.err().unwrap())); + } + } + } } - for (id, batch) in awaiting_batches.iter_mut() { - let mut batch_handler = self.batch_handler.lock().await; + for (id, mut batch) in awaiting_batches.iter_mut() { + let mut batch_handler = self.lazy_disk_batch_handler.lock().await; let was_synced = batch_handler.synced_batches.get(id).unwrap_or(&false); if *was_synced { - let result = self.delete_batch(batch); - if result.is_err() { - return Err(Box::::from(result.err().unwrap())); - } + let db = self.disk_cache.as_ref(); + match db { + None => { + return Result::Err(Box::new(IoError::new( + ErrorKind::InvalidInput, + "Error getting disk cache", + ))); + } + Some(db) => { + let result = db.delete_batch(&mut batch); + if result.is_err() { + return Err(Box::::from(result.err().unwrap())); + } + } + } } else { batch.clear(); } @@ -631,218 +788,231 @@ where } } - fn commit_batch(&self, write_cache: &mut Vec>) -> Result<(), Box> { - let write_txn = self.disk_cache.as_ref().unwrap().begin_write().unwrap(); - for item in write_cache.iter() { - let mut table = write_txn.open_table(DB).unwrap(); - // Convert to bytes - let b = item.to_bytes(); - if b.is_err() { + async fn restore_from_disk(&self) -> Result> { + let db = self.disk_cache.as_ref(); + 
match db { + None => { return Err(Box::::from(IoError::new( ErrorKind::InvalidInput, - "Error converting item to bytes", + "Error getting disk cache", ))); } + Some(db) => { + let restored_items = db.return_items_from_disk(); + if restored_items.is_err() { + return Err(Box::::from(restored_items.err().unwrap())); + } + let restored_items = restored_items?; + let total_items = restored_items.len(); - let b = b.unwrap(); - let key = item.get_disk_uuid().unwrap(); + let mut queue = self.queue.lock().await; + for item in restored_items.iter() { + queue.enqueue(item.clone()); + } - let was_written = table.insert(key.as_str(), &b[..]); - if was_written.is_err() { - return Err(Box::::from(IoError::new( - ErrorKind::InvalidInput, - "Error writing item to disk cache", - ))); + Ok(total_items) } } - - write_txn.commit().unwrap(); - write_cache.clear(); - Ok(()) } +} - fn delete_batch(&self, delete_cache: &mut Vec>) -> Result<(), Box> { - let write_txn = self.disk_cache.as_ref().unwrap().begin_write().unwrap(); - for item in delete_cache.iter() { - let mut table = write_txn.open_table(DB).unwrap(); - let key = item.get_disk_uuid().unwrap(); - let was_deleted = table.remove(key.as_str()); - if was_deleted.is_err() { - return Err(Box::::from(IoError::new( - ErrorKind::InvalidInput, - "Error deleting item from disk cache", - ))); - } - } +#[cfg(test)] +mod tests { + use super::*; + use chrono::Duration; + use rand::Rng; + use std::sync::atomic::Ordering; + use std::{ + collections::VecDeque, + error::Error, + sync::atomic::{AtomicBool, AtomicUsize}, + }; - write_txn.commit().unwrap(); - delete_cache.clear(); - Ok(()) - } + #[tokio::test(flavor = "multi_thread")] + async fn order_test() { + let message_count = 1_000_000; - fn commit_single(&self, item: pq::Item) -> Result<(), Box> { - let write_txn = self.disk_cache.as_ref().unwrap().begin_write().unwrap(); - { - let mut table = write_txn.open_table(DB).unwrap(); - let b = item.to_bytes(); + let options = schema::RPQOptions { + max_priority: 10, + disk_cache_enabled: false, + database_path: "/tmp/rpq-order.redb".to_string(), + lazy_disk_cache: false, + lazy_disk_write_delay: Duration::seconds(5), + lazy_disk_cache_batch_size: 5_000, + }; - if b.is_err() { - return Err(Box::::from(IoError::new( - ErrorKind::InvalidInput, - "Error converting item to bytes", - ))); - } + let r: Result<(Arc>, usize), Box> = RPQ::new(options).await; + assert!(r.is_ok()); + let (rpq, _restored_items) = r.unwrap(); - let key = item.get_disk_uuid().unwrap(); - let b = b.unwrap(); + let mut expected_data = HashMap::new(); + for i in 0..message_count { + let item = schema::Item::new(i % 10, i, false, None, false, Some(Duration::seconds(5))); + let result = rpq.enqueue(item).await; + assert!(result.is_ok()); + let v = expected_data.entry(i % 10).or_insert(VecDeque::new()); + v.push_back(i); + } - let was_written = table.insert(key.as_str(), &b[..]); - if was_written.is_err() { - return Err(Box::::from(IoError::new( - ErrorKind::InvalidInput, - "Error writing item to disk cache", - ))); - } + for _i in 0..message_count { + let item = rpq.dequeue().await; + assert!(item.is_ok()); + let item = item.unwrap().unwrap(); + let v = expected_data.get_mut(&item.priority).unwrap(); + let expected_data = v.pop_front().unwrap(); + assert!(item.data == expected_data); } - write_txn.commit().unwrap(); - Ok(()) + rpq.close().await; + assert_eq!(rpq.len().await, 0); } - fn delete_single(&self, key: &str) -> Result<(), Box> { - let write_txn = 
self.disk_cache.as_ref().unwrap().begin_write().unwrap(); - { - let mut table = write_txn.open_table(DB).unwrap(); - let was_written = table.remove(key); - if was_written.is_err() { - return Err(Box::::from(IoError::new( - ErrorKind::InvalidInput, - "Error deleting item from disk cache", - ))); - } - } + #[tokio::test(flavor = "multi_thread")] + async fn prioritize_test() { + let message_count = 1_000_000; + let sent_counter = Arc::new(AtomicUsize::new(0)); + let received_counter = Arc::new(AtomicUsize::new(0)); + let removed_counter = Arc::new(AtomicUsize::new(0)); + let total_escalated = Arc::new(AtomicUsize::new(0)); - write_txn.commit().unwrap(); - Ok(()) - } + let options = schema::RPQOptions { + max_priority: 10, + disk_cache_enabled: true, + database_path: "/tmp/rpq-prioritize.redb".to_string(), + lazy_disk_cache: true, + lazy_disk_write_delay: Duration::seconds(5), + lazy_disk_cache_batch_size: 5_000, + }; - /// Returns the number of items in the RPQ across all buckets - pub async fn len(&self) -> usize { - let mut len = 0 as usize; - for (_, active_bucket) in self.buckets.iter() { - len += active_bucket.len(); - } - len - } + let r: Result<(Arc>, usize), Box> = RPQ::new(options).await; + assert!(r.is_ok()); + let (rpq, _restored_items) = r.unwrap(); - /// Returns the number of active buckets in the RPQ (buckets with items) - pub fn active_buckets(&self) -> usize { - self.non_empty_buckets.len() - } + let rpq_clone = Arc::clone(&rpq); + let (shutdown_sender, mut shutdown_receiver) = watch::channel(false); + let removed_clone = Arc::clone(&removed_counter); + let escalated_clone = Arc::clone(&total_escalated); + tokio::spawn(async move { + tokio::select! { + _ = shutdown_receiver.changed() => { + return; + }, + _ = async { + loop { + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + let results = rpq_clone.prioritize().await; - /// Returns the number of pending batches in the RPQ for both the writer or the deleter - pub async fn unsynced_batches(&self) -> usize { - let mut unsynced_batches = 0; - let batch_handler = self.batch_handler.lock().await; - for (_, synced) in batch_handler.synced_batches.iter() { - if !*synced { - unsynced_batches += 1; - } - } - for (_, deleted) in batch_handler.deleted_batches.iter() { - if !*deleted { - unsynced_batches += 1; + if results.is_ok() { + let (removed, escalated) = results.unwrap(); + removed_clone.fetch_add(removed, Ordering::SeqCst); + escalated_clone.fetch_add(escalated, Ordering::SeqCst); + } else { + println!("Error: {:?}", results.err().unwrap()); + } + } + } => {} } - } - unsynced_batches - } + }); - /// Returns the number of items in the disk cache which can be helpful for debugging or monitoring - pub fn items_in_db(&self) -> usize { - if self.disk_cache.is_none() { - return 0; + for i in 0..message_count { + let item = schema::Item::new( + i % 10, + i, + true, + Some(Duration::seconds(rand::thread_rng().gen_range(1..10))), + true, + Some(Duration::seconds(10)), + ); + let result = rpq.enqueue(item).await; + assert!(result.is_ok()); + sent_counter.fetch_add(1, Ordering::SeqCst); } - let read_txn = self.disk_cache.as_ref().unwrap().begin_read().unwrap(); - let table = read_txn.open_table(DB).unwrap(); - let count = table.len().unwrap(); - count as usize - } - /// Closes the RPQ and waits for all the async tasks to finish - pub async fn close(&self) { - self.shutdown_sender.send(true).unwrap(); + loop { + if removed_counter.load(Ordering::SeqCst) + received_counter.load(Ordering::SeqCst) + == 
sent_counter.load(Ordering::SeqCst) + { + break; + } - let mut handles = self.sync_handles.lock().await; - while let Some(handle) = handles.pop() { - handle.await.unwrap(); + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + let item = rpq.dequeue().await; + assert!(item.is_ok()); + let item = item.unwrap(); + if item.is_none() { + continue; + } + received_counter.fetch_add(1, Ordering::SeqCst); } - } -} -#[cfg(test)] -mod tests { - use super::*; - use core::time; - use std::{ - collections::VecDeque, - error::Error, - sync::atomic::{AtomicBool, AtomicUsize}, - }; + shutdown_sender.send(true).unwrap(); + rpq.close().await; + assert_eq!(rpq.len().await, 0); + assert_eq!(rpq.items_in_db(), 0); + println!( + "Sent: {}, Received: {}, Removed: {}, Escalated: {}", + sent_counter.load(Ordering::SeqCst), + received_counter.load(Ordering::SeqCst), + removed_counter.load(Ordering::SeqCst), + total_escalated.load(Ordering::SeqCst) + ); + } #[tokio::test(flavor = "multi_thread")] - async fn order_test() { - let message_count = 1_000_000; + async fn disk_write_test() { + let message_count = 500_000; - let options = RPQOptions { + let options = schema::RPQOptions { max_priority: 10, - disk_cache_enabled: false, - database_path: "/tmp/rpq.redb".to_string(), - lazy_disk_cache: false, - lazy_disk_write_delay: time::Duration::from_secs(5), - lazy_disk_cache_batch_size: 5000, - buffer_size: 1_000_000, + disk_cache_enabled: true, + database_path: "/tmp/rpq-write.redb".to_string(), + lazy_disk_cache: true, + lazy_disk_write_delay: Duration::seconds(1), + lazy_disk_cache_batch_size: 5_000, }; let r: Result<(Arc>, usize), Box> = RPQ::new(options).await; - if r.is_err() { - panic!("Error creating RPQ"); - } + assert!(r.is_ok()); let (rpq, _restored_items) = r.unwrap(); let mut expected_data = HashMap::new(); for i in 0..message_count { - let item = pq::Item::new( + let item = schema::Item::new( i % 10, i, - false, - None, - false, - Some(std::time::Duration::from_secs(5)), + true, + Some(Duration::seconds(1)), + true, + Some(Duration::seconds(5)), ); let result = rpq.enqueue(item).await; - if result.is_err() { - panic!("Error enqueueing item"); - } + assert!(result.is_ok()); let v = expected_data.entry(i % 10).or_insert(VecDeque::new()); v.push_back(i); } + tokio::time::sleep(tokio::time::Duration::from_secs(10)).await; + assert!(rpq.len().await == message_count); + assert!(rpq.items_in_db() != 0); + for _i in 0..message_count { let item = rpq.dequeue().await; - if item.is_err() { - panic!("Item is None"); - } + assert!(item.is_ok()); let item = item.unwrap().unwrap(); let v = expected_data.get_mut(&item.priority).unwrap(); let expected_data = v.pop_front().unwrap(); assert!(item.data == expected_data); } + + rpq.close().await; + assert_eq!(rpq.len().await, 0); + assert_eq!(rpq.items_in_db(), 0); } #[tokio::test(flavor = "multi_thread")] - async fn e2e_test() { + async fn e2e_no_batch() { // Set Message Count - let message_count = 10_000_000 as usize; + let message_count = 10_000_250 as usize; // Set Concurrency let send_threads = 4 as usize; @@ -852,23 +1022,19 @@ mod tests { let received_counter = Arc::new(AtomicUsize::new(0)); let removed_counter = Arc::new(AtomicUsize::new(0)); let total_escalated = Arc::new(AtomicUsize::new(0)); - let finshed_sending = Arc::new(AtomicBool::new(false)); - let max_retries = 1000; + let finished_sending = Arc::new(AtomicBool::new(false)); // Create the RPQ - let options = RPQOptions { + let options = schema::RPQOptions { max_priority: bucket_count, 
disk_cache_enabled: true, - database_path: "/tmp/rpq.redb".to_string(), + database_path: "/tmp/rpq-e2e-nobatch.redb".to_string(), lazy_disk_cache: true, - lazy_disk_write_delay: time::Duration::from_secs(5), - lazy_disk_cache_batch_size: 10000, - buffer_size: 1_000_000, + lazy_disk_write_delay: Duration::seconds(5), + lazy_disk_cache_batch_size: 10_000, }; let r = RPQ::new(options).await; - if r.is_err() { - panic!("Error creating RPQ"); - } + assert!(r.is_ok()); let (rpq, restored_items) = r.unwrap(); // Launch the monitoring thread @@ -883,7 +1049,7 @@ mod tests { }, _ = async { loop { - tokio::time::sleep(time::Duration::from_secs(10)).await; + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; let results = rpq_clone.prioritize().await; if !results.is_ok() { @@ -901,7 +1067,6 @@ mod tests { // Enqueue items println!("Launching {} Send Threads", send_threads); let mut send_handles = Vec::new(); - let send_timer = std::time::Instant::now(); for _ in 0..send_threads { let rpq_clone = Arc::clone(&rpq); let sent_clone = Arc::clone(&sent_counter); @@ -912,43 +1077,37 @@ mod tests { break; } - let item = pq::Item::new( - //rand::thread_rng().gen_range(0..bucket_count), + let item = schema::Item::new( sent_clone.load(Ordering::SeqCst) % bucket_count, 0, - false, - None, - false, - Some(std::time::Duration::from_secs(5)), + true, + Some(Duration::seconds(1)), + true, + Some(Duration::seconds(5)), ); let result = rpq_clone.enqueue(item).await; - if result.is_err() { - panic!("Error enqueueing item"); - } + assert!(result.is_ok()); sent_clone.fetch_add(1, Ordering::SeqCst); } - println!("Finished Sending"); })); } // Dequeue items println!("Launching {} Receive Threads", receive_threads); let mut receive_handles = Vec::new(); - let receive_timer = std::time::Instant::now(); for _ in 0..receive_threads { // Clone all the shared variables let rpq_clone = Arc::clone(&rpq); let received_clone = Arc::clone(&received_counter); let sent_clone = Arc::clone(&sent_counter); let removed_clone = Arc::clone(&removed_counter); - let finshed_sending_clone = Arc::clone(&finshed_sending); + let finished_sending_clone = Arc::clone(&finished_sending); // Spawn the thread receive_handles.push(tokio::spawn(async move { - let mut counter = 0; loop { - if finshed_sending_clone.load(Ordering::SeqCst) { + if finished_sending_clone.load(Ordering::SeqCst) { if received_clone.load(Ordering::SeqCst) + removed_clone.load(Ordering::SeqCst) >= sent_clone.load(Ordering::SeqCst) + restored_items @@ -958,15 +1117,10 @@ mod tests { } let item = rpq_clone.dequeue().await; - if item.is_err() { - if counter >= max_retries { - panic!("Reached max retries waiting for items!"); - } - counter += 1; - std::thread::sleep(time::Duration::from_millis(100)); + assert!(item.is_ok()); + if item.unwrap().is_none() { continue; } - counter = 0; received_clone.fetch_add(1, Ordering::SeqCst); } })); @@ -976,34 +1130,182 @@ mod tests { for handle in send_handles { handle.await.unwrap(); } - let send_time = send_timer.elapsed().as_secs_f64(); - finshed_sending.store(true, Ordering::SeqCst); + finished_sending.store(true, Ordering::SeqCst); // Wait for receive threads to finish for handle in receive_handles { handle.await.unwrap(); } - let receive_time = receive_timer.elapsed().as_secs_f64(); shutdown_sender.send(true).unwrap(); + println!("Total Time: {}s", total_timer.elapsed().as_secs_f64()); // Close the RPQ println!("Waiting for RPQ to close"); rpq.close().await; println!( - "Sent: {}, Received: {}, Removed: {}, Escalated: {}", + "Sent: 
{}, Received: {}, Removed: {}, Escalated: {} Restored: {:?}", sent_counter.load(Ordering::SeqCst), received_counter.load(Ordering::SeqCst), removed_counter.load(Ordering::SeqCst), - total_escalated.load(Ordering::SeqCst) + total_escalated.load(Ordering::SeqCst), + restored_items ); + + assert_eq!(rpq.items_in_db(), 0); + } + + #[tokio::test(flavor = "multi_thread")] + async fn e2e_batch() { + // Set Message Count + let message_count = 10_000_250 as usize; + + // Set Concurrency + let send_threads = 4 as usize; + let receive_threads = 4 as usize; + let bucket_count = 10 as usize; + let sent_counter = Arc::new(AtomicUsize::new(0)); + let received_counter = Arc::new(AtomicUsize::new(0)); + let removed_counter = Arc::new(AtomicUsize::new(0)); + let total_escalated = Arc::new(AtomicUsize::new(0)); + let finshed_sending = Arc::new(AtomicBool::new(false)); + + // Create the RPQ + let options = schema::RPQOptions { + max_priority: bucket_count, + disk_cache_enabled: true, + database_path: "/tmp/rpq-e2e-batch.redb".to_string(), + lazy_disk_cache: true, + lazy_disk_write_delay: Duration::seconds(5), + lazy_disk_cache_batch_size: 10_000, + }; + let r = RPQ::new(options).await; + assert!(r.is_ok()); + let (rpq, restored_items) = r.unwrap(); + + // Launch the monitoring thread + let rpq_clone = Arc::clone(&rpq); + let (shutdown_sender, mut shutdown_receiver) = watch::channel(false); + let removed_clone = Arc::clone(&removed_counter); + let escalated_clone = Arc::clone(&total_escalated); + tokio::spawn(async move { + tokio::select! { + _ = shutdown_receiver.changed() => { + return; + }, + _ = async { + loop { + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + let results = rpq_clone.prioritize().await; + + if !results.is_ok() { + let (removed, escalated) = results.unwrap(); + removed_clone.fetch_add(removed, Ordering::SeqCst); + escalated_clone.fetch_add(escalated, Ordering::SeqCst); + } + } + } => {} + } + }); + + let total_timer = std::time::Instant::now(); + + // Enqueue items + println!("Launching {} Batch Send Threads", send_threads); + let mut send_handles = Vec::new(); + for _ in 0..send_threads { + let rpq_clone = Arc::clone(&rpq); + let sent_clone = Arc::clone(&sent_counter); + + send_handles.push(tokio::spawn(async move { + loop { + if sent_clone.load(Ordering::SeqCst) >= message_count { + break; + } + + let mut batch = Vec::new(); + for i in 0..1000 { + let item = schema::Item::new( + sent_clone.load(Ordering::SeqCst) % bucket_count, + i, + true, + Some(Duration::seconds(1)), + true, + Some(Duration::seconds(5)), + ); + + batch.push(item); + sent_clone.fetch_add(1, Ordering::SeqCst); + } + + let result = rpq_clone.enqueue_batch(batch).await; + assert!(result.is_ok()); + } + println!("Finished Sending"); + })); + } + + // Dequeue items + println!("Launching {} Batch Receive Threads", receive_threads); + let mut receive_handles = Vec::new(); + for _ in 0..receive_threads { + // Clone all the shared variables + let rpq_clone = Arc::clone(&rpq); + let received_clone = Arc::clone(&received_counter); + let sent_clone = Arc::clone(&sent_counter); + let removed_clone = Arc::clone(&removed_counter); + let finished_sending_clone = Arc::clone(&finshed_sending); + + // Spawn the thread + receive_handles.push(tokio::spawn(async move { + loop { + if finished_sending_clone.load(Ordering::SeqCst) { + if received_clone.load(Ordering::SeqCst) + + removed_clone.load(Ordering::SeqCst) + >= sent_clone.load(Ordering::SeqCst) + restored_items + { + break; + } + } + + let item = 
rpq_clone.dequeue_batch(1000).await; + assert!(item.is_ok()); + let item = item.unwrap(); + if item.is_none() { + continue; + } + for _i in item.unwrap() { + received_clone.fetch_add(1, Ordering::SeqCst); + } + } + })); + } + + // Wait for send threads to finish + for handle in send_handles { + handle.await.unwrap(); + } + + finshed_sending.store(true, Ordering::SeqCst); + // Wait for receive threads to finish + for handle in receive_handles { + handle.await.unwrap(); + } + shutdown_sender.send(true).unwrap(); + println!("Total Time: {}s", total_timer.elapsed().as_secs_f64()); println!( - "Send Time: {}s, Receive Time: {}s, Total Time: {}s", - send_time, - receive_time, - total_timer.elapsed().as_secs_f64() + "Sent: {}, Received: {}, Removed: {}, Escalated: {} Restored: {}", + sent_counter.load(Ordering::SeqCst), + received_counter.load(Ordering::SeqCst), + removed_counter.load(Ordering::SeqCst), + total_escalated.load(Ordering::SeqCst), + restored_items ); + // Close the RPQ + println!("Waiting for RPQ to close"); + rpq.close().await; + assert_eq!(rpq.items_in_db(), 0); } } diff --git a/src/pq.rs b/src/pq.rs index eeebb3c..badf99f 100644 --- a/src/pq.rs +++ b/src/pq.rs @@ -1,259 +1,200 @@ +use std::collections::BTreeSet; +use std::collections::VecDeque; use std::error::Error; -use std::time::Duration; -use std::{collections::VecDeque, sync::RwLock}; -use std::{io::Error as IoError, io::ErrorKind as IoErrorKind}; +use std::time::Duration as StdDuration; +use std::{sync::Arc, vec::Vec}; -use bincode::{deserialize, serialize}; -use chrono::{DateTime, Utc}; use serde::de::DeserializeOwned; -use serde::{Deserialize, Serialize}; +use serde::Serialize; +use tokio::sync::mpsc::UnboundedSender; mod ftime; +use crate::disk::DiskCache; +use crate::schema::Item; /// PriorityQueue is a struct that holds methods for inserting, removing, and prioritizing items /// in a queue. Items are stored in a VecDeque and are prioritized based on metadata provided by the user. /// Items can be escalated or timed out based on the should_escalate and can_timeout fields. -pub struct PriorityQueue { - items: RwLock>>, - ftime: ftime::CachedTime, +pub struct PriorityQueue { + items: Vec>>, + active_buckets: BTreeSet, + ftime: Arc, + len: usize, + db: Arc>>, + disk_enabled: bool, + lazy_disk_enabled: bool, + lazy_disk_deleter: Arc>>, } -impl PriorityQueue { +impl PriorityQueue { /// This function creates a new PriorityQueue. 
- pub fn new() -> PriorityQueue { - PriorityQueue { - items: RwLock::new(VecDeque::new()), - ftime: ftime::CachedTime::new(Duration::from_millis(50)), + pub fn new( + buckets: usize, + disk_enabled: bool, + lazy_disk_enabled: bool, + lazy_disk_deleter: Arc>>, + db: Arc>>, + ) -> PriorityQueue { + let mut pq = PriorityQueue { + items: Vec::new(), + active_buckets: BTreeSet::new(), + ftime: ftime::CachedTime::new(StdDuration::from_millis(50)), + len: 0 as usize, + disk_enabled, + lazy_disk_enabled, + lazy_disk_deleter, + db, + }; + + for i in 0..buckets { + pq.items.insert(i, VecDeque::new()); } + + pq } /// Returns the number of items in this queue - pub fn len(&self) -> usize { - self.items.read().unwrap().len() + pub fn items_in_queue(&self) -> usize { + self.len + } + + pub fn active_buckets(&self) -> usize { + self.active_buckets.len() } /// Adds an item to the queue at the end of the VecDeque - pub fn enqueue(&self, item: Item) { + pub fn enqueue(&mut self, item: Item) { let mut item = item; - // Set the internal fields item.submitted_at = self.ftime.get_time().into(); item.last_escalation = None; - // Add the item to the queue - self.items.write().unwrap().push_back(item); + // Insert the item into the queue + let priority = item.priority; + self.active_buckets.insert(priority); + self.items.get_mut(priority).unwrap().push_back(item); + self.len += 1; } /// Removes and returns the item with the highest priority - pub fn dequeue(&self) -> Option> { - self.items.write().unwrap().pop_front() + pub fn dequeue(&mut self) -> Option> { + // This should really only ever loop once if the first pulled bucket is a miss + loop { + let bucket = self.active_buckets.first(); + if bucket.is_none() { + return None; + } + let item = self.items.get_mut(*bucket.unwrap()).unwrap().pop_front(); + if item.is_none() { + self.active_buckets.pop_first(); + continue; + } + self.len -= 1; + return item; + } } /// Prioritizes the items in the queue based on the priority, escalation rate, and timeout /// Returns a tuple of the number of items removed and the number of items swapped - pub fn prioritize(&self) -> Result<(usize, usize), Box> { - let mut items = self.items.write().unwrap(); - let mut to_remove = Vec::new(); - let mut to_swap = Vec::new(); - let mut was_error = false; - - for (index, item) in items.iter_mut().enumerate() { - // Timeout items that have been in the queue for too long - if item.can_timeout { - if let (Some(timeout), Some(submitted_at)) = (item.timeout, item.submitted_at) { - let current_time_millis = self.ftime.get_time().timestamp_millis(); - let submitted_time_millis = submitted_at.timestamp_millis(); - let elapsed_time = current_time_millis - submitted_time_millis; - - // Downcast u128 to i64 to compare with the timeout - if timeout.as_millis() <= i64::MAX as u128 { - if elapsed_time >= timeout.as_millis() as i64 { - to_remove.push(index); - continue; + pub fn prioritize(&mut self) -> Result<(usize, usize), Box> + where + T: Serialize + DeserializeOwned, + { + let mut removed = 0 as usize; + let mut swapped = 0 as usize; + + for (_, bucket) in self.items.iter_mut().enumerate() { + bucket.retain(|item| { + let mut keep = true; + + // Timeout items that have been in the queue for too long + if item.can_timeout { + if let (Some(timeout), Some(submitted_at)) = (item.timeout, item.submitted_at) { + let current_time = self.ftime.get_time(); + let elapsed = current_time - submitted_at; + + if elapsed >= timeout { + keep = false; + removed += 1; + self.len -= 1; + + if self.disk_enabled { + 
let db = self.db.as_ref(); + match db { + Some(db) => { + if self.lazy_disk_enabled { + self.lazy_disk_deleter.send(item.clone()).unwrap(); + } else { + let _ = db.delete_single( + item.get_disk_uuid().as_ref().unwrap(), + ); + } + } + None => {} + } + } } - } else { - was_error = true; } } - } - - // Escalate items that have been in the queue for too long - if item.should_escalate { - let current_time_millis = self.ftime.get_time().timestamp_millis(); - let submitted_time_millis = item.submitted_at.unwrap().timestamp_millis(); - let escalation_rate_millis = item.escalation_rate.unwrap().as_millis(); - - // Downcast u128 to i64 to compare with the timeout - if !item.timeout.unwrap().as_millis() <= i64::MAX as u128 { - // Check if we have ever escalated this item - if item.last_escalation.is_none() { - let elapsed_time = current_time_millis - submitted_time_millis; - - if elapsed_time > escalation_rate_millis as i64 { - item.last_escalation = Some(self.ftime.get_time()); - if index > 0 { - to_swap.push(index); + keep + }); + + let mut index = 0; + let mut last_item_was_escalated = false; + let mut last_last_was_escalated = false; + + while index < bucket.len() { + let item = bucket.get_mut(index).unwrap(); + + if item.should_escalate { + if item.submitted_at.is_some() && item.escalation_rate.is_some() { + let current_time = self.ftime.get_time(); + let context = (!last_item_was_escalated && !last_last_was_escalated) + || (last_item_was_escalated && !last_last_was_escalated); + + // Check if we have ever escalated this item + if item.last_escalation.is_none() { + let elapsed_time = current_time - item.submitted_at.unwrap(); + + if elapsed_time >= item.escalation_rate.unwrap() { + if context { + item.last_escalation = Some(self.ftime.get_time()); + if index > 0 { + bucket.swap(index, index - 1); + } + last_last_was_escalated = last_item_was_escalated; + last_item_was_escalated = true; + swapped += 1; + } + } else { + last_last_was_escalated = last_item_was_escalated; + last_item_was_escalated = false; } - } - } else { - let last_escalation_time_millis = - item.last_escalation.unwrap().timestamp_millis(); - let time_since_last_escalation = - current_time_millis - last_escalation_time_millis; - // Check if we need to escalate this item again - if time_since_last_escalation >= escalation_rate_millis as i64 { - item.last_escalation = Some(self.ftime.get_time()); - if index > 0 { - to_swap.push(index); + // We have escalated this item before + } else { + let elapsed_time = current_time - item.last_escalation.unwrap(); + if elapsed_time >= item.escalation_rate.unwrap() { + if context { + item.last_escalation = Some(self.ftime.get_time()); + if index > 0 { + bucket.swap(index, index - 1); + } + last_last_was_escalated = last_item_was_escalated; + last_item_was_escalated = true; + swapped += 1; + } + } else { + last_last_was_escalated = last_item_was_escalated; + last_item_was_escalated = false; } } } - } else { - was_error = true; } + index += 1; } } - let removed = to_remove.len(); - let swapped = to_swap.len(); - - // Perform removals and swaps - for index in to_remove.iter().rev() { - items.remove(*index); - } - for index in to_swap { - items.swap(index, index - 1); - } - - if was_error { - return Err(Box::::from(IoError::new( - IoErrorKind::InvalidInput, - "Timeout or escalation rate is too large", - ))); - } - Ok((removed, swapped)) } } - -/// Item holds the data that you want to store along with the metadata needed to manage the item. 
-/// The priority field is used to determine the order in which items are dequeued. The lower the -/// value, the higher the priority. Items will NOT escalate to a new priority level but instead -/// items will be escalated up or down within there same priority level. AKA, items are not promoted -/// to a higher priority level no matter how long they are in the queue. -#[derive(Serialize, Deserialize, Clone)] -pub struct Item { - // User-provided fields - /// The priority of the item. Lower values are higher priority. - /// Be sure that this value does not exceed the max_priority value set when creating the queue. - pub priority: usize, - /// The data associated with the item. - pub data: T, - /// Whether the item should be escalated over time. - pub should_escalate: bool, - /// The rate at which the item should be escalated. - pub escalation_rate: Option, - /// Whether the item should be timed out. - pub can_timeout: bool, - /// The timeout duration for the item. - pub timeout: Option, - - // Internal - disk_uuid: Option, - submitted_at: Option>, - last_escalation: Option>, - batch_id: usize, - was_restored: bool, -} - -impl Item { - /// This function creates a new Item with the provided fields. - pub fn new( - priority: usize, - data: T, - should_escalate: bool, - escalation_rate: Option, - can_timeout: bool, - timeout: Option, - ) -> Self { - Item { - // User-provided fields - priority, - data, - should_escalate, - escalation_rate, - can_timeout, - timeout, - - // Private with fn access - batch_id: 0, - was_restored: false, - disk_uuid: None, - - // Internal fields - submitted_at: None, - last_escalation: None, - } - } - - // This function is for internal use only. It sets the disk_uuid field to a random UUID. - pub fn set_disk_uuid(&mut self) { - self.disk_uuid = Some(uuid::Uuid::new_v4().to_string()); - } - - // This function is for internal use only. It returns the disk_uuid field. - pub fn get_disk_uuid(&self) -> Option { - self.disk_uuid.clone() - } - - /// This function is for internal use only. It sets the batch_id field. - pub fn set_batch_id(&mut self, batch_id: usize) { - self.batch_id = batch_id; - } - - /// This function is for internal use only. It returns the batch_id field. - pub fn get_batch_id(&self) -> usize { - self.batch_id - } - - /// This function is for internal use only. It sets the was_restored field to true. - pub fn set_restored(&mut self) { - self.was_restored = true; - } - - /// This function is for internal use only. It returns the was_restored field. - pub fn was_restored(&self) -> bool { - self.was_restored - } - - /// This function is for internal use only. It returns creates a new Item from a serialized byte array. - pub fn from_bytes(bytes: &[u8]) -> Result, Box> - where - T: Serialize + DeserializeOwned, - { - let b = bytes.to_vec(); - if b.is_empty() { - return Err(Box::::from(IoError::new( - IoErrorKind::InvalidInput, - "Empty byte array", - ))); - } - Ok(deserialize(&b).unwrap()) - } - - /// This function is for internal use only. It returns a serialized byte array from an Item. 
- pub fn to_bytes(&self) -> Result, Box>
- where
- T: Serialize + DeserializeOwned,
- {
- let b = serialize(&self).unwrap();
- if b.is_empty() {
- return Err(Box::::from(IoError::new(
- IoErrorKind::InvalidInput,
- "Empty byte array",
- )));
- }
- Ok(b)
- }
-}
diff --git a/src/pq/ftime.rs b/src/pq/ftime.rs
index 57735d3..b2ef346 100644
--- a/src/pq/ftime.rs
+++ b/src/pq/ftime.rs
@@ -1,32 +1,39 @@
 use chrono::{DateTime, Utc};
 use std::sync::Arc;
+use std::sync::RwLock;
 use std::time::Duration;
 use tokio::time::sleep;
 #[derive(Clone)]
 pub struct CachedTime {
- time: Arc>,
+ time: Arc>>,
 }
 impl CachedTime {
- pub fn new(update_interval: Duration) -> Self {
+ pub fn new(update_interval: Duration) -> Arc {
 let cached_time = CachedTime {
- time: Arc::new(Utc::now()),
+ time: Arc::new(RwLock::new(Utc::now())),
 };
+ let arc = Arc::new(cached_time);
+ let arc_clone = arc.clone();
- let mut clone = cached_time.clone();
- clone.time = Arc::new(Utc::now());
 tokio::spawn(async move {
 loop {
 sleep(update_interval).await;
- clone.time = Arc::new(Utc::now());
+ arc_clone.update_time();
 }
 });
- cached_time
+ arc
+ }
+
+ pub fn update_time(&self) {
+ let mut time = self.time.write().unwrap();
+ let new_time = Utc::now();
+ *time = new_time;
 }
 pub fn get_time(&self) -> DateTime {
- self.time.as_ref().clone().into()
+ self.time.read().unwrap().clone()
 }
 }
diff --git a/src/schema.rs b/src/schema.rs
new file mode 100644
index 0000000..3f46b9a
--- /dev/null
+++ b/src/schema.rs
@@ -0,0 +1,178 @@
+use std::error::Error;
+use std::io::Error as IoError;
+use std::io::ErrorKind as IoErrorKind;
+
+use bincode::{deserialize, serialize};
+use chrono::{DateTime, Duration, Utc};
+use serde::de::DeserializeOwned;
+use serde::{Deserialize, Serialize};
+use serde_with::{serde_as, DurationMilliSeconds};
+
+/// RPQOptions is the configuration for the RPQ
+pub struct RPQOptions {
+ /// Holds the number of priorities (buckets) that this RPQ will accept for this queue.
+ pub max_priority: usize,
+ /// Enables or disables the disk cache using redb as the backend to store items
+ pub disk_cache_enabled: bool,
+ /// Holds the path to where the disk cache database will be persisted
+ pub database_path: String,
+ /// Enables or disables lazy disk writes and deletes. The speed can be quite variable depending
+ /// on the disk itself and how often you are emptying the queue in combination with the write delay.
+ pub lazy_disk_cache: bool,
+ /// Sets the delay between lazy disk writes. This delays items from being committed to the disk cache.
+ /// If you are pulling items off the queue faster than this delay, the write to disk can often be skipped
+ /// entirely, massively increasing the throughput of the queue.
+ pub lazy_disk_write_delay: Duration,
+ /// Sets the number of items that will be written to the disk cache in a single batch. This can be used to
+ /// tune the performance of the disk cache depending on your specific workload.
+ pub lazy_disk_cache_batch_size: usize,
+}
+
+/// Item holds the data that you want to store along with the metadata needed to manage the item.
+/// The priority field is used to determine the order in which items are dequeued. The lower the
+/// value, the higher the priority. Items will NOT escalate to a new priority level but instead
+/// items will be escalated up or down within their same priority level. That is, items are not promoted
+/// to a higher priority level no matter how long they are in the queue.
+#[serde_as]
+#[derive(Serialize, Deserialize, Clone)]
+pub struct Item {
+ // User-provided fields
+ /// The priority of the item. Lower values are higher priority.
+ /// Be sure that this value does not exceed the max_priority value set when creating the queue.
+ pub priority: usize,
+ /// The data associated with the item.
+ pub data: T,
+ /// Whether the item should be escalated over time.
+ pub should_escalate: bool,
+ /// The rate at which the item should be escalated.
+ #[serde_as(as = "Option>")]
+ pub escalation_rate: Option,
+ /// Whether the item should be timed out.
+ pub can_timeout: bool,
+ /// The timeout duration for the item.
+ #[serde_as(as = "Option>")]
+ pub timeout: Option,
+
+ // Internal
+ /// INTERNAL USE ONLY: The UUID of the item when it is stored on disk.
+ pub disk_uuid: Option,
+ /// INTERNAL USE ONLY: The time the item was submitted to the queue.
+ pub submitted_at: Option>,
+ /// INTERNAL USE ONLY: The last time the item was escalated.
+ pub last_escalation: Option>,
+ /// INTERNAL USE ONLY: The batch_id of the item if committed to disk in a batch.
+ pub batch_id: usize,
+ /// INTERNAL USE ONLY: Whether the item was restored from disk.
+ pub was_restored: bool,
+}
+
+impl Item {
+ /// This function creates a new Item with the provided fields.
+ pub fn new(
+ priority: usize,
+ data: T,
+ should_escalate: bool,
+ escalation_rate: Option,
+ can_timeout: bool,
+ timeout: Option,
+ ) -> Self {
+ Item {
+ // User-provided fields
+ priority,
+ data,
+ should_escalate,
+ escalation_rate,
+ can_timeout,
+ timeout,
+
+ // Private with fn access
+ batch_id: 0,
+ was_restored: false,
+ disk_uuid: None,
+
+ // Internal fields
+ submitted_at: None,
+ last_escalation: None,
+ }
+ }
+
+ /// This function is for internal use only. It sets the disk_uuid field to a random UUID.
+ pub fn set_disk_uuid(&mut self) {
+ self.disk_uuid = Some(uuid::Uuid::new_v4().to_string());
+ }
+
+ /// This function is for internal use only. It returns the disk_uuid field.
+ pub fn get_disk_uuid(&self) -> Option {
+ self.disk_uuid.clone()
+ }
+
+ /// This function is for internal use only. It sets the batch_id field.
+ pub fn set_batch_id(&mut self, batch_id: usize) {
+ self.batch_id = batch_id;
+ }
+
+ /// This function is for internal use only. It returns the batch_id field.
+ pub fn get_batch_id(&self) -> usize {
+ self.batch_id
+ }
+
+ /// This function is for internal use only. It sets the was_restored field to true.
+ pub fn set_restored(&mut self) {
+ self.was_restored = true;
+ }
+
+ /// This function is for internal use only. It returns the was_restored field.
+ pub fn was_restored(&self) -> bool {
+ self.was_restored
+ }
+
+ /// This function is for internal use only. It creates a new Item from a serialized byte array.
+ pub fn from_bytes(bytes: &[u8]) -> Result, Box>
+ where
+ T: Serialize + DeserializeOwned,
+ {
+ let b = bytes.to_vec();
+ if b.is_empty() {
+ return Err(Box::::from(IoError::new(
+ IoErrorKind::InvalidInput,
+ "Empty byte array",
+ )));
+ }
+
+ let d = deserialize(&b);
+ match d {
+ Err(e) => {
+ return Err(Box::::from(IoError::new(
+ IoErrorKind::InvalidInput,
+ format!("Failed to deserialize item: {}", e),
+ )));
+ }
+ _ => {}
+ }
+
+ Ok(d.unwrap())
+ }
+
+ /// This function is for internal use only. It returns a serialized byte array from an Item.
+ pub fn to_bytes(&self) -> Result, Box> + where + T: Serialize + DeserializeOwned, + { + let b = serialize(&self); + if b.is_err() { + return Err(Box::::from(IoError::new( + IoErrorKind::InvalidInput, + "Failed to serialize item", + ))); + } + let b = b.unwrap(); + if b.is_empty() { + return Err(Box::::from(IoError::new( + IoErrorKind::InvalidInput, + "Output empty byte array", + ))); + } + + Ok(b) + } +}
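// ---------------------------------------------------------------------------
// Usage sketch (not part of the patch above): how the `Item` and `RPQOptions`
// types introduced in src/schema.rs fit together. The `rpq::schema` import
// path and the concrete values below are assumptions for illustration; only
// `Item::new`, `to_bytes`/`from_bytes`, and the `RPQOptions` fields shown in
// this change are relied on.
use chrono::Duration;
use rpq::schema::{Item, RPQOptions};

fn main() {
    // Queue configuration mirroring the fields defined on RPQOptions.
    let _options = RPQOptions {
        max_priority: 10,
        disk_cache_enabled: true,
        database_path: "/tmp/rpq.redb".to_string(),
        lazy_disk_cache: true,
        lazy_disk_write_delay: Duration::milliseconds(500),
        lazy_disk_cache_batch_size: 10_000,
    };

    // An item at priority 0 (highest) that escalates every second and times
    // out after ten seconds.
    let item = Item::new(
        0,
        "hello".to_string(),
        true,
        Some(Duration::seconds(1)),
        true,
        Some(Duration::seconds(10)),
    );

    // Round-trip through the bincode helpers added in schema.rs.
    let bytes = item.to_bytes().unwrap();
    let restored: Item<String> = Item::from_bytes(&bytes).unwrap();
    assert_eq!(restored.priority, 0);
}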