From 5a911b108816628095727dcd4302a5f9df1bd0f3 Mon Sep 17 00:00:00 2001 From: Clement Rey Date: Thu, 25 Jul 2024 15:13:45 +0200 Subject: [PATCH] wip --- ARCHITECTURE.md | 1 + Cargo.lock | 40 +- Cargo.toml | 1 + crates/store/re_chunk/Cargo.toml | 1 + crates/store/re_chunk/src/chunk.rs | 49 +- crates/store/re_chunk/src/helpers.rs | 360 ++ crates/store/re_chunk/src/iter.rs | 471 ++- crates/store/re_chunk/src/lib.rs | 2 + crates/store/re_chunk/src/shuffle.rs | 39 +- crates/store/re_chunk/src/slice.rs | 198 +- crates/store/re_chunk/src/util.rs | 2 + crates/store/re_chunk_store/src/lib.rs | 4 +- crates/store/re_chunk_store/src/store.rs | 14 + .../store/re_chunk_store/tests/correctness.rs | 18 +- crates/store/re_chunk_store/tests/gc.rs | 15 +- crates/store/re_chunk_store/tests/reads.rs | 18 +- crates/store/re_entity_db/Cargo.toml | 1 + crates/store/re_entity_db/src/entity_db.rs | 68 +- crates/store/re_entity_db/src/lib.rs | 1 + crates/store/re_query/Cargo.toml | 2 + crates/store/re_query/src/cache.rs | 36 +- crates/store/re_query/src/latest_at/query.rs | 35 +- crates/store/re_query/src/range/query.rs | 31 +- crates/store/re_query2/Cargo.toml | 81 + crates/store/re_query2/README.md | 10 + crates/store/re_query2/benches/latest_at.rs | 335 ++ crates/store/re_query2/examples/latest_at.rs | 131 + crates/store/re_query2/examples/range.rs | 157 + crates/store/re_query2/src/bin/clamped_zip.rs | 354 ++ crates/store/re_query2/src/bin/range_zip.rs | 499 +++ crates/store/re_query2/src/cache.rs | 323 ++ crates/store/re_query2/src/cache_stats.rs | 108 + .../re_query2/src/clamped_zip/.gitattributes | 1 + .../re_query2/src/clamped_zip/generated.rs | 3124 +++++++++++++++ crates/store/re_query2/src/clamped_zip/mod.rs | 64 + crates/store/re_query2/src/latest_at.rs | 681 ++++ crates/store/re_query2/src/lib.rs | 72 + crates/store/re_query2/src/range.rs | 317 ++ .../re_query2/src/range_zip/.gitattributes | 1 + .../re_query2/src/range_zip/generated.rs | 3490 +++++++++++++++++ crates/store/re_query2/src/range_zip/mod.rs | 70 + crates/store/re_query2/tests/latest_at.rs | 579 +++ crates/store/re_query2/tests/range.rs | 1080 +++++ crates/top/rerun/src/run.rs | 30 +- .../re_data_ui/src/annotation_context.rs | 4 +- crates/viewer/re_data_ui/src/image.rs | 6 +- .../re_selection_panel/src/defaults_ui.rs | 26 +- .../src/visible_time_range_ui.rs | 20 +- crates/viewer/re_space_view/Cargo.toml | 1 + crates/viewer/re_space_view/src/lib.rs | 11 + crates/viewer/re_space_view/src/query2.rs | 233 ++ .../viewer/re_space_view/src/results_ext2.rs | 363 ++ .../re_space_view/src/view_property_ui.rs | 31 +- .../src/latest_at_table.rs | 21 +- .../src/time_range_table.rs | 8 +- .../re_space_view_dataframe/src/utils.rs | 17 +- .../src/contexts/transform_context.rs | 34 +- .../viewer/re_space_view_spatial/src/lib.rs | 29 +- .../src/transformables.rs | 116 + .../viewer/re_space_view_spatial/src/ui_3d.rs | 2 +- .../re_space_view_spatial/src/view_2d.rs | 1 + .../re_space_view_spatial/src/view_3d.rs | 3 +- .../src/visualizers/cameras.rs | 2 +- .../src/visualizers/depth_images.rs | 2 +- .../src/visualizer_system.rs | 4 +- .../re_space_view_time_series/Cargo.toml | 2 +- .../src/line_visualizer_system.rs | 298 +- .../src/point_visualizer_system.rs | 357 +- crates/viewer/re_viewer/Cargo.toml | 1 + crates/viewer/re_viewer/src/app.rs | 4 +- crates/viewer/re_viewer/src/app_blueprint.rs | 2 +- .../re_viewer/src/blueprint/validation.rs | 13 +- .../viewer/re_viewer/src/ui/memory_panel.rs | 70 +- crates/viewer/re_viewer_context/Cargo.toml | 1 + 
.../re_viewer_context/src/annotations.rs | 6 +- .../src/blueprint_helpers.rs | 14 +- .../src/component_ui_registry.rs | 45 +- crates/viewer/re_viewer_context/src/item.rs | 25 +- .../re_viewer_context/src/space_view/mod.rs | 3 + .../src/space_view/view_query.rs | 2 +- .../viewer/re_viewer_context/src/store_hub.rs | 6 +- .../re_viewport_blueprint/src/container.rs | 19 +- .../re_viewport_blueprint/src/space_view.rs | 23 +- .../src/space_view_contents.rs | 47 +- .../src/view_properties.rs | 8 +- .../src/viewport_blueprint.rs | 13 +- .../color_coordinates_visualizer_system.rs | 9 +- tests/python/plot_dashboard_stress/main.py | 81 +- 88 files changed, 14023 insertions(+), 874 deletions(-) create mode 100644 crates/store/re_chunk/src/helpers.rs create mode 100644 crates/store/re_query2/Cargo.toml create mode 100644 crates/store/re_query2/README.md create mode 100644 crates/store/re_query2/benches/latest_at.rs create mode 100644 crates/store/re_query2/examples/latest_at.rs create mode 100644 crates/store/re_query2/examples/range.rs create mode 100644 crates/store/re_query2/src/bin/clamped_zip.rs create mode 100644 crates/store/re_query2/src/bin/range_zip.rs create mode 100644 crates/store/re_query2/src/cache.rs create mode 100644 crates/store/re_query2/src/cache_stats.rs create mode 100644 crates/store/re_query2/src/clamped_zip/.gitattributes create mode 100644 crates/store/re_query2/src/clamped_zip/generated.rs create mode 100644 crates/store/re_query2/src/clamped_zip/mod.rs create mode 100644 crates/store/re_query2/src/latest_at.rs create mode 100644 crates/store/re_query2/src/lib.rs create mode 100644 crates/store/re_query2/src/range.rs create mode 100644 crates/store/re_query2/src/range_zip/.gitattributes create mode 100644 crates/store/re_query2/src/range_zip/generated.rs create mode 100644 crates/store/re_query2/src/range_zip/mod.rs create mode 100644 crates/store/re_query2/tests/latest_at.rs create mode 100644 crates/store/re_query2/tests/range.rs create mode 100644 crates/viewer/re_space_view/src/query2.rs create mode 100644 crates/viewer/re_space_view/src/results_ext2.rs create mode 100644 crates/viewer/re_space_view_spatial/src/transformables.rs diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md index 0afcae79616ee..ba481d0bfcfa2 100644 --- a/ARCHITECTURE.md +++ b/ARCHITECTURE.md @@ -161,6 +161,7 @@ Update instructions: |----------------------|--------------------------------------------------------------------------| | re_entity_db | In-memory storage of Rerun entities | | re_query | Querying data in the re_chunk_store | +| re_query2 | Querying data in the re_chunk_store | | re_types | The built-in Rerun data types, component types, and archetypes. | | re_types_blueprint | The core traits and types that power Rerun's Blueprint sub-system. 
| | re_log_encoding | Helpers for encoding and transporting Rerun log messages | diff --git a/Cargo.lock b/Cargo.lock index 07f02659d20c0..faa5aed94c385 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4311,6 +4311,7 @@ dependencies = [ "rand", "re_arrow2", "re_build_info", + "re_error", "re_format", "re_format_arrow", "re_log", @@ -4526,6 +4527,7 @@ dependencies = [ "re_log_encoding", "re_log_types", "re_query", + "re_query2", "re_smart_channel", "re_tracing", "re_types", @@ -4714,6 +4716,39 @@ dependencies = [ "thiserror", ] +[[package]] +name = "re_query2" +version = "0.18.0-alpha.1+dev" +dependencies = [ + "ahash", + "anyhow", + "backtrace", + "criterion", + "indent", + "indexmap 2.1.0", + "itertools 0.13.0", + "mimalloc", + "nohash-hasher", + "parking_lot", + "paste", + "rand", + "re_arrow2", + "re_chunk", + "re_chunk_store", + "re_error", + "re_format", + "re_log", + "re_log_types", + "re_tracing", + "re_tuid", + "re_types", + "re_types_core", + "seq-macro", + "similar-asserts", + "static_assertions", + "thiserror", +] + [[package]] name = "re_renderer" version = "0.18.0-alpha.1+dev" @@ -4891,6 +4926,7 @@ dependencies = [ "re_log", "re_log_types", "re_query", + "re_query2", "re_tracing", "re_types_core", "re_ui", @@ -5049,7 +5085,7 @@ dependencies = [ "re_format", "re_log", "re_log_types", - "re_query", + "re_query2", "re_renderer", "re_space_view", "re_tracing", @@ -5278,6 +5314,7 @@ dependencies = [ "re_log_types", "re_memory", "re_query", + "re_query2", "re_renderer", "re_sdk_comms", "re_selection_panel", @@ -5349,6 +5386,7 @@ dependencies = [ "re_log_types", "re_math", "re_query", + "re_query2", "re_renderer", "re_smart_channel", "re_string_interner", diff --git a/Cargo.toml b/Cargo.toml index b0d8ec6d4907c..069ba1c71aa38 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -49,6 +49,7 @@ re_format_arrow = { path = "crates/store/re_format_arrow", version = "=0.18.0-al re_log_encoding = { path = "crates/store/re_log_encoding", version = "=0.18.0-alpha.1", default-features = false } re_log_types = { path = "crates/store/re_log_types", version = "=0.18.0-alpha.1", default-features = false } re_query = { path = "crates/store/re_query", version = "=0.18.0-alpha.1", default-features = false } +re_query2 = { path = "crates/store/re_query2", version = "=0.18.0-alpha.1", default-features = false } re_sdk_comms = { path = "crates/store/re_sdk_comms", version = "=0.18.0-alpha.1", default-features = false } re_types = { path = "crates/store/re_types", version = "=0.18.0-alpha.1", default-features = false } re_types_blueprint = { path = "crates/store/re_types_blueprint", version = "=0.18.0-alpha.1", default-features = false } diff --git a/crates/store/re_chunk/Cargo.toml b/crates/store/re_chunk/Cargo.toml index f32fb079e8599..e6007cb1dbc6a 100644 --- a/crates/store/re_chunk/Cargo.toml +++ b/crates/store/re_chunk/Cargo.toml @@ -37,6 +37,7 @@ serde = [ # Rerun re_build_info.workspace = true +re_error.workspace = true re_format.workspace = true re_format_arrow.workspace = true re_log.workspace = true diff --git a/crates/store/re_chunk/src/chunk.rs b/crates/store/re_chunk/src/chunk.rs index 40ec249852df0..f6e082e575ea0 100644 --- a/crates/store/re_chunk/src/chunk.rs +++ b/crates/store/re_chunk/src/chunk.rs @@ -3,14 +3,19 @@ use std::{ sync::atomic::{AtomicU64, Ordering}, }; -use arrow2::array::{ - Array as ArrowArray, ListArray as ArrowListArray, PrimitiveArray as ArrowPrimitiveArray, - StructArray as ArrowStructArray, +use arrow2::{ + array::{ + Array as ArrowArray, ListArray as ArrowListArray, 
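The two error variants introduced above are what the fallible accessors added throughout this patch return. A hypothetical sketch of how downstream code might discriminate them (the `describe` helper and its message strings are illustrative, not part of the patch):

```rust
use re_chunk::ChunkError;

// Hypothetical helper; the `IndexOutOfBounds` fields match the variant above.
fn describe(err: &ChunkError) -> String {
    match err {
        ChunkError::IndexOutOfBounds { kind, len, index } => {
            format!("{kind} index out of bounds: {index} (len={len})")
        }
        ChunkError::Deserialization(err) => format!("deserialization failed: {err}"),
        // `Arrow`, `Serialization`, … are forwarded transparently.
        other => other.to_string(),
    }
}
```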
PrimitiveArray as ArrowPrimitiveArray, + StructArray as ArrowStructArray, + }, + Either, }; use itertools::{izip, Itertools}; use re_log_types::{EntityPath, ResolvedTimeRange, Time, TimeInt, TimePoint, Timeline}; -use re_types_core::{ComponentName, Loggable, LoggableBatch, SerializationError, SizeBytes}; +use re_types_core::{ + ComponentName, DeserializationError, Loggable, LoggableBatch, SerializationError, SizeBytes, +}; use crate::{ChunkId, RowId}; @@ -26,8 +31,18 @@ pub enum ChunkError { #[error(transparent)] Arrow(#[from] arrow2::error::Error), + #[error("{kind} index out of bounds: {index} (len={len})")] + IndexOutOfBounds { + kind: String, + len: usize, + index: usize, + }, + #[error(transparent)] Serialization(#[from] SerializationError), + + #[error(transparent)] + Deserialization(#[from] DeserializationError), } pub type ChunkResult = Result; @@ -822,6 +837,32 @@ impl Chunk { .map(|(&time, &counter)| RowId::from_u128((time as u128) << 64 | (counter as u128))) } + /// Returns an iterator over the [`RowId`]s of a [`Chunk`], for a given component. + /// + /// This is different than [`Self::row_ids`]: it will only yield `RowId`s for rows at which + /// there is data for the specified `component_name`. + #[inline] + pub fn component_row_ids( + &self, + component_name: &ComponentName, + ) -> impl Iterator + '_ { + let Some(list_array) = self.components.get(component_name) else { + return Either::Left(std::iter::empty()); + }; + + let row_ids = self.row_ids(); + + if let Some(validity) = list_array.validity() { + Either::Right(Either::Left( + row_ids + .enumerate() + .filter_map(|(i, o)| validity.get_bit(i).then_some(o)), + )) + } else { + Either::Right(Either::Right(row_ids)) + } + } + /// Returns the [`RowId`]-range covered by this [`Chunk`]. /// /// `None` if the chunk `is_empty`. diff --git a/crates/store/re_chunk/src/helpers.rs b/crates/store/re_chunk/src/helpers.rs new file mode 100644 index 0000000000000..f9969587ae9cc --- /dev/null +++ b/crates/store/re_chunk/src/helpers.rs @@ -0,0 +1,360 @@ +use std::sync::Arc; + +use arrow2::array::Array as ArrowArray; + +use re_log_types::{TimeInt, Timeline}; +use re_types_core::{Component, ComponentName, SizeBytes}; + +use crate::{Chunk, ChunkResult, RowId}; + +// --- Helpers --- + +impl Chunk { + // --- Batch --- + + /// Returns the raw data for the specified component. + /// + /// Returns an error if the row index is out of bounds. + #[inline] + pub fn component_batch_raw( + &self, + component_name: &ComponentName, + row_index: usize, + ) -> Option>> { + self.components.get(component_name).map(|list_array| { + if list_array.len() > row_index { + Ok(list_array.value(row_index)) + } else { + Err(crate::ChunkError::IndexOutOfBounds { + kind: "row".to_owned(), + len: list_array.len(), + index: row_index, + }) + } + }) + } + + /// Returns the deserialized data for the specified component. + /// + /// Returns an error if the data cannot be deserialized, or if the row index is out of bounds. + #[inline] + pub fn component_batch(&self, row_index: usize) -> Option>> { + let res = self.component_batch_raw(&C::name(), row_index)?; + + let array = match res { + Ok(array) => array, + Err(err) => return Some(Err(err)), + }; + + let data = C::from_arrow(&*array); + Some(data.map_err(Into::into)) + } + + // --- Instance --- + + /// Returns the raw data for the specified component at the given instance index. + /// + /// Returns an error if either the row index or instance index are out of bounds. 
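These batch accessors return a nested `Option`/`Result`: `None` means the chunk has no column for that component at all, while `Some(Err(_))` signals an out-of-bounds row or undeserializable data. A hedged usage sketch (`MyPoint` is the example component this patch's own tests use; the `inspect` helper is hypothetical):

```rust
use re_chunk::Chunk;
use re_log_types::example_components::MyPoint;

// Hypothetical helper: probe row 0 of a chunk for `MyPoint` data.
fn inspect(chunk: &Chunk) {
    match chunk.component_batch::<MyPoint>(0) {
        None => eprintln!("chunk has no MyPoint column"),
        Some(Ok(points)) => eprintln!("row 0 holds {} point(s)", points.len()),
        Some(Err(err)) => eprintln!("row 0 lookup failed: {err}"),
    }
}
```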
+ #[inline] + pub fn component_instance_raw( + &self, + component_name: &ComponentName, + row_index: usize, + instance_index: usize, + ) -> Option>> { + let res = self.component_batch_raw(component_name, row_index)?; + + let array = match res { + Ok(array) => array, + Err(err) => return Some(Err(err)), + }; + + if array.len() > instance_index { + Some(Ok(array.sliced(instance_index, 1))) + } else { + Some(Err(crate::ChunkError::IndexOutOfBounds { + kind: "instance".to_owned(), + len: array.len(), + index: instance_index, + })) + } + } + + /// Returns the component data of the specified instance. + /// + /// Returns an error if the data cannot be deserialized, or if either the row index or instance index + /// are out of bounds. + #[inline] + pub fn component_instance( + &self, + row_index: usize, + instance_index: usize, + ) -> Option> { + let res = self.component_instance_raw(&C::name(), row_index, instance_index)?; + + let array = match res { + Ok(array) => array, + Err(err) => return Some(Err(err)), + }; + + match C::from_arrow(&*array) { + Ok(data) => data.into_iter().next().map(Ok), // NOTE: It's already sliced! + Err(err) => Some(Err(err.into())), + } + } + + // --- Mono --- + + /// Returns the raw data for the specified component, assuming a mono-batch. + /// + /// Returns an error if either the row index is out of bounds, or the underlying batch is not + /// of unit length. + #[inline] + pub fn component_mono_raw( + &self, + component_name: &ComponentName, + row_index: usize, + ) -> Option>> { + let res = self.component_batch_raw(component_name, row_index)?; + + let array = match res { + Ok(array) => array, + Err(err) => return Some(Err(err)), + }; + + if array.len() == 1 { + Some(Ok(array.sliced(0, 1))) + } else { + Some(Err(crate::ChunkError::IndexOutOfBounds { + kind: "mono".to_owned(), + len: array.len(), + index: 0, + })) + } + } + + /// Returns the deserialized data for the specified component, assuming a mono-batch. + /// + /// Returns an error if the data cannot be deserialized, or if either the row index is out of bounds, + /// or the underlying batch is not of unit length. + #[inline] + pub fn component_mono(&self, row_index: usize) -> Option> { + let res = self.component_mono_raw(&C::name(), row_index)?; + + let array = match res { + Ok(array) => array, + Err(err) => return Some(Err(err)), + }; + + match C::from_arrow(&*array) { + Ok(data) => data.into_iter().next().map(Ok), // NOTE: It's already sliced! + Err(err) => Some(Err(err.into())), + } + } +} + +// --- Unit --- + +/// A simple type alias for an `Arc`. +pub type ChunkShared = Arc; + +/// A [`ChunkShared`] that is guaranteed to always contain a single row's worth of data. +#[derive(Debug, Clone)] +pub struct UnitChunkShared(ChunkShared); + +impl std::ops::Deref for UnitChunkShared { + type Target = Chunk; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl SizeBytes for UnitChunkShared { + #[inline] + fn heap_size_bytes(&self) -> u64 { + Chunk::heap_size_bytes(&self.0) + } +} + +impl Chunk { + /// Turns the chunk into a [`UnitChunkShared`], if possible. + #[inline] + pub fn to_unit(self: &ChunkShared) -> Option { + (self.num_rows() == 1).then(|| UnitChunkShared(Arc::clone(self))) + } + + /// Turns the chunk into a [`UnitChunkShared`], if possible. + #[inline] + pub fn into_unit(self) -> Option { + (self.num_rows() == 1).then(|| UnitChunkShared(Arc::new(self))) + } +} + +impl UnitChunkShared { + // Turns the unit chunk back into a standard [`Chunk`]. 
+ #[inline] + pub fn into_chunk(self) -> ChunkShared { + self.0 + } +} + +impl UnitChunkShared { + /// Returns the index (`(TimeInt, RowId)` pair) of the single row within, on the given timeline. + /// + /// Returns the single static index if the chunk is static. + #[inline] + pub fn index(&self, timeline: &Timeline) -> Option<(TimeInt, RowId)> { + debug_assert!(self.num_rows() == 1); + if self.is_static() { + self.row_ids() + .next() + .map(|row_id| (TimeInt::STATIC, row_id)) + } else { + self.timelines.get(timeline).and_then(|time_chunk| { + time_chunk + .times() + .next() + .and_then(|time| self.row_ids().next().map(|row_id| (time, row_id))) + }) + } + } + + /// Returns the [`RowId`] of the single row within, on the given timeline. + /// + /// Returns the single static `RowId` if the chunk is static. + #[inline] + pub fn row_id(&self) -> Option { + debug_assert!(self.num_rows() == 1); + self.row_ids().next() + } + + /// Returns the number of instances of the single row within. + /// + /// The maximum value amongst all components is what's returned. + #[inline] + pub fn num_instances(&self) -> u64 { + self.components + .values() + .map(|list_array| { + list_array.validity().map_or_else( + || list_array.len(), + |validity| validity.len() - validity.unset_bits(), + ) + }) + .max() + .unwrap_or(0) as u64 + } +} + +// --- Unit helpers --- + +impl UnitChunkShared { + // --- Batch --- + + /// Returns the raw data for the specified component. + #[inline] + pub fn component_batch_raw( + &self, + component_name: &ComponentName, + ) -> Option> { + debug_assert!(self.num_rows() == 1); + self.components + .get(component_name) + .map(|list_array| list_array.value(0)) + } + + /// Returns the deserialized data for the specified component. + /// + /// Returns an error if the data cannot be deserialized. + #[inline] + pub fn component_batch(&self) -> Option>> { + let data = C::from_arrow(&*self.component_batch_raw(&C::name())?); + Some(data.map_err(Into::into)) + } + + // --- Instance --- + + /// Returns the raw data for the specified component at the given instance index. + /// + /// Returns an error if the instance index is out of bounds. + #[inline] + pub fn component_instance_raw( + &self, + component_name: &ComponentName, + instance_index: usize, + ) -> Option>> { + let array = self.component_batch_raw(component_name)?; + if array.len() > instance_index { + Some(Ok(array.sliced(instance_index, 1))) + } else { + Some(Err(crate::ChunkError::IndexOutOfBounds { + kind: "instance".to_owned(), + len: array.len(), + index: instance_index, + })) + } + } + + /// Returns the deserialized data for the specified component at the given instance index. + /// + /// Returns an error if the data cannot be deserialized, or if the instance index is out of bounds. + #[inline] + pub fn component_instance( + &self, + instance_index: usize, + ) -> Option> { + let res = self.component_instance_raw(&C::name(), instance_index)?; + + let array = match res { + Ok(array) => array, + Err(err) => return Some(Err(err)), + }; + + match C::from_arrow(&*array) { + Ok(data) => data.into_iter().next().map(Ok), // NOTE: It's already sliced! + Err(err) => Some(Err(err.into())), + } + } + + // --- Mono --- + + /// Returns the raw data for the specified component, assuming a mono-batch. + /// + /// Returns an error if the underlying batch is not of unit length. 
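`UnitChunkShared` is the currency of the new query path: exactly one row, so index and value lookups collapse into a couple of calls. A sketch of the pattern the reworked store tests further down rely on (bindings hypothetical):

```rust
use re_chunk::{Chunk, LatestAtQuery};
use re_log_types::example_components::MyPoint;

// Assumes `chunk` is the (at most single-row) output of `Chunk::latest_at`.
fn print_latest(chunk: Chunk, query: &LatestAtQuery) {
    let Some(unit) = chunk.into_unit() else {
        return; // more than one row: not a unit chunk
    };
    // `(data_time, row_id)` of the single row, on the query's timeline.
    if let Some((data_time, row_id)) = unit.index(&query.timeline()) {
        // Mono accessor: `Some(Err(_))` if the batch isn't exactly one element.
        if let Some(Ok(point)) = unit.component_mono::<MyPoint>() {
            eprintln!("({data_time:?}, {row_id}) -> {point:?}");
        }
    }
}
```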
+ #[inline] + pub fn component_mono_raw( + &self, + component_name: &ComponentName, + ) -> Option>> { + let array = self.component_batch_raw(component_name)?; + if array.len() == 1 { + Some(Ok(array.sliced(0, 1))) + } else { + Some(Err(crate::ChunkError::IndexOutOfBounds { + kind: "mono".to_owned(), + len: array.len(), + index: 0, + })) + } + } + + /// Returns the deserialized data for the specified component, assuming a mono-batch. + /// + /// Returns an error if the data cannot be deserialized, or if the underlying batch is not of unit length. + #[inline] + pub fn component_mono(&self) -> Option> { + let res = self.component_mono_raw(&C::name())?; + + let array = match res { + Ok(array) => array, + Err(err) => return Some(Err(err)), + }; + + match C::from_arrow(&*array) { + Ok(data) => data.into_iter().next().map(Ok), // NOTE: It's already sliced! + Err(err) => Some(Err(err.into())), + } + } +} diff --git a/crates/store/re_chunk/src/iter.rs b/crates/store/re_chunk/src/iter.rs index 59a67b33dfb44..4850a555b8a12 100644 --- a/crates/store/re_chunk/src/iter.rs +++ b/crates/store/re_chunk/src/iter.rs @@ -1,99 +1,173 @@ use std::sync::Arc; -use arrow2::array::Array as ArrowArray; -use itertools::Itertools as _; +use arrow2::{ + array::{Array as ArrowArray, PrimitiveArray}, + Either, +}; +use itertools::izip; use re_log_types::{TimeInt, Timeline}; -use re_types_core::ComponentName; +use re_types_core::{Component, ComponentName}; use crate::{Chunk, ChunkTimeline, RowId}; // --- impl Chunk { - /// Returns an iterator over the rows of the [`Chunk`]. + /// Returns an iterator over the indices (`(TimeInt, RowId)`) of a [`Chunk`], for a given timeline. + /// + /// If the chunk is static, `timeline` will be ignored. + /// + /// See also: + /// * [`Self::iter_component_indices`]. + /// * [`Self::iter_indices_owned`]. + #[inline] + pub fn iter_indices(&self, timeline: &Timeline) -> impl Iterator + '_ { + if self.is_static() { + Either::Right(Either::Left(izip!( + std::iter::repeat(TimeInt::STATIC), + self.row_ids() + ))) + } else { + let Some(time_chunk) = self.timelines.get(timeline) else { + return Either::Left(std::iter::empty()); + }; + + Either::Right(Either::Right(izip!(time_chunk.times(), self.row_ids()))) + } + } + + /// Returns an iterator over the indices (`(TimeInt, RowId)`) of a [`Chunk`], for a given + /// timeline and component. + /// + /// If the chunk is static, `timeline` will be ignored. /// - /// Each yielded item is a component batch with its associated index ([`RowId`] + data time). + /// This is different than [`Self::iter_indices`] in that it will only yield indices for rows + /// at which there is data for the specified `component_name`. /// - /// Iterating a [`Chunk`] on a row basis is very wasteful, performance-wise. - /// Prefer columnar access when possible. - // - // TODO(cmc): a row-based iterator is obviously not what we want -- one of the benefits of - // chunks is to amortize the cost of downcasting & "deserialization". - // But at the moment we still need to run with the native deserialization cache, which expects - // row-based data. - // As soon as we remove the native cache and start exposing `Chunk`s directly to downstream - // systems, we will look into ergonomic ways to do columnar access. - pub fn iter_rows( + /// See also [`Self::iter_indices`]. 
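`iter_indices` above yields one `(TimeInt, RowId)` pair per row and transparently degrades to `(TimeInt::STATIC, …)` pairs for static chunks; its component-filtered sibling follows below. A hypothetical walk over a chunk's rows:

```rust
use re_chunk::{Chunk, Timeline};

// Assumes `chunk` has data on a "frame" sequence timeline (name hypothetical).
fn dump_indices(chunk: &Chunk) {
    let timeline_frame = Timeline::new_sequence("frame");
    for (data_time, row_id) in chunk.iter_indices(&timeline_frame) {
        eprintln!("{row_id} @ {data_time:?}");
    }
}
```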
+ pub fn iter_component_indices( &self, timeline: &Timeline, component_name: &ComponentName, - ) -> impl Iterator>)> + '_ { - let Self { - id: _, - entity_path: _, - heap_size_bytes: _, - is_sorted: _, - row_ids: _, - timelines, - components, - } = self; - - let row_ids = self.row_ids(); - - let data_times = timelines - .get(timeline) - .into_iter() - .flat_map(|time_chunk| time_chunk.times().collect::>()) - // If there's no time data, then the associate data time must be `TimeInt::STATIC`. - .chain(std::iter::repeat(TimeInt::STATIC)); - - let arrays = components - .get(component_name) - .into_iter() - .flat_map(|list_array| list_array.into_iter()); - - itertools::izip!(data_times, row_ids, arrays) + ) -> impl Iterator + '_ { + let Some(list_array) = self.components.get(component_name) else { + return Either::Left(std::iter::empty()); + }; + + if self.is_static() { + let indices = izip!(std::iter::repeat(TimeInt::STATIC), self.row_ids()); + + if let Some(validity) = list_array.validity() { + Either::Right(Either::Left(Either::Left( + indices + .enumerate() + .filter_map(|(i, o)| validity.get_bit(i).then_some(o)), + ))) + } else { + Either::Right(Either::Left(Either::Right(indices))) + } + } else { + let Some(time_chunk) = self.timelines.get(timeline) else { + return Either::Left(std::iter::empty()); + }; + + let indices = izip!(time_chunk.times(), self.row_ids()); + + if let Some(validity) = list_array.validity() { + Either::Right(Either::Right(Either::Left( + indices + .enumerate() + .filter_map(|(i, o)| validity.get_bit(i).then_some(o)), + ))) + } else { + Either::Right(Either::Right(Either::Right(indices))) + } + } } - /// Returns the cell corresponding to the specified [`RowId`] for a given [`ComponentName`]. - /// - /// This is `O(log(n))` if `self.is_sorted()`, and `O(n)` otherwise. + /// Returns an iterator over the offsets (`(offset, len)`) of a [`Chunk`], for a given timeline + /// and component. + pub fn iter_component_offsets( + &self, + component_name: &ComponentName, + ) -> impl Iterator + '_ { + let Some(list_array) = self.components.get(component_name) else { + return Either::Left(std::iter::empty()); + }; + + let offsets = list_array.offsets().iter().map(|idx| *idx as usize); + let lengths = list_array.offsets().lengths(); + + if let Some(validity) = list_array.validity() { + Either::Right(Either::Left( + izip!(offsets, lengths) + .enumerate() + .filter_map(|(i, o)| validity.get_bit(i).then_some(o)), + )) + } else { + Either::Right(Either::Right(izip!(offsets, lengths))) + } + } + + /// Returns an iterator over the raw arrays of a [`Chunk`], for a given component. /// - /// Reminder: duplicated `RowId`s results in undefined behavior. - pub fn cell( + /// See also: + /// * [`Self::iter_component`]. + /// * [`Self::iter_primitive`]. + pub fn iter_component_arrays( &self, - row_id: RowId, component_name: &ComponentName, - ) -> Option> { - let list_array = self.components.get(component_name)?; + ) -> impl Iterator> + '_ { + let Some(list_array) = self.components.get(component_name) else { + return Either::Left(std::iter::empty()); + }; - if self.is_sorted() { - let row_id_128 = row_id.as_u128(); - let row_id_time_ns = (row_id_128 >> 64) as u64; - let row_id_inc = (row_id_128 & (!0 >> 64)) as u64; + Either::Right(list_array.iter().flatten()) + } - let (times, incs) = self.row_ids_raw(); - let times = times.values().as_slice(); - let incs = incs.values().as_slice(); + /// Returns an iterator over the raw primitive values of a [`Chunk`], for a given component. 
+ /// + /// This is a very fast path: the entire column will be downcasted at once, and then every + /// component batch will be a slice reference into that global slice. + /// Use this when working with simple arrow datatypes and performance matters (e.g. scalars, + /// points, etc). + /// + /// * [`Self::iter_component_arrays`]. + /// * [`Self::iter_component`]. + #[inline] + pub fn iter_primitive( + &self, + component_name: &ComponentName, + ) -> impl Iterator + '_ { + let Some(list_array) = self.components.get(component_name) else { + return Either::Left(std::iter::empty()); + }; - let mut index = times.partition_point(|&time| time < row_id_time_ns); - while index < incs.len() && incs[index] < row_id_inc { - index += 1; + let Some(values) = list_array + .values() + .as_any() + .downcast_ref::>() + else { + if cfg!(debug_assertions) { + panic!("downcast failed for {component_name}, data discarded"); + } else { + re_log::error_once!("downcast failed for {component_name}, data discarded"); } + return Either::Left(std::iter::empty()); + }; + let values = values.values().as_slice(); - let found_it = - times.get(index) == Some(&row_id_time_ns) && incs.get(index) == Some(&row_id_inc); - - (found_it && list_array.is_valid(index)).then(|| list_array.value(index)) - } else { - self.row_ids() - .find_position(|id| *id == row_id) - .and_then(|(index, _)| list_array.is_valid(index).then(|| list_array.value(index))) - } + // NOTE: No need for validity checks here, `iter_offsets` already takes care of that. + Either::Right( + self.iter_component_offsets(component_name) + .map(move |(idx, len)| &values[idx..idx + len]), + ) } } +// --- + pub struct ChunkIndicesIter { chunk: Arc, @@ -135,161 +209,144 @@ impl Chunk { /// If the chunk is static, `timeline` will be ignored. /// /// The returned iterator outlives `self`, thus it can be passed around freely. + /// The tradeoff is that `self` must be an `Arc`. + /// + /// See also [`Self::iter_indices`]. #[inline] - pub fn iter_indices(self: Arc, timeline: &Timeline) -> Option { + pub fn iter_indices_owned( + self: Arc, + timeline: &Timeline, + ) -> impl Iterator { if self.is_static() { - Some(ChunkIndicesIter { + Either::Left(ChunkIndicesIter { chunk: self, time_chunk: None, index: 0, }) } else { - self.timelines - .get(timeline) - .cloned() - .map(|time_chunk| ChunkIndicesIter { - chunk: self, - time_chunk: Some(time_chunk), - index: 0, - }) + self.timelines.get(timeline).cloned().map_or_else( + || Either::Right(Either::Left(std::iter::empty())), + |time_chunk| { + Either::Right(Either::Right(ChunkIndicesIter { + chunk: self, + time_chunk: Some(time_chunk), + index: 0, + })) + }, + ) } } } -#[cfg(test)] -mod tests { - use std::sync::Arc; +// --- - use itertools::{izip, Itertools}; - use re_log_types::{ - example_components::{MyColor, MyLabel, MyPoint}, - EntityPath, TimeInt, TimePoint, - }; - use re_types_core::{ComponentBatch, Loggable}; +/// The actual iterator implementation for [`Chunk::iter_component`]. +pub struct ChunkComponentIter { + values: Vec, + offsets: IO, +} - use crate::{Chunk, RowId, Timeline}; +/// The intermediate state for [`ChunkComponentIter`]. +/// +/// Required so that we can return references to the inner data. 
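`iter_primitive` above performs a single downcast for the entire column and then hands out zero-copy slices per row. A sketch under the assumption that the queried component is backed by a primitive `f64` arrow array (a scalar is used here purely for illustration):

```rust
use re_chunk::Chunk;
use re_types::components::Scalar;
use re_types_core::Loggable as _;

// Assumes `Scalar` data whose arrow representation is a primitive f64 column.
fn sum_scalars(chunk: &Chunk) -> f64 {
    chunk
        .iter_primitive::<f64>(&Scalar::name())
        .flat_map(|values| values.iter().copied()) // `values: &[f64]`, zero-copy
        .sum()
}
```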
+pub struct ChunkComponentIterRef<'a, C, IO> { + values: &'a [C], + offsets: &'a mut IO, +} - #[test] - fn cell() -> anyhow::Result<()> { - let entity_path = "my/entity"; +impl<'a, C: Component, IO: Iterator> IntoIterator + for &'a mut ChunkComponentIter +{ + type Item = &'a [C]; - let row_id1 = RowId::ZERO.incremented_by(10); - let row_id2 = RowId::ZERO.incremented_by(20); - let row_id3 = RowId::ZERO.incremented_by(30); - let row_id4 = RowId::new(); - let row_id5 = RowId::new(); + type IntoIter = ChunkComponentIterRef<'a, C, IO>; - let timepoint1 = [ - (Timeline::log_time(), 1000), - (Timeline::new_sequence("frame"), 1), - ]; - let timepoint2 = [ - (Timeline::log_time(), 1032), - (Timeline::new_sequence("frame"), 3), - ]; - let timepoint3 = [ - (Timeline::log_time(), 1064), - (Timeline::new_sequence("frame"), 5), - ]; - let timepoint4 = [ - (Timeline::log_time(), 1096), - (Timeline::new_sequence("frame"), 7), - ]; - let timepoint5 = [ - (Timeline::log_time(), 1128), - (Timeline::new_sequence("frame"), 9), - ]; - - let points1 = &[MyPoint::new(1.0, 1.0), MyPoint::new(2.0, 2.0)]; - let points3 = &[MyPoint::new(6.0, 7.0)]; - - let colors4 = &[MyColor::from_rgb(1, 1, 1)]; - let colors5 = &[MyColor::from_rgb(2, 2, 2), MyColor::from_rgb(3, 3, 3)]; - - let labels1 = &[MyLabel("a".into())]; - let labels2 = &[MyLabel("b".into())]; - let labels3 = &[MyLabel("c".into())]; - let labels4 = &[MyLabel("d".into())]; - let labels5 = &[MyLabel("e".into())]; - - let mut chunk = Chunk::builder(entity_path.into()) - .with_sparse_component_batches( - row_id2, - timepoint4, - [ - (MyPoint::name(), None), - (MyColor::name(), Some(colors4 as _)), - (MyLabel::name(), Some(labels4 as _)), - ], - ) - .with_sparse_component_batches( - row_id5, - timepoint5, - [ - (MyPoint::name(), None), - (MyColor::name(), Some(colors5 as _)), - (MyLabel::name(), Some(labels5 as _)), - ], - ) - .with_sparse_component_batches( - row_id1, - timepoint3, - [ - (MyPoint::name(), Some(points1 as _)), - (MyColor::name(), None), - (MyLabel::name(), Some(labels1 as _)), - ], - ) - .with_sparse_component_batches( - row_id4, - timepoint2, - [ - (MyPoint::name(), None), - (MyColor::name(), None), - (MyLabel::name(), Some(labels2 as _)), - ], - ) - .with_sparse_component_batches( - row_id3, - timepoint1, - [ - (MyPoint::name(), Some(points3 as _)), - (MyColor::name(), None), - (MyLabel::name(), Some(labels3 as _)), - ], - ) - .build()?; - - eprintln!("chunk:\n{chunk}"); - - let expectations: &[(_, _, Option<&dyn ComponentBatch>)] = &[ - (row_id1, MyPoint::name(), Some(points1 as _)), - (row_id2, MyLabel::name(), Some(labels4 as _)), - (row_id3, MyColor::name(), None), - (row_id4, MyLabel::name(), Some(labels2 as _)), - (row_id5, MyColor::name(), Some(colors5 as _)), - ]; - - assert!(!chunk.is_sorted()); - for (row_id, component_name, expected) in expectations { - let expected = - expected.and_then(|expected| re_types_core::LoggableBatch::to_arrow(expected).ok()); - eprintln!("{component_name} @ {row_id}"); - similar_asserts::assert_eq!(expected, chunk.cell(*row_id, component_name)); + #[inline] + fn into_iter(self) -> Self::IntoIter { + ChunkComponentIterRef { + values: &self.values, + offsets: &mut self.offsets, } + } +} - chunk.sort_if_unsorted(); - assert!(chunk.is_sorted()); +impl<'a, C: Component, IO: Iterator> Iterator + for ChunkComponentIterRef<'a, C, IO> +{ + type Item = &'a [C]; - for (row_id, component_name, expected) in expectations { - let expected = - expected.and_then(|expected| 
re_types_core::LoggableBatch::to_arrow(expected).ok()); - eprintln!("{component_name} @ {row_id}"); - similar_asserts::assert_eq!(expected, chunk.cell(*row_id, component_name)); - } + #[inline] + fn next(&mut self) -> Option { + self.offsets + .next() + .map(move |(idx, len)| &self.values[idx..idx + len]) + } +} - Ok(()) +impl Chunk { + /// Returns an iterator over the deserialized batches of a [`Chunk`], for a given component. + /// + /// This is a dedicated fast path: the entire column will be downcasted and deserialized at + /// once, and then every component batch will be a slice reference into that global slice. + /// Use this when working with complex arrow datatypes and performance matters (e.g. ranging + /// through enum types across many timestamps). + /// + /// See also: + /// * [`Self::iter_component`]. + /// * [`Self::iter_primitive`]. + #[inline] + pub fn iter_component( + &self, + ) -> ChunkComponentIter + '_> { + let Some(list_array) = self.components.get(&C::name()) else { + return ChunkComponentIter { + values: vec![], + offsets: Either::Left(std::iter::empty()), + }; + }; + + let values = list_array.values(); + let values = match C::from_arrow(&**values) { + Ok(values) => values, + Err(err) => { + if cfg!(debug_assertions) { + panic!( + "deserialization failed for {}, data discarded: {}", + C::name(), + re_error::format_ref(&err), + ); + } else { + re_log::error_once!( + "deserialization failed for {}, data discarded: {}", + C::name(), + re_error::format_ref(&err), + ); + } + return ChunkComponentIter { + values: vec![], + offsets: Either::Left(std::iter::empty()), + }; + } + }; + + // NOTE: No need for validity checks here, `iter_offsets` already takes care of that. + ChunkComponentIter { + values, + offsets: Either::Right(self.iter_component_offsets(&C::name())), + } } +} + +// --- + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use itertools::{izip, Itertools}; + use re_log_types::{example_components::MyPoint, EntityPath, TimeInt, TimePoint}; + + use crate::{Chunk, RowId, Timeline}; #[test] fn iter_indices_temporal() -> anyhow::Result<()> { @@ -327,9 +384,8 @@ mod tests { { let got = Arc::clone(&chunk) - .iter_indices(&timeline_frame) - .map(|it| it.collect_vec()) - .unwrap_or_default(); + .iter_indices_owned(&timeline_frame) + .collect_vec(); let expected = izip!( chunk .timelines @@ -376,9 +432,8 @@ mod tests { { let got = Arc::clone(&chunk) - .iter_indices(&timeline_frame) - .map(|it| it.collect_vec()) - .unwrap_or_default(); + .iter_indices_owned(&timeline_frame) + .collect_vec(); let expected = izip!(std::iter::repeat(TimeInt::STATIC), chunk.row_ids()).collect_vec(); similar_asserts::assert_eq!(expected, got); diff --git a/crates/store/re_chunk/src/lib.rs b/crates/store/re_chunk/src/lib.rs index 6b320ef7dcb3b..0f2693f41ced2 100644 --- a/crates/store/re_chunk/src/lib.rs +++ b/crates/store/re_chunk/src/lib.rs @@ -6,6 +6,7 @@ mod builder; mod chunk; +mod helpers; mod id; mod iter; mod latest_at; @@ -21,6 +22,7 @@ mod batcher; pub use self::builder::{ChunkBuilder, ChunkTimelineBuilder}; pub use self::chunk::{Chunk, ChunkError, ChunkResult, ChunkTimeline}; +pub use self::helpers::{ChunkShared, UnitChunkShared}; pub use self::id::{ChunkId, RowId}; pub use self::latest_at::LatestAtQuery; pub use self::range::RangeQuery; diff --git a/crates/store/re_chunk/src/shuffle.rs b/crates/store/re_chunk/src/shuffle.rs index fe409a3f9547c..a19c88e3d23a3 100644 --- a/crates/store/re_chunk/src/shuffle.rs +++ b/crates/store/re_chunk/src/shuffle.rs @@ -23,6 +23,17 @@ impl Chunk { 
self.is_sorted } + /// For debugging purposes. + #[doc(hidden)] + #[inline] + pub fn is_sorted_uncached(&self) -> bool { + re_tracing::profile_function!(); + + self.row_ids() + .tuple_windows::<(_, _)>() + .all(|row_ids| row_ids.0 <= row_ids.1) + } + /// Is the chunk ascendingly sorted by time, for all of its timelines? /// /// This is O(1) (cached). @@ -33,19 +44,29 @@ impl Chunk { .all(|time_chunk| time_chunk.is_sorted()) } - /// Like [`Self::is_sorted`], but actually checks the entire dataset rather than relying on the - /// cached value. + /// Is the chunk ascendingly sorted by time, for a specific timeline? /// - /// O(n). Useful for tests/debugging, or when you just don't know. + /// This is O(1) (cached). /// - /// See also [`Self::is_sorted`]. + /// See also [`Self::is_timeline_sorted_uncached`]. #[inline] - pub fn is_sorted_uncached(&self) -> bool { - re_tracing::profile_function!(); + pub fn is_timeline_sorted(&self, timeline: &Timeline) -> bool { + self.is_static() + || self + .timelines + .get(timeline) + .map_or(false, |time_chunk| time_chunk.is_sorted()) + } - self.row_ids() - .tuple_windows::<(_, _)>() - .all(|row_ids| row_ids.0 <= row_ids.1) + /// For debugging purposes. + #[doc(hidden)] + #[inline] + pub fn is_timeline_sorted_uncached(&self, timeline: &Timeline) -> bool { + self.is_static() + || self + .timelines + .get(timeline) + .map_or(false, |time_chunk| time_chunk.is_sorted_uncached()) } /// Sort the chunk, if needed. diff --git a/crates/store/re_chunk/src/slice.rs b/crates/store/re_chunk/src/slice.rs index 345b195efa412..7687f90f4b188 100644 --- a/crates/store/re_chunk/src/slice.rs +++ b/crates/store/re_chunk/src/slice.rs @@ -5,10 +5,11 @@ use arrow2::array::{ use itertools::Itertools; use nohash_hasher::IntSet; + use re_log_types::Timeline; use re_types_core::ComponentName; -use crate::{Chunk, ChunkTimeline}; +use crate::{Chunk, ChunkTimeline, RowId}; // --- @@ -16,6 +17,43 @@ use crate::{Chunk, ChunkTimeline}; // Most of them are indirectly stressed by our higher-level query tests anyhow. impl Chunk { + /// Returns the cell corresponding to the specified [`RowId`] for a given [`ComponentName`]. + /// + /// This is `O(log(n))` if `self.is_sorted()`, and `O(n)` otherwise. + /// + /// Reminder: duplicated `RowId`s results in undefined behavior. + pub fn cell( + &self, + row_id: RowId, + component_name: &ComponentName, + ) -> Option> { + let list_array = self.components.get(component_name)?; + + if self.is_sorted() { + let row_id_128 = row_id.as_u128(); + let row_id_time_ns = (row_id_128 >> 64) as u64; + let row_id_inc = (row_id_128 & (!0 >> 64)) as u64; + + let (times, incs) = self.row_ids_raw(); + let times = times.values().as_slice(); + let incs = incs.values().as_slice(); + + let mut index = times.partition_point(|&time| time < row_id_time_ns); + while index < incs.len() && incs[index] < row_id_inc { + index += 1; + } + + let found_it = + times.get(index) == Some(&row_id_time_ns) && incs.get(index) == Some(&row_id_inc); + + (found_it && list_array.is_valid(index)).then(|| list_array.value(index)) + } else { + self.row_ids() + .find_position(|id| *id == row_id) + .and_then(|(index, _)| list_array.is_valid(index).then(|| list_array.value(index))) + } + } + /// Slices the [`Chunk`] vertically. /// /// The result is a new [`Chunk`] with the same columns and (potentially) less rows. @@ -267,7 +305,7 @@ impl Chunk { /// If `component_name` doesn't exist in this [`Chunk`], or if it is already dense, this method /// is a no-op. 
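With `cell` relocated here from `iter.rs`, a single `(RowId, component)` lookup stays a one-liner; the sorted path binary-searches the split `RowId` halves shown above. A hypothetical lookup (see also the `cell` test at the bottom of this file):

```rust
use arrow2::array::Array as _;
use re_chunk::{Chunk, RowId};
use re_log_types::example_components::MyColor;
use re_types_core::Loggable as _;

// Assumes `row_id` was previously logged into `chunk`. O(log n) when the
// chunk is sorted by `RowId`, O(n) otherwise.
fn color_instance_count(chunk: &Chunk, row_id: RowId) -> Option<usize> {
    chunk
        .cell(row_id, &MyColor::name())
        .map(|array| array.len()) // number of instances in that cell
}
```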
#[inline] - pub fn densified(&self, component_name: ComponentName) -> Self { + pub fn densified(&self, component_name_pov: ComponentName) -> Self { let Self { id, entity_path, @@ -282,7 +320,7 @@ impl Chunk { return self.clone(); } - let Some(component_list_array) = components.get(&component_name) else { + let Some(component_list_array) = components.get(&component_name_pov) else { return self.clone(); }; @@ -307,10 +345,24 @@ impl Chunk { components: components .iter() .map(|(&component_name, list_array)| { - ( - component_name, - crate::util::filter_array(list_array, &validity_filter), - ) + let filtered = crate::util::filter_array(list_array, &validity_filter); + let filtered = if component_name == component_name_pov { + // Make sure we fully remove the validity bitmap for the densified + // component. + // This will allow further operations on this densified chunk to take some + // very optimized paths. + + filtered + .with_validity(None) + .as_any() + .downcast_ref::>() + .unwrap() + .clone() + } else { + filtered + }; + + (component_name, filtered) }) .collect(), }; @@ -493,3 +545,135 @@ impl ChunkTimeline { ) } } + +// --- + +#[cfg(test)] +mod tests { + use re_log_types::example_components::{MyColor, MyLabel, MyPoint}; + use re_types_core::{ComponentBatch, Loggable}; + + use crate::{Chunk, RowId, Timeline}; + + #[test] + fn cell() -> anyhow::Result<()> { + let entity_path = "my/entity"; + + let row_id1 = RowId::ZERO.incremented_by(10); + let row_id2 = RowId::ZERO.incremented_by(20); + let row_id3 = RowId::ZERO.incremented_by(30); + let row_id4 = RowId::new(); + let row_id5 = RowId::new(); + + let timepoint1 = [ + (Timeline::log_time(), 1000), + (Timeline::new_sequence("frame"), 1), + ]; + let timepoint2 = [ + (Timeline::log_time(), 1032), + (Timeline::new_sequence("frame"), 3), + ]; + let timepoint3 = [ + (Timeline::log_time(), 1064), + (Timeline::new_sequence("frame"), 5), + ]; + let timepoint4 = [ + (Timeline::log_time(), 1096), + (Timeline::new_sequence("frame"), 7), + ]; + let timepoint5 = [ + (Timeline::log_time(), 1128), + (Timeline::new_sequence("frame"), 9), + ]; + + let points1 = &[MyPoint::new(1.0, 1.0), MyPoint::new(2.0, 2.0)]; + let points3 = &[MyPoint::new(6.0, 7.0)]; + + let colors4 = &[MyColor::from_rgb(1, 1, 1)]; + let colors5 = &[MyColor::from_rgb(2, 2, 2), MyColor::from_rgb(3, 3, 3)]; + + let labels1 = &[MyLabel("a".into())]; + let labels2 = &[MyLabel("b".into())]; + let labels3 = &[MyLabel("c".into())]; + let labels4 = &[MyLabel("d".into())]; + let labels5 = &[MyLabel("e".into())]; + + let mut chunk = Chunk::builder(entity_path.into()) + .with_sparse_component_batches( + row_id2, + timepoint4, + [ + (MyPoint::name(), None), + (MyColor::name(), Some(colors4 as _)), + (MyLabel::name(), Some(labels4 as _)), + ], + ) + .with_sparse_component_batches( + row_id5, + timepoint5, + [ + (MyPoint::name(), None), + (MyColor::name(), Some(colors5 as _)), + (MyLabel::name(), Some(labels5 as _)), + ], + ) + .with_sparse_component_batches( + row_id1, + timepoint3, + [ + (MyPoint::name(), Some(points1 as _)), + (MyColor::name(), None), + (MyLabel::name(), Some(labels1 as _)), + ], + ) + .with_sparse_component_batches( + row_id4, + timepoint2, + [ + (MyPoint::name(), None), + (MyColor::name(), None), + (MyLabel::name(), Some(labels2 as _)), + ], + ) + .with_sparse_component_batches( + row_id3, + timepoint1, + [ + (MyPoint::name(), Some(points3 as _)), + (MyColor::name(), None), + (MyLabel::name(), Some(labels3 as _)), + ], + ) + .build()?; + + eprintln!("chunk:\n{chunk}"); + + let 
expectations: &[(_, _, Option<&dyn ComponentBatch>)] = &[ + (row_id1, MyPoint::name(), Some(points1 as _)), + (row_id2, MyLabel::name(), Some(labels4 as _)), + (row_id3, MyColor::name(), None), + (row_id4, MyLabel::name(), Some(labels2 as _)), + (row_id5, MyColor::name(), Some(colors5 as _)), + ]; + + assert!(!chunk.is_sorted()); + for (row_id, component_name, expected) in expectations { + let expected = + expected.and_then(|expected| re_types_core::LoggableBatch::to_arrow(expected).ok()); + eprintln!("{component_name} @ {row_id}"); + similar_asserts::assert_eq!(expected, chunk.cell(*row_id, component_name)); + } + + chunk.sort_if_unsorted(); + assert!(chunk.is_sorted()); + + for (row_id, component_name, expected) in expectations { + let expected = + expected.and_then(|expected| re_types_core::LoggableBatch::to_arrow(expected).ok()); + eprintln!("{component_name} @ {row_id}"); + similar_asserts::assert_eq!(expected, chunk.cell(*row_id, component_name)); + } + + Ok(()) + } +} diff --git a/crates/store/re_chunk/src/util.rs b/crates/store/re_chunk/src/util.rs index 87a80e74df696..916de440c33ee 100644 --- a/crates/store/re_chunk/src/util.rs +++ b/crates/store/re_chunk/src/util.rs @@ -199,6 +199,8 @@ pub fn pad_list_array_front( /// /// Takes care of up- and down-casting the data back and forth on behalf of the caller. pub fn filter_array(array: &A, filter: &ArrowBooleanArray) -> A { + debug_assert!(filter.validity().is_none()); // just for good measure + #[allow(clippy::unwrap_used)] arrow2::compute::filter::filter(array, filter) // Unwrap: this literally cannot fail. diff --git a/crates/store/re_chunk_store/src/lib.rs b/crates/store/re_chunk_store/src/lib.rs index ec6e606f69a22..3cf4b54304eb8 100644 --- a/crates/store/re_chunk_store/src/lib.rs +++ b/crates/store/re_chunk_store/src/lib.rs @@ -30,7 +30,9 @@ pub use self::subscribers::{ChunkStoreSubscriber, ChunkStoreSubscriberHandle}; // Re-exports #[doc(no_inline)] -pub use re_chunk::{Chunk, ChunkId, LatestAtQuery, RangeQuery, RowId}; +pub use re_chunk::{ + Chunk, ChunkId, ChunkShared, LatestAtQuery, RangeQuery, RowId, UnitChunkShared, +}; #[doc(no_inline)] pub use re_log_types::{ResolvedTimeRange, TimeInt, TimeType, Timeline}; diff --git a/crates/store/re_chunk_store/src/store.rs b/crates/store/re_chunk_store/src/store.rs index 12b444545d142..1a421f5850a70 100644 --- a/crates/store/re_chunk_store/src/store.rs +++ b/crates/store/re_chunk_store/src/store.rs @@ -115,6 +115,20 @@ impl ChunkStoreConfig { ..Self::DEFAULT }; + /// [`Self::DEFAULT`], but with changelog disabled. + pub const CHANGELOG_DISABLED: Self = Self { + enable_changelog: false, + ..Self::DEFAULT + }; + + /// All features disabled. + pub const ALL_DISABLED: Self = Self { + enable_changelog: false, + chunk_max_bytes: 0, + chunk_max_rows: 0, + chunk_max_rows_if_unsorted: 0, + }; + /// Environment variable to configure [`Self::enable_changelog`]. 
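`ALL_DISABLED` zeroes every compaction threshold on top of disabling the changelog, so chunks come back out of the store exactly as they went in. A hedged construction sketch (the store id is arbitrary):

```rust
use re_chunk_store::{ChunkStore, ChunkStoreConfig};
use re_log_types::{StoreId, StoreKind};

// A store that never compacts and emits no changelog events; convenient for
// tests that want byte-identical chunks back.
fn test_store() -> ChunkStore {
    ChunkStore::new(
        StoreId::random(StoreKind::Recording),
        ChunkStoreConfig::ALL_DISABLED,
    )
}
```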
pub const ENV_STORE_ENABLE_CHANGELOG: &'static str = "RERUN_STORE_ENABLE_CHANGELOG"; diff --git a/crates/store/re_chunk_store/tests/correctness.rs b/crates/store/re_chunk_store/tests/correctness.rs index 4a06e65e45502..a8ee192f72836 100644 --- a/crates/store/re_chunk_store/tests/correctness.rs +++ b/crates/store/re_chunk_store/tests/correctness.rs @@ -3,7 +3,6 @@ use std::sync::Arc; -use itertools::Itertools as _; use re_chunk::{Chunk, ChunkId, RowId}; use re_chunk_store::{ChunkStore, ChunkStoreError, LatestAtQuery}; use re_log_types::example_components::{MyIndex, MyPoint}; @@ -22,21 +21,20 @@ fn query_latest_component( ) -> Option<(TimeInt, RowId, C)> { re_tracing::profile_function!(); - let (data_time, row_id, array) = store + let ((data_time, row_id), unit) = store .latest_at_relevant_chunks(query, entity_path, C::name()) .into_iter() - .flat_map(|chunk| { + .filter_map(|chunk| { chunk .latest_at(query, C::name()) - .iter_rows(&query.timeline(), &C::name()) - .collect_vec() + .into_unit() + .and_then(|unit| unit.index(&query.timeline()).map(|index| (index, unit))) }) - .max_by_key(|(data_time, row_id, _)| (*data_time, *row_id)) - .and_then(|(data_time, row_id, array)| array.map(|array| (data_time, row_id, array)))?; + .max_by_key(|(index, _unit)| *index)?; - let value = C::from_arrow(&*array).ok()?.first()?.clone(); - - Some((data_time, row_id, value)) + unit.component_mono()? + .ok() + .map(|values| (data_time, row_id, values)) } // --- diff --git a/crates/store/re_chunk_store/tests/gc.rs b/crates/store/re_chunk_store/tests/gc.rs index f91b4205b9962..03dff3db286b7 100644 --- a/crates/store/re_chunk_store/tests/gc.rs +++ b/crates/store/re_chunk_store/tests/gc.rs @@ -1,7 +1,6 @@ use std::sync::Arc; use arrow2::array::Array as ArrowArray; -use itertools::Itertools as _; use rand::Rng as _; use re_chunk::{Chunk, ChunkId, ComponentName, LatestAtQuery, RowId, TimeInt, TimePoint}; @@ -27,19 +26,19 @@ fn query_latest_array( ) -> Option<(TimeInt, RowId, Box)> { re_tracing::profile_function!(); - let (data_time, row_id, array) = store + let ((data_time, row_id), unit) = store .latest_at_relevant_chunks(query, entity_path, component_name) .into_iter() - .flat_map(|chunk| { + .filter_map(|chunk| { chunk .latest_at(query, component_name) - .iter_rows(&query.timeline(), &component_name) - .collect_vec() + .into_unit() + .and_then(|chunk| chunk.index(&query.timeline()).map(|index| (index, chunk))) }) - .max_by_key(|(data_time, row_id, _)| (*data_time, *row_id)) - .and_then(|(data_time, row_id, array)| array.map(|array| (data_time, row_id, array)))?; + .max_by_key(|(index, _chunk)| *index)?; - Some((data_time, row_id, array)) + unit.component_batch_raw(&component_name) + .map(|array| (data_time, row_id, array)) } // --- diff --git a/crates/store/re_chunk_store/tests/reads.rs b/crates/store/re_chunk_store/tests/reads.rs index fe5ff17d5c62e..8ff27964164f2 100644 --- a/crates/store/re_chunk_store/tests/reads.rs +++ b/crates/store/re_chunk_store/tests/reads.rs @@ -26,19 +26,19 @@ fn query_latest_array( ) -> Option<(TimeInt, RowId, Box)> { re_tracing::profile_function!(); - let (data_time, row_id, array) = store + let ((data_time, row_id), unit) = store .latest_at_relevant_chunks(query, entity_path, component_name) .into_iter() - .flat_map(|chunk| { + .filter_map(|chunk| { chunk .latest_at(query, component_name) - .iter_rows(&query.timeline(), &component_name) - .collect_vec() + .into_unit() + .and_then(|chunk| chunk.index(&query.timeline()).map(|index| (index, chunk))) }) - .max_by_key(|(data_time, 
row_id, _)| (*data_time, *row_id)) - .and_then(|(data_time, row_id, array)| array.map(|array| (data_time, row_id, array)))?; + .max_by_key(|(index, _chunk)| *index)?; - Some((data_time, row_id, array)) + unit.component_batch_raw(&component_name) + .map(|array| (data_time, row_id, array)) } // --- @@ -723,9 +723,7 @@ fn range() -> anyhow::Result<()> { for chunk in results { let chunk = chunk.range(&query, component_name); eprintln!("{chunk}"); - for (data_time, row_id, _array) in - chunk.iter_rows(&timeline_frame_nr, &component_name) - { + for (data_time, row_id) in chunk.iter_indices(&timeline_frame_nr) { let (expected_data_time, expected_row_id) = row_ids_at_times[results_processed]; assert_eq!(expected_data_time, data_time); assert_eq!(expected_row_id, row_id); diff --git a/crates/store/re_entity_db/Cargo.toml b/crates/store/re_entity_db/Cargo.toml index a094f0aaf9e1e..67853810ff4d9 100644 --- a/crates/store/re_entity_db/Cargo.toml +++ b/crates/store/re_entity_db/Cargo.toml @@ -36,6 +36,7 @@ re_log.workspace = true re_log_encoding = { workspace = true, features = ["decoder"] } re_log_types.workspace = true re_query.workspace = true +re_query2.workspace = true re_smart_channel.workspace = true re_tracing.workspace = true re_types_core.workspace = true diff --git a/crates/store/re_entity_db/src/entity_db.rs b/crates/store/re_entity_db/src/entity_db.rs index c4634f39e8138..2693498aa6ef2 100644 --- a/crates/store/re_entity_db/src/entity_db.rs +++ b/crates/store/re_entity_db/src/entity_db.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use nohash_hasher::IntMap; use parking_lot::Mutex; -use re_chunk::{Chunk, ChunkResult, RowId}; +use re_chunk::{Chunk, ChunkResult, RowId, TimeInt}; use re_chunk_store::{ ChunkStore, ChunkStoreConfig, ChunkStoreEvent, ChunkStoreSubscriber, GarbageCollectionOptions, GarbageCollectionTarget, @@ -68,6 +68,9 @@ pub struct EntityDb { /// Query caches for the data in [`Self::data_store`]. query_caches: re_query::Caches, + /// Query caches for the data in [`Self::data_store`]. + query_caches2: re_query2::Caches, + stats: IngestionStatistics, } @@ -79,6 +82,7 @@ impl EntityDb { pub fn with_store_config(store_id: StoreId, store_config: ChunkStoreConfig) -> Self { let data_store = ChunkStore::new(store_id.clone(), store_config); let query_caches = re_query::Caches::new(&data_store); + let query_caches2 = re_query2::Caches::new(&data_store); Self { data_source: None, @@ -92,6 +96,7 @@ impl EntityDb { data_store, resolver: re_query::PromiseResolver::default(), query_caches, + query_caches2, stats: IngestionStatistics::new(store_id), } } @@ -123,6 +128,11 @@ impl EntityDb { &self.query_caches } + #[inline] + pub fn query_caches2(&self) -> &re_query2::Caches { + &self.query_caches2 + } + #[inline] pub fn resolver(&self) -> &re_query::PromiseResolver { &self.resolver @@ -130,7 +140,7 @@ impl EntityDb { /// Queries for the given `component_names` using latest-at semantics. /// - /// See [`re_query::LatestAtResults`] for more information about how to handle the results. + /// See [`re_query2::LatestAtResults`] for more information about how to handle the results. /// /// This is a cached API -- data will be lazily cached upon access. 
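A hedged sketch of this new cached read path, whose signature follows below (`db` and the entity path are hypothetical; `component_mono` and `index` are the `LatestAtResults` accessors this patch uses elsewhere):

```rust
use re_chunk::LatestAtQuery;
use re_entity_db::EntityDb;
use re_log_types::{example_components::MyPoint, EntityPath, Timeline};
use re_types_core::Loggable as _;

// Assumes `db` already contains `MyPoint` data for `entity_path`.
fn latest_point(db: &EntityDb, entity_path: &EntityPath) -> Option<MyPoint> {
    let query = LatestAtQuery::latest(Timeline::log_time());
    let results = db.latest_at(&query, entity_path, [MyPoint::name()]);
    let point = results.component_mono::<MyPoint>()?;
    let (data_time, row_id) = results.index();
    eprintln!("latest MyPoint @ ({data_time:?}, {row_id}): {point:?}");
    Some(point)
}
```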
#[inline] @@ -139,8 +149,8 @@ impl EntityDb { query: &re_chunk_store::LatestAtQuery, entity_path: &EntityPath, component_names: impl IntoIterator, - ) -> re_query::LatestAtResults { - self.query_caches() + ) -> re_query2::LatestAtResults { + self.query_caches2() .latest_at(self.store(), query, entity_path, component_names) } @@ -157,13 +167,13 @@ impl EntityDb { &self, entity_path: &EntityPath, query: &re_chunk_store::LatestAtQuery, - ) -> Option> { - self.query_caches().latest_at_component::( - self.store(), - self.resolver(), - entity_path, - query, - ) + ) -> Option<((TimeInt, RowId), C)> { + let results = self + .query_caches2() + .latest_at(self.store(), query, entity_path, [C::name()]); + results + .component_mono() + .map(|value| (results.index(), value)) } /// Get the latest index and value for a given dense [`re_types_core::Component`]. @@ -179,13 +189,13 @@ impl EntityDb { &self, entity_path: &EntityPath, query: &re_chunk_store::LatestAtQuery, - ) -> Option> { - self.query_caches().latest_at_component_quiet::( - self.store(), - self.resolver(), - entity_path, - query, - ) + ) -> Option<((TimeInt, RowId), C)> { + let results = self + .query_caches2() + .latest_at(self.store(), query, entity_path, [C::name()]); + results + .component_mono_quiet() + .map(|value| (results.index(), value)) } #[inline] @@ -193,14 +203,18 @@ impl EntityDb { &self, entity_path: &EntityPath, query: &re_chunk_store::LatestAtQuery, - ) -> Option<(EntityPath, re_query::LatestAtMonoResult)> { - self.query_caches() - .latest_at_component_at_closest_ancestor::( - self.store(), - self.resolver(), - entity_path, - query, - ) + ) -> Option<(EntityPath, (TimeInt, RowId), C)> { + re_tracing::profile_function!(); + + let mut cur_entity_path = Some(entity_path.clone()); + while let Some(entity_path) = cur_entity_path { + if let Some((index, value)) = self.latest_at_component(&entity_path, query) { + return Some((entity_path, index, value)); + } + cur_entity_path = entity_path.parent(); + } + + None } #[inline] @@ -371,6 +385,7 @@ impl EntityDb { self.times_per_timeline.on_events(&store_events); self.time_histogram_per_timeline.on_events(&store_events); self.query_caches.on_events(&store_events); + self.query_caches2.on_events(&store_events); self.tree.on_store_additions(&store_events); // We inform the stats last, since it measures e2e latency. 
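The rewritten ancestor lookup above is now just a loop over `EntityPath::parent`. Call sites stay a single method call (paths hypothetical):

```rust
use re_chunk::LatestAtQuery;
use re_entity_db::EntityDb;
use re_log_types::{example_components::MyPoint, EntityPath};

// Checks `points/stream/leaf`, then `points/stream`, then `points`, … until
// some ancestor (or the path itself) has `MyPoint` data.
fn closest_point(db: &EntityDb, query: &LatestAtQuery) -> Option<MyPoint> {
    let leaf: EntityPath = "points/stream/leaf".into();
    let (path, (data_time, row_id), point) =
        db.latest_at_component_at_closest_ancestor::<MyPoint>(&leaf, query)?;
    eprintln!("{path}: {point:?} @ ({data_time:?}, {row_id})");
    Some(point)
}
```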
@@ -431,6 +446,7 @@ impl EntityDb { self.times_per_timeline.on_events(store_events); self.query_caches.on_events(store_events); + self.query_caches2.on_events(store_events); self.time_histogram_per_timeline.on_events(store_events); self.tree.on_store_deletions(&self.data_store, store_events); } } diff --git a/crates/store/re_entity_db/src/lib.rs b/crates/store/re_entity_db/src/lib.rs index 2ef69e2b24c2a..ef9ff33b1dfeb 100644 --- a/crates/store/re_entity_db/src/lib.rs +++ b/crates/store/re_entity_db/src/lib.rs @@ -27,6 +27,7 @@ pub use re_log_types::{EntityPath, EntityPathPart, TimeInt, Timeline}; pub mod external { pub use re_chunk_store; pub use re_query; + pub use re_query2; } // ---------------------------------------------------------------------------- diff --git a/crates/store/re_query/Cargo.toml b/crates/store/re_query/Cargo.toml index ba50a64e7db38..c33e4c79a9171 100644 --- a/crates/store/re_query/Cargo.toml +++ b/crates/store/re_query/Cargo.toml @@ -69,11 +69,13 @@ bench = false name = "clamped_zip" required-features = ["codegen"] bench = false +doc = false # we're already documenting the one in `re_query2` [[bin]] name = "range_zip" required-features = ["codegen"] bench = false +doc = false # we're already documenting the one in `re_query2` [[bench]] diff --git a/crates/store/re_query/src/cache.rs b/crates/store/re_query/src/cache.rs index dd838f7869d77..7036376f03544 100644 --- a/crates/store/re_query/src/cache.rs +++ b/crates/store/re_query/src/cache.rs @@ -4,11 +4,12 @@ use std::{ }; use ahash::{HashMap, HashSet}; +use nohash_hasher::IntSet; use parking_lot::RwLock; use re_chunk_store::{ChunkStore, ChunkStoreDiff, ChunkStoreEvent, ChunkStoreSubscriber}; use re_log_types::{EntityPath, ResolvedTimeRange, StoreId, TimeInt, Timeline}; -use re_types_core::ComponentName; +use re_types_core::{components::ClearIsRecursive, ComponentName, Loggable as _}; use crate::{LatestAtCache, RangeCache}; @@ -70,6 +71,14 @@ pub struct Caches { /// The [`StoreId`] of the associated [`ChunkStore`]. pub(crate) store_id: StoreId, + /// Keeps track of which entities have had any `Clear`-related data on any timeline at any + /// point in time. + /// + /// This is used to optimize read-time clears, so that we don't unnecessarily pay for the fixed + /// overhead of all the query layers when we know for a fact that there won't be any data there. + /// This is a huge performance improvement in practice, especially in recordings with many entities. + pub(crate) might_require_clearing: RwLock>, + // NOTE: `Arc` so we can cheaply free the top-level lock early when needed.
    pub(crate) latest_at_per_cache_key: RwLock<HashMap<CacheKey, Arc<RwLock<LatestAtCache>>>>,
@@ -81,12 +90,25 @@ impl std::fmt::Debug for Caches {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         let Self {
             store_id,
+            might_require_clearing,
             latest_at_per_cache_key,
             range_per_cache_key,
         } = self;

         let mut strings = Vec::new();

+        strings.push(format!(
+            "[Entities that must be checked for clears @ {store_id}]\n"
+        ));
+        {
+            let sorted: BTreeSet<EntityPath> =
+                might_require_clearing.read().iter().cloned().collect();
+            for entity_path in sorted {
+                strings.push(format!("  * {entity_path}\n"));
+            }
+            strings.push("\n".to_owned());
+        }
+
         strings.push(format!("[LatestAt @ {store_id}]"));
         {
             let latest_at_per_cache_key = latest_at_per_cache_key.read();
@@ -130,6 +152,7 @@ impl Caches {
     pub fn new(store: &ChunkStore) -> Self {
         Self {
             store_id: store.id().clone(),
+            might_require_clearing: Default::default(),
             latest_at_per_cache_key: Default::default(),
             range_per_cache_key: Default::default(),
         }
@@ -139,10 +162,12 @@ impl Caches {
     pub fn clear(&self) {
         let Self {
             store_id: _,
+            might_require_clearing,
             latest_at_per_cache_key,
             range_per_cache_key,
         } = self;

+        might_require_clearing.write().clear();
         latest_at_per_cache_key.write().clear();
         range_per_cache_key.write().clear();
     }
@@ -223,6 +248,7 @@ impl ChunkStoreSubscriber for Caches {
         }
     }

+        let mut might_require_clearing = self.might_require_clearing.write();
         let caches_latest_at = self.latest_at_per_cache_key.write();
         let caches_range = self.range_per_cache_key.write();
         // NOTE: Don't release the top-level locks -- even though this cannot happen yet with
@@ -237,6 +263,10 @@
             // But since this pretty much never happens in practice, let's not go there until we
             // have metrics showing that we need to.
             for (entity_path, component_name) in compacted.static_ {
+                if component_name == ClearIsRecursive::name() {
+                    might_require_clearing.insert(entity_path.clone());
+                }
+
                 for (key, cache) in caches_latest_at.iter() {
                     if key.entity_path == entity_path && key.component_name == component_name {
                         cache.write().pending_invalidations.insert(TimeInt::STATIC);
@@ -255,6 +285,10 @@
             re_tracing::profile_scope!("temporal");

             for (key, times) in compacted.temporal {
+                if key.component_name == ClearIsRecursive::name() {
+                    might_require_clearing.insert(key.entity_path.clone());
+                }
+
                 if let Some(cache) = caches_latest_at.get(&key) {
                     cache
                         .write()
diff --git a/crates/store/re_query/src/latest_at/query.rs b/crates/store/re_query/src/latest_at/query.rs
index df5c90d1a3b48..f54af6e85cfee 100644
--- a/crates/store/re_query/src/latest_at/query.rs
+++ b/crates/store/re_query/src/latest_at/query.rs
@@ -49,6 +49,13 @@ impl Caches {
         let mut results = LatestAtResults::default();

+        // NOTE: This pre-filtering is extremely important: going through all these query layers
+        // has non-negligible overhead even if the final result ends up being nothing, and our
+        // number of queries for a frame grows linearly with the number of entity paths.
+ let component_names = component_names.into_iter().filter(|component_name| { + store.entity_has_component_on_timeline(&query.timeline(), entity_path, component_name) + }); + // Query-time clears // ----------------- // @@ -70,8 +77,22 @@ impl Caches { { re_tracing::profile_scope!("clears"); + let potential_clears = self.might_require_clearing.read(); + let mut clear_entity_path = entity_path.clone(); loop { + if !potential_clears.contains(&clear_entity_path) { + // This entity does not contain any `Clear`-related data at all, there's no + // point in running actual queries. + + let Some(parent_entity_path) = clear_entity_path.parent() else { + break; + }; + clear_entity_path = parent_entity_path; + + continue; + } + let key = CacheKey::new( clear_entity_path.clone(), query.timeline(), @@ -297,17 +318,19 @@ pub fn latest_at( entity_path: &EntityPath, component_name: ComponentName, ) -> Option<(TimeInt, RowId, Box)> { - store + let ((data_time, row_id), unit) = store .latest_at_relevant_chunks(query, entity_path, component_name) .into_iter() - .flat_map(|chunk| { + .filter_map(|chunk| { chunk .latest_at(query, component_name) - .iter_rows(&query.timeline(), &component_name) - .collect_vec() + .into_unit() + .and_then(|chunk| chunk.index(&query.timeline()).map(|index| (index, chunk))) }) - .max_by_key(|(data_time, row_id, _)| (*data_time, *row_id)) - .and_then(|(data_time, row_id, array)| array.map(|array| (data_time, row_id, array))) + .max_by_key(|(index, _chunk)| *index)?; + + unit.component_batch_raw(&component_name) + .map(|array| (data_time, row_id, array)) } impl LatestAtCache { diff --git a/crates/store/re_query/src/range/query.rs b/crates/store/re_query/src/range/query.rs index 932902a85e382..7a3862992a17f 100644 --- a/crates/store/re_query/src/range/query.rs +++ b/crates/store/re_query/src/range/query.rs @@ -30,6 +30,13 @@ impl Caches { let mut results = RangeResults::new(query.clone()); + // NOTE: This pre-filtering is extremely important: going through all these query layers + // has non-negligible overhead even if the final result ends up being nothing, and our + // number of queries for a frame grows linearly with the number of entity paths. 
+ let component_names = component_names.into_iter().filter(|component_name| { + store.entity_has_component_on_timeline(&query.timeline(), entity_path, component_name) + }); + for component_name in component_names { let key = CacheKey::new(entity_path.clone(), query.timeline(), component_name); @@ -209,19 +216,19 @@ pub fn range<'a>( query: &'a RangeQuery, entity_path: &EntityPath, component_name: ComponentName, -) -> impl Iterator)> + 'a { +) -> impl Iterator)> + 'a { store .range_relevant_chunks(query, entity_path, component_name) .into_iter() .map(move |chunk| chunk.range(query, component_name)) .filter(|chunk| !chunk.is_empty()) .flat_map(move |chunk| { - chunk - .iter_rows(&query.timeline(), &component_name) - .filter_map(|(data_time, row_id, array)| { - array.map(|array| (data_time, row_id, array)) - }) - .collect_vec() + itertools::izip!( + chunk + .iter_component_indices(&query.timeline(), &component_name) + .collect_vec(), + chunk.iter_component_arrays(&component_name).collect_vec(), + ) }) } @@ -248,7 +255,8 @@ impl RangeCache { if let Some(query_front) = query_front.as_ref() { re_tracing::profile_scope!("front"); - for (data_time, row_id, array) in range(store, query_front, entity_path, component_name) + for ((data_time, row_id), array) in + range(store, query_front, entity_path, component_name) { per_data_time .promises_front @@ -265,9 +273,10 @@ impl RangeCache { if let Some(query_back) = per_data_time.compute_back_query(query, query_front.as_ref()) { re_tracing::profile_scope!("back"); - for (data_time, row_id, array) in range(store, &query_back, entity_path, component_name) - // If there's static data to be found, the front query will take care of it already. - .filter(|(data_time, _, _)| !data_time.is_static()) + for ((data_time, row_id), array) in + range(store, &query_back, entity_path, component_name) + // If there's static data to be found, the front query will take care of it already. + .filter(|((data_time, _), _)| !data_time.is_static()) { per_data_time .promises_back diff --git a/crates/store/re_query2/Cargo.toml b/crates/store/re_query2/Cargo.toml new file mode 100644 index 0000000000000..f09c631c2b5fb --- /dev/null +++ b/crates/store/re_query2/Cargo.toml @@ -0,0 +1,81 @@ +[package] +name = "re_query2" +authors.workspace = true +description = "High-level query APIs" +edition.workspace = true +homepage.workspace = true +include.workspace = true +license.workspace = true +publish = true +readme = "README.md" +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[lints] +workspace = true + +[package.metadata.docs.rs] +all-features = true + + +[features] +default = [] + +## Enable codegen helper binaries (generates ClampedZip & RangeZip implementations). 
+codegen = [] + + +[dependencies] +# Rerun dependencies: +re_chunk.workspace = true +re_chunk_store.workspace = true +re_error.workspace = true +re_format.workspace = true +re_log.workspace = true +re_log_types.workspace = true +re_tracing.workspace = true +re_tuid.workspace = true +re_types_core.workspace = true + +# External dependencies: +ahash.workspace = true +anyhow.workspace = true +arrow2.workspace = true +backtrace.workspace = true +indent.workspace = true +indexmap.workspace = true +itertools.workspace = true +nohash-hasher.workspace = true +parking_lot.workspace = true +paste.workspace = true +seq-macro.workspace = true +static_assertions.workspace = true +thiserror.workspace = true + + +[dev-dependencies] +criterion.workspace = true +mimalloc.workspace = true +rand = { workspace = true, features = ["std", "std_rng"] } +re_types.workspace = true +similar-asserts.workspace = true + +[lib] +bench = false + + +[[bin]] +name = "clamped_zip" +required-features = ["codegen"] +bench = false + +[[bin]] +name = "range_zip" +required-features = ["codegen"] +bench = false + + +[[bench]] +name = "latest_at" +harness = false diff --git a/crates/store/re_query2/README.md b/crates/store/re_query2/README.md new file mode 100644 index 0000000000000..906e0113fc51e --- /dev/null +++ b/crates/store/re_query2/README.md @@ -0,0 +1,10 @@ +# re_query2 + +Part of the [`rerun`](https://github.com/rerun-io/rerun) family of crates. + +[![Latest version](https://img.shields.io/crates/v/re_query2.svg)](https://crates.io/crates/re_query2) +[![Documentation](https://docs.rs/re_query2/badge.svg)](https://docs.rs/re_query2) +![MIT](https://img.shields.io/badge/license-MIT-blue.svg) +![Apache](https://img.shields.io/badge/license-Apache-blue.svg) + +High-level query APIs. 
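The README is terse, so a concrete sketch of the crate's instance-level join primitive may help. `clamped_zip_1x1` is one of the generated combinators that appear later in this patch; the data below is made up:

use re_query2::clamped_zip_1x1;

fn main() {
    // The required iterator drives the output length; the optional one is
    // "clamped": once it runs dry, its latest value is repeated.
    let points = ["p0", "p1", "p2"];
    let colors = ["red"];
    let joined: Vec<_> = clamped_zip_1x1(points, colors, || "default").collect();
    assert_eq!(joined, vec![("p0", "red"), ("p1", "red"), ("p2", "red")]);

    // If the optional iterator is empty from the start, the default fn kicks in.
    let no_colors: [&str; 0] = [];
    let joined: Vec<_> = clamped_zip_1x1(points, no_colors, || "default").collect();
    assert_eq!(
        joined,
        vec![("p0", "default"), ("p1", "default"), ("p2", "default")]
    );
}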
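The time-based counterpart is the range-zip family, also generated in this patch, which joins streams of `(index, data)` pairs: required streams drive the output, while optional streams accumulate and yield their most recent value at each required index. A sketch with made-up indices, assuming the 1x1 arity (which the generator's `1..3` x `1..10` ranges do produce):

use re_query2::range_zip_1x1;

fn main() {
    let points = [(1, "p@1"), (3, "p@3"), (5, "p@5")]; // required
    let colors = [(2, "red"), (3, "green")]; // optional
    let joined: Vec<_> = range_zip_1x1(points, colors).collect();
    assert_eq!(
        joined,
        vec![
            (1, "p@1", None),          // nothing logged at or before index 1
            (3, "p@3", Some("green")), // "red" @ 2 is superseded by "green" @ 3
            (5, "p@5", Some("green")), // the latest value carries forward
        ]
    );
}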
diff --git a/crates/store/re_query2/benches/latest_at.rs b/crates/store/re_query2/benches/latest_at.rs new file mode 100644 index 0000000000000..561ddf097a449 --- /dev/null +++ b/crates/store/re_query2/benches/latest_at.rs @@ -0,0 +1,335 @@ +// Allow unwrap() in benchmarks +#![allow(clippy::unwrap_used)] + +use std::sync::Arc; + +use criterion::{criterion_group, criterion_main, Criterion}; +use itertools::Itertools; + +use re_chunk::{Chunk, RowId}; +use re_chunk_store::{ChunkStore, ChunkStoreSubscriber, LatestAtQuery}; +use re_log_types::{entity_path, EntityPath, TimeInt, TimeType, Timeline}; +use re_query2::clamped_zip_1x1; +use re_query2::{Caches, LatestAtResults}; +use re_types::{ + archetypes::Points2D, + components::{Color, Position2D, Text}, + Archetype as _, +}; + +// --- + +// `cargo test` also runs the benchmark setup code, so make sure they run quickly: +#[cfg(debug_assertions)] +mod constants { + pub const NUM_FRAMES_POINTS: u32 = 1; + pub const NUM_POINTS: u32 = 1; + pub const NUM_FRAMES_STRINGS: u32 = 1; + pub const NUM_STRINGS: u32 = 1; +} + +#[cfg(not(debug_assertions))] +mod constants { + pub const NUM_FRAMES_POINTS: u32 = 1_000; + pub const NUM_POINTS: u32 = 1_000; + pub const NUM_FRAMES_STRINGS: u32 = 1_000; + pub const NUM_STRINGS: u32 = 1_000; +} + +#[allow(clippy::wildcard_imports)] +use self::constants::*; + +// --- + +#[global_allocator] +static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc; + +criterion_group!( + benches, + mono_points, + mono_strings, + batch_points, + batch_strings +); +criterion_main!(benches); + +// --- + +fn mono_points(c: &mut Criterion) { + // Each mono point gets logged at a different path + let paths = (0..NUM_POINTS) + .map(move |point_idx| entity_path!("points", point_idx)) + .collect_vec(); + let msgs = build_points_chunks(&paths, 1); + + { + let mut group = c.benchmark_group("arrow_mono_points2"); + // Mono-insert is slow -- decrease the sample size + group.sample_size(10); + group.throughput(criterion::Throughput::Elements( + (NUM_POINTS * NUM_FRAMES_POINTS) as _, + )); + group.bench_function("insert", |b| { + b.iter(|| insert_chunks(msgs.iter())); + }); + } + + { + let mut group = c.benchmark_group("arrow_mono_points2"); + group.throughput(criterion::Throughput::Elements(NUM_POINTS as _)); + let (caches, store) = insert_chunks(msgs.iter()); + group.bench_function("query", |b| { + b.iter(|| query_and_visit_points(&caches, &store, &paths)); + }); + } +} + +fn mono_strings(c: &mut Criterion) { + // Each mono string gets logged at a different path + let paths = (0..NUM_STRINGS) + .map(move |string_idx| entity_path!("strings", string_idx)) + .collect_vec(); + let msgs = build_strings_chunks(&paths, 1); + + { + let mut group = c.benchmark_group("arrow_mono_strings2"); + group.sample_size(10); + group.throughput(criterion::Throughput::Elements( + (NUM_STRINGS * NUM_FRAMES_STRINGS) as _, + )); + group.bench_function("insert", |b| { + b.iter(|| insert_chunks(msgs.iter())); + }); + } + + { + let mut group = c.benchmark_group("arrow_mono_strings2"); + group.throughput(criterion::Throughput::Elements(NUM_POINTS as _)); + let (caches, store) = insert_chunks(msgs.iter()); + group.bench_function("query", |b| { + b.iter(|| query_and_visit_strings(&caches, &store, &paths)); + }); + } +} + +fn batch_points(c: &mut Criterion) { + // Batch points are logged together at a single path + let paths = [EntityPath::from("points")]; + let msgs = build_points_chunks(&paths, NUM_POINTS as _); + + { + let mut group = c.benchmark_group("arrow_batch_points2"); + 
group.throughput(criterion::Throughput::Elements( + (NUM_POINTS * NUM_FRAMES_POINTS) as _, + )); + group.bench_function("insert", |b| { + b.iter(|| insert_chunks(msgs.iter())); + }); + } + + { + let mut group = c.benchmark_group("arrow_batch_points2"); + group.throughput(criterion::Throughput::Elements(NUM_POINTS as _)); + let (caches, store) = insert_chunks(msgs.iter()); + group.bench_function("query", |b| { + b.iter(|| query_and_visit_points(&caches, &store, &paths)); + }); + } +} + +fn batch_strings(c: &mut Criterion) { + // Batch strings are logged together at a single path + let paths = [EntityPath::from("points")]; + let msgs = build_strings_chunks(&paths, NUM_STRINGS as _); + + { + let mut group = c.benchmark_group("arrow_batch_strings2"); + group.throughput(criterion::Throughput::Elements( + (NUM_STRINGS * NUM_FRAMES_STRINGS) as _, + )); + group.bench_function("insert", |b| { + b.iter(|| insert_chunks(msgs.iter())); + }); + } + + { + let mut group = c.benchmark_group("arrow_batch_strings2"); + group.throughput(criterion::Throughput::Elements(NUM_POINTS as _)); + let (caches, store) = insert_chunks(msgs.iter()); + group.bench_function("query", |b| { + b.iter(|| query_and_visit_strings(&caches, &store, &paths)); + }); + } +} + +// --- Helpers --- + +pub fn build_some_point2d(len: usize) -> Vec { + use rand::Rng as _; + let mut rng = rand::thread_rng(); + + (0..len) + .map(|_| Position2D::new(rng.gen_range(0.0..10.0), rng.gen_range(0.0..10.0))) + .collect() +} + +/// Create `len` dummy colors +pub fn build_some_colors(len: usize) -> Vec { + (0..len).map(|i| Color::from(i as u32)).collect() +} + +/// Build a ([`Timeline`], [`TimeInt`]) tuple from `frame_nr` suitable for inserting in a [`re_log_types::TimePoint`]. +pub fn build_frame_nr(frame_nr: TimeInt) -> (Timeline, TimeInt) { + (Timeline::new("frame_nr", TimeType::Sequence), frame_nr) +} + +pub fn build_some_strings(len: usize) -> Vec { + use rand::Rng as _; + let mut rng = rand::thread_rng(); + + (0..len) + .map(|_| { + let ilen: usize = rng.gen_range(0..100); + let s: String = rand::thread_rng() + .sample_iter(&rand::distributions::Alphanumeric) + .take(ilen) + .map(char::from) + .collect(); + Text::from(s) + }) + .collect() +} + +fn build_points_chunks(paths: &[EntityPath], num_points: usize) -> Vec> { + paths + .iter() + .map(|path| { + let mut builder = Chunk::builder(path.clone()); + for frame_idx in 0..NUM_FRAMES_POINTS { + builder = builder.with_component_batches( + RowId::new(), + [build_frame_nr((frame_idx as i64).try_into().unwrap())], + [ + &build_some_point2d(num_points) as _, + &build_some_colors(num_points) as _, + ], + ); + } + Arc::new(builder.build().unwrap()) + }) + .collect() +} + +fn build_strings_chunks(paths: &[EntityPath], num_strings: usize) -> Vec> { + paths + .iter() + .map(|path| { + let mut builder = Chunk::builder(path.clone()); + for frame_idx in 0..NUM_FRAMES_POINTS { + builder = builder.with_component_batches( + RowId::new(), + [build_frame_nr((frame_idx as i64).try_into().unwrap())], + [ + // We still need to create points because they are the primary for the + // archetype query we want to do. We won't actually deserialize the points + // during the query -- we just need it for the primary keys. + // TODO(jleibs): switch this to use `TextEntry` once the new type has + // landed. 
+ &build_some_point2d(num_strings) as _, + &build_some_strings(num_strings) as _, + ], + ); + } + Arc::new(builder.build().unwrap()) + }) + .collect() +} + +fn insert_chunks<'a>(msgs: impl Iterator>) -> (Caches, ChunkStore) { + let mut store = ChunkStore::new( + re_log_types::StoreId::random(re_log_types::StoreKind::Recording), + Default::default(), + ); + let mut caches = Caches::new(&store); + + msgs.for_each(|chunk| { + caches.on_events(&store.insert_chunk(chunk).unwrap()); + }); + + (caches, store) +} + +struct SavePoint { + _pos: Position2D, + _color: Option, +} + +fn query_and_visit_points( + caches: &Caches, + store: &ChunkStore, + paths: &[EntityPath], +) -> Vec { + let timeline_frame_nr = Timeline::new("frame_nr", TimeType::Sequence); + let query = LatestAtQuery::new(timeline_frame_nr, NUM_FRAMES_POINTS as i64 / 2); + + let mut ret = Vec::with_capacity(NUM_POINTS as _); + + // TODO(jleibs): Add Radius once we have support for it in field_types + for entity_path in paths { + let results: LatestAtResults = caches.latest_at( + store, + &query, + entity_path, + Points2D::all_components().iter().copied(), // no generics! + ); + + let points = results.component_batch_quiet::().unwrap(); + let colors = results.component_batch_quiet::().unwrap_or_default(); + let color_default_fn = || Color::from(0xFF00FFFF); + + for (point, color) in clamped_zip_1x1(points, colors, color_default_fn) { + ret.push(SavePoint { + _pos: point, + _color: Some(color), + }); + } + } + assert_eq!(NUM_POINTS as usize, ret.len()); + ret +} + +struct SaveString { + _label: Option, +} + +fn query_and_visit_strings( + caches: &Caches, + store: &ChunkStore, + paths: &[EntityPath], +) -> Vec { + let timeline_frame_nr = Timeline::new("frame_nr", TimeType::Sequence); + let query = LatestAtQuery::new(timeline_frame_nr, NUM_FRAMES_STRINGS as i64 / 2); + + let mut strings = Vec::with_capacity(NUM_STRINGS as _); + + for entity_path in paths { + let results: LatestAtResults = caches.latest_at( + store, + &query, + entity_path, + Points2D::all_components().iter().copied(), // no generics! 
+ ); + + let points = results.component_batch_quiet::().unwrap(); + let labels = results.component_batch_quiet::().unwrap_or_default(); + let label_default_fn = || Text(String::new().into()); + + for (_point, label) in clamped_zip_1x1(points, labels, label_default_fn) { + strings.push(SaveString { + _label: Some(label), + }); + } + } + assert_eq!(NUM_STRINGS as usize, strings.len()); + criterion::black_box(strings) +} diff --git a/crates/store/re_query2/examples/latest_at.rs b/crates/store/re_query2/examples/latest_at.rs new file mode 100644 index 0000000000000..32fc107977ece --- /dev/null +++ b/crates/store/re_query2/examples/latest_at.rs @@ -0,0 +1,131 @@ +use std::sync::Arc; + +use anyhow::Context; +use arrow2::array::PrimitiveArray as ArrowPrimitiveArray; +use itertools::Itertools; + +use re_chunk::{Chunk, RowId}; +use re_chunk_store::{ChunkStore, LatestAtQuery}; +use re_log_types::example_components::{MyColor, MyLabel, MyPoint, MyPoints}; +use re_log_types::{build_frame_nr, Timeline}; +use re_types::{ComponentBatch, Loggable as _}; +use re_types_core::Archetype as _; + +use re_query2::{clamped_zip_1x2, LatestAtResults}; + +// --- + +fn main() -> anyhow::Result<()> { + let store = store()?; + eprintln!("store:\n{store}"); + + let entity_path = "points"; + let timeline = Timeline::new_sequence("frame_nr"); + let query = LatestAtQuery::latest(timeline); + eprintln!("query:{query:?}"); + + let caches = re_query2::Caches::new(&store); + + // First, get the (potentially cached) results for this query. + let results: LatestAtResults = caches.latest_at( + &store, + &query, + &entity_path.into(), + MyPoints::all_components().iter().copied(), // no generics! + ); + + // The results can be accessed either through the low-level Chunk APIs, or the higher-level helpers. + + // Example of accessing the data using the higher-level APIs. + // + // These APIs will log errors instead of returning them. + { + let points = results.component_batch::().context("missing")?; + let colors = results.component_batch::().unwrap_or_default(); + let labels = results.component_batch::().unwrap_or_default(); + + // Then apply your instance-level joining logic, if any: + let color_default_fn = || MyColor(0xFF00FFFF); + let label_default_fn = || MyLabel("N/A".to_owned()); + let results = clamped_zip_1x2(points, colors, color_default_fn, labels, label_default_fn) + .collect_vec(); + + eprintln!("results 1:\n{results:#?}"); + } + + // Example of accessing the data using the Chunk APIs. + // + // Because a latest-at query can only ever return a single row's worth of data for each + // individual component, the chunks returned here will be so-called unit chunks, which are + // guaranteed to only contain a single row. + { + // * `get_required` returns an error if the chunk is missing. + // * `get` returns an option. + let points = results.get_required(&MyPoint::name())?; + let colors = results.get(&MyColor::name()); + let labels = results.get(&MyLabel::name()); + + // You can always use the standard deserialization path: + let points = points.component_batch::().context("missing")??; + let labels = labels + .and_then(|unit| unit.component_batch::()?.ok()) + .unwrap_or_default(); + + // Or, if you want every last bit of performance you can get, you can manipulate the raw + // data directly: + let colors = colors + .context("missing")? 
+ .component_batch_raw(&MyColor::name()) + .context("invalid")?; + let colors = colors + .as_any() + .downcast_ref::>() + .context("invalid")?; + let colors = colors + .values() + .as_slice() + .iter() + .map(|&color| MyColor(color)); + + // And finally apply your instance-level joining logic, if any: + let color_default_fn = || MyColor(0xFF00FFFF); + let label_default_fn = || MyLabel("N/A".to_owned()); + let results = clamped_zip_1x2(points, colors, color_default_fn, labels, label_default_fn) + .collect_vec(); + + eprintln!("results 2:\n{results:#?}"); + } + + Ok(()) +} + +// --- + +fn store() -> anyhow::Result { + let mut store = ChunkStore::new( + re_log_types::StoreId::random(re_log_types::StoreKind::Recording), + Default::default(), + ); + + let entity_path = "points"; + + { + let timepoint = [build_frame_nr(123)]; + + let chunk = Chunk::builder(entity_path.into()) + .with_component_batches( + RowId::new(), + timepoint, + [ + &[MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)] as &dyn ComponentBatch, // + &[MyColor::from_rgb(255, 0, 0)], + &[MyLabel("a".into()), MyLabel("b".into())], + ], + ) + .build()?; + + store.insert_chunk(&Arc::new(chunk))?; + } + + Ok(store) +} diff --git a/crates/store/re_query2/examples/range.rs b/crates/store/re_query2/examples/range.rs new file mode 100644 index 0000000000000..acfe88500c8cd --- /dev/null +++ b/crates/store/re_query2/examples/range.rs @@ -0,0 +1,157 @@ +use std::sync::Arc; + +use itertools::{izip, Itertools}; +use re_chunk::{Chunk, RowId}; +use re_chunk_store::{ChunkStore, RangeQuery}; +use re_log_types::example_components::{MyColor, MyLabel, MyPoint, MyPoints}; +use re_log_types::{build_frame_nr, ResolvedTimeRange, TimeType, Timeline}; +use re_types::ComponentBatch; +use re_types_core::{Archetype as _, Loggable as _}; + +use re_query2::{clamped_zip_1x2, range_zip_1x2, RangeResults}; + +// --- + +fn main() -> anyhow::Result<()> { + let store = store()?; + eprintln!("store:\n{store}"); + + let entity_path = "points"; + let timeline = Timeline::new("frame_nr", TimeType::Sequence); + let query = RangeQuery::new(timeline, ResolvedTimeRange::EVERYTHING); + eprintln!("query:{query:?}"); + + let caches = re_query2::Caches::new(&store); + + // First, get the (potentially cached) results for this query. + let results: RangeResults = caches.range( + &store, + &query, + &entity_path.into(), + MyPoints::all_components().iter().copied(), // no generics! + ); + + // * `get_required` returns an error if the chunk is missing. + // * `get` returns an option. + let all_points_chunks = results.get_required(&MyPoint::name())?; + let all_colors_chunks = results.get(&MyColor::name()); + let all_labels_chunks = results.get(&MyLabel::name()); + + // You can always use the standard deserialization path. + // + // The underlying operator is optimized to only pay the cost of downcasting and deserialization + // once for the whole column, and will then return references into that data. + // This is why you have to process the data in two-steps: the iterator needs to have somewhere + // to reference to. 
+ let mut all_points_iters = all_points_chunks + .iter() + .map(|chunk| chunk.iter_component::()) + .collect_vec(); + let all_points_indexed = { + let all_points = all_points_iters.iter_mut().flat_map(|it| it.into_iter()); + let all_points_indices = all_points_chunks + .iter() + .flat_map(|chunk| chunk.iter_component_indices(&query.timeline(), &MyPoint::name())); + izip!(all_points_indices, all_points) + }; + let mut all_labels_iters = all_labels_chunks + .unwrap_or_default() + .iter() + .map(|chunk| chunk.iter_component::()) + .collect_vec(); + let all_labels_indexed = { + let all_labels = all_labels_iters.iter_mut().flat_map(|it| it.into_iter()); + let all_labels_indices = all_labels_chunks + .unwrap_or_default() + .iter() + .flat_map(|chunk| chunk.iter_component_indices(&query.timeline(), &MyLabel::name())); + izip!(all_labels_indices, all_labels) + }; + + // Or, if you want every last bit of performance you can get, you can manipulate the raw + // data directly: + let all_colors_indexed = all_colors_chunks + .unwrap_or_default() + .iter() + .flat_map(|chunk| { + itertools::izip!( + chunk.iter_component_indices(&query.timeline(), &MyColor::name()), + chunk.iter_primitive::(&MyColor::name()), + ) + }); + + // Zip the results together using a stateful time-based join. + let all_frames = range_zip_1x2(all_points_indexed, all_colors_indexed, all_labels_indexed); + + // And finally inspect our final results: + { + let color_default_fn = || Some(MyColor(0xFF00FFFF)); + let label_default_fn = || None; + + eprintln!("results:"); + for ((data_time, row_id), points, colors, labels) in all_frames { + let colors = colors.unwrap_or(&[]).iter().map(|c| Some(MyColor(*c))); + let labels = labels.unwrap_or(&[]).iter().cloned().map(Some); + + // Apply your instance-level joining logic, if any: + let results = + clamped_zip_1x2(points, colors, color_default_fn, labels, label_default_fn) + .collect_vec(); + eprintln!("{data_time:?} @ {row_id}:\n {results:?}"); + } + } + + Ok(()) +} + +// --- + +fn store() -> anyhow::Result { + let mut store = ChunkStore::new( + re_log_types::StoreId::random(re_log_types::StoreKind::Recording), + Default::default(), + ); + + let entity_path = "points"; + + { + let timepoint = [build_frame_nr(123)]; + + let chunk = Chunk::builder(entity_path.into()) + .with_component_batches( + RowId::new(), + timepoint, + [ + &[MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)] as &dyn ComponentBatch, // + &[MyColor::from_rgb(255, 0, 0)], + &[MyLabel("a".into()), MyLabel("b".into())], + ], + ) + .build()?; + + store.insert_chunk(&Arc::new(chunk))?; + } + + { + let timepoint = [build_frame_nr(423)]; + + let chunk = Chunk::builder(entity_path.into()) + .with_component_batches( + RowId::new(), + timepoint, + [ + &[ + MyPoint::new(10.0, 20.0), + MyPoint::new(30.0, 40.0), + MyPoint::new(50.0, 60.0), + ] as &dyn ComponentBatch, // + &[MyColor::from_rgb(255, 0, 0), MyColor::from_rgb(0, 0, 255)], + ], + ) + .build()?; + + store.insert_chunk(&Arc::new(chunk))?; + } + + Ok(store) +} diff --git a/crates/store/re_query2/src/bin/clamped_zip.rs b/crates/store/re_query2/src/bin/clamped_zip.rs new file mode 100644 index 0000000000000..d213cbdb3bbc4 --- /dev/null +++ b/crates/store/re_query2/src/bin/clamped_zip.rs @@ -0,0 +1,354 @@ +//! CLI tool to generate `ClampedZip` implementations of different arities. 
+ +use itertools::{izip, Itertools}; + +struct Params { + num_required: usize, + num_optional: usize, +} + +impl Params { + fn to_num_required(&self) -> String { + self.num_required.to_string() + } + + fn to_num_optional(&self) -> String { + self.num_optional.to_string() + } + + /// `1x3`, `2x2`… + fn to_suffix(&self) -> String { + format!("{}x{}", self.to_num_required(), self.to_num_optional()) + } + + /// `r0, r1, r2…`. + fn to_required_names(&self) -> Vec { + (0..self.num_required) + .map(|n| format!("r{n}")) + .collect_vec() + } + + /// `R0, R1, R2…`. + fn to_required_types(&self) -> Vec { + self.to_required_names() + .into_iter() + .map(|s| s.to_uppercase()) + .collect() + } + + /// `r0: R0, r1: R1, r2: R2…`. + fn to_required_params(&self) -> Vec { + izip!(self.to_required_names(), self.to_required_types()) + .map(|(n, t)| format!("{n}: {t}")) + .collect() + } + + /// `R0: (Into)Iterator, R1: (Into)Iterator, R2: (Into)Iterator…` + fn to_required_clauses(&self, into: bool) -> Vec { + let trait_name = if into { "IntoIterator" } else { "Iterator" }; + self.to_required_types() + .into_iter() + .map(|t| format!("{t}: {trait_name}")) + .collect() + } + + /// `o0, o1, o2…`. + fn to_optional_names(&self) -> Vec { + (0..self.num_optional) + .map(|n| format!("o{n}")) + .collect_vec() + } + + /// `O0, O1, O2…`. + fn to_optional_types(&self) -> Vec { + self.to_optional_names() + .into_iter() + .map(|s| s.to_uppercase()) + .collect() + } + + /// `o0: O0, o1: O1, o2: O2…`. + fn to_optional_params(&self) -> Vec { + izip!(self.to_optional_names(), self.to_optional_types()) + .map(|(n, t)| format!("{n}: {t}")) + .collect() + } + + /// `O0: IntoIterator, O0::Item: Clone, O1: IntoIterator, O1::Item: Clone…` + fn to_optional_clauses(&self, into: bool) -> Vec { + let trait_name = if into { "IntoIterator" } else { "Iterator" }; + self.to_optional_types() + .into_iter() + .map(|t| format!("{t}: {trait_name}, {t}::Item: Clone")) + .collect() + } + + /// `o0_default_fn, o1_default_fn, o2_default_fn…`. + fn to_optional_fn_names(&self) -> Vec { + (0..self.num_optional) + .map(|n| format!("o{n}_default_fn")) + .collect_vec() + } + + /// `D0, D1, D2…`. + fn to_optional_fn_types(&self) -> Vec { + (0..self.num_optional) + .map(|n| format!("D{n}")) + .collect_vec() + } + + /// `o0_default_fn: D0, o1_default_fn: D1…`. 
+ fn to_optional_fn_params(&self) -> Vec { + izip!(self.to_optional_fn_names(), self.to_optional_fn_types()) + .map(|(n, t)| format!("{n}: {t}")) + .collect() + } + + /// `D0: Fn() -> O0::Item, D1: Fn() -> O1::Item…` + fn to_optional_fn_clauses(&self) -> Vec { + izip!(self.to_optional_fn_types(), self.to_optional_types()) + .map(|(tl, tr)| format!("{tl}: Fn() -> {tr}::Item")) + .collect() + } +} + +fn backticked(strs: impl IntoIterator) -> Vec { + strs.into_iter().map(|s| format!("`{s}`")).collect() +} + +fn generate_helper_func(params: &Params) -> String { + let suffix = params.to_suffix(); + let required_names = backticked(params.to_required_names()).join(", "); + let optional_names = backticked(params.to_optional_names()).join(", "); + let optional_fn_names = backticked(params.to_optional_fn_names()).join(", "); + let required_types = params.to_required_types().join(", "); + let optional_types = params.to_optional_types().join(", "); + let optional_fn_types = params.to_optional_fn_types().join(", "); + let required_clauses = params.to_required_clauses(true /* into */).join(", "); + let optional_clauses = params.to_optional_clauses(true /* into */).join(", "); + let optional_fn_clauses = params.to_optional_fn_clauses().join(", "); + let required_params = params.to_required_params().join(", "); + let optional_params = izip!(params.to_optional_params(), params.to_optional_fn_params()) + .map(|(o, d)| format!("{o}, {d}")) + .collect_vec() + .join(",\n"); + + let ret_clause = params + .to_required_types() + .into_iter() + .map(|r| format!("{r}::IntoIter")) + .chain( + params + .to_optional_types() + .into_iter() + .map(|o| format!("{o}::IntoIter")), + ) + .chain(params.to_optional_fn_types()) + .collect_vec() + .join(", "); + + let ret = params + .to_required_names() + .into_iter() + .map(|r| format!("{r}: {r}.into_iter()")) + .chain( + params + .to_optional_names() + .into_iter() + .map(|o| format!("{o}: {o}.into_iter()")), + ) + .chain(params.to_optional_fn_names()) + .chain( + params + .to_optional_names() + .into_iter() + .map(|o| format!("{o}_latest_value: None")), + ) + .collect_vec() + .join(",\n"); + + format!( + r#" + /// Returns a new [`ClampedZip{suffix}`] iterator. + /// + /// The number of elements in a clamped zip iterator corresponds to the number of elements in the + /// shortest of its required iterators ({required_names}). + /// + /// Optional iterators ({optional_names}) will repeat their latest values if they happen to be too short + /// to be zipped with the shortest of the required iterators. + /// + /// If an optional iterator is not only too short but actually empty, its associated default function + /// ({optional_fn_names}) will be executed and the resulting value repeated as necessary. 
+ pub fn clamped_zip_{suffix}<{required_types}, {optional_types}, {optional_fn_types}>( + {required_params}, + {optional_params}, + ) -> ClampedZip{suffix}<{ret_clause}> + where + {required_clauses}, + {optional_clauses}, + {optional_fn_clauses}, + {{ + ClampedZip{suffix} {{ + {ret} + }} + }} + "# + ) +} + +fn generate_struct(params: &Params) -> String { + let suffix = params.to_suffix(); + let required_types = params.to_required_types().join(", "); + let optional_types = params.to_optional_types().join(", "); + let optional_fn_types = params.to_optional_fn_types().join(", "); + let required_clauses = params.to_required_clauses(false /* into */).join(", "); + let optional_clauses = params.to_optional_clauses(false /* into */).join(", "); + let optional_fn_clauses = params.to_optional_fn_clauses().join(", "); + let required_params = params.to_required_params().join(", "); + let optional_params = params.to_optional_params().join(", "); + let optional_fn_params = params.to_optional_fn_params().join(", "); + + let latest_values = izip!(params.to_optional_names(), params.to_optional_types()) + .map(|(n, t)| format!("{n}_latest_value: Option<{t}::Item>")) + .collect_vec() + .join(",\n"); + + format!( + r#" + /// Implements a clamped zip iterator combinator with 2 required iterators and 2 optional + /// iterators. + /// + /// See [`clamped_zip_{suffix}`] for more information. + pub struct ClampedZip{suffix}<{required_types}, {optional_types}, {optional_fn_types}> + where + {required_clauses}, + {optional_clauses}, + {optional_fn_clauses}, + {{ + {required_params}, + {optional_params}, + {optional_fn_params}, + + {latest_values} + }} + "# + ) +} + +fn generate_impl(params: &Params) -> String { + let suffix = params.to_suffix(); + let required_types = params.to_required_types().join(", "); + let optional_types = params.to_optional_types().join(", "); + let optional_fn_types = params.to_optional_fn_types().join(", "); + let required_clauses = params.to_required_clauses(false /* into */).join(", "); + let optional_clauses = params.to_optional_clauses(false /* into */).join(", "); + let optional_fn_clauses = params.to_optional_fn_clauses().join(", "); + + let items = params + .to_required_types() + .into_iter() + .map(|r| format!("{r}::Item")) + .chain( + params + .to_optional_types() + .into_iter() + .map(|o| format!("{o}::Item")), + ) + .collect_vec() + .join(", "); + + let next = + params + .to_required_names() + .into_iter() + .map(|r| format!("let {r}_next = self.{r}.next()?;")) + .chain(params.to_optional_names().into_iter().map(|o| { + format!("let {o}_next = self.{o}.next().or(self.{o}_latest_value.take());") + })) + .collect_vec() + .join("\n"); + + let update_latest = params + .to_optional_names() + .into_iter() + .map(|o| format!("self.{o}_latest_value.clone_from(&{o}_next);")) + .collect_vec() + .join("\n"); + + let ret = params + .to_required_names() + .into_iter() + .map(|r| format!("{r}_next")) + .chain( + params + .to_optional_names() + .into_iter() + .map(|o| format!("{o}_next.unwrap_or_else(|| (self.{o}_default_fn)())")), + ) + .collect_vec() + .join(",\n"); + + format!( + r#" + impl<{required_types}, {optional_types}, {optional_fn_types}> Iterator for ClampedZip{suffix}<{required_types}, {optional_types}, {optional_fn_types}> + where + {required_clauses}, + {optional_clauses}, + {optional_fn_clauses}, + {{ + type Item = ({items}); + + #[inline] + fn next(&mut self) -> Option {{ + {next} + + {update_latest} + + Some(( + {ret} + )) + }} + }} + "# + ) +} + +fn main() { + let 
num_required = 1..3; + let num_optional = 1..10; + + let output = num_required + .flat_map(|num_required| { + num_optional + .clone() + .map(move |num_optional| (num_required, num_optional)) + }) + .flat_map(|(num_required, num_optional)| { + let params = Params { + num_required, + num_optional, + }; + + [ + generate_helper_func(¶ms), + generate_struct(¶ms), + generate_impl(¶ms), + ] + }) + .collect_vec() + .join("\n"); + + println!( + " + // This file was generated using `cargo r -p re_query2 --all-features --bin clamped_zip`. + // DO NOT EDIT. + + // --- + + #![allow(clippy::too_many_arguments)] + #![allow(clippy::type_complexity)] + + {output} + " + ); +} diff --git a/crates/store/re_query2/src/bin/range_zip.rs b/crates/store/re_query2/src/bin/range_zip.rs new file mode 100644 index 0000000000000..a32c70e6c9e9b --- /dev/null +++ b/crates/store/re_query2/src/bin/range_zip.rs @@ -0,0 +1,499 @@ +//! CLI tool to generate `RangeZip` implementations of different arities. + +#![allow(clippy::tuple_array_conversions)] // false positive + +use itertools::{izip, Itertools}; + +struct Params { + num_required: usize, + num_optional: usize, +} + +impl Params { + fn to_num_required(&self) -> String { + self.num_required.to_string() + } + + fn to_num_optional(&self) -> String { + self.num_optional.to_string() + } + + /// `1x3`, `2x2`… + fn to_suffix(&self) -> String { + format!("{}x{}", self.to_num_required(), self.to_num_optional()) + } + + /// `r0, r1, r2…`. + fn to_required_names(&self) -> Vec { + (0..self.num_required) + .map(|n| format!("r{n}")) + .collect_vec() + } + + /// `R0, R1, R2…`. + fn to_required_types(&self) -> Vec { + self.to_required_names() + .into_iter() + .map(|s| s.to_uppercase()) + .collect() + } + + /// `r0: IR0, r1: IR1, r2: IR2…`. + fn to_required_params(&self) -> Vec { + izip!(self.to_required_names(), self.to_required_types()) + .map(|(n, t)| format!("{n}: I{t}")) + .collect() + } + + /// `IR0: (Into)Iterator, IR1: (Into)Iterator…` + fn to_required_clauses(&self, into: bool) -> Vec { + let trait_name = if into { "IntoIterator" } else { "Iterator" }; + self.to_required_types() + .into_iter() + .map(|t| format!("I{t}: {trait_name}")) + .collect() + } + + /// `o0, o1, o2…`. + fn to_optional_names(&self) -> Vec { + (0..self.num_optional) + .map(|n| format!("o{n}")) + .collect_vec() + } + + /// `O0, O1, O2…`. + fn to_optional_types(&self) -> Vec { + self.to_optional_names() + .into_iter() + .map(|s| s.to_uppercase()) + .collect() + } + + /// `o0: IO0, o1: IO1, o2: IO2…`. + fn to_optional_params(&self) -> Vec { + izip!(self.to_optional_names(), self.to_optional_types()) + .map(|(n, t)| format!("{n}: I{t}")) + .collect() + } + + /// `o0: Peekable, o1: Peekable, o2: Peekable…`. 
+ fn to_optional_peekable_params(&self) -> Vec { + izip!(self.to_optional_names(), self.to_optional_types()) + .map(|(n, t)| format!("{n}: Peekable")) + .collect() + } + + /// `IO0: (Into)Iterator, IO1: (Into)Iterator…` + fn to_optional_clauses(&self, into: bool) -> Vec { + let trait_name = if into { "IntoIterator" } else { "Iterator" }; + self.to_optional_types() + .into_iter() + .map(|t| format!("I{t}: {trait_name}")) + .collect() + } +} + +fn backticked(strs: impl IntoIterator) -> Vec { + strs.into_iter().map(|s| format!("`{s}`")).collect() +} + +/// Output: +/// ```ignore +/// pub fn range_zip_2x2( +/// r0: IR0, +/// r1: IR1, +/// o0: IO0, +/// o1: IO1, +/// ) -> RangeZip2x2 +/// where +/// Idx: std::cmp::Ord, +/// IR0: IntoIterator, +/// IR1: IntoIterator, +/// IO0: IntoIterator, +/// IO1: IntoIterator, +/// { +/// RangeZip2x2 { +/// r0: r0.into_iter(), +/// r1: r1.into_iter(), +/// o0: o0.into_iter().peekable(), +/// o1: o1.into_iter().peekable(), +/// +/// o0_data_latest: None, +/// o1_data_latest: None, +/// } +/// } +/// ``` +fn generate_helper_func(params: &Params) -> String { + let suffix = params.to_suffix(); + let required_names = backticked(params.to_required_names()).join(", "); + let required_types = izip!( + params + .to_required_types() + .into_iter() + .map(|t| format!("I{t}")), + params.to_required_types() + ) + .flat_map(|(tr, r)| [tr, r]) + .collect_vec() + .join(", "); + let optional_types = izip!( + params + .to_optional_types() + .into_iter() + .map(|t| format!("I{t}")), + params.to_optional_types() + ) + .flat_map(|(tr, r)| [tr, r]) + .collect_vec() + .join(", "); + let required_clauses = params.to_required_clauses(true /* into */).join(", "); + let optional_clauses = params.to_optional_clauses(true /* into */).join(", "); + let required_params = params.to_required_params().join(", "); + let optional_params = params.to_optional_params().join(", "); + + let ret_clause = params + .to_required_types() + .into_iter() + .map(|r| format!("I{r}::IntoIter, {r}")) + .chain( + params + .to_optional_types() + .into_iter() + .map(|o| format!("I{o}::IntoIter, {o}")), + ) + .collect_vec() + .join(", "); + + let ret = params + .to_required_names() + .into_iter() + .map(|r| format!("{r}: {r}.into_iter()")) + .chain( + params + .to_optional_names() + .into_iter() + .map(|o| format!("{o}: {o}.into_iter().peekable()")), + ) + .collect_vec() + .join(",\n"); + + let latest = params + .to_optional_names() + .into_iter() + .map(|o| format!("{o}_data_latest: None")) + .collect_vec() + .join(",\n"); + + format!( + r#" + /// Returns a new [`RangeZip{suffix}`] iterator. + /// + /// The number of elements in a range zip iterator corresponds to the number of elements in the + /// shortest of its required iterators ({required_names}). + /// + /// Each call to `next` is guaranteed to yield the next value for each required iterator, + /// as well as the most recent index amongst all of them. + /// + /// Optional iterators accumulate their state and yield their most recent value (if any), + /// each time the required iterators fire. 
+ pub fn range_zip_{suffix}( + {required_params}, + {optional_params}, + ) -> RangeZip{suffix} + where + Idx: std::cmp::Ord, + {required_clauses}, + {optional_clauses}, + {{ + RangeZip{suffix} {{ + {ret}, + + {latest}, + }} + }} + "# + ) +} + +/// Output: +/// ```ignore +/// pub struct RangeZip2x2 +/// where +/// Idx: std::cmp::Ord, +/// IR0: Iterator, +/// IR1: Iterator, +/// IO0: Iterator, +/// IO1: Iterator, +/// { +/// r0: IR0, +/// r1: IR1, +/// o0: Peekable, +/// o1: Peekable, +/// +/// o0_data_latest: Option, +/// o1_data_latest: Option, +/// } +/// ``` +fn generate_struct(params: &Params) -> String { + let suffix = params.to_suffix(); + let required_types = izip!( + params + .to_required_types() + .into_iter() + .map(|t| format!("I{t}")), + params.to_required_types() + ) + .flat_map(|(tr, r)| [tr, r]) + .collect_vec() + .join(", "); + let optional_types = izip!( + params + .to_optional_types() + .into_iter() + .map(|t| format!("I{t}")), + params.to_optional_types() + ) + .flat_map(|(tr, r)| [tr, r]) + .collect_vec() + .join(", "); + let required_clauses = params.to_required_clauses(false /* into */).join(", "); + let optional_clauses = params.to_optional_clauses(false /* into */).join(", "); + let required_params = params.to_required_params().join(", "); + let optional_params = params.to_optional_peekable_params().join(", "); + let optional_latest_params = izip!(params.to_optional_names(), params.to_optional_types()) + .map(|(n, t)| format!("{n}_data_latest: Option<{t}>")) + .join(", "); + + format!( + r#" + /// Implements a range zip iterator combinator with 2 required iterators and 2 optional + /// iterators. + /// + /// See [`range_zip_{suffix}`] for more information. + pub struct RangeZip{suffix} + where + Idx: std::cmp::Ord, + {required_clauses}, + {optional_clauses}, + {{ + {required_params}, + {optional_params}, + + {optional_latest_params}, + }} + "# + ) +} + +/// Output: +/// ```ignore +/// impl Iterator +/// for RangeZip2x2 +/// where +/// Idx: std::cmp::Ord, +/// IR0: Iterator, +/// IR1: Iterator, +/// IO0: Iterator, +/// IO1: Iterator, +/// O0: Clone, +/// O1: Clone, +/// { +/// type Item = (Idx, R0, R1, Option, Option); +/// +/// #[inline] +/// fn next(&mut self) -> Option { +/// let Self { +/// r0, +/// r1, +/// o0, +/// o1, +/// o0_data_latest, +/// o1_data_latest, +/// } = self; +/// +/// let (r0_index, r0_data) = r0.next()?; +/// let (r1_index, r1_data) = r1.next()?; +/// +/// let max_index = [r0_index, r1_index].into_iter().max()?; +/// +/// let mut o0_data = None; +/// while let Some((_, data)) = o0.next_if(|(index, _)| index <= &max_index) { +/// o0_data = Some(data); +/// } +/// let o0_data = o0_data.or(o0_data_latest.take()); +/// o0_data_latest.clone_from(&o0_data); +/// +/// let mut o1_data = None; +/// while let Some((_, data)) = o1.next_if(|(index, _)| index <= &max_index) { +/// o1_data = Some(data); +/// } +/// let o1_data = o1_data.or(o1_data_latest.take()); +/// o1_data_latest.clone_from(&o1_data); +/// +/// Some((max_index, r0_data, r1_data, o0_data, o1_data)) +/// } +/// } +/// ``` +fn generate_impl(params: &Params) -> String { + let suffix = params.to_suffix(); + let required_types = izip!( + params + .to_required_types() + .into_iter() + .map(|t| format!("I{t}")), + params.to_required_types() + ) + .flat_map(|(tr, r)| [tr, r]) + .collect_vec() + .join(", "); + let optional_types = izip!( + params + .to_optional_types() + .into_iter() + .map(|t| format!("I{t}")), + params.to_optional_types() + ) + .flat_map(|(tr, r)| [tr, r]) + .collect_vec() + 
.join(", "); + let required_names = params.to_required_names().join(", "); + let optional_names = params.to_optional_names().join(", "); + let optional_latest_names = params + .to_optional_names() + .into_iter() + .map(|n| format!("{n}_data_latest")) + .join(", "); + let required_indices = params + .to_required_names() + .into_iter() + .map(|n| format!("{n}_index")) + .collect_vec() + .join(", "); + let required_data = params + .to_required_names() + .into_iter() + .map(|n| format!("{n}_data")) + .collect_vec() + .join(", "); + let optional_data = params + .to_optional_names() + .into_iter() + .map(|n| format!("{n}_data")) + .collect_vec() + .join(", "); + let required_clauses = params.to_required_clauses(false /* into */).join(", "); + let optional_clauses = params.to_optional_clauses(false /* into */).join(", "); + let optional_clone_clauses = params + .to_optional_types() + .into_iter() + .map(|o| format!("{o}: Clone")) + .collect_vec() + .join(", "); + + let items = params + .to_required_types() + .into_iter() + .chain( + params + .to_optional_types() + .into_iter() + .map(|o| format!("Option<{o}>")), + ) + .collect_vec() + .join(", "); + + let next_required = params + .to_required_names() + .into_iter() + .map(|r| format!("let ({r}_index, {r}_data) = {r}.next()?;")) + .collect_vec() + .join("\n"); + + let next_optional = params + .to_optional_names() + .into_iter() + .map(|o| { + format!( + " + let mut {o}_data = None; + while let Some((_, data)) = {o}.next_if(|(index, _)| index <= &max_index) {{ + {o}_data = Some(data); + }} + let {o}_data = {o}_data.or({o}_data_latest.take()); + {o}_data_latest.clone_from(&{o}_data); + " + ) + }) + .collect_vec() + .join("\n"); + + format!( + r#" + impl Iterator for RangeZip{suffix} + where + Idx: std::cmp::Ord, + {required_clauses}, + {optional_clauses}, + {optional_clone_clauses}, + {{ + type Item = (Idx, {items}); + + #[inline] + fn next(&mut self) -> Option {{ + let Self {{ {required_names}, {optional_names}, {optional_latest_names} }} = self; + + {next_required} + + let max_index = [{required_indices}].into_iter().max()?; + + {next_optional} + + Some((max_index, {required_data}, {optional_data})) + }} + }} + "# + ) +} + +fn main() { + let num_required = 1..3; + let num_optional = 1..10; + + let output = num_required + .flat_map(|num_required| { + num_optional + .clone() + .map(move |num_optional| (num_required, num_optional)) + }) + .flat_map(|(num_required, num_optional)| { + let params = Params { + num_required, + num_optional, + }; + + [ + generate_helper_func(¶ms), + generate_struct(¶ms), + generate_impl(¶ms), + ] + }) + .collect_vec() + .join("\n"); + + println!( + " + // This file was generated using `cargo r -p re_query2 --all-features --bin range_zip`. + // DO NOT EDIT. 
+
+    // ---
+
+    #![allow(clippy::iter_on_single_items)]
+    #![allow(clippy::too_many_arguments)]
+    #![allow(clippy::type_complexity)]
+
+    use std::iter::Peekable;
+
+    {output}
+    "
+    );
+}
diff --git a/crates/store/re_query2/src/cache.rs b/crates/store/re_query2/src/cache.rs
new file mode 100644
index 0000000000000..f0e123a97a96c
--- /dev/null
+++ b/crates/store/re_query2/src/cache.rs
@@ -0,0 +1,323 @@
+use std::{
+    collections::{BTreeMap, BTreeSet},
+    sync::Arc,
+};
+
+use ahash::HashMap;
+use nohash_hasher::IntSet;
+use parking_lot::RwLock;
+
+use re_chunk::ChunkId;
+use re_chunk_store::{ChunkStore, ChunkStoreDiff, ChunkStoreEvent, ChunkStoreSubscriber};
+use re_log_types::{EntityPath, ResolvedTimeRange, StoreId, TimeInt, Timeline};
+use re_types_core::{components::ClearIsRecursive, ComponentName, Loggable as _};
+
+use crate::{LatestAtCache, RangeCache};
+
+// ---
+
+/// Uniquely identifies cached query results in the [`Caches`].
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct CacheKey {
+    pub entity_path: EntityPath,
+    pub timeline: Timeline,
+    pub component_name: ComponentName,
+}
+
+impl re_types_core::SizeBytes for CacheKey {
+    #[inline]
+    fn heap_size_bytes(&self) -> u64 {
+        let Self {
+            entity_path,
+            timeline,
+            component_name,
+        } = self;
+        entity_path.heap_size_bytes()
+            + timeline.heap_size_bytes()
+            + component_name.heap_size_bytes()
+    }
+}
+
+impl std::fmt::Debug for CacheKey {
+    #[inline]
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let Self {
+            entity_path,
+            timeline,
+            component_name,
+        } = self;
+        f.write_fmt(format_args!(
+            "{entity_path}:{component_name} on {}",
+            timeline.name()
+        ))
+    }
+}
+
+impl CacheKey {
+    #[inline]
+    pub fn new(
+        entity_path: impl Into<EntityPath>,
+        timeline: impl Into<Timeline>,
+        component_name: impl Into<ComponentName>,
+    ) -> Self {
+        Self {
+            entity_path: entity_path.into(),
+            timeline: timeline.into(),
+            component_name: component_name.into(),
+        }
+    }
+}
+
+pub struct Caches {
+    /// The [`StoreId`] of the associated [`ChunkStore`].
+    pub(crate) store_id: StoreId,
+
+    /// Keeps track of which entities have had any `Clear`-related data on any timeline at any
+    /// point in time.
+    ///
+    /// This is used to optimize read-time clears, so that we don't unnecessarily pay for the fixed
+    /// overhead of all the query layers when we know for a fact that there won't be any data there.
+    /// This is a huge performance improvement in practice, especially in recordings with many entities.
+    pub(crate) might_require_clearing: RwLock<IntSet<EntityPath>>,
+
+    // NOTE: `Arc` so we can cheaply free the top-level lock early when needed.
+    pub(crate) latest_at_per_cache_key: RwLock<HashMap<CacheKey, Arc<RwLock<LatestAtCache>>>>,
+
+    // NOTE: `Arc` so we can cheaply free the top-level lock early when needed.
+ pub(crate) range_per_cache_key: RwLock>>>, +} + +impl std::fmt::Debug for Caches { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let Self { + store_id, + might_require_clearing, + latest_at_per_cache_key, + range_per_cache_key, + } = self; + + let mut strings = Vec::new(); + + strings.push(format!( + "[Entities that must be checked for clears @ {store_id}]\n" + )); + { + let sorted: BTreeSet = + might_require_clearing.read().iter().cloned().collect(); + for entity_path in sorted { + strings.push(format!(" * {entity_path}\n")); + } + strings.push("\n".to_owned()); + } + + strings.push(format!("[LatestAt @ {store_id}]")); + { + let latest_at_per_cache_key = latest_at_per_cache_key.read(); + let latest_at_per_cache_key: BTreeMap<_, _> = latest_at_per_cache_key.iter().collect(); + + for (cache_key, cache) in &latest_at_per_cache_key { + let cache = cache.read(); + strings.push(format!( + " [{cache_key:?} (pending_invalidation_min={:?})]", + cache.pending_invalidation.map(|t| cache_key + .timeline + .format_time_range_utc(&ResolvedTimeRange::new(t, TimeInt::MAX))), + )); + strings.push(indent::indent_all_by(4, format!("{cache:?}"))); + } + } + + strings.push(format!("[Range @ {store_id}]")); + { + let range_per_cache_key = range_per_cache_key.read(); + let range_per_cache_key: BTreeMap<_, _> = range_per_cache_key.iter().collect(); + + for (cache_key, cache) in &range_per_cache_key { + let cache = cache.read(); + strings.push(format!( + " [{cache_key:?} (pending_invalidations={:?})]", + cache.pending_invalidations, + )); + strings.push(indent::indent_all_by(4, format!("{cache:?}"))); + } + } + + f.write_str(&strings.join("\n").replace("\n\n", "\n")) + } +} + +impl Caches { + #[inline] + pub fn new(store: &ChunkStore) -> Self { + Self { + store_id: store.id().clone(), + might_require_clearing: Default::default(), + latest_at_per_cache_key: Default::default(), + range_per_cache_key: Default::default(), + } + } + + #[inline] + pub fn clear(&self) { + let Self { + store_id: _, + might_require_clearing, + latest_at_per_cache_key, + range_per_cache_key, + } = self; + + might_require_clearing.write().clear(); + latest_at_per_cache_key.write().clear(); + range_per_cache_key.write().clear(); + } +} + +impl ChunkStoreSubscriber for Caches { + #[inline] + fn name(&self) -> String { + "rerun.store_subscribers.QueryCache".into() + } + + #[inline] + fn as_any(&self) -> &dyn std::any::Any { + self + } + + #[inline] + fn as_any_mut(&mut self) -> &mut dyn std::any::Any { + self + } + + fn on_events(&mut self, events: &[ChunkStoreEvent]) { + re_tracing::profile_function!(format!("num_events={}", events.len())); + + #[derive(Default, Debug)] + struct CompactedEvents { + static_: HashMap<(EntityPath, ComponentName), BTreeSet>, + temporal_latest_at: HashMap, + temporal_range: HashMap>, + } + + let mut compacted = CompactedEvents::default(); + + for event in events { + let ChunkStoreEvent { + store_id, + store_generation: _, + event_id: _, + diff, + } = event; + + assert!( + self.store_id == *store_id, + "attempted to use a query cache {} with the wrong datastore ({})", + self.store_id, + store_id, + ); + + let ChunkStoreDiff { + kind: _, // Don't care: both additions and deletions invalidate query results. 
+                chunk,
+                compacted: _,
+            } = diff;
+
+            {
+                re_tracing::profile_scope!("compact events");
+
+                if chunk.is_static() {
+                    for component_name in chunk.component_names() {
+                        compacted
+                            .static_
+                            .entry((chunk.entity_path().clone(), component_name))
+                            .or_default()
+                            .insert(chunk.id());
+                    }
+                }
+
+                for (&timeline, time_chunk) in chunk.timelines() {
+                    for data_time in time_chunk.times() {
+                        for component_name in chunk.component_names() {
+                            let key = CacheKey::new(
+                                chunk.entity_path().clone(),
+                                timeline,
+                                component_name,
+                            );
+
+                            compacted
+                                .temporal_latest_at
+                                .entry(key.clone())
+                                .and_modify(|time| *time = TimeInt::min(*time, data_time))
+                                .or_insert(data_time);
+
+                            compacted
+                                .temporal_range
+                                .entry(key)
+                                .or_default()
+                                .insert(chunk.id());
+                        }
+                    }
+                }
+            }
+        }
+
+        let mut might_require_clearing = self.might_require_clearing.write();
+        let caches_latest_at = self.latest_at_per_cache_key.write();
+        let caches_range = self.range_per_cache_key.write();
+        // NOTE: Don't release the top-level locks -- even though this cannot happen yet with
+        // our current macro-architecture, we want to prevent queries from concurrently
+        // running while we're updating the invalidation flags.
+
+        {
+            re_tracing::profile_scope!("static");
+
+            // TODO(cmc): This is horribly stupid and slow and can easily be made faster by adding
+            // yet another layer of caching indirection.
+            // But since this pretty much never happens in practice, let's not go there until we
+            // have metrics showing that we need to.
+            for ((entity_path, component_name), chunk_ids) in compacted.static_ {
+                if component_name == ClearIsRecursive::name() {
+                    might_require_clearing.insert(entity_path.clone());
+                }
+
+                for (key, cache) in caches_latest_at.iter() {
+                    if key.entity_path == entity_path && key.component_name == component_name {
+                        cache.write().pending_invalidation = Some(TimeInt::STATIC);
+                    }
+                }
+
+                for (key, cache) in caches_range.iter() {
+                    if key.entity_path == entity_path && key.component_name == component_name {
+                        cache
+                            .write()
+                            .pending_invalidations
+                            .extend(chunk_ids.iter().copied());
+                    }
+                }
+            }
+        }
+
+        {
+            re_tracing::profile_scope!("temporal");
+
+            for (key, time) in compacted.temporal_latest_at {
+                if key.component_name == ClearIsRecursive::name() {
+                    might_require_clearing.insert(key.entity_path.clone());
+                }
+
+                if let Some(cache) = caches_latest_at.get(&key) {
+                    let mut cache = cache.write();
+                    cache.pending_invalidation = Some(time);
+                }
+            }
+
+            for (key, chunk_ids) in compacted.temporal_range {
+                if let Some(cache) = caches_range.get(&key) {
+                    cache
+                        .write()
+                        .pending_invalidations
+                        .extend(chunk_ids.iter().copied());
+                }
+            }
+        }
+    }
+}
diff --git a/crates/store/re_query2/src/cache_stats.rs b/crates/store/re_query2/src/cache_stats.rs
new file mode 100644
index 0000000000000..339ad0cdf6fcf
--- /dev/null
+++ b/crates/store/re_query2/src/cache_stats.rs
@@ -0,0 +1,108 @@
+use std::collections::BTreeMap;
+
+use re_types_core::SizeBytes as _;
+
+use crate::{CacheKey, Caches};
+
+// ---
+
+/// Stats for all primary caches.
+///
+/// Fetch them via [`Caches::stats`].
+#[derive(Default, Debug, Clone)]
+pub struct CachesStats {
+    pub latest_at: BTreeMap<CacheKey, CacheStats>,
+    pub range: BTreeMap<CacheKey, CacheStats>,
+}
+
+impl CachesStats {
+    #[inline]
+    pub fn total_size_bytes(&self) -> u64 {
+        re_tracing::profile_function!();
+
+        let Self { latest_at, range } = self;
+
+        let latest_at_size_bytes: u64 = latest_at
+            .values()
+            .map(|stats| stats.total_actual_size_bytes)
+            .sum();
+        let range_size_bytes: u64 = range
+            .values()
+            .map(|stats| stats.total_actual_size_bytes)
+            .sum();
+
+        latest_at_size_bytes + range_size_bytes
+    }
+}
+
+/// Stats for a single `crate::RangeCache`.
+#[derive(Default, Debug, Clone)]
+pub struct CacheStats {
+    /// How many chunks in the cache?
+    pub total_chunks: u64,
+
+    /// What would be the size of this cache in the worst case, i.e. if all chunks had
+    /// been fully copied?
+    pub total_effective_size_bytes: u64,
+
+    /// What is the actual size of this cache after deduplication?
+    pub total_actual_size_bytes: u64,
+}
+
+impl Caches {
+    /// Computes the stats for all primary caches.
+    pub fn stats(&self) -> CachesStats {
+        re_tracing::profile_function!();
+
+        let latest_at = {
+            let latest_at = self.latest_at_per_cache_key.read().clone();
+            // Implicitly releasing top-level cache mappings -- concurrent queries can run once again.
+
+            latest_at
+                .iter()
+                .map(|(key, cache)| {
+                    let cache = cache.read();
+                    (
+                        key.clone(),
+                        CacheStats {
+                            total_chunks: cache.per_query_time.len() as _,
+                            total_effective_size_bytes: cache
+                                .per_query_time
+                                .values()
+                                .map(|cached| cached.unit.total_size_bytes())
+                                .sum(),
+                            total_actual_size_bytes: cache.per_query_time.total_size_bytes(),
+                        },
+                    )
+                })
+                .collect()
+        };
+
+        let range = {
+            let range = self.range_per_cache_key.read().clone();
+            // Implicitly releasing top-level cache mappings -- concurrent queries can run once again.
+
+            range
+                .iter()
+                .map(|(key, cache)| {
+                    let cache = cache.read();
+
+                    (
+                        key.clone(),
+                        CacheStats {
+                            total_chunks: cache.chunks.len() as _,
+                            total_effective_size_bytes: cache
+                                .chunks
+                                .values()
+                                .map(|cached| cached.chunk.total_size_bytes())
+                                .sum(),
+                            total_actual_size_bytes: cache.chunks.total_size_bytes(),
+                        },
+                    )
+                })
+                .collect()
+        };
+
+        CachesStats { latest_at, range }
+    }
+}
diff --git a/crates/store/re_query2/src/clamped_zip/.gitattributes b/crates/store/re_query2/src/clamped_zip/.gitattributes
new file mode 100644
index 0000000000000..30d2025060647
--- /dev/null
+++ b/crates/store/re_query2/src/clamped_zip/.gitattributes
@@ -0,0 +1 @@
+generated.rs linguist-generated=true
diff --git a/crates/store/re_query2/src/clamped_zip/generated.rs b/crates/store/re_query2/src/clamped_zip/generated.rs
new file mode 100644
index 0000000000000..37ad262e6c8a6
--- /dev/null
+++ b/crates/store/re_query2/src/clamped_zip/generated.rs
@@ -0,0 +1,3124 @@
+// This file was generated using `cargo r -p re_query2 --all-features --bin clamped_zip`.
+// DO NOT EDIT.
+
+// ---
+
+#![allow(clippy::too_many_arguments)]
+#![allow(clippy::type_complexity)]
+
+/// Returns a new [`ClampedZip1x1`] iterator.
+///
+/// The number of elements in a clamped zip iterator corresponds to the number of elements in the
+/// shortest of its required iterators (`r0`).
+///
+/// Optional iterators (`o0`) will repeat their latest values if they happen to be too short
+/// to be zipped with the shortest of the required iterators.
+///
+/// If an optional iterator is not only too short but actually empty, its associated default function
+/// (`o0_default_fn`) will be executed and the resulting value repeated as necessary.
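+///
+/// Illustrative example (an editor's sketch, not emitted by the generator; assumes
+/// `clamped_zip_1x1` is re-exported where this doc is rendered):
+///
+/// ```ignore
+/// // The optional iterator is shorter than the required one: its latest value is repeated.
+/// let zipped: Vec<(i32, &str)> = clamped_zip_1x1([1, 2, 3], ["a"], || "-").collect();
+/// assert_eq!(zipped, vec![(1, "a"), (2, "a"), (3, "a")]);
+///
+/// // The optional iterator is empty: the default function fills in every slot.
+/// let zipped: Vec<(i32, &str)> =
+///     clamped_zip_1x1([1, 2, 3], Vec::<&str>::new(), || "-").collect();
+/// assert_eq!(zipped, vec![(1, "-"), (2, "-"), (3, "-")]);
+/// ```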
+pub fn clamped_zip_1x1<R0, O0, D0>(
+    r0: R0,
+    o0: O0,
+    o0_default_fn: D0,
+) -> ClampedZip1x1<R0::IntoIter, O0::IntoIter, D0>
+where
+    R0: IntoIterator,
+    O0: IntoIterator,
+    O0::Item: Clone,
+    D0: Fn() -> O0::Item,
+{
+    ClampedZip1x1 {
+        r0: r0.into_iter(),
+        o0: o0.into_iter(),
+        o0_default_fn,
+        o0_latest_value: None,
+    }
+}
+
+/// Implements a clamped zip iterator combinator with 1 required iterator and 1 optional
+/// iterator.
+///
+/// See [`clamped_zip_1x1`] for more information.
+pub struct ClampedZip1x1<R0, O0, D0>
+where
+    R0: Iterator,
+    O0: Iterator,
+    O0::Item: Clone,
+    D0: Fn() -> O0::Item,
+{
+    r0: R0,
+    o0: O0,
+    o0_default_fn: D0,
+
+    o0_latest_value: Option<O0::Item>,
+}
+
+impl<R0, O0, D0> Iterator for ClampedZip1x1<R0, O0, D0>
+where
+    R0: Iterator,
+    O0: Iterator,
+    O0::Item: Clone,
+    D0: Fn() -> O0::Item,
+{
+    type Item = (R0::Item, O0::Item);
+
+    #[inline]
+    fn next(&mut self) -> Option<Self::Item> {
+        let r0_next = self.r0.next()?;
+        let o0_next = self.o0.next().or(self.o0_latest_value.take());
+
+        self.o0_latest_value.clone_from(&o0_next);
+
+        Some((r0_next, o0_next.unwrap_or_else(|| (self.o0_default_fn)())))
+    }
+}
+
+/// Returns a new [`ClampedZip1x2`] iterator.
+///
+/// The number of elements in a clamped zip iterator corresponds to the number of elements in the
+/// shortest of its required iterators (`r0`).
+///
+/// Optional iterators (`o0`, `o1`) will repeat their latest values if they happen to be too short
+/// to be zipped with the shortest of the required iterators.
+///
+/// If an optional iterator is not only too short but actually empty, its associated default function
+/// (`o0_default_fn`, `o1_default_fn`) will be executed and the resulting value repeated as necessary.
+pub fn clamped_zip_1x2<R0, O0, O1, D0, D1>(
+    r0: R0,
+    o0: O0,
+    o0_default_fn: D0,
+    o1: O1,
+    o1_default_fn: D1,
+) -> ClampedZip1x2<R0::IntoIter, O0::IntoIter, O1::IntoIter, D0, D1>
+where
+    R0: IntoIterator,
+    O0: IntoIterator,
+    O0::Item: Clone,
+    O1: IntoIterator,
+    O1::Item: Clone,
+    D0: Fn() -> O0::Item,
+    D1: Fn() -> O1::Item,
+{
+    ClampedZip1x2 {
+        r0: r0.into_iter(),
+        o0: o0.into_iter(),
+        o1: o1.into_iter(),
+        o0_default_fn,
+        o1_default_fn,
+        o0_latest_value: None,
+        o1_latest_value: None,
+    }
+}
+
+/// Implements a clamped zip iterator combinator with 1 required iterator and 2 optional
+/// iterators.
+///
+/// See [`clamped_zip_1x2`] for more information.
+pub struct ClampedZip1x2<R0, O0, O1, D0, D1>
+where
+    R0: Iterator,
+    O0: Iterator,
+    O0::Item: Clone,
+    O1: Iterator,
+    O1::Item: Clone,
+    D0: Fn() -> O0::Item,
+    D1: Fn() -> O1::Item,
+{
+    r0: R0,
+    o0: O0,
+    o1: O1,
+    o0_default_fn: D0,
+    o1_default_fn: D1,
+
+    o0_latest_value: Option<O0::Item>,
+    o1_latest_value: Option<O1::Item>,
+}
+
+impl<R0, O0, O1, D0, D1> Iterator for ClampedZip1x2<R0, O0, O1, D0, D1>
+where
+    R0: Iterator,
+    O0: Iterator,
+    O0::Item: Clone,
+    O1: Iterator,
+    O1::Item: Clone,
+    D0: Fn() -> O0::Item,
+    D1: Fn() -> O1::Item,
+{
+    type Item = (R0::Item, O0::Item, O1::Item);
+
+    #[inline]
+    fn next(&mut self) -> Option<Self::Item> {
+        let r0_next = self.r0.next()?;
+        let o0_next = self.o0.next().or(self.o0_latest_value.take());
+        let o1_next = self.o1.next().or(self.o1_latest_value.take());
+
+        self.o0_latest_value.clone_from(&o0_next);
+        self.o1_latest_value.clone_from(&o1_next);
+
+        Some((
+            r0_next,
+            o0_next.unwrap_or_else(|| (self.o0_default_fn)()),
+            o1_next.unwrap_or_else(|| (self.o1_default_fn)()),
+        ))
+    }
+}
+
+/// Returns a new [`ClampedZip1x3`] iterator.
+///
+/// The number of elements in a clamped zip iterator corresponds to the number of elements in the
+/// shortest of its required iterators (`r0`).
+///
+/// Optional iterators (`o0`, `o1`, `o2`) will repeat their latest values if they happen to be too short
+/// to be zipped with the shortest of the required iterators.
+///
+/// If an optional iterator is not only too short but actually empty, its associated default function
+/// (`o0_default_fn`, `o1_default_fn`, `o2_default_fn`) will be executed and the resulting value repeated as necessary.
+pub fn clamped_zip_1x3<R0, O0, O1, O2, D0, D1, D2>(
+    r0: R0,
+    o0: O0,
+    o0_default_fn: D0,
+    o1: O1,
+    o1_default_fn: D1,
+    o2: O2,
+    o2_default_fn: D2,
+) -> ClampedZip1x3<R0::IntoIter, O0::IntoIter, O1::IntoIter, O2::IntoIter, D0, D1, D2>
+where
+    R0: IntoIterator,
+    O0: IntoIterator,
+    O0::Item: Clone,
+    O1: IntoIterator,
+    O1::Item: Clone,
+    O2: IntoIterator,
+    O2::Item: Clone,
+    D0: Fn() -> O0::Item,
+    D1: Fn() -> O1::Item,
+    D2: Fn() -> O2::Item,
+{
+    ClampedZip1x3 {
+        r0: r0.into_iter(),
+        o0: o0.into_iter(),
+        o1: o1.into_iter(),
+        o2: o2.into_iter(),
+        o0_default_fn,
+        o1_default_fn,
+        o2_default_fn,
+        o0_latest_value: None,
+        o1_latest_value: None,
+        o2_latest_value: None,
+    }
+}
+
+/// Implements a clamped zip iterator combinator with 1 required iterator and 3 optional
+/// iterators.
+///
+/// See [`clamped_zip_1x3`] for more information.
+pub struct ClampedZip1x3<R0, O0, O1, O2, D0, D1, D2>
+where
+    R0: Iterator,
+    O0: Iterator,
+    O0::Item: Clone,
+    O1: Iterator,
+    O1::Item: Clone,
+    O2: Iterator,
+    O2::Item: Clone,
+    D0: Fn() -> O0::Item,
+    D1: Fn() -> O1::Item,
+    D2: Fn() -> O2::Item,
+{
+    r0: R0,
+    o0: O0,
+    o1: O1,
+    o2: O2,
+    o0_default_fn: D0,
+    o1_default_fn: D1,
+    o2_default_fn: D2,
+
+    o0_latest_value: Option<O0::Item>,
+    o1_latest_value: Option<O1::Item>,
+    o2_latest_value: Option<O2::Item>,
+}
+
+impl<R0, O0, O1, O2, D0, D1, D2> Iterator for ClampedZip1x3<R0, O0, O1, O2, D0, D1, D2>
+where
+    R0: Iterator,
+    O0: Iterator,
+    O0::Item: Clone,
+    O1: Iterator,
+    O1::Item: Clone,
+    O2: Iterator,
+    O2::Item: Clone,
+    D0: Fn() -> O0::Item,
+    D1: Fn() -> O1::Item,
+    D2: Fn() -> O2::Item,
+{
+    type Item = (R0::Item, O0::Item, O1::Item, O2::Item);
+
+    #[inline]
+    fn next(&mut self) -> Option<Self::Item> {
+        let r0_next = self.r0.next()?;
+        let o0_next = self.o0.next().or(self.o0_latest_value.take());
+        let o1_next = self.o1.next().or(self.o1_latest_value.take());
+        let o2_next = self.o2.next().or(self.o2_latest_value.take());
+
+        self.o0_latest_value.clone_from(&o0_next);
+        self.o1_latest_value.clone_from(&o1_next);
+        self.o2_latest_value.clone_from(&o2_next);
+
+        Some((
+            r0_next,
+            o0_next.unwrap_or_else(|| (self.o0_default_fn)()),
+            o1_next.unwrap_or_else(|| (self.o1_default_fn)()),
+            o2_next.unwrap_or_else(|| (self.o2_default_fn)()),
+        ))
+    }
+}
+
+/// Returns a new [`ClampedZip1x4`] iterator.
+///
+/// The number of elements in a clamped zip iterator corresponds to the number of elements in the
+/// shortest of its required iterators (`r0`).
+///
+/// Optional iterators (`o0`, `o1`, `o2`, `o3`) will repeat their latest values if they happen to be too short
+/// to be zipped with the shortest of the required iterators.
+///
+/// If an optional iterator is not only too short but actually empty, its associated default function
+/// (`o0_default_fn`, `o1_default_fn`, `o2_default_fn`, `o3_default_fn`) will be executed and the resulting value repeated as necessary.
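+///
+/// Illustrative example (an editor's sketch, not emitted by the generator; assumes
+/// `clamped_zip_1x4` is in scope). Note how the over-long `o3` is clamped:
+///
+/// ```ignore
+/// let zipped: Vec<_> = clamped_zip_1x4(
+///     0..3,                      // r0: drives the iteration
+///     [10], || 0,                // o0: too short, latest value is repeated
+///     [20, 21], || 0,            // o1: too short, latest value is repeated
+///     Vec::<i32>::new(), || 30,  // o2: empty, the default function kicks in
+///     [40, 41, 42, 43], || 0,    // o3: too long, extra values are ignored
+/// ).collect();
+/// assert_eq!(zipped, vec![
+///     (0, 10, 20, 30, 40),
+///     (1, 10, 21, 30, 41),
+///     (2, 10, 21, 30, 42),
+/// ]);
+/// ```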
+pub fn clamped_zip_1x4<R0, O0, O1, O2, O3, D0, D1, D2, D3>(
+    r0: R0,
+    o0: O0,
+    o0_default_fn: D0,
+    o1: O1,
+    o1_default_fn: D1,
+    o2: O2,
+    o2_default_fn: D2,
+    o3: O3,
+    o3_default_fn: D3,
+) -> ClampedZip1x4<
+    R0::IntoIter,
+    O0::IntoIter,
+    O1::IntoIter,
+    O2::IntoIter,
+    O3::IntoIter,
+    D0,
+    D1,
+    D2,
+    D3,
+>
+where
+    R0: IntoIterator,
+    O0: IntoIterator,
+    O0::Item: Clone,
+    O1: IntoIterator,
+    O1::Item: Clone,
+    O2: IntoIterator,
+    O2::Item: Clone,
+    O3: IntoIterator,
+    O3::Item: Clone,
+    D0: Fn() -> O0::Item,
+    D1: Fn() -> O1::Item,
+    D2: Fn() -> O2::Item,
+    D3: Fn() -> O3::Item,
+{
+    ClampedZip1x4 {
+        r0: r0.into_iter(),
+        o0: o0.into_iter(),
+        o1: o1.into_iter(),
+        o2: o2.into_iter(),
+        o3: o3.into_iter(),
+        o0_default_fn,
+        o1_default_fn,
+        o2_default_fn,
+        o3_default_fn,
+        o0_latest_value: None,
+        o1_latest_value: None,
+        o2_latest_value: None,
+        o3_latest_value: None,
+    }
+}
+
+/// Implements a clamped zip iterator combinator with 1 required iterator and 4 optional
+/// iterators.
+///
+/// See [`clamped_zip_1x4`] for more information.
+pub struct ClampedZip1x4<R0, O0, O1, O2, O3, D0, D1, D2, D3>
+where
+    R0: Iterator,
+    O0: Iterator,
+    O0::Item: Clone,
+    O1: Iterator,
+    O1::Item: Clone,
+    O2: Iterator,
+    O2::Item: Clone,
+    O3: Iterator,
+    O3::Item: Clone,
+    D0: Fn() -> O0::Item,
+    D1: Fn() -> O1::Item,
+    D2: Fn() -> O2::Item,
+    D3: Fn() -> O3::Item,
+{
+    r0: R0,
+    o0: O0,
+    o1: O1,
+    o2: O2,
+    o3: O3,
+    o0_default_fn: D0,
+    o1_default_fn: D1,
+    o2_default_fn: D2,
+    o3_default_fn: D3,
+
+    o0_latest_value: Option<O0::Item>,
+    o1_latest_value: Option<O1::Item>,
+    o2_latest_value: Option<O2::Item>,
+    o3_latest_value: Option<O3::Item>,
+}
+
+impl<R0, O0, O1, O2, O3, D0, D1, D2, D3> Iterator
+    for ClampedZip1x4<R0, O0, O1, O2, O3, D0, D1, D2, D3>
+where
+    R0: Iterator,
+    O0: Iterator,
+    O0::Item: Clone,
+    O1: Iterator,
+    O1::Item: Clone,
+    O2: Iterator,
+    O2::Item: Clone,
+    O3: Iterator,
+    O3::Item: Clone,
+    D0: Fn() -> O0::Item,
+    D1: Fn() -> O1::Item,
+    D2: Fn() -> O2::Item,
+    D3: Fn() -> O3::Item,
+{
+    type Item = (R0::Item, O0::Item, O1::Item, O2::Item, O3::Item);
+
+    #[inline]
+    fn next(&mut self) -> Option<Self::Item> {
+        let r0_next = self.r0.next()?;
+        let o0_next = self.o0.next().or(self.o0_latest_value.take());
+        let o1_next = self.o1.next().or(self.o1_latest_value.take());
+        let o2_next = self.o2.next().or(self.o2_latest_value.take());
+        let o3_next = self.o3.next().or(self.o3_latest_value.take());
+
+        self.o0_latest_value.clone_from(&o0_next);
+        self.o1_latest_value.clone_from(&o1_next);
+        self.o2_latest_value.clone_from(&o2_next);
+        self.o3_latest_value.clone_from(&o3_next);
+
+        Some((
+            r0_next,
+            o0_next.unwrap_or_else(|| (self.o0_default_fn)()),
+            o1_next.unwrap_or_else(|| (self.o1_default_fn)()),
+            o2_next.unwrap_or_else(|| (self.o2_default_fn)()),
+            o3_next.unwrap_or_else(|| (self.o3_default_fn)()),
+        ))
+    }
+}
+
+/// Returns a new [`ClampedZip1x5`] iterator.
+///
+/// The number of elements in a clamped zip iterator corresponds to the number of elements in the
+/// shortest of its required iterators (`r0`).
+///
+/// Optional iterators (`o0`, `o1`, `o2`, `o3`, `o4`) will repeat their latest values if they happen to be too short
+/// to be zipped with the shortest of the required iterators.
+///
+/// If an optional iterator is not only too short but actually empty, its associated default function
+/// (`o0_default_fn`, `o1_default_fn`, `o2_default_fn`, `o3_default_fn`, `o4_default_fn`) will be executed and the resulting value repeated as necessary.
+pub fn clamped_zip_1x5( + r0: R0, + o0: O0, + o0_default_fn: D0, + o1: O1, + o1_default_fn: D1, + o2: O2, + o2_default_fn: D2, + o3: O3, + o3_default_fn: D3, + o4: O4, + o4_default_fn: D4, +) -> ClampedZip1x5< + R0::IntoIter, + O0::IntoIter, + O1::IntoIter, + O2::IntoIter, + O3::IntoIter, + O4::IntoIter, + D0, + D1, + D2, + D3, + D4, +> +where + R0: IntoIterator, + O0: IntoIterator, + O0::Item: Clone, + O1: IntoIterator, + O1::Item: Clone, + O2: IntoIterator, + O2::Item: Clone, + O3: IntoIterator, + O3::Item: Clone, + O4: IntoIterator, + O4::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, +{ + ClampedZip1x5 { + r0: r0.into_iter(), + o0: o0.into_iter(), + o1: o1.into_iter(), + o2: o2.into_iter(), + o3: o3.into_iter(), + o4: o4.into_iter(), + o0_default_fn, + o1_default_fn, + o2_default_fn, + o3_default_fn, + o4_default_fn, + o0_latest_value: None, + o1_latest_value: None, + o2_latest_value: None, + o3_latest_value: None, + o4_latest_value: None, + } +} + +/// Implements a clamped zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`clamped_zip_1x5`] for more information. +pub struct ClampedZip1x5 +where + R0: Iterator, + O0: Iterator, + O0::Item: Clone, + O1: Iterator, + O1::Item: Clone, + O2: Iterator, + O2::Item: Clone, + O3: Iterator, + O3::Item: Clone, + O4: Iterator, + O4::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, +{ + r0: R0, + o0: O0, + o1: O1, + o2: O2, + o3: O3, + o4: O4, + o0_default_fn: D0, + o1_default_fn: D1, + o2_default_fn: D2, + o3_default_fn: D3, + o4_default_fn: D4, + + o0_latest_value: Option, + o1_latest_value: Option, + o2_latest_value: Option, + o3_latest_value: Option, + o4_latest_value: Option, +} + +impl Iterator + for ClampedZip1x5 +where + R0: Iterator, + O0: Iterator, + O0::Item: Clone, + O1: Iterator, + O1::Item: Clone, + O2: Iterator, + O2::Item: Clone, + O3: Iterator, + O3::Item: Clone, + O4: Iterator, + O4::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, +{ + type Item = (R0::Item, O0::Item, O1::Item, O2::Item, O3::Item, O4::Item); + + #[inline] + fn next(&mut self) -> Option { + let r0_next = self.r0.next()?; + let o0_next = self.o0.next().or(self.o0_latest_value.take()); + let o1_next = self.o1.next().or(self.o1_latest_value.take()); + let o2_next = self.o2.next().or(self.o2_latest_value.take()); + let o3_next = self.o3.next().or(self.o3_latest_value.take()); + let o4_next = self.o4.next().or(self.o4_latest_value.take()); + + self.o0_latest_value.clone_from(&o0_next); + self.o1_latest_value.clone_from(&o1_next); + self.o2_latest_value.clone_from(&o2_next); + self.o3_latest_value.clone_from(&o3_next); + self.o4_latest_value.clone_from(&o4_next); + + Some(( + r0_next, + o0_next.unwrap_or_else(|| (self.o0_default_fn)()), + o1_next.unwrap_or_else(|| (self.o1_default_fn)()), + o2_next.unwrap_or_else(|| (self.o2_default_fn)()), + o3_next.unwrap_or_else(|| (self.o3_default_fn)()), + o4_next.unwrap_or_else(|| (self.o4_default_fn)()), + )) + } +} + +/// Returns a new [`ClampedZip1x6`] iterator. +/// +/// The number of elements in a clamped zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`). 
+/// +/// Optional iterators (`o0`, `o1`, `o2`, `o3`, `o4`, `o5`) will repeat their latest values if they happen to be too short +/// to be zipped with the shortest of the required iterators. +/// +/// If an optional iterator is not only too short but actually empty, its associated default function +/// (`o0_default_fn`, `o1_default_fn`, `o2_default_fn`, `o3_default_fn`, `o4_default_fn`, `o5_default_fn`) will be executed and the resulting value repeated as necessary. +pub fn clamped_zip_1x6( + r0: R0, + o0: O0, + o0_default_fn: D0, + o1: O1, + o1_default_fn: D1, + o2: O2, + o2_default_fn: D2, + o3: O3, + o3_default_fn: D3, + o4: O4, + o4_default_fn: D4, + o5: O5, + o5_default_fn: D5, +) -> ClampedZip1x6< + R0::IntoIter, + O0::IntoIter, + O1::IntoIter, + O2::IntoIter, + O3::IntoIter, + O4::IntoIter, + O5::IntoIter, + D0, + D1, + D2, + D3, + D4, + D5, +> +where + R0: IntoIterator, + O0: IntoIterator, + O0::Item: Clone, + O1: IntoIterator, + O1::Item: Clone, + O2: IntoIterator, + O2::Item: Clone, + O3: IntoIterator, + O3::Item: Clone, + O4: IntoIterator, + O4::Item: Clone, + O5: IntoIterator, + O5::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, + D5: Fn() -> O5::Item, +{ + ClampedZip1x6 { + r0: r0.into_iter(), + o0: o0.into_iter(), + o1: o1.into_iter(), + o2: o2.into_iter(), + o3: o3.into_iter(), + o4: o4.into_iter(), + o5: o5.into_iter(), + o0_default_fn, + o1_default_fn, + o2_default_fn, + o3_default_fn, + o4_default_fn, + o5_default_fn, + o0_latest_value: None, + o1_latest_value: None, + o2_latest_value: None, + o3_latest_value: None, + o4_latest_value: None, + o5_latest_value: None, + } +} + +/// Implements a clamped zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`clamped_zip_1x6`] for more information. 
+pub struct ClampedZip1x6 +where + R0: Iterator, + O0: Iterator, + O0::Item: Clone, + O1: Iterator, + O1::Item: Clone, + O2: Iterator, + O2::Item: Clone, + O3: Iterator, + O3::Item: Clone, + O4: Iterator, + O4::Item: Clone, + O5: Iterator, + O5::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, + D5: Fn() -> O5::Item, +{ + r0: R0, + o0: O0, + o1: O1, + o2: O2, + o3: O3, + o4: O4, + o5: O5, + o0_default_fn: D0, + o1_default_fn: D1, + o2_default_fn: D2, + o3_default_fn: D3, + o4_default_fn: D4, + o5_default_fn: D5, + + o0_latest_value: Option, + o1_latest_value: Option, + o2_latest_value: Option, + o3_latest_value: Option, + o4_latest_value: Option, + o5_latest_value: Option, +} + +impl Iterator + for ClampedZip1x6 +where + R0: Iterator, + O0: Iterator, + O0::Item: Clone, + O1: Iterator, + O1::Item: Clone, + O2: Iterator, + O2::Item: Clone, + O3: Iterator, + O3::Item: Clone, + O4: Iterator, + O4::Item: Clone, + O5: Iterator, + O5::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, + D5: Fn() -> O5::Item, +{ + type Item = ( + R0::Item, + O0::Item, + O1::Item, + O2::Item, + O3::Item, + O4::Item, + O5::Item, + ); + + #[inline] + fn next(&mut self) -> Option { + let r0_next = self.r0.next()?; + let o0_next = self.o0.next().or(self.o0_latest_value.take()); + let o1_next = self.o1.next().or(self.o1_latest_value.take()); + let o2_next = self.o2.next().or(self.o2_latest_value.take()); + let o3_next = self.o3.next().or(self.o3_latest_value.take()); + let o4_next = self.o4.next().or(self.o4_latest_value.take()); + let o5_next = self.o5.next().or(self.o5_latest_value.take()); + + self.o0_latest_value.clone_from(&o0_next); + self.o1_latest_value.clone_from(&o1_next); + self.o2_latest_value.clone_from(&o2_next); + self.o3_latest_value.clone_from(&o3_next); + self.o4_latest_value.clone_from(&o4_next); + self.o5_latest_value.clone_from(&o5_next); + + Some(( + r0_next, + o0_next.unwrap_or_else(|| (self.o0_default_fn)()), + o1_next.unwrap_or_else(|| (self.o1_default_fn)()), + o2_next.unwrap_or_else(|| (self.o2_default_fn)()), + o3_next.unwrap_or_else(|| (self.o3_default_fn)()), + o4_next.unwrap_or_else(|| (self.o4_default_fn)()), + o5_next.unwrap_or_else(|| (self.o5_default_fn)()), + )) + } +} + +/// Returns a new [`ClampedZip1x7`] iterator. +/// +/// The number of elements in a clamped zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`). +/// +/// Optional iterators (`o0`, `o1`, `o2`, `o3`, `o4`, `o5`, `o6`) will repeat their latest values if they happen to be too short +/// to be zipped with the shortest of the required iterators. +/// +/// If an optional iterator is not only too short but actually empty, its associated default function +/// (`o0_default_fn`, `o1_default_fn`, `o2_default_fn`, `o3_default_fn`, `o4_default_fn`, `o5_default_fn`, `o6_default_fn`) will be executed and the resulting value repeated as necessary. 
+pub fn clamped_zip_1x7( + r0: R0, + o0: O0, + o0_default_fn: D0, + o1: O1, + o1_default_fn: D1, + o2: O2, + o2_default_fn: D2, + o3: O3, + o3_default_fn: D3, + o4: O4, + o4_default_fn: D4, + o5: O5, + o5_default_fn: D5, + o6: O6, + o6_default_fn: D6, +) -> ClampedZip1x7< + R0::IntoIter, + O0::IntoIter, + O1::IntoIter, + O2::IntoIter, + O3::IntoIter, + O4::IntoIter, + O5::IntoIter, + O6::IntoIter, + D0, + D1, + D2, + D3, + D4, + D5, + D6, +> +where + R0: IntoIterator, + O0: IntoIterator, + O0::Item: Clone, + O1: IntoIterator, + O1::Item: Clone, + O2: IntoIterator, + O2::Item: Clone, + O3: IntoIterator, + O3::Item: Clone, + O4: IntoIterator, + O4::Item: Clone, + O5: IntoIterator, + O5::Item: Clone, + O6: IntoIterator, + O6::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, + D5: Fn() -> O5::Item, + D6: Fn() -> O6::Item, +{ + ClampedZip1x7 { + r0: r0.into_iter(), + o0: o0.into_iter(), + o1: o1.into_iter(), + o2: o2.into_iter(), + o3: o3.into_iter(), + o4: o4.into_iter(), + o5: o5.into_iter(), + o6: o6.into_iter(), + o0_default_fn, + o1_default_fn, + o2_default_fn, + o3_default_fn, + o4_default_fn, + o5_default_fn, + o6_default_fn, + o0_latest_value: None, + o1_latest_value: None, + o2_latest_value: None, + o3_latest_value: None, + o4_latest_value: None, + o5_latest_value: None, + o6_latest_value: None, + } +} + +/// Implements a clamped zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`clamped_zip_1x7`] for more information. +pub struct ClampedZip1x7 +where + R0: Iterator, + O0: Iterator, + O0::Item: Clone, + O1: Iterator, + O1::Item: Clone, + O2: Iterator, + O2::Item: Clone, + O3: Iterator, + O3::Item: Clone, + O4: Iterator, + O4::Item: Clone, + O5: Iterator, + O5::Item: Clone, + O6: Iterator, + O6::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, + D5: Fn() -> O5::Item, + D6: Fn() -> O6::Item, +{ + r0: R0, + o0: O0, + o1: O1, + o2: O2, + o3: O3, + o4: O4, + o5: O5, + o6: O6, + o0_default_fn: D0, + o1_default_fn: D1, + o2_default_fn: D2, + o3_default_fn: D3, + o4_default_fn: D4, + o5_default_fn: D5, + o6_default_fn: D6, + + o0_latest_value: Option, + o1_latest_value: Option, + o2_latest_value: Option, + o3_latest_value: Option, + o4_latest_value: Option, + o5_latest_value: Option, + o6_latest_value: Option, +} + +impl Iterator + for ClampedZip1x7 +where + R0: Iterator, + O0: Iterator, + O0::Item: Clone, + O1: Iterator, + O1::Item: Clone, + O2: Iterator, + O2::Item: Clone, + O3: Iterator, + O3::Item: Clone, + O4: Iterator, + O4::Item: Clone, + O5: Iterator, + O5::Item: Clone, + O6: Iterator, + O6::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, + D5: Fn() -> O5::Item, + D6: Fn() -> O6::Item, +{ + type Item = ( + R0::Item, + O0::Item, + O1::Item, + O2::Item, + O3::Item, + O4::Item, + O5::Item, + O6::Item, + ); + + #[inline] + fn next(&mut self) -> Option { + let r0_next = self.r0.next()?; + let o0_next = self.o0.next().or(self.o0_latest_value.take()); + let o1_next = self.o1.next().or(self.o1_latest_value.take()); + let o2_next = self.o2.next().or(self.o2_latest_value.take()); + let o3_next = self.o3.next().or(self.o3_latest_value.take()); + let o4_next = self.o4.next().or(self.o4_latest_value.take()); + let o5_next = self.o5.next().or(self.o5_latest_value.take()); + let o6_next = 
self.o6.next().or(self.o6_latest_value.take()); + + self.o0_latest_value.clone_from(&o0_next); + self.o1_latest_value.clone_from(&o1_next); + self.o2_latest_value.clone_from(&o2_next); + self.o3_latest_value.clone_from(&o3_next); + self.o4_latest_value.clone_from(&o4_next); + self.o5_latest_value.clone_from(&o5_next); + self.o6_latest_value.clone_from(&o6_next); + + Some(( + r0_next, + o0_next.unwrap_or_else(|| (self.o0_default_fn)()), + o1_next.unwrap_or_else(|| (self.o1_default_fn)()), + o2_next.unwrap_or_else(|| (self.o2_default_fn)()), + o3_next.unwrap_or_else(|| (self.o3_default_fn)()), + o4_next.unwrap_or_else(|| (self.o4_default_fn)()), + o5_next.unwrap_or_else(|| (self.o5_default_fn)()), + o6_next.unwrap_or_else(|| (self.o6_default_fn)()), + )) + } +} + +/// Returns a new [`ClampedZip1x8`] iterator. +/// +/// The number of elements in a clamped zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`). +/// +/// Optional iterators (`o0`, `o1`, `o2`, `o3`, `o4`, `o5`, `o6`, `o7`) will repeat their latest values if they happen to be too short +/// to be zipped with the shortest of the required iterators. +/// +/// If an optional iterator is not only too short but actually empty, its associated default function +/// (`o0_default_fn`, `o1_default_fn`, `o2_default_fn`, `o3_default_fn`, `o4_default_fn`, `o5_default_fn`, `o6_default_fn`, `o7_default_fn`) will be executed and the resulting value repeated as necessary. +pub fn clamped_zip_1x8( + r0: R0, + o0: O0, + o0_default_fn: D0, + o1: O1, + o1_default_fn: D1, + o2: O2, + o2_default_fn: D2, + o3: O3, + o3_default_fn: D3, + o4: O4, + o4_default_fn: D4, + o5: O5, + o5_default_fn: D5, + o6: O6, + o6_default_fn: D6, + o7: O7, + o7_default_fn: D7, +) -> ClampedZip1x8< + R0::IntoIter, + O0::IntoIter, + O1::IntoIter, + O2::IntoIter, + O3::IntoIter, + O4::IntoIter, + O5::IntoIter, + O6::IntoIter, + O7::IntoIter, + D0, + D1, + D2, + D3, + D4, + D5, + D6, + D7, +> +where + R0: IntoIterator, + O0: IntoIterator, + O0::Item: Clone, + O1: IntoIterator, + O1::Item: Clone, + O2: IntoIterator, + O2::Item: Clone, + O3: IntoIterator, + O3::Item: Clone, + O4: IntoIterator, + O4::Item: Clone, + O5: IntoIterator, + O5::Item: Clone, + O6: IntoIterator, + O6::Item: Clone, + O7: IntoIterator, + O7::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, + D5: Fn() -> O5::Item, + D6: Fn() -> O6::Item, + D7: Fn() -> O7::Item, +{ + ClampedZip1x8 { + r0: r0.into_iter(), + o0: o0.into_iter(), + o1: o1.into_iter(), + o2: o2.into_iter(), + o3: o3.into_iter(), + o4: o4.into_iter(), + o5: o5.into_iter(), + o6: o6.into_iter(), + o7: o7.into_iter(), + o0_default_fn, + o1_default_fn, + o2_default_fn, + o3_default_fn, + o4_default_fn, + o5_default_fn, + o6_default_fn, + o7_default_fn, + o0_latest_value: None, + o1_latest_value: None, + o2_latest_value: None, + o3_latest_value: None, + o4_latest_value: None, + o5_latest_value: None, + o6_latest_value: None, + o7_latest_value: None, + } +} + +/// Implements a clamped zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`clamped_zip_1x8`] for more information. 
+pub struct ClampedZip1x8 +where + R0: Iterator, + O0: Iterator, + O0::Item: Clone, + O1: Iterator, + O1::Item: Clone, + O2: Iterator, + O2::Item: Clone, + O3: Iterator, + O3::Item: Clone, + O4: Iterator, + O4::Item: Clone, + O5: Iterator, + O5::Item: Clone, + O6: Iterator, + O6::Item: Clone, + O7: Iterator, + O7::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, + D5: Fn() -> O5::Item, + D6: Fn() -> O6::Item, + D7: Fn() -> O7::Item, +{ + r0: R0, + o0: O0, + o1: O1, + o2: O2, + o3: O3, + o4: O4, + o5: O5, + o6: O6, + o7: O7, + o0_default_fn: D0, + o1_default_fn: D1, + o2_default_fn: D2, + o3_default_fn: D3, + o4_default_fn: D4, + o5_default_fn: D5, + o6_default_fn: D6, + o7_default_fn: D7, + + o0_latest_value: Option, + o1_latest_value: Option, + o2_latest_value: Option, + o3_latest_value: Option, + o4_latest_value: Option, + o5_latest_value: Option, + o6_latest_value: Option, + o7_latest_value: Option, +} + +impl Iterator + for ClampedZip1x8 +where + R0: Iterator, + O0: Iterator, + O0::Item: Clone, + O1: Iterator, + O1::Item: Clone, + O2: Iterator, + O2::Item: Clone, + O3: Iterator, + O3::Item: Clone, + O4: Iterator, + O4::Item: Clone, + O5: Iterator, + O5::Item: Clone, + O6: Iterator, + O6::Item: Clone, + O7: Iterator, + O7::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, + D5: Fn() -> O5::Item, + D6: Fn() -> O6::Item, + D7: Fn() -> O7::Item, +{ + type Item = ( + R0::Item, + O0::Item, + O1::Item, + O2::Item, + O3::Item, + O4::Item, + O5::Item, + O6::Item, + O7::Item, + ); + + #[inline] + fn next(&mut self) -> Option { + let r0_next = self.r0.next()?; + let o0_next = self.o0.next().or(self.o0_latest_value.take()); + let o1_next = self.o1.next().or(self.o1_latest_value.take()); + let o2_next = self.o2.next().or(self.o2_latest_value.take()); + let o3_next = self.o3.next().or(self.o3_latest_value.take()); + let o4_next = self.o4.next().or(self.o4_latest_value.take()); + let o5_next = self.o5.next().or(self.o5_latest_value.take()); + let o6_next = self.o6.next().or(self.o6_latest_value.take()); + let o7_next = self.o7.next().or(self.o7_latest_value.take()); + + self.o0_latest_value.clone_from(&o0_next); + self.o1_latest_value.clone_from(&o1_next); + self.o2_latest_value.clone_from(&o2_next); + self.o3_latest_value.clone_from(&o3_next); + self.o4_latest_value.clone_from(&o4_next); + self.o5_latest_value.clone_from(&o5_next); + self.o6_latest_value.clone_from(&o6_next); + self.o7_latest_value.clone_from(&o7_next); + + Some(( + r0_next, + o0_next.unwrap_or_else(|| (self.o0_default_fn)()), + o1_next.unwrap_or_else(|| (self.o1_default_fn)()), + o2_next.unwrap_or_else(|| (self.o2_default_fn)()), + o3_next.unwrap_or_else(|| (self.o3_default_fn)()), + o4_next.unwrap_or_else(|| (self.o4_default_fn)()), + o5_next.unwrap_or_else(|| (self.o5_default_fn)()), + o6_next.unwrap_or_else(|| (self.o6_default_fn)()), + o7_next.unwrap_or_else(|| (self.o7_default_fn)()), + )) + } +} + +/// Returns a new [`ClampedZip1x9`] iterator. +/// +/// The number of elements in a clamped zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`). +/// +/// Optional iterators (`o0`, `o1`, `o2`, `o3`, `o4`, `o5`, `o6`, `o7`, `o8`) will repeat their latest values if they happen to be too short +/// to be zipped with the shortest of the required iterators. 
+/// +/// If an optional iterator is not only too short but actually empty, its associated default function +/// (`o0_default_fn`, `o1_default_fn`, `o2_default_fn`, `o3_default_fn`, `o4_default_fn`, `o5_default_fn`, `o6_default_fn`, `o7_default_fn`, `o8_default_fn`) will be executed and the resulting value repeated as necessary. +pub fn clamped_zip_1x9( + r0: R0, + o0: O0, + o0_default_fn: D0, + o1: O1, + o1_default_fn: D1, + o2: O2, + o2_default_fn: D2, + o3: O3, + o3_default_fn: D3, + o4: O4, + o4_default_fn: D4, + o5: O5, + o5_default_fn: D5, + o6: O6, + o6_default_fn: D6, + o7: O7, + o7_default_fn: D7, + o8: O8, + o8_default_fn: D8, +) -> ClampedZip1x9< + R0::IntoIter, + O0::IntoIter, + O1::IntoIter, + O2::IntoIter, + O3::IntoIter, + O4::IntoIter, + O5::IntoIter, + O6::IntoIter, + O7::IntoIter, + O8::IntoIter, + D0, + D1, + D2, + D3, + D4, + D5, + D6, + D7, + D8, +> +where + R0: IntoIterator, + O0: IntoIterator, + O0::Item: Clone, + O1: IntoIterator, + O1::Item: Clone, + O2: IntoIterator, + O2::Item: Clone, + O3: IntoIterator, + O3::Item: Clone, + O4: IntoIterator, + O4::Item: Clone, + O5: IntoIterator, + O5::Item: Clone, + O6: IntoIterator, + O6::Item: Clone, + O7: IntoIterator, + O7::Item: Clone, + O8: IntoIterator, + O8::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, + D5: Fn() -> O5::Item, + D6: Fn() -> O6::Item, + D7: Fn() -> O7::Item, + D8: Fn() -> O8::Item, +{ + ClampedZip1x9 { + r0: r0.into_iter(), + o0: o0.into_iter(), + o1: o1.into_iter(), + o2: o2.into_iter(), + o3: o3.into_iter(), + o4: o4.into_iter(), + o5: o5.into_iter(), + o6: o6.into_iter(), + o7: o7.into_iter(), + o8: o8.into_iter(), + o0_default_fn, + o1_default_fn, + o2_default_fn, + o3_default_fn, + o4_default_fn, + o5_default_fn, + o6_default_fn, + o7_default_fn, + o8_default_fn, + o0_latest_value: None, + o1_latest_value: None, + o2_latest_value: None, + o3_latest_value: None, + o4_latest_value: None, + o5_latest_value: None, + o6_latest_value: None, + o7_latest_value: None, + o8_latest_value: None, + } +} + +/// Implements a clamped zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`clamped_zip_1x9`] for more information. 
+pub struct ClampedZip1x9 +where + R0: Iterator, + O0: Iterator, + O0::Item: Clone, + O1: Iterator, + O1::Item: Clone, + O2: Iterator, + O2::Item: Clone, + O3: Iterator, + O3::Item: Clone, + O4: Iterator, + O4::Item: Clone, + O5: Iterator, + O5::Item: Clone, + O6: Iterator, + O6::Item: Clone, + O7: Iterator, + O7::Item: Clone, + O8: Iterator, + O8::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, + D5: Fn() -> O5::Item, + D6: Fn() -> O6::Item, + D7: Fn() -> O7::Item, + D8: Fn() -> O8::Item, +{ + r0: R0, + o0: O0, + o1: O1, + o2: O2, + o3: O3, + o4: O4, + o5: O5, + o6: O6, + o7: O7, + o8: O8, + o0_default_fn: D0, + o1_default_fn: D1, + o2_default_fn: D2, + o3_default_fn: D3, + o4_default_fn: D4, + o5_default_fn: D5, + o6_default_fn: D6, + o7_default_fn: D7, + o8_default_fn: D8, + + o0_latest_value: Option, + o1_latest_value: Option, + o2_latest_value: Option, + o3_latest_value: Option, + o4_latest_value: Option, + o5_latest_value: Option, + o6_latest_value: Option, + o7_latest_value: Option, + o8_latest_value: Option, +} + +impl Iterator + for ClampedZip1x9 +where + R0: Iterator, + O0: Iterator, + O0::Item: Clone, + O1: Iterator, + O1::Item: Clone, + O2: Iterator, + O2::Item: Clone, + O3: Iterator, + O3::Item: Clone, + O4: Iterator, + O4::Item: Clone, + O5: Iterator, + O5::Item: Clone, + O6: Iterator, + O6::Item: Clone, + O7: Iterator, + O7::Item: Clone, + O8: Iterator, + O8::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, + D5: Fn() -> O5::Item, + D6: Fn() -> O6::Item, + D7: Fn() -> O7::Item, + D8: Fn() -> O8::Item, +{ + type Item = ( + R0::Item, + O0::Item, + O1::Item, + O2::Item, + O3::Item, + O4::Item, + O5::Item, + O6::Item, + O7::Item, + O8::Item, + ); + + #[inline] + fn next(&mut self) -> Option { + let r0_next = self.r0.next()?; + let o0_next = self.o0.next().or(self.o0_latest_value.take()); + let o1_next = self.o1.next().or(self.o1_latest_value.take()); + let o2_next = self.o2.next().or(self.o2_latest_value.take()); + let o3_next = self.o3.next().or(self.o3_latest_value.take()); + let o4_next = self.o4.next().or(self.o4_latest_value.take()); + let o5_next = self.o5.next().or(self.o5_latest_value.take()); + let o6_next = self.o6.next().or(self.o6_latest_value.take()); + let o7_next = self.o7.next().or(self.o7_latest_value.take()); + let o8_next = self.o8.next().or(self.o8_latest_value.take()); + + self.o0_latest_value.clone_from(&o0_next); + self.o1_latest_value.clone_from(&o1_next); + self.o2_latest_value.clone_from(&o2_next); + self.o3_latest_value.clone_from(&o3_next); + self.o4_latest_value.clone_from(&o4_next); + self.o5_latest_value.clone_from(&o5_next); + self.o6_latest_value.clone_from(&o6_next); + self.o7_latest_value.clone_from(&o7_next); + self.o8_latest_value.clone_from(&o8_next); + + Some(( + r0_next, + o0_next.unwrap_or_else(|| (self.o0_default_fn)()), + o1_next.unwrap_or_else(|| (self.o1_default_fn)()), + o2_next.unwrap_or_else(|| (self.o2_default_fn)()), + o3_next.unwrap_or_else(|| (self.o3_default_fn)()), + o4_next.unwrap_or_else(|| (self.o4_default_fn)()), + o5_next.unwrap_or_else(|| (self.o5_default_fn)()), + o6_next.unwrap_or_else(|| (self.o6_default_fn)()), + o7_next.unwrap_or_else(|| (self.o7_default_fn)()), + o8_next.unwrap_or_else(|| (self.o8_default_fn)()), + )) + } +} + +/// Returns a new [`ClampedZip2x1`] iterator. 
+/// +/// The number of elements in a clamped zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`, `r1`). +/// +/// Optional iterators (`o0`) will repeat their latest values if they happen to be too short +/// to be zipped with the shortest of the required iterators. +/// +/// If an optional iterator is not only too short but actually empty, its associated default function +/// (`o0_default_fn`) will be executed and the resulting value repeated as necessary. +pub fn clamped_zip_2x1( + r0: R0, + r1: R1, + o0: O0, + o0_default_fn: D0, +) -> ClampedZip2x1 +where + R0: IntoIterator, + R1: IntoIterator, + O0: IntoIterator, + O0::Item: Clone, + D0: Fn() -> O0::Item, +{ + ClampedZip2x1 { + r0: r0.into_iter(), + r1: r1.into_iter(), + o0: o0.into_iter(), + o0_default_fn, + o0_latest_value: None, + } +} + +/// Implements a clamped zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`clamped_zip_2x1`] for more information. +pub struct ClampedZip2x1 +where + R0: Iterator, + R1: Iterator, + O0: Iterator, + O0::Item: Clone, + D0: Fn() -> O0::Item, +{ + r0: R0, + r1: R1, + o0: O0, + o0_default_fn: D0, + + o0_latest_value: Option, +} + +impl Iterator for ClampedZip2x1 +where + R0: Iterator, + R1: Iterator, + O0: Iterator, + O0::Item: Clone, + D0: Fn() -> O0::Item, +{ + type Item = (R0::Item, R1::Item, O0::Item); + + #[inline] + fn next(&mut self) -> Option { + let r0_next = self.r0.next()?; + let r1_next = self.r1.next()?; + let o0_next = self.o0.next().or(self.o0_latest_value.take()); + + self.o0_latest_value.clone_from(&o0_next); + + Some(( + r0_next, + r1_next, + o0_next.unwrap_or_else(|| (self.o0_default_fn)()), + )) + } +} + +/// Returns a new [`ClampedZip2x2`] iterator. +/// +/// The number of elements in a clamped zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`, `r1`). +/// +/// Optional iterators (`o0`, `o1`) will repeat their latest values if they happen to be too short +/// to be zipped with the shortest of the required iterators. +/// +/// If an optional iterator is not only too short but actually empty, its associated default function +/// (`o0_default_fn`, `o1_default_fn`) will be executed and the resulting value repeated as necessary. +pub fn clamped_zip_2x2( + r0: R0, + r1: R1, + o0: O0, + o0_default_fn: D0, + o1: O1, + o1_default_fn: D1, +) -> ClampedZip2x2 +where + R0: IntoIterator, + R1: IntoIterator, + O0: IntoIterator, + O0::Item: Clone, + O1: IntoIterator, + O1::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, +{ + ClampedZip2x2 { + r0: r0.into_iter(), + r1: r1.into_iter(), + o0: o0.into_iter(), + o1: o1.into_iter(), + o0_default_fn, + o1_default_fn, + o0_latest_value: None, + o1_latest_value: None, + } +} + +/// Implements a clamped zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`clamped_zip_2x2`] for more information. 
+pub struct ClampedZip2x2 +where + R0: Iterator, + R1: Iterator, + O0: Iterator, + O0::Item: Clone, + O1: Iterator, + O1::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, +{ + r0: R0, + r1: R1, + o0: O0, + o1: O1, + o0_default_fn: D0, + o1_default_fn: D1, + + o0_latest_value: Option, + o1_latest_value: Option, +} + +impl Iterator for ClampedZip2x2 +where + R0: Iterator, + R1: Iterator, + O0: Iterator, + O0::Item: Clone, + O1: Iterator, + O1::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, +{ + type Item = (R0::Item, R1::Item, O0::Item, O1::Item); + + #[inline] + fn next(&mut self) -> Option { + let r0_next = self.r0.next()?; + let r1_next = self.r1.next()?; + let o0_next = self.o0.next().or(self.o0_latest_value.take()); + let o1_next = self.o1.next().or(self.o1_latest_value.take()); + + self.o0_latest_value.clone_from(&o0_next); + self.o1_latest_value.clone_from(&o1_next); + + Some(( + r0_next, + r1_next, + o0_next.unwrap_or_else(|| (self.o0_default_fn)()), + o1_next.unwrap_or_else(|| (self.o1_default_fn)()), + )) + } +} + +/// Returns a new [`ClampedZip2x3`] iterator. +/// +/// The number of elements in a clamped zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`, `r1`). +/// +/// Optional iterators (`o0`, `o1`, `o2`) will repeat their latest values if they happen to be too short +/// to be zipped with the shortest of the required iterators. +/// +/// If an optional iterator is not only too short but actually empty, its associated default function +/// (`o0_default_fn`, `o1_default_fn`, `o2_default_fn`) will be executed and the resulting value repeated as necessary. +pub fn clamped_zip_2x3( + r0: R0, + r1: R1, + o0: O0, + o0_default_fn: D0, + o1: O1, + o1_default_fn: D1, + o2: O2, + o2_default_fn: D2, +) -> ClampedZip2x3 +where + R0: IntoIterator, + R1: IntoIterator, + O0: IntoIterator, + O0::Item: Clone, + O1: IntoIterator, + O1::Item: Clone, + O2: IntoIterator, + O2::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, +{ + ClampedZip2x3 { + r0: r0.into_iter(), + r1: r1.into_iter(), + o0: o0.into_iter(), + o1: o1.into_iter(), + o2: o2.into_iter(), + o0_default_fn, + o1_default_fn, + o2_default_fn, + o0_latest_value: None, + o1_latest_value: None, + o2_latest_value: None, + } +} + +/// Implements a clamped zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`clamped_zip_2x3`] for more information. 
+pub struct ClampedZip2x3 +where + R0: Iterator, + R1: Iterator, + O0: Iterator, + O0::Item: Clone, + O1: Iterator, + O1::Item: Clone, + O2: Iterator, + O2::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, +{ + r0: R0, + r1: R1, + o0: O0, + o1: O1, + o2: O2, + o0_default_fn: D0, + o1_default_fn: D1, + o2_default_fn: D2, + + o0_latest_value: Option, + o1_latest_value: Option, + o2_latest_value: Option, +} + +impl Iterator for ClampedZip2x3 +where + R0: Iterator, + R1: Iterator, + O0: Iterator, + O0::Item: Clone, + O1: Iterator, + O1::Item: Clone, + O2: Iterator, + O2::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, +{ + type Item = (R0::Item, R1::Item, O0::Item, O1::Item, O2::Item); + + #[inline] + fn next(&mut self) -> Option { + let r0_next = self.r0.next()?; + let r1_next = self.r1.next()?; + let o0_next = self.o0.next().or(self.o0_latest_value.take()); + let o1_next = self.o1.next().or(self.o1_latest_value.take()); + let o2_next = self.o2.next().or(self.o2_latest_value.take()); + + self.o0_latest_value.clone_from(&o0_next); + self.o1_latest_value.clone_from(&o1_next); + self.o2_latest_value.clone_from(&o2_next); + + Some(( + r0_next, + r1_next, + o0_next.unwrap_or_else(|| (self.o0_default_fn)()), + o1_next.unwrap_or_else(|| (self.o1_default_fn)()), + o2_next.unwrap_or_else(|| (self.o2_default_fn)()), + )) + } +} + +/// Returns a new [`ClampedZip2x4`] iterator. +/// +/// The number of elements in a clamped zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`, `r1`). +/// +/// Optional iterators (`o0`, `o1`, `o2`, `o3`) will repeat their latest values if they happen to be too short +/// to be zipped with the shortest of the required iterators. +/// +/// If an optional iterator is not only too short but actually empty, its associated default function +/// (`o0_default_fn`, `o1_default_fn`, `o2_default_fn`, `o3_default_fn`) will be executed and the resulting value repeated as necessary. +pub fn clamped_zip_2x4( + r0: R0, + r1: R1, + o0: O0, + o0_default_fn: D0, + o1: O1, + o1_default_fn: D1, + o2: O2, + o2_default_fn: D2, + o3: O3, + o3_default_fn: D3, +) -> ClampedZip2x4< + R0::IntoIter, + R1::IntoIter, + O0::IntoIter, + O1::IntoIter, + O2::IntoIter, + O3::IntoIter, + D0, + D1, + D2, + D3, +> +where + R0: IntoIterator, + R1: IntoIterator, + O0: IntoIterator, + O0::Item: Clone, + O1: IntoIterator, + O1::Item: Clone, + O2: IntoIterator, + O2::Item: Clone, + O3: IntoIterator, + O3::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, +{ + ClampedZip2x4 { + r0: r0.into_iter(), + r1: r1.into_iter(), + o0: o0.into_iter(), + o1: o1.into_iter(), + o2: o2.into_iter(), + o3: o3.into_iter(), + o0_default_fn, + o1_default_fn, + o2_default_fn, + o3_default_fn, + o0_latest_value: None, + o1_latest_value: None, + o2_latest_value: None, + o3_latest_value: None, + } +} + +/// Implements a clamped zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`clamped_zip_2x4`] for more information. 
+pub struct ClampedZip2x4 +where + R0: Iterator, + R1: Iterator, + O0: Iterator, + O0::Item: Clone, + O1: Iterator, + O1::Item: Clone, + O2: Iterator, + O2::Item: Clone, + O3: Iterator, + O3::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, +{ + r0: R0, + r1: R1, + o0: O0, + o1: O1, + o2: O2, + o3: O3, + o0_default_fn: D0, + o1_default_fn: D1, + o2_default_fn: D2, + o3_default_fn: D3, + + o0_latest_value: Option, + o1_latest_value: Option, + o2_latest_value: Option, + o3_latest_value: Option, +} + +impl Iterator + for ClampedZip2x4 +where + R0: Iterator, + R1: Iterator, + O0: Iterator, + O0::Item: Clone, + O1: Iterator, + O1::Item: Clone, + O2: Iterator, + O2::Item: Clone, + O3: Iterator, + O3::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, +{ + type Item = (R0::Item, R1::Item, O0::Item, O1::Item, O2::Item, O3::Item); + + #[inline] + fn next(&mut self) -> Option { + let r0_next = self.r0.next()?; + let r1_next = self.r1.next()?; + let o0_next = self.o0.next().or(self.o0_latest_value.take()); + let o1_next = self.o1.next().or(self.o1_latest_value.take()); + let o2_next = self.o2.next().or(self.o2_latest_value.take()); + let o3_next = self.o3.next().or(self.o3_latest_value.take()); + + self.o0_latest_value.clone_from(&o0_next); + self.o1_latest_value.clone_from(&o1_next); + self.o2_latest_value.clone_from(&o2_next); + self.o3_latest_value.clone_from(&o3_next); + + Some(( + r0_next, + r1_next, + o0_next.unwrap_or_else(|| (self.o0_default_fn)()), + o1_next.unwrap_or_else(|| (self.o1_default_fn)()), + o2_next.unwrap_or_else(|| (self.o2_default_fn)()), + o3_next.unwrap_or_else(|| (self.o3_default_fn)()), + )) + } +} + +/// Returns a new [`ClampedZip2x5`] iterator. +/// +/// The number of elements in a clamped zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`, `r1`). +/// +/// Optional iterators (`o0`, `o1`, `o2`, `o3`, `o4`) will repeat their latest values if they happen to be too short +/// to be zipped with the shortest of the required iterators. +/// +/// If an optional iterator is not only too short but actually empty, its associated default function +/// (`o0_default_fn`, `o1_default_fn`, `o2_default_fn`, `o3_default_fn`, `o4_default_fn`) will be executed and the resulting value repeated as necessary. 
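+///
+/// Illustrative example (an editor's sketch, not emitted by the generator; assumes
+/// `clamped_zip_2x5` is in scope). Iteration stops at the shortest *required* iterator:
+///
+/// ```ignore
+/// let zipped: Vec<_> = clamped_zip_2x5(
+///     0..4,                      // r0
+///     ['a', 'b'],                // r1: shortest required iterator, stops the zip
+///     [10], || 0,
+///     Vec::<i32>::new(), || 20,
+///     [30, 31], || 0,
+///     Vec::<i32>::new(), || 40,
+///     [50], || 0,
+/// ).collect();
+/// assert_eq!(zipped, vec![
+///     (0, 'a', 10, 20, 30, 40, 50),
+///     (1, 'b', 10, 20, 31, 40, 50),
+/// ]);
+/// ```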
+pub fn clamped_zip_2x5( + r0: R0, + r1: R1, + o0: O0, + o0_default_fn: D0, + o1: O1, + o1_default_fn: D1, + o2: O2, + o2_default_fn: D2, + o3: O3, + o3_default_fn: D3, + o4: O4, + o4_default_fn: D4, +) -> ClampedZip2x5< + R0::IntoIter, + R1::IntoIter, + O0::IntoIter, + O1::IntoIter, + O2::IntoIter, + O3::IntoIter, + O4::IntoIter, + D0, + D1, + D2, + D3, + D4, +> +where + R0: IntoIterator, + R1: IntoIterator, + O0: IntoIterator, + O0::Item: Clone, + O1: IntoIterator, + O1::Item: Clone, + O2: IntoIterator, + O2::Item: Clone, + O3: IntoIterator, + O3::Item: Clone, + O4: IntoIterator, + O4::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, +{ + ClampedZip2x5 { + r0: r0.into_iter(), + r1: r1.into_iter(), + o0: o0.into_iter(), + o1: o1.into_iter(), + o2: o2.into_iter(), + o3: o3.into_iter(), + o4: o4.into_iter(), + o0_default_fn, + o1_default_fn, + o2_default_fn, + o3_default_fn, + o4_default_fn, + o0_latest_value: None, + o1_latest_value: None, + o2_latest_value: None, + o3_latest_value: None, + o4_latest_value: None, + } +} + +/// Implements a clamped zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`clamped_zip_2x5`] for more information. +pub struct ClampedZip2x5 +where + R0: Iterator, + R1: Iterator, + O0: Iterator, + O0::Item: Clone, + O1: Iterator, + O1::Item: Clone, + O2: Iterator, + O2::Item: Clone, + O3: Iterator, + O3::Item: Clone, + O4: Iterator, + O4::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, +{ + r0: R0, + r1: R1, + o0: O0, + o1: O1, + o2: O2, + o3: O3, + o4: O4, + o0_default_fn: D0, + o1_default_fn: D1, + o2_default_fn: D2, + o3_default_fn: D3, + o4_default_fn: D4, + + o0_latest_value: Option, + o1_latest_value: Option, + o2_latest_value: Option, + o3_latest_value: Option, + o4_latest_value: Option, +} + +impl Iterator + for ClampedZip2x5 +where + R0: Iterator, + R1: Iterator, + O0: Iterator, + O0::Item: Clone, + O1: Iterator, + O1::Item: Clone, + O2: Iterator, + O2::Item: Clone, + O3: Iterator, + O3::Item: Clone, + O4: Iterator, + O4::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, +{ + type Item = ( + R0::Item, + R1::Item, + O0::Item, + O1::Item, + O2::Item, + O3::Item, + O4::Item, + ); + + #[inline] + fn next(&mut self) -> Option { + let r0_next = self.r0.next()?; + let r1_next = self.r1.next()?; + let o0_next = self.o0.next().or(self.o0_latest_value.take()); + let o1_next = self.o1.next().or(self.o1_latest_value.take()); + let o2_next = self.o2.next().or(self.o2_latest_value.take()); + let o3_next = self.o3.next().or(self.o3_latest_value.take()); + let o4_next = self.o4.next().or(self.o4_latest_value.take()); + + self.o0_latest_value.clone_from(&o0_next); + self.o1_latest_value.clone_from(&o1_next); + self.o2_latest_value.clone_from(&o2_next); + self.o3_latest_value.clone_from(&o3_next); + self.o4_latest_value.clone_from(&o4_next); + + Some(( + r0_next, + r1_next, + o0_next.unwrap_or_else(|| (self.o0_default_fn)()), + o1_next.unwrap_or_else(|| (self.o1_default_fn)()), + o2_next.unwrap_or_else(|| (self.o2_default_fn)()), + o3_next.unwrap_or_else(|| (self.o3_default_fn)()), + o4_next.unwrap_or_else(|| (self.o4_default_fn)()), + )) + } +} + +/// Returns a new [`ClampedZip2x6`] iterator. 
+/// +/// The number of elements in a clamped zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`, `r1`). +/// +/// Optional iterators (`o0`, `o1`, `o2`, `o3`, `o4`, `o5`) will repeat their latest values if they happen to be too short +/// to be zipped with the shortest of the required iterators. +/// +/// If an optional iterator is not only too short but actually empty, its associated default function +/// (`o0_default_fn`, `o1_default_fn`, `o2_default_fn`, `o3_default_fn`, `o4_default_fn`, `o5_default_fn`) will be executed and the resulting value repeated as necessary. +pub fn clamped_zip_2x6( + r0: R0, + r1: R1, + o0: O0, + o0_default_fn: D0, + o1: O1, + o1_default_fn: D1, + o2: O2, + o2_default_fn: D2, + o3: O3, + o3_default_fn: D3, + o4: O4, + o4_default_fn: D4, + o5: O5, + o5_default_fn: D5, +) -> ClampedZip2x6< + R0::IntoIter, + R1::IntoIter, + O0::IntoIter, + O1::IntoIter, + O2::IntoIter, + O3::IntoIter, + O4::IntoIter, + O5::IntoIter, + D0, + D1, + D2, + D3, + D4, + D5, +> +where + R0: IntoIterator, + R1: IntoIterator, + O0: IntoIterator, + O0::Item: Clone, + O1: IntoIterator, + O1::Item: Clone, + O2: IntoIterator, + O2::Item: Clone, + O3: IntoIterator, + O3::Item: Clone, + O4: IntoIterator, + O4::Item: Clone, + O5: IntoIterator, + O5::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, + D5: Fn() -> O5::Item, +{ + ClampedZip2x6 { + r0: r0.into_iter(), + r1: r1.into_iter(), + o0: o0.into_iter(), + o1: o1.into_iter(), + o2: o2.into_iter(), + o3: o3.into_iter(), + o4: o4.into_iter(), + o5: o5.into_iter(), + o0_default_fn, + o1_default_fn, + o2_default_fn, + o3_default_fn, + o4_default_fn, + o5_default_fn, + o0_latest_value: None, + o1_latest_value: None, + o2_latest_value: None, + o3_latest_value: None, + o4_latest_value: None, + o5_latest_value: None, + } +} + +/// Implements a clamped zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`clamped_zip_2x6`] for more information. 
+pub struct ClampedZip2x6 +where + R0: Iterator, + R1: Iterator, + O0: Iterator, + O0::Item: Clone, + O1: Iterator, + O1::Item: Clone, + O2: Iterator, + O2::Item: Clone, + O3: Iterator, + O3::Item: Clone, + O4: Iterator, + O4::Item: Clone, + O5: Iterator, + O5::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, + D5: Fn() -> O5::Item, +{ + r0: R0, + r1: R1, + o0: O0, + o1: O1, + o2: O2, + o3: O3, + o4: O4, + o5: O5, + o0_default_fn: D0, + o1_default_fn: D1, + o2_default_fn: D2, + o3_default_fn: D3, + o4_default_fn: D4, + o5_default_fn: D5, + + o0_latest_value: Option, + o1_latest_value: Option, + o2_latest_value: Option, + o3_latest_value: Option, + o4_latest_value: Option, + o5_latest_value: Option, +} + +impl Iterator + for ClampedZip2x6 +where + R0: Iterator, + R1: Iterator, + O0: Iterator, + O0::Item: Clone, + O1: Iterator, + O1::Item: Clone, + O2: Iterator, + O2::Item: Clone, + O3: Iterator, + O3::Item: Clone, + O4: Iterator, + O4::Item: Clone, + O5: Iterator, + O5::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, + D5: Fn() -> O5::Item, +{ + type Item = ( + R0::Item, + R1::Item, + O0::Item, + O1::Item, + O2::Item, + O3::Item, + O4::Item, + O5::Item, + ); + + #[inline] + fn next(&mut self) -> Option { + let r0_next = self.r0.next()?; + let r1_next = self.r1.next()?; + let o0_next = self.o0.next().or(self.o0_latest_value.take()); + let o1_next = self.o1.next().or(self.o1_latest_value.take()); + let o2_next = self.o2.next().or(self.o2_latest_value.take()); + let o3_next = self.o3.next().or(self.o3_latest_value.take()); + let o4_next = self.o4.next().or(self.o4_latest_value.take()); + let o5_next = self.o5.next().or(self.o5_latest_value.take()); + + self.o0_latest_value.clone_from(&o0_next); + self.o1_latest_value.clone_from(&o1_next); + self.o2_latest_value.clone_from(&o2_next); + self.o3_latest_value.clone_from(&o3_next); + self.o4_latest_value.clone_from(&o4_next); + self.o5_latest_value.clone_from(&o5_next); + + Some(( + r0_next, + r1_next, + o0_next.unwrap_or_else(|| (self.o0_default_fn)()), + o1_next.unwrap_or_else(|| (self.o1_default_fn)()), + o2_next.unwrap_or_else(|| (self.o2_default_fn)()), + o3_next.unwrap_or_else(|| (self.o3_default_fn)()), + o4_next.unwrap_or_else(|| (self.o4_default_fn)()), + o5_next.unwrap_or_else(|| (self.o5_default_fn)()), + )) + } +} + +/// Returns a new [`ClampedZip2x7`] iterator. +/// +/// The number of elements in a clamped zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`, `r1`). +/// +/// Optional iterators (`o0`, `o1`, `o2`, `o3`, `o4`, `o5`, `o6`) will repeat their latest values if they happen to be too short +/// to be zipped with the shortest of the required iterators. +/// +/// If an optional iterator is not only too short but actually empty, its associated default function +/// (`o0_default_fn`, `o1_default_fn`, `o2_default_fn`, `o3_default_fn`, `o4_default_fn`, `o5_default_fn`, `o6_default_fn`) will be executed and the resulting value repeated as necessary. 
+pub fn clamped_zip_2x7( + r0: R0, + r1: R1, + o0: O0, + o0_default_fn: D0, + o1: O1, + o1_default_fn: D1, + o2: O2, + o2_default_fn: D2, + o3: O3, + o3_default_fn: D3, + o4: O4, + o4_default_fn: D4, + o5: O5, + o5_default_fn: D5, + o6: O6, + o6_default_fn: D6, +) -> ClampedZip2x7< + R0::IntoIter, + R1::IntoIter, + O0::IntoIter, + O1::IntoIter, + O2::IntoIter, + O3::IntoIter, + O4::IntoIter, + O5::IntoIter, + O6::IntoIter, + D0, + D1, + D2, + D3, + D4, + D5, + D6, +> +where + R0: IntoIterator, + R1: IntoIterator, + O0: IntoIterator, + O0::Item: Clone, + O1: IntoIterator, + O1::Item: Clone, + O2: IntoIterator, + O2::Item: Clone, + O3: IntoIterator, + O3::Item: Clone, + O4: IntoIterator, + O4::Item: Clone, + O5: IntoIterator, + O5::Item: Clone, + O6: IntoIterator, + O6::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, + D5: Fn() -> O5::Item, + D6: Fn() -> O6::Item, +{ + ClampedZip2x7 { + r0: r0.into_iter(), + r1: r1.into_iter(), + o0: o0.into_iter(), + o1: o1.into_iter(), + o2: o2.into_iter(), + o3: o3.into_iter(), + o4: o4.into_iter(), + o5: o5.into_iter(), + o6: o6.into_iter(), + o0_default_fn, + o1_default_fn, + o2_default_fn, + o3_default_fn, + o4_default_fn, + o5_default_fn, + o6_default_fn, + o0_latest_value: None, + o1_latest_value: None, + o2_latest_value: None, + o3_latest_value: None, + o4_latest_value: None, + o5_latest_value: None, + o6_latest_value: None, + } +} + +/// Implements a clamped zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`clamped_zip_2x7`] for more information. +pub struct ClampedZip2x7 +where + R0: Iterator, + R1: Iterator, + O0: Iterator, + O0::Item: Clone, + O1: Iterator, + O1::Item: Clone, + O2: Iterator, + O2::Item: Clone, + O3: Iterator, + O3::Item: Clone, + O4: Iterator, + O4::Item: Clone, + O5: Iterator, + O5::Item: Clone, + O6: Iterator, + O6::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, + D5: Fn() -> O5::Item, + D6: Fn() -> O6::Item, +{ + r0: R0, + r1: R1, + o0: O0, + o1: O1, + o2: O2, + o3: O3, + o4: O4, + o5: O5, + o6: O6, + o0_default_fn: D0, + o1_default_fn: D1, + o2_default_fn: D2, + o3_default_fn: D3, + o4_default_fn: D4, + o5_default_fn: D5, + o6_default_fn: D6, + + o0_latest_value: Option, + o1_latest_value: Option, + o2_latest_value: Option, + o3_latest_value: Option, + o4_latest_value: Option, + o5_latest_value: Option, + o6_latest_value: Option, +} + +impl Iterator + for ClampedZip2x7 +where + R0: Iterator, + R1: Iterator, + O0: Iterator, + O0::Item: Clone, + O1: Iterator, + O1::Item: Clone, + O2: Iterator, + O2::Item: Clone, + O3: Iterator, + O3::Item: Clone, + O4: Iterator, + O4::Item: Clone, + O5: Iterator, + O5::Item: Clone, + O6: Iterator, + O6::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, + D5: Fn() -> O5::Item, + D6: Fn() -> O6::Item, +{ + type Item = ( + R0::Item, + R1::Item, + O0::Item, + O1::Item, + O2::Item, + O3::Item, + O4::Item, + O5::Item, + O6::Item, + ); + + #[inline] + fn next(&mut self) -> Option { + let r0_next = self.r0.next()?; + let r1_next = self.r1.next()?; + let o0_next = self.o0.next().or(self.o0_latest_value.take()); + let o1_next = self.o1.next().or(self.o1_latest_value.take()); + let o2_next = self.o2.next().or(self.o2_latest_value.take()); + let o3_next = self.o3.next().or(self.o3_latest_value.take()); + let 
o4_next = self.o4.next().or(self.o4_latest_value.take()); + let o5_next = self.o5.next().or(self.o5_latest_value.take()); + let o6_next = self.o6.next().or(self.o6_latest_value.take()); + + self.o0_latest_value.clone_from(&o0_next); + self.o1_latest_value.clone_from(&o1_next); + self.o2_latest_value.clone_from(&o2_next); + self.o3_latest_value.clone_from(&o3_next); + self.o4_latest_value.clone_from(&o4_next); + self.o5_latest_value.clone_from(&o5_next); + self.o6_latest_value.clone_from(&o6_next); + + Some(( + r0_next, + r1_next, + o0_next.unwrap_or_else(|| (self.o0_default_fn)()), + o1_next.unwrap_or_else(|| (self.o1_default_fn)()), + o2_next.unwrap_or_else(|| (self.o2_default_fn)()), + o3_next.unwrap_or_else(|| (self.o3_default_fn)()), + o4_next.unwrap_or_else(|| (self.o4_default_fn)()), + o5_next.unwrap_or_else(|| (self.o5_default_fn)()), + o6_next.unwrap_or_else(|| (self.o6_default_fn)()), + )) + } +} + +/// Returns a new [`ClampedZip2x8`] iterator. +/// +/// The number of elements in a clamped zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`, `r1`). +/// +/// Optional iterators (`o0`, `o1`, `o2`, `o3`, `o4`, `o5`, `o6`, `o7`) will repeat their latest values if they happen to be too short +/// to be zipped with the shortest of the required iterators. +/// +/// If an optional iterator is not only too short but actually empty, its associated default function +/// (`o0_default_fn`, `o1_default_fn`, `o2_default_fn`, `o3_default_fn`, `o4_default_fn`, `o5_default_fn`, `o6_default_fn`, `o7_default_fn`) will be executed and the resulting value repeated as necessary. +pub fn clamped_zip_2x8( + r0: R0, + r1: R1, + o0: O0, + o0_default_fn: D0, + o1: O1, + o1_default_fn: D1, + o2: O2, + o2_default_fn: D2, + o3: O3, + o3_default_fn: D3, + o4: O4, + o4_default_fn: D4, + o5: O5, + o5_default_fn: D5, + o6: O6, + o6_default_fn: D6, + o7: O7, + o7_default_fn: D7, +) -> ClampedZip2x8< + R0::IntoIter, + R1::IntoIter, + O0::IntoIter, + O1::IntoIter, + O2::IntoIter, + O3::IntoIter, + O4::IntoIter, + O5::IntoIter, + O6::IntoIter, + O7::IntoIter, + D0, + D1, + D2, + D3, + D4, + D5, + D6, + D7, +> +where + R0: IntoIterator, + R1: IntoIterator, + O0: IntoIterator, + O0::Item: Clone, + O1: IntoIterator, + O1::Item: Clone, + O2: IntoIterator, + O2::Item: Clone, + O3: IntoIterator, + O3::Item: Clone, + O4: IntoIterator, + O4::Item: Clone, + O5: IntoIterator, + O5::Item: Clone, + O6: IntoIterator, + O6::Item: Clone, + O7: IntoIterator, + O7::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, + D5: Fn() -> O5::Item, + D6: Fn() -> O6::Item, + D7: Fn() -> O7::Item, +{ + ClampedZip2x8 { + r0: r0.into_iter(), + r1: r1.into_iter(), + o0: o0.into_iter(), + o1: o1.into_iter(), + o2: o2.into_iter(), + o3: o3.into_iter(), + o4: o4.into_iter(), + o5: o5.into_iter(), + o6: o6.into_iter(), + o7: o7.into_iter(), + o0_default_fn, + o1_default_fn, + o2_default_fn, + o3_default_fn, + o4_default_fn, + o5_default_fn, + o6_default_fn, + o7_default_fn, + o0_latest_value: None, + o1_latest_value: None, + o2_latest_value: None, + o3_latest_value: None, + o4_latest_value: None, + o5_latest_value: None, + o6_latest_value: None, + o7_latest_value: None, + } +} + +/// Implements a clamped zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`clamped_zip_2x8`] for more information. 
+pub struct ClampedZip2x8 +where + R0: Iterator, + R1: Iterator, + O0: Iterator, + O0::Item: Clone, + O1: Iterator, + O1::Item: Clone, + O2: Iterator, + O2::Item: Clone, + O3: Iterator, + O3::Item: Clone, + O4: Iterator, + O4::Item: Clone, + O5: Iterator, + O5::Item: Clone, + O6: Iterator, + O6::Item: Clone, + O7: Iterator, + O7::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, + D5: Fn() -> O5::Item, + D6: Fn() -> O6::Item, + D7: Fn() -> O7::Item, +{ + r0: R0, + r1: R1, + o0: O0, + o1: O1, + o2: O2, + o3: O3, + o4: O4, + o5: O5, + o6: O6, + o7: O7, + o0_default_fn: D0, + o1_default_fn: D1, + o2_default_fn: D2, + o3_default_fn: D3, + o4_default_fn: D4, + o5_default_fn: D5, + o6_default_fn: D6, + o7_default_fn: D7, + + o0_latest_value: Option, + o1_latest_value: Option, + o2_latest_value: Option, + o3_latest_value: Option, + o4_latest_value: Option, + o5_latest_value: Option, + o6_latest_value: Option, + o7_latest_value: Option, +} + +impl Iterator + for ClampedZip2x8 +where + R0: Iterator, + R1: Iterator, + O0: Iterator, + O0::Item: Clone, + O1: Iterator, + O1::Item: Clone, + O2: Iterator, + O2::Item: Clone, + O3: Iterator, + O3::Item: Clone, + O4: Iterator, + O4::Item: Clone, + O5: Iterator, + O5::Item: Clone, + O6: Iterator, + O6::Item: Clone, + O7: Iterator, + O7::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, + D5: Fn() -> O5::Item, + D6: Fn() -> O6::Item, + D7: Fn() -> O7::Item, +{ + type Item = ( + R0::Item, + R1::Item, + O0::Item, + O1::Item, + O2::Item, + O3::Item, + O4::Item, + O5::Item, + O6::Item, + O7::Item, + ); + + #[inline] + fn next(&mut self) -> Option { + let r0_next = self.r0.next()?; + let r1_next = self.r1.next()?; + let o0_next = self.o0.next().or(self.o0_latest_value.take()); + let o1_next = self.o1.next().or(self.o1_latest_value.take()); + let o2_next = self.o2.next().or(self.o2_latest_value.take()); + let o3_next = self.o3.next().or(self.o3_latest_value.take()); + let o4_next = self.o4.next().or(self.o4_latest_value.take()); + let o5_next = self.o5.next().or(self.o5_latest_value.take()); + let o6_next = self.o6.next().or(self.o6_latest_value.take()); + let o7_next = self.o7.next().or(self.o7_latest_value.take()); + + self.o0_latest_value.clone_from(&o0_next); + self.o1_latest_value.clone_from(&o1_next); + self.o2_latest_value.clone_from(&o2_next); + self.o3_latest_value.clone_from(&o3_next); + self.o4_latest_value.clone_from(&o4_next); + self.o5_latest_value.clone_from(&o5_next); + self.o6_latest_value.clone_from(&o6_next); + self.o7_latest_value.clone_from(&o7_next); + + Some(( + r0_next, + r1_next, + o0_next.unwrap_or_else(|| (self.o0_default_fn)()), + o1_next.unwrap_or_else(|| (self.o1_default_fn)()), + o2_next.unwrap_or_else(|| (self.o2_default_fn)()), + o3_next.unwrap_or_else(|| (self.o3_default_fn)()), + o4_next.unwrap_or_else(|| (self.o4_default_fn)()), + o5_next.unwrap_or_else(|| (self.o5_default_fn)()), + o6_next.unwrap_or_else(|| (self.o6_default_fn)()), + o7_next.unwrap_or_else(|| (self.o7_default_fn)()), + )) + } +} + +/// Returns a new [`ClampedZip2x9`] iterator. +/// +/// The number of elements in a clamped zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`, `r1`). 
+/// +/// Optional iterators (`o0`, `o1`, `o2`, `o3`, `o4`, `o5`, `o6`, `o7`, `o8`) will repeat their latest values if they happen to be too short +/// to be zipped with the shortest of the required iterators. +/// +/// If an optional iterator is not only too short but actually empty, its associated default function +/// (`o0_default_fn`, `o1_default_fn`, `o2_default_fn`, `o3_default_fn`, `o4_default_fn`, `o5_default_fn`, `o6_default_fn`, `o7_default_fn`, `o8_default_fn`) will be executed and the resulting value repeated as necessary. +pub fn clamped_zip_2x9< + R0, + R1, + O0, + O1, + O2, + O3, + O4, + O5, + O6, + O7, + O8, + D0, + D1, + D2, + D3, + D4, + D5, + D6, + D7, + D8, +>( + r0: R0, + r1: R1, + o0: O0, + o0_default_fn: D0, + o1: O1, + o1_default_fn: D1, + o2: O2, + o2_default_fn: D2, + o3: O3, + o3_default_fn: D3, + o4: O4, + o4_default_fn: D4, + o5: O5, + o5_default_fn: D5, + o6: O6, + o6_default_fn: D6, + o7: O7, + o7_default_fn: D7, + o8: O8, + o8_default_fn: D8, +) -> ClampedZip2x9< + R0::IntoIter, + R1::IntoIter, + O0::IntoIter, + O1::IntoIter, + O2::IntoIter, + O3::IntoIter, + O4::IntoIter, + O5::IntoIter, + O6::IntoIter, + O7::IntoIter, + O8::IntoIter, + D0, + D1, + D2, + D3, + D4, + D5, + D6, + D7, + D8, +> +where + R0: IntoIterator, + R1: IntoIterator, + O0: IntoIterator, + O0::Item: Clone, + O1: IntoIterator, + O1::Item: Clone, + O2: IntoIterator, + O2::Item: Clone, + O3: IntoIterator, + O3::Item: Clone, + O4: IntoIterator, + O4::Item: Clone, + O5: IntoIterator, + O5::Item: Clone, + O6: IntoIterator, + O6::Item: Clone, + O7: IntoIterator, + O7::Item: Clone, + O8: IntoIterator, + O8::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, + D5: Fn() -> O5::Item, + D6: Fn() -> O6::Item, + D7: Fn() -> O7::Item, + D8: Fn() -> O8::Item, +{ + ClampedZip2x9 { + r0: r0.into_iter(), + r1: r1.into_iter(), + o0: o0.into_iter(), + o1: o1.into_iter(), + o2: o2.into_iter(), + o3: o3.into_iter(), + o4: o4.into_iter(), + o5: o5.into_iter(), + o6: o6.into_iter(), + o7: o7.into_iter(), + o8: o8.into_iter(), + o0_default_fn, + o1_default_fn, + o2_default_fn, + o3_default_fn, + o4_default_fn, + o5_default_fn, + o6_default_fn, + o7_default_fn, + o8_default_fn, + o0_latest_value: None, + o1_latest_value: None, + o2_latest_value: None, + o3_latest_value: None, + o4_latest_value: None, + o5_latest_value: None, + o6_latest_value: None, + o7_latest_value: None, + o8_latest_value: None, + } +} + +/// Implements a clamped zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`clamped_zip_2x9`] for more information. 
+pub struct ClampedZip2x9< + R0, + R1, + O0, + O1, + O2, + O3, + O4, + O5, + O6, + O7, + O8, + D0, + D1, + D2, + D3, + D4, + D5, + D6, + D7, + D8, +> where + R0: Iterator, + R1: Iterator, + O0: Iterator, + O0::Item: Clone, + O1: Iterator, + O1::Item: Clone, + O2: Iterator, + O2::Item: Clone, + O3: Iterator, + O3::Item: Clone, + O4: Iterator, + O4::Item: Clone, + O5: Iterator, + O5::Item: Clone, + O6: Iterator, + O6::Item: Clone, + O7: Iterator, + O7::Item: Clone, + O8: Iterator, + O8::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, + D5: Fn() -> O5::Item, + D6: Fn() -> O6::Item, + D7: Fn() -> O7::Item, + D8: Fn() -> O8::Item, +{ + r0: R0, + r1: R1, + o0: O0, + o1: O1, + o2: O2, + o3: O3, + o4: O4, + o5: O5, + o6: O6, + o7: O7, + o8: O8, + o0_default_fn: D0, + o1_default_fn: D1, + o2_default_fn: D2, + o3_default_fn: D3, + o4_default_fn: D4, + o5_default_fn: D5, + o6_default_fn: D6, + o7_default_fn: D7, + o8_default_fn: D8, + + o0_latest_value: Option, + o1_latest_value: Option, + o2_latest_value: Option, + o3_latest_value: Option, + o4_latest_value: Option, + o5_latest_value: Option, + o6_latest_value: Option, + o7_latest_value: Option, + o8_latest_value: Option, +} + +impl Iterator + for ClampedZip2x9< + R0, + R1, + O0, + O1, + O2, + O3, + O4, + O5, + O6, + O7, + O8, + D0, + D1, + D2, + D3, + D4, + D5, + D6, + D7, + D8, + > +where + R0: Iterator, + R1: Iterator, + O0: Iterator, + O0::Item: Clone, + O1: Iterator, + O1::Item: Clone, + O2: Iterator, + O2::Item: Clone, + O3: Iterator, + O3::Item: Clone, + O4: Iterator, + O4::Item: Clone, + O5: Iterator, + O5::Item: Clone, + O6: Iterator, + O6::Item: Clone, + O7: Iterator, + O7::Item: Clone, + O8: Iterator, + O8::Item: Clone, + D0: Fn() -> O0::Item, + D1: Fn() -> O1::Item, + D2: Fn() -> O2::Item, + D3: Fn() -> O3::Item, + D4: Fn() -> O4::Item, + D5: Fn() -> O5::Item, + D6: Fn() -> O6::Item, + D7: Fn() -> O7::Item, + D8: Fn() -> O8::Item, +{ + type Item = ( + R0::Item, + R1::Item, + O0::Item, + O1::Item, + O2::Item, + O3::Item, + O4::Item, + O5::Item, + O6::Item, + O7::Item, + O8::Item, + ); + + #[inline] + fn next(&mut self) -> Option { + let r0_next = self.r0.next()?; + let r1_next = self.r1.next()?; + let o0_next = self.o0.next().or(self.o0_latest_value.take()); + let o1_next = self.o1.next().or(self.o1_latest_value.take()); + let o2_next = self.o2.next().or(self.o2_latest_value.take()); + let o3_next = self.o3.next().or(self.o3_latest_value.take()); + let o4_next = self.o4.next().or(self.o4_latest_value.take()); + let o5_next = self.o5.next().or(self.o5_latest_value.take()); + let o6_next = self.o6.next().or(self.o6_latest_value.take()); + let o7_next = self.o7.next().or(self.o7_latest_value.take()); + let o8_next = self.o8.next().or(self.o8_latest_value.take()); + + self.o0_latest_value.clone_from(&o0_next); + self.o1_latest_value.clone_from(&o1_next); + self.o2_latest_value.clone_from(&o2_next); + self.o3_latest_value.clone_from(&o3_next); + self.o4_latest_value.clone_from(&o4_next); + self.o5_latest_value.clone_from(&o5_next); + self.o6_latest_value.clone_from(&o6_next); + self.o7_latest_value.clone_from(&o7_next); + self.o8_latest_value.clone_from(&o8_next); + + Some(( + r0_next, + r1_next, + o0_next.unwrap_or_else(|| (self.o0_default_fn)()), + o1_next.unwrap_or_else(|| (self.o1_default_fn)()), + o2_next.unwrap_or_else(|| (self.o2_default_fn)()), + o3_next.unwrap_or_else(|| (self.o3_default_fn)()), + o4_next.unwrap_or_else(|| (self.o4_default_fn)()), + 
o5_next.unwrap_or_else(|| (self.o5_default_fn)()),
+            o6_next.unwrap_or_else(|| (self.o6_default_fn)()),
+            o7_next.unwrap_or_else(|| (self.o7_default_fn)()),
+            o8_next.unwrap_or_else(|| (self.o8_default_fn)()),
+        ))
+    }
+}
diff --git a/crates/store/re_query2/src/clamped_zip/mod.rs b/crates/store/re_query2/src/clamped_zip/mod.rs
new file mode 100644
index 0000000000000..65bd61b050210
--- /dev/null
+++ b/crates/store/re_query2/src/clamped_zip/mod.rs
@@ -0,0 +1,64 @@
+mod generated;
+pub use self::generated::*;
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn r0_is_empty_o0_is_empty() {
+        let r0 = std::iter::empty::<u32>();
+        let o0 = (0..).map(|n| n.to_string());
+
+        let expected: Vec<(u32, String)> = vec![];
+        let got = clamped_zip_1x1(r0, o0, String::new).collect::<Vec<_>>();
+
+        similar_asserts::assert_eq!(expected, got);
+    }
+
+    #[test]
+    fn r0_and_o0_are_matched() {
+        let r0 = 0..20u32;
+        let o0 = (0..20).map(|n| n.to_string());
+
+        let expected: Vec<(u32, String)> = (0..20u32).map(|n| (n, n.to_string())).collect();
+        let got = clamped_zip_1x1(r0, o0, String::new).collect::<Vec<_>>();
+
+        similar_asserts::assert_eq!(expected, got);
+    }
+
+    #[test]
+    fn r0_is_shorter() {
+        let r0 = 0..10u32;
+        let o0 = (0..20).map(|n| n.to_string());
+
+        let expected: Vec<(u32, String)> = (0..10u32).map(|n| (n, n.to_string())).collect();
+        let got = clamped_zip_1x1(r0, o0, String::new).collect::<Vec<_>>();
+
+        similar_asserts::assert_eq!(expected, got);
+    }
+
+    #[test]
+    fn r0_is_longer() {
+        let r0 = 0..30u32;
+        let o0 = (0..20).map(|n| n.to_string());
+
+        let expected: Vec<(u32, String)> = (0..30u32)
+            .map(|n| (n, u32::min(n, 19).to_string()))
+            .collect();
+        let got = clamped_zip_1x1(r0, o0, String::new).collect::<Vec<_>>();
+
+        similar_asserts::assert_eq!(expected, got);
+    }
+
+    #[test]
+    fn r0_is_longer_and_o0_is_empty() {
+        let r0 = 0..10u32;
+        let o0 = std::iter::empty();
+
+        let expected: Vec<(u32, String)> = (0..10u32).map(|n| (n, "hey".to_owned())).collect();
+        let got = clamped_zip_1x1(r0, o0, || "hey".to_owned()).collect::<Vec<_>>();
+
+        similar_asserts::assert_eq!(expected, got);
+    }
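+
+    // A hand-written sketch (not produced by the generator) of one of the wider
+    // combinators: the two required streams drive the iteration, too-short optional
+    // streams repeat their latest value, and empty ones fall back to their default fn.
+    #[test]
+    fn clamped_zip_2x6_sketch() {
+        let r0 = 0..4u32;
+        let r1 = 10..14u32;
+        let o0 = vec!["a", "b"]; // too short: "b" gets repeated
+        let o1 = Vec::<&str>::new(); // empty: the default fn fires on every element
+
+        let got = clamped_zip_2x6(
+            r0,
+            r1,
+            o0,
+            || "-",
+            o1,
+            || "!",
+            std::iter::empty::<u32>(),
+            || 0,
+            std::iter::empty::<u32>(),
+            || 0,
+            std::iter::empty::<u32>(),
+            || 0,
+            std::iter::empty::<u32>(),
+            || 0,
+        )
+        .collect::<Vec<_>>();
+
+        assert_eq!(got.len(), 4); // clamped to the shortest required iterator
+        assert_eq!(got[0], (0, 10, "a", "!", 0, 0, 0, 0));
+        assert_eq!(got[3], (3, 13, "b", "!", 0, 0, 0, 0));
+    }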
+}
diff --git a/crates/store/re_query2/src/latest_at.rs b/crates/store/re_query2/src/latest_at.rs
new file mode 100644
index 0000000000000..fe42c699f3b29
--- /dev/null
+++ b/crates/store/re_query2/src/latest_at.rs
@@ -0,0 +1,681 @@
+use std::{collections::BTreeMap, sync::Arc};
+
+use arrow2::array::Array as ArrowArray;
+use nohash_hasher::IntMap;
+use parking_lot::RwLock;
+
+use re_chunk::{Chunk, RowId, UnitChunkShared};
+use re_chunk_store::{ChunkStore, LatestAtQuery, TimeInt};
+use re_log_types::EntityPath;
+use re_types_core::{
+    components::ClearIsRecursive, Component, ComponentName, Loggable as _, SizeBytes,
+};
+
+use crate::{CacheKey, Caches, QueryError};
+
+// --- Public API ---
+
+/// Compute the ordering of two data indices, making sure to deal with `STATIC` data appropriately.
+//
+// TODO(cmc): Maybe at some point we'll want to introduce a dedicated `DataIndex` type with
+// proper ordering operators etc.
+// It's harder than it sounds though -- depending on the context, you don't necessarily want index
+// ordering to behave the same way.
+fn compare_indices(lhs: (TimeInt, RowId), rhs: (TimeInt, RowId)) -> std::cmp::Ordering {
+    match (lhs, rhs) {
+        ((TimeInt::STATIC, lhs_row_id), (TimeInt::STATIC, rhs_row_id)) => {
+            lhs_row_id.cmp(&rhs_row_id)
+        }
+        ((_, _), (TimeInt::STATIC, _)) => std::cmp::Ordering::Less,
+        ((TimeInt::STATIC, _), (_, _)) => std::cmp::Ordering::Greater,
+        _ => lhs.cmp(&rhs),
+    }
+}
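+
+#[cfg(test)]
+mod compare_indices_tests {
+    use super::*;
+
+    // A hand-written sketch (not part of the original patch): static data always shadows
+    // temporal data, no matter the row-ids involved; everything else falls back to plain
+    // tuple ordering.
+    #[test]
+    fn static_always_wins() {
+        let static_index = (TimeInt::STATIC, RowId::ZERO);
+        let temporal_index = (TimeInt::MAX, RowId::new());
+
+        assert_eq!(
+            compare_indices(static_index, temporal_index),
+            std::cmp::Ordering::Greater
+        );
+        assert_eq!(
+            compare_indices(temporal_index, static_index),
+            std::cmp::Ordering::Less
+        );
+    }
+}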
+
+impl Caches {
+    /// Queries for the given `component_names` using latest-at semantics.
+    ///
+    /// See [`LatestAtResults`] for more information about how to handle the results.
+    ///
+    /// This is a cached API -- data will be lazily cached upon access.
+    pub fn latest_at(
+        &self,
+        store: &ChunkStore,
+        query: &LatestAtQuery,
+        entity_path: &EntityPath,
+        component_names: impl IntoIterator<Item = ComponentName>,
+    ) -> LatestAtResults {
+        re_tracing::profile_function!(entity_path.to_string());
+
+        let mut results = LatestAtResults::empty(entity_path.clone(), query.clone());
+
+        // NOTE: This pre-filtering is extremely important: going through all these query layers
+        // has non-negligible overhead even if the final result ends up being nothing, and our
+        // number of queries for a frame grows linearly with the number of entity paths.
+        let component_names = component_names.into_iter().filter(|component_name| {
+            store.entity_has_component_on_timeline(&query.timeline(), entity_path, component_name)
+        });
+
+        // Query-time clears
+        // -----------------
+        //
+        // We need to find, at query time, whether there exists a `Clear` component that should
+        // shadow part or all of the results that we are about to return.
+        //
+        // This is a two-step process.
+        //
+        // First, we need to find all `Clear` components that could potentially affect the returned
+        // results, i.e. any `Clear` component on the entity itself, or any recursive `Clear`
+        // component on any of its recursive parents.
+        //
+        // Then, we need to compare the index of each component result with the index of the most
+        // recent relevant `Clear` component that was found: if there exists a `Clear` component
+        // with both a _data time_ less than or equal to the _query time_ and an index greater than
+        // or equal to the index of the returned data, then we know for sure that the `Clear`
+        // shadows the data.
+        let mut max_clear_index = (TimeInt::MIN, RowId::ZERO);
+        {
+            re_tracing::profile_scope!("clears");
+
+            let potential_clears = self.might_require_clearing.read();
+
+            let mut clear_entity_path = entity_path.clone();
+            loop {
+                if !potential_clears.contains(&clear_entity_path) {
+                    // This entity does not contain any `Clear`-related data at all, there's no
+                    // point in running actual queries.
+
+                    let Some(parent_entity_path) = clear_entity_path.parent() else {
+                        break;
+                    };
+                    clear_entity_path = parent_entity_path;
+
+                    continue;
+                }
+
+                let key = CacheKey::new(
+                    clear_entity_path.clone(),
+                    query.timeline(),
+                    ClearIsRecursive::name(),
+                );
+
+                let cache = Arc::clone(
+                    self.latest_at_per_cache_key
+                        .write()
+                        .entry(key.clone())
+                        .or_insert_with(|| Arc::new(RwLock::new(LatestAtCache::new(key.clone())))),
+                );
+
+                let mut cache = cache.write();
+                cache.handle_pending_invalidation();
+                if let Some(cached) =
+                    cache.latest_at(store, query, &clear_entity_path, ClearIsRecursive::name())
+                {
+                    let found_recursive_clear = cached
+                        .component_mono::<ClearIsRecursive>()
+                        .and_then(Result::ok)
+                        == Some(ClearIsRecursive(true.into()));
+                    // When checking the entity itself, any kind of `Clear` component
+                    // (i.e. recursive or not) will do.
+                    //
+                    // For (recursive) parents, we need to deserialize the data to make sure the
+                    // recursive flag is set.
+                    #[allow(clippy::collapsible_if)] // readability
+                    if clear_entity_path == *entity_path || found_recursive_clear {
+                        if let Some(index) = cached.index(&query.timeline()) {
+                            if compare_indices(index, max_clear_index)
+                                == std::cmp::Ordering::Greater
+                            {
+                                max_clear_index = index;
+                            }
+                        }
+                    }
+                }
+
+                let Some(parent_entity_path) = clear_entity_path.parent() else {
+                    break;
+                };
+
+                clear_entity_path = parent_entity_path;
+            }
+        }
+
+        for component_name in component_names {
+            let key = CacheKey::new(entity_path.clone(), query.timeline(), component_name);
+
+            let cache = Arc::clone(
+                self.latest_at_per_cache_key
+                    .write()
+                    .entry(key.clone())
+                    .or_insert_with(|| Arc::new(RwLock::new(LatestAtCache::new(key.clone())))),
+            );
+
+            let mut cache = cache.write();
+            cache.handle_pending_invalidation();
+            if let Some(cached) = cache.latest_at(store, query, entity_path, component_name) {
+                // 1. A `Clear` component doesn't shadow its own self.
+                // 2. If a `Clear` component was found with an index greater than or equal to the
+                //    component data, then we know for sure that it should shadow it.
+                if let Some(index) = cached.index(&query.timeline()) {
+                    if component_name == ClearIsRecursive::name()
+                        || compare_indices(index, max_clear_index) == std::cmp::Ordering::Greater
+                    {
+                        results.add(component_name, index, cached);
+                    }
+                }
+            }
+        }
+
+        results
+    }
+}
+
+// --- Results ---
+
+/// Results for a latest-at query.
+///
+/// Use [`LatestAtResults::get`] and/or [`LatestAtResults::get_required`] in order to access
+/// the results for each individual component.
+#[derive(Debug)]
+pub struct LatestAtResults {
+    /// The associated [`EntityPath`].
+    pub entity_path: EntityPath,
+
+    /// The query that yielded these results.
+    pub query: LatestAtQuery,
+
+    /// The compound index of this query result.
+    ///
+    /// A latest-at query is a compound operation that gathers data from many different rows.
+    /// The index of that compound result corresponds to the index of the most recent row among
+    /// all the sub-results, as defined by time and row-id order.
+    pub compound_index: (TimeInt, RowId),
+
+    /// Results for each individual component.
+    pub components: IntMap<ComponentName, UnitChunkShared>,
+}
+
+impl LatestAtResults {
+    #[inline]
+    pub fn empty(entity_path: EntityPath, query: LatestAtQuery) -> Self {
+        Self {
+            entity_path,
+            query,
+            compound_index: (TimeInt::STATIC, RowId::ZERO),
+            components: Default::default(),
+        }
+    }
+}
+
+impl LatestAtResults {
+    #[inline]
+    pub fn contains(&self, component_name: &ComponentName) -> bool {
+        self.components.contains_key(component_name)
+    }
+
+    /// Returns the [`UnitChunkShared`] for the specified [`Component`].
+    #[inline]
+    pub fn get(&self, component_name: &ComponentName) -> Option<&UnitChunkShared> {
+        self.components.get(component_name)
+    }
+
+    /// Returns the [`UnitChunkShared`] for the specified [`Component`].
+    ///
+    /// Returns an error if the component is not present.
+    #[inline]
+    pub fn get_required(&self, component_name: &ComponentName) -> crate::Result<&UnitChunkShared> {
+        if let Some(component) = self.get(component_name) {
+            Ok(component)
+        } else {
+            Err(QueryError::PrimaryNotFound(*component_name))
+        }
+    }
+
+    /// Returns the compound index (`(TimeInt, RowId)` pair) of the results.
+    #[inline]
+    pub fn index(&self) -> (TimeInt, RowId) {
+        self.compound_index
+    }
+}
+
+impl LatestAtResults {
+    #[doc(hidden)]
+    #[inline]
+    pub fn add(
+        &mut self,
+        component_name: ComponentName,
+        index: (TimeInt, RowId),
+        chunk: UnitChunkShared,
+    ) {
+        debug_assert!(chunk.num_rows() == 1);
+
+        // NOTE: Since this is a compound API that actually emits multiple queries, the index of the
+        // final result is the most recent index among all of its components, as defined by time
+        // and row-id order.
+        if index > self.compound_index {
+            self.compound_index = index;
+        }
+
+        self.components.insert(component_name, chunk);
+    }
+}
+
+// --- Helpers ---
+//
+// Helpers for UI and other high-level/user-facing code.
+//
+// In particular, these replace all error handling with logs instead.
+
+impl LatestAtResults {
+    // --- Batch ---
+
+    /// Returns the raw data for the specified component.
+    #[inline]
+    pub fn component_batch_raw(
+        &self,
+        component_name: &ComponentName,
+    ) -> Option<Box<dyn ArrowArray>> {
+        self.components
+            .get(component_name)
+            .and_then(|unit| unit.component_batch_raw(component_name))
+    }
+
+    /// Returns the deserialized data for the specified component.
+    ///
+    /// Logs at the specified `log_level` if the data cannot be deserialized.
+    #[inline]
+    pub fn component_batch_with_log_level<C: Component>(
+        &self,
+        log_level: re_log::Level,
+    ) -> Option<Vec<C>> {
+        self.components
+            .get(&C::name())
+            .and_then(|unit| self.ok_or_log_err(log_level, C::name(), unit.component_batch()?))
+    }
+
+    /// Returns the deserialized data for the specified component.
+    ///
+    /// Logs an error if the data cannot be deserialized.
+    #[inline]
+    pub fn component_batch<C: Component>(&self) -> Option<Vec<C>> {
+        self.component_batch_with_log_level(re_log::Level::Error)
+    }
+
+    /// Returns the deserialized data for the specified component.
+    #[inline]
+    pub fn component_batch_quiet<C: Component>(&self) -> Option<Vec<C>> {
+        self.components
+            .get(&C::name())
+            .and_then(|unit| unit.component_batch()?.ok())
+    }
+
+    // --- Instance ---
+
+    /// Returns the raw data for the specified component at the given instance index.
+    ///
+    /// Logs at the specified `log_level` if the instance index is out of bounds.
+    #[inline]
+    pub fn component_instance_raw_with_log_level(
+        &self,
+        log_level: re_log::Level,
+        component_name: &ComponentName,
+        instance_index: usize,
+    ) -> Option<Box<dyn ArrowArray>> {
+        self.components.get(component_name).and_then(|unit| {
+            self.ok_or_log_err(
+                log_level,
+                *component_name,
+                unit.component_instance_raw(component_name, instance_index)?,
+            )
+        })
+    }
+
+    /// Returns the raw data for the specified component at the given instance index.
+    ///
+    /// Logs an error if the instance index is out of bounds.
+    #[inline]
+    pub fn component_instance_raw(
+        &self,
+        component_name: &ComponentName,
+        instance_index: usize,
+    ) -> Option<Box<dyn ArrowArray>> {
+        self.component_instance_raw_with_log_level(
+            re_log::Level::Error,
+            component_name,
+            instance_index,
+        )
+    }
+
+    /// Returns the raw data for the specified component at the given instance index.
+    #[inline]
+    pub fn component_instance_raw_quiet(
+        &self,
+        component_name: &ComponentName,
+        instance_index: usize,
+    ) -> Option<Box<dyn ArrowArray>> {
+        self.components.get(component_name).and_then(|unit| {
+            unit.component_instance_raw(component_name, instance_index)?
+                .ok()
+        })
+    }
+
+    /// Returns the deserialized data for the specified component at the given instance index.
+    ///
+    /// Logs at the specified `log_level` if the data cannot be deserialized, or if the instance index
+    /// is out of bounds.
+    #[inline]
+    pub fn component_instance_with_log_level<C: Component>(
+        &self,
+        log_level: re_log::Level,
+        instance_index: usize,
+    ) -> Option<C> {
+        self.components.get(&C::name()).and_then(|unit| {
+            self.ok_or_log_err(
+                log_level,
+                C::name(),
+                unit.component_instance(instance_index)?,
+            )
+        })
+    }
+
+    /// Returns the deserialized data for the specified component at the given instance index.
+    ///
+    /// Logs an error if the data cannot be deserialized, or if the instance index is out of bounds.
+    #[inline]
+    pub fn component_instance<C: Component>(&self, instance_index: usize) -> Option<C> {
+        self.component_instance_with_log_level(re_log::Level::Error, instance_index)
+    }
+
+    /// Returns the deserialized data for the specified component at the given instance index.
+    ///
+    /// Quietly returns `None` if the data cannot be deserialized, or if the instance index is
+    /// out of bounds.
+    #[inline]
+    pub fn component_instance_quiet<C: Component>(&self, instance_index: usize) -> Option<C> {
+        self.components
+            .get(&C::name())
+            .and_then(|unit| unit.component_instance(instance_index)?.ok())
+    }
+
+    // --- Mono ---
+
+    /// Returns the raw data for the specified component, assuming a mono-batch.
+    ///
+    /// Logs at the specified `log_level` if the underlying batch is not of unit length.
+    #[inline]
+    pub fn component_mono_raw_with_log_level(
+        &self,
+        log_level: re_log::Level,
+        component_name: &ComponentName,
+    ) -> Option<Box<dyn ArrowArray>> {
+        self.components.get(component_name).and_then(|unit| {
+            self.ok_or_log_err(
+                log_level,
+                *component_name,
+                unit.component_mono_raw(component_name)?,
+            )
+        })
+    }
+
+    /// Returns the raw data for the specified component, assuming a mono-batch.
+    ///
+    /// Logs an error if the underlying batch is not of unit length.
+    #[inline]
+    pub fn component_mono_raw(
+        &self,
+        component_name: &ComponentName,
+    ) -> Option<Box<dyn ArrowArray>> {
+        self.component_mono_raw_with_log_level(re_log::Level::Error, component_name)
+    }
+
+    /// Returns the raw data for the specified component, assuming a mono-batch.
+    ///
+    /// Quietly returns `None` if the underlying batch is not of unit length.
+    #[inline]
+    pub fn component_mono_raw_quiet(
+        &self,
+        component_name: &ComponentName,
+    ) -> Option<Box<dyn ArrowArray>> {
+        self.components
+            .get(component_name)
+            .and_then(|unit| unit.component_mono_raw(component_name)?.ok())
+    }
+
+    /// Returns the deserialized data for the specified component, assuming a mono-batch.
+    ///
+    /// Logs at the specified `log_level` if the data cannot be deserialized, or if the underlying batch
+    /// is not of unit length.
+    #[inline]
+    pub fn component_mono_with_log_level<C: Component>(
+        &self,
+        log_level: re_log::Level,
+    ) -> Option<C> {
+        self.components
+            .get(&C::name())
+            .and_then(|unit| self.ok_or_log_err(log_level, C::name(), unit.component_mono()?))
+    }
+
+    /// Returns the deserialized data for the specified component, assuming a mono-batch.
+    ///
+    /// Logs an error if the data cannot be deserialized, or if the underlying batch is not of unit length.
+    #[inline]
+    pub fn component_mono<C: Component>(&self) -> Option<C> {
+        self.component_mono_with_log_level(re_log::Level::Error)
+    }
+
+    /// Returns the deserialized data for the specified component, assuming a mono-batch.
+    ///
+    /// Quietly returns `None` if the data cannot be deserialized, or if the underlying batch is not of unit length.
+    #[inline]
+    pub fn component_mono_quiet<C: Component>(&self) -> Option<C> {
+        self.components
+            .get(&C::name())
+            .and_then(|unit| unit.component_mono()?.ok())
+    }
+
+    // ---
+
+    fn ok_or_log_err<T>(
+        &self,
+        log_level: re_log::Level,
+        component_name: ComponentName,
+        res: re_chunk::ChunkResult<T>,
+    ) -> Option<T> {
+        match res {
+            Ok(data) => Some(data),
+
+            // NOTE: It is expected for UI code to look for OOB instance indices on purpose.
+            // E.g. it is very common to look at index 0 in blueprint data that has been cleared.
+            Err(re_chunk::ChunkError::IndexOutOfBounds { len: 0, .. }) => None,
+
+            Err(err) => {
+                let entity_path = &self.entity_path;
+                let index = self.compound_index;
+                let err = re_error::format_ref(&err);
+                re_log::log_once!(
+                    log_level,
+                    "Couldn't read {entity_path}:{component_name} @ ({index:?}): {err}",
+                );
+                None
+            }
+        }
+    }
+}
+
+// --- Cached implementation ---
+
+/// Caches the results of `LatestAt` queries for a given [`CacheKey`].
+pub struct LatestAtCache {
+    /// For debugging purposes.
+    pub cache_key: CacheKey,
+
+    /// Organized by _query_ time.
+    ///
+    /// If the key is present but has a `None` value associated with it, it means we cached the
+    /// lack of result.
+    /// This is important to do performance-wise: we run _a lot_ of queries each frame to figure
+    /// out what to render, and this scales linearly with the number of entities.
+    pub per_query_time: BTreeMap<TimeInt, LatestAtCachedChunk>,
+
+    /// The smallest timestamp that has been invalidated.
+    ///
+    /// The next time this cache gets queried, it must remove any invalidated entries accordingly.
+    ///
+    /// Invalidation is deferred to query time because it is far more efficient that way: the frame
+    /// time effectively behaves as a natural micro-batching mechanism.
+    pub pending_invalidation: Option<TimeInt>,
+}
+
+impl LatestAtCache {
+    #[inline]
+    pub fn new(cache_key: CacheKey) -> Self {
+        Self {
+            cache_key,
+            per_query_time: Default::default(),
+            pending_invalidation: Default::default(),
+        }
+    }
+}
+
+impl std::fmt::Debug for LatestAtCache {
+    #[inline]
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let Self {
+            cache_key,
+            per_query_time,
+            pending_invalidation: _,
+        } = self;
+
+        let mut strings = Vec::new();
+
+        for (query_time, unit) in per_query_time {
+            strings.push(format!(
+                "query_time={} ({})",
+                cache_key.timeline.typ().format_utc(*query_time),
+                re_format::format_bytes(unit.total_size_bytes() as _)
+            ));
+        }
+
+        if strings.is_empty() {
+            return f.write_str("<empty>");
+        }
+
+        f.write_str(&strings.join("\n").replace("\n\n", "\n"))
+    }
+}
+
+#[derive(Clone)]
+pub struct LatestAtCachedChunk {
+    pub unit: UnitChunkShared,
+
+    /// Is this just a reference to another entry in the cache?
+    pub is_reference: bool,
+}
+
+impl SizeBytes for LatestAtCachedChunk {
+    #[inline]
+    fn heap_size_bytes(&self) -> u64 {
+        let Self {
+            unit: chunk,
+            is_reference,
+        } = self;
+
+        if *is_reference {
+            // This chunk is just a reference to another one in the cache.
+            // Consider it amortized.
+            0
+        } else {
+            Chunk::heap_size_bytes(chunk)
+        }
+    }
+}
+
+impl SizeBytes for LatestAtCache {
+    #[inline]
+    fn heap_size_bytes(&self) -> u64 {
+        let Self {
+            cache_key: _,
+            per_query_time,
+            pending_invalidation,
+        } = self;
+
+        let per_query_time = per_query_time.total_size_bytes();
+        let pending_invalidation = pending_invalidation.total_size_bytes();
+
+        per_query_time + pending_invalidation
+    }
+}
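+
+// A hand-written sketch (not part of the original patch) of the deferred-invalidation
+// contract that `handle_pending_invalidation` below implements: every entry cached at a
+// query time at or after the oldest invalidated data time must be dropped, while strictly
+// older entries remain valid.
+#[cfg(test)]
+mod invalidation_semantics_example {
+    #[test]
+    fn retain_drops_everything_at_or_after_the_invalidated_time() {
+        use std::collections::BTreeMap;
+
+        let mut per_query_time: BTreeMap<i64, &str> =
+            [(10, "a"), (20, "b"), (30, "c")].into_iter().collect();
+
+        // Say the data at time 20 was overwritten: any query at t >= 20 may now be stale.
+        let oldest_invalidated_data_time = 20;
+        per_query_time.retain(|&query_time, _| query_time < oldest_invalidated_data_time);
+
+        assert_eq!(
+            per_query_time.into_iter().collect::<Vec<_>>(),
+            vec![(10, "a")]
+        );
+    }
+}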
+
+impl LatestAtCache {
+    /// Queries cached latest-at data for a single component.
+    pub fn latest_at(
+        &mut self,
+        store: &ChunkStore,
+        query: &LatestAtQuery,
+        entity_path: &EntityPath,
+        component_name: ComponentName,
+    ) -> Option<UnitChunkShared> {
+        re_tracing::profile_scope!("latest_at", format!("{component_name} @ {query:?}"));
+
+        debug_assert_eq!(query.timeline(), self.cache_key.timeline);
+
+        let Self {
+            cache_key: _,
+            per_query_time,
+            pending_invalidation: _,
+        } = self;
+
+        if let Some(cached) = per_query_time.get(&query.at()) {
+            return Some(cached.unit.clone());
+        }
+
+        let ((data_time, _row_id), unit) = store
+            .latest_at_relevant_chunks(query, entity_path, component_name)
+            .into_iter()
+            .filter_map(|chunk| {
+                chunk
+                    .latest_at(query, component_name)
+                    .into_unit()
+                    .and_then(|chunk| chunk.index(&query.timeline()).map(|index| (index, chunk)))
+            })
+            .max_by_key(|(index, _chunk)| *index)?;
+
+        let to_be_cached = if let Some(cached) = per_query_time.get(&data_time) {
+            // If already cached, just reference that, it's still cheaper than cloning all the
+            // arrow arrays etc.
+            LatestAtCachedChunk {
+                unit: cached.unit.clone(),
+                is_reference: true,
+            }
+        } else {
+            LatestAtCachedChunk {
+                unit,
+                is_reference: false,
+            }
+        };
+
+        per_query_time.insert(query.at(), to_be_cached.clone());
+        // Even though we're caching per query-time, we know for a fact that a query at that
+        // data-time would also yield the same result, i.e. this is the one case where
+        // data-time == query-time.
+        per_query_time.insert(data_time, to_be_cached.clone());
+
+        Some(to_be_cached.unit)
+    }
+
+    pub fn handle_pending_invalidation(&mut self) {
+        let Self {
+            cache_key: _,
+            per_query_time,
+            pending_invalidation,
+        } = self;
+
+        // Remove any data indexed by a _query time_ that's more recent than the oldest
+        // _data time_ that's been invalidated.
+        //
+        // Note that this data time might very well be `TimeInt::STATIC`, in which case the entire
+        // query-time-based index will be dropped.
+        if let Some(oldest_data_time) = pending_invalidation.take() {
+            per_query_time.retain(|&query_time, _| query_time < oldest_data_time);
+        }
+    }
+}
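+
+#[cfg(test)]
+mod latest_at_results_example {
+    use re_log_types::Timeline;
+
+    use super::*;
+
+    // A hand-written sketch (not part of the original patch) of what consumers see when a
+    // latest-at query yields nothing. `Timeline::new_sequence` and `LatestAtQuery::new` are
+    // assumed to behave as in the rest of the workspace.
+    #[test]
+    fn empty_results() {
+        let query = LatestAtQuery::new(Timeline::new_sequence("frame"), TimeInt::MAX);
+        let results = LatestAtResults::empty("points".into(), query);
+
+        // The compound index starts out at `(TimeInt::STATIC, RowId::ZERO)`…
+        assert_eq!(results.index(), (TimeInt::STATIC, RowId::ZERO));
+        // …and with no component present, the helpers quietly yield `None`.
+        assert!(!results.contains(&ClearIsRecursive::name()));
+        assert!(results.component_mono_quiet::<ClearIsRecursive>().is_none());
+    }
+}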
diff --git a/crates/store/re_query2/src/lib.rs b/crates/store/re_query2/src/lib.rs
new file mode 100644
index 0000000000000..62bab7a57d7f3
--- /dev/null
+++ b/crates/store/re_query2/src/lib.rs
@@ -0,0 +1,72 @@
+//! Caching datastructures for `re_query2`.
+
+mod cache;
+mod cache_stats;
+mod latest_at;
+mod range;
+
+pub mod clamped_zip;
+pub mod range_zip;
+
+pub use self::cache::{CacheKey, Caches};
+pub use self::cache_stats::{CacheStats, CachesStats};
+pub use self::clamped_zip::*;
+pub use self::latest_at::LatestAtResults;
+pub use self::range::RangeResults;
+pub use self::range_zip::*;
+
+pub(crate) use self::latest_at::LatestAtCache;
+pub(crate) use self::range::RangeCache;
+
+pub mod external {
+    pub use paste;
+    pub use seq_macro;
+}
+
+// ---
+
+#[derive(Debug, Clone, Copy)]
+pub struct ComponentNotFoundError(pub re_types_core::ComponentName);
+
+impl std::fmt::Display for ComponentNotFoundError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.write_fmt(format_args!("Could not find component: {}", self.0))
+    }
+}
+
+impl std::error::Error for ComponentNotFoundError {}
+
+#[derive(thiserror::Error, Debug)]
+pub enum QueryError {
+    #[error("Tried to access a column that doesn't exist")]
+    BadAccess,
+
+    #[error("Could not find primary component: {0}")]
+    PrimaryNotFound(re_types_core::ComponentName),
+
+    #[error(transparent)]
+    ComponentNotFound(#[from] ComponentNotFoundError),
+
+    #[error("Tried to access component of type '{actual:?}' using component '{requested:?}'")]
+    TypeMismatch {
+        actual: re_types_core::ComponentName,
+        requested: re_types_core::ComponentName,
+    },
+
+    #[error("Error deserializing: {0}")]
+    DeserializationError(#[from] re_types_core::DeserializationError),
+
+    #[error("Error serializing: {0}")]
+    SerializationError(#[from] re_types_core::SerializationError),
+
+    #[error("Error converting arrow data: {0}")]
+    ArrowError(#[from] arrow2::error::Error),
+
+    #[error("Not implemented")]
+    NotImplemented,
+
+    #[error(transparent)]
+    Other(#[from] anyhow::Error),
+}
+
+pub type Result<T> = std::result::Result<T, QueryError>;
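+
+#[cfg(test)]
+mod error_example {
+    use super::*;
+
+    // A hand-written sketch (not part of the original patch) of the intended error handling:
+    // required ("primary") components surface as `PrimaryNotFound`, which callers typically
+    // treat as "nothing to draw" rather than as a hard failure. The component name used here
+    // is illustrative, and `ComponentName: From<&str>` is assumed.
+    #[test]
+    fn primary_not_found_is_recoverable() {
+        fn lookup() -> Result<()> {
+            Err(QueryError::PrimaryNotFound(
+                "rerun.components.Position3D".into(),
+            ))
+        }
+
+        match lookup() {
+            Err(QueryError::PrimaryNotFound(component)) => {
+                assert_eq!(component.as_str(), "rerun.components.Position3D");
+            }
+            _ => panic!("expected PrimaryNotFound"),
+        }
+    }
+}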
diff --git a/crates/store/re_query2/src/range.rs b/crates/store/re_query2/src/range.rs
new file mode 100644
index 0000000000000..62f6aa2136869
--- /dev/null
+++ b/crates/store/re_query2/src/range.rs
@@ -0,0 +1,317 @@
+use std::{collections::BTreeSet, sync::Arc};
+
+use ahash::HashMap;
+use nohash_hasher::IntMap;
+use parking_lot::RwLock;
+
+use re_chunk::{Chunk, ChunkId};
+use re_chunk_store::{ChunkStore, RangeQuery, TimeInt};
+use re_log_types::{EntityPath, ResolvedTimeRange};
+use re_types_core::{ComponentName, DeserializationError, SizeBytes};
+
+use crate::{CacheKey, Caches};
+
+// --- Public API ---
+
+impl Caches {
+    /// Queries for the given `component_names` using range semantics.
+    ///
+    /// See [`RangeResults`] for more information about how to handle the results.
+    ///
+    /// This is a cached API -- data will be lazily cached upon access.
+    pub fn range(
+        &self,
+        store: &ChunkStore,
+        query: &RangeQuery,
+        entity_path: &EntityPath,
+        component_names: impl IntoIterator<Item = ComponentName>,
+    ) -> RangeResults {
+        re_tracing::profile_function!(entity_path.to_string());
+
+        let mut results = RangeResults::new(query.clone());
+
+        // NOTE: This pre-filtering is extremely important: going through all these query layers
+        // has non-negligible overhead even if the final result ends up being nothing, and our
+        // number of queries for a frame grows linearly with the number of entity paths.
+        let component_names = component_names.into_iter().filter(|component_name| {
+            store.entity_has_component_on_timeline(&query.timeline(), entity_path, component_name)
+        });
+
+        for component_name in component_names {
+            let key = CacheKey::new(entity_path.clone(), query.timeline(), component_name);
+
+            let cache = Arc::clone(
+                self.range_per_cache_key
+                    .write()
+                    .entry(key.clone())
+                    .or_insert_with(|| Arc::new(RwLock::new(RangeCache::new(key.clone())))),
+            );
+
+            let mut cache = cache.write();
+
+            cache.handle_pending_invalidation();
+
+            let cached = cache.range(store, query, entity_path, component_name);
+            if !cached.is_empty() {
+                results.add(component_name, cached);
+            }
+        }
+
+        results
+    }
+}
+
+// --- Results ---
+
+/// Results for a range query.
+///
+/// The data is both deserialized and resolved/converted.
+///
+/// Use [`RangeResults::get`] or [`RangeResults::get_required`] in order to access the results for
+/// each individual component.
+#[derive(Debug)]
+pub struct RangeResults {
+    /// The query that yielded these results.
+    pub query: RangeQuery,
+
+    /// Results for each individual component.
+    pub components: IntMap<ComponentName, Vec<Chunk>>,
+}
+
+impl RangeResults {
+    #[inline]
+    pub fn new(query: RangeQuery) -> Self {
+        Self {
+            query,
+            components: Default::default(),
+        }
+    }
+
+    #[inline]
+    pub fn contains(&self, component_name: &ComponentName) -> bool {
+        self.components.contains_key(component_name)
+    }
+
+    /// Returns the [`Chunk`]s for the specified `component_name`.
+    #[inline]
+    pub fn get(&self, component_name: &ComponentName) -> Option<&[Chunk]> {
+        self.components
+            .get(component_name)
+            .map(|chunks| chunks.as_slice())
+    }
+
+    /// Returns the [`Chunk`]s for the specified `component_name`.
+    ///
+    /// Returns an error if the component is not present.
+    #[inline]
+    pub fn get_required(&self, component_name: &ComponentName) -> crate::Result<&[Chunk]> {
+        if let Some(chunks) = self.components.get(component_name) {
+            Ok(chunks)
+        } else {
+            Err(DeserializationError::MissingComponent {
+                component: *component_name,
+                backtrace: ::backtrace::Backtrace::new_unresolved(),
+            }
+            .into())
+        }
+    }
+}
+
+impl RangeResults {
+    #[doc(hidden)]
+    #[inline]
+    pub fn add(&mut self, component_name: ComponentName, chunks: Vec<Chunk>) {
+        self.components.insert(component_name, chunks);
+    }
+}
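+
+#[cfg(test)]
+mod range_zip_example {
+    // A hand-written sketch (not part of the original patch) of how per-component range
+    // results are meant to be recombined: `range_zip_1x1` walks the required stream of
+    // `(index, data)` pairs and carries the latest optional value along with it.
+    #[test]
+    fn positions_zipped_with_latest_color() {
+        let positions = vec![(1, "p1"), (2, "p2"), (4, "p4")];
+        let colors = vec![(2, "red")];
+
+        let got: Vec<_> = crate::range_zip_1x1(positions, colors).collect();
+
+        assert_eq!(
+            got,
+            vec![
+                (1, "p1", None),        // no color logged yet
+                (2, "p2", Some("red")), // color landed at the same index
+                (4, "p4", Some("red")), // the latest color carries forward
+            ]
+        );
+    }
+}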
+
+// --- Cache implementation ---
+
+/// Caches the results of `Range` queries for a given [`CacheKey`].
+pub struct RangeCache {
+    /// For debugging purposes.
+    pub cache_key: CacheKey,
+
+    /// All the [`Chunk`]s currently cached.
+    ///
+    /// See [`RangeCachedChunk`] for more information.
+    pub chunks: HashMap<ChunkId, RangeCachedChunk>,
+
+    /// Every [`ChunkId`] present in this set has been asynchronously invalidated.
+    ///
+    /// The next time this cache gets queried, it must remove any entry matching any of these IDs.
+    ///
+    /// Invalidation is deferred to query time because it is far more efficient that way: the frame
+    /// time effectively behaves as a natural micro-batching mechanism.
+    pub pending_invalidations: BTreeSet<ChunkId>,
+}
+
+impl RangeCache {
+    #[inline]
+    pub fn new(cache_key: CacheKey) -> Self {
+        Self {
+            cache_key,
+            chunks: HashMap::default(),
+            pending_invalidations: BTreeSet::default(),
+        }
+    }
+
+    /// Returns the time range covered by this [`RangeCache`].
+    ///
+    /// This is extremely slow (`O(n)`), don't use this for anything but debugging.
+    #[inline]
+    pub fn time_range(&self) -> ResolvedTimeRange {
+        self.chunks
+            .values()
+            .filter_map(|cached| {
+                cached
+                    .chunk
+                    .timelines()
+                    .get(&self.cache_key.timeline)
+                    .map(|time_chunk| time_chunk.time_range())
+            })
+            .fold(ResolvedTimeRange::EMPTY, |mut acc, time_range| {
+                acc.set_min(TimeInt::min(acc.min(), time_range.min()));
+                acc.set_max(TimeInt::max(acc.max(), time_range.max()));
+                acc
+            })
+    }
+}
+
+impl std::fmt::Debug for RangeCache {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let Self {
+            cache_key,
+            chunks,
+            pending_invalidations: _,
+        } = self;
+
+        let mut strings: Vec<String> = Vec::new();
+
+        strings.push(format!(
+            "{} ({})",
+            cache_key.timeline.typ().format_range_utc(self.time_range()),
+            re_format::format_bytes(chunks.total_size_bytes() as _),
+        ));
+
+        if strings.is_empty() {
+            return f.write_str("<empty>");
+        }
+
+        f.write_str(&strings.join("\n").replace("\n\n", "\n"))
+    }
+}
+
+pub struct RangeCachedChunk {
+    pub chunk: Chunk,
+
+    /// When a `Chunk` gets cached, it is pre-processed according to the current [`CacheKey`],
+    /// e.g. it is time-sorted on the appropriate timeline.
+    ///
+    /// In the happy case, pre-processing a `Chunk` is a no-op, and the cached `Chunk` is just a
+    /// reference to the real one sitting in the store.
+    /// Otherwise, the cached `Chunk` is a full blown copy of the original one.
+    pub resorted: bool,
+}
+
+impl SizeBytes for RangeCachedChunk {
+    #[inline]
+    fn heap_size_bytes(&self) -> u64 {
+        let Self { chunk, resorted } = self;
+
+        if *resorted {
+            // The chunk had to be post-processed for caching.
+            // Its data was duplicated.
+            Chunk::heap_size_bytes(chunk)
+        } else {
+            // This chunk is just a reference to the one in the store.
+            // Consider it amortized.
+            0
+        }
+    }
+}
+
+impl SizeBytes for RangeCache {
+    #[inline]
+    fn heap_size_bytes(&self) -> u64 {
+        let Self {
+            cache_key,
+            chunks,
+            pending_invalidations,
+        } = self;
+
+        cache_key.heap_size_bytes()
+            + chunks.heap_size_bytes()
+            + pending_invalidations.heap_size_bytes()
+    }
+}
+
+impl RangeCache {
+    /// Queries cached range data for a single component.
+    pub fn range(
+        &mut self,
+        store: &ChunkStore,
+        query: &RangeQuery,
+        entity_path: &EntityPath,
+        component_name: ComponentName,
+    ) -> Vec<Chunk> {
+        re_tracing::profile_scope!("range", format!("{query:?}"));
+
+        debug_assert_eq!(query.timeline(), self.cache_key.timeline);
+
+        // First, we forward the query as-is to the store.
+        //
+        // It's fine to run the query every time -- the index scan itself is not the costly part of a
+        // range query.
+        //
+        // For all relevant chunks that we find, we process them according to the [`CacheKey`], and
+        // cache them.
+
+        let raw_chunks = store.range_relevant_chunks(query, entity_path, component_name);
+        for raw_chunk in &raw_chunks {
+            self.chunks
+                .entry(raw_chunk.id())
+                .or_insert_with(|| RangeCachedChunk {
+                    chunk: raw_chunk
+                        // Pre-sort the cached chunk according to the cache key's timeline.
+                        .sorted_by_timeline_if_unsorted(&self.cache_key.timeline)
+                        // Densify the cached chunk according to the cache key's component, which
+                        // will speed future arrow operations on this chunk.
+                        .densified(component_name),
+                    resorted: !raw_chunk.is_timeline_sorted(&self.cache_key.timeline),
+                });
+        }
+
+        // Second, we simply retrieve from the cache all the relevant `Chunk`s.
+        //
+        // Since these `Chunk`s have already been pre-processed adequately, running a range filter
+        // on them will be quite cheap.
+
+        raw_chunks
+            .into_iter()
+            .filter_map(|raw_chunk| self.chunks.get(&raw_chunk.id()))
+            .map(|cached_sorted_chunk| {
+                debug_assert!(cached_sorted_chunk
+                    .chunk
+                    .is_timeline_sorted(&query.timeline()));
+                cached_sorted_chunk.chunk.range(query, component_name)
+            })
+            .filter(|chunk| !chunk.is_empty())
+            .collect()
+    }
+
+    #[inline]
+    pub fn handle_pending_invalidation(&mut self) {
+        re_tracing::profile_function!();
+
+        let Self {
+            cache_key: _,
+            chunks,
+            pending_invalidations,
+        } = self;
+
+        chunks.retain(|chunk_id, _chunk| !pending_invalidations.contains(chunk_id));
+
+        pending_invalidations.clear();
+    }
+}
diff --git a/crates/store/re_query2/src/range_zip/.gitattributes b/crates/store/re_query2/src/range_zip/.gitattributes
new file mode 100644
index 0000000000000..30d2025060647
--- /dev/null
+++ b/crates/store/re_query2/src/range_zip/.gitattributes
@@ -0,0 +1 @@
+generated.rs linguist-generated=true
diff --git a/crates/store/re_query2/src/range_zip/generated.rs b/crates/store/re_query2/src/range_zip/generated.rs
new file mode 100644
index 0000000000000..863b487197591
--- /dev/null
+++ b/crates/store/re_query2/src/range_zip/generated.rs
@@ -0,0 +1,3490 @@
+// This file was generated using `cargo r -p re_query2 --all-features --bin range_zip`.
+// DO NOT EDIT.
+
+// ---
+
+#![allow(clippy::iter_on_single_items)]
+#![allow(clippy::too_many_arguments)]
+#![allow(clippy::type_complexity)]
+
+use std::iter::Peekable;
+
+/// Returns a new [`RangeZip1x1`] iterator.
+///
+/// The number of elements in a range zip iterator corresponds to the number of elements in the
+/// shortest of its required iterators (`r0`).
+///
+/// Each call to `next` is guaranteed to yield the next value for each required iterator,
+/// as well as the most recent index amongst all of them.
+///
+/// Optional iterators accumulate their state and yield their most recent value (if any),
+/// each time the required iterators fire.
+pub fn range_zip_1x1<Idx, IR0, R0, IO0, O0>(
+    r0: IR0,
+    o0: IO0,
+) -> RangeZip1x1<Idx, IR0::IntoIter, R0, IO0::IntoIter, O0>
+where
+    Idx: std::cmp::Ord,
+    IR0: IntoIterator<Item = (Idx, R0)>,
+    IO0: IntoIterator<Item = (Idx, O0)>,
+{
+    RangeZip1x1 {
+        r0: r0.into_iter(),
+        o0: o0.into_iter().peekable(),
+
+        o0_data_latest: None,
+    }
+}
+
+/// Implements a range zip iterator combinator with 1 required iterator and 1 optional
+/// iterator.
+///
+/// See [`range_zip_1x1`] for more information.
+pub struct RangeZip1x1<Idx, IR0, R0, IO0, O0>
+where
+    Idx: std::cmp::Ord,
+    IR0: Iterator<Item = (Idx, R0)>,
+    IO0: Iterator<Item = (Idx, O0)>,
+{
+    r0: IR0,
+    o0: Peekable<IO0>,
+
+    o0_data_latest: Option<O0>,
+}
+
+impl<Idx, IR0, R0, IO0, O0> Iterator for RangeZip1x1<Idx, IR0, R0, IO0, O0>
+where
+    Idx: std::cmp::Ord,
+    IR0: Iterator<Item = (Idx, R0)>,
+    IO0: Iterator<Item = (Idx, O0)>,
+    O0: Clone,
+{
+    type Item = (Idx, R0, Option<O0>);
+
+    #[inline]
+    fn next(&mut self) -> Option<Self::Item> {
+        let Self {
+            r0,
+            o0,
+            o0_data_latest,
+        } = self;
+
+        let (r0_index, r0_data) = r0.next()?;
+
+        let max_index = [r0_index].into_iter().max()?;
+
+        let mut o0_data = None;
+        while let Some((_, data)) = o0.next_if(|(index, _)| index <= &max_index) {
+            o0_data = Some(data);
+        }
+        let o0_data = o0_data.or(o0_data_latest.take());
+        o0_data_latest.clone_from(&o0_data);
+
+        Some((max_index, r0_data, o0_data))
+    }
+}
+
+/// Returns a new [`RangeZip1x2`] iterator.
+///
+/// The number of elements in a range zip iterator corresponds to the number of elements in the
+/// shortest of its required iterators (`r0`).
+///
+/// Each call to `next` is guaranteed to yield the next value for each required iterator,
+/// as well as the most recent index amongst all of them.
+/// +/// Optional iterators accumulate their state and yield their most recent value (if any), +/// each time the required iterators fire. +pub fn range_zip_1x2( + r0: IR0, + o0: IO0, + o1: IO1, +) -> RangeZip1x2 +where + Idx: std::cmp::Ord, + IR0: IntoIterator, + IO0: IntoIterator, + IO1: IntoIterator, +{ + RangeZip1x2 { + r0: r0.into_iter(), + o0: o0.into_iter().peekable(), + o1: o1.into_iter().peekable(), + + o0_data_latest: None, + o1_data_latest: None, + } +} + +/// Implements a range zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`range_zip_1x2`] for more information. +pub struct RangeZip1x2 +where + Idx: std::cmp::Ord, + IR0: Iterator, + IO0: Iterator, + IO1: Iterator, +{ + r0: IR0, + o0: Peekable, + o1: Peekable, + + o0_data_latest: Option, + o1_data_latest: Option, +} + +impl Iterator for RangeZip1x2 +where + Idx: std::cmp::Ord, + IR0: Iterator, + IO0: Iterator, + IO1: Iterator, + O0: Clone, + O1: Clone, +{ + type Item = (Idx, R0, Option, Option); + + #[inline] + fn next(&mut self) -> Option { + let Self { + r0, + o0, + o1, + o0_data_latest, + o1_data_latest, + } = self; + + let (r0_index, r0_data) = r0.next()?; + + let max_index = [r0_index].into_iter().max()?; + + let mut o0_data = None; + while let Some((_, data)) = o0.next_if(|(index, _)| index <= &max_index) { + o0_data = Some(data); + } + let o0_data = o0_data.or(o0_data_latest.take()); + o0_data_latest.clone_from(&o0_data); + + let mut o1_data = None; + while let Some((_, data)) = o1.next_if(|(index, _)| index <= &max_index) { + o1_data = Some(data); + } + let o1_data = o1_data.or(o1_data_latest.take()); + o1_data_latest.clone_from(&o1_data); + + Some((max_index, r0_data, o0_data, o1_data)) + } +} + +/// Returns a new [`RangeZip1x3`] iterator. +/// +/// The number of elements in a range zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`). +/// +/// Each call to `next` is guaranteed to yield the next value for each required iterator, +/// as well as the most recent index amongst all of them. +/// +/// Optional iterators accumulate their state and yield their most recent value (if any), +/// each time the required iterators fire. +pub fn range_zip_1x3( + r0: IR0, + o0: IO0, + o1: IO1, + o2: IO2, +) -> RangeZip1x3 +where + Idx: std::cmp::Ord, + IR0: IntoIterator, + IO0: IntoIterator, + IO1: IntoIterator, + IO2: IntoIterator, +{ + RangeZip1x3 { + r0: r0.into_iter(), + o0: o0.into_iter().peekable(), + o1: o1.into_iter().peekable(), + o2: o2.into_iter().peekable(), + + o0_data_latest: None, + o1_data_latest: None, + o2_data_latest: None, + } +} + +/// Implements a range zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`range_zip_1x3`] for more information. 
+pub struct RangeZip1x3 +where + Idx: std::cmp::Ord, + IR0: Iterator, + IO0: Iterator, + IO1: Iterator, + IO2: Iterator, +{ + r0: IR0, + o0: Peekable, + o1: Peekable, + o2: Peekable, + + o0_data_latest: Option, + o1_data_latest: Option, + o2_data_latest: Option, +} + +impl Iterator + for RangeZip1x3 +where + Idx: std::cmp::Ord, + IR0: Iterator, + IO0: Iterator, + IO1: Iterator, + IO2: Iterator, + O0: Clone, + O1: Clone, + O2: Clone, +{ + type Item = (Idx, R0, Option, Option, Option); + + #[inline] + fn next(&mut self) -> Option { + let Self { + r0, + o0, + o1, + o2, + o0_data_latest, + o1_data_latest, + o2_data_latest, + } = self; + + let (r0_index, r0_data) = r0.next()?; + + let max_index = [r0_index].into_iter().max()?; + + let mut o0_data = None; + while let Some((_, data)) = o0.next_if(|(index, _)| index <= &max_index) { + o0_data = Some(data); + } + let o0_data = o0_data.or(o0_data_latest.take()); + o0_data_latest.clone_from(&o0_data); + + let mut o1_data = None; + while let Some((_, data)) = o1.next_if(|(index, _)| index <= &max_index) { + o1_data = Some(data); + } + let o1_data = o1_data.or(o1_data_latest.take()); + o1_data_latest.clone_from(&o1_data); + + let mut o2_data = None; + while let Some((_, data)) = o2.next_if(|(index, _)| index <= &max_index) { + o2_data = Some(data); + } + let o2_data = o2_data.or(o2_data_latest.take()); + o2_data_latest.clone_from(&o2_data); + + Some((max_index, r0_data, o0_data, o1_data, o2_data)) + } +} + +/// Returns a new [`RangeZip1x4`] iterator. +/// +/// The number of elements in a range zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`). +/// +/// Each call to `next` is guaranteed to yield the next value for each required iterator, +/// as well as the most recent index amongst all of them. +/// +/// Optional iterators accumulate their state and yield their most recent value (if any), +/// each time the required iterators fire. +pub fn range_zip_1x4( + r0: IR0, + o0: IO0, + o1: IO1, + o2: IO2, + o3: IO3, +) -> RangeZip1x4< + Idx, + IR0::IntoIter, + R0, + IO0::IntoIter, + O0, + IO1::IntoIter, + O1, + IO2::IntoIter, + O2, + IO3::IntoIter, + O3, +> +where + Idx: std::cmp::Ord, + IR0: IntoIterator, + IO0: IntoIterator, + IO1: IntoIterator, + IO2: IntoIterator, + IO3: IntoIterator, +{ + RangeZip1x4 { + r0: r0.into_iter(), + o0: o0.into_iter().peekable(), + o1: o1.into_iter().peekable(), + o2: o2.into_iter().peekable(), + o3: o3.into_iter().peekable(), + + o0_data_latest: None, + o1_data_latest: None, + o2_data_latest: None, + o3_data_latest: None, + } +} + +/// Implements a range zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`range_zip_1x4`] for more information. 
+pub struct RangeZip1x4 +where + Idx: std::cmp::Ord, + IR0: Iterator, + IO0: Iterator, + IO1: Iterator, + IO2: Iterator, + IO3: Iterator, +{ + r0: IR0, + o0: Peekable, + o1: Peekable, + o2: Peekable, + o3: Peekable, + + o0_data_latest: Option, + o1_data_latest: Option, + o2_data_latest: Option, + o3_data_latest: Option, +} + +impl Iterator + for RangeZip1x4 +where + Idx: std::cmp::Ord, + IR0: Iterator, + IO0: Iterator, + IO1: Iterator, + IO2: Iterator, + IO3: Iterator, + O0: Clone, + O1: Clone, + O2: Clone, + O3: Clone, +{ + type Item = (Idx, R0, Option, Option, Option, Option); + + #[inline] + fn next(&mut self) -> Option { + let Self { + r0, + o0, + o1, + o2, + o3, + o0_data_latest, + o1_data_latest, + o2_data_latest, + o3_data_latest, + } = self; + + let (r0_index, r0_data) = r0.next()?; + + let max_index = [r0_index].into_iter().max()?; + + let mut o0_data = None; + while let Some((_, data)) = o0.next_if(|(index, _)| index <= &max_index) { + o0_data = Some(data); + } + let o0_data = o0_data.or(o0_data_latest.take()); + o0_data_latest.clone_from(&o0_data); + + let mut o1_data = None; + while let Some((_, data)) = o1.next_if(|(index, _)| index <= &max_index) { + o1_data = Some(data); + } + let o1_data = o1_data.or(o1_data_latest.take()); + o1_data_latest.clone_from(&o1_data); + + let mut o2_data = None; + while let Some((_, data)) = o2.next_if(|(index, _)| index <= &max_index) { + o2_data = Some(data); + } + let o2_data = o2_data.or(o2_data_latest.take()); + o2_data_latest.clone_from(&o2_data); + + let mut o3_data = None; + while let Some((_, data)) = o3.next_if(|(index, _)| index <= &max_index) { + o3_data = Some(data); + } + let o3_data = o3_data.or(o3_data_latest.take()); + o3_data_latest.clone_from(&o3_data); + + Some((max_index, r0_data, o0_data, o1_data, o2_data, o3_data)) + } +} + +/// Returns a new [`RangeZip1x5`] iterator. +/// +/// The number of elements in a range zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`). +/// +/// Each call to `next` is guaranteed to yield the next value for each required iterator, +/// as well as the most recent index amongst all of them. +/// +/// Optional iterators accumulate their state and yield their most recent value (if any), +/// each time the required iterators fire. +pub fn range_zip_1x5( + r0: IR0, + o0: IO0, + o1: IO1, + o2: IO2, + o3: IO3, + o4: IO4, +) -> RangeZip1x5< + Idx, + IR0::IntoIter, + R0, + IO0::IntoIter, + O0, + IO1::IntoIter, + O1, + IO2::IntoIter, + O2, + IO3::IntoIter, + O3, + IO4::IntoIter, + O4, +> +where + Idx: std::cmp::Ord, + IR0: IntoIterator, + IO0: IntoIterator, + IO1: IntoIterator, + IO2: IntoIterator, + IO3: IntoIterator, + IO4: IntoIterator, +{ + RangeZip1x5 { + r0: r0.into_iter(), + o0: o0.into_iter().peekable(), + o1: o1.into_iter().peekable(), + o2: o2.into_iter().peekable(), + o3: o3.into_iter().peekable(), + o4: o4.into_iter().peekable(), + + o0_data_latest: None, + o1_data_latest: None, + o2_data_latest: None, + o3_data_latest: None, + o4_data_latest: None, + } +} + +/// Implements a range zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`range_zip_1x5`] for more information. 
+pub struct RangeZip1x5 +where + Idx: std::cmp::Ord, + IR0: Iterator, + IO0: Iterator, + IO1: Iterator, + IO2: Iterator, + IO3: Iterator, + IO4: Iterator, +{ + r0: IR0, + o0: Peekable, + o1: Peekable, + o2: Peekable, + o3: Peekable, + o4: Peekable, + + o0_data_latest: Option, + o1_data_latest: Option, + o2_data_latest: Option, + o3_data_latest: Option, + o4_data_latest: Option, +} + +impl Iterator + for RangeZip1x5 +where + Idx: std::cmp::Ord, + IR0: Iterator, + IO0: Iterator, + IO1: Iterator, + IO2: Iterator, + IO3: Iterator, + IO4: Iterator, + O0: Clone, + O1: Clone, + O2: Clone, + O3: Clone, + O4: Clone, +{ + type Item = ( + Idx, + R0, + Option, + Option, + Option, + Option, + Option, + ); + + #[inline] + fn next(&mut self) -> Option { + let Self { + r0, + o0, + o1, + o2, + o3, + o4, + o0_data_latest, + o1_data_latest, + o2_data_latest, + o3_data_latest, + o4_data_latest, + } = self; + + let (r0_index, r0_data) = r0.next()?; + + let max_index = [r0_index].into_iter().max()?; + + let mut o0_data = None; + while let Some((_, data)) = o0.next_if(|(index, _)| index <= &max_index) { + o0_data = Some(data); + } + let o0_data = o0_data.or(o0_data_latest.take()); + o0_data_latest.clone_from(&o0_data); + + let mut o1_data = None; + while let Some((_, data)) = o1.next_if(|(index, _)| index <= &max_index) { + o1_data = Some(data); + } + let o1_data = o1_data.or(o1_data_latest.take()); + o1_data_latest.clone_from(&o1_data); + + let mut o2_data = None; + while let Some((_, data)) = o2.next_if(|(index, _)| index <= &max_index) { + o2_data = Some(data); + } + let o2_data = o2_data.or(o2_data_latest.take()); + o2_data_latest.clone_from(&o2_data); + + let mut o3_data = None; + while let Some((_, data)) = o3.next_if(|(index, _)| index <= &max_index) { + o3_data = Some(data); + } + let o3_data = o3_data.or(o3_data_latest.take()); + o3_data_latest.clone_from(&o3_data); + + let mut o4_data = None; + while let Some((_, data)) = o4.next_if(|(index, _)| index <= &max_index) { + o4_data = Some(data); + } + let o4_data = o4_data.or(o4_data_latest.take()); + o4_data_latest.clone_from(&o4_data); + + Some(( + max_index, r0_data, o0_data, o1_data, o2_data, o3_data, o4_data, + )) + } +} + +/// Returns a new [`RangeZip1x6`] iterator. +/// +/// The number of elements in a range zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`). +/// +/// Each call to `next` is guaranteed to yield the next value for each required iterator, +/// as well as the most recent index amongst all of them. +/// +/// Optional iterators accumulate their state and yield their most recent value (if any), +/// each time the required iterators fire. 
+pub fn range_zip_1x6( + r0: IR0, + o0: IO0, + o1: IO1, + o2: IO2, + o3: IO3, + o4: IO4, + o5: IO5, +) -> RangeZip1x6< + Idx, + IR0::IntoIter, + R0, + IO0::IntoIter, + O0, + IO1::IntoIter, + O1, + IO2::IntoIter, + O2, + IO3::IntoIter, + O3, + IO4::IntoIter, + O4, + IO5::IntoIter, + O5, +> +where + Idx: std::cmp::Ord, + IR0: IntoIterator, + IO0: IntoIterator, + IO1: IntoIterator, + IO2: IntoIterator, + IO3: IntoIterator, + IO4: IntoIterator, + IO5: IntoIterator, +{ + RangeZip1x6 { + r0: r0.into_iter(), + o0: o0.into_iter().peekable(), + o1: o1.into_iter().peekable(), + o2: o2.into_iter().peekable(), + o3: o3.into_iter().peekable(), + o4: o4.into_iter().peekable(), + o5: o5.into_iter().peekable(), + + o0_data_latest: None, + o1_data_latest: None, + o2_data_latest: None, + o3_data_latest: None, + o4_data_latest: None, + o5_data_latest: None, + } +} + +/// Implements a range zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`range_zip_1x6`] for more information. +pub struct RangeZip1x6 +where + Idx: std::cmp::Ord, + IR0: Iterator, + IO0: Iterator, + IO1: Iterator, + IO2: Iterator, + IO3: Iterator, + IO4: Iterator, + IO5: Iterator, +{ + r0: IR0, + o0: Peekable, + o1: Peekable, + o2: Peekable, + o3: Peekable, + o4: Peekable, + o5: Peekable, + + o0_data_latest: Option, + o1_data_latest: Option, + o2_data_latest: Option, + o3_data_latest: Option, + o4_data_latest: Option, + o5_data_latest: Option, +} + +impl Iterator + for RangeZip1x6 +where + Idx: std::cmp::Ord, + IR0: Iterator, + IO0: Iterator, + IO1: Iterator, + IO2: Iterator, + IO3: Iterator, + IO4: Iterator, + IO5: Iterator, + O0: Clone, + O1: Clone, + O2: Clone, + O3: Clone, + O4: Clone, + O5: Clone, +{ + type Item = ( + Idx, + R0, + Option, + Option, + Option, + Option, + Option, + Option, + ); + + #[inline] + fn next(&mut self) -> Option { + let Self { + r0, + o0, + o1, + o2, + o3, + o4, + o5, + o0_data_latest, + o1_data_latest, + o2_data_latest, + o3_data_latest, + o4_data_latest, + o5_data_latest, + } = self; + + let (r0_index, r0_data) = r0.next()?; + + let max_index = [r0_index].into_iter().max()?; + + let mut o0_data = None; + while let Some((_, data)) = o0.next_if(|(index, _)| index <= &max_index) { + o0_data = Some(data); + } + let o0_data = o0_data.or(o0_data_latest.take()); + o0_data_latest.clone_from(&o0_data); + + let mut o1_data = None; + while let Some((_, data)) = o1.next_if(|(index, _)| index <= &max_index) { + o1_data = Some(data); + } + let o1_data = o1_data.or(o1_data_latest.take()); + o1_data_latest.clone_from(&o1_data); + + let mut o2_data = None; + while let Some((_, data)) = o2.next_if(|(index, _)| index <= &max_index) { + o2_data = Some(data); + } + let o2_data = o2_data.or(o2_data_latest.take()); + o2_data_latest.clone_from(&o2_data); + + let mut o3_data = None; + while let Some((_, data)) = o3.next_if(|(index, _)| index <= &max_index) { + o3_data = Some(data); + } + let o3_data = o3_data.or(o3_data_latest.take()); + o3_data_latest.clone_from(&o3_data); + + let mut o4_data = None; + while let Some((_, data)) = o4.next_if(|(index, _)| index <= &max_index) { + o4_data = Some(data); + } + let o4_data = o4_data.or(o4_data_latest.take()); + o4_data_latest.clone_from(&o4_data); + + let mut o5_data = None; + while let Some((_, data)) = o5.next_if(|(index, _)| index <= &max_index) { + o5_data = Some(data); + } + let o5_data = o5_data.or(o5_data_latest.take()); + o5_data_latest.clone_from(&o5_data); + + Some(( + max_index, r0_data, o0_data, o1_data, o2_data, o3_data, o4_data, 
o5_data, + )) + } +} + +/// Returns a new [`RangeZip1x7`] iterator. +/// +/// The number of elements in a range zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`). +/// +/// Each call to `next` is guaranteed to yield the next value for each required iterator, +/// as well as the most recent index amongst all of them. +/// +/// Optional iterators accumulate their state and yield their most recent value (if any), +/// each time the required iterators fire. +pub fn range_zip_1x7( + r0: IR0, + o0: IO0, + o1: IO1, + o2: IO2, + o3: IO3, + o4: IO4, + o5: IO5, + o6: IO6, +) -> RangeZip1x7< + Idx, + IR0::IntoIter, + R0, + IO0::IntoIter, + O0, + IO1::IntoIter, + O1, + IO2::IntoIter, + O2, + IO3::IntoIter, + O3, + IO4::IntoIter, + O4, + IO5::IntoIter, + O5, + IO6::IntoIter, + O6, +> +where + Idx: std::cmp::Ord, + IR0: IntoIterator, + IO0: IntoIterator, + IO1: IntoIterator, + IO2: IntoIterator, + IO3: IntoIterator, + IO4: IntoIterator, + IO5: IntoIterator, + IO6: IntoIterator, +{ + RangeZip1x7 { + r0: r0.into_iter(), + o0: o0.into_iter().peekable(), + o1: o1.into_iter().peekable(), + o2: o2.into_iter().peekable(), + o3: o3.into_iter().peekable(), + o4: o4.into_iter().peekable(), + o5: o5.into_iter().peekable(), + o6: o6.into_iter().peekable(), + + o0_data_latest: None, + o1_data_latest: None, + o2_data_latest: None, + o3_data_latest: None, + o4_data_latest: None, + o5_data_latest: None, + o6_data_latest: None, + } +} + +/// Implements a range zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`range_zip_1x7`] for more information. +pub struct RangeZip1x7 +where + Idx: std::cmp::Ord, + IR0: Iterator, + IO0: Iterator, + IO1: Iterator, + IO2: Iterator, + IO3: Iterator, + IO4: Iterator, + IO5: Iterator, + IO6: Iterator, +{ + r0: IR0, + o0: Peekable, + o1: Peekable, + o2: Peekable, + o3: Peekable, + o4: Peekable, + o5: Peekable, + o6: Peekable, + + o0_data_latest: Option, + o1_data_latest: Option, + o2_data_latest: Option, + o3_data_latest: Option, + o4_data_latest: Option, + o5_data_latest: Option, + o6_data_latest: Option, +} + +impl Iterator + for RangeZip1x7 +where + Idx: std::cmp::Ord, + IR0: Iterator, + IO0: Iterator, + IO1: Iterator, + IO2: Iterator, + IO3: Iterator, + IO4: Iterator, + IO5: Iterator, + IO6: Iterator, + O0: Clone, + O1: Clone, + O2: Clone, + O3: Clone, + O4: Clone, + O5: Clone, + O6: Clone, +{ + type Item = ( + Idx, + R0, + Option, + Option, + Option, + Option, + Option, + Option, + Option, + ); + + #[inline] + fn next(&mut self) -> Option { + let Self { + r0, + o0, + o1, + o2, + o3, + o4, + o5, + o6, + o0_data_latest, + o1_data_latest, + o2_data_latest, + o3_data_latest, + o4_data_latest, + o5_data_latest, + o6_data_latest, + } = self; + + let (r0_index, r0_data) = r0.next()?; + + let max_index = [r0_index].into_iter().max()?; + + let mut o0_data = None; + while let Some((_, data)) = o0.next_if(|(index, _)| index <= &max_index) { + o0_data = Some(data); + } + let o0_data = o0_data.or(o0_data_latest.take()); + o0_data_latest.clone_from(&o0_data); + + let mut o1_data = None; + while let Some((_, data)) = o1.next_if(|(index, _)| index <= &max_index) { + o1_data = Some(data); + } + let o1_data = o1_data.or(o1_data_latest.take()); + o1_data_latest.clone_from(&o1_data); + + let mut o2_data = None; + while let Some((_, data)) = o2.next_if(|(index, _)| index <= &max_index) { + o2_data = Some(data); + } + let o2_data = o2_data.or(o2_data_latest.take()); + o2_data_latest.clone_from(&o2_data); + 
+ let mut o3_data = None; + while let Some((_, data)) = o3.next_if(|(index, _)| index <= &max_index) { + o3_data = Some(data); + } + let o3_data = o3_data.or(o3_data_latest.take()); + o3_data_latest.clone_from(&o3_data); + + let mut o4_data = None; + while let Some((_, data)) = o4.next_if(|(index, _)| index <= &max_index) { + o4_data = Some(data); + } + let o4_data = o4_data.or(o4_data_latest.take()); + o4_data_latest.clone_from(&o4_data); + + let mut o5_data = None; + while let Some((_, data)) = o5.next_if(|(index, _)| index <= &max_index) { + o5_data = Some(data); + } + let o5_data = o5_data.or(o5_data_latest.take()); + o5_data_latest.clone_from(&o5_data); + + let mut o6_data = None; + while let Some((_, data)) = o6.next_if(|(index, _)| index <= &max_index) { + o6_data = Some(data); + } + let o6_data = o6_data.or(o6_data_latest.take()); + o6_data_latest.clone_from(&o6_data); + + Some(( + max_index, r0_data, o0_data, o1_data, o2_data, o3_data, o4_data, o5_data, o6_data, + )) + } +} + +/// Returns a new [`RangeZip1x8`] iterator. +/// +/// The number of elements in a range zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`). +/// +/// Each call to `next` is guaranteed to yield the next value for each required iterator, +/// as well as the most recent index amongst all of them. +/// +/// Optional iterators accumulate their state and yield their most recent value (if any), +/// each time the required iterators fire. +pub fn range_zip_1x8< + Idx, + IR0, + R0, + IO0, + O0, + IO1, + O1, + IO2, + O2, + IO3, + O3, + IO4, + O4, + IO5, + O5, + IO6, + O6, + IO7, + O7, +>( + r0: IR0, + o0: IO0, + o1: IO1, + o2: IO2, + o3: IO3, + o4: IO4, + o5: IO5, + o6: IO6, + o7: IO7, +) -> RangeZip1x8< + Idx, + IR0::IntoIter, + R0, + IO0::IntoIter, + O0, + IO1::IntoIter, + O1, + IO2::IntoIter, + O2, + IO3::IntoIter, + O3, + IO4::IntoIter, + O4, + IO5::IntoIter, + O5, + IO6::IntoIter, + O6, + IO7::IntoIter, + O7, +> +where + Idx: std::cmp::Ord, + IR0: IntoIterator, + IO0: IntoIterator, + IO1: IntoIterator, + IO2: IntoIterator, + IO3: IntoIterator, + IO4: IntoIterator, + IO5: IntoIterator, + IO6: IntoIterator, + IO7: IntoIterator, +{ + RangeZip1x8 { + r0: r0.into_iter(), + o0: o0.into_iter().peekable(), + o1: o1.into_iter().peekable(), + o2: o2.into_iter().peekable(), + o3: o3.into_iter().peekable(), + o4: o4.into_iter().peekable(), + o5: o5.into_iter().peekable(), + o6: o6.into_iter().peekable(), + o7: o7.into_iter().peekable(), + + o0_data_latest: None, + o1_data_latest: None, + o2_data_latest: None, + o3_data_latest: None, + o4_data_latest: None, + o5_data_latest: None, + o6_data_latest: None, + o7_data_latest: None, + } +} + +/// Implements a range zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`range_zip_1x8`] for more information. 
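+///
+/// A minimal sketch of the higher-arity form (assuming `range_zip_1x8` is in scope;
+/// marked `ignore` since it is illustrative). Unused optional slots can simply be
+/// fed empty iterators:
+///
+/// ```ignore
+/// let r0 = vec![(1_i64, "a"), (2, "b")];
+/// let o0 = vec![(2_i64, 42_u32)];
+/// let none = Vec::<(i64, u8)>::new();
+///
+/// let got: Vec<_> = range_zip_1x8(
+///     r0,
+///     o0,
+///     none.clone(), none.clone(), none.clone(), none.clone(),
+///     none.clone(), none.clone(), none,
+/// )
+/// .collect();
+///
+/// assert_eq!(got[0].2, None); // o0 has nothing at index 1 yet…
+/// assert_eq!(got[1].2, Some(42)); // …and fires at index 2
+/// ```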
+pub struct RangeZip1x8< + Idx, + IR0, + R0, + IO0, + O0, + IO1, + O1, + IO2, + O2, + IO3, + O3, + IO4, + O4, + IO5, + O5, + IO6, + O6, + IO7, + O7, +> where + Idx: std::cmp::Ord, + IR0: Iterator, + IO0: Iterator, + IO1: Iterator, + IO2: Iterator, + IO3: Iterator, + IO4: Iterator, + IO5: Iterator, + IO6: Iterator, + IO7: Iterator, +{ + r0: IR0, + o0: Peekable, + o1: Peekable, + o2: Peekable, + o3: Peekable, + o4: Peekable, + o5: Peekable, + o6: Peekable, + o7: Peekable, + + o0_data_latest: Option, + o1_data_latest: Option, + o2_data_latest: Option, + o3_data_latest: Option, + o4_data_latest: Option, + o5_data_latest: Option, + o6_data_latest: Option, + o7_data_latest: Option, +} + +impl Iterator + for RangeZip1x8< + Idx, + IR0, + R0, + IO0, + O0, + IO1, + O1, + IO2, + O2, + IO3, + O3, + IO4, + O4, + IO5, + O5, + IO6, + O6, + IO7, + O7, + > +where + Idx: std::cmp::Ord, + IR0: Iterator, + IO0: Iterator, + IO1: Iterator, + IO2: Iterator, + IO3: Iterator, + IO4: Iterator, + IO5: Iterator, + IO6: Iterator, + IO7: Iterator, + O0: Clone, + O1: Clone, + O2: Clone, + O3: Clone, + O4: Clone, + O5: Clone, + O6: Clone, + O7: Clone, +{ + type Item = ( + Idx, + R0, + Option, + Option, + Option, + Option, + Option, + Option, + Option, + Option, + ); + + #[inline] + fn next(&mut self) -> Option { + let Self { + r0, + o0, + o1, + o2, + o3, + o4, + o5, + o6, + o7, + o0_data_latest, + o1_data_latest, + o2_data_latest, + o3_data_latest, + o4_data_latest, + o5_data_latest, + o6_data_latest, + o7_data_latest, + } = self; + + let (r0_index, r0_data) = r0.next()?; + + let max_index = [r0_index].into_iter().max()?; + + let mut o0_data = None; + while let Some((_, data)) = o0.next_if(|(index, _)| index <= &max_index) { + o0_data = Some(data); + } + let o0_data = o0_data.or(o0_data_latest.take()); + o0_data_latest.clone_from(&o0_data); + + let mut o1_data = None; + while let Some((_, data)) = o1.next_if(|(index, _)| index <= &max_index) { + o1_data = Some(data); + } + let o1_data = o1_data.or(o1_data_latest.take()); + o1_data_latest.clone_from(&o1_data); + + let mut o2_data = None; + while let Some((_, data)) = o2.next_if(|(index, _)| index <= &max_index) { + o2_data = Some(data); + } + let o2_data = o2_data.or(o2_data_latest.take()); + o2_data_latest.clone_from(&o2_data); + + let mut o3_data = None; + while let Some((_, data)) = o3.next_if(|(index, _)| index <= &max_index) { + o3_data = Some(data); + } + let o3_data = o3_data.or(o3_data_latest.take()); + o3_data_latest.clone_from(&o3_data); + + let mut o4_data = None; + while let Some((_, data)) = o4.next_if(|(index, _)| index <= &max_index) { + o4_data = Some(data); + } + let o4_data = o4_data.or(o4_data_latest.take()); + o4_data_latest.clone_from(&o4_data); + + let mut o5_data = None; + while let Some((_, data)) = o5.next_if(|(index, _)| index <= &max_index) { + o5_data = Some(data); + } + let o5_data = o5_data.or(o5_data_latest.take()); + o5_data_latest.clone_from(&o5_data); + + let mut o6_data = None; + while let Some((_, data)) = o6.next_if(|(index, _)| index <= &max_index) { + o6_data = Some(data); + } + let o6_data = o6_data.or(o6_data_latest.take()); + o6_data_latest.clone_from(&o6_data); + + let mut o7_data = None; + while let Some((_, data)) = o7.next_if(|(index, _)| index <= &max_index) { + o7_data = Some(data); + } + let o7_data = o7_data.or(o7_data_latest.take()); + o7_data_latest.clone_from(&o7_data); + + Some(( + max_index, r0_data, o0_data, o1_data, o2_data, o3_data, o4_data, o5_data, o6_data, + o7_data, + )) + } +} + +/// Returns a new 
[`RangeZip1x9`] iterator. +/// +/// The number of elements in a range zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`). +/// +/// Each call to `next` is guaranteed to yield the next value for each required iterator, +/// as well as the most recent index amongst all of them. +/// +/// Optional iterators accumulate their state and yield their most recent value (if any), +/// each time the required iterators fire. +pub fn range_zip_1x9< + Idx, + IR0, + R0, + IO0, + O0, + IO1, + O1, + IO2, + O2, + IO3, + O3, + IO4, + O4, + IO5, + O5, + IO6, + O6, + IO7, + O7, + IO8, + O8, +>( + r0: IR0, + o0: IO0, + o1: IO1, + o2: IO2, + o3: IO3, + o4: IO4, + o5: IO5, + o6: IO6, + o7: IO7, + o8: IO8, +) -> RangeZip1x9< + Idx, + IR0::IntoIter, + R0, + IO0::IntoIter, + O0, + IO1::IntoIter, + O1, + IO2::IntoIter, + O2, + IO3::IntoIter, + O3, + IO4::IntoIter, + O4, + IO5::IntoIter, + O5, + IO6::IntoIter, + O6, + IO7::IntoIter, + O7, + IO8::IntoIter, + O8, +> +where + Idx: std::cmp::Ord, + IR0: IntoIterator, + IO0: IntoIterator, + IO1: IntoIterator, + IO2: IntoIterator, + IO3: IntoIterator, + IO4: IntoIterator, + IO5: IntoIterator, + IO6: IntoIterator, + IO7: IntoIterator, + IO8: IntoIterator, +{ + RangeZip1x9 { + r0: r0.into_iter(), + o0: o0.into_iter().peekable(), + o1: o1.into_iter().peekable(), + o2: o2.into_iter().peekable(), + o3: o3.into_iter().peekable(), + o4: o4.into_iter().peekable(), + o5: o5.into_iter().peekable(), + o6: o6.into_iter().peekable(), + o7: o7.into_iter().peekable(), + o8: o8.into_iter().peekable(), + + o0_data_latest: None, + o1_data_latest: None, + o2_data_latest: None, + o3_data_latest: None, + o4_data_latest: None, + o5_data_latest: None, + o6_data_latest: None, + o7_data_latest: None, + o8_data_latest: None, + } +} + +/// Implements a range zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`range_zip_1x9`] for more information. 
+pub struct RangeZip1x9< + Idx, + IR0, + R0, + IO0, + O0, + IO1, + O1, + IO2, + O2, + IO3, + O3, + IO4, + O4, + IO5, + O5, + IO6, + O6, + IO7, + O7, + IO8, + O8, +> where + Idx: std::cmp::Ord, + IR0: Iterator, + IO0: Iterator, + IO1: Iterator, + IO2: Iterator, + IO3: Iterator, + IO4: Iterator, + IO5: Iterator, + IO6: Iterator, + IO7: Iterator, + IO8: Iterator, +{ + r0: IR0, + o0: Peekable, + o1: Peekable, + o2: Peekable, + o3: Peekable, + o4: Peekable, + o5: Peekable, + o6: Peekable, + o7: Peekable, + o8: Peekable, + + o0_data_latest: Option, + o1_data_latest: Option, + o2_data_latest: Option, + o3_data_latest: Option, + o4_data_latest: Option, + o5_data_latest: Option, + o6_data_latest: Option, + o7_data_latest: Option, + o8_data_latest: Option, +} + +impl< + Idx, + IR0, + R0, + IO0, + O0, + IO1, + O1, + IO2, + O2, + IO3, + O3, + IO4, + O4, + IO5, + O5, + IO6, + O6, + IO7, + O7, + IO8, + O8, + > Iterator + for RangeZip1x9< + Idx, + IR0, + R0, + IO0, + O0, + IO1, + O1, + IO2, + O2, + IO3, + O3, + IO4, + O4, + IO5, + O5, + IO6, + O6, + IO7, + O7, + IO8, + O8, + > +where + Idx: std::cmp::Ord, + IR0: Iterator, + IO0: Iterator, + IO1: Iterator, + IO2: Iterator, + IO3: Iterator, + IO4: Iterator, + IO5: Iterator, + IO6: Iterator, + IO7: Iterator, + IO8: Iterator, + O0: Clone, + O1: Clone, + O2: Clone, + O3: Clone, + O4: Clone, + O5: Clone, + O6: Clone, + O7: Clone, + O8: Clone, +{ + type Item = ( + Idx, + R0, + Option, + Option, + Option, + Option, + Option, + Option, + Option, + Option, + Option, + ); + + #[inline] + fn next(&mut self) -> Option { + let Self { + r0, + o0, + o1, + o2, + o3, + o4, + o5, + o6, + o7, + o8, + o0_data_latest, + o1_data_latest, + o2_data_latest, + o3_data_latest, + o4_data_latest, + o5_data_latest, + o6_data_latest, + o7_data_latest, + o8_data_latest, + } = self; + + let (r0_index, r0_data) = r0.next()?; + + let max_index = [r0_index].into_iter().max()?; + + let mut o0_data = None; + while let Some((_, data)) = o0.next_if(|(index, _)| index <= &max_index) { + o0_data = Some(data); + } + let o0_data = o0_data.or(o0_data_latest.take()); + o0_data_latest.clone_from(&o0_data); + + let mut o1_data = None; + while let Some((_, data)) = o1.next_if(|(index, _)| index <= &max_index) { + o1_data = Some(data); + } + let o1_data = o1_data.or(o1_data_latest.take()); + o1_data_latest.clone_from(&o1_data); + + let mut o2_data = None; + while let Some((_, data)) = o2.next_if(|(index, _)| index <= &max_index) { + o2_data = Some(data); + } + let o2_data = o2_data.or(o2_data_latest.take()); + o2_data_latest.clone_from(&o2_data); + + let mut o3_data = None; + while let Some((_, data)) = o3.next_if(|(index, _)| index <= &max_index) { + o3_data = Some(data); + } + let o3_data = o3_data.or(o3_data_latest.take()); + o3_data_latest.clone_from(&o3_data); + + let mut o4_data = None; + while let Some((_, data)) = o4.next_if(|(index, _)| index <= &max_index) { + o4_data = Some(data); + } + let o4_data = o4_data.or(o4_data_latest.take()); + o4_data_latest.clone_from(&o4_data); + + let mut o5_data = None; + while let Some((_, data)) = o5.next_if(|(index, _)| index <= &max_index) { + o5_data = Some(data); + } + let o5_data = o5_data.or(o5_data_latest.take()); + o5_data_latest.clone_from(&o5_data); + + let mut o6_data = None; + while let Some((_, data)) = o6.next_if(|(index, _)| index <= &max_index) { + o6_data = Some(data); + } + let o6_data = o6_data.or(o6_data_latest.take()); + o6_data_latest.clone_from(&o6_data); + + let mut o7_data = None; + while let Some((_, data)) = o7.next_if(|(index, _)| 
index <= &max_index) { + o7_data = Some(data); + } + let o7_data = o7_data.or(o7_data_latest.take()); + o7_data_latest.clone_from(&o7_data); + + let mut o8_data = None; + while let Some((_, data)) = o8.next_if(|(index, _)| index <= &max_index) { + o8_data = Some(data); + } + let o8_data = o8_data.or(o8_data_latest.take()); + o8_data_latest.clone_from(&o8_data); + + Some(( + max_index, r0_data, o0_data, o1_data, o2_data, o3_data, o4_data, o5_data, o6_data, + o7_data, o8_data, + )) + } +} + +/// Returns a new [`RangeZip2x1`] iterator. +/// +/// The number of elements in a range zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`, `r1`). +/// +/// Each call to `next` is guaranteed to yield the next value for each required iterator, +/// as well as the most recent index amongst all of them. +/// +/// Optional iterators accumulate their state and yield their most recent value (if any), +/// each time the required iterators fire. +pub fn range_zip_2x1( + r0: IR0, + r1: IR1, + o0: IO0, +) -> RangeZip2x1 +where + Idx: std::cmp::Ord, + IR0: IntoIterator, + IR1: IntoIterator, + IO0: IntoIterator, +{ + RangeZip2x1 { + r0: r0.into_iter(), + r1: r1.into_iter(), + o0: o0.into_iter().peekable(), + + o0_data_latest: None, + } +} + +/// Implements a range zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`range_zip_2x1`] for more information. +pub struct RangeZip2x1 +where + Idx: std::cmp::Ord, + IR0: Iterator, + IR1: Iterator, + IO0: Iterator, +{ + r0: IR0, + r1: IR1, + o0: Peekable, + + o0_data_latest: Option, +} + +impl Iterator for RangeZip2x1 +where + Idx: std::cmp::Ord, + IR0: Iterator, + IR1: Iterator, + IO0: Iterator, + O0: Clone, +{ + type Item = (Idx, R0, R1, Option); + + #[inline] + fn next(&mut self) -> Option { + let Self { + r0, + r1, + o0, + o0_data_latest, + } = self; + + let (r0_index, r0_data) = r0.next()?; + let (r1_index, r1_data) = r1.next()?; + + let max_index = [r0_index, r1_index].into_iter().max()?; + + let mut o0_data = None; + while let Some((_, data)) = o0.next_if(|(index, _)| index <= &max_index) { + o0_data = Some(data); + } + let o0_data = o0_data.or(o0_data_latest.take()); + o0_data_latest.clone_from(&o0_data); + + Some((max_index, r0_data, r1_data, o0_data)) + } +} + +/// Returns a new [`RangeZip2x2`] iterator. +/// +/// The number of elements in a range zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`, `r1`). +/// +/// Each call to `next` is guaranteed to yield the next value for each required iterator, +/// as well as the most recent index amongst all of them. +/// +/// Optional iterators accumulate their state and yield their most recent value (if any), +/// each time the required iterators fire. +pub fn range_zip_2x2( + r0: IR0, + r1: IR1, + o0: IO0, + o1: IO1, +) -> RangeZip2x2 +where + Idx: std::cmp::Ord, + IR0: IntoIterator, + IR1: IntoIterator, + IO0: IntoIterator, + IO1: IntoIterator, +{ + RangeZip2x2 { + r0: r0.into_iter(), + r1: r1.into_iter(), + o0: o0.into_iter().peekable(), + o1: o1.into_iter().peekable(), + + o0_data_latest: None, + o1_data_latest: None, + } +} + +/// Implements a range zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`range_zip_2x2`] for more information. 
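+///
+/// A minimal sketch (assuming `range_zip_2x2` is in scope; marked `ignore` since it
+/// is illustrative): the yielded index is the most recent (max) of the two required
+/// indices at each step:
+///
+/// ```ignore
+/// let r0 = vec![(1, 'a'), (4, 'b')];
+/// let r1 = vec![(2, 'x'), (3, 'y')];
+/// let o0 = vec![(2, "o0@2")];
+/// let o1 = vec![(5, "o1@5")];
+///
+/// let got: Vec<_> = range_zip_2x2(r0, r1, o0, o1).collect();
+/// assert_eq!(
+///     got,
+///     vec![
+///         (2, 'a', 'x', Some("o0@2"), None),
+///         (4, 'b', 'y', Some("o0@2"), None), // o0 carried; o1 still in the future
+///     ]
+/// );
+/// ```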
+pub struct RangeZip2x2 +where + Idx: std::cmp::Ord, + IR0: Iterator, + IR1: Iterator, + IO0: Iterator, + IO1: Iterator, +{ + r0: IR0, + r1: IR1, + o0: Peekable, + o1: Peekable, + + o0_data_latest: Option, + o1_data_latest: Option, +} + +impl Iterator + for RangeZip2x2 +where + Idx: std::cmp::Ord, + IR0: Iterator, + IR1: Iterator, + IO0: Iterator, + IO1: Iterator, + O0: Clone, + O1: Clone, +{ + type Item = (Idx, R0, R1, Option, Option); + + #[inline] + fn next(&mut self) -> Option { + let Self { + r0, + r1, + o0, + o1, + o0_data_latest, + o1_data_latest, + } = self; + + let (r0_index, r0_data) = r0.next()?; + let (r1_index, r1_data) = r1.next()?; + + let max_index = [r0_index, r1_index].into_iter().max()?; + + let mut o0_data = None; + while let Some((_, data)) = o0.next_if(|(index, _)| index <= &max_index) { + o0_data = Some(data); + } + let o0_data = o0_data.or(o0_data_latest.take()); + o0_data_latest.clone_from(&o0_data); + + let mut o1_data = None; + while let Some((_, data)) = o1.next_if(|(index, _)| index <= &max_index) { + o1_data = Some(data); + } + let o1_data = o1_data.or(o1_data_latest.take()); + o1_data_latest.clone_from(&o1_data); + + Some((max_index, r0_data, r1_data, o0_data, o1_data)) + } +} + +/// Returns a new [`RangeZip2x3`] iterator. +/// +/// The number of elements in a range zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`, `r1`). +/// +/// Each call to `next` is guaranteed to yield the next value for each required iterator, +/// as well as the most recent index amongst all of them. +/// +/// Optional iterators accumulate their state and yield their most recent value (if any), +/// each time the required iterators fire. +pub fn range_zip_2x3( + r0: IR0, + r1: IR1, + o0: IO0, + o1: IO1, + o2: IO2, +) -> RangeZip2x3< + Idx, + IR0::IntoIter, + R0, + IR1::IntoIter, + R1, + IO0::IntoIter, + O0, + IO1::IntoIter, + O1, + IO2::IntoIter, + O2, +> +where + Idx: std::cmp::Ord, + IR0: IntoIterator, + IR1: IntoIterator, + IO0: IntoIterator, + IO1: IntoIterator, + IO2: IntoIterator, +{ + RangeZip2x3 { + r0: r0.into_iter(), + r1: r1.into_iter(), + o0: o0.into_iter().peekable(), + o1: o1.into_iter().peekable(), + o2: o2.into_iter().peekable(), + + o0_data_latest: None, + o1_data_latest: None, + o2_data_latest: None, + } +} + +/// Implements a range zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`range_zip_2x3`] for more information. 
+pub struct RangeZip2x3 +where + Idx: std::cmp::Ord, + IR0: Iterator, + IR1: Iterator, + IO0: Iterator, + IO1: Iterator, + IO2: Iterator, +{ + r0: IR0, + r1: IR1, + o0: Peekable, + o1: Peekable, + o2: Peekable, + + o0_data_latest: Option, + o1_data_latest: Option, + o2_data_latest: Option, +} + +impl Iterator + for RangeZip2x3 +where + Idx: std::cmp::Ord, + IR0: Iterator, + IR1: Iterator, + IO0: Iterator, + IO1: Iterator, + IO2: Iterator, + O0: Clone, + O1: Clone, + O2: Clone, +{ + type Item = (Idx, R0, R1, Option, Option, Option); + + #[inline] + fn next(&mut self) -> Option { + let Self { + r0, + r1, + o0, + o1, + o2, + o0_data_latest, + o1_data_latest, + o2_data_latest, + } = self; + + let (r0_index, r0_data) = r0.next()?; + let (r1_index, r1_data) = r1.next()?; + + let max_index = [r0_index, r1_index].into_iter().max()?; + + let mut o0_data = None; + while let Some((_, data)) = o0.next_if(|(index, _)| index <= &max_index) { + o0_data = Some(data); + } + let o0_data = o0_data.or(o0_data_latest.take()); + o0_data_latest.clone_from(&o0_data); + + let mut o1_data = None; + while let Some((_, data)) = o1.next_if(|(index, _)| index <= &max_index) { + o1_data = Some(data); + } + let o1_data = o1_data.or(o1_data_latest.take()); + o1_data_latest.clone_from(&o1_data); + + let mut o2_data = None; + while let Some((_, data)) = o2.next_if(|(index, _)| index <= &max_index) { + o2_data = Some(data); + } + let o2_data = o2_data.or(o2_data_latest.take()); + o2_data_latest.clone_from(&o2_data); + + Some((max_index, r0_data, r1_data, o0_data, o1_data, o2_data)) + } +} + +/// Returns a new [`RangeZip2x4`] iterator. +/// +/// The number of elements in a range zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`, `r1`). +/// +/// Each call to `next` is guaranteed to yield the next value for each required iterator, +/// as well as the most recent index amongst all of them. +/// +/// Optional iterators accumulate their state and yield their most recent value (if any), +/// each time the required iterators fire. +pub fn range_zip_2x4( + r0: IR0, + r1: IR1, + o0: IO0, + o1: IO1, + o2: IO2, + o3: IO3, +) -> RangeZip2x4< + Idx, + IR0::IntoIter, + R0, + IR1::IntoIter, + R1, + IO0::IntoIter, + O0, + IO1::IntoIter, + O1, + IO2::IntoIter, + O2, + IO3::IntoIter, + O3, +> +where + Idx: std::cmp::Ord, + IR0: IntoIterator, + IR1: IntoIterator, + IO0: IntoIterator, + IO1: IntoIterator, + IO2: IntoIterator, + IO3: IntoIterator, +{ + RangeZip2x4 { + r0: r0.into_iter(), + r1: r1.into_iter(), + o0: o0.into_iter().peekable(), + o1: o1.into_iter().peekable(), + o2: o2.into_iter().peekable(), + o3: o3.into_iter().peekable(), + + o0_data_latest: None, + o1_data_latest: None, + o2_data_latest: None, + o3_data_latest: None, + } +} + +/// Implements a range zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`range_zip_2x4`] for more information. 
+pub struct RangeZip2x4 +where + Idx: std::cmp::Ord, + IR0: Iterator, + IR1: Iterator, + IO0: Iterator, + IO1: Iterator, + IO2: Iterator, + IO3: Iterator, +{ + r0: IR0, + r1: IR1, + o0: Peekable, + o1: Peekable, + o2: Peekable, + o3: Peekable, + + o0_data_latest: Option, + o1_data_latest: Option, + o2_data_latest: Option, + o3_data_latest: Option, +} + +impl Iterator + for RangeZip2x4 +where + Idx: std::cmp::Ord, + IR0: Iterator, + IR1: Iterator, + IO0: Iterator, + IO1: Iterator, + IO2: Iterator, + IO3: Iterator, + O0: Clone, + O1: Clone, + O2: Clone, + O3: Clone, +{ + type Item = (Idx, R0, R1, Option, Option, Option, Option); + + #[inline] + fn next(&mut self) -> Option { + let Self { + r0, + r1, + o0, + o1, + o2, + o3, + o0_data_latest, + o1_data_latest, + o2_data_latest, + o3_data_latest, + } = self; + + let (r0_index, r0_data) = r0.next()?; + let (r1_index, r1_data) = r1.next()?; + + let max_index = [r0_index, r1_index].into_iter().max()?; + + let mut o0_data = None; + while let Some((_, data)) = o0.next_if(|(index, _)| index <= &max_index) { + o0_data = Some(data); + } + let o0_data = o0_data.or(o0_data_latest.take()); + o0_data_latest.clone_from(&o0_data); + + let mut o1_data = None; + while let Some((_, data)) = o1.next_if(|(index, _)| index <= &max_index) { + o1_data = Some(data); + } + let o1_data = o1_data.or(o1_data_latest.take()); + o1_data_latest.clone_from(&o1_data); + + let mut o2_data = None; + while let Some((_, data)) = o2.next_if(|(index, _)| index <= &max_index) { + o2_data = Some(data); + } + let o2_data = o2_data.or(o2_data_latest.take()); + o2_data_latest.clone_from(&o2_data); + + let mut o3_data = None; + while let Some((_, data)) = o3.next_if(|(index, _)| index <= &max_index) { + o3_data = Some(data); + } + let o3_data = o3_data.or(o3_data_latest.take()); + o3_data_latest.clone_from(&o3_data); + + Some(( + max_index, r0_data, r1_data, o0_data, o1_data, o2_data, o3_data, + )) + } +} + +/// Returns a new [`RangeZip2x5`] iterator. +/// +/// The number of elements in a range zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`, `r1`). +/// +/// Each call to `next` is guaranteed to yield the next value for each required iterator, +/// as well as the most recent index amongst all of them. +/// +/// Optional iterators accumulate their state and yield their most recent value (if any), +/// each time the required iterators fire. +pub fn range_zip_2x5( + r0: IR0, + r1: IR1, + o0: IO0, + o1: IO1, + o2: IO2, + o3: IO3, + o4: IO4, +) -> RangeZip2x5< + Idx, + IR0::IntoIter, + R0, + IR1::IntoIter, + R1, + IO0::IntoIter, + O0, + IO1::IntoIter, + O1, + IO2::IntoIter, + O2, + IO3::IntoIter, + O3, + IO4::IntoIter, + O4, +> +where + Idx: std::cmp::Ord, + IR0: IntoIterator, + IR1: IntoIterator, + IO0: IntoIterator, + IO1: IntoIterator, + IO2: IntoIterator, + IO3: IntoIterator, + IO4: IntoIterator, +{ + RangeZip2x5 { + r0: r0.into_iter(), + r1: r1.into_iter(), + o0: o0.into_iter().peekable(), + o1: o1.into_iter().peekable(), + o2: o2.into_iter().peekable(), + o3: o3.into_iter().peekable(), + o4: o4.into_iter().peekable(), + + o0_data_latest: None, + o1_data_latest: None, + o2_data_latest: None, + o3_data_latest: None, + o4_data_latest: None, + } +} + +/// Implements a range zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`range_zip_2x5`] for more information. 
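+///
+/// A minimal sketch (assuming `range_zip_2x5` is in scope; marked `ignore` since it
+/// is illustrative): iteration ends as soon as the shortest required iterator runs
+/// dry, regardless of what the optionals still hold:
+///
+/// ```ignore
+/// let r0 = vec![(1, "a"), (2, "b"), (3, "c")];
+/// let r1 = vec![(1, 1.0)];
+/// let none = Vec::<(i32, u8)>::new();
+///
+/// let got: Vec<_> = range_zip_2x5(
+///     r0,
+///     r1, // exhausted after one element
+///     none.clone(), none.clone(), none.clone(), none.clone(), none,
+/// )
+/// .collect();
+///
+/// assert_eq!(got, vec![(1, "a", 1.0, None, None, None, None, None)]);
+/// ```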
+pub struct RangeZip2x5 +where + Idx: std::cmp::Ord, + IR0: Iterator, + IR1: Iterator, + IO0: Iterator, + IO1: Iterator, + IO2: Iterator, + IO3: Iterator, + IO4: Iterator, +{ + r0: IR0, + r1: IR1, + o0: Peekable, + o1: Peekable, + o2: Peekable, + o3: Peekable, + o4: Peekable, + + o0_data_latest: Option, + o1_data_latest: Option, + o2_data_latest: Option, + o3_data_latest: Option, + o4_data_latest: Option, +} + +impl Iterator + for RangeZip2x5 +where + Idx: std::cmp::Ord, + IR0: Iterator, + IR1: Iterator, + IO0: Iterator, + IO1: Iterator, + IO2: Iterator, + IO3: Iterator, + IO4: Iterator, + O0: Clone, + O1: Clone, + O2: Clone, + O3: Clone, + O4: Clone, +{ + type Item = ( + Idx, + R0, + R1, + Option, + Option, + Option, + Option, + Option, + ); + + #[inline] + fn next(&mut self) -> Option { + let Self { + r0, + r1, + o0, + o1, + o2, + o3, + o4, + o0_data_latest, + o1_data_latest, + o2_data_latest, + o3_data_latest, + o4_data_latest, + } = self; + + let (r0_index, r0_data) = r0.next()?; + let (r1_index, r1_data) = r1.next()?; + + let max_index = [r0_index, r1_index].into_iter().max()?; + + let mut o0_data = None; + while let Some((_, data)) = o0.next_if(|(index, _)| index <= &max_index) { + o0_data = Some(data); + } + let o0_data = o0_data.or(o0_data_latest.take()); + o0_data_latest.clone_from(&o0_data); + + let mut o1_data = None; + while let Some((_, data)) = o1.next_if(|(index, _)| index <= &max_index) { + o1_data = Some(data); + } + let o1_data = o1_data.or(o1_data_latest.take()); + o1_data_latest.clone_from(&o1_data); + + let mut o2_data = None; + while let Some((_, data)) = o2.next_if(|(index, _)| index <= &max_index) { + o2_data = Some(data); + } + let o2_data = o2_data.or(o2_data_latest.take()); + o2_data_latest.clone_from(&o2_data); + + let mut o3_data = None; + while let Some((_, data)) = o3.next_if(|(index, _)| index <= &max_index) { + o3_data = Some(data); + } + let o3_data = o3_data.or(o3_data_latest.take()); + o3_data_latest.clone_from(&o3_data); + + let mut o4_data = None; + while let Some((_, data)) = o4.next_if(|(index, _)| index <= &max_index) { + o4_data = Some(data); + } + let o4_data = o4_data.or(o4_data_latest.take()); + o4_data_latest.clone_from(&o4_data); + + Some(( + max_index, r0_data, r1_data, o0_data, o1_data, o2_data, o3_data, o4_data, + )) + } +} + +/// Returns a new [`RangeZip2x6`] iterator. +/// +/// The number of elements in a range zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`, `r1`). +/// +/// Each call to `next` is guaranteed to yield the next value for each required iterator, +/// as well as the most recent index amongst all of them. +/// +/// Optional iterators accumulate their state and yield their most recent value (if any), +/// each time the required iterators fire. 
+pub fn range_zip_2x6( + r0: IR0, + r1: IR1, + o0: IO0, + o1: IO1, + o2: IO2, + o3: IO3, + o4: IO4, + o5: IO5, +) -> RangeZip2x6< + Idx, + IR0::IntoIter, + R0, + IR1::IntoIter, + R1, + IO0::IntoIter, + O0, + IO1::IntoIter, + O1, + IO2::IntoIter, + O2, + IO3::IntoIter, + O3, + IO4::IntoIter, + O4, + IO5::IntoIter, + O5, +> +where + Idx: std::cmp::Ord, + IR0: IntoIterator, + IR1: IntoIterator, + IO0: IntoIterator, + IO1: IntoIterator, + IO2: IntoIterator, + IO3: IntoIterator, + IO4: IntoIterator, + IO5: IntoIterator, +{ + RangeZip2x6 { + r0: r0.into_iter(), + r1: r1.into_iter(), + o0: o0.into_iter().peekable(), + o1: o1.into_iter().peekable(), + o2: o2.into_iter().peekable(), + o3: o3.into_iter().peekable(), + o4: o4.into_iter().peekable(), + o5: o5.into_iter().peekable(), + + o0_data_latest: None, + o1_data_latest: None, + o2_data_latest: None, + o3_data_latest: None, + o4_data_latest: None, + o5_data_latest: None, + } +} + +/// Implements a range zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`range_zip_2x6`] for more information. +pub struct RangeZip2x6 +where + Idx: std::cmp::Ord, + IR0: Iterator, + IR1: Iterator, + IO0: Iterator, + IO1: Iterator, + IO2: Iterator, + IO3: Iterator, + IO4: Iterator, + IO5: Iterator, +{ + r0: IR0, + r1: IR1, + o0: Peekable, + o1: Peekable, + o2: Peekable, + o3: Peekable, + o4: Peekable, + o5: Peekable, + + o0_data_latest: Option, + o1_data_latest: Option, + o2_data_latest: Option, + o3_data_latest: Option, + o4_data_latest: Option, + o5_data_latest: Option, +} + +impl Iterator + for RangeZip2x6 +where + Idx: std::cmp::Ord, + IR0: Iterator, + IR1: Iterator, + IO0: Iterator, + IO1: Iterator, + IO2: Iterator, + IO3: Iterator, + IO4: Iterator, + IO5: Iterator, + O0: Clone, + O1: Clone, + O2: Clone, + O3: Clone, + O4: Clone, + O5: Clone, +{ + type Item = ( + Idx, + R0, + R1, + Option, + Option, + Option, + Option, + Option, + Option, + ); + + #[inline] + fn next(&mut self) -> Option { + let Self { + r0, + r1, + o0, + o1, + o2, + o3, + o4, + o5, + o0_data_latest, + o1_data_latest, + o2_data_latest, + o3_data_latest, + o4_data_latest, + o5_data_latest, + } = self; + + let (r0_index, r0_data) = r0.next()?; + let (r1_index, r1_data) = r1.next()?; + + let max_index = [r0_index, r1_index].into_iter().max()?; + + let mut o0_data = None; + while let Some((_, data)) = o0.next_if(|(index, _)| index <= &max_index) { + o0_data = Some(data); + } + let o0_data = o0_data.or(o0_data_latest.take()); + o0_data_latest.clone_from(&o0_data); + + let mut o1_data = None; + while let Some((_, data)) = o1.next_if(|(index, _)| index <= &max_index) { + o1_data = Some(data); + } + let o1_data = o1_data.or(o1_data_latest.take()); + o1_data_latest.clone_from(&o1_data); + + let mut o2_data = None; + while let Some((_, data)) = o2.next_if(|(index, _)| index <= &max_index) { + o2_data = Some(data); + } + let o2_data = o2_data.or(o2_data_latest.take()); + o2_data_latest.clone_from(&o2_data); + + let mut o3_data = None; + while let Some((_, data)) = o3.next_if(|(index, _)| index <= &max_index) { + o3_data = Some(data); + } + let o3_data = o3_data.or(o3_data_latest.take()); + o3_data_latest.clone_from(&o3_data); + + let mut o4_data = None; + while let Some((_, data)) = o4.next_if(|(index, _)| index <= &max_index) { + o4_data = Some(data); + } + let o4_data = o4_data.or(o4_data_latest.take()); + o4_data_latest.clone_from(&o4_data); + + let mut o5_data = None; + while let Some((_, data)) = o5.next_if(|(index, _)| index <= &max_index) { + o5_data = 
Some(data); + } + let o5_data = o5_data.or(o5_data_latest.take()); + o5_data_latest.clone_from(&o5_data); + + Some(( + max_index, r0_data, r1_data, o0_data, o1_data, o2_data, o3_data, o4_data, o5_data, + )) + } +} + +/// Returns a new [`RangeZip2x7`] iterator. +/// +/// The number of elements in a range zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`, `r1`). +/// +/// Each call to `next` is guaranteed to yield the next value for each required iterator, +/// as well as the most recent index amongst all of them. +/// +/// Optional iterators accumulate their state and yield their most recent value (if any), +/// each time the required iterators fire. +pub fn range_zip_2x7< + Idx, + IR0, + R0, + IR1, + R1, + IO0, + O0, + IO1, + O1, + IO2, + O2, + IO3, + O3, + IO4, + O4, + IO5, + O5, + IO6, + O6, +>( + r0: IR0, + r1: IR1, + o0: IO0, + o1: IO1, + o2: IO2, + o3: IO3, + o4: IO4, + o5: IO5, + o6: IO6, +) -> RangeZip2x7< + Idx, + IR0::IntoIter, + R0, + IR1::IntoIter, + R1, + IO0::IntoIter, + O0, + IO1::IntoIter, + O1, + IO2::IntoIter, + O2, + IO3::IntoIter, + O3, + IO4::IntoIter, + O4, + IO5::IntoIter, + O5, + IO6::IntoIter, + O6, +> +where + Idx: std::cmp::Ord, + IR0: IntoIterator, + IR1: IntoIterator, + IO0: IntoIterator, + IO1: IntoIterator, + IO2: IntoIterator, + IO3: IntoIterator, + IO4: IntoIterator, + IO5: IntoIterator, + IO6: IntoIterator, +{ + RangeZip2x7 { + r0: r0.into_iter(), + r1: r1.into_iter(), + o0: o0.into_iter().peekable(), + o1: o1.into_iter().peekable(), + o2: o2.into_iter().peekable(), + o3: o3.into_iter().peekable(), + o4: o4.into_iter().peekable(), + o5: o5.into_iter().peekable(), + o6: o6.into_iter().peekable(), + + o0_data_latest: None, + o1_data_latest: None, + o2_data_latest: None, + o3_data_latest: None, + o4_data_latest: None, + o5_data_latest: None, + o6_data_latest: None, + } +} + +/// Implements a range zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`range_zip_2x7`] for more information. 
+pub struct RangeZip2x7< + Idx, + IR0, + R0, + IR1, + R1, + IO0, + O0, + IO1, + O1, + IO2, + O2, + IO3, + O3, + IO4, + O4, + IO5, + O5, + IO6, + O6, +> where + Idx: std::cmp::Ord, + IR0: Iterator, + IR1: Iterator, + IO0: Iterator, + IO1: Iterator, + IO2: Iterator, + IO3: Iterator, + IO4: Iterator, + IO5: Iterator, + IO6: Iterator, +{ + r0: IR0, + r1: IR1, + o0: Peekable, + o1: Peekable, + o2: Peekable, + o3: Peekable, + o4: Peekable, + o5: Peekable, + o6: Peekable, + + o0_data_latest: Option, + o1_data_latest: Option, + o2_data_latest: Option, + o3_data_latest: Option, + o4_data_latest: Option, + o5_data_latest: Option, + o6_data_latest: Option, +} + +impl Iterator + for RangeZip2x7< + Idx, + IR0, + R0, + IR1, + R1, + IO0, + O0, + IO1, + O1, + IO2, + O2, + IO3, + O3, + IO4, + O4, + IO5, + O5, + IO6, + O6, + > +where + Idx: std::cmp::Ord, + IR0: Iterator, + IR1: Iterator, + IO0: Iterator, + IO1: Iterator, + IO2: Iterator, + IO3: Iterator, + IO4: Iterator, + IO5: Iterator, + IO6: Iterator, + O0: Clone, + O1: Clone, + O2: Clone, + O3: Clone, + O4: Clone, + O5: Clone, + O6: Clone, +{ + type Item = ( + Idx, + R0, + R1, + Option, + Option, + Option, + Option, + Option, + Option, + Option, + ); + + #[inline] + fn next(&mut self) -> Option { + let Self { + r0, + r1, + o0, + o1, + o2, + o3, + o4, + o5, + o6, + o0_data_latest, + o1_data_latest, + o2_data_latest, + o3_data_latest, + o4_data_latest, + o5_data_latest, + o6_data_latest, + } = self; + + let (r0_index, r0_data) = r0.next()?; + let (r1_index, r1_data) = r1.next()?; + + let max_index = [r0_index, r1_index].into_iter().max()?; + + let mut o0_data = None; + while let Some((_, data)) = o0.next_if(|(index, _)| index <= &max_index) { + o0_data = Some(data); + } + let o0_data = o0_data.or(o0_data_latest.take()); + o0_data_latest.clone_from(&o0_data); + + let mut o1_data = None; + while let Some((_, data)) = o1.next_if(|(index, _)| index <= &max_index) { + o1_data = Some(data); + } + let o1_data = o1_data.or(o1_data_latest.take()); + o1_data_latest.clone_from(&o1_data); + + let mut o2_data = None; + while let Some((_, data)) = o2.next_if(|(index, _)| index <= &max_index) { + o2_data = Some(data); + } + let o2_data = o2_data.or(o2_data_latest.take()); + o2_data_latest.clone_from(&o2_data); + + let mut o3_data = None; + while let Some((_, data)) = o3.next_if(|(index, _)| index <= &max_index) { + o3_data = Some(data); + } + let o3_data = o3_data.or(o3_data_latest.take()); + o3_data_latest.clone_from(&o3_data); + + let mut o4_data = None; + while let Some((_, data)) = o4.next_if(|(index, _)| index <= &max_index) { + o4_data = Some(data); + } + let o4_data = o4_data.or(o4_data_latest.take()); + o4_data_latest.clone_from(&o4_data); + + let mut o5_data = None; + while let Some((_, data)) = o5.next_if(|(index, _)| index <= &max_index) { + o5_data = Some(data); + } + let o5_data = o5_data.or(o5_data_latest.take()); + o5_data_latest.clone_from(&o5_data); + + let mut o6_data = None; + while let Some((_, data)) = o6.next_if(|(index, _)| index <= &max_index) { + o6_data = Some(data); + } + let o6_data = o6_data.or(o6_data_latest.take()); + o6_data_latest.clone_from(&o6_data); + + Some(( + max_index, r0_data, r1_data, o0_data, o1_data, o2_data, o3_data, o4_data, o5_data, + o6_data, + )) + } +} + +/// Returns a new [`RangeZip2x8`] iterator. +/// +/// The number of elements in a range zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`, `r1`). 
+/// +/// Each call to `next` is guaranteed to yield the next value for each required iterator, +/// as well as the most recent index amongst all of them. +/// +/// Optional iterators accumulate their state and yield their most recent value (if any), +/// each time the required iterators fire. +pub fn range_zip_2x8< + Idx, + IR0, + R0, + IR1, + R1, + IO0, + O0, + IO1, + O1, + IO2, + O2, + IO3, + O3, + IO4, + O4, + IO5, + O5, + IO6, + O6, + IO7, + O7, +>( + r0: IR0, + r1: IR1, + o0: IO0, + o1: IO1, + o2: IO2, + o3: IO3, + o4: IO4, + o5: IO5, + o6: IO6, + o7: IO7, +) -> RangeZip2x8< + Idx, + IR0::IntoIter, + R0, + IR1::IntoIter, + R1, + IO0::IntoIter, + O0, + IO1::IntoIter, + O1, + IO2::IntoIter, + O2, + IO3::IntoIter, + O3, + IO4::IntoIter, + O4, + IO5::IntoIter, + O5, + IO6::IntoIter, + O6, + IO7::IntoIter, + O7, +> +where + Idx: std::cmp::Ord, + IR0: IntoIterator, + IR1: IntoIterator, + IO0: IntoIterator, + IO1: IntoIterator, + IO2: IntoIterator, + IO3: IntoIterator, + IO4: IntoIterator, + IO5: IntoIterator, + IO6: IntoIterator, + IO7: IntoIterator, +{ + RangeZip2x8 { + r0: r0.into_iter(), + r1: r1.into_iter(), + o0: o0.into_iter().peekable(), + o1: o1.into_iter().peekable(), + o2: o2.into_iter().peekable(), + o3: o3.into_iter().peekable(), + o4: o4.into_iter().peekable(), + o5: o5.into_iter().peekable(), + o6: o6.into_iter().peekable(), + o7: o7.into_iter().peekable(), + + o0_data_latest: None, + o1_data_latest: None, + o2_data_latest: None, + o3_data_latest: None, + o4_data_latest: None, + o5_data_latest: None, + o6_data_latest: None, + o7_data_latest: None, + } +} + +/// Implements a range zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`range_zip_2x8`] for more information. +pub struct RangeZip2x8< + Idx, + IR0, + R0, + IR1, + R1, + IO0, + O0, + IO1, + O1, + IO2, + O2, + IO3, + O3, + IO4, + O4, + IO5, + O5, + IO6, + O6, + IO7, + O7, +> where + Idx: std::cmp::Ord, + IR0: Iterator, + IR1: Iterator, + IO0: Iterator, + IO1: Iterator, + IO2: Iterator, + IO3: Iterator, + IO4: Iterator, + IO5: Iterator, + IO6: Iterator, + IO7: Iterator, +{ + r0: IR0, + r1: IR1, + o0: Peekable, + o1: Peekable, + o2: Peekable, + o3: Peekable, + o4: Peekable, + o5: Peekable, + o6: Peekable, + o7: Peekable, + + o0_data_latest: Option, + o1_data_latest: Option, + o2_data_latest: Option, + o3_data_latest: Option, + o4_data_latest: Option, + o5_data_latest: Option, + o6_data_latest: Option, + o7_data_latest: Option, +} + +impl< + Idx, + IR0, + R0, + IR1, + R1, + IO0, + O0, + IO1, + O1, + IO2, + O2, + IO3, + O3, + IO4, + O4, + IO5, + O5, + IO6, + O6, + IO7, + O7, + > Iterator + for RangeZip2x8< + Idx, + IR0, + R0, + IR1, + R1, + IO0, + O0, + IO1, + O1, + IO2, + O2, + IO3, + O3, + IO4, + O4, + IO5, + O5, + IO6, + O6, + IO7, + O7, + > +where + Idx: std::cmp::Ord, + IR0: Iterator, + IR1: Iterator, + IO0: Iterator, + IO1: Iterator, + IO2: Iterator, + IO3: Iterator, + IO4: Iterator, + IO5: Iterator, + IO6: Iterator, + IO7: Iterator, + O0: Clone, + O1: Clone, + O2: Clone, + O3: Clone, + O4: Clone, + O5: Clone, + O6: Clone, + O7: Clone, +{ + type Item = ( + Idx, + R0, + R1, + Option, + Option, + Option, + Option, + Option, + Option, + Option, + Option, + ); + + #[inline] + fn next(&mut self) -> Option { + let Self { + r0, + r1, + o0, + o1, + o2, + o3, + o4, + o5, + o6, + o7, + o0_data_latest, + o1_data_latest, + o2_data_latest, + o3_data_latest, + o4_data_latest, + o5_data_latest, + o6_data_latest, + o7_data_latest, + } = self; + + let (r0_index, r0_data) = r0.next()?; + let 
(r1_index, r1_data) = r1.next()?; + + let max_index = [r0_index, r1_index].into_iter().max()?; + + let mut o0_data = None; + while let Some((_, data)) = o0.next_if(|(index, _)| index <= &max_index) { + o0_data = Some(data); + } + let o0_data = o0_data.or(o0_data_latest.take()); + o0_data_latest.clone_from(&o0_data); + + let mut o1_data = None; + while let Some((_, data)) = o1.next_if(|(index, _)| index <= &max_index) { + o1_data = Some(data); + } + let o1_data = o1_data.or(o1_data_latest.take()); + o1_data_latest.clone_from(&o1_data); + + let mut o2_data = None; + while let Some((_, data)) = o2.next_if(|(index, _)| index <= &max_index) { + o2_data = Some(data); + } + let o2_data = o2_data.or(o2_data_latest.take()); + o2_data_latest.clone_from(&o2_data); + + let mut o3_data = None; + while let Some((_, data)) = o3.next_if(|(index, _)| index <= &max_index) { + o3_data = Some(data); + } + let o3_data = o3_data.or(o3_data_latest.take()); + o3_data_latest.clone_from(&o3_data); + + let mut o4_data = None; + while let Some((_, data)) = o4.next_if(|(index, _)| index <= &max_index) { + o4_data = Some(data); + } + let o4_data = o4_data.or(o4_data_latest.take()); + o4_data_latest.clone_from(&o4_data); + + let mut o5_data = None; + while let Some((_, data)) = o5.next_if(|(index, _)| index <= &max_index) { + o5_data = Some(data); + } + let o5_data = o5_data.or(o5_data_latest.take()); + o5_data_latest.clone_from(&o5_data); + + let mut o6_data = None; + while let Some((_, data)) = o6.next_if(|(index, _)| index <= &max_index) { + o6_data = Some(data); + } + let o6_data = o6_data.or(o6_data_latest.take()); + o6_data_latest.clone_from(&o6_data); + + let mut o7_data = None; + while let Some((_, data)) = o7.next_if(|(index, _)| index <= &max_index) { + o7_data = Some(data); + } + let o7_data = o7_data.or(o7_data_latest.take()); + o7_data_latest.clone_from(&o7_data); + + Some(( + max_index, r0_data, r1_data, o0_data, o1_data, o2_data, o3_data, o4_data, o5_data, + o6_data, o7_data, + )) + } +} + +/// Returns a new [`RangeZip2x9`] iterator. +/// +/// The number of elements in a range zip iterator corresponds to the number of elements in the +/// shortest of its required iterators (`r0`, `r1`). +/// +/// Each call to `next` is guaranteed to yield the next value for each required iterator, +/// as well as the most recent index amongst all of them. +/// +/// Optional iterators accumulate their state and yield their most recent value (if any), +/// each time the required iterators fire. 
+pub fn range_zip_2x9< + Idx, + IR0, + R0, + IR1, + R1, + IO0, + O0, + IO1, + O1, + IO2, + O2, + IO3, + O3, + IO4, + O4, + IO5, + O5, + IO6, + O6, + IO7, + O7, + IO8, + O8, +>( + r0: IR0, + r1: IR1, + o0: IO0, + o1: IO1, + o2: IO2, + o3: IO3, + o4: IO4, + o5: IO5, + o6: IO6, + o7: IO7, + o8: IO8, +) -> RangeZip2x9< + Idx, + IR0::IntoIter, + R0, + IR1::IntoIter, + R1, + IO0::IntoIter, + O0, + IO1::IntoIter, + O1, + IO2::IntoIter, + O2, + IO3::IntoIter, + O3, + IO4::IntoIter, + O4, + IO5::IntoIter, + O5, + IO6::IntoIter, + O6, + IO7::IntoIter, + O7, + IO8::IntoIter, + O8, +> +where + Idx: std::cmp::Ord, + IR0: IntoIterator, + IR1: IntoIterator, + IO0: IntoIterator, + IO1: IntoIterator, + IO2: IntoIterator, + IO3: IntoIterator, + IO4: IntoIterator, + IO5: IntoIterator, + IO6: IntoIterator, + IO7: IntoIterator, + IO8: IntoIterator, +{ + RangeZip2x9 { + r0: r0.into_iter(), + r1: r1.into_iter(), + o0: o0.into_iter().peekable(), + o1: o1.into_iter().peekable(), + o2: o2.into_iter().peekable(), + o3: o3.into_iter().peekable(), + o4: o4.into_iter().peekable(), + o5: o5.into_iter().peekable(), + o6: o6.into_iter().peekable(), + o7: o7.into_iter().peekable(), + o8: o8.into_iter().peekable(), + + o0_data_latest: None, + o1_data_latest: None, + o2_data_latest: None, + o3_data_latest: None, + o4_data_latest: None, + o5_data_latest: None, + o6_data_latest: None, + o7_data_latest: None, + o8_data_latest: None, + } +} + +/// Implements a range zip iterator combinator with 2 required iterators and 2 optional +/// iterators. +/// +/// See [`range_zip_2x9`] for more information. +pub struct RangeZip2x9< + Idx, + IR0, + R0, + IR1, + R1, + IO0, + O0, + IO1, + O1, + IO2, + O2, + IO3, + O3, + IO4, + O4, + IO5, + O5, + IO6, + O6, + IO7, + O7, + IO8, + O8, +> where + Idx: std::cmp::Ord, + IR0: Iterator, + IR1: Iterator, + IO0: Iterator, + IO1: Iterator, + IO2: Iterator, + IO3: Iterator, + IO4: Iterator, + IO5: Iterator, + IO6: Iterator, + IO7: Iterator, + IO8: Iterator, +{ + r0: IR0, + r1: IR1, + o0: Peekable, + o1: Peekable, + o2: Peekable, + o3: Peekable, + o4: Peekable, + o5: Peekable, + o6: Peekable, + o7: Peekable, + o8: Peekable, + + o0_data_latest: Option, + o1_data_latest: Option, + o2_data_latest: Option, + o3_data_latest: Option, + o4_data_latest: Option, + o5_data_latest: Option, + o6_data_latest: Option, + o7_data_latest: Option, + o8_data_latest: Option, +} + +impl< + Idx, + IR0, + R0, + IR1, + R1, + IO0, + O0, + IO1, + O1, + IO2, + O2, + IO3, + O3, + IO4, + O4, + IO5, + O5, + IO6, + O6, + IO7, + O7, + IO8, + O8, + > Iterator + for RangeZip2x9< + Idx, + IR0, + R0, + IR1, + R1, + IO0, + O0, + IO1, + O1, + IO2, + O2, + IO3, + O3, + IO4, + O4, + IO5, + O5, + IO6, + O6, + IO7, + O7, + IO8, + O8, + > +where + Idx: std::cmp::Ord, + IR0: Iterator, + IR1: Iterator, + IO0: Iterator, + IO1: Iterator, + IO2: Iterator, + IO3: Iterator, + IO4: Iterator, + IO5: Iterator, + IO6: Iterator, + IO7: Iterator, + IO8: Iterator, + O0: Clone, + O1: Clone, + O2: Clone, + O3: Clone, + O4: Clone, + O5: Clone, + O6: Clone, + O7: Clone, + O8: Clone, +{ + type Item = ( + Idx, + R0, + R1, + Option, + Option, + Option, + Option, + Option, + Option, + Option, + Option, + Option, + ); + + #[inline] + fn next(&mut self) -> Option { + let Self { + r0, + r1, + o0, + o1, + o2, + o3, + o4, + o5, + o6, + o7, + o8, + o0_data_latest, + o1_data_latest, + o2_data_latest, + o3_data_latest, + o4_data_latest, + o5_data_latest, + o6_data_latest, + o7_data_latest, + o8_data_latest, + } = self; + + let (r0_index, r0_data) = r0.next()?; + let 
(r1_index, r1_data) = r1.next()?; + + let max_index = [r0_index, r1_index].into_iter().max()?; + + let mut o0_data = None; + while let Some((_, data)) = o0.next_if(|(index, _)| index <= &max_index) { + o0_data = Some(data); + } + let o0_data = o0_data.or(o0_data_latest.take()); + o0_data_latest.clone_from(&o0_data); + + let mut o1_data = None; + while let Some((_, data)) = o1.next_if(|(index, _)| index <= &max_index) { + o1_data = Some(data); + } + let o1_data = o1_data.or(o1_data_latest.take()); + o1_data_latest.clone_from(&o1_data); + + let mut o2_data = None; + while let Some((_, data)) = o2.next_if(|(index, _)| index <= &max_index) { + o2_data = Some(data); + } + let o2_data = o2_data.or(o2_data_latest.take()); + o2_data_latest.clone_from(&o2_data); + + let mut o3_data = None; + while let Some((_, data)) = o3.next_if(|(index, _)| index <= &max_index) { + o3_data = Some(data); + } + let o3_data = o3_data.or(o3_data_latest.take()); + o3_data_latest.clone_from(&o3_data); + + let mut o4_data = None; + while let Some((_, data)) = o4.next_if(|(index, _)| index <= &max_index) { + o4_data = Some(data); + } + let o4_data = o4_data.or(o4_data_latest.take()); + o4_data_latest.clone_from(&o4_data); + + let mut o5_data = None; + while let Some((_, data)) = o5.next_if(|(index, _)| index <= &max_index) { + o5_data = Some(data); + } + let o5_data = o5_data.or(o5_data_latest.take()); + o5_data_latest.clone_from(&o5_data); + + let mut o6_data = None; + while let Some((_, data)) = o6.next_if(|(index, _)| index <= &max_index) { + o6_data = Some(data); + } + let o6_data = o6_data.or(o6_data_latest.take()); + o6_data_latest.clone_from(&o6_data); + + let mut o7_data = None; + while let Some((_, data)) = o7.next_if(|(index, _)| index <= &max_index) { + o7_data = Some(data); + } + let o7_data = o7_data.or(o7_data_latest.take()); + o7_data_latest.clone_from(&o7_data); + + let mut o8_data = None; + while let Some((_, data)) = o8.next_if(|(index, _)| index <= &max_index) { + o8_data = Some(data); + } + let o8_data = o8_data.or(o8_data_latest.take()); + o8_data_latest.clone_from(&o8_data); + + Some(( + max_index, r0_data, r1_data, o0_data, o1_data, o2_data, o3_data, o4_data, o5_data, + o6_data, o7_data, o8_data, + )) + } +} diff --git a/crates/store/re_query2/src/range_zip/mod.rs b/crates/store/re_query2/src/range_zip/mod.rs new file mode 100644 index 0000000000000..8bf59965386c0 --- /dev/null +++ b/crates/store/re_query2/src/range_zip/mod.rs @@ -0,0 +1,70 @@ +mod generated; +pub use self::generated::*; + +#[cfg(test)] +mod tests { + use itertools::Itertools as _; + + use re_chunk::RowId; + use re_log_types::TimeInt; + + use super::*; + + #[test] + fn overview_1x1() { + let t9 = TimeInt::new_temporal(9); + let t10 = TimeInt::new_temporal(10); + let t11 = TimeInt::new_temporal(11); + let t12 = TimeInt::new_temporal(12); + let t13 = TimeInt::new_temporal(13); + let t14 = TimeInt::new_temporal(14); + + let p0: Vec<((TimeInt, RowId), u32)> = vec![ + ((t9, RowId::ZERO), 90), // + // + ((t10, RowId::ZERO), 100), // + // + ((t13, RowId::ZERO.incremented_by(0)), 130), // + ((t13, RowId::ZERO.incremented_by(0)), 130), // + ((t13, RowId::ZERO.incremented_by(0)), 130), // + ((t13, RowId::ZERO.incremented_by(1)), 131), // + ((t13, RowId::ZERO.incremented_by(2)), 132), // + ((t13, RowId::ZERO.incremented_by(5)), 135), // + // + ((t14, RowId::ZERO), 140), // + ]; + + let c0: Vec<((TimeInt, RowId), &'static str)> = vec![ + ((t10, RowId::ZERO.incremented_by(1)), "101"), // + ((t10, RowId::ZERO.incremented_by(2)), "102"), // + 
((t10, RowId::ZERO.incremented_by(3)), "103"), // + // + ((t11, RowId::ZERO), "110"), // + // + ((t12, RowId::ZERO), "120"), // + // + ((t13, RowId::ZERO.incremented_by(1)), "131"), // + ((t13, RowId::ZERO.incremented_by(2)), "132"), // + ((t13, RowId::ZERO.incremented_by(4)), "134"), // + ((t13, RowId::ZERO.incremented_by(6)), "136"), // + ]; + + let expected: Vec<((TimeInt, RowId), u32, Option<&'static str>)> = vec![ + ((t9, RowId::ZERO), 90, None), // + // + ((t10, RowId::ZERO), 100, None), // + // + ((t13, RowId::ZERO.incremented_by(0)), 130, Some("120")), // + ((t13, RowId::ZERO.incremented_by(0)), 130, Some("120")), // + ((t13, RowId::ZERO.incremented_by(0)), 130, Some("120")), // + ((t13, RowId::ZERO.incremented_by(1)), 131, Some("131")), // + ((t13, RowId::ZERO.incremented_by(2)), 132, Some("132")), // + ((t13, RowId::ZERO.incremented_by(5)), 135, Some("134")), // + // + ((t14, RowId::ZERO), 140, Some("136")), // + ]; + let got = range_zip_1x1(p0, c0).collect_vec(); + + similar_asserts::assert_eq!(expected, got); + } +} diff --git a/crates/store/re_query2/tests/latest_at.rs b/crates/store/re_query2/tests/latest_at.rs new file mode 100644 index 0000000000000..d8112f3c7ca2e --- /dev/null +++ b/crates/store/re_query2/tests/latest_at.rs @@ -0,0 +1,579 @@ +// https://github.com/rust-lang/rust-clippy/issues/10011 +#![cfg(test)] + +use std::sync::Arc; + +use re_chunk::RowId; +use re_chunk_store::{ + external::re_chunk::Chunk, ChunkStore, ChunkStoreSubscriber as _, LatestAtQuery, +}; +use re_log_types::{ + build_frame_nr, + example_components::{MyColor, MyPoint, MyPoints}, + EntityPath, TimeInt, TimePoint, +}; +use re_query2::Caches; +use re_types::{Archetype as _, ComponentBatch}; + +// --- + +#[test] +fn simple_query() { + let mut store = ChunkStore::new( + re_log_types::StoreId::random(re_log_types::StoreKind::Recording), + Default::default(), + ); + let mut caches = Caches::new(&store); + + let entity_path = "point"; + let timepoint = [build_frame_nr(123)]; + + let row_id1 = RowId::new(); + let points1 = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; + let row_id2 = RowId::new(); + let colors2 = vec![MyColor::from_rgb(255, 0, 0)]; + let chunk = Chunk::builder(entity_path.into()) + .with_component_batch(row_id1, timepoint, &points1) + .with_component_batch(row_id2, timepoint, &colors2) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk)); + + let query = re_chunk_store::LatestAtQuery::new(timepoint[0].0, timepoint[0].1); + let expected_compound_index = (TimeInt::new_temporal(123), row_id2); + let expected_points = &points1; + let expected_colors = &colors2; + query_and_compare( + &caches, + &store, + &query, + &entity_path.into(), + expected_compound_index, + expected_points, + expected_colors, + ); +} + +#[test] +fn static_query() { + let mut store = ChunkStore::new( + re_log_types::StoreId::random(re_log_types::StoreKind::Recording), + Default::default(), + ); + let mut caches = Caches::new(&store); + + let entity_path = "point"; + let timepoint = [build_frame_nr(123)]; + + let row_id1 = RowId::new(); + let points = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; + let chunk = Chunk::builder(entity_path.into()) + .with_component_batches(row_id1, timepoint, [&points as &dyn ComponentBatch]) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk)); + + let row_id2 = RowId::new(); + let colors = vec![MyColor::from_rgb(255, 0, 0)]; + let chunk = Chunk::builder(entity_path.into()) + .with_component_batches( + 
row_id2, + TimePoint::default(), + [&colors as &dyn ComponentBatch], + ) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk)); + + let query = re_chunk_store::LatestAtQuery::new(timepoint[0].0, timepoint[0].1); + let expected_compound_index = (TimeInt::new_temporal(123), row_id1); + let expected_points = &points; + let expected_colors = &colors; + query_and_compare( + &caches, + &store, + &query, + &entity_path.into(), + expected_compound_index, + expected_points, + expected_colors, + ); +} + +#[test] +fn invalidation() { + let entity_path = "point"; + + let test_invalidation = |query: LatestAtQuery, + present_data_timepoint: TimePoint, + past_data_timepoint: TimePoint, + future_data_timepoint: TimePoint| { + let past_timestamp = past_data_timepoint + .get(&query.timeline()) + .copied() + .unwrap_or(TimeInt::STATIC); + let present_timestamp = present_data_timepoint + .get(&query.timeline()) + .copied() + .unwrap_or(TimeInt::STATIC); + + let mut store = ChunkStore::new( + re_log_types::StoreId::random(re_log_types::StoreKind::Recording), + Default::default(), + ); + let mut caches = Caches::new(&store); + + let row_id1 = RowId::new(); + let points = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; + let chunk = Chunk::builder(entity_path.into()) + .with_component_batches(row_id1, present_data_timepoint.clone(), [&points as _]) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk)); + + let row_id2 = RowId::new(); + let colors = vec![MyColor::from_rgb(1, 2, 3)]; + let chunk = Chunk::builder(entity_path.into()) + .with_component_batches(row_id2, present_data_timepoint.clone(), [&colors as _]) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk)); + + let expected_compound_index = (present_timestamp, row_id2); + let expected_points = &points; + let expected_colors = &colors; + query_and_compare( + &caches, + &store, + &query, + &entity_path.into(), + expected_compound_index, + expected_points, + expected_colors, + ); + + // --- Modify present --- + + // Modify the PoV component + let row_id3 = RowId::new(); + let points = vec![MyPoint::new(10.0, 20.0), MyPoint::new(30.0, 40.0)]; + let chunk = Chunk::builder(entity_path.into()) + .with_component_batches(row_id3, present_data_timepoint.clone(), [&points as _]) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk)); + + let expected_compound_index = (present_timestamp, row_id3); + let expected_points = &points; + let expected_colors = &colors; + query_and_compare( + &caches, + &store, + &query, + &entity_path.into(), + expected_compound_index, + expected_points, + expected_colors, + ); + + // Modify the optional component + let row_id4 = RowId::new(); + let colors = vec![MyColor::from_rgb(4, 5, 6), MyColor::from_rgb(7, 8, 9)]; + let chunk = Chunk::builder(entity_path.into()) + .with_component_batches(row_id4, present_data_timepoint.clone(), [&colors as _]) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk)); + + let expected_compound_index = (present_timestamp, row_id4); + let expected_points = &points; + let expected_colors = &colors; + query_and_compare( + &caches, + &store, + &query, + &entity_path.into(), + expected_compound_index, + expected_points, + expected_colors, + ); + + // --- Modify past --- + + // Modify the PoV component + let row_id5 = RowId::new(); + let points_past = vec![MyPoint::new(100.0, 200.0), MyPoint::new(300.0, 400.0)]; + let chunk = 
Chunk::builder(entity_path.into()) + .with_component_batches(row_id5, past_data_timepoint.clone(), [&points_past as _]) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk)); + + let expected_compound_index = (present_timestamp, row_id4); + let expected_points = if past_timestamp.is_static() { + &points_past + } else { + &points + }; + let expected_colors = &colors; + query_and_compare( + &caches, + &store, + &query, + &entity_path.into(), + expected_compound_index, + expected_points, + expected_colors, + ); + + // Modify the optional component + let row_id6 = RowId::new(); + let colors_past = vec![MyColor::from_rgb(10, 11, 12), MyColor::from_rgb(13, 14, 15)]; + let chunk = Chunk::builder(entity_path.into()) + .with_component_batches(row_id6, past_data_timepoint.clone(), [&colors_past as _]) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk)); + + let (expected_compound_index, expected_colors) = if past_timestamp.is_static() { + ((past_timestamp, row_id6), &colors_past) + } else { + ((present_timestamp, row_id4), &colors) + }; + query_and_compare( + &caches, + &store, + &query, + &entity_path.into(), + expected_compound_index, + expected_points, + expected_colors, + ); + + // --- Modify future --- + + // Modify the PoV component + let row_id7 = RowId::new(); + let points_future = vec![MyPoint::new(1000.0, 2000.0), MyPoint::new(3000.0, 4000.0)]; + let chunk = Chunk::builder(entity_path.into()) + .with_component_batches( + row_id7, + future_data_timepoint.clone(), + [&points_future as _], + ) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk)); + + let (expected_compound_index, expected_points) = if past_timestamp.is_static() { + ((past_timestamp, row_id6), &points_past) + } else { + ((present_timestamp, row_id4), &points) + }; + query_and_compare( + &caches, + &store, + &query, + &entity_path.into(), + expected_compound_index, + expected_points, + expected_colors, + ); + + // Modify the optional component + let row_id8 = RowId::new(); + let colors_future = vec![MyColor::from_rgb(16, 17, 18)]; + let chunk = Chunk::builder(entity_path.into()) + .with_component_batches( + row_id8, + future_data_timepoint.clone(), + [&colors_future as _], + ) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk)); + + let (expected_compound_index, expected_colors) = if past_timestamp.is_static() { + ((past_timestamp, row_id6), &colors_past) + } else { + ((present_timestamp, row_id4), &colors) + }; + query_and_compare( + &caches, + &store, + &query, + &entity_path.into(), + expected_compound_index, + expected_points, + expected_colors, + ); + }; + + let static_ = TimePoint::default(); + let frame_122 = build_frame_nr(122); + let frame_123 = build_frame_nr(123); + let frame_124 = build_frame_nr(124); + + test_invalidation( + LatestAtQuery::new(frame_123.0, frame_123.1), + [frame_123].into(), + [frame_122].into(), + [frame_124].into(), + ); + + test_invalidation( + LatestAtQuery::new(frame_123.0, frame_123.1), + [frame_123].into(), + static_, + [frame_124].into(), + ); +} + +// Test the following scenario: +// ```py +// rr.log("points", rr.Points3D([1, 2, 3]), static=True) +// +// # Do first query here: LatestAt(+inf) +// # Expected: points=[[1,2,3]] colors=[] +// +// rr.set_time(2) +// rr.log_components("points", rr.components.MyColor(0xFF0000)) +// +// # Do second query here: LatestAt(+inf) +// # Expected: points=[[1,2,3]] colors=[0xFF0000] +// +// rr.set_time(3) +// 
rr.log_components("points", rr.components.MyColor(0x0000FF)) +// +// # Do third query here: LatestAt(+inf) +// # Expected: points=[[1,2,3]] colors=[0x0000FF] +// +// rr.set_time(3) +// rr.log_components("points", rr.components.MyColor(0x00FF00)) +// +// # Do fourth query here: LatestAt(+inf) +// # Expected: points=[[1,2,3]] colors=[0x00FF00] +// ``` +#[test] +fn invalidation_of_future_optionals() { + let mut store = ChunkStore::new( + re_log_types::StoreId::random(re_log_types::StoreKind::Recording), + Default::default(), + ); + let mut caches = Caches::new(&store); + + let entity_path = "points"; + + let static_ = TimePoint::default(); + let frame2 = [build_frame_nr(2)]; + let frame3 = [build_frame_nr(3)]; + + let query_time = [build_frame_nr(9999)]; + + let row_id1 = RowId::new(); + let points = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; + let chunk = Chunk::builder(entity_path.into()) + .with_component_batches(row_id1, static_, [&points as _]) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk)); + + let query = re_chunk_store::LatestAtQuery::new(query_time[0].0, query_time[0].1); + let expected_compound_index = (TimeInt::STATIC, row_id1); + let expected_points = &points; + let expected_colors = &[]; + query_and_compare( + &caches, + &store, + &query, + &entity_path.into(), + expected_compound_index, + expected_points, + expected_colors, + ); + + let row_id2 = RowId::new(); + let colors = vec![MyColor::from_rgb(255, 0, 0)]; + let chunk = Chunk::builder(entity_path.into()) + .with_component_batches(row_id2, frame2, [&colors as _]) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk)); + + let query = re_chunk_store::LatestAtQuery::new(query_time[0].0, query_time[0].1); + let expected_compound_index = (TimeInt::new_temporal(2), row_id2); + let expected_points = &points; + let expected_colors = &colors; + query_and_compare( + &caches, + &store, + &query, + &entity_path.into(), + expected_compound_index, + expected_points, + expected_colors, + ); + + let row_id3 = RowId::new(); + let colors = vec![MyColor::from_rgb(0, 0, 255)]; + let chunk = Chunk::builder(entity_path.into()) + .with_component_batches(row_id3, frame3, [&colors as _]) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk)); + + let query = re_chunk_store::LatestAtQuery::new(query_time[0].0, query_time[0].1); + let expected_compound_index = (TimeInt::new_temporal(3), row_id3); + let expected_points = &points; + let expected_colors = &colors; + query_and_compare( + &caches, + &store, + &query, + &entity_path.into(), + expected_compound_index, + expected_points, + expected_colors, + ); + + let row_id4 = RowId::new(); + let colors = vec![MyColor::from_rgb(0, 255, 0)]; + let chunk = Chunk::builder(entity_path.into()) + .with_component_batches(row_id4, frame3, [&colors as _]) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk)); + + let query = re_chunk_store::LatestAtQuery::new(query_time[0].0, query_time[0].1); + let expected_compound_index = (TimeInt::new_temporal(3), row_id4); + let expected_points = &points; + let expected_colors = &colors; + query_and_compare( + &caches, + &store, + &query, + &entity_path.into(), + expected_compound_index, + expected_points, + expected_colors, + ); +} + +#[test] +fn static_invalidation() { + let mut store = ChunkStore::new( + re_log_types::StoreId::random(re_log_types::StoreKind::Recording), + Default::default(), + ); + let mut caches = 
Caches::new(&store);
+
+    let entity_path = "points";
+
+    let timeless = TimePoint::default();
+
+    let query_time = [build_frame_nr(9999)];
+
+    let row_id1 = RowId::new();
+    let points = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)];
+    let chunk = Chunk::builder(entity_path.into())
+        .with_component_batches(row_id1, timeless.clone(), [&points as _])
+        .build()
+        .unwrap();
+    insert_and_react(&mut store, &mut caches, &Arc::new(chunk));
+
+    let query = re_chunk_store::LatestAtQuery::new(query_time[0].0, query_time[0].1);
+    let expected_compound_index = (TimeInt::STATIC, row_id1);
+    let expected_points = &points;
+    let expected_colors = &[];
+    query_and_compare(
+        &caches,
+        &store,
+        &query,
+        &entity_path.into(),
+        expected_compound_index,
+        expected_points,
+        expected_colors,
+    );
+
+    let row_id2 = RowId::new();
+    let colors = vec![MyColor::from_rgb(255, 0, 0)];
+    let chunk = Chunk::builder(entity_path.into())
+        .with_component_batches(row_id2, timeless.clone(), [&colors as _])
+        .build()
+        .unwrap();
+    insert_and_react(&mut store, &mut caches, &Arc::new(chunk));
+
+    let query = re_chunk_store::LatestAtQuery::new(query_time[0].0, query_time[0].1);
+    let expected_compound_index = (TimeInt::STATIC, row_id2);
+    let expected_points = &points;
+    let expected_colors = &colors;
+    query_and_compare(
+        &caches,
+        &store,
+        &query,
+        &entity_path.into(),
+        expected_compound_index,
+        expected_points,
+        expected_colors,
+    );
+
+    let row_id3 = RowId::new();
+    let colors = vec![MyColor::from_rgb(0, 0, 255)];
+    let chunk = Chunk::builder(entity_path.into())
+        .with_component_batches(row_id3, timeless.clone(), [&colors as _])
+        .build()
+        .unwrap();
+    insert_and_react(&mut store, &mut caches, &Arc::new(chunk));
+
+    let query = re_chunk_store::LatestAtQuery::new(query_time[0].0, query_time[0].1);
+    let expected_compound_index = (TimeInt::STATIC, row_id3);
+    let expected_points = &points;
+    let expected_colors = &colors;
+    query_and_compare(
+        &caches,
+        &store,
+        &query,
+        &entity_path.into(),
+        expected_compound_index,
+        expected_points,
+        expected_colors,
+    );
+}
+
+// ---
+
+fn insert_and_react(store: &mut ChunkStore, caches: &mut Caches, chunk: &Arc<Chunk>) {
+    caches.on_events(&store.insert_chunk(chunk).unwrap());
+}
+
+fn query_and_compare(
+    caches: &Caches,
+    store: &ChunkStore,
+    query: &LatestAtQuery,
+    entity_path: &EntityPath,
+    expected_compound_index: (TimeInt, RowId),
+    expected_points: &[MyPoint],
+    expected_colors: &[MyColor],
+) {
+    re_log::setup_logging();
+
+    for _ in 0..3 {
+        let cached = caches.latest_at(
+            store,
+            query,
+            entity_path,
+            MyPoints::all_components().iter().copied(),
+        );
+
+        let cached_points = cached.component_batch::<MyPoint>().unwrap();
+        let cached_colors = cached.component_batch::<MyColor>().unwrap_or_default();
+
+        eprintln!("{store}");
+        eprintln!("{query:?}");
+        // eprintln!("{}", store.to_data_table().unwrap());
+
+        similar_asserts::assert_eq!(expected_compound_index, cached.compound_index);
+        similar_asserts::assert_eq!(expected_points, cached_points);
+        similar_asserts::assert_eq!(expected_colors, cached_colors);
+    }
+}
diff --git a/crates/store/re_query2/tests/range.rs b/crates/store/re_query2/tests/range.rs
new file mode 100644
index 0000000000000..b6d86262cfac0
--- /dev/null
+++ b/crates/store/re_query2/tests/range.rs
@@ -0,0 +1,1080 @@
+// https://github.com/rust-lang/rust-clippy/issues/10011
+#![cfg(test)]
+
+use std::sync::Arc;
+
+use itertools::Itertools as _;
+
+use re_chunk::{RowId, Timeline};
+use re_chunk_store::{
+    external::re_chunk::Chunk, ChunkStore,
ChunkStoreSubscriber as _, RangeQuery, + ResolvedTimeRange, TimeInt, +}; +use re_log_types::{ + build_frame_nr, + example_components::{MyColor, MyPoint, MyPoints}, + EntityPath, TimePoint, +}; +use re_query2::Caches; +use re_types::Archetype; +use re_types_core::Loggable as _; + +// --- + +#[test] +fn simple_range() -> anyhow::Result<()> { + let mut store = ChunkStore::new( + re_log_types::StoreId::random(re_log_types::StoreKind::Recording), + Default::default(), + ); + let mut caches = Caches::new(&store); + + let entity_path: EntityPath = "point".into(); + + let timepoint1 = [build_frame_nr(123)]; + let row_id1_1 = RowId::new(); + let points1_1 = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; + let row_id1_2 = RowId::new(); + let colors1_2 = vec![MyColor::from_rgb(255, 0, 0)]; + let chunk = Chunk::builder(entity_path.clone()) + .with_component_batch(row_id1_1, timepoint1, &points1_1) + .with_component_batch(row_id1_2, timepoint1, &colors1_2) + .build()?; + insert_and_react(&mut store, &mut caches, &Arc::new(chunk)); + + let timepoint2 = [build_frame_nr(223)]; + let row_id2 = RowId::new(); + let colors2 = vec![MyColor::from_rgb(255, 0, 0)]; + let chunk = Chunk::builder(entity_path.clone()) + .with_component_batch(row_id2, timepoint2, &colors2) + .build()?; + insert_and_react(&mut store, &mut caches, &Arc::new(chunk)); + + let timepoint3 = [build_frame_nr(323)]; + let row_id3 = RowId::new(); + let points3 = vec![MyPoint::new(10.0, 20.0), MyPoint::new(30.0, 40.0)]; + let chunk = Chunk::builder(entity_path.clone()) + .with_component_batch(row_id3, timepoint3, &points3) + .build()?; + insert_and_react(&mut store, &mut caches, &Arc::new(chunk)); + + // --- First test: `(timepoint1, timepoint3]` --- + + let query = RangeQuery::new( + timepoint1[0].0, + ResolvedTimeRange::new(timepoint1[0].1.as_i64() + 1, timepoint3[0].1), + ); + + let expected_points = &[ + ((TimeInt::new_temporal(323), row_id3), points3.as_slice()), // + ]; + let expected_colors = &[ + ((TimeInt::new_temporal(223), row_id2), colors2.as_slice()), // + ]; + query_and_compare( + &caches, + &store, + &query, + &entity_path, + expected_points, + expected_colors, + ); + + // --- Second test: `[timepoint1, timepoint3]` --- + + let query = RangeQuery::new( + timepoint1[0].0, + ResolvedTimeRange::new(timepoint1[0].1, timepoint3[0].1), + ); + + let expected_points = &[ + ( + (TimeInt::new_temporal(123), row_id1_1), + points1_1.as_slice(), + ), // + ((TimeInt::new_temporal(323), row_id3), points3.as_slice()), // + ]; + let expected_colors = &[ + ( + (TimeInt::new_temporal(123), row_id1_2), + colors1_2.as_slice(), + ), // + ((TimeInt::new_temporal(223), row_id2), colors2.as_slice()), // + ]; + query_and_compare( + &caches, + &store, + &query, + &entity_path, + expected_points, + expected_colors, + ); + + Ok(()) +} + +#[test] +fn static_range() -> anyhow::Result<()> { + let mut store = ChunkStore::new( + re_log_types::StoreId::random(re_log_types::StoreKind::Recording), + Default::default(), + ); + let mut caches = Caches::new(&store); + + let entity_path: EntityPath = "point".into(); + + let timepoint1 = [build_frame_nr(123)]; + let row_id1_1 = RowId::new(); + let points1_1 = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; + let row_id1_2 = RowId::new(); + let colors1_2 = vec![MyColor::from_rgb(255, 0, 0)]; + let chunk = Chunk::builder(entity_path.clone()) + .with_component_batch(row_id1_1, timepoint1, &points1_1) + .with_component_batch(row_id1_2, timepoint1, &colors1_2) + .build()?; + insert_and_react(&mut store, &mut 
caches, &Arc::new(chunk)); + // Insert statically too! + let row_id1_3 = RowId::new(); + let chunk = Chunk::builder(entity_path.clone()) + .with_component_batch(row_id1_3, TimePoint::default(), &colors1_2) + .build()?; + insert_and_react(&mut store, &mut caches, &Arc::new(chunk)); + + let timepoint2 = [build_frame_nr(223)]; + let row_id2_1 = RowId::new(); + let colors2_1 = vec![MyColor::from_rgb(255, 0, 0)]; + let chunk = Chunk::builder(entity_path.clone()) + .with_component_batch(row_id2_1, timepoint2, &colors2_1) + .build()?; + insert_and_react(&mut store, &mut caches, &Arc::new(chunk)); + // Insert statically too! + let row_id2_2 = RowId::new(); + let chunk = Chunk::builder(entity_path.clone()) + .with_component_batch(row_id2_2, TimePoint::default(), &colors2_1) + .build()?; + insert_and_react(&mut store, &mut caches, &Arc::new(chunk)); + + let timepoint3 = [build_frame_nr(323)]; + // Create some Positions with implicit instances + let row_id3 = RowId::new(); + let points3 = vec![MyPoint::new(10.0, 20.0), MyPoint::new(30.0, 40.0)]; + let chunk = Chunk::builder(entity_path.clone()) + .with_component_batch(row_id3, timepoint3, &points3) + .build()?; + insert_and_react(&mut store, &mut caches, &Arc::new(chunk)); + + // --- First test: `(timepoint1, timepoint3]` --- + + let query = RangeQuery::new( + timepoint1[0].0, + ResolvedTimeRange::new(timepoint1[0].1.as_i64() + 1, timepoint3[0].1), + ); + + let expected_points = &[ + ((TimeInt::new_temporal(323), row_id3), points3.as_slice()), // + ]; + let expected_colors = &[ + ((TimeInt::STATIC, row_id2_2), colors2_1.as_slice()), // + ]; + query_and_compare( + &caches, + &store, + &query, + &entity_path, + expected_points, + expected_colors, + ); + + // --- Second test: `[timepoint1, timepoint3]` --- + + // The inclusion of `timepoint1` means latest-at semantics will fall back to timeless data! + + let query = RangeQuery::new( + timepoint1[0].0, + ResolvedTimeRange::new(timepoint1[0].1, timepoint3[0].1), + ); + + let expected_points = &[ + ( + (TimeInt::new_temporal(123), row_id1_1), + points1_1.as_slice(), + ), // + ((TimeInt::new_temporal(323), row_id3), points3.as_slice()), // + ]; + let expected_colors = &[ + ((TimeInt::STATIC, row_id2_2), colors2_1.as_slice()), // + ]; + query_and_compare( + &caches, + &store, + &query, + &entity_path, + expected_points, + expected_colors, + ); + + // --- Third test: `[-inf, +inf]` --- + + let query = RangeQuery::new( + timepoint1[0].0, + ResolvedTimeRange::new(TimeInt::MIN, TimeInt::MAX), + ); + + // same expectations + query_and_compare( + &caches, + &store, + &query, + &entity_path, + expected_points, + expected_colors, + ); + + Ok(()) +} + +// Test the case where the user loads a piece of data at the end of the time range, then a piece at +// the beginning of the range, and finally a piece right in the middle. +// +// DATA = ################################################### +// | | | | \_____/ +// \______/ | | query #1 +// query #2 \_______/ +// query #3 +// +// There is no data invalidation involved, which is what makes this case tricky: the cache must +// properly keep track of the fact that there are holes in the data -- on purpose. 
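A sketch may make the hole-tracking requirement concrete before the test itself. The following editor-added `CoverageTracker` is hypothetical and far simpler than the real cache, but it shows one way to remember which sub-ranges are populated so that queries #1, #2 and #3 above each fetch only their own hole (it ignores i64 overflow at the extremes):

```rust
#[derive(Default)]
struct CoverageTracker {
    /// Sorted, disjoint, inclusive `[min, max]` ranges that are already cached.
    covered: Vec<(i64, i64)>,
}

impl CoverageTracker {
    /// Returns the holes of `[min, max]` that still need fetching,
    /// then marks the whole range as covered.
    fn missing(&mut self, min: i64, max: i64) -> Vec<(i64, i64)> {
        let mut holes = Vec::new();
        let mut cursor = min;
        for &(lo, hi) in &self.covered {
            if hi < cursor {
                continue; // entirely before the queried range
            }
            if lo > max {
                break; // entirely after the queried range
            }
            if lo > cursor {
                holes.push((cursor, lo - 1)); // a hole before this covered range
            }
            cursor = cursor.max(hi + 1);
        }
        if cursor <= max {
            holes.push((cursor, max)); // trailing hole
        }

        // Merge `[min, max]` into the covered set, re-normalizing adjacent ranges.
        let mut all = std::mem::take(&mut self.covered);
        all.push((min, max));
        all.sort_unstable();
        for (lo, hi) in all {
            match self.covered.last_mut() {
                Some((_, prev_hi)) if lo <= *prev_hi + 1 => *prev_hi = (*prev_hi).max(hi),
                _ => self.covered.push((lo, hi)),
            }
        }
        holes
    }
}

fn main() {
    let mut tracker = CoverageTracker::default();
    assert_eq!(tracker.missing(8, 10), vec![(8, 10)]); // query #1: everything is a hole
    assert_eq!(tracker.missing(1, 3), vec![(1, 3)]); // query #2: ditto
    assert_eq!(tracker.missing(5, 7), vec![(5, 7)]); // query #3: the middle is still a hole
    assert_eq!(tracker.missing(0, 11), vec![(0, 0), (4, 4), (11, 11)]); // only the gaps remain
}
```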
+#[test] +fn time_back_and_forth() { + let mut store = ChunkStore::new( + re_log_types::StoreId::random(re_log_types::StoreKind::Recording), + Default::default(), + ); + let mut caches = Caches::new(&store); + + let entity_path: EntityPath = "point".into(); + + let (chunks, points): (Vec<_>, Vec<_>) = (0..10) + .map(|i| { + let timepoint = [build_frame_nr(i)]; + let points = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; + let chunk = Arc::new( + Chunk::builder(entity_path.clone()) + .with_component_batch(RowId::new(), timepoint, &points.clone()) + .build() + .unwrap(), + ); + + insert_and_react(&mut store, &mut caches, &chunk); + + (chunk, points) + }) + .unzip(); + + // --- Query #1: `[8, 10]` --- + + let query = RangeQuery::new( + Timeline::new_sequence("frame_nr"), + ResolvedTimeRange::new(8, 10), + ); + + let expected_points = &[ + ( + ( + TimeInt::new_temporal(8), + chunks[8].row_id_range().unwrap().0, + ), // + points[8].as_slice(), + ), // + ( + ( + TimeInt::new_temporal(9), + chunks[9].row_id_range().unwrap().0, + ), // + points[9].as_slice(), + ), // + ]; + query_and_compare(&caches, &store, &query, &entity_path, expected_points, &[]); + + // --- Query #2: `[1, 3]` --- + + let query = RangeQuery::new( + Timeline::new_sequence("frame_nr"), + ResolvedTimeRange::new(1, 3), + ); + + let expected_points = &[ + ( + ( + TimeInt::new_temporal(1), + chunks[1].row_id_range().unwrap().0, + ), // + points[1].as_slice(), + ), // + ( + ( + TimeInt::new_temporal(2), + chunks[2].row_id_range().unwrap().0, + ), // + points[2].as_slice(), + ), // + ( + ( + TimeInt::new_temporal(3), + chunks[3].row_id_range().unwrap().0, + ), // + points[3].as_slice(), + ), // + ]; + query_and_compare(&caches, &store, &query, &entity_path, expected_points, &[]); + + // --- Query #3: `[5, 7]` --- + + let query = RangeQuery::new( + Timeline::new_sequence("frame_nr"), + ResolvedTimeRange::new(5, 7), + ); + + let expected_points = &[ + ( + ( + TimeInt::new_temporal(5), + chunks[5].row_id_range().unwrap().0, + ), // + points[5].as_slice(), + ), // + ( + ( + TimeInt::new_temporal(6), + chunks[6].row_id_range().unwrap().0, + ), // + points[6].as_slice(), + ), // + ( + ( + TimeInt::new_temporal(7), + chunks[7].row_id_range().unwrap().0, + ), // + points[7].as_slice(), + ), // + ]; + query_and_compare(&caches, &store, &query, &entity_path, expected_points, &[]); +} + +#[test] +fn invalidation() { + let entity_path = "point"; + + let test_invalidation = |query: RangeQuery, + present_data_timepoint: TimePoint, + past_data_timepoint: TimePoint, + future_data_timepoint: TimePoint| { + let past_timestamp = past_data_timepoint + .get(&query.timeline()) + .copied() + .unwrap_or(TimeInt::STATIC); + let present_timestamp = present_data_timepoint + .get(&query.timeline()) + .copied() + .unwrap_or(TimeInt::STATIC); + let future_timestamp = future_data_timepoint + .get(&query.timeline()) + .copied() + .unwrap_or(TimeInt::STATIC); + + let mut store = ChunkStore::new( + re_log_types::StoreId::random(re_log_types::StoreKind::Recording), + Default::default(), + ); + let mut caches = Caches::new(&store); + + let row_id1 = RowId::new(); + let points1 = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; + let chunk1 = Chunk::builder(entity_path.into()) + .with_component_batch(row_id1, present_data_timepoint.clone(), &points1) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk1)); + + let row_id2 = RowId::new(); + let colors2 = vec![MyColor::from_rgb(1, 2, 3)]; + let chunk2 = 
Chunk::builder(entity_path.into()) + .with_component_batch(row_id2, present_data_timepoint.clone(), &colors2) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk2)); + + let expected_points = &[ + ((present_timestamp, row_id1), points1.as_slice()), // + ]; + let expected_colors = &[ + ((present_timestamp, row_id2), colors2.as_slice()), // + ]; + query_and_compare( + &caches, + &store, + &query, + &entity_path.into(), + expected_points, + expected_colors, + ); + + // --- Modify present --- + + // Modify the PoV component + let row_id3 = RowId::new(); + let points3 = vec![MyPoint::new(10.0, 20.0), MyPoint::new(30.0, 40.0)]; + let chunk3 = Chunk::builder(entity_path.into()) + .with_component_batch(row_id3, present_data_timepoint.clone(), &points3) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk3)); + + let expected_points = &[ + ((present_timestamp, row_id1), points1.as_slice()), // + ((present_timestamp, row_id3), points3.as_slice()), // + ]; + let expected_colors = &[ + ((present_timestamp, row_id2), colors2.as_slice()), // + ]; + query_and_compare( + &caches, + &store, + &query, + &entity_path.into(), + expected_points, + expected_colors, + ); + + // Modify the optional component + let row_id4 = RowId::new(); + let colors4 = vec![MyColor::from_rgb(4, 5, 6), MyColor::from_rgb(7, 8, 9)]; + let chunk4 = Chunk::builder(entity_path.into()) + .with_component_batch(row_id4, present_data_timepoint.clone(), &colors4) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk4)); + + let expected_points = &[ + ((present_timestamp, row_id1), points1.as_slice()), // + ((present_timestamp, row_id3), points3.as_slice()), // + ]; + let expected_colors = &[ + ((present_timestamp, row_id2), colors2.as_slice()), // + ((present_timestamp, row_id4), colors4.as_slice()), // + ]; + query_and_compare( + &caches, + &store, + &query, + &entity_path.into(), + expected_points, + expected_colors, + ); + + // --- Modify past --- + + // Modify the PoV component + let points5 = vec![MyPoint::new(100.0, 200.0), MyPoint::new(300.0, 400.0)]; + let row_id5 = RowId::new(); + let chunk5 = Chunk::builder(entity_path.into()) + .with_component_batch(row_id5, past_data_timepoint.clone(), &points5) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk5)); + + let expected_points1 = &[ + ((past_timestamp, row_id5), points5.as_slice()), // + ] as &[_]; + let expected_points2 = &[ + ((past_timestamp, row_id5), points5.as_slice()), // + ((present_timestamp, row_id1), points1.as_slice()), // + ((present_timestamp, row_id3), points3.as_slice()), // + ] as &[_]; + let expected_points = if past_data_timepoint.is_static() { + expected_points1 + } else { + expected_points2 + }; + let expected_colors = &[ + ((present_timestamp, row_id2), colors2.as_slice()), // + ((present_timestamp, row_id4), colors4.as_slice()), // + ]; + query_and_compare( + &caches, + &store, + &query, + &entity_path.into(), + expected_points, + expected_colors, + ); + + // Modify the optional component + let row_id6 = RowId::new(); + let colors6 = vec![MyColor::from_rgb(10, 11, 12), MyColor::from_rgb(13, 14, 15)]; + let chunk6 = Chunk::builder(entity_path.into()) + .with_component_batch(row_id6, past_data_timepoint.clone(), &colors6) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk6)); + + let expected_colors1 = &[ + ((past_timestamp, row_id6), colors6.as_slice()), // + ] as &[_]; + let expected_colors2 = &[ + 
((past_timestamp, row_id6), colors6.as_slice()), // + ((present_timestamp, row_id2), colors2.as_slice()), // + ((present_timestamp, row_id4), colors4.as_slice()), // + ] as &[_]; + let expected_colors = if past_data_timepoint.is_static() { + expected_colors1 + } else { + expected_colors2 + }; + query_and_compare( + &caches, + &store, + &query, + &entity_path.into(), + expected_points, + expected_colors, + ); + + // --- Modify future --- + + // Modify the PoV component + let row_id7 = RowId::new(); + let points7 = vec![MyPoint::new(1000.0, 2000.0), MyPoint::new(3000.0, 4000.0)]; + let chunk7 = Chunk::builder(entity_path.into()) + .with_component_batch(row_id7, future_data_timepoint.clone(), &points7) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk7)); + + let expected_points1 = &[ + ((past_timestamp, row_id5), points5.as_slice()), // + ] as &[_]; + let expected_points2 = &[ + ((past_timestamp, row_id5), points5.as_slice()), // + ((present_timestamp, row_id1), points1.as_slice()), // + ((present_timestamp, row_id3), points3.as_slice()), // + ((future_timestamp, row_id7), points7.as_slice()), // + ] as &[_]; + let expected_points = if past_data_timepoint.is_static() { + expected_points1 + } else { + expected_points2 + }; + query_and_compare( + &caches, + &store, + &query, + &entity_path.into(), + expected_points, + expected_colors, + ); + + // Modify the optional component + let row_id8 = RowId::new(); + let colors8 = vec![MyColor::from_rgb(16, 17, 18)]; + let chunk8 = Chunk::builder(entity_path.into()) + .with_component_batch(row_id8, future_data_timepoint.clone(), &colors8) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk8)); + + let expected_colors1 = &[ + ((past_timestamp, row_id6), colors6.as_slice()), // + ] as &[_]; + let expected_colors2 = &[ + ((past_timestamp, row_id6), colors6.as_slice()), // + ((present_timestamp, row_id2), colors2.as_slice()), // + ((present_timestamp, row_id4), colors4.as_slice()), // + ((future_timestamp, row_id8), colors8.as_slice()), // + ] as &[_]; + let expected_colors = if past_data_timepoint.is_static() { + expected_colors1 + } else { + expected_colors2 + }; + query_and_compare( + &caches, + &store, + &query, + &entity_path.into(), + expected_points, + expected_colors, + ); + }; + + let timeless = TimePoint::default(); + let frame_122 = build_frame_nr(122); + let frame_123 = build_frame_nr(123); + let frame_124 = build_frame_nr(124); + + test_invalidation( + RangeQuery::new(frame_123.0, ResolvedTimeRange::EVERYTHING), + [frame_123].into(), + [frame_122].into(), + [frame_124].into(), + ); + + test_invalidation( + RangeQuery::new(frame_123.0, ResolvedTimeRange::EVERYTHING), + [frame_123].into(), + timeless, + [frame_124].into(), + ); +} + +// Test the following scenario: +// ```py +// rr.log("points", rr.Points3D([1, 2, 3]), static=True) +// +// # Do first query here: LatestAt(+inf) +// # Expected: points=[[1,2,3]] colors=[] +// +// rr.set_time(2) +// rr.log_components("points", rr.components.MyColor(0xFF0000)) +// +// # Do second query here: LatestAt(+inf) +// # Expected: points=[[1,2,3]] colors=[0xFF0000] +// +// rr.set_time(3) +// rr.log_components("points", rr.components.MyColor(0x0000FF)) +// +// # Do third query here: LatestAt(+inf) +// # Expected: points=[[1,2,3]] colors=[0x0000FF] +// +// rr.set_time(3) +// rr.log_components("points", rr.components.MyColor(0x00FF00)) +// +// # Do fourth query here: LatestAt(+inf) +// # Expected: points=[[1,2,3]] colors=[0x00FF00] +// ``` +#[test] 
+fn invalidation_of_future_optionals() { + let mut store = ChunkStore::new( + re_log_types::StoreId::random(re_log_types::StoreKind::Recording), + Default::default(), + ); + let mut caches = Caches::new(&store); + + let entity_path = "points"; + + let timeless = TimePoint::default(); + let frame2 = [build_frame_nr(2)]; + let frame3 = [build_frame_nr(3)]; + + let query = RangeQuery::new(frame2[0].0, ResolvedTimeRange::EVERYTHING); + + let row_id1 = RowId::new(); + let points1 = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; + let chunk1 = Chunk::builder(entity_path.into()) + .with_component_batch(row_id1, timeless, &points1) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk1)); + + let expected_points = &[ + ((TimeInt::STATIC, row_id1), points1.as_slice()), // + ]; + let expected_colors = &[]; + query_and_compare( + &caches, + &store, + &query, + &entity_path.into(), + expected_points, + expected_colors, + ); + + let row_id2 = RowId::new(); + let colors2 = vec![MyColor::from_rgb(255, 0, 0)]; + let chunk2 = Chunk::builder(entity_path.into()) + .with_component_batch(row_id2, frame2, &colors2) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk2)); + + let expected_colors = &[ + ((TimeInt::new_temporal(2), row_id2), colors2.as_slice()), // + ]; + query_and_compare( + &caches, + &store, + &query, + &entity_path.into(), + expected_points, + expected_colors, + ); + + let row_id3 = RowId::new(); + let colors3 = vec![MyColor::from_rgb(0, 0, 255)]; + let chunk3 = Chunk::builder(entity_path.into()) + .with_component_batch(row_id3, frame3, &colors3) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk3)); + + let expected_colors = &[ + ((TimeInt::new_temporal(2), row_id2), colors2.as_slice()), // + ((TimeInt::new_temporal(3), row_id3), colors3.as_slice()), // + ]; + query_and_compare( + &caches, + &store, + &query, + &entity_path.into(), + expected_points, + expected_colors, + ); + + let row_id4 = RowId::new(); + let colors4 = vec![MyColor::from_rgb(0, 255, 0)]; + let chunk4 = Chunk::builder(entity_path.into()) + .with_component_batch(row_id4, frame3, &colors4) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk4)); + + let expected_colors = &[ + ((TimeInt::new_temporal(2), row_id2), colors2.as_slice()), // + ((TimeInt::new_temporal(3), row_id3), colors3.as_slice()), // + ((TimeInt::new_temporal(3), row_id4), colors4.as_slice()), // + ]; + query_and_compare( + &caches, + &store, + &query, + &entity_path.into(), + expected_points, + expected_colors, + ); +} + +#[test] +fn invalidation_static() { + let mut store = ChunkStore::new( + re_log_types::StoreId::random(re_log_types::StoreKind::Recording), + Default::default(), + ); + let mut caches = Caches::new(&store); + + let entity_path = "points"; + + let timeless = TimePoint::default(); + + let frame0 = [build_frame_nr(TimeInt::ZERO)]; + let query = RangeQuery::new(frame0[0].0, ResolvedTimeRange::EVERYTHING); + + let row_id1 = RowId::new(); + let points1 = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; + let chunk1 = Chunk::builder(entity_path.into()) + .with_component_batch(row_id1, timeless.clone(), &points1) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk1)); + + let expected_points = &[ + ((TimeInt::STATIC, row_id1), points1.as_slice()), // + ]; + let expected_colors = &[]; + query_and_compare( + &caches, + &store, + &query, + &entity_path.into(), + expected_points, + 
expected_colors, + ); + + let row_id2 = RowId::new(); + let colors2 = vec![MyColor::from_rgb(255, 0, 0)]; + let chunk2 = Chunk::builder(entity_path.into()) + .with_component_batch(row_id2, timeless.clone(), &colors2) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk2)); + + let expected_colors = &[ + ((TimeInt::STATIC, row_id2), colors2.as_slice()), // + ]; + query_and_compare( + &caches, + &store, + &query, + &entity_path.into(), + expected_points, + expected_colors, + ); + + let row_id3 = RowId::new(); + let colors3 = vec![MyColor::from_rgb(0, 0, 255)]; + let chunk3 = Chunk::builder(entity_path.into()) + .with_component_batch(row_id3, timeless, &colors3) + .build() + .unwrap(); + insert_and_react(&mut store, &mut caches, &Arc::new(chunk3)); + + let expected_colors = &[ + ((TimeInt::STATIC, row_id3), colors3.as_slice()), // + ]; + query_and_compare( + &caches, + &store, + &query, + &entity_path.into(), + expected_points, + expected_colors, + ); +} + +// See . +#[test] +fn concurrent_multitenant_edge_case() { + let mut store = ChunkStore::new( + re_log_types::StoreId::random(re_log_types::StoreKind::Recording), + Default::default(), + ); + let mut caches = Caches::new(&store); + + let entity_path: EntityPath = "point".into(); + + let add_points = |time: i64, point_value: f32| { + let timepoint = [build_frame_nr(time)]; + let points = vec![ + MyPoint::new(point_value, point_value + 1.0), + MyPoint::new(point_value + 2.0, point_value + 3.0), + ]; + let chunk = Arc::new( + Chunk::builder(entity_path.clone()) + .with_component_batch(RowId::new(), timepoint, &points) + .build() + .unwrap(), + ); + (timepoint, points, chunk) + }; + + let (timepoint1, points1, chunk1) = add_points(123, 1.0); + insert_and_react(&mut store, &mut caches, &chunk1); + let (_timepoint2, points2, chunk2) = add_points(223, 2.0); + insert_and_react(&mut store, &mut caches, &chunk2); + let (_timepoint3, points3, chunk3) = add_points(323, 3.0); + insert_and_react(&mut store, &mut caches, &chunk3); + + // --- Tenant #1 queries the data, but doesn't cache the result in the deserialization cache --- + + let query = RangeQuery::new(timepoint1[0].0, ResolvedTimeRange::EVERYTHING); + + eprintln!("{store}"); + + { + let cached = caches.range( + &store, + &query, + &entity_path, + MyPoints::all_components().iter().copied(), + ); + + let _cached_all_points = cached.get_required(&MyPoint::name()).unwrap(); + } + + // --- Meanwhile, tenant #2 queries and deserializes the data --- + + let query = RangeQuery::new(timepoint1[0].0, ResolvedTimeRange::EVERYTHING); + + let expected_points = &[ + ( + (TimeInt::new_temporal(123), chunk1.row_id_range().unwrap().0), + points1.as_slice(), + ), // + ( + (TimeInt::new_temporal(223), chunk2.row_id_range().unwrap().0), + points2.as_slice(), + ), // + ( + (TimeInt::new_temporal(323), chunk3.row_id_range().unwrap().0), + points3.as_slice(), + ), // + ]; + query_and_compare(&caches, &store, &query, &entity_path, expected_points, &[]); +} + +// See . 
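The subtlety these multitenant tests guard against is easier to see as a two-level cache: raw query results are cached once per range, while deserialization is memoized separately and lazily, so one tenant can populate the first level without ever touching the second. The following editor-added sketch is hypothetical; its types and method names are illustrative, not the re_query2 API:

```rust
use std::collections::HashMap;
use std::sync::Arc;

struct TwoLevelCache<Raw, Decoded> {
    /// Raw query results, keyed by the queried (inclusive) time range.
    raw: HashMap<(i64, i64), Arc<Raw>>,
    /// Deserialized results, populated only when a tenant first reads them.
    decoded: HashMap<(i64, i64), Arc<Decoded>>,
}

impl<Raw, Decoded> TwoLevelCache<Raw, Decoded> {
    fn new() -> Self {
        Self { raw: HashMap::new(), decoded: HashMap::new() }
    }

    /// Level 1: fetch (or reuse) the raw results for a range.
    fn query(&mut self, range: (i64, i64), fetch: impl FnOnce() -> Raw) -> Arc<Raw> {
        Arc::clone(self.raw.entry(range).or_insert_with(|| Arc::new(fetch())))
    }

    /// Level 2: deserialize on first access; `None` if the range was never queried.
    fn decode(
        &mut self,
        range: (i64, i64),
        decode: impl FnOnce(&Raw) -> Decoded,
    ) -> Option<Arc<Decoded>> {
        let raw = Arc::clone(self.raw.get(&range)?);
        Some(Arc::clone(
            self.decoded.entry(range).or_insert_with(|| Arc::new(decode(&raw))),
        ))
    }
}

fn main() {
    let mut cache: TwoLevelCache<Vec<u8>, String> = TwoLevelCache::new();
    // Tenant #1 queries but never deserializes...
    cache.query((123, 223), || b"raw chunk bytes".to_vec());
    // ...and tenant #2 must still be able to decode the very same cached entry later.
    let decoded = cache.decode((123, 223), |raw| String::from_utf8_lossy(raw).into_owned());
    assert_eq!(decoded.as_deref().map(String::as_str), Some("raw chunk bytes"));
}
```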
+#[test] +fn concurrent_multitenant_edge_case2() { + let mut store = ChunkStore::new( + re_log_types::StoreId::random(re_log_types::StoreKind::Recording), + Default::default(), + ); + let mut caches = Caches::new(&store); + + let entity_path: EntityPath = "point".into(); + + let add_points = |time: i64, point_value: f32| { + let timepoint = [build_frame_nr(time)]; + let points = vec![ + MyPoint::new(point_value, point_value + 1.0), + MyPoint::new(point_value + 2.0, point_value + 3.0), + ]; + let chunk = Arc::new( + Chunk::builder(entity_path.clone()) + .with_component_batch(RowId::new(), timepoint, &points) + .build() + .unwrap(), + ); + (timepoint, points, chunk) + }; + + let (timepoint1, points1, chunk1) = add_points(123, 1.0); + insert_and_react(&mut store, &mut caches, &chunk1); + let (_timepoint2, points2, chunk2) = add_points(223, 2.0); + insert_and_react(&mut store, &mut caches, &chunk2); + let (_timepoint3, points3, chunk3) = add_points(323, 3.0); + insert_and_react(&mut store, &mut caches, &chunk3); + let (_timepoint4, points4, chunk4) = add_points(423, 4.0); + insert_and_react(&mut store, &mut caches, &chunk4); + let (_timepoint5, points5, chunk5) = add_points(523, 5.0); + insert_and_react(&mut store, &mut caches, &chunk5); + + // --- Tenant #1 queries the data at (123, 223), but doesn't cache the result in the deserialization cache --- + + let query1 = RangeQuery::new(timepoint1[0].0, ResolvedTimeRange::new(123, 223)); + { + let cached = caches.range( + &store, + &query1, + &entity_path, + MyPoints::all_components().iter().copied(), + ); + + let _cached_all_points = cached.get_required(&MyPoint::name()).unwrap(); + } + + // --- Tenant #2 queries the data at (423, 523), but doesn't cache the result in the deserialization cache --- + + let query2 = RangeQuery::new(timepoint1[0].0, ResolvedTimeRange::new(423, 523)); + { + let cached = caches.range( + &store, + &query2, + &entity_path, + MyPoints::all_components().iter().copied(), + ); + + let _cached_all_points = cached.get_required(&MyPoint::name()).unwrap(); + } + + // --- Tenant #2 queries the data at (223, 423) and deserializes it --- + + let query3 = RangeQuery::new(timepoint1[0].0, ResolvedTimeRange::new(223, 423)); + let expected_points = &[ + ( + (TimeInt::new_temporal(223), chunk2.row_id_range().unwrap().0), + points2.as_slice(), + ), // + ( + (TimeInt::new_temporal(323), chunk3.row_id_range().unwrap().0), + points3.as_slice(), + ), // + ( + (TimeInt::new_temporal(423), chunk4.row_id_range().unwrap().0), + points4.as_slice(), + ), // + ]; + query_and_compare(&caches, &store, &query3, &entity_path, expected_points, &[]); + + // --- Tenant #1 finally deserializes its data --- + + let expected_points = &[ + ( + (TimeInt::new_temporal(123), chunk1.row_id_range().unwrap().0), + points1.as_slice(), + ), // + ( + (TimeInt::new_temporal(223), chunk2.row_id_range().unwrap().0), + points2.as_slice(), + ), // + ]; + query_and_compare(&caches, &store, &query1, &entity_path, expected_points, &[]); + + // --- Tenant #2 finally deserializes its data --- + + let expected_points = &[ + ( + (TimeInt::new_temporal(423), chunk4.row_id_range().unwrap().0), + points4.as_slice(), + ), // + ( + (TimeInt::new_temporal(523), chunk5.row_id_range().unwrap().0), + points5.as_slice(), + ), // + ]; + query_and_compare(&caches, &store, &query2, &entity_path, expected_points, &[]); +} + +// // --- + +fn insert_and_react(store: &mut ChunkStore, caches: &mut Caches, chunk: &Arc) { + caches.on_events(&store.insert_chunk(chunk).unwrap()); +} + +fn 
query_and_compare(
+    caches: &Caches,
+    store: &ChunkStore,
+    query: &RangeQuery,
+    entity_path: &EntityPath,
+    expected_all_points_indexed: &[((TimeInt, RowId), &[MyPoint])],
+    expected_all_colors_indexed: &[((TimeInt, RowId), &[MyColor])],
+) {
+    re_log::setup_logging();
+
+    for _ in 0..3 {
+        let cached = caches.range(
+            store,
+            query,
+            entity_path,
+            MyPoints::all_components().iter().copied(),
+        );
+
+        let all_points_chunks = cached.get_required(&MyPoint::name()).unwrap();
+        let mut all_points_iters = all_points_chunks
+            .iter()
+            .map(|chunk| chunk.iter_component::<MyPoint>())
+            .collect_vec();
+        let all_points_indexed = {
+            let all_points = all_points_iters.iter_mut().flat_map(|it| it.into_iter());
+            let all_points_indices = all_points_chunks.iter().flat_map(|chunk| {
+                chunk.iter_component_indices(&query.timeline(), &MyPoint::name())
+            });
+            itertools::izip!(all_points_indices, all_points)
+        };
+
+        let all_colors_chunks = cached.get(&MyColor::name()).unwrap_or_default();
+        let mut all_colors_iters = all_colors_chunks
+            .iter()
+            .map(|chunk| chunk.iter_component::<MyColor>())
+            .collect_vec();
+        let all_colors_indexed = {
+            let all_colors = all_colors_iters.iter_mut().flat_map(|it| it.into_iter());
+            let all_colors_indices = all_colors_chunks.iter().flat_map(|chunk| {
+                chunk.iter_component_indices(&query.timeline(), &MyColor::name())
+            });
+            itertools::izip!(all_colors_indices, all_colors)
+        };
+
+        eprintln!("{query:?}");
+        eprintln!("{store}");
+
+        similar_asserts::assert_eq!(
+            expected_all_points_indexed,
+            all_points_indexed.collect_vec(),
+        );
+
+        similar_asserts::assert_eq!(
+            expected_all_colors_indexed,
+            all_colors_indexed.collect_vec(),
+        );
+    }
+}
diff --git a/crates/top/rerun/src/run.rs b/crates/top/rerun/src/run.rs
index b8644448948b8..02b198eaac9f7 100644
--- a/crates/top/rerun/src/run.rs
+++ b/crates/top/rerun/src/run.rs
@@ -9,7 +9,7 @@ use itertools::{izip, Itertools};
 use re_data_source::DataSource;
 use re_log_types::{LogMsg, SetStoreInfo};
-use re_sdk::log::Chunk;
+use re_sdk::{log::Chunk, StoreKind};
 use re_smart_channel::{ReceiveSet, Receiver, SmartMessagePayload};
 
 #[cfg(feature = "web_viewer")]
@@ -661,16 +661,32 @@ fn run_compact(path_to_input_rrd: &Path, path_to_output_rrd: &Path) -> anyhow::R
     let mut rrd_out = std::fs::File::create(path_to_output_rrd)
         .with_context(|| format!("{path_to_output_rrd:?}"))?;
 
-    let messages: Result<Vec<Vec<LogMsg>>, _> = entity_dbs
-        .into_values()
+    let messages_rbl: Result<Vec<Vec<LogMsg>>, _> = entity_dbs
+        .values()
+        .filter(|entity_db| entity_db.store_kind() == StoreKind::Blueprint)
         .map(|entity_db| entity_db.to_messages(None /* time selection */))
         .collect();
-    let messages = messages?;
-    let messages = messages.iter().flatten();
+    let messages_rbl = messages_rbl?;
+    let messages_rbl = messages_rbl.iter().flatten();
+
+    let messages_rrd: Result<Vec<Vec<LogMsg>>, _> = entity_dbs
+        .values()
+        .filter(|entity_db| entity_db.store_kind() == StoreKind::Recording)
+        .map(|entity_db| entity_db.to_messages(None /* time selection */))
+        .collect();
+    let messages_rrd = messages_rrd?;
+    let messages_rrd = messages_rrd.iter().flatten();
 
     let encoding_options = re_log_encoding::EncodingOptions::COMPRESSED;
-    re_log_encoding::encoder::encode(version, encoding_options, messages, &mut rrd_out)
-        .context("Message encode")?;
+    re_log_encoding::encoder::encode(
+        version,
+        encoding_options,
+        // NOTE: We want to make sure all blueprints come first, so that the viewer can immediately
+        // set up the viewport correctly.
+ messages_rbl.chain(messages_rrd), + &mut rrd_out, + ) + .context("Message encode")?; let rrd_out_size = rrd_out.metadata().ok().map(|md| md.len()); diff --git a/crates/viewer/re_data_ui/src/annotation_context.rs b/crates/viewer/re_data_ui/src/annotation_context.rs index e05e15b470816..665d7f2ce5ecc 100644 --- a/crates/viewer/re_data_ui/src/annotation_context.rs +++ b/crates/viewer/re_data_ui/src/annotation_context.rs @@ -98,12 +98,12 @@ fn annotation_info( // TODO(#6358): this needs to use the index of the keypoint to look up the correct // class_id. For now we use `latest_at_component_quiet` to avoid the warning spam. - let class_id = ctx + let (_, class_id) = ctx .recording() .latest_at_component_quiet::(entity_path, query)?; let annotations = crate::annotations(ctx, query, entity_path); - let class = annotations.resolved_class_description(Some(class_id.value)); + let class = annotations.resolved_class_description(Some(class_id)); class.keypoint_map?.get(&keypoint_id).cloned() } diff --git a/crates/viewer/re_data_ui/src/image.rs b/crates/viewer/re_data_ui/src/image.rs index 5b4795a6ee936..4722699d67275 100644 --- a/crates/viewer/re_data_ui/src/image.rs +++ b/crates/viewer/re_data_ui/src/image.rs @@ -63,7 +63,7 @@ impl EntityDataUi for re_types::components::TensorData { let tensor_data_row_id = ctx .recording() .latest_at_component::(entity_path, query) - .map_or(RowId::ZERO, |tensor| tensor.index.1); + .map_or(RowId::ZERO, |((_time, row_id), _tensor)| row_id); let annotations = crate::annotations(ctx, query, entity_path); tensor_ui( @@ -106,10 +106,10 @@ pub fn tensor_ui( ( ctx.recording() .latest_at_component::(entity_path, query) - .map(|meter| *meter.value.0), + .map(|(_index, meter)| *meter.0), ctx.recording() .latest_at_component::(entity_path, query) - .map(|colormap| colormap.value), + .map(|(_index, colormap)| colormap), ) } else { (None, None) diff --git a/crates/viewer/re_selection_panel/src/defaults_ui.rs b/crates/viewer/re_selection_panel/src/defaults_ui.rs index 15163c250c428..5864c7f6fdf3c 100644 --- a/crates/viewer/re_selection_panel/src/defaults_ui.rs +++ b/crates/viewer/re_selection_panel/src/defaults_ui.rs @@ -114,14 +114,12 @@ fn active_default_ui( // TODO(jleibs): We're already doing this query above as part of the filter. This is kind of silly to do it again. // Change the structure to avoid this. - let component_data = db - .query_caches() - .latest_at(db.store(), query, &view.defaults_path, [component_name]) - .components - .get(&component_name) - .cloned(); /* arc */ - - if let Some(component_data) = component_data { + let component_array = { + let results = db.latest_at(query, &view.defaults_path, [component_name]); + results.component_batch_raw(&component_name) + }; + + if let Some(component_array) = component_array { let value_fn = |ui: &mut egui::Ui| { ctx.viewer_ctx.component_ui_registry.singleline_edit_ui( &query_context, @@ -129,7 +127,7 @@ fn active_default_ui( db, &view.defaults_path, component_name, - &component_data, + Some(&*component_array), visualizer.as_fallback_provider(), ); }; @@ -181,8 +179,6 @@ fn active_defaults( db: &re_entity_db::EntityDb, query: &LatestAtQuery, ) -> BTreeSet { - let resolver = Default::default(); - // Cleared components should act as unset, so we filter out everything that's empty, // even if they are listed in `all_components`. 
ctx.blueprint_db() @@ -191,11 +187,9 @@ fn active_defaults( .unwrap_or_default() .into_iter() .filter(|c| { - db.query_caches() + db.query_caches2() .latest_at(db.store(), query, &view.defaults_path, [*c]) - .components - .get(c) - .and_then(|data| data.resolved(&resolver).ok()) + .component_batch_raw(c) .map_or(false, |data| !data.is_empty()) }) .collect::>() @@ -316,7 +310,7 @@ fn add_popup_ui( )); } Err(err) => { - re_log::warn!("Failed to create DataRow for blueprint component: {}", err); + re_log::warn!("Failed to create Chunk for blueprint component: {}", err); } } diff --git a/crates/viewer/re_selection_panel/src/visible_time_range_ui.rs b/crates/viewer/re_selection_panel/src/visible_time_range_ui.rs index 311ffebe3a5bd..f45a75c84e22b 100644 --- a/crates/viewer/re_selection_panel/src/visible_time_range_ui.rs +++ b/crates/viewer/re_selection_panel/src/visible_time_range_ui.rs @@ -87,18 +87,16 @@ fn visible_time_range_ui( ) { use re_types::Loggable as _; - let results = ctx.blueprint_db().latest_at( - ctx.blueprint_query, - time_range_override_path, - std::iter::once(VisibleTimeRange::name()), - ); - let ranges: &[VisibleTimeRange] = results - .get(VisibleTimeRange::name()) - .and_then(|results| results.dense(ctx.blueprint_db().resolver())) + let ranges = ctx + .blueprint_db() + .latest_at( + ctx.blueprint_query, + time_range_override_path, + std::iter::once(VisibleTimeRange::name()), + ) + .component_batch::() .unwrap_or_default(); - let visible_time_ranges = re_types::blueprint::archetypes::VisibleTimeRanges { - ranges: ranges.to_vec(), - }; + let visible_time_ranges = re_types::blueprint::archetypes::VisibleTimeRanges { ranges }; let timeline_name = *ctx.rec_cfg.time_ctrl.read().timeline().name(); let mut has_individual_range = visible_time_ranges diff --git a/crates/viewer/re_space_view/Cargo.toml b/crates/viewer/re_space_view/Cargo.toml index d7eb17846ff43..99453aa42b0d9 100644 --- a/crates/viewer/re_space_view/Cargo.toml +++ b/crates/viewer/re_space_view/Cargo.toml @@ -27,6 +27,7 @@ re_entity_db.workspace = true re_log_types.workspace = true re_log.workspace = true re_query.workspace = true +re_query2.workspace = true re_tracing.workspace = true re_types_core.workspace = true re_ui.workspace = true diff --git a/crates/viewer/re_space_view/src/lib.rs b/crates/viewer/re_space_view/src/lib.rs index 73174f7e308fc..ac3015ceeb0f8 100644 --- a/crates/viewer/re_space_view/src/lib.rs +++ b/crates/viewer/re_space_view/src/lib.rs @@ -6,7 +6,9 @@ pub mod controls; mod heuristics; mod query; +mod query2; mod results_ext; +mod results_ext2; mod screenshot; mod view_property_ui; @@ -14,7 +16,16 @@ pub use heuristics::suggest_space_view_for_each_entity; pub use query::{ latest_at_with_blueprint_resolved_data, range_with_blueprint_resolved_data, DataResultQuery, }; +pub use query2::{ + latest_at_with_blueprint_resolved_data as latest_at_with_blueprint_resolved_data2, + range_with_blueprint_resolved_data as range_with_blueprint_resolved_data2, + DataResultQuery as DataResultQuery2, +}; pub use results_ext::{HybridLatestAtResults, HybridResults, RangeResultsExt}; +pub use results_ext2::{ + HybridLatestAtResults as HybridLatestAtResults2, HybridResults as HybridResults2, + RangeResultsExt as RangeResultsExt2, +}; pub use screenshot::ScreenshotMode; pub use view_property_ui::view_property_ui; diff --git a/crates/viewer/re_space_view/src/query2.rs b/crates/viewer/re_space_view/src/query2.rs new file mode 100644 index 0000000000000..72a7a2de8b478 --- /dev/null +++ 
b/crates/viewer/re_space_view/src/query2.rs
@@ -0,0 +1,233 @@
+use nohash_hasher::IntSet;
+
+use re_chunk_store::{LatestAtQuery, RangeQuery, RowId};
+use re_log_types::TimeInt;
+use re_query2::LatestAtResults;
+use re_types_core::ComponentName;
+use re_viewer_context::{DataResult, ViewContext, ViewerContext};
+
+use crate::results_ext2::{HybridLatestAtResults, HybridRangeResults};
+
+// ---
+
+/// Queries for the given `component_names` using range semantics with blueprint support.
+///
+/// Data will be resolved, in order of priority:
+/// - Data overrides from the blueprint
+/// - Data from the recording
+/// - Default data from the blueprint
+/// - Fallback from the visualizer
+/// - Placeholder from the component.
+///
+/// Data should be accessed via the [`crate::RangeResultsExt`] trait which is implemented for
+/// [`crate::HybridResults`].
+pub fn range_with_blueprint_resolved_data(
+ ctx: &ViewContext<'_>,
+ _annotations: Option<&re_viewer_context::Annotations>,
+ range_query: &RangeQuery,
+ data_result: &re_viewer_context::DataResult,
+ component_names: impl IntoIterator<Item = ComponentName>,
+) -> HybridRangeResults {
+ re_tracing::profile_function!(data_result.entity_path.to_string());
+
+ let mut component_set = component_names.into_iter().collect::<IntSet<_>>();
+
+ let overrides = query_overrides(ctx.viewer_ctx, data_result, component_set.iter());
+
+ // No need to query for components that have overrides.
+ component_set.retain(|component| !overrides.components.contains_key(component));
+
+ let results = ctx.recording().query_caches2().range(
+ ctx.recording_store(),
+ range_query,
+ &data_result.entity_path,
+ component_set.iter().copied(),
+ );
+
+ // TODO(jleibs): This doesn't work when the component set contains empty results.
+ // This means we over-query for defaults that will never be used.
+ // component_set.retain(|component| !results.components.contains_key(component));
+
+ let defaults = ctx.viewer_ctx.blueprint_db().query_caches2().latest_at(
+ ctx.viewer_ctx.store_context.blueprint.store(),
+ ctx.viewer_ctx.blueprint_query,
+ ctx.defaults_path,
+ component_set.iter().copied(),
+ );
+
+ HybridRangeResults {
+ overrides,
+ results,
+ defaults,
+ }
+}
+
+/// Queries for the given `component_names` using latest-at semantics with blueprint support.
+///
+/// Data will be resolved, in order of priority:
+/// - Data overrides from the blueprint
+/// - Data from the recording
+/// - Default data from the blueprint
+/// - Fallback from the visualizer
+/// - Placeholder from the component.
+///
+/// Data should be accessed via the [`crate::RangeResultsExt`] trait which is implemented for
+/// [`crate::HybridResults`].
+///
+/// If `query_shadowed_defaults` is true, all defaults will be queried, even if they are not used.
+pub fn latest_at_with_blueprint_resolved_data<'a>(
+ ctx: &'a ViewContext<'a>,
+ _annotations: Option<&'a re_viewer_context::Annotations>,
+ latest_at_query: &LatestAtQuery,
+ data_result: &'a re_viewer_context::DataResult,
+ component_names: impl IntoIterator<Item = ComponentName>,
+ query_shadowed_defaults: bool,
+) -> HybridLatestAtResults<'a> {
+ re_tracing::profile_function!(data_result.entity_path.to_string());
+
+ let mut component_set = component_names.into_iter().collect::<IntSet<_>>();
+
+ let overrides = query_overrides(ctx.viewer_ctx, data_result, component_set.iter());
+
+ // No need to query for components that have overrides unless opted in!
+ if !query_shadowed_defaults {
+ component_set.retain(|component| !overrides.components.contains_key(component));
+ }
+
+ let results = ctx.viewer_ctx.recording().query_caches2().latest_at(
+ ctx.viewer_ctx.recording_store(),
+ latest_at_query,
+ &data_result.entity_path,
+ component_set.iter().copied(),
+ );
+
+ // TODO(jleibs): This doesn't work when the component set contains empty results.
+ // This means we over-query for defaults that will never be used.
+ // component_set.retain(|component| !results.components.contains_key(component));
+
+ let defaults = ctx.viewer_ctx.blueprint_db().query_caches2().latest_at(
+ ctx.viewer_ctx.store_context.blueprint.store(),
+ ctx.viewer_ctx.blueprint_query,
+ ctx.defaults_path,
+ component_set.iter().copied(),
+ );
+
+ HybridLatestAtResults {
+ overrides,
+ results,
+ defaults,
+ ctx,
+ query: latest_at_query.clone(),
+ data_result,
+ }
+}
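Together these two entry points give visualizers blueprint-aware queries with a single call. A short usage sketch, assuming a `ViewContext` and `DataResult` as used elsewhere in this patch (`Color` is just an example component):

    // Hedged sketch: resolve one Color instance, walking the priority chain
    // override -> recording -> blueprint default -> visualizer fallback -> placeholder.
    use re_types::components::Color;
    use re_types_core::Loggable as _;

    let results = latest_at_with_blueprint_resolved_data(
        ctx,
        None, // annotations
        &latest_at_query,
        data_result,
        [Color::name()],
        false, // don't query shadowed defaults
    );
    let color: Color = results.get_instance_with_fallback(0);
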
+
+fn query_overrides<'a>(
+ ctx: &ViewerContext<'_>,
+ data_result: &re_viewer_context::DataResult,
+ component_names: impl Iterator<Item = &'a ComponentName>,
+) -> LatestAtResults {
+ // First see if any components have overrides.
+ let mut overrides = LatestAtResults::empty("".into(), ctx.current_query());
+
+ // TODO(jleibs): partitioning overrides by path
+ for component_name in component_names {
+ if let Some(override_value) = data_result
+ .property_overrides
+ .resolved_component_overrides
+ .get(component_name)
+ {
+ let component_override_result = match override_value.store_kind {
+ re_log_types::StoreKind::Recording => {
+ // TODO(jleibs): This probably is not right, but this code path is not used
+ // currently. This may want to use range_query instead depending on how
+ // component override data-references are resolved.
+ ctx.store_context.blueprint.query_caches2().latest_at(
+ ctx.store_context.blueprint.store(),
+ &ctx.current_query(),
+ &override_value.path,
+ [*component_name],
+ )
+ }
+ re_log_types::StoreKind::Blueprint => {
+ ctx.store_context.blueprint.query_caches2().latest_at(
+ ctx.store_context.blueprint.store(),
+ ctx.blueprint_query,
+ &override_value.path,
+ [*component_name],
+ )
+ }
+ };
+
+ // If we successfully find a non-empty override, add it to our results.
+
+ // TODO(jleibs): it seems like value could still be null/empty if the override
+ // has been cleared. It seems like something is preventing that from happening
+ // but I don't fully understand what.
+ //
+ // This is extra tricky since the promise hasn't been resolved yet so we can't
+ // actually look at the data.
+ if let Some(value) = component_override_result.components.get(component_name) {
+ let index = value.index(&ctx.current_query().timeline());
+
+ // NOTE: This should never happen, but if it somehow does, a fallback index beats an unwrap.
+ debug_assert!(index.is_some());
+ let index = index.unwrap_or((TimeInt::STATIC, RowId::ZERO));
+
+ overrides.add(*component_name, index, value.clone());
+ }
+ }
+ }
+ overrides
+}
+
+pub trait DataResultQuery {
+ fn latest_at_with_blueprint_resolved_data<'a, A: re_types_core::Archetype>(
+ &'a self,
+ ctx: &'a ViewContext<'a>,
+ latest_at_query: &'a LatestAtQuery,
+ ) -> HybridLatestAtResults<'a>;
+
+ fn best_fallback_for<'a>(
+ &self,
+ ctx: &'a ViewContext<'a>,
+ component: re_types_core::ComponentName,
+ ) -> Option<&'a dyn re_viewer_context::ComponentFallbackProvider>;
+}
+
+impl DataResultQuery for DataResult {
+ fn latest_at_with_blueprint_resolved_data<'a, A: re_types_core::Archetype>(
+ &'a self,
+ ctx: &'a ViewContext<'a>,
+ latest_at_query: &'a LatestAtQuery,
+ ) -> HybridLatestAtResults<'a> {
+ let query_shadowed_defaults = false;
+ latest_at_with_blueprint_resolved_data(
+ ctx,
+ None,
+ latest_at_query,
+ self,
+ A::all_components().iter().copied(),
+ query_shadowed_defaults,
+ )
+ }
+
+ fn best_fallback_for<'a>(
+ &self,
+ ctx: &'a ViewContext<'a>,
+ component: re_types_core::ComponentName,
+ ) -> Option<&'a dyn re_viewer_context::ComponentFallbackProvider> {
+ // TODO(jleibs): This should be cached somewhere
+ for vis in &self.visualizers {
+ let Ok(vis) = ctx.visualizer_collection.get_by_identifier(*vis) else {
+ continue;
+ };
+
+ if vis.visualizer_query_info().queried.contains(&component) {
+ return Some(vis.as_fallback_provider());
+ }
+ }
+
+ None
+ }
+}
diff --git a/crates/viewer/re_space_view/src/results_ext2.rs b/crates/viewer/re_space_view/src/results_ext2.rs
new file mode 100644
index 0000000000000..1c716d3b282a3
--- /dev/null
+++ b/crates/viewer/re_space_view/src/results_ext2.rs
@@ -0,0 +1,363 @@
+use std::borrow::Cow;
+use std::sync::Arc;
+
+use re_chunk_store::{Chunk, LatestAtQuery, RangeQuery};
+use re_log_types::external::arrow2::array::Array as ArrowArray;
+use re_log_types::hash::Hash64;
+use re_query2::{LatestAtResults, RangeResults};
+use re_types_core::ComponentName;
+use re_viewer_context::{DataResult, QueryContext, ViewContext};
+
+use crate::DataResultQuery as _;
+
+// ---
+
+/// Wrapper that contains the results of a latest-at query with possible overrides.
+///
+/// Although overrides are never temporal, when accessed via the [`crate::RangeResultsExt`] trait
+/// they will be merged into the results appropriately.
+pub struct HybridLatestAtResults<'a> {
+ pub overrides: LatestAtResults,
+ pub results: LatestAtResults,
+ pub defaults: LatestAtResults,
+
+ pub ctx: &'a ViewContext<'a>,
+ pub query: LatestAtQuery,
+ pub data_result: &'a DataResult,
+}
+
+/// Wrapper that contains the results of a range query with possible overrides.
+///
+/// Although overrides are never temporal, when accessed via the [`crate::RangeResultsExt`] trait
+/// they will be merged into the results appropriately.
+#[derive(Debug)]
+pub struct HybridRangeResults {
+ pub(crate) overrides: LatestAtResults,
+ pub(crate) results: RangeResults,
+ pub(crate) defaults: LatestAtResults,
+}
+
+impl<'a> HybridLatestAtResults<'a> {
+ pub fn try_fallback_raw(&self, component_name: ComponentName) -> Option<Box<dyn ArrowArray>> {
+ let fallback_provider = self
+ .data_result
+ .best_fallback_for(self.ctx, component_name)?;
+
+ let query_context = QueryContext {
+ viewer_ctx: self.ctx.viewer_ctx,
+ target_entity_path: &self.data_result.entity_path,
+ archetype_name: None, // TODO(jleibs): Do we need this?
+ query: &self.query,
+ view_state: self.ctx.view_state,
+ view_ctx: Some(self.ctx),
+ };
+
+ fallback_provider
+ .fallback_for(&query_context, component_name)
+ .ok()
+ }
+
+ /// Utility for retrieving the first instance of a component, ignoring defaults.
+ #[inline]
+ pub fn get_required_mono<C: re_types_core::Component>(&self) -> Option<C> {
+ self.get_required_instance(0)
+ }
+
+ /// Utility for retrieving the first instance of a component.
+ #[inline]
+ pub fn get_mono<C: re_types_core::Component>(&self) -> Option<C> {
+ self.get_instance(0)
+ }
+
+ /// Utility for retrieving the first instance of a component.
+ #[inline]
+ pub fn get_mono_with_fallback<C: re_types_core::Component + Default>(&self) -> C {
+ self.get_instance_with_fallback(0)
+ }
+
+ /// Utility for retrieving a single instance of a component, not checking for defaults.
+ ///
+ /// If overrides or defaults are present, they will only be used respectively if they have a component at the specified index.
+ #[inline]
+ pub fn get_required_instance<C: re_types_core::Component>(&self, index: usize) -> Option<C> {
+ self.overrides.component_instance::<C>(index).or_else(||
+ // No override -> try recording store instead
+ self.results.component_instance::<C>(index))
+ }
+
+ /// Utility for retrieving a single instance of a component.
+ ///
+ /// If overrides or defaults are present, they will only be used respectively if they have a component at the specified index.
+ #[inline]
+ pub fn get_instance<C: re_types_core::Component>(&self, index: usize) -> Option<C> {
+ self.get_required_instance(index).or_else(|| {
+ // No override & no store -> try default instead
+ self.defaults.component_instance::<C>(index)
+ })
+ }
+
+ /// Utility for retrieving a single instance of a component.
+ ///
+ /// If overrides or defaults are present, they will only be used respectively if they have a component at the specified index.
+ #[inline]
+ pub fn get_instance_with_fallback<C: re_types_core::Component + Default>(
+ &self,
+ index: usize,
+ ) -> C {
+ self.get_instance(index)
+ .or_else(|| {
+ // No override, no store, no default -> try fallback instead
+ self.try_fallback_raw(C::name())
+ .and_then(|raw| C::from_arrow(raw.as_ref()).ok())
+ .and_then(|r| r.first().cloned())
+ })
+ .unwrap_or_default()
+ }
+}
+
+pub enum HybridResults<'a> {
+ LatestAt(LatestAtQuery, HybridLatestAtResults<'a>),
+
+ // Boxed because of size difference between variants
+ Range(RangeQuery, Box<HybridRangeResults>),
+}
+
+impl<'a> HybridResults<'a> {
+ pub fn query_result_hash(&self) -> Hash64 {
+ re_tracing::profile_function!();
+ // TODO(andreas): We should be able to do better than this and determine hashes for queries on the fly.
+
+ match self {
+ Self::LatestAt(_, r) => {
+ let mut indices = Vec::with_capacity(
+ r.defaults.components.len()
+ + r.overrides.components.len()
+ + r.results.components.len(),
+ );
+
+ indices.extend(
+ r.defaults
+ .components
+ .values()
+ .filter_map(|chunk| chunk.row_id()),
+ );
+ indices.extend(
+ r.overrides
+ .components
+ .values()
+ .filter_map(|chunk| chunk.row_id()),
+ );
+ indices.extend(
+ r.results
+ .components
+ .values()
+ .filter_map(|chunk| chunk.row_id()),
+ );
+
+ Hash64::hash(&indices)
+ }
+
+ Self::Range(_, r) => {
+ let mut indices = Vec::with_capacity(
+ r.defaults.components.len()
+ + r.overrides.components.len()
+ + r.results.components.len(), // Don't know how many results per component.
+ );
+
+ indices.extend(
+ r.defaults
+ .components
+ .values()
+ .filter_map(|chunk| chunk.row_id()),
+ );
+ indices.extend(
+ r.overrides
+ .components
+ .values()
+ .filter_map(|chunk| chunk.row_id()),
+ );
+ indices.extend(
+ r.results
+ .components
+ .iter()
+ .flat_map(|(component_name, chunks)| {
+ chunks
+ .iter()
+ .flat_map(|chunk| chunk.component_row_ids(component_name))
+ }),
+ );
+
+ Hash64::hash(&indices)
+ }
+ }
+ }
+}
+
+// ---
+
+impl<'a> From<(LatestAtQuery, HybridLatestAtResults<'a>)> for HybridResults<'a> {
+ #[inline]
+ fn from((query, results): (LatestAtQuery, HybridLatestAtResults<'a>)) -> Self {
+ Self::LatestAt(query, results)
+ }
+}
+
+impl<'a> From<(RangeQuery, HybridRangeResults)> for HybridResults<'a> {
+ #[inline]
+ fn from((query, results): (RangeQuery, HybridRangeResults)) -> Self {
+ Self::Range(query, Box::new(results))
+ }
+}
+
+/// Extension traits to abstract query result handling for all spatial space views.
+///
+/// Also turns all results into range results, so that views only have to worry about the ranged
+/// case.
+pub trait RangeResultsExt {
+ /// Returns component data for the given component, ignores default data if the result
+ /// distinguishes them.
+ ///
+ /// For results that are aware of the blueprint, only overrides & store results will
+ /// be considered.
+ /// Defaults have no effect.
+ fn get_required_chunks(&self, component_name: &ComponentName) -> Option<Cow<'_, [Chunk]>>;
+
+ /// Returns component data for the given component or an empty array.
+ ///
+ /// For results that are aware of the blueprint, overrides, store results, and defaults will be
+ /// considered.
+ fn get_optional_chunks(&self, component_name: &ComponentName) -> Cow<'_, [Chunk]>;
+}
+
+impl RangeResultsExt for LatestAtResults {
+ #[inline]
+ fn get_required_chunks(&self, component_name: &ComponentName) -> Option<Cow<'_, [Chunk]>> {
+ self.get(component_name)
+ .cloned()
+ .map(|chunk| Cow::Owned(vec![Arc::unwrap_or_clone(chunk.into_chunk())]))
+ }
+
+ #[inline]
+ fn get_optional_chunks(&self, component_name: &ComponentName) -> Cow<'_, [Chunk]> {
+ self.get(component_name).cloned().map_or_else(
+ || Cow::Owned(vec![]),
+ |chunk| Cow::Owned(vec![Arc::unwrap_or_clone(chunk.into_chunk())]),
+ )
+ }
+}
+
+impl RangeResultsExt for RangeResults {
+ #[inline]
+ fn get_required_chunks(&self, component_name: &ComponentName) -> Option<Cow<'_, [Chunk]>> {
+ self.get_required(component_name).ok().map(Cow::Borrowed)
+ }
+
+ #[inline]
+ fn get_optional_chunks(&self, component_name: &ComponentName) -> Cow<'_, [Chunk]> {
+ Cow::Borrowed(self.get(component_name).unwrap_or_default())
+ }
+}
+
+impl RangeResultsExt for HybridRangeResults {
+ #[inline]
+ fn get_required_chunks(&self, component_name: &ComponentName) -> Option<Cow<'_, [Chunk]>> {
+ if self.overrides.contains(component_name) {
+ let unit = self.overrides.get(component_name)?;
+ // Because this is an override we always re-index the data as static
+ let chunk = Arc::unwrap_or_clone(unit.clone().into_chunk()).into_static();
+ Some(Cow::Owned(vec![chunk]))
+ } else {
+ self.results.get_required_chunks(component_name)
+ }
+ }
+
+ #[inline]
+ fn get_optional_chunks(&self, component_name: &ComponentName) -> Cow<'_, [Chunk]> {
+ if self.overrides.contains(component_name) {
+ let Some(unit) = self.overrides.get(component_name) else {
+ return Cow::Owned(Vec::new());
+ };
+ // Because this is an override we always re-index the data as static
+ let chunk = Arc::unwrap_or_clone(unit.clone().into_chunk()).into_static();
+ Cow::Owned(vec![chunk])
+ } else {
+ let chunks = self.results.get_optional_chunks(component_name);
+
+ // If the data is not empty, return it.
+
+ if !chunks.is_empty() {
+ return chunks;
+ }
+
+ // Otherwise try to use the default data.
+
+ let Some(unit) = self.defaults.get(component_name) else {
+ return Cow::Owned(Vec::new());
+ };
+ // Because this is a default from the blueprint we always re-index the data as static
+ let chunk = Arc::unwrap_or_clone(unit.clone().into_chunk()).into_static();
+ Cow::Owned(vec![chunk])
+ }
+ }
+}
+
+impl<'a> RangeResultsExt for HybridLatestAtResults<'a> {
+ #[inline]
+ fn get_required_chunks(&self, component_name: &ComponentName) -> Option<Cow<'_, [Chunk]>> {
+ if self.overrides.contains(component_name) {
+ let unit = self.overrides.get(component_name)?;
+ // Because this is an override we always re-index the data as static
+ let chunk = Arc::unwrap_or_clone(unit.clone().into_chunk()).into_static();
+ Some(Cow::Owned(vec![chunk]))
+ } else {
+ self.results.get_required_chunks(component_name)
+ }
+ }
+
+ #[inline]
+ fn get_optional_chunks(&self, component_name: &ComponentName) -> Cow<'_, [Chunk]> {
+ if self.overrides.contains(component_name) {
+ let Some(unit) = self.overrides.get(component_name) else {
+ return Cow::Owned(Vec::new());
+ };
+ // Because this is an override we always re-index the data as static
+ let chunk = Arc::unwrap_or_clone(unit.clone().into_chunk()).into_static();
+ Cow::Owned(vec![chunk])
+ } else {
+ let chunks = self.results.get_optional_chunks(component_name);
+
+ // If the data is not empty, return it.
+
+ if !chunks.is_empty() {
+ return chunks;
+ }
+
+ // Otherwise try to use the default data.
+
+ let Some(unit) = self.defaults.get(component_name) else {
+ return Cow::Owned(Vec::new());
+ };
+ // Because this is a default from the blueprint we always re-index the data as static
+ let chunk = Arc::unwrap_or_clone(unit.clone().into_chunk()).into_static();
+ Cow::Owned(vec![chunk])
+ }
+ }
+}
+
+impl<'a> RangeResultsExt for HybridResults<'a> {
+ #[inline]
+ fn get_required_chunks(&self, component_name: &ComponentName) -> Option<Cow<'_, [Chunk]>> {
+ match self {
+ Self::LatestAt(_, results) => results.get_required_chunks(component_name),
+ Self::Range(_, results) => results.get_required_chunks(component_name),
+ }
+ }
+
+ #[inline]
+ fn get_optional_chunks(&self, component_name: &ComponentName) -> Cow<'_, [Chunk]> {
+ match self {
+ Self::LatestAt(_, results) => results.get_optional_chunks(component_name),
+ Self::Range(_, results) => results.get_optional_chunks(component_name),
+ }
+ }
+}
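The `view_property_ui` changes in the next hunks drop `re_query::LatestAtComponentResults` in favor of raw arrow arrays. In sketch form (types as introduced by this patch): `component_batch_raw` yields an owned `Box<dyn ArrowArray>`, which the UI helpers then borrow for the duration of the frame:

    // Hedged sketch of the new plumbing:
    let component_array: Option<Box<dyn ArrowArray>> =
        component_results.component_batch_raw(&field.component_name);
    // UI helpers take `Option<&dyn ArrowArray>`, hence the `as_deref`.
    let borrowed: Option<&dyn ArrowArray> = component_array.as_deref();
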
diff --git a/crates/viewer/re_space_view/src/view_property_ui.rs b/crates/viewer/re_space_view/src/view_property_ui.rs
index b006cff291a4f..95f4141519d29 100644
--- a/crates/viewer/re_space_view/src/view_property_ui.rs
+++ b/crates/viewer/re_space_view/src/view_property_ui.rs
@@ -1,3 +1,4 @@
+use re_chunk_store::external::re_chunk::ArrowArray;
 use re_types_core::{
 reflection::{ArchetypeFieldReflection, ArchetypeReflection},
 Archetype, ArchetypeName, ArchetypeReflectionMarker, ComponentName,
@@ -65,6 +66,7 @@ fn view_property_ui_impl(
 if reflection.fields.len() == 1 {
 let field = &reflection.fields[0];
+ let component_array = component_results.component_batch_raw(&field.component_name);
 view_property_component_ui(
 &query_ctx,
 ui,
@@ -73,7 +75,7 @@ fn view_property_ui_impl(
 name,
 field,
 &blueprint_path,
- component_results.get_or_empty(field.component_name),
+ component_array.as_deref(),
 fallback_provider,
 );
 } else {
@@ -81,6 +83,7 @@ fn view_property_ui_impl(
 for field in &reflection.fields {
 let display_name = &field.display_name;
+ let component_array = component_results.component_batch_raw(&field.component_name);
 view_property_component_ui(
 &query_ctx,
 ui,
@@ -89,7 +92,7 @@ fn view_property_ui_impl(
 name,
 field,
 &blueprint_path,
- component_results.get_or_empty(field.component_name),
+ component_array.as_deref(),
 fallback_provider,
 );
 }
@@ -117,7 +120,7 @@ fn view_property_component_ui(
 archetype_name: ArchetypeName,
 field: &ArchetypeFieldReflection,
 blueprint_path: &re_log_types::EntityPath,
- component_results: &re_query::LatestAtComponentResults,
+ component_array: Option<&dyn ArrowArray>,
 fallback_provider: &dyn ComponentFallbackProvider,
) {
 let singleline_list_item_content = singleline_list_item_content(
@@ -125,7 +128,7 @@ fn view_property_component_ui(
 root_item_display_name,
 blueprint_path,
 component_name,
- component_results,
+ component_array,
 fallback_provider,
 );

@@ -151,7 +154,7 @@ fn view_property_component_ui(
 ctx.viewer_ctx.blueprint_db(),
 blueprint_path,
 component_name,
- component_results,
+ component_array,
 fallback_provider,
 );
 },
@@ -175,12 +178,12 @@ fn menu_more(
 ui: &mut egui::Ui,
 blueprint_path: &re_log_types::EntityPath,
 component_name: ComponentName,
- component_results: &re_query::LatestAtComponentResults,
+ component_array: Option<&dyn ArrowArray>,
) {
- let resolver = ctx.blueprint_db().resolver();
-
- let property_differs_from_default = component_results.raw(resolver, component_name)
- != ctx.raw_latest_at_in_default_blueprint(blueprint_path, component_name);
+ let property_differs_from_default = component_array
+ != ctx
+ .raw_latest_at_in_default_blueprint(blueprint_path, component_name)
+ .as_deref();

 let response = ui
 .add_enabled(
@@ -201,7 +204,7 @@ If no default blueprint was set or it didn't set any value for this field, this
 let response = ui
 .add_enabled(
- !component_results.is_empty(resolver),
+ component_array.is_some(),
 egui::Button::new("Unset"),
 )
 .on_hover_text(
@@ -223,7 +226,7 @@ fn singleline_list_item_content<'a>(
 display_name: &str,
 blueprint_path: &'a re_log_types::EntityPath,
 component_name: ComponentName,
- component_results: &'a re_query::LatestAtComponentResults,
+ component_array: Option<&'a dyn ArrowArray>,
 fallback_provider: &'a dyn ComponentFallbackProvider,
) -> list_item::PropertyContent<'a> {
 list_item::PropertyContent::new(display_name)
@@ -233,7 +236,7 @@ fn singleline_list_item_content<'a>(
 ui,
 blueprint_path,
 component_name,
- component_results,
+ component_array,
 );
 })
 .value_fn(move |ui, _| {
@@ -243,7 +246,7 @@ fn singleline_list_item_content<'a>(
 ctx.viewer_ctx.blueprint_db(),
 blueprint_path,
 component_name,
- component_results,
+ component_array,
 fallback_provider,
 );
 })
diff --git a/crates/viewer/re_space_view_dataframe/src/latest_at_table.rs b/crates/viewer/re_space_view_dataframe/src/latest_at_table.rs
index 658ef78c470aa..6eccb88c94f48 100644
--- a/crates/viewer/re_space_view_dataframe/src/latest_at_table.rs
+++ b/crates/viewer/re_space_view_dataframe/src/latest_at_table.rs
@@ -146,6 +146,7 @@ pub(crate) fn latest_at_table_ui(
 for component_name in &sorted_components {
 row.col(|ui| {
 // TODO(ab, cmc): use the suitable API from re_query when it becomes available.
+ let result = ctx .recording_store() .latest_at_relevant_chunks( @@ -154,20 +155,22 @@ pub(crate) fn latest_at_table_ui( *component_name, ) .into_iter() - .flat_map(|chunk| { - chunk + .filter_map(|chunk| { + let (index, unit) = chunk .latest_at(&latest_at_query, *component_name) - .iter_rows(&query.timeline, component_name) - .collect::>() + .into_unit() + .and_then(|unit| { + unit.index(&query.timeline).map(|index| (index, unit)) + })?; + + unit.component_batch_raw(component_name) + .map(|array| (index, array)) }) - .max_by_key(|(data_time, row_id, _)| (*data_time, *row_id)) - .and_then(|(data_time, row_id, array)| { - array.map(|array| (data_time, row_id, array)) - }); + .max_by_key(|(index, _array)| *index); // TODO(#4466): it would be nice to display the time and row id somewhere, since we // have them. - if let Some((_time, _row_id, array)) = result { + if let Some(((_time, _row_id), array)) = result { let instance_index = instance_path.instance.get() as usize; let (data, clamped) = if instance_index >= array.len() { diff --git a/crates/viewer/re_space_view_dataframe/src/time_range_table.rs b/crates/viewer/re_space_view_dataframe/src/time_range_table.rs index 1d18cb17d22de..0a465bf73320b 100644 --- a/crates/viewer/re_space_view_dataframe/src/time_range_table.rs +++ b/crates/viewer/re_space_view_dataframe/src/time_range_table.rs @@ -102,14 +102,10 @@ pub(crate) fn time_range_table_ui( // 1) Filter out instances where `chunk.iter_indices()` returns `None`. // 2) Exploit the fact that the returned iterator (if any) is *not* bound to the // lifetime of the chunk (it has an internal Arc). - .filter_map(move |chunk| { + .map(move |chunk| { //TODO(ab, cmc): remove this line when a range-aware, iter_indices API is available. let chunk = Arc::new(chunk.range(&range_query, *component)); - - chunk - .clone() - .iter_indices(&timeline) - .map(|iter_indices| (iter_indices, chunk)) + (Arc::clone(&chunk).iter_indices_owned(&timeline), chunk) }) .flat_map(move |(indices_iter, chunk)| { map_chunk_indices_to_key_value_iter( diff --git a/crates/viewer/re_space_view_dataframe/src/utils.rs b/crates/viewer/re_space_view_dataframe/src/utils.rs index 49eeaa1f8a9d7..e622e2cb413ab 100644 --- a/crates/viewer/re_space_view_dataframe/src/utils.rs +++ b/crates/viewer/re_space_view_dataframe/src/utils.rs @@ -27,6 +27,8 @@ pub(crate) fn sorted_instance_paths_for<'a>( ) -> impl Iterator + 'a { re_tracing::profile_function!(); + // TODO(cmc): This should be using re_query. 
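Until a suitable `re_query` API exists (see the TODOs above and below), both dataframe code paths resolve latest-at values manually at the chunk level. Condensed into one function, the pattern is (all calls as they appear in this patch; the wrapper name is only illustrative):

    // For each candidate chunk: run latest-at within the chunk, recover the
    // (data_time, row_id) index plus the raw batch, then keep the chunk whose
    // index is the global maximum.
    fn latest_at_raw(
        store: &re_chunk_store::ChunkStore,
        latest_at_query: &re_chunk_store::LatestAtQuery,
        timeline: &re_log_types::Timeline,
        entity_path: &re_log_types::EntityPath,
        component_name: re_types_core::ComponentName,
    ) -> Option<(
        (re_log_types::TimeInt, re_chunk_store::RowId),
        Box<dyn re_log_types::external::arrow2::array::Array>,
    )> {
        store
            .latest_at_relevant_chunks(latest_at_query, entity_path, component_name)
            .into_iter()
            .filter_map(|chunk| {
                let unit = chunk.latest_at(latest_at_query, component_name).into_unit()?;
                let index = unit.index(timeline)?;
                let array = unit.component_batch_raw(&component_name)?;
                Some((index, array))
            })
            .max_by_key(|(index, _array)| *index)
    }
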
+ store .all_components_on_timeline(timeline, entity_path) .unwrap_or_default() @@ -37,14 +39,17 @@ pub(crate) fn sorted_instance_paths_for<'a>( .latest_at_relevant_chunks(latest_at_query, entity_path, component_name) .into_iter() .filter_map(|chunk| { - let (data_time, row_id, batch) = chunk + let (index, unit) = chunk .latest_at(latest_at_query, component_name) - .iter_rows(timeline, &component_name) - .next()?; - batch.map(|batch| (data_time, row_id, batch)) + .into_unit() + .and_then(|unit| unit.index(timeline).map(|index| (index, unit)))?; + + unit.component_batch_raw(&component_name) + .map(|array| (index, array)) }) - .max_by_key(|(data_time, row_id, _)| (*data_time, *row_id)) - .map_or(0, |(_, _, batch)| batch.len()); + .max_by_key(|(index, _array)| *index) + .map_or(0, |(_index, array)| array.len()); + (0..num_instances).map(|i| Instance::from(i as u64)) }) .collect::>() // dedup and sort diff --git a/crates/viewer/re_space_view_spatial/src/contexts/transform_context.rs b/crates/viewer/re_space_view_spatial/src/contexts/transform_context.rs index 75ec284b0fadb..32eaf1639b8ff 100644 --- a/crates/viewer/re_space_view_spatial/src/contexts/transform_context.rs +++ b/crates/viewer/re_space_view_spatial/src/contexts/transform_context.rs @@ -346,7 +346,6 @@ fn get_parent_from_child_transform( entity_db: &EntityDb, query: &LatestAtQuery, ) -> Option { - let resolver = entity_db.resolver(); // TODO(#6743): Doesn't take into account overrides. let result = entity_db.latest_at( query, @@ -368,24 +367,24 @@ fn get_parent_from_child_transform( // Order is specified by order of components in the Transform3D archetype. // See `has_transform_expected_order` let mut transform = glam::Affine3A::IDENTITY; - if let Some(translation) = result.get_instance::(resolver, 0) { + if let Some(translation) = result.component_instance::(0) { transform *= glam::Affine3A::from(translation); } - if let Some(rotation) = result.get_instance::(resolver, 0) { + if let Some(rotation) = result.component_instance::(0) { transform *= glam::Affine3A::from(rotation); } - if let Some(rotation) = result.get_instance::(resolver, 0) { + if let Some(rotation) = result.component_instance::(0) { transform *= glam::Affine3A::from(rotation); } - if let Some(scale) = result.get_instance::(resolver, 0) { + if let Some(scale) = result.component_instance::(0) { transform *= glam::Affine3A::from(scale); } - if let Some(mat3x3) = result.get_instance::(resolver, 0) { + if let Some(mat3x3) = result.component_instance::(0) { transform *= glam::Affine3A::from(mat3x3); } let transform_relation = result - .get_instance::(resolver, 0) + .component_instance::(0) .unwrap_or_default(); if transform_relation == TransformRelation::ChildFromParent { Some(transform.inverse()) @@ -404,12 +403,12 @@ fn get_cached_pinhole( ) -> Option<(PinholeProjection, ViewCoordinates)> { entity_db .latest_at_component::(entity_path, query) - .map(|image_from_camera| { + .map(|(_index, image_from_camera)| { ( - image_from_camera.value, + image_from_camera, entity_db .latest_at_component::(entity_path, query) - .map_or(ViewCoordinates::RDF, |res| res.value), + .map_or(ViewCoordinates::RDF, |(_index, res)| res), ) }) } @@ -434,7 +433,18 @@ fn transform_at( } } - let transform3d = get_parent_from_child_transform(entity_path, entity_db, query); + // If this entity does not contain any `Transform3D`-related data at all, there's no + // point in running actual queries. 
+ let is_potentially_transformed = + crate::transformables::Transformables::access(entity_db.store_id(), |transformables| { + transformables.is_potentially_transformed(entity_path) + }) + .unwrap_or(false); + let transform3d = is_potentially_transformed + .then(|| get_parent_from_child_transform(entity_path, entity_db, query)) + .flatten(); + + // let transform3d = get_parent_from_child_transform(entity_path, entity_db, query); let pinhole = pinhole.map(|(image_from_camera, camera_xyz)| { // Everything under a pinhole camera is a 2D projection, thus doesn't actually have a proper 3D representation. @@ -476,7 +486,7 @@ fn transform_at( let is_disconnect_space = || { entity_db .latest_at_component::(entity_path, query) - .map_or(false, |res| **res.value) + .map_or(false, |(_index, res)| **res) }; // If there is any other transform, we ignore `DisconnectedSpace`. diff --git a/crates/viewer/re_space_view_spatial/src/lib.rs b/crates/viewer/re_space_view_spatial/src/lib.rs index 7d81a4653562e..47044ff1b949d 100644 --- a/crates/viewer/re_space_view_spatial/src/lib.rs +++ b/crates/viewer/re_space_view_spatial/src/lib.rs @@ -18,6 +18,7 @@ mod proc_mesh; mod scene_bounding_boxes; mod space_camera_3d; mod spatial_topology; +mod transformables; mod ui; mod ui_2d; mod ui_3d; @@ -55,10 +56,9 @@ fn resolution_from_tensor( query: &re_chunk_store::LatestAtQuery, entity_path: &re_log_types::EntityPath, ) -> Option { - // TODO(#5607): what should happen if the promise is still pending? entity_db .latest_at_component::(entity_path, query) - .and_then(|tensor| { + .and_then(|(_index, tensor)| { tensor .image_height_width_channels() .map(|hwc| Resolution([hwc[1] as f32, hwc[0] as f32].into())) @@ -102,20 +102,21 @@ fn query_pinhole_legacy( query: &re_chunk_store::LatestAtQuery, entity_path: &re_log_types::EntityPath, ) -> Option { - // TODO(#5607): what should happen if the promise is still pending? entity_db .latest_at_component::(entity_path, query) - .map(|image_from_camera| re_types::archetypes::Pinhole { - image_from_camera: image_from_camera.value, - resolution: entity_db - .latest_at_component(entity_path, query) - .map(|c| c.value) - .or_else(|| resolution_from_tensor(entity_db, query, entity_path)), - camera_xyz: entity_db - .latest_at_component(entity_path, query) - .map(|c| c.value), - image_plane_distance: None, - }) + .map( + |(_index, image_from_camera)| re_types::archetypes::Pinhole { + image_from_camera, + resolution: entity_db + .latest_at_component(entity_path, query) + .map(|(_index, c)| c) + .or_else(|| resolution_from_tensor(entity_db, query, entity_path)), + camera_xyz: entity_db + .latest_at_component(entity_path, query) + .map(|(_index, c)| c), + image_plane_distance: None, + }, + ) } pub(crate) fn configure_background( diff --git a/crates/viewer/re_space_view_spatial/src/transformables.rs b/crates/viewer/re_space_view_spatial/src/transformables.rs new file mode 100644 index 0000000000000..6d87c264ce1d1 --- /dev/null +++ b/crates/viewer/re_space_view_spatial/src/transformables.rs @@ -0,0 +1,116 @@ +use ahash::HashMap; +use once_cell::sync::OnceCell; + +use nohash_hasher::IntSet; +use re_chunk_store::{ + ChunkStore, ChunkStoreDiffKind, ChunkStoreEvent, ChunkStoreSubscriber, + ChunkStoreSubscriberHandle, +}; +use re_log_types::{EntityPath, StoreId}; +use re_types::ComponentName; + +// --- + +/// Keeps track of which entities have had any `Transform3D`-related data on any timeline at any +/// point in time. 
+///
+/// This is used to optimize queries in the `TransformContext`, so that we don't unnecessarily pay
+/// for the fixed overhead of all the query layers when we know for a fact that there won't be any
+/// data there.
+/// This is a huge performance improvement in practice, especially in recordings with many entities.
+#[derive(Default)]
+pub struct Transformables {
+ /// Which entities have had any of these components at any point in time.
+ entities: IntSet<EntityPath>,
+}
+
+impl Transformables {
+ /// Accesses the transformables state for a given store.
+ #[inline]
+ pub fn access<T>(store_id: &StoreId, f: impl FnOnce(&Self) -> T) -> Option<T> {
+ ChunkStore::with_subscriber_once(
+ TransformablesStoreSubscriber::subscription_handle(),
+ move |subscriber: &TransformablesStoreSubscriber| {
+ subscriber.per_store.get(store_id).map(f)
+ },
+ )
+ .flatten()
+ }
+
+ #[inline]
+ pub fn is_potentially_transformed(&self, entity_path: &EntityPath) -> bool {
+ self.entities.contains(entity_path)
+ }
+}
+
+// ---
+
+pub struct TransformablesStoreSubscriber {
+ /// The components of interest.
+ components: IntSet<ComponentName>,
+
+ per_store: HashMap<StoreId, Transformables>,
+}
+
+impl Default for TransformablesStoreSubscriber {
+ #[inline]
+ fn default() -> Self {
+ use re_types::Archetype as _;
+ let components = re_types::archetypes::Transform3D::all_components()
+ .iter()
+ .copied()
+ .collect();
+
+ Self {
+ components,
+ per_store: Default::default(),
+ }
+ }
+}
+
+impl TransformablesStoreSubscriber {
+ /// Accesses the global store subscriber.
+ ///
+ /// Lazily registers the subscriber if it hasn't been registered yet.
+ pub fn subscription_handle() -> ChunkStoreSubscriberHandle {
+ static SUBSCRIPTION: OnceCell<ChunkStoreSubscriberHandle> = OnceCell::new();
+ *SUBSCRIPTION.get_or_init(|| ChunkStore::register_subscriber(Box::<TransformablesStoreSubscriber>::default()))
+ }
+}
+
+impl ChunkStoreSubscriber for TransformablesStoreSubscriber {
+ #[inline]
+ fn name(&self) -> String {
+ "rerun.store_subscriber.Transformables".into()
+ }
+
+ #[inline]
+ fn as_any(&self) -> &dyn std::any::Any {
+ self
+ }
+
+ #[inline]
+ fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
+ self
+ }
+
+ fn on_events(&mut self, events: &[ChunkStoreEvent]) {
+ re_tracing::profile_function!();
+
+ for event in events
+ .iter()
+ // This is only additive, don't care about removals.
+ .filter(|e| e.kind == ChunkStoreDiffKind::Addition)
+ {
+ let transformables = self.per_store.entry(event.store_id.clone()).or_default();
+
+ for component_name in event.chunk.component_names() {
+ if self.components.contains(&component_name) {
+ transformables
+ .entities
+ .insert(event.chunk.entity_path().clone());
+ }
+ }
+ }
+ }
+}
diff --git a/crates/viewer/re_space_view_spatial/src/ui_3d.rs b/crates/viewer/re_space_view_spatial/src/ui_3d.rs
index a4a9bb2e4279a..355645e5e3fe7 100644
--- a/crates/viewer/re_space_view_spatial/src/ui_3d.rs
+++ b/crates/viewer/re_space_view_spatial/src/ui_3d.rs
@@ -445,7 +445,7 @@ impl SpatialSpaceView3D {
 // Allow logging view-coordinates to `/` and have it apply to `/world` etc.
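Call sites consume this subscriber through `Transformables::access`, as `transform_at` does earlier in this patch; the sketch below restates that gating pattern on its own (all names from this patch):

    // Coarse store-wide gate: `false` means the entity never carried any
    // Transform3D component, so the per-timeline query can be skipped.
    let is_potentially_transformed =
        Transformables::access(entity_db.store_id(), |transformables| {
            transformables.is_potentially_transformed(entity_path)
        })
        .unwrap_or(false); // no subscriber state for this store yet
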
// See https://github.com/rerun-io/rerun/issues/3538 .latest_at_component_at_closest_ancestor(query.space_origin, &ctx.current_query()) - .map(|(_, c)| c.value); + .map(|(_, _index, c)| c); let (rect, mut response) = ui.allocate_at_least(ui.available_size(), egui::Sense::click_and_drag()); diff --git a/crates/viewer/re_space_view_spatial/src/view_2d.rs b/crates/viewer/re_space_view_spatial/src/view_2d.rs index 4ae9bd70f5eed..1824e62f3fd2d 100644 --- a/crates/viewer/re_space_view_spatial/src/view_2d.rs +++ b/crates/viewer/re_space_view_spatial/src/view_2d.rs @@ -68,6 +68,7 @@ impl SpaceViewClass for SpatialSpaceView2D { ) -> Result<(), SpaceViewClassRegistryError> { // Ensure spatial topology & max image dimension is registered. crate::spatial_topology::SpatialTopologyStoreSubscriber::subscription_handle(); + crate::transformables::TransformablesStoreSubscriber::subscription_handle(); crate::max_image_dimension_subscriber::MaxImageDimensionSubscriber::subscription_handle(); register_spatial_contexts(system_registry)?; diff --git a/crates/viewer/re_space_view_spatial/src/view_3d.rs b/crates/viewer/re_space_view_spatial/src/view_3d.rs index 93d59f5b48757..80115f199de8c 100644 --- a/crates/viewer/re_space_view_spatial/src/view_3d.rs +++ b/crates/viewer/re_space_view_spatial/src/view_3d.rs @@ -74,6 +74,7 @@ impl SpaceViewClass for SpatialSpaceView3D { ) -> Result<(), SpaceViewClassRegistryError> { // Ensure spatial topology is registered. crate::spatial_topology::SpatialTopologyStoreSubscriber::subscription_handle(); + crate::transformables::TransformablesStoreSubscriber::subscription_handle(); register_spatial_contexts(system_registry)?; register_3d_spatial_visualizers(system_registry)?; @@ -367,7 +368,7 @@ impl SpaceViewClass for SpatialSpaceView3D { let scene_view_coordinates = ctx .recording() .latest_at_component::(space_origin, &ctx.current_query()) - .map(|c| c.value); + .map(|(_index, c)| c); // TODO(andreas): list_item'ify the rest ui.selection_grid("spatial_settings_ui").show(ui, |ui| { diff --git a/crates/viewer/re_space_view_spatial/src/visualizers/cameras.rs b/crates/viewer/re_space_view_spatial/src/visualizers/cameras.rs index 1075f0f3ee431..5ac201daa5dec 100644 --- a/crates/viewer/re_space_view_spatial/src/visualizers/cameras.rs +++ b/crates/viewer/re_space_view_spatial/src/visualizers/cameras.rs @@ -235,7 +235,7 @@ impl VisualizerSystem for CamerasVisualizer { // TODO(#5607): what should happen if the promise is still pending? 
ctx.recording() .latest_at_component::(&data_result.entity_path, &time_query) - .map(|c| c.value), + .map(|(_index, c)| c), pinhole.camera_xyz.unwrap_or(ViewCoordinates::RDF), // TODO(#2641): This should come from archetype entity_highlight, ); diff --git a/crates/viewer/re_space_view_spatial/src/visualizers/depth_images.rs b/crates/viewer/re_space_view_spatial/src/visualizers/depth_images.rs index ceae1b8e12541..e02ec82d72dbd 100644 --- a/crates/viewer/re_space_view_spatial/src/visualizers/depth_images.rs +++ b/crates/viewer/re_space_view_spatial/src/visualizers/depth_images.rs @@ -386,7 +386,7 @@ impl TypedComponentFallbackProvider for DepthImageVisualizer { let is_integer_tensor = ctx .recording() .latest_at_component::(ctx.target_entity_path, ctx.query) - .map_or(false, |tensor| tensor.dtype().is_integer()); + .map_or(false, |(_index, tensor)| tensor.dtype().is_integer()); if is_integer_tensor { 1000.0 } else { 1.0 }.into() } diff --git a/crates/viewer/re_space_view_tensor/src/visualizer_system.rs b/crates/viewer/re_space_view_tensor/src/visualizer_system.rs index a7112d13cd1dc..7889fa4f3b323 100644 --- a/crates/viewer/re_space_view_tensor/src/visualizer_system.rs +++ b/crates/viewer/re_space_view_tensor/src/visualizer_system.rs @@ -33,11 +33,11 @@ impl VisualizerSystem for TensorSystem { let timeline_query = LatestAtQuery::new(query.timeline, query.latest_at); // TODO(#5607): what should happen if the promise is still pending? - if let Some(tensor) = ctx + if let Some(((_time, row_id), tensor)) = ctx .recording() .latest_at_component::(&data_result.entity_path, &timeline_query) { - self.tensors.push((tensor.row_id(), tensor.value)); + self.tensors.push((row_id, tensor)); } } diff --git a/crates/viewer/re_space_view_time_series/Cargo.toml b/crates/viewer/re_space_view_time_series/Cargo.toml index 51a350ceabbc8..630bb76d0ed41 100644 --- a/crates/viewer/re_space_view_time_series/Cargo.toml +++ b/crates/viewer/re_space_view_time_series/Cargo.toml @@ -23,7 +23,7 @@ re_chunk_store.workspace = true re_format.workspace = true re_log.workspace = true re_log_types.workspace = true -re_query.workspace = true +re_query2.workspace = true re_renderer.workspace = true re_space_view.workspace = true re_tracing.workspace = true diff --git a/crates/viewer/re_space_view_time_series/src/line_visualizer_system.rs b/crates/viewer/re_space_view_time_series/src/line_visualizer_system.rs index 75bf86bd91e85..889dbf87cb054 100644 --- a/crates/viewer/re_space_view_time_series/src/line_visualizer_system.rs +++ b/crates/viewer/re_space_view_time_series/src/line_visualizer_system.rs @@ -1,8 +1,9 @@ use itertools::Itertools as _; -use re_query::{PromiseResult, QueryError}; -use re_space_view::range_with_blueprint_resolved_data; + +use re_space_view::range_with_blueprint_resolved_data2; use re_types::archetypes; use re_types::components::AggregationPolicy; +use re_types::external::arrow2::datatypes::DataType as ArrowDatatype; use re_types::{ archetypes::SeriesLine, components::{Color, Name, Scalar, StrokeWidth}, @@ -50,10 +51,8 @@ impl VisualizerSystem for SeriesLineSystem { ) -> Result, SpaceViewSystemExecutionError> { re_tracing::profile_function!(); - match self.load_scalars(ctx, query) { - Ok(_) | Err(QueryError::PrimaryNotFound(_)) => Ok(Vec::new()), - Err(err) => Err(err.into()), - } + self.load_scalars(ctx, query); + Ok(Vec::new()) } fn as_any(&self) -> &dyn std::any::Any { @@ -89,11 +88,7 @@ impl TypedComponentFallbackProvider for SeriesLineSystem { 
re_viewer_context::impl_component_fallback_provider!(SeriesLineSystem => [Color, StrokeWidth, Name]); impl SeriesLineSystem { - fn load_scalars( - &mut self, - ctx: &ViewContext<'_>, - query: &ViewQuery<'_>, - ) -> Result<(), QueryError> { + fn load_scalars(&mut self, ctx: &ViewContext<'_>, query: &ViewQuery<'_>) { re_tracing::profile_function!(); let (plot_bounds, time_per_pixel) = @@ -105,10 +100,10 @@ impl SeriesLineSystem { if parallel_loading { use rayon::prelude::*; re_tracing::profile_wait!("load_series"); - for one_series in data_results + for mut one_series in data_results .collect_vec() .par_iter() - .map(|data_result| -> Result, QueryError> { + .map(|data_result| -> Vec { let mut series = vec![]; self.load_series( ctx, @@ -117,12 +112,12 @@ impl SeriesLineSystem { time_per_pixel, data_result, &mut series, - )?; - Ok(series) + ); + series }) - .collect::>>() + .collect::>() { - self.all_series.append(&mut one_series?); + self.all_series.append(&mut one_series); } } else { let mut series = vec![]; @@ -134,12 +129,10 @@ impl SeriesLineSystem { time_per_pixel, data_result, &mut series, - )?; + ); } self.all_series = series; } - - Ok(()) } #[allow(clippy::too_many_arguments)] @@ -151,11 +144,9 @@ impl SeriesLineSystem { time_per_pixel: f64, data_result: &re_viewer_context::DataResult, all_series: &mut Vec, - ) -> Result<(), QueryError> { + ) { re_tracing::profile_function!(); - let resolver = ctx.recording().resolver(); - let current_query = ctx.current_query(); let query_ctx = ctx.query_context(data_result, ¤t_query); @@ -183,14 +174,14 @@ impl SeriesLineSystem { ctx.viewer_ctx.app_options.experimental_plot_query_clamping, ); { - use re_space_view::RangeResultsExt as _; + use re_space_view::RangeResultsExt2 as _; re_tracing::profile_scope!("primary", &data_result.entity_path.to_string()); let entity_path = &data_result.entity_path; let query = re_chunk_store::RangeQuery::new(view_query.timeline, time_range); - let results = range_with_blueprint_resolved_data( + let results = range_with_blueprint_resolved_data2( ctx, None, &query, @@ -205,144 +196,193 @@ impl SeriesLineSystem { ); // If we have no scalars, we can't do anything. - let Some(all_scalars) = results.get_required_component_dense::(resolver) else { - return Ok(()); + let Some(all_scalar_chunks) = results.get_required_chunks(&Scalar::name()) else { + return; }; - let all_scalars = all_scalars?; - - let all_scalars_entry_range = all_scalars.entry_range(); - - if !matches!( - all_scalars.status(), - (PromiseResult::Ready(()), PromiseResult::Ready(())) - ) { - // TODO(#5607): what should happen if the promise is still pending? - } + let all_scalars_indices = || { + all_scalar_chunks + .iter() + .flat_map(|chunk| { + chunk.iter_component_indices(&query.timeline(), &Scalar::name()) + }) + .map(|index| (index, ())) + }; // Allocate all points. 
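The rewritten loader below iterates raw chunks directly instead of densified cache ranges. Its core idiom pairs an index iterator with a value iterator drawn from the same chunk; a hedged sketch using the helpers introduced by this patch:

    // Indices and raw values come from the same sorted chunk, so the two
    // iterators stay aligned row-for-row.
    for chunk in all_scalar_chunks.iter() {
        for ((data_time, _row_id), values) in itertools::izip!(
            chunk.iter_component_indices(&query.timeline(), &Scalar::name()),
            chunk.iter_primitive::<f64>(&Scalar::name()),
        ) {
            // `values` is one row's scalar batch; a plot expects exactly one value.
            let _point = (data_time.as_i64(), values.first().copied().unwrap_or(0.0));
        }
    }
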
- points = all_scalars - .range_indices(all_scalars_entry_range.clone()) - .map(|(data_time, _)| PlotPoint { - time: data_time.as_i64(), - ..default_point.clone() - }) - .collect_vec(); - - if cfg!(debug_assertions) { - for ps in points.windows(2) { - assert!( - ps[0].time <= ps[1].time, - "scalars should be sorted already when extracted from the cache, got p0 at {} and p1 at {}\n{:?}", - ps[0].time, ps[1].time, - points.iter().map(|p| p.time).collect_vec(), - ); - } + { + re_tracing::profile_scope!("alloc"); + + points = all_scalar_chunks + .iter() + .flat_map(|chunk| { + chunk.iter_component_indices(&query.timeline(), &Scalar::name()) + }) + .map(|(data_time, _)| { + debug_assert_eq!(Scalar::arrow_datatype(), ArrowDatatype::Float64); + + PlotPoint { + time: data_time.as_i64(), + ..default_point.clone() + } + }) + .collect_vec(); } // Fill in values. - for (i, scalars) in all_scalars - .range_data(all_scalars_entry_range.clone()) - .enumerate() { - if scalars.len() > 1 { - re_log::warn_once!( - "found a scalar batch in {entity_path:?} -- those have no effect" - ); - } else if scalars.is_empty() { - points[i].attrs.kind = PlotSeriesKind::Clear; - } else { - points[i].value = scalars.first().map_or(0.0, |s| *s.0); - } + re_tracing::profile_scope!("fill values"); + + debug_assert_eq!(Scalar::arrow_datatype(), ArrowDatatype::Float64); + let mut i = 0; + all_scalar_chunks + .iter() + .flat_map(|chunk| chunk.iter_primitive::(&Scalar::name())) + .for_each(|values| { + if !values.is_empty() { + if values.len() > 1 { + re_log::warn_once!( + "found a scalar batch in {entity_path:?} -- those have no effect" + ); + } + + points[i].value = values[0]; + } else { + points[i].attrs.kind = PlotSeriesKind::Clear; + } + + i += 1; + }); } // Fill in colors. - // TODO(jleibs): Handle Err values. - if let Ok(all_colors) = results.get_or_empty_dense::(resolver) { - if !matches!( - all_colors.status(), - (PromiseResult::Ready(()), PromiseResult::Ready(())) - ) { - // TODO(#5607): what should happen if the promise is still pending? 
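The replacement below joins secondary components onto the scalar timeline with `re_query2::range_zip_1x1`. As consumed here, for each primary `(index, value)` entry it yields the most recent secondary entry at or before that index, or `None` if none exists yet; a toy illustration with integer indices (hedged: this mirrors how the call is used in this patch, not a full specification):

    // Primary: scalars at t = 0, 1, 2. Secondary: one color batch at t = 1.
    let scalars = [(0i64, 1.0f64), (1, 2.0), (2, 3.0)];
    let colors = [(1i64, 0xFF0000FFu32)];
    for (t, _scalar, color) in re_query2::range_zip_1x1(scalars, colors) {
        // t = 0 -> color is None; t = 1, 2 -> color is Some(0xFF0000FF).
        let _ = (t, color);
    }
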
+ { + re_tracing::profile_scope!("fill colors"); + + debug_assert_eq!(Color::arrow_datatype(), ArrowDatatype::UInt32); + + fn map_raw_color(raw: &[u32]) -> Option { + raw.first().map(|c| { + let [a, b, g, r] = c.to_le_bytes(); + if a == 255 { + // Common-case optimization + re_renderer::Color32::from_rgb(r, g, b) + } else { + re_renderer::Color32::from_rgba_unmultiplied(r, g, b, a) + } + }) } - let all_scalars_indexed = all_scalars - .range_indices(all_scalars_entry_range.clone()) - .map(|index| (index, ())); - - let all_frames = - re_query::range_zip_1x1(all_scalars_indexed, all_colors.range_indexed()) - .enumerate(); - - for (i, (_index, _scalars, colors)) in all_frames { - if let Some(color) = colors.and_then(|colors| { - colors.first().map(|c| { - let [r, g, b, a] = c.to_array(); - if a == 255 { - // Common-case optimization - re_renderer::Color32::from_rgb(r, g, b) - } else { - re_renderer::Color32::from_rgba_unmultiplied(r, g, b, a) + if let Some(all_color_chunks) = results.get_required_chunks(&Color::name()) { + if all_color_chunks.len() == 1 && all_color_chunks[0].is_static() { + re_tracing::profile_scope!("override fast path"); + + let color = all_color_chunks[0] + .iter_primitive::(&Color::name()) + .next() + .and_then(map_raw_color); + + if let Some(color) = color { + points.iter_mut().for_each(|p| p.attrs.color = color); + } + } else { + re_tracing::profile_scope!("standard path"); + + let all_colors = all_color_chunks.iter().flat_map(|chunk| { + itertools::izip!( + chunk.iter_component_indices(&query.timeline(), &Color::name()), + chunk.iter_primitive::(&Color::name()) + ) + }); + + let all_frames = + re_query2::range_zip_1x1(all_scalars_indices(), all_colors).enumerate(); + + all_frames.for_each(|(i, (_index, _scalars, colors))| { + if let Some(color) = colors.and_then(map_raw_color) { + points[i].attrs.color = color; } - }) - }) { - points[i].attrs.color = color; + }); } } } // Fill in stroke widths - // TODO(jleibs): Handle Err values. - if let Ok(all_stroke_widths) = results.get_or_empty_dense::(resolver) { - if !matches!( - all_stroke_widths.status(), - (PromiseResult::Ready(()), PromiseResult::Ready(())) - ) { - // TODO(#5607): what should happen if the promise is still pending? 
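For the byte order in `map_raw_color` above: a `Color` packs RGBA as `0xRRGGBBAA`, so a little-endian decode puts the alpha byte first, which is why the destructuring reads `[a, b, g, r]`. A quick worked check:

    let c: u32 = 0x11223344; // r = 0x11, g = 0x22, b = 0x33, a = 0x44
    let [a, b, g, r] = c.to_le_bytes();
    assert_eq!((r, g, b, a), (0x11, 0x22, 0x33, 0x44));
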
- } - - let all_scalars_indexed = all_scalars - .range_indices(all_scalars_entry_range.clone()) - .map(|index| (index, ())); + { + re_tracing::profile_scope!("fill stroke widths"); - let all_frames = - re_query::range_zip_1x1(all_scalars_indexed, all_stroke_widths.range_indexed()) - .enumerate(); + debug_assert_eq!(StrokeWidth::arrow_datatype(), ArrowDatatype::Float32); - for (i, (_index, _scalars, stroke_widths)) in all_frames { - if let Some(stroke_width) = - stroke_widths.and_then(|stroke_widths| stroke_widths.first().map(|r| *r.0)) + if let Some(all_stroke_width_chunks) = + results.get_required_chunks(&StrokeWidth::name()) + { + if all_stroke_width_chunks.len() == 1 && all_stroke_width_chunks[0].is_static() { - points[i].attrs.radius_ui = 0.5 * stroke_width; + re_tracing::profile_scope!("override fast path"); + + let stroke_width = all_stroke_width_chunks[0] + .iter_primitive::(&StrokeWidth::name()) + .next() + .and_then(|stroke_widths| stroke_widths.first().copied()); + + if let Some(stroke_width) = stroke_width { + points + .iter_mut() + .for_each(|p| p.attrs.radius_ui = stroke_width * 0.5); + } + } else { + re_tracing::profile_scope!("standard path"); + + let all_stroke_widths = all_stroke_width_chunks.iter().flat_map(|chunk| { + itertools::izip!( + chunk.iter_component_indices( + &query.timeline(), + &StrokeWidth::name() + ), + chunk.iter_primitive::(&StrokeWidth::name()) + ) + }); + + let all_frames = + re_query2::range_zip_1x1(all_scalars_indices(), all_stroke_widths) + .enumerate(); + + all_frames.for_each(|(i, (_index, _scalars, stroke_widths))| { + if let Some(stroke_width) = stroke_widths + .and_then(|stroke_widths| stroke_widths.first().copied()) + { + points[i].attrs.radius_ui = stroke_width * 0.5; + } + }); } } } // Extract the series name let series_name = results - .get_or_empty_dense::(resolver) - .ok() - .and_then(|all_series_name| { - all_series_name - .range_data(all_scalars_entry_range.clone()) - .next() - .and_then(|name| name.first().cloned()) - }) + .get_optional_chunks(&Name::name()) + .iter() + .find(|chunk| !chunk.is_empty()) + .and_then(|chunk| chunk.component_mono::(0)?.ok()) .unwrap_or_else(|| self.fallback_for(&query_ctx)); // Now convert the `PlotPoints` into `Vec` let aggregator = results - .get_or_empty_dense::(resolver) - .ok() - .and_then(|result| { - result - .range_data(all_scalars_entry_range.clone()) - .next() - .and_then(|aggregator| aggregator.first().copied()) - }) + .get_optional_chunks(&AggregationPolicy::name()) + .iter() + .find(|chunk| !chunk.is_empty()) + .and_then(|chunk| chunk.component_mono::(0)?.ok()) // TODO(andreas): Relying on the default==placeholder here instead of going through a fallback provider. // This is fine, because we know there's no `TypedFallbackProvider`, but wrong if one were to be added. .unwrap_or_default(); + + // This is _almost_ sorted already: all the individual chunks are sorted, but we still + // have to deal with overlap chunks. 
+ { + re_tracing::profile_scope!("sort"); + points.sort_by_key(|p| p.time); + } + points_to_series( &data_result.entity_path, time_per_pixel, @@ -354,7 +394,5 @@ impl SeriesLineSystem { all_series, ); } - - Ok(()) } } diff --git a/crates/viewer/re_space_view_time_series/src/point_visualizer_system.rs b/crates/viewer/re_space_view_time_series/src/point_visualizer_system.rs index 0f3031ce0a9d1..f6eb1b62de5bf 100644 --- a/crates/viewer/re_space_view_time_series/src/point_visualizer_system.rs +++ b/crates/viewer/re_space_view_time_series/src/point_visualizer_system.rs @@ -1,10 +1,10 @@ use itertools::Itertools as _; -use re_query::{PromiseResult, QueryError}; -use re_space_view::range_with_blueprint_resolved_data; +use re_space_view::range_with_blueprint_resolved_data2; use re_types::{ archetypes::{self, SeriesPoint}, components::{Color, MarkerShape, MarkerSize, Name, Scalar}, + external::arrow2::datatypes::DataType as ArrowDatatype, Archetype as _, Loggable as _, }; use re_viewer_context::{ @@ -12,11 +12,10 @@ use re_viewer_context::{ TypedComponentFallbackProvider, ViewContext, ViewQuery, VisualizerQueryInfo, VisualizerSystem, }; -use crate::util::{ - determine_plot_bounds_and_time_per_pixel, determine_time_range, points_to_series, +use crate::{ + util::{determine_plot_bounds_and_time_per_pixel, determine_time_range, points_to_series}, + ScatterAttrs, {PlotPoint, PlotPointAttrs, PlotSeries, PlotSeriesKind}, }; -use crate::ScatterAttrs; -use crate::{PlotPoint, PlotPointAttrs, PlotSeries, PlotSeriesKind}; /// The system for rendering [`SeriesPoint`] archetypes. #[derive(Default, Debug)] @@ -52,10 +51,8 @@ impl VisualizerSystem for SeriesPointSystem { ) -> Result, SpaceViewSystemExecutionError> { re_tracing::profile_function!(); - match self.load_scalars(ctx, query) { - Ok(_) | Err(QueryError::PrimaryNotFound(_)) => Ok(Vec::new()), - Err(err) => Err(err.into()), - } + self.load_scalars(ctx, query); + Ok(Vec::new()) } fn as_any(&self) -> &dyn std::any::Any { @@ -91,15 +88,9 @@ impl TypedComponentFallbackProvider for SeriesPointSystem { re_viewer_context::impl_component_fallback_provider!(SeriesPointSystem => [Color, MarkerSize, Name]); impl SeriesPointSystem { - fn load_scalars( - &mut self, - ctx: &ViewContext<'_>, - view_query: &ViewQuery<'_>, - ) -> Result<(), QueryError> { + fn load_scalars(&mut self, ctx: &ViewContext<'_>, view_query: &ViewQuery<'_>) { re_tracing::profile_function!(); - let resolver = ctx.recording().resolver(); - let (plot_bounds, time_per_pixel) = determine_plot_bounds_and_time_per_pixel(ctx.viewer_ctx, view_query); @@ -144,14 +135,14 @@ impl SeriesPointSystem { ); { - use re_space_view::RangeResultsExt as _; + use re_space_view::RangeResultsExt2 as _; re_tracing::profile_scope!("primary", &data_result.entity_path.to_string()); let entity_path = &data_result.entity_path; let query = re_chunk_store::RangeQuery::new(view_query.timeline, time_range); - let results = range_with_blueprint_resolved_data( + let results = range_with_blueprint_resolved_data2( ctx, None, &query, @@ -166,164 +157,242 @@ impl SeriesPointSystem { ); // If we have no scalars, we can't do anything. 
- let Some(all_scalars) = results.get_required_component_dense::(resolver) - else { - return Ok(()); + let Some(all_scalar_chunks) = results.get_required_chunks(&Scalar::name()) else { + return; }; - let all_scalars = all_scalars?; - - let all_scalars_entry_range = all_scalars.entry_range(); - - if !matches!( - all_scalars.status(), - (PromiseResult::Ready(()), PromiseResult::Ready(())) - ) { - // TODO(#5607): what should happen if the promise is still pending? - } + let all_scalars_indices = || { + all_scalar_chunks + .iter() + .flat_map(|chunk| { + chunk.iter_component_indices(&query.timeline(), &Scalar::name()) + }) + .map(|index| (index, ())) + }; // Allocate all points. - points = all_scalars - .range_indices(all_scalars_entry_range.clone()) - .map(|(data_time, _)| PlotPoint { - time: data_time.as_i64(), - ..default_point.clone() - }) - .collect_vec(); - - if cfg!(debug_assertions) { - for ps in points.windows(2) { - assert!( - ps[0].time <= ps[1].time, - "scalars should be sorted already when extracted from the cache, got p0 at {} and p1 at {}\n{:?}", - ps[0].time, ps[1].time, - points.iter().map(|p| p.time).collect_vec(), - ); - } + { + re_tracing::profile_scope!("alloc"); + + points = all_scalar_chunks + .iter() + .flat_map(|chunk| { + chunk.iter_component_indices(&query.timeline(), &Scalar::name()) + }) + .map(|(data_time, _)| { + debug_assert_eq!(Scalar::arrow_datatype(), ArrowDatatype::Float64); + + PlotPoint { + time: data_time.as_i64(), + ..default_point.clone() + } + }) + .collect_vec(); } // Fill in values. - for (i, scalars) in all_scalars - .range_data(all_scalars_entry_range.clone()) - .enumerate() { - if scalars.len() > 1 { - re_log::warn_once!( - "found a scalar batch in {entity_path:?} -- those have no effect" - ); - } else if scalars.is_empty() { - points[i].attrs.kind = PlotSeriesKind::Clear; - } else { - points[i].value = scalars.first().map_or(0.0, |s| *s.0); - } - } + re_tracing::profile_scope!("fill values"); + + debug_assert_eq!(Scalar::arrow_datatype(), ArrowDatatype::Float64); + let mut i = 0; + all_scalar_chunks + .iter() + .flat_map(|chunk| chunk.iter_primitive::(&Scalar::name())) + .for_each(|values| { + if !values.is_empty() { + if values.len() > 1 { + re_log::warn_once!( + "found a scalar batch in {entity_path:?} -- those have no effect" + ); + } - // Make it as clear as possible to the optimizer that some parameters - // go completely unused as soon as overrides have been defined. + points[i].value = values[0]; + } else { + points[i].attrs.kind = PlotSeriesKind::Clear; + } + + i += 1; + }); + } // Fill in colors. - // TODO(jleibs): Handle Err values. - if let Ok(all_colors) = results.get_or_empty_dense::(resolver) { - if !matches!( - all_colors.status(), - (PromiseResult::Ready(()), PromiseResult::Ready(())) - ) { - // TODO(#5607): what should happen if the promise is still pending? 
+                {
+                    re_tracing::profile_scope!("fill colors");
+
+                    debug_assert_eq!(Color::arrow_datatype(), ArrowDatatype::UInt32);
+
+                    fn map_raw_color(raw: &[u32]) -> Option<re_renderer::Color32> {
+                        raw.first().map(|c| {
+                            let [a, b, g, r] = c.to_le_bytes();
+                            if a == 255 {
+                                // Common-case optimization
+                                re_renderer::Color32::from_rgb(r, g, b)
+                            } else {
+                                re_renderer::Color32::from_rgba_unmultiplied(r, g, b, a)
+                            }
+                        })
                     }
 
-                    let all_scalars_indexed = all_scalars
-                        .range_indices(all_scalars_entry_range.clone())
-                        .map(|index| (index, ()));
-
-                    let all_frames =
-                        re_query::range_zip_1x1(all_scalars_indexed, all_colors.range_indexed())
-                            .enumerate();
-
-                    for (i, (_index, _scalars, colors)) in all_frames {
-                        if let Some(color) = colors.and_then(|colors| {
-                            colors.first().map(|c| {
-                                let [r, g, b, a] = c.to_array();
-                                if a == 255 {
-                                    // Common-case optimization
-                                    re_renderer::Color32::from_rgb(r, g, b)
-                                } else {
-                                    re_renderer::Color32::from_rgba_unmultiplied(r, g, b, a)
+                    if let Some(all_color_chunks) = results.get_required_chunks(&Color::name()) {
+                        if all_color_chunks.len() == 1 && all_color_chunks[0].is_static() {
+                            re_tracing::profile_scope!("override fast path");
+
+                            let color = all_color_chunks[0]
+                                .iter_primitive::<u32>(&Color::name())
+                                .next()
+                                .and_then(map_raw_color);
+
+                            if let Some(color) = color {
+                                points.iter_mut().for_each(|p| p.attrs.color = color);
+                            }
+                        } else {
+                            re_tracing::profile_scope!("standard path");
+
+                            let all_colors = all_color_chunks.iter().flat_map(|chunk| {
+                                itertools::izip!(
+                                    chunk.iter_component_indices(&query.timeline(), &Color::name()),
+                                    chunk.iter_primitive::<u32>(&Color::name())
+                                )
+                            });
+
+                            let all_frames =
+                                re_query2::range_zip_1x1(all_scalars_indices(), all_colors)
+                                    .enumerate();
+
+                            all_frames.for_each(|(i, (_index, _scalars, colors))| {
+                                if let Some(color) = colors.and_then(map_raw_color) {
+                                    points[i].attrs.color = color;
                                 }
-                            })
-                        }) {
-                            points[i].attrs.color = color;
+                            });
                         }
                     }
                 }
 
                 // Fill in marker sizes
-                // TODO(jleibs): Handle Err values.
-                if let Ok(all_marker_sizes) = results.get_or_empty_dense::<MarkerSize>(resolver) {
-                    if !matches!(
-                        all_marker_sizes.status(),
-                        (PromiseResult::Ready(()), PromiseResult::Ready(()))
-                    ) {
-                        // TODO(#5607): what should happen if the promise is still pending?
-                    }
-
-                    let all_scalars_indexed = all_scalars
-                        .range_indices(all_scalars_entry_range.clone())
-                        .map(|index| (index, ()));
+                {
+                    re_tracing::profile_scope!("fill marker sizes");
 
-                    let all_frames = re_query::range_zip_1x1(
-                        all_scalars_indexed,
-                        all_marker_sizes.range_indexed(),
-                    )
-                    .enumerate();
+                    debug_assert_eq!(MarkerSize::arrow_datatype(), ArrowDatatype::Float32);
 
-                    for (i, (_index, _scalars, marker_sizes)) in all_frames {
-                        if let Some(marker_size) =
-                            marker_sizes.and_then(|marker_sizes| marker_sizes.first().copied())
+                    if let Some(all_marker_size_chunks) =
+                        results.get_required_chunks(&MarkerSize::name())
+                    {
+                        if all_marker_size_chunks.len() == 1
+                            && all_marker_size_chunks[0].is_static()
                         {
-                            points[i].attrs.radius_ui = *marker_size.0;
+                            re_tracing::profile_scope!("override fast path");
+
+                            let marker_size = all_marker_size_chunks[0]
+                                .iter_primitive::<f32>(&MarkerSize::name())
+                                .next()
+                                .and_then(|marker_sizes| marker_sizes.first().copied());
+
+                            if let Some(marker_size) = marker_size {
+                                points
+                                    .iter_mut()
+                                    .for_each(|p| p.attrs.radius_ui = marker_size * 0.5);
+                            }
+                        } else {
+                            re_tracing::profile_scope!("standard path");
+
+                            let all_marker_sizes =
+                                all_marker_size_chunks.iter().flat_map(|chunk| {
+                                    itertools::izip!(
+                                        chunk.iter_component_indices(
+                                            &query.timeline(),
+                                            &MarkerSize::name()
+                                        ),
+                                        chunk.iter_primitive::<f32>(&MarkerSize::name())
+                                    )
+                                });
+
+                            let all_frames =
+                                re_query2::range_zip_1x1(all_scalars_indices(), all_marker_sizes)
+                                    .enumerate();
+
+                            all_frames.for_each(|(i, (_index, _scalars, marker_sizes))| {
+                                if let Some(marker_size) = marker_sizes
+                                    .and_then(|marker_sizes| marker_sizes.first().copied())
+                                {
+                                    points[i].attrs.radius_ui = marker_size * 0.5;
+                                }
+                            });
                         }
                     }
                 }
 
-                // Fill in marker sizes
-                // TODO(jleibs): Handle Err values.
-                if let Ok(all_marker_shapes) = results.get_or_empty_dense::<MarkerShape>(resolver) {
-                    if !matches!(
-                        all_marker_shapes.status(),
-                        (PromiseResult::Ready(()), PromiseResult::Ready(()))
-                    ) {
-                        // TODO(#5607): what should happen if the promise is still pending?
-                    }
-
-                    let all_scalars_indexed = all_scalars
-                        .range_indices(all_scalars_entry_range.clone())
-                        .map(|index| (index, ()));
-
-                    let all_frames = re_query::range_zip_1x1(
-                        all_scalars_indexed,
-                        all_marker_shapes.range_indexed(),
-                    )
-                    .enumerate();
+                // Fill in marker shapes
+                {
+                    re_tracing::profile_scope!("fill marker shapes");
 
-                    for (i, (_index, _scalars, marker_shapes)) in all_frames {
-                        if let Some(marker) =
-                            marker_shapes.and_then(|marker_shapes| marker_shapes.first().copied())
+                    if let Some(all_marker_shapes_chunks) =
+                        results.get_required_chunks(&MarkerShape::name())
+                    {
+                        if all_marker_shapes_chunks.len() == 1
+                            && all_marker_shapes_chunks[0].is_static()
                         {
-                            points[i].attrs.kind = PlotSeriesKind::Scatter(ScatterAttrs { marker });
+                            re_tracing::profile_scope!("override fast path");
+
+                            let marker_shape = all_marker_shapes_chunks[0]
+                                .iter_component::<MarkerShape>()
+                                .into_iter()
+                                .next()
+                                .and_then(|marker_shapes| marker_shapes.first().copied());
+
+                            if let Some(marker_shape) = marker_shape {
+                                for p in &mut points {
+                                    p.attrs.kind = PlotSeriesKind::Scatter(ScatterAttrs {
+                                        marker: marker_shape,
+                                    });
+                                }
+                            }
+                        } else {
+                            re_tracing::profile_scope!("standard path");
+
+                            let mut all_marker_shapes_iters = all_marker_shapes_chunks
+                                .iter()
+                                .map(|chunk| chunk.iter_component::<MarkerShape>())
+                                .collect_vec();
+                            let all_marker_shapes_indexed = {
+                                let all_marker_shapes = all_marker_shapes_iters
+                                    .iter_mut()
+                                    .flat_map(|it| it.into_iter());
+                                let all_marker_shapes_indices =
+                                    all_marker_shapes_chunks.iter().flat_map(|chunk| {
+                                        chunk.iter_component_indices(
+                                            &query.timeline(),
+                                            &MarkerShape::name(),
+                                        )
+                                    });
+                                itertools::izip!(all_marker_shapes_indices, all_marker_shapes)
+                            };
+
+                            let all_frames = re_query2::range_zip_1x1(
+                                all_scalars_indices(),
+                                all_marker_shapes_indexed,
+                            )
+                            .enumerate();
+
+                            all_frames.for_each(|(i, (_index, _scalars, marker_shapes))| {
+                                if let Some(marker_shape) = marker_shapes
+                                    .and_then(|marker_shapes| marker_shapes.first().copied())
+                                {
+                                    points[i].attrs.kind = PlotSeriesKind::Scatter(ScatterAttrs {
+                                        marker: marker_shape,
+                                    });
+                                }
+                            });
                         }
                     }
                 }
 
                 // Extract the series name
                 let series_name = results
-                    .get_or_empty_dense::<Name>(resolver)
-                    .ok()
-                    .and_then(|all_series_name| {
-                        all_series_name
-                            .range_data(all_scalars_entry_range.clone())
-                            .next()
-                            .and_then(|name| name.first().cloned())
-                    })
+                    .get_optional_chunks(&Name::name())
+                    .iter()
+                    .find(|chunk| !chunk.is_empty())
+                    .and_then(|chunk| chunk.component_mono::<Name>(0)?.ok())
                     .unwrap_or_else(|| self.fallback_for(&query_ctx));
 
                 // Now convert the `PlotPoints` into `Vec<PlotSeries>`
@@ -340,7 +409,5 @@
                     );
                 }
             }
-
-        Ok(())
     }
 }
diff --git a/crates/viewer/re_viewer/Cargo.toml b/crates/viewer/re_viewer/Cargo.toml
index 57ecc176b72ab..214882dc46018 100644
--- a/crates/viewer/re_viewer/Cargo.toml
+++ b/crates/viewer/re_viewer/Cargo.toml
@@ -58,6 +58,7 @@ re_log_encoding = { workspace = true, features = [
 re_log_types.workspace = true
 re_memory.workspace = true
 re_query.workspace = true
+re_query2.workspace = true
 re_renderer = { workspace = true, default-features = false }
 re_selection_panel.workspace = true
 re_sdk_comms.workspace = true
diff --git a/crates/viewer/re_viewer/src/app.rs b/crates/viewer/re_viewer/src/app.rs
index dd105ebf22eab..6ff650605bc0f 100644
--- a/crates/viewer/re_viewer/src/app.rs
+++ b/crates/viewer/re_viewer/src/app.rs
@@ -740,13 +740,13 @@ impl App {
             #[cfg(not(target_arch = "wasm32"))]
             UICommand::ClearPrimaryCache => {
                 if let Some(ctx) = store_context {
-                    ctx.recording.query_caches().clear();
+                    ctx.recording.query_caches2().clear();
                 }
             }
 
             #[cfg(not(target_arch = "wasm32"))]
             UICommand::PrintPrimaryCache => {
                 if let Some(ctx) = store_context {
-                    let text = format!("{:?}", ctx.recording.query_caches());
+                    let text = format!("{:?}", ctx.recording.query_caches2());
                     egui_ctx.output_mut(|o| o.copied_text = text.clone());
                     println!("{text}");
                 }
diff --git a/crates/viewer/re_viewer/src/app_blueprint.rs b/crates/viewer/re_viewer/src/app_blueprint.rs
index 9068b88243149..52dc59189932a 100644
--- a/crates/viewer/re_viewer/src/app_blueprint.rs
+++ b/crates/viewer/re_viewer/src/app_blueprint.rs
@@ -241,5 +241,5 @@ fn load_panel_state(
     // TODO(#5607): what should happen if the promise is still pending?
     blueprint_db
         .latest_at_component_quiet::<PanelState>(path, query)
-        .map(|p| p.value)
+        .map(|(_index, p)| p)
 }
diff --git a/crates/viewer/re_viewer/src/blueprint/validation.rs b/crates/viewer/re_viewer/src/blueprint/validation.rs
index 9535846f583b9..4bf5ee55c377f 100644
--- a/crates/viewer/re_viewer/src/blueprint/validation.rs
+++ b/crates/viewer/re_viewer/src/blueprint/validation.rs
@@ -20,15 +20,10 @@ pub(crate) fn validate_component<C: Component>(blueprint: &EntityDb) -> bool {
     // Walk the blueprint and see if any cells fail to deserialize for this component type.
     let query = LatestAtQuery::latest(Timeline::default());
     for path in blueprint.entity_paths() {
-        let results = blueprint.query_caches().latest_at(
-            blueprint.store(),
-            &query,
-            path,
-            [C::name()],
-        );
-        if let Some(array) = results
-            .get(C::name())
-            .and_then(|results| results.raw(blueprint.resolver(), C::name()))
+        if let Some(array) = blueprint
+            .query_caches2()
+            .latest_at(blueprint.store(), &query, path, [C::name()])
+            .component_batch_raw(&C::name())
         {
             if let Err(err) = C::from_arrow_opt(&*array) {
                 re_log::debug!(
diff --git a/crates/viewer/re_viewer/src/ui/memory_panel.rs b/crates/viewer/re_viewer/src/ui/memory_panel.rs
index 735298cfbdc23..4f732a953f9a8 100644
--- a/crates/viewer/re_viewer/src/ui/memory_panel.rs
+++ b/crates/viewer/re_viewer/src/ui/memory_panel.rs
@@ -1,9 +1,7 @@
-use itertools::Itertools;
-
 use re_chunk_store::{ChunkStoreChunkStats, ChunkStoreConfig, ChunkStoreStats};
 use re_format::{format_bytes, format_uint};
 use re_memory::{util::sec_since_start, MemoryHistory, MemoryLimit, MemoryUse};
-use re_query::{CachedComponentStats, CachesStats};
+use re_query2::{CacheStats, CachesStats};
 use re_renderer::WgpuResourcePoolStatistics;
 use re_ui::UiExt as _;
 use re_viewer_context::store_hub::StoreHubStats;
@@ -264,11 +262,6 @@ impl MemoryPanel {
     fn caches_stats(ui: &mut egui::Ui, caches_stats: &CachesStats) {
         let CachesStats { latest_at, range } = caches_stats;
 
-        let latest_at = latest_at
-            .iter()
-            .filter(|(_, stats)| stats.total_indices > 0)
-            .collect_vec();
-
         if !latest_at.is_empty() {
             ui.separator();
             ui.strong("LatestAt");
@@ -281,34 +274,32 @@
                 .show(ui, |ui| {
                     ui.label(egui::RichText::new("Entity").underline());
                     ui.label(egui::RichText::new("Component").underline());
-                    ui.label(egui::RichText::new("Indices").underline());
-                    ui.label(egui::RichText::new("Instances").underline());
-                    ui.label(egui::RichText::new("Size").underline());
+                    ui.label(egui::RichText::new("Chunks").underline())
+                        .on_hover_text("How many chunks in the cache?");
+                    ui.label(egui::RichText::new("Effective size").underline())
+                        .on_hover_text("What would be the size of this cache in the worst case, i.e. 
if all chunks had been fully copied?"); + ui.label(egui::RichText::new("Actual size").underline()) + .on_hover_text("What is the actual size of this cache after deduplication?"); ui.end_row(); for (cache_key, stats) in latest_at { - let &CachedComponentStats { - total_indices, - total_instances, - total_size_bytes, + let &CacheStats { + total_chunks, + total_effective_size_bytes, + total_actual_size_bytes, } = stats; ui.label(cache_key.entity_path.to_string()); ui.label(cache_key.component_name.to_string()); - ui.label(re_format::format_uint(total_indices)); - ui.label(re_format::format_uint(total_instances)); - ui.label(re_format::format_bytes(total_size_bytes as _)); + ui.label(re_format::format_uint(total_chunks)); + ui.label(re_format::format_bytes(total_effective_size_bytes as _)); + ui.label(re_format::format_bytes(total_actual_size_bytes as _)); ui.end_row(); } }); }); } - let range = range - .iter() - .filter(|(_, (_, stats))| stats.total_indices > 0) - .collect_vec(); - if !range.is_empty() { ui.separator(); ui.strong("Range"); @@ -321,31 +312,26 @@ impl MemoryPanel { .show(ui, |ui| { ui.label(egui::RichText::new("Entity").underline()); ui.label(egui::RichText::new("Component").underline()); - ui.label(egui::RichText::new("Indices").underline()); - ui.label(egui::RichText::new("Instances").underline()); - ui.label(egui::RichText::new("Size").underline()); - ui.label(egui::RichText::new("Time range").underline()); + ui.label(egui::RichText::new("Chunks").underline()) + .on_hover_text("How many chunks in the cache?"); + ui.label(egui::RichText::new("Effective size").underline()) + .on_hover_text("What would be the size of this cache in the worst case, i.e. if all chunks had been fully copied?"); + ui.label(egui::RichText::new("Actual size").underline()) + .on_hover_text("What is the actual size of this cache after deduplication?"); ui.end_row(); - for (cache_key, (time_range, stats)) in range { - let &CachedComponentStats { - total_indices, - total_instances, - total_size_bytes, + for (cache_key, stats) in range { + let &CacheStats { + total_chunks, + total_effective_size_bytes, + total_actual_size_bytes, } = stats; ui.label(cache_key.entity_path.to_string()); ui.label(cache_key.component_name.to_string()); - ui.label(re_format::format_uint(total_indices)); - ui.label(re_format::format_uint(total_instances)); - ui.label(re_format::format_bytes(total_size_bytes as _)); - ui.label(format!( - "{}({})", - cache_key.timeline.name(), - time_range.map_or("".to_owned(), |time_range| { - cache_key.timeline.format_time_range_utc(&time_range) - }) - )); + ui.label(re_format::format_uint(total_chunks)); + ui.label(re_format::format_bytes(total_effective_size_bytes as _)); + ui.label(re_format::format_bytes(total_actual_size_bytes as _)); ui.end_row(); } }); diff --git a/crates/viewer/re_viewer_context/Cargo.toml b/crates/viewer/re_viewer_context/Cargo.toml index aae44b9719152..87e4ccf458fcf 100644 --- a/crates/viewer/re_viewer_context/Cargo.toml +++ b/crates/viewer/re_viewer_context/Cargo.toml @@ -29,6 +29,7 @@ re_log_types.workspace = true re_log.workspace = true re_math.workspace = true re_query.workspace = true +re_query2.workspace = true re_renderer.workspace = true re_smart_channel.workspace = true re_string_interner.workspace = true diff --git a/crates/viewer/re_viewer_context/src/annotations.rs b/crates/viewer/re_viewer_context/src/annotations.rs index 28d8331347e0f..3396a7b384552 100644 --- a/crates/viewer/re_viewer_context/src/annotations.rs +++ 
b/crates/viewer/re_viewer_context/src/annotations.rs
@@ -6,7 +6,6 @@ use nohash_hasher::IntSet;
 use re_chunk::RowId;
 use re_chunk_store::LatestAtQuery;
 use re_entity_db::EntityPath;
-use re_query::LatestAtMonoResult;
 use re_types::components::AnnotationContext;
 use re_types::datatypes::{AnnotationInfo, ClassDescription, ClassId, KeypointId, Utf8};
 
@@ -249,10 +248,7 @@ impl AnnotationMap {
             // Otherwise check the obj_store for the field.
             // If we find one, insert it and then we can break.
             std::collections::btree_map::Entry::Vacant(entry) => {
-                if let Some(LatestAtMonoResult {
-                    index: (_, row_id),
-                    value: ann_ctx,
-                }) = ctx
+                if let Some(((_time, row_id), ann_ctx)) = ctx
                     .recording()
                     .latest_at_component::<AnnotationContext>(&parent, time_query)
                 {
diff --git a/crates/viewer/re_viewer_context/src/blueprint_helpers.rs b/crates/viewer/re_viewer_context/src/blueprint_helpers.rs
index 7a02834c1f5e2..9d3f2ea19ce76 100644
--- a/crates/viewer/re_viewer_context/src/blueprint_helpers.rs
+++ b/crates/viewer/re_viewer_context/src/blueprint_helpers.rs
@@ -129,10 +129,8 @@ impl ViewerContext<'_> {
             .and_then(|default_blueprint| {
                 default_blueprint
                     .latest_at(self.blueprint_query, entity_path, [component_name])
-                    .get(component_name)
-                    .and_then(|default_value| {
-                        default_value.raw(default_blueprint.resolver(), component_name)
-                    })
+                    .get(&component_name)
+                    .and_then(|default_value| default_value.component_batch_raw(&component_name))
             })
     }
 
@@ -161,12 +159,10 @@ impl ViewerContext<'_> {
         let Some(datatype) = blueprint
             .latest_at(self.blueprint_query, entity_path, [component_name])
-            .get(component_name)
-            .and_then(|result| {
-                result
-                    .resolved(blueprint.resolver())
+            .get(&component_name)
+            .and_then(|mono| {
+                mono.component_batch_raw(&component_name)
                     .map(|array| array.data_type().clone())
-                    .ok()
             })
         else {
             re_log::error!(
diff --git a/crates/viewer/re_viewer_context/src/component_ui_registry.rs b/crates/viewer/re_viewer_context/src/component_ui_registry.rs
index 2033b56815776..6ea41ae275af7 100644
--- a/crates/viewer/re_viewer_context/src/component_ui_registry.rs
+++ b/crates/viewer/re_viewer_context/src/component_ui_registry.rs
@@ -1,5 +1,6 @@
 use std::collections::BTreeMap;
 
+use re_chunk::ArrowArray;
 use re_chunk_store::LatestAtQuery;
 use re_entity_db::{external::re_query::LatestAtComponentResults, EntityDb, EntityPath};
 use re_log::ResultExt;
@@ -469,7 +470,7 @@ impl ComponentUiRegistry {
         origin_db: &EntityDb,
         blueprint_write_path: &EntityPath,
         component_name: ComponentName,
-        component_query_result: &LatestAtComponentResults,
+        component_array: Option<&dyn ArrowArray>,
         fallback_provider: &dyn ComponentFallbackProvider,
     ) {
         let multiline = true;
@@ -479,7 +480,7 @@
             origin_db,
             blueprint_write_path,
             component_name,
-            component_query_result,
+            component_array,
             fallback_provider,
             multiline,
         );
@@ -498,7 +499,7 @@
         origin_db: &EntityDb,
         blueprint_write_path: &EntityPath,
         component_name: ComponentName,
-        component_query_result: &LatestAtComponentResults,
+        component_query_result: Option<&dyn ArrowArray>,
         fallback_provider: &dyn ComponentFallbackProvider,
     ) {
         let multiline = false;
@@ -522,7 +523,7 @@
         origin_db: &EntityDb,
         blueprint_write_path: &EntityPath,
         component_name: ComponentName,
-        component_query_result: &LatestAtComponentResults,
+        component_array: Option<&dyn ArrowArray>,
         fallback_provider: &dyn ComponentFallbackProvider,
         multiline: bool,
     ) {
@@ -534,35 +535,17 @@
                 .map_err(|_err| format!("No fallback value available for {component_name}."))
         };
 
-        let component_raw_or_error = match component_query_result.resolved(origin_db.resolver()) {
-            re_query::PromiseResult::Pending => {
-                if component_query_result.num_instances() == 0 {
-                    // This can currently also happen when there's no data at all.
-                    create_fallback()
-                } else {
-                    // In the future, we might want to show a loading indicator here,
-                    // but right now this is always an error.
-                    Err(format!("Promise for {component_name} is still pending."))
-                }
-            }
-            re_query::PromiseResult::Ready(array) => {
-                if !array.is_empty() {
-                    Ok(array)
-                } else {
-                    create_fallback()
+        let component_raw = if let Some(array) = component_array {
+            array.to_boxed()
+        } else {
+            match create_fallback() {
+                Ok(value) => value,
+                Err(error_text) => {
+                    re_log::error_once!("{error_text}");
+                    ui.error_label(&error_text);
+                    return;
                 }
             }
-            re_query::PromiseResult::Error(err) => {
-                Err(format!("Couldn't get {component_name}: {err}"))
-            }
-        };
-        let component_raw = match component_raw_or_error {
-            Ok(value) => value,
-            Err(error_text) => {
-                re_log::error_once!("{error_text}");
-                ui.error_label(&error_text);
-                return;
-            }
         };
 
         self.edit_ui_raw(
diff --git a/crates/viewer/re_viewer_context/src/item.rs b/crates/viewer/re_viewer_context/src/item.rs
index 735e45fde5095..85ff07abb031c 100644
--- a/crates/viewer/re_viewer_context/src/item.rs
+++ b/crates/viewer/re_viewer_context/src/item.rs
@@ -190,19 +190,18 @@ pub fn resolve_mono_instance_path(
     };
 
     for component_name in component_names {
-        let results = entity_db.query_caches().latest_at(
-            entity_db.store(),
-            query,
-            &instance.entity_path,
-            [component_name],
-        );
-        if let Some(results) = results.get(component_name) {
-            if let re_query::PromiseResult::Ready(array) =
-                results.resolved(entity_db.resolver())
-            {
-                if array.len() > 1 {
-                    return instance.clone();
-                }
+        if let Some(array) = entity_db
+            .query_caches2()
+            .latest_at(
+                entity_db.store(),
+                query,
+                &instance.entity_path,
+                [component_name],
+            )
+            .component_batch_raw(&component_name)
+        {
+            if array.len() > 1 {
+                return instance.clone();
             }
         }
     }
diff --git a/crates/viewer/re_viewer_context/src/space_view/mod.rs b/crates/viewer/re_viewer_context/src/space_view/mod.rs
index 4692b9ec1cf5c..41d1715292177 100644
--- a/crates/viewer/re_viewer_context/src/space_view/mod.rs
+++ b/crates/viewer/re_viewer_context/src/space_view/mod.rs
@@ -52,6 +52,9 @@ pub enum SpaceViewSystemExecutionError {
     #[error(transparent)]
     QueryError(#[from] re_query::QueryError),
 
+    #[error(transparent)]
+    QueryError2(#[from] re_query2::QueryError),
+
     #[error(transparent)]
     DeserializationError(#[from] re_types::DeserializationError),
 
diff --git a/crates/viewer/re_viewer_context/src/space_view/view_query.rs b/crates/viewer/re_viewer_context/src/space_view/view_query.rs
index b15137a434907..1bd4d5a83a5f0 100644
--- a/crates/viewer/re_viewer_context/src/space_view/view_query.rs
+++ b/crates/viewer/re_viewer_context/src/space_view/view_query.rs
@@ -160,7 +160,7 @@ impl DataResult {
                     .recording()
                     .latest_at_component::<C>(path, &ctx.current_query()),
             })
-            .map(|c| c.value)
+            .map(|(_index, value)| value)
     }
 
     /// Returns from which entity path an override originates from.
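
[Reviewer note, not part of the patch: every call site above migrates to the same
re_query2 latest-at pattern, so here is a minimal sketch of it in one place. It only
uses accessors that appear in this diff (`query_caches2`, `latest_at`,
`component_batch`, `component_batch_raw`); the `entity_db`, `query` and `entity_path`
bindings and the choice of `Color` are hypothetical.]

    use re_types::{components::Color, Loggable as _};

    // One cached latest-at query for a single component on a single entity.
    let results = entity_db.query_caches2().latest_at(
        entity_db.store(),
        &query,
        &entity_path,
        [Color::name()],
    );

    // Deserialized access: one batch of `Color`s, or `None` if nothing was logged.
    if let Some(colors) = results.component_batch::<Color>() {
        for _color in &colors {
            // …
        }
    }

    // Raw access: the underlying arrow array, skipping deserialization entirely.
    if let Some(array) = results.component_batch_raw(&Color::name()) {
        let _num_instances = array.len();
    }
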
diff --git a/crates/viewer/re_viewer_context/src/store_hub.rs b/crates/viewer/re_viewer_context/src/store_hub.rs
index 848c5805dce42..624dd4ed2a9a4 100644
--- a/crates/viewer/re_viewer_context/src/store_hub.rs
+++ b/crates/viewer/re_viewer_context/src/store_hub.rs
@@ -6,7 +6,7 @@ use itertools::Itertools as _;
 use re_chunk_store::{ChunkStoreConfig, ChunkStoreGeneration, ChunkStoreStats};
 use re_entity_db::{EntityDb, StoreBundle};
 use re_log_types::{ApplicationId, StoreId, StoreKind};
-use re_query::CachesStats;
+use re_query2::CachesStats;
 
 use crate::StoreContext;
 
@@ -736,7 +736,7 @@ impl StoreHub {
             .unwrap_or_default();
 
         let blueprint_cached_stats = blueprint
-            .map(|entity_db| entity_db.query_caches().stats())
+            .map(|entity_db| entity_db.query_caches2().stats())
             .unwrap_or_default();
 
         let blueprint_config = blueprint
@@ -753,7 +753,7 @@ impl StoreHub {
             .unwrap_or_default();
 
         let recording_cached_stats = recording
-            .map(|entity_db| entity_db.query_caches().stats())
+            .map(|entity_db| entity_db.query_caches2().stats())
             .unwrap_or_default();
 
         let recording_config2 = recording
diff --git a/crates/viewer/re_viewport_blueprint/src/container.rs b/crates/viewer/re_viewport_blueprint/src/container.rs
index e2df151a59113..72956b4e6c74e 100644
--- a/crates/viewer/re_viewport_blueprint/src/container.rs
+++ b/crates/viewer/re_viewport_blueprint/src/container.rs
@@ -47,8 +47,7 @@ impl ContainerBlueprint {
 
         // ----
 
-        let resolver = blueprint_db.resolver();
-        let results = blueprint_db.query_caches().latest_at(
+        let results = blueprint_db.query_caches2().latest_at(
             blueprint_db.store(),
             query,
             &id.as_entity_path(),
@@ -60,7 +59,7 @@ impl ContainerBlueprint {
         // This is a required component. Note that when loading containers we crawl the subtree and so
         // cleared empty container paths may exist transiently. The fact that they have an empty container_kind
         // is the marker that they have been cleared and not an error.
-        let container_kind = results.get_instance::<blueprint_components::ContainerKind>(resolver, 0)?;
+        let container_kind = results.component_instance::<blueprint_components::ContainerKind>(0)?;
 
         let blueprint_archetypes::ContainerBlueprint {
             container_kind,
             display_name,
             contents,
             col_shares,
             row_shares,
             active_tab,
             visible,
             grid_columns,
         } = blueprint_archetypes::ContainerBlueprint {
             container_kind,
-            display_name: results.get_instance(resolver, 0),
-            contents: results.get_vec(resolver),
-            col_shares: results.get_vec(resolver),
-            row_shares: results.get_vec(resolver),
-            active_tab: results.get_instance(resolver, 0),
-            visible: results.get_instance(resolver, 0),
-            grid_columns: results.get_instance(resolver, 0),
+            display_name: results.component_instance(0),
+            contents: results.component_batch(),
+            col_shares: results.component_batch(),
+            row_shares: results.component_batch(),
+            active_tab: results.component_instance(0),
+            visible: results.component_instance(0),
+            grid_columns: results.component_instance(0),
         };
 
         // ----
diff --git a/crates/viewer/re_viewport_blueprint/src/space_view.rs b/crates/viewer/re_viewport_blueprint/src/space_view.rs
index 3014233e38714..d18be4f737d9e 100644
--- a/crates/viewer/re_viewport_blueprint/src/space_view.rs
+++ b/crates/viewer/re_viewport_blueprint/src/space_view.rs
@@ -132,8 +132,7 @@ impl SpaceViewBlueprint {
     ) -> Option<Self> {
         re_tracing::profile_function!();
 
-        let resolver = blueprint_db.resolver();
-        let results = blueprint_db.query_caches().latest_at(
+        let results = blueprint_db.query_caches2().latest_at(
             blueprint_db.store(),
             query,
             &id.as_entity_path(),
@@ -146,7 +145,7 @@ impl SpaceViewBlueprint {
         // cleared empty space-views paths may exist transiently. The fact that they have an empty class_identifier
        // is the marker that they have been cleared and not an error.
         let class_identifier =
-            results.get_instance::<blueprint_components::SpaceViewClass>(resolver, 0)?;
+            results.component_instance::<blueprint_components::SpaceViewClass>(0)?;
 
         let blueprint_archetypes::SpaceViewBlueprint {
             class_identifier,
             display_name,
             space_origin,
             visible,
         } = blueprint_archetypes::SpaceViewBlueprint {
             class_identifier,
-            display_name: results.get_instance::<Name>(resolver, 0),
-            space_origin: results.get_instance::<SpaceViewOrigin>(resolver, 0),
-            visible: results.get_instance::<Visible>(resolver, 0),
+            display_name: results.component_instance::<Name>(0),
+            space_origin: results.component_instance::<SpaceViewOrigin>(0),
+            visible: results.component_instance::<Visible>(0),
         };
 
         let space_origin = space_origin.map_or_else(EntityPath::root, |origin| origin.0.into());
@@ -275,14 +274,10 @@ impl SpaceViewBlueprint {
                     .contains(component)
             })
             .filter_map(|component_name| {
-                let results = blueprint.query_caches().latest_at(
-                    blueprint.store(),
-                    query,
-                    path,
-                    [component_name],
-                );
-                let results = results.get(component_name)?;
-                let array = results.raw(blueprint.resolver(), component_name);
+                let array = blueprint
+                    .query_caches2()
+                    .latest_at(blueprint.store(), query, path, [component_name])
+                    .component_batch_raw(&component_name);
                 array.map(|array| (component_name, array))
             }),
     )
diff --git a/crates/viewer/re_viewport_blueprint/src/space_view_contents.rs b/crates/viewer/re_viewport_blueprint/src/space_view_contents.rs
index 8f319916493c8..b532774973cf2 100644
--- a/crates/viewer/re_viewport_blueprint/src/space_view_contents.rs
+++ b/crates/viewer/re_viewport_blueprint/src/space_view_contents.rs
@@ -397,13 +397,12 @@ impl DataQueryPropertyResolver<'_> {
             re_tracing::profile_scope!("Update visualizers from overrides");
 
             // If the user has overridden the visualizers, update which visualizers are used.
-            // TODO(#5607): what should happen if the promise is still pending?
            if let Some(viz_override) = blueprint
                 .latest_at_component::<blueprint_components::VisualizerOverrides>(
                     &individual_override_path,
                     blueprint_query,
                 )
-                .map(|c| c.value)
+                .map(|(_index, value)| value)
             {
                 node.data_result.visualizers =
                     viz_override.0.iter().map(Into::into).collect();
@@ -433,15 +432,15 @@ impl DataQueryPropertyResolver<'_> {
                     .all_components(&recursive_override_subtree.path)
                     .unwrap_or_default()
                 {
-                    let results = blueprint.query_caches().latest_at(
-                        blueprint.store(),
-                        blueprint_query,
-                        &recursive_override_path,
-                        [component_name],
-                    );
-                    if let Some(component_data) = results
-                        .get(component_name)
-                        .and_then(|results| results.raw(blueprint.resolver(), component_name))
+                    if let Some(component_data) = blueprint
+                        .query_caches2()
+                        .latest_at(
+                            blueprint.store(),
+                            blueprint_query,
+                            &recursive_override_path,
+                            [component_name],
+                        )
+                        .component_batch_raw(&component_name)
                     {
                         if !component_data.is_empty() {
                             recursive_property_overrides.to_mut().insert(
@@ -468,15 +467,15 @@ impl DataQueryPropertyResolver<'_> {
                     .all_components(&individual_override_subtree.path)
                     .unwrap_or_default()
                 {
-                    let results = blueprint.query_caches().latest_at(
-                        blueprint.store(),
-                        blueprint_query,
-                        &individual_override_path,
-                        [component_name],
-                    );
-                    if let Some(component_data) = results
-                        .get(component_name)
-                        .and_then(|results| results.raw(blueprint.resolver(), component_name))
+                    if let Some(component_data) = blueprint
+                        .query_caches2()
+                        .latest_at(
+                            blueprint.store(),
+                            blueprint_query,
+                            &individual_override_path,
+                            [component_name],
+                        )
+                        .component_batch_raw(&component_name)
                     {
                         if !component_data.is_empty() {
                             resolved_component_overrides.insert(
@@ -490,14 +489,14 @@ impl DataQueryPropertyResolver<'_> {
 
         // Figure out relevant visual time range.
         use re_types::Loggable as _;
-        let range_query_results = blueprint.latest_at(
+        let latest_at_results = blueprint.latest_at(
             blueprint_query,
             &recursive_override_path,
             std::iter::once(blueprint_components::VisibleTimeRange::name()),
         );
-        let visible_time_ranges: Option<&[blueprint_components::VisibleTimeRange]> =
-            range_query_results.get_slice(blueprint.resolver());
-        let time_range = visible_time_ranges.and_then(|ranges| {
+        let visible_time_ranges =
+            latest_at_results.component_batch::<blueprint_components::VisibleTimeRange>();
+        let time_range = visible_time_ranges.as_ref().and_then(|ranges| {
             ranges
                 .iter()
                 .find(|range| range.timeline.as_str() == active_timeline.name().as_str())
diff --git a/crates/viewer/re_viewport_blueprint/src/view_properties.rs b/crates/viewer/re_viewport_blueprint/src/view_properties.rs
index bb4bddd412993..3870a2384b5ec 100644
--- a/crates/viewer/re_viewport_blueprint/src/view_properties.rs
+++ b/crates/viewer/re_viewport_blueprint/src/view_properties.rs
@@ -1,5 +1,5 @@
 use re_chunk_store::LatestAtQuery;
-use re_entity_db::{external::re_query::LatestAtResults, EntityDb};
+use re_entity_db::{external::re_query2::LatestAtResults, EntityDb};
 use re_log_types::EntityPath;
 use re_types::{
     external::arrow2, Archetype, ArchetypeName, ComponentBatch, ComponentName, DeserializationError,
@@ -33,7 +33,6 @@ pub struct ViewProperty<'a> {
     archetype_name: ArchetypeName,
     component_names: Vec<ComponentName>,
     query_results: LatestAtResults,
-    blueprint_db: &'a EntityDb,
     blueprint_query: &'a LatestAtQuery,
 }
 
@@ -74,7 +73,6 @@ impl<'a> ViewProperty<'a> {
             archetype_name,
             query_results,
             component_names: component_names.to_vec(),
-            blueprint_db,
             blueprint_query,
         }
     }
@@ -140,8 +138,8 @@ impl<'a> ViewProperty<'a> {
         component_name: ComponentName,
     ) -> Option<Box<dyn arrow2::array::Array>> {
         self.query_results
-            .get(component_name)
-            .and_then(|result| result.raw(self.blueprint_db.resolver(), component_name))
+            .get(&component_name)
+            .and_then(|mono| mono.component_batch_raw(&component_name))
     }
 
     fn component_or_fallback_raw(
diff --git a/crates/viewer/re_viewport_blueprint/src/viewport_blueprint.rs b/crates/viewer/re_viewport_blueprint/src/viewport_blueprint.rs
index 2360c0504482f..f1b3aba3a697c 100644
--- a/crates/viewer/re_viewport_blueprint/src/viewport_blueprint.rs
+++ b/crates/viewer/re_viewport_blueprint/src/viewport_blueprint.rs
@@ -68,8 +68,7 @@ impl ViewportBlueprint {
     ) -> Self {
         re_tracing::profile_function!();
 
-        let resolver = blueprint_db.resolver();
-        let results = blueprint_db.query_caches().latest_at(
+        let results = blueprint_db.query_caches2().latest_at(
             blueprint_db.store(),
             query,
             &VIEWPORT_PATH.into(),
@@ -85,11 +84,11 @@ impl ViewportBlueprint {
             auto_space_views,
             past_viewer_recommendations,
         } = blueprint_archetypes::ViewportBlueprint {
-            root_container: results.get_instance(resolver, 0),
-            maximized: results.get_instance(resolver, 0),
-            auto_layout: results.get_instance(resolver, 0),
-            auto_space_views: results.get_instance(resolver, 0),
-            past_viewer_recommendations: results.get_vec(resolver),
+            root_container: results.component_instance(0),
+            maximized: results.component_instance(0),
+            auto_layout: results.component_instance(0),
+            auto_space_views: results.component_instance(0),
+            past_viewer_recommendations: results.component_batch(),
         };
 
         let all_space_view_ids: Vec<SpaceViewId> = blueprint_db
diff --git a/examples/rust/custom_space_view/src/color_coordinates_visualizer_system.rs b/examples/rust/custom_space_view/src/color_coordinates_visualizer_system.rs
index e4d34f531a8fc..cd802622192d5 100644
--- a/examples/rust/custom_space_view/src/color_coordinates_visualizer_system.rs
+++ b/examples/rust/custom_space_view/src/color_coordinates_visualizer_system.rs
@@ -61,19 +61,14 @@ impl VisualizerSystem for InstanceColorSystem {
         for data_result in query.iter_visible_data_results(ctx, Self::identifier()) {
             // …gather all colors and their instance ids.
-            let results = ctx.recording().query_caches().latest_at(
+            let results = ctx.recording().query_caches2().latest_at(
                 ctx.recording_store(),
                 &ctx.current_query(),
                 &data_result.entity_path,
                 [Color::name()],
             );
 
-            let Some(colors) = results.get(Color::name()).and_then(|results| {
-                results
-                    .to_dense::<Color>(ctx.recording().resolver())
-                    .flatten()
-                    .ok()
-            }) else {
+            let Some(colors) = results.component_batch::<Color>() else {
                 continue;
             };
diff --git a/tests/python/plot_dashboard_stress/main.py b/tests/python/plot_dashboard_stress/main.py
index 36c55fe01359a..cb87596158d8e 100755
--- a/tests/python/plot_dashboard_stress/main.py
+++ b/tests/python/plot_dashboard_stress/main.py
@@ -29,10 +29,35 @@
 rr.script_add_args(parser)
 
 parser.add_argument("--num-plots", type=int, default=1, help="How many different plots?")
-parser.add_argument("--num-series-per-plot", type=int, default=1, help="How many series in each single plot?")
-parser.add_argument("--num-points-per-series", type=int, default=100000, help="How many points in each single series?")
-parser.add_argument("--freq", type=float, default=1000, help="Frequency of logging (applies to all series)")
-parser.add_argument("--temporal-batch-size", type=int, default=None, help="Number of rows to include in each log call")
+parser.add_argument(
+    "--num-series-per-plot",
+    type=int,
+    default=1,
+    help="How many series in each single plot?",
+)
+parser.add_argument(
+    "--num-points-per-series",
+    type=int,
+    default=100000,
+    help="How many points in each single series?",
+)
+parser.add_argument(
+    "--freq",
+    type=float,
+    default=1000,
+    help="Frequency of logging (applies to all series)",
+)
+parser.add_argument(
+    "--temporal-batch-size",
+    type=int,
+    default=None,
+    help="Number of rows to include in each log call",
+)
+parser.add_argument(
+    "--blueprint",
+    action="store_true",
+    help="Set up a blueprint for a 5s window",
+)
 
 order = [
     "forwards",
     "random",
 ]
 parser.add_argument(
-    "--order", type=str, default=order[0], help="What order to log the data in (applies to all series)", choices=order
+    "--order",
+    type=str,
+    default=order[0],
+    help="What order to log the data in (applies to all series)",
+    choices=order,
 )
 
 series_type = [
@@ -68,6 +97,37 @@ def main() -> None:
     plot_paths = [f"plot_{i}" for i in range(0, args.num_plots)]
     series_paths = [f"series_{i}" for i in range(0, args.num_series_per_plot)]
 
+    if args.blueprint:
+        from rerun.blueprint import (
+            Blueprint,
+            BlueprintPanel,
+            Grid,
+            SelectionPanel,
+            TimeRangeBoundary,
+            TimeSeriesView,
+            VisibleTimeRange,
+        )
+
+        print("logging blueprint!")
+        rr.send_blueprint(
+            Blueprint(
+                Grid(*[
+                    TimeSeriesView(
+                        name=p,
+                        origin=f"/{p}",
+                        time_ranges=VisibleTimeRange(
+                            "sim_time",
+                            start=TimeRangeBoundary.cursor_relative(offset=rr.TimeInt(seconds=-2.5)),
+                            end=TimeRangeBoundary.cursor_relative(offset=rr.TimeInt(seconds=2.5)),
+                        ),
+                    )
+                    for p in plot_paths
+                ]),
+                BlueprintPanel(state="collapsed"),
+                SelectionPanel(state="collapsed"),
+            )
+        )
+
     time_per_sim_step = 1.0 / args.freq
     stop_time = args.num_points_per_series * time_per_sim_step
 
@@ -110,7 +170,10 @@ def main() -> None:
             ticks = enumerate(sim_times)
         else:
             offsets = range(0, len(sim_times), args.temporal_batch_size)
-            ticks = zip(offsets, (sim_times[offset : offset + args.temporal_batch_size] for offset in offsets))
+            ticks = zip(
+                offsets,
+                (sim_times[offset : offset + args.temporal_batch_size] for offset in offsets),
+            )
 
         time_batch = None
 
@@ -129,7 +192,11 @@ def main() -> None:
                 else:
                     value_index = slice(index, 
index + args.temporal_batch_size) value_batch = rr.components.ScalarBatch(values[value_index, plot_idx, series_idx]) - rr.log_temporal_batch(f"{plot_path}/{series_path}", times=[time_batch], components=[value_batch]) + rr.log_temporal_batch( + f"{plot_path}/{series_path}", + times=[time_batch], + components=[value_batch], + ) # Progress report
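
[Reviewer note, not part of the patch: both time-series visualizers above rely on
`re_query2::range_zip_1x1` to stitch optional components onto the scalar timeline.
A toy sketch of the assumed semantics, with indices abbreviated to plain integers:
each optional stream is advanced while its index is at or before the current
required index, so optional values latch on with latest-at semantics.]

    // Required stream: one entry per scalar at t = 0..5 (payload elided).
    let scalars = (0i64..5).map(|t| (t, ()));
    // Optional stream: colors logged only at t = 1 and t = 3.
    let colors = [(1i64, "red"), (3i64, "blue")].into_iter();

    for (t, _scalar, color) in re_query2::range_zip_1x1(scalars, colors) {
        // Expected: t=0 -> None, t=1..=2 -> Some("red"), t=3..=4 -> Some("blue").
        println!("{t}: {color:?}");
    }
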