Use full form for lambdas to avoid confusion with IIFEs.
LTLA committed Feb 5, 2025
1 parent 942ffdb · commit 7b91c2a
Showing 3 changed files with 109 additions and 65 deletions.
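For context on the motivation: a lambda passed in its terse form, e.g. `serialize([&](){ ... });`, differs from an immediately-invoked function expression (IIFE) only by a trailing `()`, so a reader skimming a call site can mistake a callback for an immediate call, or vice versa. Spelling out the empty parameter list and an explicit `-> void` trailing return type makes the intent unambiguous. A minimal standalone sketch of the distinction (illustrative only, not code from this repository):

    #include <iostream>

    template<typename Function_>
    void run_later(Function_ fun) {
        fun(); // the callee decides when to invoke the lambda.
    }

    int main() {
        // IIFE: the lambda is defined and invoked on the spot; note the trailing ().
        int x = []() -> int { return 42; }();

        // Terse callback: easy to misread as an IIFE at a glance.
        run_later([&](){ std::cout << x << "\n"; });

        // Full form adopted by this commit: the explicit '-> void' makes it
        // obvious that the lambda itself is the argument.
        run_later([&]() -> void { std::cout << x << "\n"; });

        return 0;
    }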
118 changes: 76 additions & 42 deletions include/tatami_hdf5/DenseMatrix.hpp
@@ -93,8 +93,10 @@ void extract_indices(bool h5_row_is_target, Index_ cache_start, Index_ cache_len
// but hopefully they've fixed the problem with non-consecutive slices in:
// https://forum.hdfgroup.org/t/union-of-non-consecutive-hyperslabs-is-very-slow/5062
comp.dataspace.selectNone();
tatami::process_consecutive_indices<Index_>(indices.data(), indices.size(),
[&](Index_ start, Index_ length) {
tatami::process_consecutive_indices<Index_>(
indices.data(),
indices.size(),
[&](Index_ start, Index_ length) -> void {
offset[non_target_dim] = start;
count[non_target_dim] = length;
comp.dataspace.selectHyperslab(H5S_SELECT_OR, count, offset);
@@ -168,7 +170,7 @@ class SoloCore {
if constexpr(oracle_) {
i = my_oracle->get(my_counter++);
}
serialize([&](){
serialize([&]() -> void {
extract_block(by_h5_row_, i, static_cast<Index_>(1), block_start, block_length, buffer, *my_h5comp);
});
return buffer;
@@ -179,7 +181,7 @@ class SoloCore {
if constexpr(oracle_) {
i = my_oracle->get(my_counter++);
}
serialize([&](){
serialize([&]() -> void {
extract_indices(by_h5_row_, i, static_cast<Index_>(1), indices, buffer, *my_h5comp);
});
return buffer;
@@ -256,17 +258,27 @@ class MyopicCore {
public:
template<typename Value_>
const Value_* fetch_block(Index_ i, Index_ block_start, Index_ block_length, Value_* buffer) {
fetch_raw(i, buffer, block_length, [&](Index_ start, Index_ length, CachedValue_* buf) {
extract_block(by_h5_row_, start, length, block_start, block_length, buf, *my_h5comp);
});
fetch_raw(
i,
buffer,
block_length,
[&](Index_ start, Index_ length, CachedValue_* buf) -> void {
extract_block(by_h5_row_, start, length, block_start, block_length, buf, *my_h5comp);
}
);
return buffer;
}

template<typename Value_>
const Value_* fetch_indices(Index_ i, const std::vector<Index_>& indices, Value_* buffer) {
fetch_raw(i, buffer, indices.size(), [&](Index_ start, Index_ length, CachedValue_* buf) {
extract_indices(by_h5_row_, start, length, indices, buf, *my_h5comp);
});
fetch_raw(
i,
buffer,
indices.size(),
[&](Index_ start, Index_ length, CachedValue_* buf) -> void {
extract_indices(by_h5_row_, start, length, indices, buf, *my_h5comp);
}
);
return buffer;
}
};
@@ -330,7 +342,7 @@ class OracularCoreNormal {
my_offset += my_slab_size;
return output;
},
/* populate = */ [&](std::vector<std::pair<Index_, Slab*> >& chunks, std::vector<std::pair<Index_, Slab*> >& to_reuse) {
/* populate = */ [&](std::vector<std::pair<Index_, Slab*> >& chunks, std::vector<std::pair<Index_, Slab*> >& to_reuse) -> void {
// Defragmenting the existing chunks. We sort by offset to make
// sure that we're not clobbering in-use slabs during the copy().
sort_by_field(to_reuse, [](const std::pair<Index_, Slab*>& x) -> size_t { return x.second->offset; });
@@ -407,35 +419,47 @@ class OracularCoreNormal {
public:
template<typename Value_>
const Value_* fetch_block(Index_ i, Index_ block_start, Index_ block_length, Value_* buffer) {
fetch_raw(i, buffer, block_length, [&](H5::DataSpace& dspace, Index_ run_start, Index_ run_length) {
hsize_t offset[2];
hsize_t count[2];
offset[0] = run_start;
offset[1] = block_start;
count[0] = run_length;
count[1] = block_length;
dspace.selectHyperslab(H5S_SELECT_OR, count, offset);
});
fetch_raw(
i,
buffer,
block_length,
[&](H5::DataSpace& dspace, Index_ run_start, Index_ run_length) -> void {
hsize_t offset[2];
hsize_t count[2];
offset[0] = run_start;
offset[1] = block_start;
count[0] = run_length;
count[1] = block_length;
dspace.selectHyperslab(H5S_SELECT_OR, count, offset);
}
);
return buffer;
}

template<typename Value_>
const Value_* fetch_indices(Index_ i, const std::vector<Index_>& indices, Value_* buffer) {
fetch_raw(i, buffer, indices.size(), [&](H5::DataSpace& dspace, Index_ run_start, Index_ run_length) {
hsize_t offset[2];
hsize_t count[2];
offset[0] = run_start;
count[0] = run_length;

// See comments in extract_indices().
tatami::process_consecutive_indices<Index_>(indices.data(), indices.size(),
[&](Index_ start, Index_ length) {
offset[1] = start;
count[1] = length;
dspace.selectHyperslab(H5S_SELECT_OR, count, offset);
}
);
});
fetch_raw(
i,
buffer,
indices.size(),
[&](H5::DataSpace& dspace, Index_ run_start, Index_ run_length) -> void {
hsize_t offset[2];
hsize_t count[2];
offset[0] = run_start;
count[0] = run_length;

// See comments in extract_indices().
tatami::process_consecutive_indices<Index_>(
indices.data(),
indices.size(),
[&](Index_ start, Index_ length) -> void {
offset[1] = start;
count[1] = length;
dspace.selectHyperslab(H5S_SELECT_OR, count, offset);
}
);
}
);
return buffer;
}
};
@@ -487,7 +511,7 @@ class OracularCoreTransposed {
/* create = */ [&]() -> Slab {
return my_factory.create();
},
/* populate = */ [&](std::vector<std::pair<Index_, Slab*> >& chunks) {
/* populate = */ [&](std::vector<std::pair<Index_, Slab*> >& chunks) -> void {
my_cache_transpose_info.clear();

serialize([&]() -> void {
@@ -522,17 +546,27 @@ class OracularCoreTransposed {
public:
template<typename Value_>
const Value_* fetch_block(Index_ i, Index_ block_start, Index_ block_length, Value_* buffer) {
fetch_raw(i, buffer, block_length, [&](Index_ start, Index_ length, CachedValue_* buf) {
extract_block(false, start, length, block_start, block_length, buf, *my_h5comp);
});
fetch_raw(
i,
buffer,
block_length,
[&](Index_ start, Index_ length, CachedValue_* buf) -> void {
extract_block(false, start, length, block_start, block_length, buf, *my_h5comp);
}
);
return buffer;
}

template<typename Value_>
const Value_* fetch_indices(Index_ i, const std::vector<Index_>& indices, Value_* buffer) {
fetch_raw(i, buffer, indices.size(), [&](Index_ start, Index_ length, CachedValue_* buf) {
extract_indices(false, start, length, indices, buf, *my_h5comp);
});
fetch_raw(
i,
buffer,
indices.size(),
[&](Index_ start, Index_ length, CachedValue_* buf) -> void {
extract_indices(false, start, length, indices, buf, *my_h5comp);
}
);
return buffer;
}
};
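The densest changes above are to lambdas fed into `tatami::process_consecutive_indices`, which decomposes a sorted index set into runs of consecutive values; each run is then OR'd into the dataspace selection as a single hyperslab, avoiding the slow union of non-consecutive slices mentioned in the HDF5 forum link. A hedged sketch of that pattern in isolation (the function name and the assumption of a 1-dimensional dataspace are illustrative, not this library's actual API):

    #include "H5Cpp.h"
    #include "tatami/tatami.hpp"
    #include <vector>

    // Select the union of the given sorted indices along a 1-dimensional
    // dataspace, issuing one hyperslab per consecutive run.
    template<typename Index_>
    void select_union(H5::DataSpace& dataspace, const std::vector<Index_>& indices) {
        dataspace.selectNone(); // start from an empty selection.
        tatami::process_consecutive_indices<Index_>(
            indices.data(),
            indices.size(),
            [&](Index_ start, Index_ length) -> void {
                hsize_t offset = start;
                hsize_t count = length;
                // OR each consecutive run into the selection; a few contiguous
                // hyperslabs union far faster than many scattered points.
                dataspace.selectHyperslab(H5S_SELECT_OR, &count, &offset);
            }
        );
    }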
22 changes: 13 additions & 9 deletions include/tatami_hdf5/sparse_primary.hpp
@@ -91,7 +91,7 @@ void initialize(const MatrixDetails<Index_>& details, std::unique_ptr<Components
}

inline void destroy(std::unique_ptr<Components>& h5comp) {
serialize([&]() {
serialize([&]() -> void {
h5comp.reset();
});
}
@@ -467,8 +467,10 @@ class PrimaryLruIndexCore : private PrimaryLruCoreBase<Index_, CachedValue_, Cac
if (this->my_needs_value && num_found > 0) {
hsize_t new_start = extraction_start + (indices_start - my_index_buffer.begin());
comp.dataspace.selectNone();
tatami::process_consecutive_indices<Index_>(my_found.data(), my_found.size(),
[&](Index_ start, Index_ length) {
tatami::process_consecutive_indices<Index_>(
my_found.data(),
my_found.size(),
[&](Index_ start, Index_ length) -> void {
hsize_t offset = start + new_start;
hsize_t count = length;
comp.dataspace.selectHyperslab(H5S_SELECT_OR, &count, &offset);
@@ -586,7 +588,7 @@ class PrimaryOracularCoreBase {
/* create = */ [&]() -> SlabPrecursor {
return SlabPrecursor();
},
/* populate = */ [&](std::vector<std::pair<Index_, size_t> >& to_populate, std::vector<std::pair<Index_, size_t> >& to_reuse, std::vector<SlabPrecursor>& all_preslabs) {
/* populate = */ [&](std::vector<std::pair<Index_, size_t> >& to_populate, std::vector<std::pair<Index_, size_t> >& to_reuse, std::vector<SlabPrecursor>& all_preslabs) -> void {
size_t dest_offset = 0;

if (to_reuse.size()) {
@@ -696,7 +698,7 @@ class PrimaryOracularFullCore : private PrimaryOracularCoreBase<Index_, CachedVa
std::vector<SlabPrecursor>& all_preslabs,
std::vector<CachedValue_>& full_value_buffer,
std::vector<CachedIndex_>& full_index_buffer) -> void {
serialize([&](){
serialize([&]() -> void {
this->prepare_contiguous_index_spaces(dest_offset, to_populate, all_preslabs);
auto& comp = *(this->my_h5comp);
if (my_needs_index) {
@@ -748,7 +750,7 @@ class PrimaryOracularBlockCore : private PrimaryOracularCoreBase<Index_, CachedV
std::vector<SlabPrecursor>& all_preslabs,
std::vector<CachedValue_>& full_value_buffer,
std::vector<CachedIndex_>& full_index_buffer) -> void {
serialize([&](){
serialize([&]() -> void {
this->prepare_contiguous_index_spaces(dest_offset, to_populate, all_preslabs);
auto& comp = *(this->my_h5comp);
comp.index_dataset.read(full_index_buffer.data() + dest_offset, define_mem_type<CachedIndex_>(), comp.memspace, comp.dataspace);
@@ -851,7 +853,7 @@ class PrimaryOracularIndexCore : private PrimaryOracularCoreBase<Index_, CachedV
std::vector<SlabPrecursor>& all_preslabs,
std::vector<CachedValue_>& full_value_buffer,
std::vector<CachedIndex_>& full_index_buffer) -> void {
serialize([&](){
serialize([&]() -> void {
this->prepare_contiguous_index_spaces(dest_offset, to_populate, all_preslabs);
auto& comp = *(this->my_h5comp);
comp.index_dataset.read(full_index_buffer.data() + dest_offset, define_mem_type<CachedIndex_>(), comp.memspace, comp.dataspace);
@@ -882,8 +884,10 @@ class PrimaryOracularIndexCore : private PrimaryOracularCoreBase<Index_, CachedV
// indices in 'found' across 'needed', to reduce the memory usage of 'found';
// otherwise we grossly exceed the cache limits during extraction.
hsize_t new_start = this->my_pointers[p.first] + (indices_start - original_start);
tatami::process_consecutive_indices<Index_>(found.data(), found.size(),
[&](Index_ start, Index_ length) {
tatami::process_consecutive_indices<Index_>(
found.data(),
found.size(),
[&](Index_ start, Index_ length) -> void {
hsize_t offset = start + new_start;
hsize_t count = length;
comp.dataspace.selectHyperslab(H5S_SELECT_OR, &count, &offset);
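Every HDF5 operation in this file runs inside `serialize()`, since HDF5 is generally not thread-safe; the commit just gives those wrapped lambdas the same explicit `-> void` form. A rough sketch of how such a wrapper behaves (the mutex-based body is an assumption for illustration; only the call shape `serialize([&]() -> void { ... });` comes from the diff):

    #include <mutex>

    // Illustrative stand-in for this library's serialize(): run 'fun' while
    // holding a process-wide lock, so that HDF5 calls from different
    // threads never overlap.
    inline std::mutex& hdf5_lock() {
        static std::mutex lock;
        return lock;
    }

    template<typename Function_>
    void serialize_sketch(Function_ fun) {
        std::lock_guard<std::mutex> guard(hdf5_lock());
        fun();
    }

    int datasets_opened = 0;

    void do_hdf5_work() {
        // Full lambda form with an explicit '-> void', matching this commit's style.
        serialize_sketch([&]() -> void {
            ++datasets_opened; // stands in for real H5::DataSet operations.
        });
    }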
34 changes: 20 additions & 14 deletions include/tatami_hdf5/sparse_secondary.hpp
@@ -101,7 +101,7 @@ class MyopicSecondaryCore {
Index_ clen = tatami_chunked::get_chunk_length(my_secondary_dim_stats, chunk_id);
std::fill_n(my_cache_count.begin(), clen, 0);

serialize([&]() {
serialize([&]() -> void {
extract(chunk_id * my_secondary_dim_stats.chunk_length, clen);
});
my_last_chunk_id = chunk_id;
@@ -171,22 +171,28 @@ class MyopicSecondaryCore {
// itself (useful for sparse extraction).
template<bool store_index_>
tatami::SparseRange<CachedValue_, Index_> fetch_block(Index_ i, Index_ primary_start, Index_ primary_length) {
return fetch_raw(i, [&](Index_ secondary_start, Index_ secondary_length) {
for (Index_ px = 0; px < primary_length; ++px) {
auto primary = px + primary_start;
extract_and_append(primary, secondary_start, secondary_length, (store_index_ ? px : primary));
return fetch_raw(
i,
[&](Index_ secondary_start, Index_ secondary_length) -> void {
for (Index_ px = 0; px < primary_length; ++px) {
auto primary = px + primary_start;
extract_and_append(primary, secondary_start, secondary_length, (store_index_ ? px : primary));
}
}
});
);
}

template<bool store_index_>
tatami::SparseRange<CachedValue_, Index_> fetch_indices(Index_ i, const std::vector<Index_>& primary_indices) {
return fetch_raw(i, [&](Index_ secondary_start, Index_ secondary_length) {
for (Index_ px = 0, end = primary_indices.size(); px < end; ++px) {
auto primary = primary_indices[px];
extract_and_append(primary, secondary_start, secondary_length, (store_index_ ? px : primary));
return fetch_raw(
i,
[&](Index_ secondary_start, Index_ secondary_length) -> void {
for (Index_ px = 0, end = primary_indices.size(); px < end; ++px) {
auto primary = primary_indices[px];
extract_and_append(primary, secondary_start, secondary_length, (store_index_ ? px : primary));
}
}
});
);
}
};

@@ -347,7 +353,7 @@ class OracularSecondaryCore {
tatami::process_consecutive_indices<Index_>(
my_found.data(),
my_found.size(),
[&](Index_ start, Index_ length) {
[&](Index_ start, Index_ length) -> void {
hsize_t offset = start + new_start;
hsize_t count = length;
my_h5comp->dataspace.selectHyperslab(H5S_SELECT_OR, &count, &offset);
@@ -375,7 +381,7 @@ class OracularSecondaryCore {
// myopic counterpart, and is not actually needed itself.
template<bool store_index_>
tatami::SparseRange<CachedValue_, Index_> fetch_block(Index_, Index_ primary_start, Index_ primary_length) {
const auto& info = fetch_raw([&](Index_ secondary_first, Index_ secondary_last_plus_one) {
const auto& info = fetch_raw([&](Index_ secondary_first, Index_ secondary_last_plus_one) -> void {
for (Index_ px = 0; px < primary_length; ++px) {
auto primary = px + primary_start;
extract_and_append(primary, secondary_first, secondary_last_plus_one, (store_index_ ? px : primary));
@@ -386,7 +392,7 @@

template<bool store_index_>
tatami::SparseRange<CachedValue_, Index_> fetch_indices(Index_, const std::vector<Index_>& primary_indices) {
const auto& info = fetch_raw([&](Index_ secondary_first, Index_ secondary_last_plus_one) {
const auto& info = fetch_raw([&](Index_ secondary_first, Index_ secondary_last_plus_one) -> void {
for (Index_ px = 0, end = primary_indices.size(); px < end; ++px) {
auto primary = primary_indices[px];
extract_and_append(primary, secondary_first, secondary_last_plus_one, (store_index_ ? px : primary));
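The `store_index_` template parameter in the `fetch_block()`/`fetch_indices()` methods above chooses at compile time whether each cached entry records its position within the request (`px`) or the primary dimension index itself (`primary`). A simplified sketch of that compile-time switch (names and types are illustrative):

    #include <vector>

    // Append one entry per requested primary index. 'store_index_' is a
    // compile-time constant, so the ternary below folds away and each
    // instantiation contains only the branch it needs.
    template<bool store_index_, typename Index_>
    void append_entries(const std::vector<Index_>& primary_indices, std::vector<Index_>& out) {
        for (Index_ px = 0, end = primary_indices.size(); px < end; ++px) {
            auto primary = primary_indices[px];
            out.push_back(store_index_ ? px : primary);
        }
    }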
