Improve definition of agglomerates
fdrmrc committed Oct 9, 2024
1 parent da242ce commit 6602d29
Showing 3 changed files with 100 additions and 19 deletions.
89 changes: 89 additions & 0 deletions include/poly_utils.h
@@ -616,6 +616,95 @@ namespace dealii::PolyUtils



/**
* Partition with METIS the locally owned regions of the given
* triangulation and insert the resulting agglomerates in the polytopic
* grid.
*
* @note The given triangulation must be a parallel::fullydistributed::Triangulation. This is
* required because the partitions generated by p4est, the partitioner for
* parallel::distributed::Triangulation, can be discontinuous, which is not
* supported by the METIS partitioner.
*
*/
template <int dim, int spacedim>
void
partition_locally_owned_regions(
AgglomerationHandler<dim> &agglomeration_handler,
const unsigned int n_partitions,
Triangulation<dim, spacedim> &triangulation,
const SparsityTools::Partitioner partitioner)
{
AssertDimension(dim, spacedim);
Assert(
agglomeration_handler.n_agglomerates() == 0,
ExcMessage(
"The agglomerated grid must be empty upon calling this function."));
Assert(n_partitions > 0,
ExcMessage("Invalid number of partitions, you provided " +
std::to_string(n_partitions)));

auto parallel_triangulation =
dynamic_cast<parallel::fullydistributed::Triangulation<dim, spacedim> *>(
&triangulation);
Assert(
(parallel_triangulation != nullptr),
ExcMessage(
"Only fully distributed triangulations are supported. If you are using"
"a parallel::distributed::triangulation, you must convert it to a fully"
"distributed as explained in the documentation."));

// check for an easy return
if (n_partitions == 1)
{
for (const auto &cell : parallel_triangulation->active_cell_iterators())
if (cell->is_locally_owned())
agglomeration_handler.define_agglomerate({cell});
return;
}

// collect all locally owned cells
std::vector<types::global_cell_index> locally_owned_cells;
for (const auto &cell : triangulation.active_cell_iterators())
if (cell->is_locally_owned())
locally_owned_cells.push_back(cell->active_cell_index());

DynamicSparsityPattern cell_connectivity;
internal::get_face_connectivity_of_cells(*parallel_triangulation,
cell_connectivity,
locally_owned_cells);

SparsityPattern sp_cell_connectivity;
sp_cell_connectivity.copy_from(cell_connectivity);

// partition the locally owned connectivity graph and get back a vector
// of partition indices, one per locally owned cell
std::vector<unsigned int> partition_indices(
parallel_triangulation->n_locally_owned_active_cells());
SparsityTools::partition(sp_cell_connectivity,
n_partitions,
partition_indices,
partitioner);

std::vector<std::vector<typename Triangulation<dim>::active_cell_iterator>>
cells_per_partition_id;
cells_per_partition_id.resize(n_partitions); // number of agglomerates

// finally loop over all cells and store the ones with same partition index
for (const auto &cell : parallel_triangulation->active_cell_iterators())
if (cell->is_locally_owned())
cells_per_partition_id[partition_indices[internal::get_index(
locally_owned_cells, cell->active_cell_index())]]
.push_back(cell);

// All the cells with the same partition index will be merged together.
for (unsigned int i = 0; i < n_partitions; ++i)
agglomeration_handler.define_agglomerate(cells_per_partition_id[i]);
}



template <int dim>
std::
tuple<std::vector<double>, std::vector<double>, std::vector<double>, double>
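For reference, converting an existing parallel::distributed::Triangulation into the fully distributed triangulation that partition_locally_owned_regions requires can be done with deal.II's TriangulationDescription utilities. The following is a minimal usage sketch, not part of this commit: it assumes the polydeal headers providing AgglomerationHandler and PolyUtils are included, and the names tria_pdt and tria_pft are illustrative.

#include <deal.II/distributed/fully_distributed_tria.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/grid_tools_cache.h>
#include <deal.II/grid/tria_description.h>

using namespace dealii;

int main(int argc, char *argv[])
{
  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);
  constexpr int dim = 2;

  // p4est-backed triangulation, whose partitions may be discontinuous ...
  parallel::distributed::Triangulation<dim> tria_pdt(MPI_COMM_WORLD);
  GridGenerator::hyper_cube(tria_pdt);
  tria_pdt.refine_global(5);

  // ... converted into a parallel::fullydistributed::Triangulation ...
  const auto description =
    TriangulationDescription::Utilities::create_description_from_triangulation(
      tria_pdt, MPI_COMM_WORLD);
  parallel::fullydistributed::Triangulation<dim> tria_pft(MPI_COMM_WORLD);
  tria_pft.create_triangulation(description);

  // ... so that METIS can build, say, 10 agglomerates per locally owned region.
  GridTools::Cache<dim>     cached_tria(tria_pft);
  AgglomerationHandler<dim> ah(cached_tria);
  PolyUtils::partition_locally_owned_regions(ah,
                                             10,
                                             tria_pft,
                                             SparsityTools::Partitioner::metis);
  return 0;
}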
20 changes: 5 additions & 15 deletions test/polydeal/fully_distributed_poisson_sanity_check_01.cc
@@ -166,25 +166,15 @@ main(int argc, char *argv[])
const unsigned int n_local_agglomerates =
10; // number of agglomerates in each local subdomain

- // Call the METIS partitioner to agglomerate within each processor.
- PolyUtils::partition_locally_owned_regions(n_local_agglomerates,
-                                            tria_pft,
-                                            SparsityTools::Partitioner::metis);
-
// Setup the agglomeration handler.
GridTools::Cache<dim> cached_tria(tria_pft);
AgglomerationHandler<dim> ah(cached_tria);

- // Agglomerate cells together based on their material id
- std::vector<std::vector<typename Triangulation<dim>::active_cell_iterator>>
-   cells_per_subdomain(n_local_agglomerates);
- for (const auto &cell : tria_pft.active_cell_iterators())
-   if (cell->is_locally_owned())
-     cells_per_subdomain[cell->material_id()].push_back(cell);
-
- // Agglomerate elements with same id
- for (std::size_t i = 0; i < cells_per_subdomain.size(); ++i)
-   ah.define_agglomerate(cells_per_subdomain[i]);
+ // Call the METIS partitioner to agglomerate within each processor.
+ PolyUtils::partition_locally_owned_regions(ah,
+                                            n_local_agglomerates,
+                                            tria_pft,
+                                            SparsityTools::Partitioner::metis);


FE_DGQ<2> fe_dg(1);
10 changes: 6 additions & 4 deletions test/polydeal/fully_distributed_poisson_sanity_check_02.cc
@@ -148,10 +148,6 @@ main(int argc, char *argv[])
const unsigned int n_local_agglomerates =
10; // number of agglomerates in each local subdomain

- // Call the METIS partitioner to agglomerate within each processor.
- PolyUtils::partition_locally_owned_regions(n_local_agglomerates,
-                                            tria_pft,
-                                            SparsityTools::Partitioner::metis);


// The rest of the program is identical to
@@ -162,6 +158,12 @@ main(int argc, char *argv[])
GridTools::Cache<dim> cached_tria(tria_pft);
AgglomerationHandler<dim> ah(cached_tria);

+ // Call the METIS partitioner to agglomerate within each processor.
+ PolyUtils::partition_locally_owned_regions(ah,
+                                            n_local_agglomerates,
+                                            tria_pft,
+                                            SparsityTools::Partitioner::metis);
+
// Agglomerate cells together based on their material id
std::vector<std::vector<typename Triangulation<dim>::active_cell_iterator>>
cells_per_subdomain(n_local_agglomerates);
