From 6aa6e3889a80ee1ba8f9160fff8eb5ef6423b7a6 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 25 Feb 2025 22:50:25 +0000
Subject: [PATCH] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 src/particles/ImpactXParticleContainer.cpp | 148 ++++++++++-----------
 1 file changed, 74 insertions(+), 74 deletions(-)

diff --git a/src/particles/ImpactXParticleContainer.cpp b/src/particles/ImpactXParticleContainer.cpp
index 2b03084ab..437edeb94 100644
--- a/src/particles/ImpactXParticleContainer.cpp
+++ b/src/particles/ImpactXParticleContainer.cpp
@@ -142,24 +142,24 @@ namespace impactx
             }
         }
 
-        int nthreads = 1;
+        int nthreads = 1;
 #if defined(AMREX_USE_OMP)
-        nthreads = omp_get_max_threads();
+        nthreads = omp_get_max_threads();
 #endif
 
-        const auto& ba = ParticleBoxArray(lid);
-        auto n_logical = numTilesInBox(ba[gid], true, tile_size);
+        const auto& ba = ParticleBoxArray(lid);
+        auto n_logical = numTilesInBox(ba[gid], true, tile_size);
 
-        if (n_logical < nthreads ) {
-            amrex::Print() << "Too few tiles for the number of OpenMP threads. Parallelization will be poor. \n";
-        }
+        if (n_logical < nthreads ) {
+            amrex::Print() << "Too few tiles for the number of OpenMP threads. Parallelization will be poor. \n";
+        }
 
-        for (int ithr = 0; ithr < nthreads; ++ithr) {
-            DefineAndReturnParticleTile(lid, gid, ithr);
-        }
+        for (int ithr = 0; ithr < nthreads; ++ithr) {
+            DefineAndReturnParticleTile(lid, gid, ithr);
+        }
 
-        int pid = ParticleType::NextID();
-        ParticleType::NextID(pid+np);
+        int pid = ParticleType::NextID();
+        ParticleType::NextID(pid+np);
         AMREX_ALWAYS_ASSERT_WITH_MESSAGE(
             static_cast<amrex::Long>(pid) + static_cast<amrex::Long>(np) < amrex::LongParticleIds::LastParticleID,
             "ERROR: overflow on particle id numbers");
@@ -167,68 +167,68 @@ namespace impactx
 #if defined(AMREX_USE_OMP)
 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
 #endif
-        {
-            int tid = omp_get_thread_num();
-            int nr = np / nthreads;
-            int nlft = np - nr*nthreads;
-
-            int num_to_add = 0;
-            int my_index =0;
-            if (tid < nlft) { // get nr+1 items
-                my_index = tid * (nr+1);
-                num_to_add = nr+1;
-            } else { // get nr items
-                my_index = tid * nr + nlft;
-                num_to_add = nr;
-            }
-
-            auto& particle_tile = ParticlesAt(lid, gid, tid);
-            auto old_np = particle_tile.numParticles();
-            auto new_np = old_np + num_to_add;
-            particle_tile.resize(new_np);
-
-            const int cpuid = amrex::ParallelDescriptor::MyProc();
-
-            auto & soa = particle_tile.GetStructOfArrays().GetRealData();
-            amrex::ParticleReal * const AMREX_RESTRICT x_arr = soa[RealSoA::x].dataPtr();
-            amrex::ParticleReal * const AMREX_RESTRICT y_arr = soa[RealSoA::y].dataPtr();
-            amrex::ParticleReal * const AMREX_RESTRICT t_arr = soa[RealSoA::t].dataPtr();
-            amrex::ParticleReal * const AMREX_RESTRICT px_arr = soa[RealSoA::px].dataPtr();
-            amrex::ParticleReal * const AMREX_RESTRICT py_arr = soa[RealSoA::py].dataPtr();
-            amrex::ParticleReal * const AMREX_RESTRICT pt_arr = soa[RealSoA::pt].dataPtr();
-            amrex::ParticleReal * const AMREX_RESTRICT qm_arr = soa[RealSoA::qm].dataPtr();
-            amrex::ParticleReal * const AMREX_RESTRICT w_arr = soa[RealSoA::w ].dataPtr();
-
-            uint64_t * const AMREX_RESTRICT idcpu_arr = particle_tile.GetStructOfArrays().GetIdCPUData().dataPtr();
-
-            amrex::ParticleReal const * const AMREX_RESTRICT x_ptr = x.data();
-            amrex::ParticleReal const * const AMREX_RESTRICT y_ptr = y.data();
-            amrex::ParticleReal const * const AMREX_RESTRICT t_ptr = t.data();
-            amrex::ParticleReal const * const AMREX_RESTRICT px_ptr = px.data();
-            amrex::ParticleReal const * const AMREX_RESTRICT py_ptr = py.data();
-            amrex::ParticleReal const * const AMREX_RESTRICT pt_ptr = pt.data();
-
-            amrex::ParallelFor(num_to_add,
-            [=] AMREX_GPU_DEVICE (int i) noexcept
-            {
-                idcpu_arr[old_np+i] = amrex::SetParticleIDandCPU(pid + my_index + i, cpuid);
-
-                x_arr[old_np+i] = x_ptr[my_index+i];
-                y_arr[old_np+i] = y_ptr[my_index+i];
-                t_arr[old_np+i] = t_ptr[my_index+i];
-
-                px_arr[old_np+i] = px_ptr[my_index+i];
-                py_arr[old_np+i] = py_ptr[my_index+i];
-                pt_arr[old_np+i] = pt_ptr[my_index+i];
-
-                qm_arr[old_np+i] = qm;
-                w_arr[old_np+i] = bchchg/ablastr::constant::SI::q_e/np;
-            });
-        }
-
-        // safety first: in case passed attribute arrays were temporary, we
-        // want to make sure the ParallelFor has ended here
-        amrex::Gpu::streamSynchronize();
+        {
+            int tid = omp_get_thread_num();
+            int nr = np / nthreads;
+            int nlft = np - nr*nthreads;
+
+            int num_to_add = 0;
+            int my_index =0;
+            if (tid < nlft) { // get nr+1 items
+                my_index = tid * (nr+1);
+                num_to_add = nr+1;
+            } else { // get nr items
+                my_index = tid * nr + nlft;
+                num_to_add = nr;
+            }
+
+            auto& particle_tile = ParticlesAt(lid, gid, tid);
+            auto old_np = particle_tile.numParticles();
+            auto new_np = old_np + num_to_add;
+            particle_tile.resize(new_np);
+
+            const int cpuid = amrex::ParallelDescriptor::MyProc();
+
+            auto & soa = particle_tile.GetStructOfArrays().GetRealData();
+            amrex::ParticleReal * const AMREX_RESTRICT x_arr = soa[RealSoA::x].dataPtr();
+            amrex::ParticleReal * const AMREX_RESTRICT y_arr = soa[RealSoA::y].dataPtr();
+            amrex::ParticleReal * const AMREX_RESTRICT t_arr = soa[RealSoA::t].dataPtr();
+            amrex::ParticleReal * const AMREX_RESTRICT px_arr = soa[RealSoA::px].dataPtr();
+            amrex::ParticleReal * const AMREX_RESTRICT py_arr = soa[RealSoA::py].dataPtr();
+            amrex::ParticleReal * const AMREX_RESTRICT pt_arr = soa[RealSoA::pt].dataPtr();
+            amrex::ParticleReal * const AMREX_RESTRICT qm_arr = soa[RealSoA::qm].dataPtr();
+            amrex::ParticleReal * const AMREX_RESTRICT w_arr = soa[RealSoA::w ].dataPtr();
+
+            uint64_t * const AMREX_RESTRICT idcpu_arr = particle_tile.GetStructOfArrays().GetIdCPUData().dataPtr();
+
+            amrex::ParticleReal const * const AMREX_RESTRICT x_ptr = x.data();
+            amrex::ParticleReal const * const AMREX_RESTRICT y_ptr = y.data();
+            amrex::ParticleReal const * const AMREX_RESTRICT t_ptr = t.data();
+            amrex::ParticleReal const * const AMREX_RESTRICT px_ptr = px.data();
+            amrex::ParticleReal const * const AMREX_RESTRICT py_ptr = py.data();
+            amrex::ParticleReal const * const AMREX_RESTRICT pt_ptr = pt.data();
+
+            amrex::ParallelFor(num_to_add,
+            [=] AMREX_GPU_DEVICE (int i) noexcept
+            {
+                idcpu_arr[old_np+i] = amrex::SetParticleIDandCPU(pid + my_index + i, cpuid);
+
+                x_arr[old_np+i] = x_ptr[my_index+i];
+                y_arr[old_np+i] = y_ptr[my_index+i];
+                t_arr[old_np+i] = t_ptr[my_index+i];
+
+                px_arr[old_np+i] = px_ptr[my_index+i];
+                py_arr[old_np+i] = py_ptr[my_index+i];
+                pt_arr[old_np+i] = pt_ptr[my_index+i];
+
+                qm_arr[old_np+i] = qm;
+                w_arr[old_np+i] = bchchg/ablastr::constant::SI::q_e/np;
+            });
+        }
+
+        // safety first: in case passed attribute arrays were temporary, we
+        // want to make sure the ParallelFor has ended here
+        amrex::Gpu::streamSynchronize();
     }
 
     void
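
Note (commentary, not part of the patch): the second hunk splits the np incoming particles across OpenMP threads with a remainder-aware block partition: nr = np / nthreads items per thread, and the first nlft = np - nr*nthreads threads each take one extra. The standalone C++ sketch below, using hypothetical values for np and nthreads, checks that the my_index / num_to_add arithmetic assigns every particle to exactly one contiguous chunk.

#include <cassert>
#include <iostream>

int main ()
{
    int const np = 10;        // hypothetical number of new particles
    int const nthreads = 4;   // hypothetical number of OpenMP threads

    int const nr   = np / nthreads;       // base chunk size per thread
    int const nlft = np - nr * nthreads;  // leftover items to distribute

    int total = 0;
    for (int tid = 0; tid < nthreads; ++tid) {
        int my_index = 0;
        int num_to_add = 0;
        if (tid < nlft) {                 // first nlft threads take nr+1 items
            my_index = tid * (nr + 1);
            num_to_add = nr + 1;
        } else {                          // remaining threads take nr items
            my_index = tid * nr + nlft;
            num_to_add = nr;
        }
        assert(my_index == total);        // chunks are contiguous: no gaps, no overlap
        total += num_to_add;
        std::cout << "thread " << tid << ": start=" << my_index
                  << " count=" << num_to_add << "\n";
    }
    assert(total == np);                  // every particle is assigned exactly once
    return 0;
}

In the patched code, num_to_add is also the amount by which each thread grows its own tile via ParticlesAt(lid, gid, tid) and resize() before the amrex::ParallelFor copy, which is why the per-thread chunks must be disjoint and cover all np particles.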