Skip MPI-CUDA demo when not building for the CUDA backend (#10)
Also remove unused variable in send_recv_usm.cpp
rafbiels authored Feb 15, 2024
1 parent 065e5af commit 0fd6b7e
Showing 3 changed files with 17 additions and 13 deletions.
7 changes: 4 additions & 3 deletions README.md
@@ -54,9 +54,10 @@ These three examples form part of the [Codeplay oneAPI for NVIDIA GPUs plugin
 documentation](https://developer.codeplay.com/products/oneapi/nvidia/latest/guides/MPI-guide).
 The documentation refers to the gpu-aware MPI guide for the CUDA backend.
 
-Building the MPI examples requires the MPI headers and library to be present on
-the system. This demo will be automatically skipped if MPI is not installed or
-detected and a message saying this will appear in the CMake configuration
+Building the MPI-CUDA examples requires the CUDA backend to be enabled and the
+MPI headers and library to be present on the system. This demo will be
+automatically skipped when not building for the CUDA backend or when MPI is not
+installed/detected. A message saying this will appear in the CMake configuration
 output. Additionally, in order to run the examples, the MPI implementation needs
 to be CUDA-aware. This is only detectable at runtime, so the examples may build
 fine but crash on execution if the linked MPI library isn't CUDA-aware.
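
As the README notes, CUDA-awareness of the linked MPI library is only detectable at run time. With Open MPI it can be probed explicitly; the sketch below assumes Open MPI's mpi-ext.h extension header and its MPIX_Query_cuda_support() query, which other MPI implementations may not provide.

#include <mpi.h>
#include <cstdio>
#if defined(OPEN_MPI) && OPEN_MPI
#include <mpi-ext.h>  // Open MPI extensions; defines MPIX_CUDA_AWARE_SUPPORT
#endif

int main(int argc, char *argv[]) {
  MPI_Init(&argc, &argv);

// Compile-time check: reflects the headers the program was built against.
#if defined(MPIX_CUDA_AWARE_SUPPORT) && MPIX_CUDA_AWARE_SUPPORT
  std::printf("Open MPI headers report CUDA-aware support.\n");
#elif defined(MPIX_CUDA_AWARE_SUPPORT)
  std::printf("Open MPI headers report no CUDA-aware support.\n");
#else
  std::printf("CUDA-aware support cannot be determined at compile time.\n");
#endif

// Run-time check: the library actually linked may differ from the headers.
#if defined(MPIX_CUDA_AWARE_SUPPORT)
  std::printf("Run-time CUDA-aware support: %s\n",
              MPIX_Query_cuda_support() == 1 ? "yes" : "no");
#endif

  MPI_Finalize();
  return 0;
}
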
22 changes: 13 additions & 9 deletions src/MPI_for_CUDA_backend/CMakeLists.txt
@@ -1,11 +1,15 @@
-find_package(MPI)
-if(NOT MPI_FOUND)
-  message(STATUS "MPI not found, skipping the MPI_for_CUDA_backend demo")
+if(NOT ENABLE_CUDA)
+  message(STATUS "CUDA backend is disabled, skipping the MPI_for_CUDA_backend demo")
 else()
-  message(STATUS "Found MPI, configuring the MPI_for_CUDA_backend demo")
-  foreach(TARGET send_recv_usm send_recv_buff scatter_reduce_gather)
-    add_executable(${TARGET} ${TARGET}.cpp)
-    target_compile_options(${TARGET} PUBLIC ${SYCL_FLAGS} ${MPI_INCLUDE_DIRS})
-    target_link_options(${TARGET} PUBLIC ${SYCL_FLAGS} ${MPI_LIBRARIES})
-  endforeach()
+  find_package(MPI)
+  if(NOT MPI_FOUND)
+    message(STATUS "MPI not found, skipping the MPI_for_CUDA_backend demo")
+  else()
+    message(STATUS "Found MPI, configuring the MPI_for_CUDA_backend demo")
+    foreach(TARGET send_recv_usm send_recv_buff scatter_reduce_gather)
+      add_executable(${TARGET} ${TARGET}.cpp)
+      target_compile_options(${TARGET} PUBLIC ${SYCL_FLAGS} ${MPI_INCLUDE_DIRS})
+      target_link_options(${TARGET} PUBLIC ${SYCL_FLAGS} ${MPI_LIBRARIES})
+    endforeach()
+  endif()
 endif()
1 change: 0 additions & 1 deletion src/MPI_for_CUDA_backend/send_recv_usm.cpp
@@ -70,7 +70,6 @@ int main(int argc, char *argv[]) {
 
   q.submit(pf).wait();
 
-  MPI_Status status;
   // Send the data from rank 0 to rank 1.
   MPI_Send(devp, nsize, MPI_BYTE, 1, tag, MPI_COMM_WORLD);
   printf("Sent %d elements from %d to 1\n", nelem, rank);
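
The removed MPI_Status status; was dead code: MPI_Send has no status parameter, and a receive that does not inspect the message envelope can pass MPI_STATUS_IGNORE instead of a status object. A minimal stand-alone sketch of that pattern (generic buffer and rank layout, not the demo's actual code):

#include <mpi.h>

int main(int argc, char *argv[]) {
  MPI_Init(&argc, &argv);
  int rank = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  constexpr int tag = 0;
  constexpr int count = 4;
  char buf[count] = {0, 1, 2, 3};

  if (rank == 0) {
    // MPI_Send takes no MPI_Status, so no status variable is needed here.
    MPI_Send(buf, count, MPI_BYTE, 1, tag, MPI_COMM_WORLD);
  } else if (rank == 1) {
    // MPI_STATUS_IGNORE avoids declaring a status that is never inspected.
    MPI_Recv(buf, count, MPI_BYTE, 0, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
  }

  MPI_Finalize();
  return 0;
}
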
