diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
index 68efd74..5b59a42 100644
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -12,8 +12,8 @@ env:
   MACOSX_DEPLOYMENT_TARGET: 14.0
 
 jobs:
-  Run_standalone_tests_with_GFortran:
-    name: "Run standalone tests with GFortran"
+  Run_standalone_tests_with_GFortran_with_OpenMPI:
+    name: "Run standalone tests with GFortran with Open MPI"
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
@@ -25,17 +25,17 @@ jobs:
         uses: mamba-org/setup-micromamba@v2.0.2
         with:
           micromamba-version: '2.0.4-0'
-          environment-file: ci/environment_gfortran.yml
+          environment-file: ci/environment_gfortran_openmpi.yml
 
-      - name: Run standalone tests with GFortran with and without optimization
+      - name: Run standalone tests with GFortran with and without optimization using Open MPI
         shell: bash -e -x -l {0}
         run: |
           cd tests
           FC="gfortran" ./run_tests.sh
           FC="gfortran -O3 -march=native" ./run_tests.sh
 
-  Run_standalone_tests_with_LFortran:
-    name: "Run standalone tests with LFortran"
+  Run_standalone_tests_with_GFortran_with_MPICH:
+    name: "Run standalone tests with GFortran with MPICH"
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
@@ -47,17 +47,119 @@ jobs:
         uses: mamba-org/setup-micromamba@v2.0.2
         with:
           micromamba-version: '2.0.4-0'
-          environment-file: ci/environment_lfortran.yml
+          environment-file: ci/environment_gfortran_mpich.yml
 
-      - name: Run standalone tests with LFortran with and without optimization
+      - name: Run standalone tests with GFortran with and without optimization using MPICH
+        shell: bash -e -x -l {0}
+        run: |
+          cd tests
+          FC="gfortran" ./run_tests.sh
+          FC="gfortran -O3 -march=native" ./run_tests.sh
+
+  Run_standalone_tests_with_LFortran_with_OpenMPI:
+    name: "Run standalone tests with LFortran with Open MPI"
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: ["macos-latest", "ubuntu-20.04"]
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Setup Micromamba
+        uses: mamba-org/setup-micromamba@v2.0.2
+        with:
+          micromamba-version: '2.0.4-0'
+          environment-file: ci/environment_lfortran_openmpi.yml
+
+      - name: Run standalone tests with LFortran with and without optimization using Open MPI
+        shell: bash -e -x -l {0}
+        run: |
+          cd tests
+          FC="lfortran" ./run_tests.sh
+          FC="lfortran --fast" ./run_tests.sh
+
+  Run_standalone_tests_with_LFortran_with_MPICH:
+    name: "Run standalone tests with LFortran with MPICH"
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: ["macos-latest", "ubuntu-20.04"]
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Setup Micromamba
+        uses: mamba-org/setup-micromamba@v2.0.2
+        with:
+          micromamba-version: '2.0.4-0'
+          environment-file: ci/environment_lfortran_mpich.yml
+
+      - name: Run standalone tests with LFortran with and without optimization using MPICH
         shell: bash -e -x -l {0}
         run: |
           cd tests
           FC="lfortran" ./run_tests.sh
           FC="lfortran --fast" ./run_tests.sh
 
-  Compile_POT3D_with_GFortran:
-    name: "Build POT3D and validate with GFortran"
+  Compile_POT3D_with_GFortran_with_OpenMPI:
+    name: "Build POT3D and validate with GFortran with Open MPI"
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: ["macos-latest", "ubuntu-20.04"]
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Set up Micromamba
+        uses: mamba-org/setup-micromamba@v2.0.2
+        with:
+          micromamba-version: '2.0.4-0'
+          environment-file: ci/environment_gfortran_openmpi.yml
+
+      # build and validation with GFortran's optimization
+      - name: POT3D Build and validation with GFortran with optimization using Open MPI (MPI only)
+        shell: bash -e -x -l {0}
+        run: |
+          cd tests/pot3d
+          FC="gfortran -O3 -march=native" ./build_and_run_gfortran.sh
+
+      # build and validation without GFortran's optimization
+      - name: POT3D Build and validation with GFortran without optimization using Open MPI (MPI only)
+        shell: bash -e -x -l {0}
+        run: |
+          cd tests/pot3d
+          FC="gfortran" ./build_and_run_gfortran.sh
+
+  Compile_POT3D_with_LFortran_with_OpenMPI:
+    name: "Build POT3D and validate with LFortran with Open MPI"
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: ["macos-latest", "ubuntu-20.04"]
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Set up Micromamba
+        uses: mamba-org/setup-micromamba@v2.0.2
+        with:
+          micromamba-version: '2.0.4-0'
+          environment-file: ci/environment_lfortran_openmpi.yml
+
+      # build and validation with LFortran's optimization
+      - name: POT3D Build and validation with LFortran with optimization using Open MPI (MPI only)
+        shell: bash -e -x -l {0}
+        run: |
+          cd tests/pot3d
+          FC="lfortran --fast" ./build_and_run_lfortran.sh
+
+      # build and validation without LFortran's optimization
+      - name: POT3D Build and validation with LFortran without optimization using Open MPI (MPI only)
+        shell: bash -e -x -l {0}
+        run: |
+          cd tests/pot3d
+          FC="lfortran" ./build_and_run_lfortran.sh
+
+  Compile_POT3D_with_GFortran_with_MPICH:
+    name: "Build POT3D and validate with GFortran with MPICH"
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
@@ -69,7 +171,7 @@ jobs:
         uses: mamba-org/setup-micromamba@v2.0.2
         with:
          micromamba-version: '2.0.4-0'
-          environment-file: ci/environment_gfortran.yml
+          environment-file: ci/environment_gfortran_mpich.yml
 
       # build and validation with GFortran's optimization
       - name: POT3D Build and validation with GFortran with optimization (MPI only)
@@ -85,8 +187,8 @@ jobs:
          cd tests/pot3d
          FC="gfortran" ./build_and_run_gfortran.sh
 
-  Compile_POT3D_with_LFortran:
-    name: "Build POT3D and validate with LFortran"
+  Compile_POT3D_with_LFortran_with_MPICH:
+    name: "Build POT3D and validate with LFortran with MPICH"
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
@@ -98,17 +200,17 @@ jobs:
         uses: mamba-org/setup-micromamba@v2.0.2
         with:
          micromamba-version: '2.0.4-0'
-          environment-file: ci/environment_lfortran.yml
+          environment-file: ci/environment_lfortran_mpich.yml
 
       # build and validation with LFortran's optimization
-      - name: POT3D Build and validation with LFortran with optimization (MPI only)
+      - name: POT3D Build and validation with LFortran with optimization using MPICH (MPI only)
        shell: bash -e -x -l {0}
        run: |
          cd tests/pot3d
          FC="lfortran --fast" ./build_and_run_lfortran.sh
 
       # build and validation without LFortran's optimization
-      - name: POT3D Build and validation with LFortran without optimization (MPI only)
+      - name: POT3D Build and validation with LFortran without optimization using MPICH (MPI only)
        shell: bash -e -x -l {0}
        run: |
          cd tests/pot3d
diff --git a/ci/environment_gfortran_mpich.yml b/ci/environment_gfortran_mpich.yml
new file mode 100644
index 0000000..4cae468
--- /dev/null
+++ b/ci/environment_gfortran_mpich.yml
@@ -0,0 +1,7 @@
+name: c_mpich_gfortran_env
+channels:
+  - conda-forge
+dependencies:
+  # GFortran version 14 isn't available on conda for macOS, at least
+  - gfortran=13.2.0
+  - mpich=4.3.0
diff --git a/ci/environment_gfortran.yml b/ci/environment_gfortran_openmpi.yml
similarity index 100%
rename from ci/environment_gfortran.yml
rename to ci/environment_gfortran_openmpi.yml
diff --git a/ci/environment_lfortran_mpich.yml b/ci/environment_lfortran_mpich.yml
new file mode 100644
index 0000000..bb5db1d
--- /dev/null
+++ b/ci/environment_lfortran_mpich.yml
@@ -0,0 +1,8 @@
+name: c_mpich_lfortran_env
+channels:
+  - conda-forge
+dependencies:
+  - lfortran=0.49.0
+  - make
+  - cmake
+  - mpich=4.3.0
diff --git a/ci/environment_lfortran.yml b/ci/environment_lfortran_openmpi.yml
similarity index 100%
rename from ci/environment_lfortran.yml
rename to ci/environment_lfortran_openmpi.yml
diff --git a/src/mpi.f90 b/src/mpi.f90
index 4736454..4841680 100644
--- a/src/mpi.f90
+++ b/src/mpi.f90
@@ -9,10 +9,10 @@ module mpi
     integer, parameter :: MPI_PROC_NULL = -1
     integer, parameter :: MPI_SUCCESS = 0
 
-    integer, parameter :: MPI_COMM_WORLD = 0
+    integer, parameter :: MPI_COMM_WORLD = -1000
     real(8), parameter :: MPI_IN_PLACE = -1
     integer, parameter :: MPI_SUM = 1
-    integer, parameter :: MPI_INFO_NULL = 0
+    integer, parameter :: MPI_INFO_NULL = -2000
     integer :: MPI_STATUS_IGNORE = 0
     ! NOTE: I've no idea for how to implement this, refer
     ! see section 2.5.4 page 21 of mpi40-report.pdf
@@ -141,16 +141,16 @@ subroutine MPI_Init_thread_proc(required, provided, ierr)
     type(c_ptr) :: argv = c_null_ptr
     integer(c_int) :: c_required
     integer(c_int) :: c_provided
-    
+
     ! Map Fortran MPI_THREAD_FUNNELED to C MPI_THREAD_FUNNELED if needed
     c_required = int(required, c_int)
-    
+
     ! Call C MPI_Init_thread directly
     local_ierr = c_mpi_init_thread(argc, argv, required, provided)
-    
+
     ! Copy output values back to Fortran
     provided = int(c_provided)
-    
+
     if (present(ierr)) then
         ierr = int(local_ierr)
     else if (local_ierr /= 0) then
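Background on the new sentinel values (not part of the patch): the integer handle an MPI library associates with MPI_COMM_WORLD on the Fortran side is implementation-specific (Open MPI appears to use small table indices starting at 0, while MPICH reuses the C handle bits), so the old Fortran-side value 0 only happened to translate correctly through MPI_Comm_f2c under Open MPI. The distinct sentinels -1000 and -2000 avoid relying on either layout. A quick way to see the difference on a given installation is a tiny standalone C check like the following; it assumes an MPI compiler wrapper such as mpicc is available and is illustration only.

/* print_handles.c (illustrative): build with `mpicc print_handles.c` */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    /* The values printed here differ between Open MPI and MPICH, which is why
       the Fortran layer now uses its own sentinels instead of assuming 0. */
    printf("MPI_COMM_WORLD as a Fortran handle: %d\n", (int) MPI_Comm_c2f(MPI_COMM_WORLD));
    printf("MPI_INFO_NULL  as a Fortran handle: %d\n", (int) MPI_Info_c2f(MPI_INFO_NULL));
    MPI_Finalize();
    return 0;
}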
diff --git a/src/mpi_wrapper.c b/src/mpi_wrapper.c
index 6fa37ef..3639c58 100644
--- a/src/mpi_wrapper.c
+++ b/src/mpi_wrapper.c
@@ -3,20 +3,32 @@
 #include 
 
 #define MPI_STATUS_SIZE 5
+#define FORTRAN_MPI_COMM_WORLD -1000
+#define FORTRAN_MPI_INFO_NULL -2000
 
-// void mpi_init_wrapper(int *ierr) {
-//     int argc = 0;
-//     char **argv = NULL;
-//     *ierr = MPI_Init(&argc, &argv);
-// }
+MPI_Info get_c_info_from_fortran(int info) {
+    if (info == FORTRAN_MPI_INFO_NULL) {
+        return MPI_INFO_NULL;
+    } else {
+        return MPI_Info_f2c(info);
+    }
+}
+
+MPI_Comm get_c_comm_from_fortran(int comm_f) {
+    if (comm_f == FORTRAN_MPI_COMM_WORLD) {
+        return MPI_COMM_WORLD;
+    } else {
+        return MPI_Comm_f2c(comm_f);
+    }
+}
 
 void mpi_comm_size_wrapper(int *comm_f, int *size, int *ierr) {
-    MPI_Comm comm = MPI_Comm_f2c(*comm_f);
+    MPI_Comm comm = get_c_comm_from_fortran(*comm_f);
     *ierr = MPI_Comm_size(comm, size);
 }
 
 void mpi_bcast_int_wrapper(int *buffer, int *count, int *datatype_f, int *root, int *comm_f, int *ierror) {
-    MPI_Comm comm = MPI_Comm_f2c(*comm_f);
+    MPI_Comm comm = get_c_comm_from_fortran(*comm_f);
     MPI_Datatype datatype;
     switch (*datatype_f) {
         case 2:
@@ -36,7 +48,7 @@ void mpi_bcast_int_wrapper(int *buffer, int *count, int *datatype_f, int *root,
 }
 
 void mpi_bcast_real_wrapper(double *buffer, int *count, int *datatype_f, int *root, int *comm_f, int *ierror) {
-    MPI_Comm comm = MPI_Comm_f2c(*comm_f);
+    MPI_Comm comm = get_c_comm_from_fortran(*comm_f);
     MPI_Datatype datatype;
     switch (*datatype_f) {
         case 0:
@@ -55,7 +67,7 @@ void mpi_allgather_int_wrapper(const int *sendbuf, int *sendcount, int *sendtype_f,
     int *recvbuf, int *recvcount, int *recvtype_f, int *comm_f, int *ierror) {
-    MPI_Comm comm = MPI_Comm_f2c(*comm_f);
+    MPI_Comm comm = get_c_comm_from_fortran(*comm_f);
     MPI_Datatype sendtype, recvtype;
 
     switch (*sendtype_f) {
@@ -83,7 +95,7 @@ void mpi_allgather_int_wrapper(const int *sendbuf, int *sendcount, int *sendtype
 
 void mpi_allgather_real_wrapper(const double *sendbuf, int *sendcount, int *sendtype_f,
     double *recvbuf, int *recvcount, int *recvtype_f, int *comm_f, int *ierror) {
-    MPI_Comm comm = MPI_Comm_f2c(*comm_f);
+    MPI_Comm comm = get_c_comm_from_fortran(*comm_f);
     MPI_Datatype sendtype, recvtype;
 
     switch (*sendtype_f) {
@@ -117,7 +129,7 @@ void mpi_allgather_real_wrapper(const double *sendbuf, int *sendcount, int *send
 
 void mpi_isend_wrapper(const double *buf, int *count, int *datatype_f,
     int *dest, int *tag, int *comm_f, int *request_f, int *ierror) {
-    MPI_Comm comm = MPI_Comm_f2c(*comm_f);
+    MPI_Comm comm = get_c_comm_from_fortran(*comm_f);
     MPI_Datatype datatype;
     switch (*datatype_f) {
         case 0:
@@ -139,7 +151,7 @@ void mpi_isend_wrapper(const double *buf, int *count, int *datatype_f,
 
 void mpi_irecv_wrapper(double *buf, int *count, int *datatype_f,
     int *source, int *tag, int *comm_f, int *request_f, int *ierror) {
-    MPI_Comm comm = MPI_Comm_f2c(*comm_f);
+    MPI_Comm comm = get_c_comm_from_fortran(*comm_f);
     MPI_Datatype datatype;
     switch (*datatype_f) {
         case 0:
@@ -160,7 +172,7 @@ void mpi_irecv_wrapper(double *buf, int *count, int *datatype_f,
 
 void mpi_allreduce_wrapper_real(const double *sendbuf, double *recvbuf, int *count,
     int *datatype_f, int *op_f, int *comm_f, int *ierror) {
-    MPI_Comm comm = MPI_Comm_f2c(*comm_f);
+    MPI_Comm comm = get_c_comm_from_fortran(*comm_f);
    MPI_Datatype datatype;
    switch (*datatype_f) {
        case 0:
@@ -193,7 +205,7 @@ void mpi_allreduce_wrapper_real(const double *sendbuf, double *recvbuf, int *cou
 
 void mpi_allreduce_wrapper_int(const int *sendbuf, int *recvbuf, int *count,
     int *datatype_f, int *op_f, int *comm_f, int *ierror) {
-    MPI_Comm comm = MPI_Comm_f2c(*comm_f);
+    MPI_Comm comm = get_c_comm_from_fortran(*comm_f);
     MPI_Datatype datatype;
     datatype = MPI_INT;
 
@@ -207,21 +219,22 @@
 }
 
 void mpi_barrier_wrapper(int *comm_f, int *ierror) {
-    MPI_Comm comm = MPI_Comm_f2c(*comm_f);
+    MPI_Comm comm = get_c_comm_from_fortran(*comm_f);
     *ierror = MPI_Barrier(comm);
 }
 
 void mpi_comm_rank_wrapper(int *comm_f, int *rank, int *ierror) {
-    MPI_Comm comm = MPI_Comm_f2c(*comm_f);
+    MPI_Comm comm = get_c_comm_from_fortran(*comm_f);
     *ierror = MPI_Comm_rank(comm, rank);
 }
 
 void mpi_comm_split_type_wrapper(int *comm_f, int *split_type, int *key,
     int *info_f, int *newcomm_f, int *ierror) {
-    MPI_Comm comm = MPI_Comm_f2c(*comm_f);
-    MPI_Info info = MPI_Info_f2c(*info_f);
-    MPI_Comm newcomm = MPI_Comm_f2c(*newcomm_f);
+    MPI_Comm comm = get_c_comm_from_fortran(*comm_f);
+    MPI_Info info = get_c_info_from_fortran(*info_f);
+    MPI_Comm newcomm;
     *ierror = MPI_Comm_split_type( comm, *split_type, *key , info, &newcomm);
+    *newcomm_f = MPI_Comm_c2f(newcomm);
 }
 
 void mpi_recv_wrapper(double *buf, int *count, int *datatype_f, int *source,
@@ -239,7 +252,7 @@ void mpi_recv_wrapper(double *buf, int *count, int *datatype_f, int *source,
             return;
     }
 
-    MPI_Comm comm = MPI_Comm_f2c(*comm_f);
+    MPI_Comm comm = get_c_comm_from_fortran(*comm_f);
     MPI_Status status;
     *ierror = MPI_Recv(buf, *count, datatype, *source, *tag, comm, &status);
     if (*ierror == MPI_SUCCESS) {
@@ -289,26 +302,26 @@ void mpi_ssend_wrapper(double *buf, int *count, int *datatype_f, int *dest,
             return;
     }
 
-    MPI_Comm comm = MPI_Comm_f2c(*comm_f);
+    MPI_Comm comm = get_c_comm_from_fortran(*comm_f);
     *ierror = MPI_Ssend(buf, *count, datatype, *dest, *tag, comm);
 }
 
 void mpi_cart_create_wrapper(int * comm_f, int * ndims, int * dims, int * periods,
     int * reorder, int * newcomm_f, int * ierror){
     MPI_Comm newcomm = MPI_COMM_NULL;
-    MPI_Comm comm = MPI_Comm_f2c(*comm_f);
+    MPI_Comm comm = get_c_comm_from_fortran(*comm_f);
     *ierror = MPI_Cart_create(comm, *ndims, dims, periods, *reorder, &newcomm);
     *newcomm_f = MPI_Comm_c2f(newcomm);
 }
 
 void mpi_cart_coords_wrapper(int * comm_f, int * rank, int * maxdims, int * coords, int * ierror) {
-    MPI_Comm comm = MPI_Comm_f2c(*comm_f);
+    MPI_Comm comm = get_c_comm_from_fortran(*comm_f);
     *ierror = MPI_Cart_coords(comm, *rank, *maxdims, coords);
 }
 
 void mpi_cart_shift_wrapper(int * comm_f, int * dir, int * disp, int * rank_source, int * rank_dest, int * ierror) {
-    MPI_Comm comm = MPI_Comm_f2c(*comm_f);
+    MPI_Comm comm = get_c_comm_from_fortran(*comm_f);
     *ierror = MPI_Cart_shift(comm, *dir, *disp, rank_source, rank_dest);
 }
 
@@ -318,7 +331,7 @@ void mpi_dims_create_wrapper(int * nnodes, int * ndims, int * dims, int * ierror
 }
 
 void mpi_cart_sub_wrapper(int * comm_f, int * rmains_dims, int * newcomm_f, int * ierror) {
-    MPI_Comm comm = MPI_Comm_f2c(*comm_f);
+    MPI_Comm comm = get_c_comm_from_fortran(*comm_f);
     MPI_Comm newcomm = MPI_COMM_NULL;
     *ierror = MPI_Cart_sub(comm, rmains_dims, &newcomm);
     *newcomm_f = MPI_Comm_c2f(newcomm);
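Not part of the patch: a minimal driver sketch showing how the wrappers above are meant to be called with the Fortran-side sentinel handles, and that the communicator returned by mpi_comm_split_type_wrapper is a real Fortran handle (produced by MPI_Comm_c2f) rather than the sentinel. The file name driver.c and the build line are illustrative; it assumes linking against src/mpi_wrapper.c and an MPI library, and it relies on the wrapper forwarding split_type unchanged, which the code above does.

/* Build (illustrative): mpicc driver.c src/mpi_wrapper.c -o driver
   Run                 : mpiexec -n 2 ./driver */
#include <mpi.h>
#include <stdio.h>

/* Prototypes of the wrappers defined in src/mpi_wrapper.c */
void mpi_comm_rank_wrapper(int *comm_f, int *rank, int *ierror);
void mpi_comm_size_wrapper(int *comm_f, int *size, int *ierr);
void mpi_comm_split_type_wrapper(int *comm_f, int *split_type, int *key,
                                 int *info_f, int *newcomm_f, int *ierror);

int main(int argc, char **argv) {
    int world_f = -1000;                    /* MPI_COMM_WORLD sentinel from src/mpi.f90 */
    int info_null_f = -2000;                /* MPI_INFO_NULL sentinel from src/mpi.f90  */
    int split_type = MPI_COMM_TYPE_SHARED;  /* forwarded unchanged by the wrapper       */
    int key = 0, rank, shared_f, shared_size, ierr;

    MPI_Init(&argc, &argv);

    mpi_comm_rank_wrapper(&world_f, &rank, &ierr);
    mpi_comm_split_type_wrapper(&world_f, &split_type, &key,
                                &info_null_f, &shared_f, &ierr);

    /* shared_f now holds MPI_Comm_c2f(newcomm), so the next call goes through
       the MPI_Comm_f2c branch of get_c_comm_from_fortran. */
    mpi_comm_size_wrapper(&shared_f, &shared_size, &ierr);
    printf("rank %d: shared-node communicator has %d ranks\n", rank, shared_size);

    MPI_Finalize();
    return 0;
}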
diff --git a/tests/run_tests.sh b/tests/run_tests.sh
index b1cc88c..5eaaabf 100755
--- a/tests/run_tests.sh
+++ b/tests/run_tests.sh
@@ -37,3 +37,5 @@ for file in *.f90; do
     done
 done
 
+
+git clean -dfx
diff --git a/tests/split_type_1.f90 b/tests/split_type_1.f90
new file mode 100644
index 0000000..5722dca
--- /dev/null
+++ b/tests/split_type_1.f90
@@ -0,0 +1,33 @@
+module split_type_1_mod
+    use mpi
+    implicit none
+    integer :: nproc
+    integer :: nprocsh
+    integer :: iprocw
+    integer :: iprocsh
+    integer :: comm_shared
+
+    contains
+
+    subroutine init_mpi
+        implicit none
+        integer :: ierr,tcheck
+        call MPI_Init_thread (MPI_THREAD_FUNNELED,tcheck,ierr)
+        call MPI_Comm_size (MPI_COMM_WORLD,nproc,ierr)
+        call MPI_Comm_rank (MPI_COMM_WORLD,iprocw,ierr)
+        call MPI_Comm_split_type (MPI_COMM_WORLD,MPI_COMM_TYPE_SHARED,0, &
+                                  MPI_INFO_NULL,comm_shared,ierr)
+        call MPI_Comm_size (comm_shared,nprocsh,ierr)
+        call MPI_Comm_rank (comm_shared,iprocsh,ierr)
+    end subroutine
+end module
+
+program split_type_1
+    use mpi
+    use split_type_1_mod
+    implicit none
+    integer :: ierr
+    call init_mpi
+    call MPI_Finalize (ierr)
+    if (ierr /= 0) error stop
+end program split_type_1
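For reference (not part of the patch), the call sequence that split_type_1.f90 exercises through the wrapper corresponds to the following plain C MPI program; it is a sketch of the expected behavior, with variable names mirroring the Fortran test and otherwise arbitrary.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv) {
    int provided, nproc, iprocw, nprocsh, iprocsh;
    MPI_Comm comm_shared;

    MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
    MPI_Comm_rank(MPI_COMM_WORLD, &iprocw);

    /* Same sequence as init_mpi in split_type_1.f90: split by shared-memory
       node, then query size and rank of the node-local communicator. */
    MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0,
                        MPI_INFO_NULL, &comm_shared);
    MPI_Comm_size(comm_shared, &nprocsh);
    MPI_Comm_rank(comm_shared, &iprocsh);

    printf("world rank %d/%d, node-local rank %d/%d\n",
           iprocw, nproc, iprocsh, nprocsh);

    MPI_Comm_free(&comm_shared);
    MPI_Finalize();
    return 0;
}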