From 8a8f6e953028377d643f0ab58a67648f61ecfc29 Mon Sep 17 00:00:00 2001
From: Gaurav Dhingra
Date: Wed, 26 Mar 2025 15:46:18 +0530
Subject: [PATCH 1/3] add test program to compute pi using Monte Carlo method

---
 src/mpi.f90            | 18 ++++++++++++-
 src/mpi_c_bindings.f90 |  8 ++++++
 src/mpi_wrapper.c      | 36 ++++++++++++++++++++++++++
 tests/compute_pi.f90   | 58 ++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 119 insertions(+), 1 deletion(-)
 create mode 100644 tests/compute_pi.f90

diff --git a/src/mpi.f90 b/src/mpi.f90
index 62038a7..53ade5c 100644
--- a/src/mpi.f90
+++ b/src/mpi.f90
@@ -110,6 +110,10 @@ module mpi
         module procedure MPI_Cart_coords_proc
     end interface
 
+    interface MPI_Reduce
+        module procedure MPI_Reduce_scalar_int
+    end interface
+
     contains
 
     subroutine MPI_Init_proc(ierr)
@@ -428,4 +432,16 @@ subroutine MPI_Cart_sub_proc (comm, remain_dims, newcomm, ierror)
         end where
         call c_mpi_cart_sub(comm, remain_dims_i, newcomm, ierror)
     end subroutine
-end module mpi
\ No newline at end of file
+
+    subroutine MPI_Reduce_scalar_int(sendbuf, recvbuf, count, datatype, op, root, comm, ierror)
+        use mpi_c_bindings, only: c_mpi_reduce
+        integer, intent(in) :: sendbuf
+        integer, intent(out) :: recvbuf
+        integer, intent(in) :: count, root
+        integer, intent(in) :: datatype
+        integer, intent(in) :: op
+        integer, intent(in) :: comm
+        integer, intent(out), optional :: ierror
+        call c_mpi_reduce(sendbuf, recvbuf, count, datatype, op, root, comm, ierror)
+    end subroutine
+end module mpi
diff --git a/src/mpi_c_bindings.f90 b/src/mpi_c_bindings.f90
index 92a4188..895da37 100644
--- a/src/mpi_c_bindings.f90
+++ b/src/mpi_c_bindings.f90
@@ -213,5 +213,13 @@ subroutine c_mpi_cart_sub(comm, remain_dims, newcomm, ierror) bind(C, name ="mpi
         integer(c_int), intent(in) :: remain_dims(*)
         integer(c_int), intent(out) :: newcomm, ierror
     end subroutine
+
+    subroutine c_mpi_reduce(sendbuf, recvbuf, count, datatype, op, root, comm, ierror) bind(C, name="mpi_reduce_wrapper")
+        use iso_c_binding, only: c_int
+        integer(c_int), intent(in) :: sendbuf
+        integer(c_int), intent(out) :: recvbuf
+        integer(c_int), intent(in) :: count, datatype, op, root, comm
+        integer(c_int), intent(out), optional :: ierror
+    end subroutine
 end interface
 end module mpi_c_bindings
diff --git a/src/mpi_wrapper.c b/src/mpi_wrapper.c
index d75db9f..ff16f9b 100644
--- a/src/mpi_wrapper.c
+++ b/src/mpi_wrapper.c
@@ -15,6 +15,19 @@ MPI_Info get_c_info_from_fortran(int info) {
     }
 }
 
+// MPI_Datatype get_c_datatype_from_fortran(int datatype) {
+//     if (datatype == 2) {
+//         return MPI_Int;
+//     } else if (datatype == 0) {
+//         return MPI_FLOAT;
+//     } else if (datatype == 1) {
+//         return MPI_DOUBLE;
+//     } else {
+//         printf("Unsupported datatype provided\n");
+//         exit 1;
+//     }
+// }
+
 MPI_Comm get_c_comm_from_fortran(int comm_f) {
     if (comm_f == FORTRAN_MPI_COMM_WORLD) {
         return MPI_COMM_WORLD;
@@ -337,3 +350,26 @@ void mpi_cart_sub_wrapper(int * comm_f, int * rmains_dims, int * newcomm_f, int
     *ierror = MPI_Cart_sub(comm, rmains_dims, &newcomm);
     *newcomm_f = MPI_Comm_c2f(newcomm);
 }
+
+void mpi_reduce_wrapper(const int* sendbuf, int* recvbuf, int* count, int* datatype_f,
+                        int* op_f, int* root, int* comm_f, int* ierror
+) {
+    MPI_Datatype datatype;
+    switch (*datatype_f) {
+        case 2:
+            datatype = MPI_INT;
+            break;
+        case 0:
+            datatype = MPI_FLOAT;
+            break;
+        case 1:
+            datatype = MPI_DOUBLE;
+            break;
+        default:
+            *ierror = -1;
+            return;
+    }
+    MPI_Op op = MPI_Op_f2c(*op_f);
+    MPI_Comm comm = get_c_comm_from_fortran(*comm_f);
+    *ierror = MPI_Reduce(sendbuf, recvbuf, *count, datatype, op, *root, comm);
+}
diff --git a/tests/compute_pi.f90 b/tests/compute_pi.f90
new file mode 100644
index 0000000..7c6a33e
--- /dev/null
+++ b/tests/compute_pi.f90
@@ -0,0 +1,58 @@
+!> program obtained using: https://grok.com/share/bGVnYWN5_9c35b809-fc05-4856-82d2-be4ded09d890
+!> with query: Write a simple MPI code in Fortran to compute "pi" using Monte Carlo in parallel
+program compute_pi
+    use mpi
+    implicit none
+
+    integer, parameter :: N_per_process = 100000 ! Number of points per process
+    integer :: rank, num_procs, ierr ! MPI variables: rank, number of processes, error code
+    integer :: local_M = 0 ! Local count of points inside quarter circle
+    integer :: global_M ! Global count of points inside quarter circle
+    integer :: total_N ! Total number of points across all processes
+    real(8) :: x, y ! Coordinates of random points
+    real(8) :: pi_approx ! Approximated value of pi
+    integer :: i ! Loop variable
+    integer :: seed_size ! Size of the random seed array
+    integer, allocatable :: seed(:) ! Seed array for random number generator
+
+    ! Initialize MPI
+    call MPI_Init(ierr)
+    call MPI_Comm_size(MPI_COMM_WORLD, num_procs, ierr)
+    call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
+
+    ! Set up unique random seed for each process
+    call random_seed(size=seed_size)
+    allocate(seed(seed_size))
+    do i = 1, seed_size
+        seed(i) = 12345 + rank + i ! Unique seed based on rank
+    end do
+    call random_seed(put=seed)
+
+    ! Generate random points and count those inside the quarter circle
+    do i = 1, N_per_process
+        call random_number(x)
+        call random_number(y)
+        if (x**2 + y**2 <= 1.0d0) then
+            local_M = local_M + 1
+        end if
+    end do
+
+    ! Sum local counts into global count on root process (rank 0)
+    call MPI_Reduce(local_M, global_M, 1, MPI_INTEGER, MPI_SUM, 0, MPI_COMM_WORLD, ierr)
+
+    ! Compute and display the result on the root process
+    if (rank == 0) then
+        total_N = N_per_process * num_procs
+        pi_approx = 4.0d0 * dble(global_M) / dble(total_N)
+        print *, 'Number of processes:', num_procs
+        print *, 'Total number of points:', total_N
+        print *, 'Approximation of pi:', pi_approx
+    end if
+
+    ! Finalize MPI
+    call MPI_Finalize(ierr)
+
+    ! Clean up (optional, as Fortran deallocates automatically at program end)
+    if (allocated(seed)) deallocate(seed)
+
+end program compute_pi
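
The chain added by this patch is: the generic interface in mpi.f90 routes an integer-scalar MPI_Reduce call to MPI_Reduce_scalar_int, which forwards it through the bind(C) interface c_mpi_reduce to mpi_reduce_wrapper in C; the wrapper translates the module's integer datatype code into a real MPI_Datatype and only then calls the C MPI_Reduce. The program below is not part of the patch; it is a minimal sketch that isolates that translation step, using the same 2 -> MPI_INT, 0 -> MPI_FLOAT, 1 -> MPI_DOUBLE codes as the switch in mpi_reduce_wrapper. The helper name datatype_from_code and the rank-sum demo are invented for illustration; it can be built with mpicc and run with mpiexec to sanity-check the mapping on its own.

/* Standalone sketch (not part of the patch): the integer-code -> MPI_Datatype
 * translation used by mpi_reduce_wrapper above, exercised in isolation. */
#include <mpi.h>
#include <stdio.h>

static MPI_Datatype datatype_from_code(int code, int *err)
{
    /* Same mapping as the switch in mpi_reduce_wrapper:
     * 2 -> MPI_INT, 0 -> MPI_FLOAT, 1 -> MPI_DOUBLE; anything else fails. */
    *err = 0;
    switch (code) {
        case 2: return MPI_INT;
        case 0: return MPI_FLOAT;
        case 1: return MPI_DOUBLE;
        default: *err = -1; return MPI_DATATYPE_NULL;
    }
}

int main(int argc, char **argv)
{
    int rank, err;
    int local, total = 0;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Reduce one int per rank, selecting the datatype through the
     * integer code exactly as the wrapper does for code 2. */
    local = rank + 1;
    MPI_Datatype dt = datatype_from_code(2, &err);
    if (err == 0)
        MPI_Reduce(&local, &total, 1, dt, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank == 0 && err == 0)
        printf("sum of (rank+1) over all ranks = %d\n", total);

    MPI_Finalize();
    return 0;
}
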
From 6cfbd0c463c0518a31ffc471bbeb5b8c925ba619 Mon Sep 17 00:00:00 2001
From: Gaurav Dhingra
Date: Wed, 26 Mar 2025 17:17:43 +0530
Subject: [PATCH 2/3] use function to get MPI_Op

---
 src/mpi.f90       |  2 +-
 src/mpi_wrapper.c | 11 ++++++++++-
 2 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/src/mpi.f90 b/src/mpi.f90
index 53ade5c..3325471 100644
--- a/src/mpi.f90
+++ b/src/mpi.f90
@@ -11,7 +11,7 @@ module mpi
     integer, parameter :: MPI_COMM_WORLD = -1000
     real(8), parameter :: MPI_IN_PLACE = -1002
-    integer, parameter :: MPI_SUM = 1
+    integer, parameter :: MPI_SUM = -2300
     integer, parameter :: MPI_INFO_NULL = -2000
     integer :: MPI_STATUS_IGNORE = 0 ! NOTE: I've no idea for how to implement this, refer
diff --git a/src/mpi_wrapper.c b/src/mpi_wrapper.c
index ff16f9b..d5cc7fc 100644
--- a/src/mpi_wrapper.c
+++ b/src/mpi_wrapper.c
@@ -6,6 +6,7 @@
 #define FORTRAN_MPI_COMM_WORLD -1000
 #define FORTRAN_MPI_INFO_NULL -2000
 #define FORTRAN_MPI_IN_PLACE -1002
+#define FORTRAN_MPI_SUM -2300
 
 MPI_Info get_c_info_from_fortran(int info) {
     if (info == FORTRAN_MPI_INFO_NULL) {
@@ -15,6 +16,14 @@ MPI_Info get_c_info_from_fortran(int info) {
     }
 }
 
+MPI_Op get_c_op_from_fortran(int op) {
+    if (op == FORTRAN_MPI_SUM) {
+        return MPI_SUM;
+    } else {
+        return MPI_Op_f2c(op);
+    }
+}
+
 // MPI_Datatype get_c_datatype_from_fortran(int datatype) {
 //     if (datatype == 2) {
 //         return MPI_Int;
@@ -369,7 +378,7 @@ void mpi_reduce_wrapper(const int* sendbuf, int* recvbuf, int* count, int* datat
             *ierror = -1;
             return;
     }
-    MPI_Op op = MPI_Op_f2c(*op_f);
+    MPI_Op op = get_c_op_from_fortran(*op_f);
     MPI_Comm comm = get_c_comm_from_fortran(*comm_f);
     *ierror = MPI_Reduce(sendbuf, recvbuf, *count, datatype, op, *root, comm);
 }
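
Patch 2 stops relying on MPI_Op_f2c alone: mpi.f90 now defines MPI_SUM as the sentinel -2300, and get_c_op_from_fortran in the wrapper recognises that sentinel and returns the C MPI_SUM, falling back to MPI_Op_f2c for any other value. Keeping the sentinel negative follows the pattern already used for MPI_COMM_WORLD (-1000), MPI_IN_PLACE (-1002) and MPI_INFO_NULL (-2000), so it cannot be mistaken for a genuine Fortran handle. The sketch below is not part of the patch; it shows how the same lookup could be extended to further reduction operators. FORTRAN_MPI_SUM = -2300 comes from the patch, while the -2301/-2302 values and the name op_from_fortran_ext are hypothetical placeholders that would have to agree with whatever parameters mpi.f90 defines.

/* Sketch only: the sentinel-plus-fallback pattern of get_c_op_from_fortran,
 * extended to two more reduction operators with hypothetical sentinels. */
#include <mpi.h>
#include <stdio.h>

#define FORTRAN_MPI_SUM  -2300   /* from the patch */
#define FORTRAN_MPI_MAX  -2301   /* hypothetical */
#define FORTRAN_MPI_PROD -2302   /* hypothetical */

static MPI_Op op_from_fortran_ext(int op)
{
    switch (op) {
        case FORTRAN_MPI_SUM:  return MPI_SUM;
        case FORTRAN_MPI_MAX:  return MPI_MAX;
        case FORTRAN_MPI_PROD: return MPI_PROD;
        default:               return MPI_Op_f2c(op); /* assume a real Fortran handle */
    }
}

int main(int argc, char **argv)
{
    int rank, local, max_rank = 0;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Resolve the (hypothetical) MAX sentinel and use it in a real reduction. */
    local = rank;
    MPI_Reduce(&local, &max_rank, 1, MPI_INT, op_from_fortran_ext(FORTRAN_MPI_MAX),
               0, MPI_COMM_WORLD);

    if (rank == 0)
        printf("highest rank = %d\n", max_rank);

    MPI_Finalize();
    return 0;
}
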
From 1a7344a49b64119b813a8f2a5e6c365b383a8b96 Mon Sep 17 00:00:00 2001
From: Gaurav Dhingra
Date: Wed, 26 Mar 2025 17:25:31 +0530
Subject: [PATCH 3/3] add check to make sure that the computed PI value is within range

---
 src/mpi_wrapper.c    | 17 ++---------------
 tests/compute_pi.f90 |  4 ++++
 2 files changed, 6 insertions(+), 15 deletions(-)

diff --git a/src/mpi_wrapper.c b/src/mpi_wrapper.c
index d5cc7fc..2eef39e 100644
--- a/src/mpi_wrapper.c
+++ b/src/mpi_wrapper.c
@@ -24,19 +24,6 @@ MPI_Op get_c_op_from_fortran(int op) {
     }
 }
 
-// MPI_Datatype get_c_datatype_from_fortran(int datatype) {
-//     if (datatype == 2) {
-//         return MPI_Int;
-//     } else if (datatype == 0) {
-//         return MPI_FLOAT;
-//     } else if (datatype == 1) {
-//         return MPI_DOUBLE;
-//     } else {
-//         printf("Unsupported datatype provided\n");
-//         exit 1;
-//     }
-// }
-
 MPI_Comm get_c_comm_from_fortran(int comm_f) {
     if (comm_f == FORTRAN_MPI_COMM_WORLD) {
         return MPI_COMM_WORLD;
@@ -361,8 +348,8 @@ void mpi_cart_sub_wrapper(int * comm_f, int * rmains_dims, int * newcomm_f, int
     *ierror = MPI_Cart_sub(comm, rmains_dims, &newcomm);
     *newcomm_f = MPI_Comm_c2f(newcomm);
 }
 
 void mpi_reduce_wrapper(const int* sendbuf, int* recvbuf, int* count, int* datatype_f,
-                        int* op_f, int* root, int* comm_f, int* ierror
-) {
+                        int* op_f, int* root, int* comm_f, int* ierror)
+{
     MPI_Datatype datatype;
     switch (*datatype_f) {
         case 2:
diff --git a/tests/compute_pi.f90 b/tests/compute_pi.f90
index 7c6a33e..6edf1bb 100644
--- a/tests/compute_pi.f90
+++ b/tests/compute_pi.f90
@@ -47,6 +47,10 @@ program compute_pi
         print *, 'Number of processes:', num_procs
         print *, 'Total number of points:', total_N
         print *, 'Approximation of pi:', pi_approx
+        ! ensure the computed pi is within a reasonable range
+        if (pi_approx < 3.1d0 .or. pi_approx > 3.2d0) then
+            error stop 'Computed pi value is out of expected range (3.1 to 3.2)!'
+        end if
     end if
 
     ! Finalize MPI
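
Patch 3 does two things: it tidies the wrapper (brace placement in the mpi_reduce_wrapper signature, removal of the dead commented-out get_c_datatype_from_fortran), and it turns tests/compute_pi.f90 into a real test by failing with error stop when the estimate falls outside 3.1..3.2. With 100000 points per process the standard error of the estimate is roughly 0.005 for a single rank, and smaller with more ranks, so that window is wide enough that the check will not trip for statistical reasons. The program below is not part of the patch series; it is a sketch of the same Monte Carlo computation written directly against the C MPI API, which can serve as an independent cross-check of the wrapper path. The srand seeding and the output format are illustrative choices, not taken from the Fortran test.

/* Standalone cross-check (not part of the patch series): the same Monte Carlo
 * estimate against the C MPI API, mirroring tests/compute_pi.f90: 100000
 * points per rank, MPI_Reduce with MPI_SUM, and the 3.1..3.2 sanity window. */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    const int n_per_process = 100000;
    int rank, num_procs, i;
    int local_m = 0, global_m = 0;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    srand(12345 + rank);  /* rank-dependent seed, as in the Fortran test */
    for (i = 0; i < n_per_process; i++) {
        double x = (double)rand() / RAND_MAX;
        double y = (double)rand() / RAND_MAX;
        if (x * x + y * y <= 1.0)
            local_m++;
    }

    /* Sum the per-rank hit counts on rank 0. */
    MPI_Reduce(&local_m, &global_m, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        double pi_approx = 4.0 * (double)global_m
                         / ((double)n_per_process * num_procs);
        printf("pi approx = %f\n", pi_approx);
        if (pi_approx < 3.1 || pi_approx > 3.2) {
            fprintf(stderr, "pi estimate out of the 3.1..3.2 window\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
    }

    MPI_Finalize();
    return 0;
}
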