Skip to content

Commit b832179

Browse files
committed
doc: cleaned fortran examples
Changed from .EQ. to == and other bools. Also fixed a couple of wrong codes and beautified in general. Signed-off-by: Nick Papior <nickpapior@gmail.com>
1 parent 7c1a621 commit b832179

10 files changed

+74
-46
lines changed

docs/man-openmpi/man3/MPI_Alloc_mem.3.rst

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -74,6 +74,33 @@ pointer object for this parameter. The provided argument should be a
7474
pointer to a pointer of arbitrary type (e.g., ``void **``).
7575

7676

77+
Fortran NOTES
78+
-------------
79+
80+
The :ref:`MPI_Alloc_mem` calls require the use of the ``iso_c_binding`` module
81+
due to the use of ``TYPE(C_PTR)``.
82+
83+
.. code-block:: fortran
84+
85+
use iso_c_binding
86+
87+
type(c_ptr) :: alloc_ptr
88+
integer :: size, ierr
89+
double precision, pointer :: array(:,:)
90+
91+
! A 2D array of 100 elements
92+
size = 10 * 10
93+
call MPI_Alloc_Mem(size * 8, MPI_INFO_NULL, alloc_ptr, ierr)
94+
95+
! Point to the array
96+
call c_f_pointer(alloc_ptr, array, [10, 10])
97+
98+
! ... use the array ...
99+
100+
! Free the memory, no need for the alloc_ptr
101+
call MPI_Free_mem(array, ierr)
102+
103+
77104
ERRORS
78105
------
79106

docs/man-openmpi/man3/MPI_Probe.3.rst

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -115,17 +115,17 @@ problem.
115115
CALL MPI_COMM_RANK(comm, rank, ierr)
116116
IF (rank == 0) THEN
117117
CALL MPI_SEND(i, 1, MPI_INTEGER, 2, 0, comm, ierr)
118-
ELSE IF(rank.EQ.1) THEN
118+
ELSE IF(rank == 1) THEN
119119
CALL MPI_SEND(x, 1, MPI_REAL, 2, 0, comm, ierr)
120120
ELSE
121121
DO i=1, 2
122122
CALL MPI_PROBE(MPI_ANY_SOURCE, 0,
123123
comm, status, ierr)
124-
IF (status(MPI_SOURCE) = 0) THEN
125-
CALL MPI_RECV(i, 1, MPI_INTEGER, MPI_ANY_SOURCE,
124+
IF (status(MPI_SOURCE) == 0) THEN
125+
CALL MPI_RECV(i, 1, MPI_INTEGER, MPI_ANY_SOURCE, &
126126
0, status, ierr)
127127
ELSE
128-
CALL MPI_RECV(x, 1, MPI_REAL, MPI_ANY_SOURCE,
128+
CALL MPI_RECV(x, 1, MPI_REAL, MPI_ANY_SOURCE, &
129129
0, status, ierr)
130130
END IF
131131
END DO

docs/man-openmpi/man3/MPI_Reduce.3.rst

Lines changed: 19 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -469,28 +469,28 @@ containing the largest value.
469469
...
470470
! each process has an array of 30 double: ain(30)
471471
472-
DOUBLE PRECISION ain(30), aout(30)
473-
INTEGER ind(30);
474-
DOUBLE PRECISION in(2,30), out(2,30)
475-
INTEGER i, myrank, root, ierr;
476-
477-
MPI_COMM_RANK(MPI_COMM_WORLD, myrank);
478-
DO I=1, 30
479-
in(1,i) = ain(i)
480-
in(2,i) = myrank ! myrank is coerced to a double
481-
END DO
472+
DOUBLE PRECISION :: ain(30), aout(30)
473+
INTEGER :: ind(30)
474+
DOUBLE PRECISION :: in(2,30), out(2,30)
475+
INTEGER :: i, myrank, root, ierr
476+
477+
call MPI_COMM_RANK(MPI_COMM_WORLD, myrank)
478+
DO I=1, 30
479+
in(1,i) = ain(i)
480+
in(2,i) = myrank ! myrank is coerced to a double
481+
END DO
482482
483-
MPI_REDUCE( in, out, 30, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, root,
484-
comm, ierr );
483+
call MPI_REDUCE( in, out, 30, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, root, &
484+
comm, ierr )
485485
! At this point, the answer resides on process root
486486
487-
IF (myrank .EQ. root) THEN
488-
! read ranks out
489-
DO I= 1, 30
490-
aout(i) = out(1,i)
491-
ind(i) = out(2,i) ! rank is coerced back to an integer
492-
END DO
493-
END IF
487+
IF (myrank == root) THEN
488+
! read ranks out
489+
DO I= 1, 30
490+
aout(i) = out(1,i)
491+
ind(i) = out(2,i) ! rank is coerced back to an integer
492+
END DO
493+
END IF
494494
495495
**Example 5:** Each process has a nonempty array of values. Find the
496496
minimum global value, the rank of the process that holds it, and its

docs/man-openmpi/man3/MPI_Reduce_local.3.rst

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -276,11 +276,11 @@ datatypes:
276276
MPI_SHORT_INT short and int
277277
MPI_LONG_DOUBLE_INT long double and int
278278

279-
The data type MPI_2REAL is equivalent to:
279+
The data type ``MPI_2REAL`` is equivalent to:
280280

281281
.. code-block:: fortran
282282
283-
MPI_TYPE_CONTIGUOUS(2, MPI_REAL, MPI_2REAL)
283+
call MPI_TYPE_CONTIGUOUS(2, MPI_REAL, MPI_2REAL)
284284
285285
Similar statements apply for MPI_2INTEGER, MPI_2DOUBLE_PRECISION, and
286286
MPI_2INT.

docs/man-openmpi/man3/MPI_Request_free.3.rst

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -85,14 +85,14 @@ receive has completed and the receive buffer can be reused.
8585
.. code-block:: fortran
8686
8787
CALL MPI_COMM_RANK(MPI_COMM_WORLD, rank)
88-
IF(rank.EQ.0) THEN
88+
IF(rank == 0) THEN
8989
DO i=1, n
9090
CALL MPI_ISEND(outval, 1, MPI_REAL, 1, 0, req, ierr)
9191
CALL MPI_REQUEST_FREE(req, ierr)
9292
CALL MPI_IRECV(inval, 1, MPI_REAL, 1, 0, req, ierr)
9393
CALL MPI_WAIT(req, status, ierr)
9494
END DO
95-
ELSE ! rank.EQ.1
95+
ELSE IF (rank == 1) THEN
9696
CALL MPI_IRECV(inval, 1, MPI_REAL, 0, 0, req, ierr)
9797
CALL MPI_WAIT(req, status)
9898
DO I=1, n-1

docs/man-openmpi/man3/MPI_Type_commit.3.rst

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,7 @@ different buffers, with different starting addresses.
6767

6868
.. code-block:: fortran
6969
70-
INTEGER type1, type2
70+
INTEGER :: type1, type2
7171
CALL MPI_TYPE_CONTIGUOUS(5, MPI_REAL, type1, ierr)
7272
! new type object created
7373
CALL MPI_TYPE_COMMIT(type1, ierr)

docs/man-openmpi/man3/MPI_Type_create_struct.3.rst

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -101,7 +101,7 @@ returns a datatype with type map
101101

102102
That is, two copies of ``MPI_FLOAT`` starting at 0, followed by one copy of
103103
``type1`` starting at 16, followed by three copies of ``MPI_CHAR``, starting at
104-
26. (We assume that a float occupies 4 bytes.)
104+
26.
105105

106106

107107
**Example 2:**
@@ -147,17 +147,18 @@ An example of a struct with only some components part of the type
147147
if ( rank == 0 ) {
148148
// ... initialize values
149149
MPI_Send(values, 3, mpi_dt_mystruct, 1, 0, MPI_COMM_WORLD);
150-
} else if ( rank == 1 )
150+
} else if ( rank == 1 ) {
151151
MPI_Recv(values, 3, mpi_dt_mystruct, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
152+
}
152153
153154
154-
For more information, see section 3.12.1 of the MPI-1.1 Standard.
155+
For more information, see section 5.1.2 of the MPI-4.0 Standard.
155156

156157

157158
NOTES
158159
-----
159160

160-
If an upperbound is set explicitly by using the MPI datatype MPI_UB, the
161+
If an upper bound is set explicitly by using the MPI datatype ``MPI_UB``, the
161162
corresponding index must be positive.
162163

163164
The MPI-1 Standard originally made vague statements about padding and
@@ -180,7 +181,7 @@ have allowed an implementation to make the extent an MPI datatype for
180181
this structure equal to ``2*sizeof(int)``. However, since different systems
181182
might define different paddings, a clarification to the standard made
182183
epsilon zero. Thus, if you define a structure datatype and wish to send
183-
or receive multiple items, you should explicitly include an MPI_UB entry
184+
or receive multiple items, you should explicitly include an ``MPI_UB`` entry
184185
as the last member of the structure. See the above example.
185186

186187

docs/man-openmpi/man3/MPI_Wait.3.rst

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -97,11 +97,11 @@ Example: Simple usage of nonblocking operations and :ref:`MPI_Wait`.
9797
.. code-block:: fortran
9898
9999
CALL MPI_COMM_RANK(comm, rank, ierr)
100-
IF(rank.EQ.0) THEN
100+
IF(rank == 0) THEN
101101
CALL MPI_ISEND(a(1), 10, MPI_REAL, 1, tag, comm, request, ierr)
102102
**** do some computation ****
103103
CALL MPI_WAIT(request, status, ierr)
104-
ELSE
104+
ELSE IF (rank == 1) THEN
105105
CALL MPI_IRECV(a(1), 15, MPI_REAL, 0, tag, comm, request, ierr)
106106
**** do some computation ****
107107
CALL MPI_WAIT(request, status, ierr)

docs/man-openmpi/man3/MPI_Waitany.3.rst

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -88,20 +88,20 @@ special value for the *status* argument.
8888
8989
CALL MPI_COMM_SIZE(comm, size, ierr)
9090
CALL MPI_COMM_RANK(comm, rank, ierr)
91-
IF(rank .GT 0) THEN ! client code
92-
DO WHILE(.TRUE.)
91+
IF(rank > 0) THEN ! client code
92+
DO
9393
CALL MPI_ISEND(a, n, MPI_REAL, 0, tag, comm, request, ierr)
9494
CALL MPI_WAIT(request, status, ierr)
9595
END DO
9696
ELSE ! rank=0 -- server code
9797
DO i=1, size-1
98-
CALL MPI_IRECV(a(1,i), n, MPI_REAL, i tag,
98+
CALL MPI_IRECV(a(1,i), n, MPI_REAL, i, tag, &
9999
comm, request_list(i), ierr)
100100
END DO
101-
DO WHILE(.TRUE.)
101+
DO
102102
CALL MPI_WAITANY(size-1, request_list, index, status, ierr)
103103
CALL DO_SERVICE(a(1,index)) ! handle one message
104-
CALL MPI_IRECV(a(1, index), n, MPI_REAL, index, tag,
104+
CALL MPI_IRECV(a(1, index), n, MPI_REAL, index, tag, &
105105
comm, request_list(index), ierr)
106106
END DO
107107
END IF

docs/man-openmpi/man3/MPI_Waitsome.3.rst

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -108,22 +108,22 @@ using :ref:`MPI_Waitsome`.
108108
109109
CALL MPI_COMM_SIZE(comm, size, ierr)
110110
CALL MPI_COMM_RANK(comm, rank, ierr)
111-
IF(rank .GT. 0) THEN ! client code
112-
DO WHILE(.TRUE.)
111+
IF(rank > 0) THEN ! client code
112+
DO
113113
CALL MPI_ISEND(a, n, MPI_REAL, 0, tag, comm, request, ierr)
114114
CALL MPI_WAIT(request, status, ierr)
115115
END DO
116116
ELSE ! rank=0 -- server code
117117
DO i=1, size-1
118-
CALL MPI_IRECV(a(1,i), n, MPI_REAL, i, tag,
118+
CALL MPI_IRECV(a(1,i), n, MPI_REAL, i, tag, &
119119
comm, requests(i), ierr)
120120
END DO
121-
DO WHILE(.TRUE.)
122-
CALL MPI_WAITSOME(size, request_list, numdone,
121+
DO
122+
CALL MPI_WAITSOME(size, request_list, numdone, &
123123
indices, statuses, ierr)
124124
DO i=1, numdone
125125
CALL DO_SERVICE(a(1, indices(i)))
126-
CALL MPI_IRECV(a(1, indices(i)), n, MPI_REAL, 0, tag,
126+
CALL MPI_IRECV(a(1, indices(i)), n, MPI_REAL, i, tag, &
127127
comm, requests(indices(i)), ierr)
128128
END DO
129129
END DO

0 commit comments

Comments
 (0)