Commit 76ec31d

Clean up I/O tests to only use MPI I/O (#393)
* Clean up I/O tests to only use MPI I/O. Hopefully fixes #383.
* Add MPI.File.sync.
1 parent 852ed82 commit 76ec31d

3 files changed: +39 -37 lines

docs/src/io.md

Lines changed: 6 additions & 0 deletions

@@ -12,6 +12,12 @@ MPI.File.open
 MPI.File.set_view!
 ```

+## Consistency
+
+```@docs
+MPI.File.sync
+```
+
 ## Data access

 ### Explicit offsets

src/io.jl

Lines changed: 15 additions & 0 deletions

@@ -97,7 +97,22 @@ function set_view!(file::FileHandle, disp::Integer, etype::Datatype, filetype::D
     set_view!(file, disp, etype, filetype, datarep, Info(infokwargs...))
 end

+"""
+    MPI.File.sync(fh::FileHandle)
+
+A collective operation causing all previous writes to `fh` by the calling process to be
+transferred to the storage device. If other processes have made updates to the storage
+device, then all such updates become visible to subsequent reads of `fh` by the calling
+process.

+# External links
+$(_doc_external("MPI_File_sync"))
+"""
+function sync(file::FileHandle)
+    # int MPI_File_sync(MPI_File fh)
+    @mpichk ccall((:MPI_File_sync, libmpi), Cint, (MPI_File,), file)
+    return nothing
+end


 # Explicit offsets
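
For orientation, here is a minimal sketch of how the new `MPI.File.sync` is intended to be used (not part of this commit; the file path and values are invented). `MPI_File_sync` is collective, and the `sync` / `Barrier` / `sync` sequence is the standard recipe for making one rank's writes visible to another rank's subsequent reads:

using MPI

MPI.Init()
comm = MPI.COMM_WORLD
rank = MPI.Comm_rank(comm)
nranks = MPI.Comm_size(comm)

# Every rank must open the same (broadcast) path.
path = MPI.bcast(tempname(), 0, comm)
fh = MPI.File.open(comm, path, read=true, write=true, create=true)

# Each rank writes one Int64 at its own byte offset (default byte view).
MPI.File.write_at(fh, 8 * rank, Int64[rank])

MPI.File.sync(fh)    # collective: flush this rank's writes to storage
MPI.Barrier(comm)    # order all writes before any cross-rank read
MPI.File.sync(fh)    # make other ranks' updates visible here

# Read the value written by the next rank around the ring.
data = zeros(Int64, 1)
MPI.File.read_at!(fh, 8 * ((rank + 1) % nranks), data)
@assert data[1] == (rank + 1) % nranks

close(fh)
MPI.Finalize()

Without the intervening barrier the second `sync` gives no ordering guarantee; this mirrors the consistency recipe in the MPI standard for files not in atomic mode.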

test/test_io.jl

Lines changed: 18 additions & 37 deletions

@@ -15,52 +15,33 @@ rank = MPI.Comm_rank(comm)
 sz = MPI.Comm_size(comm)
 filename = MPI.bcast(tempname(), 0, comm)

-# Write
 MPI.Barrier(comm)

-f = MPI.File.open(comm, filename, write=true)
-MPI.File.set_view!(f, 0, MPI.Datatype(Int64), MPI.Datatype(Int64))
-MPI.File.write_at(f, rank*2, ArrayType([Int64(rank+1) for i = 1:2]))
-close(f)
+# Collective write
+fh = MPI.File.open(comm, filename, read=true, write=true, create=true)
+MPI.File.set_view!(fh, 0, MPI.Datatype(Int64), MPI.Datatype(Int64))
+MPI.File.write_at_all(fh, rank*2, ArrayType([Int64(rank+1) for i = 1:2]))

-MPI.Barrier(comm)
-
-if rank == 0
-    @test read!(filename, zeros(Int64, (2,sz))) == [j for i = 1:2, j=1:sz]
-end
-
-MPI.Barrier(comm)
-
-f = MPI.File.open(comm, filename, write=true)
-MPI.File.set_view!(f, 0, MPI.Datatype(Int64), MPI.Datatype(Int64))
-MPI.File.write_at_all(f, rank*2, ArrayType([Int64(rank+1) for i = 1:2]))
-close(f)
-
-MPI.Barrier(comm)
+MPI.File.sync(fh)

+# Noncollective read
 if rank == 0
-    @test read!(filename, zeros(Int64, (2,sz))) == [j for i = 1:2, j=1:sz]
+    data = zeros(Int64, (2,sz))
+    MPI.File.read_at!(fh, 0, data)
+    @test data == [j for i = 1:2, j=1:sz]
 end

+MPI.File.sync(fh)
 MPI.Barrier(comm)

-# Read
-if rank == 0
-    write(filename, [Float64(j) for i = 1:3, j = 1:sz])
+if rank == sz-1
+    MPI.File.write_at(fh, 0, ArrayType([Int64(-1) for i = 1:2]))
 end

-MPI.Barrier(comm)
-
-
-f = MPI.File.open(comm, filename, read=true)
-MPI.File.set_view!(f, 0, MPI.Datatype(Float64), MPI.Datatype(Float64))
-
-data = ArrayType(zeros(Float64, 3))
-MPI.File.read_at!(f, rank*3, data)
-@test data == Float64[rank+1 for i = 1:3]
-
-MPI.Barrier(comm)
+MPI.File.sync(fh)

-data = ArrayType(zeros(Float64, 3))
-MPI.File.read_at_all!(f, rank*3, data)
-@test data == Float64[rank+1 for i = 1:3]
+# Collective read
+data = zeros(Int64, 1)
+MPI.File.read_at_all!(fh, rank*2, data)
+@test data == [rank == 0 ? -1 : rank+1]
+close(fh)
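
A detail the rewritten test is careful about: `MPI.File.sync` is collective, so those calls sit outside the `if rank == 0` / `if rank == sz-1` guards, while the noncollective `write_at` / `read_at!` calls may be guarded on a single rank. A self-contained sketch of that shape (again invented, not the test itself):

using MPI

MPI.Init()
comm = MPI.COMM_WORLD
rank = MPI.Comm_rank(comm)

path = MPI.bcast(tempname(), 0, comm)
fh = MPI.File.open(comm, path, read=true, write=true, create=true)

if rank == 0
    MPI.File.write_at(fh, 0, Int64[42])  # noncollective: safe to guard on one rank
end

# Collective calls stay outside the guard; running them on only a subset
# of ranks (e.g. inside `if rank == 0`) would deadlock the communicator.
MPI.File.sync(fh)
MPI.Barrier(comm)
MPI.File.sync(fh)

data = zeros(Int64, 1)
MPI.File.read_at!(fh, 0, data)
@assert data[1] == 42   # every rank observes rank 0's write

close(fh)
MPI.Finalize()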
