Skip to content

Commit cd40db9

Browse files
authored
Merge pull request #210 from jkozdon/v07
Update to Julia v0.7. MPIManager still needs additional work to function under v0.7.
2 parents 9b1f5e3 + a433ef1 commit cd40db9

37 files changed

+406
-334
lines changed

.travis.yml

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@ os:
66
- osx
77
julia:
88
- 0.6
9+
- 0.7
910
- nightly
1011
notifications:
1112
email: false
@@ -20,5 +21,8 @@ before_install:
2021
# Work around OpenMPI attempting to create overly long temporary
2122
# file names - and erroring as a result
2223
- export TMPDIR=/tmp
24+
script:
25+
- julia -e 'if VERSION < v"0.7.0-DEV.5183"; Pkg.clone(pwd()) ; else; using Pkg; Pkg.up(); end; Pkg.build("MPI"); Pkg.test("MPI"; coverage=true)'
2326
after_success:
24-
- if [ "$TRAVIS_JULIA_VERSION" = nightly ]; then julia -e 'cd(Pkg.dir("MPI")); Pkg.add("Coverage"); using Coverage; Coveralls.submit(Coveralls.process_folder()); Codecov.submit(Coveralls.process_folder())'; fi
27+
- julia -e 'VERSION < v"0.7.0-DEV.5183" || using Pkg; cd(Pkg.dir("MPI")); Pkg.add("Coverage"); using Coverage; Coveralls.submit(Coveralls.process_folder())'
28+
- julia -e 'VERSION < v"0.7.0-DEV.5183" || using Pkg; cd(Pkg.dir("MPI")); Pkg.add("Coverage"); using Coverage; Codecov.submit(Codecov.process_folder())'

Project.toml

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
1+
authors = []
2+
name = "MPI"
3+
uuid = "da04e1cc-30fd-572f-bb4f-1f8673147195"
4+
version = "0.1.0"
5+
6+
[deps]
7+
BinDeps = "9e28174c-4ba2-5203-b857-d8d62c4213ee"
8+
Compat = "34da2185-b29b-5c13-b0c7-acf172513d20"
9+
Distributed = "8ba89e20-285c-5b6f-9357-94700520ee1b"
10+
Libdl = "8f399da3-3557-5675-b5ff-fb832c97cbdb"

REQUIRE

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
julia 0.6
22
BinDeps
3-
Compat 0.42
3+
Compat 0.66

appveyor.yml

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,8 @@ environment:
44
- JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x64/0.6/julia-0.6-latest-win64.exe"
55
- JULIA_URL: "https://julialangnightlies-s3.julialang.org/bin/winnt/x86/julia-latest-win32.exe"
66
- JULIA_URL: "https://julialangnightlies-s3.julialang.org/bin/winnt/x64/julia-latest-win64.exe"
7+
- JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x86/0.7/julia-0.7.0-alpha-win32.exe"
8+
- JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x64/0.7/julia-0.7.0-alpha-win64.exe"
79

810
branches:
911
only:
@@ -33,8 +35,9 @@ install:
3335
build_script:
3436
# Need to convert from shallow to complete for Pkg.clone to work
3537
- IF EXIST .git\shallow (git fetch --unshallow)
36-
- C:\projects\julia\bin\julia -e "versioninfo();
37-
Pkg.clone(pwd(), \"MPI\"); Pkg.build(\"MPI\")"
38+
- C:\projects\julia\bin\julia -e "VERSION < v\"0.7.0-DEV\" || (using InteractiveUtils);
39+
versioninfo(); pkg=\"MPI\";
40+
if VERSION < v\"0.7.0-DEV.5183\"; Pkg.clone(pwd(), pkg); else; using Pkg; Pkg.up(); end; Pkg.build(\"MPI\")"
3841

3942
test_script:
40-
- C:\projects\julia\bin\julia --check-bounds=yes -e "Pkg.test(\"MPI\")"
43+
- C:\projects\julia\bin\julia --check-bounds=yes -e "VERSION < v\"0.7.0-DEV.5183\" || using Pkg; Pkg.test(\"MPI\")"

deps/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -142,7 +142,7 @@ endif(${USE_GIT} EQUAL 1)
142142
if(GIT_DESCRIBE_RESULT EQUAL 0)
143143
file(WRITE ${CMAKE_SOURCE_DIR}/VERSION \"\${GIT_DESCRIBE_VERSION}\")
144144
else(GIT_DESCRIBE_RESULT EQUAL 0)
145-
FILE(READ ${CMAKE_SOURCE_DIR}/VERSION GIT_DESCRIBE_VERSION)
145+
FILE(READ ${CMAKE_SOURCE_DIR}/VERSION_NO_GIT GIT_DESCRIBE_VERSION)
146146
endif(GIT_DESCRIBE_RESULT EQUAL 0)
147147
148148
string(REGEX REPLACE \"v([0-9]*)\\\\.([0-9]*)\\\\.(.*)\"

deps/VERSION_NO_GIT

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
v0.7.0

src/MPI.jl

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -4,11 +4,13 @@ module MPI
44

55
using Compat
66

7-
@static if is_windows()
7+
using Compat.Libdl
8+
9+
@static if Compat.Sys.iswindows()
810
const depfile = "win_mpiconstants.jl"
911
end
1012

11-
@static if is_unix()
13+
@static if Compat.Sys.isunix()
1214
const depfile = joinpath(dirname(@__FILE__), "..", "deps", "src", "compile-time.jl")
1315
isfile(depfile) || error("MPI not properly installed. Please run Pkg.build(\"MPI\")")
1416
end
@@ -22,7 +24,7 @@ const mpitype_dict = Dict{DataType, Cint}()
2224
const mpitype_dict_inverse = Dict{Cint, DataType}()
2325

2426
function __init__()
25-
@static if is_unix()
27+
@static if Compat.Sys.isunix()
2628
# need to open libmpi with RTLD_GLOBAL flag for Linux, before
2729
# any ccall cannot use RTLD_DEEPBIND; this leads to segfaults
2830
# at least on Ubuntu 15.10
@@ -48,8 +50,8 @@ function __init__()
4850
UInt64 => MPI_INTEGER8, # => MPI_UINT64_T,
4951
Float32 => MPI_REAL4,
5052
Float64 => MPI_REAL8,
51-
Complex64 => MPI_COMPLEX8,
52-
Complex128 => MPI_COMPLEX16)
53+
Compat.ComplexF32 => MPI_COMPLEX8,
54+
Compat.ComplexF64 => MPI_COMPLEX16)
5355
mpitype_dict[T] = mpiT
5456
mpitype_dict_inverse[mpiT] = T
5557
end

src/cman.jl

Lines changed: 22 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,9 @@
1-
import Base: launch, manage, kill, procs, connect
1+
import Base: kill
22
export MPIManager, launch, manage, kill, procs, connect, mpiprocs, @mpi_do
33
export TransportMode, MPI_ON_WORKERS, TCP_TRANSPORT_ALL, MPI_TRANSPORT_ALL
4+
using Compat
5+
using Compat.Distributed
6+
import Compat.Sockets: connect, listenany, accept, getipaddr, IPv4
47

58

69

@@ -47,9 +50,9 @@ mutable struct MPIManager <: ClusterManager
4750

4851
# MPI_TRANSPORT_ALL
4952
comm::MPI.Comm
50-
initiate_shutdown::Channel{Void}
51-
sending_done::Channel{Void}
52-
receiving_done::Channel{Void}
53+
initiate_shutdown::Channel{Nothing}
54+
sending_done::Channel{Nothing}
55+
receiving_done::Channel{Nothing}
5356

5457
function MPIManager(; np::Integer = Sys.CPU_CORES,
5558
mpirun_cmd::Cmd = `mpiexec -n $np`,
@@ -83,7 +86,7 @@ mutable struct MPIManager <: ClusterManager
8386
if mode != MPI_TRANSPORT_ALL
8487
# Start a listener for capturing stdout from the workers
8588
port, server = listenany(11000)
86-
@schedule begin
89+
@async begin
8790
while true
8891
sock = accept(server)
8992
push!(mgr.stdout_ios, sock)
@@ -98,12 +101,12 @@ mutable struct MPIManager <: ClusterManager
98101
end
99102

100103
if mode == MPI_TRANSPORT_ALL
101-
mgr.initiate_shutdown = Channel{Void}(1)
102-
mgr.sending_done = Channel{Void}(np)
103-
mgr.receiving_done = Channel{Void}(1)
104+
mgr.initiate_shutdown = Channel{Nothing}(1)
105+
mgr.sending_done = Channel{Nothing}(np)
106+
mgr.receiving_done = Channel{Nothing}(1)
104107
global initiate_shutdown = mgr.initiate_shutdown
105108
end
106-
mgr.initiate_shutdown = Channel{Void}(1)
109+
mgr.initiate_shutdown = Channel{Nothing}(1)
107110
global initiate_shutdown = mgr.initiate_shutdown
108111

109112
return mgr
@@ -119,7 +122,7 @@ end
119122
# MPI_ON_WORKERS case
120123

121124
# Launch a new worker, called from Base.addprocs
122-
function launch(mgr::MPIManager, params::Dict,
125+
function Distributed.launch(mgr::MPIManager, params::Dict,
123126
instances::Array, cond::Condition)
124127
try
125128
if mgr.mode == MPI_ON_WORKERS
@@ -129,7 +132,7 @@ function launch(mgr::MPIManager, params::Dict,
129132
println("Try again with a different instance of MPIManager.")
130133
throw(ErrorException("Reuse of MPIManager is not allowed."))
131134
end
132-
cookie = string(":cookie_",Base.cluster_cookie())
135+
cookie = string(":cookie_",Distributed.cluster_cookie())
133136
setup_cmds = `using MPI\;MPI.setup_worker'('$(getipaddr().host),$(mgr.port),$cookie')'`
134137
mpi_cmd = `$(mgr.mpirun_cmd) $(params[:exename]) -e $(Base.shell_escape(setup_cmds))`
135138
open(detach(mpi_cmd))
@@ -156,7 +159,7 @@ function launch(mgr::MPIManager, params::Dict,
156159
config.io = io
157160
# Add config to the correct slot so that MPI ranks and
158161
# Julia pids are in the same order
159-
rank = Base.deserialize(io)
162+
rank = Compat.Serialization.deserialize(io)
160163
idx = mgr.mode == MPI_ON_WORKERS ? rank+1 : rank
161164
configs[idx] = config
162165
end
@@ -192,7 +195,7 @@ function setup_worker(host, port, cookie)
192195

193196
# Send our MPI rank to the manager
194197
rank = MPI.Comm_rank(MPI.COMM_WORLD)
195-
Base.serialize(io, rank)
198+
Compat.Serialization.serialize(io, rank)
196199

197200
# Hand over control to Base
198201
if cookie == nothing
@@ -206,7 +209,7 @@ function setup_worker(host, port, cookie)
206209
end
207210

208211
# Manage a worker (e.g. register / deregister it)
209-
function manage(mgr::MPIManager, id::Integer, config::WorkerConfig, op::Symbol)
212+
function Distributed.manage(mgr::MPIManager, id::Integer, config::WorkerConfig, op::Symbol)
210213
if op == :register
211214
# Retrieve MPI rank from worker
212215
# TODO: Why is this necessary? The workers already sent their rank.
@@ -284,7 +287,7 @@ function start_send_event_loop(mgr::MPIManager, rank::Int)
284287
# quite expensive when there are many workers. Design something better.
285288
# For example, instead of maintaining two streams per worker, provide
286289
# only abstract functions to write to / read from these streams.
287-
@schedule begin
290+
@async begin
288291
rr = MPI.Comm_rank(mgr.comm)
289292
reqs = MPI.Request[]
290293
while !isready(mgr.initiate_shutdown)
@@ -334,7 +337,7 @@ function start_main_loop(mode::TransportMode=TCP_TRANSPORT_ALL;
334337
# Send connection information to all workers
335338
# TODO: Use Bcast
336339
for j in 1:size-1
337-
cookie = VERSION >= v"0.5.0-dev+4047" ? Base.cluster_cookie() : nothing
340+
cookie = VERSION >= v"0.5.0-dev+4047" ? Distributed.cluster_cookie() : nothing
338341
MPI.send((getipaddr().host, mgr.port, cookie), j, 0, comm)
339342
end
340343
# Tell Base about the workers
@@ -360,9 +363,9 @@ function start_main_loop(mode::TransportMode=TCP_TRANSPORT_ALL;
360363

361364
# Send the cookie over. Introduced in v"0.5.0-dev+4047". Irrelevant under MPI
362365
# transport, but need it to satisfy the changed protocol.
363-
MPI.bcast(Base.cluster_cookie(), 0, comm)
366+
MPI.bcast(Distributed.cluster_cookie(), 0, comm)
364367
# Start event loop for the workers
365-
@schedule receive_event_loop(mgr)
368+
@async receive_event_loop(mgr)
366369
# Tell Base about the workers
367370
addprocs(mgr)
368371
return mgr
@@ -493,7 +496,7 @@ macro mpi_do(mgr, expr)
493496
end
494497

495498
# All managed Julia processes
496-
procs(mgr::MPIManager) = sort(keys(mgr.j2mpi))
499+
Distributed.procs(mgr::MPIManager) = sort(keys(mgr.j2mpi))
497500

498501
# All managed MPI ranks
499502
mpiprocs(mgr::MPIManager) = sort(keys(mgr.mpi2j))

0 commit comments

Comments (0)