Skip to content

Commit 8548c6f

Browse files
authored
Merge pull request #413 from JuliaParallel/jps/ci-commands
TEST: Try implementing custom actions
2 parents 8afc692 + 9f3e01a commit 8548c6f

File tree

2 files changed

+62
-13
lines changed

2 files changed

+62
-13
lines changed

.github/workflows/CustomCommands.yml

Lines changed: 46 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,46 @@
1+
name: Custom Commands
2+
3+
on:
4+
pull_request:
5+
6+
jobs:
7+
custom_actions:
8+
runs-on: ubuntu-latest
9+
steps:
10+
- name: Wait for commands
11+
id: read_commands
12+
run: |
13+
while true; do
14+
ci_commands=$(curl -sH "Authorization: token ${{ secrets.GITHUB_TOKEN }}" -X GET "${{ github.event.pull_request.comments_url }}" | jq -r ".[] | select(.body | startswith(\"/run-actions\")) | .body")
15+
if [ ! -z "$ci_commands" ]; then
16+
echo "Read commands $ci_commands"
17+
echo "ci_commands=$ci_commands" >> $GITHUB_OUTPUT
18+
break
19+
fi
20+
sleep 30
21+
done
22+
23+
- uses: actions/checkout@v2
24+
- uses: julia-actions/setup-julia@v1
25+
with:
26+
version: '1.9'
27+
arch: 'x64'
28+
29+
- name: Run provided commands
30+
run: |
31+
echo "Got custom commands:"
32+
ci_commands="${{ steps.read_commands.outputs.ci_commands }}"
33+
echo $ci_commands
34+
if echo "$ci_commands" | grep -q benchmark; then
35+
echo "Running benchmarks..."
36+
julia -e 'using Pkg; Pkg.add("BenchmarkTools"); Pkg.develop(;path=pwd())'
37+
JULIA_PROJECT=$PWD julia --project benchmarks/benchmark.jl
38+
fi
39+
if echo "$ci_commands" | grep -q premerge; then
40+
echo "Running pre-merge checks..."
41+
julia -e 'using Pkg; Pkg.add("Aqua"); Pkg.develop(;path=pwd())'
42+
JULIA_PROJECT=$PWD julia --project -e 'using Aqua, Dagger; Aqua.test_all(Dagger)'
43+
fi
44+
env:
45+
BENCHMARK: "nmf:raw,dagger"
46+
BENCHMARK_SCALE: "5:5:10"

benchmarks/suites/nmf.jl

Lines changed: 16 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -55,40 +55,43 @@ function nmf_suite(ctx; method, accels)
5555
nw = length(workers())
5656
nsuite = BenchmarkGroup()
5757
while nw > 0
58-
opts = if accel == "cuda"
58+
scope = if accel == "cuda"
59+
error("Not implemented")
5960
Dagger.Sch.SchedulerOptions(;proctypes=[
6061
DaggerGPU.CuArrayDeviceProc
6162
])
6263
elseif accel == "amdgpu"
64+
error("Not implemented")
6365
Dagger.Sch.SchedulerOptions(;proctypes=[
6466
DaggerGPU.ROCArrayProc
6567
])
6668
elseif accel == "cpu"
67-
Dagger.Sch.SchedulerOptions()
69+
scope = Dagger.scope(;workers=workers()[1:nw])
6870
else
6971
error("Unknown accelerator $accel")
7072
end
7173
#bsz = ncol ÷ length(workers())
7274
bsz = ncol ÷ 64
7375
nsuite["Workers: $nw"] = @benchmarkable begin
74-
_ctx = Context($ctx, workers()[1:$nw])
75-
compute(_ctx, nnmf($X[], $W[], $H[]); options=$opts)
76+
Dagger.with_options(;scope=$scope) do
77+
fetch(nnmf($X[], $W[], $H[]))
78+
end
7679
end setup=begin
7780
_nw, _scale = $nw, $scale
7881
@info "Starting $_nw worker Dagger NNMF (scale by $_scale)"
7982
if $accel == "cuda"
8083
# FIXME: Allocate with CUDA.rand if possible
81-
$X[] = Dagger.mapchunks(CUDA.cu, compute(rand(Blocks($bsz, $bsz), Float32, $nrow, $ncol); options=$opts))
82-
$W[] = Dagger.mapchunks(CUDA.cu, compute(rand(Blocks($bsz, $bsz), Float32, $nrow, $nfeatures); options=$opts))
83-
$H[] = Dagger.mapchunks(CUDA.cu, compute(rand(Blocks($bsz, $bsz), Float32, $nfeatures, $ncol); options=$opts))
84+
$X[] = Dagger.mapchunks(CuArray, rand(Blocks($bsz, $bsz), Float32, $nrow, $ncol))
85+
$W[] = Dagger.mapchunks(CuArray, rand(Blocks($bsz, $bsz), Float32, $nrow, $nfeatures))
86+
$H[] = Dagger.mapchunks(CuArray, rand(Blocks($bsz, $bsz), Float32, $nfeatures, $ncol))
8487
elseif $accel == "amdgpu"
85-
$X[] = Dagger.mapchunks(ROCArray, compute(rand(Blocks($bsz, $bsz), Float32, $nrow, $ncol); options=$opts))
86-
$W[] = Dagger.mapchunks(ROCArray, compute(rand(Blocks($bsz, $bsz), Float32, $nrow, $nfeatures); options=$opts))
87-
$H[] = Dagger.mapchunks(ROCArray, compute(rand(Blocks($bsz, $bsz), Float32, $nfeatures, $ncol); options=$opts))
88+
$X[] = Dagger.mapchunks(ROCArray, rand(Blocks($bsz, $bsz), Float32, $nrow, $ncol))
89+
$W[] = Dagger.mapchunks(ROCArray, rand(Blocks($bsz, $bsz), Float32, $nrow, $nfeatures))
90+
$H[] = Dagger.mapchunks(ROCArray, rand(Blocks($bsz, $bsz), Float32, $nfeatures, $ncol))
8891
elseif $accel == "cpu"
89-
$X[] = compute(rand(Blocks($bsz, $bsz), Float32, $nrow, $ncol); options=$opts)
90-
$W[] = compute(rand(Blocks($bsz, $bsz), Float32, $nrow, $nfeatures); options=$opts)
91-
$H[] = compute(rand(Blocks($bsz, $bsz), Float32, $nfeatures, $ncol); options=$opts)
92+
$X[] = rand(Blocks($bsz, $bsz), Float32, $nrow, $ncol)
93+
$W[] = rand(Blocks($bsz, $bsz), Float32, $nrow, $nfeatures)
94+
$H[] = rand(Blocks($bsz, $bsz), Float32, $nfeatures, $ncol)
9295
end
9396
end teardown=begin
9497
if render != "" && !live

0 commit comments

Comments
 (0)