Commit 03ebed7

Switch to GitHub Actions-based benchmarks. (#2597)
[only benchmarks]
1 parent 4e9513b commit 03ebed7

File tree

4 files changed: 87 additions & 159 deletions


.buildkite/pipeline.yml

Lines changed: 28 additions & 71 deletions
@@ -384,7 +384,6 @@ steps:
         build.message !~ /\[skip special\]/
     timeout_in_minutes: 45
 
-  # we want to benchmark every commit on the master branch, even if it failed CI
   - wait: ~
     continue_on_failure: true
 
@@ -412,78 +411,36 @@ steps:
         build.message !~ /\[skip docs\]/
     timeout_in_minutes: 15
 
-  - group: ":racehorse: Benchmarks"
-    steps:
-      # benchmarks outside of the master branch don't submit their results,
-      # so they can run on any system in the juliagpu queue.
-      - label: "Benchmarks (dry run)"
-        plugins:
-          - JuliaCI/julia#v1:
-              version: "1.11"
-        command: |
-          julia --project -e '
-            using Pkg
-
-            println("--- :julia: Instantiating project")
-            Pkg.resolve()
-            Pkg.instantiate()
-            Pkg.activate("perf")
-            Pkg.resolve()
-            Pkg.instantiate()
-            push!(LOAD_PATH, @__DIR__)
-
-            println("+++ :julia: Benchmarking")
-            include("perf/runbenchmarks.jl")'
-        agents:
-          queue: "juliagpu"
-          cuda: "*"
-        if: |
-          build.message =~ /\[only benchmarks\]/ ||
-          build.message !~ /\[only/ && !build.pull_request.draft &&
-          build.message !~ /\[skip benchmarks\]/
-        timeout_in_minutes: 30
-
-      # if we will submit results, use the benchmark queue so that we will
-      # be running on the same system each time
-      - label: "Benchmarks on Julia {{matrix.julia}}"
-        plugins:
-          - JuliaCI/julia#v1:
-              version: "{{matrix.julia}}"
-        env:
-          BENCHMARKS: "true"
-          CODESPEED_PROJECT: "$BUILDKITE_PIPELINE_NAME"
-          CODESPEED_BRANCH: "$BUILDKITE_BRANCH"
-          CODESPEED_COMMIT: "$BUILDKITE_COMMIT"
-          CODESPEED_EXECUTABLE: "Julia {{matrix.julia}}"
-        command: |
-          julia --project -e '
-            using Pkg
-            ENV["CODESPEED_ENVIRONMENT"] = ENV["BUILDKITE_AGENT_NAME"]
+  - label: ":racehorse: Benchmarks"
+    plugins:
+      - JuliaCI/julia#v1:
+          version: "1.11"
+    env:
+      BENCHMARKS: "true"
+      CODESPEED_PROJECT: "$BUILDKITE_PIPELINE_NAME"
+      CODESPEED_BRANCH: "$BUILDKITE_BRANCH"
+      CODESPEED_COMMIT: "$BUILDKITE_COMMIT"
+      CODESPEED_EXECUTABLE: "Julia {{matrix.julia}}"
+    command: |
+      julia --project=perf -e '
+        using Pkg
 
-            println("--- :julia: Instantiating project")
-            Pkg.resolve()
-            Pkg.instantiate()
-            Pkg.activate("perf")
-            Pkg.resolve()
-            Pkg.instantiate()
-            push!(LOAD_PATH, @__DIR__)
+        println("--- :julia: Instantiating project")
+        Pkg.develop([PackageSpec(path=pwd())])
 
-            println("+++ :julia: Benchmarking")
-            include("perf/runbenchmarks.jl")'
-        agents:
-          queue: "benchmark"
-          gpu: "rtx2070"
-          cuda: "*"
-        if: |
-          build.branch =~ /^master$$/ && build.message =~ /\[only benchmarks\]/ ||
-          build.branch =~ /^master$$/ && build.message !~ /\[only/ &&
-          build.message !~ /\[skip benchmarks\]/
-        matrix:
-          setup:
-            julia:
-              - "1.11"
-              - "1.11"
-        timeout_in_minutes: 30
+        println("+++ :julia: Benchmarking")
+        include("perf/runbenchmarks.jl")'
+    artifact_paths:
+      - "benchmarkresults.json"
+    agents:
+      queue: "benchmark"
+      gpu: "rtx2070"
+      cuda: "*"
+    if: |
+      build.message =~ /\[only benchmarks\]/ ||
+      build.message !~ /\[only/ && !build.pull_request.draft &&
+      build.message !~ /\[skip benchmarks\]/
+    timeout_in_minutes: 30
 
 env:
   JULIA_PKG_SERVER_REGISTRY_PREFERENCE: "eager" # OK to downloading JLLs from GitHub
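The replacement step drops the Codespeed submission machinery: it runs the suite once, on the benchmark queue, using the perf subproject (julia --project=perf plus Pkg.develop of the checkout) and uploads benchmarkresults.json as a build artifact. As a rough local equivalent of the command: block (a sketch only; it assumes a CUDA.jl checkout as the working directory and a usable GPU):

    # sketch: reproduce the Buildkite benchmark step from a CUDA.jl checkout
    using Pkg
    Pkg.activate("perf")                    # same effect as `julia --project=perf`
    Pkg.develop([PackageSpec(path=pwd())])  # benchmark this checkout, not a registered version
    include("perf/runbenchmarks.jl")        # writes benchmarkresults.json to the current directory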

.github/workflows/Benchmark.yml

Lines changed: 49 additions & 0 deletions
@@ -0,0 +1,49 @@
+name: Benchmarks
+permissions:
+  statuses: read # find Buildkite URL from PR status
+  contents: write # update benchmark contents in gh-pages branch
+  pull-requests: write # comment on PR with benchmark results
+  deployments: write # deploy GitHub pages website
+
+on:
+  pull_request_target:
+    branches:
+      - master
+  push:
+    branches:
+      - master
+
+jobs:
+  benchmark:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Download Buildkite Artifacts
+        id: download
+        uses: EnricoMi/download-buildkite-artifact-action@v1
+        with:
+          buildkite_token: ${{ secrets.BUILDKITE_TOKEN }}
+          ignore_build_states: blocked,canceled,skipped,not_run
+          ignore_job_states: timed_out,failed
+          output_path: artifacts
+
+      - name: Locate Benchmarks Artifact
+        id: locate
+        if: ${{ steps.download.outputs.download-state == 'success' }}
+        run: echo "path=$(find artifacts -type f -name benchmarkresults.json 2>/dev/null)" >> $GITHUB_OUTPUT
+
+      - name: Upload Benchmark Results
+        if: ${{ steps.locate.outputs.path != '' }}
+        uses: benchmark-action/github-action-benchmark@v1
+        with:
+          name: CUDA.jl Benchmarks
+          tool: "julia"
+          output-file-path: ${{ steps.locate.outputs.path }}
+          benchmark-data-dir-path: "bench"
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          comment-always: ${{ github.event_name == 'pull_request' }}
+          summary-always: true
+          alert-threshold: "125%"
+          fail-on-alert: false
+          auto-push: ${{ github.event_name != 'pull_request' }}
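The workflow bridges the two CI systems: download-buildkite-artifact-action locates the Buildkite build through the commit status and fetches its artifacts, then github-action-benchmark (with tool: "julia", i.e. BenchmarkTools output) comments on pull requests and pushes chart data to the bench path of the GitHub Pages site. The artifact is plain BenchmarkTools JSON, so it can also be examined by hand; a sketch, assuming a downloaded copy in the current directory:

    # sketch: inspect a downloaded benchmarkresults.json
    using BenchmarkTools
    group = BenchmarkTools.load("benchmarkresults.json")[1]  # BenchmarkGroup of median estimates
    for (path, estimate) in leaves(group)
        println(join(path, "/"), " => ", estimate)
    end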

README.md

Lines changed: 3 additions & 6 deletions
@@ -2,7 +2,7 @@
 
 *CUDA programming in Julia*
 
-[![][doi-img]][doi-url] [![][docs-stable-img]][docs-stable-url] [![][docs-dev-img]][docs-dev-url] [![][buildkite-img]][buildkite-url] [![][codecov-img]][codecov-url] [![][codespeed-trend-img]][codespeed-trend-url] [![][codespeed-chart-img]][codespeed-chart-url]
+[![][doi-img]][doi-url] [![][docs-stable-img]][docs-stable-url] [![][docs-dev-img]][docs-dev-url] [![][buildkite-img]][buildkite-url] [![][codecov-img]][codecov-url] [![][benchmark-img]][benchmark-url]
 
 [doi-img]: https://zenodo.org/badge/doi/10.1109/TPDS.2018.2872064.svg
 [doi-url]: https://ieeexplore.ieee.org/abstract/document/8471188
@@ -19,11 +19,8 @@
 [codecov-img]: https://codecov.io/gh/JuliaGPU/CUDA.jl/branch/master/graph/badge.svg
 [codecov-url]: https://codecov.io/gh/JuliaGPU/CUDA.jl
 
-[codespeed-chart-img]: https://img.shields.io/badge/benchmarks-Chart-yellowgreen
-[codespeed-chart-url]: https://speed.juliagpu.org/timeline/#/?exe=9,11&env=1&base=none&ben=grid&revs=50
-
-[codespeed-trend-img]: https://img.shields.io/badge/benchmarks-Trend-yellowgreen
-[codespeed-trend-url]: https://speed.juliagpu.org/changes/?exe=9&env=1&tre=50
+[benchmark-img]: https://img.shields.io/badge/benchmarks-Chart-yellowgreen
+[benchmark-url]: https://cuda.juliagpu.org/bench/
 
 The CUDA.jl package is the main programming interface for working with NVIDIA CUDA GPUs
 using Julia. It features a user-friendly array abstraction, a compiler for writing CUDA

perf/runbenchmarks.jl

Lines changed: 7 additions & 82 deletions
@@ -7,13 +7,6 @@ using BenchmarkTools
 using StableRNGs
 rng = StableRNG(123)
 
-# we only submit results when running on the master branch
-real_run = get(ENV, "CODESPEED_BRANCH", nothing) == "master"
-if real_run
-    # to find untuned benchmarks
-    BenchmarkTools.DEFAULT_PARAMETERS.evals = 0
-end
-
 # convenience macro to create a benchmark that requires synchronizing the GPU
 macro async_benchmarkable(ex...)
     quote
@@ -28,21 +21,17 @@ latency_results = include("latency.jl")
 
 SUITE = BenchmarkGroup()
 
-# NOTE: don't use spaces in benchmark names (tobami/codespeed#256)
-
 include("cuda.jl")
 include("kernel.jl")
 include("array.jl")
 
-if real_run
-    @info "Preparing main benchmarks"
-    warmup(SUITE; verbose=false)
-    tune!(SUITE)
+@info "Preparing main benchmarks"
+warmup(SUITE; verbose=false)
+tune!(SUITE)
 
-    # reclaim memory that might have been used by the tuning process
-    GC.gc(true)
-    CUDA.reclaim()
-end
+# reclaim memory that might have been used by the tuning process
+GC.gc(true)
+CUDA.reclaim()
 
 # benchmark groups that aren't part of the suite
 addgroup!(SUITE, "integration")
@@ -60,69 +49,5 @@ integration_results["cudadevrt"] = include("cudadevrt.jl")
 results["latency"] = latency_results
 results["integration"] = integration_results
 
-println(results)
-
-
-## comparison
-
 # write out the results
-BenchmarkTools.save(joinpath(@__DIR__, "results.json"), results)
-
-# compare against previous results
-# TODO: store these results so that we can compare when benchmarking PRs
-reference_path = joinpath(@__DIR__, "reference.json")
-if ispath(reference_path)
-    reference = BenchmarkTools.load(reference_path)[1]
-    comparison = judge(minimum(results), minimum(reference))
-
-    println("Improvements:")
-    println(improvements(comparison))
-
-    println("Regressions:")
-    println(regressions(comparison))
-end
-
-
-## submission
-
-using JSON, HTTP
-
-if real_run
-    @info "Submitting to Codespeed..."
-
-    basedata = Dict(
-        "branch" => ENV["CODESPEED_BRANCH"],
-        "commitid" => ENV["CODESPEED_COMMIT"],
-        "project" => ENV["CODESPEED_PROJECT"],
-        "environment" => ENV["CODESPEED_ENVIRONMENT"],
-        "executable" => ENV["CODESPEED_EXECUTABLE"]
-    )
-
-    # convert nested groups of benchmark to flat dictionaries of results
-    flat_results = []
-    function flatten(results, prefix="")
-        for (key,value) in results
-            if value isa BenchmarkGroup
-                flatten(value, "$prefix$key/")
-            else
-                @assert value isa BenchmarkTools.Trial
-
-                # codespeed reports maxima, but those are often very noisy.
-                # get rid of measurements that unnecessarily skew the distribution.
-                rmskew!(value)
-
-                push!(flat_results,
-                    Dict(basedata...,
-                        "benchmark" => "$prefix$key",
-                        "result_value" => median(value).time / 1e9,
-                        "min" => minimum(value).time / 1e9,
-                        "max" => maximum(value).time / 1e9))
-            end
-        end
-    end
-    flatten(results)
-
-    HTTP.post("$(ENV["CODESPEED_SERVER"])/result/add/json/",
-        ["Content-Type" => "application/x-www-form-urlencoded"],
-        HTTP.URIs.escapeuri(Dict("json" => JSON.json(flat_results))))
-end
+BenchmarkTools.save("benchmarkresults.json", median(results))
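Since regressions are now judged by the GitHub Action rather than in-process, the script only saves median estimates. The deleted local comparison can still be reproduced by hand; a sketch, assuming two files both written with BenchmarkTools.save(file, median(results)) as the new script does (the file names are placeholders):

    # sketch: manually compare two saved benchmark runs
    using BenchmarkTools
    reference = BenchmarkTools.load("reference.json")[1]
    current   = BenchmarkTools.load("benchmarkresults.json")[1]
    comparison = judge(current, reference)   # leaf-wise comparison of median estimates
    println("Regressions:");  println(regressions(comparison))
    println("Improvements:"); println(improvements(comparison))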
