@@ -13,8 +13,11 @@ The argument `pkg` can be a name of a package or a path to a directory to a pack
* `postprocess` - A function to post-process results. Will be passed the `BenchmarkGroup`, which it can modify, or return a new one.
* `resultfile` - If set, saves the output to `resultfile`.
* `retune` - Force a re-tune, saving the new tuning to the tune file.
- * `progressoptions` - Options (a `NamedTuple`) to be passed as keyword arguments to
-   `ProgressMeter.Progress`.
+ * `verbose::Bool = true` - Print the currently running benchmark.
+ * `logger_factory` - Specify the logger used during the benchmark. It is a callable object
+   (typically a type) with no arguments that creates a logger. It must exist as a constant
+   in some package (e.g., an anonymous function does not work).
+ * `progressoptions` - Deprecated.

The result can be used by functions such as [`judge`](@ref). If you choose to, you can save the results manually using
[`writeresults`](@ref) where `results` is the return value of this function. It can be read back with [`readresults`](@ref).
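A hedged usage sketch of the new keywords ("Example" is a hypothetical package name; `TerminalLogger` comes from TerminalLoggers.jl):

```julia
using PkgBenchmark
using TerminalLoggers: TerminalLogger

# Sketch: run a package's benchmark suite with the new keywords.
# `logger_factory` must be a zero-argument callable that exists as a
# constant in some package; a logger type such as `TerminalLogger` qualifies.
results = benchmarkpkg(
    "Example";                       # hypothetical package name
    verbose = false,                 # suppress per-benchmark printing
    logger_factory = TerminalLogger,
)
```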
@@ -42,9 +45,18 @@ function benchmarkpkg(
    postprocess = nothing,
    resultfile = nothing,
    retune = false,
-   progressoptions = NamedTuple(),
+   verbose::Bool = true,
+   logger_factory = nothing,
+   progressoptions = nothing,
    custom_loadpath = "" #= used in tests =#
)
+   if progressoptions !== nothing
+       Base.depwarn(
+           "Keyword argument `progressoptions` is ignored. Please use `logger_factory`.",
+           :benchmarkpkg,
+       )
+   end
+
    target = BenchmarkConfig(target)

    pkgid = Base.identify_package(pkg)
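To make the `logger_factory` constraint concrete, a sketch (again with a hypothetical package "Example"):

```julia
using PkgBenchmark
using TerminalLoggers: TerminalLogger

# OK: `TerminalLogger` is a constant in the TerminalLoggers package, so its
# package-qualified location can be sent to the benchmark subprocess.
benchmarkpkg("Example"; logger_factory = TerminalLogger)

# Not OK: an anonymous function has no stable, package-qualified name, so
# the subprocess cannot reconstruct it (this call errors).
benchmarkpkg("Example"; logger_factory = () -> TerminalLogger())
```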
@@ -96,7 +108,8 @@ function benchmarkpkg(
        _runbenchmark(script, f, target, tunefile;
                      retune = retune,
                      custom_loadpath = custom_loadpath,
-                     progressoptions = progressoptions)
+                     runoptions = (verbose = verbose,),
+                     logger_factory = logger_factory)
    end
    io = IOBuffer(results_local["results"])
    seek(io, 0)
@@ -138,8 +151,61 @@ function benchmarkpkg(
    return results
end

+"""
+    objectpath(x) -> (pkg_uuid::Union{String,Nothing}, pkg_name::String, name::Symbol...)
+
+Get the "fullname" of an object, prefixed by its package ID.
+
+# Examples
+```jldoctest
+julia> using PkgBenchmark: objectpath
+
+julia> using Logging
+
+julia> objectpath(ConsoleLogger)
+("56ddb016-857b-54e1-b83d-db4d58db5568", "Logging", :ConsoleLogger)
+```
+"""
+function objectpath(x)
+    m = parentmodule(x)
+    if x === m
+        pkg = Base.PkgId(x)
+        uuid = pkg.uuid === nothing ? nothing : string(pkg.uuid)
+        return (uuid, pkg.name)
+    else
+        n = nameof(x)
+        if !isdefined(m, n)
+            error("Object `$x` is not accessible as `$m.$n`.")
+        end
+        return (objectpath(m)..., n)
+    end
+end
+
+"""
+    loadobject((pkg_uuid, pkg_name, name...))
+
+Inverse of `objectpath`.
+
+# Examples
+```jldoctest
+julia> using PkgBenchmark: loadobject
+
+julia> using Logging
+
+julia> loadobject(("56ddb016-857b-54e1-b83d-db4d58db5568", "Logging", :ConsoleLogger)) ===
+           ConsoleLogger
+true
+```
+"""
+loadobject(path) = _loadobject(path...)
+function _loadobject(pkg_uuid, pkg_name, fullname...)
+    pkgid = Base.PkgId(pkg_uuid === nothing ? pkg_uuid : UUID(pkg_uuid), pkg_name)
+    return foldl(getproperty, fullname, init = Base.require(pkgid))
+end
+
function _runbenchmark(file::String, output::String, benchmarkconfig::BenchmarkConfig, tunefile::String;
-                      retune = false, custom_loadpath = nothing, progressoptions = NamedTuple())
+                      retune = false, custom_loadpath = nothing, runoptions = NamedTuple(),
+                      logger_factory = nothing)
    color = Base.have_color ? "--color=yes" : "--color=no"
    compilecache = "--compiled-modules=" * (Bool(Base.JLOptions().use_compiled_modules) ? "yes" : "no")
    _file, _output, _tunefile, _custom_loadpath = map(escape_string, (file, output, tunefile, custom_loadpath))
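A quick check that the two helpers invert each other (a sketch using the Logging stdlib):

```julia
using PkgBenchmark: objectpath, loadobject
using Logging

# Round-trip: record an object's package-qualified path, then resolve it.
path = objectpath(ConsoleLogger)
@assert loadobject(path) === ConsoleLogger

# A module round-trips to just (uuid, package_name).
@assert loadobject(objectpath(Logging)) === Logging
```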
@@ -151,11 +217,26 @@ function _runbenchmark(file::String, output::String, benchmarkconfig::BenchmarkC
    else
        "all"
    end
+   logger_factory_path = if logger_factory === nothing
+       # Default to `TerminalLoggers.TerminalLogger`; load via
+       # `PkgBenchmark` namespace so that users don't have to add it
+       # separately.
+       (objectpath(@__MODULE__)..., :TerminalLogger)
+   else
+       objectpath(logger_factory)
+   end
    exec_str = isempty(_custom_loadpath) ? "" : "push!(LOAD_PATH, \"$(_custom_loadpath)\")\n"
    exec_str *=
        """
        using PkgBenchmark
-       PkgBenchmark._runbenchmark_local($(repr(_file)), $(repr(_output)), $(repr(_tunefile)), $(repr(retune)), $(repr(progressoptions)))
+       PkgBenchmark._runbenchmark_local(
+           $(repr(_file)),
+           $(repr(_output)),
+           $(repr(_tunefile)),
+           $(repr(retune)),
+           $(repr(runoptions)),
+           $(repr(logger_factory_path)),
+       )
        """

    target_env = [k => v for (k, v) in benchmarkconfig.env]
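For the default case, the path reaches `TerminalLogger` through `PkgBenchmark`'s own namespace, so users need not install TerminalLoggers themselves. A sketch (it assumes `PkgBenchmark.TerminalLogger` is bound internally, as the comment in this hunk indicates):

```julia
using PkgBenchmark
using PkgBenchmark: objectpath, loadobject

# What `logger_factory_path` defaults to inside `_runbenchmark`:
path = (objectpath(PkgBenchmark)..., :TerminalLogger)

# ...and how the subprocess uses it: resolve the callable by path, then
# call it with no arguments to construct the logger.
logger = loadobject(path)()
```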
@@ -166,8 +247,13 @@ function _runbenchmark(file::String, output::String, benchmarkconfig::BenchmarkC
    return JSON.parsefile(output)
end

+function _runbenchmark_local(file, output, tunefile, retune, runoptions, logger_factory_path)
+    with_logger(loadobject(logger_factory_path)()) do
+        __runbenchmark_local(file, output, tunefile, retune, runoptions)
+    end
+end

-function _runbenchmark_local(file, output, tunefile, retune, progressoptions)
+function __runbenchmark_local(file, output, tunefile, retune, runoptions)
    # Loading
    Base.include(Main, file)
    if !isdefined(Main, :SUITE)
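The wrapper's pattern in isolation (a runnable sketch with a stand-in factory from the Logging stdlib):

```julia
using Logging

# A logger factory is any zero-argument callable; a logger type works.
factory = ConsoleLogger

# Construct the logger and install it for the duration of the work:
# the same shape `_runbenchmark_local` uses around the benchmark run.
with_logger(factory()) do
    @info "benchmarks would run here"
end
```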
@@ -182,12 +268,12 @@ function _runbenchmark_local(file, output, tunefile, retune, progressoptions)
    else
        _benchinfo("creating benchmark tuning file $(abspath(tunefile))...")
        mkpath(dirname(tunefile))
-       _tune!(suite, progressoptions = progressoptions)
+       BenchmarkTools.tune!(suite; runoptions...)
        BenchmarkTools.save(tunefile, params(suite));
    end

    # Running
-   results = _run(suite, progressoptions = progressoptions)
+   results = run(suite; runoptions...)

    # Output
    vinfo = first(split(sprint((io) -> versioninfo(io; verbose = true)), "Environment"))
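How the `runoptions` splat reaches BenchmarkTools (a self-contained sketch):

```julia
using BenchmarkTools

suite = BenchmarkGroup()
suite["sum"] = @benchmarkable sum($(rand(100)))

# `runoptions` arrives as a NamedTuple such as `(verbose = true,)`;
# splatting it turns each entry into a keyword argument of `tune!`/`run`.
runoptions = (verbose = true,)
BenchmarkTools.tune!(suite; runoptions...)
results = run(suite; runoptions...)
```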
@@ -202,52 +288,3 @@ function _runbenchmark_local(file, output, tunefile, retune, progressoptions)
    end
    return nothing
end
-
-
-function _tune!(group::BenchmarkTools.BenchmarkGroup; verbose::Bool = false, root = true,
-               progressoptions = NamedTuple(),
-               prog = Progress(length(BenchmarkTools.leaves(group)); desc = "Tuning: ", progressoptions...),
-               hierarchy = [], kwargs...)
-    BenchmarkTools.gcscrub() # run GC before running group, even if individual benchmarks don't manually GC
-    i = 1
-    for id in keys(group)
-        _tune!(group[id]; verbose = verbose, prog = prog, hierarchy = push!(copy(hierarchy), (repr(id), i, length(keys(group)))), kwargs...)
-        i += 1
-    end
-    return group
-end
-
-function _tune!(b::BenchmarkTools.Benchmark, p::BenchmarkTools.Parameters = b.params;
-               prog = nothing, verbose::Bool = false, pad = "", hierarchy = [], kwargs...)
-    BenchmarkTools.warmup(b, verbose = false)
-    estimate = ceil(Int, minimum(BenchmarkTools.lineartrial(b, p; kwargs...)))
-    b.params.evals = BenchmarkTools.guessevals(estimate)
-    if prog != nothing
-        indent = 0
-        ProgressMeter.next!(prog; showvalues = [map(id -> (" "^(indent += 1) * "[$(id[2])/$(id[3])]", id[1]), hierarchy)...])
-    end
-    return b
-end
-
-function _run(group::BenchmarkTools.BenchmarkGroup, args...;
-             progressoptions = NamedTuple(),
-             prog = Progress(length(BenchmarkTools.leaves(group)); desc = "Benchmarking: ", progressoptions...), hierarchy = [], kwargs...)
-    result = similar(group)
-    BenchmarkTools.gcscrub() # run GC before running group, even if individual benchmarks don't manually GC
-    i = 1
-    for id in keys(group)
-        result[id] = _run(group[id], args...; prog = prog, hierarchy = push!(copy(hierarchy), (repr(id), i, length(keys(group)))), kwargs...)
-        i += 1
-    end
-    return result
-end
-
-function _run(b::BenchmarkTools.Benchmark, p::BenchmarkTools.Parameters = b.params;
-             prog = nothing, verbose::Bool = false, pad = "", hierarchy = [], kwargs...)
-    res = BenchmarkTools.run_result(b, p; kwargs...)[1]
-    if prog != nothing
-        indent = 0
-        ProgressMeter.next!(prog; showvalues = [map(id -> (" "^(indent += 1) * "[$(id[2])/$(id[3])]", id[1]), hierarchy)...])
-    end
-    return res
-end