Skip to content

Commit 28e5957

Browse files
authored
Add environment variable to select which benchmarkers to run. (#22139)
EMTEST_BENCHMARKERS accepts a comma-separated list of benchmarkers to run.
1 parent d079eca commit 28e5957

File tree

4 files changed

+39
-51
lines changed

4 files changed

+39
-51
lines changed

site/source/docs/getting_started/test-suite.rst

Lines changed: 12 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -181,11 +181,19 @@ As with all the test suites, you can also run a specific benchmark:
181181
# Run one specific benchmark
182182
test/runner benchmark.test_skinning
183183
184-
Usually you will want to customize the in `test/test_benchmark.py` to
185-
run the benchmarks you want (there is currently no external config file). Things
186-
you may want to modify include:
184+
You can also specify which benchmarkers are run by using the environment
185+
variable `EMTEST_BENCHMARKERS`. It accepts a comma-separated list of named
186+
benchmarkers (names can be found in `named_benchmarkers` in
187+
`test/test_benchmark.py`).
188+
189+
.. code-block:: bash
190+
191+
# Run one specific benchmark with clang and v8.
192+
EMTEST_BENCHMARKERS=clang,v8 test/runner benchmark.test_skinning
193+
194+
To further customize how the benchmarks are run, you will want to edit the file
195+
`test/test_benchmark.py`. Some of the options include:
187196

188-
* ``benchmarkers`` is the list of VMs to run the benchmarks on.
189197
* ``DEFAULT_ARG`` is how long the benchmark should run (they all try to run for
190198
a similar amount of time for consistency).
191199
* ``TEST_REPS`` is how many times to repeat each run (more will take longer, but

test/common.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,6 @@
6363
EMTEST_LACKS_NATIVE_CLANG = None
6464
EMTEST_VERBOSE = None
6565
EMTEST_REBASELINE = None
66-
EMTEST_FORCE64 = None
6766

6867
# Verbosity level control for subprocess calls to configure + make.
6968
# 0: disabled.

test/runner.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -406,7 +406,6 @@ def configure():
406406
common.EMTEST_LACKS_NATIVE_CLANG = int(os.getenv('EMTEST_LACKS_NATIVE_CLANG', '0'))
407407
common.EMTEST_REBASELINE = int(os.getenv('EMTEST_REBASELINE', '0'))
408408
common.EMTEST_VERBOSE = int(os.getenv('EMTEST_VERBOSE', '0')) or shared.DEBUG
409-
common.EMTEST_FORCE64 = int(os.getenv('EMTEST_FORCE64', '0'))
410409
if common.EMTEST_VERBOSE:
411410
logging.root.setLevel(logging.DEBUG)
412411

test/test_benchmark.py

Lines changed: 27 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -50,6 +50,10 @@
5050

5151
LLVM_FEATURE_FLAGS = ['-mnontrapping-fptoint']
5252

53+
# A comma separated list of benchmarkers to run during test_benchmark tests. See
54+
# `named_benchmarkers` for what is available.
55+
EMTEST_BENCHMARKERS = os.getenv('EMTEST_BENCHMARKERS', 'clang,v8,v8-lto,v8-ctors')
56+
5357

5458
class Benchmarker():
5559
# called when we init the object, which is during startup, even if we are
@@ -305,52 +309,30 @@ def get_output_files(self):
305309

306310
benchmarkers: List[Benchmarker] = []
307311

308-
if not common.EMTEST_FORCE64:
309-
benchmarkers += [
310-
NativeBenchmarker('clang', [CLANG_CC], [CLANG_CXX]),
311-
# NativeBenchmarker('gcc', ['gcc', '-no-pie'], ['g++', '-no-pie'])
312-
]
313-
314-
if config.V8_ENGINE and config.V8_ENGINE in config.JS_ENGINES:
315-
# avoid the baseline compiler running, because it adds a lot of noise
316-
# (the nondeterministic time it takes to get to the full compiler ends up
317-
# mattering as much as the actual benchmark)
318-
aot_v8 = config.V8_ENGINE + ['--no-liftoff']
319-
default_v8_name = os.environ.get('EMBENCH_NAME') or 'v8'
320-
if common.EMTEST_FORCE64:
321-
benchmarkers += [
322-
EmscriptenBenchmarker(default_v8_name, aot_v8, ['-sMEMORY64=2']),
323-
]
324-
else:
325-
benchmarkers += [
326-
EmscriptenBenchmarker(default_v8_name, aot_v8),
327-
EmscriptenBenchmarker(default_v8_name + '-lto', aot_v8, ['-flto']),
328-
EmscriptenBenchmarker(default_v8_name + '-ctors', aot_v8, ['-sEVAL_CTORS']),
329-
]
330-
if os.path.exists(CHEERP_BIN):
331-
benchmarkers += [
332-
# CheerpBenchmarker('cheerp-v8-wasm', aot_v8),
333-
]
334-
335-
if config.SPIDERMONKEY_ENGINE and config.SPIDERMONKEY_ENGINE in config.JS_ENGINES:
312+
# avoid the baseline compiler running, because it adds a lot of noise
313+
# (the nondeterministic time it takes to get to the full compiler ends up
314+
# mattering as much as the actual benchmark)
315+
aot_v8 = (config.V8_ENGINE if config.V8_ENGINE else []) + ['--no-liftoff']
316+
317+
named_benchmarkers = {
318+
'clang': NativeBenchmarker('clang', [CLANG_CC], [CLANG_CXX]),
319+
'gcc': NativeBenchmarker('gcc', ['gcc', '-no-pie'], ['g++', '-no-pie']),
320+
'v8': EmscriptenBenchmarker('v8', aot_v8),
321+
'v8-lto': EmscriptenBenchmarker('v8-lto', aot_v8, ['-flto']),
322+
'v8-ctors': EmscriptenBenchmarker('v8-ctors', aot_v8, ['-sEVAL_CTORS']),
323+
'v8-64': EmscriptenBenchmarker('v8-64', aot_v8, ['-sMEMORY64=2']),
324+
'node': EmscriptenBenchmarker('node', config.NODE_JS),
325+
'node-64': EmscriptenBenchmarker('node-64', config.NODE_JS, ['-sMEMORY64=2']),
326+
'cherp-v8': CheerpBenchmarker('cheerp-v8-wasm', aot_v8),
336327
# TODO: ensure no baseline compiler is used, see v8
337-
benchmarkers += [
338-
# EmscriptenBenchmarker('sm', SPIDERMONKEY_ENGINE),
339-
]
340-
if os.path.exists(CHEERP_BIN):
341-
benchmarkers += [
342-
# CheerpBenchmarker('cheerp-sm-wasm', SPIDERMONKEY_ENGINE),
343-
]
344-
345-
if config.NODE_JS and config.NODE_JS in config.JS_ENGINES:
346-
if common.EMTEST_FORCE64:
347-
benchmarkers += [
348-
EmscriptenBenchmarker('Node.js', config.NODE_JS, ['-sMEMORY64=2']),
349-
]
350-
else:
351-
benchmarkers += [
352-
# EmscriptenBenchmarker('Node.js', config.NODE_JS),
353-
]
328+
'sm': EmscriptenBenchmarker('sm', config.SPIDERMONKEY_ENGINE),
329+
'cherp-sm': CheerpBenchmarker('cheerp-sm-wasm', config.SPIDERMONKEY_ENGINE)
330+
}
331+
332+
for name in EMTEST_BENCHMARKERS.split(','):
333+
if name not in named_benchmarkers:
334+
raise Exception('error, unknown benchmarker ' + name)
335+
benchmarkers.append(named_benchmarkers[name])
354336

355337

356338
class benchmark(common.RunnerCore):

0 commit comments

Comments
 (0)