
Commit b1422fc

brendandahl and kripken authored
Add a benchmarker that stores sizes in a skia perf compatible file. (#22097)
This will be run by emscripten releases to start tracking size benchmarks on our skia perf instance. I've intentionally only added two benchmarks to start, for testing. The output format was verified using perf-tool. More info on the format at https://skia.googlesource.com/buildbot/+/refs/heads/main/perf/FORMAT.md

Co-authored-by: Alon Zakai <alonzakai@gmail.com>
1 parent: b8c6e23 · commit: b1422fc

File tree

1 file changed (+77, -4 lines)

test/test_benchmark.py

Lines changed: 77 additions & 4 deletions
@@ -10,6 +10,7 @@
 import sys
 import time
 import unittest
+import json
 import zlib
 from pathlib import Path
 from typing import List
@@ -56,6 +57,9 @@
 
 
 class Benchmarker():
+  # Whether to record statistics. Set by SizeBenchmarker.
+  record_stats = False
+
   # called when we init the object, which is during startup, even if we are
   # not running benchmarks
   def __init__(self, name):
@@ -112,14 +116,39 @@ def display(self, baseline=None):
 
     # size
 
-    size = sum(os.path.getsize(f) for f in self.get_output_files())
-    gzip_size = sum(len(zlib.compress(read_binary(f))) for f in self.get_output_files())
+    recorded_stats = []
+
+    def add_stat(name, size, gzip_size):
+      recorded_stats.append({
+        'value': name,
+        'measurement': size,
+      })
+      recorded_stats.append({
+        'value': name + ' (gzipped)',
+        'measurement': gzip_size,
+      })
+
+    total_size = 0
+    total_gzip_size = 0
 
-    print(' size: %8s, compressed: %8s' % (size, gzip_size), end=' ')
+    for file in self.get_output_files():
+      size = os.path.getsize(file)
+      gzip_size = len(zlib.compress(read_binary(file)))
+      if self.record_stats:
+        add_stat(utils.removeprefix(os.path.basename(file), 'size_'), size, gzip_size)
+      total_size += size
+      total_gzip_size += gzip_size
+
+    if self.record_stats:
+      add_stat('total', total_size, total_gzip_size)
+
+    print(' size: %8s, compressed: %8s' % (total_size, total_gzip_size), end=' ')
     if self.get_size_text():
       print(' (' + self.get_size_text() + ')', end=' ')
     print()
 
+    return recorded_stats
+
   def get_size_text(self):
     return ''
 
@@ -240,6 +269,21 @@ def get_output_files(self):
     return ret
 
 
+# This benchmarker will make a test benchmark build with Emscripten and record
+# the file output sizes in out/test/stats.json. The file format is specified at
+# https://skia.googlesource.com/buildbot/+/refs/heads/main/perf/FORMAT.md
+# Running the benchmark will be skipped.
+class SizeBenchmarker(EmscriptenBenchmarker):
+  record_stats = True
+
+  def __init__(self, name):
+    # do not set an engine, as we will not run the code
+    super().__init__(name, engine=None)
+
+  # we will not actually run the benchmarks
+  run = None
+
+
 CHEERP_BIN = '/opt/cheerp/bin/'
 
 
@@ -317,6 +361,7 @@ def get_output_files(self):
 named_benchmarkers = {
   'clang': NativeBenchmarker('clang', [CLANG_CC], [CLANG_CXX]),
   'gcc': NativeBenchmarker('gcc', ['gcc', '-no-pie'], ['g++', '-no-pie']),
+  'size': SizeBenchmarker('size'),
   'v8': EmscriptenBenchmarker('v8', aot_v8),
   'v8-lto': EmscriptenBenchmarker('v8-lto', aot_v8, ['-flto']),
   'v8-ctors': EmscriptenBenchmarker('v8-ctors', aot_v8, ['-sEVAL_CTORS']),
@@ -337,6 +382,7 @@ def get_output_files(self):
 
 class benchmark(common.RunnerCore):
   save_dir = True
+  stats = []  # type: ignore
 
   @classmethod
   def setUpClass(cls):
@@ -358,6 +404,17 @@ def setUpClass(cls):
     fingerprint.append('llvm: ' + config.LLVM_ROOT)
     print('Running Emscripten benchmarks... [ %s ]' % ' | '.join(fingerprint))
 
+  @classmethod
+  def tearDownClass(cls):
+    super().tearDownClass()
+    if cls.stats:
+      output = {
+        'version': 1,
+        'git_hash': '',
+        'results': cls.stats
+      }
+      utils.write_file('stats.json', json.dumps(output, indent=2) + '\n')
+
   # avoid depending on argument reception from the commandline
   def hardcode_arguments(self, code):
     if not code or 'int main()' in code:
@@ -397,11 +454,27 @@ def do_benchmark(self, name, src, expected_output='FAIL', args=None,
     for b in benchmarkers:
       if skip_native and isinstance(b, NativeBenchmarker):
         continue
+      if not b.run:
+        # If we won't run the benchmark, we don't need repetitions.
+        reps = 0
       baseline = b
       print('Running benchmarker: %s: %s' % (b.__class__.__name__, b.name))
       b.build(self, filename, args, shared_args, emcc_args, native_args, native_exec, lib_builder, has_output_parser=output_parser is not None)
       b.bench(args, output_parser, reps, expected_output)
-      b.display(baseline)
+      recorded_stats = b.display(baseline)
+      if recorded_stats:
+        self.add_stats(name, recorded_stats)
+
+  def add_stats(self, name, stats):
+    self.stats.append({
+      'key': {
+        'test': name,
+        'units': 'bytes'
+      },
+      'measurements': {
+        'stats': stats
+      }
+    })
 
   def test_primes(self, check=True):
     src = r'''
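
For reference, given the tearDownClass and add_stats code added above, the stats.json written after a size run would have roughly the following shape. This is an illustrative sketch only, not part of the commit; the test name and byte counts are hypothetical placeholders.

# Illustrative only: approximate shape of the Skia Perf compatible stats.json
# produced by the SizeBenchmarker path (see tearDownClass/add_stats above).
example_stats_json = {
  'version': 1,
  'git_hash': '',
  'results': [
    {
      'key': {'test': 'some_benchmark', 'units': 'bytes'},  # hypothetical test name
      'measurements': {
        'stats': [
          {'value': 'total', 'measurement': 123456},           # placeholder size
          {'value': 'total (gzipped)', 'measurement': 45678},  # placeholder size
        ],
      },
    },
  ],
}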
