Skip to content

Commit 9cf854e

Browse files
committed
fix for user conf
1 parent 1a6464a commit 9cf854e

File tree

2 files changed

+9
-6
lines changed

2 files changed

+9
-6
lines changed

script/app-mlperf-automotive-mlcommons-python/meta.yaml

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -458,7 +458,8 @@ variations:
458458
bevformer:
459459
group: models
460460
default_env:
461-
MLC_MLPERF_MAX_QUERY_COUNT: 6636
461+
MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE: 99.9
462+
MLC_DEFAULT_MLPERF_MAX_QUERY_COUNT: 6636
462463
add_deps_recursive:
463464
pytorch:
464465
version_max: "2.5.1"
@@ -495,7 +496,8 @@ variations:
495496
ssd:
496497
group: models
497498
default_env:
498-
MLC_MLPERF_MAX_QUERY_COUNT: 6636
499+
MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE: 99.9
500+
MLC_DEFAULT_MLPERF_MAX_QUERY_COUNT: 6636
499501
add_deps_recursive:
500502
pytorch:
501503
version_max: "2.3.1"
@@ -535,7 +537,8 @@ variations:
535537
deeplabv3plus:
536538
group: models
537539
default_env:
538-
MLC_MLPERF_MAX_QUERY_COUNT: 6636
540+
MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE: 99.9
541+
MLC_DEFAULT_MLPERF_MAX_QUERY_COUNT: 6636
539542
add_deps_recursive:
540543
pytorch:
541544
version_max: "2.3.1"

script/generate-mlperf-inference-user-conf/customize.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -368,11 +368,11 @@ def preprocess(i):
368368
user_conf += ml_model_name + "." + scenario + ".min_duration = " + \
369369
env['MLC_MLPERF_INFERENCE_MIN_DURATION'] + " \n"
370370
elif not is_false(env.get('MLC_MLPERF_USE_MAX_DURATION', 'yes')) and not is_true(env.get(
371-
'MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no')) and env.get('MLC_BENCHMARK_GROUP') != 'automotive':
371+
'MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no')) and env.get('MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE') != '99.9':
372372
user_conf += ml_model_name + "." + scenario + \
373373
f".max_duration = {max_duration_valid}" + "\n"
374-
if scenario == "SingleStream" and env.get('MLC_BENCHMARK_GROUP') == 'automotive' and env.get(
375-
'MLC_MLPERF_MAX_QUERY_COUNT', '') != '' and env.get('MLC_MLPERF_TARGET_LATENCY', '') == '':
374+
if scenario == "SingleStream" and env.get('MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE') == '99.9' and env.get(
375+
'MLC_DEFAULT_MLPERF_MAX_QUERY_COUNT', '') != '' and env.get('MLC_MLPERF_TARGET_LATENCY', '') == '':
376376
user_conf += ml_model_name + "." + scenario + \
377377
f".max_query_count = {env.get('MLC_MLPERF_MAX_QUERY_COUNT') or env.get('MLC_DEFAULT_MLPERF_MAX_QUERY_COUNT')}" + "\n"
378378
if scenario == "MultiStream":

0 commit comments

Comments
 (0)