From 5c3a3ce4f795d33636dbcc782a7ed1ad76edf655 Mon Sep 17 00:00:00 2001
From: anandhu-eng
Date: Wed, 28 May 2025 23:18:31 +0530
Subject: [PATCH 01/18] fix early stopping issue + cmd generation for ssd

---
 script/app-mlperf-automotive-mlcommons-python/customize.py | 2 +-
 script/app-mlperf-automotive-mlcommons-python/meta.yaml    | 6 ++++++
 script/generate-mlperf-inference-user-conf/customize.py    | 5 ++++-
 script/generate-mlperf-inference-user-conf/meta.yaml       | 4 ++++
 4 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/script/app-mlperf-automotive-mlcommons-python/customize.py b/script/app-mlperf-automotive-mlcommons-python/customize.py
index 688a52dc9..60cd53b4f 100644
--- a/script/app-mlperf-automotive-mlcommons-python/customize.py
+++ b/script/app-mlperf-automotive-mlcommons-python/customize.py
@@ -263,7 +263,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options,
 
         config_path = "baseline_8MP_ss_scales_fm1_5x5_all"
 
-        cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {backend} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_SSD_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}"""
+        cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {backend} --cognata-root-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_SSD_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}"""
 
     elif env['MLC_MODEL'] in ['deeplabv3plus']:
         run_dir = env['MLC_MLPERF_INFERENCE_DEEPLABV3PLUS_PATH']
diff --git a/script/app-mlperf-automotive-mlcommons-python/meta.yaml b/script/app-mlperf-automotive-mlcommons-python/meta.yaml
index cf95e283b..a071afbda 100644
--- a/script/app-mlperf-automotive-mlcommons-python/meta.yaml
+++ b/script/app-mlperf-automotive-mlcommons-python/meta.yaml
@@ -457,6 +457,8 @@ variations:
 
   bevformer:
     group: models
+    default_env:
+      MLC_MLPERF_MAX_QUERY_COUNT: 6636
     add_deps_recursive:
       pytorch:
         version_max: "2.5.1"
@@ -492,6 +494,8 @@ variations:
 
   ssd:
     group: models
+    default_env:
+      MLC_MLPERF_MAX_QUERY_COUNT: 6636
     add_deps_recursive:
       pytorch:
         version_max: "2.3.1"
@@ -530,6 +534,8 @@ variations:
 
   deeplabv3plus:
     group: models
+    default_env:
+      MLC_MLPERF_MAX_QUERY_COUNT: 6636
     add_deps_recursive:
       pytorch:
         version_max: "2.3.1"
diff --git a/script/generate-mlperf-inference-user-conf/customize.py b/script/generate-mlperf-inference-user-conf/customize.py
index b08f0b0fb..1268ba7e8 100644
--- a/script/generate-mlperf-inference-user-conf/customize.py
+++ b/script/generate-mlperf-inference-user-conf/customize.py
@@ -368,9 +368,12 @@ def preprocess(i):
         user_conf += ml_model_name + "." + scenario + ".min_duration = " + \
             env['MLC_MLPERF_INFERENCE_MIN_DURATION'] + " \n"
     elif not is_false(env.get('MLC_MLPERF_USE_MAX_DURATION', 'yes')) and not is_true(env.get(
-            'MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no')):
+            'MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no')) and env.get('MLC_BENCHMARK_GROUP') != 'automotive':
         user_conf += ml_model_name + "." + scenario + \
             f".max_duration = {max_duration_valid}" + "\n"
+        if scenario == "SingleStream" and env.get('MLC_BENCHMARK_GROUP') == 'automotive' and env.get('MLC_MLPERF_MAX_QUERY_COUNT', '') != '' and env.get('MLC_MLPERF_TARGET_LATENCY', '') == '':
+            user_conf += ml_model_name + "." + scenario + \
+                f".max_query_count = {env.get('MLC_MLPERF_MAX_QUERY_COUNT')}" + "\n"
     if scenario == "MultiStream":
         user_conf += ml_model_name + "." + scenario + ".min_query_count = " + \
             env.get(
diff --git a/script/generate-mlperf-inference-user-conf/meta.yaml b/script/generate-mlperf-inference-user-conf/meta.yaml
index 4288bd443..8b825f8e1 100644
--- a/script/generate-mlperf-inference-user-conf/meta.yaml
+++ b/script/generate-mlperf-inference-user-conf/meta.yaml
@@ -91,12 +91,16 @@ variations:
   inference:
     default: true
     group: benchmark_wg
+    env:
+      MLC_BENCHMARK_GROUP: inference
     deps:
     - tags: get,mlcommons,inference,src
       names:
       - inference-src
   automotive:
     group: benchmark_wg
+    env:
+      MLC_BENCHMARK_GROUP: automotive
     deps:
     - tags: get,mlcommons,automotive,src
       names:

From 052f285378287647a532bc8369ce58d82b205981 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
Date: Wed, 28 May 2025 17:48:54 +0000
Subject: [PATCH 02/18] [Automated Commit] Format Codebase [skip ci]

---
 script/generate-mlperf-inference-user-conf/customize.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/script/generate-mlperf-inference-user-conf/customize.py b/script/generate-mlperf-inference-user-conf/customize.py
index 1268ba7e8..375d775fa 100644
--- a/script/generate-mlperf-inference-user-conf/customize.py
+++ b/script/generate-mlperf-inference-user-conf/customize.py
@@ -371,7 +371,8 @@ def preprocess(i):
             'MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no')) and env.get('MLC_BENCHMARK_GROUP') != 'automotive':
         user_conf += ml_model_name + "." + scenario + \
             f".max_duration = {max_duration_valid}" + "\n"
-        if scenario == "SingleStream" and env.get('MLC_BENCHMARK_GROUP') == 'automotive' and env.get('MLC_MLPERF_MAX_QUERY_COUNT', '') != '' and env.get('MLC_MLPERF_TARGET_LATENCY', '') == '':
+        if scenario == "SingleStream" and env.get('MLC_BENCHMARK_GROUP') == 'automotive' and env.get(
+                'MLC_MLPERF_MAX_QUERY_COUNT', '') != '' and env.get('MLC_MLPERF_TARGET_LATENCY', '') == '':
             user_conf += ml_model_name + "." + scenario + \
                 f".max_query_count = {env.get('MLC_MLPERF_MAX_QUERY_COUNT')}" + "\n"
     if scenario == "MultiStream":
         user_conf += ml_model_name + "." + scenario + ".min_query_count = " + \
             env.get(

From 1ca3fc551b48de89744f8c272f911e8bd2b5bb96 Mon Sep 17 00:00:00 2001
From: anandhu-eng
Date: Wed, 28 May 2025 23:35:06 +0530
Subject: [PATCH 03/18] correct typo

---
 script/get-preprocessed-dataset-cognata/meta.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/script/get-preprocessed-dataset-cognata/meta.yaml b/script/get-preprocessed-dataset-cognata/meta.yaml
index c8bac0417..524633f11 100644
--- a/script/get-preprocessed-dataset-cognata/meta.yaml
+++ b/script/get-preprocessed-dataset-cognata/meta.yaml
@@ -36,7 +36,7 @@ variations:
       MLC_DATASET_COGNATA_TASK: segmentation
   validation,2d_obj_det:
     env:
-      MLC_DATASET_COGNATA_EXTRACTED_FOLDER_NAMEE: val_2d
+      MLC_DATASET_COGNATA_EXTRACTED_FOLDER_NAME: val_2d
       MLC_DATASET_COGNATA_TAR_FILENAME: val_2d.tar.gz
       MLC_DOWNLOAD_URL: mlc-cognata:mlc_cognata_dataset/preprocessed_2d/<<<MLC_DATASET_COGNATA_TAR_FILENAME>>>
   calibration,2d_obj_det:

From 1a6464ac5688537084583b093f8521ef1d0bb41b Mon Sep 17 00:00:00 2001
From: anandhu-eng
Date: Wed, 28 May 2025 23:49:51 +0530
Subject: [PATCH 04/18] correct tags

---
 script/app-mlperf-automotive/meta.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/script/app-mlperf-automotive/meta.yaml b/script/app-mlperf-automotive/meta.yaml
index db81b5313..85a70953f 100644
--- a/script/app-mlperf-automotive/meta.yaml
+++ b/script/app-mlperf-automotive/meta.yaml
@@ -303,7 +303,7 @@ variations:
       MLC_MODEL: ssd
     docker:
       deps:
-      - tags: get,preprocessed,dataset,cognata,_mlc,_2d_object_det,_validation
+      - tags: get,preprocessed,dataset,cognata,_mlc,_2d_obj_det,_validation
        enable_if_env:
          MLC_USE_DATASET_FROM_HOST:
          - "yes"

From 9cf854ed3a7227a63217b39a031f28e20ff857d5 Mon Sep 17 00:00:00 2001
From: anandhu-eng
Date: Thu, 29 May 2025 02:00:12 +0530
Subject: [PATCH 05/18] fix for user conf

---
 script/app-mlperf-automotive-mlcommons-python/meta.yaml | 9 ++++++---
 script/generate-mlperf-inference-user-conf/customize.py | 6 +++---
 2 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/script/app-mlperf-automotive-mlcommons-python/meta.yaml b/script/app-mlperf-automotive-mlcommons-python/meta.yaml
index a071afbda..a7c602823 100644
--- a/script/app-mlperf-automotive-mlcommons-python/meta.yaml
+++ b/script/app-mlperf-automotive-mlcommons-python/meta.yaml
@@ -458,7 +458,8 @@ variations:
   bevformer:
     group: models
     default_env:
-      MLC_MLPERF_MAX_QUERY_COUNT: 6636
+      MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE: 99.9
+      MLC_DEFAULT_MLPERF_MAX_QUERY_COUNT: 6636
     add_deps_recursive:
       pytorch:
         version_max: "2.5.1"
@@ -495,7 +496,8 @@
   ssd:
     group: models
     default_env:
-      MLC_MLPERF_MAX_QUERY_COUNT: 6636
+      MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE: 99.9
+      MLC_DEFAULT_MLPERF_MAX_QUERY_COUNT: 6636
     add_deps_recursive:
       pytorch:
         version_max: "2.3.1"
@@ -536,7 +538,8 @@
   deeplabv3plus:
     group: models
     default_env:
-      MLC_MLPERF_MAX_QUERY_COUNT: 6636
+      MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE: 99.9
+      MLC_DEFAULT_MLPERF_MAX_QUERY_COUNT: 6636
     add_deps_recursive:
       pytorch:
         version_max: "2.3.1"
diff --git a/script/generate-mlperf-inference-user-conf/customize.py b/script/generate-mlperf-inference-user-conf/customize.py
index 375d775fa..20133ffcb 100644
--- a/script/generate-mlperf-inference-user-conf/customize.py
+++ b/script/generate-mlperf-inference-user-conf/customize.py
@@ -368,11 +368,11 @@ def preprocess(i):
         user_conf += ml_model_name + "." + scenario + ".min_duration = " + \
             env['MLC_MLPERF_INFERENCE_MIN_DURATION'] + " \n"
     elif not is_false(env.get('MLC_MLPERF_USE_MAX_DURATION', 'yes')) and not is_true(env.get(
-            'MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no')) and env.get('MLC_BENCHMARK_GROUP') != 'automotive':
+            'MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no')) and env.get('MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE') != '99.9':
         user_conf += ml_model_name + "." + scenario + \
             f".max_duration = {max_duration_valid}" + "\n"
-        if scenario == "SingleStream" and env.get('MLC_BENCHMARK_GROUP') == 'automotive' and env.get(
-                'MLC_MLPERF_MAX_QUERY_COUNT', '') != '' and env.get('MLC_MLPERF_TARGET_LATENCY', '') == '':
+        if scenario == "SingleStream" and env.get('MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE') == '99.9' and env.get(
+                'MLC_MLPERF_DEFAULT_MAX_QUERY_COUNT', '') != '' and env.get('MLC_MLPERF_TARGET_LATENCY', '') == '':
             user_conf += ml_model_name + "." + scenario + \
                 f".max_query_count = {env.get('MLC_MLPERF_MAX_QUERY_COUNT')}" + "\n"
     if scenario == "MultiStream":
         user_conf += ml_model_name + "." + scenario + ".min_query_count = " + \
             env.get(

From f2786e3fef16a0c5e5bf2e28aecc9355f0450af5 Mon Sep 17 00:00:00 2001
From: anandhu-eng
Date: Thu, 29 May 2025 02:52:44 +0530
Subject: [PATCH 06/18] fix typos

---
 script/app-mlperf-automotive-mlcommons-python/meta.yaml | 6 +++---
 script/generate-mlperf-inference-user-conf/customize.py | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/script/app-mlperf-automotive-mlcommons-python/meta.yaml b/script/app-mlperf-automotive-mlcommons-python/meta.yaml
index a7c602823..aee079df6 100644
--- a/script/app-mlperf-automotive-mlcommons-python/meta.yaml
+++ b/script/app-mlperf-automotive-mlcommons-python/meta.yaml
@@ -459,7 +459,7 @@ variations:
     group: models
     default_env:
       MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE: 99.9
-      MLC_DEFAULT_MLPERF_MAX_QUERY_COUNT: 6636
+      MLC_MLPERF_DEFAULT_MAX_QUERY_COUNT: 6636
     add_deps_recursive:
       pytorch:
         version_max: "2.5.1"
@@ -497,7 +497,7 @@ ssd:
     group: models
     default_env:
       MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE: 99.9
-      MLC_DEFAULT_MLPERF_MAX_QUERY_COUNT: 6636
+      MLC_MLPERF_DEFAULT_MAX_QUERY_COUNT: 6636
     add_deps_recursive:
       pytorch:
         version_max: "2.3.1"
@@ -538,7 +538,7 @@ deeplabv3plus:
     group: models
     default_env:
      MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE: 99.9
-      MLC_DEFAULT_MLPERF_MAX_QUERY_COUNT: 6636
+      MLC_MLPERF_DEFAULT_MAX_QUERY_COUNT: 6636
     add_deps_recursive:
       pytorch:
         version_max: "2.3.1"
diff --git a/script/generate-mlperf-inference-user-conf/customize.py b/script/generate-mlperf-inference-user-conf/customize.py
index 20133ffcb..3377af28c 100644
--- a/script/generate-mlperf-inference-user-conf/customize.py
+++ b/script/generate-mlperf-inference-user-conf/customize.py
@@ -374,7 +374,7 @@ def preprocess(i):
                 'MLC_MLPERF_DEFAULT_MAX_QUERY_COUNT', '') != '' and env.get('MLC_MLPERF_TARGET_LATENCY', '') == '':
             user_conf += ml_model_name + "." + scenario + \
-                f".max_query_count = {env.get('MLC_MLPERF_MAX_QUERY_COUNT')}" + "\n"
+                f".max_query_count = {env.get('MLC_MLPERF_DEFAULT_MAX_QUERY_COUNT')}" + "\n"
     if scenario == "MultiStream":
         user_conf += ml_model_name + "." + scenario + ".min_query_count = " + \
             env.get(

From 5f646675d0ac8a1b3d7a3c9145d765d750eaae16 Mon Sep 17 00:00:00 2001
From: anandhu-eng
Date: Thu, 29 May 2025 02:53:36 +0530
Subject: [PATCH 07/18] percentile to str

---
 script/app-mlperf-automotive-mlcommons-python/meta.yaml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/script/app-mlperf-automotive-mlcommons-python/meta.yaml b/script/app-mlperf-automotive-mlcommons-python/meta.yaml
index aee079df6..5651a2116 100644
--- a/script/app-mlperf-automotive-mlcommons-python/meta.yaml
+++ b/script/app-mlperf-automotive-mlcommons-python/meta.yaml
@@ -458,7 +458,7 @@ variations:
   bevformer:
     group: models
     default_env:
-      MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE: 99.9
+      MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE: "99.9"
       MLC_MLPERF_DEFAULT_MAX_QUERY_COUNT: 6636
     add_deps_recursive:
       pytorch:
@@ -496,7 +496,7 @@
   ssd:
     group: models
     default_env:
-      MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE: 99.9
+      MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE: "99.9"
       MLC_MLPERF_DEFAULT_MAX_QUERY_COUNT: 6636
     add_deps_recursive:
       pytorch:
@@ -537,7 +537,7 @@
   deeplabv3plus:
     group: models
     default_env:
-      MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE: 99.9
+      MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE: "99.9"
       MLC_MLPERF_DEFAULT_MAX_QUERY_COUNT: 6636
     add_deps_recursive:
       pytorch:

From 532074cc91f16a6972eea13601cd4be8504ac327 Mon Sep 17 00:00:00 2001
From: anandhu-eng
Date: Thu, 29 May 2025 10:14:18 +0530
Subject: [PATCH 08/18] fix tag

---
 script/app-mlperf-automotive-mlcommons-python/meta.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/script/app-mlperf-automotive-mlcommons-python/meta.yaml b/script/app-mlperf-automotive-mlcommons-python/meta.yaml
index 5651a2116..03afcfed0 100644
--- a/script/app-mlperf-automotive-mlcommons-python/meta.yaml
+++ b/script/app-mlperf-automotive-mlcommons-python/meta.yaml
@@ -521,7 +521,7 @@ variations:
     - tags: get,generic-python-lib,_package.onnx
     - tags: get,generic-python-lib,_package.onnxruntime
     - tags: get,generic-python-lib,_package.tqdm
-    - tags: get,preprocessed,dataset,cognata,_mlc,_2d_object_det,_validation
+    - tags: get,preprocessed,dataset,cognata,_mlc,_2d_obj_det,_validation
       skip_if_env:
        MLC_RUN_STATE_DOCKER:
        - "yes"

From 43d757b7165d6611408380c5984d7d3108e2818f Mon Sep 17 00:00:00 2001
From: anandhu-eng
Date: Thu, 29 May 2025 11:42:46 +0530
Subject: [PATCH 09/18] add device flag

---
 .../app-mlperf-automotive-mlcommons-python/customize.py | 8 +++++---
 script/app-mlperf-automotive/meta.yaml | 3 +++
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/script/app-mlperf-automotive-mlcommons-python/customize.py b/script/app-mlperf-automotive-mlcommons-python/customize.py
index 60cd53b4f..74e104167 100644
--- a/script/app-mlperf-automotive-mlcommons-python/customize.py
+++ b/script/app-mlperf-automotive-mlcommons-python/customize.py
@@ -198,6 +198,8 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options,
 
     q = '"' if os_info['platform'] == 'windows' else "'"
 
+    device = env['MLC_MLPERF_DEVICE']
+
     ##########################################################################
 
     # Grigori added for ABTF demo
@@ -249,7 +251,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options,
                 "bevformer", "bevformer_tiny.py")
         print(env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'])
 
-        cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend onnx --dataset nuscenes --nuscenes-root {os.path.dirname(env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'].rstrip("/"))} --dataset-path {env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH']} --checkpoint {env['MLC_ML_MODEL_BEVFORMER_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}"""
+        cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend onnx --dataset nuscenes --device {"cuda" if device == "gpu" else "cpu"} --nuscenes-root {os.path.dirname(env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'].rstrip("/"))} --dataset-path {env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH']} --checkpoint {env['MLC_ML_MODEL_BEVFORMER_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}"""
         print(cmd)
     elif env['MLC_MODEL'] in ['ssd']:
         run_dir = env['MLC_MLPERF_INFERENCE_SSD_RESNET50_PATH']
@@ -263,7 +265,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options,
 
         config_path = "baseline_8MP_ss_scales_fm1_5x5_all"
 
-        cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {backend} --cognata-root-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_SSD_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}"""
+        cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {backend} --cognata-root-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_SSD_PATH']} --config {config_path} --device {"cuda" if device == "gpu" else "cpu"} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}"""
 
     elif env['MLC_MODEL'] in ['deeplabv3plus']:
         run_dir = env['MLC_MLPERF_INFERENCE_DEEPLABV3PLUS_PATH']
@@ -275,7 +277,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options,
         backend = "onnx" if env.get(
             'MLC_MLPERF_BACKEND') == "onnxruntime" else env.get('MLC_MLPERF_BACKEND')
 
-        cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {backend} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH']} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}"""
+        cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {backend} --dataset cognata --device {"cuda" if device == "gpu" else "cpu"} --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH']} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}"""
 
 ##########################################################################
diff --git a/script/app-mlperf-automotive/meta.yaml b/script/app-mlperf-automotive/meta.yaml
index 85a70953f..172505430 100644
--- a/script/app-mlperf-automotive/meta.yaml
+++ b/script/app-mlperf-automotive/meta.yaml
@@ -381,6 +381,9 @@ variations:
       abtf-inference-implementation:
         tags: _cpu
 
+  gpu:
+    alias: cuda
+
   cuda:
     group: device
     env:

From fe532d79f8638c2eab5f9d0d037725c633d83305 Mon Sep 17 00:00:00 2001
From: anandhu-eng
Date: Thu, 29 May 2025 15:55:10 +0530
Subject: [PATCH 10/18] changes for constantstream

---
 .../meta.yaml | 4 ++++
 script/app-mlperf-automotive/customize.py | 2 +-
 script/app-mlperf-automotive/meta.yaml | 13 ++++++++++++-
 .../customize.py | 6 +++---
 .../generate-mlperf-inference-user-conf/meta.yaml | 1 +
 script/run-mlperf-automotive-app/customize.py | 3 +++
 script/run-mlperf-automotive-app/meta.yaml | 1 +
 7 files changed, 25 insertions(+), 5 deletions(-)

diff --git a/script/app-mlperf-automotive-mlcommons-python/meta.yaml b/script/app-mlperf-automotive-mlcommons-python/meta.yaml
index 03afcfed0..e7c863366 100644
--- a/script/app-mlperf-automotive-mlcommons-python/meta.yaml
+++ b/script/app-mlperf-automotive-mlcommons-python/meta.yaml
@@ -60,6 +60,7 @@ input_mapping:
   target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY
   offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
   server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS
+  constantstream_target_qps: MLC_MLPERF_LOADGEN_CONSTANTSTREAM_TARGET_QPS
   singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
   multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
   output: MLC_MLPERF_OUTPUT_DIR
@@ -610,6 +611,9 @@ variations:
   server:
     env:
       MLC_MLPERF_LOADGEN_SCENARIO: Server
+  constantstream:
+    env:
+      MLC_MLPERF_LOADGEN_SCENARIO: ConstantStream
 
   mvp_demo:
     env:
diff --git a/script/app-mlperf-automotive/customize.py b/script/app-mlperf-automotive/customize.py
index b7d8598c5..f1c0f5958 100644
--- a/script/app-mlperf-automotive/customize.py
+++ b/script/app-mlperf-automotive/customize.py
@@ -68,7 +68,7 @@ def postprocess(i):
     mlperf_log = MLPerfLog(os.path.join(output_dir, "mlperf_log_detail.txt"))
     if mode == "performance":
-        if scenario in ["Offline", "Server"]:
+        if scenario in ["Offline", "Server", "ConstantStream"]:
             metric = "target_qps"
             result = mlperf_log['result_mean_latency_ns'] / 1000000
         elif scenario.endswith("Stream"):
diff --git a/script/app-mlperf-automotive/meta.yaml b/script/app-mlperf-automotive/meta.yaml
index 172505430..b47905df5 100644
--- a/script/app-mlperf-automotive/meta.yaml
+++ b/script/app-mlperf-automotive/meta.yaml
@@ -59,6 +59,7 @@ input_mapping:
   target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY
   offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
   server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS
+  constantstream_target_qps: MLC_MLPERF_LOADGEN_CONSTANTSTREAM_TARGET_QPS
   singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
   multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
   output: MLC_MLPERF_OUTPUT_DIR
@@ -383,7 +384,7 @@ variations:
 
   gpu:
     alias: cuda
-  
+
   cuda:
     group: device
     env:
@@ -416,12 +417,14 @@ variations:
     add_deps_recursive:
       abtf-inference-implementation:
         tags: _offline
+
   multistream:
     env:
       MLC_MLPERF_LOADGEN_SCENARIO: MultiStream
     add_deps_recursive:
       abtf-inference-implementation:
         tags: _multistream
+
   singlestream:
     group: loadgen-scenario
     default: true
@@ -430,6 +433,14 @@ variations:
     env:
       MLC_MLPERF_LOADGEN_SCENARIO: SingleStream
     add_deps_recursive:
      abtf-inference-implementation:
        tags: _singlestream
+
+  constantstream:
+    env:
+      MLC_MLPERF_LOADGEN_SCENARIO: ConstantStream
+    add_deps_recursive:
+      abtf-inference-implementation:
+        tags: _constantstream
+
   server:
     env:
       MLC_MLPERF_LOADGEN_SCENARIO: Server
diff --git a/script/generate-mlperf-inference-user-conf/customize.py b/script/generate-mlperf-inference-user-conf/customize.py
index 3377af28c..2c93af586 100644
--- a/script/generate-mlperf-inference-user-conf/customize.py
+++ b/script/generate-mlperf-inference-user-conf/customize.py
@@ -100,7 +100,7 @@ def preprocess(i):
     query_count = None
 
     value = None
-    if scenario in ['Offline', 'Server']:
+    if scenario in ['Offline', 'Server', 'ConstantStream']:
         metric = "target_qps"
         tolerance = 1.01
         # value = env.get('MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS') if scenario == "Server" else env.get('MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS')
@@ -343,7 +343,7 @@ def preprocess(i):
             ".sample_concatenate_permutation = 0" + "\n"
     max_duration_fast_s = int(env.get('MLC_MLPERF_MAX_DURATION_FAST', 120))
     max_duration_fast = str(max_duration_fast_s * 1000)  # in milliseconds
-    if scenario == "Server":
+    if scenario == "Server" or scenario == "ConstantStream":
         user_conf += ml_model_name + "." + scenario + \
             f".max_duration = {max_duration_fast}" + "\n"
         target_qps = conf['target_qps']
@@ -371,7 +371,7 @@ def preprocess(i):
             'MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no')) and env.get('MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE') != '99.9':
         user_conf += ml_model_name + "." + scenario + \
             f".max_duration = {max_duration_valid}" + "\n"
-        if scenario == "SingleStream" and env.get('MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE') == '99.9' and env.get(
+        if scenario in ["SingleStream", "ConstantStream"] and env.get('MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE') == '99.9' and env.get(
                 'MLC_MLPERF_DEFAULT_MAX_QUERY_COUNT', '') != '' and env.get('MLC_MLPERF_TARGET_LATENCY', '') == '':
             user_conf += ml_model_name + "." + scenario + \
                 f".max_query_count = {env.get('MLC_MLPERF_DEFAULT_MAX_QUERY_COUNT')}" + "\n"
diff --git a/script/generate-mlperf-inference-user-conf/meta.yaml b/script/generate-mlperf-inference-user-conf/meta.yaml
index 8b825f8e1..18fbec442 100644
--- a/script/generate-mlperf-inference-user-conf/meta.yaml
+++ b/script/generate-mlperf-inference-user-conf/meta.yaml
@@ -47,6 +47,7 @@ input_mapping:
   target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY
   offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
   server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS
+  constantstream_target_qps: MLC_MLPERF_LOADGEN_CONSTANTSTREAM_TARGET_QPS
   singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
   multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
   performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
diff --git a/script/run-mlperf-automotive-app/customize.py b/script/run-mlperf-automotive-app/customize.py
index 10983fc24..5e26568db 100644
--- a/script/run-mlperf-automotive-app/customize.py
+++ b/script/run-mlperf-automotive-app/customize.py
@@ -229,6 +229,9 @@ def preprocess(i):
     elif scenario == "Server":
         if env.get('MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS'):
             env['MLC_MLPERF_LOADGEN_TARGET_QPS'] = env['MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS']
+    elif scenario == "ConstantStream":
+        if env.get('MLC_MLPERF_LOADGEN_CONSTANTSTREAM_TARGET_QPS'):
+            env['MLC_MLPERF_LOADGEN_TARGET_QPS'] = env['MLC_MLPERF_LOADGEN_CONSTANTSTREAM_TARGET_QPS']
     elif scenario == "SingleStream":
         if env.get('MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY'):
             env['MLC_MLPERF_LOADGEN_TARGET_LATENCY'] = env['MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY']
diff --git a/script/run-mlperf-automotive-app/meta.yaml b/script/run-mlperf-automotive-app/meta.yaml
index 74b9e34c9..e63b12348 100644
--- a/script/run-mlperf-automotive-app/meta.yaml
+++ b/script/run-mlperf-automotive-app/meta.yaml
@@ -73,6 +73,7 @@ input_mapping:
   run_style: MLC_MLPERF_RUN_STYLE
   scenario: MLC_MLPERF_LOADGEN_SCENARIO
   server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS
+  constantstream_target_qps: MLC_MLPERF_LOADGEN_CONSTANTSTREAM_TARGET_QPS
   singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
   skip_submission_generation: MLC_MLPERF_SKIP_SUBMISSION_GENERATION
   skip_truncation: MLC_SKIP_TRUNCATE_ACCURACY

From c16ae3942a2334882d8bec79ffeb658109dae805 Mon Sep 17 00:00:00 2001
From: anandhu-eng
Date: Thu, 29 May 2025 18:03:14 +0530
Subject: [PATCH 11/18] fix tags

---
 script/app-mlperf-inference-amd/meta.yaml | 4 ++--
 .../meta.yaml | 4 ++--
 script/app-mlperf-inference-dummy/meta.yaml | 4 ++--
 script/app-mlperf-inference-intel/meta.yaml | 12 ++++++------
 .../meta.yaml | 4 ++--
 .../meta.yaml | 4 ++--
 script/app-mlperf-inference-qualcomm/meta.yaml | 4 ++--
 script/app-mlperf-inference-redhat/meta.yaml | 4 ++--
 .../meta.yaml | 18 ++++++++++++++----
 script/run-mlperf-automotive-app/meta.yaml | 2 +-
 script/run-mlperf-inference-app/meta.yaml | 2 +-
 11 files changed, 36 insertions(+), 26 deletions(-)

diff --git a/script/app-mlperf-inference-amd/meta.yaml b/script/app-mlperf-inference-amd/meta.yaml
index 2c3b6d063..1481362be 100644
--- a/script/app-mlperf-inference-amd/meta.yaml
+++ b/script/app-mlperf-inference-amd/meta.yaml
@@ -91,12 +91,12 @@ deps:
     - inference-src
 
   # Download MLPerf inference loadgen
-  - tags: get,mlcommons,inference,loadgen
+  - tags: get,mlcommons,inference,loadgen,_inference
     names:
    - inference-loadgen
 
   # Creates user conf for given SUT
-  - tags: generate,user-conf,mlperf,inference
+  - tags: generate,user-conf,mlperf,inference,_inference
     names:
    - user-conf-generator
diff --git a/script/app-mlperf-inference-ctuning-cpp-tflite/meta.yaml b/script/app-mlperf-inference-ctuning-cpp-tflite/meta.yaml
index 815a2a152..0ffa1e9d2 100644
--- a/script/app-mlperf-inference-ctuning-cpp-tflite/meta.yaml
+++ b/script/app-mlperf-inference-ctuning-cpp-tflite/meta.yaml
@@ -28,7 +28,7 @@ deps:
   tags: get,cuda
 - names:
   - loadgen
-  tags: get,loadgen
+  tags: get,loadgen,_inference
 - names:
   - inference-src
   tags: get,mlcommons,inference,src
@@ -121,7 +121,7 @@ post_deps:
 prehook_deps:
 - names:
   - user-conf-generator
-  tags: generate,user-conf,mlperf,inference
+  tags: generate,user-conf,mlperf,inference,_inference
 - enable_if_env:
     MLC_MLPERF_SKIP_RUN:
     - 'no'
diff --git a/script/app-mlperf-inference-dummy/meta.yaml b/script/app-mlperf-inference-dummy/meta.yaml
index f8876eb81..e488d679e 100644
--- a/script/app-mlperf-inference-dummy/meta.yaml
+++ b/script/app-mlperf-inference-dummy/meta.yaml
@@ -91,12 +91,12 @@ deps:
     - inference-src
 
   # Download MLPerf inference loadgen
-  - tags: get,mlcommons,inference,loadgen
+  - tags: get,mlcommons,inference,loadgen,_inference
     names:
    - inference-loadgen
 
   # Creates user conf for given SUT
-  - tags: generate,user-conf,mlperf,inference
+  - tags: generate,user-conf,mlperf,inference,_inference
     names:
    - user-conf-generator
diff --git a/script/app-mlperf-inference-intel/meta.yaml b/script/app-mlperf-inference-intel/meta.yaml
index 86a2806eb..3470e6712 100644
--- a/script/app-mlperf-inference-intel/meta.yaml
+++ b/script/app-mlperf-inference-intel/meta.yaml
@@ -392,7 +392,7 @@ variations:
       - tags: get,mlcommons,inference,src
         names:
        - inference-src
-      - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build
+      - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build,_inference
         names:
        - inference-loadgen
@@ -510,7 +510,7 @@ variations:
       - tags: get,mlcommons,inference,src
         names:
        - inference-src
-      - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build
+      - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build,_inference
         names:
        - inference-loadgen
@@ -627,7 +627,7 @@ variations:
       - tags: get,mlcommons,inference,src
         names:
        - inference-src
-      - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build
+      - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build,_inference
         names:
        - inference-loadgen
@@ -699,7 +699,7 @@ variations:
       - tags: get,mlcommons,inference,src
         names:
        - inference-src
-      - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build
+      - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build,_inference
         names:
        - inference-loadgen
@@ -924,7 +924,7 @@ variations:
       - tags: get,mlcommons,inference,src
         names:
        - inference-src
-      - tags: get,mlcommons,inference,loadgen,_custom-python
+      - tags: get,mlcommons,inference,loadgen,_custom-python,_inference
         names:
        - inference-loadgen
       - tags: get,ml-model,dlrm,_pytorch
@@ -1074,7 +1074,7 @@ variations:
     - inference-src
 
   # Creates user conf for given SUT
-  - tags: generate,user-conf,mlperf,inference
+  - tags: generate,user-conf,mlperf,inference,_inference
     names:
    - user-conf-generator
   - tags: get,generic-sys-util,_rsync
diff --git a/script/app-mlperf-inference-mlcommons-cpp/meta.yaml b/script/app-mlperf-inference-mlcommons-cpp/meta.yaml
index dda32e172..06af3ddba 100644
--- a/script/app-mlperf-inference-mlcommons-cpp/meta.yaml
+++ b/script/app-mlperf-inference-mlcommons-cpp/meta.yaml
@@ -71,7 +71,7 @@ deps:
   # Install MLPerf inference dependencies
 
   # Install MLPerf loadgen
-  - tags: get,loadgen
+  - tags: get,loadgen,_inference
     names:
    - loadgen
@@ -129,7 +129,7 @@ deps:
     tags: get,ml-model,retinanet,_onnx,_fp32
 
   # Creates user conf for given SUT
-  - tags: generate,user-conf,mlperf,inference
+  - tags: generate,user-conf,mlperf,inference,_inference
     names:
    - user-conf-generator
diff --git a/script/app-mlperf-inference-mlcommons-python/meta.yaml b/script/app-mlperf-inference-mlcommons-python/meta.yaml
index a0380fec2..b01c7d989 100644
--- a/script/app-mlperf-inference-mlcommons-python/meta.yaml
+++ b/script/app-mlperf-inference-mlcommons-python/meta.yaml
@@ -684,7 +684,7 @@ deps:
   # Install MLPerf inference dependencies
 
   # Creates user conf for given SUT
-  - tags: generate,user-conf,mlperf,inference
+  - tags: generate,user-conf,mlperf,inference,_inference
     names:
     - user-conf-generator
     skip_if_env:
       MLC_MLPERF_SKIP_RUN:
       - "yes"
 
   # Install MLPerf loadgen
-  - tags: get,loadgen
+  - tags: get,loadgen,_inference
     names:
     - loadgen
    - mlperf-inference-loadgen
diff --git a/script/app-mlperf-inference-qualcomm/meta.yaml b/script/app-mlperf-inference-qualcomm/meta.yaml
index 1e508e0e6..2e0193355 100644
--- a/script/app-mlperf-inference-qualcomm/meta.yaml
+++ b/script/app-mlperf-inference-qualcomm/meta.yaml
@@ -111,12 +111,12 @@ deps:
     - inference-src
 
   # Download MLPerf inference loadgen
-  - tags: get,mlcommons,inference,loadgen
+  - tags: get,mlcommons,inference,loadgen,_inference
     names:
    - inference-loadgen
 
   # Creates user conf for given SUT
-  - tags: generate,user-conf,mlperf,inference
+  - tags: generate,user-conf,mlperf,inference,_inference
     names:
    - user-conf-generator
diff --git a/script/app-mlperf-inference-redhat/meta.yaml b/script/app-mlperf-inference-redhat/meta.yaml
index 55af68d65..1d32af174 100644
--- a/script/app-mlperf-inference-redhat/meta.yaml
+++ b/script/app-mlperf-inference-redhat/meta.yaml
@@ -91,12 +91,12 @@ deps:
     - inference-src
 
   # Download MLPerf inference loadgen
-  - tags: get,mlcommons,inference,loadgen
+  - tags: get,mlcommons,inference,loadgen,_inference
     names:
    - inference-loadgen
 
   # Creates user conf for given SUT
-  - tags: generate,user-conf,mlperf,inference
+  - tags: generate,user-conf,mlperf,inference,_inference
     names:
    - user-conf-generator
diff --git a/script/generate-mlperf-inference-submission/meta.yaml b/script/generate-mlperf-inference-submission/meta.yaml
index c68bc5a37..3d7853498 100644
--- a/script/generate-mlperf-inference-submission/meta.yaml
+++ b/script/generate-mlperf-inference-submission/meta.yaml
@@ -13,12 +13,8 @@ deps:
   - python
   - python3
   tags: get,python3
-- names:
-  - inference-src
-  tags: mlcommons,inference,src
 - tags: get,sut,system-description
 - tags: install,pip-package,for-mlc-python,_package.tabulate
-- tags: get,mlperf,inference,utils
 - names:
   - get-mlperf-results-dir
   skip_if_env:
@@ -109,6 +105,20 @@ post_deps:
   - mlperf-inference-submission-checker
   - submission-checker
   tags: submission,inference,checker,mlc
+variations:
+  inference:
+    default: true
+    deps:
+    - names:
+      - inference-src
+      tags: mlcommons,inference,src
+    - tags: get,mlperf,inference,utils
+  automotive:
+    deps:
+    - names:
+      - automotive-src
+      tags: mlcommons,automotive,src
+    - tags: get,mlperf,automotive,utils
 tags:
 - generate
 - submission
diff --git a/script/run-mlperf-automotive-app/meta.yaml b/script/run-mlperf-automotive-app/meta.yaml
index e63b12348..9a2fb7401 100644
--- a/script/run-mlperf-automotive-app/meta.yaml
+++ b/script/run-mlperf-automotive-app/meta.yaml
@@ -252,4 +252,4 @@ variations:
       - 'false'
       - 'False'
      - '0'
-      tags: generate,mlperf,inference,submission
+      tags: generate,mlperf,inference,submission,_automotive
diff --git a/script/run-mlperf-inference-app/meta.yaml b/script/run-mlperf-inference-app/meta.yaml
index 5c13dfa16..9cc3064c6 100644
--- a/script/run-mlperf-inference-app/meta.yaml
+++ b/script/run-mlperf-inference-app/meta.yaml
@@ -433,7 +433,7 @@ variations:
       skip_if_env:
         MLC_MLPERF_SKIP_SUBMISSION_GENERATION:
        - 'yes'
-      tags: generate,mlperf,inference,submission
+      tags: generate,mlperf,inference,submission,_inference
 
 versions:
   master: {}

From 526bf49ec850395b03bcde9f8fe43faeb5c5dfa1 Mon Sep 17 00:00:00 2001
From: anandhu-eng
Date: Thu, 29 May 2025 18:18:33 +0530
Subject: [PATCH 12/18] add inference variation in get loadgen

---
 script/get-mlperf-inference-loadgen/meta.yaml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/script/get-mlperf-inference-loadgen/meta.yaml b/script/get-mlperf-inference-loadgen/meta.yaml
index ed75db192..b920ade33 100644
--- a/script/get-mlperf-inference-loadgen/meta.yaml
+++ b/script/get-mlperf-inference-loadgen/meta.yaml
@@ -169,6 +169,9 @@ variations:
   automotive:
     env:
       MLC_INFERENCE_AUTOMOTIVE_REPO: 'YES'
+  inference:
+    env:
+      MLC_INFERENCE_AUTOMOTIVE_REPO: 'YES'
 versions:
   custom:
     add_deps:

From 94033c33ebb473eca234fc42552078d819bff50c Mon Sep 17 00:00:00 2001
From: anandhu-eng
Date: Thu, 29 May 2025 18:24:17 +0530
Subject: [PATCH 13/18] fix typo

---
 script/get-mlperf-inference-loadgen/meta.yaml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/script/get-mlperf-inference-loadgen/meta.yaml b/script/get-mlperf-inference-loadgen/meta.yaml
index b920ade33..e841b3293 100644
--- a/script/get-mlperf-inference-loadgen/meta.yaml
+++ b/script/get-mlperf-inference-loadgen/meta.yaml
@@ -170,8 +170,9 @@ variations:
     env:
       MLC_INFERENCE_AUTOMOTIVE_REPO: 'YES'
   inference:
+    default: true
     env:
-      MLC_INFERENCE_AUTOMOTIVE_REPO: 'YES'
+      MLC_INFERENCE_AUTOMOTIVE_REPO: 'NO'
 versions:
   custom:
     add_deps:

From 29ed14c6378fc031ebd5bd7fef581e300ce03bc0 Mon Sep 17 00:00:00 2001
From: anandhu-eng
Date: Thu, 29 May 2025 22:50:50 +0530
Subject: [PATCH 14/18] fix command generation for accuracy check

---
 script/process-mlperf-accuracy/customize.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/script/process-mlperf-accuracy/customize.py b/script/process-mlperf-accuracy/customize.py
index afb359ce7..8a2168956 100644
--- a/script/process-mlperf-accuracy/customize.py
+++ b/script/process-mlperf-accuracy/customize.py
@@ -214,7 +214,7 @@ def preprocess(i):
 
     elif dataset == "cognata_ssd":
         CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SSD_RESNET50_PATH'], "accuracy_cognata.py") + "' --mlperf-accuracy-file '" + os.path.join(
-            result_dir, "mlperf_log_accuracy.json") + "' --dataset-path '" + env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'] + "' --dataset cognata --config '" + "baseline_8MP_ss_scales_fm1_5x5_all" + "' > '" + out_file + "'"
+            result_dir, "mlperf_log_accuracy.json") + "' --dataset-path '" + env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'] + "' --config '" + "baseline_8MP_ss_scales_fm1_5x5_all" + "' > '" + out_file + "'"
 
     elif dataset == "cognata_deeplab":
         CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_DEEPLABV3PLUS_PATH'], "accuracy_cognata.py") + "' --mlperf-accuracy-file '" + os.path.join(

From ffccf7f4eba158054370c7b974b7ae706c470d4e Mon Sep 17 00:00:00 2001
From: anandhu-eng
Date: Fri, 30 May 2025 12:55:50 +0530
Subject: [PATCH 15/18] docker repo defaulted to mlcommons

---
 script/app-mlperf-automotive-mlcommons-python/customize.py | 4 ++++
 script/app-mlperf-automotive/meta.yaml | 4 ++--
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/script/app-mlperf-automotive-mlcommons-python/customize.py b/script/app-mlperf-automotive-mlcommons-python/customize.py
index 74e104167..19afe47d9 100644
--- a/script/app-mlperf-automotive-mlcommons-python/customize.py
+++ b/script/app-mlperf-automotive-mlcommons-python/customize.py
@@ -237,6 +237,10 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options,
 
         env['RUN_DIR'] = run_dir
 
+        if device == "gpu":
+            logger.warning("Bevformer reference implementation is not supported on GPU, defaulting to CPU")
+            device = "cpu"
+
         env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR']
 
         if env['MLC_MLPERF_BACKEND'] != "onnxruntime":
diff --git a/script/app-mlperf-automotive/meta.yaml b/script/app-mlperf-automotive/meta.yaml
index b47905df5..ebe23d75d 100644
--- a/script/app-mlperf-automotive/meta.yaml
+++ b/script/app-mlperf-automotive/meta.yaml
@@ -119,8 +119,8 @@ post_deps:
 
 docker:
-  mlc_repo: anandhu-eng@mlperf-automations
-  mlc_repo_branch: automotive2
+  mlc_repo: mlcommons@mlperf-automations
+  mlc_repo_branch: dev
   use_host_group_id: True
   use_host_user_id: True
   real_run: false

From eb89634aa5877aab57353e895d53837f0f21119c Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
Date: Fri, 30 May 2025 07:26:10 +0000
Subject: [PATCH 16/18] [Automated Commit] Format Codebase [skip ci]

---
 script/app-mlperf-automotive-mlcommons-python/customize.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/script/app-mlperf-automotive-mlcommons-python/customize.py b/script/app-mlperf-automotive-mlcommons-python/customize.py
index 19afe47d9..806ff2951 100644
--- a/script/app-mlperf-automotive-mlcommons-python/customize.py
+++ b/script/app-mlperf-automotive-mlcommons-python/customize.py
@@ -238,7 +238,8 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options,
         env['RUN_DIR'] = run_dir
 
         if device == "gpu":
-            logger.warning("Bevformer reference implementation is not supported on GPU, defaulting to CPU")
+            logger.warning(
+                "Bevformer reference implementation is not supported on GPU, defaulting to CPU")
             device = "cpu"
 
         env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR']

From 1c8757183d9a7963daf60b1667c5eb6ab4c70414 Mon Sep 17 00:00:00 2001
From: anandhu-eng
Date: Fri, 30 May 2025 17:22:11 +0530
Subject: [PATCH 17/18] submission generation changes for automotive

---
 .../meta.yaml | 19 +++++++++++++
 .../meta.yaml | 19 +++++++++++----
 .../meta.yaml | 24 +++++++++++++++----
 .../meta.yaml | 15 +++++++++---
 4 files changed, 65 insertions(+), 12 deletions(-)

diff --git a/script/generate-mlperf-inference-submission/meta.yaml b/script/generate-mlperf-inference-submission/meta.yaml
index 3d7853498..480fbd008 100644
--- a/script/generate-mlperf-inference-submission/meta.yaml
+++ b/script/generate-mlperf-inference-submission/meta.yaml
@@ -93,27 +93,46 @@ post_deps:
   skip_if_env:
     MLC_SKIP_TRUNCATE_ACCURACY:
     - 'yes'
+  names:
+  - truncate-mlperf-accuracy-log
   tags: accuracy,truncate,mlc
 - enable_if_env:
     MLC_RUN_MLPERF_SUBMISSION_PREPROCESSOR:
     - 'yes'
+  names:
+  - preprocess-mlperf-submission
   tags: preprocess,mlperf,submission
 - skip_if_env:
     MLC_RUN_SUBMISSION_CHECKER:
     - 'no'
   names:
   - mlperf-inference-submission-checker
+  - mlperf-submission-checker
   - submission-checker
   tags: submission,inference,checker,mlc
 variations:
   inference:
     default: true
+    add_deps_recursive:
+      truncate-mlperf-accuracy-log:
+        tags: _inference
+      preprocess-mlperf-submission:
+        tags: _inference
+      mlperf-inference-submission-checker:
+        tags: _inference
     deps:
     - names:
       - inference-src
       tags: mlcommons,inference,src
     - tags: get,mlperf,inference,utils
   automotive:
+    add_deps_recursive:
+      truncate-mlperf-accuracy-log:
+        tags: _automotive
+      preprocess-mlperf-submission:
+        tags: _automotive
+      mlperf-inference-submission-checker:
+        tags: _automotive
     deps:
     - names:
       - automotive-src
diff --git a/script/preprocess-mlperf-inference-submission/meta.yaml b/script/preprocess-mlperf-inference-submission/meta.yaml
index 5381b36dd..0ddc10d51 100644
--- a/script/preprocess-mlperf-inference-submission/meta.yaml
+++ b/script/preprocess-mlperf-inference-submission/meta.yaml
@@ -9,17 +9,26 @@ deps:
   - python
   - python3
   tags: get,python3
-- names:
-  - inference-src
-  - submission-checker-src
-  tags: get,mlcommons,inference,src,_branch.dev
-  version: custom
 - names:
   - get-mlperf-submission-dir
   skip_if_env:
     MLC_MLPERF_INFERENCE_SUBMISSION_DIR:
     - 'on'
   tags: get,mlperf,submission,dir
+variations:
+  inference:
+    default: true
+    deps:
+    - names:
+      - inference-src
+      - submission-checker-src
+      tags: mlcommons,inference,src,_branch.dev
+      version: custom
+  automotive:
+    deps:
+    - names:
+      - automotive-src
+      tags: mlcommons,automotive,src
 input_mapping:
   input: MLC_MLPERF_INFERENCE_SUBMISSION_DIR
   submission_dir: MLC_MLPERF_INFERENCE_SUBMISSION_DIR
diff --git a/script/run-mlperf-inference-submission-checker/meta.yaml b/script/run-mlperf-inference-submission-checker/meta.yaml
index 8d9667ae6..a939cdeae 100644
--- a/script/run-mlperf-inference-submission-checker/meta.yaml
+++ b/script/run-mlperf-inference-submission-checker/meta.yaml
@@ -16,10 +16,6 @@ deps:
   - python
   - python3
   tags: get,python3
-- names:
-  - inference-src
-  - submission-checker-src
-  tags: get,mlcommons,inference,src
 - tags: pull,git,repo
   env:
     MLC_GIT_CHECKOUT_PATH: '<<<MLC_MLPERF_INFERENCE_SUBMISSION_DIR>>>'
@@ -43,7 +39,27 @@ deps:
 - enable_if_env:
     MLC_TMP_MLPERF_INFERENCE_PREPROCESS_SUBMISSION:
     - 'on'
+  names:
+  - preprocess-mlperf-submission
   tags: preprocess,mlperf,inference,submission
+variations:
+  inference:
+    default: true
+    add_deps_recursive:
+      preprocess-mlperf-submission:
+        tags: _inference
+    deps:
+    - names:
+      - inference-src
+      tags: mlcommons,inference,src
+  automotive:
+    add_deps_recursive:
+      preprocess-mlperf-submission:
+        tags: _automotive
+    deps:
+    - names:
+      - automotive-src
+      tags: mlcommons,automotive,src
 docker:
   mlc_repo: mlcommons@mlperf-automations
   mlc_repo_branch: dev
diff --git a/script/truncate-mlperf-inference-accuracy-log/meta.yaml b/script/truncate-mlperf-inference-accuracy-log/meta.yaml
index c0f02f6d3..2acbf3b85 100644
--- a/script/truncate-mlperf-inference-accuracy-log/meta.yaml
+++ b/script/truncate-mlperf-inference-accuracy-log/meta.yaml
@@ -9,15 +9,24 @@ deps:
   - python
   - python3
   tags: get,python3
-- names:
-  - inference-src
-  tags: get,mlcommons,inference,src
 - names:
   - get-mlperf-submission-dir
   skip_if_env:
     MLC_MLPERF_INFERENCE_SUBMISSION_DIR:
     - 'on'
   tags: get,mlperf,submission,dir
+variations:
+  inference:
+    default: true
+    deps:
+    - names:
+      - inference-src
+      tags: mlcommons,inference,src
+  automotive:
+    deps:
+    - names:
+      - automotive-src
+      tags: mlcommons,automotive,src
 input_mapping:
   input: MLC_MLPERF_INFERENCE_SUBMISSION_DIR
   submission_dir: MLC_MLPERF_INFERENCE_SUBMISSION_DIR

From 67b68b602ec1efbb4ba8355d009fa8eb43a73074 Mon Sep 17 00:00:00 2001
From: anandhu-eng
Date: Fri, 30 May 2025 17:38:17 +0530
Subject: [PATCH 18/18] fix issue with variations

---
 .../meta.yaml | 35 +++++++++++++++++------------------
 1 file changed, 17 insertions(+), 18 deletions(-)

diff --git a/script/run-mlperf-inference-submission-checker/meta.yaml b/script/run-mlperf-inference-submission-checker/meta.yaml
index a939cdeae..658e8f943 100644
--- a/script/run-mlperf-inference-submission-checker/meta.yaml
+++ b/script/run-mlperf-inference-submission-checker/meta.yaml
@@ -42,24 +42,6 @@ deps:
 - enable_if_env:
     MLC_TMP_MLPERF_INFERENCE_PREPROCESS_SUBMISSION:
     - 'on'
   names:
   - preprocess-mlperf-submission
   tags: preprocess,mlperf,inference,submission
-variations:
-  inference:
-    default: true
-    add_deps_recursive:
-      preprocess-mlperf-submission:
-        tags: _inference
-    deps:
-    - names:
-      - inference-src
-      tags: mlcommons,inference,src
-  automotive:
-    add_deps_recursive:
-      preprocess-mlperf-submission:
-        tags: _automotive
-    deps:
-    - names:
-      - automotive-src
-      tags: mlcommons,automotive,src
 docker:
   mlc_repo: mlcommons@mlperf-automations
   mlc_repo_branch: dev
@@ -153,6 +135,23 @@ variations:
   short-run:
     env:
       MLC_MLPERF_SHORT_RUN: 'yes'
+  inference:
+    default: true
+    add_deps_recursive:
+      preprocess-mlperf-submission:
+        tags: _inference
+    deps:
+    - names:
+      - inference-src
+      tags: mlcommons,inference,src
+  automotive:
+    add_deps_recursive:
+      preprocess-mlperf-submission:
+        tags: _automotive
+    deps:
+    - names:
+      - automotive-src
+      tags: mlcommons,automotive,src
 versions:
   master:
     adr: