From 3f139eb23f514590b6379e57f00acb1399d7632b Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Tue, 13 May 2025 21:13:32 +0530 Subject: [PATCH 01/83] add automotive 0.5 model download scripts --- script/get-ml-model-bevformer/COPYRIGHT.md | 9 ++ script/get-ml-model-bevformer/customize.py | 23 +++++ script/get-ml-model-bevformer/meta.yaml | 75 +++++++++++++++++ script/get-ml-model-bevformer/run.sh | 1 + .../get-ml-model-deeplabv3_plus/COPYRIGHT.md | 9 ++ .../get-ml-model-deeplabv3_plus/customize.py | 25 ++++++ script/get-ml-model-deeplabv3_plus/meta.yaml | 83 +++++++++++++++++++ script/get-ml-model-deeplabv3_plus/run.sh | 2 + script/get-ml-model-ssd-resnet50/COPYRIGHT.md | 9 ++ script/get-ml-model-ssd-resnet50/customize.py | 25 ++++++ script/get-ml-model-ssd-resnet50/meta.yaml | 77 +++++++++++++++++ script/get-ml-model-ssd-resnet50/run.sh | 1 + 12 files changed, 339 insertions(+) create mode 100644 script/get-ml-model-bevformer/COPYRIGHT.md create mode 100644 script/get-ml-model-bevformer/customize.py create mode 100644 script/get-ml-model-bevformer/meta.yaml create mode 100644 script/get-ml-model-bevformer/run.sh create mode 100644 script/get-ml-model-deeplabv3_plus/COPYRIGHT.md create mode 100644 script/get-ml-model-deeplabv3_plus/customize.py create mode 100644 script/get-ml-model-deeplabv3_plus/meta.yaml create mode 100644 script/get-ml-model-deeplabv3_plus/run.sh create mode 100644 script/get-ml-model-ssd-resnet50/COPYRIGHT.md create mode 100644 script/get-ml-model-ssd-resnet50/customize.py create mode 100644 script/get-ml-model-ssd-resnet50/meta.yaml create mode 100644 script/get-ml-model-ssd-resnet50/run.sh diff --git a/script/get-ml-model-bevformer/COPYRIGHT.md b/script/get-ml-model-bevformer/COPYRIGHT.md new file mode 100644 index 000000000..d2ceead84 --- /dev/null +++ b/script/get-ml-model-bevformer/COPYRIGHT.md @@ -0,0 +1,9 @@ +# Copyright Notice + +© 2025-2026 MLCommons. All Rights Reserved. 
+ +This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at: + +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) + +Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. diff --git a/script/get-ml-model-bevformer/customize.py b/script/get-ml-model-bevformer/customize.py new file mode 100644 index 000000000..d5d6f7ca3 --- /dev/null +++ b/script/get-ml-model-bevformer/customize.py @@ -0,0 +1,23 @@ +from mlc import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if os_info['platform'] == "windows": + return {'return': 1, 'error': 'Script not supported in windows yet!'} + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + env['MLC_ML_MODEL_BEVFORMER_PATH'] = os.path.join(env['MLC_ML_MODEL_BEVFORMER_PATH'], env['MLC_ML_MODEL_FILENAME']) + + return {'return': 0} diff --git a/script/get-ml-model-bevformer/meta.yaml b/script/get-ml-model-bevformer/meta.yaml new file mode 100644 index 000000000..4ef5a6132 --- /dev/null +++ b/script/get-ml-model-bevformer/meta.yaml @@ -0,0 +1,75 @@ +alias: get-ml-model-deeplabv3-plus +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +tags: +- get +- ml-model +- bevformer +uid: 438a053f666443bd +new_env_keys: + - MLC_ML_MODEL_BEVFORMER_PATH +print_env_at_the_end: + MLC_ML_MODEL_BEVFORMER_PATH: BevFormer checkpoint path +variations: + onnx: + group: model-format + default: true + env: + MLC_MODEL_FORMAT: onnx + MLC_MODEL_RCLONE_FILEPATH: model_checkpoint_bevformer/bevformer_tiny.onnx + MLC_ML_MODEL_FILENAME: bevformer_tiny.onnx + pytorch: + group: 
model-format + env: + MLC_MODEL_FORMAT: pth + MLC_MODEL_RCLONE_FILEPATH: model_checkpoint_bevformer/bevformer_tiny_epoch_24.pth + MLC_ML_MODEL_FILENAME: bevformer_tiny_epoch_24.onnx + mlc: + group: download-src + default: true + prehook_deps: + - tags: get,rclone + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes + - tags: get,rclone-config,_config-name.mlc-nuscenes + force_cache: true + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes + env: + MLC_RCLONE_DRIVE_FOLDER_ID: 17CpM5eU8tjrxh_LpH_BTNTeT37PhzcnC + - enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - 'yes' + env: + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_ML_MODEL_BEVFORMER_PATH + MLC_EXTRACT_FINAL_ENV_NAME: MLC_ML_MODEL_BEVFORMER_PATH + MLC_DOWNLOAD_URL: 'mlc-nuscenes:nuscenes_dataset/<<>>' + MLC_DOWNLOAD_EXTRA_OPTIONS: ' --include ' + extra_cache_tags: ml,model,bevformer + force_cache: true + names: + - dae + tags: download-and-extract + force_env_keys: + - MLC_OUTDIRNAME + update_tags_from_env_with_prefix: + _url.: + - MLC_DOWNLOAD_URL + env: + MLC_DOWNLOAD_SRC: mlcommons + rclone: + group: download-tool + add_deps_recursive: + dae: + tags: _rclone + default: true + dry-run: + group: run-mode + env: + MLC_DOWNLOAD_MODE: dry + dry-run,rclone: + env: + MLC_DOWNLOAD_EXTRA_OPTIONS: --dry-run diff --git a/script/get-ml-model-bevformer/run.sh b/script/get-ml-model-bevformer/run.sh new file mode 100644 index 000000000..a9bf588e2 --- /dev/null +++ b/script/get-ml-model-bevformer/run.sh @@ -0,0 +1 @@ +#!/bin/bash diff --git a/script/get-ml-model-deeplabv3_plus/COPYRIGHT.md b/script/get-ml-model-deeplabv3_plus/COPYRIGHT.md new file mode 100644 index 000000000..d2ceead84 --- /dev/null +++ b/script/get-ml-model-deeplabv3_plus/COPYRIGHT.md @@ -0,0 +1,9 @@ +# Copyright Notice + +© 2025-2026 MLCommons. All Rights Reserved. + +This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. 
A copy of the License can be obtained at: + +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) + +Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. diff --git a/script/get-ml-model-deeplabv3_plus/customize.py b/script/get-ml-model-deeplabv3_plus/customize.py new file mode 100644 index 000000000..efc103c82 --- /dev/null +++ b/script/get-ml-model-deeplabv3_plus/customize.py @@ -0,0 +1,25 @@ +from mlc import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if os_info['platform'] == "windows": + return {'return': 1, 'error': 'Script not supported in windows yet!'} + + env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH'] = os.path.join(env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH'], env['MLC_ML_MODEL_FILENAME']) + + return {'return': 0} diff --git a/script/get-ml-model-deeplabv3_plus/meta.yaml b/script/get-ml-model-deeplabv3_plus/meta.yaml new file mode 100644 index 000000000..02f437fbd --- /dev/null +++ b/script/get-ml-model-deeplabv3_plus/meta.yaml @@ -0,0 +1,83 @@ +alias: get-ml-model-deeplabv3-plus +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +tags: +- get +- ml-model +- deeplab +- v3-plus +- deeplabv3-plus +uid: cfb2d53b9dbc4dc0 +new_env_keys: + - MLC_ML_MODEL_DEEPLABV3_PLUS_PATH +print_env_at_the_end: + MLC_ML_MODEL_DEEPLABV3_PLUS_PATH: DeepLabV3+ checkpoint path +variations: + onnx: + group: model-format + default: true + env: + MLC_MODEL_FORMAT: onnx + MLC_MODEL_RCLONE_FILEPATH: model_checkpoint_deeplab/deeplabv3+_8mp.onnx + MLC_ML_MODEL_FILENAME: deeplabv3+_8mp.onnx + onnx_dynamic: + group: model-format + env: + 
MLC_MODEL_FORMAT: onnx + MLC_MODEL_RCLONE_FILEPATH: model_checkpoint_deeplab/deeplabv3+_dynamic.onnx + MLC_ML_MODEL_FILENAME: deeplabv3+_dynamic.onnx + pytorch: + group: model-format + env: + MLC_MODEL_FORMAT: pth + MLC_MODEL_RCLONE_FILEPATH: model_checkpoint_deeplab/latest_deeplabv3plus_resnet50_cognata_os16_it100000.pth + MLC_ML_MODEL_FILENAME: latest_deeplabv3plus_resnet50_cognata_os16_it100000.pth + mlc: + group: download-src + default: true + prehook_deps: + - tags: get,rclone + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes + - tags: get,rclone-config,_config-name.cognata + force_cache: true + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes + env: + MLC_RCLONE_DRIVE_FOLDER_ID: 1u5FDoeXHVtDrd4zClE47Gmyr7iLFidz1 + - enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - 'yes' + env: + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_ML_MODEL_DEEPLABV3_PLUS_PATH + MLC_EXTRACT_FINAL_ENV_NAME: MLC_ML_MODEL_DEEPLABV3_PLUS_PATH + MLC_DOWNLOAD_URL: 'mlc-cognata:mlc_cognata_dataset/<<>>' + MLC_DOWNLOAD_EXTRA_OPTIONS: ' --include ' + extra_cache_tags: ml,model,deeplabv3,plus + force_cache: true + names: + - dae + tags: download-and-extract + force_env_keys: + - MLC_OUTDIRNAME + update_tags_from_env_with_prefix: + _url.: + - MLC_DOWNLOAD_URL + env: + MLC_DOWNLOAD_SRC: mlcommons + rclone: + group: download-tool + add_deps_recursive: + dae: + tags: _rclone + default: true + dry-run: + group: run-mode + env: + MLC_DOWNLOAD_MODE: dry + dry-run,rclone: + env: + MLC_DOWNLOAD_EXTRA_OPTIONS: --dry-run diff --git a/script/get-ml-model-deeplabv3_plus/run.sh b/script/get-ml-model-deeplabv3_plus/run.sh new file mode 100644 index 000000000..05a7907cf --- /dev/null +++ b/script/get-ml-model-deeplabv3_plus/run.sh @@ -0,0 +1,2 @@ +#!/bin/bash + diff --git a/script/get-ml-model-ssd-resnet50/COPYRIGHT.md b/script/get-ml-model-ssd-resnet50/COPYRIGHT.md new file mode 100644 index 000000000..d2ceead84 --- /dev/null +++ b/script/get-ml-model-ssd-resnet50/COPYRIGHT.md @@ -0,0 +1,9 @@ +# Copyright Notice + +© 
2025-2026 MLCommons. All Rights Reserved. + +This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at: + +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) + +Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. diff --git a/script/get-ml-model-ssd-resnet50/customize.py b/script/get-ml-model-ssd-resnet50/customize.py new file mode 100644 index 000000000..11ee4fb72 --- /dev/null +++ b/script/get-ml-model-ssd-resnet50/customize.py @@ -0,0 +1,25 @@ +from mlc import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if os_info['platform'] == "windows": + return {'return': 1, 'error': 'Script not supported in windows yet!'} + + env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + env['MLC_ML_MODEL_SSD_PATH'] = os.path.join(env['MLC_ML_MODEL_SSD_PATH'], env['MLC_ML_MODEL_FILENAME']) + + return {'return': 0} diff --git a/script/get-ml-model-ssd-resnet50/meta.yaml b/script/get-ml-model-ssd-resnet50/meta.yaml new file mode 100644 index 000000000..67759da75 --- /dev/null +++ b/script/get-ml-model-ssd-resnet50/meta.yaml @@ -0,0 +1,77 @@ +alias: get-ml-model-ssd-resnet50 +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +tags: +- get +- ml-model +- ssd +- resnet50 +- ssd-resnet50 +uid: acaa4c330a5a42c5 +new_env_keys: + - MLC_ML_MODEL_SSD_PATH +print_env_at_the_end: + MLC_ML_MODEL_SSD_PATH: SSD checkpoint path +variations: + onnx: + group: model-format + default: true + env: + MLC_MODEL_FORMAT: onnx + MLC_MODEL_RCLONE_FILEPATH: 
model_checkpoint_ssd/ssd_resnet50.onnx + MLC_ML_MODEL_FILENAME: ssd_resnet50.onnx + pytorch: + group: model-format + env: + MLC_MODEL_FORMAT: pth + MLC_MODEL_RCLONE_FILEPATH: model_checkpoint_ssd/baseline_8MP_ss_scales_fm1_5x5_all_ep60.pth + MLC_ML_MODEL_FILENAME: baseline_8MP_ss_scales_fm1_5x5_all_ep60.pth + mlc: + group: download-src + default: true + prehook_deps: + - tags: get,rclone + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes + - tags: get,rclone-config,_config-name.cognata + force_cache: true + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes + env: + MLC_RCLONE_DRIVE_FOLDER_ID: 1u5FDoeXHVtDrd4zClE47Gmyr7iLFidz1 + - enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - 'yes' + env: + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_ML_MODEL_SSD_PATH + MLC_EXTRACT_FINAL_ENV_NAME: MLC_MLC_MODEL_SSD_PATH + MLC_DOWNLOAD_URL: 'mlc-cognata:mlc_cognata_dataset/<<>>' + MLC_DOWNLOAD_EXTRA_OPTIONS: ' --include ' + extra_cache_tags: ml,model,ssd,resnet50 + force_cache: true + names: + - dae + tags: download-and-extract + force_env_keys: + - MLC_OUTDIRNAME + update_tags_from_env_with_prefix: + _url.: + - MLC_DOWNLOAD_URL + env: + MLC_DOWNLOAD_SRC: mlcommons + rclone: + group: download-tool + add_deps_recursive: + dae: + tags: _rclone + default: true + dry-run: + group: run-mode + env: + MLC_DOWNLOAD_MODE: dry + dry-run,rclone: + env: + MLC_DOWNLOAD_EXTRA_OPTIONS: --dry-run diff --git a/script/get-ml-model-ssd-resnet50/run.sh b/script/get-ml-model-ssd-resnet50/run.sh new file mode 100644 index 000000000..a9bf588e2 --- /dev/null +++ b/script/get-ml-model-ssd-resnet50/run.sh @@ -0,0 +1 @@ +#!/bin/bash From bf32c066faca9e638e55490ae4725516bd4e498f Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Tue, 13 May 2025 21:25:21 +0530 Subject: [PATCH 02/83] typo fix --- script/get-ml-model-bevformer/meta.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/get-ml-model-bevformer/meta.yaml 
b/script/get-ml-model-bevformer/meta.yaml index 4ef5a6132..c14cf6208 100644 --- a/script/get-ml-model-bevformer/meta.yaml +++ b/script/get-ml-model-bevformer/meta.yaml @@ -1,4 +1,4 @@ -alias: get-ml-model-deeplabv3-plus +alias: get-ml-model-bevformer automation_alias: script automation_uid: 5b4e0237da074764 cache: true From 9a4bb54aa2ffb71002115d5bd6970407a370052f Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 14 May 2025 01:03:42 +0530 Subject: [PATCH 03/83] fix duplication + change rclone folder structure --- .../customize.py | 8 +- .../get-ml-model-abtf-ssd-pytorch/meta.yaml | 84 ++++++++++++++++++- script/get-ml-model-bevformer/customize.py | 2 + script/get-ml-model-bevformer/meta.yaml | 2 +- script/get-ml-model-ssd-resnet50/COPYRIGHT.md | 9 -- script/get-ml-model-ssd-resnet50/customize.py | 25 ------ script/get-ml-model-ssd-resnet50/meta.yaml | 77 ----------------- script/get-ml-model-ssd-resnet50/run.sh | 1 - 8 files changed, 92 insertions(+), 116 deletions(-) delete mode 100644 script/get-ml-model-ssd-resnet50/COPYRIGHT.md delete mode 100644 script/get-ml-model-ssd-resnet50/customize.py delete mode 100644 script/get-ml-model-ssd-resnet50/meta.yaml delete mode 100644 script/get-ml-model-ssd-resnet50/run.sh diff --git a/script/get-ml-model-abtf-ssd-pytorch/customize.py b/script/get-ml-model-abtf-ssd-pytorch/customize.py index a4fa7f16c..ab69791a8 100644 --- a/script/get-ml-model-abtf-ssd-pytorch/customize.py +++ b/script/get-ml-model-abtf-ssd-pytorch/customize.py @@ -26,6 +26,8 @@ def preprocess(i): 'error': 'ML model {} is not found'.format(ml_model)} env['MLC_ML_MODEL_FILE_WITH_PATH'] = ml_model + elif env['MLC_DOWNLOAD_SRC'] == "mlcommons": + env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'yes' return {'return': 0} @@ -35,7 +37,11 @@ def postprocess(i): env = i['env'] if env.get('MLC_ML_MODEL_FILE_WITH_PATH', '') == '': - env['MLC_ML_MODEL_FILE_WITH_PATH'] = 'model-weights-skipped' + if 
env.get('MLC_ML_MODEL_SSD_PATH', '') == '': + env['MLC_ML_MODEL_FILE_WITH_PATH'] = 'model-weights-skipped' + else: + env['MLC_ML_MODEL_SSD_PATH'] = os.path.join(env['MLC_ML_MODEL_SSD_PATH'], env['MLC_ML_MODEL_FILENAME']) + env['MLC_ML_MODEL_FILE_WITH_PATH'] = env['MLC_ML_MODEL_SSD_PATH'] env['MLC_ML_MODEL_FILE'] = os.path.basename( env['MLC_ML_MODEL_FILE_WITH_PATH']) diff --git a/script/get-ml-model-abtf-ssd-pytorch/meta.yaml b/script/get-ml-model-abtf-ssd-pytorch/meta.yaml index b9f70ebc3..aa08c11d2 100644 --- a/script/get-ml-model-abtf-ssd-pytorch/meta.yaml +++ b/script/get-ml-model-abtf-ssd-pytorch/meta.yaml @@ -17,6 +17,8 @@ tags: - get - ml-model - abtf-ssd-pytorch +- ssd +- resnet50 - cmc @@ -38,9 +40,11 @@ deps: names: - abtf-ssd-pytorch-git-repo - abtf-ml-model-code-git-repo - skip_if_env: + skip_if_any_env: MLC_SKIP_MODEL_CODE_DOWNLOAD: - 'yes' + MLC_DOWNLOAD_SRC: + - 'mlcommons' env: MLC_GIT_AUTH: 'yes' MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_ABTF_SSD_PYTORCH @@ -62,9 +66,11 @@ deps: names: - abtf-ml-model-weights - abtf-ml-model-weights-download - skip_if_env: + skip_if_any_env: MLC_SKIP_MODEL_WEIGHTS_DOWNLOAD: - 'yes' + MLC_DOWNLOAD_SRC: + - 'mlcommons' update_tags_from_env_with_prefix: _url.: - MLC_ML_MODEL_URL @@ -77,6 +83,7 @@ new_env_keys: print_env_at_the_end: MLC_ML_MODEL_FILE_WITH_PATH: Path to the ML model weights MLC_ML_MODEL_CODE_WITH_PATH: Path to the ML model code + MLC_ML_MODEL_SSD_PATH: Path to ssd resnet50 model variations: @@ -172,3 +179,76 @@ variations: adr: abtf-ml-model-weights-download: tags: _gdown + + onnx: + group: model-format + default: true + env: + MLC_MODEL_FORMAT: onnx + + onnx,mlc: + env: + MLC_MODEL_RCLONE_FILEPATH: model_checkpoint_ssd/ssd_resnet50.onnx + MLC_ML_MODEL_FILENAME: ssd_resnet50.onnx + + pytorch: + group: model-format + env: + MLC_MODEL_FORMAT: pth + + pytorch,mlc: + env: + MLC_MODEL_RCLONE_FILEPATH: model_checkpoint_ssd/baseline_8MP_ss_scales_fm1_5x5_all_ep60.pth + MLC_ML_MODEL_FILENAME: 
baseline_8MP_ss_scales_fm1_5x5_all_ep60.pth + + rclone,mlc: + group: download-tool + add_deps_recursive: + dae: + tags: _rclone + env: + MLC_RCLONE_COPY_USING: sync + + dry-run: + group: run-mode + env: + MLC_DOWNLOAD_MODE: dry + + dry-run,rclone: + env: + MLC_DOWNLOAD_EXTRA_OPTIONS: --dry-run + + mlc: + group: download-src + prehook_deps: + - tags: get,rclone + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes + - tags: get,rclone-config,_config-name.cognata + force_cache: true + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes + env: + MLC_RCLONE_DRIVE_FOLDER_ID: 1u5FDoeXHVtDrd4zClE47Gmyr7iLFidz1 + - enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - 'yes' + env: + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_ML_MODEL_SSD_PATH + MLC_EXTRACT_FINAL_ENV_NAME: MLC_MLC_MODEL_SSD_PATH + MLC_DOWNLOAD_URL: 'mlc-cognata:mlc_cognata_dataset/<<>>' + MLC_DOWNLOAD_EXTRA_OPTIONS: ' --include ' + extra_cache_tags: ml,model,ssd,resnet50 + force_cache: true + names: + - dae + tags: download-and-extract + force_env_keys: + - MLC_OUTDIRNAME + update_tags_from_env_with_prefix: + _url.: + - MLC_DOWNLOAD_URL + env: + MLC_DOWNLOAD_SRC: mlcommons diff --git a/script/get-ml-model-bevformer/customize.py b/script/get-ml-model-bevformer/customize.py index d5d6f7ca3..bee8c3c66 100644 --- a/script/get-ml-model-bevformer/customize.py +++ b/script/get-ml-model-bevformer/customize.py @@ -11,6 +11,8 @@ def preprocess(i): if os_info['platform'] == "windows": return {'return': 1, 'error': 'Script not supported in windows yet!'} + env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" + return {'return': 0} diff --git a/script/get-ml-model-bevformer/meta.yaml b/script/get-ml-model-bevformer/meta.yaml index c14cf6208..6022d7907 100644 --- a/script/get-ml-model-bevformer/meta.yaml +++ b/script/get-ml-model-bevformer/meta.yaml @@ -46,7 +46,7 @@ variations: env: MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_ML_MODEL_BEVFORMER_PATH MLC_EXTRACT_FINAL_ENV_NAME: MLC_ML_MODEL_BEVFORMER_PATH - MLC_DOWNLOAD_URL: 'mlc-nuscenes:nuscenes_dataset/<<>>' + 
MLC_DOWNLOAD_URL: 'mlc-nuscenes:<<>>' MLC_DOWNLOAD_EXTRA_OPTIONS: ' --include ' extra_cache_tags: ml,model,bevformer force_cache: true diff --git a/script/get-ml-model-ssd-resnet50/COPYRIGHT.md b/script/get-ml-model-ssd-resnet50/COPYRIGHT.md deleted file mode 100644 index d2ceead84..000000000 --- a/script/get-ml-model-ssd-resnet50/COPYRIGHT.md +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright Notice - -© 2025-2026 MLCommons. All Rights Reserved. - -This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at: - -[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) - -Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. 
diff --git a/script/get-ml-model-ssd-resnet50/customize.py b/script/get-ml-model-ssd-resnet50/customize.py deleted file mode 100644 index 11ee4fb72..000000000 --- a/script/get-ml-model-ssd-resnet50/customize.py +++ /dev/null @@ -1,25 +0,0 @@ -from mlc import utils -import os - - -def preprocess(i): - - os_info = i['os_info'] - - env = i['env'] - - if os_info['platform'] == "windows": - return {'return': 1, 'error': 'Script not supported in windows yet!'} - - env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" - - return {'return': 0} - - -def postprocess(i): - - env = i['env'] - - env['MLC_ML_MODEL_SSD_PATH'] = os.path.join(env['MLC_ML_MODEL_SSD_PATH'], env['MLC_ML_MODEL_FILENAME']) - - return {'return': 0} diff --git a/script/get-ml-model-ssd-resnet50/meta.yaml b/script/get-ml-model-ssd-resnet50/meta.yaml deleted file mode 100644 index 67759da75..000000000 --- a/script/get-ml-model-ssd-resnet50/meta.yaml +++ /dev/null @@ -1,77 +0,0 @@ -alias: get-ml-model-ssd-resnet50 -automation_alias: script -automation_uid: 5b4e0237da074764 -cache: true -tags: -- get -- ml-model -- ssd -- resnet50 -- ssd-resnet50 -uid: acaa4c330a5a42c5 -new_env_keys: - - MLC_ML_MODEL_SSD_PATH -print_env_at_the_end: - MLC_ML_MODEL_SSD_PATH: SSD checkpoint path -variations: - onnx: - group: model-format - default: true - env: - MLC_MODEL_FORMAT: onnx - MLC_MODEL_RCLONE_FILEPATH: model_checkpoint_ssd/ssd_resnet50.onnx - MLC_ML_MODEL_FILENAME: ssd_resnet50.onnx - pytorch: - group: model-format - env: - MLC_MODEL_FORMAT: pth - MLC_MODEL_RCLONE_FILEPATH: model_checkpoint_ssd/baseline_8MP_ss_scales_fm1_5x5_all_ep60.pth - MLC_ML_MODEL_FILENAME: baseline_8MP_ss_scales_fm1_5x5_all_ep60.pth - mlc: - group: download-src - default: true - prehook_deps: - - tags: get,rclone - enable_if_env: - MLC_TMP_REQUIRE_DOWNLOAD: - - yes - - tags: get,rclone-config,_config-name.cognata - force_cache: true - enable_if_env: - MLC_TMP_REQUIRE_DOWNLOAD: - - yes - env: - MLC_RCLONE_DRIVE_FOLDER_ID: 1u5FDoeXHVtDrd4zClE47Gmyr7iLFidz1 - - 
enable_if_env: - MLC_TMP_REQUIRE_DOWNLOAD: - - 'yes' - env: - MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_ML_MODEL_SSD_PATH - MLC_EXTRACT_FINAL_ENV_NAME: MLC_MLC_MODEL_SSD_PATH - MLC_DOWNLOAD_URL: 'mlc-cognata:mlc_cognata_dataset/<<>>' - MLC_DOWNLOAD_EXTRA_OPTIONS: ' --include ' - extra_cache_tags: ml,model,ssd,resnet50 - force_cache: true - names: - - dae - tags: download-and-extract - force_env_keys: - - MLC_OUTDIRNAME - update_tags_from_env_with_prefix: - _url.: - - MLC_DOWNLOAD_URL - env: - MLC_DOWNLOAD_SRC: mlcommons - rclone: - group: download-tool - add_deps_recursive: - dae: - tags: _rclone - default: true - dry-run: - group: run-mode - env: - MLC_DOWNLOAD_MODE: dry - dry-run,rclone: - env: - MLC_DOWNLOAD_EXTRA_OPTIONS: --dry-run diff --git a/script/get-ml-model-ssd-resnet50/run.sh b/script/get-ml-model-ssd-resnet50/run.sh deleted file mode 100644 index a9bf588e2..000000000 --- a/script/get-ml-model-ssd-resnet50/run.sh +++ /dev/null @@ -1 +0,0 @@ -#!/bin/bash From f2cef39e8da5c6655c2e0b0b08301e3340741243 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Tue, 13 May 2025 19:34:17 +0000 Subject: [PATCH 04/83] [Automated Commit] Format Codebase [skip ci] --- script/get-ml-model-abtf-ssd-pytorch/customize.py | 3 ++- script/get-ml-model-bevformer/customize.py | 5 +++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/script/get-ml-model-abtf-ssd-pytorch/customize.py b/script/get-ml-model-abtf-ssd-pytorch/customize.py index ab69791a8..5ae7b2534 100644 --- a/script/get-ml-model-abtf-ssd-pytorch/customize.py +++ b/script/get-ml-model-abtf-ssd-pytorch/customize.py @@ -40,7 +40,8 @@ def postprocess(i): if env.get('MLC_ML_MODEL_SSD_PATH', '') == '': env['MLC_ML_MODEL_FILE_WITH_PATH'] = 'model-weights-skipped' else: - env['MLC_ML_MODEL_SSD_PATH'] = os.path.join(env['MLC_ML_MODEL_SSD_PATH'], env['MLC_ML_MODEL_FILENAME']) + env['MLC_ML_MODEL_SSD_PATH'] = os.path.join( + env['MLC_ML_MODEL_SSD_PATH'], env['MLC_ML_MODEL_FILENAME']) 
env['MLC_ML_MODEL_FILE_WITH_PATH'] = env['MLC_ML_MODEL_SSD_PATH'] env['MLC_ML_MODEL_FILE'] = os.path.basename( diff --git a/script/get-ml-model-bevformer/customize.py b/script/get-ml-model-bevformer/customize.py index bee8c3c66..1c4868594 100644 --- a/script/get-ml-model-bevformer/customize.py +++ b/script/get-ml-model-bevformer/customize.py @@ -12,7 +12,7 @@ def preprocess(i): return {'return': 1, 'error': 'Script not supported in windows yet!'} env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" - + return {'return': 0} @@ -20,6 +20,7 @@ def postprocess(i): env = i['env'] - env['MLC_ML_MODEL_BEVFORMER_PATH'] = os.path.join(env['MLC_ML_MODEL_BEVFORMER_PATH'], env['MLC_ML_MODEL_FILENAME']) + env['MLC_ML_MODEL_BEVFORMER_PATH'] = os.path.join( + env['MLC_ML_MODEL_BEVFORMER_PATH'], env['MLC_ML_MODEL_FILENAME']) return {'return': 0} From 678b42cf276d07bc2477332816a04b37b41071d8 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 14 May 2025 01:05:38 +0530 Subject: [PATCH 05/83] add comment --- script/get-ml-model-abtf-ssd-pytorch/customize.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/get-ml-model-abtf-ssd-pytorch/customize.py b/script/get-ml-model-abtf-ssd-pytorch/customize.py index ab69791a8..180e003bf 100644 --- a/script/get-ml-model-abtf-ssd-pytorch/customize.py +++ b/script/get-ml-model-abtf-ssd-pytorch/customize.py @@ -26,7 +26,7 @@ def preprocess(i): 'error': 'ML model {} is not found'.format(ml_model)} env['MLC_ML_MODEL_FILE_WITH_PATH'] = ml_model - elif env['MLC_DOWNLOAD_SRC'] == "mlcommons": + elif env['MLC_DOWNLOAD_SRC'] == "mlcommons": #handle download from mlcommons gdrive env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'yes' return {'return': 0} From 051e168cc01aa053d0fc26e4f857cecf87db2099 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Tue, 13 May 2025 19:36:49 +0000 Subject: [PATCH 06/83] [Automated Commit] Format Codebase [skip ci] --- script/get-ml-model-abtf-ssd-pytorch/customize.py | 2 
+- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/get-ml-model-abtf-ssd-pytorch/customize.py b/script/get-ml-model-abtf-ssd-pytorch/customize.py index 7403d0b05..1594893ee 100644 --- a/script/get-ml-model-abtf-ssd-pytorch/customize.py +++ b/script/get-ml-model-abtf-ssd-pytorch/customize.py @@ -26,7 +26,7 @@ def preprocess(i): 'error': 'ML model {} is not found'.format(ml_model)} env['MLC_ML_MODEL_FILE_WITH_PATH'] = ml_model - elif env['MLC_DOWNLOAD_SRC'] == "mlcommons": #handle download from mlcommons gdrive + elif env['MLC_DOWNLOAD_SRC'] == "mlcommons": # handle download from mlcommons gdrive env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'yes' return {'return': 0} From d5304ba8499c24c5bb1f1cd01708bc064c203a13 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 14 May 2025 01:07:56 +0530 Subject: [PATCH 07/83] add comment --- script/get-ml-model-abtf-ssd-pytorch/customize.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/get-ml-model-abtf-ssd-pytorch/customize.py b/script/get-ml-model-abtf-ssd-pytorch/customize.py index 1594893ee..da578cad6 100644 --- a/script/get-ml-model-abtf-ssd-pytorch/customize.py +++ b/script/get-ml-model-abtf-ssd-pytorch/customize.py @@ -26,7 +26,7 @@ def preprocess(i): 'error': 'ML model {} is not found'.format(ml_model)} env['MLC_ML_MODEL_FILE_WITH_PATH'] = ml_model - elif env['MLC_DOWNLOAD_SRC'] == "mlcommons": # handle download from mlcommons gdrive + elif env['MLC_DOWNLOAD_SRC'] == "mlcommons": #handles download from mlcommons gdrive env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'yes' return {'return': 0} From 76321b732bc7146f87ce837532b16cb7bf1932ab Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Tue, 13 May 2025 19:38:15 +0000 Subject: [PATCH 08/83] [Automated Commit] Format Codebase [skip ci] --- script/get-ml-model-abtf-ssd-pytorch/customize.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/script/get-ml-model-abtf-ssd-pytorch/customize.py b/script/get-ml-model-abtf-ssd-pytorch/customize.py index da578cad6..c2d168845 100644 --- a/script/get-ml-model-abtf-ssd-pytorch/customize.py +++ b/script/get-ml-model-abtf-ssd-pytorch/customize.py @@ -26,7 +26,7 @@ def preprocess(i): 'error': 'ML model {} is not found'.format(ml_model)} env['MLC_ML_MODEL_FILE_WITH_PATH'] = ml_model - elif env['MLC_DOWNLOAD_SRC'] == "mlcommons": #handles download from mlcommons gdrive + elif env['MLC_DOWNLOAD_SRC'] == "mlcommons": # handles download from mlcommons gdrive env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'yes' return {'return': 0} From ac44c38a4b374a4f00649bcf7f9f254cabcf2cc3 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 14 May 2025 01:09:14 +0530 Subject: [PATCH 09/83] add comment --- script/get-ml-model-abtf-ssd-pytorch/customize.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/get-ml-model-abtf-ssd-pytorch/customize.py b/script/get-ml-model-abtf-ssd-pytorch/customize.py index c2d168845..1594893ee 100644 --- a/script/get-ml-model-abtf-ssd-pytorch/customize.py +++ b/script/get-ml-model-abtf-ssd-pytorch/customize.py @@ -26,7 +26,7 @@ def preprocess(i): 'error': 'ML model {} is not found'.format(ml_model)} env['MLC_ML_MODEL_FILE_WITH_PATH'] = ml_model - elif env['MLC_DOWNLOAD_SRC'] == "mlcommons": # handles download from mlcommons gdrive + elif env['MLC_DOWNLOAD_SRC'] == "mlcommons": # handle download from mlcommons gdrive env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'yes' return {'return': 0} From f8fcf3d6a3a5ede09a716421514f4f388cb8e629 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 14 May 2025 01:14:07 +0530 Subject: [PATCH 10/83] fix bug --- script/get-ml-model-abtf-ssd-pytorch/customize.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/get-ml-model-abtf-ssd-pytorch/customize.py b/script/get-ml-model-abtf-ssd-pytorch/customize.py 
index 1594893ee..0a2dadc74 100644 --- a/script/get-ml-model-abtf-ssd-pytorch/customize.py +++ b/script/get-ml-model-abtf-ssd-pytorch/customize.py @@ -26,7 +26,7 @@ def preprocess(i): 'error': 'ML model {} is not found'.format(ml_model)} env['MLC_ML_MODEL_FILE_WITH_PATH'] = ml_model - elif env['MLC_DOWNLOAD_SRC'] == "mlcommons": # handle download from mlcommons gdrive + elif env.get('MLC_DOWNLOAD_SRC', '') == "mlcommons": # handle download from mlcommons gdrive env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'yes' return {'return': 0} From 80fd3465c10e769a848fc457dd1d1028ebbfe03b Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Tue, 13 May 2025 19:44:25 +0000 Subject: [PATCH 11/83] [Automated Commit] Format Codebase [skip ci] --- script/get-ml-model-abtf-ssd-pytorch/customize.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/script/get-ml-model-abtf-ssd-pytorch/customize.py b/script/get-ml-model-abtf-ssd-pytorch/customize.py index 0a2dadc74..f6a7df9c1 100644 --- a/script/get-ml-model-abtf-ssd-pytorch/customize.py +++ b/script/get-ml-model-abtf-ssd-pytorch/customize.py @@ -26,7 +26,8 @@ def preprocess(i): 'error': 'ML model {} is not found'.format(ml_model)} env['MLC_ML_MODEL_FILE_WITH_PATH'] = ml_model - elif env.get('MLC_DOWNLOAD_SRC', '') == "mlcommons": # handle download from mlcommons gdrive + # handle download from mlcommons gdrive + elif env.get('MLC_DOWNLOAD_SRC', '') == "mlcommons": env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'yes' return {'return': 0} From 6177ab22fcee03da17eb4f5bc744a4012d810326 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 14 May 2025 01:15:40 +0530 Subject: [PATCH 12/83] add comment --- script/get-ml-model-abtf-ssd-pytorch/customize.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/get-ml-model-abtf-ssd-pytorch/customize.py b/script/get-ml-model-abtf-ssd-pytorch/customize.py index f6a7df9c1..08adffd66 100644 --- 
a/script/get-ml-model-abtf-ssd-pytorch/customize.py +++ b/script/get-ml-model-abtf-ssd-pytorch/customize.py @@ -26,7 +26,7 @@ def preprocess(i): 'error': 'ML model {} is not found'.format(ml_model)} env['MLC_ML_MODEL_FILE_WITH_PATH'] = ml_model - # handle download from mlcommons gdrive + # handles download from mlcommons gdrive elif env.get('MLC_DOWNLOAD_SRC', '') == "mlcommons": env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'yes' From 9c3943c8df0a259167fa8bffda71f70231702705 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 14 May 2025 20:57:30 +0530 Subject: [PATCH 13/83] add dataset download for automotive v5.0 --- .../customize.py | 7 ++ .../get-dataset-cognata-mlcommons/meta.yaml | 80 +++++++++++++++++-- script/get-dataset-cognata-mlcommons/run.sh | 20 +++++ script/get-dataset-nuscenes/COPYRIGHT.md | 9 +++ script/get-dataset-nuscenes/customize.py | 23 ++++++ script/get-dataset-nuscenes/meta.yaml | 57 +++++++++++++ script/get-dataset-nuscenes/run.sh | 20 +++++ 7 files changed, 210 insertions(+), 6 deletions(-) create mode 100644 script/get-dataset-cognata-mlcommons/run.sh create mode 100644 script/get-dataset-nuscenes/COPYRIGHT.md create mode 100644 script/get-dataset-nuscenes/customize.py create mode 100644 script/get-dataset-nuscenes/meta.yaml create mode 100644 script/get-dataset-nuscenes/run.sh diff --git a/script/get-dataset-cognata-mlcommons/customize.py b/script/get-dataset-cognata-mlcommons/customize.py index be725599d..64406d44e 100644 --- a/script/get-dataset-cognata-mlcommons/customize.py +++ b/script/get-dataset-cognata-mlcommons/customize.py @@ -8,6 +8,10 @@ def preprocess(i): env = i['env'] + if env.get('MLC_COGNATA_DATASET_TYPE', '') == "release": + env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" + return {'return': 0} + mlc_cache_dataset_path = env.get( 'MLC_CUSTOM_CACHE_ENTRY_DATASET_MLCOMMONS_COGNATA_PATH', '').strip() @@ -61,6 +65,9 @@ def postprocess(i): logger = automation.logger + if 
env.get('MLC_COGNATA_DATASET_TYPE', '') == "release": + return {'return': 0} + cur_dir = os.getcwd() quiet = is_true(env.get('MLC_QUIET', False)) diff --git a/script/get-dataset-cognata-mlcommons/meta.yaml b/script/get-dataset-cognata-mlcommons/meta.yaml index 309b6ba90..ecb10799a 100644 --- a/script/get-dataset-cognata-mlcommons/meta.yaml +++ b/script/get-dataset-cognata-mlcommons/meta.yaml @@ -48,9 +48,11 @@ deps: - custom-cache-entry-mlcommons-cognata-dataset tags: create,custom,cache,entry extra_cache_tags: dataset,cognata,mlcommons-cognata - skip_if_env: + skip_if_any_env: MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: - 'off' + MLC_DOWNLOAD_SRC: + - mlcommons env_key: DATASET_MLCOMMONS_COGNATA # this script will prepare env MLC_CUSTOM_CACHE_ENTRY_{env_key}_PATH @@ -58,9 +60,11 @@ deps: prehook_deps: - names: - gdrive-downloader-cognata - skip_if_env: + skip_if_any_env: MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: - 'on' + MLC_DOWNLOAD_SRC: + - mlcommons enable_if_env: MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED: - 'no' @@ -76,9 +80,11 @@ prehook_deps: - names: - rclone-downloader-cognata - skip_if_env: + skip_if_any_env: MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: - 'on' + MLC_DOWNLOAD_SRC: + - mlcommons enable_if_env: MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED: - 'no' @@ -101,27 +107,33 @@ prehook_deps: - python - python3 tags: get,python3 - skip_if_env: + skip_if_any_env: MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED: - 'yes' + MLC_DOWNLOAD_SRC: + - mlcommons enable_if_env: MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: - 'on' # Python package to read/write Excel files - tags: get,generic-python-lib,_package.openpyxl - skip_if_env: + skip_if_any_env: MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED: - 'yes' + MLC_DOWNLOAD_SRC: + - mlcommons enable_if_env: MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: - 'on' # Tool to download large files - tags: get,aria2 - skip_if_env: + skip_if_any_env: MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED: - 'yes' + MLC_DOWNLOAD_SRC: + - mlcommons enable_if_env: 
MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: - 'on' @@ -143,6 +155,11 @@ variations: MLC_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES: "Cognata_Camera_01_8M" MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: "" + release: + group: dataset-type + env: + MLC_COGNATA_DATASET_TYPE: "release" + rclone: group: download-tool default: true @@ -153,6 +170,57 @@ variations: group: download-tool env: MLC_DATASET_MLCOMMONS_COGNATA_DOWNLOAD_TOOL: gdrive + + mlc: + group: download-src + prehook_deps: + - tags: get,rclone + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes + - tags: get,rclone-config,_config-name.cognata + force_cache: true + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes + env: + MLC_RCLONE_DRIVE_FOLDER_ID: 1u5FDoeXHVtDrd4zClE47Gmyr7iLFidz1 + - enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - 'yes' + env: + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_DATASET_MLCOMMONS_COGNATA_PATH + MLC_EXTRACT_FINAL_ENV_NAME: MLC_DATASET_MLCOMMONS_COGNATA_PATH + MLC_DOWNLOAD_URL: 'mlc-cognata:mlc_cognata_dataset/unprocessed' + extra_cache_tags: dataset,cognata,release + force_cache: true + names: + - dae + tags: download-and-extract + force_env_keys: + - MLC_OUTDIRNAME + update_tags_from_env_with_prefix: + _url.: + - MLC_DOWNLOAD_URL + env: + MLC_DOWNLOAD_SRC: mlcommons + + rclone,mlc: + add_deps_recursive: + dae: + tags: _rclone + env: + MLC_RCLONE_COPY_USING: sync + + dry-run: + group: run-mode + env: + MLC_DOWNLOAD_MODE: dry + + dry-run,rclone: + env: + MLC_DOWNLOAD_EXTRA_OPTIONS: --dry-run + new_env_keys: - MLC_DATASET_MLCOMMONS_COGNATA* diff --git a/script/get-dataset-cognata-mlcommons/run.sh b/script/get-dataset-cognata-mlcommons/run.sh new file mode 100644 index 000000000..6177d043f --- /dev/null +++ b/script/get-dataset-cognata-mlcommons/run.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is 
added as a dependency + +if [[ "$MLC_DOWNLOAD_MODE" != "dry" && "$MLC_TMP_REQUIRE_DOWNLOAD" == "true" && "$MLC_COGNATA_DATASET_TYPE" == "release"]]; then + cd "${MLC_DATASET_MLCOMMONS_COGNATA_PATH}" || exit + for f in *.tar.gz; do + tar -xzvf "$f" || { echo "Failed to extract $f"; exit 1; } + done + cd "${MLC_DATASET_MLCOMMONS_COGNATA_PATH}/nuscenes" || exit + for f in *.tar.gz; do + tar -xzvf "$f" || { echo "Failed to extract $f"; exit 1; } + done + cd - || exit +fi \ No newline at end of file diff --git a/script/get-dataset-nuscenes/COPYRIGHT.md b/script/get-dataset-nuscenes/COPYRIGHT.md new file mode 100644 index 000000000..d2ceead84 --- /dev/null +++ b/script/get-dataset-nuscenes/COPYRIGHT.md @@ -0,0 +1,9 @@ +# Copyright Notice + +© 2025-2026 MLCommons. All Rights Reserved. + +This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at: + +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) + +Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. 
diff --git a/script/get-dataset-nuscenes/customize.py b/script/get-dataset-nuscenes/customize.py new file mode 100644 index 000000000..d4abad774 --- /dev/null +++ b/script/get-dataset-nuscenes/customize.py @@ -0,0 +1,23 @@ +from mlc import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if os_info['platform'] == "windows": + return {'return': 1, 'error': 'Script not supported in windows yet!'} + + env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/script/get-dataset-nuscenes/meta.yaml b/script/get-dataset-nuscenes/meta.yaml new file mode 100644 index 000000000..e12787bdf --- /dev/null +++ b/script/get-dataset-nuscenes/meta.yaml @@ -0,0 +1,57 @@ +alias: get-dataset-nuscenes +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +tags: +- get +- dataset +- nuscenes +uid: ec2a0842c9a644f5 +new_env_keys: + - MLC_DATASET_NUSCENES_PATH +variations: + mlc: + group: download-src + default: true + prehook_deps: + - tags: get,rclone + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes + - tags: get,rclone-config,_waymo + force_cache: true + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes + - enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - 'yes' + env: + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_DATASET_NUSCENES_PATH + MLC_EXTRACT_FINAL_ENV_NAME: MLC_DATASET_NUSCENES_PATH + MLC_DOWNLOAD_URL: mlc-nuscenes:nuscenes_dataset + extra_cache_tags: nuscenes,dataset + force_cache: true + names: + - dae + tags: download-and-extract + force_env_keys: + - MLC_OUTDIRNAME + update_tags_from_env_with_prefix: + _url.: + - MLC_DOWNLOAD_URL + env: + MLC_DOWNLOAD_SRC: mlcommons + rclone: + group: download-tool + add_deps_recursive: + dae: + tags: _rclone + default: true + dry-run: + group: run-mode + env: + MLC_DOWNLOAD_MODE: dry + dry-run,rclone: + env: + MLC_DOWNLOAD_EXTRA_OPTIONS: --dry-run diff --git a/script/get-dataset-nuscenes/run.sh 
b/script/get-dataset-nuscenes/run.sh new file mode 100644 index 000000000..abe5a17c7 --- /dev/null +++ b/script/get-dataset-nuscenes/run.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + +if [[ "$MLC_DOWNLOAD_MODE" != "dry" && "$MLC_TMP_REQUIRE_DOWNLOAD" = "true" ]]; then + cd "${MLC_DATASET_NUSCENES_PATH}" || exit + for f in *.tar.gz; do + tar -xzvf "$f" || { echo "Failed to extract $f"; exit 1; } + done + cd "${MLC_DATASET_NUSCENES_PATH}/nuscenes" || exit + for f in *.tar.gz; do + tar -xzvf "$f" || { echo "Failed to extract $f"; exit 1; } + done + cd - || exit +fi \ No newline at end of file From 1dec2bde958e392a06d2eef32f5eb42c2b2529d7 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 14 May 2025 20:58:57 +0530 Subject: [PATCH 14/83] update run.sh --- script/get-dataset-cognata-mlcommons/run.sh | 4 ---- 1 file changed, 4 deletions(-) diff --git a/script/get-dataset-cognata-mlcommons/run.sh b/script/get-dataset-cognata-mlcommons/run.sh index 6177d043f..a2221b640 100644 --- a/script/get-dataset-cognata-mlcommons/run.sh +++ b/script/get-dataset-cognata-mlcommons/run.sh @@ -12,9 +12,5 @@ if [[ "$MLC_DOWNLOAD_MODE" != "dry" && "$MLC_TMP_REQUIRE_DOWNLOAD" == "true" && for f in *.tar.gz; do tar -xzvf "$f" || { echo "Failed to extract $f"; exit 1; } done - cd "${MLC_DATASET_MLCOMMONS_COGNATA_PATH}/nuscenes" || exit - for f in *.tar.gz; do - tar -xzvf "$f" || { echo "Failed to extract $f"; exit 1; } - done cd - || exit fi \ No newline at end of file From 8323fb856e2f5490fc0f5678e1a277e94f7412df Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 14 May 2025 21:03:44 +0530 Subject: [PATCH 15/83] fix syntax --- 
script/get-dataset-cognata-mlcommons/run.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/get-dataset-cognata-mlcommons/run.sh b/script/get-dataset-cognata-mlcommons/run.sh index a2221b640..5563a95da 100644 --- a/script/get-dataset-cognata-mlcommons/run.sh +++ b/script/get-dataset-cognata-mlcommons/run.sh @@ -7,7 +7,7 @@ #${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency -if [[ "$MLC_DOWNLOAD_MODE" != "dry" && "$MLC_TMP_REQUIRE_DOWNLOAD" == "true" && "$MLC_COGNATA_DATASET_TYPE" == "release"]]; then +if [[ "$MLC_DOWNLOAD_MODE" != "dry" && "$MLC_TMP_REQUIRE_DOWNLOAD" == "true" && "$MLC_COGNATA_DATASET_TYPE" == "release" ]]; then cd "${MLC_DATASET_MLCOMMONS_COGNATA_PATH}" || exit for f in *.tar.gz; do tar -xzvf "$f" || { echo "Failed to extract $f"; exit 1; } From 3af70e48e052876ef56133d1ce2f0804a6ab2378 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Thu, 15 May 2025 19:40:48 +0530 Subject: [PATCH 16/83] add script for prebuilt preprocessed datasets --- .../COPYRIGHT.md | 9 ++ .../README.md | 1 + .../customize.py | 19 ++++ .../meta.yaml | 101 ++++++++++++++++++ .../get-preprocessed-dataset-cognata/run.sh | 9 ++ .../COPYRIGHT.md | 9 ++ .../README.md | 1 + .../customize.py | 19 ++++ .../meta.yaml | 78 ++++++++++++++ .../get-preprocessed-dataset-nuscenes/run.sh | 9 ++ 10 files changed, 255 insertions(+) create mode 100644 script/get-preprocessed-dataset-cognata/COPYRIGHT.md create mode 100644 script/get-preprocessed-dataset-cognata/README.md create mode 100644 script/get-preprocessed-dataset-cognata/customize.py create mode 100644 script/get-preprocessed-dataset-cognata/meta.yaml create mode 100644 script/get-preprocessed-dataset-cognata/run.sh create mode 100644 script/get-preprocessed-dataset-nuscenes/COPYRIGHT.md create mode 100644 script/get-preprocessed-dataset-nuscenes/README.md create mode 100644 
script/get-preprocessed-dataset-nuscenes/customize.py create mode 100644 script/get-preprocessed-dataset-nuscenes/meta.yaml create mode 100644 script/get-preprocessed-dataset-nuscenes/run.sh diff --git a/script/get-preprocessed-dataset-cognata/COPYRIGHT.md b/script/get-preprocessed-dataset-cognata/COPYRIGHT.md new file mode 100644 index 000000000..2d6a2775e --- /dev/null +++ b/script/get-preprocessed-dataset-cognata/COPYRIGHT.md @@ -0,0 +1,9 @@ +# Copyright Notice + +© 2023-2025 MLCommons. All Rights Reserved. + +This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at: + +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) + +Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. diff --git a/script/get-preprocessed-dataset-cognata/README.md b/script/get-preprocessed-dataset-cognata/README.md new file mode 100644 index 000000000..fee3d0ae4 --- /dev/null +++ b/script/get-preprocessed-dataset-cognata/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-preprocessed-dataset-kits19](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-preprocessed-dataset-kits19) for the documentation of this CM script. 
diff --git a/script/get-preprocessed-dataset-cognata/customize.py b/script/get-preprocessed-dataset-cognata/customize.py new file mode 100644 index 000000000..4beec14d8 --- /dev/null +++ b/script/get-preprocessed-dataset-cognata/customize.py @@ -0,0 +1,19 @@ +from mlc import utils +import os +import shutil + + +def preprocess(i): + + env = i['env'] + + if env.get('MLC_NUSCENES_DATASET_TYPE', '') == "prebuilt": + env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" + + return {'return': 0} + + +def postprocess(i): + env = i['env'] + + return {'return': 0} diff --git a/script/get-preprocessed-dataset-cognata/meta.yaml b/script/get-preprocessed-dataset-cognata/meta.yaml new file mode 100644 index 000000000..e9aeb9510 --- /dev/null +++ b/script/get-preprocessed-dataset-cognata/meta.yaml @@ -0,0 +1,101 @@ +alias: get-preprocessed-dataset-cognata +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML datasets +default_env: + MLC_DATASET: cognata +new_env_keys: +- MLC_PREPROCESSED_DATASET_* +tags: +- get +- dataset +- cognata +- preprocessed +uid: 29b3a984ff444de9 +print_env_at_the_end: + MLC_PREPROCESSED_DATASET_COGNATA_PATH: Preprocessed Cognata dataset path +variations: + validation: + default: true + group: dataset-type + env: + MLC_DATASET_COGNATA_TYPE: validation + calibration: + group: dataset-type + env: + MLC_DATASET_COGNATA_TYPE: calibration + 2d_obj_det: + default: true + group: task + env: + MLC_DATASET_COGNATA_TASK: 2d_object_detection + segmentation: + group: task + env: + MLC_DATASET_COGNATA_TASK: segmentation + validation,2d_obj_det: + env: + MLC_DATASET_COGNATA_TAR_FILENAME: val_2d.tar.gz + MLC_DOWNLOAD_URL: mlc-cognata:mlc_cognata_dataset/preprocessed_2d/<<>> + calibration,2d_obj_det: + env: + MLC_DATASET_COGNATA_TAR_FILENAME: calib_2d.tar.gz + MLC_DOWNLOAD_URL: mlc-cognata:mlc_cognata_dataset/preprocessed_2d/<<>> + validation,segmentation: + env: + MLC_DATASET_COGNATA_TAR_FILENAME: val_seg.tar.gz + MLC_DOWNLOAD_URL: 
mlc-cognata:mlc_cognata_dataset/preprocessed_seg/<<>> + calibration,segmentation: + env: + MLC_DATASET_COGNATA_TAR_FILENAME: calib_seg.tar.gz + MLC_DOWNLOAD_URL: mlc-cognata:mlc_cognata_dataset/preprocessed_seg/<<>> + prebuilt: + default: true + group: dataset-src + env: + MLC_NUSCENES_DATASET_TYPE: prebuilt + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_COGNATA_PATH + MLC_EXTRACT_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_COGNATA_PATH + MLC_DOWNLOAD_EXTRA_OPTIONS: ' --include ' + mlc: + group: download-src + default: true + prehook_deps: + - tags: get,rclone + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes + - tags: get,rclone-config,_waymo + force_cache: true + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes + - enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - 'yes' + extra_cache_tags: nuscenes,dataset + force_cache: true + names: + - dae + tags: download-and-extract + force_env_keys: + - MLC_OUTDIRNAME + update_tags_from_env_with_prefix: + _url.: + - MLC_DOWNLOAD_URL + env: + MLC_DOWNLOAD_SRC: mlcommons + rclone: + group: download-tool + add_deps_recursive: + dae: + tags: _rclone + default: true + dry-run: + group: run-mode + env: + MLC_DOWNLOAD_MODE: dry + dry-run,rclone: + env: + MLC_DOWNLOAD_EXTRA_OPTIONS: --dry-run \ No newline at end of file diff --git a/script/get-preprocessed-dataset-cognata/run.sh b/script/get-preprocessed-dataset-cognata/run.sh new file mode 100644 index 000000000..ed1a21d87 --- /dev/null +++ b/script/get-preprocessed-dataset-cognata/run.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +if [[ "$MLC_DOWNLOAD_MODE" != "dry" && "$MLC_TMP_REQUIRE_DOWNLOAD" = "true" ]]; then + cd "${MLC_PREPROCESSED_DATASET_COGNATA_PATH}/${MLC_DATASET_COGNATA_TAR_FILENAME}" || exit + for f in *.tar.gz; do + tar -xzvf "$f" || { echo "Failed to extract $f"; exit 1; } + done + cd - || exit +fi \ No newline at end of file diff --git a/script/get-preprocessed-dataset-nuscenes/COPYRIGHT.md b/script/get-preprocessed-dataset-nuscenes/COPYRIGHT.md new file mode 100644 
index 000000000..2d6a2775e --- /dev/null +++ b/script/get-preprocessed-dataset-nuscenes/COPYRIGHT.md @@ -0,0 +1,9 @@ +# Copyright Notice + +© 2023-2025 MLCommons. All Rights Reserved. + +This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at: + +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) + +Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. diff --git a/script/get-preprocessed-dataset-nuscenes/README.md b/script/get-preprocessed-dataset-nuscenes/README.md new file mode 100644 index 000000000..fee3d0ae4 --- /dev/null +++ b/script/get-preprocessed-dataset-nuscenes/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-preprocessed-dataset-kits19](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-preprocessed-dataset-kits19) for the documentation of this CM script. 
diff --git a/script/get-preprocessed-dataset-nuscenes/customize.py b/script/get-preprocessed-dataset-nuscenes/customize.py new file mode 100644 index 000000000..4beec14d8 --- /dev/null +++ b/script/get-preprocessed-dataset-nuscenes/customize.py @@ -0,0 +1,19 @@ +from mlc import utils +import os +import shutil + + +def preprocess(i): + + env = i['env'] + + if env.get('MLC_NUSCENES_DATASET_TYPE', '') == "prebuilt": + env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" + + return {'return': 0} + + +def postprocess(i): + env = i['env'] + + return {'return': 0} diff --git a/script/get-preprocessed-dataset-nuscenes/meta.yaml b/script/get-preprocessed-dataset-nuscenes/meta.yaml new file mode 100644 index 000000000..af7a911dc --- /dev/null +++ b/script/get-preprocessed-dataset-nuscenes/meta.yaml @@ -0,0 +1,78 @@ +alias: get-preprocessed-dataset-nuscenes +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML datasets +default_env: + MLC_DATASET: nuscenes +new_env_keys: +- MLC_PREPROCESSED_DATASET_* +tags: +- get +- dataset +- nuscenes +- preprocessed +uid: 0e403a2861984a4e +print_env_at_the_end: + MLC_PREPROCESSED_DATASET_NUSCENES_PATH: Preprocessed Nuscenes dataset path +variations: + validation: + default: true + group: dataset-type + env: + MLC_DATASET_NUSCENES_TAR_FILENAME: val_3d.tar.gz + MLC_DOWNLOAD_URL: mlc-nuscenes:nuscenes_dataset/preprocessed/<<>> + calibration: + group: dataset-type + env: + MLC_DATASET_NUSCENES_TAR_FILENAME: calib_3d.tar.gz + MLC_DOWNLOAD_URL: mlc-nuscenes:nuscenes_dataset/preprocessed/<<>> + prebuilt: + default: true + group: dataset-src + env: + MLC_NUSCENES_DATASET_TYPE: prebuilt + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_NUSCENES_PATH + MLC_EXTRACT_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_NUSCENES_PATH + MLC_DOWNLOAD_EXTRA_OPTIONS: ' --include ' + mlc: + group: download-src + default: true + prehook_deps: + - tags: get,rclone + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes + - tags: 
get,rclone-config,_waymo + force_cache: true + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes + - enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - 'yes' + extra_cache_tags: nuscenes,dataset + force_cache: true + names: + - dae + tags: download-and-extract + force_env_keys: + - MLC_OUTDIRNAME + update_tags_from_env_with_prefix: + _url.: + - MLC_DOWNLOAD_URL + env: + MLC_DOWNLOAD_SRC: mlcommons + rclone: + group: download-tool + add_deps_recursive: + dae: + tags: _rclone + default: true + dry-run: + group: run-mode + env: + MLC_DOWNLOAD_MODE: dry + dry-run,rclone: + env: + MLC_DOWNLOAD_EXTRA_OPTIONS: --dry-run \ No newline at end of file diff --git a/script/get-preprocessed-dataset-nuscenes/run.sh b/script/get-preprocessed-dataset-nuscenes/run.sh new file mode 100644 index 000000000..9ec6ee767 --- /dev/null +++ b/script/get-preprocessed-dataset-nuscenes/run.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +if [[ "$MLC_DOWNLOAD_MODE" != "dry" && "$MLC_TMP_REQUIRE_DOWNLOAD" = "true" ]]; then + cd "${MLC_PREPROCESSED_DATASET_NUSCENES_PATH}/${MLC_DATASET_NUSCENES_TAR_FILENAME}" || exit + for f in *.tar.gz; do + tar -xzvf "$f" || { echo "Failed to extract $f"; exit 1; } + done + cd - || exit +fi \ No newline at end of file From 21e7d39b55b267f5390aac6f1c29e8da9671f055 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Fri, 16 May 2025 15:53:19 +0530 Subject: [PATCH 17/83] add script to get automotive ref implementation repo --- script/get-mlperf-automotive-src/COPYRIGHT.md | 9 + script/get-mlperf-automotive-src/customize.py | 154 ++++++++++++++++++ script/get-mlperf-automotive-src/meta.yaml | 103 ++++++++++++ 3 files changed, 266 insertions(+) create mode 100644 script/get-mlperf-automotive-src/COPYRIGHT.md create mode 100644 script/get-mlperf-automotive-src/customize.py create mode 100644 script/get-mlperf-automotive-src/meta.yaml diff --git a/script/get-mlperf-automotive-src/COPYRIGHT.md b/script/get-mlperf-automotive-src/COPYRIGHT.md new 
file mode 100644 index 000000000..d2ceead84 --- /dev/null +++ b/script/get-mlperf-automotive-src/COPYRIGHT.md @@ -0,0 +1,9 @@ +# Copyright Notice + +© 2025-2026 MLCommons. All Rights Reserved. + +This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at: + +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) + +Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. diff --git a/script/get-mlperf-automotive-src/customize.py b/script/get-mlperf-automotive-src/customize.py new file mode 100644 index 000000000..4108e005f --- /dev/null +++ b/script/get-mlperf-automotive-src/customize.py @@ -0,0 +1,154 @@ +from mlc import utils +from utils import is_true +import os +import shutil + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + meta = i['meta'] + + script_path = i['run_script_input']['path'] + + if env.get('MLC_GIT_CHECKOUT', '') == '' and env.get( + 'MLC_GIT_URL', '') == '' and env.get('MLC_VERSION', '') == '': + # if custom checkout and url parameters are not set and MLC_VERSION is + # not specified + env['MLC_VERSION'] = "master" + env["MLC_GIT_CHECKOUT"] = "master" + env["MLC_GIT_URL"] = "https://github.com/mlcommons/mlperf_automotive" + elif env.get('MLC_GIT_CHECKOUT', '') != '' and env.get('MLC_TMP_GIT_CHECKOUT', '') != '' and env.get('MLC_GIT_CHECKOUT', '') != env.get('MLC_TMP_GIT_CHECKOUT', ''): + # if checkout branch is assigned inside version and custom branch is + # also specified + return { + "return": 1, "error": "Conflicting branches between version assigned and user specified."} + elif env.get('MLC_GIT_URL', '') != '' and 
env.get('MLC_TMP_GIT_URL', '') != '' and env.get('MLC_GIT_URL', '') != env.get('MLC_TMP_GIT_URL', ''): + # if GIT URL is assigned inside version and custom branch is also + # specified + return { + "return": 1, "error": "Conflicting URL's between version assigned and user specified."} + + if env.get('MLC_VERSION', '') == '': + env['MLC_VERSION'] = "custom" + + # check whether branch and url is specified, + # if not try to assign the values specified in version parameters, + # if version parameters does not have the value to a parameter, set the + # default one + if env.get('MLC_GIT_CHECKOUT', '') == '' and env.get( + 'MLC_GIT_CHECKOUT_TAG', '') == '': + if env.get('MLC_TMP_GIT_CHECKOUT', '') != '': + env["MLC_GIT_CHECKOUT"] = env["MLC_TMP_GIT_CHECKOUT"] + else: + env["MLC_GIT_CHECKOUT"] = "master" + + if env.get('MLC_GIT_URL', '') == '': + if env.get('MLC_TMP_GIT_URL', '') != '': + env["MLC_GIT_URL"] = env["MLC_TMP_GIT_URL"] + else: + env["MLC_GIT_URL"] = "https://github.com/mlcommons/mlperf_automotive" + + if env.get("MLC_MLPERF_LAST_RELEASE", '') == '': + env["MLC_MLPERF_LAST_RELEASE"] = "v0.5" + + if 'MLC_GIT_DEPTH' not in env: + env['MLC_GIT_DEPTH'] = '' + + if 'MLC_GIT_RECURSE_SUBMODULES' not in env: + env['MLC_GIT_RECURSE_SUBMODULES'] = '' + submodules = [] + possible_submodules = { + "pybind": "third_party/pybind", + } + for submodule in possible_submodules: + env_name = submodule.upper().replace("-", "_") + if is_true(env.get("MLC_SUBMODULE_" + env_name)): + submodules.append(possible_submodules[submodule]) + + env['MLC_GIT_SUBMODULES'] = ",".join(submodules) + + if env.get('MLC_GIT_PATCH_FILENAME', '') != '': + patch_file_name = env['MLC_GIT_PATCH_FILENAME'] + env['MLC_GIT_PATCH_FILEPATHS'] = os.path.join( + script_path, 'patch', patch_file_name) + + need_version = env.get('MLC_VERSION', '') + versions = meta['versions'] + + if need_version != '' and not need_version in versions: + env['MLC_GIT_CHECKOUT'] = need_version + + return {'return': 0} + + +def 
postprocess(i): + + env = i['env'] + state = i['state'] + + automotive_root = env['MLC_MLPERF_AUTOMOTIVE_SOURCE'] + env['MLC_MLPERF_AUTOMOTIVE_BEVFORMER_PATH'] = os.path.join( + automotive_root, 'camera-3d-detection') + env['MLC_MLPERF_AUTOMOTIVE_SSD_RESNET50_PATH'] = os.path.join( + automotive_root, '2d-object-detection') + env['MLC_MLPERF_AUTOMOTIVE_DEEPLABV3PLUS_PATH'] = os.path.join( + automotive_root, 'semantic-segmentation') + + env['MLC_GET_DEPENDENT_CACHED_PATH'] = automotive_root + +# 20221024: we save and restore env in the main script and can clean env here for determinism +# if '+PYTHONPATH' not in env: env['+PYTHONPATH'] = [] + env['+PYTHONPATH'] = [] + + if os.path.exists(os.path.join(automotive_root, "loadgen", "VERSION.txt")): + with open(os.path.join(automotive_root, "loadgen", "VERSION.txt")) as f: + version_info = f.read().strip() + env['MLC_MLPERF_AUTOMOTIVE_SOURCE_VERSION'] = version_info + + if is_true(env.get('MLC_GET_MLPERF_IMPLEMENTATION_ONLY', '')): + return {'return': 0} + + env['MLC_MLPERF_AUTOMOTIVE_CONF_PATH'] = os.path.join( + automotive_root, 'mlperf.conf') + env['+PYTHONPATH'].append( + os.path.join( + env['MLC_MLPERF_AUTOMOTIVE_SOURCE'], + 'tools', + 'submission')) + + # To be uncommented after Pablo's PR is merged: https://github.com/mlcommons/mlperf_automotive/pull/14 + # valid_models = get_valid_models( + # env['MLC_MLPERF_LAST_RELEASE'], + # env['MLC_MLPERF_AUTOMOTIVE_SOURCE']) + + # state['MLC_MLPERF_AUTOMOTIVE_MODELS'] = valid_models + + if env.get('MLC_GIT_REPO_CURRENT_HASH', '') != '': + env['MLC_VERSION'] += "-git-" + env['MLC_GIT_REPO_CURRENT_HASH'] + + return {'return': 0, 'version': env['MLC_VERSION']} + + +def get_valid_models(mlperf_version, mlperf_path): + + import sys + + submission_checker_dir = os.path.join(mlperf_path, "tools", "submission") + + sys.path.append(submission_checker_dir) + + if not os.path.exists(os.path.join( + submission_checker_dir, "submission_checker.py")): + 
shutil.copy(os.path.join(submission_checker_dir, "submission-checker.py"), os.path.join(submission_checker_dir, + "submission_checker.py")) + + import submission_checker as checker + + config = checker.MODEL_CONFIG + + valid_models = config[mlperf_version]["models"] + + return valid_models diff --git a/script/get-mlperf-automotive-src/meta.yaml b/script/get-mlperf-automotive-src/meta.yaml new file mode 100644 index 000000000..9a8539c99 --- /dev/null +++ b/script/get-mlperf-automotive-src/meta.yaml @@ -0,0 +1,103 @@ +alias: get-mlperf-automotive-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: MLPerf benchmark support +default_env: + MLC_GIT_CHECKOUT_FOLDER: automotive + MLC_GIT_DEPTH: --depth 4 + MLC_GIT_PATCH: 'no' + MLC_GIT_RECURSE_SUBMODULES: '' +default_version: master +deps: +- tags: detect,os +- names: + - python + - python3 + tags: get,python3 +new_env_keys: +- MLC_MLPERF_AUTOMOTIVE_BEVFORMER_PATH +- MLC_MLPERF_AUTOMOTIVE_SSD_RESNET50_PATH +- MLC_MLPERF_AUTOMOTIVE_DEEPLABV3PLUS_PATH +- MLC_MLPERF_LAST_RELEASE +- MLC_MLPERF_AUTOMOTIVE_SOURCE +- MLC_MLPERF_INFERENCE_SOURCE_VERSION +- +PYTHONPATH +prehook_deps: +- env: + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_MLPERF_AUTOMOTIVE_SOURCE + extra_cache_tags: automotive,src + force_env_keys: + - MLC_GIT_* + names: + - automotive-git-repo + tags: get,git,repo + update_tags_from_env_with_prefix: + _branch.: + - MLC_GIT_CHECKOUT + _repo.: + - MLC_GIT_URL + _sha.: + - MLC_GIT_SHA + _submodules.: + - MLC_GIT_SUBMODULES +print_env_at_the_end: + MLC_MLPERF_AUTOMOTIVE_SOURCE: Path to MLPerf automotive benchmark source +tags: +- get +- src +- source +- automotive +- automotive-src +- automotive-source +- mlperf +- mlcommons +uid: c3842e6e35d947ef +variations: + branch.#: + default_version: custom + env: + MLC_GIT_CHECKOUT: '#' + group: checkout + full-history: + env: + MLC_GIT_DEPTH: '' + group: git-history + no-recurse-submodules: + env: + MLC_GIT_RECURSE_SUBMODULES: '' + patch: + ad: + 
automotive-git-repo: + tags: _patch + env: + MLC_GIT_PATCH: 'yes' + pybind: + env: + MLC_SUBMODULE_PYBIND: 'yes' + recurse-submodules: + env: + MLC_GIT_RECURSE_SUBMODULES: ' --recurse-submodules' + repo.#: + env: + MLC_GIT_URL: '#' + sha.#: + env: + MLC_GIT_SHA: '#' + group: checkout + short-history: + default: true + env: + MLC_GIT_DEPTH: --depth 10 + group: git-history + submodules.#: + env: + MLC_GIT_SUBMODULES: '#' +versions: + custom: + env: + MLC_MLPERF_LAST_RELEASE: v0.5 + master: + env: + MLC_MLPERF_LAST_RELEASE: v0.5 + MLC_TMP_GIT_CHECKOUT: master From 171cf0043e873bd96c8f68192e90eea6c2a984d1 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Fri, 16 May 2025 16:24:56 +0530 Subject: [PATCH 18/83] add script to get loadgen for automotive --- .../COPYRIGHT.md | 9 ++ .../customize.py | 60 ++++++++ .../get-mlperf-automotive-loadgen/meta.yaml | 143 ++++++++++++++++++ script/get-mlperf-automotive-loadgen/run.bat | 39 +++++ script/get-mlperf-automotive-loadgen/run.sh | 52 +++++++ 5 files changed, 303 insertions(+) create mode 100644 script/get-mlperf-automotive-loadgen/COPYRIGHT.md create mode 100644 script/get-mlperf-automotive-loadgen/customize.py create mode 100644 script/get-mlperf-automotive-loadgen/meta.yaml create mode 100644 script/get-mlperf-automotive-loadgen/run.bat create mode 100644 script/get-mlperf-automotive-loadgen/run.sh diff --git a/script/get-mlperf-automotive-loadgen/COPYRIGHT.md b/script/get-mlperf-automotive-loadgen/COPYRIGHT.md new file mode 100644 index 000000000..d2ceead84 --- /dev/null +++ b/script/get-mlperf-automotive-loadgen/COPYRIGHT.md @@ -0,0 +1,9 @@ +# Copyright Notice + +© 2025-2026 MLCommons. All Rights Reserved. + +This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. 
A copy of the License can be obtained at: + +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) + +Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. diff --git a/script/get-mlperf-automotive-loadgen/customize.py b/script/get-mlperf-automotive-loadgen/customize.py new file mode 100644 index 000000000..aea13a854 --- /dev/null +++ b/script/get-mlperf-automotive-loadgen/customize.py @@ -0,0 +1,60 @@ +from mlc import utils +from utils import is_true +import os + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + # automotive loadgen is yet to be uploaded to pypi + if is_true(env.get('MLC_TMP_MLPERF_AUTOMOTIVE_LOADGEN_INSTALL_FROM_PIP', + '')): + i['run_script_input']['script_name'] = "donotrun" + + return {'return': 0} + + +def postprocess(i): + + os_info = i['os_info'] + env = i['env'] + + if is_true(env.get('MLC_TMP_MLPERF_AUTOMOTIVE_LOADGEN_INSTALL_FROM_PIP', + '')): + return {'return': 0} + + for key in ['+PYTHONPATH', '+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', + '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: + # 20221024: we save and restore env in the main script and can clean env here for determinism + # if key not in env: + env[key] = [] + + # On Windows installs directly into Python distro for simplicity +# if os_info['platform'] != 'windows': + + cur_path = os.getcwd() + install_path = os.path.join(cur_path, 'install') + + env['MLC_MLPERF_AUTOMOTIVE_LOADGEN_INSTALL_PATH'] = install_path + + build_path = os.path.join(cur_path, 'build') + if os.path.exists(build_path): + env['MLC_MLPERF_AUTOMOTIVE_LOADGEN_BUILD_PATH'] = build_path + + include_path = os.path.join(install_path, 'include') + lib_path = os.path.join(install_path, 'lib') + python_path = 
os.path.join(install_path, 'python') + + env['+C_INCLUDE_PATH'].append(include_path) + env['+CPLUS_INCLUDE_PATH'].append(include_path) + env['MLC_MLPERF_AUTOMOTIVE_LOADGEN_INCLUDE_PATH'] = include_path + + env['+LD_LIBRARY_PATH'].append(lib_path) + env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) + env['MLC_MLPERF_AUTOMOTIVE_LOADGEN_LIBRARY_PATH'] = lib_path + + env['+PYTHONPATH'].append(python_path) + env['MLC_MLPERF_AUTOMOTIVE_LOADGEN_PYTHON_PATH'] = python_path + + return {'return': 0} diff --git a/script/get-mlperf-automotive-loadgen/meta.yaml b/script/get-mlperf-automotive-loadgen/meta.yaml new file mode 100644 index 000000000..c83e82d16 --- /dev/null +++ b/script/get-mlperf-automotive-loadgen/meta.yaml @@ -0,0 +1,143 @@ +alias: get-mlperf-automotive-loadgen +uid: 82396582494a4d38 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: true + +category: MLPerf benchmark support + +default_env: + MLC_SHARED_BUILD: 'no' + +default_version: master + +deps: +- tags: detect,os +- names: + - python3 + - python + tags: get,python3 +- force_env_keys: + - MLC_GIT_URL + - MLC_GIT_CHECKOUT + names: + - automotive-src-loadgen + skip_if_env: + MLC_MLPERF_AUTOMOTIVE_LOADGEN_DOWNLOAD: + - 'YES' + tags: get,mlcommons,automotive,src +- enable_if_env: + MLC_MLPERF_AUTOMOTIVE_LOADGEN_DOWNLOAD: + - 'YES' + force_cache: true + names: + - automotive-src-loadgen-download + tags: download-and-extract,file,_wget,_extract + update_tags_from_env_with_prefix: + _url.: + - MLC_MLPERF_AUTOMOTIVE_LOADGEN_DOWNLOAD_URL +- names: + - compiler + skip_if_any_env: + MLC_HOST_OS_TYPE: + - windows + MLC_TMP_MLPERF_AUTOMOTIVE_LOADGEN_INSTALL_FROM_PIP: + - 'yes' + tags: get,compiler +- enable_if_env: + MLC_HOST_OS_TYPE: + - windows + skip_if_env: + MLC_TMP_MLPERF_AUTOMOTIVE_LOADGEN_INSTALL_FROM_PIP: + - 'yes' + names: + - compiler + tags: get,cl +- names: + - cmake + tags: get,cmake + version_min: '3.12' +- names: + - pip-package + - wheel + tags: 
get,generic-python-lib,_package.wheel +- names: + - pip-package + - pip + tags: get,generic-python-lib,_pip +- names: + - pip-package + - pybind11 + tags: get,generic-python-lib,_package.pybind11 +- names: + - pip-package + - setuputils + tags: get,generic-python-lib,_package.setuptools + +extra_cache_tags_from_env: +- env: MLC_PYTHON_CACHE_TAGS + prefix: python- +- env: MLC_COMPILER_CACHE_TAGS + prefix: compiler- + +new_env_keys: +- +PYTHONPATH +- +C_INCLUDE_PATH +- +CPLUS_INCLUDE_PATH +- +LD_LIBRARY_PATH +- +DYLD_FALLBACK_LIBRARY_PATH +- MLC_MLPERF_AUTOMOTIVE_LOADGEN_* + +tags: +- get +- loadgen +- automotive +- automotive-loadgen +- mlperf +- mlcommons + +variations: + from-pip: + env: + MLC_TMP_MLPERF_AUTOMOTIVE_LOADGEN_INSTALL_FROM_PIP: 'yes' + deps: + - tags: get,generic-python-lib,_package.mlcommons-loadgen + custom-python: + ad: + pip-package: + tags: _custom-python + python3: + skip_if_env: + MLC_TMP_USE_CUSTOM_PYTHON: + - 'on' + env: + MLC_TMP_USE_CUSTOM_PYTHON: 'on' + keep-build: + group: clean-build + env: + MLC_MLPERF_AUTOMOTIVE_LOADGEN_BUILD_CLEAN: 'no' + clean-build: + group: clean-build + default: true + env: + MLC_MLPERF_AUTOMOTIVE_LOADGEN_BUILD_CLEAN: 'yes' + no-compilation-warnings: + env: + '+ CXXFLAGS': + - '-Werror' + - '-Wno-unused-parameter' + +versions: + custom: + add_deps: + automotive-src-loadgen: + version: custom + master: + add_deps: + automotive-src-loadgen: + version: master + +print_env_at_the_end: + MLC_MLPERF_AUTOMOTIVE_LOADGEN_INSTALL_PATH: "Path to the tool" diff --git a/script/get-mlperf-automotive-loadgen/run.bat b/script/get-mlperf-automotive-loadgen/run.bat new file mode 100644 index 000000000..18a26dd5d --- /dev/null +++ b/script/get-mlperf-automotive-loadgen/run.bat @@ -0,0 +1,39 @@ +@echo off + +echo ======================================================= + +set CUR_DIR=%cd% +echo Current path in CM script: %CUR_DIR% + +if "%MLC_MLPERF_AUTOMOTIVE_LOADGEN_DOWNLOAD%" == "YES" ( + set 
MLC_MLPERF_AUTOMOTIVE_SOURCE=%MLC_EXTRACT_EXTRACTED_PATH% +) + +set INSTALL_DIR=%CUR_DIR%\install + +echo. +echo Switching to %MLC_MLPERF_AUTOMOTIVE_SOURCE%\loadgen + +cd %MLC_MLPERF_AUTOMOTIVE_SOURCE%\loadgen +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +echo. +echo Running %MLC_PYTHON_BIN% setup.py develop + +%MLC_PYTHON_BIN% setup.py develop +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +echo ======================================================= +cmake ^ + -DCMAKE_INSTALL_PREFIX=%INSTALL_DIR% ^ + %MLC_MLPERF_AUTOMOTIVE_SOURCE%\loadgen ^ + -DPYTHON_EXECUTABLE:FILEPATH=%MLC_PYTHON_BIN_WITH_PATH% +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +echo ======================================================= +cmake --build . --target install +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +del /Q /S build + +echo ======================================================= diff --git a/script/get-mlperf-automotive-loadgen/run.sh b/script/get-mlperf-automotive-loadgen/run.sh new file mode 100644 index 000000000..e31617a5e --- /dev/null +++ b/script/get-mlperf-automotive-loadgen/run.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +CUR_DIR=$PWD + +mkdir -p install +mkdir -p build + +INSTALL_DIR="${CUR_DIR}/install" + +echo "******************************************************" + +cd build + +if [ "${MLC_MLPERF_AUTOMOTIVE_LOADGEN_DOWNLOAD}" == "YES" ]; then + export MLC_MLPERF_AUTOMOTIVE_SOURCE="${MLC_EXTRACT_EXTRACTED_PATH}" +fi + + +if [ -z "${MLC_MLPERF_AUTOMOTIVE_SOURCE}" ]; then + echo "Error: env MLC_MLPERF_AUTOMOTIVE_SOURCE is not defined - something is wrong with script automation!" + exit 1 +fi + +cmake \ + -DCMAKE_INSTALL_PREFIX="${INSTALL_DIR}" \ + "${MLC_MLPERF_AUTOMOTIVE_SOURCE}/loadgen" \ + -DPYTHON_EXECUTABLE:FILEPATH="${MLC_PYTHON_BIN_WITH_PATH}" -B . +test $? -eq 0 || exit $? + +echo "******************************************************" +MLC_MAKE_CORES=${MLC_MAKE_CORES:-${MLC_HOST_CPU_TOTAL_CORES}} +MLC_MAKE_CORES=${MLC_MAKE_CORES:-2} + +cmake --build . 
--target install -j "${MLC_MAKE_CORES}" +test $? -eq 0 || exit $? + +# Clean build directory (too large) +cd "${CUR_DIR}" +if [[ $MLC_MLPERF_AUTOMOTIVE_LOADGEN_BUILD_CLEAN == "yes" ]]; then + rm -rf build +fi + + +cd "${MLC_MLPERF_AUTOMOTIVE_SOURCE}/loadgen" +${MLC_PYTHON_BIN_WITH_PATH} -m pip install . --target="${MLPERF_AUTOMOTIVE_PYTHON_SITE_BASE}" +test $? -eq 0 || exit $? + +# Clean the built wheel +#find . -name 'mlcommons_loadgen*.whl' | xargs rm + +echo "******************************************************" +echo "Loadgen is built and installed to ${INSTALL_DIR} ..." From 9d7e769427626b86c085e5d311ee73cd57fcbd11 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Fri, 16 May 2025 17:27:44 +0530 Subject: [PATCH 19/83] add script for automotive utils --- .../get-mlperf-automotive-utils/COPYRIGHT.md | 9 + .../get-mlperf-automotive-utils/customize.py | 36 ++ script/get-mlperf-automotive-utils/meta.yaml | 18 + .../mlperf_utils.py | 353 ++++++++++++++++++ 4 files changed, 416 insertions(+) create mode 100644 script/get-mlperf-automotive-utils/COPYRIGHT.md create mode 100644 script/get-mlperf-automotive-utils/customize.py create mode 100644 script/get-mlperf-automotive-utils/meta.yaml create mode 100644 script/get-mlperf-automotive-utils/mlperf_utils.py diff --git a/script/get-mlperf-automotive-utils/COPYRIGHT.md b/script/get-mlperf-automotive-utils/COPYRIGHT.md new file mode 100644 index 000000000..d2ceead84 --- /dev/null +++ b/script/get-mlperf-automotive-utils/COPYRIGHT.md @@ -0,0 +1,9 @@ +# Copyright Notice + +© 2025-2026 MLCommons. All Rights Reserved. + +This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. 
A copy of the License can be obtained at: + +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) + +Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. diff --git a/script/get-mlperf-automotive-utils/customize.py b/script/get-mlperf-automotive-utils/customize.py new file mode 100644 index 000000000..d1ab8ab70 --- /dev/null +++ b/script/get-mlperf-automotive-utils/customize.py @@ -0,0 +1,36 @@ +from mlc import utils +from utils import is_true +import os +import sys + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = is_true(env.get('MLC_QUIET', False)) + + utils_path = env['MLC_TMP_CURRENT_SCRIPT_PATH'] + + env['+PYTHONPATH'] = [utils_path] + + submission_checker_dir = os.path.join( + env['MLC_MLPERF_AUTOMOTIVE_SOURCE'], "tools", "submission") + + sys.path.append(submission_checker_dir) + sys.path.append(utils_path) + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/script/get-mlperf-automotive-utils/meta.yaml b/script/get-mlperf-automotive-utils/meta.yaml new file mode 100644 index 000000000..5a71e88b7 --- /dev/null +++ b/script/get-mlperf-automotive-utils/meta.yaml @@ -0,0 +1,18 @@ +alias: get-mlperf-automotive-utils +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +tags: +- get +- mlperf +- automotive +- util +- utils +- functions +uid: c20cfade1c184f83 +deps: + - tags: get,mlperf,automotive,src + names: + - automotive-src +new_env_keys: + - '+PYTHONPATH' diff --git a/script/get-mlperf-automotive-utils/mlperf_utils.py b/script/get-mlperf-automotive-utils/mlperf_utils.py new file mode 100644 index 000000000..f7441cedd --- 
/dev/null +++ b/script/get-mlperf-automotive-utils/mlperf_utils.py @@ -0,0 +1,353 @@ +import sys +import os +import submission_checker as checker +from log_parser import MLPerfLog + + +def get_result_from_log(version, model, scenario, + result_path, mode, automotive_src_version=None): + + config = checker.Config( + version, + None, + ignore_uncommited=False, + skip_power_check=False, + ) + mlperf_model = config.get_mlperf_model(model) + # scenario = checker.SCENARIO_MAPPING[scenario] + + result = '' + power_result = None + valid = {} + if mode == "performance": + # has_power = os.path.exists(os.path.join(result_path, "..", "power")) + version_tuple = None + if automotive_src_version: + version_tuple = tuple(map(int, automotive_src_version.split('.'))) + + if version_tuple and version_tuple >= (4, 1, 22): + result_ = checker.get_performance_metric( + config, mlperf_model, result_path, scenario) + else: + result_ = checker.get_performance_metric( + config, mlperf_model, result_path, scenario, None, None, has_power) + mlperf_log = MLPerfLog( + os.path.join( + result_path, + "mlperf_log_detail.txt")) + if ( + "result_validity" not in mlperf_log.get_keys() + or mlperf_log["result_validity"] != "VALID" + ): + valid['performance'] = False + else: + valid['performance'] = True + + if "stream" in scenario.lower(): + result = result_ / 1000000 # convert to milliseconds + else: + result = result_ + result = str(round(result, 3)) + + # if has_power: + # power_valid, power_metric, scenario, avg_power_efficiency = checker.get_power_metric( + # config, scenario, result_path, True, result_) + # power_result = f"{round(power_metric,3)},{round(avg_power_efficiency,3)}" + # valid['power'] = power_valid + + elif mode == "accuracy" and os.path.exists(os.path.join(result_path, 'accuracy.txt')): + + acc_valid, acc_results, acc_targets, acc_limits = get_accuracy_metric( + config, mlperf_model, result_path) + valid['accuracy'] = acc_valid + + if len(acc_results) == 1: + for acc in 
acc_results: + result = str(round(float(acc_results[acc]), 5)) + else: + result = '(' + result_list = [] + for i, acc in enumerate(acc_results): + result_list.append(str(round(float(acc_results[acc]), 5))) + result += ", ".join(result_list) + ")" + + return result, valid, power_result + + +def get_accuracy_metric(config, model, path): + + import re + is_valid = False + all_accuracy_valid = True + acc = None + result_acc = None + target = config.get_accuracy_target(model) + acc_upper_limit = config.get_accuracy_upper_limit(model) + patterns = [] + acc_targets = [] + acc_limits = [None] * (len(target) // 2) + up_patterns = [None] * (len(target) // 2) + acc_types = [] + + if acc_upper_limit is not None: + acc_limit_check = True + + for ii in range(0, len(target), 2): + acc_type1, tmp = target[ii:ii + 2] + for i in range(0, len(acc_upper_limit), 2): + acc_type, acc_target = acc_upper_limit[i:i + 2] + if acc_type != acc_type1: + continue + acc_limits[ii // 2] = acc_target + up_patterns[ii // 2] = checker.ACC_PATTERN[acc_type] + + for i in range(0, len(target), 2): + acc_type, acc_target = target[i:i + 2] + acc_types.append(acc_type) + patterns.append(checker.ACC_PATTERN[acc_type]) + acc_targets.append(acc_target) + + acc_seen = [False for _ in acc_targets] + acc_results = {} + with open(os.path.join(path, "accuracy.txt"), "r", encoding="utf-8") as f: + for line in f: + for i, (pattern, acc_target, acc_type) in enumerate( + zip(patterns, acc_targets, acc_types)): + m = re.match(pattern, line) + if m: + acc = m.group(1) + + acc_results[acc_type] = acc + + if acc is not None and float(acc) >= acc_target: + all_accuracy_valid &= True + acc_seen[i] = True + elif acc is not None: + all_accuracy_valid = False + # log.warning("%s accuracy not met: expected=%f, found=%s", path, acc_target, acc) + if i == 0 and acc: + result_acc = acc + acc = None + if acc_upper_limit is not None: + for i, (pattern, acc_limit) in enumerate( + zip(up_patterns, acc_limits)): + if not pattern: + 
continue + m = re.match(pattern, line) + if m: + acc = m.group(1) + if acc is not None and acc_upper_limit is not None and float( + acc) > acc_limit: + acc_limit_check = False + # log.warning("%s accuracy not met: upper limit=%f, found=%s", path, acc_limit, acc) + acc = None + if all(acc_seen): + break + is_valid = all_accuracy_valid & all(acc_seen) + if acc_upper_limit is not None: + is_valid &= acc_limit_check + + return is_valid, acc_results, acc_targets, acc_limits + + +def get_result_string(version, model, scenario, result_path, has_power, sub_res, + division="open", system_json=None, model_precision="fp32", automotive_src_version=None): + + config = checker.Config( + version, + None, + ignore_uncommited=False, + skip_power_check=False, + ) + mlperf_model = config.get_mlperf_model(model) + performance_path = os.path.join(result_path, "performance", "run_1") + accuracy_path = os.path.join(result_path, "accuracy") + scenario = checker.SCENARIO_MAPPING[scenario.lower()] + + fname = os.path.join(performance_path, "mlperf_log_detail.txt") + mlperf_log = MLPerfLog(fname) + effective_scenario = mlperf_log["effective_scenario"] + inferred = False + result = {} + + version_tuple = None + if automotive_src_version: + version_tuple = tuple(map(int, automotive_src_version.split('.'))) + + if version_tuple and version_tuple >= (4, 1, 22): + performance_result = checker.get_performance_metric( + config, mlperf_model, performance_path, scenario) + else: + performance_result = checker.get_performance_metric( + config, mlperf_model, performance_path, scenario, None, None) + if "stream" in scenario.lower(): + performance_result_ = performance_result / 1000000 # convert to milliseconds + else: + performance_result_ = performance_result + result['performance'] = round(performance_result_, 3) + + if scenario != effective_scenario: + inferred, inferred_result = checker.get_inferred_result( + scenario, effective_scenario, performance_result, mlperf_log, config, False) + + # if 
has_power: + # is_valid, power_metric, scenario, avg_power_efficiency = checker.get_power_metric( + # config, scenario, performance_path, True, performance_result) + # if "stream" in scenario.lower(): + # power_metric_unit = "milliJoules" + # else: + # power_metric_unit = "Watts" + # power_result_string = f"`Power consumed`: `{round(power_metric, 3)} {power_metric_unit}`, `Power efficiency`: `{round(avg_power_efficiency * 1000, 3)} samples per Joule`" + + # power_result = round(power_metric, 3) + # power_efficiency_result = round(avg_power_efficiency, 3) + # result['power'] = power_result + # result['power_efficiency'] = power_efficiency_result + + # compliance_list = ["TEST01", "TEST04", "TEST06"] + # if division == "closed": + # for test in compliance_list: + # test_path = os.path.join(result_path, test) + # if os.path.exists( + # test_path): # We dont consider missing test folders now - submission checker will do that + # # test_pass = checker.check_compliance_dir(test_path, mlperf_model, scenario, config, "closed", system_json, sub_res) + # test_pass = checker.check_compliance_perf_dir( + # test_path) if test != "TEST06" else True + # if test_pass and test in ["TEST01", "TEST06"]: + # # test_pass = checker.check_compliance_acc_dir(test_path, mlperf_model, config) + # pass # accuracy truncation script is done after submission generation. 
We assume here that it'll pass + # if test_pass: + # result[test] = "passed" + # else: + # result[test] = "failed" + + acc_valid, acc_results, acc_targets, acc_limits = get_accuracy_metric( + config, mlperf_model, accuracy_path) + + result_field = checker.RESULT_FIELD[effective_scenario] + + performance_result_string = f"`{result_field}`: `{performance_result}`\n" + if inferred: + inferred_result_field = checker.RESULT_FIELD[scenario] + performance_result_string += f"Inferred result: `{inferred_result_field}`: `{inferred_result}` \n" + + accuracy_result_string = '' + accuracy_results = [] + for i, acc in enumerate(acc_results): + accuracy_results.append(str(round(float(acc_results[acc]), 5))) + accuracy_result_string += f"`{acc}`: `{round(float(acc_results[acc]), 5)}`" + if not acc_limits or not acc_limits[i]: + accuracy_result_string += f", Required accuracy for closed division `>= {round(acc_targets[i], 5)}`" + else: + accuracy_result_string += f", Required accuracy for closed division `>= {round(acc_targets[i], 5)}` and `<= {round(acc_limits[i], 5)}`" + accuracy_result_string += "\n" + + if len(accuracy_results) == 1: + accuracy_result = accuracy_results[0] + else: + accuracy_result = "(" + ", ".join(accuracy_results) + ")" + result['accuracy'] = accuracy_result + + result_string = f"\n\n## Results\n" + result_string += f"\nPlatform: {sub_res}\n" + result_string += f"\nModel Precision: {model_precision}\n" + result_string += "\n### Accuracy Results \n" + accuracy_result_string + result_string += "\n### Performance Results \n" + performance_result_string + # if has_power: + # result_string += "\n### Power Results \n" + power_result_string + + return result_string, result + + +def get_result_table(results): + + headers = [ + "Model", + "Scenario", + "Accuracy", + "Throughput", + "Latency (in ms)", + "Power Efficiency (in samples/J)", + "TEST01", + "TEST04"] + table = [] + for model in results: + for scenario in results[model]: + row = [] + row.append(model) + 
row.append(scenario) + if results[model][scenario].get('accuracy'): + val = str(results[model][scenario]['accuracy']) + if not results[model][scenario].get('accuracy_valid', True): + val = "X " + val + row.append(val) + else: + row.append("-") + + if results[model][scenario].get('performance'): + + if "stream" in scenario.lower(): + if float(results[model][scenario]['performance']) == 0: + row.append("-") + elif scenario.lower() == "singlestream": + val_qps = str( + round( + 1000 / + float( + results[model][scenario]['performance']), + 3)) + if not results[model][scenario].get( + 'performance_valid', True): # we explicitly mark invalid results + val_qps = "X " + val_qps + row.append(val_qps) + elif scenario.lower() == "multistream": + val_qps = str( + round( + 8000 / + float( + results[model][scenario]['performance']), + 3)) + if not results[model][scenario].get( + 'performance_valid', True): + val_qps = "X " + val_qps + row.append(val_qps) + val = str(results[model][scenario]['performance']) + if not results[model][scenario].get( + 'performance_valid', True): + val = "X " + val + row.append(val) + else: + val = str(results[model][scenario]['performance']) + if not results[model][scenario].get( + 'performance_valid', True): + val = "X " + val + row.append(val) + row.append("-") + + # val1 = results[model][scenario].get('TEST01') + # val2 = results[model][scenario].get('TEST05') + # val3 = results[model][scenario].get('TEST04') + + # if results[model][scenario].get('power','') != '': + # row.append(results[model][scenario]['power']) + # if results[model][scenario].get('power_efficiency', '') != '': + # val = str(results[model][scenario]['power_efficiency']) + # if not results[model][scenario].get('power_valid', True): + # val = "X " + val + # row.append(val) + # elif val1 or val3: # Don't output unless there are any further column data + # row.append(None) + + # if val1: + # row.append(val1) + # if val3: + # row.append(val3) + + # else: + # if val3: + # 
row.append("missing") + # row.append(val3) + + table.append(row) + + return table, headers From f7bfd992fdc48143be099d0c3a3481305c70d015 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Sat, 17 May 2025 08:36:07 +0530 Subject: [PATCH 20/83] updated accuracy script for including automotive models --- script/process-mlperf-accuracy/customize.py | 12 ++++++++++++ script/process-mlperf-accuracy/meta.yaml | 12 ++++++++++++ 2 files changed, 24 insertions(+) diff --git a/script/process-mlperf-accuracy/customize.py b/script/process-mlperf-accuracy/customize.py index 4ef3fbd43..db0d98370 100644 --- a/script/process-mlperf-accuracy/customize.py +++ b/script/process-mlperf-accuracy/customize.py @@ -208,6 +208,18 @@ def preprocess(i): CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "automotive", "3d-object-detection", "accuracy_waymo.py") + "' --mlperf-accuracy-file '" + os.path.join( result_dir, "mlperf_log_accuracy.json") + "' --waymo-dir '" + env['MLC_DATASET_WAYMO_PATH'] + "' > '" + out_file + "'" + elif dataset == "nuscenes": + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_AUTOMOTIVE_BEVFORMER_PATH'], "accuracy_nuscenes_cpu.py") + "' --mlperf-accuracy-file '" + os.path.join( + result_dir, "mlperf_log_accuracy.json") + "' --nuscenes-dir '" + env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'] + "' --config '" + os.path.join(env['MLC_MLPERF_AUTOMOTIVE_BEVFORMER_PATH'], "projects" + "configs" + "bevformer" + "bevformer_tiny.py") + "' > '" + out_file + "'" + + elif dataset == "cognata_ssd": + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_AUTOMOTIVE_SSD_RESNET50_PATH'], "accuracy_cognata.py") + "' --mlperf-accuracy-file '" + os.path.join( + result_dir, "mlperf_log_accuracy.json") + "' --dataset-path '" + env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'] + "' --dataset cognata --config '" + "baseline_8MP_ss_scales_fm1_5x5_all" + "' > '" 
+ out_file + "'" + + elif dataset == "cognata_deeplab": + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_AUTOMOTIVE_DEEPLABV3PLUS_PATH'], "accuracy_cognata.py") + "' --mlperf-accuracy-file '" + os.path.join( + result_dir, "mlperf_log_accuracy.json") + "' --dataset-path '" + env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'] + "' > '" + out_file + "'" + else: return {'return': 1, 'error': 'Unsupported dataset'} diff --git a/script/process-mlperf-accuracy/meta.yaml b/script/process-mlperf-accuracy/meta.yaml index cd14ef67a..76324ead4 100644 --- a/script/process-mlperf-accuracy/meta.yaml +++ b/script/process-mlperf-accuracy/meta.yaml @@ -273,3 +273,15 @@ variations: env: MLC_DATASET: waymo group: dataset + nuscenes: + env: + MLC_DATASET: nuscenes + group: dataset + cognata_ssd: + env: + MLC_DATASET: cognata_ssd + group: dataset + cognata_deeplab: + env: + MLC_DATASET: cognata_deeplab + group: dataset From 4f29a3eae5f66e77fa324e7c5236d8d23e9998b6 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sat, 17 May 2025 03:08:09 +0000 Subject: [PATCH 21/83] [Automated Commit] Format Codebase [skip ci] --- script/process-mlperf-accuracy/customize.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/script/process-mlperf-accuracy/customize.py b/script/process-mlperf-accuracy/customize.py index db0d98370..f0cccc34c 100644 --- a/script/process-mlperf-accuracy/customize.py +++ b/script/process-mlperf-accuracy/customize.py @@ -211,15 +211,15 @@ def preprocess(i): elif dataset == "nuscenes": CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_AUTOMOTIVE_BEVFORMER_PATH'], "accuracy_nuscenes_cpu.py") + "' --mlperf-accuracy-file '" + os.path.join( result_dir, "mlperf_log_accuracy.json") + "' --nuscenes-dir '" + env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'] + "' --config '" + os.path.join(env['MLC_MLPERF_AUTOMOTIVE_BEVFORMER_PATH'], "projects" + "configs" + "bevformer" + "bevformer_tiny.py") + "' > '" + 
out_file + "'" - + elif dataset == "cognata_ssd": CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_AUTOMOTIVE_SSD_RESNET50_PATH'], "accuracy_cognata.py") + "' --mlperf-accuracy-file '" + os.path.join( result_dir, "mlperf_log_accuracy.json") + "' --dataset-path '" + env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'] + "' --dataset cognata --config '" + "baseline_8MP_ss_scales_fm1_5x5_all" + "' > '" + out_file + "'" - + elif dataset == "cognata_deeplab": CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_AUTOMOTIVE_DEEPLABV3PLUS_PATH'], "accuracy_cognata.py") + "' --mlperf-accuracy-file '" + os.path.join( - result_dir, "mlperf_log_accuracy.json") + "' --dataset-path '" + env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'] + "' > '" + out_file + "'" - + result_dir, "mlperf_log_accuracy.json") + "' --dataset-path '" + env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'] + "' > '" + out_file + "'" + else: return {'return': 1, 'error': 'Unsupported dataset'} From 262f993f80bd9b960f736c66069874d613d34968 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 21 May 2025 12:50:04 +0530 Subject: [PATCH 22/83] added missing packages, dependencies, run commands for v0.5 --- .../customize.py | 59 +++++-- .../meta.yaml | 148 +++++++++++++++++- script/get-generic-sys-util/meta.yaml | 14 ++ 3 files changed, 206 insertions(+), 15 deletions(-) diff --git a/script/app-mlperf-automotive-mlcommons-python/customize.py b/script/app-mlperf-automotive-mlcommons-python/customize.py index 6ea96b377..635021f98 100644 --- a/script/app-mlperf-automotive-mlcommons-python/customize.py +++ b/script/app-mlperf-automotive-mlcommons-python/customize.py @@ -13,6 +13,8 @@ def preprocess(i): state = i['state'] script_path = i['run_script_input']['path'] + logger = i['automation'].logger + if is_true(env.get('MLC_MLPERF_SKIP_RUN', '')): return {'return': 0} @@ -151,11 +153,12 @@ def preprocess(i): if mode == "accuracy": 
mode_extra_options += " --accuracy" - env['MLC_OUTPUT_PREDICTIONS_PATH'] = os.path.join( - env['MLC_DATASET_MLCOMMONS_COGNATA_PATH'], - env['MLC_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS'], - 'Cognata_Camera_01_8M_png', - 'output') + if env.get('MLC_MODEL', '') == "retinanet": + env['MLC_OUTPUT_PREDICTIONS_PATH'] = os.path.join( + env['MLC_DATASET_MLCOMMONS_COGNATA_PATH'], + env['MLC_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS'], + 'Cognata_Camera_01_8M_png', + 'output') elif mode == "performance": pass @@ -175,7 +178,7 @@ def preprocess(i): # Grigori updated for ABTF demo # cmd, run_dir = get_run_cmd(os_info, env, scenario_extra_options, mode_extra_options, dataset_options, mlperf_implementation) cmd, run_dir = get_run_cmd_reference( - os_info, env, scenario_extra_options, mode_extra_options, dataset_options, script_path) + os_info, env, scenario_extra_options, mode_extra_options, dataset_options, logger, script_path) if env.get('MLC_NETWORK_LOADGEN', '') == "lon": @@ -194,15 +197,16 @@ def preprocess(i): if env.get('MLC_HOST_PLATFORM_FLAVOR', '') == "arm64": env['MLC_HOST_PLATFORM_FLAVOR'] = "aarch64" - if not env.get('MLC_COGNATA_ACCURACY_DUMP_FILE'): - env['MLC_COGNATA_ACCURACY_DUMP_FILE'] = os.path.join( - env['OUTPUT_DIR'], "accuracy.txt") + if env.get('MLC_MODEL', '') == "retinanet": + if not env.get('MLC_COGNATA_ACCURACY_DUMP_FILE'): + env['MLC_COGNATA_ACCURACY_DUMP_FILE'] = os.path.join( + env['OUTPUT_DIR'], "accuracy.txt") return {'return': 0} def get_run_cmd_reference(os_info, env, scenario_extra_options, - mode_extra_options, dataset_options, script_path=None): + mode_extra_options, dataset_options, logger, script_path=None): q = '"' if os_info['platform'] == 'windows' else "'" @@ -237,6 +241,41 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, " --output " + q + env['OUTPUT_DIR'] + q + " " + \ env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ scenario_extra_options + mode_extra_options + dataset_options + + elif env['MLC_MODEL'] in 
['bevformer']: + run_dir = env['MLC_MLPERF_AUTOMOTIVE_BEVFORMER_PATH'] + + env['RUN_DIR'] = run_dir + + env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR'] + + if env['MLC_MLPERF_BACKEND'] != "onnx": + logger.warning("Unsupported backend {MLC_MLPERF_BACKEND}, defaulting to onnx") + env['MLC_MLPERF_BACKEND'] = "onnx" + + config_path = os.path.join(run_dir, "projects", "configs", "bevformer", "bevformer_tiny.py") + + cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --backend {env['MLC_MLPERF_BACKEND']} --dataset nuscenes --dataset-path {env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH']} --checkpoint {env['MLC_ML_MODEL_BEVFORMER_PATH']} --config {config_path}""" + + elif env['MLC_MODEL'] in ['ssd-resnet50']: + run_dir = env['MLC_MLPERF_AUTOMOTIVE_SSD_RESNET50_PATH'] + + env['RUN_DIR'] = run_dir + + env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR'] + + config_path = "baseline_8MP_ss_scales_fm1_5x5_all" + + cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --backend {env['MLC_MLPERF_BACKEND']} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_SSD_PATH']} --config {config_path}""" + + elif env['MLC_MODEL'] in ['deeplab_v3+']: + run_dir = env['MLC_MLPERF_AUTOMOTIVE_DEEPLABV3PLUS_PATH'] + + env['RUN_DIR'] = run_dir + + env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR'] + + cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --backend {env['MLC_MLPERF_BACKEND']} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH']} --config {config_path}""" ########################################################################## diff --git a/script/app-mlperf-automotive-mlcommons-python/meta.yaml b/script/app-mlperf-automotive-mlcommons-python/meta.yaml index e5567ac27..8a904d1fd 100644 --- a/script/app-mlperf-automotive-mlcommons-python/meta.yaml +++ 
b/script/app-mlperf-automotive-mlcommons-python/meta.yaml @@ -9,8 +9,10 @@ category: "Modular MLPerf inference benchmark pipeline for ABTF model" # User-friendly tags to find this CM script tags: -- demo -- run-mlperf-inference +- automotive +- mlcommons +- reference +- run-mlperf-automotive-inference - object-detection - abtf-model @@ -340,6 +342,9 @@ variations: group: framework env: MLC_MLPERF_BACKEND: onnxruntime + add_deps_recursive: + ml-model-bevformer: + tags: _onnx onnxruntime,cpu: env: @@ -361,10 +366,27 @@ variations: tags: _NCHW ml-model: tags: raw,_pytorch + ml-model-bevformer: + tags: _pytorch + ml-model-ssd-resnet50: + tags: _pytorch + ml-model-deeplabv3-plus: + tags: _pytorch env: MLC_MLPERF_BACKEND: pytorch MLC_MLPERF_BACKEND_VERSION: <<>> - + + pytorch,cpu: + add_deps_recursive: + pytorch: + env: + MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu/torch_stable.html + torchvision: + env: + MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu/torch_stable.html + torchaudio: + env: + MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu/torch_stable.html @@ -399,7 +421,6 @@ variations: - tags: get,ml-model,abtf-ssd-pytorch,_abtf-mvp names: - ml-model-abtf - env: MLC_MODEL: retinanet @@ -427,10 +448,127 @@ variations: - tags: get,ml-model,abtf-ssd-pytorch,_abtf-poc names: - ml-model-abtf - env: MLC_MODEL: retinanet + bevformer: + group: models + add_deps_recursive: + pytorch: + version_max: "2.5.1" + version_max_usable: "2.5.1" + torchvision: + version_max: "0.20.1" + version_max_usable": "0.20.1" + torchaudio: + version_max: "2.5.1" + version_max_usable": "2.5.1" + deps: + - tags: get,generic-python-lib,_package.opencv-python + - tags: get,generic-python-lib,_package.numpy + version_max: "1.26.4" + version_max_usable: "1.26.4" + - tags: get,generic-python-lib,_package.onnx + - tags: get,generic-python-lib,_package.pillow + - tags: get,generic-python-lib,_package.pyquarternion + 
- tags: get,generic-python-lib,_package.tqdm + - tags: get,generic-python-lib,_package.nuscenes-devkit + - tags: get,preprocessed,dataset,nuscenes + skip_if_env: + MLC_PREPROCESSED_DATASET_NUSCENES_PATH: + - yes + MLC_USE_DATASET_FROM_HOST: + - "yes" + names: + - preprocessed-dataset-mlcommons-nuscenes + - tags: get,ml-model,bevformer + skip_if_env: + MLC_ML_MODEL_BEVFORMER_PATH: + - yes + MLC_USE_MODEL_FROM_HOST: + - "yes" + names: + - ml-model-bevformer + + ssd-resnet50: + group: models + add_deps_recursive: + pytorch: + version_max: "2.3.1" + version_max_usable: "2.3.1" + torchvision: + version_max: "0.18.1" + version_max_usable": "0.18.1" + torchaudio: + version_max: "2.3.1" + version_max_usable": "2.3.1" + deps: + - tags: get,generic-python-lib,_package.Cython + - tags: get,generic-python-lib,_package.scikit-image + - tags: get,generic-python-lib,_package.faster-coco-eval + - tags: get,generic-python-lib,_package.torchinfo + - tags: get,generic-python-lib,_package.torchmetrics + - tags: get,generic-sys-util,_libgl1-mesa-glx + - tags: get,generic-python-lib,_package.onnx + - tags: get,generic-python-lib,_package.onnxruntime + - tags: get,generic-python-lib,_package.tqdm + - tags: get,preprocessed,dataset,cognata,_mlc + skip_if_env: + MLC_PREPROCESSED_DATASET_COGNATA_PATH: + - yes + MLC_USE_DATASET_FROM_HOST: + - "yes" + names: + - preprocessed-dataset-mlcommons-cognata-ssd-resnet50 + - tags: get,ml-model,ssd,resnet50,_mlc,_rclone + skip_if_env: + MLC_ML_MODEL_SSD_PATH: + - yes + MLC_USE_MODEL_FROM_HOST: + - "yes" + names: + - ml-model-ssd-resnet50 + + deeplab-v3+: + group: models + add_deps_recursive: + pytorch: + version_max: "2.3.1" + version_max_usable: "2.3.1" + torchvision: + version_max: "0.18.1" + version_max_usable": "0.18.1" + torchaudio: + version_max: "2.3.1" + version_max_usable": "2.3.1" + deps: + - tags: get,generic-python-lib,_package.Cython + - tags: get,generic-python-lib,_package.scikit-image + - tags: 
get,generic-python-lib,_package.scikit-learn + - tags: get,generic-python-lib,_package.torchinfo + - tags: get,generic-python-lib,_package.torchmetrics + - tags: get,generic-sys-util,_libgl1-mesa-glx + - tags: get,generic-python-lib,_package.onnx + - tags: get,generic-python-lib,_package.onnxruntime + - tags: get,generic-python-lib,_package.tqdm + - tags: get,generic-python-lib,_package.ijson + - tags: get,preprocessed,dataset,cognata,_mlc,_segmentation + skip_if_env: + MLC_PREPROCESSED_DATASET_COGNATA_PATH: + - yes + MLC_USE_DATASET_FROM_HOST: + - "yes" + names: + - preprocessed-dataset-mlcommons-cognata-deeplabv3-plus + - tags: get,ml-model,deeplabv3-plus + skip_if_env: + MLC_ML_MODEL_DEEPLABV3_PLUS_PATH: + - yes + MLC_USE_MODEL_FROM_HOST: + - "yes" + names: + - ml-model-deeplabv3-plus + # Target devices cpu: group: device diff --git a/script/get-generic-sys-util/meta.yaml b/script/get-generic-sys-util/meta.yaml index 5f366d775..37e56c101 100644 --- a/script/get-generic-sys-util/meta.yaml +++ b/script/get-generic-sys-util/meta.yaml @@ -139,6 +139,20 @@ variations: brew: '' dnf: mesa-libGL yum: mesa-libGL + libgl1-mesa-glx: + env: + MLC_SYS_UTIL_NAME: libgl1-mesa-glx # tbd: regular expression for version as well as whether its installed? + MLC_SYS_UTIL_CHECK_CMD: 'ldconfig -p | grep -i libGLX_mesa.so.*' + default_env: + MLC_GENERIC_SYS_UTIL_IGNORE_MISSING_PACKAGE: yes + new_env_keys: + - MLC_LIBGLX_VERSION + state: + libgl: # tbd: complete for other flavours of linux + apt: libgl1-mesa-glx + brew: '' + dnf: '' + yum: '' libsm6: env: MLC_SYS_UTIL_NAME: libsm6 # tbd: regular expression for version as well as whether its installed? 
From ca9cb7dd299682ad2deab5a30299546f2b298110 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 21 May 2025 07:21:29 +0000 Subject: [PATCH 23/83] [Automated Commit] Format Codebase [skip ci] --- .../customize.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/script/app-mlperf-automotive-mlcommons-python/customize.py b/script/app-mlperf-automotive-mlcommons-python/customize.py index 635021f98..3f6160df7 100644 --- a/script/app-mlperf-automotive-mlcommons-python/customize.py +++ b/script/app-mlperf-automotive-mlcommons-python/customize.py @@ -241,7 +241,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, " --output " + q + env['OUTPUT_DIR'] + q + " " + \ env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ scenario_extra_options + mode_extra_options + dataset_options - + elif env['MLC_MODEL'] in ['bevformer']: run_dir = env['MLC_MLPERF_AUTOMOTIVE_BEVFORMER_PATH'] @@ -250,10 +250,16 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR'] if env['MLC_MLPERF_BACKEND'] != "onnx": - logger.warning("Unsupported backend {MLC_MLPERF_BACKEND}, defaulting to onnx") + logger.warning( + "Unsupported backend {MLC_MLPERF_BACKEND}, defaulting to onnx") env['MLC_MLPERF_BACKEND'] = "onnx" - - config_path = os.path.join(run_dir, "projects", "configs", "bevformer", "bevformer_tiny.py") + + config_path = os.path.join( + run_dir, + "projects", + "configs", + "bevformer", + "bevformer_tiny.py") cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --backend {env['MLC_MLPERF_BACKEND']} --dataset nuscenes --dataset-path {env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH']} --checkpoint {env['MLC_ML_MODEL_BEVFORMER_PATH']} --config {config_path}""" @@ -263,7 +269,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, env['RUN_DIR'] = run_dir env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR'] - + config_path = 
"baseline_8MP_ss_scales_fm1_5x5_all" cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --backend {env['MLC_MLPERF_BACKEND']} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_SSD_PATH']} --config {config_path}""" From 4c744950804e80ee2b06c5c1b017a38f89dd8af8 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 21 May 2025 12:58:20 +0530 Subject: [PATCH 24/83] enable backward compatability --- script/app-mlperf-automotive-mlcommons-python/meta.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/script/app-mlperf-automotive-mlcommons-python/meta.yaml b/script/app-mlperf-automotive-mlcommons-python/meta.yaml index 8a904d1fd..b5e0f0d0d 100644 --- a/script/app-mlperf-automotive-mlcommons-python/meta.yaml +++ b/script/app-mlperf-automotive-mlcommons-python/meta.yaml @@ -12,9 +12,10 @@ tags: - automotive - mlcommons - reference -- run-mlperf-automotive-inference +- run-mlperf-inference - object-detection - abtf-model +- demo # Default environment From a7f2c28fab220afb346de3d60475482120732679 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 21 May 2025 15:31:26 +0530 Subject: [PATCH 25/83] changes for app-mlperf-automotive --- script/app-mlperf-automotive/customize.py | 333 +++++++++++++++++++++- script/app-mlperf-automotive/meta.yaml | 163 ++++++++++- 2 files changed, 480 insertions(+), 16 deletions(-) diff --git a/script/app-mlperf-automotive/customize.py b/script/app-mlperf-automotive/customize.py index 1333e0719..4435a953b 100644 --- a/script/app-mlperf-automotive/customize.py +++ b/script/app-mlperf-automotive/customize.py @@ -5,7 +5,10 @@ import subprocess import mlperf_utils from log_parser import MLPerfLog - +from utils import * +import copy +import platform +import sys def preprocess(i): @@ -36,6 +39,9 @@ def postprocess(i): os_info = i['os_info'] xsep = '^' 
if os_info['platform'] == 'windows' else '\\' + q = '"' if os_info['platform'] == 'windows' else "'" + + logger = i['automation'].logger env['CMD'] = '' @@ -45,6 +51,10 @@ def postprocess(i): output_dir = env['MLC_MLPERF_OUTPUT_DIR'] mode = env['MLC_MLPERF_LOADGEN_MODE'] + mlc = i['automation'].action_object + + result_sut_folder_path = env['MLC_MLPERF_INFERENCE_RESULTS_SUT_PATH'] + model = env['MLC_MODEL'] model_full_name = env.get('MLC_ML_MODEL_FULL_NAME', model) @@ -57,16 +67,38 @@ def postprocess(i): mlperf_log = MLPerfLog(os.path.join(output_dir, "mlperf_log_detail.txt")) if mode == "performance": - result = mlperf_log['result_mean_latency_ns'] / 1000000 + if scenario in ["Offline", "Server"]: + metric = "target_qps" + result = mlperf_log['result_mean_latency_ns'] / 1000000 + elif scenario.endswith("Stream"): + metric = "target_latency" + result = mlperf_log['result_mean_latency_ns'] + else: + return {'return': 1, + 'error': 'Unsupported scenario: {}'.format(scenario)} + import yaml + sut_name = state['MLC_SUT_CONFIG_NAME'] + sut_config = state['MLC_SUT_CONFIG'][sut_name] + sut_config_path = state['MLC_SUT_CONFIG_PATH'][sut_name] + if scenario not in sut_config[model_full_name]: + sut_config[model_full_name][scenario] = {} + sut_config[model_full_name][scenario][metric] = result + + print( + f"SUT: {sut_name}, model: {model_full_name}, scenario: {scenario}, {metric} (mean value) updated as {result}") + with open(sut_config_path, "w") as f: + yaml.dump(sut_config, f) + logger.info(f"New config stored in {sut_config_path}") elif mode == "accuracy": - if not env.get( - 'MLC_COGNATA_ACCURACY_DUMP_FILE'): # can happen while reusing old runs - env['MLC_COGNATA_ACCURACY_DUMP_FILE'] = os.path.join( - output_dir, "accuracy.txt") acc = "" - if os.path.exists(env['MLC_COGNATA_ACCURACY_DUMP_FILE']): - with open(env['MLC_COGNATA_ACCURACY_DUMP_FILE'], "r") as f: - acc = f.readline() + if env.get('MLC_MLPERF_INFERENCE_VERSION', '') == "mvp-demo" and 
env.get('MLC_MLPERF_INFERENCE_VERSION') == "poc-demo": + if not env.get( + 'MLC_COGNATA_ACCURACY_DUMP_FILE'): # can happen while reusing old runs + env['MLC_COGNATA_ACCURACY_DUMP_FILE'] = os.path.join( + output_dir, "accuracy.txt") + if os.path.exists(env['MLC_COGNATA_ACCURACY_DUMP_FILE']): + with open(env['MLC_COGNATA_ACCURACY_DUMP_FILE'], "r") as f: + acc = f.readline() result = acc else: return {'return': 1, 'error': f"Unknown mode {mode}"} @@ -99,5 +131,288 @@ def postprocess(i): state['mlc-mlperf-inference-results-last'][mode] = result state['mlc-mlperf-inference-results-last'][mode + '_valid'] = valid.get(mode, False) + + if mode in ["performance", "accuracy"] and env.get('MLC_MLPERF_INFERENCE_VERSION', '') not in ["", "mvp-demo", "poc-demo"]: + # if measurements file exist read it + if os.path.exists("measurements.json"): + with open("measurements.json", "r") as file: + measurements = json.load(file) # Load JSON data from the file + else: + measurements = {} + measurements['starting_weights_filename'] = env.get( + 'MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME', env.get( + 'MLC_ML_MODEL_FILE', measurements.get( + 'starting_weights_filename', 'TBD'))) + measurements['retraining'] = env.get( + 'MLC_ML_MODEL_RETRAINING', measurements.get( + 'retraining', 'no')) + measurements['input_data_types'] = env.get( + 'MLC_ML_MODEL_INPUTS_DATA_TYPE', measurements.get( + 'input_data_types', 'fp32')) + measurements['weight_data_types'] = env.get( + 'MLC_ML_MODEL_WEIGHTS_DATA_TYPE', measurements.get( + 'weight_data_types', 'fp32')) + measurements['weight_transformations'] = env.get( + 'MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS', measurements.get( + 'weight_transformations', 'none')) + + os.chdir(output_dir) + + if not os.path.exists("mlperf_log_summary.txt"): + return {'return': 0} + + mlperf_log_summary = '' + if os.path.isfile("mlperf_log_summary.txt"): + with open("mlperf_log_summary.txt", "r") as fp: + mlperf_log_summary = fp.read() + + if mlperf_log_summary != '': + 
state['app_mlperf_inference_log_summary'] = {} + for x in mlperf_log_summary.split('\n'): + y = x.split(': ') + if len(y) == 2: + state['app_mlperf_inference_log_summary'][y[0].strip().lower() + ] = y[1].strip() + + if not is_false(env.get("MLC_MLPERF_PRINT_SUMMARY", "")): + logger.info("\n") + logger.info(mlperf_log_summary) + + with open("measurements.json", "w") as fp: + json.dump(measurements, fp, indent=2) + + mlc_sut_info = {} + mlc_sut_info['system_name'] = state['MLC_SUT_META']['system_name'] + mlc_sut_info['implementation'] = env['MLC_MLPERF_IMPLEMENTATION'] + mlc_sut_info['device'] = env['MLC_MLPERF_DEVICE'] + mlc_sut_info['framework'] = state['MLC_SUT_META']['framework'] + mlc_sut_info['run_config'] = env['MLC_MLPERF_INFERENCE_SUT_RUN_CONFIG'] + with open(os.path.join(result_sut_folder_path, "mlc-sut-info.json"), "w") as fp: + json.dump(mlc_sut_info, fp, indent=2) + + system_meta = state['MLC_SUT_META'] + with open("system_meta.json", "w") as fp: + json.dump(system_meta, fp, indent=2) + + # map the custom model for inference result to the official model + # if custom model name is not set, the official model name will be + # mapped to itself + official_model_name = model + if "efficientnet" in official_model_name or "mobilenet" in official_model_name: + official_model_name = "resnet" + model_mapping = {model_full_name: official_model_name} + with open("model_mapping.json", "w") as fp: + json.dump(model_mapping, fp, indent=2) + + # Add to the state + state['app_mlperf_inference_measurements'] = copy.deepcopy( + measurements) + + if os.path.exists(env['MLC_MLPERF_CONF']): + shutil.copy(env['MLC_MLPERF_CONF'], 'mlperf.conf') + + if os.path.exists(env['MLC_MLPERF_USER_CONF']): + shutil.copy(env['MLC_MLPERF_USER_CONF'], 'user.conf') + + result, valid, power_result = mlperf_utils.get_result_from_log( + env['MLC_MLPERF_LAST_RELEASE'], model, scenario, output_dir, mode, env.get('MLC_MLPERF_INFERENCE_SOURCE_VERSION')) + + 
state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] + ][model][scenario][mode] = result + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] + ][model][scenario][mode + '_valid'] = valid.get(mode, False) + + state['mlc-mlperf-inference-results-last'][mode] = result + state['mlc-mlperf-inference-results-last'][mode + + '_valid'] = valid.get(mode, False) + + # Power not included in v0.5, code should be added in future + + # Record basic host info + host_info = { + "os_version": platform.platform(), + "cpu_version": platform.processor(), + "python_version": sys.version, + } + try: + import importlib.metadata + mlc_version = importlib.metadata.version("mlc") + host_info["mlc_version"] = mlc_version + except Exception as e: + error = format(e) + mlc_version = "unknown" + + x = '' + if env.get('MLC_HOST_OS_FLAVOR', '') != '': + x += env['MLC_HOST_OS_FLAVOR'] + if env.get('MLC_HOST_OS_VERSION', '') != '': + x += ' ' + env['MLC_HOST_OS_VERSION'] + if x != '': + host_info['os_version_sys'] = x + + if env.get('MLC_HOST_SYSTEM_NAME', '') != '': + host_info['system_name'] = env['MLC_HOST_SYSTEM_NAME'] + + # Check CM automation repository + repo_name = 'mlcommons@mlperf-automations' + repo_hash = '' + r = mlc.access({'action': 'find', 'automation': 'repo', + 'item': 'mlcommons@mlperf-automations,9e97bb72b0474657'}) + if r['return'] == 0 and len(r['list']) == 1: + repo_path = r['list'][0].path + if os.path.isdir(repo_path): + repo_name = os.path.basename(repo_path) + + # Check dev + # if repo_name == 'cm4mlops': repo_name = 'mlcommons@cm4mlops' + + r = utils.run_system_cmd({ + 'path': repo_path, + 'cmd': 'git rev-parse HEAD'}) + if r['return'] == 0: + repo_hash = r['output'] + + host_info['mlc_repo_name'] = repo_name + host_info['mlc_repo_git_hash'] = repo_hash + + with open("mlc-host-info.json", "w") as fp: + fp.write(json.dumps(host_info, indent=2) + '\n') + + # Prepare README + if "cmd" in inp: + cmd = "mlc run script \\\n\t" + " 
\\\n\t".join(inp['cmd']) + xcmd = "mlc run script " + xsep + "\n\t" + \ + (" " + xsep + "\n\t").join(inp['cmd']) + else: + cmd = "" + xcmd = "" + + readme_init = "*Check [CM MLPerf docs](https://docs.mlcommons.org/inference) for more details.*\n\n" + + readme_body = "## Host platform\n\n* OS version: {}\n* CPU version: {}\n* Python version: {}\n* MLC version: {}\n\n".format(platform.platform(), + platform.processor(), sys.version, mlc_version) + + x = repo_name + if repo_hash != '': + x += ' --checkout=' + str(repo_hash) + + readme_body += "## CM Run Command\n\nSee [CM installation guide](https://docs.mlcommons.org/inference/install/).\n\n" + \ + "```bash\npip install -U mlcflow\n\nmlc rm cache -f\n\nmlc pull repo {}\n\n{}\n```".format( + x, xcmd) + + readme_body += "\n*Note that if you want to use the [latest automation recipes](https://docs.mlcommons.org/inference) for MLPerf,\n" + \ + " you should simply reload {} without checkout and clean MLC cache as follows:*\n\n".format(repo_name) + \ + "```bash\nmlc rm repo {}\nmlc pull repo {}\nmlc rm cache -f\n\n```".format( + repo_name, repo_name) + + extra_readme_init = '' + extra_readme_body = '' + if env.get('MLC_MLPERF_README', '') == "yes": + extra_readme_body += "\n## Dependent MLPerf Automation scripts\n\n" + + script_tags = inp['tags'] + script_adr = inp.get('adr', {}) + + mlc_input = {'action': 'run', + 'automation': 'script', + 'tags': script_tags, + 'adr': script_adr, + 'print_deps': True, + 'env': env, + 'quiet': True, + 'silent': True, + 'fake_run': True + } + r = mlc.access(mlc_input) + if r['return'] > 0: + return r + + print_deps = r['new_state']['print_deps'] + count = 1 + for dep in print_deps: + extra_readme_body += "\n\n" + str(count) + ". 
`" + dep + "`\n" + count = count + 1 + + if state.get( + 'mlperf-inference-implementation') and state['mlperf-inference-implementation'].get('print_deps'): + + extra_readme_body += "\n## Dependent automation scripts for the MLPerf Inference Implementation\n" + + print_deps = state['mlperf-inference-implementation']['print_deps'] + count = 1 + for dep in print_deps: + extra_readme_body += "\n\n" + \ + str(count) + ". `" + dep + "`\n" + count = count + 1 + + readme = readme_init + readme_body + extra_readme = extra_readme_init + extra_readme_body + + with open("README.md", "w") as fp: + fp.write(readme) + if extra_readme: + with open("README-extra.md", "w") as fp: + fp.write(extra_readme) + + if state.get( + 'mlperf-inference-implementation') and state['mlperf-inference-implementation'].get('version_info'): + env['MLC_MLPERF_RUN_JSON_VERSION_INFO_FILE'] = os.path.join( + output_dir, "mlc-version-info.json") + env['MLC_MLPERF_RUN_DEPS_GRAPH'] = os.path.join( + output_dir, "mlc-deps.png") + env['MLC_MLPERF_RUN_DEPS_MERMAID'] = os.path.join( + output_dir, "mlc-deps.mmd") + with open(os.path.join(output_dir, "mlc-version-info.json"), "w") as f: + f.write( + json.dumps( + state['mlperf-inference-implementation']['version_info'], + indent=2)) + + if env.get('MLC_DUMP_SYSTEM_INFO', True): + dump_script_output( + "detect,os", + env, + state, + 'new_env', + os.path.join( + output_dir, + "os_info.json"), mlc) + dump_script_output( + "detect,cpu", + env, + state, + 'new_env', + os.path.join( + output_dir, + "cpu_info.json"), mlc) + env['MLC_DUMP_RAW_PIP_FREEZE_FILE_PATH'] = os.path.join( + env['MLC_MLPERF_OUTPUT_DIR'], "pip_freeze.raw") + dump_script_output( + "dump,pip,freeze", + env, + state, + 'new_state', + os.path.join( + output_dir, + "pip_freeze.json"), mlc) + + return {'return': 0} + + +def dump_script_output(script_tags, env, state, output_key, dump_file, mlc): + + mlc_input = {'action': 'run', + 'automation': 'script', + 'tags': script_tags, + 'env': env, + 'state': 
state, + 'quiet': True, + 'silent': True, + } + r = mlc.access(mlc_input) + if r['return'] > 0: + return r + with open(dump_file, "w") as f: + f.write(json.dumps(r[output_key], indent=2)) return {'return': 0} diff --git a/script/app-mlperf-automotive/meta.yaml b/script/app-mlperf-automotive/meta.yaml index 896008c39..af3307465 100644 --- a/script/app-mlperf-automotive/meta.yaml +++ b/script/app-mlperf-automotive/meta.yaml @@ -4,14 +4,16 @@ uid: f7488ce376484fd2 automation_alias: script automation_uid: 5b4e0237da074764 -category: "Modular MLPerf inference benchmark pipeline for ABTF model" +category: "Modular MLPerf automotive benchmark pipeline for ABTF models" # User-friendly tags to find this CM script tags: - app - app-mlperf-inference +- app-mlperf-inference-automotive - mlperf-inference +- mlperf-inference-automotive - abtf-inference predeps: no @@ -90,12 +92,12 @@ deps: # Use mlc inside scripts #- tags: get,generic-python-lib,_package.mlcflow - - tags: get,mlperf,inference,utils + - tags: get,mlperf,automotive,utils docker: - mlc_repo: gateoverflow@mlperf-automations - mlc_repo_branch: dev + mlc_repo: anandhu-eng@mlperf-automations + mlc_repo_branch: automotive use_host_group_id: True use_host_user_id: True real_run: false @@ -121,7 +123,7 @@ variations: - names: - python-reference-abtf-inference - abtf-inference-implementation - tags: run-mlperf-inference,demo,abtf-model + tags: run-mlperf-inference,abtf-model skip_if_env: MLC_SKIP_RUN: - yes @@ -158,6 +160,12 @@ variations: abtf-inference-implementation: tags: _onnxruntime + onnx_dynamic: + base: + - onnxruntime + add_deps_recursive: + ml-model-deeplab-v3+: + tags: _onnx_dynamic onnxruntime,cpu: env: @@ -178,6 +186,12 @@ variations: add_deps_recursive: abtf-inference-implementation: tags: _pytorch + ml-model-bevformer: + tags: _pytorch + ml-model-ssd-resnet50: + tags: _pytorch + ml-model-deeplab-v3+: + tags: _pytorch abtf-demo-model: @@ -204,10 +218,137 @@ variations: enable_if_env: 
MLC_DATASET_MLCOMMONS_COGNATA_DOWNLOAD_IN_HOST: - yes - mounts: - "${{ MLC_DATASET_MLCOMMONS_COGNATA_PATH }}:${{ MLC_DATASET_MLCOMMONS_COGNATA_PATH }}" - + + bevformer: + group: + models + default_env: + MLC_USE_DATASET_FROM_HOST: yes + env: + MLC_MODEL: bevformer + deps: + - tags: get,preprocessed,dataset,nuscenes + skip_if_env: + MLC_PREPROCESSED_DATASET_NUSCENES_PATH: + - yes + enable_if_env: + MLC_USE_DATASET_FROM_HOST: + - "yes" + - tags: get,ml-model,bevformer + skip_if_env: + MLC_ML_MODEL_BEVFORMER_PATH: + - yes + enable_if_env: + MLC_USE_MODEL_FROM_HOST: + - "yes" + names: + - ml-model-bevformer + add_deps_recursive: + abtf-inference-implementation: + tags: _beformer + docker: + mounts: + - "${{ MLC_PREPROCESSED_DATASET_NUSCENES_PATH }}:${{ MLC_PREPROCESSED_DATASET_NUSCENES_PATH }}" + - "${{ MLC_ML_MODEL_BEVFORMER_PATH }}:${{ MLC_ML_MODEL_BEVFORMER_PATH }}" + posthook_deps: + - enable_if_env: + MLC_MLPERF_LOADGEN_MODE: + - accuracy + - all + MLC_MLPERF_ACCURACY_RESULTS_DIR: + - 'on' + names: + - mlperf-accuracy-script + - nuscenes-accuracy-script + tags: run,accuracy,mlperf,_nuscenes + + ssd-resnet50: + group: + models + default_env: + MLC_USE_DATASET_FROM_HOST: yes + env: + MLC_MODEL: ssd-resnet50 + deps: + - tags: get,preprocessed,dataset,cognata + skip_if_env: + MLC_PREPROCESSED_DATASET_COGNATA_PATH: + - yes + enable_if_env: + MLC_USE_DATASET_FROM_HOST: + - "yes" + - tags: get,ml-model,ssd,resnet50,_mlc,_rclone + skip_if_env: + MLC_ML_MODEL_SSD_PATH: + - yes + enable_if_env: + MLC_USE_MODEL_FROM_HOST: + - "yes" + names: + - ml-model-ssd-resnet50 + add_deps_recursive: + mlperf-automotive-implementation: + tags: _ssd-resnet50 + docker: + mounts: + - "${{ MLC_PREPROCESSED_DATASET_COGNATA_PATH }}:${{ MLC_PREPROCESSED_DATASET_COGNATA_PATH }}" + - "${{ MLC_ML_MODEL_SSD_PATH }}:${{ MLC_ML_MODEL_SSD_PATH }}" + posthook_deps: + - enable_if_env: + MLC_MLPERF_LOADGEN_MODE: + - accuracy + - all + MLC_MLPERF_ACCURACY_RESULTS_DIR: + - 'on' + names: + - 
mlperf-accuracy-script + - cognata-ssd-accuracy-script + tags: run,accuracy,mlperf,_cognata_ssd + + deeplab-v3+: + group: + models + default_env: + MLC_USE_DATASET_FROM_HOST: yes + env: + MLC_MODEL: deeplab-v3+ + deps: + - tags: get,preprocessed,dataset,cognata,_segmentation + skip_if_env: + MLC_PREPROCESSED_DATASET_COGNATA_PATH: + - yes + enable_if_env: + MLC_USE_DATASET_FROM_HOST: + - "yes" + - tags: get,ml-model,ssd,resnet50,_mlc,_rclone + skip_if_env: + MLC_ML_MODEL_DEEPLABV3_PLUS_PATH: + - yes + enable_if_env: + MLC_USE_MODEL_FROM_HOST: + - "yes" + names: + - ml-model-deeplab-v3+ + add_deps_recursive: + mlperf-automotive-implementation: + tags: _deeplab-v3+ + docker: + mounts: + - "${{ MLC_PREPROCESSED_DATASET_COGNATA_PATH }}:${{ MLC_PREPROCESSED_DATASET_COGNATA_PATH }}" + - "${{ MLC_ML_MODEL_DEEPLABV3_PLUS_PATH }}:${{ MLC_ML_MODEL_DEEPLABV3_PLUS_PATH }}" + posthook_deps: + - enable_if_env: + MLC_MLPERF_LOADGEN_MODE: + - accuracy + - all + MLC_MLPERF_ACCURACY_RESULTS_DIR: + - 'on' + names: + - mlperf-accuracy-script + - cognata-deeplab-accuracy-script + tags: run,accuracy,mlperf,_cognata_deeplab # Target devices cpu: @@ -235,7 +376,15 @@ variations: all_gpus: 'yes' base_image: nvcr.io/nvidia/pytorch:24.03-py3 + v0.5: {} + + mvp-demo: {} + poc-demo: {} + + v0.5,mlcommons-python,cpu: + docker: + base_image: ubuntu:22.04 # Loadgen scenarios offline: From 8aa9494eefb95a918d2c968e939615cc2de61a49 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 21 May 2025 10:01:48 +0000 Subject: [PATCH 26/83] [Automated Commit] Format Codebase [skip ci] --- script/app-mlperf-automotive/customize.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/script/app-mlperf-automotive/customize.py b/script/app-mlperf-automotive/customize.py index 4435a953b..3fad7ad39 100644 --- a/script/app-mlperf-automotive/customize.py +++ b/script/app-mlperf-automotive/customize.py @@ -10,6 +10,7 @@ import platform import sys + def preprocess(i): os_info 
= i['os_info'] @@ -91,11 +92,12 @@ def postprocess(i): logger.info(f"New config stored in {sut_config_path}") elif mode == "accuracy": acc = "" - if env.get('MLC_MLPERF_INFERENCE_VERSION', '') == "mvp-demo" and env.get('MLC_MLPERF_INFERENCE_VERSION') == "poc-demo": + if env.get('MLC_MLPERF_INFERENCE_VERSION', '') == "mvp-demo" and env.get( + 'MLC_MLPERF_INFERENCE_VERSION') == "poc-demo": if not env.get( 'MLC_COGNATA_ACCURACY_DUMP_FILE'): # can happen while reusing old runs - env['MLC_COGNATA_ACCURACY_DUMP_FILE'] = os.path.join( - output_dir, "accuracy.txt") + env['MLC_COGNATA_ACCURACY_DUMP_FILE'] = os.path.join( + output_dir, "accuracy.txt") if os.path.exists(env['MLC_COGNATA_ACCURACY_DUMP_FILE']): with open(env['MLC_COGNATA_ACCURACY_DUMP_FILE'], "r") as f: acc = f.readline() @@ -131,8 +133,9 @@ def postprocess(i): state['mlc-mlperf-inference-results-last'][mode] = result state['mlc-mlperf-inference-results-last'][mode + '_valid'] = valid.get(mode, False) - - if mode in ["performance", "accuracy"] and env.get('MLC_MLPERF_INFERENCE_VERSION', '') not in ["", "mvp-demo", "poc-demo"]: + + if mode in ["performance", "accuracy"] and env.get( + 'MLC_MLPERF_INFERENCE_VERSION', '') not in ["", "mvp-demo", "poc-demo"]: # if measurements file exist read it if os.path.exists("measurements.json"): with open("measurements.json", "r") as file: @@ -216,7 +219,7 @@ def postprocess(i): result, valid, power_result = mlperf_utils.get_result_from_log( env['MLC_MLPERF_LAST_RELEASE'], model, scenario, output_dir, mode, env.get('MLC_MLPERF_INFERENCE_SOURCE_VERSION')) - + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] ][model][scenario][mode] = result state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] @@ -225,7 +228,7 @@ def postprocess(i): state['mlc-mlperf-inference-results-last'][mode] = result state['mlc-mlperf-inference-results-last'][mode + '_valid'] = valid.get(mode, False) - + # Power not included in v0.5, code should be added in future # Record 
basic host info From fcdcf906c51b3479fbe14a2adff856319f6d6620 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 21 May 2025 15:40:41 +0530 Subject: [PATCH 27/83] changes for run-mlperf-automotive-app --- script/run-mlperf-automotive-app/customize.py | 9 +++++---- script/run-mlperf-automotive-app/meta.yaml | 5 +++++ 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/script/run-mlperf-automotive-app/customize.py b/script/run-mlperf-automotive-app/customize.py index c3dda9c7a..fd84952e2 100644 --- a/script/run-mlperf-automotive-app/customize.py +++ b/script/run-mlperf-automotive-app/customize.py @@ -116,7 +116,8 @@ def preprocess(i): test_list = [] - variation_implementation = "_" + \ + variation_benchmark_version = "_" + env["MLC_MLPERF_INFERENCE_VERSION"] + variation_implementation = ",_" + \ env.get("MLC_MLPERF_IMPLEMENTATION", "reference") variation_model = ",_" + env["MLC_MLPERF_MODEL"] variation_backend = ",_" + \ @@ -135,7 +136,7 @@ def preprocess(i): else: variation_quantization_string = "" - tags = "app,abtf-inference," + variation_implementation + variation_model + variation_backend + variation_device + \ + tags = "app,abtf-inference," + variation_benchmark_version + variation_implementation + variation_model + variation_backend + variation_device + \ variation_run_style + variation_reproducibility + \ variation_quantization_string + power_variation verbose = inp.get('v', False) @@ -290,9 +291,9 @@ def preprocess(i): if state.get("mlc-mlperf-inference-results"): # print(state["mlc-mlperf-inference-results"]) for sut in state["mlc-mlperf-inference-results"]: # only one sut will be there - # Better to do this in a stand alone CM script with proper deps but + # Better to do this in a stand alone MLC script with proper deps but # currently we manage this by modifying the sys path of the python - # executing CM + # executing MLC import mlperf_utils # noqa logger.info(f"{sut}") diff --git 
a/script/run-mlperf-automotive-app/meta.yaml b/script/run-mlperf-automotive-app/meta.yaml index c70ac9e97..f21fc1c0d 100644 --- a/script/run-mlperf-automotive-app/meta.yaml +++ b/script/run-mlperf-automotive-app/meta.yaml @@ -214,6 +214,11 @@ variations: compiler: tags: gcc group: benchmark-version + + v0.5: + group: benchmark-version + env: + MLC_MLPERF_AUTOMOTIVE_VERSION: v0.5 performance-and-accuracy: default: true From 83c091a3a566d7658952f64e226ce935ee159861 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 21 May 2025 16:26:50 +0530 Subject: [PATCH 28/83] prevent duplication of loadgen script --- .../get-mlperf-inference-loadgen/customize.py | 3 +++ script/get-mlperf-inference-loadgen/meta.yaml | 19 ++++++++++++++++--- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/script/get-mlperf-inference-loadgen/customize.py b/script/get-mlperf-inference-loadgen/customize.py index 4ae4b8b73..079a40876 100644 --- a/script/get-mlperf-inference-loadgen/customize.py +++ b/script/get-mlperf-inference-loadgen/customize.py @@ -8,6 +8,9 @@ def preprocess(i): os_info = i['os_info'] env = i['env'] + if env.get('MLC_INFERENCE_AUTOMOTIVE_REPO', '') == "YES": + env['MLC_MLPERF_INFERENCE_SOURCE'] = env['MLC_MLPERF_AUTOMOTIVE_SOURCE'] + if is_true(env.get('MLC_TMP_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP', '')): i['run_script_input']['script_name'] = "donotrun" diff --git a/script/get-mlperf-inference-loadgen/meta.yaml b/script/get-mlperf-inference-loadgen/meta.yaml index ad59163fd..09e2529af 100644 --- a/script/get-mlperf-inference-loadgen/meta.yaml +++ b/script/get-mlperf-inference-loadgen/meta.yaml @@ -24,10 +24,21 @@ deps: - MLC_GIT_CHECKOUT names: - inference-src-loadgen - skip_if_env: + skip_if_any_env: MLC_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: - 'YES' + MLC_INFERENCE_AUTOMOTIVE_REPO: + - 'YES' tags: get,mlcommons,inference,src +- force_env_keys: + - MLC_GIT_URL + - MLC_GIT_CHECKOUT + names: + - 
inference-src-loadgen-automotive + enable_if_env: + MLC_INFERENCE_AUTOMOTIVE_REPO: + - 'YES' + tags: get,mlcommons,automotive,src - enable_if_env: MLC_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: - 'YES' @@ -49,7 +60,7 @@ deps: - enable_if_env: MLC_HOST_OS_TYPE: - windows - skip_if_env: + skip_if_any_env: MLC_TMP_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP: - 'yes' names: @@ -155,7 +166,9 @@ variations: '+ CXXFLAGS': - '-Werror' - '-Wno-unused-parameter' - + automotive: + env: + MLC_INFERENCE_AUTOMOTIVE_REPO: 'YES' versions: custom: add_deps: From 8dbc710c5da20f62252253e5714def13a136644f Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 21 May 2025 16:27:37 +0530 Subject: [PATCH 29/83] prevent duplication of loadgen script --- .../COPYRIGHT.md | 9 -- .../customize.py | 60 -------- .../get-mlperf-automotive-loadgen/meta.yaml | 143 ------------------ script/get-mlperf-automotive-loadgen/run.bat | 39 ----- script/get-mlperf-automotive-loadgen/run.sh | 52 ------- 5 files changed, 303 deletions(-) delete mode 100644 script/get-mlperf-automotive-loadgen/COPYRIGHT.md delete mode 100644 script/get-mlperf-automotive-loadgen/customize.py delete mode 100644 script/get-mlperf-automotive-loadgen/meta.yaml delete mode 100644 script/get-mlperf-automotive-loadgen/run.bat delete mode 100644 script/get-mlperf-automotive-loadgen/run.sh diff --git a/script/get-mlperf-automotive-loadgen/COPYRIGHT.md b/script/get-mlperf-automotive-loadgen/COPYRIGHT.md deleted file mode 100644 index d2ceead84..000000000 --- a/script/get-mlperf-automotive-loadgen/COPYRIGHT.md +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright Notice - -© 2025-2026 MLCommons. All Rights Reserved. - -This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. 
A copy of the License can be obtained at: - -[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) - -Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. diff --git a/script/get-mlperf-automotive-loadgen/customize.py b/script/get-mlperf-automotive-loadgen/customize.py deleted file mode 100644 index aea13a854..000000000 --- a/script/get-mlperf-automotive-loadgen/customize.py +++ /dev/null @@ -1,60 +0,0 @@ -from mlc import utils -from utils import is_true -import os - - -def preprocess(i): - - os_info = i['os_info'] - env = i['env'] - # automotive loadgen is yet to be uploaded to pypi - if is_true(env.get('MLC_TMP_MLPERF_AUTOMOTIVE_LOADGEN_INSTALL_FROM_PIP', - '')): - i['run_script_input']['script_name'] = "donotrun" - - return {'return': 0} - - -def postprocess(i): - - os_info = i['os_info'] - env = i['env'] - - if is_true(env.get('MLC_TMP_MLPERF_AUTOMOTIVE_LOADGEN_INSTALL_FROM_PIP', - '')): - return {'return': 0} - - for key in ['+PYTHONPATH', '+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', - '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: - # 20221024: we save and restore env in the main script and can clean env here for determinism - # if key not in env: - env[key] = [] - - # On Windows installs directly into Python distro for simplicity -# if os_info['platform'] != 'windows': - - cur_path = os.getcwd() - install_path = os.path.join(cur_path, 'install') - - env['MLC_MLPERF_AUTOMOTIVE_LOADGEN_INSTALL_PATH'] = install_path - - build_path = os.path.join(cur_path, 'build') - if os.path.exists(build_path): - env['MLC_MLPERF_AUTOMOTIVE_LOADGEN_BUILD_PATH'] = build_path - - include_path = os.path.join(install_path, 'include') - lib_path = os.path.join(install_path, 'lib') - python_path = 
os.path.join(install_path, 'python') - - env['+C_INCLUDE_PATH'].append(include_path) - env['+CPLUS_INCLUDE_PATH'].append(include_path) - env['MLC_MLPERF_AUTOMOTIVE_LOADGEN_INCLUDE_PATH'] = include_path - - env['+LD_LIBRARY_PATH'].append(lib_path) - env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) - env['MLC_MLPERF_AUTOMOTIVE_LOADGEN_LIBRARY_PATH'] = lib_path - - env['+PYTHONPATH'].append(python_path) - env['MLC_MLPERF_AUTOMOTIVE_LOADGEN_PYTHON_PATH'] = python_path - - return {'return': 0} diff --git a/script/get-mlperf-automotive-loadgen/meta.yaml b/script/get-mlperf-automotive-loadgen/meta.yaml deleted file mode 100644 index c83e82d16..000000000 --- a/script/get-mlperf-automotive-loadgen/meta.yaml +++ /dev/null @@ -1,143 +0,0 @@ -alias: get-mlperf-automotive-loadgen -uid: 82396582494a4d38 - -automation_alias: script -automation_uid: 5b4e0237da074764 - -cache: true - -category: MLPerf benchmark support - -default_env: - MLC_SHARED_BUILD: 'no' - -default_version: master - -deps: -- tags: detect,os -- names: - - python3 - - python - tags: get,python3 -- force_env_keys: - - MLC_GIT_URL - - MLC_GIT_CHECKOUT - names: - - automotive-src-loadgen - skip_if_env: - MLC_MLPERF_AUTOMOTIVE_LOADGEN_DOWNLOAD: - - 'YES' - tags: get,mlcommons,automotive,src -- enable_if_env: - MLC_MLPERF_AUTOMOTIVE_LOADGEN_DOWNLOAD: - - 'YES' - force_cache: true - names: - - automotive-src-loadgen-download - tags: download-and-extract,file,_wget,_extract - update_tags_from_env_with_prefix: - _url.: - - MLC_MLPERF_AUTOMOTIVE_LOADGEN_DOWNLOAD_URL -- names: - - compiler - skip_if_any_env: - MLC_HOST_OS_TYPE: - - windows - MLC_TMP_MLPERF_AUTOMOTIVE_LOADGEN_INSTALL_FROM_PIP: - - 'yes' - tags: get,compiler -- enable_if_env: - MLC_HOST_OS_TYPE: - - windows - skip_if_env: - MLC_TMP_MLPERF_AUTOMOTIVE_LOADGEN_INSTALL_FROM_PIP: - - 'yes' - names: - - compiler - tags: get,cl -- names: - - cmake - tags: get,cmake - version_min: '3.12' -- names: - - pip-package - - wheel - tags: 
get,generic-python-lib,_package.wheel -- names: - - pip-package - - pip - tags: get,generic-python-lib,_pip -- names: - - pip-package - - pybind11 - tags: get,generic-python-lib,_package.pybind11 -- names: - - pip-package - - setuputils - tags: get,generic-python-lib,_package.setuptools - -extra_cache_tags_from_env: -- env: MLC_PYTHON_CACHE_TAGS - prefix: python- -- env: MLC_COMPILER_CACHE_TAGS - prefix: compiler- - -new_env_keys: -- +PYTHONPATH -- +C_INCLUDE_PATH -- +CPLUS_INCLUDE_PATH -- +LD_LIBRARY_PATH -- +DYLD_FALLBACK_LIBRARY_PATH -- MLC_MLPERF_AUTOMOTIVE_LOADGEN_* - -tags: -- get -- loadgen -- automotive -- automotive-loadgen -- mlperf -- mlcommons - -variations: - from-pip: - env: - MLC_TMP_MLPERF_AUTOMOTIVE_LOADGEN_INSTALL_FROM_PIP: 'yes' - deps: - - tags: get,generic-python-lib,_package.mlcommons-loadgen - custom-python: - ad: - pip-package: - tags: _custom-python - python3: - skip_if_env: - MLC_TMP_USE_CUSTOM_PYTHON: - - 'on' - env: - MLC_TMP_USE_CUSTOM_PYTHON: 'on' - keep-build: - group: clean-build - env: - MLC_MLPERF_AUTOMOTIVE_LOADGEN_BUILD_CLEAN: 'no' - clean-build: - group: clean-build - default: true - env: - MLC_MLPERF_AUTOMOTIVE_LOADGEN_BUILD_CLEAN: 'yes' - no-compilation-warnings: - env: - '+ CXXFLAGS': - - '-Werror' - - '-Wno-unused-parameter' - -versions: - custom: - add_deps: - automotive-src-loadgen: - version: custom - master: - add_deps: - automotive-src-loadgen: - version: master - -print_env_at_the_end: - MLC_MLPERF_AUTOMOTIVE_LOADGEN_INSTALL_PATH: "Path to the tool" diff --git a/script/get-mlperf-automotive-loadgen/run.bat b/script/get-mlperf-automotive-loadgen/run.bat deleted file mode 100644 index 18a26dd5d..000000000 --- a/script/get-mlperf-automotive-loadgen/run.bat +++ /dev/null @@ -1,39 +0,0 @@ -@echo off - -echo ======================================================= - -set CUR_DIR=%cd% -echo Current path in CM script: %CUR_DIR% - -if "%MLC_MLPERF_AUTOMOTIVE_LOADGEN_DOWNLOAD%" == "YES" ( - set 
MLC_MLPERF_AUTOMOTIVE_SOURCE=%MLC_EXTRACT_EXTRACTED_PATH% -) - -set INSTALL_DIR=%CUR_DIR%\install - -echo. -echo Switching to %MLC_MLPERF_AUTOMOTIVE_SOURCE%\loadgen - -cd %MLC_MLPERF_AUTOMOTIVE_SOURCE%\loadgen -IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% - -echo. -echo Running %MLC_PYTHON_BIN% setup.py develop - -%MLC_PYTHON_BIN% setup.py develop -IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% - -echo ======================================================= -cmake ^ - -DCMAKE_INSTALL_PREFIX=%INSTALL_DIR% ^ - %MLC_MLPERF_AUTOMOTIVE_SOURCE%\loadgen ^ - -DPYTHON_EXECUTABLE:FILEPATH=%MLC_PYTHON_BIN_WITH_PATH% -IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% - -echo ======================================================= -cmake --build . --target install -IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% - -del /Q /S build - -echo ======================================================= diff --git a/script/get-mlperf-automotive-loadgen/run.sh b/script/get-mlperf-automotive-loadgen/run.sh deleted file mode 100644 index e31617a5e..000000000 --- a/script/get-mlperf-automotive-loadgen/run.sh +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/bash - -CUR_DIR=$PWD - -mkdir -p install -mkdir -p build - -INSTALL_DIR="${CUR_DIR}/install" - -echo "******************************************************" - -cd build - -if [ "${MLC_MLPERF_AUTOMOTIVE_LOADGEN_DOWNLOAD}" == "YES" ]; then - export MLC_MLPERF_AUTOMOTIVE_SOURCE="${MLC_EXTRACT_EXTRACTED_PATH}" -fi - - -if [ -z "${MLC_MLPERF_AUTOMOTIVE_SOURCE}" ]; then - echo "Error: env MLC_MLPERF_AUTOMOTIVE_SOURCE is not defined - something is wrong with script automation!" - exit 1 -fi - -cmake \ - -DCMAKE_INSTALL_PREFIX="${INSTALL_DIR}" \ - "${MLC_MLPERF_AUTOMOTIVE_SOURCE}/loadgen" \ - -DPYTHON_EXECUTABLE:FILEPATH="${MLC_PYTHON_BIN_WITH_PATH}" -B . -test $? -eq 0 || exit $? - -echo "******************************************************" -MLC_MAKE_CORES=${MLC_MAKE_CORES:-${MLC_HOST_CPU_TOTAL_CORES}} -MLC_MAKE_CORES=${MLC_MAKE_CORES:-2} - -cmake --build . 
--target install -j "${MLC_MAKE_CORES}" -test $? -eq 0 || exit $? - -# Clean build directory (too large) -cd "${CUR_DIR}" -if [[ $MLC_MLPERF_AUTOMOTIVE_LOADGEN_BUILD_CLEAN == "yes" ]]; then - rm -rf build -fi - - -cd "${MLC_MLPERF_AUTOMOTIVE_SOURCE}/loadgen" -${MLC_PYTHON_BIN_WITH_PATH} -m pip install . --target="${MLPERF_AUTOMOTIVE_PYTHON_SITE_BASE}" -test $? -eq 0 || exit $? - -# Clean the built wheel -#find . -name 'mlcommons_loadgen*.whl' | xargs rm - -echo "******************************************************" -echo "Loadgen is built and installed to ${INSTALL_DIR} ..." From 2319cb52d71dc86a5405f751a49421f0b2eafbb1 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 21 May 2025 19:19:34 +0530 Subject: [PATCH 30/83] added tests for automotive dataset and model downloads --- .../workflows/run-tests-on-modified-meta.yml | 25 +++++++++++++++++-- .../get-ml-model-abtf-ssd-pytorch/meta.yaml | 5 ++++ script/get-ml-model-bevformer/meta.yaml | 5 ++++ script/get-ml-model-deeplabv3_plus/meta.yaml | 6 +++++ .../meta.yaml | 9 ++++++- .../meta.yaml | 7 +++++- 6 files changed, 53 insertions(+), 4 deletions(-) diff --git a/.github/workflows/run-tests-on-modified-meta.yml b/.github/workflows/run-tests-on-modified-meta.yml index 244b18a6f..389a10fa9 100644 --- a/.github/workflows/run-tests-on-modified-meta.yml +++ b/.github/workflows/run-tests-on-modified-meta.yml @@ -8,6 +8,19 @@ on: - 'script/**meta.yaml' jobs: + fetch-secret: + runs-on: ubuntu-latest + outputs: + gdrive_secret: ${{ steps.op-load-secret.outputs.GDRIVE_SERVICE_ACCOUNT_KEY }} + steps: + - name: Load secret from 1Password + id: op-load-secret + uses: 1password/load-secrets-action@v2 + with: + export-env: false + env: + OP_SERVICE_ACCOUNT_TOKEN: ${{ secrets.OP_SERVICE_ACCOUNT_TOKEN }} + GDRIVE_SERVICE_ACCOUNT_KEY: op://7basd2jirojjckncf6qnq3azai/bzbaco3uxoqs2rcyu42rvuccga/credential get_modified_files: runs-on: ubuntu-latest outputs: @@ -38,7 +51,9 @@ jobs: 
process_modified_files: runs-on: ubuntu-latest - needs: get_modified_files + needs: + - get_modified_files + - fetch-secret if: needs.determine_modified_files.outputs.processed_files != '[]' && needs.determine_modified_files.outputs.processed_files != '' strategy: fail-fast: false @@ -50,7 +65,13 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 2 - + - name: Set RCLONE Service account env var from secret + shell: bash + run: | + echo "::add-mask::${{ needs.fetch-secret.outputs.gdrive_secret }}" + echo "RCLONE_CONFIG_MLC_COGNATA_SERVICE_ACCOUNT_CREDENTIALS=${{ needs.fetch-secret.outputs.gdrive_secret }}" >> $GITHUB_ENV + echo "RCLONE_CONFIG_MLC_NUSCENES_SERVICE_ACCOUNT_CREDENTIALS=${{ needs.fetch-secret.outputs.gdrive_secret }}" >> $GITHUB_ENV + echo "RCLONE_CONFIG_MLC_WAYMO_SERVICE_ACCOUNT_CREDENTIALS=${{ needs.fetch-secret.outputs.gdrive_secret }}" >> $GITHUB_ENV - name: Process meta.yaml file run: | echo "Processing ${{ matrix.file_info.file }} with run number ${{ matrix.file_info.num_run }}" diff --git a/script/get-ml-model-abtf-ssd-pytorch/meta.yaml b/script/get-ml-model-abtf-ssd-pytorch/meta.yaml index aa08c11d2..017acec37 100644 --- a/script/get-ml-model-abtf-ssd-pytorch/meta.yaml +++ b/script/get-ml-model-abtf-ssd-pytorch/meta.yaml @@ -252,3 +252,8 @@ variations: - MLC_DOWNLOAD_URL env: MLC_DOWNLOAD_SRC: mlcommons +tests: + run_inputs: + - variations_list: + - onnx,rclone,mlc,dry-run + - pytorch,rclone,mlc,dry-run \ No newline at end of file diff --git a/script/get-ml-model-bevformer/meta.yaml b/script/get-ml-model-bevformer/meta.yaml index 6022d7907..33c3513f5 100644 --- a/script/get-ml-model-bevformer/meta.yaml +++ b/script/get-ml-model-bevformer/meta.yaml @@ -73,3 +73,8 @@ variations: dry-run,rclone: env: MLC_DOWNLOAD_EXTRA_OPTIONS: --dry-run +tests: + run_inputs: + - variations_list: + - onnx,rclone,mlc,dry-run + - pytorch,rclone,mlc,dry-run \ No newline at end of file diff --git a/script/get-ml-model-deeplabv3_plus/meta.yaml 
b/script/get-ml-model-deeplabv3_plus/meta.yaml index 02f437fbd..e92610e5b 100644 --- a/script/get-ml-model-deeplabv3_plus/meta.yaml +++ b/script/get-ml-model-deeplabv3_plus/meta.yaml @@ -81,3 +81,9 @@ variations: dry-run,rclone: env: MLC_DOWNLOAD_EXTRA_OPTIONS: --dry-run +tests: + run_inputs: + - variations_list: + - onnx,rclone,mlc,dry-run + - onnx_dynamic,rclone,mlc,dry-run + - pytorch,rclone,mlc,dry-run \ No newline at end of file diff --git a/script/get-preprocessed-dataset-cognata/meta.yaml b/script/get-preprocessed-dataset-cognata/meta.yaml index e9aeb9510..afd944e8d 100644 --- a/script/get-preprocessed-dataset-cognata/meta.yaml +++ b/script/get-preprocessed-dataset-cognata/meta.yaml @@ -98,4 +98,11 @@ variations: MLC_DOWNLOAD_MODE: dry dry-run,rclone: env: - MLC_DOWNLOAD_EXTRA_OPTIONS: --dry-run \ No newline at end of file + MLC_DOWNLOAD_EXTRA_OPTIONS: --dry-run +tests: + run_inputs: + - variations_list: + - validation,prebuilt,2d_obj_det,rclone,mlc,dry-run + - calibration,prebuilt,2d_obj_det,rclone,mlc,dry-run + - validation,prebuilt,segmentation,rclone,mlc,dry-run + - calibration,prebuilt,segmentation,rclone,mlc,dry-run \ No newline at end of file diff --git a/script/get-preprocessed-dataset-nuscenes/meta.yaml b/script/get-preprocessed-dataset-nuscenes/meta.yaml index af7a911dc..8222f7549 100644 --- a/script/get-preprocessed-dataset-nuscenes/meta.yaml +++ b/script/get-preprocessed-dataset-nuscenes/meta.yaml @@ -75,4 +75,9 @@ variations: MLC_DOWNLOAD_MODE: dry dry-run,rclone: env: - MLC_DOWNLOAD_EXTRA_OPTIONS: --dry-run \ No newline at end of file + MLC_DOWNLOAD_EXTRA_OPTIONS: --dry-run +tests: + run_inputs: + - variations_list: + - validation,prebuilt,rclone,mlc,dry-run + - calibration,prebuilt,rclone,mlc,dry-run \ No newline at end of file From 05a1a0c2e93d4f3eb0ed20fbdf0946e5f947080c Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 21 May 2025 19:59:59 +0530 Subject: [PATCH 31/83] env variable correction 
--- script/run-mlperf-automotive-app/meta.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/run-mlperf-automotive-app/meta.yaml b/script/run-mlperf-automotive-app/meta.yaml index f21fc1c0d..8e7ea7b27 100644 --- a/script/run-mlperf-automotive-app/meta.yaml +++ b/script/run-mlperf-automotive-app/meta.yaml @@ -218,7 +218,7 @@ variations: v0.5: group: benchmark-version env: - MLC_MLPERF_AUTOMOTIVE_VERSION: v0.5 + MLC_MLPERF_INFERENCE_VERSION: v0.5 performance-and-accuracy: default: true From 2f8508bfefe703d46687de98b884f475fad06e54 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 21 May 2025 20:08:50 +0530 Subject: [PATCH 32/83] created alias for reference --- script/app-mlperf-automotive/meta.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/script/app-mlperf-automotive/meta.yaml b/script/app-mlperf-automotive/meta.yaml index af3307465..12fbbd63e 100644 --- a/script/app-mlperf-automotive/meta.yaml +++ b/script/app-mlperf-automotive/meta.yaml @@ -112,6 +112,9 @@ docker: # Variations to customize dependencies variations: + reference: + alias: mlcommons-python + # Implementation mlcommons-python: group: implementation From 3bfece2bfb90a2f1b8cced97d34e3cc09afd1c22 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 21 May 2025 20:51:25 +0530 Subject: [PATCH 33/83] add docker os version --- script/app-mlperf-automotive/meta.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/script/app-mlperf-automotive/meta.yaml b/script/app-mlperf-automotive/meta.yaml index 12fbbd63e..06907e874 100644 --- a/script/app-mlperf-automotive/meta.yaml +++ b/script/app-mlperf-automotive/meta.yaml @@ -378,6 +378,7 @@ variations: docker: all_gpus: 'yes' base_image: nvcr.io/nvidia/pytorch:24.03-py3 + os_version: 24.03 v0.5: {} @@ -388,6 +389,7 @@ variations: v0.5,mlcommons-python,cpu: docker: base_image: ubuntu:22.04 + os_version: 22.04 # Loadgen scenarios 
offline: From 20bcb645fca455688b37c35ba761314c29f6c8ae Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 21 May 2025 20:53:21 +0530 Subject: [PATCH 34/83] handle string while parsing docker os version --- script/build-dockerfile/customize.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/build-dockerfile/customize.py b/script/build-dockerfile/customize.py index a3d32b247..9c1ec4ca2 100644 --- a/script/build-dockerfile/customize.py +++ b/script/build-dockerfile/customize.py @@ -228,7 +228,7 @@ def preprocess(i): f.write('RUN ' + env['MLC_DOCKER_EXTRA_SYS_DEPS'] + EOL) if env['MLC_DOCKER_OS'] == "ubuntu": - if int(env['MLC_DOCKER_OS_VERSION'].split('.')[0]) >= 23: + if int(str(env['MLC_DOCKER_OS_VERSION']).split('.')[0]) >= 23: if "--break-system-packages" not in env.get( 'MLC_DOCKER_PIP_INSTALL_EXTRA_FLAGS', ''): env['MLC_DOCKER_PIP_INSTALL_EXTRA_FLAGS'] = " --break-system-packages" From 14120696935b8132a09ea004f4d127f22132033e Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 21 May 2025 20:57:20 +0530 Subject: [PATCH 35/83] fix for downloading model and dataset to host --- script/app-mlperf-automotive/meta.yaml | 128 ++++++++++++------------- 1 file changed, 64 insertions(+), 64 deletions(-) diff --git a/script/app-mlperf-automotive/meta.yaml b/script/app-mlperf-automotive/meta.yaml index 06907e874..1264cae04 100644 --- a/script/app-mlperf-automotive/meta.yaml +++ b/script/app-mlperf-automotive/meta.yaml @@ -231,30 +231,30 @@ variations: MLC_USE_DATASET_FROM_HOST: yes env: MLC_MODEL: bevformer - deps: - - tags: get,preprocessed,dataset,nuscenes - skip_if_env: - MLC_PREPROCESSED_DATASET_NUSCENES_PATH: - - yes - enable_if_env: - MLC_USE_DATASET_FROM_HOST: - - "yes" - - tags: get,ml-model,bevformer - skip_if_env: - MLC_ML_MODEL_BEVFORMER_PATH: - - yes - enable_if_env: - MLC_USE_MODEL_FROM_HOST: - - "yes" - names: - - ml-model-bevformer - 
add_deps_recursive: - abtf-inference-implementation: - tags: _beformer docker: + deps: + - tags: get,preprocessed,dataset,nuscenes + skip_if_env: + MLC_PREPROCESSED_DATASET_NUSCENES_PATH: + - yes + enable_if_env: + MLC_USE_DATASET_FROM_HOST: + - "yes" + - tags: get,ml-model,bevformer + skip_if_env: + MLC_ML_MODEL_BEVFORMER_PATH: + - yes + enable_if_env: + MLC_USE_MODEL_FROM_HOST: + - "yes" + names: + - ml-model-bevformer mounts: - - "${{ MLC_PREPROCESSED_DATASET_NUSCENES_PATH }}:${{ MLC_PREPROCESSED_DATASET_NUSCENES_PATH }}" - - "${{ MLC_ML_MODEL_BEVFORMER_PATH }}:${{ MLC_ML_MODEL_BEVFORMER_PATH }}" + - "${{ MLC_PREPROCESSED_DATASET_NUSCENES_PATH }}:${{ MLC_PREPROCESSED_DATASET_NUSCENES_PATH }}" + - "${{ MLC_ML_MODEL_BEVFORMER_PATH }}:${{ MLC_ML_MODEL_BEVFORMER_PATH }}" + add_deps_recursive: + abtf-inference-implementation: + tags: _bevformer posthook_deps: - enable_if_env: MLC_MLPERF_LOADGEN_MODE: @@ -274,30 +274,30 @@ variations: MLC_USE_DATASET_FROM_HOST: yes env: MLC_MODEL: ssd-resnet50 - deps: - - tags: get,preprocessed,dataset,cognata - skip_if_env: - MLC_PREPROCESSED_DATASET_COGNATA_PATH: - - yes - enable_if_env: - MLC_USE_DATASET_FROM_HOST: - - "yes" - - tags: get,ml-model,ssd,resnet50,_mlc,_rclone - skip_if_env: - MLC_ML_MODEL_SSD_PATH: - - yes - enable_if_env: - MLC_USE_MODEL_FROM_HOST: - - "yes" - names: - - ml-model-ssd-resnet50 + docker: + deps: + - tags: get,preprocessed,dataset,cognata + skip_if_env: + MLC_PREPROCESSED_DATASET_COGNATA_PATH: + - yes + enable_if_env: + MLC_USE_DATASET_FROM_HOST: + - "yes" + - tags: get,ml-model,ssd,resnet50,_mlc,_rclone + skip_if_env: + MLC_ML_MODEL_SSD_PATH: + - yes + enable_if_env: + MLC_USE_MODEL_FROM_HOST: + - "yes" + names: + - ml-model-ssd-resnet50 + mounts: + - "${{ MLC_PREPROCESSED_DATASET_COGNATA_PATH }}:${{ MLC_PREPROCESSED_DATASET_COGNATA_PATH }}" + - "${{ MLC_ML_MODEL_SSD_PATH }}:${{ MLC_ML_MODEL_SSD_PATH }}" add_deps_recursive: mlperf-automotive-implementation: tags: _ssd-resnet50 - docker: - mounts: - - 
"${{ MLC_PREPROCESSED_DATASET_COGNATA_PATH }}:${{ MLC_PREPROCESSED_DATASET_COGNATA_PATH }}" - - "${{ MLC_ML_MODEL_SSD_PATH }}:${{ MLC_ML_MODEL_SSD_PATH }}" posthook_deps: - enable_if_env: MLC_MLPERF_LOADGEN_MODE: @@ -317,30 +317,30 @@ variations: MLC_USE_DATASET_FROM_HOST: yes env: MLC_MODEL: deeplab-v3+ - deps: - - tags: get,preprocessed,dataset,cognata,_segmentation - skip_if_env: - MLC_PREPROCESSED_DATASET_COGNATA_PATH: - - yes - enable_if_env: - MLC_USE_DATASET_FROM_HOST: - - "yes" - - tags: get,ml-model,ssd,resnet50,_mlc,_rclone - skip_if_env: - MLC_ML_MODEL_DEEPLABV3_PLUS_PATH: - - yes - enable_if_env: - MLC_USE_MODEL_FROM_HOST: - - "yes" - names: - - ml-model-deeplab-v3+ + docker: + deps: + - tags: get,preprocessed,dataset,cognata,_segmentation + skip_if_env: + MLC_PREPROCESSED_DATASET_COGNATA_PATH: + - yes + enable_if_env: + MLC_USE_DATASET_FROM_HOST: + - "yes" + - tags: get,ml-model,ssd,resnet50,_mlc,_rclone + skip_if_env: + MLC_ML_MODEL_DEEPLABV3_PLUS_PATH: + - yes + enable_if_env: + MLC_USE_MODEL_FROM_HOST: + - "yes" + names: + - ml-model-deeplab-v3+ + mounts: + - "${{ MLC_PREPROCESSED_DATASET_COGNATA_PATH }}:${{ MLC_PREPROCESSED_DATASET_COGNATA_PATH }}" + - "${{ MLC_ML_MODEL_DEEPLABV3_PLUS_PATH }}:${{ MLC_ML_MODEL_DEEPLABV3_PLUS_PATH }}" add_deps_recursive: mlperf-automotive-implementation: tags: _deeplab-v3+ - docker: - mounts: - - "${{ MLC_PREPROCESSED_DATASET_COGNATA_PATH }}:${{ MLC_PREPROCESSED_DATASET_COGNATA_PATH }}" - - "${{ MLC_ML_MODEL_DEEPLABV3_PLUS_PATH }}:${{ MLC_ML_MODEL_DEEPLABV3_PLUS_PATH }}" posthook_deps: - enable_if_env: MLC_MLPERF_LOADGEN_MODE: From 5199f742feef0f4ee5637148be288fffe9ba5024 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 21 May 2025 21:33:06 +0530 Subject: [PATCH 36/83] fix config names for nuscenes and cognata --- script/get-preprocessed-dataset-cognata/meta.yaml | 2 +- script/get-preprocessed-dataset-nuscenes/meta.yaml | 2 +- 2 files changed, 2 insertions(+), 2 
deletions(-) diff --git a/script/get-preprocessed-dataset-cognata/meta.yaml b/script/get-preprocessed-dataset-cognata/meta.yaml index afd944e8d..81febe604 100644 --- a/script/get-preprocessed-dataset-cognata/meta.yaml +++ b/script/get-preprocessed-dataset-cognata/meta.yaml @@ -66,7 +66,7 @@ variations: enable_if_env: MLC_TMP_REQUIRE_DOWNLOAD: - yes - - tags: get,rclone-config,_waymo + - tags: get,rclone-config,_config-name.mlc-cognata force_cache: true enable_if_env: MLC_TMP_REQUIRE_DOWNLOAD: diff --git a/script/get-preprocessed-dataset-nuscenes/meta.yaml b/script/get-preprocessed-dataset-nuscenes/meta.yaml index 8222f7549..dd2fae2ec 100644 --- a/script/get-preprocessed-dataset-nuscenes/meta.yaml +++ b/script/get-preprocessed-dataset-nuscenes/meta.yaml @@ -43,7 +43,7 @@ variations: enable_if_env: MLC_TMP_REQUIRE_DOWNLOAD: - yes - - tags: get,rclone-config,_waymo + - tags: get,rclone-config,_config=name.mlc-nuscenes force_cache: true enable_if_env: MLC_TMP_REQUIRE_DOWNLOAD: From 16083bf572423f06a1628efd1a8b302b9ec6b3b3 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 21 May 2025 21:36:46 +0530 Subject: [PATCH 37/83] fix typo --- script/get-preprocessed-dataset-nuscenes/meta.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/get-preprocessed-dataset-nuscenes/meta.yaml b/script/get-preprocessed-dataset-nuscenes/meta.yaml index dd2fae2ec..1679c4a15 100644 --- a/script/get-preprocessed-dataset-nuscenes/meta.yaml +++ b/script/get-preprocessed-dataset-nuscenes/meta.yaml @@ -43,7 +43,7 @@ variations: enable_if_env: MLC_TMP_REQUIRE_DOWNLOAD: - yes - - tags: get,rclone-config,_config=name.mlc-nuscenes + - tags: get,rclone-config,_config-name.mlc-nuscenes force_cache: true enable_if_env: MLC_TMP_REQUIRE_DOWNLOAD: From 901cfba8fc621534368076e2ca7b6d56f96b4f6c Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 21 May 2025 21:40:22 +0530 Subject: [PATCH 
38/83] fix missing folder ids --- script/get-preprocessed-dataset-cognata/meta.yaml | 2 ++ script/get-preprocessed-dataset-nuscenes/meta.yaml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/script/get-preprocessed-dataset-cognata/meta.yaml b/script/get-preprocessed-dataset-cognata/meta.yaml index 81febe604..d0b7c3d23 100644 --- a/script/get-preprocessed-dataset-cognata/meta.yaml +++ b/script/get-preprocessed-dataset-cognata/meta.yaml @@ -71,6 +71,8 @@ variations: enable_if_env: MLC_TMP_REQUIRE_DOWNLOAD: - yes + env: + MLC_RCLONE_DRIVE_FOLDER_ID: 1u5FDoeXHVtDrd4zClE47Gmyr7iLFidz1 - enable_if_env: MLC_TMP_REQUIRE_DOWNLOAD: - 'yes' diff --git a/script/get-preprocessed-dataset-nuscenes/meta.yaml b/script/get-preprocessed-dataset-nuscenes/meta.yaml index 1679c4a15..a9b4efb99 100644 --- a/script/get-preprocessed-dataset-nuscenes/meta.yaml +++ b/script/get-preprocessed-dataset-nuscenes/meta.yaml @@ -48,6 +48,8 @@ variations: enable_if_env: MLC_TMP_REQUIRE_DOWNLOAD: - yes + env: + MLC_RCLONE_DRIVE_FOLDER_ID: 17CpM5eU8tjrxh_LpH_BTNTeT37PhzcnC - enable_if_env: MLC_TMP_REQUIRE_DOWNLOAD: - 'yes' From 63f20d470ca33a3fabede847c3f72afa0d14acf6 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 21 May 2025 22:03:59 +0530 Subject: [PATCH 39/83] fix typo --- script/app-mlperf-automotive-mlcommons-python/meta.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/app-mlperf-automotive-mlcommons-python/meta.yaml b/script/app-mlperf-automotive-mlcommons-python/meta.yaml index b5e0f0d0d..5d3724fdc 100644 --- a/script/app-mlperf-automotive-mlcommons-python/meta.yaml +++ b/script/app-mlperf-automotive-mlcommons-python/meta.yaml @@ -471,7 +471,7 @@ variations: version_max_usable: "1.26.4" - tags: get,generic-python-lib,_package.onnx - tags: get,generic-python-lib,_package.pillow - - tags: get,generic-python-lib,_package.pyquarternion + - tags: get,generic-python-lib,_package.pyquaternion - tags: 
get,generic-python-lib,_package.tqdm - tags: get,generic-python-lib,_package.nuscenes-devkit - tags: get,preprocessed,dataset,nuscenes From 74e698771a98c064633d4537be7f55bbf67a52bc Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 21 May 2025 22:32:45 +0530 Subject: [PATCH 40/83] add framework as input arg + pass onnx tags --- script/app-mlperf-automotive/meta.yaml | 6 ++++++ script/run-mlperf-automotive-app/meta.yaml | 1 + 2 files changed, 7 insertions(+) diff --git a/script/app-mlperf-automotive/meta.yaml b/script/app-mlperf-automotive/meta.yaml index 1264cae04..7c2a50253 100644 --- a/script/app-mlperf-automotive/meta.yaml +++ b/script/app-mlperf-automotive/meta.yaml @@ -162,6 +162,12 @@ variations: add_deps_recursive: abtf-inference-implementation: tags: _onnxruntime + ml-model-bevformer: + tags: _onnx + ml-model-ssd-resnet50: + tags: _onnx + ml-model-deeplab-v3+: + tags: _onnx onnx_dynamic: base: diff --git a/script/run-mlperf-automotive-app/meta.yaml b/script/run-mlperf-automotive-app/meta.yaml index 8e7ea7b27..f42195f28 100644 --- a/script/run-mlperf-automotive-app/meta.yaml +++ b/script/run-mlperf-automotive-app/meta.yaml @@ -43,6 +43,7 @@ input_mapping: save_console_log: MLC_SAVE_CONSOLE_LOG execution_mode: MLC_MLPERF_RUN_STYLE find_performance: MLC_MLPERF_FIND_PERFORMANCE_MODE + framework: MLC_MLPERF_BACKEND gh_token: MLC_GH_TOKEN gpu_name: MLC_NVIDIA_GPU_NAME hw_name: MLC_HW_NAME From d730a2ef8d2f18f9b245e79d810524de0cdc8875 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 21 May 2025 22:48:40 +0530 Subject: [PATCH 41/83] prevent dataset and model download in docker build stage --- .../meta.yaml | 24 ++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/script/app-mlperf-automotive-mlcommons-python/meta.yaml b/script/app-mlperf-automotive-mlcommons-python/meta.yaml index 5d3724fdc..f410ac961 100644 --- 
a/script/app-mlperf-automotive-mlcommons-python/meta.yaml +++ b/script/app-mlperf-automotive-mlcommons-python/meta.yaml @@ -475,19 +475,23 @@ variations: - tags: get,generic-python-lib,_package.tqdm - tags: get,generic-python-lib,_package.nuscenes-devkit - tags: get,preprocessed,dataset,nuscenes - skip_if_env: + skip_if_any_env: MLC_PREPROCESSED_DATASET_NUSCENES_PATH: - yes MLC_USE_DATASET_FROM_HOST: - "yes" + MLC_RUN_STATE_DOCKER: + - "yes" names: - preprocessed-dataset-mlcommons-nuscenes - tags: get,ml-model,bevformer - skip_if_env: + skip_if_any_env: MLC_ML_MODEL_BEVFORMER_PATH: - yes MLC_USE_MODEL_FROM_HOST: - "yes" + MLC_RUN_STATE_DOCKER: + - "yes" names: - ml-model-bevformer @@ -514,19 +518,23 @@ variations: - tags: get,generic-python-lib,_package.onnxruntime - tags: get,generic-python-lib,_package.tqdm - tags: get,preprocessed,dataset,cognata,_mlc - skip_if_env: + skip_if_any_env: MLC_PREPROCESSED_DATASET_COGNATA_PATH: - yes MLC_USE_DATASET_FROM_HOST: - "yes" + MLC_RUN_STATE_DOCKER: + - "yes" names: - preprocessed-dataset-mlcommons-cognata-ssd-resnet50 - tags: get,ml-model,ssd,resnet50,_mlc,_rclone - skip_if_env: + skip_if_any_env: MLC_ML_MODEL_SSD_PATH: - yes MLC_USE_MODEL_FROM_HOST: - "yes" + MLC_RUN_STATE_DOCKER: + - "yes" names: - ml-model-ssd-resnet50 @@ -554,19 +562,23 @@ variations: - tags: get,generic-python-lib,_package.tqdm - tags: get,generic-python-lib,_package.ijson - tags: get,preprocessed,dataset,cognata,_mlc,_segmentation - skip_if_env: + skip_if_any_env: MLC_PREPROCESSED_DATASET_COGNATA_PATH: - yes MLC_USE_DATASET_FROM_HOST: - "yes" + MLC_RUN_STATE_DOCKER: + - "yes" names: - preprocessed-dataset-mlcommons-cognata-deeplabv3-plus - tags: get,ml-model,deeplabv3-plus - skip_if_env: + skip_if_any_env: MLC_ML_MODEL_DEEPLABV3_PLUS_PATH: - yes MLC_USE_MODEL_FROM_HOST: - "yes" + MLC_RUN_STATE_DOCKER: + - "yes" names: - ml-model-deeplabv3-plus From 633044060221af70be9e82a1e696f0170544442b Mon Sep 17 00:00:00 2001 From: ANANDHU S 
<71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 21 May 2025 23:04:00 +0530 Subject: [PATCH 42/83] get automotive loadgen --- script/app-mlperf-automotive-mlcommons-python/meta.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/app-mlperf-automotive-mlcommons-python/meta.yaml b/script/app-mlperf-automotive-mlcommons-python/meta.yaml index f410ac961..fc4022066 100644 --- a/script/app-mlperf-automotive-mlcommons-python/meta.yaml +++ b/script/app-mlperf-automotive-mlcommons-python/meta.yaml @@ -276,7 +276,7 @@ deps: - loadgen - mlperf-inference-loadgen - - tags: get,loadgen + - tags: get,loadgen,_automotive enable_if_any_env: MLC_MLPERF_LOADGEN_BUILD_FROM_SRC: - "on" From 22869b84c3c94b75e1625259e7ebb1dd48cd05f1 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 21 May 2025 23:17:07 +0530 Subject: [PATCH 43/83] mount generic ml model file env --- script/app-mlperf-automotive/meta.yaml | 1 + script/get-ml-model-bevformer/customize.py | 2 ++ script/get-ml-model-bevformer/meta.yaml | 1 + script/get-ml-model-deeplabv3_plus/customize.py | 1 + script/get-ml-model-deeplabv3_plus/meta.yaml | 4 +++- 5 files changed, 8 insertions(+), 1 deletion(-) diff --git a/script/app-mlperf-automotive/meta.yaml b/script/app-mlperf-automotive/meta.yaml index 7c2a50253..b389868a9 100644 --- a/script/app-mlperf-automotive/meta.yaml +++ b/script/app-mlperf-automotive/meta.yaml @@ -107,6 +107,7 @@ docker: - tags: get,abtf,scratch,space mounts: - "${{ MLC_ABTF_SCRATCH_PATH_DATASETS }}:${{ MLC_ABTF_SCRATCH_PATH_DATASETS }}" + - "${{ MLC_ML_MODEL_FILE_WITH_PATH }}:${{ MLC_ML_MODEL_FILE_WITH_PATH }}" # Variations to customize dependencies diff --git a/script/get-ml-model-bevformer/customize.py b/script/get-ml-model-bevformer/customize.py index 1c4868594..dc3098519 100644 --- a/script/get-ml-model-bevformer/customize.py +++ b/script/get-ml-model-bevformer/customize.py @@ -22,5 +22,7 @@ def postprocess(i): 
env['MLC_ML_MODEL_BEVFORMER_PATH'] = os.path.join( env['MLC_ML_MODEL_BEVFORMER_PATH'], env['MLC_ML_MODEL_FILENAME']) + + env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH'] = env['MLC_ML_MODEL_BEVFORMER_PATH'] return {'return': 0} diff --git a/script/get-ml-model-bevformer/meta.yaml b/script/get-ml-model-bevformer/meta.yaml index 33c3513f5..8cf191c69 100644 --- a/script/get-ml-model-bevformer/meta.yaml +++ b/script/get-ml-model-bevformer/meta.yaml @@ -9,6 +9,7 @@ tags: uid: 438a053f666443bd new_env_keys: - MLC_ML_MODEL_BEVFORMER_PATH + - MLC_ML_MODEL_DEEPLABV3_PLUS_PATH print_env_at_the_end: MLC_ML_MODEL_BEVFORMER_PATH: BevFormer checkpoint path variations: diff --git a/script/get-ml-model-deeplabv3_plus/customize.py b/script/get-ml-model-deeplabv3_plus/customize.py index efc103c82..7a9c6bf3b 100644 --- a/script/get-ml-model-deeplabv3_plus/customize.py +++ b/script/get-ml-model-deeplabv3_plus/customize.py @@ -21,5 +21,6 @@ def postprocess(i): env = i['env'] env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH'] = os.path.join(env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH'], env['MLC_ML_MODEL_FILENAME']) + env['MLC_ML_MODEL_FILE_WITH_PATH'] = env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH'] return {'return': 0} diff --git a/script/get-ml-model-deeplabv3_plus/meta.yaml b/script/get-ml-model-deeplabv3_plus/meta.yaml index e92610e5b..832c7deaf 100644 --- a/script/get-ml-model-deeplabv3_plus/meta.yaml +++ b/script/get-ml-model-deeplabv3_plus/meta.yaml @@ -11,6 +11,7 @@ tags: uid: cfb2d53b9dbc4dc0 new_env_keys: - MLC_ML_MODEL_DEEPLABV3_PLUS_PATH + - MLC_ML_MODEL_DEEPLABV3_PLUS_PATH print_env_at_the_end: MLC_ML_MODEL_DEEPLABV3_PLUS_PATH: DeepLabV3+ checkpoint path variations: @@ -21,7 +22,8 @@ variations: MLC_MODEL_FORMAT: onnx MLC_MODEL_RCLONE_FILEPATH: model_checkpoint_deeplab/deeplabv3+_8mp.onnx MLC_ML_MODEL_FILENAME: deeplabv3+_8mp.onnx - onnx_dynamic: + dynamic: {} + onnx,dynamic: group: model-format env: MLC_MODEL_FORMAT: onnx From dbb3d9a1b4c353ef687407155945b1b78f471e52 Mon Sep 17 00:00:00 2001 From: 
"github-actions[bot]" Date: Wed, 21 May 2025 17:47:34 +0000 Subject: [PATCH 44/83] [Automated Commit] Format Codebase [skip ci] --- script/get-ml-model-bevformer/customize.py | 2 +- script/get-ml-model-deeplabv3_plus/customize.py | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/script/get-ml-model-bevformer/customize.py b/script/get-ml-model-bevformer/customize.py index dc3098519..e3a0318cf 100644 --- a/script/get-ml-model-bevformer/customize.py +++ b/script/get-ml-model-bevformer/customize.py @@ -22,7 +22,7 @@ def postprocess(i): env['MLC_ML_MODEL_BEVFORMER_PATH'] = os.path.join( env['MLC_ML_MODEL_BEVFORMER_PATH'], env['MLC_ML_MODEL_FILENAME']) - + env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH'] = env['MLC_ML_MODEL_BEVFORMER_PATH'] return {'return': 0} diff --git a/script/get-ml-model-deeplabv3_plus/customize.py b/script/get-ml-model-deeplabv3_plus/customize.py index 7a9c6bf3b..88929621d 100644 --- a/script/get-ml-model-deeplabv3_plus/customize.py +++ b/script/get-ml-model-deeplabv3_plus/customize.py @@ -10,7 +10,7 @@ def preprocess(i): if os_info['platform'] == "windows": return {'return': 1, 'error': 'Script not supported in windows yet!'} - + env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" return {'return': 0} @@ -20,7 +20,8 @@ def postprocess(i): env = i['env'] - env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH'] = os.path.join(env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH'], env['MLC_ML_MODEL_FILENAME']) + env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH'] = os.path.join( + env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH'], env['MLC_ML_MODEL_FILENAME']) env['MLC_ML_MODEL_FILE_WITH_PATH'] = env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH'] - + return {'return': 0} From a7336b21ca1cd7b12f1710799864f9e1d0020e40 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 21 May 2025 23:43:37 +0530 Subject: [PATCH 45/83] fix typo --- script/get-ml-model-bevformer/customize.py | 56 +++++++++---------- script/get-ml-model-bevformer/meta.yaml | 2 +- 
.../get-ml-model-deeplabv3_plus/customize.py | 54 +++++++++--------- script/get-ml-model-deeplabv3_plus/meta.yaml | 2 +- 4 files changed, 57 insertions(+), 57 deletions(-) diff --git a/script/get-ml-model-bevformer/customize.py b/script/get-ml-model-bevformer/customize.py index e3a0318cf..915a0dde7 100644 --- a/script/get-ml-model-bevformer/customize.py +++ b/script/get-ml-model-bevformer/customize.py @@ -1,28 +1,28 @@ -from mlc import utils -import os - - -def preprocess(i): - - os_info = i['os_info'] - - env = i['env'] - - if os_info['platform'] == "windows": - return {'return': 1, 'error': 'Script not supported in windows yet!'} - - env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" - - return {'return': 0} - - -def postprocess(i): - - env = i['env'] - - env['MLC_ML_MODEL_BEVFORMER_PATH'] = os.path.join( - env['MLC_ML_MODEL_BEVFORMER_PATH'], env['MLC_ML_MODEL_FILENAME']) - - env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH'] = env['MLC_ML_MODEL_BEVFORMER_PATH'] - - return {'return': 0} +from mlc import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if os_info['platform'] == "windows": + return {'return': 1, 'error': 'Script not supported in windows yet!'} + + env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + env['MLC_ML_MODEL_BEVFORMER_PATH'] = os.path.join( + env['MLC_ML_MODEL_BEVFORMER_PATH'], env['MLC_ML_MODEL_FILENAME']) + + env['MLC_ML_MODEL_FILE_WITH_PATH'] = env['MLC_ML_MODEL_BEVFORMER_PATH'] + + return {'return': 0} diff --git a/script/get-ml-model-bevformer/meta.yaml b/script/get-ml-model-bevformer/meta.yaml index 8cf191c69..e4c156030 100644 --- a/script/get-ml-model-bevformer/meta.yaml +++ b/script/get-ml-model-bevformer/meta.yaml @@ -9,7 +9,7 @@ tags: uid: 438a053f666443bd new_env_keys: - MLC_ML_MODEL_BEVFORMER_PATH - - MLC_ML_MODEL_DEEPLABV3_PLUS_PATH + - MLC_ML_MODEL_FILE_WITH_PATH print_env_at_the_end: MLC_ML_MODEL_BEVFORMER_PATH: BevFormer checkpoint path 
variations: diff --git a/script/get-ml-model-deeplabv3_plus/customize.py b/script/get-ml-model-deeplabv3_plus/customize.py index 88929621d..c249abfd0 100644 --- a/script/get-ml-model-deeplabv3_plus/customize.py +++ b/script/get-ml-model-deeplabv3_plus/customize.py @@ -1,27 +1,27 @@ -from mlc import utils -import os - - -def preprocess(i): - - os_info = i['os_info'] - - env = i['env'] - - if os_info['platform'] == "windows": - return {'return': 1, 'error': 'Script not supported in windows yet!'} - - env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" - - return {'return': 0} - - -def postprocess(i): - - env = i['env'] - - env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH'] = os.path.join( - env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH'], env['MLC_ML_MODEL_FILENAME']) - env['MLC_ML_MODEL_FILE_WITH_PATH'] = env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH'] - - return {'return': 0} +from mlc import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if os_info['platform'] == "windows": + return {'return': 1, 'error': 'Script not supported in windows yet!'} + + env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH'] = os.path.join( + env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH'], env['MLC_ML_MODEL_FILENAME']) + env['MLC_ML_MODEL_FILE_WITH_PATH'] = env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH'] + + return {'return': 0} diff --git a/script/get-ml-model-deeplabv3_plus/meta.yaml b/script/get-ml-model-deeplabv3_plus/meta.yaml index 832c7deaf..66215eb09 100644 --- a/script/get-ml-model-deeplabv3_plus/meta.yaml +++ b/script/get-ml-model-deeplabv3_plus/meta.yaml @@ -11,7 +11,7 @@ tags: uid: cfb2d53b9dbc4dc0 new_env_keys: - MLC_ML_MODEL_DEEPLABV3_PLUS_PATH - - MLC_ML_MODEL_DEEPLABV3_PLUS_PATH + - MLC_ML_MODEL_FILE_WITH_PATH print_env_at_the_end: MLC_ML_MODEL_DEEPLABV3_PLUS_PATH: DeepLabV3+ checkpoint path variations: From d14a9c64a920532085b994934bb876ba27650bf5 Mon Sep 17 00:00:00 2001 From: 
ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Thu, 22 May 2025 00:05:46 +0530 Subject: [PATCH 46/83] populate inference repo paths --- script/app-mlperf-automotive/meta.yaml | 3 +++ script/get-mlperf-automotive-utils/meta.yaml | 7 +++++++ 2 files changed, 10 insertions(+) diff --git a/script/app-mlperf-automotive/meta.yaml b/script/app-mlperf-automotive/meta.yaml index b389868a9..c31b11af4 100644 --- a/script/app-mlperf-automotive/meta.yaml +++ b/script/app-mlperf-automotive/meta.yaml @@ -108,6 +108,9 @@ docker: mounts: - "${{ MLC_ABTF_SCRATCH_PATH_DATASETS }}:${{ MLC_ABTF_SCRATCH_PATH_DATASETS }}" - "${{ MLC_ML_MODEL_FILE_WITH_PATH }}:${{ MLC_ML_MODEL_FILE_WITH_PATH }}" + - "${{ MLC_MLPERF_AUTOMOTIVE_BEVFORMER_PATH }}:${{ MLC_MLPERF_AUTOMOTIVE_BEVFORMER_PATH }}" + - "${{ MLC_MLPERF_AUTOMOTIVE_SSD_RESNET50_PATH }}:${{ MLC_MLPERF_AUTOMOTIVE_SSD_RESNET50_PATH }}" + - "${{ MLC_MLPERF_AUTOMOTIVE_DEEPLABV3PLUS_PATH }}:${{ MLC_MLPERF_AUTOMOTIVE_DEEPLABV3PLUS_PATH }}" # Variations to customize dependencies diff --git a/script/get-mlperf-automotive-utils/meta.yaml b/script/get-mlperf-automotive-utils/meta.yaml index 5a71e88b7..b76c2db19 100644 --- a/script/get-mlperf-automotive-utils/meta.yaml +++ b/script/get-mlperf-automotive-utils/meta.yaml @@ -16,3 +16,10 @@ deps: - automotive-src new_env_keys: - '+PYTHONPATH' + - MLC_MLPERF_AUTOMOTIVE_BEVFORMER_PATH + - MLC_MLPERF_AUTOMOTIVE_SSD_RESNET50_PATH + - MLC_MLPERF_AUTOMOTIVE_DEEPLABV3PLUS_PATH + - MLC_MLPERF_LAST_RELEASE + - MLC_MLPERF_AUTOMOTIVE_SOURCE + - MLC_MLPERF_AUTOMOTIVE_VERSION + - MLC_MLPERF_AUTOMOTIVE_SOURCE_VERSION \ No newline at end of file From dcd50e7cfebb24cc729c79a891ba3c43303a41b2 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Thu, 22 May 2025 00:07:12 +0530 Subject: [PATCH 47/83] code clean --- script/app-mlperf-automotive/meta.yaml | 3 --- 1 file changed, 3 deletions(-) diff --git a/script/app-mlperf-automotive/meta.yaml 
b/script/app-mlperf-automotive/meta.yaml index c31b11af4..b389868a9 100644 --- a/script/app-mlperf-automotive/meta.yaml +++ b/script/app-mlperf-automotive/meta.yaml @@ -108,9 +108,6 @@ docker: mounts: - "${{ MLC_ABTF_SCRATCH_PATH_DATASETS }}:${{ MLC_ABTF_SCRATCH_PATH_DATASETS }}" - "${{ MLC_ML_MODEL_FILE_WITH_PATH }}:${{ MLC_ML_MODEL_FILE_WITH_PATH }}" - - "${{ MLC_MLPERF_AUTOMOTIVE_BEVFORMER_PATH }}:${{ MLC_MLPERF_AUTOMOTIVE_BEVFORMER_PATH }}" - - "${{ MLC_MLPERF_AUTOMOTIVE_SSD_RESNET50_PATH }}:${{ MLC_MLPERF_AUTOMOTIVE_SSD_RESNET50_PATH }}" - - "${{ MLC_MLPERF_AUTOMOTIVE_DEEPLABV3PLUS_PATH }}:${{ MLC_MLPERF_AUTOMOTIVE_DEEPLABV3PLUS_PATH }}" # Variations to customize dependencies From 5b6b7e36a307b78c11421ba018cfb1f59c577f0f Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Thu, 22 May 2025 00:18:23 +0530 Subject: [PATCH 48/83] correct implementation paths in repo --- script/get-mlperf-automotive-src/customize.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/script/get-mlperf-automotive-src/customize.py b/script/get-mlperf-automotive-src/customize.py index 4108e005f..df444b439 100644 --- a/script/get-mlperf-automotive-src/customize.py +++ b/script/get-mlperf-automotive-src/customize.py @@ -91,11 +91,11 @@ def postprocess(i): automotive_root = env['MLC_MLPERF_AUTOMOTIVE_SOURCE'] env['MLC_MLPERF_AUTOMOTIVE_BEVFORMER_PATH'] = os.path.join( - automotive_root, 'camera-3d-detection') + automotive_root, 'automotive', 'camera-3d-detection') env['MLC_MLPERF_AUTOMOTIVE_SSD_RESNET50_PATH'] = os.path.join( - automotive_root, '2d-object-detection') + automotive_root, 'automotive', '2d-object-detection') env['MLC_MLPERF_AUTOMOTIVE_DEEPLABV3PLUS_PATH'] = os.path.join( - automotive_root, 'semantic-segmentation') + automotive_root, 'automotive', 'semantic-segmentation') env['MLC_GET_DEPENDENT_CACHED_PATH'] = automotive_root From d7331b7356adb446d40701b1ad6212bf7a0c14ea Mon Sep 17 00:00:00 2001 From: ANANDHU S 
<71482562+anandhu-eng@users.noreply.github.com> Date: Thu, 22 May 2025 00:51:54 +0530 Subject: [PATCH 49/83] pytorch fix --- .../meta.yaml | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/script/app-mlperf-automotive-mlcommons-python/meta.yaml b/script/app-mlperf-automotive-mlcommons-python/meta.yaml index fc4022066..4646a0fdd 100644 --- a/script/app-mlperf-automotive-mlcommons-python/meta.yaml +++ b/script/app-mlperf-automotive-mlcommons-python/meta.yaml @@ -177,26 +177,19 @@ deps: names: - ml-engine-pytorch - pytorch - enable_if_env: - MLC_MLPERF_BACKEND: - - pytorch - - tvm-pytorch + skip_if_env: MLC_MLPERF_DEVICE: - - cpu - - rocm + - gpu ## Pytorch (CUDA) - tags: get,generic-python-lib,_torch_cuda names: - ml-engine-pytorch - pytorch - enable_if_env: - MLC_MLPERF_BACKEND: - - pytorch - - tvm-pytorch - - ray + skip_if_env: MLC_MLPERF_DEVICE: - - gpu + - cpu + - rocm ## Torchvision (CPU) - tags: get,generic-python-lib,_torchvision From a06aaa7235991623abbacab449acaeea49d20475 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Thu, 22 May 2025 02:31:05 +0530 Subject: [PATCH 50/83] make dataset path proper --- script/get-preprocessed-dataset-cognata/customize.py | 2 ++ script/get-preprocessed-dataset-cognata/run.sh | 4 ++-- script/get-preprocessed-dataset-nuscenes/customize.py | 2 ++ script/get-preprocessed-dataset-nuscenes/meta.yaml | 4 ++-- script/get-preprocessed-dataset-nuscenes/run.sh | 4 ++-- 5 files changed, 10 insertions(+), 6 deletions(-) diff --git a/script/get-preprocessed-dataset-cognata/customize.py b/script/get-preprocessed-dataset-cognata/customize.py index 4beec14d8..fa1126d5b 100644 --- a/script/get-preprocessed-dataset-cognata/customize.py +++ b/script/get-preprocessed-dataset-cognata/customize.py @@ -16,4 +16,6 @@ def preprocess(i): def postprocess(i): env = i['env'] + env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'] = 
os.path.join(env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'], env['MLC_DATASET_COGNATA_EXTRACTED_FOLDER_NAME']) + return {'return': 0} diff --git a/script/get-preprocessed-dataset-cognata/run.sh b/script/get-preprocessed-dataset-cognata/run.sh index ed1a21d87..0c141b8e6 100644 --- a/script/get-preprocessed-dataset-cognata/run.sh +++ b/script/get-preprocessed-dataset-cognata/run.sh @@ -1,7 +1,7 @@ #!/bin/bash -if [[ "$MLC_DOWNLOAD_MODE" != "dry" && "$MLC_TMP_REQUIRE_DOWNLOAD" = "true" ]]; then - cd "${MLC_PREPROCESSED_DATASET_COGNATA_PATH}/${MLC_DATASET_COGNATA_TAR_FILENAME}" || exit +if [[ "$MLC_DOWNLOAD_MODE" != "dry" && "$MLC_TMP_REQUIRE_DOWNLOAD" = "yes" ]]; then + cd "${MLC_PREPROCESSED_DATASET_COGNATA_PATH}" || exit for f in *.tar.gz; do tar -xzvf "$f" || { echo "Failed to extract $f"; exit 1; } done diff --git a/script/get-preprocessed-dataset-nuscenes/customize.py b/script/get-preprocessed-dataset-nuscenes/customize.py index 4beec14d8..83fdbfb0a 100644 --- a/script/get-preprocessed-dataset-nuscenes/customize.py +++ b/script/get-preprocessed-dataset-nuscenes/customize.py @@ -16,4 +16,6 @@ def preprocess(i): def postprocess(i): env = i['env'] + env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'] = os.path.join(env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'], env['MLC_DATASET_NUSCENES_EXTRACTED_FOLDER_NAME']) + return {'return': 0} diff --git a/script/get-preprocessed-dataset-nuscenes/meta.yaml b/script/get-preprocessed-dataset-nuscenes/meta.yaml index a9b4efb99..e7a1bcc2d 100644 --- a/script/get-preprocessed-dataset-nuscenes/meta.yaml +++ b/script/get-preprocessed-dataset-nuscenes/meta.yaml @@ -20,12 +20,12 @@ variations: default: true group: dataset-type env: - MLC_DATASET_NUSCENES_TAR_FILENAME: val_3d.tar.gz + MLC_DATASET_NUSCENES_EXTRACTED_FOLDER_NAME: val_3d MLC_DOWNLOAD_URL: mlc-nuscenes:nuscenes_dataset/preprocessed/<<>> calibration: group: dataset-type env: - MLC_DATASET_NUSCENES_TAR_FILENAME: calib_3d.tar.gz + MLC_DATASET_NUSCENES_EXTRACTED_FOLDER_NAME: calib_3d 
MLC_DOWNLOAD_URL: mlc-nuscenes:nuscenes_dataset/preprocessed/<<>> prebuilt: default: true diff --git a/script/get-preprocessed-dataset-nuscenes/run.sh b/script/get-preprocessed-dataset-nuscenes/run.sh index 9ec6ee767..a6048a240 100644 --- a/script/get-preprocessed-dataset-nuscenes/run.sh +++ b/script/get-preprocessed-dataset-nuscenes/run.sh @@ -1,7 +1,7 @@ #!/bin/bash -if [[ "$MLC_DOWNLOAD_MODE" != "dry" && "$MLC_TMP_REQUIRE_DOWNLOAD" = "true" ]]; then - cd "${MLC_PREPROCESSED_DATASET_NUSCENES_PATH}/${MLC_DATASET_NUSCENES_TAR_FILENAME}" || exit +if [[ "$MLC_DOWNLOAD_MODE" != "dry" && "$MLC_TMP_REQUIRE_DOWNLOAD" = "yes" ]]; then + cd "${MLC_PREPROCESSED_DATASET_NUSCENES_PATH}" || exit for f in *.tar.gz; do tar -xzvf "$f" || { echo "Failed to extract $f"; exit 1; } done From 6b378661a84fc947e4085151e86ed9af0b5770cc Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 21 May 2025 21:01:27 +0000 Subject: [PATCH 51/83] [Automated Commit] Format Codebase [skip ci] --- script/get-preprocessed-dataset-cognata/customize.py | 6 ++++-- script/get-preprocessed-dataset-nuscenes/customize.py | 4 +++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/script/get-preprocessed-dataset-cognata/customize.py b/script/get-preprocessed-dataset-cognata/customize.py index fa1126d5b..125104d5a 100644 --- a/script/get-preprocessed-dataset-cognata/customize.py +++ b/script/get-preprocessed-dataset-cognata/customize.py @@ -16,6 +16,8 @@ def preprocess(i): def postprocess(i): env = i['env'] - env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'] = os.path.join(env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'], env['MLC_DATASET_COGNATA_EXTRACTED_FOLDER_NAME']) - + env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'] = os.path.join( + env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'], + env['MLC_DATASET_COGNATA_EXTRACTED_FOLDER_NAME']) + return {'return': 0} diff --git a/script/get-preprocessed-dataset-nuscenes/customize.py b/script/get-preprocessed-dataset-nuscenes/customize.py index 
83fdbfb0a..6d9e7799a 100644 --- a/script/get-preprocessed-dataset-nuscenes/customize.py +++ b/script/get-preprocessed-dataset-nuscenes/customize.py @@ -16,6 +16,8 @@ def preprocess(i): def postprocess(i): env = i['env'] - env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'] = os.path.join(env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'], env['MLC_DATASET_NUSCENES_EXTRACTED_FOLDER_NAME']) + env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'] = os.path.join( + env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'], + env['MLC_DATASET_NUSCENES_EXTRACTED_FOLDER_NAME']) return {'return': 0} From 28b0285af8ecf5b1d31800bfda7eb3d39c14fb81 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Thu, 22 May 2025 02:59:43 +0530 Subject: [PATCH 52/83] fix env var issue --- script/get-preprocessed-dataset-cognata/meta.yaml | 4 ++++ script/get-preprocessed-dataset-nuscenes/meta.yaml | 2 ++ 2 files changed, 6 insertions(+) diff --git a/script/get-preprocessed-dataset-cognata/meta.yaml b/script/get-preprocessed-dataset-cognata/meta.yaml index d0b7c3d23..c8bac0417 100644 --- a/script/get-preprocessed-dataset-cognata/meta.yaml +++ b/script/get-preprocessed-dataset-cognata/meta.yaml @@ -36,18 +36,22 @@ variations: MLC_DATASET_COGNATA_TASK: segmentation validation,2d_obj_det: env: + MLC_DATASET_COGNATA_EXTRACTED_FOLDER_NAME: val_2d MLC_DATASET_COGNATA_TAR_FILENAME: val_2d.tar.gz MLC_DOWNLOAD_URL: mlc-cognata:mlc_cognata_dataset/preprocessed_2d/<<>> calibration,2d_obj_det: env: + MLC_DATASET_COGNATA_EXTRACTED_FOLDER_NAME: calib_2d MLC_DATASET_COGNATA_TAR_FILENAME: calib_2d.tar.gz MLC_DOWNLOAD_URL: mlc-cognata:mlc_cognata_dataset/preprocessed_2d/<<>> validation,segmentation: env: + MLC_DATASET_COGNATA_EXTRACTED_FOLDER_NAME: val_seg MLC_DATASET_COGNATA_TAR_FILENAME: val_seg.tar.gz MLC_DOWNLOAD_URL: mlc-cognata:mlc_cognata_dataset/preprocessed_seg/<<>> calibration,segmentation: env: + MLC_DATASET_COGNATA_EXTRACTED_FOLDER_NAME: calib_seg MLC_DATASET_COGNATA_TAR_FILENAME:
calib_seg.tar.gz MLC_DOWNLOAD_URL: mlc-cognata:mlc_cognata_dataset/preprocessed_seg/<<>> prebuilt: diff --git a/script/get-preprocessed-dataset-nuscenes/meta.yaml b/script/get-preprocessed-dataset-nuscenes/meta.yaml index e7a1bcc2d..707f21726 100644 --- a/script/get-preprocessed-dataset-nuscenes/meta.yaml +++ b/script/get-preprocessed-dataset-nuscenes/meta.yaml @@ -21,11 +21,13 @@ variations: group: dataset-type env: MLC_DATASET_NUSCENES_EXTRACTED_FOLDER_NAME: val_3d + MLC_DATASET_NUSCENES_TAR_FILENAME: val_3d.tar.gz MLC_DOWNLOAD_URL: mlc-nuscenes:nuscenes_dataset/preprocessed/<<>> calibration: group: dataset-type env: MLC_DATASET_NUSCENES_EXTRACTED_FOLDER_NAME: calib_3d + MLC_DATASET_NUSCENES_TAR_FILENAME: calib_3d.tar.gz MLC_DOWNLOAD_URL: mlc-nuscenes:nuscenes_dataset/preprocessed/<<>> prebuilt: default: true From 8082cdf3651dbc1d0eda99f104c28f91dcccb214 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Thu, 22 May 2025 03:21:00 +0530 Subject: [PATCH 53/83] minor fixes --- script/app-mlperf-automotive/customize.py | 842 +++++++++--------- script/app-mlperf-automotive/meta.yaml | 2 +- .../get-mlperf-inference-loadgen/customize.py | 2 +- 3 files changed, 423 insertions(+), 423 deletions(-) diff --git a/script/app-mlperf-automotive/customize.py b/script/app-mlperf-automotive/customize.py index 3fad7ad39..872348b58 100644 --- a/script/app-mlperf-automotive/customize.py +++ b/script/app-mlperf-automotive/customize.py @@ -1,421 +1,421 @@ -from mlc import utils -import os -import json -import shutil -import subprocess -import mlperf_utils -from log_parser import MLPerfLog -from utils import * -import copy -import platform -import sys - - -def preprocess(i): - - os_info = i['os_info'] - env = i['env'] - state = i['state'] - script_path = i['run_script_input']['path'] - - if 'cmd' in i['input']: - state['mlperf_inference_run_cmd'] = "mlcr " + \ - " ".join(i['input']['cmd']) - - state['mlperf-inference-implementation'] = {} - - 
run_state = i['run_script_input']['run_state'] - state['mlperf-inference-implementation']['script_id'] = run_state['script_id'] + \ - ":" + ",".join(run_state['script_variation_tags']) - - return {'return': 0} - - -def postprocess(i): - - env = i['env'] - state = i['state'] - - inp = i['input'] - os_info = i['os_info'] - - xsep = '^' if os_info['platform'] == 'windows' else '\\' - q = '"' if os_info['platform'] == 'windows' else "'" - - logger = i['automation'].logger - - env['CMD'] = '' - - # if env.get('MLC_MLPERF_USER_CONF', '') == '': - # return {'return': 0} - - output_dir = env['MLC_MLPERF_OUTPUT_DIR'] - mode = env['MLC_MLPERF_LOADGEN_MODE'] - - mlc = i['automation'].action_object - - result_sut_folder_path = env['MLC_MLPERF_INFERENCE_RESULTS_SUT_PATH'] - - model = env['MLC_MODEL'] - model_full_name = env.get('MLC_ML_MODEL_FULL_NAME', model) - - scenario = env['MLC_MLPERF_LOADGEN_SCENARIO'] - - if not os.path.exists(output_dir) or not os.path.exists( - os.path.join(output_dir, "mlperf_log_summary.txt")): - # No output, fake_run? 
- return {'return': 0} - - mlperf_log = MLPerfLog(os.path.join(output_dir, "mlperf_log_detail.txt")) - if mode == "performance": - if scenario in ["Offline", "Server"]: - metric = "target_qps" - result = mlperf_log['result_mean_latency_ns'] / 1000000 - elif scenario.endswith("Stream"): - metric = "target_latency" - result = mlperf_log['result_mean_latency_ns'] - else: - return {'return': 1, - 'error': 'Unsupported scenario: {}'.format(scenario)} - import yaml - sut_name = state['MLC_SUT_CONFIG_NAME'] - sut_config = state['MLC_SUT_CONFIG'][sut_name] - sut_config_path = state['MLC_SUT_CONFIG_PATH'][sut_name] - if scenario not in sut_config[model_full_name]: - sut_config[model_full_name][scenario] = {} - sut_config[model_full_name][scenario][metric] = result - - print( - f"SUT: {sut_name}, model: {model_full_name}, scenario: {scenario}, {metric} (mean value) updated as {result}") - with open(sut_config_path, "w") as f: - yaml.dump(sut_config, f) - logger.info(f"New config stored in {sut_config_path}") - elif mode == "accuracy": - acc = "" - if env.get('MLC_MLPERF_INFERENCE_VERSION', '') == "mvp-demo" and env.get( - 'MLC_MLPERF_INFERENCE_VERSION') == "poc-demo": - if not env.get( - 'MLC_COGNATA_ACCURACY_DUMP_FILE'): # can happen while reusing old runs - env['MLC_COGNATA_ACCURACY_DUMP_FILE'] = os.path.join( - output_dir, "accuracy.txt") - if os.path.exists(env['MLC_COGNATA_ACCURACY_DUMP_FILE']): - with open(env['MLC_COGNATA_ACCURACY_DUMP_FILE'], "r") as f: - acc = f.readline() - result = acc - else: - return {'return': 1, 'error': f"Unknown mode {mode}"} - - valid = {'performance': True, 'accuracy': True} # its POC - power_result = None # No power measurement in POC - - # result, valid, power_result = mlperf_utils.get_result_from_log(env['MLC_MLPERF_LAST_RELEASE'], model, scenario, output_dir, mode) - - if not state.get('mlc-mlperf-inference-results'): - state['mlc-mlperf-inference-results'] = {} - if not state.get('mlc-mlperf-inference-results-last'): - 
state['mlc-mlperf-inference-results-last'] = {} - if not state['mlc-mlperf-inference-results'].get( - state['MLC_SUT_CONFIG_NAME']): - state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']] = {} - if not state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] - ].get(model): - state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']][model] = {} - if not state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] - ][model].get(scenario): - state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] - ][model][scenario] = {} - - state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] - ][model][scenario][mode] = result - state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] - ][model][scenario][mode + '_valid'] = valid.get(mode, False) - - state['mlc-mlperf-inference-results-last'][mode] = result - state['mlc-mlperf-inference-results-last'][mode + - '_valid'] = valid.get(mode, False) - - if mode in ["performance", "accuracy"] and env.get( - 'MLC_MLPERF_INFERENCE_VERSION', '') not in ["", "mvp-demo", "poc-demo"]: - # if measurements file exist read it - if os.path.exists("measurements.json"): - with open("measurements.json", "r") as file: - measurements = json.load(file) # Load JSON data from the file - else: - measurements = {} - measurements['starting_weights_filename'] = env.get( - 'MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME', env.get( - 'MLC_ML_MODEL_FILE', measurements.get( - 'starting_weights_filename', 'TBD'))) - measurements['retraining'] = env.get( - 'MLC_ML_MODEL_RETRAINING', measurements.get( - 'retraining', 'no')) - measurements['input_data_types'] = env.get( - 'MLC_ML_MODEL_INPUTS_DATA_TYPE', measurements.get( - 'input_data_types', 'fp32')) - measurements['weight_data_types'] = env.get( - 'MLC_ML_MODEL_WEIGHTS_DATA_TYPE', measurements.get( - 'weight_data_types', 'fp32')) - measurements['weight_transformations'] = env.get( - 'MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS', measurements.get( - 
'weight_transformations', 'none')) - - os.chdir(output_dir) - - if not os.path.exists("mlperf_log_summary.txt"): - return {'return': 0} - - mlperf_log_summary = '' - if os.path.isfile("mlperf_log_summary.txt"): - with open("mlperf_log_summary.txt", "r") as fp: - mlperf_log_summary = fp.read() - - if mlperf_log_summary != '': - state['app_mlperf_inference_log_summary'] = {} - for x in mlperf_log_summary.split('\n'): - y = x.split(': ') - if len(y) == 2: - state['app_mlperf_inference_log_summary'][y[0].strip().lower() - ] = y[1].strip() - - if not is_false(env.get("MLC_MLPERF_PRINT_SUMMARY", "")): - logger.info("\n") - logger.info(mlperf_log_summary) - - with open("measurements.json", "w") as fp: - json.dump(measurements, fp, indent=2) - - mlc_sut_info = {} - mlc_sut_info['system_name'] = state['MLC_SUT_META']['system_name'] - mlc_sut_info['implementation'] = env['MLC_MLPERF_IMPLEMENTATION'] - mlc_sut_info['device'] = env['MLC_MLPERF_DEVICE'] - mlc_sut_info['framework'] = state['MLC_SUT_META']['framework'] - mlc_sut_info['run_config'] = env['MLC_MLPERF_INFERENCE_SUT_RUN_CONFIG'] - with open(os.path.join(result_sut_folder_path, "mlc-sut-info.json"), "w") as fp: - json.dump(mlc_sut_info, fp, indent=2) - - system_meta = state['MLC_SUT_META'] - with open("system_meta.json", "w") as fp: - json.dump(system_meta, fp, indent=2) - - # map the custom model for inference result to the official model - # if custom model name is not set, the official model name will be - # mapped to itself - official_model_name = model - if "efficientnet" in official_model_name or "mobilenet" in official_model_name: - official_model_name = "resnet" - model_mapping = {model_full_name: official_model_name} - with open("model_mapping.json", "w") as fp: - json.dump(model_mapping, fp, indent=2) - - # Add to the state - state['app_mlperf_inference_measurements'] = copy.deepcopy( - measurements) - - if os.path.exists(env['MLC_MLPERF_CONF']): - shutil.copy(env['MLC_MLPERF_CONF'], 'mlperf.conf') - - if 
os.path.exists(env['MLC_MLPERF_USER_CONF']): - shutil.copy(env['MLC_MLPERF_USER_CONF'], 'user.conf') - - result, valid, power_result = mlperf_utils.get_result_from_log( - env['MLC_MLPERF_LAST_RELEASE'], model, scenario, output_dir, mode, env.get('MLC_MLPERF_INFERENCE_SOURCE_VERSION')) - - state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] - ][model][scenario][mode] = result - state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] - ][model][scenario][mode + '_valid'] = valid.get(mode, False) - - state['mlc-mlperf-inference-results-last'][mode] = result - state['mlc-mlperf-inference-results-last'][mode + - '_valid'] = valid.get(mode, False) - - # Power not included in v0.5, code should be added in future - - # Record basic host info - host_info = { - "os_version": platform.platform(), - "cpu_version": platform.processor(), - "python_version": sys.version, - } - try: - import importlib.metadata - mlc_version = importlib.metadata.version("mlc") - host_info["mlc_version"] = mlc_version - except Exception as e: - error = format(e) - mlc_version = "unknown" - - x = '' - if env.get('MLC_HOST_OS_FLAVOR', '') != '': - x += env['MLC_HOST_OS_FLAVOR'] - if env.get('MLC_HOST_OS_VERSION', '') != '': - x += ' ' + env['MLC_HOST_OS_VERSION'] - if x != '': - host_info['os_version_sys'] = x - - if env.get('MLC_HOST_SYSTEM_NAME', '') != '': - host_info['system_name'] = env['MLC_HOST_SYSTEM_NAME'] - - # Check CM automation repository - repo_name = 'mlcommons@mlperf-automations' - repo_hash = '' - r = mlc.access({'action': 'find', 'automation': 'repo', - 'item': 'mlcommons@mlperf-automations,9e97bb72b0474657'}) - if r['return'] == 0 and len(r['list']) == 1: - repo_path = r['list'][0].path - if os.path.isdir(repo_path): - repo_name = os.path.basename(repo_path) - - # Check dev - # if repo_name == 'cm4mlops': repo_name = 'mlcommons@cm4mlops' - - r = utils.run_system_cmd({ - 'path': repo_path, - 'cmd': 'git rev-parse HEAD'}) - if r['return'] == 0: - repo_hash = 
r['output'] - - host_info['mlc_repo_name'] = repo_name - host_info['mlc_repo_git_hash'] = repo_hash - - with open("mlc-host-info.json", "w") as fp: - fp.write(json.dumps(host_info, indent=2) + '\n') - - # Prepare README - if "cmd" in inp: - cmd = "mlc run script \\\n\t" + " \\\n\t".join(inp['cmd']) - xcmd = "mlc run script " + xsep + "\n\t" + \ - (" " + xsep + "\n\t").join(inp['cmd']) - else: - cmd = "" - xcmd = "" - - readme_init = "*Check [CM MLPerf docs](https://docs.mlcommons.org/inference) for more details.*\n\n" - - readme_body = "## Host platform\n\n* OS version: {}\n* CPU version: {}\n* Python version: {}\n* MLC version: {}\n\n".format(platform.platform(), - platform.processor(), sys.version, mlc_version) - - x = repo_name - if repo_hash != '': - x += ' --checkout=' + str(repo_hash) - - readme_body += "## CM Run Command\n\nSee [CM installation guide](https://docs.mlcommons.org/inference/install/).\n\n" + \ - "```bash\npip install -U mlcflow\n\nmlc rm cache -f\n\nmlc pull repo {}\n\n{}\n```".format( - x, xcmd) - - readme_body += "\n*Note that if you want to use the [latest automation recipes](https://docs.mlcommons.org/inference) for MLPerf,\n" + \ - " you should simply reload {} without checkout and clean MLC cache as follows:*\n\n".format(repo_name) + \ - "```bash\nmlc rm repo {}\nmlc pull repo {}\nmlc rm cache -f\n\n```".format( - repo_name, repo_name) - - extra_readme_init = '' - extra_readme_body = '' - if env.get('MLC_MLPERF_README', '') == "yes": - extra_readme_body += "\n## Dependent MLPerf Automation scripts\n\n" - - script_tags = inp['tags'] - script_adr = inp.get('adr', {}) - - mlc_input = {'action': 'run', - 'automation': 'script', - 'tags': script_tags, - 'adr': script_adr, - 'print_deps': True, - 'env': env, - 'quiet': True, - 'silent': True, - 'fake_run': True - } - r = mlc.access(mlc_input) - if r['return'] > 0: - return r - - print_deps = r['new_state']['print_deps'] - count = 1 - for dep in print_deps: - extra_readme_body += "\n\n" + 
str(count) + ". `" + dep + "`\n" - count = count + 1 - - if state.get( - 'mlperf-inference-implementation') and state['mlperf-inference-implementation'].get('print_deps'): - - extra_readme_body += "\n## Dependent automation scripts for the MLPerf Inference Implementation\n" - - print_deps = state['mlperf-inference-implementation']['print_deps'] - count = 1 - for dep in print_deps: - extra_readme_body += "\n\n" + \ - str(count) + ". `" + dep + "`\n" - count = count + 1 - - readme = readme_init + readme_body - extra_readme = extra_readme_init + extra_readme_body - - with open("README.md", "w") as fp: - fp.write(readme) - if extra_readme: - with open("README-extra.md", "w") as fp: - fp.write(extra_readme) - - if state.get( - 'mlperf-inference-implementation') and state['mlperf-inference-implementation'].get('version_info'): - env['MLC_MLPERF_RUN_JSON_VERSION_INFO_FILE'] = os.path.join( - output_dir, "mlc-version-info.json") - env['MLC_MLPERF_RUN_DEPS_GRAPH'] = os.path.join( - output_dir, "mlc-deps.png") - env['MLC_MLPERF_RUN_DEPS_MERMAID'] = os.path.join( - output_dir, "mlc-deps.mmd") - with open(os.path.join(output_dir, "mlc-version-info.json"), "w") as f: - f.write( - json.dumps( - state['mlperf-inference-implementation']['version_info'], - indent=2)) - - if env.get('MLC_DUMP_SYSTEM_INFO', True): - dump_script_output( - "detect,os", - env, - state, - 'new_env', - os.path.join( - output_dir, - "os_info.json"), mlc) - dump_script_output( - "detect,cpu", - env, - state, - 'new_env', - os.path.join( - output_dir, - "cpu_info.json"), mlc) - env['MLC_DUMP_RAW_PIP_FREEZE_FILE_PATH'] = os.path.join( - env['MLC_MLPERF_OUTPUT_DIR'], "pip_freeze.raw") - dump_script_output( - "dump,pip,freeze", - env, - state, - 'new_state', - os.path.join( - output_dir, - "pip_freeze.json"), mlc) - - return {'return': 0} - - -def dump_script_output(script_tags, env, state, output_key, dump_file, mlc): - - mlc_input = {'action': 'run', - 'automation': 'script', - 'tags': script_tags, - 'env': 
env, - 'state': state, - 'quiet': True, - 'silent': True, - } - r = mlc.access(mlc_input) - if r['return'] > 0: - return r - with open(dump_file, "w") as f: - f.write(json.dumps(r[output_key], indent=2)) - - return {'return': 0} +from mlc import utils +import os +import json +import shutil +import subprocess +import mlperf_utils +from log_parser import MLPerfLog +from utils import * +import copy +import platform +import sys + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + state = i['state'] + script_path = i['run_script_input']['path'] + + if 'cmd' in i['input']: + state['mlperf_inference_run_cmd'] = "mlcr " + \ + " ".join(i['input']['cmd']) + + state['mlperf-inference-implementation'] = {} + + run_state = i['run_script_input']['run_state'] + state['mlperf-inference-implementation']['script_id'] = run_state['script_id'] + \ + ":" + ",".join(run_state['script_variation_tags']) + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + inp = i['input'] + os_info = i['os_info'] + + xsep = '^' if os_info['platform'] == 'windows' else '\\' + q = '"' if os_info['platform'] == 'windows' else "'" + + logger = i['automation'].logger + + env['CMD'] = '' + + # if env.get('MLC_MLPERF_USER_CONF', '') == '': + # return {'return': 0} + + output_dir = env['MLC_MLPERF_OUTPUT_DIR'] + mode = env['MLC_MLPERF_LOADGEN_MODE'] + + mlc = i['automation'].action_object + + result_sut_folder_path = env['MLC_MLPERF_INFERENCE_RESULTS_SUT_PATH'] + + model = env['MLC_MODEL'] + model_full_name = env.get('MLC_ML_MODEL_FULL_NAME', model) + + scenario = env['MLC_MLPERF_LOADGEN_SCENARIO'] + + if not os.path.exists(output_dir) or not os.path.exists( + os.path.join(output_dir, "mlperf_log_summary.txt")): + # No output, fake_run? 
+ return {'return': 0} + + mlperf_log = MLPerfLog(os.path.join(output_dir, "mlperf_log_detail.txt")) + if mode == "performance": + if scenario in ["Offline", "Server"]: + metric = "target_qps" + result = mlperf_log['result_mean_latency_ns'] / 1000000 + elif scenario.endswith("Stream"): + metric = "target_latency" + result = mlperf_log['result_mean_latency_ns'] + else: + return {'return': 1, + 'error': 'Unsupported scenario: {}'.format(scenario)} + import yaml + sut_name = state['MLC_SUT_CONFIG_NAME'] + sut_config = state['MLC_SUT_CONFIG'][sut_name] + sut_config_path = state['MLC_SUT_CONFIG_PATH'][sut_name] + if scenario not in sut_config[model_full_name]: + sut_config[model_full_name][scenario] = {} + sut_config[model_full_name][scenario][metric] = result + + print( + f"SUT: {sut_name}, model: {model_full_name}, scenario: {scenario}, {metric} (mean value) updated as {result}") + with open(sut_config_path, "w") as f: + yaml.dump(sut_config, f) + logger.info(f"New config stored in {sut_config_path}") + elif mode == "accuracy": + acc = "" + if env.get('MLC_MLPERF_INFERENCE_VERSION', '') == "mvp-demo" or env.get( + 'MLC_MLPERF_INFERENCE_VERSION') == "poc-demo": + if not env.get( + 'MLC_COGNATA_ACCURACY_DUMP_FILE'): # can happen while reusing old runs + env['MLC_COGNATA_ACCURACY_DUMP_FILE'] = os.path.join( + output_dir, "accuracy.txt") + if os.path.exists(env['MLC_COGNATA_ACCURACY_DUMP_FILE']): + with open(env['MLC_COGNATA_ACCURACY_DUMP_FILE'], "r") as f: + acc = f.readline() + result = acc + else: + return {'return': 1, 'error': f"Unknown mode {mode}"} + + valid = {'performance': True, 'accuracy': True} # its POC + power_result = None # No power measurement in POC + + # result, valid, power_result = mlperf_utils.get_result_from_log(env['MLC_MLPERF_LAST_RELEASE'], model, scenario, output_dir, mode) + + if not state.get('mlc-mlperf-inference-results'): + state['mlc-mlperf-inference-results'] = {} + if not state.get('mlc-mlperf-inference-results-last'): + 
state['mlc-mlperf-inference-results-last'] = {} + if not state['mlc-mlperf-inference-results'].get( + state['MLC_SUT_CONFIG_NAME']): + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']] = {} + if not state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] + ].get(model): + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']][model] = {} + if not state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] + ][model].get(scenario): + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] + ][model][scenario] = {} + + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] + ][model][scenario][mode] = result + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] + ][model][scenario][mode + '_valid'] = valid.get(mode, False) + + state['mlc-mlperf-inference-results-last'][mode] = result + state['mlc-mlperf-inference-results-last'][mode + + '_valid'] = valid.get(mode, False) + + if mode in ["performance", "accuracy"] and env.get( + 'MLC_MLPERF_INFERENCE_VERSION', '') not in ["", "mvp-demo", "poc-demo"]: + # if measurements file exist read it + if os.path.exists("measurements.json"): + with open("measurements.json", "r") as file: + measurements = json.load(file) # Load JSON data from the file + else: + measurements = {} + measurements['starting_weights_filename'] = env.get( + 'MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME', env.get( + 'MLC_ML_MODEL_FILE', measurements.get( + 'starting_weights_filename', 'TBD'))) + measurements['retraining'] = env.get( + 'MLC_ML_MODEL_RETRAINING', measurements.get( + 'retraining', 'no')) + measurements['input_data_types'] = env.get( + 'MLC_ML_MODEL_INPUTS_DATA_TYPE', measurements.get( + 'input_data_types', 'fp32')) + measurements['weight_data_types'] = env.get( + 'MLC_ML_MODEL_WEIGHTS_DATA_TYPE', measurements.get( + 'weight_data_types', 'fp32')) + measurements['weight_transformations'] = env.get( + 'MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS', measurements.get( + 
'weight_transformations', 'none')) + + os.chdir(output_dir) + + if not os.path.exists("mlperf_log_summary.txt"): + return {'return': 0} + + mlperf_log_summary = '' + if os.path.isfile("mlperf_log_summary.txt"): + with open("mlperf_log_summary.txt", "r") as fp: + mlperf_log_summary = fp.read() + + if mlperf_log_summary != '': + state['app_mlperf_inference_log_summary'] = {} + for x in mlperf_log_summary.split('\n'): + y = x.split(': ') + if len(y) == 2: + state['app_mlperf_inference_log_summary'][y[0].strip().lower() + ] = y[1].strip() + + if not is_false(env.get("MLC_MLPERF_PRINT_SUMMARY", "")): + logger.info("\n") + logger.info(mlperf_log_summary) + + with open("measurements.json", "w") as fp: + json.dump(measurements, fp, indent=2) + + mlc_sut_info = {} + mlc_sut_info['system_name'] = state['MLC_SUT_META']['system_name'] + mlc_sut_info['implementation'] = env['MLC_MLPERF_IMPLEMENTATION'] + mlc_sut_info['device'] = env['MLC_MLPERF_DEVICE'] + mlc_sut_info['framework'] = state['MLC_SUT_META']['framework'] + mlc_sut_info['run_config'] = env['MLC_MLPERF_INFERENCE_SUT_RUN_CONFIG'] + with open(os.path.join(result_sut_folder_path, "mlc-sut-info.json"), "w") as fp: + json.dump(mlc_sut_info, fp, indent=2) + + system_meta = state['MLC_SUT_META'] + with open("system_meta.json", "w") as fp: + json.dump(system_meta, fp, indent=2) + + # map the custom model for inference result to the official model + # if custom model name is not set, the official model name will be + # mapped to itself + official_model_name = model + if "efficientnet" in official_model_name or "mobilenet" in official_model_name: + official_model_name = "resnet" + model_mapping = {model_full_name: official_model_name} + with open("model_mapping.json", "w") as fp: + json.dump(model_mapping, fp, indent=2) + + # Add to the state + state['app_mlperf_inference_measurements'] = copy.deepcopy( + measurements) + + if os.path.exists(env['MLC_MLPERF_CONF']): + shutil.copy(env['MLC_MLPERF_CONF'], 'mlperf.conf') + + if 
os.path.exists(env['MLC_MLPERF_USER_CONF']): + shutil.copy(env['MLC_MLPERF_USER_CONF'], 'user.conf') + + result, valid, power_result = mlperf_utils.get_result_from_log( + env['MLC_MLPERF_LAST_RELEASE'], model, scenario, output_dir, mode, env.get('MLC_MLPERF_INFERENCE_SOURCE_VERSION')) + + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] + ][model][scenario][mode] = result + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] + ][model][scenario][mode + '_valid'] = valid.get(mode, False) + + state['mlc-mlperf-inference-results-last'][mode] = result + state['mlc-mlperf-inference-results-last'][mode + + '_valid'] = valid.get(mode, False) + + # Power not included in v0.5, code should be added in future + + # Record basic host info + host_info = { + "os_version": platform.platform(), + "cpu_version": platform.processor(), + "python_version": sys.version, + } + try: + import importlib.metadata + mlc_version = importlib.metadata.version("mlc") + host_info["mlc_version"] = mlc_version + except Exception as e: + error = format(e) + mlc_version = "unknown" + + x = '' + if env.get('MLC_HOST_OS_FLAVOR', '') != '': + x += env['MLC_HOST_OS_FLAVOR'] + if env.get('MLC_HOST_OS_VERSION', '') != '': + x += ' ' + env['MLC_HOST_OS_VERSION'] + if x != '': + host_info['os_version_sys'] = x + + if env.get('MLC_HOST_SYSTEM_NAME', '') != '': + host_info['system_name'] = env['MLC_HOST_SYSTEM_NAME'] + + # Check CM automation repository + repo_name = 'mlcommons@mlperf-automations' + repo_hash = '' + r = mlc.access({'action': 'find', 'automation': 'repo', + 'item': 'mlcommons@mlperf-automations,9e97bb72b0474657'}) + if r['return'] == 0 and len(r['list']) == 1: + repo_path = r['list'][0].path + if os.path.isdir(repo_path): + repo_name = os.path.basename(repo_path) + + # Check dev + # if repo_name == 'cm4mlops': repo_name = 'mlcommons@cm4mlops' + + r = utils.run_system_cmd({ + 'path': repo_path, + 'cmd': 'git rev-parse HEAD'}) + if r['return'] == 0: + repo_hash = 
r['output'] + + host_info['mlc_repo_name'] = repo_name + host_info['mlc_repo_git_hash'] = repo_hash + + with open("mlc-host-info.json", "w") as fp: + fp.write(json.dumps(host_info, indent=2) + '\n') + + # Prepare README + if "cmd" in inp: + cmd = "mlc run script \\\n\t" + " \\\n\t".join(inp['cmd']) + xcmd = "mlc run script " + xsep + "\n\t" + \ + (" " + xsep + "\n\t").join(inp['cmd']) + else: + cmd = "" + xcmd = "" + + readme_init = "*Check [MLC MLPerf docs](https://docs.mlcommons.org/automotive) for more details.*\n\n" + + readme_body = "## Host platform\n\n* OS version: {}\n* CPU version: {}\n* Python version: {}\n* MLC version: {}\n\n".format(platform.platform(), + platform.processor(), sys.version, mlc_version) + + x = repo_name + if repo_hash != '': + x += ' --checkout=' + str(repo_hash) + + readme_body += "## MLC Run Command\n\nSee [MLC installation guide](https://docs.mlcommons.org/mlcflow/install/).\n\n" + \ + "```bash\npip install -U mlcflow\n\nmlc rm cache -f\n\nmlc pull repo {}\n\n{}\n```".format( + x, xcmd) + + readme_body += "\n*Note that if you want to use the [latest automation recipes](https://docs.mlcommons.org/inference) for MLPerf,\n" + \ + " you should simply reload {} without checkout and clean MLC cache as follows:*\n\n".format(repo_name) + \ + "```bash\nmlc rm repo {}\nmlc pull repo {}\nmlc rm cache -f\n\n```".format( + repo_name, repo_name) + + extra_readme_init = '' + extra_readme_body = '' + if env.get('MLC_MLPERF_README', '') == "yes": + extra_readme_body += "\n## Dependent MLPerf Automation scripts\n\n" + + script_tags = inp['tags'] + script_adr = inp.get('adr', {}) + + mlc_input = {'action': 'run', + 'automation': 'script', + 'tags': script_tags, + 'adr': script_adr, + 'print_deps': True, + 'env': env, + 'quiet': True, + 'silent': True, + 'fake_run': True + } + r = mlc.access(mlc_input) + if r['return'] > 0: + return r + + print_deps = r['new_state']['print_deps'] + count = 1 + for dep in print_deps: + extra_readme_body += "\n\n" + 
str(count) + ". `" + dep + "`\n" + count = count + 1 + + if state.get( + 'mlperf-inference-implementation') and state['mlperf-inference-implementation'].get('print_deps'): + + extra_readme_body += "\n## Dependent automation scripts for the MLPerf Automotive Implementation\n" + + print_deps = state['mlperf-inference-implementation']['print_deps'] + count = 1 + for dep in print_deps: + extra_readme_body += "\n\n" + \ + str(count) + ". `" + dep + "`\n" + count = count + 1 + + readme = readme_init + readme_body + extra_readme = extra_readme_init + extra_readme_body + + with open("README.md", "w") as fp: + fp.write(readme) + if extra_readme: + with open("README-extra.md", "w") as fp: + fp.write(extra_readme) + + if state.get( + 'mlperf-inference-implementation') and state['mlperf-inference-implementation'].get('version_info'): + env['MLC_MLPERF_RUN_JSON_VERSION_INFO_FILE'] = os.path.join( + output_dir, "mlc-version-info.json") + env['MLC_MLPERF_RUN_DEPS_GRAPH'] = os.path.join( + output_dir, "mlc-deps.png") + env['MLC_MLPERF_RUN_DEPS_MERMAID'] = os.path.join( + output_dir, "mlc-deps.mmd") + with open(os.path.join(output_dir, "mlc-version-info.json"), "w") as f: + f.write( + json.dumps( + state['mlperf-inference-implementation']['version_info'], + indent=2)) + + if not is_false(env.get('MLC_DUMP_SYSTEM_INFO', True)): + dump_script_output( + "detect,os", + env, + state, + 'new_env', + os.path.join( + output_dir, + "os_info.json"), mlc) + dump_script_output( + "detect,cpu", + env, + state, + 'new_env', + os.path.join( + output_dir, + "cpu_info.json"), mlc) + env['MLC_DUMP_RAW_PIP_FREEZE_FILE_PATH'] = os.path.join( + env['MLC_MLPERF_OUTPUT_DIR'], "pip_freeze.raw") + dump_script_output( + "dump,pip,freeze", + env, + state, + 'new_state', + os.path.join( + output_dir, + "pip_freeze.json"), mlc) + + return {'return': 0} + + +def dump_script_output(script_tags, env, state, output_key, dump_file, mlc): + + mlc_input = {'action': 'run', + 'automation': 'script', + 'tags': script_tags, + 'env': 
env, + 'state': state, + 'quiet': True, + 'silent': True, + } + r = mlc.access(mlc_input) + if r['return'] > 0: + return r + with open(dump_file, "w") as f: + f.write(json.dumps(r[output_key], indent=2)) + + return {'return': 0} diff --git a/script/app-mlperf-automotive/meta.yaml b/script/app-mlperf-automotive/meta.yaml index b389868a9..fc738e4cf 100644 --- a/script/app-mlperf-automotive/meta.yaml +++ b/script/app-mlperf-automotive/meta.yaml @@ -385,7 +385,7 @@ variations: docker: all_gpus: 'yes' base_image: nvcr.io/nvidia/pytorch:24.03-py3 - os_version: 24.03 + os_version: 22.02 v0.5: {} diff --git a/script/get-mlperf-inference-loadgen/customize.py b/script/get-mlperf-inference-loadgen/customize.py index 079a40876..eab681224 100644 --- a/script/get-mlperf-inference-loadgen/customize.py +++ b/script/get-mlperf-inference-loadgen/customize.py @@ -8,7 +8,7 @@ def preprocess(i): os_info = i['os_info'] env = i['env'] - if env.get('MLC_INFERENCE_AUTOMOTIVE_REPO', '') == "YES": + if is_true(env.get('MLC_INFERENCE_AUTOMOTIVE_REPO', '')): env['MLC_MLPERF_INFERENCE_SOURCE'] = env['MLC_MLPERF_AUTOMOTIVE_SOURCE'] if is_true(env.get('MLC_TMP_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP', From f28efa95da473d1620cdf3eb425e80bb195ec114 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Thu, 22 May 2025 03:26:09 +0530 Subject: [PATCH 54/83] fix typo --- script/app-mlperf-automotive/meta.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/app-mlperf-automotive/meta.yaml b/script/app-mlperf-automotive/meta.yaml index fc738e4cf..d0d120089 100644 --- a/script/app-mlperf-automotive/meta.yaml +++ b/script/app-mlperf-automotive/meta.yaml @@ -385,7 +385,7 @@ variations: docker: all_gpus: 'yes' base_image: nvcr.io/nvidia/pytorch:24.03-py3 - os_version: 22.02 + os_version: 22.04 v0.5: {} From 9400bef05c45fcd5f55c0e650e222fdff1739a58 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: 
Thu, 22 May 2025 14:03:53 +0530 Subject: [PATCH 55/83] add nuscenes root --- .../customize.py | 598 +++++++++--------- .../customize.py | 47 +- .../meta.yaml | 20 + 3 files changed, 343 insertions(+), 322 deletions(-) diff --git a/script/app-mlperf-automotive-mlcommons-python/customize.py b/script/app-mlperf-automotive-mlcommons-python/customize.py index 3f6160df7..88b65d5d8 100644 --- a/script/app-mlperf-automotive-mlcommons-python/customize.py +++ b/script/app-mlperf-automotive-mlcommons-python/customize.py @@ -1,299 +1,299 @@ -from mlc import utils -from utils import is_true -import os -import json -import shutil -import subprocess - - -def preprocess(i): - - os_info = i['os_info'] - env = i['env'] - state = i['state'] - script_path = i['run_script_input']['path'] - - logger = i['automation'].logger - - if is_true(env.get('MLC_MLPERF_SKIP_RUN', '')): - return {'return': 0} - - if is_true(env.get('MLC_RUN_DOCKER_CONTAINER', '')): - return {'return': 0} - - if is_true(env.get('MLC_MLPERF_POWER', '')): - power = "yes" - else: - power = "no" - - rerun = True if env.get("MLC_RERUN", "") != '' else False - - if 'MLC_MLPERF_LOADGEN_SCENARIO' not in env: - env['MLC_MLPERF_LOADGEN_SCENARIO'] = "Offline" - - if 'MLC_MLPERF_LOADGEN_MODE' not in env: - env['MLC_MLPERF_LOADGEN_MODE'] = "accuracy" - - if 'MLC_MODEL' not in env: - return { - 'return': 1, 'error': "Please select a variation specifying the model to run"} - - # if env['MLC_MODEL'] == "resnet50": - # cmd = "cp " + os.path.join(env['MLC_DATASET_AUX_PATH'], "val.txt") + " " + os.path.join(env['MLC_DATASET_PATH'], - # "val_map.txt") - # ret = os.system(cmd) - - env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] = " " + \ - env.get('MLC_MLPERF_LOADGEN_EXTRA_OPTIONS', '') + " " - - if 'MLC_MLPERF_LOADGEN_QPS' not in env: - env['MLC_MLPERF_LOADGEN_QPS_OPT'] = "" - else: - env['MLC_MLPERF_LOADGEN_QPS_OPT'] = " --qps " + \ - env['MLC_MLPERF_LOADGEN_QPS'] - - env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += 
env['MLC_MLPERF_LOADGEN_QPS_OPT'] - - if 'MLC_NUM_THREADS' not in env: - if 'MLC_MINIMIZE_THREADS' in env: - env['MLC_NUM_THREADS'] = str(int(env['MLC_HOST_CPU_TOTAL_CORES']) // - (int(env.get('MLC_HOST_CPU_SOCKETS', '1')) * int(env.get('MLC_HOST_CPU_TOTAL_CORES', '1')))) - else: - env['MLC_NUM_THREADS'] = env.get('MLC_HOST_CPU_TOTAL_CORES', '1') - - if env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '') != '' and not env.get( - 'MLC_MLPERF_MODEL_SKIP_BATCHING', False): - env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --max-batchsize " + \ - str(env['MLC_MLPERF_LOADGEN_MAX_BATCHSIZE']) - - if env.get('MLC_MLPERF_LOADGEN_BATCH_SIZE', '') != '': - env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --batch-size " + \ - str(env['MLC_MLPERF_LOADGEN_BATCH_SIZE']) - - if env.get('MLC_MLPERF_LOADGEN_QUERY_COUNT', '') != '' and not env.get( - 'MLC_TMP_IGNORE_MLPERF_QUERY_COUNT', False) and env.get('MLC_MLPERF_RUN_STYLE', '') != "valid": - env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --count " + \ - env['MLC_MLPERF_LOADGEN_QUERY_COUNT'] - - print("Using MLCommons Inference source from '" + - env['MLC_MLPERF_INFERENCE_SOURCE'] + "'") - - if 'MLC_MLPERF_CONF' not in env: - env['MLC_MLPERF_CONF'] = os.path.join( - env['MLC_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") - - x = "" if os_info['platform'] == 'windows' else "'" - if "llama2-70b" in env['MLC_MODEL']: - env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf-conf " + \ - x + env['MLC_MLPERF_CONF'] + x - else: - env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf_conf " + \ - x + env['MLC_MLPERF_CONF'] + x - - env['MODEL_DIR'] = env.get('MLC_ML_MODEL_PATH') - if not env['MODEL_DIR']: - env['MODEL_DIR'] = os.path.dirname( - env.get( - 'MLC_MLPERF_CUSTOM_MODEL_PATH', - env.get('MLC_ML_MODEL_FILE_WITH_PATH'))) - - RUN_CMD = "" - - scenario = env['MLC_MLPERF_LOADGEN_SCENARIO'] - scenario_extra_options = '' - - NUM_THREADS = env['MLC_NUM_THREADS'] - if int(NUM_THREADS) > 2 and env['MLC_MLPERF_DEVICE'] == "gpu": - NUM_THREADS = "2" # Don't 
use more than 2 threads when run on GPU - - if env['MLC_MODEL'] in ['resnet50', 'retinanet', 'stable-diffusion-xl']: - scenario_extra_options += " --threads " + NUM_THREADS - - ml_model_name = env['MLC_MODEL'] - if 'MLC_MLPERF_USER_CONF' in env: - user_conf_path = env['MLC_MLPERF_USER_CONF'] - x = "" if os_info['platform'] == 'windows' else "'" - scenario_extra_options += " --user_conf " + x + user_conf_path + x - - mode = env['MLC_MLPERF_LOADGEN_MODE'] - mode_extra_options = "" - - # Grigori blocked for ABTF to preprocess data set on the fly for now - # we can later move it to a separate script to preprocess data set - -# if 'MLC_DATASET_PREPROCESSED_PATH' in env and env['MLC_MODEL'] in [ 'resnet50', 'retinanet' ]: -# #dataset_options = " --use_preprocessed_dataset --preprocessed_dir "+env['MLC_DATASET_PREPROCESSED_PATH'] -# if env.get('MLC_MLPERF_LAST_RELEASE') not in [ "v2.0", "v2.1" ]: -# dataset_options = " --use_preprocessed_dataset --cache_dir "+env['MLC_DATASET_PREPROCESSED_PATH'] -# else: -# dataset_options = "" -# if env['MLC_MODEL'] == "retinanet": -# dataset_options += " --dataset-list "+ env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] -# elif env['MLC_MODEL'] == "resnet50": -# dataset_options += " --dataset-list "+ os.path.join(env['MLC_DATASET_AUX_PATH'], "val.txt") -# env['DATA_DIR'] = env.get('MLC_DATASET_PREPROCESSED_PATH') -# else: -# if 'MLC_DATASET_PREPROCESSED_PATH' in env: -# env['DATA_DIR'] = env.get('MLC_DATASET_PREPROCESSED_PATH') -# else: -# env['DATA_DIR'] = env.get('MLC_DATASET_PATH') -# dataset_options = '' - - # Grigori added for ABTF -# dataset_path = env.get('MLC_DATASET_PATH') -# env['DATA_DIR'] = dataset_path - -# dataset_options = " --dataset-list " + env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] -# dataset_options += " --cache_dir " + os.path.join(script_path, 'preprocessed-dataset') - - dataset_options = '' - - if env.get('MLC_MLPERF_EXTRA_DATASET_ARGS', '') != '': - dataset_options += " " + env['MLC_MLPERF_EXTRA_DATASET_ARGS'] - - if mode 
== "accuracy": - mode_extra_options += " --accuracy" - if env.get('MLC_MODEL', '') == "retinanet": - env['MLC_OUTPUT_PREDICTIONS_PATH'] = os.path.join( - env['MLC_DATASET_MLCOMMONS_COGNATA_PATH'], - env['MLC_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS'], - 'Cognata_Camera_01_8M_png', - 'output') - - elif mode == "performance": - pass - - elif mode == "compliance": - - audit_full_path = env['MLC_MLPERF_INFERENCE_AUDIT_PATH'] - mode_extra_options = " --audit '" + audit_full_path + "'" - - if env.get('MLC_MLPERF_OUTPUT_DIR', '') == '': - env['MLC_MLPERF_OUTPUT_DIR'] = os.getcwd() - - mlperf_implementation = env.get('MLC_MLPERF_IMPLEMENTATION', 'reference') - - # Generate CMD - - # Grigori updated for ABTF demo -# cmd, run_dir = get_run_cmd(os_info, env, scenario_extra_options, mode_extra_options, dataset_options, mlperf_implementation) - cmd, run_dir = get_run_cmd_reference( - os_info, env, scenario_extra_options, mode_extra_options, dataset_options, logger, script_path) - - if env.get('MLC_NETWORK_LOADGEN', '') == "lon": - - run_cmd = i['state']['mlperf_inference_run_cmd'] - env['MLC_SSH_RUN_COMMANDS'] = [] - env['MLC_SSH_RUN_COMMANDS'].append( - run_cmd.replace( - "--network=lon", - "--network=sut") + " &") - - env['MLC_MLPERF_RUN_CMD'] = cmd - env['MLC_RUN_DIR'] = run_dir - env['MLC_RUN_CMD'] = cmd - env['CK_PROGRAM_TMP_DIR'] = env.get('MLC_ML_MODEL_PATH') # for tvm - - if env.get('MLC_HOST_PLATFORM_FLAVOR', '') == "arm64": - env['MLC_HOST_PLATFORM_FLAVOR'] = "aarch64" - - if env.get('MLC_MODEL', '') == "retinanet": - if not env.get('MLC_COGNATA_ACCURACY_DUMP_FILE'): - env['MLC_COGNATA_ACCURACY_DUMP_FILE'] = os.path.join( - env['OUTPUT_DIR'], "accuracy.txt") - - return {'return': 0} - - -def get_run_cmd_reference(os_info, env, scenario_extra_options, - mode_extra_options, dataset_options, logger, script_path=None): - - q = '"' if os_info['platform'] == 'windows' else "'" - - ########################################################################## - # Grigori added 
for ABTF demo - - if env['MLC_MODEL'] in ['retinanet']: - - run_dir = os.path.join(script_path, 'ref') - - env['RUN_DIR'] = run_dir - - env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR'] - - cognata_dataset_path = env['MLC_DATASET_MLCOMMONS_COGNATA_PATH'] -# cognata_dataset_path = env['MLC_DATASET_PATH'] # Using open images -# dataset for some tests - - path_to_model = env.get( - 'MLC_MLPERF_CUSTOM_MODEL_PATH', - env.get( - 'MLC_ML_MODEL_FILE_WITH_PATH', - env.get('MLC_ML_MODEL_CODE_WITH_PATH'))) - env['MODEL_FILE'] = path_to_model - - cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " " + os.path.join(run_dir, "python", "main.py") + " --profile " + env['MLC_MODEL'] + "-" + env['MLC_MLPERF_BACKEND'] + \ - " --model=" + q + path_to_model + q + \ - " --dataset=" + env["MLC_MLPERF_VISION_DATASET_OPTION"] + \ - " --dataset-path=" + q + cognata_dataset_path + q + \ - " --cache_dir=" + q + os.path.join(script_path, 'tmp-preprocessed-dataset') + q + \ - " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + " " + \ - " --output " + q + env['OUTPUT_DIR'] + q + " " + \ - env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ - scenario_extra_options + mode_extra_options + dataset_options - - elif env['MLC_MODEL'] in ['bevformer']: - run_dir = env['MLC_MLPERF_AUTOMOTIVE_BEVFORMER_PATH'] - - env['RUN_DIR'] = run_dir - - env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR'] - - if env['MLC_MLPERF_BACKEND'] != "onnx": - logger.warning( - "Unsupported backend {MLC_MLPERF_BACKEND}, defaulting to onnx") - env['MLC_MLPERF_BACKEND'] = "onnx" - - config_path = os.path.join( - run_dir, - "projects", - "configs", - "bevformer", - "bevformer_tiny.py") - - cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --backend {env['MLC_MLPERF_BACKEND']} --dataset nuscenes --dataset-path {env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH']} --checkpoint {env['MLC_ML_MODEL_BEVFORMER_PATH']} --config {config_path}""" - - elif env['MLC_MODEL'] in ['ssd-resnet50']: - run_dir = 
env['MLC_MLPERF_AUTOMOTIVE_SSD_RESNET50_PATH'] - - env['RUN_DIR'] = run_dir - - env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR'] - - config_path = "baseline_8MP_ss_scales_fm1_5x5_all" - - cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --backend {env['MLC_MLPERF_BACKEND']} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_SSD_PATH']} --config {config_path}""" - - elif env['MLC_MODEL'] in ['deeplab_v3+']: - run_dir = env['MLC_MLPERF_AUTOMOTIVE_DEEPLABV3PLUS_PATH'] - - env['RUN_DIR'] = run_dir - - env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR'] - - cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --backend {env['MLC_MLPERF_BACKEND']} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH']} --config {config_path}""" - - ########################################################################## - - return cmd, run_dir - - -def postprocess(i): - - env = i['env'] - - state = i['state'] - - inp = i['input'] - - return {'return': 0} +from mlc import utils +from utils import is_true +import os +import json +import shutil +import subprocess + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + state = i['state'] + script_path = i['run_script_input']['path'] + + logger = i['automation'].logger + + if is_true(env.get('MLC_MLPERF_SKIP_RUN', '')): + return {'return': 0} + + if is_true(env.get('MLC_RUN_DOCKER_CONTAINER', '')): + return {'return': 0} + + if is_true(env.get('MLC_MLPERF_POWER', '')): + power = "yes" + else: + power = "no" + + rerun = True if env.get("MLC_RERUN", "") != '' else False + + if 'MLC_MLPERF_LOADGEN_SCENARIO' not in env: + env['MLC_MLPERF_LOADGEN_SCENARIO'] = "Offline" + + if 'MLC_MLPERF_LOADGEN_MODE' not in env: + env['MLC_MLPERF_LOADGEN_MODE'] = "accuracy" + + if 'MLC_MODEL' not in env: + return { + 'return': 1, 'error': "Please select a 
variation specifying the model to run"} + + # if env['MLC_MODEL'] == "resnet50": + # cmd = "cp " + os.path.join(env['MLC_DATASET_AUX_PATH'], "val.txt") + " " + os.path.join(env['MLC_DATASET_PATH'], + # "val_map.txt") + # ret = os.system(cmd) + + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] = " " + \ + env.get('MLC_MLPERF_LOADGEN_EXTRA_OPTIONS', '') + " " + + if 'MLC_MLPERF_LOADGEN_QPS' not in env: + env['MLC_MLPERF_LOADGEN_QPS_OPT'] = "" + else: + env['MLC_MLPERF_LOADGEN_QPS_OPT'] = " --qps " + \ + env['MLC_MLPERF_LOADGEN_QPS'] + + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += env['MLC_MLPERF_LOADGEN_QPS_OPT'] + + if 'MLC_NUM_THREADS' not in env: + if 'MLC_MINIMIZE_THREADS' in env: + env['MLC_NUM_THREADS'] = str(int(env['MLC_HOST_CPU_TOTAL_CORES']) // + (int(env.get('MLC_HOST_CPU_SOCKETS', '1')) * int(env.get('MLC_HOST_CPU_TOTAL_CORES', '1')))) + else: + env['MLC_NUM_THREADS'] = env.get('MLC_HOST_CPU_TOTAL_CORES', '1') + + if env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '') != '' and not env.get( + 'MLC_MLPERF_MODEL_SKIP_BATCHING', False): + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --max-batchsize " + \ + str(env['MLC_MLPERF_LOADGEN_MAX_BATCHSIZE']) + + if env.get('MLC_MLPERF_LOADGEN_BATCH_SIZE', '') != '': + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --batch-size " + \ + str(env['MLC_MLPERF_LOADGEN_BATCH_SIZE']) + + if env.get('MLC_MLPERF_LOADGEN_QUERY_COUNT', '') != '' and not env.get( + 'MLC_TMP_IGNORE_MLPERF_QUERY_COUNT', False) and env.get('MLC_MLPERF_RUN_STYLE', '') != "valid": + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --count " + \ + env['MLC_MLPERF_LOADGEN_QUERY_COUNT'] + + print("Using MLCommons Inference source from '" + + env['MLC_MLPERF_INFERENCE_SOURCE'] + "'") + + if 'MLC_MLPERF_CONF' not in env: + env['MLC_MLPERF_CONF'] = os.path.join( + env['MLC_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") + + x = "" if os_info['platform'] == 'windows' else "'" + if "llama2-70b" in env['MLC_MODEL']: + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf-conf " + \ + 
x + env['MLC_MLPERF_CONF'] + x + else: + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf_conf " + \ + x + env['MLC_MLPERF_CONF'] + x + + env['MODEL_DIR'] = env.get('MLC_ML_MODEL_PATH') + if not env['MODEL_DIR']: + env['MODEL_DIR'] = os.path.dirname( + env.get( + 'MLC_MLPERF_CUSTOM_MODEL_PATH', + env.get('MLC_ML_MODEL_FILE_WITH_PATH'))) + + RUN_CMD = "" + + scenario = env['MLC_MLPERF_LOADGEN_SCENARIO'] + scenario_extra_options = '' + + NUM_THREADS = env['MLC_NUM_THREADS'] + if int(NUM_THREADS) > 2 and env['MLC_MLPERF_DEVICE'] == "gpu": + NUM_THREADS = "2" # Don't use more than 2 threads when run on GPU + + if env['MLC_MODEL'] in ['resnet50', 'retinanet', 'stable-diffusion-xl']: + scenario_extra_options += " --threads " + NUM_THREADS + + ml_model_name = env['MLC_MODEL'] + if 'MLC_MLPERF_USER_CONF' in env: + user_conf_path = env['MLC_MLPERF_USER_CONF'] + x = "" if os_info['platform'] == 'windows' else "'" + scenario_extra_options += " --user_conf " + x + user_conf_path + x + + mode = env['MLC_MLPERF_LOADGEN_MODE'] + mode_extra_options = "" + + # Grigori blocked for ABTF to preprocess data set on the fly for now + # we can later move it to a separate script to preprocess data set + +# if 'MLC_DATASET_PREPROCESSED_PATH' in env and env['MLC_MODEL'] in [ 'resnet50', 'retinanet' ]: +# #dataset_options = " --use_preprocessed_dataset --preprocessed_dir "+env['MLC_DATASET_PREPROCESSED_PATH'] +# if env.get('MLC_MLPERF_LAST_RELEASE') not in [ "v2.0", "v2.1" ]: +# dataset_options = " --use_preprocessed_dataset --cache_dir "+env['MLC_DATASET_PREPROCESSED_PATH'] +# else: +# dataset_options = "" +# if env['MLC_MODEL'] == "retinanet": +# dataset_options += " --dataset-list "+ env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] +# elif env['MLC_MODEL'] == "resnet50": +# dataset_options += " --dataset-list "+ os.path.join(env['MLC_DATASET_AUX_PATH'], "val.txt") +# env['DATA_DIR'] = env.get('MLC_DATASET_PREPROCESSED_PATH') +# else: +# if 'MLC_DATASET_PREPROCESSED_PATH' in env: +# 
env['DATA_DIR'] = env.get('MLC_DATASET_PREPROCESSED_PATH') +# else: +# env['DATA_DIR'] = env.get('MLC_DATASET_PATH') +# dataset_options = '' + + # Grigori added for ABTF +# dataset_path = env.get('MLC_DATASET_PATH') +# env['DATA_DIR'] = dataset_path + +# dataset_options = " --dataset-list " + env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] +# dataset_options += " --cache_dir " + os.path.join(script_path, 'preprocessed-dataset') + + dataset_options = '' + + if env.get('MLC_MLPERF_EXTRA_DATASET_ARGS', '') != '': + dataset_options += " " + env['MLC_MLPERF_EXTRA_DATASET_ARGS'] + + if mode == "accuracy": + mode_extra_options += " --accuracy" + if env.get('MLC_MODEL', '') == "retinanet": + env['MLC_OUTPUT_PREDICTIONS_PATH'] = os.path.join( + env['MLC_DATASET_MLCOMMONS_COGNATA_PATH'], + env['MLC_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS'], + 'Cognata_Camera_01_8M_png', + 'output') + + elif mode == "performance": + pass + + elif mode == "compliance": + + audit_full_path = env['MLC_MLPERF_INFERENCE_AUDIT_PATH'] + mode_extra_options = " --audit '" + audit_full_path + "'" + + if env.get('MLC_MLPERF_OUTPUT_DIR', '') == '': + env['MLC_MLPERF_OUTPUT_DIR'] = os.getcwd() + + mlperf_implementation = env.get('MLC_MLPERF_IMPLEMENTATION', 'reference') + + # Generate CMD + + # Grigori updated for ABTF demo +# cmd, run_dir = get_run_cmd(os_info, env, scenario_extra_options, mode_extra_options, dataset_options, mlperf_implementation) + cmd, run_dir = get_run_cmd_reference( + os_info, env, scenario_extra_options, mode_extra_options, dataset_options, logger, script_path) + + if env.get('MLC_NETWORK_LOADGEN', '') == "lon": + + run_cmd = i['state']['mlperf_inference_run_cmd'] + env['MLC_SSH_RUN_COMMANDS'] = [] + env['MLC_SSH_RUN_COMMANDS'].append( + run_cmd.replace( + "--network=lon", + "--network=sut") + " &") + + env['MLC_MLPERF_RUN_CMD'] = cmd + env['MLC_RUN_DIR'] = run_dir + env['MLC_RUN_CMD'] = cmd + env['CK_PROGRAM_TMP_DIR'] = env.get('MLC_ML_MODEL_PATH') # for tvm + + if 
env.get('MLC_HOST_PLATFORM_FLAVOR', '') == "arm64": + env['MLC_HOST_PLATFORM_FLAVOR'] = "aarch64" + + if env.get('MLC_MODEL', '') == "retinanet": + if not env.get('MLC_COGNATA_ACCURACY_DUMP_FILE'): + env['MLC_COGNATA_ACCURACY_DUMP_FILE'] = os.path.join( + env['OUTPUT_DIR'], "accuracy.txt") + + return {'return': 0} + + +def get_run_cmd_reference(os_info, env, scenario_extra_options, + mode_extra_options, dataset_options, logger, script_path=None): + + q = '"' if os_info['platform'] == 'windows' else "'" + + ########################################################################## + # Grigori added for ABTF demo + + if env['MLC_MODEL'] in ['retinanet']: + + run_dir = os.path.join(script_path, 'ref') + + env['RUN_DIR'] = run_dir + + env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR'] + + cognata_dataset_path = env['MLC_DATASET_MLCOMMONS_COGNATA_PATH'] +# cognata_dataset_path = env['MLC_DATASET_PATH'] # Using open images +# dataset for some tests + + path_to_model = env.get( + 'MLC_MLPERF_CUSTOM_MODEL_PATH', + env.get( + 'MLC_ML_MODEL_FILE_WITH_PATH', + env.get('MLC_ML_MODEL_CODE_WITH_PATH'))) + env['MODEL_FILE'] = path_to_model + + cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " " + os.path.join(run_dir, "python", "main.py") + " --profile " + env['MLC_MODEL'] + "-" + env['MLC_MLPERF_BACKEND'] + \ + " --model=" + q + path_to_model + q + \ + " --dataset=" + env["MLC_MLPERF_VISION_DATASET_OPTION"] + \ + " --dataset-path=" + q + cognata_dataset_path + q + \ + " --cache_dir=" + q + os.path.join(script_path, 'tmp-preprocessed-dataset') + q + \ + " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + " " + \ + " --output " + q + env['OUTPUT_DIR'] + q + " " + \ + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + scenario_extra_options + mode_extra_options + dataset_options + + elif env['MLC_MODEL'] in ['bevformer']: + run_dir = env['MLC_MLPERF_AUTOMOTIVE_BEVFORMER_PATH'] + + env['RUN_DIR'] = run_dir + + env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR'] + + if env['MLC_MLPERF_BACKEND'] != 
"onnx": + logger.warning( + "Unsupported backend {MLC_MLPERF_BACKEND}, defaulting to onnx") + env['MLC_MLPERF_BACKEND'] = "onnx" + + config_path = os.path.join( + run_dir, + "projects", + "configs", + "bevformer", + "bevformer_tiny.py") + + cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --backend {env['MLC_MLPERF_BACKEND']} --dataset nuscenes --nuscenes-root {os.path.dirname(env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'].rstrip("/"))} --dataset-path {env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH']} --checkpoint {env['MLC_ML_MODEL_BEVFORMER_PATH']} --config {config_path}""" + + elif env['MLC_MODEL'] in ['ssd-resnet50']: + run_dir = env['MLC_MLPERF_AUTOMOTIVE_SSD_RESNET50_PATH'] + + env['RUN_DIR'] = run_dir + + env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR'] + + config_path = "baseline_8MP_ss_scales_fm1_5x5_all" + + cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --backend {env['MLC_MLPERF_BACKEND']} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_SSD_PATH']} --config {config_path}""" + + elif env['MLC_MODEL'] in ['deeplab_v3+']: + run_dir = env['MLC_MLPERF_AUTOMOTIVE_DEEPLABV3PLUS_PATH'] + + env['RUN_DIR'] = run_dir + + env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR'] + + cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --backend {env['MLC_MLPERF_BACKEND']} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH']} --config {config_path}""" + + ########################################################################## + + return cmd, run_dir + + +def postprocess(i): + + env = i['env'] + + state = i['state'] + + inp = i['input'] + + return {'return': 0} diff --git a/script/get-preprocessed-dataset-nuscenes/customize.py b/script/get-preprocessed-dataset-nuscenes/customize.py index 6d9e7799a..1ae178ce8 100644 --- 
a/script/get-preprocessed-dataset-nuscenes/customize.py +++ b/script/get-preprocessed-dataset-nuscenes/customize.py @@ -1,23 +1,24 @@ -from mlc import utils -import os -import shutil - - -def preprocess(i): - - env = i['env'] - - if env.get('MLC_NUSCENES_DATASET_TYPE', '') == "prebuilt": - env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" - - return {'return': 0} - - -def postprocess(i): - env = i['env'] - - env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'] = os.path.join( - env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'], - env['MLC_DATASET_NUSCENES_EXTRACTED_FOLDER_NAME']) - - return {'return': 0} +from mlc import utils +import os +import shutil + + +def preprocess(i): + + env = i['env'] + + if env.get('MLC_NUSCENES_DATASET_TYPE', '') == "prebuilt": + env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" + + return {'return': 0} + + +def postprocess(i): + env = i['env'] + + env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'] = os.path.join( + env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'], + env['MLC_DATASET_NUSCENES_EXTRACTED_FOLDER_NAME']) + if env.get('MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH', '') != '': + shutil.copy(env['MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH'], os.path.join(env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'], env['MLC_DATASET_NUSCENES_SCENE_PICKLE_FILENAME'])) + return {'return': 0} diff --git a/script/get-preprocessed-dataset-nuscenes/meta.yaml b/script/get-preprocessed-dataset-nuscenes/meta.yaml index 707f21726..451acf80d 100644 --- a/script/get-preprocessed-dataset-nuscenes/meta.yaml +++ b/script/get-preprocessed-dataset-nuscenes/meta.yaml @@ -67,6 +67,26 @@ variations: - MLC_DOWNLOAD_URL env: MLC_DOWNLOAD_SRC: mlcommons + mlc,validation: + prehook_deps: + - enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - 'yes' + extra_cache_tags: nuscenes,dataset,scene_lengths + force_cache: true + names: + - dae + tags: download-and-extract + force_env_keys: + - MLC_OUTDIRNAME + env: + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH + 
MLC_EXTRACT_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH + MLC_DATASET_NUSCENES_SCENE_PICKLE_FILENAME: scene_lengths.pkl + MLC_DOWNLOAD_URL: mlc-nuscenes:nuscenes_dataset/scene_lengths.pkl + update_tags_from_env_with_prefix: + _url.: + - MLC_DOWNLOAD_URL rclone: group: download-tool add_deps_recursive: From 4c5feed5772e1b336353f34312c8cf705ee050a9 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 22 May 2025 08:34:16 +0000 Subject: [PATCH 56/83] [Automated Commit] Format Codebase [skip ci] --- script/get-preprocessed-dataset-nuscenes/customize.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/script/get-preprocessed-dataset-nuscenes/customize.py b/script/get-preprocessed-dataset-nuscenes/customize.py index 1ae178ce8..7b6365353 100644 --- a/script/get-preprocessed-dataset-nuscenes/customize.py +++ b/script/get-preprocessed-dataset-nuscenes/customize.py @@ -20,5 +20,9 @@ def postprocess(i): env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'], env['MLC_DATASET_NUSCENES_EXTRACTED_FOLDER_NAME']) if env.get('MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH', '') != '': - shutil.copy(env['MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH'], os.path.join(env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'], env['MLC_DATASET_NUSCENES_SCENE_PICKLE_FILENAME'])) + shutil.copy( + env['MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH'], + os.path.join( + env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'], + env['MLC_DATASET_NUSCENES_SCENE_PICKLE_FILENAME'])) return {'return': 0} From 48a39745a6328e88d6304bd11399c76b2297ec4e Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Thu, 22 May 2025 15:04:10 +0530 Subject: [PATCH 57/83] fix path --- script/get-preprocessed-dataset-nuscenes/customize.py | 2 +- script/get-preprocessed-dataset-nuscenes/meta.yaml | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/script/get-preprocessed-dataset-nuscenes/customize.py 
b/script/get-preprocessed-dataset-nuscenes/customize.py index 1ae178ce8..ba3d1398d 100644 --- a/script/get-preprocessed-dataset-nuscenes/customize.py +++ b/script/get-preprocessed-dataset-nuscenes/customize.py @@ -20,5 +20,5 @@ def postprocess(i): env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'], env['MLC_DATASET_NUSCENES_EXTRACTED_FOLDER_NAME']) if env.get('MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH', '') != '': - shutil.copy(env['MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH'], os.path.join(env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'], env['MLC_DATASET_NUSCENES_SCENE_PICKLE_FILENAME'])) + shutil.copy(os.path.join(env['MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH'],env['MLC_DATASET_NUSCENES_SCENE_PICKLE_FILENAME']), os.path.join(os.path.dirname(env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'].rstrip("/")), env['MLC_DATASET_NUSCENES_SCENE_PICKLE_FILENAME'])) return {'return': 0} diff --git a/script/get-preprocessed-dataset-nuscenes/meta.yaml b/script/get-preprocessed-dataset-nuscenes/meta.yaml index 451acf80d..5599cf2c4 100644 --- a/script/get-preprocessed-dataset-nuscenes/meta.yaml +++ b/script/get-preprocessed-dataset-nuscenes/meta.yaml @@ -68,6 +68,8 @@ variations: env: MLC_DOWNLOAD_SRC: mlcommons mlc,validation: + env: + MLC_DATASET_NUSCENES_SCENE_PICKLE_FILENAME: scene_lengths.pkl prehook_deps: - enable_if_env: MLC_TMP_REQUIRE_DOWNLOAD: @@ -75,14 +77,13 @@ variations: extra_cache_tags: nuscenes,dataset,scene_lengths force_cache: true names: - - dae + - dae_sl tags: download-and-extract force_env_keys: - MLC_OUTDIRNAME env: MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH MLC_EXTRACT_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH - MLC_DATASET_NUSCENES_SCENE_PICKLE_FILENAME: scene_lengths.pkl MLC_DOWNLOAD_URL: mlc-nuscenes:nuscenes_dataset/scene_lengths.pkl update_tags_from_env_with_prefix: _url.: From 587f4532519212ba7ecc29b58acf01501ba3befa Mon Sep 17 00:00:00 2001 From: 
"github-actions[bot]" Date: Thu, 22 May 2025 09:36:07 +0000 Subject: [PATCH 58/83] [Automated Commit] Format Codebase [skip ci] --- script/get-preprocessed-dataset-nuscenes/customize.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/script/get-preprocessed-dataset-nuscenes/customize.py b/script/get-preprocessed-dataset-nuscenes/customize.py index ba3d1398d..374aaba09 100644 --- a/script/get-preprocessed-dataset-nuscenes/customize.py +++ b/script/get-preprocessed-dataset-nuscenes/customize.py @@ -20,5 +20,12 @@ def postprocess(i): env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'], env['MLC_DATASET_NUSCENES_EXTRACTED_FOLDER_NAME']) if env.get('MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH', '') != '': - shutil.copy(os.path.join(env['MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH'],env['MLC_DATASET_NUSCENES_SCENE_PICKLE_FILENAME']), os.path.join(os.path.dirname(env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'].rstrip("/")), env['MLC_DATASET_NUSCENES_SCENE_PICKLE_FILENAME'])) + shutil.copy( + os.path.join( + env['MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH'], + env['MLC_DATASET_NUSCENES_SCENE_PICKLE_FILENAME']), + os.path.join( + os.path.dirname( + env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'].rstrip("/")), + env['MLC_DATASET_NUSCENES_SCENE_PICKLE_FILENAME'])) return {'return': 0} From e581a6954b8f8bf0660e1bf1cd1fc11df458e6ed Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Thu, 22 May 2025 16:40:34 +0530 Subject: [PATCH 59/83] Update run command generations --- script/app-mlperf-automotive-mlcommons-python/customize.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/script/app-mlperf-automotive-mlcommons-python/customize.py b/script/app-mlperf-automotive-mlcommons-python/customize.py index 88b65d5d8..285174fc4 100644 --- a/script/app-mlperf-automotive-mlcommons-python/customize.py +++ b/script/app-mlperf-automotive-mlcommons-python/customize.py @@ -261,7 
+261,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, "bevformer", "bevformer_tiny.py") - cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --backend {env['MLC_MLPERF_BACKEND']} --dataset nuscenes --nuscenes-root {os.path.dirname(env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'].rstrip("/"))} --dataset-path {env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH']} --checkpoint {env['MLC_ML_MODEL_BEVFORMER_PATH']} --config {config_path}""" + cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {env['MLC_MLPERF_BACKEND']} --dataset nuscenes --nuscenes-root {os.path.dirname(env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'].rstrip("/"))} --dataset-path {env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH']} --checkpoint {env['MLC_ML_MODEL_BEVFORMER_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}""" elif env['MLC_MODEL'] in ['ssd-resnet50']: run_dir = env['MLC_MLPERF_AUTOMOTIVE_SSD_RESNET50_PATH'] @@ -272,7 +272,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, config_path = "baseline_8MP_ss_scales_fm1_5x5_all" - cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --backend {env['MLC_MLPERF_BACKEND']} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_SSD_PATH']} --config {config_path}""" + cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {env['MLC_MLPERF_BACKEND']} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_SSD_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}""" elif 
env['MLC_MODEL'] in ['deeplab_v3+']: run_dir = env['MLC_MLPERF_AUTOMOTIVE_DEEPLABV3PLUS_PATH'] @@ -281,7 +281,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR'] - cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --backend {env['MLC_MLPERF_BACKEND']} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH']} --config {config_path}""" + cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {env['MLC_MLPERF_BACKEND']} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}""" ########################################################################## From 5ee32b386e0e669f7ac40213efca5dcee81fb7b1 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Thu, 22 May 2025 16:50:29 +0530 Subject: [PATCH 60/83] remove mlperf conf --- .../customize.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/script/app-mlperf-automotive-mlcommons-python/customize.py b/script/app-mlperf-automotive-mlcommons-python/customize.py index 285174fc4..39eb136e6 100644 --- a/script/app-mlperf-automotive-mlcommons-python/customize.py +++ b/script/app-mlperf-automotive-mlcommons-python/customize.py @@ -78,18 +78,6 @@ def preprocess(i): print("Using MLCommons Inference source from '" + env['MLC_MLPERF_INFERENCE_SOURCE'] + "'") - if 'MLC_MLPERF_CONF' not in env: - env['MLC_MLPERF_CONF'] = os.path.join( - env['MLC_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") - - x = "" if os_info['platform'] == 'windows' else "'" - if "llama2-70b" in env['MLC_MODEL']: - 
env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf-conf " + \ - x + env['MLC_MLPERF_CONF'] + x - else: - env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf_conf " + \ - x + env['MLC_MLPERF_CONF'] + x - env['MODEL_DIR'] = env.get('MLC_ML_MODEL_PATH') if not env['MODEL_DIR']: env['MODEL_DIR'] = os.path.dirname( From 8cf9bc5d8e397b30c7ec5af0b65469a79f89f554 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Thu, 22 May 2025 17:07:00 +0530 Subject: [PATCH 61/83] fix batch size --- .../app-mlperf-automotive-mlcommons-python/meta.yaml | 5 +++++ script/app-mlperf-automotive/meta.yaml | 12 ++++++++++-- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/script/app-mlperf-automotive-mlcommons-python/meta.yaml b/script/app-mlperf-automotive-mlcommons-python/meta.yaml index 4646a0fdd..16053cd06 100644 --- a/script/app-mlperf-automotive-mlcommons-python/meta.yaml +++ b/script/app-mlperf-automotive-mlcommons-python/meta.yaml @@ -611,3 +611,8 @@ variations: mvp_demo: env: + + batch_size.#: + group: batch-size + env: + MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: "#" \ No newline at end of file diff --git a/script/app-mlperf-automotive/meta.yaml b/script/app-mlperf-automotive/meta.yaml index d0d120089..ff2fbf674 100644 --- a/script/app-mlperf-automotive/meta.yaml +++ b/script/app-mlperf-automotive/meta.yaml @@ -303,7 +303,7 @@ variations: - "${{ MLC_PREPROCESSED_DATASET_COGNATA_PATH }}:${{ MLC_PREPROCESSED_DATASET_COGNATA_PATH }}" - "${{ MLC_ML_MODEL_SSD_PATH }}:${{ MLC_ML_MODEL_SSD_PATH }}" add_deps_recursive: - mlperf-automotive-implementation: + abtf-inference-implementation: tags: _ssd-resnet50 posthook_deps: - enable_if_env: @@ -346,7 +346,7 @@ variations: - "${{ MLC_PREPROCESSED_DATASET_COGNATA_PATH }}:${{ MLC_PREPROCESSED_DATASET_COGNATA_PATH }}" - "${{ MLC_ML_MODEL_DEEPLABV3_PLUS_PATH }}:${{ MLC_ML_MODEL_DEEPLABV3_PLUS_PATH }}" add_deps_recursive: - mlperf-automotive-implementation: + abtf-inference-implementation: tags: 
_deeplab-v3+ posthook_deps: - enable_if_env: @@ -447,3 +447,11 @@ variations: MLC_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES: Cognata_Camera_01_8M MLC_ABTF_ML_MODEL_TRAINING_FORCE_COGNATA_LABELS: 'yes' MLC_ABTF_ML_MODEL_SKIP_WARMUP: 'yes' + + batch_size.#: + group: batch_size + env: + MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: '#' + add_deps_recursive: + abtf-inference-implementation: + tags: _batch_size.# \ No newline at end of file From 867a605967213979221390216a3d77e7fe41a597 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Thu, 22 May 2025 17:23:05 +0530 Subject: [PATCH 62/83] add posthook dependencies --- script/app-mlperf-automotive/meta.yaml | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/script/app-mlperf-automotive/meta.yaml b/script/app-mlperf-automotive/meta.yaml index ff2fbf674..70fef3d4b 100644 --- a/script/app-mlperf-automotive/meta.yaml +++ b/script/app-mlperf-automotive/meta.yaml @@ -94,6 +94,28 @@ deps: - tags: get,mlperf,automotive,utils +posthook_deps: + - tags: get,mlperf,sut,description #populate system meta information like framework + - tags: get,platform,details + enable_if_env: + MLC_GET_PLATFORM_DETAILS: + - yes + skip_if_env: + MLC_MLPERF_LOADGEN_MODE: + - accuracy + env: + MLC_PLATFORM_DETAILS_FILE_PATH: '<<>>/system_info.txt' + +post_deps: + - tags: draw,graph,from-json + enable_if_env: + MLC_MLPERF_RUN_JSON_VERSION_INFO_FILE: + - on + env: + MLC_JSON_INPUT_FILE: <<>> + MLC_OUTPUT_IMAGE_PATH: <<>> + MLC_OUTPUT_MERMAID_PATH: <<>> + docker: mlc_repo: anandhu-eng@mlperf-automations From dfd5b8843d362e99647ca477a127530b37c4a55c Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Thu, 22 May 2025 17:34:41 +0530 Subject: [PATCH 63/83] corrected state keys --- script/app-mlperf-automotive/customize.py | 12 +++---- script/run-mlperf-automotive-app/customize.py | 36 +++++++++---------- 2 files changed, 24 insertions(+), 24 deletions(-) diff --git 
a/script/app-mlperf-automotive/customize.py b/script/app-mlperf-automotive/customize.py index 872348b58..b7d8598c5 100644 --- a/script/app-mlperf-automotive/customize.py +++ b/script/app-mlperf-automotive/customize.py @@ -22,10 +22,10 @@ def preprocess(i): state['mlperf_inference_run_cmd'] = "mlcr " + \ " ".join(i['input']['cmd']) - state['mlperf-inference-implementation'] = {} + state['abtf-inference-implementation'] = {} run_state = i['run_script_input']['run_state'] - state['mlperf-inference-implementation']['script_id'] = run_state['script_id'] + \ + state['abtf-inference-implementation']['script_id'] = run_state['script_id'] + \ ":" + ",".join(run_state['script_variation_tags']) return {'return': 0} @@ -337,11 +337,11 @@ def postprocess(i): count = count + 1 if state.get( - 'mlperf-inference-implementation') and state['mlperf-inference-implementation'].get('print_deps'): + 'abtfabtf-inference-implementation') and state['abtfabtf-inference-implementation'].get('print_deps'): extra_readme_body += "\n## Dependent automation scripts for the MLPerf Automotive Implementation\n" - print_deps = state['mlperf-inference-implementation']['print_deps'] + print_deps = state['abtfabtf-inference-implementation']['print_deps'] count = 1 for dep in print_deps: extra_readme_body += "\n\n" + \ @@ -358,7 +358,7 @@ def postprocess(i): fp.write(extra_readme) if state.get( - 'mlperf-inference-implementation') and state['mlperf-inference-implementation'].get('version_info'): + 'abtf-inference-implementation') and state['abtf-inference-implementation'].get('version_info'): env['MLC_MLPERF_RUN_JSON_VERSION_INFO_FILE'] = os.path.join( output_dir, "mlc-version-info.json") env['MLC_MLPERF_RUN_DEPS_GRAPH'] = os.path.join( @@ -368,7 +368,7 @@ def postprocess(i): with open(os.path.join(output_dir, "mlc-version-info.json"), "w") as f: f.write( json.dumps( - state['mlperf-inference-implementation']['version_info'], + state['abtf-inference-implementation']['version_info'], indent=2)) if 
env.get('MLC_DUMP_SYSTEM_INFO', True): diff --git a/script/run-mlperf-automotive-app/customize.py b/script/run-mlperf-automotive-app/customize.py index fd84952e2..10983fc24 100644 --- a/script/run-mlperf-automotive-app/customize.py +++ b/script/run-mlperf-automotive-app/customize.py @@ -152,37 +152,37 @@ def preprocess(i): add_deps_recursive[key] = adr_from_meta[key] if env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '') != '': - if not add_deps_recursive.get('mlperf-inference-implementation', {}): - add_deps_recursive['mlperf-inference-implementation'] = {} - if add_deps_recursive['mlperf-inference-implementation'].get( + if not add_deps_recursive.get('abtf-inference-implementation', {}): + add_deps_recursive['abtf-inference-implementation'] = {} + if add_deps_recursive['abtf-inference-implementation'].get( 'tags', '') == '': - add_deps_recursive['mlperf-inference-implementation']['tags'] = '' + add_deps_recursive['abtf-inference-implementation']['tags'] = '' else: - add_deps_recursive['mlperf-inference-implementation']['tags'] += ',' - add_deps_recursive['mlperf-inference-implementation']['tags'] += "_batch_size." + \ + add_deps_recursive['abtf-inference-implementation']['tags'] += ',' + add_deps_recursive['abtf-inference-implementation']['tags'] += "_batch_size." 
+ \ env['MLC_MLPERF_LOADGEN_MAX_BATCHSIZE'] if env.get('MLC_MLPERF_INFERENCE_SUT_VARIATION', '') != '': - if not add_deps_recursive.get('mlperf-inference-implementation', {}): - add_deps_recursive['mlperf-inference-implementation'] = {} - if add_deps_recursive['mlperf-inference-implementation'].get( + if not add_deps_recursive.get('abtf-inference-implementation', {}): + add_deps_recursive['abtf-inference-implementation'] = {} + if add_deps_recursive['abtf-inference-implementation'].get( 'tags', '') == '': - add_deps_recursive['mlperf-inference-implementation']['tags'] = '' + add_deps_recursive['abtf-inference-implementation']['tags'] = '' else: - add_deps_recursive['mlperf-inference-implementation']['tags'] += ',' - add_deps_recursive['mlperf-inference-implementation']['tags'] += "_" + \ + add_deps_recursive['abtf-inference-implementation']['tags'] += ',' + add_deps_recursive['abtf-inference-implementation']['tags'] += "_" + \ env['MLC_MLPERF_INFERENCE_SUT_VARIATION'] if env.get('MLC_NETWORK_LOADGEN', '') != '': - if not add_deps_recursive.get('mlperf-inference-implementation', {}): - add_deps_recursive['mlperf-inference-implementation'] = {} + if not add_deps_recursive.get('abtf-inference-implementation', {}): + add_deps_recursive['abtf-inference-implementation'] = {} network_variation_tag = f"_network-{env['MLC_NETWORK_LOADGEN']}" - if add_deps_recursive['mlperf-inference-implementation'].get( + if add_deps_recursive['abtf-inference-implementation'].get( 'tags', '') == '': - add_deps_recursive['mlperf-inference-implementation']['tags'] = '' + add_deps_recursive['abtf-inference-implementation']['tags'] = '' else: - add_deps_recursive['mlperf-inference-implementation']['tags'] += ',' - add_deps_recursive['mlperf-inference-implementation']['tags'] += network_variation_tag + add_deps_recursive['abtf-inference-implementation']['tags'] += ',' + add_deps_recursive['abtf-inference-implementation']['tags'] += network_variation_tag if env.get('MLC_OUTPUT_FOLDER_NAME', '') 
== '': env['MLC_OUTPUT_FOLDER_NAME'] = env['MLC_MLPERF_RUN_STYLE'] + "_results" From 6f65c9715bf9f8f3c4c8d60509ddd92032401267 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Thu, 22 May 2025 18:35:58 +0530 Subject: [PATCH 64/83] changes for command generation --- .../customize.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/script/app-mlperf-automotive-mlcommons-python/customize.py b/script/app-mlperf-automotive-mlcommons-python/customize.py index 39eb136e6..2dce96b2f 100644 --- a/script/app-mlperf-automotive-mlcommons-python/customize.py +++ b/script/app-mlperf-automotive-mlcommons-python/customize.py @@ -237,7 +237,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR'] - if env['MLC_MLPERF_BACKEND'] != "onnx": + if env['MLC_MLPERF_BACKEND'] != "onnxruntime": logger.warning( "Unsupported backend {MLC_MLPERF_BACKEND}, defaulting to onnx") env['MLC_MLPERF_BACKEND'] = "onnx" @@ -249,8 +249,8 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, "bevformer", "bevformer_tiny.py") - cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {env['MLC_MLPERF_BACKEND']} --dataset nuscenes --nuscenes-root {os.path.dirname(env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'].rstrip("/"))} --dataset-path {env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH']} --checkpoint {env['MLC_ML_MODEL_BEVFORMER_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}""" - + cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend onnx --dataset nuscenes --nuscenes-root {os.path.dirname(env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'].rstrip("/"))} 
--dataset-path {env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH']} --checkpoint {env['MLC_ML_MODEL_BEVFORMER_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}""" + print(cmd) elif env['MLC_MODEL'] in ['ssd-resnet50']: run_dir = env['MLC_MLPERF_AUTOMOTIVE_SSD_RESNET50_PATH'] @@ -258,9 +258,11 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR'] + backend = "onnx" if env.get('MLC_MLPERF_BACKEND') == "onnxruntime" else env.get('MLC_MLPERF_BACKEND') + config_path = "baseline_8MP_ss_scales_fm1_5x5_all" - cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {env['MLC_MLPERF_BACKEND']} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_SSD_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}""" + cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {backend} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_SSD_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}""" elif env['MLC_MODEL'] in ['deeplab_v3+']: run_dir = env['MLC_MLPERF_AUTOMOTIVE_DEEPLABV3PLUS_PATH'] @@ -269,7 +271,9 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR'] - cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {env['MLC_MLPERF_BACKEND']} --dataset cognata --dataset-path 
{env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}""" + backend = "onnx" if env.get('MLC_MLPERF_BACKEND') == "onnxruntime" else env.get('MLC_MLPERF_BACKEND') + + cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {backend} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}""" ########################################################################## From 5b3945e805fd93a9a5645fd28b87501b4412a4b6 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 22 May 2025 13:06:24 +0000 Subject: [PATCH 65/83] [Automated Commit] Format Codebase [skip ci] --- script/app-mlperf-automotive-mlcommons-python/customize.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/script/app-mlperf-automotive-mlcommons-python/customize.py b/script/app-mlperf-automotive-mlcommons-python/customize.py index 2dce96b2f..6a91680e5 100644 --- a/script/app-mlperf-automotive-mlcommons-python/customize.py +++ b/script/app-mlperf-automotive-mlcommons-python/customize.py @@ -258,7 +258,8 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR'] - backend = "onnx" if env.get('MLC_MLPERF_BACKEND') == "onnxruntime" else env.get('MLC_MLPERF_BACKEND') + backend = "onnx" if env.get( + 'MLC_MLPERF_BACKEND') == "onnxruntime" else env.get('MLC_MLPERF_BACKEND') config_path = "baseline_8MP_ss_scales_fm1_5x5_all" @@ -271,7 +272,8 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, env['OUTPUT_DIR'] = 
env['MLC_MLPERF_OUTPUT_DIR'] - backend = "onnx" if env.get('MLC_MLPERF_BACKEND') == "onnxruntime" else env.get('MLC_MLPERF_BACKEND') + backend = "onnx" if env.get( + 'MLC_MLPERF_BACKEND') == "onnxruntime" else env.get('MLC_MLPERF_BACKEND') cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {backend} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}""" From 4d89cbffe4924ddd30fd08e0abd5e1dbc9ce8f4c Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Thu, 22 May 2025 18:38:43 +0530 Subject: [PATCH 66/83] register model and dataset to cache in docker --- script/app-mlperf-automotive-mlcommons-python/meta.yaml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/script/app-mlperf-automotive-mlcommons-python/meta.yaml b/script/app-mlperf-automotive-mlcommons-python/meta.yaml index 16053cd06..61b3c73cd 100644 --- a/script/app-mlperf-automotive-mlcommons-python/meta.yaml +++ b/script/app-mlperf-automotive-mlcommons-python/meta.yaml @@ -512,8 +512,6 @@ variations: - tags: get,generic-python-lib,_package.tqdm - tags: get,preprocessed,dataset,cognata,_mlc skip_if_any_env: - MLC_PREPROCESSED_DATASET_COGNATA_PATH: - - yes MLC_USE_DATASET_FROM_HOST: - "yes" MLC_RUN_STATE_DOCKER: @@ -522,8 +520,6 @@ variations: - preprocessed-dataset-mlcommons-cognata-ssd-resnet50 - tags: get,ml-model,ssd,resnet50,_mlc,_rclone skip_if_any_env: - MLC_ML_MODEL_SSD_PATH: - - yes MLC_USE_MODEL_FROM_HOST: - "yes" MLC_RUN_STATE_DOCKER: @@ -556,8 +552,6 @@ variations: - tags: get,generic-python-lib,_package.ijson - tags: get,preprocessed,dataset,cognata,_mlc,_segmentation skip_if_any_env: - MLC_PREPROCESSED_DATASET_COGNATA_PATH: - - 
yes MLC_USE_DATASET_FROM_HOST: - "yes" MLC_RUN_STATE_DOCKER: @@ -566,8 +560,6 @@ variations: - preprocessed-dataset-mlcommons-cognata-deeplabv3-plus - tags: get,ml-model,deeplabv3-plus skip_if_any_env: - MLC_ML_MODEL_DEEPLABV3_PLUS_PATH: - - yes MLC_USE_MODEL_FROM_HOST: - "yes" MLC_RUN_STATE_DOCKER: From f27b3bfc99f5e8e31a95ee1130ae4d04dfc53a87 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Thu, 22 May 2025 19:44:09 +0530 Subject: [PATCH 67/83] us is_true --- .../customize.py | 2 +- .../meta.yaml | 24 ++-------- script/app-mlperf-automotive/meta.yaml | 18 ------- .../customize.py | 7 +-- script/get-ml-model-bevformer/customize.py | 9 ++-- .../get-ml-model-deeplabv3_plus/customize.py | 9 ++-- .../customize.py | 47 ++++++++++--------- .../customize.py | 29 +++++++----- 8 files changed, 62 insertions(+), 83 deletions(-) diff --git a/script/app-mlperf-automotive-mlcommons-python/customize.py b/script/app-mlperf-automotive-mlcommons-python/customize.py index 6a91680e5..c1ac7368c 100644 --- a/script/app-mlperf-automotive-mlcommons-python/customize.py +++ b/script/app-mlperf-automotive-mlcommons-python/customize.py @@ -248,7 +248,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, "configs", "bevformer", "bevformer_tiny.py") - + print(env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']) cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend onnx --dataset nuscenes --nuscenes-root {os.path.dirname(env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'].rstrip("/"))} --dataset-path {env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH']} --checkpoint {env['MLC_ML_MODEL_BEVFORMER_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}""" print(cmd) elif env['MLC_MODEL'] in ['ssd-resnet50']: diff --git 
a/script/app-mlperf-automotive-mlcommons-python/meta.yaml b/script/app-mlperf-automotive-mlcommons-python/meta.yaml index 61b3c73cd..5cb8492c7 100644 --- a/script/app-mlperf-automotive-mlcommons-python/meta.yaml +++ b/script/app-mlperf-automotive-mlcommons-python/meta.yaml @@ -468,21 +468,13 @@ variations: - tags: get,generic-python-lib,_package.tqdm - tags: get,generic-python-lib,_package.nuscenes-devkit - tags: get,preprocessed,dataset,nuscenes - skip_if_any_env: - MLC_PREPROCESSED_DATASET_NUSCENES_PATH: - - yes - MLC_USE_DATASET_FROM_HOST: - - "yes" + skip_if_env: MLC_RUN_STATE_DOCKER: - "yes" names: - preprocessed-dataset-mlcommons-nuscenes - tags: get,ml-model,bevformer - skip_if_any_env: - MLC_ML_MODEL_BEVFORMER_PATH: - - yes - MLC_USE_MODEL_FROM_HOST: - - "yes" + skip_if_env: MLC_RUN_STATE_DOCKER: - "yes" names: @@ -511,9 +503,7 @@ variations: - tags: get,generic-python-lib,_package.onnxruntime - tags: get,generic-python-lib,_package.tqdm - tags: get,preprocessed,dataset,cognata,_mlc - skip_if_any_env: - MLC_USE_DATASET_FROM_HOST: - - "yes" + skip_if_env: MLC_RUN_STATE_DOCKER: - "yes" names: @@ -551,17 +541,13 @@ variations: - tags: get,generic-python-lib,_package.tqdm - tags: get,generic-python-lib,_package.ijson - tags: get,preprocessed,dataset,cognata,_mlc,_segmentation - skip_if_any_env: - MLC_USE_DATASET_FROM_HOST: - - "yes" + skip_if_env: MLC_RUN_STATE_DOCKER: - "yes" names: - preprocessed-dataset-mlcommons-cognata-deeplabv3-plus - tags: get,ml-model,deeplabv3-plus - skip_if_any_env: - MLC_USE_MODEL_FROM_HOST: - - "yes" + skip_if_env: MLC_RUN_STATE_DOCKER: - "yes" names: diff --git a/script/app-mlperf-automotive/meta.yaml b/script/app-mlperf-automotive/meta.yaml index 70fef3d4b..d1c76aa9e 100644 --- a/script/app-mlperf-automotive/meta.yaml +++ b/script/app-mlperf-automotive/meta.yaml @@ -263,16 +263,10 @@ variations: docker: deps: - tags: get,preprocessed,dataset,nuscenes - skip_if_env: - MLC_PREPROCESSED_DATASET_NUSCENES_PATH: - - yes enable_if_env: 
MLC_USE_DATASET_FROM_HOST: - "yes" - tags: get,ml-model,bevformer - skip_if_env: - MLC_ML_MODEL_BEVFORMER_PATH: - - yes enable_if_env: MLC_USE_MODEL_FROM_HOST: - "yes" @@ -306,16 +300,10 @@ variations: docker: deps: - tags: get,preprocessed,dataset,cognata - skip_if_env: - MLC_PREPROCESSED_DATASET_COGNATA_PATH: - - yes enable_if_env: MLC_USE_DATASET_FROM_HOST: - "yes" - tags: get,ml-model,ssd,resnet50,_mlc,_rclone - skip_if_env: - MLC_ML_MODEL_SSD_PATH: - - yes enable_if_env: MLC_USE_MODEL_FROM_HOST: - "yes" @@ -349,16 +337,10 @@ variations: docker: deps: - tags: get,preprocessed,dataset,cognata,_segmentation - skip_if_env: - MLC_PREPROCESSED_DATASET_COGNATA_PATH: - - yes enable_if_env: MLC_USE_DATASET_FROM_HOST: - "yes" - tags: get,ml-model,ssd,resnet50,_mlc,_rclone - skip_if_env: - MLC_ML_MODEL_DEEPLABV3_PLUS_PATH: - - yes enable_if_env: MLC_USE_MODEL_FROM_HOST: - "yes" diff --git a/script/get-ml-model-abtf-ssd-pytorch/customize.py b/script/get-ml-model-abtf-ssd-pytorch/customize.py index 08adffd66..e63e92fff 100644 --- a/script/get-ml-model-abtf-ssd-pytorch/customize.py +++ b/script/get-ml-model-abtf-ssd-pytorch/customize.py @@ -27,7 +27,7 @@ def preprocess(i): env['MLC_ML_MODEL_FILE_WITH_PATH'] = ml_model # handles download from mlcommons gdrive - elif env.get('MLC_DOWNLOAD_SRC', '') == "mlcommons": + elif env.get('MLC_DOWNLOAD_SRC', '') == "mlcommons" and env.get('MLC_ML_MODEL_SSD_PATH', '') == '': env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'yes' return {'return': 0} @@ -41,8 +41,9 @@ def postprocess(i): if env.get('MLC_ML_MODEL_SSD_PATH', '') == '': env['MLC_ML_MODEL_FILE_WITH_PATH'] = 'model-weights-skipped' else: - env['MLC_ML_MODEL_SSD_PATH'] = os.path.join( - env['MLC_ML_MODEL_SSD_PATH'], env['MLC_ML_MODEL_FILENAME']) + if is_true(env.get('MLC_TMP_REQUIRE_DOWNLOAD', '')): + env['MLC_ML_MODEL_SSD_PATH'] = os.path.join( + env['MLC_ML_MODEL_SSD_PATH'], env['MLC_ML_MODEL_FILENAME']) env['MLC_ML_MODEL_FILE_WITH_PATH'] = env['MLC_ML_MODEL_SSD_PATH'] 
env['MLC_ML_MODEL_FILE'] = os.path.basename( diff --git a/script/get-ml-model-bevformer/customize.py b/script/get-ml-model-bevformer/customize.py index 915a0dde7..9afc37740 100644 --- a/script/get-ml-model-bevformer/customize.py +++ b/script/get-ml-model-bevformer/customize.py @@ -1,4 +1,5 @@ from mlc import utils +from utils import is_true import os @@ -11,7 +12,8 @@ def preprocess(i): if os_info['platform'] == "windows": return {'return': 1, 'error': 'Script not supported in windows yet!'} - env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" + if env.get('MLC_ML_MODEL_BEVFORMER_PATH', '') == '': + env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" return {'return': 0} @@ -20,8 +22,9 @@ def postprocess(i): env = i['env'] - env['MLC_ML_MODEL_BEVFORMER_PATH'] = os.path.join( - env['MLC_ML_MODEL_BEVFORMER_PATH'], env['MLC_ML_MODEL_FILENAME']) + if is_true(env.get('MLC_TMP_REQUIRE_DOWNLOAD', '')): + env['MLC_ML_MODEL_BEVFORMER_PATH'] = os.path.join( + env['MLC_ML_MODEL_BEVFORMER_PATH'], env['MLC_ML_MODEL_FILENAME']) env['MLC_ML_MODEL_FILE_WITH_PATH'] = env['MLC_ML_MODEL_BEVFORMER_PATH'] diff --git a/script/get-ml-model-deeplabv3_plus/customize.py b/script/get-ml-model-deeplabv3_plus/customize.py index c249abfd0..2fff179b1 100644 --- a/script/get-ml-model-deeplabv3_plus/customize.py +++ b/script/get-ml-model-deeplabv3_plus/customize.py @@ -1,5 +1,6 @@ from mlc import utils import os +from utils import is_true def preprocess(i): @@ -11,7 +12,8 @@ def preprocess(i): if os_info['platform'] == "windows": return {'return': 1, 'error': 'Script not supported in windows yet!'} - env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" + if env.get('MLC_ML_MODEL_DEEPLABV3_PLUS_PATH', '') == '': + env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" return {'return': 0} @@ -20,8 +22,9 @@ def postprocess(i): env = i['env'] - env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH'] = os.path.join( - env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH'], env['MLC_ML_MODEL_FILENAME']) + if is_true(env.get('MLC_TMP_REQUIRE_DOWNLOAD', '')): + 
env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH'] = os.path.join( + env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH'], env['MLC_ML_MODEL_FILENAME']) env['MLC_ML_MODEL_FILE_WITH_PATH'] = env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH'] return {'return': 0} diff --git a/script/get-preprocessed-dataset-cognata/customize.py b/script/get-preprocessed-dataset-cognata/customize.py index 125104d5a..1e4f0beba 100644 --- a/script/get-preprocessed-dataset-cognata/customize.py +++ b/script/get-preprocessed-dataset-cognata/customize.py @@ -1,23 +1,24 @@ -from mlc import utils -import os -import shutil - - -def preprocess(i): - - env = i['env'] - - if env.get('MLC_NUSCENES_DATASET_TYPE', '') == "prebuilt": - env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" - - return {'return': 0} - - -def postprocess(i): - env = i['env'] - - env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'] = os.path.join( - env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'], - env['MLC_DATASET_COGNATA_EXTRACTED_FOLDER_NAME']) - - return {'return': 0} +from mlc import utils +import os +import shutil +from utils import is_true + + +def preprocess(i): + + env = i['env'] + + if env.get('MLC_NUSCENES_DATASET_TYPE', '') == "prebuilt": + env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" + + return {'return': 0} + + +def postprocess(i): + env = i['env'] + if is_true(env.get('MLC_TMP_REQUIRE_DOWNLOAD', '')): + env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'] = os.path.join( + env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'], + env['MLC_DATASET_COGNATA_EXTRACTED_FOLDER_NAME']) + + return {'return': 0} diff --git a/script/get-preprocessed-dataset-nuscenes/customize.py b/script/get-preprocessed-dataset-nuscenes/customize.py index 374aaba09..a90d428e2 100644 --- a/script/get-preprocessed-dataset-nuscenes/customize.py +++ b/script/get-preprocessed-dataset-nuscenes/customize.py @@ -1,13 +1,14 @@ from mlc import utils import os import shutil +from utils import is_true def preprocess(i): env = i['env'] - if env.get('MLC_NUSCENES_DATASET_TYPE', '') == "prebuilt": + if 
env.get('MLC_NUSCENES_DATASET_TYPE', '') == "prebuilt" and env.get('MLC_PREPROCESSED_DATASET_NUSCENES_PATH', '') == '': env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" return {'return': 0} @@ -16,16 +17,18 @@ def preprocess(i): def postprocess(i): env = i['env'] - env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'] = os.path.join( - env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'], - env['MLC_DATASET_NUSCENES_EXTRACTED_FOLDER_NAME']) - if env.get('MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH', '') != '': - shutil.copy( - os.path.join( - env['MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH'], - env['MLC_DATASET_NUSCENES_SCENE_PICKLE_FILENAME']), - os.path.join( - os.path.dirname( - env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'].rstrip("/")), - env['MLC_DATASET_NUSCENES_SCENE_PICKLE_FILENAME'])) + if is_true(env.get('MLC_TMP_REQUIRE_DOWNLOAD', '')): + env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'] = os.path.join( + env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'], + env['MLC_DATASET_NUSCENES_EXTRACTED_FOLDER_NAME']) + if env.get('MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH', '') != '': + shutil.copy( + os.path.join( + env['MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH'], + env['MLC_DATASET_NUSCENES_SCENE_PICKLE_FILENAME']), + os.path.join( + os.path.dirname( + env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'].rstrip("/")), + env['MLC_DATASET_NUSCENES_SCENE_PICKLE_FILENAME'])) + return {'return': 0} From a5a8ca0de592e513500837b043e934ec5849b7c6 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 22 May 2025 14:14:31 +0000 Subject: [PATCH 68/83] [Automated Commit] Format Codebase [skip ci] --- script/get-preprocessed-dataset-nuscenes/customize.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/script/get-preprocessed-dataset-nuscenes/customize.py b/script/get-preprocessed-dataset-nuscenes/customize.py index a90d428e2..5236b5c78 100644 --- a/script/get-preprocessed-dataset-nuscenes/customize.py +++ 
b/script/get-preprocessed-dataset-nuscenes/customize.py @@ -8,7 +8,8 @@ def preprocess(i): env = i['env'] - if env.get('MLC_NUSCENES_DATASET_TYPE', '') == "prebuilt" and env.get('MLC_PREPROCESSED_DATASET_NUSCENES_PATH', '') == '': + if env.get('MLC_NUSCENES_DATASET_TYPE', '') == "prebuilt" and env.get( + 'MLC_PREPROCESSED_DATASET_NUSCENES_PATH', '') == '': env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" return {'return': 0} @@ -21,7 +22,8 @@ def postprocess(i): env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'] = os.path.join( env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'], env['MLC_DATASET_NUSCENES_EXTRACTED_FOLDER_NAME']) - if env.get('MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH', '') != '': + if env.get( + 'MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH', '') != '': shutil.copy( os.path.join( env['MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH'], @@ -30,5 +32,5 @@ def postprocess(i): os.path.dirname( env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'].rstrip("/")), env['MLC_DATASET_NUSCENES_SCENE_PICKLE_FILENAME'])) - + return {'return': 0} From d085b32a2ca14eba1fe17cb43d9937d16e14c03b Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Thu, 22 May 2025 21:05:02 +0530 Subject: [PATCH 69/83] fix docker user issue --- script/app-mlperf-automotive-mlcommons-python/meta.yaml | 3 ++- script/app-mlperf-automotive/meta.yaml | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/script/app-mlperf-automotive-mlcommons-python/meta.yaml b/script/app-mlperf-automotive-mlcommons-python/meta.yaml index 5cb8492c7..4166dbea7 100644 --- a/script/app-mlperf-automotive-mlcommons-python/meta.yaml +++ b/script/app-mlperf-automotive-mlcommons-python/meta.yaml @@ -582,7 +582,8 @@ variations: singlestream: env: MLC_MLPERF_LOADGEN_SCENARIO: SingleStream - MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: 1 + default_variations: + batch-size: batch_size.1 server: env: MLC_MLPERF_LOADGEN_SCENARIO: Server diff --git 
a/script/app-mlperf-automotive/meta.yaml b/script/app-mlperf-automotive/meta.yaml index d1c76aa9e..4722d7254 100644 --- a/script/app-mlperf-automotive/meta.yaml +++ b/script/app-mlperf-automotive/meta.yaml @@ -123,6 +123,7 @@ docker: use_host_group_id: True use_host_user_id: True real_run: false + user: mlcuser interactive: True mlc_repos_off: 'mlc pull repo mlcommons@cm4abtf --branch=poc' deps: From fbd8c8a902550054d58094a9233e41e69ac5d101 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Fri, 23 May 2025 01:40:07 +0530 Subject: [PATCH 70/83] add dependencies for accuracy checker --- .../meta.yaml | 2 - script/process-mlperf-accuracy/meta.yaml | 39 +++++++++++++++++++ 2 files changed, 39 insertions(+), 2 deletions(-) diff --git a/script/app-mlperf-automotive-mlcommons-python/meta.yaml b/script/app-mlperf-automotive-mlcommons-python/meta.yaml index 4166dbea7..212a45bc0 100644 --- a/script/app-mlperf-automotive-mlcommons-python/meta.yaml +++ b/script/app-mlperf-automotive-mlcommons-python/meta.yaml @@ -510,8 +510,6 @@ variations: - preprocessed-dataset-mlcommons-cognata-ssd-resnet50 - tags: get,ml-model,ssd,resnet50,_mlc,_rclone skip_if_any_env: - MLC_USE_MODEL_FROM_HOST: - - "yes" MLC_RUN_STATE_DOCKER: - "yes" names: diff --git a/script/process-mlperf-accuracy/meta.yaml b/script/process-mlperf-accuracy/meta.yaml index 76324ead4..458f740fa 100644 --- a/script/process-mlperf-accuracy/meta.yaml +++ b/script/process-mlperf-accuracy/meta.yaml @@ -274,14 +274,53 @@ variations: MLC_DATASET: waymo group: dataset nuscenes: + deps: + - tags: get,preprocessed,dataset,nuscenes + skip_if_env: + MLC_RUN_STATE_DOCKER: + - "yes" + names: + - preprocessed-dataset-mlcommons-nuscenes + - tags: get,ml-model,bevformer + skip_if_env: + MLC_RUN_STATE_DOCKER: + - "yes" + names: + - ml-model-bevformer env: MLC_DATASET: nuscenes group: dataset cognata_ssd: + deps: + - tags: get,preprocessed,dataset,cognata,_mlc + skip_if_env: + 
MLC_RUN_STATE_DOCKER: + - "yes" + names: + - preprocessed-dataset-mlcommons-cognata-ssd-resnet50 + - tags: get,ml-model,ssd,resnet50,_mlc,_rclone + skip_if_any_env: + MLC_RUN_STATE_DOCKER: + - "yes" + names: + - ml-model-ssd-resnet50 env: MLC_DATASET: cognata_ssd group: dataset cognata_deeplab: + deps: + - tags: get,preprocessed,dataset,cognata,_mlc,_segmentation + skip_if_env: + MLC_RUN_STATE_DOCKER: + - "yes" + names: + - preprocessed-dataset-mlcommons-cognata-deeplabv3-plus + - tags: get,ml-model,deeplabv3-plus + skip_if_env: + MLC_RUN_STATE_DOCKER: + - "yes" + names: + - ml-model-deeplabv3-plus env: MLC_DATASET: cognata_deeplab group: dataset From e44ef4a74b3ed077142c98dec17c24617df406fd Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Fri, 23 May 2025 14:25:56 +0000 Subject: [PATCH 71/83] [Automated Commit] Format Codebase [skip ci] --- script/app-mlperf-automotive-mlcommons-python/customize.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/app-mlperf-automotive-mlcommons-python/customize.py b/script/app-mlperf-automotive-mlcommons-python/customize.py index 2d5534a1a..c1ac7368c 100644 --- a/script/app-mlperf-automotive-mlcommons-python/customize.py +++ b/script/app-mlperf-automotive-mlcommons-python/customize.py @@ -290,4 +290,4 @@ def postprocess(i): inp = i['input'] - return {'return': 0} \ No newline at end of file + return {'return': 0} From cb9659dc72ba854fb9965bb9ea5e7e503a1596e1 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Sat, 24 May 2025 15:48:30 +0530 Subject: [PATCH 72/83] added accuracy checker min files download --- script/app-mlperf-automotive/meta.yaml | 1 + .../customize.py | 13 +++++++++++++ .../meta.yaml | 19 +++++++++++++++++++ .../get-preprocessed-dataset-nuscenes/run.sh | 4 ++++ 4 files changed, 37 insertions(+) diff --git a/script/app-mlperf-automotive/meta.yaml b/script/app-mlperf-automotive/meta.yaml index 4722d7254..101a7b851 100644 --- 
a/script/app-mlperf-automotive/meta.yaml +++ b/script/app-mlperf-automotive/meta.yaml @@ -276,6 +276,7 @@ variations: mounts: - "${{ MLC_PREPROCESSED_DATASET_NUSCENES_PATH }}:${{ MLC_PREPROCESSED_DATASET_NUSCENES_PATH }}" - "${{ MLC_ML_MODEL_BEVFORMER_PATH }}:${{ MLC_ML_MODEL_BEVFORMER_PATH }}" + - "${{ MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH }}:${{ MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH }}" add_deps_recursive: abtf-inference-implementation: tags: _bevformer diff --git a/script/get-preprocessed-dataset-nuscenes/customize.py b/script/get-preprocessed-dataset-nuscenes/customize.py index 5236b5c78..d76e792bb 100644 --- a/script/get-preprocessed-dataset-nuscenes/customize.py +++ b/script/get-preprocessed-dataset-nuscenes/customize.py @@ -22,6 +22,9 @@ def postprocess(i): env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'] = os.path.join( env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'], env['MLC_DATASET_NUSCENES_EXTRACTED_FOLDER_NAME']) + env['MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH'] = os.path.join( + env['MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH'], + env['MLC_DATASET_NUSCENES_ACC_REQ_FILE_EXTRACTED_FOLDER_NAME']) if env.get( 'MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH', '') != '': shutil.copy( @@ -32,5 +35,15 @@ def postprocess(i): os.path.dirname( env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'].rstrip("/")), env['MLC_DATASET_NUSCENES_SCENE_PICKLE_FILENAME'])) + if env.get( + 'MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH', '') != '': + shutil.copy( + os.path.join( + env['MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH'], + env['MLC_DATASET_NUSCENES_ACC_CHECKER_DEP_FILES_TAR_NAME']), + os.path.join( + os.path.dirname( + env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'].rstrip("/")), + env['MLC_DATASET_NUSCENES_ACC_CHECKER_DEP_FILES_TAR_NAME'])) return {'return': 0} diff --git a/script/get-preprocessed-dataset-nuscenes/meta.yaml 
b/script/get-preprocessed-dataset-nuscenes/meta.yaml index 5599cf2c4..e2f5433e4 100644 --- a/script/get-preprocessed-dataset-nuscenes/meta.yaml +++ b/script/get-preprocessed-dataset-nuscenes/meta.yaml @@ -70,6 +70,8 @@ variations: mlc,validation: env: MLC_DATASET_NUSCENES_SCENE_PICKLE_FILENAME: scene_lengths.pkl + MLC_DATASET_NUSCENES_ACC_CHECKER_DEP_FILES_TAR_NAME: nuscenes_min.tar.gz + MLC_DATASET_NUSCENES_ACC_REQ_FILE_EXTRACTED_FOLDER_NAME: nuscenes prehook_deps: - enable_if_env: MLC_TMP_REQUIRE_DOWNLOAD: @@ -88,6 +90,23 @@ variations: update_tags_from_env_with_prefix: _url.: - MLC_DOWNLOAD_URL + - enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - 'yes' + extra_cache_tags: nuscenes,dataset,accuracy_checker + force_cache: true + names: + - dae_ac + tags: download-and-extract + force_env_keys: + - MLC_OUTDIRNAME + env: + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH + MLC_EXTRACT_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH + MLC_DOWNLOAD_URL: mlc-nuscenes:nuscenes_dataset/nuscenes_min.tar.gz + update_tags_from_env_with_prefix: + _url.: + - MLC_DOWNLOAD_URL rclone: group: download-tool add_deps_recursive: diff --git a/script/get-preprocessed-dataset-nuscenes/run.sh b/script/get-preprocessed-dataset-nuscenes/run.sh index a6048a240..16337c44d 100644 --- a/script/get-preprocessed-dataset-nuscenes/run.sh +++ b/script/get-preprocessed-dataset-nuscenes/run.sh @@ -5,5 +5,9 @@ if [[ "$MLC_DOWNLOAD_MODE" != "dry" && "$MLC_TMP_REQUIRE_DOWNLOAD" = "yes" ]]; t for f in *.tar.gz; do tar -xzvf "$f" || { echo "Failed to extract $f"; exit 1; } done + cd "${MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH}" || exit + for f in *.tar.gz; do + tar -xzvf "$f" || { echo "Failed to extract $f"; exit 1; } + done cd - || exit fi \ No newline at end of file From c5b8f79cb5cc172ef8f8dfe4cd0b9c9d629d31d0 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Sat, 24 
May 2025 16:44:32 +0530 Subject: [PATCH 73/83] fixes for nuscenes accuracy checker --- .../customize.py | 10 - .../meta.yaml | 256 ++++---- script/process-mlperf-accuracy/customize.py | 566 +++++++++--------- 3 files changed, 413 insertions(+), 419 deletions(-) diff --git a/script/get-preprocessed-dataset-nuscenes/customize.py b/script/get-preprocessed-dataset-nuscenes/customize.py index d76e792bb..bb6e2b914 100644 --- a/script/get-preprocessed-dataset-nuscenes/customize.py +++ b/script/get-preprocessed-dataset-nuscenes/customize.py @@ -35,15 +35,5 @@ def postprocess(i): os.path.dirname( env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'].rstrip("/")), env['MLC_DATASET_NUSCENES_SCENE_PICKLE_FILENAME'])) - if env.get( - 'MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH', '') != '': - shutil.copy( - os.path.join( - env['MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH'], - env['MLC_DATASET_NUSCENES_ACC_CHECKER_DEP_FILES_TAR_NAME']), - os.path.join( - os.path.dirname( - env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'].rstrip("/")), - env['MLC_DATASET_NUSCENES_ACC_CHECKER_DEP_FILES_TAR_NAME'])) return {'return': 0} diff --git a/script/get-preprocessed-dataset-nuscenes/meta.yaml b/script/get-preprocessed-dataset-nuscenes/meta.yaml index e2f5433e4..05245e475 100644 --- a/script/get-preprocessed-dataset-nuscenes/meta.yaml +++ b/script/get-preprocessed-dataset-nuscenes/meta.yaml @@ -1,127 +1,131 @@ -alias: get-preprocessed-dataset-nuscenes -automation_alias: script -automation_uid: 5b4e0237da074764 -cache: true -category: AI/ML datasets -default_env: - MLC_DATASET: nuscenes -new_env_keys: -- MLC_PREPROCESSED_DATASET_* -tags: -- get -- dataset -- nuscenes -- preprocessed -uid: 0e403a2861984a4e -print_env_at_the_end: - MLC_PREPROCESSED_DATASET_NUSCENES_PATH: Preprocessed Nuscenes dataset path -variations: - validation: - default: true - group: dataset-type - env: - MLC_DATASET_NUSCENES_EXTRACTED_FOLDER_NAME: val_3d - MLC_DATASET_NUSCENES_TAR_FILENAME: 
val_3d.tar.gz - MLC_DOWNLOAD_URL: mlc-nuscenes:nuscenes_dataset/preprocessed/<<>> - calibration: - group: dataset-type - env: - MLC_DATASET_NUSCENES_EXTRACTED_FOLDER_NAME: calib_3d - MLC_DATASET_NUSCENES_TAR_FILENAME: calib_3d.tar.gz - MLC_DOWNLOAD_URL: mlc-nuscenes:nuscenes_dataset/preprocessed/<<>> - prebuilt: - default: true - group: dataset-src - env: - MLC_NUSCENES_DATASET_TYPE: prebuilt - MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_NUSCENES_PATH - MLC_EXTRACT_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_NUSCENES_PATH - MLC_DOWNLOAD_EXTRA_OPTIONS: ' --include ' - mlc: - group: download-src - default: true - prehook_deps: - - tags: get,rclone - enable_if_env: - MLC_TMP_REQUIRE_DOWNLOAD: - - yes - - tags: get,rclone-config,_config-name.mlc-nuscenes - force_cache: true - enable_if_env: - MLC_TMP_REQUIRE_DOWNLOAD: - - yes - env: - MLC_RCLONE_DRIVE_FOLDER_ID: 17CpM5eU8tjrxh_LpH_BTNTeT37PhzcnC - - enable_if_env: - MLC_TMP_REQUIRE_DOWNLOAD: - - 'yes' - extra_cache_tags: nuscenes,dataset - force_cache: true - names: - - dae - tags: download-and-extract - force_env_keys: - - MLC_OUTDIRNAME - update_tags_from_env_with_prefix: - _url.: - - MLC_DOWNLOAD_URL - env: - MLC_DOWNLOAD_SRC: mlcommons - mlc,validation: - env: - MLC_DATASET_NUSCENES_SCENE_PICKLE_FILENAME: scene_lengths.pkl - MLC_DATASET_NUSCENES_ACC_CHECKER_DEP_FILES_TAR_NAME: nuscenes_min.tar.gz - MLC_DATASET_NUSCENES_ACC_REQ_FILE_EXTRACTED_FOLDER_NAME: nuscenes - prehook_deps: - - enable_if_env: - MLC_TMP_REQUIRE_DOWNLOAD: - - 'yes' - extra_cache_tags: nuscenes,dataset,scene_lengths - force_cache: true - names: - - dae_sl - tags: download-and-extract - force_env_keys: - - MLC_OUTDIRNAME - env: - MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH - MLC_EXTRACT_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH - MLC_DOWNLOAD_URL: mlc-nuscenes:nuscenes_dataset/scene_lengths.pkl - update_tags_from_env_with_prefix: - _url.: - - MLC_DOWNLOAD_URL - - 
enable_if_env: - MLC_TMP_REQUIRE_DOWNLOAD: - - 'yes' - extra_cache_tags: nuscenes,dataset,accuracy_checker - force_cache: true - names: - - dae_ac - tags: download-and-extract - force_env_keys: - - MLC_OUTDIRNAME - env: - MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH - MLC_EXTRACT_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH - MLC_DOWNLOAD_URL: mlc-nuscenes:nuscenes_dataset/nuscenes_min.tar.gz - update_tags_from_env_with_prefix: - _url.: - - MLC_DOWNLOAD_URL - rclone: - group: download-tool - add_deps_recursive: - dae: - tags: _rclone - default: true - dry-run: - group: run-mode - env: - MLC_DOWNLOAD_MODE: dry - dry-run,rclone: - env: - MLC_DOWNLOAD_EXTRA_OPTIONS: --dry-run -tests: - run_inputs: - - variations_list: - - validation,prebuilt,rclone,mlc,dry-run +alias: get-preprocessed-dataset-nuscenes +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML datasets +default_env: + MLC_DATASET: nuscenes +new_env_keys: +- MLC_PREPROCESSED_DATASET_* +tags: +- get +- dataset +- nuscenes +- preprocessed +uid: 0e403a2861984a4e +print_env_at_the_end: + MLC_PREPROCESSED_DATASET_NUSCENES_PATH: Preprocessed Nuscenes dataset path +variations: + validation: + default: true + group: dataset-type + env: + MLC_DATASET_NUSCENES_EXTRACTED_FOLDER_NAME: val_3d + MLC_DATASET_NUSCENES_TAR_FILENAME: val_3d.tar.gz + MLC_DOWNLOAD_URL: mlc-nuscenes:nuscenes_dataset/preprocessed/<<>> + calibration: + group: dataset-type + env: + MLC_DATASET_NUSCENES_EXTRACTED_FOLDER_NAME: calib_3d + MLC_DATASET_NUSCENES_TAR_FILENAME: calib_3d.tar.gz + MLC_DOWNLOAD_URL: mlc-nuscenes:nuscenes_dataset/preprocessed/<<>> + prebuilt: + default: true + group: dataset-src + env: + MLC_NUSCENES_DATASET_TYPE: prebuilt + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_NUSCENES_PATH + MLC_EXTRACT_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_NUSCENES_PATH + MLC_DOWNLOAD_EXTRA_OPTIONS: ' --include ' + mlc: + 
group: download-src + default: true + prehook_deps: + - tags: get,rclone + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes + - tags: get,rclone-config,_config-name.mlc-nuscenes + force_cache: true + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes + env: + MLC_RCLONE_DRIVE_FOLDER_ID: 17CpM5eU8tjrxh_LpH_BTNTeT37PhzcnC + - enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - 'yes' + extra_cache_tags: nuscenes,dataset + force_cache: true + names: + - dae + tags: download-and-extract + force_env_keys: + - MLC_OUTDIRNAME + update_tags_from_env_with_prefix: + _url.: + - MLC_DOWNLOAD_URL + env: + MLC_DOWNLOAD_SRC: mlcommons + mlc,validation: + env: + MLC_DATASET_NUSCENES_SCENE_PICKLE_FILENAME: scene_lengths.pkl + MLC_DATASET_NUSCENES_ACC_CHECKER_DEP_FILES_TAR_NAME: nuscenes_min.tar.gz + MLC_DATASET_NUSCENES_ACC_REQ_FILE_EXTRACTED_FOLDER_NAME: nuscenes + prehook_deps: + - enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - 'yes' + extra_cache_tags: nuscenes,dataset,scene_lengths + force_cache: true + names: + - dae_sl + tags: download-and-extract + force_env_keys: + - MLC_OUTDIRNAME + env: + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH + MLC_EXTRACT_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH + MLC_DOWNLOAD_URL: mlc-nuscenes:nuscenes_dataset/scene_lengths.pkl + update_tags_from_env_with_prefix: + _url.: + - MLC_DOWNLOAD_URL + - enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - 'yes' + extra_cache_tags: nuscenes,dataset,accuracy_checker + force_cache: true + names: + - dae_ac + tags: download-and-extract + force_env_keys: + - MLC_OUTDIRNAME + env: + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH + MLC_EXTRACT_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH + MLC_DOWNLOAD_URL: mlc-nuscenes:nuscenes_dataset/nuscenes_min.tar.gz + update_tags_from_env_with_prefix: + _url.: + - MLC_DOWNLOAD_URL + rclone: + group: download-tool + add_deps_recursive: + dae: 
+ tags: _rclone + dae_sl: + tags: _rclone + dae_ac: + tags: _rclone + default: true + dry-run: + group: run-mode + env: + MLC_DOWNLOAD_MODE: dry + dry-run,rclone: + env: + MLC_DOWNLOAD_EXTRA_OPTIONS: --dry-run +tests: + run_inputs: + - variations_list: + - validation,prebuilt,rclone,mlc,dry-run - calibration,prebuilt,rclone,mlc,dry-run \ No newline at end of file diff --git a/script/process-mlperf-accuracy/customize.py b/script/process-mlperf-accuracy/customize.py index f0cccc34c..adc518147 100644 --- a/script/process-mlperf-accuracy/customize.py +++ b/script/process-mlperf-accuracy/customize.py @@ -1,283 +1,283 @@ -from mlc import utils -import os - - -def preprocess(i): - - os_info = i['os_info'] - - xsep = ';' if os_info['platform'] == 'windows' else ':' - - env = i['env'] - logger = i['automation'].logger - - results_dir = env.get("MLC_MLPERF_ACCURACY_RESULTS_DIR", "") - - if results_dir == "": - logger.error("Please set MLC_MLPERF_ACCURACY_RESULTS_DIR") - return {'return': -1} - - # In fact, we expect only 1 command line here - run_cmds = [] - - if env.get('MLC_MAX_EXAMPLES', '') != '' and env.get( - 'MLC_MLPERF_RUN_STYLE', '') != 'valid': - max_examples_string = " --max_examples " + env['MLC_MAX_EXAMPLES'] - else: - max_examples_string = "" - - results_dir_split = results_dir.split(xsep) - dataset = env['MLC_DATASET'] - regenerate_accuracy_file = env.get( - 'MLC_MLPERF_REGENERATE_ACCURACY_FILE', env.get( - 'MLC_RERUN', False)) - - for result_dir in results_dir_split: - - out_file = os.path.join(result_dir, 'accuracy.txt') - - if os.path.exists(out_file) and ( - os.stat(out_file).st_size != 0) and not regenerate_accuracy_file: - continue - - if dataset == "openimages": - if env.get('MLC_DATASET_PATH_ROOT', '') != '': - dataset_dir = env['MLC_DATASET_PATH_ROOT'] - if 'DATASET_ANNOTATIONS_FILE_PATH' in env: - del (env['DATASET_ANNOTATIONS_FILE_PATH']) - else: - env['DATASET_ANNOTATIONS_FILE_PATH'] = env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] - dataset_dir = 
os.getcwd() # not used, just to keep the script happy - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " " + "'" + os.path.join(env['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", - "accuracy-openimages.py") + "'" + " --mlperf-accuracy-file " + "'" + os.path.join(result_dir, - "mlperf_log_accuracy.json") + "'" + " --openimages-dir " + "'" + dataset_dir + "'" + " --verbose > " + "'" + \ - out_file + "'" - - elif dataset == "imagenet": - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", - "accuracy-imagenet.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir, - "mlperf_log_accuracy.json") + "' --imagenet-val-file '" + os.path.join(env['MLC_DATASET_AUX_PATH'], - "val.txt") + "' --dtype " + env.get('MLC_ACCURACY_DTYPE', "float32") + " > '" + out_file + "'" - - elif dataset == "squad": - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_BERT_PATH'], - "accuracy-squad.py") + "' --val_data '" + env['MLC_DATASET_SQUAD_VAL_PATH'] + \ - "' --log_file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ - "' --vocab_file '" + env['MLC_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH'] + \ - "' --out_file '" + os.path.join(result_dir, 'predictions.json') + \ - "' --features_cache_file '" + os.path.join(env['MLC_MLPERF_INFERENCE_BERT_PATH'], 'eval_features.pickle') + \ - "' --output_dtype " + env['MLC_ACCURACY_DTYPE'] + env.get( - 'MLC_OUTPUT_TRANSPOSED', '') + max_examples_string + " > '" + out_file + "'" - - elif dataset == "cnndm": - if env.get('MLC_MLPERF_IMPLEMENTATION', '') == 'intel': - accuracy_checker_file = env['MLC_MLPERF_INFERENCE_INTEL_GPTJ_ACCURACY_FILE_WITH_PATH'] - env['+PYTHONPATH'] = [os.path.dirname(env['MLC_MLPERF_INFERENCE_INTEL_GPTJ_DATASET_FILE_WITH_PATH'])] + [ - os.path.dirname(env['MLC_MLPERF_INFERENCE_INTEL_GPTJ_DATASET_ITEM_FILE_WITH_PATH'])] + env['+PYTHONPATH'] - suffix_string = " --model-name-or-path '" + \ - 
env['GPTJ_CHECKPOINT_PATH'] + "'" - else: - accuracy_checker_file = os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "language", "gpt-j", - "evaluation.py") - suffix_string = " --dtype " + \ - env.get('MLC_ACCURACY_DTYPE', "float32") - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + accuracy_checker_file + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ - "' --dataset-file '" + \ - env['MLC_DATASET_EVAL_PATH'] + "'" + \ - suffix_string + " > '" + out_file + "'" - - elif dataset == "openorca": - accuracy_checker_file = os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "language", "llama2-70b", - "evaluate-accuracy.py") - if env.get('MLC_VLLM_SERVER_MODEL_NAME', '') == '': - checkpoint_path = env['MLC_ML_MODEL_LLAMA2_FILE_WITH_PATH'] - else: - checkpoint_path = env['MLC_VLLM_SERVER_MODEL_NAME'] - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + accuracy_checker_file + "' --checkpoint-path '" + checkpoint_path + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ - "' --dataset-file '" + env['MLC_DATASET_PREPROCESSED_PATH'] + "'" + " --dtype " + env.get( - 'MLC_ACCURACY_DTYPE', "int32") + " > '" + out_file + "'" - - elif dataset == "openorca-gsm8k-mbxp-combined": - accuracy_checker_file = os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "language", "mixtral-8x7b", - "evaluate-accuracy.py") - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + accuracy_checker_file + "' --checkpoint-path '" + env['MIXTRAL_CHECKPOINT_PATH'] + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ - "' --dataset-file '" + env['MLC_DATASET_MIXTRAL_PREPROCESSED_PATH'] + "'" + \ - " --dtype " + env.get('MLC_ACCURACY_DTYPE', - "float32") + " > '" + out_file + "'" - - elif dataset == "coco2014": - env['+PYTHONPATH'] = [ - os.path.join( - env['MLC_MLPERF_INFERENCE_SOURCE'], - "text_to_image", - "tools"), - os.path.join( - env['MLC_MLPERF_INFERENCE_SOURCE'], - "text_to_image", - "tools", - 
"fid")] - extra_options = "" - - if env.get('MLC_SDXL_STATISTICS_FILE_PATH', '') != '': - extra_options += ( - f""" --statistics-path '{ - env['MLC_SDXL_STATISTICS_FILE_PATH']}'""" - ) - - if env.get('MLC_SDXL_COMPLIANCE_IMAGES_PATH', '') != '': - extra_options += ( - f""" --compliance-images-path '{ - env['MLC_SDXL_COMPLIANCE_IMAGES_PATH']}' """ - ) - else: - extra_options += f""" --compliance-images-path '{ - os.path.join( - result_dir, "images")}' """ - - if env.get('MLC_COCO2014_SAMPLE_ID_PATH', '') != '': - extra_options += ( - f" --ids-path '{env['MLC_COCO2014_SAMPLE_ID_PATH']}' " - ) - - if env.get('MLC_SDXL_ACCURACY_RUN_DEVICE', '') != '': - extra_options += ( - f" --device '{env['MLC_SDXL_ACCURACY_RUN_DEVICE']}' " - ) - - # env['DATASET_ANNOTATIONS_FILE_PATH'] = env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "text_to_image", "tools", - "accuracy_coco.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ - "' --caption-path '" + os.path.join( - env['MLC_MLPERF_INFERENCE_SOURCE'], - "text_to_image", - "coco2014", - "captions", - "captions_source.tsv") + "'" + extra_options + " > '" + out_file + "'" - - elif dataset == "kits19": - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_3DUNET_PATH'], - "accuracy_kits.py") + \ - "' --preprocessed_data_dir '" + env['MLC_DATASET_PREPROCESSED_PATH'] +\ - "' --postprocessed_data_dir '" + result_dir +\ - "' --log_file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ - "' --output_dtype " + \ - env['MLC_ACCURACY_DTYPE'] + " > '" + out_file + "'" - - elif dataset == "librispeech": - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_RNNT_PATH'], - "accuracy_eval.py") + \ - "' --dataset_dir '" + os.path.join(env['MLC_DATASET_PREPROCESSED_PATH'], "..") +\ - "' --manifest '" + env['MLC_DATASET_PREPROCESSED_JSON'] 
+\ - "' --log_dir '" + result_dir + \ - "' --output_dtype " + \ - env['MLC_ACCURACY_DTYPE'] + " > '" + out_file + "'" - - elif dataset == "terabyte": - extra_options = "" - if env.get('MLC_DLRM_V2_AGGREGATION_TRACE_FILE_PATH', '') != '': - extra_options += ( - f""" --aggregation-trace-file '{ - env['MLC_DLRM_V2_AGGREGATION_TRACE_FILE_PATH']}' """ - ) - if env.get('MLC_DLRM_V2_DAY23_FILE_PATH', '') != '': - extra_options += ( - f""" --day-23-file '{ - env['MLC_DLRM_V2_DAY23_FILE_PATH']}' """ - ) - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_DLRM_V2_PATH'], "pytorch", "tools", - "accuracy-dlrm.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir, - "mlperf_log_accuracy.json") + "'" + extra_options + \ - " --dtype " + env.get('MLC_ACCURACY_DTYPE', - "float32") + " > '" + out_file + "'" - - elif dataset == "igbh": - if env.get('MLC_DATASET_IGBH_SIZE', '') == '': - if env.get('MLC_MLPERF_SUBMISSION_GENERATION_STYLE', - '') == "full": - env['MLC_DATASET_IGBH_SIZE'] = "full" - else: - env['MLC_DATASET_IGBH_SIZE'] = "tiny" - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "graph", "R-GAT", "tools", "accuracy_igbh.py") + "' --mlperf-accuracy-file '" + os.path.join( - result_dir, "mlperf_log_accuracy.json") + "' --dataset-path '" + env['MLC_DATASET_IGBH_PATH'] + "' --dataset-size '" + env['MLC_DATASET_IGBH_SIZE'] + "' --output-file '" + out_file + "'" - - elif dataset == "dataset_llama3": - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "language", "llama3.1-405b", "evaluate-accuracy.py") + "' --checkpoint-path '" + env['MLC_ML_MODEL_LLAMA3_CHECKPOINT_PATH'] + "' --mlperf-accuracy-file '" + os.path.join( - result_dir, "mlperf_log_accuracy.json") + "' --dtype '" + env['MLC_ACCURACY_DTYPE'] + "' --dataset-file '" + env['MLC_DATASET_LLAMA3_PATH'] + "' > '" + out_file + "'" - - elif dataset == "waymo": - CMD = 
env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "automotive", "3d-object-detection", "accuracy_waymo.py") + "' --mlperf-accuracy-file '" + os.path.join( - result_dir, "mlperf_log_accuracy.json") + "' --waymo-dir '" + env['MLC_DATASET_WAYMO_PATH'] + "' > '" + out_file + "'" - - elif dataset == "nuscenes": - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_AUTOMOTIVE_BEVFORMER_PATH'], "accuracy_nuscenes_cpu.py") + "' --mlperf-accuracy-file '" + os.path.join( - result_dir, "mlperf_log_accuracy.json") + "' --nuscenes-dir '" + env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'] + "' --config '" + os.path.join(env['MLC_MLPERF_AUTOMOTIVE_BEVFORMER_PATH'], "projects" + "configs" + "bevformer" + "bevformer_tiny.py") + "' > '" + out_file + "'" - - elif dataset == "cognata_ssd": - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_AUTOMOTIVE_SSD_RESNET50_PATH'], "accuracy_cognata.py") + "' --mlperf-accuracy-file '" + os.path.join( - result_dir, "mlperf_log_accuracy.json") + "' --dataset-path '" + env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'] + "' --dataset cognata --config '" + "baseline_8MP_ss_scales_fm1_5x5_all" + "' > '" + out_file + "'" - - elif dataset == "cognata_deeplab": - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_AUTOMOTIVE_DEEPLABV3PLUS_PATH'], "accuracy_cognata.py") + "' --mlperf-accuracy-file '" + os.path.join( - result_dir, "mlperf_log_accuracy.json") + "' --dataset-path '" + env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'] + "' > '" + out_file + "'" - - else: - return {'return': 1, 'error': 'Unsupported dataset'} - - run_cmds.append(CMD) - - if os_info['platform'] == 'windows': - env['MLC_RUN_CMDS'] = ( - '\n'.join(run_cmds)).replace( - "'", - '"').replace( - '>', - '^>') - else: - env['MLC_RUN_CMDS'] = "??".join(run_cmds) - - return {'return': 0} - - -def postprocess(i): - - os_info = i['os_info'] - env = i['env'] - state = i['state'] - logger = 
i['automation'].logger - xsep = ';' if os_info['platform'] == 'windows' else ':' - - results_dir = env.get("MLC_MLPERF_ACCURACY_RESULTS_DIR", "") - - results_dir_split = results_dir.split(xsep) - - for result_dir in results_dir_split: - accuracy_file = os.path.join(result_dir, "accuracy.txt") - - if os.path.exists(accuracy_file): - logger.info('') - logger.info('Accuracy file: {}'.format(accuracy_file)) - logger.info('') - - x = '' - with open(accuracy_file, "r") as fp: - x = fp.read() - - if x != '': - logger.info(f"{x}") - - # Trying to extract accuracy dict - for y in x.split('\n'): - if y.startswith('{') and y.endswith('}'): - - import json - - try: - z = json.loads(y) - state['app_mlperf_inference_accuracy'] = z - - break - except ValueError as e: - pass - - logger.info('') - return {'return': 0} +from mlc import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + xsep = ';' if os_info['platform'] == 'windows' else ':' + + env = i['env'] + logger = i['automation'].logger + + results_dir = env.get("MLC_MLPERF_ACCURACY_RESULTS_DIR", "") + + if results_dir == "": + logger.error("Please set MLC_MLPERF_ACCURACY_RESULTS_DIR") + return {'return': -1} + + # In fact, we expect only 1 command line here + run_cmds = [] + + if env.get('MLC_MAX_EXAMPLES', '') != '' and env.get( + 'MLC_MLPERF_RUN_STYLE', '') != 'valid': + max_examples_string = " --max_examples " + env['MLC_MAX_EXAMPLES'] + else: + max_examples_string = "" + + results_dir_split = results_dir.split(xsep) + dataset = env['MLC_DATASET'] + regenerate_accuracy_file = env.get( + 'MLC_MLPERF_REGENERATE_ACCURACY_FILE', env.get( + 'MLC_RERUN', False)) + + for result_dir in results_dir_split: + + out_file = os.path.join(result_dir, 'accuracy.txt') + + if os.path.exists(out_file) and ( + os.stat(out_file).st_size != 0) and not regenerate_accuracy_file: + continue + + if dataset == "openimages": + if env.get('MLC_DATASET_PATH_ROOT', '') != '': + dataset_dir = env['MLC_DATASET_PATH_ROOT'] + if 
'DATASET_ANNOTATIONS_FILE_PATH' in env: + del (env['DATASET_ANNOTATIONS_FILE_PATH']) + else: + env['DATASET_ANNOTATIONS_FILE_PATH'] = env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] + dataset_dir = os.getcwd() # not used, just to keep the script happy + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " " + "'" + os.path.join(env['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", + "accuracy-openimages.py") + "'" + " --mlperf-accuracy-file " + "'" + os.path.join(result_dir, + "mlperf_log_accuracy.json") + "'" + " --openimages-dir " + "'" + dataset_dir + "'" + " --verbose > " + "'" + \ + out_file + "'" + + elif dataset == "imagenet": + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", + "accuracy-imagenet.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir, + "mlperf_log_accuracy.json") + "' --imagenet-val-file '" + os.path.join(env['MLC_DATASET_AUX_PATH'], + "val.txt") + "' --dtype " + env.get('MLC_ACCURACY_DTYPE', "float32") + " > '" + out_file + "'" + + elif dataset == "squad": + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_BERT_PATH'], + "accuracy-squad.py") + "' --val_data '" + env['MLC_DATASET_SQUAD_VAL_PATH'] + \ + "' --log_file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ + "' --vocab_file '" + env['MLC_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH'] + \ + "' --out_file '" + os.path.join(result_dir, 'predictions.json') + \ + "' --features_cache_file '" + os.path.join(env['MLC_MLPERF_INFERENCE_BERT_PATH'], 'eval_features.pickle') + \ + "' --output_dtype " + env['MLC_ACCURACY_DTYPE'] + env.get( + 'MLC_OUTPUT_TRANSPOSED', '') + max_examples_string + " > '" + out_file + "'" + + elif dataset == "cnndm": + if env.get('MLC_MLPERF_IMPLEMENTATION', '') == 'intel': + accuracy_checker_file = env['MLC_MLPERF_INFERENCE_INTEL_GPTJ_ACCURACY_FILE_WITH_PATH'] + env['+PYTHONPATH'] = 
[os.path.dirname(env['MLC_MLPERF_INFERENCE_INTEL_GPTJ_DATASET_FILE_WITH_PATH'])] + [ + os.path.dirname(env['MLC_MLPERF_INFERENCE_INTEL_GPTJ_DATASET_ITEM_FILE_WITH_PATH'])] + env['+PYTHONPATH'] + suffix_string = " --model-name-or-path '" + \ + env['GPTJ_CHECKPOINT_PATH'] + "'" + else: + accuracy_checker_file = os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "language", "gpt-j", + "evaluation.py") + suffix_string = " --dtype " + \ + env.get('MLC_ACCURACY_DTYPE', "float32") + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + accuracy_checker_file + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ + "' --dataset-file '" + \ + env['MLC_DATASET_EVAL_PATH'] + "'" + \ + suffix_string + " > '" + out_file + "'" + + elif dataset == "openorca": + accuracy_checker_file = os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "language", "llama2-70b", + "evaluate-accuracy.py") + if env.get('MLC_VLLM_SERVER_MODEL_NAME', '') == '': + checkpoint_path = env['MLC_ML_MODEL_LLAMA2_FILE_WITH_PATH'] + else: + checkpoint_path = env['MLC_VLLM_SERVER_MODEL_NAME'] + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + accuracy_checker_file + "' --checkpoint-path '" + checkpoint_path + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ + "' --dataset-file '" + env['MLC_DATASET_PREPROCESSED_PATH'] + "'" + " --dtype " + env.get( + 'MLC_ACCURACY_DTYPE', "int32") + " > '" + out_file + "'" + + elif dataset == "openorca-gsm8k-mbxp-combined": + accuracy_checker_file = os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "language", "mixtral-8x7b", + "evaluate-accuracy.py") + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + accuracy_checker_file + "' --checkpoint-path '" + env['MIXTRAL_CHECKPOINT_PATH'] + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ + "' --dataset-file '" + env['MLC_DATASET_MIXTRAL_PREPROCESSED_PATH'] + "'" + \ + " --dtype " + env.get('MLC_ACCURACY_DTYPE', + "float32") + " > '" + 
out_file + "'" + + elif dataset == "coco2014": + env['+PYTHONPATH'] = [ + os.path.join( + env['MLC_MLPERF_INFERENCE_SOURCE'], + "text_to_image", + "tools"), + os.path.join( + env['MLC_MLPERF_INFERENCE_SOURCE'], + "text_to_image", + "tools", + "fid")] + extra_options = "" + + if env.get('MLC_SDXL_STATISTICS_FILE_PATH', '') != '': + extra_options += ( + f""" --statistics-path '{ + env['MLC_SDXL_STATISTICS_FILE_PATH']}'""" + ) + + if env.get('MLC_SDXL_COMPLIANCE_IMAGES_PATH', '') != '': + extra_options += ( + f""" --compliance-images-path '{ + env['MLC_SDXL_COMPLIANCE_IMAGES_PATH']}' """ + ) + else: + extra_options += f""" --compliance-images-path '{ + os.path.join( + result_dir, "images")}' """ + + if env.get('MLC_COCO2014_SAMPLE_ID_PATH', '') != '': + extra_options += ( + f" --ids-path '{env['MLC_COCO2014_SAMPLE_ID_PATH']}' " + ) + + if env.get('MLC_SDXL_ACCURACY_RUN_DEVICE', '') != '': + extra_options += ( + f" --device '{env['MLC_SDXL_ACCURACY_RUN_DEVICE']}' " + ) + + # env['DATASET_ANNOTATIONS_FILE_PATH'] = env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "text_to_image", "tools", + "accuracy_coco.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ + "' --caption-path '" + os.path.join( + env['MLC_MLPERF_INFERENCE_SOURCE'], + "text_to_image", + "coco2014", + "captions", + "captions_source.tsv") + "'" + extra_options + " > '" + out_file + "'" + + elif dataset == "kits19": + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_3DUNET_PATH'], + "accuracy_kits.py") + \ + "' --preprocessed_data_dir '" + env['MLC_DATASET_PREPROCESSED_PATH'] +\ + "' --postprocessed_data_dir '" + result_dir +\ + "' --log_file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ + "' --output_dtype " + \ + env['MLC_ACCURACY_DTYPE'] + " > '" + out_file + "'" + + elif dataset == "librispeech": + CMD = 
env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_RNNT_PATH'], + "accuracy_eval.py") + \ + "' --dataset_dir '" + os.path.join(env['MLC_DATASET_PREPROCESSED_PATH'], "..") +\ + "' --manifest '" + env['MLC_DATASET_PREPROCESSED_JSON'] +\ + "' --log_dir '" + result_dir + \ + "' --output_dtype " + \ + env['MLC_ACCURACY_DTYPE'] + " > '" + out_file + "'" + + elif dataset == "terabyte": + extra_options = "" + if env.get('MLC_DLRM_V2_AGGREGATION_TRACE_FILE_PATH', '') != '': + extra_options += ( + f""" --aggregation-trace-file '{ + env['MLC_DLRM_V2_AGGREGATION_TRACE_FILE_PATH']}' """ + ) + if env.get('MLC_DLRM_V2_DAY23_FILE_PATH', '') != '': + extra_options += ( + f""" --day-23-file '{ + env['MLC_DLRM_V2_DAY23_FILE_PATH']}' """ + ) + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_DLRM_V2_PATH'], "pytorch", "tools", + "accuracy-dlrm.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir, + "mlperf_log_accuracy.json") + "'" + extra_options + \ + " --dtype " + env.get('MLC_ACCURACY_DTYPE', + "float32") + " > '" + out_file + "'" + + elif dataset == "igbh": + if env.get('MLC_DATASET_IGBH_SIZE', '') == '': + if env.get('MLC_MLPERF_SUBMISSION_GENERATION_STYLE', + '') == "full": + env['MLC_DATASET_IGBH_SIZE'] = "full" + else: + env['MLC_DATASET_IGBH_SIZE'] = "tiny" + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "graph", "R-GAT", "tools", "accuracy_igbh.py") + "' --mlperf-accuracy-file '" + os.path.join( + result_dir, "mlperf_log_accuracy.json") + "' --dataset-path '" + env['MLC_DATASET_IGBH_PATH'] + "' --dataset-size '" + env['MLC_DATASET_IGBH_SIZE'] + "' --output-file '" + out_file + "'" + + elif dataset == "dataset_llama3": + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "language", "llama3.1-405b", "evaluate-accuracy.py") + "' --checkpoint-path '" + env['MLC_ML_MODEL_LLAMA3_CHECKPOINT_PATH'] + "' 
--mlperf-accuracy-file '" + os.path.join( + result_dir, "mlperf_log_accuracy.json") + "' --dtype '" + env['MLC_ACCURACY_DTYPE'] + "' --dataset-file '" + env['MLC_DATASET_LLAMA3_PATH'] + "' > '" + out_file + "'" + + elif dataset == "waymo": + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "automotive", "3d-object-detection", "accuracy_waymo.py") + "' --mlperf-accuracy-file '" + os.path.join( + result_dir, "mlperf_log_accuracy.json") + "' --waymo-dir '" + env['MLC_DATASET_WAYMO_PATH'] + "' > '" + out_file + "'" + + elif dataset == "nuscenes": + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_AUTOMOTIVE_BEVFORMER_PATH'], "accuracy_nuscenes_cpu.py") + "' --mlperf-accuracy-file '" + os.path.join( + result_dir, "mlperf_log_accuracy.json") + "' --nuscenes-dir '" + env['MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH'] + "' --config '" + os.path.join(env['MLC_MLPERF_AUTOMOTIVE_BEVFORMER_PATH'], "projects" + "configs" + "bevformer" + "bevformer_tiny.py") + "' > '" + out_file + "'" + + elif dataset == "cognata_ssd": + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_AUTOMOTIVE_SSD_RESNET50_PATH'], "accuracy_cognata.py") + "' --mlperf-accuracy-file '" + os.path.join( + result_dir, "mlperf_log_accuracy.json") + "' --dataset-path '" + env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'] + "' --dataset cognata --config '" + "baseline_8MP_ss_scales_fm1_5x5_all" + "' > '" + out_file + "'" + + elif dataset == "cognata_deeplab": + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_AUTOMOTIVE_DEEPLABV3PLUS_PATH'], "accuracy_cognata.py") + "' --mlperf-accuracy-file '" + os.path.join( + result_dir, "mlperf_log_accuracy.json") + "' --dataset-path '" + env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'] + "' > '" + out_file + "'" + + else: + return {'return': 1, 'error': 'Unsupported dataset'} + + run_cmds.append(CMD) + + if os_info['platform'] == 'windows': + 
env['MLC_RUN_CMDS'] = ( + '\n'.join(run_cmds)).replace( + "'", + '"').replace( + '>', + '^>') + else: + env['MLC_RUN_CMDS'] = "??".join(run_cmds) + + return {'return': 0} + + +def postprocess(i): + + os_info = i['os_info'] + env = i['env'] + state = i['state'] + logger = i['automation'].logger + xsep = ';' if os_info['platform'] == 'windows' else ':' + + results_dir = env.get("MLC_MLPERF_ACCURACY_RESULTS_DIR", "") + + results_dir_split = results_dir.split(xsep) + + for result_dir in results_dir_split: + accuracy_file = os.path.join(result_dir, "accuracy.txt") + + if os.path.exists(accuracy_file): + logger.info('') + logger.info('Accuracy file: {}'.format(accuracy_file)) + logger.info('') + + x = '' + with open(accuracy_file, "r") as fp: + x = fp.read() + + if x != '': + logger.info(f"{x}") + + # Trying to extract accuracy dict + for y in x.split('\n'): + if y.startswith('{') and y.endswith('}'): + + import json + + try: + z = json.loads(y) + state['app_mlperf_inference_accuracy'] = z + + break + except ValueError as e: + pass + + logger.info('') + return {'return': 0} From 5084ab41f5d11c6ccda76d38e528aab94040df65 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Sat, 24 May 2025 17:06:03 +0530 Subject: [PATCH 74/83] path fixes --- script/get-preprocessed-dataset-nuscenes/customize.py | 3 --- script/get-preprocessed-dataset-nuscenes/meta.yaml | 1 + 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/script/get-preprocessed-dataset-nuscenes/customize.py b/script/get-preprocessed-dataset-nuscenes/customize.py index bb6e2b914..5236b5c78 100644 --- a/script/get-preprocessed-dataset-nuscenes/customize.py +++ b/script/get-preprocessed-dataset-nuscenes/customize.py @@ -22,9 +22,6 @@ def postprocess(i): env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'] = os.path.join( env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'], env['MLC_DATASET_NUSCENES_EXTRACTED_FOLDER_NAME']) - 
env['MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH'] = os.path.join( - env['MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH'], - env['MLC_DATASET_NUSCENES_ACC_REQ_FILE_EXTRACTED_FOLDER_NAME']) if env.get( 'MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH', '') != '': shutil.copy( diff --git a/script/get-preprocessed-dataset-nuscenes/meta.yaml b/script/get-preprocessed-dataset-nuscenes/meta.yaml index 05245e475..6436574a6 100644 --- a/script/get-preprocessed-dataset-nuscenes/meta.yaml +++ b/script/get-preprocessed-dataset-nuscenes/meta.yaml @@ -15,6 +15,7 @@ tags: uid: 0e403a2861984a4e print_env_at_the_end: MLC_PREPROCESSED_DATASET_NUSCENES_PATH: Preprocessed Nuscenes dataset path + MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH: Path containing minimum files for accuracy checker variations: validation: default: true From 710d1560d66bba445e95d5871fffafd61b68912e Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Sat, 24 May 2025 22:12:57 +0530 Subject: [PATCH 75/83] resolve inference repo conflict + env variable updates --- .../customize.py | 6 +- .../meta.yaml | 2 +- .../meta.yaml | 15 ++ script/get-mlperf-automotive-src/customize.py | 16 +- script/get-mlperf-automotive-src/meta.yaml | 206 +++++++++--------- .../get-mlperf-automotive-utils/customize.py | 2 +- script/get-mlperf-automotive-utils/meta.yaml | 12 +- .../get-mlperf-inference-loadgen/customize.py | 3 - script/process-mlperf-accuracy/customize.py | 8 +- 9 files changed, 141 insertions(+), 129 deletions(-) diff --git a/script/app-mlperf-automotive-mlcommons-python/customize.py b/script/app-mlperf-automotive-mlcommons-python/customize.py index c1ac7368c..4ea2ce0f1 100644 --- a/script/app-mlperf-automotive-mlcommons-python/customize.py +++ b/script/app-mlperf-automotive-mlcommons-python/customize.py @@ -231,7 +231,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, scenario_extra_options + 
mode_extra_options + dataset_options elif env['MLC_MODEL'] in ['bevformer']: - run_dir = env['MLC_MLPERF_AUTOMOTIVE_BEVFORMER_PATH'] + run_dir = env['MLC_MLPERF_INFERENCE_BEVFORMER_PATH'] env['RUN_DIR'] = run_dir @@ -252,7 +252,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend onnx --dataset nuscenes --nuscenes-root {os.path.dirname(env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'].rstrip("/"))} --dataset-path {env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH']} --checkpoint {env['MLC_ML_MODEL_BEVFORMER_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}""" print(cmd) elif env['MLC_MODEL'] in ['ssd-resnet50']: - run_dir = env['MLC_MLPERF_AUTOMOTIVE_SSD_RESNET50_PATH'] + run_dir = env['MLC_MLPERF_INFERENCE_SSD_RESNET50_PATH'] env['RUN_DIR'] = run_dir @@ -266,7 +266,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {backend} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_SSD_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}""" elif env['MLC_MODEL'] in ['deeplab_v3+']: - run_dir = env['MLC_MLPERF_AUTOMOTIVE_DEEPLABV3PLUS_PATH'] + run_dir = env['MLC_MLPERF_INFERENCE_DEEPLABV3PLUS_PATH'] env['RUN_DIR'] = run_dir diff --git a/script/app-mlperf-automotive-mlcommons-python/meta.yaml b/script/app-mlperf-automotive-mlcommons-python/meta.yaml index 212a45bc0..eef2f49bb 100644 --- a/script/app-mlperf-automotive-mlcommons-python/meta.yaml +++ 
b/script/app-mlperf-automotive-mlcommons-python/meta.yaml @@ -255,7 +255,7 @@ deps: # Creates user conf for given SUT - - tags: generate,user-conf,mlperf,inference + - tags: generate,user-conf,mlperf,inference,_automotive names: - user-conf-generator diff --git a/script/generate-mlperf-inference-user-conf/meta.yaml b/script/generate-mlperf-inference-user-conf/meta.yaml index fbba97b33..5f7738f56 100644 --- a/script/generate-mlperf-inference-user-conf/meta.yaml +++ b/script/generate-mlperf-inference-user-conf/meta.yaml @@ -94,3 +94,18 @@ deps: # Get SUT configs (System Under Test) - tags: get,sut,configs + +variations: + inference: + default: true + group: benchmark_wg + deps: + - tags: get,mlcommons,inference,src + names: + - inference-src + automotive: + group: automotive_wg + deps: + - tags: get,mlcommons,automotive,src + names: + - automotive-src \ No newline at end of file diff --git a/script/get-mlperf-automotive-src/customize.py b/script/get-mlperf-automotive-src/customize.py index df444b439..69b336134 100644 --- a/script/get-mlperf-automotive-src/customize.py +++ b/script/get-mlperf-automotive-src/customize.py @@ -89,12 +89,12 @@ def postprocess(i): env = i['env'] state = i['state'] - automotive_root = env['MLC_MLPERF_AUTOMOTIVE_SOURCE'] - env['MLC_MLPERF_AUTOMOTIVE_BEVFORMER_PATH'] = os.path.join( + automotive_root = env['MLC_MLPERF_INFERENCE_SOURCE'] + env['MLC_MLPERF_INFERENCE_BEVFORMER_PATH'] = os.path.join( automotive_root, 'automotive', 'camera-3d-detection') - env['MLC_MLPERF_AUTOMOTIVE_SSD_RESNET50_PATH'] = os.path.join( + env['MLC_MLPERF_INFERENCE_SSD_RESNET50_PATH'] = os.path.join( automotive_root, 'automotive', '2d-object-detection') - env['MLC_MLPERF_AUTOMOTIVE_DEEPLABV3PLUS_PATH'] = os.path.join( + env['MLC_MLPERF_INFERENCE_DEEPLABV3PLUS_PATH'] = os.path.join( automotive_root, 'automotive', 'semantic-segmentation') env['MLC_GET_DEPENDENT_CACHED_PATH'] = automotive_root @@ -106,23 +106,23 @@ def postprocess(i): if 
os.path.exists(os.path.join(automotive_root, "loadgen", "VERSION.txt")): with open(os.path.join(automotive_root, "loadgen", "VERSION.txt")) as f: version_info = f.read().strip() - env['MLC_MLPERF_AUTOMOTIVE_SOURCE_VERSION'] = version_info + env['MLC_MLPERF_INFERENCE_SOURCE_VERSION'] = version_info if is_true(env.get('MLC_GET_MLPERF_IMPLEMENTATION_ONLY', '')): return {'return': 0} - env['MLC_MLPERF_AUTOMOTIVE_CONF_PATH'] = os.path.join( + env['MLC_MLPERF_INFERENCE_CONF_PATH'] = os.path.join( automotive_root, 'mlperf.conf') env['+PYTHONPATH'].append( os.path.join( - env['MLC_MLPERF_AUTOMOTIVE_SOURCE'], + env['MLC_MLPERF_INFERENCE_SOURCE'], 'tools', 'submission')) # To be uncommented after Pablo's PR is merged: https://github.com/mlcommons/mlperf_automotive/pull/14 # valid_models = get_valid_models( # env['MLC_MLPERF_LAST_RELEASE'], - # env['MLC_MLPERF_AUTOMOTIVE_SOURCE']) + # env['MLC_MLPERF_INFERENCE_SOURCE']) # state['MLC_MLPERF_AUTOMOTIVE_MODELS'] = valid_models diff --git a/script/get-mlperf-automotive-src/meta.yaml b/script/get-mlperf-automotive-src/meta.yaml index 9a8539c99..9fa26d5ca 100644 --- a/script/get-mlperf-automotive-src/meta.yaml +++ b/script/get-mlperf-automotive-src/meta.yaml @@ -1,103 +1,103 @@ -alias: get-mlperf-automotive-src -automation_alias: script -automation_uid: 5b4e0237da074764 -cache: true -category: MLPerf benchmark support -default_env: - MLC_GIT_CHECKOUT_FOLDER: automotive - MLC_GIT_DEPTH: --depth 4 - MLC_GIT_PATCH: 'no' - MLC_GIT_RECURSE_SUBMODULES: '' -default_version: master -deps: -- tags: detect,os -- names: - - python - - python3 - tags: get,python3 -new_env_keys: -- MLC_MLPERF_AUTOMOTIVE_BEVFORMER_PATH -- MLC_MLPERF_AUTOMOTIVE_SSD_RESNET50_PATH -- MLC_MLPERF_AUTOMOTIVE_DEEPLABV3PLUS_PATH -- MLC_MLPERF_LAST_RELEASE -- MLC_MLPERF_AUTOMOTIVE_SOURCE -- MLC_MLPERF_INFERENCE_SOURCE_VERSION -- +PYTHONPATH -prehook_deps: -- env: - MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_MLPERF_AUTOMOTIVE_SOURCE - extra_cache_tags: automotive,src - 
force_env_keys: - - MLC_GIT_* - names: - - automotive-git-repo - tags: get,git,repo - update_tags_from_env_with_prefix: - _branch.: - - MLC_GIT_CHECKOUT - _repo.: - - MLC_GIT_URL - _sha.: - - MLC_GIT_SHA - _submodules.: - - MLC_GIT_SUBMODULES -print_env_at_the_end: - MLC_MLPERF_AUTOMOTIVE_SOURCE: Path to MLPerf automotive benchmark source -tags: -- get -- src -- source -- automotive -- automotive-src -- automotive-source -- mlperf -- mlcommons -uid: c3842e6e35d947ef -variations: - branch.#: - default_version: custom - env: - MLC_GIT_CHECKOUT: '#' - group: checkout - full-history: - env: - MLC_GIT_DEPTH: '' - group: git-history - no-recurse-submodules: - env: - MLC_GIT_RECURSE_SUBMODULES: '' - patch: - ad: - automotive-git-repo: - tags: _patch - env: - MLC_GIT_PATCH: 'yes' - pybind: - env: - MLC_SUBMODULE_PYBIND: 'yes' - recurse-submodules: - env: - MLC_GIT_RECURSE_SUBMODULES: ' --recurse-submodules' - repo.#: - env: - MLC_GIT_URL: '#' - sha.#: - env: - MLC_GIT_SHA: '#' - group: checkout - short-history: - default: true - env: - MLC_GIT_DEPTH: --depth 10 - group: git-history - submodules.#: - env: - MLC_GIT_SUBMODULES: '#' -versions: - custom: - env: - MLC_MLPERF_LAST_RELEASE: v0.5 - master: - env: - MLC_MLPERF_LAST_RELEASE: v0.5 - MLC_TMP_GIT_CHECKOUT: master +alias: get-mlperf-automotive-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: MLPerf benchmark support +default_env: + MLC_GIT_CHECKOUT_FOLDER: automotive + MLC_GIT_DEPTH: --depth 4 + MLC_GIT_PATCH: 'no' + MLC_GIT_RECURSE_SUBMODULES: '' +default_version: master +deps: +- tags: detect,os +- names: + - python + - python3 + tags: get,python3 +new_env_keys: +- MLC_MLPERF_INFERENCE_BEVFORMER_PATH +- MLC_MLPERF_INFERENCE_SSD_RESNET50_PATH +- MLC_MLPERF_INFERENCE_DEEPLABV3PLUS_PATH +- MLC_MLPERF_LAST_RELEASE +- MLC_MLPERF_INFERENCE_SOURCE +- MLC_MLPERF_INFERENCE_SOURCE_VERSION +- +PYTHONPATH +prehook_deps: +- env: + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_MLPERF_INFERENCE_SOURCE + 
extra_cache_tags: automotive,src + force_env_keys: + - MLC_GIT_* + names: + - automotive-git-repo + tags: get,git,repo + update_tags_from_env_with_prefix: + _branch.: + - MLC_GIT_CHECKOUT + _repo.: + - MLC_GIT_URL + _sha.: + - MLC_GIT_SHA + _submodules.: + - MLC_GIT_SUBMODULES +print_env_at_the_end: + MLC_MLPERF_INFERENCE_SOURCE: Path to MLPerf automotive benchmark source +tags: +- get +- src +- source +- automotive +- automotive-src +- automotive-source +- mlperf +- mlcommons +uid: c3842e6e35d947ef +variations: + branch.#: + default_version: custom + env: + MLC_GIT_CHECKOUT: '#' + group: checkout + full-history: + env: + MLC_GIT_DEPTH: '' + group: git-history + no-recurse-submodules: + env: + MLC_GIT_RECURSE_SUBMODULES: '' + patch: + ad: + automotive-git-repo: + tags: _patch + env: + MLC_GIT_PATCH: 'yes' + pybind: + env: + MLC_SUBMODULE_PYBIND: 'yes' + recurse-submodules: + env: + MLC_GIT_RECURSE_SUBMODULES: ' --recurse-submodules' + repo.#: + env: + MLC_GIT_URL: '#' + sha.#: + env: + MLC_GIT_SHA: '#' + group: checkout + short-history: + default: true + env: + MLC_GIT_DEPTH: --depth 10 + group: git-history + submodules.#: + env: + MLC_GIT_SUBMODULES: '#' +versions: + custom: + env: + MLC_MLPERF_LAST_RELEASE: v0.5 + master: + env: + MLC_MLPERF_LAST_RELEASE: v0.5 + MLC_TMP_GIT_CHECKOUT: master diff --git a/script/get-mlperf-automotive-utils/customize.py b/script/get-mlperf-automotive-utils/customize.py index d1ab8ab70..7ae5d02bd 100644 --- a/script/get-mlperf-automotive-utils/customize.py +++ b/script/get-mlperf-automotive-utils/customize.py @@ -21,7 +21,7 @@ def preprocess(i): env['+PYTHONPATH'] = [utils_path] submission_checker_dir = os.path.join( - env['MLC_MLPERF_AUTOMOTIVE_SOURCE'], "tools", "submission") + env['MLC_MLPERF_INFERENCE_SOURCE'], "tools", "submission") sys.path.append(submission_checker_dir) sys.path.append(utils_path) diff --git a/script/get-mlperf-automotive-utils/meta.yaml b/script/get-mlperf-automotive-utils/meta.yaml index b76c2db19..bdd5c667b 
100644 --- a/script/get-mlperf-automotive-utils/meta.yaml +++ b/script/get-mlperf-automotive-utils/meta.yaml @@ -16,10 +16,10 @@ deps: - automotive-src new_env_keys: - '+PYTHONPATH' - - MLC_MLPERF_AUTOMOTIVE_BEVFORMER_PATH - - MLC_MLPERF_AUTOMOTIVE_SSD_RESNET50_PATH - - MLC_MLPERF_AUTOMOTIVE_DEEPLABV3PLUS_PATH + - MLC_MLPERF_INFERENCE_BEVFORMER_PATH + - MLC_MLPERF_INFERENCE_SSD_RESNET50_PATH + - MLC_MLPERF_INFERENCE_DEEPLABV3PLUS_PATH - MLC_MLPERF_LAST_RELEASE - - MLC_MLPERF_AUTOMOTIVE_SOURCE - - MLC_MLPERF_AUTOMOTIVE_VERSION - - MLC_MLPERF_AUTOMOTIVE_SOURCE_VERSION \ No newline at end of file + - MLC_MLPERF_INFERENCE_SOURCE + - MLC_MLPERF_INFERENCE_VERSION + - MLC_MLPERF_INFERENCE_SOURCE_VERSION \ No newline at end of file diff --git a/script/get-mlperf-inference-loadgen/customize.py b/script/get-mlperf-inference-loadgen/customize.py index eab681224..4ae4b8b73 100644 --- a/script/get-mlperf-inference-loadgen/customize.py +++ b/script/get-mlperf-inference-loadgen/customize.py @@ -8,9 +8,6 @@ def preprocess(i): os_info = i['os_info'] env = i['env'] - if is_true(env.get('MLC_INFERENCE_AUTOMOTIVE_REPO', '')): - env['MLC_MLPERF_INFERENCE_SOURCE'] = env['MLC_MLPERF_AUTOMOTIVE_SOURCE'] - if is_true(env.get('MLC_TMP_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP', '')): i['run_script_input']['script_name'] = "donotrun" diff --git a/script/process-mlperf-accuracy/customize.py b/script/process-mlperf-accuracy/customize.py index adc518147..afb359ce7 100644 --- a/script/process-mlperf-accuracy/customize.py +++ b/script/process-mlperf-accuracy/customize.py @@ -209,15 +209,15 @@ def preprocess(i): result_dir, "mlperf_log_accuracy.json") + "' --waymo-dir '" + env['MLC_DATASET_WAYMO_PATH'] + "' > '" + out_file + "'" elif dataset == "nuscenes": - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_AUTOMOTIVE_BEVFORMER_PATH'], "accuracy_nuscenes_cpu.py") + "' --mlperf-accuracy-file '" + os.path.join( - result_dir, "mlperf_log_accuracy.json") + "' --nuscenes-dir 
'" + env['MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH'] + "' --config '" + os.path.join(env['MLC_MLPERF_AUTOMOTIVE_BEVFORMER_PATH'], "projects" + "configs" + "bevformer" + "bevformer_tiny.py") + "' > '" + out_file + "'" + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_BEVFORMER_PATH'], "accuracy_nuscenes_cpu.py") + "' --mlperf-accuracy-file '" + os.path.join( + result_dir, "mlperf_log_accuracy.json") + "' --nuscenes-dir '" + env['MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH'] + "' --config '" + os.path.join(env['MLC_MLPERF_INFERENCE_BEVFORMER_PATH'], "projects" + "configs" + "bevformer" + "bevformer_tiny.py") + "' > '" + out_file + "'" elif dataset == "cognata_ssd": - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_AUTOMOTIVE_SSD_RESNET50_PATH'], "accuracy_cognata.py") + "' --mlperf-accuracy-file '" + os.path.join( + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SSD_RESNET50_PATH'], "accuracy_cognata.py") + "' --mlperf-accuracy-file '" + os.path.join( result_dir, "mlperf_log_accuracy.json") + "' --dataset-path '" + env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'] + "' --dataset cognata --config '" + "baseline_8MP_ss_scales_fm1_5x5_all" + "' > '" + out_file + "'" elif dataset == "cognata_deeplab": - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_AUTOMOTIVE_DEEPLABV3PLUS_PATH'], "accuracy_cognata.py") + "' --mlperf-accuracy-file '" + os.path.join( + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_DEEPLABV3PLUS_PATH'], "accuracy_cognata.py") + "' --mlperf-accuracy-file '" + os.path.join( result_dir, "mlperf_log_accuracy.json") + "' --dataset-path '" + env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'] + "' > '" + out_file + "'" else: From 60c96ff2b6996e4acc8f704c6168ab6cbda28d26 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: 
Sat, 24 May 2025 22:41:42 +0530 Subject: [PATCH 76/83] fix variation groups --- script/generate-mlperf-inference-user-conf/meta.yaml | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/script/generate-mlperf-inference-user-conf/meta.yaml b/script/generate-mlperf-inference-user-conf/meta.yaml index 5f7738f56..4288bd443 100644 --- a/script/generate-mlperf-inference-user-conf/meta.yaml +++ b/script/generate-mlperf-inference-user-conf/meta.yaml @@ -84,14 +84,6 @@ deps: OUTPUT_BASE_DIR: - "on" - ######################################################################## - # Install MLPerf inference dependencies - - # Download MLPerf inference source - - tags: get,mlcommons,inference,src - names: - - inference-src - # Get SUT configs (System Under Test) - tags: get,sut,configs @@ -104,7 +96,7 @@ variations: names: - inference-src automotive: - group: automotive_wg + group: benchmark_wg deps: - tags: get,mlcommons,automotive,src names: From 82bebe1bde9ae4ddbead820cf2654e9de70442d8 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Sat, 24 May 2025 23:06:53 +0530 Subject: [PATCH 77/83] fix reference repo source --- script/run-mlperf-automotive-app/meta.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/script/run-mlperf-automotive-app/meta.yaml b/script/run-mlperf-automotive-app/meta.yaml index f42195f28..74b9e34c9 100644 --- a/script/run-mlperf-automotive-app/meta.yaml +++ b/script/run-mlperf-automotive-app/meta.yaml @@ -106,8 +106,8 @@ deps: skip_if_env: MLC_MLPERF_USE_DOCKER: [ on ] - names: - - inference-src - tags: get,mlcommons,inference,src + - automotive-src + tags: get,mlcommons,automotive,src skip_if_env: MLC_MLPERF_USE_DOCKER: [ on ] - tags: get,sut,description @@ -122,7 +122,7 @@ deps: skip_if_env: OUTPUT_BASE_DIR: [ on ] - tags: install,pip-package,for-mlc-python,_package.tabulate -- tags: get,mlperf,inference,utils +- tags: get,mlperf,automotive,utils skip_if_env: 
MLC_MLPERF_USE_DOCKER: [ on ] From 2e3a05b461b636cf1dd7d8a9875c8a66ec056585 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Sat, 24 May 2025 18:46:50 +0100 Subject: [PATCH 78/83] Create test-mlperf-automotive.yml --- .github/workflows/test-mlperf-automotive.yml | 44 ++++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 .github/workflows/test-mlperf-automotive.yml diff --git a/.github/workflows/test-mlperf-automotive.yml b/.github/workflows/test-mlperf-automotive.yml new file mode 100644 index 000000000..60bfbb501 --- /dev/null +++ b/.github/workflows/test-mlperf-automotive.yml @@ -0,0 +1,44 @@ +name: Test MLPerf automotive + +on: + pull_request_target: + branches: [ "main", "dev" ] + paths: + - 'script/**meta.yaml' + +jobs: + fetch-secret: + runs-on: ubuntu-latest + outputs: + gdrive_secret: ${{ steps.op-load-secret.outputs.GDRIVE_SERVICE_ACCOUNT_KEY }} + steps: + - name: Load secret from 1Password + id: op-load-secret + uses: 1password/load-secrets-action@v2 + with: + export-env: false + env: + OP_SERVICE_ACCOUNT_TOKEN: ${{ secrets.OP_SERVICE_ACCOUNT_TOKEN }} + GDRIVE_SERVICE_ACCOUNT_KEY: op://7basd2jirojjckncf6qnq3azai/bzbaco3uxoqs2rcyu42rvuccga/credential + + run-mlperf: + runs-on: ubuntu-latest + needs: + - fetch-secret + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 2 + - name: Set RCLONE Service account env var from secret + shell: bash + run: | + echo "::add-mask::${{ needs.fetch-secret.outputs.gdrive_secret }}" + echo "RCLONE_CONFIG_MLC_COGNATA_SERVICE_ACCOUNT_CREDENTIALS=${{ needs.fetch-secret.outputs.gdrive_secret }}" >> $GITHUB_ENV + echo "RCLONE_CONFIG_MLC_NUSCENES_SERVICE_ACCOUNT_CREDENTIALS=${{ needs.fetch-secret.outputs.gdrive_secret }}" >> $GITHUB_ENV + echo "RCLONE_CONFIG_MLC_WAYMO_SERVICE_ACCOUNT_CREDENTIALS=${{ needs.fetch-secret.outputs.gdrive_secret }}" >> $GITHUB_ENV + - name: Run MLPerf + run: | + pip install mlcflow + mlc pull repo ${{ github.event.pull_request.head.repo.html_url 
}} --branch=${{ github.event.pull_request.head.ref }} + mlc test script app,mlperf,automotive From a0f79ca8c383f1b0179c63f97dea76c3ce595408 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Sat, 24 May 2025 18:49:16 +0100 Subject: [PATCH 79/83] Update run-tests-on-modified-meta.yml --- .../workflows/run-tests-on-modified-meta.yml | 94 ++++++------------- 1 file changed, 29 insertions(+), 65 deletions(-) diff --git a/.github/workflows/run-tests-on-modified-meta.yml b/.github/workflows/run-tests-on-modified-meta.yml index bfd979a32..244b18a6f 100644 --- a/.github/workflows/run-tests-on-modified-meta.yml +++ b/.github/workflows/run-tests-on-modified-meta.yml @@ -1,3 +1,4 @@ +# This workflow will run configured tests for any updated MLC script name: Test script on modified meta on: @@ -7,70 +8,38 @@ on: - 'script/**meta.yaml' jobs: - fetch-secret: - runs-on: ubuntu-latest - outputs: - gdrive_secret: ${{ steps.op-load-secret.outputs.GDRIVE_SERVICE_ACCOUNT_KEY }} - steps: - - name: Load secret from 1Password - id: op-load-secret - uses: 1password/load-secrets-action@v2 - with: - export-env: false - env: - OP_SERVICE_ACCOUNT_TOKEN: ${{ secrets.OP_SERVICE_ACCOUNT_TOKEN }} - GDRIVE_SERVICE_ACCOUNT_KEY: op://7basd2jirojjckncf6qnq3azai/bzbaco3uxoqs2rcyu42rvuccga/credential get_modified_files: runs-on: ubuntu-latest outputs: - processed_files: ${{ steps.filter-modified-files.outputs.processed_files }} + processed_files: ${{ steps.modified-files.outputs.processed_files }} steps: - - name: Checkout - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Setup Python - uses: actions/setup-python@v4 - with: - python-version: '3.x' - - - name: Install dependencies - run: pip install pyyaml - - - name: Fetch base branch - run: | - git fetch origin +refs/heads/${{ github.event.pull_request.base.ref }}:refs/remotes/origin/${{ github.event.pull_request.base.ref }} - - - name: Get list of changed files - id: modified-files - run: | - git diff --name-only origin/${{ 
github.event.pull_request.base.ref }}...HEAD > changed_files.txt - files=$(paste -sd, changed_files.txt) - echo "files=$files" >> $GITHUB_OUTPUT - - - name: Filter changed files - id: filter-modified-files - env: - FILES: ${{ steps.modified-files.outputs.files }} - run: | - processed=$(echo "$FILES" | python3 .github/scripts/list_modified_files.py) - echo "processed_files<> $GITHUB_OUTPUT - echo "$processed" >> $GITHUB_OUTPUT - echo "EOF" >> $GITHUB_OUTPUT - - - name: Debug processed_files output - run: | - echo "Processed files output:" - echo "${{ steps.filter-modified-files.outputs.processed_files }}" + - name: 'Checkout' + uses: actions/checkout@v4 + with: + fetch-depth: 2 + + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: '3.x' + + - name: Install dependencies + run: | + pip install pyyaml + + - name: Get changed files + id: modified-files + run: | + git remote add upstream ${{ github.event.pull_request.base.repo.clone_url }} + git fetch upstream + changed_files=$(git diff upstream/${{ github.event.pull_request.base.ref }} --name-only) + echo "$changed_files" | python3 .github/scripts/list_modified_files.py process_modified_files: runs-on: ubuntu-latest - needs: - - get_modified_files - - fetch-secret - if: needs.determine_modified_files.outputs.processed_files != '[]' + needs: get_modified_files + if: needs.determine_modified_files.outputs.processed_files != '[]' && needs.determine_modified_files.outputs.processed_files != '' strategy: fail-fast: false matrix: @@ -81,16 +50,11 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 2 - - name: Set RCLONE Service account env var from secret - shell: bash - run: | - echo "::add-mask::${{ needs.fetch-secret.outputs.gdrive_secret }}" - echo "RCLONE_CONFIG_MLC_COGNATA_SERVICE_ACCOUNT_CREDENTIALS=${{ needs.fetch-secret.outputs.gdrive_secret }}" >> $GITHUB_ENV - echo "RCLONE_CONFIG_MLC_NUSCENES_SERVICE_ACCOUNT_CREDENTIALS=${{ needs.fetch-secret.outputs.gdrive_secret }}" >> $GITHUB_ENV - 
echo "RCLONE_CONFIG_MLC_WAYMO_SERVICE_ACCOUNT_CREDENTIALS=${{ needs.fetch-secret.outputs.gdrive_secret }}" >> $GITHUB_ENV + - name: Process meta.yaml file run: | - echo "Processing ${{ matrix.file_info.file }} (run #${{ matrix.file_info.num_run }})" + echo "Processing ${{ matrix.file_info.file }} with run number ${{ matrix.file_info.num_run }}" + pip install mlcflow mlc pull repo ${{ github.event.pull_request.head.repo.html_url }} --branch=${{ github.event.pull_request.head.ref }} - mlc test script ${{ matrix.file_info.uid }} --test_input_index=${{ matrix.file_info.num_run }} --docker_mlc_repo=${{ github.event.pull_request.head.repo.html_url }} --docker_mlc_repo_branch=${{ github.event.pull_request.head.ref }} --quiet + mlc test script ${{ matrix.file_info.uid}} --test_input_index=${{ matrix.file_info.num_run }} --docker_mlc_repo=${{ github.event.pull_request.head.repo.html_url }} --docker_mlc_repo_branch=${{ github.event.pull_request.head.ref }} --quiet From 4d8d499bae6d29576f4e9356d2ce92dd878ebc37 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Sat, 24 May 2025 23:43:34 +0530 Subject: [PATCH 80/83] remove code for waymo --- .github/workflows/test-mlperf-automotive.yml | 87 ++++++++++---------- 1 file changed, 43 insertions(+), 44 deletions(-) diff --git a/.github/workflows/test-mlperf-automotive.yml b/.github/workflows/test-mlperf-automotive.yml index 60bfbb501..a03560dc1 100644 --- a/.github/workflows/test-mlperf-automotive.yml +++ b/.github/workflows/test-mlperf-automotive.yml @@ -1,44 +1,43 @@ -name: Test MLPerf automotive - -on: - pull_request_target: - branches: [ "main", "dev" ] - paths: - - 'script/**meta.yaml' - -jobs: - fetch-secret: - runs-on: ubuntu-latest - outputs: - gdrive_secret: ${{ steps.op-load-secret.outputs.GDRIVE_SERVICE_ACCOUNT_KEY }} - steps: - - name: Load secret from 1Password - id: op-load-secret - uses: 1password/load-secrets-action@v2 - with: - export-env: false - env: - 
OP_SERVICE_ACCOUNT_TOKEN: ${{ secrets.OP_SERVICE_ACCOUNT_TOKEN }} - GDRIVE_SERVICE_ACCOUNT_KEY: op://7basd2jirojjckncf6qnq3azai/bzbaco3uxoqs2rcyu42rvuccga/credential - - run-mlperf: - runs-on: ubuntu-latest - needs: - - fetch-secret - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - fetch-depth: 2 - - name: Set RCLONE Service account env var from secret - shell: bash - run: | - echo "::add-mask::${{ needs.fetch-secret.outputs.gdrive_secret }}" - echo "RCLONE_CONFIG_MLC_COGNATA_SERVICE_ACCOUNT_CREDENTIALS=${{ needs.fetch-secret.outputs.gdrive_secret }}" >> $GITHUB_ENV - echo "RCLONE_CONFIG_MLC_NUSCENES_SERVICE_ACCOUNT_CREDENTIALS=${{ needs.fetch-secret.outputs.gdrive_secret }}" >> $GITHUB_ENV - echo "RCLONE_CONFIG_MLC_WAYMO_SERVICE_ACCOUNT_CREDENTIALS=${{ needs.fetch-secret.outputs.gdrive_secret }}" >> $GITHUB_ENV - - name: Run MLPerf - run: | - pip install mlcflow - mlc pull repo ${{ github.event.pull_request.head.repo.html_url }} --branch=${{ github.event.pull_request.head.ref }} - mlc test script app,mlperf,automotive +name: Test MLPerf automotive + +on: + pull_request_target: + branches: [ "main", "dev" ] + paths: + - 'script/**meta.yaml' + +jobs: + fetch-secret: + runs-on: ubuntu-latest + outputs: + gdrive_secret: ${{ steps.op-load-secret.outputs.GDRIVE_SERVICE_ACCOUNT_KEY }} + steps: + - name: Load secret from 1Password + id: op-load-secret + uses: 1password/load-secrets-action@v2 + with: + export-env: false + env: + OP_SERVICE_ACCOUNT_TOKEN: ${{ secrets.OP_SERVICE_ACCOUNT_TOKEN }} + GDRIVE_SERVICE_ACCOUNT_KEY: op://7basd2jirojjckncf6qnq3azai/bzbaco3uxoqs2rcyu42rvuccga/credential + + run-mlperf: + runs-on: ubuntu-latest + needs: + - fetch-secret + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 2 + - name: Set RCLONE Service account env var from secret + shell: bash + run: | + echo "::add-mask::${{ needs.fetch-secret.outputs.gdrive_secret }}" + echo 
"RCLONE_CONFIG_MLC_COGNATA_SERVICE_ACCOUNT_CREDENTIALS=${{ needs.fetch-secret.outputs.gdrive_secret }}" >> $GITHUB_ENV + echo "RCLONE_CONFIG_MLC_NUSCENES_SERVICE_ACCOUNT_CREDENTIALS=${{ needs.fetch-secret.outputs.gdrive_secret }}" >> $GITHUB_ENV + - name: Run MLPerf + run: | + pip install mlcflow + mlc pull repo ${{ github.event.pull_request.head.repo.html_url }} --branch=${{ github.event.pull_request.head.ref }} + mlc test script app,mlperf,automotive From 121860158061930c0b51df64d18aa72f35607598 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Sat, 24 May 2025 21:12:17 +0100 Subject: [PATCH 81/83] Update run-tests-on-modified-meta.yml --- .github/workflows/run-tests-on-modified-meta.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/run-tests-on-modified-meta.yml b/.github/workflows/run-tests-on-modified-meta.yml index 244b18a6f..a2f3a5fbe 100644 --- a/.github/workflows/run-tests-on-modified-meta.yml +++ b/.github/workflows/run-tests-on-modified-meta.yml @@ -33,7 +33,7 @@ jobs: run: | git remote add upstream ${{ github.event.pull_request.base.repo.clone_url }} git fetch upstream - changed_files=$(git diff upstream/${{ github.event.pull_request.base.ref }} --name-only) + changed_files=$(git diff upstream/${{ github.event.pull_request.base.ref }} --name-only | paste -sd, -) echo "$changed_files" | python3 .github/scripts/list_modified_files.py process_modified_files: From 51993a98907b65a081119edad195d0d935e1300e Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Mon, 26 May 2025 11:24:18 +0530 Subject: [PATCH 82/83] clone automotive repo with particular commit --- .../app-mlperf-automotive-mlcommons-python/meta.yaml | 10 ++++++++++ script/get-mlperf-inference-loadgen/meta.yaml | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/script/app-mlperf-automotive-mlcommons-python/meta.yaml b/script/app-mlperf-automotive-mlcommons-python/meta.yaml index eef2f49bb..7cea0e884 
100644 --- a/script/app-mlperf-automotive-mlcommons-python/meta.yaml +++ b/script/app-mlperf-automotive-mlcommons-python/meta.yaml @@ -399,6 +399,11 @@ variations: abtf-demo-model: group: models + add_deps_recursive: + automotive-src: + env: + MLC_GIT_SHA: + - "ee526dc63d9ca2636000343c5d2d16132145719e" deps: - tags: get,generic-python-lib,_opencv-python - tags: get,generic-python-lib,_numpy @@ -421,6 +426,11 @@ variations: abtf-poc-model: group: models default: true + add_deps_recursive: + automotive-src: + env: + MLC_GIT_SHA: + - "ee526dc63d9ca2636000343c5d2d16132145719e" deps: - tags: get,generic-python-lib,_opencv-python - tags: get,generic-python-lib,_numpy diff --git a/script/get-mlperf-inference-loadgen/meta.yaml b/script/get-mlperf-inference-loadgen/meta.yaml index 09e2529af..ed75db192 100644 --- a/script/get-mlperf-inference-loadgen/meta.yaml +++ b/script/get-mlperf-inference-loadgen/meta.yaml @@ -34,7 +34,7 @@ deps: - MLC_GIT_URL - MLC_GIT_CHECKOUT names: - - inference-src-loadgen-automotive + - automotive-src enable_if_env: MLC_INFERENCE_AUTOMOTIVE_REPO: - 'YES' From 440474e7bf48177df0d8e0f0ade3c8d480a44e11 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Mon, 26 May 2025 13:05:58 +0530 Subject: [PATCH 83/83] fix sha --- script/app-mlperf-automotive-mlcommons-python/meta.yaml | 8 ++------ script/get-git-repo/meta.yaml | 2 +- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/script/app-mlperf-automotive-mlcommons-python/meta.yaml b/script/app-mlperf-automotive-mlcommons-python/meta.yaml index 7cea0e884..dd288d46e 100644 --- a/script/app-mlperf-automotive-mlcommons-python/meta.yaml +++ b/script/app-mlperf-automotive-mlcommons-python/meta.yaml @@ -401,9 +401,7 @@ variations: group: models add_deps_recursive: automotive-src: - env: - MLC_GIT_SHA: - - "ee526dc63d9ca2636000343c5d2d16132145719e" + tags: _sha.ee526dc63d9ca2636000343c5d2d16132145719e deps: - tags: get,generic-python-lib,_opencv-python - 
tags: get,generic-python-lib,_numpy @@ -428,9 +426,7 @@ variations: default: true add_deps_recursive: automotive-src: - env: - MLC_GIT_SHA: - - "ee526dc63d9ca2636000343c5d2d16132145719e" + tags: _sha.ee526dc63d9ca2636000343c5d2d16132145719e deps: - tags: get,generic-python-lib,_opencv-python - tags: get,generic-python-lib,_numpy diff --git a/script/get-git-repo/meta.yaml b/script/get-git-repo/meta.yaml index efdf3bf63..468468bb5 100644 --- a/script/get-git-repo/meta.yaml +++ b/script/get-git-repo/meta.yaml @@ -79,7 +79,7 @@ variations: git-history: full-history env: MLC_GIT_SHA: '#' - group: checkout + group: post-checkout short-history: default: true env: