From d8d139646a49e71c3836cff4f689208c80ada4d6 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Sun, 16 Feb 2025 18:07:07 +0000
Subject: [PATCH 01/13] Update test-nvidia-mlperf-inference-implementations.yml

---
 .../workflows/test-nvidia-mlperf-inference-implementations.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/test-nvidia-mlperf-inference-implementations.yml b/.github/workflows/test-nvidia-mlperf-inference-implementations.yml
index 2fac2632c..f6eb160a5 100644
--- a/.github/workflows/test-nvidia-mlperf-inference-implementations.yml
+++ b/.github/workflows/test-nvidia-mlperf-inference-implementations.yml
@@ -2,7 +2,7 @@ name: MLPerf Inference Nvidia implementations
 
 on:
   schedule:
-    - cron: "27 11 * * *"
+    - cron: "27 11 1 * *"
 
 jobs:
   run_nvidia:

From 30a313a8e857da032ce6c77c57c8225ca9c9dc4c Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Sun, 16 Feb 2025 21:28:03 +0000
Subject: [PATCH 02/13] Fix extract-file

---
 script/extract-file/customize.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/script/extract-file/customize.py b/script/extract-file/customize.py
index bedfe41f5..fd6ab8235 100644
--- a/script/extract-file/customize.py
+++ b/script/extract-file/customize.py
@@ -125,7 +125,7 @@ def preprocess(i):
             q + extract_to_folder + q
         env['MLC_EXTRACT_EXTRACTED_FILENAME'] = extract_to_folder
 
-    x = '"' if ' ' in filename else ''
+    x = q if ' ' in filename else ''
     env['MLC_EXTRACT_CMD'] = env['MLC_EXTRACT_PRE_CMD'] + env['MLC_EXTRACT_TOOL'] + ' ' + \
        env.get('MLC_EXTRACT_TOOL_EXTRA_OPTIONS', '') + \
         ' ' + env.get('MLC_EXTRACT_TOOL_OPTIONS', '') + ' ' + x + filename + x
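PATCH 02 makes the filename quoting consistent with the rest of the function: the code above already wraps extract_to_folder in the quote character q, so hard-coding '"' for the filename was the odd one out. A minimal sketch of the intended behaviour, assuming (the definition is not shown in this excerpt) that q holds the platform's quote character:

    # Sketch of the quoting logic behind the PATCH 02 fix.
    # Assumption: q is '"' on Windows and "'" elsewhere, matching how
    # extract_to_folder is quoted earlier in customize.py.
    import os

    q = '"' if os.name == 'nt' else "'"

    def quote_if_spaced(path: str) -> str:
        # Mirrors `x = q if ' ' in filename else ''`: only wrap paths with a space.
        x = q if ' ' in path else ''
        return x + path + x

    print(quote_if_spaced('archive.tar.gz'))     # archive.tar.gz
    print(quote_if_spaced('my archive.tar.gz'))  # wrapped in q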
From fbf1fb2a58da733240a745d5ff93cc98e3906534 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Sun, 16 Feb 2025 21:48:15 +0000
Subject: [PATCH 03/13] Update test-scc24-sdxl.yaml

---
 .github/workflows/test-scc24-sdxl.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/test-scc24-sdxl.yaml b/.github/workflows/test-scc24-sdxl.yaml
index becc3830d..e64ddfbe7 100644
--- a/.github/workflows/test-scc24-sdxl.yaml
+++ b/.github/workflows/test-scc24-sdxl.yaml
@@ -31,7 +31,7 @@ jobs:
         mlcr --tags=run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --pull_changes=yes --pull_inference_changes=yes --model=sdxl --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean
         mlcr --tags=run-mlperf,inference,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean
         mlcr --tags=generate,inference,submission --clean --run-checker --tar=yes --env.MLC_TAR_OUTFILE=submission.tar.gz --division=open --category=datacenter --run_style=test --adr.submission-checker.tags=_short-run --quiet --submitter=MLCommons --submission_dir=$HOME/scc_gh_action_submissions --results_dir=$HOME/scc_gh_action_results/test_results
-        mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/cm4mlperf-inference --repo_branch=mlperf-inference-results-scc24 --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/scc_gh_action_submissions
+        # mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/cm4mlperf-inference --repo_branch=mlperf-inference-results-scc24 --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/scc_gh_action_submissions
 
   build_nvidia:
     if: github.repository_owner == 'gateoverflow'
@@ -59,4 +59,4 @@ jobs:
         mlcr --tags=run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --pull_changes=yes --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --pull_changes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --hw_name=go-spr --custom_system_nvidia=yes --clean
         mlcr --tags=run-mlperf,inference,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean
         mlcr --tags=generate,inference,submission --clean --run-checker --tar=yes --env.MLC_TAR_OUTFILE=submission.tar.gz --division=open --category=datacenter --run_style=test --adr.submission-checker.tags=_short-run --quiet --submitter=MLCommons --submission_dir=$HOME/scc_gh_action_submissions --results_dir=$HOME/scc_gh_action_results/test_results
-        mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/cm4mlperf-inference --repo_branch=mlperf-inference-results-scc24 --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/scc_gh_action_submissions
+        # mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/cm4mlperf-inference --repo_branch=mlperf-inference-results-scc24 --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/scc_gh_action_submissions

From 485206ea6126928a9ab948d16aca04af808d536f Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Sun, 16 Feb 2025 21:58:26 +0000
Subject: [PATCH 04/13] Update test-mlperf-inference-sdxl.yaml

---
 .github/workflows/test-mlperf-inference-sdxl.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/test-mlperf-inference-sdxl.yaml b/.github/workflows/test-mlperf-inference-sdxl.yaml
index 2e287a0be..4a5482907 100644
--- a/.github/workflows/test-mlperf-inference-sdxl.yaml
+++ b/.github/workflows/test-mlperf-inference-sdxl.yaml
@@ -1,7 +1,7 @@
 name: MLPerf inference SDXL
 on:
   schedule:
-    - cron: "19 17 * * *"
+    - cron: "19 17 1 * *"
 
 jobs:
   build_reference:

From b9f60198d053124243323753df7f4e33ffee84f1 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Sun, 16 Feb 2025 21:58:42 +0000
Subject: [PATCH 05/13] Update test-mlperf-inference-sdxl.yaml

---
 .github/workflows/test-mlperf-inference-sdxl.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/test-mlperf-inference-sdxl.yaml b/.github/workflows/test-mlperf-inference-sdxl.yaml
index 4a5482907..b76abb2ee 100644
--- a/.github/workflows/test-mlperf-inference-sdxl.yaml
+++ b/.github/workflows/test-mlperf-inference-sdxl.yaml
@@ -21,5 +21,5 @@ jobs:
         export MLC_REPOS=$HOME/GH_MLC
         python3 -m pip install mlc-scripts
         mlc pull repo
-        mlcr --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --docker --model=sdxl --backend=${{ matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --quiet --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean
-        mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/gh_action_submissions
+        mlcr run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --docker --model=sdxl --backend=${{ matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --quiet --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean
+        mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/gh_action_submissions

From d9524a8fc154cc8f70a70643a7766652cb3276d1 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Sun, 16 Feb 2025 21:58:56 +0000
Subject: [PATCH 06/13] Update test-mlperf-inference-llama2.yml

---
 .github/workflows/test-mlperf-inference-llama2.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/test-mlperf-inference-llama2.yml b/.github/workflows/test-mlperf-inference-llama2.yml
index 70e4e4909..6e88b172f 100644
--- a/.github/workflows/test-mlperf-inference-llama2.yml
+++ b/.github/workflows/test-mlperf-inference-llama2.yml
@@ -5,7 +5,7 @@ name: MLPerf inference LLAMA2-70B
 
 on:
   schedule:
-    - cron: "59 04 * * *"
+    - cron: "59 04 1 * *"
 
 jobs:
   build_reference:
From 97305b8f25ad7dabf8118acfab7b5b42c55c95b2 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Sun, 16 Feb 2025 21:59:15 +0000
Subject: [PATCH 07/13] Update test-mlperf-inference-gptj.yml

---
 .github/workflows/test-mlperf-inference-gptj.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/test-mlperf-inference-gptj.yml b/.github/workflows/test-mlperf-inference-gptj.yml
index 341e2e818..d55e515d5 100644
--- a/.github/workflows/test-mlperf-inference-gptj.yml
+++ b/.github/workflows/test-mlperf-inference-gptj.yml
@@ -5,7 +5,7 @@ name: MLPerf inference GPT-J
 
 on:
   schedule:
-    - cron: "15 19 * * *"
+    - cron: "15 19 1 * *"
 
 jobs:
   build:

From 2c3750fe1ae76e0d2e6f351ff3a550cf6099b690 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Sun, 16 Feb 2025 21:59:36 +0000
Subject: [PATCH 08/13] Update test-mlperf-inference-mixtral.yml

---
 .github/workflows/test-mlperf-inference-mixtral.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/test-mlperf-inference-mixtral.yml b/.github/workflows/test-mlperf-inference-mixtral.yml
index eb47f853f..25c12c62b 100644
--- a/.github/workflows/test-mlperf-inference-mixtral.yml
+++ b/.github/workflows/test-mlperf-inference-mixtral.yml
@@ -2,7 +2,7 @@ name: MLPerf inference MIXTRAL-8x7B
 
 on:
   schedule:
-    - cron: "59 23 * * */5" # 30th minute and 20th hour => 20:30 UTC => 2 AM IST
+    - cron: "59 23 1 * */5" # 23:59 UTC on the 1st of the month and on days of week matching */5
 
 jobs:
   build_reference:

From 308633ae804d75f276223e3deae9cde0bb485a4c Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Sun, 16 Feb 2025 22:07:26 +0000
Subject: [PATCH 09/13] Support armnn download on macos

---
 script/get-lib-armnn/customize.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/script/get-lib-armnn/customize.py b/script/get-lib-armnn/customize.py
index 263850444..5b157fee4 100644
--- a/script/get-lib-armnn/customize.py
+++ b/script/get-lib-armnn/customize.py
@@ -11,7 +11,7 @@ def preprocess(i):
     version = env['MLC_LIB_ARMNN_VERSION']
     if env.get('MLC_HOST_PLATFORM_FLAVOR', '') == 'x86_64':
         url = f"https://github.com/ARM-software/armnn/releases/download/{version}/ArmNN-linux-x86_64.tar.gz"
-    elif env.get('MLC_HOST_PLATFORM_FLAVOR', '') == 'aarch64':
+    elif env.get('MLC_HOST_PLATFORM_FLAVOR', '') in [ 'arm64', 'aarch64']:
         url = f"https://github.com/ARM-software/armnn/releases/download/{version}/ArmNN-linux-aarch64.tar.gz"
 
     env['MLC_LIB_ARMNN_PREBUILT_BINARY_URL'] = url

From ea956734b5c4c88a39b5ad1e5a4e2363c2952a24 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
Date: Sun, 16 Feb 2025 22:07:47 +0000
Subject: [PATCH 10/13] [Automated Commit] Format Codebase [skip ci]

---
 script/get-lib-armnn/customize.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/script/get-lib-armnn/customize.py b/script/get-lib-armnn/customize.py
index 5b157fee4..b6aaae7a5 100644
--- a/script/get-lib-armnn/customize.py
+++ b/script/get-lib-armnn/customize.py
@@ -11,7 +11,7 @@ def preprocess(i):
     version = env['MLC_LIB_ARMNN_VERSION']
     if env.get('MLC_HOST_PLATFORM_FLAVOR', '') == 'x86_64':
         url = f"https://github.com/ARM-software/armnn/releases/download/{version}/ArmNN-linux-x86_64.tar.gz"
-    elif env.get('MLC_HOST_PLATFORM_FLAVOR', '') in [ 'arm64', 'aarch64']:
+    elif env.get('MLC_HOST_PLATFORM_FLAVOR', '') in ['arm64', 'aarch64']:
         url = f"https://github.com/ARM-software/armnn/releases/download/{version}/ArmNN-linux-aarch64.tar.gz"
 
     env['MLC_LIB_ARMNN_PREBUILT_BINARY_URL'] = url
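PATCH 09 widens the AArch64 check because macOS on Apple Silicon reports its machine type as arm64 while Linux reports aarch64; both should resolve to the same prebuilt ArmNN-linux-aarch64 archive. A self-contained sketch, assuming MLC_HOST_PLATFORM_FLAVOR is derived from platform.machine() (the version value below is hypothetical):

    # Sketch of the PATCH 09 platform dispatch.
    import platform

    version = "24.11"  # hypothetical ArmNN release tag
    flavor = platform.machine()  # 'arm64' on Apple Silicon, 'aarch64' on Linux

    if flavor == "x86_64":
        url = f"https://github.com/ARM-software/armnn/releases/download/{version}/ArmNN-linux-x86_64.tar.gz"
    elif flavor in ["arm64", "aarch64"]:
        # Both names map to the same aarch64 binary release.
        url = f"https://github.com/ARM-software/armnn/releases/download/{version}/ArmNN-linux-aarch64.tar.gz"
    else:
        raise RuntimeError(f"no prebuilt ArmNN binary for {flavor}")

    print(url)

PATCH 10 is the repository's auto-formatter normalizing the list literal introduced above.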
From 6d9525a19fd933b1fdf37009c5ddff59b4bbfff4 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Sun, 16 Feb 2025 22:13:23 +0000
Subject: [PATCH 11/13] Fix SW/HW notes

---
 script/generate-mlperf-inference-submission/customize.py | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/script/generate-mlperf-inference-submission/customize.py b/script/generate-mlperf-inference-submission/customize.py
index 7dda1acbb..a10ce68a5 100644
--- a/script/generate-mlperf-inference-submission/customize.py
+++ b/script/generate-mlperf-inference-submission/customize.py
@@ -160,15 +160,11 @@ def generate_submission(env, state, inp, submission_division):
     print('* MLPerf inference submitter: {}'.format(submitter))
 
     if env.get('MLC_MLPERF_SUT_SW_NOTES_EXTRA', '') != '':
-        sw_notes = f"""{
-            system_meta_tmp['sw_notes']} {
-            env['MLC_MLPERF_SUT_SW_NOTES_EXTRA']}"""
+        sw_notes = f"""{system_meta_tmp.get('sw_notes','')} {env['MLC_MLPERF_SUT_SW_NOTES_EXTRA']}"""
         system_meta_tmp['sw_notes'] = sw_notes
 
     if env.get('MLC_MLPERF_SUT_HW_NOTES_EXTRA', '') != '':
-        hw_notes = f"""{
-            system_meta_tmp['hw_notes']} {
-            env['MLC_MLPERF_SUT_HW_NOTES_EXTRA']}"""
+        hw_notes = f"""{system_meta_tmp.get('hw_notes', '')} {env['MLC_MLPERF_SUT_HW_NOTES_EXTRA']}"""
         system_meta_tmp['hw_notes'] = hw_notes
 
     path_submission = os.path.join(path_submission_division, submitter)
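The substantive fix in PATCH 11 is system_meta_tmp.get('sw_notes', ''): the old code indexed the key directly and raised KeyError whenever the system metadata carried no existing notes to append to. Collapsing the f-string onto one line also undoes an unreadable wrap that the auto-formatter had produced (the newlines sat inside the replacement fields, so they never reached the output). A small illustration of the failure mode and the fix:

    # Appending extra notes must not assume the key already exists.
    system_meta_tmp = {}  # no 'sw_notes' recorded for this system yet
    env = {"MLC_MLPERF_SUT_SW_NOTES_EXTRA": "example extra note"}

    if env.get("MLC_MLPERF_SUT_SW_NOTES_EXTRA", "") != "":
        # Old: system_meta_tmp['sw_notes'] -> KeyError. New: fall back to ''.
        sw_notes = f"""{system_meta_tmp.get('sw_notes', '')} {env['MLC_MLPERF_SUT_SW_NOTES_EXTRA']}"""
        system_meta_tmp["sw_notes"] = sw_notes

    print(system_meta_tmp)  # {'sw_notes': ' example extra note'}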
From cf8c42326a5ca11ec0358d3c9b1bed4bbbee41de Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Sun, 16 Feb 2025 22:19:07 +0000
Subject: [PATCH 12/13] Fix commands with mlcr

---
 ...t-amd-mlperf-inference-implementations.yml |  4 +--
 .../test-image-classification-onnx.yml        |  2 +-
 ...intel-mlperf-inference-implementations.yml |  4 +--
 .../workflows/test-mlc-script-features.yml    | 26 ++++++++--------
 .../test-mlperf-inference-abtf-poc.yml        |  2 +-
 ...bert-deepsparse-tf-onnxruntime-pytorch.yml |  6 ++--
 .../workflows/test-mlperf-inference-dlrm.yml  |  4 +--
 .../workflows/test-mlperf-inference-gptj.yml  |  4 +--
 .../test-mlperf-inference-llama2.yml          |  4 +--
 .../test-mlperf-inference-mixtral.yml         |  4 +--
 ...lperf-inference-mlcommons-cpp-resnet50.yml |  6 ++--
 .../test-mlperf-inference-resnet50.yml        |  6 ++--
 .../test-mlperf-inference-retinanet.yml       |  6 ++--
 .../workflows/test-mlperf-inference-rgat.yml  |  4 +--
 .../workflows/test-mlperf-inference-rnnt.yml  |  2 +-
 .../test-mlperf-inference-tvm-resnet50.yml    |  4 +--
 ...adgen-onnx-huggingface-bert-fp32-squad.yml |  2 +-
 ...vidia-mlperf-inference-implementations.yml |  6 ++--
 .../workflows/test-qaic-compute-sdk-build.yml |  4 +--
 .github/workflows/test-qaic-software-kit.yml  |  4 +--
 .github/workflows/test-scc24-sdxl.yaml        | 16 +++++-----
 .../README_aws_dl2q.24xlarge.md               | 16 +++++-----
 script/app-mlperf-inference/meta.yaml         |  2 +-
 .../run-template.sh                           | 18 +++++------
 script/benchmark-program/customize.py         |  2 +-
 script/get-platform-details/README-EXTRA.md   |  2 +-
 script/get-rocm-devices/README.md             |  2 +-
 .../run-all-mlperf-models/run-bert-macos.sh   | 10 +++----
 script/run-all-mlperf-models/run-bert.sh      | 10 +++----
 .../run-cpp-implementation.sh                 | 30 +++++++++----------
 .../run-mobilenet-models.sh                   | 12 ++++----
 .../run-all-mlperf-models/run-nvidia-4090.sh  |  4 +--
 .../run-all-mlperf-models/run-nvidia-a100.sh  |  4 +--
 script/run-all-mlperf-models/run-nvidia-t4.sh |  4 +--
 .../run-all-mlperf-models/run-pruned-bert.sh  |  4 +--
 .../run-reference-models.sh                   | 20 ++++++------
 .../run-resnet50-macos.sh                     | 10 +++----
 script/run-all-mlperf-models/run-resnet50.sh  | 10 +++----
 script/run-all-mlperf-models/run-retinanet-sh | 10 +++----
 script/run-all-mlperf-models/template.sh      | 10 +++----
 script/run-docker-container/customize.py      |  2 +-
 .../meta.yaml                                 |  2 +-
 script/run-terraform/README-about.md          |  2 +-
 43 files changed, 153 insertions(+), 153 deletions(-)

diff --git a/.github/workflows/test-amd-mlperf-inference-implementations.yml b/.github/workflows/test-amd-mlperf-inference-implementations.yml
index 4c4b6f749..bc01ad20c 100644
--- a/.github/workflows/test-amd-mlperf-inference-implementations.yml
+++ b/.github/workflows/test-amd-mlperf-inference-implementations.yml
@@ -22,5 +22,5 @@ jobs:
         export MLC_REPOS=$HOME/GH_MLC
         pip install --upgrade mlc-scripts
         mlc pull repo
-        mlcr --tags=run-mlperf,inference,_all-scenarios,_full,_r4.1-dev --execution_mode=valid --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=IntelSPR.24c --implementation=amd --backend=pytorch --category=datacenter --division=open --scenario=Offline --docker_dt=yes --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=rocm --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean --docker --quiet --docker_skip_run_cmd=yes
-        # mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=dev --commit_message="Results from GH action on SPR.24c" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=IntelSPR.24c
+        mlcr run-mlperf,inference,_all-scenarios,_full,_r4.1-dev --execution_mode=valid --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=IntelSPR.24c --implementation=amd --backend=pytorch --category=datacenter --division=open --scenario=Offline --docker_dt=yes --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=rocm --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean --docker --quiet --docker_skip_run_cmd=yes
+        # mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=dev --commit_message="Results from GH action on SPR.24c" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=IntelSPR.24c

diff --git a/.github/workflows/test-image-classification-onnx.yml b/.github/workflows/test-image-classification-onnx.yml
index 4edbb61b6..121c111ef 100644
--- a/.github/workflows/test-image-classification-onnx.yml
+++ b/.github/workflows/test-image-classification-onnx.yml
@@ -38,4 +38,4 @@ jobs:
         mlc pull repo ${{ github.event.pull_request.head.repo.html_url }} --branch=${{ github.event.pull_request.head.ref }}
     - name: Test image classification with ONNX
       run: |
-        mlcr --tags=python,app,image-classification,onnx --quiet
+        mlcr python,app,image-classification,onnx --quiet
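Every remaining hunk in PATCH 12 applies one mechanical rewrite: the script tags are passed to mlcr as a positional argument instead of through --tags=. The tag selection itself is unchanged; expressed as a string transform, the whole commit is:

    # The rewrite applied throughout PATCH 12 (illustrative command).
    old = 'mlcr --tags=run-mlperf,inference,_submission,_short --quiet'
    new = 'mlcr run-mlperf,inference,_submission,_short --quiet'

    def drop_tags_flag(cmd: str) -> str:
        return cmd.replace('mlcr --tags=', 'mlcr ')

    assert drop_tags_flag(old) == new

Note that --tags= options of other commands (e.g. mlc search cache --tags=...) are deliberately left untouched in the hunks below.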
diff --git a/.github/workflows/test-intel-mlperf-inference-implementations.yml b/.github/workflows/test-intel-mlperf-inference-implementations.yml
index 0041f9762..9e4d03e26 100644
--- a/.github/workflows/test-intel-mlperf-inference-implementations.yml
+++ b/.github/workflows/test-intel-mlperf-inference-implementations.yml
@@ -22,5 +22,5 @@ jobs:
         export MLC_REPOS=$HOME/GH_MLC
         pip install --upgrade mlc-scripts
         pip install tabulate
-        mlcr --tags=run-mlperf,inference,_all-scenarios,_submission,_full,_r4.1-dev --preprocess_submission=yes --execution_mode=valid --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=IntelSPR.24c --implementation=intel --backend=pytorch --category=datacenter --division=open --scenario=Offline --docker_dt=yes --docker_it=no --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=cpu --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean --docker --quiet
-        mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from GH action on SPR.24c" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=IntelSPR.24c
+        mlcr run-mlperf,inference,_all-scenarios,_submission,_full,_r4.1-dev --preprocess_submission=yes --execution_mode=valid --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=IntelSPR.24c --implementation=intel --backend=pytorch --category=datacenter --division=open --scenario=Offline --docker_dt=yes --docker_it=no --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=cpu --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean --docker --quiet
+        mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from GH action on SPR.24c" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=IntelSPR.24c

diff --git a/.github/workflows/test-mlc-script-features.yml b/.github/workflows/test-mlc-script-features.yml
index bca961c1b..05d62e2ae 100644
--- a/.github/workflows/test-mlc-script-features.yml
+++ b/.github/workflows/test-mlc-script-features.yml
@@ -35,12 +35,12 @@ jobs:
 
     - name: Test Python venv
       run: |
-        mlcr --tags=install,python-venv --name=test --quiet
+        mlcr install,python-venv --name=test --quiet
         mlc search cache --tags=get,python,virtual,name-test --quiet
 
     - name: Test variations
       run: |
-        mlcr --tags=get,dataset,preprocessed,imagenet,_NHWC --quiet
+        mlcr get,dataset,preprocessed,imagenet,_NHWC --quiet
         mlc search cache --tags=get,dataset,preprocessed,imagenet,-_NCHW
         mlc search cache --tags=get,dataset,preprocessed,imagenet,-_NHWC
 
@@ -48,17 +48,17 @@ jobs:
      continue-on-error: true
       if: runner.os == 'linux'
       run: |
-        mlcr --tags=get,generic-python-lib,_package.scipy --version=1.9.3 --quiet
+        mlcr get,generic-python-lib,_package.scipy --version=1.9.3 --quiet
         test $? -eq 0 || exit $?
-        mlcr --tags=get,generic-python-lib,_package.scipy --version=1.9.2 --quiet
+        mlcr get,generic-python-lib,_package.scipy --version=1.9.2 --quiet
         test $? -eq 0 || exit $?
         # Need to add find cache here
-        # mlcr --tags=get,generic-python-lib,_package.scipy --version=1.9.3 --quiet --only_execute_from_cache=True
-        # test $? -eq 0 || exit 0
+        # mlcr get,generic-python-lib,_package.scipy --version=1.9.3 --quiet --only_execute_from_cache=True
+        # test $? -eq 0 || exit 0
 
     - name: Test python install from src
       run: |
-        mlcr --tags=python,src,install,_shared --version=3.9.10 --quiet
+        mlcr python,src,install,_shared --version=3.9.10 --quiet
         mlc search cache --tags=python,src,install,_shared,version-3.9.10
 
   test_docker:
     runs-on: ubuntu-latest
@@ -81,11 +81,11 @@ jobs:
 
     - name: Run docker container from dockerhub on linux
       run: |
-        mlcr --tags=run,docker,container --adr.compiler.tags=gcc --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --image_name=cm-script-app-image-classification-onnx-py --env.MLC_DOCKER_RUN_SCRIPT_TAGS=app,image-classification,onnx,python --env.MLC_DOCKER_IMAGE_BASE=ubuntu:22.04 --env.MLC_DOCKER_IMAGE_REPO=cknowledge --quiet
+        mlcr run,docker,container --adr.compiler.tags=gcc --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --image_name=cm-script-app-image-classification-onnx-py --env.MLC_DOCKER_RUN_SCRIPT_TAGS=app,image-classification,onnx,python --env.MLC_DOCKER_IMAGE_BASE=ubuntu:22.04 --env.MLC_DOCKER_IMAGE_REPO=cknowledge --quiet
 
     - name: Run docker container locally on linux
       run: |
-        mlcr --tags=run,docker,container --adr.compiler.tags=gcc --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --image_name=mlc-script-app-image-classification-onnx-py --env.MLC_DOCKER_RUN_SCRIPT_TAGS=app,image-classification,onnx,python --env.MLC_DOCKER_IMAGE_BASE=ubuntu:22.04 --env.MLC_DOCKER_IMAGE_REPO=local --quiet
+        mlcr run,docker,container --adr.compiler.tags=gcc --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --image_name=mlc-script-app-image-classification-onnx-py --env.MLC_DOCKER_RUN_SCRIPT_TAGS=app,image-classification,onnx,python --env.MLC_DOCKER_IMAGE_BASE=ubuntu:22.04 --env.MLC_DOCKER_IMAGE_REPO=local --quiet
 
   test_mlperf_retinanet_cpp_venv:
     runs-on: ubuntu-latest
@@ -107,15 +107,15 @@ jobs:
 
     - name: Run MLPerf Inference Retinanet with native and virtual Python
       run: |
-        mlcr --tags=app,mlperf,inference,generic,_cpp,_retinanet,_onnxruntime,_cpu --adr.python.version_min=3.8 --adr.compiler.tags=gcc --adr.openimages-preprocessed.tags=_50 --scenario=Offline --mode=accuracy --test_query_count=10 --rerun --quiet
+        mlcr app,mlperf,inference,generic,_cpp,_retinanet,_onnxruntime,_cpu --adr.python.version_min=3.8 --adr.compiler.tags=gcc --adr.openimages-preprocessed.tags=_50 --scenario=Offline --mode=accuracy --test_query_count=10 --rerun --quiet
 
-        mlcr --tags=app,mlperf,inference,generic,_cpp,_retinanet,_onnxruntime,_cpu --adr.python.version_min=3.8 --adr.compiler.tags=gcc --adr.openimages-preprocessed.tags=_50 --scenario=Offline --mode=performance --test_query_count=10 --rerun --quiet
+        mlcr app,mlperf,inference,generic,_cpp,_retinanet,_onnxruntime,_cpu --adr.python.version_min=3.8 --adr.compiler.tags=gcc --adr.openimages-preprocessed.tags=_50 --scenario=Offline --mode=performance --test_query_count=10 --rerun --quiet
 
-        mlcr --tags=install,python-venv --version=3.10.8 --name=mlperf --quiet
+        mlcr install,python-venv --version=3.10.8 --name=mlperf --quiet
 
         export MLC_SCRIPT_EXTRA_CMD="--adr.python.name=mlperf"
-        mlcr --tags=run,mlperf,inference,_submission,_short --adr.python.version_min=3.8 --adr.compiler.tags=gcc --adr.openimages-preprocessed.tags=_50 --submitter=MLCommons --implementation=cpp --hw_name=default --model=retinanet --backend=onnxruntime --device=cpu --scenario=Offline --quiet
+        mlcr run,mlperf,inference,_submission,_short --adr.python.version_min=3.8 --adr.compiler.tags=gcc --adr.openimages-preprocessed.tags=_50 --submitter=MLCommons --implementation=cpp --hw_name=default --model=retinanet --backend=onnxruntime --device=cpu --scenario=Offline --quiet
 
     # Step for Linux/MacOS
     - name: Randomly Execute Step (Linux/MacOS)
@@ -160,4 +160,4 @@ jobs:
         git config --global credential.https://github.com.helper "!gh auth git-credential"
         git config --global credential.https://gist.github.com.helper ""
         git config --global credential.https://gist.github.com.helper "!gh auth git-credential"
-        mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet
+        mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet

diff --git a/.github/workflows/test-mlperf-inference-abtf-poc.yml b/.github/workflows/test-mlperf-inference-abtf-poc.yml
index cc2ec9868..fadea97da 100644
--- a/.github/workflows/test-mlperf-inference-abtf-poc.yml
+++ b/.github/workflows/test-mlperf-inference-abtf-poc.yml
@@ -114,4 +114,4 @@ jobs:
 
     - name: Test MLPerf Inference ABTF POC using ${{ matrix.backend }} on ${{ matrix.os }}
       run: |
-        mlcr --tags=run-abtf,inference,_poc-demo --test_query_count=2 --adr.cocoeval.version_max=1.5.7 --adr.cocoeval.version_max_usable=1.5.7 --quiet ${{ matrix.extra-args }} ${{ matrix.docker }} -v
+        mlcr run-abtf,inference,_poc-demo --test_query_count=2 --adr.cocoeval.version_max=1.5.7 --adr.cocoeval.version_max_usable=1.5.7 --quiet ${{ matrix.extra-args }} ${{ matrix.docker }} -v
diff --git a/.github/workflows/test-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml b/.github/workflows/test-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml
index 2a89dbe6f..73f0d4adb 100644
--- a/.github/workflows/test-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml
+++ b/.github/workflows/test-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml
@@ -43,11 +43,11 @@ jobs:
     - name: Test MLPerf Inference Bert ${{ matrix.backend }} on ${{ matrix.os }}
       if: matrix.os == 'windows-latest'
       run: |
-        mlcr --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }} --model=bert-99 --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --precision=${{ matrix.precision }} --target_qps=1 -v --quiet
+        mlcr run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }} --model=bert-99 --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --precision=${{ matrix.precision }} --target_qps=1 -v --quiet
     - name: Test MLPerf Inference Bert ${{ matrix.backend }} on ${{ matrix.os }}
       if: matrix.os != 'windows-latest'
       run: |
-        mlcr --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=bert-99 --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --precision=${{ matrix.precision }} --target_qps=1 -v --quiet
+        mlcr run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=bert-99 --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --precision=${{ matrix.precision }} --target_qps=1 -v --quiet
     - name: Randomly Execute Step
       id: random-check
       run: |
@@ -77,4 +77,4 @@ jobs:
         git config --global credential.https://github.com.helper "!gh auth git-credential"
         git config --global credential.https://gist.github.com.helper ""
         git config --global credential.https://gist.github.com.helper "!gh auth git-credential"
-        mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet
+        mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet

diff --git a/.github/workflows/test-mlperf-inference-dlrm.yml b/.github/workflows/test-mlperf-inference-dlrm.yml
index 13bf2dbcc..c1a48a4b3 100644
--- a/.github/workflows/test-mlperf-inference-dlrm.yml
+++ b/.github/workflows/test-mlperf-inference-dlrm.yml
@@ -24,7 +24,7 @@ jobs:
         source gh_action/bin/activate
         export MLC_REPOS=$HOME/GH_MLC
         python3 -m pip install mlperf
-        mlcr --tags=run-mlperf,inference,_performance-only --pull_changes=yes --pull_inference_changes=yes --submitter="MLCommons" --model=dlrm-v2-99 --implementation=reference --backend=pytorch --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=1 --docker_it=no --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --clean
+        mlcr run-mlperf,inference,_performance-only --pull_changes=yes --pull_inference_changes=yes --submitter="MLCommons" --model=dlrm-v2-99 --implementation=reference --backend=pytorch --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=1 --docker_it=no --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --clean
 
   build_intel:
     if: github.repository_owner == 'gateoverflow_off'
@@ -44,4 +44,4 @@ jobs:
         export MLC_REPOS=$HOME/GH_MLC
         python3 -m pip install mlperf
         mlc pull repo
-        mlcr --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --model=dlrm-v2-99 --implementation=intel --batch_size=1 --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=1 --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean
+        mlcr run-mlperf,inference,_submission,_short --submitter="MLCommons" --model=dlrm-v2-99 --implementation=intel --batch_size=1 --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=1 --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean

diff --git a/.github/workflows/test-mlperf-inference-gptj.yml b/.github/workflows/test-mlperf-inference-gptj.yml
index d55e515d5..346947611 100644
--- a/.github/workflows/test-mlperf-inference-gptj.yml
+++ b/.github/workflows/test-mlperf-inference-gptj.yml
@@ -26,6 +26,6 @@ jobs:
         export MLC_REPOS=$HOME/GH_MLC
         python3 -m pip install --upgrade mlc-scripts
         mlc pull repo
-        mlcr --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --docker --pull_changes=yes --pull_inference_changes=yes --model=gptj-99 --backend=${{ matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --target_qps=1 --quiet --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --beam_size=1 --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --get_platform_details=yes --implementation=reference --clean
-        mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/gh_action_submissions
+        mlcr run-mlperf,inference,_submission,_short --submitter="MLCommons" --docker --pull_changes=yes --pull_inference_changes=yes --model=gptj-99 --backend=${{ matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --target_qps=1 --quiet --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --beam_size=1 --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --get_platform_details=yes --implementation=reference --clean
+        mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/gh_action_submissions

diff --git a/.github/workflows/test-mlperf-inference-llama2.yml b/.github/workflows/test-mlperf-inference-llama2.yml
index 6e88b172f..acf896b88 100644
--- a/.github/workflows/test-mlperf-inference-llama2.yml
+++ b/.github/workflows/test-mlperf-inference-llama2.yml
@@ -31,5 +31,5 @@ jobs:
         pip install "huggingface_hub[cli]"
         git config --global credential.helper store
         huggingface-cli login --token ${{ secrets.HF_TOKEN }} --add-to-git-credential
-        mlcr --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --model=llama2-70b-99 --implementation=reference --backend=${{ matrix.backend }} --precision=${{ matrix.precision }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=0.001 --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --env.MLC_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST=yes --clean
-        mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions" --quiet --submission_dir=$HOME/gh_action_submissions
+        mlcr run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --model=llama2-70b-99 --implementation=reference --backend=${{ matrix.backend }} --precision=${{ matrix.precision }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=0.001 --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --env.MLC_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST=yes --clean
+        mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions" --quiet --submission_dir=$HOME/gh_action_submissions

diff --git a/.github/workflows/test-mlperf-inference-mixtral.yml b/.github/workflows/test-mlperf-inference-mixtral.yml
index 25c12c62b..e091d2fe7 100644
--- a/.github/workflows/test-mlperf-inference-mixtral.yml
+++ b/.github/workflows/test-mlperf-inference-mixtral.yml
@@ -28,5 +28,5 @@ jobs:
         git config --global credential.helper store
         huggingface-cli login --token ${{ secrets.HF_TOKEN }} --add-to-git-credential
         mlc pull repo
-        mlcr --tags=run-mlperf,inference,_submission,_short --adr.inference-src.tags=_branch.dev --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --model=mixtral-8x7b --implementation=reference --batch_size=1 --precision=${{ matrix.precision }} --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --docker --quiet --test_query_count=3 --target_qps=0.001 --clean --env.MLC_MLPERF_MODEL_MIXTRAL_8X7B_DOWNLOAD_TO_HOST=yes --env.MLC_MLPERF_DATASET_MIXTRAL_8X7B_DOWNLOAD_TO_HOST=yes --adr.openorca-mbxp-gsm8k-combined-preprocessed.tags=_size.1
-        mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions - GO-phoenix" --quiet --submission_dir=$HOME/gh_action_submissions
+        mlcr run-mlperf,inference,_submission,_short --adr.inference-src.tags=_branch.dev --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --model=mixtral-8x7b --implementation=reference --batch_size=1 --precision=${{ matrix.precision }} --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --docker --quiet --test_query_count=3 --target_qps=0.001 --clean --env.MLC_MLPERF_MODEL_MIXTRAL_8X7B_DOWNLOAD_TO_HOST=yes --env.MLC_MLPERF_DATASET_MIXTRAL_8X7B_DOWNLOAD_TO_HOST=yes --adr.openorca-mbxp-gsm8k-combined-preprocessed.tags=_size.1
+        mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions - GO-phoenix" --quiet --submission_dir=$HOME/gh_action_submissions
diff --git a/.github/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml b/.github/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml
index f4ed3f079..737132572 100644
--- a/.github/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml
+++ b/.github/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml
@@ -41,11 +41,11 @@ jobs:
     - name: Test MLPerf Inference MLCommons C++ ResNet50 on ${{ matrix.os }}
       if: matrix.os == 'windows-latest'
       run: |
-        mlcr --tags=app,mlperf,inference,mlcommons,cpp --submitter="MLCommons" --hw_name=gh_${{ matrix.os }} --adr.loadgen.tags=_from-pip --pip_loadgen=yes -v --quiet
+        mlcr app,mlperf,inference,mlcommons,cpp --submitter="MLCommons" --hw_name=gh_${{ matrix.os }} --adr.loadgen.tags=_from-pip --pip_loadgen=yes -v --quiet
     - name: Test MLPerf Inference MLCommons C++ ResNet50 on ${{ matrix.os }}
       if: matrix.os != 'windows-latest'
       run: |
-        mlcr --tags=app,mlperf,inference,mlcommons,cpp --submitter="MLCommons" --hw_name=gh_${{ matrix.os }} -v --quiet
+        mlcr app,mlperf,inference,mlcommons,cpp --submitter="MLCommons" --hw_name=gh_${{ matrix.os }} -v --quiet
     - name: Randomly Execute Step
       id: random-check
       run: |
@@ -77,4 +77,4 @@ jobs:
         git config --global credential.https://github.com.helper "!gh auth git-credential"
         git config --global credential.https://gist.github.com.helper ""
         git config --global credential.https://gist.github.com.helper "!gh auth git-credential"
-        mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=dev --commit_message="Results from MLCommons C++ ResNet50 GH action on ${{ matrix.os }}" --quiet
+        mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=dev --commit_message="Results from MLCommons C++ ResNet50 GH action on ${{ matrix.os }}" --quiet

diff --git a/.github/workflows/test-mlperf-inference-resnet50.yml b/.github/workflows/test-mlperf-inference-resnet50.yml
index 4bfbe06ad..9bc5db424 100644
--- a/.github/workflows/test-mlperf-inference-resnet50.yml
+++ b/.github/workflows/test-mlperf-inference-resnet50.yml
@@ -53,11 +53,11 @@ jobs:
     - name: Test MLPerf Inference ResNet50 (Windows)
       if: matrix.os == 'windows-latest'
       run: |
-        mlcr --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name="gh_${{ matrix.os }} x86" --model=resnet50 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet
+        mlcr run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name="gh_${{ matrix.os }} x86" --model=resnet50 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet
     - name: Test MLPerf Inference ResNet50 (Linux/macOS)
       if: matrix.os != 'windows-latest'
       run: |
-        mlcr --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name="gh_${{ matrix.os }} x86" --model=resnet50 --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet
+        mlcr run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name="gh_${{ matrix.os }} x86" --model=resnet50 --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet
     # Step for Linux/MacOS
     - name: Randomly Execute Step (Linux/MacOS)
       if: runner.os != 'Windows'
@@ -101,5 +101,5 @@ jobs:
         git config --global credential.https://github.com.helper "!gh auth git-credential"
         git config --global credential.https://gist.github.com.helper ""
         git config --global credential.https://gist.github.com.helper "!gh auth git-credential"
-        mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet
+        mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet
 

diff --git a/.github/workflows/test-mlperf-inference-retinanet.yml b/.github/workflows/test-mlperf-inference-retinanet.yml
index c1777beae..40e749831 100644
--- a/.github/workflows/test-mlperf-inference-retinanet.yml
+++ b/.github/workflows/test-mlperf-inference-retinanet.yml
@@ -47,11 +47,11 @@ jobs:
     - name: Test MLPerf Inference Retinanet using ${{ matrix.backend }} on ${{ matrix.os }}
       if: matrix.os == 'windows-latest'
       run: |
-        mlcr --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name=gh_${{ matrix.os }} --model=retinanet --adr.loadgen.tags=_from-pip --pip_loadgen=yes --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --quiet -v --target_qps=1
+        mlcr run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name=gh_${{ matrix.os }} --model=retinanet --adr.loadgen.tags=_from-pip --pip_loadgen=yes --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --quiet -v --target_qps=1
     - name: Test MLPerf Inference Retinanet using ${{ matrix.backend }} on ${{ matrix.os }}
       if: matrix.os != 'windows-latest'
       run: |
-        mlcr --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name=gh_${{ matrix.os }}_x86 --model=retinanet --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --quiet -v --target_qps=1
+        mlcr run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name=gh_${{ matrix.os }}_x86 --model=retinanet --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --quiet -v --target_qps=1
 
     # Step for Linux/MacOS
     - name: Randomly Execute Step (Linux/MacOS)
@@ -96,4 +96,4 @@ jobs:
         git config --global credential.https://github.com.helper "!gh auth git-credential"
         git config --global credential.https://gist.github.com.helper ""
         git config --global credential.https://gist.github.com.helper "!gh auth git-credential"
-        mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet
+        mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet

diff --git a/.github/workflows/test-mlperf-inference-rgat.yml b/.github/workflows/test-mlperf-inference-rgat.yml
index 026c64886..f8b0e6116 100644
--- a/.github/workflows/test-mlperf-inference-rgat.yml
+++ b/.github/workflows/test-mlperf-inference-rgat.yml
@@ -35,7 +35,7 @@ jobs:
         mlc pull repo ${{ github.event.pull_request.head.repo.html_url }} --branch=${{ github.event.pull_request.head.ref }}
     - name: Test MLPerf Inference R-GAT using ${{ matrix.backend }} on ${{ matrix.os }}
       run: |
-        mlcr --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --adr.inference-src.tags=_branch.dev --pull_changes=yes --pull_inference_changes=yes --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=rgat --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --adr.compiler.tags=gcc --category=datacenter --quiet -v --target_qps=1
+        mlcr run,mlperf,inference,generate-run-cmds,_submission,_short --adr.inference-src.tags=_branch.dev --pull_changes=yes --pull_inference_changes=yes --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=rgat --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --adr.compiler.tags=gcc --category=datacenter --quiet -v --target_qps=1
 
     - name: Retrieve secrets from Keeper
       id: ksecrets
@@ -55,4 +55,4 @@ jobs:
         git config --global credential.https://github.com.helper "!gh auth git-credential"
         git config --global credential.https://gist.github.com.helper ""
         git config --global credential.https://gist.github.com.helper "!gh auth git-credential"
-        mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet
+        mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet

diff --git a/.github/workflows/test-mlperf-inference-rnnt.yml b/.github/workflows/test-mlperf-inference-rnnt.yml
index 89ec6e4e2..3dfba6938 100644
--- a/.github/workflows/test-mlperf-inference-rnnt.yml
+++ b/.github/workflows/test-mlperf-inference-rnnt.yml
@@ -37,4 +37,4 @@ jobs:
         mlcr --quiet --tags=get,sys-utils-cm
     - name: Test MLPerf Inference RNNT
       run: |
-        mlcr --tags=run,mlperf,inference,generate-run-cmds,_performance-only --submitter="cTuning" --model=rnnt --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --precision=${{ matrix.precision }} --target_qps=5 -v --quiet
+        mlcr run,mlperf,inference,generate-run-cmds,_performance-only --submitter="cTuning" --model=rnnt --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --precision=${{ matrix.precision }} --target_qps=5 -v --quiet
diff --git a/.github/workflows/test-mlperf-inference-tvm-resnet50.yml b/.github/workflows/test-mlperf-inference-tvm-resnet50.yml
index 616f67db2..b2ae35d04 100644
--- a/.github/workflows/test-mlperf-inference-tvm-resnet50.yml
+++ b/.github/workflows/test-mlperf-inference-tvm-resnet50.yml
@@ -35,7 +35,7 @@ jobs:
         mlcr --quiet --tags=get,sys-utils-cm
     - name: Test MLC Tutorial TVM
       run: |
-        mlcr --tags=run-mlperf,inference,_submission,_short --adr.python.name=mlperf --adr.python.version_min=3.8 --submitter=MLCommons --implementation=python --hw_name=gh_ubuntu-latest --model=resnet50 --backend=tvm-onnx --device=cpu --scenario=Offline --mode=accuracy --test_query_count=5 --clean --quiet ${{ matrix.extra-options }}
+        mlcr run-mlperf,inference,_submission,_short --adr.python.name=mlperf --adr.python.version_min=3.8 --submitter=MLCommons --implementation=python --hw_name=gh_ubuntu-latest --model=resnet50 --backend=tvm-onnx --device=cpu --scenario=Offline --mode=accuracy --test_query_count=5 --clean --quiet ${{ matrix.extra-options }}
     - name: Randomly Execute Step
       id: random-check
       run: |
@@ -67,4 +67,4 @@ jobs:
         git config --global credential.https://github.com.helper "!gh auth git-credential"
         git config --global credential.https://gist.github.com.helper ""
         git config --global credential.https://gist.github.com.helper "!gh auth git-credential"
-        mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet
+        mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet

diff --git a/.github/workflows/test-mlperf-loadgen-onnx-huggingface-bert-fp32-squad.yml b/.github/workflows/test-mlperf-loadgen-onnx-huggingface-bert-fp32-squad.yml
index 7c9a68d85..cafb93a12 100644
--- a/.github/workflows/test-mlperf-loadgen-onnx-huggingface-bert-fp32-squad.yml
+++ b/.github/workflows/test-mlperf-loadgen-onnx-huggingface-bert-fp32-squad.yml
@@ -32,4 +32,4 @@ jobs:
         mlc pull repo ${{ github.event.pull_request.head.repo.html_url }} --branch=${{ github.event.pull_request.head.ref }}
     - name: Test MLPerf loadgen with HuggingFace bert onnx fp32 squad model
       run: |
-        mlcr --tags=python,app,loadgen-generic,_onnxruntime,_custom,_huggingface,_model-stub.ctuning/mlperf-inference-bert-onnx-fp32-squad-v1.1 --quiet
+        mlcr python,app,loadgen-generic,_onnxruntime,_custom,_huggingface,_model-stub.ctuning/mlperf-inference-bert-onnx-fp32-squad-v1.1 --quiet

diff --git a/.github/workflows/test-nvidia-mlperf-inference-implementations.yml b/.github/workflows/test-nvidia-mlperf-inference-implementations.yml
index f6eb160a5..9b75db22a 100644
--- a/.github/workflows/test-nvidia-mlperf-inference-implementations.yml
+++ b/.github/workflows/test-nvidia-mlperf-inference-implementations.yml
@@ -58,7 +58,7 @@ jobs:
         pip install --upgrade mlcflow
         mlc pull repo mlcommons@mlperf-automations --branch=dev
 
-        mlcr --tags=run-mlperf,inference,_all-scenarios,_submission,_full,_r5.0-dev --preprocess_submission=yes --pull_changes=yes --pull_inference_changes=yes --execution_mode=valid --gpu_name=$gpu_name --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="GATEOverflow" --hw_name=$hw_name --implementation=nvidia --backend=tensorrt --category=$category --division=closed --docker_dt --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=cuda --use_model_from_host=yes --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean $docker_string $submission_preprocessor_args --quiet
-        #mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from GH action on NVIDIA_$hw_name" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=$hw_name
-        mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/GATEOverflow/mlperf_inference_submissions_v5.0 --repo_branch=main --commit_message="Results from GH actions on NVIDIA_$hw_name" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=$hw_name
+        mlcr run-mlperf,inference,_all-scenarios,_submission,_full,_r5.0-dev --preprocess_submission=yes --pull_changes=yes --pull_inference_changes=yes --execution_mode=valid --gpu_name=$gpu_name --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="GATEOverflow" --hw_name=$hw_name --implementation=nvidia --backend=tensorrt --category=$category --division=closed --docker_dt --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=cuda --use_model_from_host=yes --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean $docker_string $submission_preprocessor_args --quiet
+        #mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from GH action on NVIDIA_$hw_name" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=$hw_name
+        mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/GATEOverflow/mlperf_inference_submissions_v5.0 --repo_branch=main --commit_message="Results from GH actions on NVIDIA_$hw_name" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=$hw_name

diff --git a/.github/workflows/test-qaic-compute-sdk-build.yml b/.github/workflows/test-qaic-compute-sdk-build.yml
index 0dff27cd0..6b3e91356 100644
--- a/.github/workflows/test-qaic-compute-sdk-build.yml
+++ b/.github/workflows/test-qaic-compute-sdk-build.yml
@@ -27,8 +27,8 @@ jobs:
     - name: Install dependencies
       run: |
         pip install mlc-scripts
-        mlcr --tags=get,sys-utils-cm --quiet
+        mlcr get,sys-utils-cm --quiet
 
     - name: Test QAIC Compute SDK for compilation
       run: |
-        mlcr --tags=get,qaic,compute,sdk --adr.llvm.version=${{ matrix.llvm-version }} --quiet
+        mlcr get,qaic,compute,sdk --adr.llvm.version=${{ matrix.llvm-version }} --quiet

diff --git a/.github/workflows/test-qaic-software-kit.yml b/.github/workflows/test-qaic-software-kit.yml
index 5cbfc0add..64bba66b4 100644
--- a/.github/workflows/test-qaic-software-kit.yml
+++ b/.github/workflows/test-qaic-software-kit.yml
@@ -32,8 +32,8 @@ jobs:
     - name: Pull MLOps repository
       run: |
         pip install mlc-scripts
-        mlcr --tags=get,sys-utils-mlc --quiet
+        mlcr get,sys-utils-mlc --quiet
 
     - name: Test Software Kit for compilation on Ubuntu 20.04
       run: |
-        mlcr --tags=get,qaic,software,kit --adr.compiler.tags=${{ matrix.compiler }} --adr.compiler.version=${{ matrix.llvm-version }} --quiet
+        mlcr get,qaic,software,kit --adr.compiler.tags=${{ matrix.compiler }} --adr.compiler.version=${{ matrix.llvm-version }} --quiet

diff --git a/.github/workflows/test-scc24-sdxl.yaml b/.github/workflows/test-scc24-sdxl.yaml
index e64ddfbe7..ffe814ba4 100644
--- a/.github/workflows/test-scc24-sdxl.yaml
+++ b/.github/workflows/test-scc24-sdxl.yaml
@@ -28,10 +28,10 @@ jobs:
         pip install --upgrade mlcflow
         pip install tabulate
         mlc pull repo
-        mlcr --tags=run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --pull_changes=yes --pull_inference_changes=yes --model=sdxl --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean
-        mlcr --tags=run-mlperf,inference,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean
-        mlcr --tags=generate,inference,submission --clean --run-checker --tar=yes --env.MLC_TAR_OUTFILE=submission.tar.gz --division=open --category=datacenter --run_style=test --adr.submission-checker.tags=_short-run --quiet --submitter=MLCommons --submission_dir=$HOME/scc_gh_action_submissions --results_dir=$HOME/scc_gh_action_results/test_results
-        # mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/cm4mlperf-inference --repo_branch=mlperf-inference-results-scc24 --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/scc_gh_action_submissions
+        mlcr run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --pull_changes=yes --pull_inference_changes=yes --model=sdxl --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean
+        mlcr run-mlperf,inference,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean
+        mlcr generate,inference,submission --clean --run-checker --tar=yes --env.MLC_TAR_OUTFILE=submission.tar.gz --division=open --category=datacenter --run_style=test --adr.submission-checker.tags=_short-run --quiet --submitter=MLCommons --submission_dir=$HOME/scc_gh_action_submissions --results_dir=$HOME/scc_gh_action_results/test_results
+        # mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/cm4mlperf-inference --repo_branch=mlperf-inference-results-scc24 --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/scc_gh_action_submissions
 
   build_nvidia:
     if: github.repository_owner == 'gateoverflow'
@@ -56,7 +56,7 @@ jobs:
         pip install --upgrade mlcflow
         pip install tabulate
         mlc pull repo
-        mlcr --tags=run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --pull_changes=yes --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --pull_changes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --hw_name=go-spr --custom_system_nvidia=yes --clean
-        mlcr --tags=run-mlperf,inference,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean
-        mlcr --tags=generate,inference,submission --clean --run-checker --tar=yes --env.MLC_TAR_OUTFILE=submission.tar.gz --division=open --category=datacenter --run_style=test --adr.submission-checker.tags=_short-run --quiet --submitter=MLCommons --submission_dir=$HOME/scc_gh_action_submissions --results_dir=$HOME/scc_gh_action_results/test_results
-        # mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/cm4mlperf-inference --repo_branch=mlperf-inference-results-scc24 --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/scc_gh_action_submissions
+        mlcr run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --pull_changes=yes --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --pull_changes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --hw_name=go-spr --custom_system_nvidia=yes --clean
+        mlcr run-mlperf,inference,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean
+        mlcr generate,inference,submission --clean --run-checker --tar=yes --env.MLC_TAR_OUTFILE=submission.tar.gz --division=open --category=datacenter --run_style=test --adr.submission-checker.tags=_short-run --quiet --submitter=MLCommons --submission_dir=$HOME/scc_gh_action_submissions --results_dir=$HOME/scc_gh_action_results/test_results
+        # mlcr
push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/cm4mlperf-inference --repo_branch=mlperf-inference-results-scc24 --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/scc_gh_action_submissions diff --git a/script/app-mlperf-inference-qualcomm/README_aws_dl2q.24xlarge.md b/script/app-mlperf-inference-qualcomm/README_aws_dl2q.24xlarge.md index e27a6f3ec..cd19536a7 100644 --- a/script/app-mlperf-inference-qualcomm/README_aws_dl2q.24xlarge.md +++ b/script/app-mlperf-inference-qualcomm/README_aws_dl2q.24xlarge.md @@ -13,14 +13,14 @@ image from the Community AMIs is the recommended OS image as it comes with the Q sudo yum install -y python38-devel git python3.8 -m pip install cmind cm pull repo mlcommons@cm4mlops -mlcr --tags=get,python --version_min=3.8.1 +mlcr get,python --version_min=3.8.1 ``` ## Bert-99 ### Quick performance run ``` -mlcr --tags=generate-run-cmds,inference,_performance-only --device=qaic \ +mlcr generate-run-cmds,inference,_performance-only --device=qaic \ --backend=glow --scenario=Offline --implementation=kilt --model=bert-99 \ --test_query_count=40000 --precision=uint8 --rerun --quiet \ --adr.mlperf-inference-implementation.tags=_loadgen-batch-size.4096,_dl2q.24xlarge \ @@ -29,7 +29,7 @@ mlcr --tags=generate-run-cmds,inference,_performance-only --device=qaic \ ### Full valid run ``` -mlcr --tags=generate-run-cmds,inference,_submission --device=qaic \ +mlcr generate-run-cmds,inference,_submission --device=qaic \ --backend=glow --scenario=Offline --implementation=kilt --model=bert-99 --precision=uint8 \ --adr.mlperf-inference-implementation.tags=_loadgen-batch-size.4096,_dl2q.24xlarge \ --rerun --quiet --execution-mode=valid @@ -45,13 +45,13 @@ The expected accuracy is ~90 (Optional) If you already have the Imagenet 2012 validation dataset downloaded, you can register it in CM as follows; this avoids downloading it from the public URL, which can be slow at times.
``` -mlcr --tags=get,dataset,imagenet,original,_full --env.IMAGENET_PATH=`pwd`/imagenet-2012-val +mlcr get,dataset,imagenet,original,_full --env.IMAGENET_PATH=`pwd`/imagenet-2012-val ``` ### Quick performance run ``` -mlcr --tags=generate-run-cmds,inference,_performance-only --device=qaic --backend=glow \ +mlcr generate-run-cmds,inference,_performance-only --device=qaic --backend=glow \ --scenario=Offline --implementation=kilt --model=resnet50 \ --test_query_count=400000 --precision=uint8 --rerun --adr.compiler.tags=gcc \ --adr.mlperf-inference-implementation.tags=_bs.8,_dl2q.24xlarge --execution-mode=test --quiet @@ -60,7 +60,7 @@ mlcr --tags=generate-run-cmds,inference,_performance-only --device=qaic --backen ### Full valid run ``` -mlcr --tags=generate-run-cmds,inference,_submission --device=qaic --backend=glow \ +mlcr generate-run-cmds,inference,_submission --device=qaic --backend=glow \ --scenario=Offline --implementation=kilt --model=resnet50 \ --precision=uint8 --rerun --adr.compiler.tags=gcc \ --adr.mlperf-inference-implementation.tags=_bs.8,_dl2q.24xlarge --execution-mode=valid --quiet @@ -76,7 +76,7 @@ Expected accuracy is 75.936% ### Quick performance run ``` -mlcr --tags=generate-run-cmds,inference,_performance-only --device=qaic --backend=glow \ +mlcr generate-run-cmds,inference,_performance-only --device=qaic --backend=glow \ --scenario=Offline --implementation=kilt --model=retinanet --test_query_count=40000 --precision=uint8 \ --rerun --quiet --adr.mlperf-inference-implementation.tags=_loadgen-batch-size.1,_dl2q.24xlarge,_bs.1 \ --adr.compiler.tags=gcc --execution-mode=test @@ -85,7 +85,7 @@ mlcr --tags=generate-run-cmds,inference,_performance-only --device=qaic --backen ### Full valid run ``` -mlcr --tags=generate-run-cmds,inference,_submission --device=qaic --backend=glow \ +mlcr generate-run-cmds,inference,_submission --device=qaic --backend=glow \ --scenario=Offline --implementation=kilt --model=retinanet \ --precision=uint8 --rerun --adr.compiler.tags=gcc --adr.dataset-preprocessed.tags=_custom-annotations \ --adr.mlperf-inference-implementation.tags=_bs.1,_dl2q.24xlarge --execution-mode=valid --quiet diff --git a/script/app-mlperf-inference/meta.yaml b/script/app-mlperf-inference/meta.yaml index f041b8f66..7049671d7 100644 --- a/script/app-mlperf-inference/meta.yaml +++ b/script/app-mlperf-inference/meta.yaml @@ -1937,7 +1937,7 @@ docker: MLC_MLPERF_INFERENCE_SUBMISSION_DIR: [ on ] pre_run_cmds: - #- mlc pull repo && mlcr --tags=get,git,repo,_repo.https://github.com/GATEOverflow/inference_results_v4.0.git --update + #- mlc pull repo && mlcr get,git,repo,_repo.https://github.com/GATEOverflow/inference_results_v4.0.git --update - mlc pull repo mounts: - "${{ MLC_DATASET_IMAGENET_PATH }}:${{ MLC_DATASET_IMAGENET_PATH }}" diff --git a/script/benchmark-any-mlperf-inference-implementation/run-template.sh b/script/benchmark-any-mlperf-inference-implementation/run-template.sh index 0224c34dd..8e0cb42c0 100644 --- a/script/benchmark-any-mlperf-inference-implementation/run-template.sh +++ b/script/benchmark-any-mlperf-inference-implementation/run-template.sh @@ -43,47 +43,47 @@ function run_test() { results_dir=$HOME/results_dir #Add your run commands here... 
-find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun ${EXTRA_ARGS}' -find_ss_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_ss_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=SingleStream --quiet --test_query_count=$test_query_count $rerun ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet \ --skip_submission_generation=yes --execution-mode=valid ${POWER_STRING} ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' -submission_cmd_scenario='mlcr --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet \ --skip_submission_generation=yes --execution-mode=valid ${POWER_STRING} ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' -readme_cmd_single='mlcr --tags=generate-run-cmds,inference,_populate-readme --scenario=$scenario \ +readme_cmd_single='mlcr generate-run-cmds,inference,_populate-readme --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet \ --skip_submission_generation=yes --execution-mode=valid ${POWER_STRING} ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' -readme_cmd='mlcr --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +readme_cmd='mlcr generate-run-cmds,inference,_populate-readme,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet \ --skip_submission_generation=yes --execution-mode=valid ${POWER_STRING} ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' -tflite_accuracy_cmd='mlcr --tags=run,mobilenet-models,_tflite,_accuracy-only$extra_tags \ +tflite_accuracy_cmd='mlcr run,mobilenet-models,_tflite,_accuracy-only$extra_tags \ --adr.compiler.tags=gcc \ ${extra_option} \ ${EXTRA_ARGS}' -tflite_performance_cmd='mlcr --tags=run,mobilenet-models,_tflite,_performance-only$extra_tags \ +tflite_performance_cmd='mlcr run,mobilenet-models,_tflite,_performance-only$extra_tags \ ${POWER_STRING} \ --adr.compiler.tags=gcc \ ${extra_option} \ ${EXTRA_ARGS}' -tflite_readme_cmd='mlcr --tags=run,mobilenet-models,_tflite,_populate-readme$extra_tags \ +tflite_readme_cmd='mlcr run,mobilenet-models,_tflite,_populate-readme$extra_tags \ ${POWER_STRING} \ --adr.compiler.tags=gcc \ ${extra_option} \ diff --git a/script/benchmark-program/customize.py b/script/benchmark-program/customize.py index 35cf623ec..25b7a17e3 100644 --- a/script/benchmark-program/customize.py +++ b/script/benchmark-program/customize.py @@ -84,7 +84,7 @@ def preprocess(i): pre_run_cmd += ' && ' # running the script as a process in background - pre_run_cmd = pre_run_cmd + 'mlcr 
--tags=runtime,system,utilisation' + \ + pre_run_cmd = pre_run_cmd + 'mlcr runtime,system,utilisation' + \ env['MLC_SYS_UTILISATION_SCRIPT_TAGS'] + ' --quiet & ' # obtain the pid of the background process pre_run_cmd += r" cmd_pid=\$! && echo CMD_PID=\$cmd_pid" diff --git a/script/get-platform-details/README-EXTRA.md b/script/get-platform-details/README-EXTRA.md index 45ac261f2..c5b6ebfc2 100644 --- a/script/get-platform-details/README-EXTRA.md +++ b/script/get-platform-details/README-EXTRA.md @@ -1,7 +1,7 @@ Please execute the following CM command to obtain the platform details of the System Under Test (SUT): ``` -mlcr --tags=get,platform-details --platform_details_dir= +mlcr get,platform-details --platform_details_dir= ``` diff --git a/script/get-rocm-devices/README.md b/script/get-rocm-devices/README.md index 294a147bb..722b01028 100644 --- a/script/get-rocm-devices/README.md +++ b/script/get-rocm-devices/README.md @@ -1,4 +1,4 @@ Run this script: ``` -mlcr --tags=get,rocm-devices +mlcr get,rocm-devices ``` diff --git a/script/run-all-mlperf-models/run-bert-macos.sh b/script/run-all-mlperf-models/run-bert-macos.sh index e0275153c..edf27333a 100644 --- a/script/run-all-mlperf-models/run-bert-macos.sh +++ b/script/run-all-mlperf-models/run-bert-macos.sh @@ -37,26 +37,26 @@ function run_test() { power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' power="" #Add your run commands here... -find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -submission_cmd_scenario='mlcr --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd_single='mlcr --tags=generate-run-cmds,inference,_populate-readme \ +readme_cmd_single='mlcr generate-run-cmds,inference,_populate-readme \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd='mlcr --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +readme_cmd='mlcr generate-run-cmds,inference,_populate-readme,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' diff --git a/script/run-all-mlperf-models/run-bert.sh b/script/run-all-mlperf-models/run-bert.sh index
530c55e48..23d169dd8 100644 --- a/script/run-all-mlperf-models/run-bert.sh +++ b/script/run-all-mlperf-models/run-bert.sh @@ -38,26 +38,26 @@ function run_test() { power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' #Add your run commands here... -find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -submission_cmd_scenario='mlcr --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd_single='mlcr --tags=generate-run-cmds,inference,_populate-readme \ +readme_cmd_single='mlcr generate-run-cmds,inference,_populate-readme \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd='mlcr --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +readme_cmd='mlcr generate-run-cmds,inference,_populate-readme,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' diff --git a/script/run-all-mlperf-models/run-cpp-implementation.sh b/script/run-all-mlperf-models/run-cpp-implementation.sh index 7159cbcd8..a4db90e68 100644 --- a/script/run-all-mlperf-models/run-cpp-implementation.sh +++ b/script/run-all-mlperf-models/run-cpp-implementation.sh @@ -29,21 +29,21 @@ division="closed" POWER=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 " POWER="" -run "mlcr --tags=set,system,performance,mode" +run "mlcr set,system,performance,mode" #cpp -run "mlcr --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr generate-run-cmds,inference,_find-performance \ --model=resnet50 --implementation=cpp --device=cpu --backend=onnxruntime \ --adr.compiler.tags=gcc \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=2000 " -run "mlcr --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr generate-run-cmds,inference,_find-performance \ --model=retinanet --implementation=cpp --device=cpu --backend=onnxruntime \ --adr.compiler.tags=gcc \ --category=edge --division=open --scenario=Offline --quiet" -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --model=resnet50 --implementation=cpp 
--device=cpu --backend=onnxruntime \ --scenario=Offline \ --category=edge --division=$division --quiet \ @@ -53,7 +53,7 @@ run "mlcr --tags=generate-run-cmds,inference,_submission \ ${POWER} \ --results_dir=$HOME/results_dir" -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --model=retinanet --implementation=cpp --device=cpu --backend=onnxruntime \ --scenario=Offline \ --category=edge --division=$division --quiet \ @@ -63,7 +63,7 @@ run "mlcr --tags=generate-run-cmds,inference,_submission \ ${POWER} \ --results_dir=$HOME/results_dir" -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --model=resnet50 --implementation=cpp --device=cpu --backend=onnxruntime \ --scenario=SingleStream \ --category=edge --division=$division --quiet \ @@ -73,7 +73,7 @@ run "mlcr --tags=generate-run-cmds,inference,_submission \ ${POWER} \ --results_dir=$HOME/results_dir" -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --model=retinanet --implementation=cpp --device=cpu --backend=onnxruntime \ --scenario=SingleStream \ --category=edge --division=$division --quiet \ @@ -85,20 +85,20 @@ ${POWER} \ # GPU -run "mlcr --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr generate-run-cmds,inference,_find-performance \ --model=resnet50 --implementation=cpp --device=cuda --backend=onnxruntime \ --adr.compiler.tags=gcc \ --test_query_count=20000 \ --category=edge --division=open --scenario=Offline --quiet" -run "mlcr --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr generate-run-cmds,inference,_find-performance \ --model=retinanet --implementation=cpp --device=cuda --backend=onnxruntime \ --adr.compiler.tags=gcc \ --test_query_count=2000 \ --category=edge --division=open --scenario=Offline --quiet" -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --scenario=Offline \ --model=resnet50 --implementation=cpp --device=cuda --backend=onnxruntime \ --category=edge --division=$division --quiet \ @@ -108,7 +108,7 @@ run "mlcr --tags=generate-run-cmds,inference,_submission \ ${POWER} \ --results_dir=$HOME/results_dir" -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --model=retinanet --implementation=cpp --device=cuda --backend=onnxruntime \ --scenario=Offline \ --category=edge --division=$division --quiet \ @@ -119,7 +119,7 @@ ${POWER} \ --results_dir=$HOME/results_dir" -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --scenario=Offline \ --model=resnet50 --implementation=cpp --device=cuda --backend=onnxruntime \ --scenario=SingleStream \ @@ -130,7 +130,7 @@ run "mlcr --tags=generate-run-cmds,inference,_submission \ ${POWER} \ --results_dir=$HOME/results_dir" -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --model=retinanet --implementation=cpp --device=cuda --backend=onnxruntime \ --scenario=SingleStream \ --category=edge --division=$division --quiet \ @@ -141,7 +141,7 @@ ${POWER} \ --results_dir=$HOME/results_dir" #multistream -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --scenario=Offline \ --model=resnet50 --implementation=cpp --device=cuda --backend=onnxruntime \ --scenario=MultiStream \ @@ 
-152,7 +152,7 @@ run "mlcr --tags=generate-run-cmds,inference,_submission \ ${POWER} \ --results_dir=$HOME/results_dir" -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --model=retinanet --implementation=cpp --device=cuda --backend=onnxruntime \ --scenario=MultiStream \ --category=edge --division=$division --quiet \ diff --git a/script/run-all-mlperf-models/run-mobilenet-models.sh b/script/run-all-mlperf-models/run-mobilenet-models.sh index 8fa760c8e..4190bf82c 100644 --- a/script/run-all-mlperf-models/run-mobilenet-models.sh +++ b/script/run-all-mlperf-models/run-mobilenet-models.sh @@ -32,35 +32,35 @@ extra_tags="" #Add your run commands here... # run "$MLC_RUN_CMD" -run "mlcr --tags=run,mobilenet-models,_tflite,_accuracy-only$extra_tags \ +run "mlcr run,mobilenet-models,_tflite,_accuracy-only$extra_tags \ --adr.compiler.tags=gcc \ ${extra_option} \ --results_dir=$HOME/results_dir" -run "mlcr --tags=run,mobilenet-models,_tflite,_performance-only$extra_tags \ +run "mlcr run,mobilenet-models,_tflite,_performance-only$extra_tags \ ${POWER} \ --adr.compiler.tags=gcc \ ${extra_option} \ --results_dir=$HOME/results_dir" -run "mlcr --tags=run,mobilenet-models,_tflite,_populate-readme$extra_tags \ +run "mlcr run,mobilenet-models,_tflite,_populate-readme$extra_tags \ ${POWER} \ --adr.compiler.tags=gcc \ ${extra_option} \ --results_dir=$HOME/results_dir" -run "mlcr --tags=run,mobilenet-models,_tflite,_armnn,_neon,_accuracy-only$extra_tags \ +run "mlcr run,mobilenet-models,_tflite,_armnn,_neon,_accuracy-only$extra_tags \ --adr.compiler.tags=gcc \ ${extra_option} \ --results_dir=$HOME/results_dir" -run "mlcr --tags=run,mobilenet-models,_tflite,_armnn,_neon,_performance-only$extra_tags \ +run "mlcr run,mobilenet-models,_tflite,_armnn,_neon,_performance-only$extra_tags \ ${POWER} \ ${extra_option} \ --adr.compiler.tags=gcc \ --results_dir=$HOME/results_dir" -run "mlcr --tags=run,mobilenet-models,_tflite,_armnn,_neon,_populate-readme$extra_tags \ +run "mlcr run,mobilenet-models,_tflite,_armnn,_neon,_populate-readme$extra_tags \ ${POWER} \ ${extra_option} \ --adr.compiler.tags=gcc \ diff --git a/script/run-all-mlperf-models/run-nvidia-4090.sh b/script/run-all-mlperf-models/run-nvidia-4090.sh index bc4eb5ae5..1fc37c864 100644 --- a/script/run-all-mlperf-models/run-nvidia-4090.sh +++ b/script/run-all-mlperf-models/run-nvidia-4090.sh @@ -38,7 +38,7 @@ power="" power=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15" #Add your run commands here... 
# run "$MLC_RUN_CMD" -find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count' @@ -49,7 +49,7 @@ find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance #run "3d-unet" "30" "${find_performance_cmd}" -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --execution-mode=valid --implementation=$implementation --device=$device --backend=$backend --results_dir=$HOME/results_dir \ --category=$category --division=$division --skip_submission_generation=yes --quiet $power' diff --git a/script/run-all-mlperf-models/run-nvidia-a100.sh b/script/run-all-mlperf-models/run-nvidia-a100.sh index 70069b9a7..a3489e7d2 100644 --- a/script/run-all-mlperf-models/run-nvidia-a100.sh +++ b/script/run-all-mlperf-models/run-nvidia-a100.sh @@ -37,7 +37,7 @@ connection_type="sxm" #Add your run commands here... # run "$MLC_RUN_CMD" -find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count' @@ -48,7 +48,7 @@ run "bert-99" "20000" "${find_performance_cmd}" run "3d-unet-99.9" "30" "${find_performance_cmd}" -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --adr.nvidia-harensss.tags=_${connection_type} $power' diff --git a/script/run-all-mlperf-models/run-nvidia-t4.sh b/script/run-all-mlperf-models/run-nvidia-t4.sh index facdb0a60..adde34344 100644 --- a/script/run-all-mlperf-models/run-nvidia-t4.sh +++ b/script/run-all-mlperf-models/run-nvidia-t4.sh @@ -35,7 +35,7 @@ category="edge,datacenter" #Add your run commands here... 
# run "$MLC_RUN_CMD" -find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count' @@ -47,7 +47,7 @@ run "bert-99.9" "5000" "${find_performance_cmd}" run "3d-unet" "10" "${find_performance_cmd}" -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet' diff --git a/script/run-all-mlperf-models/run-pruned-bert.sh b/script/run-all-mlperf-models/run-pruned-bert.sh index b7bc2beae..f5ed64042 100644 --- a/script/run-all-mlperf-models/run-pruned-bert.sh +++ b/script/run-all-mlperf-models/run-pruned-bert.sh @@ -45,7 +45,7 @@ scenario="Offline" if [[ $scenario == "Offline" ]]; then for stub in ${zoo_stub_list[@]}; do -cmd="mlcr --tags=run,mlperf,inference,generate-run-cmds,_find-performance \ +cmd="mlcr run,mlperf,inference,generate-run-cmds,_find-performance \ --adr.python.version_min=3.8 \ --implementation=reference \ --model=bert-99 \ @@ -64,7 +64,7 @@ done fi for stub in ${zoo_stub_list[@]}; do - cmd="mlcr --tags=run,mlperf,inference,generate-run-cmds \ + cmd="mlcr run,mlperf,inference,generate-run-cmds \ --adr.python.version_min=3.8 \ --adr.compiler.tags=gcc \ --implementation=reference \ diff --git a/script/run-all-mlperf-models/run-reference-models.sh b/script/run-all-mlperf-models/run-reference-models.sh index 84d7526fd..01766158a 100644 --- a/script/run-all-mlperf-models/run-reference-models.sh +++ b/script/run-all-mlperf-models/run-reference-models.sh @@ -25,43 +25,43 @@ function run() { division="closed" #Add your run commands here... 
# run "$MLC_RUN_CMD" -run "mlcr --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr generate-run-cmds,inference,_find-performance \ --model=resnet50 --implementation=reference --device=cpu --backend=onnxruntime \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=100" -run "mlcr --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr generate-run-cmds,inference,_find-performance \ --model=rnnt --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=open --scenario=Offline --quiet" -run "mlcr --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr generate-run-cmds,inference,_find-performance \ --model=retinanet --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=open --scenario=Offline --quiet" -run "mlcr --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr generate-run-cmds,inference,_find-performance \ --model=bert-99 --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=open --scenario=Offline --quiet" -run "mlcr --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr generate-run-cmds,inference,_find-performance \ --model=3d-unet-99 --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=open --scenario=Offline --quiet" -run "mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +run "mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=resnet50 --implementation=reference --device=cpu --backend=onnxruntime \ --category=edge --division=$division --quiet" -run "mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +run "mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=rnnt --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=$division --quiet" -run "mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +run "mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=retinanet --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=$division --quiet" -run "mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +run "mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=bert-99 --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=$division --quiet" -run "mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +run "mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=3d-unet-99 --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=$division --quiet" diff --git a/script/run-all-mlperf-models/run-resnet50-macos.sh b/script/run-all-mlperf-models/run-resnet50-macos.sh index ea2f91346..81b8d7124 100644 --- a/script/run-all-mlperf-models/run-resnet50-macos.sh +++ b/script/run-all-mlperf-models/run-resnet50-macos.sh @@ -37,26 +37,26 @@ function run_test() { power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' power="" #Add your run commands here... 
-find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -submission_cmd_scenario='mlcr --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd_single='mlcr --tags=generate-run-cmds,inference,_populate-readme \ +readme_cmd_single='mlcr generate-run-cmds,inference,_populate-readme \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd='mlcr --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +readme_cmd='mlcr generate-run-cmds,inference,_populate-readme,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' diff --git a/script/run-all-mlperf-models/run-resnet50.sh b/script/run-all-mlperf-models/run-resnet50.sh index d9945c745..2ef2c3cff 100644 --- a/script/run-all-mlperf-models/run-resnet50.sh +++ b/script/run-all-mlperf-models/run-resnet50.sh @@ -37,26 +37,26 @@ function run_test() { power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' #Add your run commands here... 
-find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -submission_cmd_scenario='mlcr --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd_single='mlcr --tags=generate-run-cmds,inference,_populate-readme \ +readme_cmd_single='mlcr generate-run-cmds,inference,_populate-readme \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd='mlcr --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +readme_cmd='mlcr generate-run-cmds,inference,_populate-readme,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' diff --git a/script/run-all-mlperf-models/run-retinanet-sh b/script/run-all-mlperf-models/run-retinanet-sh index c5ede6296..1009fea53 100644 --- a/script/run-all-mlperf-models/run-retinanet-sh +++ b/script/run-all-mlperf-models/run-retinanet-sh @@ -37,26 +37,26 @@ function run_test() { power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' #Add your run commands here... 
-find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -submission_cmd_scenario='mlcr --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd_single='mlcr --tags=generate-run-cmds,inference,_populate-readme \ +readme_cmd_single='mlcr generate-run-cmds,inference,_populate-readme \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd='mlcr --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +readme_cmd='mlcr generate-run-cmds,inference,_populate-readme,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' diff --git a/script/run-all-mlperf-models/template.sh b/script/run-all-mlperf-models/template.sh index ff43cf2fe..4fbd47c73 100644 --- a/script/run-all-mlperf-models/template.sh +++ b/script/run-all-mlperf-models/template.sh @@ -40,26 +40,26 @@ function run_test() { power=${POWER_STRING} #Add your run commands here... 
-find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -submission_cmd_scenario='mlcr --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd_single='mlcr --tags=generate-run-cmds,inference,_populate-readme \ +readme_cmd_single='mlcr generate-run-cmds,inference,_populate-readme \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd='mlcr --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +readme_cmd='mlcr generate-run-cmds,inference,_populate-readme,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' diff --git a/script/run-docker-container/customize.py b/script/run-docker-container/customize.py index a8b92139b..a504ffd07 100644 --- a/script/run-docker-container/customize.py +++ b/script/run-docker-container/customize.py @@ -23,7 +23,7 @@ def preprocess(i): env['MLC_DOCKER_RUN_SCRIPT_TAGS'] = "run,docker,container" MLC_RUN_CMD = "mlc version" else: - MLC_RUN_CMD = "mlcr --tags=" + \ + MLC_RUN_CMD = "mlcr " + \ env['MLC_DOCKER_RUN_SCRIPT_TAGS'] + ' --quiet' r = mlc.access({'action': 'search', diff --git a/script/run-mlperf-inference-mobilenet-models/meta.yaml b/script/run-mlperf-inference-mobilenet-models/meta.yaml index f78f8b6e7..fd4cf3468 100644 --- a/script/run-mlperf-inference-mobilenet-models/meta.yaml +++ b/script/run-mlperf-inference-mobilenet-models/meta.yaml @@ -16,7 +16,7 @@ docker: results_dir: RESULTS_DIR submission_dir: SUBMISSION_DIR docker_run_final_cmds: - - mlcr --tags=run,mlperf,inference,mobilenet-models,_find-performance --adr.mlperf-inference-implementation.fake_run=True + - mlcr run,mlperf,inference,mobilenet-models,_find-performance --adr.mlperf-inference-implementation.fake_run=True --adr.compiler.tags=gcc fake_run_deps: false mounts: diff --git a/script/run-terraform/README-about.md b/script/run-terraform/README-about.md index 674ebee42..d0a7ba01f 100644 --- a/script/run-terraform/README-about.md +++ b/script/run-terraform/README-about.md @@ -7,6 +7,6 @@ gcloud auth application-default login The above two commands will install google-cloud-cli and authorize the user to access it.
Once done, you can create a GCP instance using CM commands like the one below. To destroy an instance, repeat the same command with the `--destroy` option. ``` -mlcr --tags=run,terraform,_gcp,_gcp_project.mlperf-inference-tests --cminit +mlcr run,terraform,_gcp,_gcp_project.mlperf-inference-tests --cminit ``` Here, `mlperf-inference-tests` is the name of the Google Cloud project as created in the [Google Cloud console](https://console.cloud.google.com/apis/dashboard) From e02e111b314c3fad4d69c24e8497b2dd7796b3af Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Sun, 16 Feb 2025 23:12:12 +0000 Subject: [PATCH 13/13] Fix mobilenet run-all --- .../run-mobilenet-models.sh | 22 ++++--------------- 1 file changed, 4 insertions(+), 18 deletions(-) diff --git a/script/run-all-mlperf-models/run-mobilenet-models.sh b/script/run-all-mlperf-models/run-mobilenet-models.sh index 4190bf82c..1844f2adf 100644 --- a/script/run-all-mlperf-models/run-mobilenet-models.sh +++ b/script/run-all-mlperf-models/run-mobilenet-models.sh @@ -34,34 +34,20 @@ extra_tags="" # run "$MLC_RUN_CMD" run "mlcr run,mobilenet-models,_tflite,_accuracy-only$extra_tags \ --adr.compiler.tags=gcc \ -${extra_option} \ ---results_dir=$HOME/results_dir" +${extra_option} " run "mlcr run,mobilenet-models,_tflite,_performance-only$extra_tags \ ${POWER} \ --adr.compiler.tags=gcc \ -${extra_option} \ ---results_dir=$HOME/results_dir" +${extra_option}" -run "mlcr run,mobilenet-models,_tflite,_populate-readme$extra_tags \ -${POWER} \ ---adr.compiler.tags=gcc \ -${extra_option} \ ---results_dir=$HOME/results_dir" run "mlcr run,mobilenet-models,_tflite,_armnn,_neon,_accuracy-only$extra_tags \ --adr.compiler.tags=gcc \ -${extra_option} \ ---results_dir=$HOME/results_dir" +${extra_option} " run "mlcr run,mobilenet-models,_tflite,_armnn,_neon,_performance-only$extra_tags \ ${POWER} \ ${extra_option} \ ---adr.compiler.tags=gcc \ ---results_dir=$HOME/results_dir" +--adr.compiler.tags=gcc" -run "mlcr run,mobilenet-models,_tflite,_armnn,_neon,_populate-readme$extra_tags \ -${POWER} \ -${extra_option} \ ---adr.compiler.tags=gcc \ ---results_dir=$HOME/results_dir"
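For reference, the create/destroy pair described in the run-terraform README above would look like the following sketch. It assumes the same `_gcp_project.mlperf-inference-tests` variation shown in the README; only the `--destroy` line is inferred from the prose ("repeat the same command with the `--destroy` option"), everything else is taken verbatim from the patch.
```
# Create the GCP instance, as documented in script/run-terraform/README-about.md
mlcr run,terraform,_gcp,_gcp_project.mlperf-inference-tests --cminit

# Tear it down again: per the README, the same command repeated with --destroy
mlcr run,terraform,_gcp,_gcp_project.mlperf-inference-tests --cminit --destroy
```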
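Similarly, the script/benchmark-program/customize.py hunk above builds a shell snippet that starts the system-utilisation monitor in the background and records its process id. A minimal standalone sketch of that pattern follows; the extra tags appended from `MLC_SYS_UTILISATION_SCRIPT_TAGS` are omitted, and the final `kill` is an illustrative assumption rather than part of the patched script.
```
# Start the system-utilisation monitor in the background
mlcr runtime,system,utilisation --quiet &
# $! expands to the PID of the most recently started background job
cmd_pid=$!
echo "CMD_PID=$cmd_pid"

# ... run the actual benchmark command here ...

# Stop the monitor once the run finishes (illustrative assumption)
kill $cmd_pid
```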