From 22f35db33ed2ba8bf76a0b22c6307da720e9fec5 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Thu, 23 Jan 2025 20:25:28 +0530 Subject: [PATCH 1/7] Better naming for github tests --- .github/workflows/test-image-classification-onnx.yml | 2 +- .github/workflows/test-mlperf-inference-resnet50.yml | 2 +- .github/workflows/test-mlperf-inference-retinanet.yml | 2 +- script/get-sys-utils-min/meta.yaml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/test-image-classification-onnx.yml b/.github/workflows/test-image-classification-onnx.yml index d0cc00ee4..36d29839f 100644 --- a/.github/workflows/test-image-classification-onnx.yml +++ b/.github/workflows/test-image-classification-onnx.yml @@ -9,7 +9,7 @@ on: - '!**.md' jobs: - build: + mlc-run: runs-on: ${{ matrix.os }} strategy: fail-fast: false diff --git a/.github/workflows/test-mlperf-inference-resnet50.yml b/.github/workflows/test-mlperf-inference-resnet50.yml index 5ee480e21..f32a4b1ee 100644 --- a/.github/workflows/test-mlperf-inference-resnet50.yml +++ b/.github/workflows/test-mlperf-inference-resnet50.yml @@ -10,7 +10,7 @@ on: - '**' - '!**.md' jobs: - mlperf-inference-r50: + mlc-run-with-results-upload: runs-on: ${{ matrix.os }} env: MLC_INDEX: "on" diff --git a/.github/workflows/test-mlperf-inference-retinanet.yml b/.github/workflows/test-mlperf-inference-retinanet.yml index d101ea14e..65ab5b75c 100644 --- a/.github/workflows/test-mlperf-inference-retinanet.yml +++ b/.github/workflows/test-mlperf-inference-retinanet.yml @@ -11,7 +11,7 @@ on: - '!**.md' jobs: - build: + mlc-run: runs-on: ${{ matrix.os }} strategy: fail-fast: false diff --git a/script/get-sys-utils-min/meta.yaml b/script/get-sys-utils-min/meta.yaml index 7db6d866e..ca092b60d 100644 --- a/script/get-sys-utils-min/meta.yaml +++ b/script/get-sys-utils-min/meta.yaml @@ -22,7 +22,7 @@ deps: env: MLC_CLEAN_DIRS: bin MLC_WINDOWS_SYS_UTILS_MIN_INSTALL: yes - MLC_PACKAGE_WIN_URL: https://zenodo.org/records/13868077/files/mlc-artifact-os-windows-32.zip?download=1 + MLC_PACKAGE_WIN_URL: https://zenodo.org/records/13868077/files/cm-artifact-os-windows-32.zip?download=1 MLC_SUDO: sudo new_env_keys: From c12dc01d23ab8c2cc0c965ff7ed40e0ce1ee2058 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Thu, 23 Jan 2025 20:46:17 +0530 Subject: [PATCH 2/7] README cleanups --- .github/scripts/process_individual_tests.py | 4 +- ...t-amd-mlperf-inference-implementations.yml | 4 +- ...intel-mlperf-inference-implementations.yml | 4 +- .../workflows/test-mlc-script-features.yml | 4 +- .../test-mlperf-inference-abtf-poc.yml | 10 +- .../workflows/test-mlperf-inference-dlrm.yml | 11 +- .../workflows/test-mlperf-inference-gptj.yml | 4 +- .../test-mlperf-inference-llama2.yml | 4 +- .../test-mlperf-inference-mixtral.yml | 4 +- .../test-mlperf-inference-retinanet.yml | 2 +- .../workflows/test-mlperf-inference-rnnt.yml | 4 +- .../workflows/test-mlperf-inference-sdxl.yaml | 4 +- .../workflows/test-mlperf-inference-tvm.yml | 4 +- ...vidia-mlperf-inference-implementations.yml | 4 +- .../workflows/test-qaic-compute-sdk-build.yml | 4 +- .github/workflows/test-qaic-software-kit.yml | 6 +- .github/workflows/test-scc24-sdxl.yaml | 8 +- automation/cache/module_misc.py | 38 +- automation/script/meta.json | 1 - automation/script/module.py | 42 +- script/activate-python-venv/README-extra.md | 7 - .../add-custom-nvidia-system/README-extra.md | 2 - .../README-extra.md | 17 - .../customize.py | 2 +- .../meta.yaml | 6 +- .../src/onnx_classify.py | 16 +- .../tests/README.md | 14 - 
.../README-extra.md | 3 - .../README-extra.md | 16 - .../README-extra.md | 16 - .../README-extra.md | 32 -- .../README-extra.md | 289 ------------ .../src/backend_pytorch.py | 6 +- .../README-extra.md | 1 - script/app-mlperf-automotive/customize.py | 2 +- script/app-mlperf-automotive/meta.yaml | 4 +- .../README-extra.md | 83 ---- .../tests/win.bat | 10 +- .../README-extra.md | 235 ---------- .../README_aws_dl2q.24xlarge.md | 16 +- script/app-mlperf-inference/README-extra.md | 131 ------ script/app-mlperf-inference/customize.py | 18 +- script/app-mlperf-inference/meta.yaml | 6 +- .../README-extra.md | 30 -- .../app-stable-diffusion-onnx-py/process.py | 6 +- .../run-template.sh | 18 +- script/benchmark-program/README-extra.md | 3 - script/benchmark-program/customize.py | 2 +- script/build-docker-image/README-extra.md | 16 - script/build-docker-image/meta.yaml | 2 +- script/build-dockerfile/customize.py | 18 +- script/build-dockerfile/meta.yaml | 10 +- .../README-extra.md | 2 - .../meta.yaml | 2 +- script/compile-program/README-extra.md | 3 - script/create-patch/README-extra.md | 5 - script/destroy-terraform/README-extra.md | 1 - script/detect-cpu/README-extra.md | 17 - script/download-and-extract/README-extra.md | 109 ----- .../tests/download-and-extract-file.bat | 2 +- .../tests/download-and-extract-file2.bat | 2 +- script/download-file/README-extra.md | 98 ---- script/download-file/tests/download-file.bat | 2 +- script/download-file/tests/download-file2.bat | 2 +- script/extract-file/README-extra.md | 115 ----- script/fail/README-extra.md | 1 - script/flash-tinyml-binary/README-extra.md | 16 - .../README-extra.md | 12 - .../meta.yaml | 4 +- .../README-extra.md | 55 --- .../README-extra.md | 3 - script/get-android-sdk/README-extra.md | 3 - script/get-aocl/README-extra.md | 0 script/get-aria2/README-extra.md | 9 - script/get-aws-cli/README-extra.md | 9 - script/get-bazel/README-extra.md | 9 - script/get-blis/README-extra.md | 0 script/get-cl/README-extra.md | 7 - script/get-cuda-devices/customize.py | 6 +- script/get-cuda-devices/meta.yaml | 10 +- script/get-cuda/README-extra.md | 44 -- script/get-cudnn/README-extra.md | 3 - script/get-cudnn/customize.py | 14 +- script/get-dataset-coco/README-extra.md | 95 ---- .../README-extra.md | 62 --- .../customize.py | 22 +- .../get-dataset-cognata-mlcommons/meta.yaml | 2 +- script/get-dataset-criteo/README-extra.md | 9 - .../get-dataset-imagenet-train/customize.py | 2 +- .../get-dataset-imagenet-val/README-extra.md | 28 -- script/get-dataset-imagenet-val/customize.py | 2 +- .../get-dataset-librispeech/README-extra.md | 26 -- script/get-dataset-openimages/README-extra.md | 2 - script/get-dataset-squad/README-extra.md | 20 - script/get-dlrm/README-extra.md | 15 - script/get-gcc/README-extra.md | 15 - script/get-generic-python-lib/README-extra.md | 6 - script/get-generic-sys-util/README-extra.md | 425 ------------------ script/get-git-repo/README-extra.md | 20 - script/get-git-repo/customize.py | 2 +- script/get-go/README-extra.md | 10 - script/get-ipol-src/README-extra.md | 1 - script/get-java/README-extra.md | 6 - script/get-javac/README-extra.md | 6 - script/get-llvm/README-extra.md | 96 ---- script/get-microtvm/README-extra.md | 5 - .../README-extra.md | 5 - .../README-extra.md | 21 - script/get-ml-model-mobilenet/README-extra.md | 15 - script/get-ml-model-resnet50/README-extra.md | 15 - script/get-ml-model-retinanet/README-extra.md | 16 - .../README-extra.md | 26 -- .../tests/download-and-install.bat | 2 +- .../README-extra.md | 9 - 
.../README-extra.md | 1 - .../README-extra.md | 18 - .../get-mlperf-inference-src/README-extra.md | 29 -- .../README-extra.md | 6 - .../customize.py | 8 +- script/get-mlperf-logging/README-extra.md | 16 - .../get-mlperf-training-src/README-extra.md | 27 -- script/get-nvidia-mitten/README-extra.md | 1 - script/get-openssl/README-extra.md | 8 - script/get-platform-details/README-EXTRA.md | 2 +- .../README-extra.md | 16 - .../README-extra.md | 26 -- .../README-extra.md | 28 -- script/get-python3/README-extra.md | 70 --- script/get-rocm-devices/README.md | 2 +- script/get-rocm-devices/customize.py | 6 +- script/get-rocm-devices/meta.yaml | 4 +- script/get-spec-ptd/README-extra.md | 16 - script/get-sys-utils-cm/run-arch.sh | 2 +- script/get-sys-utils-cm/run-debian.sh | 2 +- script/get-sys-utils-cm/run-macos.sh | 2 +- script/get-sys-utils-cm/run-rhel.sh | 2 +- script/get-sys-utils-cm/run-sles.sh | 2 +- script/get-sys-utils-cm/run-ubuntu.sh | 2 +- script/get-tensorrt/README-extra.md | 11 - script/get-tensorrt/customize.py | 2 +- script/get-terraform/README-extra.md | 9 - script/get-tvm-model/README-extra.md | 21 - script/get-tvm/README-extra.md | 5 - script/get-zephyr-sdk/README-extra.md | 19 - script/install-cuda-prebuilt/README-extra.md | 4 - script/install-llvm-prebuilt/README-extra.md | 99 ---- .../README-extra.md | 2 - script/plug-prebuilt-cudnn-to-cuda/run.sh | 4 +- .../plug-prebuilt-cusparselt-to-cuda/run.sh | 4 +- script/prune-bert-models/README-extra.md | 1 - script/publish-results-to-dashboard/code.py | 2 +- script/remote-run-commands/README-extra.md | 0 .../README-extra.md | 13 - .../run-all-mlperf-models/run-bert-macos.sh | 10 +- script/run-all-mlperf-models/run-bert.sh | 10 +- .../run-cpp-implementation.sh | 30 +- .../run-mobilenet-models.sh | 12 +- .../run-all-mlperf-models/run-nvidia-4090.sh | 4 +- .../run-all-mlperf-models/run-nvidia-a100.sh | 4 +- script/run-all-mlperf-models/run-nvidia-t4.sh | 4 +- .../run-all-mlperf-models/run-pruned-bert.sh | 4 +- .../run-reference-models.sh | 20 +- .../run-resnet50-macos.sh | 10 +- script/run-all-mlperf-models/run-resnet50.sh | 10 +- script/run-all-mlperf-models/run-retinanet-sh | 10 +- script/run-all-mlperf-models/template.sh | 10 +- script/run-docker-container/README-extra.md | 15 - script/run-docker-container/meta.yaml | 2 +- .../run-mlperf-inference-app/README-extra.md | 21 - .../run-mlperf-inference-app/run_mobilenet.py | 2 +- .../meta.yaml | 2 +- .../README-extra.md | 10 - .../run-mlperf-power-client/README-extra.md | 15 - .../run-mlperf-power-server/README-extra.md | 17 - script/run-terraform/README-about.md | 2 +- script/run-terraform/README-extra.md | 1 - .../customize.py | 2 +- script/set-venv/README-extra.md | 6 - script/tar-my-folder/README-extra.md | 12 - .../README-extra.md | 7 - .../README-extra.md | 17 - 181 files changed, 288 insertions(+), 3227 deletions(-) delete mode 100644 script/activate-python-venv/README-extra.md delete mode 100644 script/add-custom-nvidia-system/README-extra.md delete mode 100644 script/app-image-classification-onnx-py/README-extra.md delete mode 100644 script/app-image-classification-onnx-py/tests/README.md delete mode 100644 script/app-image-classification-tf-onnx-cpp/README-extra.md delete mode 100644 script/app-image-classification-torch-py/README-extra.md delete mode 100644 script/app-image-classification-tvm-onnx-py/README-extra.md delete mode 100644 script/app-image-corner-detection/README-extra.md delete mode 100644 script/app-loadgen-generic-python/README-extra.md delete mode 100644 
script/app-mlperf-automotive-mlcommons-python/README-extra.md delete mode 100644 script/app-mlperf-inference-mlcommons-cpp/README-extra.md delete mode 100644 script/app-mlperf-inference-mlcommons-python/README-extra.md delete mode 100644 script/app-mlperf-inference/README-extra.md delete mode 100644 script/app-stable-diffusion-onnx-py/README-extra.md delete mode 100644 script/benchmark-program/README-extra.md delete mode 100644 script/build-docker-image/README-extra.md delete mode 100644 script/build-mlperf-inference-server-nvidia/README-extra.md delete mode 100644 script/compile-program/README-extra.md delete mode 100644 script/create-patch/README-extra.md delete mode 100644 script/destroy-terraform/README-extra.md delete mode 100644 script/detect-cpu/README-extra.md delete mode 100644 script/download-and-extract/README-extra.md delete mode 100644 script/download-file/README-extra.md delete mode 100644 script/extract-file/README-extra.md delete mode 100644 script/fail/README-extra.md delete mode 100644 script/flash-tinyml-binary/README-extra.md delete mode 100644 script/generate-mlperf-inference-submission/README-extra.md delete mode 100644 script/generate-mlperf-tiny-report/README-extra.md delete mode 100644 script/generate-mlperf-tiny-submission/README-extra.md delete mode 100644 script/get-android-sdk/README-extra.md delete mode 100644 script/get-aocl/README-extra.md delete mode 100644 script/get-aria2/README-extra.md delete mode 100644 script/get-aws-cli/README-extra.md delete mode 100644 script/get-bazel/README-extra.md delete mode 100644 script/get-blis/README-extra.md delete mode 100644 script/get-cl/README-extra.md delete mode 100644 script/get-cuda/README-extra.md delete mode 100644 script/get-cudnn/README-extra.md delete mode 100644 script/get-dataset-coco/README-extra.md delete mode 100644 script/get-dataset-cognata-mlcommons/README-extra.md delete mode 100644 script/get-dataset-criteo/README-extra.md delete mode 100644 script/get-dataset-imagenet-val/README-extra.md delete mode 100644 script/get-dataset-librispeech/README-extra.md delete mode 100644 script/get-dataset-openimages/README-extra.md delete mode 100644 script/get-dataset-squad/README-extra.md delete mode 100644 script/get-dlrm/README-extra.md delete mode 100644 script/get-gcc/README-extra.md delete mode 100644 script/get-generic-python-lib/README-extra.md delete mode 100644 script/get-generic-sys-util/README-extra.md delete mode 100644 script/get-git-repo/README-extra.md delete mode 100644 script/get-go/README-extra.md delete mode 100644 script/get-ipol-src/README-extra.md delete mode 100644 script/get-java/README-extra.md delete mode 100644 script/get-javac/README-extra.md delete mode 100644 script/get-llvm/README-extra.md delete mode 100644 script/get-microtvm/README-extra.md delete mode 100644 script/get-ml-model-abtf-ssd-pytorch/README-extra.md delete mode 100644 script/get-ml-model-huggingface-zoo/README-extra.md delete mode 100644 script/get-ml-model-mobilenet/README-extra.md delete mode 100644 script/get-ml-model-resnet50/README-extra.md delete mode 100644 script/get-ml-model-retinanet/README-extra.md delete mode 100644 script/get-mlperf-inference-loadgen/README-extra.md delete mode 100644 script/get-mlperf-inference-nvidia-common-code/README-extra.md delete mode 100644 script/get-mlperf-inference-nvidia-scratch-space/README-extra.md delete mode 100644 script/get-mlperf-inference-results/README-extra.md delete mode 100644 script/get-mlperf-inference-src/README-extra.md delete mode 100644 
script/get-mlperf-inference-sut-configs/README-extra.md delete mode 100644 script/get-mlperf-logging/README-extra.md delete mode 100644 script/get-mlperf-training-src/README-extra.md delete mode 100644 script/get-nvidia-mitten/README-extra.md delete mode 100644 script/get-openssl/README-extra.md delete mode 100644 script/get-preprocessed-dataset-criteo/README-extra.md delete mode 100644 script/get-preprocessed-dataset-imagenet/README-extra.md delete mode 100644 script/get-preprocessed-dataset-openimages/README-extra.md delete mode 100644 script/get-python3/README-extra.md delete mode 100644 script/get-spec-ptd/README-extra.md delete mode 100644 script/get-tensorrt/README-extra.md delete mode 100644 script/get-terraform/README-extra.md delete mode 100644 script/get-tvm-model/README-extra.md delete mode 100644 script/get-tvm/README-extra.md delete mode 100644 script/get-zephyr-sdk/README-extra.md delete mode 100644 script/install-cuda-prebuilt/README-extra.md delete mode 100644 script/install-llvm-prebuilt/README-extra.md delete mode 100644 script/plug-prebuilt-cudnn-to-cuda/README-extra.md delete mode 100644 script/prune-bert-models/README-extra.md delete mode 100644 script/remote-run-commands/README-extra.md delete mode 100644 script/reproduce-mlperf-octoml-tinyml-results/README-extra.md delete mode 100644 script/run-docker-container/README-extra.md delete mode 100644 script/run-mlperf-inference-app/README-extra.md delete mode 100644 script/run-mlperf-inference-submission-checker/README-extra.md delete mode 100644 script/run-mlperf-power-client/README-extra.md delete mode 100644 script/run-mlperf-power-server/README-extra.md delete mode 100644 script/run-terraform/README-extra.md delete mode 100644 script/set-venv/README-extra.md delete mode 100644 script/tar-my-folder/README-extra.md delete mode 100644 script/truncate-mlperf-inference-accuracy-log/README-extra.md delete mode 100644 script/wrapper-reproduce-octoml-tinyml-submission/README-extra.md diff --git a/.github/scripts/process_individual_tests.py b/.github/scripts/process_individual_tests.py index 848656d50..a8df24239 100644 --- a/.github/scripts/process_individual_tests.py +++ b/.github/scripts/process_individual_tests.py @@ -26,9 +26,9 @@ 'action': 'test', 'target': 'script', 'item': uid, 'quiet': 'yes', 'out': 'con' } if os.environ.get('DOCKER_MLC_REPO', '') != '': - ii['docker_cm_repo'] = os.environ['DOCKER_MLC_REPO'] + ii['docker_mlc_repo'] = os.environ['DOCKER_MLC_REPO'] if os.environ.get('DOCKER_MLC_REPO_BRANCH', '') != '': - ii['docker_cm_repo_branch'] = os.environ['DOCKER_MLC_REPO_BRANCH'] + ii['docker_mlc_repo_branch'] = os.environ['DOCKER_MLC_REPO_BRANCH'] if os.environ.get('TEST_INPUT_INDEX', '') != '': ii['test_input_index'] = os.environ['TEST_INPUT_INDEX'] print(ii) diff --git a/.github/workflows/test-amd-mlperf-inference-implementations.yml b/.github/workflows/test-amd-mlperf-inference-implementations.yml index 512a2af8e..9ff0b4da4 100644 --- a/.github/workflows/test-amd-mlperf-inference-implementations.yml +++ b/.github/workflows/test-amd-mlperf-inference-implementations.yml @@ -22,5 +22,5 @@ jobs: export MLC_REPOS=$HOME/GH_MLC pip install --upgrade cm4mlops cm pull repo - cm run script --tags=run-mlperf,inference,_all-scenarios,_full,_r4.1-dev --execution_mode=valid --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=IntelSPR.24c --implementation=amd --backend=pytorch --category=datacenter --division=open --scenario=Offline --docker_dt=yes --docker_it=no 
--docker_cm_repo=gateoverflow@mlperf-automations --docker_cm_repo_branch=dev --adr.compiler.tags=gcc --device=rocm --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean --docker --quiet --docker_skip_run_cmd=yes - # cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=dev --commit_message="Results from GH action on SPR.24c" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=IntelSPR.24c + mlcr --tags=run-mlperf,inference,_all-scenarios,_full,_r4.1-dev --execution_mode=valid --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=IntelSPR.24c --implementation=amd --backend=pytorch --category=datacenter --division=open --scenario=Offline --docker_dt=yes --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=rocm --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean --docker --quiet --docker_skip_run_cmd=yes + # mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=dev --commit_message="Results from GH action on SPR.24c" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=IntelSPR.24c diff --git a/.github/workflows/test-intel-mlperf-inference-implementations.yml b/.github/workflows/test-intel-mlperf-inference-implementations.yml index c70e5bb22..d56302111 100644 --- a/.github/workflows/test-intel-mlperf-inference-implementations.yml +++ b/.github/workflows/test-intel-mlperf-inference-implementations.yml @@ -22,5 +22,5 @@ jobs: export MLC_REPOS=$HOME/GH_MLC pip install --upgrade cm4mlops pip install tabulate - cm run script --tags=run-mlperf,inference,_all-scenarios,_submission,_full,_r4.1-dev --preprocess_submission=yes --execution_mode=valid --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=IntelSPR.24c --implementation=intel --backend=pytorch --category=datacenter --division=open --scenario=Offline --docker_dt=yes --docker_it=no --docker_cm_repo=mlcommons@mlperf-automations --docker_cm_repo_branch=dev --adr.compiler.tags=gcc --device=cpu --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean --docker --quiet - cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from GH action on SPR.24c" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=IntelSPR.24c + mlcr --tags=run-mlperf,inference,_all-scenarios,_submission,_full,_r4.1-dev --preprocess_submission=yes --execution_mode=valid --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=IntelSPR.24c --implementation=intel --backend=pytorch --category=datacenter --division=open --scenario=Offline --docker_dt=yes --docker_it=no --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=cpu --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean --docker --quiet + mlcr --tags=push,github,mlperf,inference,submission 
--repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from GH action on SPR.24c" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=IntelSPR.24c diff --git a/.github/workflows/test-mlc-script-features.yml b/.github/workflows/test-mlc-script-features.yml index 96dabf921..d117afa8d 100644 --- a/.github/workflows/test-mlc-script-features.yml +++ b/.github/workflows/test-mlc-script-features.yml @@ -63,10 +63,10 @@ jobs: - name: Run docker container from dockerhub on linux if: runner.os == 'linux' run: | - mlc run script --tags=run,docker,container --adr.compiler.tags=gcc --docker_cm_repo=mlcommons@mlperf-automations --image_name=cm-script-app-image-classification-onnx-py --env.MLC_DOCKER_RUN_SCRIPT_TAGS=app,image-classification,onnx,python --env.MLC_DOCKER_IMAGE_BASE=ubuntu:22.04 --env.MLC_DOCKER_IMAGE_REPO=cknowledge --quiet + mlc run script --tags=run,docker,container --adr.compiler.tags=gcc --docker_mlc_repo=mlcommons@mlperf-automations --image_name=cm-script-app-image-classification-onnx-py --env.MLC_DOCKER_RUN_SCRIPT_TAGS=app,image-classification,onnx,python --env.MLC_DOCKER_IMAGE_BASE=ubuntu:22.04 --env.MLC_DOCKER_IMAGE_REPO=cknowledge --quiet - name: Run docker container locally on linux if: runner.os == 'linux' run: | - mlc run script --tags=run,docker,container --adr.compiler.tags=gcc --docker_cm_repo=mlcommons@mlperf-automations --image_name=mlc-script-app-image-classification-onnx-py --env.MLC_DOCKER_RUN_SCRIPT_TAGS=app,image-classification,onnx,python --env.MLC_DOCKER_IMAGE_BASE=ubuntu:22.04 --env.MLC_DOCKER_IMAGE_REPO=local --quiet + mlc run script --tags=run,docker,container --adr.compiler.tags=gcc --docker_mlc_repo=mlcommons@mlperf-automations --image_name=mlc-script-app-image-classification-onnx-py --env.MLC_DOCKER_RUN_SCRIPT_TAGS=app,image-classification,onnx,python --env.MLC_DOCKER_IMAGE_BASE=ubuntu:22.04 --env.MLC_DOCKER_IMAGE_REPO=local --quiet diff --git a/.github/workflows/test-mlperf-inference-abtf-poc.yml b/.github/workflows/test-mlperf-inference-abtf-poc.yml index d9da5830d..f12e6bd45 100644 --- a/.github/workflows/test-mlperf-inference-abtf-poc.yml +++ b/.github/workflows/test-mlperf-inference-abtf-poc.yml @@ -20,8 +20,8 @@ jobs: python-version: [ "3.8", "3.12" ] backend: [ "pytorch" ] implementation: [ "python" ] - docker: [ "", " --docker --docker_it=no --docker_cm_repo=mlcommons@mlperf-automations --docker_cm_repo_branch=dev --docker_dt=yes" ] - extra-args: [ "--adr.compiler.tags=gcc", "--env.CM_MLPERF_LOADGEN_BUILD_FROM_SRC=off" ] + docker: [ "", " --docker --docker_it=no --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --docker_dt=yes" ] + extra-args: [ "--adr.compiler.tags=gcc", "--env.MLC_MLPERF_LOADGEN_BUILD_FROM_SRC=off" ] exclude: - os: ubuntu-24.04 python-version: "3.8" @@ -30,16 +30,16 @@ jobs: - os: windows-latest extra-args: "--adr.compiler.tags=gcc" - os: windows-latest - docker: " --docker --docker_it=no --docker_cm_repo=mlcommons@mlperf-automations --docker_cm_repo_branch=dev --docker_dt=yes" + docker: " --docker --docker_it=no --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --docker_dt=yes" # windows docker image is not supported in CM yet - os: macos-latest python-version: "3.8" - os: macos-13 python-version: "3.8" - os: macos-latest - docker: " --docker --docker_it=no --docker_cm_repo=mlcommons@mlperf-automations --docker_cm_repo_branch=dev --docker_dt=yes" + docker: " --docker --docker_it=no 
--docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --docker_dt=yes" - os: macos-13 - docker: " --docker --docker_it=no --docker_cm_repo=mlcommons@mlperf-automations --docker_cm_repo_branch=dev --docker_dt=yes" + docker: " --docker --docker_it=no --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --docker_dt=yes" steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/test-mlperf-inference-dlrm.yml b/.github/workflows/test-mlperf-inference-dlrm.yml index 749849842..13bf2dbcc 100644 --- a/.github/workflows/test-mlperf-inference-dlrm.yml +++ b/.github/workflows/test-mlperf-inference-dlrm.yml @@ -23,9 +23,8 @@ jobs: source gh_action/bin/deactivate || python3 -m venv gh_action source gh_action/bin/activate export MLC_REPOS=$HOME/GH_MLC - python3 -m pip install cm4mlops - cm pull repo - cm run script --tags=run-mlperf,inference,_performance-only --pull_changes=yes --pull_inference_changes=yes --submitter="MLCommons" --model=dlrm-v2-99 --implementation=reference --backend=pytorch --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=1 --docker_it=no --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --clean + python3 -m pip install mlperf + mlcr --tags=run-mlperf,inference,_performance-only --pull_changes=yes --pull_inference_changes=yes --submitter="MLCommons" --model=dlrm-v2-99 --implementation=reference --backend=pytorch --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=1 --docker_it=no --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --clean build_intel: if: github.repository_owner == 'gateoverflow_off' @@ -43,6 +42,6 @@ jobs: source gh_action/bin/deactivate || python3 -m venv gh_action source gh_action/bin/activate export MLC_REPOS=$HOME/GH_MLC - python3 -m pip install cm4mlops - cm pull repo - cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --model=dlrm-v2-99 --implementation=intel --batch_size=1 --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=1 --docker_it=no --docker_cm_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean + python3 -m pip install mlperf + mlc pull repo + mlcr --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --model=dlrm-v2-99 --implementation=intel --batch_size=1 --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=1 --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean diff --git a/.github/workflows/test-mlperf-inference-gptj.yml b/.github/workflows/test-mlperf-inference-gptj.yml index 26543d98e..c99c503ff 100644 --- a/.github/workflows/test-mlperf-inference-gptj.yml +++ b/.github/workflows/test-mlperf-inference-gptj.yml @@ -26,6 +26,6 @@ jobs: export MLC_REPOS=$HOME/GH_MLC python3 -m pip install cm4mlops cm pull repo - cm run script 
--tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --docker --pull_changes=yes --pull_inference_changes=yes --model=gptj-99 --backend=${{ matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --target_qps=1 --quiet --docker_it=no --docker_cm_repo=gateoverflow@mlperf-automations --docker_cm_repo_branch=dev --adr.compiler.tags=gcc --beam_size=1 --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --get_platform_details=yes --implementation=reference --clean - cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/gh_action_submissions + mlcr --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --docker --pull_changes=yes --pull_inference_changes=yes --model=gptj-99 --backend=${{ matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --target_qps=1 --quiet --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --beam_size=1 --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --get_platform_details=yes --implementation=reference --clean + mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/gh_action_submissions diff --git a/.github/workflows/test-mlperf-inference-llama2.yml b/.github/workflows/test-mlperf-inference-llama2.yml index ec52d6b06..8de010505 100644 --- a/.github/workflows/test-mlperf-inference-llama2.yml +++ b/.github/workflows/test-mlperf-inference-llama2.yml @@ -31,5 +31,5 @@ jobs: pip install "huggingface_hub[cli]" git config --global credential.helper store huggingface-cli login --token ${{ secrets.HF_TOKEN }} --add-to-git-credential - cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --model=llama2-70b-99 --implementation=reference --backend=${{ matrix.backend }} --precision=${{ matrix.precision }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=0.001 --docker_it=no --docker_cm_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --env.MLC_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST=yes --clean - cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions" --quiet --submission_dir=$HOME/gh_action_submissions + mlcr --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --model=llama2-70b-99 --implementation=reference --backend=${{ matrix.backend }} --precision=${{ matrix.precision }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet 
--test_query_count=1 --target_qps=0.001 --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --env.MLC_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST=yes --clean + mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions" --quiet --submission_dir=$HOME/gh_action_submissions diff --git a/.github/workflows/test-mlperf-inference-mixtral.yml b/.github/workflows/test-mlperf-inference-mixtral.yml index 174ae82a6..26b369c09 100644 --- a/.github/workflows/test-mlperf-inference-mixtral.yml +++ b/.github/workflows/test-mlperf-inference-mixtral.yml @@ -31,5 +31,5 @@ jobs: git config --global credential.helper store huggingface-cli login --token ${{ secrets.HF_TOKEN }} --add-to-git-credential cm pull repo - cm run script --tags=run-mlperf,inference,_submission,_short --adr.inference-src.tags=_branch.dev --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --model=mixtral-8x7b --implementation=reference --batch_size=1 --precision=${{ matrix.precision }} --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker_it=no --docker_cm_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --docker --quiet --test_query_count=3 --target_qps=0.001 --clean --env.MLC_MLPERF_MODEL_MIXTRAL_8X7B_DOWNLOAD_TO_HOST=yes --env.MLC_MLPERF_DATASET_MIXTRAL_8X7B_DOWNLOAD_TO_HOST=yes --adr.openorca-mbxp-gsm8k-combined-preprocessed.tags=_size.1 - cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions - GO-phoenix" --quiet --submission_dir=$HOME/gh_action_submissions + mlcr --tags=run-mlperf,inference,_submission,_short --adr.inference-src.tags=_branch.dev --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --model=mixtral-8x7b --implementation=reference --batch_size=1 --precision=${{ matrix.precision }} --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --docker --quiet --test_query_count=3 --target_qps=0.001 --clean --env.MLC_MLPERF_MODEL_MIXTRAL_8X7B_DOWNLOAD_TO_HOST=yes --env.MLC_MLPERF_DATASET_MIXTRAL_8X7B_DOWNLOAD_TO_HOST=yes --adr.openorca-mbxp-gsm8k-combined-preprocessed.tags=_size.1 + mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions - GO-phoenix" --quiet --submission_dir=$HOME/gh_action_submissions diff --git a/.github/workflows/test-mlperf-inference-retinanet.yml b/.github/workflows/test-mlperf-inference-retinanet.yml index 65ab5b75c..268d2ccd8 100644 --- a/.github/workflows/test-mlperf-inference-retinanet.yml +++ b/.github/workflows/test-mlperf-inference-retinanet.yml @@ -67,4 
+67,4 @@ jobs: git config --global credential.https://github.com.helper "!gh auth git-credential" git config --global credential.https://gist.github.com.helper "" git config --global credential.https://gist.github.com.helper "!gh auth git-credential" - cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from Retinanet GH action on ${{ matrix.os }}" --quiet + mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from Retinanet GH action on ${{ matrix.os }}" --quiet diff --git a/.github/workflows/test-mlperf-inference-rnnt.yml b/.github/workflows/test-mlperf-inference-rnnt.yml index 1d77ee282..352272370 100644 --- a/.github/workflows/test-mlperf-inference-rnnt.yml +++ b/.github/workflows/test-mlperf-inference-rnnt.yml @@ -34,7 +34,7 @@ jobs: - name: Pull MLOps repository run: | cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }} - cm run script --quiet --tags=get,sys-utils-cm + mlcr --quiet --tags=get,sys-utils-cm - name: Test MLPerf Inference RNNT run: | - cm run script --tags=run,mlperf,inference,generate-run-cmds,_performance-only --submitter="cTuning" --model=rnnt --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --precision=${{ matrix.precision }} --target_qps=5 -v --quiet + mlcr --tags=run,mlperf,inference,generate-run-cmds,_performance-only --submitter="cTuning" --model=rnnt --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --precision=${{ matrix.precision }} --target_qps=5 -v --quiet diff --git a/.github/workflows/test-mlperf-inference-sdxl.yaml b/.github/workflows/test-mlperf-inference-sdxl.yaml index cbdb0bd04..7f7ce1fea 100644 --- a/.github/workflows/test-mlperf-inference-sdxl.yaml +++ b/.github/workflows/test-mlperf-inference-sdxl.yaml @@ -21,5 +21,5 @@ jobs: export MLC_REPOS=$HOME/GH_MLC python3 -m pip install cm4mlops cm pull repo - cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --docker --model=sdxl --backend=${{ matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --quiet --docker_it=no --docker_cm_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean - cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/gh_action_submissions + mlcr --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --docker --model=sdxl --backend=${{ matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --quiet --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions 
--env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean + mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/gh_action_submissions diff --git a/.github/workflows/test-mlperf-inference-tvm.yml b/.github/workflows/test-mlperf-inference-tvm.yml index 8ecf27fa2..b184396b3 100644 --- a/.github/workflows/test-mlperf-inference-tvm.yml +++ b/.github/workflows/test-mlperf-inference-tvm.yml @@ -31,7 +31,7 @@ jobs: - name: Pull MLOps repository run: | cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }} - cm run script --quiet --tags=get,sys-utils-cm + mlcr --quiet --tags=get,sys-utils-cm - name: MLPerf Inference ResNet50 using TVM run: | - cm run script --tags=run,mlperf,inference,generate-run-cmds --hw_name=default --model=resnet50 --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --target_qps=1 -v --quiet + mlcr --tags=run,mlperf,inference,generate-run-cmds --hw_name=default --model=resnet50 --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --target_qps=1 -v --quiet diff --git a/.github/workflows/test-nvidia-mlperf-inference-implementations.yml b/.github/workflows/test-nvidia-mlperf-inference-implementations.yml index 5ff906bbd..86f06873d 100644 --- a/.github/workflows/test-nvidia-mlperf-inference-implementations.yml +++ b/.github/workflows/test-nvidia-mlperf-inference-implementations.yml @@ -51,6 +51,6 @@ jobs: MLC_PULL_DEFAULT_MLOPS_REPO=no pip install --upgrade cm4mlops cm pull repo - cm run script --tags=run-mlperf,inference,_all-scenarios,_submission,_full,_r4.1-dev --preprocess_submission=yes --pull_changes=yes --pull_inference_changes=yes --execution_mode=valid --gpu_name=$gpu_name --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=$hw_name --implementation=nvidia --backend=tensorrt --category=datacenter,edge --division=closed --docker_dt=yes --docker_it=no --docker_cm_repo=mlcommons@mlperf-automations --docker_cm_repo_branch=dev --adr.compiler.tags=gcc --device=cuda --use_model_from_host=yes --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean $docker_string --quiet + mlcr --tags=run-mlperf,inference,_all-scenarios,_submission,_full,_r4.1-dev --preprocess_submission=yes --pull_changes=yes --pull_inference_changes=yes --execution_mode=valid --gpu_name=$gpu_name --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=$hw_name --implementation=nvidia --backend=tensorrt --category=datacenter,edge --division=closed --docker_dt=yes --docker_it=no --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=cuda --use_model_from_host=yes --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean $docker_string --quiet - cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from GH action on NVIDIA_$hw_name" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=$hw_name + mlcr 
--tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from GH action on NVIDIA_$hw_name" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=$hw_name diff --git a/.github/workflows/test-qaic-compute-sdk-build.yml b/.github/workflows/test-qaic-compute-sdk-build.yml index 6eb901fed..810da9e8e 100644 --- a/.github/workflows/test-qaic-compute-sdk-build.yml +++ b/.github/workflows/test-qaic-compute-sdk-build.yml @@ -27,8 +27,8 @@ jobs: - name: Install dependencies run: | MLC_PULL_DEFAULT_MLOPS_REPO=no pip install cm4mlops - cm run script --tags=get,sys-utils-cm --quiet + mlcr --tags=get,sys-utils-cm --quiet - name: Test QAIC Compute SDK for compilation run: | - cm run script --tags=get,qaic,compute,sdk --adr.llvm.version=${{ matrix.llvm-version }} --quiet + mlcr --tags=get,qaic,compute,sdk --adr.llvm.version=${{ matrix.llvm-version }} --quiet diff --git a/.github/workflows/test-qaic-software-kit.yml b/.github/workflows/test-qaic-software-kit.yml index 7e8e6e662..127de2323 100644 --- a/.github/workflows/test-qaic-software-kit.yml +++ b/.github/workflows/test-qaic-software-kit.yml @@ -31,9 +31,9 @@ jobs: python-version: ${{ matrix.python-version }} - name: Pull MLOps repository run: | - pip install cm4mlops - cm run script --tags=get,sys-utils-cm --quiet + pip install mlperf + mlcr --tags=get,sys-utils-mlc --quiet - name: Test Software Kit for compilation on Ubuntu 20.04 run: | - cm run script --tags=get,qaic,software,kit --adr.compiler.tags=${{ matrix.compiler }} --adr.compiler.version=${{ matrix.llvm-version }} --quiet + mlcr --tags=get,qaic,software,kit --adr.compiler.tags=${{ matrix.compiler }} --adr.compiler.version=${{ matrix.llvm-version }} --quiet diff --git a/.github/workflows/test-scc24-sdxl.yaml b/.github/workflows/test-scc24-sdxl.yaml index 61fac51db..becc3830d 100644 --- a/.github/workflows/test-scc24-sdxl.yaml +++ b/.github/workflows/test-scc24-sdxl.yaml @@ -28,8 +28,8 @@ jobs: pip install --upgrade mlcflow pip install tabulate mlc pull repo - mlcr --tags=run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --pull_changes=yes --pull_inference_changes=yes --model=sdxl --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_cm_repo=$MLC_DOCKER_REPO --docker_cm_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean - mlcr --tags=run-mlperf,inference,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_cm_repo=$MLC_DOCKER_REPO --docker_cm_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean + mlcr --tags=run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --pull_changes=yes --pull_inference_changes=yes --model=sdxl --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ 
matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean + mlcr --tags=run-mlperf,inference,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean mlcr --tags=generate,inference,submission --clean --run-checker --tar=yes --env.MLC_TAR_OUTFILE=submission.tar.gz --division=open --category=datacenter --run_style=test --adr.submission-checker.tags=_short-run --quiet --submitter=MLCommons --submission_dir=$HOME/scc_gh_action_submissions --results_dir=$HOME/scc_gh_action_results/test_results mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/cm4mlperf-inference --repo_branch=mlperf-inference-results-scc24 --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/scc_gh_action_submissions @@ -56,7 +56,7 @@ jobs: pip install --upgrade mlcflow pip install tabulate mlc pull repo - mlcr --tags=run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --pull_changes=yes --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_cm_repo=$MLC_DOCKER_REPO --docker_cm_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --pull_changes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --hw_name=go-spr --custom_system_nvidia=yes --clean - mlcr --tags=run-mlperf,inference,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_cm_repo=$MLC_DOCKER_REPO --docker_cm_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean + mlcr --tags=run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --pull_changes=yes --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --pull_changes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --hw_name=go-spr --custom_system_nvidia=yes --clean + mlcr --tags=run-mlperf,inference,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} 
--category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean mlcr --tags=generate,inference,submission --clean --run-checker --tar=yes --env.MLC_TAR_OUTFILE=submission.tar.gz --division=open --category=datacenter --run_style=test --adr.submission-checker.tags=_short-run --quiet --submitter=MLCommons --submission_dir=$HOME/scc_gh_action_submissions --results_dir=$HOME/scc_gh_action_results/test_results mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/cm4mlperf-inference --repo_branch=mlperf-inference-results-scc24 --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/scc_gh_action_submissions diff --git a/automation/cache/module_misc.py b/automation/cache/module_misc.py index d4eb76d1c..d5895edd4 100644 --- a/automation/cache/module_misc.py +++ b/automation/cache/module_misc.py @@ -34,11 +34,11 @@ def copy_to_remote(i): if not remote_host: return {'return': 1, 'error': 'Please input remote host_name/IP via --remote_host'} - remote_cm_repos_location = i.get( - 'remote_cm_repos_location', os.path.join( + remote_mlc_repos_location = i.get( + 'remote_mlc_repos_location', os.path.join( "/home", os.getlogin(), "CM", "repos")) - remote_cm_cache_location = os.path.join( - remote_cm_repos_location, "local", "cache") + remote_mlc_cache_location = os.path.join( + remote_mlc_repos_location, "local", "cache") remote_port = i.get('remote_port', '22') remote_user = i.get('remote_user', os.getlogin()) @@ -46,12 +46,12 @@ def copy_to_remote(i): tag_string = i['tags'] tag_string += ",-tmp" - cm_input = {'action': 'show', + mlc_input = {'action': 'show', 'automation': 'cache', 'tags': f'{tag_string}', 'quiet': True } - r = self_module.cmind.access(cm_input) + r = self_module.cmind.access(mlc_input) if r['return'] > 0: return r @@ -71,37 +71,37 @@ def copy_to_remote(i): path = k.path cacheid = os.path.basename(path) - copy_cmd = f"rsync -avz --exclude cm-cached-state.json -e 'ssh -p {remote_port}' {path} {remote_user}@{remote_host}:{remote_cm_cache_location}" + copy_cmd = f"rsync -avz --exclude cm-cached-state.json -e 'ssh -p {remote_port}' {path} {remote_user}@{remote_host}:{remote_mlc_cache_location}" print(copy_cmd) os.system(copy_cmd) - cm_cached_state_json_file = os.path.join(path, "cm-cached-state.json") - if not os.path.exists(cm_cached_state_json_file): + mlc_cached_state_json_file = os.path.join(path, "cm-cached-state.json") + if not os.path.exists(mlc_cached_state_json_file): return {'return': 1, 'error': f'cm-cached-state.json file missing in {path}'} - with open(cm_cached_state_json_file, "r") as f: - cm_cached_state = json.load(f) + with open(mlc_cached_state_json_file, "r") as f: + mlc_cached_state = json.load(f) - new_env = cm_cached_state['new_env'] - new_state = cm_cached_state['new_state'] # Todo fix new state - cm_repos_path = os.environ.get( + new_env = mlc_cached_state['new_env'] + new_state = mlc_cached_state['new_state'] # Todo fix new state + mlc_repos_path = os.environ.get( 'MLC_REPOS', os.path.join( os.path.expanduser("~"), "CM", "repos")) - cm_cache_path = os.path.realpath( - os.path.join(cm_repos_path, "local", "cache")) + mlc_cache_path = 
os.path.realpath( + os.path.join(mlc_repos_path, "local", "cache")) for key, val in new_env.items(): - if isinstance(val, str) and cm_cache_path in val: + if isinstance(val, str) and mlc_cache_path in val: new_env[key] = val.replace( - cm_cache_path, remote_cm_cache_location) + mlc_cache_path, remote_mlc_cache_location) with open("tmp_remote_cached_state.json", "w") as f: - json.dump(cm_cached_state, f, indent=2) + json.dump(mlc_cached_state, f, indent=2) remote_cached_state_file_location = os.path.join( - remote_cm_cache_location, cacheid, "cm-cached-state.json") + remote_mlc_cache_location, cacheid, "cm-cached-state.json") copy_cmd = f"rsync -avz -e 'ssh -p {remote_port}' tmp_remote_cached_state.json {remote_user}@{remote_host}:{remote_cached_state_file_location}" print(copy_cmd) os.system(copy_cmd) diff --git a/automation/script/meta.json b/automation/script/meta.json index c1419f8f8..51ae037bd 100644 --- a/automation/script/meta.json +++ b/automation/script/meta.json @@ -2,7 +2,6 @@ "alias": "script", "automation_alias": "automation", "automation_uid": "bbeb15d8f0a944a4", - "min_cm_version": "2.2.0", "deps": { "cache": "cache,541d6f712a6b464e" }, diff --git a/automation/script/module.py b/automation/script/module.py index 71ea0c9ad..bcd9f5c0c 100644 --- a/automation/script/module.py +++ b/automation/script/module.py @@ -286,10 +286,10 @@ def _run(self, i): if r['return'] > 0: return r - cm_input = r['cm_input'] + mlc_input = r['mlc_input'] utils.merge_dicts({'dict1': i, - 'dict2': cm_input, + 'dict2': mlc_input, 'append_lists': True, 'append_unique': True}) @@ -2275,11 +2275,11 @@ def _update_env_from_input(self, env, i): ########################################################################## def _fix_cache_paths(self, env): ''' - cm_repos_path = os.environ.get( + mlc_repos_path = os.environ.get( 'MLC_REPOS', os.path.join( os.path.expanduser("~"), "CM", "repos")) current_cache_path = os.path.realpath( - os.path.join(cm_repos_path, "local", "cache")) + os.path.join(mlc_repos_path, "local", "cache")) ''' current_cache_path = self.action_object.local_cache_path @@ -3008,7 +3008,7 @@ def test(self, i): # run the test without any variations run_variations = [""] use_docker = run_input.get('docker', False) - for key in run_input: # override meta with any user inputs like for docker_cm_repo + for key in run_input: # override meta with any user inputs like for docker_mlc_repo if i.get(key): if isinstance(run_input[key], dict): utils.merge_dicts({ @@ -6355,16 +6355,16 @@ def dump_repro_start(repro_prefix, ii): pass # For experiment - cm_output = {} + mlc_output = {} - cm_output['tmp_test_value'] = 10.0 + mlc_output['tmp_test_value'] = 10.0 - cm_output['info'] = info - cm_output['input'] = ii + mlc_output['info'] = info + mlc_output['input'] = ii try: with open('mlc-output.json', 'w', encoding='utf-8') as f: - json.dump(cm_output, f, ensure_ascii=False, indent=2) + json.dump(mlc_output, f, ensure_ascii=False, indent=2) except BaseException: pass @@ -6390,27 +6390,27 @@ def dump_repro(repro_prefix, rr, run_state): pass # For experiment - cm_output = {} + mlc_output = {} # Attempt to read try: r = utils.load_json('mlc-output.json') if r['return'] == 0: - cm_output = r['meta'] + mlc_output = r['meta'] except BaseException: pass - cm_output['output'] = rr - cm_output['state'] = copy.deepcopy(run_state) + mlc_output['output'] = rr + mlc_output['state'] = copy.deepcopy(run_state) # Try to load version_info.json version_info = {} version_info_orig = {} - if 'version_info' in cm_output['state']: - version_info_orig = cm_output['state']['version_info'] - del
(cm_output['state']['version_info']) + if 'version_info' in mlc_output['state']: + version_info_orig = mlc_output['state']['version_info'] + del (mlc_output['state']['version_info']) try: r = utils.load_json('version_info.json') @@ -6426,17 +6426,17 @@ def dump_repro(repro_prefix, rr, run_state): pass if len(version_info) > 0: - cm_output['version_info'] = version_info + mlc_output['version_info'] = version_info if rr['return'] == 0: # See https://cTuning.org/ae - cm_output['acm_ctuning_repro_badge_available'] = True - cm_output['acm_ctuning_repro_badge_functional'] = True + mlc_output['amlc_ctuning_repro_badge_available'] = True + mlc_output['amlc_ctuning_repro_badge_functional'] = True try: with open('mlc-output.json', 'w', encoding='utf-8') as f: json.dump( - cm_output, + mlc_output, f, ensure_ascii=False, indent=2, diff --git a/script/activate-python-venv/README-extra.md b/script/activate-python-venv/README-extra.md deleted file mode 100644 index 2b61d193c..000000000 --- a/script/activate-python-venv/README-extra.md +++ /dev/null @@ -1,7 +0,0 @@ -# About - -Activate python virtual environment installed via CM: - -```bash -cm run script "activate python-ven" (--version={python version}) (--name={user friendly name of the virtual environment)) -``` diff --git a/script/add-custom-nvidia-system/README-extra.md b/script/add-custom-nvidia-system/README-extra.md deleted file mode 100644 index baa487880..000000000 --- a/script/add-custom-nvidia-system/README-extra.md +++ /dev/null @@ -1,2 +0,0 @@ -# About -This CM script detects the system details using Nvidia script diff --git a/script/app-image-classification-onnx-py/README-extra.md b/script/app-image-classification-onnx-py/README-extra.md deleted file mode 100644 index e379e2544..000000000 --- a/script/app-image-classification-onnx-py/README-extra.md +++ /dev/null @@ -1,17 +0,0 @@ -# About - -See [this tutorial](https://github.com/mlcommons/ck/blob/master/docs/tutorials/modular-image-classification.md). - -# Collaborative testing - -## Windows 11 - -* CUDA 11.8; cuDNN 8.7.0; ONNX GPU 1.16.1 - -## Windows 10 - -* CUDA 11.6; cuDNN 8.6.0.96; ONNX GPU 1.13.1 - -## Ubuntu 22.04 - -* CUDA 11.3; ONNX 1.12.0 diff --git a/script/app-image-classification-onnx-py/customize.py b/script/app-image-classification-onnx-py/customize.py index 338986722..ab145df23 100644 --- a/script/app-image-classification-onnx-py/customize.py +++ b/script/app-image-classification-onnx-py/customize.py @@ -27,7 +27,7 @@ def postprocess(i): # Saving predictions to JSON file to current directory # Should work with "cm docker script" ? 
- data = state.get('cm_app_image_classification_onnx_py', {}) + data = state.get('mlc_app_image_classification_onnx_py', {}) fjson = 'mlc-image-classification-onnx-py.json' fyaml = 'mlc-image-classification-onnx-py.yaml' diff --git a/script/app-image-classification-onnx-py/meta.yaml b/script/app-image-classification-onnx-py/meta.yaml index 82a559f8f..28170e802 100644 --- a/script/app-image-classification-onnx-py/meta.yaml +++ b/script/app-image-classification-onnx-py/meta.yaml @@ -90,7 +90,7 @@ new_env_keys: new_state_keys: - - cm_app_image_classification_onnx_py + - mlc_app_image_classification_onnx_py input_description: @@ -104,8 +104,8 @@ input_description: docker: skip_run_cmd: 'no' - skip_cm_sys_upgrade: 'yes' - cm_repo_flags: '--branch=dev' + skip_mlc_sys_upgrade: 'yes' + mlc_repo_flags: '--branch=dev' use_host_group_id: 'yes' image_tag_extra: '-mlc-dev' input_paths: diff --git a/script/app-image-classification-onnx-py/src/onnx_classify.py b/script/app-image-classification-onnx-py/src/onnx_classify.py index 5ce1dd4db..32da50189 100644 --- a/script/app-image-classification-onnx-py/src/onnx_classify.py +++ b/script/app-image-classification-onnx-py/src/onnx_classify.py @@ -166,7 +166,7 @@ def load_a_batch(batch_filenames): [output_layer_name], { input_layer_name: batch_data})[0] - cm_status = {'classifications': []} + mlc_status = {'classifications': []} print('') top_classification = '' @@ -187,18 +187,18 @@ def load_a_batch(batch_filenames): softmax_vector[class_idx], labels[class_idx])) - cm_status['classifications'].append({'class_idx': int(class_idx), + mlc_status['classifications'].append({'class_idx': int(class_idx), 'softmax': float(softmax_vector[class_idx]), 'label': labels[class_idx]}) print('') print('Top classification: {}'.format(top_classification)) - cm_status['top_classification'] = top_classification + mlc_status['top_classification'] = top_classification avg_time = (time.time() - start_time) / batch_count -cm_status['avg_time'] = avg_time +mlc_status['avg_time'] = avg_time -# Record cm_status to embedded it into CM workflows -with open('tmp-run-state.json', 'w') as cm_file: - cm_file.write(json.dumps( - {'cm_app_image_classification_onnx_py': cm_status}, sort_keys=True, indent=2)) +# Record mlc_status to embedded it into CM workflows +with open('tmp-run-state.json', 'w') as mlc_file: + mlc_file.write(json.dumps( + {'mlc_app_image_classification_onnx_py': mlc_status}, sort_keys=True, indent=2)) diff --git a/script/app-image-classification-onnx-py/tests/README.md b/script/app-image-classification-onnx-py/tests/README.md deleted file mode 100644 index 15254aa91..000000000 --- a/script/app-image-classification-onnx-py/tests/README.md +++ /dev/null @@ -1,14 +0,0 @@ -```bash -docker system prune -a -f - -cmr "download file _wget" --url=https://cKnowledge.org/ai/data/computer_mouse.jpg --verify=no --env.MLC_DOWNLOAD_CHECKSUM=45ae5c940233892c2f860efdf0b66e7e - -cm docker script "python app image-classification onnx" --docker_cm_repo=ctuning@mlcommons-ck --env.MLC_IMAGE=computer_mouse.jpg -cm docker script "python app image-classification onnx" --docker_cm_repo=ctuning@mlcommons-ck --input=computer_mouse.jpg - -cmrd "python app image-classification onnx" --docker_cm_repo=ctuning@mlcommons-ck --input=computer_mouse.jpg -j --docker_it - -cmrd "python app image-classification onnx" --docker_cm_repo=ctuning@mlcommons-ck --input=computer_mouse.jpg --output=. 
- - -``` diff --git a/script/app-image-classification-tf-onnx-cpp/README-extra.md b/script/app-image-classification-tf-onnx-cpp/README-extra.md deleted file mode 100644 index 5e59c8fed..000000000 --- a/script/app-image-classification-tf-onnx-cpp/README-extra.md +++ /dev/null @@ -1,3 +0,0 @@ -# Image Classification App in C++ for ResNet50 model - -* In development stage, not complete diff --git a/script/app-image-classification-torch-py/README-extra.md b/script/app-image-classification-torch-py/README-extra.md deleted file mode 100644 index 662888506..000000000 --- a/script/app-image-classification-torch-py/README-extra.md +++ /dev/null @@ -1,16 +0,0 @@ -# CPU - -## 20240129; Windows 11 - -```bash -cmr "get generic-python-lib _package.torch" --version=2.1.1 -cmr "get generic-python-lib _package.torchvision" --version=0.16.2 -``` - -# CUDA - -```bash -cm run script "install python-venv" --name=test -cm run script "python app image-classification pytorch _cuda" --adr.python.name=test -cm run script "python app image-classification pytorch _cuda" --adr.python.name=test --input=src/computer_mouse.jpg -``` diff --git a/script/app-image-classification-tvm-onnx-py/README-extra.md b/script/app-image-classification-tvm-onnx-py/README-extra.md deleted file mode 100644 index c24e073a9..000000000 --- a/script/app-image-classification-tvm-onnx-py/README-extra.md +++ /dev/null @@ -1,16 +0,0 @@ -Example: - -```bash -cm run script "get llvm" --version=14.0.0 -cm run script "get tvm _llvm" --version=0.10.0 -cm run script "python app image-classification tvm-onnx" -``` - -Example 2: - -```bash -cm run script "install python-venv" --name=test --version=3.10.7 -cm run script "get generic-python-lib _apache-tvm" -cm run script "python app image-classification tvm-onnx _tvm-pip-install" -cm run script "python app image-classification tvm-onnx _tvm-pip-install" --input=`cm find script --tags=python,app,image-classification,tvm-onnx`/img/computer_mouse.jpg -``` \ No newline at end of file diff --git a/script/app-image-corner-detection/README-extra.md b/script/app-image-corner-detection/README-extra.md deleted file mode 100644 index 19fe90edb..000000000 --- a/script/app-image-corner-detection/README-extra.md +++ /dev/null @@ -1,32 +0,0 @@ -# Examples - -First download images: - -```bash -cmr "download file _wget" --url=https://cKnowledge.org/ai/data/data.pgm --ssl-verify=no --md5sum=0af279e557a8de252d7ff0751a999379 -cmr "download file _wget" --url=https://cKnowledge.org/ai/data/computer_mouse.jpg --ssl-verify=no --md5sum=45ae5c940233892c2f860efdf0b66e7e -cmr "download file _wget" --url=https://cKnowledge.org/ai/data/computer_mouse2.jpg --ssl-verify=no --md5sum=e7e2050b41e0b85cedca3ca87ab55390 -cmr "download file _wget" --url=https://cKnowledge.org/ai/data/computer_mouse2.pgm --ssl-verify=no --md5sum=a4e48556d3eb09402bfc98e375b41311 -``` - -Then run app - -```bash -cm run script "app image corner-detection" -cm run script "app image corner-detection" -add_deps_recursive.compiler.tags=llvm -cm run script "app image corner-detection" -add_deps_recursive.compiler.tags=gcc -cm run script "app image corner-detection" -add_deps_recursive.compiler.tags=llvm --add_deps_recursive.compiler.version_min=11.0.0 --add_deps_recursive.compiler.version_max=13.0.0 -``` - -## Reproducibility matrix - -* Ubuntu 22.04; x64; LLVM 17.06 -* Windows 11; x64; LLVM 17.06 - -## Debugging scripts without CM - -```bash -cmr "app image corner-detection" --debug_script_tags=compile,cpp-program -cmr "app image corner-detection" 
--debug-script-tags=benchmark,program -``` - diff --git a/script/app-loadgen-generic-python/README-extra.md b/script/app-loadgen-generic-python/README-extra.md deleted file mode 100644 index 6222b6574..000000000 --- a/script/app-loadgen-generic-python/README-extra.md +++ /dev/null @@ -1,289 +0,0 @@ -This [portable CM script](https://access.cknowledge.org/playground/?action=scripts) -provides a unified API and CLI to benchmark ONNX models -using the [MLPerf loadgen](https://github.com/mlcommons/inference/tree/master/loadgen). -It measures performance without accuracy using randomly generated inputs. -If you need accuracy too, please check [official CM automation for MLPerf inference](../run-mlperf-inference-app). - -## Development status - -* [20240214] ONNX runtime (CPU & GPU) is connected with LoadGen and tested on Ubuntu, Windows and MacOS. - See [sources](src/ort.py). - -## Prerequisites - -### Install CM with automation recipes - -Install [MLCommons CM](https://github.com/mlcommons/ck/blob/master/docs/installation.md) -and pull CM repository with portable automation scripts to benchmark ML Systems: - -```bash -pip install cmind -cm pull repo mlcommons@cm4mlops --checkout=dev -``` - -### Clean CM cache - -If you want a "clean" environment, you may want to clean your CM cache as follows: -```bash -cm rm cache -f -``` - -### Set up CM virtual environment - -
-Click if you want to use Python virtual environment - -We suggest you to install a python virtual environment via CM though it's not strictly necessary -(CM can automatically detect and reuse your Python installation and environments): -```bash -cm run script "install python-venv" --name=loadgen -``` - -You can also install a specific version of Python on your system via: -```bash -cm run script "install python-venv" --name=loadgen --version=3.10.7 -``` - -By default, CM will be asking users to select one from all detected and installed Python versions -including the above one, any time a script with python dependency is run. To avoid that, you -can set up the following environment variable with the name of the current virtual environment: - -```bash -export MLC_SCRIPT_EXTRA_CMD="--adr.python.name=loadgen" -``` - -The `--adr` flag stands for "Add to all Dependencies Recursively" and will find all sub-dependencies on other CM scripts - -
- - -### Install dependencies via CM (optional) - -
-Click if you want to install specific versions of dependencies - -You can skip this sub-section if you want CM to automatically detect already installed -ONNX runtime on your system. Otherwise, follow the next steps to install the latest or specific -version of ONNX runtime. - - -### Download LoadGen sources from MLPerf inference benchmark - -```bash -cm run script "get mlperf inference src" --version=r3.1 -``` - -### Install MLPerf LoadGen -We can now install loadgen via CM while forcing compiler dependency to GCC: - -```bash -cm run script "get mlperf loadgen" -``` - -### ONNX, CPU - -```bash -cm run script "get generic-python-lib _onnxruntime" -``` - -or - -```bash -cm run script "get generic-python-lib _onnxruntime" --version=1.13.1 -``` - -or - -```bash -cm run script "get generic-python-lib _onnxruntime" --version_min=1.10.0 -``` -
- -### Benchmark standard MLPerf model - -You can use CM variations prefixed by `_` to benchmark an official MLPerf model -(_resnet50 or _retinanet): - -``` -cm run script "python app loadgen-generic _onnxruntime _retinanet" --samples=5 -cmr "python app loadgen-generic _onnxruntime _resnet50" -``` - -Normally, you should see the following performance report from the loadgen: - - - - -
-Click to open - -```bash - -2022-12-06 16:51:39,279 INFO MainThread - __main__ main: Model: /home/gfursin/CM/repos/local/cache/9c825a0a06fb48e2/resnet50_v1.onnx -2022-12-06 16:51:39,279 INFO MainThread - __main__ main: Runner: inline, Concurrency: 4 -2022-12-06 16:51:39,279 INFO MainThread - __main__ main: Results: results/resnet50_v1.onnx/inline -2022-12-06 16:51:39,279 INFO MainThread - __main__ main: Test Started -2022-12-06 16:51:39,399 INFO MainThread - loadgen.harness load_query_samples: Loaded 100 samples -2022-12-06 16:51:55,723 INFO MainThread - loadgen.harness issue_query: Queries issued 550 -2022-12-06 16:51:55,725 INFO MainThread - loadgen.harness flush_queries: Queries flushed -2022-12-06 16:51:55,731 INFO MainThread - loadgen.harness unload_query_samples: Unloaded samples -================================================ -MLPerf Results Summary -================================================ -SUT name : PySUT -Scenario : Offline -Mode : PerformanceOnly -Samples per second: 33.6903 -Result is : VALID - Min duration satisfied : Yes - Min queries satisfied : Yes - Early stopping satisfied: Yes - -================================================ -Additional Stats -================================================ -Min latency (ns) : 16325180169 -Max latency (ns) : 16325180169 -Mean latency (ns) : 16325180169 -50.00 percentile latency (ns) : 16325180169 -90.00 percentile latency (ns) : 16325180169 -95.00 percentile latency (ns) : 16325180169 -97.00 percentile latency (ns) : 16325180169 -99.00 percentile latency (ns) : 16325180169 -99.90 percentile latency (ns) : 16325180169 - -================================================ -Test Parameters Used -================================================ -samples_per_query : 550 -target_qps : 50 -target_latency (ns): 0 -max_async_queries : 1 -min_duration (ms): 10000 -max_duration (ms): 0 -min_query_count : 1 -max_query_count : 0 -qsl_rng_seed : 0 -sample_index_rng_seed : 0 -schedule_rng_seed : 0 -accuracy_log_rng_seed : 0 -accuracy_log_probability : 0 -accuracy_log_sampling_target : 0 -print_timestamps : 0 -performance_issue_unique : 0 -performance_issue_same : 0 -performance_issue_same_index : 0 -performance_sample_count : 100 - -No warnings encountered during test. - -No errors encountered during test. -2022-12-06 16:51:55,753 INFO MainThread - __main__ main: Observed QPS: 33.6903 -2022-12-06 16:51:55,753 INFO MainThread - __main__ main: Result: VALID -2022-12-06 16:51:55,753 INFO MainThread - __main__ main: Test Completed - - - Running postprocess ... - - running time of script "app,loadgen,generic,loadgen-generic,python": 370.87 sec. - -``` - -
- - -### Benchmark custom model - -You can also specify any custom onnx model file as follows: - -```bash -cm run script "python app loadgen-generic _onnxruntime" --modelpath= -``` - -### Benchmark Hugging Face model - -```bash -cmr "python app loadgen-generic _onnxruntime _custom _huggingface _model-stub.ctuning/mlperf-inference-bert-onnx-fp32-squad-v1.1" --adr.hf-downloader.model_filename=model.onnx -``` - -*See more examples to download Hugging Face models via CM [here](../get-ml-model-huggingface-zoo/README-extra.md).* - -### Benchmark using ONNX CUDA - -```bash -cm rm cache -f -cmr "python app loadgen-generic _onnxruntime _cuda _retinanet" --quiet -cmr "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.ctuning/mlperf-inference-bert-onnx-fp32-squad-v1.1" --adr.hf-downloader.model_filename=model.onnx -``` - -These cases worked on Windows and Linux but may require GPU with > 8GB memory: -```bash -cmr "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.steerapi/Llama-2-7b-chat-hf-onnx-awq-w8" --adr.hf-downloader.model_filename=onnx/decoder_model_merged_quantized.onnx,onnx/decoder_model_merged_quantized.onnx_data --samples=2 -cmr "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.alpindale/Llama-2-13b-ONNX" --adr.hf-downloader.model_filename=FP32/LlamaV2_13B_float32.onnx --adr.hf-downloader.full_subfolder=FP32 --samples=2 -cmr "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.Intel/gpt-j-6B-int8-static" --adr.hf-downloader.model_filename=model.onnx --adr.hf-downloader.full_subfolder=. --samples=2 -``` - -TBD: some cases that are not yet fully supported (data types, input mismatch, etc): -```bash -cmr "python app loadgen-generic _onnxruntime _custom _huggingface _model-stub.runwayml/stable-diffusion-v1-5" --adr.hf-downloader.revision=onnx --adr.hf-downloader.model_filename=unet/model.onnx,unet/weights.pb --samples=2 -cmr "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.microsoft/Mistral-7B-v0.1-onnx" --adr.hf-downloader.model_filename=Mistral-7B-v0.1.onnx,Mistral-7B-v0.1.onnx.data --samples=2 -cmr "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.alpindale/Llama-2-7b-ONNX" --adr.hf-downloader.model_filename=FP16/LlamaV2_7B_float16.onnx --adr.hf-downloader.full_subfolder=FP16 --samples=2 -``` - -### Other variations and flags: - -You can obtain help about flags and variations from CMD: - -```bash -cm run script "python app loadgen-generic" --help - -Available variations: - - _cpu - _cuda - _custom - _custom,huggingface - _huggingface - _model-stub.# - _onnxruntime - _pytorch - _resnet50 - _retinanet - -Available flags mapped to environment variables: - - --concurrency -> --env.MLC_MLPERF_CONCURRENCY - --ep -> --env.MLC_MLPERF_EXECUTION_PROVIDER - --execmode -> --env.MLC_MLPERF_EXEC_MODE - --interop -> --env.MLC_MLPERF_INTEROP - --intraop -> --env.MLC_MLPERF_INTRAOP - --modelpath -> --env.MLC_ML_MODEL_FILE_WITH_PATH - --output_dir -> --env.MLC_MLPERF_OUTPUT_DIR - --runner -> --env.MLC_MLPERF_RUNNER - --samples -> --env.MLC_MLPERF_LOADGEN_SAMPLES - --scenario -> --env.MLC_MLPERF_LOADGEN_SCENARIO - -``` - -## Running this app via Docker - -```bash -cm docker script "python app loadgen-generic _onnxruntime _custom _huggingface _model-stub.ctuning/mlperf-inference-bert-onnx-fp32-squad-v1.1" --adr.hf-downloader.model_filename=model.onnx --samples=2 --output_dir=new_results --docker_cm_repo=ctuning@mlcommons-ck -``` - -## 
Tuning CPU performance via CM experiment - -```bash -cm run experiment --tags=loadgen,python,llama2 -- cmr script "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.steerapi/Llama-2-7b-chat-hf-onnx-awq-w8" --adr.hf-downloader.model_filename=onnx/decoder_model_merged_quantized.onnx,onnx/decoder_model_merged_quantized.onnx_data --samples=2 --intraop={{MLC_OPT_INTRAOP{[1,2,4]}}} --interop={{MLC_OPT_INTEROP{[1,2,4]}}} --quiet -cm run experiment --tags=loadgen,python,llama2 -- cmr "python app loadgen-generic _onnxruntime" --modelpath={PATH TO ONNX MODEL} --samples=2 --intraop={{MLC_OPT_INTRAOP{[1,2,4]}}} --interop={{MLC_OPT_INTEROP{[1,2,4]}}} --quiet -``` - - -## Developers - -* [Gaz Iqbal](https://www.linkedin.com/in/gaziqbal) -* [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh) -* [Grigori Fursin](https://cKnowledge.org/gfursin) - -## Get in touch - -* [MLCommons Task Force on Automation and Reproducibility](../../../docs/taskforce.md) -* [Public Discord server](https://discord.gg/JjWNWXKxwT) diff --git a/script/app-loadgen-generic-python/src/backend_pytorch.py b/script/app-loadgen-generic-python/src/backend_pytorch.py index 6fb716028..57c00da8d 100644 --- a/script/app-loadgen-generic-python/src/backend_pytorch.py +++ b/script/app-loadgen-generic-python/src/backend_pytorch.py @@ -87,15 +87,15 @@ def create(self) -> Model: self.input_sample = pickle.load(handle) # Check if has CM connector - cm_model_module = os.path.join(self.model_code, 'cmc.py') - if not os.path.isfile(cm_model_module): + mlc_model_module = os.path.join(self.model_code, 'cmc.py') + if not os.path.isfile(mlc_model_module): raise Exception( 'cm.py interface for a PyTorch model was not found in {}'.format( self.model_code)) print('') print('Collective Mind Connector for the model found: {}'.format( - cm_model_module)) + mlc_model_module)) # Load CM interface for the model import sys diff --git a/script/app-mlperf-automotive-mlcommons-python/README-extra.md b/script/app-mlperf-automotive-mlcommons-python/README-extra.md deleted file mode 100644 index 582991f6d..000000000 --- a/script/app-mlperf-automotive-mlcommons-python/README-extra.md +++ /dev/null @@ -1 +0,0 @@ -# CM script diff --git a/script/app-mlperf-automotive/customize.py b/script/app-mlperf-automotive/customize.py index 4a7600b14..1333e0719 100644 --- a/script/app-mlperf-automotive/customize.py +++ b/script/app-mlperf-automotive/customize.py @@ -15,7 +15,7 @@ def preprocess(i): script_path = i['run_script_input']['path'] if 'cmd' in i['input']: - state['mlperf_inference_run_cmd'] = "cm run script " + \ + state['mlperf_inference_run_cmd'] = "mlcr " + \ " ".join(i['input']['cmd']) state['mlperf-inference-implementation'] = {} diff --git a/script/app-mlperf-automotive/meta.yaml b/script/app-mlperf-automotive/meta.yaml index 5f39eaac5..80e62af84 100644 --- a/script/app-mlperf-automotive/meta.yaml +++ b/script/app-mlperf-automotive/meta.yaml @@ -94,12 +94,12 @@ deps: docker: - cm_repo: gateoverflow@cm4mlops + mlc_repo: gateoverflow@cm4mlops use_host_group_id: True use_host_user_id: True real_run: false interactive: True - cm_repos: 'cm pull repo mlcommons@cm4abtf --checkout=poc' + mlc_repos: 'cm pull repo mlcommons@cm4abtf --checkout=poc' deps: - tags: get,abtf,scratch,space mounts: diff --git a/script/app-mlperf-inference-mlcommons-cpp/README-extra.md b/script/app-mlperf-inference-mlcommons-cpp/README-extra.md deleted file mode 100644 index b344ea7ad..000000000 --- a/script/app-mlperf-inference-mlcommons-cpp/README-extra.md +++ 
/dev/null @@ -1,83 +0,0 @@ -# About - -The MLCommons C++ Modular Inference Library (MIL) is a community project to provide -a simple and extensible C++ harness to connect diverse ML models, frameworks, data sets and hardware -backends to the [MLPerf loadgen](https://github.com/mlcommons/inference/tree/master/loadgen) -and run it using the [MLCommons CM automation language](https://github.com/mlcommons/ck/tree/master/cm). - -It is intended to help new submitters add new hardware backends to MLPerf, -optimize their MLPerf results using low-level knobs, -and automate their submission using the MLCommons CM automation language. - -MIL is maintained and extended by the [MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) -based on user feedback to make it easier to run, optimize and reproduce MLPerf inference benchmarks -across diverse platforms with continuously changing software and hardware. - -MIL was originally developed by [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189) - -[![License](https://img.shields.io/badge/License-Apache%202.0-green)](https://github.com/mlcommons/ck/tree/master/cm) -[![CM repository](https://img.shields.io/badge/Collective%20Mind-compatible-blue)](https://github.com/mlcommons/ck) - -© 2021-2023 [MLCommons](https://mlcommons.org)
- -## About - -This is a modularized C++ implementation of an MLPerf Inference SUT. Each file corresponds to a different class that can be changed independently of other ones: -1. `Backend` runs the actual inference using a framework (ONNX Runtime, TF Lite, etc) -2. `Device` manages devices and memory (CPU, GPU, etc) -3. `Model` is a struct representing a model file (ResNet50, etc) -4. `SampleLibrary` is a dataset loader (ImageNet, COCO, etc) -5. `System` is the SUT interface to LoadGen which manages how input queries are issued - -Data flow: -* Init - 1. All classes are initialized, e.g. `Backend` is initialized with selected `Model` and `Device` -* Loading samples to memory - 1. LoadGen calls `SampleLibrary->LoadSamplesFromRam()` - 2. `SampleLibrary` reads sample (e.g. from .npy file) and calls `Backend->LoadSampleFromRam()` - 3. `Backend` stores samples contiguously into each device memory, e.g. by `Device->Write()` -* Running the model - 1. LoadGen calls `System->IssueQuery()` - 2. `System` gathers a batch of samples, selects a device concurrency (e.g. the 3rd CPU core) and calls `Backend->IssueBatch()` - 3. `Backend` retrieves pointers to input data in device memory, and calls `RunInference()` implemented by a derived class, e.g. `OnnxRuntimeBackend->RunInference()` - 4. in this example, `OnnxRuntimeBackend->RunInference()` calls the ONNX Runtime API with the retrieved pointers as input, packs the raw ONNX Runtime output to LoadGen format via `Model->PostProcess()`, and sends the response to LoadGen - 5. LoadGen records the latency from 1 to 4 - -See comments in code for each class for details. - -## Examples - -### ResNet50, ONNX Runtime, CPU, Accuracy -```sh -cm run script "cpp mlperf _resnet50 _onnxruntime _cpu" \ - --output_dir= \ - --count=500 \ - --max_batchsize=32 \ - --mode=accuracy - -python \ - /PATH/TO/inference/vision/classification_and_detection/tools/accuracy-imagenet.py \ - --mlperf-accuracy-file=/mlperf_log_accuracy.json \ - --imagenet-val-file `cm find cache --tags=imagenet-aux`/data/val.txt \ - --dtype int64 -``` - -### RetinaNet, ONNX Runtime, GPU, Accuracy - -Install dataset: -```sh -cm run script --tags=get,preprocessed,openimages,_500,_NCHW -``` - -Run benchmark: -```sh -cm run script "cpp mlperf _retinanet _onnxruntime _cuda" \ - --output_dir= \ - --count=500 \ - --max_batchsize=1 \ - --mode=accuracy - -python /PATH/TO/inference/vision/classification_and_detection/tools/accuracy-openimages.py \ - --mlperf-accuracy-file /mlperf_log_accuracy.json \ - --openimages-dir `cm find cache --tags=openimages,original`/install -``` diff --git a/script/app-mlperf-inference-mlcommons-cpp/tests/win.bat b/script/app-mlperf-inference-mlcommons-cpp/tests/win.bat index 08dc944a4..4e8b322f4 100644 --- a/script/app-mlperf-inference-mlcommons-cpp/tests/win.bat +++ b/script/app-mlperf-inference-mlcommons-cpp/tests/win.bat @@ -1,8 +1,8 @@ rem TBD: current not compiling - need to check ... 
-cmr "install llvm prebuilt" --version=16.0.4 -cmr "install llvm prebuilt" --version=17.0.6 +mlcr "install llvm prebuilt" --version=16.0.4 +mlcr "install llvm prebuilt" --version=17.0.6 -cmr "get lib onnxruntime lang-cpp _cpu" --version=1.11.1 -cmr "get lib onnxruntime lang-cpp _cpu" --version=1.13.1 -cmr "get lib onnxruntime lang-cpp _cpu" --version=1.15.1 +mlcr "get lib onnxruntime lang-cpp _cpu" --version=1.11.1 +mlcr "get lib onnxruntime lang-cpp _cpu" --version=1.13.1 +mlcr "get lib onnxruntime lang-cpp _cpu" --version=1.15.1 diff --git a/script/app-mlperf-inference-mlcommons-python/README-extra.md b/script/app-mlperf-inference-mlcommons-python/README-extra.md deleted file mode 100644 index 4a9706638..000000000 --- a/script/app-mlperf-inference-mlcommons-python/README-extra.md +++ /dev/null @@ -1,235 +0,0 @@ -# About - -This portable CM script is being developed by the [MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/mlperf-education-workgroup.md) -to modularize the *python reference implementations* of the [MLPerf inference benchmark](https://github.com/mlcommons/inference) -using the [MLCommons CM automation meta-framework](https://github.com/mlcommons/ck). -The goal is to make it easier to run, optimize and reproduce MLPerf benchmarks -across diverse platforms with continuously changing software and hardware. - -# Current Coverage - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-| Model | Device | Backend | Model Precision | Status | Comments |
-|-------|--------|---------|-----------------|--------|----------|
-| ResNet50 | CPU | Onnxruntime | fp32 | | Works on all tested versions |
-| ResNet50 | CPU | Tensorflow | fp32 | | Works on all tested versions |
-| ResNet50 | CPU | Pytorch | | N | Reference Implementation missing |
-| ResNet50 | CUDA | Onnxruntime | fp32 | | Works on all tested versions |
-| ResNet50 | CUDA | Tensorflow | fp32 | | Works on all tested versions |
-| ResNet50 | CUDA | Pytorch | | N | Reference Implementation missing |
-| RetinaNet | CPU | Onnxruntime | fp32 | | Works on all tested versions |
-| RetinaNet | CPU | Tensorflow | fp32 | | Not Implemented |
-| RetinaNet | CPU | Pytorch | fp32 | | Works on all tested versions |
-| RetinaNet | CUDA | Onnxruntime | fp32 | | Works on all tested versions |
-| RetinaNet | CUDA | Tensorflow | fp32 | | Not Implemented |
-| RetinaNet | CUDA | Pytorch | fp32 | | Works on all tested versions |
-| Bert | CPU | Onnxruntime | fp32 | | Works on all tested versions |
-| Bert | CPU | Onnxruntime | int8 | | Works on all tested versions |
-| Bert | CPU | Tensorflow | fp32 | | Works with protobuf 3.19. Issue mentioned here |
-| Bert | CPU | Pytorch | fp32 | | Works on all tested versions |
-| Bert | CUDA | Onnxruntime | fp32 | | Works on all tested versions |
-| Bert | CUDA | Onnxruntime | int8 | | Works on all tested versions |
-| Bert | CUDA | Tensorflow | fp32 | | Not tested |
-| Bert | CUDA | Pytorch | fp32 | | Works on all tested versions |
-| 3d-unet | CPU | Onnxruntime | fp32 | | Works on all tested versions |
-| 3d-unet | CPU | Tensorflow | fp32 | | Works on all tested versions |
-| 3d-unet | CPU | Pytorch | fp32 | | Works on all tested versions |
-| 3d-unet | CUDA | Onnxruntime | fp32 | | Works on all tested versions |
-| 3d-unet | CUDA | Tensorflow | fp32 | | Works on all tested versions |
-| 3d-unet | CUDA | Pytorch | fp32 | | Works on all tested versions |
-| Rnnt | CPU | Pytorch | fp32 | | Works on all tested versions |
-| DLRM | CPU | Pytorch | fp32 | | Works with torch 1.10 and numpy 1.19 |
-| DLRM | CUDA | Pytorch | fp32 | ? | Needs GPU with high memory capacity |
- -Please follow our R&D roadmap [here](https://github.com/mlcommons/ck/issues/536). - - - diff --git a/script/app-mlperf-inference-qualcomm/README_aws_dl2q.24xlarge.md b/script/app-mlperf-inference-qualcomm/README_aws_dl2q.24xlarge.md index 311b3b182..e27a6f3ec 100644 --- a/script/app-mlperf-inference-qualcomm/README_aws_dl2q.24xlarge.md +++ b/script/app-mlperf-inference-qualcomm/README_aws_dl2q.24xlarge.md @@ -13,14 +13,14 @@ image from the Community AMIs is the recommended OS image as it comes with the Q sudo yum install -y python38-devel git python3.8 -m pip install cmind cm pull repo mlcommons@cm4mlops -cm run script --tags=get,python --version_min=3.8.1 +mlcr --tags=get,python --version_min=3.8.1 ``` ## Bert-99 ### Quick performance run ``` -cm run script --tags=generate-run-cmds,inference,_performance-only --device=qaic \ +mlcr --tags=generate-run-cmds,inference,_performance-only --device=qaic \ --backend=glow --scenario=Offline --implementation=kilt --model=bert-99 \ --test_query_count=40000 --precision=uint8 --rerun --quiet \ --adr.mlperf-inference-implementation.tags=_loadgen-batch-size.4096,_dl2q.24xlarge \ @@ -29,7 +29,7 @@ cm run script --tags=generate-run-cmds,inference,_performance-only --device=qaic ### Full valid run ``` -cm run script --tags=generate-run-cmds,inference,_submission --device=qaic \ +mlcr --tags=generate-run-cmds,inference,_submission --device=qaic \ --backend=glow --scenario=Offline --implementation=kilt --model=bert-99 --precision=uint8 \ --adr.mlperf-inference-implementation.tags=_loadgen-batch-size.4096,_dl2q.24xlarge \ --rerun --quiet --execution-mode=valid @@ -45,13 +45,13 @@ The expected accuracy is ~90 (Optional) If you have Imagenet 2012 validation dataset downloaded, you can register it in CM as follows. This step is optional and can avoid the download from the public URL which can be slow at times. 
``` -cm run script --tags=get,dataset,imagenet,original,_full --env.IMAGENET_PATH=`pwd`/imagenet-2012-val +mlcr --tags=get,dataset,imagenet,original,_full --env.IMAGENET_PATH=`pwd`/imagenet-2012-val ``` ### Quick performance run ``` -cm run script --tags=generate-run-cmds,inference,_performance-only --device=qaic --backend=glow \ +mlcr --tags=generate-run-cmds,inference,_performance-only --device=qaic --backend=glow \ --scenario=Offline --implementation=kilt --model=resnet50 \ --test_query_count=400000 --precision=uint8 --rerun --adr.compiler.tags=gcc \ --adr.mlperf-inference-implementation.tags=_bs.8,_dl2q.24xlarge --execution-mode=test --quiet @@ -60,7 +60,7 @@ cm run script --tags=generate-run-cmds,inference,_performance-only --device=qaic ### Full valid run ``` -cm run script --tags=generate-run-cmds,inference,_submission --device=qaic --backend=glow \ +mlcr --tags=generate-run-cmds,inference,_submission --device=qaic --backend=glow \ --scenario=Offline --implementation=kilt --model=resnet50 \ --precision=uint8 --rerun --adr.compiler.tags=gcc \ --adr.mlperf-inference-implementation.tags=_bs.8,_dl2q.24xlarge --execution-mode=valid --quiet @@ -76,7 +76,7 @@ Expected accuracy is 75.936% ### Quick performance run ``` -cm run script --tags=generate-run-cmds,inference,_performance-only --device=qaic --backend=glow \ +mlcr --tags=generate-run-cmds,inference,_performance-only --device=qaic --backend=glow \ --scenario=Offline --implementation=kilt --model=retinanet --test_query_count=40000 --precision=uint8 \ --rerun --quiet --adr.mlperf-inference-implementation.tags=_loadgen-batch-size.1,_dl2q.24xlarge,_bs.1 \ --adr.compiler.tags=gcc --execution-mode=test @@ -85,7 +85,7 @@ cm run script --tags=generate-run-cmds,inference,_performance-only --device=qaic ### Full valid run ``` -cm run script --tags=generate-run-cmds,inference,_submission --device=qaic --backend=glow \ +mlcr --tags=generate-run-cmds,inference,_submission --device=qaic --backend=glow \ --scenario=Offline --implementation=kilt --model=retinanet \ --precision=uint8 --rerun --adr.compiler.tags=gcc --adr.dataset-preprocessed.tags=_custom-annotations \ --adr.mlperf-inference-implementation.tags=_bs.1,_dl2q.24xlarge --execution-mode=valid --quiet diff --git a/script/app-mlperf-inference/README-extra.md b/script/app-mlperf-inference/README-extra.md deleted file mode 100644 index f412c3c8f..000000000 --- a/script/app-mlperf-inference/README-extra.md +++ /dev/null @@ -1,131 +0,0 @@ -# Examples - -## MLPerf object detection with python, RetinaNet, Open Images, ONNX runtime (CPU), Ubuntu - -This example shows how to use this CM script to run the reference python implementation -of the MLPerf inference benchmark for object detection, RetinaNet, ONNX run-time (CPU) and Ubuntu. - -Install the MLCommons CM automation meta-framework as described [here]( https://github.com/mlcommons/ck/blob/master/cm/docs/installation.md ). - -Here is the typical installation on Ubuntu 20.04: - -```bash -sudo apt install python3 python3-pip git wget -python3 -m pip install cmind -source .profile -``` - -Next you need to install a CM repository with [cross-platform CM scripts](https://github.com/mlcommons/cm4mlops/tree/main/script) for ML Systems: - -```bash -cm pull repo mlcommons@cm4mlops --checkout=dev -``` - -Note that you can fork [this repository](https://github.com/mlcommons/cm4mlops) and use it instead of mlcommons@cm4mlops -to add CM scripts for your own public and private ML models, data sets, software and hardware. 
-In such case, just change mlcommons@cm4mlops to your own fork in the above command. - -You can find the location of this repository on your system as follows: -```bash -cm find repo mlcommons@cm4mlops -``` - -Now we suggest you to set up a virtual python via CM to avoid mixing up your native Python installation: -```bash -cm run script "install python-venv" --name=mlperf -``` - -If you need a specific python version use this command: -```bash -cm run script "install python-venv" --name=mlperf --version=3.10.7 -``` - -You can now test the MLPerf inference benchmark with RetinaNet and ONNX runtime CPU using just one CM command: - -```bash -cm run script "app mlperf inference generic reference _python _retinanet _onnxruntime _cpu" \ - --adr.python.name=mlperf \ - --adr.compiler.tags=gcc \ - --scenario=Offline \ - --mode=accuracy \ - --test_query_count=10 \ - --quiet -``` - -The first run of this CM script takes around 25 minutes on a GCP instance with 16 cores and 64GB of memory because -CM will automatically detect, install and cache all the necessary ML components -while adapting them to your system using [portable CM scripts](https://github.com/mlcommons/cm4mlops/tree/main/script). - -These dependencies are described using [this simple YAML file](https://github.com/octoml/ck/blob/master/mlc-mlops/script/app-mlperf-inference-reference/_cm.yaml#L57) -and can be turned on or off using different environment variables passed to this CM script using `--env.KEY=VALUE`. - -You should see the following output in the end: -```txt - Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.654 - Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.827 - Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.654 - Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.000 - Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = -1.000 - Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.657 - Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.566 - Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.705 - Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.735 - Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.000 - Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = -1.000 - Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.738 - -mAP=65.417% - -``` - -Any other run will automatically pick up all dependencies from the CM cache while setting up all environment variables and files -to launch the prepared MLPerf inference benchmark. For example, you can run these benchmark in performance mode as follows: - -```bash -cm run script "app mlperf inference generic reference _python _retinanet _onnxruntime _cpu" \ - --adr.python.name=mlperf \ - --adr.compiler.tags=gcc \ - --scenario=Offline \ - --mode=performance \ - --test_query_count=10 \ - --rerun -``` - -You should see the following output: -```txt -TestScenario.Offline qps=0.89, mean=8.6960, time=11.180, acc=31.661%, mAP=65.417%, queries=10, tiles=50.0:8.8280,80.0:9.0455,90.0:9.1450,95.0:9.2375,99.0:9.3114,99.9:9.3281 -``` - - - -### Using Docker - -Please check the prototype of Docker containers with the CM automation meta-framework -for modular MLPerf [here](https://github.com/mlcommons/ck/tree/master/docker) -(on-going work). 
- -```bash -docker build -f dockerfiles/resnet50/ubuntu_20.04_python_onnxruntime_cpu.Dockerfile -t resnet50_onnxruntime:ubuntu20.04 . -``` - -```bash -docker run -it --rm resnet50_onnxruntime:ubuntu20.04 -c "cm run script --tags=app,mlperf,inference,reference,python_resnet50,_onnxruntime,_cpu --scenario=Offline --mode=accuracy" -``` - - - - -# Future work - -* See the current coverage of different models, devices and backends [here](README-extra.md#current-coverage). - -* See the development roadmap [here](https://github.com/mlcommons/ck/issues/536). - -* See extension projects to enable collaborative benchmarking, design space exploration and optimization of ML and AI Systems [here](https://github.com/mlcommons/ck/issues/627). - - -# Developers - -[Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), -[Grigori Fursin]( https://cKnowledge.org/gfursin ) -and [individual contributors](https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md). diff --git a/script/app-mlperf-inference/customize.py b/script/app-mlperf-inference/customize.py index 21e34e98a..fbe70dde4 100644 --- a/script/app-mlperf-inference/customize.py +++ b/script/app-mlperf-inference/customize.py @@ -26,14 +26,14 @@ def preprocess(i): env['MLC_NVIDIA_GPU_MEMORY'] = '' else: gpu_memory = i['state'].get( - 'cm_cuda_device_prop', '').get('Global memory') + 'mlc_cuda_device_prop', '').get('Global memory') gpu_memory_size = str( int((float(gpu_memory) / (1024 * 1024 * 1024) + 7) / 8) * 8) env['MLC_NVIDIA_GPU_MEMORY'] = gpu_memory_size env['MLC_NVIDIA_HARNESS_GPU_VARIATION'] = '' if 'cmd' in i['input']: - state['mlperf_inference_run_cmd'] = "cm run script " + \ + state['mlperf_inference_run_cmd'] = "mlcr " + \ " ".join(i['input']['cmd']) state['mlperf-inference-implementation'] = {} @@ -287,14 +287,14 @@ def postprocess(i): with open("measurements.json", "w") as fp: json.dump(measurements, fp, indent=2) - cm_sut_info = {} - cm_sut_info['system_name'] = state['MLC_SUT_META']['system_name'] - cm_sut_info['implementation'] = env['MLC_MLPERF_IMPLEMENTATION'] - cm_sut_info['device'] = env['MLC_MLPERF_DEVICE'] - cm_sut_info['framework'] = state['MLC_SUT_META']['framework'] - cm_sut_info['run_config'] = env['MLC_MLPERF_INFERENCE_SUT_RUN_CONFIG'] + mlc_sut_info = {} + mlc_sut_info['system_name'] = state['MLC_SUT_META']['system_name'] + mlc_sut_info['implementation'] = env['MLC_MLPERF_IMPLEMENTATION'] + mlc_sut_info['device'] = env['MLC_MLPERF_DEVICE'] + mlc_sut_info['framework'] = state['MLC_SUT_META']['framework'] + mlc_sut_info['run_config'] = env['MLC_MLPERF_INFERENCE_SUT_RUN_CONFIG'] with open(os.path.join(result_sut_folder_path, "mlc-sut-info.json"), "w") as fp: - json.dump(cm_sut_info, fp, indent=2) + json.dump(mlc_sut_info, fp, indent=2) system_meta = state['MLC_SUT_META'] with open("system_meta.json", "w") as fp: diff --git a/script/app-mlperf-inference/meta.yaml b/script/app-mlperf-inference/meta.yaml index 305535b49..b731f13f2 100644 --- a/script/app-mlperf-inference/meta.yaml +++ b/script/app-mlperf-inference/meta.yaml @@ -1856,7 +1856,7 @@ docker: MLC_MLPERF_INFERENCE_SUBMISSION_DIR: [ on ] pre_run_cmds: - #- cm pull repo && cm run script --tags=get,git,repo,_repo.https://github.com/GATEOverflow/inference_results_v4.0.git --update + #- cm pull repo && mlcr --tags=get,git,repo,_repo.https://github.com/GATEOverflow/inference_results_v4.0.git --update - cm pull repo mounts: - "${{ MLC_DATASET_IMAGENET_PATH }}:${{ MLC_DATASET_IMAGENET_PATH }}" @@ -1879,8 +1879,8 @@ docker: interactive: True extra_run_args: ' --dns 8.8.8.8 
--dns 8.8.4.4 --cap-add SYS_ADMIN --cap-add SYS_TIME --security-opt apparmor=unconfined --security-opt seccomp=unconfined' os: ubuntu - cm_repo: mlcommons@mlperf-automations - cm_repo_branch: dev + mlc_repo: mlcommons@mlperf-automations + mlc_repo_branch: dev real_run: False os_version: '22.04' docker_input_mapping: diff --git a/script/app-stable-diffusion-onnx-py/README-extra.md b/script/app-stable-diffusion-onnx-py/README-extra.md deleted file mode 100644 index de321d158..000000000 --- a/script/app-stable-diffusion-onnx-py/README-extra.md +++ /dev/null @@ -1,30 +0,0 @@ -# Examples - -CM interface for https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/onnx - -```bash -cm run script "install python-venv" --name=sd-test -cm run script "get generic-python-lib _package.optimum[onnxruntime]" --adr.python.name=sd-test -cm run script "activate python-venv" --name=sd-test - -cm run script "python app stable-diffusion onnx" --adr.python.name=sd-test --text="crazy programmer" - -cm rm cache -f -cm run script "python app stable-diffusion onnx _cuda" --adr.python.name=sd-test --text="crazy programmer" - -cm docker script "python app stable-diffusion onnx" --text="crazy programmer" --output=. --docker_cm_repo=ctuning@mlcommons-ck --env.MLC_DOCKER_ADD_FLAG_TO_MLC_MLOPS_REPO=xyz4 - -``` - - - -# Resources - -* https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0 -* https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/tree/main -* https://huggingface.co/CompVis/stable-diffusion-v1-4/tree/main -* https://huggingface.co/runwayml/stable-diffusion-v1-5 -* https://huggingface.co/bes-dev/stable-diffusion-v1-4-onnx -* https://onnxruntime.ai/docs/tutorials/csharp/stable-diffusion-csharp.html -* https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/main -* https://huggingface.co/docs/optimum/onnxruntime/usage_guides/models diff --git a/script/app-stable-diffusion-onnx-py/process.py b/script/app-stable-diffusion-onnx-py/process.py index 86a59ef19..ed1570575 100644 --- a/script/app-stable-diffusion-onnx-py/process.py +++ b/script/app-stable-diffusion-onnx-py/process.py @@ -11,15 +11,15 @@ if os.path.isfile(f): os.remove(f) -cm_model_path = os.environ.get('MLC_ML_MODEL_PATH', '') -if cm_model_path == '': +mlc_model_path = os.environ.get('MLC_ML_MODEL_PATH', '') +if mlc_model_path == '': print('Error: MLC_ML_MODEL_PATH env is not defined') exit(1) device = os.environ.get('MLC_DEVICE', '') pipeline = ORTStableDiffusionPipeline.from_pretrained( - cm_model_path, local_files_only=True).to(device) + mlc_model_path, local_files_only=True).to(device) text = os.environ.get('MLC_APP_STABLE_DIFFUSION_ONNX_PY_TEXT', '') if text == '': diff --git a/script/benchmark-any-mlperf-inference-implementation/run-template.sh b/script/benchmark-any-mlperf-inference-implementation/run-template.sh index 8556de945..0224c34dd 100644 --- a/script/benchmark-any-mlperf-inference-implementation/run-template.sh +++ b/script/benchmark-any-mlperf-inference-implementation/run-template.sh @@ -43,47 +43,47 @@ function run_test() { results_dir=$HOME/results_dir #Add your run commands here... 
-find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun ${EXTRA_ARGS}' -find_ss_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ +find_ss_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=SingleStream --quiet --test_query_count=$test_query_count $rerun ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' -submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet \ --skip_submission_generation=yes --execution-mode=valid ${POWER_STRING} ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' -submission_cmd_scenario='cm run script --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +submission_cmd_scenario='mlcr --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet \ --skip_submission_generation=yes --execution-mode=valid ${POWER_STRING} ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' -readme_cmd_single='cm run script --tags=generate-run-cmds,inference,_populate-readme --scenario=$scenario \ +readme_cmd_single='mlcr --tags=generate-run-cmds,inference,_populate-readme --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet \ --skip_submission_generation=yes --execution-mode=valid ${POWER_STRING} ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' -readme_cmd='cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +readme_cmd='mlcr --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet \ --skip_submission_generation=yes --execution-mode=valid ${POWER_STRING} ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' -tflite_accuracy_cmd='cm run script --tags=run,mobilenet-models,_tflite,_accuracy-only$extra_tags \ +tflite_accuracy_cmd='mlcr --tags=run,mobilenet-models,_tflite,_accuracy-only$extra_tags \ --adr.compiler.tags=gcc \ ${extra_option} \ ${EXTRA_ARGS}' -tflite_performance_cmd='cm run script --tags=run,mobilenet-models,_tflite,_performance-only$extra_tags \ +tflite_performance_cmd='mlcr --tags=run,mobilenet-models,_tflite,_performance-only$extra_tags \ ${POWER_STRING} \ --adr.compiler.tags=gcc \ ${extra_option} \ ${EXTRA_ARGS}' -tflite_readme_cmd='cm run script --tags=run,mobilenet-models,_tflite,_populate-readme$extra_tags \ +tflite_readme_cmd='mlcr --tags=run,mobilenet-models,_tflite,_populate-readme$extra_tags \ ${POWER_STRING} \ --adr.compiler.tags=gcc \ ${extra_option} \ diff --git a/script/benchmark-program/README-extra.md b/script/benchmark-program/README-extra.md deleted file mode 100644 index d0cdc3143..000000000 --- a/script/benchmark-program/README-extra.md +++ /dev/null @@ -1,3 +0,0 @@ 
-This is a universal script to run and profile programs. - -It is a part of our universal benchmarking and optimization roadmap: https://github.com/mlcommons/cm4mlops/issues/23 diff --git a/script/benchmark-program/customize.py b/script/benchmark-program/customize.py index d0286557a..6fea34b10 100644 --- a/script/benchmark-program/customize.py +++ b/script/benchmark-program/customize.py @@ -84,7 +84,7 @@ def preprocess(i): pre_run_cmd += ' && ' # running the script as a process in background - pre_run_cmd = pre_run_cmd + 'cm run script --tags=runtime,system,utilisation' + \ + pre_run_cmd = pre_run_cmd + 'mlcr --tags=runtime,system,utilisation' + \ env['MLC_SYS_UTILISATION_SCRIPT_TAGS'] + ' --quiet & ' # obtain the command if of the background process pre_run_cmd += r" cmd_pid=\$! && echo CMD_PID=\$cmd_pid" diff --git a/script/build-docker-image/README-extra.md b/script/build-docker-image/README-extra.md deleted file mode 100644 index 79b2c1b09..000000000 --- a/script/build-docker-image/README-extra.md +++ /dev/null @@ -1,16 +0,0 @@ -# Build CM Docker Image -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) builds a dockerfile with for using CM. - -## How to use -```bash -cm run script --tags=build,docker,image --dockerfile=[DOCKERFILEPATH] --gh_token=[GITHUB_AUTH_TOKEN] --image_repo=[IMAGE_REPO] --image_name=[IMAGE_NAME] --image_tag=[IMAGE_TAG] --cache=[yes,no] -``` -where -* `[DOCKERFILEPATH]` is the path to the dockerfile. If not given, the [dockerfile build script](../build-dockerfile) will be called. -* `[GITHUB_AUTH_TOKEN]`: is passed as a build argument to docker build. -* `[IMAGE_REPO]`: Repo name to add the docker image. Default is `local`. -* `[IMAGE_NAME]`: Name to add the docker image. Default is `cm`. -* `[IMAGE_TAG]`: Tag for the docker image. Default is `latest`. -* `--cache`: If `no` turns off docker build caching. Default is cache on. -* `[--docker_os, --docker_os_version, --cm_repo and --script_tags]` are additional options which are passed to the [dockerfile build script](../build-dockerfile) if needed. 
- diff --git a/script/build-docker-image/meta.yaml b/script/build-docker-image/meta.yaml index d1d86083a..947ae3aaf 100644 --- a/script/build-docker-image/meta.yaml +++ b/script/build-docker-image/meta.yaml @@ -21,7 +21,7 @@ default_env: input_mapping: cache: MLC_DOCKER_CACHE - cm_repo: MLC_MLOPS_REPO + mlc_repo: MLC_MLOPS_REPO docker_os: MLC_DOCKER_OS docker_os_version: MLC_DOCKER_OS_VERSION dockerfile: MLC_DOCKERFILE_WITH_PATH diff --git a/script/build-dockerfile/customize.py b/script/build-dockerfile/customize.py index a91853185..76c184782 100644 --- a/script/build-dockerfile/customize.py +++ b/script/build-dockerfile/customize.py @@ -62,7 +62,7 @@ def preprocess(i): return { 'return': 1, 'error': f"Version \"{env['MLC_DOCKER_OS_VERSION']}\" is not supported yet for \"{env['MLC_DOCKER_OS']}\" "} - # Handle cm_mlops Repository + # Handle mlc_mlops Repository if env.get("MLC_REPO_PATH", "") != "": use_copy_repo = True mlc_repo_path = os.path.abspath(env["MLC_REPO_PATH"]) @@ -119,23 +119,23 @@ def preprocess(i): use_copy_repo = False if env.get("MLC_MLOPS_REPO", "") != "": - cm_mlops_repo = env["MLC_MLOPS_REPO"] + mlc_mlops_repo = env["MLC_MLOPS_REPO"] # the below pattern matches both the HTTPS and SSH git link formats git_link_pattern = r'^(https?://github\.com/([^/]+)/([^/]+)(?:\.git)?|git@github\.com:([^/]+)/([^/]+)(?:\.git)?)$' - if match := re.match(git_link_pattern, cm_mlops_repo): + if match := re.match(git_link_pattern, mlc_mlops_repo): if match.group(2) and match.group(3): repo_owner = match.group(2) repo_name = match.group(3) elif match.group(4) and match.group(5): repo_owner = match.group(4) repo_name = match.group(5) - cm_mlops_repo = f"{repo_owner}@{repo_name}" + mlc_mlops_repo = f"{repo_owner}@{repo_name}" print( - f"Converted repo format from {env['MLC_MLOPS_REPO']} to {cm_mlops_repo}") + f"Converted repo format from {env['MLC_MLOPS_REPO']} to {mlc_mlops_repo}") else: - cm_mlops_repo = "mlcommons@mlperf-automations" + mlc_mlops_repo = "mlcommons@mlperf-automations" - cm_mlops_repo_branch_string = f" --branch={env['MLC_MLOPS_REPO_BRANCH']}" + mlc_mlops_repo_branch_string = f" --branch={env['MLC_MLOPS_REPO_BRANCH']}" if env.get('MLC_DOCKERFILE_WITH_PATH', '') == '': env['MLC_DOCKERFILE_WITH_PATH'] = os.path.join( @@ -329,8 +329,8 @@ def preprocess(i): f.write( 'RUN mlc pull repo ' + - cm_mlops_repo + - cm_mlops_repo_branch_string + + mlc_mlops_repo + + mlc_mlops_repo_branch_string + x + EOL) diff --git a/script/build-dockerfile/meta.yaml b/script/build-dockerfile/meta.yaml index a8acb0c30..6dd1ee499 100644 --- a/script/build-dockerfile/meta.yaml +++ b/script/build-dockerfile/meta.yaml @@ -24,10 +24,10 @@ default_env: input_mapping: build: MLC_BUILD_DOCKER_IMAGE cache: MLC_DOCKER_CACHE - cm_repo: MLC_MLOPS_REPO - cm_repo_flags: MLC_DOCKER_ADD_FLAG_TO_MLC_MLOPS_REPO - cm_repos: MLC_DOCKER_EXTRA_MLC_REPOS - cm_repo_branch: MLC_MLOPS_REPO_BRANCH + mlc_repo: MLC_MLOPS_REPO + mlc_repo_flags: MLC_DOCKER_ADD_FLAG_TO_MLC_MLOPS_REPO + mlc_repos: MLC_DOCKER_EXTRA_MLC_REPOS + mlc_repo_branch: MLC_MLOPS_REPO_BRANCH comments: MLC_DOCKER_RUN_COMMENTS copy_files: MLC_DOCKER_COPY_FILES docker_base_image: MLC_DOCKER_IMAGE_BASE @@ -50,7 +50,7 @@ input_mapping: run_cmd: MLC_DOCKER_RUN_CMD run_cmd_extra: MLC_DOCKER_RUN_CMD_EXTRA script_tags: MLC_DOCKER_RUN_SCRIPT_TAGS - skip_cm_sys_upgrade: MLC_DOCKER_SKIP_MLC_SYS_UPGRADE + skip_mlc_sys_upgrade: MLC_DOCKER_SKIP_MLC_SYS_UPGRADE push_image: MLC_DOCKER_PUSH_IMAGE docker_not_pull_update: MLC_DOCKER_NOT_PULL_UPDATE diff --git 
a/script/build-mlperf-inference-server-nvidia/README-extra.md b/script/build-mlperf-inference-server-nvidia/README-extra.md deleted file mode 100644 index f05fd8322..000000000 --- a/script/build-mlperf-inference-server-nvidia/README-extra.md +++ /dev/null @@ -1,2 +0,0 @@ -# About -This CM script builds the Nvidia C++ implementation of MLPerf Inference diff --git a/script/build-mlperf-inference-server-nvidia/meta.yaml b/script/build-mlperf-inference-server-nvidia/meta.yaml index d7f005bb4..f74d99e9a 100644 --- a/script/build-mlperf-inference-server-nvidia/meta.yaml +++ b/script/build-mlperf-inference-server-nvidia/meta.yaml @@ -357,7 +357,7 @@ docker: shm_size: '32gb' extra_run_args: ' --ulimit memlock=-1 --cap-add SYS_ADMIN --cap-add SYS_TIME --security-opt apparmor=unconfined --security-opt seccomp=unconfined' os: ubuntu - cm_repo_flags1: ' --branch=mlperf-inference' + mlc_repo_flags1: ' --branch=mlperf-inference' real_run: False interactive: True os_version: '20.04' diff --git a/script/compile-program/README-extra.md b/script/compile-program/README-extra.md deleted file mode 100644 index 87d157282..000000000 --- a/script/compile-program/README-extra.md +++ /dev/null @@ -1,3 +0,0 @@ -This script compiles C and C++ programs. - -It is a part of our universal benchmarking and optimization roadmap: https://github.com/mlcommons/cm4mlops/issues/23 diff --git a/script/create-patch/README-extra.md b/script/create-patch/README-extra.md deleted file mode 100644 index de783504d..000000000 --- a/script/create-patch/README-extra.md +++ /dev/null @@ -1,5 +0,0 @@ -# Examples - -``` -cmr "create patch" --new=new --old=old --exclude=.git,__pycache_ -``` diff --git a/script/destroy-terraform/README-extra.md b/script/destroy-terraform/README-extra.md deleted file mode 100644 index 8768e0fc7..000000000 --- a/script/destroy-terraform/README-extra.md +++ /dev/null @@ -1 +0,0 @@ -This CM script is automatically called from run-terraform script when `--destroy` option is given. diff --git a/script/detect-cpu/README-extra.md b/script/detect-cpu/README-extra.md deleted file mode 100644 index 3e6e1dad0..000000000 --- a/script/detect-cpu/README-extra.md +++ /dev/null @@ -1,17 +0,0 @@ -# Detect CPU -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the host CPU details and exports them in a unified list of environment variables to be reused across the supported operating systems. - -## Exported Variables -* `MLC_HOST_CPU_L1I_CACHE_SIZE` -* `MLC_HOST_CPU_L2_CACHE_SIZE` -* `MLC_HOST_CPU_MEMSIZE` -* `MLC_HOST_CPU_SOCKETS` -* `MLC_HOST_CPU_THREADS_PER_CORE` -* `MLC_HOST_CPU_TOTAL_CORES` -* `MLC_HOST_CPU_TOTAL_LOGICAL_CORES` -* `MLC_HOST_CPU_TOTAL_PHYSICAL_CORES` - -## Supported and Tested OS -1. Ubuntu 18.04, 20.04, 22.04 -2. RHEL 9 -3. 
macOS 12.6 diff --git a/script/download-and-extract/README-extra.md b/script/download-and-extract/README-extra.md deleted file mode 100644 index 91d015ee4..000000000 --- a/script/download-and-extract/README-extra.md +++ /dev/null @@ -1,109 +0,0 @@ -# CM interface to download and extract files in a unified way on any system - -## Download and extract file without CM caching - -### Use internal CM download function - -This script will use [internal CM download function](https://github.com/mlcommons/ck/blob/master/mlc-mlops/automation/utils/module.py#L157) -to download and extract a given file to the current directory: - -```bash -cmr "download-and-extract file _extract" --url=https://cKnowledge.org/test/coco-2017-val-annotations.zip -``` -or - -```bash -cmr "dae file _extract _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip" -``` - -#### Output environment variables - -You can check produced environment variables produced by this CM script by adding the `-j` flag: - -```bash -cmr "dae file _extract _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip" -j -``` - -```json - "new_env": { - "MLC_DOWNLOAD_DOWNLOADED_PATH": "D:\\Work\\coco-2017-val-annotations.zip", - "MLC_EXTRACT_EXTRACTED_PATH": "D:\\Work", - "MLC_GET_DEPENDENT_CACHED_PATH": "D:\\Work" - }, -``` - -#### Input flags and equivalent environment variables - -* `--url` or `--env.MLC_DAE_URL` - URL to download file -* `--verify` or `--env.MLC_VERIFY_SSL` - set to `no` to skip SSL certificate verification -* `--download_path` or `--store` or `--env.MLC_DOWNLOAD_PATH` - where to download file -* `--local_path` or `--from` or `--env.MLC_DOWNLOAD_LOCAL_FILE_PATH` - where to take file from instead of downloading -* `--extract_path` or `--to` or `--env.MLC_EXTRACT_PATH` - where to extract files (--input should have full path then) -* `--extra_folder` or `--env.MLC_EXTRACT_TO_FOLDER` - extra directory when extracting file (to avoid messing up current directory) - - -#### Variations - -* `_keep` or `_no-remove-extracted` or `--env.MLC_EXTRACT_REMOVE_EXTRACTED=no` - keep archive file (it will be deleted by default) - - - -### Use wget without SSL certificate verification - -```bash -cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no -``` - -### Use curl without SSL certificate verification - -```bash -cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _curl" --verify=no -``` - -### Check MD5SUM - -```bash -cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 -``` - -### Save to another file - -```bash -cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.MLC_DOWNLOAD_FILENAME=xyz --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 -``` - -### Save to another place - -```bash -cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --download_path=D:\Work --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 -``` - -### Reuse local file instead of downloading a file - -```bash -cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --local_path="D:\Work\coco-2017-val-annotations.zip" --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 -j -``` - - -### Simplified language to download, store and extract file - 
- -```bash -cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 --store=$HOME/dir1 --to=$HOME/dir2 -``` - - - -## Download and extract files with CM caching - -You can use all above commands with `--force_cache` and `--extra_cache_tags` flags. -In such case, a given file will be downloaded to CM cache and can be reused by other CM scripts and workflows: - -```bash -cmr "dae file _extract _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 --force_cache --extra_cache_tags=coco,2017,val,annotations -``` - -You can find it in CM cache using extra cache tags as follows: -```bash -cm show cache "dae file annotations coco 2017 val" -``` diff --git a/script/download-and-extract/tests/download-and-extract-file.bat b/script/download-and-extract/tests/download-and-extract-file.bat index ecb28f0c7..0c7287f64 100644 --- a/script/download-and-extract/tests/download-and-extract-file.bat +++ b/script/download-and-extract/tests/download-and-extract-file.bat @@ -1 +1 @@ -cmr "download-and-extract file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _wget _extract _no-remove-extracted" -j --env.MLC_VERIFY_SSL=False --env.MLC_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1 +mlcr "download-and-extract file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _wget _extract _no-remove-extracted" -j --env.MLC_VERIFY_SSL=False --env.MLC_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1 diff --git a/script/download-and-extract/tests/download-and-extract-file2.bat b/script/download-and-extract/tests/download-and-extract-file2.bat index f2806eb1a..766ea4b7e 100644 --- a/script/download-and-extract/tests/download-and-extract-file2.bat +++ b/script/download-and-extract/tests/download-and-extract-file2.bat @@ -1 +1 @@ -cmr "download-and-extract file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _wget _extract" -j --env.MLC_VERIFY_SSL=False --env.MLC_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1 +mlcr "download-and-extract file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _wget _extract" -j --env.MLC_VERIFY_SSL=False --env.MLC_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1 diff --git a/script/download-file/README-extra.md b/script/download-file/README-extra.md deleted file mode 100644 index 09c9d065b..000000000 --- a/script/download-file/README-extra.md +++ /dev/null @@ -1,98 +0,0 @@ -# CM interface to download files in a unified way on any system - -## Download file without CM caching - -### Use internal CM download function - -This script will use [internal CM download function](https://github.com/mlcommons/ck/blob/master/mlc-mlops/automation/utils/module.py#L157) -to download a given file to the current directory: - -```bash -cmr "download file" --url=https://cKnowledge.org/test/coco-2017-val-annotations.zip -``` -or - -```bash -cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip" -``` - -#### Output environment variables - -You can check produced environment variables produced by this CM script by adding the `-j` flag: - -```bash -cmr "download file" _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip -j -``` - -```json - 
"new_env": { - "MLC_DOWNLOAD_DOWNLOADED_PATH": "D:\\Downloads\\coco-2017-val-annotations.zip", - "MLC_GET_DEPENDENT_CACHED_PATH": "D:\\Downloads\\coco-2017-val-annotations.zip" - }, -``` - -#### Input flags and equivalent environment variables - -* `--url` or `--env.MLC_DAE_URL` - URL to download file -* `--download_path` or `--to` or `--env.MLC_DOWNLOAD_PATH` - where to download file -* `--local_path` or `--from` or `--env.MLC_DOWNLOAD_LOCAL_FILE_PATH` - where to take file from instead of downloading -* `--verify` or `--env.MLC_VERIFY_SSL` - set to `no` to skip SSL certificate verification - - -### Use wget without SSL certificate verification - -```bash -cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no -``` - -### Use curl without SSL certificate verification - -```bash -cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _curl" --verify=no -``` - -### Check MD5SUM - -```bash -cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 -``` - -### Save to another file - -```bash -cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.MLC_DOWNLOAD_FILENAME=xyz --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 -``` - -### Save to another place - -```bash -cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --download_path=D:\Work --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 -``` - -### Reuse local file instead of downloading a file - -```bash -cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --local_path="D:\Work\coco-2017-val-annotations.zip" --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 -j -``` - -Output environment variables produced by this CM script: -```json - "new_env": { - "MLC_DOWNLOAD_DOWNLOADED_PATH": "D:\\Work\\coco-2017-val-annotations.zip", - "MLC_GET_DEPENDENT_CACHED_PATH": "D:\\Work\\coco-2017-val-annotations.zip" - } -``` - -## Download file with CM caching - -You can use all above commands with `--force_cache` and `--extra_cache_tags` flags. 
-In such case, a given file will be downloaded to CM cache and can be reused by other CM scripts and workflows: - -```bash -cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 --force_cache --extra_cache_tags=coco,2017,val,annotations -``` - -You can find it in CM cache using extra cache tags as follows: -```bash -cm show cache "download file annotations coco 2017 val" -``` diff --git a/script/download-file/tests/download-file.bat b/script/download-file/tests/download-file.bat index dbfcfc5ce..6b9325001 100644 --- a/script/download-file/tests/download-file.bat +++ b/script/download-file/tests/download-file.bat @@ -1,2 +1,2 @@ -cmr "download file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _wget" -j --env.MLC_VERIFY_SSL=False --env.MLC_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1 +mlcr "download file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _wget" -j --env.MLC_VERIFY_SSL=False --env.MLC_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1 diff --git a/script/download-file/tests/download-file2.bat b/script/download-file/tests/download-file2.bat index 6d919c8c1..bb27bcd0a 100644 --- a/script/download-file/tests/download-file2.bat +++ b/script/download-file/tests/download-file2.bat @@ -1 +1 @@ -cmr "download file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _cmutil" -j --env.MLC_VERIFY_SSL=False --env.MLC_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1 +mlcr "download file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _cmutil" -j --env.MLC_VERIFY_SSL=False --env.MLC_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1 diff --git a/script/extract-file/README-extra.md b/script/extract-file/README-extra.md deleted file mode 100644 index b227eadca..000000000 --- a/script/extract-file/README-extra.md +++ /dev/null @@ -1,115 +0,0 @@ -# CM interface to extract files in a unified way on any system - -## Extract files without CM caching - -You can use this script to extract `.tar`, `.gz`, `.zip`, `.bz2`, `.tag.gz` and `.tgz` files. 
- -Before using further examples, you can download `coco-2017-val-annotations.zip` using CM: -```bash -cmr "download file" --url=https://cKnowledge.org/test/coco-2017-val-annotations.zip -``` - -Extract this archive in the current path while keeping the archive file: - -```bash -cmr "extract file _keep" --input=coco-2017-val-annotations.zip -``` - -or - -```bash -cmr "extract file _keep _path.coco-2017-val-annotations.zip" -``` - -You can remove `_keep` to delete archive after extracting files: - -```bash -cmr "extract file" --input=coco-2017-val-annotations.zip -``` - -#### Output environment variables - -You can check produced environment variables produced by this CM script by adding the `-j` flag: - -```bash -cmr "extract file _keep" --input=coco-2017-val-annotations.zip -j -``` - -```json - "new_env": { - "MLC_EXTRACT_EXTRACTED_PATH": "D:\\Work99.3 readme\\xyz", - "MLC_GET_DEPENDENT_CACHED_PATH": "D:\\Work99.3 readme\\xyz" - }, -``` - -#### Input flags and equivalent environment variables - -* `--input` or `--env.MLC_EXTRACT_FILEPATH` - input file -* `--extract_path` or `--to` or `--env.MLC_EXTRACT_PATH` - where to extract files (--input should have full path then) -* `--extra_folder` or `--env.MLC_EXTRACT_TO_FOLDER` - extra directory when extracting file (to avoid messing up current directory) - -#### Variations - -* `_keep` or `_no-remove-extracted` or `--env.MLC_EXTRACT_REMOVE_EXTRACTED=no` - keep archive file (it will be deleted by default) - - - -### Extract to a specific folder - -Note that you need to provide a full path to the archive file if you want to extract it to some directory: - -```bash -cmr "extract file _keep" --input="$PWD/coco-2017-val-annotations.zip" --extract_path="$HOME/mlc-test" -``` - -### Add extra folder to extracted files - -You can add extra folder when extracting files to avoid messing up current directory: - -```bash -cmr "extract file _keep" --input=coco-2017-val-annotations.zip --extra_folder=xyz -``` - - - - -## Extract 1 file and test MD5SUM without CM caching - -You can use this script to extract 1 archived file (model, captions, etc) and test MD5SUM. - -To test this CM script, download `captions_val2017.json.gz`: -```bash -cmr "download file _url.https://cKnowledge.org/test/captions_val2017.json.gz" -``` - -Then extract it and test MD5SUM as follows: - -```bash -cmr "extract file _keep _path.captions_val2017.json.gz" --env.MLC_EXTRACT_EXTRACTED_CHECKSUM=b7bec29ab7bd8971ae4cafc2390a658f -j -``` - - -### Force another filename during extract - -Some workflows may need to use a different filename than original. You can change it as follows: -```bash -cmr "extract file _keep _path.captions_val2017.json.gz" --env.MLC_EXTRACT_EXTRACTED_FILENAME=new-file.json --env.MLC_EXTRACT_EXTRACTED_CHECKSUM=b7bec29ab7bd8971ae4cafc2390a658f -``` - - - - -## Extract file(s) to CM cache - -You can use all above commands with `--force_cache` and `--extra_cache_tags` flags. -In such case, file(s) will be extracted to the CM cache and can be reused by other CM scripts and workflows. -Note that you need to provide full path to the archive file. 
- -```bash -cmr "extract file _keep" --input=$HOME/coco-2017-val-annotations.zip --force_cache --extra_cache_tags=coco,2017,val,annotations -``` - -You can find it in CM cache using extra cache tags as follows: -```bash -cm show cache "extract file annotations coco 2017 val" -``` diff --git a/script/fail/README-extra.md b/script/fail/README-extra.md deleted file mode 100644 index 582991f6d..000000000 --- a/script/fail/README-extra.md +++ /dev/null @@ -1 +0,0 @@ -# CM script diff --git a/script/flash-tinyml-binary/README-extra.md b/script/flash-tinyml-binary/README-extra.md deleted file mode 100644 index 1c50fc8e7..000000000 --- a/script/flash-tinyml-binary/README-extra.md +++ /dev/null @@ -1,16 +0,0 @@ -This script flashes the ELF binary using Zephyr. -## Install -```bash -cm run script --tags=flash,tiny,_[VARIANT],_[MODEL] -``` -where, -* `[VARIANT]` is one of `cmsis_nn`,`native` -* `[MODEL]` is one of `ad`, `ic`, `kws`, `vww` - -We can also pass a known build directory like here: - -```bash -cm run script --tags=flash,tiny --build_dir=[BUILD_DIR] -``` -where, -* `[BUILD_DIR]` is the build folder containing the zephyr folder which in turn contains the built ELF binary diff --git a/script/generate-mlperf-inference-submission/README-extra.md b/script/generate-mlperf-inference-submission/README-extra.md deleted file mode 100644 index 0510432d5..000000000 --- a/script/generate-mlperf-inference-submission/README-extra.md +++ /dev/null @@ -1,12 +0,0 @@ -# Generate MLPerf Inference Submission Folder -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) takes in a MLPerf Inference results folder (same folder structure assumed as produced by MLPerf inference reference implementation) and produces a valid submission folder as required by the [MLPerf Inference submission checker](https://github.com/mlcommons/inference/blob/master/tools/submission/submission-checker.py). - -## How To -```bash -cm run script --tags=generate,mlperf-inference-submission --results_dir=[MLPERF_RESULT_DIR] --submission_dir=[SUBMISSION_FOLDER] -``` - -### Additional Options -* `[--run_checker]:` Runs the MLPerf Inference submission checker on the produced submission folder -* `[--skip_truncation]:` If on will not run the truncation of the accuracy logs (useful for testing) -* `[--run_style]:` If set to "valid" will indicate the result folder is from a full and valid MLPerf inference run and will trigget the accuracy truncation script unless `--skip_truncation` flag is set. diff --git a/script/generate-mlperf-inference-submission/meta.yaml b/script/generate-mlperf-inference-submission/meta.yaml index 4c5a0ab34..20f7f3594 100644 --- a/script/generate-mlperf-inference-submission/meta.yaml +++ b/script/generate-mlperf-inference-submission/meta.yaml @@ -32,8 +32,8 @@ deps: - 'on' tags: get,mlperf,submission,dir docker: - cm_repo: mlcommons@mlperf-automations - cm_repo_branch: dev + mlc_repo: mlcommons@mlperf-automations + mlc_repo_branch: dev deps: - names: get-mlperf-inference-results-dir skip_if_env: diff --git a/script/generate-mlperf-tiny-report/README-extra.md b/script/generate-mlperf-tiny-report/README-extra.md deleted file mode 100644 index 36a0c58fc..000000000 --- a/script/generate-mlperf-tiny-report/README-extra.md +++ /dev/null @@ -1,55 +0,0 @@ -# About - -This portable CM script run submission checker and generates summary report for all Tiny MLPerf results -using [these native scripts](https://github.com/mlcommons/submissions_tiny_v1.1/pull/51). 
- -## Usage - -We have tested this portable CM script on Ubuntu and Windows. - -Install [MLCommons CM framework](https://github.com/mlcommons/ck/blob/master/docs/installation.md). - -Pull the MLCommons CK repository with automation recipes for interoperable MLOps: -```bash -cm pull repo mlcommons@cm4mlops --checkout=dev -``` - -Install repositories with raw MLPerf inference benchmark results: -```bash -cmr "get git repo _repo.https://github.com/mlcommons/tiny_results_v0.7" --extra_cache_tags=mlperf-tiny-results,version-0.7 -cmr "get git repo _repo.https://github.com/mlcommons/tiny_results_v1.0" --extra_cache_tags=mlperf-tiny-results,version-1.0 -``` - -You can also add private results to compare submissions locally before they become public: -```bash -cmr "get git repo _repo.https://github.com/mlcommons/submissions_tiny_v1.1" --extra_cache_tags=mlperf-tiny-results,version-1.1-private -``` - -You can use a specific checkout/branch as follows: -```bash -cm run script "get git repo _repo.https://github.com/mlcommons/submissions_tiny_v1.1" \ - --extra_cache_tags=mlperf-tiny-results,version-1.1-private,generate_final_report \ - --depth="" \ - --branch=generate_final_report -``` - - -Now run this script: -```bash -cmr "generate mlperf-tiny report" -``` - -It will create `summary-{TinyMLPerf version}.csv' report in your current directory. - -You can also specify a version of a repository here: - -```bash -cmr "generate mlperf-tiny report" --repo_tags=1.1-private -``` - -These results are also available in the [public CK playground](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-tiny,all). - -# Contact us - -This project is maintained by the [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce). -Join our [Discord server](https://discord.gg/JjWNWXKxwT) to ask questions, provide your feedback and participate in further developments. diff --git a/script/generate-mlperf-tiny-submission/README-extra.md b/script/generate-mlperf-tiny-submission/README-extra.md deleted file mode 100644 index 6b3671619..000000000 --- a/script/generate-mlperf-tiny-submission/README-extra.md +++ /dev/null @@ -1,3 +0,0 @@ -# Generate MLPerf Tiny Submission Folder from a Results Directory - -This is a work in progress script. 
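The deleted submission README above documents the `generate,mlperf-inference-submission` tags together with the `--results_dir`, `--submission_dir` and `--run_checker` options. A minimal illustrative sketch of driving that flow from Python through the renamed `mlcr` front end (assuming `mlcr` is on `PATH` and accepts the same space-separated tags and flags as the `cmr` examples; the helper name and example paths are hypothetical, not part of this patch):

```python
# Hedged sketch: call the renamed `mlcr` CLI with the submission-generation
# tags and flags documented in the README deleted above. The helper name and
# the example paths are illustrative assumptions, not introduced by this patch.
import subprocess


def generate_submission(results_dir: str, submission_dir: str,
                        run_checker: bool = True) -> None:
    cmd = [
        "mlcr", "generate mlperf-inference-submission",
        f"--results_dir={results_dir}",
        f"--submission_dir={submission_dir}",
    ]
    if run_checker:
        cmd.append("--run_checker")
    # Propagate a non-zero exit code from the underlying automation script.
    subprocess.run(cmd, check=True)


if __name__ == "__main__":
    generate_submission("/tmp/mlperf_results", "/tmp/mlperf_submission")
```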
diff --git a/script/get-android-sdk/README-extra.md b/script/get-android-sdk/README-extra.md deleted file mode 100644 index c15c6df33..000000000 --- a/script/get-android-sdk/README-extra.md +++ /dev/null @@ -1,3 +0,0 @@ -# About - -https://developer.android.com/studio#command-line-tools-only diff --git a/script/get-aocl/README-extra.md b/script/get-aocl/README-extra.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/script/get-aria2/README-extra.md b/script/get-aria2/README-extra.md deleted file mode 100644 index 40539d77f..000000000 --- a/script/get-aria2/README-extra.md +++ /dev/null @@ -1,9 +0,0 @@ -# Some commands - -```bash -cmr "get aria2" --version=1.37.0 -cmr "get aria2" --install -cmr "get aria2" --path={path to the directory with aria2} -cmr "get aria2" --input={full path to aria2} -cmr "get aria2" --shell -``` diff --git a/script/get-aws-cli/README-extra.md b/script/get-aws-cli/README-extra.md deleted file mode 100644 index 94c96ea86..000000000 --- a/script/get-aws-cli/README-extra.md +++ /dev/null @@ -1,9 +0,0 @@ -# Get AWS CLI -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed aws-cli on the system and if not found calls the [install script for aws-cli](../script/install-aws-cli). - -## Exported Variables -* `MLC_AWS_BIN_WITH_PATH` - -## Supported and Tested OS -1. Ubuntu 18.04, 20.04, 22.04 -2. RHEL 9 diff --git a/script/get-bazel/README-extra.md b/script/get-bazel/README-extra.md deleted file mode 100644 index a0cc8d963..000000000 --- a/script/get-bazel/README-extra.md +++ /dev/null @@ -1,9 +0,0 @@ -# Get Bazel -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed bazel on the system and if not found calls the [install script for bazel](../script/install-bazel). - -## Exported Variables -* `MLC_BAZEL_BIN_WITH_PATH` - -## Supported and Tested OS -1. Ubuntu 18.04, 20.04, 22.04 -2. 
RHEL 9 diff --git a/script/get-blis/README-extra.md b/script/get-blis/README-extra.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/script/get-cl/README-extra.md b/script/get-cl/README-extra.md deleted file mode 100644 index 796ec7113..000000000 --- a/script/get-cl/README-extra.md +++ /dev/null @@ -1,7 +0,0 @@ -# Get Microsoft C compiler - -Example to detect a Microsoft C compiler from the Visual Studio: - -```bash -cm run script "get cl" --path="C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\bin\Hostx64\x64" -``` diff --git a/script/get-cuda-devices/customize.py b/script/get-cuda-devices/customize.py index 62832a6e7..0ca09bf5a 100644 --- a/script/get-cuda-devices/customize.py +++ b/script/get-cuda-devices/customize.py @@ -57,10 +57,10 @@ def postprocess(i): key_env = 'MLC_CUDA_DEVICE_PROP_' + key.upper().replace(' ', '_') env[key_env] = val - state['cm_cuda_num_devices'] = gpu_id + 1 + state['mlc_cuda_num_devices'] = gpu_id + 1 env['MLC_CUDA_NUM_DEVICES'] = gpu_id + 1 - state['cm_cuda_device_prop'] = p - state['cm_cuda_devices_prop'] = gpu + state['mlc_cuda_device_prop'] = p + state['mlc_cuda_devices_prop'] = gpu return {'return': 0} diff --git a/script/get-cuda-devices/meta.yaml b/script/get-cuda-devices/meta.yaml index a340263e4..2dac1175a 100644 --- a/script/get-cuda-devices/meta.yaml +++ b/script/get-cuda-devices/meta.yaml @@ -26,8 +26,8 @@ docker: run: false all_gpus: 'yes' skip_run_cmd: 'no' - skip_cm_sys_upgrade: 'yes' - cm_repo_flags: '--checkout=dev' + skip_mlc_sys_upgrade: 'yes' + mlc_repo_flags: '--checkout=dev' use_host_group_id: 'yes' image_tag_extra: '-mlc-dev' @@ -40,9 +40,9 @@ new_env_keys: - MLC_CUDA_VERSION new_state_keys: -- cm_cuda_device_prop -- cm_cuda_devices_prop -- cm_cuda_num_devices +- mlc_cuda_device_prop +- mlc_cuda_devices_prop +- mlc_cuda_num_devices print_files_if_script_error: - tmp-run.out diff --git a/script/get-cuda/README-extra.md b/script/get-cuda/README-extra.md deleted file mode 100644 index d1d37c98c..000000000 --- a/script/get-cuda/README-extra.md +++ /dev/null @@ -1,44 +0,0 @@ -# Get CUDA - -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed CUDA on the system -and if not found calls the [install script for CUDA](../script/install-cuda-prebuilt). - -## Exported Variables -* `MLC_CUDA_INSTALLED_PATH` -* `MLC_CUDA_VERSION` -* `MLC_NVCC_BIN_WITH_PATH` -* `CUDA_HOME` -* `CUDA_PATH` - -## Supported and Tested OS -1. Ubuntu 18.04, 20.04, 22.04 -2. Windows - -# Examples - -## Detect CUDA on Windows - -You may want to install all system dependencies as described [here](https://docs.nvidia.com/cuda/cuda-installation-guide-microsoft-windows/index.html). - -If Visual Studio and CUDA updated your PATH variable, you should just run the following: -```bash -cm run script "get cuda" -``` - -However, if the PATH variable was not updated, you need to provide path to the cl.exe and nvcc.exe to help CM detect them: - -```bash -cm run script "get cl" --path="C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\bin\Hostx64\x64" -cm run script "get cuda _compiler" --path="C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\bin" -``` - -# System dependencies - -* Download [CUDA toolkit](https://developer.nvidia.com/cuda-toolkit). -* Download [cuDNN](https://developer.nvidia.com/rdp/cudnn-download). -* (Download [TensorRT](https://developer.nvidia.com/nvidia-tensorrt-8x-download)). - -## Windows - -* ? 
Download [Microsoft Visual C++ Redistributable](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist) -* Check [Nvidia installation guide](https://docs.nvidia.com/cuda/cuda-installation-guide-microsoft-windows/index.html) diff --git a/script/get-cudnn/README-extra.md b/script/get-cudnn/README-extra.md deleted file mode 100644 index 374680813..000000000 --- a/script/get-cudnn/README-extra.md +++ /dev/null @@ -1,3 +0,0 @@ -# TBD - -We need to add detection of cuDNN version on Windows, Linux and MacOS diff --git a/script/get-cudnn/customize.py b/script/get-cudnn/customize.py index 097c4342d..1cedb4a1d 100644 --- a/script/get-cudnn/customize.py +++ b/script/get-cudnn/customize.py @@ -65,12 +65,12 @@ def preprocess(i): # paths to cuda are not always in PATH - add a few typical locations to search for # (unless forced by a user) - cm_tmp_path = env.get('MLC_TMP_PATH', '').strip() - if cm_tmp_path != '': - cm_tmp_path += ':' - cm_tmp_path += '/usr/local/cuda/lib64:/usr/cuda/lib64:/usr/local/cuda/lib:/usr/cuda/lib:/usr/local/cuda-11/lib64:/usr/cuda-11/lib:/usr/local/cuda-12/lib:/usr/cuda-12/lib:/usr/local/packages/cuda/lib' - cm_tmp_path += os.path.expandvars(':$CUDNN_ROOT/lib') - env['MLC_TMP_PATH'] = cm_tmp_path + mlc_tmp_path = env.get('MLC_TMP_PATH', '').strip() + if mlc_tmp_path != '': + mlc_tmp_path += ':' + mlc_tmp_path += '/usr/local/cuda/lib64:/usr/cuda/lib64:/usr/local/cuda/lib:/usr/cuda/lib:/usr/local/cuda-11/lib64:/usr/cuda-11/lib:/usr/local/cuda-12/lib:/usr/cuda-12/lib:/usr/local/packages/cuda/lib' + mlc_tmp_path += os.path.expandvars(':$CUDNN_ROOT/lib') + env['MLC_TMP_PATH'] = mlc_tmp_path env['MLC_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' for lib_path in env.get( @@ -103,7 +103,7 @@ def preprocess(i): return {'return': 0} if env.get('MLC_CUDNN_TAR_FILE_PATH', '') == '': - return {'return': 1, 'error': 'Please envoke cm run script "get cudnn" --tar_file={full path to the cuDNN tar file}'} + return {'return': 1, 'error': 'Please envoke mlcr "get cudnn" --tar_file={full path to the cuDNN tar file}'} print('Untaring file - can take some time ...') diff --git a/script/get-dataset-coco/README-extra.md b/script/get-dataset-coco/README-extra.md deleted file mode 100644 index 2bf3a5321..000000000 --- a/script/get-dataset-coco/README-extra.md +++ /dev/null @@ -1,95 +0,0 @@ -# CM interface to download or detect COCO data sets - -This CM automation recipe helps to download or detect [COCO datasets](https://cocodataset.org) -and register them in the CM cache with various environment variables -to be reused in CM workflows and other projects. 
- -Supported versions: -* 2017 val/train -* 2014 val/train - -## Use-cases - -* https://github.com/mlcommons/abtf-ssd-pytorch - -## Download COCO dataset and register in CM cache - -```bash -cmr "get coco dataset" -cmr "get coco dataset _val _2017" -cmr "get coco dataset _train _2017" -``` - -You can find this data set in the CM cache using the following command: - -```bash -cm show cache "get coco dataset" -``` - -#### Output environment variables - -You can check produced environment variables produced by this CM script by adding the `-j` flag: - -```bash -cmr "get coco dataset _val _2017" -j -``` - -```json - "new_env": { - "MLC_DATASET_COCO_URL_ANNOTATIONS": "http://images.cocodataset.org/annotations", - "MLC_DATASET_COCO_URL_DATA": "http://images.cocodataset.org/zips", - "MLC_DATASET_COCO_VERSION": "2017", - "MLC_DATASET_COCO_TYPE": "val", - "MLC_DATASET_COCO_SIZE": "complete", - "MLC_DATASET_COCO_ANNOTATIONS_DOWNLOAD_PATH": "d:\\Work2\\COCO-2017-val\\annotations_trainval2017.zip", - "MLC_DATASET_COCO_ANNOTATIONS_PATH": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07\\annotations", - "MLC_DATASET_COCO_DATA_DOWNLOAD_PATH": "d:\\Work2\\COCO-2017-val\\val2017.zip", - "MLC_DATASET_COCO_DATA_PATH": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07\\val2017", - "MLC_DATASET_COCO_MD5SUM_ANN": "f4bbac642086de4f52a3fdda2de5fa2c", - "MLC_DATASET_COCO_MD5SUM_DATA": "442b8da7639aecaf257c1dceb8ba8c80", - "MLC_DATASET_COCO_PATH": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07", - "MLC_DATASET_COCO_TYPE_AND_VERSION": "val2017", - "MLC_DATASET_COCO_URL_ANNOTATIONS_FULL": "http://images.cocodataset.org/annotations/annotations_trainval2017.zip", - "MLC_DATASET_COCO_URL_DATA_FULL": "http://images.cocodataset.org/zips/val2017.zip", - "MLC_DATASET_PATH": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07", - "MLC_DATASET_PATH_ROOT": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07" - }, -``` - -#### Input flags and equivalent environment variables - -* `--from` - where to find dataset archive files instead of downloading them -* `--to` - where to extract dataset files -* `--path` - where to pick up extracted dataset files -* `--store` - where to keep downloaded files - -#### Variations - -* Dataset type: `_val` | `_train` -* Dataset year: `2017` | `2014` - - -## Detect already installed COCO dataset - -```bash -cmr "get coco dataset" --path={PATH to the installed dataset}" -``` - -CM script will attempt to automatically detect the type (val/train) and version (2014/2017) -of the dataset files. - -## Install dataset from already downloaded archives - -```bash -cmr "get coco dataset _val _2017" --from=d:\Work2\COCO-2017-val -j -``` - -where `--from` points to the COCO dataset zip files already downloaded from the server. -It is useful when all files are already downloaded and saved for common use. - - -## Download and store dataset files locally - -```bash -cmr "get coco dataset _val _2017" --to=d:\Downloads\COCO-2017-val --store=d:\Downloads -``` diff --git a/script/get-dataset-cognata-mlcommons/README-extra.md b/script/get-dataset-cognata-mlcommons/README-extra.md deleted file mode 100644 index 0bb16ad46..000000000 --- a/script/get-dataset-cognata-mlcommons/README-extra.md +++ /dev/null @@ -1,62 +0,0 @@ -Examples: - -### Check flags - -```bash -cm run script --tags=get,raw,dataset,mlcommons-cognata --help -``` - -### Import already downloaded dataset - -Note that this automation will attempt to install aria2 tool via sudo apt on Ubuntu. 
- -```bash -cm run script --tags=get,raw,dataset,mlcommons-cognata --import=${HOME}/datasets/cognata -j -cm run script --tags=get,raw,dataset,mlcommons-cognata --import=${HOME}/datasets/cognata -j --private_url="{ADD PRIVATE URL FOR COGNATA} FOR FULL AUTOMATION" -cm run script --tags=get,raw,dataset,mlcommons-cognata --import=%userprofile%\datasets\cognata -j -cm run script --tags=get,raw,dataset,mlcommons-cognata --import=D:\Work2\cognata -j -``` - -### Download dataset to CM cache - -```bash -cm run script --tags=get,raw,dataset,mlcommons-cognata -``` - -### Find dataset in CM cache - -```bash -cm show cache --tags=dataset,mlcommons-cognata - -cm rm cache --tags=dataset,mlcommons-cognata -``` - -### Download dataset to some local directory - -```bash -cm run script --tags=get,raw,dataset,mlcommons-cognata --path=${HOME}/datasets/cognata -j -cm run script --tags=get,raw,dataset,mlcommons-cognata --path=%userprofile%\datasets\cognata -j -cm run script --tags=get,raw,dataset,mlcommons-cognata --path=D:\Work2\cognata-downloaded -j - -``` - -### Download subsets of this dataset - -```bash -cm run script --tags=get,raw,dataset,mlcommons-cognata --serial_numbers=10002_Urban_Clear_Morning -cm run script --tags=get,raw,dataset,mlcommons-cognata --serial_numbers=10002_Urban_Clear_Morning --group_names=Cognata_Camera_01_8M -cm run script --tags=get,raw,dataset,mlcommons-cognata --serial_numbers=10002_Urban_Clear_Morning --group_names=Cognata_Camera_01_8M --file_names=Cognata_Camera_01_8M_ann.zip;Cognata_Camera_01_8M_ann_laneline.zip;Cognata_Camera_01_8M.zip -cm run script --tags=get,raw,dataset,mlcommons-cognata --serial_numbers=10002_Urban_Clear_Morning --group_names=Cognata_Camera_01_8M --file_names=Cognata_Camera_01_8M_ann.zip;Cognata_Camera_01_8M_ann_laneline.zip;Cognata_Camera_01_8M.zip -``` - -Compact way to download the ABTF demo data set to the CM cache: - -```bash -cm run script --tags=get,raw,dataset,mlcommons-cognata,_abtf-demo -``` - -or to specific path -```bash -cm run script --tags=get,raw,dataset,mlcommons-cognata _abtf-demo" --path=./cognata -cm run script --tags=get,raw,dataset,mlcommons-cognata _abtf-demo" --path=.\cognata -``` diff --git a/script/get-dataset-cognata-mlcommons/customize.py b/script/get-dataset-cognata-mlcommons/customize.py index dbb18802f..43b327769 100644 --- a/script/get-dataset-cognata-mlcommons/customize.py +++ b/script/get-dataset-cognata-mlcommons/customize.py @@ -7,12 +7,12 @@ def preprocess(i): env = i['env'] - cm_cache_dataset_path = env.get( + mlc_cache_dataset_path = env.get( 'MLC_CUSTOM_CACHE_ENTRY_DATASET_MLCOMMONS_COGNATA_PATH', '').strip() res = utils.load_json( os.path.join( - cm_cache_dataset_path, + mlc_cache_dataset_path, 'cfg.json')) cfg = res.get('meta', {}) if cfg.get('imported', False): @@ -67,12 +67,12 @@ def postprocess(i): quiet = (env.get('MLC_QUIET', False) == 'yes') - cm_cache_dataset_path = env.get( + mlc_cache_dataset_path = env.get( 'MLC_CUSTOM_CACHE_ENTRY_DATASET_MLCOMMONS_COGNATA_PATH', '').strip() - if not os.path.isdir(cm_cache_dataset_path): + if not os.path.isdir(mlc_cache_dataset_path): return { - 'return': 1, 'error': 'Dataset corrupted - CM cache path not found: {}'.format(cm_cache_dataset_path)} + 'return': 1, 'error': 'Dataset corrupted - CM cache path not found: {}'.format(mlc_cache_dataset_path)} if env.get('MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES', '') == '': env['MLC_DATASET_MLCOMMONS_COGNATA_PATH'] = os.path.dirname( @@ -80,10 +80,10 @@ def postprocess(i): env['MLC_GET_DEPENDENT_CACHED_PATH'] = 
env['MLC_DATASET_MLCOMMONS_COGNATA_PATH'] return {'return': 0} - cm_cache_dataset_cfg_file = os.path.join(cm_cache_dataset_path, 'cfg.json') - env['MLC_DATASET_MLCOMMONS_COGNATA_CFG_FILE'] = cm_cache_dataset_cfg_file + mlc_cache_dataset_cfg_file = os.path.join(mlc_cache_dataset_path, 'cfg.json') + env['MLC_DATASET_MLCOMMONS_COGNATA_CFG_FILE'] = mlc_cache_dataset_cfg_file - res = utils.load_json(cm_cache_dataset_cfg_file) + res = utils.load_json(mlc_cache_dataset_cfg_file) cfg = res.get('meta', {}) dataset_path = cfg.get('real_path', '') @@ -92,7 +92,7 @@ def postprocess(i): if dataset_path_requested != '': dataset_path = dataset_path_requested else: - dataset_path = os.path.join(cm_cache_dataset_path, 'cognata') + dataset_path = os.path.join(mlc_cache_dataset_path, 'cognata') else: if dataset_path_requested != '': dataset_path = dataset_path_requested @@ -110,7 +110,7 @@ def postprocess(i): else: cfg['imported'] = False - utils.save_json(cm_cache_dataset_cfg_file, cfg) + utils.save_json(mlc_cache_dataset_cfg_file, cfg) if cfg.get('imported', False): return {'return': 0} @@ -392,7 +392,7 @@ def postprocess(i): # Mark that processed this dataset once correctly cfg['processed'] = True - utils.save_json(cm_cache_dataset_cfg_file, cfg) + utils.save_json(mlc_cache_dataset_cfg_file, cfg) env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_DATASET_MLCOMMONS_COGNATA_PATH'] diff --git a/script/get-dataset-cognata-mlcommons/meta.yaml b/script/get-dataset-cognata-mlcommons/meta.yaml index b59662b22..309b6ba90 100644 --- a/script/get-dataset-cognata-mlcommons/meta.yaml +++ b/script/get-dataset-cognata-mlcommons/meta.yaml @@ -17,7 +17,7 @@ tags: - ml-task--object-detection - ml-task--image-segmentation -min_cm_version: '2.2.0' +min_mlc_version: '2.2.0' private: true diff --git a/script/get-dataset-criteo/README-extra.md b/script/get-dataset-criteo/README-extra.md deleted file mode 100644 index efe669715..000000000 --- a/script/get-dataset-criteo/README-extra.md +++ /dev/null @@ -1,9 +0,0 @@ -# Get Criteo Dataset -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) downloads the 24 days of Criteo dataset for MLPerf inference using DLRM. - -## Exported Variables -* `MLC_DATASET_PATH` - -## Supported and Tested OS -1. Ubuntu 18.04, 20.04, 22.04 -2. 
RHEL 9 diff --git a/script/get-dataset-imagenet-train/customize.py b/script/get-dataset-imagenet-train/customize.py index fb6c67f66..1fe0b8b43 100644 --- a/script/get-dataset-imagenet-train/customize.py +++ b/script/get-dataset-imagenet-train/customize.py @@ -27,7 +27,7 @@ def preprocess(i): return {'return': 0} else: - return {'return': 1, 'error': 'Please rerun the last CM command with --env.IMAGENET_TRAIN_PATH={path the folder containing full ImageNet training images} or envoke cm run script "get train dataset imagenet" --input={path to the folder containing ImageNet training images}'} + return {'return': 1, 'error': 'Please rerun the last CM command with --env.IMAGENET_TRAIN_PATH={path the folder containing full ImageNet training images} or envoke mlcr "get train dataset imagenet" --input={path to the folder containing ImageNet training images}'} elif not os.path.isdir(path): if path.endswith(".tar"): diff --git a/script/get-dataset-imagenet-val/README-extra.md b/script/get-dataset-imagenet-val/README-extra.md deleted file mode 100644 index 75b310b29..000000000 --- a/script/get-dataset-imagenet-val/README-extra.md +++ /dev/null @@ -1,28 +0,0 @@ -## Notes - -The ImageNet 2012 validation data set is no longer publicly available [here](https://image-net.org/download.php). - -However, it seems that you can still download it via [Academic Torrents](https://academictorrents.com/details/5d6d0df7ed81efd49ca99ea4737e0ae5e3a5f2e5). -You can then register in the MLCommons CM using this portable CM script as follows: - -```bash -cm pull repo mlcommons@cm4mlops --checkout=dev -``` - -```bash -cm run script "get validation dataset imagenet _2012 _full" --input={directory with ILSVRC2012_val_00000001.JPEG} -``` - -Alternatively, you can download the imagenet validation dataset via torrent by giving the torrent URL as follows. - -```bash -cm run script "get validation dataset imagenet _2012 _full" --torrent={Torrent URL} -``` - -It can now be automatically plugged into other portable CM scripts for image classification including MLPerf inference vision benchmarks. - -You can also find the images and use them directly as follows: - -```bash -cm find cache --tags=dataset,validation,imagenet,_full -``` diff --git a/script/get-dataset-imagenet-val/customize.py b/script/get-dataset-imagenet-val/customize.py index b2f9a389d..24548f6a2 100644 --- a/script/get-dataset-imagenet-val/customize.py +++ b/script/get-dataset-imagenet-val/customize.py @@ -41,7 +41,7 @@ def preprocess(i): return {'return': 0} # return {'return':1, 'error':'Please rerun the last CM command # with --env.IMAGENET_PATH={path the folder containing full - # ImageNet images} or envoke cm run script "get val dataset + # ImageNet images} or envoke mlcr "get val dataset # imagenet" --input={path to the folder containing ImageNet # images}'} diff --git a/script/get-dataset-librispeech/README-extra.md b/script/get-dataset-librispeech/README-extra.md deleted file mode 100644 index d5d937fa3..000000000 --- a/script/get-dataset-librispeech/README-extra.md +++ /dev/null @@ -1,26 +0,0 @@ -# Downloads LibriSpeech Dataset -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) downloads the LibriSpeech dataset. 
- -## Usage - -``` -cm run script --tags=get,dataset,librispeech --version=[VERSION] -``` -where [VERSION] is one of -* `dev-clean` -* `dev-other` -* `train-clean` -* `train-other` -* `train-clean-100` -* `train-clean-360` -* `train-other-500` - -## Exported Variables -* `MLC_DATASET_ARCHIVE:` -* `MLC_DATASET_LIBRISPEECH_PATH:` -* `MLC_DATASET_MD5:` -* `MLC_DATASET_NAME:` - -## Supported and Tested OS -1. Ubuntu 18.04, 20.04, 22.04 -2. RHEL 9 diff --git a/script/get-dataset-openimages/README-extra.md b/script/get-dataset-openimages/README-extra.md deleted file mode 100644 index b6f5d0812..000000000 --- a/script/get-dataset-openimages/README-extra.md +++ /dev/null @@ -1,2 +0,0 @@ -# Ubuntu 22.04 -`sudo apt-get install -y libgl1-mesa-dev` diff --git a/script/get-dataset-squad/README-extra.md b/script/get-dataset-squad/README-extra.md deleted file mode 100644 index deb677c5f..000000000 --- a/script/get-dataset-squad/README-extra.md +++ /dev/null @@ -1,20 +0,0 @@ -# Downloads SQUAD Dataset -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) downloads the SQUAD dataset. - -## Usage - -``` -cm run script --tags=get,dataset,squad --version=[VERSION] -``` -where [VERSION] is one of -* `1.1` -* `2.0` - -## Exported Variables -* `MLC_DATASET_SQUAD_PATH:` Directory path to SQUAD dataset -* `MLC_DATASET_SQUAD_TRAIN_PATH:` JSON file path to SQUAD training dataset -* `MLC_DATASET_SQUAD_VAL_PATH:` JSON file path to SQUAD validation dataset - -## Supported and Tested OS -1. Ubuntu 18.04, 20.04, 22.04 -2. RHEL 9 diff --git a/script/get-dlrm/README-extra.md b/script/get-dlrm/README-extra.md deleted file mode 100644 index 8c70c36cd..000000000 --- a/script/get-dlrm/README-extra.md +++ /dev/null @@ -1,15 +0,0 @@ -# Get DLRM -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) git clones the [DLRM repository](https://github.com/facebookdresearch/dlrm). - -## Commands -To install -``` -cm run script --tags=get,mlperf,dlrm,src -``` - -## Exported Variables -* `DLRM_DIR`: Directory path of the cloned dlrm repository - -## Supported and Tested OS -1. Ubuntu 18.04, 20.04, 22.04 -2. RHEL 9 diff --git a/script/get-gcc/README-extra.md b/script/get-gcc/README-extra.md deleted file mode 100644 index a20669f48..000000000 --- a/script/get-gcc/README-extra.md +++ /dev/null @@ -1,15 +0,0 @@ -# Get GCC -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed gcc on the system. - -## Exported Variables -* `MLC_GCC_BIN` -* `MLC_GCC_BIN_WITH_PATH` -* `MLC_C_COMPILER_BIN` -* `MLC_C_COMPILER_WITH_PATH` -* `MLC_CXX_COMPILER_BIN` -* `MLC_CXX_COMPILER_WITH_PATH` -* `MLC_COMPILER_*` - -## Supported and Tested OS -1. Ubuntu 18.04, 20.04, 22.04 -2. RHEL 9 diff --git a/script/get-generic-python-lib/README-extra.md b/script/get-generic-python-lib/README-extra.md deleted file mode 100644 index 5d320ba2b..000000000 --- a/script/get-generic-python-lib/README-extra.md +++ /dev/null @@ -1,6 +0,0 @@ -## Variation onnxruntime_gpu - -### Windows - -* General installation notes: https://onnxruntime.ai/docs/install -* Notes about dependencies: [link](https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html). 
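The `cm_*` to `mlc_*` renames in the customize scripts above also change the state keys that downstream scripts read (for example, `get-cuda-devices` now publishes `mlc_cuda_device_prop` and `mlc_cuda_num_devices`). A minimal illustrative sketch of the consumer side, assuming a hypothetical fallback helper that tolerates the legacy `cm_*` spelling during migration:

```python
# Hedged sketch: read the renamed CUDA state keys with an optional fallback
# to the legacy cm_* names. The fallback helper is a hypothetical migration
# aid and is not introduced by this patch.
def state_get(state: dict, new_key: str, legacy_key: str, default=None):
    """Prefer the new mlc_* key, fall back to the old cm_* key."""
    if new_key in state:
        return state[new_key]
    return state.get(legacy_key, default)


def describe_accelerator(state: dict) -> dict:
    prop = state_get(state, 'mlc_cuda_device_prop', 'cm_cuda_device_prop', {})
    num = state_get(state, 'mlc_cuda_num_devices', 'cm_cuda_num_devices', 0)
    return {
        'accelerator_model_name': prop.get('GPU Name', 'unknown'),
        'accelerators_per_node': num,
    }


if __name__ == "__main__":
    # Example state shaped like what get-cuda-devices exports after this patch.
    example = {'mlc_cuda_device_prop': {'GPU Name': 'NVIDIA A100'},
               'mlc_cuda_num_devices': 1}
    print(describe_accelerator(example))
```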
diff --git a/script/get-generic-sys-util/README-extra.md b/script/get-generic-sys-util/README-extra.md deleted file mode 100644 index d8f0015ae..000000000 --- a/script/get-generic-sys-util/README-extra.md +++ /dev/null @@ -1,425 +0,0 @@ -Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-generic-sys-util](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-generic-sys-util) for the documentation of this CM script. - -# get-generic-sys-util -Below are the specific regexes and the format of output that they are expecting for each command used to check for versions. - -All commands are tested to be working on Ubuntu. - -Format: - -## Utility name -`regex` - -`command to obtain version` - -command output - ----- - -## g++-12 -`^.*([0-9]+(\\.[0-9]+)+).*` - -`g++-9 --version` - -g++-9 (Ubuntu 9.5.0-1ubuntu1~22.04) 9.5.0
-Copyright (C) 2019 Free Software Foundation, Inc.
-This is free software; see the source for copying conditions. There is NO
-warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-
-## g++-11
-`^.*([0-9]+(\\.[0-9]+)+).*`
-
-`g++-11 --version`
-
-g++-11 (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0
-Copyright (C) 2021 Free Software Foundation, Inc.
-This is free software; see the source for copying conditions. There is NO
-warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-
-## g++-12
-`^.*([0-9]+(\\.[0-9]+)+).*`
-
-`g++-12 --version`
-
-g++-12 (Ubuntu 12.3.0-1ubuntu1~22.04) 12.3.0
-Copyright (C) 2022 Free Software Foundation, Inc.
-This is free software; see the source for copying conditions. There is NO
-warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-
-
-## gcc-9
-`^.*([0-9]+(\\.[0-9]+)+).*`
-
-`gcc-9 --version`
-
-gcc-9 (Ubuntu 9.5.0-1ubuntu1~22.04) 9.5.0
-Copyright (C) 2019 Free Software Foundation, Inc.
-This is free software; see the source for copying conditions. There is NO
-warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-
-## gcc-11
-`^.*([0-9]+(\\.[0-9]+)+).*`
-
-`gcc-11 --version`
-
-gcc-11 (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0
-Copyright (C) 2021 Free Software Foundation, Inc.
-This is free software; see the source for copying conditions. There is NO
-warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- - -## libgflags-dev -`([\d.]+)` - -`pkg-config --modversion gflags` - -2.2.2 - -## libglog-dev -`([\d.]+)` - -`pkg-config --modversion libglog` - -0.4.0 - -## libboost-all-dev -`([0-9]+(\w.[0-9]+)+)` - -`dpkg -s libboost-dev | grep 'Version'` - -Version: 1.74.0.3ubuntu7 - - -## libpng-dev -`([\d.]+)` - -`pkg-config --modversion libpng` - -1.6.37 - -## libre2-dev -`([\d.]+)` - -`pkg-config --modversion libre2` - -0.0.0 - -## libpci-dev -`([\d.]+)` - -`pkg-config --modversion libpci` - -3.7.0 - - -## libreadline_dev -`([\d.]+)` - -`pkg-config --modversion readline` - -8.1 - -## zlib -`([\d.]+)` - -`pkg-config --modversion zlib` - -1.2.11 - - -## libsqlite3_dev -`([\d.]+)` - -`pkg-config --modversion sqlite3` - -3.37.2 - -## libssl_dev -`OpenSSL\s+([\d.]+)` - -`openssl version` - -OpenSSL 3.0.2 15 Mar 2022 (Library: OpenSSL 3.0.2 15 Mar 2022) - -## libudev-dev -`([\d.]+)` - -`pkg-config --modversion libudev` - -249 - - -## libbz2_dev -`Version ([A-Za-z0-9]+(\.[A-Za-z0-9]+)+)` - -`bzcat --version` - -bzip2, a block-sorting file compressor. Version 1.0.8, 13-Jul-2019. - -## libev_dev -dpkg here should be fine as only apt install is supported -`Version ([A-Za-z0-9]+(\.[A-Za-z0-9]+)+)` - -`dpkg -s libev-dev | grep 'Version'` - -Version: 1:4.33-1 - -## libffi-dev -`([\d.]+)` - -`pkg-config --modversion libffi` - -3.4.2 - -## libffi_dev -`([\d.]+)` - -`pkg-config --modversion libffi` - -3.4.2 - -## libffi7 -`\d\.\d-[0-9]+` - -`dpkg -l libffi7 2>/dev/null | grep '^ii' | awk '{print $3}' || rpm -q libffi7 2>/dev/null || pacman -Q libffi7 2>/dev/null` - -3.3-5ubuntu1 - -## libffi8 -`\d\.\d\.\d-\d` - -`pkg-config --modversion libffi8"` - -3.4.2-4 - -## libgdbm_dev -dpkg here should be fine as only apt install is supported -`dpkg -s libgdbm-dev | grep 'Version'` - -`([\d]+\.[\d\.-]+)` - -Version: 1.23-1 - - -## libgmock -`([\d.]+)` - -`pkg-config --modversion libgmock` - -1.11.0 - -## liblzma_dev -`[A-Za-z]+\s\d\.\d\.\d` - -`xz --version` - -xz (XZ Utils) 5.2.5 -liblzma 5.2.5 - - -## libmpfr_dev -`([\d.]+)` - -`pkg-config --modversion mpfr` - -`4.1.0` - -## libncurses_dev -`([0-9]+(\.[0-9]+)+)` - -`ncurses5-config --version` - -6.3.20211021 - - - -## ninja-build -`([\d.]+)` - -`ninja --version` - -1.11.1 - -## md5sha1sum -`md5sum \(GNU coreutils\) ([\d.]+)` - -`md5sum --version` or `sha1sum --version` - -md5sum (GNU coreutils) 9.5 - -sha1sum (GNU coreutils) 9.5 - - -## nlohmann-json3-dev -`([\d.]+)` - -`pkg-config --modversion nlohmann_json` - -`3.10.5` - -## ntpdate -`([A-Za-z0-9]+(\.[A-Za-z0-9]+)+)` - -`dpkg -l ntpdate 2>/dev/null | grep ^ii | awk '{print $3}'` - -1:4.2.8p15+dfsg-1ubuntu2 - -## nvidia-cuda-toolkit -`release ([\d.]+)` - -`nvcc --version` - -nvcc: NVIDIA (R) Cuda compiler driver
-Copyright (c) 2005-2021 NVIDIA Corporation
-Built on Thu_Nov_18_09:45:25_PST_2021
-Cuda compilation tools, release 11.5, V11.5.119
-Build cuda_11.5.r11.5/compiler.30672275_0
- - -## psmisc -`\(PSmisc\) ([\d.]+)` - -`pstree --version` - -pstree (PSmisc) 23.4 - -## rapidjson-dev -`([\d.]+)` - -`pkg-config --modversion RapidJSON` - -1.1.0 - -## cmake -`cmake version ([\d.]+)` - -`cmake --version` - -cmake version 3.30.4 - -## libnuma-dev -`([\d.]+)` - -`pkg-config --modversion numa` - -2.0.14 - - -## numactl -`([\d.]+)` - -`pkg-config --modversion numa` - -2.0.14 - -## wget -`Wget\s*([\d.]+)` - -`wget --version` - -GNU Wget 1.21.2 built on linux-gnu. - -## screen -`Screen version ([\d.]+)` - -`screen --version` - -Screen version 4.00.020 (FAU) 23-Oct-06 - -## xz -`xz \(XZ Utils\) ([\d.]+)` - -`xz --version` - -xz (XZ Utils) 5.2.5 -liblzma 5.2.5 - -## VIM -`VIM - Vi IMproved ([\d.]+` - -`vim --version` - -VIM - Vi IMproved 9.0 (2022 Jun 28, compiled Aug 3 2024 14:50:46) - -## rsync -`rsync\s+version\s+([\d.]+)` - -`rsync --version` - -rsync version 3.2.7 protocol version 31 - -## sox -`sox:\s+SoX\s+v([\d.]+)` - -`sox --version` - -sox: SoX v14.4.2 - - -## systemd -`systemd ([\d]+)` - -`systemctl --version` - -systemd 249 (249.11-0ubuntu3.12) - -## tk-dev -Probably fine to use `dpkg` here as only installation supported is for ubuntu - -`([0-9]+(\.[0-9]+)+)` - -`dpkg -s tk-dev | grep Version` - -Version: 8.6.11+1build2 - - -## transmission -`transmission-daemon ([\d.]+)` - -`transmission-daemon --version` - -transmission-daemon 3.00 (bb6b5a062e) - - -## wkhtmltopdf -`wkhtmltopdf ([\d.]+)` - -`wkhtmltopdf --version` - -wkhtmltopdf 0.12.6 - -## systemd -`systemd ([\d]+)` - -`systemd --version` - -systemd 255 (255.4-1ubuntu8.4) - - -## dmidecode -`([\d.]+)` - -`dmidecode --version` - -3.3 - -## git-lfs -`git-lfs/([\d.]+)` - -`git-lfs --version` - -git-lfs/3.4.1 (GitHub; linux arm64; go 1.22.2) - -## zlib1g -`([\d.]+)` - -`pkg-config --modversion zlib` - -1.2.11 - -## zlib1g_dev -`([\d.]+)` - -`pkg-config --modversion zlib` - -1.2.11 diff --git a/script/get-git-repo/README-extra.md b/script/get-git-repo/README-extra.md deleted file mode 100644 index 9ef54386b..000000000 --- a/script/get-git-repo/README-extra.md +++ /dev/null @@ -1,20 +0,0 @@ -# Get GIT Repository -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) git clones any specified GIT repository. - -## Commands -To install -``` -cm run script --tags=get,git,repo,_repo.,[VARIATION] -``` -where [VARIATION] is one of -* `patch:` Applies the `git.patch` to the cloned git repository -* `short-history:` Uses a git depth of last 10 commits (significantly reduces the download size) -* `full-history:` Uses the full git history -* `no-recurse-submodules:` Only download the main repository - -## Exported Variables -* `MLC_GIT_CHECKOUT_PATH`: Directory path of the cloned git repository - -## Supported and Tested OS -1. Ubuntu 18.04, 20.04, 22.04 -2. RHEL 9 diff --git a/script/get-git-repo/customize.py b/script/get-git-repo/customize.py index 8f9e3ea31..f3b3c4357 100644 --- a/script/get-git-repo/customize.py +++ b/script/get-git-repo/customize.py @@ -16,7 +16,7 @@ def preprocess(i): env_key = get_env_key(env) - cm_git_url = env['MLC_GIT_URL'] + mlc_git_url = env['MLC_GIT_URL'] if 'MLC_GIT_REPO_NAME' not in env: update_env( diff --git a/script/get-go/README-extra.md b/script/get-go/README-extra.md deleted file mode 100644 index 327cee0a9..000000000 --- a/script/get-go/README-extra.md +++ /dev/null @@ -1,10 +0,0 @@ -# Get GO Tool -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed GO tool on the system. 
- -## Exported Variables -* `MLC_GO_BIN_WITH_PATH` -* `+PATH` - -## Supported and Tested OS -1. Ubuntu 18.04, 20.04, 22.04 -2. RHEL 9 diff --git a/script/get-ipol-src/README-extra.md b/script/get-ipol-src/README-extra.md deleted file mode 100644 index 1618d0ed0..000000000 --- a/script/get-ipol-src/README-extra.md +++ /dev/null @@ -1 +0,0 @@ -20240127: Grigori added patch to support latest PIL diff --git a/script/get-java/README-extra.md b/script/get-java/README-extra.md deleted file mode 100644 index 232fbe6e0..000000000 --- a/script/get-java/README-extra.md +++ /dev/null @@ -1,6 +0,0 @@ -# Windows - -## Misc - -* https://jdk.java.net/java-se-ri/11 -* https://learn.microsoft.com/fr-fr/java/openjdk/download diff --git a/script/get-javac/README-extra.md b/script/get-javac/README-extra.md deleted file mode 100644 index 232fbe6e0..000000000 --- a/script/get-javac/README-extra.md +++ /dev/null @@ -1,6 +0,0 @@ -# Windows - -## Misc - -* https://jdk.java.net/java-se-ri/11 -* https://learn.microsoft.com/fr-fr/java/openjdk/download diff --git a/script/get-llvm/README-extra.md b/script/get-llvm/README-extra.md deleted file mode 100644 index a57c16f5a..000000000 --- a/script/get-llvm/README-extra.md +++ /dev/null @@ -1,96 +0,0 @@ -# Get LLVM -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed llvm on the system and if not found calls the [install script for llvm](../script/install-llvm-prebuilt). - -## Exported Variables -* `MLC_LLVM_CLANG_BIN` -* `MLC_LLVM_CLANG_BIN_WITH_PATH` -* `MLC_C_COMPILER_BIN` -* `MLC_C_COMPILER_WITH_PATH` -* `MLC_CXX_COMPILER_BIN` -* `MLC_CXX_COMPILER_WITH_PATH` -* `MLC_COMPILER_*` -* `MLC_LINKER_*` - -## Supported and Tested OS -1. Ubuntu 18.04, 20.04, 22.04 -2. RHEL 9 - -# CLI - -## Default -```bash -cm run script "get llvm" -``` -or -```bash -cm run script --tags=get,llvm -``` - -## Version - -```bash -cm run script "get llvm" --version=14.0.0 -``` - -## Version min -```bash -cm run script "get llvm" --version_min=12.0.0 -``` - -## Version max -```bash -cm run script "get llvm" --version_max=13.999.999 --version_max_usable=13.0.0 -``` - -## Detect llvm3 in non-standard path -```bash -cm run script "get llvm" --path={directory with llvm} -``` - -### Detect llvm with non-standard name -```bash -cm run script "get llvm" --input={full path to clang} -``` - -## Force new detection even if llvm is already found and cached -```bash -cm run script "get llvm" --new -``` - -## Test - -```bash -cm run script "app image corner-detection" -``` - -## Reproducibility matrix - -*Test detection and installation on different platforms* - -* Windows, Linux, MacOS - -### RHEL 9 - -#### v14.0.0: ✓ - -```bash -cm rm cache -f -cm run script "get llvm" --version=14.0.0 -cm run script "app image corner-detection" -``` - -#### v13.0.0: Need special command - -```bash -cm rm cache -f -cm run script "get llvm" --version=13.0.0 --env.MLC_LLVM_PACKAGE=clang+llvm-13.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz -cm run script "app image corner-detection" -``` - -#### v12.0.0: Need special command - -```bash -cm rm cache -f -cm run script "get llvm" --version=12.0.0 --env.MLC_LLVM_PACKAGE=clang+llvm-12.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz -cm run script "app image corner-detection" -``` diff --git a/script/get-microtvm/README-extra.md b/script/get-microtvm/README-extra.md deleted file mode 100644 index 3a27d6e1a..000000000 --- a/script/get-microtvm/README-extra.md +++ /dev/null @@ -1,5 +0,0 @@ -# GET-MICROTVM -This [CM 
script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) clones the git repository of [Microtvm](https://github.com/octoml/microtvm) and cache it in CM for reuse across other CM scripts. - -## Exported Variables -1. [CN_MICROTVM_SOURCE](https://github.com/mlcommons/ck/blob/master/mlc-mlops/script/get-microtvm/customize.py#L24): Location in CM cache where microtvm git repository is cloned. diff --git a/script/get-ml-model-abtf-ssd-pytorch/README-extra.md b/script/get-ml-model-abtf-ssd-pytorch/README-extra.md deleted file mode 100644 index e08259617..000000000 --- a/script/get-ml-model-abtf-ssd-pytorch/README-extra.md +++ /dev/null @@ -1,5 +0,0 @@ -# Example to import local model - -```bash -cm run script --tags=get,ml-model,abtf-ssd-pytorch,_local.test_8mp.pth -``` diff --git a/script/get-ml-model-huggingface-zoo/README-extra.md b/script/get-ml-model-huggingface-zoo/README-extra.md deleted file mode 100644 index b7ec3407b..000000000 --- a/script/get-ml-model-huggingface-zoo/README-extra.md +++ /dev/null @@ -1,21 +0,0 @@ -# Examples - -```bash -cmr "get ml-model huggingface zoo _model-stub.alpindale/Llama-2-13b-ONNX" --model_filename=FP32/LlamaV2_13B_float32.onnx --full_subfolder=FP32 -``` - -```bash -cmr "get ml-model huggingface zoo _model-stub.microsoft/Mistral-7B-v0.1-onnx" --model_filename=Mistral-7B-v0.1.onnx,Mistral-7B-v0.1.onnx.data -``` - -```bash -cmr "get ml-model huggingface zoo _model-stub.Intel/gpt-j-6B-int8-static" --model_filename=model.onnx --full_subfolder=. -``` - -```bash -cmr "get ml-model huggingface zoo _model-stub.runwayml/stable-diffusion-v1-5" --revision=onnx --model_filename=unet/model.onnx,unet/weights.pb -``` - -```bash -cmr "get ml-model huggingface zoo _model-stub.ctuning/mlperf-inference-bert-onnx-fp32-squad-v1.1" --model_filename=model.onnx -``` diff --git a/script/get-ml-model-mobilenet/README-extra.md b/script/get-ml-model-mobilenet/README-extra.md deleted file mode 100644 index 24bc0e34f..000000000 --- a/script/get-ml-model-mobilenet/README-extra.md +++ /dev/null @@ -1,15 +0,0 @@ -# Get ML Model MobileNet -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) downloads the MobileNet model and adds it to CM cache with relevant meta data. - -## How To -```bash -cm run script --tags=get,ml-model,mobilenet,_[VARIATION] -``` -where, -* `[VARIATION]` is one of `tf-fp32`, `tf-int8`, `onnx-v1-opset-8`, `onnx-v1-opset-11`, `onnx-int8`. - -## Exported Variables -* `MLC_ML_MODEL_FILE:` Model filename -* `MLC_ML_MODEL_FILE_WITH_PATH:` Full path to model file -* `MLC_ML_MODEL_PATH:` Path to folder containing the model file -* More env variables being exported are given in [cm.json file](_cm.json) diff --git a/script/get-ml-model-resnet50/README-extra.md b/script/get-ml-model-resnet50/README-extra.md deleted file mode 100644 index 87e82b92c..000000000 --- a/script/get-ml-model-resnet50/README-extra.md +++ /dev/null @@ -1,15 +0,0 @@ -# Get ML Model Resnet50 -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) downloads the Resnet50 model and adds it to CM cache with relevant meta data. - -## How To -```bash -cm run script --tags=get,ml-model,resnet50,_[VARIATION] -``` -where, -* `[VARIATION]` is one of `onnx` (alias `onnxruntime`), `pytorch`, `tensorflow` (alias `tf`) , `fp32`, `onnx-1.5-opset-8`, `onnx-1.5-opset-11`. 
- -## Exported Variables -* `MLC_ML_MODEL_FILE:` Model filename -* `MLC_ML_MODEL_FILE_WITH_PATH:` Full path to model file -* `MLC_ML_MODEL_PATH:` Path to folder containing the model file -* More env variables being exported are given in [cm.json file](_cm.json) diff --git a/script/get-ml-model-retinanet/README-extra.md b/script/get-ml-model-retinanet/README-extra.md deleted file mode 100644 index 246c54db8..000000000 --- a/script/get-ml-model-retinanet/README-extra.md +++ /dev/null @@ -1,16 +0,0 @@ -# Get ML Model Retinanet -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) downloads the Retinanet model and adds it to CM cache with relevant meta data. - -## How To -```bash -cm run script --tags=get,ml-model,retinanet,_[VARIATION] -``` -where, -* `[VARIATION]` is one of `onnx-fp32`, `pytorch-fp32` or `pytorch-fp32-weights`. - -## Exported Variables -* `MLC_ML_MODEL_FILE:` Model filename -* `MLC_ML_MODEL_FILE_WITH_PATH:` Full path to model file -* `MLC_ML_MODEL_PATH:` Path to folder containing the model file -* More env variables being exported are given in [cm.json file](_cm.json) - diff --git a/script/get-mlperf-inference-loadgen/README-extra.md b/script/get-mlperf-inference-loadgen/README-extra.md deleted file mode 100644 index 7af6a0e4a..000000000 --- a/script/get-mlperf-inference-loadgen/README-extra.md +++ /dev/null @@ -1,26 +0,0 @@ -# Get MLCommons Inference Loadgen - -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) builds and installs -the Loadgen library from [MLCommons Inference repository](https://github.com/mlcommons/inference). - -## Commands -To install -``` -cm run script --tags=get,mlperf,inference,loadgen --version=[VERSION] -``` -where -[VERSION] is one of -* `master:` Uses the master branch of inference source repository to build loadgen -* `r2.1:` Uses the release branch used for MLCommons inference 2.1 round to build loadgen - -## Exported Variables -* `C_INCLUDE_PATH` -* `CPLUS_INCLUDE_PATH` -* `LD_LIBRARY_PATH` -* `DYLD_FALLBACK_LIBRARY_PATH` -* `PYTHONPATH` - -## Supported and Tested OS -1. Ubuntu 18.04, 20.04, 22.04 -2. RHEL 9 -3. Windows (installs into Python distro directly) diff --git a/script/get-mlperf-inference-loadgen/tests/download-and-install.bat b/script/get-mlperf-inference-loadgen/tests/download-and-install.bat index 868f0296c..299848144 100644 --- a/script/get-mlperf-inference-loadgen/tests/download-and-install.bat +++ b/script/get-mlperf-inference-loadgen/tests/download-and-install.bat @@ -1,2 +1,2 @@ -cmr "get loadgen _download" +mlcr "get loadgen _download" diff --git a/script/get-mlperf-inference-nvidia-common-code/README-extra.md b/script/get-mlperf-inference-nvidia-common-code/README-extra.md deleted file mode 100644 index 411a2248c..000000000 --- a/script/get-mlperf-inference-nvidia-common-code/README-extra.md +++ /dev/null @@ -1,9 +0,0 @@ -# Get MLPerf Nvidia Common code -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) exports the PYTHONPATH to the common code used by Nvidia for MLPerf submissions - -## Exported Variables -* `+PYTHONPATH` - -## Supported and Tested OS -1. Ubuntu 18.04, 20.04, 22.04 -2. 
RHEL 9 diff --git a/script/get-mlperf-inference-nvidia-scratch-space/README-extra.md b/script/get-mlperf-inference-nvidia-scratch-space/README-extra.md deleted file mode 100644 index 582991f6d..000000000 --- a/script/get-mlperf-inference-nvidia-scratch-space/README-extra.md +++ /dev/null @@ -1 +0,0 @@ -# CM script diff --git a/script/get-mlperf-inference-results/README-extra.md b/script/get-mlperf-inference-results/README-extra.md deleted file mode 100644 index df428fff8..000000000 --- a/script/get-mlperf-inference-results/README-extra.md +++ /dev/null @@ -1,18 +0,0 @@ -# Get MLCommons Inference Results -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) git clones the [MLCommons Inference results repository](https://github.com/mlcommons/inference_v2.1). - -## Commands -To install -``` -cm run script --tags=get,mlperf,inference,results --version=[VERSION] -``` - -[VERSION] is one of -* `v2.1:` MLCommons inference 2.1 round results - -## Exported Variables -* `MLC_MLPERF_INFERENCE_RESULTS_PATH`: Directory path to the inference results repository - -## Supported and Tested OS -1. Ubuntu 18.04, 20.04, 22.04 -2. RHEL 9 diff --git a/script/get-mlperf-inference-src/README-extra.md b/script/get-mlperf-inference-src/README-extra.md deleted file mode 100644 index c02697077..000000000 --- a/script/get-mlperf-inference-src/README-extra.md +++ /dev/null @@ -1,29 +0,0 @@ -# Get MLCommons Inference Source -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) git clones the [MLCommons Inference repository](https://github.com/mlcommons/inference). - -## Commands -To install -``` -cm run script --tags=get,mlperf,inference,src,[VARIATION] --version=[VERSION] -``` -where [VARIATION] is one of -* `default:` Works with the official MLCommons inference repository. Uses `short-history` variation -* `patch:` Applies the `git.patch` to the cloned git repository -* `octoml:` Works with the OctoML fork of the MLCommons inference repository. Uses `short-history` variation -* `short-history:` Uses a git depth of last 10 commits (significantly reduces the download size) -* `full-history:` Uses the full git history -* `no-recurse-submodules:` Only download the main repository - -[VERSION] is one of -* `master:` Uses the master branch -* `r2.1:` Uses the release branch used for MLCommons inference 2.1 round - -## Exported Variables -* `MLC_MLPERF_INFERENCE_SOURCE`: Directory path of the cloned inference repository -* `MLC_MLPERF_INFERENCE_VISION_PATH`: Directory path to the vision folder inside the inference repository -* `PYTHONPATH`: Is appended with the paths to vision module and the submission tools module -* `MLC_MLPERF_INFERENCE_MODELS`: This `state` variable contains the configuration of the MLPerf models as per the selected version - -## Supported and Tested OS -1. Ubuntu 18.04, 20.04, 22.04 -2. RHEL 9 diff --git a/script/get-mlperf-inference-sut-configs/README-extra.md b/script/get-mlperf-inference-sut-configs/README-extra.md deleted file mode 100644 index 41e6b8cc9..000000000 --- a/script/get-mlperf-inference-sut-configs/README-extra.md +++ /dev/null @@ -1,6 +0,0 @@ -# Get Config SUT MLPerf Inference -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) loads the MLPerf inference performance configuration of a given System Under Test (SUT). - -## Supported and Tested OS -1. Ubuntu 18.04, 20.04, 22.04 -2. 
RHEL 9 diff --git a/script/get-mlperf-inference-sut-description/customize.py b/script/get-mlperf-inference-sut-description/customize.py index d93bb2d00..13f69de8d 100644 --- a/script/get-mlperf-inference-sut-description/customize.py +++ b/script/get-mlperf-inference-sut-description/customize.py @@ -114,11 +114,11 @@ def preprocess(i): cuda_version = " , CUDA " + env['MLC_CUDA_VERSION'] state['MLC_SUT_META']['other_software_stack'] += cuda_version - if 'cm_cuda_device_prop' in state: - state['MLC_SUT_META']['accelerator_frequency'] = state['cm_cuda_device_prop']['Max clock rate'] + if 'mlc_cuda_device_prop' in state: + state['MLC_SUT_META']['accelerator_frequency'] = state['mlc_cuda_device_prop']['Max clock rate'] state['MLC_SUT_META']['accelerator_memory_capacity'] = str(int( - state['cm_cuda_device_prop']['Global memory']) / (1024 * 1024.0 * 1024)) + " GB" - state['MLC_SUT_META']['accelerator_model_name'] = state['cm_cuda_device_prop']['GPU Name'] + state['mlc_cuda_device_prop']['Global memory']) / (1024 * 1024.0 * 1024)) + " GB" + state['MLC_SUT_META']['accelerator_model_name'] = state['mlc_cuda_device_prop']['GPU Name'] num_accelerators = env.get('MLC_CUDA_NUM_DEVICES', "1") state['MLC_SUT_META']['accelerators_per_node'] = num_accelerators diff --git a/script/get-mlperf-logging/README-extra.md b/script/get-mlperf-logging/README-extra.md deleted file mode 100644 index 32392035f..000000000 --- a/script/get-mlperf-logging/README-extra.md +++ /dev/null @@ -1,16 +0,0 @@ -# Get MLCommons Training Source - -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) -git clones and installs the [MLCommons Logging library]( https://github.com/mlcommons/logging ). - -## Commands - -To install -``` -cm run script --tags=get,mlperf,logging -``` -or - -``` -cmr "get mlperf logging" -``` diff --git a/script/get-mlperf-training-src/README-extra.md b/script/get-mlperf-training-src/README-extra.md deleted file mode 100644 index 5ebb33d8d..000000000 --- a/script/get-mlperf-training-src/README-extra.md +++ /dev/null @@ -1,27 +0,0 @@ -# Get MLCommons Training Source -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) git clones the [MLCommons Training repository](https://github.com/mlcommons/training). - -## Commands -To install -``` -cm run script --tags=get,mlperf,training,src,[VARIATION] --version=[VERSION] -``` -where [VARIATION] is one of -* `default:` Works with the official MLCommons inference repository. Uses `short-history` variation -* `patch:` Applies the `git.patch` to the cloned git repository -* `octoml:` Works with the OctoML fork of the MLCommons inference repository. Uses `short-history` variation -* `short-history:` Uses a git depth of last 10 commits (significantly reduces the download size) -* `full-history:` Uses the full git history -* `no-recurse-submodules:` Only download the main repository - -[VERSION] is one of -* `master:` Uses the master branch -* `r2.1:` Uses the release branch used for MLCommons training 2.1 round - -## Exported Variables -* `MLC_MLPERF_TRAINING_SOURCE`: Directory path of the cloned inference repository -* `PYTHONPATH`: Is appended with the paths to vision module and the submission tools module - -## Supported and Tested OS -1. Ubuntu 18.04, 20.04, 22.04 -2. 
RHEL 9 diff --git a/script/get-nvidia-mitten/README-extra.md b/script/get-nvidia-mitten/README-extra.md deleted file mode 100644 index 8c1a21948..000000000 --- a/script/get-nvidia-mitten/README-extra.md +++ /dev/null @@ -1 +0,0 @@ -TBD: compile https://github.com/NVIDIA/mitten diff --git a/script/get-openssl/README-extra.md b/script/get-openssl/README-extra.md deleted file mode 100644 index cb54103fc..000000000 --- a/script/get-openssl/README-extra.md +++ /dev/null @@ -1,8 +0,0 @@ -# Get OpenSSL -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects openssl installed on the system and if not found calls the [install script for openssl](../script/install-openssl). - -## Exported Variables -* `MLC_OPENSSL_BIN_WITH_PATH` - -## Supported and Tested OS -1. Ubuntu 18.04, 20.04, 22.04 diff --git a/script/get-platform-details/README-EXTRA.md b/script/get-platform-details/README-EXTRA.md index 22b4875e8..45ac261f2 100644 --- a/script/get-platform-details/README-EXTRA.md +++ b/script/get-platform-details/README-EXTRA.md @@ -1,7 +1,7 @@ Please execute the following CM command to obtain the platform details of the System Under Test (SUT): ``` -cm run script --tags=get,platform-details --platform_details_dir= +mlcr --tags=get,platform-details --platform_details_dir= ``` diff --git a/script/get-preprocessed-dataset-criteo/README-extra.md b/script/get-preprocessed-dataset-criteo/README-extra.md deleted file mode 100644 index 745cf6861..000000000 --- a/script/get-preprocessed-dataset-criteo/README-extra.md +++ /dev/null @@ -1,16 +0,0 @@ -# Get Preprocessed Criteo Dataset -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) preprocesses the Criteo dataset. - -## How To -```bash -cm run script --tags=get,criteo,preprocessed --threads=[NUM_THREADS] -``` -where, -* `[DIRECTORY]:` is the folder to store the preprocessed dataset. Default is current work directory -* `[NUM_THREADS:]` is the number of threads to do preprocessing. Default is number of host cpus. - - -## Exported Variables -* `[MLC_DATASET_PREPROCESSED_PATH]:` Directory where the preprocessed images are stored - - diff --git a/script/get-preprocessed-dataset-imagenet/README-extra.md b/script/get-preprocessed-dataset-imagenet/README-extra.md deleted file mode 100644 index ab184e5a8..000000000 --- a/script/get-preprocessed-dataset-imagenet/README-extra.md +++ /dev/null @@ -1,26 +0,0 @@ -# Get Preprocessed Imagenet Dataset -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) preprocesses the Imagenet dataset. - -## How To -```bash -cm run script --tags=get,imagenet,preprocessed,_[VARIATION] --dir=[DIRECTORY] --threads=[NUM_THREADS] -``` -where, -* `[DIRECTORY]:` is the folder to store the preprocessed dataset. Default is current work directory -* `[NUM_THREADS:]` is the number of threads to do preprocessing. Default is number of host cpus. 
-and the supported [VARIATIONS] (comma separated and beginning with _) are -*`[1]:` Preprocess only 1 image -*`[500]:` Preprocess first 500 images -*`[full]:` Preprocess the full dataset -*`[NHWC]:` Preprocess the dataset with `Channel` component at end -*`[NCHW]:` Preprocess the dataset with `Channel` component at beginning - -## Input Variables coming from Dependencies -* `[MLC_DATASET_PATH]:` Folder path to Imagenet dataset -* `[MLC_DATASET_AUX_PATH]:` Folder path to Imagenet auxiliary dataset (to get image list) -* `[MLC_DATASET_IMAGES_LIST]:` File path containing the image names - -## Exported Variables -* `[MLC_DATASET_PREPROCESSED_PATH]:` Directory where the preprocessed images are stored - - diff --git a/script/get-preprocessed-dataset-openimages/README-extra.md b/script/get-preprocessed-dataset-openimages/README-extra.md deleted file mode 100644 index ee9878b4a..000000000 --- a/script/get-preprocessed-dataset-openimages/README-extra.md +++ /dev/null @@ -1,28 +0,0 @@ -# Get Preprocessed Open Images Dataset -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) preprocesses the Imagenet dataset. - -## How To -```bash -cm run script --tags=get,imagenet,preprocessed,_[VARIATION] --dir=[DIRECTORY] --threads=[NUM_THREADS] -``` -where, -* `[DIRECTORY]:` is the folder to store the preprocessed dataset. Default is current work directory -* `[NUM_THREADS:]` is the number of threads to do preprocessing. Default is number of host cpus. -and the supported [VARIATIONS] (comma separated and beginning with _) are -*`[1]:` Preprocess only 1 image -*`[500]:` Preprocess first 500 images -*`[full]:` Preprocess the full dataset -*`[validation]:` Preprocess the validation dataset -*`[calibration]:` Preprocess the calibration dataset -*`[NHWC]:` Preprocess the dataset with `Channel` component at end -*`[NCHW]:` Preprocess the dataset with `Channel` component at beginning - -## Input Variables coming from Dependencies -* `[MLC_DATASET_PATH]:` Folder path to Imagenet dataset -* `[MLC_DATASET_IMAGES_LIST]:` File path containing the image names -* `[MLC_DATASET_OPENIMAGES_RESIZE]:` Image width to resize to (default 800) - -## Exported Variables -* `[MLC_DATASET_PREPROCESSED_PATH]:` Directory where the preprocessed images are stored - - diff --git a/script/get-python3/README-extra.md b/script/get-python3/README-extra.md deleted file mode 100644 index 5f784aa75..000000000 --- a/script/get-python3/README-extra.md +++ /dev/null @@ -1,70 +0,0 @@ -# Detect or install python - -## New ENV - -* MLC_PYTHON_BIN -* MLC_PYTHON_BIN_WITH_PATH -* MLC_PYTHON_VERSION -* MLC_PYTHON_CACHE_TAGS - -* PATH -* LD_LIBRARY_PATH -* C_INCLUDE_PATH - -## New state - - -# CLI - -## Default -```bash -cm run script "get python" -``` -or -```bash -cm run script --tags=get,python -``` - -## Version - -```bash -cm run script "get python" --version=3.10.6 -``` - -## Version min -```bash -cm run script "get python" --version_min=3.9 -``` - -## Version max -```bash -cm run script "get python" --version_max=3.9.999 --version_max_usable=3.9.12 -``` - -## Detect python3 in non-standard path -```bash -cm run script "get python" --path={directory with python3} -``` - -### Detect python with non-standard name -```bash -cm run script "get python" --input={full path to python} -``` - -## Force new detection even if python is already found and cached -```bash -cm run script "get python" --new -``` - -## Test - -```bash -cm run script "print python hello-world" -``` - -## Reproducibility matrix - -*Test detection and 
installation on different platforms:* - -* Windows, Linux, MacOS - diff --git a/script/get-rocm-devices/README.md b/script/get-rocm-devices/README.md index 7b1f4474c..294a147bb 100644 --- a/script/get-rocm-devices/README.md +++ b/script/get-rocm-devices/README.md @@ -1,4 +1,4 @@ Run this script ``` -cm run script --tags=get,rocm-devices +mlcr --tags=get,rocm-devices ``` diff --git a/script/get-rocm-devices/customize.py b/script/get-rocm-devices/customize.py index 76930d30d..8c2903fb0 100644 --- a/script/get-rocm-devices/customize.py +++ b/script/get-rocm-devices/customize.py @@ -57,10 +57,10 @@ def postprocess(i): key_env = 'MLC_ROMLC_DEVICE_PROP_' + key.upper().replace(' ', '_') env[key_env] = val - state['cm_rocm_num_devices'] = gpu_id + 1 + state['mlc_romlc_num_devices'] = gpu_id + 1 env['MLC_ROMLC_NUM_DEVICES'] = gpu_id + 1 - state['cm_rocm_device_prop'] = p - state['cm_rocm_devices_prop'] = gpu + state['mlc_romlc_device_prop'] = p + state['mlc_romlc_devices_prop'] = gpu return {'return': 0} diff --git a/script/get-rocm-devices/meta.yaml b/script/get-rocm-devices/meta.yaml index 21a91b373..04ec83641 100644 --- a/script/get-rocm-devices/meta.yaml +++ b/script/get-rocm-devices/meta.yaml @@ -20,8 +20,8 @@ docker: run: false all_gpus: 'yes' skip_run_cmd: 'no' - skip_cm_sys_upgrade: 'yes' - cm_repo_flags: '--checkout=dev' + skip_mlc_sys_upgrade: 'yes' + mlc_repo_flags: '--checkout=dev' use_host_group_id: 'yes' image_tag_extra: '-cm-dev' diff --git a/script/get-spec-ptd/README-extra.md b/script/get-spec-ptd/README-extra.md deleted file mode 100644 index 1b5f4d7fb..000000000 --- a/script/get-spec-ptd/README-extra.md +++ /dev/null @@ -1,16 +0,0 @@ -# Get SPEC Power Daemon -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) git clones the [SPEC Power Daemon](https://github.com/mlcommons/power) used by MLPerf for power measurements. - -## Commands -To install -``` -cm run script --tags=get,mlperf,power,src -``` - -## Exported Variables -* `MLC_SPEC_PTD_PATH'`: Path to the PTDaemon -* `MLC_MLPERF_PTD_PATH'`: Path to the PTDaemon (same as `MLC_SPEC_PTD_DAEMON`) - -## Supported and Tested OS -1. Ubuntu 18.04, 20.04, 22.04 -2. RHEL 9 diff --git a/script/get-sys-utils-cm/run-arch.sh b/script/get-sys-utils-cm/run-arch.sh index 1c2105c62..3077af5a6 100644 --- a/script/get-sys-utils-cm/run-arch.sh +++ b/script/get-sys-utils-cm/run-arch.sh @@ -32,7 +32,7 @@ ${MLC_SUDO} ${MLC_PACKAGE_TOOL} -Syu && \ zip # Install Python deps though preference is to install them -# via cmr "get generic-python-lib _package.{Python PIP package name}" +# via mlcr "get generic-python-lib _package.{Python PIP package name}" if [[ "${MLC_SKIP_PYTHON_DEPS}" != "yes" ]]; then . ${MLC_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh test $? -eq 0 || exit $? diff --git a/script/get-sys-utils-cm/run-debian.sh b/script/get-sys-utils-cm/run-debian.sh index 89112f7f1..13a2d5bed 100644 --- a/script/get-sys-utils-cm/run-debian.sh +++ b/script/get-sys-utils-cm/run-debian.sh @@ -53,7 +53,7 @@ ${MLC_SUDO} ${MLC_APT_TOOL} update && \ libncurses5 # Install Python deps though preference is to install them -# via cmr "get generic-python-lib _package.{Python PIP package name}" +# via mlcr "get generic-python-lib _package.{Python PIP package name}" if [[ "${MLC_SKIP_PYTHON_DEPS}" != "yes" ]]; then . ${MLC_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh test $? -eq 0 || exit $? 
diff --git a/script/get-sys-utils-cm/run-macos.sh b/script/get-sys-utils-cm/run-macos.sh index a76c0524d..91c3a6cc4 100644 --- a/script/get-sys-utils-cm/run-macos.sh +++ b/script/get-sys-utils-cm/run-macos.sh @@ -36,7 +36,7 @@ brew update && \ python3 # Install Python deps though preference is to install them -# via cmr "get generic-python-lib _package.{Python PIP package name}" +# via mlcr "get generic-python-lib _package.{Python PIP package name}" if [[ "${MLC_SKIP_PYTHON_DEPS}" != "yes" ]]; then . ${MLC_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh test $? -eq 0 || exit $? diff --git a/script/get-sys-utils-cm/run-rhel.sh b/script/get-sys-utils-cm/run-rhel.sh index 05ba53c04..e91d70d07 100644 --- a/script/get-sys-utils-cm/run-rhel.sh +++ b/script/get-sys-utils-cm/run-rhel.sh @@ -39,7 +39,7 @@ ${MLC_SUDO} ${MLC_PACKAGE_TOOL} update && \ zip # Install Python deps though preference is to install them -# via cmr "get generic-python-lib _package.{Python PIP package name}" +# via mlcr "get generic-python-lib _package.{Python PIP package name}" if [[ "${MLC_SKIP_PYTHON_DEPS}" != "yes" ]]; then . ${MLC_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh test $? -eq 0 || exit $? diff --git a/script/get-sys-utils-cm/run-sles.sh b/script/get-sys-utils-cm/run-sles.sh index c597387a9..6e67072e8 100644 --- a/script/get-sys-utils-cm/run-sles.sh +++ b/script/get-sys-utils-cm/run-sles.sh @@ -35,7 +35,7 @@ ${MLC_SUDO} ${MLC_PACKAGE_TOOL} update && \ zip # Install Python deps though preference is to install them -# via cmr "get generic-python-lib _package.{Python PIP package name}" +# via mlcr "get generic-python-lib _package.{Python PIP package name}" if [[ "${MLC_SKIP_PYTHON_DEPS}" != "yes" ]]; then . ${MLC_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh test $? -eq 0 || exit $? diff --git a/script/get-sys-utils-cm/run-ubuntu.sh b/script/get-sys-utils-cm/run-ubuntu.sh index 72f7b6fd1..d6fb9f7ed 100644 --- a/script/get-sys-utils-cm/run-ubuntu.sh +++ b/script/get-sys-utils-cm/run-ubuntu.sh @@ -57,7 +57,7 @@ ${MLC_SUDO} ${MLC_APT_TOOL} update && \ zlib1g-dev # Install Python deps though preference is to install them -# via cmr "get generic-python-lib _package.{Python PIP package name}" +# via mlcr "get generic-python-lib _package.{Python PIP package name}" if [[ "${MLC_SKIP_PYTHON_DEPS}" != "yes" ]]; then . ${MLC_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh test $? -eq 0 || exit $? diff --git a/script/get-tensorrt/README-extra.md b/script/get-tensorrt/README-extra.md deleted file mode 100644 index 925426511..000000000 --- a/script/get-tensorrt/README-extra.md +++ /dev/null @@ -1,11 +0,0 @@ -# Get TensorRT - -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) installs TensorRT when the corrsponding [tar file](https://docs.nvidia.com/deeplearning/tensorrt/install-guide/index.html#installing-tar) is provided as an input. - -## How to Use -``` -cm run script --tags=get,tensorrt --tar_file= -``` - -## Supported and Tested OS -1. 
Ubuntu 18.04, 20.04, 22.04 diff --git a/script/get-tensorrt/customize.py b/script/get-tensorrt/customize.py index 17f19d63f..639d901f3 100644 --- a/script/get-tensorrt/customize.py +++ b/script/get-tensorrt/customize.py @@ -91,7 +91,7 @@ def preprocess(i): tags = ["get", "tensorrt"] if env.get('MLC_TENSORRT_REQUIRE_DEV', '') != 'yes': tags.append("_dev") - return {'return': 1, 'error': 'Please envoke cmr "' + + return {'return': 1, 'error': 'Please invoke mlcr "' + " ".join(tags) + '" --tar_file={full path to the TensorRT tar file}'} print('Untaring file - can take some time ...') diff --git a/script/get-terraform/README-extra.md b/script/get-terraform/README-extra.md deleted file mode 100644 index e9cb784f7..000000000 --- a/script/get-terraform/README-extra.md +++ /dev/null @@ -1,9 +0,0 @@ -# Get Terraform -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed Terraform on the system and if not found calls the [install script for Terraform](../script/install-terraform-from-src). - -## Exported Variables -* `MLC_TERRAFORM_BIN_WITH_PATH` - -## Supported and Tested OS -1. Ubuntu 18.04, 20.04, 22.04 -2. RHEL 9 diff --git a/script/get-tvm-model/README-extra.md b/script/get-tvm-model/README-extra.md deleted file mode 100644 index e387efd57..000000000 --- a/script/get-tvm-model/README-extra.md +++ /dev/null @@ -1,21 +0,0 @@ -# CM script - -This script starts tuning (if specified) and compilation of any model using Apache TVM. - -## How To -```bash -cm run script --tags=get,tvm-model,_[VARIATION] -``` -where, `[VARIATION]` is one of -1) Frontend frameworks name (`onnx`, `pytorch`, `tensorflow`, `tflite`) -2) Precision (`fp32`, `int8`) -3) TVM Runtime (`virtual_machine` or `graph_executor`) -4) `tune-model` variation if you want to start tuning the model using TVM MetaScheduler -5) Model name (`model.#`) -6) Batch size (`batch_size.#`) -in 5 and 6 you can insert any suitable value instead of the symbol `#`, e.g. `model.bert` or `batch_size.8`. - -## Notes - -For PyTorch and TensorFlow frontends you should specify evironment variable `MLC_ML_MODEL_INPUT_SHAPES` with input shapes of the model you want to compile (e.g. `"input": (16, 3, 224, 224)`) or separate variables `MLC_ML_MODEL_IMAGE_NUM_CHANNELS`, `MLC_ML_MODEL_IMAGE_WIDTH`, `MLC_ML_MODEL_IMAGE_HEIGHT` for 2D CV models and `MLC_ML_MODEL_MAX_SEQ_LENGTH` for language models. -If your model is in ONNX format then all input shapes can be extracted automatically. diff --git a/script/get-tvm/README-extra.md b/script/get-tvm/README-extra.md deleted file mode 100644 index ae5cc929e..000000000 --- a/script/get-tvm/README-extra.md +++ /dev/null @@ -1,5 +0,0 @@ -```bash -cm run script "get llvm" --version=14.0.0 -cm run script "get tvm _llvm" --version=0.10.0 -cm run script "python app image-classification tvm-onnx" -``` diff --git a/script/get-zephyr-sdk/README-extra.md b/script/get-zephyr-sdk/README-extra.md deleted file mode 100644 index ae73f91f8..000000000 --- a/script/get-zephyr-sdk/README-extra.md +++ /dev/null @@ -1,19 +0,0 @@ -# GET-ZEPHYR-SDK -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) installs the [Zephyr-SDK](https://github.com/zephyrproject-rtos/sdk-ng/releases) from a prebuilt binary. - -## Install -```bash -cm run script --tags=get,zephyr-sdk --version=0.13.2 -``` -## Exported Variables -1.
[ZEPHYR_SDK_INSTALL_DIR](https://github.com/mlcommons/ck/blob/master/mlc-mlops/script/get-zephyr-sdk/customize.py#L13): Location in CM cache where Zephyr SDK is installed. -2. [ZEPHYR_TOOLCHAIN_VARIANT](https://github.com/mlcommons/ck/blob/master/mlc-mlops/script/get-zephyr-sdk/customize.py#L12) - -## Supported Versions -1. 0.13.1 -2. 0.13.2 -3. 0.15.0 - -## Supported and Tested OS -1. Ubuntu 18.04, 20.04, 22.04 -2. RHEL 9 diff --git a/script/install-cuda-prebuilt/README-extra.md b/script/install-cuda-prebuilt/README-extra.md deleted file mode 100644 index ca9a792ad..000000000 --- a/script/install-cuda-prebuilt/README-extra.md +++ /dev/null @@ -1,4 +0,0 @@ -# Notes - -This script is in a prototyping alpha stage. Needs to be considerably updated and unified! - diff --git a/script/install-llvm-prebuilt/README-extra.md b/script/install-llvm-prebuilt/README-extra.md deleted file mode 100644 index 992fd3cf1..000000000 --- a/script/install-llvm-prebuilt/README-extra.md +++ /dev/null @@ -1,99 +0,0 @@ -# Get LLVM -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed llvm on the system and if not found calls the [install script for llvm](../script/install-llvm-prebuilt). - -## Exported Variables -* `MLC_LLVM_CLANG_BIN` -* `MLC_LLVM_CLANG_BIN_WITH_PATH` -* `MLC_C_COMPILER_BIN` -* `MLC_C_COMPILER_WITH_PATH` -* `MLC_CXX_COMPILER_BIN` -* `MLC_CXX_COMPILER_WITH_PATH` -* `MLC_COMPILER_*` - -## Supported and Tested OS -1. Ubuntu 18.04, 20.04, 22.04 -2. RHEL 9 -3. Windows 10, 11 - -# CLI - -## Default -```bash -cm run script "install llvm prebuilt" -``` -or -```bash -cm run script --tags=get,llvm -``` - -## Version - -```bash -cm run script "install llvm prebuilt" --version=14.0.0 -``` - -## Version min -```bash -cm run script "install llvm prebuilt" --version_min=12.0.0 -``` - -## Version max -```bash -cm run script "install llvm prebuilt" --version_max=13.999.999 --version_max_usable=13.0.0 -``` - -## Force new detection even if llvm is already found and cached -```bash -cm run script "install llvm prebuilt" --new -``` - -## Test - -```bash -cm run script "app image corner-detection" -``` - -## Reproducibility matrix - -*Test detection and installation on different platforms* - -* Windows, Linux, MacOS - -### Ubuntu 22.04 - -* 17.0.6 -* 17.0.5 -* 17.0.4 -* 17.0.2 -* 16.0.4 -* 16.0.0 - `sudo apt install libncurses5` -* 15.0.6 -* 14.0.0 - - -### RHEL 9 - -#### v14.0.0: ✓ - -```bash -cm rm cache -f -cm run script "install llvm prebuilt" --version=14.0.0 -cm run script "app image corner-detection" -``` - -#### v13.0.0: Need special command - -```bash -cm rm cache -f -cm run script "install llvm prebuilt" --version=13.0.0 --env.MLC_LLVM_PACKAGE=clang+llvm-13.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz -cm run script "app image corner-detection" -``` - -#### v12.0.0: Need special command - -```bash -cm rm cache -f -cm run script "install llvm prebuilt" --version=12.0.0 --env.MLC_LLVM_PACKAGE=clang+llvm-12.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz -cm run script "app image corner-detection" -``` diff --git a/script/plug-prebuilt-cudnn-to-cuda/README-extra.md b/script/plug-prebuilt-cudnn-to-cuda/README-extra.md deleted file mode 100644 index 204c394fa..000000000 --- a/script/plug-prebuilt-cudnn-to-cuda/README-extra.md +++ /dev/null @@ -1,2 +0,0 @@ -Useful info: -* https://medium.com/@yushantripleseven/managing-multiple-cuda-cudnn-installations-ba9cdc5e2654 diff --git a/script/plug-prebuilt-cudnn-to-cuda/run.sh b/script/plug-prebuilt-cudnn-to-cuda/run.sh index 
4bcd029f7..0a3906e22 100644 --- a/script/plug-prebuilt-cudnn-to-cuda/run.sh +++ b/script/plug-prebuilt-cudnn-to-cuda/run.sh @@ -31,7 +31,7 @@ ${MLC_SUDO} chmod a+r ${MLC_CUDA_PATH_LIB}/libcudnn* echo "Adding file that cuDNN is installed ..." echo "" if [ "${MLC_SUDO}" == "sudo" ]; then - ${MLC_SUDO} sh -c "echo '${MLC_VERSION}' > ${CUDA_HOME}/cm_installed_cudnn.txt" + ${MLC_SUDO} sh -c "echo '${MLC_VERSION}' > ${CUDA_HOME}/mlc_installed_cudnn.txt" else - echo "${MLC_VERSION}" > ${CUDA_HOME}/cm_installed_cudnn.txt + echo "${MLC_VERSION}" > ${CUDA_HOME}/mlc_installed_cudnn.txt fi diff --git a/script/plug-prebuilt-cusparselt-to-cuda/run.sh b/script/plug-prebuilt-cusparselt-to-cuda/run.sh index e11a9596a..7783a35a6 100644 --- a/script/plug-prebuilt-cusparselt-to-cuda/run.sh +++ b/script/plug-prebuilt-cusparselt-to-cuda/run.sh @@ -31,7 +31,7 @@ ${MLC_SUDO} chmod a+r ${MLC_CUDA_PATH_LIB}/libcusparseLt* echo "Adding file that CUSPARSELT is installed ..." echo "" if [ "${MLC_SUDO}" == "sudo" ]; then - ${MLC_SUDO} sh -c "echo '${MLC_VERSION}' > ${CUDA_HOME}/cm_installed_cusparselt.txt" + ${MLC_SUDO} sh -c "echo '${MLC_VERSION}' > ${CUDA_HOME}/mlc_installed_cusparselt.txt" else - echo "${MLC_VERSION}" > ${CUDA_HOME}/cm_installed_cusparselt.txt + echo "${MLC_VERSION}" > ${CUDA_HOME}/mlc_installed_cusparselt.txt fi diff --git a/script/prune-bert-models/README-extra.md b/script/prune-bert-models/README-extra.md deleted file mode 100644 index e98cb6332..000000000 --- a/script/prune-bert-models/README-extra.md +++ /dev/null @@ -1 +0,0 @@ -Moved [here](https://github.com/ctuning/cm4research/blob/main/script/reproduce-neurips-paper-2022-arxiv-2204.09656/README-extra.md). diff --git a/script/publish-results-to-dashboard/code.py b/script/publish-results-to-dashboard/code.py index bcd1ff23a..068c9a7e6 100644 --- a/script/publish-results-to-dashboard/code.py +++ b/script/publish-results-to-dashboard/code.py @@ -88,7 +88,7 @@ def main(): for k in x: env_key = x[k] if os.environ.get(env_key, '') != '': - result['cm_misc_input_' + k] = os.environ[env_key] + result['mlc_misc_input_' + k] = os.environ[env_key] wandb.init(entity=dashboard_user, project=dashboard_project, diff --git a/script/remote-run-commands/README-extra.md b/script/remote-run-commands/README-extra.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/script/reproduce-mlperf-octoml-tinyml-results/README-extra.md b/script/reproduce-mlperf-octoml-tinyml-results/README-extra.md deleted file mode 100644 index ab78e4a31..000000000 --- a/script/reproduce-mlperf-octoml-tinyml-results/README-extra.md +++ /dev/null @@ -1,13 +0,0 @@ -This script reproduces OctoML MLPerf TinyML Submission from v1.0. -## Install -```bash -cm run script --tags=reproduce,tiny,mlperf,octoml,_[VARIANT],_[MODEL] -``` -where, -* `[VARIANT]` is one of `cmsis_nn`,`native` -* `[MODEL]` is one of `ad`, `ic`, `kws`, `vww` - -The generated binary can be located inside -```bash -find `cm find cache --tags=reproduce,tiny,mlperf,octoml,_[VARIANT],_[MODEL] -``` diff --git a/script/run-all-mlperf-models/run-bert-macos.sh b/script/run-all-mlperf-models/run-bert-macos.sh index 192248f78..e0275153c 100644 --- a/script/run-all-mlperf-models/run-bert-macos.sh +++ b/script/run-all-mlperf-models/run-bert-macos.sh @@ -37,26 +37,26 @@ function run_test() { power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' power="" #Add your run commands here... 
-find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' -submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -submission_cmd_scenario='cm run script --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +submission_cmd_scenario='mlcr --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd_single='cm run script --tags=generate-run-cmds,inference,_populate-readme \ +readme_cmd_single='mlcr --tags=generate-run-cmds,inference,_populate-readme \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd='cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +readme_cmd='mlcr --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' diff --git a/script/run-all-mlperf-models/run-bert.sh b/script/run-all-mlperf-models/run-bert.sh index bc9bbf16d..530c55e48 100644 --- a/script/run-all-mlperf-models/run-bert.sh +++ b/script/run-all-mlperf-models/run-bert.sh @@ -38,26 +38,26 @@ function run_test() { power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' #Add your run commands here... 
-find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' -submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -submission_cmd_scenario='cm run script --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +submission_cmd_scenario='mlcr --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd_single='cm run script --tags=generate-run-cmds,inference,_populate-readme \ +readme_cmd_single='mlcr --tags=generate-run-cmds,inference,_populate-readme \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd='cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +readme_cmd='mlcr --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' diff --git a/script/run-all-mlperf-models/run-cpp-implementation.sh b/script/run-all-mlperf-models/run-cpp-implementation.sh index c93bfdfa9..7159cbcd8 100644 --- a/script/run-all-mlperf-models/run-cpp-implementation.sh +++ b/script/run-all-mlperf-models/run-cpp-implementation.sh @@ -29,21 +29,21 @@ division="closed" POWER=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 " POWER="" -run "cm run script --tags=set,system,performance,mode" +run "mlcr --tags=set,system,performance,mode" #cpp -run "cm run script --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr --tags=generate-run-cmds,inference,_find-performance \ --model=resnet50 --implementation=cpp --device=cpu --backend=onnxruntime \ --adr.compiler.tags=gcc \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=2000 " -run "cm run script --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr --tags=generate-run-cmds,inference,_find-performance \ --model=retinanet --implementation=cpp --device=cpu --backend=onnxruntime \ --adr.compiler.tags=gcc \ --category=edge --division=open --scenario=Offline --quiet" -run "cm run script --tags=generate-run-cmds,inference,_submission \ +run "mlcr --tags=generate-run-cmds,inference,_submission \ --model=resnet50 --implementation=cpp --device=cpu --backend=onnxruntime \ --scenario=Offline \ --category=edge --division=$division --quiet \ @@ -53,7 +53,7 @@ run "cm run script 
--tags=generate-run-cmds,inference,_submission \ ${POWER} \ --results_dir=$HOME/results_dir" -run "cm run script --tags=generate-run-cmds,inference,_submission \ +run "mlcr --tags=generate-run-cmds,inference,_submission \ --model=retinanet --implementation=cpp --device=cpu --backend=onnxruntime \ --scenario=Offline \ --category=edge --division=$division --quiet \ @@ -63,7 +63,7 @@ run "cm run script --tags=generate-run-cmds,inference,_submission \ ${POWER} \ --results_dir=$HOME/results_dir" -run "cm run script --tags=generate-run-cmds,inference,_submission \ +run "mlcr --tags=generate-run-cmds,inference,_submission \ --model=resnet50 --implementation=cpp --device=cpu --backend=onnxruntime \ --scenario=SingleStream \ --category=edge --division=$division --quiet \ @@ -73,7 +73,7 @@ run "cm run script --tags=generate-run-cmds,inference,_submission \ ${POWER} \ --results_dir=$HOME/results_dir" -run "cm run script --tags=generate-run-cmds,inference,_submission \ +run "mlcr --tags=generate-run-cmds,inference,_submission \ --model=retinanet --implementation=cpp --device=cpu --backend=onnxruntime \ --scenario=SingleStream \ --category=edge --division=$division --quiet \ @@ -85,20 +85,20 @@ ${POWER} \ # GPU -run "cm run script --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr --tags=generate-run-cmds,inference,_find-performance \ --model=resnet50 --implementation=cpp --device=cuda --backend=onnxruntime \ --adr.compiler.tags=gcc \ --test_query_count=20000 \ --category=edge --division=open --scenario=Offline --quiet" -run "cm run script --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr --tags=generate-run-cmds,inference,_find-performance \ --model=retinanet --implementation=cpp --device=cuda --backend=onnxruntime \ --adr.compiler.tags=gcc \ --test_query_count=2000 \ --category=edge --division=open --scenario=Offline --quiet" -run "cm run script --tags=generate-run-cmds,inference,_submission \ +run "mlcr --tags=generate-run-cmds,inference,_submission \ --scenario=Offline \ --model=resnet50 --implementation=cpp --device=cuda --backend=onnxruntime \ --category=edge --division=$division --quiet \ @@ -108,7 +108,7 @@ run "cm run script --tags=generate-run-cmds,inference,_submission \ ${POWER} \ --results_dir=$HOME/results_dir" -run "cm run script --tags=generate-run-cmds,inference,_submission \ +run "mlcr --tags=generate-run-cmds,inference,_submission \ --model=retinanet --implementation=cpp --device=cuda --backend=onnxruntime \ --scenario=Offline \ --category=edge --division=$division --quiet \ @@ -119,7 +119,7 @@ ${POWER} \ --results_dir=$HOME/results_dir" -run "cm run script --tags=generate-run-cmds,inference,_submission \ +run "mlcr --tags=generate-run-cmds,inference,_submission \ --scenario=Offline \ --model=resnet50 --implementation=cpp --device=cuda --backend=onnxruntime \ --scenario=SingleStream \ @@ -130,7 +130,7 @@ run "cm run script --tags=generate-run-cmds,inference,_submission \ ${POWER} \ --results_dir=$HOME/results_dir" -run "cm run script --tags=generate-run-cmds,inference,_submission \ +run "mlcr --tags=generate-run-cmds,inference,_submission \ --model=retinanet --implementation=cpp --device=cuda --backend=onnxruntime \ --scenario=SingleStream \ --category=edge --division=$division --quiet \ @@ -141,7 +141,7 @@ ${POWER} \ --results_dir=$HOME/results_dir" #multistream -run "cm run script --tags=generate-run-cmds,inference,_submission \ +run "mlcr --tags=generate-run-cmds,inference,_submission \ --scenario=Offline \ --model=resnet50 --implementation=cpp 
--device=cuda --backend=onnxruntime \ --scenario=MultiStream \ @@ -152,7 +152,7 @@ run "cm run script --tags=generate-run-cmds,inference,_submission \ ${POWER} \ --results_dir=$HOME/results_dir" -run "cm run script --tags=generate-run-cmds,inference,_submission \ +run "mlcr --tags=generate-run-cmds,inference,_submission \ --model=retinanet --implementation=cpp --device=cuda --backend=onnxruntime \ --scenario=MultiStream \ --category=edge --division=$division --quiet \ diff --git a/script/run-all-mlperf-models/run-mobilenet-models.sh b/script/run-all-mlperf-models/run-mobilenet-models.sh index 1ae46f66b..8fa760c8e 100644 --- a/script/run-all-mlperf-models/run-mobilenet-models.sh +++ b/script/run-all-mlperf-models/run-mobilenet-models.sh @@ -32,35 +32,35 @@ extra_tags="" #Add your run commands here... # run "$MLC_RUN_CMD" -run "cm run script --tags=run,mobilenet-models,_tflite,_accuracy-only$extra_tags \ +run "mlcr --tags=run,mobilenet-models,_tflite,_accuracy-only$extra_tags \ --adr.compiler.tags=gcc \ ${extra_option} \ --results_dir=$HOME/results_dir" -run "cm run script --tags=run,mobilenet-models,_tflite,_performance-only$extra_tags \ +run "mlcr --tags=run,mobilenet-models,_tflite,_performance-only$extra_tags \ ${POWER} \ --adr.compiler.tags=gcc \ ${extra_option} \ --results_dir=$HOME/results_dir" -run "cm run script --tags=run,mobilenet-models,_tflite,_populate-readme$extra_tags \ +run "mlcr --tags=run,mobilenet-models,_tflite,_populate-readme$extra_tags \ ${POWER} \ --adr.compiler.tags=gcc \ ${extra_option} \ --results_dir=$HOME/results_dir" -run "cm run script --tags=run,mobilenet-models,_tflite,_armnn,_neon,_accuracy-only$extra_tags \ +run "mlcr --tags=run,mobilenet-models,_tflite,_armnn,_neon,_accuracy-only$extra_tags \ --adr.compiler.tags=gcc \ ${extra_option} \ --results_dir=$HOME/results_dir" -run "cm run script --tags=run,mobilenet-models,_tflite,_armnn,_neon,_performance-only$extra_tags \ +run "mlcr --tags=run,mobilenet-models,_tflite,_armnn,_neon,_performance-only$extra_tags \ ${POWER} \ ${extra_option} \ --adr.compiler.tags=gcc \ --results_dir=$HOME/results_dir" -run "cm run script --tags=run,mobilenet-models,_tflite,_armnn,_neon,_populate-readme$extra_tags \ +run "mlcr --tags=run,mobilenet-models,_tflite,_armnn,_neon,_populate-readme$extra_tags \ ${POWER} \ ${extra_option} \ --adr.compiler.tags=gcc \ diff --git a/script/run-all-mlperf-models/run-nvidia-4090.sh b/script/run-all-mlperf-models/run-nvidia-4090.sh index deb6884bd..bc4eb5ae5 100644 --- a/script/run-all-mlperf-models/run-nvidia-4090.sh +++ b/script/run-all-mlperf-models/run-nvidia-4090.sh @@ -38,7 +38,7 @@ power="" power=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15" #Add your run commands here... 
# run "$MLC_RUN_CMD" -find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count' @@ -49,7 +49,7 @@ find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-per #run "3d-unet" "30" "${find_performance_cmd}" -submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --execution-mode=valid --implementation=$implementation --device=$device --backend=$backend --results_dir=$HOME/results_dir \ --category=$category --division=$division --skip_submission_generation=yes --quiet $power' diff --git a/script/run-all-mlperf-models/run-nvidia-a100.sh b/script/run-all-mlperf-models/run-nvidia-a100.sh index e793a1fb5..70069b9a7 100644 --- a/script/run-all-mlperf-models/run-nvidia-a100.sh +++ b/script/run-all-mlperf-models/run-nvidia-a100.sh @@ -37,7 +37,7 @@ connection_type="sxm" #Add your run commands here... # run "$MLC_RUN_CMD" -find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count' @@ -48,7 +48,7 @@ run "bert-99" "20000" "${find_performance_cmd}" run "3d-unet-99.9" "30" "${find_performance_cmd}" -submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --adr.nvidia-harensss.tags=_${connection_type} $power' diff --git a/script/run-all-mlperf-models/run-nvidia-t4.sh b/script/run-all-mlperf-models/run-nvidia-t4.sh index 4a9176ab6..facdb0a60 100644 --- a/script/run-all-mlperf-models/run-nvidia-t4.sh +++ b/script/run-all-mlperf-models/run-nvidia-t4.sh @@ -35,7 +35,7 @@ category="edge,datacenter" #Add your run commands here... 
# run "$MLC_RUN_CMD" -find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count' @@ -47,7 +47,7 @@ run "bert-99.9" "5000" "${find_performance_cmd}" run "3d-unet" "10" "${find_performance_cmd}" -submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet' diff --git a/script/run-all-mlperf-models/run-pruned-bert.sh b/script/run-all-mlperf-models/run-pruned-bert.sh index e44c6480c..b7bc2beae 100644 --- a/script/run-all-mlperf-models/run-pruned-bert.sh +++ b/script/run-all-mlperf-models/run-pruned-bert.sh @@ -45,7 +45,7 @@ scenario="Offline" if [[ $scenario == "Offline" ]]; then for stub in ${zoo_stub_list[@]}; do -cmd="cm run script --tags=run,mlperf,inference,generate-run-cmds,_find-performance \ +cmd="mlcr --tags=run,mlperf,inference,generate-run-cmds,_find-performance \ --adr.python.version_min=3.8 \ --implementation=reference \ --model=bert-99 \ @@ -64,7 +64,7 @@ done fi for stub in ${zoo_stub_list[@]}; do - cmd="cm run script --tags=run,mlperf,inference,generate-run-cmds \ + cmd="mlcr --tags=run,mlperf,inference,generate-run-cmds \ --adr.python.version_min=3.8 \ --adr.compiler.tags=gcc \ --implementation=reference \ diff --git a/script/run-all-mlperf-models/run-reference-models.sh b/script/run-all-mlperf-models/run-reference-models.sh index e01ac97e6..84d7526fd 100644 --- a/script/run-all-mlperf-models/run-reference-models.sh +++ b/script/run-all-mlperf-models/run-reference-models.sh @@ -25,43 +25,43 @@ function run() { division="closed" #Add your run commands here... 
# run "$MLC_RUN_CMD" -run "cm run script --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr --tags=generate-run-cmds,inference,_find-performance \ --model=resnet50 --implementation=reference --device=cpu --backend=onnxruntime \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=100" -run "cm run script --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr --tags=generate-run-cmds,inference,_find-performance \ --model=rnnt --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=open --scenario=Offline --quiet" -run "cm run script --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr --tags=generate-run-cmds,inference,_find-performance \ --model=retinanet --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=open --scenario=Offline --quiet" -run "cm run script --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr --tags=generate-run-cmds,inference,_find-performance \ --model=bert-99 --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=open --scenario=Offline --quiet" -run "cm run script --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr --tags=generate-run-cmds,inference,_find-performance \ --model=3d-unet-99 --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=open --scenario=Offline --quiet" -run "cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +run "mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ --model=resnet50 --implementation=reference --device=cpu --backend=onnxruntime \ --category=edge --division=$division --quiet" -run "cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +run "mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ --model=rnnt --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=$division --quiet" -run "cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +run "mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ --model=retinanet --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=$division --quiet" -run "cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +run "mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ --model=bert-99 --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=$division --quiet" -run "cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +run "mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ --model=3d-unet-99 --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=$division --quiet" diff --git a/script/run-all-mlperf-models/run-resnet50-macos.sh b/script/run-all-mlperf-models/run-resnet50-macos.sh index 191907397..ea2f91346 100644 --- a/script/run-all-mlperf-models/run-resnet50-macos.sh +++ b/script/run-all-mlperf-models/run-resnet50-macos.sh @@ -37,26 +37,26 @@ function run_test() { power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' power="" #Add your run commands here... 
-find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' -submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -submission_cmd_scenario='cm run script --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +submission_cmd_scenario='mlcr --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd_single='cm run script --tags=generate-run-cmds,inference,_populate-readme \ +readme_cmd_single='mlcr --tags=generate-run-cmds,inference,_populate-readme \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd='cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +readme_cmd='mlcr --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' diff --git a/script/run-all-mlperf-models/run-resnet50.sh b/script/run-all-mlperf-models/run-resnet50.sh index e3f3077c0..d9945c745 100644 --- a/script/run-all-mlperf-models/run-resnet50.sh +++ b/script/run-all-mlperf-models/run-resnet50.sh @@ -37,26 +37,26 @@ function run_test() { power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' #Add your run commands here... 
-find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' -submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -submission_cmd_scenario='cm run script --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +submission_cmd_scenario='mlcr --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd_single='cm run script --tags=generate-run-cmds,inference,_populate-readme \ +readme_cmd_single='mlcr --tags=generate-run-cmds,inference,_populate-readme \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd='cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +readme_cmd='mlcr --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' diff --git a/script/run-all-mlperf-models/run-retinanet-sh b/script/run-all-mlperf-models/run-retinanet-sh index b3151d068..c5ede6296 100644 --- a/script/run-all-mlperf-models/run-retinanet-sh +++ b/script/run-all-mlperf-models/run-retinanet-sh @@ -37,26 +37,26 @@ function run_test() { power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' #Add your run commands here... 
-find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' -submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -submission_cmd_scenario='cm run script --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +submission_cmd_scenario='mlcr --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd_single='cm run script --tags=generate-run-cmds,inference,_populate-readme \ +readme_cmd_single='mlcr --tags=generate-run-cmds,inference,_populate-readme \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd='cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +readme_cmd='mlcr --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' diff --git a/script/run-all-mlperf-models/template.sh b/script/run-all-mlperf-models/template.sh index 4af440f10..ff43cf2fe 100644 --- a/script/run-all-mlperf-models/template.sh +++ b/script/run-all-mlperf-models/template.sh @@ -40,26 +40,26 @@ function run_test() { power=${POWER_STRING} #Add your run commands here... 
-find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' -submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -submission_cmd_scenario='cm run script --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +submission_cmd_scenario='mlcr --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd_single='cm run script --tags=generate-run-cmds,inference,_populate-readme \ +readme_cmd_single='mlcr --tags=generate-run-cmds,inference,_populate-readme \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd='cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +readme_cmd='mlcr --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' diff --git a/script/run-docker-container/README-extra.md b/script/run-docker-container/README-extra.md deleted file mode 100644 index b930ef964..000000000 --- a/script/run-docker-container/README-extra.md +++ /dev/null @@ -1,15 +0,0 @@ -This script runs a docker container and launces the given CM script inside it. -If the container image is not existing, corresponding build is initiated via CM dependencies. - -## How to Run -```bash -cm run script \ ---tags=run,docker,container -``` -### Options -1. `--script_tags="get,gcc"`: Script tags for the CM script to be run inside the docker container. - If this is not set the cm command run inside the docker container is `cm version` -2. `--cm_repo=ctuning@mlcommons-ck`: To use a different repo for CM scripts like "ctuning@mlcommons-ck". Default: `mlcommons@cm4mlops` -3. `--base="ubuntu:22.04"`: Specify the base image for Dockerfile. Default: "ubuntu:20.04" -4. `--recreate=yes`: To recreate docker image even when existing. Default: "no" -5. `--adr.build-docker-image.tags=_cache`: To use build cache for docker image build. 
Default: "" (`nocache`) diff --git a/script/run-docker-container/meta.yaml b/script/run-docker-container/meta.yaml index f3a9a5c40..394cd4c51 100644 --- a/script/run-docker-container/meta.yaml +++ b/script/run-docker-container/meta.yaml @@ -24,7 +24,7 @@ input_mapping: num_gpus: MLC_DOCKER_ADD_NUM_GPUS base: MLC_DOCKER_IMAGE_BASE cache: MLC_DOCKER_CACHE - cm_repo: MLC_MLOPS_REPO + mlc_repo: MLC_MLOPS_REPO detached: MLC_DOCKER_DETACHED_MODE device: MLC_DOCKER_ADD_DEVICE docker_image_base: MLC_DOCKER_IMAGE_BASE diff --git a/script/run-mlperf-inference-app/README-extra.md b/script/run-mlperf-inference-app/README-extra.md deleted file mode 100644 index b91bf8e31..000000000 --- a/script/run-mlperf-inference-app/README-extra.md +++ /dev/null @@ -1,21 +0,0 @@ -# About - -This is a universal CM interface to run and customize all MLPerf inference benchmarks. -It is composed from the [portable automation recipes (CM scripts)](https://access.cknowledge.org/playground/?action=scripts). - -Check [this documentation](https://github.com/mlcommons/ck/tree/master/docs/mlperf/inference) -and [CM GUI](https://access.cknowledge.org/playground/?action=howtorun&bench_uid=39877bb63fb54725) -to learn how to run MLPerf benchmarks via CM. - - - -# Authors - -* [Grigori Fursin](https://cKnowledge.org/gfursin) -* [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh) - - -# Acknowledgments - -We thank [the community](../../../CONTRIBUTING.md) for their suggestions and contributions! - diff --git a/script/run-mlperf-inference-app/run_mobilenet.py b/script/run-mlperf-inference-app/run_mobilenet.py index bfe0657fe..8b5eb2b2f 100644 --- a/script/run-mlperf-inference-app/run_mobilenet.py +++ b/script/run-mlperf-inference-app/run_mobilenet.py @@ -97,7 +97,7 @@ } } print(mlc_input) - r = mlc.access(cm_input) + r = mlc.access(mlc_input) if r['return'] > 0: print(r) # exit(1) diff --git a/script/run-mlperf-inference-mobilenet-models/meta.yaml b/script/run-mlperf-inference-mobilenet-models/meta.yaml index 7fae10c70..b87bf4896 100644 --- a/script/run-mlperf-inference-mobilenet-models/meta.yaml +++ b/script/run-mlperf-inference-mobilenet-models/meta.yaml @@ -16,7 +16,7 @@ docker: results_dir: RESULTS_DIR submission_dir: SUBMISSION_DIR docker_run_final_cmds: - - cm run script --tags=run,mlperf,inference,mobilenet-models,_find-performance --adr.mlperf-inference-implementation.fake_run=True + - mlcr --tags=run,mlperf,inference,mobilenet-models,_find-performance --adr.mlperf-inference-implementation.fake_run=True --adr.compiler.tags=gcc fake_run_deps: false mounts: diff --git a/script/run-mlperf-inference-submission-checker/README-extra.md b/script/run-mlperf-inference-submission-checker/README-extra.md deleted file mode 100644 index 80c280055..000000000 --- a/script/run-mlperf-inference-submission-checker/README-extra.md +++ /dev/null @@ -1,10 +0,0 @@ -# Run MLPerf Inference Submission Checker -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) runs the [MLPerf Inference submission checker](https://github.com/mlcommons/inference/blob/master/tools/submission/submission-checker.py) on a given submission folder. 
- -## How To -```bash -cm run script --tags=run,mlperf,inference,submission,checker --submitter=[SUBMITTER_NAME] --submission_dir=[SUBMISSION_FOLDER] -``` - -### Additional Options -* `[--skip_compliance]:` Skips the compliance tests diff --git a/script/run-mlperf-power-client/README-extra.md b/script/run-mlperf-power-client/README-extra.md deleted file mode 100644 index d13278d9b..000000000 --- a/script/run-mlperf-power-client/README-extra.md +++ /dev/null @@ -1,15 +0,0 @@ -# Run MLPerf Power Client Script -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) runs the [MLPerf Power Server script](https://github.com/mlcommons/power-dev/tree/master/ptd_client_server). - -## How To -```bash -cm run script --tags=run,mlperf,power,client [--log_dir=<> --power_server=<> \ ---loadgen_logs_dir=<> --ntp_server=<> --run_cmd=<>] -``` - -### Default Values -1. `log_dir`: `logs` -2. `power_server`: `localhost` -3. `loadgen_logs_dir`: `loadgen_logs`, -4. `ntp_server`: `time.google.com` -5. `run_cmd`: `dummy.sh` diff --git a/script/run-mlperf-power-server/README-extra.md b/script/run-mlperf-power-server/README-extra.md deleted file mode 100644 index 78b0457f7..000000000 --- a/script/run-mlperf-power-server/README-extra.md +++ /dev/null @@ -1,17 +0,0 @@ -# Run MLPerf Power Server Script -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) runs the [MLPerf Power Server script](https://github.com/mlcommons/power-dev/tree/master/ptd_client_server). - -## How To -```bash -cm run script --tags=run,mlperf,power,server [--interface_flag=<> \ ---device_port=<> --outdir=<> --logfile=<> --outdir=<> --device_type=<> ] -``` - -### Default Values -1. `ntp_server`: `time.google.com` -2. `interface_flag`: "" -3. `device_port`: `/dev/usbtmc0` -4. `device_type`: `49` -5. `outdir`: `~/mlperf_power_logs` -6. `logfile`: `logs_ptdaemon.txt` - diff --git a/script/run-terraform/README-about.md b/script/run-terraform/README-about.md index f890c6170..674ebee42 100644 --- a/script/run-terraform/README-about.md +++ b/script/run-terraform/README-about.md @@ -7,6 +7,6 @@ gcloud auth application-default login The above two commands will install google-cloud-cli and authorizes the user to access it. Once done, you can start creating gcp instance using CM commands like below. To destroy an instance just repeat the same command with `--destroy` option. 
``` -cm run script --tags=run,terraform,_gcp,_gcp_project.mlperf-inference-tests --cminit +mlcr --tags=run,terraform,_gcp,_gcp_project.mlperf-inference-tests --cminit ``` Here, `mlperf-inference-tests` is the name of the google project as created in [Google cloud console](https://console.cloud.google.com/apis/dashboard) diff --git a/script/run-terraform/README-extra.md b/script/run-terraform/README-extra.md deleted file mode 100644 index 47c1f4f30..000000000 --- a/script/run-terraform/README-extra.md +++ /dev/null @@ -1 +0,0 @@ -Please copy aws/credentials.example to aws/credentials.sh file after adding your AWS credentials diff --git a/script/save-mlperf-inference-implementation-state/customize.py b/script/save-mlperf-inference-implementation-state/customize.py index dd0129cd1..79b9023c4 100644 --- a/script/save-mlperf-inference-implementation-state/customize.py +++ b/script/save-mlperf-inference-implementation-state/customize.py @@ -40,7 +40,7 @@ def preprocess(i): 'fake_run': True } - r = mlc.access(cm_input) + r = mlc.access(mlc_input) if r['return'] > 0: return r diff --git a/script/set-venv/README-extra.md b/script/set-venv/README-extra.md deleted file mode 100644 index 987ad1f67..000000000 --- a/script/set-venv/README-extra.md +++ /dev/null @@ -1,6 +0,0 @@ -# Examples - -```bash -cmr "set venv" mlperf-test -cmr "set venv" mlperf-test2 --python=/usr/bin/python3 -``` diff --git a/script/tar-my-folder/README-extra.md b/script/tar-my-folder/README-extra.md deleted file mode 100644 index 8c9b52508..000000000 --- a/script/tar-my-folder/README-extra.md +++ /dev/null @@ -1,12 +0,0 @@ -# Compress using tar -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) compresses a given folder and generates a tar.gz file - -## How To -```bash -cm run script --tags=run,tar --input_dir=[DIR_PATH] -``` - - -### Additional Options -* `--output_dir:` Directory in which to generate the output file. Default is current working directory -* `--outfile:`: Output filename. Default is inputfoldername".gz" diff --git a/script/truncate-mlperf-inference-accuracy-log/README-extra.md b/script/truncate-mlperf-inference-accuracy-log/README-extra.md deleted file mode 100644 index 71b498f99..000000000 --- a/script/truncate-mlperf-inference-accuracy-log/README-extra.md +++ /dev/null @@ -1,7 +0,0 @@ -# MLPerf Inference Accuracy Log Truncator -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) runs the [MLPerf Inference accuracy log truncator](https://github.com/mlcommons/inference/blob/master/tools/submission/truncate_accuracy_log.py) on a given submission folder. - -## How To -```bash -cm run script --tags=run,mlperf,inference,accuracy,truncator --submitter=[SUBMITTER_NAME] --submission_dir=[SUBMISSION_FOLDER] -``` diff --git a/script/wrapper-reproduce-octoml-tinyml-submission/README-extra.md b/script/wrapper-reproduce-octoml-tinyml-submission/README-extra.md deleted file mode 100644 index e0901f6c1..000000000 --- a/script/wrapper-reproduce-octoml-tinyml-submission/README-extra.md +++ /dev/null @@ -1,17 +0,0 @@ -This is a wrapper script to [Reproduce MLPerf OctoML TinyML Results](https://github.com/octoml/ck/tree/master/mlc-mlops/script/reproduce-mlperf-octoml-tinyml-results) -which runs the script for the two microtvm variants and their supported models. 
- -## Install -``` -cm run script --tags=generate,tiny,octoml,submission -``` - -The above command should produce five elf binaries which can be located inside the respective cache entries given by the below command -``` -cm show cache --tags=reproduce,tiny,octoml,mlperf -``` - -## Install and Flash -``` -cm run script --tags=generate,tiny,octoml,submission --flash -``` From bd1a164c4b7f0297321930171c764600a0806288 Mon Sep 17 00:00:00 2001 From: arjunsuresh Date: Thu, 23 Jan 2025 15:17:11 +0000 Subject: [PATCH 3/7] [Automated Commit] Format Codebase --- script/app-image-classification-onnx-py/src/onnx_classify.py | 2 +- script/get-dataset-cognata-mlcommons/customize.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/script/app-image-classification-onnx-py/src/onnx_classify.py b/script/app-image-classification-onnx-py/src/onnx_classify.py index 32da50189..e516758cd 100644 --- a/script/app-image-classification-onnx-py/src/onnx_classify.py +++ b/script/app-image-classification-onnx-py/src/onnx_classify.py @@ -189,7 +189,7 @@ def load_a_batch(batch_filenames): mlc_status['classifications'].append({'class_idx': int(class_idx), 'softmax': float(softmax_vector[class_idx]), - 'label': labels[class_idx]}) + 'label': labels[class_idx]}) print('') print('Top classification: {}'.format(top_classification)) diff --git a/script/get-dataset-cognata-mlcommons/customize.py b/script/get-dataset-cognata-mlcommons/customize.py index 43b327769..c74a15825 100644 --- a/script/get-dataset-cognata-mlcommons/customize.py +++ b/script/get-dataset-cognata-mlcommons/customize.py @@ -80,7 +80,8 @@ def postprocess(i): env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_DATASET_MLCOMMONS_COGNATA_PATH'] return {'return': 0} - mlc_cache_dataset_cfg_file = os.path.join(mlc_cache_dataset_path, 'cfg.json') + mlc_cache_dataset_cfg_file = os.path.join( + mlc_cache_dataset_path, 'cfg.json') env['MLC_DATASET_MLCOMMONS_COGNATA_CFG_FILE'] = mlc_cache_dataset_cfg_file res = utils.load_json(mlc_cache_dataset_cfg_file) From 7ae40d6678e5d880b6090e9dc8299ee5ed33e7e9 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Thu, 23 Jan 2025 15:21:03 +0000 Subject: [PATCH 4/7] Update format.yml --- .github/workflows/format.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index 0f5384d7b..8523dbfc5 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -11,6 +11,8 @@ env: jobs: format-code: runs-on: ubuntu-latest + permissions: + contents: write steps: - name: Checkout code uses: actions/checkout@v4 @@ -54,8 +56,8 @@ jobs: HAS_CHANGES=$(git diff --staged --name-only) if [ ${#HAS_CHANGES} -gt 0 ]; then # Use the GitHub actor's name and email - git config --global user.name "${GITHUB_ACTOR}" - git config --global user.email "${GITHUB_ACTOR}@users.noreply.github.com" + git config --global user.name github-actions[bot] + git config --global user.email "github-actions[bot]@users.noreply.github.com" # Commit changes git commit -m '[Automated Commit] Format Codebase' git push From 4cd235a0f9bfe666af409473da8156733308fe9f Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Thu, 23 Jan 2025 20:56:43 +0530 Subject: [PATCH 5/7] Fix compare_versions --- automation/script/module.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/automation/script/module.py b/automation/script/module.py index ae5e7db48..0852d1d69 100644 --- a/automation/script/module.py +++ b/automation/script/module.py @@ -776,7 +776,7 @@ def 
_run(self, i): # Check min CM version requirement min_mlc_version = meta.get('min_mlc_version', '').strip() if min_mlc_version != '': - comparison = compare_versions( + comparison = utils.compare_versions( current_mlc_version, min_mlc_version) if comparison < 0: return {'return': 1, 'error': 'This script requires MLC version >= {} while current MLC version is {} - please update using "pip install mlcflow -U"'.format( From 9327ac53147003a9ee515f4703042c0c71c5db58 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Thu, 23 Jan 2025 21:08:10 +0530 Subject: [PATCH 6/7] Added proper parsing for mlc version --- automation/script/module.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/automation/script/module.py b/automation/script/module.py index 0852d1d69..34daeacc4 100644 --- a/automation/script/module.py +++ b/automation/script/module.py @@ -13,8 +13,8 @@ from mlc.main import Automation import mlc.utils as utils -from mlc.main import __version__ as current_mlc_version from utils import * +from importlib.metadata import version class ScriptAutomation(Automation): @@ -776,6 +776,7 @@ def _run(self, i): # Check min CM version requirement min_mlc_version = meta.get('min_mlc_version', '').strip() if min_mlc_version != '': + current_mlc_version = version(mlc) comparison = utils.compare_versions( current_mlc_version, min_mlc_version) if comparison < 0: From f5737fa7a24bdbda51d6faf8cb665b75b2a565d2 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Thu, 23 Jan 2025 21:13:55 +0530 Subject: [PATCH 7/7] Added proper parsing for mlc version --- automation/script/module.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/automation/script/module.py b/automation/script/module.py index 34daeacc4..8bc6261e9 100644 --- a/automation/script/module.py +++ b/automation/script/module.py @@ -14,7 +14,6 @@ from mlc.main import Automation import mlc.utils as utils from utils import * -from importlib.metadata import version class ScriptAutomation(Automation): @@ -776,12 +775,16 @@ def _run(self, i): # Check min CM version requirement min_mlc_version = meta.get('min_mlc_version', '').strip() if min_mlc_version != '': - current_mlc_version = version(mlc) - comparison = utils.compare_versions( - current_mlc_version, min_mlc_version) - if comparison < 0: - return {'return': 1, 'error': 'This script requires MLC version >= {} while current MLC version is {} - please update using "pip install mlcflow -U"'.format( - min_mlc_version, current_mlc_version)} + try: + import importlib.metadata + current_mlc_version = importlib.metadata.version("mlc") + comparison = utils.compare_versions( + current_mlc_version, min_mlc_version) + if comparison < 0: + return {'return': 1, 'error': 'This script requires MLC version >= {} while current MLC version is {} - please update using "pip install mlcflow -U"'.format( + min_mlc_version, current_mlc_version)} + except Exception as e: + error = format(e) # Check path to repo script_repo_path = script_artifact.repo.path
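
For reference, the version guard that patches 5-7 iterate on reduces to the following standalone sketch. It is illustrative only, not repository code: `importlib.metadata.version("mlc")`, the empty-string short-circuit, the `try/except` swallowing, and the error message mirror the final patch, while `compare_versions` below is a simplified numeric stand-in for the helper imported from `mlc.utils`/`utils`, whose exact semantics may differ.

```python
# Minimal sketch of the min-MLC-version check, assuming a simplified
# compare_versions; the real helper lives in mlc.utils / utils.
from importlib.metadata import PackageNotFoundError, version


def compare_versions(current, minimum):
    """Return -1, 0 or 1 by comparing dotted numeric version strings."""
    def to_tuple(v):
        return tuple(int(p) for p in v.split('.') if p.isdigit())
    c, m = to_tuple(current), to_tuple(minimum)
    return (c > m) - (c < m)


def check_min_mlc_version(min_mlc_version):
    """Mimic the guard added in _run(): empty requirement means no constraint."""
    min_mlc_version = (min_mlc_version or '').strip()
    if min_mlc_version == '':
        return {'return': 0}
    try:
        # The distribution name is passed as a string, not the imported module.
        current_mlc_version = version("mlc")
    except PackageNotFoundError as e:
        # Mirror the patch's behaviour of recording, not raising, lookup errors.
        return {'return': 0, 'warning': format(e)}
    if compare_versions(current_mlc_version, min_mlc_version) < 0:
        return {'return': 1,
                'error': 'This script requires MLC version >= {} while current '
                         'MLC version is {} - please update using '
                         '"pip install mlcflow -U"'.format(
                             min_mlc_version, current_mlc_version)}
    return {'return': 0}


if __name__ == '__main__':
    print(check_min_mlc_version('0.1'))
```

Reading the installed version through `importlib.metadata` also explains why patch 6 drops the `from mlc.main import __version__` import: the check no longer depends on the package exposing a `__version__` attribute.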