From 2789a41608b4953a3cdf314dd580b0b8b5fb7e6e Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Thu, 23 Jan 2025 20:01:45 +0530
Subject: [PATCH] Cleanups for MLC

---
 .github/scripts/process_individual_tests.py | 8 +-
 .../workflows/run-individual-script-tests.yml | 2 +-
 ...t-amd-mlperf-inference-implementations.yml | 2 +-
 ...intel-mlperf-inference-implementations.yml | 2 +-
 .../test-mlc-based-submission-generation.yml | 10 +-
 .../workflows/test-mlc-script-features.yml | 4 +-
 .../workflows/test-mlperf-inference-dlrm.yml | 4 +-
 .../workflows/test-mlperf-inference-gptj.yml | 2 +-
 .../test-mlperf-inference-llama2.yml | 4 +-
 .../test-mlperf-inference-mixtral.yml | 4 +-
 .../test-mlperf-inference-resnet50.yml | 4 +-
 .../workflows/test-mlperf-inference-rnnt.yml | 2 +-
 .../workflows/test-mlperf-inference-sdxl.yaml | 4 +-
 .../workflows/test-mlperf-inference-tvm.yml | 2 +-
 ...vidia-mlperf-inference-implementations.yml | 4 +-
 .../workflows/test-qaic-compute-sdk-build.yml | 2 +-
 .github/workflows/test-scc24-sdxl.yaml | 24 +-
 automation/cache/module_misc.py | 2 +-
 automation/cfg/README-extra.md | 8 -
 automation/cfg/README.md | 27 -
 automation/cfg/_cm.json | 12 -
 automation/cfg/module.py | 259 --
 automation/experiment/README-extra.md | 315 --
 automation/experiment/README.md | 87 -
 automation/experiment/_cm.json | 11 -
 automation/experiment/module.py | 844 ------
 automation/experiment/tests/test2.bat | 1 -
 automation/experiment/tests/test2.sh | 1 -
 automation/experiment/tests/test3.bat | 1 -
 automation/experiment/tests/test3.sh | 1 -
 automation/experiment/tests/test3_input.yaml | 4 -
 automation/experiment/tests/test__json.bat | 1 -
 automation/experiment/tests/test__json.sh | 1 -
 automation/experiment/tests/test__yaml.bat | 1 -
 automation/experiment/tests/test__yaml.sh | 1 -
 automation/experiment/tests/test_input.json | 14 -
 automation/experiment/tests/test_input.yaml | 4 -
 automation/experiment/tests/test_run.bat | 3 -
 automation/experiment/tests/test_run.sh | 1 -
 automation/script/README-extra.md | 1035 -------
 automation/script/README-specs.md | 79 -
 automation/script/README.md | 460 +--
 automation/script/docker.py | 12 +-
 automation/script/module.py | 236 +-
 automation/script/module_misc.py | 2522 -----------------
 automation/script/template-ae-python/_cm.yaml | 4 +-
 .../script/template-ae-python/analyze.bat | 6 +-
 .../script/template-ae-python/analyze.sh | 6 +-
 .../script/template-ae-python/customize.py | 2 +-
 .../template-ae-python/install_deps.bat | 8 +-
 .../script/template-ae-python/install_deps.sh | 8 +-
 automation/script/template-ae-python/main.py | 2 +-
 automation/script/template-ae-python/plot.bat | 6 +-
 automation/script/template-ae-python/plot.sh | 6 +-
 .../script/template-ae-python/reproduce.bat | 6 +-
 .../script/template-ae-python/reproduce.sh | 6 +-
 automation/script/template-ae-python/run.bat | 6 +-
 automation/script/template-ae-python/run.sh | 6 +-
 .../script/template-ae-python/validate.bat | 6 +-
 .../script/template-ae-python/validate.sh | 6 +-
 automation/script/template-python/_cm.yaml | 4 +-
 .../script/template-python/customize.py | 4 +-
 automation/script/template-python/main.py | 2 +-
 automation/script/template-python/run.bat | 10 +-
 automation/script/template-python/run.sh | 10 +-
 automation/script/template-pytorch/_cm.yaml | 4 +-
 .../script/template-pytorch/customize.py | 4 +-
 automation/script/template-pytorch/main.py | 2 +-
 automation/script/template-pytorch/run.bat | 10 +-
 automation/script/template-pytorch/run.sh | 10 +-
 automation/script/template/customize.py | 2 +-
 automation/script/template/run.sh | 10 +-
 automation/script/template_list_of_scripts.md | 12 +-
 automation/utils.py | 8 +-
 automation/utils/README.md | 387 ---
 automation/utils/_cm.json | 12 -
 automation/utils/module.py | 1108 --------
 automation/utils/module_cfg.py | 339 ---
 script/activate-python-venv/customize.py | 4 +-
 script/activate-python-venv/run.bat | 4 +-
 script/activate-python-venv/run.sh | 4 +-
 script/add-custom-nvidia-system/customize.py | 2 +-
 script/add-custom-nvidia-system/run.sh | 4 +-
 .../customize.py | 8 +-
 .../meta.yaml | 16 +-
 .../app-image-classification-onnx-py/run.bat | 28 +-
 .../app-image-classification-onnx-py/run.sh | 34 +-
 .../src/onnx_classify.py | 4 +-
 .../tests/README.md | 4 +-
 .../include/benchmark.h | 2 +-
 .../meta.yaml | 4 +-
 .../run.sh | 4 +-
 .../meta.yaml | 4 +-
 .../app-image-classification-torch-py/run.bat | 22 +-
 .../app-image-classification-torch-py/run.sh | 22 +-
 .../src/pytorch_classify_preprocessed.py | 12 +-
 .../meta.yaml | 4 +-
 .../run.sh | 16 +-
 .../src/classify.py | 14 +-
 .../app-image-corner-detection/customize.py | 24 +-
 script/app-image-corner-detection/meta.yaml | 4 +-
 script/app-image-corner-detection/run.sh | 2 +-
 .../README-extra.md | 26 +-
 .../app-loadgen-generic-python/customize.py | 74 +-
 script/app-loadgen-generic-python/meta.yaml | 106 +-
 script/app-loadgen-generic-python/run.bat | 2 +-
 script/app-loadgen-generic-python/run.sh | 2 +-
 .../tests/modular-cm-containers/_common.bat | 7 -
 .../tests/modular-cm-containers/_common.sh | 10 -
 .../tests/modular-cm-containers/build.bat | 16 -
 .../tests/modular-cm-containers/build.sh | 18 -
 ...dgen-generic-python--ubuntu-cpu.Dockerfile | 96 -
 .../loadgen-generic-python-auto.Dockerfile | 33 -
 .../tests/modular-cm-containers/run.bat | 3 -
 .../tests/modular-cm-containers/run.sh | 3 -
 .../customize.py | 198 +-
 .../meta.yaml | 196 +-
 .../ref/python/backend_pytorch_native.py | 6 +-
 .../ref/python/cognata.py | 6 +-
 .../ref/python/main.py | 6 +-
 script/app-mlperf-automotive/customize.py | 38 +-
 script/app-mlperf-automotive/meta.yaml | 160 +-
 script/app-mlperf-inference-amd/customize.py | 26 +-
 script/app-mlperf-inference-amd/meta.yaml | 134 +-
 script/app-mlperf-inference-amd/run-llama2.sh | 20 +-
 script/app-mlperf-inference-amd/run.sh | 6 +-
 .../armnn/classification.cpp | 20 +-
 .../customize.py | 78 +-
 .../inc/benchmark.h | 40 +-
 .../meta.yaml | 168 +-
 .../src/classification.cpp | 14 +-
 .../app-mlperf-inference-dummy/customize.py | 30 +-
 script/app-mlperf-inference-dummy/meta.yaml | 116 +-
 script/app-mlperf-inference-dummy/run.sh | 6 +-
 .../build_bert_harness.sh | 14 +-
 .../build_gptj_harness.sh | 6 +-
 .../build_resnet50_harness.sh | 18 +-
 .../build_retinanet_harness.sh | 14 +-
 .../build_sdxl_harness.sh | 2 +-
 .../calibrate_dlrm_v2_model.sh | 6 +-
 .../calibrate_gptj_int4_model.sh | 4 +-
 .../compile_resnet50.sh | 8 +-
 .../compile_retinanet.sh | 10 +-
 .../app-mlperf-inference-intel/customize.py | 194 +-
 script/app-mlperf-inference-intel/meta.yaml | 244 +-
 .../prepare_3d-unet_data_model.sh | 8 +-
 .../prepare_imagenet_calibration.sh | 4 +-
 .../run_3d-unet_harness.sh | 24 +-
 .../run_bert_harness.sh | 12 +-
 .../run_dlrm_v2_harness.sh | 18 +-
 .../run_gptj_harness_v3_1.sh | 18 +-
 .../run_gptj_harness_v4_0.sh | 18 +-
 .../run_resnet50_harness.sh | 20 +-
 .../run_retinanet_harness.sh | 10 +-
 .../run_sdxl_harness.sh | 12 +-
 .../customize.py | 72 +-
 .../meta.yaml | 98 +-
 .../src/main.cpp | 42 +-
 .../customize.py | 480 ++--
 .../meta.yaml | 500 ++--
 .../nvidia/retinanet.py | 4 +-
 .../README-about.md | 137 -
 .../app-mlperf-inference-nvidia/customize.py | 222 +-
 script/app-mlperf-inference-nvidia/meta.yaml | 654 ++---
 script/app-mlperf-inference-nvidia/run.sh | 6 +-
 .../customize.py | 136 +-
 .../app-mlperf-inference-qualcomm/meta.yaml | 254 +-
 script/app-mlperf-inference-qualcomm/run.sh | 6 +-
 .../app-mlperf-inference-redhat/customize.py | 58 +-
 script/app-mlperf-inference-redhat/meta.yaml | 124 +-
 script/app-mlperf-inference-redhat/run.sh | 6 +-
 script/app-mlperf-inference/README-extra.md | 2 +-
 script/app-mlperf-inference/customize.py | 190 +-
 script/app-mlperf-inference/meta.yaml | 570 ++--
 .../app-mlperf-training-nvidia/customize.py | 38 +-
 script/app-mlperf-training-nvidia/meta.yaml | 48 +-
 .../run-bert-training.sh | 4 +-
 script/app-mlperf-training-nvidia/run.sh | 6 +-
 .../customize.py | 30 +-
 .../app-mlperf-training-reference/meta.yaml | 50 +-
 .../run-bert-training.sh | 6 +-
 script/app-mlperf-training-reference/run.sh | 6 +-
 .../README-extra.md | 2 +-
 script/app-stable-diffusion-onnx-py/meta.yaml | 10 +-
 .../app-stable-diffusion-onnx-py/process.py | 10 +-
 script/app-stable-diffusion-onnx-py/run.bat | 2 +-
 script/app-stable-diffusion-onnx-py/run.sh | 2 +-
 script/authenticate-github-cli/customize.py | 10 +-
 script/authenticate-github-cli/meta.yaml | 4 +-
 script/authenticate-github-cli/run.bat | 14 +-
 script/authenticate-github-cli/run.sh | 10 +-
 .../customize.py | 4 +-
 .../meta.yaml | 2 +-
 .../run-template.sh | 6 +-
 script/benchmark-program-mlperf/customize.py | 50 +-
 script/benchmark-program-mlperf/meta.yaml | 8 +-
 script/benchmark-program/customize.py | 98 +-
 script/benchmark-program/meta.yaml | 18 +-
 script/benchmark-program/run-ubuntu.sh | 6 +-
 script/benchmark-program/run.bat | 22 +-
 script/benchmark-program/run.sh | 38 +-
 script/build-docker-image/customize.py | 64 +-
 .../build-docker-image/examples/0-common.bat | 21 -
 .../examples/0-generate.bat | 9 -
 .../build-docker-image/examples/1-build.bat | 8 -
 .../examples/2-run-cm-command1.bat | 3 -
 .../examples/2-run-cm-command2.bat | 3 -
 .../examples/2-run-cm-command3.bat | 3 -
 .../examples/2-run-cm-command4.bat | 3 -
 .../examples/2-run-cm-command5.bat | 3 -
 .../examples/2-run-interactive1.bat | 3 -
 .../examples/2-run-interactive2.bat | 3 -
 .../examples/3-push-to-docker-hub.bat | 3 -
 .../Dockerfile.cm-base-ubuntu-22.04-20230804 | 38 -
 .../Dockerfile.cm-base-ubuntu-23.04-20230804 | 38 -
 .../Dockerfile.cm-base-ubuntu-23.04-latest | 38 -
 ...classification-onnx-py-ubuntu-23.04-latest | 45 -
 script/build-docker-image/examples/README.md | 1 -
 .../examples/computer_mouse.jpg | Bin 41154 -> 0 bytes
 ...cm-script-app-image-classification-onnx-py | 6 -
 ...e-classification-python-onnx-with-file.bat | 6 -
 ...ge-classification-python-onnx-with-file.sh | 5 -
 ...un-cm-image-classification-python-onnx.bat | 1 -
 ...run-cm-image-classification-python-onnx.sh | 3 -
 script/build-docker-image/meta.yaml | 36 +-
 script/build-docker-image/run.bat | 8 +-
 script/build-docker-image/run.sh | 8 +-
 script/build-dockerfile/README-extra.md | 27 -
 script/build-dockerfile/customize.py | 152 +-
 script/build-dockerfile/meta.yaml | 76 +-
 .../customize.py | 14 +-
 .../meta.yaml | 92 +-
 .../run.sh | 8 +-
 script/calibrate-model-for.qaic/customize.py | 48 +-
 script/calibrate-model-for.qaic/meta.yaml | 64 +-
 script/calibrate-model-for.qaic/run.sh | 8 +-
 .../customize.py | 22 +-
 .../meta.yaml | 14 +-
 .../run.sh | 10 +-
 script/compile-model-for.qaic/customize.py | 34 +-
 script/compile-model-for.qaic/meta.yaml | 126 +-
 script/compile-model-for.qaic/run.sh | 8 +-
 script/compile-program/customize.py | 56 +-
 script/compile-program/run.bat | 24 +-
 script/compile-program/run.sh | 28 +-
 script/convert-csv-to-md/customize.py | 10 +-
 script/convert-csv-to-md/meta.yaml | 4 +-
 script/convert-csv-to-md/run.sh | 8 +-
 .../customize.py | 4 +-
 .../meta.yaml | 6 +-
 .../run.sh | 2 +-
 script/copy-to-clipboard/code.py | 4 +-
 script/copy-to-clipboard/meta.yaml | 8 +-
 script/copy-to-clipboard/run.bat | 2 +-
 script/copy-to-clipboard/run.sh | 2 +-
 script/create-conda-env/customize.py | 12 +-
 script/create-conda-env/meta.yaml | 8 +-
 script/create-conda-env/run.sh | 2 +-
 script/create-custom-cache-entry/customize.py | 14 +-
 script/create-custom-cache-entry/meta.yaml | 12 +-
 .../customize.py | 16 +-
 .../create-fpgaconvnet-app-tinyml/meta.yaml | 2 +-
 script/create-fpgaconvnet-app-tinyml/run.sh | 8 +-
 .../customize.py | 38 +-
 .../meta.yaml | 6 +-
 .../create-fpgaconvnet-config-tinyml/run.sh | 8 +-
 script/create-patch/customize.py | 8 +-
 script/create-patch/meta.yaml | 6 +-
 script/destroy-terraform/run.sh | 6 +-
 script/detect-cpu/README-extra.md | 16 +-
 script/detect-cpu/customize.py | 78 +-
 script/detect-cpu/meta.yaml | 6 +-
 script/detect-cpu/run.sh | 42 +-
 script/detect-os/customize.py | 92 +-
 script/detect-os/meta.yaml | 16 +-
 script/detect-os/run.sh | 26 +-
 script/detect-sudo/customize.py | 14 +-
 script/detect-sudo/meta.yaml | 2 +-
 script/detect-sudo/run.sh | 8 +-
 script/download-and-extract/README-extra.md | 34 +-
 script/download-and-extract/customize.py | 28 +-
 script/download-and-extract/meta.yaml | 64 +-
 .../tests/download-and-extract-file.bat | 2 +-
 .../tests/download-and-extract-file2.bat | 2 +-
 script/download-file/README-extra.md | 28 +-
 script/download-file/customize.py | 190 +-
 script/download-file/meta.yaml | 48 +-
 script/download-file/run.bat | 34 +-
 script/download-file/run.sh | 36 +-
 script/download-file/tests/download-file.bat | 2 +-
 script/download-file/tests/download-file2.bat | 2 +-
 script/download-torrent/customize.py | 18 +-
 script/download-torrent/meta.yaml | 10 +-
 script/download-torrent/run.sh | 14 +-
 script/draw-graph-from-json-data/customize.py | 12 +-
 script/draw-graph-from-json-data/meta.yaml | 6 +-
 script/draw-graph-from-json-data/run.sh | 10 +-
 script/dump-pip-freeze/customize.py | 8 +-
 script/dump-pip-freeze/dump.py | 2 +-
 script/dump-pip-freeze/run.bat | 4 +-
 script/dump-pip-freeze/run.sh | 10 +-
 script/extract-file/README-extra.md | 18 +-
 script/extract-file/customize.py | 140 +-
 script/extract-file/meta.yaml | 26 +-
 script/extract-file/run.bat | 22 +-
 script/extract-file/run.sh | 8 +-
 script/fail/customize.py | 4 +-
 script/fail/meta.yaml | 2 +-
 script/flash-tinyml-binary/customize.py | 4 +-
 script/flash-tinyml-binary/meta.yaml | 6 +-
 script/flash-tinyml-binary/run.sh | 4 +-
 .../customize.py | 82 +-
 .../meta.yaml | 82 +-
 .../customize.py | 258 +-
 .../meta.yaml | 60 +-
 .../generate-mlperf-tiny-report/customize.py | 14 +-
 script/generate-mlperf-tiny-report/meta.yaml | 4 +-
 .../run_submission_checker.bat | 6 +-
 .../run_submission_checker.sh | 6 +-
 .../customize.py | 12 +-
 .../generate-mlperf-tiny-submission/meta.yaml | 2 +-
 script/generate-nvidia-engine/customize.py | 14 +-
 script/generate-nvidia-engine/meta.yaml | 38 +-
 script/generate-nvidia-engine/run.sh | 4 +-
 script/get-android-sdk/customize.py | 46 +-
 script/get-android-sdk/meta.yaml | 24 +-
 .../get-android-sdk/prepare-sdk-manager.bat | 16 +-
 script/get-android-sdk/prepare-sdk-manager.sh | 16 +-
 script/get-aocl/customize.py | 12 +-
 script/get-aocl/meta.yaml | 12 +-
 script/get-aocl/run.sh | 4 +-
 script/get-aria2/customize.py | 38 +-
 script/get-aria2/install.bat | 6 +-
 script/get-aria2/install.sh | 18 +-
 script/get-aria2/meta.yaml | 12 +-
 script/get-aria2/run.bat | 2 +-
 script/get-aria2/run.sh | 2 +-
 script/get-aws-cli/README-extra.md | 2 +-
 script/get-aws-cli/customize.py | 14 +-
 script/get-aws-cli/meta.yaml | 4 +-
 script/get-bazel/README-extra.md | 2 +-
 script/get-bazel/customize.py | 14 +-
 script/get-bazel/meta.yaml | 4 +-
 script/get-bazel/run.bat | 2 +-
 script/get-bazel/run.sh | 4 +-
 script/get-blis/customize.py | 12 +-
 script/get-blis/meta.yaml | 14 +-
 script/get-blis/run.sh | 4 +-
 script/get-cache-dir/customize.py | 10 +-
 script/get-cache-dir/meta.yaml | 6 +-
 script/get-ck/COPYRIGHT.md | 9 -
 script/get-ck/README.md | 1 -
 script/get-ck/meta.yaml | 10 -
 script/get-ck/run.bat | 1 -
 script/get-ck/run.sh | 3 -
 script/get-cl/customize.py | 42 +-
 script/get-cl/meta.yaml | 10 +-
 script/get-cl/run.bat | 2 +-
 script/get-cmake/customize.py | 16 +-
 script/get-cmake/meta.yaml | 10 +-
 script/get-cmake/run.bat | 2 +-
 script/get-cmake/run.sh | 2 +-
 script/get-cmsis_5/README-extra.md | 5 -
 script/get-cmsis_5/customize.py | 8 +-
 script/get-cmsis_5/meta.yaml | 18 +-
 script/get-cmsis_5/run.sh | 14 +-
 script/get-compiler-flags/customize.py | 30 +-
 script/get-compiler-flags/meta.yaml | 4 +-
 script/get-compiler-rust/customize.py | 2 +-
 script/get-compiler-rust/run.sh | 6 +-
 script/get-conda/customize.py | 34 +-
 script/get-conda/install.sh | 10 +-
 script/get-conda/meta.yaml | 14 +-
 script/get-conda/run.bat | 2 +-
 script/get-conda/run.sh | 2 +-
 script/get-croissant/meta.yaml | 2 +-
 script/get-croissant/run.bat | 6 +-
 script/get-croissant/run.sh | 6 +-
 script/get-cuda-devices/customize.py | 6 +-
 script/get-cuda-devices/detect.sh | 2 +-
 script/get-cuda-devices/meta.yaml | 12 +-
 script/get-cuda-devices/run.bat | 12 +-
 script/get-cuda-devices/run.sh | 12 +-
 script/get-cuda/README-extra.md | 6 +-
 script/get-cuda/customize.py | 74 +-
 script/get-cuda/meta.yaml | 52 +-
 script/get-cuda/run.bat | 2 +-
 script/get-cuda/run.sh | 6 +-
 script/get-cudnn/customize.py | 74 +-
 script/get-cudnn/meta.yaml | 26 +-
 script/get-cudnn/run.sh | 6 +-
 script/get-dataset-cifar10/meta.yaml | 12 +-
 script/get-dataset-cifar10/run.bat | 26 +-
 script/get-dataset-cifar10/run.sh | 26 +-
 script/get-dataset-cnndm/customize.py | 20 +-
 script/get-dataset-cnndm/meta.yaml | 22 +-
 script/get-dataset-cnndm/run-intel.sh | 2 +-
 script/get-dataset-cnndm/run.sh | 8 +-
 script/get-dataset-coco/README-extra.md | 34 +-
 script/get-dataset-coco/customize.py | 64 +-
 script/get-dataset-coco/meta.yaml | 52 +-
 script/get-dataset-coco2014/customize.py | 24 +-
 script/get-dataset-coco2014/meta.yaml | 38 +-
 script/get-dataset-coco2014/run.bat | 10 +-
 script/get-dataset-coco2014/run.sh | 12 +-
 .../customize.py | 66 +-
 .../get-dataset-cognata-mlcommons/meta.yaml | 86 +-
 script/get-dataset-criteo/README-extra.md | 2 +-
 script/get-dataset-criteo/meta.yaml | 12 +-
 script/get-dataset-criteo/run.sh | 12 +-
 script/get-dataset-igbh/customize.py | 34 +-
 script/get-dataset-igbh/meta.yaml | 300 +-
 script/get-dataset-igbh/run.sh | 6 +-
 script/get-dataset-imagenet-aux/meta.yaml | 34 +-
 .../meta.yaml | 14 +-
 .../get-dataset-imagenet-helper/customize.py | 4 +-
 .../imagenet_helper/__init__.py | 46 +-
 script/get-dataset-imagenet-helper/meta.yaml | 2 +-
 .../get-dataset-imagenet-train/customize.py | 32 +-
 script/get-dataset-imagenet-train/meta.yaml | 20 +-
 script/get-dataset-imagenet-val/customize.py | 46 +-
 script/get-dataset-imagenet-val/meta.yaml | 54 +-
 script/get-dataset-imagenet-val/run.bat | 8 +-
 script/get-dataset-kits19/customize.py | 18 +-
 script/get-dataset-kits19/meta.yaml | 32 +-
 script/get-dataset-kits19/run.sh | 26 +-
 .../get-dataset-librispeech/README-extra.md | 8 +-
 script/get-dataset-librispeech/customize.py | 6 +-
 script/get-dataset-librispeech/meta.yaml | 36 +-
 script/get-dataset-librispeech/run.sh | 4 +-
 .../customize.py | 14 +-
 .../meta.yaml | 32 +-
 .../customize.py | 10 +-
 .../meta.yaml | 18 +-
 .../run.sh | 4 +-
 .../customize.py | 12 +-
 .../meta.yaml | 18 +-
 .../customize.py | 8 +-
 .../filter.py | 2 +-
 .../meta.yaml | 20 +-
 .../run-filter.sh | 4 +-
 script/get-dataset-openimages/customize.py | 40 +-
 script/get-dataset-openimages/meta.yaml | 72 +-
 script/get-dataset-openimages/run.bat | 10 +-
 script/get-dataset-openimages/run.sh | 14 +-
 script/get-dataset-openorca/customize.py | 12 +-
 script/get-dataset-openorca/meta.yaml | 24 +-
 script/get-dataset-squad-vocab/customize.py | 2 +-
 script/get-dataset-squad-vocab/meta.yaml | 18 +-
 script/get-dataset-squad/README-extra.md | 6 +-
 script/get-dataset-squad/customize.py | 8 +-
 script/get-dataset-squad/meta.yaml | 32 +-
 .../customize.py | 50 +-
 .../get-dlrm-data-mlperf-inference/meta.yaml | 18 +-
 script/get-dlrm-data-mlperf-inference/run.sh | 8 +-
 script/get-dlrm/customize.py | 12 +-
 script/get-dlrm/meta.yaml | 10 +-
 script/get-dlrm/run.sh | 6 +-
 script/get-docker/customize.py | 16 +-
 script/get-docker/meta.yaml | 4 +-
 script/get-gcc/README-extra.md | 14 +-
 script/get-gcc/customize.py | 70 +-
 script/get-gcc/meta.yaml | 12 +-
 script/get-gcc/run.bat | 2 +-
 script/get-gcc/run.sh | 2 +-
 script/get-generic-python-lib/customize.py | 74 +-
 .../get-generic-python-lib/detect-version.py | 2 +-
 script/get-generic-python-lib/install.bat | 6 +-
 script/get-generic-python-lib/install.sh | 36 +-
 script/get-generic-python-lib/meta.yaml | 540 ++--
 script/get-generic-python-lib/run.bat | 4 +-
 script/get-generic-python-lib/run.sh | 4 +-
 .../tensorflow/run-aarch64.sh | 10 +-
 .../tensorflow/run-macos.sh | 6 +-
 .../get-generic-python-lib/uninstall_deps.sh | 4 +-
 .../get-generic-python-lib/validate_cache.bat | 4 +-
 .../get-generic-python-lib/validate_cache.sh | 4 +-
 script/get-generic-sys-util/customize.py | 72 +-
 script/get-generic-sys-util/detect.sh | 12 +-
 .../install-with-retry.sh | 12 +-
 script/get-generic-sys-util/install.sh | 6 +-
 script/get-generic-sys-util/meta.yaml | 440 +--
 script/get-gh-actions-runner/customize.py | 16 +-
 script/get-gh-actions-runner/meta.yaml | 18 +-
 script/get-gh-actions-runner/run.sh | 4 +-
 script/get-git-repo/README-extra.md | 2 +-
 script/get-git-repo/customize.py | 88 +-
 script/get-git-repo/meta.yaml | 62 +-
 script/get-git-repo/run.bat | 30 +-
 script/get-git-repo/run.sh | 38 +-
 script/get-github-cli/customize.py | 8 +-
 script/get-go/README-extra.md | 2 +-
 script/get-go/customize.py | 14 +-
 script/get-go/meta.yaml | 6 +-
 script/get-google-saxml/meta.yaml | 8 +-
 script/get-google-test/customize.py | 8 +-
 script/get-google-test/meta.yaml | 6 +-
 script/get-google-test/run.sh | 10 +-
 script/get-huggingface-cli/customize.py | 10 +-
 script/get-huggingface-cli/meta.yaml | 4 +-
 script/get-huggingface-cli/run.bat | 8 +-
 script/get-huggingface-cli/run.sh | 6 +-
 script/get-ipol-src/customize.py | 12 +-
 script/get-ipol-src/meta.yaml | 16 +-
 script/get-java/customize.py | 42 +-
 script/get-java/install-prebuilt.bat | 8 +-
 script/get-java/install-prebuilt.sh | 12 +-
 script/get-java/meta.yaml | 14 +-
 script/get-java/run.bat | 2 +-
 script/get-java/run.sh | 2 +-
 script/get-javac/customize.py | 48 +-
 script/get-javac/install-prebuilt.bat | 8 +-
 script/get-javac/install-prebuilt.sh | 12 +-
 script/get-javac/meta.yaml | 16 +-
 script/get-javac/run.bat | 2 +-
 script/get-javac/run.sh | 2 +-
 script/get-lib-armnn/customize.py | 14 +-
 script/get-lib-armnn/meta.yaml | 20 +-
 script/get-lib-armnn/run.sh | 4 +-
 script/get-lib-dnnl/customize.py | 2 +-
 script/get-lib-dnnl/meta.yaml | 10 +-
 script/get-lib-dnnl/run.sh | 6 +-
 script/get-lib-protobuf/customize.py | 8 +-
 script/get-lib-protobuf/meta.yaml | 16 +-
 script/get-lib-protobuf/run.sh | 4 +-
 script/get-lib-qaic-api/customize.py | 12 +-
 script/get-lib-qaic-api/meta.yaml | 6 +-
 script/get-llvm/README-extra.md | 20 +-
 script/get-llvm/customize.py | 60 +-
 script/get-llvm/meta.yaml | 16 +-
 script/get-llvm/run.bat | 2 +-
 script/get-llvm/run.sh | 2 +-
 script/get-microtvm/README-extra.md | 2 +-
 script/get-microtvm/customize.py | 10 +-
 script/get-microtvm/meta.yaml | 20 +-
 script/get-microtvm/run.sh | 6 +-
 .../get-ml-model-3d-unet-kits19/customize.py | 12 +-
 script/get-ml-model-3d-unet-kits19/meta.yaml | 82 +-
 .../customize.py | 24 +-
 .../get-ml-model-abtf-ssd-pytorch/meta.yaml | 108 +-
 script/get-ml-model-bert-base-squad/meta.yaml | 50 +-
 .../customize.py | 26 +-
 .../get-ml-model-bert-large-squad/meta.yaml | 184 +-
 .../run-packed.sh | 2 +-
 script/get-ml-model-dlrm-terabyte/meta.yaml | 108 +-
 script/get-ml-model-dlrm-terabyte/run.sh | 4 +-
 .../customize.py | 28 +-
 .../get-ml-model-efficientnet-lite/meta.yaml | 116 +-
 script/get-ml-model-gptj/customize.py | 46 +-
 script/get-ml-model-gptj/meta.yaml | 94 +-
 .../get-ml-model-gptj/run-int4-calibration.sh | 6 +-
 script/get-ml-model-gptj/run-intel.sh | 12 +-
 script/get-ml-model-gptj/run-nvidia.sh | 16 +-
 .../get-ml-model-gptj/run-saxml-quantized.sh | 6 +-
 script/get-ml-model-gptj/run-saxml.sh | 4 +-
 .../get-ml-model-huggingface-zoo/customize.py | 20 +-
 .../download_model.py | 16 +-
 script/get-ml-model-huggingface-zoo/meta.yaml | 36 +-
 script/get-ml-model-huggingface-zoo/run.bat | 4 +-
 script/get-ml-model-huggingface-zoo/run.sh | 4 +-
 script/get-ml-model-llama2/customize.py | 30 +-
 script/get-ml-model-llama2/meta.yaml | 76 +-
 script/get-ml-model-llama2/run-amd.sh | 8 +-
 script/get-ml-model-llama2/run-nvidia.sh | 22 +-
 script/get-ml-model-llama3/customize.py | 16 +-
 script/get-ml-model-llama3/meta.yaml | 26 +-
 script/get-ml-model-mixtral/customize.py | 8 +-
 script/get-ml-model-mixtral/meta.yaml | 30 +-
 script/get-ml-model-mobilenet/README-extra.md | 6 +-
 script/get-ml-model-mobilenet/customize.py | 28 +-
 script/get-ml-model-mobilenet/meta.yaml | 250 +-
 .../get-ml-model-neuralmagic-zoo/customize.py | 10 +-
 .../download_sparse.py | 4 +-
 script/get-ml-model-neuralmagic-zoo/meta.yaml | 250 +-
 script/get-ml-model-neuralmagic-zoo/run.bat | 2 +-
 script/get-ml-model-neuralmagic-zoo/run.sh | 2 +-
 script/get-ml-model-resnet50/README-extra.md | 6 +-
 script/get-ml-model-resnet50/customize.py | 18 +-
 script/get-ml-model-resnet50/meta.yaml | 238 +-
 script/get-ml-model-resnet50/run-fix-input.sh | 4 +-
 .../customize.py | 18 +-
 .../get-ml-model-retinanet-nvidia/meta.yaml | 18 +-
 .../nvidia_patch_retinanet_efficientnms.py | 6 +-
 script/get-ml-model-retinanet-nvidia/run.sh | 10 +-
 script/get-ml-model-retinanet/README-extra.md | 6 +-
 script/get-ml-model-retinanet/customize.py | 18 +-
 script/get-ml-model-retinanet/meta.yaml | 90 +-
 script/get-ml-model-retinanet/run-no-nms.sh | 14 +-
 script/get-ml-model-rgat/customize.py | 16 +-
 script/get-ml-model-rgat/meta.yaml | 32 +-
 script/get-ml-model-rnnt/customize.py | 14 +-
 script/get-ml-model-rnnt/meta.yaml | 28 +-
 .../customize.py | 10 +-
 .../get-ml-model-stable-diffusion/meta.yaml | 74 +-
 script/get-ml-model-tiny-resnet/customize.py | 16 +-
 script/get-ml-model-tiny-resnet/meta.yaml | 80 +-
 script/get-ml-model-tiny-resnet/run.sh | 4 +-
 .../meta.yaml | 6 +-
 .../customize.py | 24 +-
 .../meta.yaml | 14 +-
 .../run.sh | 8 +-
 .../customize.py | 8 +-
 .../meta.yaml | 8 +-
 .../run.sh | 10 +-
 .../get-mlperf-inference-loadgen/customize.py | 14 +-
 script/get-mlperf-inference-loadgen/meta.yaml | 70 +-
 script/get-mlperf-inference-loadgen/run.bat | 16 +-
 script/get-mlperf-inference-loadgen/run.sh | 24 +-
 .../customize.py | 6 +-
 .../meta.yaml | 2 +-
 .../customize.py | 12 +-
 .../meta.yaml | 14 +-
 .../run.sh | 10 +-
 .../customize.py | 8 +-
 .../meta.yaml | 14 +-
 .../README-extra.md | 2 +-
 .../get-mlperf-inference-results/customize.py | 24 +-
 script/get-mlperf-inference-results/meta.yaml | 36 +-
 .../get-mlperf-inference-src/README-extra.md | 6 +-
 script/get-mlperf-inference-src/customize.py | 108 +-
 script/get-mlperf-inference-src/meta.yaml | 126 +-
 .../customize.py | 8 +-
 .../meta.yaml | 10 +-
 .../customize.py | 78 +-
 .../meta.yaml | 20 +-
 .../customize.py | 154 +-
 .../detect_memory.sh | 6 +-
 .../get_memory_info.py | 2 +-
 .../meta.yaml | 24 +-
 .../get-mlperf-inference-utils/customize.py | 6 +-
 script/get-mlperf-logging/customize.py | 4 +-
 script/get-mlperf-logging/meta.yaml | 4 +-
 script/get-mlperf-power-dev/customize.py | 10 +-
 script/get-mlperf-power-dev/meta.yaml | 32 +-
 .../customize.py | 12 +-
 .../meta.yaml | 10 +-
 .../run.bat | 28 +-
 .../run.sh | 18 +-
 script/get-mlperf-tiny-src/customize.py | 34 +-
 script/get-mlperf-tiny-src/meta.yaml | 10 +-
 script/get-mlperf-tiny-src/run.bat | 28 +-
 script/get-mlperf-tiny-src/run.sh | 18 +-
 .../customize.py | 10 +-
 .../get-mlperf-training-nvidia-code/meta.yaml | 18 +-
 .../get-mlperf-training-src/README-extra.md | 2 +-
 script/get-mlperf-training-src/customize.py | 6 +-
 script/get-mlperf-training-src/meta.yaml | 52 +-
 script/get-nvidia-mitten/meta.yaml | 8 +-
 script/get-nvidia-mitten/run.sh | 4 +-
 script/get-onnxruntime-prebuilt/customize.py | 16 +-
 script/get-onnxruntime-prebuilt/meta.yaml | 8 +-
 script/get-onnxruntime-prebuilt/run.bat | 2 +-
 script/get-onnxruntime-prebuilt/run.sh | 2 +-
 script/get-openssl/README-extra.md | 2 +-
 script/get-openssl/customize.py | 12 +-
 script/get-openssl/meta.yaml | 6 +-
 script/get-openssl/run.sh | 2 +-
 script/get-platform-details/customize.py | 20 +-
 script/get-platform-details/meta.yaml | 32 +-
 script/get-platform-details/run.sh | 22 +-
 .../README-extra.md | 2 +-
 .../customize.py | 26 +-
 .../get-preprocessed-dataset-criteo/meta.yaml | 70 +-
 .../preprocess.py | 22 +-
 .../preprocess_multihot.sh | 4 +-
 .../run-multihot.sh | 4 +-
 script/get-preprocessed-dataset-criteo/run.sh | 4 +-
 .../src/generic_preprocess.py | 46 +-
 .../preprocess_object_detection_dataset.py | 60 +-
 .../README-extra.md | 8 +-
 .../customize.py | 46 +-
 .../meta.yaml | 162 +-
 .../preprocess.py | 24 +-
 .../get-preprocessed-dataset-imagenet/run.bat | 2 +-
 .../get-preprocessed-dataset-imagenet/run.sh | 4 +-
 .../customize.py | 18 +-
 .../get-preprocessed-dataset-kits19/meta.yaml | 28 +-
 script/get-preprocessed-dataset-kits19/run.sh | 2 +-
 .../customize.py | 12 +-
 .../meta.yaml | 26 +-
 .../run.sh | 2 +-
 .../README-extra.md | 8 +-
 .../customize.py | 38 +-
 .../meta.yaml | 110 +-
 .../preprocess.py | 24 +-
 .../run.bat | 2 +-
 .../run.sh | 2 +-
 .../customize.py | 40 +-
 .../meta.yaml | 38 +-
 .../get-preprocessed-dataset-openorca/run.sh | 6 +-
 .../customize.py | 38 +-
 .../get-preprocessed-dataset-squad/meta.yaml | 26 +-
 .../run-packed.sh | 10 +-
 script/get-preprocessed-dataset-squad/run.sh | 22 +-
 script/get-python3/README-extra.md | 8 +-
 script/get-python3/customize.py | 36 +-
 script/get-python3/meta.yaml | 16 +-
 script/get-python3/run.bat | 2 +-
 script/get-python3/run.sh | 2 +-
 script/get-qaic-apps-sdk/customize.py | 18 +-
 script/get-qaic-apps-sdk/meta.yaml | 2 +-
 script/get-qaic-platform-sdk/customize.py | 22 +-
 script/get-qaic-platform-sdk/meta.yaml | 4 +-
 script/get-qaic-software-kit/customize.py | 18 +-
 script/get-qaic-software-kit/meta.yaml | 14 +-
 script/get-qaic-software-kit/run.sh | 8 +-
 script/get-rclone-config/customize.py | 6 +-
 script/get-rclone-config/meta.yaml | 2 +-
 script/get-rclone-config/run.sh | 10 +-
 script/get-rclone/configs/rclone.conf | 2 +-
 script/get-rclone/customize.py | 30 +-
 script/get-rclone/install.bat | 10 +-
 script/get-rclone/install.sh | 8 +-
 script/get-rclone/meta.yaml | 12 +-
 script/get-rocm-devices/customize.py | 6 +-
 script/get-rocm-devices/detect.sh | 2 +-
 script/get-rocm-devices/run.sh | 4 +-
 script/get-rocm/customize.py | 16 +-
 script/get-rocm/meta.yaml | 4 +-
 script/get-rocm/run.sh | 2 +-
 script/get-spec-ptd/README-extra.md | 4 +-
 script/get-spec-ptd/customize.py | 12 +-
 script/get-spec-ptd/meta.yaml | 24 +-
 script/get-spec-ptd/run.sh | 4 +-
 script/get-sys-utils-cm/customize.py | 10 +-
 script/get-sys-utils-cm/do_pip_installs.sh | 2 +-
 .../get-sys-utils-cm/do_pip_installs.sh.old | 2 +-
 script/get-sys-utils-cm/meta.yaml | 12 +-
 script/get-sys-utils-cm/run-arch.sh | 12 +-
 script/get-sys-utils-cm/run-debian.sh | 12 +-
 script/get-sys-utils-cm/run-macos.sh | 6 +-
 script/get-sys-utils-cm/run-rhel.sh | 16 +-
 script/get-sys-utils-cm/run-sles.sh | 14 +-
 script/get-sys-utils-cm/run-ubuntu.sh | 12 +-
 script/get-sys-utils-min/customize.py | 8 +-
 script/get-sys-utils-min/meta.yaml | 12 +-
 script/get-tensorrt/customize.py | 66 +-
 script/get-tensorrt/meta.yaml | 8 +-
 script/get-tensorrt/run.sh | 16 +-
 script/get-terraform/README-extra.md | 2 +-
 script/get-terraform/customize.py | 14 +-
 script/get-terraform/meta.yaml | 4 +-
 script/get-tvm-model/README-extra.md | 2 +-
 script/get-tvm-model/customize.py | 30 +-
 script/get-tvm-model/meta.yaml | 36 +-
 script/get-tvm-model/process.py | 36 +-
 script/get-tvm-model/run.sh | 2 +-
 script/get-tvm/customize.py | 6 +-
 script/get-tvm/meta.yaml | 28 +-
 script/get-tvm/run.sh | 34 +-
 script/get-xilinx-sdk/customize.py | 4 +-
 script/get-xilinx-sdk/meta.yaml | 8 +-
 script/get-xilinx-sdk/run.sh | 8 +-
 script/get-zendnn/customize.py | 8 +-
 script/get-zephyr-sdk/README-extra.md | 4 +-
 script/get-zephyr-sdk/customize.py | 2 +-
 script/get-zephyr-sdk/meta.yaml | 6 +-
 script/get-zephyr-sdk/run.sh | 10 +-
 script/get-zephyr/README-extra.md | 8 -
 script/get-zephyr/customize.py | 2 +-
 script/get-zephyr/meta.yaml | 4 +-
 script/get-zephyr/run-ubuntu.sh | 2 +-
 script/get-zephyr/run.sh | 8 +-
 script/install-apt-package/customize.py | 14 +-
 script/install-apt-package/meta.yaml | 6 +-
 script/install-apt-package/run.sh | 2 +-
 script/install-aws-cli/meta.yaml | 4 +-
 script/install-aws-cli/run.sh | 12 +-
 script/install-bazel/customize.py | 22 +-
 script/install-bazel/meta.yaml | 6 +-
 script/install-bazel/run-aarch64.sh | 20 +-
 script/install-bazel/run.bat | 6 +-
 script/install-bazel/run.sh | 12 +-
 script/install-cmake-prebuilt/customize.py | 18 +-
 script/install-cmake-prebuilt/meta.yaml | 6 +-
 script/install-cmake-prebuilt/run.sh | 6 +-
 .../install-cuda-package-manager/customize.py | 2 +-
 script/install-cuda-package-manager/meta.yaml | 2 +-
 script/install-cuda-package-manager/run.sh | 2 +-
 script/install-cuda-prebuilt/customize.py | 26 +-
 script/install-cuda-prebuilt/meta.yaml | 44 +-
 script/install-cuda-prebuilt/run.sh | 4 +-
 script/install-diffusers-from-src/meta.yaml | 30 +-
 script/install-diffusers-from-src/run.sh | 6 +-
 script/install-gcc-src/customize.py | 10 +-
 script/install-gcc-src/meta.yaml | 6 +-
 script/install-gcc-src/run.sh | 10 +-
 .../customize.py | 20 +-
 .../install-generic-conda-package/meta.yaml | 16 +-
 script/install-generic-conda-package/run.sh | 2 +-
 script/install-gflags-from-src/customize.py | 4 +-
 script/install-gflags-from-src/meta.yaml | 22 +-
 script/install-gflags-from-src/run.sh | 4 +-
 script/install-gflags/customize.py | 4 +-
 script/install-gflags/meta.yaml | 2 +-
 script/install-gflags/run.sh | 12 +-
 script/install-github-cli/customize.py | 4 +-
 .../customize.py | 4 +-
 .../meta.yaml | 24 +-
 .../run.sh | 6 +-
 script/install-ipex-from-src/customize.py | 18 +-
 script/install-ipex-from-src/meta.yaml | 56 +-
 script/install-ipex-from-src/run.sh | 20 +-
 script/install-llvm-prebuilt/README-extra.md | 18 +-
 script/install-llvm-prebuilt/customize.py | 30 +-
 script/install-llvm-prebuilt/meta.yaml | 6 +-
 script/install-llvm-prebuilt/run.bat | 4 +-
 script/install-llvm-prebuilt/run.sh | 6 +-
 script/install-llvm-src/customize.py | 38 +-
 .../install-llvm-16-intel-mlperf-inference.sh | 8 +-
 script/install-llvm-src/meta.yaml | 54 +-
 script/install-llvm-src/run.sh | 14 +-
 .../customize.py | 2 +-
 .../install-mlperf-logging-from-src/meta.yaml | 8 +-
 script/install-mlperf-logging-from-src/run.sh | 10 +-
 script/install-nccl-libs/customize.py | 2 +-
 script/install-nccl-libs/run-ubuntu.sh | 4 +-
 script/install-nccl-libs/run.sh | 8 +-
 script/install-numactl-from-src/customize.py | 2 +-
 script/install-numactl-from-src/meta.yaml | 24 +-
 script/install-numactl-from-src/run.sh | 2 +-
 script/install-onednn-from-src/customize.py | 6 +-
 script/install-onednn-from-src/meta.yaml | 30 +-
 .../run-intel-mlperf-inference-bert.sh | 4 +-
 .../run-intel-mlperf-inference.sh | 4 +-
 script/install-onednn-from-src/run.sh | 4 +-
 .../install-onnxruntime-from-src/customize.py | 6 +-
 script/install-onnxruntime-from-src/meta.yaml | 24 +-
 script/install-onnxruntime-from-src/run.sh | 12 +-
 script/install-opencv-from-src/customize.py | 4 +-
 script/install-opencv-from-src/meta.yaml | 24 +-
 script/install-opencv-from-src/run.sh | 4 +-
 script/install-openssl/customize.py | 10 +-
 script/install-openssl/meta.yaml | 6 +-
 script/install-openssl/run.sh | 12 +-
 .../customize.py | 4 +-
 .../meta.yaml | 2 +-
 script/install-python-src/customize.py | 14 +-
 script/install-python-src/meta.yaml | 48 +-
 script/install-python-src/run.sh | 28 +-
 script/install-python-venv/customize.py | 24 +-
 script/install-python-venv/meta.yaml | 4 +-
 script/install-python-venv/run.bat | 4 +-
 script/install-python-venv/run.sh | 8 +-
 script/install-pytorch-from-src/customize.py | 10 +-
 script/install-pytorch-from-src/meta.yaml | 52 +-
 .../run-intel-mlperf-inference-v3_1.sh | 6 +-
 .../run-intel-mlperf-inference-vision.sh | 8 +-
 script/install-pytorch-from-src/run.sh | 10 +-
 .../install-pytorch-kineto-from-src/meta.yaml | 34 +-
 script/install-pytorch-kineto-from-src/run.sh | 2 +-
 .../customize.py | 18 +-
 .../meta.yaml | 20 +-
 .../install-qaic-compute-sdk-from-src/run.sh | 14 +-
 script/install-rapidjson-from-src/meta.yaml | 22 +-
 script/install-rapidjson-from-src/run.sh | 4 +-
 script/install-rocm/customize.py | 4 +-
 script/install-rocm/meta.yaml | 2 +-
 script/install-rocm/run-rhel.sh | 4 +-
 script/install-rocm/run-ubuntu.sh | 6 +-
 script/install-rocm/run.sh | 2 -
 script/install-tensorflow-for-c/run.sh | 8 +-
 .../install-tensorflow-from-src/customize.py | 4 +-
 script/install-tensorflow-from-src/meta.yaml | 52 +-
 script/install-tensorflow-from-src/run.sh | 14 +-
 .../install-terraform-from-src/customize.py | 4 +-
 script/install-terraform-from-src/meta.yaml | 6 +-
 script/install-terraform-from-src/run.sh | 4 +-
 script/install-tflite-from-src/meta.yaml | 8 +-
 script/install-tflite-from-src/run.sh | 6 +-
 script/install-torchvision-from-src/meta.yaml | 38 +-
 script/install-torchvision-from-src/run.sh | 6 +-
 .../customize.py | 10 +-
 .../install-tpp-pytorch-extension/meta.yaml | 28 +-
 script/install-tpp-pytorch-extension/run.sh | 8 +-
 .../customize.py | 2 +-
 .../install-transformers-from-src/meta.yaml | 30 +-
 script/install-transformers-from-src/run.sh | 8 +-
 .../plug-prebuilt-cudnn-to-cuda/customize.py | 20 +-
 script/plug-prebuilt-cudnn-to-cuda/meta.yaml | 14 +-
 script/plug-prebuilt-cudnn-to-cuda/run.sh | 26 +-
 .../customize.py | 20 +-
 .../meta.yaml | 14 +-
 .../plug-prebuilt-cusparselt-to-cuda/run.sh | 26 +-
 .../prepare-training-data-bert/customize.py | 44 +-
 script/prepare-training-data-bert/meta.yaml | 68 +-
 .../prepare-training-data-bert/run-nvidia.sh | 12 +-
 .../run-reference.sh | 26 +-
 script/prepare-training-data-bert/run.sh | 20 +-
 .../prepare-training-data-bert/run_config.yml | 2 +-
 .../prepare-training-data-resnet/customize.py | 40 +-
 script/prepare-training-data-resnet/meta.yaml | 26 +-
 .../run-nvidia.sh | 16 +-
 .../run-reference.sh | 16 +-
 .../run_config.yml | 2 +-
 .../customize.py | 20 +-
 .../meta.yaml | 10 +-
 .../run.sh | 2 +-
 script/print-any-text/COPYRIGHT.md | 9 -
 script/print-any-text/README.md | 1 -
 script/print-any-text/customize.py | 30 -
 script/print-any-text/meta.yaml | 34 -
 script/print-any-text/run.bat | 5 -
 script/print-any-text/run.sh | 3 -
 script/print-croissant-desc/COPYRIGHT.md | 9 -
 script/print-croissant-desc/README-extra.md | 16 -
 script/print-croissant-desc/README.md | 1 -
 script/print-croissant-desc/code.py | 29 -
 script/print-croissant-desc/meta.yaml | 29 -
 script/print-croissant-desc/run.bat | 2 -
 script/print-croissant-desc/run.sh | 4 -
 script/print-hello-world-java/COPYRIGHT.md | 9 -
 script/print-hello-world-java/README.md | 1 -
 script/print-hello-world-java/code.java | 27 -
 script/print-hello-world-java/meta.yaml | 17 -
 script/print-hello-world-java/run.bat | 4 -
 script/print-hello-world-java/run.sh | 6 -
 script/print-hello-world-javac/COPYRIGHT.md | 9 -
 script/print-hello-world-javac/README.md | 1 -
 script/print-hello-world-javac/code.java | 27 -
 script/print-hello-world-javac/meta.yaml | 17 -
 script/print-hello-world-javac/run.bat | 8 -
 script/print-hello-world-javac/run.sh | 10 -
 script/print-hello-world-py/COPYRIGHT.md | 9 -
 script/print-hello-world-py/README.md | 1 -
 script/print-hello-world-py/app.py | 20 -
 script/print-hello-world-py/customize.py | 20 -
 script/print-hello-world-py/meta.yaml | 24 -
 script/print-hello-world-py/run.bat | 8 -
 script/print-hello-world-py/run.sh | 11 -
 script/print-hello-world/COPYRIGHT.md | 9 -
 script/print-hello-world/README.md | 1 -
 script/print-hello-world/meta.yaml | 48 -
 script/print-hello-world/run.bat | 16 -
 script/print-hello-world/run.sh | 18 -
 script/print-python-version/COPYRIGHT.md | 9 -
 script/print-python-version/README.md | 1 -
 script/print-python-version/meta.yaml | 15 -
 script/print-python-version/run.bat | 8 -
 script/print-python-version/run.sh | 11 -
 script/process-mlperf-accuracy/customize.py | 154 +-
 script/process-mlperf-accuracy/meta.yaml | 74 +-
 script/process-mlperf-accuracy/run.bat | 4 +-
 script/process-mlperf-accuracy/run.sh | 2 +-
 script/prune-bert-models/customize.py | 16 +-
 script/prune-bert-models/meta.yaml | 22 +-
 script/prune-bert-models/run.sh | 16 +-
 script/publish-results-to-dashboard/code.py | 34 +-
 script/publish-results-to-dashboard/run.bat | 2 +-
 script/publish-results-to-dashboard/run.sh | 2 +-
 script/pull-git-repo/customize.py | 6 +-
 script/pull-git-repo/meta.yaml | 2 +-
 script/pull-git-repo/run.bat | 8 +-
 script/pull-git-repo/run.sh | 8 +-
 script/push-csv-to-spreadsheet/google_api.py | 6 +-
 script/push-csv-to-spreadsheet/meta.yaml | 8 +-
 script/push-csv-to-spreadsheet/run.sh | 2 +-
 .../customize.py | 16 +-
 .../meta.yaml | 14 +-
 .../run.bat | 24 +-
 .../run.sh | 18 +-
 script/remote-run-commands/customize.py | 20 +-
 script/remote-run-commands/meta.yaml | 26 +-
 script/remote-run-commands/run.sh | 2 +-
 .../README.md | 381 ---
 .../customize.py | 8 +-
 .../meta.yaml | 26 +-
 .../run.sh | 16 +-
 .../customize.py | 4 +-
 .../meta.yaml | 10 +-
 .../run-resnet.sh | 8 +-
 .../reproduce-mlperf-training-nvidia/run.sh | 6 +-
 script/run-all-mlperf-models/README.md | 237 --
 script/run-all-mlperf-models/customize.py | 2 +-
 .../run-all-mlperf-models/run-bert-macos.sh | 8 +-
 script/run-all-mlperf-models/run-bert.sh | 8 +-
 .../run-cpp-implementation.sh | 8 +-
 .../run-mobilenet-models.sh | 8 +-
 .../run-all-mlperf-models/run-nvidia-4090.sh | 10 +-
 .../run-all-mlperf-models/run-nvidia-a100.sh | 10 +-
 script/run-all-mlperf-models/run-nvidia-t4.sh | 8 +-
 .../run-all-mlperf-models/run-pruned-bert.sh | 6 +-
 .../run-reference-models.sh | 8 +-
 .../run-resnet50-macos.sh | 8 +-
 script/run-all-mlperf-models/run-resnet50.sh | 8 +-
 script/run-all-mlperf-models/run-retinanet-sh | 8 +-
 script/run-all-mlperf-models/template.sh | 6 +-
 script/run-docker-container/customize.py | 160 +-
 script/run-docker-container/meta.yaml | 84 +-
 script/run-mlperf-automotive-app/customize.py | 180 +-
 script/run-mlperf-automotive-app/meta.yaml | 198 +-
 script/run-mlperf-inference-app/customize.py | 216 +-
 script/run-mlperf-inference-app/meta.yaml | 284 +-
 .../README-about.md | 107 -
 .../customize.py | 52 +-
 .../meta.yaml | 82 +-
 .../customize.py | 54 +-
 .../meta.yaml | 54 +-
 .../run.bat | 6 +-
 .../run.sh | 6 +-
 script/run-mlperf-power-client/customize.py | 34 +-
 script/run-mlperf-power-client/meta.yaml | 30 +-
 script/run-mlperf-power-client/run.sh | 6 +-
 script/run-mlperf-power-server/customize.py | 30 +-
 script/run-mlperf-power-server/meta.yaml | 32 +-
 .../customize.py | 20 +-
 .../meta.yaml | 28 +-
 .../run.sh | 4 +-
 script/run-python/meta.yaml | 2 +-
 script/run-python/run.bat | 2 +-
 script/run-python/run.sh | 2 +-
 script/run-terraform/customize.py | 20 +-
 script/run-terraform/meta.yaml | 32 +-
 script/run-terraform/run.sh | 8 +-
 script/run-vllm-server/customize.py | 194 +-
 script/run-vllm-server/meta.yaml | 198 +-
 script/run-vllm-server/run.sh | 4 +-
 script/runtime-system-infos/customize.py | 8 +-
 script/runtime-system-infos/meta.yaml | 4 +-
 .../customize.py | 6 +-
 script/set-device-settings-qaic/customize.py | 10 +-
 script/set-device-settings-qaic/meta.yaml | 8 +-
 script/set-device-settings-qaic/run.sh | 18 +-
 script/set-echo-off-win/customize.py | 2 +-
 script/set-performance-mode/customize.py | 2 +-
 script/set-performance-mode/meta.yaml | 8 +-
 script/set-performance-mode/run-ubuntu.sh | 26 +-
 script/set-performance-mode/run.sh | 8 +-
 script/set-sqlite-dir/customize.py | 2 +-
 script/set-sqlite-dir/meta.yaml | 4 +-
 script/set-sqlite-dir/run.bat | 2 +-
 script/set-sqlite-dir/run.sh | 2 +-
 script/set-user-limits/customize.py | 8 +-
 script/set-user-limits/meta.yaml | 2 +-
 script/set-user-limits/run.sh | 10 +-
 script/set-venv/customize.py | 16 +-
 script/set-venv/meta.yaml | 2 +-
 script/submit-mlperf-results/customize.py | 8 +-
 script/submit-mlperf-results/meta.yaml | 8 +-
 script/tar-my-folder/customize.py | 8 +-
 script/tar-my-folder/meta.yaml | 6 +-
 .../customize.py | 12 +-
 .../meta.yaml | 8 +-
 .../run.sh | 2 +-
 script/upgrade-python-pip/run.bat | 2 +-
 script/upgrade-python-pip/run.sh | 2 +-
 .../README-extra.md | 2 +-
 .../customize.py | 4 +-
 .../meta.yaml | 4 +-
 1049 files changed, 12373 insertions(+), 21696 deletions(-)
 delete mode 100644 automation/cfg/README-extra.md
 delete mode 100644 automation/cfg/README.md
 delete mode 100644 automation/cfg/_cm.json
 delete mode 100644 automation/cfg/module.py
 delete mode 100644 automation/experiment/README-extra.md
 delete mode 100644 automation/experiment/README.md
 delete mode 100644 automation/experiment/_cm.json
 delete mode 100644 automation/experiment/module.py
 delete mode 100644 automation/experiment/tests/test2.bat
 delete mode 100644 automation/experiment/tests/test2.sh
 delete mode 100644 automation/experiment/tests/test3.bat
 delete mode 100644 automation/experiment/tests/test3.sh
 delete mode 100644 automation/experiment/tests/test3_input.yaml
 delete mode 100644 automation/experiment/tests/test__json.bat
 delete mode 100644 automation/experiment/tests/test__json.sh
 delete mode 100644 automation/experiment/tests/test__yaml.bat
 delete mode 100644 automation/experiment/tests/test__yaml.sh
 delete mode 100644 automation/experiment/tests/test_input.json
 delete mode 100644 automation/experiment/tests/test_input.yaml
 delete mode 100644 automation/experiment/tests/test_run.bat
 delete mode 100644 automation/experiment/tests/test_run.sh
 delete mode 100644 automation/script/README-extra.md
 delete mode 100644 automation/script/README-specs.md
 delete mode 100644 automation/script/module_misc.py
 delete mode 100644 automation/utils/README.md
 delete mode 100644 automation/utils/_cm.json
 delete mode 100644 automation/utils/module.py
 delete mode 100644 automation/utils/module_cfg.py
 delete mode 100644 script/app-loadgen-generic-python/tests/modular-cm-containers/_common.bat
 delete mode 100644 script/app-loadgen-generic-python/tests/modular-cm-containers/_common.sh
 delete mode 100644 script/app-loadgen-generic-python/tests/modular-cm-containers/build.bat
 delete mode 100644 script/app-loadgen-generic-python/tests/modular-cm-containers/build.sh
 delete mode 100644 script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python--ubuntu-cpu.Dockerfile
 delete mode 100644 script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python-auto.Dockerfile
 delete mode 100644 script/app-loadgen-generic-python/tests/modular-cm-containers/run.bat
 delete mode 100644 script/app-loadgen-generic-python/tests/modular-cm-containers/run.sh
 delete mode 100644 script/app-mlperf-inference-nvidia/README-about.md
 delete mode 100644 script/build-docker-image/examples/0-common.bat
 delete mode 100644 script/build-docker-image/examples/0-generate.bat
 delete mode 100644 script/build-docker-image/examples/1-build.bat
 delete mode 100644 script/build-docker-image/examples/2-run-cm-command1.bat
 delete mode 100644 script/build-docker-image/examples/2-run-cm-command2.bat
 delete mode 100644 script/build-docker-image/examples/2-run-cm-command3.bat
 delete mode 100644 script/build-docker-image/examples/2-run-cm-command4.bat
 delete mode 100644 script/build-docker-image/examples/2-run-cm-command5.bat
 delete mode 100644 script/build-docker-image/examples/2-run-interactive1.bat
 delete mode 100644 script/build-docker-image/examples/2-run-interactive2.bat
 delete mode 100644 script/build-docker-image/examples/3-push-to-docker-hub.bat
 delete mode 100644 script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-22.04-20230804
 delete mode 100644 script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-20230804
 delete mode 100644 script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-latest
 delete mode 100644 script/build-docker-image/examples/Dockerfile.cm-script-app-image-classification-onnx-py-ubuntu-23.04-latest
 delete mode 100644 script/build-docker-image/examples/README.md
 delete mode 100644 script/build-docker-image/examples/computer_mouse.jpg
 delete mode 100644 script/build-docker-image/examples/extra-cmd.cm-script-app-image-classification-onnx-py
 delete mode 100644 script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.bat
 delete mode 100644 script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.sh
 delete mode 100644 script/build-docker-image/examples/run-cm-image-classification-python-onnx.bat
 delete mode 100644 script/build-docker-image/examples/run-cm-image-classification-python-onnx.sh
 delete mode 100644 script/build-dockerfile/README-extra.md
 delete mode 100644 script/get-ck/COPYRIGHT.md
 delete mode 100644 script/get-ck/README.md
 delete mode 100644 script/get-ck/meta.yaml
 delete mode 100644 script/get-ck/run.bat
 delete mode 100644 script/get-ck/run.sh
 delete mode 100644 script/get-cmsis_5/README-extra.md
 delete mode 100644 script/get-zephyr/README-extra.md
 delete mode 100644 script/install-rocm/run.sh
 delete mode 100644 script/print-any-text/COPYRIGHT.md
 delete mode 100644 script/print-any-text/README.md
 delete mode 100644 script/print-any-text/customize.py
 delete mode 100644 script/print-any-text/meta.yaml
 delete mode 100644 script/print-any-text/run.bat
 delete mode 100644 script/print-any-text/run.sh
 delete mode 100644 script/print-croissant-desc/COPYRIGHT.md
 delete mode 100644 script/print-croissant-desc/README-extra.md
 delete mode 100644 script/print-croissant-desc/README.md
 delete mode 100644 script/print-croissant-desc/code.py
 delete mode 100644 script/print-croissant-desc/meta.yaml
 delete mode 100644 script/print-croissant-desc/run.bat
 delete mode 100644 script/print-croissant-desc/run.sh
 delete mode 100644 script/print-hello-world-java/COPYRIGHT.md
 delete mode 100644 script/print-hello-world-java/README.md
 delete mode 100644 script/print-hello-world-java/code.java
 delete mode 100644 script/print-hello-world-java/meta.yaml
 delete mode 100644 script/print-hello-world-java/run.bat
 delete mode 100644 script/print-hello-world-java/run.sh
 delete mode 100644 script/print-hello-world-javac/COPYRIGHT.md
 delete mode 100644 script/print-hello-world-javac/README.md
 delete mode 100644 script/print-hello-world-javac/code.java
 delete mode 100644 script/print-hello-world-javac/meta.yaml
 delete mode 100644 script/print-hello-world-javac/run.bat
 delete mode 100644 script/print-hello-world-javac/run.sh
 delete mode 100644 script/print-hello-world-py/COPYRIGHT.md
 delete mode 100644 script/print-hello-world-py/README.md
 delete mode 100644 script/print-hello-world-py/app.py
 delete mode 100644 script/print-hello-world-py/customize.py
 delete mode 100644 script/print-hello-world-py/meta.yaml
 delete mode 100644 script/print-hello-world-py/run.bat
 delete mode 100644 script/print-hello-world-py/run.sh
 delete mode 100644 script/print-hello-world/COPYRIGHT.md
 delete mode 100644 script/print-hello-world/README.md
 delete mode 100644 script/print-hello-world/meta.yaml
 delete mode 100644 script/print-hello-world/run.bat
 delete mode 100644 script/print-hello-world/run.sh
 delete mode 100644 script/print-python-version/COPYRIGHT.md
 delete mode 100644 script/print-python-version/README.md
 delete mode 100644 script/print-python-version/meta.yaml
 delete mode 100644 script/print-python-version/run.bat
 delete mode 100644 script/print-python-version/run.sh
 delete mode 100644 script/reproduce-mlperf-inference-dummy/README.md
 delete mode 100644 script/run-all-mlperf-models/README.md
 delete mode 100644 script/run-mlperf-inference-mobilenet-models/README-about.md

diff --git a/.github/scripts/process_individual_tests.py b/.github/scripts/process_individual_tests.py
index d328aad63..848656d50 100644
--- a/.github/scripts/process_individual_tests.py
+++ b/.github/scripts/process_individual_tests.py
@@ -25,10 +25,10 @@
     ii = {'action': 'test',
           'target': 'script', 'item': uid,
           'quiet': 'yes', 'out': 'con'}
-    if os.environ.get('DOCKER_CM_REPO', '') != '':
-        ii['docker_cm_repo'] = os.environ['DOCKER_CM_REPO']
-    if os.environ.get('DOCKER_CM_REPO_BRANCH', '') != '':
-        ii['docker_cm_repo_branch'] = os.environ['DOCKER_CM_REPO_BRANCH']
+    if os.environ.get('DOCKER_MLC_REPO', '') != '':
+        ii['docker_cm_repo'] = os.environ['DOCKER_MLC_REPO']
+    if os.environ.get('DOCKER_MLC_REPO_BRANCH', '') != '':
+        ii['docker_cm_repo_branch'] = os.environ['DOCKER_MLC_REPO_BRANCH']
     if os.environ.get('TEST_INPUT_INDEX', '') != '':
         ii['test_input_index'] = os.environ['TEST_INPUT_INDEX']
     print(ii)
diff --git a/.github/workflows/run-individual-script-tests.yml b/.github/workflows/run-individual-script-tests.yml
index cd0f2712d..b9c31990f 100644
--- a/.github/workflows/run-individual-script-tests.yml
+++ b/.github/workflows/run-individual-script-tests.yml
@@ -34,4 +34,4 @@ jobs:
           done
           pip install mlcflow
           mlc pull repo ${{ github.event.pull_request.head.repo.html_url }} --branch=${{ github.event.pull_request.head.ref }}
-          DOCKER_CM_REPO=${{ github.event.pull_request.head.repo.html_url }} DOCKER_CM_REPO_BRANCH=${{ github.event.pull_request.head.ref }} TEST_INPUT_INDEX=${{ matrix.test-input-index }} python3 .github/scripts/process_individual_tests.py ${{ steps.getfile.outputs.files }}
+          DOCKER_MLC_REPO=${{ github.event.pull_request.head.repo.html_url }} DOCKER_MLC_REPO_BRANCH=${{ github.event.pull_request.head.ref }} TEST_INPUT_INDEX=${{ matrix.test-input-index }} python3 .github/scripts/process_individual_tests.py ${{ steps.getfile.outputs.files }}
diff --git a/.github/workflows/test-amd-mlperf-inference-implementations.yml b/.github/workflows/test-amd-mlperf-inference-implementations.yml
index 2e140c32e..512a2af8e 100644
--- a/.github/workflows/test-amd-mlperf-inference-implementations.yml
+++ b/.github/workflows/test-amd-mlperf-inference-implementations.yml
@@ -19,7 +19,7 @@ jobs:
         if [ -f "gh_action/bin/deactivate" ]; then source gh_action/bin/deactivate; fi
         python3 -m venv gh_action
         source gh_action/bin/activate
-        export CM_REPOS=$HOME/GH_CM
+        export MLC_REPOS=$HOME/GH_MLC
         pip install --upgrade cm4mlops
         cm pull repo
         cm run script --tags=run-mlperf,inference,_all-scenarios,_full,_r4.1-dev --execution_mode=valid --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=IntelSPR.24c --implementation=amd --backend=pytorch --category=datacenter --division=open --scenario=Offline --docker_dt=yes --docker_it=no --docker_cm_repo=gateoverflow@mlperf-automations --docker_cm_repo_branch=dev --adr.compiler.tags=gcc --device=rocm --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean --docker --quiet --docker_skip_run_cmd=yes
diff --git a/.github/workflows/test-intel-mlperf-inference-implementations.yml b/.github/workflows/test-intel-mlperf-inference-implementations.yml
index 166a1a77c..c70e5bb22 100644
--- a/.github/workflows/test-intel-mlperf-inference-implementations.yml
+++ b/.github/workflows/test-intel-mlperf-inference-implementations.yml
@@ -19,7 +19,7 @@ jobs:
         if [ -f "gh_action_conda/bin/deactivate" ]; then source gh_action_conda/bin/deactivate; fi
         python3 -m venv gh_action_conda
         source gh_action_conda/bin/activate
-        export CM_REPOS=$HOME/GH_CM
+        export MLC_REPOS=$HOME/GH_MLC
         pip install --upgrade cm4mlops
         pip install tabulate
         cm run script --tags=run-mlperf,inference,_all-scenarios,_submission,_full,_r4.1-dev --preprocess_submission=yes --execution_mode=valid --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=IntelSPR.24c --implementation=intel --backend=pytorch --category=datacenter --division=open --scenario=Offline --docker_dt=yes --docker_it=no --docker_cm_repo=mlcommons@mlperf-automations --docker_cm_repo_branch=dev --adr.compiler.tags=gcc --device=cpu --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean --docker --quiet
diff --git a/.github/workflows/test-mlc-based-submission-generation.yml b/.github/workflows/test-mlc-based-submission-generation.yml
index c409e955f..5c97a2d3e 100644
--- a/.github/workflows/test-mlc-based-submission-generation.yml
+++ b/.github/workflows/test-mlc-based-submission-generation.yml
@@ -67,21 +67,21 @@ jobs:
             extra_run_args=" --category=datacenter"
             description="Submission generation (system_meta.json not found in results folder)"
           elif [ "${{ matrix.case }}" == "closed" ]; then
-            extra_run_args=" --env.CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS="--skip-extra-accuracy-files-check""
+            extra_run_args=" --env.MLC_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS="--skip-extra-accuracy-files-check""
             description="Test submission - contains closed edge and datacenter"
           elif [ "${{ matrix.case }}" == "closed-no-compliance" ]; then
-            extra_run_args=" --env.CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS="--skip-extra-accuracy-files-check""
+            extra_run_args=" --env.MLC_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS="--skip-extra-accuracy-files-check""
             description="Test submission - contains closed edge and datacenter with no compliance tests"
           elif [ "${{ matrix.case }}" == "closed-power" ]; then
-            extra_run_args=" --env.CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS="--skip-extra-accuracy-files-check""
+            extra_run_args=" --env.MLC_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS="--skip-extra-accuracy-files-check""
             description="Test submission - contains closed-power edge and datacenter results"
           elif [ "${{ matrix.case }}" == "closed-failed-power-logs" ]; then
-            extra_run_args=" --env.CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS="--skip-extra-accuracy-files-check""
+            extra_run_args=" --env.MLC_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS="--skip-extra-accuracy-files-check""
            description="Test submission - contains closed-power edge and datacenter results with failed power logs"
          fi
          # Dynamically set the log group to simulate a dynamic step name
          echo "::group::$description"
-         mlc ${{ matrix.action }} script --tags=generate,inference,submission --version=v4.1 --clean --preprocess_submission=yes --results_dir=$PWD/submission_generation_tests/${{ matrix.case }}/ --run-checker --submitter=MLCommons --tar=yes --env.CM_TAR_OUTFILE=submission.tar.gz --division=${{ matrix.division }} --env.CM_DETERMINE_MEMORY_CONFIGURATION=yes --quiet $extra_run_args
+         mlc ${{ matrix.action }} script --tags=generate,inference,submission --version=v4.1 --clean --preprocess_submission=yes --results_dir=$PWD/submission_generation_tests/${{ matrix.case }}/ --run-checker --submitter=MLCommons --tar=yes --env.MLC_TAR_OUTFILE=submission.tar.gz --division=${{ matrix.division }} --env.MLC_DETERMINE_MEMORY_CONFIGURATION=yes --quiet $extra_run_args
          exit_status=$?
          echo "Exit status for the job ${description} ${exit_status}"
          if [[ "${{ matrix.case }}" == "case-5" || "${{ matrix.case }}" == "case-6" ]]; then
diff --git a/.github/workflows/test-mlc-script-features.yml b/.github/workflows/test-mlc-script-features.yml
index 8ee512bca..96dabf921 100644
--- a/.github/workflows/test-mlc-script-features.yml
+++ b/.github/workflows/test-mlc-script-features.yml
@@ -63,10 +63,10 @@ jobs:
       - name: Run docker container from dockerhub on linux
         if: runner.os == 'linux'
         run: |
-          mlc run script --tags=run,docker,container --adr.compiler.tags=gcc --docker_cm_repo=mlcommons@mlperf-automations --image_name=cm-script-app-image-classification-onnx-py --env.CM_DOCKER_RUN_SCRIPT_TAGS=app,image-classification,onnx,python --env.CM_DOCKER_IMAGE_BASE=ubuntu:22.04 --env.CM_DOCKER_IMAGE_REPO=cknowledge --quiet
+          mlc run script --tags=run,docker,container --adr.compiler.tags=gcc --docker_cm_repo=mlcommons@mlperf-automations --image_name=cm-script-app-image-classification-onnx-py --env.MLC_DOCKER_RUN_SCRIPT_TAGS=app,image-classification,onnx,python --env.MLC_DOCKER_IMAGE_BASE=ubuntu:22.04 --env.MLC_DOCKER_IMAGE_REPO=cknowledge --quiet
       - name: Run docker container locally on linux
         if: runner.os == 'linux'
         run: |
-          mlc run script --tags=run,docker,container --adr.compiler.tags=gcc --docker_cm_repo=mlcommons@mlperf-automations --image_name=mlc-script-app-image-classification-onnx-py --env.CM_DOCKER_RUN_SCRIPT_TAGS=app,image-classification,onnx,python --env.CM_DOCKER_IMAGE_BASE=ubuntu:22.04 --env.CM_DOCKER_IMAGE_REPO=local --quiet
+          mlc run script --tags=run,docker,container --adr.compiler.tags=gcc --docker_cm_repo=mlcommons@mlperf-automations --image_name=mlc-script-app-image-classification-onnx-py --env.MLC_DOCKER_RUN_SCRIPT_TAGS=app,image-classification,onnx,python --env.MLC_DOCKER_IMAGE_BASE=ubuntu:22.04 --env.MLC_DOCKER_IMAGE_REPO=local --quiet
diff --git a/.github/workflows/test-mlperf-inference-dlrm.yml b/.github/workflows/test-mlperf-inference-dlrm.yml
index f18b51b4d..749849842 100644
--- a/.github/workflows/test-mlperf-inference-dlrm.yml
+++ b/.github/workflows/test-mlperf-inference-dlrm.yml
@@ -22,7 +22,7 @@ jobs:
       run: |
         source gh_action/bin/deactivate || python3 -m venv gh_action
         source gh_action/bin/activate
-        export CM_REPOS=$HOME/GH_CM
+        export MLC_REPOS=$HOME/GH_MLC
         python3 -m pip install cm4mlops
         cm pull repo
         cm run script --tags=run-mlperf,inference,_performance-only --pull_changes=yes --pull_inference_changes=yes --submitter="MLCommons" --model=dlrm-v2-99 --implementation=reference --backend=pytorch --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=1 --docker_it=no --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --clean
@@ -42,7 +42,7 @@ jobs:
       run: |
         source gh_action/bin/deactivate || python3 -m venv gh_action
         source gh_action/bin/activate
-        export CM_REPOS=$HOME/GH_CM
+        export MLC_REPOS=$HOME/GH_MLC
         python3 -m pip install cm4mlops
         cm pull repo
         cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --model=dlrm-v2-99 --implementation=intel --batch_size=1 --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=1 --docker_it=no --docker_cm_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean
diff --git a/.github/workflows/test-mlperf-inference-gptj.yml b/.github/workflows/test-mlperf-inference-gptj.yml
index bf2921bd2..26543d98e 100644
--- a/.github/workflows/test-mlperf-inference-gptj.yml
+++ b/.github/workflows/test-mlperf-inference-gptj.yml
@@ -23,7 +23,7 @@ jobs:
       run: |
         source gh_action/bin/deactivate || python3 -m venv gh_action
         source gh_action/bin/activate
-        export CM_REPOS=$HOME/GH_CM
+        export MLC_REPOS=$HOME/GH_MLC
         python3 -m pip install cm4mlops
         cm pull repo
         cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --docker --pull_changes=yes --pull_inference_changes=yes --model=gptj-99 --backend=${{ matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --target_qps=1 --quiet --docker_it=no --docker_cm_repo=gateoverflow@mlperf-automations --docker_cm_repo_branch=dev --adr.compiler.tags=gcc --beam_size=1 --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --get_platform_details=yes --implementation=reference --clean
diff --git a/.github/workflows/test-mlperf-inference-llama2.yml b/.github/workflows/test-mlperf-inference-llama2.yml
index 986ee21be..ec52d6b06 100644
--- a/.github/workflows/test-mlperf-inference-llama2.yml
+++ b/.github/workflows/test-mlperf-inference-llama2.yml
@@ -24,12 +24,12 @@ jobs:
       run: |
         source gh_action/bin/deactivate || python3 -m venv gh_action
         source gh_action/bin/activate
-        export CM_REPOS=$HOME/GH_CM
+        export MLC_REPOS=$HOME/GH_MLC
         pip install cm4mlops
         pip install tabulate
         cm pull repo
         pip install "huggingface_hub[cli]"
         git config --global credential.helper store
         huggingface-cli login --token ${{ secrets.HF_TOKEN }} --add-to-git-credential
-        cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --model=llama2-70b-99 --implementation=reference --backend=${{ matrix.backend }} --precision=${{ matrix.precision }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=0.001 --docker_it=no --docker_cm_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --env.CM_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST=yes --clean
+
cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --model=llama2-70b-99 --implementation=reference --backend=${{ matrix.backend }} --precision=${{ matrix.precision }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=0.001 --docker_it=no --docker_cm_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --env.MLC_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST=yes --clean cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions" --quiet --submission_dir=$HOME/gh_action_submissions diff --git a/.github/workflows/test-mlperf-inference-mixtral.yml b/.github/workflows/test-mlperf-inference-mixtral.yml index c234d464e..174ae82a6 100644 --- a/.github/workflows/test-mlperf-inference-mixtral.yml +++ b/.github/workflows/test-mlperf-inference-mixtral.yml @@ -25,11 +25,11 @@ jobs: run: | source gh_action/bin/deactivate || python3 -m venv gh_action source gh_action/bin/activate - export CM_REPOS=$HOME/GH_CM + export MLC_REPOS=$HOME/GH_MLC pip install cm4mlops pip install "huggingface_hub[cli]" git config --global credential.helper store huggingface-cli login --token ${{ secrets.HF_TOKEN }} --add-to-git-credential cm pull repo - cm run script --tags=run-mlperf,inference,_submission,_short --adr.inference-src.tags=_branch.dev --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --model=mixtral-8x7b --implementation=reference --batch_size=1 --precision=${{ matrix.precision }} --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker_it=no --docker_cm_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --docker --quiet --test_query_count=3 --target_qps=0.001 --clean --env.CM_MLPERF_MODEL_MIXTRAL_8X7B_DOWNLOAD_TO_HOST=yes --env.CM_MLPERF_DATASET_MIXTRAL_8X7B_DOWNLOAD_TO_HOST=yes --adr.openorca-mbxp-gsm8k-combined-preprocessed.tags=_size.1 + cm run script --tags=run-mlperf,inference,_submission,_short --adr.inference-src.tags=_branch.dev --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --model=mixtral-8x7b --implementation=reference --batch_size=1 --precision=${{ matrix.precision }} --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker_it=no --docker_cm_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --docker --quiet --test_query_count=3 --target_qps=0.001 --clean --env.MLC_MLPERF_MODEL_MIXTRAL_8X7B_DOWNLOAD_TO_HOST=yes --env.MLC_MLPERF_DATASET_MIXTRAL_8X7B_DOWNLOAD_TO_HOST=yes --adr.openorca-mbxp-gsm8k-combined-preprocessed.tags=_size.1 cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions - GO-phoenix" --quiet 
--submission_dir=$HOME/gh_action_submissions diff --git a/.github/workflows/test-mlperf-inference-resnet50.yml b/.github/workflows/test-mlperf-inference-resnet50.yml index 6114d1dff..5ee480e21 100644 --- a/.github/workflows/test-mlperf-inference-resnet50.yml +++ b/.github/workflows/test-mlperf-inference-resnet50.yml @@ -13,7 +13,7 @@ jobs: mlperf-inference-r50: runs-on: ${{ matrix.os }} env: - CM_INDEX: "on" + MLC_INDEX: "on" strategy: fail-fast: false matrix: @@ -74,4 +74,4 @@ jobs: git commit -a -m "Test commit" git push https://x-access-token:${{ env.PAT }}@github.com/mlcommons/mlperf_inference_test_submissions_v5.0 - # mlcr --tags=push,github,mlperf,inference,submission --env.CM_GITHUB_PAT=pat --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet + # mlcr --tags=push,github,mlperf,inference,submission --env.MLC_GITHUB_PAT=pat --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet diff --git a/.github/workflows/test-mlperf-inference-rnnt.yml b/.github/workflows/test-mlperf-inference-rnnt.yml index 9937f9260..1d77ee282 100644 --- a/.github/workflows/test-mlperf-inference-rnnt.yml +++ b/.github/workflows/test-mlperf-inference-rnnt.yml @@ -30,7 +30,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Install dependencies on Unix Platforms run: | - CM_PULL_DEFAULT_MLOPS_REPO=no pip install cm4mlops + MLC_PULL_DEFAULT_MLOPS_REPO=no pip install cm4mlops - name: Pull MLOps repository run: | cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }} diff --git a/.github/workflows/test-mlperf-inference-sdxl.yaml b/.github/workflows/test-mlperf-inference-sdxl.yaml index 402424b54..cbdb0bd04 100644 --- a/.github/workflows/test-mlperf-inference-sdxl.yaml +++ b/.github/workflows/test-mlperf-inference-sdxl.yaml @@ -18,8 +18,8 @@ jobs: run: | source gh_action/bin/deactivate || python3 -m venv gh_action source gh_action/bin/activate - export CM_REPOS=$HOME/GH_CM + export MLC_REPOS=$HOME/GH_MLC python3 -m pip install cm4mlops cm pull repo - cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --docker --model=sdxl --backend=${{ matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --quiet --docker_it=no --docker_cm_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --env.CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean + cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --docker --model=sdxl --backend=${{ matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --quiet --docker_it=no --docker_cm_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update 
--commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/gh_action_submissions diff --git a/.github/workflows/test-mlperf-inference-tvm.yml b/.github/workflows/test-mlperf-inference-tvm.yml index fa363d65c..8ecf27fa2 100644 --- a/.github/workflows/test-mlperf-inference-tvm.yml +++ b/.github/workflows/test-mlperf-inference-tvm.yml @@ -27,7 +27,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Install dependencies on Unix Platforms run: | - CM_PULL_DEFAULT_MLOPS_REPO=no pip install cm4mlops + MLC_PULL_DEFAULT_MLOPS_REPO=no pip install cm4mlops - name: Pull MLOps repository run: | cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }} diff --git a/.github/workflows/test-nvidia-mlperf-inference-implementations.yml b/.github/workflows/test-nvidia-mlperf-inference-implementations.yml index 23cd33e54..5ff906bbd 100644 --- a/.github/workflows/test-nvidia-mlperf-inference-implementations.yml +++ b/.github/workflows/test-nvidia-mlperf-inference-implementations.yml @@ -47,8 +47,8 @@ jobs: if [ -f "gh_action/bin/deactivate" ]; then source gh_action/bin/deactivate; fi python3 -m venv gh_action source gh_action/bin/activate - export CM_REPOS=$HOME/GH_CM - CM_PULL_DEFAULT_MLOPS_REPO=no pip install --upgrade cm4mlops + export MLC_REPOS=$HOME/GH_MLC + MLC_PULL_DEFAULT_MLOPS_REPO=no pip install --upgrade cm4mlops cm pull repo cm run script --tags=run-mlperf,inference,_all-scenarios,_submission,_full,_r4.1-dev --preprocess_submission=yes --pull_changes=yes --pull_inference_changes=yes --execution_mode=valid --gpu_name=$gpu_name --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=$hw_name --implementation=nvidia --backend=tensorrt --category=datacenter,edge --division=closed --docker_dt=yes --docker_it=no --docker_cm_repo=mlcommons@mlperf-automations --docker_cm_repo_branch=dev --adr.compiler.tags=gcc --device=cuda --use_model_from_host=yes --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean $docker_string --quiet diff --git a/.github/workflows/test-qaic-compute-sdk-build.yml b/.github/workflows/test-qaic-compute-sdk-build.yml index 6bb069685..6eb901fed 100644 --- a/.github/workflows/test-qaic-compute-sdk-build.yml +++ b/.github/workflows/test-qaic-compute-sdk-build.yml @@ -26,7 +26,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | - CM_PULL_DEFAULT_MLOPS_REPO=no pip install cm4mlops + MLC_PULL_DEFAULT_MLOPS_REPO=no pip install cm4mlops cm run script --tags=get,sys-utils-cm --quiet - name: Test QAIC Compute SDK for compilation diff --git a/.github/workflows/test-scc24-sdxl.yaml b/.github/workflows/test-scc24-sdxl.yaml index 50849e7b1..61fac51db 100644 --- a/.github/workflows/test-scc24-sdxl.yaml +++ b/.github/workflows/test-scc24-sdxl.yaml @@ -9,8 +9,8 @@ jobs: if: github.repository_owner == 'gateoverflow' runs-on: [ self-hosted, linux, x64, GO-spr ] env: - CM_DOCKER_REPO: mlcommons@mlperf-automations - CM_DOCKER_REPO_BRANCH: dev + MLC_DOCKER_REPO: mlcommons@mlperf-automations + MLC_DOCKER_REPO_BRANCH: dev strategy: fail-fast: false matrix: @@ -24,21 +24,21 @@ jobs: if [ -f "gh_action/bin/deactivate" ]; then source gh_action/bin/deactivate; fi python3 -m venv gh_action source gh_action/bin/activate - export CM_REPOS=$HOME/GH_CM + export MLC_REPOS=$HOME/GH_MLC pip install --upgrade mlcflow pip install tabulate mlc 
pull repo - mlcr --tags=run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --pull_changes=yes --pull_inference_changes=yes --model=sdxl --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_cm_repo=$CM_DOCKER_REPO --docker_cm_repo_branch=$CM_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean - mlcr --tags=run-mlperf,inference,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_cm_repo=$CM_DOCKER_REPO --docker_cm_repo_branch=$CM_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean - mlcr --tags=generate,inference,submission --clean --run-checker --tar=yes --env.CM_TAR_OUTFILE=submission.tar.gz --division=open --category=datacenter --run_style=test --adr.submission-checker.tags=_short-run --quiet --submitter=MLCommons --submission_dir=$HOME/scc_gh_action_submissions --results_dir=$HOME/scc_gh_action_results/test_results + mlcr --tags=run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --pull_changes=yes --pull_inference_changes=yes --model=sdxl --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_cm_repo=$MLC_DOCKER_REPO --docker_cm_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean + mlcr --tags=run-mlperf,inference,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_cm_repo=$MLC_DOCKER_REPO --docker_cm_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean + mlcr --tags=generate,inference,submission --clean --run-checker --tar=yes --env.MLC_TAR_OUTFILE=submission.tar.gz --division=open --category=datacenter --run_style=test --adr.submission-checker.tags=_short-run --quiet --submitter=MLCommons --submission_dir=$HOME/scc_gh_action_submissions --results_dir=$HOME/scc_gh_action_results/test_results mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/cm4mlperf-inference --repo_branch=mlperf-inference-results-scc24 --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/scc_gh_action_submissions build_nvidia: if: github.repository_owner == 'gateoverflow' runs-on: [ self-hosted, linux, x64, GO-spr] env: - CM_DOCKER_REPO: mlcommons@mlperf-automations - CM_DOCKER_REPO_BRANCH: dev + MLC_DOCKER_REPO: mlcommons@mlperf-automations + MLC_DOCKER_REPO_BRANCH: dev 
strategy: fail-fast: false matrix: @@ -52,11 +52,11 @@ jobs: if [ -f "gh_action/bin/deactivate" ]; then source gh_action/bin/deactivate; fi python3 -m venv gh_action source gh_action/bin/activate - export CM_REPOS=$HOME/GH_CM + export MLC_REPOS=$HOME/GH_MLC pip install --upgrade mlcflow pip install tabulate mlc pull repo - mlcr --tags=run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --pull_changes=yes --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_cm_repo=$CM_DOCKER_REPO --docker_cm_repo_branch=$CM_DOCKER_REPO_BRANCH --docker_dt=yes --pull_changes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --hw_name=go-spr --custom_system_nvidia=yes --clean - mlcr --tags=run-mlperf,inference,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_cm_repo=$CM_DOCKER_REPO --docker_cm_repo_branch=$CM_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean - mlcr --tags=generate,inference,submission --clean --run-checker --tar=yes --env.CM_TAR_OUTFILE=submission.tar.gz --division=open --category=datacenter --run_style=test --adr.submission-checker.tags=_short-run --quiet --submitter=MLCommons --submission_dir=$HOME/scc_gh_action_submissions --results_dir=$HOME/scc_gh_action_results/test_results + mlcr --tags=run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --pull_changes=yes --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_cm_repo=$MLC_DOCKER_REPO --docker_cm_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --pull_changes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --hw_name=go-spr --custom_system_nvidia=yes --clean + mlcr --tags=run-mlperf,inference,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_cm_repo=$MLC_DOCKER_REPO --docker_cm_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean + mlcr --tags=generate,inference,submission --clean --run-checker --tar=yes --env.MLC_TAR_OUTFILE=submission.tar.gz --division=open --category=datacenter --run_style=test --adr.submission-checker.tags=_short-run --quiet --submitter=MLCommons --submission_dir=$HOME/scc_gh_action_submissions --results_dir=$HOME/scc_gh_action_results/test_results mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/cm4mlperf-inference --repo_branch=mlperf-inference-results-scc24 --commit_message="Results from 
self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/scc_gh_action_submissions diff --git a/automation/cache/module_misc.py b/automation/cache/module_misc.py index 91ecbd5a1..d4eb76d1c 100644 --- a/automation/cache/module_misc.py +++ b/automation/cache/module_misc.py @@ -86,7 +86,7 @@ def copy_to_remote(i): new_env = cm_cached_state['new_env'] new_state = cm_cached_state['new_state'] # Todo fix new state cm_repos_path = os.environ.get( - 'CM_REPOS', os.path.join( + 'MLC_REPOS', os.path.join( os.path.expanduser("~"), "CM", "repos")) cm_cache_path = os.path.realpath( os.path.join(cm_repos_path, "local", "cache")) diff --git a/automation/cfg/README-extra.md b/automation/cfg/README-extra.md deleted file mode 100644 index cc94030ab..000000000 --- a/automation/cfg/README-extra.md +++ /dev/null @@ -1,8 +0,0 @@ -Examples: - -```bash -cm set cfg default -cm set cfg default --key.script.silent -cm set cfg default --key.script.silent- - -``` diff --git a/automation/cfg/README.md b/automation/cfg/README.md deleted file mode 100644 index 3c82852c8..000000000 --- a/automation/cfg/README.md +++ /dev/null @@ -1,27 +0,0 @@ -*This README is automatically generated - don't edit! Use `README-extra.md` for extra notes!* - -### Automation actions - -#### test - - * CM CLI: ```cm test cfg``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cfg/module.py#L15)) - * CM CLI with UID: ```cm test cfg,88dce9c160324c5d``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cfg/module.py#L15)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'test' - 'automation':'cfg,88dce9c160324c5d' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cfg/module.py#L15) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -### Maintainers - -* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce) \ No newline at end of file diff --git a/automation/cfg/_cm.json b/automation/cfg/_cm.json deleted file mode 100644 index 9a1dc030e..000000000 --- a/automation/cfg/_cm.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "action_substitutions": { - "set":"xset" - }, - "alias": "cfg", - "automation_alias": "automation", - "automation_uid": "bbeb15d8f0a944a4", - "tags": [ - "automation" - ], - "uid": "88dce9c160324c5d" -} diff --git a/automation/cfg/module.py b/automation/cfg/module.py deleted file mode 100644 index 6fff7d802..000000000 --- a/automation/cfg/module.py +++ /dev/null @@ -1,259 +0,0 @@ -# Universal cfg for CM automations -# -# Written by Grigori Fursin - -import os - -from cmind.automation import Automation -from cmind import utils - - -class CAutomation(Automation): - """ - Automation actions - """ - - ############################################################ - def __init__(self, cmind, automation_file): - super().__init__(cmind, __file__) - - ############################################################ - def test(self, i): - """ - Test automation - - Args: - (CM input dict): - - (out) (str): if 'con', output to console - - automation (str): automation as CM string object - - parsed_automation (list): prepared in CM CLI or CM access function - [ (automation alias, automation UID) ] or - [ (automation alias, automation UID), (automation repo alias, automation repo UID) ] - - (artifact) (str): artifact as CM string object - - (parsed_artifact) (list): prepared in CM CLI or 
CM access function - [ (artifact alias, artifact UID) ] or - [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] - - ... - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - - * Output from this automation action - - """ - - import json - print(json.dumps(i, indent=2)) - - return {'return': 0} - - ############################################################ - def xset(self, i): - """ - Set keys in configuration - - Args: - (CM input dict): - - (out) (str): if 'con', output to console - - (artifact) (str): CM artifact with configuration - (tags) (str): list of tags to find CM artifact with configuration - - (key) (dict): updating config - ... - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - - * Output from this automation action - - """ - - import json - - r = self._find_cfg_artifact(i) - if r['return'] > 0: - return r - - # Path to cfg - path = r['path'] - path_to_config = r['path_to_config'] - config = r['config'] - - # Clean input to leave only keys for the configuration - new_config = i.get('key', {}) - - # If new config is empty, just print existing config - if len(new_config) > 0: - # Check if need to delete some - def check_to_delete(d): - - for k in list(d.keys()): - v = d[k] - if isinstance(v, dict): - check_to_delete(v) - else: - if k.endswith('-'): - if k[:-1] in d: - del (d[k[:-1]]) - del (d[k]) - else: - vsl = str(v).lower() - if vsl == 'none': - v = None - elif vsl == 'false': - v = False - elif vsl == 'true': - v = True - - d[k] = v - - utils.merge_dicts({'dict1': config, - 'dict2': new_config, - 'append_lists': True, - 'append_unique': True}) - - check_to_delete(config) - - r = utils.save_json(path_to_config, config) - if r['return'] > 0: - return r - - # Print config - print('Config:') - print('') - print(json.dumps(config, indent=2)) - - return {'return': 0} - - ############################################################ - def load(self, i): - """ - Load configuration - - Args: - (CM input dict): - - (out) (str): if 'con', output to console - - (artifact) (str): CM artifact with configuration - (tags) (str): list of tags to find CM artifact with configuration - ... - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - - * Output from this automation action - - """ - - return self._find_cfg_artifact(i) - - ############################################################ - def _find_cfg_artifact(self, i): - """ - Args: - (CM input dict): - - (out) (str): if 'con', output to console - - (artifact) (str): CM artifact with configuration - (tags) (str): list of tags to find CM artifact with configuration - ... 
- - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - - * Output from this automation action - - """ - - # Clean input to find artifact - ii = utils.sub_input(i, self.cmind.cfg['artifact_keys'] + ['tags']) - - parsed_artifact = i.get('parsed_artifact', []) - - artifact_obj = parsed_artifact[0] if len(parsed_artifact) > 0 else None - artifact_repo = parsed_artifact[1] if len( - parsed_artifact) > 1 else None - - artifact = i.get('artifact', '') - - if artifact == '': - ii['artifact'] = 'default' - - tags = ii.get('tags', '') - - if 'cm-universal-cfg' not in tags: - if tags != '': - tags += ',' - tags += 'cm-universal-cfg' - - ii['tags'] = tags - - automation = ii['automation'] - if automation != '.' and ',' not in automation: - ii['automation'] = automation + ',' + self.meta['uid'] - - # Add placeholder (use common action) - - ii['action'] = 'find' - ii['out'] = '' - # Avoid recursion - use internal CM add function to add the script - # artifact - ii['common'] = True - - r = self.cmind.access(ii) - if r['return'] > 0: - return r - - lst = r['list'] - - if len(lst) == 0: - ii['action'] = 'add' - ii['meta'] = {} - - # Tags must be unique for default - r = self.cmind.access(ii) - if r['return'] > 0: - return r - - path = r['path'] - elif len(lst) > 1: - return { - 'return': 1, 'error': 'ambiguity in cfg name - more than 1 CM artifact found'} - else: - path = lst[0].path - - # Check if has config - path_to_cfg = os.path.join(path, 'config.json') - - config = {} - if os.path.isfile(path_to_cfg): - r = utils.load_json(path_to_cfg) - if r['return'] > 0: - return r - - config = r['meta'] - - return {'return': 0, 'path': path, - 'path_to_config': path_to_cfg, 'config': config} diff --git a/automation/experiment/README-extra.md b/automation/experiment/README-extra.md deleted file mode 100644 index 454c8d6ac..000000000 --- a/automation/experiment/README-extra.md +++ /dev/null @@ -1,315 +0,0 @@ -[ [Back to index](../../../docs/README.md) ] - -
-<details> -<summary>Click here to see the table of contents.</summary> - -* [CM "experiment" automation](#cm-"experiment"-automation) - * [Introducing CM experiment automation](#introducing-cm-experiment-automation) - * [Installing CM with ResearchOps/DevOps/MLOps automations](#installing-cm-with-researchops/devops/mlops-automations) - * [Understanding CM experiments](#understanding-cm-experiments) - * [Exploring combinations of parameters (autotuning, design space exploration)](#exploring-combinations-of-parameters-autotuning-design-space-exploration) - * [Aggregating and unifying results](#aggregating-and-unifying-results) - * [Visualizing results](#visualizing-results) - * [Sharing experiments with the community](#sharing-experiments-with-the-community) - * [Running CM experiments with CM scripts](#running-cm-experiments-with-cm-scripts) - * [Further community developments](#further-community-developments) - -</details>
- -# CM "experiment" automation - -*We suggest you check [CM introduction](https://github.com/mlcommons/ck/blob/master/docs/introduction-cm.md), - [CM CLI/API](https://github.com/mlcommons/ck/blob/master/docs/interface.md) - and [CM scripts](../script/README-extra.md) to understand the CM motivation and concepts. - You can also try [CM tutorials](https://github.com/mlcommons/ck/blob/master/docs/tutorials/README.md) - to run some applications and benchmarks on your platform using CM scripts.* - -## Introducing CM experiment automation - - -Researchers, engineers and students spend a considerable amount of their time experimenting with -many different settings of applications, tools, compilers, software and hardware -to find the optimal combination suitable for their use cases. - -Based on their feedback, our [MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) -started developing a CM automation called "experiment". -The goal is to provide a common interface to run, record, share, visualize and reproduce experiments -on any platform with any software, hardware and data. - -The community helped us test a prototype of our "experiment" automation to record results in a unified CM format -from [several MLPerf benchmarks](https://github.com/mlcommons/cm4mlperf-results) -including [MLPerf inference](https://github.com/mlcommons/inference) and [MLPerf Tiny](https://github.com/mlcommons/tiny), -visualize them at the [MLCommons CM platform](https://access.cknowledge.org/playground/?action=experiments&tags=all), -and let the community improve them via [public benchmarking, optimization and reproducibility challenges](https://access.cknowledge.org/playground/?action=challenges). - - - -## Installing CM with ResearchOps/DevOps/MLOps automations - -This CM automation is available in the most commonly used `mlcommons@cm4mlops` repository. - -First, install the CM automation language as described [here](https://github.com/mlcommons/ck/blob/master/docs/installation.md). -Then, install or update this repository as follows: -```bash -cm pull repo mlcommons@cm4mlops -``` - -You can now test that the CM experiment automation is available as follows: -```bash -cm run experiment --help -``` -or using the `cme` shortcut in CM V1.4.1+: -```bash -cme --help -``` - - - -## Understanding CM experiments - -CM experiment simply wraps any user command line, creates an associated CM `experiment` artifact with a random ID (16 lowercase hex characters) -and some user tags in `_cm.json`, creates an extra `{date}{time}` subdirectory with a `cm-input.json` file capturing the CM input, -and executes the user command line inside an extra subdirectory with another random ID, as shown below. - -The following command will print "Hello World!" while recording all the provenance in CM format in the local CM repository: - -```bash -cme --tags=my,experiment,hello-world -- echo "Hello World!" -``` -or -```bash -cm run experiment --tags=my,experiment,hello-world -- echo "Hello World!" -``` - -You should see output similar to the following: -```bash - -Path to CM experiment artifact: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945 -Path to experiment: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945\2023-06-09.09-58-02.863466 -================================================================ -Experiment step: 1 out of 1 - -Path to experiment step: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945\2023-06-09.09-58-02.863466\7ed0ea0edd6b4dd7 - -"Hello World!"
- ``` - -You can find and explore the newly created CM artifact as follows: -```bash -cm find experiment --tags=my,experiment,hello-world -``` -or using the UID: -```bash -cm find experiment b83a1fb24dbf4945 -``` - -When running the same experiment again, CM will find the existing artifact by tags and create a new {date}{time} directory there: -```bash -cme --tags=my,experiment,hello-world -- echo "Hello World!" - -Path to CM experiment artifact: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945 -Path to experiment: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945\2023-06-09.10-02-08.911210 -================================================================ -Experiment step: 1 out of 1 - -Path to experiment step: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945\2023-06-09.10-02-08.911210\7ed0ea0edd6b4dd7 - -"Hello World!" -``` - -You can now replay this experiment as follows: -```bash -cm replay experiment --tags=my,experiment,hello-world -``` - -Note that you can obtain the current directory where you called CM -(rather than the CM experiment artifact directory) via the {{CD}} variable as follows: -```bash -cme --tags=my,experiment,hello-world -- echo {{CD}} -``` - -You can also record experiments in another CM repository instead of the `local` one as follows: -```bash -cm list repo -cme {CM repository from above list}: --tags=my,experiment,hello-world -- echo {{CD}} -``` - -Finally, you can force a specific artifact name instead of some random ID as follows: -```bash -cme {my experiment artifact name} --tags=my,experiment,hello-world -- echo {{CD}} -``` -or with a given repository: -```bash -cme {CM repository from above list}:{my experiment artifact name} --tags=my,experiment,hello-world -- echo {{CD}} -``` - -## Exploring combinations of parameters (autotuning, design space exploration) - -One of the most common tasks in computer engineering (and other sciences) -is to explore various combinations of parameters of some applications -and systems to select the optimal ones to trade off performance, accuracy, -power consumption, memory usage and other characteristics. - -As a starting point, we have implemented a very simple explorer as a Cartesian product -of any number of specified variables that are passed to a user command line via double curly braces `{{VAR}}`, similar to GitHub. - -You just need to create a simple JSON file `cm-input.json` to describe sets/ranges for each variable as follows: -```json -{ - "explore": { - "VAR1": [ - 1, - 2, - 3 - ], - "VAR2": [ - "a", - "b" - ], - "VAR3": "[2**i for i in range(0,6)]" - } -} -``` - -or YAML `cm-input.yaml`: - -```yaml -explore: - VAR1: [1,2,3] - VAR2: ["a","b"] - VAR3: "[2**i for i in range(0,6)]" -``` - -You can then run the following example to see all iterations: -```bash -cm run experiment --tags=my,experiment,hello-world @test_input.yaml \ - -- echo %VAR1% --batch_size={{VAR1}} {{VAR2}} {{VAR4{['xx','yy','zz']}}}-%%VAR3%% -``` - -Note that you can also define a Python list or range for other variables -directly in the command line, as demonstrated in the above example for `VAR4` - `{{VAR4{['xx','yy','zz']}}}`. - -CM will create or reuse the experiment artifact with tags `my,experiment,hello-world` -and will then iterate over the Cartesian product of all detected variables. - -For each iteration, CM will create a `{date}{time}` subdirectory in the given experiment artifact -and will then run the user command line there with the variables substituted.
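Conceptually, this expansion is a Cartesian product over the declared variables, with string values evaluated into Python lists first. Below is a minimal, self-contained sketch of that step (illustrative names only, not the CM API; it mirrors the expansion logic in `automation/experiment/module.py`):

```python
import itertools

# Same shape as the "explore" section of cm-input.json above
explore = {
    "VAR1": [1, 2, 3],
    "VAR2": ["a", "b"],
    "VAR3": "[2**i for i in range(0, 6)]",  # string expressions are evaluated
}

explore_keys = []
explore_dimensions = []
for key, values in explore.items():
    explore_keys.append(key)
    if not isinstance(values, list):
        values = eval(values)  # turn "[2**i for i in range(0,6)]" into a list
    explore_dimensions.append(values)

# Every combination becomes one experiment step with its own environment
for step, point in enumerate(itertools.product(*explore_dimensions), start=1):
    env = {k: str(v) for k, v in zip(explore_keys, point)}
    print(f"Experiment step {step}: {env}")
```

With the three variables above, this prints 3 x 2 x 6 = 36 steps, matching the number of `{date}{time}` subdirectories CM would create.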
- -You can then replay any of the exploration experiments as follows: -```bash -cm replay experiment --tags={tags} --dir={sub directory} -``` - - - -## Aggregating and unifying results - -Users can expose any information, such as measured characteristics of their applications and/or systems (performance, -hardware or OS state, accuracy, internal parameters, etc.), to CM for further analysis and visualization -by generating a JSON `cm-result.json` file with any dictionary. - -If this file exists after executing a user command, CM will load it after each experiment or exploration step, -and merge it with a list in a common `cm-result.json` in the `{date}{time}` directory for this experiment. - - - -## Visualizing results - -Users can now visualize multiple experiments using the CM GUI script as follows: -```bash -cm run script "gui _graph" --exp_tags=my,experiment,hello-world -``` - -This script will search for all CM experiment entries with these tags, read all `cm-result.json` files, -detect all keys used in the result dictionaries, and let users select these keys for the X and Y axes -to prepare a 2D graph using the popular [Streamlit library](https://streamlit.io), add derived metrics and set constraints, -as shown in the following example for one of the official [Tiny MLPerf submissions](https://github.com/mlcommons/tiny): - -![](../../script/import-mlperf-tiny-to-experiment/assets/cm-visualization-and-customization-of-tinymlperf-results2.png) - - - - - - -## Sharing experiments with the community - -It is possible to share experiments with a common automation interface -via your own GitHub/GitLab repository, a container, or a zip/tar file -in a non-intrusive way. - -You need to go to the root directory of your project and initialize a CM repository there -with a unique name "my-cool-project" as follows: - -```bash -cm init repo my-cool-project --path=. --prefix=cmr -``` - -This command will create a `cmr.yaml` file with a description and unique ID of this repository, -and will register it in CM. Note that all CM automations and artifacts will be located -in the `cmr` sub-directory to avoid contaminating your project. They can be deleted -or moved to another project at any time. - -You can now record new experiments in this repository by adding `my-cool-project:` to the cm experiment command line as follows: -```bash -cm run experiment my-cool-project: --tags=my,experiment,hello-world -- echo "Hello World!" -``` - -You can also move a set of existing experiments from the `local` CM repository to the new one as follows: -```bash -cm move experiment my-cool-project: --tags=my,experiment,hello-world -``` - -You can continue replaying these experiments in the same way, no matter which CM repository they are in: -```bash -cm replay experiment --tags=my,experiment,hello-world -``` - -or you can enforce a specific repository as follows: -```bash -cm replay experiment my-cool-project: --tags=my,experiment,hello-world -``` - - - - - -## Running CM experiments with CM scripts - -User scripts and tools may contain hardwired local paths that prevent replaying them on another platform. -In such cases, we suggest using [CM scripts](/../script/README-extra.md). - -CM scripts solve this problem by wrapping existing user scripts and tools and detecting/resolving paths -to specific tools and artifacts on a given user platform.
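Before invoking a wrapped script, CM substitutes the `{{VAR}}` placeholders (as in the test command shown next) with values from the exploration environment. A minimal sketch of that substitution, assuming simple alphanumeric variable names (the `module.py` removed by this patch performs the same replacement with an explicit scan loop; this regex-based helper is illustrative, not the CM API):

```python
import re

def substitute_placeholders(cmd: str, env: dict) -> str:
    """Replace {{VAR}} placeholders in a command line with values from env.
    Unknown variables raise an error, mirroring module.py's behavior."""
    def repl(match):
        var = match.group(1)
        if var not in env:
            raise KeyError(f'key "{var}" is not in env during exploration')
        return str(env[var])
    return re.sub(r"\{\{(\w+)\}\}", repl, cmd)

# Prints: echo 1 --opt=a
print(substitute_placeholders("echo {{VAR1}} --opt={{VAR2}}",
                              {"VAR1": 1, "VAR2": "a"}))
```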
- -You can find an example of using CM scripts with CM experiments in [this directory](tests) - see `test3.bat` or `test3.sh`: -```bash -cm run experiment --tags=test @test3_input.yaml -- cm run script "print hello-world native" --env.CM_ENV_TEST1={{VAR1}} --const.CM_ENV_TEST2={{VAR2}} -``` - -You can use the following environment variables to pass the current path, -the different paths to experiment entries, and the experiment step number to your CM script: -* {{CD}} -* {{CM_EXPERIMENT_STEP}} -* {{CM_EXPERIMENT_PATH}} -* {{CM_EXPERIMENT_PATH2}} -* {{CM_EXPERIMENT_PATH3}} - - -Feel free to check [this tutorial](../../../docs/tutorials/common-interface-to-reproduce-research-projects.md) -to add CM scripts for your own applications, tools and native scripts. - -We are currently extending CM experiments and CM scripts for MLPerf benchmarks -to automate benchmarking, optimization and design space exploration of ML/AI systems -on any software and hardware - please stay tuned via our [Discord server](https://discord.gg/JjWNWXKxwT). - - - -## Further community developments - -We are developing this experiment automation in CM to help the community share, reproduce and reuse experiments -using a common, simple, human-readable, and portable [automation language](../../../docs/README.md). - -Join our [Discord server](https://discord.gg/JjWNWXKxwT) from the [MLCommons task force on automation and reproducibility](../taskforce.md) -to participate in the unification and extension of this interface and CM scripts for diverse research projects and tools. - diff --git a/automation/experiment/README.md b/automation/experiment/README.md deleted file mode 100644 index 13ea6ec1a..000000000 --- a/automation/experiment/README.md +++ /dev/null @@ -1,87 +0,0 @@ -*This README is automatically generated - don't edit!
See [extra README](README-extra.md) for extra notes!* - -### Automation actions - -#### test - - * CM CLI: ```cm test experiment``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L22)) - * CM CLI with UID: ```cm test experiment,a0a2d123ef064bcb``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L22)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'test' - 'automation':'experiment,a0a2d123ef064bcb' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L22) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### run - - * CM CLI: ```cm run experiment``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L64)) - * CM CLI with UID: ```cm run experiment,a0a2d123ef064bcb``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L64)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'run' - 'automation':'experiment,a0a2d123ef064bcb' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L64) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### rerun - - * CM CLI: ```cm rerun experiment``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L428)) - * CM CLI with UID: ```cm rerun experiment,a0a2d123ef064bcb``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L428)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'rerun' - 'automation':'experiment,a0a2d123ef064bcb' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L428) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### replay - - * CM CLI: ```cm replay experiment``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L451)) - * CM CLI with UID: ```cm replay experiment,a0a2d123ef064bcb``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L451)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'replay' - 'automation':'experiment,a0a2d123ef064bcb' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L451) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -### Maintainers - -* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce) \ No newline at end of file diff --git a/automation/experiment/_cm.json b/automation/experiment/_cm.json deleted file mode 100644 index 49bb0e616..000000000 --- a/automation/experiment/_cm.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "alias": "experiment", - "automation_alias": "automation", - "automation_uid": "bbeb15d8f0a944a4", - "desc": "Managing and reproducing experiments (under development)", - "developers": "[Grigori Fursin](https://cKnowledge.org/gfursin)", - "tags": [ - "automation" - ], - "uid": 
"a0a2d123ef064bcb" -} diff --git a/automation/experiment/module.py b/automation/experiment/module.py deleted file mode 100644 index 6e98029d5..000000000 --- a/automation/experiment/module.py +++ /dev/null @@ -1,844 +0,0 @@ -# Universal experiment automation to support universal benchmarking -# and optimization of apps and systems -# -# Written by Grigori Fursin - -import os -import itertools -import copy -import json - -from cmind.automation import Automation -from cmind import utils - - -class CAutomation(Automation): - """ - CM "experiment" automation actions - """ - - CM_RESULT_FILE = 'cm-result.json' - CM_INPUT_FILE = 'cm-input.json' - CM_OUTPUT_FILE = 'cm-output.json' - - ############################################################ - def __init__(self, cmind, automation_file): - super().__init__(cmind, __file__) - - ############################################################ - def test(self, i): - """ - Test automation - - Args: - (CM input dict): - - (out) (str): if 'con', output to console - - automation (str): automation as CM string object - - parsed_automation (list): prepared in CM CLI or CM access function - [ (automation alias, automation UID) ] or - [ (automation alias, automation UID), (automation repo alias, automation repo UID) ] - - (artifact) (str): artifact as CM string object - - (parsed_artifact) (list): prepared in CM CLI or CM access function - [ (artifact alias, artifact UID) ] or - [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] - - ... - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - - * Output from this automation action - """ - - import json - print(json.dumps(i, indent=2)) - - return {'return': 0} - - ############################################################ - - def run(self, i): - """ - Run experiment - - Args: - (CM input dict): - - (out) (str): if 'con', output to console - - (artifact) (str): experiment artifact name (can include repository separated by :) - (tags) (str): experiment tags separated by comma - - (dir) (str): force recording into a specific directory - - - (script) (str): find and run CM script by name - (s) - - (script_tags) (str): find and run CM script by tags - (stags) - - (rerun) (bool): if True, rerun experiment in a given entry/directory instead of creating a new one... - - (explore) (dict): exploration dictionary - - ... 
- - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - - * Output from this automation action - """ - - # Copy of original input - ii_copy = copy.deepcopy(i) - cur_dir = os.getcwd() - - # Find or add artifact based on repo/alias/tags - r = self._find_or_add_artifact(i) - if r['return'] > 0: - return r - - experiment = r['experiment'] - - console = i.get('out', '') == 'con' - - # Print experiment folder - experiment_path = experiment.path - - if console: - print('') - print('Path to CM experiment artifact: {}'.format(experiment_path)) - - # Get directory with datetime - datetime = i.get('dir', '') - - if datetime == '' and i.get('rerun', False): - # Check if already some dir exist - - directories = os.listdir(experiment_path) - - datetimes = sorted([f for f in directories if os.path.isfile( - os.path.join(experiment_path, f, self.CM_RESULT_FILE))], reverse=True) - - if len(datetimes) == 1: - datetime = datetimes[0] - elif len(datetimes) > 1: - print('') - print('Select experiment:') - - datetimes = sorted(datetimes) - - num = 0 - print('') - for d in datetimes: - print('{}) {}'.format(num, d.replace('.', ' '))) - num += 1 - - if not console: - return { - 'return': 1, 'error': 'more than 1 experiment found.\nPlease use "cm rerun experiment --dir={date and time}"'} - - print('') - x = input('Make your selection or press Enter for 0: ') - - x = x.strip() - if x == '': - x = '0' - - selection = int(x) - - if selection < 0 or selection >= num: - selection = 0 - - datetime = datetimes[selection] - - if datetime != '': - experiment_path2 = os.path.join(experiment_path, datetime) - else: - num = 0 - found = False - - while not found: - r = utils.get_current_date_time({}) - if r['return'] > 0: - return r - - datetime = r['iso_datetime'].replace( - ':', '-').replace('T', '.') - - if num > 0: - datetime += '.' 
+ str(num) - - experiment_path2 = os.path.join(experiment_path, datetime) - - if not os.path.isdir(experiment_path2): - found = True - break - - num += 1 - - # Check/create directory with date_time - if not os.path.isdir(experiment_path2): - os.makedirs(experiment_path2) - - # Change current path - print('Path to experiment: {}'.format(experiment_path2)) - - os.chdir(experiment_path2) - - # Record experiment input with possible exploration - experiment_input_file = os.path.join( - experiment_path2, self.CM_INPUT_FILE) - experiment_result_file = os.path.join( - experiment_path2, self.CM_RESULT_FILE) - - # Clean original input - for k in ['parsed_artifact', 'parsed_automation', 'cmd']: - if k in ii_copy: - del (ii_copy[k]) - - r = utils.save_json(file_name=experiment_input_file, meta=ii_copy) - if r['return'] > 0: - return r - - # Prepare run command - cmd = '' - - unparsed = i.get('unparsed_cmd', []) - if len(unparsed) > 0: - for u in unparsed: - if ' ' in u: - u = '"' + u + '"' - cmd += ' ' + u - - cmd = cmd.strip() - - # Prepare script run - env = i.get('env', {}) - - ii = {'action': 'native-run', - 'automation': 'script,5b4e0237da074764', - 'env': env} - - # Prepare exploration - # Note that from Python 3.7, dictionaries are ordered so we can define order for exploration in json/yaml - # ${{XYZ}} ${{ABC(range(1,2,3))}} - - # Extract exploration expressions from {{VAR{expression}}} - explore = i.get('explore', {}) - - j = 1 - k = 0 - while j >= 0: - j = cmd.find('}}}', k) - if j >= 0: - k = j + 1 - - l = cmd.rfind('{{', 0, j) - - if l >= 0: - l2 = cmd.find('{', l + 2, j) - if l2 >= 0: - k = l2 + 1 - - var = cmd[l + 2:l2] - expr = cmd[l2 + 1:j] - - explore[var] = expr - - cmd = cmd[:l2] + cmd[j + 1:] - - # Separate Design Space Exploration into var and range - explore_keys = [] - explore_dimensions = [] - - for k in explore: - v = explore[k] - - explore_keys.append(k) - - if not isinstance(v, list): - v = eval(v) - - explore_dimensions.append(v) - - # Next command will run all iterations so we need to redo above command - # once again - step = 0 - - steps = itertools.product(*explore_dimensions) - - num_steps = len(list(steps)) - - steps = itertools.product(*explore_dimensions) - - ii_copy = copy.deepcopy(ii) - - for dimensions in steps: - - step += 1 - - print('================================================================') - print('Experiment step: {} out of {}'.format(step, num_steps)) - - print('') - - ii = copy.deepcopy(ii_copy) - - env = ii.get('env', {}) - - l_dimensions = len(dimensions) - if l_dimensions > 0: - print(' Updating ENV variables during exploration:') - - print('') - for j in range(l_dimensions): - v = dimensions[j] - k = explore_keys[j] - print(' - Dimension {}: "{}" = {}'.format(j, k, v)) - - env[k] = str(v) - - print('') - - # Generate UID and prepare extra directory: - r = utils.gen_uid() - if r['return'] > 0: - return r - - uid = r['uid'] - - experiment_path3 = os.path.join(experiment_path2, uid) - if not os.path.isdir(experiment_path3): - os.makedirs(experiment_path3) - - # Get date time of experiment - r = utils.get_current_date_time({}) - if r['return'] > 0: - return r - - current_datetime = r['iso_datetime'] - - # Change current path - print('Path to experiment step: {}'.format(experiment_path3)) - print('') - os.chdir(experiment_path3) - - # Prepare and run experiment in a given placeholder directory - os.chdir(experiment_path3) - - ii['env'] = env - - # Change only in CMD - env_local = {'CD': cur_dir, - 'CM_EXPERIMENT_STEP': str(step), -
'CM_EXPERIMENT_PATH': experiment_path, - 'CM_EXPERIMENT_PATH2': experiment_path2, - 'CM_EXPERIMENT_PATH3': experiment_path3} - - # Update {{}} in CMD - cmd_step = cmd - - j = 1 - k = 0 - while j >= 0: - j = cmd_step.find('{{', k) - if j >= 0: - k = j - l = cmd_step.find('}}', j + 2) - if l >= 0: - var = cmd_step[j + 2:l] - - # Such vars must be in env - if var not in env and var not in env_local: - return { - 'return': 1, 'error': 'key "{}" is not in env during exploration'.format(var)} - - if var in env: - value = env[var] - else: - value = env_local[var] - - cmd_step = cmd_step[:j] + str(value) + cmd_step[l + 2:] - - ii['command'] = cmd_step - - print('Generated CMD:') - print('') - print(cmd_step) - print('') - - # Prepare experiment step input - experiment_step_input_file = os.path.join( - experiment_path3, self.CM_INPUT_FILE) - - r = utils.save_json(file_name=experiment_step_input_file, meta=ii) - if r['return'] > 0: - return r - - experiment_step_output_file = os.path.join( - experiment_path3, self.CM_OUTPUT_FILE) - if os.path.isfile(experiment_step_output_file): - os.delete(experiment_step_output_file) - - # Run CMD - rr = self.cmind.access(ii) - if rr['return'] > 0: - return rr - - # Record output - result = {} - - if os.path.isfile(experiment_step_output_file): - r = utils.load_json(file_name=experiment_step_output_file) - if r['return'] > 0: - return r - - result = r['meta'] - - # Try to flatten - try: - flatten_result = flatten_dict(result) - result = flatten_result - except BaseException: - pass - - # Add extra info - result['uid'] = uid - result['iso_datetime'] = current_datetime - - # Attempt to append to the main file ... - all_results = [] - - if os.path.isfile(experiment_result_file): - r = utils.load_json(file_name=experiment_result_file) - if r['return'] > 0: - return r - - all_results = r['meta'] - - all_results.append(result) - - r = utils.save_json( - file_name=experiment_result_file, - meta=all_results) - if r['return'] > 0: - return r - - rr = {'return': 0, - 'experiment_path': experiment_path, - 'experiment_path2': experiment_path2} - - return rr - - ############################################################ - - def rerun(self, i): - """ - Rerun experiment - - cm run experiment --rerun=True ... - """ - - i['rerun'] = True - - return self.run(i) - - ############################################################ - - def replay(self, i): - """ - Replay experiment - - Args: - (CM input dict): - - (out) (str): if 'con', output to console - - (artifact) (str): experiment artifact - - (tags) (str): experiment tags separated by comma - - (dir) (str): experiment directory (often date time) - (uid) (str): unique ID of an experiment - - ... 
- - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - - * Output from this automation action - """ - - # Find or add artifact based on repo/alias/tags - i['fail_if_not_found'] = True - r = self._find_or_add_artifact(i) - if r['return'] > 0: - return r - - experiment = r['experiment'] - - console = i.get('out', '') == 'con' - - # Print experiment folder - experiment_path = experiment.path - - if console: - print('') - print('Path to CM experiment artifact: {}'.format(experiment_path)) - - # Check date and time folder - uid = i.get('uid', '') - datetime = i.get('dir', '') - - if datetime != '': - datetimes = [datetime] - else: - directories = os.listdir(experiment_path) - - datetimes = sorted([f for f in directories if os.path.isfile( - os.path.join(experiment_path, f, self.CM_RESULT_FILE))], reverse=True) - - if len(datetimes) == 0: - return {'return': 1, 'error': 'experiment(s) not found in {}'.format( - experiment_path)} - - # Check datetime directory - found_result = {} - - if uid != '': - for d in datetimes: - r = self._find_uid({'path': experiment_path, 'datetime': d, 'uid': uid}) - if r['return'] > 0: - return r - - if len(r.get('result', {})) > 0: - found_result = r['result'] - datetime = d - experiment_path2 = os.path.join(experiment_path, datetime) - break - - if len(found_result) == 0: - return {'return': 1, 'error': 'couldn\'t find result with UID {} in {}'.format( - uid, experiment_path)} - - else: - if len(datetimes) == 1: - datetime = datetimes[0] - else: - print('') - print('Available experiments:') - - datetimes = sorted(datetimes) - - num = 0 - print('') - for d in datetimes: - print('{}) {}'.format(num, d.replace('.', ' '))) - num += 1 - - if not console: - return { - 'return': 1, 'error': 'more than 1 experiment found.\nPlease use "cm run experiment --dir={date and time}"'} - - print('') - x = input('Make your selection or press Enter for 0: ') - - x = x.strip() - if x == '': - x = '0' - - selection = int(x) - - if selection < 0 or selection >= num: - selection = 0 - - datetime = datetimes[selection] - - # Final path to experiment - experiment_path2 = os.path.join(experiment_path, datetime) - - if not os.path.isdir(experiment_path2): - return {'return': 1, 'error': 'experiment path not found {}'.format( - experiment_path2)} - - r = self._find_uid({'path': experiment_path, 'datetime': datetime}) - if r['return'] > 0: - return r - - results = r['meta'] - - if len(results) == 0: - return {'return': 1, 'error': 'results not found in {}'.format( - experiment_path2)} - - elif len(results) == 1: - selection = 0 - - else: - print('') - print('Available Unique IDs of results:') - - results = sorted(results, key=lambda x: x.get('uid', '')) - - num = 0 - print('') - for r in results: - print('{}) {}'.format(num, r.get('uid', ''))) - num += 1 - - if not console: - return { - 'return': 1, 'error': 'more than 1 result found.\nPlease use "cm run experiment --uid={result UID}"'} - - print('') - x = input('Make your selection or press Enter for 0: ') - - x = x.strip() - if x == '': - x = '0' - - selection = int(x) - - if selection < 0 or selection >= num: - selection = 0 - - found_result = results[selection] - uid = found_result['uid'] - - # Final info - if console: - print('') - print('Path to experiment: {}'.format(experiment_path2)) - - print('') - print('Result UID: {}'.format(uid)) - - # Attempt to load cm-input.json - experiment_input_file = os.path.join( - experiment_path2, self.CM_INPUT_FILE) 
- - if not os.path.isfile(experiment_input_file): - return { - 'return': 1, 'error': '{} not found - can\'t replay'.format(self.CM_INPUT_FILE)} - - r = utils.load_json(experiment_input_file) - if r['return'] > 0: - return r - - cm_input = r['meta'] - - tags = cm_input.get('tags', '').strip() - if 'replay' not in tags: - if tags != '': - tags += ',' - tags += 'replay' - cm_input['tags'] = tags - - if console: - print('') - print('Experiment input:') - print('') - print(json.dumps(cm_input, indent=2)) - print('') - - # Run experiment again - r = self.cmind.access(cm_input) - if r['return'] > 0: - return r - - # TBA - validate experiment, etc ... - - return {'return': 0} - - ############################################################ - - def _find_or_add_artifact(self, i): - """ - Find or add experiment artifact (reused in run and replay) - - Args: - (CM input dict): - - (fail_if_not_found) (bool) - if True, fail if experiment is not found - - ... - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - - experiment (CM artifact class): Experiment artifact - - """ - - console = i.get('out', '') == 'con' - - # Try to find experiment artifact by alias and/or tags - ii = utils.sub_input(i, self.cmind.cfg['artifact_keys'] + ['tags']) - ii['action'] = 'find' - - ii_copy = copy.deepcopy(ii) - - # If artifact is specified, remove tags - artifact = ii.get('artifact', '').strip() - if artifact != '' and not artifact.endswith(':') \ - and '*' not in artifact and '?' not in artifact: - if 'tags' in ii: - del (ii['tags']) - - r = self.cmind.access(ii) - if r['return'] > 0: - return r - - lst = r['list'] - - if len(lst) > 1: - print('More than 1 experiment artifact found:') - - lst = sorted(lst, key=lambda x: x.path) - - num = 0 - print('') - for e in lst: - print('{}) {}'.format(num, e.path)) - print( - ' Tags: {}'.format( - ','.join( - e.meta.get( - 'tags', - [])))) - num += 1 - - if not console: - return {'return': 1, 'error': 'more than 1 experiment artifact found.\nPlease use "cm run experiment {name}" or "cm run experiment --tags={tags separated by comma}"'} - - print('') - x = input('Make your selection or press Enter for 0: ') - - x = x.strip() - if x == '': - x = '0' - - selection = int(x) - - if selection < 0 or selection >= num: - selection = 0 - - experiment = lst[selection] - - elif len(lst) == 1: - experiment = lst[0] - else: - # Create new entry - if i.get('fail_if_not_found', False): - return {'return': 1, 'error': 'experiment not found'} - - ii = copy.deepcopy(ii_copy) - ii['action'] = 'add' - r = self.cmind.access(ii) - if r['return'] > 0: - return r - - experiment_uid = r['meta']['uid'] - - r = self.cmind.access({'action': 'find', - 'automation': 'experiment,a0a2d123ef064bcb', - 'artifact': experiment_uid}) - if r['return'] > 0: - return r - - lst = r['list'] - if len(lst) == 0 or len(lst) > 1: - return { - 'return': 1, 'error': 'created experiment artifact with UID {} but can\'t find it - weird'.format(experiment_uid)} - - experiment = lst[0] - - return {'return': 0, 'experiment': experiment} - - ############################################################ - def _find_uid(self, i): - """ - Find experiment result with a given UID - - Args: - (CM input dict): - - path (str): path to experiment artifact - datetime (str): sub-path to experiment - (uid) (str): experiment UID - - ... 
- - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - - path_to_file (str): path to experiment result file - meta (dict): complete list of all results - result (dict): result dictionary with a given UID - - """ - - path = i['path'] - datetime = i['datetime'] - uid = i.get('uid', '').strip() - - path_to_experiment_result_file = os.path.join( - path, datetime, self.CM_RESULT_FILE) - - rr = {'return': 0, 'path_to_file': path_to_experiment_result_file} - - if os.path.isfile(path_to_experiment_result_file): - r = utils.load_json(file_name=path_to_experiment_result_file) - if r['return'] > 0: - return r - - meta = r['meta'] - - rr['meta'] = meta - - # Searching for UID - if uid != '': - for result in meta: - ruid = result.get('uid', '').strip() - if ruid != '' and ruid == uid: - rr['result'] = result - break - - return rr - -############################################################################ - - -def flatten_dict(d, flat_dict=None, prefix=''): - - # Avoid a shared mutable default so results don't leak across calls - if flat_dict is None: - flat_dict = {} - - for k in d: - v = d[k] - - if type(v) is dict: - flatten_dict(v, flat_dict, prefix + k + '.') - else: - flat_dict[prefix + k] = v - - return flat_dict diff --git a/automation/experiment/tests/test2.bat b/automation/experiment/tests/test2.bat deleted file mode 100644 index 5ecb3a0d8..000000000 --- a/automation/experiment/tests/test2.bat +++ /dev/null @@ -1 +0,0 @@ -cm run experiment --tags=test @test_input.yaml -- echo %VAR1% --batch_size={{VAR1}} {{VAR2}} {{VAR4{['xx','yy','zz']}}}-%%VAR3%% diff --git a/automation/experiment/tests/test2.sh b/automation/experiment/tests/test2.sh deleted file mode 100644 index 40d60a25a..000000000 --- a/automation/experiment/tests/test2.sh +++ /dev/null @@ -1 +0,0 @@ -cm run experiment --tags=test @test_input.yaml -- echo "\${VAR1} --batch_size={{VAR1}} {{VAR2}} {{VAR4{['xx','yy','zz']}}}-\${VAR3}" \ No newline at end of file diff --git a/automation/experiment/tests/test3.bat b/automation/experiment/tests/test3.bat deleted file mode 100644 index 800e36076..000000000 --- a/automation/experiment/tests/test3.bat +++ /dev/null @@ -1 +0,0 @@ -cm run experiment --tags=test @test3_input.yaml -- cm run script "print hello-world native" --env.CM_ENV_TEST1={{VAR1}} --const.CM_ENV_TEST2={{VAR2}} diff --git a/automation/experiment/tests/test3.sh b/automation/experiment/tests/test3.sh deleted file mode 100644 index 148e56433..000000000 --- a/automation/experiment/tests/test3.sh +++ /dev/null @@ -1 +0,0 @@ -cm run experiment --tags=test @test3_input.yaml -- cm run script "print hello-world native" --env.CM_ENV_TEST1={{VAR1}} --const.CM_ENV_TEST2={{VAR2}} diff --git a/automation/experiment/tests/test3_input.yaml b/automation/experiment/tests/test3_input.yaml deleted file mode 100644 index 1c789f52a..000000000 --- a/automation/experiment/tests/test3_input.yaml +++ /dev/null @@ -1,4 +0,0 @@ -explore: - VAR1: [1,2,3] - VAR2: ["a","b"] - CM_ENV_TEST3: "[2**i for i in range(0,6)]" diff --git a/automation/experiment/tests/test__json.bat b/automation/experiment/tests/test__json.bat deleted file mode 100644 index 16eb9184b..000000000 --- a/automation/experiment/tests/test__json.bat +++ /dev/null @@ -1 +0,0 @@ -cm run experiment --tags=test @test_input.json -- {{CD}}\test_run.bat diff --git a/automation/experiment/tests/test__json.sh b/automation/experiment/tests/test__json.sh deleted file mode 100644 index a46cb98f5..000000000 --- a/automation/experiment/tests/test__json.sh +++ /dev/null @@ -1 +0,0 @@ -cm run experiment --tags=test
@test_input.json -- {{CD}}/test_run.sh diff --git a/automation/experiment/tests/test__yaml.bat b/automation/experiment/tests/test__yaml.bat deleted file mode 100644 index e583f209b..000000000 --- a/automation/experiment/tests/test__yaml.bat +++ /dev/null @@ -1 +0,0 @@ -cm run experiment --tags=test @test_input.yaml -- {{CD}}\test_run.bat diff --git a/automation/experiment/tests/test__yaml.sh b/automation/experiment/tests/test__yaml.sh deleted file mode 100644 index 60c2f7a80..000000000 --- a/automation/experiment/tests/test__yaml.sh +++ /dev/null @@ -1 +0,0 @@ -cm run experiment --tags=test @test_input.yaml -- {{CD}}/test_run.sh diff --git a/automation/experiment/tests/test_input.json b/automation/experiment/tests/test_input.json deleted file mode 100644 index f682f5a34..000000000 --- a/automation/experiment/tests/test_input.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "explore": { - "VAR1": [ - 1, - 2, - 3 - ], - "VAR2": [ - "a", - "b" - ], - "VAR3": "[2**i for i in range(0,6)]" - } -} diff --git a/automation/experiment/tests/test_input.yaml b/automation/experiment/tests/test_input.yaml deleted file mode 100644 index a621c5ef9..000000000 --- a/automation/experiment/tests/test_input.yaml +++ /dev/null @@ -1,4 +0,0 @@ -explore: - VAR1: [1,2,3] - VAR2: ["a","b"] - VAR3: "[2**i for i in range(0,6)]" diff --git a/automation/experiment/tests/test_run.bat b/automation/experiment/tests/test_run.bat deleted file mode 100644 index b3aa91028..000000000 --- a/automation/experiment/tests/test_run.bat +++ /dev/null @@ -1,3 +0,0 @@ -echo %VAR1% --batch_size=%VAR3% %VAR2% - -echo {"x":%VAR1%, "y":"%VAR2%", "z":%VAR3%} > cm-output.json diff --git a/automation/experiment/tests/test_run.sh b/automation/experiment/tests/test_run.sh deleted file mode 100644 index 7ed1b472e..000000000 --- a/automation/experiment/tests/test_run.sh +++ /dev/null @@ -1 +0,0 @@ -echo $VAR1 --batch_size=$VAR3 $VAR2 diff --git a/automation/script/README-extra.md b/automation/script/README-extra.md deleted file mode 100644 index 7fc982067..000000000 --- a/automation/script/README-extra.md +++ /dev/null @@ -1,1035 +0,0 @@ -[ [Back to index](../../../docs/README.md) ] - -# CM "script" automation - - -
-Click here to see the table of contents. - - * [Motivation](#motivation) - * [Obtaining shared CM scripts](#obtaining-shared-cm-scripts) - * [Getting started with CM scripts](#getting-started-with-cm-scripts) - * [Understanding CM scripts](#understanding-cm-scripts) - * [Wrapping native scripts](#wrapping-native-scripts) - * [Modifying environment variables](#modifying-environment-variables) - * [Understanding unified output dictionary](#understanding-unified-output-dictionary) - * [Modifying state dictionary](#modifying-state-dictionary) - * [Running CM scripts via CM Python API](#running-cm-scripts-via-cm-python-api) - * [Assembling pipelines (workflows) of CM scripts](#assembling-pipelines-workflows-of-cm-scripts) - * [Customizing CM script execution flow](#customizing-cm-script-execution-flow) - * [Caching output of CM scripts](#caching-output-of-cm-scripts) - * [Assembling pipeline to compile and run image corner detection](#assembling-pipeline-to-compile-and-run-image-corner-detection) - * [Customizing sub-dependencies in a pipeline](#customizing-sub-dependencies-in-a-pipeline) - * [Using Python virtual environments](#using-python-virtual-environments) - * [Assembling pipelines with other artifacts included](#assembling-pipelines-with-other-artifacts-included) - * [Unifying host OS and CPU detection](#unifying-host-os-and-cpu-detection) - * [Detecting, installing and caching system dependencies](#detecting-installing-and-caching-system-dependencies) - * [Using variations](#using-variations) - * [Running CM scripts inside containers](#running-cm-scripts-inside-containers) - * [Getting help about other script automation flags](#getting-help-about-other-script-automation-flags) - * [Further reading](#further-reading) - -
- -*We suggest you check [CM introduction](https://github.com/mlcommons/ck/blob/master/docs/introduction-cm.md) - and [CM CLI/API](https://github.com/mlcommons/ck/blob/master/docs/interface.md) to understand CM motivation and concepts. - You can also try [CM tutorials](https://github.com/mlcommons/ck/blob/master/docs/tutorials/README.md) - to run some applications and benchmarks on your platform using CM scripts.* - -## Motivation - -While helping the community reproduce [150+ research papers](https://learning.acm.org/techtalks/reproducibility), -we have noticed that researchers always create their own ad-hoc scripts, environment variables and files -to perform *exactly the same steps (actions) across all papers* to prepare, run and reproduce their experiments -across different software, hardware, models and data. - -![](https://raw.githubusercontent.com/ctuning/ck-guide-images/master/cm-ad-hoc-projects.png) - -This experience motivated us to create a CM automation called "script" to wrap native scripts -from research and industrial projects with a common, simple and unified CM Command Line Interface and Python API. - -Such non-intrusive wrapping helps to make numerous native scripts and tools more reusable, interoperable, portable, findable -and deterministic across different projects with different artifacts based on [FAIR principles](https://www.go-fair.org/fair-principles). - -CM scripts can be embedded into existing projects with minimal or no modifications at all, and they can be connected -into powerful and portable pipelines and workflows using simple JSON or YAML files -to prepare, run and reproduce experiments across continuously changing technology. - -Importantly, CM scripts can be executed in the same way in a native user environment, -Python virtual environments (to avoid messing up the native environment) and containers -while automatically adapting to a given environment! - -![](https://raw.githubusercontent.com/ctuning/ck-guide-images/master/cm-unified-projects.png) - - - - - -## Obtaining shared CM scripts - -In order to reuse some CM scripts embedded into shared projects, -you need to install these projects via the CM interface. - -For example, to use automation scripts developed by the -[MLCommons task force on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) -and shared via GitHub, you just need to pull this repository via CM: - -```bash -cm pull repo --url=https://github.com/mlcommons/cm4mlops --checkout=dev -``` - -or - -```bash -cm pull repo mlcommons@cm4mlops --checkout=dev -``` - -You can now see all available CM scripts in your system as follows: - -```bash -cm find script -cm find script install* | sort - -``` - - -## Getting started with CM scripts - -You can run any of the above CM scripts on any platform as follows: -```bash -cm run script "tags separated by space" --keys=values --env.KEY=VALUE -cm run script --tags="tags separated by comma" --keys=values --env.KEY=VALUE -``` -or using the shortcut `cmr` available in CM V1.4.0+: -```bash -cmr "tags separated by space" --keys=values --env.KEY=VALUE -``` - -You can also use the `-j` flag to print JSON output at the end of the script execution -and the `-v` flag to show extra debug information during script execution.
- - For example, you can download a ResNet-50 model in ONNX format from Zenodo using the following script: -```bash -cmr "download file" --url=https://zenodo.org/record/4735647/files/resnet50_v1.onnx -``` - -You can also obtain info about your OS (Linux, Windows, MacOS) in a unified way and print JSON output -as well as CM debug info as follows: -```bash -cmr "detect os" -j -v -``` - -You can turn on silent mode using the CM cfg automation: -```bash -cm set cfg --key.script.silent -``` -or -```bash -cm set cfg default --key.script.silent -``` - - -## Understanding CM scripts - -CM scripts are treated as standard CM artifacts with the associated CM automation ["script"](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script), -CM action ["run"](https://github.com/mlcommons/ck/blob/master/cm-mlops/automation/script/module.py#L73), -and JSON and/or YAML meta descriptions. - -CM scripts can be invoked by using their alias, unique ID and human-readable tags (preferred method). - -For example, the [CM "Print Hello World" script](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world) -simply wraps 2 native `run.sh` and `run.bat` scripts to print "Hello World" on Linux, MacOS or Windows -together with a few environment variables: - -```bash -ls `cm find script print-hello-world` - -README.md _cm.json run.bat run.sh -``` - -It is described by this [_cm.json meta description file](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/print-hello-world/_cm.json) -with the following alias, UID and tags: - -```json -{ - "automation_alias": "script", - "automation_uid": "5b4e0237da074764", - - "alias": "print-hello-world", - "uid": "b9f0acba4aca4baa", - - "default_env": { - "CM_ENV_TEST1": "TEST1" - }, - - "env": { - "CM_ENV_TEST2": "TEST2" - }, - - "input_mapping": { - "test1": "CM_ENV_TEST1" - }, - - "new_env_keys": [ - "CM_ENV_TEST*" - ], - - "new_state_keys": [ - "hello_test*" - ], - - "tags": [ - "print", - "hello-world", - "hello world", - "hello", - "world", - "native-script", - "native", - "script" - ] -} -``` - -The `automation_alias` and `automation_uid` tell CM that this artifact can be used with the CM "script" automation. - -Therefore, this script can be executed from the command line in any of the following ways: - -```bash -cm run script print-hello-world -cm run script b9f0acba4aca4baa -cm run script --tags=print,native-script,hello-world -cm run script "print native-script hello-world" -``` - -The same script can also be executed using the CM Python API as follows: -```python -import cmind - -output = cmind.access({'action':'run', 'automation':'script', 'tags':'print,native-script,hello-world'}) -if output['return']>0: - cmind.error(output) - -import json -print (json.dumps(output, indent=2)) -``` - -Normally you should see the following output along with some debug information (that will be removed soon): - -```bash - -... - -CM_ENV_TEST1 = TEST1 -CM_ENV_TEST2 = TEST2 - -HELLO WORLD! -... -``` - -### Wrapping native scripts - -*run.bat* and *run.sh* are native scripts that will be executed by this CM script in a unified way on Linux, MacOS and Windows: - -```bash -echo "" -echo "CM_ENV_TEST1 = ${CM_ENV_TEST1}" -echo "CM_ENV_TEST2 = ${CM_ENV_TEST2}" - -echo "" -echo "HELLO WORLD!" -``` - -The idea of using native scripts is to make it easier for researchers and engineers to reuse their existing automation scripts -while providing a common CM wrapper with a unified CLI, Python API and extensible meta descriptions.
- - - - ### Modifying environment variables - -The CM script automation CLI uses the flag `--env.VAR=VALUE` to set an environment variable and pass it to a native script -as shown in this example: - -```bash -cm run script "print native-script hello-world" \ - --env.CM_ENV_TEST1=ABC1 --env.CM_ENV_TEST2=ABC2 - -... - -CM_ENV_TEST1 = ABC1 -CM_ENV_TEST2 = TEST2 - -HELLO WORLD! -``` - -Note that *CM_ENV_TEST2* did not change. This happened because the `env` dictionary in *_cm.json* forces *CM_ENV_TEST2* to *TEST2*, -while the `default_env` dictionary allows environment variables to be updated externally. - -You can still force an environment variable to a given value externally using the `--const` flag as follows: - -```bash -cm run script "print native-script hello-world" \ - --env.CM_ENV_TEST1=ABC1 --const.CM_ENV_TEST2=ABC2 - -... - -CM_ENV_TEST1 = ABC1 -CM_ENV_TEST2 = ABC2 - -HELLO WORLD! - -``` - -You can also use a JSON file instead of flags. Create *input.json* (or any other filename): -```json -{ - "tags":"print,native-script,hello-world", - "env":{ - "CM_ENV_TEST1":"ABC1" - } -} -``` - -and run the CM script with this input file as follows: -``` -cm run script @input.json -``` - - -You can also use a YAML file instead of the CLI. Create *input.yaml* (or any other filename): -```yaml -tags: "print,hello-world,script" -env: - CM_ENV_TEST1: "ABC1" -``` - -and run the CM script with this input file as follows: -``` -cm run script @input.yaml -``` - -Finally, you can map any other flag from the script CLI to an environment variable -using the key `input_mapping` in the `_cm.json` meta description of this script: - -```bash -cm run script "print native-script hello-world" --test1=ABC1 - -... - -CM_ENV_TEST1 = ABC1 -CM_ENV_TEST2 = TEST2 - -HELLO WORLD! - -``` - - -### Understanding unified output dictionary - -You can see the output of a given CM script in the JSON format by adding the `--out=json` flag as follows: - -```bash -cm run script --tags=print,hello-world,script --env.CM_ENV_TEST1=ABC1 --out=json - -... - -CM_ENV_TEST1 = ABC1 -CM_ENV_TEST2 = TEST2 - -HELLO WORLD! - -{ - "deps": [], - "env": { - "CM_ENV_TEST1": "ABC1", - "CM_ENV_TEST2": "TEST2" - }, - "new_env": { - "CM_ENV_TEST1": "ABC1", - "CM_ENV_TEST2": "TEST2" - }, - "new_state": {}, - "return": 0, - "state": {} -} -``` - -Note that `new_env` shows new environment variables produced and explicitly exposed by this script -via the `new_env_keys` key in the `_cm.json` meta description of this script. - -This is needed to assemble automation pipelines and workflows while avoiding their contamination -with temporary environments. A CM script must explicitly expose environment variables that will -go to the next stage of a pipeline. - -In the following example, `CM_ENV_TEST3` will be added to the `new_env` while `CM_XYZ` will not -since it is not included in `"new_env_keys":["CM_ENV_TEST*"]`: - -```bash -cm run script --tags=print,hello-world,script --env.CM_ENV_TEST1=ABC1 --out=json --env.CM_ENV_TEST3=ABC3 --env.CM_XYZ=XYZ -``` - -### Modifying state dictionary - -Sometimes it is necessary to use more complex structures than environment variables in scripts and workflows. -We use a dictionary `state` that can be updated and exposed by a given script via the `new_state_keys` key -in the `_cm.json` meta description of this script.
- - In the following example, the `hello_world` key will be updated in the `new_state` dictionary, -while the `hello` key will not be updated because it is not included in the wildcard `"new_state_keys":["hello_world*"]`: - -```bash -cm run script --tags=print,hello-world,script --out=json \ - --state.hello=xyz1 --state.hello_world=xyz2 - -... - -{ - "deps": [], - "env": { - "CM_ENV_TEST1": "TEST1", - "CM_ENV_TEST2": "TEST2" - }, - "new_env": { - "CM_ENV_TEST1": "TEST1", - "CM_ENV_TEST2": "TEST2" - }, - "new_state": { - "hello_world": "xyz2" - }, - "return": 0, - "state": { - "hello": "xyz1", - "hello_world": "xyz2" - } -} -``` - -### Running CM scripts via CM Python API - -You can run a given CM script from Python or Jupyter notebooks as follows: - -```python - -import cmind - -r = cmind.access({'action':'run', - 'automation':'script', - 'tags':'print,hello-world,script', - 'const':{ - 'CM_ENV_TEST1':'ABC1', - }, - 'env':{ - 'CM_ENV_TEST2':'ABC2' - }, - 'state': { - 'hello':'xyz1', - 'hello_world':'xyz2' - } - }) - -print (r) - -``` - -```bash -... - -CM_ENV_TEST1 = ABC1 -CM_ENV_TEST2 = TEST2 - -HELLO WORLD! - -{'return': 0, - 'env': {'CM_ENV_TEST2': 'TEST2', 'CM_ENV_TEST1': 'ABC1'}, - 'new_env': {'CM_ENV_TEST2': 'TEST2', 'CM_ENV_TEST1': 'ABC1'}, - 'state': {'hello': 'xyz1', 'hello_world': 'xyz2'}, - 'new_state': {'hello_world': 'xyz2'}, - 'deps': []} - -``` - - - -### Assembling pipelines (workflows) of CM scripts - -We've added a simple mechanism to chain reusable CM scripts into complex pipelines -without the need for specialized workflow frameworks. - -Simply add the following dictionary "deps" to the `_cm.json` or `_cm.yaml` of your script as follows: - -```json - -{ - "deps": [ - { - "tags": "a string of tags separated by comma to find and execute the 1st CM script" - }, - { - "tags": "a string of tags separated by comma to find and execute the 2nd CM script" - }, - ... - ] -} - -``` - -This CM script will run all dependent scripts in the above sequence, aggregate environment variables and the `state` dictionary, -and will then run native scripts. - -You can also turn on specific dependencies based on some values in specific environment variables or min/max version (if supported) -in this pipeline as follows: - -```json - -{ - "deps": [ - { - "tags": "a string of tags separated by comma to find and execute the 1st CM script", - "enable_if_env": { "USE_CUDA" : ["yes", "YES", "true"] } - }, - { - "tags": "a string of tags separated by comma to find and execute the 2nd CM script", - "enable_if_env": { "USE_CPU" : ["yes", "YES", "true"] }, - "version_min": "3.10" - }, - ... - ] -} - -``` - -You can also specify dependencies to be invoked after executing native scripts -using a dictionary `"post_deps"` with the same format as `"deps"`, as illustrated below.
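For illustration, a minimal hypothetical meta snippet combining both lists might look as follows; except for `detect,os`, which this document mentions elsewhere, the tags are placeholders rather than real scripts:

```json
{
  "deps": [
    { "tags": "detect,os" }
  ],
  "post_deps": [
    { "tags": "a string of tags to find and execute a CM script after the native scripts finish" }
  ]
}
```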
- - - You can see an example of such dependencies in the [_cm.json](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/print-hello-world-py/_cm.json) -of the ["print-hello-world-py" CM script](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/print-hello-world-py) -that detects and unifies OS parameters using the ["detect-os" CM script](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-os), -detects or builds Python using the ["get-python3" CM script](https://github.com/mlcommons/cm4mlops/tree/main/script/get-python3) -and then runs `code.py` with "Hello World" from `run.sh` or `run.bat`: - -```bash -cm run script "print python hello-world" -``` - - - - - - -### Customizing CM script execution flow - -If a developer adds a `customize.py` file inside a given CM script, -it can be used to programmatically update environment variables, prepare input scripts -and even invoke other scripts using Python. - -If a function `preprocess` exists in this file, the CM script will call it before -invoking a native script. - -If this function returns `{"skip":True}` in the output, -further execution of this script will be skipped. - -After executing the preprocess function, the CM script automation will record the global state dictionary -into *tmp-state.json* and the local state dictionary from this CM script into *tmp-state-new.json*. - -The CM script automation will then run a native script (run.sh on Linux/MacOS or run.bat on Windows) -with all merged environment variables from previous scripts. - -Note that native scripts can also create 2 files that will be automatically picked up and processed by the CM script automation: -* *tmp-run-env.out* - list of environment variables to update the "new_env" of a given CM script -* *tmp-run-state.json* - the state dictionary to update the "new_state" of a given CM script - -If a `postprocess` function exists in the *customize.py* file, the CM script will call it -to finalize the postprocessing of files, environment variables, and the state dictionary. - -You can see an [example of such `customize.py` module](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-python3/customize.py) in the CM script -to [detect or install/build a Python interpreter](https://github.com/mlcommons/cm4mlops/tree/main/script/get-python3) in a unified way on any machine. - -This script exposes a number of environment variables for a detected Python -in the [`postprocess` function](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-python3/customize.py#L60): - -* `CM_PYTHON_BIN` - python3.10 or python.exe or any other name of a Python interpreter on a given system -* `CM_PYTHON_BIN_PATH` - path to a detected or installed python -* `CM_PYTHON_BIN_WITH_PATH` - full path to a detected or installed python -* `LD_LIBRARY_PATH` - updated LD_LIBRARY_PATH to python -* `PATH` - updated PATH to python - -These environment variables can be reused by other CM scripts or external tools -while decoupling them from specific Python versions and paths, and even allowing -multiple versions of tools and artifacts to co-exist on the same system -and be plugged into CM scripts: - -```bash -cm run script "get python3" --out=json -``` - - - -### Caching output of CM scripts - -By default, CM scripts run wrapped scripts and tools, update environment variables and produce new files in the current directory.
- - In many cases, we want to cache the output and environment variables when we run the same CM script with the same input again -to avoid potentially lengthy detections, downloads, builds and data pre/post processing. - -That's why we have developed another CM automation called ["cache"](../cache/README-extra.md) -to cache the output of scripts in the "cache" artifacts in the "local" CM repository -that can be found by tags or unique IDs like any other CM artifact. - -Our convention is to use names *get-{tool or artifact}* for CM scripts that detect already installed artifacts, -prepare their environment and cache them in the *local* CM repository using the "cache" automation. - -If an installed artifact doesn't exist, we either enhance the above scripts to include download, installation and even building -for a given artifact (if it's a tool) or we create extra CM scripts *install-{tool or artifact}* -that download and prepare tools and artifacts (install, build, preprocess, etc). - -For example, the CM script [*get-python3*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-python3) -has a *customize.py* with a *preprocess* function that implements the search for python3 on Linux -or python.exe on Windows, 2 native scripts *run.sh* and *run.bat* to obtain the version of the detected Python installation, -and a *postprocess* function to prepare environment variables *CM_PYTHON_BIN* and *CM_PYTHON_BIN_WITH_PATH* -that can be used by other CM scripts: - -```bash -cm run script "get python" --out=json -``` - -If you run it for the first time and the CM script detects multiple versions of Python co-existing on your system, -it will ask you to select one. CM will then cache the output in the *cache* artifact of the CM repository. -You can see all *cache* CM entries for other tools and artifacts as follows: - -```bash -cm show cache -``` -or -```bash -cm show cache --tags=get,python -``` - -You can see the cached files as follows: -```bash -ls `cm find cache --tags=get,python` -``` - -* _cm.json - CM meta description of this "cache" artifact with its unique ID, tags and other meta information -* cm-cached-state.json - dictionary with the new environment variables and the new state dictionary -* tmp-env-all.sh - all environment variables used during CM script execution -* tmp-env.sh - only new environment variables produced after CM script execution (it can be used directly by external tools) -* tmp-run.sh - all environment variables and a call to the native script (useful for reproducibility) -* tmp-state.json - the state before running native script - it can be loaded and used by native scripts and tools instead of using environment variables -* tmp-ver.out - the output of the --version command parsed by `postprocess` and `detect_version` functions in `customize.py` - - -If you (or another CM script) run this CM script to get the Python tool for the second time, the CM script will reuse the cached output: -```bash -cm run script "get python" --out=json -``` - -This also allows us to install multiple tool versions into different CM cache entries (Python virtual environments, -LLVM compiler, etc) and use them separately without the need to change higher-level CM scripts - these tools -will be automatically plugged in: - -```bash -cm run script "install prebuilt llvm" --version=14.0.0 -cm run script "install prebuilt llvm" --version=16.0.0 -cm run script "install src llvm" -``` - - -Such an approach allows us to "probe" the user environment, detect different tools and artifacts, unify them -and adapt complex
applications to a user environment in an automatic, transparent and non-intrusive way -as shown in the next example. - - - - - - -## Assembling pipeline to compile and run image corner detection - -We can use an automatically detected compiler from a CM script to create simple and technology-neutral compilation and execution pipelines -in CM scripts. - -For example, we have implemented a simple [image corner detection CM script]( https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-corner-detection ) -with [this meta description](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/app-image-corner-detection/_cm.json). - -It uses two other reusable CM scripts to compile a given program using a detected/installed and cached compiler via CM (such as LLVM), -and then run it with some input image. - -First, let's detect an installed LLVM via CM: - -```bash -cm run script "get llvm" -``` -or install a prebuilt version on Linux, MacOS or Windows: -```bash -cm run script "install prebuilt llvm" --version=14.0.0 -``` - -We can then run this CM script to compile and run image corner detection as follows: -```bash -cm run script "app image corner-detection" --input=`cm find script --tags=app,image,corner-detection`/computer_mouse.pgm -``` - -This CM script will preset environment variables for a detected/installed compiler, -compile our C program, run it via `run.sh` (Linux/MacOS) or `run.bat` (Windows) -and generate an output image *output_image_with_corners.pgm* in the `output` directory of this script: - -```bash -ls `cm find script --tags=app,image,corner-detection`/output - -image-corner output_image_with_corners.pgm - -``` - -Note that this directory also contains the compiled tool "image-corner" that can now be used independently from CM if necessary. - - - - -### Customizing sub-dependencies in a pipeline - -When running a CM script with many sub-dependencies similar to the above example, -we may want to specify some version constraints on sub-dependencies such as LLVM. - -One can use the key `"names"` in the "deps" list of any CM script meta description -to specify multiple names for a given dependency. - -For example, a dependency to "get compiler" in the CM script "compile-program" -has `"names":["compiler"]` as shown [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/compile-program/_cm.json#L15). - -We can now use a CM script flag `--add_deps_recursive.{some name}.{some key}={some value}` or -`--adr.{above name}.{some key}={some value}` to update a dictionary of all sub-dependencies -that have `some name`. - -For example, we can now specify to use LLVM 16.0.0 for image corner detection as follows: -```bash -cm run script "app image corner-detection" --adr.compiler.tags=llvm --adr.compiler.version=16.0.0 -``` - -If this compiler was not yet detected or installed by CM, it will find related scripts -to install either a prebuilt version of LLVM or build it from sources. - - -## Using Python virtual environments - -By default, CM scripts will install Python dependencies into user space. -This can influence other existing projects and may not be desirable. -CM can be used inside virtual Python environments without any changes, -but a user still needs to do some manual steps to set up such an environment.
-That's why we've developed a [CM script](https://github.com/mlcommons/cm4mlops/tree/main/script/install-python-venv) -to automate the creation of multiple Python virtual environments with different names: - -```bash -cm run script "install python-venv" --name={some name} -``` - -CM will create a virtual environment using the default Python and save it in the CM cache. -It is possible to create a Python virtual environment with a minimal required version -or a specific one on Linux and MacOS as follows: - -```bash -cm run script "install python-venv" --version_min=3.8 --name=mlperf -cm run script "install python-venv" --version=3.10.8 --name=mlperf2 -``` - -In this case, CM will attempt to detect Python 3.10.8 on the system. -If CM can't detect it, CM will then automatically download and build it -using [this script](https://github.com/mlcommons/cm4mlops/tree/main/script/install-python-src). - -Now, when a user runs pipelines that install Python dependencies, CM will detect -the virtual environment in the CM cache as well as the native Python and will ask the user -which one to use. - -It is possible to avoid such questions by using the flag `--adr.python.name=mlperf`. -In such a case, CM will propagate the name of a virtual environment to all sub-dependencies -as shown in the next example. - -Instead of adding this flag to all scripts, you can specify it -using the `CM_SCRIPT_EXTRA_CMD` environment variable as follows: -```bash -export CM_SCRIPT_EXTRA_CMD="--adr.python.name=mlperf" -``` - -You can even specify the minimum required Python version as follows: -```bash -export CM_SCRIPT_EXTRA_CMD="--adr.python.name=mlperf --adr.python.version_min=3.9" -``` - -## Assembling pipelines with other artifacts included - -We can now use existing CM scripts as "LEGO" blocks to assemble more complex automation pipelines and workflows -while automatically downloading and plugging in -and pre-/post-processing all necessary artifacts (models, data sets, frameworks, compilers, etc) -on any supported platform (Linux, MacOS, Windows). - -For example, we have implemented a simple image classification application automated by the following CM script: -[*app-image-classification-onnx-py*]( https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-onnx-py ). - -It is described by the following [`_cm.yaml`](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/app-image-classification-onnx-py/_cm.yaml) meta description: - -```yaml -alias: app-image-classification-onnx-py -uid: 3d5e908e472b417e - -automation_alias: script -automation_uid: 5b4e0237da074764 - -category: "Modular ML/AI applications" - -tags: -- app -- image-classification -- onnx -- python - -default_env: - CM_BATCH_COUNT: '1' - CM_BATCH_SIZE: '1' - -deps: -- tags: detect,os -- tags: get,sys-utils-cm -- names: - - python - - python3 - tags: get,python3 -- tags: get,cuda - names: - - cuda - enable_if_env: - USE_CUDA: - - yes -- tags: get,dataset,imagenet,image-classification,original -- tags: get,dataset-aux,imagenet-aux,image-classification -- tags: get,ml-model,resnet50,_onnx,image-classification - -- tags: get,generic-python-lib,_onnxruntime - skip_if_env: - USE_CUDA: - - yes -- tags: get,generic-python-lib,_onnxruntime_gpu - enable_if_env: - USE_CUDA: - - yes - -variations: - cuda: - env: - USE_CUDA: yes -``` - - -Its `deps` pipeline runs other CM scripts to detect OS parameters, detect or install Python, -install the latest ONNX run-time, and download the ResNet-50 model and the minimal ImageNet dataset (500).
- - It also contains [`run.sh`](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/app-image-classification-onnx-py/run.sh) -and [`run.bat`](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/app-image-classification-onnx-py/run.bat) -to install extra Python requirements (not yet unified by CM scripts) -and run a Python script that classifies an image from ImageNet -or an image provided by the user. - -Before running it, let us install a Python virtual environment via CM to avoid altering -the native Python installation: -```bash -cm run script "install python-venv" --name=my-test -cm show cache --tags=python -``` - -You can run it on any system as follows: - -```bash -cm run script "python app image-classification onnx" - -``` - - -To avoid CM asking which Python to use, you can force the use of the Python virtual environment -as follows: - -```bash -cm run script "python app image-classification onnx" --adr.python.name=my-test -``` - - - -If you run this CM script for the first time, it may take several minutes because it will detect, download, build and cache all dependencies. - -When you run it again, it will plug in all cached dependencies: - -```bash -cm run script "python app image-classification onnx" --adr.python.name=my-test - -``` - -You can then run it with your own image as follows: -```bash -cm run script --tags=app,image-classification,onnx,python \ - --adr.python.name=my-test --input={path to my JPEG image} -``` - - - -## Unifying host OS and CPU detection - -In order to make experiments more portable and interoperable, we need to unify -the information about host OS and CPU across different systems. -We are gradually improving the following two CM scripts: - -* [`detect-os`](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-os) -* [`detect-cpu`](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-cpu) - -These two CM scripts have *customize.py* with preprocess and postprocess functions -and a native run script to detect OS info and update environment variables -and the state dictionary needed by all other CM scripts. - -You can run them on your platform as follows: - -```bash -cm run script "detect os" --out=json - -... - -cm run script "detect cpu" --out=json -``` - -If some information is missing or not consistent across different platforms, -you can improve it in a backwards-compatible way. You can then submit a PR [here](https://github.com/mlcommons/ck/pulls) -to let the community reuse your knowledge and collaboratively enhance common automation scripts, pipelines and workflows - -that's why we called our project "Collective Knowledge". - - -## Detecting, installing and caching system dependencies - -Many projects require installation of some system dependencies. Unfortunately, the procedure -is different across different systems. - -That's why we have developed two other CM scripts to unify and automate this process on any system. - -* [`get-sys-utils-cm`]( https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-cm ) -* [`get-sys-utils-min`]( https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-min ) - -They will install (minimal) system dependencies based on the OS and CPU info detected by the CM scripts mentioned above. - -The last script is particularly useful to make applications compatible with Windows -where many typical tools like "wget", "patch", etc. are missing - they will be automatically -downloaded by that script.
- -You can use them as follows: -```bash -cm run script "get sys-utils-min" --out=json -cm run script "get sys-utils-cm" -``` - - - - -## Using variations - -In some cases, we want the same CM script to download some artifact in different formats. - -For example, we may want to download and cache the ResNet-50 model in ONNX, PyTorch, TensorFlow or TFLite format. - -In such cases, we use so-called `variations` in the meta description of a given CM script. - -For example, the CM script `get-ml-model-resnet50` has many variations and combinations, separated by commas, -to download this model in multiple formats: - -* `onnx` -* `onnx,opset-11` -* `onnx,opset-8` -* `pytorch` -* `pytorch,fp32` -* `pytorch,int8` -* `tflite` -* `tflite,argmax` -* `tflite,no-argmax` -* `tensorflow` -* `batch_size.1` -* `batch_size.#` - -These variations simply update environment variables and add more dependencies on other CM scripts -before running `customize.py` and native scripts as described in [_cm.json]( https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-ml-model-resnet50/_cm.json#L30 ). - -It is possible to specify a required variation or multiple variations when running a given CM script by adding extra tags with the "_" prefix. - -For example, you can install the quantized ResNet-50 model in the PyTorch int8 format as follows: - -```bash -cm run script "get ml-model resnet50 _pytorch _int8" --out=json -``` - -You can install the FP32 variation of this model at the same time: -```bash -cm run script "get ml-model resnet50 _pytorch _fp32" --out=json -``` - -You can now find them in the cache by tags and variations as follows: -```bash -cm show cache --tags=get,ml-model,resnet50 -cm show cache --tags=get,ml-model,resnet50,_pytorch -cm show cache --tags=get,ml-model,resnet50,_pytorch,_fp32 -``` - - - - - - - - - - - -## Running CM scripts inside containers - -One of the important ideas behind using a common automation language -is to use it inside and outside containers, thus avoiding the need to create -ad-hoc manual containers and README files. - -We can just use base containers and let the CM automation language -detect installed tools and connect external data with the automation pipelines and workflows. - -See examples of modular containers with the CM language to automate the MLPerf inference benchmark from MLCommons -[here](https://github.com/mlcommons/ck/tree/master/docker). - -Note that we continue working on CM functionality to automatically generate -Docker containers and README files when executing CM scripts -(a prototype was successfully validated in the MLPerf inference v3.0 submission): - -* https://github.com/mlcommons/cm4mlops/tree/main/script/build-dockerfile -* https://github.com/mlcommons/cm4mlops/tree/main/script/build-docker-image - - - - -## Getting help about other script automation flags - -You can get help about all flags used to customize the execution -of a given CM script from the command line as follows: - -```bash -cm run script --help -``` - -Some flags are useful to make it easier to debug scripts and save output in files. - -You can find more info about the CM script execution flow in this [document](README-specs.md).
- - - - - - - - - - - -## Further reading - -* [CM "script" automation specification](README-specs.md) -* [MLCommons CM script sources](https://github.com/mlcommons/cm4mlops/tree/main/script) -* [List of portable and reusable CM scripts from MLCommons](https://access.cknowledge.org/playground/?action=scripts) -* [CM "cache" automation](../cache/README-extra.md) diff --git a/automation/script/README-specs.md b/automation/script/README-specs.md deleted file mode 100644 index 4b40feeba..000000000 --- a/automation/script/README-specs.md +++ /dev/null @@ -1,79 +0,0 @@ -# CM "script" automation specification - -Please check the [CM documentation](https://github.com/mlcommons/ck/tree/master/docs#collective-mind-language-cm) -for more details about the CM automation language. - -See the CM script introduction [here](README-extra.md). - -See the [automatically generated catalog](https://github.com/mlcommons/ck/blob/master/docs/list_of_scripts.md) of all CM scripts from MLCommons. - -## Getting started with CM scripts - -* A CM script is identified by a set of tags and a unique ID. -* Further, each CM script can have multiple variations, identified by variation tags, which are treated in the same way as tags but carry a `_` prefix. - -### CM script execution flow -* When a CM script is invoked (either by tags or by unique ID), its `_cm.json` is processed first, and any `deps` scripts listed there are executed in order. -* Once all the `deps` scripts are executed, the `customize.py` file is checked and, if present, its `preprocess` function is executed. -* Then any `prehook_deps` CM scripts mentioned in `_cm.json` are executed, similar to `deps`. -* After this, keys in the `env` dictionary are exported as `ENV` variables and the `run` file, if it exists, is executed. -* Once the run file execution is done, any `posthook_deps` CM scripts mentioned in `_cm.json` are executed, similar to `deps`. -* Then the `postprocess` function inside `customize.py` is executed if present. -* After this stage, any `post_deps` CM scripts mentioned in `_cm.json` are executed. - -**Note:** if a script is already cached, then the `preprocess`, run file and `postprocess` executions won't happen; only the dependencies marked as `dynamic` will be executed from `deps`, `prehook_deps`, `posthook_deps` and `post_deps`. - -### Input flags -When we run a CM script, we can also pass inputs to it, and any input added in the `input_mapping` dictionary inside `_cm.json` gets converted to the corresponding `ENV` variable. - -### Conditional execution of any `deps`, `post_deps` -We can use a `skip_if_env` dictionary inside any `deps`, `prehook_deps`, `posthook_deps` or `post_deps` entry to make its execution conditional. - -### Versions -We can specify any specific version of a script using `version`. `version_max` and `version_min` are also possible options. -* When `version_min` is given, any version above it that is present in the cache or detected on the system can be chosen. If nothing is detected, `default_version` (if present and above `version_min`) will be used for installation. Otherwise `version_min` will be used as `version`. -* When `version_max` is given, any version below it that is present in the cache or detected on the system can be chosen. If nothing is detected, `default_version` (if present and below `version_max`) will be used for installation. Otherwise `version_max_usable` (an additional input needed with `version_max`) will be used as `version`, as sketched below.
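To make the version selection rules above concrete, here is a minimal Python sketch of that logic; the function and argument names are hypothetical and this is not the actual CM implementation:

```python
# Hypothetical sketch of the version selection rules described above
# (illustrative only, not the actual CM implementation).

def parse(v):
    # "3.10.8" -> (3, 10, 8) so that versions compare numerically
    return tuple(int(x) for x in v.split('.'))

def select_version(detected, version_min=None, version_max=None,
                   default_version=None, version_max_usable=None):
    # Any cached/detected version within [version_min, version_max] can be chosen
    ok = [v for v in detected
          if (version_min is None or parse(v) >= parse(version_min)) and
             (version_max is None or parse(v) <= parse(version_max))]
    if ok:
        return max(ok, key=parse)
    if version_min is not None:
        if default_version and parse(default_version) >= parse(version_min):
            return default_version      # install default_version
        return version_min              # fall back to version_min
    if version_max is not None:
        if default_version and parse(default_version) <= parse(version_max):
            return default_version
        return version_max_usable       # extra input required with version_max
    return default_version

print(select_version(['3.8.10', '3.10.8'], version_min='3.9'))  # 3.10.8
```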
- -### Variations -* Variations are used to customize a CM script, and each unique combination of variations uses a unique cache entry. Each variation can turn on `env` keys as well as any other meta, including dependencies specific to it. Variations are turned on like tags but with a `_` prefix. For example, if a script has tags `"get,myscript"`, to call the variation `"test"` inside it, we have to use tags `"get,myscript,_test"`. - -#### Variation groups -`group` is a key to map variations into a group, and at any time only one variation from a group can be used in the variation tags. For example, both `cpu` and `cuda` can be two variations under the `device` group, but a user can at any time use either `cpu` or `cuda` as variation tags, not both. - -#### Dynamic variations -Sometimes it is difficult to add all variations needed for a script; for example, `batch_size` can take many different values. To handle this case, we support dynamic variations using '#', where '#' can be dynamically replaced by any string. For example, `"_batch_size.8"` can be used as a tag to turn on the dynamic variation `"_batch_size.#"`. - -### ENV flow during CM script execution -* [TBD] Issue added [here](https://github.com/mlcommons/ck/issues/382) -* During a given script execution, the incoming `env` dictionary is saved `(saved_env)` and all the updates happen on a copy of it. -* Once a script execution is over (which includes all the dependent script executions as well), newly created keys and any updated keys are merged with the `saved_env`, provided the keys are mentioned in `new_env_keys`. -* The same behaviour applies to the `state` dictionary. - -#### Special env keys -* Any env key with the prefix `CM_TMP_` or `CM_GIT_` is not passed by default to any dependency. These can be force passed by adding the key(s) to the `force_env_keys` list of the concerned dependency. -* Similarly, we can prevent any env key from being passed to a given dependency by adding the prefix of the key to the `clean_env_keys` list of the concerned dependency. -* `--input` is automatically converted to the `CM_INPUT` env key. -* `version` is converted to `CM_VERSION`, `version_min` to `CM_VERSION_MIN` and `version_max` to `CM_VERSION_MAX`. -* If `env['CM_GH_TOKEN']=TOKEN_VALUE` is set, then git URLs (specified by `CM_GIT_URL`) are changed to add this token. -* If `env['CM_GIT_SSH']=yes`, then git URLs are changed from HTTPS to SSH. - -### Script Meta -#### Special keys in script meta -* TBD: `reuse_version`, `inherit_variation_tags`, `update_env_tags_from_env` - -### How cache works? -* If `cache=true` is set in a script meta, the result of the script execution is cached for further use. -* For a cached script, `env` and `state` updates are done using the `new_env` and `new_state` dictionaries which are stored in the `cm-cached.json` file inside the cached folder. -* By using the `--new` input, a new cache entry can be forced even when an old one exists. -* By default, no dependencies are run for a cached entry unless the `dynamic` key is set for it. - -### Updating ENV from inside the run script -* [TBD] - - -### Script workflow (env, deps, native scripts) - - - - -© 2022-24 [MLCommons](https://mlcommons.org)
diff --git a/automation/script/README.md b/automation/script/README.md index d4a4c62bc..bbedf887d 100644 --- a/automation/script/README.md +++ b/automation/script/README.md @@ -1,427 +1,77 @@ -*This README is automatically generated - don't edit! See [extra README](README-extra.md) for extra notes!* +# CM "script" automation specification -### Automation actions +Please check the [CM documentation](https://github.com/mlcommons/ck/tree/master/docs#collective-mind-language-cm) +for more details about the CM automation language. -#### run - * CM CLI: ```cm run script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L77)) - * CM CLI with UID: ```cm run script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L77)) - * CM Python API: - ```python - import cmind - r=cm.access({ - 'action':'run' - 'automation':'script,5b4e0237da074764' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L77) - ```python - }) - if r['return']>0: - print(r['error']) - ``` +## Getting started with CM scripts -#### version +* A CM script is identified by a set of tags and a unique ID. +* Further, each CM script can have multiple variations, identified by variation tags, which are treated in the same way as tags but carry a `_` prefix. - * CM CLI: ```cm version script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2199)) - * CM CLI with UID: ```cm version script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2199)) - * CM Python API: - ```python - import cmind +### CM script execution flow +* When a CM script is invoked (either by tags or by unique ID), its `_cm.json` is processed first, and any `deps` scripts listed there are executed in order. +* Once all the `deps` scripts are executed, the `customize.py` file is checked and, if present, its `preprocess` function is executed. +* Then any `prehook_deps` CM scripts mentioned in `_cm.json` are executed, similar to `deps`. +* After this, keys in the `env` dictionary are exported as `ENV` variables and the `run` file, if it exists, is executed. +* Once the run file execution is done, any `posthook_deps` CM scripts mentioned in `_cm.json` are executed, similar to `deps`. +* Then the `postprocess` function inside `customize.py` is executed if present. +* After this stage, any `post_deps` CM scripts mentioned in `_cm.json` are executed. - r=cm.access({ - 'action':'version' - 'automation':'script,5b4e0237da074764' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2199) - ```python - }) - if r['return']>0: - print(r['error']) - ``` +**Note:** if a script is already cached, then the `preprocess`, run file and `postprocess` executions won't happen; only the dependencies marked as `dynamic` will be executed from `deps`, `prehook_deps`, `posthook_deps` and `post_deps`. -#### search +### Input flags +When we run a CM script, we can also pass inputs to it, and any input added in the `input_mapping` dictionary inside `_cm.json` gets converted to the corresponding `ENV` variable, as sketched below.
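As a rough sketch of this conversion (the key names below are hypothetical, not taken from a real script):

```python
# Hypothetical sketch of input-to-ENV conversion via input_mapping
# (illustrative only, not the real implementation).

input_mapping = {"test1": "MLC_ENV_TEST1"}   # from _cm.json
cli_inputs = {"test1": "ABC1"}               # e.g. from --test1=ABC1

env = {}
for flag, env_key in input_mapping.items():
    if flag in cli_inputs:
        env[env_key] = str(cli_inputs[flag])  # exported as an ENV variable

print(env)  # {'MLC_ENV_TEST1': 'ABC1'}
```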
- * CM CLI: ```cm search script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2227))
- * CM CLI with UID: ```cm search script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2227))
- * CM Python API:
- ```python
- import cmind
+### Conditional execution of any `deps`, `post_deps`
+We can use a `skip_if_env` dictionary inside any `deps`, `prehook_deps`, `posthook_deps` or `post_deps` entry to make its execution conditional.
- r=cm.access({
- 'action':'search'
- 'automation':'script,5b4e0237da074764'
- 'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2227)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
+### Versions
+We can request a specific version of a script using `version`; `version_min` and `version_max` are also supported.
+* When `version_min` is given, any version above it that is present in the cache or detected on the system can be chosen. If nothing is detected, `default_version` (if present and above `version_min`) is used for installation; otherwise `version_min` is used as `version`.
+* When `version_max` is given, any version below it that is present in the cache or detected on the system can be chosen. If nothing is detected, `default_version` (if present and below `version_max`) is used for installation; otherwise `version_max_usable` (an additional input required alongside `version_max`) is used as `version`.
-#### test
+### Variations
+* Variations are used to customize a CM script, and each unique combination of variations uses a unique cache entry. Each variation can turn on `env` keys as well as any other meta, including dependencies specific to it. Variations are turned on like tags but with a `_` prefix. For example, if a script has tags `"get,myscript"`, the variation `"test"` inside it is called with the tags `"get,myscript,_test"`.
+
+#### Variation groups
+`group` is a key that maps variations into a group; at any time, only one variation from a group can be used in the variation tags. For example, `cpu` and `cuda` can be two variations under the `device` group, and a user can use either `cpu` or `cuda` as a variation tag, but not both at once.
- * CM CLI: ```cm test script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2346))
- * CM CLI with UID: ```cm test script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2346))
- * CM Python API:
- ```python
- import cmind
+#### Dynamic variations
+Sometimes it is difficult to enumerate all the variations a script may need; `batch_size`, for example, can take many different values. To handle this case, we support dynamic variations using `#`, where `#` can be dynamically replaced by any string. For example, `"_batch_size.8"` can be used as a tag to turn on the dynamic variation `"_batch_size.#"`.
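As a rough illustration of the tag conventions above, the following self-contained sketch separates variation tags from plain tags and matches a dynamic variation pattern. The matching logic is a simplification for illustration, not the automation's actual implementation:

```python
tags = ["get", "myscript", "_cuda", "_batch_size.8"]

# Variation tags carry a `_` prefix; everything else is a plain tag.
plain_tags = [t for t in tags if not t.startswith("_")]
variation_tags = [t[1:] for t in tags if t.startswith("_")]

def matches_dynamic(variation, pattern):
    """Simplified check: 'batch_size.8' matches the dynamic variation
    pattern 'batch_size.#', where '#' stands for any non-empty string."""
    if "#" not in pattern:
        return variation == pattern
    prefix = pattern.split("#", 1)[0]
    return variation.startswith(prefix) and len(variation) > len(prefix)

print(plain_tags)                                       # ['get', 'myscript']
print(variation_tags)                                   # ['cuda', 'batch_size.8']
print(matches_dynamic("batch_size.8", "batch_size.#"))  # True
```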
- r=cm.access({
- 'action':'test'
- 'automation':'script,5b4e0237da074764'
- 'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2346)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
+### ENV flow during CM script execution
+* [TBD] Issue added [here](https://github.com/mlcommons/ck/issues/382)
+* During a given script execution, the incoming `env` dictionary is saved (`saved_env`) and all updates happen on a copy of it.
+* Once a script execution is over (including all dependent script executions), newly created keys and any updated keys are merged back into `saved_env`, provided they are mentioned in `new_env_keys`.
+* The same behaviour applies to the `state` dictionary.
-#### native_run
+#### Special env keys
+* Any env key with the prefix `MLC_TMP_*` or `MLC_GIT_*` is not passed by default to any dependency. Such keys can be force-passed by adding them to the `force_env_keys` list of the concerned dependency.
+* Similarly, we can prevent any env key from being passed to a given dependency by adding its prefix to the `clean_env_keys` list of the concerned dependency.
+* `--input` is automatically converted to the `MLC_INPUT` env key.
+* `version` is converted to `MLC_VERSION`, `version_min` to `MLC_VERSION_MIN` and `version_max` to `MLC_VERSION_MAX`.
+* If `env['MLC_GH_TOKEN']=TOKEN_VALUE` is set, then git URLs (specified by `MLC_GIT_URL`) are changed to add this token.
+* If `env['MLC_GIT_SSH']=yes`, then git URLs are changed from HTTPS to SSH.
- * CM CLI: ```cm native_run script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2412))
- * CM CLI with UID: ```cm native_run script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2412))
- * CM Python API:
- ```python
- import cmind
+### Script Meta
+#### Special keys in script meta
+* TBD: `reuse_version`, `inherit_variation_tags`, `update_env_tags_from_env`
- r=cm.access({
- 'action':'native_run'
- 'automation':'script,5b4e0237da074764'
- 'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2412)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
+### How does caching work?
+* If `cache=true` is set in a script's meta, the result of the script execution is cached for further use.
+* For a cached script, `env` and `state` updates are done using the `new_env` and `new_state` dictionaries, which are stored in the `mlc-cached-state.json` file inside the cached folder (sketched below).
+* By using the `--new` input, a new cache entry can be forced even when an old one exists.
+* By default, no dependencies are run for a cached entry unless the `dynamic` key is set for them.
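A minimal sketch of the cache replay described above, assuming the cached entry stores its updates under `new_env` and `new_state` keys inside `mlc-cached-state.json`; the file layout is inferred from the key and file names in this patch, not verified against the implementation:

```python
import json
import os

def replay_cached_entry(cache_dir, env, state):
    """Apply the env/state updates recorded for a cached script
    instead of re-running it (sketch; error handling omitted)."""
    with open(os.path.join(cache_dir, "mlc-cached-state.json")) as f:
        cached = json.load(f)
    env.update(cached.get("new_env", {}))
    state.update(cached.get("new_state", {}))
    return env, state
```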
-#### add +### Updating ENV from inside the run script +* [TBD] - * CM CLI: ```cm add script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2485)) - * CM CLI with UID: ```cm add script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2485)) - * CM Python API: - ```python - import cmind - r=cm.access({ - 'action':'add' - 'automation':'script,5b4e0237da074764' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2485) - ```python - }) - if r['return']>0: - print(r['error']) - ``` +### Script workflow (env, deps, native scripts) -#### run_native_script + - * CM CLI: ```cm run_native_script script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3270)) - * CM CLI with UID: ```cm run_native_script script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3270)) - * CM Python API: - ```python - import cmind - r=cm.access({ - 'action':'run_native_script' - 'automation':'script,5b4e0237da074764' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3270) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### find_file_in_paths - - * CM CLI: ```cm find_file_in_paths script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3314)) - * CM CLI with UID: ```cm find_file_in_paths script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3314)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'find_file_in_paths' - 'automation':'script,5b4e0237da074764' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3314) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### detect_version_using_script - - * CM CLI: ```cm detect_version_using_script script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3533)) - * CM CLI with UID: ```cm detect_version_using_script script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3533)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'detect_version_using_script' - 'automation':'script,5b4e0237da074764' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3533) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### find_artifact - - * CM CLI: ```cm find_artifact script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3606)) - * CM CLI with UID: ```cm find_artifact script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3606)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'find_artifact' - 'automation':'script,5b4e0237da074764' - 'out':'con' - ``` - [add keys from this 
API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3606) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### find_file_deep - - * CM CLI: ```cm find_file_deep script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3764)) - * CM CLI with UID: ```cm find_file_deep script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3764)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'find_file_deep' - 'automation':'script,5b4e0237da074764' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3764) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### find_file_back - - * CM CLI: ```cm find_file_back script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3822)) - * CM CLI with UID: ```cm find_file_back script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3822)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'find_file_back' - 'automation':'script,5b4e0237da074764' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3822) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### parse_version - - * CM CLI: ```cm parse_version script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3863)) - * CM CLI with UID: ```cm parse_version script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3863)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'parse_version' - 'automation':'script,5b4e0237da074764' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3863) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### update_deps - - * CM CLI: ```cm update_deps script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3917)) - * CM CLI with UID: ```cm update_deps script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3917)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'update_deps' - 'automation':'script,5b4e0237da074764' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3917) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### get_default_path_list - - * CM CLI: ```cm get_default_path_list script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3937)) - * CM CLI with UID: ```cm get_default_path_list script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3937)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'get_default_path_list' - 'automation':'script,5b4e0237da074764' - 
'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3937) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### doc - - * CM CLI: ```cm doc script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3948)) - * CM CLI with UID: ```cm doc script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3948)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'doc' - 'automation':'script,5b4e0237da074764' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3948) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### gui - - * CM CLI: ```cm gui script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3976)) - * CM CLI with UID: ```cm gui script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3976)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'gui' - 'automation':'script,5b4e0237da074764' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3976) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### dockerfile - - * CM CLI: ```cm dockerfile script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4013)) - * CM CLI with UID: ```cm dockerfile script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4013)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'dockerfile' - 'automation':'script,5b4e0237da074764' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4013) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### docker - - * CM CLI: ```cm docker script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4041)) - * CM CLI with UID: ```cm docker script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4041)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'docker' - 'automation':'script,5b4e0237da074764' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4041) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### prepare - - * CM CLI: ```cm prepare script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4095)) - * CM CLI with UID: ```cm prepare script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4095)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'prepare' - 'automation':'script,5b4e0237da074764' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4095) - ```python - 
}) - if r['return']>0: - print(r['error']) - ``` - -#### clean_some_tmp_files - - * CM CLI: ```cm clean_some_tmp_files script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4106)) - * CM CLI with UID: ```cm clean_some_tmp_files script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4106)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'clean_some_tmp_files' - 'automation':'script,5b4e0237da074764' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4106) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -### Maintainers - -* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce) \ No newline at end of file +© 2022-25 [MLCommons](https://mlcommons.org)
diff --git a/automation/script/docker.py b/automation/script/docker.py index bd511d7e3..99fa619cf 100644 --- a/automation/script/docker.py +++ b/automation/script/docker.py @@ -95,7 +95,7 @@ def dockerfile(self_module, input_params): # Set Docker-specific configurations docker_settings = state_data.get('docker', {}) docker_settings['dockerfile_env'] = dockerfile_environment_vars - dockerfile_environment_vars['CM_RUN_STATE_DOCKER'] = True + dockerfile_environment_vars['MLC_RUN_STATE_DOCKER'] = True if not docker_settings.get('run', True) and not input_params.get( 'docker_run_override', False): @@ -115,7 +115,7 @@ def dockerfile(self_module, input_params): # Prune temporary environment variables run_command = copy.deepcopy(run_command_arc) for key in list(run_command.get('env', {}).keys()): - if key.startswith("CM_TMP_"): + if key.startswith("MLC_TMP_"): del run_command['env'][key] # Regenerate script command @@ -177,7 +177,7 @@ def dockerfile(self_module, input_params): # Push Docker image if specified if input_params.get('docker_push_image') in [True, 'True', 'yes']: - environment_vars['CM_DOCKER_PUSH_IMAGE'] = 'yes' + environment_vars['MLC_DOCKER_PUSH_IMAGE'] = 'yes' # Generate Dockerfile mlc_docker_input = { @@ -227,7 +227,7 @@ def docker_run(self_module, i): if i.get('docker_skip_build', False): noregenerate_docker_file = True norecreate_docker_image = True - env['CM_DOCKER_SKIP_BUILD'] = 'yes' + env['MLC_DOCKER_SKIP_BUILD'] = 'yes' # Prune unnecessary Docker-related input keys r = prune_input({'input': i, 'extra_keys_starts_with': ['docker_']}) @@ -249,7 +249,7 @@ def docker_run(self_module, i): if not lst: return {'return': 1, 'error': 'No scripts were found'} - env['CM_RUN_STATE_DOCKER'] = False + env['MLC_RUN_STATE_DOCKER'] = False state, const, const_state = i.get( 'state', {}), i.get( 'const', {}), i.get( @@ -259,7 +259,7 @@ def docker_run(self_module, i): docker_cache = i.get('docker_cache', "yes") if docker_cache.lower() in ["no", "false"]: - env.setdefault('CM_DOCKER_CACHE', docker_cache) + env.setdefault('MLC_DOCKER_CACHE', docker_cache) image_repo = i.get('docker_image_repo', '') add_deps_recursive = i.get('add_deps_recursive') diff --git a/automation/script/module.py b/automation/script/module.py index bceca05d3..71ea0c9ad 100644 --- a/automation/script/module.py +++ b/automation/script/module.py @@ -37,7 +37,7 @@ def __init__(self, action_object, automation_file): self.run_state['parent'] = None self.run_state['version_info'] = [] self.run_state['cache'] = False - self.file_with_cached_state = 'cm-cached-state.json' + self.file_with_cached_state = 'mlc-cached-state.json' self.tmp_file_env = 'tmp-env' self.tmp_file_env_all = 'tmp-env-all' @@ -50,20 +50,20 @@ def __init__(self, action_object, automation_file): self.__version__ = "1.3.2" - self.local_env_keys = ['CM_VERSION', - 'CM_VERSION_MIN', - 'CM_VERSION_MAX', - 'CM_VERSION_MAX_USABLE', - 'CM_DETECTED_VERSION', - 'CM_INPUT', - 'CM_OUTPUT', - 'CM_OUTBASENAME', - 'CM_OUTDIRNAME', - 'CM_NAME', - 'CM_EXTRA_CACHE_TAGS', - 'CM_TMP_*', - 'CM_GIT_*', - 'CM_RENEW_CACHE_ENTRY'] + self.local_env_keys = ['MLC_VERSION', + 'MLC_VERSION_MIN', + 'MLC_VERSION_MAX', + 'MLC_VERSION_MAX_USABLE', + 'MLC_DETECTED_VERSION', + 'MLC_INPUT', + 'MLC_OUTPUT', + 'MLC_OUTBASENAME', + 'MLC_OUTDIRNAME', + 'MLC_NAME', + 'MLC_EXTRA_CACHE_TAGS', + 'MLC_TMP_*', + 'MLC_GIT_*', + 'MLC_RENEW_CACHE_ENTRY'] self.input_flags_converted_to_tmp_env = ['path'] @@ -105,33 +105,33 @@ def run(self, i): (add_deps) (dict): {"name": {"tag": "tag(s)"}, "name": {"version": 
"version_no"}, ...} (add_deps_recursive) (dict): same as add_deps but is passed recursively onto dependencies as well - (version) (str): version to be added to env.CM_VERSION to specialize this flow - (version_min) (str): min version to be added to env.CM_VERSION_MIN to specialize this flow - (version_max) (str): max version to be added to env.CM_VERSION_MAX to specialize this flow - (version_max_usable) (str): max USABLE version to be added to env.CM_VERSION_MAX_USABLE + (version) (str): version to be added to env.MLC_VERSION to specialize this flow + (version_min) (str): min version to be added to env.MLC_VERSION_MIN to specialize this flow + (version_max) (str): max version to be added to env.MLC_VERSION_MAX to specialize this flow + (version_max_usable) (str): max USABLE version to be added to env.MLC_VERSION_MAX_USABLE - (path) (str): list of paths to be added to env.CM_TMP_PATH to specialize this flow + (path) (str): list of paths to be added to env.MLC_TMP_PATH to specialize this flow - (input) (str): converted to env.CM_INPUT (local env) - (output) (str): converted to env.CM_OUTPUT (local env) + (input) (str): converted to env.MLC_INPUT (local env) + (output) (str): converted to env.MLC_OUTPUT (local env) - (outbasename) (str): converted to env.CM_OUTBASENAME (local env) - (outdirname) (str): converted to env.CM_OUTDIRNAME (local env) + (outbasename) (str): converted to env.MLC_OUTBASENAME (local env) + (outdirname) (str): converted to env.MLC_OUTDIRNAME (local env) - (extra_cache_tags) (str): converted to env.CM_EXTRA_CACHE_TAGS and used to add to caching (local env) + (extra_cache_tags) (str): converted to env.MLC_EXTRA_CACHE_TAGS and used to add to caching (local env) - (name) (str): taken from env.CM_NAME and/or converted to env.CM_NAME (local env) + (name) (str): taken from env.MLC_NAME and/or converted to env.MLC_NAME (local env) Added to extra_cache_tags with "name-" prefix . 
Useful for python virtual env (to create multiple entries) - (quiet) (bool): if True, set env.CM_QUIET to "yes" and attempt to skip questions + (quiet) (bool): if True, set env.MLC_QUIET to "yes" and attempt to skip questions (the developers have to support it in pre/post processing and scripts) (skip_cache) (bool): if True, skip caching and run in current directory (force_cache) (bool): if True, force caching if can_force_cache=true in script meta (skip_remembered_selections) (bool): if True, skip remembered selections - (uses or sets env.CM_TMP_SKIP_REMEMBERED_SELECTIONS to "yes") + (uses or sets env.MLC_TMP_SKIP_REMEMBERED_SELECTIONS to "yes") (new) (bool): if True, skip search for cached and run again (renew) (bool): if True, rewrite cache entry if exists @@ -160,7 +160,7 @@ def run(self, i): inside a script specified by these tags (debug_script) (bool): if True, debug current script (set debug_script_tags to the tags of a current script) - (debug_uid) (str): if True, set CM_TMP_DEBUG_UID to this number to enable + (debug_uid) (str): if True, set MLC_TMP_DEBUG_UID to this number to enable remote python debugging of scripts and wrapped apps/tools (detected_versions) (dict): All the used scripts and their detected_versions @@ -178,12 +178,12 @@ def run(self, i): (pause) (bool): if True, pause at the end of the main script (Press Enter to continue) - (repro) (bool): if True, dump cm-run-script-input.json, cm-run_script_output.json, - cm-run-script-state.json, cm-run-script-info.json + (repro) (bool): if True, dump mlc-run-script-input.json, mlc-run_script_output.json, + mlc-run-script-state.json, mlc-run-script-info.json to improve the reproducibility of results (repro_prefix) (str): if !='', use it to record above files {repro-prefix)-input.json ... - (repro_dir) (str): if !='', use this directory to dump info (default = 'cm-repro') + (repro_dir) (str): if !='', use this directory to dump info (default = 'mlc-repro') (dump_version_info) (bool): dump info about resolved versions of tools in dependencies @@ -193,13 +193,13 @@ def run(self, i): (script_call_prefix) (str): how to call script in logs and READMEs (mlc run script) - (skip_sys_utils) (bool): if True, set env['CM_SKIP_SYS_UTILS']='yes' + (skip_sys_utils) (bool): if True, set env['MLC_SKIP_SYS_UTILS']='yes' to skip CM sys installation - (skip_sudo) (bool): if True, set env['CM_TMP_SKIP_SUDO']='yes' + (skip_sudo) (bool): if True, set env['MLC_TMP_SKIP_SUDO']='yes' to let scripts deal with that (silent) (bool): if True, attempt to suppress all info if supported - (sets CM_TMP_SILENT=yes) + (sets MLC_TMP_SILENT=yes) (s) (bool): the same as 'silent' ... 
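As a usage sketch of the flags documented in the docstring above: the tags below are hypothetical, and the call shape follows the CM Python API (`cmind.access`) used in the README examples this patch removes.

```python
import cmind

# Hypothetical invocation exercising a few of the documented flags.
r = cmind.access({
    'action': 'run',
    'automation': 'script',
    'tags': 'get,myscript,_test',   # hypothetical script tags
    'version_min': '3.10',          # exported as env.MLC_VERSION_MIN
    'quiet': True,                  # sets env.MLC_QUIET to "yes"
    'out': 'con',
})
if r['return'] > 0:
    print(r['error'])
```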
@@ -239,11 +239,11 @@ def _run(self, i): if repro: repro_prefix = i.get('repro_prefix', '') if repro_prefix == '': - repro_prefix = 'cm-run-script' + repro_prefix = 'mlc-run-script' repro_dir = i.get('repro_dir', '') if repro_dir == '': - repro_dir = os.path.join(os.getcwd(), 'cm-repro') + repro_dir = os.path.join(os.getcwd(), 'mlc-repro') if not os.path.isdir(repro_dir): os.makedirs(repro_dir) @@ -277,9 +277,9 @@ def _run(self, i): start_time = time.time() - # Check extra input from environment variable CM_SCRIPT_EXTRA_CMD + # Check extra input from environment variable MLC_SCRIPT_EXTRA_CMD # Useful to set up default flags such as the name of virtual enviroment - extra_cli = os.environ.get('CM_SCRIPT_EXTRA_CMD', '').strip() + extra_cli = os.environ.get('MLC_SCRIPT_EXTRA_CMD', '').strip() if extra_cli != '': from cmind import cli r = cli.parse(extra_cli) @@ -369,22 +369,22 @@ def _run(self, i): 'prepare', False) if fake_run: - env['CM_TMP_FAKE_RUN'] = 'yes' + env['MLC_TMP_FAKE_RUN'] = 'yes' debug_uid = i.get('debug_uid', '') if debug_uid != '': - r = _update_env(env, 'CM_TMP_DEBUG_UID', debug_uid) + r = _update_env(env, 'MLC_TMP_DEBUG_UID', debug_uid) if r['return'] > 0: return r fake_deps = i.get('fake_deps', False) if fake_deps: - env['CM_TMP_FAKE_DEPS'] = 'yes' + env['MLC_TMP_FAKE_DEPS'] = 'yes' if str(i.get('skip_sys_utils', '')).lower() in ['true', 'yes']: - env['CM_SKIP_SYS_UTILS'] = 'yes' + env['MLC_SKIP_SYS_UTILS'] = 'yes' if str(i.get('skip_sudo', '')).lower() in ['true', 'yes']: - env['CM_TMP_SKIP_SUDO'] = 'yes' + env['MLC_TMP_SKIP_SUDO'] = 'yes' run_state = i.get('run_state', self.run_state) if not run_state.get('version_info', []): @@ -409,7 +409,7 @@ def _run(self, i): del (i['verbose']) if 'v' in i: del (i['v']) - env['CM_TMP_SILENT'] = 'yes' + env['MLC_TMP_SILENT'] = 'yes' run_state['tmp_silent'] = True if 'verbose' in i: @@ -418,7 +418,7 @@ def _run(self, i): verbose = i['v'] if verbose: - env['CM_VERBOSE'] = 'yes' + env['MLC_VERBOSE'] = 'yes' run_state['tmp_verbose'] = True logging.getLogger().setLevel(logging.DEBUG) @@ -445,7 +445,7 @@ def _run(self, i): # Detect current path and record in env for further use in native # scripts current_path = os.path.abspath(os.getcwd()) - r = _update_env(env, 'CM_TMP_CURRENT_PATH', current_path) + r = _update_env(env, 'MLC_TMP_CURRENT_PATH', current_path) if r['return'] > 0: return r @@ -454,15 +454,15 @@ def _run(self, i): 'quiet', False) if 'quiet' in i else ( env.get( - 'CM_QUIET', + 'MLC_QUIET', '').lower() == 'yes') if quiet: - env['CM_QUIET'] = 'yes' + env['MLC_QUIET'] = 'yes' skip_remembered_selections = i.get('skip_remembered_selections', False) if 'skip_remembered_selections' in i \ - else (env.get('CM_SKIP_REMEMBERED_SELECTIONS', '').lower() == 'yes') + else (env.get('MLC_SKIP_REMEMBERED_SELECTIONS', '').lower() == 'yes') if skip_remembered_selections: - env['CM_SKIP_REMEMBERED_SELECTIONS'] = 'yes' + env['MLC_SKIP_REMEMBERED_SELECTIONS'] = 'yes' # Prepare debug info parsed_script = i.get('parsed_artifact') @@ -484,7 +484,7 @@ def _run(self, i): # Bat extension for this host OS bat_ext = os_info['bat_ext'] - # Add permanent env from OS (such as CM_WINDOWS:"yes" on Windows) + # Add permanent env from OS (such as MLC_WINDOWS:"yes" on Windows) env_from_os_info = os_info.get('env', {}) if len(env_from_os_info) > 0: env.update(env_from_os_info) @@ -790,8 +790,8 @@ def _run(self, i): script_repo_path_with_prefix = os.path.join( script_repo_path, script_artifact.repo.meta['prefix']) - env['CM_TMP_CURRENT_SCRIPT_REPO_PATH'] = 
script_repo_path - env['CM_TMP_CURRENT_SCRIPT_REPO_PATH_WITH_PREFIX'] = script_repo_path_with_prefix + env['MLC_TMP_CURRENT_SCRIPT_REPO_PATH'] = script_repo_path + env['MLC_TMP_CURRENT_SCRIPT_REPO_PATH_WITH_PREFIX'] = script_repo_path_with_prefix # Check if has --help if i.get('help', False): @@ -936,7 +936,7 @@ def _run(self, i): explicit_variation_tags = r['explicit_variation_tags'] # USE CASE: - # HERE we may have versions in script input and env['CM_VERSION_*'] + # HERE we may have versions in script input and env['MLC_VERSION_*'] # STEP 900: Get version, min, max, usable from env (priority if passed from another script to force version), # then script input, then script meta @@ -951,14 +951,14 @@ def _run(self, i): # Second, take from env if version == '': - version = env.get('CM_VERSION', '') + version = env.get('MLC_VERSION', '') if version_min == '': - version_min = env.get('CM_VERSION_MIN', '') + version_min = env.get('MLC_VERSION_MIN', '') if version_max == '': - version_max = env.get('CM_VERSION_MAX', '') + version_max = env.get('MLC_VERSION_MAX', '') if version_max_usable == '': version_max_usable = env.get( - 'CM_VERSION_MAX_USABLE', '') + 'MLC_VERSION_MAX_USABLE', '') # Third, take from meta if version == '': @@ -973,10 +973,10 @@ def _run(self, i): # Update env with resolved versions notes = [] - for version_index in [(version, 'CM_VERSION', ' == {}'), - (version_min, 'CM_VERSION_MIN', ' >= {}'), - (version_max, 'CM_VERSION_MAX', ' <= {}'), - (version_max_usable, 'CM_VERSION_MAX_USABLE', '({})')]: + for version_index in [(version, 'MLC_VERSION', ' == {}'), + (version_min, 'MLC_VERSION_MIN', ' >= {}'), + (version_max, 'MLC_VERSION_MAX', ' <= {}'), + (version_max_usable, 'MLC_VERSION_MAX_USABLE', '({})')]: version_value = version_index[0] key = version_index[1] note = version_index[2] @@ -996,7 +996,7 @@ def _run(self, i): ' '.join(notes)) # STEP 900 output: version* set - # env['CM_VERSION*] set + # env['MLC_VERSION*] set # STEP 1000: Update version only if in "versions" (not obligatory) # can be useful when handling complex Git revisions @@ -1043,7 +1043,7 @@ def _run(self, i): if r['return'] > 0: return r - if str(env.get('CM_RUN_STATE_DOCKER', False) + if str(env.get('MLC_RUN_STATE_DOCKER', False) ).lower() in ['true', '1', 'yes']: if state.get('docker'): if str(state['docker'].get('run', True) @@ -1075,11 +1075,11 @@ def _run(self, i): recursion_spaces + ' - Doing fake run for script::{} as we are inside docker'.format(found_script_artifact)) fake_run = True - env['CM_TMP_FAKE_RUN'] = 'yes' + env['MLC_TMP_FAKE_RUN'] = 'yes' ####################################################################### # Check extra cache tags - x = env.get('CM_EXTRA_CACHE_TAGS', '').strip() + x = env.get('MLC_EXTRA_CACHE_TAGS', '').strip() extra_cache_tags = [] if x == '' else x.split(',') if i.get('extra_cache_tags', '') != '': @@ -1094,8 +1094,8 @@ def _run(self, i): if x not in extra_cache_tags: extra_cache_tags.append(x) - if env.get('CM_NAME', '') != '': - extra_cache_tags.append('name-' + env['CM_NAME'].strip().lower()) + if env.get('MLC_NAME', '') != '': + extra_cache_tags.append('name-' + env['MLC_NAME'].strip().lower()) ####################################################################### # Check if need to clean output files @@ -1445,7 +1445,7 @@ def _run(self, i): found_cached = False remove_tmp_tag = True - env['CM_RENEW_CACHE_ENTRY'] = 'yes' + env['MLC_RENEW_CACHE_ENTRY'] = 'yes' # Prepare files to be cleaned clean_files = [self.tmp_file_run_state, @@ -1501,7 +1501,7 @@ def 
_run(self, i): recursion_spaces + ' - Version is not specified - use either default_version from meta or min/max/usable: {}'.format(version)) - r = _update_env(env, 'CM_VERSION', version) + r = _update_env(env, 'MLC_VERSION', version) if r['return'] > 0: return r @@ -1531,7 +1531,7 @@ def _run(self, i): self._merge_dicts_with_tags( add_deps_recursive, versions_meta['add_deps_recursive']) - r = _update_env(env, 'CM_TMP_CURRENT_SCRIPT_PATH', path) + r = _update_env(env, 'MLC_TMP_CURRENT_SCRIPT_PATH', path) if r['return'] > 0: return r @@ -1730,9 +1730,9 @@ def _run(self, i): # Assemble PIP versions pip_version_string = '' - pip_version = env.get('CM_VERSION', '') - pip_version_min = env.get('CM_VERSION_MIN', '') - pip_version_max = env.get('CM_VERSION_MAX', '') + pip_version = env.get('MLC_VERSION', '') + pip_version_min = env.get('MLC_VERSION_MIN', '') + pip_version_max = env.get('MLC_VERSION_MAX', '') if pip_version != '': pip_version_string = '==' + pip_version @@ -1751,7 +1751,7 @@ def _run(self, i): r = _update_env( env, - 'CM_TMP_PIP_VERSION_STRING', + 'MLC_TMP_PIP_VERSION_STRING', pip_version_string) if r['return'] > 0: return r @@ -1763,13 +1763,13 @@ def _run(self, i): pip_version_string) tmp_curdir = os.getcwd() - if env.get('CM_OUTDIRNAME', '') != '': - if os.path.isabs(env['CM_OUTDIRNAME']) or recursion: - c_outdirname = env['CM_OUTDIRNAME'] + if env.get('MLC_OUTDIRNAME', '') != '': + if os.path.isabs(env['MLC_OUTDIRNAME']) or recursion: + c_outdirname = env['MLC_OUTDIRNAME'] else: c_outdirname = os.path.join( - env['CM_TMP_CURRENT_PATH'], env['CM_OUTDIRNAME']) - env['CM_OUTDIRNAME'] = c_outdirname + env['MLC_TMP_CURRENT_PATH'], env['MLC_OUTDIRNAME']) + env['MLC_OUTDIRNAME'] = c_outdirname if not os.path.exists(c_outdirname): os.makedirs(c_outdirname) @@ -1916,13 +1916,13 @@ def _run(self, i): if x not in cached_tags: cached_tags.append(x) - if env.get('CM_OUTDIRNAME', '') != '': + if env.get('MLC_OUTDIRNAME', '') != '': os.chdir(tmp_curdir) detected_version = env.get( - 'CM_DETECTED_VERSION', env.get( - 'CM_VERSION', '')) - dependent_cached_path = env.get('CM_GET_DEPENDENT_CACHED_PATH', '') + 'MLC_DETECTED_VERSION', env.get( + 'MLC_VERSION', '')) + dependent_cached_path = env.get('MLC_GET_DEPENDENT_CACHED_PATH', '') ####################################################################### # Finalize script @@ -2251,7 +2251,7 @@ def _update_env_from_input(self, env, i): for key in self.input_flags_converted_to_tmp_env: value = i.get(key, '').strip() if value != '': - env['CM_TMP_' + key.upper()] = value + env['MLC_TMP_' + key.upper()] = value for key in self.input_flags_converted_to_env: value = i.get( @@ -2264,7 +2264,7 @@ def _update_env_from_input(self, env, i): key, '') if value: - env[f"CM_{key.upper()}"] = value + env[f"MLC_{key.upper()}"] = value r = update_env_with_values(env) if r['return'] > 0: @@ -2276,7 +2276,7 @@ def _update_env_from_input(self, env, i): def _fix_cache_paths(self, env): ''' cm_repos_path = os.environ.get( - 'CM_REPOS', os.path.join( + 'MLC_REPOS', os.path.join( os.path.expanduser("~"), "CM", "repos")) current_cache_path = os.path.realpath( os.path.join(cm_repos_path, "local", "cache")) @@ -2324,7 +2324,7 @@ def _dump_version_info_for_script( if not quiet and not silent: pass - for f in ['cm-run-script-versions.json', 'version_info.json']: + for f in ['mlc-run-script-versions.json', 'version_info.json']: if not quiet and not silent: logging.info('Dumping versions to {}'.format(f)) r = utils.save_json(f, self.run_state.get('version_info', [])) @@ -3626,7 
+3626,7 @@ def _run_deps(self, deps, clean_env_keys_deps, env, state, const, const_state, a if d.get("reuse_version", False): for k in tmp_env: - if k.startswith('CM_VERSION'): + if k.startswith('MLC_VERSION'): env[k] = tmp_env[k] update_tags_from_env = d.get("update_tags_from_env", []) @@ -4097,9 +4097,9 @@ def find_file_in_paths(self, i): run_script_input = i['run_script_input'] env_path_key = i['env_path_key'] - version = env.get('CM_VERSION', '') - version_min = env.get('CM_VERSION_MIN', '') - version_max = env.get('CM_VERSION_MAX', '') + version = env.get('MLC_VERSION', '') + version_min = env.get('MLC_VERSION_MIN', '') + version_max = env.get('MLC_VERSION_MAX', '') x = '' @@ -4230,9 +4230,9 @@ def detect_version_using_script(self, i): run_script_input = i['run_script_input'] - version = env.get('CM_VERSION', '') - version_min = env.get('CM_VERSION_MIN', '') - version_max = env.get('CM_VERSION_MAX', '') + version = env.get('MLC_VERSION', '') + version_min = env.get('MLC_VERSION_MIN', '') + version_max = env.get('MLC_VERSION_MAX', '') x = '' @@ -4339,10 +4339,10 @@ def find_artifact(self, i): # Check if forced to search in a specific path or multiple paths # separated by OS var separator (usually : or ;) - path = env.get('CM_TMP_PATH', '') + path = env.get('MLC_TMP_PATH', '') if path != '' and env.get( - 'CM_TMP_PATH_IGNORE_NON_EXISTANT', '') != 'yes': + 'MLC_TMP_PATH_IGNORE_NON_EXISTANT', '') != 'yes': # Can be a list of paths path_list_tmp = path.split(os_info['env_separator']) for path_tmp in path_list_tmp: @@ -4350,9 +4350,9 @@ def find_artifact(self, i): return {'return': 1, 'error': 'path {} doesn\'t exist'.format(path_tmp)} - # Check if forced path and file name from --input (CM_INPUT - local env + # Check if forced path and file name from --input (MLC_INPUT - local env # - will not be visible for higher-level script) - forced_file = env.get('CM_INPUT', '').strip() + forced_file = env.get('MLC_INPUT', '').strip() if forced_file != '': if not os.path.isfile(forced_file): return {'return': 1, @@ -4391,7 +4391,7 @@ def find_artifact(self, i): path_list.append(os.path.dirname(path_tmp)) # Check if quiet - select_default = True if env.get('CM_QUIET', '') == 'yes' else False + select_default = True if env.get('MLC_QUIET', '') == 'yes' else False # Prepare paths to search r = self.find_file_in_paths({'paths': path_list, @@ -4602,7 +4602,7 @@ def parse_version(self, i): which_env[env_key] = version # to be recorded in the cache meta - which_env['CM_DETECTED_VERSION'] = version + which_env['MLC_DETECTED_VERSION'] = version return {'return': 0, 'version': version, 'string': string} @@ -4754,7 +4754,7 @@ def clean_some_tmp_files(self, i): env = i.get('env', {}) - cur_work_dir = env.get('CM_TMP_CURRENT_SCRIPT_WORK_PATH', '') + cur_work_dir = env.get('MLC_TMP_CURRENT_SCRIPT_WORK_PATH', '') if cur_work_dir != '' and os.path.isdir(cur_work_dir): for x in ['tmp-run.bat', 'tmp-state.json']: xx = os.path.join(cur_work_dir, x) @@ -5147,12 +5147,12 @@ def update_env_with_values(env, fail_on_not_found=False, extra_env=None): # No placeholders found if not placeholders: - # Special handling for CM_GIT_URL - if key == 'CM_GIT_URL' and env.get('CM_GIT_AUTH', "no") == "yes": - if env.get('CM_GH_TOKEN', '') and '@' not in env['CM_GIT_URL']: - params = {"token": env['CM_GH_TOKEN']} + # Special handling for MLC_GIT_URL + if key == 'MLC_GIT_URL' and env.get('MLC_GIT_AUTH', "no") == "yes": + if env.get('MLC_GH_TOKEN', '') and '@' not in env['MLC_GIT_URL']: + params = {"token": env['MLC_GH_TOKEN']} value = 
get_git_url("token", value, params) - elif 'CM_GIT_SSH' in env: + elif 'MLC_GIT_SSH' in env: value = get_git_url("ssh", value) env[key] = value continue @@ -5313,11 +5313,11 @@ def prepare_and_run_script_with_postprocessing(i, postprocess="postprocess"): cur_dir = os.getcwd() - r = _update_env(env, 'CM_TMP_CURRENT_SCRIPT_PATH', path) + r = _update_env(env, 'MLC_TMP_CURRENT_SCRIPT_PATH', path) if r['return'] > 0: return r - r = _update_env(env, 'CM_TMP_CURRENT_SCRIPT_WORK_PATH', cur_dir) + r = _update_env(env, 'MLC_TMP_CURRENT_SCRIPT_WORK_PATH', cur_dir) if r['return'] > 0: return r @@ -5586,9 +5586,9 @@ def get_script_name(env, path, script_name='run'): from os.path import exists - tmp_suff1 = env.get('CM_HOST_OS_FLAVOR', '') - tmp_suff2 = env.get('CM_HOST_OS_VERSION', '') - tmp_suff3 = env.get('CM_HOST_PLATFORM_FLAVOR', '') + tmp_suff1 = env.get('MLC_HOST_OS_FLAVOR', '') + tmp_suff2 = env.get('MLC_HOST_OS_VERSION', '') + tmp_suff3 = env.get('MLC_HOST_PLATFORM_FLAVOR', '') if exists(os.path.join(path, script_name + '-' + tmp_suff1 + '-' + tmp_suff2 + '-' + tmp_suff3 + '.sh')): @@ -5869,7 +5869,7 @@ def is_dep_tobe_skipped(d, env): Internal: check if this dependency is to be skipped """ if d.get('skip_if_fake_run', False) and env.get( - 'CM_TMP_FAKE_RUN', '') == 'yes': + 'MLC_TMP_FAKE_RUN', '') == 'yes': return True if "enable_if_env" in d: @@ -6311,7 +6311,7 @@ def dump_repro_start(repro_prefix, ii): import json # Clean reproducibility and experiment files - for f in ['cm-output.json', 'version_info.json', '-input.json', + for f in ['mlc-output.json', 'version_info.json', '-input.json', '-info.json', '-output.json', '-run-state.json']: ff = repro_prefix + f if f.startswith('-') else f if os.path.isfile(ff): @@ -6363,7 +6363,7 @@ def dump_repro_start(repro_prefix, ii): cm_output['input'] = ii try: - with open('cm-output.json', 'w', encoding='utf-8') as f: + with open('mlc-output.json', 'w', encoding='utf-8') as f: json.dump(cm_output, f, ensure_ascii=False, indent=2) except BaseException: pass @@ -6394,7 +6394,7 @@ def dump_repro(repro_prefix, rr, run_state): # Attempt to read try: - r = utils.load_json('cm-output.json') + r = utils.load_json('mlc-output.json') if r['return'] == 0: cm_output = r['meta'] except BaseException: @@ -6434,7 +6434,7 @@ def dump_repro(repro_prefix, rr, run_state): cm_output['acm_ctuning_repro_badge_functional'] = True try: - with open('cm-output.json', 'w', encoding='utf-8') as f: + with open('mlc-output.json', 'w', encoding='utf-8') as f: json.dump( cm_output, f, diff --git a/automation/script/module_misc.py b/automation/script/module_misc.py deleted file mode 100644 index 336073969..000000000 --- a/automation/script/module_misc.py +++ /dev/null @@ -1,2522 +0,0 @@ -import os -from cmind import utils - -# Meta deps - - -def process_deps(self_module, meta, meta_url, md_script_readme, - key, extra_space='', skip_from_meta=False, skip_if_empty=False): - - x = '' - y = [] - if len(meta.get(key, {})) > 0: - x = '***' - - for d in meta[key]: - d_tags = d.get('tags', '') - - z = extra_space + ' * ' + d_tags - y.append(z) - - names = d.get('names', []) - - for kk in [ - ('enable_if_env', 'Enable this dependency only if all ENV vars are set'), - ('enable_if_any_env', - 'Enable this dependency only if any of ENV vars are set'), - ('skip_if_env', - 'Skip this dependenecy only if all ENV vars are set'), - ('skip_if_any_env', - 'Skip this dependenecy only if any of ENV vars are set') - ]: - - k1 = kk[0] - k2 = kk[1] - - conditions = d.get(k1, {}) - if len(conditions) > 0: - 
y.append(extra_space + - ' * {}:
\n`{}`'.format(k2, str(conditions))) - - if len(names) > 0: - y.append( - extra_space + - ' * CM names: `--adr.' + - str(names) + - '...`') - - # Attempt to find related CM scripts - r = self_module.cmind.access({'action': 'find', - 'automation': 'script', - 'tags': d_tags}) - if r['return'] == 0: - lst = r['list'] - - if len(lst) == 0: - y.append(extra_space + - ' - *Warning: no scripts found*') - else: - for s in lst: - s_repo_meta = s.repo_meta - - s_repo_alias = s_repo_meta.get('alias', '') - s_repo_uid = s_repo_meta.get('uid', '') - - # Check URL - s_url = '' - s_url_repo = '' - if s_repo_alias == 'internal': - s_url_repo = 'https://github.com/mlcommons/ck/tree/master/cm/cmind/repo' - s_url = s_url_repo + '/script/' - elif '@' in s_repo_alias: - s_url_repo = 'https://github.com/' + \ - s_repo_alias.replace('@', '/') + '/tree/master' - if s_repo_meta.get('prefix', '') != '': - s_url_repo += '/' + s_repo_meta['prefix'] - s_url = s_url_repo + '/script/' - - s_alias = s.meta['alias'] - y.append( - extra_space + ' - CM script: [{}]({})'.format(s_alias, s_url + s_alias)) - - z = '' - if not skip_from_meta: - z = ' from [meta]({})'.format(meta_url) - - if not skip_if_empty or len(y) > 0: - md_script_readme.append( - (extra_space + - ' 1. ' + - x + - 'Read "{}" on other CM scripts' + - z + - x).format(key)) - md_script_readme += y - -############################################################ - - -def doc(i): - """ - Add CM automation. - - Args: - (CM input dict): - - (out) (str): if 'con', output to console - - parsed_artifact (list): prepared in CM CLI or CM access function - [ (artifact alias, artifact UID) ] or - [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] - - (repos) (str): list of repositories to search for automations - - (output_dir) (str): output directory (../docs by default) - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - - """ - - self_module = i['self_module'] - - cur_dir = os.getcwd() - - template_file = 'template_list_of_scripts.md' - list_file = 'list_of_scripts.md' - - public_taskforce = '[Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)' - - console = i.get('out') == 'con' - - repos = i.get('repos', '') - if repos == '': - repos = 'internal,a4705959af8e447a' - - parsed_artifact = i.get('parsed_artifact', []) - - if len(parsed_artifact) < 1: - parsed_artifact = [('', ''), ('', '')] - elif len(parsed_artifact) < 2: - parsed_artifact.append(('', '')) - else: - repos = parsed_artifact[1][0] - - list_of_repos = repos.split(',') if ',' in repos else [repos] - - ii = utils.sub_input(i, self_module.cmind.cfg['artifact_keys'] + ['tags']) - - ii['out'] = None - - # Search for automations in repos - lst = [] - - for repo in list_of_repos: - parsed_artifact[1] = ( - '', repo) if utils.is_cm_uid(repo) else ( - repo, '') - ii['parsed_artifact'] = parsed_artifact - r = self_module.search(ii) - if r['return'] > 0: - return r - lst += r['list'] - - md = [] - - toc = [] - - toc_category = {} - toc_category_sort = {} - script_meta = {} - urls = {} - - for artifact in sorted(lst, key=lambda x: x.meta.get('alias', '')): - - toc_readme = [] - - # Common index for all scripts - md_script = [] - - path = artifact.path - meta = artifact.meta - original_meta = artifact.original_meta - - print('Documenting {}'.format(path)) - - alias = meta.get('alias', '') - uid = meta.get('uid', '') - - 
script_meta[alias] = meta - - name = meta.get('name', '') - developers = meta.get('developers', '') - - # Check if has tags help otherwise all tags - tags = meta.get('tags_help', '').strip() - if tags == '': - tags = meta.get('tags', []) - else: - tags = tags.split(' ') - - variations = meta.get('variations', {}) - - variation_keys = sorted(list(variations.keys())) - version_keys = sorted(list(meta.get('versions', {}).keys())) - - default_variation = meta.get('default_variation', '') - default_version = meta.get('default_version', '') - - input_mapping = meta.get('input_mapping', {}) - input_description = meta.get('input_description', {}) - - category = meta.get('category', '').strip() - category_sort = meta.get('category_sort', 0) - if category != '': - if category not in toc_category: - toc_category[category] = [] - - if category not in toc_category_sort or category_sort > 0: - toc_category_sort[category] = category_sort - - if alias not in toc_category[category]: - toc_category[category].append(alias) - - repo_path = artifact.repo_path - repo_meta = artifact.repo_meta - - repo_alias = repo_meta.get('alias', '') - repo_uid = repo_meta.get('uid', '') - - # Check URL - url = '' - url_repo = '' - if repo_alias == 'internal': - url_repo = 'https://github.com/mlcommons/ck/tree/dev/cm/cmind/repo' - url = url_repo + '/script/' - elif '@' in repo_alias: - url_repo = 'https://github.com/' + \ - repo_alias.replace('@', '/') + '/tree/dev' - if repo_meta.get('prefix', '') != '': - url_repo += '/' + repo_meta['prefix'] - url = url_repo + '/script/' - - if url != '': - url += alias - - urls[alias] = url - - # Check if there is about doc - path_readme = os.path.join(path, 'README.md') - path_readme_extra = os.path.join(path, 'README-extra.md') - path_readme_about = os.path.join(path, 'README-about.md') - - readme_about = '' - if os.path.isfile(path_readme_about): - r = utils.load_txt(path_readme_about, split=True) - if r['return'] > 0: - return - - s = r['string'] - readme_about = r['list'] - - ####################################################################### - # Start automatically generated README - md_script_readme = [ - # '
', - # 'Click here to see the table of contents.', - # '{{CM_README_TOC}}', - # '
', - # '', - 'Automatically generated README for this automation recipe: **{}**'.format( - meta['alias']), - ] - - md_script.append('## ' + alias) - md_script.append('') - -# x = 'About' -# md_script_readme.append('___') -# md_script_readme.append('### '+x) -# md_script_readme.append('') -# toc_readme.append(x) - -# x = 'About' -# md_script_readme.append('#### '+x) -# md_script_readme.append('') -# toc_readme.append(' '+x) - - if name != '': - name += '.' - md_script.append('*' + name + '*') - md_script.append('') - -# md_script_readme.append('*'+name+'*') -# md_script_readme.append('') - - if os.path.isfile(path_readme): - r = utils.load_txt(path_readme, split=True) - if r['return'] > 0: - return - - s = r['string'] - readme = r['list'] - - if not 'automatically generated' in s.lower(): - found_path_readme_extra = True - - # Attempt to rename to README-extra.md - if os.path.isfile(path_readme_extra): - return { - 'return': 1, 'error': 'README.md is not auto-generated and README-extra.md already exists - can\'t rename'} - - os.rename(path_readme, path_readme_extra) - - # Add to Git (if in git) - os.chdir(path) - os.system('git add README-extra.md') - os.chdir(cur_dir) - - if category != '': - md_script_readme.append('') - md_script_readme.append('Category: **{}**'.format(category)) - - md_script_readme.append('') - md_script_readme.append('License: **Apache 2.0**') - - md_script_readme.append('') - - if developers == '': - md_script_readme.append('Maintainers: ' + public_taskforce) - else: - md_script_readme.append('Developers: ' + developers) - - x = '* [{}]({})'.format(alias, url) - if name != '': - x += ' *(' + name + ')*' - toc.append(x) - - cm_readme_extra = '[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name={},{}) ] '.format( - alias, uid) - - if os.path.isfile(path_readme_extra): - readme_extra_url = url + '/README-extra.md' - - x = '* Notes from the authors, contributors and users: [*GitHub*]({})'.format( - readme_extra_url) - md_script.append(x) - - cm_readme_extra += '[ [Notes from the authors, contributors and users](README-extra.md) ] ' - - md_script_readme.append('') - md_script_readme.append('---') - md_script_readme.append('*' + cm_readme_extra.strip() + '*') - - if readme_about != '': - md_script_readme += ['', '---', ''] + readme_about - - x = 'Summary' - md_script_readme.append('') - md_script_readme.append('---') - md_script_readme += [ - # '
', - # 'Click to see the summary', - '#### Summary', - '' - ] - toc_readme.append(x) - - -# if category != '': -# x = 'Category' -# md_script_readme.append('___') -# md_script_readme.append('#### '+x) -# md_script_readme.append(' ') -# md_script_readme.append(category+'.') -# toc_readme.append(x) - -# x = '* Category: *{}*'.format(category + '.') -# md_script_readme.append(x) - - -# x = 'Origin' -# md_script_readme.append('___') -# md_script_readme.append('#### '+x) -# md_script_readme.append('') -# toc_readme.append(x) - - x = '* CM GitHub repository: *[{}]({})*'.format(repo_alias, url_repo) - md_script.append(x) - md_script_readme.append(x) - - x = '* GitHub directory for this script: *[GitHub]({})*'.format(url) - md_script.append(x) - md_script_readme.append(x) - - # Check meta - meta_file = self_module.cmind.cfg['file_cmeta'] - meta_path = os.path.join(path, meta_file) - - meta_file += '.yaml' if os.path.isfile( - meta_path + '.yaml') else '.json' - - meta_url = url + '/' + meta_file - - x = '* CM meta description of this script: *[GitHub]({})*'.format( - meta_url) - md_script.append(x) - -# x = '* CM automation "script": *[Docs]({})*'.format('https://github.com/octoml/ck/blob/master/docs/list_of_automations.md#script') -# md_script.append(x) -# md_script_readme.append(x) - - if len(variation_keys) > 0: - variation_pointer = "[,variations]" - variation_pointer2 = "[variations]" - else: - variation_pointer = '' - variation_pointer2 = '' - - if len(input_mapping) > 0: - input_mapping_pointer = "[--input_flags]" - else: - input_mapping_pointer = '' - - cli_all_tags = '`cm run script --tags={}`'.format(','.join(tags)) - cli_all_tags3 = '`cm run script --tags={}{} {}`'.format( - ','.join(tags), variation_pointer, input_mapping_pointer) - x = '* CM CLI with all tags: {}*'.format(cli_all_tags) - md_script.append(x) - - cli_help_tags_alternative = '`cmr "{}" --help`'.format(' '.join(tags)) - - cli_all_tags_alternative = '`cmr "{}"`'.format(' '.join(tags)) - cli_all_tags_alternative3 = '`cmr "{} {}" {}`'.format( - ' '.join(tags), variation_pointer2, input_mapping_pointer) - cli_all_tags_alternative_j = '`cmr "{} {}" {} -j`'.format( - ' '.join(tags), variation_pointer, input_mapping_pointer) - x = '* CM CLI alternative: {}*'.format(cli_all_tags_alternative) - md_script.append(x) - - cli_all_tags_alternative_docker = '`cm docker script "{}{}" {}`'.format( - ' '.join(tags), variation_pointer2, input_mapping_pointer) - - -# cli_uid = '`cm run script {} {}`'.format(meta['uid'], input_mapping_pointer) -# x = '* CM CLI with alias and UID: {}*'.format(cli_uid) -# md_script.append(x) - - if len(variation_keys) > 0: - x = '' - for variation in variation_keys: - if x != '': - x += ';  ' - x += '_' + variation - md_script.append('* Variations: *{}*'.format(x)) - - if default_variation != '': - md_script.append( - '* Default variation: *{}*'.format(default_variation)) - - if len(version_keys) > 0: - md_script.append( - '* Versions: *{}*'.format(';  '.join(version_keys))) - - if default_version != '': - md_script.append('* Default version: *{}*'.format(default_version)) - - md_script.append('') -# md_script_readme.append('') - - # Add extra to README - x = 'Meta description' -# md_script_readme.append('___') -# md_script_readme.append('### '+x) - md_script_readme.append( - '* CM meta description for this script: *[{}]({})*'.format(meta_file, meta_file)) -# md_script_readme.append('') -# toc_readme.append(x) - - x = 'Tags' -# md_script_readme.append('___') -# md_script_readme.append('### '+x) - 
md_script_readme.append( - '* All CM tags to find and reuse this script (see in above meta description): *{}*'.format(','.join(tags))) -# md_script_readme.append('') -# toc_readme.append(x) - - cache = meta.get('cache', False) - md_script_readme.append('* Output cached? *{}*'.format(str(cache))) - - md_script_readme.append( - '* See [pipeline of dependencies]({}) on other CM scripts'.format('#dependencies-on-other-cm-scripts')) - - md_script_readme += ['', - # '
' - ] - - # Add usage - x1 = 'Reuse this script in your project' - x1a = 'Install MLCommons CM automation meta-framework' - x1aa = 'Pull CM repository with this automation recipe (CM script)' - x1b = 'Print CM help from the command line' - x2 = 'Customize and run this script from the command line with different variations and flags' - x3 = 'Run this script from Python' - x3a = 'Run this script via GUI' - x4 = 'Run this script via Docker (beta)' - md_script_readme += [ - '', - '---', - '### ' + x1, - '', - '#### ' + x1a, - '', - '* [Install CM](https://access.cknowledge.org/playground/?action=install)', - '* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)', - '', - '#### ' + x1aa, - '', - '```cm pull repo {}```'.format(repo_alias), - '', - '#### ' + x1b, - '', - '```{}```'.format(cli_help_tags_alternative), - '', - '#### ' + x2, - '', - '{}'.format(cli_all_tags), - '', - '{}'.format(cli_all_tags3), - '', - '*or*', - '', - '{}'.format(cli_all_tags_alternative), - '', - '{}'.format(cli_all_tags_alternative3), - '', - # '3. {}'.format(cli_uid), - ''] - - x = ' and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.' - if len(variation_keys) > 0: - md_script_readme += ['* *See the list of `variations` [here](#variations)' + x + '*', - '' - ] - - if input_description and len(input_description) > 0: - x = 'Input Flags' - md_script_readme.append('') - md_script_readme.append('#### ' + x) - toc_readme.append(' ' + x) - - md_script_readme.append('') - key0 = '' - for key in input_description: - if key0 == '': - key0 = key - - value = input_description[key] - desc = value - - if isinstance(value, dict): - desc = value['desc'] - - choices = value.get('choices', []) - if len(choices) > 0: - desc += ' {' + ','.join(choices) + '}' - - default = value.get('default', '') - if default != '': - desc += ' (*' + str(default) + '*)' - - md_script_readme.append('* --**{}**={}'.format(key, desc)) - - md_script_readme.append('') - md_script_readme.append( - '**Above CLI flags can be used in the Python CM API as follows:**') - md_script_readme.append('') - - x = '```python\nr=cm.access({... , "' + key0 + '":...}\n```' - md_script_readme.append(x) - - md_script_readme += ['#### ' + x3, - '', - '
',
-                         'Click here to expand this section.',
-                         '',
-                         '```python',
-                         '',
-                         'import cmind',
-                         '',
-                         "r = cmind.access({'action':'run',",
-                         "                  'automation':'script',",
-                         "                  'tags':'{}',".format(
-                             ','.join(tags)),
-                         "                  'out':'con',",
-                         "                  ...",
-                         "                  (other input keys for this script)",
-                         "                  ...",
-                         "                 })",
-                         "",
-                         "if r['return']>0:",
-                         "    print(r['error'])",
-                         '',
-                         '```',
-                         '',
-                         '
', - '', - - '', - '#### ' + x3a, - '', - '```cmr "cm gui" --script="' + - ','.join(tags) + '"```', - '', - # 'Use this [online GUI](https://cKnowledge.org/cm-gui/?tags={}) to generate CM CMD.'.format(','.join(tags)), - # '', - '#### ' + x4, - '', - '{}'.format(cli_all_tags_alternative_docker), - '' - ] - toc_readme.append(x1) - toc_readme.append(' ' + x1a) - toc_readme.append(' ' + x1b) - toc_readme.append(' ' + x2) - toc_readme.append(' ' + x3) - toc_readme.append(' ' + x3a) - toc_readme.append(' ' + x4) - - x = 'Customization' - md_script_readme.append('___') - md_script_readme.append('### ' + x) - md_script_readme.append('') - toc_readme.append(x) - - if len(variation_keys) > 0: - # x = 'Variation groups' - # md_script_readme.append('___') - # md_script_readme.append('### '+x) - # toc_readme.append(x) - - variation_groups = {} - default_variations = [] - variation_md = {} - variation_alias = {} - - # Normally should not use anymore. Should use default:true inside - # individual variations. - default_variation = meta.get('default_variation', '') - - for variation_key in sorted(variation_keys): - variation = variations[variation_key] - - alias = variation.get('alias', '').strip() - - if alias != '': - aliases = variation_alias.get(alias, []) - if variation_key not in aliases: - aliases.append(variation_key) - variation_alias[alias] = aliases - - # Do not continue this loop if alias - continue - - default = variation.get('default', False) - - if not default: - # Check outdated - if default_variation == variation_key: - default = True - - extra1 = '' - extra2 = '' - if default: - extra1 = '**' - extra2 = '** (default)' - - default_variations.append(variation_key) - - md_var = [] - - md_var.append( - '* {}`_{}`{}'.format(extra1, variation_key, extra2)) - - variation_md[variation_key] = md_var - -# md_script_readme+=md_var - - group = variation.get('group', '') - - if variation_key.endswith('_'): - group = '*Internal group (variations should not be selected manually)*' - elif group == '': - group = '*No group (any variation can be selected)*' - - if group not in variation_groups: - variation_groups[group] = [] - - variation_groups[group].append(variation_key) - - x = 'Variations' - md_script_readme.append('') - md_script_readme.append('#### ' + x) - toc_readme.append(' ' + x) - - variation_groups_order = meta.get('variation_groups_order', []) - for variation in sorted(variation_groups): - if variation not in variation_groups_order: - variation_groups_order.append(variation) - - for group_key in variation_groups_order: - md_script_readme.append('') - - if not group_key.startswith('*'): - md_script_readme.append( - ' * Group "**{}**"'.format(group_key)) - else: - md_script_readme.append(' * {}'.format(group_key)) - - md_script_readme += [ - '
', - ' Click here to expand this section.', - '' - ] - - for variation_key in sorted(variation_groups[group_key]): - variation = variations[variation_key] - - xmd = variation_md[variation_key] - - aliases = variation_alias.get(variation_key, []) - aliases2 = ['_' + v for v in aliases] - - if len(aliases) > 0: - xmd.append( - ' - Aliases: `{}`'.format(','.join(aliases2))) - - if len(variation.get('env', {})) > 0: - xmd.append(' - Environment variables:') - for key in variation['env']: - xmd.append( - ' - *{}*: `{}`'.format(key, variation['env'][key])) - - xmd.append(' - Workflow:') - - for dep in ['deps', 'prehook_deps', - 'posthook_deps', 'post_deps']: - process_deps( - self_module, - variation, - meta_url, - xmd, - dep, - ' ', - True, - True) - - for x in xmd: - md_script_readme.append(' ' + x) - - md_script_readme.append('') - md_script_readme.append('
') - md_script_readme.append('') - - # Check if has invalid_variation_combinations - vvc = meta.get('invalid_variation_combinations', []) - if len(vvc) > 0: - x = 'Unsupported or invalid variation combinations' - md_script_readme.append('') - md_script_readme.append('#### ' + x) - md_script_readme.append('') - md_script_readme.append('') - md_script_readme.append('') - toc_readme.append(' ' + x) - - for v in vvc: - vv = ['_' + x for x in v] - md_script_readme.append('* `' + ','.join(vv) + '`') - - if len(default_variations) > 0: - md_script_readme.append('') - md_script_readme.append('#### Default variations') - md_script_readme.append('') - - dv = ['_' + x for x in sorted(default_variations)] - - md_script_readme.append('`{}`'.format(','.join(dv))) - - # Check if has valid_variation_combinations - vvc = meta.get('valid_variation_combinations', []) - if len(vvc) > 0: - x = 'Valid variation combinations checked by the community' - md_script_readme.append('') - md_script_readme.append('#### ' + x) - md_script_readme.append('') - md_script_readme.append('') - md_script_readme.append('') - toc_readme.append(' ' + x) - - for v in vvc: - vv = ['_' + x for x in v] - md_script_readme.append('* `' + ','.join(vv) + '`') - - # Check input flags - if input_mapping and len(input_mapping) > 0: - x = 'Script flags mapped to environment' - md_script_readme.append('') - md_script_readme.append('#### ' + x) - toc_readme.append(' ' + x) - - md_script_readme.append('
')
-        md_script_readme.append(
-            'Click here to expand this section.')
-
-        md_script_readme.append('')
-        key0 = ''
-        for key in sorted(input_mapping):
-            if key0 == '':
-                key0 = key
-            value = input_mapping[key]
-            md_script_readme.append(
-                '* `--{}=value`  →  `{}=value`'.format(key, value))
-
-        md_script_readme.append('')
-        md_script_readme.append(
-            '**The above CLI flags can be used in the Python CM API as follows:**')
-        md_script_readme.append('')
-
-        x = '```python\nr=cm.access({... , "' + key0 + '":...})\n```'
-        md_script_readme.append(x)
-
-        md_script_readme.append('')
-        md_script_readme.append('
') - md_script_readme.append('') - - # Default environment - default_env = meta.get('default_env', {}) - - x = 'Default environment' -# md_script_readme.append('___') - md_script_readme.append('#### ' + x) - toc_readme.append(' ' + x) - - md_script_readme.append('') - md_script_readme.append('
') - md_script_readme.append( - 'Click here to expand this section.') - md_script_readme.append('') - md_script_readme.append( - 'These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags.') - md_script_readme.append('') - - for key in default_env: - value = default_env[key] - md_script_readme.append('* {}: `{}`'.format(key, value)) - - md_script_readme.append('') - md_script_readme.append('
') - md_script_readme.append('') - - if len(version_keys) > 0 or default_version != '': - x = 'Versions' -# md_script_readme.append('___') - md_script_readme.append('#### ' + x) - toc_readme.append(x) - - if default_version != '': - md_script_readme.append( - 'Default version: `{}`'.format(default_version)) - md_script_readme.append('') - - if len(version_keys) > 0: - for version in version_keys: - md_script_readme.append('* `{}`'.format(version)) - - # Add workflow - x = 'Dependencies on other CM scripts' - md_script_readme += ['___', - '### ' + x, - ''] - toc_readme.append(x) - -# md_script_readme.append('
')
-#    md_script_readme.append('Click here to expand this section.')
-
-    md_script_readme.append('')
-
-    # Check customize.py file
-    path_customize = os.path.join(path, 'customize.py')
-    found_customize = False
-    found_customize_preprocess = False
-    found_customize_postprocess = False
-    found_output_env = []
-
-    if os.path.isfile(path_customize):
-        found_customize = True
-
-        r = utils.load_txt(path_customize, split=True)
-        if r['return'] > 0:
-            return r
-
-        customize = r['string']
-        customize_l = r['list']
-
-        if 'def preprocess(' in customize:
-            found_customize_preprocess = True
-
-        if 'def postprocess(' in customize:
-            found_customize_postprocess = True
-
-        # Ugly attempt to get output env
-        found_postprocess = False
-        for l in customize_l:
-            #            if not found_postprocess:
-            #                if 'def postprocess' in l:
-            #                    found_postprocess = True
-            #            else:
-            j = l.find(' env[')
-            if j >= 0:
-                j1 = l.find(']', j + 4)
-                if j1 >= 0:
-                    j2 = l.find('=', j1 + 1)
-                    if j2 >= 0:
-                        key2 = l[j + 5:j1].strip()
-                        key = key2[1:-1]
-
-                        if key.startswith(
-                                'CM_') and 'TMP' not in key and key not in found_output_env:
-                            found_output_env.append(key)
-
-    process_deps(self_module, meta, meta_url, md_script_readme, 'deps')
-
-    x = ''
-    y = 'customize.py'
-    if found_customize_preprocess:
-        x = '***'
-        y = '[' + y + '](' + url + '/' + y + ')'
-    md_script_readme.append(
-        ('  1. ' + x + 'Run "preprocess" function from {}' + x).format(y))
-
-    process_deps(
-        self_module,
-        meta,
-        meta_url,
-        md_script_readme,
-        'prehook_deps')
-
-    # Check native scripts
-    files = os.listdir(path)
-    x = ''
-    y = []
-    for f in sorted(files):
-        if f.startswith('run') and (
-                f.endswith('.sh') or f.endswith('.bat')):
-            f_url = url + '/' + f
-            y.append('     * [{}]({})'.format(f, f_url))
-
-    if len(y) > 0:
-        x = '***'
-
-    md_script_readme.append(
-        '  1. ' + x + 'Run native script if it exists' + x)
-    md_script_readme += y
-
-    process_deps(
-        self_module,
-        meta,
-        meta_url,
-        md_script_readme,
-        'posthook_deps')
-
-    x = ''
-    y = 'customize.py'
-    if found_customize_postprocess:
-        x = '***'
-        y = '[' + y + '](' + url + '/' + y + ')'
-    md_script_readme.append(
-        ('  1. ' + x + 'Run "postprocess" function from {}' + x).format(y))
-
-    process_deps(
-        self_module,
-        meta,
-        meta_url,
-        md_script_readme,
-        'post_deps')
-    # md_script_readme.append('
') - md_script_readme.append('') - - # New environment - new_env_keys = meta.get('new_env_keys', []) - - x = 'Script output' - md_script_readme.append('___') - md_script_readme.append('### ' + x) - toc_readme.append(x) - - md_script_readme.append(cli_all_tags_alternative_j) - - x = 'New environment keys (filter)' - md_script_readme.append('#### ' + x) - toc_readme.append(x) - - md_script_readme.append('') - for key in sorted(new_env_keys): - md_script_readme.append('* `{}`'.format(key)) - - # Pass found_output_env through above filter - found_output_env_filtered = [] - - import fnmatch - - for key in found_output_env: - add = False - - for f in new_env_keys: - if fnmatch.fnmatch(key, f): - add = True - break - - if add: - found_output_env_filtered.append(key) - - x = 'New environment keys auto-detected from customize' - md_script_readme.append('#### ' + x) - toc_readme.append(x) - - md_script_readme.append('') - for key in sorted(found_output_env_filtered): - md_script_readme.append('* `{}`'.format(key)) - - # Add maintainers -# x = 'Maintainers' -# md_script_readme.append('___') -# md_script_readme.append('### '+x) -# md_script_readme.append('') -# md_script_readme.append('* ' + public_taskforce) -# toc_readme.append(x) - - # Process TOC - toc_readme_string = '\n' - for x in toc_readme: - x2 = x - prefix = '' - - if x.startswith(' '): - prefix = ' ' - x2 = x[1:] - - x2 = x2.lower().replace(' ', '-').replace(',', '') - toc_readme_string += prefix + '* [{}](#{})\n'.format(x, x2) - - # Add to the total list - md += md_script - - s = '\n'.join(md_script_readme) - - s = s.replace('{{CM_README_EXTRA}}', cm_readme_extra) -# s = s.replace('{{CM_SEE_README_EXTRA}}', cm_see_readme_extra) - s = s.replace('{{CM_README_TOC}}', toc_readme_string) - - r = utils.save_txt(path_readme, s) - if r['return'] > 0: - return r - - # Add to Git (if in git) - os.chdir(path) - os.system('git add README.md') - os.chdir(cur_dir) - - # Recreate TOC with categories - toc2 = [] - - # , key = lambda x: -toc_category_sort[x]): - for category in sorted(toc_category): - toc2.append('### ' + category) - toc2.append('') - - for script in sorted(toc_category[category]): - - meta = script_meta[script] - - name = meta.get('name', '') - - url = urls[script] - - x = '* [{}]({})'.format(script, url) - if name != '': - x += ' *(' + name + ')*' - - toc2.append(x) - - toc2.append('') - - toc_category_string = '' - for category in sorted(toc_category): - category_link = category.lower().replace(' ', '-').replace('/', '') - toc_category_string += '* [{}](#{})\n'.format(category, category_link) - - # Load template - r = utils.load_txt(os.path.join(self_module.path, template_file)) - if r['return'] > 0: - return r - - s = r['string'] - - s = s.replace('{{CM_TOC2}}', '\n'.join(toc2)) - s = s.replace('{{CM_TOC}}', '\n'.join(toc)) -# s = s.replace('{{CM_MAIN}}', '\n'.join(md)) - s = s.replace('{{CM_MAIN}}', '') - s = s.replace('{{CM_TOC_CATEGORIES}}', toc_category_string) - - # Output - output_dir = i.get('output_dir', '') - - if output_dir == '': - output_dir = '..' 
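-    # Hypothetical illustration: with the default output_dir of '..', the
-    # regenerated script list lands one level above the automation directory;
-    # a copy is also written to ../docs/scripts/<category>/<alias>/index.md below.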
-
-    output_file = os.path.join(output_dir, list_file)
-
-    r = utils.save_txt(output_file, s)
-    if r['return'] > 0:
-        return r
-
-    out_docs_file = os.path.join(
-        "..",
-        "docs",
-        "scripts",
-        category,
-        alias,
-        "index.md")
-    r = utils.save_txt(out_docs_file, s)
-    if r['return'] > 0:
-        return r
-
-    return {'return': 0}
-
-
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# This function takes in a host path and returns the absolute path on the host
-# and inside the container.
-# If mounts is passed, the function appends the host path and the
-# container path to mounts in the form "host_path:container_path"
-def update_path_for_docker(path, mounts=None, force_path_target=''):
-
-    path_orig = ''
-    path_target = ''
-
-    if path != '':  # and (os.path.isfile(path) or os.path.isdir(path)):
-        path = os.path.abspath(path)
-
-        path_target = path
-        path_orig = path
-
-        if os.name == 'nt':
-            from pathlib import PureWindowsPath, PurePosixPath
-
-            x = PureWindowsPath(path_orig)
-            path_target = str(PurePosixPath('/', *x.parts[1:]))
-
-        if not path_target.startswith('/'):
-            path_target = '/' + path_target
-
-        path_target = '/cm-mount' + \
-            path_target if force_path_target == '' else force_path_target
-
-        # If a file is given, mount its parent directory
-        if os.path.isfile(path) or not os.path.isdir(path):
-            x = os.path.dirname(path_orig) + ':' + os.path.dirname(path_target)
-        else:
-            x = path_orig + ':' + path_target
-
-        # Check for duplicates before appending the mount
-        if mounts is not None:
-            to_add = True
-            for y in mounts:
-                if y.lower() == x.lower():
-                    to_add = False
-                    break
-            if to_add:
-                mounts.append(x)
-
-    return (path_orig, path_target)
-
-############################################################
-
-
-def process_inputs(i):
-
-    import copy
-
-    i_run_cmd_arc = i['run_cmd_arc']
-    docker_settings = i['docker_settings']
-    mounts = i['mounts']
-
-    # Check if inputs and env need to be updated/mapped/mounted
-    i_run_cmd = copy.deepcopy(i_run_cmd_arc)
-
-    def get_value_using_key_with_dots(d, k):
-        v = None
-        j = k.find('.')
-        if j >= 0:
-            k1 = k[:j]
-            k2 = k[j + 1:]
-
-            if k1 in d:
-                v = d[k1]
-
-                if '.'
in k2: - v, d, k = get_value_using_key_with_dots(v, k2) - else: - d = v - k = k2 - if isinstance(v, dict): - v = v.get(k2) - else: - v = None - else: - if k == '': - v = d - else: - v = d.get(k) - - return v, d, k - - docker_input_paths = docker_settings.get('input_paths', []) - if len(i_run_cmd) > 0: - for k in docker_input_paths: - v2, i_run_cmd2, k2 = get_value_using_key_with_dots(i_run_cmd, k) - - if v2 is not None: - v = i_run_cmd2[k2] - - path_orig, path_target = update_path_for_docker(v, mounts) - - if path_target != '': - i_run_cmd2[k2] = path_target - - return {'return': 0, 'run_cmd': i_run_cmd} - - -############################################################ -def regenerate_script_cmd(i): - - script_uid = i['script_uid'] - script_alias = i['script_alias'] - tags = i['tags'] - docker_settings = i['docker_settings'] - fake_run = i.get('fake_run', False) - - i_run_cmd = i['run_cmd'] - - # Cleanup from env everything that has a host path value - if i_run_cmd.get('env'): - for key in list(i_run_cmd.get('env')): - if isinstance(i_run_cmd['env'][key], str) and ((os.path.join("local", "cache", "") in i_run_cmd['env'][key]) or ( - os.path.join("CM", "repos", "") in i_run_cmd['env'][key])): - del (i_run_cmd['env'][key]) - elif isinstance(i_run_cmd['env'][key], list): - values_to_remove = [] - for val in i_run_cmd['env'][key]: - if isinstance(val, str) and ((os.path.join("local", "cache", "") in val) or ( - os.path.join("CM", "repos", "") in val)): - values_to_remove.append(val) - if values_to_remove == i_run_cmd['env'][key]: - del (i_run_cmd['env'][key]) - else: - for val in values_to_remove: - i_run_cmd['env'][key].remove(val) - - docker_run_cmd_prefix = i['docker_run_cmd_prefix'] - - # Regenerate command from dictionary input - run_cmd = 'cm run script' - - x = '' - - # Check if there are some tags without variation - requested_tags = i_run_cmd.get('tags', []) - - tags_without_variation = False - for t in requested_tags: - if not t.startswith('_'): - tags_without_variation = True - break - - if not tags_without_variation: - # If no tags without variation, add script alias and UID explicitly - if script_uid != '': - x = script_uid - if script_alias != '': - if x != '': - x = ',' + x - x = script_alias + x - - if x != '': - run_cmd += ' ' + x + ' ' - - skip_input_for_fake_run = docker_settings.get( - 'skip_input_for_fake_run', []) - add_quotes_to_keys = docker_settings.get('add_quotes_to_keys', []) - - def rebuild_flags(i_run_cmd, fake_run, - skip_input_for_fake_run, add_quotes_to_keys, key_prefix): - - run_cmd = '' - - keys = list(i_run_cmd.keys()) - - if 'tags' in keys: - # Move tags first - tags_position = keys.index('tags') - del (keys[tags_position]) - keys = ['tags'] + keys - - for k in keys: - # Assemble long key if dictionary - long_key = key_prefix - if long_key != '': - long_key += '.' 
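-            # Hypothetical illustration: key_prefix 'env' plus key 'CM_IMAGE'
-            # produce long_key 'env.CM_IMAGE', which is emitted below as
-            # --env.CM_IMAGE=<value> (list values become --env.CM_IMAGE,=v1,v2)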
-            long_key += k
-
-            if fake_run and long_key in skip_input_for_fake_run:
-                continue
-
-            v = i_run_cmd[k]
-
-            q = '\\"' if long_key in add_quotes_to_keys else ''
-
-            if isinstance(v, dict):
-                run_cmd += rebuild_flags(v,
-                                         fake_run,
-                                         skip_input_for_fake_run,
-                                         add_quotes_to_keys,
-                                         long_key)
-            elif isinstance(v, list):
-                x = ''
-                for vv in v:
-                    if x != '':
-                        x += ','
-                    x += q + str(vv) + q
-                run_cmd += ' --' + long_key + ',=' + x
-            else:
-                run_cmd += ' --' + long_key + '=' + q + str(v) + q
-
-        return run_cmd
-
-    run_cmd += rebuild_flags(i_run_cmd,
-                             fake_run,
-                             skip_input_for_fake_run,
-                             add_quotes_to_keys,
-                             '')
-
-    run_cmd = docker_run_cmd_prefix + ' && ' + \
-        run_cmd if docker_run_cmd_prefix != '' else run_cmd
-
-    return {'return': 0, 'run_cmd_string': run_cmd}
-
-
-############################################################
-def aux_search(i):
-
-    self_module = i['self_module']
-
-    inp = i['input']
-
-    repos = inp.get('repos', '')
-# Grigori Fursin commented this out on 20240412 because this line prevents
-# searching for scripts in other public or private repositories.
-# It is not clear why we enforced just 2 repositories:
-#
-# if repos == '': repos='internal,a4705959af8e447a'
-
-    parsed_artifact = inp.get('parsed_artifact', [])
-
-    if len(parsed_artifact) < 1:
-        parsed_artifact = [('', ''), ('', '')]
-    elif len(parsed_artifact) < 2:
-        parsed_artifact.append(('', ''))
-    else:
-        repos = parsed_artifact[1][0]
-
-    list_of_repos = repos.split(',') if ',' in repos else [repos]
-
-    ii = utils.sub_input(
-        inp,
-        self_module.cmind.cfg['artifact_keys'] +
-        ['tags'])
-
-    ii['out'] = None
-
-    # Search for automations in repos
-    lst = []
-    for repo in list_of_repos:
-        parsed_artifact[1] = (
-            '', repo) if utils.is_cm_uid(repo) else (
-            repo, '')
-        ii['parsed_artifact'] = parsed_artifact
-        r = self_module.search(ii)
-        if r['return'] > 0:
-            return r
-        lst += r['list']
-
-    return {'return': 0, 'list': lst}
-
-
-############################################################
-def dockerfile(i):
-    """
-    Generate a Dockerfile to run this CM script inside a container.
-
-    Args:
-      (CM input dict):
-
-      (out) (str): if 'con', output to console
-      (repos) (str): list of repositories to search for automations
-      (output_dir) (str): output directory (./ by default)
-
-    Returns:
-      (CM return dict):
-
-      * return (int): return code == 0 if no error and >0 if error
-      * (error) (str): error string if return>0
-
-    """
-
-    import copy
-
-    # Check simplified CMD: cm docker script "python app image-classification onnx"
-    # If artifact has spaces, treat them as tags!
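-    # Hypothetical illustration: `cm docker script "python app image-classification onnx"`
-    # is interpreted the same way as
-    # `cm docker script --tags=python,app,image-classification,onnx`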
- self_module = i['self_module'] - self_module.cmind.access( - {'action': 'detect_tags_in_artifact', 'automation': 'utils', 'input': i}) - - # Prepare "clean" input to replicate command - r = self_module.cmind.access({'action': 'prune_input', - 'automation': 'utils', - 'input': i, - 'extra_keys_starts_with': ['docker_']}) - i_run_cmd_arc = r['new_input'] - - cur_dir = os.getcwd() - - quiet = i.get('quiet', False) - - console = i.get('out') == 'con' - - # Search for script(s) - r = aux_search({'self_module': self_module, 'input': i}) - if r['return'] > 0: - return r - - lst = r['list'] - - if len(lst) == 0: - return {'return': 1, 'error': 'no scripts were found'} - - -# if i.get('cmd'): -# run_cmd = "cm run script " + " ".join( a for a in i['cmd'] if not a.startswith('--docker_') ) -# elif i.get('artifact'): -# run_cmd = "cm run script "+i['artifact'] -# elif i.get('tags'): -# run_cmd = "cm run script \""+" "+" ".join(i['tags']) + "\"" -# else: -# run_cmd = "" -# -# run_cmd = i.get('docker_run_cmd_prefix') + ' && ' + run_cmd if i.get('docker_run_cmd_prefix') else run_cmd - - env = i.get('env', {}) - state = i.get('state', {}) - const = i.get('const', {}) - const_state = i.get('const_state', {}) - script_automation = i['self_module'] - - dockerfile_env = i.get('dockerfile_env', {}) - - tags_split = i.get('tags', '').split(",") - variation_tags = [t[1:] for t in tags_split if t.startswith("_")] - - for artifact in sorted(lst, key=lambda x: x.meta.get('alias', '')): - - meta = artifact.meta - - script_path = artifact.path - - tags = meta.get("tags", []) - tag_string = ",".join(tags) - - script_alias = meta.get('alias', '') - script_uid = meta.get('uid', '') - - verbose = i.get('v', False) - show_time = i.get('show_time', False) - - run_state = {'deps': [], 'fake_deps': [], 'parent': None} - run_state['script_id'] = script_alias + "," + script_uid - run_state['script_variation_tags'] = variation_tags - variations = meta.get('variations', {}) - docker_settings = meta.get('docker', {}) - docker_settings['dockerfile_env'] = dockerfile_env - state['docker'] = docker_settings - add_deps_recursive = i.get('add_deps_recursive', {}) - - r = script_automation.update_state_from_meta( - meta, - env, - state, - const, - const_state, - deps=[], - post_deps=[], - prehook_deps=[], - posthook_deps=[], - new_env_keys=[], - new_state_keys=[], - run_state=run_state, - i=i) - if r['return'] > 0: - return r - - r = script_automation._update_state_from_variations( - i, - meta, - variation_tags, - variations, - env, - state, - const, - const_state, - deps=[], - post_deps=[], - prehook_deps=[], - posthook_deps=[], - new_env_keys_from_meta=[], - new_state_keys_from_meta=[], - add_deps_recursive=add_deps_recursive, - run_state=run_state, - recursion_spaces='', - verbose=False) - if r['return'] > 0: - return r - - docker_settings = state['docker'] - dockerfile_env = docker_settings['dockerfile_env'] - dockerfile_env['CM_RUN_STATE_DOCKER'] = True - - if not docker_settings.get('run', True) and not i.get( - 'docker_run_override', False): - print("docker.run set to False in _cm.json") - continue - '''run_config_path = os.path.join(script_path,'run_config.yml') - if not os.path.exists(run_config_path): - print("No run_config.yml file present in {}".format(script_path)) - continue - import yaml - with open(run_config_path, 'r') as run_config_file: - run_config = yaml.safe_load(run_config_file) - docker_settings = run_config.get('docker') - if not docker_settings or not docker_settings.get('build') or not 
run_config.get('run_with_default_inputs'): - print("Run config is not configured for docker run in {}".format(run_config_path)) - continue - ''' - - deps = docker_settings.get('build_deps', []) - if deps: - r = script_automation._run_deps( - deps, - [], - env, - {}, - {}, - {}, - {}, - '', - [], - '', - False, - '', - verbose, - show_time, - ' ', - run_state) - if r['return'] > 0: - return r - # For updating meta from update_meta_if_env - r = script_automation.update_state_from_meta( - meta, - env, - state, - const, - const_state, - deps=[], - post_deps=[], - prehook_deps=[], - posthook_deps=[], - new_env_keys=[], - new_state_keys=[], - run_state=run_state, - i=i) - if r['return'] > 0: - return r - docker_settings = state['docker'] - - d_env = i_run_cmd_arc.get('env', {}) - for key in list(d_env.keys()): - if key.startswith("CM_TMP_"): - del (d_env[key]) - - # Check if need to update/map/mount inputs and env - r = process_inputs({'run_cmd_arc': i_run_cmd_arc, - 'docker_settings': docker_settings, - 'mounts': []}) - if r['return'] > 0: - return r - - i_run_cmd = r['run_cmd'] - - docker_run_cmd_prefix = i.get( - 'docker_run_cmd_prefix', docker_settings.get( - 'run_cmd_prefix', '')) - - r = regenerate_script_cmd({'script_uid': script_uid, - 'script_alias': script_alias, - 'run_cmd': i_run_cmd, - 'tags': tags, - 'fake_run': True, - 'docker_settings': docker_settings, - 'docker_run_cmd_prefix': docker_run_cmd_prefix}) - if r['return'] > 0: - return r - - run_cmd = r['run_cmd_string'] - - cm_repo = i.get( - 'docker_cm_repo', - docker_settings.get( - 'cm_repo', - 'mlcommons@mlperf-automations')) - cm_repo_branch = i.get( - 'docker_cm_repo_branch', - docker_settings.get( - 'cm_repo_branch', - 'main')) - - cm_repo_flags = i.get( - 'docker_cm_repo_flags', - docker_settings.get( - 'cm_repo_flags', - '')) - - docker_base_image = i.get( - 'docker_base_image', - docker_settings.get('base_image')) - docker_os = i.get( - 'docker_os', docker_settings.get( - 'docker_os', 'ubuntu')) - docker_os_version = i.get( - 'docker_os_version', docker_settings.get( - 'docker_os_version', '22.04')) - - docker_cm_repos = i.get( - 'docker_cm_repos', - docker_settings.get( - 'cm_repos', - '')) - - docker_skip_cm_sys_upgrade = i.get( - 'docker_skip_cm_sys_upgrade', docker_settings.get( - 'skip_cm_sys_upgrade', '')) - - docker_extra_sys_deps = i.get('docker_extra_sys_deps', '') - - if not docker_base_image: - dockerfilename_suffix = docker_os + '_' + docker_os_version - else: - if os.name == 'nt': - dockerfilename_suffix = docker_base_image.replace( - '/', '-').replace(':', '-') - else: - dockerfilename_suffix = docker_base_image.split("/") - dockerfilename_suffix = dockerfilename_suffix[len( - dockerfilename_suffix) - 1] - - fake_run_deps = i.get( - 'fake_run_deps', docker_settings.get( - 'fake_run_deps', False)) - docker_run_final_cmds = docker_settings.get( - 'docker_run_final_cmds', []) - - r = check_gh_token(i, docker_settings, quiet) - if r['return'] > 0: - return r - gh_token = r['gh_token'] - i['docker_gh_token'] = gh_token # To pass to docker function if needed - - if i.get('docker_real_run', docker_settings.get( - 'docker_real_run', False)): - fake_run_option = " " - fake_run_deps = False - else: - fake_run_option = " --fake_run" - - docker_copy_files = i.get( - 'docker_copy_files', - docker_settings.get( - 'copy_files', - [])) - - env['CM_DOCKER_PRE_RUN_COMMANDS'] = docker_run_final_cmds - - docker_path = i.get('docker_path', '').strip() - if docker_path == '': - docker_path = script_path - - dockerfile_path = 
os.path.join(
-            docker_path,
-            'dockerfiles',
-            dockerfilename_suffix +
-            '.Dockerfile')
-
-        if i.get('print_deps'):
-            cm_input = {'action': 'run',
-                        'automation': 'script',
-                        'tags': f"""{i.get('tags')}""",
-                        'print_deps': True,
-                        'quiet': True,
-                        'silent': True,
-                        'fake_run': True,
-                        'fake_deps': True
-                        }
-            r = self_module.cmind.access(cm_input)
-            if r['return'] > 0:
-                return r
-            print_deps = r['new_state']['print_deps']
-            comments = ["#RUN " + dep for dep in print_deps]
-            comments.append("")
-            comments.append("# Run CM workflow")
-        else:
-            comments = []
-
-        if i.get('docker_push_image', '') in ['True', True, 'yes']:
-            env['CM_DOCKER_PUSH_IMAGE'] = 'yes'
-
-        cm_docker_input = {'action': 'run',
-                           'automation': 'script',
-                           'tags': 'build,dockerfile',
-                           'cm_repo': cm_repo,
-                           'cm_repo_branch': cm_repo_branch,
-                           'cm_repo_flags': cm_repo_flags,
-                           'docker_base_image': docker_base_image,
-                           'docker_os': docker_os,
-                           'docker_os_version': docker_os_version,
-                           'skip_cm_sys_upgrade': docker_skip_cm_sys_upgrade,
-                           'file_path': dockerfile_path,
-                           'fake_run_option': fake_run_option,
-                           'comments': comments,
-                           'run_cmd': f'{run_cmd} --quiet',
-                           'script_tags': f"""{i.get('tags')}""",
-                           'copy_files': docker_copy_files,
-                           'quiet': True,
-                           'env': env,
-                           'dockerfile_env': dockerfile_env,
-                           'v': i.get('v', False),
-                           'fake_docker_deps': fake_run_deps,
-                           'print_deps': True,
-                           'real_run': True
-                           }
-
-        if docker_cm_repos != '':
-            cm_docker_input['cm_repos'] = docker_cm_repos
-
-        if gh_token != '':
-            cm_docker_input['gh_token'] = gh_token
-
-        if docker_extra_sys_deps != '':
-            cm_docker_input['extra_sys_deps'] = docker_extra_sys_deps
-
-        r = self_module.cmind.access(cm_docker_input)
-        if r['return'] > 0:
-            return r
-
-        print('')
-        print("Dockerfile generated at " + dockerfile_path)
-
-    return {'return': 0}
-
-# We mount the main folder of the CM cache entry in case any file/folder
-# in that cache entry is needed inside the container
-
-
-def get_host_path(value):
-    path_split = value.split(os.sep)
-    if len(path_split) == 1:
-        return value
-
-    new_value = ''
-    if "cache" in path_split and "local" in path_split:
-        repo_entry_index = path_split.index("local")
-        if len(path_split) >= repo_entry_index + 3:
-            return os.sep.join(path_split[0:repo_entry_index + 3])
-
-    return value
-
-
-def get_container_path_script(i):
-    tmp_dep_cached_path = i['tmp_dep_cached_path']
-    value_mnt, value_env = get_container_path(tmp_dep_cached_path)
-    return {'return': 0, 'value_mnt': value_mnt, 'value_env': value_env}
-
-
-def get_container_path(value):
-    path_split = value.split(os.sep)
-    if len(path_split) == 1:
-        return value
-
-    new_value = ''
-    if "cache" in path_split and "local" in path_split:
-        new_path_split = ["", "home", "cmuser", "CM", "repos"]
-        repo_entry_index = path_split.index("local")
-        if len(path_split) >= repo_entry_index + 3:
-            new_path_split1 = new_path_split + \
-                path_split[repo_entry_index:repo_entry_index + 3]
-            new_path_split2 = new_path_split + path_split[repo_entry_index:]
-            return "/".join(new_path_split1), "/".join(new_path_split2)
-    else:
-        orig_path, target_path = update_path_for_docker(path=value)
-        return target_path, target_path
-
-    # return value, value
-
-
-############################################################
-def docker(i):
-    """
-    CM automation to run CM scripts via Docker
-
-    Args:
-      (CM input dict):
-
-      (out) (str): if 'con', output to console
-
-
-    Returns:
-      (CM return dict):
-
-      * return (int): return code == 0 if no error and >0 if error
-      * (error) (str): error string if return>0
-
-    """
-
-    import
copy - import re - - from cmind import __version__ as current_cm_version - - self_module = i['self_module'] - - if isinstance(i.get('docker', None), dict): - # Grigori started cleaning and refactoring this code on 20240929 - # - # 1. use --docker dictionary instead of --docker_{keys} - - if utils.compare_versions(current_cm_version, '2.3.8.1') >= 0: - docker_params = utils.convert_dictionary(i['docker'], 'docker') - i.update(docker_params) - del (i['docker']) - - quiet = i.get('quiet', False) - - detached = i.get('docker_detached', '') - if detached == '': - detached = i.get('docker_dt', '') - if detached == '': - detached = 'no' - - interactive = i.get('docker_interactive', '') - if interactive == '': - interactive = i.get('docker_it', '') - - verbose = i.get('v', False) - show_time = i.get('show_time', False) - - # Check simplified CMD: cm docker script "python app image-classification onnx" - # If artifact has spaces, treat them as tags! - self_module.cmind.access( - {'action': 'detect_tags_in_artifact', 'automation': 'utils', 'input': i}) - - # CAREFUL -> artifacts and parsed_artifacts are not supported in input - # (and should not be?) - if 'artifacts' in i: - del (i['artifacts']) - if 'parsed_artifacts' in i: - del (i['parsed_artifacts']) - - # Prepare "clean" input to replicate command - r = self_module.cmind.access({'action': 'prune_input', - 'automation': 'utils', - 'input': i, - 'extra_keys_starts_with': ['docker_']}) - i_run_cmd_arc = r['new_input'] - - env = i.get('env', {}) - - noregenerate_docker_file = i.get('docker_noregenerate', False) - norecreate_docker_image = i.get('docker_norecreate', True) - recreate_docker_image = i.get('docker_recreate', False) - if recreate_docker_image: # force recreate - norecreate_docker_image = False - - if i.get('docker_skip_build', False): - noregenerate_docker_file = True - norecreate_docker_image = True - env['CM_DOCKER_SKIP_BUILD'] = 'yes' - - # Check available configurations - docker_cfg = i.get('docker_cfg', '') - docker_cfg_uid = i.get('docker_cfg_uid', '') - - if docker_cfg != '' or docker_cfg_uid != '': - # Check if docker_cfg is turned on but not selected - if isinstance(docker_cfg, bool) or str( - docker_cfg).lower() in ['true', 'yes']: - docker_cfg = '' - - r = self_module.cmind.access({'action': 'select_cfg', - 'automation': 'utils,dc2743f8450541e3', - 'tags': 'basic,docker,configurations', - 'title': 'docker', - 'alias': docker_cfg, - 'uid': docker_cfg_uid}) - if r['return'] > 0: - if r['return'] == 16: - return {'return': 1, 'error': 'Docker configuration {} was not found'.format( - docker_cfg)} - return r - - selection = r['selection'] - - docker_input_update = selection['meta']['input'] - - i.update(docker_input_update) - - ########################################################################## - # Run dockerfile - if not noregenerate_docker_file: - r = utils.call_internal_module( - self_module, __file__, 'module_misc', 'dockerfile', i) - if r['return'] > 0: - return r - - # Save current directory - cur_dir = os.getcwd() - - console = i.get('out') == 'con' - - # Search for script(s) - r = aux_search({'self_module': self_module, 'input': i}) - if r['return'] > 0: - return r - - lst = r['list'] - - if len(lst) == 0: - return {'return': 1, 'error': 'no scripts were found'} - - env['CM_RUN_STATE_DOCKER'] = False - script_automation = i['self_module'] - state = i.get('state', {}) - const = i.get('const', {}) - const_state = i.get('const_state', {}) - - tags_split = i.get('tags', '').split(",") - variation_tags = [t[1:] for t in 
tags_split if t.startswith("_")] - - docker_cache = i.get('docker_cache', "yes") - if docker_cache in ["no", False, "False"]: - if 'CM_DOCKER_CACHE' not in env: - env['CM_DOCKER_CACHE'] = docker_cache - - image_repo = i.get('docker_image_repo', '') - - # Host system needs to have docker - r = self_module.cmind.access({'action': 'run', - 'automation': 'script', - 'tags': "get,docker"}) - if r['return'] > 0: - return r - - for artifact in sorted(lst, key=lambda x: x.meta.get('alias', '')): - - meta = artifact.meta - - if i.get('help', False): - return utils.call_internal_module(self_module, __file__, 'module_help', 'print_help', { - 'meta': meta, 'path': artifact.path}) - - script_path = artifact.path - - tags = meta.get("tags", []) - tag_string = ",".join(tags) - - script_alias = meta.get('alias', '') - script_uid = meta.get('uid', '') - - mounts = copy.deepcopy(i.get('docker_mounts', [])) - - '''run_config_path = os.path.join(script_path,'run_config.yml') - if not os.path.exists(run_config_path): - print("No run_config.yml file present in {}".format(script_path)) - continue - import yaml - with open(run_config_path, 'r') as run_config_file: - run_config = yaml.safe_load(run_config_file) - ''' - - variations = meta.get('variations', {}) - docker_settings = meta.get('docker', {}) - state['docker'] = docker_settings - # Todo: Support state, const and add_deps_recursive - run_state = {'deps': [], 'fake_deps': [], 'parent': None} - run_state['script_id'] = script_alias + "," + script_uid - run_state['script_variation_tags'] = variation_tags - add_deps_recursive = i.get('add_deps_recursive', {}) - - r = script_automation.update_state_from_meta( - meta, - env, - state, - const, - const_state, - deps=[], - post_deps=[], - prehook_deps=[], - posthook_deps=[], - new_env_keys=[], - new_state_keys=[], - run_state=run_state, - i=i) - if r['return'] > 0: - return r - - r = script_automation._update_state_from_variations( - i, - meta, - variation_tags, - variations, - env, - state, - const, - const_state, - deps=[], - post_deps=[], - prehook_deps=[], - posthook_deps=[], - new_env_keys_from_meta=[], - new_state_keys_from_meta=[], - add_deps_recursive=add_deps_recursive, - run_state=run_state, - recursion_spaces='', - verbose=False) - if r['return'] > 0: - return r - - docker_settings = state['docker'] - - if not docker_settings.get('run', True) and not i.get( - 'docker_run_override', False): - print("docker.run set to False in _cm.json") - continue - ''' - if not docker_settings or not docker_settings.get('build') or not run_config.get('run_with_default_inputs'): - print("Run config is not configured for docker run in {}".format(run_config_path)) - continue - ''' - - r = script_automation._update_env_from_input(env, i) - if r['return'] > 0: - return r - - # mount outdirname path - if env.get('CM_OUTDIRNAME', '') != '': - mounts.append(f"""{env['CM_OUTDIRNAME']}:{env['CM_OUTDIRNAME']}""") - - # Check if need to update/map/mount inputs and env - r = process_inputs({'run_cmd_arc': i_run_cmd_arc, - 'docker_settings': docker_settings, - 'mounts': mounts}) - if r['return'] > 0: - return r - - i_run_cmd = r['run_cmd'] - - # Check if need to mount home directory - current_path_target = '/cm-mount/current' - if docker_settings.get('mount_current_dir', '') == 'yes': - update_path_for_docker( - '.', mounts, force_path_target=current_path_target) - - _os = i.get('docker_os', docker_settings.get('os', 'ubuntu')) - version = i.get( - 'docker_os_version', - docker_settings.get( - 'os_version', - '22.04')) - - build_deps 
= docker_settings.get('build_deps', [])
-        deps = docker_settings.get('deps', [])
-        deps = build_deps + deps
-        if deps:
-            r = script_automation._run_deps(
-                deps,
-                [],
-                env,
-                {},
-                {},
-                {},
-                {},
-                '',
-                [],
-                '',
-                False,
-                '',
-                verbose,
-                show_time,
-                ' ',
-                run_state)
-            if r['return'] > 0:
-                return r
-
-        # For updating meta from update_meta_if_env
-        r = script_automation.update_state_from_meta(
-            meta,
-            env,
-            state,
-            const,
-            const_state,
-            deps=[],
-            post_deps=[],
-            prehook_deps=[],
-            posthook_deps=[],
-            new_env_keys=[],
-            new_state_keys=[],
-            run_state=run_state,
-            i=i)
-        if r['return'] > 0:
-            return r
-
-        docker_settings = state['docker']
-
-        for key in docker_settings.get('mounts', []):
-            mounts.append(key)
-
-        # Update environment variables from CM input based on input_mapping
-        # from meta
-        input_mapping = meta.get('input_mapping', {})
-
-        for c_input in input_mapping:
-            if c_input in i:
-                env[input_mapping[c_input]] = i[c_input]
-            # del(i[c_input])
-
-        # Update environment variables from CM input based on
-        # docker_input_mapping from meta
-
-        docker_input_mapping = docker_settings.get('docker_input_mapping', {})
-
-        for c_input in docker_input_mapping:
-            if c_input in i:
-                env[docker_input_mapping[c_input]] = i[c_input]
-            # del(i[c_input])
-
-        # env keys corresponding to container mounts are explicitly passed to
-        # the container run cmd
-        container_env = {}
-        for index in range(len(mounts)):
-            mount = mounts[index]
-            # Since Windows paths may contain 2 colons, search from the right
-            j = mount.rfind(':')
-            if j > 0:
-                mount_parts = [mount[:j], mount[j + 1:]]
-            else:
-                return {
-                    'return': 1, 'error': 'Can\'t find separator : in a mount string: {}'.format(mount)}
-
-#            mount_parts = mount.split(":")
-#            if len(mount_parts) != 2:
-#                return {'return': 1, 'error': f'Invalid mount specified in docker
-#                settings'}
-
-            host_mount = mount_parts[0]
-            new_host_mount = host_mount
-            container_mount = mount_parts[1]
-            new_container_mount = container_mount
-
-            tmp_values = re.findall(r'\${{ (.*?) }}', str(host_mount))
-            skip = False
-            host_env_key = None
-            if tmp_values:
-                for tmp_value in tmp_values:
-                    if tmp_value in env:
-                        host_env_key = tmp_value
-                        new_host_mount = get_host_path(env[tmp_value])
-                    else:  # we skip those mounts
-                        mounts[index] = None
-                        skip = True
-                        break
-
-            tmp_values = re.findall(r'\${{ (.*?)
}}', str(container_mount)) - if tmp_values: - for tmp_value in tmp_values: - container_env_key = tmp_value - if tmp_value in env: - new_container_mount, new_container_mount_env = get_container_path( - env[tmp_value]) - container_env_key = new_container_mount_env - else: # we skip those mounts - mounts[index] = None - skip = True - break - else: - container_env_key = str(container_mount) - - if skip: - continue - mounts[index] = new_host_mount + ":" + new_container_mount - if host_env_key: - container_env[host_env_key] = container_env_key - - for v in docker_input_mapping: - if docker_input_mapping[v] == host_env_key: - i[v] = container_env_key - i_run_cmd[v] = container_env_key - - mounts = list(filter(lambda item: item is not None, mounts)) - - mount_string = "" if len(mounts) == 0 else ",".join(mounts) - - # check for proxy settings and pass onto the docker - proxy_keys = [ - "ftp_proxy", - "FTP_PROXY", - "http_proxy", - "HTTP_PROXY", - "https_proxy", - "HTTPS_PROXY", - "no_proxy", - "NO_PROXY", - "socks_proxy", - "SOCKS_PROXY", - "GH_TOKEN"] - - if env.get('+ CM_DOCKER_BUILD_ARGS', []) == []: - env['+ CM_DOCKER_BUILD_ARGS'] = [] - - for key in proxy_keys: - if os.environ.get(key, '') != '': - value = os.environ[key] - container_env[key] = value - env['+ CM_DOCKER_BUILD_ARGS'].append( - "{}={}".format(key, value)) - - if container_env: - if not i_run_cmd.get('env'): - i_run_cmd['env'] = container_env - else: - i_run_cmd['env'] = {**i_run_cmd['env'], **container_env} - - docker_use_host_group_id = i.get( - 'docker_use_host_group_id', - docker_settings.get('use_host_group_id')) - if str(docker_use_host_group_id).lower() not in [ - 'false', 'no', '0'] and os.name != 'nt': - env['+ CM_DOCKER_BUILD_ARGS'].append( - "{}={}".format('GID', '\\" $(id -g $USER) \\"')) - - docker_use_host_user_id = i.get( - 'docker_use_host_user_id', - docker_settings.get('use_host_user_id')) - if str(docker_use_host_user_id).lower() not in [ - 'false', 'no', '0'] and os.name != 'nt': - env['+ CM_DOCKER_BUILD_ARGS'].append( - "{}={}".format('UID', '\\" $(id -u $USER) \\"')) - - docker_base_image = i.get( - 'docker_base_image', - docker_settings.get('base_image')) - docker_os = i.get('docker_os', docker_settings.get('os', 'ubuntu')) - docker_os_version = i.get( - 'docker_os_version', docker_settings.get( - 'os_version', '22.04')) - image_tag_extra = i.get( - 'docker_image_tag_extra', - docker_settings.get( - 'image_tag_extra', - '-latest')) - - if not docker_base_image: - dockerfilename_suffix = docker_os + '_' + docker_os_version - else: - if os.name == 'nt': - dockerfilename_suffix = docker_base_image.replace( - '/', '-').replace(':', '-') - else: - dockerfilename_suffix = docker_base_image.split("/") - dockerfilename_suffix = dockerfilename_suffix[len( - dockerfilename_suffix) - 1] - - cm_repo = i.get( - 'docker_cm_repo', - docker_settings.get( - 'cm_repo', - 'mlcommons@mlperf-automations')) - - docker_path = i.get('docker_path', '').strip() - if docker_path == '': - docker_path = script_path - - dockerfile_path = os.path.join( - docker_path, - 'dockerfiles', - dockerfilename_suffix + - '.Dockerfile') - - # Skips docker run cmd and gives an interactive shell to the user - docker_skip_run_cmd = i.get( - 'docker_skip_run_cmd', docker_settings.get( - 'skip_run_cmd', False)) - - docker_pre_run_cmds = i.get( - 'docker_pre_run_cmds', []) + docker_settings.get('pre_run_cmds', []) - - docker_run_cmd_prefix = i.get( - 'docker_run_cmd_prefix', docker_settings.get( - 'run_cmd_prefix', '')) - - all_gpus = 
i.get('docker_all_gpus', docker_settings.get('all_gpus')) - - num_gpus = i.get('docker_num_gpus', docker_settings.get('num_gpus')) - - device = i.get('docker_device', docker_settings.get('device')) - - image_name = i.get( - 'docker_image_name', - docker_settings.get( - 'image_name', - '')) - - r = check_gh_token(i, docker_settings, quiet) - if r['return'] > 0: - return r - gh_token = r['gh_token'] - - port_maps = i.get( - 'docker_port_maps', - docker_settings.get( - 'port_maps', - [])) - - shm_size = i.get( - 'docker_shm_size', - docker_settings.get( - 'shm_size', - '')) - - pass_user_id = i.get( - 'docker_pass_user_id', - docker_settings.get( - 'pass_user_id', - '')) - pass_user_group = i.get( - 'docker_pass_user_group', - docker_settings.get( - 'pass_user_group', - '')) - - extra_run_args = i.get( - 'docker_extra_run_args', - docker_settings.get( - 'extra_run_args', - '')) - - if detached == '': - detached = docker_settings.get('detached', '') - - if str(docker_skip_run_cmd).lower() in ['true', '1', 'yes']: - interactive = 'yes' - elif interactive == '': - interactive = docker_settings.get('interactive', '') - - -# # Regenerate run_cmd -# if i.get('cmd'): -# run_cmd = "cm run script " + " ".join( a for a in i['cmd'] if not a.startswith('--docker_') ) -# elif i.get('artifact'): -# run_cmd = "cm run script "+i['artifact'] -# elif i.get('tags'): -# run_cmd = "cm run script \""+" "+" ".join(i['tags']) + "\"" -# else: -# run_cmd = "" - - r = regenerate_script_cmd({'script_uid': script_uid, - 'script_alias': script_alias, - 'tags': tags, - 'run_cmd': i_run_cmd, - 'docker_settings': docker_settings, - 'docker_run_cmd_prefix': i.get('docker_run_cmd_prefix', '')}) - if r['return'] > 0: - return r - run_cmd = r['run_cmd_string'] + ' ' + ' --docker_run_deps ' - - env['CM_RUN_STATE_DOCKER'] = True - - if docker_settings.get('mount_current_dir', '') == 'yes': - run_cmd = 'cd ' + current_path_target + ' && ' + run_cmd - - final_run_cmd = run_cmd if docker_skip_run_cmd not in [ - 'yes', True, 'True'] else 'cm version' - - print('') - print('CM command line regenerated to be used inside Docker:') - print('') - print(final_run_cmd) - print('') - - docker_recreate_image = 'yes' if str(norecreate_docker_image).lower() not in [ - "yes", "true", "1"] else 'no' - - if i.get('docker_push_image', '') in ['True', True, 'yes']: - env['CM_DOCKER_PUSH_IMAGE'] = 'yes' - - cm_docker_input = {'action': 'run', - 'automation': 'script', - 'tags': 'run,docker,container', - 'recreate': docker_recreate_image, - 'docker_base_image': docker_base_image, - 'docker_os': docker_os, - 'docker_os_version': docker_os_version, - 'cm_repo': cm_repo, - 'env': env, - 'interactive': interactive, - 'mounts': mounts, - # 'image_tag': script_alias, - 'image_tag_extra': image_tag_extra, - 'detached': detached, - 'script_tags': f"""{i.get('tags')}""", - 'run_cmd': final_run_cmd, - 'v': i.get('v', False), - 'quiet': True, - 'pre_run_cmds': docker_pre_run_cmds, - 'real_run': True, - 'add_deps_recursive': { - 'build-docker-image': { - 'dockerfile': dockerfile_path - } - } - } - - if image_repo: - cm_docker_input['image_repo'] = image_repo - - if image_name: - cm_docker_input['image_name'] = image_name - - if all_gpus: - cm_docker_input['all_gpus'] = True - - if num_gpus: - cm_docker_input['num_gpus'] = str(num_gpus) - - if device: - cm_docker_input['device'] = device - - if gh_token != '': - cm_docker_input['gh_token'] = gh_token - - if port_maps: - cm_docker_input['port_maps'] = port_maps - - if shm_size != '': - cm_docker_input['shm_size'] = 
shm_size - - if pass_user_id != '': - cm_docker_input['pass_user_id'] = pass_user_id - - if pass_user_group != '': - cm_docker_input['pass_user_group'] = pass_user_group - - if extra_run_args != '': - cm_docker_input['extra_run_args'] = extra_run_args - - if i.get('docker_save_script', ''): - cm_docker_input['save_script'] = i['docker_save_script'] - - print('') - - r = self_module.cmind.access(cm_docker_input) - if r['return'] > 0: - return r - - return {'return': 0} - -############################################################ - - -def check_gh_token(i, docker_settings, quiet): - gh_token = i.get('docker_gh_token', '') - - if docker_settings.get('gh_token_required', False) and gh_token == '': - rx = { - 'return': 1, - 'error': 'GH token is required but not provided. Use --docker_gh_token to set it'} - - if quiet: - return rx - - print('') - gh_token = input( - 'Enter GitHub token to access private CM repositories required for this CM script: ') - - if gh_token == '': - return rx - - return {'return': 0, 'gh_token': gh_token} diff --git a/automation/script/template-ae-python/_cm.yaml b/automation/script/template-ae-python/_cm.yaml index 8019b3647..261e4cf75 100644 --- a/automation/script/template-ae-python/_cm.yaml +++ b/automation/script/template-ae-python/_cm.yaml @@ -13,10 +13,10 @@ deps: script_name: run input_mapping: - experiment: CM_EXPERIMENT + experiment: MLC_EXPERIMENT default_env: - CM_EXPERIMENT: '1' + MLC_EXPERIMENT: '1' variations: install_deps: diff --git a/automation/script/template-ae-python/analyze.bat b/automation/script/template-ae-python/analyze.bat index 7e786771a..375cfaebf 100644 --- a/automation/script/template-ae-python/analyze.bat +++ b/automation/script/template-ae-python/analyze.bat @@ -4,9 +4,9 @@ set CUR_DIR=%cd% echo. echo Current execution path: %CUR_DIR% -echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH% -echo ENV CM_EXPERIMENT: %CM_EXPERIMENT% +echo Path to script: %MLC_TMP_CURRENT_SCRIPT_PATH% +echo ENV MLC_EXPERIMENT: %MLC_EXPERIMENT% rem echo. -rem %CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py +rem %MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\main.py rem IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/automation/script/template-ae-python/analyze.sh b/automation/script/template-ae-python/analyze.sh index 630c3db3d..53c10c73c 100644 --- a/automation/script/template-ae-python/analyze.sh +++ b/automation/script/template-ae-python/analyze.sh @@ -4,9 +4,9 @@ CUR_DIR=${PWD} echo "" echo "Current execution path: ${CUR_DIR}" -echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" -echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}" +echo "Path to script: ${MLC_TMP_CURRENT_SCRIPT_PATH}" +echo "ENV MLC_EXPERIMENT: ${MLC_EXPERIMENT}" #echo "" -#${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py +#${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/main.py #test $? 
-eq 0 || exit 1 diff --git a/automation/script/template-ae-python/customize.py b/automation/script/template-ae-python/customize.py index 273999d46..bd7c12dd3 100644 --- a/automation/script/template-ae-python/customize.py +++ b/automation/script/template-ae-python/customize.py @@ -12,7 +12,7 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') return {'return': 0} diff --git a/automation/script/template-ae-python/install_deps.bat b/automation/script/template-ae-python/install_deps.bat index 47f7e7ce2..3419d9511 100644 --- a/automation/script/template-ae-python/install_deps.bat +++ b/automation/script/template-ae-python/install_deps.bat @@ -4,15 +4,15 @@ set CUR_DIR=%cd% echo. echo Current execution path: %CUR_DIR% -echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH% -echo ENV CM_EXPERIMENT: %CM_EXPERIMENT% +echo Path to script: %MLC_TMP_CURRENT_SCRIPT_PATH% +echo ENV MLC_EXPERIMENT: %MLC_EXPERIMENT% -if exist "%CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt" ( +if exist "%MLC_TMP_CURRENT_SCRIPT_PATH%\requirements.txt" ( echo. echo Installing requirements.txt ... echo. - %CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt + %MLC_PYTHON_BIN_WITH_PATH% -m pip install -r %MLC_TMP_CURRENT_SCRIPT_PATH%\requirements.txt IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% ) diff --git a/automation/script/template-ae-python/install_deps.sh b/automation/script/template-ae-python/install_deps.sh index cb7c44c2b..5e8c50a20 100644 --- a/automation/script/template-ae-python/install_deps.sh +++ b/automation/script/template-ae-python/install_deps.sh @@ -4,14 +4,14 @@ CUR_DIR=${PWD} echo "" echo "Current execution path: ${CUR_DIR}" -echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" -echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}" +echo "Path to script: ${MLC_TMP_CURRENT_SCRIPT_PATH}" +echo "ENV MLC_EXPERIMENT: ${MLC_EXPERIMENT}" -if test -f "${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt"; then +if test -f "${MLC_TMP_CURRENT_SCRIPT_PATH}/requirements.txt"; then echo "" echo "Installing requirements.txt ..." echo "" - ${CM_PYTHON_BIN_WITH_PATH} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt + ${MLC_PYTHON_BIN_WITH_PATH} -m pip install -r ${MLC_TMP_CURRENT_SCRIPT_PATH}/requirements.txt test $? -eq 0 || exit 1 fi diff --git a/automation/script/template-ae-python/main.py b/automation/script/template-ae-python/main.py index caa499bf0..48b974b7f 100644 --- a/automation/script/template-ae-python/main.py +++ b/automation/script/template-ae-python/main.py @@ -4,7 +4,7 @@ print('') print('Main script:') - print('Experiment: {}'.format(os.environ.get('CM_EXPERIMENT', ''))) + print('Experiment: {}'.format(os.environ.get('MLC_EXPERIMENT', ''))) print('') exit(0) diff --git a/automation/script/template-ae-python/plot.bat b/automation/script/template-ae-python/plot.bat index 7e786771a..375cfaebf 100644 --- a/automation/script/template-ae-python/plot.bat +++ b/automation/script/template-ae-python/plot.bat @@ -4,9 +4,9 @@ set CUR_DIR=%cd% echo. echo Current execution path: %CUR_DIR% -echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH% -echo ENV CM_EXPERIMENT: %CM_EXPERIMENT% +echo Path to script: %MLC_TMP_CURRENT_SCRIPT_PATH% +echo ENV MLC_EXPERIMENT: %MLC_EXPERIMENT% rem echo. 
-rem %CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py +rem %MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\main.py rem IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/automation/script/template-ae-python/plot.sh b/automation/script/template-ae-python/plot.sh index 630c3db3d..53c10c73c 100644 --- a/automation/script/template-ae-python/plot.sh +++ b/automation/script/template-ae-python/plot.sh @@ -4,9 +4,9 @@ CUR_DIR=${PWD} echo "" echo "Current execution path: ${CUR_DIR}" -echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" -echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}" +echo "Path to script: ${MLC_TMP_CURRENT_SCRIPT_PATH}" +echo "ENV MLC_EXPERIMENT: ${MLC_EXPERIMENT}" #echo "" -#${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py +#${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/main.py #test $? -eq 0 || exit 1 diff --git a/automation/script/template-ae-python/reproduce.bat b/automation/script/template-ae-python/reproduce.bat index 7e786771a..375cfaebf 100644 --- a/automation/script/template-ae-python/reproduce.bat +++ b/automation/script/template-ae-python/reproduce.bat @@ -4,9 +4,9 @@ set CUR_DIR=%cd% echo. echo Current execution path: %CUR_DIR% -echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH% -echo ENV CM_EXPERIMENT: %CM_EXPERIMENT% +echo Path to script: %MLC_TMP_CURRENT_SCRIPT_PATH% +echo ENV MLC_EXPERIMENT: %MLC_EXPERIMENT% rem echo. -rem %CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py +rem %MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\main.py rem IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/automation/script/template-ae-python/reproduce.sh b/automation/script/template-ae-python/reproduce.sh index 630c3db3d..53c10c73c 100644 --- a/automation/script/template-ae-python/reproduce.sh +++ b/automation/script/template-ae-python/reproduce.sh @@ -4,9 +4,9 @@ CUR_DIR=${PWD} echo "" echo "Current execution path: ${CUR_DIR}" -echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" -echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}" +echo "Path to script: ${MLC_TMP_CURRENT_SCRIPT_PATH}" +echo "ENV MLC_EXPERIMENT: ${MLC_EXPERIMENT}" #echo "" -#${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py +#${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/main.py #test $? -eq 0 || exit 1 diff --git a/automation/script/template-ae-python/run.bat b/automation/script/template-ae-python/run.bat index 6c1274ce6..f1b69d26d 100644 --- a/automation/script/template-ae-python/run.bat +++ b/automation/script/template-ae-python/run.bat @@ -4,9 +4,9 @@ set CUR_DIR=%cd% echo. echo Current execution path: %CUR_DIR% -echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH% -echo ENV CM_EXPERIMENT: %CM_EXPERIMENT% +echo Path to script: %MLC_TMP_CURRENT_SCRIPT_PATH% +echo ENV MLC_EXPERIMENT: %MLC_EXPERIMENT% echo. 
-%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py +%MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\main.py IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/automation/script/template-ae-python/run.sh b/automation/script/template-ae-python/run.sh index 2150b45dc..a4b86e69a 100644 --- a/automation/script/template-ae-python/run.sh +++ b/automation/script/template-ae-python/run.sh @@ -4,9 +4,9 @@ CUR_DIR=${PWD} echo "" echo "Current execution path: ${CUR_DIR}" -echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" -echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}" +echo "Path to script: ${MLC_TMP_CURRENT_SCRIPT_PATH}" +echo "ENV MLC_EXPERIMENT: ${MLC_EXPERIMENT}" echo "" -${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py +${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/main.py test $? -eq 0 || exit 1 diff --git a/automation/script/template-ae-python/validate.bat b/automation/script/template-ae-python/validate.bat index 7e786771a..375cfaebf 100644 --- a/automation/script/template-ae-python/validate.bat +++ b/automation/script/template-ae-python/validate.bat @@ -4,9 +4,9 @@ set CUR_DIR=%cd% echo. echo Current execution path: %CUR_DIR% -echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH% -echo ENV CM_EXPERIMENT: %CM_EXPERIMENT% +echo Path to script: %MLC_TMP_CURRENT_SCRIPT_PATH% +echo ENV MLC_EXPERIMENT: %MLC_EXPERIMENT% rem echo. -rem %CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py +rem %MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\main.py rem IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/automation/script/template-ae-python/validate.sh b/automation/script/template-ae-python/validate.sh index 630c3db3d..53c10c73c 100644 --- a/automation/script/template-ae-python/validate.sh +++ b/automation/script/template-ae-python/validate.sh @@ -4,9 +4,9 @@ CUR_DIR=${PWD} echo "" echo "Current execution path: ${CUR_DIR}" -echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" -echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}" +echo "Path to script: ${MLC_TMP_CURRENT_SCRIPT_PATH}" +echo "ENV MLC_EXPERIMENT: ${MLC_EXPERIMENT}" #echo "" -#${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py +#${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/main.py #test $? 
-eq 0 || exit 1 diff --git a/automation/script/template-python/_cm.yaml b/automation/script/template-python/_cm.yaml index adbb8d4e7..11f646860 100644 --- a/automation/script/template-python/_cm.yaml +++ b/automation/script/template-python/_cm.yaml @@ -11,11 +11,11 @@ deps: - python3 input_mapping: - var1: CM_VAR1 + var1: MLC_VAR1 req: PIP_REQUIREMENTS default_env: - CM_VAR1: 'something' + MLC_VAR1: 'something' variations: req: diff --git a/automation/script/template-python/customize.py b/automation/script/template-python/customize.py index 625b643d4..8961ab5ca 100644 --- a/automation/script/template-python/customize.py +++ b/automation/script/template-python/customize.py @@ -15,9 +15,9 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') - print(' ENV CM_VAR1: {}'.format(env.get('CM_VAR1', ''))) + print(' ENV MLC_VAR1: {}'.format(env.get('MLC_VAR1', ''))) return {'return': 0} diff --git a/automation/script/template-python/main.py b/automation/script/template-python/main.py index e3302f36f..68245e7bd 100644 --- a/automation/script/template-python/main.py +++ b/automation/script/template-python/main.py @@ -4,7 +4,7 @@ print('') print('Main script:') - print('ENV CM_VAR1: {}'.format(os.environ.get('CM_VAR1', ''))) + print('ENV MLC_VAR1: {}'.format(os.environ.get('MLC_VAR1', ''))) print('') exit(0) diff --git a/automation/script/template-python/run.bat b/automation/script/template-python/run.bat index f9e1264bc..11e897362 100644 --- a/automation/script/template-python/run.bat +++ b/automation/script/template-python/run.bat @@ -4,22 +4,22 @@ set CUR_DIR=%cd% echo. echo Current execution path: %CUR_DIR% -echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH% +echo Path to script: %MLC_TMP_CURRENT_SCRIPT_PATH% echo ENV PIP_REQUIREMENTS: %PIP_REQUIREMENTS% -echo ENV CM_VAR1: %CM_VAR1% +echo ENV MLC_VAR1: %MLC_VAR1% if "%PIP_REQUIREMENTS%" == "True" ( - if exist "%CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt" ( + if exist "%MLC_TMP_CURRENT_SCRIPT_PATH%\requirements.txt" ( echo. echo Installing requirements.txt ... echo. - %CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt + %MLC_PYTHON_BIN_WITH_PATH% -m pip install -r %MLC_TMP_CURRENT_SCRIPT_PATH%\requirements.txt IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% ) ) echo. -%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py +%MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\main.py IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/automation/script/template-python/run.sh b/automation/script/template-python/run.sh index a1a6aec2e..a3e2021b9 100644 --- a/automation/script/template-python/run.sh +++ b/automation/script/template-python/run.sh @@ -4,21 +4,21 @@ CUR_DIR=${PWD} echo "" echo "Current execution path: ${CUR_DIR}" -echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" +echo "Path to script: ${MLC_TMP_CURRENT_SCRIPT_PATH}" echo "ENV PIP_REQUIREMENTS: ${PIP_REQUIREMENTS}" -echo "ENV CM_VAR1: ${CM_VAR1}" +echo "ENV MLC_VAR1: ${MLC_VAR1}" if [ "${PIP_REQUIREMENTS}" == "True" ]; then - if test -f "${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt"; then + if test -f "${MLC_TMP_CURRENT_SCRIPT_PATH}/requirements.txt"; then echo "" echo "Installing requirements.txt ..." echo "" - ${CM_PYTHON_BIN_WITH_PATH} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt + ${MLC_PYTHON_BIN_WITH_PATH} -m pip install -r ${MLC_TMP_CURRENT_SCRIPT_PATH}/requirements.txt test $? 
-eq 0 || exit 1 fi fi echo "" -${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py +${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/main.py test $? -eq 0 || exit 1 diff --git a/automation/script/template-pytorch/_cm.yaml b/automation/script/template-pytorch/_cm.yaml index eaff95e47..22cd7a635 100644 --- a/automation/script/template-pytorch/_cm.yaml +++ b/automation/script/template-pytorch/_cm.yaml @@ -24,11 +24,11 @@ deps: input_mapping: - var1: CM_VAR1 + var1: MLC_VAR1 req: PIP_REQUIREMENTS default_env: - CM_VAR1: 'something' + MLC_VAR1: 'something' variations: req: diff --git a/automation/script/template-pytorch/customize.py b/automation/script/template-pytorch/customize.py index 625b643d4..8961ab5ca 100644 --- a/automation/script/template-pytorch/customize.py +++ b/automation/script/template-pytorch/customize.py @@ -15,9 +15,9 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') - print(' ENV CM_VAR1: {}'.format(env.get('CM_VAR1', ''))) + print(' ENV MLC_VAR1: {}'.format(env.get('MLC_VAR1', ''))) return {'return': 0} diff --git a/automation/script/template-pytorch/main.py b/automation/script/template-pytorch/main.py index 217aed3b9..3bfcd7572 100644 --- a/automation/script/template-pytorch/main.py +++ b/automation/script/template-pytorch/main.py @@ -6,7 +6,7 @@ print('') print('Main script:') - print('ENV CM_VAR1: {}'.format(os.environ.get('CM_VAR1', ''))) + print('ENV MLC_VAR1: {}'.format(os.environ.get('MLC_VAR1', ''))) print('ENV USE_CUDA: {}'.format(os.environ.get('USE_CUDA', ''))) print('') print('PyTorch version: {}'.format(torch.__version__)) diff --git a/automation/script/template-pytorch/run.bat b/automation/script/template-pytorch/run.bat index f9e1264bc..11e897362 100644 --- a/automation/script/template-pytorch/run.bat +++ b/automation/script/template-pytorch/run.bat @@ -4,22 +4,22 @@ set CUR_DIR=%cd% echo. echo Current execution path: %CUR_DIR% -echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH% +echo Path to script: %MLC_TMP_CURRENT_SCRIPT_PATH% echo ENV PIP_REQUIREMENTS: %PIP_REQUIREMENTS% -echo ENV CM_VAR1: %CM_VAR1% +echo ENV MLC_VAR1: %MLC_VAR1% if "%PIP_REQUIREMENTS%" == "True" ( - if exist "%CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt" ( + if exist "%MLC_TMP_CURRENT_SCRIPT_PATH%\requirements.txt" ( echo. echo Installing requirements.txt ... echo. - %CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt + %MLC_PYTHON_BIN_WITH_PATH% -m pip install -r %MLC_TMP_CURRENT_SCRIPT_PATH%\requirements.txt IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% ) ) echo. 
-%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py +%MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\main.py IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/automation/script/template-pytorch/run.sh b/automation/script/template-pytorch/run.sh index a1a6aec2e..a3e2021b9 100644 --- a/automation/script/template-pytorch/run.sh +++ b/automation/script/template-pytorch/run.sh @@ -4,21 +4,21 @@ CUR_DIR=${PWD} echo "" echo "Current execution path: ${CUR_DIR}" -echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" +echo "Path to script: ${MLC_TMP_CURRENT_SCRIPT_PATH}" echo "ENV PIP_REQUIREMENTS: ${PIP_REQUIREMENTS}" -echo "ENV CM_VAR1: ${CM_VAR1}" +echo "ENV MLC_VAR1: ${MLC_VAR1}" if [ "${PIP_REQUIREMENTS}" == "True" ]; then - if test -f "${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt"; then + if test -f "${MLC_TMP_CURRENT_SCRIPT_PATH}/requirements.txt"; then echo "" echo "Installing requirements.txt ..." echo "" - ${CM_PYTHON_BIN_WITH_PATH} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt + ${MLC_PYTHON_BIN_WITH_PATH} -m pip install -r ${MLC_TMP_CURRENT_SCRIPT_PATH}/requirements.txt test $? -eq 0 || exit 1 fi fi echo "" -${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py +${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/main.py test $? -eq 0 || exit 1 diff --git a/automation/script/template/customize.py b/automation/script/template/customize.py index 273999d46..bd7c12dd3 100644 --- a/automation/script/template/customize.py +++ b/automation/script/template/customize.py @@ -12,7 +12,7 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') return {'return': 0} diff --git a/automation/script/template/run.sh b/automation/script/template/run.sh index 4c23c380e..32cf4d51e 100644 --- a/automation/script/template/run.sh +++ b/automation/script/template/run.sh @@ -1,17 +1,17 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency echo "Running: " -echo "${CM_RUN_CMD}" +echo "${MLC_RUN_CMD}" echo "" -if [[ ${CM_FAKE_RUN} != "yes" ]]; then - eval "${CM_RUN_CMD}" +if [[ ${MLC_FAKE_RUN} != "yes" ]]; then + eval "${MLC_RUN_CMD}" test $? -eq 0 || exit 1 fi diff --git a/automation/script/template_list_of_scripts.md b/automation/script/template_list_of_scripts.md index 198a500f1..07fb95cb7 100644 --- a/automation/script/template_list_of_scripts.md +++ b/automation/script/template_list_of_scripts.md @@ -17,11 +17,11 @@ via CM command line, Python API or GUI. CM scripts can easily chained together into automation workflows using `deps` and `tags` keys while automatically updating all environment variables and paths -for a given task and platform [using simple JSON or YAML](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/app-image-classification-onnx-py/_cm.yaml). +for a given task and platform [using simple JSON or YAML](https://github.com/mlcommons/ck/blob/master/mlc-mlops/script/app-image-classification-onnx-py/_cm.yaml). 
*Note that CM is a community project being developed and extended by [MLCommons members and individual contributors](../CONTRIBUTING.md) - - you can find source code of CM scripts maintained by MLCommons [here](../cm-mlops/script). + you can find source code of CM scripts maintained by MLCommons [here](../mlc-mlops/script). Please join [Discord server](https://discord.gg/JjWNWXKxwT) to participate in collaborative developments or provide your feedback.* @@ -40,13 +40,13 @@ for a given task and platform [using simple JSON or YAML](https://github.com/mlc # List of CM scripts by categories -{{CM_TOC_CATEGORIES}} +{{MLC_TOC_CATEGORIES}} -{{CM_TOC2}} +{{MLC_TOC2}} # List of all sorted CM scripts -{{CM_TOC}} +{{MLC_TOC}} -{{CM_MAIN}} +{{MLC_MAIN}} diff --git a/automation/utils.py b/automation/utils.py index 95aa0b2e9..61cc08e28 100644 --- a/automation/utils.py +++ b/automation/utils.py @@ -44,7 +44,7 @@ def get_host_os_info(i={}): info['run_bat'] = 'call ${bat_file}' info['start_script'] = ['@echo off', ''] info['env'] = { - "CM_WINDOWS": "yes" + "MLC_WINDOWS": "yes" } else: if platform.system().lower().startswith('darwin'): @@ -121,7 +121,7 @@ def download_file(i): (chunk_size) (int): chunck size in bytes (65536 by default) (text) (str): print text before downloaded status ("Downloaded: " by default) (verify) (bool): verify SSL certificate if True (True by default) - can be switched by global env CM_UTILS_DOWNLOAD_VERIFY_SSL = no + can be switched by global env MLC_UTILS_DOWNLOAD_VERIFY_SSL = no Returns: (CM return dict): @@ -170,8 +170,8 @@ def download_file(i): text = i.get('text', 'Downloaded: ') - if 'CM_UTILS_DOWNLOAD_VERIFY_SSL' in os.environ: - verify = os.environ['CM_UTILS_DOWNLOAD_VERIFY_SSL'] == 'yes' + if 'MLC_UTILS_DOWNLOAD_VERIFY_SSL' in os.environ: + verify = os.environ['MLC_UTILS_DOWNLOAD_VERIFY_SSL'] == 'yes' else: verify = i.get('verify', True) diff --git a/automation/utils/README.md b/automation/utils/README.md deleted file mode 100644 index 9a844c656..000000000 --- a/automation/utils/README.md +++ /dev/null @@ -1,387 +0,0 @@ -*This README is automatically generated - don't edit! 
Use `README-extra.md` for extra notes!* - -### Automation actions - -#### test - - * CM CLI: ```cm test utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L15)) - * CM CLI with UID: ```cm test utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L15)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'test' - 'automation':'utils,dc2743f8450541e3' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L15) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### get_host_os_info - - * CM CLI: ```cm get_host_os_info utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L54)) - * CM CLI with UID: ```cm get_host_os_info utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L54)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'get_host_os_info' - 'automation':'utils,dc2743f8450541e3' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L54) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### download_file - - * CM CLI: ```cm download_file utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L156)) - * CM CLI with UID: ```cm download_file utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L156)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'download_file' - 'automation':'utils,dc2743f8450541e3' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L156) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### unzip_file - - * CM CLI: ```cm unzip_file utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L265)) - * CM CLI with UID: ```cm unzip_file utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L265)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'unzip_file' - 'automation':'utils,dc2743f8450541e3' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L265) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### compare_versions - - * CM CLI: ```cm compare_versions utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L343)) - * CM CLI with UID: ```cm compare_versions utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L343)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'compare_versions' - 'automation':'utils,dc2743f8450541e3' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L343) - ```python - }) - if 
r['return']>0: - print(r['error']) - ``` - -#### json2yaml - - * CM CLI: ```cm json2yaml utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L391)) - * CM CLI with UID: ```cm json2yaml utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L391)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'json2yaml' - 'automation':'utils,dc2743f8450541e3' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L391) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### yaml2json - - * CM CLI: ```cm yaml2json utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L429)) - * CM CLI with UID: ```cm yaml2json utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L429)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'yaml2json' - 'automation':'utils,dc2743f8450541e3' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L429) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### sort_json - - * CM CLI: ```cm sort_json utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L467)) - * CM CLI with UID: ```cm sort_json utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L467)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'sort_json' - 'automation':'utils,dc2743f8450541e3' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L467) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### dos2unix - - * CM CLI: ```cm dos2unix utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L504)) - * CM CLI with UID: ```cm dos2unix utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L504)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'dos2unix' - 'automation':'utils,dc2743f8450541e3' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L504) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### replace_string_in_file - - * CM CLI: ```cm replace_string_in_file utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L541)) - * CM CLI with UID: ```cm replace_string_in_file utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L541)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'replace_string_in_file' - 'automation':'utils,dc2743f8450541e3' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L541) - ```python - }) - if r['return']>0: - 
print(r['error']) - ``` - -#### create_toc_from_md - - * CM CLI: ```cm create_toc_from_md utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L591)) - * CM CLI with UID: ```cm create_toc_from_md utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L591)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'create_toc_from_md' - 'automation':'utils,dc2743f8450541e3' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L591) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### copy_to_clipboard - - * CM CLI: ```cm copy_to_clipboard utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L659)) - * CM CLI with UID: ```cm copy_to_clipboard utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L659)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'copy_to_clipboard' - 'automation':'utils,dc2743f8450541e3' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L659) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### list_files_recursively - - * CM CLI: ```cm list_files_recursively utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L737)) - * CM CLI with UID: ```cm list_files_recursively utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L737)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'list_files_recursively' - 'automation':'utils,dc2743f8450541e3' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L737) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### generate_secret - - * CM CLI: ```cm generate_secret utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L770)) - * CM CLI with UID: ```cm generate_secret utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L770)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'generate_secret' - 'automation':'utils,dc2743f8450541e3' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L770) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### detect_tags_in_artifact - - * CM CLI: ```cm detect_tags_in_artifact utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L793)) - * CM CLI with UID: ```cm detect_tags_in_artifact utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L793)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'detect_tags_in_artifact' - 'automation':'utils,dc2743f8450541e3' - 'out':'con' - ``` - [add keys from this 
API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L793) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### prune_input - - * CM CLI: ```cm prune_input utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L822)) - * CM CLI with UID: ```cm prune_input utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L822)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'prune_input' - 'automation':'utils,dc2743f8450541e3' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L822) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### uid - - * CM CLI: ```cm uid utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L864)) - * CM CLI with UID: ```cm uid utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L864)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'uid' - 'automation':'utils,dc2743f8450541e3' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L864) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### system - - * CM CLI: ```cm system utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L891)) - * CM CLI with UID: ```cm system utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L891)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'system' - 'automation':'utils,dc2743f8450541e3' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L891) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### load_cfg - - * CM CLI: ```cm load_cfg utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L969)) - * CM CLI with UID: ```cm load_cfg utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L969)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'load_cfg' - 'automation':'utils,dc2743f8450541e3' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L969) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -### Maintainers - -* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce) \ No newline at end of file diff --git a/automation/utils/_cm.json b/automation/utils/_cm.json deleted file mode 100644 index f2dc9c5b6..000000000 --- a/automation/utils/_cm.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "alias": "utils", - "automation_alias": "automation", - "automation_uid": "bbeb15d8f0a944a4", - "desc": "Accessing various CM utils", - "developers": "[Grigori Fursin](https://cKnowledge.org/gfursin)", - "sort": 800, - "tags": [ - "automation" - ], - "uid": "dc2743f8450541e3" -} diff --git 
a/automation/utils/module.py b/automation/utils/module.py deleted file mode 100644 index 2a4851b0a..000000000 --- a/automation/utils/module.py +++ /dev/null @@ -1,1108 +0,0 @@ -import os - -from cmind.automation import Automation -from cmind import utils - - -class CAutomation(Automation): - """ - Automation actions - """ - - ############################################################ - def __init__(self, cmind, automation_file): - super().__init__(cmind, __file__) - - ############################################################ - def test(self, i): - """ - Test automation - - Args: - (CM input dict): - - (out) (str): if 'con', output to console - - automation (str): automation as CM string object - - parsed_automation (list): prepared in CM CLI or CM access function - [ (automation alias, automation UID) ] or - [ (automation alias, automation UID), (automation repo alias, automation repo UID) ] - - (artifact) (str): artifact as CM string object - - (parsed_artifact) (list): prepared in CM CLI or CM access function - [ (artifact alias, artifact UID) ] or - [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] - - ... - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - - * Output from this automation action - - """ - - import json - print(json.dumps(i, indent=2)) - - return {'return': 0} - - ########################################################################## - def get_host_os_info(self, i): - """ - Get some host platform name (currently windows or linux) and OS bits - - Args: - (CM input dict): - - (bits) (str): force host platform bits - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - - * info (dict): - * platform (str): "windows", "linux" or "darwin" - * bat_ext (str): ".bat" or ".sh" - * bits (str): 32 or 64 bits - * python_bits 9str): python bits - - """ - - import os - import platform - import struct - - info = {} - - pbits = str(8 * struct.calcsize("P")) - - if platform.system().lower().startswith('win'): - platform = 'windows' - info['bat_ext'] = '.bat' - info['set_env'] = 'set ${key}=${value}' - info['env_separator'] = ';' - info['env_var'] = '%env_var%' - info['bat_rem'] = 'rem ${rem}' - info['run_local_bat'] = 'call ${bat_file}' - info['run_local_bat_from_python'] = 'call ${bat_file}' - info['run_bat'] = 'call ${bat_file}' - info['start_script'] = ['@echo off', ''] - info['env'] = { - "CM_WINDOWS": "yes" - } - else: - if platform.system().lower().startswith('darwin'): - platform = 'darwin' - else: - platform = 'linux' - - info['bat_ext'] = '.sh' - info['set_env'] = 'export ${key}="${value}"' - info['env_separator'] = ':' - info['env_var'] = '${env_var}' - info['set_exec_file'] = 'chmod 755 "${file_name}"' - info['bat_rem'] = '# ${rem}' - info['run_local_bat'] = '. ./${bat_file}' - info['run_local_bat_from_python'] = 'bash -c ". ./${bat_file}"' - info['run_bat'] = '. 
${bat_file}' - info['start_script'] = ['#!/bin/bash', ''] - info['env'] = {} - - info['platform'] = platform - - obits = i.get('bits', '') - if obits == '': - obits = '32' - if platform == 'windows': - # Trying to get fast way to detect bits - if os.environ.get('ProgramW6432', '') != '' or os.environ.get( - 'ProgramFiles(x86)', '') != '': # pragma: no cover - obits = '64' - else: - # On Linux use first getconf LONG_BIT and if doesn't work use - # python bits - - obits = pbits - - r = utils.gen_tmp_file({}) - if r['return'] > 0: - return r - - fn = r['file_name'] - - cmd = 'getconf LONG_BIT > ' + fn - rx = os.system(cmd) - - if rx == 0: - r = utils.load_txt(file_name=fn, remove_after_read=True) - - if r['return'] == 0: - s = r['string'].strip() - if len(s) > 0 and len(s) < 4: - obits = s - else: - if os.path.isfile(fn): - os.remove(fn) - - info['bits'] = obits - info['python_bits'] = pbits - - return {'return': 0, 'info': info} - - ########################################################################## - def download_file(self, i): - """ - Download file using requests - - Args: - (CM input dict): - - url (str): URL with file - (filename) (str): explicit file name - (path) (str): path to record file (or current if empty) - (chunk_size) (int): chunck size in bytes (65536 by default) - (text) (str): print text before downloaded status ("Downloaded: " by default) - (verify) (bool): verify SSL certificate if True (True by default) - can be switched by global env CM_UTILS_DOWNLOAD_VERIFY_SSL = no - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - - * filename (str): file name - * path (str): path to file - * size (int): file size - - """ - - import requests - import time - import sys - from urllib import parse - - # Get URL - url = i['url'] - - # Check file name - file_name = i.get('filename', '') - if file_name == '': - parsed_url = parse.urlparse(url) - file_name = os.path.basename(parsed_url.path) - - # Check path - path = i.get('path', '') - if path is None or path == '': - path = os.getcwd() - - # Output file - path_to_file = os.path.join(path, file_name) - - if os.path.isfile(path_to_file): - os.remove(path_to_file) - - print('Downloading to {}'.format(path_to_file)) - print('') - - # Download - size = -1 - downloaded = 0 - chunk_size = i.get('chunk_size', 65536) - - text = i.get('text', 'Downloaded: ') - - if 'CM_UTILS_DOWNLOAD_VERIFY_SSL' in os.environ: - verify = os.environ['CM_UTILS_DOWNLOAD_VERIFY_SSL'] == 'yes' - else: - verify = i.get('verify', True) - - try: - with requests.get(url, stream=True, allow_redirects=True, verify=verify) as download: - download.raise_for_status() - - size_string = download.headers.get('Content-Length') - - if size_string is None: - transfer_encoding = download.headers.get( - 'Transfer-Encoding', '') - if transfer_encoding != 'chunked': - return {'return': 1, 'error': 'did not receive file'} - else: - size_string = "0" - - size = int(size_string) - - with open(path_to_file, 'wb') as output: - for chunk in download.iter_content(chunk_size=chunk_size): - - if chunk: - output.write(chunk) - if size == 0: - continue - downloaded += 1 - percent = downloaded * chunk_size * 100 / size - - sys.stdout.write("\r{}{:3.0f}%".format(text, percent)) - sys.stdout.flush() - - sys.stdout.write("\r{}{:3.0f}%".format(text, 100)) - sys.stdout.flush() - - except Exception as e: - return {'return': 1, 'error': format(e)} - - print('') - if size == 0: - file_stats = 
os.stat(path_to_file) - size = file_stats.st_size - - return {'return': 0, 'filename': file_name, - 'path': path_to_file, 'size': size} - - ########################################################################## - def unzip_file(self, i): - """ - Unzip file - - Args: - (CM input dict): - - filename (str): explicit file name - (path) (str): path where to unzip file (current path otherwise) - (strip_folders) (int): strip first folders - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - - """ - - import zipfile - - # Check file name - file_name = i['filename'] - - if not os.path.isfile(file_name): - return {'return': 1, - 'error': 'file {} not found'.format(file_name)} - - console = i.get('out') == 'con' - - # Attempt to read cmr.json - file_name_handle = open(file_name, 'rb') - file_name_zip = zipfile.ZipFile(file_name_handle) - - info_files = file_name_zip.infolist() - - path = i.get('path', '') - if path is None or path == '': - path = os.getcwd() - - strip_folders = i.get('strip_folders', 0) - - # Unpacking zip - for info in info_files: - f = info.filename - permissions = info.external_attr - - if not f.startswith('..') and not f.startswith( - '/') and not f.startswith('\\'): - f_zip = f - - if strip_folders > 0: - fsplit = f.split('/') # Zip standard on all OS - f = '/'.join(fsplit[strip_folders:]) - - file_path = os.path.join(path, f) - - if f.endswith('/'): - # create directory - if not os.path.exists(file_path): - os.makedirs(file_path) - else: - dir_name = os.path.dirname(file_path) - if not os.path.exists(dir_name): - os.makedirs(dir_name) - - # extract file - file_out = open(file_path, 'wb') - file_out.write(file_name_zip.read(f_zip)) - file_out.close() - - if permissions > 0xffff: - os.chmod(file_path, permissions >> 16) - - file_name_zip.close() - file_name_handle.close() - - return {'return': 0} - - ########################################################################## - def compare_versions(self, i): - """ - Compare versions - - Args: - - version1 (str): version 1 - version2 (str): version 2 - - Returns: - (CM return dict): - - * comparison (int): 1 - version 1 > version 2 - 0 - version 1 == version 2 - -1 - version 1 < version 2 - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - """ - - version1 = i['version1'] - version2 = i['version2'] - - l_version1 = version1.split('.') - l_version2 = version2.split('.') - - # 3.9.6 vs 3.9 - # 3.9 vs 3.9.6 - - i_version1 = [int(v) if v.isdigit() else v for v in l_version1] - i_version2 = [int(v) if v.isdigit() else v for v in l_version2] - - comparison = 0 - - for index in range(max(len(i_version1), len(i_version2))): - v1 = i_version1[index] if index < len(i_version1) else 0 - v2 = i_version2[index] if index < len(i_version2) else 0 - - if v1 > v2: - comparison = 1 - break - elif v1 < v2: - comparison = -1 - break - - return {'return': 0, 'comparison': comparison} - - ########################################################################## - def json2yaml(self, i): - """ - Convert JSON file to YAML - - Args: - - input (str): input file (.json) - (output) (str): output file (.yaml) - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - """ - - input_file = i.get('input', '') - - if input_file == '': - return {'return': 1, 'error': 'please specify --input={json file}'} - - output_file = 
i.get('output', '') - - r = utils.load_json(input_file, check_if_exists=True) - if r['return'] > 0: - return r - - meta = r['meta'] - - if output_file == '': - output_file = input_file[:- - 5] if input_file.endswith('.json') else input_file - output_file += '.yaml' - - r = utils.save_yaml(output_file, meta) - if r['return'] > 0: - return r - - return {'return': 0} - - ########################################################################## - def yaml2json(self, i): - """ - Convert YAML file to JSON - - Args: - - input (str): input file (.yaml) - (output) (str): output file (.json) - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - """ - - input_file = i.get('input', '') - - if input_file == '': - return {'return': 1, 'error': 'please specify --input={yaml file}'} - - output_file = i.get('output', '') - - r = utils.load_yaml(input_file, check_if_exists=True) - if r['return'] > 0: - return r - - meta = r['meta'] - - if output_file == '': - output_file = input_file[:- - 5] if input_file.endswith('.yaml') else input_file - output_file += '.json' - - r = utils.save_json(output_file, meta) - if r['return'] > 0: - return r - - return {'return': 0} - - ########################################################################## - def sort_json(self, i): - """ - Sort JSON file - - Args: - - input (str): input file (.json) - (output) (str): output file - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - """ - - input_file = i.get('input', '') - - if input_file == '': - return {'return': 1, 'error': 'please specify --input={json file}'} - - r = utils.load_json(input_file, check_if_exists=True) - if r['return'] > 0: - return r - - meta = r['meta'] - - output_file = i.get('output', '') - - if output_file == '': - output_file = input_file - - r = utils.save_json(output_file, meta, sort_keys=True) - if r['return'] > 0: - return r - - return {'return': 0} - - ########################################################################## - def dos2unix(self, i): - """ - Convert DOS file to UNIX (remove \r) - - Args: - - input (str): input file (.txt) - (output) (str): output file - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - """ - - input_file = i.get('input', '') - - if input_file == '': - return {'return': 1, 'error': 'please specify --input={txt file}'} - - r = utils.load_txt(input_file, check_if_exists=True) - if r['return'] > 0: - return r - - s = r['string'].replace('\r', '') - - output_file = i.get('output', '') - - if output_file == '': - output_file = input_file - - r = utils.save_txt(output_file, s) - if r['return'] > 0: - return r - - return {'return': 0} - - ########################################################################## - def replace_string_in_file(self, i): - """ - Convert DOS file to UNIX (remove \r) - - Args: - - input (str): input file (.txt) - (output) (str): output file - string (str): string to replace - replacement (str): replacement string - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - - (update) (bool): True if file was upated - """ - - input_file = i.get('input', '') - if input_file == '': - return {'return': 1, 'error': 'please specify --input={txt file}'} - - string = i.get('string', '') - if string 
== '': - return {'return': 1, - 'error': 'please specify --string={string to replace}'} - - replacement = i.get('replacement', '') - if replacement == '': - return {'return': 1, - 'error': 'please specify --replacement={string to replace}'} - - output_file = i.get('output', '') - - if output_file == '': - output_file = input_file - - r = utils.load_txt(input_file, check_if_exists=True) - if r['return'] > 0: - return r - - s = r['string'].replace('\r', '') - - s = s.replace(string, replacement) - - r = utils.save_txt(output_file, s) - if r['return'] > 0: - return r - - return {'return': 0} - - ########################################################################## - def create_toc_from_md(self, i): - """ - Convert DOS file to UNIX (remove \r) - - Args: - - input (str): input file (.md) - (output) (str): output file (input+'.toc) - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - """ - - input_file = i.get('input', '') - if input_file == '': - return {'return': 1, 'error': 'please specify --input={txt file}'} - - output_file = i.get('output', '') - - if output_file == '': - output_file = input_file + '.toc' - - r = utils.load_txt(input_file, check_if_exists=True) - if r['return'] > 0: - return r - - lines = r['string'].split('\n') - - toc = [] - - toc.append('
<details>') - toc.append('<summary>Click here to see the table of contents.</summary>') - toc.append('') - - for line in lines: - line = line.strip() - - if line.startswith('#'): - j = line.find(' ') - if j >= 0: - title = line[j:].strip() - - x = title.lower().replace(' ', '-') - - for k in range(0, 2): - if x.startswith('*'): - x = x[1:] - if x.endswith('*'): - x = x[:-1] - - for z in [':', '+', '.', '(', ')', ',']: - x = x.replace(z, '') - - y = ' ' * (2 * (j - 1)) + '* [' + title + '](#' + x + ')' - - toc.append(y) - - toc.append('') - toc.append('</details>
') - - r = utils.save_txt(output_file, '\n'.join(toc) + '\n') - if r['return'] > 0: - return r - - return {'return': 0} - - ########################################################################## - def copy_to_clipboard(self, i): - """ - Copy string to a clipboard - - Args: - - string (str): string to copy to a clipboard - (add_quotes) (bool): add quotes to the string in a clipboard - (skip_fail) (bool): if True, do not fail - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - """ - - s = i.get('string', '') - - if i.get('add_quotes', False): - s = '"' + s + '"' - - failed = False - warning = '' - - # Try to load pyperclip (seems to work fine on Windows) - try: - import pyperclip - except Exception as e: - warning = format(e) - failed = True - pass - - if not failed: - pyperclip.copy(s) - else: - failed = False - - # Try to load Tkinter - try: - from Tkinter import Tk - except ImportError as e: - warning = format(e) - failed = True - pass - - if failed: - failed = False - try: - from tkinter import Tk - except ImportError as e: - warning = format(e) - failed = True - pass - - if not failed: - # Copy to clipboard - try: - r = Tk() - r.withdraw() - r.clipboard_clear() - r.clipboard_append(s) - r.update() - r.destroy() - except Exception as e: - failed = True - warning = format(e) - - rr = {'return': 0} - - if failed: - if not i.get('skip_fail', False): - return {'return': 1, 'error': warning} - - rr['warning'] = warning - - return rr - - ########################################################################## - def list_files_recursively(self, i): - """ - List files and concatenate into string separate by comma - - Args: - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - """ - - files = os.walk('.') - - s = '' - - for (dir_path, dir_names, file_names) in files: - for f in file_names: - if s != '': - s += ',' - - if dir_path == '.': - dir_path2 = '' - else: - dir_path2 = dir_path[2:].replace('\\', '/') + '/' - - s += dir_path2 + f - - print(s) - - return {'return': 0} - - ########################################################################## - def generate_secret(self, i): - """ - Generate secret for web apps - - Args: - - Returns: - (CM return dict): - - secret (str): secret - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - """ - - import secrets - s = secrets.token_urlsafe(16) - - print(s) - - return {'return': 0, 'secret': s} - - ########################################################################## - def detect_tags_in_artifact(self, i): - """ - Detect if there are tags in an artifact name (spaces) and update input - - Args: - - input (dict) : original input - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - """ - - inp = i['input'] - - artifact = inp.get('artifact', '') - if artifact == '.': - del (inp['artifact']) - elif ' ' in artifact: # or ',' in artifact: - del (inp['artifact']) - if 'parsed_artifact' in inp: - del (inp['parsed_artifact']) - # Force substitute tags - inp['tags'] = artifact.replace(' ', ',') - - return {'return': 0} - - ########################################################################## - def prune_input(self, i): - """ - Leave only input keys and remove the rest (to regenerate CM commands) - - Args: - - input 
(dict) : original input - (extra_keys_starts_with) (list): remove keys that starts - with the ones from this list - - Returns: - (CM return dict): - - new_input (dict): pruned input - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - """ - - import copy - - inp = i['input'] - extra_keys = i.get('extra_keys_starts_with', []) - - i_run_cmd_arc = copy.deepcopy(inp) - for k in inp: - remove = False - if k in ['action', 'automation', 'cmd', 'out', - 'parsed_automation', 'parsed_artifact', 'self_module']: - remove = True - if not remove: - for ek in extra_keys: - if k.startswith(ek): - remove = True - break - - if remove: - del (i_run_cmd_arc[k]) - - return {'return': 0, 'new_input': i_run_cmd_arc} - - ########################################################################## - - def uid(self, i): - """ - Generate CM UID. - - Args: - (CM input dict): empty dict - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - - * uid (str): CM UID - """ - - console = i.get('out') == 'con' - - r = utils.gen_uid() - - if console: - print(r['uid']) - - return r - - ########################################################################## - - def system(self, i): - """ - Run system command and redirect output to string. - - Args: - (CM input dict): - - * cmd (str): command line - * (path) (str): go to this directory and return back to current - * (stdout) (str): stdout file - * (stderr) (str): stderr file - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - - * ret (int): return code - * std (str): stdout + stderr - * stdout (str): stdout - * stderr (str): stderr - """ - - cmd = i['cmd'] - - if cmd == '': - return {'return': 1, 'error': 'cmd is empty'} - - path = i.get('path', '') - if path != '' and os.path.isdir(path): - cur_dir = os.getcwd() - os.chdir(path) - - if i.get('stdout', '') != '': - fn1 = i['stdout'] - fn1_delete = False - else: - r = utils.gen_tmp_file({}) - if r['return'] > 0: - return r - fn1 = r['file_name'] - fn1_delete = True - - if i.get('stderr', '') != '': - fn2 = i['stderr'] - fn2_delete = False - else: - r = utils.gen_tmp_file({}) - if r['return'] > 0: - return r - fn2 = r['file_name'] - fn2_delete = True - - cmd += ' > ' + fn1 + ' 2> ' + fn2 - rx = os.system(cmd) - - std = '' - stdout = '' - stderr = '' - - if os.path.isfile(fn1): - r = utils.load_txt(file_name=fn1, remove_after_read=fn1_delete) - if r['return'] == 0: - stdout = r['string'].strip() - - if os.path.isfile(fn2): - r = utils.load_txt(file_name=fn2, remove_after_read=fn2_delete) - if r['return'] == 0: - stderr = r['string'].strip() - - std = stdout - if stderr != '': - if std != '': - std += '\n' - std += stderr - - if path != '' and os.path.isdir(path): - os.chdir(cur_dir) - - return {'return': 0, 'ret': rx, 'stdout': stdout, - 'stderr': stderr, 'std': std} - - ############################################################ - def load_cfg(self, i): - """ - Load configuration artifacts and files - - Args: - (CM input dict): - - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - - """ - - return utils.call_internal_module( - self, __file__, 'module_cfg', 'load_cfg', i) - - ############################################################ - def select_cfg(self, i): - """ - Select cfg interactively - - Args: 
- (CM input dict): - tags (str): list of tags to find cfg - alias (str): alias of a cfg file - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - - """ - - i['self_module'] = self - - return utils.call_internal_module( - self, __file__, 'module_cfg', 'select_cfg', i) - - ############################################################ - def print_yaml(self, i): - """ - Print YAML file - - Args: - (CM input dict): - file (str): input file - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - - """ - - filename = i.get('file', '') - if filename == '': - return {'return': 1, 'error': 'please specify --file={YAML file}'} - - r = utils.load_yaml(filename, check_if_exists=True) - if r['return'] > 0: - return r - - meta = r['meta'] - - import json - print(json.dumps(meta, indent=2)) - - return {'return': 0} - - ############################################################ - def print_json(self, i): - """ - Print YAML file - - Args: - (CM input dict): - file (str): input file - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - - """ - - filename = i.get('file', '') - if filename == '': - return {'return': 1, 'error': 'please specify --file={JSON file}'} - - r = utils.load_json(filename, check_if_exists=True) - if r['return'] > 0: - return r - - meta = r['meta'] - - import json - print(json.dumps(meta, indent=2)) - - return {'return': 0} diff --git a/automation/utils/module_cfg.py b/automation/utils/module_cfg.py deleted file mode 100644 index 36ec30915..000000000 --- a/automation/utils/module_cfg.py +++ /dev/null @@ -1,339 +0,0 @@ -import os -import cmind -import copy - -base_path = {} -base_path_meta = {} - -########################################################################## - - -def load_cfg(i): - - tags = i.get('tags', '') - artifact = i.get('artifact', '') - - key = i.get('key', '') - key_end = i.get('key_end', []) - - ii = {'action': 'find', - 'automation': 'cfg'} - if artifact != '': - ii['artifact'] = artifact - elif tags != '': - ii['tags'] = tags - - r = cmind.access(ii) - if r['return'] > 0: - return r - - lst = r['list'] - - prune = i.get('prune', {}) - prune_key = prune.get('key', '') - prune_key_uid = prune.get('key_uid', '') - prune_meta_key = prune.get('meta_key', '') - prune_meta_key_uid = prune.get('meta_key_uid', '') - prune_uid = prune.get('uid', '') - prune_list = prune.get('list', []) - - # Checking individual files inside CM entry - selection = [] - - if i.get('skip_files', False): - for l in lst: - meta = l.meta - full_path = l.path - - meta['full_path'] = full_path - - add = True - - if prune_key != '' and prune_key_uid != '': - if prune_key_uid not in meta.get(prune_key, []): - add = False - - if add: - selection.append(meta) - else: - for l in lst: - path = l.path - - main_meta = l.meta - - skip = False - - if prune_meta_key != '' and prune_meta_key_uid != '': - if prune_meta_key_uid not in main_meta.get(prune_meta_key, []): - skip = True - - if skip: - continue - - all_tags = main_meta.get('tags', []) - - files = os.listdir(path) - - for f in files: - if key != '' and not f.startswith(key): - continue - - if f.startswith('_') or (not f.endswith( - '.json') and not f.endswith('.yaml')): - continue - - if len(key_end) > 0: - skip = True - for ke in key_end: - if f.endswith(ke): - skip = False - break - 
if skip: - continue - - full_path = os.path.join(path, f) - - full_path_without_ext = full_path[:-5] - - r = cmind.utils.load_yaml_and_json(full_path_without_ext) - if r['return'] > 0: - print('Warning: problem loading file {}'.format(full_path)) - else: - meta = r['meta'] - - # Check base - r = process_base(meta, full_path) - if r['return'] > 0: - return r - meta = r['meta'] - - uid = meta['uid'] - - # Check pruning - add = True - - if len(prune) > 0: - if prune_uid != '' and uid != prune_uid: - add = False - - if add and len( - prune_list) > 0 and uid not in prune_list: - add = False - - if add and prune_key != '' and prune_key_uid != '' and prune_key_uid != meta.get( - prune_key, None): - add = False - - if add: - meta['full_path'] = full_path - - add_all_tags = copy.deepcopy(all_tags) - - name = meta.get('name', '') - if name == '': - name = ' '.join(meta.get('tags', [])) - name = name.strip() - meta['name'] = name - - file_tags = meta.get('tags', '').strip() - if file_tags == '': - if name != '': - add_all_tags += [v.lower() - for v in name.split(' ')] - else: - add_all_tags += file_tags.split(',') - - meta['all_tags'] = add_all_tags - - meta['main_meta'] = main_meta - - selection.append(meta) - - return {'return': 0, 'lst': lst, 'selection': selection} - -########################################################################## - - -def process_base(meta, full_path): - - global base_path, base_path_meta - - _base = meta.get('_base', '') - if _base != '': - name = '' - - filename = _base - full_path_base = os.path.dirname(full_path) - - if not filename.endswith('.yaml') and not filename.endswith('.json'): - return {'return': 1, 'error': '_base file {} in {} must be .yaml or .json'.format( - filename, full_path)} - - if ':' in _base: - x = _base.split(':') - name = x[0] - - full_path_base = base_path.get(name, '') - if full_path_base == '': - - # Find artifact - r = cmind.access({'action': 'find', - 'automation': 'cfg', - 'artifact': name}) - if r['return'] > 0: - return r - - lst = r['list'] - - if len(lst) == 0: - if not os.path.isfile(path): - return {'return': 1, 'error': '_base artifact {} not found in {}'.format( - name, full_path)} - - full_path_base = lst[0].path - - base_path[name] = full_path_base - - filename = x[1] - - # Load base - path = os.path.join(full_path_base, filename) - - if not os.path.isfile(path): - return {'return': 1, 'error': '_base file {} not found in {}'.format( - filename, full_path)} - - if path in base_path_meta: - base = copy.deepcopy(base_path_meta[path]) - else: - path_without_ext = path[:-5] - - r = cmind.utils.load_yaml_and_json(path_without_ext) - if r['return'] > 0: - return r - - base = r['meta'] - - base_path_meta[path] = copy.deepcopy(base) - - for k in meta: - v = meta[k] - - if k not in base: - base[k] = v - else: - if isinstance(v, str): - # Only merge a few special keys and overwrite the rest - if k in ['tags', 'name']: - base[k] += meta[k] - else: - base[k] = meta[k] - elif isinstance(v, list): - for vv in v: - base[k].append(vv) - elif isinstance(v, dict): - base[k].merge(v) - - meta = base - - return {'return': 0, 'meta':meta} - -########################################################################## - -def select_cfg(i): - - self_module = i['self_module'] - tags = i['tags'] - alias = i.get('alias', '') - uid = i.get('uid', '') - title = i.get('title', '') - - # Check if alias is not provided - r = self_module.cmind.access({'action': 'find', 'automation':'cfg', 'tags':'basic,docker,configurations'}) - if r['return'] > 0: - return r - 
- lst = r['list'] - - selector = [] - - # Do coarse-grain search for CM artifacts - for l in lst: - p = l.path - - if alias != '': - for ext in ['.json', '.yaml']: - p1 = os.path.join(p, alias +ext) - if os.path.isfile(p1): - selector.append({'path': p1, 'alias':alias}) - break - - else: - files = os.listdir(p) - - for f in files: - if not f.startswith('_cm') and ( - f.endswith('.json') or f.endswith('.yaml')): - selector.append({'path': os.path.join(p, f), 'alias':f[:-5]}) - - # Load meta for name and UID - selector_with_meta = [] - for s in range(0, len(selector)): - ss = selector[s] - - path = ss['path'] - - full_path_without_ext = path[:-5] - - r = cmind.utils.load_yaml_and_json(full_path_without_ext) - if r['return'] >0: - print('Warning: problem loading configuration file {}'.format(path)) - - meta = r['meta'] - - if uid == '' or meta.get('uid', '') == uid: - ss['meta'] = meta - selector_with_meta.append(ss) - - # Quit if no configurations found - if len(selector_with_meta) == 0: - return {'return': 16, 'error':'configuration was not found'} - - select = 0 - if len(selector_with_meta) > 1: - xtitle = ' ' + title if title != '' else '' - print('') - print('Available{} configurations:'.format(xtitle)) - - print('') - - selector_with_meta = sorted(selector_with_meta, key = lambda x: x['meta'].get('name', '')) - s = 0 - for ss in selector_with_meta: - alias = ss['alias'] - uid = ss['meta'].get('uid', '') - name = ss['meta'].get('name', '') - - x = name - if x!='': - x+=' ' - x += '(' + uid + ')' - - print(f'{s}) {x}'.format(s, x)) - - s += 1 - - print('') - select = input('Enter configuration number of press Enter for 0: ') - - if select.strip() == '': - select = '0' - - select = int(select) - - if select <0 or select>=len(selector): - return {'return': 1, 'error':'selection is out of range'} - - ss = selector_with_meta[select] - - return {'return': 0, 'selection':ss} diff --git a/script/activate-python-venv/customize.py b/script/activate-python-venv/customize.py index 1d0e96c3c..2740bb9d8 100644 --- a/script/activate-python-venv/customize.py +++ b/script/activate-python-venv/customize.py @@ -12,9 +12,9 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') - name = env.get('CM_NAME', '') + name = env.get('MLC_NAME', '') if name != '': name = name.strip().lower() diff --git a/script/activate-python-venv/run.bat b/script/activate-python-venv/run.bat index 5ca2ac0ed..76b2bfe18 100644 --- a/script/activate-python-venv/run.bat +++ b/script/activate-python-venv/run.bat @@ -1,7 +1,7 @@ echo. -echo call "%CM_VIRTUAL_ENV_SCRIPTS_PATH%\activate.bat && cmd" +echo call "%MLC_VIRTUAL_ENV_SCRIPTS_PATH%\activate.bat && cmd" echo. echo Enter exit to exit virtual env. echo. -call %CM_VIRTUAL_ENV_SCRIPTS_PATH%\activate.bat && cmd +call %MLC_VIRTUAL_ENV_SCRIPTS_PATH%\activate.bat && cmd diff --git a/script/activate-python-venv/run.sh b/script/activate-python-venv/run.sh index 6569b07e5..0753ad888 100644 --- a/script/activate-python-venv/run.sh +++ b/script/activate-python-venv/run.sh @@ -1,9 +1,9 @@ #!/bin/bash echo "" -echo " bash --init-file ${CM_VIRTUAL_ENV_SCRIPTS_PATH}/activate" +echo " bash --init-file ${MLC_VIRTUAL_ENV_SCRIPTS_PATH}/activate" echo "" echo " Enter exit to exit virtual env." 
echo "" -bash --init-file ${CM_VIRTUAL_ENV_SCRIPTS_PATH}/activate +bash --init-file ${MLC_VIRTUAL_ENV_SCRIPTS_PATH}/activate diff --git a/script/add-custom-nvidia-system/customize.py b/script/add-custom-nvidia-system/customize.py index 714ce821d..b711c5664 100644 --- a/script/add-custom-nvidia-system/customize.py +++ b/script/add-custom-nvidia-system/customize.py @@ -19,6 +19,6 @@ def postprocess(i): env = i['env'] - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH'] + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_MLPERF_INFERENCE_NVIDIA_CODE_PATH'] return {'return': 0} diff --git a/script/add-custom-nvidia-system/run.sh b/script/add-custom-nvidia-system/run.sh index b89617f7f..8ce1d0b64 100644 --- a/script/add-custom-nvidia-system/run.sh +++ b/script/add-custom-nvidia-system/run.sh @@ -1,5 +1,5 @@ #!/bin/bash CUR=$PWD -cd ${CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH} -${CM_PYTHON_BIN_WITH_PATH} scripts/custom_systems/add_custom_system.py +cd ${MLC_MLPERF_INFERENCE_NVIDIA_CODE_PATH} +${MLC_PYTHON_BIN_WITH_PATH} scripts/custom_systems/add_custom_system.py test $? -eq 0 || exit $? diff --git a/script/app-image-classification-onnx-py/customize.py b/script/app-image-classification-onnx-py/customize.py index e3371f217..338986722 100644 --- a/script/app-image-classification-onnx-py/customize.py +++ b/script/app-image-classification-onnx-py/customize.py @@ -29,10 +29,10 @@ def postprocess(i): data = state.get('cm_app_image_classification_onnx_py', {}) - fjson = 'cm-image-classification-onnx-py.json' - fyaml = 'cm-image-classification-onnx-py.yaml' + fjson = 'mlc-image-classification-onnx-py.json' + fyaml = 'mlc-image-classification-onnx-py.yaml' - output = env.get('CM_APP_IMAGE_CLASSIFICATION_ONNX_PY_OUTPUT', '') + output = env.get('MLC_APP_IMAGE_CLASSIFICATION_ONNX_PY_OUTPUT', '') if output != '': if not os.path.exists(output): os.makedirs(output) @@ -56,7 +56,7 @@ def postprocess(i): top_classification = data.get('top_classification', '') - if env.get('CM_TMP_SILENT', '') != 'yes': + if env.get('MLC_TMP_SILENT', '') != 'yes': if top_classification != '': print('') x = 'Top classification: {}'.format(top_classification) diff --git a/script/app-image-classification-onnx-py/meta.yaml b/script/app-image-classification-onnx-py/meta.yaml index e53b91ec2..82a559f8f 100644 --- a/script/app-image-classification-onnx-py/meta.yaml +++ b/script/app-image-classification-onnx-py/meta.yaml @@ -16,8 +16,8 @@ tags: tags_help: "modular python app image-classification onnx" default_env: - CM_BATCH_COUNT: '1' - CM_BATCH_SIZE: '1' + MLC_BATCH_COUNT: '1' + MLC_BATCH_SIZE: '1' deps: @@ -81,12 +81,12 @@ variations: USE_CPU: yes input_mapping: - input: CM_IMAGE - output: CM_APP_IMAGE_CLASSIFICATION_ONNX_PY_OUTPUT + input: MLC_IMAGE + output: MLC_APP_IMAGE_CLASSIFICATION_ONNX_PY_OUTPUT new_env_keys: - - CM_APP_IMAGE_CLASSIFICATION_ONNX_PY* + - MLC_APP_IMAGE_CLASSIFICATION_ONNX_PY* new_state_keys: @@ -107,13 +107,13 @@ docker: skip_cm_sys_upgrade: 'yes' cm_repo_flags: '--branch=dev' use_host_group_id: 'yes' - image_tag_extra: '-cm-dev' + image_tag_extra: '-mlc-dev' input_paths: - input - - env.CM_IMAGE + - env.MLC_IMAGE - output skip_input_for_fake_run: - input - - env.CM_IMAGE + - env.MLC_IMAGE - output - j diff --git a/script/app-image-classification-onnx-py/run.bat b/script/app-image-classification-onnx-py/run.bat index ee7db9867..c3aa5611a 100644 --- a/script/app-image-classification-onnx-py/run.bat +++ b/script/app-image-classification-onnx-py/run.bat @@ -1,29 +1,29 @@ -rem echo 
%CM_PYTHON_BIN% -rem echo %CM_DATASET_PATH% -rem echo %CM_DATASET_AUX_PATH% -rem echo %CM_ML_MODEL_FILE_WITH_PATH% +rem echo %MLC_PYTHON_BIN% +rem echo %MLC_DATASET_PATH% +rem echo %MLC_DATASET_AUX_PATH% +rem echo %MLC_ML_MODEL_FILE_WITH_PATH% rem connect CM intelligent components with CK env -set CK_ENV_ONNX_MODEL_ONNX_FILEPATH=%CM_ML_MODEL_FILE_WITH_PATH% +set CK_ENV_ONNX_MODEL_ONNX_FILEPATH=%MLC_ML_MODEL_FILE_WITH_PATH% set CK_ENV_ONNX_MODEL_INPUT_LAYER_NAME=input_tensor:0 set CK_ENV_ONNX_MODEL_OUTPUT_LAYER_NAME=softmax_tensor:0 -set CK_ENV_DATASET_IMAGENET_VAL=%CM_DATASET_PATH% -set CK_CAFFE_IMAGENET_SYNSET_WORDS_TXT=%CM_DATASET_AUX_PATH%\synset_words.txt +set CK_ENV_DATASET_IMAGENET_VAL=%MLC_DATASET_PATH% +set CK_CAFFE_IMAGENET_SYNSET_WORDS_TXT=%MLC_DATASET_AUX_PATH%\synset_words.txt set ML_MODEL_DATA_LAYOUT=NCHW -set CK_BATCH_SIZE=%CM_BATCH_SIZE% -set CK_BATCH_COUNT=%CM_BATCH_COUNT% +set CK_BATCH_SIZE=%MLC_BATCH_SIZE% +set CK_BATCH_COUNT=%MLC_BATCH_COUNT% -IF NOT DEFINED CM_TMP_CURRENT_SCRIPT_PATH SET CM_TMP_CURRENT_SCRIPT_PATH=%CD% +IF NOT DEFINED MLC_TMP_CURRENT_SCRIPT_PATH SET MLC_TMP_CURRENT_SCRIPT_PATH=%CD% -IF DEFINED CM_INPUT SET CM_IMAGE=%CM_INPUT% +IF DEFINED MLC_INPUT SET MLC_IMAGE=%MLC_INPUT% echo. -%CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt +%MLC_PYTHON_BIN_WITH_PATH% -m pip install -r %MLC_TMP_CURRENT_SCRIPT_PATH%\requirements.txt IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% echo. -%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\src\onnx_classify.py +%MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\src\onnx_classify.py IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% rem Just a demo to pass environment variables from native scripts back to CM workflows -echo CM_APP_IMAGE_CLASSIFICATION_ONNX_PY=sucess > tmp-run-env.out +echo MLC_APP_IMAGE_CLASSIFICATION_ONNX_PY=sucess > tmp-run-env.out diff --git a/script/app-image-classification-onnx-py/run.sh b/script/app-image-classification-onnx-py/run.sh index 62b07e1f1..4325faf5a 100644 --- a/script/app-image-classification-onnx-py/run.sh +++ b/script/app-image-classification-onnx-py/run.sh @@ -1,37 +1,37 @@ #!/bin/bash -if [[ ${CM_RUN_DOCKER_CONTAINER} == "yes" ]]; then +if [[ ${MLC_RUN_DOCKER_CONTAINER} == "yes" ]]; then exit 0 fi -#echo ${CM_PYTHON_BIN} -#echo ${CM_DATASET_PATH} -#echo ${CM_DATASET_AUX_PATH} -#echo ${CM_ML_MODEL_FILE_WITH_PATH} -CM_PYTHON_BIN=${CM_PYTHON_BIN_WITH_PATH:-python3} -CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD} +#echo ${MLC_PYTHON_BIN} +#echo ${MLC_DATASET_PATH} +#echo ${MLC_DATASET_AUX_PATH} +#echo ${MLC_ML_MODEL_FILE_WITH_PATH} +MLC_PYTHON_BIN=${MLC_PYTHON_BIN_WITH_PATH:-python3} +MLC_TMP_CURRENT_SCRIPT_PATH=${MLC_TMP_CURRENT_SCRIPT_PATH:-$PWD} # connect CM intelligent components with CK env -export CK_ENV_ONNX_MODEL_ONNX_FILEPATH=${CM_ML_MODEL_FILE_WITH_PATH} +export CK_ENV_ONNX_MODEL_ONNX_FILEPATH=${MLC_ML_MODEL_FILE_WITH_PATH} export CK_ENV_ONNX_MODEL_INPUT_LAYER_NAME="input_tensor:0" export CK_ENV_ONNX_MODEL_OUTPUT_LAYER_NAME="softmax_tensor:0" -export CK_ENV_DATASET_IMAGENET_VAL=${CM_DATASET_PATH} -export CK_CAFFE_IMAGENET_SYNSET_WORDS_TXT=${CM_DATASET_AUX_PATH}/synset_words.txt +export CK_ENV_DATASET_IMAGENET_VAL=${MLC_DATASET_PATH} +export CK_CAFFE_IMAGENET_SYNSET_WORDS_TXT=${MLC_DATASET_AUX_PATH}/synset_words.txt export ML_MODEL_DATA_LAYOUT="NCHW" -export CK_BATCH_SIZE=${CM_BATCH_SIZE} -export CK_BATCH_COUNT=${CM_BATCH_COUNT} +export CK_BATCH_SIZE=${MLC_BATCH_SIZE} +export CK_BATCH_COUNT=${MLC_BATCH_COUNT} -if [[ "${CM_INPUT}" 
!= "" ]]; then export CM_IMAGE=${CM_INPUT}; fi +if [[ "${MLC_INPUT}" != "" ]]; then export MLC_IMAGE=${MLC_INPUT}; fi -PIP_EXTRA=`${CM_PYTHON_BIN} -c "import importlib.metadata; print(' --break-system-packages ' if int(importlib.metadata.version('pip').split('.')[0]) >= 23 else '')"` +PIP_EXTRA=`${MLC_PYTHON_BIN} -c "import importlib.metadata; print(' --break-system-packages ' if int(importlib.metadata.version('pip').split('.')[0]) >= 23 else '')"` echo "" -${CM_PYTHON_BIN} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt ${PIP_EXTRA} +${MLC_PYTHON_BIN} -m pip install -r ${MLC_TMP_CURRENT_SCRIPT_PATH}/requirements.txt ${PIP_EXTRA} test $? -eq 0 || exit 1 echo "" -${CM_PYTHON_BIN} ${CM_TMP_CURRENT_SCRIPT_PATH}/src/onnx_classify.py +${MLC_PYTHON_BIN} ${MLC_TMP_CURRENT_SCRIPT_PATH}/src/onnx_classify.py test $? -eq 0 || exit 1 # Just a demo to pass environment variables from native scripts back to CM workflows -echo "CM_APP_IMAGE_CLASSIFICATION_ONNX_PY=sucess" > tmp-run-env.out +echo "MLC_APP_IMAGE_CLASSIFICATION_ONNX_PY=sucess" > tmp-run-env.out diff --git a/script/app-image-classification-onnx-py/src/onnx_classify.py b/script/app-image-classification-onnx-py/src/onnx_classify.py index c2c5a6ceb..5ce1dd4db 100644 --- a/script/app-image-classification-onnx-py/src/onnx_classify.py +++ b/script/app-image-classification-onnx-py/src/onnx_classify.py @@ -156,8 +156,8 @@ def load_a_batch(batch_filenames): i) for i in range(batch_size)] # Grigori: trick to test models: - if os.environ.get('CM_IMAGE', '') != '': - batch_filenames = [os.environ['CM_IMAGE']] + if os.environ.get('MLC_IMAGE', '') != '': + batch_filenames = [os.environ['MLC_IMAGE']] batch_data = load_a_batch(batch_filenames) # print(batch_data.shape) diff --git a/script/app-image-classification-onnx-py/tests/README.md b/script/app-image-classification-onnx-py/tests/README.md index 899509cb7..15254aa91 100644 --- a/script/app-image-classification-onnx-py/tests/README.md +++ b/script/app-image-classification-onnx-py/tests/README.md @@ -1,9 +1,9 @@ ```bash docker system prune -a -f -cmr "download file _wget" --url=https://cKnowledge.org/ai/data/computer_mouse.jpg --verify=no --env.CM_DOWNLOAD_CHECKSUM=45ae5c940233892c2f860efdf0b66e7e +cmr "download file _wget" --url=https://cKnowledge.org/ai/data/computer_mouse.jpg --verify=no --env.MLC_DOWNLOAD_CHECKSUM=45ae5c940233892c2f860efdf0b66e7e -cm docker script "python app image-classification onnx" --docker_cm_repo=ctuning@mlcommons-ck --env.CM_IMAGE=computer_mouse.jpg +cm docker script "python app image-classification onnx" --docker_cm_repo=ctuning@mlcommons-ck --env.MLC_IMAGE=computer_mouse.jpg cm docker script "python app image-classification onnx" --docker_cm_repo=ctuning@mlcommons-ck --input=computer_mouse.jpg cmrd "python app image-classification onnx" --docker_cm_repo=ctuning@mlcommons-ck --input=computer_mouse.jpg -j --docker_it diff --git a/script/app-image-classification-tf-onnx-cpp/include/benchmark.h b/script/app-image-classification-tf-onnx-cpp/include/benchmark.h index 42b0418fc..4951aa232 100644 --- a/script/app-image-classification-tf-onnx-cpp/include/benchmark.h +++ b/script/app-image-classification-tf-onnx-cpp/include/benchmark.h @@ -124,7 +124,7 @@ class BenchmarkSettings { const int num_classes = 1000; const bool normalize_img = getenv_s("CK_ENV_TENSORFLOW_MODEL_NORMALIZE_DATA") == "YES"; const bool subtract_mean = getenv_s("CK_ENV_TENSORFLOW_MODEL_SUBTRACT_MEAN") == "YES"; - const char *given_channel_means_str = getenv("CM_ML_MODEL_GIVEN_CHANNEL_MEANS"); + const 
char *given_channel_means_str = getenv("MLC_ML_MODEL_GIVEN_CHANNEL_MEANS"); const bool full_report = getenv_i("CK_SILENT_MODE") == 0; diff --git a/script/app-image-classification-tf-onnx-cpp/meta.yaml b/script/app-image-classification-tf-onnx-cpp/meta.yaml index c7ee8b560..957a0d28f 100644 --- a/script/app-image-classification-tf-onnx-cpp/meta.yaml +++ b/script/app-image-classification-tf-onnx-cpp/meta.yaml @@ -3,8 +3,8 @@ automation_alias: script automation_uid: 5b4e0237da074764 category: Modular AI/ML application pipeline default_env: - CM_BATCH_COUNT: '1' - CM_BATCH_SIZE: '1' + MLC_BATCH_COUNT: '1' + MLC_BATCH_SIZE: '1' deps: - tags: detect,os - tags: get,sys-utils-cm diff --git a/script/app-image-classification-tf-onnx-cpp/run.sh b/script/app-image-classification-tf-onnx-cpp/run.sh index b4a46853b..8133da599 100644 --- a/script/app-image-classification-tf-onnx-cpp/run.sh +++ b/script/app-image-classification-tf-onnx-cpp/run.sh @@ -1,6 +1,6 @@ #!/bin/bash -CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD} -${CM_CXX_COMPILER_WITH_PATH} -O3 ${CM_TMP_CURRENT_SCRIPT_PATH}/src/classification.cpp -o classification.exe -ltensorflow +MLC_TMP_CURRENT_SCRIPT_PATH=${MLC_TMP_CURRENT_SCRIPT_PATH:-$PWD} +${MLC_CXX_COMPILER_WITH_PATH} -O3 ${MLC_TMP_CURRENT_SCRIPT_PATH}/src/classification.cpp -o classification.exe -ltensorflow test $? -eq 0 || exit 1 diff --git a/script/app-image-classification-torch-py/meta.yaml b/script/app-image-classification-torch-py/meta.yaml index 6684bb737..44736f7a2 100644 --- a/script/app-image-classification-torch-py/meta.yaml +++ b/script/app-image-classification-torch-py/meta.yaml @@ -3,8 +3,8 @@ automation_alias: script automation_uid: 5b4e0237da074764 category: Modular AI/ML application pipeline default_env: - CM_BATCH_COUNT: '1' - CM_BATCH_SIZE: '1' + MLC_BATCH_COUNT: '1' + MLC_BATCH_SIZE: '1' deps: - tags: detect,os - names: diff --git a/script/app-image-classification-torch-py/run.bat b/script/app-image-classification-torch-py/run.bat index 1415d4265..f4ec7e5d0 100644 --- a/script/app-image-classification-torch-py/run.bat +++ b/script/app-image-classification-torch-py/run.bat @@ -1,20 +1,20 @@ rem connect CM portable scripts with CK env -set CM_ML_TORCH_MODEL_NAME=resnet50 -set CM_ML_MODEL_INPUT_DATA_TYPE=float32 -set CM_ML_MODEL_IMAGE_HEIGHT=224 -set CM_ML_MODEL_IMAGE_WIDTH=224 +set MLC_ML_TORCH_MODEL_NAME=resnet50 +set MLC_ML_MODEL_INPUT_DATA_TYPE=float32 +set MLC_ML_MODEL_IMAGE_HEIGHT=224 +set MLC_ML_MODEL_IMAGE_WIDTH=224 -rem set CM_DATASET_IMAGENET_PREPROCESSED_DIR=%CM_DATASET_PREPROCESSED_PATH% +rem set MLC_DATASET_IMAGENET_PREPROCESSED_DIR=%MLC_DATASET_PREPROCESSED_PATH% -set CM_DATASET_IMAGENET_PREPROCESSED_DIR=%CM_DATASET_PREPROCESSED_FULL_PATH% -set CM_CAFFE_IMAGENET_SYNSET_WORDS_TXT=%CM_DATASET_AUX_PATH%\synset_words.txt -set CM_DATASET_IMAGENET_PREPROCESSED_DATA_TYPE=float32 -set CM_RESULTS_DIR=%CM_TMP_CURRENT_SCRIPT_PATH%\results +set MLC_DATASET_IMAGENET_PREPROCESSED_DIR=%MLC_DATASET_PREPROCESSED_FULL_PATH% +set MLC_CAFFE_IMAGENET_SYNSET_WORDS_TXT=%MLC_DATASET_AUX_PATH%\synset_words.txt +set MLC_DATASET_IMAGENET_PREPROCESSED_DATA_TYPE=float32 +set MLC_RESULTS_DIR=%MLC_TMP_CURRENT_SCRIPT_PATH%\results set ML_MODEL_DATA_LAYOUT=NCHW -%CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt +%MLC_PYTHON_BIN_WITH_PATH% -m pip install -r %MLC_TMP_CURRENT_SCRIPT_PATH%\requirements.txt IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% -%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\src\pytorch_classify_preprocessed.py 
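Both the Windows and Linux wrappers above follow the same fail-fast contract: every step is followed by `IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%` or `test $? -eq 0 || exit 1`, so the first failing command aborts the script with the child's exit code. The same pattern, sketched in Python purely for illustration (the helper is hypothetical, not part of these scripts):

```python
import subprocess
import sys


def run_step(cmd):
    # Propagate the child's exit code, as the native wrappers do
    rc = subprocess.call(cmd)
    if rc != 0:
        sys.exit(rc)


run_step([sys.executable, '-m', 'pip', '--version'])  # example step
```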
+%MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\src\pytorch_classify_preprocessed.py IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/app-image-classification-torch-py/run.sh b/script/app-image-classification-torch-py/run.sh index b50b79eb4..478332299 100644 --- a/script/app-image-classification-torch-py/run.sh +++ b/script/app-image-classification-torch-py/run.sh @@ -1,20 +1,20 @@ #!/bin/bash -CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD} +MLC_TMP_CURRENT_SCRIPT_PATH=${MLC_TMP_CURRENT_SCRIPT_PATH:-$PWD} # connect CM intelligent components with CK env -export CM_ML_TORCH_MODEL_NAME=resnet50 -export CM_ML_MODEL_INPUT_DATA_TYPE=float32 -export CM_ML_MODEL_IMAGE_HEIGHT=224 -export CM_ML_MODEL_IMAGE_WIDTH=224 -export CM_DATASET_IMAGENET_PREPROCESSED_DIR=${CM_DATASET_PREPROCESSED_FULL_PATH} -export CM_CAFFE_IMAGENET_SYNSET_WORDS_TXT=${CM_DATASET_AUX_PATH}/synset_words.txt -export CM_DATASET_IMAGENET_PREPROCESSED_DATA_TYPE=float32 -export CM_RESULTS_DIR=${CM_TMP_CURRENT_SCRIPT_PATH}/results +export MLC_ML_TORCH_MODEL_NAME=resnet50 +export MLC_ML_MODEL_INPUT_DATA_TYPE=float32 +export MLC_ML_MODEL_IMAGE_HEIGHT=224 +export MLC_ML_MODEL_IMAGE_WIDTH=224 +export MLC_DATASET_IMAGENET_PREPROCESSED_DIR=${MLC_DATASET_PREPROCESSED_FULL_PATH} +export MLC_CAFFE_IMAGENET_SYNSET_WORDS_TXT=${MLC_DATASET_AUX_PATH}/synset_words.txt +export MLC_DATASET_IMAGENET_PREPROCESSED_DATA_TYPE=float32 +export MLC_RESULTS_DIR=${MLC_TMP_CURRENT_SCRIPT_PATH}/results export ML_MODEL_DATA_LAYOUT=NCHW -${CM_PYTHON_BIN} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt +${MLC_PYTHON_BIN} -m pip install -r ${MLC_TMP_CURRENT_SCRIPT_PATH}/requirements.txt test $? -eq 0 || exit 1 -${CM_PYTHON_BIN} ${CM_TMP_CURRENT_SCRIPT_PATH}/src/pytorch_classify_preprocessed.py +${MLC_PYTHON_BIN} ${MLC_TMP_CURRENT_SCRIPT_PATH}/src/pytorch_classify_preprocessed.py test $? 
-eq 0 || exit 1 diff --git a/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py b/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py index 863b3a651..ff20972c6 100644 --- a/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py +++ b/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py @@ -19,9 +19,9 @@ # Writing the results out: # -RESULTS_DIR = os.getenv('CM_RESULTS_DIR') +RESULTS_DIR = os.getenv('MLC_RESULTS_DIR') FULL_REPORT = os.getenv( - 'CM_SILENT_MODE', + 'MLC_SILENT_MODE', '0') in ( 'NO', 'no', @@ -31,14 +31,14 @@ # Processing by batches: # -BATCH_COUNT = int(os.getenv('CM_BATCH_COUNT', 1)) +BATCH_COUNT = int(os.getenv('MLC_BATCH_COUNT', 1)) # Enabling GPU if available and not disabled: # USE_CUDA = (os.getenv('USE_CUDA', '').strip() == 'yes') -labels_path = os.environ['CM_CAFFE_IMAGENET_SYNSET_WORDS_TXT'] +labels_path = os.environ['MLC_CAFFE_IMAGENET_SYNSET_WORDS_TXT'] def load_labels(labels_filepath): @@ -69,7 +69,7 @@ def main(): os.mkdir(RESULTS_DIR) # Load the [cached] Torch model - path_to_model_pth = os.environ['CM_ML_MODEL_FILE_WITH_PATH'] + path_to_model_pth = os.environ['MLC_ML_MODEL_FILE_WITH_PATH'] model = models.resnet50(pretrained=False) model.load_state_dict(torch.load(path_to_model_pth)) @@ -90,7 +90,7 @@ def main(): first_classification_time = 0 images_loaded = 0 - image_path = os.environ.get('CM_INPUT', '') + image_path = os.environ.get('MLC_INPUT', '') if image_path != '': normalize_data_bool = True diff --git a/script/app-image-classification-tvm-onnx-py/meta.yaml b/script/app-image-classification-tvm-onnx-py/meta.yaml index 2b5cc9cca..c0abe6398 100644 --- a/script/app-image-classification-tvm-onnx-py/meta.yaml +++ b/script/app-image-classification-tvm-onnx-py/meta.yaml @@ -7,8 +7,8 @@ automation_uid: 5b4e0237da074764 category: Modular AI/ML application pipeline default_env: - CM_BATCH_COUNT: '1' - CM_BATCH_SIZE: '1' + MLC_BATCH_COUNT: '1' + MLC_BATCH_SIZE: '1' deps: - tags: detect,os diff --git a/script/app-image-classification-tvm-onnx-py/run.sh b/script/app-image-classification-tvm-onnx-py/run.sh index 8eb066077..145a6c799 100644 --- a/script/app-image-classification-tvm-onnx-py/run.sh +++ b/script/app-image-classification-tvm-onnx-py/run.sh @@ -1,9 +1,9 @@ #!/bin/bash -CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD} +MLC_TMP_CURRENT_SCRIPT_PATH=${MLC_TMP_CURRENT_SCRIPT_PATH:-$PWD} -#if [[ ${CM_HOST_PLATFORM_FLAVOR} == "arm64" ]]; then -# ${CM_PYTHON_BIN} -m pip install -i https://test.pypi.org/simple/ onnxruntime==1.9.0.dev174552 +#if [[ ${MLC_HOST_PLATFORM_FLAVOR} == "arm64" ]]; then +# ${MLC_PYTHON_BIN} -m pip install -i https://test.pypi.org/simple/ onnxruntime==1.9.0.dev174552 #fi export USE_TVM=yes @@ -12,15 +12,15 @@ export USE_TVM=yes wget -nc https://raw.githubusercontent.com/mlcommons/ck-mlops/main/program/ml-task-image-classification-tvm-onnx-cpu/synset.txt test $? -eq 0 || exit 1 -${CM_PYTHON_BIN} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt +${MLC_PYTHON_BIN} -m pip install -r ${MLC_TMP_CURRENT_SCRIPT_PATH}/requirements.txt test $? 
-eq 0 || exit 1 -if [[ "${CM_INPUT}" != "" ]]; then - export CM_IMAGE=${CM_INPUT} +if [[ "${MLC_INPUT}" != "" ]]; then + export MLC_IMAGE=${MLC_INPUT} else - export CM_IMAGE=${CM_DATASET_PATH}/ILSVRC2012_val_00000001.JPEG + export MLC_IMAGE=${MLC_DATASET_PATH}/ILSVRC2012_val_00000001.JPEG fi -${CM_PYTHON_BIN} ${CM_TMP_CURRENT_SCRIPT_PATH}/src/classify.py --image ${CM_IMAGE} +${MLC_PYTHON_BIN} ${MLC_TMP_CURRENT_SCRIPT_PATH}/src/classify.py --image ${MLC_IMAGE} test $? -eq 0 || exit 1 diff --git a/script/app-image-classification-tvm-onnx-py/src/classify.py b/script/app-image-classification-tvm-onnx-py/src/classify.py index 20c164288..058c42bfa 100644 --- a/script/app-image-classification-tvm-onnx-py/src/classify.py +++ b/script/app-image-classification-tvm-onnx-py/src/classify.py @@ -107,9 +107,9 @@ def run_case(dtype, image, target): # plt.show() plt.savefig('pre-processed-image.png') # Load model - model_path = os.environ.get('CM_ML_MODEL_FILE_WITH_PATH', '') + model_path = os.environ.get('MLC_ML_MODEL_FILE_WITH_PATH', '') if model_path == '': - print('Error: environment variable CM_ML_MODEL_FILE_WITH_PATH is not defined') + print('Error: environment variable MLC_ML_MODEL_FILE_WITH_PATH is not defined') exit(1) opt = rt.SessionOptions() @@ -154,9 +154,9 @@ def run_case(dtype, image, target): build_conf = {'relay.backend.use_auto_scheduler': False} opt_lvl = int(os.environ.get('TVM_OPT_LEVEL', 3)) - host = os.environ.get('CM_HOST_PLATFORM_FLAVOR') + host = os.environ.get('MLC_HOST_PLATFORM_FLAVOR') if host == 'x86_64' and 'AMD' in os.environ.get( - 'CM_HOST_CPU_VENDOR_ID', ''): + 'MLC_HOST_CPU_VENDOR_ID', ''): target = os.environ.get('TVM_TARGET', 'llvm -mcpu=znver2') else: target = os.environ.get('TVM_TARGET', 'llvm') @@ -283,7 +283,7 @@ def run_case(dtype, image, target): args = parser.parse_args() if args.image.strip().lower() == '': - print('Please specify path to an image using CM_IMAGE environment variable!') + print('Please specify path to an image using MLC_IMAGE environment variable!') exit(1) # set parameter @@ -296,7 +296,7 @@ def run_case(dtype, image, target): out_shape = (batch_size, num_classes) dtype = 'float32' - if os.environ.get('CM_TVM_DTYPE', '') != '': - dtype = os.environ['CM_TVM_DTYPE'] + if os.environ.get('MLC_TVM_DTYPE', '') != '': + dtype = os.environ['MLC_TVM_DTYPE'] run_case(dtype, args.image, args.target) diff --git a/script/app-image-corner-detection/customize.py b/script/app-image-corner-detection/customize.py index f27ee028a..7b37eb663 100644 --- a/script/app-image-corner-detection/customize.py +++ b/script/app-image-corner-detection/customize.py @@ -8,28 +8,28 @@ def preprocess(i): env = i['env'] script_path = i['run_script_input']['path'] - env["CM_SOURCE_FOLDER_PATH"] = script_path - env['CM_C_SOURCE_FILES'] = "susan.c" + env["MLC_SOURCE_FOLDER_PATH"] = script_path + env['MLC_C_SOURCE_FILES'] = "susan.c" - if 'CM_INPUT' not in env: - env['CM_INPUT'] = os.path.join(script_path, 'data.pgm') + if 'MLC_INPUT' not in env: + env['MLC_INPUT'] = os.path.join(script_path, 'data.pgm') - if 'CM_OUTPUT' not in env: - env['CM_OUTPUT'] = 'output_image_with_corners.pgm' + if 'MLC_OUTPUT' not in env: + env['MLC_OUTPUT'] = 'output_image_with_corners.pgm' - if 'CM_RUN_DIR' not in env: + if 'MLC_RUN_DIR' not in env: output_path = os.path.join(script_path, "output") if output_path != '' and not os.path.isdir(output_path): os.makedirs(output_path) - env['CM_RUN_DIR'] = output_path + env['MLC_RUN_DIR'] = output_path - env['CM_RUN_SUFFIX'] = env['CM_INPUT'] + ' ' + 
env['CM_OUTPUT'] + ' -c' + env['MLC_RUN_SUFFIX'] = env['MLC_INPUT'] + ' ' + env['MLC_OUTPUT'] + ' -c' if os_info['platform'] == 'windows': - env['CM_BIN_NAME'] = 'image-corner.exe' + env['MLC_BIN_NAME'] = 'image-corner.exe' else: - env['CM_BIN_NAME'] = 'image-corner' + env['MLC_BIN_NAME'] = 'image-corner' env['+ LDCFLAGS'] = ["-lm"] return {'return': 0} @@ -38,6 +38,6 @@ def preprocess(i): def postprocess(i): env = i['env'] - print(env['CM_OUTPUT'] + " generated in " + env['CM_RUN_DIR']) + print(env['MLC_OUTPUT'] + " generated in " + env['MLC_RUN_DIR']) return {'return': 0} diff --git a/script/app-image-corner-detection/meta.yaml b/script/app-image-corner-detection/meta.yaml index 1fd27d9b6..2deedbde5 100644 --- a/script/app-image-corner-detection/meta.yaml +++ b/script/app-image-corner-detection/meta.yaml @@ -18,11 +18,11 @@ deps: posthook_deps: - skip_if_env: - CM_SKIP_COMPILE: + MLC_SKIP_COMPILE: - 'on' tags: compile,cpp-program - skip_if_env: - CM_SKIP_RUN: + MLC_SKIP_RUN: - 'on' tags: benchmark-program diff --git a/script/app-image-corner-detection/run.sh b/script/app-image-corner-detection/run.sh index 30cfbdd00..033e2f3aa 100644 --- a/script/app-image-corner-detection/run.sh +++ b/script/app-image-corner-detection/run.sh @@ -1,6 +1,6 @@ #!/bin/bash -CUR=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD} +CUR=${MLC_TMP_CURRENT_SCRIPT_PATH:-$PWD} mkdir -p $CUR"/output" test $? -eq 0 || exit 1 diff --git a/script/app-loadgen-generic-python/README-extra.md b/script/app-loadgen-generic-python/README-extra.md index cdd08ef41..6222b6574 100644 --- a/script/app-loadgen-generic-python/README-extra.md +++ b/script/app-loadgen-generic-python/README-extra.md @@ -49,7 +49,7 @@ including the above one, any time a script with python dependency is run. To avo can set up the following environment variable with the name of the current virtual environment: ```bash -export CM_SCRIPT_EXTRA_CMD="--adr.python.name=loadgen" +export MLC_SCRIPT_EXTRA_CMD="--adr.python.name=loadgen" ``` The `--adr` flag stands for "Add to all Dependencies Recursively" and will find all sub-dependencies on other CM scripts @@ -250,16 +250,16 @@ Available variations: Available flags mapped to environment variables: - --concurrency -> --env.CM_MLPERF_CONCURRENCY - --ep -> --env.CM_MLPERF_EXECUTION_PROVIDER - --execmode -> --env.CM_MLPERF_EXEC_MODE - --interop -> --env.CM_MLPERF_INTEROP - --intraop -> --env.CM_MLPERF_INTRAOP - --modelpath -> --env.CM_ML_MODEL_FILE_WITH_PATH - --output_dir -> --env.CM_MLPERF_OUTPUT_DIR - --runner -> --env.CM_MLPERF_RUNNER - --samples -> --env.CM_MLPERF_LOADGEN_SAMPLES - --scenario -> --env.CM_MLPERF_LOADGEN_SCENARIO + --concurrency -> --env.MLC_MLPERF_CONCURRENCY + --ep -> --env.MLC_MLPERF_EXECUTION_PROVIDER + --execmode -> --env.MLC_MLPERF_EXEC_MODE + --interop -> --env.MLC_MLPERF_INTEROP + --intraop -> --env.MLC_MLPERF_INTRAOP + --modelpath -> --env.MLC_ML_MODEL_FILE_WITH_PATH + --output_dir -> --env.MLC_MLPERF_OUTPUT_DIR + --runner -> --env.MLC_MLPERF_RUNNER + --samples -> --env.MLC_MLPERF_LOADGEN_SAMPLES + --scenario -> --env.MLC_MLPERF_LOADGEN_SCENARIO ``` @@ -272,8 +272,8 @@ cm docker script "python app loadgen-generic _onnxruntime _custom _huggingface _ ## Tuning CPU performance via CM experiment ```bash -cm run experiment --tags=loadgen,python,llama2 -- cmr script "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.steerapi/Llama-2-7b-chat-hf-onnx-awq-w8" --adr.hf-downloader.model_filename=onnx/decoder_model_merged_quantized.onnx,onnx/decoder_model_merged_quantized.onnx_data 
--samples=2 --intraop={{CM_OPT_INTRAOP{[1,2,4]}}} --interop={{CM_OPT_INTEROP{[1,2,4]}}} --quiet -cm run experiment --tags=loadgen,python,llama2 -- cmr "python app loadgen-generic _onnxruntime" --modelpath={PATH TO ONNX MODEL} --samples=2 --intraop={{CM_OPT_INTRAOP{[1,2,4]}}} --interop={{CM_OPT_INTEROP{[1,2,4]}}} --quiet +cm run experiment --tags=loadgen,python,llama2 -- cmr script "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.steerapi/Llama-2-7b-chat-hf-onnx-awq-w8" --adr.hf-downloader.model_filename=onnx/decoder_model_merged_quantized.onnx,onnx/decoder_model_merged_quantized.onnx_data --samples=2 --intraop={{MLC_OPT_INTRAOP{[1,2,4]}}} --interop={{MLC_OPT_INTEROP{[1,2,4]}}} --quiet +cm run experiment --tags=loadgen,python,llama2 -- cmr "python app loadgen-generic _onnxruntime" --modelpath={PATH TO ONNX MODEL} --samples=2 --intraop={{MLC_OPT_INTRAOP{[1,2,4]}}} --interop={{MLC_OPT_INTEROP{[1,2,4]}}} --quiet ``` diff --git a/script/app-loadgen-generic-python/customize.py b/script/app-loadgen-generic-python/customize.py index 55050fadb..9a4a6104e 100644 --- a/script/app-loadgen-generic-python/customize.py +++ b/script/app-loadgen-generic-python/customize.py @@ -11,81 +11,81 @@ def preprocess(i): env = i['env'] - if 'CM_ML_MODEL_FILE_WITH_PATH' not in env: + if 'MLC_ML_MODEL_FILE_WITH_PATH' not in env: return { 'return': 1, 'error': 'Please select a variation specifying the model to run'} - run_opts = env.get('CM_RUN_OPTS', '') + run_opts = env.get('MLC_RUN_OPTS', '') - if env.get('CM_MLPERF_BACKEND', '') != '': - run_opts += " -b " + env['CM_MLPERF_BACKEND'] + if env.get('MLC_MLPERF_BACKEND', '') != '': + run_opts += " -b " + env['MLC_MLPERF_BACKEND'] - if env.get('CM_MLPERF_RUNNER', '') != '': - run_opts += " -r " + env['CM_MLPERF_RUNNER'] + if env.get('MLC_MLPERF_RUNNER', '') != '': + run_opts += " -r " + env['MLC_MLPERF_RUNNER'] - if env.get('CM_MLPERF_CONCURRENCY', '') != '': - run_opts += " --concurrency " + env['CM_MLPERF_CONCURRENCY'] + if env.get('MLC_MLPERF_CONCURRENCY', '') != '': + run_opts += " --concurrency " + env['MLC_MLPERF_CONCURRENCY'] - if env.get('CM_MLPERF_EXECUTION_PROVIDER', '') != '': - run_opts += " --ep " + env['CM_MLPERF_EXECUTION_PROVIDER'] + if env.get('MLC_MLPERF_EXECUTION_PROVIDER', '') != '': + run_opts += " --ep " + env['MLC_MLPERF_EXECUTION_PROVIDER'] - if env.get('CM_MLPERF_INTRAOP', '') != '': - run_opts += " --intraop " + env['CM_MLPERF_INTRAOP'] + if env.get('MLC_MLPERF_INTRAOP', '') != '': + run_opts += " --intraop " + env['MLC_MLPERF_INTRAOP'] - if env.get('CM_MLPERF_INTEROP', '') != '': - run_opts += " --interop " + env['CM_MLPERF_INTEROP'] + if env.get('MLC_MLPERF_INTEROP', '') != '': + run_opts += " --interop " + env['MLC_MLPERF_INTEROP'] - if env.get('CM_MLPERF_EXECMODE', '') != '': - run_opts += " --execmode " + env['CM_MLPERF_EXECUTION_MODE'] + if env.get('MLC_MLPERF_EXECMODE', '') != '': + run_opts += " --execmode " + env['MLC_MLPERF_EXECUTION_MODE'] - if env.get('CM_MLPERF_LOADGEN_SAMPLES', '') != '': - run_opts += " --samples " + env['CM_MLPERF_LOADGEN_SAMPLES'] + if env.get('MLC_MLPERF_LOADGEN_SAMPLES', '') != '': + run_opts += " --samples " + env['MLC_MLPERF_LOADGEN_SAMPLES'] - if env.get('CM_MLPERF_LOADGEN_EXPECTED_QPS', '') != '': + if env.get('MLC_MLPERF_LOADGEN_EXPECTED_QPS', '') != '': run_opts += " --loadgen_expected_qps " + \ - env['CM_MLPERF_LOADGEN_EXPECTED_QPS'] + env['MLC_MLPERF_LOADGEN_EXPECTED_QPS'] - if env.get('CM_MLPERF_LOADGEN_DURATION_SEC', '') != '': + if 
env.get('MLC_MLPERF_LOADGEN_DURATION_SEC', '') != '': run_opts += " --loadgen_duration_sec " + \ - env['CM_MLPERF_LOADGEN_DURATION_SEC'] + env['MLC_MLPERF_LOADGEN_DURATION_SEC'] - if env.get('CM_MLPERF_OUTPUT_DIR', '') != '': - run_opts += " --output " + env['CM_MLPERF_OUTPUT_DIR'] + if env.get('MLC_MLPERF_OUTPUT_DIR', '') != '': + run_opts += " --output " + env['MLC_MLPERF_OUTPUT_DIR'] - if env.get('CM_ML_MODEL_CODE_WITH_PATH', '') != '': - run_opts += " --model_code " + env['CM_ML_MODEL_CODE_WITH_PATH'] + if env.get('MLC_ML_MODEL_CODE_WITH_PATH', '') != '': + run_opts += " --model_code " + env['MLC_ML_MODEL_CODE_WITH_PATH'] - if env.get('CM_ML_MODEL_CFG_WITH_PATH', '') != '': - run_opts += " --model_cfg " + env['CM_ML_MODEL_CFG_WITH_PATH'] + if env.get('MLC_ML_MODEL_CFG_WITH_PATH', '') != '': + run_opts += " --model_cfg " + env['MLC_ML_MODEL_CFG_WITH_PATH'] else: # Check cfg from command line - cfg = env.get('CM_ML_MODEL_CFG', {}) + cfg = env.get('MLC_ML_MODEL_CFG', {}) if len(cfg) > 0: - del (env['CM_ML_MODEL_CFG']) + del (env['MLC_ML_MODEL_CFG']) import json import tempfile tfile = tempfile.NamedTemporaryFile(mode="w+", suffix='.json') - fd, tfile = tempfile.mkstemp(suffix='.json', prefix='cm-cfg-') + fd, tfile = tempfile.mkstemp(suffix='.json', prefix='mlc-cfg-') os.close(fd) with open(tfile, 'w') as fd: json.dump(cfg, fd) - env['CM_APP_LOADGEN_GENERIC_PYTHON_TMP_CFG_FILE'] = tfile + env['MLC_APP_LOADGEN_GENERIC_PYTHON_TMP_CFG_FILE'] = tfile run_opts += " --model_cfg " + tfile - if env.get('CM_ML_MODEL_SAMPLE_WITH_PATH', '') != '': + if env.get('MLC_ML_MODEL_SAMPLE_WITH_PATH', '') != '': run_opts += " --model_sample_pickle " + \ - env['CM_ML_MODEL_SAMPLE_WITH_PATH'] + env['MLC_ML_MODEL_SAMPLE_WITH_PATH'] # Add path to file model weights at the end of command line - run_opts += ' ' + env['CM_ML_MODEL_FILE_WITH_PATH'] + run_opts += ' ' + env['MLC_ML_MODEL_FILE_WITH_PATH'] - env['CM_RUN_OPTS'] = run_opts + env['MLC_RUN_OPTS'] = run_opts print('') print('Assembled flags: {}'.format(run_opts)) @@ -98,7 +98,7 @@ def postprocess(i): env = i['env'] - tfile = env.get('CM_APP_LOADGEN_GENERIC_PYTHON_TMP_CFG_FILE', '') + tfile = env.get('MLC_APP_LOADGEN_GENERIC_PYTHON_TMP_CFG_FILE', '') if tfile != '' and os.path.isfile(tfile): os.remove(tfile) diff --git a/script/app-loadgen-generic-python/meta.yaml b/script/app-loadgen-generic-python/meta.yaml index 3e5fe56e1..ba5a3c616 100644 --- a/script/app-loadgen-generic-python/meta.yaml +++ b/script/app-loadgen-generic-python/meta.yaml @@ -23,31 +23,31 @@ tags_help: "python app generic loadgen" # Default environment default_env: - CM_MLPERF_EXECUTION_MODE: parallel - CM_MLPERF_BACKEND: onnxruntime + MLC_MLPERF_EXECUTION_MODE: parallel + MLC_MLPERF_BACKEND: onnxruntime # Map script inputs to environment variables input_mapping: - modelpath: CM_ML_MODEL_FILE_WITH_PATH - modelcodepath: CM_ML_MODEL_CODE_WITH_PATH - modelcfgpath: CM_ML_MODEL_CFG_WITH_PATH - modelcfg: CM_ML_MODEL_CFG - modelsamplepath: CM_ML_MODEL_SAMPLE_WITH_PATH - output_dir: CM_MLPERF_OUTPUT_DIR - scenario: CM_MLPERF_LOADGEN_SCENARIO - runner: CM_MLPERF_RUNNER - concurrency: CM_MLPERF_CONCURRENCY - ep: CM_MLPERF_EXECUTION_PROVIDER - intraop: CM_MLPERF_INTRAOP - interop: CM_MLPERF_INTEROP - execmode: CM_MLPERF_EXEC_MODE - samples: CM_MLPERF_LOADGEN_SAMPLES - loadgen_expected_qps: CM_MLPERF_LOADGEN_EXPECTED_QPS - loadgen_duration_sec: CM_MLPERF_LOADGEN_DURATION_SEC + modelpath: MLC_ML_MODEL_FILE_WITH_PATH + modelcodepath: MLC_ML_MODEL_CODE_WITH_PATH + modelcfgpath: MLC_ML_MODEL_CFG_WITH_PATH + 
modelcfg: MLC_ML_MODEL_CFG + modelsamplepath: MLC_ML_MODEL_SAMPLE_WITH_PATH + output_dir: MLC_MLPERF_OUTPUT_DIR + scenario: MLC_MLPERF_LOADGEN_SCENARIO + runner: MLC_MLPERF_RUNNER + concurrency: MLC_MLPERF_CONCURRENCY + ep: MLC_MLPERF_EXECUTION_PROVIDER + intraop: MLC_MLPERF_INTRAOP + interop: MLC_MLPERF_INTEROP + execmode: MLC_MLPERF_EXEC_MODE + samples: MLC_MLPERF_LOADGEN_SAMPLES + loadgen_expected_qps: MLC_MLPERF_LOADGEN_EXPECTED_QPS + loadgen_duration_sec: MLC_MLPERF_LOADGEN_DURATION_SEC # New env keys exported from this script new_env_keys: - - CM_MLPERF_* + - MLC_MLPERF_* # Dependencies on other CM scripts @@ -73,7 +73,7 @@ deps: # Detect CUDA if required - tags: get,cuda enable_if_env: - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - gpu # Install loadgen @@ -85,25 +85,25 @@ deps: # Install ML engines via CM # ONNX - enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - onnxruntime - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - cpu tags: get,generic-python-lib,_onnxruntime names: - onnxruntime - enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - onnxruntime - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - gpu tags: get,generic-python-lib,_onnxruntime_gpu names: - onnxruntime - enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - onnxruntime tags: get,generic-python-lib,_onnx names: @@ -116,18 +116,18 @@ deps: # CPU - enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - pytorch - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - cpu tags: get,generic-python-lib,_torch names: - torch - enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - pytorch - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - cpu tags: get,generic-python-lib,_torchvision names: @@ -136,18 +136,18 @@ deps: # CUDA/GPU - enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - pytorch - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - gpu tags: get,generic-python-lib,_torch_cuda names: - torch - enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - pytorch - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - gpu tags: get,generic-python-lib,_torchvision_cuda names: @@ -158,17 +158,17 @@ deps: ######################################################################## # Install MLPerf models - enable_if_env: - CM_MODEL: + MLC_MODEL: - resnet50 tags: get,ml-model,resnet50,_onnx - enable_if_env: - CM_MODEL: + MLC_MODEL: - retinanet tags: get,ml-model,retinanet,_onnx,_fp32 - enable_if_env: - CM_MODEL: + MLC_MODEL: - retinanet tags: get,ml-model,retinanet,_onnx,_fp32 @@ -181,14 +181,14 @@ variations: pytorch: group: backend env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: pytorch onnxruntime: group: backend default: true env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: onnxruntime @@ -199,9 +199,9 @@ variations: default: true env: - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: cpu - CM_MLPERF_EXECUTION_PROVIDER: + MLC_MLPERF_EXECUTION_PROVIDER: CPUExecutionProvider cuda: @@ -211,9 +211,9 @@ variations: group: device env: - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: gpu - CM_MLPERF_EXECUTION_PROVIDER: + MLC_MLPERF_EXECUTION_PROVIDER: CUDAExecutionProvider @@ -222,25 +222,25 @@ variations: group: models env: - CM_MODEL: retinanet + MLC_MODEL: retinanet resnet50: group: models env: - CM_MODEL: resnet50 + MLC_MODEL: resnet50 custom: group: models env: - CM_MODEL: custom + MLC_MODEL: custom huggingface: env: - CM_CUSTOM_MODEL_SOURCE: huggingface + MLC_CUSTOM_MODEL_SOURCE: huggingface custom,huggingface: deps: @@ -249,16 +249,16 @@ variations: - hf-downloader update_tags_from_env_with_prefix: "_model-stub.": - - CM_ML_MODEL_STUB + - 
MLC_ML_MODEL_STUB model-stub.#: env: - CM_ML_MODEL_STUB: "#" + MLC_ML_MODEL_STUB: "#" cmc: env: - CM_CUSTOM_MODEL_CMC: yes + MLC_CUSTOM_MODEL_CMC: yes custom,cmc: @@ -303,15 +303,15 @@ docker: input_paths: - modelpath - modelsamplepath - - env.CM_ML_MODEL_FILE_WITH_PATH - - env.CM_ML_MODEL_CODE_WITH_PATH + - env.MLC_ML_MODEL_FILE_WITH_PATH + - env.MLC_ML_MODEL_CODE_WITH_PATH - output_dir - repro_dir skip_input_for_fake_run: - modelpath - modelsamplepath - - env.CM_ML_MODEL_FILE_WITH_PATH - - env.CM_ML_MODEL_CODE_WITH_PATH + - env.MLC_ML_MODEL_FILE_WITH_PATH + - env.MLC_ML_MODEL_CODE_WITH_PATH - output_dir - scenario - runner diff --git a/script/app-loadgen-generic-python/run.bat b/script/app-loadgen-generic-python/run.bat index 3d4b5d58b..921853c60 100644 --- a/script/app-loadgen-generic-python/run.bat +++ b/script/app-loadgen-generic-python/run.bat @@ -1,4 +1,4 @@ rem native script -%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\src\main.py %CM_RUN_OPTS% +%MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\src\main.py %MLC_RUN_OPTS% IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/app-loadgen-generic-python/run.sh b/script/app-loadgen-generic-python/run.sh index 2a13312f0..843ecb749 100644 --- a/script/app-loadgen-generic-python/run.sh +++ b/script/app-loadgen-generic-python/run.sh @@ -1,4 +1,4 @@ #!/bin/bash -${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/src/main.py ${CM_RUN_OPTS} +${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/src/main.py ${MLC_RUN_OPTS} test $? -eq 0 || exit 1 diff --git a/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.bat b/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.bat deleted file mode 100644 index c7154832f..000000000 --- a/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.bat +++ /dev/null @@ -1,7 +0,0 @@ -rem set CM_CACHE=--no-cache - -set CM_DOCKER_ORG=modularcm -set CM_DOCKER_NAME=loadgen-generic-python -set CM_OS_NAME=ubuntu -set CM_HW_TARGET=cpu -set CM_OS_VERSION=22.04 diff --git a/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.sh b/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.sh deleted file mode 100644 index 5f49d3be9..000000000 --- a/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.sh +++ /dev/null @@ -1,10 +0,0 @@ -#! 
/bin/bash - -#export CM_CACHE="--no-cache" - -export CM_DOCKER_ORG=modularcm -export CM_DOCKER_NAME="loadgen-generic-python" -export CM_OS_NAME="ubuntu" -export CM_HW_TARGET="cpu" -export CM_OS_VERSION="22.04" - diff --git a/script/app-loadgen-generic-python/tests/modular-cm-containers/build.bat b/script/app-loadgen-generic-python/tests/modular-cm-containers/build.bat deleted file mode 100644 index f51ea46b6..000000000 --- a/script/app-loadgen-generic-python/tests/modular-cm-containers/build.bat +++ /dev/null @@ -1,16 +0,0 @@ -call _common.bat - -docker build -f %CM_DOCKER_NAME%--%CM_OS_NAME%-%CM_HW_TARGET%.Dockerfile ^ - -t %CM_DOCKER_ORG%/%CM_DOCKER_NAME%-%CM_HW_TARGET%:%CM_OS_NAME%-%CM_OS_VERSION% ^ - --build-arg cm_os_name=%CM_OS_NAME% ^ - --build-arg cm_hw_target=%CM_HW_TARGET% ^ - --build-arg cm_os_version=%CM_OS_VERSION% ^ - --build-arg cm_version="" ^ - --build-arg cm_automation_repo="ctuning@mlcommons-ck" ^ - --build-arg cm_automation_checkout="" ^ - --build-arg cm_python_version="3.10.8" ^ - --build-arg cm_mlperf_inference_loadgen_version="" ^ - --build-arg cm_mlperf_inference_src_tags="" ^ - --build-arg cm_mlperf_inference_src_version="" ^ - --build-arg CM_ONNXRUNTIME_VERSION="1.13.1" ^ - %CM_CACHE% . diff --git a/script/app-loadgen-generic-python/tests/modular-cm-containers/build.sh b/script/app-loadgen-generic-python/tests/modular-cm-containers/build.sh deleted file mode 100644 index 186a0eae9..000000000 --- a/script/app-loadgen-generic-python/tests/modular-cm-containers/build.sh +++ /dev/null @@ -1,18 +0,0 @@ -#! /bin/bash - -. ./_common.sh - -time docker build -f ${CM_DOCKER_NAME}--${CM_OS_NAME}-${CM_HW_TARGET}.Dockerfile \ - -t ${CM_DOCKER_ORG}/${CM_DOCKER_NAME}-${CM_HW_TARGET}:${CM_OS_NAME}-${CM_OS_VERSION} \ - --build-arg cm_os_name=${CM_OS_NAME} \ - --build-arg cm_hw_target=${CM_HW_TARGET} \ - --build-arg cm_os_version=${CM_OS_VERSION} \ - --build-arg cm_version="" \ - --build-arg cm_automation_repo="ctuning@mlcommons-ck" \ - --build-arg cm_automation_checkout="" \ - --build-arg cm_python_version="3.10.8" \ - --build-arg cm_mlperf_inference_loadgen_version="" \ - --build-arg cm_mlperf_inference_src_tags="" \ - --build-arg cm_mlperf_inference_src_version="" \ - --build-arg CM_ONNXRUNTIME_VERSION="1.13.1" \ - ${CM_CACHE} . 
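The deleted `build.bat`/`build.sh` wrappers drive a single Dockerfile through a row of `--build-arg` values taken from `_common.bat`/`_common.sh`. A compact sketch of that parameterization pattern (hypothetical Python driver, shown only to summarize what the deleted shell scripts did; the argument values come from the files above):

```python
import subprocess

build_args = {
    'cm_os_name': 'ubuntu',
    'cm_os_version': '22.04',
    'cm_hw_target': 'cpu',
    'cm_python_version': '3.10.8',
}

cmd = ['docker', 'build',
       '-f', 'loadgen-generic-python--ubuntu-cpu.Dockerfile',
       '-t', 'modularcm/loadgen-generic-python-cpu:ubuntu-22.04']
for k, v in build_args.items():
    cmd += ['--build-arg', f'{k}={v}']  # one --build-arg per parameter

subprocess.run(cmd, check=True)  # requires a local docker daemon
```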
diff --git a/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python--ubuntu-cpu.Dockerfile b/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python--ubuntu-cpu.Dockerfile deleted file mode 100644 index c82296c66..000000000 --- a/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python--ubuntu-cpu.Dockerfile +++ /dev/null @@ -1,96 +0,0 @@ -# Modular MLPerf container with the MLCommons CM automation meta-framework - -# Preparing OS -ARG cm_os_name="ubuntu" -ARG cm_os_version="22.04" - -FROM ${cm_os_name}:${cm_os_version} - -# Maintained by the MLCommons taskforce on automation and reproducibility and OctoML -LABEL github="https://github.com/mlcommons/ck" -LABEL maintainer="https://cKnowledge.org/mlcommons-taskforce" - -# Customization -ARG CM_GH_TOKEN - -# Prepare shell and entry point -SHELL ["/bin/bash", "-c"] -ENTRYPOINT ["/bin/bash", "-c"] - -# Install system dependencies -# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes -RUN apt-get update -y -RUN apt-get install -y lsb-release -RUN apt-get install -y python3 python3-pip git wget sudo - -# Extra python deps -RUN python3 -m pip install requests - -# CM version -ARG cm_version="" -ENV CM_VERSION="${cm_version}" -RUN if [ "${CM_VERSION}" != "" ] ; then \ - python3 -m pip install cmind==${CM_VERSION} ; \ - else \ - python3 -m pip install cmind ; \ - fi - -# Setup docker environment -ENTRYPOINT ["/bin/bash", "-c"] -ENV TZ=US/Pacific -RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone - -# Setup docker user -# See example in https://github.com/mlcommons/GaNDLF/blob/master/Dockerfile-CPU -RUN groupadd --gid 10001 cm -RUN useradd --uid 10000 -g cm --create-home --shell /bin/bash cmuser -RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers - -USER cmuser:cm -WORKDIR /home/cmuser - -# Check CM installation -RUN lsb_release -a > sys-version-os.log -RUN uname -a > sys-version-kernel.log -RUN python3 --version > sys-version-python3.log -RUN cm version > sys-version-cm.log - -################################################################################ -# Get CM automation repository -ARG cm_automation_repo="mlcommons@ck" -ARG cm_automation_repo_checkout="" -ENV CM_AUTOMATION_REPO=${cm_automation_repo} -ENV CM_AUTOMATION_REPO_CHECKOUT=${cm_automation_repo_checkout} -RUN echo ${CM_AUTOMATION_REPO} -RUN cm pull repo ${CM_AUTOMATION_REPO} --checkout=${CM_AUTOMATION_REPO_CHECKOUT} - -################################################################################ -# Install CM system dependencies -RUN cm run script "get sys-utils-cm" --quiet - -# Detect/install python -ARG cm_python_version="" -RUN cm run script "get python3" --version=${cm_python_version} - -################################################################################ -# Build MLPerf loadgen -ARG cm_mlperf_inference_loadgen_version="" -RUN cm run script "get mlperf loadgen" --adr.compiler.tags=gcc --version=${cm_mlperf_inference_loadgen_version} --adr.inference-src-loadgen.version=${cm_mlperf_inference_loadgen_version} -v - -################################################################################ -# Install ONNX runtime -ARG CM_ONNXRUNTIME_VERSION="" -RUN cm run script "get generic-python-lib _onnxruntime" --version=${CM_ONNXRUNTIME_VERSION} - -ARG CM_MLPERF_CHOICE_BACKEND="onnxruntime" -ARG CM_MLPERF_CHOICE_DEVICE="cpu" - -RUN cm run script --tags=python,app,loadgen-generic,_onnxruntime,_resnet50 \ - --adr.compiler.tags=gcc \ - 
--adr.python.version_min=3.8 \ - --quiet \ - --fake_run - -################################################################################ -# CMD entry point -CMD /bin/bash diff --git a/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python-auto.Dockerfile b/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python-auto.Dockerfile deleted file mode 100644 index 195acdec6..000000000 --- a/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python-auto.Dockerfile +++ /dev/null @@ -1,33 +0,0 @@ -FROM ubuntu:20.04 -SHELL ["/bin/bash", "-c"] -ARG CM_GH_TOKEN - -# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes -# Install system dependencies -RUN apt-get update -y -RUN apt-get install -y python3 python3-pip git sudo wget - -# Install python packages -RUN python3 -m pip install cmind requests - -# Setup docker environment -ENTRYPOINT ["/bin/bash", "-c"] -ENV TZ=US/Pacific -ENV PATH=${PATH}:$HOME/.local/bin -RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone - -# Setup docker user -RUN groupadd cm -RUN useradd -g cm --create-home --shell /bin/bash cmuser -RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers -USER cmuser:cm -WORKDIR /home/cmuser - -# Download CM repo for scripts -RUN cm pull repo ctuning@mlcommons-ck - -# Install all system dependencies -RUN cm run script --quiet --tags=get,sys-utils-cm - -# Run commands -RUN cm run script --quiet --tags=python,app,loadgen-generic,_onnxruntime,_resnet50 --fake_run diff --git a/script/app-loadgen-generic-python/tests/modular-cm-containers/run.bat b/script/app-loadgen-generic-python/tests/modular-cm-containers/run.bat deleted file mode 100644 index 171aeecab..000000000 --- a/script/app-loadgen-generic-python/tests/modular-cm-containers/run.bat +++ /dev/null @@ -1,3 +0,0 @@ -call _common.bat - -docker run -it %CM_DOCKER_ORG%/%CM_DOCKER_NAME%-%CM_HW_TARGET%:%CM_OS_NAME%-%CM_OS_VERSION% diff --git a/script/app-loadgen-generic-python/tests/modular-cm-containers/run.sh b/script/app-loadgen-generic-python/tests/modular-cm-containers/run.sh deleted file mode 100644 index c82d4b7b1..000000000 --- a/script/app-loadgen-generic-python/tests/modular-cm-containers/run.sh +++ /dev/null @@ -1,3 +0,0 @@ -. 
./_common.sh - -docker run -it ${CM_DOCKER_ORG}/${CM_DOCKER_NAME}-%CM_HW_TARGET%:${CM_OS_NAME}-${CM_OS_VERSION} diff --git a/script/app-mlperf-automotive-mlcommons-python/customize.py b/script/app-mlperf-automotive-mlcommons-python/customize.py index 70ae33af2..bfce993cd 100644 --- a/script/app-mlperf-automotive-mlcommons-python/customize.py +++ b/script/app-mlperf-automotive-mlcommons-python/customize.py @@ -12,147 +12,147 @@ def preprocess(i): state = i['state'] script_path = i['run_script_input']['path'] - if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": + if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes": return {'return': 0} - if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes": + if env.get('MLC_RUN_DOCKER_CONTAINER', '') == "yes": return {'return': 0} - if env.get('CM_MLPERF_POWER', '') == "yes": + if env.get('MLC_MLPERF_POWER', '') == "yes": power = "yes" else: power = "no" - rerun = True if env.get("CM_RERUN", "") != '' else False + rerun = True if env.get("MLC_RERUN", "") != '' else False - if 'CM_MLPERF_LOADGEN_SCENARIO' not in env: - env['CM_MLPERF_LOADGEN_SCENARIO'] = "Offline" + if 'MLC_MLPERF_LOADGEN_SCENARIO' not in env: + env['MLC_MLPERF_LOADGEN_SCENARIO'] = "Offline" - if 'CM_MLPERF_LOADGEN_MODE' not in env: - env['CM_MLPERF_LOADGEN_MODE'] = "accuracy" + if 'MLC_MLPERF_LOADGEN_MODE' not in env: + env['MLC_MLPERF_LOADGEN_MODE'] = "accuracy" - if 'CM_MODEL' not in env: + if 'MLC_MODEL' not in env: return { 'return': 1, 'error': "Please select a variation specifying the model to run"} - # if env['CM_MODEL'] == "resnet50": - # cmd = "cp " + os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt") + " " + os.path.join(env['CM_DATASET_PATH'], + # if env['MLC_MODEL'] == "resnet50": + # cmd = "cp " + os.path.join(env['MLC_DATASET_AUX_PATH'], "val.txt") + " " + os.path.join(env['MLC_DATASET_PATH'], # "val_map.txt") # ret = os.system(cmd) - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] = " " + \ - env.get('CM_MLPERF_LOADGEN_EXTRA_OPTIONS', '') + " " + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] = " " + \ + env.get('MLC_MLPERF_LOADGEN_EXTRA_OPTIONS', '') + " " - if 'CM_MLPERF_LOADGEN_QPS' not in env: - env['CM_MLPERF_LOADGEN_QPS_OPT'] = "" + if 'MLC_MLPERF_LOADGEN_QPS' not in env: + env['MLC_MLPERF_LOADGEN_QPS_OPT'] = "" else: - env['CM_MLPERF_LOADGEN_QPS_OPT'] = " --qps " + \ - env['CM_MLPERF_LOADGEN_QPS'] + env['MLC_MLPERF_LOADGEN_QPS_OPT'] = " --qps " + \ + env['MLC_MLPERF_LOADGEN_QPS'] - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += env['CM_MLPERF_LOADGEN_QPS_OPT'] + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += env['MLC_MLPERF_LOADGEN_QPS_OPT'] - if 'CM_NUM_THREADS' not in env: - if 'CM_MINIMIZE_THREADS' in env: - env['CM_NUM_THREADS'] = str(int(env['CM_HOST_CPU_TOTAL_CORES']) // - (int(env.get('CM_HOST_CPU_SOCKETS', '1')) * int(env.get('CM_HOST_CPU_TOTAL_CORES', '1')))) + if 'MLC_NUM_THREADS' not in env: + if 'MLC_MINIMIZE_THREADS' in env: + env['MLC_NUM_THREADS'] = str(int(env['MLC_HOST_CPU_TOTAL_CORES']) // + (int(env.get('MLC_HOST_CPU_SOCKETS', '1')) * int(env.get('MLC_HOST_CPU_TOTAL_CORES', '1')))) else: - env['CM_NUM_THREADS'] = env.get('CM_HOST_CPU_TOTAL_CORES', '1') + env['MLC_NUM_THREADS'] = env.get('MLC_HOST_CPU_TOTAL_CORES', '1') - if env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '') != '' and not env.get( - 'CM_MLPERF_MODEL_SKIP_BATCHING', False): - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --max-batchsize " + \ - str(env['CM_MLPERF_LOADGEN_MAX_BATCHSIZE']) + if env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '') != '' and not env.get( + 'MLC_MLPERF_MODEL_SKIP_BATCHING', False): + 
env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --max-batchsize " + \ + str(env['MLC_MLPERF_LOADGEN_MAX_BATCHSIZE']) - if env.get('CM_MLPERF_LOADGEN_BATCH_SIZE', '') != '': - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --batch-size " + \ - str(env['CM_MLPERF_LOADGEN_BATCH_SIZE']) + if env.get('MLC_MLPERF_LOADGEN_BATCH_SIZE', '') != '': + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --batch-size " + \ + str(env['MLC_MLPERF_LOADGEN_BATCH_SIZE']) - if env.get('CM_MLPERF_LOADGEN_QUERY_COUNT', '') != '' and not env.get( - 'CM_TMP_IGNORE_MLPERF_QUERY_COUNT', False) and env.get('CM_MLPERF_RUN_STYLE', '') != "valid": - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --count " + \ - env['CM_MLPERF_LOADGEN_QUERY_COUNT'] + if env.get('MLC_MLPERF_LOADGEN_QUERY_COUNT', '') != '' and not env.get( + 'MLC_TMP_IGNORE_MLPERF_QUERY_COUNT', False) and env.get('MLC_MLPERF_RUN_STYLE', '') != "valid": + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --count " + \ + env['MLC_MLPERF_LOADGEN_QUERY_COUNT'] print("Using MLCommons Inference source from '" + - env['CM_MLPERF_INFERENCE_SOURCE'] + "'") + env['MLC_MLPERF_INFERENCE_SOURCE'] + "'") - if 'CM_MLPERF_CONF' not in env: - env['CM_MLPERF_CONF'] = os.path.join( - env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") + if 'MLC_MLPERF_CONF' not in env: + env['MLC_MLPERF_CONF'] = os.path.join( + env['MLC_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") x = "" if os_info['platform'] == 'windows' else "'" - if "llama2-70b" in env['CM_MODEL']: - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf-conf " + \ - x + env['CM_MLPERF_CONF'] + x + if "llama2-70b" in env['MLC_MODEL']: + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf-conf " + \ + x + env['MLC_MLPERF_CONF'] + x else: - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf_conf " + \ - x + env['CM_MLPERF_CONF'] + x + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf_conf " + \ + x + env['MLC_MLPERF_CONF'] + x - env['MODEL_DIR'] = env.get('CM_ML_MODEL_PATH') + env['MODEL_DIR'] = env.get('MLC_ML_MODEL_PATH') if not env['MODEL_DIR']: env['MODEL_DIR'] = os.path.dirname( env.get( - 'CM_MLPERF_CUSTOM_MODEL_PATH', - env.get('CM_ML_MODEL_FILE_WITH_PATH'))) + 'MLC_MLPERF_CUSTOM_MODEL_PATH', + env.get('MLC_ML_MODEL_FILE_WITH_PATH'))) RUN_CMD = "" - scenario = env['CM_MLPERF_LOADGEN_SCENARIO'] + scenario = env['MLC_MLPERF_LOADGEN_SCENARIO'] scenario_extra_options = '' - NUM_THREADS = env['CM_NUM_THREADS'] - if int(NUM_THREADS) > 2 and env['CM_MLPERF_DEVICE'] == "gpu": + NUM_THREADS = env['MLC_NUM_THREADS'] + if int(NUM_THREADS) > 2 and env['MLC_MLPERF_DEVICE'] == "gpu": NUM_THREADS = "2" # Don't use more than 2 threads when run on GPU - if env['CM_MODEL'] in ['resnet50', 'retinanet', 'stable-diffusion-xl']: + if env['MLC_MODEL'] in ['resnet50', 'retinanet', 'stable-diffusion-xl']: scenario_extra_options += " --threads " + NUM_THREADS - ml_model_name = env['CM_MODEL'] - if 'CM_MLPERF_USER_CONF' in env: - user_conf_path = env['CM_MLPERF_USER_CONF'] + ml_model_name = env['MLC_MODEL'] + if 'MLC_MLPERF_USER_CONF' in env: + user_conf_path = env['MLC_MLPERF_USER_CONF'] x = "" if os_info['platform'] == 'windows' else "'" scenario_extra_options += " --user_conf " + x + user_conf_path + x - mode = env['CM_MLPERF_LOADGEN_MODE'] + mode = env['MLC_MLPERF_LOADGEN_MODE'] mode_extra_options = "" # Grigori blocked for ABTF to preprocess data set on the fly for now # we can later move it to a separate script to preprocess data set -# if 'CM_DATASET_PREPROCESSED_PATH' in env and env['CM_MODEL'] in [ 'resnet50', 'retinanet' ]: -# #dataset_options = " 
--use_preprocessed_dataset --preprocessed_dir "+env['CM_DATASET_PREPROCESSED_PATH'] -# if env.get('CM_MLPERF_LAST_RELEASE') not in [ "v2.0", "v2.1" ]: -# dataset_options = " --use_preprocessed_dataset --cache_dir "+env['CM_DATASET_PREPROCESSED_PATH'] +# if 'MLC_DATASET_PREPROCESSED_PATH' in env and env['MLC_MODEL'] in [ 'resnet50', 'retinanet' ]: +# #dataset_options = " --use_preprocessed_dataset --preprocessed_dir "+env['MLC_DATASET_PREPROCESSED_PATH'] +# if env.get('MLC_MLPERF_LAST_RELEASE') not in [ "v2.0", "v2.1" ]: +# dataset_options = " --use_preprocessed_dataset --cache_dir "+env['MLC_DATASET_PREPROCESSED_PATH'] # else: # dataset_options = "" -# if env['CM_MODEL'] == "retinanet": -# dataset_options += " --dataset-list "+ env['CM_DATASET_ANNOTATIONS_FILE_PATH'] -# elif env['CM_MODEL'] == "resnet50": -# dataset_options += " --dataset-list "+ os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt") -# env['DATA_DIR'] = env.get('CM_DATASET_PREPROCESSED_PATH') +# if env['MLC_MODEL'] == "retinanet": +# dataset_options += " --dataset-list "+ env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] +# elif env['MLC_MODEL'] == "resnet50": +# dataset_options += " --dataset-list "+ os.path.join(env['MLC_DATASET_AUX_PATH'], "val.txt") +# env['DATA_DIR'] = env.get('MLC_DATASET_PREPROCESSED_PATH') # else: -# if 'CM_DATASET_PREPROCESSED_PATH' in env: -# env['DATA_DIR'] = env.get('CM_DATASET_PREPROCESSED_PATH') +# if 'MLC_DATASET_PREPROCESSED_PATH' in env: +# env['DATA_DIR'] = env.get('MLC_DATASET_PREPROCESSED_PATH') # else: -# env['DATA_DIR'] = env.get('CM_DATASET_PATH') +# env['DATA_DIR'] = env.get('MLC_DATASET_PATH') # dataset_options = '' # Grigori added for ABTF -# dataset_path = env.get('CM_DATASET_PATH') +# dataset_path = env.get('MLC_DATASET_PATH') # env['DATA_DIR'] = dataset_path -# dataset_options = " --dataset-list " + env['CM_DATASET_ANNOTATIONS_FILE_PATH'] +# dataset_options = " --dataset-list " + env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] # dataset_options += " --cache_dir " + os.path.join(script_path, 'preprocessed-dataset') dataset_options = '' - if env.get('CM_MLPERF_EXTRA_DATASET_ARGS', '') != '': - dataset_options += " " + env['CM_MLPERF_EXTRA_DATASET_ARGS'] + if env.get('MLC_MLPERF_EXTRA_DATASET_ARGS', '') != '': + dataset_options += " " + env['MLC_MLPERF_EXTRA_DATASET_ARGS'] if mode == "accuracy": mode_extra_options += " --accuracy" - env['CM_OUTPUT_PREDICTIONS_PATH'] = os.path.join( - env['CM_DATASET_MLCOMMONS_COGNATA_PATH'], - env['CM_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS'], + env['MLC_OUTPUT_PREDICTIONS_PATH'] = os.path.join( + env['MLC_DATASET_MLCOMMONS_COGNATA_PATH'], + env['MLC_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS'], 'Cognata_Camera_01_8M_png', 'output') @@ -161,13 +161,13 @@ def preprocess(i): elif mode == "compliance": - audit_full_path = env['CM_MLPERF_INFERENCE_AUDIT_PATH'] + audit_full_path = env['MLC_MLPERF_INFERENCE_AUDIT_PATH'] mode_extra_options = " --audit '" + audit_full_path + "'" - if env.get('CM_MLPERF_OUTPUT_DIR', '') == '': - env['CM_MLPERF_OUTPUT_DIR'] = os.getcwd() + if env.get('MLC_MLPERF_OUTPUT_DIR', '') == '': + env['MLC_MLPERF_OUTPUT_DIR'] = os.getcwd() - mlperf_implementation = env.get('CM_MLPERF_IMPLEMENTATION', 'reference') + mlperf_implementation = env.get('MLC_MLPERF_IMPLEMENTATION', 'reference') # Generate CMD @@ -176,25 +176,25 @@ def preprocess(i): cmd, run_dir = get_run_cmd_reference( os_info, env, scenario_extra_options, mode_extra_options, dataset_options, script_path) - if env.get('CM_NETWORK_LOADGEN', '') == "lon": + if env.get('MLC_NETWORK_LOADGEN', 
'') == "lon": run_cmd = i['state']['mlperf_inference_run_cmd'] - env['CM_SSH_RUN_COMMANDS'] = [] - env['CM_SSH_RUN_COMMANDS'].append( + env['MLC_SSH_RUN_COMMANDS'] = [] + env['MLC_SSH_RUN_COMMANDS'].append( run_cmd.replace( "--network=lon", "--network=sut") + " &") - env['CM_MLPERF_RUN_CMD'] = cmd - env['CM_RUN_DIR'] = run_dir - env['CM_RUN_CMD'] = cmd - env['CK_PROGRAM_TMP_DIR'] = env.get('CM_ML_MODEL_PATH') # for tvm + env['MLC_MLPERF_RUN_CMD'] = cmd + env['MLC_RUN_DIR'] = run_dir + env['MLC_RUN_CMD'] = cmd + env['CK_PROGRAM_TMP_DIR'] = env.get('MLC_ML_MODEL_PATH') # for tvm - if env.get('CM_HOST_PLATFORM_FLAVOR', '') == "arm64": - env['CM_HOST_PLATFORM_FLAVOR'] = "aarch64" + if env.get('MLC_HOST_PLATFORM_FLAVOR', '') == "arm64": + env['MLC_HOST_PLATFORM_FLAVOR'] = "aarch64" - if not env.get('CM_COGNATA_ACCURACY_DUMP_FILE'): - env['CM_COGNATA_ACCURACY_DUMP_FILE'] = os.path.join( + if not env.get('MLC_COGNATA_ACCURACY_DUMP_FILE'): + env['MLC_COGNATA_ACCURACY_DUMP_FILE'] = os.path.join( env['OUTPUT_DIR'], "accuracy.txt") return {'return': 0} @@ -208,33 +208,33 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, ########################################################################## # Grigori added for ABTF demo - if env['CM_MODEL'] in ['retinanet']: + if env['MLC_MODEL'] in ['retinanet']: run_dir = os.path.join(script_path, 'ref') env['RUN_DIR'] = run_dir - env['OUTPUT_DIR'] = env['CM_MLPERF_OUTPUT_DIR'] + env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR'] - cognata_dataset_path = env['CM_DATASET_MLCOMMONS_COGNATA_PATH'] -# cognata_dataset_path = env['CM_DATASET_PATH'] # Using open images + cognata_dataset_path = env['MLC_DATASET_MLCOMMONS_COGNATA_PATH'] +# cognata_dataset_path = env['MLC_DATASET_PATH'] # Using open images # dataset for some tests path_to_model = env.get( - 'CM_MLPERF_CUSTOM_MODEL_PATH', + 'MLC_MLPERF_CUSTOM_MODEL_PATH', env.get( - 'CM_ML_MODEL_FILE_WITH_PATH', - env.get('CM_ML_MODEL_CODE_WITH_PATH'))) + 'MLC_ML_MODEL_FILE_WITH_PATH', + env.get('MLC_ML_MODEL_CODE_WITH_PATH'))) env['MODEL_FILE'] = path_to_model - cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " " + os.path.join(run_dir, "python", "main.py") + " --profile " + env['CM_MODEL'] + "-" + env['CM_MLPERF_BACKEND'] + \ + cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " " + os.path.join(run_dir, "python", "main.py") + " --profile " + env['MLC_MODEL'] + "-" + env['MLC_MLPERF_BACKEND'] + \ " --model=" + q + path_to_model + q + \ - " --dataset=" + env["CM_MLPERF_VISION_DATASET_OPTION"] + \ + " --dataset=" + env["MLC_MLPERF_VISION_DATASET_OPTION"] + \ " --dataset-path=" + q + cognata_dataset_path + q + \ " --cache_dir=" + q + os.path.join(script_path, 'tmp-preprocessed-dataset') + q + \ - " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + \ + " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + " " + \ " --output " + q + env['OUTPUT_DIR'] + q + " " + \ - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ scenario_extra_options + mode_extra_options + dataset_options ########################################################################## diff --git a/script/app-mlperf-automotive-mlcommons-python/meta.yaml b/script/app-mlperf-automotive-mlcommons-python/meta.yaml index b22f119d6..e5567ac27 100644 --- a/script/app-mlperf-automotive-mlcommons-python/meta.yaml +++ b/script/app-mlperf-automotive-mlcommons-python/meta.yaml @@ -17,70 +17,70 @@ tags: # Default environment default_env: - CM_MLPERF_LOADGEN_MODE: accuracy - CM_MLPERF_LOADGEN_SCENARIO: Offline - 
CM_MLPERF_LOADGEN_BUILD_FROM_SRC: 'on' - CM_OUTPUT_FOLDER_NAME: test_results - CM_MLPERF_RUN_STYLE: test - CM_TEST_QUERY_COUNT: '10' - CM_MLPERF_QUANTIZATION: off - CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: reference - CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX: '' + MLC_MLPERF_LOADGEN_MODE: accuracy + MLC_MLPERF_LOADGEN_SCENARIO: Offline + MLC_MLPERF_LOADGEN_BUILD_FROM_SRC: 'on' + MLC_OUTPUT_FOLDER_NAME: test_results + MLC_MLPERF_RUN_STYLE: test + MLC_TEST_QUERY_COUNT: '10' + MLC_MLPERF_QUANTIZATION: off + MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: reference + MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX: '' # Map script inputs to environment variables input_mapping: - device: CM_MLPERF_DEVICE - count: CM_MLPERF_LOADGEN_QUERY_COUNT - docker: CM_RUN_DOCKER_CONTAINER - hw_name: CM_HW_NAME + device: MLC_MLPERF_DEVICE + count: MLC_MLPERF_LOADGEN_QUERY_COUNT + docker: MLC_RUN_DOCKER_CONTAINER + hw_name: MLC_HW_NAME imagenet_path: IMAGENET_PATH - max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE - mode: CM_MLPERF_LOADGEN_MODE - num_threads: CM_NUM_THREADS - threads: CM_NUM_THREADS - dataset: CM_MLPERF_VISION_DATASET_OPTION - model: CM_MLPERF_CUSTOM_MODEL_PATH + max_batchsize: MLC_MLPERF_LOADGEN_MAX_BATCHSIZE + mode: MLC_MLPERF_LOADGEN_MODE + num_threads: MLC_NUM_THREADS + threads: MLC_NUM_THREADS + dataset: MLC_MLPERF_VISION_DATASET_OPTION + model: MLC_MLPERF_CUSTOM_MODEL_PATH output_dir: OUTPUT_BASE_DIR - power: CM_MLPERF_POWER - power_server: CM_MLPERF_POWER_SERVER_ADDRESS - ntp_server: CM_MLPERF_POWER_NTP_SERVER - max_amps: CM_MLPERF_POWER_MAX_AMPS - max_volts: CM_MLPERF_POWER_MAX_VOLTS - regenerate_files: CM_REGENERATE_MEASURE_FILES - rerun: CM_RERUN - scenario: CM_MLPERF_LOADGEN_SCENARIO - test_query_count: CM_TEST_QUERY_COUNT - clean: CM_MLPERF_CLEAN_SUBMISSION_DIR - dataset_args: CM_MLPERF_EXTRA_DATASET_ARGS - target_qps: CM_MLPERF_LOADGEN_TARGET_QPS - target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY - offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS - server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS - singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY - multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY - output: CM_MLPERF_OUTPUT_DIR + power: MLC_MLPERF_POWER + power_server: MLC_MLPERF_POWER_SERVER_ADDRESS + ntp_server: MLC_MLPERF_POWER_NTP_SERVER + max_amps: MLC_MLPERF_POWER_MAX_AMPS + max_volts: MLC_MLPERF_POWER_MAX_VOLTS + regenerate_files: MLC_REGENERATE_MEASURE_FILES + rerun: MLC_RERUN + scenario: MLC_MLPERF_LOADGEN_SCENARIO + test_query_count: MLC_TEST_QUERY_COUNT + clean: MLC_MLPERF_CLEAN_SUBMISSION_DIR + dataset_args: MLC_MLPERF_EXTRA_DATASET_ARGS + target_qps: MLC_MLPERF_LOADGEN_TARGET_QPS + target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY + offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS + server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS + singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY + multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY + output: MLC_MLPERF_OUTPUT_DIR # Duplicate CM environment variables to the ones used in native apps env_key_mappings: - CM_HOST_: HOST_ - CM_ML_: ML_ - CM_MLPERF_TVM: MLPERF_TVM - CM_MLPERF_DELETE: MLPERF_DELETE + MLC_HOST_: HOST_ + MLC_ML_: ML_ + MLC_MLPERF_TVM: MLPERF_TVM + MLC_MLPERF_DELETE: MLPERF_DELETE # Env keys which are exposed to higher level scripts new_env_keys: - - CM_MLPERF_* - - CM_DATASET_* - - CM_HW_NAME - - CM_COGNATA_ACCURACY_DUMP_FILE - - CM_OUTPUT_PREDICTIONS_PATH - - CM_ML_MODEL_* - - CM_MAX_EXAMPLES + - 
MLC_MLPERF_* + - MLC_DATASET_* + - MLC_HW_NAME + - MLC_COGNATA_ACCURACY_DUMP_FILE + - MLC_OUTPUT_PREDICTIONS_PATH + - MLC_ML_MODEL_* + - MLC_MAX_EXAMPLES new_state_keys: - mlperf-inference-implementation - - CM_SUT_* + - MLC_SUT_* # Dependencies on other CM scripts deps: @@ -123,10 +123,10 @@ deps: - ml-engine-onnxruntime - onnxruntime enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - onnxruntime - tvm-onnx - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - cpu - rocm @@ -135,36 +135,36 @@ deps: names: - ml-engine-onnxruntime-cuda enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - onnxruntime - tvm-onnx - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - gpu skip_if_env: - CM_MODEL: + MLC_MODEL: - 3d-unet-99 - 3d-unet-99.9 ## resnet50 and 3d-unet need both onnxruntime and onnxruntime_gpu on cuda - tags: get,generic-python-lib,_onnxruntime enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - onnxruntime - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - gpu - CM_MODEL: + MLC_MODEL: - 3d-unet-99 - 3d-unet-99.9 - resnet50 - tags: get,generic-python-lib,_onnxruntime_gpu env: - CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: "" + MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: "" enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - onnxruntime - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - gpu - CM_MODEL: + MLC_MODEL: - 3d-unet-99 - 3d-unet-99.9 - resnet50 @@ -175,10 +175,10 @@ deps: - ml-engine-pytorch - pytorch enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - pytorch - tvm-pytorch - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - cpu - rocm @@ -188,11 +188,11 @@ deps: - ml-engine-pytorch - pytorch enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - pytorch - tvm-pytorch - ray - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - gpu ## Torchvision (CPU) @@ -200,10 +200,10 @@ deps: names: - ml-engine-torchvision enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - pytorch - tvm-pytorch - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - cpu ## Torchvision (CUDA) @@ -211,11 +211,11 @@ deps: names: - ml-engine-torchvision enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - pytorch - tvm-pytorch - ray - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - gpu ## tensorrt @@ -223,7 +223,7 @@ deps: names: - ml-engine-tensorrt enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - ray ## torch_tensorrt @@ -231,7 +231,7 @@ deps: names: - ml-engine-torch_tensorrt enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - ray ## Ray @@ -239,7 +239,7 @@ deps: names: - ray enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - ray @@ -250,7 +250,7 @@ deps: - ml-engine-tensorflow - tensorflow enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - tf - tflite @@ -267,7 +267,7 @@ deps: # Install MLPerf loadgen - tags: get,generic-python-lib,_package.mlcommons-loadgen enable_if_env: - CM_MLPERF_LOADGEN_BUILD_FROM_SRC: + MLC_MLPERF_LOADGEN_BUILD_FROM_SRC: - "off" names: - loadgen @@ -275,7 +275,7 @@ deps: - tags: get,loadgen enable_if_any_env: - CM_MLPERF_LOADGEN_BUILD_FROM_SRC: + MLC_MLPERF_LOADGEN_BUILD_FROM_SRC: - "on" names: - loadgen @@ -287,7 +287,7 @@ deps: # # Download MLPerf inference source # - tags: get,mlcommons,inference,src # env: -# CM_GET_MLPERF_IMPLEMENTATION_ONLY: 'yes' +# MLC_GET_MLPERF_IMPLEMENTATION_ONLY: 'yes' # names: # - mlperf-implementation @@ -301,7 +301,7 @@ prehook_deps: - remote-run-cmds tags: remote,run,cmds enable_if_env: - CM_ASSH_RUN_COMMANDS: + MLC_ASSH_RUN_COMMANDS: - "on" @@ -311,7 +311,7 @@ posthook_deps: - mlperf-runner tags: benchmark-mlperf skip_if_env: - 
CM_MLPERF_SKIP_RUN: + MLC_MLPERF_SKIP_RUN: - "on" @@ -331,23 +331,23 @@ variations: group: implementation default: true env: - CM_MLPERF_PYTHON: 'yes' - CM_MLPERF_IMPLEMENTATION: reference + MLC_MLPERF_PYTHON: 'yes' + MLC_MLPERF_IMPLEMENTATION: reference # ML engine onnxruntime: group: framework env: - CM_MLPERF_BACKEND: onnxruntime + MLC_MLPERF_BACKEND: onnxruntime onnxruntime,cpu: env: - CM_MLPERF_BACKEND_VERSION: <<>> + MLC_MLPERF_BACKEND_VERSION: <<>> onnxruntime,cuda: env: - CM_MLPERF_BACKEND_VERSION: <<>> + MLC_MLPERF_BACKEND_VERSION: <<>> ONNXRUNTIME_PREFERRED_EXECUTION_PROVIDER: "CUDAExecutionProvider" @@ -362,8 +362,8 @@ variations: ml-model: tags: raw,_pytorch env: - CM_MLPERF_BACKEND: pytorch - CM_MLPERF_BACKEND_VERSION: <<>> + MLC_MLPERF_BACKEND: pytorch + MLC_MLPERF_BACKEND_VERSION: <<>> @@ -376,9 +376,9 @@ variations: # - tags: get,generic-python-lib,_pycocotools # # env: -# CM_MODEL: retinanet -# CM_MLPERF_USE_MLCOMMONS_RUN_SCRIPT: 'yes' -# CM_MLPERF_LOADGEN_MAX_BATCHSIZE: '1' +# MLC_MODEL: retinanet +# MLC_MLPERF_USE_MLCOMMONS_RUN_SCRIPT: 'yes' +# MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: '1' abtf-demo-model: @@ -401,7 +401,7 @@ variations: - ml-model-abtf env: - CM_MODEL: retinanet + MLC_MODEL: retinanet abtf-poc-model: group: models @@ -420,7 +420,7 @@ variations: - cocoeval - tags: get,dataset,raw,mlcommons-cognata,_abtf-poc skip_if_env: - CM_RUN_STATE_DOCKER: + MLC_RUN_STATE_DOCKER: - 'yes' names: - raw-dataset-mlcommons-cognata @@ -429,14 +429,14 @@ variations: - ml-model-abtf env: - CM_MODEL: retinanet + MLC_MODEL: retinanet # Target devices cpu: group: device default: true env: - CM_MLPERF_DEVICE: cpu + MLC_MLPERF_DEVICE: cpu CUDA_VISIBLE_DEVICES: '' USE_CUDA: no USE_GPU: no @@ -444,7 +444,7 @@ variations: cuda: group: device env: - CM_MLPERF_DEVICE: gpu + MLC_MLPERF_DEVICE: gpu USE_CUDA: yes USE_GPU: yes @@ -453,17 +453,17 @@ variations: # Loadgen scenarios offline: env: - CM_MLPERF_LOADGEN_SCENARIO: Offline + MLC_MLPERF_LOADGEN_SCENARIO: Offline multistream: env: - CM_MLPERF_LOADGEN_SCENARIO: MultiStream + MLC_MLPERF_LOADGEN_SCENARIO: MultiStream singlestream: env: - CM_MLPERF_LOADGEN_SCENARIO: SingleStream - CM_MLPERF_LOADGEN_MAX_BATCHSIZE: 1 + MLC_MLPERF_LOADGEN_SCENARIO: SingleStream + MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: 1 server: env: - CM_MLPERF_LOADGEN_SCENARIO: Server + MLC_MLPERF_LOADGEN_SCENARIO: Server mvp_demo: env: diff --git a/script/app-mlperf-automotive-mlcommons-python/ref/python/backend_pytorch_native.py b/script/app-mlperf-automotive-mlcommons-python/ref/python/backend_pytorch_native.py index ec5401979..063cb1ce4 100644 --- a/script/app-mlperf-automotive-mlcommons-python/ref/python/backend_pytorch_native.py +++ b/script/app-mlperf-automotive-mlcommons-python/ref/python/backend_pytorch_native.py @@ -36,15 +36,15 @@ def image_format(self): def load(self, model_path, inputs=None, outputs=None): # From ABTF code - sys.path.insert(0, os.environ['CM_ML_MODEL_CODE_WITH_PATH']) + sys.path.insert(0, os.environ['MLC_ML_MODEL_CODE_WITH_PATH']) from src.transform import SSDTransformer from src.utils import generate_dboxes, Encoder, colors, coco_classes from src.model import SSD, ResNet - abtf_model_config = os.environ.get('CM_ABTF_ML_MODEL_CONFIG', '') + abtf_model_config = os.environ.get('MLC_ABTF_ML_MODEL_CONFIG', '') - num_classes_str = os.environ.get('CM_ABTF_NUM_CLASSES', '').strip() + num_classes_str = os.environ.get('MLC_ABTF_NUM_CLASSES', '').strip() self.num_classes = int( num_classes_str) if num_classes_str != '' else 15 diff --git 
a/script/app-mlperf-automotive-mlcommons-python/ref/python/cognata.py b/script/app-mlperf-automotive-mlcommons-python/ref/python/cognata.py index 005fa4e2d..752f6dc77 100644 --- a/script/app-mlperf-automotive-mlcommons-python/ref/python/cognata.py +++ b/script/app-mlperf-automotive-mlcommons-python/ref/python/cognata.py @@ -74,13 +74,13 @@ def __init__(self, data_path, image_list, name, use_cache=0, image_size=None, # Grigori added for tests # Check if overridden by external environment for tests x = os.environ.get( - 'CM_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS', + 'MLC_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS', '').strip() if x != '': folders = x.split(';') if ';' in x else [x] x = os.environ.get( - 'CM_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES', + 'MLC_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES', '').strip() if x != '': cameras = x.split(';') if ';' in x else [x] @@ -103,7 +103,7 @@ def __init__(self, data_path, image_list, name, use_cache=0, image_size=None, print(' Time: {:.2f} sec.'.format(time.time() - start)) if os.environ.get( - 'CM_ABTF_ML_MODEL_TRAINING_FORCE_COGNATA_LABELS', '') == 'yes': + 'MLC_ABTF_ML_MODEL_TRAINING_FORCE_COGNATA_LABELS', '') == 'yes': label_map = cognata_labels.label_map label_info = cognata_labels.label_info diff --git a/script/app-mlperf-automotive-mlcommons-python/ref/python/main.py b/script/app-mlperf-automotive-mlcommons-python/ref/python/main.py index e4462da8c..255554f82 100644 --- a/script/app-mlperf-automotive-mlcommons-python/ref/python/main.py +++ b/script/app-mlperf-automotive-mlcommons-python/ref/python/main.py @@ -378,8 +378,8 @@ def add_results(final_results, name, result_dict, if "mAP" in result_dict: result["mAP"] = 100. * result_dict["mAP"] acc_str += ", mAP={:.3f}%".format(result["mAP"]) - if os.environ.get('CM_COGNATA_ACCURACY_DUMP_FILE', '') != '': - accuracy_file = os.environ['CM_COGNATA_ACCURACY_DUMP_FILE'] + if os.environ.get('MLC_COGNATA_ACCURACY_DUMP_FILE', '') != '': + accuracy_file = os.environ['MLC_COGNATA_ACCURACY_DUMP_FILE'] with open(accuracy_file, "w") as f: f.write("{:.3f}%".format(result["mAP"])) @@ -489,7 +489,7 @@ def main(): count = ds.get_item_count() # warmup - if os.environ.get('CM_ABTF_ML_MODEL_SKIP_WARMUP', + if os.environ.get('MLC_ABTF_ML_MODEL_SKIP_WARMUP', '').strip().lower() != 'yes': ds.load_query_samples([0]) for _ in range(5): diff --git a/script/app-mlperf-automotive/customize.py b/script/app-mlperf-automotive/customize.py index a9ea896d6..4a7600b14 100644 --- a/script/app-mlperf-automotive/customize.py +++ b/script/app-mlperf-automotive/customize.py @@ -39,16 +39,16 @@ def postprocess(i): env['CMD'] = '' - # if env.get('CM_MLPERF_USER_CONF', '') == '': + # if env.get('MLC_MLPERF_USER_CONF', '') == '': # return {'return': 0} - output_dir = env['CM_MLPERF_OUTPUT_DIR'] - mode = env['CM_MLPERF_LOADGEN_MODE'] + output_dir = env['MLC_MLPERF_OUTPUT_DIR'] + mode = env['MLC_MLPERF_LOADGEN_MODE'] - model = env['CM_MODEL'] - model_full_name = env.get('CM_ML_MODEL_FULL_NAME', model) + model = env['MLC_MODEL'] + model_full_name = env.get('MLC_ML_MODEL_FULL_NAME', model) - scenario = env['CM_MLPERF_LOADGEN_SCENARIO'] + scenario = env['MLC_MLPERF_LOADGEN_SCENARIO'] if not os.path.exists(output_dir) or not os.path.exists( os.path.join(output_dir, "mlperf_log_summary.txt")): @@ -60,12 +60,12 @@ def postprocess(i): result = mlperf_log['result_mean_latency_ns'] / 1000000 elif mode == "accuracy": if not env.get( - 'CM_COGNATA_ACCURACY_DUMP_FILE'): # can happen while reusing old runs - env['CM_COGNATA_ACCURACY_DUMP_FILE'] = os.path.join(
+ 'MLC_COGNATA_ACCURACY_DUMP_FILE'): # can happen while reusing old runs + env['MLC_COGNATA_ACCURACY_DUMP_FILE'] = os.path.join( output_dir, "accuracy.txt") acc = "" - if os.path.exists(env['CM_COGNATA_ACCURACY_DUMP_FILE']): - with open(env['CM_COGNATA_ACCURACY_DUMP_FILE'], "r") as f: + if os.path.exists(env['MLC_COGNATA_ACCURACY_DUMP_FILE']): + with open(env['MLC_COGNATA_ACCURACY_DUMP_FILE'], "r") as f: acc = f.readline() result = acc else: @@ -74,26 +74,26 @@ def postprocess(i): valid = {'performance': True, 'accuracy': True} # its POC power_result = None # No power measurement in POC - # result, valid, power_result = mlperf_utils.get_result_from_log(env['CM_MLPERF_LAST_RELEASE'], model, scenario, output_dir, mode) + # result, valid, power_result = mlperf_utils.get_result_from_log(env['MLC_MLPERF_LAST_RELEASE'], model, scenario, output_dir, mode) if not state.get('mlc-mlperf-inference-results'): state['mlc-mlperf-inference-results'] = {} if not state.get('mlc-mlperf-inference-results-last'): state['mlc-mlperf-inference-results-last'] = {} if not state['mlc-mlperf-inference-results'].get( - state['CM_SUT_CONFIG_NAME']): - state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']] = {} - if not state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + state['MLC_SUT_CONFIG_NAME']): + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']] = {} + if not state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] ].get(model): - state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']][model] = {} - if not state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']][model] = {} + if not state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] ][model].get(scenario): - state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] ][model][scenario] = {} - state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] ][model][scenario][mode] = result - state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] ][model][scenario][mode + '_valid'] = valid.get(mode, False) state['mlc-mlperf-inference-results-last'][mode] = result diff --git a/script/app-mlperf-automotive/meta.yaml b/script/app-mlperf-automotive/meta.yaml index a999c0ee0..5f39eaac5 100644 --- a/script/app-mlperf-automotive/meta.yaml +++ b/script/app-mlperf-automotive/meta.yaml @@ -18,53 +18,53 @@ predeps: no # Default environment default_env: - CM_MLPERF_LOADGEN_MODE: accuracy - CM_MLPERF_LOADGEN_SCENARIO: Offline - CM_OUTPUT_FOLDER_NAME: test_results - CM_MLPERF_RUN_STYLE: test - CM_TEST_QUERY_COUNT: '10' - CM_MLPERF_QUANTIZATION: off - CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: reference - CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX: '' + MLC_MLPERF_LOADGEN_MODE: accuracy + MLC_MLPERF_LOADGEN_SCENARIO: Offline + MLC_OUTPUT_FOLDER_NAME: test_results + MLC_MLPERF_RUN_STYLE: test + MLC_TEST_QUERY_COUNT: '10' + MLC_MLPERF_QUANTIZATION: off + MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: reference + MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX: '' # Map script inputs to environment variables input_mapping: - device: CM_MLPERF_DEVICE - count: CM_MLPERF_LOADGEN_QUERY_COUNT - docker: CM_RUN_DOCKER_CONTAINER - hw_name: CM_HW_NAME + device: MLC_MLPERF_DEVICE + count: MLC_MLPERF_LOADGEN_QUERY_COUNT + docker: 
MLC_RUN_DOCKER_CONTAINER + hw_name: MLC_HW_NAME imagenet_path: IMAGENET_PATH - max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE - mode: CM_MLPERF_LOADGEN_MODE - num_threads: CM_NUM_THREADS - threads: CM_NUM_THREADS - dataset: CM_MLPERF_VISION_DATASET_OPTION - model: CM_MLPERF_CUSTOM_MODEL_PATH + max_batchsize: MLC_MLPERF_LOADGEN_MAX_BATCHSIZE + mode: MLC_MLPERF_LOADGEN_MODE + num_threads: MLC_NUM_THREADS + threads: MLC_NUM_THREADS + dataset: MLC_MLPERF_VISION_DATASET_OPTION + model: MLC_MLPERF_CUSTOM_MODEL_PATH output_dir: OUTPUT_BASE_DIR - power: CM_MLPERF_POWER - power_server: CM_MLPERF_POWER_SERVER_ADDRESS - ntp_server: CM_MLPERF_POWER_NTP_SERVER - max_amps: CM_MLPERF_POWER_MAX_AMPS - max_volts: CM_MLPERF_POWER_MAX_VOLTS - regenerate_files: CM_REGENERATE_MEASURE_FILES - rerun: CM_RERUN - scenario: CM_MLPERF_LOADGEN_SCENARIO - test_query_count: CM_TEST_QUERY_COUNT - clean: CM_MLPERF_CLEAN_SUBMISSION_DIR - dataset_args: CM_MLPERF_EXTRA_DATASET_ARGS - target_qps: CM_MLPERF_LOADGEN_TARGET_QPS - target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY - offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS - server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS - singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY - multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY - output: CM_MLPERF_OUTPUT_DIR + power: MLC_MLPERF_POWER + power_server: MLC_MLPERF_POWER_SERVER_ADDRESS + ntp_server: MLC_MLPERF_POWER_NTP_SERVER + max_amps: MLC_MLPERF_POWER_MAX_AMPS + max_volts: MLC_MLPERF_POWER_MAX_VOLTS + regenerate_files: MLC_REGENERATE_MEASURE_FILES + rerun: MLC_RERUN + scenario: MLC_MLPERF_LOADGEN_SCENARIO + test_query_count: MLC_TEST_QUERY_COUNT + clean: MLC_MLPERF_CLEAN_SUBMISSION_DIR + dataset_args: MLC_MLPERF_EXTRA_DATASET_ARGS + target_qps: MLC_MLPERF_LOADGEN_TARGET_QPS + target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY + offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS + server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS + singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY + multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY + output: MLC_MLPERF_OUTPUT_DIR # Env keys which are exposed to higher level scripts new_env_keys: - - CM_MLPERF_* - - CM_OUTPUT_PREDICTIONS_PATH + - MLC_MLPERF_* + - MLC_OUTPUT_PREDICTIONS_PATH new_state_keys: - mlc-mlperf-inference-results* @@ -103,7 +103,7 @@ docker: deps: - tags: get,abtf,scratch,space mounts: - - "${{ CM_ABTF_SCRATCH_PATH_DATASETS }}:${{ CM_ABTF_SCRATCH_PATH_DATASETS }}" + - "${{ MLC_ABTF_SCRATCH_PATH_DATASETS }}:${{ MLC_ABTF_SCRATCH_PATH_DATASETS }}" # Variations to customize dependencies @@ -114,15 +114,15 @@ variations: group: implementation default: true env: - CM_MLPERF_PYTHON: 'yes' - CM_MLPERF_IMPLEMENTATION: reference + MLC_MLPERF_PYTHON: 'yes' + MLC_MLPERF_IMPLEMENTATION: reference prehook_deps: - names: - python-reference-abtf-inference - abtf-inference-implementation tags: run-mlperf-inference,demo,abtf-model skip_if_env: - CM_SKIP_RUN: + MLC_SKIP_RUN: - yes @@ -130,29 +130,29 @@ variations: fast: group: execution-mode env: - CM_FAST_FACTOR: '5' - CM_OUTPUT_FOLDER_NAME: fast_results - CM_MLPERF_RUN_STYLE: fast + MLC_FAST_FACTOR: '5' + MLC_OUTPUT_FOLDER_NAME: fast_results + MLC_MLPERF_RUN_STYLE: fast test: group: execution-mode default: true env: - CM_OUTPUT_FOLDER_NAME: test_results - CM_MLPERF_RUN_STYLE: test + MLC_OUTPUT_FOLDER_NAME: test_results + MLC_MLPERF_RUN_STYLE: test valid: group: execution-mode env: - CM_OUTPUT_FOLDER_NAME: 
valid_results - CM_MLPERF_RUN_STYLE: valid + MLC_OUTPUT_FOLDER_NAME: valid_results + MLC_MLPERF_RUN_STYLE: valid # ML engine onnxruntime: group: framework env: - CM_MLPERF_BACKEND: onnxruntime + MLC_MLPERF_BACKEND: onnxruntime add_deps_recursive: abtf-inference-implementation: tags: _onnxruntime @@ -160,11 +160,11 @@ variations: onnxruntime,cpu: env: - CM_MLPERF_BACKEND_VERSION: <<>> + MLC_MLPERF_BACKEND_VERSION: <<>> onnxruntime,cuda: env: - CM_MLPERF_BACKEND_VERSION: <<>> + MLC_MLPERF_BACKEND_VERSION: <<>> ONNXRUNTIME_PREFERRED_EXECUTION_PROVIDER: "CUDAExecutionProvider" @@ -172,8 +172,8 @@ variations: group: framework default: true env: - CM_MLPERF_BACKEND: pytorch - CM_MLPERF_BACKEND_VERSION: <<>> + MLC_MLPERF_BACKEND: pytorch + MLC_MLPERF_BACKEND_VERSION: <<>> add_deps_recursive: abtf-inference-implementation: tags: _pytorch @@ -181,7 +181,7 @@ variations: abtf-demo-model: env: - CM_MODEL: retinanet + MLC_MODEL: retinanet group: models add_deps_recursive: abtf-inference-implementation: @@ -189,7 +189,7 @@ variations: abtf-poc-model: env: - CM_MODEL: retinanet + MLC_MODEL: retinanet default: true group: models add_deps_recursive: @@ -201,11 +201,11 @@ variations: names: - raw-dataset-mlcommons-cognata enable_if_env: - CM_DATASET_MLCOMMONS_COGNATA_DOWNLOAD_IN_HOST: + MLC_DATASET_MLCOMMONS_COGNATA_DOWNLOAD_IN_HOST: - yes mounts: - - "${{ CM_DATASET_MLCOMMONS_COGNATA_PATH }}:${{ CM_DATASET_MLCOMMONS_COGNATA_PATH }}" + - "${{ MLC_DATASET_MLCOMMONS_COGNATA_PATH }}:${{ MLC_DATASET_MLCOMMONS_COGNATA_PATH }}" # Target devices @@ -213,7 +213,7 @@ variations: group: device default: true env: - CM_MLPERF_DEVICE: cpu + MLC_MLPERF_DEVICE: cpu CUDA_VISIBLE_DEVICES: '' USE_CUDA: no USE_GPU: no @@ -224,7 +224,7 @@ variations: cuda: group: device env: - CM_MLPERF_DEVICE: gpu + MLC_MLPERF_DEVICE: gpu USE_CUDA: yes USE_GPU: yes add_deps_recursive: @@ -239,13 +239,13 @@ variations: # Loadgen scenarios offline: env: - CM_MLPERF_LOADGEN_SCENARIO: Offline + MLC_MLPERF_LOADGEN_SCENARIO: Offline add_deps_recursive: abtf-inference-implementation: tags: _offline multistream: env: - CM_MLPERF_LOADGEN_SCENARIO: MultiStream + MLC_MLPERF_LOADGEN_SCENARIO: MultiStream add_deps_recursive: abtf-inference-implementation: tags: _multistream @@ -253,35 +253,35 @@ variations: group: loadgen-scenario default: true env: - CM_MLPERF_LOADGEN_SCENARIO: SingleStream + MLC_MLPERF_LOADGEN_SCENARIO: SingleStream add_deps_recursive: abtf-inference-implementation: tags: _singlestream server: env: - CM_MLPERF_LOADGEN_SCENARIO: Server + MLC_MLPERF_LOADGEN_SCENARIO: Server add_deps_recursive: abtf-inference-implementation: tags: _server mvp-demo: env: - CM_ABTF_MVP_DEMO: yes - CM_MLPERF_VISION_DATASET_OPTION: cognata-8mp-pt - CM_ABTF_ML_MODEL_CONFIG: baseline_8MP_ss_scales_all - CM_ABTF_NUM_CLASSES: 15 - CM_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS: 10002_Urban_Clear_Morning - CM_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES: Cognata_Camera_01_8M - CM_ABTF_ML_MODEL_TRAINING_FORCE_COGNATA_LABELS: 'yes' - CM_ABTF_ML_MODEL_SKIP_WARMUP: 'yes' + MLC_ABTF_MVP_DEMO: yes + MLC_MLPERF_VISION_DATASET_OPTION: cognata-8mp-pt + MLC_ABTF_ML_MODEL_CONFIG: baseline_8MP_ss_scales_all + MLC_ABTF_NUM_CLASSES: 15 + MLC_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS: 10002_Urban_Clear_Morning + MLC_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES: Cognata_Camera_01_8M + MLC_ABTF_ML_MODEL_TRAINING_FORCE_COGNATA_LABELS: 'yes' + MLC_ABTF_ML_MODEL_SKIP_WARMUP: 'yes' poc-demo: env: - CM_ABTF_POC_DEMO: yes - CM_MLPERF_VISION_DATASET_OPTION: cognata-8mp-pt - CM_ABTF_ML_MODEL_CONFIG: 
baseline_8MP_ss_scales_fm1_5x5_all - CM_ABTF_NUM_CLASSES: 15 - CM_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS: 10002_Urban_Clear_Morning - CM_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES: Cognata_Camera_01_8M - CM_ABTF_ML_MODEL_TRAINING_FORCE_COGNATA_LABELS: 'yes' - CM_ABTF_ML_MODEL_SKIP_WARMUP: 'yes' + MLC_ABTF_POC_DEMO: yes + MLC_MLPERF_VISION_DATASET_OPTION: cognata-8mp-pt + MLC_ABTF_ML_MODEL_CONFIG: baseline_8MP_ss_scales_fm1_5x5_all + MLC_ABTF_NUM_CLASSES: 15 + MLC_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS: 10002_Urban_Clear_Morning + MLC_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES: Cognata_Camera_01_8M + MLC_ABTF_ML_MODEL_TRAINING_FORCE_COGNATA_LABELS: 'yes' + MLC_ABTF_ML_MODEL_SKIP_WARMUP: 'yes' diff --git a/script/app-mlperf-inference-amd/customize.py b/script/app-mlperf-inference-amd/customize.py index 16d6245ee..c2945f45c 100644 --- a/script/app-mlperf-inference-amd/customize.py +++ b/script/app-mlperf-inference-amd/customize.py @@ -11,31 +11,31 @@ def preprocess(i): return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] - if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": + if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes": return {'return': 0} - env['CM_MLPERF_AMD_SCRIPT_PATH'] = env['CM_TMP_CURRENT_SCRIPT_PATH'] - env['CM_MLPERF_AMD_CODE_PATH'] = os.path.join( - env['CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO'], "closed", "AMD") + env['MLC_MLPERF_AMD_SCRIPT_PATH'] = env['MLC_TMP_CURRENT_SCRIPT_PATH'] + env['MLC_MLPERF_AMD_CODE_PATH'] = os.path.join( + env['MLC_MLPERF_INFERENCE_IMPLEMENTATION_REPO'], "closed", "AMD") - if 'CM_MODEL' not in env: + if 'MLC_MODEL' not in env: return { 'return': 1, 'error': 'Please select a variation specifying the model to run'} - if 'CM_MLPERF_BACKEND' not in env: + if 'MLC_MLPERF_BACKEND' not in env: return {'return': 1, 'error': 'Please select a variation specifying the backend'} - if 'CM_MLPERF_DEVICE' not in env: + if 'MLC_MLPERF_DEVICE' not in env: return { 'return': 1, 'error': 'Please select a variation specifying the device to run on'} - if "llama2" in env['CM_MODEL']: - env['CM_RUN_DIR'] = i['run_script_input']['path'] - env['CM_MLPERF_AMD_LLAMA2_CODE_PATH'] = os.path.join( - env['CM_MLPERF_AMD_CODE_PATH'], "llama2-70b-99.9/VllmFp8") - env['CM_RUN_CMD'] = "bash run-llama2.sh " + if "llama2" in env['MLC_MODEL']: + env['MLC_RUN_DIR'] = i['run_script_input']['path'] + env['MLC_MLPERF_AMD_LLAMA2_CODE_PATH'] = os.path.join( + env['MLC_MLPERF_AMD_CODE_PATH'], "llama2-70b-99.9/VllmFp8") + env['MLC_RUN_CMD'] = "bash run-llama2.sh " else: return {'return': 1, 'error': 'Model {} not supported'.format( - env['CM_MODEL'])} + env['MLC_MODEL'])} return {'return': 0} # return {'return':1, 'error': 'Run command needs to be tested'} diff --git a/script/app-mlperf-inference-amd/meta.yaml b/script/app-mlperf-inference-amd/meta.yaml index f073011f8..2c3b6d063 100644 --- a/script/app-mlperf-inference-amd/meta.yaml +++ b/script/app-mlperf-inference-amd/meta.yaml @@ -21,51 +21,51 @@ tags: # Default environment default_env: - CM_MLPERF_LOADGEN_SCENARIO: Offline - CM_MLPERF_LOADGEN_MODE: performance - CM_SKIP_PREPROCESS_DATASET: 'no' - CM_SKIP_MODEL_DOWNLOAD: 'no' - CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: redhat_harness - CM_MLPERF_SKIP_RUN: 'no' + MLC_MLPERF_LOADGEN_SCENARIO: Offline + MLC_MLPERF_LOADGEN_MODE: performance + MLC_SKIP_PREPROCESS_DATASET: 'no' + MLC_SKIP_MODEL_DOWNLOAD: 'no' + MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: redhat_harness + MLC_MLPERF_SKIP_RUN: 'no' env: - CM_CALL_MLPERF_RUNNER: 'no' + MLC_CALL_MLPERF_RUNNER: 'no' # Map 
script inputs to environment variables input_mapping: - count: CM_MLPERF_LOADGEN_QUERY_COUNT - max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE - mlperf_conf: CM_MLPERF_CONF - mode: CM_MLPERF_LOADGEN_MODE - output_dir: CM_MLPERF_OUTPUT_DIR - performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT - scenario: CM_MLPERF_LOADGEN_SCENARIO - user_conf: CM_MLPERF_USER_CONF - skip_preprocess: CM_SKIP_PREPROCESS_DATASET - skip_preprocessing: CM_SKIP_PREPROCESS_DATASET - target_qps: CM_MLPERF_LOADGEN_TARGET_QPS - offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS - server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS - target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY - singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY - multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY - performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT - rerun: CM_RERUN - results_repo: CM_MLPERF_INFERENCE_RESULTS_REPO + count: MLC_MLPERF_LOADGEN_QUERY_COUNT + max_batchsize: MLC_MLPERF_LOADGEN_MAX_BATCHSIZE + mlperf_conf: MLC_MLPERF_CONF + mode: MLC_MLPERF_LOADGEN_MODE + output_dir: MLC_MLPERF_OUTPUT_DIR + performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT + scenario: MLC_MLPERF_LOADGEN_SCENARIO + user_conf: MLC_MLPERF_USER_CONF + skip_preprocess: MLC_SKIP_PREPROCESS_DATASET + skip_preprocessing: MLC_SKIP_PREPROCESS_DATASET + target_qps: MLC_MLPERF_LOADGEN_TARGET_QPS + offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS + server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS + target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY + singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY + multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY + performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT + rerun: MLC_RERUN + results_repo: MLC_MLPERF_INFERENCE_RESULTS_REPO new_state_keys: - mlperf-inference-implementation - - CM_SUT_* + - MLC_SUT_* # Env keys which are exposed to higher level scripts new_env_keys: - - CM_MLPERF_* - - CM_DATASET_* - - CM_HW_NAME - - CM_ML_MODEL_* - - CM_MAX_EXAMPLES - - CM_IMAGENET_ACCURACY_DTYPE - - CM_SQUAD_ACCURACY_DTYPE + - MLC_MLPERF_* + - MLC_DATASET_* + - MLC_HW_NAME + - MLC_ML_MODEL_* + - MLC_MAX_EXAMPLES + - MLC_IMAGENET_ACCURACY_DTYPE + - MLC_SQUAD_ACCURACY_DTYPE # Dependencies on other CM scripts @@ -111,9 +111,9 @@ deps: - inference-code update_tags_from_env_with_prefix: _repo.: - - CM_MLPERF_INFERENCE_RESULTS_REPO + - MLC_MLPERF_INFERENCE_RESULTS_REPO env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_MLPERF_INFERENCE_IMPLEMENTATION_REPO extra_cache_tags: results,repo,mlperf # Post dependencies to run this app including for power measurement @@ -123,7 +123,7 @@ post_deps: - runner - mlperf-runner skip_if_env: - CM_MLPERF_SKIP_RUN: + MLC_MLPERF_SKIP_RUN: - 'yes' - yes tags: benchmark-mlperf @@ -139,29 +139,29 @@ variations: group: device default: true env: - CM_MLPERF_DEVICE: cpu + MLC_MLPERF_DEVICE: cpu cuda: group: device env: - CM_MLPERF_DEVICE: gpu - CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart + MLC_MLPERF_DEVICE: gpu + MLC_MLPERF_DEVICE_LIB_NAMESPEC: cudart rocm: group: device env: - CM_MLPERF_DEVICE: rocm - CM_MLPERF_DEVICE_LIB_NAMESPEC: rocm + MLC_MLPERF_DEVICE: rocm + MLC_MLPERF_DEVICE_LIB_NAMESPEC: rocm openshift: group: backend default: true env: - CM_MLPERF_BACKEND: openshift + MLC_MLPERF_BACKEND: openshift pytorch: group: backend env: - CM_MLPERF_BACKEND: pytorch + 
MLC_MLPERF_BACKEND: pytorch pytorch,cuda: deps: @@ -184,14 +184,14 @@ variations: group: model default: true env: - CM_MODEL: resnet50 + MLC_MODEL: resnet50 retinanet: group: model base: - bs.1 env: - CM_MODEL: retinanet + MLC_MODEL: retinanet bert_: {} bert-99: group: model base: - bert_ env: - CM_MODEL: bert-99 - CM_SQUAD_ACCURACY_DTYPE: float32 + MLC_MODEL: bert-99 + MLC_SQUAD_ACCURACY_DTYPE: float32 bert-99.9: group: model base: - bert_ env: - CM_MODEL: bert-99.9 + MLC_MODEL: bert-99.9 gptj_: deps: @@ -241,15 +241,15 @@ variations: base: - gptj_ env: - CM_MODEL: gptj-99 - CM_SQUAD_ACCURACY_DTYPE: float32 + MLC_MODEL: gptj-99 + MLC_SQUAD_ACCURACY_DTYPE: float32 gptj-99.9: group: model base: - gptj_ env: - CM_MODEL: gptj-99.9 + MLC_MODEL: gptj-99.9 llama2-70b_: deps: @@ -259,42 +259,42 @@ variations: - tags: get,preprocessed,dataset,openorca,_mlc,_validation - tags: get,ml-model,llama2,_amd,_pytorch skip_if_env: - CM_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST: + MLC_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST: - 'yes' - CM_RUN_STATE_DOCKER: + MLC_RUN_STATE_DOCKER: - 'yes' - tags: get,preprocessed,dataset,openorca,_mlc,_validation - tags: download,file,_url.https://github.com/vllm-project/vllm/blob/38c4b7e863570a045308af814c72f4504297222e/tests/fp8_kv/llama2-70b-fp8-kv/kv_cache_scales.json extra_cache_tags: llama2-scales,kv-cache force_cache: true env: - CM_DOWNLOAD_FINAL_ENV_NAME: QUANTIZATION_PARAM_PATH + MLC_DOWNLOAD_FINAL_ENV_NAME: QUANTIZATION_PARAM_PATH - tags: get,generic-python-lib,_package.vllm names: - vllm - tags: get,git,repo,_repo.https://github.com/mlcommons/inference_results_v4.1,_branch.cm-code-only extra_cache_tags: inference,results env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_INFERENCE_RESULTS_PATH + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_MLPERF_INFERENCE_RESULTS_PATH llama2-70b-99: group: model base: - llama2-70b_ env: - CM_MODEL: llama2-70b-99 + MLC_MODEL: llama2-70b-99 llama2-70b-99.9: group: model base: - llama2-70b_ env: - CM_MODEL: llama2-70b-99.9 + MLC_MODEL: llama2-70b-99.9 singlestream: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: SingleStream + MLC_MLPERF_LOADGEN_SCENARIO: SingleStream singlestream,resnet50: default_variations: @@ -307,17 +307,17 @@ variations: multistream: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: MultiStream + MLC_MLPERF_LOADGEN_SCENARIO: MultiStream offline: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: Offline + MLC_MLPERF_LOADGEN_SCENARIO: Offline server: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: Server + MLC_MLPERF_LOADGEN_SCENARIO: Server uint8: group: precision @@ -330,12 +330,12 @@ variations: group: version default: true env: - CM_MLPERF_INFERENCE_RESULTS_REPO: https://github.com/mlcommons/inference_results_v4.0 + MLC_MLPERF_INFERENCE_RESULTS_REPO: https://github.com/mlcommons/inference_results_v4.0 r4.1_default: group: version env: - CM_MLPERF_INFERENCE_RESULTS_REPO: https://github.com/mlcommons/inference_results_v4.1 + MLC_MLPERF_INFERENCE_RESULTS_REPO: https://github.com/mlcommons/inference_results_v4.1 docker: real_run: False diff --git a/script/app-mlperf-inference-amd/run-llama2.sh b/script/app-mlperf-inference-amd/run-llama2.sh index 10f36f8ca..a056a713b 100644 ---
a/script/app-mlperf-inference-amd/run-llama2.sh +++ b/script/app-mlperf-inference-amd/run-llama2.sh @@ -17,21 +17,21 @@ export HARNESS_DISABLE_VLLM_LOGS=1 export VLLM_LOGGING_LEVEL=ERROR MODEL_PATH=${LLAMA2_CHECKPOINT_PATH:-/data/llm/llama2-70b-chat/} -DATASET_PATH=${CM_DATASET_OPENORCA_PREPROCESSED_PATH:-/data/open_orca/open_orca_gpt4_tokenized_llama.sampled_24576.pkl.gz} -QUANTIZED_WEIGHTS_PATH=${CM_LLAMA2_FINAL_SAFE_TENSORS_PATH:-quantized/quark_share/modelzoo/llama2_70b_wfp8_afp8_ofp8_nomerge/json-safetensors/llama.safetensors} +DATASET_PATH=${MLC_DATASET_OPENORCA_PREPROCESSED_PATH:-/data/open_orca/open_orca_gpt4_tokenized_llama.sampled_24576.pkl.gz} +QUANTIZED_WEIGHTS_PATH=${MLC_LLAMA2_FINAL_SAFE_TENSORS_PATH:-quantized/quark_share/modelzoo/llama2_70b_wfp8_afp8_ofp8_nomerge/json-safetensors/llama.safetensors} QUANTIZATION_PARAM_PATH=${QUANTIZATION_PARAM_PATH:-/app/kv_cache_scales.json} -MLPERF_CONF="${CM_MLPERF_CONF:-/app/mlperf_inference/mlperf.conf}" -USER_CONF="${CM_MLPERF_USER_CONF:-/lab-mlperf-inference/code/llama2-70b-99.9/mlperf_config_VllmFp8/user.conf}" +MLPERF_CONF="${MLC_MLPERF_CONF:-/app/mlperf_inference/mlperf.conf}" +USER_CONF="${MLC_MLPERF_USER_CONF:-/lab-mlperf-inference/code/llama2-70b-99.9/mlperf_config_VllmFp8/user.conf}" SUBMISSION=${SUBMISSION:-0} -LOG_DIR=${CM_MLPERF_OUTPUT_DIR} +LOG_DIR=${MLC_MLPERF_OUTPUT_DIR} cp $USER_CONF ${LOG_DIR}/user.conf COMMON_CMD_OPTIONS="\ - --scenario ${CM_MLPERF_LOADGEN_SCENARIO} \ + --scenario ${MLC_MLPERF_LOADGEN_SCENARIO} \ --output-log-dir ${LOG_DIR} \ --model-path $MODEL_PATH \ --mlperf-conf $MLPERF_CONF \ @@ -48,16 +48,16 @@ COMMON_CMD_OPTIONS="\ --quantized-weights-path ${QUANTIZED_WEIGHTS_PATH} \ --quantization-param-path ${QUANTIZATION_PARAM_PATH}" -if [ "${CM_MLPERF_LOADGEN_MODE}" == "accuracy" ]; then +if [ "${MLC_MLPERF_LOADGEN_MODE}" == "accuracy" ]; then COMMON_CMD_OPTIONS+=" --accuracy" fi -if [ "${CM_MLPERF_LOADGEN_SCENARIO}" == "Offline" ]; then +if [ "${MLC_MLPERF_LOADGEN_SCENARIO}" == "Offline" ]; then WD=${WD:-0} SORTING=${SORTING:-descending} #ascending #descending #lexicographic #skip export VLLM_SCHED_PREFILL_KVC_FREEPCT=31.0 # generate run command - cmd="${CM_PYTHON_BIN_WITH_PATH} ${CM_MLPERF_AMD_LLAMA2_CODE_PATH}/mainVllmFp8_Offline.py \ + cmd="${MLC_PYTHON_BIN_WITH_PATH} ${MLC_MLPERF_AMD_LLAMA2_CODE_PATH}/mainVllmFp8_Offline.py \ ${COMMON_CMD_OPTIONS} \ --warmup-duration ${WD} \ --sorting ${SORTING} \ @@ -65,7 +65,7 @@ if [ "${CM_MLPERF_LOADGEN_SCENARIO}" == "Offline" ]; then --gpu-memory-utilization 0.99" else # generate run command - cmd="${CM_PYTHON_BIN_WITH_PATH} ${CM_MLPERF_AMD_LLAMA2_CODE_PATH}/mainVllmFp8_SyncServer.py \ + cmd="${MLC_PYTHON_BIN_WITH_PATH} ${MLC_MLPERF_AMD_LLAMA2_CODE_PATH}/mainVllmFp8_SyncServer.py \ ${COMMON_CMD_OPTIONS} \ --enable-warm-up \ --enable-batcher" diff --git a/script/app-mlperf-inference-amd/run.sh b/script/app-mlperf-inference-amd/run.sh index ddcd0b550..0c6a8fc4a 100644 --- a/script/app-mlperf-inference-amd/run.sh +++ b/script/app-mlperf-inference-amd/run.sh @@ -1,7 +1,7 @@ #!/bin/bash -if [[ ${CM_CALL_MLPERF_RUNNER} == "no" ]]; then - cd ${CM_RUN_DIR} - cmd=${CM_RUN_CMD} +if [[ ${MLC_CALL_MLPERF_RUNNER} == "no" ]]; then + cd ${MLC_RUN_DIR} + cmd=${MLC_RUN_CMD} echo "${cmd}" eval "${cmd}" test $? -eq 0 || exit $? 
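The run.sh change just above preserves the wrapper's existing contract: when MLC_CALL_MLPERF_RUNNER is "no", it changes into MLC_RUN_DIR and evals MLC_RUN_CMD, both of which the corresponding customize.py prepared during preprocessing. A minimal Python sketch of that same contract, assuming only the three renamed variables shown above (the helper name run_prepared_cmd is illustrative and not part of this patch):

    import subprocess

    def run_prepared_cmd(env):
        # Mirrors run.sh: execute only when the outer MLPerf runner is bypassed
        if env.get('MLC_CALL_MLPERF_RUNNER', '') == 'no':
            # customize.py stored the working directory and the full command line here;
            # check=True plays the role of the `test $? -eq 0 || exit $?` guard
            subprocess.run(env['MLC_RUN_CMD'], shell=True,
                           cwd=env['MLC_RUN_DIR'], check=True)
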
diff --git a/script/app-mlperf-inference-ctuning-cpp-tflite/armnn/classification.cpp b/script/app-mlperf-inference-ctuning-cpp-tflite/armnn/classification.cpp index c641e9d1e..5ba78b0ca 100644 --- a/script/app-mlperf-inference-ctuning-cpp-tflite/armnn/classification.cpp +++ b/script/app-mlperf-inference-ctuning-cpp-tflite/armnn/classification.cpp @@ -56,10 +56,10 @@ class Program { public: Program () : runtime( armnn::IRuntime::Create(options) ) { - bool use_neon = getenv_b("CM_MLPERF_TFLITE_USE_NEON"); - bool use_opencl = getenv_b("CM_MLPERF_TFLITE_USE_OPENCL"); - string input_layer_name = getenv_s("CM_ML_MODEL_INPUT_LAYER_NAME"); - string output_layer_name = getenv_s("CM_ML_MODEL_OUTPUT_LAYER_NAME"); + bool use_neon = getenv_b("MLC_MLPERF_TFLITE_USE_NEON"); + bool use_opencl = getenv_b("MLC_MLPERF_TFLITE_USE_OPENCL"); + string input_layer_name = getenv_s("MLC_ML_MODEL_INPUT_LAYER_NAME"); + string output_layer_name = getenv_s("MLC_ML_MODEL_OUTPUT_LAYER_NAME"); settings = new BenchmarkSettings(MODEL_TYPE::LITE); @@ -333,14 +333,14 @@ void TestSingleStream(Program *prg) { SystemUnderTestSingleStream sut(prg); QuerySampleLibrarySingleStream qsl(prg); - const std::string mlperf_conf_path = getenv_s("CM_MLPERF_CONF"); - const std::string user_conf_path = getenv_s("CM_MLPERF_USER_CONF"); + const std::string mlperf_conf_path = getenv_s("MLC_MLPERF_CONF"); + const std::string user_conf_path = getenv_s("MLC_MLPERF_USER_CONF"); - std::string model_name = getenv_opt_s("CM_MODEL", "unknown_model"); - std::string logs_dir = getenv_opt_s("CM_MLPERF_LOADGEN_LOGS_DIR", ""); + std::string model_name = getenv_opt_s("MLC_MODEL", "unknown_model"); + std::string logs_dir = getenv_opt_s("MLC_MLPERF_LOADGEN_LOGS_DIR", ""); - const std::string scenario_string = getenv_s("CM_MLPERF_LOADGEN_SCENARIO"); - const std::string mode_string = getenv_s("CM_MLPERF_LOADGEN_MODE"); + const std::string scenario_string = getenv_s("MLC_MLPERF_LOADGEN_SCENARIO"); + const std::string mode_string = getenv_s("MLC_MLPERF_LOADGEN_MODE"); std::cout << "Path to mlperf.conf : " << mlperf_conf_path << std::endl; std::cout << "Path to user.conf : " << user_conf_path << std::endl; diff --git a/script/app-mlperf-inference-ctuning-cpp-tflite/customize.py b/script/app-mlperf-inference-ctuning-cpp-tflite/customize.py index 8589a8241..8bca479d2 100644 --- a/script/app-mlperf-inference-ctuning-cpp-tflite/customize.py +++ b/script/app-mlperf-inference-ctuning-cpp-tflite/customize.py @@ -11,30 +11,30 @@ def preprocess(i): return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] - if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": + if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes": return {'return': 0} - if 'CM_MODEL' not in env: + if 'MLC_MODEL' not in env: return { 'return': 1, 'error': 'Please select a variation specifying the model to run'} - if 'CM_MLPERF_BACKEND' not in env: + if 'MLC_MLPERF_BACKEND' not in env: return {'return': 1, 'error': 'Please select a variation specifying the backend'} - if 'CM_MLPERF_DEVICE' not in env: + if 'MLC_MLPERF_DEVICE' not in env: return { 'return': 1, 'error': 'Please select a variation specifying the device to run on'} source_files = [] script_path = i['run_script_input']['path'] - env['CM_SOURCE_FOLDER_PATH'] = os.path.join( - script_path, env['CM_TMP_SRC_FOLDER']) + env['MLC_SOURCE_FOLDER_PATH'] = os.path.join( + script_path, env['MLC_TMP_SRC_FOLDER']) - for file in os.listdir(env['CM_SOURCE_FOLDER_PATH']): + for file in os.listdir(env['MLC_SOURCE_FOLDER_PATH']): if 
file.endswith(".c") or file.endswith(".cpp"): source_files.append(file) - env['CM_CXX_SOURCE_FILES'] = ";".join(source_files) + env['MLC_CXX_SOURCE_FILES'] = ";".join(source_files) if '+CPLUS_INCLUDE_PATH' not in env: env['+CPLUS_INCLUDE_PATH'] = [] @@ -43,24 +43,24 @@ def preprocess(i): env['+C_INCLUDE_PATH'].append(os.path.join(script_path, "inc")) # TODO: get cuda path ugly fix - if env['CM_MLPERF_DEVICE'] == 'gpu': - env['+C_INCLUDE_PATH'].append(env['CM_CUDA_PATH_INCLUDE']) - env['+CPLUS_INCLUDE_PATH'].append(env['CM_CUDA_PATH_INCLUDE']) - env['+LD_LIBRARY_PATH'].append(env['CM_CUDA_PATH_LIB']) - env['+DYLD_FALLBACK_LIBRARY_PATH'].append(env['CM_CUDA_PATH_INCLUDE']) + if env['MLC_MLPERF_DEVICE'] == 'gpu': + env['+C_INCLUDE_PATH'].append(env['MLC_CUDA_PATH_INCLUDE']) + env['+CPLUS_INCLUDE_PATH'].append(env['MLC_CUDA_PATH_INCLUDE']) + env['+LD_LIBRARY_PATH'].append(env['MLC_CUDA_PATH_LIB']) + env['+DYLD_FALLBACK_LIBRARY_PATH'].append(env['MLC_CUDA_PATH_INCLUDE']) if '+ CXXFLAGS' not in env: env['+ CXXFLAGS'] = [] env['+ CXXFLAGS'].append("-std=c++17") - # add preprocessor flag like "#define CM_MODEL_RESNET50" - env['+ CXXFLAGS'].append('-DCM_MODEL_' + env['CM_MODEL'].upper()) - # add preprocessor flag like "#define CM_MLPERF_BACKEND_ONNXRUNTIME" - env['+ CXXFLAGS'].append('-DCM_MLPERF_BACKEND_' + - env['CM_MLPERF_BACKEND'].upper()) - # add preprocessor flag like "#define CM_MLPERF_DEVICE_CPU" - env['+ CXXFLAGS'].append('-DCM_MLPERF_DEVICE_' + - env['CM_MLPERF_DEVICE'].upper()) + # add preprocessor flag like "#define MLC_MODEL_RESNET50" + env['+ CXXFLAGS'].append('-DMLC_MODEL_' + env['MLC_MODEL'].upper()) + # add preprocessor flag like "#define MLC_MLPERF_BACKEND_ONNXRUNTIME" + env['+ CXXFLAGS'].append('-DMLC_MLPERF_BACKEND_' + + env['MLC_MLPERF_BACKEND'].upper()) + # add preprocessor flag like "#define MLC_MLPERF_DEVICE_CPU" + env['+ CXXFLAGS'].append('-DMLC_MLPERF_DEVICE_' + + env['MLC_MLPERF_DEVICE'].upper()) if '+ LDCXXFLAGS' not in env: env['+ LDCXXFLAGS'] = [] @@ -70,33 +70,33 @@ def preprocess(i): "-lpthread" ] # e.g. -lonnxruntime - if 'CM_MLPERF_BACKEND_LIB_NAMESPEC' in env: + if 'MLC_MLPERF_BACKEND_LIB_NAMESPEC' in env: env['+ LDCXXFLAGS'].append('-l' + - env['CM_MLPERF_BACKEND_LIB_NAMESPEC']) + env['MLC_MLPERF_BACKEND_LIB_NAMESPEC']) # e.g. 
-lcudart - if 'CM_MLPERF_DEVICE_LIB_NAMESPEC' in env: - env['+ LDCXXFLAGS'].append('-l' + env['CM_MLPERF_DEVICE_LIB_NAMESPEC']) + if 'MLC_MLPERF_DEVICE_LIB_NAMESPEC' in env: + env['+ LDCXXFLAGS'].append('-l' + env['MLC_MLPERF_DEVICE_LIB_NAMESPEC']) - if env.get('CM_TMP_LINK_LIBS', []): - libs = env['CM_TMP_LINK_LIBS'].split(",") + if env.get('MLC_TMP_LINK_LIBS', []): + libs = env['MLC_TMP_LINK_LIBS'].split(",") for lib in libs: env['+ LDCXXFLAGS'].append(' -l' + lib) - env['CM_LINKER_LANG'] = 'CXX' - env['CM_RUN_DIR'] = os.getcwd() + env['MLC_LINKER_LANG'] = 'CXX' + env['MLC_RUN_DIR'] = os.getcwd() - if 'CM_MLPERF_CONF' not in env: - env['CM_MLPERF_CONF'] = os.path.join( - env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") - if 'CM_MLPERF_USER_CONF' not in env: - env['CM_MLPERF_USER_CONF'] = os.path.join( - env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf") + if 'MLC_MLPERF_CONF' not in env: + env['MLC_MLPERF_CONF'] = os.path.join( + env['MLC_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") + if 'MLC_MLPERF_USER_CONF' not in env: + env['MLC_MLPERF_USER_CONF'] = os.path.join( + env['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf") - if env.get('CM_DATASET_COMPRESSED', "no").lower() in [ - "yes", "on", "true"] and "float" in env.get('CM_MLPERF_MODEL_PRECISION', ''): + if env.get('MLC_DATASET_COMPRESSED', "no").lower() in [ + "yes", "on", "true"] and "float" in env.get('MLC_MLPERF_MODEL_PRECISION', ''): # Use all cores for input preprocessing - env['CM_HOST_USE_ALL_CORES'] = "yes" - env['CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX2'] = "with_live_preprocessing" + env['MLC_HOST_USE_ALL_CORES'] = "yes" + env['MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX2'] = "with_live_preprocessing" return {'return': 0} diff --git a/script/app-mlperf-inference-ctuning-cpp-tflite/inc/benchmark.h b/script/app-mlperf-inference-ctuning-cpp-tflite/inc/benchmark.h index 76f1209a8..c63a4e221 100644 --- a/script/app-mlperf-inference-ctuning-cpp-tflite/inc/benchmark.h +++ b/script/app-mlperf-inference-ctuning-cpp-tflite/inc/benchmark.h @@ -98,25 +98,25 @@ class Accumulator { class BenchmarkSettings { public: - const std::string images_dir = getenv_s("CM_DATASET_PREPROCESSED_PATH"); - const std::string available_images_file = getenv_s("CM_DATASET_PREPROCESSED_IMAGES_LIST"); - const bool skip_internal_preprocessing = (getenv_opt_s("CM_DATASET_COMPRESSED", "off") == "off"); - const std::string result_dir = getenv_s("CM_MLPERF_OUTPUT_DIR"); - const std::string input_layer_name = getenv_s("CM_ML_MODEL_INPUT_LAYER_NAME"); - const std::string output_layer_name = getenv_s("CM_ML_MODEL_OUTPUT_LAYER_NAME"); - const int images_in_memory_max = getenv_i("CM_LOADGEN_BUFFER_SIZE"); - const int image_size = getenv_i("CM_DATASET_INPUT_SQUARE_SIDE"); + const std::string images_dir = getenv_s("MLC_DATASET_PREPROCESSED_PATH"); + const std::string available_images_file = getenv_s("MLC_DATASET_PREPROCESSED_IMAGES_LIST"); + const bool skip_internal_preprocessing = (getenv_opt_s("MLC_DATASET_COMPRESSED", "off") == "off"); + const std::string result_dir = getenv_s("MLC_MLPERF_OUTPUT_DIR"); + const std::string input_layer_name = getenv_s("MLC_ML_MODEL_INPUT_LAYER_NAME"); + const std::string output_layer_name = getenv_s("MLC_ML_MODEL_OUTPUT_LAYER_NAME"); + const int images_in_memory_max = getenv_i("MLC_LOADGEN_BUFFER_SIZE"); + const int image_size = getenv_i("MLC_DATASET_INPUT_SQUARE_SIDE"); const int batch_size = 1; const int num_channels = 3; const int num_classes = 1000; - const bool normalize_img = 
getenv_b("CM_ML_MODEL_NORMALIZE_DATA"); + const bool normalize_img = getenv_b("MLC_ML_MODEL_NORMALIZE_DATA"); - const bool subtract_mean = getenv_b("CM_ML_MODEL_SUBTRACT_MEANS"); - const char *given_channel_means_str = getenv("CM_ML_MODEL_GIVEN_CHANNEL_MEANS"); + const bool subtract_mean = getenv_b("MLC_ML_MODEL_SUBTRACT_MEANS"); + const char *given_channel_means_str = getenv("MLC_ML_MODEL_GIVEN_CHANNEL_MEANS"); - const bool trigger_cold_run = getenv_b("CM_MLPERF_LOADGEN_TRIGGER_COLD_RUN"); + const bool trigger_cold_run = getenv_b("MLC_MLPERF_LOADGEN_TRIGGER_COLD_RUN"); - const int verbosity_level = getenv_i("CM_VERBOSE"); + const int verbosity_level = getenv_i("MLC_VERBOSE"); BenchmarkSettings(enum MODEL_TYPE mode = MODEL_TYPE::LITE) { @@ -130,11 +130,11 @@ class BenchmarkSettings { switch (mode) { case MODEL_TYPE::LITE: - _graph_file = getenv_s("CM_ML_MODEL_FILE_WITH_PATH"); + _graph_file = getenv_s("MLC_ML_MODEL_FILE_WITH_PATH"); break; case MODEL_TYPE::TF_FROZEN: - _graph_file = getenv_s("CM_ML_MODEL_FILE_WITH_PATH"); + _graph_file = getenv_s("MLC_ML_MODEL_FILE_WITH_PATH"); break; default: @@ -144,13 +144,13 @@ class BenchmarkSettings { }; _number_of_threads = std::thread::hardware_concurrency(); - if (getenv_opt_s("CM_HOST_USE_ALL_CORES", "no") != "yes") { + if (getenv_opt_s("MLC_HOST_USE_ALL_CORES", "no") != "yes") { _number_of_threads = _number_of_threads < 1 ? 1 : _number_of_threads; - _number_of_threads = !getenv("CM_HOST_CPU_TOTAL_CORES") + _number_of_threads = !getenv("MLC_HOST_CPU_TOTAL_CORES") ? _number_of_threads - : getenv_i("CM_HOST_CPU_TOTAL_CORES"); - if (getenv_i("CM_HOST_CPU_TOTAL_CORES") && getenv_i("CM_HOST_CPU_THREADS_PER_CORE")) { - _number_of_threads = getenv_i("CM_HOST_CPU_TOTAL_CORES") / getenv_i("CM_HOST_CPU_THREADS_PER_CORE"); + : getenv_i("MLC_HOST_CPU_TOTAL_CORES"); + if (getenv_i("MLC_HOST_CPU_TOTAL_CORES") && getenv_i("MLC_HOST_CPU_THREADS_PER_CORE")) { + _number_of_threads = getenv_i("MLC_HOST_CPU_TOTAL_CORES") / getenv_i("MLC_HOST_CPU_THREADS_PER_CORE"); } } // Print settings diff --git a/script/app-mlperf-inference-ctuning-cpp-tflite/meta.yaml b/script/app-mlperf-inference-ctuning-cpp-tflite/meta.yaml index e66ae2bac..815a2a152 100644 --- a/script/app-mlperf-inference-ctuning-cpp-tflite/meta.yaml +++ b/script/app-mlperf-inference-ctuning-cpp-tflite/meta.yaml @@ -3,27 +3,27 @@ automation_alias: script automation_uid: 5b4e0237da074764 category: Modular MLPerf inference benchmark pipeline default_env: - CM_DATASET_COMPRESSED: 'off' - CM_DATASET_INPUT_SQUARE_SIDE: '224' - CM_FAST_COMPILATION: 'yes' - CM_LOADGEN_BUFFER_SIZE: '1024' - CM_MLPERF_LOADGEN_MODE: accuracy - CM_MLPERF_LOADGEN_SCENARIO: SingleStream - CM_MLPERF_LOADGEN_TRIGGER_COLD_RUN: '0' - CM_MLPERF_OUTPUT_DIR: . - CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: tflite_cpp - CM_MLPERF_TFLITE_USE_NEON: '0' - CM_MLPERF_TFLITE_USE_OPENCL: '0' - CM_ML_MODEL_GIVEN_CHANNEL_MEANS: 123.68 116.78 103.94 - CM_ML_MODEL_NORMALIZE_DATA: '0' - CM_ML_MODEL_SUBTRACT_MEANS: '1' - CM_VERBOSE: '0' + MLC_DATASET_COMPRESSED: 'off' + MLC_DATASET_INPUT_SQUARE_SIDE: '224' + MLC_FAST_COMPILATION: 'yes' + MLC_LOADGEN_BUFFER_SIZE: '1024' + MLC_MLPERF_LOADGEN_MODE: accuracy + MLC_MLPERF_LOADGEN_SCENARIO: SingleStream + MLC_MLPERF_LOADGEN_TRIGGER_COLD_RUN: '0' + MLC_MLPERF_OUTPUT_DIR: . 
+ MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: tflite_cpp + MLC_MLPERF_TFLITE_USE_NEON: '0' + MLC_MLPERF_TFLITE_USE_OPENCL: '0' + MLC_ML_MODEL_GIVEN_CHANNEL_MEANS: 123.68 116.78 103.94 + MLC_ML_MODEL_NORMALIZE_DATA: '0' + MLC_ML_MODEL_SUBTRACT_MEANS: '1' + MLC_VERBOSE: '0' deps: - tags: detect,os - tags: detect,cpu - tags: get,sys-utils-cm - enable_if_env: - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - gpu tags: get,cuda - names: @@ -33,10 +33,10 @@ deps: - inference-src tags: get,mlcommons,inference,src - enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - tflite - armnn_tflite - CM_MODEL: + MLC_MODEL: - mobilenet names: - ml-model @@ -44,10 +44,10 @@ deps: - mobilenet-model tags: get,ml-model,mobilenet,raw,_tflite - enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - tflite - armnn_tflite - CM_MODEL: + MLC_MODEL: - resnet50 names: - ml-model @@ -55,9 +55,9 @@ deps: - resnet50-model tags: get,ml-model,resnet50,raw,_tflite,_no-argmax - enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - tf - CM_MODEL: + MLC_MODEL: - resnet50 names: - ml-model @@ -65,10 +65,10 @@ deps: - resnet50-model tags: get,ml-model,resnet50,raw,_tf - enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - tflite - armnn_tflite - CM_MODEL: + MLC_MODEL: - efficientnet names: - ml-model @@ -80,39 +80,39 @@ deps: - tflite tags: get,tensorflow,lib,_tflite - enable_if_env: - CM_MLPERF_TFLITE_USE_ARMNN: + MLC_MLPERF_TFLITE_USE_ARMNN: - 'yes' names: - armnn - lib-armnn tags: get,lib,armnn input_mapping: - compressed_dataset: CM_DATASET_COMPRESSED - count: CM_MLPERF_LOADGEN_QUERY_COUNT - mlperf_conf: CM_MLPERF_CONF - mode: CM_MLPERF_LOADGEN_MODE - output_dir: CM_MLPERF_OUTPUT_DIR - performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT - scenario: CM_MLPERF_LOADGEN_SCENARIO - user_conf: CM_MLPERF_USER_CONF - verbose: CM_VERBOSE + compressed_dataset: MLC_DATASET_COMPRESSED + count: MLC_MLPERF_LOADGEN_QUERY_COUNT + mlperf_conf: MLC_MLPERF_CONF + mode: MLC_MLPERF_LOADGEN_MODE + output_dir: MLC_MLPERF_OUTPUT_DIR + performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT + scenario: MLC_MLPERF_LOADGEN_SCENARIO + user_conf: MLC_MLPERF_USER_CONF + verbose: MLC_VERBOSE new_env_keys: -- CM_MLPERF_* -- CM_ML_MODEL_* -- CM_HW_NAME +- MLC_MLPERF_* +- MLC_ML_MODEL_* +- MLC_HW_NAME new_state_keys: -- CM_SUT_* +- MLC_SUT_* post_deps: - names: - compiler-program skip_if_env: - CM_MLPERF_SKIP_RUN: + MLC_MLPERF_SKIP_RUN: - 'yes' tags: compile,program - names: - mlperf-runner skip_if_env: - CM_MLPERF_SKIP_RUN: + MLC_MLPERF_SKIP_RUN: - 'yes' tags: benchmark-mlperf - names: @@ -123,40 +123,40 @@ prehook_deps: - user-conf-generator tags: generate,user-conf,mlperf,inference - enable_if_env: - CM_MLPERF_SKIP_RUN: + MLC_MLPERF_SKIP_RUN: - 'no' - CM_MODEL: + MLC_MODEL: - resnet50 names: - imagenet-preprocessed - preprocessed-dataset skip_if_env: - CM_DATASET_COMPRESSED: + MLC_DATASET_COMPRESSED: - 'on' tags: get,dataset,preprocessed,imagenet,_for.resnet50,_rgb32,_NHWC update_tags_from_env: - - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS + - MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS - enable_if_env: - CM_MLPERF_SKIP_RUN: + MLC_MLPERF_SKIP_RUN: - 'no' - CM_MODEL: + MLC_MODEL: - mobilenet - efficientnet names: - imagenet-preprocessed - preprocessed-dataset skip_if_env: - CM_DATASET_COMPRESSED: + MLC_DATASET_COMPRESSED: - 'on' tags: get,dataset,preprocessed,imagenet,_for.mobilenet,_rgb32,_NHWC update_tags_from_env: - - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS + - MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS - 
enable_if_env: - CM_DATASET_COMPRESSED: + MLC_DATASET_COMPRESSED: - 'on' - CM_MLPERF_SKIP_RUN: + MLC_MLPERF_SKIP_RUN: - 'no' - CM_MODEL: + MLC_MODEL: - mobilenet - efficientnet names: @@ -164,20 +164,20 @@ prehook_deps: - preprocessed-dataset tags: get,dataset,preprocessed,imagenet,_for.mobilenet,_rgb8,_NHWC update_tags_from_env: - - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS + - MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS - enable_if_env: - CM_DATASET_COMPRESSED: + MLC_DATASET_COMPRESSED: - 'on' - CM_MLPERF_SKIP_RUN: + MLC_MLPERF_SKIP_RUN: - 'no' - CM_MODEL: + MLC_MODEL: - resnet50 names: - imagenet-preprocessed - preprocessed-dataset tags: get,dataset,preprocessed,imagenet,_for.resnet50,_rgb8,_NHWC update_tags_from_env: - - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS + - MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS tags: - app - mlcommons @@ -191,23 +191,23 @@ variations: default_variations: optimization-target: use-neon env: - CM_MLPERF_TFLITE_USE_ARMNN: 'yes' - CM_TMP_LINK_LIBS: tensorflowlite,armnn + MLC_MLPERF_TFLITE_USE_ARMNN: 'yes' + MLC_TMP_LINK_LIBS: tensorflowlite,armnn armnn,tflite: env: - CM_MLPERF_BACKEND: armnn_tflite - CM_MLPERF_BACKEND_VERSION: <<>> - CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: tflite_armnn_cpp - CM_TMP_LINK_LIBS: tensorflowlite,armnn,armnnTfLiteParser - CM_TMP_SRC_FOLDER: armnn + MLC_MLPERF_BACKEND: armnn_tflite + MLC_MLPERF_BACKEND_VERSION: <<>> + MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: tflite_armnn_cpp + MLC_TMP_LINK_LIBS: tensorflowlite,armnn,armnnTfLiteParser + MLC_TMP_SRC_FOLDER: armnn cpu: default: true env: - CM_MLPERF_DEVICE: cpu + MLC_MLPERF_DEVICE: cpu group: device efficientnet: env: - CM_MODEL: efficientnet + MLC_MODEL: efficientnet group: model fp32: adr: @@ -217,12 +217,12 @@ variations: tags: _float32 default: true env: - CM_MLPERF_MODEL_PRECISION: float32 + MLC_MLPERF_MODEL_PRECISION: float32 group: precision gpu: env: - CM_MLPERF_DEVICE: gpu - CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart + MLC_MLPERF_DEVICE: gpu + MLC_MLPERF_DEVICE_LIB_NAMESPEC: cudart group: device int8: adr: @@ -231,34 +231,34 @@ variations: preprocessed-dataset: tags: _int8 env: - CM_DATASET_COMPRESSED: 'on' - CM_MLPERF_MODEL_PRECISION: int8 + MLC_DATASET_COMPRESSED: 'on' + MLC_MLPERF_MODEL_PRECISION: int8 group: precision mobilenet: env: - CM_MODEL: mobilenet + MLC_MODEL: mobilenet group: model resnet50: default: true env: - CM_MODEL: resnet50 + MLC_MODEL: resnet50 group: model singlestream: default: true env: - CM_MLPERF_LOADGEN_SCENARIO: SingleStream + MLC_MLPERF_LOADGEN_SCENARIO: SingleStream group: loadgen-scenario tf: env: - CM_MLPERF_BACKEND: tf + MLC_MLPERF_BACKEND: tf group: backend tflite: default: true env: - CM_MLPERF_BACKEND: tflite - CM_MLPERF_BACKEND_VERSION: master - CM_TMP_LINK_LIBS: tensorflowlite - CM_TMP_SRC_FOLDER: src + MLC_MLPERF_BACKEND: tflite + MLC_MLPERF_BACKEND_VERSION: master + MLC_TMP_LINK_LIBS: tensorflowlite + MLC_TMP_SRC_FOLDER: src group: backend uint8: adr: @@ -267,16 +267,16 @@ variations: preprocessed-dataset: tags: _int8 env: - CM_DATASET_COMPRESSED: 'on' - CM_MLPERF_MODEL_PRECISION: uint8 + MLC_DATASET_COMPRESSED: 'on' + MLC_MLPERF_MODEL_PRECISION: uint8 group: precision use-neon: env: - CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: using_neon - CM_MLPERF_TFLITE_USE_NEON: '1' + MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: using_neon + MLC_MLPERF_TFLITE_USE_NEON: '1' group: optimization-target use-opencl: env: - CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: using_opencl - CM_MLPERF_TFLITE_USE_OPENCL: '1' + MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: using_opencl + 
MLC_MLPERF_TFLITE_USE_OPENCL: '1' group: optimization-target diff --git a/script/app-mlperf-inference-ctuning-cpp-tflite/src/classification.cpp b/script/app-mlperf-inference-ctuning-cpp-tflite/src/classification.cpp index 9493f5430..dbe464a9e 100644 --- a/script/app-mlperf-inference-ctuning-cpp-tflite/src/classification.cpp +++ b/script/app-mlperf-inference-ctuning-cpp-tflite/src/classification.cpp @@ -295,15 +295,15 @@ void TestSingleStream(Program *prg) { SystemUnderTestSingleStream sut(prg); QuerySampleLibrarySingleStream qsl(prg); - const std::string mlperf_conf_path = getenv_s("CM_MLPERF_CONF"); - const std::string user_conf_path = getenv_s("CM_MLPERF_USER_CONF"); - const std::string audit_conf_path = getenv_opt_s("CM_MLPERF_INFERENCE_AUDIT_PATH",""); + const std::string mlperf_conf_path = getenv_s("MLC_MLPERF_CONF"); + const std::string user_conf_path = getenv_s("MLC_MLPERF_USER_CONF"); + const std::string audit_conf_path = getenv_opt_s("MLC_MLPERF_INFERENCE_AUDIT_PATH",""); - std::string model_name = getenv_opt_s("CM_MODEL", "unknown_model"); - std::string logs_dir = getenv_opt_s("CM_MLPERF_LOADGEN_LOGS_DIR", ""); + std::string model_name = getenv_opt_s("MLC_MODEL", "unknown_model"); + std::string logs_dir = getenv_opt_s("MLC_MLPERF_LOADGEN_LOGS_DIR", ""); - const std::string scenario_string = getenv_s("CM_MLPERF_LOADGEN_SCENARIO"); - const std::string mode_string = getenv_s("CM_MLPERF_LOADGEN_MODE"); + const std::string scenario_string = getenv_s("MLC_MLPERF_LOADGEN_SCENARIO"); + const std::string mode_string = getenv_s("MLC_MLPERF_LOADGEN_MODE"); std::cout << "Path to mlperf.conf : " << mlperf_conf_path << std::endl; std::cout << "Path to user.conf : " << user_conf_path << std::endl; diff --git a/script/app-mlperf-inference-dummy/customize.py b/script/app-mlperf-inference-dummy/customize.py index 40e41f738..f200e915b 100644 --- a/script/app-mlperf-inference-dummy/customize.py +++ b/script/app-mlperf-inference-dummy/customize.py @@ -11,20 +11,20 @@ def preprocess(i): return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] - if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": + if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes": return {'return': 0} - if 'CM_MODEL' not in env: + if 'MLC_MODEL' not in env: return { 'return': 1, 'error': 'Please select a variation specifying the model to run'} - if 'CM_MLPERF_BACKEND' not in env: + if 'MLC_MLPERF_BACKEND' not in env: return {'return': 1, 'error': 'Please select a variation specifying the backend'} - if 'CM_MLPERF_DEVICE' not in env: + if 'MLC_MLPERF_DEVICE' not in env: return { 'return': 1, 'error': 'Please select a variation specifying the device to run on'} - r = get_run_cmd(env['CM_MODEL'], i) + r = get_run_cmd(env['MLC_MODEL'], i) if r['return'] > 0: return r run_cmd = r['run_cmd'] @@ -37,16 +37,16 @@ def preprocess(i): def get_run_cmd(model, i): env = i['env'] if "gptj" in model: - scenario = env['CM_MLPERF_LOADGEN_SCENARIO'] - device = env['CM_MLPERF_DEVICE'] - mode = env['CM_MLPERF_LOADGEN_MODE'] - outdir = env['CM_MLPERF_OUTPUT_DIR'] - mlperf_conf_path = env['CM_MLPERF_CONF'] - user_conf_path = env['CM_MLPERF_USER_CONF'] - api_server = env.get('CM_MLPERF_INFERENCE_API_SERVER', 'localhost') + scenario = env['MLC_MLPERF_LOADGEN_SCENARIO'] + device = env['MLC_MLPERF_DEVICE'] + mode = env['MLC_MLPERF_LOADGEN_MODE'] + outdir = env['MLC_MLPERF_OUTPUT_DIR'] + mlperf_conf_path = env['MLC_MLPERF_CONF'] + user_conf_path = env['MLC_MLPERF_USER_CONF'] + api_server = env.get('MLC_MLPERF_INFERENCE_API_SERVER', 
'localhost') model_path = env['GPTJ_CHECKPOINT_PATH'] - dataset_path = env['CM_DATASET_CNNDM_EVAL_PATH'] - precision = env['CM_MLPERF_MODEL_PRECISION'] + dataset_path = env['MLC_DATASET_CNNDM_EVAL_PATH'] + precision = env['MLC_MLPERF_MODEL_PRECISION'] if mode == "accuracy": accuracy_string = " --accuracy " else: @@ -55,7 +55,7 @@ def get_run_cmd(model, i): run_cmd = f"python3 -u main.py --scenario {scenario} --model-path {model_path} --api-server {api_server} --api-model-name gpt-j-cnn --mlperf-conf {mlperf_conf_path} {accuracy_string} --vllm --user-conf {user_conf_path} --dataset-path {dataset_path} --output-log-dir {outdir} --dtype float32 --device {device} " submitter = "CTuning" run_dir = os.path.join( - env['CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO'], + env['MLC_MLPERF_INFERENCE_IMPLEMENTATION_REPO'], "open", submitter, "code", diff --git a/script/app-mlperf-inference-dummy/meta.yaml b/script/app-mlperf-inference-dummy/meta.yaml index 1343835b6..f8876eb81 100644 --- a/script/app-mlperf-inference-dummy/meta.yaml +++ b/script/app-mlperf-inference-dummy/meta.yaml @@ -21,51 +21,51 @@ tags: # Default environment default_env: - CM_MLPERF_LOADGEN_SCENARIO: Offline - CM_MLPERF_LOADGEN_MODE: performance - CM_SKIP_PREPROCESS_DATASET: 'no' - CM_SKIP_MODEL_DOWNLOAD: 'no' - CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: dummy_harness - CM_MLPERF_SKIP_RUN: 'no' + MLC_MLPERF_LOADGEN_SCENARIO: Offline + MLC_MLPERF_LOADGEN_MODE: performance + MLC_SKIP_PREPROCESS_DATASET: 'no' + MLC_SKIP_MODEL_DOWNLOAD: 'no' + MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: dummy_harness + MLC_MLPERF_SKIP_RUN: 'no' env: - CM_CALL_MLPERF_RUNNER: 'no' + MLC_CALL_MLPERF_RUNNER: 'no' # Map script inputs to environment variables input_mapping: - count: CM_MLPERF_LOADGEN_QUERY_COUNT - max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE - mlperf_conf: CM_MLPERF_CONF - mode: CM_MLPERF_LOADGEN_MODE - output_dir: CM_MLPERF_OUTPUT_DIR - performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT - scenario: CM_MLPERF_LOADGEN_SCENARIO - user_conf: CM_MLPERF_USER_CONF - skip_preprocess: CM_SKIP_PREPROCESS_DATASET - skip_preprocessing: CM_SKIP_PREPROCESS_DATASET - target_qps: CM_MLPERF_LOADGEN_TARGET_QPS - offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS - server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS - target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY - singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY - multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY - performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT - rerun: CM_RERUN - results_repo: CM_MLPERF_INFERENCE_RESULTS_REPO + count: MLC_MLPERF_LOADGEN_QUERY_COUNT + max_batchsize: MLC_MLPERF_LOADGEN_MAX_BATCHSIZE + mlperf_conf: MLC_MLPERF_CONF + mode: MLC_MLPERF_LOADGEN_MODE + output_dir: MLC_MLPERF_OUTPUT_DIR + performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT + scenario: MLC_MLPERF_LOADGEN_SCENARIO + user_conf: MLC_MLPERF_USER_CONF + skip_preprocess: MLC_SKIP_PREPROCESS_DATASET + skip_preprocessing: MLC_SKIP_PREPROCESS_DATASET + target_qps: MLC_MLPERF_LOADGEN_TARGET_QPS + offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS + server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS + target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY + singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY + multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY + performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT + rerun: MLC_RERUN + 
results_repo: MLC_MLPERF_INFERENCE_RESULTS_REPO new_state_keys: - mlperf-inference-implementation - - CM_SUT_* + - MLC_SUT_* # Env keys which are exposed to higher level scripts new_env_keys: - - CM_MLPERF_* - - CM_DATASET_* - - CM_HW_NAME - - CM_ML_MODEL_* - - CM_MAX_EXAMPLES - - CM_IMAGENET_ACCURACY_DTYPE - - CM_SQUAD_ACCURACY_DTYPE + - MLC_MLPERF_* + - MLC_DATASET_* + - MLC_HW_NAME + - MLC_ML_MODEL_* + - MLC_MAX_EXAMPLES + - MLC_IMAGENET_ACCURACY_DTYPE + - MLC_SQUAD_ACCURACY_DTYPE # Dependencies on other CM scripts @@ -110,9 +110,9 @@ deps: inference-results inference-code updats_tags_from_env_with_prefix: - _repo.: CM_MLPERF_INFERENCE_RESULTS_REPO + _repo.: MLC_MLPERF_INFERENCE_RESULTS_REPO env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_MLPERF_INFERENCE_IMPLEMENTATION_REPO extra_cache_tags: inference-implementation,mlperf # Post dependencies to run this app including for power measurement @@ -122,7 +122,7 @@ post_deps: - runner - mlperf-runner skip_if_env: - CM_MLPERF_SKIP_RUN: + MLC_MLPERF_SKIP_RUN: - 'yes' - yes tags: benchmark-mlperf @@ -138,18 +138,18 @@ variations: group: device default: true env: - CM_MLPERF_DEVICE: cpu + MLC_MLPERF_DEVICE: cpu cuda: group: device env: - CM_MLPERF_DEVICE: gpu - CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart + MLC_MLPERF_DEVICE: gpu + MLC_MLPERF_DEVICE_LIB_NAMESPEC: cudart pytorch: group: backend default: true env: - CM_MLPERF_BACKEND: pytorch + MLC_MLPERF_BACKEND: pytorch pytorch,cuda: deps: @@ -168,14 +168,14 @@ variations: group: model default: true env: - CM_MODEL: resnet50 + MLC_MODEL: resnet50 retinanet: group: model base: - bs.1 env: - CM_MODEL: retinanet + MLC_MODEL: retinanet bert_: {} @@ -185,15 +185,15 @@ variations: base: - bert_ env: - CM_MODEL: bert-99 - CM_SQUAD_ACCURACY_DTYPE: float32 + MLC_MODEL: bert-99 + MLC_SQUAD_ACCURACY_DTYPE: float32 bert-99.9: group: model base: - bert_ env: - CM_MODEL: bert-99.9 + MLC_MODEL: bert-99.9 bert_: {} @@ -203,15 +203,15 @@ variations: base: - bert_ env: - CM_MODEL: bert-99 - CM_SQUAD_ACCURACY_DTYPE: float32 + MLC_MODEL: bert-99 + MLC_SQUAD_ACCURACY_DTYPE: float32 bert-99.9: group: model base: - bert_ env: - CM_MODEL: bert-99.9 + MLC_MODEL: bert-99.9 gptj_: deps: @@ -225,15 +225,15 @@ variations: base: - gptj_ env: - CM_MODEL: gptj-99 - CM_SQUAD_ACCURACY_DTYPE: float32 + MLC_MODEL: gptj-99 + MLC_SQUAD_ACCURACY_DTYPE: float32 gptj-99.9: group: model base: - gptj_ env: - CM_MODEL: gptj-99.9 + MLC_MODEL: gptj-99.9 llama2-70b_: {} @@ -243,19 +243,19 @@ variations: base: - llama2-70b_ env: - CM_MODEL: llama2-70b-99 + MLC_MODEL: llama2-70b-99 llama2-70b-99.9: group: model base: - llama2-70b_ env: - CM_MODEL: llama2-70b-99.9 + MLC_MODEL: llama2-70b-99.9 singlestream: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: SingleStream + MLC_MLPERF_LOADGEN_SCENARIO: SingleStream singlestream,resnet50: default_variations: @@ -268,17 +268,17 @@ variations: multistream: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: MultiStream + MLC_MLPERF_LOADGEN_SCENARIO: MultiStream offline: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: Offline + MLC_MLPERF_LOADGEN_SCENARIO: Offline server: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: Server + MLC_MLPERF_LOADGEN_SCENARIO: Server uint8: group: precision diff --git a/script/app-mlperf-inference-dummy/run.sh b/script/app-mlperf-inference-dummy/run.sh index ddcd0b550..0c6a8fc4a 100644 --- a/script/app-mlperf-inference-dummy/run.sh +++ 
b/script/app-mlperf-inference-dummy/run.sh @@ -1,7 +1,7 @@ #!/bin/bash -if [[ ${CM_CALL_MLPERF_RUNNER} == "no" ]]; then - cd ${CM_RUN_DIR} - cmd=${CM_RUN_CMD} +if [[ ${MLC_CALL_MLPERF_RUNNER} == "no" ]]; then + cd ${MLC_RUN_DIR} + cmd=${MLC_RUN_CMD} echo "${cmd}" eval "${cmd}" test $? -eq 0 || exit $? diff --git a/script/app-mlperf-inference-intel/build_bert_harness.sh b/script/app-mlperf-inference-intel/build_bert_harness.sh index 4a2b957a9..bb2477caa 100644 --- a/script/app-mlperf-inference-intel/build_bert_harness.sh +++ b/script/app-mlperf-inference-intel/build_bert_harness.sh @@ -1,21 +1,21 @@ #!/bin/bash -export PATH=${CM_CONDA_BIN_PATH}:$PATH +export PATH=${MLC_CONDA_BIN_PATH}:$PATH echo $PWD if [ ! -d harness ]; then mkdir -p harness fi -rm -rf ${CM_CONDA_LIB_PATH}/cmake/mkl/* +rm -rf ${MLC_CONDA_LIB_PATH}/cmake/mkl/* -rsync -avz --exclude=".git" ${CM_HARNESS_CODE_ROOT}/ harness/ +rsync -avz --exclude=".git" ${MLC_HARNESS_CODE_ROOT}/ harness/ pushd harness -rsync -avz --exclude=".git" ${CM_MLPERF_INFERENCE_SOURCE}/ inference/ +rsync -avz --exclude=".git" ${MLC_MLPERF_INFERENCE_SOURCE}/ inference/ test $? -eq 0 || exit $? pushd mlperf_plugins rm -rf onednn -rsync -avz --exclude=".git" ${CM_ONEDNN_INSTALLED_PATH}/ onednn/ +rsync -avz --exclude=".git" ${MLC_ONEDNN_INSTALLED_PATH}/ onednn/ test $? -eq 0 || exit $? popd @@ -30,13 +30,13 @@ test $? -eq 0 || exit $? mkdir -p bert/dataset cd bert -ln -sf ${CM_DATASET_SQUAD_VAL_PATH} dataset/dev-v1.1.json +ln -sf ${MLC_DATASET_SQUAD_VAL_PATH} dataset/dev-v1.1.json test $? -eq 0 || exit $? if [ ! -d model ]; then git clone https://huggingface.co/bert-large-uncased model cd model rm pytorch_model.bin - ln -sf ${CM_ML_MODEL_FILE_WITH_PATH} pytorch_model.bin + ln -sf ${MLC_ML_MODEL_FILE_WITH_PATH} pytorch_model.bin test $? -eq 0 || exit $? cd .. fi diff --git a/script/app-mlperf-inference-intel/build_gptj_harness.sh b/script/app-mlperf-inference-intel/build_gptj_harness.sh index 3c2f26dc4..5175f4ede 100644 --- a/script/app-mlperf-inference-intel/build_gptj_harness.sh +++ b/script/app-mlperf-inference-intel/build_gptj_harness.sh @@ -1,14 +1,14 @@ #!/bin/bash -export PATH=${CM_CONDA_BIN_PATH}:$PATH +export PATH=${MLC_CONDA_BIN_PATH}:$PATH echo $PWD if [ ! -d harness ]; then mkdir -p harness fi -echo ${CM_HARNESS_CODE_ROOT} -cd ${CM_HARNESS_CODE_ROOT} +echo ${MLC_HARNESS_CODE_ROOT} +cd ${MLC_HARNESS_CODE_ROOT} cd utils python -m pip install . test $? -eq 0 || exit $? 
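The rename touches shell, Python, YAML, and C++ sources alike, so a mechanical sweep is the easiest way to confirm that no legacy `CM_`-prefixed variable survives in a converted tree. Below is a minimal sketch of such a check; the default `script` root and the extension list are assumptions for illustration, not part of this patch:

```python
import os
import re
import sys

# Matches any legacy CM_-prefixed variable name, e.g. CM_MLPERF_CONF.
LEGACY = re.compile(r"\bCM_[A-Z0-9_]+")

# Assumed set of file types touched by the rename.
EXTS = (".py", ".sh", ".bat", ".yaml", ".cpp", ".h")

def find_stragglers(root):
    """Yield (path, line_no, name) for every leftover CM_ variable."""
    for dirpath, _, files in os.walk(root):
        for fname in files:
            if not fname.endswith(EXTS):
                continue
            path = os.path.join(dirpath, fname)
            with open(path, errors="ignore") as fh:
                for lineno, line in enumerate(fh, 1):
                    for match in LEGACY.finditer(line):
                        yield path, lineno, match.group(0)

if __name__ == "__main__":
    root = sys.argv[1] if len(sys.argv) > 1 else "script"
    hits = list(find_stragglers(root))
    for path, lineno, name in hits:
        print(f"{path}:{lineno}: {name}")
    # Non-zero exit if anything was missed, so CI can gate on it.
    sys.exit(1 if hits else 0)
```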
diff --git a/script/app-mlperf-inference-intel/build_resnet50_harness.sh b/script/app-mlperf-inference-intel/build_resnet50_harness.sh index 92ef96243..3a27e4d3b 100644 --- a/script/app-mlperf-inference-intel/build_resnet50_harness.sh +++ b/script/app-mlperf-inference-intel/build_resnet50_harness.sh @@ -1,11 +1,11 @@ -export PATH=${CM_CONDA_BIN_PATH}:$PATH +export PATH=${MLC_CONDA_BIN_PATH}:$PATH echo $PWD export DATA_CAL_DIR=calibration_dataset -export CHECKPOINT=${CM_ML_MODEL_FILE_WITH_PATH} +export CHECKPOINT=${MLC_ML_MODEL_FILE_WITH_PATH} -cd ${CM_HARNESS_CODE_ROOT} +cd ${MLC_HARNESS_CODE_ROOT} cd src/ckernels/ && mkdir -p 3rdparty && \ cd 3rdparty && \ @@ -16,7 +16,7 @@ cd src/ckernels/ && mkdir -p 3rdparty && \ export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"} -export IPEX_PATH=${CM_IPEX_INSTALLED_PATH} +export IPEX_PATH=${MLC_IPEX_INSTALLED_PATH} export TORCH_PATH=`python -c 'import torch;print(torch.utils.cmake_prefix_path)'` if [[ -z ${TORCH_PATH} ]]; then @@ -24,11 +24,11 @@ if [[ -z ${TORCH_PATH} ]]; then exit 1 fi -export LOADGEN_DIR="${CM_MLPERF_INFERENCE_LOADGEN_INSTALL_PATH}/../" -export OPENCV_DIR=${CM_OPENCV_BUILD_PATH} -export RAPIDJSON_INCLUDE_DIR=${CM_RAPIDJSON_SRC_REPO_PATH}/include -export GFLAGS_DIR=${CM_GFLAGS_BUILD_PATH} -export ONEDNN_DIR=${CM_ONEDNN_INSTALLED_PATH} +export LOADGEN_DIR="${MLC_MLPERF_INFERENCE_LOADGEN_INSTALL_PATH}/../" +export OPENCV_DIR=${MLC_OPENCV_BUILD_PATH} +export RAPIDJSON_INCLUDE_DIR=${MLC_RAPIDJSON_SRC_REPO_PATH}/include +export GFLAGS_DIR=${MLC_GFLAGS_BUILD_PATH} +export ONEDNN_DIR=${MLC_ONEDNN_INSTALLED_PATH} export USE_CUDA=0 BUILD_DIR=${PWD}/build diff --git a/script/app-mlperf-inference-intel/build_retinanet_harness.sh b/script/app-mlperf-inference-intel/build_retinanet_harness.sh index 0d577b26b..225e6bdc4 100644 --- a/script/app-mlperf-inference-intel/build_retinanet_harness.sh +++ b/script/app-mlperf-inference-intel/build_retinanet_harness.sh @@ -1,11 +1,11 @@ -export PATH=${CM_CONDA_BIN_PATH}:$PATH +export PATH=${MLC_CONDA_BIN_PATH}:$PATH -cd ${CM_HARNESS_CODE_ROOT} +cd ${MLC_HARNESS_CODE_ROOT} export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"} -export IPEX_PATH=${CM_IPEX_INSTALLED_PATH} +export IPEX_PATH=${MLC_IPEX_INSTALLED_PATH} export TORCH_PATH=`python -c 'import torch;print(torch.utils.cmake_prefix_path)'` if [[ -z ${TORCH_PATH} ]]; then @@ -13,10 +13,10 @@ if [[ -z ${TORCH_PATH} ]]; then exit 1 fi -export LOADGEN_DIR="${CM_MLPERF_INFERENCE_LOADGEN_INSTALL_PATH}/../" -export OPENCV_DIR=${CM_OPENCV_BUILD_PATH} -export RAPIDJSON_INCLUDE_DIR=${CM_RAPIDJSON_SRC_REPO_PATH}/include -export GFLAGS_DIR=${CM_GFLAGS_BUILD_PATH} +export LOADGEN_DIR="${MLC_MLPERF_INFERENCE_LOADGEN_INSTALL_PATH}/../" +export OPENCV_DIR=${MLC_OPENCV_BUILD_PATH} +export RAPIDJSON_INCLUDE_DIR=${MLC_RAPIDJSON_SRC_REPO_PATH}/include +export GFLAGS_DIR=${MLC_GFLAGS_BUILD_PATH} export USE_CUDA=0 BUILD_DIR=${PWD}/build diff --git a/script/app-mlperf-inference-intel/build_sdxl_harness.sh b/script/app-mlperf-inference-intel/build_sdxl_harness.sh index a0817e495..1fdebbf55 100644 --- a/script/app-mlperf-inference-intel/build_sdxl_harness.sh +++ b/script/app-mlperf-inference-intel/build_sdxl_harness.sh @@ -1,4 +1,4 @@ -cd ${CM_HARNESS_CODE_ROOT} +cd ${MLC_HARNESS_CODE_ROOT} cd utils cmd=" python -m pip install ." 
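Because the harness build scripts read these variables straight from the process environment, a caller that still exports the old `CM_*` names would silently lose its settings after this rename. A hypothetical compatibility shim, shown only to illustrate the one-to-one `CM_` to `MLC_` mapping (nothing like it ships in this patch):

```python
def mirror_legacy_env(env):
    """Return a copy of env where every CM_* value is mirrored to MLC_*.

    Hypothetical shim: the patch renames variables outright and does not
    install such a fallback; this only illustrates the mapping rule.
    """
    out = dict(env)
    for key, value in env.items():
        if key.startswith("CM_"):
            # Keep an explicitly set MLC_* value; only fill gaps.
            out.setdefault("MLC_" + key[3:], value)
    return out

# A caller still exporting the pre-rename name keeps working:
merged = mirror_legacy_env({"CM_MLPERF_LOADGEN_SCENARIO": "Offline"})
assert merged["MLC_MLPERF_LOADGEN_SCENARIO"] == "Offline"
```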
diff --git a/script/app-mlperf-inference-intel/calibrate_dlrm_v2_model.sh b/script/app-mlperf-inference-intel/calibrate_dlrm_v2_model.sh index 82aa6906c..7c95d0d29 100644 --- a/script/app-mlperf-inference-intel/calibrate_dlrm_v2_model.sh +++ b/script/app-mlperf-inference-intel/calibrate_dlrm_v2_model.sh @@ -1,9 +1,9 @@ #!/bin/bash -export MODEL_DIR=${CM_ML_MODEL_FILE_WITH_PATH} +export MODEL_DIR=${MLC_ML_MODEL_FILE_WITH_PATH} export DATA_DIR=/mnt/dlrm_data -echo ${CM_HARNESS_CODE_ROOT} -cd ${CM_HARNESS_CODE_ROOT} +echo ${MLC_HARNESS_CODE_ROOT} +cd ${MLC_HARNESS_CODE_ROOT} python -m pip install scikit-learn==1.3.0 torchsnapshot torchrec==0.3.2 test $? -eq 0 || exit $? python -m pip install fbgemm-gpu==0.3.2 --index-url https://download.pytorch.org/whl/cpu diff --git a/script/app-mlperf-inference-intel/calibrate_gptj_int4_model.sh b/script/app-mlperf-inference-intel/calibrate_gptj_int4_model.sh index 75a0774d5..6e112a681 100644 --- a/script/app-mlperf-inference-intel/calibrate_gptj_int4_model.sh +++ b/script/app-mlperf-inference-intel/calibrate_gptj_int4_model.sh @@ -1,8 +1,8 @@ #!/bin/bash -export PATH=${CM_CONDA_BIN_PATH}:$PATH +export PATH=${MLC_CONDA_BIN_PATH}:$PATH -cd ${CM_MLPERF_INFERENCE_INTEL_CALIBRATION_PATH} +cd ${MLC_MLPERF_INFERENCE_INTEL_CALIBRATION_PATH} CUR_DIR=$(pwd) export WORKLOAD_DATA=${CUR_DIR}/data mkdir -p ${WORKLOAD_DATA} diff --git a/script/app-mlperf-inference-intel/compile_resnet50.sh b/script/app-mlperf-inference-intel/compile_resnet50.sh index ee81956ec..8ba7f4812 100644 --- a/script/app-mlperf-inference-intel/compile_resnet50.sh +++ b/script/app-mlperf-inference-intel/compile_resnet50.sh @@ -1,9 +1,9 @@ -export PATH=${CM_CONDA_BIN_PATH}:$PATH +export PATH=${MLC_CONDA_BIN_PATH}:$PATH -export DATA_CAL_DIR=${CM_HARNESS_CODE_ROOT}/calibration_dataset -export CHECKPOINT=${CM_ML_MODEL_FILE_WITH_PATH} +export DATA_CAL_DIR=${MLC_HARNESS_CODE_ROOT}/calibration_dataset +export CHECKPOINT=${MLC_ML_MODEL_FILE_WITH_PATH} -cd ${CM_HARNESS_CODE_ROOT} +cd ${MLC_HARNESS_CODE_ROOT} bash generate_torch_model.sh test "$?" -eq 0 || exit "$?" diff --git a/script/app-mlperf-inference-intel/compile_retinanet.sh b/script/app-mlperf-inference-intel/compile_retinanet.sh index 7e23b889a..933311523 100644 --- a/script/app-mlperf-inference-intel/compile_retinanet.sh +++ b/script/app-mlperf-inference-intel/compile_retinanet.sh @@ -1,11 +1,11 @@ -export PATH=${CM_CONDA_BIN_PATH}:$PATH +export PATH=${MLC_CONDA_BIN_PATH}:$PATH -export CALIBRATION_ANNOTATIONS=${CM_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH} +export CALIBRATION_ANNOTATIONS=${MLC_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH} -export CALIBRATION_DATA_DIR=${CM_CALIBRATION_DATASET_PATH} -export MODEL_CHECKPOINT=${CM_ML_MODEL_FILE_WITH_PATH} +export CALIBRATION_DATA_DIR=${MLC_CALIBRATION_DATASET_PATH} +export MODEL_CHECKPOINT=${MLC_ML_MODEL_FILE_WITH_PATH} -cd ${CM_HARNESS_CODE_ROOT} +cd ${MLC_HARNESS_CODE_ROOT} bash run_calibration.sh test "$?" -eq 0 || exit "$?" 
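The calibrate and compile scripts above share one calling convention: the automation layer exports the relevant `MLC_*` variables, the script enters `MLC_HARNESS_CODE_ROOT`, runs its step, and fails fast on a non-zero exit. A rough sketch of that contract from the Python side, with placeholder paths standing in for real cache locations:

```python
import os
import subprocess

def run_harness_step(script, env_updates):
    """Invoke one harness shell script with MLC_* variables injected.

    Sketch of the calling convention the scripts above rely on; the
    concrete paths passed below are placeholders, not values from
    this patch.
    """
    env = {**os.environ, **env_updates}
    # check=True mirrors the scripts' `test $? -eq 0 || exit $?` idiom.
    subprocess.run(["bash", script], env=env, check=True)

run_harness_step(
    "compile_resnet50.sh",
    {
        "MLC_HARNESS_CODE_ROOT": "/opt/harness",              # placeholder
        "MLC_ML_MODEL_FILE_WITH_PATH": "/opt/resnet50.pth",   # placeholder
        "MLC_CONDA_BIN_PATH": "/opt/conda/bin",               # placeholder
    },
)
```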
diff --git a/script/app-mlperf-inference-intel/customize.py b/script/app-mlperf-inference-intel/customize.py index 667e5fb86..932817163 100644 --- a/script/app-mlperf-inference-intel/customize.py +++ b/script/app-mlperf-inference-intel/customize.py @@ -11,68 +11,68 @@ def preprocess(i): return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] - if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": + if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes": return {'return': 0} import json - if 'CM_MODEL' not in env: + if 'MLC_MODEL' not in env: return { 'return': 1, 'error': 'Please select a variation specifying the model to run'} - if 'CM_MLPERF_BACKEND' not in env: + if 'MLC_MLPERF_BACKEND' not in env: return {'return': 1, 'error': 'Please select a variation specifying the backend'} - if 'CM_MLPERF_DEVICE' not in env: + if 'MLC_MLPERF_DEVICE' not in env: return { 'return': 1, 'error': 'Please select a variation specifying the device to run on'} - ml_model = env['CM_MODEL'] + ml_model = env['MLC_MODEL'] master_model = ml_model.replace("-99.9", "").replace("-99", "") master_model = master_model.replace("gptj", "gpt-j") - backend = env['CM_MLPERF_BACKEND'] - device = env['CM_MLPERF_DEVICE'] + backend = env['MLC_MLPERF_BACKEND'] + device = env['MLC_MLPERF_DEVICE'] code_base_folder = backend + '-' + device - if env.get('CM_MLPERF_INFERENCE_CODE_VERSION', '') == 'v4.0': + if env.get('MLC_MLPERF_INFERENCE_CODE_VERSION', '') == 'v4.0': if 'gptj' in ml_model: code_base_folder = "ITREX" if 'dlrm-v2' in ml_model: code_base_folder = "pytorch-cpu-int8" harness_root = os.path.join( - env['CM_MLPERF_INFERENCE_RESULTS_PATH'], + env['MLC_MLPERF_INFERENCE_RESULTS_PATH'], 'closed', 'Intel', 'code', ml_model, code_base_folder) - env['CM_HARNESS_CODE_ROOT'] = harness_root + env['MLC_HARNESS_CODE_ROOT'] = harness_root - if env.get('CM_MODEL') == "resnet50": + if env.get('MLC_MODEL') == "resnet50": pass - elif "bert" in env.get('CM_MODEL'): + elif "bert" in env.get('MLC_MODEL'): pass - elif "retinanet" in env.get('CM_MODEL'): + elif "retinanet" in env.get('MLC_MODEL'): pass - elif "gptj" in env.get('CM_MODEL'): + elif "gptj" in env.get('MLC_MODEL'): env['CHECKPOINT_DIR'] = env['GPTJ_CHECKPOINT_PATH'] script_path = i['run_script_input']['path'] - if env['CM_MODEL'] == "retinanet": - env['CM_DATASET_LIST'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH'] + if env['MLC_MODEL'] == "retinanet": + env['MLC_DATASET_LIST'] = env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] - if 'CM_MLPERF_CONF' not in env: - env['CM_MLPERF_CONF'] = os.path.join( - env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") - if 'CM_MLPERF_USER_CONF' not in env: - env['CM_MLPERF_USER_CONF'] = os.path.join( - env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf") + if 'MLC_MLPERF_CONF' not in env: + env['MLC_MLPERF_CONF'] = os.path.join( + env['MLC_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") + if 'MLC_MLPERF_USER_CONF' not in env: + env['MLC_MLPERF_USER_CONF'] = os.path.join( + env['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf") - loadgen_mode = env['CM_MLPERF_LOADGEN_MODE'] - env['CONDA_PREFIX'] = env['CM_CONDA_PREFIX'] + loadgen_mode = env['MLC_MLPERF_LOADGEN_MODE'] + env['CONDA_PREFIX'] = env['MLC_CONDA_PREFIX'] - if env['CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "calibration": + if env['MLC_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "calibration": if master_model == "resnet50": i['run_script_input']['script_name'] = "prepare_imagenet_calibration" elif master_model == "3d-unet": @@ -81,55 +81,55 
@@ def preprocess(i): i['run_script_input']['script_name'] = "calibrate_dlrm_v2_model" else: calibration_root = os.path.join( - env['CM_MLPERF_INFERENCE_RESULTS_PATH'], + env['MLC_MLPERF_INFERENCE_RESULTS_PATH'], 'closed', 'Intel', 'calibration', master_model, backend + "-" + device) - if "gpt" in env['CM_MODEL']: + if "gpt" in env['MLC_MODEL']: i['run_script_input']['script_name'] = "calibrate_gptj_int4_model" calibration_path = os.path.join(calibration_root, "INT4") - env['CM_MLPERF_INFERENCE_INTEL_CALIBRATION_PATH'] = calibration_path + env['MLC_MLPERF_INFERENCE_INTEL_CALIBRATION_PATH'] = calibration_path env['INT4_CALIBRATION_DIR'] = os.path.join( calibration_path, "data", "quantized-int4-model") - elif env['CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "compilation": + elif env['MLC_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "compilation": if master_model == "resnet50": i['run_script_input']['script_name'] = "compile_resnet50" elif master_model == "retinanet": i['run_script_input']['script_name'] = "compile_retinanet" - env['CM_ML_MODEL_RETINANET_INT8_FILE_WITH_PATH'] = os.path.join( - os.path.dirname(env['CM_ML_MODEL_FILE_WITH_PATH']), 'retinanet-int8-model.pth') + env['MLC_ML_MODEL_RETINANET_INT8_FILE_WITH_PATH'] = os.path.join( + os.path.dirname(env['MLC_ML_MODEL_FILE_WITH_PATH']), 'retinanet-int8-model.pth') - elif env['CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "build_harness": + elif env['MLC_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "build_harness": print(f"Harness Root: {harness_root}") - if "bert" in env['CM_MODEL']: + if "bert" in env['MLC_MODEL']: i['run_script_input']['script_name'] = "build_bert_harness" - env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join( + env['MLC_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join( os.getcwd(), "harness", "build", "bert_inference") env['DATA_PATH'] = os.path.join(os.getcwd(), "harness", "bert") - elif "stable-diffusion" in env['CM_MODEL']: + elif "stable-diffusion" in env['MLC_MODEL']: i['run_script_input']['script_name'] = "build_sdxl_harness" - elif "resnet50" in env['CM_MODEL']: + elif "resnet50" in env['MLC_MODEL']: i['run_script_input']['script_name'] = "build_resnet50_harness" - env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join( + env['MLC_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join( os.getcwd(), "harness", "build", "resnet50_inference") env['DATA_PATH'] = os.path.join(os.getcwd(), "harness", "resnet50") - elif "retinanet" in env['CM_MODEL']: + elif "retinanet" in env['MLC_MODEL']: i['run_script_input']['script_name'] = "build_retinanet_harness" - env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join( + env['MLC_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join( os.getcwd(), "harness", "build", "retinanet_inference") - elif "gpt" in env['CM_MODEL']: + elif "gpt" in env['MLC_MODEL']: i['run_script_input']['script_name'] = "build_gptj_harness" - env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join( + env['MLC_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join( os.getcwd(), "harness", "build", "gptj_inference") env['DATA_PATH'] = os.path.join(os.getcwd(), "harness", "gptj") - env['MLPERF_INFERENCE_ROOT'] = env['CM_MLPERF_INFERENCE_SOURCE'] + env['MLPERF_INFERENCE_ROOT'] = env['MLC_MLPERF_INFERENCE_SOURCE'] if env.get('INTEL_GPTJ_INT4', '') == 'yes': model_precision = "int4" - if env.get('CM_MLPERF_INFERENCE_CODE_VERSION', '') == 'v3.1': + if env.get('MLC_MLPERF_INFERENCE_CODE_VERSION', '') == 'v3.1': env['RUN_QUANTIZATION_CMD'] = "bash run_quantization_int4.sh" else: 
env['FILE_TAG'] = "final" @@ -138,7 +138,7 @@ def preprocess(i): else: model_precision = "int8" env['RUN_QUANTIZATION_CMD'] = "bash run_quantization.sh" - if env.get('CM_MLPERF_INFERENCE_CODE_VERSION', '') == "v3.1": + if env.get('MLC_MLPERF_INFERENCE_CODE_VERSION', '') == "v3.1": final_model_path = os.path.join( harness_root, "data", f"gpt-j-{model_precision}-model", "best_model.pt") else: @@ -148,93 +148,93 @@ def preprocess(i): env[model_dir_name] = os.path.dirname(final_model_path) if not os.path.exists(env[model_dir_name]): os.makedirs(env[model_dir_name]) - env['CM_ML_MODEL_PATH'] = env[model_dir_name] - env['CM_ML_MODEL_FILE_WITH_PATH'] = final_model_path - if env.get('CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH', + env['MLC_ML_MODEL_PATH'] = env[model_dir_name] + env['MLC_ML_MODEL_FILE_WITH_PATH'] = final_model_path + if env.get('MLC_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH', '') != '' and env.get('INT8_MODEL_DIR', '') != '': shutil.copy( - env['CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH'], + env['MLC_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH'], env[model_dir_name]) - if env.get('CM_MLPERF_INFERENCE_INTEL_GPTJ_INT4_MODEL_PATH', + if env.get('MLC_MLPERF_INFERENCE_INTEL_GPTJ_INT4_MODEL_PATH', '') != '' and env.get('INT4_MODEL_DIR', '') != '': shutil.copy( - env['CM_MLPERF_INFERENCE_INTEL_GPTJ_INT4_MODEL_PATH'], + env['MLC_MLPERF_INFERENCE_INTEL_GPTJ_INT4_MODEL_PATH'], env[model_dir_name]) - elif env['CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "run_harness": + elif env['MLC_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "run_harness": print(f"Harness Root: {harness_root}") - if env.get('CM_MLPERF_LOADGEN_MODE', '') == "compliance": - audit_path = env['CM_MLPERF_INFERENCE_AUDIT_PATH'] - shutil.copy(audit_path, env['CM_RUN_DIR']) + if env.get('MLC_MLPERF_LOADGEN_MODE', '') == "compliance": + audit_path = env['MLC_MLPERF_INFERENCE_AUDIT_PATH'] + shutil.copy(audit_path, env['MLC_RUN_DIR']) - if env['CM_MLPERF_LOADGEN_MODE'] == "accuracy": + if env['MLC_MLPERF_LOADGEN_MODE'] == "accuracy": env['LOADGEN_MODE'] = 'Accuracy' else: env['LOADGEN_MODE'] = 'Performance' - if 'bert' in env['CM_MODEL']: + if 'bert' in env['MLC_MODEL']: env['MODEL_PATH'] = os.path.dirname(os.path.dirname( - env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'])) + env['MLC_MLPERF_INFERENCE_INTEL_HARNESS_PATH'])) env['DATASET_PATH'] = os.path.dirname(os.path.dirname( - env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'])) - env['CM_RUN_DIR'] = i['run_script_input']['path'] - env['CM_RUN_CMD'] = "bash run_bert_harness.sh " + \ - ("--accuracy" if env['CM_MLPERF_LOADGEN_MODE'] + env['MLC_MLPERF_INFERENCE_INTEL_HARNESS_PATH'])) + env['MLC_RUN_DIR'] = i['run_script_input']['path'] + env['MLC_RUN_CMD'] = "bash run_bert_harness.sh " + \ + ("--accuracy" if env['MLC_MLPERF_LOADGEN_MODE'] == "accuracy" else "") - elif 'resnet50' in env['CM_MODEL']: + elif 'resnet50' in env['MLC_MODEL']: env['MODEL_PATH'] = os.path.dirname(os.path.dirname( - env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'])) + env['MLC_MLPERF_INFERENCE_INTEL_HARNESS_PATH'])) env['DATASET_PATH'] = os.path.dirname(os.path.dirname( - env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'])) - env['CM_RUN_DIR'] = env['CM_MLPERF_OUTPUT_DIR'] - env['CM_RUN_CMD'] = f"bash {os.path.join(i['run_script_input']['path'],'run_resnet50_harness.sh')} " - - elif 'retinanet' in env['CM_MODEL']: - env['MODEL_PATH'] = env['CM_ML_MODEL_RETINANET_INT8_FILE_WITH_PATH'] - env['DATA_DIR'] = env['CM_DATASET_PATH_ROOT'] - env['CM_RUN_DIR'] = env['CM_MLPERF_OUTPUT_DIR'] - env['CM_RUN_CMD'] = f"bash 
{os.path.join(i['run_script_input']['path'],'run_retinanet_harness.sh')} " - - elif '3d-unet' in env['CM_MODEL']: - env['CM_RUN_DIR'] = env['CM_MLPERF_OUTPUT_DIR'] - env['CM_RUN_CMD'] = f"bash {os.path.join(i['run_script_input']['path'],'run_3d-unet_harness.sh')} " - - elif 'dlrm' in env['CM_MODEL']: - env['CM_RUN_DIR'] = i['run_script_input']['path'] - env['CM_RUN_CMD'] = f"bash {os.path.join(i['run_script_input']['path'],'run_dlrm_v2_harness.sh')} " - - elif 'stable-diffusion' in env['CM_MODEL']: - env['CM_RUN_DIR'] = i['run_script_input']['path'] - env['CM_RUN_CMD'] = "bash run_sdxl_harness.sh " + \ - ("--accuracy" if env['CM_MLPERF_LOADGEN_MODE'] + env['MLC_MLPERF_INFERENCE_INTEL_HARNESS_PATH'])) + env['MLC_RUN_DIR'] = env['MLC_MLPERF_OUTPUT_DIR'] + env['MLC_RUN_CMD'] = f"bash {os.path.join(i['run_script_input']['path'],'run_resnet50_harness.sh')} " + + elif 'retinanet' in env['MLC_MODEL']: + env['MODEL_PATH'] = env['MLC_ML_MODEL_RETINANET_INT8_FILE_WITH_PATH'] + env['DATA_DIR'] = env['MLC_DATASET_PATH_ROOT'] + env['MLC_RUN_DIR'] = env['MLC_MLPERF_OUTPUT_DIR'] + env['MLC_RUN_CMD'] = f"bash {os.path.join(i['run_script_input']['path'],'run_retinanet_harness.sh')} " + + elif '3d-unet' in env['MLC_MODEL']: + env['MLC_RUN_DIR'] = env['MLC_MLPERF_OUTPUT_DIR'] + env['MLC_RUN_CMD'] = f"bash {os.path.join(i['run_script_input']['path'],'run_3d-unet_harness.sh')} " + + elif 'dlrm' in env['MLC_MODEL']: + env['MLC_RUN_DIR'] = i['run_script_input']['path'] + env['MLC_RUN_CMD'] = f"bash {os.path.join(i['run_script_input']['path'],'run_dlrm_v2_harness.sh')} " + + elif 'stable-diffusion' in env['MLC_MODEL']: + env['MLC_RUN_DIR'] = i['run_script_input']['path'] + env['MLC_RUN_CMD'] = "bash run_sdxl_harness.sh " + \ + ("--accuracy" if env['MLC_MLPERF_LOADGEN_MODE'] == "accuracy" else "") - elif "gptj" in env['CM_MODEL']: - env['CM_RUN_DIR'] = i['run_script_input']['path'] - if env.get('CM_MLPERF_INFERENCE_CODE_VERSION', '') == "v3.1": - env['CM_RUN_CMD'] = "bash run_gptj_harness_v3_1.sh " + elif "gptj" in env['MLC_MODEL']: + env['MLC_RUN_DIR'] = i['run_script_input']['path'] + if env.get('MLC_MLPERF_INFERENCE_CODE_VERSION', '') == "v3.1": + env['MLC_RUN_CMD'] = "bash run_gptj_harness_v3_1.sh " if env.get('INTEL_GPTJ_INT4', '') == 'yes': model_precision = "int4" - env['INT4_MODEL_DIR'] = env['CM_ML_MODEL_PATH'] + env['INT4_MODEL_DIR'] = env['MLC_ML_MODEL_PATH'] env['QUANTIZED_MODEL'] = os.path.join( env['INT4_MODEL_DIR'], "best_int4_model.pt") env['PRECISION'] = "int4_bf16_mixed" else: - env['INT8_MODEL_DIR'] = env['CM_ML_MODEL_PATH'] + env['INT8_MODEL_DIR'] = env['MLC_ML_MODEL_PATH'] env['QUANTIZED_MODEL'] = os.path.join( env["INT8_MODEL_DIR"], "best_model.pt") env['PRECISION'] = "int8" - elif env.get('CM_MLPERF_INFERENCE_CODE_VERSION', '') == "v4.0": - env['CM_RUN_CMD'] = "bash run_gptj_harness_v4_0.sh " + elif env.get('MLC_MLPERF_INFERENCE_CODE_VERSION', '') == "v4.0": + env['MLC_RUN_CMD'] = "bash run_gptj_harness_v4_0.sh " - if env['CM_MLPERF_RUN_STYLE'] == "test": - env['TOTAL_SAMPLE_COUNT'] = env['CM_TEST_QUERY_COUNT'] + if env['MLC_MLPERF_RUN_STYLE'] == "test": + env['TOTAL_SAMPLE_COUNT'] = env['MLC_TEST_QUERY_COUNT'] else: env['TOTAL_SAMPLE_COUNT'] = env.get( - 'CM_MLPERF_MAX_QUERY_COUNT', env['CM_TEST_QUERY_COUNT']) + 'MLC_MLPERF_MAX_QUERY_COUNT', env['MLC_TEST_QUERY_COUNT']) - if env['CM_MLPERF_LOADGEN_SCENARIO'] == "Offline": + if env['MLC_MLPERF_LOADGEN_SCENARIO'] == "Offline": env['WORKERS_PER_PROC'] = 4 else: env['WORKERS_PER_PROC'] = 1 diff --git 
a/script/app-mlperf-inference-intel/meta.yaml b/script/app-mlperf-inference-intel/meta.yaml index 9a7c042d7..86a2806eb 100644 --- a/script/app-mlperf-inference-intel/meta.yaml +++ b/script/app-mlperf-inference-intel/meta.yaml @@ -24,47 +24,47 @@ tags: # Default environment default_env: - CM_BATCH_COUNT: '1' - CM_BATCH_SIZE: '1' - CM_FAST_COMPILATION: 'yes' - CM_MLPERF_LOADGEN_SCENARIO: Offline - CM_MLPERF_LOADGEN_MODE: performance - CM_SKIP_PREPROCESS_DATASET: 'no' - CM_SKIP_MODEL_DOWNLOAD: 'no' - CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: intel - CM_MLPERF_SKIP_RUN: 'no' + MLC_BATCH_COUNT: '1' + MLC_BATCH_SIZE: '1' + MLC_FAST_COMPILATION: 'yes' + MLC_MLPERF_LOADGEN_SCENARIO: Offline + MLC_MLPERF_LOADGEN_MODE: performance + MLC_SKIP_PREPROCESS_DATASET: 'no' + MLC_SKIP_MODEL_DOWNLOAD: 'no' + MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: intel + MLC_MLPERF_SKIP_RUN: 'no' verbosity: 1 loadgen_trigger_cold_run: 0 env: - CM_CALL_MLPERF_RUNNER: 'no' + MLC_CALL_MLPERF_RUNNER: 'no' CUDA_VISIBLE_DEVICES: '' USE_CUDA: '0' # Map script inputs to environment variables input_mapping: - count: CM_MLPERF_LOADGEN_QUERY_COUNT - max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE - mlperf_conf: CM_MLPERF_CONF - mode: CM_MLPERF_LOADGEN_MODE - output_dir: CM_MLPERF_OUTPUT_DIR - performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT - scenario: CM_MLPERF_LOADGEN_SCENARIO - user_conf: CM_MLPERF_USER_CONF - skip_preprocess: CM_SKIP_PREPROCESS_DATASET - skip_preprocessing: CM_SKIP_PREPROCESS_DATASET - target_qps: CM_MLPERF_LOADGEN_TARGET_QPS - offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS - server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS - target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY - singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY - multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY - performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT - rerun: CM_RERUN + count: MLC_MLPERF_LOADGEN_QUERY_COUNT + max_batchsize: MLC_MLPERF_LOADGEN_MAX_BATCHSIZE + mlperf_conf: MLC_MLPERF_CONF + mode: MLC_MLPERF_LOADGEN_MODE + output_dir: MLC_MLPERF_OUTPUT_DIR + performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT + scenario: MLC_MLPERF_LOADGEN_SCENARIO + user_conf: MLC_MLPERF_USER_CONF + skip_preprocess: MLC_SKIP_PREPROCESS_DATASET + skip_preprocessing: MLC_SKIP_PREPROCESS_DATASET + target_qps: MLC_MLPERF_LOADGEN_TARGET_QPS + offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS + server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS + target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY + singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY + multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY + performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT + rerun: MLC_RERUN new_state_keys: - mlperf-inference-implementation - - CM_SUT_* + - MLC_SUT_* @@ -92,7 +92,7 @@ deps: # Install ResNet50 model (ONNX) and ImageNet - enable_if_env: - CM_MODEL: + MLC_MODEL: - resnet50 names: - resnet50-model @@ -100,7 +100,7 @@ deps: tags: get,ml-model,resnet50,_fp32,_pytorch - enable_if_env: - CM_MODEL: + MLC_MODEL: - resnet50 names: - imagenet-original @@ -114,7 +114,7 @@ deps: - enable_if_env: - CM_MODEL: + MLC_MODEL: - retinanet names: - openimages-original @@ -122,7 +122,7 @@ deps: tags: get,dataset,original,openimages,_validation,_custom-annotations,_full - enable_if_env: - CM_MODEL: + MLC_MODEL: - retinanet names: - openimages-calibration @@ -139,11 +139,11 @@ post_deps: - 
runner - mlperf-runner skip_if_env: - CM_MLPERF_SKIP_RUN: + MLC_MLPERF_SKIP_RUN: - 'yes' - yes enable_if_env: - CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: + MLC_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: - run_harness tags: benchmark-mlperf @@ -158,7 +158,7 @@ variations: group: version default: true env: - CM_MLPERF_INFERENCE_CODE_VERSION: "v4.0" + MLC_MLPERF_INFERENCE_CODE_VERSION: "v4.0" deps: - tags: get,mlperf,inference,results,_go names: @@ -175,7 +175,7 @@ variations: v3.1: group: version env: - CM_MLPERF_INFERENCE_CODE_VERSION: "v3.1" + MLC_MLPERF_INFERENCE_CODE_VERSION: "v3.1" deps: - tags: get,mlperf,inference,results,_ctuning names: @@ -200,15 +200,15 @@ variations: group: device default: true env: - CM_MLPERF_DEVICE: cpu + MLC_MLPERF_DEVICE: cpu # ML engine pytorch: group: framework default: true env: - CM_MLPERF_BACKEND: pytorch - CM_MLPERF_BACKEND_LIB_NAMESPEC: pytorch + MLC_MLPERF_BACKEND: pytorch + MLC_MLPERF_BACKEND_LIB_NAMESPEC: pytorch bs.#: env: @@ -219,12 +219,12 @@ variations: group: model default: true env: - CM_MODEL: resnet50 - CM_BENCHMARK: STANDALONE_CLASSIFICATION + MLC_MODEL: resnet50 + MLC_BENCHMARK: STANDALONE_CLASSIFICATION resnet50,int8: env: - CM_IMAGENET_ACCURACY_DTYPE: int8 + MLC_IMAGENET_ACCURACY_DTYPE: int8 bert-99: deps: @@ -232,15 +232,15 @@ variations: names: - bert-99-compiler env: - CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 - CM_ML_MODEL_INPUTS_DATA_TYPE: int8 + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + MLC_ML_MODEL_INPUTS_DATA_TYPE: int8 retinanet: group: model env: - CM_MODEL: retinanet - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth" - CM_BENCHMARK: STANDALONE_OBJECT_DETECTION + MLC_MODEL: retinanet + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth" + MLC_BENCHMARK: STANDALONE_OBJECT_DETECTION deps: - tags: get,generic-python-lib,_numpy @@ -254,25 +254,25 @@ variations: base: - 3d-unet_ env: - CM_MODEL: 3d-unet-99 + MLC_MODEL: 3d-unet-99 3d-unet-99.9: group: model base: - 3d-unet_ env: - CM_MODEL: 3d-unet-99.9 + MLC_MODEL: 3d-unet-99.9 3d-unet_: env: - CM_BENCHMARK: MEDICAL_IMAGING + MLC_BENCHMARK: MEDICAL_IMAGING deps: - tags: get,dataset,kits19,preprocessed - tags: get,ml-model,medical-imaging,3d-unet,_pytorch,_weights bert_: env: - CM_BENCHMARK: STANDALONE_BERT + MLC_BENCHMARK: STANDALONE_BERT bert_,pytorch: deps: @@ -308,7 +308,7 @@ variations: gptj_: env: - CM_BENCHMARK: STANDALONE_GPTJ + MLC_BENCHMARK: STANDALONE_GPTJ int4,gptj_,build-harness: deps: @@ -322,7 +322,7 @@ variations: - sut - loadgen-batchsize enable_if_env: - CM_MLPERF_INFERENCE_CODE_VERSION: + MLC_MLPERF_INFERENCE_CODE_VERSION: - v3.1 force_cache: true - tags: get,generic-python-lib,_package.optimum @@ -333,8 +333,8 @@ variations: sdxl: group: model env: - CM_BENCHMARK: STANDALONE_SDXL - CM_MODEL: stable-diffusion-xl + MLC_BENCHMARK: STANDALONE_SDXL + MLC_MODEL: stable-diffusion-xl sdxl,pytorch: adr: @@ -489,7 +489,7 @@ variations: names: - rapidjson-src env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_RAPIDJSON_SRC_REPO_PATH + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_RAPIDJSON_SRC_REPO_PATH - tags: install,gflags,from.src names: - gflags-from-src @@ -588,7 +588,7 @@ variations: - tags: get,torchvision,from.src,_sha.8e078971b8aebdeb1746fea58851e3754f103053 update_tags_from_env_with_prefix: "_python.": - - CM_PYTHON_BIN_WITH_PATH + - MLC_PYTHON_BIN_WITH_PATH names: - torchvision - tags: install,opencv,from.src,_branch.4.x @@ -598,7 +598,7 @@ variations: names: - rapidjson-src env: - 
CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_RAPIDJSON_SRC_REPO_PATH + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_RAPIDJSON_SRC_REPO_PATH - tags: install,gflags,from.src names: - gflags-from-src @@ -641,13 +641,13 @@ variations: - run-mode - loadgen-scenario new_env_keys: - - CM_ML_MODEL_RETINANET_INT8_FILE_WITH_PATH + - MLC_ML_MODEL_RETINANET_INT8_FILE_WITH_PATH retinanet,compile-model: deps: - tags: get,ml-model,retinanet,_pytorch,_fp32 new_env_keys: - - CM_ML_MODEL_RETINANET_INT8_FILE_WITH_PATH + - MLC_ML_MODEL_RETINANET_INT8_FILE_WITH_PATH 3d-unet_,pytorch: adr: @@ -737,28 +737,28 @@ variations: - mkl tags: get,generic,conda-package,_package.mkl,_source.conda-forge enable_if_env: - CM_MLPERF_INFERENCE_CODE_VERSION: + MLC_MLPERF_INFERENCE_CODE_VERSION: - v4.0 - names: - conda-package - mkl-include tags: get,generic,conda-package,_package.mkl-include,_source.conda-forge enable_if_env: - CM_MLPERF_INFERENCE_CODE_VERSION: + MLC_MLPERF_INFERENCE_CODE_VERSION: - v4.0 - names: - conda-package - llvm-openmp tags: get,generic,conda-package,_package.llvm-openmp,_source.conda-forge enable_if_env: - CM_MLPERF_INFERENCE_CODE_VERSION: + MLC_MLPERF_INFERENCE_CODE_VERSION: - v4.0 - names: - conda-package - pybind11 tags: get,generic,conda-package,_package.pybind11,_source.conda-forge enable_if_env: - CM_MLPERF_INFERENCE_CODE_VERSION: + MLC_MLPERF_INFERENCE_CODE_VERSION: - v4.0 - names: - conda-package @@ -768,7 +768,7 @@ variations: names: - llvm-from-src enable_if_env: - CM_MLPERF_INFERENCE_CODE_VERSION: + MLC_MLPERF_INFERENCE_CODE_VERSION: - v3.1 - names: - conda-package @@ -783,7 +783,7 @@ variations: names: - ipex-from-src enable_if_env: - CM_MLPERF_INFERENCE_CODE_VERSION: + MLC_MLPERF_INFERENCE_CODE_VERSION: - v3.1 - tags: get,generic,conda-package,_package.ninja names: @@ -798,7 +798,7 @@ variations: enable_if_env: INTEL_GPTJ_INT4: - 'yes' - CM_MLPERF_INFERENCE_CODE_VERSION: + MLC_MLPERF_INFERENCE_CODE_VERSION: - v3.1 - tags: get,generic-python-lib,_package.transformers names: @@ -826,7 +826,7 @@ variations: - accelerate - tags: get,generic-python-lib,_custom-python,_package.torch,_url.git+https://github.com/pytorch/pytorch.git@927dc662386af052018212c7d01309a506fc94cd enable_if_env: - CM_MLPERF_INFERENCE_CODE_VERSION: + MLC_MLPERF_INFERENCE_CODE_VERSION: - v3.1 env: "+ CXXFLAGS": @@ -836,13 +836,13 @@ variations: - "-Wno-free-nonheap-object" - tags: get,generic-python-lib,_custom-python,_package.torch env: - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu + MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu enable_if_env: - CM_MLPERF_INFERENCE_CODE_VERSION: + MLC_MLPERF_INFERENCE_CODE_VERSION: - 'v4.0' - tags: install,intel-neural-speed,_for-intel-mlperf-inference-v4.0-gptj,_branch.mlperf-v4-0 enable_if_env: - CM_MLPERF_INFERENCE_CODE_VERSION: + MLC_MLPERF_INFERENCE_CODE_VERSION: - 'v4.0' @@ -851,18 +851,18 @@ variations: base: - gptj_ env: - CM_MODEL: gptj-99 - CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx" - CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 - CM_ML_MODEL_INPUTS_DATA_TYPE: int8 + MLC_MODEL: gptj-99 + MLC_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx" + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + MLC_ML_MODEL_INPUTS_DATA_TYPE: int8 gptj-99.9: group: model base: - gptj_ env: - CM_MODEL: gptj-99.9 - CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx" + MLC_MODEL: gptj-99.9 + MLC_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: 
"https://zenodo.org/record/3733910/files/model.onnx" dlrm-v2_,build-harness: deps: @@ -896,14 +896,14 @@ variations: - llvm-openmp tags: get,generic,conda-package,_package.llvm-openmp,_source.conda-forge enable_if_env: - CM_MLPERF_INFERENCE_CODE_VERSION: + MLC_MLPERF_INFERENCE_CODE_VERSION: - v4.0 - names: - conda-package - pybind11 tags: get,generic,conda-package,_package.pybind11,_source.conda-forge enable_if_env: - CM_MLPERF_INFERENCE_CODE_VERSION: + MLC_MLPERF_INFERENCE_CODE_VERSION: - v4.0 - names: - conda-package @@ -952,62 +952,62 @@ variations: base: - dlrm-v2_ env: - CM_MODEL: dlrm-v2-99 - CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 - CM_ML_MODEL_INPUTS_DATA_TYPE: int8 + MLC_MODEL: dlrm-v2-99 + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + MLC_ML_MODEL_INPUTS_DATA_TYPE: int8 dlrm-v2-99.9: group: model base: - dlrm-v2_ env: - CM_MODEL: dlrm-v2-99.9 + MLC_MODEL: dlrm-v2-99.9 standalone: group: network-mode default: true env: - CM_MLPERF_NETWORK_RUN_MODE: standalone + MLC_MLPERF_NETWORK_RUN_MODE: standalone network-server: group: network-mode env: - CM_MLPERF_NETWORK_RUN_MODE: network-server + MLC_MLPERF_NETWORK_RUN_MODE: network-server network-client: group: network-run-mode env: - CM_MLPERF_NETWORK_RUN_MODE: network-client + MLC_MLPERF_NETWORK_RUN_MODE: network-client bert_,network-server: env: - CM_BENCHMARK: NETWORK_BERT_SERVER + MLC_BENCHMARK: NETWORK_BERT_SERVER bert_,network-client: env: - CM_BENCHMARK: NETWORK_BERT_CLIENT + MLC_BENCHMARK: NETWORK_BERT_CLIENT bert-99: group: model base: - bert_ env: - CM_MODEL: bert-99 - CM_SQUAD_ACCURACY_DTYPE: float32 - CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx" + MLC_MODEL: bert-99 + MLC_SQUAD_ACCURACY_DTYPE: float32 + MLC_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx" bert-99.9: group: model base: - bert_ env: - CM_MODEL: bert-99.9 - CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx" + MLC_MODEL: bert-99.9 + MLC_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx" batch_size.#: group: loadgen-batchsize env: - CM_MLPERF_LOADGEN_BATCH_SIZE: "#" + MLC_MLPERF_LOADGEN_BATCH_SIZE: "#" build-harness: @@ -1015,23 +1015,23 @@ variations: real_run: false group: run-mode env: - CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: build_harness + MLC_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: build_harness new_env_keys: - - CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH - - CM_ML_MODEL_* + - MLC_MLPERF_INFERENCE_INTEL_HARNESS_PATH + - MLC_ML_MODEL_* - DATA_PATH compile-model: group: run-mode env: - CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: compilation + MLC_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: compilation calibration: group: run-mode env: - CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: calibration + MLC_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: calibration new_env_keys: - - CM_ML_MODEL_* + - MLC_ML_MODEL_* - INT4_CALIBRATION_DIR calibration,gptj_: @@ -1080,47 +1080,47 @@ variations: - tags: get,generic-sys-util,_rsync env: - CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: run_harness + MLC_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: run_harness # Env keys which are exposed to higher level scripts new_env_keys: - - CM_MLPERF_* - - CM_DATASET_* - - CM_HW_NAME - - CM_ML_MODEL_* - - CM_MAX_EXAMPLES - - CM_IMAGENET_ACCURACY_DTYPE - - CM_SQUAD_ACCURACY_DTYPE + - MLC_MLPERF_* + - MLC_DATASET_* + - MLC_HW_NAME + - MLC_ML_MODEL_* + - MLC_MAX_EXAMPLES + - MLC_IMAGENET_ACCURACY_DTYPE + - 
MLC_SQUAD_ACCURACY_DTYPE maxq: group: power-mode env: - CM_MLPERF_NVIDIA_HARNESS_MAXQ: yes + MLC_MLPERF_NVIDIA_HARNESS_MAXQ: yes maxn: group: power-mode env: - CM_MLPERF_NVIDIA_HARNESS_MAXN: yes + MLC_MLPERF_NVIDIA_HARNESS_MAXN: yes singlestream: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: SingleStream + MLC_MLPERF_LOADGEN_SCENARIO: SingleStream multistream: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: MultiStream + MLC_MLPERF_LOADGEN_SCENARIO: MultiStream offline: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: Offline + MLC_MLPERF_LOADGEN_SCENARIO: Offline server: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: Server + MLC_MLPERF_LOADGEN_SCENARIO: Server int4: group: precision @@ -1148,7 +1148,7 @@ variations: dataset-preprocessed: tags: _float32,_rgb32 env: - CM_IMAGENET_ACCURACY_DTYPE: float32 + MLC_IMAGENET_ACCURACY_DTYPE: float32 sapphire-rapids.112c: group: sut @@ -1163,21 +1163,21 @@ variations: KMP_BLOCKTIME: 10 WORKERS_PER_PROC: 1 default_env: - CM_MLPERF_LOADGEN_BATCH_SIZE: 8 + MLC_MLPERF_LOADGEN_BATCH_SIZE: 8 sapphire-rapids.24c,gptj-99,offline,int4: env: KMP_BLOCKTIME: 10 WORKERS_PER_PROC: 1 default_env: - CM_MLPERF_LOADGEN_BATCH_SIZE: 8 + MLC_MLPERF_LOADGEN_BATCH_SIZE: 8 sapphire-rapids.112c,gptj-99,offline,int8: env: KMP_BLOCKTIME: 1 WORKERS_PER_PROC: 2 default_env: - CM_MLPERF_LOADGEN_BATCH_SIZE: 14 + MLC_MLPERF_LOADGEN_BATCH_SIZE: 14 sapphire-rapids.112c,gptj-99,offline,int4: env: @@ -1185,21 +1185,21 @@ variations: KMP_BLOCKTIME: 1 WORKERS_PER_PROC: 3 default_env: - CM_MLPERF_LOADGEN_BATCH_SIZE: 8 + MLC_MLPERF_LOADGEN_BATCH_SIZE: 8 sapphire-rapids.112c,gptj-99,server,int8: env: KMP_BLOCKTIME: 1 WORKERS_PER_PROC: 2 default_env: - CM_MLPERF_LOADGEN_BATCH_SIZE: 1 + MLC_MLPERF_LOADGEN_BATCH_SIZE: 1 sapphire-rapids.112c,gptj-99,server,int4: env: KMP_BLOCKTIME: 1 WORKERS_PER_PROC: 4 default_env: - CM_MLPERF_LOADGEN_BATCH_SIZE: 1 + MLC_MLPERF_LOADGEN_BATCH_SIZE: 1 sapphire-rapids.24c,bert_: env: diff --git a/script/app-mlperf-inference-intel/prepare_3d-unet_data_model.sh b/script/app-mlperf-inference-intel/prepare_3d-unet_data_model.sh index 263388147..10cf123f1 100644 --- a/script/app-mlperf-inference-intel/prepare_3d-unet_data_model.sh +++ b/script/app-mlperf-inference-intel/prepare_3d-unet_data_model.sh @@ -1,13 +1,13 @@ #!/bin/bash -export DOWNLOAD_DATA_DIR=${CM_DATASET_PATH} -cd ${CM_HARNESS_CODE_ROOT} +export DOWNLOAD_DATA_DIR=${MLC_DATASET_PATH} +cd ${MLC_HARNESS_CODE_ROOT} mkdir -p build -ln -sf ${CM_DATASET_PREPROCESSED_PATH} build/preprocessed_data +ln -sf ${MLC_DATASET_PREPROCESSED_PATH} build/preprocessed_data mkdir -p build/model -ln -sf ${CM_ML_MODEL_FILE_WITH_PATH} build/model/3dunet_kits19_pytorch_checkpoint.pth +ln -sf ${MLC_ML_MODEL_FILE_WITH_PATH} build/model/3dunet_kits19_pytorch_checkpoint.pth #make setup #make duplicate_kits19_case_00185 diff --git a/script/app-mlperf-inference-intel/prepare_imagenet_calibration.sh b/script/app-mlperf-inference-intel/prepare_imagenet_calibration.sh index e8a4fc61f..a3cd92bec 100644 --- a/script/app-mlperf-inference-intel/prepare_imagenet_calibration.sh +++ b/script/app-mlperf-inference-intel/prepare_imagenet_calibration.sh @@ -1,6 +1,6 @@ -cd ${CM_HARNESS_CODE_ROOT} +cd ${MLC_HARNESS_CODE_ROOT} if [ ! 
-e ILSVRC2012_img_val ]; then - ln -s ${CM_DATASET_IMAGENET_VAL_PATH} ILSVRC2012_img_val + ln -s ${MLC_DATASET_IMAGENET_VAL_PATH} ILSVRC2012_img_val fi bash prepare_calibration_dataset.sh diff --git a/script/app-mlperf-inference-intel/run_3d-unet_harness.sh b/script/app-mlperf-inference-intel/run_3d-unet_harness.sh index 78f44fb2b..725986abd 100644 --- a/script/app-mlperf-inference-intel/run_3d-unet_harness.sh +++ b/script/app-mlperf-inference-intel/run_3d-unet_harness.sh @@ -1,8 +1,8 @@ #!/bin/bash -scenario=${CM_MLPERF_LOADGEN_SCENARIO} -OUTDIR="${CM_MLPERF_OUTPUT_DIR}" +scenario=${MLC_MLPERF_LOADGEN_SCENARIO} +OUTDIR="${MLC_MLPERF_OUTPUT_DIR}" #python ../../user_config.py @@ -14,11 +14,11 @@ number_threads=`nproc --all` export number_cores=`lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l` num_numa=$(numactl --hardware|grep available|awk -F' ' '{ print $2 }') num_instance=$((number_cores/CPUS_PER_INSTANCE)) -export PYTHONPATH=${CM_HARNESS_CODE_ROOT}/common:$PYTHONPATH -cp -r ${CM_HARNESS_CODE_ROOT}/meta $OUTDIR/ -cp ${CM_HARNESS_CODE_ROOT}/unet3d_jit_model.pt $OUTDIR/ -cp ${CM_HARNESS_CODE_ROOT}/calibration_result.json $OUTDIR/ -ln -sf ${CM_HARNESS_CODE_ROOT}/build $OUTDIR/build +export PYTHONPATH=${MLC_HARNESS_CODE_ROOT}/common:$PYTHONPATH +cp -r ${MLC_HARNESS_CODE_ROOT}/meta $OUTDIR/ +cp ${MLC_HARNESS_CODE_ROOT}/unet3d_jit_model.pt $OUTDIR/ +cp ${MLC_HARNESS_CODE_ROOT}/calibration_result.json $OUTDIR/ +ln -sf ${MLC_HARNESS_CODE_ROOT}/build $OUTDIR/build #the log path is hardcoded in the intel implementation. This is a hack to get them to where we want rm -rf $OUTDIR/output_logs ln -sf $OUTDIR $OUTDIR/output_logs @@ -31,13 +31,13 @@ export LD_PRELOAD=$CONDA_PREFIX/lib/libjemalloc.so:$LD_PRELOAD export MALLOC_CONF="oversize_threshold:1,background_thread:true,percpu_arena:percpu,metadata_thp:always,dirty_decay_ms:9000000000,muzzy_decay_ms:9000000000"; -#cd ${CM_HARNESS_CODE_ROOT} -cmd="python ${CM_HARNESS_CODE_ROOT}/run.py \ +#cd ${MLC_HARNESS_CODE_ROOT} +cmd="python ${MLC_HARNESS_CODE_ROOT}/run.py \ --mode ${LOADGEN_MODE} \ --workload-name 3dunet \ - --mlperf-conf ${CM_MLPERF_CONF} \ - --user-conf ${CM_MLPERF_USER_CONF} \ - --workload-config ${CM_HARNESS_CODE_ROOT}/config.json \ + --mlperf-conf ${MLC_MLPERF_CONF} \ + --user-conf ${MLC_MLPERF_USER_CONF} \ + --workload-config ${MLC_HARNESS_CODE_ROOT}/config.json \ --num-instance $num_instance \ --cpus-per-instance $CPUS_PER_INSTANCE \ --scenario $scenario \ diff --git a/script/app-mlperf-inference-intel/run_bert_harness.sh b/script/app-mlperf-inference-intel/run_bert_harness.sh index b49783c6f..de10f4458 100644 --- a/script/app-mlperf-inference-intel/run_bert_harness.sh +++ b/script/app-mlperf-inference-intel/run_bert_harness.sh @@ -1,7 +1,7 @@ #!/bin/bash WORKERS_PER_PROC=${WORKERS_PER_PROC:-4} -THREADS_PER_INSTANCE=$((( ${WORKERS_PER_PROC} * ${CM_HOST_CPU_THREADS_PER_CORE}) / ${CM_HOST_CPU_SOCKETS})) +THREADS_PER_INSTANCE=$((( ${WORKERS_PER_PROC} * ${MLC_HOST_CPU_THREADS_PER_CORE}) / ${MLC_HOST_CPU_SOCKETS})) export LD_PRELOAD=${CONDA_PREFIX}/lib/libjemalloc.so export MALLOC_CONF="oversize_threshold:1,background_thread:true,percpu_arena:percpu,metadata_thp:always,dirty_decay_ms:9000000000,muzzy_decay_ms:9000000000"; @@ -14,13 +14,13 @@ num_numa=$(numactl --hardware|grep available|awk -F' ' '{ print $2 }') num_instance=$(($number_cores / $THREADS_PER_INSTANCE)) sut_dir=${MODEL_PATH} -executable=${CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH} -mode=${CM_MLPERF_LOADGEN_SCENARIO} -OUTDIR="${CM_MLPERF_OUTPUT_DIR}" 
+executable=${MLC_MLPERF_INFERENCE_INTEL_HARNESS_PATH} +mode=${MLC_MLPERF_LOADGEN_SCENARIO} +OUTDIR="${MLC_MLPERF_OUTPUT_DIR}" #python ../../user_config.py -USER_CONF="${CM_MLPERF_USER_CONF}" +USER_CONF="${MLC_MLPERF_USER_CONF}" -CONFIG="-n ${num_numa} -i ${num_instance} -j ${THREADS_PER_INSTANCE} --test_scenario=${mode} --model_file=${sut_dir}/bert.pt --sample_file=${sut_dir}/squad.pt --mlperf_config=${CM_MLPERF_CONF} --user_config=${USER_CONF} -o ${OUTDIR} -w 1300 --warmup ${accuracy}" +CONFIG="-n ${num_numa} -i ${num_instance} -j ${THREADS_PER_INSTANCE} --test_scenario=${mode} --model_file=${sut_dir}/bert.pt --sample_file=${sut_dir}/squad.pt --mlperf_config=${MLC_MLPERF_CONF} --user_config=${USER_CONF} -o ${OUTDIR} -w 1300 --warmup ${accuracy}" ${executable} ${CONFIG} diff --git a/script/app-mlperf-inference-intel/run_dlrm_v2_harness.sh b/script/app-mlperf-inference-intel/run_dlrm_v2_harness.sh index 65530c621..3e4d0adfa 100644 --- a/script/app-mlperf-inference-intel/run_dlrm_v2_harness.sh +++ b/script/app-mlperf-inference-intel/run_dlrm_v2_harness.sh @@ -1,9 +1,9 @@ #!/bin/bash -export MODEL_DIR=${CM_ML_MODEL_FILE_WITH_PATH} +export MODEL_DIR=${MLC_ML_MODEL_FILE_WITH_PATH} export DATA_DIR=/mnt/dlrm_data -NUM_SOCKETS=${CM_HOST_CPU_SOCKETS:-2} +NUM_SOCKETS=${MLC_HOST_CPU_SOCKETS:-2} export NUM_SOCKETS=$NUM_SOCKETS export num_physical_cores=`lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l` export CPUS_PER_SOCKET=$((num_physical_cores/NUM_SOCKETS)) @@ -15,7 +15,7 @@ export CPUS_FOR_LOADGEN=1 export BATCH_SIZE=100 export DNNL_MAX_CPU_ISA=AVX512_CORE_AMX -export LD_PRELOAD=${CM_CONDA_LIB_PATH}/libiomp5.so +export LD_PRELOAD=${MLC_CONDA_LIB_PATH}/libiomp5.so export KMP_BLOCKTIME=1 export OMP_NUM_THREADS=$CPUS_PER_INSTANCE @@ -38,21 +38,21 @@ export EXTRA_OPS="$extra_option" model_path="$MODEL_DIR/dlrm-multihot-pytorch.pt" profile=dlrm-multihot-pytorch -cd ${CM_HARNESS_CODE_ROOT} -OUTPUT_DIR="${CM_MLPERF_OUTPUT_DIR}" +cd ${MLC_HARNESS_CODE_ROOT} +OUTPUT_DIR="${MLC_MLPERF_OUTPUT_DIR}" -if [[ "${CM_MLPERF_LOADGEN_MODE}" == "accuracy" ]]; then +if [[ "${MLC_MLPERF_LOADGEN_MODE}" == "accuracy" ]]; then accuracy_opt=" --accuracy" else accuracy_opt="" fi -USER_CONF="${CM_MLPERF_USER_CONF}" +USER_CONF="${MLC_MLPERF_USER_CONF}" cmd="python -u python/runner.py --profile $profile $common_opt --model dlrm --model-path $model_path \ ---config ${CM_MLPERF_CONF} --user-config ${CM_MLPERF_USER_CONF} \ +--config ${MLC_MLPERF_CONF} --user-config ${MLC_MLPERF_USER_CONF} \ --dataset multihot-criteo --dataset-path $DATA_DIR --output $OUTPUT_DIR $EXTRA_OPS \ --max-ind-range=40000000 --samples-to-aggregate-quantile-file=${PWD}/tools/dist_quantile.txt \ ---max-batchsize=$BATCH_SIZE --scenario=${CM_MLPERF_LOADGEN_SCENARIO} ${accuracy_opt}" +--max-batchsize=$BATCH_SIZE --scenario=${MLC_MLPERF_LOADGEN_SCENARIO} ${accuracy_opt}" echo "$cmd" diff --git a/script/app-mlperf-inference-intel/run_gptj_harness_v3_1.sh b/script/app-mlperf-inference-intel/run_gptj_harness_v3_1.sh index 74988df28..77a099e0f 100644 --- a/script/app-mlperf-inference-intel/run_gptj_harness_v3_1.sh +++ b/script/app-mlperf-inference-intel/run_gptj_harness_v3_1.sh @@ -1,5 +1,5 @@ #!/bin/bash -export PATH=${CM_CONDA_BIN_PATH}:$PATH +export PATH=${MLC_CONDA_BIN_PATH}:$PATH KMP_BLOCKTIME=${KMP_BLOCKTIME:-10} @@ -15,30 +15,30 @@ NUM_PROC=${NUM_PROC:-$num_numa} CPUS_PER_PROC=$((num_physical_cores/num_numa)) WORKERS_PER_PROC=${WORKERS_PER_PROC:-1} TOTAL_SAMPLE_COUNT=13368 -BATCH_SIZE=${CM_MLPERF_LOADGEN_BATCH_SIZE:-8} 
+BATCH_SIZE=${MLC_MLPERF_LOADGEN_BATCH_SIZE:-8} TIMESTAMP=$(date +%m-%d-%H-%M) HOSTNAME=$(hostname) #OUTPUT_DIR=offline-output-${HOSTNAME}-batch-${BATCH_SIZE}-procs-${NUM_PROC}-ins-per-proc-${WORKERS_PER_PROC}-${TIMESTAMP} -export WORKLOAD_DATA=${CM_HARNESS_CODE_ROOT}/data +export WORKLOAD_DATA=${MLC_HARNESS_CODE_ROOT}/data export VALIDATION_DATA_JSON=${WORKLOAD_DATA}/validation-data/cnn_dailymail_validation.json -cd ${CM_HARNESS_CODE_ROOT} -OUTPUT_DIR="${CM_MLPERF_OUTPUT_DIR}" +cd ${MLC_HARNESS_CODE_ROOT} +OUTPUT_DIR="${MLC_MLPERF_OUTPUT_DIR}" -USER_CONF="${CM_MLPERF_USER_CONF}" +USER_CONF="${MLC_MLPERF_USER_CONF}" cmd="python runner.py --workload-name gptj \ - --scenario ${CM_MLPERF_LOADGEN_SCENARIO} \ + --scenario ${MLC_MLPERF_LOADGEN_SCENARIO} \ --mode ${LOADGEN_MODE} \ --num-proc ${NUM_PROC} \ --cpus-per-proc ${CPUS_PER_PROC} \ --model-checkpoint-path ${CHECKPOINT_DIR} \ --dataset-path ${VALIDATION_DATA_JSON} \ --batch-size ${BATCH_SIZE} \ - --mlperf-conf ${CM_MLPERF_CONF} \ - --user-conf ${CM_MLPERF_USER_CONF} \ + --mlperf-conf ${MLC_MLPERF_CONF} \ + --user-conf ${MLC_MLPERF_USER_CONF} \ --precision ${PRECISION} \ --pad-inputs \ --quantized-model ${QUANTIZED_MODEL} \ diff --git a/script/app-mlperf-inference-intel/run_gptj_harness_v4_0.sh b/script/app-mlperf-inference-intel/run_gptj_harness_v4_0.sh index 9186f733a..881926060 100644 --- a/script/app-mlperf-inference-intel/run_gptj_harness_v4_0.sh +++ b/script/app-mlperf-inference-intel/run_gptj_harness_v4_0.sh @@ -1,5 +1,5 @@ #!/bin/bash -export PATH=${CM_CONDA_BIN_PATH}:$PATH +export PATH=${MLC_CONDA_BIN_PATH}:$PATH export KMP_BLOCKTIME=1 export KMP_AFFINITY=granularity=fine,compact,1,0 @@ -7,7 +7,7 @@ export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libiomp5.so # export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libtcmalloc.so # -BATCH_SIZE=${CM_MLPERF_LOADGEN_BATCH_SIZE} +BATCH_SIZE=${MLC_MLPERF_LOADGEN_BATCH_SIZE} DIR_SCRIPT=$(dirname "${BASH_SOURCE[0]}") [ -z $DIR_NS ] && DIR_NS="$DIR_SCRIPT/gpt-j-env/neural-speed" @@ -36,10 +36,10 @@ CPUS_PER_PROC=$((num_physical_cores / num_numa)) [ -z $BATCH_SIZE ] && BATCH_SIZE=12 [ -z $BEAM_SIZE ] && BEAM_SIZE=4 -OUTPUT_DIR="${CM_MLPERF_OUTPUT_DIR}" -MODEL_PATH="${CM_ML_MODEL_FILE_WITH_PATH}" -cd ${CM_HARNESS_CODE_ROOT} -export WORKLOAD_DATA=${CM_HARNESS_CODE_ROOT}/data +OUTPUT_DIR="${MLC_MLPERF_OUTPUT_DIR}" +MODEL_PATH="${MLC_ML_MODEL_FILE_WITH_PATH}" +cd ${MLC_HARNESS_CODE_ROOT} +export WORKLOAD_DATA=${MLC_HARNESS_CODE_ROOT}/data export VALIDATION_DATA_JSON=${WORKLOAD_DATA}/validation-data/cnn_dailymail_validation.json @@ -49,7 +49,7 @@ done echo "Start time: $(date)" cmd="python runner.py --workload-name gptj \ - --scenario ${CM_MLPERF_LOADGEN_SCENARIO} \ + --scenario ${MLC_MLPERF_LOADGEN_SCENARIO} \ --mode ${LOADGEN_MODE} \ --num-proc ${NUM_PROC} \ --cpus-per-proc ${CPUS_PER_PROC} \ @@ -58,8 +58,8 @@ cmd="python runner.py --workload-name gptj \ --model-checkpoint ${CHECKPOINT_DIR} \ --batch-size ${BATCH_SIZE} \ --beam-size ${BEAM_SIZE} \ - --mlperf-conf ${CM_MLPERF_CONF} \ - --user-conf ${CM_MLPERF_USER_CONF} \ + --mlperf-conf ${MLC_MLPERF_CONF} \ + --user-conf ${MLC_MLPERF_USER_CONF} \ --workers-per-proc ${WORKERS_PER_PROC} \ --total-sample-count ${TOTAL_SAMPLE_COUNT} \ --output-dir ${OUTPUT_DIR} \ diff --git a/script/app-mlperf-inference-intel/run_resnet50_harness.sh b/script/app-mlperf-inference-intel/run_resnet50_harness.sh index 861d891aa..22782a615 100644 --- a/script/app-mlperf-inference-intel/run_resnet50_harness.sh +++ 
b/script/app-mlperf-inference-intel/run_resnet50_harness.sh @@ -7,7 +7,7 @@ number_sockets=`grep physical.id /proc/cpuinfo | sort -u | wc -l` cpu_per_socket=$((number_cores/number_sockets)) WORKERS_PER_PROC=${WORKERS_PER_PROC:-4} -THREADS_PER_INSTANCE=$((( ${WORKERS_PER_PROC} * ${CM_HOST_CPU_THREADS_PER_CORE}) / ${CM_HOST_CPU_SOCKETS})) +THREADS_PER_INSTANCE=$((( ${WORKERS_PER_PROC} * ${MLC_HOST_CPU_THREADS_PER_CORE}) / ${MLC_HOST_CPU_SOCKETS})) export LD_PRELOAD=${CONDA_PREFIX}/lib/libjemalloc.so export LD_PRELOAD=${CONDA_PREFIX}/lib/libiomp5.so @@ -18,23 +18,23 @@ export KMP_BLOCKTIME=1 export $KMP_SETTING -export DATA_DIR=${CM_HARNESS_CODE_ROOT}/ILSVRC2012_img_val -export RN50_START=${CM_HARNESS_CODE_ROOT}/models/resnet50-start-int8-model.pth -export RN50_END=${CM_HARNESS_CODE_ROOT}/models/resnet50-end-int8-model.pth -export RN50_FULL=${CM_HARNESS_CODE_ROOT}/models/resnet50-full.pth +export DATA_DIR=${MLC_HARNESS_CODE_ROOT}/ILSVRC2012_img_val +export RN50_START=${MLC_HARNESS_CODE_ROOT}/models/resnet50-start-int8-model.pth +export RN50_END=${MLC_HARNESS_CODE_ROOT}/models/resnet50-end-int8-model.pth +export RN50_FULL=${MLC_HARNESS_CODE_ROOT}/models/resnet50-full.pth export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${CONDA_PREFIX}/lib -rsync -avz ${CM_HARNESS_CODE_ROOT}/val_data/ ${DATA_DIR}/ -executable="${CM_HARNESS_CODE_ROOT}/build/bin/mlperf_runner" +rsync -avz ${MLC_HARNESS_CODE_ROOT}/val_data/ ${DATA_DIR}/ +executable="${MLC_HARNESS_CODE_ROOT}/build/bin/mlperf_runner" number_threads=`nproc --all` export number_cores=`lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l` num_numa=$(numactl --hardware|grep available|awk -F' ' '{ print $2 }') num_instance=$(($number_cores / $THREADS_PER_INSTANCE)) -scenario=${CM_MLPERF_LOADGEN_SCENARIO} -OUTDIR="${CM_MLPERF_OUTPUT_DIR}" +scenario=${MLC_MLPERF_LOADGEN_SCENARIO} +OUTDIR="${MLC_MLPERF_OUTPUT_DIR}" scenario="Offline" #python ../../user_config.py @@ -42,7 +42,7 @@ scenario="Offline" CONFIG=" --scenario ${scenario} --mode ${LOADGEN_MODE} --model_name resnet50 \ --rn50-part1 ${RN50_START} --rn50-part3 ${RN50_END} --rn50-full-model ${RN50_FULL} \ --data_path ${DATA_DIR} \ - --mlperf_conf ${CM_MLPERF_CONF} --user_conf ${CM_MLPERF_USER_CONF} \ + --mlperf_conf ${MLC_MLPERF_CONF} --user_conf ${MLC_MLPERF_USER_CONF} \ --cpus_per_instance $CPUS_PER_INSTANCE \ --num_instance $number_cores \ --total_sample_count 50000 \ diff --git a/script/app-mlperf-inference-intel/run_retinanet_harness.sh b/script/app-mlperf-inference-intel/run_retinanet_harness.sh index 98ca3a5b2..d2e507508 100644 --- a/script/app-mlperf-inference-intel/run_retinanet_harness.sh +++ b/script/app-mlperf-inference-intel/run_retinanet_harness.sh @@ -8,7 +8,7 @@ cpu_per_socket=$((number_cores/number_sockets)) number_instance=$((number_cores/CPUS_PER_INSTANCE)) WORKERS_PER_PROC=${WORKERS_PER_PROC:-4} -THREADS_PER_INSTANCE=$((( ${WORKERS_PER_PROC} * ${CM_HOST_CPU_THREADS_PER_CORE}) / ${CM_HOST_CPU_SOCKETS})) +THREADS_PER_INSTANCE=$((( ${WORKERS_PER_PROC} * ${MLC_HOST_CPU_THREADS_PER_CORE}) / ${MLC_HOST_CPU_SOCKETS})) export LD_PRELOAD=${CONDA_PREFIX}/lib/libjemalloc.so export LD_PRELOAD=${CONDA_PREFIX}/lib/libiomp5.so @@ -21,15 +21,15 @@ export $KMP_SETTING export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${CONDA_PREFIX}/lib -executable="${CM_HARNESS_CODE_ROOT}/build/bin/mlperf_runner" +executable="${MLC_HARNESS_CODE_ROOT}/build/bin/mlperf_runner" number_threads=`nproc --all` export number_cores=`lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l` num_numa=$(numactl --hardware|grep 
available|awk -F' ' '{ print $2 }') num_instance=$(($number_cores / $THREADS_PER_INSTANCE)) -scenario=${CM_MLPERF_LOADGEN_SCENARIO} -OUTDIR="${CM_MLPERF_OUTPUT_DIR}" +scenario=${MLC_MLPERF_LOADGEN_SCENARIO} +OUTDIR="${MLC_MLPERF_OUTPUT_DIR}" scenario="Offline" #python ../../user_config.py @@ -37,7 +37,7 @@ scenario="Offline" CONFIG=" --scenario ${scenario} --mode ${LOADGEN_MODE} --model_name retinanet \ --model_path ${MODEL_PATH} \ --data_path ${DATA_DIR} \ - --mlperf_conf ${CM_MLPERF_CONF} --user_conf ${CM_MLPERF_USER_CONF} \ + --mlperf_conf ${MLC_MLPERF_CONF} --user_conf ${MLC_MLPERF_USER_CONF} \ --cpus_per_instance $CPUS_PER_INSTANCE \ --num_instance $number_instance \ --total_sample_count 24781 \ diff --git a/script/app-mlperf-inference-intel/run_sdxl_harness.sh b/script/app-mlperf-inference-intel/run_sdxl_harness.sh index 3dd71ec83..1b3c7bc20 100644 --- a/script/app-mlperf-inference-intel/run_sdxl_harness.sh +++ b/script/app-mlperf-inference-intel/run_sdxl_harness.sh @@ -6,16 +6,16 @@ export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libiomp5.so # export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libtcmalloc.so # -BATCH_SIZE=${CM_MLPERF_LOADGEN_BATCH_SIZE} +BATCH_SIZE=${MLC_MLPERF_LOADGEN_BATCH_SIZE} export num_physical_cores=$(lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l) num_numa=$(numactl --hardware|grep available|awk -F' ' '{ print $2 }') -OUTPUT_DIR="${CM_MLPERF_OUTPUT_DIR}" +OUTPUT_DIR="${MLC_MLPERF_OUTPUT_DIR}" MODEL_PATH="${SDXL_CHECKPOINT_PATH}" -cd ${CM_HARNESS_CODE_ROOT} +cd ${MLC_HARNESS_CODE_ROOT} NUM_PROC=1 CPUS_PER_PROC=16 @@ -30,14 +30,14 @@ echo "Start time: $(date)" cmd="python -u main.py \ --dtype bfloat16 \ --device 'cpu' \ - --scenario ${CM_MLPERF_LOADGEN_SCENARIO} \ + --scenario ${MLC_MLPERF_LOADGEN_SCENARIO} \ --mode ${LOADGEN_MODE} \ --num-proc ${NUM_PROC} \ --cpus-per-proc ${CPUS_PER_PROC} \ --model-path ${MODEL_PATH} \ --batch-size ${BATCH_SIZE} \ - --mlperf-conf ${CM_MLPERF_CONF} \ - --user-conf ${CM_MLPERF_USER_CONF} \ + --mlperf-conf ${MLC_MLPERF_CONF} \ + --user-conf ${MLC_MLPERF_USER_CONF} \ --workers-per-proc ${WORKERS_PER_PROC} \ --total-sample-count ${TOTAL_SAMPLE_COUNT} \ --log-dir ${OUTPUT_DIR} " diff --git a/script/app-mlperf-inference-mlcommons-cpp/customize.py b/script/app-mlperf-inference-mlcommons-cpp/customize.py index 1884a0798..c71d90f27 100644 --- a/script/app-mlperf-inference-mlcommons-cpp/customize.py +++ b/script/app-mlperf-inference-mlcommons-cpp/customize.py @@ -22,30 +22,30 @@ def preprocess(i): env = i['env'] - if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": + if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes": return {'return': 0} - if 'CM_MODEL' not in env: + if 'MLC_MODEL' not in env: return { 'return': 1, 'error': 'Please select a variation specifying the model to run'} - if 'CM_MLPERF_BACKEND' not in env: + if 'MLC_MLPERF_BACKEND' not in env: return {'return': 1, 'error': 'Please select a variation specifying the backend'} - if 'CM_MLPERF_DEVICE' not in env: + if 'MLC_MLPERF_DEVICE' not in env: return { 'return': 1, 'error': 'Please select a variation specifying the device to run on'} source_files = [] script_path = i['run_script_input']['path'] - if env['CM_MODEL'] == "retinanet": - env['CM_DATASET_LIST'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH'] - env['CM_SOURCE_FOLDER_PATH'] = os.path.join(script_path, "src") + if env['MLC_MODEL'] == "retinanet": + env['MLC_DATASET_LIST'] = env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] + env['MLC_SOURCE_FOLDER_PATH'] = os.path.join(script_path, "src") - for file in 
os.listdir(env['CM_SOURCE_FOLDER_PATH']): + for file in os.listdir(env['MLC_SOURCE_FOLDER_PATH']): if file.endswith(".c") or file.endswith(".cpp"): source_files.append(file) - env['CM_CXX_SOURCE_FILES'] = ";".join(source_files) + env['MLC_CXX_SOURCE_FILES'] = ";".join(source_files) if '+CPLUS_INCLUDE_PATH' not in env: env['+CPLUS_INCLUDE_PATH'] = [] @@ -53,24 +53,24 @@ def preprocess(i): env['+CPLUS_INCLUDE_PATH'].append(os.path.join(script_path, "inc")) env['+C_INCLUDE_PATH'].append(os.path.join(script_path, "inc")) - if env['CM_MLPERF_DEVICE'] == 'gpu': - env['+C_INCLUDE_PATH'].append(env['CM_CUDA_PATH_INCLUDE']) - env['+CPLUS_INCLUDE_PATH'].append(env['CM_CUDA_PATH_INCLUDE']) - env['+LD_LIBRARY_PATH'].append(env['CM_CUDA_PATH_LIB']) - env['+DYLD_FALLBACK_LIBRARY_PATH'].append(env['CM_CUDA_PATH_INCLUDE']) + if env['MLC_MLPERF_DEVICE'] == 'gpu': + env['+C_INCLUDE_PATH'].append(env['MLC_CUDA_PATH_INCLUDE']) + env['+CPLUS_INCLUDE_PATH'].append(env['MLC_CUDA_PATH_INCLUDE']) + env['+LD_LIBRARY_PATH'].append(env['MLC_CUDA_PATH_LIB']) + env['+DYLD_FALLBACK_LIBRARY_PATH'].append(env['MLC_CUDA_PATH_INCLUDE']) if '+ CXXFLAGS' not in env: env['+ CXXFLAGS'] = [] env['+ CXXFLAGS'].append("-std=c++14") - # add preprocessor flag like "#define CM_MODEL_RESNET50" - env['+ CXXFLAGS'].append('-DCM_MODEL_' + env['CM_MODEL'].upper()) - # add preprocessor flag like "#define CM_MLPERF_BACKEND_ONNXRUNTIME" - env['+ CXXFLAGS'].append('-DCM_MLPERF_BACKEND_' + - env['CM_MLPERF_BACKEND'].upper()) - # add preprocessor flag like "#define CM_MLPERF_DEVICE_CPU" - env['+ CXXFLAGS'].append('-DCM_MLPERF_DEVICE_' + - env['CM_MLPERF_DEVICE'].upper()) + # add preprocessor flag like "#define MLC_MODEL_RESNET50" + env['+ CXXFLAGS'].append('-DMLC_MODEL_' + env['MLC_MODEL'].upper()) + # add preprocessor flag like "#define MLC_MLPERF_BACKEND_ONNXRUNTIME" + env['+ CXXFLAGS'].append('-DMLC_MLPERF_BACKEND_' + + env['MLC_MLPERF_BACKEND'].upper()) + # add preprocessor flag like "#define MLC_MLPERF_DEVICE_CPU" + env['+ CXXFLAGS'].append('-DMLC_MLPERF_DEVICE_' + + env['MLC_MLPERF_DEVICE'].upper()) if '+ LDCXXFLAGS' not in env: env['+ LDCXXFLAGS'] = [] @@ -80,22 +80,22 @@ def preprocess(i): "-lpthread" ] # e.g. -lonnxruntime - if 'CM_MLPERF_BACKEND_LIB_NAMESPEC' in env: + if 'MLC_MLPERF_BACKEND_LIB_NAMESPEC' in env: env['+ LDCXXFLAGS'].append('-l' + - env['CM_MLPERF_BACKEND_LIB_NAMESPEC']) + env['MLC_MLPERF_BACKEND_LIB_NAMESPEC']) # e.g. 
-lcudart - if 'CM_MLPERF_DEVICE_LIB_NAMESPEC' in env: - env['+ LDCXXFLAGS'].append('-l' + env['CM_MLPERF_DEVICE_LIB_NAMESPEC']) - - env['CM_LINKER_LANG'] = 'CXX' - env['CM_RUN_DIR'] = os.getcwd() - - if 'CM_MLPERF_CONF' not in env: - env['CM_MLPERF_CONF'] = os.path.join( - env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") - if 'CM_MLPERF_USER_CONF' not in env: - env['CM_MLPERF_USER_CONF'] = os.path.join( - env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf") + if 'MLC_MLPERF_DEVICE_LIB_NAMESPEC' in env: + env['+ LDCXXFLAGS'].append('-l' + env['MLC_MLPERF_DEVICE_LIB_NAMESPEC']) + + env['MLC_LINKER_LANG'] = 'CXX' + env['MLC_RUN_DIR'] = os.getcwd() + + if 'MLC_MLPERF_CONF' not in env: + env['MLC_MLPERF_CONF'] = os.path.join( + env['MLC_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") + if 'MLC_MLPERF_USER_CONF' not in env: + env['MLC_MLPERF_USER_CONF'] = os.path.join( + env['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf") return {'return': 0} diff --git a/script/app-mlperf-inference-mlcommons-cpp/meta.yaml b/script/app-mlperf-inference-mlcommons-cpp/meta.yaml index e13bab985..dda32e172 100644 --- a/script/app-mlperf-inference-mlcommons-cpp/meta.yaml +++ b/script/app-mlperf-inference-mlcommons-cpp/meta.yaml @@ -21,32 +21,32 @@ tags: # Default environment default_env: - CM_BATCH_COUNT: '1' - CM_BATCH_SIZE: '1' - CM_FAST_COMPILATION: "yes" - CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: cpp + MLC_BATCH_COUNT: '1' + MLC_BATCH_SIZE: '1' + MLC_FAST_COMPILATION: "yes" + MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: cpp # Map script inputs to environment variables input_mapping: - count: CM_MLPERF_LOADGEN_QUERY_COUNT - max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE - mlperf_conf: CM_MLPERF_CONF - mode: CM_MLPERF_LOADGEN_MODE - output_dir: CM_MLPERF_OUTPUT_DIR - performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT - scenario: CM_MLPERF_LOADGEN_SCENARIO - user_conf: CM_MLPERF_USER_CONF + count: MLC_MLPERF_LOADGEN_QUERY_COUNT + max_batchsize: MLC_MLPERF_LOADGEN_MAX_BATCHSIZE + mlperf_conf: MLC_MLPERF_CONF + mode: MLC_MLPERF_LOADGEN_MODE + output_dir: MLC_MLPERF_OUTPUT_DIR + performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT + scenario: MLC_MLPERF_LOADGEN_SCENARIO + user_conf: MLC_MLPERF_USER_CONF new_env_keys: - - CM_MLPERF_* - - CM_DATASET_* - - CM_ML_MODEL_* - - CM_HW_NAME + - MLC_MLPERF_* + - MLC_DATASET_* + - MLC_ML_MODEL_* + - MLC_HW_NAME new_state_keys: - mlperf-inference-implementation - - CM_SUT_* + - MLC_SUT_* # Dependencies on other CM scripts @@ -64,7 +64,7 @@ deps: # Detect CUDA if required - tags: get,cuda,_cudnn enable_if_env: - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - gpu ######################################################################## @@ -83,16 +83,16 @@ deps: ######################################################################## # Install ML engines via CM - enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - onnxruntime - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - cpu tags: get,lib,onnxruntime,lang-cpp,_cpu - enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - onnxruntime - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - gpu tags: get,lib,onnxruntime,lang-cpp,_cuda @@ -101,14 +101,14 @@ deps: # Install ResNet50 model (ONNX) and ImageNet - enable_if_env: - CM_MODEL: + MLC_MODEL: - resnet50 names: - imagenet-preprocessed tags: get,dataset,preprocessed,imagenet,_NCHW - enable_if_env: - CM_MODEL: + MLC_MODEL: - resnet50 tags: get,ml-model,raw,resnet50,_onnx @@ -117,14 +117,14 @@ deps: # Install RetinaNet 
model (ONNX) and OpenImages - enable_if_env: - CM_MODEL: + MLC_MODEL: - retinanet names: - openimages-preprocessed tags: get,dataset,preprocessed,openimages,_validation,_NCHW - enable_if_env: - CM_MODEL: + MLC_MODEL: - retinanet tags: get,ml-model,retinanet,_onnx,_fp32 @@ -141,14 +141,14 @@ post_deps: - compile-program tags: compile,cpp-program skip_if_env: - CM_MLPERF_SKIP_RUN: + MLC_MLPERF_SKIP_RUN: - "yes" - names: - mlperf-runner tags: benchmark-mlperf skip_if_env: - CM_MLPERF_SKIP_RUN: + MLC_MLPERF_SKIP_RUN: - "yes" - tags: save,mlperf,inference,state @@ -162,90 +162,90 @@ variations: group: device default: true env: - CM_MLPERF_DEVICE: cpu + MLC_MLPERF_DEVICE: cpu cuda: group: device env: - CM_MLPERF_DEVICE: gpu - CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart + MLC_MLPERF_DEVICE: gpu + MLC_MLPERF_DEVICE_LIB_NAMESPEC: cudart # ML engine onnxruntime: group: framework default: true env: - CM_MLPERF_BACKEND: onnxruntime - CM_MLPERF_BACKEND_LIB_NAMESPEC: onnxruntime + MLC_MLPERF_BACKEND: onnxruntime + MLC_MLPERF_BACKEND_LIB_NAMESPEC: onnxruntime pytorch: group: framework env: - CM_MLPERF_BACKEND: pytorch + MLC_MLPERF_BACKEND: pytorch tf: group: framework env: - CM_MLPERF_BACKEND: tf + MLC_MLPERF_BACKEND: tf tflite: group: framework env: - CM_MLPERF_BACKEND: tflite + MLC_MLPERF_BACKEND: tflite tvm-onnx: group: framework env: - CM_MLPERF_BACKEND: tvm-onnx + MLC_MLPERF_BACKEND: tvm-onnx # Reference MLPerf models resnet50: group: model default: true env: - CM_MODEL: resnet50 + MLC_MODEL: resnet50 retinanet: group: model default_env: - CM_MLPERF_LOADGEN_MAX_BATCHSIZE: 1 + MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: 1 env: - CM_MODEL: retinanet + MLC_MODEL: retinanet resnet50,offline: default_env: - CM_MLPERF_LOADGEN_MAX_BATCHSIZE: 32 + MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: 32 resnet50,server: default_env: - CM_MLPERF_LOADGEN_MAX_BATCHSIZE: 32 + MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: 32 resnet50,multistream: default_env: - CM_MLPERF_LOADGEN_MAX_BATCHSIZE: 8 + MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: 8 batch-size.#: group: batch-size env: - CM_MLPERF_LOADGEN_MAX_BATCHSIZE: "#" + MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: "#" offline: group: loadgen-scenario default: true env: - CM_MLPERF_LOADGEN_SCENARIO: Offline + MLC_MLPERF_LOADGEN_SCENARIO: Offline multistream: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: MultiStream + MLC_MLPERF_LOADGEN_SCENARIO: MultiStream singlestream: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: SingleStream - CM_MLPERF_LOADGEN_MAX_BATCHSIZE: 1 + MLC_MLPERF_LOADGEN_SCENARIO: SingleStream + MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: 1 server: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: Server + MLC_MLPERF_LOADGEN_SCENARIO: Server multistream,resnet50: default_variations: diff --git a/script/app-mlperf-inference-mlcommons-cpp/src/main.cpp b/script/app-mlperf-inference-mlcommons-cpp/src/main.cpp index c5a3c809e..bf3fe86b6 100644 --- a/script/app-mlperf-inference-mlcommons-cpp/src/main.cpp +++ b/script/app-mlperf-inference-mlcommons-cpp/src/main.cpp @@ -11,11 +11,11 @@ #include "model.h" #include "sample_library.h" #include "system.h" -#ifdef CM_MLPERF_DEVICE_GPU +#ifdef MLC_MLPERF_DEVICE_GPU #include "gpu_device.h" #endif -#ifdef CM_MLPERF_BACKEND_ONNXRUNTIME +#ifdef MLC_MLPERF_BACKEND_ONNXRUNTIME #include "onnxruntime_backend.h" #endif @@ -23,28 +23,28 @@ class InputSettings { public: InputSettings() { - mlperf_conf_path = getenv("CM_MLPERF_CONF", "../inference/mlperf.conf"); - user_conf_path = getenv("CM_MLPERF_USER_CONF", 
"../inference/vision/classification_and_detection/user.conf"); - audit_conf_path = getenv("CM_MLPERF_INFERENCE_AUDIT_PATH", ""); - output_dir = getenv("CM_MLPERF_OUTPUT_DIR", "."); - backend_name = getenv("CM_MLPERF_BACKEND", "onnxruntime"); - device_name = getenv("CM_MLPERF_DEVICE", "cpu"); - model_name = getenv("CM_MODEL", "resnet50"); - model_path = getenv("CM_ML_MODEL_FILE_WITH_PATH", ""); - dataset_preprocessed_path = getenv("CM_DATASET_PREPROCESSED_PATH", ""); - dataset_path = getenv("CM_DATASET_PATH", ""); - dataset_list = getenv("CM_DATASET_LIST", ""); - imagenet_val_path = getenv("CM_DATASET_AUX_PATH", "") + "/val.txt"; - scenario_name = getenv("CM_MLPERF_LOADGEN_SCENARIO", "Offline"); - mode_name = getenv("CM_MLPERF_LOADGEN_MODE", "PerformanceOnly"); + mlperf_conf_path = getenv("MLC_MLPERF_CONF", "../inference/mlperf.conf"); + user_conf_path = getenv("MLC_MLPERF_USER_CONF", "../inference/vision/classification_and_detection/user.conf"); + audit_conf_path = getenv("MLC_MLPERF_INFERENCE_AUDIT_PATH", ""); + output_dir = getenv("MLC_MLPERF_OUTPUT_DIR", "."); + backend_name = getenv("MLC_MLPERF_BACKEND", "onnxruntime"); + device_name = getenv("MLC_MLPERF_DEVICE", "cpu"); + model_name = getenv("MLC_MODEL", "resnet50"); + model_path = getenv("MLC_ML_MODEL_FILE_WITH_PATH", ""); + dataset_preprocessed_path = getenv("MLC_DATASET_PREPROCESSED_PATH", ""); + dataset_path = getenv("MLC_DATASET_PATH", ""); + dataset_list = getenv("MLC_DATASET_LIST", ""); + imagenet_val_path = getenv("MLC_DATASET_AUX_PATH", "") + "/val.txt"; + scenario_name = getenv("MLC_MLPERF_LOADGEN_SCENARIO", "Offline"); + mode_name = getenv("MLC_MLPERF_LOADGEN_MODE", "PerformanceOnly"); if (mode_name == "accuracy") mode_name = "AccuracyOnly"; if (mode_name == "performance") mode_name = "PerformanceOnly"; - query_count_override = std::stol(getenv("CM_MLPERF_LOADGEN_QUERY_COUNT", "0")); + query_count_override = std::stol(getenv("MLC_MLPERF_LOADGEN_QUERY_COUNT", "0")); query_count_override = 0; - performance_sample_count = std::stol(getenv("CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT", "0")); - batch_size = std::stol(getenv("CM_MLPERF_LOADGEN_MAX_BATCHSIZE", "32")); + performance_sample_count = std::stol(getenv("MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT", "0")); + batch_size = std::stol(getenv("MLC_MLPERF_LOADGEN_MAX_BATCHSIZE", "32")); std::cout << "MLPerf Conf path: " << mlperf_conf_path << std::endl; std::cout << "User Conf path: " << user_conf_path << std::endl; std::cout << "Dataset Preprocessed path: " << dataset_preprocessed_path << std::endl; @@ -133,7 +133,7 @@ int main(int argc, const char *argv[]) { if (input_settings.device_name == "cpu") { device.reset(new CPUDevice()); } else if (input_settings.device_name == "gpu") { -#ifdef CM_MLPERF_DEVICE_GPU +#ifdef MLC_MLPERF_DEVICE_GPU device.reset(new GPUDevice()); #endif } else { @@ -161,7 +161,7 @@ int main(int argc, const char *argv[]) { // build backend std::shared_ptr backend; if (input_settings.backend_name == "onnxruntime") { -#ifdef CM_MLPERF_BACKEND_ONNXRUNTIME +#ifdef MLC_MLPERF_BACKEND_ONNXRUNTIME backend.reset(new OnnxRuntimeBackend( model, device, performance_sample_count, input_settings.batch_size, input_settings.device_name == "gpu")); diff --git a/script/app-mlperf-inference-mlcommons-python/customize.py b/script/app-mlperf-inference-mlcommons-python/customize.py index 0a4d22cdb..dc0e29942 100644 --- a/script/app-mlperf-inference-mlcommons-python/customize.py +++ b/script/app-mlperf-inference-mlcommons-python/customize.py @@ -12,76 +12,76 @@ def 
preprocess(i): state = i['state'] script_path = i['run_script_input']['path'] - if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": + if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes": return {'return': 0} - if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes": + if env.get('MLC_RUN_DOCKER_CONTAINER', '') == "yes": return {'return': 0} - if env.get('CM_MLPERF_POWER', '') == "yes": + if env.get('MLC_MLPERF_POWER', '') == "yes": power = "yes" else: power = "no" - rerun = True if env.get("CM_RERUN", "") != '' else False + rerun = True if env.get("MLC_RERUN", "") != '' else False - if 'CM_MLPERF_LOADGEN_SCENARIO' not in env: - env['CM_MLPERF_LOADGEN_SCENARIO'] = "Offline" + if 'MLC_MLPERF_LOADGEN_SCENARIO' not in env: + env['MLC_MLPERF_LOADGEN_SCENARIO'] = "Offline" - if 'CM_MLPERF_LOADGEN_MODE' not in env: - env['CM_MLPERF_LOADGEN_MODE'] = "accuracy" + if 'MLC_MLPERF_LOADGEN_MODE' not in env: + env['MLC_MLPERF_LOADGEN_MODE'] = "accuracy" - if 'CM_MODEL' not in env: + if 'MLC_MODEL' not in env: return { 'return': 1, 'error': "Please select a variation specifying the model to run"} - # if env['CM_MODEL'] == "resnet50": - # cmd = "cp " + os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt") + " " + os.path.join(env['CM_DATASET_PATH'], + # if env['MLC_MODEL'] == "resnet50": + # cmd = "cp " + os.path.join(env['MLC_DATASET_AUX_PATH'], "val.txt") + " " + os.path.join(env['MLC_DATASET_PATH'], # "val_map.txt") # ret = os.system(cmd) - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] = " " + \ - env.get('CM_MLPERF_LOADGEN_EXTRA_OPTIONS', '') + " " + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] = " " + \ + env.get('MLC_MLPERF_LOADGEN_EXTRA_OPTIONS', '') + " " - if 'CM_MLPERF_LOADGEN_QPS' not in env: - env['CM_MLPERF_LOADGEN_QPS_OPT'] = "" + if 'MLC_MLPERF_LOADGEN_QPS' not in env: + env['MLC_MLPERF_LOADGEN_QPS_OPT'] = "" else: - env['CM_MLPERF_LOADGEN_QPS_OPT'] = " --qps " + \ - env['CM_MLPERF_LOADGEN_QPS'] + env['MLC_MLPERF_LOADGEN_QPS_OPT'] = " --qps " + \ + env['MLC_MLPERF_LOADGEN_QPS'] - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += env['CM_MLPERF_LOADGEN_QPS_OPT'] + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += env['MLC_MLPERF_LOADGEN_QPS_OPT'] - if 'CM_NUM_THREADS' not in env: - if 'CM_MINIMIZE_THREADS' in env: - env['CM_NUM_THREADS'] = str(int(env['CM_HOST_CPU_TOTAL_CORES']) // - (int(env.get('CM_HOST_CPU_SOCKETS', '1')) * int(env.get('CM_HOST_CPU_TOTAL_CORES', '1')))) + if 'MLC_NUM_THREADS' not in env: + if 'MLC_MINIMIZE_THREADS' in env: + env['MLC_NUM_THREADS'] = str(int(env['MLC_HOST_CPU_TOTAL_CORES']) // + (int(env.get('MLC_HOST_CPU_SOCKETS', '1')) * int(env.get('MLC_HOST_CPU_TOTAL_CORES', '1')))) else: - env['CM_NUM_THREADS'] = env.get('CM_HOST_CPU_TOTAL_CORES', '1') + env['MLC_NUM_THREADS'] = env.get('MLC_HOST_CPU_TOTAL_CORES', '1') - if env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '') != '' and str(env.get( - 'CM_MLPERF_MODEL_SKIP_BATCHING', False)).lower() not in ["true", "1", "yes"]: - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --max-batchsize " + \ - str(env['CM_MLPERF_LOADGEN_MAX_BATCHSIZE']) + if env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '') != '' and str(env.get( + 'MLC_MLPERF_MODEL_SKIP_BATCHING', False)).lower() not in ["true", "1", "yes"]: + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --max-batchsize " + \ + str(env['MLC_MLPERF_LOADGEN_MAX_BATCHSIZE']) - if env.get('CM_MLPERF_LOADGEN_BATCH_SIZE', '') != '': - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --batch-size " + \ - str(env['CM_MLPERF_LOADGEN_BATCH_SIZE']) + if env.get('MLC_MLPERF_LOADGEN_BATCH_SIZE', '') != '': + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += 
" --batch-size " + \ + str(env['MLC_MLPERF_LOADGEN_BATCH_SIZE']) - if env.get('CM_MLPERF_LOADGEN_QUERY_COUNT', '') != '' and not env.get('CM_TMP_IGNORE_MLPERF_QUERY_COUNT', False) and ( - env['CM_MLPERF_LOADGEN_MODE'] == 'accuracy' or 'gptj' in env['CM_MODEL'] or 'llama2' in env['CM_MODEL'] or 'mixtral' in env['CM_MODEL'] or 'llama3' in env['CM_MODEL']) and env.get('CM_MLPERF_RUN_STYLE', '') != "valid": - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --count " + \ - env['CM_MLPERF_LOADGEN_QUERY_COUNT'] + if env.get('MLC_MLPERF_LOADGEN_QUERY_COUNT', '') != '' and not env.get('MLC_TMP_IGNORE_MLPERF_QUERY_COUNT', False) and ( + env['MLC_MLPERF_LOADGEN_MODE'] == 'accuracy' or 'gptj' in env['MLC_MODEL'] or 'llama2' in env['MLC_MODEL'] or 'mixtral' in env['MLC_MODEL'] or 'llama3' in env['MLC_MODEL']) and env.get('MLC_MLPERF_RUN_STYLE', '') != "valid": + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --count " + \ + env['MLC_MLPERF_LOADGEN_QUERY_COUNT'] print("Using MLCommons Inference source from '" + - env['CM_MLPERF_INFERENCE_SOURCE'] + "'") + env['MLC_MLPERF_INFERENCE_SOURCE'] + "'") - if 'CM_MLPERF_CONF' not in env: - env['CM_MLPERF_CONF'] = os.path.join( - env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") + if 'MLC_MLPERF_CONF' not in env: + env['MLC_MLPERF_CONF'] = os.path.join( + env['MLC_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") x = "" if os_info['platform'] == 'windows' else "'" - inference_src_version = env.get('CM_MLPERF_INFERENCE_SOURCE_VERSION', '') + inference_src_version = env.get('MLC_MLPERF_INFERENCE_SOURCE_VERSION', '') version_tuple = None if inference_src_version: version_tuple = tuple(map(int, inference_src_version.split('.'))) @@ -89,80 +89,80 @@ def preprocess(i): if version_tuple and version_tuple >= (4, 1, 1): pass # mlperf_conf is automatically loaded by the loadgen else: - if "llama2-70b" in env['CM_MODEL'] or "mixtral-8x7b" in env["CM_MODEL"]: - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf-conf " + \ - x + env['CM_MLPERF_CONF'] + x + if "llama2-70b" in env['MLC_MODEL'] or "mixtral-8x7b" in env["MLC_MODEL"]: + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf-conf " + \ + x + env['MLC_MLPERF_CONF'] + x else: - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf_conf " + \ - x + env['CM_MLPERF_CONF'] + x + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf_conf " + \ + x + env['MLC_MLPERF_CONF'] + x - if env.get('CM_NETWORK_LOADGEN', '') != "lon" and env.get( - 'CM_MLPERF_INFERENCE_API_SERVER', '') == '' and "llama2-70b" not in env['CM_MODEL']: - env['MODEL_DIR'] = env.get('CM_ML_MODEL_PATH') + if env.get('MLC_NETWORK_LOADGEN', '') != "lon" and env.get( + 'MLC_MLPERF_INFERENCE_API_SERVER', '') == '' and "llama2-70b" not in env['MLC_MODEL']: + env['MODEL_DIR'] = env.get('MLC_ML_MODEL_PATH') if not env['MODEL_DIR']: env['MODEL_DIR'] = os.path.dirname( env.get( - 'CM_MLPERF_CUSTOM_MODEL_PATH', + 'MLC_MLPERF_CUSTOM_MODEL_PATH', env.get( - 'CM_ML_MODEL_FILE_WITH_PATH', + 'MLC_ML_MODEL_FILE_WITH_PATH', ''))) RUN_CMD = "" state['RUN'] = {} - scenario = env['CM_MLPERF_LOADGEN_SCENARIO'] + scenario = env['MLC_MLPERF_LOADGEN_SCENARIO'] state['RUN'][scenario] = {} scenario_extra_options = '' - NUM_THREADS = env['CM_NUM_THREADS'] + NUM_THREADS = env['MLC_NUM_THREADS'] if int( - NUM_THREADS) > 2 and env['CM_MLPERF_DEVICE'] == "gpu" and env['CM_MODEL'] != "rgat": + NUM_THREADS) > 2 and env['MLC_MLPERF_DEVICE'] == "gpu" and env['MLC_MODEL'] != "rgat": NUM_THREADS = "2" # Don't use more than 2 threads when run on GPU - if env['CM_MODEL'] in ['resnet50', 'retinanet', + if 
env['MLC_MODEL'] in ['resnet50', 'retinanet', 'stable-diffusion-xl', 'rgat']: scenario_extra_options += " --threads " + NUM_THREADS - ml_model_name = env['CM_MODEL'] - if 'CM_MLPERF_USER_CONF' in env: - user_conf_path = env['CM_MLPERF_USER_CONF'] + ml_model_name = env['MLC_MODEL'] + if 'MLC_MLPERF_USER_CONF' in env: + user_conf_path = env['MLC_MLPERF_USER_CONF'] x = "" if os_info['platform'] == 'windows' else "'" - if 'llama2-70b' in env['CM_MODEL'] or "mixtral-8x7b" in env["CM_MODEL"] or "llama3" in env["CM_MODEL"]: + if 'llama2-70b' in env['MLC_MODEL'] or "mixtral-8x7b" in env["MLC_MODEL"] or "llama3" in env["MLC_MODEL"]: scenario_extra_options += " --user-conf " + x + user_conf_path + x else: scenario_extra_options += " --user_conf " + x + user_conf_path + x - mode = env['CM_MLPERF_LOADGEN_MODE'] + mode = env['MLC_MLPERF_LOADGEN_MODE'] mode_extra_options = "" - if 'CM_DATASET_PREPROCESSED_PATH' in env and env['CM_MODEL'] in [ + if 'MLC_DATASET_PREPROCESSED_PATH' in env and env['MLC_MODEL'] in [ 'resnet50', 'retinanet']: - # dataset_options = " --use_preprocessed_dataset --preprocessed_dir "+env['CM_DATASET_PREPROCESSED_PATH'] - if env.get('CM_MLPERF_LAST_RELEASE') not in ["v2.0", "v2.1"]: + # dataset_options = " --use_preprocessed_dataset --preprocessed_dir "+env['MLC_DATASET_PREPROCESSED_PATH'] + if env.get('MLC_MLPERF_LAST_RELEASE') not in ["v2.0", "v2.1"]: dataset_options = " --use_preprocessed_dataset --cache_dir " + \ - env['CM_DATASET_PREPROCESSED_PATH'] + env['MLC_DATASET_PREPROCESSED_PATH'] else: dataset_options = "" - if env['CM_MODEL'] == "retinanet": + if env['MLC_MODEL'] == "retinanet": dataset_options += " --dataset-list " + \ - env['CM_DATASET_ANNOTATIONS_FILE_PATH'] - elif env['CM_MODEL'] == "resnet50": + env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] + elif env['MLC_MODEL'] == "resnet50": dataset_options += " --dataset-list " + \ - os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt") - env['DATA_DIR'] = env.get('CM_DATASET_PREPROCESSED_PATH') + os.path.join(env['MLC_DATASET_AUX_PATH'], "val.txt") + env['DATA_DIR'] = env.get('MLC_DATASET_PREPROCESSED_PATH') else: - if 'CM_DATASET_PREPROCESSED_PATH' in env: - env['DATA_DIR'] = env.get('CM_DATASET_PREPROCESSED_PATH') + if 'MLC_DATASET_PREPROCESSED_PATH' in env: + env['DATA_DIR'] = env.get('MLC_DATASET_PREPROCESSED_PATH') else: - env['DATA_DIR'] = env.get('CM_DATASET_PATH') + env['DATA_DIR'] = env.get('MLC_DATASET_PATH') - if "dlrm" in env['CM_MODEL']: - env['DATA_DIR'] = env['CM_CRITEO_PREPROCESSED_PATH'] + if "dlrm" in env['MLC_MODEL']: + env['DATA_DIR'] = env['MLC_CRITEO_PREPROCESSED_PATH'] dataset_options = '' - if env.get('CM_MLPERF_EXTRA_DATASET_ARGS', '') != '': - dataset_options += " " + env['CM_MLPERF_EXTRA_DATASET_ARGS'] + if env.get('MLC_MLPERF_EXTRA_DATASET_ARGS', '') != '': + dataset_options += " " + env['MLC_MLPERF_EXTRA_DATASET_ARGS'] if mode == "accuracy": mode_extra_options += " --accuracy" @@ -172,32 +172,32 @@ def preprocess(i): elif mode == "compliance": - audit_full_path = env['CM_MLPERF_INFERENCE_AUDIT_PATH'] + audit_full_path = env['MLC_MLPERF_INFERENCE_AUDIT_PATH'] mode_extra_options = " --audit '" + audit_full_path + "'" - if env.get('CM_MLPERF_OUTPUT_DIR', '') == '': - env['CM_MLPERF_OUTPUT_DIR'] = os.getcwd() + if env.get('MLC_MLPERF_OUTPUT_DIR', '') == '': + env['MLC_MLPERF_OUTPUT_DIR'] = os.getcwd() - mlperf_implementation = env.get('CM_MLPERF_IMPLEMENTATION', 'reference') + mlperf_implementation = env.get('MLC_MLPERF_IMPLEMENTATION', 'reference') cmd, run_dir = get_run_cmd(os_info, env, 
scenario_extra_options, mode_extra_options, dataset_options, mlperf_implementation) - if env.get('CM_NETWORK_LOADGEN', '') == "lon": + if env.get('MLC_NETWORK_LOADGEN', '') == "lon": run_cmd = i['state']['mlperf_inference_run_cmd'] - env['CM_SSH_RUN_COMMANDS'] = [] - env['CM_SSH_RUN_COMMANDS'].append( + env['MLC_SSH_RUN_COMMANDS'] = [] + env['MLC_SSH_RUN_COMMANDS'].append( run_cmd.replace( "--network=lon", "--network=sut") + " &") - env['CM_MLPERF_RUN_CMD'] = cmd - env['CM_RUN_DIR'] = run_dir - env['CM_RUN_CMD'] = cmd - env['CK_PROGRAM_TMP_DIR'] = env.get('CM_ML_MODEL_PATH') # for tvm + env['MLC_MLPERF_RUN_CMD'] = cmd + env['MLC_RUN_DIR'] = run_dir + env['MLC_RUN_CMD'] = cmd + env['CK_PROGRAM_TMP_DIR'] = env.get('MLC_ML_MODEL_PATH') # for tvm - if env.get('CM_HOST_PLATFORM_FLAVOR', '') == "arm64": - env['CM_HOST_PLATFORM_FLAVOR'] = "aarch64" + if env.get('MLC_HOST_PLATFORM_FLAVOR', '') == "arm64": + env['MLC_HOST_PLATFORM_FLAVOR'] = "aarch64" return {'return': 0} @@ -216,318 +216,318 @@ def get_run_cmd(os_info, env, scenario_extra_options, def get_run_cmd_reference( os_info, env, scenario_extra_options, mode_extra_options, dataset_options): - if env['CM_MODEL'] in ["gptj-99", "gptj-99.9"]: + if env['MLC_MODEL'] in ["gptj-99", "gptj-99.9"]: env['RUN_DIR'] = os.path.join( - env['CM_MLPERF_INFERENCE_SOURCE'], "language", "gpt-j") - if env.get('CM_NETWORK_LOADGEN', '') != "lon": - cmd = env['CM_PYTHON_BIN_WITH_PATH'] + \ - " main.py --model-path=" + env['CM_ML_MODEL_FILE_WITH_PATH'] + ' --dataset-path=' + env['CM_DATASET_EVAL_PATH'] + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ - ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION'] + \ + env['MLC_MLPERF_INFERENCE_SOURCE'], "language", "gpt-j") + if env.get('MLC_NETWORK_LOADGEN', '') != "lon": + cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + \ + " main.py --model-path=" + env['MLC_ML_MODEL_FILE_WITH_PATH'] + ' --dataset-path=' + env['MLC_DATASET_EVAL_PATH'] + " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + " " + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + ' --dtype ' + env['MLC_MLPERF_MODEL_PRECISION'] + \ scenario_extra_options + mode_extra_options + dataset_options else: - cmd = env['CM_PYTHON_BIN_WITH_PATH'] + \ - " main.py" + ' --dataset-path=' + env['CM_DATASET_EVAL_PATH'] + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ - ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION'] + \ + cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + \ + " main.py" + ' --dataset-path=' + env['MLC_DATASET_EVAL_PATH'] + " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + " " + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + ' --dtype ' + env['MLC_MLPERF_MODEL_PRECISION'] + \ scenario_extra_options + mode_extra_options + dataset_options cmd = cmd.replace("--count", "--max_examples") - if env['CM_MLPERF_DEVICE'] == "gpu": + if env['MLC_MLPERF_DEVICE'] == "gpu": gpu_options = " --gpu" env['CUDA_VISIBLE_DEVICES'] = "0" else: gpu_options = "" cmd = cmd + gpu_options - env['LOG_PATH'] = env['CM_MLPERF_OUTPUT_DIR'] + env['LOG_PATH'] = env['MLC_MLPERF_OUTPUT_DIR'] - if env['CM_MODEL'] in ["resnet50", "retinanet"]: + if env['MLC_MODEL'] in ["resnet50", "retinanet"]: env['RUN_DIR'] = os.path.join( - env['CM_MLPERF_INFERENCE_SOURCE'], + env['MLC_MLPERF_INFERENCE_SOURCE'], "vision", "classification_and_detection") - env['OUTPUT_DIR'] = env['CM_MLPERF_OUTPUT_DIR'] - if env.get('CM_MLPERF_VISION_DATASET_OPTION', '') == '' and env.get( - 'CM_MLPERF_DEVICE') != "tpu": + env['OUTPUT_DIR'] 
= env['MLC_MLPERF_OUTPUT_DIR'] + if env.get('MLC_MLPERF_VISION_DATASET_OPTION', '') == '' and env.get( + 'MLC_MLPERF_DEVICE') != "tpu": if os_info['platform'] == 'windows': - cmd = "python python/main.py --profile " + env['CM_MODEL'] + "-" + env['CM_MLPERF_BACKEND'] + \ - " --model=" + env['CM_ML_MODEL_FILE_WITH_PATH'] + ' --dataset-path=' + env['CM_DATASET_PREPROCESSED_PATH'] + \ - " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + \ + cmd = "python python/main.py --profile " + env['MLC_MODEL'] + "-" + env['MLC_MLPERF_BACKEND'] + \ + " --model=" + env['MLC_ML_MODEL_FILE_WITH_PATH'] + ' --dataset-path=' + env['MLC_DATASET_PREPROCESSED_PATH'] + \ + " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + " " + \ " --output " + env['OUTPUT_DIR'] + " " + \ - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ scenario_extra_options + mode_extra_options + dataset_options else: - cmd = "./run_local.sh " + env['CM_MLPERF_BACKEND'] + ' ' + \ - env['CM_MODEL'] + ' ' + env['CM_MLPERF_DEVICE'] + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + cmd = "./run_local.sh " + env['MLC_MLPERF_BACKEND'] + ' ' + \ + env['MLC_MODEL'] + ' ' + env['MLC_MLPERF_DEVICE'] + " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + " " + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ scenario_extra_options + mode_extra_options + dataset_options return cmd, env['RUN_DIR'] - if env['CM_MLPERF_BACKEND'] == "ncnn": + if env['MLC_MLPERF_BACKEND'] == "ncnn": env['MODEL_FILE'] = os.path.join( os.path.dirname( - env.get('CM_ML_MODEL_FILE_WITH_PATH')), + env.get('MLC_ML_MODEL_FILE_WITH_PATH')), "resnet50_v1") else: env['MODEL_FILE'] = env.get( - 'CM_MLPERF_CUSTOM_MODEL_PATH', - env.get('CM_ML_MODEL_FILE_WITH_PATH')) + 'MLC_MLPERF_CUSTOM_MODEL_PATH', + env.get('MLC_ML_MODEL_FILE_WITH_PATH')) if not env['MODEL_FILE']: return {'return': 1, 'error': 'No valid model file found!'} - env['LOG_PATH'] = env['CM_MLPERF_OUTPUT_DIR'] + env['LOG_PATH'] = env['MLC_MLPERF_OUTPUT_DIR'] - extra_options = " --output " + env['CM_MLPERF_OUTPUT_DIR'] + " --model-name resnet50 --dataset " + env['CM_MLPERF_VISION_DATASET_OPTION'] + ' --max-batchsize ' + env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '1') + \ - " --dataset-path " + env['CM_DATASET_PREPROCESSED_PATH'] + " --model " + env['MODEL_FILE'] + \ - " --preprocessed_dir " + env['CM_DATASET_PREPROCESSED_PATH'] + extra_options = " --output " + env['MLC_MLPERF_OUTPUT_DIR'] + " --model-name resnet50 --dataset " + env['MLC_MLPERF_VISION_DATASET_OPTION'] + ' --max-batchsize ' + env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '1') + \ + " --dataset-path " + env['MLC_DATASET_PREPROCESSED_PATH'] + " --model " + env['MODEL_FILE'] + \ + " --preprocessed_dir " + env['MLC_DATASET_PREPROCESSED_PATH'] - if env.get('CM_MLPERF_DEVICE') == "tpu": - cmd = "cd '" + os.path.join(env['RUN_DIR'], "python") + "' && " + env.get('CM_SUDO', "") + " " + env['CM_PYTHON_BIN_WITH_PATH'] + " main.py " +\ - "--backend " + env['CM_MLPERF_BACKEND'] + " --scenario=" + env['CM_MLPERF_LOADGEN_SCENARIO'] + " --device tpu " + \ - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + scenario_extra_options + \ + if env.get('MLC_MLPERF_DEVICE') == "tpu": + cmd = "cd '" + os.path.join(env['RUN_DIR'], "python") + "' && " + env.get('MLC_SUDO', "") + " " + env['MLC_PYTHON_BIN_WITH_PATH'] + " main.py " +\ + "--backend " + env['MLC_MLPERF_BACKEND'] + " --scenario=" + env['MLC_MLPERF_LOADGEN_SCENARIO'] + " --device tpu " + \ + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + 
scenario_extra_options + \ mode_extra_options + dataset_options + extra_options else: - cmd = "cd '" + os.path.join(env['RUN_DIR'], "python") + "' && " + env['CM_PYTHON_BIN_WITH_PATH'] + " main.py " +\ - "--backend " + env['CM_MLPERF_BACKEND'] + " --scenario=" + env['CM_MLPERF_LOADGEN_SCENARIO'] + \ - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + scenario_extra_options + \ + cmd = "cd '" + os.path.join(env['RUN_DIR'], "python") + "' && " + env['MLC_PYTHON_BIN_WITH_PATH'] + " main.py " +\ + "--backend " + env['MLC_MLPERF_BACKEND'] + " --scenario=" + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \ + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + scenario_extra_options + \ mode_extra_options + dataset_options + extra_options env['SKIP_VERIFY_ACCURACY'] = True - elif "bert" in env['CM_MODEL']: + elif "bert" in env['MLC_MODEL']: env['RUN_DIR'] = os.path.join( - env['CM_MLPERF_INFERENCE_SOURCE'], "language", "bert") + env['MLC_MLPERF_INFERENCE_SOURCE'], "language", "bert") env['MODEL_FILE'] = env.get( - 'CM_MLPERF_CUSTOM_MODEL_PATH', - env.get('CM_ML_MODEL_FILE_WITH_PATH')) + 'MLC_MLPERF_CUSTOM_MODEL_PATH', + env.get('MLC_ML_MODEL_FILE_WITH_PATH')) if not env['MODEL_FILE']: return {'return': 1, 'error': 'No valid model file found!'} - if env.get('CM_MLPERF_QUANTIZATION') in ["on", True, "1", "True"]: + if env.get('MLC_MLPERF_QUANTIZATION') in ["on", True, "1", "True"]: quantization_options = " --quantized" else: quantization_options = "" - cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " run.py --backend=" + env['CM_MLPERF_BACKEND'] + " --scenario=" + env['CM_MLPERF_LOADGEN_SCENARIO'] + \ - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + scenario_extra_options + \ + cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " run.py --backend=" + env['MLC_MLPERF_BACKEND'] + " --scenario=" + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \ + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + scenario_extra_options + \ mode_extra_options + dataset_options + quantization_options - if env['CM_MLPERF_BACKEND'] == "deepsparse": + if env['MLC_MLPERF_BACKEND'] == "deepsparse": cmd += " --batch_size=" + \ - env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '1') + \ + env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '1') + \ " --model_path=" + env['MODEL_FILE'] - if env.get('CM_MLPERF_CUSTOM_MODEL_PATH', '') != '': - env['CM_ML_MODEL_FILE_WITH_PATH'] = env['MODEL_FILE'] + if env.get('MLC_MLPERF_CUSTOM_MODEL_PATH', '') != '': + env['MLC_ML_MODEL_FILE_WITH_PATH'] = env['MODEL_FILE'] cmd = cmd.replace("--count", "--max_examples") - env['VOCAB_FILE'] = env['CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH'] - env['DATASET_FILE'] = env['CM_DATASET_SQUAD_VAL_PATH'] - env['LOG_PATH'] = env['CM_MLPERF_OUTPUT_DIR'] + env['VOCAB_FILE'] = env['MLC_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH'] + env['DATASET_FILE'] = env['MLC_DATASET_SQUAD_VAL_PATH'] + env['LOG_PATH'] = env['MLC_MLPERF_OUTPUT_DIR'] env['SKIP_VERIFY_ACCURACY'] = True - elif "rnnt" in env['CM_MODEL']: + elif "rnnt" in env['MLC_MODEL']: - env['RUN_DIR'] = env['CM_MLPERF_INFERENCE_RNNT_PATH'] - cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " run.py --backend " + env['CM_MLPERF_BACKEND'] + \ - " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + \ - " --manifest " + env['CM_DATASET_PREPROCESSED_JSON'] + \ - " --dataset_dir " + os.path.join(env['CM_DATASET_PREPROCESSED_PATH'], "..") + \ + env['RUN_DIR'] = env['MLC_MLPERF_INFERENCE_RNNT_PATH'] + cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " run.py --backend " + env['MLC_MLPERF_BACKEND'] + \ + " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \ + " --manifest " + env['MLC_DATASET_PREPROCESSED_JSON'] + \ + " 
--dataset_dir " + os.path.join(env['MLC_DATASET_PREPROCESSED_PATH'], "..") + \ " --pytorch_config_toml " + os.path.join("pytorch", "configs", "rnnt.toml") + \ - " --pytorch_checkpoint " + env['CM_ML_MODEL_FILE_WITH_PATH'] + \ - " --log_dir " + env['CM_MLPERF_OUTPUT_DIR'] + \ - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + " --pytorch_checkpoint " + env['MLC_ML_MODEL_FILE_WITH_PATH'] + \ + " --log_dir " + env['MLC_MLPERF_OUTPUT_DIR'] + \ + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ scenario_extra_options + mode_extra_options + dataset_options env['SKIP_VERIFY_ACCURACY'] = True - elif "stable-diffusion-xl" in env['CM_MODEL']: + elif "stable-diffusion-xl" in env['MLC_MODEL']: env['RUN_DIR'] = os.path.join( - env['CM_MLPERF_INFERENCE_SOURCE'], "text_to_image") + env['MLC_MLPERF_INFERENCE_SOURCE'], "text_to_image") if env.get('+PYTHONPATH', '') == '': env['+PYTHONPATH'] = [] env['+PYTHONPATH'].append( os.path.join( - env['CM_MLPERF_INFERENCE_SOURCE'], + env['MLC_MLPERF_INFERENCE_SOURCE'], "text_to_image", "tools", "fid")) - backend = env['CM_MLPERF_BACKEND'] - device = env['CM_MLPERF_DEVICE'] if env['CM_MLPERF_DEVICE'] not in [ + backend = env['MLC_MLPERF_BACKEND'] + device = env['MLC_MLPERF_DEVICE'] if env['MLC_MLPERF_DEVICE'] not in [ "gpu", "rocm"] else "cuda" - max_batchsize = env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '1') - cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " main.py " \ - " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + \ + max_batchsize = env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '1') + cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " main.py " \ + " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \ " --profile " + 'stable-diffusion-xl-pytorch ' + \ " --dataset " + 'coco-1024' + \ - " --dataset-path " + env['CM_DATASET_PATH_ROOT'] + \ - ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION'].replace("bfloat", "bf").replace("float", "fp") + \ + " --dataset-path " + env['MLC_DATASET_PATH_ROOT'] + \ + ' --dtype ' + env['MLC_MLPERF_MODEL_PRECISION'].replace("bfloat", "bf").replace("float", "fp") + \ " --device " + device + \ - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ scenario_extra_options + mode_extra_options + \ - " --output " + env['CM_MLPERF_OUTPUT_DIR'] + \ - " --model-path " + env['CM_ML_MODEL_PATH'] + " --output " + env['MLC_MLPERF_OUTPUT_DIR'] + \ + " --model-path " + env['MLC_ML_MODEL_PATH'] if "--max-batchsize" not in cmd: cmd += " --max-batchsize " + max_batchsize - if env.get('CM_COCO2014_SAMPLE_ID_PATH', '') != '': - cmd += " --ids-path " + env['CM_COCO2014_SAMPLE_ID_PATH'] + if env.get('MLC_COCO2014_SAMPLE_ID_PATH', '') != '': + cmd += " --ids-path " + env['MLC_COCO2014_SAMPLE_ID_PATH'] - elif "llama2-70b" in env['CM_MODEL']: + elif "llama2-70b" in env['MLC_MODEL']: env['RUN_DIR'] = os.path.join( - env['CM_MLPERF_INFERENCE_SOURCE'], + env['MLC_MLPERF_INFERENCE_SOURCE'], "language", "llama2-70b") - backend = env['CM_MLPERF_BACKEND'] - device = env['CM_MLPERF_DEVICE'] if env['CM_MLPERF_DEVICE'] != "gpu" else "cuda" + backend = env['MLC_MLPERF_BACKEND'] + device = env['MLC_MLPERF_DEVICE'] if env['MLC_MLPERF_DEVICE'] != "gpu" else "cuda" - cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " main.py " \ - " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + \ - " --dataset-path " + env['CM_DATASET_PREPROCESSED_PATH'] + \ + cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " main.py " \ + " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \ + " --dataset-path " + env['MLC_DATASET_PREPROCESSED_PATH'] + \ " --device " + device.replace("cuda", "cuda:0") + \ 
-        env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
+        env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
             scenario_extra_options + mode_extra_options + \
-            " --output-log-dir " + env['CM_MLPERF_OUTPUT_DIR'] + \
-            ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION']
-
-        if env.get('CM_MLPERF_INFERENCE_API_SERVER', '') != '':
-            env['CM_VLLM_SERVER_MODEL_NAME'] = env.get(
-                "CM_VLLM_SERVER_MODEL_NAME") or "NousResearch/Meta-Llama-3-8B-Instruct"
-            # env['CM_MLPERF_INFERENCE_API_SERVER'] = "http://localhost:8000"
-            cmd += f""" --api-server {env['CM_MLPERF_INFERENCE_API_SERVER']} \
-                --model-path {env['CM_VLLM_SERVER_MODEL_NAME']} \
-                --api-model-name {env['CM_VLLM_SERVER_MODEL_NAME']} --vllm """
+            " --output-log-dir " + env['MLC_MLPERF_OUTPUT_DIR'] + \
+            ' --dtype ' + env['MLC_MLPERF_MODEL_PRECISION']
+
+        if env.get('MLC_MLPERF_INFERENCE_API_SERVER', '') != '':
+            env['MLC_VLLM_SERVER_MODEL_NAME'] = env.get(
+                "MLC_VLLM_SERVER_MODEL_NAME") or "NousResearch/Meta-Llama-3-8B-Instruct"
+            # env['MLC_MLPERF_INFERENCE_API_SERVER'] = "http://localhost:8000"
+            cmd += f""" --api-server {env['MLC_MLPERF_INFERENCE_API_SERVER']} \
+                --model-path {env['MLC_VLLM_SERVER_MODEL_NAME']} \
+                --api-model-name {env['MLC_VLLM_SERVER_MODEL_NAME']} --vllm """
         else:
             cmd += f" --model-path {env['LLAMA2_CHECKPOINT_PATH']}"

-        if env.get('CM_MLPERF_INFERENCE_NUM_WORKERS', '') != '':
-            cmd += f" --num-workers {env['CM_MLPERF_INFERENCE_NUM_WORKERS']}"
+        if env.get('MLC_MLPERF_INFERENCE_NUM_WORKERS', '') != '':
+            cmd += f" --num-workers {env['MLC_MLPERF_INFERENCE_NUM_WORKERS']}"

         cmd = cmd.replace("--count", "--total-sample-count")
         cmd = cmd.replace("--max-batchsize", "--batch-size")

-    elif "mixtral-8x7b" in env['CM_MODEL']:
+    elif "mixtral-8x7b" in env['MLC_MODEL']:
         env['RUN_DIR'] = os.path.join(
-            env['CM_MLPERF_INFERENCE_SOURCE'],
+            env['MLC_MLPERF_INFERENCE_SOURCE'],
             "language",
             "mixtral-8x7b")
-        backend = env['CM_MLPERF_BACKEND']
-        device = env['CM_MLPERF_DEVICE'] if env['CM_MLPERF_DEVICE'] != "gpu" else "cuda"
-        cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " main.py " \
-            " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + \
-            " --dataset-path " + env['CM_DATASET_MIXTRAL_PREPROCESSED_PATH'] + \
+        backend = env['MLC_MLPERF_BACKEND']
+        device = env['MLC_MLPERF_DEVICE'] if env['MLC_MLPERF_DEVICE'] != "gpu" else "cuda"
+        cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " main.py " \
+            " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \
+            " --dataset-path " + env['MLC_DATASET_MIXTRAL_PREPROCESSED_PATH'] + \
             " --device " + device.replace("cuda", "cuda:0") + \
-            env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
+            env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
             scenario_extra_options + mode_extra_options + \
-            " --output-log-dir " + env['CM_MLPERF_OUTPUT_DIR'] + \
-            ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION'] + \
+            " --output-log-dir " + env['MLC_MLPERF_OUTPUT_DIR'] + \
+            ' --dtype ' + env['MLC_MLPERF_MODEL_PRECISION'] + \
             " --model-path " + env['MIXTRAL_CHECKPOINT_PATH']
         cmd = cmd.replace("--count", "--total-sample-count")
         cmd = cmd.replace("--max-batchsize", "--batch-size")

-    elif "3d-unet" in env['CM_MODEL']:
+    elif "3d-unet" in env['MLC_MODEL']:

-        env['RUN_DIR'] = env['CM_MLPERF_INFERENCE_3DUNET_PATH']
-        backend = env['CM_MLPERF_BACKEND'] if env['CM_MLPERF_BACKEND'] != 'tf' else 'tensorflow'
-        cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " run.py --backend=" + backend + " --scenario=" + env['CM_MLPERF_LOADGEN_SCENARIO'] + \
-            env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
-            " --model=" + env['CM_ML_MODEL_FILE_WITH_PATH'] + \
-            " --preprocessed_data_dir=" + env['CM_DATASET_KITS19_PREPROCESSED_PATH'] + \
+        env['RUN_DIR'] = env['MLC_MLPERF_INFERENCE_3DUNET_PATH']
+        backend = env['MLC_MLPERF_BACKEND'] if env['MLC_MLPERF_BACKEND'] != 'tf' else 'tensorflow'
+        cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " run.py --backend=" + backend + " --scenario=" + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \
+            env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
+            " --model=" + env['MLC_ML_MODEL_FILE_WITH_PATH'] + \
+            " --preprocessed_data_dir=" + env['MLC_DATASET_KITS19_PREPROCESSED_PATH'] + \
             scenario_extra_options + mode_extra_options + dataset_options
-        env['LOG_PATH'] = env['CM_MLPERF_OUTPUT_DIR']
+        env['LOG_PATH'] = env['MLC_MLPERF_OUTPUT_DIR']
         env['SKIP_VERIFY_ACCURACY'] = True

-    elif "dlrm" in env['CM_MODEL']:  # DLRM is in draft stage
+    elif "dlrm" in env['MLC_MODEL']:  # DLRM is in draft stage
         env['RUN_DIR'] = os.path.join(
-            env['CM_MLPERF_INFERENCE_DLRM_V2_PATH'], "pytorch")
-        if 'multihot-criteo-sample' in env['CM_ML_MODEL_DATASET_TYPE']:
+            env['MLC_MLPERF_INFERENCE_DLRM_V2_PATH'], "pytorch")
+        if 'multihot-criteo-sample' in env['MLC_ML_MODEL_DATASET_TYPE']:
             dataset = "multihot-criteo-sample"
-        elif 'multihot-criteo' in env['CM_ML_MODEL_DATASET_TYPE']:
+        elif 'multihot-criteo' in env['MLC_ML_MODEL_DATASET_TYPE']:
             dataset = "multihot-criteo"
         env['MODEL_DIR'] = os.path.join(env['MODEL_DIR'], "model_weights")

-        if env.get('CM_MLPERF_BIN_LOADER', '') == 'yes':
+        if env.get('MLC_MLPERF_BIN_LOADER', '') == 'yes':
             mlperf_bin_loader_string = " --mlperf-bin-loader"
         else:
             mlperf_bin_loader_string = ""
-        if env.get('CM_ML_MODEL_DEBUG', '') == 'yes':
+        if env.get('MLC_ML_MODEL_DEBUG', '') == 'yes':
             config = " --max-ind-range=10000000 --data-sub-sample-rate=0.875 "
         else:
             config = " --max-ind-range=40000000 "

-        if env['CM_MLPERF_DEVICE'] == "gpu":
+        if env['MLC_MLPERF_DEVICE'] == "gpu":
             gpu_options = ""
             env['CUDA_VISIBLE_DEVICES'] = "0"
         else:
             gpu_options = ""
             env['WORLD_SIZE'] = "1"

-        if env['CM_MLPERF_LOADGEN_MODE'] == "accuracy" and env['CM_MLPERF_LOADGEN_SCENARIO'] == "Offline":
+        if env['MLC_MLPERF_LOADGEN_MODE'] == "accuracy" and env['MLC_MLPERF_LOADGEN_SCENARIO'] == "Offline":
             mode_extra_options += " --samples-per-query-offline=1"

-        cmd = " ./run_local.sh " + env['CM_MLPERF_BACKEND'] + \
-            ' dlrm ' + dataset + ' ' + env['CM_MLPERF_DEVICE'] + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + \
-            env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
+        cmd = " ./run_local.sh " + env['MLC_MLPERF_BACKEND'] + \
+            ' dlrm ' + dataset + ' ' + env['MLC_MLPERF_DEVICE'] + " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + " " + \
+            env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
             config + mlperf_bin_loader_string + \
             ' --samples-to-aggregate-quantile-file=./tools/dist_quantile.txt ' + \
             scenario_extra_options + mode_extra_options + dataset_options + gpu_options
         cmd = cmd.replace("--count", "--count-queries")
-        env['OUTPUT_DIR'] = env['CM_MLPERF_OUTPUT_DIR']
+        env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR']

-    elif "rgat" in env['CM_MODEL']:
+    elif "rgat" in env['MLC_MODEL']:
         env['RUN_DIR'] = os.path.join(
-            env['CM_MLPERF_INFERENCE_SOURCE'],
+            env['MLC_MLPERF_INFERENCE_SOURCE'],
             "graph",
             "R-GAT")
-        backend = env['CM_MLPERF_BACKEND']
+        backend = env['MLC_MLPERF_BACKEND']

-        dtype_rgat = env['CM_MLPERF_MODEL_PRECISION'].replace("float", "fp")
+        dtype_rgat = env['MLC_MLPERF_MODEL_PRECISION'].replace("float", "fp")

-        if env.get('CM_MLPERF_SUBMISSION_GENERATION_STYLE', '') == "full":
+        if env.get('MLC_MLPERF_SUBMISSION_GENERATION_STYLE', '') == "full":
             mode_extra_options += " --dataset igbh-dgl --profile rgat-dgl-full "
         else:
             mode_extra_options += " --dataset igbh-dgl-tiny --profile debug-dgl "

-        device = env['CM_MLPERF_DEVICE'] if env['CM_MLPERF_DEVICE'] != "gpu" else "cuda"
+        device = env['MLC_MLPERF_DEVICE'] if env['MLC_MLPERF_DEVICE'] != "gpu" else "cuda"
         # have to add the condition for running in debug mode or real run mode
-        cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " main.py " \
-            " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + \
-            " --dataset-path " + env['CM_DATASET_IGBH_PATH'] + \
+        cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " main.py " \
+            " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \
+            " --dataset-path " + env['MLC_DATASET_IGBH_PATH'] + \
             " --device " + device.replace("cuda", "gpu") + \
-            env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
+            env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
             scenario_extra_options + mode_extra_options + \
-            " --output " + env['CM_MLPERF_OUTPUT_DIR'] + \
+            " --output " + env['MLC_MLPERF_OUTPUT_DIR'] + \
             ' --dtype ' + dtype_rgat + \
             " --model-path " + env['RGAT_CHECKPOINT_PATH']

-        if env.get('CM_ACTIVATE_RGAT_IN_MEMORY', '') == "yes":
+        if env.get('MLC_ACTIVATE_RGAT_IN_MEMORY', '') == "yes":
             cmd += " --in-memory "

-    elif "llama3" in env['CM_MODEL']:
+    elif "llama3" in env['MLC_MODEL']:
         env['RUN_DIR'] = os.path.join(
-            env['CM_MLPERF_INFERENCE_SOURCE'],
+            env['MLC_MLPERF_INFERENCE_SOURCE'],
             "language",
             "llama3.1-405b")

-        if int(env.get('CM_MLPERF_INFERENCE_TP_SIZE', '')) > 1:
+        if int(env.get('MLC_MLPERF_INFERENCE_TP_SIZE', '')) > 1:
             env['VLLM_WORKER_MULTIPROC_METHOD'] = "spawn"

-        cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " main.py " \
-            " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + \
-            " --dataset-path " + env['CM_DATASET_LLAMA3_PATH'] + \
-            " --output-log-dir " + env['CM_MLPERF_OUTPUT_DIR'] + \
-            ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION'] + \
-            " --model-path " + env['CM_ML_MODEL_LLAMA3_CHECKPOINT_PATH'] + \
-            " --tensor-parallel-size " + env['CM_MLPERF_INFERENCE_TP_SIZE'] + \
+        cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " main.py " \
+            " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \
+            " --dataset-path " + env['MLC_DATASET_LLAMA3_PATH'] + \
+            " --output-log-dir " + env['MLC_MLPERF_OUTPUT_DIR'] + \
+            ' --dtype ' + env['MLC_MLPERF_MODEL_PRECISION'] + \
+            " --model-path " + env['MLC_ML_MODEL_LLAMA3_CHECKPOINT_PATH'] + \
+            " --tensor-parallel-size " + env['MLC_MLPERF_INFERENCE_TP_SIZE'] + \
             " --vllm "

-        if env.get('CM_MLPERF_INFERENCE_NUM_WORKERS', '') != '':
-            cmd += f" --num-workers {env['CM_MLPERF_INFERENCE_NUM_WORKERS']}"
+        if env.get('MLC_MLPERF_INFERENCE_NUM_WORKERS', '') != '':
+            cmd += f" --num-workers {env['MLC_MLPERF_INFERENCE_NUM_WORKERS']}"

         cmd = cmd.replace("--count", "--total-sample-count")
         cmd = cmd.replace("--max-batchsize", "--batch-size")

-    if env.get('CM_NETWORK_LOADGEN', '') in ["lon", "sut"]:
-        cmd = cmd + " " + "--network " + env['CM_NETWORK_LOADGEN']
-        if env.get('CM_NETWORK_LOADGEN_SUT_SERVERS', []):
-            sut_servers = env['CM_NETWORK_LOADGEN_SUT_SERVERS']
+    if env.get('MLC_NETWORK_LOADGEN', '') in ["lon", "sut"]:
+        cmd = cmd + " " + "--network " + env['MLC_NETWORK_LOADGEN']
+        if env.get('MLC_NETWORK_LOADGEN_SUT_SERVERS', []):
+            sut_servers = env['MLC_NETWORK_LOADGEN_SUT_SERVERS']
             cmd += " --sut_server '" + "','".join(sut_servers) + "' "

     return cmd, env['RUN_DIR']
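For readers scanning the rename, the logic of these reference-implementation branches is easiest to see in isolation. Below is a minimal, illustrative sketch of the pattern the function above follows after this patch (the helper name `build_cmd` is ours, not part of the repository): optional MLC_*-prefixed variables contribute CLI flags only when set, and network mode appends the SUT endpoints as a quoted, comma-separated list.

```python
# Hedged sketch of the command-assembly pattern used in customize.py above.
def build_cmd(env):
    # mandatory pieces are concatenated directly from required env vars
    cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " main.py" \
        + " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO']
    # optional knobs contribute flags only when the variable is non-empty
    if env.get('MLC_MLPERF_INFERENCE_NUM_WORKERS', '') != '':
        cmd += f" --num-workers {env['MLC_MLPERF_INFERENCE_NUM_WORKERS']}"
    # network mode appends the SUT endpoints as a quoted comma-separated list
    if env.get('MLC_NETWORK_LOADGEN', '') in ["lon", "sut"]:
        cmd += " --network " + env['MLC_NETWORK_LOADGEN']
        sut_servers = env.get('MLC_NETWORK_LOADGEN_SUT_SERVERS', [])
        if sut_servers:
            cmd += " --sut_server '" + "','".join(sut_servers) + "' "
    return cmd
```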
diff --git a/script/app-mlperf-inference-mlcommons-python/meta.yaml b/script/app-mlperf-inference-mlcommons-python/meta.yaml
index 250d2dc86..a23acee4d 100644
--- a/script/app-mlperf-inference-mlcommons-python/meta.yaml
+++ b/script/app-mlperf-inference-mlcommons-python/meta.yaml
@@ -22,69 +22,69 @@ tags:

 # Default environment
 default_env:
-  CM_MLPERF_LOADGEN_MODE: accuracy
-  CM_MLPERF_LOADGEN_SCENARIO: Offline
-  CM_OUTPUT_FOLDER_NAME: test_results
-  CM_MLPERF_RUN_STYLE: test
-  CM_TEST_QUERY_COUNT: "10"
-  CM_MLPERF_QUANTIZATION: off
-  CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: reference
-  CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX: ""
+  MLC_MLPERF_LOADGEN_MODE: accuracy
+  MLC_MLPERF_LOADGEN_SCENARIO: Offline
+  MLC_OUTPUT_FOLDER_NAME: test_results
+  MLC_MLPERF_RUN_STYLE: test
+  MLC_TEST_QUERY_COUNT: "10"
+  MLC_MLPERF_QUANTIZATION: off
+  MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: reference
+  MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX: ""

 docker:
   real_run: False

 # Map script inputs to environment variables
 input_mapping:
-  count: CM_MLPERF_LOADGEN_QUERY_COUNT
-  docker: CM_RUN_DOCKER_CONTAINER
-  hw_name: CM_HW_NAME
+  count: MLC_MLPERF_LOADGEN_QUERY_COUNT
+  docker: MLC_RUN_DOCKER_CONTAINER
+  hw_name: MLC_HW_NAME
   imagenet_path: IMAGENET_PATH
-  max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE
-  mode: CM_MLPERF_LOADGEN_MODE
-  num_threads: CM_NUM_THREADS
-  threads: CM_NUM_THREADS
-  dataset: CM_MLPERF_VISION_DATASET_OPTION
-  model: CM_MLPERF_CUSTOM_MODEL_PATH
+  max_batchsize: MLC_MLPERF_LOADGEN_MAX_BATCHSIZE
+  mode: MLC_MLPERF_LOADGEN_MODE
+  num_threads: MLC_NUM_THREADS
+  threads: MLC_NUM_THREADS
+  dataset: MLC_MLPERF_VISION_DATASET_OPTION
+  model: MLC_MLPERF_CUSTOM_MODEL_PATH
   output_dir: OUTPUT_BASE_DIR
-  power: CM_MLPERF_POWER
-  power_server: CM_MLPERF_POWER_SERVER_ADDRESS
-  ntp_server: CM_MLPERF_POWER_NTP_SERVER
-  max_amps: CM_MLPERF_POWER_MAX_AMPS
-  max_volts: CM_MLPERF_POWER_MAX_VOLTS
-  regenerate_files: CM_REGENERATE_MEASURE_FILES
-  rerun: CM_RERUN
-  scenario: CM_MLPERF_LOADGEN_SCENARIO
-  test_query_count: CM_TEST_QUERY_COUNT
-  clean: CM_MLPERF_CLEAN_SUBMISSION_DIR
-  dataset_args: CM_MLPERF_EXTRA_DATASET_ARGS
-  target_qps: CM_MLPERF_LOADGEN_TARGET_QPS
-  target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY
-  offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
-  server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS
-  singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
-  multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
-  network: CM_NETWORK_LOADGEN
-  sut_servers: CM_NETWORK_LOADGEN_SUT_SERVERS
+  power: MLC_MLPERF_POWER
+  power_server: MLC_MLPERF_POWER_SERVER_ADDRESS
+  ntp_server: MLC_MLPERF_POWER_NTP_SERVER
+  max_amps: MLC_MLPERF_POWER_MAX_AMPS
+  max_volts: MLC_MLPERF_POWER_MAX_VOLTS
+  regenerate_files: MLC_REGENERATE_MEASURE_FILES
+  rerun: MLC_RERUN
+  scenario: MLC_MLPERF_LOADGEN_SCENARIO
+  test_query_count: MLC_TEST_QUERY_COUNT
+  clean: MLC_MLPERF_CLEAN_SUBMISSION_DIR
+  dataset_args: MLC_MLPERF_EXTRA_DATASET_ARGS
+  target_qps: MLC_MLPERF_LOADGEN_TARGET_QPS
+  target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY
+  offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
+  server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS
+  singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
+  multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
+  network: MLC_NETWORK_LOADGEN
+  sut_servers: MLC_NETWORK_LOADGEN_SUT_SERVERS

 # Duplicate CM environment variables to the ones used in native apps
 env_key_mappings:
-  CM_HOST_: HOST_
-  CM_ML_: ML_
-  CM_MLPERF_TVM: MLPERF_TVM
-  CM_MLPERF_DELETE: MLPERF_DELETE
+  MLC_HOST_: HOST_
+  MLC_ML_: ML_
+  MLC_MLPERF_TVM: MLPERF_TVM
+  MLC_MLPERF_DELETE: MLPERF_DELETE

 # Env keys which are exposed to higher level scripts
 new_env_keys:
-  - CM_MLPERF_*
-  - CM_DATASET_*
-  - CM_HW_NAME
-  - CM_ML_MODEL_*
-  - CM_MAX_EXAMPLES
-  - CM_VLLM_*
+  - MLC_MLPERF_*
+  - MLC_DATASET_*
+  - MLC_HW_NAME
+  - MLC_ML_MODEL_*
+  - MLC_MAX_EXAMPLES
+  - MLC_VLLM_*

 new_state_keys:
   - mlperf-inference-implementation
-  - CM_SUT_*
+  - MLC_SUT_*

 # Dependencies on other CM scripts
 deps:
@@ -108,9 +108,9 @@ deps:
     names:
     - cuda
     enable_if_env:
-      CM_MLPERF_DEVICE:
+      MLC_MLPERF_DEVICE:
      - gpu
-      CM_MLPERF_BACKEND:
+      MLC_MLPERF_BACKEND:
      - onnxruntime
      - tf
      - tflite
@@ -119,7 +119,7 @@ deps:
   # Detect TensorRT if required
   - tags: get,nvidia,tensorrt
     enable_if_env:
-      CM_MLPERF_BACKEND:
+      MLC_MLPERF_BACKEND:
      - tensorrt

 ########################################################################
@@ -131,10 +131,10 @@ deps:
     - ml-engine-onnxruntime
     - onnxruntime
     enable_if_env:
-      CM_MLPERF_BACKEND:
+      MLC_MLPERF_BACKEND:
      - onnxruntime
      - tvm-onnx
-      CM_MLPERF_DEVICE:
+      MLC_MLPERF_DEVICE:
      - cpu
      - rocm

@@ -143,36 +143,36 @@ deps:
     names:
     - ml-engine-onnxruntime-cuda
     enable_if_env:
-      CM_MLPERF_BACKEND:
+      MLC_MLPERF_BACKEND:
      - onnxruntime
      - tvm-onnx
-      CM_MLPERF_DEVICE:
+      MLC_MLPERF_DEVICE:
      - gpu
     skip_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - 3d-unet-99
      - 3d-unet-99.9

   ## resnet50 and 3d-unet need both onnxruntime and onnxruntime_gpu on cuda
   - tags: get,generic-python-lib,_onnxruntime
     enable_if_env:
-      CM_MLPERF_BACKEND:
+      MLC_MLPERF_BACKEND:
      - onnxruntime
-      CM_MLPERF_DEVICE:
+      MLC_MLPERF_DEVICE:
      - gpu
-      CM_MODEL:
+      MLC_MODEL:
      - 3d-unet-99
      - 3d-unet-99.9
      - resnet50
   - tags: get,generic-python-lib,_onnxruntime_gpu
     env:
-      CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: ""
+      MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: ""
     enable_if_env:
-      CM_MLPERF_BACKEND:
+      MLC_MLPERF_BACKEND:
      - onnxruntime
-      CM_MLPERF_DEVICE:
+      MLC_MLPERF_DEVICE:
      - gpu
-      CM_MODEL:
+      MLC_MODEL:
      - 3d-unet-99
      - 3d-unet-99.9
      - resnet50

@@ -184,14 +184,14 @@ deps:
     - ml-engine-pytorch
     - pytorch
     skip_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - dlrm-v2-99
      - dlrm-v2-99.9
     enable_if_env:
-      CM_MLPERF_BACKEND:
+      MLC_MLPERF_BACKEND:
      - pytorch
      - tvm-pytorch
-      CM_MLPERF_DEVICE:
+      MLC_MLPERF_DEVICE:
      - cpu
      - rocm

@@ -201,11 +201,11 @@ deps:
     - ml-engine-pytorch
     - pytorch
     enable_if_env:
-      CM_MLPERF_BACKEND:
+      MLC_MLPERF_BACKEND:
      - pytorch
      - tvm-pytorch
      - ray
-      CM_MLPERF_DEVICE:
+      MLC_MLPERF_DEVICE:
      - gpu

   ## Torchvision (CPU)
@@ -214,15 +214,15 @@ deps:
     - ml-engine-torchvision
     - torchvision
     skip_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - dlrm-v2-99
      - dlrm-v2-99.9
      - rgat
     enable_if_env:
-      CM_MLPERF_BACKEND:
+      MLC_MLPERF_BACKEND:
      - pytorch
      - tvm-pytorch
-      CM_MLPERF_DEVICE:
+      MLC_MLPERF_DEVICE:
      - cpu

   ## Torchvision (CUDA)
@@ -231,16 +231,16 @@ deps:
     - ml-engine-torchvision
     - torchvision
     skip_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - dlrm-v2-99
      - dlrm-v2-99.9
      - rgat
     enable_if_env:
-      CM_MLPERF_BACKEND:
+      MLC_MLPERF_BACKEND:
      - pytorch
      - tvm-pytorch
      - ray
-      CM_MLPERF_DEVICE:
+      MLC_MLPERF_DEVICE:
      - gpu

   ## tensorrt
@@ -248,7 +248,7 @@ deps:
     names:
     - ml-engine-tensorrt
     enable_if_env:
-      CM_MLPERF_BACKEND:
+      MLC_MLPERF_BACKEND:
      - ray

   ## torch_tensorrt
@@ -256,7 +256,7 @@ deps:
     names:
     - ml-engine-torch_tensorrt
     enable_if_env:
-      CM_MLPERF_BACKEND:
+      MLC_MLPERF_BACKEND:
      - ray

   ## Ray
@@ -264,7 +264,7 @@ deps:
     names:
     - ray
     enable_if_env:
-      CM_MLPERF_BACKEND:
+      MLC_MLPERF_BACKEND:
      - ray

   ## async_timeout (for multi-node)
@@ -274,7 +274,7 @@ deps:
     names:
     - async_timeout
     enable_if_env:
-      CM_MLPERF_BACKEND:
+      MLC_MLPERF_BACKEND:
      - ray

   ## Transformers
@@ -282,7 +282,7 @@ deps:
     names:
     - ml-engine-transformers
     enable_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - bert-99
      - bert-99.9
      - gptj-99
@@ -294,7 +294,7 @@ deps:
     - ml-engine-tensorflow
     - tensorflow
     enable_if_env:
-      CM_MLPERF_BACKEND:
+      MLC_MLPERF_BACKEND:
      - tf

   ## NCNN
@@ -302,29 +302,29 @@ deps:
     names:
     - ml-engine-ncnn
     enable_if_env:
-      CM_MLPERF_BACKEND:
+      MLC_MLPERF_BACKEND:
      - ncnn

   - tags: get,tensorflow,lib,_tflite
     names:
     - ml-engine-tflite
     enable_if_env:
-      CM_MLPERF_BACKEND:
+      MLC_MLPERF_BACKEND:
      - tflite

 ########################################################################
 # Install ML models

   - tags: get,ml-model,neural-magic,zoo
-    # sets CM_MLPERF_CUSTOM_MODEL_PATH
+    # sets MLC_MLPERF_CUSTOM_MODEL_PATH
     names:
     - custom-ml-model
     enable_if_env:
-      CM_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB:
+      MLC_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB:
      - "on"
     update_tags_from_env_with_prefix:
       "_model-stub.":
-       - CM_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB
+       - MLC_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB

   ## ResNet50
   - tags: get,ml-model,image-classification,resnet50
@@ -332,10 +332,10 @@ deps:
     - ml-model
     - resnet50-model
     enable_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - resnet50
     skip_if_env:
-      CM_MLPERF_CUSTOM_MODEL_PATH:
+      MLC_MLPERF_CUSTOM_MODEL_PATH:
      - "on"

   ## RetinaNet
@@ -344,7 +344,7 @@ deps:
     - ml-model
     - retinanet-model
     enable_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - retinanet

   ## GPT-J
@@ -354,11 +354,11 @@ deps:
     - gptj-model
     - gpt-j-model
     enable_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - gptj-99
      - gptj-99.9
     skip_if_env:
-      CM_NETWORK_LOADGEN:
+      MLC_NETWORK_LOADGEN:
      - lon

   ## RetinaNet (PyTorch weights, FP32)
@@ -367,11 +367,11 @@ deps:
     - ml-model
     - retinanet-model
     enable_if_env:
-      CM_MLPERF_BACKEND:
+      MLC_MLPERF_BACKEND:
      - pytorch
-      CM_MLPERF_IMPLEMENTATION:
+      MLC_MLPERF_IMPLEMENTATION:
      - nvidia
-      CM_MODEL:
+      MLC_MODEL:
      - retinanet

   ## BERT
@@ -380,11 +380,11 @@ deps:
     - ml-model
     - bert-model
     enable_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - bert-99
      - bert-99.9
     skip_if_env:
-      CM_MLPERF_CUSTOM_MODEL_PATH:
+      MLC_MLPERF_CUSTOM_MODEL_PATH:
      - "on"

   ## SDXL
@@ -394,15 +394,15 @@ deps:
     - sdxl-model
     - ml-model-float16
     enable_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - stable-diffusion-xl
     skip_if_any_env:
-      CM_MLPERF_CUSTOM_MODEL_PATH:
+      MLC_MLPERF_CUSTOM_MODEL_PATH:
      - "on"
     skip_if_env:
-      CM_RUN_STATE_DOCKER:
+      MLC_RUN_STATE_DOCKER:
      - "yes"
-      CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST:
+      MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST:
      - "yes"

   ## LLAMA2-70B
@@ -411,18 +411,18 @@ deps:
     - ml-model
     - llama2-model
     enable_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - llama2-70b-99
      - llama2-70b-99.9
     skip_if_any_env:
-      CM_MLPERF_CUSTOM_MODEL_PATH:
+      MLC_MLPERF_CUSTOM_MODEL_PATH:
      - "on"
-      CM_MLPERF_INFERENCE_API_SERVER:
+      MLC_MLPERF_INFERENCE_API_SERVER:
      - "on"
     skip_if_env:
-      CM_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST:
+      MLC_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST:
      - "yes"
-      CM_RUN_STATE_DOCKER:
+      MLC_RUN_STATE_DOCKER:
      - "yes"

   ## mixtral-8x7b
@@ -431,15 +431,15 @@ deps:
     - ml-model
     - mixtral-model
     enable_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - mixtral-8x7b
     skip_if_any_env:
-      CM_MLPERF_CUSTOM_MODEL_PATH:
+      MLC_MLPERF_CUSTOM_MODEL_PATH:
      - "on"
     skip_if_env:
-      CM_MLPERF_MODEL_MIXTRAL_8X7B_DOWNLOAD_TO_HOST:
+      MLC_MLPERF_MODEL_MIXTRAL_8X7B_DOWNLOAD_TO_HOST:
      - "yes"
-      CM_RUN_STATE_DOCKER:
+      MLC_RUN_STATE_DOCKER:
      - "yes"

   ## 3d-unet
@@ -448,7 +448,7 @@ deps:
     - ml-model
     - 3d-unet-model
     enable_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - 3d-unet-99
      - 3d-unet-99.9

   ## Rnnt
@@ -458,7 +458,7 @@ deps:
     - ml-model
     - rnnt-model
     enable_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - rnnt

   ## Dlrm
@@ -467,13 +467,13 @@ deps:
     - ml-model
     - dlrm-model
     enable_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - dlrm-99
      - dlrm-99.9
      - dlrm-v2-99
      - dlrm-v2-99.9
     skip_if_env:
-      CM_ML_MODEL_FILE_WITH_PATH:
+      MLC_ML_MODEL_FILE_WITH_PATH:
      - "on"

   ## RGAT
@@ -481,7 +481,7 @@ deps:
     names:
     - rgat-model
     enable_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - rgat
     skip_if_env:
       RGAT_CHECKPOINT_PATH:
@@ -493,13 +493,13 @@ deps:
     - llama3-405b-model
     - llama3-402b-model
     enable_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - llama3_1-405b
      - llama3-405b
     skip_if_env:
-      CM_USE_MODEL_FROM_HOST:
+      MLC_USE_MODEL_FROM_HOST:
      - "yes"
-      CM_RUN_STATE_DOCKER:
+      MLC_RUN_STATE_DOCKER:
      - "yes"

 ########################################################################
@@ -510,24 +510,24 @@ deps:
     names:
     - imagenet-preprocessed
     enable_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - resnet50
     skip_if_env:
-      CM_MLPERF_VISION_DATASET_OPTION:
+      MLC_MLPERF_VISION_DATASET_OPTION:
      - on

   - tags: get,dataset,image-classification,imagenet,preprocessed,_pytorch
     names:
     - imagenet-preprocessed
     enable_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - resnet50
-      CM_MLPERF_VISION_DATASET_OPTION:
+      MLC_MLPERF_VISION_DATASET_OPTION:
      - imagenet_pytorch

   - tags: get,dataset-aux,image-classification,imagenet-aux
     enable_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - resnet50

   ## Open Images for RetinaNet
@@ -535,7 +535,7 @@ deps:
     names:
     - openimages-preprocessed
     enable_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - retinanet

   ## CNNDM for Large Language Model
@@ -543,7 +543,7 @@ deps:
     names:
     - cnndm-original
     enable_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - gptj-99
      - gptj-99.9

@@ -552,13 +552,13 @@ deps:
     names:
     - squad-original
     enable_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - bert-99
      - bert-99.9

   - tags: get,dataset-aux,squad-vocab
     enable_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - bert-99
      - bert-99.9

@@ -568,7 +568,7 @@ deps:
     - coco2014-preprocessed
     - coco2014-dataset
     enable_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - stable-diffusion-xl

   ## OpenOrca for LLAMA2-70b
@@ -576,7 +576,7 @@ deps:
     names:
     - openorca-preprocessed
     enable_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - llama2-70b-99
      - llama2-70b-99.9

@@ -585,12 +585,12 @@ deps:
     names:
     - openorca-mbxp-gsm8k-combined-preprocessed
     enable_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - mixtral-8x7b
     skip_if_env:
-      CM_MLPERF_DATASET_MIXTRAL_8X7B_DOWNLOAD_TO_HOST:
+      MLC_MLPERF_DATASET_MIXTRAL_8X7B_DOWNLOAD_TO_HOST:
      - "yes"
-      CM_RUN_STATE_DOCKER:
+      MLC_RUN_STATE_DOCKER:
      - "yes"

   ## Kits19 for 3d-unet
@@ -598,13 +598,13 @@ deps:
     names:
     - kits19-preprocessed
     enable_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - 3d-unet-99
      - 3d-unet-99.9
     skip_if_env:
-      CM_MLPERF_DATASET_3DUNET_DOWNLOAD_TO_HOST:
+      MLC_MLPERF_DATASET_3DUNET_DOWNLOAD_TO_HOST:
      - "yes"
-      CM_RUN_STATE_DOCKER:
+      MLC_RUN_STATE_DOCKER:
      - "yes"

   ## Librispeech for rnnt
@@ -612,7 +612,7 @@ deps:
     names:
     - librispeech-preprocessed
     enable_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - rnnt

   ## Criteo for dlrm
@@ -620,11 +620,11 @@ deps:
     names:
     - criteo-preprocessed
     enable_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - dlrm-v2-99
      - dlrm-v2-99.9
     skip_if_env:
-      CM_CRITEO_PREPROCESSED_PATH:
+      MLC_CRITEO_PREPROCESSED_PATH:
      - on

   ## igbh for rgat
@@ -633,12 +633,12 @@ deps:
     - igbh-dataset
     - illinois-graph-benchmark-heterogeneous
     enable_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - rgat
     skip_if_env:
-      CM_RUN_STATE_DOCKER:
+      MLC_RUN_STATE_DOCKER:
      - "yes"
-      CM_USE_DATASET_FROM_HOST:
+      MLC_USE_DATASET_FROM_HOST:
      - "yes"

   ## llama3_1 dataset
@@ -647,13 +647,13 @@ deps:
     - llama3_1-dataset
     - llama3-dataset
     enable_if_env:
-      CM_MODEL:
+      MLC_MODEL:
      - llama3_1-405b
      - llama3-402b
     skip_if_env:
-      CM_USE_DATASET_FROM_HOST:
+      MLC_USE_DATASET_FROM_HOST:
      - "yes"
-      CM_RUN_STATE_DOCKER:
+      MLC_RUN_STATE_DOCKER:
      - "yes"

 ########################################################################
@@ -664,7 +664,7 @@ deps:
     names:
     - user-conf-generator
     skip_if_env:
-      CM_RUN_STATE_DOCKER:
+      MLC_RUN_STATE_DOCKER:
      - "yes"

   # Install MLPerf loadgen
@@ -681,7 +681,7 @@ deps:
   # Download MLPerf inference source
   - tags: get,mlcommons,inference,src
     env:
-      CM_GET_MLPERF_IMPLEMENTATION_ONLY: "yes"
+      MLC_GET_MLPERF_IMPLEMENTATION_ONLY: "yes"
     names:
     - mlperf-implementation

@@ -692,7 +692,7 @@ prehook_deps:
    - remote-run-cmds
    tags: remote,run,cmds
    enable_if_env:
-      CM_ASSH_RUN_COMMANDS:
+      MLC_ASSH_RUN_COMMANDS:
      - "on"

 posthook_deps:
@@ -700,7 +700,7 @@ posthook_deps:
    - mlperf-runner
    tags: benchmark-mlperf
    skip_if_env:
-      CM_MLPERF_SKIP_RUN:
+      MLC_MLPERF_SKIP_RUN:
      - "on"

 post_deps:
@@ -717,8 +717,8 @@ variations:
       imagenet-accuracy-script:
         tags: _float32
     env:
-      CM_MLPERF_PYTHON: "yes"
-      CM_MLPERF_IMPLEMENTATION: reference
+      MLC_MLPERF_PYTHON: "yes"
+      MLC_MLPERF_IMPLEMENTATION: reference

   # ML engine
   onnxruntime:
@@ -735,15 +735,15 @@ variations:
         version_max: "1.26.4"
         version_max_usable: "1.26.4"
     env:
-      CM_MLPERF_BACKEND: onnxruntime
+      MLC_MLPERF_BACKEND: onnxruntime

   onnxruntime,cpu:
     env:
-      CM_MLPERF_BACKEND_VERSION: <<>>
+      MLC_MLPERF_BACKEND_VERSION: <<>>

   onnxruntime,cuda:
     env:
-      CM_MLPERF_BACKEND_VERSION: <<>>
+      MLC_MLPERF_BACKEND_VERSION: <<>>
       ONNXRUNTIME_PREFERRED_EXECUTION_PROVIDER: "CUDAExecutionProvider"

   pytorch:
@@ -759,8 +759,8 @@ variations:
         version_max: "1.26.4"
         version_max_usable: "1.26.4"
     env:
-      CM_MLPERF_BACKEND: pytorch
-      CM_MLPERF_BACKEND_VERSION: <<>>
+      MLC_MLPERF_BACKEND: pytorch
+      MLC_MLPERF_BACKEND_VERSION: <<>>

   pytorch,rocm:
     add_deps_recursive:
@@ -779,15 +779,15 @@ variations:
       ml-model:
         tags: raw,_pytorch
     env:
-      CM_MLPERF_BACKEND: ray
-      CM_MLPERF_BACKEND_VERSION: <<>>
+      MLC_MLPERF_BACKEND: ray
+      MLC_MLPERF_BACKEND_VERSION: <<>>

   tf,rocm:
     add_deps_recursive:
       tensorflow:
         tags: _rocm
     env:
-      CM_MLPERF_BACKEND_VERSION: <<>>
+      MLC_MLPERF_BACKEND_VERSION: <<>>

   onnxruntime,rocm:
     add_deps_recursive:
@@ -795,7 +795,7 @@ variations:
         tags: _rocm
     env:
       ONNXRUNTIME_PREFERRED_EXECUTION_PROVIDER: "ROCMExecutionProvider"
-      CM_MLPERF_BACKEND_VERSION: <<>>
+      MLC_MLPERF_BACKEND_VERSION: <<>>

   ncnn:
     group: framework
@@ -805,9 +805,9 @@ variations:
       ml-model:
         tags: raw,_ncnn
     env:
-      CM_MLPERF_BACKEND: ncnn
-      CM_MLPERF_BACKEND_VERSION: <<>>
-      CM_MLPERF_VISION_DATASET_OPTION: imagenet_pytorch
+      MLC_MLPERF_BACKEND: ncnn
+      MLC_MLPERF_BACKEND_VERSION: <<>>
+      MLC_MLPERF_VISION_DATASET_OPTION: imagenet_pytorch

   tflite:
     group: framework
@@ -817,9 +817,9 @@ variations:
       ml-model:
         tags: raw,_tflite,_no-argmax
     env:
-      CM_MLPERF_BACKEND: tflite
-      CM_MLPERF_BACKEND_VERSION: <<>>
-      CM_MLPERF_VISION_DATASET_OPTION: imagenet_tflite_tpu
+      MLC_MLPERF_BACKEND: tflite
+      MLC_MLPERF_BACKEND_VERSION: <<>>
+      MLC_MLPERF_VISION_DATASET_OPTION: imagenet_tflite_tpu

   tf:
     group: framework
@@ -829,8 +829,8 @@ variations:
       ml-model:
         tags: raw,_tf
     env:
-      CM_MLPERF_BACKEND: tf
-      CM_MLPERF_BACKEND_VERSION: <<>>
+      MLC_MLPERF_BACKEND: tf
+      MLC_MLPERF_BACKEND_VERSION: <<>>

   tensorflow:
     alias: tf

@@ -838,16 +838,16 @@ variations:
   deepsparse:
     group: framework
     env:
-      CM_MLPERF_BACKEND: deepsparse
-      CM_MLPERF_BACKEND_VERSION: <<>>
+      MLC_MLPERF_BACKEND: deepsparse
+      MLC_MLPERF_BACKEND_VERSION: <<>>
     deps:
     - tags: get,generic-python-lib,_deepsparse
       skip_if_env:
-        CM_HOST_PLATFORM_FLAVOR:
+        MLC_HOST_PLATFORM_FLAVOR:
        - aarch64
     - tags: get,generic-python-lib,_package.deepsparse-nightly
       enable_if_env:
-        CM_HOST_PLATFORM_FLAVOR:
+        MLC_HOST_PLATFORM_FLAVOR:
        - aarch64
     add_deps_recursive:
       mlperf-implementation:
@@ -858,8 +858,8 @@ variations:
   tvm-onnx:
     group: framework
     env:
-      CM_MLPERF_BACKEND: tvm-onnx
-      CM_MLPERF_BACKEND_VERSION: <<>>
+      MLC_MLPERF_BACKEND: tvm-onnx
+      MLC_MLPERF_BACKEND_VERSION: <<>>
     deps:
     - tags: get,generic-python-lib,_onnx
     - tags: get,generic-python-lib,_numpy
@@ -873,13 +873,13 @@ variations:
       - tvm-model
       update_tags_from_env_with_prefix:
         _model.:
-        - CM_MODEL
+        - MLC_MODEL

   tvm-tflite:
     group: framework
     env:
-      CM_MLPERF_BACKEND: tvm-tflite
-      CM_MLPERF_BACKEND_VERSION: <<>>
+      MLC_MLPERF_BACKEND: tvm-tflite
+      MLC_MLPERF_BACKEND_VERSION: <<>>
     deps:
     - tags: get,generic-python-lib,_tflite
     - tags: get,tvm
@@ -890,14 +890,14 @@ variations:
       - tvm-model
       update_tags_from_env_with_prefix:
         _model.:
-        - CM_MODEL
+        - MLC_MODEL

   tvm-pytorch:
     group: framework
     env:
-      CM_MLPERF_BACKEND: tvm-pytorch
-      CM_MLPERF_BACKEND_VERSION: <<>>
-      CM_PREPROCESS_PYTORCH: "yes"
+      MLC_MLPERF_BACKEND: tvm-pytorch
+      MLC_MLPERF_BACKEND_VERSION: <<>>
+      MLC_PREPROCESS_PYTORCH: "yes"
       MLPERF_TVM_TORCH_QUANTIZED_ENGINE: qnnpack
     deps:
     - tags: get,generic-python-lib,_torch
@@ -912,7 +912,7 @@ variations:
       - tvm-model
       update_tags_from_env_with_prefix:
         _model.:
-        - CM_MODEL
+        - MLC_MODEL

   # Reference MLPerf models
   gptj-99.9:
     group: models
     base:
     - gptj_
     env:
-      CM_MODEL: gptj-99.9
+      MLC_MODEL: gptj-99.9

   gptj-99:
     group: models
     base:
     - gptj_
     env:
-      CM_MODEL: gptj-99
+      MLC_MODEL: gptj-99

   gptj_:
     deps:
@@ -940,18 +940,18 @@ variations:
     base:
     - bert
     env:
-      CM_MODEL: bert-99.9
+      MLC_MODEL: bert-99.9

   bert-99:
     group: models
     base:
     - bert
     env:
-      CM_MODEL: bert-99
+      MLC_MODEL: bert-99

   bert:
     env:
-      CM_MLPERF_MODEL_SKIP_BATCHING: true
+      MLC_MLPERF_MODEL_SKIP_BATCHING: true
     deps:
     - tags: get,generic-python-lib,_package.pydantic
     - tags: get,generic-python-lib,_tokenization
@@ -962,19 +962,19 @@ variations:
       - protobuf
       version_max: "3.19"
       enable_if_env:
-        CM_MLPERF_BACKEND:
+        MLC_MLPERF_BACKEND:
        - tf
        - tflite
     - tags: get,generic-python-lib,_boto3
       enable_if_env:
-        CM_MLPERF_BACKEND:
+        MLC_MLPERF_BACKEND:
        - pytorch
     - tags: get,generic-python-lib,_torch
       names:
       - ml-engine-pytorch
       - pytorch
       skip_if_env:
-        CM_MLPERF_DEVICE:
+        MLC_MLPERF_DEVICE:
        - gpu
     add_deps_recursive:
       inference-src:
@@ -983,8 +983,8 @@ variations:
   sdxl:
     group: models
     env:
-      CM_MODEL: stable-diffusion-xl
-      CM_NUM_THREADS: "1"
+      MLC_MODEL: stable-diffusion-xl
+      MLC_NUM_THREADS: "1"
     deps:
     - tags: get,generic-python-lib,_package.diffusers
       names:
@@ -1019,8 +1019,8 @@ variations:

   llama2-70b_:
     env:
-      CM_MLPERF_MODEL_SKIP_BATCHING: false
-      CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://github.com/mlcommons/cm4mlops/blob/b18ff890ff559e21d2e27a3b54cd26467ac1fd9e/script/get-ml-model-llama2/_cm.json#L51"
+      MLC_MLPERF_MODEL_SKIP_BATCHING: false
+      MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://github.com/mlcommons/cm4mlops/blob/b18ff890ff559e21d2e27a3b54cd26467ac1fd9e/script/get-ml-model-llama2/_cm.json#L51"
     deps:
     - tags: get,generic-python-lib,_package.transformers
       names:
@@ -1064,25 +1064,25 @@ variations:
   llama2-70b-99:
     group: models
     env:
-      CM_MODEL: llama2-70b-99
+      MLC_MODEL: llama2-70b-99
     base:
     - llama2-70b_

   llama2-70b_,cuda:
     default_env:
-      CM_MLPERF_LOADGEN_MAX_BATCHSIZE: 8
+      MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: 8

   llama2-70b-99.9:
     group: models
     env:
-      CM_MODEL: llama2-70b-99.9
+      MLC_MODEL: llama2-70b-99.9
     base:
     - llama2-70b_

   mixtral-8x7b:
     group: models
     env:
-      CM_MODEL: mixtral-8x7b
+      MLC_MODEL: mixtral-8x7b
     deps:
     - tags: get,rust-compiler
       names:
@@ -1123,26 +1123,26 @@ variations:

   mixtral-8x7b,cuda:
     default_env:
-      CM_MLPERF_LOADGEN_BATCH_SIZE: 1
+      MLC_MLPERF_LOADGEN_BATCH_SIZE: 1

   3d-unet-99.9:
     group: models
     base:
     - 3d-unet
     env:
-      CM_MODEL: 3d-unet-99.9
+      MLC_MODEL: 3d-unet-99.9

   3d-unet-99:
     group: models
     base:
     - 3d-unet
     env:
-      CM_MODEL: 3d-unet-99
+      MLC_MODEL: 3d-unet-99

   3d-unet:
     env:
-      CM_TMP_IGNORE_MLPERF_QUERY_COUNT: true
-      CM_MLPERF_MODEL_SKIP_BATCHING: true
+      MLC_TMP_IGNORE_MLPERF_QUERY_COUNT: true
+      MLC_MLPERF_MODEL_SKIP_BATCHING: true
     deps:
     - tags: get,generic-python-lib,_package.nibabel
     - tags: get,generic-python-lib,_package.scipy
@@ -1155,19 +1155,19 @@ variations:
     base:
     - dlrm-v2_
     env:
-      CM_MODEL: dlrm-v2-99.9
+      MLC_MODEL: dlrm-v2-99.9

   dlrm-v2-99:
     group: models
     base:
     - dlrm-v2_
     env:
-      CM_MODEL: dlrm-v2-99
+      MLC_MODEL: dlrm-v2-99

   dlrm-v2_:
     env:
-      CM_MLPERF_MODEL_SKIP_BATCHING: true
-      CM_ML_MODEL_DATASET_TYPE: multihot-criteo
+      MLC_MLPERF_MODEL_SKIP_BATCHING: true
+      MLC_ML_MODEL_DATASET_TYPE: multihot-criteo

   dlrm-v2_,pytorch:
     deps:
@@ -1202,9 +1202,9 @@ variations:
   rnnt:
     group: models
     env:
-      CM_MODEL: rnnt
-      CM_MLPERF_MODEL_SKIP_BATCHING: true
-      CM_TMP_IGNORE_MLPERF_QUERY_COUNT: true
+      MLC_MODEL: rnnt
+      MLC_MLPERF_MODEL_SKIP_BATCHING: true
+      MLC_TMP_IGNORE_MLPERF_QUERY_COUNT: true
     deps:
     - tags: get,generic-python-lib,_package.pydantic
       version_max: "1.10.9"
@@ -1225,16 +1225,16 @@ variations:
     - tags: get,generic-python-lib,_pycocotools
     env:
-      CM_MODEL: retinanet
-      CM_MLPERF_USE_MLCOMMONS_RUN_SCRIPT: "yes"
-      CM_MLPERF_LOADGEN_MAX_BATCHSIZE: "1"
+      MLC_MODEL: retinanet
+      MLC_MLPERF_USE_MLCOMMONS_RUN_SCRIPT: "yes"
+      MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: "1"

   resnet50:
     group: models
     default: true
     env:
-      CM_MODEL: resnet50
-      CM_MLPERF_USE_MLCOMMONS_RUN_SCRIPT: "yes"
+      MLC_MODEL: resnet50
+      MLC_MLPERF_USE_MLCOMMONS_RUN_SCRIPT: "yes"
     deps:
     - tags: get,generic-python-lib,_opencv-python
     - tags: get,generic-python-lib,_numpy
@@ -1248,14 +1248,14 @@ variations:
       version_max: "4.23.4"
       version_max_usable: "4.23.4"
       enable_if_env:
-        CM_MLPERF_BACKEND:
+        MLC_MLPERF_BACKEND:
        - tf
        - tflite

   rgat:
     group: models
     env:
-      CM_MODEL: rgat
+      MLC_MODEL: rgat
     add_deps_recursive:
       pytorch:
         version_max: "2.4.0"
@@ -1275,38 +1275,38 @@ variations:
     - tags: get,generic-python-lib,_package.torch-geometric
       update_tags_from_env_with_prefix:
         _find_links_url.:
-        - CM_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL
+        - MLC_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL
     - tags: get,generic-python-lib,_package.torch-scatter
       update_tags_from_env_with_prefix:
         _find_links_url.:
-        - CM_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL
+        - MLC_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL
     - tags: get,generic-python-lib,_package.torch-sparse
       update_tags_from_env_with_prefix:
         _find_links_url.:
-        - CM_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL
+        - MLC_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL
     - tags: get,generic-python-lib,_package.dgl
       update_tags_from_env_with_prefix:
         _find_links_url.:
-        - CM_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL_DGL
+        - MLC_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL_DGL

   rgat,cuda:
     env:
-      CM_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL: "https://data.pyg.org/whl/torch-<<>>.html"
-      CM_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL_DGL: "https://data.dgl.ai/wheels/torch-<<>>/cu121/repo.html"
+      MLC_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL: "https://data.pyg.org/whl/torch-<<>>.html"
+      MLC_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL_DGL: "https://data.dgl.ai/wheels/torch-<<>>/cu121/repo.html"

   rgat,cpu:
     env:
-      CM_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL: "https://data.pyg.org/whl/torch-<<>>+cpu.html"
-      CM_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL_DGL: "https://data.dgl.ai/wheels/torch-<<>>/repo.html"
+      MLC_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL: "https://data.pyg.org/whl/torch-<<>>+cpu.html"
+      MLC_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL_DGL: "https://data.dgl.ai/wheels/torch-<<>>/repo.html"

   llama3_1-405b:
     group: models
     env:
-      CM_MODEL: llama3_1-405b
+      MLC_MODEL: llama3_1-405b
     adr:
       pytorch:
         version_max: 2.5.1
-      CM_MODEL: llama3-402b
+      MLC_MODEL: llama3-402b
     deps:
     - tags: get,generic-python-lib,_package.torchvision
     - tags: get,generic-python-lib,_package.torchaudio
@@ -1316,25 +1316,25 @@ variations:
     - tags: get,generic-python-lib,_package.accelerate
     - tags: get,generic-python-lib,_package.vllm
       env:
-        CM_GENERIC_PYTHON_PIP_EXTRA: "--upgrade"
+        MLC_GENERIC_PYTHON_PIP_EXTRA: "--upgrade"
     - tags: get,generic-python-lib,_package.pybind11
     - tags: get,generic-python-lib,_package.pandas
       version_max: 2.2.1

   llama3_1-405b,cuda:
     env:
-      CM_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL: "https://data.pyg.org/whl/torch-<<>>.html"
+      MLC_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL: "https://data.pyg.org/whl/torch-<<>>.html"

   llama3_1-405b,cpu:
     env:
-      CM_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL: "https://data.pyg.org/whl/torch-<<>>+cpu.html"
+      MLC_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL: "https://data.pyg.org/whl/torch-<<>>+cpu.html"

   # Target devices
   cpu:
     group: device
     default: true
     env:
-      CM_MLPERF_DEVICE: cpu
+      MLC_MLPERF_DEVICE: cpu
       CUDA_VISIBLE_DEVICES: ""
       USE_CUDA: no
       USE_GPU: no
@@ -1342,20 +1342,20 @@ variations:
   cuda:
     group: device
     env:
-      CM_MLPERF_DEVICE: gpu
+      MLC_MLPERF_DEVICE: gpu
       USE_CUDA: yes
       USE_GPU: yes

   rocm:
     group: device
     env:
-      CM_MLPERF_DEVICE: rocm
+      MLC_MLPERF_DEVICE: rocm
       USE_GPU: yes

   tpu:
     group: device
     env:
-      CM_MLPERF_DEVICE: tpu
+      MLC_MLPERF_DEVICE: tpu

   tpu,tflite:
     add_deps_recursive:
@@ -1365,16 +1365,16 @@ variations:
   # Loadgen scenarios
   offline:
     env:
-      CM_MLPERF_LOADGEN_SCENARIO: Offline
+      MLC_MLPERF_LOADGEN_SCENARIO: Offline
   multistream:
     env:
-      CM_MLPERF_LOADGEN_SCENARIO: MultiStream
+      MLC_MLPERF_LOADGEN_SCENARIO: MultiStream
   singlestream:
     env:
-      CM_MLPERF_LOADGEN_SCENARIO: SingleStream
+      MLC_MLPERF_LOADGEN_SCENARIO: SingleStream
   server:
     env:
-      CM_MLPERF_LOADGEN_SCENARIO: Server
+      MLC_MLPERF_LOADGEN_SCENARIO: Server

   # Model precision
   fp32:
@@ -1384,8 +1384,8 @@ variations:
       ml-model:
         tags: _fp32
     env:
-      CM_MLPERF_QUANTIZATION: off
-      CM_MLPERF_MODEL_PRECISION: float32
+      MLC_MLPERF_QUANTIZATION: off
+      MLC_MLPERF_MODEL_PRECISION: float32

   # Model precision
   float16:
@@ -1394,8 +1394,8 @@ variations:
       ml-model-float16:
         tags: _fp16
     env:
-      CM_MLPERF_QUANTIZATION: off
-      CM_MLPERF_MODEL_PRECISION: float16
+      MLC_MLPERF_QUANTIZATION: off
+      MLC_MLPERF_MODEL_PRECISION: float16

   # Model precision
   bfloat16:
@@ -1404,14 +1404,14 @@ variations:
       ml-model-float16:
         tags: _fp16
     env:
-      CM_MLPERF_QUANTIZATION: off
-      CM_MLPERF_MODEL_PRECISION: bfloat16
+      MLC_MLPERF_QUANTIZATION: off
+      MLC_MLPERF_MODEL_PRECISION: bfloat16

   int8:
     group: precision
     env:
-      CM_MLPERF_QUANTIZATION: on
-      CM_MLPERF_MODEL_PRECISION: int8
+      MLC_MLPERF_QUANTIZATION: on
+      MLC_MLPERF_MODEL_PRECISION: int8
     add_deps_recursive:
       ml-model:
         tags: _int8
@@ -1422,7 +1422,7 @@ variations:
   batch_size.#:
     group: batch-size
     env:
-      CM_MLPERF_LOADGEN_MAX_BATCHSIZE: "#"
+      MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: "#"
     add_deps_recursive:
       ml-model:
         tags: _batch_size.#
@@ -1436,14 +1436,14 @@ variations:
       names:
       - flask
     env:
-      CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: network_sut
-      CM_NETWORK_LOADGEN: sut
+      MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: network_sut
+      MLC_NETWORK_LOADGEN: sut

   network-lon:
     group: network
     env:
-      CM_NETWORK_LOADGEN: lon
-      CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: network_loadgen
+      MLC_NETWORK_LOADGEN: lon
+      MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: network_loadgen

   beam_size.#:
     env:
@@ -1459,6 +1459,6 @@ variations:
       loadgen:
         version: r2.1
     env:
-      CM_RERUN: "yes"
-      CM_SKIP_SYS_UTILS: "yes"
-      CM_TEST_QUERY_COUNT: "100"
+      MLC_RERUN: "yes"
+      MLC_SKIP_SYS_UTILS: "yes"
+      MLC_TEST_QUERY_COUNT: "100"
diff --git a/script/app-mlperf-inference-mlcommons-python/nvidia/retinanet.py b/script/app-mlperf-inference-mlcommons-python/nvidia/retinanet.py
index 090d1b072..dd2a3e016 100644
--- a/script/app-mlperf-inference-mlcommons-python/nvidia/retinanet.py
+++ b/script/app-mlperf-inference-mlcommons-python/nvidia/retinanet.py
@@ -55,14 +55,14 @@
 G_OPENIMAGE_CALMAP_PATH = "data_maps/open-images-v6-mlperf/cal_map.txt"
 G_OPENIMAGE_VALSET_PATH = os.path.join(
     os.environ.get(
-        "CM_DATASET_PATH",
+        "MLC_DATASET_PATH",
         "build/data/open-images-v6-mlperf"),
     "validation",
     "data")
 G_OPENIMAGE_VALMAP_PATH = "data_maps/open-images-v6-mlperf/val_map.txt"
 G_OPENIMAGE_ANNO_PATH = os.path.join(
     os.environ.get(
-        "CM_DATASET_PATH",
+        "MLC_DATASET_PATH",
         "build/data/open-images-v6-mlperf"),
     "annotations",
     "openimages-mlperf.json")
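The rename also reaches module-level constants that read paths straight from the process environment, as in the retinanet.py hunk above. A small sketch of the fallback behaviour after this patch (variable names as above; the default path comes from the surrounding code):

```python
# Illustrative sketch: the dataset root comes from MLC_DATASET_PATH when
# exported, otherwise the harness falls back to the in-tree build directory.
import os

dataset_root = os.environ.get(
    "MLC_DATASET_PATH", "build/data/open-images-v6-mlperf")
valset_dir = os.path.join(dataset_root, "validation", "data")
anno_file = os.path.join(dataset_root, "annotations", "openimages-mlperf.json")
```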
diff --git a/script/app-mlperf-inference-nvidia/README-about.md b/script/app-mlperf-inference-nvidia/README-about.md
deleted file mode 100644
index b78d64b62..000000000
--- a/script/app-mlperf-inference-nvidia/README-about.md
+++ /dev/null
@@ -1,137 +0,0 @@
-This script is a CM wrapper to the official [Nvidia submission code](https://github.com/mlcommons/inference_results_v3.0/tree/master/closed/NVIDIA) used for MLPerf inference submissions.
-
-
-
-## Download the needed files
-
-* Please ask privately in [this discord channel](https://discord.gg/y7hupJsUNb) if you would like to get access to an Amazon S3 bucket containing all the needed files for easiness. Otherwise, you can download them from the below links.
-
-For x86 machines, please download the latest install tar files from the below sites
-1. [cuDNN](https://developer.nvidia.com/cudnn) (for cuda 11)
-2. [TensorRT](https://developer.nvidia.com/tensorrt)
-3. Imagenet validation set (unfortunately not available via public URL) following the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md)
-
-
-
-## Using Docker (Recommended on x86 systems)
-
-
-Assuming all the downloaded files are to the user home directory please do the following steps:
-
-1. Download CUDA 11.8
-   ```
-   wget https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run
-   ```
-2. [Install docker](https://docs.docker.com/engine/install/) and [Nvidia container toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)
-
-3. Give docker permission to the current user
-   ```
-   sudo usermod -aG docker $USER
-   ```
-   Logout and login
-   Restart docker if required and confirm that Nvidia container toolkit is working by
-   ```
-   nvidia-ctk --version
-   ```
-4. Check if Nvidia driver is working properly on the host.
-   ```
-   nvidia-smi
-   ```
-   If the above command produces any error you'll need to install Nvidia drivers on the host. You can do this via CM if you have sudo access
-   ```
-   cmr "install cuda prebuilt _driver" --version=11.8.0
-   ```
-5. Build the docker container and mount the paths from the host machine.
-   ** You may want to change the `scratch_path` location as it can take 100s of GBs.**
-   ```bash
-   cm docker script --tags=build,nvidia,inference,server \
-   --cuda_run_file_path=$HOME/cuda_11.8.0_520.61.05_linux.run \
-   --tensorrt_tar_file_path=$HOME/TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-11.8.tar.gz \
-   --cudnn_tar_file_path=$HOME/cudnn-linux-x86_64-8.9.2.26_cuda11-archive.tar.xz \
-   --imagenet_path=$HOME/imagenet-2012-val \
-   --scratch_path=$HOME/mlperf_scratch \
-   --docker_cm_repo=mlcommons@cm4mlops \
-   --results_dir=$HOME/results_dir \
-   --submission_dir=$HOME/submission_dir \
-   --adr.compiler.tags=gcc
-   ```
-     * Use `--docker_cache=no` to turn off docker caching
-     * Use `--docker_run_cmd_prefix="cm pull repo mlcommons@cm4mlops --checkout=dev"` to update the CK repository when docker caching is used
-     * Use `--custom_system=no` if you are using a similar system to the [Nvidia submission systems for MLPerf inference 3.0](https://github.com/mlcommons/inference_results_v3.0/tree/main/closed/NVIDIA/systems).
-
-6. At the end of the build you'll get the following prompt unless you have chosen `--custom_system=no`. Please give a system name and say yes to generating the configuration files
-   ### Example output
-   ```
-   ============================================
-   => A system ID is a string containing only letters, numbers, and underscores
-   => that is used as the human-readable name of the system. It is also used as
-   => the system name when creating the measurements/ and results/ entries.
-   => This string should also start with a letter to be a valid Python enum member name.
-   => Specify the system ID to use for the current system: phoenix
-     => Reloaded system list. MATCHED_SYSTEM: KnownSystem.phoenix
-   => This script will generate Benchmark Configuration stubs for the detected system.
-   Continue? [y/n]: y
-   ```
-   Now you'll be inside the CM Nvidia docker container and can run further scripts.
-
-7. Once the build is complete, you can proceed with any further CM scripts like for MLPerf inference. You can also save the container at this stage using [docker commit](https://docs.docker.com/engine/reference/commandline/commit/) so that it can be launched later without having to go through the previous steps.
-
-
-
-## Without Docker
-
-
-1. Install CUDA
-   If CUDA is not detected, CM should download and install it automatically when you run the workflow.
-   ** Nvidia drivers are expected to be installed on the system **
-
-2. Install cuDNN
-   ```bash
-   cmr "get cudnn" --tar_file=
-   ```
-3. Install TensorRT
-   ```bash
-   cmr "get tensorrt _dev" --tar_file=
-   ```
-   On non x86 systems like Nvidia Orin, you can do a package manager install and then CM should pick up the installation automatically during the workflow run.
-
-4. Build the Nvidia inference server
-   ```
-   cmr "build nvidia inference server" \
-   --adr.install-cuda-prebuilt.local_run_file_path=/data/cuda_11.8.0_520.61.05_linux.run \
-   --adr.tensorrt.tar_file=/data/TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-11.8.tar.gz \
-   --adr.cudnn.tar_file=/data/cudnn-linux-x86_64-8.9.2.26_cuda11-archive.tar.xz \
-   --adr.compiler.tags=gcc \
-   [--custom_system=no]
-   ```
-   Use `--custom_system=no` if you are using a similar system to the [Nvidia submission systems for MLPerf inference 3.0](https://github.com/mlcommons/inference_results_v3.0/tree/main/closed/NVIDIA/systems).
-
-5. At the end of the build you'll get the following prompt unless you have chosen `--custom_system=no`. Please give a system name and say yes to generating the configuration files
-
-   ### Example output
-   ```
-   ============================================
-   => A system ID is a string containing only letters, numbers, and underscores
-   => that is used as the human-readable name of the system. It is also used as
-   => the system name when creating the measurements/ and results/ entries.
-   => This string should also start with a letter to be a valid Python enum member name.
-   => Specify the system ID to use for the current system: phoenix
-     => Reloaded system list. MATCHED_SYSTEM: KnownSystem.phoenix
-   => This script will generate Benchmark Configuration stubs for the detected system.
-   Continue? [y/n]: y
-   ```
-
-
-## Acknowledgments
-
-* A common CM interface and automation for MLPerf inference benchmarks was developed by Arjun Suresh and Grigori Fursin - sponsored by the [cTuning foundation](https://cTuning.org) and [cKnowledge.org](https://cKnowledge.org).
-* Nvidia's MLPerf inference implementation was developed by Zhihan Jiang, Ethan Cheng, Yiheng Zhang and Jinho Suh.
-
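Before the next diff, it may help to name the pattern its preprocess() function repeats for every benchmark: shell commands are queued into a list, and a symlink into the Nvidia scratch space is created only when the target is missing, which keeps reruns idempotent. A hedged sketch (the helper name and the paths are illustrative, not from the patch):

```python
# Sketch of the queue-then-link pattern used in customize.py below.
import os

def queue_link(cmds, src, dst):
    # only emit the link command when the scratch-space target is absent,
    # so a repeated run does not redo (or clobber) existing setup
    if not os.path.exists(dst):
        cmds.append(f"ln -sf {src} {dst}")

cmds = []
queue_link(cmds, "/host/imagenet-2012-val", "/scratch/data/imagenet")  # example paths
```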
model_path = fp32_model_path - elif "stable-diffusion" in env["CM_MODEL"]: + elif "stable-diffusion" in env["MLC_MODEL"]: target_data_path = os.path.join( env['MLPERF_SCRATCH_PATH'], 'data', 'coco', 'SDXL') tsv_file = os.path.join(target_data_path, "captions_5k_final.tsv") if os.path.exists(tsv_file): with open(tsv_file, "r") as file: line_count = sum(1 for line in file) - if env.get('CM_MLPERF_SUBMISSION_GENERATION_STYLE', '') == 'full': + if env.get('MLC_MLPERF_SUBMISSION_GENERATION_STYLE', '') == 'full': if line_count < 5000: shutil.rmtree(target_data_path) if not os.path.exists(tsv_file): os.makedirs(target_data_path, exist_ok=True) # cmds.append("make download_data BENCHMARKS='stable-diffusion-xl'") - env['CM_REQUIRE_COCO2014_DOWNLOAD'] = 'yes' + env['MLC_REQUIRE_COCO2014_DOWNLOAD'] = 'yes' cmds.append( - f"""cp -r \\$CM_DATASET_PATH_ROOT/captions/captions.tsv {target_data_path}/captions_5k_final.tsv""") + f"""cp -r \\$MLC_DATASET_PATH_ROOT/captions/captions.tsv {target_data_path}/captions_5k_final.tsv""") cmds.append( - f"""cp -r \\$CM_DATASET_PATH_ROOT/latents/latents.pt {target_data_path}/latents.pt""") + f"""cp -r \\$MLC_DATASET_PATH_ROOT/latents/latents.pt {target_data_path}/latents.pt""") fp16_model_path = os.path.join( env['MLPERF_SCRATCH_PATH'], 'models', @@ -129,13 +129,13 @@ def preprocess(i): if not os.path.exists(fp16_model_path): if os.path.islink(fp16_model_path): cmds.append(f"rm -f {fp16_model_path}") - env['CM_REQUIRE_SDXL_MODEL_DOWNLOAD'] = 'yes' + env['MLC_REQUIRE_SDXL_MODEL_DOWNLOAD'] = 'yes' cmds.append(f"cp -r \\$SDXL_CHECKPOINT_PATH {fp16_model_path}") model_name = "stable-diffusion-xl" model_path = fp16_model_path - elif "3d-unet" in env['CM_MODEL']: + elif "3d-unet" in env['MLC_MODEL']: target_data_path = os.path.join( env['MLPERF_SCRATCH_PATH'], 'data', @@ -153,7 +153,7 @@ def preprocess(i): if not os.path.exists(target_data_path) or not os.path.exists( inference_cases_json_path) or not os.path.exists(calibration_cases_json_path): - # cmds.append(f"ln -sf {env['CM_DATASET_PATH']} {target_data_path}") + # cmds.append(f"ln -sf {env['MLC_DATASET_PATH']} {target_data_path}") cmds.append("make download_data BENCHMARKS='3d-unet'") model_path = os.path.join( @@ -163,7 +163,7 @@ def preprocess(i): '3dUNetKiTS19.onnx') model_name = "3d-unet" - elif "rnnt" in env['CM_MODEL']: + elif "rnnt" in env['MLC_MODEL']: target_data_path = os.path.join( env['MLPERF_SCRATCH_PATH'], 'data', @@ -173,7 +173,7 @@ def preprocess(i): if not os.path.exists(target_data_path_base_dir): cmds.append(f"mkdir -p {target_data_path_base_dir}") if not os.path.exists(target_data_path): - # cmds.append(f"ln -sf {env['CM_DATASET_LIBRISPEECH_PATH']} {target_data_path}") + # cmds.append(f"ln -sf {env['MLC_DATASET_LIBRISPEECH_PATH']} {target_data_path}") cmds.append("make download_data BENCHMARKS='rnnt'") model_path = os.path.join( @@ -183,12 +183,12 @@ def preprocess(i): 'DistributedDataParallel_1576581068.9962234-epoch-100.pt') model_name = "rnnt" - elif "pdlrm" in env['CM_MODEL']: + elif "pdlrm" in env['MLC_MODEL']: target_data_path = os.path.join( env['MLPERF_SCRATCH_PATH'], 'data', 'criteo') if not os.path.exists(target_data_path): cmds.append( - f"ln -sf {env['CM_DATASET_PREPROCESSED_PATH']} {target_data_path}") + f"ln -sf {env['MLC_DATASET_PREPROCESSED_PATH']} {target_data_path}") model_path = os.path.join( env['MLPERF_SCRATCH_PATH'], @@ -200,18 +200,18 @@ def preprocess(i): if not os.path.exists(model_path): cmds.append( - f"ln -sf {env['CM_ML_MODEL_FILE_WITH_PATH']} {model_path}") + f"ln -sf 
{env['MLC_ML_MODEL_FILE_WITH_PATH']} {model_path}") model_name = "dlrm" - elif "dlrm-v2" in env['CM_MODEL']: + elif "dlrm-v2" in env['MLC_MODEL']: model_name = "dlrm-v2" - elif env['CM_MODEL'] == "retinanet": + elif env['MLC_MODEL'] == "retinanet": # print(env) - dataset_path = env['CM_DATASET_OPENIMAGES_PATH'] + dataset_path = env['MLC_DATASET_OPENIMAGES_PATH'] # return {'return': 1, 'error': 'error'} - annotations_path = env['CM_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH'] + annotations_path = env['MLC_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH'] target_data_path_dir = os.path.join( env['MLPERF_SCRATCH_PATH'], 'data', 'open-images-v6-mlperf') if not os.path.exists(target_data_path_dir): @@ -231,7 +231,7 @@ def preprocess(i): if not os.path.exists(target_data_path): cmds.append(f"ln -sf {dataset_path} {target_data_path}") - calibration_dataset_path = env['CM_OPENIMAGES_CALIBRATION_DATASET_PATH'] + calibration_dataset_path = env['MLC_OPENIMAGES_CALIBRATION_DATASET_PATH'] target_data_path_dir = os.path.join( env['MLPERF_SCRATCH_PATH'], 'data', @@ -264,7 +264,7 @@ def preprocess(i): model_name = "retinanet" - elif "gptj" in env['CM_MODEL']: + elif "gptj" in env['MLC_MODEL']: target_data_path = os.path.join( env['MLPERF_SCRATCH_PATH'], 'data', @@ -283,7 +283,7 @@ def preprocess(i): 'models', 'GPTJ-6B', 'fp8-quantized-ammo', - env['CM_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX']) + env['MLC_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX']) vocab_path = os.path.join( env['MLPERF_SCRATCH_PATH'], 'models', @@ -297,15 +297,15 @@ def preprocess(i): if not os.path.exists(fp32_model_path): # download via prehook_deps - env['CM_REQUIRE_GPTJ_MODEL_DOWNLOAD'] = 'yes' + env['MLC_REQUIRE_GPTJ_MODEL_DOWNLOAD'] = 'yes' if make_command == "build_engine": cmds.append( - f"cp -r $CM_ML_MODEL_FILE_WITH_PATH {fp32_model_path}") + f"cp -r $MLC_ML_MODEL_FILE_WITH_PATH {fp32_model_path}") model_name = "gptj" model_path = fp8_model_path - elif "llama2" in env["CM_MODEL"]: + elif "llama2" in env["MLC_MODEL"]: # path to which the data file is present target_data_path = os.path.join( env['MLPERF_SCRATCH_PATH'], @@ -317,7 +317,7 @@ def preprocess(i): 'preprocessed_data', 'open_orca', 'open_orca_gpt4_tokenized_llama.sampled_24576.pkl') - tmp_tp_size = env['CM_NVIDIA_TP_SIZE'] + tmp_tp_size = env['MLC_NVIDIA_TP_SIZE'] if tmp_tp_size == "1": fp8_model_path = os.path.join( env['MLPERF_SCRATCH_PATH'], @@ -333,13 +333,13 @@ def preprocess(i): 'fp8-quantized-ammo', f'llama2-70b-chat-hf-tp{tmp_tp_size}pp1-fp8') if not os.path.exists(target_data_file_path): - if env.get('CM_NVIDIA_LLAMA_DATASET_FILE_PATH', '') == '': + if env.get('MLC_NVIDIA_LLAMA_DATASET_FILE_PATH', '') == '': return { 'return': 1, 'error': 'Please specify the path to LLAMA2 dataset (pickle file)'} if not os.path.exists(target_data_path): cmds.append(f"mkdir {target_data_path}") cmds.append( - f"ln -sf {env['CM_NVIDIA_LLAMA_DATASET_FILE_PATH']} {target_data_file_path}") + f"ln -sf {env['MLC_NVIDIA_LLAMA_DATASET_FILE_PATH']} {target_data_file_path}") model_name = "llama2-70b" model_path = fp8_model_path @@ -347,13 +347,13 @@ def preprocess(i): # cmds.append(f"make prebuild") if make_command == "download_model": if not os.path.exists(model_path): - if "llama2" in env['CM_MODEL']: + if "llama2" in env['MLC_MODEL']: if not os.path.exists(os.path.join(model_path, 'config.json')): return { 'return': 1, 'error': f'Quantised model absent - did not detect config.json in path {model_path}'} else: cmds.append(f"make download_model BENCHMARKS='{model_name}'") - elif "stable-diffusion" in 
env['CM_MODEL']: + elif "stable-diffusion" in env['MLC_MODEL']: folders = ["clip1", "clip2", "unetxl", "vae"] for folder in folders: onnx_model_path = os.path.join( @@ -364,7 +364,7 @@ def preprocess(i): folder, 'model.onnx') if not os.path.exists(onnx_model_path): - env['CM_REQUIRE_SDXL_MODEL_DOWNLOAD'] = 'yes' + env['MLC_REQUIRE_SDXL_MODEL_DOWNLOAD'] = 'yes' cmds.append( f"make download_model BENCHMARKS='{model_name}'") break @@ -377,19 +377,19 @@ def preprocess(i): 'unetxl.int8', 'unet.onnx') if not os.path.exists(ammo_model_path): - env['CM_REQUIRE_SDXL_MODEL_DOWNLOAD'] = 'yes' + env['MLC_REQUIRE_SDXL_MODEL_DOWNLOAD'] = 'yes' cmds.append( f"make download_model BENCHMARKS='{model_name}'") else: return {'return': 0} elif make_command == "preprocess_data": - if env['CM_MODEL'] == "rnnt": + if env['MLC_MODEL'] == "rnnt": cmds.append( f"rm -rf {os.path.join(env['MLPERF_SCRATCH_PATH'], 'preprocessed_data', 'rnnt_dev_clean_500_raw')}") cmds.append( f"rm -rf {os.path.join(env['MLPERF_SCRATCH_PATH'], 'preprocessed_data', 'rnnt_train_clean_512_wav')}") - if "llama2" in env["CM_MODEL"]: + if "llama2" in env["MLC_MODEL"]: # Preprocessing script in the inference results repo is not checking whether the preprocessed # file is already there, so we are handling it here. target_preprocessed_data_path = os.path.join( @@ -405,27 +405,27 @@ def preprocess(i): else: scenario = scenario.lower() - if env['CM_MLPERF_LOADGEN_MODE'] == "accuracy": + if env['MLC_MLPERF_LOADGEN_MODE'] == "accuracy": test_mode = "AccuracyOnly" - elif env['CM_MLPERF_LOADGEN_MODE'] == "performance": + elif env['MLC_MLPERF_LOADGEN_MODE'] == "performance": test_mode = "PerformanceOnly" - elif env['CM_MLPERF_LOADGEN_MODE'] == "compliance": + elif env['MLC_MLPERF_LOADGEN_MODE'] == "compliance": test_mode = "" test_name = env.get( - 'CM_MLPERF_LOADGEN_COMPLIANCE_TEST', + 'MLC_MLPERF_LOADGEN_COMPLIANCE_TEST', 'test01').lower() - env['CM_MLPERF_NVIDIA_RUN_COMMAND'] = "run_audit_{}_once".format( + env['MLC_MLPERF_NVIDIA_RUN_COMMAND'] = "run_audit_{}_once".format( test_name) make_command = "run_audit_{}_once".format(test_name) else: return {'return': 1, 'error': 'Unsupported mode: {}'.format( - env['CM_MLPERF_LOADGEN_MODE'])} + env['MLC_MLPERF_LOADGEN_MODE'])} run_config = '' - target_qps = env.get('CM_MLPERF_LOADGEN_TARGET_QPS') - offline_target_qps = env.get('CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS') - server_target_qps = env.get('CM_MLPERF_LOADGEN_SERVER_TARGET_QPS') + target_qps = env.get('MLC_MLPERF_LOADGEN_TARGET_QPS') + offline_target_qps = env.get('MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS') + server_target_qps = env.get('MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS') if target_qps: target_qps = int(float(target_qps)) if scenario == "offline" and not offline_target_qps: @@ -440,11 +440,11 @@ def preprocess(i): server_target_qps = int(float(server_target_qps)) run_config += f" --server_target_qps={server_target_qps}" - target_latency = env.get('CM_MLPERF_LOADGEN_TARGET_LATENCY') + target_latency = env.get('MLC_MLPERF_LOADGEN_TARGET_LATENCY') singlestream_target_latency = env.get( - 'CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY') + 'MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY') multistream_target_latency = env.get( - 'CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY') + 'MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY') if target_latency: target_latency_ns = int(float(target_latency) * 1000000) if scenario == "singlestream" and not singlestream_target_latency: @@ -461,23 +461,23 @@ def preprocess(i): float(multistream_target_latency) * 1000000) 
run_config += f" --multi_stream_expected_latency_ns={multistream_target_latency_ns}" - high_accuracy = "99.9" in env['CM_MODEL'] + high_accuracy = "99.9" in env['MLC_MODEL'] config_ver_list = [] - use_lon = env.get('CM_MLPERF_NVIDIA_HARNESS_LON') + use_lon = env.get('MLC_MLPERF_NVIDIA_HARNESS_LON') if use_lon: config_ver_list.append("lon_node") # run_config += " --lon_node" - maxq = env.get('CM_MLPERF_NVIDIA_HARNESS_MAXQ') + maxq = env.get('MLC_MLPERF_NVIDIA_HARNESS_MAXQ') if maxq: config_ver_list.append("maxq") if high_accuracy: config_ver_list.append("high_accuracy") - use_triton = env.get('CM_MLPERF_NVIDIA_HARNESS_USE_TRITON') + use_triton = env.get('MLC_MLPERF_NVIDIA_HARNESS_USE_TRITON') if use_triton: run_config += " --use_triton " config_ver_list.append("triton") @@ -485,114 +485,114 @@ def preprocess(i): if config_ver_list: run_config += f" --config_ver={'_'.join(config_ver_list)}" - user_conf_path = env.get('CM_MLPERF_USER_CONF') - if user_conf_path and env['CM_MLPERF_NVIDIA_HARNESS_RUN_MODE'] == "run_harness": + user_conf_path = env.get('MLC_MLPERF_USER_CONF') + if user_conf_path and env['MLC_MLPERF_NVIDIA_HARNESS_RUN_MODE'] == "run_harness": run_config += f" --user_conf_path={user_conf_path}" - mlperf_conf_path = env.get('CM_MLPERF_INFERENCE_CONF_PATH') - if mlperf_conf_path and env['CM_MLPERF_NVIDIA_HARNESS_RUN_MODE'] == "run_harness": + mlperf_conf_path = env.get('MLC_MLPERF_INFERENCE_CONF_PATH') + if mlperf_conf_path and env['MLC_MLPERF_NVIDIA_HARNESS_RUN_MODE'] == "run_harness": run_config += f" --mlperf_conf_path={mlperf_conf_path}" - power_setting = env.get('CM_MLPERF_NVIDIA_HARNESS_POWER_SETTING') - if power_setting and env['CM_MLPERF_NVIDIA_HARNESS_RUN_MODE'] == "run_harness": + power_setting = env.get('MLC_MLPERF_NVIDIA_HARNESS_POWER_SETTING') + if power_setting and env['MLC_MLPERF_NVIDIA_HARNESS_RUN_MODE'] == "run_harness": run_config += f" --power_setting={power_setting}" - gpu_copy_streams = env.get('CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS') + gpu_copy_streams = env.get('MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS') if gpu_copy_streams: run_config += f" --gpu_copy_streams={gpu_copy_streams}" gpu_inference_streams = env.get( - 'CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS') + 'MLC_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS') if gpu_inference_streams: run_config += f" --gpu_inference_streams={gpu_inference_streams}" - dla_copy_streams = env.get('CM_MLPERF_NVIDIA_HARNESS_DLA_COPY_STREAMS') + dla_copy_streams = env.get('MLC_MLPERF_NVIDIA_HARNESS_DLA_COPY_STREAMS') if dla_copy_streams: run_config += f" --dla_copy_streams={dla_copy_streams}" dla_inference_streams = env.get( - 'CM_MLPERF_NVIDIA_HARNESS_DLA_INFERENCE_STREAMS') + 'MLC_MLPERF_NVIDIA_HARNESS_DLA_INFERENCE_STREAMS') if dla_inference_streams: run_config += f" --dla_inference_streams={dla_inference_streams}" - gpu_batch_size = env.get('CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE') + gpu_batch_size = env.get('MLC_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE') if gpu_batch_size: run_config += f" --gpu_batch_size={gpu_batch_size}" - dla_batch_size = env.get('CM_MLPERF_NVIDIA_HARNESS_DLA_BATCH_SIZE') + dla_batch_size = env.get('MLC_MLPERF_NVIDIA_HARNESS_DLA_BATCH_SIZE') if dla_batch_size: run_config += f" --dla_batch_size={dla_batch_size}" - input_format = env.get('CM_MLPERF_NVIDIA_HARNESS_INPUT_FORMAT') + input_format = env.get('MLC_MLPERF_NVIDIA_HARNESS_INPUT_FORMAT') if input_format: run_config += f" --input_format={input_format}" performance_sample_count = env.get( - 'CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT') + 
'MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT') if performance_sample_count: run_config += f" --performance_sample_count={performance_sample_count}" - devices = env.get('CM_MLPERF_NVIDIA_HARNESS_DEVICES') + devices = env.get('MLC_MLPERF_NVIDIA_HARNESS_DEVICES') if devices: run_config += f" --devices={devices}" - audio_batch_size = env.get('CM_MLPERF_NVIDIA_HARNESS_AUDIO_BATCH_SIZE') + audio_batch_size = env.get('MLC_MLPERF_NVIDIA_HARNESS_AUDIO_BATCH_SIZE') if audio_batch_size: run_config += f" --audio_batch_size={audio_batch_size}" disable_encoder_plugin = str( - env.get('CM_MLPERF_NVIDIA_HARNESS_DISABLE_ENCODER_PLUGIN', '')) + env.get('MLC_MLPERF_NVIDIA_HARNESS_DISABLE_ENCODER_PLUGIN', '')) if disable_encoder_plugin and disable_encoder_plugin.lower() not in [ "no", "false", "0", ""]: run_config += " --disable_encoder_plugin" disable_beta1_smallk = str( - env.get('CM_MLPERF_NVIDIA_HARNESS_DISABLE_BETA1_SMALLK', '')) + env.get('MLC_MLPERF_NVIDIA_HARNESS_DISABLE_BETA1_SMALLK', '')) if disable_beta1_smallk and disable_beta1_smallk.lower() in [ "yes", "true", "1"]: run_config += " --disable_beta1_smallk" - workspace_size = env.get('CM_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE') + workspace_size = env.get('MLC_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE') if workspace_size: run_config += f" --workspace_size={workspace_size}" - if env.get('CM_MLPERF_LOADGEN_LOGS_DIR'): - env['MLPERF_LOADGEN_LOGS_DIR'] = env['CM_MLPERF_LOADGEN_LOGS_DIR'] + if env.get('MLC_MLPERF_LOADGEN_LOGS_DIR'): + env['MLPERF_LOADGEN_LOGS_DIR'] = env['MLC_MLPERF_LOADGEN_LOGS_DIR'] - log_dir = env.get('CM_MLPERF_NVIDIA_HARNESS_LOG_DIR') + log_dir = env.get('MLC_MLPERF_NVIDIA_HARNESS_LOG_DIR') if log_dir: run_config += f" --log_dir={log_dir}" - use_graphs = str(env.get('CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS', '')) + use_graphs = str(env.get('MLC_MLPERF_NVIDIA_HARNESS_USE_GRAPHS', '')) if use_graphs and use_graphs.lower() not in ["no", "false", "0", ""]: run_config += " --use_graphs" use_deque_limit = str( - env.get('CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT')) + env.get('MLC_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT')) if use_deque_limit and use_deque_limit.lower() not in [ "no", "false", "0"]: run_config += " --use_deque_limit" deque_timeout_usec = env.get( - 'CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC') + 'MLC_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC') if deque_timeout_usec: run_config += f" --deque_timeout_usec={deque_timeout_usec}" use_cuda_thread_per_device = str( - env.get('CM_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE', '')) + env.get('MLC_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE', '')) if use_cuda_thread_per_device and use_cuda_thread_per_device.lower() not in [ "no", "false", "0", ""]: run_config += " --use_cuda_thread_per_device" run_infer_on_copy_streams = str( - env.get('CM_MLPERF_NVIDIA_HARNESS_RUN_INFER_ON_COPY_STREAMS', '')) + env.get('MLC_MLPERF_NVIDIA_HARNESS_RUN_INFER_ON_COPY_STREAMS', '')) if run_infer_on_copy_streams and run_infer_on_copy_streams.lower() not in [ "no", "false", "0", ""]: run_config += " --run_infer_on_copy_streams" start_from_device = str( env.get( - 'CM_MLPERF_NVIDIA_HARNESS_START_FROM_DEVICE', + 'MLC_MLPERF_NVIDIA_HARNESS_START_FROM_DEVICE', '')) if start_from_device and start_from_device.lower() not in [ "no", "false", "0", ""]: @@ -600,75 +600,75 @@ def preprocess(i): end_on_device = str( env.get( - 'CM_MLPERF_NVIDIA_HARNESS_END_ON_DEVICE', + 'MLC_MLPERF_NVIDIA_HARNESS_END_ON_DEVICE', '')) if end_on_device and end_on_device.lower() not in [ "no", "false", "0", ""]: run_config += " 
--end_on_device" - max_dlas = env.get('CM_MLPERF_NVIDIA_HARNESS_MAX_DLAS') + max_dlas = env.get('MLC_MLPERF_NVIDIA_HARNESS_MAX_DLAS') if max_dlas: run_config += f" --max_dlas={max_dlas}" graphs_max_seqlen = env.get( - 'CM_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN') + 'MLC_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN') if graphs_max_seqlen: run_config += f" --graphs_max_seqlen={graphs_max_seqlen}" num_issue_query_threads = env.get( - 'CM_MLPERF_NVIDIA_HARNESS_NUM_ISSUE_QUERY_THREADS') + 'MLC_MLPERF_NVIDIA_HARNESS_NUM_ISSUE_QUERY_THREADS') if num_issue_query_threads: run_config += f" --num_issue_query_threads={num_issue_query_threads}" - soft_drop = env.get('CM_MLPERF_NVIDIA_HARNESS_SOFT_DROP') + soft_drop = env.get('MLC_MLPERF_NVIDIA_HARNESS_SOFT_DROP') if soft_drop: run_config += f" --soft_drop={soft_drop}" use_small_tile_gemm_plugin = str( - env.get('CM_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN', '')) + env.get('MLC_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN', '')) if use_small_tile_gemm_plugin and use_small_tile_gemm_plugin.lower() not in [ "no", "false", "0", ""]: run_config += f" --use_small_tile_gemm_plugin" audio_buffer_num_lines = env.get( - 'CM_MLPERF_NVIDIA_HARNESS_AUDIO_BUFFER_NUM_LINES') + 'MLC_MLPERF_NVIDIA_HARNESS_AUDIO_BUFFER_NUM_LINES') if audio_buffer_num_lines: run_config += f" --audio_buffer_num_lines={audio_buffer_num_lines}" - use_fp8 = str(env.get('CM_MLPERF_NVIDIA_HARNESS_USE_FP8', '')) + use_fp8 = str(env.get('MLC_MLPERF_NVIDIA_HARNESS_USE_FP8', '')) if use_fp8 and use_fp8.lower() not in ["no", "false", "0", ""]: run_config += f" --use_fp8" - if "llama2" in env["CM_MODEL"]: + if "llama2" in env["MLC_MODEL"]: run_config += f" --fp8_quant_model_path={fp8_model_path}" run_config += f" --tensor_parallelism={tmp_tp_size}" - enable_sort = env.get('CM_MLPERF_NVIDIA_HARNESS_ENABLE_SORT') + enable_sort = env.get('MLC_MLPERF_NVIDIA_HARNESS_ENABLE_SORT') if enable_sort and enable_sort.lower() not in ["no", "false", "0"]: run_config += f" --enable_sort" sdxl_server_batcher_time_limit = env.get( - 'CM_MLPERF_NVIDIA_HARNESS_ENABLE_SORT') + 'MLC_MLPERF_NVIDIA_HARNESS_ENABLE_SORT') if sdxl_server_batcher_time_limit: run_config += f" --sdxl_batcher_time_limit {sdxl_server_batcher_time_limit}" num_sort_segments = env.get( - 'CM_MLPERF_NVIDIA_HARNESS_NUM_SORT_SEGMENTS') + 'MLC_MLPERF_NVIDIA_HARNESS_NUM_SORT_SEGMENTS') if num_sort_segments: run_config += f" --num_sort_segments={num_sort_segments}" embedding_weights_on_gpu_part = env.get( - 'CM_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART', '') + 'MLC_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART', '') if embedding_weights_on_gpu_part != '': run_config += f" --embedding_weights_on_gpu_part={embedding_weights_on_gpu_part}" - num_warmups = env.get('CM_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS', '') + num_warmups = env.get('MLC_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS', '') if num_warmups != '': run_config += f" --num_warmups={num_warmups}" skip_postprocess = str( env.get( - 'CM_MLPERF_NVIDIA_HARNESS_SKIP_POSTPROCESS', + 'MLC_MLPERF_NVIDIA_HARNESS_SKIP_POSTPROCESS', '')) if skip_postprocess and skip_postprocess.lower() not in [ "no", "false", "0", ""]: @@ -680,13 +680,13 @@ def preprocess(i): test_mode_string = "" extra_build_engine_options_string = env.get( - 'CM_MLPERF_NVIDIA_HARNESS_EXTRA_BUILD_ENGINE_OPTIONS', '') + 'MLC_MLPERF_NVIDIA_HARNESS_EXTRA_BUILD_ENGINE_OPTIONS', '') extra_run_options_string = env.get( - 'CM_MLPERF_NVIDIA_HARNESS_EXTRA_RUN_OPTIONS', + 'MLC_MLPERF_NVIDIA_HARNESS_EXTRA_RUN_OPTIONS', '') # will be ignored 
during build engine - if "stable-diffusion" in env["CM_MODEL"]: + if "stable-diffusion" in env["MLC_MODEL"]: extra_build_engine_options_string += f""" --model_path { os.path.join( env['MLPERF_SCRATCH_PATH'], @@ -698,9 +698,9 @@ def preprocess(i): cmds.append(f"""make {make_command} RUN_ARGS=' --benchmarks={model_name} --scenarios={scenario} {test_mode_string} {run_config} {extra_build_engine_options_string} {extra_run_options_string}'""") run_cmd = " && ".join(cmds) - env['CM_MLPERF_RUN_CMD'] = run_cmd - env['CM_RUN_CMD'] = run_cmd - env['CM_RUN_DIR'] = env['CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH'] + env['MLC_MLPERF_RUN_CMD'] = run_cmd + env['MLC_RUN_CMD'] = run_cmd + env['MLC_RUN_DIR'] = env['MLC_MLPERF_INFERENCE_NVIDIA_CODE_PATH'] # print(env) diff --git a/script/app-mlperf-inference-nvidia/meta.yaml b/script/app-mlperf-inference-nvidia/meta.yaml index 5b96c7f65..473c336c6 100644 --- a/script/app-mlperf-inference-nvidia/meta.yaml +++ b/script/app-mlperf-inference-nvidia/meta.yaml @@ -22,72 +22,72 @@ tags: # Default environment default_env: - CM_BATCH_COUNT: '1' - CM_BATCH_SIZE: '1' - CM_FAST_COMPILATION: 'yes' - CM_MLPERF_LOADGEN_SCENARIO: Offline - CM_MLPERF_LOADGEN_MODE: performance + MLC_BATCH_COUNT: '1' + MLC_BATCH_SIZE: '1' + MLC_FAST_COMPILATION: 'yes' + MLC_MLPERF_LOADGEN_SCENARIO: Offline + MLC_MLPERF_LOADGEN_MODE: performance # SKIP_POLICIES: '1' - CM_SKIP_PREPROCESS_DATASET: 'no' - CM_SKIP_MODEL_DOWNLOAD: 'no' - CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: nvidia_original - CM_MLPERF_SKIP_RUN: 'no' + MLC_SKIP_PREPROCESS_DATASET: 'no' + MLC_SKIP_MODEL_DOWNLOAD: 'no' + MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: nvidia_original + MLC_MLPERF_SKIP_RUN: 'no' env: - CM_CALL_MLPERF_RUNNER: 'no' + MLC_CALL_MLPERF_RUNNER: 'no' # Map script inputs to environment variables input_mapping: - count: CM_MLPERF_LOADGEN_QUERY_COUNT - max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE - mlperf_conf: CM_MLPERF_CONF - mode: CM_MLPERF_LOADGEN_MODE - output_dir: CM_MLPERF_OUTPUT_DIR - scenario: CM_MLPERF_LOADGEN_SCENARIO - user_conf: CM_MLPERF_USER_CONF - devices: CM_MLPERF_NVIDIA_HARNESS_DEVICES - skip_preprocess: CM_SKIP_PREPROCESS_DATASET - skip_preprocessing: CM_SKIP_PREPROCESS_DATASET - target_qps: CM_MLPERF_LOADGEN_TARGET_QPS - offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS - server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS - target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY - singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY - multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY - use_triton: CM_MLPERF_NVIDIA_HARNESS_USE_TRITON - gpu_copy_streams: CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS - gpu_inference_streams: CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS - gpu_batch_size: CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE - dla_copy_streams: CM_MLPERF_NVIDIA_HARNESS_DLA_COPY_STREAMS - dla_inference_streams: CM_MLPERF_NVIDIA_HARNESS_DLA_INFERENCE_STREAMS - dla_batch_size: CM_MLPERF_NVIDIA_HARNESS_DLA_BATCH_SIZE - input_format: CM_MLPERF_NVIDIA_HARNESS_INPUT_FORMAT - performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT - workspace_size: CM_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE - log_dir: CM_MLPERF_NVIDIA_HARNESS_LOG_DIR - use_graphs: CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS - run_infer_on_copy_streams: CM_MLPERF_NVIDIA_HARNESS_RUN_INFER_ON_COPY_STREAMS - start_from_device: CM_MLPERF_NVIDIA_HARNESS_START_FROM_DEVICE - end_on_device: CM_MLPERF_NVIDIA_HARNESS_END_ON_DEVICE - max_dlas: CM_MLPERF_NVIDIA_HARNESS_MAX_DLAS - power_setting: 
CM_MLPERF_NVIDIA_HARNESS_POWER_SETTING + count: MLC_MLPERF_LOADGEN_QUERY_COUNT + max_batchsize: MLC_MLPERF_LOADGEN_MAX_BATCHSIZE + mlperf_conf: MLC_MLPERF_CONF + mode: MLC_MLPERF_LOADGEN_MODE + output_dir: MLC_MLPERF_OUTPUT_DIR + scenario: MLC_MLPERF_LOADGEN_SCENARIO + user_conf: MLC_MLPERF_USER_CONF + devices: MLC_MLPERF_NVIDIA_HARNESS_DEVICES + skip_preprocess: MLC_SKIP_PREPROCESS_DATASET + skip_preprocessing: MLC_SKIP_PREPROCESS_DATASET + target_qps: MLC_MLPERF_LOADGEN_TARGET_QPS + offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS + server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS + target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY + singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY + multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY + use_triton: MLC_MLPERF_NVIDIA_HARNESS_USE_TRITON + gpu_copy_streams: MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS + gpu_inference_streams: MLC_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS + gpu_batch_size: MLC_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE + dla_copy_streams: MLC_MLPERF_NVIDIA_HARNESS_DLA_COPY_STREAMS + dla_inference_streams: MLC_MLPERF_NVIDIA_HARNESS_DLA_INFERENCE_STREAMS + dla_batch_size: MLC_MLPERF_NVIDIA_HARNESS_DLA_BATCH_SIZE + input_format: MLC_MLPERF_NVIDIA_HARNESS_INPUT_FORMAT + performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT + workspace_size: MLC_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE + log_dir: MLC_MLPERF_NVIDIA_HARNESS_LOG_DIR + use_graphs: MLC_MLPERF_NVIDIA_HARNESS_USE_GRAPHS + run_infer_on_copy_streams: MLC_MLPERF_NVIDIA_HARNESS_RUN_INFER_ON_COPY_STREAMS + start_from_device: MLC_MLPERF_NVIDIA_HARNESS_START_FROM_DEVICE + end_on_device: MLC_MLPERF_NVIDIA_HARNESS_END_ON_DEVICE + max_dlas: MLC_MLPERF_NVIDIA_HARNESS_MAX_DLAS + power_setting: MLC_MLPERF_NVIDIA_HARNESS_POWER_SETTING make_cmd: MLPERF_NVIDIA_RUN_COMMAND - rerun: CM_RERUN - extra_run_options: CM_MLPERF_NVIDIA_HARNESS_EXTRA_RUN_OPTIONS - use_deque_limit: CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT - deque_timeout_usec: CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC - use_cuda_thread_per_device: CM_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE - num_warmups: CM_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS - graphs_max_seqlen: CM_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN - num_issue_query_threads: CM_MLPERF_NVIDIA_HARNESS_NUM_ISSUE_QUERY_THREADS - soft_drop: CM_MLPERF_NVIDIA_HARNESS_SOFT_DROP - use_small_tile_gemm_plugin: CM_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN - audio_buffer_num_lines: CM_MLPERF_NVIDIA_HARNESS_AUDIO_BUFFER_NUM_LINES - use_fp8: CM_MLPERF_NVIDIA_HARNESS_USE_FP8 - enable_sort: CM_MLPERF_NVIDIA_HARNESS_ENABLE_SORT - num_sort_segments: CM_MLPERF_NVIDIA_HARNESS_NUM_SORT_SEGMENTS - skip_postprocess: CM_MLPERF_NVIDIA_HARNESS_SKIP_POSTPROCESS - embedding_weights_on_gpu_part: CM_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART - sdxl_batcher_time_limit: CM_MLPERF_NVIDIA_HARNESS_SDXL_SERVER_BATCHER_TIME_LIMIT + rerun: MLC_RERUN + extra_run_options: MLC_MLPERF_NVIDIA_HARNESS_EXTRA_RUN_OPTIONS + use_deque_limit: MLC_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT + deque_timeout_usec: MLC_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC + use_cuda_thread_per_device: MLC_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE + num_warmups: MLC_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS + graphs_max_seqlen: MLC_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN + num_issue_query_threads: MLC_MLPERF_NVIDIA_HARNESS_NUM_ISSUE_QUERY_THREADS + soft_drop: MLC_MLPERF_NVIDIA_HARNESS_SOFT_DROP + use_small_tile_gemm_plugin: 
MLC_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN + audio_buffer_num_lines: MLC_MLPERF_NVIDIA_HARNESS_AUDIO_BUFFER_NUM_LINES + use_fp8: MLC_MLPERF_NVIDIA_HARNESS_USE_FP8 + enable_sort: MLC_MLPERF_NVIDIA_HARNESS_ENABLE_SORT + num_sort_segments: MLC_MLPERF_NVIDIA_HARNESS_NUM_SORT_SEGMENTS + skip_postprocess: MLC_MLPERF_NVIDIA_HARNESS_SKIP_POSTPROCESS + embedding_weights_on_gpu_part: MLC_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART + sdxl_batcher_time_limit: MLC_MLPERF_NVIDIA_HARNESS_SDXL_SERVER_BATCHER_TIME_LIMIT # Dependencies on other CM scripts @@ -116,19 +116,19 @@ deps: # Install ResNet50 model (ONNX) and ImageNet - enable_if_env: - CM_MODEL: + MLC_MODEL: - resnet50 skip_if_env: - CM_USE_DATASET_FROM_HOST: + MLC_USE_DATASET_FROM_HOST: - 'yes' - CM_RUN_STATE_DOCKER: + MLC_RUN_STATE_DOCKER: - 'yes' names: - imagenet-original tags: get,dataset,original,imagenet,_full - enable_if_env: - CM_MODEL: + MLC_MODEL: - resnet50 names: - resnet50-model @@ -139,7 +139,7 @@ deps: # Install kits19 dataset - enable_if_env: - CM_MODEL: + MLC_MODEL: - 3d-unet-99-disabled - 3d-unet-99.9-disabled names: @@ -151,7 +151,7 @@ deps: # Install librispeech dataset - enable_if_env: - CM_MODEL: + MLC_MODEL: - rnnt names: - librispeech-original @@ -161,13 +161,13 @@ deps: # Install criteo dataset - enable_if_env: - CM_MODEL: + MLC_MODEL: - dlrm-v2-99 - dlrm-v2-99.9 skip_if_any_env: DLRM_DATA_PATH: - 'on' - CM_RUN_STATE_DOCKER: + MLC_RUN_STATE_DOCKER: - 'yes' names: - criteo-preprocessed @@ -176,13 +176,13 @@ deps: ######################################################################## # Install dlrm model - enable_if_env: - CM_MODEL: + MLC_MODEL: - dlrm-v2-99 - dlrm-v2-99.9 skip_if_any_env: DLRM_DATA_PATH: - on - CM_RUN_STATE_DOCKER: + MLC_RUN_STATE_DOCKER: - 'yes' names: - dlrm-model @@ -191,7 +191,7 @@ deps: ######################################################################## # Install bert models - enable_if_env: - CM_MODEL: + MLC_MODEL: - bert-99 - bert-99.9 names: @@ -200,7 +200,7 @@ deps: tags: get,ml-model,bert,_onnx,_fp32 - enable_if_env: - CM_MODEL: + MLC_MODEL: - bert-99 - bert-99.9 names: @@ -209,7 +209,7 @@ deps: tags: get,ml-model,bert,_onnx,_int8 - enable_if_env: - CM_MODEL: + MLC_MODEL: - bert-99 - bert-99.9 names: @@ -220,24 +220,24 @@ deps: # Install OpenImages - enable_if_env: - CM_MODEL: + MLC_MODEL: - retinanet skip_if_env: - CM_USE_DATASET_FROM_HOST: + MLC_USE_DATASET_FROM_HOST: - 'yes' - CM_RUN_STATE_DOCKER: + MLC_RUN_STATE_DOCKER: - 'yes' names: - openimages-original tags: get,dataset,original,openimages,_validation,_full,_custom-annotations - enable_if_env: - CM_MODEL: + MLC_MODEL: - retinanet skip_if_env: - CM_USE_DATASET_FROM_HOST: + MLC_USE_DATASET_FROM_HOST: - 'yes' - CM_RUN_STATE_DOCKER: + MLC_RUN_STATE_DOCKER: - 'yes' names: - openimages-calibration @@ -258,9 +258,9 @@ deps: - tags: pull,git,repo env: - CM_GIT_CHECKOUT_PATH: '<<>>' + MLC_GIT_CHECKOUT_PATH: '<<>>' enable_if_env: - CM_MLPERF_INFERENCE_PULL_CODE_CHANGES: + MLC_MLPERF_INFERENCE_PULL_CODE_CHANGES: - 'yes' # Creates user conf for given SUT @@ -268,7 +268,7 @@ deps: names: - user-conf-generator enable_if_env: - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: + MLC_MLPERF_NVIDIA_HARNESS_RUN_MODE: - run_harness - tags: get,generic-python-lib,_package.pycuda @@ -277,16 +277,16 @@ deps: - tags: get,generic-python-lib,_package.nvmitten update_tags_from_env_with_prefix: _path.: - - CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH + - MLC_ENV_NVMITTEN_DOCKER_WHEEL_PATH enable_if_env: - CM_RUN_STATE_DOCKER: + MLC_RUN_STATE_DOCKER: - 'yes' - True - 
'True' - tags: get,nvidia,mitten skip_if_env: - CM_RUN_STATE_DOCKER: + MLC_RUN_STATE_DOCKER: - 'yes' - True - 'True' @@ -295,9 +295,9 @@ prehook_deps: ######################################################################## # Install GPTJ-6B model - enable_if_env: - CM_REQUIRE_GPTJ_MODEL_DOWNLOAD: + MLC_REQUIRE_GPTJ_MODEL_DOWNLOAD: - 'yes' - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: + MLC_MLPERF_NVIDIA_HARNESS_RUN_MODE: - download_model - preprocess_data names: @@ -306,9 +306,9 @@ prehook_deps: # Download model for sdxl - enable_if_env: - CM_MODEL: + MLC_MODEL: - stable-diffusion-xl - CM_REQUIRE_SDXL_MODEL_DOWNLOAD: + MLC_REQUIRE_SDXL_MODEL_DOWNLOAD: - 'yes' names: - stable-diffusion-xl @@ -316,16 +316,16 @@ prehook_deps: - ml-model tags: get,ml-model,sdxl,_fp16,_rclone skip_if_env: - CM_RUN_STATE_DOCKER: + MLC_RUN_STATE_DOCKER: - 'yes' - CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST: + MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST: - 'yes' # Install coco2014 dataset - enable_if_env: - CM_REQUIRE_COCO2014_DOWNLOAD: + MLC_REQUIRE_COCO2014_DOWNLOAD: - 'yes' - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: + MLC_MLPERF_NVIDIA_HARNESS_RUN_MODE: - preprocess_data names: - coco2014-dataset @@ -338,12 +338,12 @@ post_deps: - runner - mlperf-runner skip_if_env: - CM_MLPERF_SKIP_RUN: + MLC_MLPERF_SKIP_RUN: - 'yes' - yes tags: benchmark-mlperf enable_if_env: - CM_CALL_MLPERF_RUNNER: + MLC_CALL_MLPERF_RUNNER: - yes - tags: save,mlperf,inference,state names: @@ -355,8 +355,8 @@ variations: v4.1: group: version env: - CM_MLPERF_INFERENCE_CODE_VERSION: "v4.1" - CM_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX: GPTJ-FP8-quantized + MLC_MLPERF_INFERENCE_CODE_VERSION: "v4.1" + MLC_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX: GPTJ-FP8-quantized adr: pytorch: tags: _for-nvidia-mlperf-inference-v4.1 @@ -365,8 +365,8 @@ variations: group: version default: true env: - CM_MLPERF_INFERENCE_CODE_VERSION: "v4.0" - CM_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX: GPTJ-FP8-quantized + MLC_MLPERF_INFERENCE_CODE_VERSION: "v4.0" + MLC_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX: GPTJ-FP8-quantized adr: pytorch: tags: _for-nvidia-mlperf-inference-v4.0 @@ -374,15 +374,15 @@ variations: v4.0: group: version env: - CM_MLPERF_INFERENCE_CODE_VERSION: "v4.0" - CM_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX: GPTJ-FP8-quantized + MLC_MLPERF_INFERENCE_CODE_VERSION: "v4.0" + MLC_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX: GPTJ-FP8-quantized adr: pytorch: tags: _for-nvidia-mlperf-inference-v4.0 v3.1: env: - CM_MLPERF_INFERENCE_CODE_VERSION: "v3.1" - CM_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX: GPTJ-07142023.pth + MLC_MLPERF_INFERENCE_CODE_VERSION: "v3.1" + MLC_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX: GPTJ-07142023.pth adr: pytorch: tags: _for-nvidia-mlperf-inference-v3.1 @@ -391,31 +391,31 @@ variations: cpu: group: device env: - CM_MLPERF_DEVICE: cpu + MLC_MLPERF_DEVICE: cpu cuda: group: device default: true env: - CM_MLPERF_DEVICE: gpu - CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart + MLC_MLPERF_DEVICE: gpu + MLC_MLPERF_DEVICE_LIB_NAMESPEC: cudart tensorrt: group: backend default: true env: - CM_MLPERF_BACKEND: tensorrt - CM_MLPERF_BACKEND_NAME: TensorRT + MLC_MLPERF_BACKEND: tensorrt + MLC_MLPERF_BACKEND_NAME: TensorRT # Reference MLPerf models resnet50: group: model default: true env: - CM_MODEL: resnet50 - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion - CM_ML_MODEL_INPUTS_DATA_TYPE: int8 - CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 - CM_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS: 10 + MLC_MODEL: resnet50 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion + MLC_ML_MODEL_INPUTS_DATA_TYPE: int8 + 
MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + MLC_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS: 10 deps: - tags: get,generic-python-lib,_onnx-graphsurgeon version: 0.3.27 @@ -425,11 +425,11 @@ variations: retinanet: group: model env: - CM_MODEL: retinanet - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth" - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion - CM_ML_MODEL_INPUTS_DATA_TYPE: int8 - CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + MLC_MODEL: retinanet + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth" + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion + MLC_ML_MODEL_INPUTS_DATA_TYPE: int8 + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8 deps: - tags: get,generic-python-lib,_Pillow - tags: get,generic-python-lib,_opencv-python @@ -442,15 +442,15 @@ variations: sdxl: new_env_keys: - - CM_SDXL_ACCURACY_RUN_DEVICE + - MLC_SDXL_ACCURACY_RUN_DEVICE group: model env: - CM_MODEL: stable-diffusion-xl - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://github.com/mlcommons/cm4mlops/blob/main/script/get-ml-model-stable-diffusion/_cm.json#L174" - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: "quantization, affine fusion" - CM_ML_MODEL_INPUTS_DATA_TYPE: int32 - CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 - CM_SDXL_ACCURACY_RUN_DEVICE: "gpu" + MLC_MODEL: stable-diffusion-xl + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://github.com/mlcommons/cm4mlops/blob/main/script/get-ml-model-stable-diffusion/_cm.json#L174" + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: "quantization, affine fusion" + MLC_ML_MODEL_INPUTS_DATA_TYPE: int32 + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + MLC_SDXL_ACCURACY_RUN_DEVICE: "gpu" deps: - tags: get,generic-python-lib,_package.diffusers names: @@ -498,8 +498,8 @@ variations: - nvidia-ammo version: 0.7.4 env: - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: "https://pypi.nvidia.com" - CM_GENERIC_PYTHON_PIP_EXTRA: "--no-cache-dir" + MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: "https://pypi.nvidia.com" + MLC_GENERIC_PYTHON_PIP_EXTRA: "--no-cache-dir" - tags: get,generic-python-lib,_package.optimum names: - optimum @@ -538,22 +538,22 @@ variations: base: - bert_ env: - CM_MODEL: bert-99 - CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx" - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion - CM_ML_MODEL_INPUTS_DATA_TYPE: int32 - CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + MLC_MODEL: bert-99 + MLC_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx" + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion + MLC_ML_MODEL_INPUTS_DATA_TYPE: int32 + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8 bert-99.9: group: model base: - bert_ env: - CM_MODEL: bert-99.9 - CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx" - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion - CM_ML_MODEL_INPUTS_DATA_TYPE: int32 - CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp16 + MLC_MODEL: bert-99.9 + MLC_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx" + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion + MLC_ML_MODEL_INPUTS_DATA_TYPE: int32 + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp16 3d-unet_: deps: @@ -571,31 +571,31 @@ variations: base: - 3d-unet_ env: - CM_MODEL: 3d-unet-99 - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.onnx" - 
CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion - CM_ML_MODEL_INPUTS_DATA_TYPE: int8 - CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + MLC_MODEL: 3d-unet-99 + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.onnx" + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion + MLC_ML_MODEL_INPUTS_DATA_TYPE: int8 + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8 3d-unet-99.9: group: model base: - 3d-unet_ env: - CM_MODEL: 3d-unet-99.9 - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.onnx" - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion - CM_ML_MODEL_INPUTS_DATA_TYPE: int8 - CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + MLC_MODEL: 3d-unet-99.9 + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.onnx" + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion + MLC_ML_MODEL_INPUTS_DATA_TYPE: int8 + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8 rnnt: group: model env: - CM_MODEL: rnnt - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3662521/files/DistributedDataParallel_1576581068.9962234-epoch-100.pt" - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion - CM_ML_MODEL_INPUTS_DATA_TYPE: fp16 - CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp16 + MLC_MODEL: rnnt + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3662521/files/DistributedDataParallel_1576581068.9962234-epoch-100.pt" + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion + MLC_ML_MODEL_INPUTS_DATA_TYPE: fp16 + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp16 deps: - tags: get,generic-python-lib,_toml - tags: get,generic-python-lib,_torchvision_cuda @@ -613,8 +613,8 @@ variations: dlrm_: new_env_keys: - - CM_DLRM_V2_DAY23_FILE_PATH - - CM_DLRM_V2_AGGREGATION_TRACE_FILE_PATH + - MLC_DLRM_V2_DAY23_FILE_PATH + - MLC_DLRM_V2_AGGREGATION_TRACE_FILE_PATH deps: - tags: get,dlrm,data,mlperf,inference,_nvidia - tags: get,generic-python-lib,_package.torchsnapshot @@ -630,20 +630,20 @@ variations: base: - dlrm_ env: - CM_MODEL: dlrm-v2-99 - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: affine fusion - CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 - CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp16 + MLC_MODEL: dlrm-v2-99 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: affine fusion + MLC_ML_MODEL_INPUTS_DATA_TYPE: fp32 + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp16 dlrm-v2-99.9: group: model base: - dlrm_ env: - CM_MODEL: dlrm-v2-99.9 - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: affine fusion - CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 - CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp16 + MLC_MODEL: dlrm-v2-99.9 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: affine fusion + MLC_ML_MODEL_INPUTS_DATA_TYPE: fp32 + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp16 llama2-70b_: deps: @@ -678,24 +678,24 @@ variations: names: - rouge-score env: - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://github.com/mlcommons/cm4mlops/blob/b18ff890ff559e21d2e27a3b54cd26467ac1fd9e/script/get-ml-model-llama2/_cm.json#L51" - CM_ML_MODEL_INPUTS_DATA_TYPE: int32 - CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp16 - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://github.com/mlcommons/cm4mlops/blob/b18ff890ff559e21d2e27a3b54cd26467ac1fd9e/script/get-ml-model-llama2/_cm.json#L51" + MLC_ML_MODEL_INPUTS_DATA_TYPE: int32 + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp16 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion llama2-70b-99: group: model base: - llama2-70b_ env: - CM_MODEL: llama2-70b-99 + 
MLC_MODEL: llama2-70b-99 llama2-70b-99.9: group: model base: - llama2-70b_ env: - CM_MODEL: llama2-70b-99.9 + MLC_MODEL: llama2-70b-99.9 gptj_: deps: @@ -706,7 +706,7 @@ variations: - tags: get,generic-python-lib,_onnx-graphsurgeon - tags: get,generic-python-lib,_package.sympy env: - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://cloud.mlcommons.org/index.php/s/QAZ2oM94MkFtbQx/download" + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://cloud.mlcommons.org/index.php/s/QAZ2oM94MkFtbQx/download" gptj_,build: deps: @@ -729,33 +729,33 @@ variations: base: - gptj_ env: - CM_MODEL: gptj-99 - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion - CM_ML_MODEL_INPUTS_DATA_TYPE: int32 - CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp16 + MLC_MODEL: gptj-99 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion + MLC_ML_MODEL_INPUTS_DATA_TYPE: int32 + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp16 gptj-99.9: group: model base: - gptj_ env: - CM_MODEL: gptj-99.9 - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion - CM_ML_MODEL_INPUTS_DATA_TYPE: int32 - CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp16 + MLC_MODEL: gptj-99.9 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion + MLC_ML_MODEL_INPUTS_DATA_TYPE: int32 + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp16 batch_size.#: group: batch-size env: - CM_MODEL_BATCH_SIZE: "#" - CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE: "#" - #CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: "gpu_batch_size.#" + MLC_MODEL_BATCH_SIZE: "#" + MLC_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE: "#" + #MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: "gpu_batch_size.#" dla_batch_size.#: group: dla-batch-size env: - CM_MLPERF_NVIDIA_HARNESS_DLA_BATCH_SIZE: "#" - CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX2: "dla_batch_size.#" + MLC_MLPERF_NVIDIA_HARNESS_DLA_BATCH_SIZE: "#" + MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX2: "dla_batch_size.#" adr: build-engine: tags: _dla_batch_size.# @@ -763,25 +763,25 @@ variations: use_triton: group: triton env: - CM_MLPERF_NVIDIA_HARNESS_USE_TRITON: "yes" - CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX3: "using_triton" + MLC_MLPERF_NVIDIA_HARNESS_USE_TRITON: "yes" + MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX3: "using_triton" use-graphs: group: graphs env: - CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: "yes" + MLC_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: "yes" prebuild: group: run-mode env: MLPERF_NVIDIA_RUN_COMMAND: prebuild - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: prebuild + MLC_MLPERF_NVIDIA_HARNESS_RUN_MODE: prebuild build: group: run-mode env: MLPERF_NVIDIA_RUN_COMMAND: build - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: build + MLC_MLPERF_NVIDIA_HARNESS_RUN_MODE: build deps: - tags: get,cmake version_min: "3.18" @@ -826,12 +826,12 @@ variations: maxq: group: power-mode env: - CM_MLPERF_NVIDIA_HARNESS_MAXQ: yes + MLC_MLPERF_NVIDIA_HARNESS_MAXQ: yes maxn: group: power-mode env: - CM_MLPERF_NVIDIA_HARNESS_MAXN: yes + MLC_MLPERF_NVIDIA_HARNESS_MAXN: yes preprocess-data: alias: preprocess_data @@ -840,7 +840,7 @@ variations: group: run-mode env: MLPERF_NVIDIA_RUN_COMMAND: preprocess_data - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: preprocess_data + MLC_MLPERF_NVIDIA_HARNESS_RUN_MODE: preprocess_data download-model: alias: download-model @@ -849,18 +849,18 @@ variations: group: run-mode env: MLPERF_NVIDIA_RUN_COMMAND: download_model - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: download_model + MLC_MLPERF_NVIDIA_HARNESS_RUN_MODE: download_model deps: - tags: get,generic-python-lib,_torch_cuda enable_if_env: - CM_MODEL: + MLC_MODEL: - retinanet calibrate: group: run-mode env: MLPERF_NVIDIA_RUN_COMMAND: calibrate - 
CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: calibrate + MLC_MLPERF_NVIDIA_HARNESS_RUN_MODE: calibrate deps: - tags: reproduce,mlperf,inference,nvidia,harness,_download_model inherit_variation_tags: true @@ -874,7 +874,7 @@ variations: - batch-size - triton skip_if_env: - CM_MODEL: + MLC_MODEL: - retinanet_old - resnet50 - bert-99 @@ -891,7 +891,7 @@ variations: loadgen-scenario: offline env: MLPERF_NVIDIA_RUN_COMMAND: generate_engines - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: generate_engines + MLC_MLPERF_NVIDIA_HARNESS_RUN_MODE: generate_engines deps: # Detect CUDA - names: @@ -923,7 +923,7 @@ variations: - triton - build-engine-options skip_if_env: - CM_MODEL: + MLC_MODEL: - dlrm-v2-99 - dlrm-v2-99.9 @@ -941,7 +941,7 @@ variations: - power-mode - build-engine-options skip_if_env: - CM_MODEL: + MLC_MODEL: - retinanet_old - resnet50 - bert-99 @@ -952,7 +952,7 @@ variations: - tags: reproduce,mlperf,inference,nvidia,harness,_calibrate inherit_variation_tags: true enable_if_env: - CM_MODEL: + MLC_MODEL: - retinanet force_cache: true skip_inherit_variation_groups: @@ -971,20 +971,20 @@ variations: singlestream: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: SingleStream + MLC_MLPERF_LOADGEN_SCENARIO: SingleStream CUDA_VISIBLE_DEVICES_NOT_USED: "0" multistream: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: MultiStream + MLC_MLPERF_LOADGEN_SCENARIO: MultiStream offline: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: Offline + MLC_MLPERF_LOADGEN_SCENARIO: Offline server: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: Server + MLC_MLPERF_LOADGEN_SCENARIO: Server run-harness: alis: run_harness @@ -1036,7 +1036,7 @@ variations: - build-engine-options force_cache: true skip_if_env: - CM_MODEL: + MLC_MODEL: - dlrm-v2-99 - dlrm-v2-99.9 @@ -1053,7 +1053,7 @@ variations: - build-engine-options force_cache: true skip_if_env: - CM_MODEL: + MLC_MODEL: - retinanet - resnet50 - bert-99 @@ -1062,73 +1062,73 @@ variations: - dlrm-v2-99.9 - stable-diffusion-xl env: - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: run_harness + MLC_MLPERF_NVIDIA_HARNESS_RUN_MODE: run_harness MLPERF_NVIDIA_RUN_COMMAND: run_harness - CM_CALL_MLPERF_RUNNER: 'yes' + MLC_CALL_MLPERF_RUNNER: 'yes' new_env_keys: - - CM_MLPERF_* - - CM_DATASET_* - - CM_ML_MODEL_* - - CM_HW_NAME - - CM_MAX_EXAMPLES + - MLC_MLPERF_* + - MLC_DATASET_* + - MLC_ML_MODEL_* + - MLC_HW_NAME + - MLC_MAX_EXAMPLES new_state_keys: - mlperf-inference-implementation - - CM_SUT_* + - MLC_SUT_* build_engine_options.#: group: build-engine-options env: - CM_MLPERF_NVIDIA_HARNESS_EXTRA_BUILD_ENGINE_OPTIONS: "#" + MLC_MLPERF_NVIDIA_HARNESS_EXTRA_BUILD_ENGINE_OPTIONS: "#" gpu_memory.16: group: device-memory env: - CM_NVIDIA_GPU_MEMORY: "16" + MLC_NVIDIA_GPU_MEMORY: "16" gpu_memory.24: group: device-memory env: - CM_NVIDIA_GPU_MEMORY: "24" + MLC_NVIDIA_GPU_MEMORY: "24" gpu_memory.8: group: device-memory env: - CM_NVIDIA_GPU_MEMORY: "8" + MLC_NVIDIA_GPU_MEMORY: "8" gpu_memory.32: group: device-memory env: - CM_NVIDIA_GPU_MEMORY: "32" + MLC_NVIDIA_GPU_MEMORY: "32" gpu_memory.40: group: device-memory env: - CM_NVIDIA_GPU_MEMORY: "40" + MLC_NVIDIA_GPU_MEMORY: "40" gpu_memory.48: group: device-memory env: - CM_NVIDIA_GPU_MEMORY: "48" + MLC_NVIDIA_GPU_MEMORY: "48" gpu_memory.80: group: device-memory env: - CM_NVIDIA_GPU_MEMORY: "80" + MLC_NVIDIA_GPU_MEMORY: "80" gpu_memory.#: group: device-memory env: - CM_NVIDIA_GPU_MEMORY: "#" + MLC_NVIDIA_GPU_MEMORY: "#" singlestream,resnet50: env: - CM_MLPERF_NVIDIA_HARNESS_DISABLE_BETA1_SMALLK: yes + 
MLC_MLPERF_NVIDIA_HARNESS_DISABLE_BETA1_SMALLK: yes SKIP_POLICIES: '0' # skip_policies used to give better latency but is not working with 4.0 and later Nvidia codes server,resnet50: env: - CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 2000 - CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: True - CM_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE: True - CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: 9 - CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: 2 + MLC_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 2000 + MLC_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: True + MLC_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE: True + MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: 9 + MLC_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: 2 multistream,resnet50: env: - CM_MLPERF_NVIDIA_HARNESS_DISABLE_BETA1_SMALLK: yes + MLC_MLPERF_NVIDIA_HARNESS_DISABLE_BETA1_SMALLK: yes SKIP_POLICIES: '0' singlestream,run_harness: @@ -1137,7 +1137,7 @@ variations: llama2-70b_,run_harness: env: - CM_MLPERF_NVIDIA_HARNESS_USE_FP8: 'True' + MLC_MLPERF_NVIDIA_HARNESS_USE_FP8: 'True' gptj_,run_harness: deps: @@ -1147,10 +1147,10 @@ variations: - tags: get,cmake version_min: "3.25.0" env: - CM_MLPERF_NVIDIA_HARNESS_USE_FP8: 'True' - CM_MLPERF_NVIDIA_HARNESS_ENABLE_SORT: 'True' - CM_MLPERF_NVIDIA_HARNESS_NUM_SORT_SEGMENTS: '2' - CM_MLPERF_NVIDIA_HARNESS_SKIP_POSTPROCESS: True + MLC_MLPERF_NVIDIA_HARNESS_USE_FP8: 'True' + MLC_MLPERF_NVIDIA_HARNESS_ENABLE_SORT: 'True' + MLC_MLPERF_NVIDIA_HARNESS_NUM_SORT_SEGMENTS: '2' + MLC_MLPERF_NVIDIA_HARNESS_SKIP_POSTPROCESS: True gpu_memory.80,num-gpus.2,llama2-70b,offline,run_harness: default_variations: @@ -1244,13 +1244,13 @@ variations: default_variations: batch-size: batch_size.64 env: - CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4" + MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4" gpu_memory.16,resnet50,offline,run_harness: default_variations: batch-size: batch_size.1024 env: - CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4" + MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4" gpu_memory.40,resnet50,offline,run_harness: default_variations: @@ -1275,13 +1275,13 @@ variations: num-gpus.#: group: num-gpus env: - CM_NVIDIA_NUM_GPUS: "#" + MLC_NVIDIA_NUM_GPUS: "#" num-gpus.1: group: num-gpus default: true env: - CM_NVIDIA_NUM_GPUS: "1" + MLC_NVIDIA_NUM_GPUS: "1" resnet50,server,run_harness: default_variations: @@ -1323,8 +1323,8 @@ variations: default_variations: batch-size: batch_size.2 env: - CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" - CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" + MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" + MLC_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" gpu_memory.80,retinanet,offline,run_harness: default_variations: @@ -1334,8 +1334,8 @@ variations: default_variations: batch-size: batch_size.8 env: - CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" - CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" + MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" + MLC_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" gpu_memory.8,rnnt,offline,run_harness: default_variations: @@ -1401,13 +1401,13 @@ variations: default_variations: batch-size: batch_size.1400 env: - CM_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART: "0.40" + MLC_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART: "0.40" gpu_memory.24,dlrm_,offline,run_harness: default_variations: batch-size: batch_size.1400 env: - CM_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART: "0.30" + MLC_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART: "0.30" gpu_memory.32,dlrm_,offline,run_harness: default_variations: @@ -1417,7 
+1417,7 @@ variations: default_variations: batch-size: batch_size.1400 env: - CM_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART: "0.50" + MLC_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART: "0.50" gpu_memory.80,dlrm_,offline,run_harness: default_variations: @@ -1426,13 +1426,13 @@ variations: orin: group: gpu-name env: - CM_NVIDIA_CUSTOM_GPU: "yes" - CM_MODEL_BATCH_SIZE: "" #we pick from nvidia config - CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE: "<<>>" + MLC_NVIDIA_CUSTOM_GPU: "yes" + MLC_MODEL_BATCH_SIZE: "" #we pick from nvidia config + MLC_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE: "<<>>" orin,rnnt,singlestream,run_harness: env: - CM_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS: "1" + MLC_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS: "1" orin,sdxl,offline,run_harness: default_variations: @@ -1441,7 +1441,7 @@ variations: rtx_4090: group: gpu-name env: - CM_NVIDIA_CUSTOM_GPU: "yes" + MLC_NVIDIA_CUSTOM_GPU: "yes" rtx_4090,sdxl,offline,run_harness: default_variations: @@ -1466,15 +1466,15 @@ variations: default_variations: batch-size: batch_size.2 env: - CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" - CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" + MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" + MLC_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" rtx_4090,retinanet,server,run_harness: default_variations: batch-size: batch_size.2 env: - CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" - CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" + MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" + MLC_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" rtx_4090,bert_,offline,run_harness: default_variations: @@ -1512,12 +1512,12 @@ variations: default_variations: batch-size: batch_size.1400 env: - CM_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART: "0.30" + MLC_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART: "0.30" a6000: group: gpu-name env: - CM_NVIDIA_CUSTOM_GPU: "yes" + MLC_NVIDIA_CUSTOM_GPU: "yes" rtx_a6000,resnet50,offline,run_harness: default_variations: @@ -1566,7 +1566,7 @@ variations: rtx_6000_ada: group: gpu-name env: - CM_NVIDIA_CUSTOM_GPU: "yes" + MLC_NVIDIA_CUSTOM_GPU: "yes" rtx_6000_ada,resnet50,offline,run_harness: default_variations: @@ -1615,63 +1615,63 @@ variations: l4: group: gpu-name env: - CM_NVIDIA_CUSTOM_GPU: "yes" + MLC_NVIDIA_CUSTOM_GPU: "yes" l4,sdxl,offline,run_harness: default_variations: batch-size: batch_size.1 env: - CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True' - CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS: 0.6 + MLC_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True' + MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS: 0.6 l4,sdxl,offline,run_harness,num-gpu.8: default_variations: batch-size: batch_size.1 env: - CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True' - CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS: 4.8 + MLC_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True' + MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS: 4.8 l4,sdxl,server,run_harness,num-gpu.1: default_variations: batch-size: batch_size.1 env: - CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True' - CM_MLPERF_LOADGEN_SERVER_TARGET_QPS: 0.55 - CM_MLPERF_NVIDIA_HARNESS_SDXL_SERVER_BATCHER_TIME_LIMIT: 0 + MLC_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True' + MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS: 0.55 + MLC_MLPERF_NVIDIA_HARNESS_SDXL_SERVER_BATCHER_TIME_LIMIT: 0 l4,sdxl,server,run_harness,num-gpu.8: default_variations: batch-size: batch_size.1 env: - CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True' - CM_MLPERF_LOADGEN_SERVER_TARGET_QPS: 5.05 - CM_MLPERF_NVIDIA_HARNESS_SDXL_SERVER_BATCHER_TIME_LIMIT: 0 + MLC_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True' + MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS: 5.05 + 
MLC_MLPERF_NVIDIA_HARNESS_SDXL_SERVER_BATCHER_TIME_LIMIT: 0 l4,resnet50: default_env: - CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS: 10500 - CM_MLPERF_LOADGEN_SERVER_TARGET_QPS: 9000 - CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY: 0.35 - CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY: 1 + MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS: 10500 + MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS: 9000 + MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY: 0.35 + MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY: 1 l4,resnet50,offline,run_harness: default_variations: batch-size: batch_size.32 env: - CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" - CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "1" - CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True' + MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" + MLC_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "1" + MLC_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True' l4,resnet50,server,run_harness: default_variations: batch-size: batch_size.16 env: - CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "9" - CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" - CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True' - CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: 'True' - CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 2000 - CM_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE: 'True' + MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "9" + MLC_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" + MLC_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True' + MLC_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: 'True' + MLC_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 2000 + MLC_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE: 'True' l4,retinanet,offline,run_harness: default_variations: @@ -1681,11 +1681,11 @@ variations: default_variations: batch-size: batch_size.2 env: - CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" - CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" - CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: 'True' - CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 30000 - CM_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE: 20000000000 + MLC_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" + MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" + MLC_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: 'True' + MLC_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 30000 + MLC_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE: 20000000000 l4,bert_,offline,run_harness: default_variations: @@ -1695,10 +1695,10 @@ variations: default_variations: batch-size: batch_size.16 env: - CM_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN: "200" - CM_MLPERF_NVIDIA_HARNESS_SERVER_NUM_ISSUE_QUERY_THREADS: "1" - CM_MLPERF_NVIDIA_HARNESS_SOFT_DROP: "1.0" - CM_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN: "True" + MLC_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN: "200" + MLC_MLPERF_NVIDIA_HARNESS_SERVER_NUM_ISSUE_QUERY_THREADS: "1" + MLC_MLPERF_NVIDIA_HARNESS_SOFT_DROP: "1.0" + MLC_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN: "True" l4,3d-unet_,offline,run_harness: default_variations: @@ -1712,9 +1712,9 @@ variations: default_variations: batch-size: batch_size.512 env: - CM_MLPERF_NVIDIA_HARNESS_AUDIO_BATCH_SIZE: "64" - CM_MLPERF_NVIDIA_HARNESS_AUDIO_BUFFER_NUM_LINES: "1024" - CM_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS: "1024" + MLC_MLPERF_NVIDIA_HARNESS_AUDIO_BATCH_SIZE: "64" + MLC_MLPERF_NVIDIA_HARNESS_AUDIO_BUFFER_NUM_LINES: "1024" + MLC_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS: "1024" l4,dlrm_,offline,run_harness: default_variations: @@ -1722,30 +1722,30 @@ variations: t4: group: gpu-name env: - CM_NVIDIA_CUSTOM_GPU: "yes" + MLC_NVIDIA_CUSTOM_GPU: "yes" t4,resnet50: default_env: - CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS: 4900 - 
CM_MLPERF_LOADGEN_SERVER_TARGET_QPS: 4000 - CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY: 0.6 - CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY: 2 + MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS: 4900 + MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS: 4000 + MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY: 0.6 + MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY: 2 t4,resnet50,offline,run_harness: default_variations: batch-size: batch_size.256 env: - CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4" + MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4" t4,resnet50,server,run_harness: default_variations: batch-size: batch_size.26 env: - CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" - CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4" - CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: True - CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 2000 - CM_MLPERF_NVIDIA_HARNESS_SOFT_DROP: "0.993" + MLC_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" + MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4" + MLC_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: True + MLC_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 2000 + MLC_MLPERF_NVIDIA_HARNESS_SOFT_DROP: "0.993" t4,retinanet,offline,run_harness: default_variations: @@ -1755,11 +1755,11 @@ variations: default_variations: batch-size: batch_size.2 env: - CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" - CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" - CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: 'True' - CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 20000 - CM_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE: 20000000000 + MLC_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" + MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" + MLC_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: 'True' + MLC_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 20000 + MLC_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE: 20000000000 t4,bert_,offline,run_harness: default_variations: @@ -1769,9 +1769,9 @@ variations: default_variations: batch-size: batch_size.4 env: - CM_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN: "240" - CM_MLPERF_NVIDIA_HARNESS_SERVER_NUM_ISSUE_QUERY_THREADS: "0" - CM_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN: "no" + MLC_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN: "240" + MLC_MLPERF_NVIDIA_HARNESS_SERVER_NUM_ISSUE_QUERY_THREADS: "0" + MLC_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN: "no" t4,3d-unet_,offline,run_harness: default_variations: @@ -1781,19 +1781,19 @@ variations: default_variations: batch-size: batch_size.2048 env: - CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4" - CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True' - CM_MLPERF_NVIDIA_HARNESS_AUDIO_BATCH_SIZE: "128" - CM_MLPERF_NVIDIA_HARNESS_DISABLE_ENCODER_PLUGIN: "True" + MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4" + MLC_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True' + MLC_MLPERF_NVIDIA_HARNESS_AUDIO_BATCH_SIZE: "128" + MLC_MLPERF_NVIDIA_HARNESS_DISABLE_ENCODER_PLUGIN: "True" t4,rnnt,server,run_harness: default_variations: batch-size: batch_size.2048 env: - CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4" - CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True' - CM_MLPERF_NVIDIA_HARNESS_AUDIO_BATCH_SIZE: "128" - CM_MLPERF_NVIDIA_HARNESS_DISABLE_ENCODER_PLUGIN: "True" + MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4" + MLC_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True' + MLC_MLPERF_NVIDIA_HARNESS_AUDIO_BATCH_SIZE: "128" + MLC_MLPERF_NVIDIA_HARNESS_DISABLE_ENCODER_PLUGIN: "True" t4,dlrm_,offline,run_harness: default_variations: @@ -1808,30 +1808,30 @@ variations: custom: group: gpu-name env: - CM_NVIDIA_CUSTOM_GPU: "yes" - CM_MODEL_BATCH_SIZE: "" #we pick from nvidia config - 
CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE: "<<>>" + MLC_NVIDIA_CUSTOM_GPU: "yes" + MLC_MODEL_BATCH_SIZE: "" #we pick from nvidia config + MLC_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE: "<<>>" a100: default_variation: gpu-connection: sxm group: gpu-name env: - CM_NVIDIA_CUSTOM_GPU: "yes" + MLC_NVIDIA_CUSTOM_GPU: "yes" a100,sxm,resnet50,offline,run_harness: default_variations: batch-size: batch_size.2048 env: - CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT: "2048" + MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT: "2048" a100,sxm,retinanet,offline,run_harness: default_variations: batch-size: batch_size.32 env: - CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" - CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" - CM_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE: "300000000000" + MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" + MLC_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" + MLC_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE: "300000000000" a100,sxm,bert_,offline,run_harness: default_variations: diff --git a/script/app-mlperf-inference-nvidia/run.sh b/script/app-mlperf-inference-nvidia/run.sh index ddcd0b550..0c6a8fc4a 100644 --- a/script/app-mlperf-inference-nvidia/run.sh +++ b/script/app-mlperf-inference-nvidia/run.sh @@ -1,7 +1,7 @@ #!/bin/bash -if [[ ${CM_CALL_MLPERF_RUNNER} == "no" ]]; then - cd ${CM_RUN_DIR} - cmd=${CM_RUN_CMD} +if [[ ${MLC_CALL_MLPERF_RUNNER} == "no" ]]; then + cd ${MLC_RUN_DIR} + cmd=${MLC_RUN_CMD} echo "${cmd}" eval "${cmd}" test $? -eq 0 || exit $? diff --git a/script/app-mlperf-inference-qualcomm/customize.py b/script/app-mlperf-inference-qualcomm/customize.py index e14de6d5a..8b64acb6d 100644 --- a/script/app-mlperf-inference-qualcomm/customize.py +++ b/script/app-mlperf-inference-qualcomm/customize.py @@ -11,33 +11,33 @@ def preprocess(i): return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] - if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": + if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes": return {'return': 0} - if 'CM_MODEL' not in env: + if 'MLC_MODEL' not in env: return { 'return': 1, 'error': 'Please select a variation specifying the model to run'} - if 'CM_MLPERF_BACKEND' not in env: + if 'MLC_MLPERF_BACKEND' not in env: return {'return': 1, 'error': 'Please select a variation specifying the backend'} - if 'CM_MLPERF_DEVICE' not in env: + if 'MLC_MLPERF_DEVICE' not in env: return { 'return': 1, 'error': 'Please select a variation specifying the device to run on'} - kilt_root = env['CM_KILT_CHECKOUT_PATH'] + kilt_root = env['MLC_KILT_CHECKOUT_PATH'] print(f"Harness Root: {kilt_root}") source_files = [] - env['CM_SOURCE_FOLDER_PATH'] = env['CM_KILT_CHECKOUT_PATH'] + env['MLC_SOURCE_FOLDER_PATH'] = env['MLC_KILT_CHECKOUT_PATH'] - env['kilt_model_root'] = env.get('CM_ML_MODEL_FILE_WITH_PATH') + env['kilt_model_root'] = env.get('MLC_ML_MODEL_FILE_WITH_PATH') - if env.get('CM_MLPERF_LOADGEN_BATCH_SIZE', '') != '': - env['kilt_model_batch_size'] = env['CM_MLPERF_LOADGEN_BATCH_SIZE'] + if env.get('MLC_MLPERF_LOADGEN_BATCH_SIZE', '') != '': + env['kilt_model_batch_size'] = env['MLC_MLPERF_LOADGEN_BATCH_SIZE'] - if env.get('CM_QAIC_DEVICES', '') != '': - env['kilt_device_ids'] = env['CM_QAIC_DEVICES'] + if env.get('MLC_QAIC_DEVICES', '') != '': + env['kilt_device_ids'] = env['MLC_QAIC_DEVICES'] if '+ CXXFLAGS' not in env: env['+ CXXFLAGS'] = [] @@ -45,40 +45,40 @@ def preprocess(i): if '+CPLUS_INCLUDE_PATH' not in env: env['+CPLUS_INCLUDE_PATH'] = [] - if env['CM_MLPERF_DEVICE'] == "qaic": + if env['MLC_MLPERF_DEVICE'] == "qaic": env['kilt_model_root'] = 
os.path.dirname( - env['CM_QAIC_MODEL_COMPILED_BINARY_WITH_PATH']) + env['MLC_QAIC_MODEL_COMPILED_BINARY_WITH_PATH']) - if env.get('CM_MODEL') == "resnet50": - env['dataset_imagenet_preprocessed_subset_fof'] = env['CM_DATASET_PREPROCESSED_IMAGENAMES_LIST'] - env['dataset_imagenet_preprocessed_dir'] = env['CM_DATASET_PREPROCESSED_PATH'] + if env.get('MLC_MODEL') == "resnet50": + env['dataset_imagenet_preprocessed_subset_fof'] = env['MLC_DATASET_PREPROCESSED_IMAGENAMES_LIST'] + env['dataset_imagenet_preprocessed_dir'] = env['MLC_DATASET_PREPROCESSED_PATH'] - elif "bert" in env.get('CM_MODEL'): - env['dataset_squad_tokenized_max_seq_length'] = env['CM_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH'] - env['dataset_squad_tokenized_root'] = env['CM_DATASET_SQUAD_TOKENIZED_ROOT'] + elif "bert" in env.get('MLC_MODEL'): + env['dataset_squad_tokenized_max_seq_length'] = env['MLC_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH'] + env['dataset_squad_tokenized_root'] = env['MLC_DATASET_SQUAD_TOKENIZED_ROOT'] env['dataset_squad_tokenized_input_ids'] = os.path.basename( - env['CM_DATASET_SQUAD_TOKENIZED_INPUT_IDS']) + env['MLC_DATASET_SQUAD_TOKENIZED_INPUT_IDS']) env['dataset_squad_tokenized_input_mask'] = os.path.basename( - env['CM_DATASET_SQUAD_TOKENIZED_INPUT_MASK']) + env['MLC_DATASET_SQUAD_TOKENIZED_INPUT_MASK']) env['dataset_squad_tokenized_segment_ids'] = os.path.basename( - env['CM_DATASET_SQUAD_TOKENIZED_SEGMENT_IDS']) + env['MLC_DATASET_SQUAD_TOKENIZED_SEGMENT_IDS']) - elif "retinanet" in env.get('CM_MODEL'): + elif "retinanet" in env.get('MLC_MODEL'): env['kilt_prior_bin_path'] = os.path.join( kilt_root, "plugins", "nms-abp", "data") env['kilt_object_detection_preprocessed_subset_fof'] = os.path.basename( - env['CM_DATASET_PREPROCESSED_IMAGENAMES_LIST']) - env['kilt_object_detection_preprocessed_dir'] = env['CM_DATASET_PREPROCESSED_PATH'] + env['MLC_DATASET_PREPROCESSED_IMAGENAMES_LIST']) + env['kilt_object_detection_preprocessed_dir'] = env['MLC_DATASET_PREPROCESSED_PATH'] env['+ CXXFLAGS'].append("-DMODEL_RX50") env['+ CXXFLAGS'].append("-DSDK_1_11_X") - loc_offset = env.get('CM_QAIC_MODEL_RETINANET_LOC_OFFSET') + loc_offset = env.get('MLC_QAIC_MODEL_RETINANET_LOC_OFFSET') if loc_offset: env['+ CXXFLAGS'].append("-DMODEL_RX50") keys = ['LOC_OFFSET', 'LOC_SCALE', 'CONF_OFFSET', 'CONF_SCALE'] - if env.get('CM_RETINANET_USE_MULTIPLE_SCALES_OFFSETS', '') == 'yes': + if env.get('MLC_RETINANET_USE_MULTIPLE_SCALES_OFFSETS', '') == 'yes': env['+ CXXFLAGS'].append("-DUSE_MULTIPLE_SCALES_OFFSETS=1") for j in range(0, 4): keys.append(f'LOC_OFFSET{j}') @@ -87,11 +87,11 @@ def preprocess(i): keys.append(f'CONF_SCALE{j}') for key in keys: - value = env.get('CM_QAIC_MODEL_RETINANET_' + key, '') + value = env.get('MLC_QAIC_MODEL_RETINANET_' + key, '') if value != '': env['+ CXXFLAGS'].append(f" -D{key}_={value} ") - if env.get('CM_BENCHMARK', '') == 'NETWORK_BERT_SERVER': + if env.get('MLC_BENCHMARK', '') == 'NETWORK_BERT_SERVER': source_files.append( os.path.join( kilt_root, @@ -109,12 +109,12 @@ def preprocess(i): "server", "server.cpp")) env['+ CXXFLAGS'].append("-DNETWORK_DIVISION=1") - elif env.get('CM_BENCHMARK', '') == 'NETWORK_BERT_CLIENT': + elif env.get('MLC_BENCHMARK', '') == 'NETWORK_BERT_CLIENT': # source_files.append(os.path.join(kilt_root, "benchmarks", "network", "bert", "client", "pack.cpp")) # env['+CPLUS_INCLUDE_PATH'].append(kilt_root) # source_files.append(os.path.join(kilt_root, "benchmarks", "network", "bert", "client", "client.cpp")) env['+ CXXFLAGS'].append("-DNETWORK_DIVISION") - elif 
env.get('CM_BENCHMARK', '') == 'STANDALONE_BERT': + elif env.get('MLC_BENCHMARK', '') == 'STANDALONE_BERT': source_files.append( os.path.join( kilt_root, @@ -124,14 +124,14 @@ def preprocess(i): "pack.cpp")) script_path = i['run_script_input']['path'] - if env['CM_MODEL'] == "retinanet": - env['CM_DATASET_LIST'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH'] + if env['MLC_MODEL'] == "retinanet": + env['MLC_DATASET_LIST'] = env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] - for file in os.listdir(env['CM_SOURCE_FOLDER_PATH']): + for file in os.listdir(env['MLC_SOURCE_FOLDER_PATH']): if file.endswith(".c") or file.endswith(".cpp"): source_files.append(file) - if 'SERVER' not in env.get('CM_BENCHMARK', ''): + if 'SERVER' not in env.get('MLC_BENCHMARK', ''): source_files.append( os.path.join( kilt_root, @@ -139,18 +139,18 @@ def preprocess(i): "harness", "harness.cpp")) - # source_files.append(env['CM_QAIC_API_SRC_FILE']) + # source_files.append(env['MLC_QAIC_API_SRC_FILE']) env['+CPLUS_INCLUDE_PATH'].append(kilt_root) env['+C_INCLUDE_PATH'].append(kilt_root) - if env['CM_MLPERF_DEVICE'] == 'gpu': - env['+C_INCLUDE_PATH'].append(env['CM_CUDA_PATH_INCLUDE']) - env['+CPLUS_INCLUDE_PATH'].append(env['CM_CUDA_PATH_INCLUDE']) - env['+LD_LIBRARY_PATH'].append(env['CM_CUDA_PATH_LIB']) - env['+DYLD_FALLBACK_LIBRARY_PATH'].append(env['CM_CUDA_PATH_INCLUDE']) + if env['MLC_MLPERF_DEVICE'] == 'gpu': + env['+C_INCLUDE_PATH'].append(env['MLC_CUDA_PATH_INCLUDE']) + env['+CPLUS_INCLUDE_PATH'].append(env['MLC_CUDA_PATH_INCLUDE']) + env['+LD_LIBRARY_PATH'].append(env['MLC_CUDA_PATH_LIB']) + env['+DYLD_FALLBACK_LIBRARY_PATH'].append(env['MLC_CUDA_PATH_INCLUDE']) - elif env['CM_MLPERF_DEVICE'] == 'qaic': + elif env['MLC_MLPERF_DEVICE'] == 'qaic': source_files.append( os.path.join( kilt_root, @@ -161,24 +161,24 @@ def preprocess(i): "QAicInfApi.cpp")) print(f"Compiling the source files: {source_files}") - env['CM_CXX_SOURCE_FILES'] = ";".join(source_files) + env['MLC_CXX_SOURCE_FILES'] = ";".join(source_files) env['+ CXXFLAGS'].append("-std=c++17") env['+ CXXFLAGS'].append("-fpermissive") env['+ CXXFLAGS'].append("-DKILT_CONFIG_FROM_ENV") env['+ CXXFLAGS'].append("-DKILT_CONFIG_TRANSLATE_X") - env['+ CXXFLAGS'].append("-DKILT_BENCHMARK_" + env['CM_BENCHMARK']) + env['+ CXXFLAGS'].append("-DKILT_BENCHMARK_" + env['MLC_BENCHMARK']) env['+ CXXFLAGS'].append("-DKILT_DEVICE_" + env['device'].upper()) - # add preprocessor flag like "#define CM_MODEL_RESNET50" - # env['+ CXXFLAGS'].append('-DCM_MODEL_' + env['CM_MODEL'].upper()) - # add preprocessor flag like "#define CM_MLPERF_BACKEND_ONNXRUNTIME" - env['+ CXXFLAGS'].append('-DCM_MLPERF_BACKEND_' + - env['CM_MLPERF_BACKEND'].upper()) - # add preprocessor flag like "#define CM_MLPERF_DEVICE_CPU" - env['+ CXXFLAGS'].append('-DCM_MLPERF_DEVICE_' + - env['CM_MLPERF_DEVICE'].upper()) + # add preprocessor flag like "#define MLC_MODEL_RESNET50" + # env['+ CXXFLAGS'].append('-DMLC_MODEL_' + env['MLC_MODEL'].upper()) + # add preprocessor flag like "#define MLC_MLPERF_BACKEND_ONNXRUNTIME" + env['+ CXXFLAGS'].append('-DMLC_MLPERF_BACKEND_' + + env['MLC_MLPERF_BACKEND'].upper()) + # add preprocessor flag like "#define MLC_MLPERF_DEVICE_CPU" + env['+ CXXFLAGS'].append('-DMLC_MLPERF_DEVICE_' + + env['MLC_MLPERF_DEVICE'].upper()) if '+ LDCXXFLAGS' not in env: env['+ LDCXXFLAGS'] = [] @@ -189,33 +189,33 @@ def preprocess(i): "-ldl" ] # e.g. 
-lonnxruntime - if 'CM_MLPERF_BACKEND_LIB_NAMESPEC' in env: + if 'MLC_MLPERF_BACKEND_LIB_NAMESPEC' in env: env['+ LDCXXFLAGS'].append('-l' + - env['CM_MLPERF_BACKEND_LIB_NAMESPEC']) + env['MLC_MLPERF_BACKEND_LIB_NAMESPEC']) # e.g. -lcudart - if 'CM_MLPERF_DEVICE_LIB_NAMESPEC' in env: - env['+ LDCXXFLAGS'].append('-l' + env['CM_MLPERF_DEVICE_LIB_NAMESPEC']) + if 'MLC_MLPERF_DEVICE_LIB_NAMESPEC' in env: + env['+ LDCXXFLAGS'].append('-l' + env['MLC_MLPERF_DEVICE_LIB_NAMESPEC']) if '-DPRINT_NETWORK_DESCRIPTOR' in env['+ CXXFLAGS']: env['+ LDCXXFLAGS'].append('-lprotobuf') - env['CM_LINKER_LANG'] = 'CXX' - env['CM_RUN_DIR'] = env.get('CM_MLPERF_OUTPUT_DIR', os.getcwd()) + env['MLC_LINKER_LANG'] = 'CXX' + env['MLC_RUN_DIR'] = env.get('MLC_MLPERF_OUTPUT_DIR', os.getcwd()) - if 'CM_MLPERF_CONF' not in env: - env['CM_MLPERF_CONF'] = os.path.join( - env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") - if 'CM_MLPERF_USER_CONF' not in env: - env['CM_MLPERF_USER_CONF'] = os.path.join( - env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf") + if 'MLC_MLPERF_CONF' not in env: + env['MLC_MLPERF_CONF'] = os.path.join( + env['MLC_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") + if 'MLC_MLPERF_USER_CONF' not in env: + env['MLC_MLPERF_USER_CONF'] = os.path.join( + env['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf") # to LOADGEN_MLPERF_CONF - env['loadgen_mlperf_conf_path'] = env['CM_MLPERF_CONF'] + env['loadgen_mlperf_conf_path'] = env['MLC_MLPERF_CONF'] # to LOADGEN_USER_CONF - env['loadgen_user_conf_path'] = env['CM_MLPERF_USER_CONF'] - env['loadgen_scenario'] = env['CM_MLPERF_LOADGEN_SCENARIO'] + env['loadgen_user_conf_path'] = env['MLC_MLPERF_USER_CONF'] + env['loadgen_scenario'] = env['MLC_MLPERF_LOADGEN_SCENARIO'] - loadgen_mode = env['CM_MLPERF_LOADGEN_MODE'] + loadgen_mode = env['MLC_MLPERF_LOADGEN_MODE'] if loadgen_mode == 'performance': kilt_loadgen_mode = 'PerformanceOnly' elif loadgen_mode == 'accuracy': diff --git a/script/app-mlperf-inference-qualcomm/meta.yaml b/script/app-mlperf-inference-qualcomm/meta.yaml index 5e3de4302..1e508e0e6 100644 --- a/script/app-mlperf-inference-qualcomm/meta.yaml +++ b/script/app-mlperf-inference-qualcomm/meta.yaml @@ -24,59 +24,59 @@ tags: # Default environment default_env: - CM_BATCH_COUNT: '1' - CM_BATCH_SIZE: '1' - CM_FAST_COMPILATION: 'yes' - CM_MLPERF_LOADGEN_SCENARIO: Offline - CM_MLPERF_LOADGEN_MODE: performance - CM_SKIP_PREPROCESS_DATASET: 'no' - CM_SKIP_MODEL_DOWNLOAD: 'no' - CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: kilt - CM_MLPERF_SKIP_RUN: 'no' - CM_KILT_REPO_URL: https://github.com/GATEOverflow/kilt-mlperf - CM_QAIC_DEVICES: "0" + MLC_BATCH_COUNT: '1' + MLC_BATCH_SIZE: '1' + MLC_FAST_COMPILATION: 'yes' + MLC_MLPERF_LOADGEN_SCENARIO: Offline + MLC_MLPERF_LOADGEN_MODE: performance + MLC_SKIP_PREPROCESS_DATASET: 'no' + MLC_SKIP_MODEL_DOWNLOAD: 'no' + MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: kilt + MLC_MLPERF_SKIP_RUN: 'no' + MLC_KILT_REPO_URL: https://github.com/GATEOverflow/kilt-mlperf + MLC_QAIC_DEVICES: "0" kilt_max_wait_abs: 10000 verbosity: 0 loadgen_trigger_cold_run: 0 env: - CM_CALL_MLPERF_RUNNER: 'no' + MLC_CALL_MLPERF_RUNNER: 'no' # Map script inputs to environment variables input_mapping: - count: CM_MLPERF_LOADGEN_QUERY_COUNT - max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE - mlperf_conf: CM_MLPERF_CONF - mode: CM_MLPERF_LOADGEN_MODE - output_dir: CM_MLPERF_OUTPUT_DIR - performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT - scenario: CM_MLPERF_LOADGEN_SCENARIO - user_conf: 
CM_MLPERF_USER_CONF - devices: CM_QAIC_DEVICES - skip_preprocess: CM_SKIP_PREPROCESS_DATASET - skip_preprocessing: CM_SKIP_PREPROCESS_DATASET - target_qps: CM_MLPERF_LOADGEN_TARGET_QPS - offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS - server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS - target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY - singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY - multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY - performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT - rerun: CM_RERUN + count: MLC_MLPERF_LOADGEN_QUERY_COUNT + max_batchsize: MLC_MLPERF_LOADGEN_MAX_BATCHSIZE + mlperf_conf: MLC_MLPERF_CONF + mode: MLC_MLPERF_LOADGEN_MODE + output_dir: MLC_MLPERF_OUTPUT_DIR + performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT + scenario: MLC_MLPERF_LOADGEN_SCENARIO + user_conf: MLC_MLPERF_USER_CONF + devices: MLC_QAIC_DEVICES + skip_preprocess: MLC_SKIP_PREPROCESS_DATASET + skip_preprocessing: MLC_SKIP_PREPROCESS_DATASET + target_qps: MLC_MLPERF_LOADGEN_TARGET_QPS + offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS + server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS + target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY + singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY + multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY + performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT + rerun: MLC_RERUN new_state_keys: - mlperf-inference-implementation - - CM_SUT_* + - MLC_SUT_* # Env keys which are exposed to higher level scripts new_env_keys: - - CM_MLPERF_* - - CM_DATASET_* - - CM_HW_NAME - - CM_ML_MODEL_* - - CM_MAX_EXAMPLES - - CM_IMAGENET_ACCURACY_DTYPE - - CM_SQUAD_ACCURACY_DTYPE + - MLC_MLPERF_* + - MLC_DATASET_* + - MLC_HW_NAME + - MLC_ML_MODEL_* + - MLC_MAX_EXAMPLES + - MLC_IMAGENET_ACCURACY_DTYPE + - MLC_SQUAD_ACCURACY_DTYPE # Dependencies on other CM scripts @@ -97,10 +97,10 @@ deps: - kilt-repo update_tags_from_env_with_prefix: _repo.: - - CM_KILT_REPO_URL + - MLC_KILT_REPO_URL extra_cache_tags: kilt,kilt-repo env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_KILT_CHECKOUT_PATH + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_KILT_CHECKOUT_PATH ######################################################################## # Install MLPerf inference dependencies @@ -129,10 +129,10 @@ deps: # Install ResNet50 model (ONNX) and ImageNet - enable_if_env: - CM_MODEL: + MLC_MODEL: - resnet50 skip_if_env: - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - qaic names: - resnet50-model @@ -140,27 +140,27 @@ deps: tags: get,ml-model,resnet50,_fp32,_onnx,_from-tf - enable_if_env: - CM_MODEL: + MLC_MODEL: - resnet50 - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - qaic tags: compile,qaic,model,_resnet50 names: - qaic-model-compiler - resnet50-compiler skip_if_env: - CM_MLPERF_SKIP_RUN: + MLC_MLPERF_SKIP_RUN: - yes - enable_if_env: - CM_MODEL: + MLC_MODEL: - resnet50 names: - imagenet-preprocessed - dataset-preprocessed tags: get,dataset,imagenet,preprocessed,_for.resnet50,_NHWC,_full skip_if_env: - CM_MLPERF_SKIP_RUN: + MLC_MLPERF_SKIP_RUN: - yes @@ -169,45 +169,45 @@ deps: # Install bert dependencies - enable_if_env: - CM_MODEL: + MLC_MODEL: - bert-99 - bert-99.9 names: - bert-vocab tags: get,squad-vocab skip_if_env: - CM_MLPERF_SKIP_RUN: + MLC_MLPERF_SKIP_RUN: - yes - enable_if_env: - CM_MODEL: + MLC_MODEL: - bert-99 - bert-99.9 names: - squad-tokenized tags: get,dataset,tokenized,squad,_raw skip_if_env: - CM_MLPERF_SKIP_RUN: + 
MLC_MLPERF_SKIP_RUN: - yes ######################################################################## # Install OpenImages - enable_if_env: - CM_MODEL: + MLC_MODEL: - retinanet - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - qaic tags: compile,qaic,model,_retinanet names: - qaic-model-compiler - retinanet-compiler skip_if_env: - CM_MLPERF_SKIP_RUN: + MLC_MLPERF_SKIP_RUN: - yes - enable_if_env: - CM_MODEL: + MLC_MODEL: - retinanet names: - openimages-preprocessed @@ -215,11 +215,11 @@ deps: tags: get,dataset,preprocessed,openimages,_for.retinanet.onnx,_NCHW,_validation,_custom-annotations update_tags_from_env_with_prefix1: #disabling now to prevent unnecessary preprocessing _quant-scale.: - - CM_QAIC_MODEL_RETINANET_IMAGE_OFFSET + - MLC_QAIC_MODEL_RETINANET_IMAGE_OFFSET _quant-offset.: - - CM_QAIC_MODEL_RETINANET_IMAGE_SCALE + - MLC_QAIC_MODEL_RETINANET_IMAGE_SCALE skip_if_env: - CM_MLPERF_SKIP_RUN: + MLC_MLPERF_SKIP_RUN: - yes @@ -228,16 +228,16 @@ deps: ######################################################################## # Install ML engines via CM - enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - onnxruntime - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - cpu tags: get,lib,onnxruntime,lang-cpp,_cpu - enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - onnxruntime - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - gpu tags: get,lib,onnxruntime,lang-cpp,_cuda @@ -249,14 +249,14 @@ post_deps: - compile-program tags: compile,cpp-program skip_if_env: - CM_MLPERF_SKIP_RUN: + MLC_MLPERF_SKIP_RUN: - yes - names: - runner - mlperf-runner skip_if_env: - CM_MLPERF_SKIP_RUN: + MLC_MLPERF_SKIP_RUN: - 'yes' - yes tags: benchmark-mlperf @@ -272,47 +272,47 @@ variations: group: device default: true env: - CM_MLPERF_DEVICE: cpu + MLC_MLPERF_DEVICE: cpu kilt_backend_type: cpu cuda: group: device env: - CM_MLPERF_DEVICE: gpu - CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart + MLC_MLPERF_DEVICE: gpu + MLC_MLPERF_DEVICE_LIB_NAMESPEC: cudart kilt_backend_type: gpu qaic: group: device env: - CM_MLPERF_DEVICE: qaic - CM_MLPERF_DEVICE_LIB_NAMESPEC: QAic + MLC_MLPERF_DEVICE: qaic + MLC_MLPERF_DEVICE_LIB_NAMESPEC: QAic kilt_backend_type: qaic deps: - tags: get,qaic,platform,sdk skip_if_env: - CM_MLPERF_SKIP_RUN: + MLC_MLPERF_SKIP_RUN: - yes - tags: get,lib,protobuf,_tag.v3.11.4 skip_if_env: - CM_MLPERF_SKIP_RUN: + MLC_MLPERF_SKIP_RUN: - yes - tags: set,device,mode,qaic enable_if_env: - CM_QAIC_VC: + MLC_QAIC_VC: "on" update_tags_from_env_with_prefix": _vc.: - - CM_QAIC_VC + - MLC_QAIC_VC - tags: set,device,mode,qaic,_ecc enable_if_env: - CM_QAIC_ECC: + MLC_QAIC_ECC: "yes" tensorrt: group: framework env: - CM_MLPERF_BACKEND: tensorrt + MLC_MLPERF_BACKEND: tensorrt device: tensorrt - CM_MLPERF_BACKEND_NAME: TensorRT + MLC_MLPERF_BACKEND_NAME: TensorRT # ML engine onnxruntime: @@ -320,15 +320,15 @@ variations: default: true env: device: onnxrt - CM_MLPERF_BACKEND: onnxruntime - CM_MLPERF_BACKEND_LIB_NAMESPEC: onnxruntime + MLC_MLPERF_BACKEND: onnxruntime + MLC_MLPERF_BACKEND_LIB_NAMESPEC: onnxruntime glow: group: framework env: device: qaic - CM_MLPERF_BACKEND: glow - CM_MLPERF_BACKEND_LIB_NAMESPEC: QAic + MLC_MLPERF_BACKEND: glow + MLC_MLPERF_BACKEND_LIB_NAMESPEC: QAic bs.#: group: batch-size @@ -348,7 +348,7 @@ variations: group: model default: true env: - CM_MODEL: resnet50 + MLC_MODEL: resnet50 kilt_model_name: resnet50 kilt_input_count: 1 kilt_output_count: 1 @@ -359,16 +359,16 @@ variations: ml_model_image_height: 224 loadgen_buffer_size: 1024 loadgen_dataset_size: 50000 - CM_BENCHMARK: STANDALONE_CLASSIFICATION + 
MLC_BENCHMARK: STANDALONE_CLASSIFICATION resnet50,uint8: env: kilt_input_format: "UINT8,-1,224,224,3" kilt_device_qaic_skip_stage: convert - CM_IMAGENET_ACCURACY_DTYPE: int8 - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: "https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Qualcomm/calibration.md" - CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 - CM_ML_MODEL_INPUTS_DATA_TYPE: int8 + MLC_IMAGENET_ACCURACY_DTYPE: int8 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: "https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Qualcomm/calibration.md" + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + MLC_ML_MODEL_INPUTS_DATA_TYPE: int8 bert-99,qaic: deps: @@ -377,12 +377,12 @@ variations: - qaic-model-compiler - bert-99-compiler skip_if_env: - CM_MLPERF_SKIP_RUN: + MLC_MLPERF_SKIP_RUN: - yes env: - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: "https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Qualcomm/calibration.md" - CM_ML_MODEL_WEIGHTS_DATA_TYPE: int32 - CM_ML_MODEL_INPUTS_DATA_TYPE: int8,fp16 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: "https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Qualcomm/calibration.md" + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int32 + MLC_ML_MODEL_INPUTS_DATA_TYPE: int8,fp16 bert-99.9,qaic: deps: @@ -391,20 +391,20 @@ variations: - qaic-model-compiler - bert-99.9-compiler skip_if_env: - CM_MLPERF_SKIP_RUN: + MLC_MLPERF_SKIP_RUN: - yes env: - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: "https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Qualcomm/calibration.md" - CM_ML_MODEL_WEIGHTS_DATA_TYPE: int32 - CM_ML_MODEL_INPUTS_DATA_TYPE: fp16 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: "https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Qualcomm/calibration.md" + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int32 + MLC_ML_MODEL_INPUTS_DATA_TYPE: fp16 retinanet: group: model base: - bs.1 env: - CM_MODEL: retinanet - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth" + MLC_MODEL: retinanet + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth" kilt_model_name: retinanet kilt_input_count: 1 #kilt_model_disable_nms: '' @@ -417,7 +417,7 @@ variations: ml_model_image_width: 800 loadgen_buffer_size: 64 loadgen_dataset_size: 24576 - CM_BENCHMARK: STANDALONE_OBJECT_DETECTION + MLC_BENCHMARK: STANDALONE_OBJECT_DETECTION deps: - tags: get,generic-python-lib,_Pillow @@ -432,9 +432,9 @@ variations: kilt_device_qaic_skip_stage: 'convert' kilt_input_format: "UINT8,1,3,800,800" kilt_output_format: "INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,4,1000:INT8,14,1000:INT8,1,4,1000:INT8,1,4,1000:INT8,1,4,1000" - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: "https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Qualcomm/calibration.md" - CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 - CM_ML_MODEL_INPUTS_DATA_TYPE: int8 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: "https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Qualcomm/calibration.md" + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + MLC_ML_MODEL_INPUTS_DATA_TYPE: int8 bert_: @@ -443,7 +443,7 @@ variations: - tags: get,generic-python-lib,_safetensors - tags: get,generic-python-lib,_onnx env: - CM_BENCHMARK: STANDALONE_BERT + MLC_BENCHMARK: STANDALONE_BERT kilt_model_name: bert kilt_model_seq_length: 384 kilt_model_bert_variant: BERT_PACKED @@ -467,25 +467,25 @@ variations: group: run-mode default: true env: - CM_RUN_MODE: standalone + 
MLC_RUN_MODE: standalone network-server: group: run-mode env: - CM_RUN_MODE: network-server + MLC_RUN_MODE: network-server network-client: group: run-mode env: - CM_RUN_MODE: network-client + MLC_RUN_MODE: network-client bert_,network-server: env: - CM_BENCHMARK: NETWORK_BERT_SERVER + MLC_BENCHMARK: NETWORK_BERT_SERVER bert_,network-client: env: - CM_BENCHMARK: NETWORK_BERT_CLIENT + MLC_BENCHMARK: NETWORK_BERT_CLIENT bert_,singlestream: env: @@ -496,22 +496,22 @@ variations: base: - bert_ env: - CM_MODEL: bert-99 - CM_SQUAD_ACCURACY_DTYPE: float32 - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx" + MLC_MODEL: bert-99 + MLC_SQUAD_ACCURACY_DTYPE: float32 + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx" bert-99.9: group: model base: - bert_ env: - CM_MODEL: bert-99.9 - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx" + MLC_MODEL: bert-99.9 + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx" loadgen-batch-size.#: group: loadgen-batch-size env: - CM_MLPERF_LOADGEN_BATCH_SIZE: "#" + MLC_MLPERF_LOADGEN_BATCH_SIZE: "#" bert-99,offline: default_variations: @@ -523,23 +523,23 @@ variations: activation-count.#: env: - CM_MLPERF_QAIC_ACTIVATION_COUNT: "#" - #CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: "activation_count.#" + MLC_MLPERF_QAIC_ACTIVATION_COUNT: "#" + #MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: "activation_count.#" maxq: group: power-mode env: - CM_MLPERF_NVIDIA_HARNESS_MAXQ: yes + MLC_MLPERF_NVIDIA_HARNESS_MAXQ: yes maxn: group: power-mode env: - CM_MLPERF_NVIDIA_HARNESS_MAXN: yes + MLC_MLPERF_NVIDIA_HARNESS_MAXN: yes singlestream: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: SingleStream + MLC_MLPERF_LOADGEN_SCENARIO: SingleStream adr: qaic-model-compiler: tags: _singlestream @@ -554,21 +554,21 @@ variations: multistream: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: MultiStream + MLC_MLPERF_LOADGEN_SCENARIO: MultiStream adr: qaic-model-compiler: tags: _multistream offline: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: Offline + MLC_MLPERF_LOADGEN_SCENARIO: Offline adr: qaic-model-compiler: tags: _offline server: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: Server + MLC_MLPERF_LOADGEN_SCENARIO: Server adr: qaic-model-compiler: tags: _server @@ -586,7 +586,7 @@ variations: dataset-preprocessed: tags: _float32,_rgb32 env: - CM_IMAGENET_ACCURACY_DTYPE: float32 + MLC_IMAGENET_ACCURACY_DTYPE: float32 nsp.14: group: nsp @@ -614,12 +614,12 @@ variations: base: - nsp.14 env: - CM_QAIC_DEVICES: "0,1,2,3,4,5,6,7" + MLC_QAIC_DEVICES: "0,1,2,3,4,5,6,7" qaic_queue_length: 4 dl2q.24xlarge,singlestream: env: - CM_QAIC_DEVICES: 0 + MLC_QAIC_DEVICES: 0 qaic_activation_count: "1" dl2q.24xlarge,resnet50,offline: @@ -668,11 +668,11 @@ variations: num-devices.4: env: - CM_QAIC_DEVICES: "0,1,2,3" + MLC_QAIC_DEVICES: "0,1,2,3" pro,num-devices.4,singlestream: env: - CM_QAIC_DEVICES: "0" + MLC_QAIC_DEVICES: "0" qaic_activation_count: "1" pro,num-devices.4,resnet50,offline: @@ -740,7 +740,7 @@ variations: base: - nsp.9 env: - CM_QAIC_DEVICES: "0" + MLC_QAIC_DEVICES: "0" qaic_queue_length: 6 rb6,singlestream: diff --git a/script/app-mlperf-inference-qualcomm/run.sh b/script/app-mlperf-inference-qualcomm/run.sh index ddcd0b550..0c6a8fc4a 100644 --- a/script/app-mlperf-inference-qualcomm/run.sh +++ b/script/app-mlperf-inference-qualcomm/run.sh @@ -1,7 +1,7 
@@ #!/bin/bash -if [[ ${CM_CALL_MLPERF_RUNNER} == "no" ]]; then - cd ${CM_RUN_DIR} - cmd=${CM_RUN_CMD} +if [[ ${MLC_CALL_MLPERF_RUNNER} == "no" ]]; then + cd ${MLC_RUN_DIR} + cmd=${MLC_RUN_CMD} echo "${cmd}" eval "${cmd}" test $? -eq 0 || exit $? diff --git a/script/app-mlperf-inference-redhat/customize.py b/script/app-mlperf-inference-redhat/customize.py index d5d4ee85d..7278f89a2 100644 --- a/script/app-mlperf-inference-redhat/customize.py +++ b/script/app-mlperf-inference-redhat/customize.py @@ -11,29 +11,29 @@ def preprocess(i): return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] - if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": + if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes": return {'return': 0} - if 'CM_MODEL' not in env: + if 'MLC_MODEL' not in env: return { 'return': 1, 'error': 'Please select a variation specifying the model to run'} - if 'CM_MLPERF_BACKEND' not in env: + if 'MLC_MLPERF_BACKEND' not in env: return {'return': 1, 'error': 'Please select a variation specifying the backend'} - if 'CM_MLPERF_DEVICE' not in env: + if 'MLC_MLPERF_DEVICE' not in env: return { 'return': 1, 'error': 'Please select a variation specifying the device to run on'} - r = get_run_cmd(env['CM_MODEL'], i) + r = get_run_cmd(env['MLC_MODEL'], i) if r['return'] > 0: return r run_cmd = r['run_cmd'] run_dir = r['run_dir'] print(run_cmd) print(run_dir) - env['CM_MLPERF_RUN_CMD'] = run_cmd - env['CM_RUN_DIR'] = run_dir - env['CM_RUN_CMD'] = run_cmd + env['MLC_MLPERF_RUN_CMD'] = run_cmd + env['MLC_RUN_DIR'] = run_dir + env['MLC_RUN_CMD'] = run_cmd return {'return': 0} # return {'return':1, 'error': 'Run command needs to be tested'} @@ -42,16 +42,16 @@ def preprocess(i): def get_run_cmd(model, i): env = i['env'] if "gptj" in model: - scenario = env['CM_MLPERF_LOADGEN_SCENARIO'] - device = env['CM_MLPERF_DEVICE'] - mode = env['CM_MLPERF_LOADGEN_MODE'] - outdir = env['CM_MLPERF_OUTPUT_DIR'] - mlperf_conf_path = env['CM_MLPERF_CONF'] - user_conf_path = env['CM_MLPERF_USER_CONF'] - api_server = env.get('CM_MLPERF_INFERENCE_API_SERVER', 'localhost') + scenario = env['MLC_MLPERF_LOADGEN_SCENARIO'] + device = env['MLC_MLPERF_DEVICE'] + mode = env['MLC_MLPERF_LOADGEN_MODE'] + outdir = env['MLC_MLPERF_OUTPUT_DIR'] + mlperf_conf_path = env['MLC_MLPERF_CONF'] + user_conf_path = env['MLC_MLPERF_USER_CONF'] + api_server = env.get('MLC_MLPERF_INFERENCE_API_SERVER', 'localhost') model_path = env['GPTJ_CHECKPOINT_PATH'] - dataset_path = env['CM_DATASET_CNNDM_EVAL_PATH'] - precision = env['CM_MLPERF_MODEL_PRECISION'] + dataset_path = env['MLC_DATASET_CNNDM_EVAL_PATH'] + precision = env['MLC_MLPERF_MODEL_PRECISION'] if mode == "accuracy": accuracy_string = " --accuracy " else: @@ -60,7 +60,7 @@ def get_run_cmd(model, i): run_cmd = f"python3 -u main.py --scenario {scenario} --model-path {model_path} --api-server {api_server} --api-model-name gpt-j-cnn --mlperf-conf {mlperf_conf_path} {accuracy_string} --vllm --user-conf {user_conf_path} --dataset-path {dataset_path} --output-log-dir {outdir} --dtype float32 --device {device} " submitter = "CTuning" run_dir = os.path.join( - env['CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO'], + env['MLC_MLPERF_INFERENCE_IMPLEMENTATION_REPO'], "open", submitter, "code", @@ -69,18 +69,18 @@ def get_run_cmd(model, i): return {'return': 0, 'run_cmd': run_cmd, 'run_dir': run_dir} if "llama2" in model: - scenario = env['CM_MLPERF_LOADGEN_SCENARIO'] - device = env['CM_MLPERF_DEVICE'] - mode = env['CM_MLPERF_LOADGEN_MODE'] - outdir = env['CM_MLPERF_OUTPUT_DIR'] - 
mlperf_conf_path = env['CM_MLPERF_CONF'] - user_conf_path = env['CM_MLPERF_USER_CONF'] + scenario = env['MLC_MLPERF_LOADGEN_SCENARIO'] + device = env['MLC_MLPERF_DEVICE'] + mode = env['MLC_MLPERF_LOADGEN_MODE'] + outdir = env['MLC_MLPERF_OUTPUT_DIR'] + mlperf_conf_path = env['MLC_MLPERF_CONF'] + user_conf_path = env['MLC_MLPERF_USER_CONF'] api_server = env.get( - 'CM_MLPERF_INFERENCE_API_SERVER', + 'MLC_MLPERF_INFERENCE_API_SERVER', 'localhost:8000/v1') - api_model_name = env['CM_VLLM_SERVER_MODEL_NAME'] - dataset_path = env['CM_DATASET_OPENORCA_PATH'] - precision = env['CM_MLPERF_MODEL_PRECISION'] + api_model_name = env['MLC_VLLM_SERVER_MODEL_NAME'] + dataset_path = env['MLC_DATASET_OPENORCA_PATH'] + precision = env['MLC_MLPERF_MODEL_PRECISION'] if mode == "accuracy": accuracy_string = " --accuracy " else: @@ -89,7 +89,7 @@ def get_run_cmd(model, i): run_cmd = f"python3 -u 'main.py' --scenario {scenario} --model-path {api_model_name} --api-model-name {api_model_name} --api-server {api_server} --mlperf-conf {mlperf_conf_path} {accuracy_string} --vllm --user-conf {user_conf_path} --dataset-path {dataset_path} --output-log-dir {outdir} --dtype float32 --device {device} " submitter = "RedHat-Supermicro" run_dir = os.path.join( - env['CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO'], + env['MLC_MLPERF_INFERENCE_IMPLEMENTATION_REPO'], "open", submitter, "code", diff --git a/script/app-mlperf-inference-redhat/meta.yaml b/script/app-mlperf-inference-redhat/meta.yaml index 2c7011bd5..55af68d65 100644 --- a/script/app-mlperf-inference-redhat/meta.yaml +++ b/script/app-mlperf-inference-redhat/meta.yaml @@ -21,51 +21,51 @@ tags: # Default environment default_env: - CM_MLPERF_LOADGEN_SCENARIO: Offline - CM_MLPERF_LOADGEN_MODE: performance - CM_SKIP_PREPROCESS_DATASET: 'no' - CM_SKIP_MODEL_DOWNLOAD: 'no' - CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: redhat_harness - CM_MLPERF_SKIP_RUN: 'no' + MLC_MLPERF_LOADGEN_SCENARIO: Offline + MLC_MLPERF_LOADGEN_MODE: performance + MLC_SKIP_PREPROCESS_DATASET: 'no' + MLC_SKIP_MODEL_DOWNLOAD: 'no' + MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: redhat_harness + MLC_MLPERF_SKIP_RUN: 'no' env: - CM_CALL_MLPERF_RUNNER: 'no' + MLC_CALL_MLPERF_RUNNER: 'no' # Map script inputs to environment variables input_mapping: - count: CM_MLPERF_LOADGEN_QUERY_COUNT - max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE - mlperf_conf: CM_MLPERF_CONF - mode: CM_MLPERF_LOADGEN_MODE - output_dir: CM_MLPERF_OUTPUT_DIR - performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT - scenario: CM_MLPERF_LOADGEN_SCENARIO - user_conf: CM_MLPERF_USER_CONF - skip_preprocess: CM_SKIP_PREPROCESS_DATASET - skip_preprocessing: CM_SKIP_PREPROCESS_DATASET - target_qps: CM_MLPERF_LOADGEN_TARGET_QPS - offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS - server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS - target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY - singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY - multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY - performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT - rerun: CM_RERUN - results_repo: CM_MLPERF_INFERENCE_RESULTS_REPO + count: MLC_MLPERF_LOADGEN_QUERY_COUNT + max_batchsize: MLC_MLPERF_LOADGEN_MAX_BATCHSIZE + mlperf_conf: MLC_MLPERF_CONF + mode: MLC_MLPERF_LOADGEN_MODE + output_dir: MLC_MLPERF_OUTPUT_DIR + performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT + scenario: MLC_MLPERF_LOADGEN_SCENARIO + user_conf: MLC_MLPERF_USER_CONF + skip_preprocess: 
MLC_SKIP_PREPROCESS_DATASET + skip_preprocessing: MLC_SKIP_PREPROCESS_DATASET + target_qps: MLC_MLPERF_LOADGEN_TARGET_QPS + offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS + server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS + target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY + singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY + multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY + performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT + rerun: MLC_RERUN + results_repo: MLC_MLPERF_INFERENCE_RESULTS_REPO new_state_keys: - mlperf-inference-implementation - - CM_SUT_* + - MLC_SUT_* # Env keys which are exposed to higher level scripts new_env_keys: - - CM_MLPERF_* - - CM_DATASET_* - - CM_HW_NAME - - CM_ML_MODEL_* - - CM_MAX_EXAMPLES - - CM_IMAGENET_ACCURACY_DTYPE - - CM_SQUAD_ACCURACY_DTYPE + - MLC_MLPERF_* + - MLC_DATASET_* + - MLC_HW_NAME + - MLC_ML_MODEL_* + - MLC_MAX_EXAMPLES + - MLC_IMAGENET_ACCURACY_DTYPE + - MLC_SQUAD_ACCURACY_DTYPE # Dependencies on other CM scripts @@ -111,9 +111,9 @@ deps: - inference-code update_tags_from_env_with_prefix: _repo.: - - CM_MLPERF_INFERENCE_RESULTS_REPO + - MLC_MLPERF_INFERENCE_RESULTS_REPO env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_MLPERF_INFERENCE_IMPLEMENTATION_REPO extra_cache_tags: results,repo,mlperf # Post dependencies to run this app including for power measurement @@ -123,7 +123,7 @@ post_deps: - runner - mlperf-runner skip_if_env: - CM_MLPERF_SKIP_RUN: + MLC_MLPERF_SKIP_RUN: - 'yes' - yes tags: benchmark-mlperf @@ -139,23 +139,23 @@ variations: group: device default: true env: - CM_MLPERF_DEVICE: cpu + MLC_MLPERF_DEVICE: cpu cuda: group: device env: - CM_MLPERF_DEVICE: gpu - CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart + MLC_MLPERF_DEVICE: gpu + MLC_MLPERF_DEVICE_LIB_NAMESPEC: cudart openshift: group: backend default: true env: - CM_MLPERF_BACKEND: openshift + MLC_MLPERF_BACKEND: openshift pytorch: group: backend env: - CM_MLPERF_BACKEND: pytorch + MLC_MLPERF_BACKEND: pytorch pytorch,cuda: deps: @@ -174,14 +174,14 @@ variations: group: model default: true env: - CM_MODEL: resnet50 + MLC_MODEL: resnet50 retinanet: group: model base: - bs.1 env: - CM_MODEL: retinanet + MLC_MODEL: retinanet bert_: {} @@ -191,15 +191,15 @@ variations: base: - bert_ env: - CM_MODEL: bert-99 - CM_SQUAD_ACCURACY_DTYPE: float32 + MLC_MODEL: bert-99 + MLC_SQUAD_ACCURACY_DTYPE: float32 bert-99.9: group: model base: - bert_ env: - CM_MODEL: bert-99.9 + MLC_MODEL: bert-99.9 bert_: {} @@ -209,15 +209,15 @@ variations: base: - bert_ env: - CM_MODEL: bert-99 - CM_SQUAD_ACCURACY_DTYPE: float32 + MLC_MODEL: bert-99 + MLC_SQUAD_ACCURACY_DTYPE: float32 bert-99.9: group: model base: - bert_ env: - CM_MODEL: bert-99.9 + MLC_MODEL: bert-99.9 gptj_: deps: @@ -231,42 +231,42 @@ variations: base: - gptj_ env: - CM_MODEL: gptj-99 - CM_SQUAD_ACCURACY_DTYPE: float32 + MLC_MODEL: gptj-99 + MLC_SQUAD_ACCURACY_DTYPE: float32 gptj-99.9: group: model base: - gptj_ env: - CM_MODEL: gptj-99.9 + MLC_MODEL: gptj-99.9 llama2-70b_: deps: - tags: get,dataset,openorca,language-processing,original,_redhat env: - CM_MLPERF_IMPLEMENTATION: redhat + MLC_MLPERF_IMPLEMENTATION: redhat env: - CM_VLLM_SERVER_MODEL_NAME: NousResearch/Meta-Llama-3-8B-Instruct # assigned just for testing purpose + MLC_VLLM_SERVER_MODEL_NAME: NousResearch/Meta-Llama-3-8B-Instruct # assigned just for testing purpose llama2-70b-99: group: model base: - llama2-70b_ env: - CM_MODEL: 
llama2-70b-99 + MLC_MODEL: llama2-70b-99 llama2-70b-99.9: group: model base: - llama2-70b_ env: - CM_MODEL: llama2-70b-99.9 + MLC_MODEL: llama2-70b-99.9 singlestream: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: SingleStream + MLC_MLPERF_LOADGEN_SCENARIO: SingleStream singlestream,resnet50: default_variations: @@ -279,17 +279,17 @@ variations: multistream: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: MultiStream + MLC_MLPERF_LOADGEN_SCENARIO: MultiStream offline: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: Offline + MLC_MLPERF_LOADGEN_SCENARIO: Offline server: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: Server + MLC_MLPERF_LOADGEN_SCENARIO: Server uint8: group: precision @@ -302,7 +302,7 @@ variations: group: version default: true env: - CM_MLPERF_INFERENCE_RESULTS_REPO: https://github.com/mlcommons/inference_results_v4.0 + MLC_MLPERF_INFERENCE_RESULTS_REPO: https://github.com/mlcommons/inference_results_v4.0 docker: real_run: False diff --git a/script/app-mlperf-inference-redhat/run.sh b/script/app-mlperf-inference-redhat/run.sh index ddcd0b550..0c6a8fc4a 100644 --- a/script/app-mlperf-inference-redhat/run.sh +++ b/script/app-mlperf-inference-redhat/run.sh @@ -1,7 +1,7 @@ #!/bin/bash -if [[ ${CM_CALL_MLPERF_RUNNER} == "no" ]]; then - cd ${CM_RUN_DIR} - cmd=${CM_RUN_CMD} +if [[ ${MLC_CALL_MLPERF_RUNNER} == "no" ]]; then + cd ${MLC_RUN_DIR} + cmd=${MLC_RUN_CMD} echo "${cmd}" eval "${cmd}" test $? -eq 0 || exit $? diff --git a/script/app-mlperf-inference/README-extra.md b/script/app-mlperf-inference/README-extra.md index e661f3e53..f412c3c8f 100644 --- a/script/app-mlperf-inference/README-extra.md +++ b/script/app-mlperf-inference/README-extra.md @@ -56,7 +56,7 @@ The first run of this CM script takes around 25 minutes on a GCP instance with 1 CM will automatically detect, install and cache all the necessary ML components while adapting them to your system using [portable CM scripts](https://github.com/mlcommons/cm4mlops/tree/main/script). -These dependencies are described using [this simple YAML file](https://github.com/octoml/ck/blob/master/cm-mlops/script/app-mlperf-inference-reference/_cm.yaml#L57) +These dependencies are described using [this simple YAML file](https://github.com/octoml/ck/blob/master/mlc-mlops/script/app-mlperf-inference-reference/_cm.yaml#L57) and can be turned on or off using different environment variables passed to this CM script using `--env.KEY=VALUE`. 
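For example, a minimal sketch of such an override (illustrative only — the exact tag string is an assumption, while `MLC_MLPERF_SKIP_RUN` is one of the `skip_if_env` keys declared in the metadata patched above):

```bash
# Illustrative sketch, not part of this patch: flip one of the skip_if_env
# keys from the patched meta.yaml files so that dependencies are still
# detected, installed and cached, but the benchmark run itself is skipped.
cm run script "app mlperf inference" --env.MLC_MLPERF_SKIP_RUN=yes
```

The same `--env.KEY=VALUE` mechanism applies to the other `MLC_*` keys listed in the `default_env` and `input_mapping` sections of the metadata above.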
You should see the following output in the end: diff --git a/script/app-mlperf-inference/customize.py b/script/app-mlperf-inference/customize.py index 976f5124b..21e34e98a 100644 --- a/script/app-mlperf-inference/customize.py +++ b/script/app-mlperf-inference/customize.py @@ -18,19 +18,19 @@ def preprocess(i): env = i['env'] state = i['state'] - if env.get('CM_MLPERF_IMPLEMENTATION', '') == 'nvidia': - if env.get('CM_NVIDIA_GPU_NAME', '') in [ + if env.get('MLC_MLPERF_IMPLEMENTATION', '') == 'nvidia': + if env.get('MLC_NVIDIA_GPU_NAME', '') in [ "rtx_4090", "a100", "t4", "l4", "orin", "custom"]: - env['CM_NVIDIA_HARNESS_GPU_VARIATION'] = "_" + \ - env['CM_NVIDIA_GPU_NAME'] - env['CM_NVIDIA_GPU_MEMORY'] = '' + env['MLC_NVIDIA_HARNESS_GPU_VARIATION'] = "_" + \ + env['MLC_NVIDIA_GPU_NAME'] + env['MLC_NVIDIA_GPU_MEMORY'] = '' else: gpu_memory = i['state'].get( 'cm_cuda_device_prop', '').get('Global memory') gpu_memory_size = str( int((float(gpu_memory) / (1024 * 1024 * 1024) + 7) / 8) * 8) - env['CM_NVIDIA_GPU_MEMORY'] = gpu_memory_size - env['CM_NVIDIA_HARNESS_GPU_VARIATION'] = '' + env['MLC_NVIDIA_GPU_MEMORY'] = gpu_memory_size + env['MLC_NVIDIA_HARNESS_GPU_VARIATION'] = '' if 'cmd' in i['input']: state['mlperf_inference_run_cmd'] = "cm run script " + \ @@ -42,9 +42,9 @@ def preprocess(i): state['mlperf-inference-implementation']['script_id'] = run_state['script_id'] + \ ":" + ",".join(run_state['script_variation_tags']) - if env.get('CM_VLLM_SERVER_MODEL_NAME', '') != '' and env.get( - 'CM_ML_MODEL_FULL_NAME', '') == '': - env['CM_ML_MODEL_FULL_NAME'] = env['CM_VLLM_SERVER_MODEL_NAME'].replace( + if env.get('MLC_VLLM_SERVER_MODEL_NAME', '') != '' and env.get( + 'MLC_ML_MODEL_FULL_NAME', '') == '': + env['MLC_ML_MODEL_FULL_NAME'] = env['MLC_VLLM_SERVER_MODEL_NAME'].replace( "/", "_") return {'return': 0} @@ -61,14 +61,14 @@ def postprocess(i): env['CMD'] = '' state = i['state'] - # if env.get('CM_MLPERF_USER_CONF', '') == '': + # if env.get('MLC_MLPERF_USER_CONF', '') == '': # return {'return': 0} - output_dir = env['CM_MLPERF_OUTPUT_DIR'] + output_dir = env['MLC_MLPERF_OUTPUT_DIR'] - result_sut_folder_path = env['CM_MLPERF_INFERENCE_RESULTS_SUT_PATH'] + result_sut_folder_path = env['MLC_MLPERF_INFERENCE_RESULTS_SUT_PATH'] - mode = env['CM_MLPERF_LOADGEN_MODE'] + mode = env['MLC_MLPERF_LOADGEN_MODE'] if not os.path.exists(output_dir) or not os.path.exists( os.path.join(output_dir, "mlperf_log_summary.txt")): @@ -76,62 +76,62 @@ def postprocess(i): return {'return': 0} # in power mode copy the log files from tmp_power directory - if env.get('CM_MLPERF_POWER', '') == "yes" and mode == "performance": + if env.get('MLC_MLPERF_POWER', '') == "yes" and mode == "performance": mlperf_power_logs_dir = os.path.join( - env['CM_MLPERF_OUTPUT_DIR'], "..", "power") + env['MLC_MLPERF_OUTPUT_DIR'], "..", "power") mlperf_ranging_logs_dir = os.path.join( - env['CM_MLPERF_OUTPUT_DIR'], "..", "ranging") + env['MLC_MLPERF_OUTPUT_DIR'], "..", "ranging") if os.path.exists(os.path.join( - env['CM_MLPERF_POWER_LOG_DIR'], "power")): + env['MLC_MLPERF_POWER_LOG_DIR'], "power")): if os.path.exists(mlperf_power_logs_dir): shutil.rmtree(mlperf_power_logs_dir) shutil.copytree( os.path.join( - env['CM_MLPERF_POWER_LOG_DIR'], + env['MLC_MLPERF_POWER_LOG_DIR'], "power"), mlperf_power_logs_dir) if os.path.exists(os.path.join( - env['CM_MLPERF_POWER_LOG_DIR'], "ranging")): + env['MLC_MLPERF_POWER_LOG_DIR'], "ranging")): if os.path.exists(mlperf_ranging_logs_dir): shutil.rmtree(mlperf_ranging_logs_dir) shutil.copytree( 
os.path.join( - env['CM_MLPERF_POWER_LOG_DIR'], + env['MLC_MLPERF_POWER_LOG_DIR'], "ranging"), mlperf_ranging_logs_dir) if os.path.exists(os.path.join( - env['CM_MLPERF_POWER_LOG_DIR'], "run_1", "spl.txt")): + env['MLC_MLPERF_POWER_LOG_DIR'], "run_1", "spl.txt")): shutil.copyfile( os.path.join( - env['CM_MLPERF_POWER_LOG_DIR'], + env['MLC_MLPERF_POWER_LOG_DIR'], "run_1", "spl.txt"), os.path.join( - env['CM_MLPERF_OUTPUT_DIR'], + env['MLC_MLPERF_OUTPUT_DIR'], "spl.txt")) - model = env['CM_MODEL'] - model_full_name = env.get('CM_ML_MODEL_FULL_NAME', model) + model = env['MLC_MODEL'] + model_full_name = env.get('MLC_ML_MODEL_FULL_NAME', model) if mode == "accuracy" or mode == "compliance" and env[ - 'CM_MLPERF_LOADGEN_COMPLIANCE_TEST'] == "TEST01": + 'MLC_MLPERF_LOADGEN_COMPLIANCE_TEST'] == "TEST01": out_baseline_accuracy_string = f"""> {os.path.join(output_dir, "accuracy", "baseline_accuracy.txt")} """ out_compliance_accuracy_string = f"""> {os.path.join(output_dir, "accuracy", "compliance_accuracy.txt")} """ if model == "resnet50": accuracy_filename = "accuracy-imagenet.py" - accuracy_filepath = os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", + accuracy_filepath = os.path.join(env['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", accuracy_filename) dataset_args = " --imagenet-val-file " + \ - os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt") + os.path.join(env['MLC_DATASET_AUX_PATH'], "val.txt") accuracy_log_file_option_name = " --mlperf-accuracy-file " - datatype_option = " --dtype " + env['CM_IMAGENET_ACCURACY_DTYPE'] + datatype_option = " --dtype " + env['MLC_IMAGENET_ACCURACY_DTYPE'] elif model == "retinanet": accuracy_filename = "accuracy-openimages.py" - accuracy_filepath = os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", + accuracy_filepath = os.path.join(env['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", accuracy_filename) dataset_args = " --openimages-dir " + \ os.getcwd() # just to make the script happy @@ -141,20 +141,20 @@ def postprocess(i): elif 'bert' in model: accuracy_filename = "accuracy-squad.py" accuracy_filepath = os.path.join( - env['CM_MLPERF_INFERENCE_BERT_PATH'], accuracy_filename) - dataset_args = " --val_data '" + env['CM_DATASET_SQUAD_VAL_PATH'] + "' --vocab_file '" + \ - env['CM_DATASET_SQUAD_VOCAB_PATH'] + \ + env['MLC_MLPERF_INFERENCE_BERT_PATH'], accuracy_filename) + dataset_args = " --val_data '" + env['MLC_DATASET_SQUAD_VAL_PATH'] + "' --vocab_file '" + \ + env['MLC_DATASET_SQUAD_VOCAB_PATH'] + \ "' --out_file predictions.json " accuracy_log_file_option_name = " --log_file " datatype_option = " --output_dtype " + \ - env['CM_SQUAD_ACCURACY_DTYPE'] + env['MLC_SQUAD_ACCURACY_DTYPE'] elif 'rgat' in model: accuracy_filename = "accuracy_igbh.py" accuracy_filepath = os.path.join( - env['CM_MLPERF_INFERENCE_RGAT_PATH'], "tools", accuracy_filename) - dataset_args = " --dataset-path '" + env['CM_DATASET_IGBH_PATH'] + "' --dataset-size '" + \ - env['CM_DATASET_IGBH_SIZE'] + "'" + env['MLC_MLPERF_INFERENCE_RGAT_PATH'], "tools", accuracy_filename) + dataset_args = " --dataset-path '" + env['MLC_DATASET_IGBH_PATH'] + "' --dataset-size '" + \ + env['MLC_DATASET_IGBH_SIZE'] + "'" accuracy_log_file_option_name = " --mlperf-accuracy-file " datatype_option = "" out_baseline_accuracy_string = f""" --output-file {os.path.join(output_dir, "accuracy", "baseline_accuracy.txt")} """ @@ -172,24 +172,24 @@ def postprocess(i): pass # Not giving an error now. 
But accuracy paths need to be done for other benchmarks which may need the non-determinism test # return {'return': 1, 'error': f'Accuracy paths not done for model # {model}'} - scenario = env['CM_MLPERF_LOADGEN_SCENARIO'] + scenario = env['MLC_MLPERF_LOADGEN_SCENARIO'] if not state.get('mlc-mlperf-inference-results'): state['mlc-mlperf-inference-results'] = {} if not state.get('mlc-mlperf-inference-results-last'): state['mlc-mlperf-inference-results-last'] = {} if not state['mlc-mlperf-inference-results'].get( - state['CM_SUT_CONFIG_NAME']): - state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']] = {} - if not state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + state['MLC_SUT_CONFIG_NAME']): + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']] = {} + if not state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] ].get(model): - state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']][model] = {} - if not state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']][model] = {} + if not state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] ][model].get(scenario): - state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] ][model][scenario] = {} - # if env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes" and mode == + # if env.get("MLC_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes" and mode == # "performance" and scenario != "Server": if mode == "performance" and scenario != "Server": os.chdir(output_dir) @@ -224,9 +224,9 @@ def postprocess(i): if "\\(ns\\)" in pattern[scenario]: value = str(float(value) / 1000000) # convert to milliseconds - sut_name = state['CM_SUT_CONFIG_NAME'] - sut_config = state['CM_SUT_CONFIG'][sut_name] - sut_config_path = state['CM_SUT_CONFIG_PATH'][sut_name] + sut_name = state['MLC_SUT_CONFIG_NAME'] + sut_config = state['MLC_SUT_CONFIG'][sut_name] + sut_config_path = state['MLC_SUT_CONFIG_PATH'][sut_name] if scenario not in sut_config[model_full_name]: sut_config[model_full_name][scenario] = {} sut_config[model_full_name][scenario][metric] = value @@ -245,20 +245,20 @@ def postprocess(i): else: measurements = {} measurements['starting_weights_filename'] = env.get( - 'CM_ML_MODEL_STARTING_WEIGHTS_FILENAME', env.get( - 'CM_ML_MODEL_FILE', measurements.get( + 'MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME', env.get( + 'MLC_ML_MODEL_FILE', measurements.get( 'starting_weights_filename', ''))) measurements['retraining'] = env.get( - 'CM_ML_MODEL_RETRAINING', measurements.get( + 'MLC_ML_MODEL_RETRAINING', measurements.get( 'retraining', 'no')) measurements['input_data_types'] = env.get( - 'CM_ML_MODEL_INPUTS_DATA_TYPE', measurements.get( + 'MLC_ML_MODEL_INPUTS_DATA_TYPE', measurements.get( 'input_data_types', 'fp32')) measurements['weight_data_types'] = env.get( - 'CM_ML_MODEL_WEIGHTS_DATA_TYPE', measurements.get( + 'MLC_ML_MODEL_WEIGHTS_DATA_TYPE', measurements.get( 'weight_data_types', 'fp32')) measurements['weight_transformations'] = env.get( - 'CM_ML_MODEL_WEIGHT_TRANSFORMATIONS', measurements.get( + 'MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS', measurements.get( 'weight_transformations', 'none')) os.chdir(output_dir) @@ -279,7 +279,7 @@ def postprocess(i): state['app_mlperf_inference_log_summary'][y[0].strip().lower() ] = y[1].strip() - if env.get("CM_MLPERF_PRINT_SUMMARY", "").lower() not in [ + if env.get("MLC_MLPERF_PRINT_SUMMARY", "").lower() not 
in [ "no", "0", "false"]: print("\n") print(mlperf_log_summary) @@ -288,15 +288,15 @@ def postprocess(i): json.dump(measurements, fp, indent=2) cm_sut_info = {} - cm_sut_info['system_name'] = state['CM_SUT_META']['system_name'] - cm_sut_info['implementation'] = env['CM_MLPERF_IMPLEMENTATION'] - cm_sut_info['device'] = env['CM_MLPERF_DEVICE'] - cm_sut_info['framework'] = state['CM_SUT_META']['framework'] - cm_sut_info['run_config'] = env['CM_MLPERF_INFERENCE_SUT_RUN_CONFIG'] - with open(os.path.join(result_sut_folder_path, "cm-sut-info.json"), "w") as fp: + cm_sut_info['system_name'] = state['MLC_SUT_META']['system_name'] + cm_sut_info['implementation'] = env['MLC_MLPERF_IMPLEMENTATION'] + cm_sut_info['device'] = env['MLC_MLPERF_DEVICE'] + cm_sut_info['framework'] = state['MLC_SUT_META']['framework'] + cm_sut_info['run_config'] = env['MLC_MLPERF_INFERENCE_SUT_RUN_CONFIG'] + with open(os.path.join(result_sut_folder_path, "mlc-sut-info.json"), "w") as fp: json.dump(cm_sut_info, fp, indent=2) - system_meta = state['CM_SUT_META'] + system_meta = state['MLC_SUT_META'] with open("system_meta.json", "w") as fp: json.dump(system_meta, fp, indent=2) @@ -312,14 +312,14 @@ def postprocess(i): state['app_mlperf_inference_measurements'] = copy.deepcopy( measurements) - if os.path.exists(env['CM_MLPERF_CONF']): - shutil.copy(env['CM_MLPERF_CONF'], 'mlperf.conf') + if os.path.exists(env['MLC_MLPERF_CONF']): + shutil.copy(env['MLC_MLPERF_CONF'], 'mlperf.conf') - if os.path.exists(env['CM_MLPERF_USER_CONF']): - shutil.copy(env['CM_MLPERF_USER_CONF'], 'user.conf') + if os.path.exists(env['MLC_MLPERF_USER_CONF']): + shutil.copy(env['MLC_MLPERF_USER_CONF'], 'user.conf') result, valid, power_result = mlperf_utils.get_result_from_log( - env['CM_MLPERF_LAST_RELEASE'], model, scenario, output_dir, mode, env.get('CM_MLPERF_INFERENCE_SOURCE_VERSION')) + env['MLC_MLPERF_LAST_RELEASE'], model, scenario, output_dir, mode, env.get('MLC_MLPERF_INFERENCE_SOURCE_VERSION')) power = None power_efficiency = None if power_result: @@ -328,9 +328,9 @@ def postprocess(i): power = power_result_split[0] power_efficiency = power_result_split[1] - state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] ][model][scenario][mode] = result - state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] ][model][scenario][mode + '_valid'] = valid.get(mode, False) state['mlc-mlperf-inference-results-last'][mode] = result @@ -338,14 +338,14 @@ def postprocess(i): '_valid'] = valid.get(mode, False) if power: - state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] ][model][scenario]['power'] = power - state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] ][model][scenario]['power_valid'] = valid['power'] state['mlc-mlperf-inference-results-last']['power'] = power state['mlc-mlperf-inference-results-last']['power_valid'] = valid['power'] if power_efficiency: - state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] ][model][scenario]['power_efficiency'] = power_efficiency state['mlc-mlperf-inference-results-last']['power_efficiency'] = power_efficiency @@ -358,15 +358,15 @@ def postprocess(i): } x = '' - if env.get('CM_HOST_OS_FLAVOR', '') != '': - x += 
env['CM_HOST_OS_FLAVOR'] - if env.get('CM_HOST_OS_VERSION', '') != '': - x += ' ' + env['CM_HOST_OS_VERSION'] + if env.get('MLC_HOST_OS_FLAVOR', '') != '': + x += env['MLC_HOST_OS_FLAVOR'] + if env.get('MLC_HOST_OS_VERSION', '') != '': + x += ' ' + env['MLC_HOST_OS_VERSION'] if x != '': host_info['os_version_sys'] = x - if env.get('CM_HOST_SYSTEM_NAME', '') != '': - host_info['system_name'] = env['CM_HOST_SYSTEM_NAME'] + if env.get('MLC_HOST_SYSTEM_NAME', '') != '': + host_info['system_name'] = env['MLC_HOST_SYSTEM_NAME'] # Check CM automation repository repo_name = 'mlcommons@mlperf-automations' @@ -471,22 +471,22 @@ def postprocess(i): elif mode == "compliance": - test = env.get("CM_MLPERF_LOADGEN_COMPLIANCE_TEST", "TEST01") + test = env.get("MLC_MLPERF_LOADGEN_COMPLIANCE_TEST", "TEST01") RESULT_DIR = os.path.split(output_dir)[0] COMPLIANCE_DIR = output_dir OUTPUT_DIR = os.path.dirname(COMPLIANCE_DIR) SCRIPT_PATH = os.path.join( - env['CM_MLPERF_INFERENCE_SOURCE'], + env['MLC_MLPERF_INFERENCE_SOURCE'], "compliance", "nvidia", test, "run_verification.py") if test == "TEST06": - cmd = f"{env['CM_PYTHON_BIN_WITH_PATH']} {SCRIPT_PATH} -c {COMPLIANCE_DIR} -o {OUTPUT_DIR} --scenario {scenario} --dtype int32" + cmd = f"{env['MLC_PYTHON_BIN_WITH_PATH']} {SCRIPT_PATH} -c {COMPLIANCE_DIR} -o {OUTPUT_DIR} --scenario {scenario} --dtype int32" else: - cmd = f"{env['CM_PYTHON_BIN_WITH_PATH']} {SCRIPT_PATH} -r {RESULT_DIR} -c {COMPLIANCE_DIR} -o {OUTPUT_DIR}" + cmd = f"{env['MLC_PYTHON_BIN_WITH_PATH']} {SCRIPT_PATH} -r {RESULT_DIR} -c {COMPLIANCE_DIR} -o {OUTPUT_DIR}" print(cmd) os.system(cmd) @@ -496,7 +496,7 @@ def postprocess(i): run_script_input = i['run_script_input'] automation = i['automation'] - SCRIPT_PATH = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "compliance", "nvidia", test, + SCRIPT_PATH = os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "compliance", "nvidia", test, "create_accuracy_baseline.sh") TEST01_DIR = os.path.join(OUTPUT_DIR, "TEST01") OUTPUT_DIR = os.path.join(OUTPUT_DIR, "TEST01", "accuracy") @@ -529,7 +529,7 @@ def postprocess(i): baseline_accuracy_file = os.path.join( TEST01_DIR, "mlperf_log_accuracy_baseline.json") - CMD = "cd " + ACCURACY_DIR + " && " + env['CM_PYTHON_BIN_WITH_PATH'] + ' ' + accuracy_filepath + accuracy_log_file_option_name + \ + CMD = "cd " + ACCURACY_DIR + " && " + env['MLC_PYTHON_BIN_WITH_PATH'] + ' ' + accuracy_filepath + accuracy_log_file_option_name + \ baseline_accuracy_file + ' ' + dataset_args + \ datatype_option + out_baseline_accuracy_string @@ -543,7 +543,7 @@ def postprocess(i): return {'return': 1, 'error': f"{baseline_accuracy_file} is empty"} - CMD = "cd " + ACCURACY_DIR + " && " + env['CM_PYTHON_BIN_WITH_PATH'] + ' ' + accuracy_filepath + accuracy_log_file_option_name + \ + CMD = "cd " + ACCURACY_DIR + " && " + env['MLC_PYTHON_BIN_WITH_PATH'] + ' ' + accuracy_filepath + accuracy_log_file_option_name + \ os.path.join(TEST01_DIR, "mlperf_log_accuracy.json") + \ dataset_args + datatype_option + out_compliance_accuracy_string @@ -555,17 +555,17 @@ def postprocess(i): import submission_checker as checker is_valid = checker.check_compliance_perf_dir( COMPLIANCE_DIR) if test != "TEST06" else True - state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] ][model][scenario][test] = "passed" if is_valid else "failed" # portion of the code where the avg utilisation and system informations are extracted # NOTE: The section is under development and print statements are 
added # for further debugging - if env.get('CM_PROFILE_NVIDIA_POWER', '') == "on": + if env.get('MLC_PROFILE_NVIDIA_POWER', '') == "on": import pandas as pd system_utilisation_info_dump = {} logs_dir = output_dir - # logs_dir = env.get('CM_LOGS_DIR', env['CM_RUN_DIR']) + # logs_dir = env.get('MLC_LOGS_DIR', env['MLC_RUN_DIR']) sys_utilisation_log = pd.read_csv( os.path.join( logs_dir, @@ -609,11 +609,11 @@ def postprocess(i): if state.get( 'mlperf-inference-implementation') and state['mlperf-inference-implementation'].get('version_info'): - env['CM_MLPERF_RUN_JSON_VERSION_INFO_FILE'] = os.path.join( + env['MLC_MLPERF_RUN_JSON_VERSION_INFO_FILE'] = os.path.join( output_dir, "mlc-version-info.json") - env['CM_MLPERF_RUN_DEPS_GRAPH'] = os.path.join( + env['MLC_MLPERF_RUN_DEPS_GRAPH'] = os.path.join( output_dir, "mlc-deps.png") - env['CM_MLPERF_RUN_DEPS_MERMAID'] = os.path.join( + env['MLC_MLPERF_RUN_DEPS_MERMAID'] = os.path.join( output_dir, "mlc-deps.mmd") with open(os.path.join(output_dir, "mlc-version-info.json"), "w") as f: f.write( @@ -621,7 +621,7 @@ def postprocess(i): state['mlperf-inference-implementation']['version_info'], indent=2)) - if env.get('CM_DUMP_SYSTEM_INFO', True): + if env.get('MLC_DUMP_SYSTEM_INFO', True): dump_script_output( "detect,os", env, @@ -638,8 +638,8 @@ def postprocess(i): os.path.join( output_dir, "cpu_info.json")) - env['CM_DUMP_RAW_PIP_FREEZE_FILE_PATH'] = os.path.join( - env['CM_MLPERF_OUTPUT_DIR'], "pip_freeze.raw") + env['MLC_DUMP_RAW_PIP_FREEZE_FILE_PATH'] = os.path.join( + env['MLC_MLPERF_OUTPUT_DIR'], "pip_freeze.raw") dump_script_output( "dump,pip,freeze", env, diff --git a/script/app-mlperf-inference/meta.yaml b/script/app-mlperf-inference/meta.yaml index bf5057814..305535b49 100644 --- a/script/app-mlperf-inference/meta.yaml +++ b/script/app-mlperf-inference/meta.yaml @@ -21,62 +21,62 @@ tags: # Default environment default_env: - CM_MLPERF_LOADGEN_MODE: accuracy - CM_MLPERF_LOADGEN_SCENARIO: Offline - CM_OUTPUT_FOLDER_NAME: test_results - CM_MLPERF_RUN_STYLE: test - CM_TEST_QUERY_COUNT: '10' - CM_MLPERF_QUANTIZATION: off - CM_GET_PLATFORM_DETAILS: yes + MLC_MLPERF_LOADGEN_MODE: accuracy + MLC_MLPERF_LOADGEN_SCENARIO: Offline + MLC_OUTPUT_FOLDER_NAME: test_results + MLC_MLPERF_RUN_STYLE: test + MLC_TEST_QUERY_COUNT: '10' + MLC_MLPERF_QUANTIZATION: off + MLC_GET_PLATFORM_DETAILS: yes env: - CM_MLPERF_PRINT_SUMMARY: "no" - CM_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'no' + MLC_MLPERF_PRINT_SUMMARY: "no" + MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'no' # Map script inputs to environment variables input_mapping: - count: CM_MLPERF_LOADGEN_QUERY_COUNT - docker: CM_RUN_DOCKER_CONTAINER - hw_name: CM_HW_NAME + count: MLC_MLPERF_LOADGEN_QUERY_COUNT + docker: MLC_RUN_DOCKER_CONTAINER + hw_name: MLC_HW_NAME imagenet_path: IMAGENET_PATH - max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE - mode: CM_MLPERF_LOADGEN_MODE - num_threads: CM_NUM_THREADS + max_batchsize: MLC_MLPERF_LOADGEN_MAX_BATCHSIZE + mode: MLC_MLPERF_LOADGEN_MODE + num_threads: MLC_NUM_THREADS output_dir: OUTPUT_BASE_DIR - power: CM_MLPERF_POWER - power_server: CM_MLPERF_POWER_SERVER_ADDRESS - ntp_server: CM_MLPERF_POWER_NTP_SERVER - max_amps: CM_MLPERF_POWER_MAX_AMPS - max_volts: CM_MLPERF_POWER_MAX_VOLTS - regenerate_files: CM_REGENERATE_MEASURE_FILES - rerun: CM_RERUN - scenario: CM_MLPERF_LOADGEN_SCENARIO - test_query_count: CM_TEST_QUERY_COUNT - clean: CM_MLPERF_CLEAN_SUBMISSION_DIR - target_qps: CM_MLPERF_LOADGEN_TARGET_QPS - target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY - offline_target_qps: 
CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS - server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS - singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY - multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY - readme: CM_MLPERF_README - debug: CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM - gpu_name: CM_NVIDIA_GPU_NAME - nvidia_llama2_dataset_file_path: CM_NVIDIA_LLAMA_DATASET_FILE_PATH - tp_size: CM_NVIDIA_TP_SIZE - use_dataset_from_host: CM_USE_DATASET_FROM_HOST + power: MLC_MLPERF_POWER + power_server: MLC_MLPERF_POWER_SERVER_ADDRESS + ntp_server: MLC_MLPERF_POWER_NTP_SERVER + max_amps: MLC_MLPERF_POWER_MAX_AMPS + max_volts: MLC_MLPERF_POWER_MAX_VOLTS + regenerate_files: MLC_REGENERATE_MEASURE_FILES + rerun: MLC_RERUN + scenario: MLC_MLPERF_LOADGEN_SCENARIO + test_query_count: MLC_TEST_QUERY_COUNT + clean: MLC_MLPERF_CLEAN_SUBMISSION_DIR + target_qps: MLC_MLPERF_LOADGEN_TARGET_QPS + target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY + offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS + server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS + singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY + multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY + readme: MLC_MLPERF_README + debug: MLC_DEBUG_SCRIPT_BENCHMARK_PROGRAM + gpu_name: MLC_NVIDIA_GPU_NAME + nvidia_llama2_dataset_file_path: MLC_NVIDIA_LLAMA_DATASET_FILE_PATH + tp_size: MLC_NVIDIA_TP_SIZE + use_dataset_from_host: MLC_USE_DATASET_FROM_HOST predeps: False # Duplicate CM environment variables to the ones used in native apps env_key_mappings: - CM_HOST_: HOST_ - CM_ML_: ML_ - CM_MLPERF_TVM: MLPERF_TVM + MLC_HOST_: HOST_ + MLC_ML_: ML_ + MLC_MLPERF_TVM: MLPERF_TVM # Env keys which are exposed to higher level scripts new_env_keys: - - CM_MLPERF_* + - MLC_MLPERF_* new_state_keys: - app_mlperf_inference_* @@ -107,38 +107,38 @@ deps: - inference-src - tags: pull,git,repo env: - CM_GIT_CHECKOUT_PATH: '<<>>' + MLC_GIT_CHECKOUT_PATH: '<<>>' enable_if_env: - CM_MLPERF_INFERENCE_PULL_SRC_CHANGES: + MLC_MLPERF_INFERENCE_PULL_SRC_CHANGES: - 'yes' - tags: get,mlperf,inference,utils - tags: install,pip-package,for-cmind-python,_package.pandas enable_if_env: - CM_PROFILE_NVIDIA_POWER: + MLC_PROFILE_NVIDIA_POWER: - on posthook_deps: - tags: get,mlperf,sut,description #populate system meta information like framework - tags: get,platform,details enable_if_any_env: - CM_GET_PLATFORM_DETAILS: + MLC_GET_PLATFORM_DETAILS: - yes skip_if_env: - CM_MLPERF_LOADGEN_MODE: + MLC_MLPERF_LOADGEN_MODE: - accuracy env: - CM_PLATFORM_DETAILS_FILE_PATH: '<<>>/system_info.txt' + MLC_PLATFORM_DETAILS_FILE_PATH: '<<>>/system_info.txt' post_deps: - tags: draw,graph,from-json enable_if_env: - CM_MLPERF_RUN_JSON_VERSION_INFO_FILE: + MLC_MLPERF_RUN_JSON_VERSION_INFO_FILE: - on env: - CM_JSON_INPUT_FILE: <<>> - CM_OUTPUT_IMAGE_PATH: <<>> - CM_OUTPUT_MERMAID_PATH: <<>> + MLC_JSON_INPUT_FILE: <<>> + MLC_OUTPUT_IMAGE_PATH: <<>> + MLC_OUTPUT_MERMAID_PATH: <<>> # Order of variations for documentation variation_groups_order: @@ -160,17 +160,17 @@ variations: imagenet-accuracy-script: tags: _int64 env: - CM_MLPERF_CPP: 'yes' - CM_MLPERF_IMPLEMENTATION: mlcommons_cpp - CM_IMAGENET_ACCURACY_DTYPE: float32 - CM_OPENIMAGES_ACCURACY_DTYPE: float32 + MLC_MLPERF_CPP: 'yes' + MLC_MLPERF_IMPLEMENTATION: mlcommons_cpp + MLC_IMAGENET_ACCURACY_DTYPE: float32 + MLC_OPENIMAGES_ACCURACY_DTYPE: float32 prehook_deps: - names: - cpp-mlperf-inference - mlperf-inference-implementation tags: app,mlperf,cpp,inference 
skip_if_env: - CM_SKIP_RUN: + MLC_SKIP_RUN: - yes mil: @@ -192,17 +192,17 @@ variations: imagenet-accuracy-script: tags: _float32 env: - CM_MLPERF_TFLITE_CPP: 'yes' - CM_MLPERF_CPP: 'yes' - CM_MLPERF_IMPLEMENTATION: ctuning_cpp_tflite - CM_IMAGENET_ACCURACY_DTYPE: float32 + MLC_MLPERF_TFLITE_CPP: 'yes' + MLC_MLPERF_CPP: 'yes' + MLC_MLPERF_IMPLEMENTATION: ctuning_cpp_tflite + MLC_IMAGENET_ACCURACY_DTYPE: float32 prehook_deps: - names: - tflite-cpp-mlperf-inference - mlperf-inference-implementation tags: app,mlperf,tflite-cpp,inference skip_if_env: - CM_SKIP_RUN: + MLC_SKIP_RUN: - yes reference: @@ -224,20 +224,20 @@ variations: llama3_1-405b-accuracy-script: tags: _int32 env: - CM_MLPERF_PYTHON: 'yes' - CM_MLPERF_IMPLEMENTATION: mlcommons_python - CM_SQUAD_ACCURACY_DTYPE: float32 - CM_IMAGENET_ACCURACY_DTYPE: float32 - CM_OPENIMAGES_ACCURACY_DTYPE: float32 - CM_LIBRISPEECH_ACCURACY_DTYPE: float32 - CM_CNNDM_ACCURACY_DTYPE: int32 + MLC_MLPERF_PYTHON: 'yes' + MLC_MLPERF_IMPLEMENTATION: mlcommons_python + MLC_SQUAD_ACCURACY_DTYPE: float32 + MLC_IMAGENET_ACCURACY_DTYPE: float32 + MLC_OPENIMAGES_ACCURACY_DTYPE: float32 + MLC_LIBRISPEECH_ACCURACY_DTYPE: float32 + MLC_CNNDM_ACCURACY_DTYPE: int32 prehook_deps: - names: - python-reference-mlperf-inference - mlperf-inference-implementation tags: app,mlperf,reference,inference skip_if_env: - CM_SKIP_RUN: + MLC_SKIP_RUN: - yes neuralmagic: @@ -299,36 +299,36 @@ variations: deps: - tags: get,ml-model,gptj,raw skip_if_env: - CM_MLPERF_NVIDIA_SKIP_GPTJ: + MLC_MLPERF_NVIDIA_SKIP_GPTJ: - "yes" - tags: get,ml-model,gptj,_nvidia,_fp8 skip_if_env: - CM_MLPERF_NVIDIA_SKIP_GPTJ: + MLC_MLPERF_NVIDIA_SKIP_GPTJ: - "yes" - tags: get,ml-model,llama2-70b,_nvidia,_fp8 update_tags_from_env_with_prefix: _tp-size.: - - CM_NVIDIA_TP_SIZE + - MLC_NVIDIA_TP_SIZE skip_if_env: - CM_MLPERF_NVIDIA_SKIP_LLAMA2_70B: + MLC_MLPERF_NVIDIA_SKIP_LLAMA2_70B: - "yes" - tags: get,dataset,imagenet,validation,original,_full names: - imagenet-original - dataset-original skip_if_env: - CM_MLPERF_NVIDIA_SKIP_RESNET50: + MLC_MLPERF_NVIDIA_SKIP_RESNET50: - "yes" - tags: get,dlrm,data,mlperf,inference,_nvidia skip_if_env: - CM_MLPERF_NVIDIA_SKIP_DLRM: + MLC_MLPERF_NVIDIA_SKIP_DLRM: - "yes" - enable_if_env: - CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST: + MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST: - 'yes' tags: get,ml-model,sdxl,_fp16,_rclone skip_if_env: - CM_MLPERF_NVIDIA_SKIP_SDXL: + MLC_MLPERF_NVIDIA_SKIP_SDXL: - "yes" env: BUILD_TRTLLM: 1 @@ -340,20 +340,20 @@ variations: image_name: mlperf-inference-nvidia-v4.1-dev-common update_meta_if_env: - enable_if_env: - CM_HOST_PLATFORM_FLAVOR: + MLC_HOST_PLATFORM_FLAVOR: - x86_64 docker: base_image: nvcr.io/nvidia/mlperf/mlperf-inference:mlpinf-v4.0-cuda12.2-cudnn8.9-x86_64-ubuntu20.04-public env: - CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3b0-cp38-cp38-linux_x86_64.whl' + MLC_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3b0-cp38-cp38-linux_x86_64.whl' - skip_if_env: - CM_HOST_PLATFORM_FLAVOR: + MLC_HOST_PLATFORM_FLAVOR: - x86_64 docker: base_image: nvcr.io/nvidia/mlperf/mlperf-inference:mlpinf-v4.1-cuda12.4-pytorch24.04-ubuntu22.04-aarch64-GraceHopper-release env: - CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3b0-cp310-cp310-linux_aarch64.whl' + MLC_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3b0-cp310-cp310-linux_aarch64.whl' @@ -376,7 +376,7 @@ variations: - tags: get,ml-model,gptj,_nvidia,_fp8 update_tags_from_env_with_prefix: _tp-size.: - - CM_NVIDIA_TP_SIZE + - MLC_NVIDIA_TP_SIZE nvidia-original,r4.1_default: docker: 
@@ -388,7 +388,7 @@ variations: - tags: get,ml-model,gptj,_nvidia,_fp8 update_tags_from_env_with_prefix: _tp-size.: - - CM_NVIDIA_TP_SIZE + - MLC_NVIDIA_TP_SIZE nvidia-original,r4.1-dev_default,llama2-70b_: @@ -398,7 +398,7 @@ variations: - tags: get,ml-model,llama2-70b,_nvidia,_fp8 update_tags_from_env_with_prefix: _tp-size.: - - CM_NVIDIA_TP_SIZE + - MLC_NVIDIA_TP_SIZE env: BUILD_TRTLLM: 1 @@ -408,7 +408,7 @@ variations: - tags: get,ml-model,llama2-70b,_nvidia,_fp8 update_tags_from_env_with_prefix: _tp-size.: - - CM_NVIDIA_TP_SIZE + - MLC_NVIDIA_TP_SIZE env: BUILD_TRTLLM: 1 @@ -425,19 +425,19 @@ variations: - mlperf-inference-nvidia-scratch-space - tags: get,nvidia-docker skip_if_env: - CM_SKIP_GET_NVIDIA_DOCKER: + MLC_SKIP_GET_NVIDIA_DOCKER: - yes mounts: - - "${{ CM_CUDNN_TAR_FILE_PATH }}:${{ CM_CUDNN_TAR_FILE_PATH }}" - - "${{ CM_TENSORRT_TAR_FILE_PATH }}:${{ CM_TENSORRT_TAR_FILE_PATH }}" + - "${{ MLC_CUDNN_TAR_FILE_PATH }}:${{ MLC_CUDNN_TAR_FILE_PATH }}" + - "${{ MLC_TENSORRT_TAR_FILE_PATH }}:${{ MLC_TENSORRT_TAR_FILE_PATH }}" - "${{ CUDA_RUN_FILE_LOCAL_PATH }}:${{ CUDA_RUN_FILE_LOCAL_PATH }}" - "${{ MLPERF_SCRATCH_PATH }}:${{ MLPERF_SCRATCH_PATH }}" update_meta_if_env: - enable_if_env: - CM_HOST_OS_FLAVOR: + MLC_HOST_OS_FLAVOR: - ubuntu - CM_HOST_OS_VERSION: + MLC_HOST_OS_VERSION: - 20.04 docker: extra_run_args: ' --runtime=nvidia --cap-add SYS_ADMIN --cap-add SYS_TIME --security-opt apparmor=unconfined --security-opt seccomp=unconfined' @@ -459,12 +459,12 @@ variations: tags: _int32 env: BUILD_TRTLLM: 0 - CM_MLPERF_IMPLEMENTATION: nvidia - CM_SQUAD_ACCURACY_DTYPE: float16 - CM_IMAGENET_ACCURACY_DTYPE: int32 - CM_CNNDM_ACCURACY_DTYPE: int32 - CM_LIBRISPEECH_ACCURACY_DTYPE: int8 - CM_DOCKER_USE_VIRTUAL_PYTHON: no + MLC_MLPERF_IMPLEMENTATION: nvidia + MLC_SQUAD_ACCURACY_DTYPE: float16 + MLC_IMAGENET_ACCURACY_DTYPE: int32 + MLC_CNNDM_ACCURACY_DTYPE: int32 + MLC_LIBRISPEECH_ACCURACY_DTYPE: int8 + MLC_DOCKER_USE_VIRTUAL_PYTHON: no prehook_deps: - names: - nvidia-original-mlperf-inference @@ -472,13 +472,13 @@ variations: - mlperf-inference-implementation tags: reproduce,mlperf,nvidia,inference,_run_harness skip_if_env: - CM_SKIP_RUN: + MLC_SKIP_RUN: - yes update_tags_from_env_with_prefix: "_gpu_memory." 
: - - CM_NVIDIA_GPU_MEMORY + - MLC_NVIDIA_GPU_MEMORY update_tags_from_env: - - CM_NVIDIA_HARNESS_GPU_VARIATION + - MLC_NVIDIA_HARNESS_GPU_VARIATION intel: alias: intel-original @@ -490,7 +490,7 @@ variations: interactive: True extra_run_args: ' --privileged' mounts: - - "${{ CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH }}:${{ CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH }}" + - "${{ MLC_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH }}:${{ MLC_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH }}" - "${{ GPTJ_CHECKPOINT_PATH }}:${{ GPTJ_CHECKPOINT_PATH }}" skip_run_cmd: 'no' shm_size: '32gb' @@ -500,7 +500,7 @@ variations: docker_input_mapping: criteo_preprocessed_path: CRITEO_PREPROCESSED_PATH dlrm_data_path: DLRM_DATA_PATH - intel_gptj_int8_model_path: CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH + intel_gptj_int8_model_path: MLC_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH default_variations: device: cpu backend: pytorch @@ -512,10 +512,10 @@ variations: - mlperf-inference-implementation tags: reproduce,mlperf,inference,intel skip_if_env: - CM_SKIP_RUN: + MLC_SKIP_RUN: - yes env: - CM_MLPERF_IMPLEMENTATION: intel + MLC_MLPERF_IMPLEMENTATION: intel intel-original,gptj_: adr: @@ -552,10 +552,10 @@ variations: - mlperf-inference-implementation tags: reproduce,mlperf,inference,amd skip_if_env: - CM_SKIP_RUN: + MLC_SKIP_RUN: - yes env: - CM_MLPERF_IMPLEMENTATION: amd + MLC_MLPERF_IMPLEMENTATION: amd redhat: group: @@ -571,10 +571,10 @@ variations: - mlperf-inference-implementation tags: reproduce,mlperf,inference,redhat skip_if_env: - CM_SKIP_RUN: + MLC_SKIP_RUN: - yes env: - CM_MLPERF_IMPLEMENTATION: redhat + MLC_MLPERF_IMPLEMENTATION: redhat docker: interactive: True @@ -595,10 +595,10 @@ variations: - mlperf-inference-implementation tags: reproduce,mlperf,inference,kilt skip_if_env: - CM_SKIP_RUN: + MLC_SKIP_RUN: - yes env: - CM_MLPERF_IMPLEMENTATION: qualcomm + MLC_MLPERF_IMPLEMENTATION: qualcomm docker: interactive: True @@ -651,7 +651,7 @@ variations: default: true env: - CM_MODEL: + MLC_MODEL: resnet50 deps: - tags: get,dataset-aux,imagenet-aux @@ -660,10 +660,10 @@ variations: tags: _resnet50 posthook_deps: - enable_if_env: - CM_MLPERF_LOADGEN_MODE: + MLC_MLPERF_LOADGEN_MODE: - accuracy - all - CM_MLPERF_ACCURACY_RESULTS_DIR: + MLC_MLPERF_ACCURACY_RESULTS_DIR: - 'on' names: - mlperf-accuracy-script @@ -673,7 +673,7 @@ variations: deps: - tags: get,dataset,imagenet,validation,original,_full enable_if_env: - CM_USE_DATASET_FROM_HOST: + MLC_USE_DATASET_FROM_HOST: - 'yes' names: - imagenet-original @@ -683,17 +683,17 @@ variations: group: model env: - CM_MODEL: + MLC_MODEL: retinanet add_deps_recursive: mlperf-inference-implementation: tags: _retinanet posthook_deps: - enable_if_env: - CM_MLPERF_LOADGEN_MODE: + MLC_MLPERF_LOADGEN_MODE: - accuracy - all - CM_MLPERF_ACCURACY_RESULTS_DIR: + MLC_MLPERF_ACCURACY_RESULTS_DIR: - 'on' names: - mlperf-accuracy-script @@ -705,13 +705,13 @@ variations: - names: - openimages-original enable_if_env: - CM_USE_DATASET_FROM_HOST: + MLC_USE_DATASET_FROM_HOST: - 'yes' tags: get,dataset,original,openimages,_validation,_full,_custom-annotations - names: - openimages-calibration enable_if_env: - CM_USE_DATASET_FROM_HOST: + MLC_USE_DATASET_FROM_HOST: - 'yes' tags: get,dataset,original,openimages,_calibration @@ -721,7 +721,7 @@ variations: base: - 3d-unet_ env: - CM_MODEL: + MLC_MODEL: 3d-unet-99 add_deps_recursive: mlperf-inference-implementation: @@ -733,7 +733,7 @@ variations: base: - 3d-unet_ env: - CM_MODEL: + MLC_MODEL: 3d-unet-99.9 add_deps_recursive: 
mlperf-inference-implementation: @@ -741,18 +741,18 @@ variations: 3d-unet_: default_env: - CM_MLPERF_INFERENCE_TEST_QPS: "0.01" + MLC_MLPERF_INFERENCE_TEST_QPS: "0.01" env: - CM_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'yes' + MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'yes' posthook_deps: - enable_if_env: - CM_MLPERF_LOADGEN_MODE: + MLC_MLPERF_LOADGEN_MODE: - accuracy - all - CM_MLPERF_ACCURACY_RESULTS_DIR: + MLC_MLPERF_ACCURACY_RESULTS_DIR: - 'on' skip_if_env: - CM_MLPERF_IMPLEMENTATION: + MLC_MLPERF_IMPLEMENTATION: - nvidia names: - mlperf-accuracy-script @@ -764,7 +764,7 @@ variations: image_name: mlperf-inference-mlcommons-python-implementation-3d-unet deps: - enable_if_env: - CM_MLPERF_DATASET_3DUNET_DOWNLOAD_TO_HOST: + MLC_MLPERF_DATASET_3DUNET_DOWNLOAD_TO_HOST: - 'yes' tags: get,dataset,kits19,preprocessed @@ -775,17 +775,17 @@ variations: mlperf-inference-implementation: tags: _rgat env: - CM_MODEL: + MLC_MODEL: rgat posthook_deps: - enable_if_env: - CM_MLPERF_LOADGEN_MODE: + MLC_MLPERF_LOADGEN_MODE: - accuracy - all - CM_MLPERF_ACCURACY_RESULTS_DIR: + MLC_MLPERF_ACCURACY_RESULTS_DIR: - 'on' skip_if_env: - CM_MLPERF_IMPLEMENTATION: + MLC_MLPERF_IMPLEMENTATION: - nvidia names: - mlperf-accuracy-script @@ -795,7 +795,7 @@ variations: deps: - tags: get,dataset,igbh enable_if_env: - CM_USE_DATASET_FROM_HOST: + MLC_USE_DATASET_FROM_HOST: - 'yes' names: - igbh-original @@ -808,17 +808,17 @@ variations: mlperf-inference-implementation: tags: _llama3_1-405b env: - CM_MODEL: + MLC_MODEL: llama3_1-405b posthook_deps: - enable_if_env: - CM_MLPERF_LOADGEN_MODE: + MLC_MLPERF_LOADGEN_MODE: - accuracy - all - CM_MLPERF_ACCURACY_RESULTS_DIR: + MLC_MLPERF_ACCURACY_RESULTS_DIR: - 'on' skip_if_env: - CM_MLPERF_IMPLEMENTATION: + MLC_MLPERF_IMPLEMENTATION: - nvidia names: - mlperf-accuracy-script @@ -828,7 +828,7 @@ variations: deps: - tags: get,ml-model,llama3 enable_if_env: - CM_USE_DATASET_FROM_HOST: + MLC_USE_DATASET_FROM_HOST: - 'yes' names: - llama3_1-405b @@ -839,8 +839,8 @@ variations: group: model env: - CM_MODEL: stable-diffusion-xl - CM_MLPERF_INFERENCE_TEST_QPS: "0.05" + MLC_MODEL: stable-diffusion-xl + MLC_MLPERF_INFERENCE_TEST_QPS: "0.05" default_variations: precision: float32 add_deps_recursive: @@ -848,10 +848,10 @@ variations: tags: _sdxl posthook_deps: - enable_if_env: - CM_MLPERF_LOADGEN_MODE: + MLC_MLPERF_LOADGEN_MODE: - accuracy - all - CM_MLPERF_ACCURACY_RESULTS_DIR: + MLC_MLPERF_ACCURACY_RESULTS_DIR: - 'on' names: - mlperf-accuracy-script @@ -862,9 +862,9 @@ variations: docker: deps: - enable_if_any_env: - CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST: + MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST: - 'yes' - CM_USE_MODEL_FROM_HOST: + MLC_USE_MODEL_FROM_HOST: - 'yes' tags: get,ml-model,sdxl,_fp16,_rclone @@ -873,9 +873,9 @@ variations: image_name: mlperf-inference-mlcommons-python-implementation-sdxl-float16 deps: - enable_if_any_env: - CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST: + MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST: - 'yes' - CM_USE_MODEL_FROM_HOST: + MLC_USE_MODEL_FROM_HOST: - 'yes' tags: get,ml-model,sdxl,_fp16,_rclone @@ -884,9 +884,9 @@ variations: image_name: mlperf-inference-mlcommons-python-implementation-sdxl-bfloat16 deps: - enable_if_any_env: - CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST: + MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST: - 'yes' - CM_USE_MODEL_FROM_HOST: + MLC_USE_MODEL_FROM_HOST: - 'yes' tags: get,ml-model,sdxl,_fp16,_rclone @@ -895,26 +895,26 @@ variations: image_name: mlperf-inference-mlcommons-python-implementation-sdxl-float32 deps: - enable_if_any_env: - 
CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST: + MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST: - 'yes' - CM_USE_MODEL_FROM_HOST: + MLC_USE_MODEL_FROM_HOST: - 'yes' tags: get,ml-model,sdxl,_fp32,_rclone llama2-70b_: default_env: - CM_MLPERF_INFERENCE_TEST_QPS: "0.01" + MLC_MLPERF_INFERENCE_TEST_QPS: "0.01" env: - CM_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'yes' + MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'yes' posthook_deps: - enable_if_env: - CM_MLPERF_LOADGEN_MODE: + MLC_MLPERF_LOADGEN_MODE: - accuracy - all - CM_MLPERF_ACCURACY_RESULTS_DIR: + MLC_MLPERF_ACCURACY_RESULTS_DIR: - 'on' skip_if_env: - CM_MLPERF_IMPLEMENTATION: + MLC_MLPERF_IMPLEMENTATION: - nvidia names: - mlperf-accuracy-script @@ -927,7 +927,7 @@ variations: base: - llama2-70b_ env: - CM_MODEL: + MLC_MODEL: llama2-70b-99 add_deps_recursive: mlperf-inference-implementation: @@ -939,7 +939,7 @@ variations: base: - llama2-70b_ env: - CM_MODEL: + MLC_MODEL: llama2-70b-99.9 add_deps_recursive: mlperf-inference-implementation: @@ -950,9 +950,9 @@ variations: image_name: mlperf-inference-mlcommons-python-implementation-llama2-70b deps: - enable_if_any_env: - CM_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST: + MLC_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST: - 'yes' - CM_USE_MODEL_FROM_HOST: + MLC_USE_MODEL_FROM_HOST: - 'yes' tags: get,ml-model,llama2 @@ -960,12 +960,12 @@ variations: docker: image_name: mlperf-inference-amd-python-implementation-llama2-70b mounts: - - "${{ CM_LLAMA2_FINAL_SAFE_TENSORS_PATH }}:${{ CM_LLAMA2_FINAL_SAFE_TENSORS_PATH }" + - "${{ MLC_LLAMA2_FINAL_SAFE_TENSORS_PATH }}:${{ MLC_LLAMA2_FINAL_SAFE_TENSORS_PATH }}" deps: - enable_if_any_env: - CM_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST: + MLC_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST: - 'yes' - CM_USE_MODEL_FROM_HOST: + MLC_USE_MODEL_FROM_HOST: - 'yes' tags: get,ml-model,llama2,_amd,_pytorch @@ -975,22 +975,22 @@ variations: base: - mixtral-8x7b env: - CM_MODEL: + MLC_MODEL: mixtral-8x7b add_deps_recursive: mlperf-inference-implementation: tags: _mixtral-8x7b env: - CM_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'yes' + MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'yes' posthook_deps: - enable_if_env: - CM_MLPERF_LOADGEN_MODE: + MLC_MLPERF_LOADGEN_MODE: - accuracy - all - CM_MLPERF_ACCURACY_RESULTS_DIR: + MLC_MLPERF_ACCURACY_RESULTS_DIR: - 'on' skip_if_env: - CM_MLPERF_IMPLEMENTATION: + MLC_MLPERF_IMPLEMENTATION: - nvidia names: - mlperf-accuracy-script @@ -1006,38 +1006,38 @@ variations: - ml-model - mixtral-model enable_if_any_env: - CM_MLPERF_MODEL_MIXTRAL_8X7B_DOWNLOAD_TO_HOST: + MLC_MLPERF_MODEL_MIXTRAL_8X7B_DOWNLOAD_TO_HOST: - 'yes' - CM_USE_MODEL_FROM_HOST: + MLC_USE_MODEL_FROM_HOST: - 'yes' - tags: get,dataset-mixtral,openorca-mbxp-gsm8k-combined names: - openorca-mbxp-gsm8k-combined-preprocessed enable_if_env: - CM_MLPERF_DATASET_MIXTRAL_8X7B_DOWNLOAD_TO_HOST: + MLC_MLPERF_DATASET_MIXTRAL_8X7B_DOWNLOAD_TO_HOST: - 'yes' mounts: - "${{ MIXTRAL_CHECKPOINT_PATH }}:${{ MIXTRAL_CHECKPOINT_PATH }}" - - "${{ CM_DATASET_MIXTRAL_PREPROCESSED_PATH }}:${{ CM_DATASET_MIXTRAL_PREPROCESSED_PATH }}" + - "${{ MLC_DATASET_MIXTRAL_PREPROCESSED_PATH }}:${{ MLC_DATASET_MIXTRAL_PREPROCESSED_PATH }}" rnnt: group: model env: - CM_MODEL: + MLC_MODEL: rnnt add_deps_recursive: mlperf-inference-implementation: tags: _rnnt posthook_deps: - enable_if_env: - CM_MLPERF_LOADGEN_MODE: + MLC_MLPERF_LOADGEN_MODE: - accuracy - all - CM_MLPERF_ACCURACY_RESULTS_DIR: + MLC_MLPERF_ACCURACY_RESULTS_DIR: - 'on' skip_if_env: - CM_MLPERF_IMPLEMENTATION: + MLC_MLPERF_IMPLEMENTATION: - nvidia names: - mlperf-accuracy-script @@ -1046,7 +1046,7 @@ 
variations: rnnt,reference: env: - CM_MLPERF_PRINT_SUMMARY: "no" + MLC_MLPERF_PRINT_SUMMARY: "no" gptj-99: group: @@ -1054,7 +1054,7 @@ variations: base: - gptj_ env: - CM_MODEL: + MLC_MODEL: gptj-99 add_deps_recursive: mlperf-inference-implementation: @@ -1066,7 +1066,7 @@ variations: base: - gptj_ env: - CM_MODEL: + MLC_MODEL: gptj-99.9 add_deps_recursive: mlperf-inference-implementation: @@ -1080,13 +1080,13 @@ variations: deps: - tags: get,ml-model,gptj,raw env: - CM_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'yes' + MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'yes' posthook_deps: - enable_if_env: - CM_MLPERF_LOADGEN_MODE: + MLC_MLPERF_LOADGEN_MODE: - accuracy - all - CM_MLPERF_ACCURACY_RESULTS_DIR: + MLC_MLPERF_ACCURACY_RESULTS_DIR: - 'on' names: - cnndm-accuracy-script @@ -1096,17 +1096,17 @@ variations: bert_: deps: - skip_if_env: - CM_DATASET_SQUAD_VAL_PATH: "on" + MLC_DATASET_SQUAD_VAL_PATH: "on" tags: get,dataset,squad,language-processing - skip_if_env: - CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH: "on" + MLC_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH: "on" tags: get,dataset-aux,squad-vocab posthook_deps: - enable_if_env: - CM_MLPERF_LOADGEN_MODE: + MLC_MLPERF_LOADGEN_MODE: - accuracy - all - CM_MLPERF_ACCURACY_RESULTS_DIR: + MLC_MLPERF_ACCURACY_RESULTS_DIR: - 'on' names: - squad-accuracy-script @@ -1122,7 +1122,7 @@ variations: base: - bert_ env: - CM_MODEL: + MLC_MODEL: bert-99 add_deps_recursive: mlperf-inference-implementation: @@ -1134,7 +1134,7 @@ variations: base: - bert_ env: - CM_MODEL: + MLC_MODEL: bert-99.9 add_deps_recursive: mlperf-inference-implementation: @@ -1143,10 +1143,10 @@ variations: dlrm_: posthook_deps: - enable_if_env: - CM_MLPERF_LOADGEN_MODE: + MLC_MLPERF_LOADGEN_MODE: - accuracy - all - CM_MLPERF_ACCURACY_RESULTS_DIR: + MLC_MLPERF_ACCURACY_RESULTS_DIR: - 'on' names: - terabyte-accuracy-script @@ -1159,7 +1159,7 @@ variations: base: - dlrm_ env: - CM_MODEL: + MLC_MODEL: dlrm-v2-99 add_deps_recursive: mlperf-inference-implementation: @@ -1171,7 +1171,7 @@ variations: base: - dlrm_ env: - CM_MODEL: + MLC_MODEL: dlrm-v2-99.9 add_deps_recursive: mlperf-inference-implementation: @@ -1197,17 +1197,17 @@ variations: - tags: get,preprocessed,dataset,criteo,_mlc - tags: get,ml-model,dlrm,_pytorch,_fp32 mounts: - - "${{ CM_ML_MODEL_FILE_WITH_PATH }}:${{ CM_ML_MODEL_FILE_WITH_PATH }}" + - "${{ MLC_ML_MODEL_FILE_WITH_PATH }}:${{ MLC_ML_MODEL_FILE_WITH_PATH }}" - "${{ DLRM_DATA_PATH }}:${{ DLRM_DATA_PATH }}" dockerfile_env: - CM_ML_MODEL_FILE_WITH_PATH: "on" + MLC_ML_MODEL_FILE_WITH_PATH: "on" mobilenet: group: model env: - CM_MODEL: + MLC_MODEL: mobilenet add_deps_recursive: mlperf-inference-implementation: @@ -1216,10 +1216,10 @@ variations: - tags: get,dataset-aux,imagenet-aux posthook_deps: - enable_if_env: - CM_MLPERF_LOADGEN_MODE: + MLC_MLPERF_LOADGEN_MODE: - accuracy - all - CM_MLPERF_ACCURACY_RESULTS_DIR: + MLC_MLPERF_ACCURACY_RESULTS_DIR: - 'on' names: - mlperf-accuracy-script @@ -1230,7 +1230,7 @@ variations: group: model env: - CM_MODEL: + MLC_MODEL: efficientnet add_deps_recursive: mlperf-inference-implementation: @@ -1239,10 +1239,10 @@ variations: - tags: get,dataset-aux,imagenet-aux posthook_deps: - enable_if_env: - CM_MLPERF_LOADGEN_MODE: + MLC_MLPERF_LOADGEN_MODE: - accuracy - all - CM_MLPERF_ACCURACY_RESULTS_DIR: + MLC_MLPERF_ACCURACY_RESULTS_DIR: - 'on' names: - mlperf-accuracy-script @@ -1252,7 +1252,7 @@ variations: onnxruntime: group: backend env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: onnxruntime add_deps_recursive: mlperf-inference-implementation: @@ -1261,7 +1261,7 @@ 
variations: tensorrt: group: backend env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: tensorrt add_deps_recursive: mlperf-inference-implementation: @@ -1273,7 +1273,7 @@ variations: tf: group: backend env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: tf add_deps_recursive: mlperf-inference-implementation: @@ -1282,7 +1282,7 @@ variations: pytorch: group: backend env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: pytorch add_deps_recursive: mlperf-inference-implementation: @@ -1291,7 +1291,7 @@ variations: openshift: group: backend env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: openshift add_deps_recursive: mlperf-inference-implementation: @@ -1300,7 +1300,7 @@ variations: ncnn: group: backend env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: ncnn add_deps_recursive: mlperf-inference-implementation: @@ -1311,7 +1311,7 @@ variations: default_variations: precision: int8 env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: deepsparse add_deps_recursive: mlperf-inference-implementation: @@ -1320,7 +1320,7 @@ variations: tflite: group: backend env: - CM_MLPERF_BACKEND: tflite + MLC_MLPERF_BACKEND: tflite add_deps_recursive: mlperf-inference-implementation: tags: _tflite @@ -1328,7 +1328,7 @@ variations: glow: group: backend env: - CM_MLPERF_BACKEND: glow + MLC_MLPERF_BACKEND: glow add_deps_recursive: mlperf-inference-implementation: tags: _glow @@ -1338,7 +1338,7 @@ variations: base: - batch_size.1 env: - CM_MLPERF_BACKEND: tvm-onnx + MLC_MLPERF_BACKEND: tvm-onnx add_deps_recursive: mlperf-inference-implementation: tags: _tvm-onnx @@ -1348,7 +1348,7 @@ variations: base: - batch_size.1 env: - CM_MLPERF_BACKEND: tvm-pytorch + MLC_MLPERF_BACKEND: tvm-pytorch add_deps_recursive: mlperf-inference-implementation: tags: _tvm-pytorch @@ -1358,7 +1358,7 @@ variations: base: - batch_size.1 env: - CM_MLPERF_BACKEND: tvm-tflite + MLC_MLPERF_BACKEND: tvm-tflite add_deps_recursive: mlperf-inference-implementation: tags: _tvm-tflite @@ -1366,7 +1366,7 @@ variations: ray: group: backend env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: ray add_deps_recursive: mlperf-inference-implementation: @@ -1378,7 +1378,7 @@ variations: default: True env: - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: cpu add_deps_recursive: mlperf-inference-implementation: @@ -1394,12 +1394,12 @@ variations: deps: - tags: get,nvidia-docker skip_if_env: - CM_SKIP_GET_NVIDIA_DOCKER: + MLC_SKIP_GET_NVIDIA_DOCKER: - yes group: device env: - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: gpu add_deps_recursive: mlperf-inference-implementation: @@ -1407,7 +1407,7 @@ variations: deps: - tags: get,cuda-devices,_with-pycuda skip_if_env: - CM_CUDA_DEVICE_PROP_GLOBAL_MEMORY: + MLC_CUDA_DEVICE_PROP_GLOBAL_MEMORY: - "yes" - "on" rocm: @@ -1416,7 +1416,7 @@ variations: group: device env: - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: rocm add_deps_recursive: mlperf-inference-implementation: @@ -1425,7 +1425,7 @@ variations: group: device env: - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: qaic add_deps_recursive: mlperf-inference-implementation: @@ -1435,7 +1435,7 @@ variations: group: device env: - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: tpu add_deps_recursive: mlperf-inference-implementation: @@ -1445,16 +1445,16 @@ variations: fast: group: execution-mode env: - CM_FAST_FACTOR: '5' - CM_OUTPUT_FOLDER_NAME: fast_results - CM_MLPERF_RUN_STYLE: fast + MLC_FAST_FACTOR: '5' + MLC_OUTPUT_FOLDER_NAME: fast_results + MLC_MLPERF_RUN_STYLE: fast test: group: execution-mode default: true env: - CM_OUTPUT_FOLDER_NAME: test_results - CM_MLPERF_RUN_STYLE: test + MLC_OUTPUT_FOLDER_NAME: test_results + 
MLC_MLPERF_RUN_STYLE: test valid,retinanet: adr: @@ -1464,8 +1464,8 @@ variations: valid: group: execution-mode env: - CM_OUTPUT_FOLDER_NAME: valid_results - CM_MLPERF_RUN_STYLE: valid + MLC_OUTPUT_FOLDER_NAME: valid_results + MLC_MLPERF_RUN_STYLE: valid # Model precision quantized: @@ -1478,8 +1478,8 @@ variations: group: precision default: true env: - CM_MLPERF_QUANTIZATION: off - CM_MLPERF_MODEL_PRECISION: float32 + MLC_MLPERF_QUANTIZATION: off + MLC_MLPERF_MODEL_PRECISION: float32 add_deps_recursive: python-reference-mlperf-inference: tags: _fp32 @@ -1489,8 +1489,8 @@ variations: float16: group: precision env: - CM_MLPERF_QUANTIZATION: off - CM_MLPERF_MODEL_PRECISION: float16 + MLC_MLPERF_QUANTIZATION: off + MLC_MLPERF_MODEL_PRECISION: float16 add_deps_recursive: python-reference-mlperf-inference: tags: _float16 @@ -1500,8 +1500,8 @@ variations: bfloat16: group: precision env: - CM_MLPERF_QUANTIZATION: off - CM_MLPERF_MODEL_PRECISION: bfloat16 + MLC_MLPERF_QUANTIZATION: off + MLC_MLPERF_MODEL_PRECISION: bfloat16 add_deps_recursive: python-reference-mlperf-inference: tags: _bfloat16 @@ -1509,16 +1509,16 @@ variations: int4: group: precision env: - CM_MLPERF_QUANTIZATION: on - CM_MLPERF_MODEL_PRECISION: int4 + MLC_MLPERF_QUANTIZATION: on + MLC_MLPERF_MODEL_PRECISION: int4 add_deps_recursive: mlperf-inference-implementation: tags: _int4 int8: group: precision env: - CM_MLPERF_QUANTIZATION: on - CM_MLPERF_MODEL_PRECISION: int8 + MLC_MLPERF_QUANTIZATION: on + MLC_MLPERF_MODEL_PRECISION: int8 add_deps_recursive: mlperf-inference-implementation: tags: _int8 @@ -1528,8 +1528,8 @@ variations: uint8: group: precision env: - CM_MLPERF_QUANTIZATION: on - CM_MLPERF_MODEL_PRECISION: uint8 + MLC_MLPERF_QUANTIZATION: on + MLC_MLPERF_MODEL_PRECISION: uint8 add_deps_recursive: mlperf-inference-implementation: tags: _uint8 @@ -1540,36 +1540,36 @@ variations: group: loadgen-scenario default: true env: - CM_MLPERF_LOADGEN_SCENARIO: Offline + MLC_MLPERF_LOADGEN_SCENARIO: Offline add_deps_recursive: mlperf-inference-implementation: tags: _offline multistream: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: MultiStream + MLC_MLPERF_LOADGEN_SCENARIO: MultiStream add_deps_recursive: mlperf-inference-implementation: tags: _multistream singlestream: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: SingleStream + MLC_MLPERF_LOADGEN_SCENARIO: SingleStream add_deps_recursive: mlperf-inference-implementation: tags: _singlestream server: group: loadgen-scenario env: - CM_MLPERF_LOADGEN_SCENARIO: Server + MLC_MLPERF_LOADGEN_SCENARIO: Server add_deps_recursive: mlperf-inference-implementation: tags: _server power: env: - CM_MLPERF_POWER: 'yes' - CM_SYSTEM_POWER: 'yes' + MLC_MLPERF_POWER: 'yes' + MLC_SYSTEM_POWER: 'yes' add_deps_recursive: mlperf-runner: tags: @@ -1578,7 +1578,7 @@ variations: batch_size.#: group: batch_size env: - CM_MLPERF_LOADGEN_MAX_BATCHSIZE: '#' + MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: '#' add_deps_recursive: mlperf-inference-implementation: tags: _batch_size.# @@ -1601,8 +1601,8 @@ variations: version: r2.1 tags: _custom env: - CM_SKIP_SYS_UTILS: 'yes' - CM_TEST_QUERY_COUNT: '100' + MLC_SKIP_SYS_UTILS: 'yes' + MLC_TEST_QUERY_COUNT: '100' r3.0_default: group: @@ -1619,7 +1619,7 @@ variations: version: r2.1 tags: _custom env: - CM_SKIP_SYS_UTILS: 'yes' + MLC_SKIP_SYS_UTILS: 'yes' r3.1_default: group: @@ -1632,10 +1632,10 @@ variations: version: r3.0 tags: _nvidia-only default_env: - CM_SKIP_SYS_UTILS: 'yes' - CM_REGENERATE_MEASURE_FILES: 'yes' + MLC_SKIP_SYS_UTILS: 'yes' + 
MLC_REGENERATE_MEASURE_FILES: 'yes' env: - CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3-cp38-cp38-linux_x86_64.whl' + MLC_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3-cp38-cp38-linux_x86_64.whl' r4.0-dev_default: group: @@ -1650,10 +1650,10 @@ variations: intel-harness: tags: _v3.1 default_env: - CM_SKIP_SYS_UTILS: 'yes' - CM_REGENERATE_MEASURE_FILES: 'yes' + MLC_SKIP_SYS_UTILS: 'yes' + MLC_REGENERATE_MEASURE_FILES: 'yes' env: - CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3-cp38-cp38-linux_x86_64.whl' + MLC_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3-cp38-cp38-linux_x86_64.whl' r4.0_default: group: @@ -1668,10 +1668,10 @@ variations: intel-harness: tags: _v3.1 default_env: - CM_SKIP_SYS_UTILS: 'yes' - CM_REGENERATE_MEASURE_FILES: 'yes' + MLC_SKIP_SYS_UTILS: 'yes' + MLC_REGENERATE_MEASURE_FILES: 'yes' env: - CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3b0-cp38-cp38-linux_x86_64.whl' + MLC_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3b0-cp38-cp38-linux_x86_64.whl' #uses public code for inference v4.1 @@ -1688,8 +1688,8 @@ variations: intel-harness: tags: _v4.0 default_env: - CM_SKIP_SYS_UTILS: 'yes' - CM_REGENERATE_MEASURE_FILES: 'yes' + MLC_SKIP_SYS_UTILS: 'yes' + MLC_REGENERATE_MEASURE_FILES: 'yes' r4.1_default: group: @@ -1704,11 +1704,11 @@ variations: intel-harness: tags: _v4.1 default_env: - CM_SKIP_SYS_UTILS: 'yes' - CM_REGENERATE_MEASURE_FILES: 'yes' + MLC_SKIP_SYS_UTILS: 'yes' + MLC_REGENERATE_MEASURE_FILES: 'yes' env: - CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3b0-cp38-cp38-linux_x86_64.whl' - CM_MLPERF_INFERENCE_VERSION: '4.1' + MLC_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3b0-cp38-cp38-linux_x86_64.whl' + MLC_MLPERF_INFERENCE_VERSION: '4.1' r5.0-dev_default: group: @@ -1725,10 +1725,10 @@ variations: inference-src: version: r5.0 default_env: - CM_SKIP_SYS_UTILS: 'yes' - CM_REGENERATE_MEASURE_FILES: 'yes' + MLC_SKIP_SYS_UTILS: 'yes' + MLC_REGENERATE_MEASURE_FILES: 'yes' env: - CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3b0-cp38-cp38-linux_x86_64.whl' + MLC_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3b0-cp38-cp38-linux_x86_64.whl' invalid_variation_combinations: @@ -1804,9 +1804,9 @@ input_description: adr.compiler.tags: desc: "Compiler for loadgen" default: gcc - adr.inference-src-loadgen.env.CM_GIT_URL: + adr.inference-src-loadgen.env.MLC_GIT_URL: desc: "Git URL for MLPerf inference sources to build LoadGen (to enable non-reference implementations)" - adr.inference-src.env.CM_GIT_URL: + adr.inference-src.env.MLC_GIT_URL: desc: "Git URL for MLPerf inference sources to run benchmarks (to enable non-reference implementations)" quiet: desc: "Quiet run (select default values for all questions)" @@ -1819,7 +1819,7 @@ input_description: update_meta_if_env: - enable_if_env: - CM_CONTAINER_TOOL: + MLC_CONTAINER_TOOL: - podman # podman maps the host userid to the root user inside the container docker: @@ -1827,16 +1827,16 @@ update_meta_if_env: use_host_user_id: False pass_user_group: False #useful if docker is run by a different user from the one who built it and under the same group default_env: - CM_DOCKER_USE_DEFAULT_USER: 'yes' + MLC_DOCKER_USE_DEFAULT_USER: 'yes' - skip_if_env: - CM_CONTAINER_TOOL: + MLC_CONTAINER_TOOL: - podman docker: use_host_group_id: True use_host_user_id: True pass_user_group: True #useful if docker is run by a different user from the one who built it and under the same group - enable_if_env: - CM_HOST_OS_TYPE: + MLC_HOST_OS_TYPE: - linux adr: compiler: @@ -1853,27 
+1853,27 @@ docker: names: - get-mlperf-inference-submission-dir skip_if_env: - CM_MLPERF_INFERENCE_SUBMISSION_DIR: [ on ] + MLC_MLPERF_INFERENCE_SUBMISSION_DIR: [ on ] pre_run_cmds: #- cm pull repo && cm run script --tags=get,git,repo,_repo.https://github.com/GATEOverflow/inference_results_v4.0.git --update - cm pull repo mounts: - - "${{ CM_DATASET_IMAGENET_PATH }}:${{ CM_DATASET_IMAGENET_PATH }}" - - "${{ CM_DATASET_OPENIMAGES_PATH }}:${{ CM_DATASET_OPENIMAGES_PATH }}" - - "${{ CM_OPENIMAGES_CALIBRATION_DATASET_PATH }}:${{ CM_OPENIMAGES_CALIBRATION_DATASET_PATH }}" - - "${{ CM_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH }}:${{ CM_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH }}" - - "${{ CM_MLPERF_INFERENCE_RESULTS_DIR }}:${{ CM_MLPERF_INFERENCE_RESULTS_DIR }}" + - "${{ MLC_DATASET_IMAGENET_PATH }}:${{ MLC_DATASET_IMAGENET_PATH }}" + - "${{ MLC_DATASET_OPENIMAGES_PATH }}:${{ MLC_DATASET_OPENIMAGES_PATH }}" + - "${{ MLC_OPENIMAGES_CALIBRATION_DATASET_PATH }}:${{ MLC_OPENIMAGES_CALIBRATION_DATASET_PATH }}" + - "${{ MLC_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH }}:${{ MLC_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH }}" + - "${{ MLC_MLPERF_INFERENCE_RESULTS_DIR }}:${{ MLC_MLPERF_INFERENCE_RESULTS_DIR }}" - "${{ OUTPUT_BASE_DIR }}:${{ OUTPUT_BASE_DIR }}" - - "${{ CM_MLPERF_INFERENCE_SUBMISSION_DIR }}:${{ CM_MLPERF_INFERENCE_SUBMISSION_DIR }}" + - "${{ MLC_MLPERF_INFERENCE_SUBMISSION_DIR }}:${{ MLC_MLPERF_INFERENCE_SUBMISSION_DIR }}" - "${{ GPTJ_CHECKPOINT_PATH }}:${{ GPTJ_CHECKPOINT_PATH }}" - - "${{ CM_CRITEO_PREPROCESSED_PATH }}:${{ CM_CRITEO_PREPROCESSED_PATH }}" + - "${{ MLC_CRITEO_PREPROCESSED_PATH }}:${{ MLC_CRITEO_PREPROCESSED_PATH }}" - "${{ LLAMA2_CHECKPOINT_PATH }}:${{ LLAMA2_CHECKPOINT_PATH }}" - - "${{ CM_NVIDIA_LLAMA_DATASET_FILE_PATH }}:${{ CM_NVIDIA_LLAMA_DATASET_FILE_PATH }}" + - "${{ MLC_NVIDIA_LLAMA_DATASET_FILE_PATH }}:${{ MLC_NVIDIA_LLAMA_DATASET_FILE_PATH }}" - "${{ SDXL_CHECKPOINT_PATH }}:${{ SDXL_CHECKPOINT_PATH }}" - - "${{ CM_DATASET_KITS19_PREPROCESSED_PATH }}:${{ CM_DATASET_KITS19_PREPROCESSED_PATH }}" - - "${{ CM_DATASET_IGBH_PATH }}:${{ CM_DATASET_IGBH_PATH }}" - - "${{ CM_ML_MODEL_RGAT_CHECKPOINT_PATH }}:${{ CM_ML_MODEL_RGAT_CHECKPOINT_PATH }}" + - "${{ MLC_DATASET_KITS19_PREPROCESSED_PATH }}:${{ MLC_DATASET_KITS19_PREPROCESSED_PATH }}" + - "${{ MLC_DATASET_IGBH_PATH }}:${{ MLC_DATASET_IGBH_PATH }}" + - "${{ MLC_ML_MODEL_RGAT_CHECKPOINT_PATH }}:${{ MLC_ML_MODEL_RGAT_CHECKPOINT_PATH }}" skip_run_cmd: 'no' shm_size: '32gb' interactive: True @@ -1890,6 +1890,6 @@ docker: results_dir: RESULTS_DIR submission_dir: SUBMISSION_DIR dlrm_data_path: DLRM_DATA_PATH - intel_gptj_int8_model_path: CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH - nvidia_llama2_dataset_file_path: CM_NVIDIA_LLAMA_DATASET_FILE_PATH - tp_size: CM_NVIDIA_TP_SIZE + intel_gptj_int8_model_path: MLC_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH + nvidia_llama2_dataset_file_path: MLC_NVIDIA_LLAMA_DATASET_FILE_PATH + tp_size: MLC_NVIDIA_TP_SIZE diff --git a/script/app-mlperf-training-nvidia/customize.py b/script/app-mlperf-training-nvidia/customize.py index e1613cd75..890daa60b 100644 --- a/script/app-mlperf-training-nvidia/customize.py +++ b/script/app-mlperf-training-nvidia/customize.py @@ -12,54 +12,54 @@ def preprocess(i): state = i['state'] script_path = i['run_script_input']['path'] - if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": + if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes": return {'return': 0} - if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes": + if env.get('MLC_RUN_DOCKER_CONTAINER', '') == "yes": return 
{'return': 0} - if env.get('CM_MLPERF_POWER', '') == "yes": + if env.get('MLC_MLPERF_POWER', '') == "yes": power = "yes" else: power = "no" - rerun = True if env.get("CM_RERUN", "") != '' else False + rerun = True if env.get("MLC_RERUN", "") != '' else False - if 'CM_MLPERF_MODEL' not in env: + if 'MLC_MLPERF_MODEL' not in env: return { 'return': 1, 'error': "Please select a variation specifying the model to run"} - if 'CM_NUM_THREADS' not in env: - if 'CM_MINIMIZE_THREADS' in env: - env['CM_NUM_THREADS'] = str(int(env['CM_HOST_CPU_TOTAL_CORES']) // - (int(env.get('CM_HOST_CPU_SOCKETS', '1')) * int(env.get('CM_HOST_CPU_TOTAL_CORES', '1')))) + if 'MLC_NUM_THREADS' not in env: + if 'MLC_MINIMIZE_THREADS' in env: + env['MLC_NUM_THREADS'] = str(int(env['MLC_HOST_CPU_TOTAL_CORES']) // + (int(env.get('MLC_HOST_CPU_SOCKETS', '1')) * int(env.get('MLC_HOST_CPU_TOTAL_CORES', '1')))) else: - env['CM_NUM_THREADS'] = env.get('CM_HOST_CPU_TOTAL_CORES', '1') + env['MLC_NUM_THREADS'] = env.get('MLC_HOST_CPU_TOTAL_CORES', '1') print("Using MLCommons Training source from '" + - env['CM_MLPERF_TRAINING_SOURCE'] + "'") + env['MLC_MLPERF_TRAINING_SOURCE'] + "'") - NUM_THREADS = env['CM_NUM_THREADS'] + NUM_THREADS = env['MLC_NUM_THREADS'] - if "bert" in env['CM_MLPERF_MODEL']: - env['CM_RUN_DIR'] = os.path.join( - env['CM_GIT_REPO_CHECKOUT_PATH'], + if "bert" in env['MLC_MLPERF_MODEL']: + env['MLC_RUN_DIR'] = os.path.join( + env['MLC_GIT_REPO_CHECKOUT_PATH'], "NVIDIA", "benchmarks", "bert", "implementations", "pytorch-22.09") - if "resnet" in env['CM_MLPERF_MODEL']: - env['CM_RUN_DIR'] = os.path.join( - env['CM_GIT_REPO_CHECKOUT_PATH'], + if "resnet" in env['MLC_MLPERF_MODEL']: + env['MLC_RUN_DIR'] = os.path.join( + env['MLC_GIT_REPO_CHECKOUT_PATH'], "NVIDIA", "benchmarks", "resnet", "implementations", "mxnet-22.04") - env['CM_RESULTS_DIR'] = os.getcwd() + env['MLC_RESULTS_DIR'] = os.getcwd() return {'return': 0} diff --git a/script/app-mlperf-training-nvidia/meta.yaml b/script/app-mlperf-training-nvidia/meta.yaml index a2fad3584..abf4e7dd9 100644 --- a/script/app-mlperf-training-nvidia/meta.yaml +++ b/script/app-mlperf-training-nvidia/meta.yaml @@ -19,29 +19,29 @@ tags: # Default environment default_env: - CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: nvidia + MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: nvidia # Map script inputs to environment variables input_mapping: - docker: CM_RUN_DOCKER_CONTAINER - hw_name: CM_HW_NAME - num_threads: CM_NUM_THREADS - model: CM_MLPERF_CUSTOM_MODEL_PATH + docker: MLC_RUN_DOCKER_CONTAINER + hw_name: MLC_HW_NAME + num_threads: MLC_NUM_THREADS + model: MLC_MLPERF_CUSTOM_MODEL_PATH output_dir: OUTPUT_BASE_DIR - rerun: CM_RERUN - clean: CM_MLPERF_CLEAN_SUBMISSION_DIR + rerun: MLC_RERUN + clean: MLC_MLPERF_CLEAN_SUBMISSION_DIR # Env keys which are exposed to higher level scripts new_env_keys: - - CM_MLPERF_* - - CM_DATASET_* - - CM_HW_NAME - - CM_ML_MODEL_* + - MLC_MLPERF_* + - MLC_DATASET_* + - MLC_HW_NAME + - MLC_ML_MODEL_* new_state_keys: - mlperf-inference-implementation - - CM_SUT_* + - MLC_SUT_* # Dependencies on other CM scripts deps: @@ -77,7 +77,7 @@ deps: # Detect CUDA if required - tags: get,cuda enable_if_env: - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - cuda @@ -86,9 +86,9 @@ deps: names: - ml-engine-torchvision enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - pytorch - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - cuda - tags: get,generic-python-lib,_mlperf_logging @@ -103,7 +103,7 @@ deps: - prepare-data - bert-model enable_if_env: - CM_MLPERF_MODEL: + 
MLC_MLPERF_MODEL: - bert @@ -113,14 +113,14 @@ variations: pytorch: group: framework env: - CM_MLPERF_BACKEND: pytorch - CM_MLPERF_BACKEND_VERSION: <<>> + MLC_MLPERF_BACKEND: pytorch + MLC_MLPERF_BACKEND_VERSION: <<>> tf: group: framework env: - CM_MLPERF_BACKEND: tf - CM_MLPERF_BACKEND_VERSION: <<>> + MLC_MLPERF_BACKEND: tf + MLC_MLPERF_BACKEND_VERSION: <<>> tensorflow: alias: tf @@ -128,14 +128,14 @@ variations: # Reference MLPerf models bert: env: - CM_MLPERF_MODEL: bert + MLC_MLPERF_MODEL: bert deps: - tags: get,generic-python-lib,_protobuf names: - protobuf version_max: "3.19" enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - tf - tflite - tags: get,generic-python-lib,_torch @@ -144,7 +144,7 @@ variations: tpu: group: device env: - CM_MLPERF_DEVICE: tpu + MLC_MLPERF_DEVICE: tpu CUDA_VISIBLE_DEVICES: '' USE_CUDA: no @@ -152,5 +152,5 @@ variations: group: device default: true env: - CM_MLPERF_DEVICE: cuda + MLC_MLPERF_DEVICE: cuda USE_CUDA: yes diff --git a/script/app-mlperf-training-nvidia/run-bert-training.sh b/script/app-mlperf-training-nvidia/run-bert-training.sh index 1515404f3..69daeebda 100644 --- a/script/app-mlperf-training-nvidia/run-bert-training.sh +++ b/script/app-mlperf-training-nvidia/run-bert-training.sh @@ -1,8 +1,8 @@ #!/bin/bash source ./config_DGXA100_1x8x56x1.sh -results_dir=${CM_RESULTS_DIR} -cmd="CONT=mlperf-nvidia:language_model DATADIR=${CM_MLPERF_TRAINING_BERT_DATA_PATH}/hdf5/training-4320/hdf5_4320_shards_varlength DATADIR_PHASE2=${CM_MLPERF_TRAINING_BERT_DATA_PATH}/hdf5/training-4320/hdf5_4320_shards_varlength EVALDIR=${CM_MLPERF_TRAINING_BERT_DATA_PATH}/hdf5/eval_varlength/ CHECKPOINTDIR=${results_dir} CHECKPOINTDIR_PHASE1=${CM_MLPERF_TRAINING_BERT_DATA_PATH}/phase1 ./run_with_docker.sh" +results_dir=${MLC_RESULTS_DIR} +cmd="CONT=mlperf-nvidia:language_model DATADIR=${MLC_MLPERF_TRAINING_BERT_DATA_PATH}/hdf5/training-4320/hdf5_4320_shards_varlength DATADIR_PHASE2=${MLC_MLPERF_TRAINING_BERT_DATA_PATH}/hdf5/training-4320/hdf5_4320_shards_varlength EVALDIR=${MLC_MLPERF_TRAINING_BERT_DATA_PATH}/hdf5/eval_varlength/ CHECKPOINTDIR=${results_dir} CHECKPOINTDIR_PHASE1=${MLC_MLPERF_TRAINING_BERT_DATA_PATH}/phase1 ./run_with_docker.sh" echo "$cmd" eval "$cmd" test $? -eq 0 || exit $? diff --git a/script/app-mlperf-training-nvidia/run.sh b/script/app-mlperf-training-nvidia/run.sh index 2f15ea73b..24500651c 100644 --- a/script/app-mlperf-training-nvidia/run.sh +++ b/script/app-mlperf-training-nvidia/run.sh @@ -1,10 +1,10 @@ #!/bin/bash -cmd="cd ${CM_RUN_DIR}" +cmd="cd ${MLC_RUN_DIR}" echo "$cmd" eval "$cmd" -if [[ ${CM_MLPERF_MODEL} == "bert" ]]; then - bash ${CM_TMP_CURRENT_SCRIPT_PATH}/run-bert-training.sh +if [[ ${MLC_MLPERF_MODEL} == "bert" ]]; then + bash ${MLC_TMP_CURRENT_SCRIPT_PATH}/run-bert-training.sh test $? -eq 0 || exit $? 
fi diff --git a/script/app-mlperf-training-reference/customize.py b/script/app-mlperf-training-reference/customize.py index 54a544fcb..fb2d0c709 100644 --- a/script/app-mlperf-training-reference/customize.py +++ b/script/app-mlperf-training-reference/customize.py @@ -12,38 +12,38 @@ def preprocess(i): state = i['state'] script_path = i['run_script_input']['path'] - if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": + if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes": return {'return': 0} - if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes": + if env.get('MLC_RUN_DOCKER_CONTAINER', '') == "yes": return {'return': 0} - if env.get('CM_MLPERF_POWER', '') == "yes": + if env.get('MLC_MLPERF_POWER', '') == "yes": power = "yes" else: power = "no" - rerun = True if env.get("CM_RERUN", "") != '' else False + rerun = True if env.get("MLC_RERUN", "") != '' else False - if 'CM_MLPERF_MODEL' not in env: + if 'MLC_MLPERF_MODEL' not in env: return { 'return': 1, 'error': "Please select a variation specifying the model to run"} - if 'CM_NUM_THREADS' not in env: - if 'CM_MINIMIZE_THREADS' in env: - env['CM_NUM_THREADS'] = str(int(env['CM_HOST_CPU_TOTAL_CORES']) // - (int(env.get('CM_HOST_CPU_SOCKETS', '1')) * int(env.get('CM_HOST_CPU_TOTAL_CORES', '1')))) + if 'MLC_NUM_THREADS' not in env: + if 'MLC_MINIMIZE_THREADS' in env: + env['MLC_NUM_THREADS'] = str(int(env['MLC_HOST_CPU_TOTAL_CORES']) // + (int(env.get('MLC_HOST_CPU_SOCKETS', '1')) * int(env.get('MLC_HOST_CPU_TOTAL_CORES', '1')))) else: - env['CM_NUM_THREADS'] = env.get('CM_HOST_CPU_TOTAL_CORES', '1') + env['MLC_NUM_THREADS'] = env.get('MLC_HOST_CPU_TOTAL_CORES', '1') print("Using MLCommons Training source from '" + - env['CM_MLPERF_TRAINING_SOURCE'] + "'") + env['MLC_MLPERF_TRAINING_SOURCE'] + "'") - NUM_THREADS = env['CM_NUM_THREADS'] + NUM_THREADS = env['MLC_NUM_THREADS'] - if "bert" in env['CM_MLPERF_MODEL']: - env['CM_RUN_DIR'] = os.path.join( - env['CM_MLPERF_TRAINING_SOURCE'], + if "bert" in env['MLC_MLPERF_MODEL']: + env['MLC_RUN_DIR'] = os.path.join( + env['MLC_MLPERF_TRAINING_SOURCE'], "language_model", "tensorflow", "bert") diff --git a/script/app-mlperf-training-reference/meta.yaml b/script/app-mlperf-training-reference/meta.yaml index 56b4ad05d..45b0633be 100644 --- a/script/app-mlperf-training-reference/meta.yaml +++ b/script/app-mlperf-training-reference/meta.yaml @@ -20,30 +20,30 @@ tags: # Default environment default_env: - CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: reference - CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX: '' + MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: reference + MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX: '' # Map script inputs to environment variables input_mapping: - docker: CM_RUN_DOCKER_CONTAINER - hw_name: CM_HW_NAME - num_threads: CM_NUM_THREADS - model: CM_MLPERF_CUSTOM_MODEL_PATH + docker: MLC_RUN_DOCKER_CONTAINER + hw_name: MLC_HW_NAME + num_threads: MLC_NUM_THREADS + model: MLC_MLPERF_CUSTOM_MODEL_PATH output_dir: OUTPUT_BASE_DIR - rerun: CM_RERUN - clean: CM_MLPERF_CLEAN_SUBMISSION_DIR + rerun: MLC_RERUN + clean: MLC_MLPERF_CLEAN_SUBMISSION_DIR # Env keys which are exposed to higher level scripts new_env_keys: - - CM_MLPERF_* - - CM_DATASET_* - - CM_HW_NAME - - CM_ML_MODEL_* + - MLC_MLPERF_* + - MLC_DATASET_* + - MLC_HW_NAME + - MLC_ML_MODEL_* new_state_keys: - mlperf-inference-implementation - - CM_SUT_* + - MLC_SUT_* # Dependencies on other CM scripts deps: @@ -71,7 +71,7 @@ deps: # Detect CUDA if required - tags: get,cuda enable_if_env: - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - cuda @@ -80,9 +80,9 @@ deps: names: - 
ml-engine-torchvision enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - pytorch - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - cuda - tags: get,generic-python-lib,_mlperf_logging @@ -97,7 +97,7 @@ deps: - prepare-data - bert-model enable_if_env: - CM_MLPERF_MODEL: + MLC_MLPERF_MODEL: - bert @@ -107,14 +107,14 @@ variations: pytorch: group: framework env: - CM_MLPERF_BACKEND: pytorch - CM_MLPERF_BACKEND_VERSION: <<>> + MLC_MLPERF_BACKEND: pytorch + MLC_MLPERF_BACKEND_VERSION: <<>> tf: group: framework env: - CM_MLPERF_BACKEND: tf - CM_MLPERF_BACKEND_VERSION: <<>> + MLC_MLPERF_BACKEND: tf + MLC_MLPERF_BACKEND_VERSION: <<>> tensorflow: alias: tf @@ -122,14 +122,14 @@ variations: # Reference MLPerf models bert: env: - CM_MLPERF_MODEL: bert + MLC_MLPERF_MODEL: bert deps: - tags: get,generic-python-lib,_protobuf names: - protobuf version_max: "3.19" enable_if_env: - CM_MLPERF_BACKEND: + MLC_MLPERF_BACKEND: - tf - tflite - tags: get,generic-python-lib,_torch @@ -138,7 +138,7 @@ variations: tpu: group: device env: - CM_MLPERF_DEVICE: tpu + MLC_MLPERF_DEVICE: tpu CUDA_VISIBLE_DEVICES: '' USE_CUDA: no @@ -146,5 +146,5 @@ variations: group: device default: true env: - CM_MLPERF_DEVICE: cuda + MLC_MLPERF_DEVICE: cuda USE_CUDA: yes diff --git a/script/app-mlperf-training-reference/run-bert-training.sh b/script/app-mlperf-training-reference/run-bert-training.sh index 08ed5b70a..1ba44bebb 100644 --- a/script/app-mlperf-training-reference/run-bert-training.sh +++ b/script/app-mlperf-training-reference/run-bert-training.sh @@ -3,14 +3,14 @@ export TF_XLA_FLAGS='--tf_xla_auto_jit=2' train_batch_size=24 cmd="python run_pretraining.py \ - --bert_config_file=${CM_MLPERF_TRAINING_BERT_CONFIG_PATH} \ + --bert_config_file=${MLC_MLPERF_TRAINING_BERT_CONFIG_PATH} \ --output_dir=/tmp/output/ \ - --input_file=${CM_MLPERF_TRAINING_BERT_TFRECORDS_PATH}/part* \ + --input_file=${MLC_MLPERF_TRAINING_BERT_TFRECORDS_PATH}/part* \ --nodo_eval \ --do_train \ --eval_batch_size=8 \ --learning_rate=0.0001 \ - --init_checkpoint=${CM_MLPERF_TRAINING_BERT_DATA_PATH}/phase1/model.ckpt-28252 \ + --init_checkpoint=${MLC_MLPERF_TRAINING_BERT_DATA_PATH}/phase1/model.ckpt-28252 \ --iterations_per_loop=1000 \ --max_predictions_per_seq=76 \ --max_seq_length=512 \ diff --git a/script/app-mlperf-training-reference/run.sh b/script/app-mlperf-training-reference/run.sh index 2f15ea73b..24500651c 100644 --- a/script/app-mlperf-training-reference/run.sh +++ b/script/app-mlperf-training-reference/run.sh @@ -1,10 +1,10 @@ #!/bin/bash -cmd="cd ${CM_RUN_DIR}" +cmd="cd ${MLC_RUN_DIR}" echo "$cmd" eval "$cmd" -if [[ ${CM_MLPERF_MODEL} == "bert" ]]; then - bash ${CM_TMP_CURRENT_SCRIPT_PATH}/run-bert-training.sh +if [[ ${MLC_MLPERF_MODEL} == "bert" ]]; then + bash ${MLC_TMP_CURRENT_SCRIPT_PATH}/run-bert-training.sh test $? -eq 0 || exit $? fi diff --git a/script/app-stable-diffusion-onnx-py/README-extra.md b/script/app-stable-diffusion-onnx-py/README-extra.md index ecab8070e..de321d158 100644 --- a/script/app-stable-diffusion-onnx-py/README-extra.md +++ b/script/app-stable-diffusion-onnx-py/README-extra.md @@ -12,7 +12,7 @@ cm run script "python app stable-diffusion onnx" --adr.python.name=sd-test --tex cm rm cache -f cm run script "python app stable-diffusion onnx _cuda" --adr.python.name=sd-test --text="crazy programmer" -cm docker script "python app stable-diffusion onnx" --text="crazy programmer" --output=. 
--docker_cm_repo=ctuning@mlcommons-ck --env.CM_DOCKER_ADD_FLAG_TO_CM_MLOPS_REPO=xyz4 +cm docker script "python app stable-diffusion onnx" --text="crazy programmer" --output=. --docker_cm_repo=ctuning@mlcommons-ck --env.MLC_DOCKER_ADD_FLAG_TO_MLC_MLOPS_REPO=xyz4 ``` diff --git a/script/app-stable-diffusion-onnx-py/meta.yaml b/script/app-stable-diffusion-onnx-py/meta.yaml index 306bebbb5..4aacbe801 100644 --- a/script/app-stable-diffusion-onnx-py/meta.yaml +++ b/script/app-stable-diffusion-onnx-py/meta.yaml @@ -77,18 +77,18 @@ variations: group: target env: USE_CUDA: yes - CM_DEVICE: cuda:0 + MLC_DEVICE: cuda:0 cpu: group: target default: yes env: USE_CPU: yes - CM_DEVICE: cpu + MLC_DEVICE: cpu input_mapping: - text: CM_APP_STABLE_DIFFUSION_ONNX_PY_TEXT - output: CM_APP_STABLE_DIFFUSION_ONNX_PY_OUTPUT + text: MLC_APP_STABLE_DIFFUSION_ONNX_PY_TEXT + output: MLC_APP_STABLE_DIFFUSION_ONNX_PY_OUTPUT input_description: @@ -107,4 +107,4 @@ docker: skip_input_for_fake_run: - text - output - - env.CM_DOCKER_ADD_FLAG_TO_CM_MLOPS_REPO + - env.MLC_DOCKER_ADD_FLAG_TO_MLC_MLOPS_REPO diff --git a/script/app-stable-diffusion-onnx-py/process.py b/script/app-stable-diffusion-onnx-py/process.py index 86bbd3c3b..86a59ef19 100644 --- a/script/app-stable-diffusion-onnx-py/process.py +++ b/script/app-stable-diffusion-onnx-py/process.py @@ -4,24 +4,24 @@ from optimum.onnxruntime import ORTStableDiffusionPipeline -output = os.environ.get('CM_APP_STABLE_DIFFUSION_ONNX_PY_OUTPUT', '') +output = os.environ.get('MLC_APP_STABLE_DIFFUSION_ONNX_PY_OUTPUT', '') f = os.path.join(output, 'output.png') if os.path.isfile(f): os.remove(f) -cm_model_path = os.environ.get('CM_ML_MODEL_PATH', '') +cm_model_path = os.environ.get('MLC_ML_MODEL_PATH', '') if cm_model_path == '': - print('Error: CM_ML_MODEL_PATH env is not defined') + print('Error: MLC_ML_MODEL_PATH env is not defined') exit(1) -device = os.environ.get('CM_DEVICE', '') +device = os.environ.get('MLC_DEVICE', '') pipeline = ORTStableDiffusionPipeline.from_pretrained( cm_model_path, local_files_only=True).to(device) -text = os.environ.get('CM_APP_STABLE_DIFFUSION_ONNX_PY_TEXT', '') +text = os.environ.get('MLC_APP_STABLE_DIFFUSION_ONNX_PY_TEXT', '') if text == '': text = "a photo of an astronaut riding a horse on mars" diff --git a/script/app-stable-diffusion-onnx-py/run.bat b/script/app-stable-diffusion-onnx-py/run.bat index fbcf3a07e..03fa74bd9 100644 --- a/script/app-stable-diffusion-onnx-py/run.bat +++ b/script/app-stable-diffusion-onnx-py/run.bat @@ -1,2 +1,2 @@ -%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\process.py +%MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\process.py IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/app-stable-diffusion-onnx-py/run.sh b/script/app-stable-diffusion-onnx-py/run.sh index efffec67f..b2cd262a4 100644 --- a/script/app-stable-diffusion-onnx-py/run.sh +++ b/script/app-stable-diffusion-onnx-py/run.sh @@ -1,4 +1,4 @@ #!/bin/bash -${CM_PYTHON_BIN} ${CM_TMP_CURRENT_SCRIPT_PATH}/process.py +${MLC_PYTHON_BIN} ${MLC_TMP_CURRENT_SCRIPT_PATH}/process.py test $? 
-eq 0 || exit 1 diff --git a/script/authenticate-github-cli/customize.py b/script/authenticate-github-cli/customize.py index a4fb19772..631b93f58 100644 --- a/script/authenticate-github-cli/customize.py +++ b/script/authenticate-github-cli/customize.py @@ -13,16 +13,16 @@ def preprocess(i): automation = i['automation'] cmd = "gh auth login" - if env.get('CM_GH_AUTH_TOKEN', '') != '': + if env.get('MLC_GH_AUTH_TOKEN', '') != '': if os_info['platform'] == 'windows': with open("token", "w") as f: - f.write(env['CM_GH_AUTH_TOKEN']) + f.write(env['MLC_GH_AUTH_TOKEN']) cmd = f"{cmd} --with-token < token" else: - cmd = f" echo {env['CM_GH_AUTH_TOKEN']} | {cmd} --with-token" + cmd = f" echo {env['MLC_GH_AUTH_TOKEN']} | {cmd} --with-token" - env['CM_RUN_CMD'] = cmd - quiet = (env.get('CM_QUIET', False) == 'yes') + env['MLC_RUN_CMD'] = cmd + quiet = (env.get('MLC_QUIET', False) == 'yes') return {'return': 0} diff --git a/script/authenticate-github-cli/meta.yaml b/script/authenticate-github-cli/meta.yaml index 605cc955f..1a69bff6e 100644 --- a/script/authenticate-github-cli/meta.yaml +++ b/script/authenticate-github-cli/meta.yaml @@ -3,8 +3,8 @@ automation_alias: script automation_uid: 5b4e0237da074764 cache: true input_mapping: - with_token: CM_GH_AUTH_TOKEN - with-token: CM_GH_AUTH_TOKEN + with_token: MLC_GH_AUTH_TOKEN + with-token: MLC_GH_AUTH_TOKEN tags: - auth - authenticate diff --git a/script/authenticate-github-cli/run.bat b/script/authenticate-github-cli/run.bat index 2366ffc07..73a07ec19 100644 --- a/script/authenticate-github-cli/run.bat +++ b/script/authenticate-github-cli/run.bat @@ -1,15 +1,15 @@ @echo off echo Running gh auth: -REM Not printing CM_RUN_CMD as it can contain secret -REM echo %CM_RUN_CMD% +REM Not printing MLC_RUN_CMD as it can contain secret +REM echo %MLC_RUN_CMD% echo. -REM Check if CM_FAKE_RUN is not equal to "yes" -if not "%CM_FAKE_RUN%"=="yes" ( +REM Check if MLC_FAKE_RUN is not equal to "yes" +if not "%MLC_FAKE_RUN%"=="yes" ( - REM Execute the command stored in CM_RUN_CMD - REM %CM_RUN_CMD% - echo %CM_GH_AUTH_TOKEN% | gh auth login --with-token + REM Execute the command stored in MLC_RUN_CMD + REM %MLC_RUN_CMD% + echo %MLC_GH_AUTH_TOKEN% | gh auth login --with-token REM Check the exit code and exit with error if non-zero if %ERRORLEVEL% neq 0 ( diff --git a/script/authenticate-github-cli/run.sh b/script/authenticate-github-cli/run.sh index 58c52dad6..ad1472f09 100644 --- a/script/authenticate-github-cli/run.sh +++ b/script/authenticate-github-cli/run.sh @@ -1,18 +1,18 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency echo "Running gh auth: " #Not printing as it can contain secret -#echo "${CM_RUN_CMD}" +#echo "${MLC_RUN_CMD}" echo "" -if [[ ${CM_FAKE_RUN} != "yes" ]]; then - eval "${CM_RUN_CMD}" +if [[ ${MLC_FAKE_RUN} != "yes" ]]; then + eval "${MLC_RUN_CMD}" test $? 
-eq 0 || exit 1 fi diff --git a/script/benchmark-any-mlperf-inference-implementation/customize.py b/script/benchmark-any-mlperf-inference-implementation/customize.py index b5961dbae..644de05b2 100644 --- a/script/benchmark-any-mlperf-inference-implementation/customize.py +++ b/script/benchmark-any-mlperf-inference-implementation/customize.py @@ -13,7 +13,7 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') models = env['MODELS'].split(",") @@ -68,7 +68,7 @@ def preprocess(i): cmds.append(cmd) assemble_tflite_cmds(cmds) - if env.get('CM_HOST_CPU_ARCHITECTURE', '') == "aarch64": + if env.get('MLC_HOST_CPU_ARCHITECTURE', '') == "aarch64": extra_tags = ",_armnn,_use-neon" cmd = f'export extra_tags="{extra_tags}"' cmds.append(cmd) diff --git a/script/benchmark-any-mlperf-inference-implementation/meta.yaml b/script/benchmark-any-mlperf-inference-implementation/meta.yaml index 5f1ae4ad6..2e176a5b4 100644 --- a/script/benchmark-any-mlperf-inference-implementation/meta.yaml +++ b/script/benchmark-any-mlperf-inference-implementation/meta.yaml @@ -302,7 +302,7 @@ variations: rb6,qualcomm: default_env: - EXTRA_ARGS: " --adr.mlperf-inference-implementation.tags=_rb6 --env.CM_MLPERF_SHORT_RANGING_RUN=no" + EXTRA_ARGS: " --adr.mlperf-inference-implementation.tags=_rb6 --env.MLC_MLPERF_SHORT_RANGING_RUN=no" state: resnet50: qaic: diff --git a/script/benchmark-any-mlperf-inference-implementation/run-template.sh b/script/benchmark-any-mlperf-inference-implementation/run-template.sh index 17c1ffa00..8556de945 100644 --- a/script/benchmark-any-mlperf-inference-implementation/run-template.sh +++ b/script/benchmark-any-mlperf-inference-implementation/run-template.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,7 +17,7 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi diff --git a/script/benchmark-program-mlperf/customize.py b/script/benchmark-program-mlperf/customize.py index fa8c7d627..3f92511eb 100644 --- a/script/benchmark-program-mlperf/customize.py +++ b/script/benchmark-program-mlperf/customize.py @@ -14,51 +14,51 @@ def postprocess(i): os_info = i['os_info'] env = i['env'] - env['CM_MLPERF_RUN_CMD'] = env.get('CM_RUN_CMD') + env['MLC_MLPERF_RUN_CMD'] = env.get('MLC_RUN_CMD') - if env.get('CM_MLPERF_POWER', '') == "yes": + if env.get('MLC_MLPERF_POWER', '') == "yes": - if env.get('CM_MLPERF_SHORT_RANGING_RUN', '') != 'no': - # Write '0' to the count.txt file in CM_RUN_DIR - count_file = os.path.join(env.get('CM_RUN_DIR', ''), 'count.txt') + if env.get('MLC_MLPERF_SHORT_RANGING_RUN', '') != 'no': + # Write '0' to the count.txt file in MLC_RUN_DIR + count_file = os.path.join(env.get('MLC_RUN_DIR', ''), 'count.txt') with open(count_file, 'w') as f: f.write('0') if os_info['platform'] != 'windows': # Construct the shell command with proper escaping - env['CM_MLPERF_RUN_CMD'] = r""" -CM_MLPERF_RUN_COUNT=\$(cat \${CM_RUN_DIR}/count.txt); -echo \${CM_MLPERF_RUN_COUNT}; -CM_MLPERF_RUN_COUNT=\$((CM_MLPERF_RUN_COUNT+1)); -echo 
\${CM_MLPERF_RUN_COUNT} > \${CM_RUN_DIR}/count.txt; + env['MLC_MLPERF_RUN_CMD'] = r""" +MLC_MLPERF_RUN_COUNT=\$(cat \${MLC_RUN_DIR}/count.txt); +echo \${MLC_MLPERF_RUN_COUNT}; +MLC_MLPERF_RUN_COUNT=\$((MLC_MLPERF_RUN_COUNT+1)); +echo \${MLC_MLPERF_RUN_COUNT} > \${MLC_RUN_DIR}/count.txt; -if [ \${CM_MLPERF_RUN_COUNT} -eq 1 ]; then -export CM_MLPERF_USER_CONF="${CM_MLPERF_RANGING_USER_CONF}"; +if [ \${MLC_MLPERF_RUN_COUNT} -eq 1 ]; then +export MLC_MLPERF_USER_CONF="${MLC_MLPERF_RANGING_USER_CONF}"; else -export CM_MLPERF_USER_CONF="${CM_MLPERF_TESTING_USER_CONF}"; +export MLC_MLPERF_USER_CONF="${MLC_MLPERF_TESTING_USER_CONF}"; fi ; - """ + env.get('CM_RUN_CMD', '').strip() + """ + env.get('MLC_RUN_CMD', '').strip() else: - env['CM_MLPERF_RUN_CMD'] = r""" + env['MLC_MLPERF_RUN_CMD'] = r""" :: Read the current count from the file -set /p CM_MLPERF_RUN_COUNT=<%CM_RUN_DIR%\count.txt -echo !CM_MLPERF_RUN_COUNT! +set /p MLC_MLPERF_RUN_COUNT=<%MLC_RUN_DIR%\count.txt +echo !MLC_MLPERF_RUN_COUNT! :: Increment the count -set /a CM_MLPERF_RUN_COUNT=!CM_MLPERF_RUN_COUNT! + 1 -echo !CM_MLPERF_RUN_COUNT! > %CM_RUN_DIR%\count.txt +set /a MLC_MLPERF_RUN_COUNT=!MLC_MLPERF_RUN_COUNT! + 1 +echo !MLC_MLPERF_RUN_COUNT! > %MLC_RUN_DIR%\count.txt :: Check the value and set the environment variable accordingly -if !CM_MLPERF_RUN_COUNT! EQU 1 ( - set CM_MLPERF_USER_CONF=%CM_MLPERF_RANGING_USER_CONF% +if !MLC_MLPERF_RUN_COUNT! EQU 1 ( + set MLC_MLPERF_USER_CONF=%MLC_MLPERF_RANGING_USER_CONF% ) else ( - set CM_MLPERF_USER_CONF=%CM_MLPERF_TESTING_USER_CONF% + set MLC_MLPERF_USER_CONF=%MLC_MLPERF_TESTING_USER_CONF% ) - """ + env.get('CM_RUN_CMD', '').strip() + """ + env.get('MLC_RUN_CMD', '').strip() else: - # Just use the existing CM_RUN_CMD if no ranging run is needed - env['CM_MLPERF_RUN_CMD'] = env.get('CM_RUN_CMD', '').strip() + # Just use the existing MLC_RUN_CMD if no ranging run is needed + env['MLC_MLPERF_RUN_CMD'] = env.get('MLC_RUN_CMD', '').strip() return {'return': 0} diff --git a/script/benchmark-program-mlperf/meta.yaml b/script/benchmark-program-mlperf/meta.yaml index ed532f8bc..d5ffe62f3 100644 --- a/script/benchmark-program-mlperf/meta.yaml +++ b/script/benchmark-program-mlperf/meta.yaml @@ -17,14 +17,14 @@ variations: tags: benchmark-program,program power: env: - CM_MLPERF_POWER: 'yes' - CM_SAVE_CONSOLE_LOG: 'no' + MLC_MLPERF_POWER: 'yes' + MLC_SAVE_CONSOLE_LOG: 'no' group: power-mode new_env_keys: - - CM_MLPERF_* + - MLC_MLPERF_* post_deps: - enable_if_env: - CM_MLPERF_LOADGEN_MODE: + MLC_MLPERF_LOADGEN_MODE: - performance names: - mlperf-power-client diff --git a/script/benchmark-program/customize.py b/script/benchmark-program/customize.py index 08e15863c..d0286557a 100644 --- a/script/benchmark-program/customize.py +++ b/script/benchmark-program/customize.py @@ -8,109 +8,109 @@ def preprocess(i): q = '"' if os_info['platform'] == 'windows' else "'" - if env.get('CM_RUN_CMD', '') == '': - if env.get('CM_BIN_NAME', '') == '': + if env.get('MLC_RUN_CMD', '') == '': + if env.get('MLC_BIN_NAME', '') == '': x = 'run.exe' if os_info['platform'] == 'windows' else 'run.out' - env['CM_BIN_NAME'] = x + env['MLC_BIN_NAME'] = x if os_info['platform'] == 'windows': - env['CM_RUN_CMD'] = env.get( - 'CM_RUN_PREFIX', '') + env['CM_BIN_NAME'] - if env.get('CM_RUN_SUFFIX', '') != '': - env['CM_RUN_CMD'] += ' ' + env['CM_RUN_SUFFIX'] + env['MLC_RUN_CMD'] = env.get( + 'MLC_RUN_PREFIX', '') + env['MLC_BIN_NAME'] + if env.get('MLC_RUN_SUFFIX', '') != '': + env['MLC_RUN_CMD'] += ' ' + env['MLC_RUN_SUFFIX'] else: - if 
env['CM_ENABLE_NUMACTL'].lower() in ["on", "1", "true", "yes"]: - env['CM_ENABLE_NUMACTL'] = "1" - CM_RUN_PREFIX = "numactl " + env['CM_NUMACTL_MEMBIND'] + ' ' + if env['MLC_ENABLE_NUMACTL'].lower() in ["on", "1", "true", "yes"]: + env['MLC_ENABLE_NUMACTL'] = "1" + MLC_RUN_PREFIX = "numactl " + env['MLC_NUMACTL_MEMBIND'] + ' ' else: - CM_RUN_PREFIX = '' + MLC_RUN_PREFIX = '' - CM_RUN_PREFIX += env.get('CM_RUN_PREFIX', '') + MLC_RUN_PREFIX += env.get('MLC_RUN_PREFIX', '') - env['CM_RUN_PREFIX'] = CM_RUN_PREFIX + env['MLC_RUN_PREFIX'] = MLC_RUN_PREFIX - CM_RUN_SUFFIX = ( - env['CM_REDIRECT_OUT'] + - ' ') if 'CM_REDIRECT_OUT' in env else '' - CM_RUN_SUFFIX += (env['CM_REDIRECT_ERR'] + - ' ') if 'CM_REDIRECT_ERR' in env else '' + MLC_RUN_SUFFIX = ( + env['MLC_REDIRECT_OUT'] + + ' ') if 'MLC_REDIRECT_OUT' in env else '' + MLC_RUN_SUFFIX += (env['MLC_REDIRECT_ERR'] + + ' ') if 'MLC_REDIRECT_ERR' in env else '' - env['CM_RUN_SUFFIX'] = env['CM_RUN_SUFFIX'] + \ - CM_RUN_SUFFIX if 'CM_RUN_SUFFIX' in env else CM_RUN_SUFFIX + env['MLC_RUN_SUFFIX'] = env['MLC_RUN_SUFFIX'] + \ + MLC_RUN_SUFFIX if 'MLC_RUN_SUFFIX' in env else MLC_RUN_SUFFIX - if env.get('CM_RUN_DIR', '') == '': - env['CM_RUN_DIR'] = os.getcwd() + if env.get('MLC_RUN_DIR', '') == '': + env['MLC_RUN_DIR'] = os.getcwd() - env['CM_RUN_CMD'] = CM_RUN_PREFIX + ' ' + os.path.join( - env['CM_RUN_DIR'], env['CM_BIN_NAME']) + ' ' + env['CM_RUN_SUFFIX'] + env['MLC_RUN_CMD'] = MLC_RUN_PREFIX + ' ' + os.path.join( + env['MLC_RUN_DIR'], env['MLC_BIN_NAME']) + ' ' + env['MLC_RUN_SUFFIX'] - x = env.get('CM_RUN_PREFIX0', '') + x = env.get('MLC_RUN_PREFIX0', '') if x != '': - env['CM_RUN_CMD'] = x + ' ' + env.get('CM_RUN_CMD', '') + env['MLC_RUN_CMD'] = x + ' ' + env.get('MLC_RUN_CMD', '') if os_info['platform'] != 'windows' and str( - env.get('CM_SAVE_CONSOLE_LOG', True)).lower() not in ["no", "false", "0"]: - logs_dir = env.get('CM_LOGS_DIR', env['CM_RUN_DIR']) - env['CM_RUN_CMD'] += r" 2>&1 | tee " + q + os.path.join( + env.get('MLC_SAVE_CONSOLE_LOG', True)).lower() not in ["no", "false", "0"]: + logs_dir = env.get('MLC_LOGS_DIR', env['MLC_RUN_DIR']) + env['MLC_RUN_CMD'] += r" 2>&1 | tee " + q + os.path.join( logs_dir, "console.out") + q + r"; echo \${PIPESTATUS[0]} > exitstatus" # additional arguments and tags for measuring system informations(only if - # 'CM_PROFILE_NVIDIA_POWER' is 'on') - if env.get('CM_PROFILE_NVIDIA_POWER', '') == "on": - env['CM_SYS_UTILISATION_SCRIPT_TAGS'] = '' + # 'MLC_PROFILE_NVIDIA_POWER' is 'on') + if env.get('MLC_PROFILE_NVIDIA_POWER', '') == "on": + env['MLC_SYS_UTILISATION_SCRIPT_TAGS'] = '' # this section is for selecting the variation - if env.get('CM_MLPERF_DEVICE', '') == "gpu": - env['CM_SYS_UTILISATION_SCRIPT_TAGS'] += ',_cuda' - elif env.get('CM_MLPERF_DEVICE', '') == "cpu": - env['CM_SYS_UTILISATION_SCRIPT_TAGS'] += ',_cpu' + if env.get('MLC_MLPERF_DEVICE', '') == "gpu": + env['MLC_SYS_UTILISATION_SCRIPT_TAGS'] += ',_cuda' + elif env.get('MLC_MLPERF_DEVICE', '') == "cpu": + env['MLC_SYS_UTILISATION_SCRIPT_TAGS'] += ',_cpu' # this section is for supplying the input arguments/tags - env['CM_SYS_UTILISATION_SCRIPT_TAGS'] += ' --log_dir=\'' + \ + env['MLC_SYS_UTILISATION_SCRIPT_TAGS'] += ' --log_dir=\'' + \ logs_dir + '\'' # specify the logs directory # specifying the interval in which the system information should be # measured - if env.get('CM_SYSTEM_INFO_MEASUREMENT_INTERVAL', '') != '': - env['CM_SYS_UTILISATION_SCRIPT_TAGS'] += ' --interval=\"' + \ - env['CM_SYSTEM_INFO_MEASUREMENT_INTERVAL'] + '\"' + if 
env.get('MLC_SYSTEM_INFO_MEASUREMENT_INTERVAL', '') != '': + env['MLC_SYS_UTILISATION_SCRIPT_TAGS'] += ' --interval=\"' + \ + env['MLC_SYSTEM_INFO_MEASUREMENT_INTERVAL'] + '\"' # generate the pre run cmd - recording runtime system infos pre_run_cmd = "" - if env.get('CM_PRE_RUN_CMD_EXTERNAL', '') != '': - pre_run_cmd += env['CM_PRE_RUN_CMD_EXTERNAL'] + if env.get('MLC_PRE_RUN_CMD_EXTERNAL', '') != '': + pre_run_cmd += env['MLC_PRE_RUN_CMD_EXTERNAL'] - if env.get('CM_PROFILE_NVIDIA_POWER', '') == "on": + if env.get('MLC_PROFILE_NVIDIA_POWER', '') == "on": if pre_run_cmd != '': pre_run_cmd += ' && ' # running the script as a process in background pre_run_cmd = pre_run_cmd + 'cm run script --tags=runtime,system,utilisation' + \ - env['CM_SYS_UTILISATION_SCRIPT_TAGS'] + ' --quiet & ' + env['MLC_SYS_UTILISATION_SCRIPT_TAGS'] + ' --quiet & ' # obtain the command if of the background process pre_run_cmd += r" cmd_pid=\$! && echo CMD_PID=\$cmd_pid" print( f"Pre run command for recording the runtime system information: {pre_run_cmd}") - env['CM_PRE_RUN_CMD'] = pre_run_cmd + env['MLC_PRE_RUN_CMD'] = pre_run_cmd # generate the post run cmd - for killing the process that records runtime # system infos post_run_cmd = "" - if env.get('CM_PROFILE_NVIDIA_POWER', '') == "on": + if env.get('MLC_PROFILE_NVIDIA_POWER', '') == "on": post_run_cmd += r"echo killing process \$cmd_pid && kill -TERM \${cmd_pid}" print( f"Post run command for killing the process that measures the runtime system information: {post_run_cmd}") - env['CM_POST_RUN_CMD'] = post_run_cmd + env['MLC_POST_RUN_CMD'] = post_run_cmd # Print info print('***************************************************************************') print('CM script::benchmark-program/run.sh') print('') - print('Run Directory: {}'.format(env.get('CM_RUN_DIR', ''))) + print('Run Directory: {}'.format(env.get('MLC_RUN_DIR', ''))) print('') - print('CMD: {}'.format(env.get('CM_RUN_CMD', ''))) + print('CMD: {}'.format(env.get('MLC_RUN_CMD', ''))) print('') diff --git a/script/benchmark-program/meta.yaml b/script/benchmark-program/meta.yaml index 4abb48d60..73d59e6c8 100644 --- a/script/benchmark-program/meta.yaml +++ b/script/benchmark-program/meta.yaml @@ -3,19 +3,19 @@ automation_alias: script automation_uid: 5b4e0237da074764 category: DevOps automation default_env: - CM_ENABLE_NUMACTL: '0' - CM_ENABLE_PROFILING: '0' + MLC_ENABLE_NUMACTL: '0' + MLC_ENABLE_PROFILING: '0' deps: - tags: detect,cpu - enable_if_env: - CM_SET_PERFORMANCE_MODE: + MLC_SET_PERFORMANCE_MODE: - 'on' - 'yes' - 'True' - true tags: set,performance,mode,_performance new_env_keys: -- CM_RUN_CMD +- MLC_RUN_CMD tags: - program - benchmark @@ -25,14 +25,14 @@ uid: 19f369ef47084895 variations: numactl: default_env: - CM_ENABLE_NUMACTL: 1 - CM_NUMACTL_MEMBIND: --localalloc + MLC_ENABLE_NUMACTL: 1 + MLC_NUMACTL_MEMBIND: --localalloc numactl-interleave: default_env: - CM_ENABLE_NUMACTL: 1 - CM_NUMACTL_MEMBIND: --interleave=all + MLC_ENABLE_NUMACTL: 1 + MLC_NUMACTL_MEMBIND: --interleave=all profile: default_env: - CM_ENABLE_PROFILING: 1 + MLC_ENABLE_PROFILING: 1 deps: - tags: get,profiler diff --git a/script/benchmark-program/run-ubuntu.sh b/script/benchmark-program/run-ubuntu.sh index 1f19ed80b..dfca75282 100644 --- a/script/benchmark-program/run-ubuntu.sh +++ b/script/benchmark-program/run-ubuntu.sh @@ -1,9 +1,9 @@ #!/bin/bash -CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD} +MLC_TMP_CURRENT_SCRIPT_PATH=${MLC_TMP_CURRENT_SCRIPT_PATH:-$PWD} -cd ${CM_TMP_CURRENT_SCRIPT_PATH} -if [ 
${CM_ENABLE_NUMACTL} == "1" ]; then +cd ${MLC_TMP_CURRENT_SCRIPT_PATH} +if [ ${MLC_ENABLE_NUMACTL} == "1" ]; then sudo apt-get install numactl fi diff --git a/script/benchmark-program/run.bat b/script/benchmark-program/run.bat index d15449355..ccc797361 100644 --- a/script/benchmark-program/run.bat +++ b/script/benchmark-program/run.bat @@ -1,21 +1,21 @@ @echo off -if "%CM_RUN_DIR%" == "" ( - echo CM_RUN_DIR is not set +if "%MLC_RUN_DIR%" == "" ( + echo MLC_RUN_DIR is not set exit 1 ) -cd %CM_RUN_DIR% +cd %MLC_RUN_DIR% -if "%CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM%" == "True" ( +if "%MLC_DEBUG_SCRIPT_BENCHMARK_PROGRAM%" == "True" ( echo ***************************************************** echo You are now in Debug shell with pre-set CM env and can run the following command line manually: echo. - if not "%CM_RUN_CMD0%" == "" ( - echo %CM_RUN_CMD0% + if not "%MLC_RUN_CMD0%" == "" ( + echo %MLC_RUN_CMD0% ) else ( - echo %CM_RUN_CMD% + echo %MLC_RUN_CMD% ) echo. @@ -27,13 +27,13 @@ if "%CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM%" == "True" ( exit 0 ) -rem Check CM_RUN_CMD0 -if not "%CM_RUN_CMD0%" == "" ( +rem Check MLC_RUN_CMD0 +if not "%MLC_RUN_CMD0%" == "" ( echo. - %CM_RUN_CMD0% + %MLC_RUN_CMD0% ) else ( echo. - %CM_RUN_CMD% + %MLC_RUN_CMD% ) IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/benchmark-program/run.sh b/script/benchmark-program/run.sh index 6eb39d333..011e6a8fe 100755 --- a/script/benchmark-program/run.sh +++ b/script/benchmark-program/run.sh @@ -2,8 +2,8 @@ # function to safely exit the background process safe_exit() { - if [[ "${CM_POST_RUN_CMD}" != "" ]]; then - eval ${CM_POST_RUN_CMD} + if [[ "${MLC_POST_RUN_CMD}" != "" ]]; then + eval ${MLC_POST_RUN_CMD} if [ $? -eq 0 ]; then exit 0 else @@ -15,27 +15,27 @@ safe_exit() { # trap signals to redirect the execution flow to safe_exit trap safe_exit SIGINT SIGTERM -if [[ ${CM_MLPERF_POWER} == "yes" && ${CM_MLPERF_LOADGEN_MODE} == "performance" ]]; then +if [[ ${MLC_MLPERF_POWER} == "yes" && ${MLC_MLPERF_LOADGEN_MODE} == "performance" ]]; then exit 0 fi # Run -if [ -z ${CM_RUN_DIR} ]; then - echo "CM_RUN_DIR is not set" +if [ -z ${MLC_RUN_DIR} ]; then + echo "MLC_RUN_DIR is not set" exit 1 fi -cd ${CM_RUN_DIR} +cd ${MLC_RUN_DIR} -if [[ "${CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM}" == "True" ]]; then +if [[ "${MLC_DEBUG_SCRIPT_BENCHMARK_PROGRAM}" == "True" ]]; then echo "*****************************************************" echo "You are now in Debug shell with pre-set CM env and can run the following command line manually:" echo "" - if [[ "${CM_RUN_CMD0}" != "" ]]; then - echo "${CM_RUN_CMD0}" + if [[ "${MLC_RUN_CMD0}" != "" ]]; then + echo "${MLC_RUN_CMD0}" else - echo "${CM_RUN_CMD}" + echo "${MLC_RUN_CMD}" fi echo "" @@ -46,7 +46,7 @@ if [[ "${CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM}" == "True" ]]; then # # cp -f tmp-run.sh debug-script-benchmark-program.sh # -# sed -e 's/CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM="True"/CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM="False"/g' -i debug-script-benchmark-program.sh +# sed -e 's/MLC_DEBUG_SCRIPT_BENCHMARK_PROGRAM="True"/MLC_DEBUG_SCRIPT_BENCHMARK_PROGRAM="False"/g' -i debug-script-benchmark-program.sh bash @@ -54,8 +54,8 @@ if [[ "${CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM}" == "True" ]]; then exit 0 fi -echo $CM_PRE_RUN_CMD -eval ${CM_PRE_RUN_CMD} +echo $MLC_PRE_RUN_CMD +eval ${MLC_PRE_RUN_CMD} # Function to run command and check exit status run_command() { @@ -78,17 +78,17 @@ run_command() { fi } -# Run CM_RUN_CMD0 if it exists, otherwise run CM_RUN_CMD -if [[ -n "$CM_RUN_CMD0" ]]; then - run_command "$CM_RUN_CMD0" 
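# (A minimal sketch of the fallback pattern used in this hunk, assuming
#  run_command() eval's its argument and checks the exit status it records:
#      [[ -n "$MLC_RUN_CMD0" ]] && run_command "$MLC_RUN_CMD0"   # optional extra command first
#      run_command "$MLC_RUN_CMD"                                # then the main benchmark command
#  both variables are assembled by benchmark-program/customize.py above.)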
+# Run MLC_RUN_CMD0 if it exists, otherwise run MLC_RUN_CMD +if [[ -n "$MLC_RUN_CMD0" ]]; then + run_command "$MLC_RUN_CMD0" fi -run_command "$CM_RUN_CMD" +run_command "$MLC_RUN_CMD" # Run post-run command if it exists -if [[ -n "$CM_POST_RUN_CMD" ]]; then - eval "$CM_POST_RUN_CMD" +if [[ -n "$MLC_POST_RUN_CMD" ]]; then + eval "$MLC_POST_RUN_CMD" post_exitstatus=$? # Exit if post-run command fails if [[ $post_exitstatus -ne 0 ]]; then diff --git a/script/build-docker-image/customize.py b/script/build-docker-image/customize.py index a231b6b09..be7c33035 100644 --- a/script/build-docker-image/customize.py +++ b/script/build-docker-image/customize.py @@ -8,57 +8,57 @@ def preprocess(i): os_info = i['os_info'] env = i['env'] - dockerfile_path = env.get('CM_DOCKERFILE_WITH_PATH', '') + dockerfile_path = env.get('MLC_DOCKERFILE_WITH_PATH', '') if dockerfile_path != '' and os.path.exists(dockerfile_path): build_dockerfile = False - env['CM_BUILD_DOCKERFILE'] = "no" + env['MLC_BUILD_DOCKERFILE'] = "no" os.chdir(os.path.dirname(dockerfile_path)) else: build_dockerfile = True - env['CM_BUILD_DOCKERFILE'] = "yes" - env['CM_DOCKERFILE_BUILD_FROM_IMAGE_SCRIPT'] = "yes" + env['MLC_BUILD_DOCKERFILE'] = "yes" + env['MLC_DOCKERFILE_BUILD_FROM_IMAGE_SCRIPT'] = "yes" - CM_DOCKER_BUILD_ARGS = env.get('+ CM_DOCKER_BUILD_ARGS', []) + MLC_DOCKER_BUILD_ARGS = env.get('+ MLC_DOCKER_BUILD_ARGS', []) - if env.get('CM_GH_TOKEN', '') != '': - CM_DOCKER_BUILD_ARGS.append("CM_GH_TOKEN=" + env['CM_GH_TOKEN']) + if env.get('MLC_GH_TOKEN', '') != '': + MLC_DOCKER_BUILD_ARGS.append("MLC_GH_TOKEN=" + env['MLC_GH_TOKEN']) - if CM_DOCKER_BUILD_ARGS: + if MLC_DOCKER_BUILD_ARGS: build_args = "--build-arg " + \ - " --build-arg ".join(CM_DOCKER_BUILD_ARGS) + " --build-arg ".join(MLC_DOCKER_BUILD_ARGS) else: build_args = "" - env['CM_DOCKER_BUILD_ARGS'] = build_args + env['MLC_DOCKER_BUILD_ARGS'] = build_args -# if 'CM_DOCKERFILE_WITH_PATH' not in env or not exists(env['CM_DOCKERFILE_WITH_PATH']): -# env['CM_BUILD_DOCKERFILE'] = "yes" +# if 'MLC_DOCKERFILE_WITH_PATH' not in env or not exists(env['MLC_DOCKERFILE_WITH_PATH']): +# env['MLC_BUILD_DOCKERFILE'] = "yes" # else: -# env['CM_BUILD_DOCKERFILE'] = "no" +# env['MLC_BUILD_DOCKERFILE'] = "no" # - if env.get("CM_DOCKER_IMAGE_REPO", "") == '': - env['CM_DOCKER_IMAGE_REPO'] = "localhost/local" + if env.get("MLC_DOCKER_IMAGE_REPO", "") == '': + env['MLC_DOCKER_IMAGE_REPO'] = "localhost/local" - docker_image_name = env.get('CM_DOCKER_IMAGE_NAME', '') + docker_image_name = env.get('MLC_DOCKER_IMAGE_NAME', '') if docker_image_name == '': - docker_image_name = "cm-script-" + \ - env.get('CM_DOCKER_RUN_SCRIPT_TAGS', '').replace( + docker_image_name = "mlc-script-" + \ + env.get('MLC_DOCKER_RUN_SCRIPT_TAGS', '').replace( ',', '-').replace('_', '-') - env['CM_DOCKER_IMAGE_NAME'] = docker_image_name.lower() + env['MLC_DOCKER_IMAGE_NAME'] = docker_image_name.lower() - if env.get("CM_DOCKER_IMAGE_TAG", "") == '': - env['CM_DOCKER_IMAGE_TAG'] = "latest" + if env.get("MLC_DOCKER_IMAGE_TAG", "") == '': + env['MLC_DOCKER_IMAGE_TAG'] = "latest" - if str(env.get("CM_DOCKER_CACHE", "yes")).lower() in ["no", "false", "0"]: - env["CM_DOCKER_CACHE_ARG"] = " --no-cache" + if str(env.get("MLC_DOCKER_CACHE", "yes")).lower() in ["no", "false", "0"]: + env["MLC_DOCKER_CACHE_ARG"] = " --no-cache" CMD = '' image_name = get_image_name(env) if build_dockerfile: - dockerfile_path = r"\${CM_DOCKERFILE_WITH_PATH}" + dockerfile_path = r"\${MLC_DOCKERFILE_WITH_PATH}" # Write .dockerignore with open('.dockerignore', 
'w') as f: @@ -66,8 +66,8 @@ def preprocess(i): # Prepare CMD to build image XCMD = [ - f'{env["CM_CONTAINER_TOOL"]} build ' + - env.get('CM_DOCKER_CACHE_ARG', ''), + f'{env["MLC_CONTAINER_TOOL"]} build ' + + env.get('MLC_DOCKER_CACHE_ARG', ''), ' ' + build_args, ' -f "' + dockerfile_path + '"', ' -t "' + image_name, @@ -89,16 +89,16 @@ def preprocess(i): print('') - env['CM_DOCKER_BUILD_CMD'] = CMD + env['MLC_DOCKER_BUILD_CMD'] = CMD return {'return': 0} def get_image_name(env): - image_name = env.get('CM_DOCKER_IMAGE_REPO', '') + '/' + \ - env.get('CM_DOCKER_IMAGE_NAME', '') + ':' + \ - env.get('CM_DOCKER_IMAGE_TAG', '') + '"' + image_name = env.get('MLC_DOCKER_IMAGE_REPO', '') + '/' + \ + env.get('MLC_DOCKER_IMAGE_NAME', '') + ':' + \ + env.get('MLC_DOCKER_IMAGE_TAG', '') + '"' return image_name @@ -108,13 +108,13 @@ def postprocess(i): env = i['env'] # Check if need to push docker image to the Docker Hub - if env.get('CM_DOCKER_PUSH_IMAGE', '') in ['True', True, 'yes']: + if env.get('MLC_DOCKER_PUSH_IMAGE', '') in ['True', True, 'yes']: image_name = get_image_name(env) # Prepare CMD to build image PCMD = 'docker image push ' + image_name - dockerfile_path = env.get('CM_DOCKERFILE_WITH_PATH', '') + dockerfile_path = env.get('MLC_DOCKERFILE_WITH_PATH', '') if dockerfile_path != '' and os.path.isfile(dockerfile_path): with open(dockerfile_path + '.push.sh', 'w') as f: f.write(PCMD + '\n') diff --git a/script/build-docker-image/examples/0-common.bat b/script/build-docker-image/examples/0-common.bat deleted file mode 100644 index 721cc1b5d..000000000 --- a/script/build-docker-image/examples/0-common.bat +++ /dev/null @@ -1,21 +0,0 @@ -set DOCKER_IMAGE_REPO=cknowledge - -set DOCKER_OS=ubuntu - -rem set DOCKER_OS_VER=22.04 -set DOCKER_OS_VER=23.04 -set DOCKER_PIP_EXTRA_FLAGS=--break-system-packages - -rem set DOCKER_IMAGE_NAME=cm-base -set DOCKER_IMAGE_NAME=cm-script-app-image-classification-onnx-py -set DOCKER_IMAGE_POST_FILE=%CD%\extra-cmd.cm-script-app-image-classification-onnx-py - -rem set DOCKER_IMAGE_TAG=%DOCKER_OS%-%DOCKER_OS_VER%-20230804 - -set DOCKER_IMAGE_TAG=%DOCKER_OS%-%DOCKER_OS_VER%-latest -set DOCKERFILE_EXT=%DOCKER_IMAGE_NAME%-%DOCKER_IMAGE_TAG% - -set DOCKER_PACKAGE_MANAGER_UPDATE_CMD="apt-get update -y && apt-get upgrade -y" - -set DOCKER_CM_MLOPS_REPO="ctuning@mlcommons-ck" -rem set DOCKER_CM_MLOPS_REPO="mlcommons@ck" diff --git a/script/build-docker-image/examples/0-generate.bat b/script/build-docker-image/examples/0-generate.bat deleted file mode 100644 index 443d029ae..000000000 --- a/script/build-docker-image/examples/0-generate.bat +++ /dev/null @@ -1,9 +0,0 @@ -call 0-common.bat - -cmr "build dockerfile" --file_path=%CD%\Dockerfile.%DOCKERFILE_EXT% ^ - --docker_os=%DOCKER_OS% ^ - --docker_os_version=%DOCKER_OS_VER% ^ - --package_manager_update_cmd=%DOCKER_PACKAGE_MANAGER_UPDATE_CMD% ^ - --pip_extra_flags=%DOCKER_PIP_EXTRA_FLAGS% ^ - --post_file=%DOCKER_IMAGE_POST_FILE% ^ - --cm_repo=%DOCKER_CM_MLOPS_REPO% diff --git a/script/build-docker-image/examples/1-build.bat b/script/build-docker-image/examples/1-build.bat deleted file mode 100644 index 2356eb032..000000000 --- a/script/build-docker-image/examples/1-build.bat +++ /dev/null @@ -1,8 +0,0 @@ -call 0-common.bat - -cmr "build docker image" --dockerfile=%CD%\Dockerfile.%DOCKERFILE_EXT% ^ - --docker_os=%DOCKER_OS% ^ - --docker_os_version=%DOCKER_OS_VER% ^ - --image_repo=%DOCKER_IMAGE_REPO% ^ - --image_name=%DOCKER_IMAGE_NAME% ^ - --image_tag=%DOCKER_IMAGE_TAG% diff --git 
a/script/build-docker-image/examples/2-run-cm-command1.bat b/script/build-docker-image/examples/2-run-cm-command1.bat deleted file mode 100644 index eeeadd311..000000000 --- a/script/build-docker-image/examples/2-run-cm-command1.bat +++ /dev/null @@ -1,3 +0,0 @@ -call 0-common.bat - -docker run -it %DOCKER_IMAGE_REPO%/%DOCKER_IMAGE_NAME%:%DOCKER_IMAGE_TAG% "cmr 'detect os' -j" diff --git a/script/build-docker-image/examples/2-run-cm-command2.bat b/script/build-docker-image/examples/2-run-cm-command2.bat deleted file mode 100644 index ac1c8a3a6..000000000 --- a/script/build-docker-image/examples/2-run-cm-command2.bat +++ /dev/null @@ -1,3 +0,0 @@ -call 0-common.bat - -cmr "run docker container" --image_repo=%DOCKER_IMAGE_REPO% --image_name=%DOCKER_IMAGE_NAME% --image_tag=%DOCKER_IMAGE_TAG% --run_cmd="cmr 'detect os' -j" diff --git a/script/build-docker-image/examples/2-run-cm-command3.bat b/script/build-docker-image/examples/2-run-cm-command3.bat deleted file mode 100644 index e690f093c..000000000 --- a/script/build-docker-image/examples/2-run-cm-command3.bat +++ /dev/null @@ -1,3 +0,0 @@ -call 0-common.bat - -cmr "run docker container" --image_repo=%DOCKER_IMAGE_REPO% --image_tag=%DOCKER_IMAGE_TAG% --script_tags=detect,os diff --git a/script/build-docker-image/examples/2-run-cm-command4.bat b/script/build-docker-image/examples/2-run-cm-command4.bat deleted file mode 100644 index c2e6f801c..000000000 --- a/script/build-docker-image/examples/2-run-cm-command4.bat +++ /dev/null @@ -1,3 +0,0 @@ -call 0-common.bat - -cmr "run docker container" --image_repo=%DOCKER_IMAGE_REPO% --image_tag=%DOCKER_IMAGE_TAG% --script_tags=detect,os --it diff --git a/script/build-docker-image/examples/2-run-cm-command5.bat b/script/build-docker-image/examples/2-run-cm-command5.bat deleted file mode 100644 index d153437f1..000000000 --- a/script/build-docker-image/examples/2-run-cm-command5.bat +++ /dev/null @@ -1,3 +0,0 @@ -call 0-common.bat - -cm docker script --tags=detect,os -j diff --git a/script/build-docker-image/examples/2-run-interactive1.bat b/script/build-docker-image/examples/2-run-interactive1.bat deleted file mode 100644 index 917dda930..000000000 --- a/script/build-docker-image/examples/2-run-interactive1.bat +++ /dev/null @@ -1,3 +0,0 @@ -call 0-common.bat - -docker run -it %DOCKER_IMAGE_REPO%/%DOCKER_IMAGE_NAME%:%DOCKER_IMAGE_TAG% -c bash diff --git a/script/build-docker-image/examples/2-run-interactive2.bat b/script/build-docker-image/examples/2-run-interactive2.bat deleted file mode 100644 index 67dd22650..000000000 --- a/script/build-docker-image/examples/2-run-interactive2.bat +++ /dev/null @@ -1,3 +0,0 @@ -call 0-common.bat - -cmr "run docker container" --image_repo=%DOCKER_IMAGE_REPO% --image_name=%DOCKER_IMAGE_NAME% --image_tag=%DOCKER_IMAGE_TAG% --it diff --git a/script/build-docker-image/examples/3-push-to-docker-hub.bat b/script/build-docker-image/examples/3-push-to-docker-hub.bat deleted file mode 100644 index 2c9eb634d..000000000 --- a/script/build-docker-image/examples/3-push-to-docker-hub.bat +++ /dev/null @@ -1,3 +0,0 @@ -call 0-common.bat - -docker push %DOCKER_IMAGE_REPO%/%DOCKER_IMAGE_NAME%:%DOCKER_IMAGE_TAG% diff --git a/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-22.04-20230804 b/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-22.04-20230804 deleted file mode 100644 index 418e73363..000000000 --- a/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-22.04-20230804 +++ /dev/null @@ -1,38 +0,0 @@ -FROM ubuntu:22.04 - -# Maintained by the 
MLCommons taskforce on automation and reproducibility -LABEL github="https://github.com/mlcommons/ck" -LABEL maintainer="https://cKnowledge.org/mlcommons-taskforce" - -SHELL ["/bin/bash", "-c"] -ARG CM_GH_TOKEN - -# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes -# Install system dependencies -RUN apt-get update -y && apt-get upgrade -y -RUN apt-get install -y python3 python3-pip git sudo wget - -# Install python packages -RUN python3 -m pip install cmind requests - -# Setup docker environment -ENTRYPOINT ["/bin/bash", "-c"] -ENV TZ="US/Pacific" -ENV PATH="${PATH}:/home/cmuser/.local/bin" -RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone - -# Setup docker user -RUN groupadd cm -RUN useradd -g cm --create-home --shell /bin/bash cmuser -RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers -USER cmuser:cm -WORKDIR /home/cmuser - -# Download CM repo for scripts -RUN cm pull repo mlcommons@ck --dummy - -# Install all system dependencies -RUN cm run script --quiet --tags=get,sys-utils-cm - -# Run commands -RUN cm version diff --git a/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-20230804 b/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-20230804 deleted file mode 100644 index 478e155f6..000000000 --- a/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-20230804 +++ /dev/null @@ -1,38 +0,0 @@ -FROM ubuntu:23.04 - -# Maintained by the MLCommons taskforce on automation and reproducibility -LABEL github="https://github.com/mlcommons/ck" -LABEL maintainer="https://cKnowledge.org/mlcommons-taskforce" - -SHELL ["/bin/bash", "-c"] -ARG CM_GH_TOKEN - -# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes -# Install system dependencies -RUN apt-get update -y && apt-get upgrade -y -RUN apt-get install -y python3 python3-pip git sudo wget - -# Install python packages -RUN python3 -m pip install cmind requests --break-system-packages - -# Setup docker environment -ENTRYPOINT ["/bin/bash", "-c"] -ENV TZ="US/Pacific" -ENV PATH="${PATH}:/home/cmuser/.local/bin" -RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone - -# Setup docker user -RUN groupadd cm -RUN useradd -g cm --create-home --shell /bin/bash cmuser -RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers -USER cmuser:cm -WORKDIR /home/cmuser - -# Download CM repo for scripts -RUN cm pull repo mlcommons@ck --dummy - -# Install all system dependencies -RUN cm run script --quiet --tags=get,sys-utils-cm - -# Run commands -RUN cm version diff --git a/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-latest b/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-latest deleted file mode 100644 index 832a37669..000000000 --- a/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-latest +++ /dev/null @@ -1,38 +0,0 @@ -FROM ubuntu:23.04 - -# Maintained by the MLCommons taskforce on automation and reproducibility -LABEL github="https://github.com/mlcommons/ck" -LABEL maintainer="https://cKnowledge.org/mlcommons-taskforce" - -SHELL ["/bin/bash", "-c"] -ARG CM_GH_TOKEN - -# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes -# Install system dependencies -RUN apt-get update -y && apt-get upgrade -y -RUN apt-get install -y python3 python3-pip git sudo wget - -# Install python packages -RUN python3 -m pip install cmind requests --break-system-packages - -# Setup docker environment -ENTRYPOINT ["/bin/bash", "-c"] -ENV TZ="US/Pacific" -ENV 
PATH="${PATH}:/home/cmuser/.local/bin" -RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone - -# Setup docker user -RUN groupadd cm -RUN useradd -g cm --create-home --shell /bin/bash cmuser -RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers -USER cmuser:cm -WORKDIR /home/cmuser - -# Download CM repo for scripts -RUN cm pull repo ctuning@mlcommons-ck - -# Install all system dependencies -RUN cm run script --quiet --tags=get,sys-utils-cm - -# Run commands -RUN cm version diff --git a/script/build-docker-image/examples/Dockerfile.cm-script-app-image-classification-onnx-py-ubuntu-23.04-latest b/script/build-docker-image/examples/Dockerfile.cm-script-app-image-classification-onnx-py-ubuntu-23.04-latest deleted file mode 100644 index 7ce0af2fb..000000000 --- a/script/build-docker-image/examples/Dockerfile.cm-script-app-image-classification-onnx-py-ubuntu-23.04-latest +++ /dev/null @@ -1,45 +0,0 @@ -FROM ubuntu:23.04 - -# Maintained by the MLCommons taskforce on automation and reproducibility -LABEL github="https://github.com/mlcommons/ck" -LABEL maintainer="https://cKnowledge.org/mlcommons-taskforce" - -SHELL ["/bin/bash", "-c"] -ARG CM_GH_TOKEN - -# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes -# Install system dependencies -RUN apt-get update -y && apt-get upgrade -y -RUN apt-get install -y python3 python3-pip git sudo wget - -# Install python packages -RUN python3 -m pip install cmind requests --break-system-packages - -# Setup docker environment -ENTRYPOINT ["/bin/bash", "-c"] -ENV TZ="US/Pacific" -ENV PATH="${PATH}:/home/cmuser/.local/bin" -RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone - -# Setup docker user -RUN groupadd cm -RUN useradd -g cm --create-home --shell /bin/bash cmuser -RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers -USER cmuser:cm -WORKDIR /home/cmuser - -# Download CM repo for scripts -RUN cm pull repo ctuning@mlcommons-ck - -# Install all system dependencies -RUN cm run script --quiet --tags=get,sys-utils-cm - -# Run commands -RUN cm version - -# Create virtual python environment -RUN cmr "install python-venv" --name=cm --quiet - -# Run image classification and install all related CM components automatically -RUN cmr "python app image-classification onnx" --adr.python.name=cm --quiet - diff --git a/script/build-docker-image/examples/README.md b/script/build-docker-image/examples/README.md deleted file mode 100644 index 8035bc429..000000000 --- a/script/build-docker-image/examples/README.md +++ /dev/null @@ -1 +0,0 @@ -https://hub.docker.com/r/cknowledge/cm-base/tags diff --git a/script/build-docker-image/examples/computer_mouse.jpg b/script/build-docker-image/examples/computer_mouse.jpg deleted file mode 100644 index e7f8abb6fe93d18af393ea036b24b907cc48e786..0000000000000000000000000000000000000000 GIT binary patch [41154-byte base85-encoded JPEG literal omitted -- binary data for the deleted example image computer_mouse.jpg]
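For orientation, the docker build command that build-docker-image/customize.py assembles from the renamed variables has roughly this shape (a sketch with assumed values: docker as MLC_CONTAINER_TOOL, the default localhost/local repo and latest tag, and a hypothetical run-mlperf script tag; --no-cache is appended only when MLC_DOCKER_CACHE is "no", and the exact quoting is handled inside preprocess()):

    docker build --no-cache \
        --build-arg MLC_GH_TOKEN=${MLC_GH_TOKEN} \
        -f "${MLC_DOCKERFILE_WITH_PATH}" \
        -t "localhost/local/mlc-script-run-mlperf:latest" .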
diff --git a/script/build-docker-image/examples/extra-cmd.cm-script-app-image-classification-onnx-py b/script/build-docker-image/examples/extra-cmd.cm-script-app-image-classification-onnx-py deleted file mode 100644 index 981f9b94d..000000000 --- a/script/build-docker-image/examples/extra-cmd.cm-script-app-image-classification-onnx-py +++ /dev/null @@ -1,6 +0,0 @@ - -# Create virtual python environment -RUN cmr "install python-venv" --name=cm --quiet - -# Run image classification and install all related CM components automatically -RUN cmr "python app image-classification onnx" --adr.python.name=cm --quiet diff --git a/script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.bat b/script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.bat deleted file mode 100644 index c4f8e2204..000000000 ---
a/script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.bat +++ /dev/null @@ -1,6 +0,0 @@ -rem call this script with computer_mouse.jpg as input - -call 0-common.bat - -rem docker run -v %CD%:/tmp/host -it --rm cknowledge/cm-script-app-image-classification-onnx-py:ubuntu-23.04-latest -c "time cmr 'python app image-classification onnx' --adr.python.name=cm --input=/tmp/host/%1" -docker run -v %CD%:/tmp/host -it --rm %DOCKER_IMAGE_REPO%/%DOCKER_IMAGE_NAME%:%DOCKER_IMAGE_TAG% -c "time cmr 'python app image-classification onnx' --adr.python.name=cm --input=/tmp/host/%1" \ No newline at end of file diff --git a/script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.sh b/script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.sh deleted file mode 100644 index 55314e9e4..000000000 --- a/script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -# call this script with computer_mouse.jpg as input - -docker run -v $PWD:/tmp/host -it --rm cknowledge/cm-script-app-image-classification-onnx-py:ubuntu-23.04-latest -c "time cmr 'python app image-classification onnx' --adr.python.name=cm --input=/tmp/host/$1" diff --git a/script/build-docker-image/examples/run-cm-image-classification-python-onnx.bat b/script/build-docker-image/examples/run-cm-image-classification-python-onnx.bat deleted file mode 100644 index 762ed99fd..000000000 --- a/script/build-docker-image/examples/run-cm-image-classification-python-onnx.bat +++ /dev/null @@ -1 +0,0 @@ -docker run -it --rm cknowledge/cm-script-app-image-classification-onnx-py:ubuntu-23.04-latest -c "time cmr 'python app image-classification onnx' --adr.python.name=cm" diff --git a/script/build-docker-image/examples/run-cm-image-classification-python-onnx.sh b/script/build-docker-image/examples/run-cm-image-classification-python-onnx.sh deleted file mode 100644 index a24a06ed9..000000000 --- a/script/build-docker-image/examples/run-cm-image-classification-python-onnx.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -docker run -it --rm cknowledge/cm-script-app-image-classification-onnx-py:ubuntu-23.04-latest -c "time cmr 'python app image-classification onnx' --adr.python.name=cm" diff --git a/script/build-docker-image/meta.yaml b/script/build-docker-image/meta.yaml index 8fd7c2571..d1d86083a 100644 --- a/script/build-docker-image/meta.yaml +++ b/script/build-docker-image/meta.yaml @@ -16,33 +16,33 @@ tags: - dockerimage default_env: - CM_DOCKER_IMAGE_REPO: local - CM_DOCKER_IMAGE_TAG: latest + MLC_DOCKER_IMAGE_REPO: local + MLC_DOCKER_IMAGE_TAG: latest input_mapping: - cache: CM_DOCKER_CACHE - cm_repo: CM_MLOPS_REPO - docker_os: CM_DOCKER_OS - docker_os_version: CM_DOCKER_OS_VERSION - dockerfile: CM_DOCKERFILE_WITH_PATH - gh_token: CM_GH_TOKEN - image_name: CM_DOCKER_IMAGE_NAME - image_repo: CM_DOCKER_IMAGE_REPO - image_tag: CM_DOCKER_IMAGE_TAG - post_run_cmds: CM_DOCKER_POST_RUN_COMMANDS - pre_run_cmds: CM_DOCKER_PRE_RUN_COMMANDS - real_run: CM_REAL_RUN - script_tags: CM_DOCKER_RUN_SCRIPT_TAGS - push_image: CM_DOCKER_PUSH_IMAGE + cache: MLC_DOCKER_CACHE + cm_repo: MLC_MLOPS_REPO + docker_os: MLC_DOCKER_OS + docker_os_version: MLC_DOCKER_OS_VERSION + dockerfile: MLC_DOCKERFILE_WITH_PATH + gh_token: MLC_GH_TOKEN + image_name: MLC_DOCKER_IMAGE_NAME + image_repo: MLC_DOCKER_IMAGE_REPO + image_tag: MLC_DOCKER_IMAGE_TAG + post_run_cmds: MLC_DOCKER_POST_RUN_COMMANDS + pre_run_cmds: MLC_DOCKER_PRE_RUN_COMMANDS + 
real_run: MLC_REAL_RUN + script_tags: MLC_DOCKER_RUN_SCRIPT_TAGS + push_image: MLC_DOCKER_PUSH_IMAGE new_env_keys: -- CM_DOCKER_* +- MLC_DOCKER_* deps: - tags: get,docker prehook_deps: - enable_if_env: - CM_BUILD_DOCKERFILE: + MLC_BUILD_DOCKERFILE: - 'yes' tags: build,dockerfile diff --git a/script/build-docker-image/run.bat b/script/build-docker-image/run.bat index d3a1b061d..ab5be88c7 100644 --- a/script/build-docker-image/run.bat +++ b/script/build-docker-image/run.bat @@ -1,13 +1,13 @@ -if exist %CM_DOCKERFILE_WITH_PATH% ( +if exist %MLC_DOCKERFILE_WITH_PATH% ( rem echo .git > .dockerignore rem echo. -rem echo docker build %CM_DOCKER_CACHE_ARG% %CM_DOCKER_BUILD_ARGS% -f %CM_DOCKERFILE_WITH_PATH% -t %CM_DOCKER_IMAGE_REPO%/%CM_DOCKER_IMAGE_NAME%:%CM_DOCKER_IMAGE_TAG% . +rem echo docker build %MLC_DOCKER_CACHE_ARG% %MLC_DOCKER_BUILD_ARGS% -f %MLC_DOCKERFILE_WITH_PATH% -t %MLC_DOCKER_IMAGE_REPO%/%MLC_DOCKER_IMAGE_NAME%:%MLC_DOCKER_IMAGE_TAG% . rem echo. -rem docker build %CM_DOCKER_CACHE_ARG% %CM_DOCKER_BUILD_ARGS% -f "%CM_DOCKERFILE_WITH_PATH%" -t "%CM_DOCKER_IMAGE_REPO%/%CM_DOCKER_IMAGE_NAME%:%CM_DOCKER_IMAGE_TAG%" . +rem docker build %MLC_DOCKER_CACHE_ARG% %MLC_DOCKER_BUILD_ARGS% -f "%MLC_DOCKERFILE_WITH_PATH%" -t "%MLC_DOCKER_IMAGE_REPO%/%MLC_DOCKER_IMAGE_NAME%:%MLC_DOCKER_IMAGE_TAG%" . - %CM_DOCKER_BUILD_CMD% + %MLC_DOCKER_BUILD_CMD% IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% echo. diff --git a/script/build-docker-image/run.sh b/script/build-docker-image/run.sh index 6aa2390c0..38eeb399e 100644 --- a/script/build-docker-image/run.sh +++ b/script/build-docker-image/run.sh @@ -1,14 +1,14 @@ #!/bin/bash -if [ -f "${CM_DOCKERFILE_WITH_PATH}" ]; then +if [ -f "${MLC_DOCKERFILE_WITH_PATH}" ]; then # echo ".git" > .dockerignore # echo "" -# echo "docker build ${CM_DOCKER_CACHE_ARG} ${CM_DOCKER_BUILD_ARGS} -f ${CM_DOCKERFILE_WITH_PATH} -t ${CM_DOCKER_IMAGE_REPO}/${CM_DOCKER_IMAGE_NAME}:${CM_DOCKER_IMAGE_TAG} ." +# echo "docker build ${MLC_DOCKER_CACHE_ARG} ${MLC_DOCKER_BUILD_ARGS} -f ${MLC_DOCKERFILE_WITH_PATH} -t ${MLC_DOCKER_IMAGE_REPO}/${MLC_DOCKER_IMAGE_NAME}:${MLC_DOCKER_IMAGE_TAG} ." -# docker build ${CM_DOCKER_CACHE_ARG} ${CM_DOCKER_BUILD_ARGS} -f "${CM_DOCKERFILE_WITH_PATH}" -t "${CM_DOCKER_IMAGE_REPO}/${CM_DOCKER_IMAGE_NAME}:${CM_DOCKER_IMAGE_TAG}" . +# docker build ${MLC_DOCKER_CACHE_ARG} ${MLC_DOCKER_BUILD_ARGS} -f "${MLC_DOCKERFILE_WITH_PATH}" -t "${MLC_DOCKER_IMAGE_REPO}/${MLC_DOCKER_IMAGE_NAME}:${MLC_DOCKER_IMAGE_TAG}" . - eval "${CM_DOCKER_BUILD_CMD}" + eval "${MLC_DOCKER_BUILD_CMD}" test $? -eq 0 || exit 1 echo "" diff --git a/script/build-dockerfile/README-extra.md b/script/build-dockerfile/README-extra.md deleted file mode 100644 index 992fee4b3..000000000 --- a/script/build-dockerfile/README-extra.md +++ /dev/null @@ -1,27 +0,0 @@ -# Build CM Dockerfile -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) builds a dockerfile with for using CM. - -## How to use -```bash -cm run script --tags=build,dockerfile --docker_os=[DOCKER_OS] --docker_os_version=[DOCKER_OS_VERSION] --build --image_repo=[IMAGE_REPO] --image_tag=[IMAGE_TAG] --gh_token=[GITHUB_AUTH_TOKEN] --script_tags=[CM_SCRIPT_TAGS] -``` -where -* `[DOCKER_OS]` is one of `ubuntu` or `rhel`. Default is `ubuntu`. -* `[DOCKER_OS_VERSION]` is one of `18.04`, `20.04`, `22.04` for `ubuntu` and `9` for `rhel`. Default is `20.04`. 
-* `--build` option calls the [CM docker image build script](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/build-docker-image) to build a docker image from the generated dockerfile. Default is off. -* `[GITHUB_AUTH_TOKEN]`: Github auth token to be passed to docker build to use as build argument. This is optional. -* `[CM_SCRIPT_TAGS]`: Tags for the CM script which should be run as the last command inside dockerfile. This script will do a fake run and set up all its dependencies in the docker image once built. -* `[IMAGE_REPO]`: Repo name to add the docker image. Default is `local`. -* `[IMAGE_TAG]`: Tag for the docker image. Default is `latest`. - - -## Supported and Tested OS -1. Ubuntu 18.04, 20.04, 22.04 -2. RHEL 9 - -## Sample dockerfiles -1. [Ubuntu 18.04](dockerfiles/ubuntu_18.04.Dockerfile) -2. [Ubuntu 20.04](dockerfiles/ubuntu_20.04.Dockerfile) -1. [Ubuntu 22.04](dockerfiles/ubuntu_22.04.Dockerfile) -1. [rhel9](dockerfiles/rhel_9.Dockerfile) - diff --git a/script/build-dockerfile/customize.py b/script/build-dockerfile/customize.py index c95c2f96f..a91853185 100644 --- a/script/build-dockerfile/customize.py +++ b/script/build-dockerfile/customize.py @@ -11,9 +11,9 @@ def preprocess(i): os_info = i['os_info'] env = i['env'] - if env["CM_DOCKER_OS"] not in ["ubuntu", "rhel", "arch"]: + if env["MLC_DOCKER_OS"] not in ["ubuntu", "rhel", "arch"]: return { - 'return': 1, 'error': f"Specified docker OS: {env['CM_DOCKER_OS']}. Currently only ubuntu, rhel and arch are supported in CM docker"} + 'return': 1, 'error': f"Specified docker OS: {env['MLC_DOCKER_OS']}. Currently only ubuntu, rhel and arch are supported in CM docker"} path = i['run_script_input']['path'] @@ -26,9 +26,9 @@ def preprocess(i): copy_files = [] automation = i['automation'] - # print(env['CM_DOCKER_RUN_SCRIPT_TAGS']) - if env.get('CM_DOCKER_RUN_SCRIPT_TAGS', '') != '': - script_tags = env['CM_DOCKER_RUN_SCRIPT_TAGS'] + # print(env['MLC_DOCKER_RUN_SCRIPT_TAGS']) + if env.get('MLC_DOCKER_RUN_SCRIPT_TAGS', '') != '': + script_tags = env['MLC_DOCKER_RUN_SCRIPT_TAGS'] found_scripts = automation.action_object.access( {'action': 'search', 'automation': 'script', 'tags': script_tags}) scripts_list = found_scripts['list'] @@ -54,13 +54,13 @@ def preprocess(i): # build_args.append(arg) # input_args.append("--"+input_+"="+"$"+env_) - if "CM_DOCKER_OS_VERSION" not in env: - env["CM_DOCKER_OS_VERSION"] = "20.04" + if "MLC_DOCKER_OS_VERSION" not in env: + env["MLC_DOCKER_OS_VERSION"] = "20.04" - docker_image_base = get_value(env, config, 'FROM', 'CM_DOCKER_IMAGE_BASE') + docker_image_base = get_value(env, config, 'FROM', 'MLC_DOCKER_IMAGE_BASE') if not docker_image_base: return { - 'return': 1, 'error': f"Version \"{env['CM_DOCKER_OS_VERSION']}\" is not supported yet for \"{env['CM_DOCKER_OS']}\" "} + 'return': 1, 'error': f"Version \"{env['MLC_DOCKER_OS_VERSION']}\" is not supported yet for \"{env['MLC_DOCKER_OS']}\" "} # Handle cm_mlops Repository if env.get("MLC_REPO_PATH", "") != "": @@ -79,7 +79,7 @@ def preprocess(i): # Define the build context directory (where the Dockerfile will be) build_context_dir = os.path.dirname( env.get( - 'CM_DOCKERFILE_WITH_PATH', + 'MLC_DOCKERFILE_WITH_PATH', os.path.join( os.getcwd(), "Dockerfile"))) @@ -118,8 +118,8 @@ def preprocess(i): # MLC_REPO_PATH is not set; use mlc pull repo as before use_copy_repo = False - if env.get("CM_MLOPS_REPO", "") != "": - cm_mlops_repo = env["CM_MLOPS_REPO"] + if env.get("MLC_MLOPS_REPO", "") != "": + cm_mlops_repo = env["MLC_MLOPS_REPO"] # the below 
pattern matches both the HTTPS and SSH git link formats git_link_pattern = r'^(https?://github\.com/([^/]+)/([^/]+)(?:\.git)?|git@github\.com:([^/]+)/([^/]+)(?:\.git)?)$' if match := re.match(git_link_pattern, cm_mlops_repo): @@ -131,17 +131,17 @@ def preprocess(i): repo_name = match.group(5) cm_mlops_repo = f"{repo_owner}@{repo_name}" print( - f"Converted repo format from {env['CM_MLOPS_REPO']} to {cm_mlops_repo}") + f"Converted repo format from {env['MLC_MLOPS_REPO']} to {cm_mlops_repo}") else: cm_mlops_repo = "mlcommons@mlperf-automations" - cm_mlops_repo_branch_string = f" --branch={env['CM_MLOPS_REPO_BRANCH']}" + cm_mlops_repo_branch_string = f" --branch={env['MLC_MLOPS_REPO_BRANCH']}" - if env.get('CM_DOCKERFILE_WITH_PATH', '') == '': - env['CM_DOCKERFILE_WITH_PATH'] = os.path.join( + if env.get('MLC_DOCKERFILE_WITH_PATH', '') == '': + env['MLC_DOCKERFILE_WITH_PATH'] = os.path.join( os.getcwd(), "Dockerfile") - dockerfile_with_path = env['CM_DOCKERFILE_WITH_PATH'] + dockerfile_with_path = env['MLC_DOCKERFILE_WITH_PATH'] dockerfile_dir = os.path.dirname(dockerfile_with_path) extra_dir = os.path.dirname(dockerfile_with_path) @@ -150,7 +150,7 @@ def preprocess(i): os.makedirs(extra_dir, exist_ok=True) f = open(dockerfile_with_path, "w") - EOL = env['CM_DOCKER_IMAGE_EOL'] + EOL = env['MLC_DOCKER_IMAGE_EOL'] f.write('FROM ' + docker_image_base + EOL) # Maintainers @@ -166,12 +166,12 @@ def preprocess(i): f.write(EOL) - image_label = get_value(env, config, 'LABEL', 'CM_DOCKER_IMAGE_LABEL') + image_label = get_value(env, config, 'LABEL', 'MLC_DOCKER_IMAGE_LABEL') if image_label: f.write('LABEL ' + image_label + EOL) f.write(EOL) - shell = get_value(env, config, 'SHELL', 'CM_DOCKER_IMAGE_SHELL') + shell = get_value(env, config, 'SHELL', 'MLC_DOCKER_IMAGE_SHELL') if shell: # f.write('SHELL ' + shell + EOL) f.write(EOL) @@ -192,8 +192,8 @@ def preprocess(i): f.write(EOL) copy_cmds = [] - if 'CM_DOCKER_COPY_FILES' in env: - for copy_file in env['CM_DOCKER_COPY_FILES']: + if 'MLC_DOCKER_COPY_FILES' in env: + for copy_file in env['MLC_DOCKER_COPY_FILES']: copy_split = copy_file.split(":") if len(copy_split) != 2: return { @@ -217,20 +217,20 @@ def preprocess(i): env, config, 'package-manager-update-cmd', - 'CM_PACKAGE_MANAGER_UPDATE_CMD') + + 'MLC_PACKAGE_MANAGER_UPDATE_CMD') + EOL) f.write('RUN ' + get_value(env, config, 'package-manager-get-cmd') + " " + " ".join(get_value(env, config, 'packages')) + EOL) - if env.get('CM_DOCKER_EXTRA_SYS_DEPS', '') != '': - f.write('RUN ' + env['CM_DOCKER_EXTRA_SYS_DEPS'] + EOL) + if env.get('MLC_DOCKER_EXTRA_SYS_DEPS', '') != '': + f.write('RUN ' + env['MLC_DOCKER_EXTRA_SYS_DEPS'] + EOL) - if env['CM_DOCKER_OS'] == "ubuntu": - if int(env['CM_DOCKER_OS_VERSION'].split('.')[0]) >= 23: + if env['MLC_DOCKER_OS'] == "ubuntu": + if int(env['MLC_DOCKER_OS_VERSION'].split('.')[0]) >= 23: if "--break-system-packages" not in env.get( - 'CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS', ''): - env['CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS'] = " --break-system-packages" - pip_extra_flags = env.get('CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS', '') + 'MLC_DOCKER_PIP_INSTALL_EXTRA_FLAGS', ''): + env['MLC_DOCKER_PIP_INSTALL_EXTRA_FLAGS'] = " --break-system-packages" + pip_extra_flags = env.get('MLC_DOCKER_PIP_INSTALL_EXTRA_FLAGS', '') f.write(EOL + '# Setup docker environment' + EOL) @@ -238,7 +238,7 @@ def preprocess(i): env, config, 'ENTRYPOINT', - 'CM_DOCKER_IMAGE_ENTRYPOINT') + 'MLC_DOCKER_IMAGE_ENTRYPOINT') if entry_point: f.write('ENTRYPOINT ' + entry_point + EOL) @@ -251,11 +251,11 @@ def 
preprocess(i): docker_user = get_value(env, config, 'USER', 'MLC_DOCKER_USER') docker_group = get_value(env, config, 'GROUP', 'MLC_DOCKER_GROUP') - if env.get('CM_CONTAINER_TOOL', '') == 'podman' and env.get( - 'CM_DOCKER_USE_DEFAULT_USER', '') == '': - env['CM_DOCKER_USE_DEFAULT_USER'] = 'yes' + if env.get('MLC_CONTAINER_TOOL', '') == 'podman' and env.get( + 'MLC_DOCKER_USE_DEFAULT_USER', '') == '': + env['MLC_DOCKER_USE_DEFAULT_USER'] = 'yes' - if docker_user and str(env.get('CM_DOCKER_USE_DEFAULT_USER', '')).lower() not in [ + if docker_user and str(env.get('MLC_DOCKER_USE_DEFAULT_USER', '')).lower() not in [ "yes", "1", "true"]: f.write('RUN groupadd -g $GID -o ' + docker_group + EOL) @@ -277,21 +277,21 @@ def preprocess(i): else: f.write('ENV HOME=/root' + EOL) - dockerfile_env = env.get('CM_DOCKERFILE_ENV', {}) + dockerfile_env = env.get('MLC_DOCKERFILE_ENV', {}) dockerfile_env_input_string = "" for docker_env_key in dockerfile_env: dockerfile_env_input_string = dockerfile_env_input_string + " --env." + \ docker_env_key + "=" + str(dockerfile_env[docker_env_key]) - workdir = get_value(env, config, 'WORKDIR', 'CM_DOCKER_WORKDIR') - if workdir and ("/home/mlcuser" not in workdir or str(env.get('CM_DOCKER_USE_DEFAULT_USER', '')).lower() not in [ + workdir = get_value(env, config, 'WORKDIR', 'MLC_DOCKER_WORKDIR') + if workdir and ("/home/mlcuser" not in workdir or str(env.get('MLC_DOCKER_USE_DEFAULT_USER', '')).lower() not in [ "yes", "1", "true"]): f.write('WORKDIR ' + workdir + EOL) f.write(EOL + '# Install python packages' + EOL) - python = get_value(env, config, 'PYTHON', 'CM_DOCKERFILE_PYTHON') + python = get_value(env, config, 'PYTHON', 'MLC_DOCKERFILE_PYTHON') - docker_use_virtual_python = env.get('CM_DOCKER_USE_VIRTUAL_PYTHON', "yes") + docker_use_virtual_python = env.get('MLC_DOCKER_USE_VIRTUAL_PYTHON', "yes") if str(docker_use_virtual_python).lower() not in ["no", "0", "false"]: f.write('RUN {} -m venv $HOME/venv/mlc'.format(python) + " " + EOL) f.write('ENV PATH="$HOME/venv/mlc/bin:$PATH"' + EOL) @@ -323,7 +323,7 @@ def preprocess(i): else: # Use mlc pull repo as before - x = env.get('CM_DOCKER_ADD_FLAG_TO_CM_MLOPS_REPO', '') + x = env.get('MLC_DOCKER_ADD_FLAG_TO_MLC_MLOPS_REPO', '') if x != '': x = ' ' + x @@ -335,55 +335,55 @@ def preprocess(i): EOL) # Check extra repositories - x = env.get('CM_DOCKER_EXTRA_CM_REPOS', '') + x = env.get('MLC_DOCKER_EXTRA_MLC_REPOS', '') if x != '': for y in x.split(','): f.write('RUN ' + y + EOL) - if str(env.get('CM_DOCKER_SKIP_CM_SYS_UPGRADE', False) + if str(env.get('MLC_DOCKER_SKIP_MLC_SYS_UPGRADE', False) ).lower() not in ["true", "1", "yes"]: f.write(EOL + '# Install all system dependencies' + EOL) f.write('RUN mlc run script --tags=get,sys-utils-cm --quiet' + EOL) - if 'CM_DOCKER_PRE_RUN_COMMANDS' in env: - for pre_run_cmd in env['CM_DOCKER_PRE_RUN_COMMANDS']: + if 'MLC_DOCKER_PRE_RUN_COMMANDS' in env: + for pre_run_cmd in env['MLC_DOCKER_PRE_RUN_COMMANDS']: f.write('RUN ' + pre_run_cmd + EOL) run_cmd_extra = " " + \ - env.get('CM_DOCKER_RUN_CMD_EXTRA', '').replace(":", "=") - gh_token = get_value(env, config, "GH_TOKEN", "CM_GH_TOKEN") + env.get('MLC_DOCKER_RUN_CMD_EXTRA', '').replace(":", "=") + gh_token = get_value(env, config, "GH_TOKEN", "MLC_GH_TOKEN") if gh_token: - run_cmd_extra = " --env.CM_GH_TOKEN=$CM_GH_TOKEN" + run_cmd_extra = " --env.MLC_GH_TOKEN=$MLC_GH_TOKEN" f.write(EOL + '# Run commands' + EOL) - for comment in env.get('CM_DOCKER_RUN_COMMENTS', []): + for comment in env.get('MLC_DOCKER_RUN_COMMENTS', []): 
f.write(comment + EOL) skip_extra = False - if 'CM_DOCKER_RUN_CMD' not in env: - env['CM_DOCKER_RUN_CMD'] = "" - if 'CM_DOCKER_RUN_SCRIPT_TAGS' not in env: - env['CM_DOCKER_RUN_CMD'] += "mlc version" + if 'MLC_DOCKER_RUN_CMD' not in env: + env['MLC_DOCKER_RUN_CMD'] = "" + if 'MLC_DOCKER_RUN_SCRIPT_TAGS' not in env: + env['MLC_DOCKER_RUN_CMD'] += "mlc version" skip_extra = True else: - if str(env.get('CM_DOCKER_NOT_PULL_UPDATE', 'False') + if str(env.get('MLC_DOCKER_NOT_PULL_UPDATE', 'False') ).lower() not in ["yes", "1", "true"]: - env['CM_DOCKER_RUN_CMD'] += "mlc pull repo && " - env['CM_DOCKER_RUN_CMD'] += "mlc run script --tags=" + \ - env['CM_DOCKER_RUN_SCRIPT_TAGS'] + ' --quiet' + env['MLC_DOCKER_RUN_CMD'] += "mlc pull repo && " + env['MLC_DOCKER_RUN_CMD'] += "mlc run script --tags=" + \ + env['MLC_DOCKER_RUN_SCRIPT_TAGS'] + ' --quiet' else: - if str(env.get('CM_DOCKER_NOT_PULL_UPDATE', 'False') + if str(env.get('MLC_DOCKER_NOT_PULL_UPDATE', 'False') ).lower() not in ["yes", "1", "true"]: - env['CM_DOCKER_RUN_CMD'] = "mlc pull repo && " + \ - env['CM_DOCKER_RUN_CMD'] + env['MLC_DOCKER_RUN_CMD'] = "mlc pull repo && " + \ + env['MLC_DOCKER_RUN_CMD'] - print(env['CM_DOCKER_RUN_CMD']) - fake_run = env.get("CM_DOCKER_FAKE_RUN_OPTION", + print(env['MLC_DOCKER_RUN_CMD']) + fake_run = env.get("MLC_DOCKER_FAKE_RUN_OPTION", " --fake_run") + dockerfile_env_input_string fake_run = fake_run + \ - " --fake_deps" if env.get('CM_DOCKER_FAKE_DEPS') else fake_run + " --fake_deps" if env.get('MLC_DOCKER_FAKE_DEPS') else fake_run - x = 'RUN ' + env['CM_DOCKER_RUN_CMD'] + x = 'RUN ' + env['MLC_DOCKER_RUN_CMD'] if not skip_extra: x += fake_run @@ -392,11 +392,11 @@ def preprocess(i): if run_cmd_extra != '': x += ' ' + run_cmd_extra - if env.get('CM_DOCKER_RUN_SCRIPT_TAGS', '') != '' and str(env.get( - 'CM_DOCKER_ADD_DEPENDENT_SCRIPTS_RUN_COMMANDS', '')).lower() in ["yes", "1", "true"]: + if env.get('MLC_DOCKER_RUN_SCRIPT_TAGS', '') != '' and str(env.get( + 'MLC_DOCKER_ADD_DEPENDENT_SCRIPTS_RUN_COMMANDS', '')).lower() in ["yes", "1", "true"]: mlc_input = {'action': 'run', 'automation': 'script', - 'tags': f"""{env['CM_DOCKER_RUN_SCRIPT_TAGS']}""", + 'tags': f"""{env['MLC_DOCKER_RUN_SCRIPT_TAGS']}""", 'print_deps': True, 'quiet': True, 'silent': True, @@ -407,7 +407,7 @@ def preprocess(i): if r['return'] > 0: return r print_deps = r['new_state']['print_deps'] - fake_run_str = " --fake_run" if env.get('CM_DOCKER_FAKE_DEPS') else "" + fake_run_str = " --fake_run" if env.get('MLC_DOCKER_FAKE_DEPS') else "" cmds = ["RUN " + dep for dep in print_deps] for cmd in cmds: f.write(cmd + fake_run_str + EOL) @@ -415,19 +415,19 @@ def preprocess(i): f.write(x + EOL) # fake_run to install the dependent scripts and caching them - if not "run" in env['CM_DOCKER_RUN_CMD'] and str( - env.get('CM_REAL_RUN', False)).lower() in ["false", "0", "no"]: + if not "run" in env['MLC_DOCKER_RUN_CMD'] and str( + env.get('MLC_REAL_RUN', False)).lower() in ["false", "0", "no"]: fake_run = dockerfile_env_input_string - x = 'RUN ' + env['CM_DOCKER_RUN_CMD'] + fake_run + run_cmd_extra + x = 'RUN ' + env['MLC_DOCKER_RUN_CMD'] + fake_run + run_cmd_extra if '--quiet' not in x: x += ' --quiet ' x += EOL f.write(x) - if 'CM_DOCKER_POST_RUN_COMMANDS' in env: - for post_run_cmd in env['CM_DOCKER_POST_RUN_COMMANDS']: + if 'MLC_DOCKER_POST_RUN_COMMANDS' in env: + for post_run_cmd in env['MLC_DOCKER_POST_RUN_COMMANDS']: f.write('RUN ' + post_run_cmd + EOL) post_file = env.get('DOCKER_IMAGE_POST_FILE', '') @@ -443,7 +443,7 @@ def preprocess(i): 
f.close() - # f = open(env['CM_DOCKERFILE_WITH_PATH'], "r") + # f = open(env['MLC_DOCKERFILE_WITH_PATH'], "r") # print(f.read()) return {'return': 0} @@ -456,8 +456,8 @@ def get_value(env, config, key, env_key=None): if env.get(env_key, None) is not None: return env[env_key] - docker_os = env['CM_DOCKER_OS'] - docker_os_version = env['CM_DOCKER_OS_VERSION'] + docker_os = env['MLC_DOCKER_OS'] + docker_os_version = env['MLC_DOCKER_OS_VERSION'] version_meta = config['distros'][docker_os]['versions'].get( docker_os_version, '') diff --git a/script/build-dockerfile/meta.yaml b/script/build-dockerfile/meta.yaml index cc81fe3c5..a8acb0c30 100644 --- a/script/build-dockerfile/meta.yaml +++ b/script/build-dockerfile/meta.yaml @@ -13,49 +13,49 @@ cache: false category: Docker automation default_env: - CM_DOCKER_BUILD_SLIM: 'no' - CM_DOCKER_IMAGE_EOL: ' + MLC_DOCKER_BUILD_SLIM: 'no' + MLC_DOCKER_IMAGE_EOL: ' ' - CM_DOCKER_OS: ubuntu - CM_DOCKER_NOT_PULL_UPDATE: False - CM_MLOPS_REPO_BRANCH: mlc + MLC_DOCKER_OS: ubuntu + MLC_DOCKER_NOT_PULL_UPDATE: False + MLC_MLOPS_REPO_BRANCH: mlc input_mapping: - build: CM_BUILD_DOCKER_IMAGE - cache: CM_DOCKER_CACHE - cm_repo: CM_MLOPS_REPO - cm_repo_flags: CM_DOCKER_ADD_FLAG_TO_CM_MLOPS_REPO - cm_repos: CM_DOCKER_EXTRA_CM_REPOS - cm_repo_branch: CM_MLOPS_REPO_BRANCH - comments: CM_DOCKER_RUN_COMMENTS - copy_files: CM_DOCKER_COPY_FILES - docker_base_image: CM_DOCKER_IMAGE_BASE - docker_os: CM_DOCKER_OS - docker_os_version: CM_DOCKER_OS_VERSION - dockerfile_env: CM_DOCKERFILE_ENV - extra_sys_deps: CM_DOCKER_EXTRA_SYS_DEPS - fake_docker_deps: CM_DOCKER_FAKE_DEPS - fake_run_option: CM_DOCKER_FAKE_RUN_OPTION - file_path: CM_DOCKERFILE_WITH_PATH - gh_token: CM_GH_TOKEN - image_repo: CM_DOCKER_IMAGE_REPO - image_tag: CM_DOCKER_IMAGE_TAG - package_manager_update_cmd: CM_PACKAGE_MANAGER_UPDATE_CMD - pip_extra_flags: CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS + build: MLC_BUILD_DOCKER_IMAGE + cache: MLC_DOCKER_CACHE + cm_repo: MLC_MLOPS_REPO + cm_repo_flags: MLC_DOCKER_ADD_FLAG_TO_MLC_MLOPS_REPO + cm_repos: MLC_DOCKER_EXTRA_MLC_REPOS + cm_repo_branch: MLC_MLOPS_REPO_BRANCH + comments: MLC_DOCKER_RUN_COMMENTS + copy_files: MLC_DOCKER_COPY_FILES + docker_base_image: MLC_DOCKER_IMAGE_BASE + docker_os: MLC_DOCKER_OS + docker_os_version: MLC_DOCKER_OS_VERSION + dockerfile_env: MLC_DOCKERFILE_ENV + extra_sys_deps: MLC_DOCKER_EXTRA_SYS_DEPS + fake_docker_deps: MLC_DOCKER_FAKE_DEPS + fake_run_option: MLC_DOCKER_FAKE_RUN_OPTION + file_path: MLC_DOCKERFILE_WITH_PATH + gh_token: MLC_GH_TOKEN + image_repo: MLC_DOCKER_IMAGE_REPO + image_tag: MLC_DOCKER_IMAGE_TAG + package_manager_update_cmd: MLC_PACKAGE_MANAGER_UPDATE_CMD + pip_extra_flags: MLC_DOCKER_PIP_INSTALL_EXTRA_FLAGS post_file: DOCKER_IMAGE_POST_FILE - post_run_cmds: CM_DOCKER_POST_RUN_COMMANDS - pre_run_cmds: CM_DOCKER_PRE_RUN_COMMANDS - real_run: CM_REAL_RUN - run_cmd: CM_DOCKER_RUN_CMD - run_cmd_extra: CM_DOCKER_RUN_CMD_EXTRA - script_tags: CM_DOCKER_RUN_SCRIPT_TAGS - skip_cm_sys_upgrade: CM_DOCKER_SKIP_CM_SYS_UPGRADE - push_image: CM_DOCKER_PUSH_IMAGE - docker_not_pull_update: CM_DOCKER_NOT_PULL_UPDATE + post_run_cmds: MLC_DOCKER_POST_RUN_COMMANDS + pre_run_cmds: MLC_DOCKER_PRE_RUN_COMMANDS + real_run: MLC_REAL_RUN + run_cmd: MLC_DOCKER_RUN_CMD + run_cmd_extra: MLC_DOCKER_RUN_CMD_EXTRA + script_tags: MLC_DOCKER_RUN_SCRIPT_TAGS + skip_cm_sys_upgrade: MLC_DOCKER_SKIP_MLC_SYS_UPGRADE + push_image: MLC_DOCKER_PUSH_IMAGE + docker_not_pull_update: MLC_DOCKER_NOT_PULL_UPDATE new_env_keys: -- CM_DOCKERFILE_* +- MLC_DOCKERFILE_* deps: - tags: 
get,docker @@ -64,7 +64,7 @@ deps: post_deps: - enable_if_env: - CM_BUILD_DOCKER_IMAGE: + MLC_BUILD_DOCKER_IMAGE: - 'yes' names: - build-docker-image @@ -73,4 +73,4 @@ post_deps: variations: slim: env: - CM_DOCKER_BUILD_SLIM: 'yes' + MLC_DOCKER_BUILD_SLIM: 'yes' diff --git a/script/build-mlperf-inference-server-nvidia/customize.py b/script/build-mlperf-inference-server-nvidia/customize.py index f150d930b..5fa70aa45 100644 --- a/script/build-mlperf-inference-server-nvidia/customize.py +++ b/script/build-mlperf-inference-server-nvidia/customize.py @@ -14,25 +14,25 @@ def preprocess(i): if '+LIBRARY_PATH' not in env: env['+LIBRARY_PATH'] = [] - if 'CM_TENSORRT_INSTALL_PATH' in env: + if 'MLC_TENSORRT_INSTALL_PATH' in env: env['+LIBRARY_PATH'].append(os.path.join( - env['CM_TENSORRT_INSTALL_PATH'], "lib")) + env['MLC_TENSORRT_INSTALL_PATH'], "lib")) cxxflags = [ "-Wno-error=switch", "-DDALI_1_15=1", "-Wno-error=maybe-uninitialized"] - if env.get('CM_GCC_VERSION', '') != '': - gcc_major_version = env['CM_GCC_VERSION'].split(".")[0] + if env.get('MLC_GCC_VERSION', '') != '': + gcc_major_version = env['MLC_GCC_VERSION'].split(".")[0] if int(gcc_major_version) > 10: - if env.get('CM_MLPERF_INFERENCE_VERSION', '') != "4.1": + if env.get('MLC_MLPERF_INFERENCE_VERSION', '') != "4.1": cxxflags.append("-Wno-error=range-loop-construct") - if env.get('CM_MLPERF_DEVICE', '') == "inferentia": + if env.get('MLC_MLPERF_DEVICE', '') == "inferentia": env['USE_INFERENTIA'] = "1" env['USE_NIGHTLY'] = "0" - env['CM_MAKE_BUILD_COMMAND'] = "build" + env['MLC_MAKE_BUILD_COMMAND'] = "build" if '+ CXXFLAGS' not in env: env['+ CXXFLAGS'] = [] diff --git a/script/build-mlperf-inference-server-nvidia/meta.yaml b/script/build-mlperf-inference-server-nvidia/meta.yaml index c5003f67c..d7f005bb4 100644 --- a/script/build-mlperf-inference-server-nvidia/meta.yaml +++ b/script/build-mlperf-inference-server-nvidia/meta.yaml @@ -22,16 +22,16 @@ tags: new_env_keys: - - CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH + - MLC_MLPERF_INFERENCE_NVIDIA_CODE_PATH default_env: - CM_MAKE_BUILD_COMMAND: build - CM_MAKE_CLEAN: "no" - CM_CUSTOM_SYSTEM_NVIDIA: "yes" + MLC_MAKE_BUILD_COMMAND: build + MLC_MAKE_CLEAN: "no" + MLC_CUSTOM_SYSTEM_NVIDIA: "yes" input_mapping: - custom_system: CM_CUSTOM_SYSTEM_NVIDIA - clean: CM_MAKE_CLEAN + custom_system: MLC_CUSTOM_SYSTEM_NVIDIA + clean: MLC_MAKE_CLEAN # Dependencies on other CM scripts @@ -57,7 +57,7 @@ deps: names: - cuda enable_if_env: - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - cuda - inferentia @@ -66,11 +66,11 @@ deps: names: - tensorrt enable_if_env: - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - cuda - inferentia skip_if_env: - CM_TENSORRT_SYSTEM_DETECT: + MLC_TENSORRT_SYSTEM_DETECT: - yes # Detect gcc @@ -113,7 +113,7 @@ deps: - tags: get,generic-python-lib,_pycuda version: "2022.2.2" skip_if_env: - CM_RUN_STATE_DOCKER: + MLC_RUN_STATE_DOCKER: - 'yes' - True - 'True' @@ -137,7 +137,7 @@ post_deps: - custom-system-nvidia - nvidia-inference-common-code skip_if_env: - CM_CUSTOM_SYSTEM_NVIDIA: + MLC_CUSTOM_SYSTEM_NVIDIA: - "no" - False - "False" @@ -147,17 +147,17 @@ variations: cpu: group: device env: - CM_MLPERF_DEVICE: cpu + MLC_MLPERF_DEVICE: cpu inferentia: group: device env: - CM_MLPERF_DEVICE: inferentia + MLC_MLPERF_DEVICE: inferentia cuda: group: device default: true env: - CM_MLPERF_DEVICE: cuda - CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart + MLC_MLPERF_DEVICE: cuda + MLC_MLPERF_DEVICE_LIB_NAMESPEC: cudart ctuning: group: code @@ -200,15 +200,15 @@ variations: - pytorch - torch skip_if_env: - 
CM_HOST_PLATFORM_FLAVOR: + MLC_HOST_PLATFORM_FLAVOR: - x86_64 - CM_PYTHON_MINOR_VERSION: + MLC_PYTHON_MINOR_VERSION: - 8 - tags: get,generic-python-lib,_whl-url.https://github.com/mlcommons/cm4mlperf-inference/releases/download/mlperf-inference-v4.0/torch-2.1.0a0+git32f93b1-cp38-cp38-linux_x86_64.whl enable_if_env: - CM_HOST_PLATFORM_FLAVOR: + MLC_HOST_PLATFORM_FLAVOR: - x86_64 - CM_PYTHON_MINOR_VERSION: + MLC_PYTHON_MINOR_VERSION: - 8 - tags: install,torchvision,from.src,_for-nvidia-mlperf-inference-v4.0 @@ -216,15 +216,15 @@ variations: - pytorchvision - torchvision skip_if_env: - CM_HOST_PLATFORM_FLAVOR: + MLC_HOST_PLATFORM_FLAVOR: - x86_64 - CM_PYTHON_MINOR_VERSION: + MLC_PYTHON_MINOR_VERSION: - 8 - tags: get,generic-python-lib,_whl-url.https://github.com/mlcommons/cm4mlperf-inference/releases/download/mlperf-inference-v4.0/torchvision-0.16.0a0+657027f-cp38-cp38-linux_x86_64.whl enable_if_env: - CM_HOST_PLATFORM_FLAVOR: + MLC_HOST_PLATFORM_FLAVOR: - x86_64 - CM_PYTHON_MINOR_VERSION: + MLC_PYTHON_MINOR_VERSION: - 8 versions: @@ -274,30 +274,30 @@ versions: - pytorch - torch skip_if_env: - CM_HOST_PLATFORM_FLAVOR: + MLC_HOST_PLATFORM_FLAVOR: - x86_64 - CM_PYTHON_MINOR_VERSION: + MLC_PYTHON_MINOR_VERSION: - 8 - tags: get,generic-python-lib,_package.torch,_whl-url.https://github.com/mlcommons/cm4mlperf-inference/releases/download/mlperf-inference-v4.0/torch-2.1.0a0+git32f93b1-cp38-cp38-linux_x86_64.whl enable_if_env: - CM_HOST_PLATFORM_FLAVOR: + MLC_HOST_PLATFORM_FLAVOR: - x86_64 - CM_PYTHON_MINOR_VERSION: + MLC_PYTHON_MINOR_VERSION: - 8 - tags: install,torchvision,from.src,_for-nvidia-mlperf-inference-v4.0 names: - pytorchvision - torchvision skip_if_env: - CM_HOST_PLATFORM_FLAVOR: + MLC_HOST_PLATFORM_FLAVOR: - x86_64 - CM_PYTHON_MINOR_VERSION: + MLC_PYTHON_MINOR_VERSION: - 8 - tags: get,generic-python-lib,_package.torchvision,_whl-url.https://github.com/mlcommons/cm4mlperf-inference/releases/download/mlperf-inference-v4.0/torchvision-0.16.0a0+657027f-cp38-cp38-linux_x86_64.whl enable_if_env: - CM_HOST_PLATFORM_FLAVOR: + MLC_HOST_PLATFORM_FLAVOR: - x86_64 - CM_PYTHON_MINOR_VERSION: + MLC_PYTHON_MINOR_VERSION: - 8 r4.1-dev: @@ -316,30 +316,30 @@ versions: - pytorch - torch skip_if_env: - CM_HOST_PLATFORM_FLAVOR: + MLC_HOST_PLATFORM_FLAVOR: - x86_64 - CM_PYTHON_MINOR_VERSION: + MLC_PYTHON_MINOR_VERSION: - 8 - tags: get,generic-python-lib,_package.torch,_whl-url.https://github.com/mlcommons/cm4mlperf-inference/releases/download/mlperf-inference-v4.0/torch-2.1.0a0+git32f93b1-cp38-cp38-linux_x86_64.whl enable_if_env: - CM_HOST_PLATFORM_FLAVOR: + MLC_HOST_PLATFORM_FLAVOR: - x86_64 - CM_PYTHON_MINOR_VERSION: + MLC_PYTHON_MINOR_VERSION: - 8 - tags: install,torchvision,from.src,_for-nvidia-mlperf-inference-v4.0 names: - pytorchvision - torchvision skip_if_env: - CM_HOST_PLATFORM_FLAVOR: + MLC_HOST_PLATFORM_FLAVOR: - x86_64 - CM_PYTHON_MINOR_VERSION: + MLC_PYTHON_MINOR_VERSION: - 8 - tags: get,generic-python-lib,_package.torchvision,_whl-url.https://github.com/mlcommons/cm4mlperf-inference/releases/download/mlperf-inference-v4.0/torchvision-0.16.0a0+657027f-cp38-cp38-linux_x86_64.whl enable_if_env: - CM_HOST_PLATFORM_FLAVOR: + MLC_HOST_PLATFORM_FLAVOR: - x86_64 - CM_PYTHON_MINOR_VERSION: + MLC_PYTHON_MINOR_VERSION: - 8 r4.1: @@ -368,8 +368,8 @@ docker: criteo_preprocessed_path: CRITEO_PREPROCESSED_PATH results_dir: RESULTS_DIR submission_dir: SUBMISSION_DIR - cudnn_tar_file_path: CM_CUDNN_TAR_FILE_PATH - tensorrt_tar_file_path: CM_TENSORRT_TAR_FILE_PATH + cudnn_tar_file_path: 
MLC_CUDNN_TAR_FILE_PATH + tensorrt_tar_file_path: MLC_TENSORRT_TAR_FILE_PATH cuda_run_file_path: CUDA_RUN_FILE_LOCAL_PATH dlrm_data_path: DLRM_DATA_PATH scratch_path: MLPERF_SCRATCH_PATH @@ -379,7 +379,7 @@ docker: - tags: get,mlperf,inference,submission,dir,local - tags: get,nvidia-docker skip_if_env: - CM_SKIP_GET_NVIDIA_DOCKER: + MLC_SKIP_GET_NVIDIA_DOCKER: - yes pre_run_cmds: @@ -387,12 +387,12 @@ docker: run_cmd_prefix: sudo apt remove -y cmake mounts: - "${{ IMAGENET_PATH }}:/data/imagenet-val" - - "${{ CM_MLPERF_INFERENCE_RESULTS_DIR }}:${{ CM_MLPERF_INFERENCE_RESULTS_DIR }}" - - "${{ CM_MLPERF_INFERENCE_SUBMISSION_DIR }}:${{ CM_MLPERF_INFERENCE_SUBMISSION_DIR }}" + - "${{ MLC_MLPERF_INFERENCE_RESULTS_DIR }}:${{ MLC_MLPERF_INFERENCE_RESULTS_DIR }}" + - "${{ MLC_MLPERF_INFERENCE_SUBMISSION_DIR }}:${{ MLC_MLPERF_INFERENCE_SUBMISSION_DIR }}" - "${{ RESULTS_DIR }}:/home/cmuser/results_dir" - "${{ SUBMISSION_DIR }}:/home/cmuser/submission_dir" - - "${{ CM_CUDNN_TAR_FILE_PATH }}:${{ CM_CUDNN_TAR_FILE_PATH }}" - - "${{ CM_TENSORRT_TAR_FILE_PATH }}:${{ CM_TENSORRT_TAR_FILE_PATH }}" + - "${{ MLC_CUDNN_TAR_FILE_PATH }}:${{ MLC_CUDNN_TAR_FILE_PATH }}" + - "${{ MLC_TENSORRT_TAR_FILE_PATH }}:${{ MLC_TENSORRT_TAR_FILE_PATH }}" - "${{ CUDA_RUN_FILE_LOCAL_PATH }}:${{ CUDA_RUN_FILE_LOCAL_PATH }}" - "${{ MLPERF_SCRATCH_PATH }}:${{ MLPERF_SCRATCH_PATH }}" - "${{ DLRM_DATA_PATH }}:/home/mlperf_inf_dlrmv2" diff --git a/script/build-mlperf-inference-server-nvidia/run.sh b/script/build-mlperf-inference-server-nvidia/run.sh index e03aaa72b..ac990aa62 100644 --- a/script/build-mlperf-inference-server-nvidia/run.sh +++ b/script/build-mlperf-inference-server-nvidia/run.sh @@ -1,16 +1,16 @@ #!/bin/bash CUR=$PWD -cd ${CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH} +cd ${MLC_MLPERF_INFERENCE_NVIDIA_CODE_PATH} -if [[ ${CM_MAKE_CLEAN} == "yes" ]]; then +if [[ ${MLC_MAKE_CLEAN} == "yes" ]]; then make clean fi -if [[ ${CM_MLPERF_DEVICE} == "inferentia" ]]; then +if [[ ${MLC_MLPERF_DEVICE} == "inferentia" ]]; then make prebuild fi -SKIP_DRIVER_CHECK=1 make ${CM_MAKE_BUILD_COMMAND} +SKIP_DRIVER_CHECK=1 make ${MLC_MAKE_BUILD_COMMAND} test $? -eq 0 || exit $? 
diff --git a/script/calibrate-model-for.qaic/customize.py b/script/calibrate-model-for.qaic/customize.py index 32ff19353..6e09b1a2f 100644 --- a/script/calibrate-model-for.qaic/customize.py +++ b/script/calibrate-model-for.qaic/customize.py @@ -14,9 +14,9 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') - if env.get('CM_CREATE_INPUT_BATCH', '') == 'yes': + if env.get('MLC_CREATE_INPUT_BATCH', '') == 'yes': r = create_batched_inputs(env) if r['return'] > 0: return r @@ -28,14 +28,14 @@ def preprocess(i): print("Profiling from " + os.getcwd()) - env['CM_RUN_CMD'] = cmd + env['MLC_RUN_CMD'] = cmd return {'return': 0} def create_batched_inputs(env): - original_images_file = env['CM_DATASET_PREPROCESSED_IMAGES_LIST'] - batchsize = env['CM_QAIC_MODEL_BATCH_SIZE'] + original_images_file = env['MLC_DATASET_PREPROCESSED_IMAGES_LIST'] + batchsize = env['MLC_QAIC_MODEL_BATCH_SIZE'] file_paths = [] with open(original_images_file) as f: @@ -71,13 +71,13 @@ def create_batched_inputs(env): def construct_calibration_cmd(env): - compiler_params = env['CM_QAIC_COMPILER_PARAMS'] - batchsize = env.get('CM_QAIC_MODEL_BATCH_SIZE', "1") - cmd = env['CM_QAIC_EXEC_PATH'] + " " - if env.get('CM_CREATE_INPUT_BATCH', '') == 'yes': + compiler_params = env['MLC_QAIC_COMPILER_PARAMS'] + batchsize = env.get('MLC_QAIC_MODEL_BATCH_SIZE', "1") + cmd = env['MLC_QAIC_EXEC_PATH'] + " " + if env.get('MLC_CREATE_INPUT_BATCH', '') == 'yes': cmd += " -input-list-file=batched_input_files -batchsize=" + batchsize + " " cmd += compiler_params + " -dump-profile=profile.yaml -model=" + \ - env['CM_ML_MODEL_FILE_WITH_PATH'] + env['MLC_ML_MODEL_FILE_WITH_PATH'] return {'return': 0, 'cmd': cmd} @@ -86,10 +86,10 @@ def postprocess(i): env = i['env'] profile_file_path = os.path.join(os.getcwd(), "profile.yaml") - env['CM_QAIC_MODEL_PROFILE_WITH_PATH'] = profile_file_path + env['MLC_QAIC_MODEL_PROFILE_WITH_PATH'] = profile_file_path - if env.get('CM_ML_MODEL_INPUT_LAYER_NAME', '') != '': - input_layer_names = [env.get('CM_ML_MODEL_INPUT_LAYER_NAME')] + if env.get('MLC_ML_MODEL_INPUT_LAYER_NAME', '') != '': + input_layer_names = [env.get('MLC_ML_MODEL_INPUT_LAYER_NAME')] else: input_layer_names = ["images:0", "images/:0"] @@ -128,7 +128,7 @@ def postprocess(i): "TopK_578/:0" ] - if env.get('CM_QAIC_MODEL_NAME', '') == "retinanet": + if env.get('MLC_QAIC_MODEL_NAME', '') == "retinanet": with open(profile_file_path, "r") as stream: try: output_min_val_loc = sys.maxsize @@ -158,8 +158,8 @@ def postprocess(i): max_val = k['Max'] scale, offset = get_scale_offset( min_val, max_val) - env['CM_QAIC_MODEL_RETINANET_IMAGE_SCALE'] = scale - env['CM_QAIC_MODEL_RETINANET_IMAGE_OFFSET'] = offset + env['MLC_QAIC_MODEL_RETINANET_IMAGE_SCALE'] = scale + env['MLC_QAIC_MODEL_RETINANET_IMAGE_OFFSET'] = offset if k["NodeOutputName"] in output_layer_names_loc[oindex]: min_val = k['Min'] @@ -172,9 +172,9 @@ def postprocess(i): min_val, max_val) index = output_layer_names_loc[oindex].index( k["NodeOutputName"]) - env[f'CM_QAIC_MODEL_RETINANET_LOC_SCALE{index}'] = loc_scale + env[f'MLC_QAIC_MODEL_RETINANET_LOC_SCALE{index}'] = loc_scale # to uint8 is done in NMS code - env[f'CM_QAIC_MODEL_RETINANET_LOC_OFFSET{index}'] = loc_offset - 128 + env[f'MLC_QAIC_MODEL_RETINANET_LOC_OFFSET{index}'] = loc_offset - 128 total_range = max_val - min_val scale = total_range / 256.0 @@ -191,9 +191,9 @@ def postprocess(i): min_val, max_val) index = output_layer_names_conf[oindex].index( 
k["NodeOutputName"]) - env[f'CM_QAIC_MODEL_RETINANET_CONF_SCALE{index}'] = conf_scale + env[f'MLC_QAIC_MODEL_RETINANET_CONF_SCALE{index}'] = conf_scale # to uint8 is done in NMS code - env[f'CM_QAIC_MODEL_RETINANET_CONF_OFFSET{index}'] = conf_offset - 128 + env[f'MLC_QAIC_MODEL_RETINANET_CONF_OFFSET{index}'] = conf_offset - 128 total_range = max_val - min_val scale = total_range / 256.0 offset = round(-min_val / scale) @@ -202,10 +202,10 @@ def postprocess(i): output_min_val_loc, output_max_val_loc) conf_scale, conf_offset = get_scale_offset( output_min_val_conf, output_max_val_conf) - env['CM_QAIC_MODEL_RETINANET_LOC_SCALE'] = loc_scale - env['CM_QAIC_MODEL_RETINANET_LOC_OFFSET'] = loc_offset - 128 # to uint8 is done in NMS code - env['CM_QAIC_MODEL_RETINANET_CONF_SCALE'] = conf_scale - env['CM_QAIC_MODEL_RETINANET_CONF_OFFSET'] = conf_offset - 128 # to uint8 is done in NMS code + env['MLC_QAIC_MODEL_RETINANET_LOC_SCALE'] = loc_scale + env['MLC_QAIC_MODEL_RETINANET_LOC_OFFSET'] = loc_offset - 128 # to uint8 is done in NMS code + env['MLC_QAIC_MODEL_RETINANET_CONF_SCALE'] = conf_scale + env['MLC_QAIC_MODEL_RETINANET_CONF_OFFSET'] = conf_offset - 128 # to uint8 is done in NMS code except yaml.YAMLError as exc: return {'return': 1, 'error': exc} diff --git a/script/calibrate-model-for.qaic/meta.yaml b/script/calibrate-model-for.qaic/meta.yaml index 958d9f846..e86389a9f 100644 --- a/script/calibrate-model-for.qaic/meta.yaml +++ b/script/calibrate-model-for.qaic/meta.yaml @@ -9,21 +9,21 @@ deps: - qaic-apps-sdk tags: get,qaic,apps,sdk - enable_if_env: - CM_CALIBRATE_OPENIMAGES: + MLC_CALIBRATE_OPENIMAGES: - 'yes' names: - openimages-cal - preprocessed-dataset tags: get,preprocessed,dataset,_calibration,openimages,_for.retinanet.onnx,_NCHW,_fp32,_custom-annotations - enable_if_env: - CM_CALIBRATE_IMAGENET: + MLC_CALIBRATE_IMAGENET: - 'yes' names: - imagenet-cal - preprocessed-calibration-dataset tags: get,dataset,imagenet,preprocessed,_calibration,_for.resnet50,_float32,_rgb32 - enable_if_env: - CM_CALIBRATE_SQUAD: + MLC_CALIBRATE_SQUAD: - 'on' names: - squad-cal @@ -33,7 +33,7 @@ deps: - model-src tags: get,ml-model new_env_keys: -- CM_QAIC_MODEL_PROFILE_* +- MLC_QAIC_MODEL_PROFILE_* tags: - qaic - calibrate @@ -46,12 +46,12 @@ variations: base: - bert_ env: - CM_CALIBRATE_SQUAD: 'yes' - CM_QAIC_COMPILER_ARGS: '' - CM_QAIC_COMPILER_PARAMS: -onnx-define-symbol=batch_size,1 -onnx-define-symbol=seg_length,<<>> - -input-list-file=<<>> -num-histogram-bins=512 - -profiling-threads=<<>> - CM_QAIC_MODEL_TO_CONVERT: calibrate_bert_mlperf + MLC_CALIBRATE_SQUAD: 'yes' + MLC_QAIC_COMPILER_ARGS: '' + MLC_QAIC_COMPILER_PARAMS: -onnx-define-symbol=batch_size,1 -onnx-define-symbol=seg_length,<<>> + -input-list-file=<<>> -num-histogram-bins=512 + -profiling-threads=<<>> + MLC_QAIC_MODEL_TO_CONVERT: calibrate_bert_mlperf group: model bert_: adr: @@ -60,17 +60,17 @@ variations: default-variations: seq-length: seq.384 env: - CM_CREATE_INPUT_BATCH: 'no' - CM_QAIC_MODEL_NAME: bert-large + MLC_CREATE_INPUT_BATCH: 'no' + MLC_QAIC_MODEL_NAME: bert-large bs.#: env: - CM_CREATE_INPUT_BATCH: 'yes' - CM_QAIC_MODEL_BATCH_SIZE: '#' + MLC_CREATE_INPUT_BATCH: 'yes' + MLC_QAIC_MODEL_BATCH_SIZE: '#' group: batch-size bs.1: env: - CM_CREATE_INPUT_BATCH: 'yes' - CM_QAIC_MODEL_BATCH_SIZE: '1' + MLC_CREATE_INPUT_BATCH: 'yes' + MLC_QAIC_MODEL_BATCH_SIZE: '1' group: batch-size filter-size.#: ad: @@ -99,12 +99,12 @@ variations: calibration-option: mlperf.option1 model-framework: tf env: - CM_CALIBRATE_IMAGENET: 'yes' - 
CM_QAIC_COMPILER_ARGS: '' - CM_QAIC_COMPILER_PARAMS: -output-node-name=ArgMax -profiling-threads=<<>> - CM_QAIC_MODEL_NAME: resnet50 - CM_QAIC_MODEL_TO_CONVERT: calibrate_resnet50_tf - CM_QAIC_OUTPUT_NODE_NAME: -output-node-name=ArgMax + MLC_CALIBRATE_IMAGENET: 'yes' + MLC_QAIC_COMPILER_ARGS: '' + MLC_QAIC_COMPILER_PARAMS: -output-node-name=ArgMax -profiling-threads=<<>> + MLC_QAIC_MODEL_NAME: resnet50 + MLC_QAIC_MODEL_TO_CONVERT: calibrate_resnet50_tf + MLC_QAIC_OUTPUT_NODE_NAME: -output-node-name=ArgMax group: model resnet50,tf: adr: @@ -113,34 +113,34 @@ variations: preprocessed-dataset: tags: _NHWC env: - CM_QAIC_MODEL_TO_CONVERT: calibrate_resnet50_tf + MLC_QAIC_MODEL_TO_CONVERT: calibrate_resnet50_tf retinanet: adr: model-src: tags: retinanet,_no-nms,_onnx env: - CM_CALIBRATE_OPENIMAGES: 'yes' - CM_QAIC_COMPILER_ARGS: '' - CM_QAIC_COMPILER_PARAMS: -enable-channelwise -profiling-threads=<<>> - -onnx-define-symbol=batch_size,<<>> -node-precision-info=<<>> - CM_QAIC_MODEL_NAME: retinanet - CM_QAIC_MODEL_TO_CONVERT: calibrate_retinanet_no_nms_mlperf + MLC_CALIBRATE_OPENIMAGES: 'yes' + MLC_QAIC_COMPILER_ARGS: '' + MLC_QAIC_COMPILER_PARAMS: -enable-channelwise -profiling-threads=<<>> + -onnx-define-symbol=batch_size,<<>> -node-precision-info=<<>> + MLC_QAIC_MODEL_NAME: retinanet + MLC_QAIC_MODEL_TO_CONVERT: calibrate_retinanet_no_nms_mlperf group: model new_env_keys: - - CM_QAIC_MODEL_RETINANET_* + - MLC_QAIC_MODEL_RETINANET_* seq.#: ad: squad-preprocessed: tags: _seq.# env: - CM_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH: '#' + MLC_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH: '#' group: seq-length seq.384: ad: squad-preprocessed: tags: _seq.384 env: - CM_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH: '#' + MLC_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH: '#' group: seq-length tf: group: model-framework diff --git a/script/calibrate-model-for.qaic/run.sh b/script/calibrate-model-for.qaic/run.sh index 59b1aed3d..7da7962b9 100644 --- a/script/calibrate-model-for.qaic/run.sh +++ b/script/calibrate-model-for.qaic/run.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,12 +17,12 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi } #Add your run commands here... 
-run "$CM_RUN_CMD" +run "$MLC_RUN_CMD" diff --git a/script/clean-nvidia-mlperf-inference-scratch-space/customize.py b/script/clean-nvidia-mlperf-inference-scratch-space/customize.py index 149fe34e7..2e3d4bc64 100644 --- a/script/clean-nvidia-mlperf-inference-scratch-space/customize.py +++ b/script/clean-nvidia-mlperf-inference-scratch-space/customize.py @@ -12,21 +12,21 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') clean_cmd = '' cache_rm_tags = '' - extra_cache_rm_tags = env.get('CM_CLEAN_EXTRA_CACHE_RM_TAGS', '') + extra_cache_rm_tags = env.get('MLC_CLEAN_EXTRA_CACHE_RM_TAGS', '') - if env.get('CM_MODEL', '') == 'sdxl': - if env.get('CM_CLEAN_ARTIFACT_NAME', '') == 'downloaded_data': - clean_cmd = f"""rm -rf {os.path.join(env['CM_NVIDIA_MLPERF_SCRATCH_PATH'], "data", "coco", "SDXL")} """ + if env.get('MLC_MODEL', '') == 'sdxl': + if env.get('MLC_CLEAN_ARTIFACT_NAME', '') == 'downloaded_data': + clean_cmd = f"""rm -rf {os.path.join(env['MLC_NVIDIA_MLPERF_SCRATCH_PATH'], "data", "coco", "SDXL")} """ cache_rm_tags = "nvidia-harness,_preprocess_data,_sdxl" - if env.get('CM_CLEAN_ARTIFACT_NAME', '') == 'preprocessed_data': - clean_cmd = f"""rm -rf {os.path.join(env['CM_NVIDIA_MLPERF_SCRATCH_PATH'], "preprocessed_data", "coco2014-tokenized-sdxl")} """ + if env.get('MLC_CLEAN_ARTIFACT_NAME', '') == 'preprocessed_data': + clean_cmd = f"""rm -rf {os.path.join(env['MLC_NVIDIA_MLPERF_SCRATCH_PATH'], "preprocessed_data", "coco2014-tokenized-sdxl")} """ cache_rm_tags = "nvidia-harness,_preprocess_data,_sdxl" - if env.get('CM_CLEAN_ARTIFACT_NAME', '') == 'downloaded_model': - clean_cmd = f"""rm -rf {os.path.join(env['CM_NVIDIA_MLPERF_SCRATCH_PATH'], "models", "SDXL")} """ + if env.get('MLC_CLEAN_ARTIFACT_NAME', '') == 'downloaded_model': + clean_cmd = f"""rm -rf {os.path.join(env['MLC_NVIDIA_MLPERF_SCRATCH_PATH'], "models", "SDXL")} """ cache_rm_tags = "nvidia-harness,_download_model,_sdxl" cache_rm_tags = cache_rm_tags + extra_cache_rm_tags @@ -39,10 +39,10 @@ def preprocess(i): return r if r['return'] == 0: # cache entry found if clean_cmd != '': - env['CM_RUN_CMD'] = clean_cmd + env['MLC_RUN_CMD'] = clean_cmd else: if clean_cmd != '': - env['CM_RUN_CMD'] = clean_cmd + env['MLC_RUN_CMD'] = clean_cmd return {'return': 0} diff --git a/script/clean-nvidia-mlperf-inference-scratch-space/meta.yaml b/script/clean-nvidia-mlperf-inference-scratch-space/meta.yaml index 079fe309d..1909c3a07 100644 --- a/script/clean-nvidia-mlperf-inference-scratch-space/meta.yaml +++ b/script/clean-nvidia-mlperf-inference-scratch-space/meta.yaml @@ -11,7 +11,7 @@ tags: - inference uid: bb41f6e3608e4e8a input_mapping: - extra_cache_rm_tags: CM_CLEAN_EXTRA_CACHE_RM_TAGS + extra_cache_rm_tags: MLC_CLEAN_EXTRA_CACHE_RM_TAGS deps: # Get Nvidia scratch space where data and models get downloaded - tags: get,mlperf,inference,nvidia,scratch,space @@ -22,24 +22,24 @@ variations: sdxl: group: model env: - CM_MODEL: sdxl + MLC_MODEL: sdxl downloaded-data: group: artifact env: - CM_CLEAN_ARTIFACT_NAME: downloaded_data + MLC_CLEAN_ARTIFACT_NAME: downloaded_data preprocessed-data: group: artifact env: - CM_CLEAN_ARTIFACT_NAME: preprocessed_data + MLC_CLEAN_ARTIFACT_NAME: preprocessed_data downloaded-model: group: artifact env: - CM_CLEAN_ARTIFACT_NAME: downloaded_model + MLC_CLEAN_ARTIFACT_NAME: downloaded_model v4.1: group: version env: - CM_NVIDIA_MLPERF_INFERENCE_CODE_VERSION: v4.1 + MLC_NVIDIA_MLPERF_INFERENCE_CODE_VERSION: v4.1 v4.0: 
group: version env: - CM_NVIDIA_MLPERF_INFERENCE_CODE_VERSION: v4.0 + MLC_NVIDIA_MLPERF_INFERENCE_CODE_VERSION: v4.0 diff --git a/script/clean-nvidia-mlperf-inference-scratch-space/run.sh b/script/clean-nvidia-mlperf-inference-scratch-space/run.sh index 4c23c380e..32cf4d51e 100644 --- a/script/clean-nvidia-mlperf-inference-scratch-space/run.sh +++ b/script/clean-nvidia-mlperf-inference-scratch-space/run.sh @@ -1,17 +1,17 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency echo "Running: " -echo "${CM_RUN_CMD}" +echo "${MLC_RUN_CMD}" echo "" -if [[ ${CM_FAKE_RUN} != "yes" ]]; then - eval "${CM_RUN_CMD}" +if [[ ${MLC_FAKE_RUN} != "yes" ]]; then + eval "${MLC_RUN_CMD}" test $? -eq 0 || exit 1 fi diff --git a/script/compile-model-for.qaic/customize.py b/script/compile-model-for.qaic/customize.py index 8f9f5717c..c055cfb07 100644 --- a/script/compile-model-for.qaic/customize.py +++ b/script/compile-model-for.qaic/customize.py @@ -12,9 +12,9 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') - if env.get('CM_REGISTER_CACHE', '') == '': + if env.get('MLC_REGISTER_CACHE', '') == '': r = construct_compilation_cmd(env) if r['return'] > 0: @@ -23,18 +23,18 @@ def preprocess(i): print("Compiling from " + os.getcwd()) - env['CM_QAIC_MODEL_FINAL_COMPILATION_CMD'] = cmd + env['MLC_QAIC_MODEL_FINAL_COMPILATION_CMD'] = cmd - env['CM_RUN_CMD'] = cmd + env['MLC_RUN_CMD'] = cmd else: import shutil print( "Creating cache entry from " + - env['CM_REGISTER_CACHE'] + + env['MLC_REGISTER_CACHE'] + " to " + os.getcwd()) r = shutil.copytree( - env['CM_REGISTER_CACHE'], + env['MLC_REGISTER_CACHE'], os.path.join( os.getcwd(), "elfs")) @@ -44,14 +44,14 @@ def preprocess(i): def construct_compilation_cmd(env): - compiler_params_base = env['CM_QAIC_MODEL_COMPILER_PARAMS_BASE'] - compiler_args = env['CM_QAIC_MODEL_COMPILER_ARGS'] + \ - ' ' + env.get('CM_QAIC_MODEL_COMPILER_ARGS_SUT', '') - batchsize = env.get('CM_QAIC_MODEL_BATCH_SIZE') + compiler_params_base = env['MLC_QAIC_MODEL_COMPILER_PARAMS_BASE'] + compiler_args = env['MLC_QAIC_MODEL_COMPILER_ARGS'] + \ + ' ' + env.get('MLC_QAIC_MODEL_COMPILER_ARGS_SUT', '') + batchsize = env.get('MLC_QAIC_MODEL_BATCH_SIZE') - if env.get('CM_QAIC_MODEL_QUANTIZATION', '') == 'yes': + if env.get('MLC_QAIC_MODEL_QUANTIZATION', '') == 'yes': profile_string = " -load-profile=" + \ - env['CM_QAIC_MODEL_PROFILE_WITH_PATH'] + env['MLC_QAIC_MODEL_PROFILE_WITH_PATH'] else: profile_string = '' @@ -61,14 +61,14 @@ def construct_compilation_cmd(env): compiler_params += " -batchsize=" + batchsize percentile_calibration_params = env.get( - 'CM_QAIC_MODEL_COMPILER_QUANTIZATION_PARAMS') + 'MLC_QAIC_MODEL_COMPILER_QUANTIZATION_PARAMS') if percentile_calibration_params: compiler_params += " " + percentile_calibration_params aic_binary_dir = os.path.join(os.getcwd(), "elfs") - cmd = env['CM_QAIC_EXEC_PATH'] + \ - " -model=" + env['CM_ML_MODEL_FILE_WITH_PATH'] + \ + cmd = env['MLC_QAIC_EXEC_PATH'] + \ + " -model=" + env['MLC_ML_MODEL_FILE_WITH_PATH'] + \ profile_string + ' -aic-binary-dir=' + aic_binary_dir + ' ' \ + compiler_params @@ -78,12 +78,12 @@ def 
construct_compilation_cmd(env): def postprocess(i): env = i['env'] - env['CM_QAIC_MODEL_COMPILED_BINARY_WITH_PATH'] = os.path.join( + env['MLC_QAIC_MODEL_COMPILED_BINARY_WITH_PATH'] = os.path.join( os.getcwd(), "elfs", "programqpc.bin") if not os.path.isdir(os.path.join(os.getcwd(), "elfs")): return { 'return': 1, 'error': 'elfs directory not found inside the compiled directory'} - env['CM_ML_MODEL_FILE_WITH_PATH'] = env['CM_QAIC_MODEL_COMPILED_BINARY_WITH_PATH'] + env['MLC_ML_MODEL_FILE_WITH_PATH'] = env['MLC_QAIC_MODEL_COMPILED_BINARY_WITH_PATH'] return {'return': 0} diff --git a/script/compile-model-for.qaic/meta.yaml b/script/compile-model-for.qaic/meta.yaml index 634bb948b..b2b667c94 100644 --- a/script/compile-model-for.qaic/meta.yaml +++ b/script/compile-model-for.qaic/meta.yaml @@ -8,34 +8,34 @@ deps: - names: - qaic-apps-sdk skip_if_env: - CM_REGISTER_CACHE: + MLC_REGISTER_CACHE: - 'on' tags: get,qaic,apps,sdk - enable_if_env: - CM_COMPILE_RETINANET: + MLC_COMPILE_RETINANET: - 'yes' names: - retinanet-profile - qaic-profile tags: qaic,calibrate,_retinanet - enable_if_env: - CM_COMPILE_RESNET: + MLC_COMPILE_RESNET: - 'on' names: - resnet-profile - qaic-profile skip_if_env: - CM_REGISTER_CACHE: + MLC_REGISTER_CACHE: - 'on' tags: qaic,calibrate,_resnet50 - names: - model-src tags: get,ml-model input_mapping: - register: CM_REGISTER_CACHE + register: MLC_REGISTER_CACHE new_env_keys: -- CM_QAIC_MODEL* -- CM_ML_MODEL_FILE_WITH_PATH +- MLC_QAIC_MODEL* +- MLC_ML_MODEL_FILE_WITH_PATH tags: - qaic - compile @@ -54,39 +54,39 @@ variations: - qaic-profile tags: calibrate,qaic,_bert-99 env: - CM_COMPILE_BERT: 'on' - CM_QAIC_MODEL_COMPILER_ARGS: '' - CM_QAIC_MODEL_COMPILER_PARAMS_BASE: -aic-hw -aic-hw-version=2.0 -execute-nodes-in-fp16=Add,Div,Erf,Softmax + MLC_COMPILE_BERT: 'on' + MLC_QAIC_MODEL_COMPILER_ARGS: '' + MLC_QAIC_MODEL_COMPILER_PARAMS_BASE: -aic-hw -aic-hw-version=2.0 -execute-nodes-in-fp16=Add,Div,Erf,Softmax -quantization-schema=symmetric_with_uint8 -quantization-precision=Int8 -quantization-precision-bias=Int32 -vvv -compile-only -onnx-define-symbol=batch_size,1 -onnx-define-symbol=seg_length,384 -multicast-weights -combine-inputs=false -combine-outputs=false - CM_QAIC_MODEL_TO_CONVERT: calibrate_bert_mlperf + MLC_QAIC_MODEL_TO_CONVERT: calibrate_bert_mlperf bert-99,offline: env: - CM_QAIC_MODEL_COMPILER_ARGS: -allocator-dealloc-delay=2 -size-split-granularity=1536 + MLC_QAIC_MODEL_COMPILER_ARGS: -allocator-dealloc-delay=2 -size-split-granularity=1536 -vtcm-working-set-limit-ratio=1 - CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=2 + MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=2 bert-99,offline,nsp.14: env: - CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=3 + MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=3 bert-99,offline,nsp.16: env: - CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=2 + MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=2 bert-99,server: env: - CM_QAIC_MODEL_COMPILER_ARGS: -allocator-dealloc-delay=2 -size-split-granularity=1536 + MLC_QAIC_MODEL_COMPILER_ARGS: -allocator-dealloc-delay=2 -size-split-granularity=1536 -vtcm-working-set-limit-ratio=1 - CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=3 + MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=3 bert-99,server,nsp.14: env: - CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=3 + MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=3 bert-99,singlestream: env: - 
CM_QAIC_MODEL_COMPILER_ARGS: '' - CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=8 -ols=1 + MLC_QAIC_MODEL_COMPILER_ARGS: '' + MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=8 -ols=1 bert-99,singlestream,nsp.14: env: - CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=8 -ols=1 + MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=8 -ols=1 bert-99.9: adr: model-src: @@ -94,40 +94,40 @@ variations: base: - no-quantized env: - CM_COMPILE_BERT: 'on' - CM_QAIC_MODEL_COMPILER_ARGS: '' - CM_QAIC_MODEL_COMPILER_PARAMS_BASE: -aic-hw -aic-hw-version=2.0 -convert-to-fp16 + MLC_COMPILE_BERT: 'on' + MLC_QAIC_MODEL_COMPILER_ARGS: '' + MLC_QAIC_MODEL_COMPILER_PARAMS_BASE: -aic-hw -aic-hw-version=2.0 -convert-to-fp16 -vvv -compile-only -onnx-define-symbol=batch_size,1 -onnx-define-symbol=seg_length,384 -combine-inputs=false -combine-outputs=false - CM_QAIC_MODEL_TO_CONVERT: bert_mlperf + MLC_QAIC_MODEL_TO_CONVERT: bert_mlperf bert-99.9,offline: env: - CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=2 -mos=1 -ols=2 + MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=2 -mos=1 -ols=2 bert-99.9,offline,nsp.14: env: - CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=2 -mos=1 -ols=2 + MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=2 -mos=1 -ols=2 bert-99.9,offline,nsp.16: env: - CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=2 -mos=1 -ols=2 + MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=2 -mos=1 -ols=2 bert-99.9,server: env: - CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=2 + MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=2 bert-99.9,server,nsp.14: env: - CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=2 + MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=2 bs.#: adr: qaic-profile: tags: _bs.# env: - CM_QAIC_MODEL_BATCH_SIZE: '#' + MLC_QAIC_MODEL_BATCH_SIZE: '#' group: batch-size bs.1: adr: qaic-profile: tags: _bs.1 env: - CM_QAIC_MODEL_BATCH_SIZE: '1' + MLC_QAIC_MODEL_BATCH_SIZE: '1' group: batch-size filter-size.#: ad: @@ -138,7 +138,7 @@ variations: group: mlperf-scenario no-quantized: env: - CM_QAIC_MODEL_QUANTIZATION: 'no' + MLC_QAIC_MODEL_QUANTIZATION: 'no' group: quantization nsp.14: group: nsp @@ -152,13 +152,13 @@ variations: group: mlperf-scenario pc.#: env: - CM_QAIC_MODEL_COMPILER_PERCENTILE_CALIBRATION_VALUE: '#' - CM_QAIC_MODEL_COMPILER_QUANTIZATION_PARAMS: -quantization-calibration=Percentile -percentile-calibration-value=<<>> + MLC_QAIC_MODEL_COMPILER_PERCENTILE_CALIBRATION_VALUE: '#' + MLC_QAIC_MODEL_COMPILER_QUANTIZATION_PARAMS: -quantization-calibration=Percentile -percentile-calibration-value=<<>> group: percentile-calibration quantized: default: true env: - CM_QAIC_MODEL_QUANTIZATION: 'yes' + MLC_QAIC_MODEL_QUANTIZATION: 'yes' group: quantization resnet50: adr: @@ -167,87 +167,87 @@ variations: default_variations: model-framework: tf env: - CM_COMPILE_RESNET: 'on' - CM_QAIC_MODEL_COMPILER_PARAMS_BASE: -aic-hw -aic-hw-version=2.0 -quantization-schema=symmetric_with_uint8 + MLC_COMPILE_RESNET: 'on' + MLC_QAIC_MODEL_COMPILER_PARAMS_BASE: -aic-hw -aic-hw-version=2.0 -quantization-schema=symmetric_with_uint8 -quantization-precision=Int8 -output-node-name=ArgMax -vvv -compile-only -use-producer-dma=1 - CM_QAIC_MODEL_TO_CONVERT: compile_resnet50_tf + MLC_QAIC_MODEL_TO_CONVERT: compile_resnet50_tf resnet50,multistream: env: - CM_QAIC_MODEL_COMPILER_ARGS: '' - CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4 -mos=1 -ols=1 + MLC_QAIC_MODEL_COMPILER_ARGS: '' + MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4 -mos=1 -ols=1 resnet50,multistream,nsp.14: 
default_variations: batch-size: bs.1 env: - CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4 + MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4 resnet50,offline: env: - CM_QAIC_MODEL_COMPILER_ARGS: -sdp-cluster-sizes=2,2 -multicast-weights - CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4 -mos=1,2 -ols=4 + MLC_QAIC_MODEL_COMPILER_ARGS: -sdp-cluster-sizes=2,2 -multicast-weights + MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4 -mos=1,2 -ols=4 resnet50,offline,nsp.14: default_variations: batch-size: bs.8 env: - CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4 -mos=1,2 -ols=4 + MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4 -mos=1,2 -ols=4 resnet50,server: env: {} resnet50,server,nsp.14: default_variations: batch-size: bs.8 env: - CM_QAIC_MODEL_COMPILER_ARGS: -sdp-cluster-sizes=2,2 -mos=1,2 -multicast-weights - CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4 -ols=4 + MLC_QAIC_MODEL_COMPILER_ARGS: -sdp-cluster-sizes=2,2 -mos=1,2 -multicast-weights + MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4 -ols=4 resnet50,server,nsp.16: default_variations: batch-size: bs.8 env: - CM_QAIC_MODEL_COMPILER_ARGS: -sdp-cluster-sizes=4,4 -mos=1,4 - CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4 -ols=4 + MLC_QAIC_MODEL_COMPILER_ARGS: -sdp-cluster-sizes=4,4 -mos=1,4 + MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4 -ols=4 resnet50,singlestream: env: - CM_QAIC_MODEL_COMPILER_ARGS: -aic-num-of-instances=1 - CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=1 -ols=1 + MLC_QAIC_MODEL_COMPILER_ARGS: -aic-num-of-instances=1 + MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=1 -ols=1 resnet50,singlestream,nsp.14: default_variations: batch-size: bs.1 env: - CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=1 -ols=1 + MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=1 -ols=1 resnet50,tf: ad: model-src: tags: _fix-input-shape env: - CM_QAIC_MODEL_TO_CONVERT: calibrate_resnet50_tf + MLC_QAIC_MODEL_TO_CONVERT: calibrate_resnet50_tf retinanet: adr: model-src: tags: retinanet,_no-nms env: - CM_COMPILE_RETINANET: 'on' - CM_QAIC_MODEL_COMPILER_ARGS: -aic-enable-depth-first - CM_QAIC_MODEL_COMPILER_PARAMS_BASE: -aic-hw -aic-hw-version=2.0 -compile-only - -enable-channelwise -onnx-define-symbol=batch_size,1 -node-precision-info=<<>> + MLC_COMPILE_RETINANET: 'on' + MLC_QAIC_MODEL_COMPILER_ARGS: -aic-enable-depth-first + MLC_QAIC_MODEL_COMPILER_PARAMS_BASE: -aic-hw -aic-hw-version=2.0 -compile-only + -enable-channelwise -onnx-define-symbol=batch_size,1 -node-precision-info=<<>> -quantization-schema-constants=symmetric_with_uint8 -quantization-schema-activations=asymmetric -quantization-calibration=None - CM_QAIC_MODEL_TO_CONVERT: calibrate_retinanet_no_nms_mlperf + MLC_QAIC_MODEL_TO_CONVERT: calibrate_retinanet_no_nms_mlperf new_env_keys: - - CM_QAIC_MODEL_RETINANET_* + - MLC_QAIC_MODEL_RETINANET_* retinanet,multistream: {} retinanet,nsp.14: env: {} retinanet,offline: env: - CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=1 + MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=1 retinanet,offline,nsp.14: {} retinanet,server: {} retinanet,server,nsp.14: {} retinanet,singlestream: env: - CM_QAIC_MODEL_COMPILER_ARGS: '' - CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=1 -ols=1 + MLC_QAIC_MODEL_COMPILER_ARGS: '' + MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=1 -ols=1 retinanet,singlestream,nsp.14: env: - CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=1 -ols=1 + MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=1 -ols=1 server: 
group: mlperf-scenario singlestream: diff --git a/script/compile-model-for.qaic/run.sh b/script/compile-model-for.qaic/run.sh index c5c3c04cb..d20c3a705 100644 --- a/script/compile-model-for.qaic/run.sh +++ b/script/compile-model-for.qaic/run.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,7 +17,7 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi @@ -25,4 +25,4 @@ function run() { #Add your run commands here... run "rm -rf elfs" -run "$CM_RUN_CMD" +run "$MLC_RUN_CMD" diff --git a/script/compile-program/customize.py b/script/compile-program/customize.py index 9fa65ee54..681f88ca7 100644 --- a/script/compile-program/customize.py +++ b/script/compile-program/customize.py @@ -7,56 +7,56 @@ def preprocess(i): env = i['env'] CPPFLAGS = env.get('+ CPPFLAGS', []) - env['CM_C_COMPILER_FLAGS'] = " ".join(env.get('+ CFLAGS', []) + CPPFLAGS) - env['CM_CXX_COMPILER_FLAGS'] = " ".join( + env['MLC_C_COMPILER_FLAGS'] = " ".join(env.get('+ CFLAGS', []) + CPPFLAGS) + env['MLC_CXX_COMPILER_FLAGS'] = " ".join( env.get('+ CXXFLAGS', []) + CPPFLAGS) - env['CM_F_COMPILER_FLAGS'] = " ".join(env.get('+ FFLAGS', [])) + env['MLC_F_COMPILER_FLAGS'] = " ".join(env.get('+ FFLAGS', [])) CPATH = env.get('+CPATH', []) - env['CM_C_INCLUDE_PATH'] = " -I".join([" "] + + env['MLC_C_INCLUDE_PATH'] = " -I".join([" "] + env.get('+C_INCLUDE_PATH', []) + CPATH) - env['CM_CPLUS_INCLUDE_PATH'] = " -I".join( + env['MLC_CPLUS_INCLUDE_PATH'] = " -I".join( [" "] + env.get('+CPLUS_INCLUDE_PATH', []) + CPATH) - env['CM_F_INCLUDE_PATH'] = " -I".join([" "] + + env['MLC_F_INCLUDE_PATH'] = " -I".join([" "] + env.get('+F_INCLUDE_PATH', []) + CPATH) # If windows, need to extend it more ... 
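Note for reviewers: the `" -I".join([" "] + ...)` idiom in compile-program's `preprocess()` above turns a list of include directories into a single compiler-flag string, with the dummy leading element ensuring every path gets a `-I` prefix. A minimal sketch of what it yields (the directories are hypothetical):

```python
# Illustrative only: the include-path join idiom used in compile-program's
# preprocess(); the directories below are hypothetical.
paths = ["/usr/local/include", "/opt/qaic/include"]
flags = " -I".join([" "] + paths)
# The leading " " element gives every real entry, including the first,
# a " -I" prefix.
print(flags)  # '  -I/usr/local/include -I/opt/qaic/include'
```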
if os_info['platform'] == 'windows' and env.get( - 'CM_COMPILER_FAMILY', '') != 'LLVM': + 'MLC_COMPILER_FAMILY', '') != 'LLVM': print("WARNING: compile-program script should be extended to support flags for non-LLVM compilers on Windows") return {'return': 0} LDFLAGS = env.get('+ LDFLAGS', []) - env['CM_C_LINKER_FLAGS'] = " ".join(env.get('+ LDCFLAGS', []) + LDFLAGS) - env['CM_CXX_LINKER_FLAGS'] = " ".join( + env['MLC_C_LINKER_FLAGS'] = " ".join(env.get('+ LDCFLAGS', []) + LDFLAGS) + env['MLC_CXX_LINKER_FLAGS'] = " ".join( env.get('+ LDCXXFLAGS', []) + LDFLAGS) - env['CM_F_LINKER_FLAGS'] = " ".join(env.get('+ LDFFLAGS', []) + LDFLAGS) + env['MLC_F_LINKER_FLAGS'] = " ".join(env.get('+ LDFFLAGS', []) + LDFLAGS) - if env.get('CM_LINKER_LANG', 'C') == "C": - env['CM_LINKER_BIN'] = env['CM_C_COMPILER_BIN'] - env['CM_LINKER_WITH_PATH'] = env['CM_C_COMPILER_WITH_PATH'] - env['CM_LINKER_COMPILE_FLAGS'] = env['CM_C_COMPILER_FLAGS'] - env['CM_LINKER_FLAGS'] = env['CM_C_LINKER_FLAGS'] + if env.get('MLC_LINKER_LANG', 'C') == "C": + env['MLC_LINKER_BIN'] = env['MLC_C_COMPILER_BIN'] + env['MLC_LINKER_WITH_PATH'] = env['MLC_C_COMPILER_WITH_PATH'] + env['MLC_LINKER_COMPILE_FLAGS'] = env['MLC_C_COMPILER_FLAGS'] + env['MLC_LINKER_FLAGS'] = env['MLC_C_LINKER_FLAGS'] - elif env.get('CM_LINKER_LANG', 'C') == "CXX": - env['CM_LINKER_BIN'] = env['CM_CXX_COMPILER_BIN'] - env['CM_LINKER_WITH_PATH'] = env['CM_CXX_COMPILER_WITH_PATH'] - env['CM_LINKER_COMPILE_FLAGS'] = env['CM_CXX_COMPILER_FLAGS'] - env['CM_LINKER_FLAGS'] = env['CM_CXX_LINKER_FLAGS'] + elif env.get('MLC_LINKER_LANG', 'C') == "CXX": + env['MLC_LINKER_BIN'] = env['MLC_CXX_COMPILER_BIN'] + env['MLC_LINKER_WITH_PATH'] = env['MLC_CXX_COMPILER_WITH_PATH'] + env['MLC_LINKER_COMPILE_FLAGS'] = env['MLC_CXX_COMPILER_FLAGS'] + env['MLC_LINKER_FLAGS'] = env['MLC_CXX_LINKER_FLAGS'] - elif env.get('CM_LINKER_LANG', 'C') == "F": - env['CM_LINKER_BIN'] = env['CM_F_COMPILER_BIN'] - env['CM_LINKER_WITH_PATH'] = env['CM_F_COMPILER_WITH_PATH'] - env['CM_LINKER_COMPILE_FLAGS'] = env['CM_F_COMPILER_FLAGS'] - env['CM_LINKER_FLAGS'] = env['CM_F_LINKER_FLAGS'] + elif env.get('MLC_LINKER_LANG', 'C') == "F": + env['MLC_LINKER_BIN'] = env['MLC_F_COMPILER_BIN'] + env['MLC_LINKER_WITH_PATH'] = env['MLC_F_COMPILER_WITH_PATH'] + env['MLC_LINKER_COMPILE_FLAGS'] = env['MLC_F_COMPILER_FLAGS'] + env['MLC_LINKER_FLAGS'] = env['MLC_F_LINKER_FLAGS'] - env['CM_LD_LIBRARY_PATH'] = " -L".join([" "] + + env['MLC_LD_LIBRARY_PATH'] = " -L".join([" "] + env.get('+LD_LIBRARY_PATH', [])) - env['CM_SOURCE_FOLDER_PATH'] = env['CM_SOURCE_FOLDER_PATH'] if 'CM_SOURCE_FOLDER_PATH' in env else env[ - 'CM_TMP_CURRENT_SCRIPT_PATH'] if 'CM_TMP_CURRENT_SCRIPT_PATH' in env else '' + env['MLC_SOURCE_FOLDER_PATH'] = env['MLC_SOURCE_FOLDER_PATH'] if 'MLC_SOURCE_FOLDER_PATH' in env else env[ + 'MLC_TMP_CURRENT_SCRIPT_PATH'] if 'MLC_TMP_CURRENT_SCRIPT_PATH' in env else '' return {'return': 0} diff --git a/script/compile-program/run.bat b/script/compile-program/run.bat index ece5d9e9c..8a9e5436d 100644 --- a/script/compile-program/run.bat +++ b/script/compile-program/run.bat @@ -1,16 +1,16 @@ rem Compile program -set BIN_NAME=%CM_BIN_NAME% -IF NOT DEFINED CM_BIN_NAME SET BIN_NAME=run.exe +set BIN_NAME=%MLC_BIN_NAME% +IF NOT DEFINED MLC_BIN_NAME SET BIN_NAME=run.exe -set RUN_DIR=%CM_RUN_DIR% -IF NOT DEFINED CM_RUN_DIR SET RUN_DIR=. +set RUN_DIR=%MLC_RUN_DIR% +IF NOT DEFINED MLC_RUN_DIR SET RUN_DIR=. echo. echo Checking compiler version ... echo. 
-"%CM_C_COMPILER_WITH_PATH%" %CM_C_COMPILER_FLAG_VERSION% +"%MLC_C_COMPILER_WITH_PATH%" %MLC_C_COMPILER_FLAG_VERSION% echo. echo Compiling source files ... @@ -18,18 +18,18 @@ echo. if not exist %RUN_DIR% mkdir %RUN_DIR% -cd %CM_SOURCE_FOLDER_PATH% +cd %MLC_SOURCE_FOLDER_PATH% IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% -if not "%CM_C_SOURCE_FILES%" == "" ( - echo %CM_C_COMPILER_WITH_PATH% %CM_C_COMPILER_FLAGS% %CM_C_INCLUDE_PATH% %CM_C_SOURCE_FILES% %CM_LD_LIBRARY_PATH% %LDCFLAGS% %CM_C_COMPILER_FLAG_OUTPUT%"%RUN_DIR%\%BIN_NAME%" - "%CM_C_COMPILER_WITH_PATH%" %CM_C_COMPILER_FLAGS% %CM_C_INCLUDE_PATH% %CM_C_SOURCE_FILES% %CM_LD_LIBRARY_PATH% %LDCFLAGS% %CM_C_COMPILER_FLAG_OUTPUT%"%RUN_DIR%\%BIN_NAME%" +if not "%MLC_C_SOURCE_FILES%" == "" ( + echo %MLC_C_COMPILER_WITH_PATH% %MLC_C_COMPILER_FLAGS% %MLC_C_INCLUDE_PATH% %MLC_C_SOURCE_FILES% %MLC_LD_LIBRARY_PATH% %LDCFLAGS% %MLC_C_COMPILER_FLAG_OUTPUT%"%RUN_DIR%\%BIN_NAME%" + "%MLC_C_COMPILER_WITH_PATH%" %MLC_C_COMPILER_FLAGS% %MLC_C_INCLUDE_PATH% %MLC_C_SOURCE_FILES% %MLC_LD_LIBRARY_PATH% %LDCFLAGS% %MLC_C_COMPILER_FLAG_OUTPUT%"%RUN_DIR%\%BIN_NAME%" IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% ) -if not "%CM_CXX_SOURCE_FILES%" == "" ( - echo %CM_CXX_COMPILER_WITH_PATH% %CM_CXX_SOURCE_FILES% %CM_CXX_COMPILER_FLAGS% %CM_CPLUS_INCLUDE_PATH% %CM_LD_LIBRARY_PATH% %LDCXXFLAGS% %CM_CXX_COMPILER_FLAG_OUTPUT%"%RUN_DIR%\%BIN_NAME%" - "%CM_CXX_COMPILER_WITH_PATH%" %CM_CXX_SOURCE_FILES% %CM_CXX_COMPILER_FLAGS% %CM_CPLUS_INCLUDE_PATH% %CM_LD_LIBRARY_PATH% %LDCXXFLAGS% %CM_CXX_COMPILER_FLAG_OUTPUT%"%RUN_DIR%\%BIN_NAME%" +if not "%MLC_CXX_SOURCE_FILES%" == "" ( + echo %MLC_CXX_COMPILER_WITH_PATH% %MLC_CXX_SOURCE_FILES% %MLC_CXX_COMPILER_FLAGS% %MLC_CPLUS_INCLUDE_PATH% %MLC_LD_LIBRARY_PATH% %LDCXXFLAGS% %MLC_CXX_COMPILER_FLAG_OUTPUT%"%RUN_DIR%\%BIN_NAME%" + "%MLC_CXX_COMPILER_WITH_PATH%" %MLC_CXX_SOURCE_FILES% %MLC_CXX_COMPILER_FLAGS% %MLC_CPLUS_INCLUDE_PATH% %MLC_LD_LIBRARY_PATH% %LDCXXFLAGS% %MLC_CXX_COMPILER_FLAG_OUTPUT%"%RUN_DIR%\%BIN_NAME%" IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% ) diff --git a/script/compile-program/run.sh b/script/compile-program/run.sh index 7e98bc47d..68045945d 100644 --- a/script/compile-program/run.sh +++ b/script/compile-program/run.sh @@ -2,11 +2,11 @@ # Compile -BIN_NAME=${CM_BIN_NAME:-run.out} -RUN_DIR=${CM_RUN_DIR:-.} +BIN_NAME=${MLC_BIN_NAME:-run.out} +RUN_DIR=${MLC_RUN_DIR:-.} echo "RUN_DIR=$RUN_DIR" -if [[ ${CM_SKIP_RECOMPILE} == "yes" ]]; then +if [[ ${MLC_SKIP_RECOMPILE} == "yes" ]]; then if [ -f ${RUN_DIR}/${BIN_NAME} ]; then exit 0 fi @@ -14,13 +14,13 @@ fi rm -f ${RUN_DIR}/${BIN_NAME} -if [ -z "${CM_SOURCE_FOLDER_PATH}" ]; then - echo "No source directory (CM_SOURCE_FOLDER_PATH} specified" +if [ -z "${MLC_SOURCE_FOLDER_PATH}" ]; then + echo "No source directory (MLC_SOURCE_FOLDER_PATH} specified" exit 1 fi -if [[ -z "${CM_C_SOURCE_FILES}" && -z "${CM_CXX_SOURCE_FILES}" && -z "${CM_F_SOURCE_FILES}" ]]; then - echo "No source files (CM_C_SOURCE_FILES or CM_CXX_SOURCE_FILES or CM_F_SOURCE_FILES) specified" +if [[ -z "${MLC_C_SOURCE_FILES}" && -z "${MLC_CXX_SOURCE_FILES}" && -z "${MLC_F_SOURCE_FILES}" ]]; then + echo "No source files (MLC_C_SOURCE_FILES or MLC_CXX_SOURCE_FILES or MLC_F_SOURCE_FILES) specified" exit 1 fi @@ -28,34 +28,34 @@ echo "" echo "Checking compiler version ..." echo "" -${CM_C_COMPILER_WITH_PATH} ${CM_C_COMPILER_FLAG_VERSION} +${MLC_C_COMPILER_WITH_PATH} ${MLC_C_COMPILER_FLAG_VERSION} echo "" echo "Compiling source files ..." echo "" -cd ${CM_SOURCE_FOLDER_PATH} +cd ${MLC_SOURCE_FOLDER_PATH} test $? 
-eq 0 || exit 1 -IFS=';' read -ra FILES <<< "${CM_C_SOURCE_FILES}" +IFS=';' read -ra FILES <<< "${MLC_C_SOURCE_FILES}" for file in "${FILES[@]}"; do base="$(basename -- $file)" base_name=${base%.*} echo $base echo $basename - CMD="${CM_C_COMPILER_WITH_PATH} -c ${CM_C_COMPILER_FLAGS} ${CM_C_INCLUDE_PATH} $file ${CM_C_COMPILER_FLAG_OUTPUT}$base_name.o" + CMD="${MLC_C_COMPILER_WITH_PATH} -c ${MLC_C_COMPILER_FLAGS} ${MLC_C_INCLUDE_PATH} $file ${MLC_C_COMPILER_FLAG_OUTPUT}$base_name.o" echo $CMD eval $CMD test $? -eq 0 || exit 1 done -IFS=';' read -ra FILES <<< "${CM_CXX_SOURCE_FILES}" +IFS=';' read -ra FILES <<< "${MLC_CXX_SOURCE_FILES}" for file in "${FILES[@]}"; do base="$(basename -- $file)" base_name=${base%.*} echo $base echo $basename - CMD="${CM_CXX_COMPILER_WITH_PATH} -c ${CM_CXX_COMPILER_FLAGS} ${CM_CPLUS_INCLUDE_PATH} $file ${CM_CXX_COMPILER_FLAG_OUTPUT}$base_name.o" + CMD="${MLC_CXX_COMPILER_WITH_PATH} -c ${MLC_CXX_COMPILER_FLAGS} ${MLC_CPLUS_INCLUDE_PATH} $file ${MLC_CXX_COMPILER_FLAG_OUTPUT}$base_name.o" echo $CMD eval $CMD test $? -eq 0 || exit 1 @@ -65,7 +65,7 @@ done echo "" echo "Linking ..." echo "" -CMD="${CM_LINKER_WITH_PATH} ${CM_LINKER_COMPILE_FLAGS} *.o -o ${RUN_DIR}/${BIN_NAME} ${CM_LD_LIBRARY_PATH} ${CM_LINKER_FLAGS}" +CMD="${MLC_LINKER_WITH_PATH} ${MLC_LINKER_COMPILE_FLAGS} *.o -o ${RUN_DIR}/${BIN_NAME} ${MLC_LD_LIBRARY_PATH} ${MLC_LINKER_FLAGS}" echo $CMD eval $CMD diff --git a/script/convert-csv-to-md/customize.py b/script/convert-csv-to-md/customize.py index 872c2b678..e547a6219 100644 --- a/script/convert-csv-to-md/customize.py +++ b/script/convert-csv-to-md/customize.py @@ -12,14 +12,14 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') - csv_file = env.get('CM_CSV_FILE', '') - md_file = env.get('CM_MD_FILE', '') + csv_file = env.get('MLC_CSV_FILE', '') + md_file = env.get('MLC_MD_FILE', '') process_file = os.path.join(i['run_script_input']['path'], "process.py") - env['CM_RUN_CMD'] = '{} {} {} {} '.format( - env["CM_PYTHON_BIN_WITH_PATH"], process_file, csv_file, md_file) + env['MLC_RUN_CMD'] = '{} {} {} {} '.format( + env["MLC_PYTHON_BIN_WITH_PATH"], process_file, csv_file, md_file) return {'return': 0} diff --git a/script/convert-csv-to-md/meta.yaml b/script/convert-csv-to-md/meta.yaml index 962601431..e1ed6f82e 100644 --- a/script/convert-csv-to-md/meta.yaml +++ b/script/convert-csv-to-md/meta.yaml @@ -17,8 +17,8 @@ deps: docker_input_mapping: {} input_description: {} input_mapping: - csv_file: CM_CSV_FILE - md_file: CM_MD_FILE + csv_file: MLC_CSV_FILE + md_file: MLC_MD_FILE new_env_keys: [] new_state_keys: [] post_deps: [] diff --git a/script/convert-csv-to-md/run.sh b/script/convert-csv-to-md/run.sh index 59b1aed3d..7da7962b9 100644 --- a/script/convert-csv-to-md/run.sh +++ b/script/convert-csv-to-md/run.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,12 +17,12 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi } #Add your run commands here... 
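For context on the `MLC_RUN_CMD` consumed by the wrapper below: convert-csv-to-md's `preprocess()` (in the hunk above) formats it as `<python> <process.py> <csv> <md>`, so the script ultimately evaluates a command line along these lines (the concrete paths are hypothetical):

```python
# Illustrative reconstruction of the MLC_RUN_CMD assembled in
# convert-csv-to-md/customize.py; the concrete paths are hypothetical.
python_bin = "/usr/bin/python3"        # stand-in for MLC_PYTHON_BIN_WITH_PATH
process_file = "/opt/mlc/script/convert-csv-to-md/process.py"
csv_file, md_file = "results.csv", "results.md"   # MLC_CSV_FILE / MLC_MD_FILE

run_cmd = '{} {} {} {} '.format(python_bin, process_file, csv_file, md_file)
# run.sh later does: run "$MLC_RUN_CMD", i.e. an eval of this string.
print(run_cmd)
# /usr/bin/python3 /opt/mlc/script/convert-csv-to-md/process.py results.csv results.md
```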
-run "$CM_RUN_CMD" +run "$MLC_RUN_CMD" diff --git a/script/convert-ml-model-huggingface-to-onnx/customize.py b/script/convert-ml-model-huggingface-to-onnx/customize.py index 90950b120..2c3959044 100644 --- a/script/convert-ml-model-huggingface-to-onnx/customize.py +++ b/script/convert-ml-model-huggingface-to-onnx/customize.py @@ -8,8 +8,8 @@ def preprocess(i): env = i['env'] - if env.get("CM_MODEL_HUGG_PATH", "") == "": - return {'return': 1, 'error': 'CM_MODEL_HUGG_PATH is not set'} + if env.get("MLC_MODEL_HUGG_PATH", "") == "": + return {'return': 1, 'error': 'MLC_MODEL_HUGG_PATH is not set'} automation = i['automation'] diff --git a/script/convert-ml-model-huggingface-to-onnx/meta.yaml b/script/convert-ml-model-huggingface-to-onnx/meta.yaml index 54f54b0e5..53b6c6474 100644 --- a/script/convert-ml-model-huggingface-to-onnx/meta.yaml +++ b/script/convert-ml-model-huggingface-to-onnx/meta.yaml @@ -12,8 +12,8 @@ deps: - tags: get,generic-python-lib,_onnxruntime env: {} new_env_keys: -- CM_ML_MODEL* -- CM_MODEL_HUGG_PATH +- MLC_ML_MODEL* +- MLC_MODEL_HUGG_PATH - HUGGINGFACE_ONNX_FILE_PATH tags: - ml-model @@ -26,4 +26,4 @@ uid: eacb01655d7e49ac variations: model-path.#: env: - CM_MODEL_HUGG_PATH: '#' + MLC_MODEL_HUGG_PATH: '#' diff --git a/script/convert-ml-model-huggingface-to-onnx/run.sh b/script/convert-ml-model-huggingface-to-onnx/run.sh index 56be76db9..04a94bbcf 100644 --- a/script/convert-ml-model-huggingface-to-onnx/run.sh +++ b/script/convert-ml-model-huggingface-to-onnx/run.sh @@ -1,2 +1,2 @@ #!/bin/bash -python -m transformers.onnx --model=${CM_MODEL_HUGG_PATH} ${PWD} \ No newline at end of file +python -m transformers.onnx --model=${MLC_MODEL_HUGG_PATH} ${PWD} \ No newline at end of file diff --git a/script/copy-to-clipboard/code.py b/script/copy-to-clipboard/code.py index 0a1aa014a..ce7fe265a 100644 --- a/script/copy-to-clipboard/code.py +++ b/script/copy-to-clipboard/code.py @@ -1,10 +1,10 @@ import os import pyperclip as pc -text = os.environ.get('CM_COPY_TO_CLIPBOARD_TEXT', '') +text = os.environ.get('MLC_COPY_TO_CLIPBOARD_TEXT', '') add_quotes = os.environ.get( - 'CM_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES', '') in [ + 'MLC_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES', '') in [ True, 'True', 'yes'] if add_quotes: diff --git a/script/copy-to-clipboard/meta.yaml b/script/copy-to-clipboard/meta.yaml index de631040b..f10ed5d9a 100644 --- a/script/copy-to-clipboard/meta.yaml +++ b/script/copy-to-clipboard/meta.yaml @@ -26,7 +26,7 @@ deps: - tags: get,generic-python-lib,_package.pyperclip input_mapping: - text: CM_COPY_TO_CLIPBOARD_TEXT - t: CM_COPY_TO_CLIPBOARD_TEXT - add_quotes: CM_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES - q: CM_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES + text: MLC_COPY_TO_CLIPBOARD_TEXT + t: MLC_COPY_TO_CLIPBOARD_TEXT + add_quotes: MLC_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES + q: MLC_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES diff --git a/script/copy-to-clipboard/run.bat b/script/copy-to-clipboard/run.bat index 545178f20..0e1db36fe 100644 --- a/script/copy-to-clipboard/run.bat +++ b/script/copy-to-clipboard/run.bat @@ -1,4 +1,4 @@ rem native script -%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\code.py +%MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\code.py IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/copy-to-clipboard/run.sh b/script/copy-to-clipboard/run.sh index fa6f579f7..88087983d 100644 --- a/script/copy-to-clipboard/run.sh +++ b/script/copy-to-clipboard/run.sh @@ -1,4 +1,4 @@ #!/bin/bash -${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/code.py 
+${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/code.py test $? -eq 0 || exit 1 diff --git a/script/create-conda-env/customize.py b/script/create-conda-env/customize.py index ddc74fab9..e22b25b6c 100644 --- a/script/create-conda-env/customize.py +++ b/script/create-conda-env/customize.py @@ -12,7 +12,7 @@ def preprocess(i): recursion_spaces = i['recursion_spaces'] - if env.get('CM_CONDA_ENV_NAME', '') == '': + if env.get('MLC_CONDA_ENV_NAME', '') == '': return {'return': 1, 'error': 'Please use "_name." variation'} return {'return': 0} @@ -23,11 +23,11 @@ def postprocess(i): conda_prefix = os.getcwd() env['CONDA_PREFIX'] = conda_prefix - env['CM_CONDA_PREFIX'] = conda_prefix - env['CM_CONDA_BIN_PATH'] = os.path.join(conda_prefix, "bin") - env['CM_CONDA_LIB_PATH'] = os.path.join(conda_prefix, "lib") + env['MLC_CONDA_PREFIX'] = conda_prefix + env['MLC_CONDA_BIN_PATH'] = os.path.join(conda_prefix, "bin") + env['MLC_CONDA_LIB_PATH'] = os.path.join(conda_prefix, "lib") - env['+PATH'] = [env['CM_CONDA_BIN_PATH']] - env['+LD_LIBRARY_PATH'] = [env['CM_CONDA_LIB_PATH']] + env['+PATH'] = [env['MLC_CONDA_BIN_PATH']] + env['+LD_LIBRARY_PATH'] = [env['MLC_CONDA_LIB_PATH']] return {'return': 0} diff --git a/script/create-conda-env/meta.yaml b/script/create-conda-env/meta.yaml index 56a61f0f1..672843771 100644 --- a/script/create-conda-env/meta.yaml +++ b/script/create-conda-env/meta.yaml @@ -12,10 +12,10 @@ deps: new_env_keys: - +PATH - +LD_LIBRARY_PATH -- CM_CONDA_PREFIX +- MLC_CONDA_PREFIX - CONDA_PREFIX -- CM_CONDA_BIN_PATH -- CM_CONDA_LIB_PATH +- MLC_CONDA_BIN_PATH +- MLC_CONDA_LIB_PATH tags: - create - get @@ -27,4 +27,4 @@ uid: e39e0b04c86a40f2 variations: name.#: env: - CM_CONDA_ENV_NAME: '#' + MLC_CONDA_ENV_NAME: '#' diff --git a/script/create-conda-env/run.sh b/script/create-conda-env/run.sh index 540dde9b1..f608722aa 100644 --- a/script/create-conda-env/run.sh +++ b/script/create-conda-env/run.sh @@ -1,6 +1,6 @@ #!/bin/bash -cmd="${CM_CONDA_BIN_WITH_PATH} create -p ${PWD}" +cmd="${MLC_CONDA_BIN_WITH_PATH} create -p ${PWD}" echo "$cmd" eval "$cmd" test $? -eq 0 || exit $? 
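A note on the `+PATH` / `+LD_LIBRARY_PATH` keys exported by create-conda-env's `postprocess()` above: keys prefixed with `+` are list-valued and are prepended to the corresponding process variable when the automation flattens the environment. A rough sketch of that merge, written from the behaviour implied by these keys rather than from the automation core's actual code:

```python
# Sketch (not the automation's actual implementation) of how '+'-prefixed
# list entries such as '+PATH' are folded into real environment variables.
import os

env_update = {
    '+PATH': ['/tmp/mlc-conda/bin'],             # e.g. MLC_CONDA_BIN_PATH
    '+LD_LIBRARY_PATH': ['/tmp/mlc-conda/lib'],  # e.g. MLC_CONDA_LIB_PATH
}

for key, dirs in env_update.items():
    var = key.lstrip('+')
    current = os.environ.get(var, '')
    # New entries go in front so the conda env shadows system binaries.
    os.environ[var] = os.pathsep.join(dirs + ([current] if current else []))

print(os.environ['PATH'].split(os.pathsep)[0])  # '/tmp/mlc-conda/bin'
```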
diff --git a/script/create-custom-cache-entry/customize.py b/script/create-custom-cache-entry/customize.py index ce3e0a988..a0299f50d 100644 --- a/script/create-custom-cache-entry/customize.py +++ b/script/create-custom-cache-entry/customize.py @@ -9,7 +9,7 @@ def preprocess(i): env = i['env'] extra_cache_tags = [] - if env.get('CM_EXTRA_CACHE_TAGS', '').strip() == '': + if env.get('MLC_EXTRA_CACHE_TAGS', '').strip() == '': print('') extra_cache_tags_str = input( 'Enter extra tags for the custom CACHE entry separated by comma: ') @@ -23,7 +23,7 @@ def postprocess(i): env = i['env'] - path = env.get('CM_CUSTOM_CACHE_ENTRY_PATH', '').strip() + path = env.get('MLC_CUSTOM_CACHE_ENTRY_PATH', '').strip() if path != '': if not os.path.isdir(path): @@ -32,17 +32,17 @@ def postprocess(i): path = os.getcwd() x = '' - env_key = env.get('CM_CUSTOM_CACHE_ENTRY_ENV_KEY', '') + env_key = env.get('MLC_CUSTOM_CACHE_ENTRY_ENV_KEY', '') if env_key != '': x = env_key + '_' - env['CM_CUSTOM_CACHE_ENTRY_{}PATH'.format(x)] = path - env['CM_CUSTOM_CACHE_ENTRY_PATH'] = path + env['MLC_CUSTOM_CACHE_ENTRY_{}PATH'.format(x)] = path + env['MLC_CUSTOM_CACHE_ENTRY_PATH'] = path - env_key2 = env.get('CM_CUSTOM_CACHE_ENTRY_ENV_KEY2', '') + env_key2 = env.get('MLC_CUSTOM_CACHE_ENTRY_ENV_KEY2', '') v = env.get(env_key2, '') real_path = v if v != '' else path - env['CM_CUSTOM_CACHE_ENTRY_{}REAL_PATH'.format(x)] = real_path + env['MLC_CUSTOM_CACHE_ENTRY_{}REAL_PATH'.format(x)] = real_path return {'return': 0} diff --git a/script/create-custom-cache-entry/meta.yaml b/script/create-custom-cache-entry/meta.yaml index 7272bb99a..d7f876fae 100644 --- a/script/create-custom-cache-entry/meta.yaml +++ b/script/create-custom-cache-entry/meta.yaml @@ -15,13 +15,13 @@ category: CM automation cache: true input_mapping: - env_key: CM_CUSTOM_CACHE_ENTRY_ENV_KEY - env_key2: CM_CUSTOM_CACHE_ENTRY_ENV_KEY2 - path: CM_CUSTOM_CACHE_ENTRY_PATH - to: CM_CUSTOM_CACHE_ENTRY_PATH + env_key: MLC_CUSTOM_CACHE_ENTRY_ENV_KEY + env_key2: MLC_CUSTOM_CACHE_ENTRY_ENV_KEY2 + path: MLC_CUSTOM_CACHE_ENTRY_PATH + to: MLC_CUSTOM_CACHE_ENTRY_PATH new_env_keys: -- CM_CUSTOM_CACHE_ENTRY* +- MLC_CUSTOM_CACHE_ENTRY* print_env_at_the_end: - CM_CUSTOM_CACHE_ENTRY_PATH: "Path to custom cache entry" + MLC_CUSTOM_CACHE_ENTRY_PATH: "Path to custom cache entry" diff --git a/script/create-fpgaconvnet-app-tinyml/customize.py b/script/create-fpgaconvnet-app-tinyml/customize.py index a12f17f36..ae8668d89 100644 --- a/script/create-fpgaconvnet-app-tinyml/customize.py +++ b/script/create-fpgaconvnet-app-tinyml/customize.py @@ -12,15 +12,15 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') - network_env_name = env['CM_TINY_FPGACONVNET_NETWORK_ENV_NAME'] - run_dir = env['CM_TINY_FPGACONVNET_' + network_env_name + '_RUN_DIR'] + network_env_name = env['MLC_TINY_FPGACONVNET_NETWORK_ENV_NAME'] + run_dir = env['MLC_TINY_FPGACONVNET_' + network_env_name + '_RUN_DIR'] run_cmd = "cd " + run_dir + " && xsct create_boot_image.tcl" - env['CM_RUN_CMD'] = run_cmd - env['CM_RUN_DIR'] = run_dir + env['MLC_RUN_CMD'] = run_cmd + env['MLC_RUN_DIR'] = run_dir return {'return': 0} @@ -30,10 +30,10 @@ def postprocess(i): env = i['env'] return {'return': 1} - network = env['CM_TINY_NETWORK_NAME'] + network = env['MLC_TINY_NETWORK_NAME'] json_location = os.path.join( - env['CM_RUN_DIR'], - env['CM_TINY_NETWORK_NAME'] + ".json") + env['MLC_RUN_DIR'], + env['MLC_TINY_NETWORK_NAME'] + ".json") if 
os.path.exists(json_location): print( f"JSON configuration file for {network} created at {json_location}") diff --git a/script/create-fpgaconvnet-app-tinyml/meta.yaml b/script/create-fpgaconvnet-app-tinyml/meta.yaml index 3ad1cdc9b..ad63685de 100644 --- a/script/create-fpgaconvnet-app-tinyml/meta.yaml +++ b/script/create-fpgaconvnet-app-tinyml/meta.yaml @@ -39,6 +39,6 @@ variations: tags: _zc706 default: true env: - CM_TINY_BOARD: zc706 + MLC_TINY_BOARD: zc706 group: board versions: {} diff --git a/script/create-fpgaconvnet-app-tinyml/run.sh b/script/create-fpgaconvnet-app-tinyml/run.sh index fe67c233c..35de74bab 100644 --- a/script/create-fpgaconvnet-app-tinyml/run.sh +++ b/script/create-fpgaconvnet-app-tinyml/run.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency function exit_if_error() { test $? -eq 0 || exit $? @@ -15,12 +15,12 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" fi exit_if_error } #Add your run commands here... -run "${CM_RUN_CMD}" +run "${MLC_RUN_CMD}" diff --git a/script/create-fpgaconvnet-config-tinyml/customize.py b/script/create-fpgaconvnet-config-tinyml/customize.py index 90ed2d6f0..853ff007f 100644 --- a/script/create-fpgaconvnet-config-tinyml/customize.py +++ b/script/create-fpgaconvnet-config-tinyml/customize.py @@ -12,30 +12,30 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') code_path = os.path.join( - env['CM_GIT_REPO_CHECKOUT_PATH'], + env['MLC_GIT_REPO_CHECKOUT_PATH'], "closed", "fpgaconvnet", "code") - network_env_name = env['CM_TINY_NETWORK_NAME'].replace("-", "_").upper() - env['CM_TINY_FPGACONVNET_NETWORK_ENV_NAME'] = network_env_name - env['CM_TINY_FPGACONVNET_' + network_env_name + '_CODE_PATH'] = code_path + network_env_name = env['MLC_TINY_NETWORK_NAME'].replace("-", "_").upper() + env['MLC_TINY_FPGACONVNET_NETWORK_ENV_NAME'] = network_env_name + env['MLC_TINY_FPGACONVNET_' + network_env_name + '_CODE_PATH'] = code_path - board = env.get('CM_TINY_BOARD', 'zc706') + board = env.get('MLC_TINY_BOARD', 'zc706') - benchmark = env.get('CM_TINY_BENCHMARK', 'ic') + benchmark = env.get('MLC_TINY_BENCHMARK', 'ic') run_dir = os.path.join(code_path, board, benchmark) - env['CM_TINY_FPGACONVNET_' + network_env_name + '_RUN_DIR'] = run_dir + env['MLC_TINY_FPGACONVNET_' + network_env_name + '_RUN_DIR'] = run_dir run_cmd = "cd " + run_dir + " && " + \ - env['CM_PYTHON_BIN_WITH_PATH'] + " " + "create_config.py" + env['MLC_PYTHON_BIN_WITH_PATH'] + " " + "create_config.py" - env['ML_MODEL_FILE_WITH_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] - env['CM_RUN_CMD'] = run_cmd - env['CM_RUN_DIR'] = run_dir + env['ML_MODEL_FILE_WITH_PATH'] = env['MLC_ML_MODEL_FILE_WITH_PATH'] + env['MLC_RUN_CMD'] = run_cmd + env['MLC_RUN_DIR'] = run_dir return {'return': 0} @@ -44,21 +44,21 @@ def postprocess(i): env = i['env'] - network = env['CM_TINY_NETWORK_NAME'] - env['CM_TINY_FPGACONVNET_NETWORK_NAME'] = network - network_env_name = env['CM_TINY_FPGACONVNET_NETWORK_ENV_NAME'] + network = env['MLC_TINY_NETWORK_NAME'] + 
env['MLC_TINY_FPGACONVNET_NETWORK_NAME'] = network + network_env_name = env['MLC_TINY_FPGACONVNET_NETWORK_ENV_NAME'] json_location = os.path.join( - env['CM_RUN_DIR'], - env['CM_TINY_NETWORK_NAME'] + ".json") + env['MLC_RUN_DIR'], + env['MLC_TINY_NETWORK_NAME'] + ".json") if os.path.exists(json_location): print( f"JSON configuration file for {network} created at {json_location}") else: return {'return': 1, 'error': "JSON configuration file generation failed"} - env['CM_TINY_FPGACONVNET_CONFIG_FILE_' + + env['MLC_TINY_FPGACONVNET_CONFIG_FILE_' + network_env_name + '_PATH'] = json_location - env['CM_GET_DEPENDENT_CACHED_PATH'] = json_location + env['MLC_GET_DEPENDENT_CACHED_PATH'] = json_location return {'return': 0} diff --git a/script/create-fpgaconvnet-config-tinyml/meta.yaml b/script/create-fpgaconvnet-config-tinyml/meta.yaml index f74a3165d..b22316e2f 100644 --- a/script/create-fpgaconvnet-config-tinyml/meta.yaml +++ b/script/create-fpgaconvnet-config-tinyml/meta.yaml @@ -12,7 +12,7 @@ deps: input_description: {} input_mapping: {} new_env_keys: -- CM_TINY_FPGACONVNET* +- MLC_TINY_FPGACONVNET* new_state_keys: [] post_deps: [] posthook_deps: [] @@ -32,9 +32,9 @@ variations: zc706: default: true env: - CM_TINY_BOARD: zc706 + MLC_TINY_BOARD: zc706 group: board zc706,ic: env: - CM_TINY_NETWORK_NAME: zc706-resnet + MLC_TINY_NETWORK_NAME: zc706-resnet versions: {} diff --git a/script/create-fpgaconvnet-config-tinyml/run.sh b/script/create-fpgaconvnet-config-tinyml/run.sh index fe67c233c..35de74bab 100644 --- a/script/create-fpgaconvnet-config-tinyml/run.sh +++ b/script/create-fpgaconvnet-config-tinyml/run.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency function exit_if_error() { test $? -eq 0 || exit $? @@ -15,12 +15,12 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" fi exit_if_error } #Add your run commands here... 
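One detail of the fpgaConvNet customize code above that is easy to miss: the per-network env keys are derived dynamically from the network name, so `zc706-resnet` (the `zc706,ic` variation's `MLC_TINY_NETWORK_NAME`) becomes the key fragment `ZC706_RESNET`. A small sketch of the derivation, using only names visible in the hunks above:

```python
# Derivation of the dynamic key names used in
# create-fpgaconvnet-config-tinyml/customize.py (illustrative value).
network = "zc706-resnet"                       # MLC_TINY_NETWORK_NAME
network_env_name = network.replace("-", "_").upper()
run_dir_key = "MLC_TINY_FPGACONVNET_" + network_env_name + "_RUN_DIR"
print(network_env_name)  # ZC706_RESNET
print(run_dir_key)       # MLC_TINY_FPGACONVNET_ZC706_RESNET_RUN_DIR
```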
-run "${CM_RUN_CMD}" +run "${MLC_RUN_CMD}" diff --git a/script/create-patch/customize.py b/script/create-patch/customize.py index 14c64623d..573e6735e 100644 --- a/script/create-patch/customize.py +++ b/script/create-patch/customize.py @@ -12,23 +12,23 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') - new_dir = env.get('CM_CREATE_PATCH_NEW', '') + new_dir = env.get('MLC_CREATE_PATCH_NEW', '') if new_dir == '': return {'return': 1, 'error': 'specify NEW directory using --new'} if not os.path.isdir(new_dir): return {'return': 1, 'error': 'NEW directory doesn\'t exist {}'.format(new_dir)} - old_dir = env.get('CM_CREATE_PATCH_OLD', '') + old_dir = env.get('MLC_CREATE_PATCH_OLD', '') if old_dir == '': return {'return': 1, 'error': 'specify OLD directory using --old'} if not os.path.isdir(old_dir): return {'return': 1, 'error': 'OLD directory doesn\'t exist {}'.format(old_dir)} - exclude = env.get('CM_CREATE_PATCH_EXCLUDE', '').strip() + exclude = env.get('MLC_CREATE_PATCH_EXCLUDE', '').strip() x_exclude = '' if exclude != '': diff --git a/script/create-patch/meta.yaml b/script/create-patch/meta.yaml index cbcedb648..d5c83fbd0 100644 --- a/script/create-patch/meta.yaml +++ b/script/create-patch/meta.yaml @@ -12,9 +12,9 @@ deps: - tags: detect,os input_mapping: - new: CM_CREATE_PATCH_NEW - old: CM_CREATE_PATCH_OLD - exclude: CM_CREATE_PATCH_EXCLUDE + new: MLC_CREATE_PATCH_NEW + old: MLC_CREATE_PATCH_OLD + exclude: MLC_CREATE_PATCH_EXCLUDE tags: - create diff --git a/script/destroy-terraform/run.sh b/script/destroy-terraform/run.sh index 9e0ae31ac..25e40a3a0 100644 --- a/script/destroy-terraform/run.sh +++ b/script/destroy-terraform/run.sh @@ -1,6 +1,6 @@ #!/bin/bash -source ${CM_TERRAFORM_CONFIG_DIR}/credentials.sh -source ${CM_TERRAFORM_CONFIG_DIR}/apply_credentials.sh -cd ${CM_TERRAFORM_RUN_DIR} +source ${MLC_TERRAFORM_CONFIG_DIR}/credentials.sh +source ${MLC_TERRAFORM_CONFIG_DIR}/apply_credentials.sh +cd ${MLC_TERRAFORM_RUN_DIR} terraform destroy --auto-approve test $? -eq 0 || exit 1 diff --git a/script/detect-cpu/README-extra.md b/script/detect-cpu/README-extra.md index c2326c281..3e6e1dad0 100644 --- a/script/detect-cpu/README-extra.md +++ b/script/detect-cpu/README-extra.md @@ -2,14 +2,14 @@ This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the host CPU details and exports them in a unified list of environment variables to be reused across the supported operating systems. ## Exported Variables -* `CM_HOST_CPU_L1I_CACHE_SIZE` -* `CM_HOST_CPU_L2_CACHE_SIZE` -* `CM_HOST_CPU_MEMSIZE` -* `CM_HOST_CPU_SOCKETS` -* `CM_HOST_CPU_THREADS_PER_CORE` -* `CM_HOST_CPU_TOTAL_CORES` -* `CM_HOST_CPU_TOTAL_LOGICAL_CORES` -* `CM_HOST_CPU_TOTAL_PHYSICAL_CORES` +* `MLC_HOST_CPU_L1I_CACHE_SIZE` +* `MLC_HOST_CPU_L2_CACHE_SIZE` +* `MLC_HOST_CPU_MEMSIZE` +* `MLC_HOST_CPU_SOCKETS` +* `MLC_HOST_CPU_THREADS_PER_CORE` +* `MLC_HOST_CPU_TOTAL_CORES` +* `MLC_HOST_CPU_TOTAL_LOGICAL_CORES` +* `MLC_HOST_CPU_TOTAL_PHYSICAL_CORES` ## Supported and Tested OS 1. 
Ubuntu 18.04, 20.04, 22.04 diff --git a/script/detect-cpu/customize.py b/script/detect-cpu/customize.py index cd0c6dc95..6ca244cf7 100644 --- a/script/detect-cpu/customize.py +++ b/script/detect-cpu/customize.py @@ -119,36 +119,36 @@ def postprocess(i): # Unifying some CPU info across different platforms unified_env = { - 'CM_CPUINFO_CPUs': 'CM_HOST_CPU_TOTAL_CORES', - 'CM_CPUINFO_L1d_cache': 'CM_HOST_CPU_L1D_CACHE_SIZE', - 'CM_CPUINFO_L1i_cache': 'CM_HOST_CPU_L1I_CACHE_SIZE', - 'CM_CPUINFO_L2_cache': 'CM_HOST_CPU_L2_CACHE_SIZE', - 'CM_CPUINFO_L3_cache': 'CM_HOST_CPU_L3_CACHE_SIZE', - 'CM_CPUINFO_Sockets': 'CM_HOST_CPU_SOCKETS', - 'CM_CPUINFO_NUMA_nodes': 'CM_HOST_CPU_NUMA_NODES', - 'CM_CPUINFO_Cores_per_socket': 'CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET', - 'CM_CPUINFO_Cores_per_cluster': 'CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET', - 'CM_CPUINFO_Threads_per_core': 'CM_HOST_CPU_THREADS_PER_CORE', - 'CM_CPUINFO_Architecture': 'CM_HOST_CPU_ARCHITECTURE', - 'CM_CPUINFO_CPU_family': 'CM_HOST_CPU_FAMILY', - 'CM_CPUINFO_CPU_max_MHz': 'CM_HOST_CPU_MAX_MHZ', - 'CM_CPUINFO_Model_name': 'CM_HOST_CPU_MODEL_NAME', - 'CM_CPUINFO_On_line_CPUs_list': 'CM_HOST_CPU_ON_LINE_CPUS_LIST', - 'CM_CPUINFO_Vendor_ID': 'CM_HOST_CPU_VENDOR_ID', - 'CM_CPUINFO_hw_physicalcpu': 'CM_HOST_CPU_TOTAL_PHYSICAL_CORES', - 'CM_CPUINFO_hw_logicalcpu': 'CM_HOST_CPU_TOTAL_CORES', - 'CM_CPUINFO_hw_packages': 'CM_HOST_CPU_SOCKETS', - 'CM_CPUINFO_hw_memsize': 'CM_HOST_CPU_MEMSIZE', - 'CM_CPUINFO_hw_l1icachesize': 'CM_HOST_CPU_L1I_CACHE_SIZE', - 'CM_CPUINFO_hw_l1dcachesize': 'CM_HOST_CPU_L1D_CACHE_SIZE', - 'CM_CPUINFO_hw_l2cachesize': 'CM_HOST_CPU_L2_CACHE_SIZE' + 'MLC_CPUINFO_CPUs': 'MLC_HOST_CPU_TOTAL_CORES', + 'MLC_CPUINFO_L1d_cache': 'MLC_HOST_CPU_L1D_CACHE_SIZE', + 'MLC_CPUINFO_L1i_cache': 'MLC_HOST_CPU_L1I_CACHE_SIZE', + 'MLC_CPUINFO_L2_cache': 'MLC_HOST_CPU_L2_CACHE_SIZE', + 'MLC_CPUINFO_L3_cache': 'MLC_HOST_CPU_L3_CACHE_SIZE', + 'MLC_CPUINFO_Sockets': 'MLC_HOST_CPU_SOCKETS', + 'MLC_CPUINFO_NUMA_nodes': 'MLC_HOST_CPU_NUMA_NODES', + 'MLC_CPUINFO_Cores_per_socket': 'MLC_HOST_CPU_PHYSICAL_CORES_PER_SOCKET', + 'MLC_CPUINFO_Cores_per_cluster': 'MLC_HOST_CPU_PHYSICAL_CORES_PER_SOCKET', + 'MLC_CPUINFO_Threads_per_core': 'MLC_HOST_CPU_THREADS_PER_CORE', + 'MLC_CPUINFO_Architecture': 'MLC_HOST_CPU_ARCHITECTURE', + 'MLC_CPUINFO_CPU_family': 'MLC_HOST_CPU_FAMILY', + 'MLC_CPUINFO_CPU_max_MHz': 'MLC_HOST_CPU_MAX_MHZ', + 'MLC_CPUINFO_Model_name': 'MLC_HOST_CPU_MODEL_NAME', + 'MLC_CPUINFO_On_line_CPUs_list': 'MLC_HOST_CPU_ON_LINE_CPUS_LIST', + 'MLC_CPUINFO_Vendor_ID': 'MLC_HOST_CPU_VENDOR_ID', + 'MLC_CPUINFO_hw_physicalcpu': 'MLC_HOST_CPU_TOTAL_PHYSICAL_CORES', + 'MLC_CPUINFO_hw_logicalcpu': 'MLC_HOST_CPU_TOTAL_CORES', + 'MLC_CPUINFO_hw_packages': 'MLC_HOST_CPU_SOCKETS', + 'MLC_CPUINFO_hw_memsize': 'MLC_HOST_CPU_MEMSIZE', + 'MLC_CPUINFO_hw_l1icachesize': 'MLC_HOST_CPU_L1I_CACHE_SIZE', + 'MLC_CPUINFO_hw_l1dcachesize': 'MLC_HOST_CPU_L1D_CACHE_SIZE', + 'MLC_CPUINFO_hw_l2cachesize': 'MLC_HOST_CPU_L2_CACHE_SIZE' } - if env['CM_HOST_OS_TYPE'] == 'linux': + if env['MLC_HOST_OS_TYPE'] == 'linux': vkeys = ['Architecture', 'Model name', 'Vendor ID', 'CPU family', 'NUMA node(s)', 'CPU(s)', 'On-line CPU(s) list', 'Socket(s)', 'Core(s) per socket', 'Core(s) per cluster', 'Thread(s) per core', 'L1d cache', 'L1i cache', 'L2 cache', 'L3 cache', 'CPU max MHz'] - elif env['CM_HOST_OS_FLAVOR'] == 'macos': + elif env['MLC_HOST_OS_FLAVOR'] == 'macos': vkeys = ['hw.physicalcpu', 'hw.logicalcpu', 'hw.packages', 'hw.ncpu', 'hw.memsize', 'hw.l1icachesize', 
'hw.l2cachesize'] if vkeys: @@ -156,7 +156,7 @@ def postprocess(i): v = s.split(':') key = v[0] if key in vkeys: - env_key = 'CM_CPUINFO_' + key.replace( + env_key = 'MLC_CPUINFO_' + key.replace( " ", "_").replace( '(', @@ -172,21 +172,21 @@ def postprocess(i): else: env[env_key] = v[1].strip() - if env.get('CM_HOST_CPU_SOCKETS', '') == '-': # assume as 1 - env['CM_HOST_CPU_SOCKETS'] = '1' + if env.get('MLC_HOST_CPU_SOCKETS', '') == '-': # assume as 1 + env['MLC_HOST_CPU_SOCKETS'] = '1' - if env.get('CM_HOST_CPU_TOTAL_CORES', '') != '' and env.get( - 'CM_HOST_CPU_TOTAL_LOGICAL_CORES', '') == '': - env['CM_HOST_CPU_TOTAL_LOGICAL_CORES'] = env['CM_HOST_CPU_TOTAL_CORES'] + if env.get('MLC_HOST_CPU_TOTAL_CORES', '') != '' and env.get( + 'MLC_HOST_CPU_TOTAL_LOGICAL_CORES', '') == '': + env['MLC_HOST_CPU_TOTAL_LOGICAL_CORES'] = env['MLC_HOST_CPU_TOTAL_CORES'] - if env.get('CM_HOST_CPU_TOTAL_LOGICAL_CORES', '') != '' and env.get( - 'CM_HOST_CPU_TOTAL_PHYSICAL_CORES', '') != '' and env.get('CM_HOST_CPU_THREADS_PER_CORE', '') == '': - env['CM_HOST_CPU_THREADS_PER_CORE'] = str(int(int(env['CM_HOST_CPU_TOTAL_LOGICAL_CORES']) // - int(env['CM_HOST_CPU_TOTAL_PHYSICAL_CORES']))) + if env.get('MLC_HOST_CPU_TOTAL_LOGICAL_CORES', '') != '' and env.get( + 'MLC_HOST_CPU_TOTAL_PHYSICAL_CORES', '') != '' and env.get('MLC_HOST_CPU_THREADS_PER_CORE', '') == '': + env['MLC_HOST_CPU_THREADS_PER_CORE'] = str(int(int(env['MLC_HOST_CPU_TOTAL_LOGICAL_CORES']) // + int(env['MLC_HOST_CPU_TOTAL_PHYSICAL_CORES']))) - if env.get('CM_HOST_CPU_SOCKETS', '') != '' and env.get('CM_HOST_CPU_TOTAL_PHYSICAL_CORES', - '') != '' and env.get('CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET', '') == '': - env['CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET'] = str( - int(env['CM_HOST_CPU_TOTAL_PHYSICAL_CORES']) // int(env['CM_HOST_CPU_SOCKETS'])) + if env.get('MLC_HOST_CPU_SOCKETS', '') != '' and env.get('MLC_HOST_CPU_TOTAL_PHYSICAL_CORES', + '') != '' and env.get('MLC_HOST_CPU_PHYSICAL_CORES_PER_SOCKET', '') == '': + env['MLC_HOST_CPU_PHYSICAL_CORES_PER_SOCKET'] = str( + int(env['MLC_HOST_CPU_TOTAL_PHYSICAL_CORES']) // int(env['MLC_HOST_CPU_SOCKETS'])) return {'return': 0} diff --git a/script/detect-cpu/meta.yaml b/script/detect-cpu/meta.yaml index 1da7a920e..7befa2ece 100644 --- a/script/detect-cpu/meta.yaml +++ b/script/detect-cpu/meta.yaml @@ -9,9 +9,9 @@ clean_files: deps: - tags: detect,os new_env_keys: -- CM_HOST_CPU_* -- CM_HOST_MEMORY_CAPACITY -- CM_HOST_DISK_CAPACITY +- MLC_HOST_CPU_* +- MLC_HOST_MEMORY_CAPACITY +- MLC_HOST_DISK_CAPACITY new_state_keys: - host_device_raw_info tags: diff --git a/script/detect-cpu/run.sh b/script/detect-cpu/run.sh index 2ca2fcc9b..b57ecc367 100644 --- a/script/detect-cpu/run.sh +++ b/script/detect-cpu/run.sh @@ -11,37 +11,37 @@ extract_field() { echo "${value:-$default}" } -if [[ ${CM_HOST_OS_FLAVOR} == "macos" ]]; then +if [[ ${MLC_HOST_OS_FLAVOR} == "macos" ]]; then sysctl -a | grep hw > tmp-lscpu.out else lscpu > tmp-lscpu.out memory_capacity=`free -h --si | grep Mem: | tr -s ' ' | cut -d' ' -f2` - echo "CM_HOST_MEMORY_CAPACITY=$memory_capacity">>tmp-run-env.out + echo "MLC_HOST_MEMORY_CAPACITY=$memory_capacity">>tmp-run-env.out disk_capacity=`df -h --total -l |grep total |tr -s ' '|cut -d' ' -f2` - echo "CM_HOST_DISK_CAPACITY=$disk_capacity">>tmp-run-env.out + echo "MLC_HOST_DISK_CAPACITY=$disk_capacity">>tmp-run-env.out # extract cpu information which are not there in lscpu - CM_HOST_CPU_WRITE_PROTECT_SUPPORT=$(extract_field "wp" "Not Found") - CM_HOST_CPU_MICROCODE=$(extract_field "microcode" "Not Found") 
- CM_HOST_CPU_FPU_SUPPORT=$(extract_field "fpu" "Not Found") - CM_HOST_CPU_FPU_EXCEPTION_SUPPORT=$(extract_field "fpu_exception" "Not Found") - CM_HOST_CPU_BUGS=$(extract_field "bugs" "Not Found") - CM_HOST_CPU_TLB_SIZE=$(extract_field "TLB size" "Not Found") - CM_HOST_CPU_CFLUSH_SIZE=$(extract_field "clflush size" "Not Found") - CM_HOST_CACHE_ALIGNMENT_SIZE=$(extract_field "cache_alignment" "Not Found") - CM_HOST_POWER_MANAGEMENT=$(extract_field "power management" "Not Found") + MLC_HOST_CPU_WRITE_PROTECT_SUPPORT=$(extract_field "wp" "Not Found") + MLC_HOST_CPU_MICROCODE=$(extract_field "microcode" "Not Found") + MLC_HOST_CPU_FPU_SUPPORT=$(extract_field "fpu" "Not Found") + MLC_HOST_CPU_FPU_EXCEPTION_SUPPORT=$(extract_field "fpu_exception" "Not Found") + MLC_HOST_CPU_BUGS=$(extract_field "bugs" "Not Found") + MLC_HOST_CPU_TLB_SIZE=$(extract_field "TLB size" "Not Found") + MLC_HOST_CPU_CFLUSH_SIZE=$(extract_field "clflush size" "Not Found") + MLC_HOST_CACHE_ALIGNMENT_SIZE=$(extract_field "cache_alignment" "Not Found") + MLC_HOST_POWER_MANAGEMENT=$(extract_field "power management" "Not Found") # Write results to a file { - echo "CM_HOST_CPU_WRITE_PROTECT_SUPPORT=$CM_HOST_CPU_WRITE_PROTECT_SUPPORT" - echo "CM_HOST_CPU_MICROCODE=$CM_HOST_CPU_MICROCODE" - echo "CM_HOST_CPU_FPU_SUPPORT=$CM_HOST_CPU_FPU_SUPPORT" - echo "CM_HOST_CPU_FPU_EXCEPTION_SUPPORT=$CM_HOST_CPU_FPU_EXCEPTION_SUPPORT" - echo "CM_HOST_CPU_BUGS=$CM_HOST_CPU_BUGS" - echo "CM_HOST_CPU_TLB_SIZE=$CM_HOST_CPU_TLB_SIZE" - echo "CM_HOST_CPU_CFLUSH_SIZE=$CM_HOST_CPU_CFLUSH_SIZE" - echo "CM_HOST_CACHE_ALIGNMENT_SIZE=$CM_HOST_CACHE_ALIGNMENT_SIZE" - echo "CM_HOST_POWER_MANAGEMENT=$CM_HOST_POWER_MANAGEMENT" + echo "MLC_HOST_CPU_WRITE_PROTECT_SUPPORT=$MLC_HOST_CPU_WRITE_PROTECT_SUPPORT" + echo "MLC_HOST_CPU_MICROCODE=$MLC_HOST_CPU_MICROCODE" + echo "MLC_HOST_CPU_FPU_SUPPORT=$MLC_HOST_CPU_FPU_SUPPORT" + echo "MLC_HOST_CPU_FPU_EXCEPTION_SUPPORT=$MLC_HOST_CPU_FPU_EXCEPTION_SUPPORT" + echo "MLC_HOST_CPU_BUGS=$MLC_HOST_CPU_BUGS" + echo "MLC_HOST_CPU_TLB_SIZE=$MLC_HOST_CPU_TLB_SIZE" + echo "MLC_HOST_CPU_CFLUSH_SIZE=$MLC_HOST_CPU_CFLUSH_SIZE" + echo "MLC_HOST_CACHE_ALIGNMENT_SIZE=$MLC_HOST_CACHE_ALIGNMENT_SIZE" + echo "MLC_HOST_POWER_MANAGEMENT=$MLC_HOST_POWER_MANAGEMENT" } >> tmp-run-env.out fi diff --git a/script/detect-os/customize.py b/script/detect-os/customize.py index 82ee00d7a..fb87b1220 100644 --- a/script/detect-os/customize.py +++ b/script/detect-os/customize.py @@ -11,9 +11,9 @@ def preprocess(i): os_info = i['os_info'] # Update env variables - env['CM_HOST_OS_TYPE'] = os_info['platform'] - env['CM_HOST_OS_BITS'] = os_info['bits'] - env['CM_HOST_PYTHON_BITS'] = os_info['python_bits'] + env['MLC_HOST_OS_TYPE'] = os_info['platform'] + env['MLC_HOST_OS_BITS'] = os_info['bits'] + env['MLC_HOST_PYTHON_BITS'] = os_info['python_bits'] # Update state (demo) # state['os_info'] = os_info @@ -43,7 +43,7 @@ def postprocess(i): for _dir in dirs: if _dir != '' and _dir not in lib_dir: lib_dir.append(_dir) - env['+CM_HOST_OS_DEFAULT_LIBRARY_PATH'] = lib_dir + env['+MLC_HOST_OS_DEFAULT_LIBRARY_PATH'] = lib_dir r = utils.load_txt(file_name='tmp-run.out', check_if_exists=True, @@ -56,54 +56,54 @@ def postprocess(i): state['os_uname_machine'] = s[0] state['os_uname_all'] = s[1] - env['CM_HOST_OS_MACHINE'] = state['os_uname_machine'] + env['MLC_HOST_OS_MACHINE'] = state['os_uname_machine'] else: - env['CM_HOST_OS_PACKAGE_MANAGER'] = "choco" + env['MLC_HOST_OS_PACKAGE_MANAGER'] = "choco" import platform - env['CM_HOST_SYSTEM_NAME'] = platform.node() - - 
if 'CM_HOST_OS_PACKAGE_MANAGER' not in env: - if env.get('CM_HOST_OS_FLAVOR', '') == "ubuntu" or \ - "debian" in env.get('CM_HOST_OS_FLAVOR_LIKE', '') or \ - env.get('CM_HOST_OS_FLAVOR', '') == "debian": - env['CM_HOST_OS_PACKAGE_MANAGER'] = "apt" - if env.get('CM_HOST_OS_FLAVOR', '') == "rhel" or \ - "rhel" in env.get('CM_HOST_OS_FLAVOR_LIKE', ''): - env['CM_HOST_OS_PACKAGE_MANAGER'] = "dnf" - if env.get('CM_HOST_OS_FLAVOR', '') == "amzn": - env['CM_HOST_OS_PACKAGE_MANAGER'] = "yum" - if env.get('CM_HOST_OS_FLAVOR_LIKE', '') == "arch": - env['CM_HOST_OS_PACKAGE_MANAGER'] = "arch" - if env.get('CM_HOST_OS_FLAVOR', '') == "macos": - env['CM_HOST_OS_PACKAGE_MANAGER'] = "brew" - if env.get('CM_HOST_OS_FLAVOR', '') == "sles": - env['CM_HOST_OS_PACKAGE_MANAGER'] = "zypper" - if env.get('CM_HOST_OS_PACKAGE_MANAGER', '') == "apt": - env['CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "DEBIAN_FRONTEND=noninteractive apt-get install -y" - env['CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "apt-get update -y" - elif env.get('CM_HOST_OS_PACKAGE_MANAGER', '') == "dnf": - env['CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "dnf install -y" - env['CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "dnf update -y" - elif env.get('CM_HOST_OS_PACKAGE_MANAGER', '') == "pacman": - env['CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "pacman -Sy --noconfirm" - env['CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "pacman -Syu" - elif env.get('CM_HOST_OS_PACKAGE_MANAGER', '') == "brew": - env['CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "brew install" - env['CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "brew update" - elif env.get('CM_HOST_OS_PACKAGE_MANAGER', '') == "yum": - env['CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "yum install -y --skip-broken" - env['CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "yum update -y" - elif env.get('CM_HOST_OS_PACKAGE_MANAGER', '') == "zypper": - env['CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "zypper install -y" - env['CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "zypper update -y" - elif env.get('CM_HOST_OS_PACKAGE_MANAGER', '') == "choco": - env['CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "choco install -y" - env['CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "choco upgrade -y" + env['MLC_HOST_SYSTEM_NAME'] = platform.node() + + if 'MLC_HOST_OS_PACKAGE_MANAGER' not in env: + if env.get('MLC_HOST_OS_FLAVOR', '') == "ubuntu" or \ + "debian" in env.get('MLC_HOST_OS_FLAVOR_LIKE', '') or \ + env.get('MLC_HOST_OS_FLAVOR', '') == "debian": + env['MLC_HOST_OS_PACKAGE_MANAGER'] = "apt" + if env.get('MLC_HOST_OS_FLAVOR', '') == "rhel" or \ + "rhel" in env.get('MLC_HOST_OS_FLAVOR_LIKE', ''): + env['MLC_HOST_OS_PACKAGE_MANAGER'] = "dnf" + if env.get('MLC_HOST_OS_FLAVOR', '') == "amzn": + env['MLC_HOST_OS_PACKAGE_MANAGER'] = "yum" + if env.get('MLC_HOST_OS_FLAVOR_LIKE', '') == "arch": + env['MLC_HOST_OS_PACKAGE_MANAGER'] = "arch" + if env.get('MLC_HOST_OS_FLAVOR', '') == "macos": + env['MLC_HOST_OS_PACKAGE_MANAGER'] = "brew" + if env.get('MLC_HOST_OS_FLAVOR', '') == "sles": + env['MLC_HOST_OS_PACKAGE_MANAGER'] = "zypper" + if env.get('MLC_HOST_OS_PACKAGE_MANAGER', '') == "apt": + env['MLC_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "DEBIAN_FRONTEND=noninteractive apt-get install -y" + env['MLC_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "apt-get update -y" + elif env.get('MLC_HOST_OS_PACKAGE_MANAGER', '') == "dnf": + env['MLC_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "dnf install -y" + env['MLC_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "dnf update -y" + elif env.get('MLC_HOST_OS_PACKAGE_MANAGER', '') == "pacman": + 
env['MLC_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "pacman -Sy --noconfirm" + env['MLC_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "pacman -Syu" + elif env.get('MLC_HOST_OS_PACKAGE_MANAGER', '') == "brew": + env['MLC_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "brew install" + env['MLC_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "brew update" + elif env.get('MLC_HOST_OS_PACKAGE_MANAGER', '') == "yum": + env['MLC_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "yum install -y --skip-broken" + env['MLC_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "yum update -y" + elif env.get('MLC_HOST_OS_PACKAGE_MANAGER', '') == "zypper": + env['MLC_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "zypper install -y" + env['MLC_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "zypper update -y" + elif env.get('MLC_HOST_OS_PACKAGE_MANAGER', '') == "choco": + env['MLC_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "choco install -y" + env['MLC_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "choco upgrade -y" if os.path.exists("/.dockerenv"): - env['CM_RUN_INSIDE_DOCKER'] = "yes" + env['MLC_RUN_INSIDE_DOCKER'] = "yes" return {'return': 0} diff --git a/script/detect-os/meta.yaml b/script/detect-os/meta.yaml index 12266a02a..8f9070cac 100644 --- a/script/detect-os/meta.yaml +++ b/script/detect-os/meta.yaml @@ -5,21 +5,21 @@ category: Platform information clean_files: - tmp-run.out new_env_keys: -- CM_HOST_OS_* -- +CM_HOST_OS_* -- CM_HOST_PLATFORM_* -- CM_HOST_PYTHON_* -- CM_HOST_SYSTEM_NAME -- CM_RUN_STATE_DOCKER +- MLC_HOST_OS_* +- +MLC_HOST_OS_* +- MLC_HOST_PLATFORM_* +- MLC_HOST_PYTHON_* +- MLC_HOST_SYSTEM_NAME +- MLC_RUN_STATE_DOCKER - +PATH new_state_keys: - os_uname_* post_deps: - enable_if_env: - CM_HOST_OS_TYPE: + MLC_HOST_OS_TYPE: - windows skip_if_env: - CM_WINDOWS_SYS_UTILS_MIN_INSTALL: + MLC_WINDOWS_SYS_UTILS_MIN_INSTALL: - 'yes' tags: get,sys-utils-min tags: diff --git a/script/detect-os/run.sh b/script/detect-os/run.sh index 9e3c56cd9..a3ff776e2 100644 --- a/script/detect-os/run.sh +++ b/script/detect-os/run.sh @@ -3,19 +3,19 @@ uname -m > tmp-run.out uname -a >> tmp-run.out if test -f "/etc/os-release"; then - echo "CM_HOST_OS_FLAVOR=`cat /etc/os-release | grep '^ID=' | cut -d'=' -f2 | cut -d'"' -f2 | tr '[:upper:]' '[:lower:]'`" >> tmp-run-env.out - echo "CM_HOST_OS_FLAVOR_LIKE=`cat /etc/os-release | grep '^ID_LIKE=' | cut -d'=' -f2 | cut -d'"' -f2 | tr '[:upper:]' '[:lower:]'`" >> tmp-run-env.out - echo "CM_HOST_OS_VERSION=`cat /etc/os-release | grep '^VERSION_ID=' | cut -d'=' -f2 | cut -d'"' -f2 | cut -d'"' -f2 | tr '[:upper:]' '[:lower:]'`" >> tmp-run-env.out - echo "CM_HOST_OS_KERNEL_VERSION=`uname -r`" >> tmp-run-env.out - echo "CM_HOST_PLATFORM_FLAVOR=`uname -m`" >> tmp-run-env.out - echo "CM_HOST_OS_GLIBC_VERSION=`ldd --version | tail -n +1 | head -1 | cut -d')' -f2 | cut -d' ' -f2`" >> tmp-run-env.out + echo "MLC_HOST_OS_FLAVOR=`cat /etc/os-release | grep '^ID=' | cut -d'=' -f2 | cut -d'"' -f2 | tr '[:upper:]' '[:lower:]'`" >> tmp-run-env.out + echo "MLC_HOST_OS_FLAVOR_LIKE=`cat /etc/os-release | grep '^ID_LIKE=' | cut -d'=' -f2 | cut -d'"' -f2 | tr '[:upper:]' '[:lower:]'`" >> tmp-run-env.out + echo "MLC_HOST_OS_VERSION=`cat /etc/os-release | grep '^VERSION_ID=' | cut -d'=' -f2 | cut -d'"' -f2 | cut -d'"' -f2 | tr '[:upper:]' '[:lower:]'`" >> tmp-run-env.out + echo "MLC_HOST_OS_KERNEL_VERSION=`uname -r`" >> tmp-run-env.out + echo "MLC_HOST_PLATFORM_FLAVOR=`uname -m`" >> tmp-run-env.out + echo "MLC_HOST_OS_GLIBC_VERSION=`ldd --version | tail -n +1 | head -1 | cut -d')' -f2 | cut -d' ' -f2`" >> tmp-run-env.out else - CM_HOST_OS_FLAVOR=`sw_vers | grep 
'^ProductName:' | cut -f2 | tr '[:upper:]' '[:lower:]'` - if [ -z ${CM_HOST_OS_FLAVOR} ]; then - CM_HOST_OS_FLAVOR=`sw_vers | grep '^ProductName:' | cut -f3 | tr '[:upper:]' '[:lower:]' ` + MLC_HOST_OS_FLAVOR=`sw_vers | grep '^ProductName:' | cut -f2 | tr '[:upper:]' '[:lower:]'` + if [ -z ${MLC_HOST_OS_FLAVOR} ]; then + MLC_HOST_OS_FLAVOR=`sw_vers | grep '^ProductName:' | cut -f3 | tr '[:upper:]' '[:lower:]' ` fi - echo "CM_HOST_OS_FLAVOR=${CM_HOST_OS_FLAVOR}" >> tmp-run-env.out - echo "CM_HOST_OS_VERSION=`sw_vers | grep '^ProductVersion:' | cut -f2 | tr '[:upper:]' '[:lower:]' `" >> tmp-run-env.out - echo "CM_HOST_OS_KERNEL_VERSION=`uname -r`" >> tmp-run-env.out - echo "CM_HOST_PLATFORM_FLAVOR=`uname -m `" >> tmp-run-env.out + echo "MLC_HOST_OS_FLAVOR=${MLC_HOST_OS_FLAVOR}" >> tmp-run-env.out + echo "MLC_HOST_OS_VERSION=`sw_vers | grep '^ProductVersion:' | cut -f2 | tr '[:upper:]' '[:lower:]' `" >> tmp-run-env.out + echo "MLC_HOST_OS_KERNEL_VERSION=`uname -r`" >> tmp-run-env.out + echo "MLC_HOST_PLATFORM_FLAVOR=`uname -m `" >> tmp-run-env.out fi diff --git a/script/detect-sudo/customize.py b/script/detect-sudo/customize.py index bc5e92296..844a2b328 100644 --- a/script/detect-sudo/customize.py +++ b/script/detect-sudo/customize.py @@ -16,19 +16,19 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') if os.geteuid() == 0: - env['CM_SUDO'] = '' # root user does not need sudo - env['CM_SUDO_USER'] = "yes" + env['MLC_SUDO'] = '' # root user does not need sudo + env['MLC_SUDO_USER'] = "yes" else: if can_execute_sudo_without_password() or prompt_sudo() == 0: - env['CM_SUDO_USER'] = "yes" - env['CM_SUDO'] = 'sudo' + env['MLC_SUDO_USER'] = "yes" + env['MLC_SUDO'] = 'sudo' else: - env['CM_SUDO_USER'] = "no" - env['CM_SUDO'] = '' + env['MLC_SUDO_USER'] = "no" + env['MLC_SUDO'] = '' return {'return': 0} diff --git a/script/detect-sudo/meta.yaml b/script/detect-sudo/meta.yaml index 64b60a5f6..6e3cb29fb 100644 --- a/script/detect-sudo/meta.yaml +++ b/script/detect-sudo/meta.yaml @@ -9,7 +9,7 @@ cache: false category: DevOps automation new_env_keys: - - CM_SUDO* + - MLC_SUDO* tags: - detect diff --git a/script/detect-sudo/run.sh b/script/detect-sudo/run.sh index 3a584c10c..821adb3f9 100644 --- a/script/detect-sudo/run.sh +++ b/script/detect-sudo/run.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,11 +17,11 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi } #Add your run commands here... 
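The detect-sudo `preprocess()` above reduces to a three-way decision: root needs no `sudo` prefix, a passwordless-or-prompted sudoer gets `MLC_SUDO=sudo`, and everyone else gets an empty prefix. A condensed restatement of that logic (`can_sudo` is a placeholder standing in for the script's `can_execute_sudo_without_password()`/`prompt_sudo()` checks):

```python
# Condensed sketch of detect-sudo's decision tree; can_sudo is a
# hypothetical stand-in for the script's password checks. Unix-only.
import os

def resolve_sudo(can_sudo):
    if os.geteuid() == 0:
        return {'MLC_SUDO': '', 'MLC_SUDO_USER': 'yes'}    # root: no prefix
    if can_sudo():
        return {'MLC_SUDO': 'sudo', 'MLC_SUDO_USER': 'yes'}
    return {'MLC_SUDO': '', 'MLC_SUDO_USER': 'no'}

print(resolve_sudo(lambda: True))
```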
-# run "$CM_RUN_CMD" +# run "$MLC_RUN_CMD" diff --git a/script/download-and-extract/README-extra.md b/script/download-and-extract/README-extra.md index 6573ab848..91d015ee4 100644 --- a/script/download-and-extract/README-extra.md +++ b/script/download-and-extract/README-extra.md @@ -4,7 +4,7 @@ ### Use internal CM download function -This script will use [internal CM download function](https://github.com/mlcommons/ck/blob/master/cm-mlops/automation/utils/module.py#L157) +This script will use [internal CM download function](https://github.com/mlcommons/ck/blob/master/mlc-mlops/automation/utils/module.py#L157) to download and extract a given file to the current directory: ```bash @@ -26,25 +26,25 @@ cmr "dae file _extract _url.https://cKnowledge.org/test/coco-2017-val-annotation ```json "new_env": { - "CM_DOWNLOAD_DOWNLOADED_PATH": "D:\\Work\\coco-2017-val-annotations.zip", - "CM_EXTRACT_EXTRACTED_PATH": "D:\\Work", - "CM_GET_DEPENDENT_CACHED_PATH": "D:\\Work" + "MLC_DOWNLOAD_DOWNLOADED_PATH": "D:\\Work\\coco-2017-val-annotations.zip", + "MLC_EXTRACT_EXTRACTED_PATH": "D:\\Work", + "MLC_GET_DEPENDENT_CACHED_PATH": "D:\\Work" }, ``` #### Input flags and equivalent environment variables -* `--url` or `--env.CM_DAE_URL` - URL to download file -* `--verify` or `--env.CM_VERIFY_SSL` - set to `no` to skip SSL certificate verification -* `--download_path` or `--store` or `--env.CM_DOWNLOAD_PATH` - where to download file -* `--local_path` or `--from` or `--env.CM_DOWNLOAD_LOCAL_FILE_PATH` - where to take file from instead of downloading -* `--extract_path` or `--to` or `--env.CM_EXTRACT_PATH` - where to extract files (--input should have full path then) -* `--extra_folder` or `--env.CM_EXTRACT_TO_FOLDER` - extra directory when extracting file (to avoid messing up current directory) +* `--url` or `--env.MLC_DAE_URL` - URL to download file +* `--verify` or `--env.MLC_VERIFY_SSL` - set to `no` to skip SSL certificate verification +* `--download_path` or `--store` or `--env.MLC_DOWNLOAD_PATH` - where to download file +* `--local_path` or `--from` or `--env.MLC_DOWNLOAD_LOCAL_FILE_PATH` - where to take file from instead of downloading +* `--extract_path` or `--to` or `--env.MLC_EXTRACT_PATH` - where to extract files (--input should have full path then) +* `--extra_folder` or `--env.MLC_EXTRACT_TO_FOLDER` - extra directory when extracting file (to avoid messing up current directory) #### Variations -* `_keep` or `_no-remove-extracted` or `--env.CM_EXTRACT_REMOVE_EXTRACTED=no` - keep archive file (it will be deleted by default) +* `_keep` or `_no-remove-extracted` or `--env.MLC_EXTRACT_REMOVE_EXTRACTED=no` - keep archive file (it will be deleted by default) @@ -63,25 +63,25 @@ cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-anno ### Check MD5SUM ```bash -cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 +cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 ``` ### Save to another file ```bash -cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.CM_DOWNLOAD_FILENAME=xyz --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 +cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.MLC_DOWNLOAD_FILENAME=xyz 
--env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 ``` ### Save to another place ```bash -cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --download_path=D:\Work --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 +cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --download_path=D:\Work --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 ``` ### Reuse local file instead of downloading a file ```bash -cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --local_path="D:\Work\coco-2017-val-annotations.zip" --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 -j +cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --local_path="D:\Work\coco-2017-val-annotations.zip" --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 -j ``` @@ -89,7 +89,7 @@ cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-anno ```bash -cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 --store=$HOME/dir1 --to=$HOME/dir2 +cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 --store=$HOME/dir1 --to=$HOME/dir2 ``` @@ -100,7 +100,7 @@ You can use all above commands with `--force_cache` and `--extra_cache_tags` fla In such case, a given file will be downloaded to CM cache and can be reused by other CM scripts and workflows: ```bash -cmr "dae file _extract _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 --force_cache --extra_cache_tags=coco,2017,val,annotations +cmr "dae file _extract _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 --force_cache --extra_cache_tags=coco,2017,val,annotations ``` You can find it in CM cache using extra cache tags as follows: diff --git a/script/download-and-extract/customize.py b/script/download-and-extract/customize.py index 32cac6476..86f8b1d77 100644 --- a/script/download-and-extract/customize.py +++ b/script/download-and-extract/customize.py @@ -13,7 +13,7 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') if i['input'].get('force_cache'): extra_cache_tags = i['input'].get('extra_cache_tags', '') @@ -33,20 +33,20 @@ def preprocess(i): if r['return'] > 0: return r - if env.get('CM_DOWNLOAD_LOCAL_FILE_PATH'): - filepath = env['CM_DOWNLOAD_LOCAL_FILE_PATH'] + if env.get('MLC_DOWNLOAD_LOCAL_FILE_PATH'): + filepath = env['MLC_DOWNLOAD_LOCAL_FILE_PATH'] if not os.path.exists(filepath): return {'return': 1, 'error': 'Local file {} doesn\'t exist'.format(filepath)} - env['CM_EXTRACT_REMOVE_EXTRACTED'] = 'no' + env['MLC_EXTRACT_REMOVE_EXTRACTED'] = 'no' - if str(env.get('CM_DAE_EXTRACT_DOWNLOADED') + if str(env.get('MLC_DAE_EXTRACT_DOWNLOADED') ).lower() in ["yes", "1", "true"]: - if (env.get('CM_EXTRACT_FINAL_ENV_NAME', '') == '') and ( - env.get('CM_DAE_FINAL_ENV_NAME', '') != ''): - env['CM_EXTRACT_FINAL_ENV_NAME'] = env['CM_DAE_FINAL_ENV_NAME'] + if 
(env.get('MLC_EXTRACT_FINAL_ENV_NAME', '') == '') and ( + env.get('MLC_DAE_FINAL_ENV_NAME', '') != ''): + env['MLC_EXTRACT_FINAL_ENV_NAME'] = env['MLC_DAE_FINAL_ENV_NAME'] return {'return': 0} @@ -54,20 +54,20 @@ def preprocess(i): def postprocess(i): env = i['env'] - filepath = env.get('CM_EXTRACT_EXTRACTED_PATH', '') + filepath = env.get('MLC_EXTRACT_EXTRACTED_PATH', '') if filepath == '': - filepath = env.get('CM_DOWNLOAD_DOWNLOADED_PATH', '') + filepath = env.get('MLC_DOWNLOAD_DOWNLOADED_PATH', '') if filepath == '': return {'return': 1, - 'error': 'No extracted path set in "CM_EXTRACT_EXTRACTED_PATH"'} + 'error': 'No extracted path set in "MLC_EXTRACT_EXTRACTED_PATH"'} if not os.path.exists(filepath): return {'return': 1, 'error': 'Extracted path doesn\'t exist: {}'.format(filepath)} - if env.get('CM_DAE_FINAL_ENV_NAME'): - env[env['CM_DAE_FINAL_ENV_NAME']] = filepath + if env.get('MLC_DAE_FINAL_ENV_NAME'): + env[env['MLC_DAE_FINAL_ENV_NAME']] = filepath - env['CM_GET_DEPENDENT_CACHED_PATH'] = filepath + env['MLC_GET_DEPENDENT_CACHED_PATH'] = filepath return {'return': 0} diff --git a/script/download-and-extract/meta.yaml b/script/download-and-extract/meta.yaml index bd4003b91..1d02bfbae 100644 --- a/script/download-and-extract/meta.yaml +++ b/script/download-and-extract/meta.yaml @@ -7,51 +7,51 @@ category: DevOps automation deps: [] input_description: {} input_mapping: - download_path: CM_DOWNLOAD_PATH - extra_folder: CM_EXTRACT_TO_FOLDER - extract_path: CM_EXTRACT_PATH - from: CM_DOWNLOAD_LOCAL_FILE_PATH - local_path: CM_DOWNLOAD_LOCAL_FILE_PATH - store: CM_DOWNLOAD_PATH - to: CM_EXTRACT_PATH - url: CM_DAE_URL - verify: CM_VERIFY_SSL + download_path: MLC_DOWNLOAD_PATH + extra_folder: MLC_EXTRACT_TO_FOLDER + extract_path: MLC_EXTRACT_PATH + from: MLC_DOWNLOAD_LOCAL_FILE_PATH + local_path: MLC_DOWNLOAD_LOCAL_FILE_PATH + store: MLC_DOWNLOAD_PATH + to: MLC_EXTRACT_PATH + url: MLC_DAE_URL + verify: MLC_VERIFY_SSL new_env_keys: -- CM_DOWNLOAD_DOWNLOADED_PATH* -- CM_EXTRACT_EXTRACTED_PATH -- <<>> -- <<>> -- <<>> -- CM_GET_DEPENDENT_CACHED_PATH +- MLC_DOWNLOAD_DOWNLOADED_PATH* +- MLC_EXTRACT_EXTRACTED_PATH +- <<>> +- <<>> +- <<>> +- MLC_GET_DEPENDENT_CACHED_PATH new_state_keys: [] post_deps: [] posthook_deps: - enable_if_env: - CM_DAE_EXTRACT_DOWNLOADED: + MLC_DAE_EXTRACT_DOWNLOADED: - 'yes' - 'True' names: - extract-script tags: extract,file force_env_keys: - - CM_OUTDIRNAME + - MLC_OUTDIRNAME update_tags_from_env_with_prefix: _path.: - - CM_DOWNLOAD_DOWNLOADED_PATH - - CM_TORRENT_DOWNLOADED_PATH + - MLC_DOWNLOAD_DOWNLOADED_PATH + - MLC_TORRENT_DOWNLOADED_PATH prehook_deps: - names: - download-script skip_if_env: - CM_DAE_DOWNLOAD_USING_TORRENT: + MLC_DAE_DOWNLOAD_USING_TORRENT: - 'yes' - 'True' tags: download,file force_env_keys: - - CM_OUTDIRNAME + - MLC_OUTDIRNAME update_tags_from_env_with_prefix: _url.: - - CM_DAE_URL + - MLC_DAE_URL tags: - dae - file @@ -72,7 +72,7 @@ variations: group: download-tool extract: env: - CM_DAE_EXTRACT_DOWNLOADED: 'yes' + MLC_DAE_EXTRACT_DOWNLOADED: 'yes' gdown: add_deps: download-script: @@ -81,11 +81,11 @@ variations: keep: default: 'true' env: - CM_EXTRACT_REMOVE_EXTRACTED: 'no' + MLC_EXTRACT_REMOVE_EXTRACTED: 'no' group: keep no-remove-extracted: env: - CM_EXTRACT_REMOVE_EXTRACTED: 'no' + MLC_EXTRACT_REMOVE_EXTRACTED: 'no' group: keep rclone: add_deps: @@ -94,21 +94,21 @@ variations: group: download-tool torrent: env: - CM_DAE_DOWNLOAD_USING_TORRENT: 'yes' - CM_TORRENT_DOWNLOADED_FILE_NAME: <<>> - CM_TORRENT_DOWNLOADED_PATH_ENV_KEY: CM_DAE_FILEPATH - 
CM_TORRENT_WAIT_UNTIL_COMPLETED: 'yes' + MLC_DAE_DOWNLOAD_USING_TORRENT: 'yes' + MLC_TORRENT_DOWNLOADED_FILE_NAME: <<>> + MLC_TORRENT_DOWNLOADED_PATH_ENV_KEY: MLC_DAE_FILEPATH + MLC_TORRENT_WAIT_UNTIL_COMPLETED: 'yes' group: download-tool new_env_keys: - - CM_TORRENT_DOWNLOADED_PATH + - MLC_TORRENT_DOWNLOADED_PATH prehook_deps: - tags: download,torrent update_tags_from_env_with_prefix: _torrent.: - - CM_DAE_TORRENT_PATH + - MLC_DAE_TORRENT_PATH url.#: env: - CM_DAE_URL: '#' + MLC_DAE_URL: '#' wget: add_deps: download-script: diff --git a/script/download-and-extract/tests/download-and-extract-file.bat b/script/download-and-extract/tests/download-and-extract-file.bat index 0688461de..ecb28f0c7 100644 --- a/script/download-and-extract/tests/download-and-extract-file.bat +++ b/script/download-and-extract/tests/download-and-extract-file.bat @@ -1 +1 @@ -cmr "download-and-extract file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _wget _extract _no-remove-extracted" -j --env.CM_VERIFY_SSL=False --env.CM_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1 +cmr "download-and-extract file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _wget _extract _no-remove-extracted" -j --env.MLC_VERIFY_SSL=False --env.MLC_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1 diff --git a/script/download-and-extract/tests/download-and-extract-file2.bat b/script/download-and-extract/tests/download-and-extract-file2.bat index af344b927..f2806eb1a 100644 --- a/script/download-and-extract/tests/download-and-extract-file2.bat +++ b/script/download-and-extract/tests/download-and-extract-file2.bat @@ -1 +1 @@ -cmr "download-and-extract file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _wget _extract" -j --env.CM_VERIFY_SSL=False --env.CM_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1 +cmr "download-and-extract file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _wget _extract" -j --env.MLC_VERIFY_SSL=False --env.MLC_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1 diff --git a/script/download-file/README-extra.md b/script/download-file/README-extra.md index b645b0419..09c9d065b 100644 --- a/script/download-file/README-extra.md +++ b/script/download-file/README-extra.md @@ -4,7 +4,7 @@ ### Use internal CM download function -This script will use [internal CM download function](https://github.com/mlcommons/ck/blob/master/cm-mlops/automation/utils/module.py#L157) +This script will use [internal CM download function](https://github.com/mlcommons/ck/blob/master/mlc-mlops/automation/utils/module.py#L157) to download a given file to the current directory: ```bash @@ -26,17 +26,17 @@ cmr "download file" _url.https://cKnowledge.org/test/coco-2017-val-annotations.z ```json "new_env": { - "CM_DOWNLOAD_DOWNLOADED_PATH": "D:\\Downloads\\coco-2017-val-annotations.zip", - "CM_GET_DEPENDENT_CACHED_PATH": "D:\\Downloads\\coco-2017-val-annotations.zip" + "MLC_DOWNLOAD_DOWNLOADED_PATH": "D:\\Downloads\\coco-2017-val-annotations.zip", + "MLC_GET_DEPENDENT_CACHED_PATH": "D:\\Downloads\\coco-2017-val-annotations.zip" }, ``` #### Input flags and equivalent environment variables -* `--url` or `--env.CM_DAE_URL` - URL to download file -* `--download_path` or `--to` or `--env.CM_DOWNLOAD_PATH` - where to download file -* `--local_path` or `--from` or `--env.CM_DOWNLOAD_LOCAL_FILE_PATH` - where to take 
file from instead of downloading -* `--verify` or `--env.CM_VERIFY_SSL` - set to `no` to skip SSL certificate verification +* `--url` or `--env.MLC_DAE_URL` - URL to download file +* `--download_path` or `--to` or `--env.MLC_DOWNLOAD_PATH` - where to download file +* `--local_path` or `--from` or `--env.MLC_DOWNLOAD_LOCAL_FILE_PATH` - where to take file from instead of downloading +* `--verify` or `--env.MLC_VERIFY_SSL` - set to `no` to skip SSL certificate verification ### Use wget without SSL certificate verification @@ -54,32 +54,32 @@ cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zi ### Check MD5SUM ```bash -cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 +cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 ``` ### Save to another file ```bash -cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.CM_DOWNLOAD_FILENAME=xyz --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 +cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.MLC_DOWNLOAD_FILENAME=xyz --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 ``` ### Save to another place ```bash -cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --download_path=D:\Work --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 +cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --download_path=D:\Work --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 ``` ### Reuse local file instead of downloading a file ```bash -cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --local_path="D:\Work\coco-2017-val-annotations.zip" --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 -j +cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --local_path="D:\Work\coco-2017-val-annotations.zip" --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 -j ``` Output environment variables produced by this CM script: ```json "new_env": { - "CM_DOWNLOAD_DOWNLOADED_PATH": "D:\\Work\\coco-2017-val-annotations.zip", - "CM_GET_DEPENDENT_CACHED_PATH": "D:\\Work\\coco-2017-val-annotations.zip" + "MLC_DOWNLOAD_DOWNLOADED_PATH": "D:\\Work\\coco-2017-val-annotations.zip", + "MLC_GET_DEPENDENT_CACHED_PATH": "D:\\Work\\coco-2017-val-annotations.zip" } ``` @@ -89,7 +89,7 @@ You can use all above commands with `--force_cache` and `--extra_cache_tags` fla In such case, a given file will be downloaded to CM cache and can be reused by other CM scripts and workflows: ```bash -cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 --force_cache --extra_cache_tags=coco,2017,val,annotations +cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 --force_cache --extra_cache_tags=coco,2017,val,annotations ``` You can find it in CM cache using extra cache tags as follows: diff --git a/script/download-file/customize.py b/script/download-file/customize.py index fc237635c..f72034d5f 100644 --- 
a/script/download-file/customize.py +++ b/script/download-file/customize.py @@ -33,10 +33,10 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') - tool = env.get('CM_DOWNLOAD_TOOL', '') - pre_clean = env.get('CM_PRE_DOWNLOAD_CLEAN', False) + tool = env.get('MLC_DOWNLOAD_TOOL', '') + pre_clean = env.get('MLC_PRE_DOWNLOAD_CLEAN', False) # xsep = '^&^&' if windows else '&&' xsep = '&&' @@ -52,26 +52,26 @@ def preprocess(i): else: del_cmd = "rm -f" - if env.get('CM_DOWNLOAD_LOCAL_FILE_PATH'): - filepath = env['CM_DOWNLOAD_LOCAL_FILE_PATH'] + if env.get('MLC_DOWNLOAD_LOCAL_FILE_PATH'): + filepath = env['MLC_DOWNLOAD_LOCAL_FILE_PATH'] if not os.path.exists(filepath): return {'return': 1, 'error': 'Local file {} doesn\'t exist'.format(filepath)} - env['CM_DOWNLOAD_CMD'] = "" + env['MLC_DOWNLOAD_CMD'] = "" - env['CM_DOWNLOAD_FILENAME'] = filepath + env['MLC_DOWNLOAD_FILENAME'] = filepath if not quiet: print('') print('Using local file: {}'.format(filepath)) else: - url = env.get('CM_DOWNLOAD_URL', '') + url = env.get('MLC_DOWNLOAD_URL', '') if url == '': return { - 'return': 1, 'error': 'please specify URL using --url={URL} or --env.CM_DOWNLOAD_URL={URL}'} + 'return': 1, 'error': 'please specify URL using --url={URL} or --env.MLC_DOWNLOAD_URL={URL}'} print('') print('Downloading from {}'.format(url)) @@ -82,42 +82,42 @@ def preprocess(i): else: url = url.replace('&', '\\&') - extra_download_options = env.get('CM_DOWNLOAD_EXTRA_OPTIONS', '') + extra_download_options = env.get('MLC_DOWNLOAD_EXTRA_OPTIONS', '') - verify_ssl = env.get('CM_VERIFY_SSL', "True") + verify_ssl = env.get('MLC_VERIFY_SSL', "True") if str(verify_ssl).lower() in [ "no", "false"] or os_info['platform'] == 'windows': verify_ssl = False else: verify_ssl = True - if env.get('CM_DOWNLOAD_PATH', '') != '': - download_path = env['CM_DOWNLOAD_PATH'] + if env.get('MLC_DOWNLOAD_PATH', '') != '': + download_path = env['MLC_DOWNLOAD_PATH'] if not os.path.exists(download_path): os.makedirs(download_path, exist_ok=True) os.chdir(download_path) - if env.get('CM_DOWNLOAD_FILENAME', '') == '': - urltail = os.path.basename(env['CM_DOWNLOAD_URL']) - urlhead = os.path.dirname(env['CM_DOWNLOAD_URL']) + if env.get('MLC_DOWNLOAD_FILENAME', '') == '': + urltail = os.path.basename(env['MLC_DOWNLOAD_URL']) + urlhead = os.path.dirname(env['MLC_DOWNLOAD_URL']) if "." in urltail and "/" in urlhead: # Check if ? 
after filename j = urltail.find('?') if j > 0: urltail = urltail[:j] - env['CM_DOWNLOAD_FILENAME'] = urltail - elif env.get('CM_DOWNLOAD_TOOL', '') == "rclone": - env['CM_DOWNLOAD_FILENAME'] = urltail + env['MLC_DOWNLOAD_FILENAME'] = urltail + elif env.get('MLC_DOWNLOAD_TOOL', '') == "rclone": + env['MLC_DOWNLOAD_FILENAME'] = urltail else: - env['CM_DOWNLOAD_FILENAME'] = "index.html" + env['MLC_DOWNLOAD_FILENAME'] = "index.html" if tool == "cmutil": cmutil_require_download = 0 - if env.get('CM_DOWNLOAD_CHECKSUM_FILE', '') != '': + if env.get('MLC_DOWNLOAD_CHECKSUM_FILE', '') != '': if os_info['platform'] == 'windows': - checksum_cmd = f"cd {q}{filepath}{q} {xsep} md5sum -c{x_c} {x}{escape_special_chars(env['CM_DOWNLOAD_CHECKSUM_FILE'])}" + checksum_cmd = f"cd {q}{filepath}{q} {xsep} md5sum -c{x_c} {x}{escape_special_chars(env['MLC_DOWNLOAD_CHECKSUM_FILE'])}" else: - checksum_cmd = f"cd {q}{filepath}{q} {xsep} md5sum -c{x_c} {x}{q}{env['CM_DOWNLOAD_CHECKSUM_FILE']}{q}" + checksum_cmd = f"cd {q}{filepath}{q} {xsep} md5sum -c{x_c} {x}{q}{env['MLC_DOWNLOAD_CHECKSUM_FILE']}{q}" checksum_result = subprocess.run( checksum_cmd, cwd=f'{q}{filepath}{q}', @@ -125,45 +125,45 @@ def preprocess(i): text=True, shell=True, env=subprocess_env) - elif env.get('CM_DOWNLOAD_CHECKSUM', '') != '': + elif env.get('MLC_DOWNLOAD_CHECKSUM', '') != '': if os_info['platform'] == 'windows': - checksum_cmd = f"echo {env.get('CM_DOWNLOAD_CHECKSUM')} {x}{escape_special_chars(env['CM_DOWNLOAD_FILENAME'])} | md5sum -c{x_c} -" + checksum_cmd = f"echo {env.get('MLC_DOWNLOAD_CHECKSUM')} {x}{escape_special_chars(env['MLC_DOWNLOAD_FILENAME'])} | md5sum -c{x_c} -" else: - checksum_cmd = f"echo {env.get('CM_DOWNLOAD_CHECKSUM')} {x}{q}{env['CM_DOWNLOAD_FILENAME']}{q} | md5sum -c{x_c} -" + checksum_cmd = f"echo {env.get('MLC_DOWNLOAD_CHECKSUM')} {x}{q}{env['MLC_DOWNLOAD_FILENAME']}{q} | md5sum -c{x_c} -" checksum_result = subprocess.run( checksum_cmd, capture_output=True, text=True, shell=True, env=subprocess_env) - if env.get('CM_DOWNLOAD_CHECKSUM_FILE', '') != '' or env.get( - 'CM_DOWNLOAD_CHECKSUM', '') != '': + if env.get('MLC_DOWNLOAD_CHECKSUM_FILE', '') != '' or env.get( + 'MLC_DOWNLOAD_CHECKSUM', '') != '': # print(checksum_result) #for debugging if "checksum did not match" in checksum_result.stderr.lower(): computed_checksum = subprocess.run( - f"md5sum {env['CM_DOWNLOAD_FILENAME']}", + f"md5sum {env['MLC_DOWNLOAD_FILENAME']}", capture_output=True, text=True, shell=True).stdout.split(" ")[0] print( - f"WARNING: File already present, mismatch between original checksum({env.get('CM_DOWNLOAD_CHECKSUM')}) and computed checksum({computed_checksum}). Deleting the already present file and downloading new.") + f"WARNING: File already present, mismatch between original checksum({env.get('MLC_DOWNLOAD_CHECKSUM')}) and computed checksum({computed_checksum}). Deleting the already present file and downloading new.") try: - os.remove(env['CM_DOWNLOAD_FILENAME']) + os.remove(env['MLC_DOWNLOAD_FILENAME']) print( - f"File {env['CM_DOWNLOAD_FILENAME']} deleted successfully.") + f"File {env['MLC_DOWNLOAD_FILENAME']} deleted successfully.") except PermissionError: return { - "return": 1, "error": f"Permission denied to delete file {env['CM_DOWNLOAD_FILENAME']}."} + "return": 1, "error": f"Permission denied to delete file {env['MLC_DOWNLOAD_FILENAME']}."} cmutil_require_download = 1 elif "no such file" in checksum_result.stderr.lower(): - # print(f"No file {env['CM_DOWNLOAD_FILENAME']}. 
Downloading through cmutil.") + # print(f"No file {env['MLC_DOWNLOAD_FILENAME']}. Downloading through cmutil.") cmutil_require_download = 1 elif checksum_result.returncode > 0: return { "return": 1, "error": f"Error while checking checksum: {checksum_result.stderr}"} else: print( - f"File {env['CM_DOWNLOAD_FILENAME']} already present, original checksum and computed checksum matches! Skipping Download..") + f"File {env['MLC_DOWNLOAD_FILENAME']} already present, original checksum and computed checksum matches! Skipping Download..") else: cmutil_require_download = 1 @@ -176,7 +176,7 @@ def preprocess(i): if r['return'] == 0: break oldurl = url - url = env.get('CM_DOWNLOAD_URL' + str(i), '') + url = env.get('MLC_DOWNLOAD_URL' + str(i), '') if url == '': break print(f"Download from {oldurl} failed, trying from {url}") @@ -184,123 +184,123 @@ def preprocess(i): if r['return'] > 0: return r - env['CM_DOWNLOAD_CMD'] = "" - env['CM_DOWNLOAD_FILENAME'] = r['filename'] + env['MLC_DOWNLOAD_CMD'] = "" + env['MLC_DOWNLOAD_FILENAME'] = r['filename'] elif tool == "wget": - if env.get('CM_DOWNLOAD_FILENAME', '') != '': - extra_download_options += f" --tries=3 -O {q}{env['CM_DOWNLOAD_FILENAME']}{q} " + if env.get('MLC_DOWNLOAD_FILENAME', '') != '': + extra_download_options += f" --tries=3 -O {q}{env['MLC_DOWNLOAD_FILENAME']}{q} " if not verify_ssl: extra_download_options += "--no-check-certificate " - env['CM_DOWNLOAD_CMD'] = f"wget -nc {extra_download_options} {url}" + env['MLC_DOWNLOAD_CMD'] = f"wget -nc {extra_download_options} {url}" for i in range(1, 5): - url = env.get('CM_DOWNLOAD_URL' + str(i), '') + url = env.get('MLC_DOWNLOAD_URL' + str(i), '') if url == '': break - env['CM_DOWNLOAD_CMD'] += f" || (({del_cmd} {env['CM_DOWNLOAD_FILENAME']} || true) && wget -nc {extra_download_options} {url})" - print(env['CM_DOWNLOAD_CMD']) + env['MLC_DOWNLOAD_CMD'] += f" || (({del_cmd} {env['MLC_DOWNLOAD_FILENAME']} || true) && wget -nc {extra_download_options} {url})" + print(env['MLC_DOWNLOAD_CMD']) elif tool == "curl": - if env.get('CM_DOWNLOAD_FILENAME', '') != '': - extra_download_options += f" --output {q}{env['CM_DOWNLOAD_FILENAME']}{q} " + if env.get('MLC_DOWNLOAD_FILENAME', '') != '': + extra_download_options += f" --output {q}{env['MLC_DOWNLOAD_FILENAME']}{q} " - env['CM_DOWNLOAD_CMD'] = f"curl {extra_download_options} {url}" + env['MLC_DOWNLOAD_CMD'] = f"curl {extra_download_options} {url}" for i in range(1, 5): - url = env.get('CM_DOWNLOAD_URL' + str(i), '') + url = env.get('MLC_DOWNLOAD_URL' + str(i), '') if url == '': break - env['CM_DOWNLOAD_CMD'] += f" || (({del_cmd} {env['CM_DOWNLOAD_FILENAME']} || true) && curl {extra_download_options} {url})" + env['MLC_DOWNLOAD_CMD'] += f" || (({del_cmd} {env['MLC_DOWNLOAD_FILENAME']} || true) && curl {extra_download_options} {url})" elif tool == "gdown": if not verify_ssl: extra_download_options += "--no-check-certificate " - env['CM_DOWNLOAD_CMD'] = f"gdown {extra_download_options} {url}" + env['MLC_DOWNLOAD_CMD'] = f"gdown {extra_download_options} {url}" for i in range(1, 5): - url = env.get('CM_DOWNLOAD_URL' + str(i), '') + url = env.get('MLC_DOWNLOAD_URL' + str(i), '') if url == '': break - env['CM_DOWNLOAD_CMD'] += f" || (({del_cmd} {env['CM_DOWNLOAD_FILENAME']} || true) && gdown {extra_download_options} {url})" + env['MLC_DOWNLOAD_CMD'] += f" || (({del_cmd} {env['MLC_DOWNLOAD_FILENAME']} || true) && gdown {extra_download_options} {url})" elif tool == "rclone": # keeping this for backward compatibility. 
Ideally should be done # via get,rclone-config script - if env.get('CM_RCLONE_CONFIG_CMD', '') != '': - env['CM_DOWNLOAD_CONFIG_CMD'] = env['CM_RCLONE_CONFIG_CMD'] - rclone_copy_using = env.get('CM_RCLONE_COPY_USING', 'sync') + if env.get('MLC_RCLONE_CONFIG_CMD', '') != '': + env['MLC_DOWNLOAD_CONFIG_CMD'] = env['MLC_RCLONE_CONFIG_CMD'] + rclone_copy_using = env.get('MLC_RCLONE_COPY_USING', 'sync') if rclone_copy_using == "sync": pre_clean = False - if env["CM_HOST_OS_TYPE"] == "windows": + if env["MLC_HOST_OS_TYPE"] == "windows": # have to modify the variable from url to temp_url if it is # going to be used anywhere after this point url = url.replace("%", "%%") - temp_download_file = env['CM_DOWNLOAD_FILENAME'].replace( + temp_download_file = env['MLC_DOWNLOAD_FILENAME'].replace( "%", "%%") - env['CM_DOWNLOAD_CMD'] = f"rclone {rclone_copy_using} {q}{url}{q} {q}{os.path.join(os.getcwd(), temp_download_file)}{q} -P --error-on-no-transfer" + env['MLC_DOWNLOAD_CMD'] = f"rclone {rclone_copy_using} {q}{url}{q} {q}{os.path.join(os.getcwd(), temp_download_file)}{q} -P --error-on-no-transfer" else: - env['CM_DOWNLOAD_CMD'] = f"rclone {rclone_copy_using} {q}{url}{q} {q}{os.path.join(os.getcwd(), env['CM_DOWNLOAD_FILENAME'])}{q} -P --error-on-no-transfer" + env['MLC_DOWNLOAD_CMD'] = f"rclone {rclone_copy_using} {q}{url}{q} {q}{os.path.join(os.getcwd(), env['MLC_DOWNLOAD_FILENAME'])}{q} -P --error-on-no-transfer" - filename = env['CM_DOWNLOAD_FILENAME'] - env['CM_DOWNLOAD_DOWNLOADED_FILENAME'] = filename + filename = env['MLC_DOWNLOAD_FILENAME'] + env['MLC_DOWNLOAD_DOWNLOADED_FILENAME'] = filename - filename = os.path.basename(env['CM_DOWNLOAD_FILENAME']) + filename = os.path.basename(env['MLC_DOWNLOAD_FILENAME']) filepath = os.path.join(os.getcwd(), filename) - env['CM_DOWNLOAD_DOWNLOADED_PATH'] = filepath + env['MLC_DOWNLOAD_DOWNLOADED_PATH'] = filepath # verify checksum if file already present - if env.get('CM_DOWNLOAD_CHECKSUM_FILE', '') != '': - env['CM_DOWNLOAD_CHECKSUM_CMD'] = f"cd {q}{filepath}{q} {xsep} md5sum -c {x_c} {x}{q}{env['CM_DOWNLOAD_CHECKSUM_FILE']}{q}" - elif env.get('CM_DOWNLOAD_CHECKSUM', '') != '': + if env.get('MLC_DOWNLOAD_CHECKSUM_FILE', '') != '': + env['MLC_DOWNLOAD_CHECKSUM_CMD'] = f"cd {q}{filepath}{q} {xsep} md5sum -c {x_c} {x}{q}{env['MLC_DOWNLOAD_CHECKSUM_FILE']}{q}" + elif env.get('MLC_DOWNLOAD_CHECKSUM', '') != '': if os_info['platform'] == 'windows': - env['CM_DOWNLOAD_CHECKSUM_CMD'] = "echo {} {}{} | md5sum {} -c -".format( - env.get('CM_DOWNLOAD_CHECKSUM'), x, escape_special_chars( - env['CM_DOWNLOAD_FILENAME']), x_c) + env['MLC_DOWNLOAD_CHECKSUM_CMD'] = "echo {} {}{} | md5sum {} -c -".format( + env.get('MLC_DOWNLOAD_CHECKSUM'), x, escape_special_chars( + env['MLC_DOWNLOAD_FILENAME']), x_c) else: - env['CM_DOWNLOAD_CHECKSUM_CMD'] = "echo {} {}{}{}{} | md5sum {} -c -".format( - env.get('CM_DOWNLOAD_CHECKSUM'), x, q, env['CM_DOWNLOAD_FILENAME'], q, x_c) + env['MLC_DOWNLOAD_CHECKSUM_CMD'] = "echo {} {}{}{}{} | md5sum {} -c -".format( + env.get('MLC_DOWNLOAD_CHECKSUM'), x, q, env['MLC_DOWNLOAD_FILENAME'], q, x_c) for i in range(1, 5): - if env.get('CM_DOWNLOAD_CHECKSUM' + str(i), '') == '': + if env.get('MLC_DOWNLOAD_CHECKSUM' + str(i), '') == '': break if os_info['platform'] == 'windows': - env['CM_DOWNLOAD_CHECKSUM_CMD'] += " || echo {} {}{} | md5sum {} -c -".format( + env['MLC_DOWNLOAD_CHECKSUM_CMD'] += " || echo {} {}{} | md5sum {} -c -".format( env.get( - 'CM_DOWNLOAD_CHECKSUM' + + 'MLC_DOWNLOAD_CHECKSUM' + str(i)), x, escape_special_chars( - 
env['CM_DOWNLOAD_FILENAME']), + env['MLC_DOWNLOAD_FILENAME']), x_c) else: - env['CM_DOWNLOAD_CHECKSUM_CMD'] += " || echo {} {}{}{}{} | md5sum {} -c -".format( + env['MLC_DOWNLOAD_CHECKSUM_CMD'] += " || echo {} {}{}{}{} | md5sum {} -c -".format( env.get( - 'CM_DOWNLOAD_CHECKSUM' + + 'MLC_DOWNLOAD_CHECKSUM' + str(i)), x, q, - env['CM_DOWNLOAD_FILENAME'].replace( + env['MLC_DOWNLOAD_FILENAME'].replace( "%", "%%"), q, x_c) - # print(env['CM_DOWNLOAD_CHECKSUM_CMD']) + # print(env['MLC_DOWNLOAD_CHECKSUM_CMD']) else: - env['CM_DOWNLOAD_CHECKSUM_CMD'] = "" + env['MLC_DOWNLOAD_CHECKSUM_CMD'] = "" if not pre_clean: - env['CM_PRE_DOWNLOAD_CMD'] = '' + env['MLC_PRE_DOWNLOAD_CMD'] = '' if os_info['platform'] == 'windows' and env.get( - 'CM_DOWNLOAD_CMD', '') != '': - env['CM_DOWNLOAD_CMD'] = escape_special_chars( - env['CM_DOWNLOAD_CMD'], tool) + 'MLC_DOWNLOAD_CMD', '') != '': + env['MLC_DOWNLOAD_CMD'] = escape_special_chars( + env['MLC_DOWNLOAD_CMD'], tool) if pre_clean: - env['CM_PRE_DOWNLOAD_CLEAN_CMD'] = "del /Q %CM_DOWNLOAD_FILENAME%" + env['MLC_PRE_DOWNLOAD_CLEAN_CMD'] = "del /Q %MLC_DOWNLOAD_FILENAME%" # Check that if empty CMD, should add "" - for x in ['CM_DOWNLOAD_CMD', 'CM_DOWNLOAD_CHECKSUM_CMD']: + for x in ['MLC_DOWNLOAD_CMD', 'MLC_DOWNLOAD_CHECKSUM_CMD']: env[x + '_USED'] = 'YES' if env.get(x, '') != '' else 'NO' else: - env['CM_PRE_DOWNLOAD_CLEAN_CMD'] = "rm -f {}".format( - env['CM_DOWNLOAD_FILENAME']) + env['MLC_PRE_DOWNLOAD_CLEAN_CMD'] = "rm -f {}".format( + env['MLC_DOWNLOAD_FILENAME']) return {'return': 0} @@ -311,23 +311,23 @@ def postprocess(i): env = i['env'] - filepath = env['CM_DOWNLOAD_DOWNLOADED_PATH'] + filepath = env['MLC_DOWNLOAD_DOWNLOADED_PATH'] if not os.path.exists(filepath): return { - 'return': 1, 'error': 'Downloaded path {} does not exist. Probably CM_DOWNLOAD_FILENAME is not set and CM_DOWNLOAD_URL given is not pointing to a file'.format(filepath)} + 'return': 1, 'error': 'Downloaded path {} does not exist. 
Probably MLC_DOWNLOAD_FILENAME is not set and MLC_DOWNLOAD_URL given is not pointing to a file'.format(filepath)} - if env.get('CM_DOWNLOAD_RENAME_FILE', '') != '': + if env.get('MLC_DOWNLOAD_RENAME_FILE', '') != '': file_dir = os.path.dirname(filepath) - new_file_name = env['CM_DOWNLOAD_RENAME_FILE'] + new_file_name = env['MLC_DOWNLOAD_RENAME_FILE'] new_file_path = os.path.join(file_dir, new_file_name) os.rename(filepath, new_file_path) filepath = new_file_path - if env.get('CM_DOWNLOAD_FINAL_ENV_NAME', '') != '': - env[env['CM_DOWNLOAD_FINAL_ENV_NAME']] = filepath + if env.get('MLC_DOWNLOAD_FINAL_ENV_NAME', '') != '': + env[env['MLC_DOWNLOAD_FINAL_ENV_NAME']] = filepath - env['CM_GET_DEPENDENT_CACHED_PATH'] = filepath + env['MLC_GET_DEPENDENT_CACHED_PATH'] = filepath # Since may change directory, check if need to clean some temporal files automation.clean_some_tmp_files({'env': env}) diff --git a/script/download-file/meta.yaml b/script/download-file/meta.yaml index aedf0ab58..832275c1c 100644 --- a/script/download-file/meta.yaml +++ b/script/download-file/meta.yaml @@ -5,30 +5,30 @@ cache: false can_force_cache: true category: DevOps automation default_env: - CM_RCLONE_COPY_USING: sync + MLC_RCLONE_COPY_USING: sync deps: - tags: detect,os - enable_if_env: - CM_DOWNLOAD_CHECKSUM: + MLC_DOWNLOAD_CHECKSUM: - 'on' - CM_HOST_OS_FLAVOR: + MLC_HOST_OS_FLAVOR: - macos tags: get,generic-sys-util,_md5sha1sum input_description: {} input_mapping: - download_path: CM_DOWNLOAD_PATH - from: CM_DOWNLOAD_LOCAL_FILE_PATH - local_path: CM_DOWNLOAD_LOCAL_FILE_PATH - md5sum: CM_DOWNLOAD_CHECKSUM - output_file: CM_DOWNLOAD_FILENAME - store: CM_DOWNLOAD_PATH - url: CM_DOWNLOAD_URL - verify: CM_VERIFY_SSL - verify_ssl: CM_VERIFY_SSL + download_path: MLC_DOWNLOAD_PATH + from: MLC_DOWNLOAD_LOCAL_FILE_PATH + local_path: MLC_DOWNLOAD_LOCAL_FILE_PATH + md5sum: MLC_DOWNLOAD_CHECKSUM + output_file: MLC_DOWNLOAD_FILENAME + store: MLC_DOWNLOAD_PATH + url: MLC_DOWNLOAD_URL + verify: MLC_VERIFY_SSL + verify_ssl: MLC_VERIFY_SSL new_env_keys: -- CM_DOWNLOAD_DOWNLOADED_PATH -- <<>> -- CM_GET_DEPENDENT_CACHED_PATH +- MLC_DOWNLOAD_DOWNLOADED_PATH +- <<>> +- MLC_GET_DEPENDENT_CACHED_PATH new_state_keys: [] post_deps: [] prehook_deps: [] @@ -42,38 +42,38 @@ variations: cmutil: default: true env: - CM_DOWNLOAD_TOOL: cmutil + MLC_DOWNLOAD_TOOL: cmutil group: download-tool curl: default_env: - CM_DOWNLOAD_CURL_EMULATE_BROWSER: 'no' + MLC_DOWNLOAD_CURL_EMULATE_BROWSER: 'no' env: - CM_DOWNLOAD_TOOL: curl + MLC_DOWNLOAD_TOOL: curl group: download-tool gdown: deps: - tags: get,generic-python-lib,_package.gdown env: - CM_DOWNLOAD_TOOL: gdown + MLC_DOWNLOAD_TOOL: gdown group: download-tool rclone: deps: - tags: get,rclone - enable_if_env: - CM_RCLONE_CONFIG_NAME: + MLC_RCLONE_CONFIG_NAME: - 'on' tags: get,rclone-config update_tags_from_env_with_prefix: _: - - CM_RCLONE_CONFIG_NAME + - MLC_RCLONE_CONFIG_NAME env: - CM_DOWNLOAD_TOOL: rclone + MLC_DOWNLOAD_TOOL: rclone group: download-tool url.#: env: - CM_DOWNLOAD_URL: '#' + MLC_DOWNLOAD_URL: '#' wget: env: - CM_DOWNLOAD_TOOL: wget + MLC_DOWNLOAD_TOOL: wget group: download-tool versions: {} diff --git a/script/download-file/run.bat b/script/download-file/run.bat index 5449c9ecf..dcd7603c9 100644 --- a/script/download-file/run.bat +++ b/script/download-file/run.bat @@ -5,33 +5,33 @@ rem If MD5 is wrong, download again! rem Next line allows ERRORLEVEL inside if statements! 
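+rem Sketch of why delayed expansion is needed (standard cmd.exe semantics):
+rem inside a parenthesized block, %ERRORLEVEL% is expanded once at parse time,
+rem while !ERRORLEVEL! is expanded when the line actually runs, so checks like
+rem   IF !ERRORLEVEL! NEQ 0 EXIT !ERRORLEVEL!
+rem below see the result of the preceding cmd /c call.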
setlocal enabledelayedexpansion -if NOT "%CM_DOWNLOAD_CONFIG_CMD%" == "" ( +if NOT "%MLC_DOWNLOAD_CONFIG_CMD%" == "" ( echo. - echo %CM_DOWNLOAD_CONFIG_CMD% + echo %MLC_DOWNLOAD_CONFIG_CMD% echo. - %CM_DOWNLOAD_CONFIG_CMD% + %MLC_DOWNLOAD_CONFIG_CMD% IF !ERRORLEVEL! NEQ 0 EXIT !ERRORLEVEL! ) set require_download=1 -if not "%CM_DOWNLOAD_LOCAL_FILE_PATH%" == "" ( +if not "%MLC_DOWNLOAD_LOCAL_FILE_PATH%" == "" ( set require_download=0 ) -if "%CM_DOWNLOAD_TOOL%" == "cmutil" ( +if "%MLC_DOWNLOAD_TOOL%" == "cmutil" ( set require_download=0 ) -if exist "%CM_DOWNLOAD_DOWNLOADED_PATH%" ( - if "%CM_DOWNLOAD_CHECKSUM_CMD_USED%" == "YES" ( +if exist "%MLC_DOWNLOAD_DOWNLOADED_PATH%" ( + if "%MLC_DOWNLOAD_CHECKSUM_CMD_USED%" == "YES" ( echo. - echo %CM_DOWNLOAD_CHECKSUM_CMD% - cmd /c %CM_DOWNLOAD_CHECKSUM_CMD% + echo %MLC_DOWNLOAD_CHECKSUM_CMD% + cmd /c %MLC_DOWNLOAD_CHECKSUM_CMD% IF !ERRORLEVEL! NEQ 0 ( - if NOT "%CM_DOWNLOAD_LOCAL_FILE_PATH%" == "" exit 1 - if "%CM_DOWNLOAD_CMD_USED%" == "NO" exit 1 + if NOT "%MLC_DOWNLOAD_LOCAL_FILE_PATH%" == "" exit 1 + if "%MLC_DOWNLOAD_CMD_USED%" == "NO" exit 1 ) else ( set require_download=0 ) @@ -40,17 +40,17 @@ if exist "%CM_DOWNLOAD_DOWNLOADED_PATH%" ( if "!require_download!" == "1" ( echo. - cmd /c %CM_PRE_DOWNLOAD_CLEAN_CMD% + cmd /c %MLC_PRE_DOWNLOAD_CLEAN_CMD% echo. - echo %CM_DOWNLOAD_CMD% - cmd /c %CM_DOWNLOAD_CMD% + echo %MLC_DOWNLOAD_CMD% + cmd /c %MLC_DOWNLOAD_CMD% IF !ERRORLEVEL! NEQ 0 EXIT !ERRORLEVEL! - if "%CM_DOWNLOAD_CHECKSUM_CMD_USED%" == "YES" ( + if "%MLC_DOWNLOAD_CHECKSUM_CMD_USED%" == "YES" ( echo. - echo %CM_DOWNLOAD_CHECKSUM_CMD% - cmd /c %CM_DOWNLOAD_CHECKSUM_CMD% + echo %MLC_DOWNLOAD_CHECKSUM_CMD% + cmd /c %MLC_DOWNLOAD_CHECKSUM_CMD% IF !ERRORLEVEL! NEQ 0 EXIT 1 ) ) diff --git a/script/download-file/run.sh b/script/download-file/run.sh index c02e44f00..b737ea34e 100644 --- a/script/download-file/run.sh +++ b/script/download-file/run.sh @@ -1,32 +1,32 @@ #!/bin/bash # Execute config command if it exists -if [[ -n ${CM_DOWNLOAD_CONFIG_CMD} ]]; then - echo -e "\nExecuting: ${CM_DOWNLOAD_CONFIG_CMD}" - eval "${CM_DOWNLOAD_CONFIG_CMD}" || exit $? +if [[ -n ${MLC_DOWNLOAD_CONFIG_CMD} ]]; then + echo -e "\nExecuting: ${MLC_DOWNLOAD_CONFIG_CMD}" + eval "${MLC_DOWNLOAD_CONFIG_CMD}" || exit $? fi # Assume download is required by default require_download=1 # No download needed if a local file path is specified or the tool is 'cmutil' -if [[ -n "${CM_DOWNLOAD_LOCAL_FILE_PATH}" || ${CM_DOWNLOAD_TOOL} == "cmutil" ]]; then +if [[ -n "${MLC_DOWNLOAD_LOCAL_FILE_PATH}" || ${MLC_DOWNLOAD_TOOL} == "cmutil" ]]; then require_download=0 fi # If the file exists, check the checksum if necessary -if [[ -e "${CM_DOWNLOAD_DOWNLOADED_PATH}" && -n "${CM_DOWNLOAD_CHECKSUM_CMD}" ]]; then - echo -e "\nChecking checksum: ${CM_DOWNLOAD_CHECKSUM_CMD}" - eval "${CM_DOWNLOAD_CHECKSUM_CMD}" +if [[ -e "${MLC_DOWNLOAD_DOWNLOADED_PATH}" && -n "${MLC_DOWNLOAD_CHECKSUM_CMD}" ]]; then + echo -e "\nChecking checksum: ${MLC_DOWNLOAD_CHECKSUM_CMD}" + eval "${MLC_DOWNLOAD_CHECKSUM_CMD}" if [[ $? -ne 0 ]]; then # If the checksum fails, handle errors based on whether the file is local - if [[ -n "${CM_DOWNLOAD_LOCAL_FILE_PATH}" ]]; then + if [[ -n "${MLC_DOWNLOAD_LOCAL_FILE_PATH}" ]]; then echo "Checksum failed for local file. Exiting." exit 1 else echo "Checksum failed. Marking for re-download." 
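+            # Flow sketch (based on customize.py above): setting
+            # MLC_PRE_DOWNLOAD_CLEAN=true makes the download block below run
+            # MLC_PRE_DOWNLOAD_CLEAN_CMD (typically `rm -f <filename>`)
+            # before the download command is retried.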
- CM_PRE_DOWNLOAD_CLEAN=true + MLC_PRE_DOWNLOAD_CLEAN=true fi else # If checksum succeeds, no download is required @@ -39,20 +39,20 @@ if [[ ${require_download} == 1 ]]; then echo "" # If a pre-download clean command is specified and needed, execute it - if [[ -n "${CM_PRE_DOWNLOAD_CLEAN}" && "${CM_PRE_DOWNLOAD_CLEAN,,}" != "false" ]]; then - echo "Executing pre-download clean: ${CM_PRE_DOWNLOAD_CLEAN_CMD}" - eval "${CM_PRE_DOWNLOAD_CLEAN_CMD}" || exit $? + if [[ -n "${MLC_PRE_DOWNLOAD_CLEAN}" && "${MLC_PRE_DOWNLOAD_CLEAN,,}" != "false" ]]; then + echo "Executing pre-download clean: ${MLC_PRE_DOWNLOAD_CLEAN_CMD}" + eval "${MLC_PRE_DOWNLOAD_CLEAN_CMD}" || exit $? fi # Execute the download command - echo "Downloading: ${CM_DOWNLOAD_CMD}" - eval "${CM_DOWNLOAD_CMD}" || exit $? + echo "Downloading: ${MLC_DOWNLOAD_CMD}" + eval "${MLC_DOWNLOAD_CMD}" || exit $? fi # Verify checksum again if necessary -if [[ ${CM_DOWNLOAD_TOOL} == "cmutil" || ${require_download} == 1 ]]; then - if [[ -n "${CM_DOWNLOAD_CHECKSUM_CMD}" ]]; then - echo -e "\nVerifying checksum after download: ${CM_DOWNLOAD_CHECKSUM_CMD}" - eval "${CM_DOWNLOAD_CHECKSUM_CMD}" || exit $? +if [[ ${MLC_DOWNLOAD_TOOL} == "cmutil" || ${require_download} == 1 ]]; then + if [[ -n "${MLC_DOWNLOAD_CHECKSUM_CMD}" ]]; then + echo -e "\nVerifying checksum after download: ${MLC_DOWNLOAD_CHECKSUM_CMD}" + eval "${MLC_DOWNLOAD_CHECKSUM_CMD}" || exit $? fi fi diff --git a/script/download-file/tests/download-file.bat b/script/download-file/tests/download-file.bat index 442150282..dbfcfc5ce 100644 --- a/script/download-file/tests/download-file.bat +++ b/script/download-file/tests/download-file.bat @@ -1,2 +1,2 @@ -cmr "download file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _wget" -j --env.CM_VERIFY_SSL=False --env.CM_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1 +cmr "download file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _wget" -j --env.MLC_VERIFY_SSL=False --env.MLC_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1 diff --git a/script/download-file/tests/download-file2.bat b/script/download-file/tests/download-file2.bat index 2032bc177..6d919c8c1 100644 --- a/script/download-file/tests/download-file2.bat +++ b/script/download-file/tests/download-file2.bat @@ -1 +1 @@ -cmr "download file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _cmutil" -j --env.CM_VERIFY_SSL=False --env.CM_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1 +cmr "download file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _cmutil" -j --env.MLC_VERIFY_SSL=False --env.MLC_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1 diff --git a/script/download-torrent/customize.py b/script/download-torrent/customize.py index e194e7ff7..0006d5680 100644 --- a/script/download-torrent/customize.py +++ b/script/download-torrent/customize.py @@ -12,10 +12,10 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') - if not env.get('CM_TORRENT_DOWNLOADED_FILE_NAME'): - return {'return': 1, 'error': 'CM_TORRENT_DOWNLOADED_FILE_NAME is not set'} + if not env.get('MLC_TORRENT_DOWNLOADED_FILE_NAME'): + return {'return': 1, 'error': 'MLC_TORRENT_DOWNLOADED_FILE_NAME is not set'} return {'return': 0} @@ -24,14 +24,14 @@ def postprocess(i): env = i['env'] 
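+    # Contract sketch (assumption): run.sh writes
+    #   MLC_TORRENT_DOWNLOADED_DIR and MLC_TORRENT_DOWNLOADED_NAME
+    # into tmp-run-env.out, so postprocess can join them into the final path.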
torrent_downloaded_path = os.path.join( - env['CM_TORRENT_DOWNLOADED_DIR'], - env['CM_TORRENT_DOWNLOADED_NAME']) - env['CM_TORRENT_DOWNLOADED_PATH'] = torrent_downloaded_path + env['MLC_TORRENT_DOWNLOADED_DIR'], + env['MLC_TORRENT_DOWNLOADED_NAME']) + env['MLC_TORRENT_DOWNLOADED_PATH'] = torrent_downloaded_path - if 'CM_TORRENT_DOWNLOADED_PATH_ENV_KEY' in env: - key = env['CM_TORRENT_DOWNLOADED_PATH_ENV_KEY'] + if 'MLC_TORRENT_DOWNLOADED_PATH_ENV_KEY' in env: + key = env['MLC_TORRENT_DOWNLOADED_PATH_ENV_KEY'] env[key] = torrent_downloaded_path - env['CM_GET_DEPENDENT_CACHED_PATH'] = torrent_downloaded_path + env['MLC_GET_DEPENDENT_CACHED_PATH'] = torrent_downloaded_path return {'return': 0} diff --git a/script/download-torrent/meta.yaml b/script/download-torrent/meta.yaml index d2e83b8fe..632f007f6 100644 --- a/script/download-torrent/meta.yaml +++ b/script/download-torrent/meta.yaml @@ -4,15 +4,15 @@ automation_uid: 5b4e0237da074764 cache: true category: DevOps automation default_env: - CM_TORRENT_WAIT_UNTIL_COMPLETED: 'no' + MLC_TORRENT_WAIT_UNTIL_COMPLETED: 'no' deps: - tags: get,generic-sys-util,_transmission input_description: {} input_mapping: - wait: CM_TORRENT_WAIT_UNTIL_COMPLETED + wait: MLC_TORRENT_WAIT_UNTIL_COMPLETED new_env_keys: -- CM_TORRENT_DOWNLOADED_PATH -- <<>> +- MLC_TORRENT_DOWNLOADED_PATH +- <<>> new_state_keys: [] post_deps: [] posthook_deps: [] @@ -25,5 +25,5 @@ uid: 69b752c5618e45bb variations: torrent.#: env: - CM_TORRENT_FILE: '#' + MLC_TORRENT_FILE: '#' versions: {} diff --git a/script/download-torrent/run.sh b/script/download-torrent/run.sh index c3d639ff1..c00afb96d 100644 --- a/script/download-torrent/run.sh +++ b/script/download-torrent/run.sh @@ -1,7 +1,7 @@ #!/bin/bash chmod 777 ${PWD} -#transmission-remote --no-auth --download-dir ${PWD} -a ${CM_TORRENT_FILE} -cmd="transmission-remote --download-dir ${PWD} -a ${CM_TORRENT_FILE}" +#transmission-remote --no-auth --download-dir ${PWD} -a ${MLC_TORRENT_FILE} +cmd="transmission-remote --download-dir ${PWD} -a ${MLC_TORRENT_FILE}" echo $cmd eval $cmd test $? -eq 0 || exit $? @@ -11,10 +11,10 @@ echo $cmd eval $cmd test $? -eq 0 || exit $? -if [[ ${CM_TORRENT_WAIT_UNTIL_COMPLETED} == "yes" ]]; then +if [[ ${MLC_TORRENT_WAIT_UNTIL_COMPLETED} == "yes" ]]; then while true; do - out=`transmission-remote -l |grep ${CM_TORRENT_DOWNLOADED_FILE_NAME} | grep "100%"` + out=`transmission-remote -l |grep ${MLC_TORRENT_DOWNLOADED_FILE_NAME} | grep "100%"` if [[ -z $out ]]; then transmission-remote -l sleep 6 @@ -24,11 +24,11 @@ if [[ ${CM_TORRENT_WAIT_UNTIL_COMPLETED} == "yes" ]]; then done fi -id=`transmission-remote -l |grep ${CM_TORRENT_DOWNLOADED_FILE_NAME} |tr -s ' ' | cut -d' ' -f2` +id=`transmission-remote -l |grep ${MLC_TORRENT_DOWNLOADED_FILE_NAME} |tr -s ' ' | cut -d' ' -f2` test $? -eq 0 || exit $? location=`transmission-remote -t${id} -i |grep Location |cut -d':' -f2 |tr -d ' '` test $? -eq 0 || exit $? -echo "CM_TORRENT_DOWNLOADED_DIR=$location">> tmp-run-env.out +echo "MLC_TORRENT_DOWNLOADED_DIR=$location">> tmp-run-env.out name=`transmission-remote -t${id} -i |grep Name |cut -d':' -f2 |tr -d ' '` test $? -eq 0 || exit $? 
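+# Parsing sketch (assumed `transmission-remote -t<id> -i` output), e.g.:
+#   Name: ubuntu-22.04.iso
+#   Location: /path/to/downloads
+# The grep/cut/tr pipelines strip the labels; the echoes into
+# tmp-run-env.out hand the values back to customize.py.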
-echo "CM_TORRENT_DOWNLOADED_NAME=$name">> tmp-run-env.out +echo "MLC_TORRENT_DOWNLOADED_NAME=$name">> tmp-run-env.out diff --git a/script/draw-graph-from-json-data/customize.py b/script/draw-graph-from-json-data/customize.py index 8fafad78f..77affa7e3 100644 --- a/script/draw-graph-from-json-data/customize.py +++ b/script/draw-graph-from-json-data/customize.py @@ -12,15 +12,15 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') - env['CM_RUN_CMD'] = f"""{env['CM_PYTHON_BIN_WITH_PATH']} {os.path.join(env['CM_TMP_CURRENT_SCRIPT_PATH'],"process-cm-deps.py")} {env['CM_JSON_INPUT_FILE']}""" + env['MLC_RUN_CMD'] = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(env['MLC_TMP_CURRENT_SCRIPT_PATH'],"process-mlc-deps.py")} {env['MLC_JSON_INPUT_FILE']}""" - if env.get('CM_OUTPUT_IMAGE_PATH', '') != '': - env['CM_RUN_CMD'] += f""" --output_image {env['CM_OUTPUT_IMAGE_PATH']}""" + if env.get('MLC_OUTPUT_IMAGE_PATH', '') != '': + env['MLC_RUN_CMD'] += f""" --output_image {env['MLC_OUTPUT_IMAGE_PATH']}""" - if env.get('CM_OUTPUT_MERMAID_PATH', '') != '': - env['CM_RUN_CMD'] += f""" --output_mermaid {env['CM_OUTPUT_MERMAID_PATH']}""" + if env.get('MLC_OUTPUT_MERMAID_PATH', '') != '': + env['MLC_RUN_CMD'] += f""" --output_mermaid {env['MLC_OUTPUT_MERMAID_PATH']}""" return {'return': 0} diff --git a/script/draw-graph-from-json-data/meta.yaml b/script/draw-graph-from-json-data/meta.yaml index eb1d1a157..971fe1027 100644 --- a/script/draw-graph-from-json-data/meta.yaml +++ b/script/draw-graph-from-json-data/meta.yaml @@ -9,9 +9,9 @@ tags: - from-json-data uid: 2ed1ebcb6be548fd input_mapping: - input: CM_JSON_INPUT_FILE - json_input_file: CM_JSON_INPUT_FILE - output_image_path: CM_OUTPUT_IMAGE_PATH + input: MLC_JSON_INPUT_FILE + json_input_file: MLC_JSON_INPUT_FILE + output_image_path: MLC_OUTPUT_IMAGE_PATH deps: - tags: get,python3 names: diff --git a/script/draw-graph-from-json-data/run.sh b/script/draw-graph-from-json-data/run.sh index 4c23c380e..32cf4d51e 100644 --- a/script/draw-graph-from-json-data/run.sh +++ b/script/draw-graph-from-json-data/run.sh @@ -1,17 +1,17 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency echo "Running: " -echo "${CM_RUN_CMD}" +echo "${MLC_RUN_CMD}" echo "" -if [[ ${CM_FAKE_RUN} != "yes" ]]; then - eval "${CM_RUN_CMD}" +if [[ ${MLC_FAKE_RUN} != "yes" ]]; then + eval "${MLC_RUN_CMD}" test $? 
-eq 0 || exit 1 fi diff --git a/script/dump-pip-freeze/customize.py b/script/dump-pip-freeze/customize.py index 9c2940d1e..617e387df 100644 --- a/script/dump-pip-freeze/customize.py +++ b/script/dump-pip-freeze/customize.py @@ -12,11 +12,11 @@ def preprocess(i): automation = i['automation'] - if env.get('CM_DUMP_RAW_PIP_FREEZE_FILE_PATH', '') == '': - env['CM_DUMP_RAW_PIP_FREEZE_FILE_PATH'] = os.path.join( + if env.get('MLC_DUMP_RAW_PIP_FREEZE_FILE_PATH', '') == '': + env['MLC_DUMP_RAW_PIP_FREEZE_FILE_PATH'] = os.path.join( os.getcwd(), "tmp-pip-freeze") - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') return {'return': 0} @@ -31,7 +31,7 @@ def postprocess(i): automation = i['automation'] pip_freeze = {} - pip_freeze_file = env['CM_DUMP_RAW_PIP_FREEZE_FILE_PATH'] + pip_freeze_file = env['MLC_DUMP_RAW_PIP_FREEZE_FILE_PATH'] if not os.path.isfile(pip_freeze_file): # If was not created, sometimes issues on Windows # There is another workaround diff --git a/script/dump-pip-freeze/dump.py b/script/dump-pip-freeze/dump.py index c6d4dc2ea..9fe8e3ebd 100644 --- a/script/dump-pip-freeze/dump.py +++ b/script/dump-pip-freeze/dump.py @@ -2,7 +2,7 @@ from pip._internal.operations import freeze pip_freeze_out = os.environ.get( - 'CM_DUMP_RAW_PIP_FREEZE_FILE_PATH', + 'MLC_DUMP_RAW_PIP_FREEZE_FILE_PATH', 'tmp-pip-freeze') if os.path.isfile(pip_freeze_out): diff --git a/script/dump-pip-freeze/run.bat b/script/dump-pip-freeze/run.bat index b323ddc22..18f6b56e5 100644 --- a/script/dump-pip-freeze/run.bat +++ b/script/dump-pip-freeze/run.bat @@ -1,4 +1,4 @@ -if not "%CM_FAKE_RUN%" == "yes" ( - %CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\dump.py +if not "%MLC_FAKE_RUN%" == "yes" ( + %MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\dump.py IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% ) diff --git a/script/dump-pip-freeze/run.sh b/script/dump-pip-freeze/run.sh index a1cdb52eb..8d4d76e1a 100644 --- a/script/dump-pip-freeze/run.sh +++ b/script/dump-pip-freeze/run.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,12 +17,12 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi } #Add your run commands here... 
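+# What the call below does (per dump.py above): it writes `pip freeze`
+# output to MLC_DUMP_RAW_PIP_FREEZE_FILE_PATH (default: tmp-pip-freeze in
+# the current directory), which postprocess() in customize.py then parses.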
-# run "$CM_RUN_CMD" -run "${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/dump.py" +# run "$MLC_RUN_CMD" +run "${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/dump.py" diff --git a/script/extract-file/README-extra.md b/script/extract-file/README-extra.md index fbd8ccaf4..b227eadca 100644 --- a/script/extract-file/README-extra.md +++ b/script/extract-file/README-extra.md @@ -37,20 +37,20 @@ cmr "extract file _keep" --input=coco-2017-val-annotations.zip -j ```json "new_env": { - "CM_EXTRACT_EXTRACTED_PATH": "D:\\Work99.3 readme\\xyz", - "CM_GET_DEPENDENT_CACHED_PATH": "D:\\Work99.3 readme\\xyz" + "MLC_EXTRACT_EXTRACTED_PATH": "D:\\Work99.3 readme\\xyz", + "MLC_GET_DEPENDENT_CACHED_PATH": "D:\\Work99.3 readme\\xyz" }, ``` #### Input flags and equivalent environment variables -* `--input` or `--env.CM_EXTRACT_FILEPATH` - input file -* `--extract_path` or `--to` or `--env.CM_EXTRACT_PATH` - where to extract files (--input should have full path then) -* `--extra_folder` or `--env.CM_EXTRACT_TO_FOLDER` - extra directory when extracting file (to avoid messing up current directory) +* `--input` or `--env.MLC_EXTRACT_FILEPATH` - input file +* `--extract_path` or `--to` or `--env.MLC_EXTRACT_PATH` - where to extract files (--input should have full path then) +* `--extra_folder` or `--env.MLC_EXTRACT_TO_FOLDER` - extra directory when extracting file (to avoid messing up current directory) #### Variations -* `_keep` or `_no-remove-extracted` or `--env.CM_EXTRACT_REMOVE_EXTRACTED=no` - keep archive file (it will be deleted by default) +* `_keep` or `_no-remove-extracted` or `--env.MLC_EXTRACT_REMOVE_EXTRACTED=no` - keep archive file (it will be deleted by default) @@ -59,7 +59,7 @@ cmr "extract file _keep" --input=coco-2017-val-annotations.zip -j Note that you need to provide a full path to the archive file if you want to extract it to some directory: ```bash -cmr "extract file _keep" --input="$PWD/coco-2017-val-annotations.zip" --extract_path="$HOME/cm-test" +cmr "extract file _keep" --input="$PWD/coco-2017-val-annotations.zip" --extract_path="$HOME/mlc-test" ``` ### Add extra folder to extracted files @@ -85,7 +85,7 @@ cmr "download file _url.https://cKnowledge.org/test/captions_val2017.json.gz" Then extract it and test MD5SUM as follows: ```bash -cmr "extract file _keep _path.captions_val2017.json.gz" --env.CM_EXTRACT_EXTRACTED_CHECKSUM=b7bec29ab7bd8971ae4cafc2390a658f -j +cmr "extract file _keep _path.captions_val2017.json.gz" --env.MLC_EXTRACT_EXTRACTED_CHECKSUM=b7bec29ab7bd8971ae4cafc2390a658f -j ``` @@ -93,7 +93,7 @@ cmr "extract file _keep _path.captions_val2017.json.gz" --env.CM_EXTRACT_EXTRACT Some workflows may need to use a different filename than original. 
You can change it as follows: ```bash -cmr "extract file _keep _path.captions_val2017.json.gz" --env.CM_EXTRACT_EXTRACTED_FILENAME=new-file.json --env.CM_EXTRACT_EXTRACTED_CHECKSUM=b7bec29ab7bd8971ae4cafc2390a658f +cmr "extract file _keep _path.captions_val2017.json.gz" --env.MLC_EXTRACT_EXTRACTED_FILENAME=new-file.json --env.MLC_EXTRACT_EXTRACTED_CHECKSUM=b7bec29ab7bd8971ae4cafc2390a658f ``` diff --git a/script/extract-file/customize.py b/script/extract-file/customize.py index f4489105d..9e15efbfa 100644 --- a/script/extract-file/customize.py +++ b/script/extract-file/customize.py @@ -21,21 +21,21 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') - filename = env.get('CM_EXTRACT_FILEPATH', '') + filename = env.get('MLC_EXTRACT_FILEPATH', '') if filename == '': return { - 'return': 1, 'error': 'Extract with no download requested and CM_EXTRACT_FILEPATH is not set'} + 'return': 1, 'error': 'Extract with no download requested and MLC_EXTRACT_FILEPATH is not set'} if windows: filename = filename.replace("%", "%%") - env['CM_EXTRACT_FILENAME'] = filename + env['MLC_EXTRACT_FILENAME'] = filename # Check if extract to some path outside CM cache (to reuse large files # later if cache is cleaned) - extract_path = env.get('CM_EXTRACT_PATH', '') + extract_path = env.get('MLC_EXTRACT_PATH', '') if extract_path != '': if not os.path.exists(extract_path): os.makedirs(extract_path, exist_ok=True) @@ -44,114 +44,114 @@ def preprocess(i): # By default remove archive after extraction remove_extracted = False if env.get( - 'CM_EXTRACT_REMOVE_EXTRACTED', + 'MLC_EXTRACT_REMOVE_EXTRACTED', '').lower() == 'no' else True if filename.endswith(".zip") or filename.endswith(".pth"): - env['CM_EXTRACT_TOOL'] = "unzip" + env['MLC_EXTRACT_TOOL'] = "unzip" elif filename.endswith(".tar.gz"): if windows: x = '"' if ' ' in filename else '' - env['CM_EXTRACT_CMD0'] = 'gzip -d ' + x + filename + x + env['MLC_EXTRACT_CMD0'] = 'gzip -d ' + x + filename + x filename = filename[:-3] # leave only .tar - env['CM_EXTRACT_TOOL_OPTIONS'] = ' -xvf' - env['CM_EXTRACT_TOOL'] = 'tar ' + env['MLC_EXTRACT_TOOL_OPTIONS'] = ' -xvf' + env['MLC_EXTRACT_TOOL'] = 'tar ' elif os_info['platform'] == 'darwin': - env['CM_EXTRACT_TOOL_OPTIONS'] = ' -xvzf ' - env['CM_EXTRACT_TOOL'] = 'tar ' + env['MLC_EXTRACT_TOOL_OPTIONS'] = ' -xvzf ' + env['MLC_EXTRACT_TOOL'] = 'tar ' else: - env['CM_EXTRACT_TOOL_OPTIONS'] = ' --skip-old-files -xvzf ' - env['CM_EXTRACT_TOOL'] = 'tar ' + env['MLC_EXTRACT_TOOL_OPTIONS'] = ' --skip-old-files -xvzf ' + env['MLC_EXTRACT_TOOL'] = 'tar ' elif filename.endswith(".tar.xz"): if windows: x = '"' if ' ' in filename else '' - env['CM_EXTRACT_CMD0'] = 'xz -d ' + x + filename + x + env['MLC_EXTRACT_CMD0'] = 'xz -d ' + x + filename + x filename = filename[:-3] # leave only .tar - env['CM_EXTRACT_TOOL_OPTIONS'] = ' -xvf' - env['CM_EXTRACT_TOOL'] = 'tar ' + env['MLC_EXTRACT_TOOL_OPTIONS'] = ' -xvf' + env['MLC_EXTRACT_TOOL'] = 'tar ' else: - env['CM_EXTRACT_TOOL_OPTIONS'] = ' -xvJf' - env['CM_EXTRACT_TOOL'] = 'tar ' + env['MLC_EXTRACT_TOOL_OPTIONS'] = ' -xvJf' + env['MLC_EXTRACT_TOOL'] = 'tar ' elif filename.endswith(".tar"): - env['CM_EXTRACT_TOOL_OPTIONS'] = ' -xvf' - env['CM_EXTRACT_TOOL'] = 'tar ' + env['MLC_EXTRACT_TOOL_OPTIONS'] = ' -xvf' + env['MLC_EXTRACT_TOOL'] = 'tar ' elif filename.endswith(".gz"): # Check target filename - extracted_filename = env.get('CM_EXTRACT_EXTRACTED_FILENAME', '') + extracted_filename = 
env.get('MLC_EXTRACT_EXTRACTED_FILENAME', '') if extracted_filename == '': extracted_filename = os.path.basename(filename)[:-3] - env['CM_EXTRACT_EXTRACTED_FILENAME'] = extracted_filename + env['MLC_EXTRACT_EXTRACTED_FILENAME'] = extracted_filename x = '-c' if windows else '-k' - env['CM_EXTRACT_TOOL_OPTIONS'] = ' -d ' + \ + env['MLC_EXTRACT_TOOL_OPTIONS'] = ' -d ' + \ (x + ' ' if not remove_extracted else '') + \ ' > ' + q + extracted_filename + q + ' < ' - env['CM_EXTRACT_TOOL'] = 'gzip ' - elif env.get('CM_EXTRACT_UNZIP', '') == 'yes': - env['CM_EXTRACT_TOOL'] = 'unzip ' - elif env.get('CM_EXTRACT_UNTAR', '') == 'yes': - env['CM_EXTRACT_TOOL_OPTIONS'] = ' -xvf' - env['CM_EXTRACT_TOOL'] = 'tar ' - elif env.get('CM_EXTRACT_GZIP', '') == 'yes': - env['CM_EXTRACT_CMD'] = 'gzip ' - env['CM_EXTRACT_TOOL_OPTIONS'] = ' -d ' + \ + env['MLC_EXTRACT_TOOL'] = 'gzip ' + elif env.get('MLC_EXTRACT_UNZIP', '') == 'yes': + env['MLC_EXTRACT_TOOL'] = 'unzip ' + elif env.get('MLC_EXTRACT_UNTAR', '') == 'yes': + env['MLC_EXTRACT_TOOL_OPTIONS'] = ' -xvf' + env['MLC_EXTRACT_TOOL'] = 'tar ' + elif env.get('MLC_EXTRACT_GZIP', '') == 'yes': + env['MLC_EXTRACT_CMD'] = 'gzip ' + env['MLC_EXTRACT_TOOL_OPTIONS'] = ' -d ' + \ ('-k ' if not remove_extracted else '') else: return {'return': 1, - 'error': 'Neither CM_EXTRACT_UNZIP nor CM_EXTRACT_UNTAR is yes'} + 'error': 'Neither MLC_EXTRACT_UNZIP nor MLC_EXTRACT_UNTAR is yes'} - env['CM_EXTRACT_PRE_CMD'] = '' + env['MLC_EXTRACT_PRE_CMD'] = '' - extract_to_folder = env.get('CM_EXTRACT_TO_FOLDER', '') + extract_to_folder = env.get('MLC_EXTRACT_TO_FOLDER', '') # Check if extract to additional folder in the current directory (or external path) # to avoid messing up other files and keep clean directory structure # particularly if archive has many sub-directories and files if extract_to_folder != '': - if 'tar ' in env['CM_EXTRACT_TOOL']: + if 'tar ' in env['MLC_EXTRACT_TOOL']: x = '' if windows else '-p' y = '"' if ' ' in extract_to_folder else '' - # env['CM_EXTRACT_TOOL_OPTIONS'] = ' --one-top-level='+ env['CM_EXTRACT_TO_FOLDER'] + env.get('CM_EXTRACT_TOOL_OPTIONS', '') - env['CM_EXTRACT_TOOL_OPTIONS'] = ' -C ' + y + extract_to_folder + \ - y + ' ' + env.get('CM_EXTRACT_TOOL_OPTIONS', '') - env['CM_EXTRACT_PRE_CMD'] = 'mkdir ' + x + ' ' + \ + # env['MLC_EXTRACT_TOOL_OPTIONS'] = ' --one-top-level='+ env['MLC_EXTRACT_TO_FOLDER'] + env.get('MLC_EXTRACT_TOOL_OPTIONS', '') + env['MLC_EXTRACT_TOOL_OPTIONS'] = ' -C ' + y + extract_to_folder + \ + y + ' ' + env.get('MLC_EXTRACT_TOOL_OPTIONS', '') + env['MLC_EXTRACT_PRE_CMD'] = 'mkdir ' + x + ' ' + \ y + extract_to_folder + y + ' ' + xsep + ' ' - env['CM_EXTRACT_EXTRACTED_FILENAME'] = extract_to_folder + env['MLC_EXTRACT_EXTRACTED_FILENAME'] = extract_to_folder - elif 'unzip' in env['CM_EXTRACT_TOOL']: - env['CM_EXTRACT_TOOL_OPTIONS'] = ' -d ' + q + extract_to_folder + q - env['CM_EXTRACT_EXTRACTED_FILENAME'] = extract_to_folder + elif 'unzip' in env['MLC_EXTRACT_TOOL']: + env['MLC_EXTRACT_TOOL_OPTIONS'] = ' -d ' + q + extract_to_folder + q + env['MLC_EXTRACT_EXTRACTED_FILENAME'] = extract_to_folder x = '"' if ' ' in filename else '' - env['CM_EXTRACT_CMD'] = env['CM_EXTRACT_PRE_CMD'] + env['CM_EXTRACT_TOOL'] + ' ' + \ - env.get('CM_EXTRACT_TOOL_EXTRA_OPTIONS', '') + \ - ' ' + env.get('CM_EXTRACT_TOOL_OPTIONS', '') + ' ' + x + filename + x + env['MLC_EXTRACT_CMD'] = env['MLC_EXTRACT_PRE_CMD'] + env['MLC_EXTRACT_TOOL'] + ' ' + \ + env.get('MLC_EXTRACT_TOOL_EXTRA_OPTIONS', '') + \ + ' ' + env.get('MLC_EXTRACT_TOOL_OPTIONS', '') + ' 
' + x + filename + x print('') print('Current directory: {}'.format(os.getcwd())) - print('Command line: "{}"'.format(env['CM_EXTRACT_CMD'])) + print('Command line: "{}"'.format(env['MLC_EXTRACT_CMD'])) print('') - final_file = env.get('CM_EXTRACT_EXTRACTED_FILENAME', '') + final_file = env.get('MLC_EXTRACT_EXTRACTED_FILENAME', '') if final_file != '': - if env.get('CM_EXTRACT_EXTRACTED_CHECKSUM_FILE', '') != '': - env['CM_EXTRACT_EXTRACTED_CHECKSUM_CMD'] = f"cd {q}{final_file}{q} {xsep} md5sum -c {q}{env['CM_EXTRACT_EXTRACTED_CHECKSUM_FILE']}{q}" - elif env.get('CM_EXTRACT_EXTRACTED_CHECKSUM', '') != '': + if env.get('MLC_EXTRACT_EXTRACTED_CHECKSUM_FILE', '') != '': + env['MLC_EXTRACT_EXTRACTED_CHECKSUM_CMD'] = f"cd {q}{final_file}{q} {xsep} md5sum -c {q}{env['MLC_EXTRACT_EXTRACTED_CHECKSUM_FILE']}{q}" + elif env.get('MLC_EXTRACT_EXTRACTED_CHECKSUM', '') != '': x = '*' if os_info['platform'] == 'windows' else '' - env['CM_EXTRACT_EXTRACTED_CHECKSUM_CMD'] = "echo {} {}{q}{}{q} | md5sum -c".format( - env.get('CM_EXTRACT_EXTRACTED_CHECKSUM'), x, env['CM_EXTRACT_EXTRACTED_FILENAME']) + env['MLC_EXTRACT_EXTRACTED_CHECKSUM_CMD'] = "echo {} {}{q}{}{q} | md5sum -c".format( + env.get('MLC_EXTRACT_EXTRACTED_CHECKSUM'), x, env['MLC_EXTRACT_EXTRACTED_FILENAME']) else: - env['CM_EXTRACT_EXTRACTED_CHECKSUM_CMD'] = "" + env['MLC_EXTRACT_EXTRACTED_CHECKSUM_CMD'] = "" else: - env['CM_EXTRACT_EXTRACTED_CHECKSUM_CMD'] = "" + env['MLC_EXTRACT_EXTRACTED_CHECKSUM_CMD'] = "" # Not needed - can be simpler with cmd /c {empty} # if os_info['platform'] == 'windows': # # Check that if empty CMD, should add "" -# for x in ['CM_EXTRACT_CMD', 'CM_EXTRACT_EXTRACTED_CHECKSUM_CMD']: +# for x in ['MLC_EXTRACT_CMD', 'MLC_EXTRACT_EXTRACTED_CHECKSUM_CMD']: # env[x+'_USED']='YES' if env.get(x,'')!='' else 'NO' # If force cache, add filepath to tag unless _path is used ... 
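For reviewers tracing the renamed variables above: script/extract-file/customize.py ultimately concatenates MLC_EXTRACT_PRE_CMD, MLC_EXTRACT_TOOL and the option strings into MLC_EXTRACT_CMD, which run.sh later evals. A minimal sketch of that composition, assuming a plain dict for env and simplified quoting; the helper name build_extract_cmd is illustrative and not part of this repository:

    def build_extract_cmd(env):
        # Sketch only: mirrors the concatenation order used in
        # script/extract-file/customize.py after the CM_ -> MLC_ rename.
        filename = env.get('MLC_EXTRACT_FILEPATH', '')
        x = '"' if ' ' in filename else ''           # quote paths containing spaces
        return (env.get('MLC_EXTRACT_PRE_CMD', '')   # e.g. 'mkdir -p "out" && '
                + env.get('MLC_EXTRACT_TOOL', '')    # 'gzip ', 'unzip ' or 'tar '
                + ' ' + env.get('MLC_EXTRACT_TOOL_EXTRA_OPTIONS', '')
                + ' ' + env.get('MLC_EXTRACT_TOOL_OPTIONS', '')
                + ' ' + x + filename + x)

    env = {'MLC_EXTRACT_FILEPATH': 'model.tar.gz',
           'MLC_EXTRACT_TOOL': 'tar ',
           'MLC_EXTRACT_TOOL_OPTIONS': ' -xvf'}
    print(build_extract_cmd(env))  # -> 'tar    -xvf model.tar.gz' (extra spaces are harmless to the shell)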
@@ -170,10 +170,10 @@ def postprocess(i): env = i['env'] - extract_to_folder = env.get('CM_EXTRACT_TO_FOLDER', '') - extract_path = env.get('CM_EXTRACT_PATH', '') + extract_to_folder = env.get('MLC_EXTRACT_TO_FOLDER', '') + extract_path = env.get('MLC_EXTRACT_PATH', '') - extracted_file = env.get('CM_EXTRACT_EXTRACTED_FILENAME', '') + extracted_file = env.get('MLC_EXTRACT_EXTRACTED_FILENAME', '') # Preparing filepath # Can be either full extracted filename (such as model) or folder @@ -182,7 +182,7 @@ def postprocess(i): filename = os.path.basename(extracted_file) # We do not use this env variable anymore -# folderpath = env.get('CM_EXTRACT_EXTRACT_TO_PATH', '') +# folderpath = env.get('MLC_EXTRACT_EXTRACT_TO_PATH', '') folderpath = extract_path if extract_path != '' else os.getcwd() filepath = os.path.join(folderpath, filename) @@ -193,21 +193,21 @@ def postprocess(i): if not os.path.exists(filepath): return { 'return': 1, 'error': 'Path {} was not created or doesn\'t exist'.format(filepath)} -# return {'return':1, 'error': 'CM_EXTRACT_EXTRACTED_FILENAME and -# CM_EXTRACT_TO_FOLDER are not set'} +# return {'return':1, 'error': 'MLC_EXTRACT_EXTRACTED_FILENAME and -# MLC_EXTRACT_TO_FOLDER are not set'} - env['CM_EXTRACT_EXTRACTED_PATH'] = filepath + env['MLC_EXTRACT_EXTRACTED_PATH'] = filepath # Set external environment variable with the final path - if env.get('CM_EXTRACT_FINAL_ENV_NAME', '') != '': - env[env['CM_EXTRACT_FINAL_ENV_NAME']] = filepath + if env.get('MLC_EXTRACT_FINAL_ENV_NAME', '') != '': + env[env['MLC_EXTRACT_FINAL_ENV_NAME']] = filepath # Detect if this file will be deleted or moved - env['CM_GET_DEPENDENT_CACHED_PATH'] = filepath + env['MLC_GET_DEPENDENT_CACHED_PATH'] = filepath # Check if need to remove archive after extraction - if env.get('CM_EXTRACT_REMOVE_EXTRACTED', '').lower() != 'no': - archive_filepath = env.get('CM_EXTRACT_FILEPATH', '') + if env.get('MLC_EXTRACT_REMOVE_EXTRACTED', '').lower() != 'no': + archive_filepath = env.get('MLC_EXTRACT_FILEPATH', '') if archive_filepath != '' and os.path.isfile(archive_filepath): os.remove(archive_filepath) diff --git a/script/extract-file/meta.yaml b/script/extract-file/meta.yaml index 3cee898a0..56f29fe1d 100644 --- a/script/extract-file/meta.yaml +++ b/script/extract-file/meta.yaml @@ -7,24 +7,24 @@ category: DevOps automation deps: - tags: detect,os - enable_if_env: - CM_HOST_OS_FLAVOR: + MLC_HOST_OS_FLAVOR: - macos skip_if_any_env: - CM_EXTRACT_EXTRACTED_CHECKSUM: + MLC_EXTRACT_EXTRACTED_CHECKSUM: - 'off' - CM_EXTRACT_EXTRACTED_CHECKSUM_FILE: + MLC_EXTRACT_EXTRACTED_CHECKSUM_FILE: - 'off' tags: get,generic-sys-util,_md5sha1sum input_description: {} input_mapping: - extra_folder: CM_EXTRACT_TO_FOLDER - extract_path: CM_EXTRACT_PATH - input: CM_EXTRACT_FILEPATH - to: CM_EXTRACT_PATH + extra_folder: MLC_EXTRACT_TO_FOLDER + extract_path: MLC_EXTRACT_PATH + input: MLC_EXTRACT_FILEPATH + to: MLC_EXTRACT_PATH new_env_keys: -- CM_EXTRACT_EXTRACTED_PATH -- <<<CM_EXTRACT_FINAL_ENV_NAME>>> -- CM_GET_DEPENDENT_CACHED_PATH +- MLC_EXTRACT_EXTRACTED_PATH +- <<<MLC_EXTRACT_FINAL_ENV_NAME>>> +- MLC_GET_DEPENDENT_CACHED_PATH new_state_keys: [] post_deps: [] posthook_deps: [] @@ -37,11 +37,11 @@ uid: 3f0b76219d004817 variations: keep: env: - CM_EXTRACT_REMOVE_EXTRACTED: 'no' + MLC_EXTRACT_REMOVE_EXTRACTED: 'no' no-remove-extracted: env: - CM_EXTRACT_REMOVE_EXTRACTED: 'no' + MLC_EXTRACT_REMOVE_EXTRACTED: 'no' path.#: env: - CM_EXTRACT_FILEPATH: '#' + MLC_EXTRACT_FILEPATH: '#' versions: {} diff --git a/script/extract-file/run.bat b/script/extract-file/run.bat index 530ebbd2c..2a2727965
100644 --- a/script/extract-file/run.bat +++ b/script/extract-file/run.bat @@ -7,33 +7,33 @@ setlocal enabledelayedexpansion set require_extract=1 -if exist "%CM_EXTRACT_EXTRACTED_FILENAME%" ( +if exist "%MLC_EXTRACT_EXTRACTED_FILENAME%" ( set require_extract=0 echo. - echo %CM_EXTRACT_EXTRACTED_CHECKSUM_CMD% - cmd /c %CM_EXTRACT_EXTRACTED_CHECKSUM_CMD% + echo %MLC_EXTRACT_EXTRACTED_CHECKSUM_CMD% + cmd /c %MLC_EXTRACT_EXTRACTED_CHECKSUM_CMD% IF !ERRORLEVEL! NEQ 0 ( set require_extract=1 - del /Q %CM_EXTRACT_EXTRACTED_FILENAME% + del /Q %MLC_EXTRACT_EXTRACTED_FILENAME% ) ) if "!require_extract!" == "1" ( - if not "%CM_EXTRACT_CMD0%" == "" ( + if not "%MLC_EXTRACT_CMD0%" == "" ( echo. - echo %CM_EXTRACT_CMD0% - cmd /c %CM_EXTRACT_CMD0% + echo %MLC_EXTRACT_CMD0% + cmd /c %MLC_EXTRACT_CMD0% IF !ERRORLEVEL! NEQ 0 EXIT 1 ) echo. - echo %CM_EXTRACT_CMD% - cmd /c %CM_EXTRACT_CMD% + echo %MLC_EXTRACT_CMD% + cmd /c %MLC_EXTRACT_CMD% IF !ERRORLEVEL! NEQ 0 EXIT 1 echo. - echo %CM_EXTRACT_EXTRACTED_CHECKSUM_CMD% - cmd /c %CM_EXTRACT_EXTRACTED_CHECKSUM_CMD% + echo %MLC_EXTRACT_EXTRACTED_CHECKSUM_CMD% + cmd /c %MLC_EXTRACT_EXTRACTED_CHECKSUM_CMD% IF !ERRORLEVEL! NEQ 0 EXIT 1 ) diff --git a/script/extract-file/run.sh b/script/extract-file/run.sh index 4ee4f8512..29627a196 100644 --- a/script/extract-file/run.sh +++ b/script/extract-file/run.sh @@ -1,20 +1,20 @@ #!/bin/bash -if [ -e "${CM_EXTRACT_EXTRACTED_FILENAME}" ] ; then - CMD=${CM_EXTRACT_EXTRACTED_CHECKSUM_CMD} +if [ -e "${MLC_EXTRACT_EXTRACTED_FILENAME}" ] ; then + CMD=${MLC_EXTRACT_EXTRACTED_CHECKSUM_CMD} echo "" echo "${CMD}" eval "${CMD}" test $? -eq 0 && exit 0 fi -CMD=${CM_EXTRACT_CMD} +CMD=${MLC_EXTRACT_CMD} echo "" echo "${CMD}" eval "${CMD}" test $? -eq 0 || exit $? -CMD=${CM_EXTRACT_EXTRACTED_CHECKSUM_CMD} +CMD=${MLC_EXTRACT_EXTRACTED_CHECKSUM_CMD} echo "" echo "${CMD}" eval "${CMD}" diff --git a/script/fail/customize.py b/script/fail/customize.py index 69ccec77e..3f826ffe2 100644 --- a/script/fail/customize.py +++ b/script/fail/customize.py @@ -12,10 +12,10 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') # Checking conditions - if env.get('CM_FAIL_WINDOWS', '').lower() == 'true': + if env.get('MLC_FAIL_WINDOWS', '').lower() == 'true': if os_info['platform'] == 'windows': return {'return': 1, 'error': 'CM detected fail condition: running on Windows'} diff --git a/script/fail/meta.yaml b/script/fail/meta.yaml index 9c5d8fcfc..a68d75749 100644 --- a/script/fail/meta.yaml +++ b/script/fail/meta.yaml @@ -15,4 +15,4 @@ tags: variations: windows: env: - CM_FAIL_WINDOWS: true + MLC_FAIL_WINDOWS: true diff --git a/script/flash-tinyml-binary/customize.py b/script/flash-tinyml-binary/customize.py index ab0d7e5a3..80690ce16 100644 --- a/script/flash-tinyml-binary/customize.py +++ b/script/flash-tinyml-binary/customize.py @@ -9,9 +9,9 @@ def preprocess(i): if os_info['platform'] == 'windows': return {'return': 1, 'error': 'Windows is not supported in this script yet'} - if 'CM_TINY_BUILD_DIR' not in env: + if 'MLC_TINY_BUILD_DIR' not in env: return { - 'return': 1, 'error': 'Please set CM_TINY_BUILD_DIR to the build directory of the model'} + 'return': 1, 'error': 'Please set MLC_TINY_BUILD_DIR to the build directory of the model'} return {'return': 0} diff --git a/script/flash-tinyml-binary/meta.yaml b/script/flash-tinyml-binary/meta.yaml index 42ebb7ae7..92cab05ae 100644 --- a/script/flash-tinyml-binary/meta.yaml +++ 
b/script/flash-tinyml-binary/meta.yaml @@ -14,13 +14,13 @@ deps: tags: get,zephyr-sdk - inherit_variation_tags: 'True' skip_if_env: - CM_TINY_BUILD_DIR: + MLC_TINY_BUILD_DIR: - 'on' tags: reproduce,tiny,mlperf input_mapping: - build_dir: CM_TINY_BUILD_DIR + build_dir: MLC_TINY_BUILD_DIR local_env_keys: -- CM_* +- MLC_* tags: - flash - tiny diff --git a/script/flash-tinyml-binary/run.sh b/script/flash-tinyml-binary/run.sh index 962dc74d5..9d8231794 100644 --- a/script/flash-tinyml-binary/run.sh +++ b/script/flash-tinyml-binary/run.sh @@ -1,7 +1,7 @@ #!/bin/bash -build_dir=${CM_TINY_BUILD_DIR} -cmd="cd ${CM_ZEPHYR_DIR}" +build_dir=${MLC_TINY_BUILD_DIR} +cmd="cd ${MLC_ZEPHYR_DIR}" echo $cmd eval $cmd cmd="west flash --build-dir ${build_dir}" diff --git a/script/generate-mlperf-inference-submission/customize.py b/script/generate-mlperf-inference-submission/customize.py index a6f5167aa..384b0c9b8 100644 --- a/script/generate-mlperf-inference-submission/customize.py +++ b/script/generate-mlperf-inference-submission/customize.py @@ -60,39 +60,39 @@ def generate_submission(env, state, inp, submission_division): # Save current user directory cur_dir = os.getcwd() - if env.get('CM_MLPERF_INFERENCE_RESULTS_DIR_', '') == '': + if env.get('MLC_MLPERF_INFERENCE_RESULTS_DIR_', '') == '': results_dir = os.path.join( - env['CM_MLPERF_INFERENCE_RESULTS_DIR'], - f"{env['CM_MLPERF_RUN_STYLE']}_results") + env['MLC_MLPERF_INFERENCE_RESULTS_DIR'], + f"{env['MLC_MLPERF_RUN_STYLE']}_results") else: - results_dir = env['CM_MLPERF_INFERENCE_RESULTS_DIR_'] + results_dir = env['MLC_MLPERF_INFERENCE_RESULTS_DIR_'] - mlperf_path = env['CM_MLPERF_INFERENCE_SOURCE'] + mlperf_path = env['MLC_MLPERF_INFERENCE_SOURCE'] submission_checker_dir = os.path.join(mlperf_path, "tools", "submission") sys.path.append(submission_checker_dir) - if env.get('CM_MLPERF_INFERENCE_SUBMISSION_DIR', '') == '': + if env.get('MLC_MLPERF_INFERENCE_SUBMISSION_DIR', '') == '': from pathlib import Path user_home = str(Path.home()) - env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'] = os.path.join( + env['MLC_MLPERF_INFERENCE_SUBMISSION_DIR'] = os.path.join( user_home, "mlperf_submission") - submission_dir = env.get('CM_MLPERF_INFERENCE_SUBMISSION_DIR', '') + submission_dir = env.get('MLC_MLPERF_INFERENCE_SUBMISSION_DIR', '') if submission_dir == '': submission_base_dir = env.get( - 'CM_MLPERF_INFERENCE_SUBMISSION_BASE_DIR', '') + 'MLC_MLPERF_INFERENCE_SUBMISSION_BASE_DIR', '') if submission_base_dir == '': - return {'return': 1, 'error': f"Both CM_MLPERF_INFERENCE_SUBMISSION_DIR and CM_MLPERF_INFERENCE_SUBMISSION_BASE_DIR can not be empty!"} + return {'return': 1, 'error': f"Both MLC_MLPERF_INFERENCE_SUBMISSION_DIR and MLC_MLPERF_INFERENCE_SUBMISSION_BASE_DIR can not be empty!"} else: submission_dir = os.path.join( submission_base_dir, "mlperf_inference_submission") - env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'] = submission_dir + env['MLC_MLPERF_INFERENCE_SUBMISSION_DIR'] = submission_dir - if env.get('CM_MLPERF_CLEAN_SUBMISSION_DIR', '') != '': + if env.get('MLC_MLPERF_CLEAN_SUBMISSION_DIR', '') != '': print('=================================================') print( 'Cleaning {} ...'.format( - env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'])) + env['MLC_MLPERF_INFERENCE_SUBMISSION_DIR'])) if os.path.exists(submission_dir): shutil.rmtree(submission_dir) print('=================================================') @@ -100,7 +100,7 @@ def generate_submission(env, state, inp, submission_division): if not os.path.isdir(submission_dir): os.makedirs(submission_dir) - if 
str(env.get('CM_MLPERF_SUBMISSION_DIR_SHARED', '') + if str(env.get('MLC_MLPERF_SUBMISSION_DIR_SHARED', '') ).lower() in ["yes", "true", "1"]: os.chmod(submission_dir, 0o2775) @@ -112,15 +112,15 @@ def generate_submission(env, state, inp, submission_division): results_dir, f))] - system_meta_default = state['CM_SUT_META'] + system_meta_default = state['MLC_SUT_META'] # set pytorch as the default framework if system_meta_default['framework'] == '': system_meta_default['framework'] = "pytorch" system_meta_tmp = {} - if 'CM_MLPERF_SUBMISSION_SYSTEM_TYPE' in env: - system_meta_tmp['system_type'] = env['CM_MLPERF_SUBMISSION_SYSTEM_TYPE'] + if 'MLC_MLPERF_SUBMISSION_SYSTEM_TYPE' in env: + system_meta_tmp['system_type'] = env['MLC_MLPERF_SUBMISSION_SYSTEM_TYPE'] if submission_division != "": system_meta_tmp['division'] = submission_division @@ -128,13 +128,13 @@ def generate_submission(env, state, inp, submission_division): else: division = system_meta_default['division'] - if 'CM_MLPERF_SUBMISSION_CATEGORY' in env: - system_meta_tmp['system_type'] = env['CM_MLPERF_SUBMISSION_CATEGORY'].replace( + if 'MLC_MLPERF_SUBMISSION_CATEGORY' in env: + system_meta_tmp['system_type'] = env['MLC_MLPERF_SUBMISSION_CATEGORY'].replace( "-", ",") duplicate = ( env.get( - 'CM_MLPERF_DUPLICATE_SCENARIO_RESULTS', + 'MLC_MLPERF_DUPLICATE_SCENARIO_RESULTS', 'no') in [ "yes", "True"]) @@ -150,25 +150,25 @@ def generate_submission(env, state, inp, submission_division): os.makedirs(path_submission_division) # Check submitter - if env.get('CM_MLPERF_SUBMITTER'): - submitter = env['CM_MLPERF_SUBMITTER'] + if env.get('MLC_MLPERF_SUBMITTER'): + submitter = env['MLC_MLPERF_SUBMITTER'] system_meta_tmp['submitter'] = submitter else: submitter = system_meta_default['submitter'] - env['CM_MLPERF_SUBMITTER'] = submitter + env['MLC_MLPERF_SUBMITTER'] = submitter print('* MLPerf inference submitter: {}'.format(submitter)) - if env.get('CM_MLPERF_SUT_SW_NOTES_EXTRA', '') != '': + if env.get('MLC_MLPERF_SUT_SW_NOTES_EXTRA', '') != '': sw_notes = f"""{ system_meta_tmp['sw_notes']} { - env['CM_MLPERF_SUT_SW_NOTES_EXTRA']}""" + env['MLC_MLPERF_SUT_SW_NOTES_EXTRA']}""" system_meta_tmp['sw_notes'] = sw_notes - if env.get('CM_MLPERF_SUT_HW_NOTES_EXTRA', '') != '': + if env.get('MLC_MLPERF_SUT_HW_NOTES_EXTRA', '') != '': hw_notes = f"""{ system_meta_tmp['hw_notes']} { - env['CM_MLPERF_SUT_HW_NOTES_EXTRA']}""" + env['MLC_MLPERF_SUT_HW_NOTES_EXTRA']}""" system_meta_tmp['hw_notes'] = hw_notes path_submission = os.path.join(path_submission_division, submitter) @@ -176,7 +176,7 @@ def generate_submission(env, state, inp, submission_division): os.makedirs(path_submission) # SUT base - system = env.get('CM_HW_NAME', 'default').replace(' ', '_') + system = env.get('MLC_HW_NAME', 'default').replace(' ', '_') code_path = os.path.join(path_submission, "code") @@ -237,7 +237,7 @@ def generate_submission(env, state, inp, submission_division): if division == "open" and len(model_mapping_combined) == 0: for model in models: is_valid, returned_model_name = model_in_valid_models( - model, env.get('CM_MLPERF_LAST_RELEASE', 'v4.1')) + model, env.get('MLC_MLPERF_LAST_RELEASE', 'v4.1')) if not is_valid: result_model_path = os.path.join(result_path, model) scenarios = [ @@ -276,7 +276,7 @@ def generate_submission(env, state, inp, submission_division): {model: returned_model_name}) if check_dict_filled(sut_info.keys(), sut_info): - system = env.get('CM_HW_NAME', sut_info["system_name"]) + system = env.get('MLC_HW_NAME', sut_info["system_name"]) implementation = 
sut_info["implementation"] device = sut_info["device"] framework = sut_info["framework"].replace(" ", "_") @@ -431,11 +431,11 @@ def generate_submission(env, state, inp, submission_division): submission_power_path, f)) analyzer_settings_file = env.get( - 'CM_MLPERF_POWER_ANALYZER_SETTINGS_FILE_PATH', os.path.join( - env['CM_TMP_CURRENT_SCRIPT_PATH'], "default_files", "analyzer_table.md")) + 'MLC_MLPERF_POWER_ANALYZER_SETTINGS_FILE_PATH', os.path.join( + env['MLC_TMP_CURRENT_SCRIPT_PATH'], "default_files", "analyzer_table.md")) power_settings_file = env.get( - 'CM_MLPERF_POWER_SETTINGS_FILE_PATH', os.path.join( - env['CM_TMP_CURRENT_SCRIPT_PATH'], "default_files", "power_settings.md")) + 'MLC_MLPERF_POWER_SETTINGS_FILE_PATH', os.path.join( + env['MLC_TMP_CURRENT_SCRIPT_PATH'], "default_files", "power_settings.md")) shutil.copy( analyzer_settings_file, os.path.join( @@ -651,7 +651,7 @@ def generate_submission(env, state, inp, submission_division): readme_suffix = "" result_string, result = mlperf_utils.get_result_string( - env['CM_MLPERF_LAST_RELEASE'], model, scenario, result_scenario_path, power_run, sub_res, division, system_file, model_precision, env.get('CM_MLPERF_INFERENCE_SOURCE_VERSION')) + env['MLC_MLPERF_LAST_RELEASE'], model, scenario, result_scenario_path, power_run, sub_res, division, system_file, model_precision, env.get('MLC_MLPERF_INFERENCE_SOURCE_VERSION')) for key in result: results[model][scenario][key] = result[key] @@ -693,11 +693,11 @@ def generate_submission(env, state, inp, submission_division): measurement_path, "system_info.txt")) else: - if env.get('CM_GET_PLATFORM_DETAILS', '') == "yes": + if env.get('MLC_GET_PLATFORM_DETAILS', '') == "yes": mlc_input = {'action': 'run', 'automation': 'script', 'tags': 'get,platform,details', - 'env': {'CM_PLATFORM_DETAILS_FILE_PATH': os.path.join(measurement_path, "system_info.txt")}, + 'env': {'MLC_PLATFORM_DETAILS_FILE_PATH': os.path.join(measurement_path, "system_info.txt")}, 'quiet': True } r = mlc.access(mlc_input) @@ -725,15 +725,15 @@ def postprocess(i): submission_divisions = [] - if env.get('CM_MLPERF_SUBMISSION_DIVISION', '') in [ + if env.get('MLC_MLPERF_SUBMISSION_DIVISION', '') in [ "open-closed", "closed-open"]: submission_divisions = ["open", "closed"] - elif env.get('CM_MLPERF_SUBMISSION_DIVISION', '') != '': - submission_divisions.append(env['CM_MLPERF_SUBMISSION_DIVISION']) + elif env.get('MLC_MLPERF_SUBMISSION_DIVISION', '') != '': + submission_divisions.append(env['MLC_MLPERF_SUBMISSION_DIVISION']) # if submission division is not assigned, default value would be taken in # submission_generation function - if env.get('CM_MLPERF_SUBMISSION_DIVISION', '') == '': + if env.get('MLC_MLPERF_SUBMISSION_DIVISION', '') == '': r = generate_submission(env, state, inp, submission_division="") if r['return'] > 0: return r diff --git a/script/generate-mlperf-inference-submission/meta.yaml b/script/generate-mlperf-inference-submission/meta.yaml index 013997df9..4c5a0ab34 100644 --- a/script/generate-mlperf-inference-submission/meta.yaml +++ b/script/generate-mlperf-inference-submission/meta.yaml @@ -4,9 +4,9 @@ automation_uid: 5b4e0237da074764 cache: false category: MLPerf benchmark support default_env: - CM_MLPERF_RUN_STYLE: valid - CM_MLPERF_SUBMISSION_DIR_SHARED: 'yes' - CM_RUN_MLPERF_ACCURACY: 'on' + MLC_MLPERF_RUN_STYLE: valid + MLC_MLPERF_SUBMISSION_DIR_SHARED: 'yes' + MLC_RUN_MLPERF_ACCURACY: 'on' predeps: False deps: - names: @@ -22,13 +22,13 @@ deps: - names: - get-mlperf-results-dir skip_if_env: - 
CM_MLPERF_INFERENCE_RESULTS_DIR_: + MLC_MLPERF_INFERENCE_RESULTS_DIR_: - 'on' tags: get,mlperf,results,dir,local - names: - get-mlperf-submission-dir skip_if_env: - CM_MLPERF_INFERENCE_SUBMISSION_DIR: + MLC_MLPERF_INFERENCE_SUBMISSION_DIR: - 'on' tags: get,mlperf,submission,dir docker: @@ -37,22 +37,22 @@ docker: deps: - names: get-mlperf-inference-results-dir skip_if_env: - CM_MLPERF_INFERENCE_RESULTS_DIR_: + MLC_MLPERF_INFERENCE_RESULTS_DIR_: - 'on' tags: get,mlperf,inference,results,dir,local - names: get-mlperf-inference-submission-dir skip_if_any_env: - CM_MLPERF_INFERENCE_SUBMISSION_BASE_DIR: + MLC_MLPERF_INFERENCE_SUBMISSION_BASE_DIR: - 'on' tags: get,mlperf,inference,submission,dir,local docker_input_mapping: - results_dir: CM_MLPERF_INFERENCE_RESULTS_DIR_ - submission_base_dir: CM_MLPERF_INFERENCE_SUBMISSION_BASE_DIR + results_dir: MLC_MLPERF_INFERENCE_RESULTS_DIR_ + submission_base_dir: MLC_MLPERF_INFERENCE_SUBMISSION_BASE_DIR extra_run_args: ' --cap-add SYS_ADMIN' mounts: - - ${{ CM_MLPERF_INFERENCE_SUBMISSION_BASE_DIR }}:${{ CM_MLPERF_INFERENCE_SUBMISSION_BASE_DIR + - ${{ MLC_MLPERF_INFERENCE_SUBMISSION_BASE_DIR }}:${{ MLC_MLPERF_INFERENCE_SUBMISSION_BASE_DIR }} - - ${{ CM_MLPERF_INFERENCE_RESULTS_DIR_ }}:${{ CM_MLPERF_INFERENCE_RESULTS_DIR_ }} + - ${{ MLC_MLPERF_INFERENCE_RESULTS_DIR_ }}:${{ MLC_MLPERF_INFERENCE_RESULTS_DIR_ }} os: ubuntu os_version: '22.04' pre_run_cmds: @@ -61,49 +61,49 @@ docker: use_host_group_id: true use_host_user_id: true input_mapping: - analyzer_settings_file: CM_MLPERF_POWER_ANALYZER_SETTINGS_FILE_PATH - category: CM_MLPERF_SUBMISSION_CATEGORY - clean: CM_MLPERF_CLEAN_SUBMISSION_DIR - dashboard: CM_MLPERF_DASHBOARD - dashboard_wb_project: CM_MLPERF_DASHBOARD_WANDB_PROJECT - device: CM_MLPERF_DEVICE - division: CM_MLPERF_SUBMISSION_DIVISION - duplicate: CM_MLPERF_DUPLICATE_SCENARIO_RESULTS - extra_checker_args: CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARG - hw_name: CM_HW_NAME - hw_notes_extra: CM_MLPERF_SUT_HW_NOTES_EXTRA - infer_scenario_results: CM_MLPERF_DUPLICATE_SCENARIO_RESULTS - power_settings_file: CM_MLPERF_POWER_SETTINGS_FILE_PATH - preprocess: CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR - preprocess_submission: CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR - results_dir: CM_MLPERF_INFERENCE_RESULTS_DIR_ - run_checker: CM_RUN_SUBMISSION_CHECKER - run_style: CM_MLPERF_RUN_STYLE - skip_truncation: CM_SKIP_TRUNCATE_ACCURACY - submission_base_dir: CM_MLPERF_INFERENCE_SUBMISSION_BASE_DIR - submission_dir: CM_MLPERF_INFERENCE_SUBMISSION_DIR - submitter: CM_MLPERF_SUBMITTER - sw_notes_extra: CM_MLPERF_SUT_SW_NOTES_EXTRA - tar: CM_TAR_SUBMISSION_DIR - get_platform_details: CM_GET_PLATFORM_DETAILS - version: CM_MLPERF_SUBMISSION_CHECKER_VERSION + analyzer_settings_file: MLC_MLPERF_POWER_ANALYZER_SETTINGS_FILE_PATH + category: MLC_MLPERF_SUBMISSION_CATEGORY + clean: MLC_MLPERF_CLEAN_SUBMISSION_DIR + dashboard: MLC_MLPERF_DASHBOARD + dashboard_wb_project: MLC_MLPERF_DASHBOARD_WANDB_PROJECT + device: MLC_MLPERF_DEVICE + division: MLC_MLPERF_SUBMISSION_DIVISION + duplicate: MLC_MLPERF_DUPLICATE_SCENARIO_RESULTS + extra_checker_args: MLC_MLPERF_SUBMISSION_CHECKER_EXTRA_ARG + hw_name: MLC_HW_NAME + hw_notes_extra: MLC_MLPERF_SUT_HW_NOTES_EXTRA + infer_scenario_results: MLC_MLPERF_DUPLICATE_SCENARIO_RESULTS + power_settings_file: MLC_MLPERF_POWER_SETTINGS_FILE_PATH + preprocess: MLC_RUN_MLPERF_SUBMISSION_PREPROCESSOR + preprocess_submission: MLC_RUN_MLPERF_SUBMISSION_PREPROCESSOR + results_dir: MLC_MLPERF_INFERENCE_RESULTS_DIR_ + run_checker: MLC_RUN_SUBMISSION_CHECKER + 
run_style: MLC_MLPERF_RUN_STYLE + skip_truncation: MLC_SKIP_TRUNCATE_ACCURACY + submission_base_dir: MLC_MLPERF_INFERENCE_SUBMISSION_BASE_DIR + submission_dir: MLC_MLPERF_INFERENCE_SUBMISSION_DIR + submitter: MLC_MLPERF_SUBMITTER + sw_notes_extra: MLC_MLPERF_SUT_SW_NOTES_EXTRA + tar: MLC_TAR_SUBMISSION_DIR + get_platform_details: MLC_GET_PLATFORM_DETAILS + version: MLC_MLPERF_SUBMISSION_CHECKER_VERSION post_deps: - enable_if_env: - CM_RUN_MLPERF_ACCURACY: + MLC_RUN_MLPERF_ACCURACY: - 'on' skip_if_env: - CM_SKIP_TRUNCATE_ACCURACY: + MLC_SKIP_TRUNCATE_ACCURACY: - 'yes' tags: accuracy,truncate,mlc - enable_if_env: - CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR: + MLC_RUN_MLPERF_SUBMISSION_PREPROCESSOR: - 'on' - 'True' - 'yes' - true tags: preprocess,mlperf,submission - skip_if_env: - CM_RUN_SUBMISSION_CHECKER: + MLC_RUN_SUBMISSION_CHECKER: - 'no' names: - mlperf-inference-submission-checker diff --git a/script/generate-mlperf-inference-user-conf/customize.py b/script/generate-mlperf-inference-user-conf/customize.py index 88255718c..068a4161f 100644 --- a/script/generate-mlperf-inference-user-conf/customize.py +++ b/script/generate-mlperf-inference-user-conf/customize.py @@ -13,77 +13,77 @@ def preprocess(i): state = i['state'] script_path = i['run_script_input']['path'] - rerun = True if env.get("CM_RERUN", "") != '' else False + rerun = True if env.get("MLC_RERUN", "") != '' else False - env['CM_MLPERF_SKIP_RUN'] = env.get('CM_MLPERF_SKIP_RUN', "no") + env['MLC_MLPERF_SKIP_RUN'] = env.get('MLC_MLPERF_SKIP_RUN', "no") - mlperf_path = env['CM_MLPERF_INFERENCE_SOURCE'] + mlperf_path = env['MLC_MLPERF_INFERENCE_SOURCE'] submission_checker_dir = os.path.join(mlperf_path, "tools", "submission") sys.path.append(submission_checker_dir) - version = env.get('CM_MLPERF_INFERENCE_VERSION', "4.1") + version = env.get('MLC_MLPERF_INFERENCE_VERSION', "4.1") required_files = [] required_files = get_checker_files() - if 'CM_MLPERF_LOADGEN_SCENARIO' not in env: - env['CM_MLPERF_LOADGEN_SCENARIO'] = "Offline" + if 'MLC_MLPERF_LOADGEN_SCENARIO' not in env: + env['MLC_MLPERF_LOADGEN_SCENARIO'] = "Offline" - if 'CM_MLPERF_LOADGEN_MODE' not in env: + if 'MLC_MLPERF_LOADGEN_MODE' not in env: print("\nNo mode given. 
Using accuracy as default\n") - env['CM_MLPERF_LOADGEN_MODE'] = "accuracy" + env['MLC_MLPERF_LOADGEN_MODE'] = "accuracy" if env.get('OUTPUT_BASE_DIR', '') == '': env['OUTPUT_BASE_DIR'] = env.get( - 'CM_MLPERF_INFERENCE_RESULTS_DIR', os.getcwd()) + 'MLC_MLPERF_INFERENCE_RESULTS_DIR', os.getcwd()) - if 'CM_NUM_THREADS' not in env: - if 'CM_MINIMIZE_THREADS' in env: - env['CM_NUM_THREADS'] = str(int(env['CM_HOST_CPU_TOTAL_CORES']) // - (int(env.get('CM_HOST_CPU_SOCKETS', '1')) * int(env.get('CM_HOST_CPU_TOTAL_CORES', '1')))) + if 'MLC_NUM_THREADS' not in env: + if 'MLC_MINIMIZE_THREADS' in env: + env['MLC_NUM_THREADS'] = str(int(env['MLC_HOST_CPU_TOTAL_CORES']) // + (int(env.get('MLC_HOST_CPU_SOCKETS', '1')) * int(env.get('MLC_HOST_CPU_TOTAL_CORES', '1')))) else: - env['CM_NUM_THREADS'] = env.get('CM_HOST_CPU_TOTAL_CORES', '1') + env['MLC_NUM_THREADS'] = env.get('MLC_HOST_CPU_TOTAL_CORES', '1') print("Using MLCommons Inference source from '" + - env['CM_MLPERF_INFERENCE_SOURCE'] + "'") + env['MLC_MLPERF_INFERENCE_SOURCE'] + "'") - if 'CM_MLPERF_CONF' not in env: - env['CM_MLPERF_CONF'] = os.path.join( - env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") + if 'MLC_MLPERF_CONF' not in env: + env['MLC_MLPERF_CONF'] = os.path.join( + env['MLC_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") RUN_CMD = "" state['RUN'] = {} - scenario = env['CM_MLPERF_LOADGEN_SCENARIO'] + scenario = env['MLC_MLPERF_LOADGEN_SCENARIO'] state['RUN'][scenario] = {} - model_full_name = env.get('CM_ML_MODEL_FULL_NAME', env['CM_MODEL']) + model_full_name = env.get('MLC_ML_MODEL_FULL_NAME', env['MLC_MODEL']) - if model_full_name != env['CM_MODEL']: - if 'model_mapping' not in state['CM_SUT_CONFIG']: - state['CM_SUT_CONFIG']['model_mappings'] = {} - state['CM_SUT_CONFIG']['model_mappings'][model_full_name] = env['CM_MODEL'] + if model_full_name != env['MLC_MODEL']: + if 'model_mapping' not in state['MLC_SUT_CONFIG']: + state['MLC_SUT_CONFIG']['model_mappings'] = {} + state['MLC_SUT_CONFIG']['model_mappings'][model_full_name] = env['MLC_MODEL'] - if model_full_name not in i['state']['CM_SUT_CONFIG'][env['CM_SUT_NAME']]: - i['state']['CM_SUT_CONFIG'][env['CM_SUT_NAME']][model_full_name] = {} + if model_full_name not in i['state']['MLC_SUT_CONFIG'][env['MLC_SUT_NAME']]: + i['state']['MLC_SUT_CONFIG'][env['MLC_SUT_NAME']][model_full_name] = {} - if scenario not in i['state']['CM_SUT_CONFIG'][env['CM_SUT_NAME'] + if scenario not in i['state']['MLC_SUT_CONFIG'][env['MLC_SUT_NAME'] ][model_full_name]: - i['state']['CM_SUT_CONFIG'][env['CM_SUT_NAME'] + i['state']['MLC_SUT_CONFIG'][env['MLC_SUT_NAME'] ][model_full_name][scenario] = {} - conf = i['state']['CM_SUT_CONFIG'][env['CM_SUT_NAME'] + conf = i['state']['MLC_SUT_CONFIG'][env['MLC_SUT_NAME'] ][model_full_name][scenario] - mode = env['CM_MLPERF_LOADGEN_MODE'] + mode = env['MLC_MLPERF_LOADGEN_MODE'] user_conf = '' - if env['CM_MLPERF_RUN_STYLE'] == "fast": - fast_factor = int(env['CM_FAST_FACTOR']) + if env['MLC_MLPERF_RUN_STYLE'] == "fast": + fast_factor = int(env['MLC_FAST_FACTOR']) else: fast_factor = 1 - ml_model_name = env['CM_MODEL'] + ml_model_name = env['MLC_MODEL'] if 'bert' in ml_model_name: ml_model_name = "bert" if 'dlrm' in ml_model_name: @@ -101,19 +101,19 @@ def preprocess(i): if scenario in ['Offline', 'Server']: metric = "target_qps" tolerance = 1.01 - # value = env.get('CM_MLPERF_LOADGEN_SERVER_TARGET_QPS') if scenario == "Server" else env.get('CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS') - value = env.get('CM_MLPERF_LOADGEN_TARGET_QPS') + # value = 
env.get('MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS') if scenario == "Server" else env.get('MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS') + value = env.get('MLC_MLPERF_LOADGEN_TARGET_QPS') elif scenario in ['SingleStream', 'MultiStream']: metric = "target_latency" - value = env.get('CM_MLPERF_LOADGEN_TARGET_LATENCY') + value = env.get('MLC_MLPERF_LOADGEN_TARGET_LATENCY') if value: if scenario == "SingleStream" and ( 1000 / float(value) * 660 < 100): - env['CM_MLPERF_USE_MAX_DURATION'] = 'no' + env['MLC_MLPERF_USE_MAX_DURATION'] = 'no' elif scenario == "MultiStream" and (1000 / float(value) * 660 < 662): - env['CM_MLPERF_USE_MAX_DURATION'] = 'no' - if env.get('CM_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() not in ["yes", "1", "true"] and env.get( - 'CM_MLPERF_USE_MAX_DURATION', "yes").lower() not in ["no", "false", "0"]: + env['MLC_MLPERF_USE_MAX_DURATION'] = 'no' + if env.get('MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() not in ["yes", "1", "true"] and env.get( + 'MLC_MLPERF_USE_MAX_DURATION', "yes").lower() not in ["no", "false", "0"]: tolerance = 0.4 # much lower because we have max_duration else: tolerance = 0.9 @@ -136,26 +136,26 @@ def preprocess(i): "Adjusted configuration value {} {}".format( metric_value, metric)) else: - # if env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes": + # if env.get("MLC_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes": if metric == "target_qps": - if env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes": + if env.get("MLC_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes": print("In find performance mode: using 1 as target_qps") else: print("No target_qps specified. Using 1 as target_qps") conf[metric] = 1 if metric == "target_latency": - if env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes": + if env.get("MLC_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes": print("In find performance mode: using 0.5ms as target_latency") else: print("No target_latency specified. Using default") - if env.get('CM_MLPERF_USE_MAX_DURATION', 'yes').lower() in ["no", "false", "0"] or env.get( - 'CM_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() in ["yes", "1", "true"]: + if env.get('MLC_MLPERF_USE_MAX_DURATION', 'yes').lower() in ["no", "false", "0"] or env.get( + 'MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() in ["yes", "1", "true"]: # Total number of queries needed is a multiple of dataset # size. So we dont use max_duration and so we need to be # careful with the input latency - if '3d-unet' in env['CM_MODEL']: + if '3d-unet' in env['MLC_MODEL']: conf[metric] = 400 - elif 'gptj' in env['CM_MODEL']: + elif 'gptj' in env['MLC_MODEL']: conf[metric] = 1000 else: conf[metric] = 100 @@ -164,93 +164,93 @@ def preprocess(i): metric_value = conf[metric] # else: # return {'return': 1, 'error': f"Config details missing for - # SUT:{env['CM_SUT_NAME']}, Model:{env['CM_MODEL']}, Scenario: + # SUT:{env['MLC_SUT_NAME']}, Model:{env['MLC_MODEL']}, Scenario: # {scenario}. 
Please input {metric} value"} # Pass the modified performance metrics to the implementation - if env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes": + if env.get("MLC_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes": if metric == "target_latency" and env.get( - 'CM_MLPERF_LOADGEN_TARGET_LATENCY', '') == '': - env['CM_MLPERF_LOADGEN_TARGET_LATENCY'] = conf[metric] - elif metric == "target_qps" and env.get('CM_MLPERF_LOADGEN_TARGET_QPS', '') == '': - env['CM_MLPERF_LOADGEN_TARGET_QPS'] = conf[metric] + 'MLC_MLPERF_LOADGEN_TARGET_LATENCY', '') == '': + env['MLC_MLPERF_LOADGEN_TARGET_LATENCY'] = conf[metric] + elif metric == "target_qps" and env.get('MLC_MLPERF_LOADGEN_TARGET_QPS', '') == '': + env['MLC_MLPERF_LOADGEN_TARGET_QPS'] = conf[metric] - if env['CM_MLPERF_RUN_STYLE'] == "fast": + if env['MLC_MLPERF_RUN_STYLE'] == "fast": if scenario == "Offline": metric_value = float(metric_value) / fast_factor if scenario in ["SingleStream", "MultiStream"]: metric_value = float(metric_value) * fast_factor - elif env['CM_MLPERF_RUN_STYLE'] == "test": + elif env['MLC_MLPERF_RUN_STYLE'] == "test": if scenario == "Offline": - metric_value = float(env.get('CM_MLPERF_INFERENCE_TEST_QPS', 1)) + metric_value = float(env.get('MLC_MLPERF_INFERENCE_TEST_QPS', 1)) if scenario in ["SingleStream"]: metric_value = 1000 - elif env['CM_MLPERF_RUN_STYLE'] == "valid": + elif env['MLC_MLPERF_RUN_STYLE'] == "valid": if scenario == "Offline": required_min_queries_offline = {} required_min_queries_offline = get_required_min_queries_offline( - env['CM_MODEL'], version) + env['MLC_MODEL'], version) if mode == "compliance" and scenario == "Server": # Adjust the server_target_qps - test = env.get("CM_MLPERF_LOADGEN_COMPLIANCE_TEST", "TEST01") + test = env.get("MLC_MLPERF_LOADGEN_COMPLIANCE_TEST", "TEST01") if test == "TEST01": metric_value = str( float(metric_value) * float( env.get( - "CM_MLPERF_TEST01_SERVER_ADJUST_FACTOR", + "MLC_MLPERF_TEST01_SERVER_ADJUST_FACTOR", 0.96))) # if test == "TEST05": - # metric_value = str(float(metric_value) * float(env.get("CM_MLPERF_TEST05_SERVER_ADJUST_FACTOR", 0.97))) + # metric_value = str(float(metric_value) * float(env.get("MLC_MLPERF_TEST05_SERVER_ADJUST_FACTOR", 0.97))) if test == "TEST04": metric_value = str( float(metric_value) * float( env.get( - "CM_MLPERF_TEST04_SERVER_ADJUST_FACTOR", + "MLC_MLPERF_TEST04_SERVER_ADJUST_FACTOR", 0.97))) conf[metric] = metric_value user_conf += ml_model_name + "." + scenario + \ "." 
+ metric + " = " + str(metric_value) + "\n" - if env.get('CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT', '') != '': - performance_sample_count = env['CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT'] + if env.get('MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT', '') != '': + performance_sample_count = env['MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT'] user_conf += ml_model_name + ".*.performance_sample_count_override = " + \ performance_sample_count + "\n" log_mode = mode - if 'CM_MLPERF_POWER' in env and mode == "performance": + if 'MLC_MLPERF_POWER' in env and mode == "performance": log_mode = "performance_power" - env['CM_MLPERF_INFERENCE_FINAL_RESULTS_DIR'] = os.path.join( - env['OUTPUT_BASE_DIR'], env['CM_OUTPUT_FOLDER_NAME']) + env['MLC_MLPERF_INFERENCE_FINAL_RESULTS_DIR'] = os.path.join( + env['OUTPUT_BASE_DIR'], env['MLC_OUTPUT_FOLDER_NAME']) sut_name = env.get( - 'CM_SUT_NAME', - env['CM_MLPERF_BACKEND'] + + 'MLC_SUT_NAME', + env['MLC_MLPERF_BACKEND'] + "-" + - env['CM_MLPERF_DEVICE']) - OUTPUT_DIR = os.path.join(env['CM_MLPERF_INFERENCE_FINAL_RESULTS_DIR'], sut_name, + env['MLC_MLPERF_DEVICE']) + OUTPUT_DIR = os.path.join(env['MLC_MLPERF_INFERENCE_FINAL_RESULTS_DIR'], sut_name, model_full_name, scenario.lower(), mode) - env['CM_MLPERF_INFERENCE_RESULTS_SUT_PATH'] = os.path.join( - env['CM_MLPERF_INFERENCE_FINAL_RESULTS_DIR'], sut_name) + env['MLC_MLPERF_INFERENCE_RESULTS_SUT_PATH'] = os.path.join( + env['MLC_MLPERF_INFERENCE_FINAL_RESULTS_DIR'], sut_name) - if 'CM_MLPERF_POWER' in env and mode == "performance": - env['CM_MLPERF_POWER_LOG_DIR'] = os.path.join(OUTPUT_DIR, "tmp_power") + if 'MLC_MLPERF_POWER' in env and mode == "performance": + env['MLC_MLPERF_POWER_LOG_DIR'] = os.path.join(OUTPUT_DIR, "tmp_power") if mode == "accuracy": pass elif mode == "performance": OUTPUT_DIR = os.path.join(OUTPUT_DIR, "run_1") elif mode == "compliance": - test = env.get("CM_MLPERF_LOADGEN_COMPLIANCE_TEST", "TEST01") + test = env.get("MLC_MLPERF_LOADGEN_COMPLIANCE_TEST", "TEST01") OUTPUT_DIR = os.path.join( env['OUTPUT_BASE_DIR'], - env['CM_OUTPUT_FOLDER_NAME'], + env['MLC_OUTPUT_FOLDER_NAME'], sut_name, model_full_name, scenario.lower(), @@ -261,12 +261,12 @@ def preprocess(i): audit_path = test audit_full_path = os.path.join( - env['CM_MLPERF_INFERENCE_SOURCE'], + env['MLC_MLPERF_INFERENCE_SOURCE'], "compliance", "nvidia", audit_path, "audit.config") - env['CM_MLPERF_INFERENCE_AUDIT_PATH'] = audit_full_path + env['MLC_MLPERF_INFERENCE_AUDIT_PATH'] = audit_full_path # copy the audit conf to the run directory incase the implementation is # not supporting the audit-conf path if not os.path.exists(OUTPUT_DIR): @@ -277,20 +277,20 @@ def preprocess(i): OUTPUT_DIR, "audit.config")) - env['CM_MLPERF_OUTPUT_DIR'] = OUTPUT_DIR - env['CM_LOGS_DIR'] = OUTPUT_DIR - env['CM_MLPERF_LOADGEN_LOGS_DIR'] = OUTPUT_DIR + env['MLC_MLPERF_OUTPUT_DIR'] = OUTPUT_DIR + env['MLC_LOGS_DIR'] = OUTPUT_DIR + env['MLC_MLPERF_LOADGEN_LOGS_DIR'] = OUTPUT_DIR if mode == "accuracy": - output_dir = env['CM_MLPERF_OUTPUT_DIR'] - env['CM_MLPERF_ACCURACY_RESULTS_DIR'] = output_dir + output_dir = env['MLC_MLPERF_OUTPUT_DIR'] + env['MLC_MLPERF_ACCURACY_RESULTS_DIR'] = output_dir else: - env['CM_MLPERF_ACCURACY_RESULTS_DIR'] = '' + env['MLC_MLPERF_ACCURACY_RESULTS_DIR'] = '' run_exists = run_files_exist(log_mode, OUTPUT_DIR, required_files, env) - if 'CM_MLPERF_POWER' in env and env.get( - 'CM_MLPERF_SHORT_RANGING_RUN', '') != 'no' and env['CM_MLPERF_RUN_STYLE'] == "valid" and mode == "performance": + if 'MLC_MLPERF_POWER' in env and env.get( + 
'MLC_MLPERF_SHORT_RANGING_RUN', '') != 'no' and env['MLC_MLPERF_RUN_STYLE'] == "valid" and mode == "performance": short_ranging = True else: short_ranging = False @@ -301,18 +301,18 @@ def preprocess(i): ranging_user_conf += ml_model_name + "." + \ scenario + ".min_duration = 300000" + "\n" - if env['CM_MLPERF_RUN_STYLE'] == "test": - max_duration_test_s = int(env.get('CM_MLPERF_MAX_DURATION_TEST', 30)) + if env['MLC_MLPERF_RUN_STYLE'] == "test": + max_duration_test_s = int(env.get('MLC_MLPERF_MAX_DURATION_TEST', 30)) max_duration_test = str(max_duration_test_s * 1000) # in milliseconds - query_count = int(env.get('CM_TEST_QUERY_COUNT', 5)) + query_count = int(env.get('MLC_TEST_QUERY_COUNT', 5)) min_query_count = int( env.get( - 'CM_MLPERF_INFERENCE_MIN_QUERY_COUNT', + 'MLC_MLPERF_INFERENCE_MIN_QUERY_COUNT', query_count)) max_query_count = max( min_query_count, int( env.get( - 'CM_MLPERF_INFERENCE_MAX_QUERY_COUNT', query_count))) + 'MLC_MLPERF_INFERENCE_MAX_QUERY_COUNT', query_count))) user_conf += ml_model_name + "." + scenario + \ ".max_query_count = " + str(max_query_count) + "\n" user_conf += ml_model_name + "." + scenario + \ @@ -320,19 +320,19 @@ def preprocess(i): user_conf += ml_model_name + "." + scenario + ".min_duration = 0" + "\n" user_conf += ml_model_name + "." + scenario + \ ".sample_concatenate_permutation = 0" + "\n" - env['CM_MLPERF_MAX_QUERY_COUNT'] = max_query_count + env['MLC_MLPERF_MAX_QUERY_COUNT'] = max_query_count # max_duration is effective for all scenarios except the Offline - if env.get('CM_MLPERF_USE_MAX_DURATION', 'yes').lower() not in [ + if env.get('MLC_MLPERF_USE_MAX_DURATION', 'yes').lower() not in [ "no", "false", "0"]: if scenario != "Offline": user_conf += ml_model_name + "." + scenario + \ f".max_duration = {max_duration_test}" + "\n" - elif env['CM_MLPERF_RUN_STYLE'] == "fast": + elif env['MLC_MLPERF_RUN_STYLE'] == "fast": user_conf += ml_model_name + "." + scenario + \ ".sample_concatenate_permutation = 0" + "\n" - max_duration_fast_s = int(env.get('CM_MLPERF_MAX_DURATION_FAST', 120)) + max_duration_fast_s = int(env.get('MLC_MLPERF_MAX_DURATION_FAST', 120)) max_duration_fast = str(max_duration_fast_s * 1000) # in milliseconds if scenario == "Server": user_conf += ml_model_name + "." + scenario + \ @@ -341,31 +341,31 @@ def preprocess(i): query_count = str(int((660 / fast_factor) * (float(target_qps)))) user_conf += ml_model_name + "." + scenario + \ ".max_query_count = " + query_count + "\n" - env['CM_MLPERF_MAX_QUERY_COUNT'] = query_count + env['MLC_MLPERF_MAX_QUERY_COUNT'] = query_count else: max_duration_valid_s = int( - env.get('CM_MLPERF_MAX_DURATION_VALID', 660)) + env.get('MLC_MLPERF_MAX_DURATION_VALID', 660)) max_duration_valid = str( max_duration_valid_s * 1000) # in milliseconds max_duration_ranging_s = int( - env.get('CM_MLPERF_MAX_DURATION_RANGING', 300)) + env.get('MLC_MLPERF_MAX_DURATION_RANGING', 300)) max_duration_ranging = str( max_duration_ranging_s * 1000) # in milliseconds if scenario == "MultiStream" or scenario == "SingleStream": - if env.get('CM_MLPERF_USE_MAX_DURATION', 'yes').lower() not in ["no", "false", "0"] and env.get( - 'CM_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() not in ["yes", "1", "true"]: + if env.get('MLC_MLPERF_USE_MAX_DURATION', 'yes').lower() not in ["no", "false", "0"] and env.get( + 'MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() not in ["yes", "1", "true"]: user_conf += ml_model_name + "." 
+ scenario + \ f".max_duration = {max_duration_valid}" + "\n" - elif env.get('CM_MLPERF_INFERENCE_MIN_DURATION', '') != '': + elif env.get('MLC_MLPERF_INFERENCE_MIN_DURATION', '') != '': user_conf += ml_model_name + "." + scenario + ".min_duration = " + \ - env['CM_MLPERF_INFERENCE_MIN_DURATION'] + " \n" + env['MLC_MLPERF_INFERENCE_MIN_DURATION'] + " \n" if scenario == "MultiStream": user_conf += ml_model_name + "." + scenario + ".min_query_count = " + \ env.get( - 'CM_MLPERF_INFERENCE_MULTISTREAM_MIN_QUERY_COUNT', + 'MLC_MLPERF_INFERENCE_MULTISTREAM_MIN_QUERY_COUNT', "662") + "\n" if short_ranging: ranging_user_conf += ml_model_name + "." + scenario + \ @@ -383,7 +383,7 @@ def preprocess(i): if query_count: # needed for squad accuracy checker - env['CM_MAX_EXAMPLES'] = str(query_count) + env['MLC_MAX_EXAMPLES'] = str(query_count) import uuid from pathlib import Path @@ -399,44 +399,44 @@ def preprocess(i): ranging_user_conf_file = Path(ranging_user_conf_path) ranging_user_conf_file.write_text(ranging_user_conf) - if (env.get('CM_MLPERF_LOADGEN_QUERY_COUNT', '') == '') and query_count and ( - (mode != "accuracy") or (env['CM_MLPERF_RUN_STYLE'] != "valid")): - env['CM_MLPERF_LOADGEN_QUERY_COUNT'] = str(query_count) + if (env.get('MLC_MLPERF_LOADGEN_QUERY_COUNT', '') == '') and query_count and ( + (mode != "accuracy") or (env['MLC_MLPERF_RUN_STYLE'] != "valid")): + env['MLC_MLPERF_LOADGEN_QUERY_COUNT'] = str(query_count) if not run_exists or rerun: print("Output Dir: '" + OUTPUT_DIR + "'") print(user_conf) - if env.get('CM_MLPERF_POWER', '') == "yes" and os.path.exists( - env.get('CM_MLPERF_POWER_LOG_DIR', '')): - shutil.rmtree(env['CM_MLPERF_POWER_LOG_DIR']) + if env.get('MLC_MLPERF_POWER', '') == "yes" and os.path.exists( + env.get('MLC_MLPERF_POWER_LOG_DIR', '')): + shutil.rmtree(env['MLC_MLPERF_POWER_LOG_DIR']) else: - if not env.get('CM_MLPERF_COMPLIANCE_RUN_POSTPONED', False): + if not env.get('MLC_MLPERF_COMPLIANCE_RUN_POSTPONED', False): print("Run files exist, skipping run...\n") - env['CM_MLPERF_SKIP_RUN'] = "yes" + env['MLC_MLPERF_SKIP_RUN'] = "yes" if not run_exists or rerun or not measure_files_exist(OUTPUT_DIR, - required_files[4]) or env.get("CM_MLPERF_LOADGEN_COMPLIANCE", "") == "yes" or env.get("CM_REGENERATE_MEASURE_FILES", False): + required_files[4]) or env.get("MLC_MLPERF_LOADGEN_COMPLIANCE", "") == "yes" or env.get("MLC_REGENERATE_MEASURE_FILES", False): - env['CM_MLPERF_TESTING_USER_CONF'] = os.path.join( + env['MLC_MLPERF_TESTING_USER_CONF'] = os.path.join( os.path.dirname(user_conf_path), key + ".conf") # user_conf_path - env['CM_MLPERF_RANGING_USER_CONF'] = os.path.join( + env['MLC_MLPERF_RANGING_USER_CONF'] = os.path.join( os.path.dirname(user_conf_path), "ranging_" + key + ".conf") # ranging_user_conf_path for a shorter run if short_ranging: - env['CM_MLPERF_USER_CONF'] = r"\${CM_MLPERF_USER_CONF}" + env['MLC_MLPERF_USER_CONF'] = r"\${MLC_MLPERF_USER_CONF}" else: - env['CM_MLPERF_USER_CONF'] = os.path.join( + env['MLC_MLPERF_USER_CONF'] = os.path.join( os.path.dirname(user_conf_path), key + ".conf") # user_conf_path else: print( f"Measure files exist at {OUTPUT_DIR}. 
Skipping regeneration...\n") - env['CM_MLPERF_USER_CONF'] = '' + env['MLC_MLPERF_USER_CONF'] = '' os.makedirs(OUTPUT_DIR, exist_ok=True) - if str(env.get('CM_MLPERF_RESULTS_DIR_SHARED', '') + if str(env.get('MLC_MLPERF_RESULTS_DIR_SHARED', '') ).lower() in ["yes", "true", "1"]: os.chmod(OUTPUT_DIR, 0o2775) @@ -500,37 +500,37 @@ def run_files_exist(mode, OUTPUT_DIR, run_files, env): "result_validity" not in mlperf_log.get_keys() or mlperf_log["result_validity"] != "VALID" ): - env['CM_MLPERF_COMPLIANCE_RUN_POSTPONED'] = True + env['MLC_MLPERF_COMPLIANCE_RUN_POSTPONED'] = True return True - test = env['CM_MLPERF_LOADGEN_COMPLIANCE_TEST'] + test = env['MLC_MLPERF_LOADGEN_COMPLIANCE_TEST'] SCRIPT_PATH = os.path.join( - env['CM_MLPERF_INFERENCE_SOURCE'], + env['MLC_MLPERF_INFERENCE_SOURCE'], "compliance", "nvidia", test, "run_verification.py") if test == "TEST06": - cmd = f"{env['CM_PYTHON_BIN_WITH_PATH']} {SCRIPT_PATH} -c {COMPLIANCE_DIR} -o {OUTPUT_DIR} --scenario {scenario} --dtype int32" + cmd = f"{env['MLC_PYTHON_BIN_WITH_PATH']} {SCRIPT_PATH} -c {COMPLIANCE_DIR} -o {OUTPUT_DIR} --scenario {scenario} --dtype int32" else: - cmd = f"{env['CM_PYTHON_BIN_WITH_PATH']} {SCRIPT_PATH} -r {RESULT_DIR} -c {COMPLIANCE_DIR} -o {OUTPUT_DIR}" + cmd = f"{env['MLC_PYTHON_BIN_WITH_PATH']} {SCRIPT_PATH} -r {RESULT_DIR} -c {COMPLIANCE_DIR} -o {OUTPUT_DIR}" print(cmd) os.system(cmd) is_valid = checker.check_compliance_perf_dir(COMPLIANCE_DIR) - if not is_valid and 'Stream' in env['CM_MLPERF_LOADGEN_SCENARIO']: + if not is_valid and 'Stream' in env['MLC_MLPERF_LOADGEN_SCENARIO']: # We have the determined latency, compliance test failed, so lets # not use max duration - env['CM_MLPERF_USE_MAX_DURATION'] = 'no' - env['CM_MLPERF_INFERENCE_MIN_DURATION'] = '990000' # Try a longer run + env['MLC_MLPERF_USE_MAX_DURATION'] = 'no' + env['MLC_MLPERF_INFERENCE_MIN_DURATION'] = '990000' # Try a longer run return is_valid if "power" in mode and env.get( - 'CM_MLPERF_SKIP_POWER_CHECKS', 'no').lower() not in ["yes", "true", "on"]: + 'MLC_MLPERF_SKIP_POWER_CHECKS', 'no').lower() not in ["yes", "true", "on"]: from power.power_checker import check as check_power_more try: is_valid = check_power_more(os.path.dirname(OUTPUT_DIR)) == 0 diff --git a/script/generate-mlperf-inference-user-conf/meta.yaml b/script/generate-mlperf-inference-user-conf/meta.yaml index c19bdcba3..fbba97b33 100644 --- a/script/generate-mlperf-inference-user-conf/meta.yaml +++ b/script/generate-mlperf-inference-user-conf/meta.yaml @@ -19,48 +19,48 @@ tags: # Default environment default_env: - CM_MLPERF_LOADGEN_MODE: accuracy - CM_MLPERF_LOADGEN_SCENARIO: Offline - CM_OUTPUT_FOLDER_NAME: test_results - CM_MLPERF_RUN_STYLE: test - CM_TEST_QUERY_COUNT: '10' - CM_FAST_FACTOR: '5' - CM_MLPERF_QUANTIZATION: off - CM_MLPERF_RESULTS_DIR_SHARED: yes + MLC_MLPERF_LOADGEN_MODE: accuracy + MLC_MLPERF_LOADGEN_SCENARIO: Offline + MLC_OUTPUT_FOLDER_NAME: test_results + MLC_MLPERF_RUN_STYLE: test + MLC_TEST_QUERY_COUNT: '10' + MLC_FAST_FACTOR: '5' + MLC_MLPERF_QUANTIZATION: off + MLC_MLPERF_RESULTS_DIR_SHARED: yes docker: real_run: False # Map script inputs to environment variables input_mapping: - count: CM_MLPERF_LOADGEN_QUERY_COUNT - hw_name: CM_HW_NAME - mode: CM_MLPERF_LOADGEN_MODE - num_threads: CM_NUM_THREADS + count: MLC_MLPERF_LOADGEN_QUERY_COUNT + hw_name: MLC_HW_NAME + mode: MLC_MLPERF_LOADGEN_MODE + num_threads: MLC_NUM_THREADS output_dir: OUTPUT_BASE_DIR - power: CM_MLPERF_POWER - regenerate_files: CM_REGENERATE_MEASURE_FILES - rerun: CM_RERUN - scenario: 
CM_MLPERF_LOADGEN_SCENARIO - test_query_count: CM_TEST_QUERY_COUNT - target_qps: CM_MLPERF_LOADGEN_TARGET_QPS - target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY - offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS - server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS - singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY - multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY - performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT + power: MLC_MLPERF_POWER + regenerate_files: MLC_REGENERATE_MEASURE_FILES + rerun: MLC_RERUN + scenario: MLC_MLPERF_LOADGEN_SCENARIO + test_query_count: MLC_TEST_QUERY_COUNT + target_qps: MLC_MLPERF_LOADGEN_TARGET_QPS + target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY + offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS + server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS + singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY + multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY + performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT # Env keys which are exposed to higher level scripts new_env_keys: - - CM_MLPERF_* - - CM_LOGS_DIR - - CM_HW_* - - CM_SUT_* - - CM_MAX_EXAMPLES + - MLC_MLPERF_* + - MLC_LOGS_DIR + - MLC_HW_* + - MLC_SUT_* + - MLC_MAX_EXAMPLES new_state_keys: - - CM_SUT_* + - MLC_SUT_* # Dependencies on other CM scripts deps: diff --git a/script/generate-mlperf-tiny-report/customize.py b/script/generate-mlperf-tiny-report/customize.py index 6efd29616..32bc3701d 100644 --- a/script/generate-mlperf-tiny-report/customize.py +++ b/script/generate-mlperf-tiny-report/customize.py @@ -13,7 +13,7 @@ def preprocess(i): cur_dir = os.getcwd() # Query cache for results dirs - env_repo_tags = env.get('CM_IMPORT_TINYMLPERF_REPO_TAGS', '').strip() + env_repo_tags = env.get('MLC_IMPORT_TINYMLPERF_REPO_TAGS', '').strip() xtags = '' if env_repo_tags == '' else ',version-' + env_repo_tags r = mlc.access({'action': 'find', @@ -45,9 +45,9 @@ def preprocess(i): run_script_input = i['run_script_input'] automation = i['automation'] - env['CM_TINYMLPERF_REPO_PATH'] = path - env['CM_TINYMLPERF_CURRENT_DIR'] = cur_dir - env['CM_TINYMLPERF_REPO_VERSION'] = version + env['MLC_TINYMLPERF_REPO_PATH'] = path + env['MLC_TINYMLPERF_CURRENT_DIR'] = cur_dir + env['MLC_TINYMLPERF_REPO_VERSION'] = version print('') print('Repo path: {}'.format(path)) @@ -65,9 +65,9 @@ def postprocess(i): env = i['env'] - path = env['CM_TINYMLPERF_REPO_PATH'] - cur_dir = env['CM_TINYMLPERF_CURRENT_DIR'] - version = env['CM_TINYMLPERF_REPO_VERSION'] + path = env['MLC_TINYMLPERF_REPO_PATH'] + cur_dir = env['MLC_TINYMLPERF_CURRENT_DIR'] + version = env['MLC_TINYMLPERF_REPO_VERSION'] for ext in ['.csv', '.xlsx']: diff --git a/script/generate-mlperf-tiny-report/meta.yaml b/script/generate-mlperf-tiny-report/meta.yaml index 3af0906f7..467226c1b 100644 --- a/script/generate-mlperf-tiny-report/meta.yaml +++ b/script/generate-mlperf-tiny-report/meta.yaml @@ -10,7 +10,7 @@ category: "MLPerf benchmark support" developers: "[Grigori Fursin](https://cKnowledge.org/gfursin)" default_env: - CM_IMPORT_TINYMLPERF_REPO_TAGS: "1.1-private" + MLC_IMPORT_TINYMLPERF_REPO_TAGS: "1.1-private" # User-friendly tags to find this CM script tags: @@ -21,7 +21,7 @@ tags: - report input_mapping: - repo_tags: CM_IMPORT_TINYMLPERF_REPO_TAGS + repo_tags: MLC_IMPORT_TINYMLPERF_REPO_TAGS # Dependencies on other CM scripts deps: diff --git a/script/generate-mlperf-tiny-report/run_submission_checker.bat 
b/script/generate-mlperf-tiny-report/run_submission_checker.bat index 5d9a6fbaf..5cd8a781a 100644 --- a/script/generate-mlperf-tiny-report/run_submission_checker.bat +++ b/script/generate-mlperf-tiny-report/run_submission_checker.bat @@ -1,10 +1,10 @@ -cd %CM_TINYMLPERF_REPO_PATH% +cd %MLC_TINYMLPERF_REPO_PATH% IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% echo. -%CM_PYTHON_BIN_WITH_PATH% submission_checker.py --input . +%MLC_PYTHON_BIN_WITH_PATH% submission_checker.py --input . IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% echo. -%CM_PYTHON_BIN_WITH_PATH% generate_final_report.py --input summary.csv +%MLC_PYTHON_BIN_WITH_PATH% generate_final_report.py --input summary.csv IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/generate-mlperf-tiny-report/run_submission_checker.sh b/script/generate-mlperf-tiny-report/run_submission_checker.sh index d858c9b22..115b52365 100644 --- a/script/generate-mlperf-tiny-report/run_submission_checker.sh +++ b/script/generate-mlperf-tiny-report/run_submission_checker.sh @@ -1,12 +1,12 @@ #!/bin/bash -cd ${CM_TINYMLPERF_REPO_PATH} +cd ${MLC_TINYMLPERF_REPO_PATH} test $? -eq 0 || exit $? echo "" -${CM_PYTHON_BIN_WITH_PATH} submission_checker.py --input . +${MLC_PYTHON_BIN_WITH_PATH} submission_checker.py --input . test $? -eq 0 || exit $? echo "" -${CM_PYTHON_BIN_WITH_PATH} generate_final_report.py --input summary.csv +${MLC_PYTHON_BIN_WITH_PATH} generate_final_report.py --input summary.csv test $? -eq 0 || exit $? diff --git a/script/generate-mlperf-tiny-submission/customize.py b/script/generate-mlperf-tiny-submission/customize.py index 45e7f2d84..32a97ef28 100644 --- a/script/generate-mlperf-tiny-submission/customize.py +++ b/script/generate-mlperf-tiny-submission/customize.py @@ -17,11 +17,11 @@ def generate_submission(i): env = i['env'] state = i['state'] inp = i['input'] - results_dir = env['CM_MLPERF_RESULTS_DIR'] + results_dir = env['MLC_MLPERF_RESULTS_DIR'] - if 'CM_MLPERF_SUBMISSION_DIR' not in env: - env['CM_MLPERF_SUBMISSION_DIR'] = os.path.join(cur_dir, "results") - submission_dir = env['CM_MLPERF_SUBMISSION_DIR'] + if 'MLC_MLPERF_SUBMISSION_DIR' not in env: + env['MLC_MLPERF_SUBMISSION_DIR'] = os.path.join(cur_dir, "results") + submission_dir = env['MLC_MLPERF_SUBMISSION_DIR'] if not os.path.isdir(submission_dir): os.makedirs(submission_dir) @@ -37,7 +37,7 @@ def generate_submission(i): if division not in ['open', 'closed']: return {'return': 1, 'error': '"division" must be "open" or "closed"'} - system_meta = state['CM_SUT_META'] + system_meta = state['MLC_SUT_META'] division = system_meta['division'] print('* MLPerf tiny division: {}'.format(division)) @@ -49,7 +49,7 @@ def generate_submission(i): # Check submitter submitter = system_meta['submitter'] - env['CM_MLPERF_SUBMITTER'] = submitter + env['MLC_MLPERF_SUBMITTER'] = submitter print('* MLPerf tiny submitter: {}'.format(submitter)) diff --git a/script/generate-mlperf-tiny-submission/meta.yaml b/script/generate-mlperf-tiny-submission/meta.yaml index e6f112c42..5b6bce128 100644 --- a/script/generate-mlperf-tiny-submission/meta.yaml +++ b/script/generate-mlperf-tiny-submission/meta.yaml @@ -11,7 +11,7 @@ deps: - tags: get,sut,system-description post_deps: - enable_if_env: - CM_MLPERF_RUN_STYLE: + MLC_MLPERF_RUN_STYLE: - valid tags: - generate diff --git a/script/generate-nvidia-engine/customize.py b/script/generate-nvidia-engine/customize.py index 832e32e6c..11a97df9c 100644 --- a/script/generate-nvidia-engine/customize.py +++ b/script/generate-nvidia-engine/customize.py @@ -10,21 +10,21 @@ def 
preprocess(i): if os_info['platform'] == 'windows': return {'return': 1, 'error': 'Windows is not supported in this script yet'} - if 'CM_MODEL' not in env: + if 'MLC_MODEL' not in env: return { 'return': 1, 'error': 'Please select a variation specifying the model to run'} - if 'CM_MLPERF_DEVICE' not in env: + if 'MLC_MLPERF_DEVICE' not in env: return { 'return': 1, 'error': 'Please select a variation specifying the device to run on'} # will later extend to other scenarios - scenarios = env['CM_LOADGEN_SCENARIO'] + scenarios = env['MLC_LOADGEN_SCENARIO'] cmd = " --action generate_engines " +\ - " --benchmarks " + env['CM_MODEL'] + \ + " --benchmarks " + env['MLC_MODEL'] + \ " --scenarios " + scenarios + \ - " --gpu_batch_size=" + env['CM_MODEL_BATCH_SIZE'] +\ - " --gpu_copy_streams=" + env['CM_GPU_COPY_STREAMS'] +\ - " --workspace_size=" + env['CM_TENSORRT_WORKSPACE_SIZE'] + " --gpu_batch_size=" + env['MLC_MODEL_BATCH_SIZE'] +\ + " --gpu_copy_streams=" + env['MLC_GPU_COPY_STREAMS'] +\ + " --workspace_size=" + env['MLC_TENSORRT_WORKSPACE_SIZE'] ~ diff --git a/script/generate-nvidia-engine/meta.yaml b/script/generate-nvidia-engine/meta.yaml index 7a6852447..b63ba77e1 100644 --- a/script/generate-nvidia-engine/meta.yaml +++ b/script/generate-nvidia-engine/meta.yaml @@ -18,19 +18,19 @@ tags: # Default environment default_env: - CM_BATCH_COUNT: '1' - CM_BATCH_SIZE: '1' - CM_LOADGEN_SCENARIO: 'Offline' - CM_GPU_COPY_STREAMS: '1' - CM_TENSORRT_WORKSPACE_SIZE: '4194304' + MLC_BATCH_COUNT: '1' + MLC_BATCH_SIZE: '1' + MLC_LOADGEN_SCENARIO: 'Offline' + MLC_GPU_COPY_STREAMS: '1' + MLC_TENSORRT_WORKSPACE_SIZE: '4194304' # Map script inputs to environment variables input_mapping: - output_dir: CM_MLPERF_OUTPUT_DIR + output_dir: MLC_MLPERF_OUTPUT_DIR new_env_keys: - - CM_MLPERF_* - - CM_DATASET_* + - MLC_MLPERF_* + - MLC_DATASET_* # Dependencies on other CM scripts @@ -81,14 +81,14 @@ deps: # Install ResNet50 model (ONNX) and ImageNet - enable_if_env: - CM_MODEL: + MLC_MODEL: - resnet50 names: - imagenet-preprocessed tags: get,dataset,preprocessed,imagenet,_NCHW - enable_if_env: - CM_MODEL: + MLC_MODEL: - resnet50 names: - ml-model @@ -100,14 +100,14 @@ deps: # Install RetinaNet model (ONNX) and OpenImages - enable_if_env: - CM_MODEL: + MLC_MODEL: - retinanet names: - openimages-preprocessed tags: get,dataset,preprocessed,openimages,_validation,_NCHW - enable_if_env: - CM_MODEL: + MLC_MODEL: - retinanet names: - ml-model @@ -124,11 +124,11 @@ variations: group: device default: true env: - CM_MLPERF_DEVICE: cpu + MLC_MLPERF_DEVICE: cpu cuda: env: - CM_MLPERF_DEVICE: gpu - CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart + MLC_MLPERF_DEVICE: gpu + MLC_MLPERF_DEVICE_LIB_NAMESPEC: cudart # Reference MLPerf models @@ -136,17 +136,17 @@ variations: group: model default: true env: - CM_MODEL: resnet50 + MLC_MODEL: resnet50 retinanet: group: model env: - CM_MODEL: retinanet + MLC_MODEL: retinanet batch_size.#: env: - CM_MODEL_BATCH_SIZE: # + MLC_MODEL_BATCH_SIZE: # copy_streams.#: env: - CM_GPU_COPY_STREAMS: # + MLC_GPU_COPY_STREAMS: # diff --git a/script/generate-nvidia-engine/run.sh b/script/generate-nvidia-engine/run.sh index c5dd2d9a4..4372d5023 100644 --- a/script/generate-nvidia-engine/run.sh +++ b/script/generate-nvidia-engine/run.sh @@ -1,7 +1,7 @@ #!/bin/bash -nvidia_code_path=${CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH} +nvidia_code_path=${MLC_MLPERF_INFERENCE_NVIDIA_CODE_PATH} cd ${nvidia_code_path} -scenarios=${CM_TMP_LOADGEN_SCENARIOS} +scenarios=${MLC_TMP_LOADGEN_SCENARIOS} #batchsize=$ python3 code/main.py 
--action generate_engines --benchmarks resnet50 --scenarios $scenarios --gpu_batch_size=256 --gpu_copy_streams=1 --workspace_size=4194304 diff --git a/script/get-android-sdk/customize.py b/script/get-android-sdk/customize.py index c1f7aea5d..38598bc5b 100644 --- a/script/get-android-sdk/customize.py +++ b/script/get-android-sdk/customize.py @@ -24,7 +24,7 @@ def preprocess(i): if android_home == '': android_home = cur_dir - env['CM_ANDROID_HOME'] = android_home + env['MLC_ANDROID_HOME'] = android_home env['ANDROID_HOME'] = android_home paths = [] @@ -61,19 +61,19 @@ def preprocess(i): os.chdir(new_path) - cmdline_tools_version = env.get('CM_ANDROID_CMDLINE_TOOLS_VERSION', '') + cmdline_tools_version = env.get('MLC_ANDROID_CMDLINE_TOOLS_VERSION', '') - env['CM_ANDROID_CMDLINE_TOOLS_VERSION'] = cmdline_tools_version + env['MLC_ANDROID_CMDLINE_TOOLS_VERSION'] = cmdline_tools_version - package_url = env['CM_ANDROID_CMDLINE_TOOLS_URL'] + package_url = env['MLC_ANDROID_CMDLINE_TOOLS_URL'] package_url = package_url.replace( - '${CM_ANDROID_CMDLINE_TOOLS_OS}', + '${MLC_ANDROID_CMDLINE_TOOLS_OS}', host_os_for_android) package_url = package_url.replace( - '${CM_ANDROID_CMDLINE_TOOLS_VERSION}', + '${MLC_ANDROID_CMDLINE_TOOLS_VERSION}', cmdline_tools_version) - env['CM_ANDROID_CMDLINE_TOOLS_URL'] = package_url + env['MLC_ANDROID_CMDLINE_TOOLS_URL'] = package_url print('') print('Downloading from {} ...'.format(package_url)) @@ -114,10 +114,10 @@ def preprocess(i): sdk_manager_dir = os.path.dirname(sdk_manager_path) - env['CM_ANDROID_SDK_MANAGER_BIN'] = sdk_manager_file - env['CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH'] = sdk_manager_path + env['MLC_ANDROID_SDK_MANAGER_BIN'] = sdk_manager_file + env['MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH'] = sdk_manager_path - env['CM_GET_DEPENDENT_CACHED_PATH'] = cur_dir + env['MLC_GET_DEPENDENT_CACHED_PATH'] = cur_dir paths.append(sdk_manager_dir) @@ -129,41 +129,41 @@ def preprocess(i): if r['return'] > 0: return r - build_tools_version = env['CM_ANDROID_BUILD_TOOLS_VERSION'] + build_tools_version = env['MLC_ANDROID_BUILD_TOOLS_VERSION'] path_build_tools = os.path.join( android_home, 'build-tools', build_tools_version) - env['CM_ANDROID_BUILD_TOOLS_PATH'] = path_build_tools + env['MLC_ANDROID_BUILD_TOOLS_PATH'] = path_build_tools paths.append(path_build_tools) - cmake_version = env['CM_ANDROID_CMAKE_VERSION'] + cmake_version = env['MLC_ANDROID_CMAKE_VERSION'] path_cmake = os.path.join(android_home, 'cmake', cmake_version, 'bin') - env['CM_ANDROID_CMAKE_PATH'] = path_cmake + env['MLC_ANDROID_CMAKE_PATH'] = path_cmake paths.append(path_cmake) path_emulator = os.path.join(android_home, 'emulator') - env['CM_ANDROID_EMULATOR_PATH'] = path_emulator + env['MLC_ANDROID_EMULATOR_PATH'] = path_emulator paths.append(path_emulator) path_platform_tools = os.path.join(android_home, 'platform-tools') - env['CM_ANDROID_PLATFORM_TOOLS_PATH'] = path_platform_tools + env['MLC_ANDROID_PLATFORM_TOOLS_PATH'] = path_platform_tools paths.append(path_platform_tools) - android_version = env['CM_ANDROID_VERSION'] + android_version = env['MLC_ANDROID_VERSION'] path_platforms = os.path.join(android_home, 'platforms', android_version) - env['CM_ANDROID_PLATFORMS_PATH'] = path_platforms + env['MLC_ANDROID_PLATFORMS_PATH'] = path_platforms path_tools = os.path.join(android_home, 'tools') - env['CM_ANDROID_TOOLS_PATH'] = path_tools + env['MLC_ANDROID_TOOLS_PATH'] = path_tools paths.append(path_tools) - android_ndk_version = env['CM_ANDROID_NDK_VERSION'] + android_ndk_version = 
env['MLC_ANDROID_NDK_VERSION'] # Check Android NDK path_ndk = os.path.join(android_home, 'ndk', android_ndk_version) - env['CM_ANDROID_NDK_PATH'] = path_ndk + env['MLC_ANDROID_NDK_PATH'] = path_ndk env['ANDROID_NDK_HOME'] = path_ndk path_ndk_compiler = os.path.join( @@ -173,8 +173,8 @@ def preprocess(i): 'prebuilt', host_os_for_ndk, 'bin') - env['CM_ANDROID_LLVM_PATH'] = path_ndk_compiler - env['CM_ANDROID_LLVM_CLANG_BIN_WITH_PATH'] = os.path.join( + env['MLC_ANDROID_LLVM_PATH'] = path_ndk_compiler + env['MLC_ANDROID_LLVM_CLANG_BIN_WITH_PATH'] = os.path.join( path_ndk_compiler, 'clang.exe') paths.append(path_ndk_compiler) diff --git a/script/get-android-sdk/meta.yaml b/script/get-android-sdk/meta.yaml index a4da9f94f..f8d1edb6d 100644 --- a/script/get-android-sdk/meta.yaml +++ b/script/get-android-sdk/meta.yaml @@ -4,23 +4,23 @@ automation_uid: 5b4e0237da074764 cache: true category: Detection or installation of tools and artifacts default_env: - CM_ANDROID_BUILD_TOOLS_VERSION: 29.0.3 - CM_ANDROID_CMAKE_VERSION: 3.6.4111459 - CM_ANDROID_CMDLINE_TOOLS_URL: https://dl.google.com/android/repository/commandlinetools-${CM_ANDROID_CMDLINE_TOOLS_OS}-${CM_ANDROID_CMDLINE_TOOLS_VERSION}_latest.zip - CM_ANDROID_CMDLINE_TOOLS_VERSION: '9123335' - CM_ANDROID_NDK_VERSION: 21.3.6528147 - CM_ANDROID_VERSION: '30' + MLC_ANDROID_BUILD_TOOLS_VERSION: 29.0.3 + MLC_ANDROID_CMAKE_VERSION: 3.6.4111459 + MLC_ANDROID_CMDLINE_TOOLS_URL: https://dl.google.com/android/repository/commandlinetools-${MLC_ANDROID_CMDLINE_TOOLS_OS}-${MLC_ANDROID_CMDLINE_TOOLS_VERSION}_latest.zip + MLC_ANDROID_CMDLINE_TOOLS_VERSION: '9123335' + MLC_ANDROID_NDK_VERSION: 21.3.6528147 + MLC_ANDROID_VERSION: '30' deps: - tags: detect,os - tags: get,java input_mapping: - android_cmake_version: CM_ANDROID_CMAKE_VERSION - android_ndk_version: CM_ANDROID_NDK_VERSION - android_version: CM_ANDROID_VERSION - build_tools_version: CM_ANDROID_BUILD_TOOLS_VERSION - cmdline_tools_version: CM_ANDROID_CMDLINE_TOOLS_VERSION + android_cmake_version: MLC_ANDROID_CMAKE_VERSION + android_ndk_version: MLC_ANDROID_NDK_VERSION + android_version: MLC_ANDROID_VERSION + build_tools_version: MLC_ANDROID_BUILD_TOOLS_VERSION + cmdline_tools_version: MLC_ANDROID_CMDLINE_TOOLS_VERSION new_env_keys: -- CM_ANDROID_HOME +- MLC_ANDROID_HOME - ANDROID_HOME - ANDROID_NDK_HOME - +PATH diff --git a/script/get-android-sdk/prepare-sdk-manager.bat b/script/get-android-sdk/prepare-sdk-manager.bat index 5b1add122..33814d57f 100644 --- a/script/get-android-sdk/prepare-sdk-manager.bat +++ b/script/get-android-sdk/prepare-sdk-manager.bat @@ -1,27 +1,27 @@ -echo %CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH% +echo %MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH% -call %CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH% --version > tmp-ver.out +call %MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH% --version > tmp-ver.out IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% more tmp-ver.out -call %CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH% --licenses +call %MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH% --licenses IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% -call %CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH% ^ +call %MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH% ^ "tools" ^ "platform-tools" ^ "extras;android;m2repository" ^ "extras;google;m2repository" ^ "extras;google;google_play_services" ^ - "build-tools;%CM_ANDROID_BUILD_TOOLS_VERSION%" + "build-tools;%MLC_ANDROID_BUILD_TOOLS_VERSION%" IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% -call %CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH% "platforms;android-%CM_ANDROID_VERSION%" +call %MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH% 
"platforms;android-%MLC_ANDROID_VERSION%" IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% -call %CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH% "cmake;%CM_ANDROID_CMAKE_VERSION%" +call %MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH% "cmake;%MLC_ANDROID_CMAKE_VERSION%" IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% -call %CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH% "ndk;%CM_ANDROID_NDK_VERSION%" +call %MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH% "ndk;%MLC_ANDROID_NDK_VERSION%" IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/get-android-sdk/prepare-sdk-manager.sh b/script/get-android-sdk/prepare-sdk-manager.sh index 8613a43b1..9161e1355 100644 --- a/script/get-android-sdk/prepare-sdk-manager.sh +++ b/script/get-android-sdk/prepare-sdk-manager.sh @@ -1,26 +1,26 @@ echo ${JAVA_HOME} -echo ${CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH} +echo ${MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH} -${CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH} --version > tmp-ver.out +${MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH} --version > tmp-ver.out cat tmp-ver.out -${CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH} --licenses +${MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH} --licenses test $? -eq 0 || exit 1 -${CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH} \ +${MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH} \ "tools" \ "platform-tools" \ "extras;android;m2repository" \ "extras;google;m2repository" \ "extras;google;google_play_services" \ - "build-tools;${CM_ANDROID_BUILD_TOOLS_VERSION}" + "build-tools;${MLC_ANDROID_BUILD_TOOLS_VERSION}" test $? -eq 0 || exit 1 -${CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH} "platforms;android-${CM_ANDROID_VERSION}" +${MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH} "platforms;android-${MLC_ANDROID_VERSION}" test $? -eq 0 || exit 1 -${CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH} "cmake;${CM_ANDROID_CMAKE_VERSION}" +${MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH} "cmake;${MLC_ANDROID_CMAKE_VERSION}" test $? -eq 0 || exit 1 -${CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH} "ndk;${CM_ANDROID_NDK_VERSION}" +${MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH} "ndk;${MLC_ANDROID_NDK_VERSION}" test $? 
-eq 0 || exit 1 diff --git a/script/get-aocl/customize.py b/script/get-aocl/customize.py index 67b95ed28..a9ad06398 100644 --- a/script/get-aocl/customize.py +++ b/script/get-aocl/customize.py @@ -12,7 +12,7 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') return {'return': 0} @@ -21,15 +21,15 @@ def postprocess(i): env = i['env'] - env['CM_AOCL_SRC_PATH'] = env['CM_GIT_REPO_CHECKOUT_PATH'] - env['CM_AOCL_BUILD_PATH'] = os.path.join( - env['CM_GIT_REPO_CHECKOUT_PATH'], "build") + env['MLC_AOCL_SRC_PATH'] = env['MLC_GIT_REPO_CHECKOUT_PATH'] + env['MLC_AOCL_BUILD_PATH'] = os.path.join( + env['MLC_GIT_REPO_CHECKOUT_PATH'], "build") aocl_lib_path = os.path.join( - env['CM_GIT_REPO_CHECKOUT_PATH'], + env['MLC_GIT_REPO_CHECKOUT_PATH'], "build", "aocl-release", "src") - env['CM_AOCL_LIB_PATH'] = aocl_lib_path + env['MLC_AOCL_LIB_PATH'] = aocl_lib_path env['+LIBRARY_PATH'] = [aocl_lib_path] if '+LIBRARY_PATH' not in env else env['+LIBRARY_PATH'] + [aocl_lib_path] env['+LD_LIBRARY_PATH'] = [aocl_lib_path] if '+LD_LIBRARY_PATH' not in env else env['+LD_LIBRARY_PATH'] + [aocl_lib_path] diff --git a/script/get-aocl/meta.yaml b/script/get-aocl/meta.yaml index 061d6829e..66bd6b660 100644 --- a/script/get-aocl/meta.yaml +++ b/script/get-aocl/meta.yaml @@ -8,12 +8,12 @@ deps: - tags: get,generic,sys-util,_libmpfr-dev - tags: get,generic-python-lib,_scons - force_env_keys: - - CM_GIT_CHECKOUT + - MLC_GIT_CHECKOUT tags: get,git,_repo.https://github.com/amd/aocl-libm-ose new_env_keys: -- CM_AOCL_BUILD_PATH -- CM_AOCL_SRC_PATH -- CM_AOCL_LIB_PATH +- MLC_AOCL_BUILD_PATH +- MLC_AOCL_SRC_PATH +- MLC_AOCL_LIB_PATH - +LD_LIBRARY_PATH - +LIBRARY_PATH tags: @@ -27,7 +27,7 @@ variations: {} versions: '4.0': env: - CM_GIT_CHECKOUT: aocl-4.0 + MLC_GIT_CHECKOUT: aocl-4.0 master: env: - CM_GIT_CHECKOUT: master + MLC_GIT_CHECKOUT: master diff --git a/script/get-aocl/run.sh b/script/get-aocl/run.sh index 1b00dd9fd..d36d37f4a 100644 --- a/script/get-aocl/run.sh +++ b/script/get-aocl/run.sh @@ -1,9 +1,9 @@ #!/bin/bash -if [[ -z ${CM_GIT_REPO_CHECKOUT_PATH} ]]; then +if [[ -z ${MLC_GIT_REPO_CHECKOUT_PATH} ]]; then echo "Git repository not found!" exit 1 fi -cd ${CM_GIT_REPO_CHECKOUT_PATH} +cd ${MLC_GIT_REPO_CHECKOUT_PATH} scons test $? -eq 0 || exit $? 
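The `get-aocl` postprocess above relies on a convention used throughout these scripts: keys prefixed with `+` (such as `+LIBRARY_PATH` and `+LD_LIBRARY_PATH`) hold lists of paths that the automation later folds into the corresponding OS variables. A minimal sketch of that append-or-create idiom, with a hypothetical `append_path` helper standing in for the inline expressions used in `customize.py`:

```python
def append_path(env, key, path):
    # '+'-prefixed keys hold lists that the framework later joins into
    # the corresponding PATH-style variable; append without clobbering.
    env[key] = [path] if key not in env else env[key] + [path]


env = {}
append_path(env, '+LD_LIBRARY_PATH', '/opt/aocl/build/aocl-release/src')
append_path(env, '+LD_LIBRARY_PATH', '/usr/local/lib')
print(env['+LD_LIBRARY_PATH'])
# ['/opt/aocl/build/aocl-release/src', '/usr/local/lib']
```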
diff --git a/script/get-aria2/customize.py b/script/get-aria2/customize.py index c45449430..f52b1d3bf 100644 --- a/script/get-aria2/customize.py +++ b/script/get-aria2/customize.py @@ -16,7 +16,7 @@ def preprocess(i): file_name = file_name_core + \ '.exe' if os_info['platform'] == 'windows' else file_name_core - force_install = env.get('CM_FORCE_INSTALL', False) == True + force_install = env.get('MLC_FORCE_INSTALL', False) == True if not force_install: r = i['automation'].find_artifact({'file_name': file_name, @@ -24,7 +24,7 @@ def preprocess(i): 'os_info': os_info, 'default_path_env_key': 'PATH', 'detect_version': True, - 'env_path_key': 'CM_ARIA2_BIN_WITH_PATH', + 'env_path_key': 'MLC_ARIA2_BIN_WITH_PATH', 'run_script_input': i['run_script_input'], 'recursion_spaces': recursion_spaces}) if r['return'] > 0: @@ -37,9 +37,9 @@ def preprocess(i): # Force install if force_install: # Attempt to run installer - version = env.get('CM_VERSION', '') + version = env.get('MLC_VERSION', '') if version == '': - version = env['CM_ARIA2_DEFAULT_INSTALL_VERSION'] + version = env['MLC_ARIA2_DEFAULT_INSTALL_VERSION'] if os_info['platform'] == 'windows': archive = 'aria2-{}-win-64bit-build1' @@ -53,15 +53,15 @@ def preprocess(i): archive = archive.format(version) archive_with_ext = archive + ext - env['CM_ARIA2_DOWNLOAD_DIR'] = archive + env['MLC_ARIA2_DOWNLOAD_DIR'] = archive - env['CM_ARIA2_DOWNLOAD_FILE'] = archive_with_ext + env['MLC_ARIA2_DOWNLOAD_FILE'] = archive_with_ext if ext2 != '': - env['CM_ARIA2_DOWNLOAD_FILE2'] = archive + ext2 + env['MLC_ARIA2_DOWNLOAD_FILE2'] = archive + ext2 url = 'https://github.com/aria2/aria2/releases/download/release-{}/{}'.format( version, archive_with_ext) - env['CM_ARIA2_DOWNLOAD_URL'] = url + env['MLC_ARIA2_DOWNLOAD_URL'] = url print('URL to download ARIA2: {}'.format(url)) @@ -71,7 +71,7 @@ def preprocess(i): return r if os_info['platform'] == 'windows' or env.get( - 'CM_ARIA2_BUILD_FROM_SRC', '').lower() == 'true': + 'MLC_ARIA2_BUILD_FROM_SRC', '').lower() == 'true': install_path = os.path.join(os.getcwd(), archive) path_to_file = os.path.join(install_path, file_name) @@ -79,18 +79,18 @@ def preprocess(i): return {'return': 1, 'error': 'file not found: {}'.format(path_to_file)} - env['CM_ARIA2_BIN_WITH_PATH'] = path_to_file - env['CM_ARIA2_INSTALLED_TO_CACHE'] = 'yes' + env['MLC_ARIA2_BIN_WITH_PATH'] = path_to_file + env['MLC_ARIA2_INSTALLED_TO_CACHE'] = 'yes' else: - path_to_bin = r['env_tmp'].get('CM_ARIA2_BIN_WITH_PATH', '') - env['CM_ARIA2_BIN_WITH_PATH'] = path_to_bin + path_to_bin = r['env_tmp'].get('MLC_ARIA2_BIN_WITH_PATH', '') + env['MLC_ARIA2_BIN_WITH_PATH'] = path_to_bin r = i['automation'].find_artifact({'file_name': file_name, 'env': env, 'os_info': os_info, 'default_path_env_key': 'PATH', 'detect_version': True, - 'env_path_key': 'CM_ARIA2_BIN_WITH_PATH', + 'env_path_key': 'MLC_ARIA2_BIN_WITH_PATH', 'run_script_input': i['run_script_input'], 'recursion_spaces': recursion_spaces}) if r['return'] > 0: @@ -104,7 +104,7 @@ def detect_version(i): r = i['automation'].parse_version({'match_text': r'aria2 version\s*([\d.]+)', 'group_number': 1, - 'env_key': 'CM_ARIA2_VERSION', + 'env_key': 'MLC_ARIA2_VERSION', 'which_env': i['env']}) if r['return'] > 0: return r @@ -123,13 +123,13 @@ def postprocess(i): return r version = r['version'] - found_file_path = env['CM_ARIA2_BIN_WITH_PATH'] + found_file_path = env['MLC_ARIA2_BIN_WITH_PATH'] found_path = os.path.dirname(found_file_path) - env['CM_ARIA2_INSTALLED_PATH'] = found_path + env['MLC_ARIA2_INSTALLED_PATH'] 
= found_path - if env.get('CM_ARIA2_INSTALLED_TO_CACHE', '') == 'yes': - env['+PATH'] = [env['CM_ARIA2_INSTALLED_PATH']] + if env.get('MLC_ARIA2_INSTALLED_TO_CACHE', '') == 'yes': + env['+PATH'] = [env['MLC_ARIA2_INSTALLED_PATH']] return {'return': 0, 'version': version} diff --git a/script/get-aria2/install.bat b/script/get-aria2/install.bat index 6255f0caf..baeca0e3f 100644 --- a/script/get-aria2/install.bat +++ b/script/get-aria2/install.bat @@ -1,9 +1,9 @@ echo. -del /Q /S %CM_ARIA2_DOWNLOAD_FILE% +del /Q /S %MLC_ARIA2_DOWNLOAD_FILE% -wget --no-check-certificate %CM_ARIA2_DOWNLOAD_URL% +wget --no-check-certificate %MLC_ARIA2_DOWNLOAD_URL% IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% -unzip -o -q %CM_ARIA2_DOWNLOAD_FILE% +unzip -o -q %MLC_ARIA2_DOWNLOAD_FILE% IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/get-aria2/install.sh b/script/get-aria2/install.sh index d9424732d..aa865ed74 100644 --- a/script/get-aria2/install.sh +++ b/script/get-aria2/install.sh @@ -2,24 +2,24 @@ echo "" -if [[ "${CM_ARIA2_BUILD_FROM_SRC}" == "True" ]]; then +if [[ "${MLC_ARIA2_BUILD_FROM_SRC}" == "True" ]]; then echo "Building from sources ..." echo "" - rm -rf ${CM_ARIA2_DOWNLOAD_FILE} - rm -rf ${CM_ARIA2_DOWNLOAD_FILE2} + rm -rf ${MLC_ARIA2_DOWNLOAD_FILE} + rm -rf ${MLC_ARIA2_DOWNLOAD_FILE2} - wget --no-check-certificate ${CM_ARIA2_DOWNLOAD_URL} + wget --no-check-certificate ${MLC_ARIA2_DOWNLOAD_URL} test $? -eq 0 || exit $? - bzip2 -d ${CM_ARIA2_DOWNLOAD_FILE} + bzip2 -d ${MLC_ARIA2_DOWNLOAD_FILE} test $? -eq 0 || exit $? - tar xvf ${CM_ARIA2_DOWNLOAD_FILE2} + tar xvf ${MLC_ARIA2_DOWNLOAD_FILE2} test $? -eq 0 || exit $? - cd ${CM_ARIA2_DOWNLOAD_DIR} + cd ${MLC_ARIA2_DOWNLOAD_DIR} test $? -eq 0 || exit $? ./configure --prefix=$PWD/bin @@ -35,13 +35,13 @@ else echo "Installing binary via sudo ..." echo "" - cmd="sudo ${CM_HOST_OS_PACKAGE_MANAGER} install aria2" + cmd="sudo ${MLC_HOST_OS_PACKAGE_MANAGER} install aria2" echo "$cmd" $cmd test $? -eq 0 || exit $? 
path_to_bin=`which aria2c` - echo "CM_ARIA2_BIN_WITH_PATH=$path_to_bin" > tmp-run-env.out + echo "MLC_ARIA2_BIN_WITH_PATH=$path_to_bin" > tmp-run-env.out fi diff --git a/script/get-aria2/meta.yaml b/script/get-aria2/meta.yaml index 6fdd8bb17..79981d1d8 100644 --- a/script/get-aria2/meta.yaml +++ b/script/get-aria2/meta.yaml @@ -9,8 +9,8 @@ cache: true category: Detection or installation of tools and artifacts input_mapping: - install: CM_FORCE_INSTALL - src: CM_ARIA2_BUILD_FROM_SRC + install: MLC_FORCE_INSTALL + src: MLC_ARIA2_BUILD_FROM_SRC deps: - tags: detect,cpu @@ -21,15 +21,15 @@ deps: # - tags: print,native,hello-world env: - CM_REQUIRE_INSTALL: no - CM_ARIA2_DEFAULT_INSTALL_VERSION: "1.37.0" + MLC_REQUIRE_INSTALL: no + MLC_ARIA2_DEFAULT_INSTALL_VERSION: "1.37.0" new_env_keys: - - CM_ARIA2_* + - MLC_ARIA2_* - +PATH print_env_at_the_end: - CM_ARIA2_INSTALLED_PATH: Path to the tool + MLC_ARIA2_INSTALLED_PATH: Path to the tool tags: - get diff --git a/script/get-aria2/run.bat b/script/get-aria2/run.bat index 625b7edc0..eb4f33ef4 100644 --- a/script/get-aria2/run.bat +++ b/script/get-aria2/run.bat @@ -1,4 +1,4 @@ rem Detect version -%CM_ARIA2_BIN_WITH_PATH% --version > tmp-ver.out +%MLC_ARIA2_BIN_WITH_PATH% --version > tmp-ver.out IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/get-aria2/run.sh b/script/get-aria2/run.sh index 85ba9421a..e44fb6e3d 100644 --- a/script/get-aria2/run.sh +++ b/script/get-aria2/run.sh @@ -2,5 +2,5 @@ # Detect version -${CM_ARIA2_BIN_WITH_PATH} --version > tmp-ver.out +${MLC_ARIA2_BIN_WITH_PATH} --version > tmp-ver.out test $? -eq 0 || exit 1 diff --git a/script/get-aws-cli/README-extra.md b/script/get-aws-cli/README-extra.md index 7c8475871..94c96ea86 100644 --- a/script/get-aws-cli/README-extra.md +++ b/script/get-aws-cli/README-extra.md @@ -2,7 +2,7 @@ This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed aws-cli on the system and if not found calls the [install script for aws-cli](../script/install-aws-cli). ## Exported Variables -* `CM_AWS_BIN_WITH_PATH` +* `MLC_AWS_BIN_WITH_PATH` ## Supported and Tested OS 1. 
Ubuntu 18.04, 20.04, 22.04 diff --git a/script/get-aws-cli/customize.py b/script/get-aws-cli/customize.py index 281127a9b..1da3b4d97 100644 --- a/script/get-aws-cli/customize.py +++ b/script/get-aws-cli/customize.py @@ -12,18 +12,18 @@ def preprocess(i): file_name = 'aws.exe' if os_info['platform'] == 'windows' else 'aws' env['FILE_NAME'] = file_name - if 'CM_AWS_BIN_WITH_PATH' not in env: + if 'MLC_AWS_BIN_WITH_PATH' not in env: r = i['automation'].find_artifact({'file_name': file_name, 'env': env, 'os_info': os_info, 'default_path_env_key': 'PATH', 'detect_version': True, - 'env_path_key': 'CM_AWS_BIN_WITH_PATH', + 'env_path_key': 'MLC_AWS_BIN_WITH_PATH', 'run_script_input': i['run_script_input'], 'recursion_spaces': recursion_spaces}) if r['return'] > 0: if r['return'] == 16: - env['CM_REQUIRE_INSTALL'] = "yes" + env['MLC_REQUIRE_INSTALL'] = "yes" return {'return': 0} else: return r @@ -34,7 +34,7 @@ def preprocess(i): def detect_version(i): r = i['automation'].parse_version({'match_text': r'aws-cli/([\d.]+)\s', 'group_number': 1, - 'env_key': 'CM_AWS_VERSION', + 'env_key': 'MLC_AWS_VERSION', 'which_env': i['env']}) if r['return'] > 0: return r @@ -54,11 +54,11 @@ def postprocess(i): return r version = r['version'] - found_file_path = env['CM_AWS_BIN_WITH_PATH'] + found_file_path = env['MLC_AWS_BIN_WITH_PATH'] found_path = os.path.dirname(found_file_path) - env['CM_AWS_INSTALLED_PATH'] = found_path + env['MLC_AWS_INSTALLED_PATH'] = found_path - env['CM_AWS_CACHE_TAGS'] = 'version-' + version + env['MLC_AWS_CACHE_TAGS'] = 'version-' + version return {'return': 0, 'version': version} diff --git a/script/get-aws-cli/meta.yaml b/script/get-aws-cli/meta.yaml index a8017278c..63f621344 100644 --- a/script/get-aws-cli/meta.yaml +++ b/script/get-aws-cli/meta.yaml @@ -5,10 +5,10 @@ cache: true category: Cloud automation clean_files: [] new_env_keys: -- CM_AWS_* +- MLC_AWS_* prehook_deps: - enable_if_env: - CM_REQUIRE_INSTALL: + MLC_REQUIRE_INSTALL: - 'yes' reuse_version: true tags: install,aws-cli diff --git a/script/get-bazel/README-extra.md b/script/get-bazel/README-extra.md index 8e11a61bc..a0cc8d963 100644 --- a/script/get-bazel/README-extra.md +++ b/script/get-bazel/README-extra.md @@ -2,7 +2,7 @@ This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed bazel on the system and if not found calls the [install script for bazel](../script/install-bazel). ## Exported Variables -* `CM_BAZEL_BIN_WITH_PATH` +* `MLC_BAZEL_BIN_WITH_PATH` ## Supported and Tested OS 1. 
Ubuntu 18.04, 20.04, 22.04 diff --git a/script/get-bazel/customize.py b/script/get-bazel/customize.py index 416a20477..32a629ea3 100644 --- a/script/get-bazel/customize.py +++ b/script/get-bazel/customize.py @@ -12,18 +12,18 @@ def preprocess(i): file_name = 'bazel.exe' if os_info['platform'] == 'windows' else 'bazel' env['FILE_NAME'] = file_name - if 'CM_BAZEL_BIN_WITH_PATH' not in env: + if 'MLC_BAZEL_BIN_WITH_PATH' not in env: r = i['automation'].find_artifact({'file_name': file_name, 'env': env, 'os_info': os_info, 'default_path_env_key': 'PATH', 'detect_version': True, - 'env_path_key': 'CM_BAZEL_BIN_WITH_PATH', + 'env_path_key': 'MLC_BAZEL_BIN_WITH_PATH', 'run_script_input': i['run_script_input'], 'recursion_spaces': recursion_spaces}) if r['return'] > 0: if r['return'] == 16: - env['CM_REQUIRE_INSTALL'] = "yes" + env['MLC_REQUIRE_INSTALL'] = "yes" return {'return': 0} else: return r @@ -34,7 +34,7 @@ def preprocess(i): def detect_version(i): r = i['automation'].parse_version({'match_text': r'bazel\s*([\d.]+)', 'group_number': 1, - 'env_key': 'CM_BAZEL_VERSION', + 'env_key': 'MLC_BAZEL_VERSION', 'which_env': i['env']}) if r['return'] > 0: return r @@ -54,12 +54,12 @@ def postprocess(i): return r version = r['version'] - found_file_path = env['CM_BAZEL_BIN_WITH_PATH'] + found_file_path = env['MLC_BAZEL_BIN_WITH_PATH'] found_path = os.path.dirname(found_file_path) - env['CM_BAZEL_INSTALLED_PATH'] = found_path + env['MLC_BAZEL_INSTALLED_PATH'] = found_path env['+PATH'] = [found_path] - env['CM_BAZEL_CACHE_TAGS'] = 'version-' + version + env['MLC_BAZEL_CACHE_TAGS'] = 'version-' + version return {'return': 0, 'version': version} diff --git a/script/get-bazel/meta.yaml b/script/get-bazel/meta.yaml index ee5b19581..574651236 100644 --- a/script/get-bazel/meta.yaml +++ b/script/get-bazel/meta.yaml @@ -4,11 +4,11 @@ automation_uid: 5b4e0237da074764 cache: true category: Detection or installation of tools and artifacts new_env_keys: -- CM_BAZEL_* +- MLC_BAZEL_* - +PATH prehook_deps: - enable_if_env: - CM_REQUIRE_INSTALL: + MLC_REQUIRE_INSTALL: - 'yes' reuse_version: true tags: install,bazel diff --git a/script/get-bazel/run.bat b/script/get-bazel/run.bat index 1e8da4b27..9eba886b3 100644 --- a/script/get-bazel/run.bat +++ b/script/get-bazel/run.bat @@ -1,2 +1,2 @@ -%CM_BAZEL_BIN_WITH_PATH% --version > tmp-ver.out +%MLC_BAZEL_BIN_WITH_PATH% --version > tmp-ver.out IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/get-bazel/run.sh b/script/get-bazel/run.sh index e145f4638..b5084b2eb 100644 --- a/script/get-bazel/run.sh +++ b/script/get-bazel/run.sh @@ -1,6 +1,6 @@ #!/bin/bash -bazel_bin=${CM_BAZEL_BIN_WITH_PATH} -if [[ ${CM_VERSION} == "0.26.1" ]]; then +bazel_bin=${MLC_BAZEL_BIN_WITH_PATH} +if [[ ${MLC_VERSION} == "0.26.1" ]]; then ${bazel_bin} version |grep "Build label" |sed 's/Build label:/bazel/' > tmp-ver.out else ${bazel_bin} --version > tmp-ver.out diff --git a/script/get-blis/customize.py b/script/get-blis/customize.py index 3bfe968fc..19f524ef8 100644 --- a/script/get-blis/customize.py +++ b/script/get-blis/customize.py @@ -12,9 +12,9 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') - env['CM_BLIS_SRC_PATH'] = env['CM_GIT_CHECKOUT_PATH'] + env['MLC_BLIS_SRC_PATH'] = env['MLC_GIT_CHECKOUT_PATH'] return {'return': 0} @@ -22,11 +22,11 @@ def preprocess(i): def postprocess(i): env = i['env'] - install_dir = os.path.join(env['CM_BLIS_SRC_PATH'], "install") + install_dir = 
os.path.join(env['MLC_BLIS_SRC_PATH'], "install") - env['CM_BLIS_INSTALL_PATH'] = install_dir - env['CM_BLIS_INC'] = os.path.join(install_dir, 'include', 'blis') - env['CM_BLIS_LIB'] = os.path.join(install_dir, 'lib', 'libblis.a') + env['MLC_BLIS_INSTALL_PATH'] = install_dir + env['MLC_BLIS_INC'] = os.path.join(install_dir, 'include', 'blis') + env['MLC_BLIS_LIB'] = os.path.join(install_dir, 'lib', 'libblis.a') blis_lib_path = os.path.join(install_dir, 'lib') diff --git a/script/get-blis/meta.yaml b/script/get-blis/meta.yaml index 8f90c9e9d..dab16ffb9 100644 --- a/script/get-blis/meta.yaml +++ b/script/get-blis/meta.yaml @@ -6,7 +6,7 @@ category: Detection or installation of tools and artifacts default_version: master deps: - force_env_keys: - - CM_GIT_CHECKOUT + - MLC_GIT_CHECKOUT names: - blis-source-repo tags: get,git @@ -14,11 +14,11 @@ deps: input_description: {} input_mapping: {} new_env_keys: -- CM_BLIS_SRC_PATH +- MLC_BLIS_SRC_PATH - +LD_LIBRARY_PATH -- CM_BLIS_INSTALL_PATH -- CM_BLIS_INC -- CM_BLIS_LIB +- MLC_BLIS_INSTALL_PATH +- MLC_BLIS_INC +- MLC_BLIS_LIB new_state_keys: [] post_deps: [] posthook_deps: [] @@ -43,7 +43,7 @@ variations: versions: 0.9.0: env: - CM_GIT_CHECKOUT: 0.9.0 + MLC_GIT_CHECKOUT: 0.9.0 master: env: - CM_GIT_CHECKOUT: master + MLC_GIT_CHECKOUT: master diff --git a/script/get-blis/run.sh b/script/get-blis/run.sh index 4c6d91d78..756795b82 100644 --- a/script/get-blis/run.sh +++ b/script/get-blis/run.sh @@ -3,10 +3,10 @@ CUR=$PWD mkdir -p install test $? -eq 0 || exit $? INSTALL_DIR=$PWD/install -cd ${CM_BLIS_SRC_PATH} +cd ${MLC_BLIS_SRC_PATH} ./configure --prefix=$INSTALL_DIR auto test $? -eq 0 || exit $? -make -j${CM_HOST_CPU_TOTAL_PHYSICAL_CORES} +make -j${MLC_HOST_CPU_TOTAL_PHYSICAL_CORES} test $? -eq 0 || exit $? make install test $? -eq 0 || exit $? diff --git a/script/get-cache-dir/customize.py b/script/get-cache-dir/customize.py index bd5bd1468..41ac52d30 100644 --- a/script/get-cache-dir/customize.py +++ b/script/get-cache-dir/customize.py @@ -12,7 +12,7 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') return {'return': 0} @@ -22,10 +22,10 @@ def postprocess(i): env = i['env'] cache_dir = os.getcwd() - if env.get('CM_CACHE_DIR_ENV_NAME', '') != '': - env[env['CM_CACHE_DIR_ENV_NAME']] = cache_dir + if env.get('MLC_CACHE_DIR_ENV_NAME', '') != '': + env[env['MLC_CACHE_DIR_ENV_NAME']] = cache_dir - env['CM_CACHE_DIR'] = cache_dir - env['CM_GET_DEPENDENT_CACHED_PATH'] = cache_dir + env['MLC_CACHE_DIR'] = cache_dir + env['MLC_GET_DEPENDENT_CACHED_PATH'] = cache_dir return {'return': 0} diff --git a/script/get-cache-dir/meta.yaml b/script/get-cache-dir/meta.yaml index ad9695f53..e02b9a7cb 100644 --- a/script/get-cache-dir/meta.yaml +++ b/script/get-cache-dir/meta.yaml @@ -8,8 +8,8 @@ docker: run: false input_description: {} new_env_keys: -- CM_CACHE_DIR -- <<<CM_CACHE_DIR_ENV_NAME>>> +- MLC_CACHE_DIR +- <<<MLC_CACHE_DIR_ENV_NAME>>> new_state_keys: [] post_deps: [] posthook_deps: [] @@ -23,5 +23,5 @@ uid: 48f4622e059b45ce variations: name.#: env: - CM_CACHE_DIR_NAME: '#' + MLC_CACHE_DIR_NAME: '#' versions: {} diff --git a/script/get-ck/COPYRIGHT.md b/script/get-ck/COPYRIGHT.md deleted file mode 100644 index 9e44ad290..000000000 --- a/script/get-ck/COPYRIGHT.md +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright Notice - -© 2022-2025 MLCommons. All Rights Reserved. - -This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License.
A copy of the License can be obtained at: - -[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) - -Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. diff --git a/script/get-ck/README.md b/script/get-ck/README.md deleted file mode 100644 index 0f8f829cf..000000000 --- a/script/get-ck/README.md +++ /dev/null @@ -1 +0,0 @@ -Please see [https://docs.mlcommons.org/cm4mlops/scripts/Legacy-CK-support/get-ck](https://docs.mlcommons.org/cm4mlops/scripts/Legacy-CK-support/get-ck) for the documentation of this CM script. diff --git a/script/get-ck/meta.yaml b/script/get-ck/meta.yaml deleted file mode 100644 index 2dbb1fb66..000000000 --- a/script/get-ck/meta.yaml +++ /dev/null @@ -1,10 +0,0 @@ -alias: get-ck -automation_alias: script -automation_uid: 5b4e0237da074764 -cache: true -category: Legacy CK support -tags: -- get -- ck -- ck-framework -uid: 5575126797174cac diff --git a/script/get-ck/run.bat b/script/get-ck/run.bat deleted file mode 100644 index 75d92799e..000000000 --- a/script/get-ck/run.bat +++ /dev/null @@ -1 +0,0 @@ -pip install ck diff --git a/script/get-ck/run.sh b/script/get-ck/run.sh deleted file mode 100644 index eae526fd3..000000000 --- a/script/get-ck/run.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -python3 -m pip install ck ${CM_CK_FRAMEWORK_INSTALL_CLI} diff --git a/script/get-cl/customize.py b/script/get-cl/customize.py index 2bae685a2..cd6123150 100644 --- a/script/get-cl/customize.py +++ b/script/get-cl/customize.py @@ -17,13 +17,13 @@ def preprocess(i): file_name = 'cl.exe' - # Will check env['CM_TMP_PATH'] if comes from installation script + # Will check env['MLC_TMP_PATH'] if comes from installation script ii = {'file_name': file_name, 'env': env, 'os_info': os_info, 'default_path_env_key': 'PATH', 'detect_version': True, - 'env_path_key': 'CM_CL_BIN_WITH_PATH', + 'env_path_key': 'MLC_CL_BIN_WITH_PATH', 'run_script_input': i['run_script_input'], 'recursion_spaces': recursion_spaces} @@ -33,8 +33,8 @@ def preprocess(i): if rr['return'] != 16: return rr - if env.get('CM_INPUT', '').strip() == '' and env.get( - 'CM_TMP_PATH', '').strip() == '': + if env.get('MLC_INPUT', '').strip() == '' and env.get( + 'MLC_TMP_PATH', '').strip() == '': print( i['recursion_spaces'] + @@ -59,8 +59,8 @@ def preprocess(i): tmp_paths = ';'.join(found_paths) - env['CM_TMP_PATH'] = tmp_paths - env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' + env['MLC_TMP_PATH'] = tmp_paths + env['MLC_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' ii['env'] = env @@ -96,19 +96,19 @@ def preprocess(i): state['script_prefix'] = script_prefix - env['CM_CL_BIN'] = file_name - env['CM_CL_BIN_WITH_PATH'] = os.path.join(found_path, file_name) + env['MLC_CL_BIN'] = file_name + env['MLC_CL_BIN_WITH_PATH'] = os.path.join(found_path, file_name) # General compiler for general program compilation - env['CM_C_COMPILER_BIN'] = file_name - env['CM_C_COMPILER_WITH_PATH'] = os.path.join(found_path, file_name) - env['CM_C_COMPILER_FLAG_OUTPUT'] = '/Fe:' - env['CM_C_COMPILER_FLAG_VERSION'] = '' + env['MLC_C_COMPILER_BIN'] = file_name + env['MLC_C_COMPILER_WITH_PATH'] = os.path.join(found_path, file_name) + env['MLC_C_COMPILER_FLAG_OUTPUT'] = '/Fe:' + env['MLC_C_COMPILER_FLAG_VERSION'] = '' - env['CM_CXX_COMPILER_BIN'] = env['CM_C_COMPILER_BIN'] - 
env['CM_CXX_COMPILER_WITH_PATH'] = env['CM_C_COMPILER_WITH_PATH'] - env['CM_CXX_COMPILER_FLAG_OUTPUT'] = '/Fe:' - env['CM_CXX_COMPILER_FLAG_VERSION'] = '' + env['MLC_CXX_COMPILER_BIN'] = env['MLC_C_COMPILER_BIN'] + env['MLC_CXX_COMPILER_WITH_PATH'] = env['MLC_C_COMPILER_WITH_PATH'] + env['MLC_CXX_COMPILER_FLAG_OUTPUT'] = '/Fe:' + env['MLC_CXX_COMPILER_FLAG_VERSION'] = '' return {'return': 0} @@ -116,7 +116,7 @@ def preprocess(i): def detect_version(i): r = i['automation'].parse_version({'match_text': r'Version\s*([\d.]+)', 'group_number': 1, - 'env_key': 'CM_CL_VERSION', + 'env_key': 'MLC_CL_VERSION', 'which_env': i['env']}) if r['return'] > 0: return r @@ -139,9 +139,9 @@ def postprocess(i): version = r['version'] - env['CM_CL_CACHE_TAGS'] = 'version-' + version - env['CM_COMPILER_CACHE_TAGS'] = 'version-' + version + ',family-msvc' - env['CM_COMPILER_FAMILY'] = 'MSVC' - env['CM_COMPILER_VERSION'] = env['CM_CL_VERSION'] + env['MLC_CL_CACHE_TAGS'] = 'version-' + version + env['MLC_COMPILER_CACHE_TAGS'] = 'version-' + version + ',family-msvc' + env['MLC_COMPILER_FAMILY'] = 'MSVC' + env['MLC_COMPILER_VERSION'] = env['MLC_CL_VERSION'] return {'return': 0, 'version': version} diff --git a/script/get-cl/meta.yaml b/script/get-cl/meta.yaml index 2bc7741d7..8938f3463 100644 --- a/script/get-cl/meta.yaml +++ b/script/get-cl/meta.yaml @@ -6,11 +6,11 @@ category: Compiler automation clean_files: [] name: Detect or install Microsoft C compiler new_env_keys: -- CM_CL_* -- CM_C_COMPILER_* -- CM_CXX_COMPILER_* -- CM_COMPILER_* -- CM_LINKER_* +- MLC_CL_* +- MLC_C_COMPILER_* +- MLC_CXX_COMPILER_* +- MLC_COMPILER_* +- MLC_LINKER_* - +PATH new_state_keys: - script_prefix diff --git a/script/get-cl/run.bat b/script/get-cl/run.bat index 2a5fc7c9b..e56cee4a2 100644 --- a/script/get-cl/run.bat +++ b/script/get-cl/run.bat @@ -1,3 +1,3 @@ -"%CM_CL_BIN_WITH_PATH%" > tmp-ver.out 2>&1 +"%MLC_CL_BIN_WITH_PATH%" > tmp-ver.out 2>&1 IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/get-cmake/customize.py b/script/get-cmake/customize.py index b0201bed6..f276ab1bf 100644 --- a/script/get-cmake/customize.py +++ b/script/get-cmake/customize.py @@ -12,18 +12,18 @@ def preprocess(i): file_name = 'cmake.exe' if os_info['platform'] == 'windows' else 'cmake' - if 'CM_CMAKE_BIN_WITH_PATH' not in env: + if 'MLC_CMAKE_BIN_WITH_PATH' not in env: r = i['automation'].find_artifact({'file_name': file_name, 'env': env, 'os_info': os_info, 'default_path_env_key': 'PATH', 'detect_version': True, - 'env_path_key': 'CM_CMAKE_BIN_WITH_PATH', + 'env_path_key': 'MLC_CMAKE_BIN_WITH_PATH', 'run_script_input': i['run_script_input'], 'recursion_spaces': recursion_spaces}) if r['return'] > 0: if r['return'] == 16: - env['CM_REQUIRE_INSTALL'] = "yes" + env['MLC_REQUIRE_INSTALL'] = "yes" return {'return': 0} else: return r @@ -34,7 +34,7 @@ def preprocess(i): def detect_version(i): r = i['automation'].parse_version({'match_text': r'cmake version\s*([\d.]+)', 'group_number': 1, - 'env_key': 'CM_CMAKE_VERSION', + 'env_key': 'MLC_CMAKE_VERSION', 'which_env': i['env']}) if r['return'] > 0: return r @@ -53,13 +53,13 @@ def postprocess(i): return r version = r['version'] - found_file_path = env['CM_CMAKE_BIN_WITH_PATH'] + found_file_path = env['MLC_CMAKE_BIN_WITH_PATH'] found_path = os.path.dirname(found_file_path) - env['CM_CMAKE_CACHE_TAGS'] = 'version-' + version + env['MLC_CMAKE_CACHE_TAGS'] = 'version-' + version - if 'CM_HOST_CPU_TOTAL_CORES' in env: - env['CM_MAKE_CORES'] = env['CM_HOST_CPU_TOTAL_CORES'] + if 'MLC_HOST_CPU_TOTAL_CORES' in 
env: + env['MLC_MAKE_CORES'] = env['MLC_HOST_CPU_TOTAL_CORES'] return {'return': 0, 'version': version} diff --git a/script/get-cmake/meta.yaml b/script/get-cmake/meta.yaml index ae051d22a..5545297f1 100644 --- a/script/get-cmake/meta.yaml +++ b/script/get-cmake/meta.yaml @@ -6,19 +6,19 @@ category: Detection or installation of tools and artifacts deps: - tags: detect,cpu env: - CM_REQUIRE_INSTALL: 'no' + MLC_REQUIRE_INSTALL: 'no' new_env_keys: -- CM_CMAKE_* -- CM_MAKE_CORES +- MLC_CMAKE_* +- MLC_MAKE_CORES - +PATH prehook_deps: - enable_if_env: - CM_REQUIRE_INSTALL: + MLC_REQUIRE_INSTALL: - 'yes' reuse_version: true tags: install,cmake,prebuilt print_env_at_the_end: - CM_CMAKE_BIN_WITH_PATH: Path to the tool + MLC_CMAKE_BIN_WITH_PATH: Path to the tool tags: - get - cmake diff --git a/script/get-cmake/run.bat b/script/get-cmake/run.bat index 0802ae828..940bd06d2 100644 --- a/script/get-cmake/run.bat +++ b/script/get-cmake/run.bat @@ -1,2 +1,2 @@ -%CM_CMAKE_BIN_WITH_PATH% --version > tmp-ver.out +%MLC_CMAKE_BIN_WITH_PATH% --version > tmp-ver.out IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/get-cmake/run.sh b/script/get-cmake/run.sh index 6d2aeff97..9d9230232 100644 --- a/script/get-cmake/run.sh +++ b/script/get-cmake/run.sh @@ -1,5 +1,5 @@ #!/bin/bash -cmake_bin=${CM_CMAKE_BIN_WITH_PATH} +cmake_bin=${MLC_CMAKE_BIN_WITH_PATH} ${cmake_bin} --version > tmp-ver.out test $? -eq 0 || exit 1 diff --git a/script/get-cmsis_5/README-extra.md b/script/get-cmsis_5/README-extra.md deleted file mode 100644 index 1f052e7ea..000000000 --- a/script/get-cmsis_5/README-extra.md +++ /dev/null @@ -1,5 +0,0 @@ -# GET-CMSIS_5 -This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) clones the git repository of [CMSIS Version 5](https://github.com/ARM-software/CMSIS_5) and cache it in CM for reuse across other CM scripts. - -## Exported Variables -1. [CMSIS_PATH](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-cmsis_5/customize.py#L23): Location in CM cache where CMSIS_5 git repository is cloned. 
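The detect-or-install scripts touched above (`get-aria2`, `get-aws-cli`, `get-bazel`, `get-cmake`, `get-conda`) share one version-detection flow: `run.sh`/`run.bat` dumps the tool's `--version` output into `tmp-ver.out`, and `detect_version` extracts the number with a regex such as `cmake version\s*([\d.]+)`. A standalone sketch of that extraction step, assuming `parse_version` behaves like a plain `re.search` (the framework may layer caching and error handling on top):

```python
import re

# Example output that run.sh would have captured in tmp-ver.out
output = "cmake version 3.27.4\n\nCMake suite maintained and supported by Kitware"

match = re.search(r'cmake version\s*([\d.]+)', output)
if match:
    version = match.group(1)  # 'group_number': 1 in the call above
    print({'MLC_CMAKE_VERSION': version,
           'MLC_CMAKE_CACHE_TAGS': 'version-' + version})
# {'MLC_CMAKE_VERSION': '3.27.4', 'MLC_CMAKE_CACHE_TAGS': 'version-3.27.4'}
```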
diff --git a/script/get-cmsis_5/customize.py b/script/get-cmsis_5/customize.py index e5fac8d7e..099629649 100644 --- a/script/get-cmsis_5/customize.py +++ b/script/get-cmsis_5/customize.py @@ -10,10 +10,10 @@ def preprocess(i): if os_info['platform'] == 'windows': return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] - if 'CM_GIT_DEPTH' not in env: - env['CM_GIT_DEPTH'] = '' - if 'CM_GIT_RECURSE_SUBMODULES' not in env: - env['CM_GIT_RECURSE_SUBMODULES'] = '' + if 'MLC_GIT_DEPTH' not in env: + env['MLC_GIT_DEPTH'] = '' + if 'MLC_GIT_RECURSE_SUBMODULES' not in env: + env['MLC_GIT_RECURSE_SUBMODULES'] = '' return {'return': 0} diff --git a/script/get-cmsis_5/meta.yaml b/script/get-cmsis_5/meta.yaml index e28a2d5aa..95ac1ef3a 100644 --- a/script/get-cmsis_5/meta.yaml +++ b/script/get-cmsis_5/meta.yaml @@ -4,9 +4,9 @@ automation_uid: 5b4e0237da074764 cache: true category: Detection or installation of tools and artifacts default_env: - CM_GIT_DEPTH: '' - CM_GIT_PATCH: 'no' - CM_GIT_URL: https://github.com/ARM-software/CMSIS_5.git + MLC_GIT_DEPTH: '' + MLC_GIT_PATCH: 'no' + MLC_GIT_URL: https://github.com/ARM-software/CMSIS_5.git default_version: custom deps: - tags: detect,os @@ -21,18 +21,18 @@ uid: 2258c212b11443f5 variations: recurse-submodules: env: - CM_GIT_RECURSE_SUBMODULES: --recurse-submodules + MLC_GIT_RECURSE_SUBMODULES: --recurse-submodules short-history: env: - CM_GIT_DEPTH: --depth 10 + MLC_GIT_DEPTH: --depth 10 versions: custom: env: - CM_GIT_CHECKOUT: e5dc19182f6084de32d8dc5a22c84e01210f4995 - CM_GIT_SHA: 'yes' + MLC_GIT_CHECKOUT: e5dc19182f6084de32d8dc5a22c84e01210f4995 + MLC_GIT_SHA: 'yes' develop: env: - CM_GIT_CHECKOUT: develop + MLC_GIT_CHECKOUT: develop master: env: - CM_GIT_CHECKOUT: master + MLC_GIT_CHECKOUT: master diff --git a/script/get-cmsis_5/run.sh b/script/get-cmsis_5/run.sh index 9093c093b..47d1e2554 100644 --- a/script/get-cmsis_5/run.sh +++ b/script/get-cmsis_5/run.sh @@ -1,21 +1,21 @@ #!/bin/bash CUR_DIR=$PWD -SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH} +SCRIPT_DIR=${MLC_TMP_CURRENT_SCRIPT_PATH} echo "******************************************************" if [ ! -d "cmsis" ]; then - if [ -z ${CM_GIT_SHA} ]; then - echo "Cloning CMSIS_5 from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT} ${CM_GIT_DEPTH} ${CM_GIT_RECURSE_SUBMODULES}..." - git clone ${CM_GIT_RECURSE_SUBMODULES} -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} ${CM_GIT_DEPTH} cmsis + if [ -z ${MLC_GIT_SHA} ]; then + echo "Cloning CMSIS_5 from ${MLC_GIT_URL} with branch ${MLC_GIT_CHECKOUT} ${MLC_GIT_DEPTH} ${MLC_GIT_RECURSE_SUBMODULES}..." + git clone ${MLC_GIT_RECURSE_SUBMODULES} -b "${MLC_GIT_CHECKOUT}" ${MLC_GIT_URL} ${MLC_GIT_DEPTH} cmsis if [ "${?}" != "0" ]; then exit 1; fi else - echo "Cloning CMSIS_5 from ${CM_GIT_URL} with default branch and checkout ${CM_GIT_CHECKOUT} ${CM_GIT_DEPTH} ${CM_GIT_RECURSE_SUBMODULES}..." - git clone ${CM_GIT_RECURSE_SUBMODULES} ${CM_GIT_URL} ${CM_GIT_DEPTH} cmsis + echo "Cloning CMSIS_5 from ${MLC_GIT_URL} with default branch and checkout ${MLC_GIT_CHECKOUT} ${MLC_GIT_DEPTH} ${MLC_GIT_RECURSE_SUBMODULES}..." 
+ git clone ${MLC_GIT_RECURSE_SUBMODULES} ${MLC_GIT_URL} ${MLC_GIT_DEPTH} cmsis if [ "${?}" != "0" ]; then exit 1; fi cd cmsis - git checkout "${CM_GIT_CHECKOUT}" + git checkout "${MLC_GIT_CHECKOUT}" if [ "${?}" != "0" ]; then exit 1; fi fi fi diff --git a/script/get-compiler-flags/customize.py b/script/get-compiler-flags/customize.py index dd7ee775a..96463c054 100644 --- a/script/get-compiler-flags/customize.py +++ b/script/get-compiler-flags/customize.py @@ -16,16 +16,16 @@ def preprocess(i): if os_info['platform'] == 'windows': return {'return': 0} - if env.get("CM_FAST_COMPILATION") in ["yes", "on", "1"]: - DEFAULT_COMPILER_FLAGS = env.get("CM_COMPILER_FLAGS_FAST", "-O3") + if env.get("MLC_FAST_COMPILATION") in ["yes", "on", "1"]: + DEFAULT_COMPILER_FLAGS = env.get("MLC_COMPILER_FLAGS_FAST", "-O3") # -flto") - this flag is not always available - DEFAULT_LINKER_FLAGS = env.get("CM_LINKER_FLAGS_FAST", "-O3") - elif env.get("CM_DEBUG_COMPILATION") in ["yes", "on", "1"]: - DEFAULT_COMPILER_FLAGS = env.get("CM_COMPILER_FLAGS_DEBUG", "-O0") - DEFAULT_LINKER_FLAGS = env.get("CM_LINKER_FLAGS_DEBUG", "-O0") + DEFAULT_LINKER_FLAGS = env.get("MLC_LINKER_FLAGS_FAST", "-O3") + elif env.get("MLC_DEBUG_COMPILATION") in ["yes", "on", "1"]: + DEFAULT_COMPILER_FLAGS = env.get("MLC_COMPILER_FLAGS_DEBUG", "-O0") + DEFAULT_LINKER_FLAGS = env.get("MLC_LINKER_FLAGS_DEBUG", "-O0") else: - DEFAULT_COMPILER_FLAGS = env.get("CM_COMPILER_FLAGS_DEFAULT", "-O2") - DEFAULT_LINKER_FLAGS = env.get("CM_LINKER_FLAGS_DEFAULT", "-O2") + DEFAULT_COMPILER_FLAGS = env.get("MLC_COMPILER_FLAGS_DEFAULT", "-O2") + DEFAULT_LINKER_FLAGS = env.get("MLC_LINKER_FLAGS_DEFAULT", "-O2") env['+ CFLAGS'] += DEFAULT_COMPILER_FLAGS.split(" ") env['+ CXXFLAGS'] += DEFAULT_COMPILER_FLAGS.split(" ") @@ -51,15 +51,15 @@ def preprocess(i): break if 'gcc' not in out: inc_dir.append(out.strip()) - env['+CM_HOST_OS_DEFAULT_INCLUDE_PATH'] = inc_dir + env['+MLC_HOST_OS_DEFAULT_INCLUDE_PATH'] = inc_dir -# if env['CM_C_COMPILER_BIN'] == 'icc': -# if env['CM_CPUINFO_Vendor_ID'] == 'GenuineIntel': -# if int(env['CM_CPUINFO_CPU_family']) >= 0: +# if env['MLC_C_COMPILER_BIN'] == 'icc': +# if env['MLC_CPUINFO_Vendor_ID'] == 'GenuineIntel': +# if int(env['MLC_CPUINFO_CPU_family']) >= 0: # env['+ CFLAGS'] += ["-ipo"] -# if env['CM_C_COMPILER_BIN'] == 'gcc': -# if env['CM_HOST_CPU_VENDOR_ID'] == 'AMD': -# if int(env['CM_HOST_CPU_FAMILY']) >= 0: +# if env['MLC_C_COMPILER_BIN'] == 'gcc': +# if env['MLC_HOST_CPU_VENDOR_ID'] == 'AMD': +# if int(env['MLC_HOST_CPU_FAMILY']) >= 0: # env['+ CFLAGS'] += ["-march=znver2", "-flto"] return {'return': 0} diff --git a/script/get-compiler-flags/meta.yaml b/script/get-compiler-flags/meta.yaml index 080020d0d..c70bc6161 100644 --- a/script/get-compiler-flags/meta.yaml +++ b/script/get-compiler-flags/meta.yaml @@ -7,7 +7,7 @@ deps: - names: - compiler skip_if_env: - CM_C_COMPILER_BIN: + MLC_C_COMPILER_BIN: - 'on' tags: get,compiler new_env_keys: @@ -15,7 +15,7 @@ new_env_keys: - + CXXFLAGS - + FFLAGS - + LDFLAGS -- +CM_HOST_OS_DEFAULT_INCLUDE_PATH +- +MLC_HOST_OS_DEFAULT_INCLUDE_PATH tags: - get - compiler-flags diff --git a/script/get-compiler-rust/customize.py b/script/get-compiler-rust/customize.py index 01bf84b37..7481e6527 100644 --- a/script/get-compiler-rust/customize.py +++ b/script/get-compiler-rust/customize.py @@ -12,7 +12,7 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') return {'return': 0} diff --git 
a/script/get-compiler-rust/run.sh b/script/get-compiler-rust/run.sh index 4651e2fd0..28a25ced9 100644 --- a/script/get-compiler-rust/run.sh +++ b/script/get-compiler-rust/run.sh @@ -1,7 +1,7 @@ -CM_PYTHON_BIN=${CM_PYTHON_BIN_WITH_PATH:-python3} +MLC_PYTHON_BIN=${MLC_PYTHON_BIN_WITH_PATH:-python3} -${CM_PYTHON_BIN} -m pip install --upgrade pip ${CM_PYTHON_PIP_COMMON_EXTRA} -${CM_PYTHON_BIN} -m pip install setuptools testresources wheel h5py --user --upgrade --ignore-installed ${CM_PYTHON_PIP_COMMON_EXTRA} +${MLC_PYTHON_BIN} -m pip install --upgrade pip ${MLC_PYTHON_PIP_COMMON_EXTRA} +${MLC_PYTHON_BIN} -m pip install setuptools testresources wheel h5py --user --upgrade --ignore-installed ${MLC_PYTHON_PIP_COMMON_EXTRA} curl https://sh.rustup.rs -sSf -o tmp.sh sh tmp.sh -y diff --git a/script/get-conda/customize.py b/script/get-conda/customize.py index 3c44af161..691b19a1d 100644 --- a/script/get-conda/customize.py +++ b/script/get-conda/customize.py @@ -12,11 +12,11 @@ def preprocess(i): recursion_spaces = i['recursion_spaces'] - conda_prefix_name = env.get('CM_CONDA_PREFIX_NAME', '') + conda_prefix_name = env.get('MLC_CONDA_PREFIX_NAME', '') r = None file_name = 'conda.exe' if os_info['platform'] == 'windows' else 'conda' if conda_prefix_name == '': - tmp_path = env.get('CM_CONDA_INSTALL_PATH', env.get('CM_TMP_PATH', '')) + tmp_path = env.get('MLC_CONDA_INSTALL_PATH', env.get('MLC_TMP_PATH', '')) if tmp_path: x = ';' if os_info['platform'] == 'windows' else ':' tmp_path += x @@ -24,26 +24,26 @@ def preprocess(i): if os.path.exists(conda_path): tmp_path += os.path.join(os.path.expanduser("~"), "miniconda3", "bin") - env['CM_TMP_PATH'] = tmp_path + env['MLC_TMP_PATH'] = tmp_path r = i['automation'].find_artifact({'file_name': file_name, 'env': env, 'os_info': os_info, 'default_path_env_key': 'PATH', 'detect_version': True, - 'env_path_key': 'CM_CONDA_BIN_WITH_PATH', + 'env_path_key': 'MLC_CONDA_BIN_WITH_PATH', 'run_script_input': i['run_script_input'], 'recursion_spaces': recursion_spaces}) else: - env['CM_CONDA_INSTALL_PATH'] = os.path.join(os.getcwd(), "miniconda3") + env['MLC_CONDA_INSTALL_PATH'] = os.path.join(os.getcwd(), "miniconda3") bin_dir = 'Scripts' if os_info['platform'] == 'windows' else 'bin' - env['CM_CONDA_BIN_WITH_PATH'] = os.path.join( - env['CM_CONDA_INSTALL_PATH'], bin_dir, file_name) + env['MLC_CONDA_BIN_WITH_PATH'] = os.path.join( + env['MLC_CONDA_INSTALL_PATH'], bin_dir, file_name) if conda_prefix_name != '' or r['return'] > 0: if conda_prefix_name != '' or r['return'] == 16: if conda_prefix_name == '': - if env.get('CM_TMP_FAIL_IF_NOT_FOUND', '').lower() == 'yes': + if env.get('MLC_TMP_FAIL_IF_NOT_FOUND', '').lower() == 'yes': return r print(recursion_spaces + ' # {}'.format(r['error'])) @@ -56,11 +56,11 @@ def preprocess(i): # Grigori: temporal fix - should be generalized/improved above if os_info['platform'] == 'windows' and env.get( - 'CM_CONDA_BIN_WITH_PATH', '') == '': - env['CM_CONDA_INSTALL_PATH'] = os.path.join( + 'MLC_CONDA_BIN_WITH_PATH', '') == '': + env['MLC_CONDA_INSTALL_PATH'] = os.path.join( os.getcwd(), "miniconda3") - env['CM_CONDA_BIN_WITH_PATH'] = os.path.join( - env['CM_CONDA_INSTALL_PATH'], 'Scripts', file_name) + env['MLC_CONDA_BIN_WITH_PATH'] = os.path.join( + env['MLC_CONDA_INSTALL_PATH'], 'Scripts', file_name) else: found_path = r['found_path'] @@ -72,7 +72,7 @@ def preprocess(i): def detect_version(i): r = i['automation'].parse_version({'match_text': r'conda\s*([\d.]+)', 'group_number': 1, - 'env_key': 'CM_CONDA_VERSION', + 'env_key': 
'MLC_CONDA_VERSION', 'which_env': i['env']}) if r['return'] > 0: return r @@ -86,19 +86,19 @@ def postprocess(i): if r['return'] > 0: return r - conda_bin_path = os.path.dirname(env['CM_CONDA_BIN_WITH_PATH']) - env['CM_CONDA_BIN_PATH'] = conda_bin_path + conda_bin_path = os.path.dirname(env['MLC_CONDA_BIN_WITH_PATH']) + env['MLC_CONDA_BIN_PATH'] = conda_bin_path env['+PATH'] = [conda_bin_path] conda_prefix = os.path.dirname(conda_bin_path) - env['CM_CONDA_PREFIX'] = conda_prefix + env['MLC_CONDA_PREFIX'] = conda_prefix env['CONDA_PREFIX'] = conda_prefix conda_lib_path = os.path.join(conda_prefix, "lib") if os.path.exists(conda_lib_path): - env['CM_CONDA_LIB_PATH'] = conda_lib_path + env['MLC_CONDA_LIB_PATH'] = conda_lib_path env['+LD_LIBRARY_PATH'] = [conda_lib_path] env['+LIBRARY_PATH'] = [conda_lib_path] diff --git a/script/get-conda/install.sh b/script/get-conda/install.sh index 6d1888285..17bd859aa 100644 --- a/script/get-conda/install.sh +++ b/script/get-conda/install.sh @@ -4,14 +4,14 @@ curl -fsSL -v -o ~/miniconda.sh -O https://repo.anaconda.com/miniconda/Minicond test $? -eq 0 || exit $? chmod +x ~/miniconda.sh -if [ ! -z ${CM_CONDA_PREFIX_NAME} ]; then - CM_CONDA_INSTALL_PATH=$PWD/miniconda3 - rm -rf ${CM_CONDA_INSTALL_PATH} +if [ ! -z ${MLC_CONDA_PREFIX_NAME} ]; then + MLC_CONDA_INSTALL_PATH=$PWD/miniconda3 + rm -rf ${MLC_CONDA_INSTALL_PATH} fi -if [ ! -z ${CM_CONDA_INSTALL_PATH} ]; then - ~/miniconda.sh -b -p ${CM_CONDA_INSTALL_PATH} +if [ ! -z ${MLC_CONDA_INSTALL_PATH} ]; then + ~/miniconda.sh -b -p ${MLC_CONDA_INSTALL_PATH} else ~/miniconda.sh -b fi diff --git a/script/get-conda/meta.yaml b/script/get-conda/meta.yaml index 8e34801fa..1bb33b194 100644 --- a/script/get-conda/meta.yaml +++ b/script/get-conda/meta.yaml @@ -10,11 +10,11 @@ new_env_keys: - +PATH - +LD_LIBRARY_PATH - +LIBRARY_PATH -- CM_CONDA_PREFIX +- MLC_CONDA_PREFIX - CONDA_PREFIX -- CM_CONDA_BIN_PATH -- CM_CONDA_BIN_WITH_PATH -- CM_CONDA_LIB_PATH +- MLC_CONDA_BIN_PATH +- MLC_CONDA_BIN_WITH_PATH +- MLC_CONDA_LIB_PATH tags: - get - conda @@ -26,12 +26,12 @@ variations: conda-package: tags: _name.# env: - CM_CONDA_PREFIX_NAME: '#' + MLC_CONDA_PREFIX_NAME: '#' python-3.#: env: - CM_CONDA_PYTHON_VERSION: 3.# + MLC_CONDA_PYTHON_VERSION: 3.# group: conda-python python-3.8: env: - CM_CONDA_PYTHON_VERSION: '3.8' + MLC_CONDA_PYTHON_VERSION: '3.8' group: conda-python diff --git a/script/get-conda/run.bat b/script/get-conda/run.bat index 99b9d97d2..2cbb75627 100644 --- a/script/get-conda/run.bat +++ b/script/get-conda/run.bat @@ -1 +1 @@ -%CM_CONDA_BIN_WITH_PATH% --version > tmp-ver.out +%MLC_CONDA_BIN_WITH_PATH% --version > tmp-ver.out diff --git a/script/get-conda/run.sh b/script/get-conda/run.sh index 5d61f106f..e37ec0ddc 100644 --- a/script/get-conda/run.sh +++ b/script/get-conda/run.sh @@ -1,3 +1,3 @@ #!/bin/bash -${CM_CONDA_BIN_WITH_PATH} --version > tmp-ver.out +${MLC_CONDA_BIN_WITH_PATH} --version > tmp-ver.out diff --git a/script/get-croissant/meta.yaml b/script/get-croissant/meta.yaml index a024189d2..f53583122 100644 --- a/script/get-croissant/meta.yaml +++ b/script/get-croissant/meta.yaml @@ -18,7 +18,7 @@ deps: version_min: '3.10' - env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLCOMMONS_CROISSANT_PATH + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_MLCOMMONS_CROISSANT_PATH extra_cache_tags: mlcommons,croissant names: - git-mlcommons-croissant diff --git a/script/get-croissant/run.bat b/script/get-croissant/run.bat index 3177de9f6..f23b67b8f 100644 --- a/script/get-croissant/run.bat +++ b/script/get-croissant/run.bat @@ -2,13 
+2,13 @@ echo ======================================================= -cd %CM_MLCOMMONS_CROISSANT_PATH%\python\mlcroissant +cd %MLC_MLCOMMONS_CROISSANT_PATH%\python\mlcroissant IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% echo. -echo Running %CM_PYTHON_BIN_WITH_PATH% -m pip install -e .[git] +echo Running %MLC_PYTHON_BIN_WITH_PATH% -m pip install -e .[git] -%CM_PYTHON_BIN_WITH_PATH% -m pip install -e .[git] +%MLC_PYTHON_BIN_WITH_PATH% -m pip install -e .[git] IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% echo. diff --git a/script/get-croissant/run.sh b/script/get-croissant/run.sh index dd2c67bb2..3fadc239d 100644 --- a/script/get-croissant/run.sh +++ b/script/get-croissant/run.sh @@ -2,13 +2,13 @@ echo "=======================================================" -cd ${CM_MLCOMMONS_CROISSANT_PATH}/python/mlcroissant +cd ${MLC_MLCOMMONS_CROISSANT_PATH}/python/mlcroissant if [ "${?}" != "0" ]; then exit 1; fi echo "" -echo "Running ${CM_PYTHON_BIN_WITH_PATH} -m pip install -e .[git]" +echo "Running ${MLC_PYTHON_BIN_WITH_PATH} -m pip install -e .[git]" -${CM_PYTHON_BIN_WITH_PATH} -m pip install -e .[git] +${MLC_PYTHON_BIN_WITH_PATH} -m pip install -e .[git] if [ "${?}" != "0" ]; then exit 1; fi echo "" diff --git a/script/get-cuda-devices/customize.py b/script/get-cuda-devices/customize.py index 26969185d..62832a6e7 100644 --- a/script/get-cuda-devices/customize.py +++ b/script/get-cuda-devices/customize.py @@ -7,7 +7,7 @@ def preprocess(i): env = i['env'] - if str(env.get('CM_DETECT_USING_PYCUDA', '') + if str(env.get('MLC_DETECT_USING_PYCUDA', '') ).lower() in ["1", "yes", "true"]: i['run_script_input']['script_name'] = 'detect' @@ -54,11 +54,11 @@ def postprocess(i): gpu[gpu_id][key] = val p[key] = val - key_env = 'CM_CUDA_DEVICE_PROP_' + key.upper().replace(' ', '_') + key_env = 'MLC_CUDA_DEVICE_PROP_' + key.upper().replace(' ', '_') env[key_env] = val state['cm_cuda_num_devices'] = gpu_id + 1 - env['CM_CUDA_NUM_DEVICES'] = gpu_id + 1 + env['MLC_CUDA_NUM_DEVICES'] = gpu_id + 1 state['cm_cuda_device_prop'] = p state['cm_cuda_devices_prop'] = gpu diff --git a/script/get-cuda-devices/detect.sh b/script/get-cuda-devices/detect.sh index 8f6b93596..9de8aa64b 100644 --- a/script/get-cuda-devices/detect.sh +++ b/script/get-cuda-devices/detect.sh @@ -1,4 +1,4 @@ #!/bin/bash -${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/detect.py +${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/detect.py test $? -eq 0 || exit $? diff --git a/script/get-cuda-devices/meta.yaml b/script/get-cuda-devices/meta.yaml index 2d4869286..a340263e4 100644 --- a/script/get-cuda-devices/meta.yaml +++ b/script/get-cuda-devices/meta.yaml @@ -29,15 +29,15 @@ docker: skip_cm_sys_upgrade: 'yes' cm_repo_flags: '--checkout=dev' use_host_group_id: 'yes' - image_tag_extra: '-cm-dev' + image_tag_extra: '-mlc-dev' env: - CM_DETECT_USING_PYCUDA: 'no' + MLC_DETECT_USING_PYCUDA: 'no' new_env_keys: -- CM_CUDA_DEVICE_* -- CM_CUDA_NUM_DEVICES -- CM_CUDA_VERSION +- MLC_CUDA_DEVICE_* +- MLC_CUDA_NUM_DEVICES +- MLC_CUDA_VERSION new_state_keys: - cm_cuda_device_prop @@ -50,7 +50,7 @@ print_files_if_script_error: variations: with-pycuda: env: - CM_DETECT_USING_PYCUDA: 'yes' + MLC_DETECT_USING_PYCUDA: 'yes' deps: - tags: get,python3 names: diff --git a/script/get-cuda-devices/run.bat b/script/get-cuda-devices/run.bat index 4f1467c19..2b2c03d5c 100644 --- a/script/get-cuda-devices/run.bat +++ b/script/get-cuda-devices/run.bat @@ -3,22 +3,22 @@ rem Compile del a.exe echo. 
-echo NVCC path: %CM_NVCC_BIN_WITH_PATH% +echo NVCC path: %MLC_NVCC_BIN_WITH_PATH% echo. echo. echo Checking compiler version ... echo. -"%CM_NVCC_BIN_WITH_PATH%" -V +"%MLC_NVCC_BIN_WITH_PATH%" -V echo. echo Compiling program ... echo. -cd %CM_TMP_CURRENT_SCRIPT_PATH% +cd %MLC_TMP_CURRENT_SCRIPT_PATH% -"%CM_NVCC_BIN_WITH_PATH%" print_cuda_devices.cu -allow-unsupported-compiler -DWINDOWS +"%MLC_NVCC_BIN_WITH_PATH%" print_cuda_devices.cu -allow-unsupported-compiler -DWINDOWS IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% rem Return to the original path obtained in CM @@ -27,7 +27,7 @@ echo. echo Running program ... echo. -cd %CM_TMP_CURRENT_PATH% +cd %MLC_TMP_CURRENT_PATH% -%CM_TMP_CURRENT_SCRIPT_PATH%\a.exe > tmp-run.out +%MLC_TMP_CURRENT_SCRIPT_PATH%\a.exe > tmp-run.out IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/get-cuda-devices/run.sh b/script/get-cuda-devices/run.sh index 3d208dd6b..2ee43d856 100644 --- a/script/get-cuda-devices/run.sh +++ b/script/get-cuda-devices/run.sh @@ -5,22 +5,22 @@ rm a.out echo "" -echo "NVCC path: ${CM_NVCC_BIN_WITH_PATH}" +echo "NVCC path: ${MLC_NVCC_BIN_WITH_PATH}" echo "" echo "" echo "Checking compiler version ..." echo "" -${CM_NVCC_BIN_WITH_PATH} -V +${MLC_NVCC_BIN_WITH_PATH} -V echo "" echo "Compiling program ..." echo "" -cd ${CM_TMP_CURRENT_SCRIPT_PATH} +cd ${MLC_TMP_CURRENT_SCRIPT_PATH} -${CM_NVCC_BIN_WITH_PATH} -allow-unsupported-compiler print_cuda_devices.cu +${MLC_NVCC_BIN_WITH_PATH} -allow-unsupported-compiler print_cuda_devices.cu test $? -eq 0 || exit 1 # Return to the original path obtained in CM @@ -29,7 +29,7 @@ echo "" echo "Running program ..." echo "" -cd ${CM_TMP_CURRENT_PATH} +cd ${MLC_TMP_CURRENT_PATH} -${CM_TMP_CURRENT_SCRIPT_PATH}/a.out > tmp-run.out +${MLC_TMP_CURRENT_SCRIPT_PATH}/a.out > tmp-run.out test $? -eq 0 || exit 1 diff --git a/script/get-cuda/README-extra.md b/script/get-cuda/README-extra.md index c075711ff..d1d37c98c 100644 --- a/script/get-cuda/README-extra.md +++ b/script/get-cuda/README-extra.md @@ -4,9 +4,9 @@ This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/scrip and if not found calls the [install script for CUDA](../script/install-cuda-prebuilt). 
## Exported Variables -* `CM_CUDA_INSTALLED_PATH` -* `CM_CUDA_VERSION` -* `CM_NVCC_BIN_WITH_PATH` +* `MLC_CUDA_INSTALLED_PATH` +* `MLC_CUDA_VERSION` +* `MLC_NVCC_BIN_WITH_PATH` * `CUDA_HOME` * `CUDA_PATH` diff --git a/script/get-cuda/customize.py b/script/get-cuda/customize.py index c8a68c4a7..2c9ae7915 100644 --- a/script/get-cuda/customize.py +++ b/script/get-cuda/customize.py @@ -10,15 +10,15 @@ def preprocess(i): env = i['env'] if str(env.get('CUDA_SKIP_SUDO', '')).lower() == 'true': - env['CM_SUDO'] = '' + env['MLC_SUDO'] = '' recursion_spaces = i['recursion_spaces'] if os_info['platform'] == 'windows': - file_name = env['CM_TMP_FILE_TO_CHECK_WINDOWS'] + file_name = env['MLC_TMP_FILE_TO_CHECK_WINDOWS'] - if env.get('CM_INPUT', '').strip() == '' and env.get( - 'CM_TMP_PATH', '').strip() == '': + if env.get('MLC_INPUT', '').strip() == '' and env.get( + 'MLC_TMP_PATH', '').strip() == '': # Check in "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA" paths = [] for path in ["C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA", @@ -34,31 +34,31 @@ def preprocess(i): tmp_paths = ';'.join(paths) tmp_paths += ';' + os.environ.get('PATH', '') - env['CM_TMP_PATH'] = tmp_paths - env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' + env['MLC_TMP_PATH'] = tmp_paths + env['MLC_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' else: - file_name = env['CM_TMP_FILE_TO_CHECK_UNIX'] + file_name = env['MLC_TMP_FILE_TO_CHECK_UNIX'] # paths to cuda are not always in PATH - add a few typical locations to search for # (unless forced by a user) - if env.get('CM_INPUT', '').strip() == '' and env.get( - 'CM_TMP_PATH', '').strip() == '': + if env.get('MLC_INPUT', '').strip() == '' and env.get( + 'MLC_TMP_PATH', '').strip() == '': system_path = os.environ.get('PATH') if system_path: system_path = system_path + ":" - env['CM_TMP_PATH'] = system_path + \ + env['MLC_TMP_PATH'] = system_path + \ '/usr/local/cuda/bin:/usr/cuda/bin:/usr/local/cuda-11/bin:/usr/cuda-11/bin:/usr/local/cuda-12/bin:/usr/cuda-12/bin:/usr/local/packages/cuda' - env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' + env['MLC_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' - if env['CM_CUDA_FULL_TOOLKIT_INSTALL'] == "yes": - env_key = 'CM_NVCC_BIN_WITH_PATH' + if env['MLC_CUDA_FULL_TOOLKIT_INSTALL'] == "yes": + env_key = 'MLC_NVCC_BIN_WITH_PATH' path_env_key = 'PATH' else: - env_key = 'CM_CUDA_RT_WITH_PATH' + env_key = 'MLC_CUDA_RT_WITH_PATH' path_env_key = 'LD_LIBRARY_PATH' - env['CM_TMP_ENV_KEY'] = env_key + env['MLC_TMP_ENV_KEY'] = env_key if env_key not in env: r = i['automation'].find_artifact({'file_name': file_name, @@ -73,8 +73,8 @@ def preprocess(i): if os_info['platform'] == 'windows': return r - if r['return'] == 16 and env['CM_CUDA_FULL_TOOLKIT_INSTALL'] == "yes": - env['CM_REQUIRE_INSTALL'] = "yes" + if r['return'] == 16 and env['MLC_CUDA_FULL_TOOLKIT_INSTALL'] == "yes": + env['MLC_REQUIRE_INSTALL'] = "yes" return {'return': 0} else: return r @@ -84,7 +84,7 @@ def preprocess(i): def detect_version(i): env = i['env'] - if env['CM_CUDA_FULL_TOOLKIT_INSTALL'] == "yes": + if env['MLC_CUDA_FULL_TOOLKIT_INSTALL'] == "yes": return detect_version_nvcc(i) else: return detect_version_cuda_lib(i) @@ -93,7 +93,7 @@ def detect_version(i): def detect_version_nvcc(i): r = i['automation'].parse_version({'match_text': r'release\s*([\d.]+)', 'group_number': 1, - 'env_key': 'CM_CUDA_VERSION', + 'env_key': 'MLC_CUDA_VERSION', 'which_env': i['env']}) if r['return'] > 0: return r @@ -109,7 +109,7 @@ def detect_version_cuda_lib(i): env = i['env'] print(env) - cuda_rt_file_path = 
env['CM_CUDA_RT_WITH_PATH'] + cuda_rt_file_path = env['MLC_CUDA_RT_WITH_PATH'] cuda_lib_path = os.path.dirname(cuda_rt_file_path) cuda_path = os.path.abspath(os.path.join(cuda_lib_path, os.pardir)) @@ -123,7 +123,7 @@ def detect_version_cuda_lib(i): if cuda_version_info: cuda_version = cuda_version_info.get('version') - env['CM_CUDA_VERSION'] = cuda_version + env['MLC_CUDA_VERSION'] = cuda_version version = cuda_version print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) @@ -142,35 +142,35 @@ def postprocess(i): return r version = r['version'] - env['CM_CUDA_CACHE_TAGS'] = 'version-' + version + env['MLC_CUDA_CACHE_TAGS'] = 'version-' + version - found_file_path = env[env['CM_TMP_ENV_KEY']] + found_file_path = env[env['MLC_TMP_ENV_KEY']] - if env['CM_CUDA_FULL_TOOLKIT_INSTALL'] == "yes": + if env['MLC_CUDA_FULL_TOOLKIT_INSTALL'] == "yes": cuda_path_bin = os.path.dirname(found_file_path) - env['CM_CUDA_PATH_BIN'] = cuda_path_bin + env['MLC_CUDA_PATH_BIN'] = cuda_path_bin cuda_path = os.path.dirname(cuda_path_bin) - env['CM_CUDA_INSTALLED_PATH'] = cuda_path - env['CM_NVCC_BIN'] = os.path.basename(found_file_path) + env['MLC_CUDA_INSTALLED_PATH'] = cuda_path + env['MLC_NVCC_BIN'] = os.path.basename(found_file_path) else: # We traverse backwards until we find a path with include dir parent_path = os.path.dirname(found_file_path) - env['CM_CUDA_PATH_LIB'] = parent_path + env['MLC_CUDA_PATH_LIB'] = parent_path parent_path = os.path.dirname(parent_path) while os.path.isdir(parent_path): if os.path.exists(os.path.join(parent_path, "include")): print("Path is " + parent_path) found_path = parent_path cuda_path = found_path - env['CM_CUDA_INSTALLED_PATH'] = cuda_path + env['MLC_CUDA_INSTALLED_PATH'] = cuda_path break else: parent_path = os.path.dirname(parent_path) - if 'CM_CUDA_INSTALLED_PATH' not in env: + if 'MLC_CUDA_INSTALLED_PATH' not in env: return { 'return': 1, 'error': "No CUDA installation path with an include directory is found"} @@ -194,7 +194,7 @@ def postprocess(i): env['+C_INCLUDE_PATH'].append(cuda_path_include) env['+CPLUS_INCLUDE_PATH'].append(cuda_path_include) - env['CM_CUDA_PATH_INCLUDE'] = cuda_path_include + env['MLC_CUDA_PATH_INCLUDE'] = cuda_path_include # Lib if os_info['platform'] == 'windows': @@ -213,19 +213,19 @@ def postprocess(i): env['+LD_LIBRARY_PATH'].append(cuda_path_lib) env['+DYLD_FALLBACK_LIBRARY_PATH'].append(cuda_path_lib) - env['CM_CUDA_PATH_LIB'] = cuda_path_lib + env['MLC_CUDA_PATH_LIB'] = cuda_path_lib break if '+ LDFLAGS' not in env: env['+ LDFLAGS'] = [] - if 'CM_CUDA_PATH_LIB' in env and not cuda_system_path_install: - x = env['CM_CUDA_PATH_LIB'] + if 'MLC_CUDA_PATH_LIB' in env and not cuda_system_path_install: + x = env['MLC_CUDA_PATH_LIB'] if ' ' in x: x = '"' + x + '"' env['+ LDFLAGS'].append("-L" + x) - env['CM_CUDA_VERSION_STRING'] = "cu" + \ - env['CM_CUDA_VERSION'].replace(".", "") - env['CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX5'] = env['CM_CUDA_VERSION_STRING'] + env['MLC_CUDA_VERSION_STRING'] = "cu" + \ + env['MLC_CUDA_VERSION'].replace(".", "") + env['MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX5'] = env['MLC_CUDA_VERSION_STRING'] return {'return': 0, 'version': version} diff --git a/script/get-cuda/meta.yaml b/script/get-cuda/meta.yaml index db5a30b0b..ec5e26b69 100644 --- a/script/get-cuda/meta.yaml +++ b/script/get-cuda/meta.yaml @@ -21,32 +21,32 @@ cache: true category: CUDA automation default_env: - CM_CUDA_PATH_LIB_CUDNN_EXISTS: 'no' - CM_REQUIRE_INSTALL: 'no' + MLC_CUDA_PATH_LIB_CUDNN_EXISTS: 'no' + MLC_REQUIRE_INSTALL: 'no' 
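# MLC_REQUIRE_INSTALL is flipped to 'yes' by customize.py when no toolkit is
# detected, which enables the install-cuda-prebuilt prehook defined below.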
deps: - tags: detect,os - enable_if_env: - CM_CUDA_FULL_TOOLKIT_INSTALL: + MLC_CUDA_FULL_TOOLKIT_INSTALL: - 'yes' - CM_HOST_OS_TYPE: + MLC_HOST_OS_TYPE: - windows names: - compiler tags: get,cl input_mapping: - cudnn_tar_file: CM_CUDNN_TAR_FILE_PATH - cudnn_tar_path: CM_CUDNN_TAR_FILE_PATH + cudnn_tar_file: MLC_CUDNN_TAR_FILE_PATH + cudnn_tar_path: MLC_CUDNN_TAR_FILE_PATH skip_sudo: CUDA_SKIP_SUDO - skip_cudnn_install: CM_CUDA_SKIP_CUDNN_INSTALL + skip_cudnn_install: MLC_CUDA_SKIP_CUDNN_INSTALL new_env_keys: - CUDA_HOME - CUDA_PATH -- CM_CUDA_* -- CM_NVCC_* -- CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX5 +- MLC_CUDA_* +- MLC_NVCC_* +- MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX5 - +PATH - +C_INCLUDE_PATH - +CPLUS_INCLUDE_PATH @@ -56,22 +56,22 @@ new_env_keys: prehook_deps: - enable_if_env: - CM_REQUIRE_INSTALL: + MLC_REQUIRE_INSTALL: - 'yes' names: - install-cuda-prebuilt reuse_version: true tags: install,cuda,prebuilt - enable_if_env: - CM_CUDA_PACKAGE_MANAGER_INSTALL: + MLC_CUDA_PACKAGE_MANAGER_INSTALL: - 'yes' tags: get,generic-sys-util,_nvidia-cuda-toolkit print_env_at_the_end: - CM_CUDA_PATH_LIB_CUDNN_EXISTS: '' - CM_CUDA_VERSION: '' - CM_CUDA_VERSION_STRING: '' - CM_NVCC_BIN_WITH_PATH: '' + MLC_CUDA_PATH_LIB_CUDNN_EXISTS: '' + MLC_CUDA_VERSION: '' + MLC_CUDA_VERSION_STRING: '' + MLC_NVCC_BIN_WITH_PATH: '' CUDA_HOME: '' print_files_if_script_error: @@ -80,31 +80,31 @@ print_files_if_script_error: variations: cudnn: env: - CM_CUDA_NEEDS_CUDNN: 'yes' + MLC_CUDA_NEEDS_CUDNN: 'yes' post_deps: - names: - cudnn tags: get,nvidia,cudnn skip_if_env: - CM_CUDA_SKIP_CUDNN_INSTALL: + MLC_CUDA_SKIP_CUDNN_INSTALL: - yes lib-only: env: - CM_CUDA_FULL_TOOLKIT_INSTALL: 'no' - CM_TMP_FILE_TO_CHECK_UNIX: libcudart.so - CM_TMP_FILE_TO_CHECK_WINDOWS: libcudart.dll + MLC_CUDA_FULL_TOOLKIT_INSTALL: 'no' + MLC_TMP_FILE_TO_CHECK_UNIX: libcudart.so + MLC_TMP_FILE_TO_CHECK_WINDOWS: libcudart.dll group: installation-mode package-manager: env: - CM_CUDA_PACKAGE_MANAGER_INSTALL: 'yes' + MLC_CUDA_PACKAGE_MANAGER_INSTALL: 'yes' prebuilt: env: - CM_REQUIRE_INSTALL: 'yes' + MLC_REQUIRE_INSTALL: 'yes' toolkit: default: true env: - CM_CUDA_FULL_TOOLKIT_INSTALL: 'yes' - CM_TMP_FILE_TO_CHECK_UNIX: nvcc - CM_TMP_FILE_TO_CHECK_WINDOWS: nvcc.exe + MLC_CUDA_FULL_TOOLKIT_INSTALL: 'yes' + MLC_TMP_FILE_TO_CHECK_UNIX: nvcc + MLC_TMP_FILE_TO_CHECK_WINDOWS: nvcc.exe group: installation-mode diff --git a/script/get-cuda/run.bat b/script/get-cuda/run.bat index 89af970ac..38ed97dc7 100644 --- a/script/get-cuda/run.bat +++ b/script/get-cuda/run.bat @@ -1,3 +1,3 @@ -"%CM_NVCC_BIN_WITH_PATH%" -V > tmp-ver.out +"%MLC_NVCC_BIN_WITH_PATH%" -V > tmp-ver.out IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/get-cuda/run.sh b/script/get-cuda/run.sh index aac0fee36..2ba9d511c 100644 --- a/script/get-cuda/run.sh +++ b/script/get-cuda/run.sh @@ -1,14 +1,14 @@ #!/bin/bash -if [[ ${CM_CUDA_FULL_TOOLKIT_INSTALL} == "no" ]]; then +if [[ ${MLC_CUDA_FULL_TOOLKIT_INSTALL} == "no" ]]; then exit 0 fi -nvcc_bin=${CM_NVCC_BIN_WITH_PATH:-nvcc} +nvcc_bin=${MLC_NVCC_BIN_WITH_PATH:-nvcc} ${nvcc_bin} -V > tmp-ver.out test $? -eq 0 || exit 1 if [[ ${nvcc_bin} == "nvcc" ]]; then nvcc_path=`which nvcc` - echo "CM_NVCC_BIN_WITH_PATH=${nvcc_path}" >> tmp-run-env.out + echo "MLC_NVCC_BIN_WITH_PATH=${nvcc_path}" >> tmp-run-env.out test $? 
-eq 0 || exit 1
 fi

diff --git a/script/get-cudnn/customize.py b/script/get-cudnn/customize.py
index ed64cd241..097c4342d 100644
--- a/script/get-cudnn/customize.py
+++ b/script/get-cudnn/customize.py
@@ -12,12 +12,12 @@ def preprocess(i):

    env = i['env']

-    env['CM_TMP_RUN_COPY_SCRIPT'] = "no"
+    env['MLC_TMP_RUN_COPY_SCRIPT'] = "no"

    # If TAR file is not explicitly specified, search
-    if env.get('CM_CUDNN_TAR_FILE_PATH', '') == '':
+    if env.get('MLC_CUDNN_TAR_FILE_PATH', '') == '':

-        cuda_path_lib = env.get('CM_CUDA_PATH_LIB')
+        cuda_path_lib = env.get('MLC_CUDA_PATH_LIB')

        if os_info['platform'] == 'windows':
            extra_pre = ''
@@ -27,21 +27,21 @@ def preprocess(i):
            extra_ext = 'so'

        libfilename = extra_pre + 'cudnn.' + extra_ext
-        env['CM_CUDNN_VERSION'] = 'vdetected'
+        env['MLC_CUDNN_VERSION'] = 'vdetected'

        if os.path.exists(os.path.join(cuda_path_lib, libfilename)):
-            env['CM_CUDA_PATH_LIB_CUDNN'] = env['CM_CUDA_PATH_LIB']
+            env['MLC_CUDA_PATH_LIB_CUDNN'] = env['MLC_CUDA_PATH_LIB']
            return {'return': 0}

-        if env.get('CM_TMP_PATH', '').strip() != '':
-            path = env.get('CM_TMP_PATH')
+        if env.get('MLC_TMP_PATH', '').strip() != '':
+            path = env.get('MLC_TMP_PATH')
            if os.path.exists(os.path.join(path, libfilename)):
-                env['CM_CUDA_PATH_LIB_CUDNN'] = path
+                env['MLC_CUDA_PATH_LIB_CUDNN'] = path
                return {'return': 0}

-        if env.get('CM_INPUT', '').strip() == '':
+        if env.get('MLC_INPUT', '').strip() == '':
            if os_info['platform'] == 'windows':
-                if env.get('CM_TMP_PATH', '').strip() == '':
+                if env.get('MLC_TMP_PATH', '').strip() == '':
                    # Check in "C:\Program Files\NVIDIA GPU Computing
                    # Toolkit\CUDA"
                    paths = []
@@ -58,32 +58,32 @@ def preprocess(i):
                    tmp_paths = ';'.join(paths)
                    tmp_paths += ';' + os.environ.get('PATH', '')

-                    env['CM_TMP_PATH'] = tmp_paths
-                    env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'
+                    env['MLC_TMP_PATH'] = tmp_paths
+                    env['MLC_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'

            else:
                # paths to cuda are not always in PATH - add a few typical locations to search for
                # (unless forced by a user)

-                cm_tmp_path = env.get('CM_TMP_PATH', '').strip()
+                cm_tmp_path = env.get('MLC_TMP_PATH', '').strip()
                if cm_tmp_path != '':
                    cm_tmp_path += ':'
                cm_tmp_path += '/usr/local/cuda/lib64:/usr/cuda/lib64:/usr/local/cuda/lib:/usr/cuda/lib:/usr/local/cuda-11/lib64:/usr/cuda-11/lib:/usr/local/cuda-12/lib:/usr/cuda-12/lib:/usr/local/packages/cuda/lib'
                cm_tmp_path += os.path.expandvars(':$CUDNN_ROOT/lib')

-                env['CM_TMP_PATH'] = cm_tmp_path
-                env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'
+                env['MLC_TMP_PATH'] = cm_tmp_path
+                env['MLC_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'

                for lib_path in env.get(
-                        '+CM_HOST_OS_DEFAULT_LIBRARY_PATH', []):
+                        '+MLC_HOST_OS_DEFAULT_LIBRARY_PATH', []):
                    if (os.path.exists(lib_path)):
-                        env['CM_TMP_PATH'] += ':' + lib_path
+                        env['MLC_TMP_PATH'] += ':' + lib_path

        r = i['automation'].find_artifact({'file_name': libfilename,
                                           'env': env,
                                           'os_info': os_info,
                                           'default_path_env_key': 'LD_LIBRARY_PATH',
                                           'detect_version': False,
-                                           'env_path_key': 'CM_CUDA_PATH_LIB_CUDNN',
+                                           'env_path_key': 'MLC_CUDA_PATH_LIB_CUDNN',
                                           'run_script_input': i['run_script_input'],
                                           'recursion_spaces': recursion_spaces})
        if r['return'] > 0:
@@ -91,23 +91,23 @@ def preprocess(i):
                return r

            if r['return'] == 16:
-                env['CM_TMP_REQUIRE_INSTALL'] = "yes"
+                env['MLC_TMP_REQUIRE_INSTALL'] = "yes"
            else:
                return r
        else:
            # On Linux we may have detected the file instead of the path to cudnn
-            if os.path.isfile(env['CM_CUDA_PATH_LIB_CUDNN']):
-                env['CM_CUDA_PATH_LIB_CUDNN'] = os.path.dirname(
-                    env['CM_CUDA_PATH_LIB_CUDNN'])
+            if os.path.isfile(env['MLC_CUDA_PATH_LIB_CUDNN']):
+                env['MLC_CUDA_PATH_LIB_CUDNN'] = os.path.dirname(
+                    env['MLC_CUDA_PATH_LIB_CUDNN'])

            return {'return': 0}

-    if env.get('CM_CUDNN_TAR_FILE_PATH', '') == '':
+    if env.get('MLC_CUDNN_TAR_FILE_PATH', '') == '':
        return {'return': 1, 'error': 'Please invoke cm run script "get cudnn" --tar_file={full path to the cuDNN tar file}'}

    print('Untarring file - can take some time ...')

-    my_tar = tarfile.open(os.path.expanduser(env['CM_CUDNN_TAR_FILE_PATH']))
+    my_tar = tarfile.open(os.path.expanduser(env['MLC_CUDNN_TAR_FILE_PATH']))
    folder_name = my_tar.getnames()[0]
    if not os.path.exists(os.path.join(os.getcwd(), folder_name)):
        my_tar.extractall()
@@ -119,14 +119,14 @@ def preprocess(i):
        return {
            'return': 1, 'error': 'Extracted CUDNN folder does not seem proper - Version information missing'}
    version = version_match.group(1)
-    env['CM_CUDNN_VERSION'] = version
+    env['MLC_CUDNN_VERSION'] = version

    inc_path = os.path.join(os.getcwd(), folder_name, "include")
    lib_path = os.path.join(os.getcwd(), folder_name, "lib")
-    cuda_inc_path = env['CM_CUDA_PATH_INCLUDE']
-    cuda_lib_path = env['CM_CUDA_PATH_LIB']
-    env['CM_CUDA_PATH_LIB_CUDNN'] = env['CM_CUDA_PATH_LIB']
-    env['CM_CUDA_PATH_INCLUDE_CUDNN'] = env['CM_CUDA_PATH_INCLUDE']
+    cuda_inc_path = env['MLC_CUDA_PATH_INCLUDE']
+    cuda_lib_path = env['MLC_CUDA_PATH_LIB']
+    env['MLC_CUDA_PATH_LIB_CUDNN'] = env['MLC_CUDA_PATH_LIB']
+    env['MLC_CUDA_PATH_INCLUDE_CUDNN'] = env['MLC_CUDA_PATH_INCLUDE']

    try:
        print(
@@ -136,9 +136,9 @@ def preprocess(i):
        shutil.copytree(lib_path, cuda_lib_path, dirs_exist_ok=True)
    except BaseException:
        # Need to copy to system path via run.sh
-        env['CM_TMP_RUN_COPY_SCRIPT'] = "yes"
-        env['CM_TMP_INC_PATH'] = inc_path
-        env['CM_TMP_LIB_PATH'] = lib_path
+        env['MLC_TMP_RUN_COPY_SCRIPT'] = "yes"
+        env['MLC_TMP_INC_PATH'] = inc_path
+        env['MLC_TMP_LIB_PATH'] = lib_path

    return {'return': 0}

@@ -149,10 +149,10 @@ def postprocess(i):

    env = i['env']

-    version = env['CM_CUDNN_VERSION']
+    version = env['MLC_CUDNN_VERSION']

    if version == 'vdetected':
-        path_to_cudnn = env.get('CM_CUDA_PATH_LIB_CUDNN', '')
+        path_to_cudnn = env.get('MLC_CUDA_PATH_LIB_CUDNN', '')
        if os.path.isdir(path_to_cudnn):
            path_to_include = path_to_cudnn
            path_to_include_file = ''
@@ -169,7 +169,7 @@ def postprocess(i):
                    path_to_include_file = x

            if path_to_include_file != '':
-                env['CM_CUDA_PATH_INCLUDE_CUDNN'] = os.path.dirname(
+                env['MLC_CUDA_PATH_INCLUDE_CUDNN'] = os.path.dirname(
                    path_to_include_file)

                r = utils.load_txt(path_to_include_file, split=True)
@@ -195,8 +195,8 @@ def postprocess(i):
                    if xversion != '':
                        version = xversion
-                        env['CM_CUDNN_VERSION'] = xversion
+                        env['MLC_CUDNN_VERSION'] = xversion

-    env['CM_CUDA_PATH_LIB_CUDNN_EXISTS'] = 'yes'
+    env['MLC_CUDA_PATH_LIB_CUDNN_EXISTS'] = 'yes'

    return {'return': 0, 'version': version}
diff --git a/script/get-cudnn/meta.yaml b/script/get-cudnn/meta.yaml
index fa5ccd2c7..b4f459bee 100644
--- a/script/get-cudnn/meta.yaml
+++ b/script/get-cudnn/meta.yaml
@@ -14,8 +14,8 @@ cache: true
 category: CUDA automation

 default_env:
-  CM_INPUT: ''
-  CM_SUDO: sudo
+  MLC_INPUT: ''
+  MLC_SUDO: sudo

 deps:
 - tags: detect,os
@@ -23,9 +23,9 @@ deps:
 - names:
   - cuda
   skip_if_env:
-    CM_CUDA_PATH_INCLUDE:
+    MLC_CUDA_PATH_INCLUDE:
     - 'on'
-    CM_CUDA_PATH_LIB:
+    MLC_CUDA_PATH_LIB:
     - 'on'
   tags: get,cuda

@@ -36,14 +36,14 @@ input_description:
    desc: Full path to the cuDNN Tar file downloaded from Nvidia website (https://developer.nvidia.com/cudnn)

 input_mapping:
-  input: CM_INPUT
-  tar_file: 
CM_CUDNN_TAR_FILE_PATH + input: MLC_INPUT + tar_file: MLC_CUDNN_TAR_FILE_PATH new_env_keys: -- CM_CUDNN_* -- CM_CUDA_PATH_LIB_CUDNN -- CM_CUDA_PATH_INCLUDE_CUDNN -- CM_CUDA_PATH_LIB_CUDNN_EXISTS +- MLC_CUDNN_* +- MLC_CUDA_PATH_LIB_CUDNN +- MLC_CUDA_PATH_INCLUDE_CUDNN +- MLC_CUDA_PATH_LIB_CUDNN_EXISTS - +PATH - +C_INCLUDE_PATH - +CPLUS_INCLUDE_PATH @@ -51,6 +51,6 @@ new_env_keys: - +DYLD_FALLBACK_LIBRARY_PATH print_env_at_the_end: - CM_CUDA_PATH_LIB_CUDNN: '' - CM_CUDA_PATH_INCLUDE_CUDNN: '' - CM_CUDNN_VERSION: '' + MLC_CUDA_PATH_LIB_CUDNN: '' + MLC_CUDA_PATH_INCLUDE_CUDNN: '' + MLC_CUDNN_VERSION: '' diff --git a/script/get-cudnn/run.sh b/script/get-cudnn/run.sh index e2cb00fb0..0ac138303 100644 --- a/script/get-cudnn/run.sh +++ b/script/get-cudnn/run.sh @@ -1,11 +1,11 @@ #!/bin/bash -if [ ${CM_TMP_RUN_COPY_SCRIPT} == "yes" ]; then - cmd="${CM_SUDO} cp ${CM_TMP_INC_PATH}/*.h ${CM_CUDA_PATH_INCLUDE}/" +if [ ${MLC_TMP_RUN_COPY_SCRIPT} == "yes" ]; then + cmd="${MLC_SUDO} cp ${MLC_TMP_INC_PATH}/*.h ${MLC_CUDA_PATH_INCLUDE}/" echo $cmd eval $cmd test $? -eq 0 || exit 1 - cmd="${CM_SUDO} cp -P ${CM_TMP_LIB_PATH}/libcudnn* ${CM_CUDA_PATH_LIB}/" + cmd="${MLC_SUDO} cp -P ${MLC_TMP_LIB_PATH}/libcudnn* ${MLC_CUDA_PATH_LIB}/" echo $cmd eval $cmd test $? -eq 0 || exit 1 diff --git a/script/get-dataset-cifar10/meta.yaml b/script/get-dataset-cifar10/meta.yaml index 1be5ef644..dfc05ab15 100644 --- a/script/get-dataset-cifar10/meta.yaml +++ b/script/get-dataset-cifar10/meta.yaml @@ -6,7 +6,7 @@ category: AI/ML datasets deps: - tags: detect,os new_env_keys: -- CM_DATASET_* +- MLC_DATASET_* tags: - get - dataset @@ -19,10 +19,10 @@ variations: python: default: true env: - CM_DATASET: CIFAR10 - CM_DATASET_CIFAR10: https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz - CM_DATASET_FILENAME: cifar-10-python.tar.gz - CM_DATASET_FILENAME1: cifar-10-python.tar + MLC_DATASET: CIFAR10 + MLC_DATASET_CIFAR10: https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz + MLC_DATASET_FILENAME: cifar-10-python.tar.gz + MLC_DATASET_FILENAME1: cifar-10-python.tar group: data_format tiny: deps: @@ -33,4 +33,4 @@ variations: - tags: get,tinymlperf,src - tags: get,src,eembc,energy-runner env: - CM_DATASET_CONVERT_TO_TINYMLPERF: 'yes' + MLC_DATASET_CONVERT_TO_TINYMLPERF: 'yes' diff --git a/script/get-dataset-cifar10/run.bat b/script/get-dataset-cifar10/run.bat index 8f54fb86e..621dbd632 100644 --- a/script/get-dataset-cifar10/run.bat +++ b/script/get-dataset-cifar10/run.bat @@ -1,48 +1,48 @@ -wget -nc %CM_DATASET_CIFAR10% --no-check-certificate +wget -nc %MLC_DATASET_CIFAR10% --no-check-certificate IF %ERRORLEVEL% NEQ 0 EXIT 1 -del /Q /S %CM_DATASET_FILENAME1% +del /Q /S %MLC_DATASET_FILENAME1% -gzip -d %CM_DATASET_FILENAME% +gzip -d %MLC_DATASET_FILENAME% IF %ERRORLEVEL% NEQ 0 EXIT 1 -tar -xvf %CM_DATASET_FILENAME1% +tar -xvf %MLC_DATASET_FILENAME1% IF %ERRORLEVEL% NEQ 0 EXIT 1 -del /Q /S %CM_DATASET_FILENAME1% +del /Q /S %MLC_DATASET_FILENAME1% -echo CM_DATASET_PATH=%CD%\cifar-10-batches-py > tmp-run-env.out -echo CM_DATASET_CIFAR10_PATH=%CD%\cifar-10-batches-py >> tmp-run-env.out +echo MLC_DATASET_PATH=%CD%\cifar-10-batches-py > tmp-run-env.out +echo MLC_DATASET_CIFAR10_PATH=%CD%\cifar-10-batches-py >> tmp-run-env.out -if "%CM_DATASET_CONVERT_TO_TINYMLPERF%" == "yes" ( +if "%MLC_DATASET_CONVERT_TO_TINYMLPERF%" == "yes" ( echo. echo Copying TinyMLPerf convertor ... echo. - copy /B /Y %CM_MLPERF_TINY_TRAINING_IC%\* . + copy /B /Y %MLC_MLPERF_TINY_TRAINING_IC%\* . echo. echo Installing Python requirements ... echo. 
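rem requirements.txt ships next to this script; its pinned packages are used by
rem perf_samples_loader.py in the TinyMLPerf conversion step below.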
- %CM_PYTHON_BIN% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt + %MLC_PYTHON_BIN% -m pip install -r %MLC_TMP_CURRENT_SCRIPT_PATH%\requirements.txt IF %ERRORLEVEL% NEQ 0 EXIT 1 echo. echo Converting ... echo. - %CM_PYTHON_BIN% perf_samples_loader.py + %MLC_PYTHON_BIN% perf_samples_loader.py IF %ERRORLEVEL% NEQ 0 EXIT 1 copy /B /Y y_labels.csv perf_samples - echo CM_DATASET_CIFAR10_TINYMLPERF_PATH=%CD%\perf_samples >> tmp-run-env.out + echo MLC_DATASET_CIFAR10_TINYMLPERF_PATH=%CD%\perf_samples >> tmp-run-env.out echo. echo Copying to EEMBC runner user space ... echo. - copy /B /Y perf_samples\* %CM_EEMBC_ENERGY_RUNNER_DATASETS%\ic01 + copy /B /Y perf_samples\* %MLC_EEMBC_ENERGY_RUNNER_DATASETS%\ic01 ) diff --git a/script/get-dataset-cifar10/run.sh b/script/get-dataset-cifar10/run.sh index a113a2e4d..814177d52 100644 --- a/script/get-dataset-cifar10/run.sh +++ b/script/get-dataset-cifar10/run.sh @@ -1,50 +1,50 @@ #!/bin/bash -wget -nc ${CM_DATASET_CIFAR10} --no-check-certificate +wget -nc ${MLC_DATASET_CIFAR10} --no-check-certificate test $? -eq 0 || exit 1 -rm -rf ${CM_DATASET_FILENAME1} +rm -rf ${MLC_DATASET_FILENAME1} -gzip -d ${CM_DATASET_FILENAME} +gzip -d ${MLC_DATASET_FILENAME} test $? -eq 0 || exit 1 -tar -xvf ${CM_DATASET_FILENAME1} +tar -xvf ${MLC_DATASET_FILENAME1} test $? -eq 0 || exit 1 -rm -rf ${CM_DATASET_FILENAME} +rm -rf ${MLC_DATASET_FILENAME} -echo "CM_DATASET_PATH=$PWD/cifar-10-batches-py" > tmp-run-env.out -echo "CM_DATASET_CIFAR10_PATH=$PWD/cifar-10-batches-py" >> tmp-run-env.out +echo "MLC_DATASET_PATH=$PWD/cifar-10-batches-py" > tmp-run-env.out +echo "MLC_DATASET_CIFAR10_PATH=$PWD/cifar-10-batches-py" >> tmp-run-env.out -if [ "${CM_DATASET_CONVERT_TO_TINYMLPERF}" == "yes" ]; then +if [ "${MLC_DATASET_CONVERT_TO_TINYMLPERF}" == "yes" ]; then echo "" echo "Copying TinyMLPerf convertor ..." echo "" - cp -rf ${CM_MLPERF_TINY_TRAINING_IC}/* . + cp -rf ${MLC_MLPERF_TINY_TRAINING_IC}/* . echo "" echo "Installing Python requirements ..." echo "" - ${CM_PYTHON_BIN} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt + ${MLC_PYTHON_BIN} -m pip install -r ${MLC_TMP_CURRENT_SCRIPT_PATH}/requirements.txt if [ "${?}" != "0" ]; then exit 1; fi echo "" echo "Converting in $PWD ..." echo "" - ${CM_PYTHON_BIN} perf_samples_loader.py + ${MLC_PYTHON_BIN} perf_samples_loader.py if [ "${?}" != "0" ]; then exit 1; fi cp -rf y_labels.csv perf_samples - echo "CM_DATASET_CIFAR10_TINYMLPERF_PATH=$PWD/perf_samples" >> tmp-run-env.out + echo "MLC_DATASET_CIFAR10_TINYMLPERF_PATH=$PWD/perf_samples" >> tmp-run-env.out echo "" echo "Copying to EEMBC runner user space ..." 
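  # perf_samples (with y_labels.csv copied in above) is the layout the EEMBC
  # EnergyRunner picks up from its ic01 dataset folder.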
echo "" - cp -rf perf_samples/* ${CM_EEMBC_ENERGY_RUNNER_DATASETS}/ic01 + cp -rf perf_samples/* ${MLC_EEMBC_ENERGY_RUNNER_DATASETS}/ic01 fi diff --git a/script/get-dataset-cnndm/customize.py b/script/get-dataset-cnndm/customize.py index a6cf2d476..34726734f 100644 --- a/script/get-dataset-cnndm/customize.py +++ b/script/get-dataset-cnndm/customize.py @@ -7,11 +7,11 @@ def preprocess(i): env = i['env'] - if env.get('CM_CNNDM_INTEL_VARIATION', '') == 'yes': + if env.get('MLC_CNNDM_INTEL_VARIATION', '') == 'yes': i['run_script_input']['script_name'] = "run-intel" else: print("Using MLCommons Inference source from '" + - env['CM_MLPERF_INFERENCE_SOURCE'] + "'") + env['MLC_MLPERF_INFERENCE_SOURCE'] + "'") return {'return': 0} @@ -19,18 +19,18 @@ def preprocess(i): def postprocess(i): env = i['env'] - if env.get('CM_DATASET_CALIBRATION', '') == "no": - env['CM_DATASET_PATH'] = os.path.join(os.getcwd(), 'install') - env['CM_DATASET_EVAL_PATH'] = os.path.join( + if env.get('MLC_DATASET_CALIBRATION', '') == "no": + env['MLC_DATASET_PATH'] = os.path.join(os.getcwd(), 'install') + env['MLC_DATASET_EVAL_PATH'] = os.path.join( os.getcwd(), 'install', 'cnn_eval.json') - env['CM_DATASET_CNNDM_EVAL_PATH'] = os.path.join( + env['MLC_DATASET_CNNDM_EVAL_PATH'] = os.path.join( os.getcwd(), 'install', 'cnn_eval.json') - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_DATASET_PATH'] + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_DATASET_PATH'] else: - env['CM_CALIBRATION_DATASET_PATH'] = os.path.join( + env['MLC_CALIBRATION_DATASET_PATH'] = os.path.join( os.getcwd(), 'install', 'cnn_dailymail_calibration.json') - env['CM_CALIBRATION_DATASET_CNNDM_PATH'] = os.path.join( + env['MLC_CALIBRATION_DATASET_CNNDM_PATH'] = os.path.join( os.getcwd(), 'install', 'cnn_dailymail_calibration.json') - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_CALIBRATION_DATASET_PATH'] + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_CALIBRATION_DATASET_PATH'] return {'return': 0} diff --git a/script/get-dataset-cnndm/meta.yaml b/script/get-dataset-cnndm/meta.yaml index 91b2af381..bdc27957b 100644 --- a/script/get-dataset-cnndm/meta.yaml +++ b/script/get-dataset-cnndm/meta.yaml @@ -4,7 +4,7 @@ automation_uid: 5b4e0237da074764 cache: true category: AI/ML datasets default_env: - CM_DATASET_CALIBRATION: 'no' + MLC_DATASET_CALIBRATION: 'no' deps: - tags: get,sys-utils-cm - names: @@ -15,7 +15,7 @@ deps: - names: - inference-src skip_if_env: - CM_CNNDM_INTEL_VARIATION: + MLC_CNNDM_INTEL_VARIATION: - 'yes' tags: mlperf,inference,source - tags: get,generic-python-lib,_package.simplejson @@ -23,7 +23,7 @@ deps: - tags: get,generic-python-lib,_package.tokenizers - tags: get,generic-python-lib,_numpy env: - CM_DATASET: CNNDM + MLC_DATASET: CNNDM tags: - get - dataset @@ -35,21 +35,21 @@ uid: aed298c156e24257 variations: calibration: env: - CM_DATASET_CALIBRATION: 'yes' + MLC_DATASET_CALIBRATION: 'yes' group: dataset-type new_env_keys: - - CM_CALIBRATION_DATASET_PATH - - CM_CALIBRATION_DATASET_CNNDM_PATH + - MLC_CALIBRATION_DATASET_PATH + - MLC_CALIBRATION_DATASET_CNNDM_PATH intel: {} intel,validation: env: - CM_CNNDM_INTEL_VARIATION: 'yes' + MLC_CNNDM_INTEL_VARIATION: 'yes' validation: default: true env: - CM_DATASET_CALIBRATION: 'no' + MLC_DATASET_CALIBRATION: 'no' group: dataset-type new_env_keys: - - CM_DATASET_PATH - - CM_DATASET_EVAL_PATH - - CM_DATASET_CNNDM_EVAL_PATH + - MLC_DATASET_PATH + - MLC_DATASET_EVAL_PATH + - MLC_DATASET_CNNDM_EVAL_PATH diff --git a/script/get-dataset-cnndm/run-intel.sh b/script/get-dataset-cnndm/run-intel.sh index 
067f158a5..36976e282 100644 --- a/script/get-dataset-cnndm/run-intel.sh +++ b/script/get-dataset-cnndm/run-intel.sh @@ -9,7 +9,7 @@ export DATASET_CNNDM_PATH=${CUR}/install wget -nc https://raw.githubusercontent.com/mlcommons/inference_results_v3.1/main/closed/Intel/code/gptj-99/pytorch-cpu/download-dataset.py test $? -eq 0 || exit 1 -cmd="${CM_PYTHON_BIN_WITH_PATH} download-dataset.py --split validation --output-dir ${DATASET_CNNDM_PATH}" +cmd="${MLC_PYTHON_BIN_WITH_PATH} download-dataset.py --split validation --output-dir ${DATASET_CNNDM_PATH}" echo "$cmd" eval "$cmd" test $? -eq 0 || exit 1 diff --git a/script/get-dataset-cnndm/run.sh b/script/get-dataset-cnndm/run.sh index f9aa3864b..48e3050e0 100644 --- a/script/get-dataset-cnndm/run.sh +++ b/script/get-dataset-cnndm/run.sh @@ -4,16 +4,16 @@ CUR=${PWD} mkdir -p install export DATASET_CNNDM_PATH=${CUR}/install -cd ${CM_MLPERF_INFERENCE_SOURCE} +cd ${MLC_MLPERF_INFERENCE_SOURCE} cd language/gpt-j -if [[ ${CM_DATASET_CALIBRATION} == "no" ]]; then - cmd="${CM_PYTHON_BIN_WITH_PATH} download_cnndm.py" +if [[ ${MLC_DATASET_CALIBRATION} == "no" ]]; then + cmd="${MLC_PYTHON_BIN_WITH_PATH} download_cnndm.py" echo $cmd eval $cmd test $? -eq 0 || exit 1 else - cmd="${CM_PYTHON_BIN_WITH_PATH} prepare-calibration.py --calibration-list-file calibration-list.txt --output-dir ${DATASET_CNNDM_PATH}" + cmd="${MLC_PYTHON_BIN_WITH_PATH} prepare-calibration.py --calibration-list-file calibration-list.txt --output-dir ${DATASET_CNNDM_PATH}" echo $cmd eval $cmd test $? -eq 0 || exit 1 diff --git a/script/get-dataset-coco/README-extra.md b/script/get-dataset-coco/README-extra.md index 9f19d2e8d..2bf3a5321 100644 --- a/script/get-dataset-coco/README-extra.md +++ b/script/get-dataset-coco/README-extra.md @@ -36,23 +36,23 @@ cmr "get coco dataset _val _2017" -j ```json "new_env": { - "CM_DATASET_COCO_URL_ANNOTATIONS": "http://images.cocodataset.org/annotations", - "CM_DATASET_COCO_URL_DATA": "http://images.cocodataset.org/zips", - "CM_DATASET_COCO_VERSION": "2017", - "CM_DATASET_COCO_TYPE": "val", - "CM_DATASET_COCO_SIZE": "complete", - "CM_DATASET_COCO_ANNOTATIONS_DOWNLOAD_PATH": "d:\\Work2\\COCO-2017-val\\annotations_trainval2017.zip", - "CM_DATASET_COCO_ANNOTATIONS_PATH": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07\\annotations", - "CM_DATASET_COCO_DATA_DOWNLOAD_PATH": "d:\\Work2\\COCO-2017-val\\val2017.zip", - "CM_DATASET_COCO_DATA_PATH": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07\\val2017", - "CM_DATASET_COCO_MD5SUM_ANN": "f4bbac642086de4f52a3fdda2de5fa2c", - "CM_DATASET_COCO_MD5SUM_DATA": "442b8da7639aecaf257c1dceb8ba8c80", - "CM_DATASET_COCO_PATH": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07", - "CM_DATASET_COCO_TYPE_AND_VERSION": "val2017", - "CM_DATASET_COCO_URL_ANNOTATIONS_FULL": "http://images.cocodataset.org/annotations/annotations_trainval2017.zip", - "CM_DATASET_COCO_URL_DATA_FULL": "http://images.cocodataset.org/zips/val2017.zip", - "CM_DATASET_PATH": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07", - "CM_DATASET_PATH_ROOT": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07" + "MLC_DATASET_COCO_URL_ANNOTATIONS": "http://images.cocodataset.org/annotations", + "MLC_DATASET_COCO_URL_DATA": "http://images.cocodataset.org/zips", + "MLC_DATASET_COCO_VERSION": "2017", + "MLC_DATASET_COCO_TYPE": "val", + "MLC_DATASET_COCO_SIZE": "complete", + "MLC_DATASET_COCO_ANNOTATIONS_DOWNLOAD_PATH": "d:\\Work2\\COCO-2017-val\\annotations_trainval2017.zip", + "MLC_DATASET_COCO_ANNOTATIONS_PATH": 
"D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07\\annotations", + "MLC_DATASET_COCO_DATA_DOWNLOAD_PATH": "d:\\Work2\\COCO-2017-val\\val2017.zip", + "MLC_DATASET_COCO_DATA_PATH": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07\\val2017", + "MLC_DATASET_COCO_MD5SUM_ANN": "f4bbac642086de4f52a3fdda2de5fa2c", + "MLC_DATASET_COCO_MD5SUM_DATA": "442b8da7639aecaf257c1dceb8ba8c80", + "MLC_DATASET_COCO_PATH": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07", + "MLC_DATASET_COCO_TYPE_AND_VERSION": "val2017", + "MLC_DATASET_COCO_URL_ANNOTATIONS_FULL": "http://images.cocodataset.org/annotations/annotations_trainval2017.zip", + "MLC_DATASET_COCO_URL_DATA_FULL": "http://images.cocodataset.org/zips/val2017.zip", + "MLC_DATASET_PATH": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07", + "MLC_DATASET_PATH_ROOT": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07" }, ``` diff --git a/script/get-dataset-coco/customize.py b/script/get-dataset-coco/customize.py index b7ee135ee..029967fc4 100644 --- a/script/get-dataset-coco/customize.py +++ b/script/get-dataset-coco/customize.py @@ -10,11 +10,11 @@ def preprocess(i): automation = i['automation'] env = i['env'] meta = i['meta'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') # Check if path is there to detect existing data set detected = False - path = env.get('CM_TMP_PATH', '') + path = env.get('MLC_TMP_PATH', '') if path != '': if not os.path.isdir(path): return {'return': 1, @@ -40,14 +40,14 @@ def preprocess(i): print('') print('Detected COCO dataset {} {}'.format(tp, ver)) - env['CM_DATASET_COCO_DETECTED'] = 'yes' - env['CM_DATASET_COCO_PATH'] = path + env['MLC_DATASET_COCO_DETECTED'] = 'yes' + env['MLC_DATASET_COCO_PATH'] = path else: - ver = env['CM_DATASET_COCO_VERSION'] - tp = env['CM_DATASET_COCO_TYPE'] + ver = env['MLC_DATASET_COCO_VERSION'] + tp = env['MLC_DATASET_COCO_TYPE'] # Prepare URL - size = env.get('CM_DATASET_COCO_SIZE', '') + size = env.get('MLC_DATASET_COCO_SIZE', '') if size == 'small' and tp == 'val' and ver == '2017': # We prepared a small version with 50 images for val 2017 @@ -60,8 +60,8 @@ def preprocess(i): filename_annotation) else: - url_data = env['CM_DATASET_COCO_URL_DATA'] - url_ann = env['CM_DATASET_COCO_URL_ANNOTATIONS'] + url_data = env['MLC_DATASET_COCO_URL_DATA'] + url_ann = env['MLC_DATASET_COCO_URL_ANNOTATIONS'] filename_data = tp + ver + '.zip' filename_annotation = 'annotations_trainval' + ver + '.zip' @@ -80,7 +80,7 @@ def preprocess(i): 'extra_cache_tags': download_extra_cache_tags } - path_from = env.get('CM_FROM', '') + path_from = env.get('MLC_FROM', '') if path_from != '': path_from_data = os.path.join(path_from, filename_data) if not os.path.isfile(path_from_data): @@ -94,12 +94,12 @@ def preprocess(i): path_from_annotation)} dae_input_annotation['local_path'] = path_from_annotation - path_to = env.get('CM_TO', '') + path_to = env.get('MLC_TO', '') if path_to != '': dae_input_data['extract_path'] = path_to dae_input_annotation['extract_path'] = path_to - path_store = env.get('CM_STORE', '') + path_store = env.get('MLC_STORE', '') if path_store != '': dae_input_data['download_path'] = path_store dae_input_data['tags'] = '_keep' @@ -116,11 +116,11 @@ def preprocess(i): return r # Prepare environment variables - env['CM_DATASET_COCO_VERSION'] = ver - env['CM_DATASET_COCO_TYPE'] = tp - env['CM_DATASET_COCO_TYPE_AND_VERSION'] = tp + ver - env['CM_DATASET_COCO_URL_DATA_FULL'] = url_data_full - env['CM_DATASET_COCO_URL_ANNOTATIONS_FULL'] = 
url_ann_full + env['MLC_DATASET_COCO_VERSION'] = ver + env['MLC_DATASET_COCO_TYPE'] = tp + env['MLC_DATASET_COCO_TYPE_AND_VERSION'] = tp + ver + env['MLC_DATASET_COCO_URL_DATA_FULL'] = url_data_full + env['MLC_DATASET_COCO_URL_ANNOTATIONS_FULL'] = url_ann_full # Check MD5SUM md5sum_data = '' @@ -136,9 +136,9 @@ def preprocess(i): md5sum_ann = 'f4bbac642086de4f52a3fdda2de5fa2c' if md5sum_data != '': - env['CM_DATASET_COCO_MD5SUM_DATA'] = md5sum_data + env['MLC_DATASET_COCO_MD5SUM_DATA'] = md5sum_data if md5sum_ann != '': - env['CM_DATASET_COCO_MD5SUM_ANN'] = md5sum_ann + env['MLC_DATASET_COCO_MD5SUM_ANN'] = md5sum_ann if not detected: print('') @@ -160,25 +160,25 @@ def postprocess(i): env = i['env'] - tp_ver = env['CM_DATASET_COCO_TYPE_AND_VERSION'] + tp_ver = env['MLC_DATASET_COCO_TYPE_AND_VERSION'] - path_to = env.get('CM_TO', '') + path_to = env.get('MLC_TO', '') # Check if detected or downloaded - if env.get('CM_DATASET_COCO_DETECTED', + if env.get('MLC_DATASET_COCO_DETECTED', '').lower() == 'yes' or path_to != '': - path_all = env['CM_DATASET_COCO_PATH'] if path_to == '' else path_to + path_all = env['MLC_DATASET_COCO_PATH'] if path_to == '' else path_to - env['CM_DATASET_COCO_DATA_PATH'] = os.path.join(path_all, tp_ver) - env['CM_DATASET_COCO_ANNOTATIONS_PATH'] = os.path.join( + env['MLC_DATASET_COCO_DATA_PATH'] = os.path.join(path_all, tp_ver) + env['MLC_DATASET_COCO_ANNOTATIONS_PATH'] = os.path.join( path_all, 'annotations') else: path_all = os.getcwd() # Moving 2 directories to 1 place - path_data = env['CM_DATASET_COCO_DATA_PATH'] - path_ann = env['CM_DATASET_COCO_ANNOTATIONS_PATH'] + path_data = env['MLC_DATASET_COCO_DATA_PATH'] + path_ann = env['MLC_DATASET_COCO_ANNOTATIONS_PATH'] print('') print(path_all) @@ -192,8 +192,8 @@ def postprocess(i): command1 = ' move /y ' + path_data_full + ' ' + tp_ver command2 = ' move /y ' + path_ann_full + ' annotations' - env['CM_DATASET_COCO_DATA_PATH'] = os.path.join(path_all, tp_ver) - env['CM_DATASET_COCO_ANNOTATIONS_PATH'] = os.path.join( + env['MLC_DATASET_COCO_DATA_PATH'] = os.path.join(path_all, tp_ver) + env['MLC_DATASET_COCO_ANNOTATIONS_PATH'] = os.path.join( path_all, 'annotations') else: # Make soft links from data and annotations into 1 directory @@ -206,8 +206,8 @@ def postprocess(i): print(command) os.system(command) - env['CM_DATASET_COCO_PATH'] = path_all - env['CM_DATASET_PATH'] = path_all - env['CM_DATASET_PATH_ROOT'] = path_all + env['MLC_DATASET_COCO_PATH'] = path_all + env['MLC_DATASET_PATH'] = path_all + env['MLC_DATASET_PATH_ROOT'] = path_all return {'return': 0} diff --git a/script/get-dataset-coco/meta.yaml b/script/get-dataset-coco/meta.yaml index 301d76951..bf842a486 100644 --- a/script/get-dataset-coco/meta.yaml +++ b/script/get-dataset-coco/meta.yaml @@ -17,53 +17,53 @@ docker: - to skip_run_cmd: 'no' env: - CM_DATASET: COCO - CM_DATASET_COCO_URL_ANNOTATIONS: http://images.cocodataset.org/annotations - CM_DATASET_COCO_URL_DATA: http://images.cocodataset.org/zips + MLC_DATASET: COCO + MLC_DATASET_COCO_URL_ANNOTATIONS: http://images.cocodataset.org/annotations + MLC_DATASET_COCO_URL_DATA: http://images.cocodataset.org/zips input_mapping: - from: CM_FROM - home: CM_HOME_DIR - store: CM_STORE - to: CM_TO + from: MLC_FROM + home: MLC_HOME_DIR + store: MLC_STORE + to: MLC_TO new_env_keys: -- CM_DATASET_COCO* -- CM_DATASET_PATH -- CM_DATASET_PATH_ROOT +- MLC_DATASET_COCO* +- MLC_DATASET_PATH +- MLC_DATASET_PATH_ROOT prehook_deps: - env: - CM_DOWNLOAD_CHECKSUM: <<>> - CM_DOWNLOAD_FINAL_ENV_NAME: 
CM_DATASET_COCO_DATA_DOWNLOAD_PATH - CM_EXTRACT_FINAL_ENV_NAME: CM_DATASET_COCO_DATA_PATH + MLC_DOWNLOAD_CHECKSUM: <<>> + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_DATASET_COCO_DATA_DOWNLOAD_PATH + MLC_EXTRACT_FINAL_ENV_NAME: MLC_DATASET_COCO_DATA_PATH force_cache: true names: - get-dataset-coco-data - 746e5dad5e784ad6 skip_if_env: - CM_DATASET_COCO_DETECTED: + MLC_DATASET_COCO_DETECTED: - 'yes' skip_if_fake_run: true tags: download-and-extract,file,_wget,_extract update_tags_from_env_with_prefix: _url.: - - CM_DATASET_COCO_URL_DATA_FULL + - MLC_DATASET_COCO_URL_DATA_FULL verify: false - env: - CM_DOWNLOAD_CHECKSUM: <<>> - CM_DOWNLOAD_FINAL_ENV_NAME: CM_DATASET_COCO_ANNOTATIONS_DOWNLOAD_PATH - CM_DOWNLOAD_PATH: <<>> - CM_EXTRACT_FINAL_ENV_NAME: CM_DATASET_COCO_ANNOTATIONS_PATH + MLC_DOWNLOAD_CHECKSUM: <<>> + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_DATASET_COCO_ANNOTATIONS_DOWNLOAD_PATH + MLC_DOWNLOAD_PATH: <<>> + MLC_EXTRACT_FINAL_ENV_NAME: MLC_DATASET_COCO_ANNOTATIONS_PATH force_cache: true names: - get-dataset-coco-annotations - edb6cd092ff64171 skip_if_env: - CM_DATASET_COCO_DETECTED: + MLC_DATASET_COCO_DETECTED: - 'yes' skip_if_fake_run: true tags: download-and-extract,file,_wget,_extract update_tags_from_env_with_prefix: _url.: - - CM_DATASET_COCO_URL_ANNOTATIONS_FULL + - MLC_DATASET_COCO_URL_ANNOTATIONS_FULL verify: false tags: - get @@ -75,23 +75,23 @@ variations: '2017': default: true env: - CM_DATASET_COCO_VERSION: '2017' + MLC_DATASET_COCO_VERSION: '2017' group: version complete: default: true env: - CM_DATASET_COCO_SIZE: complete + MLC_DATASET_COCO_SIZE: complete group: size small: env: - CM_DATASET_COCO_SIZE: small + MLC_DATASET_COCO_SIZE: small group: size train: env: - CM_DATASET_COCO_TYPE: train + MLC_DATASET_COCO_TYPE: train group: type val: default: true env: - CM_DATASET_COCO_TYPE: val + MLC_DATASET_COCO_TYPE: val group: type diff --git a/script/get-dataset-coco2014/customize.py b/script/get-dataset-coco2014/customize.py index 90a502219..837539efc 100644 --- a/script/get-dataset-coco2014/customize.py +++ b/script/get-dataset-coco2014/customize.py @@ -8,33 +8,33 @@ def preprocess(i): env = i['env'] print("Using MLCommons Inference source from '" + - env['CM_MLPERF_INFERENCE_SOURCE'] + "'") + env['MLC_MLPERF_INFERENCE_SOURCE'] + "'") run_dir = os.path.join( - env['CM_MLPERF_INFERENCE_SOURCE'], + env['MLC_MLPERF_INFERENCE_SOURCE'], "text_to_image", "tools") - env['CM_RUN_DIR'] = run_dir + env['MLC_RUN_DIR'] = run_dir return {'return': 0} def postprocess(i): env = i['env'] - if env.get('CM_GENERATE_SAMPLE_ID', '') == "yes": - env['CM_COCO2014_SAMPLE_ID_PATH'] = os.path.join( + if env.get('MLC_GENERATE_SAMPLE_ID', '') == "yes": + env['MLC_COCO2014_SAMPLE_ID_PATH'] = os.path.join( os.getcwd(), 'sample_ids.txt') - print(env['CM_COCO2014_SAMPLE_ID_PATH']) - if env.get('CM_DATASET_CALIBRATION', '') == "no": - env['CM_DATASET_PATH_ROOT'] = os.getcwd() - # env['CM_DATASET_PATH'] = os.path.join(os.getcwd(), 'install', 'validation', 'data') - env['CM_DATASET_CAPTIONS_DIR_PATH'] = os.path.join( + print(env['MLC_COCO2014_SAMPLE_ID_PATH']) + if env.get('MLC_DATASET_CALIBRATION', '') == "no": + env['MLC_DATASET_PATH_ROOT'] = os.getcwd() + # env['MLC_DATASET_PATH'] = os.path.join(os.getcwd(), 'install', 'validation', 'data') + env['MLC_DATASET_CAPTIONS_DIR_PATH'] = os.path.join( os.getcwd(), 'captions') - env['CM_DATASET_LATENTS_DIR_PATH'] = os.path.join( + env['MLC_DATASET_LATENTS_DIR_PATH'] = os.path.join( os.getcwd(), 'latents') else: - env['CM_CALIBRATION_DATASET_PATH'] = os.path.join( + 
env['MLC_CALIBRATION_DATASET_PATH'] = os.path.join( os.getcwd(), 'calibration', 'data') return {'return': 0} diff --git a/script/get-dataset-coco2014/meta.yaml b/script/get-dataset-coco2014/meta.yaml index 39c603642..fa3724f83 100644 --- a/script/get-dataset-coco2014/meta.yaml +++ b/script/get-dataset-coco2014/meta.yaml @@ -17,7 +17,7 @@ tags: - original default_env: - CM_DATASET_CALIBRATION: 'no' + MLC_DATASET_CALIBRATION: 'no' deps: @@ -30,26 +30,26 @@ deps: - tags: get,generic-python-lib,_package.pandas - force_env_keys: - - CM_GIT_* + - MLC_GIT_* names: - inference-src tags: mlperf,inference,source version: master env: - CM_DATASET: COCO2014 + MLC_DATASET: COCO2014 new_env_keys: -- CM_DATASET_PATH -- CM_DATASET_PATH_ROOT -- CM_DATASET_ANNOTATIONS_DIR_PATH -- CM_DATASET_ANNOTATIONS_FILE_PATH -- CM_CALIBRATION_DATASET_PATH -- CM_COCO2014_SAMPLE_ID_PATH +- MLC_DATASET_PATH +- MLC_DATASET_PATH_ROOT +- MLC_DATASET_ANNOTATIONS_DIR_PATH +- MLC_DATASET_ANNOTATIONS_FILE_PATH +- MLC_CALIBRATION_DATASET_PATH +- MLC_COCO2014_SAMPLE_ID_PATH posthook_deps: - enable_if_env: - CM_DATASET_COCO2014_CUSTOM_ANNOTATIONS: + MLC_DATASET_COCO2014_CUSTOM_ANNOTATIONS: - 'yes' tags: get,coco2014,annotations @@ -57,38 +57,38 @@ variations: '50': default: true env: - CM_DATASET_SIZE: '50' + MLC_DATASET_SIZE: '50' group: size '500': env: - CM_DATASET_SIZE: '500' + MLC_DATASET_SIZE: '500' group: size calibration: env: - CM_DATASET_CALIBRATION: 'yes' + MLC_DATASET_CALIBRATION: 'yes' group: dataset-type custom-annotations: env: - CM_DATASET_COCO2014_CUSTOM_ANNOTATIONS: 'yes' + MLC_DATASET_COCO2014_CUSTOM_ANNOTATIONS: 'yes' group: annotations default-annotations: default: true env: - CM_DATASET_COCO2014_CUSTOM_ANNOTATIONS: 'no' + MLC_DATASET_COCO2014_CUSTOM_ANNOTATIONS: 'no' group: annotations full: env: - CM_DATASET_SIZE: '' + MLC_DATASET_SIZE: '' group: size size.#: env: - CM_DATASET_SIZE: '#' + MLC_DATASET_SIZE: '#' group: size with-sample-ids: env: - CM_GENERATE_SAMPLE_ID: 'yes' + MLC_GENERATE_SAMPLE_ID: 'yes' validation: default: true env: - CM_DATASET_CALIBRATION: 'no' + MLC_DATASET_CALIBRATION: 'no' group: dataset-type diff --git a/script/get-dataset-coco2014/run.bat b/script/get-dataset-coco2014/run.bat index 9ac62e6ad..b0aa60f13 100644 --- a/script/get-dataset-coco2014/run.bat +++ b/script/get-dataset-coco2014/run.bat @@ -1,21 +1,21 @@ @echo off set CUR_DIR=%cd% -set SCRIPT_DIR=%CM_TMP_CURRENT_SCRIPT_PATH% +set SCRIPT_DIR=%MLC_TMP_CURRENT_SCRIPT_PATH% if not exist install mkdir install set INSTALL_DIR=%CUR_DIR%\install -cd %CM_RUN_DIR% +cd %MLC_RUN_DIR% -if not "%CM_DATASET_SIZE%" == "" ( - set MAX_IMAGES=--max-images %CM_DATASET_SIZE% --seed 42 +if not "%MLC_DATASET_SIZE%" == "" ( + set MAX_IMAGES=--max-images %MLC_DATASET_SIZE% --seed 42 ) else ( set MAX_IMAGES= ) rem TBD - next file doesn't exist in the latest inference - need to check/fix ... 
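rem When MLC_DATASET_SIZE is set, MAX_IMAGES above samples that many images with
rem a fixed seed (42) so that reduced datasets stay reproducible across runs.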
-%CM_PYTHON_BIN% download-coco-2014.py %MAX_IMAGES% --dataset-dir=%INSTALL_DIR% --output-labels=openimages-mlperf.json
+%MLC_PYTHON_BIN% download-coco-2014.py %MAX_IMAGES% --dataset-dir=%INSTALL_DIR% --output-labels=openimages-mlperf.json
 IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/get-dataset-coco2014/run.sh b/script/get-dataset-coco2014/run.sh
index 3685b161c..a891b2330 100644
--- a/script/get-dataset-coco2014/run.sh
+++ b/script/get-dataset-coco2014/run.sh
@@ -1,17 +1,17 @@
 #!/bin/bash

 python3() {
-    ${CM_PYTHON_BIN_WITH_PATH} "$@"
+    ${MLC_PYTHON_BIN_WITH_PATH} "$@"
 }
 export -f python3

 CUR=${PWD}
 INSTALL_DIR=${CUR}
-cd ${CM_RUN_DIR}
+cd ${MLC_RUN_DIR}

-if [[ ${CM_DATASET_CALIBRATION} == "no" ]]; then
-  if [ ! -z ${CM_DATASET_SIZE} ]; then
-    max_images=" -m ${CM_DATASET_SIZE}"
+if [[ ${MLC_DATASET_CALIBRATION} == "no" ]]; then
+  if [ ! -z ${MLC_DATASET_SIZE} ]; then
+    max_images=" -m ${MLC_DATASET_SIZE}"
   else
     max_images=""
   fi
@@ -32,7 +32,7 @@ else
   eval $cmd
   test $? -eq 0 || exit $?
 fi
-if [[ ${CM_GENERATE_SAMPLE_ID} == "yes" ]]; then
+if [[ ${MLC_GENERATE_SAMPLE_ID} == "yes" ]]; then
   cmd="python3 sample_ids.py --tsv-path ${INSTALL_DIR}/captions/captions.tsv --output-path ${INSTALL_DIR}/sample_ids.txt"
   echo $cmd
   eval $cmd
diff --git a/script/get-dataset-cognata-mlcommons/customize.py b/script/get-dataset-cognata-mlcommons/customize.py
index 471a44cbc..dbb18802f 100644
--- a/script/get-dataset-cognata-mlcommons/customize.py
+++ b/script/get-dataset-cognata-mlcommons/customize.py
@@ -8,7 +8,7 @@ def preprocess(i):
    env = i['env']

    cm_cache_dataset_path = env.get(
-        'CM_CUSTOM_CACHE_ENTRY_DATASET_MLCOMMONS_COGNATA_PATH', '').strip()
+        'MLC_CUSTOM_CACHE_ENTRY_DATASET_MLCOMMONS_COGNATA_PATH', '').strip()

    res = utils.load_json(
        os.path.join(
@@ -16,42 +16,42 @@ def preprocess(i):
            'cfg.json'))
    cfg = res.get('meta', {})
    if cfg.get('imported', False):
-        env['CM_DATASET_MLCOMMONS_COGNATA_IMPORTED'] = 'yes'
+        env['MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED'] = 'yes'

-    if env.get('CM_ABTF_SCRATCH_PATH_DATASETS', '') != '':
-        env['CM_ABTF_SCRATCH_PATH_DATASET_COGNATA'] = os.path.join(
-            env['CM_ABTF_SCRATCH_PATH_DATASETS'], "cognata")
-        env['CM_ABTF_SCRATCH_PATH_DATASET_COGNATA_TMP'] = os.path.join(
-            env['CM_ABTF_SCRATCH_PATH_DATASETS'], "cognata_tmp")
+    if env.get('MLC_ABTF_SCRATCH_PATH_DATASETS', '') != '':
+        env['MLC_ABTF_SCRATCH_PATH_DATASET_COGNATA'] = os.path.join(
+            env['MLC_ABTF_SCRATCH_PATH_DATASETS'], "cognata")
+        env['MLC_ABTF_SCRATCH_PATH_DATASET_COGNATA_TMP'] = os.path.join(
+            env['MLC_ABTF_SCRATCH_PATH_DATASETS'], "cognata_tmp")

-    env['CM_DATASET_COGNATA_POC_TEXT_MD5_FILE_PATH'] = os.path.join(
+    env['MLC_DATASET_COGNATA_POC_TEXT_MD5_FILE_PATH'] = os.path.join(
        i['run_script_input']['path'], 'checksums', 'cognata_poc.txt')

    # Check if user requests path not in CM cache
    #
-    # --path (env CM_TMP_PATH) shows where to store Cognata data set instead of CM cahe
+    # --path (env MLC_TMP_PATH) shows where to store the Cognata data set instead of the CM cache
    # --import tells CM to import existing Cognata from a given path and skip further download/processing
    #

    import_path = env.get(
-        'CM_DATASET_MLCOMMONS_COGNATA_IMPORT_PATH',
+        'MLC_DATASET_MLCOMMONS_COGNATA_IMPORT_PATH',
        '').strip()
    if import_path != '':
        if not os.path.isdir(import_path):
            return {'return': 1, 'error': 'directory to import this dataset doesn\'t exist: {}'.format(
                import_path)}
-        env['CM_DATASET_MLCOMMONS_COGNATA_IMPORTED'] = 'yes'
-        env['CM_DATASET_MLCOMMONS_COGNATA_PATH'] = import_path
+        
env['MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED'] = 'yes' + env['MLC_DATASET_MLCOMMONS_COGNATA_PATH'] = import_path else: - path = env.get('CM_TMP_PATH', '') + path = env.get('MLC_TMP_PATH', '') if path != '': - env['CM_DATASET_MLCOMMONS_COGNATA_IMPORTED'] = 'no' + env['MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED'] = 'no' if not os.path.isdir(path): os.makedirs(path) - env['CM_DATASET_MLCOMMONS_COGNATA_PATH'] = path + env['MLC_DATASET_MLCOMMONS_COGNATA_PATH'] = path return {'return': 0} @@ -65,29 +65,29 @@ def postprocess(i): cur_dir = os.getcwd() - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') cm_cache_dataset_path = env.get( - 'CM_CUSTOM_CACHE_ENTRY_DATASET_MLCOMMONS_COGNATA_PATH', '').strip() + 'MLC_CUSTOM_CACHE_ENTRY_DATASET_MLCOMMONS_COGNATA_PATH', '').strip() if not os.path.isdir(cm_cache_dataset_path): return { 'return': 1, 'error': 'Dataset corrupted - CM cache path not found: {}'.format(cm_cache_dataset_path)} - if env.get('CM_DATASET_MLCOMMONS_COGNATA_FILE_NAMES', '') == '': - env['CM_DATASET_MLCOMMONS_COGNATA_PATH'] = os.path.dirname( - env['CM_CUSTOM_CACHE_ENTRY_DATASET_MLCOMMONS_COGNATA_PATH']) - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_DATASET_MLCOMMONS_COGNATA_PATH'] + if env.get('MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES', '') == '': + env['MLC_DATASET_MLCOMMONS_COGNATA_PATH'] = os.path.dirname( + env['MLC_CUSTOM_CACHE_ENTRY_DATASET_MLCOMMONS_COGNATA_PATH']) + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_DATASET_MLCOMMONS_COGNATA_PATH'] return {'return': 0} cm_cache_dataset_cfg_file = os.path.join(cm_cache_dataset_path, 'cfg.json') - env['CM_DATASET_MLCOMMONS_COGNATA_CFG_FILE'] = cm_cache_dataset_cfg_file + env['MLC_DATASET_MLCOMMONS_COGNATA_CFG_FILE'] = cm_cache_dataset_cfg_file res = utils.load_json(cm_cache_dataset_cfg_file) cfg = res.get('meta', {}) dataset_path = cfg.get('real_path', '') - dataset_path_requested = env.get('CM_DATASET_MLCOMMONS_COGNATA_PATH', '') + dataset_path_requested = env.get('MLC_DATASET_MLCOMMONS_COGNATA_PATH', '') if dataset_path == '': if dataset_path_requested != '': dataset_path = dataset_path_requested @@ -102,10 +102,10 @@ def postprocess(i): print('') print('Used dataset path: {}'.format(dataset_path)) - env['CM_DATASET_MLCOMMONS_COGNATA_PATH'] = dataset_path + env['MLC_DATASET_MLCOMMONS_COGNATA_PATH'] = dataset_path # If imported, don't process further - if env.get('CM_DATASET_MLCOMMONS_COGNATA_IMPORTED', '') == 'yes': + if env.get('MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED', '') == 'yes': cfg['imported'] = True else: cfg['imported'] = False @@ -118,7 +118,7 @@ def postprocess(i): # If processed once, don't process unless forced if cfg.get('processed', False): if not utils.check_if_true_yes_on( - env, 'CM_DATASET_MLCOMMONS_COGNATA_UPDATE'): + env, 'MLC_DATASET_MLCOMMONS_COGNATA_UPDATE'): print('') print('Already processed: use --update to update this dataset') @@ -146,7 +146,7 @@ def postprocess(i): first_url = dataset_meta.get('first_url', '').strip() if first_url == '': - x = env.get('CM_DATASET_MLCOMMONS_COGNATA_PRIVATE_URL', '').strip() + x = env.get('MLC_DATASET_MLCOMMONS_COGNATA_PRIVATE_URL', '').strip() if x != '': first_url = x else: @@ -196,12 +196,12 @@ def postprocess(i): # Parse XLSX and check serial number serial_numbers = [] for s in env.get( - 'CM_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS', '').strip().split(','): + 'MLC_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS', '').strip().split(','): s = s.strip() if s != '' and s not in serial_numbers: serial_numbers.append(s) - dataset_key = 
env['CM_DATASET_MLCOMMONS_COGNATA_KEY1'] + dataset_key = env['MLC_DATASET_MLCOMMONS_COGNATA_KEY1'] url_key = 'Link to Excel File (Download Links)' serial_key = 'Serial Number' @@ -269,14 +269,14 @@ def postprocess(i): print('Processing subsets ...') group_names = [] - for s in env.get('CM_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES', + for s in env.get('MLC_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES', '').strip().split(','): s = s.strip() if s != '' and s not in group_names: group_names.append(s) # Check if force some filenames - x = env.get('CM_DATASET_MLCOMMONS_COGNATA_FILE_NAMES', '').strip() + x = env.get('MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES', '').strip() file_names = [] if x != '': file_names = x.strip(';') if ';' in x else [x] @@ -339,7 +339,7 @@ def postprocess(i): continue if os.name == 'nt': - aria2_tool = env['CM_ARIA2_BIN_WITH_PATH'] + aria2_tool = env['MLC_ARIA2_BIN_WITH_PATH'] else: aria2_tool = 'aria2c' @@ -394,7 +394,7 @@ def postprocess(i): cfg['processed'] = True utils.save_json(cm_cache_dataset_cfg_file, cfg) - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_DATASET_MLCOMMONS_COGNATA_PATH'] + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_DATASET_MLCOMMONS_COGNATA_PATH'] return {'return': 0} diff --git a/script/get-dataset-cognata-mlcommons/meta.yaml b/script/get-dataset-cognata-mlcommons/meta.yaml index 1b8155d7b..b59662b22 100644 --- a/script/get-dataset-cognata-mlcommons/meta.yaml +++ b/script/get-dataset-cognata-mlcommons/meta.yaml @@ -28,16 +28,16 @@ category_sort: 8500 input_mapping: - update: CM_DATASET_MLCOMMONS_COGNATA_UPDATE - import: CM_DATASET_MLCOMMONS_COGNATA_IMPORT_PATH - private_url: CM_DATASET_MLCOMMONS_COGNATA_PRIVATE_URL - serial_numbers: CM_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS - group_names: CM_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES - file_names: CM_DATASET_MLCOMMONS_COGNATA_FILE_NAMES + update: MLC_DATASET_MLCOMMONS_COGNATA_UPDATE + import: MLC_DATASET_MLCOMMONS_COGNATA_IMPORT_PATH + private_url: MLC_DATASET_MLCOMMONS_COGNATA_PRIVATE_URL + serial_numbers: MLC_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS + group_names: MLC_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES + file_names: MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES env: - CM_DATASET: MLCOMMONS_COGNATA - CM_DATASET_MLCOMMONS_COGNATA_KEY1: "Dataset 1.0" + MLC_DATASET: MLCOMMONS_COGNATA + MLC_DATASET_MLCOMMONS_COGNATA_KEY1: "Dataset 1.0" @@ -49,51 +49,51 @@ deps: tags: create,custom,cache,entry extra_cache_tags: dataset,cognata,mlcommons-cognata skip_if_env: - CM_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: + MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: - 'off' env_key: DATASET_MLCOMMONS_COGNATA - # this script will prepare env CM_CUSTOM_CACHE_ENTRY_{env_key}_PATH + # this script will prepare env MLC_CUSTOM_CACHE_ENTRY_{env_key}_PATH prehook_deps: - names: - gdrive-downloader-cognata skip_if_env: - CM_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: + MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: - 'on' enable_if_env: - CM_DATASET_MLCOMMONS_COGNATA_IMPORTED: + MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED: - 'no' - CM_DATASET_MLCOMMONS_COGNATA_DOWNLOAD_TOOL: + MLC_DATASET_MLCOMMONS_COGNATA_DOWNLOAD_TOOL: - gdrive tags: download,file,_gdown,_url.https://drive.google.com/drive/folders/1FS-qLbzB5htgMnfry6z4gx8J_ZH_7MsJ?usp=drive_link env: - CM_DOWNLOAD_EXTRA_OPTIONS: " --folder" - CM_DOWNLOAD_FILENAME: 10002_Urban_Clear_Morning - CM_DOWNLOAD_FINAL_ENV_NAME: CM_CUSTOM_CACHE_ENTRY_DATASET_MLCOMMONS_COGNATA_PATH + MLC_DOWNLOAD_EXTRA_OPTIONS: " --folder" + MLC_DOWNLOAD_FILENAME: 10002_Urban_Clear_Morning + MLC_DOWNLOAD_FINAL_ENV_NAME: 
MLC_CUSTOM_CACHE_ENTRY_DATASET_MLCOMMONS_COGNATA_PATH force_cache: true extra_cache_tags: abtf,cognata,poc,dataset - names: - rclone-downloader-cognata skip_if_env: - CM_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: + MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: - 'on' enable_if_env: - CM_DATASET_MLCOMMONS_COGNATA_IMPORTED: + MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED: - 'no' - CM_DATASET_MLCOMMONS_COGNATA_DOWNLOAD_TOOL: + MLC_DATASET_MLCOMMONS_COGNATA_DOWNLOAD_TOOL: - rclone tags: download-and-extract,file,_extract,_rclone,_url.https://automotive.mlcommons-storage.org/Cognata_Dataset_PoC_Demo%2F10002_Urban_Clear_Morning.zip env: - CM_RCLONE_COPY_USING: copyurl - CM_RCLONE_CONFIG_CMD: '' - CM_DOWNLOAD_CHECKSUM: '76389b05b0ee1e08d354d3c1b696b8c0' - CM_EXTRACT_EXTRACTED_CHECKSUM_FILE: "<<>>" - CM_DOWNLOAD_PATH: <<>> - CM_EXTRACT_PATH: <<>> - CM_EXTRACT_EXTRACTED_FILENAME: 10002_Urban_Clear_Morning - CM_DAE_FINAL_ENV_NAME: CM_CUSTOM_CACHE_ENTRY_DATASET_MLCOMMONS_COGNATA_PATH + MLC_RCLONE_COPY_USING: copyurl + MLC_RCLONE_CONFIG_CMD: '' + MLC_DOWNLOAD_CHECKSUM: '76389b05b0ee1e08d354d3c1b696b8c0' + MLC_EXTRACT_EXTRACTED_CHECKSUM_FILE: "<<>>" + MLC_DOWNLOAD_PATH: <<>> + MLC_EXTRACT_PATH: <<>> + MLC_EXTRACT_EXTRACTED_FILENAME: 10002_Urban_Clear_Morning + MLC_DAE_FINAL_ENV_NAME: MLC_CUSTOM_CACHE_ENTRY_DATASET_MLCOMMONS_COGNATA_PATH force_cache: true extra_cache_tags: abtf,cognata,poc,dataset @@ -102,28 +102,28 @@ prehook_deps: - python3 tags: get,python3 skip_if_env: - CM_DATASET_MLCOMMONS_COGNATA_IMPORTED: + MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED: - 'yes' enable_if_env: - CM_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: + MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: - 'on' # Python package to read/write Excel files - tags: get,generic-python-lib,_package.openpyxl skip_if_env: - CM_DATASET_MLCOMMONS_COGNATA_IMPORTED: + MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED: - 'yes' enable_if_env: - CM_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: + MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: - 'on' # Tool to download large files - tags: get,aria2 skip_if_env: - CM_DATASET_MLCOMMONS_COGNATA_IMPORTED: + MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED: - 'yes' enable_if_env: - CM_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: + MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: - 'on' @@ -131,31 +131,31 @@ variations: abtf-demo: group: dataset-type env: - CM_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS: "10002_Urban_Clear_Morning" - CM_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES: "Cognata_Camera_01_8M" - CM_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: "Cognata_Camera_01_8M_ann.zip;Cognata_Camera_01_8M_ann_laneline.zip;Cognata_Camera_01_8M.zip" + MLC_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS: "10002_Urban_Clear_Morning" + MLC_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES: "Cognata_Camera_01_8M" + MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: "Cognata_Camera_01_8M_ann.zip;Cognata_Camera_01_8M_ann_laneline.zip;Cognata_Camera_01_8M.zip" abtf-poc: group: dataset-type default: true env: - CM_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS: "10002_Urban_Clear_Morning" - CM_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES: "Cognata_Camera_01_8M" - CM_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: "" + MLC_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS: "10002_Urban_Clear_Morning" + MLC_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES: "Cognata_Camera_01_8M" + MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: "" rclone: group: download-tool default: true env: - CM_DATASET_MLCOMMONS_COGNATA_DOWNLOAD_TOOL: rclone + MLC_DATASET_MLCOMMONS_COGNATA_DOWNLOAD_TOOL: rclone gdrive: group: download-tool env: - CM_DATASET_MLCOMMONS_COGNATA_DOWNLOAD_TOOL: gdrive + 
+      MLC_DATASET_MLCOMMONS_COGNATA_DOWNLOAD_TOOL: gdrive
 
 new_env_keys:
-- CM_DATASET_MLCOMMONS_COGNATA*
+- MLC_DATASET_MLCOMMONS_COGNATA*
 
 print_env_at_the_end:
-  CM_DATASET_MLCOMMONS_COGNATA_PATH: Path to Cognata dataset
+  MLC_DATASET_MLCOMMONS_COGNATA_PATH: Path to Cognata dataset
diff --git a/script/get-dataset-criteo/README-extra.md b/script/get-dataset-criteo/README-extra.md
index 345a59cfe..efe669715 100644
--- a/script/get-dataset-criteo/README-extra.md
+++ b/script/get-dataset-criteo/README-extra.md
@@ -2,7 +2,7 @@
 This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) downloads the 24 days of Criteo dataset for MLPerf inference using DLRM.
 
 ## Exported Variables
-* `CM_DATASET_PATH`
+* `MLC_DATASET_PATH`
 
 ## Supported and Tested OS
 1. Ubuntu 18.04, 20.04, 22.04
diff --git a/script/get-dataset-criteo/meta.yaml b/script/get-dataset-criteo/meta.yaml
index 06bdd335c..772e48fb8 100644
--- a/script/get-dataset-criteo/meta.yaml
+++ b/script/get-dataset-criteo/meta.yaml
@@ -4,13 +4,13 @@ automation_uid: 5b4e0237da074764
 cache: true
 category: AI/ML datasets
 default_env:
-  CM_BACKUP_ZIPS: 'no'
+  MLC_BACKUP_ZIPS: 'no'
 env:
-  CM_DATASET: terabyte
+  MLC_DATASET: terabyte
 input_mapping:
-  criteo_path: CM_CRITEO_PATH
+  criteo_path: MLC_CRITEO_PATH
 new_env_keys:
-- CM_DATASET*
+- MLC_DATASET*
 tags:
 - get
 - dataset
@@ -20,7 +20,7 @@ uid: 194a47d908714897
 variations:
   backup:
     env:
-      CM_BACKUP_ZIPS: 'yes'
+      MLC_BACKUP_ZIPS: 'yes'
   fake:
     env:
-      CM_CRITEO_FAKE: 'yes'
+      MLC_CRITEO_FAKE: 'yes'
diff --git a/script/get-dataset-criteo/run.sh b/script/get-dataset-criteo/run.sh
index 32a1c777f..b6f321d2f 100644
--- a/script/get-dataset-criteo/run.sh
+++ b/script/get-dataset-criteo/run.sh
@@ -1,14 +1,14 @@
 #!/bin/bash
 
-if [ ! -z ${CM_CRITEO_PATH+x} ]; then
-  echo "CM_DATASET_PATH=${CM_CRITEO_PATH}" > tmp-run-env.out
+if [ ! -z ${MLC_CRITEO_PATH+x} ]; then
+  echo "MLC_DATASET_PATH=${MLC_CRITEO_PATH}" > tmp-run-env.out
   test $? -eq 0 || exit 1
   exit 0
 fi
 
 CUR=$PWD
 
-if [[ ${CM_CRITEO_FAKE} == "yes" ]]; then
-  cd ${CM_MLPERF_INFERENCE_DLRM_PATH}/pytorch/tools
+if [[ ${MLC_CRITEO_FAKE} == "yes" ]]; then
+  cd ${MLC_MLPERF_INFERENCE_DLRM_PATH}/pytorch/tools
   bash ./make_fake_criteo.sh terabyte
   mv ./fake_criteo/* $CUR/
   cd $CUR
@@ -16,11 +16,11 @@ else
   curl -O -C - https://storage.googleapis.com/criteo-cail-datasets/day_{`seq -s "," 0 23`}.gz
   test $? -eq 0 || exit 1
-  if [ ${CM_BACKUP_ZIPS:-no} == "yes" ]; then
+  if [ ${MLC_BACKUP_ZIPS:-no} == "yes" ]; then
     mkdir backup
     cp -r *.gz backup/
   fi
   yes n | gunzip -k day_{0..23}.gz
 fi
 
-echo "CM_DATASET_PATH=$PWD" > tmp-run-env.out
+echo "MLC_DATASET_PATH=$PWD" > tmp-run-env.out
diff --git a/script/get-dataset-igbh/customize.py b/script/get-dataset-igbh/customize.py
index c454d415d..d64d701ba 100644
--- a/script/get-dataset-igbh/customize.py
+++ b/script/get-dataset-igbh/customize.py
@@ -12,46 +12,46 @@ def preprocess(i):
         return {'return': 1, 'error': 'Script not supported in windows yet!'}
 
     print("Using MLCommons Inference source from '" +
-          env['CM_MLPERF_INFERENCE_SOURCE'] + "'")
+          env['MLC_MLPERF_INFERENCE_SOURCE'] + "'")
 
     # run cmd
     run_cmd = ""
     graph_folder = os.path.join(
-        env['CM_MLPERF_INFERENCE_SOURCE'], 'graph', 'R-GAT')
+        env['MLC_MLPERF_INFERENCE_SOURCE'], 'graph', 'R-GAT')
 
-    if env.get('CM_DATASET_IGBH_PATH',
+    if env.get('MLC_DATASET_IGBH_PATH',
                '') != '':  # skip download, just register in cache
-        env['CM_DATASET_IGBH_OUT_PATH'] = env['CM_DATASET_IGBH_PATH']
+        env['MLC_DATASET_IGBH_OUT_PATH'] = env['MLC_DATASET_IGBH_PATH']
         return {'return': 0}
 
-    download_loc = env.get('CM_DATASET_IGBH_OUT_PATH', os.getcwd())
+    download_loc = env.get('MLC_DATASET_IGBH_OUT_PATH', os.getcwd())
 
-    env['CM_DATASET_IGBH_DOWNLOAD_LOCATION'] = download_loc
+    env['MLC_DATASET_IGBH_DOWNLOAD_LOCATION'] = download_loc
 
     run_cmd += f"cd {graph_folder} "
     x_sep = " && "
 
     # download the model
-    if env['CM_DATASET_IGBH_TYPE'] == "debug":
-        run_cmd += x_sep + env['CM_PYTHON_BIN_WITH_PATH'] + \
+    if env['MLC_DATASET_IGBH_TYPE'] == "debug":
+        run_cmd += x_sep + env['MLC_PYTHON_BIN_WITH_PATH'] + \
             f" tools/download_igbh_test.py --target-path {download_loc} "
     else:
-        env['CM_DATASET_IGBH_FULL_DOWNLOAD'] = 'yes'
+        env['MLC_DATASET_IGBH_FULL_DOWNLOAD'] = 'yes'
 
     # split seeds
     run_cmd += x_sep + \
         f"""{
-            env['CM_PYTHON_BIN_WITH_PATH']} tools/split_seeds.py --path {download_loc} --dataset_size {
-            env['CM_DATASET_IGBH_SIZE']} """
+            env['MLC_PYTHON_BIN_WITH_PATH']} tools/split_seeds.py --path {download_loc} --dataset_size {
+            env['MLC_DATASET_IGBH_SIZE']} """
 
     # compress graph(for glt implementation)
-    if env.get('CM_IGBH_GRAPH_COMPRESS', '') == "yes":
+    if env.get('MLC_IGBH_GRAPH_COMPRESS', '') == "yes":
         run_cmd += x_sep + \
-            f"""{env['CM_PYTHON_BIN_WITH_PATH']} tools/compress_graph.py --path {download_loc} --dataset_size {env['CM_DATASET_IGBH_SIZE']} --layout {env['CM_IGBH_GRAPH_COMPRESS_LAYOUT']}
+            f"""{env['MLC_PYTHON_BIN_WITH_PATH']} tools/compress_graph.py --path {download_loc} --dataset_size {env['MLC_DATASET_IGBH_SIZE']} --layout {env['MLC_IGBH_GRAPH_COMPRESS_LAYOUT']}
             """
 
-    env['CM_RUN_CMD'] = run_cmd
+    env['MLC_RUN_CMD'] = run_cmd
 
     return {'return': 0}
 
@@ -60,10 +60,10 @@ def postprocess(i):
 
     env = i['env']
 
-    env['CM_DATASET_IGBH_PATH'] = env.get(
-        'CM_DATASET_IGBH_OUT_PATH', os.getcwd())
+    env['MLC_DATASET_IGBH_PATH'] = env.get(
+        'MLC_DATASET_IGBH_OUT_PATH', os.getcwd())
 
     print(
-        f"Path to the IGBH dataset: {os.path.join(env['CM_DATASET_IGBH_PATH'], env['CM_DATASET_IGBH_SIZE'])}")
+        f"Path to the IGBH dataset: {os.path.join(env['MLC_DATASET_IGBH_PATH'], env['MLC_DATASET_IGBH_SIZE'])}")
 
     return {'return': 0}
diff --git a/script/get-dataset-igbh/meta.yaml b/script/get-dataset-igbh/meta.yaml
index 8e5c7b4cd..430fd3075 100644
--- a/script/get-dataset-igbh/meta.yaml
+++ b/script/get-dataset-igbh/meta.yaml
@@ -11,10 +11,10 @@ tags:
 - inference
 uid: 824e61316c074253
 new_env_keys:
-  - CM_DATASET_IGBH_PATH
-  - CM_DATASET_IGBH_SIZE
+  - MLC_DATASET_IGBH_PATH
+  - MLC_DATASET_IGBH_SIZE
 input_mapping:
-  out_path: CM_DATASET_IGBH_OUT_PATH
+  out_path: MLC_DATASET_IGBH_OUT_PATH
 env:
   SKIP_USER_PROMPT: yes
 deps:
@@ -32,16 +32,16 @@ deps:
 prehook_deps:
   #paper
   - env:
-      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper/node_feat.npy
-      CM_DOWNLOAD_CHECKSUM: 71058b9ac8011bafa1c5467504452d13
-      CM_DOWNLOAD_FILENAME: node_feat.npy
-      CM_DOWNLOAD_PATH: <<>>/full/processed/paper/
+      MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper/node_feat.npy
+      MLC_DOWNLOAD_CHECKSUM: 71058b9ac8011bafa1c5467504452d13
+      MLC_DOWNLOAD_FILENAME: node_feat.npy
+      MLC_DOWNLOAD_PATH: <<>>/full/processed/paper/
    extra_cache_tags: dataset,igbh,paper,node_feat
    force_env_keys:
-      - CM_OUTDIRNAME
+      - MLC_OUTDIRNAME
    force_cache: true
    enable_if_env:
-      CM_DATASET_IGBH_FULL_DOWNLOAD:
+      MLC_DATASET_IGBH_FULL_DOWNLOAD:
      - 'yes'
    names:
    - dae
@@ -49,18 +49,18 @@ prehook_deps:
    tags: download-and-extract,_wget
    update_tags_from_env_with_prefix:
      _url.:
-      - CM_PACKAGE_URL
+      - MLC_PACKAGE_URL
   - env:
-      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper/node_label_19.npy
-      CM_DOWNLOAD_CHECKSUM: be6fda45566e679bdb05ebea98ad16d4
-      CM_DOWNLOAD_FILENAME: node_label_19.npy
-      CM_DOWNLOAD_PATH: <<>>/full/processed/paper/
+      MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper/node_label_19.npy
+      MLC_DOWNLOAD_CHECKSUM: be6fda45566e679bdb05ebea98ad16d4
+      MLC_DOWNLOAD_FILENAME: node_label_19.npy
+      MLC_DOWNLOAD_PATH: <<>>/full/processed/paper/
    extra_cache_tags: dataset,igbh,paper,node_label_19
    force_env_keys:
-      - CM_OUTDIRNAME
+      - MLC_OUTDIRNAME
    force_cache: true
    enable_if_env:
-      CM_DATASET_IGBH_FULL_DOWNLOAD:
+      MLC_DATASET_IGBH_FULL_DOWNLOAD:
      - 'yes'
    names:
    - dae
@@ -68,18 +68,18 @@ prehook_deps:
    tags: download-and-extract,_wget
    update_tags_from_env_with_prefix:
      _url.:
-      - CM_PACKAGE_URL
+      - MLC_PACKAGE_URL
   - env:
-      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper/node_label_2K.npy
-      CM_DOWNLOAD_CHECKSUM: 6eccab9a14f92f42be5b367c39002031
-      CM_DOWNLOAD_FILENAME: node_label_2K.npy
-      CM_DOWNLOAD_PATH: <<>>/full/processed/paper/
+      MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper/node_label_2K.npy
+      MLC_DOWNLOAD_CHECKSUM: 6eccab9a14f92f42be5b367c39002031
+      MLC_DOWNLOAD_FILENAME: node_label_2K.npy
+      MLC_DOWNLOAD_PATH: <<>>/full/processed/paper/
    extra_cache_tags: dataset,igbh,paper,node_label_2K
    force_cache: true
    force_env_keys:
-      - CM_OUTDIRNAME
+      - MLC_OUTDIRNAME
    enable_if_env:
-      CM_DATASET_IGBH_FULL_DOWNLOAD:
+      MLC_DATASET_IGBH_FULL_DOWNLOAD:
      - 'yes'
    names:
    - dae
@@ -87,18 +87,18 @@ prehook_deps:
    tags: download-and-extract,_wget
    update_tags_from_env_with_prefix:
      _url.:
-      - CM_PACKAGE_URL
+      - MLC_PACKAGE_URL
   - env:
-      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper/paper_id_index_mapping.npy
-      CM_DOWNLOAD_CHECKSUM: f70dd642a4f7e41d926c91c8c054fc4c
-      CM_DOWNLOAD_FILENAME: paper_id_index_mapping.npy
-      CM_DOWNLOAD_PATH: <<>>/full/processed/paper/
+      MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper/paper_id_index_mapping.npy
+      MLC_DOWNLOAD_CHECKSUM: f70dd642a4f7e41d926c91c8c054fc4c
+      MLC_DOWNLOAD_FILENAME: paper_id_index_mapping.npy
+      MLC_DOWNLOAD_PATH: <<>>/full/processed/paper/
    extra_cache_tags: dataset,igbh,paper,paper_id_index_mapping
    force_cache: true
    force_env_keys:
-      - CM_OUTDIRNAME
+      - MLC_OUTDIRNAME
   enable_if_env:
-      CM_DATASET_IGBH_FULL_DOWNLOAD:
+      MLC_DATASET_IGBH_FULL_DOWNLOAD:
      - 'yes'
    names:
    - dae
@@ -106,19 +106,19 @@ prehook_deps:
    tags: download-and-extract,_wget
    update_tags_from_env_with_prefix:
      _url.:
-      - CM_PACKAGE_URL
+      - MLC_PACKAGE_URL
   #paper_cites_paper
   - env:
-      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__cites__paper/edge_index.npy
-      CM_DOWNLOAD_CHECKSUM: f4897f53636c04a9c66f6063ec635c16
-      CM_DOWNLOAD_FILENAME: edge_index.npy
-      CM_DOWNLOAD_PATH: <<>>/full/processed/paper__cites__paper/
+      MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__cites__paper/edge_index.npy
+      MLC_DOWNLOAD_CHECKSUM: f4897f53636c04a9c66f6063ec635c16
+      MLC_DOWNLOAD_FILENAME: edge_index.npy
+      MLC_DOWNLOAD_PATH: <<>>/full/processed/paper__cites__paper/
    extra_cache_tags: dataset,igbh,paper_cites_paper,edge_index
    force_cache: true
    force_env_keys:
-      - CM_OUTDIRNAME
+      - MLC_OUTDIRNAME
    enable_if_env:
-      CM_DATASET_IGBH_FULL_DOWNLOAD:
+      MLC_DATASET_IGBH_FULL_DOWNLOAD:
      - 'yes'
    names:
    - dae
@@ -126,19 +126,19 @@ prehook_deps:
    tags: download-and-extract,_wget
    update_tags_from_env_with_prefix:
      _url.:
-      - CM_PACKAGE_URL
+      - MLC_PACKAGE_URL
   # author
   - env:
-      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/author/author_id_index_mapping.npy
-      CM_DOWNLOAD_CHECKSUM: 58c15aab7dae03bbd57e6a4ac5e61bd9
-      CM_DOWNLOAD_FILENAME: author_id_index_mapping.npy
-      CM_DOWNLOAD_PATH: <<>>/full/processed/author/
+      MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/author/author_id_index_mapping.npy
+      MLC_DOWNLOAD_CHECKSUM: 58c15aab7dae03bbd57e6a4ac5e61bd9
+      MLC_DOWNLOAD_FILENAME: author_id_index_mapping.npy
+      MLC_DOWNLOAD_PATH: <<>>/full/processed/author/
    extra_cache_tags: dataset,igbh,author,author_id_index_mapping
    force_cache: true
    force_env_keys:
-      - CM_OUTDIRNAME
+      - MLC_OUTDIRNAME
    enable_if_env:
-      CM_DATASET_IGBH_FULL_DOWNLOAD:
+      MLC_DATASET_IGBH_FULL_DOWNLOAD:
      - 'yes'
    names:
    - dae
@@ -146,18 +146,18 @@ prehook_deps:
    tags: download-and-extract,_wget
    update_tags_from_env_with_prefix:
      _url.:
-      - CM_PACKAGE_URL
+      - MLC_PACKAGE_URL
   - env:
-      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/author/node_feat.npy
-      CM_DOWNLOAD_CHECKSUM: 2ec2512b554088381c04ec013e893c8d
-      CM_DOWNLOAD_FILENAME: node_feat.npy
-      CM_DOWNLOAD_PATH: <<>>/full/processed/author/
+      MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/author/node_feat.npy
+      MLC_DOWNLOAD_CHECKSUM: 2ec2512b554088381c04ec013e893c8d
+      MLC_DOWNLOAD_FILENAME: node_feat.npy
+      MLC_DOWNLOAD_PATH: <<>>/full/processed/author/
    extra_cache_tags: dataset,igbh,author,node_feat
    force_cache: true
    force_env_keys:
-      - CM_OUTDIRNAME
+      - MLC_OUTDIRNAME
    enable_if_env:
-      CM_DATASET_IGBH_FULL_DOWNLOAD:
+      MLC_DATASET_IGBH_FULL_DOWNLOAD:
      - 'yes'
    names:
    - dae
@@ -165,19 +165,19 @@ prehook_deps:
    tags: download-and-extract,_wget
    update_tags_from_env_with_prefix:
      _url.:
-      - CM_PACKAGE_URL
+      - MLC_PACKAGE_URL
   # conference
   - env:
-      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/conference/conference_id_index_mapping.npy
-      CM_DOWNLOAD_CHECKSUM: 0bf7c555d8c697b31b6af6c4cb6b6612
-      CM_DOWNLOAD_FILENAME: conference_id_index_mapping.npy
-      CM_DOWNLOAD_PATH: <<>>/full/processed/conference/
+      MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/conference/conference_id_index_mapping.npy
+      MLC_DOWNLOAD_CHECKSUM: 0bf7c555d8c697b31b6af6c4cb6b6612
+      MLC_DOWNLOAD_FILENAME: conference_id_index_mapping.npy
+      MLC_DOWNLOAD_PATH: <<>>/full/processed/conference/
    extra_cache_tags: dataset,igbh,conference,conference_id_index_mapping
    force_cache: true
    force_env_keys:
-      - CM_OUTDIRNAME
+      - MLC_OUTDIRNAME
    enable_if_env:
-      CM_DATASET_IGBH_FULL_DOWNLOAD:
+      MLC_DATASET_IGBH_FULL_DOWNLOAD:
      - 'yes'
    names:
    - dae
@@ -185,18 +185,18 @@ prehook_deps:
    tags: download-and-extract,_wget
    update_tags_from_env_with_prefix:
      _url.:
-      - CM_PACKAGE_URL
+      - MLC_PACKAGE_URL
   - env:
-      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/conference/node_feat.npy
-      CM_DOWNLOAD_CHECKSUM: 898ff529b8cf972261fedd50df6377f8
-      CM_DOWNLOAD_FILENAME: node_feat.npy
-      CM_DOWNLOAD_PATH: <<>>/full/processed/conference/
+      MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/conference/node_feat.npy
+      MLC_DOWNLOAD_CHECKSUM: 898ff529b8cf972261fedd50df6377f8
+      MLC_DOWNLOAD_FILENAME: node_feat.npy
+      MLC_DOWNLOAD_PATH: <<>>/full/processed/conference/
    extra_cache_tags: dataset,igbh,conference,node_feat
    force_cache: true
    force_env_keys:
-      - CM_OUTDIRNAME
+      - MLC_OUTDIRNAME
    enable_if_env:
-      CM_DATASET_IGBH_FULL_DOWNLOAD:
+      MLC_DATASET_IGBH_FULL_DOWNLOAD:
      - 'yes'
    names:
    - dae
@@ -204,19 +204,19 @@ prehook_deps:
    tags: download-and-extract,_wget
    update_tags_from_env_with_prefix:
      _url.:
-      - CM_PACKAGE_URL
+      - MLC_PACKAGE_URL
   # institute
   - env:
-      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/institute/institute_id_index_mapping.npy
-      CM_DOWNLOAD_CHECKSUM: 03fb45eafb7bd35875ef4c7cd2a299a9
-      CM_DOWNLOAD_FILENAME: institute_id_index_mapping.npy
-      CM_DOWNLOAD_PATH: <<>>/full/processed/institute/
+      MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/institute/institute_id_index_mapping.npy
+      MLC_DOWNLOAD_CHECKSUM: 03fb45eafb7bd35875ef4c7cd2a299a9
+      MLC_DOWNLOAD_FILENAME: institute_id_index_mapping.npy
+      MLC_DOWNLOAD_PATH: <<>>/full/processed/institute/
    extra_cache_tags: dataset,igbh,institute,institute_id_index_mapping
    force_cache: true
    force_env_keys:
-      - CM_OUTDIRNAME
+      - MLC_OUTDIRNAME
    enable_if_env:
-      CM_DATASET_IGBH_FULL_DOWNLOAD:
+      MLC_DATASET_IGBH_FULL_DOWNLOAD:
      - 'yes'
    names:
    - dae
@@ -224,18 +224,18 @@ prehook_deps:
    tags: download-and-extract,_wget
    update_tags_from_env_with_prefix:
      _url.:
-      - CM_PACKAGE_URL
+      - MLC_PACKAGE_URL
   - env:
-      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/institute/node_feat.npy
-      CM_DOWNLOAD_CHECKSUM: 12eaeced22d17b4e97d4b4742331c819
-      CM_DOWNLOAD_FILENAME: node_feat.npy
-      CM_DOWNLOAD_PATH: <<>>/full/processed/institute/
+      MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/institute/node_feat.npy
+      MLC_DOWNLOAD_CHECKSUM: 12eaeced22d17b4e97d4b4742331c819
+      MLC_DOWNLOAD_FILENAME: node_feat.npy
+      MLC_DOWNLOAD_PATH: <<>>/full/processed/institute/
    extra_cache_tags: dataset,igbh,institute,node_feat
    force_cache: true
    force_env_keys:
-      - CM_OUTDIRNAME
+      - MLC_OUTDIRNAME
    enable_if_env:
-      CM_DATASET_IGBH_FULL_DOWNLOAD:
+      MLC_DATASET_IGBH_FULL_DOWNLOAD:
      - 'yes'
    names:
    - dae
@@ -243,19 +243,19 @@ prehook_deps:
    tags: download-and-extract,_wget
    update_tags_from_env_with_prefix:
      _url.:
-      - CM_PACKAGE_URL
+      - MLC_PACKAGE_URL
   # journal
   - env:
-      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/journal/journal_id_index_mapping.npy
-      CM_DOWNLOAD_CHECKSUM: b630c20852b76d17a5c9c37b39176f69
-      CM_DOWNLOAD_FILENAME: journal_id_index_mapping.npy
-      CM_DOWNLOAD_PATH: <<>>/full/processed/journal/
+      MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/journal/journal_id_index_mapping.npy
+      MLC_DOWNLOAD_CHECKSUM: b630c20852b76d17a5c9c37b39176f69
+      MLC_DOWNLOAD_FILENAME: journal_id_index_mapping.npy
+      MLC_DOWNLOAD_PATH: <<>>/full/processed/journal/
    extra_cache_tags: dataset,igbh,journal,journal_id_index_mapping
    force_cache: true
    force_env_keys:
-      - CM_OUTDIRNAME
+      - MLC_OUTDIRNAME
    enable_if_env:
-      CM_DATASET_IGBH_FULL_DOWNLOAD:
+      MLC_DATASET_IGBH_FULL_DOWNLOAD:
      - 'yes'
    names:
    - dae
@@ -263,18 +263,18 @@ prehook_deps:
    tags: download-and-extract,_wget
    update_tags_from_env_with_prefix:
      _url.:
-      - CM_PACKAGE_URL
+      - MLC_PACKAGE_URL
   - env:
-      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/journal/node_feat.npy
-      CM_DOWNLOAD_CHECKSUM: 49d51b554b3004f10bee19d1c7f9b416
-      CM_DOWNLOAD_FILENAME: node_feat.npy
-      CM_DOWNLOAD_PATH: <<>>/full/processed/journal/
+      MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/journal/node_feat.npy
+      MLC_DOWNLOAD_CHECKSUM: 49d51b554b3004f10bee19d1c7f9b416
+      MLC_DOWNLOAD_FILENAME: node_feat.npy
+      MLC_DOWNLOAD_PATH: <<>>/full/processed/journal/
    extra_cache_tags: dataset,igbh,journal,node_feat
    force_cache: true
    force_env_keys:
-      - CM_OUTDIRNAME
+      - MLC_OUTDIRNAME
    enable_if_env:
-      CM_DATASET_IGBH_FULL_DOWNLOAD:
+      MLC_DATASET_IGBH_FULL_DOWNLOAD:
      - 'yes'
    names:
    - dae
@@ -282,19 +282,19 @@ prehook_deps:
    tags: download-and-extract,_wget
    update_tags_from_env_with_prefix:
      _url.:
-      - CM_PACKAGE_URL
+      - MLC_PACKAGE_URL
   # fos
   - env:
-      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/fos/fos_id_index_mapping.npy
-      CM_DOWNLOAD_CHECKSUM: 0f0cfde619361cde35d3be9f201d081a
-      CM_DOWNLOAD_FILENAME: fos_id_index_mapping.npy
-      CM_DOWNLOAD_PATH: <<>>/full/processed/fos/
+      MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/fos/fos_id_index_mapping.npy
+      MLC_DOWNLOAD_CHECKSUM: 0f0cfde619361cde35d3be9f201d081a
+      MLC_DOWNLOAD_FILENAME: fos_id_index_mapping.npy
+      MLC_DOWNLOAD_PATH: <<>>/full/processed/fos/
    extra_cache_tags: dataset,igbh,fos,fos_id_index_mapping
    force_cache: true
    force_env_keys:
-      - CM_OUTDIRNAME
+      - MLC_OUTDIRNAME
    enable_if_env:
-      CM_DATASET_IGBH_FULL_DOWNLOAD:
+      MLC_DATASET_IGBH_FULL_DOWNLOAD:
      - 'yes'
    names:
    - dae
@@ -302,18 +302,18 @@ prehook_deps:
    tags: download-and-extract,_wget
    update_tags_from_env_with_prefix:
      _url.:
-      - CM_PACKAGE_URL
+      - MLC_PACKAGE_URL
   - env:
-      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/fos/node_feat.npy
-      CM_DOWNLOAD_CHECKSUM: 3ef3df19e2475c387fec10bac82773df
-      CM_DOWNLOAD_FILENAME: node_feat.npy
-      CM_DOWNLOAD_PATH: <<>>/full/processed/fos/
+      MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/fos/node_feat.npy
+      MLC_DOWNLOAD_CHECKSUM: 3ef3df19e2475c387fec10bac82773df
+      MLC_DOWNLOAD_FILENAME: node_feat.npy
+      MLC_DOWNLOAD_PATH: <<>>/full/processed/fos/
    extra_cache_tags: dataset,igbh,fos,node_feat
    force_cache: true
    force_env_keys:
-      - CM_OUTDIRNAME
+      - MLC_OUTDIRNAME
    enable_if_env:
-      CM_DATASET_IGBH_FULL_DOWNLOAD:
+      MLC_DATASET_IGBH_FULL_DOWNLOAD:
      - 'yes'
    names:
    - dae
@@ -321,19 +321,19 @@ prehook_deps:
    tags: download-and-extract,_wget
    update_tags_from_env_with_prefix:
      _url.:
-      - CM_PACKAGE_URL
+      - MLC_PACKAGE_URL
   # author__affiliated_to__institute
   - env:
-      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/author__affiliated_to__institute/edge_index.npy
-      CM_DOWNLOAD_CHECKSUM: e35dba208f81e0987207f78787c75711
-      CM_DOWNLOAD_FILENAME: edge_index.npy
-      CM_DOWNLOAD_PATH: <<>>/full/processed/author__affiliated_to__institute/
+      MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/author__affiliated_to__institute/edge_index.npy
+      MLC_DOWNLOAD_CHECKSUM: e35dba208f81e0987207f78787c75711
+      MLC_DOWNLOAD_FILENAME: edge_index.npy
+      MLC_DOWNLOAD_PATH: <<>>/full/processed/author__affiliated_to__institute/
    extra_cache_tags: dataset,igbh,author_affiliated_to_institute,edge_index
    force_cache: true
    force_env_keys:
-      - CM_OUTDIRNAME
+      - MLC_OUTDIRNAME
    enable_if_env:
-      CM_DATASET_IGBH_FULL_DOWNLOAD:
+      MLC_DATASET_IGBH_FULL_DOWNLOAD:
      - 'yes'
    names:
    - dae
@@ -341,19 +341,19 @@ prehook_deps:
    tags: download-and-extract,_wget
    update_tags_from_env_with_prefix:
      _url.:
-      - CM_PACKAGE_URL
+      - MLC_PACKAGE_URL
   # paper__published__journal
   - env:
-      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__published__journal/edge_index.npy
-      CM_DOWNLOAD_CHECKSUM: 38505e83bde8e5cf94ae0a85afa60e13
-      CM_DOWNLOAD_FILENAME: edge_index.npy
-      CM_DOWNLOAD_PATH: <<>>/full/processed/paper__published__journal/
+      MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__published__journal/edge_index.npy
+      MLC_DOWNLOAD_CHECKSUM: 38505e83bde8e5cf94ae0a85afa60e13
+      MLC_DOWNLOAD_FILENAME: edge_index.npy
+      MLC_DOWNLOAD_PATH: <<>>/full/processed/paper__published__journal/
    extra_cache_tags: dataset,igbh,paper_published_journal,edge_index
    force_cache: true
    force_env_keys:
-      - CM_OUTDIRNAME
+      - MLC_OUTDIRNAME
    enable_if_env:
-      CM_DATASET_IGBH_FULL_DOWNLOAD:
+      MLC_DATASET_IGBH_FULL_DOWNLOAD:
      - 'yes'
    names:
    - dae
@@ -361,19 +361,19 @@ prehook_deps:
    tags: download-and-extract,_wget
    update_tags_from_env_with_prefix:
      _url.:
-      - CM_PACKAGE_URL
+      - MLC_PACKAGE_URL
   # paper__topic__fos
   - env:
-      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__topic__fos/edge_index.npy
-      CM_DOWNLOAD_CHECKSUM: 427fb350a248ee6eaa8c21cde942fda4
-      CM_DOWNLOAD_FILENAME: edge_index.npy
-      CM_DOWNLOAD_PATH: <<>>/full/processed/paper__topic__fos/
+      MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__topic__fos/edge_index.npy
+      MLC_DOWNLOAD_CHECKSUM: 427fb350a248ee6eaa8c21cde942fda4
+      MLC_DOWNLOAD_FILENAME: edge_index.npy
+      MLC_DOWNLOAD_PATH: <<>>/full/processed/paper__topic__fos/
    extra_cache_tags: dataset,igbh,paper_topic_fos,edge_index
    force_cache: true
    force_env_keys:
-      - CM_OUTDIRNAME
+      - MLC_OUTDIRNAME
    enable_if_env:
-      CM_DATASET_IGBH_FULL_DOWNLOAD:
+      MLC_DATASET_IGBH_FULL_DOWNLOAD:
      - 'yes'
    names:
    - dae
@@ -381,19 +381,19 @@ prehook_deps:
    tags: download-and-extract,_wget
    update_tags_from_env_with_prefix:
      _url.:
-      - CM_PACKAGE_URL
+      - MLC_PACKAGE_URL
   # paper__venue__conference
   - env:
-      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__venue__conference/edge_index.npy
-      CM_DOWNLOAD_CHECKSUM: 541b8d43cd93579305cfb71961e10a7d
-      CM_DOWNLOAD_FILENAME: edge_index.npy
-      CM_DOWNLOAD_PATH: <<>>/full/processed/paper__venue__conference/
+      MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__venue__conference/edge_index.npy
+      MLC_DOWNLOAD_CHECKSUM: 541b8d43cd93579305cfb71961e10a7d
+      MLC_DOWNLOAD_FILENAME: edge_index.npy
+      MLC_DOWNLOAD_PATH: <<>>/full/processed/paper__venue__conference/
    extra_cache_tags: dataset,igbh,paper_venue_conference,edge_index
    force_cache: true
    force_env_keys:
-      - CM_OUTDIRNAME
+      - MLC_OUTDIRNAME
    enable_if_env:
-      CM_DATASET_IGBH_FULL_DOWNLOAD:
+      MLC_DATASET_IGBH_FULL_DOWNLOAD:
      - 'yes'
    names:
    - dae
@@ -401,19 +401,19 @@ prehook_deps:
   tags: download-and-extract,_wget
    update_tags_from_env_with_prefix:
      _url.:
-      - CM_PACKAGE_URL
+      - MLC_PACKAGE_URL
   # paper__written_by__author
   - env:
-      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__written_by__author/edge_index.npy
-      CM_DOWNLOAD_CHECKSUM: df39fe44bbcec93a640400e6d81ffcb5
-      CM_DOWNLOAD_FILENAME: edge_index.npy
-      CM_DOWNLOAD_PATH: <<>>/full/processed/paper__written_by__author/
+      MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__written_by__author/edge_index.npy
+      MLC_DOWNLOAD_CHECKSUM: df39fe44bbcec93a640400e6d81ffcb5
+      MLC_DOWNLOAD_FILENAME: edge_index.npy
+      MLC_DOWNLOAD_PATH: <<>>/full/processed/paper__written_by__author/
    extra_cache_tags: dataset,igbh,paper_written_by_author,edge_index
    force_cache: true
    force_env_keys:
-      - CM_OUTDIRNAME
+      - MLC_OUTDIRNAME
    enable_if_env:
-      CM_DATASET_IGBH_FULL_DOWNLOAD:
+      MLC_DATASET_IGBH_FULL_DOWNLOAD:
      - 'yes'
    names:
    - dae
@@ -421,28 +421,28 @@ prehook_deps:
    tags: download-and-extract,_wget
    update_tags_from_env_with_prefix:
      _url.:
-      - CM_PACKAGE_URL
+      - MLC_PACKAGE_URL
 
 variations:
   debug:
     default: true
     group: dataset-type
     env:
-      CM_DATASET_IGBH_TYPE: debug
-      CM_DATASET_IGBH_SIZE: tiny
+      MLC_DATASET_IGBH_TYPE: debug
+      MLC_DATASET_IGBH_SIZE: tiny
   full:
     group: dataset-type
     env:
-      CM_DATASET_IGBH_TYPE: full
-      CM_DATASET_IGBH_SIZE: full
+      MLC_DATASET_IGBH_TYPE: full
+      MLC_DATASET_IGBH_SIZE: full
   glt:
     env:
-      CM_IGBH_GRAPH_COMPRESS: yes
+      MLC_IGBH_GRAPH_COMPRESS: yes
   csc:
     group: compressed-layout
     default: true
     env:
-      CM_IGBH_GRAPH_COMPRESS_LAYOUT: csc
+      MLC_IGBH_GRAPH_COMPRESS_LAYOUT: csc
   csr:
     group: compressed-layout
     env:
-      CM_IGBH_GRAPH_COMPRESS_LAYOUT: csr
+      MLC_IGBH_GRAPH_COMPRESS_LAYOUT: csr
diff --git a/script/get-dataset-igbh/run.sh b/script/get-dataset-igbh/run.sh
index 238652160..edb705045 100644
--- a/script/get-dataset-igbh/run.sh
+++ b/script/get-dataset-igbh/run.sh
@@ -1,11 +1,11 @@
 #!/bin/bash
 
-#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH}
 
 #To export any variable
 #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
 
-#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
@@ -21,4 +21,4 @@ function run() {
   exit_if_error
 }
 
-run "$CM_RUN_CMD"
+run "$MLC_RUN_CMD"
diff --git a/script/get-dataset-imagenet-aux/meta.yaml b/script/get-dataset-imagenet-aux/meta.yaml
index c5944aedf..00036303f 100644
--- a/script/get-dataset-imagenet-aux/meta.yaml
+++ b/script/get-dataset-imagenet-aux/meta.yaml
@@ -4,21 +4,21 @@ automation_uid: 5b4e0237da074764
 cache: true
 category: AI/ML datasets
 new_env_keys:
-- CM_DATASET_AUX_*
+- MLC_DATASET_AUX_*
 prehook_deps:
 - env:
-    CM_DOWNLOAD_URL: <<>>
-    CM_DOWNLOAD_URL1: <<>>
-    CM_EXTRACT_EXTRACTED_FILENAME: <<>>
-    CM_EXTRACT_FINAL_ENV_NAME: CM_DATASET_AUX_PATH
+    MLC_DOWNLOAD_URL: <<>>
+    MLC_DOWNLOAD_URL1: <<>>
+    MLC_EXTRACT_EXTRACTED_FILENAME: <<>>
+    MLC_EXTRACT_FINAL_ENV_NAME: MLC_DATASET_AUX_PATH
   extra_cache_tags: imagenet-aux,dataset-aux
   force_cache: true
   tags: download-and-extract,_extract,_wget
   force_env_keys:
-    - CM_OUTDIRNAME
+    - MLC_OUTDIRNAME
   update_tags_from_env_with_prefix:
     _url.:
-      - CM_PACKAGE_URL
+      - MLC_PACKAGE_URL
 tags:
 - get
 - aux
@@ -29,26 +29,26 @@ uid: bb2c6dd8c8c64217
 variations:
   '2012':
     env:
-      CM_DATASET_AUX_VER: '2012'
+      MLC_DATASET_AUX_VER: '2012'
   from.berkeleyvision:
     base:
    - '2012'
     default: true
     env:
-      CM_DOWNLOAD_CHECKSUM: f963098ea0e785a968ca1eb634003a90
-      CM_DOWNLOAD_CHECKSUM1: ee346d67141e476df9c1a3f813552503
-      CM_PACKAGE_URL: http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz
-      CM_PACKAGE_URL1: https://www.dropbox.com/s/92n2fyej3lzy3s3/caffe_ilsvrc12.tar.gz
+      MLC_DOWNLOAD_CHECKSUM: f963098ea0e785a968ca1eb634003a90
+      MLC_DOWNLOAD_CHECKSUM1: ee346d67141e476df9c1a3f813552503
+      MLC_PACKAGE_URL: http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz
+      MLC_PACKAGE_URL1: https://www.dropbox.com/s/92n2fyej3lzy3s3/caffe_ilsvrc12.tar.gz
     group: download-source
   from.dropbox:
     base:
     - '2012'
     env:
-      CM_DOWNLOAD_CHECKSUM: ee346d67141e476df9c1a3f813552503
-      CM_DOWNLOAD_CHECKSUM1: f963098ea0e785a968ca1eb634003a90
-      CM_PACKAGE_URL: https://www.dropbox.com/s/92n2fyej3lzy3s3/caffe_ilsvrc12.tar.gz
-      CM_PACKAGE_URL1: http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz
+      MLC_DOWNLOAD_CHECKSUM: ee346d67141e476df9c1a3f813552503
+      MLC_DOWNLOAD_CHECKSUM1: f963098ea0e785a968ca1eb634003a90
+      MLC_PACKAGE_URL: https://www.dropbox.com/s/92n2fyej3lzy3s3/caffe_ilsvrc12.tar.gz
+      MLC_PACKAGE_URL1: http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz
     group: download-source
   skip_ssl_verification:
     env:
-      CM_VERIFY_SSL: 'False'
+      MLC_VERIFY_SSL: 'False'
diff --git a/script/get-dataset-imagenet-calibration/meta.yaml b/script/get-dataset-imagenet-calibration/meta.yaml
index 7e499146a..dd61e9976 100644
--- a/script/get-dataset-imagenet-calibration/meta.yaml
+++ b/script/get-dataset-imagenet-calibration/meta.yaml
@@ -11,16 +11,16 @@ category: "AI/ML datasets"
 deps:
 - tags: download,file
   force_env_keys:
-    - CM_OUTDIRNAME
+    - MLC_OUTDIRNAME
   force_cache: true
   extra_cache_tags: imagenet-calibration,imagenet,calibration
   names:
   - calibration-file-downloader
   env:
-    CM_DOWNLOAD_FINAL_ENV_NAME: CM_MLPERF_IMAGENET_CALIBRATION_LIST_FILE_WITH_PATH
+    MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_MLPERF_IMAGENET_CALIBRATION_LIST_FILE_WITH_PATH
 
 new_env_keys:
-- CM_MLPERF_IMAGENET_CALIBRATION_LIST_FILE_WITH_PATH
+- MLC_MLPERF_IMAGENET_CALIBRATION_LIST_FILE_WITH_PATH
 
 tags:
 - get
@@ -33,16 +33,16 @@ variations:
     group: calibration-option
     default: true
     env:
-      CM_MLPERF_IMAGENET_CALIBRATION_OPTION: one
-      CM_DOWNLOAD_CHECKSUM: f09719174af3553119e2c621157773a6
+      MLC_MLPERF_IMAGENET_CALIBRATION_OPTION: one
+      MLC_DOWNLOAD_CHECKSUM: f09719174af3553119e2c621157773a6
     adr:
       calibration-file-downloader:
         tags: _url.https://github.com/mlcommons/inference/raw/master/calibration/ImageNet/cal_image_list_option_1.txt
   mlperf.option2:
     group: calibration-option
     env:
-      CM_MLPERF_IMAGENET_CALIBRATION_OPTION: two
-      CM_DOWNLOAD_CHECKSUM: e44582af00e3b4fc3fac30efd6bdd05f
+      MLC_MLPERF_IMAGENET_CALIBRATION_OPTION: two
+      MLC_DOWNLOAD_CHECKSUM: e44582af00e3b4fc3fac30efd6bdd05f
     adr:
       calibration-file-downloader:
         tags: _url.https://github.com/mlcommons/inference/raw/master/calibration/ImageNet/cal_image_list_option_2.txt
diff --git a/script/get-dataset-imagenet-helper/customize.py b/script/get-dataset-imagenet-helper/customize.py
index 911411db4..05ab824cd 100644
--- a/script/get-dataset-imagenet-helper/customize.py
+++ b/script/get-dataset-imagenet-helper/customize.py
@@ -5,9 +5,9 @@ def postprocess(i):
     env = i['env']
 
-    script_path = env['CM_TMP_CURRENT_SCRIPT_PATH']
+    script_path = env['MLC_TMP_CURRENT_SCRIPT_PATH']
 
-    env['CM_DATASET_IMAGENET_HELPER_PATH'] = script_path
+    env['MLC_DATASET_IMAGENET_HELPER_PATH'] = script_path
     env['+PYTHONPATH'] = [script_path]
 
     return {'return': 0}
diff --git a/script/get-dataset-imagenet-helper/imagenet_helper/__init__.py b/script/get-dataset-imagenet-helper/imagenet_helper/__init__.py
index aa90deefd..83e5dbded 100644
--- a/script/get-dataset-imagenet-helper/imagenet_helper/__init__.py
+++ b/script/get-dataset-imagenet-helper/imagenet_helper/__init__.py
@@ -6,35 +6,35 @@
 
 # Processing in batches:
 #
-BATCH_SIZE = int(os.getenv('CM_BATCH_SIZE', 1))
+BATCH_SIZE = int(os.getenv('MLC_BATCH_SIZE', 1))
 
 # Model properties:
 #
-MODEL_IMAGE_HEIGHT = int(os.getenv('CM_ML_MODEL_IMAGE_HEIGHT',
-                                   os.getenv('CM_ONNX_MODEL_IMAGE_HEIGHT',
-                                             os.getenv('CM_TENSORFLOW_MODEL_IMAGE_HEIGHT',
+MODEL_IMAGE_HEIGHT = int(os.getenv('MLC_ML_MODEL_IMAGE_HEIGHT',
+                                   os.getenv('MLC_ONNX_MODEL_IMAGE_HEIGHT',
+                                             os.getenv('MLC_TENSORFLOW_MODEL_IMAGE_HEIGHT',
                                                        ''))))
-MODEL_IMAGE_WIDTH = int(os.getenv('CM_ML_MODEL_IMAGE_WIDTH',
-                                  os.getenv('CM_ONNX_MODEL_IMAGE_WIDTH',
-                                            os.getenv('CM_TENSORFLOW_MODEL_IMAGE_WIDTH',
+MODEL_IMAGE_WIDTH = int(os.getenv('MLC_ML_MODEL_IMAGE_WIDTH',
+                                  os.getenv('MLC_ONNX_MODEL_IMAGE_WIDTH',
+                                            os.getenv('MLC_TENSORFLOW_MODEL_IMAGE_WIDTH',
                                                       ''))))
-MODEL_IMAGE_CHANNELS = int(os.getenv('CM_ML_MODEL_IMAGE_CHANNELS', 3))
-MODEL_DATA_LAYOUT = os.getenv('CM_ML_MODEL_DATA_LAYOUT', 'NCHW')
+MODEL_IMAGE_CHANNELS = int(os.getenv('MLC_ML_MODEL_IMAGE_CHANNELS', 3))
+MODEL_DATA_LAYOUT = os.getenv('MLC_ML_MODEL_DATA_LAYOUT', 'NCHW')
 MODEL_COLOURS_BGR = os.getenv(
-    'CM_ML_MODEL_COLOUR_CHANNELS_BGR', 'NO') in (
+    'MLC_ML_MODEL_COLOUR_CHANNELS_BGR', 'NO') in (
     'YES', 'yes', 'ON', 'on', '1')
-MODEL_INPUT_DATA_TYPE = os.getenv('CM_ML_MODEL_INPUT_DATA_TYPE', 'float32')
-MODEL_DATA_TYPE = os.getenv('CM_ML_MODEL_DATA_TYPE', '(unknown)')
+MODEL_INPUT_DATA_TYPE = os.getenv('MLC_ML_MODEL_INPUT_DATA_TYPE', 'float32')
+MODEL_DATA_TYPE = os.getenv('MLC_ML_MODEL_DATA_TYPE', '(unknown)')
 MODEL_USE_DLA = os.getenv(
-    'CM_ML_MODEL_USE_DLA',
+    'MLC_ML_MODEL_USE_DLA',
     'NO') in (
     'YES', 'yes', 'ON', 'on', '1')
-MODEL_MAX_BATCH_SIZE = int(os.getenv('CM_ML_MODEL_MAX_BATCH_SIZE', BATCH_SIZE))
+MODEL_MAX_BATCH_SIZE = int(os.getenv('MLC_ML_MODEL_MAX_BATCH_SIZE', BATCH_SIZE))
 
 # Internal processing:
@@ -45,14 +45,14 @@
 
 # Image normalization:
 #
-MODEL_NORMALIZE_DATA = os.getenv('CM_ML_MODEL_NORMALIZE_DATA') in (
+MODEL_NORMALIZE_DATA = os.getenv('MLC_ML_MODEL_NORMALIZE_DATA') in (
     'YES', 'yes', 'ON', 'on', '1')
-MODEL_NORMALIZE_LOWER = float(os.getenv('CM_ML_MODEL_NORMALIZE_LOWER', -1.0))
-MODEL_NORMALIZE_UPPER = float(os.getenv('CM_ML_MODEL_NORMALIZE_UPPER', 1.0))
+MODEL_NORMALIZE_LOWER = float(os.getenv('MLC_ML_MODEL_NORMALIZE_LOWER', -1.0))
+MODEL_NORMALIZE_UPPER = float(os.getenv('MLC_ML_MODEL_NORMALIZE_UPPER', 1.0))
 SUBTRACT_MEAN = os.getenv(
-    'CM_ML_MODEL_SUBTRACT_MEANS', 'YES') in (
+    'MLC_ML_MODEL_SUBTRACT_MEANS', 'YES') in (
     'YES', 'yes', 'ON', 'on', '1')
-GIVEN_CHANNEL_MEANS = os.getenv('CM_ML_MODEL_GIVEN_CHANNEL_MEANS', '')
+GIVEN_CHANNEL_MEANS = os.getenv('MLC_ML_MODEL_GIVEN_CHANNEL_MEANS', '')
 if GIVEN_CHANNEL_MEANS:
     GIVEN_CHANNEL_MEANS = np.fromstring(
         GIVEN_CHANNEL_MEANS,
@@ -62,7 +62,7 @@
         # swapping Red and Blue colour channels
         GIVEN_CHANNEL_MEANS = GIVEN_CHANNEL_MEANS[::-1]
 
-GIVEN_CHANNEL_STDS = os.getenv('CM_ML_MODEL_GIVEN_CHANNEL_STDS', '')
+GIVEN_CHANNEL_STDS = os.getenv('MLC_ML_MODEL_GIVEN_CHANNEL_STDS', '')
 if GIVEN_CHANNEL_STDS:
     GIVEN_CHANNEL_STDS = np.fromstring(
         GIVEN_CHANNEL_STDS,
@@ -75,13 +75,13 @@
 
 # ImageNet dataset properties:
 #
-LABELS_PATH = os.environ['CM_CAFFE_IMAGENET_SYNSET_WORDS_TXT']
+LABELS_PATH = os.environ['MLC_CAFFE_IMAGENET_SYNSET_WORDS_TXT']
 
 # Preprocessed input images' properties:
 #
-IMAGE_DIR = os.getenv('CM_DATASET_PREPROCESSED_PATH')
-IMAGE_DATA_TYPE = os.getenv('CM_DATASET_PREPROCESSED_DATA_TYPE', 'float32')
+IMAGE_DIR = os.getenv('MLC_DATASET_PREPROCESSED_PATH')
+IMAGE_DATA_TYPE = os.getenv('MLC_DATASET_PREPROCESSED_DATA_TYPE', 'float32')
 
 
 def load_labels(labels_filepath):
diff --git a/script/get-dataset-imagenet-helper/meta.yaml b/script/get-dataset-imagenet-helper/meta.yaml
index a6ab0e7c8..e81aa6e1a 100644
--- a/script/get-dataset-imagenet-helper/meta.yaml
+++ b/script/get-dataset-imagenet-helper/meta.yaml
@@ -5,7 +5,7 @@ cache: true
 category: AI/ML datasets
 new_env_keys:
 - +PYTHONPATH
-- CM_DATASET_IMAGENET_HELPER_PATH
+- MLC_DATASET_IMAGENET_HELPER_PATH
 tags:
 - get
 - imagenet
diff --git a/script/get-dataset-imagenet-train/customize.py b/script/get-dataset-imagenet-train/customize.py
index 5a760c895..fb6c67f66 100644
--- a/script/get-dataset-imagenet-train/customize.py
+++ b/script/get-dataset-imagenet-train/customize.py
@@ -13,16 +13,16 @@ def preprocess(i):
     if os_info['platform'] == 'windows':
         return {'return': 0}
 
-    env['CM_DATASET_IMAGENET_TRAIN_REQUIRE_DAE'] = 'no'
+    env['MLC_DATASET_IMAGENET_TRAIN_REQUIRE_DAE'] = 'no'
 
-    path = env.get('CM_INPUT', env.get('IMAGENET_TRAIN_PATH', '')).strip()
+    path = env.get('MLC_INPUT', env.get('IMAGENET_TRAIN_PATH', '')).strip()
 
     if path == '':
-        if env.get('CM_DATASET_IMAGENET_TRAIN_TORRENT_PATH'):
-            path = env['CM_DATASET_IMAGENET_TRAIN_TORRENT_PATH']
-            env['CM_DAE_EXTRA_TAGS'] = "_torrent"
-            env['CM_DAE_TORRENT_PATH'] = path
-            env['CM_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'yes'
+        if env.get('MLC_DATASET_IMAGENET_TRAIN_TORRENT_PATH'):
+            path = env['MLC_DATASET_IMAGENET_TRAIN_TORRENT_PATH']
+            env['MLC_DAE_EXTRA_TAGS'] = "_torrent"
+            env['MLC_DAE_TORRENT_PATH'] = path
+            env['MLC_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'yes'
 
             return {'return': 0}
 
@@ -31,15 +31,15 @@ def preprocess(i):
 
     elif not os.path.isdir(path):
         if path.endswith(".tar"):
-            # env['CM_DAE_FILEPATH'] = path
-            env['CM_EXTRACT_FILEPATH'] = path
-            env['CM_DAE_ONLY_EXTRACT'] = 'yes'
+            # env['MLC_DAE_FILEPATH'] = path
+            env['MLC_EXTRACT_FILEPATH'] = path
+            env['MLC_DAE_ONLY_EXTRACT'] = 'yes'
             return {'return': 0}
         else:
             return {'return': 1,
                     'error': 'Path {} doesn\'t exist'.format(path)}
     else:
-        env['CM_EXTRACT_EXTRACTED_PATH'] = path
+        env['MLC_EXTRACT_EXTRACTED_PATH'] = path
 
     return {'return': 0}
 
@@ -52,7 +52,7 @@ def postprocess(i):
 
     env = i['env']
 
-    path = env['CM_EXTRACT_EXTRACTED_PATH']
+    path = env['MLC_EXTRACT_EXTRACTED_PATH']
 
     path_tar = os.path.join(path, 'n01440764.tar')
 
@@ -60,10 +60,10 @@ def postprocess(i):
         return {'return': 1,
                 'error': 'ImageNet file {} not found'.format(path_tar)}
 
-    env['CM_DATASET_PATH'] = path
-    env['CM_DATASET_IMAGENET_PATH'] = path
-    env['CM_DATASET_IMAGENET_TRAIN_PATH'] = path
+    env['MLC_DATASET_PATH'] = path
+    env['MLC_DATASET_IMAGENET_PATH'] = path
+    env['MLC_DATASET_IMAGENET_TRAIN_PATH'] = path
 
-    env['CM_GET_DEPENDENT_CACHED_PATH'] = path
+    env['MLC_GET_DEPENDENT_CACHED_PATH'] = path
 
     return {'return': 0}
diff --git a/script/get-dataset-imagenet-train/meta.yaml b/script/get-dataset-imagenet-train/meta.yaml
index 3d1a1bd22..1b9d88575 100644
--- a/script/get-dataset-imagenet-train/meta.yaml
+++ b/script/get-dataset-imagenet-train/meta.yaml
@@ -7,36 +7,36 @@ deps: []
 input_description: {}
 input_mapping:
   input: IMAGENET_TRAIN_PATH
-  torrent: CM_DATASET_IMAGENET_TRAIN_TORRENT_PATH
+  torrent: MLC_DATASET_IMAGENET_TRAIN_TORRENT_PATH
 new_env_keys:
-- CM_DATASET_PATH
-- CM_DATASET_IMAGENET_*
+- MLC_DATASET_PATH
+- MLC_DATASET_IMAGENET_*
 new_state_keys: []
 post_deps: []
 posthook_deps: []
 prehook_deps:
 - enable_if_env:
-    CM_DATASET_IMAGENET_VAL_REQUIRE_DAE:
+    MLC_DATASET_IMAGENET_VAL_REQUIRE_DAE:
     - 'yes'
     - 'True'
   env:
-    CM_EXTRACT_TO_FOLDER: imagenet-2012-train
+    MLC_EXTRACT_TO_FOLDER: imagenet-2012-train
   tags: download-and-extract,file,_extract
   update_tags_from_env:
-  - CM_DAE_EXTRA_TAGS
+  - MLC_DAE_EXTRA_TAGS
   update_tags_from_env_with_prefix:
     _url.:
-    - CM_DAE_URL
+    - MLC_DAE_URL
 - enable_if_env:
-    CM_DAE_ONLY_EXTRACT:
+    MLC_DAE_ONLY_EXTRACT:
     - 'yes'
     - 'True'
   env:
-    CM_EXTRACT_TO_FOLDER: imagenet-2012-train
+    MLC_EXTRACT_TO_FOLDER: imagenet-2012-train
   tags: file,extract
   update_tags_from_env_with_prefix:
     _path.:
-    - CM_EXTRACT_PATH
+    - MLC_EXTRACT_PATH
 tags:
 - get
 - imagenet
diff --git a/script/get-dataset-imagenet-val/customize.py b/script/get-dataset-imagenet-val/customize.py
index e11648adf..b2f9a389d 100644
--- a/script/get-dataset-imagenet-val/customize.py
+++ b/script/get-dataset-imagenet-val/customize.py
@@ -11,32 +11,32 @@ def preprocess(i):
     meta = i['meta']
     os_info = i['os_info']
 
-    env['CM_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'no'
+    env['MLC_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'no'
 
-    full = env.get('CM_IMAGENET_FULL', '').strip() == 'yes'
+    full = env.get('MLC_IMAGENET_FULL', '').strip() == 'yes'
 
     path = env.get(
-        'CM_INPUT',
+        'MLC_INPUT',
         env.get(
             'IMAGENET_PATH',
             env.get(
-                'CM_DATASET_IMAGENET_PATH',
+                'MLC_DATASET_IMAGENET_PATH',
                 ''))).strip()
 
     if path == '':
         if full:
-            if env.get('CM_DATASET_IMAGENET_VAL_TORRENT_PATH'):
-                path = env['CM_DATASET_IMAGENET_VAL_TORRENT_PATH']
-                env['CM_DAE_EXTRA_TAGS'] = "_torrent"
-                env['CM_DAE_TORRENT_PATH'] = path
-                env['CM_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'yes'
+            if env.get('MLC_DATASET_IMAGENET_VAL_TORRENT_PATH'):
+                path = env['MLC_DATASET_IMAGENET_VAL_TORRENT_PATH']
+                env['MLC_DAE_EXTRA_TAGS'] = "_torrent"
+                env['MLC_DAE_TORRENT_PATH'] = path
+                env['MLC_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'yes'
 
                 return {'return': 0}
 
             else:
-                env['CM_DAE_URL'] = 'https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar'
-                env['CM_DAE_FILENAME'] = 'ILSVRC2012_img_val.tar'
-                env['CM_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'yes'
+                env['MLC_DAE_URL'] = 'https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar'
+                env['MLC_DAE_FILENAME'] = 'ILSVRC2012_img_val.tar'
+                env['MLC_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'yes'
 
                 return {'return': 0}
 
                 # return {'return':1, 'error':'Please rerun the last CM command
@@ -46,18 +46,18 @@ def preprocess(i):
                 # images}'}
 
         else:
-            env['CM_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'yes'
+            env['MLC_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'yes'
 
     elif not os.path.isdir(path):
         if path.endswith(".tar"):
-            env['CM_EXTRACT_FILEPATH'] = path
-            env['CM_DAE_ONLY_EXTRACT'] = 'yes'
+            env['MLC_EXTRACT_FILEPATH'] = path
+            env['MLC_DAE_ONLY_EXTRACT'] = 'yes'
             return {'return': 0}
         else:
             return {'return': 1,
                     'error': 'Path {} doesn\'t exist'.format(path)}
     else:
-        env['CM_EXTRACT_EXTRACTED_PATH'] = path
+        env['MLC_EXTRACT_EXTRACTED_PATH'] = path
 
     return {'return': 0}
 
@@ -67,7 +67,7 @@ def postprocess(i):
     os_info = i['os_info']
     env = i['env']
 
-    path = env['CM_EXTRACT_EXTRACTED_PATH']
+    path = env['MLC_EXTRACT_EXTRACTED_PATH']
     path1 = os.path.join(path, 'imagenet-2012-val')
     if os.path.isdir(path1):
         path = path1
@@ -79,14 +79,14 @@ def postprocess(i):
                 'error': 'ImageNet file {} not found'.format(path_image)}
 
     files = os.listdir(path)
-    if len(files) < int(env.get('CM_DATASET_SIZE', 0)):
+    if len(files) < int(env.get('MLC_DATASET_SIZE', 0)):
        return {'return': 1, 'error': 'Only {} files found in {}. {} expected'.format(
-            len(files), path, env.get('CM_DATASET_SIZE'))}
+            len(files), path, env.get('MLC_DATASET_SIZE'))}
 
-    env['CM_DATASET_PATH'] = path
-    env['CM_DATASET_IMAGENET_PATH'] = path
-    env['CM_DATASET_IMAGENET_VAL_PATH'] = path
+    env['MLC_DATASET_PATH'] = path
+    env['MLC_DATASET_IMAGENET_PATH'] = path
+    env['MLC_DATASET_IMAGENET_VAL_PATH'] = path
 
-    env['CM_GET_DEPENDENT_CACHED_PATH'] = path
+    env['MLC_GET_DEPENDENT_CACHED_PATH'] = path
 
     return {'return': 0}
diff --git a/script/get-dataset-imagenet-val/meta.yaml b/script/get-dataset-imagenet-val/meta.yaml
index 0a23afac1..f1db2f01c 100644
--- a/script/get-dataset-imagenet-val/meta.yaml
+++ b/script/get-dataset-imagenet-val/meta.yaml
@@ -14,44 +14,44 @@ docker:
   run: false
 env:
-  CM_DATASET: IMAGENET
+  MLC_DATASET: IMAGENET
 input_mapping:
   imagenet_path: IMAGENET_PATH
-  torrent: CM_DATASET_IMAGENET_VAL_TORRENT_PATH
+  torrent: MLC_DATASET_IMAGENET_VAL_TORRENT_PATH
 new_env_keys:
-- CM_DATASET_PATH
-- CM_DATASET_IMAGENET_PATH
-- CM_DATASET_IMAGENET_VAL_PATH
-- CM_DATASET_SIZE
-- CM_DATASET_VER
+- MLC_DATASET_PATH
+- MLC_DATASET_IMAGENET_PATH
+- MLC_DATASET_IMAGENET_VAL_PATH
+- MLC_DATASET_SIZE
+- MLC_DATASET_VER
 prehook_deps:
 - enable_if_env:
-    CM_DATASET_IMAGENET_VAL_REQUIRE_DAE:
+    MLC_DATASET_IMAGENET_VAL_REQUIRE_DAE:
     - 'yes'
     - 'True'
   env:
-    CM_EXTRACT_TO_FOLDER: imagenet-2012-val
+    MLC_EXTRACT_TO_FOLDER: imagenet-2012-val
   tags: download-and-extract,file,_extract
   force_env_keys:
-    - CM_OUTDIRNAME
+    - MLC_OUTDIRNAME
   update_tags_from_env:
-  - CM_DAE_EXTRA_TAGS
+  - MLC_DAE_EXTRA_TAGS
   update_tags_from_env_with_prefix:
     _url.:
-    - CM_DAE_URL
+    - MLC_DAE_URL
 - enable_if_env:
-    CM_DAE_ONLY_EXTRACT:
+    MLC_DAE_ONLY_EXTRACT:
     - 'yes'
     - 'True'
   env:
-    CM_EXTRACT_TO_FOLDER: imagenet-2012-val
+    MLC_EXTRACT_TO_FOLDER: imagenet-2012-val
   tags: file,extract,_no-remove-extracted
   update_tags_from_env_with_prefix:
     _path.:
-    - CM_EXTRACT_PATH
+    - MLC_EXTRACT_PATH
 tags:
 - get
@@ -67,7 +67,7 @@ variations:
   '2012':
     default: true
     env:
-      CM_DATASET_VER: '2012'
+      MLC_DATASET_VER: '2012'
     group: dataset-version
   2012-500:
     base:
@@ -79,25 +79,25 @@ variations:
     - '2012'
   full:
     env:
-      CM_DAE_FILENAME: ILSVRC2012_img_val.tar
-      CM_DATASET_SIZE: '50000'
-      CM_DOWNLOAD_CHECKSUM: 29b22e2961454d5413ddabcf34fc5622
-      CM_IMAGENET_FULL: 'yes'
+      MLC_DAE_FILENAME: ILSVRC2012_img_val.tar
+      MLC_DATASET_SIZE: '50000'
+      MLC_DOWNLOAD_CHECKSUM: 29b22e2961454d5413ddabcf34fc5622
+      MLC_IMAGENET_FULL: 'yes'
     group: count
   run-during-docker-build:
     docker:
       run: true
   size.#:
     env:
-      CM_DATASET_SIZE: '#'
+      MLC_DATASET_SIZE: '#'
     group: count
   size.500:
     default: true
     env:
-      CM_DAE_FILENAME: ILSVRC2012_img_val_500.tar
-      CM_DAE_URL: http://cKnowledge.org/ai/data/ILSVRC2012_img_val_500.tar
-      CM_DOWNLOAD_URL1: https://www.dropbox.com/scl/fi/a7fhjnzxi6x3ceapxh5bm/ILSVRC2012_img_val_500.tar?rlkey=hz4rabo9ve43co3c303y9r6l7&st=ulcgb3av&dl=1
-      CM_DATASET_SIZE: '500'
-      CM_DOWNLOAD_CHECKSUM: 8627befdd8c2bcf305729020e9db354e
-      CM_DOWNLOAD_FILENAME: ILSVRC2012_img_val_500.tar
+      MLC_DAE_FILENAME: ILSVRC2012_img_val_500.tar
+      MLC_DAE_URL: http://cKnowledge.org/ai/data/ILSVRC2012_img_val_500.tar
+      MLC_DOWNLOAD_URL1: https://www.dropbox.com/scl/fi/a7fhjnzxi6x3ceapxh5bm/ILSVRC2012_img_val_500.tar?rlkey=hz4rabo9ve43co3c303y9r6l7&st=ulcgb3av&dl=1
+      MLC_DATASET_SIZE: '500'
+      MLC_DOWNLOAD_CHECKSUM: 8627befdd8c2bcf305729020e9db354e
+      MLC_DOWNLOAD_FILENAME: ILSVRC2012_img_val_500.tar
     group: count
diff --git a/script/get-dataset-imagenet-val/run.bat b/script/get-dataset-imagenet-val/run.bat
index 94625b7e5..6f481b0e2 100644
--- a/script/get-dataset-imagenet-val/run.bat
+++ b/script/get-dataset-imagenet-val/run.bat
@@ -1,4 +1,4 @@
-if "%CM_EXTRACT_EXTRACTED_PATH%" == "" (
+if "%MLC_EXTRACT_EXTRACTED_PATH%" == "" (
 
   echo.
   wget -nc https://www.dropbox.com/s/57s11df6pts3z69/ILSVRC2012_img_val_500.tar --no-check-certificate
@@ -11,7 +11,7 @@ if "%CM_EXTRACT_EXTRACTED_PATH%" == "" (
 
   del /Q /S ILSVRC2012_img_val_500.tar
 
-  echo CM_DATASET_PATH=%CD%\images > tmp-run-env.out
-  echo CM_DATASET_IMAGENET_PATH=%CD%\images >> tmp-run-env.out
-  echo CM_DATASET_IMAGENET_VAL_PATH=%CD%\images >> tmp-run-env.out
+  echo MLC_DATASET_PATH=%CD%\images > tmp-run-env.out
+  echo MLC_DATASET_IMAGENET_PATH=%CD%\images >> tmp-run-env.out
+  echo MLC_DATASET_IMAGENET_VAL_PATH=%CD%\images >> tmp-run-env.out
 )
diff --git a/script/get-dataset-kits19/customize.py b/script/get-dataset-kits19/customize.py
index 5f95125c0..8b0a48fea 100644
--- a/script/get-dataset-kits19/customize.py
+++ b/script/get-dataset-kits19/customize.py
@@ -13,21 +13,21 @@ def preprocess(i):
     env = i['env']
     meta = i['meta']
 
-    if not env.get('CM_GIT_CHECKOUT', ''):
+    if not env.get('MLC_GIT_CHECKOUT', ''):
         return {
-            'return': 1, 'error': 'Please provide a valid CM_GIT_SHA inside the custom variation of _cm.json'}
+            'return': 1, 'error': 'Please provide a valid MLC_GIT_SHA inside the custom variation of _cm.json'}
 
-    if 'CM_GIT_DEPTH' not in env:
-        env['CM_GIT_DEPTH'] = ''
+    if 'MLC_GIT_DEPTH' not in env:
+        env['MLC_GIT_DEPTH'] = ''
 
-    if 'CM_GIT_RECURSE_SUBMODULES' not in env:
-        env['CM_GIT_RECURSE_SUBMODULES'] = ''
+    if 'MLC_GIT_RECURSE_SUBMODULES' not in env:
+        env['MLC_GIT_RECURSE_SUBMODULES'] = ''
 
-    need_version = env.get('CM_VERSION', '')
+    need_version = env.get('MLC_VERSION', '')
     versions = meta['versions']
 
     if need_version != '' and not need_version in versions:
-        env['CM_GIT_CHECKOUT'] = need_version
+        env['MLC_GIT_CHECKOUT'] = need_version
 
     return {'return': 0}
 
@@ -35,7 +35,7 @@ def preprocess(i):
 def postprocess(i):
     env = i['env']
 
-    env['CM_DATASET_PATH'] = os.path.join(os.getcwd(), 'kits19', 'data')
+    env['MLC_DATASET_PATH'] = os.path.join(os.getcwd(), 'kits19', 'data')
     state = i['state']
 
     return {'return': 0}
diff --git a/script/get-dataset-kits19/meta.yaml b/script/get-dataset-kits19/meta.yaml
index eddb6a9a8..7c5716558 100644
--- a/script/get-dataset-kits19/meta.yaml
+++ b/script/get-dataset-kits19/meta.yaml
@@ -4,11 +4,11 @@ automation_uid: 5b4e0237da074764
 cache: true
 category: AI/ML datasets
 default_env:
-  CM_GIT_CHECKOUT: master
-  CM_GIT_DEPTH: --depth 2
-  CM_GIT_PATCH: 'no'
-  CM_GIT_RECURSE_SUBMODULES: ''
-  CM_GIT_URL: https://github.com/neheller/kits19
+  MLC_GIT_CHECKOUT: master
+  MLC_GIT_DEPTH: --depth 2
+  MLC_GIT_PATCH: 'no'
+  MLC_GIT_RECURSE_SUBMODULES: ''
+  MLC_GIT_URL: https://github.com/neheller/kits19
 default_version: master
 deps:
 - tags: detect,os
@@ -17,7 +17,7 @@ deps:
   - python
   tags: get,python3
 new_env_keys:
-- CM_DATASET_*
+- MLC_DATASET_*
 tags:
 - get
 - dataset
@@ -29,32 +29,32 @@ uid: 79992bb221024ac5
 variations:
   calibration:
     env:
-      CM_DATASET_CALIBRATION: 'yes'
+      MLC_DATASET_CALIBRATION: 'yes'
   default:
     base:
     - short-history
     env:
-      CM_GIT_PATCH: 'no'
+      MLC_GIT_PATCH: 'no'
   full-history:
     env:
-      CM_GIT_DEPTH: ''
+      MLC_GIT_DEPTH: ''
   no-recurse-submodules:
     env:
-      CM_GIT_RECURSE_SUBMODULES: ''
+      MLC_GIT_RECURSE_SUBMODULES: ''
   patch:
     env:
-      CM_GIT_PATCH: 'yes'
+      MLC_GIT_PATCH: 'yes'
   short-history:
     env:
-      CM_GIT_DEPTH: --depth 5
+      MLC_GIT_DEPTH: --depth 5
   validation:
     env:
-      CM_DATASET_VALIDATION: 'yes'
+      MLC_DATASET_VALIDATION: 'yes'
 versions:
   custom:
     env:
-      CM_GIT_CHECKOUT: ''
-      CM_GIT_SHA: 'yes'
+      MLC_GIT_CHECKOUT: ''
+      MLC_GIT_SHA: 'yes'
   master:
     env:
-      CM_GIT_CHECKOUT: master
+      MLC_GIT_CHECKOUT: master
diff --git a/script/get-dataset-kits19/run.sh b/script/get-dataset-kits19/run.sh
index f5bf0617a..aa164daf2 100644
--- a/script/get-dataset-kits19/run.sh
+++ b/script/get-dataset-kits19/run.sh
@@ -1,34 +1,34 @@
 #!/bin/bash
 
 CUR_DIR=$PWD
-SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH}
+SCRIPT_DIR=${MLC_TMP_CURRENT_SCRIPT_PATH}
 echo "******************************************************"
-echo "Cloning kits19 from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT} ${CM_GIT_DEPTH} ${CM_GIT_RECURSE_SUBMODULES}..."
+echo "Cloning kits19 from ${MLC_GIT_URL} with branch ${MLC_GIT_CHECKOUT} ${MLC_GIT_DEPTH} ${MLC_GIT_RECURSE_SUBMODULES}..."
 if [ ! -d "kits19" ]; then
-  if [ -z ${CM_GIT_SHA} ]; then
-    cmd="git clone ${CM_GIT_RECURSE_SUBMODULES} -b ${CM_GIT_CHECKOUT} ${CM_GIT_URL} ${CM_GIT_DEPTH} kits19"
+  if [ -z ${MLC_GIT_SHA} ]; then
+    cmd="git clone ${MLC_GIT_RECURSE_SUBMODULES} -b ${MLC_GIT_CHECKOUT} ${MLC_GIT_URL} ${MLC_GIT_DEPTH} kits19"
     echo $cmd
     eval $cmd
     cd kits19
   else
-    git clone ${CM_GIT_RECURSE_SUBMODULES} ${CM_GIT_URL} ${CM_GIT_DEPTH} kits19
+    git clone ${MLC_GIT_RECURSE_SUBMODULES} ${MLC_GIT_URL} ${MLC_GIT_DEPTH} kits19
     cd kits19
-    git checkout -b "${CM_GIT_CHECKOUT}"
+    git checkout -b "${MLC_GIT_CHECKOUT}"
   fi
   if [ "${?}" != "0" ]; then exit 1; fi
 else
   cd kits19
 fi
-if [ ${CM_GIT_PATCH} == "yes" ]; then
-  patch_filename=${CM_GIT_PATCH_FILENAME}
-  if [ ! -n ${CM_GIT_PATCH_FILENAMES} ]; then
-    patchfile=${CM_GIT_PATCH_FILENAME:-"git.patch"}
-    CM_GIT_PATCH_FILENAMES=$patchfile
+if [ ${MLC_GIT_PATCH} == "yes" ]; then
+  patch_filename=${MLC_GIT_PATCH_FILENAME}
+  if [ ! -n ${MLC_GIT_PATCH_FILENAMES} ]; then
+    patchfile=${MLC_GIT_PATCH_FILENAME:-"git.patch"}
+    MLC_GIT_PATCH_FILENAMES=$patchfile
   fi
-  IFS=', ' read -r -a patch_files <<< ${CM_GIT_PATCH_FILENAMES}
+  IFS=', ' read -r -a patch_files <<< ${MLC_GIT_PATCH_FILENAMES}
   for patch_filename in "${patch_files[@]}"
   do
     echo "Applying patch ${SCRIPT_DIR}/patch/$patch_filename"
@@ -37,7 +37,7 @@ if [ ${CM_GIT_PATCH} == "yes" ]; then
   done
 fi
 cd ${CUR_DIR}/kits19
-${CM_PYTHON_BIN_WITH_PATH} -m starter_code.get_imaging
+${MLC_PYTHON_BIN_WITH_PATH} -m starter_code.get_imaging
 cd data
 cp -rf case_00185 case_00400
 cd "$CUR_DIR"
diff --git a/script/get-dataset-librispeech/README-extra.md b/script/get-dataset-librispeech/README-extra.md
index 265902c92..d5d937fa3 100644
--- a/script/get-dataset-librispeech/README-extra.md
+++ b/script/get-dataset-librispeech/README-extra.md
@@ -16,10 +16,10 @@ where [VERSION] is one of
 * `train-other-500`
 
 ## Exported Variables
-* `CM_DATASET_ARCHIVE:`
-* `CM_DATASET_LIBRISPEECH_PATH:`
-* `CM_DATASET_MD5:`
-* `CM_DATASET_NAME:`
+* `MLC_DATASET_ARCHIVE:`
+* `MLC_DATASET_LIBRISPEECH_PATH:`
+* `MLC_DATASET_MD5:`
+* `MLC_DATASET_NAME:`
 
 ## Supported and Tested OS
 1. Ubuntu 18.04, 20.04, 22.04
diff --git a/script/get-dataset-librispeech/customize.py b/script/get-dataset-librispeech/customize.py
index 969b4b4ad..cf65e55c4 100644
--- a/script/get-dataset-librispeech/customize.py
+++ b/script/get-dataset-librispeech/customize.py
@@ -13,10 +13,10 @@ def preprocess(i):
 def postprocess(i):
     env = i['env']
 
-    folder_name = env['CM_DATASET_ARCHIVE'].split(".")[0]
-    env['CM_DATASET_LIBRISPEECH_PATH'] = os.path.join(
+    folder_name = env['MLC_DATASET_ARCHIVE'].split(".")[0]
+    env['MLC_DATASET_LIBRISPEECH_PATH'] = os.path.join(
         os.getcwd(), "LibriSpeech", folder_name)
-    env['CM_DATASET_PATH'] = os.path.join(
+    env['MLC_DATASET_PATH'] = os.path.join(
         os.getcwd(), "LibriSpeech", folder_name)
 
     return {'return': 0}
diff --git a/script/get-dataset-librispeech/meta.yaml b/script/get-dataset-librispeech/meta.yaml
index ead114f4a..c696b3e25 100644
--- a/script/get-dataset-librispeech/meta.yaml
+++ b/script/get-dataset-librispeech/meta.yaml
@@ -9,10 +9,10 @@ deps:
   - sys-utils
   tags: get,sys-utils-cm
 env:
-  CM_DATASET: LIBRISPEECH
-  CM_WGET_URL: http://www.openslr.org/resources/12/<<>>
+  MLC_DATASET: LIBRISPEECH
+  MLC_WGET_URL: http://www.openslr.org/resources/12/<<>>
 new_env_keys:
-- CM_DATASET_*
+- MLC_DATASET_*
 tags:
 - get
 - dataset
@@ -27,29 +27,29 @@ uid: 09f29df607e0415d
 versions:
   dev-clean:
     env:
-      CM_DATASET_ARCHIVE: dev-clean.tar.gz
-      CM_DATASET_MD5: 42e2234ba48799c1f50f24a7926300a1
-      CM_DATASET_NAME: LibriSpeech Dev Clean dataset
+      MLC_DATASET_ARCHIVE: dev-clean.tar.gz
+      MLC_DATASET_MD5: 42e2234ba48799c1f50f24a7926300a1
+      MLC_DATASET_NAME: LibriSpeech Dev Clean dataset
   dev-other:
     env:
-      CM_DATASET_ARCHIVE: dev-other.tar.gz
-      CM_DATASET_MD5: c8d0bcc9cca99d4f8b62fcc847357931
-      CM_DATASET_NAME: LibriSpeech Dev Other dataset
+      MLC_DATASET_ARCHIVE: dev-other.tar.gz
+      MLC_DATASET_MD5: c8d0bcc9cca99d4f8b62fcc847357931
+      MLC_DATASET_NAME: LibriSpeech Dev Other dataset
   test-clean:
     env:
-      CM_DATASET_ARCHIVE: test-clean.tar.gz
-      CM_DATASET_MD5: 32fa31d27d2e1cad72775fee3f4849a9
-      CM_DATASET_NAME: LibriSpeech Test Clean dataset
+      MLC_DATASET_ARCHIVE: test-clean.tar.gz
+      MLC_DATASET_MD5: 32fa31d27d2e1cad72775fee3f4849a9
+      MLC_DATASET_NAME: LibriSpeech Test Clean dataset
   test-other:
    env:
-      CM_DATASET_ARCHIVE: test-other.tar.gz
-      CM_DATASET_MD5: fb5a50374b501bb3bac4815ee91d3135
-      CM_DATASET_NAME: LibriSpeech Test Other dataset
+      MLC_DATASET_ARCHIVE: test-other.tar.gz
+      MLC_DATASET_MD5: fb5a50374b501bb3bac4815ee91d3135
+      MLC_DATASET_NAME: LibriSpeech Test Other dataset
   train-clean-100:
     env:
-      CM_DATASET_ARCHIVE: train-clean-100.tar.gz
-      CM_DATASET_MD5: 2a93770f6d5c6c964bc36631d331a522
-      CM_DATASET_NAME: LibriSpeech Train Clean 100 dataset
+      MLC_DATASET_ARCHIVE: train-clean-100.tar.gz
+      MLC_DATASET_MD5: 2a93770f6d5c6c964bc36631d331a522
+      MLC_DATASET_NAME: LibriSpeech Train Clean 100 dataset
   train-clean-360:
     env:
       DATASET_ARCHIVE: train-clean-360.tar.gz
diff --git a/script/get-dataset-librispeech/run.sh b/script/get-dataset-librispeech/run.sh
index 9c2fc2660..99bd174fe 100644
--- a/script/get-dataset-librispeech/run.sh
+++ b/script/get-dataset-librispeech/run.sh
@@ -1,8 +1,8 @@
 #!/bin/bash
 
-wget -nc ${CM_WGET_URL} --no-check-certificate
+wget -nc ${MLC_WGET_URL} --no-check-certificate
 test $? -eq 0 || exit 1
 
-tar -x --skip-old-files -vf ${CM_DATASET_ARCHIVE}
+tar -x --skip-old-files -vf ${MLC_DATASET_ARCHIVE}
 test $? -eq 0 || exit 1
diff --git a/script/get-dataset-mlperf-inference-llama3/customize.py b/script/get-dataset-mlperf-inference-llama3/customize.py
index 827dcd2cd..c501a6e60 100644
--- a/script/get-dataset-mlperf-inference-llama3/customize.py
+++ b/script/get-dataset-mlperf-inference-llama3/customize.py
@@ -11,11 +11,11 @@ def preprocess(i):
     if os_info['platform'] == "windows":
         return {'return': 1, 'error': 'Script not supported in windows yet!'}
 
-    if env.get('CM_DATASET_LLAMA3_PATH', '') == '':
-        env['CM_TMP_REQUIRE_DOWNLOAD'] = "yes"
+    if env.get('MLC_DATASET_LLAMA3_PATH', '') == '':
+        env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes"
 
-    if env.get('CM_OUTDIRNAME', '') != '':
-        env['CM_DOWNLOAD_PATH'] = env['CM_OUTDIRNAME']
+    if env.get('MLC_OUTDIRNAME', '') != '':
+        env['MLC_DOWNLOAD_PATH'] = env['MLC_OUTDIRNAME']
 
     return {'return': 0}
 
@@ -24,8 +24,8 @@ def postprocess(i):
 
     env = i['env']
 
-    if env.get('CM_TMP_REQUIRE_DOWNLOAD', '') == "yes":
-        env['CM_DATASET_LLAMA3_PATH'] = os.path.join(
-            env['CM_DATASET_LLAMA3_PATH'], env['CM_DATASET_FILE_NAME'])
+    if env.get('MLC_TMP_REQUIRE_DOWNLOAD', '') == "yes":
+        env['MLC_DATASET_LLAMA3_PATH'] = os.path.join(
+            env['MLC_DATASET_LLAMA3_PATH'], env['MLC_DATASET_FILE_NAME'])
 
     return {'return': 0}
diff --git a/script/get-dataset-mlperf-inference-llama3/meta.yaml b/script/get-dataset-mlperf-inference-llama3/meta.yaml
index d8af83b88..1028ab0cf 100644
--- a/script/get-dataset-mlperf-inference-llama3/meta.yaml
+++ b/script/get-dataset-mlperf-inference-llama3/meta.yaml
@@ -10,47 +10,47 @@ tags:
 - inference
 uid: c3bc69599cbc4db7
 new_env_keys:
-  - CM_DATASET_LLAMA3_PATH
+  - MLC_DATASET_LLAMA3_PATH
 input_mapping:
-  outdirname: CM_OUTDIRNAME
+  outdirname: MLC_OUTDIRNAME
 prehook_deps:
 - env:
-    CM_DOWNLOAD_FINAL_ENV_NAME: CM_DATASET_LLAMA3_PATH
-    CM_EXTRACT_TO_FOLDER: llama-3-dataset
+    MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_DATASET_LLAMA3_PATH
+    MLC_EXTRACT_TO_FOLDER: llama-3-dataset
   extra_cache_tags: dataset,llama3
   force_cache: true
   enable_if_env:
-    CM_TMP_REQUIRE_DOWNLOAD:
+    MLC_TMP_REQUIRE_DOWNLOAD:
     - 'yes'
   names:
   - dae
   tags: download-and-extract
   update_tags_from_env_with_prefix:
     _url.:
-      - CM_DOWNLOAD_URL
+      - MLC_DOWNLOAD_URL
 variations:
   validation:
     default: true
     group: dataset-type
     env:
-      CM_RCLONE_URL: mlc-inference:mlcommons-inference-wg-public/llama3_405b/mlperf_llama3.1_405b_dataset_8313_processed_fp16_eval.pkl
-      CM_DATASET_TYPE: validation
-      CM_DATASET_FILE_NAME: mlperf_llama3.1_405b_dataset_8313_processed_fp16_eval.pkl
+      MLC_RCLONE_URL: mlc-inference:mlcommons-inference-wg-public/llama3_405b/mlperf_llama3.1_405b_dataset_8313_processed_fp16_eval.pkl
+      MLC_DATASET_TYPE: validation
+      MLC_DATASET_FILE_NAME: mlperf_llama3.1_405b_dataset_8313_processed_fp16_eval.pkl
   calibration:
     group: dataset-type
    env:
-      CM_RCLONE_URL: mlc-inference:mlcommons-inference-wg-public/llama3_405b/mlperf_llama3.1_405b_calibration_dataset_512_processed_fp16_eval.pkl
-      CM_DATASET_TYPE: calibration
-      CM_DATASET_FILE_NAME: mlperf_llama3.1_405b_calibration_dataset_512_processed_fp16_eval.pkl
+      MLC_RCLONE_URL: mlc-inference:mlcommons-inference-wg-public/llama3_405b/mlperf_llama3.1_405b_calibration_dataset_512_processed_fp16_eval.pkl
+      MLC_DATASET_TYPE: calibration
+      MLC_DATASET_FILE_NAME: mlperf_llama3.1_405b_calibration_dataset_512_processed_fp16_eval.pkl
   rclone:
     add_deps_recursive:
       dae:
         tags: _rclone
     default: true
     env:
-      CM_DOWNLOAD_FILENAME: checkpoint
-      CM_DOWNLOAD_URL: <<>>
-      CM_RCLONE_CONFIG_NAME: mlc-inference
+      MLC_DOWNLOAD_FILENAME: checkpoint
+      MLC_DOWNLOAD_URL: <<>>
+      MLC_RCLONE_CONFIG_NAME: mlc-inference
mlc-inference group: download-tool print_env_at_the_end: - CM_DATASET_LLAMA3_PATH: Path to the dataset + MLC_DATASET_LLAMA3_PATH: Path to the dataset diff --git a/script/get-dataset-mlperf-inference-mixtral/customize.py b/script/get-dataset-mlperf-inference-mixtral/customize.py index dc46a6661..bcfb39259 100644 --- a/script/get-dataset-mlperf-inference-mixtral/customize.py +++ b/script/get-dataset-mlperf-inference-mixtral/customize.py @@ -8,8 +8,8 @@ def preprocess(i): env = i['env'] - if env.get('CM_DATASET_MIXTRAL_GENERATE_TEST_DATA', '') == "yes": - env['CM_DATASET_MIXTRAL_TEST_DATA_GENERATED_PATH'] = os.path.join( + if env.get('MLC_DATASET_MIXTRAL_GENERATE_TEST_DATA', '') == "yes": + env['MLC_DATASET_MIXTRAL_TEST_DATA_GENERATED_PATH'] = os.path.join( os.getcwd(), "mixtral-test-dataset.pkl") return {'return': 0} @@ -18,9 +18,9 @@ def preprocess(i): def postprocess(i): env = i['env'] - env['CM_DATASET_MIXTRAL_PREPROCESSED_PATH'] = env['CM_DATASET_PREPROCESSED_PATH'] + env['MLC_DATASET_MIXTRAL_PREPROCESSED_PATH'] = env['MLC_DATASET_PREPROCESSED_PATH'] - if env.get('CM_DATASET_MIXTRAL_GENERATE_TEST_DATA', '') == "yes": - env['CM_DATASET_MIXTRAL_PREPROCESSED_PATH'] = env['CM_DATASET_MIXTRAL_TEST_DATA_GENERATED_PATH'] + if env.get('MLC_DATASET_MIXTRAL_GENERATE_TEST_DATA', '') == "yes": + env['MLC_DATASET_MIXTRAL_PREPROCESSED_PATH'] = env['MLC_DATASET_MIXTRAL_TEST_DATA_GENERATED_PATH'] return {'return': 0} diff --git a/script/get-dataset-mlperf-inference-mixtral/meta.yaml b/script/get-dataset-mlperf-inference-mixtral/meta.yaml index 566f7bb05..99b8e0c2f 100644 --- a/script/get-dataset-mlperf-inference-mixtral/meta.yaml +++ b/script/get-dataset-mlperf-inference-mixtral/meta.yaml @@ -4,18 +4,18 @@ automation_uid: 5b4e0237da074764 cache: true category: AI/ML datasets new_env_keys: -- CM_DATASET_* +- MLC_DATASET_* prehook_deps: - env: - CM_DOWNLOAD_FINAL_ENV_NAME: CM_DATASET_PREPROCESSED_PATH + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_DATASET_PREPROCESSED_PATH extra_cache_tags: mixtral,get-mixtral-dataset force_cache: true tags: download-and-extract force_env_keys: - - CM_OUTDIRNAME + - MLC_OUTDIRNAME update_tags_from_env_with_prefix: _url.: - - CM_PACKAGE_URL + - MLC_PACKAGE_URL tags: - get - dataset-mixtral @@ -25,9 +25,9 @@ variations: mlcommons-storage: default: true env: - CM_DOWNLOAD_CHECKSUM: 78823c13e0e73e518872105c4b09628b - CM_DOWNLOAD_FILENAME: 2024.06.06_mixtral_15k_v4.pkl - CM_PACKAGE_URL: https://inference.mlcommons-storage.org/mixtral_8x7b%2F2024.06.06_mixtral_15k_v4.pkl + MLC_DOWNLOAD_CHECKSUM: 78823c13e0e73e518872105c4b09628b + MLC_DOWNLOAD_FILENAME: 2024.06.06_mixtral_15k_v4.pkl + MLC_PACKAGE_URL: https://inference.mlcommons-storage.org/mixtral_8x7b%2F2024.06.06_mixtral_15k_v4.pkl group: download-source size.#: base: @@ -36,5 +36,5 @@ variations: - tags: get,generic-python-lib,_package.pandas - tags: get,python3 env: - CM_DATASET_MIXTRAL_GENERATE_TEST_DATA: 'yes' - CM_DATASET_MIXTRAL_TEST_DATA_SIZE: '#' + MLC_DATASET_MIXTRAL_GENERATE_TEST_DATA: 'yes' + MLC_DATASET_MIXTRAL_TEST_DATA_SIZE: '#' diff --git a/script/get-dataset-mlperf-inference-mixtral/run.sh b/script/get-dataset-mlperf-inference-mixtral/run.sh index 91ad97a53..2bd2955b9 100644 --- a/script/get-dataset-mlperf-inference-mixtral/run.sh +++ b/script/get-dataset-mlperf-inference-mixtral/run.sh @@ -1,5 +1,5 @@ #!/bin/bash -if [[ "$CM_DATASET_MIXTRAL_GENERATE_TEST_DATA" == "yes" ]]; then - ${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/generate-test-dataset.py --dataset-path ${CM_DATASET_PREPROCESSED_PATH} --output-path 
${CM_DATASET_MIXTRAL_TEST_DATA_GENERATED_PATH} --samples ${CM_DATASET_MIXTRAL_TEST_DATA_SIZE} +if [[ "$MLC_DATASET_MIXTRAL_GENERATE_TEST_DATA" == "yes" ]]; then + ${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/generate-test-dataset.py --dataset-path ${MLC_DATASET_PREPROCESSED_PATH} --output-path ${MLC_DATASET_MIXTRAL_TEST_DATA_GENERATED_PATH} --samples ${MLC_DATASET_MIXTRAL_TEST_DATA_SIZE} fi diff --git a/script/get-dataset-openimages-annotations/customize.py b/script/get-dataset-openimages-annotations/customize.py index 286ba3fa8..3b4160d4e 100644 --- a/script/get-dataset-openimages-annotations/customize.py +++ b/script/get-dataset-openimages-annotations/customize.py @@ -14,11 +14,11 @@ def preprocess(i): def postprocess(i): env = i['env'] - env['CM_DATASET_ANNOTATIONS_FILE_PATH'] = os.path.join( - env['CM_DATASET_ANNOTATIONS_FILE_PATH'], 'openimages-mlperf.json') - env['CM_DATASET_ANNOTATIONS_DIR_PATH'] = os.path.dirname( - env['CM_DATASET_ANNOTATIONS_FILE_PATH']) - env['CM_DATASET_OPENIMAGES_ANNOTATIONS_FILE_PATH'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH'] - env['CM_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH'] = env['CM_DATASET_ANNOTATIONS_DIR_PATH'] + env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] = os.path.join( + env['MLC_DATASET_ANNOTATIONS_FILE_PATH'], 'openimages-mlperf.json') + env['MLC_DATASET_ANNOTATIONS_DIR_PATH'] = os.path.dirname( + env['MLC_DATASET_ANNOTATIONS_FILE_PATH']) + env['MLC_DATASET_OPENIMAGES_ANNOTATIONS_FILE_PATH'] = env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] + env['MLC_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH'] = env['MLC_DATASET_ANNOTATIONS_DIR_PATH'] return {'return': 0} diff --git a/script/get-dataset-openimages-annotations/meta.yaml b/script/get-dataset-openimages-annotations/meta.yaml index 16158cef6..03e44e436 100644 --- a/script/get-dataset-openimages-annotations/meta.yaml +++ b/script/get-dataset-openimages-annotations/meta.yaml @@ -4,21 +4,21 @@ automation_uid: 5b4e0237da074764 cache: true category: AI/ML datasets new_env_keys: -- CM_DATASET_OPENIMAGES_ANNOTATIONS_* -- CM_DATASET_ANNOTATIONS_* +- MLC_DATASET_OPENIMAGES_ANNOTATIONS_* +- MLC_DATASET_ANNOTATIONS_* prehook_deps: - env: - CM_DAE_FINAL_ENV_NAME: CM_DATASET_ANNOTATIONS_FILE_PATH + MLC_DAE_FINAL_ENV_NAME: MLC_DATASET_ANNOTATIONS_FILE_PATH extra_cache_tags: retinanet,get,dataset-openimages-annotations force_cache: true tags: download-and-extract,_wget,_extract force_env_keys: - - CM_OUTDIRNAME + - MLC_OUTDIRNAME update_tags_from_env_with_prefix: _url.: - - CM_PACKAGE_URL + - MLC_PACKAGE_URL print_env_at_the_end: - CM_DATASET_ANNOTATIONS_FILE_PATH: Path to OpenImages annotation file + MLC_DATASET_ANNOTATIONS_FILE_PATH: Path to OpenImages annotation file tags: - get - aux @@ -31,7 +31,7 @@ variations: from.github: default: true env: - CM_DOWNLOAD_CHECKSUM: 817fd8da3aeeb0575f1e2d2926b15e68 - CM_DOWNLOAD_FILENAME: openimages-mlperf_annotations_2.1.json.zip - CM_PACKAGE_URL: https://github.com/mlcommons/inference/releases/download/v2.1/openimages-mlperf_annotations_2.1.json.zip + MLC_DOWNLOAD_CHECKSUM: 817fd8da3aeeb0575f1e2d2926b15e68 + MLC_DOWNLOAD_FILENAME: openimages-mlperf_annotations_2.1.json.zip + MLC_PACKAGE_URL: https://github.com/mlcommons/inference/releases/download/v2.1/openimages-mlperf_annotations_2.1.json.zip group: download-source diff --git a/script/get-dataset-openimages-calibration/customize.py b/script/get-dataset-openimages-calibration/customize.py index 5fc459075..fc8466566 100644 --- a/script/get-dataset-openimages-calibration/customize.py +++ 
b/script/get-dataset-openimages-calibration/customize.py @@ -12,13 +12,13 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') - if env.get("CM_CALIBRATE_FILTER", "") == "yes": + if env.get("MLC_CALIBRATE_FILTER", "") == "yes": i['run_script_input']['script_name'] = "run-filter" - env['CM_MLPERF_OPENIMAGES_CALIBRATION_FILTERED_LIST'] = os.path.join( + env['MLC_MLPERF_OPENIMAGES_CALIBRATION_FILTERED_LIST'] = os.path.join( os.getcwd(), "filtered.txt") - env['CM_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH'] = env['CM_MLPERF_OPENIMAGES_CALIBRATION_FILTERED_LIST'] + env['MLC_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH'] = env['MLC_MLPERF_OPENIMAGES_CALIBRATION_FILTERED_LIST'] return {'return': 0} diff --git a/script/get-dataset-openimages-calibration/filter.py b/script/get-dataset-openimages-calibration/filter.py index d8d2638b5..66edc90b1 100644 --- a/script/get-dataset-openimages-calibration/filter.py +++ b/script/get-dataset-openimages-calibration/filter.py @@ -19,7 +19,7 @@ data['images'], key=lambda x: x['num_boxes'], reverse=os.environ.get( - 'CM_CALIBRATION_FILTER_ORDER_BY_NUM_BOXES_ASC', + 'MLC_CALIBRATION_FILTER_ORDER_BY_NUM_BOXES_ASC', '') == "yes") for image in data['images']: print(image['file_name']) diff --git a/script/get-dataset-openimages-calibration/meta.yaml b/script/get-dataset-openimages-calibration/meta.yaml index 6edd3716c..08585665b 100644 --- a/script/get-dataset-openimages-calibration/meta.yaml +++ b/script/get-dataset-openimages-calibration/meta.yaml @@ -11,16 +11,16 @@ category: "AI/ML datasets" deps: - tags: download,file force_env_keys: - - CM_OUTDIRNAME + - MLC_OUTDIRNAME force_cache: true extra_cache_tags: openimages-calibration,openimages,calibration names: - calibration-file-downloader env: - CM_DOWNLOAD_FINAL_ENV_NAME: CM_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH new_env_keys: -- CM_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH +- MLC_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH tags: - get @@ -33,13 +33,13 @@ variations: group: calibration-option default: true env: - CM_MLPERF_OPENIMAGES_CALIBRATION_OPTION: one - CM_DOWNLOAD_CHECKSUM: 5c3196ddcec4605c6a9fcf004d9615e6 + MLC_MLPERF_OPENIMAGES_CALIBRATION_OPTION: one + MLC_DOWNLOAD_CHECKSUM: 5c3196ddcec4605c6a9fcf004d9615e6 adr: calibration-file-downloader: tags: _url.https://github.com/mlcommons/inference/raw/master/calibration/openimages/openimages_cal_images_list.txt env: - CM_DOWNLOAD_CHECKSUM: 5c3196ddcec4605c6a9fcf004d9615e6 + MLC_DOWNLOAD_CHECKSUM: 5c3196ddcec4605c6a9fcf004d9615e6 filter: default_variations: filter-size: filter_size.400 @@ -50,15 +50,15 @@ variations: tags: get,python3 - tags: get,openimages,dataset,original,_calibration env: - CM_CALIBRATE_FILTER: '' + MLC_CALIBRATE_FILTER: '' env: - CM_CALIBRATE_FILTER: 'yes' + MLC_CALIBRATE_FILTER: 'yes' filter-size.#: group: filter-size env: - CM_CALIBRATION_FILTER_SIZE: "#" + MLC_CALIBRATION_FILTER_SIZE: "#" filter-size.400: group: filter-size env: - CM_CALIBRATION_FILTER_SIZE: 400 + MLC_CALIBRATION_FILTER_SIZE: 400 diff --git a/script/get-dataset-openimages-calibration/run-filter.sh b/script/get-dataset-openimages-calibration/run-filter.sh index 9b1a90c68..7517475fe 100644 --- a/script/get-dataset-openimages-calibration/run-filter.sh +++ b/script/get-dataset-openimages-calibration/run-filter.sh @@ -1,6 +1,6 @@ #!/bin/bash 
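
[For orientation: the filter.py logic above, combined with the `head -n ${MLC_CALIBRATION_FILTER_SIZE}` step in run-filter.sh just below, reduces to the following. This is a minimal Python sketch assuming the annotations JSON carries a `num_boxes` field per image, as filter.py expects; `filter_calibration_list` is a hypothetical helper name, not part of the script.]

    import json
    import os

    def filter_calibration_list(annotations_path, size):
        # Same ordering rule as filter.py: sort images by their
        # ground-truth box count, honouring the *_ASC flag exactly as
        # the script does, then keep the first `size` file names (the
        # job `head -n ${MLC_CALIBRATION_FILTER_SIZE}` performs).
        with open(annotations_path) as f:
            data = json.load(f)
        ordered = sorted(
            data['images'],
            key=lambda x: x['num_boxes'],
            reverse=os.environ.get(
                'MLC_CALIBRATION_FILTER_ORDER_BY_NUM_BOXES_ASC', '') == 'yes')
        return [image['file_name'] for image in ordered[:size]]

    filtered = filter_calibration_list(
        os.environ['MLC_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH'],
        int(os.environ.get('MLC_CALIBRATION_FILTER_SIZE', '400')))
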
-${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/filter.py ${CM_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH} > ordered.txt +${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/filter.py ${MLC_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH} > ordered.txt test $? -eq 0 || exit $? -head -n ${CM_CALIBRATION_FILTER_SIZE} ordered.txt >filtered.txt +head -n ${MLC_CALIBRATION_FILTER_SIZE} ordered.txt >filtered.txt test $? -eq 0 || exit $? diff --git a/script/get-dataset-openimages/customize.py b/script/get-dataset-openimages/customize.py index 0f68a45c2..6eb686d97 100644 --- a/script/get-dataset-openimages/customize.py +++ b/script/get-dataset-openimages/customize.py @@ -10,7 +10,7 @@ def preprocess(i): print("") print("Using MLCommons Inference source from '" + - env['CM_MLPERF_INFERENCE_SOURCE'] + "'") + env['MLC_MLPERF_INFERENCE_SOURCE'] + "'") print("") if os_info['platform'] == 'windows': @@ -58,7 +58,7 @@ def preprocess(i): if x != '': x += ' ' x += '"' + v + '"' - env['CM_DATASET_OPENIMAGES_CLASSES'] = x + env['MLC_DATASET_OPENIMAGES_CLASSES'] = x return {'return': 0} @@ -66,36 +66,36 @@ def preprocess(i): def postprocess(i): env = i['env'] - env['CM_DATASET_ANNOTATIONS_DIR_PATH'] = os.path.join( + env['MLC_DATASET_ANNOTATIONS_DIR_PATH'] = os.path.join( os.getcwd(), 'install', 'annotations') - if env.get('CM_DATASET_CALIBRATION', '') == "no": - env['CM_DATASET_PATH_ROOT'] = os.path.join(os.getcwd(), 'install') - env['CM_DATASET_PATH'] = os.path.join( + if env.get('MLC_DATASET_CALIBRATION', '') == "no": + env['MLC_DATASET_PATH_ROOT'] = os.path.join(os.getcwd(), 'install') + env['MLC_DATASET_PATH'] = os.path.join( os.getcwd(), 'install', 'validation', 'data') annotations_file_path = os.path.join( - env['CM_DATASET_ANNOTATIONS_DIR_PATH'], + env['MLC_DATASET_ANNOTATIONS_DIR_PATH'], "openimages-mlperf.json") - env['CM_DATASET_VALIDATION_ANNOTATIONS_FILE_PATH'] = annotations_file_path - env['CM_DATASET_ANNOTATIONS_FILE_PATH'] = annotations_file_path - env['CM_DATASET_OPENIMAGES_VALIDATION_ANNOTATIONS_FILE_PATH'] = annotations_file_path - if env.get("CM_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS", '') == "yes": - annotations_file_src = env['CM_DATASET_OPENIMAGES_ANNOTATIONS_FILE_PATH'] + env['MLC_DATASET_VALIDATION_ANNOTATIONS_FILE_PATH'] = annotations_file_path + env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] = annotations_file_path + env['MLC_DATASET_OPENIMAGES_VALIDATION_ANNOTATIONS_FILE_PATH'] = annotations_file_path + if env.get("MLC_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS", '') == "yes": + annotations_file_src = env['MLC_DATASET_OPENIMAGES_ANNOTATIONS_FILE_PATH'] shutil.copy( annotations_file_src, - env['CM_DATASET_ANNOTATIONS_DIR_PATH']) - env['CM_DATASET_OPENIMAGES_PATH'] = env['CM_DATASET_PATH'] - env['CM_DATASET_OPENIMAGES_PATH_ROOT'] = env['CM_DATASET_PATH_ROOT'] + env['MLC_DATASET_ANNOTATIONS_DIR_PATH']) + env['MLC_DATASET_OPENIMAGES_PATH'] = env['MLC_DATASET_PATH'] + env['MLC_DATASET_OPENIMAGES_PATH_ROOT'] = env['MLC_DATASET_PATH_ROOT'] else: - env['CM_CALIBRATION_DATASET_PATH'] = os.path.join( + env['MLC_CALIBRATION_DATASET_PATH'] = os.path.join( os.getcwd(), 'install', 'calibration', 'data') - env['CM_OPENIMAGES_CALIBRATION_DATASET_PATH'] = os.path.join( + env['MLC_OPENIMAGES_CALIBRATION_DATASET_PATH'] = os.path.join( os.getcwd(), 'install', 'calibration', 'data') - env['CM_CALIBRATION_DATASET_PATH_ROOT'] = os.path.join( + env['MLC_CALIBRATION_DATASET_PATH_ROOT'] = os.path.join( os.getcwd(), 'install') annotations_file_path = os.path.join( - env['CM_DATASET_ANNOTATIONS_DIR_PATH'], + 
env['MLC_DATASET_ANNOTATIONS_DIR_PATH'], "openimages-calibration-mlperf.json") - env['CM_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH'] = annotations_file_path + env['MLC_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH'] = annotations_file_path return {'return': 0} diff --git a/script/get-dataset-openimages/meta.yaml b/script/get-dataset-openimages/meta.yaml index 2e0189183..e885d8585 100644 --- a/script/get-dataset-openimages/meta.yaml +++ b/script/get-dataset-openimages/meta.yaml @@ -5,7 +5,7 @@ cache: true category: AI/ML datasets category_sort: 8500 default_env: - CM_DATASET_CALIBRATION: 'no' + MLC_DATASET_CALIBRATION: 'no' deps: - names: - python @@ -13,7 +13,7 @@ deps: tags: get,python3 - tags: get,generic-python-lib,_requests - force_env_keys: - - CM_GIT_* + - MLC_GIT_* names: - inference-src tags: mlperf,inference,source @@ -26,25 +26,25 @@ deps: - pycocotools tags: get,generic-python-lib,_pycocotools env: - CM_DATASET: OPENIMAGES + MLC_DATASET: OPENIMAGES new_env_keys: -- CM_DATASET_PATH -- CM_DATASET_PATH_ROOT -- CM_DATASET_OPENIMAGES_PATH -- CM_DATASET_OPENIMAGES_DATASET_PATH -- CM_DATASET_OPENIMAGES_DATASET_PATH_ROOT -- CM_DATASET_ANNOTATIONS_DIR_PATH -- CM_DATASET_ANNOTATIONS_FILE_PATH -- CM_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH -- CM_DATASET_VALIDATION_ANNOTATIONS_FILE_PATH -- CM_CALIBRATION_DATASET_PATH -- CM_CALIBRATION_DATASET_PATH_ROOT -- CM_OPENIMAGES_CALIBRATION_DATASET_PATH -- CM_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH -- CM_DATASET_OPENIMAGES_VALIDATION_ANNOTATIONS_FILE_PATH +- MLC_DATASET_PATH +- MLC_DATASET_PATH_ROOT +- MLC_DATASET_OPENIMAGES_PATH +- MLC_DATASET_OPENIMAGES_DATASET_PATH +- MLC_DATASET_OPENIMAGES_DATASET_PATH_ROOT +- MLC_DATASET_ANNOTATIONS_DIR_PATH +- MLC_DATASET_ANNOTATIONS_FILE_PATH +- MLC_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH +- MLC_DATASET_VALIDATION_ANNOTATIONS_FILE_PATH +- MLC_CALIBRATION_DATASET_PATH +- MLC_CALIBRATION_DATASET_PATH_ROOT +- MLC_OPENIMAGES_CALIBRATION_DATASET_PATH +- MLC_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH +- MLC_DATASET_OPENIMAGES_VALIDATION_ANNOTATIONS_FILE_PATH posthook_deps: - enable_if_env: - CM_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS: + MLC_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS: - 'yes' tags: get,openimages,annotations tags: @@ -59,11 +59,11 @@ variations: '50': default: true env: - CM_DATASET_SIZE: '50' + MLC_DATASET_SIZE: '50' group: size '500': env: - CM_DATASET_SIZE: '500' + MLC_DATASET_SIZE: '500' group: size calibration: deps: @@ -71,20 +71,20 @@ variations: - openimages-calibration tags: get,openimages,calibration env: - CM_DATASET_CALIBRATION: 'yes' + MLC_DATASET_CALIBRATION: 'yes' group: dataset-type new_env_keys: - - CM_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH - - CM_CALIBRATION_DATASET_PATH - - CM_CALIBRATION_DATASET_PATH_ROOT + - MLC_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH + - MLC_CALIBRATION_DATASET_PATH + - MLC_CALIBRATION_DATASET_PATH_ROOT custom-annotations: env: - CM_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS: 'yes' + MLC_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS: 'yes' group: annotations default-annotations: default: true env: - CM_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS: 'no' + MLC_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS: 'no' group: annotations filter: {} filter,calibration: @@ -97,11 +97,11 @@ variations: tags: _filter-size.# full: env: - CM_DATASET_SIZE: '' + MLC_DATASET_SIZE: '' group: size size.#: env: - CM_DATASET_SIZE: '#' + MLC_DATASET_SIZE: '#' group: size using-fiftyone: add_deps_recursive: @@ -114,13 +114,13 @@ variations: validation: default: true env: - CM_DATASET_CALIBRATION: 'no' + 
MLC_DATASET_CALIBRATION: 'no' group: dataset-type new_env_keys: - - CM_DATASET_PATH - - CM_DATASET_PATH_ROOT - - CM_DATASET_OPENIMAGES_DATASET_PATH - - CM_DATASET_OPENIMAGES_DATASET_PATH_ROOT - - CM_DATASET_ANNOTATIONS_DIR_PATH - - CM_DATASET_ANNOTATIONS_FILE_PATH - - CM_DATASET_VALIDATION_ANNOTATIONS_FILE_PATH + - MLC_DATASET_PATH + - MLC_DATASET_PATH_ROOT + - MLC_DATASET_OPENIMAGES_DATASET_PATH + - MLC_DATASET_OPENIMAGES_DATASET_PATH_ROOT + - MLC_DATASET_ANNOTATIONS_DIR_PATH + - MLC_DATASET_ANNOTATIONS_FILE_PATH + - MLC_DATASET_VALIDATION_ANNOTATIONS_FILE_PATH diff --git a/script/get-dataset-openimages/run.bat b/script/get-dataset-openimages/run.bat index 742542d25..017eac131 100644 --- a/script/get-dataset-openimages/run.bat +++ b/script/get-dataset-openimages/run.bat @@ -1,21 +1,21 @@ @echo off set CUR_DIR=%cd% -set SCRIPT_DIR=%CM_TMP_CURRENT_SCRIPT_PATH% +set SCRIPT_DIR=%MLC_TMP_CURRENT_SCRIPT_PATH% if not exist install mkdir install set INSTALL_DIR=%CUR_DIR%\install -cd %CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH% +cd %MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH% -if not "%CM_DATASET_SIZE%" == "" ( - set MAX_IMAGES=--max-images %CM_DATASET_SIZE% --seed 42 +if not "%MLC_DATASET_SIZE%" == "" ( + set MAX_IMAGES=--max-images %MLC_DATASET_SIZE% --seed 42 ) else ( set MAX_IMAGES= ) -%CM_PYTHON_BIN% tools\openimages.py %MAX_IMAGES% --dataset-dir=%INSTALL_DIR% --output-labels=openimages-mlperf.json --classes %CM_DATASET_OPENIMAGES_CLASSES% +%MLC_PYTHON_BIN% tools\openimages.py %MAX_IMAGES% --dataset-dir=%INSTALL_DIR% --output-labels=openimages-mlperf.json --classes %MLC_DATASET_OPENIMAGES_CLASSES% IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% rem Next is a hack to support MLPerf inference on Windows diff --git a/script/get-dataset-openimages/run.sh b/script/get-dataset-openimages/run.sh index 2fc6eaddf..2a034ed31 100644 --- a/script/get-dataset-openimages/run.sh +++ b/script/get-dataset-openimages/run.sh @@ -1,6 +1,6 @@ #!/bin/bash python3() { - ${CM_PYTHON_BIN_WITH_PATH} "$@" + ${MLC_PYTHON_BIN_WITH_PATH} "$@" } export -f python3 @@ -8,11 +8,11 @@ CUR=${PWD} mkdir -p install INSTALL_DIR=${CUR}/install -cd ${CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH} +cd ${MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH} cd tools -if [[ ${CM_DATASET_CALIBRATION} == "no" ]]; then - if [ ! -z ${CM_DATASET_SIZE} ]; then - max_images=" -m ${CM_DATASET_SIZE}" +if [[ ${MLC_DATASET_CALIBRATION} == "no" ]]; then + if [ ! -z ${MLC_DATASET_SIZE} ]; then + max_images=" -m ${MLC_DATASET_SIZE}" else max_images="" fi @@ -21,8 +21,8 @@ if [[ ${CM_DATASET_CALIBRATION} == "no" ]]; then eval $cmd test $? 
-eq 0 || exit 1 else - if [ -n ${CM_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH} ]; then - calibration_file_string=" --calibration-file ${CM_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH}" + if [ -n ${MLC_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH} ]; then + calibration_file_string=" --calibration-file ${MLC_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH}" else calibration_file_string="" fi diff --git a/script/get-dataset-openorca/customize.py b/script/get-dataset-openorca/customize.py index 6daca4bf6..3bb95817b 100644 --- a/script/get-dataset-openorca/customize.py +++ b/script/get-dataset-openorca/customize.py @@ -12,13 +12,13 @@ def preprocess(i): def postprocess(i): env = i['env'] - if env.get('CM_DATASET_CALIBRATION', '') == "no": - env['CM_DATASET_PATH_ROOT'] = env['CM_DATASET_OPENORCA_PATH'] - env['CM_DATASET_PATH'] = env['CM_DATASET_OPENORCA_PATH'] - env['CM_DATASET_OPENORCA_PARQUET'] = os.path.join( - env['CM_DATASET_OPENORCA_PATH'], '1M-GPT4-Augmented.parquet') + if env.get('MLC_DATASET_CALIBRATION', '') == "no": + env['MLC_DATASET_PATH_ROOT'] = env['MLC_DATASET_OPENORCA_PATH'] + env['MLC_DATASET_PATH'] = env['MLC_DATASET_OPENORCA_PATH'] + env['MLC_DATASET_OPENORCA_PARQUET'] = os.path.join( + env['MLC_DATASET_OPENORCA_PATH'], '1M-GPT4-Augmented.parquet') else: - env['CM_CALIBRATION_DATASET_PATH'] = os.path.join( + env['MLC_CALIBRATION_DATASET_PATH'] = os.path.join( os.getcwd(), 'install', 'calibration', 'data') return {'return': 0} diff --git a/script/get-dataset-openorca/meta.yaml b/script/get-dataset-openorca/meta.yaml index c860b0213..2e4856f4c 100644 --- a/script/get-dataset-openorca/meta.yaml +++ b/script/get-dataset-openorca/meta.yaml @@ -5,22 +5,22 @@ cache: true category: AI/ML datasets category_sort: 8500 default_env: - CM_DATASET_CALIBRATION: 'no' + MLC_DATASET_CALIBRATION: 'no' deps: - env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_DATASET_OPENORCA_PATH + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_DATASET_OPENORCA_PATH extra_cache_tags: openorca,repo,src force_env_keys: - - CM_GIT_* + - MLC_GIT_* names: - openorca-src tags: get,git,repo,_lfs,_repo.https://huggingface.co/datasets/Open-Orca/OpenOrca force_env_keys: - - CM_OUTDIRNAME + - MLC_OUTDIRNAME env: - CM_DATASET: OPENORCA + MLC_DATASET: OPENORCA new_env_keys: -- CM_DATASET_* +- MLC_DATASET_* tags: - get - dataset @@ -31,27 +31,27 @@ uid: 9252c4d90d5940b7 variations: '500': env: - CM_DATASET_SIZE: '500' + MLC_DATASET_SIZE: '500' group: size '60': env: - CM_DATASET_SIZE: '60' + MLC_DATASET_SIZE: '60' group: size calibration: env: - CM_DATASET_CALIBRATION: 'yes' + MLC_DATASET_CALIBRATION: 'yes' group: dataset-type full: default: true env: - CM_DATASET_SIZE: '24576' + MLC_DATASET_SIZE: '24576' group: size size.#: env: - CM_DATASET_SIZE: '#' + MLC_DATASET_SIZE: '#' group: size validation: default: true env: - CM_DATASET_CALIBRATION: 'no' + MLC_DATASET_CALIBRATION: 'no' group: dataset-type diff --git a/script/get-dataset-squad-vocab/customize.py b/script/get-dataset-squad-vocab/customize.py index 538250b19..5808e00b9 100644 --- a/script/get-dataset-squad-vocab/customize.py +++ b/script/get-dataset-squad-vocab/customize.py @@ -14,6 +14,6 @@ def preprocess(i): def postprocess(i): env = i['env'] - env['CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH'] = env['CM_DATASET_SQUAD_VOCAB_PATH'] + env['MLC_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH'] = env['MLC_DATASET_SQUAD_VOCAB_PATH'] return {'return': 0} diff --git a/script/get-dataset-squad-vocab/meta.yaml b/script/get-dataset-squad-vocab/meta.yaml index aa1bad21c..f06c24282 100644 --- 
a/script/get-dataset-squad-vocab/meta.yaml +++ b/script/get-dataset-squad-vocab/meta.yaml @@ -4,21 +4,21 @@ automation_uid: 5b4e0237da074764 cache: true category: AI/ML datasets new_env_keys: -- CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH -- CM_DATASET_SQUAD_VOCAB_PATH +- MLC_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH +- MLC_DATASET_SQUAD_VOCAB_PATH prehook_deps: - env: - CM_DOWNLOAD_FINAL_ENV_NAME: CM_DATASET_SQUAD_VOCAB_PATH + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_DATASET_SQUAD_VOCAB_PATH extra_cache_tags: bert,get,dataset-squad-vocab force_cache: true tags: download-and-extract,_wget force_env_keys: - - CM_OUTDIRNAME + - MLC_OUTDIRNAME update_tags_from_env_with_prefix: _url.: - - CM_PACKAGE_URL + - MLC_PACKAGE_URL print_env_at_the_end: - CM_DATASET_SQUAD_VOCAB_PATH: Path to SQUAD vocab file + MLC_DATASET_SQUAD_VOCAB_PATH: Path to SQUAD vocab file tags: - get - aux @@ -33,7 +33,7 @@ variations: from.zenodo: default: true env: - CM_DOWNLOAD_CHECKSUM: 64800d5d8528ce344256daf115d4965e - CM_DOWNLOAD_FILENAME: vocab.txt - CM_PACKAGE_URL: https://zenodo.org/record/3733868/files/vocab.txt + MLC_DOWNLOAD_CHECKSUM: 64800d5d8528ce344256daf115d4965e + MLC_DOWNLOAD_FILENAME: vocab.txt + MLC_PACKAGE_URL: https://zenodo.org/record/3733868/files/vocab.txt group: download-source diff --git a/script/get-dataset-squad/README-extra.md b/script/get-dataset-squad/README-extra.md index 4497abe6b..deb677c5f 100644 --- a/script/get-dataset-squad/README-extra.md +++ b/script/get-dataset-squad/README-extra.md @@ -11,9 +11,9 @@ where [VERSION] is one of * `2.0` ## Exported Variables -* `CM_DATASET_SQUAD_PATH:` Directory path to SQUAD dataset -* `CM_DATASET_SQUAD_TRAIN_PATH:` JSON file path to SQUAD training dataset -* `CM_DATASET_SQUAD_VAL_PATH:` JSON file path to SQUAD validation dataset +* `MLC_DATASET_SQUAD_PATH:` Directory path to SQUAD dataset +* `MLC_DATASET_SQUAD_TRAIN_PATH:` JSON file path to SQUAD training dataset +* `MLC_DATASET_SQUAD_VAL_PATH:` JSON file path to SQUAD validation dataset ## Supported and Tested OS 1. 
Ubuntu 18.04, 20.04, 22.04 diff --git a/script/get-dataset-squad/customize.py b/script/get-dataset-squad/customize.py index c372a75d8..655b7df97 100644 --- a/script/get-dataset-squad/customize.py +++ b/script/get-dataset-squad/customize.py @@ -14,9 +14,9 @@ def preprocess(i): def postprocess(i): env = i['env'] - env['CM_DATASET_SQUAD_PATH'] = os.path.dirname( - env['CM_DATASET_SQUAD_VAL_PATH']) - env['CM_DATASET_PATH'] = os.path.dirname(env['CM_DATASET_SQUAD_VAL_PATH']) - # env['CM_DATASET_SQUAD_VAL_PATH'] = os.path.join(os.getcwd(), env['CM_VAL_FILENAME']) + env['MLC_DATASET_SQUAD_PATH'] = os.path.dirname( + env['MLC_DATASET_SQUAD_VAL_PATH']) + env['MLC_DATASET_PATH'] = os.path.dirname(env['MLC_DATASET_SQUAD_VAL_PATH']) + # env['MLC_DATASET_SQUAD_VAL_PATH'] = os.path.join(os.getcwd(), env['MLC_VAL_FILENAME']) return {'return': 0} diff --git a/script/get-dataset-squad/meta.yaml b/script/get-dataset-squad/meta.yaml index d47fc9ce3..cc55e3b50 100644 --- a/script/get-dataset-squad/meta.yaml +++ b/script/get-dataset-squad/meta.yaml @@ -7,22 +7,22 @@ default_version: '1.1' deps: - tags: get,sys-utils-cm env: - CM_DATASET: SQUAD + MLC_DATASET: SQUAD new_env_keys: -- CM_DATASET_* +- MLC_DATASET_* prehook_deps: - env: - CM_DOWNLOAD_FINAL_ENV_NAME: CM_DATASET_SQUAD_VAL_PATH + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_DATASET_SQUAD_VAL_PATH extra_cache_tags: bert,get,dataset-squad force_cache: true tags: download-and-extract,_wget force_env_keys: - - CM_OUTDIRNAME + - MLC_OUTDIRNAME update_tags_from_env_with_prefix: _url.: - - CM_PACKAGE_URL + - MLC_PACKAGE_URL print_env_at_the_end: - CM_DATASET_SQUAD_VAL_PATH: Path to SQUAD dataset + MLC_DATASET_SQUAD_VAL_PATH: Path to SQUAD dataset tags: - get - dataset @@ -34,15 +34,15 @@ uid: 6651c119c3ae49b3 versions: '1.1': env: - CM_DOWNLOAD_CHECKSUM: 3e85deb501d4e538b6bc56f786231552 - CM_DOWNLOAD_FILENAME: dev-v1.1.json - CM_PACKAGE_URL: https://raw.githubusercontent.com/rajpurkar/SQuAD-explorer/master/dataset/dev-v1.1.json - CM_TRAIN_FILENAME: train-v1.1.json - CM_VAL_FILENAME: dev-v1.1.json + MLC_DOWNLOAD_CHECKSUM: 3e85deb501d4e538b6bc56f786231552 + MLC_DOWNLOAD_FILENAME: dev-v1.1.json + MLC_PACKAGE_URL: https://raw.githubusercontent.com/rajpurkar/SQuAD-explorer/master/dataset/dev-v1.1.json + MLC_TRAIN_FILENAME: train-v1.1.json + MLC_VAL_FILENAME: dev-v1.1.json '2.0': env: - CM_DOWNLOAD_CHECKSUM: 246adae8b7002f8679c027697b0b7cf8 - CM_DOWNLOAD_FILENAME: dev-v2.0.json - CM_PACKAGE_URL: https://raw.githubusercontent.com/rajpurkar/SQuAD-explorer/master/dataset/dev-v2.0.json - CM_TRAIN_FILENAME: train-v2.0.json - CM_VAL_FILENAME: dev-v2.0.json + MLC_DOWNLOAD_CHECKSUM: 246adae8b7002f8679c027697b0b7cf8 + MLC_DOWNLOAD_FILENAME: dev-v2.0.json + MLC_PACKAGE_URL: https://raw.githubusercontent.com/rajpurkar/SQuAD-explorer/master/dataset/dev-v2.0.json + MLC_TRAIN_FILENAME: train-v2.0.json + MLC_VAL_FILENAME: dev-v2.0.json diff --git a/script/get-dlrm-data-mlperf-inference/customize.py b/script/get-dlrm-data-mlperf-inference/customize.py index 2684d2594..366a3425d 100644 --- a/script/get-dlrm-data-mlperf-inference/customize.py +++ b/script/get-dlrm-data-mlperf-inference/customize.py @@ -9,7 +9,7 @@ def preprocess(i): env = i['env'] dlrm_data_path = env.get( - 'CM_DLRM_DATA_PATH', env.get( + 'MLC_DLRM_DATA_PATH', env.get( 'DLRM_DATA_PATH', '')) if dlrm_data_path == '': print( @@ -35,48 +35,48 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') - variation = 
env['CM_DLRM_DATA_VARIATION'] + variation = env['MLC_DLRM_DATA_VARIATION'] if variation == "nvidia": if not os.path.exists(os.path.join(dlrm_data_path, "model")): print(f'model directory is missing inside {dlrm_data_path}') - env['CM_DLRM_MODEL_DOWNLOAD'] = True + env['MLC_DLRM_MODEL_DOWNLOAD'] = True if not os.path.exists(os.path.join(dlrm_data_path, "criteo")): print(f'criteo directory is missing inside {dlrm_data_path}') - env['CM_DLRM_DATASET_DOWNLOAD'] = True + env['MLC_DLRM_DATASET_DOWNLOAD'] = True if not os.path.exists(os.path.join( dlrm_data_path, "model", "model_weights")): print( f'model_weights directory is missing inside {dlrm_data_path}/model') - env['CM_DLRM_MODEL_DOWNLOAD'] = True + env['MLC_DLRM_MODEL_DOWNLOAD'] = True if not os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23")): print(f'day23 directory is missing inside {dlrm_data_path}/day23') - env['CM_DLRM_DATASET_DOWNLOAD'] = True + env['MLC_DLRM_DATASET_DOWNLOAD'] = True if not os.path.exists(os.path.join( dlrm_data_path, "criteo", "day23", "fp32")): print( f'fp32 directory is missing inside {dlrm_data_path}/criteo/day23') - env['CM_DLRM_DATASET_DOWNLOAD'] = True + env['MLC_DLRM_DATASET_DOWNLOAD'] = True if not os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_sparse_multi_hot.npz")) and not os.path.exists( os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_sparse_multi_hot_unpacked")): print( f'day_23_sparse_multi_hot.npz or day_23_sparse_multi_hot_unpacked is missing inside {dlrm_data_path}/criteo/day23/fp32') - env['CM_DLRM_DATASET_DOWNLOAD'] = True + env['MLC_DLRM_DATASET_DOWNLOAD'] = True if not os.path.exists(os.path.join( dlrm_data_path, "criteo", "day23", "fp32", "day_23_dense.npy")): print( f'day_23_dense.npy is missing inside {dlrm_data_path}/criteo/day23/fp32') - env['CM_DLRM_DATASET_DOWNLOAD'] = True + env['MLC_DLRM_DATASET_DOWNLOAD'] = True if not os.path.exists(os.path.join( dlrm_data_path, "criteo", "day23", "fp32", "day_23_labels.npy")): print( f'day_23_labels.npy is missing inside {dlrm_data_path}/criteo/day23/fp32') - env['CM_DLRM_DATASET_DOWNLOAD'] = True + env['MLC_DLRM_DATASET_DOWNLOAD'] = True if not os.path.exists(os.path.join( dlrm_data_path, "criteo", "day23", "raw_data")): - if env.get('CM_CRITEO_DAY23_RAW_DATA_PATH', '') == '': + if env.get('MLC_CRITEO_DAY23_RAW_DATA_PATH', '') == '': return { 'return': 1, 'error': 'Raw data missing inside {dlrm_data_path}/criteo/day23. Specify the target folder through input mapping(--criteo_day23_raw_data_path="path to raw criteo dataset")'} @@ -84,14 +84,14 @@ def preprocess(i): xsep = ' && ' # addition of run command to download the datasets and model - if env.get('CM_DLRM_DATASET_DOWNLOAD', False) == True: - run_cmd += 'cp -r "$CM_CRITEO_PREPROCESSED_PATH"/. ' + \ + if env.get('MLC_DLRM_DATASET_DOWNLOAD', False) == True: + run_cmd += 'cp -r "$MLC_CRITEO_PREPROCESSED_PATH"/. ' + \ os.path.join(dlrm_data_path, "criteo", "day23", "fp32") + xsep - if env.get('CM_DLRM_MODEL_DOWNLOAD', False) == True: - run_cmd += 'cp -r "$CM_ML_MODEL_FILE_WITH_PATH"/. ' + \ + if env.get('MLC_DLRM_MODEL_DOWNLOAD', False) == True: + run_cmd += 'cp -r "$MLC_ML_MODEL_FILE_WITH_PATH"/. 
' + \ os.path.join(dlrm_data_path, "model") + xsep - if env.get('CM_DLRM_DATASET_DOWNLOAD', '') != True: + if env.get('MLC_DLRM_DATASET_DOWNLOAD', '') != True: if not os.path.exists(os.path.join( dlrm_data_path, "criteo", "day23", "fp32", "day_23_sparse_multi_hot_unpacked")): os.system(f"unzip {os.path.join(dlrm_data_path, 'criteo', 'day23', 'fp32', 'day_23_sparse_multi_hot.npz')} -d {os.path.join(dlrm_data_path, 'criteo', 'day23', 'fp32', 'day_23_sparse_multi_hot_unpacked')}") @@ -99,7 +99,7 @@ def preprocess(i): run_cmd += f"unzip {os.path.join(dlrm_data_path, 'criteo', 'day23', 'fp32', 'day_23_sparse_multi_hot.npz')} -d {os.path.join(dlrm_data_path, 'criteo', 'day23', 'fp32', 'day_23_sparse_multi_hot_unpacked')}" + xsep if os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23", "fp32", - "day_23_sparse_multi_hot.npz")) or env['CM_DLRM_DATASET_DOWNLOAD'] == True: + "day_23_sparse_multi_hot.npz")) or env['MLC_DLRM_DATASET_DOWNLOAD'] == True: file_path = os.path.join( dlrm_data_path, "criteo", @@ -131,12 +131,12 @@ def preprocess(i): run_cmd += ("cd {}; md5sum -c {}").format(dir_path, os.path.join(script_path, "checksums.txt")) - env['CM_DLRM_V2_DAY23_FILE_PATH'] = os.path.join( + env['MLC_DLRM_V2_DAY23_FILE_PATH'] = os.path.join( dlrm_data_path, "criteo", "day23", "raw_data") - env['CM_DLRM_V2_AGGREGATION_TRACE_FILE_PATH'] = os.path.join( + env['MLC_DLRM_V2_AGGREGATION_TRACE_FILE_PATH'] = os.path.join( dlrm_data_path, "criteo", "day23", "sample_partition.txt") - env['CM_RUN_CMD'] = run_cmd + env['MLC_RUN_CMD'] = run_cmd return {'return': 0} @@ -145,11 +145,11 @@ def postprocess(i): env = i['env'] - if env.get('CM_DLRM_DATA_PATH', '') == '' and env.get( + if env.get('MLC_DLRM_DATA_PATH', '') == '' and env.get( 'DLRM_DATA_PATH', '') == '': - env['CM_DLRM_DATA_PATH'] = os.getcwd() + env['MLC_DLRM_DATA_PATH'] = os.getcwd() else: - env['CM_GET_DEPENDENT_CACHED_PATH'] = env.get( - 'CM_DLRM_DATA_PATH', env['DLRM_DATA_PATH']) + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env.get( + 'MLC_DLRM_DATA_PATH', env['DLRM_DATA_PATH']) return {'return': 0} diff --git a/script/get-dlrm-data-mlperf-inference/meta.yaml b/script/get-dlrm-data-mlperf-inference/meta.yaml index f287e37db..839a97ecf 100644 --- a/script/get-dlrm-data-mlperf-inference/meta.yaml +++ b/script/get-dlrm-data-mlperf-inference/meta.yaml @@ -12,30 +12,30 @@ uid: 34bdfcd9c8364935 docker: real_run: false new_env_keys: - - CM_DLRM_DATA_PATH + - MLC_DLRM_DATA_PATH - DLRM_DATA_PATH input_mapping: - dlrm_data_path: CM_DLRM_DATA_PATH - criteo_day23_raw_data_path: CM_CRITEO_DAY23_RAW_DATA_PATH + dlrm_data_path: MLC_DLRM_DATA_PATH + criteo_day23_raw_data_path: MLC_CRITEO_DAY23_RAW_DATA_PATH prehook_deps: - tags: get,ml-model,dlrm,_pytorch enable_if_env: - CM_DLRM_MODEL_DOWNLOAD: + MLC_DLRM_MODEL_DOWNLOAD: - "on" - tags: get,dataset,preprocessed,criteo,_mlc enable_if_env: - CM_DLRM_DATASET_DOWNLOAD: + MLC_DLRM_DATASET_DOWNLOAD: - "on" variations: nvidia: group: implementation default: true new_env_keys: - - CM_DLRM_V2_DAY23_FILE_PATH - - CM_DLRM_V2_AGGREGATION_TRACE_FILE_PATH + - MLC_DLRM_V2_DAY23_FILE_PATH + - MLC_DLRM_V2_AGGREGATION_TRACE_FILE_PATH env: - CM_DLRM_DATA_VARIATION: nvidia + MLC_DLRM_DATA_VARIATION: nvidia intel: group: implementation env: - CM_DLRM_DATA_VARIATION: intel + MLC_DLRM_DATA_VARIATION: intel diff --git a/script/get-dlrm-data-mlperf-inference/run.sh b/script/get-dlrm-data-mlperf-inference/run.sh index d1cb7df69..180056e2f 100644 --- a/script/get-dlrm-data-mlperf-inference/run.sh +++ 
b/script/get-dlrm-data-mlperf-inference/run.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,11 +17,11 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi } #Add your run commands here... -run "$CM_RUN_CMD" +run "$MLC_RUN_CMD" diff --git a/script/get-dlrm/customize.py b/script/get-dlrm/customize.py index e7c634f3c..33a8ca2ca 100644 --- a/script/get-dlrm/customize.py +++ b/script/get-dlrm/customize.py @@ -13,17 +13,17 @@ def preprocess(i): env = i['env'] meta = i['meta'] - if 'CM_GIT_DEPTH' not in env: - env['CM_GIT_DEPTH'] = '' + if 'MLC_GIT_DEPTH' not in env: + env['MLC_GIT_DEPTH'] = '' - if 'CM_GIT_RECURSE_SUBMODULES' not in env: - env['CM_GIT_RECURSE_SUBMODULES'] = '' + if 'MLC_GIT_RECURSE_SUBMODULES' not in env: + env['MLC_GIT_RECURSE_SUBMODULES'] = '' - need_version = env.get('CM_VERSION', '') + need_version = env.get('MLC_VERSION', '') versions = meta['versions'] if need_version != '' and not need_version in versions: - env['CM_GIT_CHECKOUT'] = need_version + env['MLC_GIT_CHECKOUT'] = need_version return {'return': 0} diff --git a/script/get-dlrm/meta.yaml b/script/get-dlrm/meta.yaml index cc598990f..a5f7e11f1 100644 --- a/script/get-dlrm/meta.yaml +++ b/script/get-dlrm/meta.yaml @@ -4,9 +4,9 @@ automation_uid: 5b4e0237da074764 cache: true category: AI/ML models default_env: - CM_GIT_DEPTH: --depth 10 - CM_GIT_PATCH: 'no' - CM_GIT_URL: https://github.com/facebookresearch/dlrm.git + MLC_GIT_DEPTH: --depth 10 + MLC_GIT_PATCH: 'no' + MLC_GIT_URL: https://github.com/facebookresearch/dlrm.git default_version: main deps: - tags: detect,os @@ -20,8 +20,8 @@ uid: 63680ac2449a4241 variations: full-history: env: - CM_GIT_DEPTH: '' + MLC_GIT_DEPTH: '' versions: main: env: - CM_GIT_CHECKOUT: main + MLC_GIT_CHECKOUT: main diff --git a/script/get-dlrm/run.sh b/script/get-dlrm/run.sh index 37e9e59a7..5aefd4511 100644 --- a/script/get-dlrm/run.sh +++ b/script/get-dlrm/run.sh @@ -1,12 +1,12 @@ #!/bin/bash CUR_DIR=$PWD -SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH} +SCRIPT_DIR=${MLC_TMP_CURRENT_SCRIPT_PATH} echo "******************************************************" -echo "Cloning DLRM from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT} ${CM_GIT_DEPTH} ${CM_GIT_RECURSE_SUBMODULES}..." +echo "Cloning DLRM from ${MLC_GIT_URL} with branch ${MLC_GIT_CHECKOUT} ${MLC_GIT_DEPTH} ${MLC_GIT_RECURSE_SUBMODULES}..." if [ ! 
-d "dlrm" ]; then - git clone ${CM_GIT_RECURSE_SUBMODULES} -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} ${CM_GIT_DEPTH} dlrm + git clone ${MLC_GIT_RECURSE_SUBMODULES} -b "${MLC_GIT_CHECKOUT}" ${MLC_GIT_URL} ${MLC_GIT_DEPTH} dlrm if [ "${?}" != "0" ]; then exit 1; fi fi diff --git a/script/get-docker/customize.py b/script/get-docker/customize.py index 08975cf52..c8aaf7376 100644 --- a/script/get-docker/customize.py +++ b/script/get-docker/customize.py @@ -15,13 +15,13 @@ def preprocess(i): file_name = 'docker.exe' if os_info['platform'] == 'windows' else 'docker' env['FILE_NAME'] = file_name - if 'CM_DOCKER_BIN_WITH_PATH' not in env: + if 'MLC_DOCKER_BIN_WITH_PATH' not in env: r = i['automation'].find_artifact({'file_name': file_name, 'env': env, 'os_info': os_info, 'default_path_env_key': 'PATH', 'detect_version': True, - 'env_path_key': 'CM_DOCKER_BIN_WITH_PATH', + 'env_path_key': 'MLC_DOCKER_BIN_WITH_PATH', 'run_script_input': i['run_script_input'], 'recursion_spaces': recursion_spaces}) if r['return'] > 0: @@ -40,7 +40,7 @@ def preprocess(i): def detect_version(i): r = i['automation'].parse_version({'match_text': r'[Docker|podman] version\s*([\d.]+)', 'group_number': 1, - 'env_key': 'CM_DOCKER_VERSION', + 'env_key': 'MLC_DOCKER_VERSION', 'which_env': i['env']}) if r['return'] > 0: return r @@ -66,16 +66,16 @@ def postprocess(i): version = r['version'] tool = r['tool'] - found_file_path = env['CM_DOCKER_BIN_WITH_PATH'] + found_file_path = env['MLC_DOCKER_BIN_WITH_PATH'] found_path = os.path.dirname(found_file_path) - env['CM_DOCKER_INSTALLED_PATH'] = found_path + env['MLC_DOCKER_INSTALLED_PATH'] = found_path env['+PATH'] = [found_path] - env['CM_DOCKER_CACHE_TAGS'] = 'version-' + version + env['MLC_DOCKER_CACHE_TAGS'] = 'version-' + version - env['CM_DOCKER_VERSION'] = version + env['MLC_DOCKER_VERSION'] = version - env['CM_CONTAINER_TOOL'] = tool + env['MLC_CONTAINER_TOOL'] = tool return {'return': 0, 'version': version} diff --git a/script/get-docker/meta.yaml b/script/get-docker/meta.yaml index 881039852..b3a5f1f89 100644 --- a/script/get-docker/meta.yaml +++ b/script/get-docker/meta.yaml @@ -9,8 +9,8 @@ docker_input_mapping: {} input_description: {} input_mapping: {} new_env_keys: [ - "CM_DOCKER_VERSION", - "CM_CONTAINER_TOOL" + "MLC_DOCKER_VERSION", + "MLC_CONTAINER_TOOL" ] new_state_keys: [] post_deps: [] diff --git a/script/get-gcc/README-extra.md b/script/get-gcc/README-extra.md index bb9d97694..a20669f48 100644 --- a/script/get-gcc/README-extra.md +++ b/script/get-gcc/README-extra.md @@ -2,13 +2,13 @@ This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed gcc on the system. ## Exported Variables -* `CM_GCC_BIN` -* `CM_GCC_BIN_WITH_PATH` -* `CM_C_COMPILER_BIN` -* `CM_C_COMPILER_WITH_PATH` -* `CM_CXX_COMPILER_BIN` -* `CM_CXX_COMPILER_WITH_PATH` -* `CM_COMPILER_*` +* `MLC_GCC_BIN` +* `MLC_GCC_BIN_WITH_PATH` +* `MLC_C_COMPILER_BIN` +* `MLC_C_COMPILER_WITH_PATH` +* `MLC_CXX_COMPILER_BIN` +* `MLC_CXX_COMPILER_WITH_PATH` +* `MLC_COMPILER_*` ## Supported and Tested OS 1. 
Ubuntu 18.04, 20.04, 22.04 diff --git a/script/get-gcc/customize.py b/script/get-gcc/customize.py index 2203b8a48..a8c8b3099 100644 --- a/script/get-gcc/customize.py +++ b/script/get-gcc/customize.py @@ -11,26 +11,26 @@ def preprocess(i): recursion_spaces = i['recursion_spaces'] file_name_c = 'gcc.exe' if os_info['platform'] == 'windows' else 'gcc' - if env.get('CM_HOST_OS_FLAVOR', '') == 'rhel': - if "12" in env.get('CM_VERSION', '') or "12" in env.get( - 'CM_VERSION_MIN', ''): - if env.get('CM_TMP_PATH', '') == '': - env['CM_TMP_PATH'] = '' - env['CM_TMP_PATH'] += "/opt/rh/gcc-toolset-12/root/usr/bin" - env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' - - if 'CM_GCC_BIN_WITH_PATH' not in env: + if env.get('MLC_HOST_OS_FLAVOR', '') == 'rhel': + if "12" in env.get('MLC_VERSION', '') or "12" in env.get( + 'MLC_VERSION_MIN', ''): + if env.get('MLC_TMP_PATH', '') == '': + env['MLC_TMP_PATH'] = '' + env['MLC_TMP_PATH'] += "/opt/rh/gcc-toolset-12/root/usr/bin" + env['MLC_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' + + if 'MLC_GCC_BIN_WITH_PATH' not in env: r = i['automation'].find_artifact({'file_name': file_name_c, 'env': env, 'os_info': os_info, 'default_path_env_key': 'PATH', 'detect_version': True, - 'env_path_key': 'CM_GCC_BIN_WITH_PATH', + 'env_path_key': 'MLC_GCC_BIN_WITH_PATH', 'run_script_input': i['run_script_input'], 'recursion_spaces': recursion_spaces}) if r['return'] > 0: # if r['return'] == 16: - # if env.get('CM_TMP_FAIL_IF_NOT_FOUND','').lower() == 'yes': + # if env.get('MLC_TMP_FAIL_IF_NOT_FOUND','').lower() == 'yes': # return r # # print (recursion_spaces+' # {}'.format(r['error'])) @@ -46,7 +46,7 @@ def preprocess(i): def detect_version(i): r = i['automation'].parse_version({'match_text': r'\s+([\d.]+)', 'group_number': 1, - 'env_key': 'CM_GCC_VERSION', + 'env_key': 'MLC_GCC_VERSION', 'which_env': i['env']}) if r['return'] > 0: if 'clang' in r['error']: @@ -66,41 +66,41 @@ def postprocess(i): if r['return'] > 0: return r - env['CM_COMPILER_FAMILY'] = 'GCC' + env['MLC_COMPILER_FAMILY'] = 'GCC' version = r['version'] - env['CM_COMPILER_VERSION'] = env['CM_GCC_VERSION'] - env['CM_GCC_CACHE_TAGS'] = 'version-' + version - env['CM_COMPILER_CACHE_TAGS'] = 'version-' + version + ',family-gcc' + env['MLC_COMPILER_VERSION'] = env['MLC_GCC_VERSION'] + env['MLC_GCC_CACHE_TAGS'] = 'version-' + version + env['MLC_COMPILER_CACHE_TAGS'] = 'version-' + version + ',family-gcc' - found_file_path = env['CM_GCC_BIN_WITH_PATH'] + found_file_path = env['MLC_GCC_BIN_WITH_PATH'] found_path = os.path.dirname(found_file_path) - env['CM_GCC_INSTALLED_PATH'] = found_path + env['MLC_GCC_INSTALLED_PATH'] = found_path file_name_c = os.path.basename(found_file_path) # G: changed next line to handle cases like gcc-8 file_name_cpp = file_name_c.replace('gcc', 'g++') env['FILE_NAME_CPP'] = file_name_cpp - env['CM_GCC_BIN'] = file_name_c + env['MLC_GCC_BIN'] = file_name_c # General compiler for general program compilation - env['CM_C_COMPILER_BIN'] = file_name_c - env['CM_C_COMPILER_FLAG_OUTPUT'] = '-o ' - env['CM_C_COMPILER_WITH_PATH'] = found_file_path - env['CM_C_COMPILER_FLAG_VERSION'] = '--version' - - env['CM_CXX_COMPILER_BIN'] = file_name_cpp - env['CM_CXX_COMPILER_WITH_PATH'] = os.path.join(found_path, file_name_cpp) - env['CM_CXX_COMPILER_FLAG_OUTPUT'] = '-o ' - env['CM_CXX_COMPILER_FLAG_VERSION'] = '--version' - - env['CM_COMPILER_FLAGS_FAST'] = "-O3" - env['CM_LINKER_FLAGS_FAST'] = "-O3" - env['CM_COMPILER_FLAGS_DEBUG'] = "-O0" - env['CM_LINKER_FLAGS_DEBUG'] = "-O0" - env['CM_COMPILER_FLAGS_DEFAULT'] = 
"-O2" - env['CM_LINKER_FLAGS_DEFAULT'] = "-O2" + env['MLC_C_COMPILER_BIN'] = file_name_c + env['MLC_C_COMPILER_FLAG_OUTPUT'] = '-o ' + env['MLC_C_COMPILER_WITH_PATH'] = found_file_path + env['MLC_C_COMPILER_FLAG_VERSION'] = '--version' + + env['MLC_CXX_COMPILER_BIN'] = file_name_cpp + env['MLC_CXX_COMPILER_WITH_PATH'] = os.path.join(found_path, file_name_cpp) + env['MLC_CXX_COMPILER_FLAG_OUTPUT'] = '-o ' + env['MLC_CXX_COMPILER_FLAG_VERSION'] = '--version' + + env['MLC_COMPILER_FLAGS_FAST'] = "-O3" + env['MLC_LINKER_FLAGS_FAST'] = "-O3" + env['MLC_COMPILER_FLAGS_DEBUG'] = "-O0" + env['MLC_LINKER_FLAGS_DEBUG'] = "-O0" + env['MLC_COMPILER_FLAGS_DEFAULT'] = "-O2" + env['MLC_LINKER_FLAGS_DEFAULT'] = "-O2" return {'return': 0, 'version': version} diff --git a/script/get-gcc/meta.yaml b/script/get-gcc/meta.yaml index f67a59d2e..27a3b6feb 100644 --- a/script/get-gcc/meta.yaml +++ b/script/get-gcc/meta.yaml @@ -8,16 +8,16 @@ deps: - tags: detect,os name: Detect or install GCC compiler new_env_keys: -- CM_GCC_* -- CM_C_COMPILER_* -- CM_CXX_COMPILER_* -- CM_COMPILER_* -- CM_LINKER_* +- MLC_GCC_* +- MLC_C_COMPILER_* +- MLC_CXX_COMPILER_* +- MLC_COMPILER_* +- MLC_LINKER_* - + CFLAGS - + CXXFLAGS - + FFLAGS - + LDFLAGS -- +CM_HOST_OS_DEFAULT_INCLUDE_PATH +- +MLC_HOST_OS_DEFAULT_INCLUDE_PATH - +PATH post_deps: - tags: get,compiler-flags diff --git a/script/get-gcc/run.bat b/script/get-gcc/run.bat index fac96d834..c459d6218 100644 --- a/script/get-gcc/run.bat +++ b/script/get-gcc/run.bat @@ -1,3 +1,3 @@ -%CM_GCC_BIN_WITH_PATH% --version > tmp-ver.out +%MLC_GCC_BIN_WITH_PATH% --version > tmp-ver.out IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/get-gcc/run.sh b/script/get-gcc/run.sh index 08be81f21..e5b397bf6 100644 --- a/script/get-gcc/run.sh +++ b/script/get-gcc/run.sh @@ -1,5 +1,5 @@ #!/bin/bash -gcc_bin=${CM_GCC_BIN_WITH_PATH} +gcc_bin=${MLC_GCC_BIN_WITH_PATH} echo "${gcc_bin} --version" ${gcc_bin} --version > tmp-ver.out diff --git a/script/get-generic-python-lib/customize.py b/script/get-generic-python-lib/customize.py index 2b259feca..85b7326cc 100644 --- a/script/get-generic-python-lib/customize.py +++ b/script/get-generic-python-lib/customize.py @@ -9,9 +9,9 @@ def preprocess(i): meta = i['meta'] automation = i['automation'] run_script_input = i['run_script_input'] - pip_version = env.get('CM_PIP_VERSION', '').strip().split('.') + pip_version = env.get('MLC_PIP_VERSION', '').strip().split('.') - package_name = env.get('CM_GENERIC_PYTHON_PACKAGE_NAME', '').strip() + package_name = env.get('MLC_GENERIC_PYTHON_PACKAGE_NAME', '').strip() if package_name == '': return automation._available_variations({'meta': meta}) @@ -20,37 +20,37 @@ def preprocess(i): # 20240214: ONNXRuntime 1.17.0 now support CUDA 12 so we remove next check # TBD: if we have explicit version for ONNX < 17.0.0 and CUDA is >= 12, # we should add a check to fail ... 
- cuda_version = env.get('CM_CUDA_VERSION', '').strip() + cuda_version = env.get('MLC_CUDA_VERSION', '').strip() # if cuda_version!='': # cuda_version_split = cuda_version.split('.') # if int(cuda_version_split[0]) >= 12: -# # env['CM_INSTALL_ONNXRUNTIME_GPU_FROM_SRC'] = "yes" +# # env['MLC_INSTALL_ONNXRUNTIME_GPU_FROM_SRC'] = "yes" # return {'return': 1, 'error':'at this moment, PIP package # "onnxruntime_gpu" needs CUDA < 12'} - extra = env.get('CM_GENERIC_PYTHON_PIP_EXTRA', '') + extra = env.get('MLC_GENERIC_PYTHON_PIP_EXTRA', '') if (pip_version and len(pip_version) > 1 and int(pip_version[0]) >= 23) and ( '--break-system-packages' not in extra): extra += ' --break-system-packages ' - env['CM_PYTHON_PIP_COMMON_EXTRA'] = " --break-system-packages" + env['MLC_PYTHON_PIP_COMMON_EXTRA'] = " --break-system-packages" - if env.get('CM_GENERIC_PYTHON_PACKAGE_INSTALL_DEPS', '') == "no": - env['CM_PYTHON_PIP_COMMON_EXTRA'] = " --no-deps" + if env.get('MLC_GENERIC_PYTHON_PACKAGE_INSTALL_DEPS', '') == "no": + env['MLC_PYTHON_PIP_COMMON_EXTRA'] = " --no-deps" - if env.get('CM_PIP_INSTALL_NEEDS_USER', '') == "yes": - env['CM_PYTHON_PIP_COMMON_EXTRA'] = " --user" + if env.get('MLC_PIP_INSTALL_NEEDS_USER', '') == "yes": + env['MLC_PYTHON_PIP_COMMON_EXTRA'] = " --user" - if env.get('CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS', '') != '': + if env.get('MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS', '') != '': r = automation.run_native_script( {'run_script_input': run_script_input, 'env': env, 'script_name': 'uninstall_deps'}) if r['return'] > 0: return r - prepare_env_key = env.get('CM_GENERIC_PYTHON_PACKAGE_NAME', '') + prepare_env_key = env.get('MLC_GENERIC_PYTHON_PACKAGE_NAME', '') for x in ["-", "[", "]"]: prepare_env_key = prepare_env_key.replace(x, "_") - env['CM_TMP_PYTHON_PACKAGE_NAME_ENV'] = prepare_env_key.upper() + env['MLC_TMP_PYTHON_PACKAGE_NAME_ENV'] = prepare_env_key.upper() recursion_spaces = i['recursion_spaces'] @@ -61,7 +61,7 @@ def preprocess(i): force_install = ( env.get( - 'CM_TMP_PYTHON_PACKAGE_FORCE_INSTALL', + 'MLC_TMP_PYTHON_PACKAGE_FORCE_INSTALL', '') in [ 'yes', 'true', @@ -72,8 +72,8 @@ def preprocess(i): if r['return'] == 16 or force_install: # Clean detected version env if exists otherwise takes detected version # for example, when we reinstall generic python lib package - env_version_key = 'CM_' + \ - env['CM_TMP_PYTHON_PACKAGE_NAME_ENV'].upper() + '_VERSION' + env_version_key = 'MLC_' + \ + env['MLC_TMP_PYTHON_PACKAGE_NAME_ENV'].upper() + '_VERSION' if env.get(env_version_key, '') != '': del (env[env_version_key]) @@ -82,36 +82,36 @@ def preprocess(i): extra += ' --upgrade --no-deps --force-reinstall' # Check index URL - index_url = env.get('CM_GENERIC_PYTHON_PIP_INDEX_URL', '').strip() + index_url = env.get('MLC_GENERIC_PYTHON_PIP_INDEX_URL', '').strip() if index_url != '': # Check special cases - if '${CM_TORCH_CUDA}' in index_url: + if '${MLC_TORCH_CUDA}' in index_url: index_url = index_url.replace( - '${CM_TORCH_CUDA}', env.get('CM_TORCH_CUDA')) + '${MLC_TORCH_CUDA}', env.get('MLC_TORCH_CUDA')) extra += ' --index-url ' + index_url # Check extra index URL extra_index_url = env.get( - 'CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL', '').strip() + 'MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL', '').strip() if extra_index_url != '': # Check special cases - if '${CM_TORCH_CUDA}' in extra_index_url: + if '${MLC_TORCH_CUDA}' in extra_index_url: extra_index_url = extra_index_url.replace( - '${CM_TORCH_CUDA}', env.get('CM_TORCH_CUDA')) + '${MLC_TORCH_CUDA}', env.get('MLC_TORCH_CUDA')) extra += ' 
--extra-index-url ' + extra_index_url # check find-links find_links_url = env.get( - 'CM_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL', '').strip() + 'MLC_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL', '').strip() if find_links_url != '': extra += ' -f ' + find_links_url # Check update - if env.get('CM_GENERIC_PYTHON_PIP_UPDATE', '') in [ + if env.get('MLC_GENERIC_PYTHON_PIP_UPDATE', '') in [ True, 'true', 'yes', 'on']: extra += ' -U' @@ -119,7 +119,7 @@ def preprocess(i): print(recursion_spaces + ' Extra PIP CMD: ' + extra) print('') - env['CM_GENERIC_PYTHON_PIP_EXTRA'] = extra + env['MLC_GENERIC_PYTHON_PIP_EXTRA'] = extra r = automation.run_native_script( {'run_script_input': run_script_input, 'env': env, 'script_name': 'install'}) @@ -134,11 +134,11 @@ def detect_version(i): env = i['env'] - if env.get('CM_TMP_PYTHON_PACKAGE_NAME_ENV', '') != '': - env_version_key = 'CM_' + \ - env['CM_TMP_PYTHON_PACKAGE_NAME_ENV'].upper() + '_VERSION' + if env.get('MLC_TMP_PYTHON_PACKAGE_NAME_ENV', '') != '': + env_version_key = 'MLC_' + \ + env['MLC_TMP_PYTHON_PACKAGE_NAME_ENV'].upper() + '_VERSION' else: - env_version_key = 'CM_CACHE_TMP_VERSION' + env_version_key = 'MLC_CACHE_TMP_VERSION' r = i['automation'].parse_version({'match_text': r'\s*([\d.a-z\-]+)', 'group_number': 1, @@ -150,7 +150,7 @@ def detect_version(i): version = r['version'] current_detected_version = version - if env.get('CM_TMP_SILENT', '') != 'yes': + if env.get('MLC_TMP_SILENT', '') != 'yes': print( i['recursion_spaces'] + ' Detected version: {}'.format(version)) @@ -162,8 +162,8 @@ def postprocess(i): env = i['env'] - env_version_key = 'CM_' + \ - env['CM_TMP_PYTHON_PACKAGE_NAME_ENV'].upper() + '_VERSION' + env_version_key = 'MLC_' + \ + env['MLC_TMP_PYTHON_PACKAGE_NAME_ENV'].upper() + '_VERSION' if env.get(env_version_key, '') != '': version = env[env_version_key] @@ -174,19 +174,19 @@ def postprocess(i): version = r['version'] - env['CM_PYTHONLIB_' + env['CM_TMP_PYTHON_PACKAGE_NAME_ENV'] + + env['MLC_PYTHONLIB_' + env['MLC_TMP_PYTHON_PACKAGE_NAME_ENV'] + '_CACHE_TAGS'] = 'version-' + version import pkgutil - package_name = env.get('CM_GENERIC_PYTHON_PACKAGE_NAME', '').strip() + package_name = env.get('MLC_GENERIC_PYTHON_PACKAGE_NAME', '').strip() package = pkgutil.get_loader(package_name) if package: installed_file_path = package.get_filename() - env['CM_GET_DEPENDENT_CACHED_PATH'] = installed_file_path + env['MLC_GET_DEPENDENT_CACHED_PATH'] = installed_file_path - pip_version = env.get('CM_PIP_VERSION', '').strip().split('.') + pip_version = env.get('MLC_PIP_VERSION', '').strip().split('.') if pip_version and len(pip_version) > 1 and int(pip_version[0]) >= 23: - env['CM_PYTHON_PIP_COMMON_EXTRA'] = " --break-system-packages" + env['MLC_PYTHON_PIP_COMMON_EXTRA'] = " --break-system-packages" if version.count('.') > 1: env[f"{env_version_key}_MAJOR_MINOR"] = ".".join( diff --git a/script/get-generic-python-lib/detect-version.py b/script/get-generic-python-lib/detect-version.py index fc879f04e..98e0ee022 100644 --- a/script/get-generic-python-lib/detect-version.py +++ b/script/get-generic-python-lib/detect-version.py @@ -1,7 +1,7 @@ import os import sys -package_name = os.environ.get('CM_GENERIC_PYTHON_PACKAGE_NAME', '') +package_name = os.environ.get('MLC_GENERIC_PYTHON_PACKAGE_NAME', '') package_name = package_name.split("[")[0] filename = 'tmp-ver.out' diff --git a/script/get-generic-python-lib/install.bat b/script/get-generic-python-lib/install.bat index 0a5967462..e74450c72 100644 --- a/script/get-generic-python-lib/install.bat +++ 
b/script/get-generic-python-lib/install.bat @@ -1,13 +1,13 @@ echo. -if NOT "%CM_GENERIC_PYTHON_PIP_URL%" == "" ( +if NOT "%MLC_GENERIC_PYTHON_PIP_URL%" == "" ( - %CM_PYTHON_BIN_WITH_PATH% -m pip install %CM_GENERIC_PYTHON_PIP_URL% %CM_GENERIC_PYTHON_PIP_EXTRA% + %MLC_PYTHON_BIN_WITH_PATH% -m pip install %MLC_GENERIC_PYTHON_PIP_URL% %MLC_GENERIC_PYTHON_PIP_EXTRA% IF %ERRORLEVEL% NEQ 0 EXIT 1 ) else ( - %CM_PYTHON_BIN_WITH_PATH% -m pip install %CM_GENERIC_PYTHON_PACKAGE_NAME%%CM_TMP_PIP_VERSION_STRING% %CM_GENERIC_PYTHON_PIP_EXTRA% + %MLC_PYTHON_BIN_WITH_PATH% -m pip install %MLC_GENERIC_PYTHON_PACKAGE_NAME%%MLC_TMP_PIP_VERSION_STRING% %MLC_GENERIC_PYTHON_PIP_EXTRA% IF %ERRORLEVEL% NEQ 0 EXIT 1 ) diff --git a/script/get-generic-python-lib/install.sh b/script/get-generic-python-lib/install.sh index b79aa8146..655c6d869 100644 --- a/script/get-generic-python-lib/install.sh +++ b/script/get-generic-python-lib/install.sh @@ -2,11 +2,11 @@ echo "" -if [[ ${CM_GENERIC_PYTHON_PACKAGE_VARIANT} == "nvidia-apex-depreciated" ]]; then - cd ${CM_GIT_REPO_CHECKOUT_PATH} - cmd="${CM_PYTHON_BIN_WITH_PATH} -m pip install -v --disable-pip-version-check --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext\" ./" +if [[ ${MLC_GENERIC_PYTHON_PACKAGE_VARIANT} == "nvidia-apex-depreciated" ]]; then + cd ${MLC_GIT_REPO_CHECKOUT_PATH} + cmd="${MLC_PYTHON_BIN_WITH_PATH} -m pip install -v --disable-pip-version-check --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext\" ./" echo $cmd - if [[ -n ${CM_PIP_ERROR_SKIP} ]]; then + if [[ -n ${MLC_PIP_ERROR_SKIP} ]]; then eval $cmd else eval $cmd @@ -15,31 +15,31 @@ if [[ ${CM_GENERIC_PYTHON_PACKAGE_VARIANT} == "nvidia-apex-depreciated" ]]; then exit 0 fi -if [[ ${CM_GENERIC_PYTHON_PACKAGE_NAME} == "tensorflow_old" ]]; then - if [[ ${CM_HOST_OS_FLAVOR} == "macos" ]]; then - if [[ -n ${CM_PIP_ERROR_SKIP} ]]; then - . ${CM_TMP_CURRENT_SCRIPT_PATH}/tensorflow/run-macos.sh +if [[ ${MLC_GENERIC_PYTHON_PACKAGE_NAME} == "tensorflow_old" ]]; then + if [[ ${MLC_HOST_OS_FLAVOR} == "macos" ]]; then + if [[ -n ${MLC_PIP_ERROR_SKIP} ]]; then + . ${MLC_TMP_CURRENT_SCRIPT_PATH}/tensorflow/run-macos.sh else - . ${CM_TMP_CURRENT_SCRIPT_PATH}/tensorflow/run-macos.sh + . ${MLC_TMP_CURRENT_SCRIPT_PATH}/tensorflow/run-macos.sh test $? -eq 0 || exit $? fi exit 0 fi - if [[ ${CM_HOST_PLATFORM_FLAVOR} == "aarch64" ]]; then - if [[ -n ${CM_PIP_ERROR_SKIP} ]]; then - . ${CM_TMP_CURRENT_SCRIPT_PATH}/tensorflow/run-aarch64.sh + if [[ ${MLC_HOST_PLATFORM_FLAVOR} == "aarch64" ]]; then + if [[ -n ${MLC_PIP_ERROR_SKIP} ]]; then + . ${MLC_TMP_CURRENT_SCRIPT_PATH}/tensorflow/run-aarch64.sh else - . ${CM_TMP_CURRENT_SCRIPT_PATH}/tensorflow/run-aarch64.sh + . ${MLC_TMP_CURRENT_SCRIPT_PATH}/tensorflow/run-aarch64.sh test $? -eq 0 || exit $? 
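
[Stripped of the apex and tensorflow special cases, install.bat and install.sh build the same command. A minimal Python rendering of that composition — a sketch assuming the MLC_* variables above are exported, not a drop-in replacement for the shell scripts.]

    import os
    import subprocess

    def build_pip_install_cmd(env):
        # Mirrors install.sh: a direct MLC_GENERIC_PYTHON_PIP_URL wins;
        # otherwise the package name plus the pinned-version suffix is used.
        python = env['MLC_PYTHON_BIN_WITH_PATH']
        target = env.get('MLC_GENERIC_PYTHON_PIP_URL') or (
            env['MLC_GENERIC_PYTHON_PACKAGE_NAME']
            + env.get('MLC_TMP_PIP_VERSION_STRING', ''))
        extra = env.get('MLC_GENERIC_PYTHON_PIP_EXTRA', '')
        return f'{python} -m pip install "{target}" {extra}'

    rc = subprocess.call(build_pip_install_cmd(dict(os.environ)), shell=True)
    if rc != 0 and not os.environ.get('MLC_PIP_ERROR_SKIP'):
        # install.sh exits on failure unless MLC_PIP_ERROR_SKIP is set
        raise SystemExit(rc)
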
fi exit 0 fi fi -if [[ -n ${CM_GENERIC_PYTHON_PIP_URL} ]]; then - cmd="${CM_PYTHON_BIN_WITH_PATH} -m pip install \"${CM_GENERIC_PYTHON_PIP_URL}\" ${CM_GENERIC_PYTHON_PIP_EXTRA}" +if [[ -n ${MLC_GENERIC_PYTHON_PIP_URL} ]]; then + cmd="${MLC_PYTHON_BIN_WITH_PATH} -m pip install \"${MLC_GENERIC_PYTHON_PIP_URL}\" ${MLC_GENERIC_PYTHON_PIP_EXTRA}" echo $cmd - if [[ -n ${CM_PIP_ERROR_SKIP} ]]; then + if [[ -n ${MLC_PIP_ERROR_SKIP} ]]; then eval $cmd else eval $cmd @@ -48,10 +48,10 @@ if [[ -n ${CM_GENERIC_PYTHON_PIP_URL} ]]; then exit 0 fi -cmd="${CM_PYTHON_BIN_WITH_PATH} -m pip install \"${CM_GENERIC_PYTHON_PACKAGE_NAME}${CM_TMP_PIP_VERSION_STRING}\" ${CM_GENERIC_PYTHON_PIP_EXTRA}" +cmd="${MLC_PYTHON_BIN_WITH_PATH} -m pip install \"${MLC_GENERIC_PYTHON_PACKAGE_NAME}${MLC_TMP_PIP_VERSION_STRING}\" ${MLC_GENERIC_PYTHON_PIP_EXTRA}" echo $cmd -if [[ -n ${CM_PIP_ERROR_SKIP} ]]; then +if [[ -n ${MLC_PIP_ERROR_SKIP} ]]; then eval $cmd else eval $cmd diff --git a/script/get-generic-python-lib/meta.yaml b/script/get-generic-python-lib/meta.yaml index ee0a4cdd1..6eb0b0a78 100644 --- a/script/get-generic-python-lib/meta.yaml +++ b/script/get-generic-python-lib/meta.yaml @@ -11,7 +11,7 @@ deps: - python - python3 skip_if_env: - CM_TMP_USE_CUSTOM_PYTHON: + MLC_TMP_USE_CUSTOM_PYTHON: - 'on' tags: get,python3 dynamic: true @@ -19,23 +19,23 @@ deps: - python-pip - pip skip_if_env: - CM_GENERIC_PYTHON_PACKAGE_NAME: + MLC_GENERIC_PYTHON_PACKAGE_NAME: - pip tags: get,generic-python-lib,_pip extra_cache_tags_from_env: -- env: CM_PYTHON_CACHE_TAGS +- env: MLC_PYTHON_CACHE_TAGS prefix: python- input_mapping: - extra_index_url: CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL - force_install: CM_TMP_PYTHON_PACKAGE_FORCE_INSTALL - index_url: CM_GENERIC_PYTHON_PIP_INDEX_URL + extra_index_url: MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL + force_install: MLC_TMP_PYTHON_PACKAGE_FORCE_INSTALL + index_url: MLC_GENERIC_PYTHON_PIP_INDEX_URL local_env_keys: -- CM_GENERIC_PYTHON_PACKAGE_VARIANT +- MLC_GENERIC_PYTHON_PACKAGE_VARIANT new_env_keys: -- CM_PYTHONLIB_* +- MLC_PYTHONLIB_* prehook_deps: - enable_if_env: - CM_INSTALL_ONNXRUNTIME_GPU_FROM_SRC: + MLC_INSTALL_ONNXRUNTIME_GPU_FROM_SRC: - 'yes' tags: install,onnxruntime,from.src,_cuda tags: @@ -49,63 +49,63 @@ uid: 94b62a682bc44791 variations: Pillow: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: Pillow + MLC_GENERIC_PYTHON_PACKAGE_NAME: Pillow new_env_keys: - - CM_PILLOW_VERSION + - MLC_PILLOW_VERSION anthropic: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: anthropic + MLC_GENERIC_PYTHON_PACKAGE_NAME: anthropic new_env_keys: - - CM_ANTHROPIC_VERSION + - MLC_ANTHROPIC_VERSION apache-tvm: deps: - tags: get,generic-python-lib,_typing_extensions env: - CM_GENERIC_PYTHON_PACKAGE_NAME: apache-tvm - CM_GENERIC_PYTHON_PIP_EXTRA: ' --pre' + MLC_GENERIC_PYTHON_PACKAGE_NAME: apache-tvm + MLC_GENERIC_PYTHON_PIP_EXTRA: ' --pre' new_env_keys: - - CM_APACHE_TVM_VERSION + - MLC_APACHE_TVM_VERSION apex: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: apex + MLC_GENERIC_PYTHON_PACKAGE_NAME: apex new_env_keys: - - CM_APEX_VERSION + - MLC_APEX_VERSION async_timeout: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: async_timeout + MLC_GENERIC_PYTHON_PACKAGE_NAME: async_timeout new_env_keys: - - CM_ASYNC_TIMEOUT_VERSION + - MLC_ASYNC_TIMEOUT_VERSION attr: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: attr + MLC_GENERIC_PYTHON_PACKAGE_NAME: attr new_env_keys: - - CM_ATTR_VERSION + - MLC_ATTR_VERSION attrs: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: attrs + MLC_GENERIC_PYTHON_PACKAGE_NAME: attrs new_env_keys: - - CM_ATTRS_VERSION + - MLC_ATTRS_VERSION 
boto3: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: boto3 + MLC_GENERIC_PYTHON_PACKAGE_NAME: boto3 new_env_keys: - - CM_BOTO3_VERSION + - MLC_BOTO3_VERSION cloudpickle: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: cloudpickle + MLC_GENERIC_PYTHON_PACKAGE_NAME: cloudpickle new_env_keys: - - CM_CLOUDPICKLE_VERSION + - MLC_CLOUDPICKLE_VERSION cmind: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: cmind + MLC_GENERIC_PYTHON_PACKAGE_NAME: cmind new_env_keys: - - CM_CMIND_VERSION + - MLC_CMIND_VERSION colored: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: colored - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://pypi.ngc.nvidia.com + MLC_GENERIC_PYTHON_PACKAGE_NAME: colored + MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://pypi.ngc.nvidia.com new_env_keys: - - CM_COLORED_VERSION + - MLC_COLORED_VERSION conda.#: ad: python-pip: @@ -118,135 +118,135 @@ variations: - cuda tags: get,cuda env: - CM_GENERIC_PYTHON_PACKAGE_NAME: cupy + MLC_GENERIC_PYTHON_PACKAGE_NAME: cupy new_env_keys: - - CM_CUPY_VERSION + - MLC_CUPY_VERSION custom-python: ad: python-pip: tags: _custom-python env: - CM_TMP_USE_CUSTOM_PYTHON: 'on' + MLC_TMP_USE_CUSTOM_PYTHON: 'on' cxx11-abi: env: {} datasets: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: datasets + MLC_GENERIC_PYTHON_PACKAGE_NAME: datasets new_env_keys: - - CM_DATASETS_VERSION + - MLC_DATASETS_VERSION decorator: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: decorator + MLC_GENERIC_PYTHON_PACKAGE_NAME: decorator new_env_keys: - - CM_DECORATOR_VERSION + - MLC_DECORATOR_VERSION deepsparse: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: deepsparse + MLC_GENERIC_PYTHON_PACKAGE_NAME: deepsparse new_env_keys: - - CM_DEEPSPARSE_VERSION + - MLC_DEEPSPARSE_VERSION dllogger: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: dllogger - CM_GENERIC_PYTHON_PIP_URL: git+https://github.com/NVIDIA/dllogger#egg=dllogger + MLC_GENERIC_PYTHON_PACKAGE_NAME: dllogger + MLC_GENERIC_PYTHON_PIP_URL: git+https://github.com/NVIDIA/dllogger#egg=dllogger extra-index-url.#: env: - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: '#' + MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: '#' fiftyone: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: fiftyone + MLC_GENERIC_PYTHON_PACKAGE_NAME: fiftyone new_env_keys: - - CM_FIFTYONE_VERSION + - MLC_FIFTYONE_VERSION google-api-python-client: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: google_api_python_client + MLC_GENERIC_PYTHON_PACKAGE_NAME: google_api_python_client new_env_keys: - - CM_GOOGLE_API_PYTHON_CLIENT_VERSION + - MLC_GOOGLE_API_PYTHON_CLIENT_VERSION google-auth-oauthlib: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: google_auth_oauthlib + MLC_GENERIC_PYTHON_PACKAGE_NAME: google_auth_oauthlib new_env_keys: - - CM_GOOGLE_AUTH_OAUTHLIB_VERSION + - MLC_GOOGLE_AUTH_OAUTHLIB_VERSION huggingface_hub: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: huggingface_hub + MLC_GENERIC_PYTHON_PACKAGE_NAME: huggingface_hub new_env_keys: - - CM_HUGGINGFACE_HUB_VERSION + - MLC_HUGGINGFACE_HUB_VERSION index-url.#: env: - CM_GENERIC_PYTHON_PIP_INDEX_URL: '#' + MLC_GENERIC_PYTHON_PIP_INDEX_URL: '#' inflect: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: inflect + MLC_GENERIC_PYTHON_PACKAGE_NAME: inflect new_env_keys: - - CM_INFLECT_VERSION + - MLC_INFLECT_VERSION jax: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: jax + MLC_GENERIC_PYTHON_PACKAGE_NAME: jax new_env_keys: - - CM_JAX_VERSION* + - MLC_JAX_VERSION* jax_cuda: deps: - names: - cuda tags: get,cuda env: - CM_GENERIC_PYTHON_PACKAGE_NAME: jax[cuda] - CM_GENERIC_PYTHON_PIP_EXTRA: -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html - CM_JAX_VERSION_EXTRA: CUDA + MLC_GENERIC_PYTHON_PACKAGE_NAME: jax[cuda] + 
MLC_GENERIC_PYTHON_PIP_EXTRA: -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html + MLC_JAX_VERSION_EXTRA: CUDA new_env_keys: - - CM_JAX_VERSION* + - MLC_JAX_VERSION* librosa: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: librosa + MLC_GENERIC_PYTHON_PACKAGE_NAME: librosa new_env_keys: - - CM_LIBROSA_VERSION + - MLC_LIBROSA_VERSION matplotlib: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: matplotlib + MLC_GENERIC_PYTHON_PACKAGE_NAME: matplotlib new_env_keys: - - CM_MATPLOTLIB_VERSION + - MLC_MATPLOTLIB_VERSION mlperf_loadgen: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: mlperf_loadgen - CM_GENERIC_PYTHON_PIP_URL: git+https://github.com/mlcommons/inference.git#subdirectory=loadgen + MLC_GENERIC_PYTHON_PACKAGE_NAME: mlperf_loadgen + MLC_GENERIC_PYTHON_PIP_URL: git+https://github.com/mlcommons/inference.git#subdirectory=loadgen new_env_keys: - - CM_MLPERF_LOADGEN_VERSION + - MLC_MLPERF_LOADGEN_VERSION mlperf_logging: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: mlperf_logging - CM_GENERIC_PYTHON_PIP_URL: git+https://github.com/mlperf/logging.git + MLC_GENERIC_PYTHON_PACKAGE_NAME: mlperf_logging + MLC_GENERIC_PYTHON_PIP_URL: git+https://github.com/mlperf/logging.git new_env_keys: - - CM_MLPERF_LOGGING_VERSION + - MLC_MLPERF_LOGGING_VERSION mpld3: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: mpld3 + MLC_GENERIC_PYTHON_PACKAGE_NAME: mpld3 new_env_keys: - - CM_MPLD3_VERSION + - MLC_MPLD3_VERSION mxeval: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: mxeval - CM_GENERIC_PYTHON_PIP_URL: git+https://github.com/amazon-science/mxeval.git - CM_PIP_ERROR_SKIP: 'true' + MLC_GENERIC_PYTHON_PACKAGE_NAME: mxeval + MLC_GENERIC_PYTHON_PIP_URL: git+https://github.com/amazon-science/mxeval.git + MLC_PIP_ERROR_SKIP: 'true' nibabel: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: nibabel + MLC_GENERIC_PYTHON_PACKAGE_NAME: nibabel new_env_keys: - - CM_NIBABEL_VERSION + - MLC_NIBABEL_VERSION no-deps: env: - CM_GENERIC_PYTHON_PACKAGE_INSTALL_DEPS: 'no' + MLC_GENERIC_PYTHON_PACKAGE_INSTALL_DEPS: 'no' numpy: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: numpy + MLC_GENERIC_PYTHON_PACKAGE_NAME: numpy new_env_keys: - - CM_NUMPY_VERSION + - MLC_NUMPY_VERSION nvidia-apex: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: apex - CM_GENERIC_PYTHON_PACKAGE_VARIANT: nvidia-apex - CM_GENERIC_PYTHON_PIP_URL: git+https://github.com/nvidia/apex@0da3ffb92ee6fbe5336602f0e3989db1cd16f880 + MLC_GENERIC_PYTHON_PACKAGE_NAME: apex + MLC_GENERIC_PYTHON_PACKAGE_VARIANT: nvidia-apex + MLC_GENERIC_PYTHON_PIP_URL: git+https://github.com/nvidia/apex@0da3ffb92ee6fbe5336602f0e3989db1cd16f880 new_env_keys: - - CM_NVIDIA_APEX_VERSION + - MLC_NVIDIA_APEX_VERSION nvidia-apex-from-src: deps: - names: @@ -256,25 +256,25 @@ variations: - torch tags: get,generic-python-lib,_torch_cuda - env: - CM_GIT_CHECKOUT_FOLDER: apex + MLC_GIT_CHECKOUT_FOLDER: apex extra_cache_tags: nvidia-apex tags: get,git,repo,_repo.https://github.com/NVIDIA/apex,_tag.23.05 env: - CM_GENERIC_PYTHON_PACKAGE_NAME: apex - CM_GENERIC_PYTHON_PACKAGE_VARIANT: nvidia-apex + MLC_GENERIC_PYTHON_PACKAGE_NAME: apex + MLC_GENERIC_PYTHON_PACKAGE_VARIANT: nvidia-apex new_env_keys: - - CM_NVIDIA_APEX_VERSION + - MLC_NVIDIA_APEX_VERSION nvidia-dali: deps: - names: - cuda tags: get,cuda env: - CM_GENERIC_PYTHON_PACKAGE_NAME: nvidia-dali-cuda120 - CM_GENERIC_PYTHON_PIP_EXTRA: ' --upgrade --default-timeout=900' - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://developer.download.nvidia.com/compute/redist + MLC_GENERIC_PYTHON_PACKAGE_NAME: nvidia-dali-cuda120 + MLC_GENERIC_PYTHON_PIP_EXTRA: ' --upgrade --default-timeout=900' + 
MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://developer.download.nvidia.com/compute/redist new_env_keys: - - CM_NVIDIA_DALI_VERSION + - MLC_NVIDIA_DALI_VERSION nvidia-pycocotools: base: - pycocotools @@ -286,149 +286,149 @@ variations: - numpy tags: get,generic-python-lib,_package.numpy env: - CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: pycocotools - CM_GENERIC_PYTHON_PIP_URL: pycocotools@git+https://github.com/NVIDIA/cocoapi#subdirectory=PythonAPI + MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: pycocotools + MLC_GENERIC_PYTHON_PIP_URL: pycocotools@git+https://github.com/NVIDIA/cocoapi#subdirectory=PythonAPI nvidia-pyindex: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: nvidia-pyindex + MLC_GENERIC_PYTHON_PACKAGE_NAME: nvidia-pyindex new_env_keys: - - CM_NVIDIA_PYINDEX_VERSION + - MLC_NVIDIA_PYINDEX_VERSION nvidia-tensorrt: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: nvidia-tensorrt + MLC_GENERIC_PYTHON_PACKAGE_NAME: nvidia-tensorrt new_env_keys: - - CM_NVIDIA_TENSORRT_VERSION + - MLC_NVIDIA_TENSORRT_VERSION onnx: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: onnx + MLC_GENERIC_PYTHON_PACKAGE_NAME: onnx new_env_keys: - - CM_ONNX_VERSION + - MLC_ONNX_VERSION onnx-graphsurgeon: deps: - tags: get,generic-python-lib,_package.nvidia-pyindex env: - CM_GENERIC_PYTHON_PACKAGE_NAME: onnx_graphsurgeon + MLC_GENERIC_PYTHON_PACKAGE_NAME: onnx_graphsurgeon new_env_keys: - - CM_ONNX_GRAPHSURGEON_VERSION + - MLC_ONNX_GRAPHSURGEON_VERSION onnxruntime: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: onnxruntime + MLC_GENERIC_PYTHON_PACKAGE_NAME: onnxruntime new_env_keys: - - CM_ONNXRUNTIME_VERSION + - MLC_ONNXRUNTIME_VERSION onnxruntime,rocm: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: onnxruntime-training - CM_GENERIC_PYTHON_PIP_URL: https://download.onnxruntime.ai/onnxruntime_training-1.16.0%2Brocm56-cp3<<>>-cp3<<>>-manylinux_2_17_x86_64.manylinux2014_x86_64.whl + MLC_GENERIC_PYTHON_PACKAGE_NAME: onnxruntime-training + MLC_GENERIC_PYTHON_PIP_URL: https://download.onnxruntime.ai/onnxruntime_training-1.16.0%2Brocm56-cp3<<>>-cp3<<>>-manylinux_2_17_x86_64.manylinux2014_x86_64.whl new_env_keys: - - CM_ONNXRUNTIME_TRAINING_VERSION* + - MLC_ONNXRUNTIME_TRAINING_VERSION* onnxruntime_gpu: default_env: - CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: onnxruntime + MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: onnxruntime deps: - names: - cuda tags: get,cuda env: - CM_GENERIC_PYTHON_PACKAGE_NAME: onnxruntime_gpu - CM_ONNXRUNTIME_VERSION_EXTRA: GPU + MLC_GENERIC_PYTHON_PACKAGE_NAME: onnxruntime_gpu + MLC_ONNXRUNTIME_VERSION_EXTRA: GPU new_env_keys: - - CM_ONNXRUNTIME_GPU_VERSION* + - MLC_ONNXRUNTIME_GPU_VERSION* openai: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: openai + MLC_GENERIC_PYTHON_PACKAGE_NAME: openai new_env_keys: - - CM_OPENAI_VERSION + - MLC_OPENAI_VERSION opencv-python: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: opencv-python + MLC_GENERIC_PYTHON_PACKAGE_NAME: opencv-python new_env_keys: - - CM_OPENCV_PYTHON_VERSION + - MLC_OPENCV_PYTHON_VERSION package.#: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: '#' - CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: '' - CM_GENERIC_PYTHON_PIP_URL: '' + MLC_GENERIC_PYTHON_PACKAGE_NAME: '#' + MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: '' + MLC_GENERIC_PYTHON_PIP_URL: '' find_links_url.#: env: - CM_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL: '#' + MLC_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL: '#' package.torch,cxx11-abi: env: - CM_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/nightly/cpu-cxx11-abi + MLC_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/nightly/cpu-cxx11-abi pandas: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: pandas + 
MLC_GENERIC_PYTHON_PACKAGE_NAME: pandas new_env_keys: - - CM_PANDAS_VERSION + - MLC_PANDAS_VERSION path.#: env: - CM_GENERIC_PYTHON_PIP_URL: '#' + MLC_GENERIC_PYTHON_PIP_URL: '#' pillow: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: Pillow + MLC_GENERIC_PYTHON_PACKAGE_NAME: Pillow new_env_keys: - - CM_PILLOW_VERSION + - MLC_PILLOW_VERSION pip: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: pip + MLC_GENERIC_PYTHON_PACKAGE_NAME: pip new_env_keys: - - CM_PIP_VERSION - - CM_PYTHON_PIP_COMMON_EXTRA + - MLC_PIP_VERSION + - MLC_PYTHON_PIP_COMMON_EXTRA polygraphy: deps: - tags: get,generic-python-lib,_colored env: - CM_GENERIC_PYTHON_PACKAGE_NAME: polygraphy - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://pypi.ngc.nvidia.com + MLC_GENERIC_PYTHON_PACKAGE_NAME: polygraphy + MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://pypi.ngc.nvidia.com new_env_keys: - - CM_POLYGRAPHY_VERSION + - MLC_POLYGRAPHY_VERSION pre: env: - CM_GENERIC_PYTHON_DEV_VERSION: 'yes' + MLC_GENERIC_PYTHON_DEV_VERSION: 'yes' protobuf: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: protobuf + MLC_GENERIC_PYTHON_PACKAGE_NAME: protobuf new_env_keys: - - CM_PROTOBUF_VERSION + - MLC_PROTOBUF_VERSION psutil: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: psutil + MLC_GENERIC_PYTHON_PACKAGE_NAME: psutil new_env_keys: - - CM_PSUTIL_VERSION + - MLC_PSUTIL_VERSION pycocotools: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: pycocotools + MLC_GENERIC_PYTHON_PACKAGE_NAME: pycocotools new_env_keys: - - CM_PYCOCOTOOLS_VERSION + - MLC_PYCOCOTOOLS_VERSION pycuda: deps: - names: - cuda tags: get,cuda env: - CM_GENERIC_PYTHON_PACKAGE_NAME: pycuda + MLC_GENERIC_PYTHON_PACKAGE_NAME: pycuda new_env_keys: - - CM_PYCUDA_VERSION + - MLC_PYCUDA_VERSION quark-amd: deps: - env: - CM_DOWNLOAD_FILENAME: quark-0.1.0+a9827f5-py39-none-any.whl - CM_DOWNLOAD_FINAL_ENV_NAME: CM_QUARK_AMD_WHL_PATH + MLC_DOWNLOAD_FILENAME: quark-0.1.0+a9827f5-py39-none-any.whl + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_QUARK_AMD_WHL_PATH extra_cache_tags: quark-amd force_cache: true tags: download,file,_wget,_url.https://www.xilinx.com/bin/public/openDownload?filename=quark-0.1.0+a9827f5-py39-none-any.whl env: - CM_GENERIC_PYTHON_PACKAGE_NAME: quark - CM_GENERIC_PYTHON_PIP_URL: <<>> + MLC_GENERIC_PYTHON_PACKAGE_NAME: quark + MLC_GENERIC_PYTHON_PIP_URL: <<>> ray: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: ray[default] + MLC_GENERIC_PYTHON_PACKAGE_NAME: ray[default] new_env_keys: - - CM_RAY_VERSION + - MLC_RAY_VERSION requests: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: requests + MLC_GENERIC_PYTHON_PACKAGE_NAME: requests new_env_keys: - - CM_REQUESTS_VERSION + - MLC_REQUESTS_VERSION rocm: deps: - names: @@ -438,142 +438,142 @@ variations: safetensors: deps: - skip_if_env: - CM_HOST_PLATFORM_FLAVOR: + MLC_HOST_PLATFORM_FLAVOR: - x86_64 tags: get,rust-compiler env: - CM_GENERIC_PYTHON_PACKAGE_NAME: safetensors + MLC_GENERIC_PYTHON_PACKAGE_NAME: safetensors new_env_keys: - - CM_SAFETENSORS_VERSION + - MLC_SAFETENSORS_VERSION scikit-learn: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: scikit-learn + MLC_GENERIC_PYTHON_PACKAGE_NAME: scikit-learn new_env_keys: - - CM_SCIKIT_LEARN_VERSION + - MLC_SCIKIT_LEARN_VERSION scipy: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: scipy + MLC_GENERIC_PYTHON_PACKAGE_NAME: scipy new_env_keys: - - CM_SCIPY_VERSION + - MLC_SCIPY_VERSION scons: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: scons + MLC_GENERIC_PYTHON_PACKAGE_NAME: scons new_env_keys: - - CM_SCONS_VERSION + - MLC_SCONS_VERSION setfit: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: setfit + MLC_GENERIC_PYTHON_PACKAGE_NAME: setfit new_env_keys: - - CM_SETFIT_VERSION + - 
MLC_SETFIT_VERSION
   setuptools:
     env:
-      CM_GENERIC_PYTHON_PACKAGE_NAME: setuptools
+      MLC_GENERIC_PYTHON_PACKAGE_NAME: setuptools
     new_env_keys:
-    - CM_SETUPTOOL_VERSION
+    - MLC_SETUPTOOL_VERSION
   six:
     env:
-      CM_GENERIC_PYTHON_PACKAGE_NAME: six
+      MLC_GENERIC_PYTHON_PACKAGE_NAME: six
     new_env_keys:
-    - CM_SIX_VERSION
+    - MLC_SIX_VERSION
   sklearn:
     env:
-      CM_GENERIC_PYTHON_PACKAGE_NAME: sklearn
+      MLC_GENERIC_PYTHON_PACKAGE_NAME: sklearn
     new_env_keys:
-    - CM_SKLEARN_VERSION
+    - MLC_SKLEARN_VERSION
   sox:
     env:
-      CM_GENERIC_PYTHON_PACKAGE_NAME: sox
+      MLC_GENERIC_PYTHON_PACKAGE_NAME: sox
     new_env_keys:
-    - CM_SOX_VERSION
+    - MLC_SOX_VERSION
   sparsezoo:
     env:
-      CM_GENERIC_PYTHON_PACKAGE_NAME: sparsezoo
+      MLC_GENERIC_PYTHON_PACKAGE_NAME: sparsezoo
     new_env_keys:
-    - CM_SPARSEZOO_VERSION
+    - MLC_SPARSEZOO_VERSION
   streamlit:
     env:
-      CM_GENERIC_PYTHON_PACKAGE_NAME: streamlit
+      MLC_GENERIC_PYTHON_PACKAGE_NAME: streamlit
     new_env_keys:
-    - CM_STREAMLIT_VERSION
+    - MLC_STREAMLIT_VERSION
   streamlit_option_menu:
     env:
-      CM_GENERIC_PYTHON_PACKAGE_NAME: streamlit_option_menu
+      MLC_GENERIC_PYTHON_PACKAGE_NAME: streamlit_option_menu
     new_env_keys:
-    - CM_STREAMLIT_OPTION_MENU_VERSION
+    - MLC_STREAMLIT_OPTION_MENU_VERSION
   tensorboard:
     env:
-      CM_GENERIC_PYTHON_PACKAGE_NAME: tensorboard
+      MLC_GENERIC_PYTHON_PACKAGE_NAME: tensorboard
     new_env_keys:
-    - CM_TENSORBOARD_VERSION
+    - MLC_TENSORBOARD_VERSION
   tensorflow:
     env:
-      CM_GENERIC_PYTHON_PACKAGE_NAME: tensorflow
+      MLC_GENERIC_PYTHON_PACKAGE_NAME: tensorflow
     new_env_keys:
-    - CM_TENSORFLOW_VERSION
+    - MLC_TENSORFLOW_VERSION
   tensorflow,rocm:
     env:
-      CM_GENERIC_PYTHON_PACKAGE_NAME: tensorflow-rocm
+      MLC_GENERIC_PYTHON_PACKAGE_NAME: tensorflow-rocm
     new_env_keys:
-    - CM_TENSORFLOW_ROCM_VERSION
+    - MLC_TENSORFLOW_ROCM_VERSION
   tensorrt:
     env:
-      CM_GENERIC_PYTHON_PACKAGE_NAME: tensorrt
-      CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/<<>>
-      CM_TORCH_VERSION_EXTRA: CUDA
+      MLC_GENERIC_PYTHON_PACKAGE_NAME: tensorrt
+      MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/<<>>
+      MLC_TORCH_VERSION_EXTRA: CUDA
     new_env_keys:
-    - CM_TENSORRT_VERSION
+    - MLC_TENSORRT_VERSION
   tflite:
     env:
-      CM_GENERIC_PYTHON_PACKAGE_NAME: tflite
+      MLC_GENERIC_PYTHON_PACKAGE_NAME: tflite
     new_env_keys:
-    - CM_TFLITE_VERSION
+    - MLC_TFLITE_VERSION
   tflite-runtime:
     env:
-      CM_GENERIC_PYTHON_PACKAGE_NAME: tflite-runtime
+      MLC_GENERIC_PYTHON_PACKAGE_NAME: tflite-runtime
     new_env_keys:
-    - CM_TFLITE_RUNTIME_VERSION
+    - MLC_TFLITE_RUNTIME_VERSION
   tokenization:
     env:
-      CM_GENERIC_PYTHON_PACKAGE_NAME: tokenization
+      MLC_GENERIC_PYTHON_PACKAGE_NAME: tokenization
     new_env_keys:
-    - CM_TOKENIZATION_VERSION
+    - MLC_TOKENIZATION_VERSION
   toml:
     env:
-      CM_GENERIC_PYTHON_PACKAGE_NAME: toml
+      MLC_GENERIC_PYTHON_PACKAGE_NAME: toml
     new_env_keys:
-    - CM_TOML_VERSION
+    - MLC_TOML_VERSION
   torch:
     deps:
    - enable_if_env:
-        CM_PYTHON_MINOR_VERSION:
+        MLC_PYTHON_MINOR_VERSION:
         - '7'
         - '8'
       tags: get,generic-python-lib,_package.networkx
     env:
-      CM_GENERIC_PYTHON_PACKAGE_NAME: torch
-      CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu
+      MLC_GENERIC_PYTHON_PACKAGE_NAME: torch
+      MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu
     new_env_keys:
-    - CM_TORCH_VERSION*
+    - MLC_TORCH_VERSION*
   torch,cxx11-abi:
     env:
-      CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/nightly/cpu-cxx11-abi
+      MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/nightly/cpu-cxx11-abi
   torch,pre:
     default_env:
-      CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS:
torch + MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torch env: - CM_GENERIC_PYTHON_PACKAGE_NAME: torch - CM_GENERIC_PYTHON_PIP_EXTRA: ' --pre' - CM_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/nightly/cpu + MLC_GENERIC_PYTHON_PACKAGE_NAME: torch + MLC_GENERIC_PYTHON_PIP_EXTRA: ' --pre' + MLC_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/nightly/cpu new_env_keys: - - CM_TORCH_VERSION* + - MLC_TORCH_VERSION* torch,rocm: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: torch - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: '' - CM_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/nightly/rocm6.2 - CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torch + MLC_GENERIC_PYTHON_PACKAGE_NAME: torch + MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: '' + MLC_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/nightly/rocm6.2 + MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torch new_env_keys: - - CM_TORCH_VERSION* + - MLC_TORCH_VERSION* post_deps: - tags: get,generic-python-lib,_torchvision,_rocm - tags: get,generic-python-lib,_torchaudio,_rocm @@ -584,144 +584,144 @@ variations: - cuda tags: get,cuda env: - CM_GENERIC_PYTHON_PACKAGE_NAME: torch - CM_TORCH_VERSION_EXTRA: CUDA + MLC_GENERIC_PYTHON_PACKAGE_NAME: torch + MLC_TORCH_VERSION_EXTRA: CUDA new_env_keys: - - CM_TORCH_VERSION* + - MLC_TORCH_VERSION* torch_cuda,pre: default_env: - CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torch_cuda + MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torch_cuda deps: - names: - cuda tags: get,cuda - tags: get,generic-python-lib,_numpy env: - CM_GENERIC_PYTHON_PACKAGE_NAME: torch - CM_GENERIC_PYTHON_PIP_EXTRA: ' --pre' - CM_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/<<>> - CM_TORCH_VERSION_EXTRA: CUDA + MLC_GENERIC_PYTHON_PACKAGE_NAME: torch + MLC_GENERIC_PYTHON_PIP_EXTRA: ' --pre' + MLC_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/<<>> + MLC_TORCH_VERSION_EXTRA: CUDA new_env_keys: - - CM_TORCH_VERSION* + - MLC_TORCH_VERSION* torch_tensorrt: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: torch-tensorrt - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/<<>> - CM_TORCH_VERSION_EXTRA: CUDA + MLC_GENERIC_PYTHON_PACKAGE_NAME: torch-tensorrt + MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/<<>> + MLC_TORCH_VERSION_EXTRA: CUDA new_env_keys: - - CM_TORCH_TENSORRT_VERSION + - MLC_TORCH_TENSORRT_VERSION torchaudio: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: torchaudio - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu + MLC_GENERIC_PYTHON_PACKAGE_NAME: torchaudio + MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu new_env_keys: - - CM_TORCHAUDIO_VERSION* + - MLC_TORCHAUDIO_VERSION* torchaudio,rocm: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: torchaudio - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: '' - CM_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/nightly/rocm6.2 - CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torchaudio + MLC_GENERIC_PYTHON_PACKAGE_NAME: torchaudio + MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: '' + MLC_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/nightly/rocm6.2 + MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torchaudio new_env_keys: - - CM_TORCHAUDIO_VERSION* + - MLC_TORCHAUDIO_VERSION* torchaudio_cuda: default_env: - CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torchaudio + MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torchaudio deps: - names: - cuda tags: get,cuda env: - CM_GENERIC_PYTHON_PACKAGE_NAME: torchaudio - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL1: 
https://download.pytorch.org/whl/<<>> - CM_TORCHAUDIO_VERSION_EXTRA: CUDA + MLC_GENERIC_PYTHON_PACKAGE_NAME: torchaudio + MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL1: https://download.pytorch.org/whl/<<>> + MLC_TORCHAUDIO_VERSION_EXTRA: CUDA new_env_keys: - - CM_TORCHAUDIO_VERSION* + - MLC_TORCHAUDIO_VERSION* torchvision: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: torchvision - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu + MLC_GENERIC_PYTHON_PACKAGE_NAME: torchvision + MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu new_env_keys: - - CM_TORCHVISION_VERSION* + - MLC_TORCHVISION_VERSION* torchvision,rocm: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: torchvision - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: '' - CM_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/nightly/rocm6.2 - CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torchvision + MLC_GENERIC_PYTHON_PACKAGE_NAME: torchvision + MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: '' + MLC_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/nightly/rocm6.2 + MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torchvision new_env_keys: - - CM_TORCHVISION_VERSION* + - MLC_TORCHVISION_VERSION* torchvision_cuda: default_env: - CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS1: torchvision + MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS1: torchvision deps: - names: - cuda tags: get,cuda env: - CM_GENERIC_PYTHON_PACKAGE_NAME: torchvision - CM_TORCHVISION_VERSION_EXTRA: CUDA + MLC_GENERIC_PYTHON_PACKAGE_NAME: torchvision + MLC_TORCHVISION_VERSION_EXTRA: CUDA new_env_keys: - - CM_TORCHVISION_VERSION* + - MLC_TORCHVISION_VERSION* tornado: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: tornado + MLC_GENERIC_PYTHON_PACKAGE_NAME: tornado new_env_keys: - - CM_TORNADO_VERSION + - MLC_TORNADO_VERSION tqdm: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: tqdm + MLC_GENERIC_PYTHON_PACKAGE_NAME: tqdm new_env_keys: - - CM_TQDM_VERSION + - MLC_TQDM_VERSION transformers: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: transformers + MLC_GENERIC_PYTHON_PACKAGE_NAME: transformers new_env_keys: - - CM_TRANSFORMERS_VERSION + - MLC_TRANSFORMERS_VERSION typing_extensions: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: typing_extensions + MLC_GENERIC_PYTHON_PACKAGE_NAME: typing_extensions new_env_keys: - - CM_TYPING_EXTENSIONS_VERSION + - MLC_TYPING_EXTENSIONS_VERSION ujson: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: ujson + MLC_GENERIC_PYTHON_PACKAGE_NAME: ujson new_env_keys: - - CM_UJSON_VERSION + - MLC_UJSON_VERSION unidecode: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: unidecode + MLC_GENERIC_PYTHON_PACKAGE_NAME: unidecode new_env_keys: - - CM_UNIDECODE_VERSION + - MLC_UNIDECODE_VERSION url.#: env: - CM_GENERIC_PYTHON_PIP_URL: '#' - CM_TMP_PYTHON_PACKAGE_FORCE_INSTALL: 'yes' + MLC_GENERIC_PYTHON_PIP_URL: '#' + MLC_TMP_PYTHON_PACKAGE_FORCE_INSTALL: 'yes' wandb: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: wandb + MLC_GENERIC_PYTHON_PACKAGE_NAME: wandb new_env_keys: - - CM_WANDB_VERSION + - MLC_WANDB_VERSION west: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: west + MLC_GENERIC_PYTHON_PACKAGE_NAME: west new_env_keys: - - CM_WEST_VERSION + - MLC_WEST_VERSION whl-url.#: deps: - env: - CM_DOWNLOAD_FINAL_ENV_NAME: CM_GENERIC_PYTHON_PIP_URL + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_GENERIC_PYTHON_PIP_URL force_cache: 'yes' tags: download,file,_url.# env: - CM_TMP_PYTHON_PACKAGE_FORCE_INSTALL: 'yes' + MLC_TMP_PYTHON_PACKAGE_FORCE_INSTALL: 'yes' xgboost: env: - CM_GENERIC_PYTHON_PACKAGE_NAME: xgboost + MLC_GENERIC_PYTHON_PACKAGE_NAME: xgboost new_env_keys: - - CM_XGBOOST_VERSION + - MLC_XGBOOST_VERSION xlsxwriter: 
     env:
-      CM_GENERIC_PYTHON_PACKAGE_NAME: xlsxwriter
+      MLC_GENERIC_PYTHON_PACKAGE_NAME: xlsxwriter
     new_env_keys:
-    - CM_XLSXWRITER_VERSION
+    - MLC_XLSXWRITER_VERSION
diff --git a/script/get-generic-python-lib/run.bat b/script/get-generic-python-lib/run.bat
index 2612377c8..17e27e030 100644
--- a/script/get-generic-python-lib/run.bat
+++ b/script/get-generic-python-lib/run.bat
@@ -1,4 +1,4 @@
-IF NOT DEFINED CM_TMP_CURRENT_SCRIPT_PATH SET CM_TMP_CURRENT_SCRIPT_PATH=%CD%
+IF NOT DEFINED MLC_TMP_CURRENT_SCRIPT_PATH SET MLC_TMP_CURRENT_SCRIPT_PATH=%CD%
 
-%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\detect-version.py
+%MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\detect-version.py
 IF %ERRORLEVEL% NEQ 0 EXIT 1
diff --git a/script/get-generic-python-lib/run.sh b/script/get-generic-python-lib/run.sh
index b60ac0814..2df36823d 100644
--- a/script/get-generic-python-lib/run.sh
+++ b/script/get-generic-python-lib/run.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 
-CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD}
+MLC_TMP_CURRENT_SCRIPT_PATH=${MLC_TMP_CURRENT_SCRIPT_PATH:-$PWD}
 
-${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/detect-version.py
+${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/detect-version.py
 test $? -eq 0 || exit $?
 
 exit 0
diff --git a/script/get-generic-python-lib/tensorflow/run-aarch64.sh b/script/get-generic-python-lib/tensorflow/run-aarch64.sh
index 6c11efb71..71fca3564 100644
--- a/script/get-generic-python-lib/tensorflow/run-aarch64.sh
+++ b/script/get-generic-python-lib/tensorflow/run-aarch64.sh
@@ -1,13 +1,13 @@
-CM_PYTHON_BIN=${CM_PYTHON_BIN_WITH_PATH:-python3}
+MLC_PYTHON_BIN=${MLC_PYTHON_BIN_WITH_PATH:-python3}
 
-${CM_PYTHON_BIN} -m pip install --upgrade pip ${CM_PYTHON_PIP_COMMON_EXTRA}
-${CM_PYTHON_BIN} -m pip install setuptools testresources wheel h5py --user --upgrade --ignore-installed ${CM_PYTHON_PIP_COMMON_EXTRA}
+${MLC_PYTHON_BIN} -m pip install --upgrade pip ${MLC_PYTHON_PIP_COMMON_EXTRA}
+${MLC_PYTHON_BIN} -m pip install setuptools testresources wheel h5py --user --upgrade --ignore-installed ${MLC_PYTHON_PIP_COMMON_EXTRA}
 
 curl https://sh.rustup.rs -sSf -o tmp.sh
 sh tmp.sh -y
 
 export PATH=$PATH:$HOME/.cargo/bin
 
-${CM_PYTHON_BIN} -m pip install tensorflow-aarch64${CM_TMP_PIP_VERSION_STRING} --user ${CM_PYTHON_PIP_COMMON_EXTRA}
+${MLC_PYTHON_BIN} -m pip install tensorflow-aarch64${MLC_TMP_PIP_VERSION_STRING} --user ${MLC_PYTHON_PIP_COMMON_EXTRA}
 test $? -eq 0 || exit 1
 
-echo "CM_GENERIC_PYTHON_PACKAGE_NAME=tensorflow-aarch64" >> $PWD/tmp-run-env.out
+echo "MLC_GENERIC_PYTHON_PACKAGE_NAME=tensorflow-aarch64" >> $PWD/tmp-run-env.out
diff --git a/script/get-generic-python-lib/tensorflow/run-macos.sh b/script/get-generic-python-lib/tensorflow/run-macos.sh
index 525b532eb..6b41b939f 100644
--- a/script/get-generic-python-lib/tensorflow/run-macos.sh
+++ b/script/get-generic-python-lib/tensorflow/run-macos.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 
-CM_PYTHON_BIN=${CM_PYTHON_BIN:-python3}
+MLC_PYTHON_BIN=${MLC_PYTHON_BIN:-python3}
 
-${CM_PYTHON_BIN} -m pip install tensorflow-macos${CM_TMP_PIP_VERSION_STRING}
+${MLC_PYTHON_BIN} -m pip install tensorflow-macos${MLC_TMP_PIP_VERSION_STRING}
 test $? -eq 0 || exit 1
 
-echo "CM_GENERIC_PYTHON_PACKAGE_NAME=tensorflow-macos" >> $PWD/tmp-run-env.out
+echo "MLC_GENERIC_PYTHON_PACKAGE_NAME=tensorflow-macos" >> $PWD/tmp-run-env.out
diff --git a/script/get-generic-python-lib/uninstall_deps.sh b/script/get-generic-python-lib/uninstall_deps.sh
index eeddf36d7..b288c967b 100644
--- a/script/get-generic-python-lib/uninstall_deps.sh
+++ b/script/get-generic-python-lib/uninstall_deps.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 
-if [[ -n ${CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS} ]]; then
-  cmd="${CM_PYTHON_BIN_WITH_PATH} -m pip uninstall ${CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS} -y ${CM_PYTHON_PIP_COMMON_EXTRA}"
+if [[ -n ${MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS} ]]; then
+  cmd="${MLC_PYTHON_BIN_WITH_PATH} -m pip uninstall ${MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS} -y ${MLC_PYTHON_PIP_COMMON_EXTRA}"
   echo "$cmd"
   eval "$cmd"
   test $? -eq 0 || exit $?
diff --git a/script/get-generic-python-lib/validate_cache.bat b/script/get-generic-python-lib/validate_cache.bat
index 2612377c8..17e27e030 100644
--- a/script/get-generic-python-lib/validate_cache.bat
+++ b/script/get-generic-python-lib/validate_cache.bat
@@ -1,4 +1,4 @@
-IF NOT DEFINED CM_TMP_CURRENT_SCRIPT_PATH SET CM_TMP_CURRENT_SCRIPT_PATH=%CD%
+IF NOT DEFINED MLC_TMP_CURRENT_SCRIPT_PATH SET MLC_TMP_CURRENT_SCRIPT_PATH=%CD%
 
-%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\detect-version.py
+%MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\detect-version.py
 IF %ERRORLEVEL% NEQ 0 EXIT 1
diff --git a/script/get-generic-python-lib/validate_cache.sh b/script/get-generic-python-lib/validate_cache.sh
index b60ac0814..2df36823d 100644
--- a/script/get-generic-python-lib/validate_cache.sh
+++ b/script/get-generic-python-lib/validate_cache.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 
-CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD}
+MLC_TMP_CURRENT_SCRIPT_PATH=${MLC_TMP_CURRENT_SCRIPT_PATH:-$PWD}
 
-${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/detect-version.py
+${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/detect-version.py
 test $? -eq 0 || exit $?
 
 exit 0
diff --git a/script/get-generic-sys-util/customize.py b/script/get-generic-sys-util/customize.py
index 81f2bf76b..74b8c75b6 100644
--- a/script/get-generic-sys-util/customize.py
+++ b/script/get-generic-sys-util/customize.py
@@ -12,37 +12,37 @@ def preprocess(i):
     automation = i['automation']
 
     # Use VERSION_CMD and CHECK_CMD if no CHECK_CMD is set
-    if env.get('CM_SYS_UTIL_VERSION_CMD', '') != '' and env.get(
-            'CM_SYS_UTIL_CHECK_CMD', '') == '':
-        env['CM_SYS_UTIL_CHECK_CMD'] = env['CM_SYS_UTIL_VERSION_CMD']
+    if env.get('MLC_SYS_UTIL_VERSION_CMD', '') != '' and env.get(
+            'MLC_SYS_UTIL_CHECK_CMD', '') == '':
+        env['MLC_SYS_UTIL_CHECK_CMD'] = env['MLC_SYS_UTIL_VERSION_CMD']
 
-    if env.get('CM_GENERIC_SYS_UTIL_RUN_MODE', '') == "install":
-        if env.get('CM_SYS_UTIL_INSTALL_WITH_RETRY', '') == "yes":
+    if env.get('MLC_GENERIC_SYS_UTIL_RUN_MODE', '') == "install":
+        if env.get('MLC_SYS_UTIL_INSTALL_WITH_RETRY', '') == "yes":
             i['run_script_input']['script_name'] = "install-with-retry"
         else:
             i['run_script_input']['script_name'] = "install"
 
-    if env.get('CM_GENERIC_SYS_UTIL_RUN_MODE', '') == "detect":
-        if env.get('CM_SYS_UTIL_VERSION_CMD', '') != '' or env.get(
-                'CM_SYS_UTIL_VERSION_CMD_OVERRIDE', '') != '':
+    if env.get('MLC_GENERIC_SYS_UTIL_RUN_MODE', '') == "detect":
+        if env.get('MLC_SYS_UTIL_VERSION_CMD', '') != '' or env.get(
+                'MLC_SYS_UTIL_VERSION_CMD_OVERRIDE', '') != '':
             r = automation.run_native_script(
                 {'run_script_input': i['run_script_input'], 'env': env, 'script_name': 'detect'})
             if r['return'] != 0:  # detection failed, do install via prehook_deps
                 print("detection failed, going for installation")
-                env['CM_GENERIC_SYS_UTIL_INSTALL_NEEDED'] = "yes"
+                env['MLC_GENERIC_SYS_UTIL_INSTALL_NEEDED'] = "yes"
                 return {'return': 0}
             else:  # detection is successful, no need to install
                 # print("detection success")
-                env['CM_SYS_UTIL_INSTALL_CMD'] = ""
+                env['MLC_SYS_UTIL_INSTALL_CMD'] = ""
                 return {'return': 0}
         else:  # No detction command available, just install
             # print("No detection possible, going for installation")
-            env['CM_GENERIC_SYS_UTIL_INSTALL_NEEDED'] = "yes"
+            env['MLC_GENERIC_SYS_UTIL_INSTALL_NEEDED'] = "yes"
             return {'return': 0}
 
     # Only "install" mode reaches here
-    pm = env.get('CM_HOST_OS_PACKAGE_MANAGER')
-    util = env.get('CM_SYS_UTIL_NAME', '')
+    pm = env.get('MLC_HOST_OS_PACKAGE_MANAGER')
+    util = env.get('MLC_SYS_UTIL_NAME', '')
     if util == '':
         return {
             'return': 1, 'error': 'Please select a variation specifying the sys util name'}
@@ -67,19 +67,19 @@ def preprocess(i):
             'error': f'No package name specified for {util} in the meta'}
 
     if not package_name:
-        if str(env.get('CM_GENERIC_SYS_UTIL_IGNORE_MISSING_PACKAGE', '')
+        if str(env.get('MLC_GENERIC_SYS_UTIL_IGNORE_MISSING_PACKAGE', '')
                ).lower() in ["1", "true", "yes"]:
             print(
                 f"WARNING: No package name specified for {pm} and util name {util}. Ignoring it...")
-            env['CM_TMP_GENERIC_SYS_UTIL_PACKAGE_INSTALL_IGNORED'] = 'yes'
+            env['MLC_TMP_GENERIC_SYS_UTIL_PACKAGE_INSTALL_IGNORED'] = 'yes'
             return {'return': 0}
         else:
            return {
                'return': 1, 'error': f'No package name specified for {pm} and util name {util}'}
 
     if util == "libffi":
-        if env.get("CM_HOST_OS_FLAVOR", "") == "ubuntu":
-            if env.get("CM_HOST_OS_VERSION", "") in [
+        if env.get("MLC_HOST_OS_FLAVOR", "") == "ubuntu":
+            if env.get("MLC_HOST_OS_VERSION", "") in [
                     "20.04", "20.10", "21.04", "21.10"]:
                 package_name = "libffi7"
             else:
@@ -96,7 +96,7 @@ def preprocess(i):
             package_name = package_name.replace(
                 "<<<" + tmp_value + ">>>", str(env[tmp_value]))
 
-    install_cmd = env.get('CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD')
+    install_cmd = env.get('MLC_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD')
     if not install_cmd:
         return {
             'return': 1, 'error': 'Package manager installation command not detected for the given OS'}
@@ -104,32 +104,32 @@ def preprocess(i):
     if pm == "brew":
         sudo = ''
     else:
-        sudo = env.get('CM_SUDO', '')
-    env['CM_SYS_UTIL_INSTALL_CMD'] = sudo + \
+        sudo = env.get('MLC_SUDO', '')
+    env['MLC_SYS_UTIL_INSTALL_CMD'] = sudo + \
         ' ' + install_cmd + ' ' + package_name
 
     env['+PATH'] = []
-    if env.get('CM_HOST_OS_FLAVOR', '') == 'rhel':
-        if env['CM_SYS_UTIL_NAME'] == "g++12":
+    if env.get('MLC_HOST_OS_FLAVOR', '') == 'rhel':
+        if env['MLC_SYS_UTIL_NAME'] == "g++12":
             env['+PATH'] = ["/opt/rh/gcc-toolset-12/root/usr/bin"]
 
-        if env['CM_SYS_UTIL_NAME'] == "numactl" and env['CM_HOST_OS_VERSION'] in [
+        if env['MLC_SYS_UTIL_NAME'] == "numactl" and env['MLC_HOST_OS_VERSION'] in [
                 "9.1", "9.2", "9.3"]:
-            env['CM_SYS_UTIL_INSTALL_CMD'] = ''
+            env['MLC_SYS_UTIL_INSTALL_CMD'] = ''
 
-    if env.get('CM_SYS_UTIL_CHECK_CMD',
-               '') != '' and env['CM_SYS_UTIL_INSTALL_CMD'] != '':
-        env['CM_SYS_UTIL_INSTALL_CMD'] = f"""{env['CM_SYS_UTIL_CHECK_CMD']} || {env['CM_SYS_UTIL_INSTALL_CMD']}"""
+    if env.get('MLC_SYS_UTIL_CHECK_CMD',
+               '') != '' and env['MLC_SYS_UTIL_INSTALL_CMD'] != '':
+        env['MLC_SYS_UTIL_INSTALL_CMD'] = f"""{env['MLC_SYS_UTIL_CHECK_CMD']} || {env['MLC_SYS_UTIL_INSTALL_CMD']}"""
 
     return {'return': 0}
 
 
 def detect_version(i):
     env = i['env']
-    version_env_key = f"CM_{env['CM_SYS_UTIL_NAME'].upper()}_VERSION"
-    version_check_re = env.get('CM_SYS_UTIL_VERSION_RE', '')
-    group_number = env.get('CM_TMP_VERSION_DETECT_GROUP_NUMBER', 1)
+    version_env_key = f"MLC_{env['MLC_SYS_UTIL_NAME'].upper()}_VERSION"
+    version_check_re = env.get('MLC_SYS_UTIL_VERSION_RE', '')
+    group_number = env.get('MLC_TMP_VERSION_DETECT_GROUP_NUMBER', 1)
 
     # Confirm that the regex pattern and file are present
     if version_check_re == '' or not os.path.exists("tmp-ver.out"):
@@ -154,17 +154,17 @@ def postprocess(i):
     env = i['env']
 
-    version_env_key = f"CM_{env['CM_SYS_UTIL_NAME'].upper()}_VERSION"
+    version_env_key = f"MLC_{env['MLC_SYS_UTIL_NAME'].upper()}_VERSION"
 
-    if (env.get('CM_SYS_UTIL_VERSION_CMD', '') != '' or env.get('CM_SYS_UTIL_VERSION_CMD_OVERRIDE', '') != '') and env.get(version_env_key, '') == '' and str(env.get(
-            'CM_TMP_GENERIC_SYS_UTIL_PACKAGE_INSTALL_IGNORED', '')).lower() not in ["yes", "1", "true"] and env.get('CM_GET_GENERIC_SYS_UTIL_INSTALL_FAILED', '') != 'yes':
+    if (env.get('MLC_SYS_UTIL_VERSION_CMD', '') != '' or env.get('MLC_SYS_UTIL_VERSION_CMD_OVERRIDE', '') != '') and env.get(version_env_key, '') == '' and str(env.get(
+            'MLC_TMP_GENERIC_SYS_UTIL_PACKAGE_INSTALL_IGNORED', '')).lower() not in ["yes", "1", "true"] and env.get('MLC_GET_GENERIC_SYS_UTIL_INSTALL_FAILED', '') != 'yes':
         automation = i['automation']
         r = automation.run_native_script(
             {'run_script_input': i['run_script_input'], 'env': env, 'script_name': 'detect'})
         if r['return'] > 0 and str(env.get(
-                'CM_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE', '')).lower() not in ["1", "yes", "true"]:
-            return {'return': 1, 'error': 'Version detection failed after installation. Please check the provided version command or use env.CM_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE=yes to ignore the error.'}
+                'MLC_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE', '')).lower() not in ["1", "yes", "true"]:
+            return {'return': 1, 'error': 'Version detection failed after installation. Please check the provided version command or use env.MLC_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE=yes to ignore the error.'}
         elif r['return'] == 0:
             r = detect_version(i)
@@ -177,7 +177,7 @@ def postprocess(i):
             env[version_env_key] = version
 
             # Not used now
-            env['CM_GENERIC_SYS_UTIL_' + env['CM_SYS_UTIL_NAME'].upper() +
+            env['MLC_GENERIC_SYS_UTIL_' + env['MLC_SYS_UTIL_NAME'].upper() +
                 '_CACHE_TAGS'] = 'version-' + version
 
     if env.get(version_env_key, '') == '':
diff --git a/script/get-generic-sys-util/detect.sh b/script/get-generic-sys-util/detect.sh
index 2c3583799..53d36fa2b 100755
--- a/script/get-generic-sys-util/detect.sh
+++ b/script/get-generic-sys-util/detect.sh
@@ -1,17 +1,17 @@
 #!/bin/bash
 
-if [[ -n "${CM_SYS_UTIL_VERSION_CMD_OVERRIDE}" ]]; then
-  cmd="${CM_SYS_UTIL_VERSION_CMD_OVERRIDE}"
+if [[ -n "${MLC_SYS_UTIL_VERSION_CMD_OVERRIDE}" ]]; then
+  cmd="${MLC_SYS_UTIL_VERSION_CMD_OVERRIDE}"
   echo $cmd
   eval $cmd
   test $? -eq 0 || exit $?
 else
-  if [[ -n "${CM_SYS_UTIL_VERSION_CMD}" ]]; then
-    if [[ "${CM_SYS_UTIL_VERSION_CMD_USE_ERROR_STREAM}" == "yes" ]]; then
+  if [[ -n "${MLC_SYS_UTIL_VERSION_CMD}" ]]; then
+    if [[ "${MLC_SYS_UTIL_VERSION_CMD_USE_ERROR_STREAM}" == "yes" ]]; then
       # Redirect both stdout and stderr to tmp-ver.out
-      cmd="${CM_SYS_UTIL_VERSION_CMD} > tmp-ver.out 2>&1"
+      cmd="${MLC_SYS_UTIL_VERSION_CMD} > tmp-ver.out 2>&1"
     else
-      cmd="${CM_SYS_UTIL_VERSION_CMD} > tmp-ver.out"
+      cmd="${MLC_SYS_UTIL_VERSION_CMD} > tmp-ver.out"
     fi
     echo $cmd
     eval $cmd
diff --git a/script/get-generic-sys-util/install-with-retry.sh b/script/get-generic-sys-util/install-with-retry.sh
index 9abc55d08..43ee22556 100644
--- a/script/get-generic-sys-util/install-with-retry.sh
+++ b/script/get-generic-sys-util/install-with-retry.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 # Safe execution of a command stored in a variable
-cmd="${CM_SYS_UTIL_INSTALL_CMD}"
+cmd="${MLC_SYS_UTIL_INSTALL_CMD}"
 echo "$cmd"
 
 # set the max number of retries as well as the delay between the retries
@@ -9,7 +9,7 @@ delay_in_retry=3
 
 for ((i=1; i<=max_retries; i++)); do
-    echo "Attempting to install ${CM_SYS_UTIL_NAME} - $i of $max_retries..."
+    echo "Attempting to install ${MLC_SYS_UTIL_NAME} - $i of $max_retries..."
     output=$(eval "$cmd" 2>&1)
     echo "$output"
     exit_status=$?
@@ -21,8 +21,8 @@ for ((i=1; i<=max_retries; i++)); do
             sleep $delay_in_retry
         else
             # If it's a non-network error, handle based on fail-safe setting
-            if [[ "${CM_TMP_FAIL_SAFE}" == 'yes' ]]; then
-                echo "CM_GET_GENERIC_SYS_UTIL_INSTALL_FAILED=yes" > tmp-run-env.out
+            if [[ "${MLC_TMP_FAIL_SAFE}" == 'yes' ]]; then
+                echo "MLC_GET_GENERIC_SYS_UTIL_INSTALL_FAILED=yes" > tmp-run-env.out
                 echo "Fail-safe is enabled, exiting with status 0."
                 exit 0
             else
@@ -32,14 +32,14 @@ for ((i=1; i<=max_retries; i++)); do
         fi
     else
         # If the command succeeded
-        echo "Successfully installed ${CM_SYS_UTIL_NAME}."
+        echo "Successfully installed ${MLC_SYS_UTIL_NAME}."
         exit 0
     fi
 
     # If this was the last retry, print a final failure message
     if [[ $i -eq $max_retries ]]; then
         echo "Installation failed after $max_retries attempts due to persistent network issues."
-        if [[ "${CM_TMP_FAIL_SAFE}" == 'yes' ]]; then
+        if [[ "${MLC_TMP_FAIL_SAFE}" == 'yes' ]]; then
             exit 0
         else
             exit 1
diff --git a/script/get-generic-sys-util/install.sh b/script/get-generic-sys-util/install.sh
index c8f532c49..d264ba3e9 100644
--- a/script/get-generic-sys-util/install.sh
+++ b/script/get-generic-sys-util/install.sh
@@ -1,14 +1,14 @@
 #!/bin/bash
 # Safe execution of a command stored in a variable
-cmd="${CM_SYS_UTIL_INSTALL_CMD}"
+cmd="${MLC_SYS_UTIL_INSTALL_CMD}"
 echo "$cmd"
 
 # Execute the command and capture the exit status directly
 if ! eval "$cmd"; then
     echo "Command failed with status $?"
-    if [[ "${CM_TMP_FAIL_SAFE}" == 'yes' ]]; then
+    if [[ "${MLC_TMP_FAIL_SAFE}" == 'yes' ]]; then
         # Exit safely if fail-safe is enabled
-        echo "CM_GET_GENERIC_SYS_UTIL_INSTALL_FAILED=yes" > tmp-run-env.out
+        echo "MLC_GET_GENERIC_SYS_UTIL_INSTALL_FAILED=yes" > tmp-run-env.out
         echo "Fail-safe is enabled, exiting with status 0"
         exit 0
     else
diff --git a/script/get-generic-sys-util/meta.yaml b/script/get-generic-sys-util/meta.yaml
index b75e24bbc..0436ba72d 100644
--- a/script/get-generic-sys-util/meta.yaml
+++ b/script/get-generic-sys-util/meta.yaml
@@ -4,16 +4,16 @@ automation_uid: 5b4e0237da074764
 cache: true
 category: Detection or installation of tools and artifacts
 default_env:
-  CM_CLEAN_DIRS: bin
-  CM_SUDO: sudo
+  MLC_CLEAN_DIRS: bin
+  MLC_SUDO: sudo
 deps:
 - tags: detect,os
 env:
-  CM_GENERIC_SYS_UTIL_INSTALL_NEEDED: 'no'
-  CM_SYS_UTIL_VERSION_CMD: ''
+  MLC_GENERIC_SYS_UTIL_INSTALL_NEEDED: 'no'
+  MLC_SYS_UTIL_VERSION_CMD: ''
 input_mapping:
-  fail_safe: CM_TMP_FAIL_SAFE
-  ignore_missing: CM_GENERIC_SYS_UTIL_IGNORE_MISSING_PACKAGE
+  fail_safe: MLC_TMP_FAIL_SAFE
+  ignore_missing: MLC_GENERIC_SYS_UTIL_IGNORE_MISSING_PACKAGE
 new_env_keys:
 - +PATH
 tags:
@@ -27,7 +27,7 @@ tests:
     docker_os: rhel
     docker_os_version: '9'
     env:
-      CM_TMP_FAIL_SAFE: 'yes'
+      MLC_TMP_FAIL_SAFE: 'yes'
      ignore_missing: 'yes'
      test-all-variations: 'yes'
  - docker: 'yes'
@@ -52,12 +52,12 @@ uid: bb0393afa8404a11
 variations:
   cmake:
     env:
-      CM_SYS_UTIL_NAME: cmake
-      CM_SYS_UTIL_VERSION_CMD: cmake --version
-      CM_SYS_UTIL_VERSION_RE: cmake version ([\d.]+)
-      CM_TMP_VERSION_DETECT_GROUP_NUMBER: 1
+      MLC_SYS_UTIL_NAME: cmake
+      MLC_SYS_UTIL_VERSION_CMD: cmake --version
+      MLC_SYS_UTIL_VERSION_RE: cmake version ([\d.]+)
+      MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 1
     new_env_keys:
-    - CM_CMAKE_VERSION
+    - MLC_CMAKE_VERSION
     state:
       cmake:
         apt: cmake
@@ -67,14 +67,14 @@ variations:
   detect:
     default: true
     env:
-      CM_GENERIC_SYS_UTIL_RUN_MODE: detect
+      MLC_GENERIC_SYS_UTIL_RUN_MODE: detect
     group: mode
     prehook_deps:
    - enable_if_env:
-        CM_GENERIC_SYS_UTIL_INSTALL_NEEDED:
+        MLC_GENERIC_SYS_UTIL_INSTALL_NEEDED:
         - 'yes'
       force_env_keys:
-      - CM_TMP_FAIL_SAFE
+      - MLC_TMP_FAIL_SAFE
       inherit_variation_tags: true
       names:
       - install-sys-util
       tags: get,generic-sys-util,_install
   dmidecode:
     env:
-      CM_SYS_UTIL_NAME: dmidecode
-      CM_SYS_UTIL_VERSION_CMD: dmidecode --version
-      CM_SYS_UTIL_VERSION_RE: ([\d.]+)
-      CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+      MLC_SYS_UTIL_NAME: dmidecode
+      MLC_SYS_UTIL_VERSION_CMD: dmidecode --version
+      MLC_SYS_UTIL_VERSION_RE: ([\d.]+)
+      MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
     new_env_keys:
-    - CM_DMIDECODE_VERSION
+    - MLC_DMIDECODE_VERSION
     state:
       dmidecode:
         apt: dmidecode
@@ -97,69 +97,69 @@ variations: yum: dmidecode g++-11: env: - CM_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE: 'yes' - CM_SYS_UTIL_NAME: g++11 - CM_SYS_UTIL_VERSION_CMD: g++-11 --version - CM_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b - CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + MLC_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE: 'yes' + MLC_SYS_UTIL_NAME: g++11 + MLC_SYS_UTIL_VERSION_CMD: g++-11 --version + MLC_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b + MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0 new_env_keys: - - CM_GPP11_VERSION + - MLC_GPP11_VERSION state: g++11: apt: g++-11 dnf: gcc-toolset-11-gcc-c++ g++-12: env: - CM_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE: 'yes' - CM_SYS_UTIL_NAME: g++12 - CM_SYS_UTIL_VERSION_CMD: g++-12 --version - CM_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b - CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + MLC_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE: 'yes' + MLC_SYS_UTIL_NAME: g++12 + MLC_SYS_UTIL_VERSION_CMD: g++-12 --version + MLC_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b + MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0 new_env_keys: - - CM_GPP12_VERSION + - MLC_GPP12_VERSION state: g++12: apt: g++-12 dnf: gcc-toolset-12-gcc-c++ g++-9: env: - CM_SYS_UTIL_NAME: g++9 - CM_SYS_UTIL_VERSION_CMD: g++-9 --version - CM_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b - CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + MLC_SYS_UTIL_NAME: g++9 + MLC_SYS_UTIL_VERSION_CMD: g++-9 --version + MLC_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b + MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0 new_env_keys: - - CM_GPP9_VERSION + - MLC_GPP9_VERSION state: g++9: apt: g++-9 dnf: gcc-toolset-9-gcc-c++ gcc-11: env: - CM_SYS_UTIL_NAME: gcc11 - CM_SYS_UTIL_VERSION_CMD: gcc-11 --version - CM_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b - CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + MLC_SYS_UTIL_NAME: gcc11 + MLC_SYS_UTIL_VERSION_CMD: gcc-11 --version + MLC_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b + MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0 new_env_keys: - - CM_GCC11_VERSION + - MLC_GCC11_VERSION state: gcc11: apt: gcc-11 gcc-9: env: - CM_SYS_UTIL_NAME: gcc9 - CM_SYS_UTIL_VERSION_CMD: gcc-9 --version - CM_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b - CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + MLC_SYS_UTIL_NAME: gcc9 + MLC_SYS_UTIL_VERSION_CMD: gcc-9 --version + MLC_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b + MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0 new_env_keys: - - CM_GCC9_VERSION + - MLC_GCC9_VERSION state: gcc9: apt: gcc-9 gflags-dev: env: - CM_SYS_UTIL_NAME: gflags-dev + MLC_SYS_UTIL_NAME: gflags-dev new_env_keys: - - CM_GFLAGS_DEV_VERSION + - MLC_GFLAGS_DEV_VERSION state: gflags-dev: apt: libgflags-dev @@ -168,12 +168,12 @@ variations: yum: gflags-devel git-lfs: env: - CM_SYS_UTIL_NAME: git-lfs - CM_SYS_UTIL_VERSION_CMD: git-lfs --version - CM_SYS_UTIL_VERSION_RE: git-lfs\/([\d.]+) - CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + MLC_SYS_UTIL_NAME: git-lfs + MLC_SYS_UTIL_VERSION_CMD: git-lfs --version + MLC_SYS_UTIL_VERSION_RE: git-lfs\/([\d.]+) + MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0 new_env_keys: - - CM_GIT_LFS_VERSION + - MLC_GIT_LFS_VERSION state: git-lfs: apt: git-lfs @@ -182,9 +182,9 @@ variations: yum: git-lfs glog-dev: env: - CM_SYS_UTIL_NAME: glog-dev + MLC_SYS_UTIL_NAME: glog-dev new_env_keys: - - CM_GLOG_DEV_VERSION + - MLC_GLOG_DEV_VERSION state: glog-dev: apt: libgoogle-glog-dev @@ -193,19 +193,19 @@ variations: yum: glog-devel install: env: - CM_GENERIC_SYS_UTIL_RUN_MODE: install + MLC_GENERIC_SYS_UTIL_RUN_MODE: install group: mode new_env_keys: - - 
CM_TMP_GENERIC_SYS_UTIL_PACKAGE_INSTALL_IGNORED - - CM_GET_GENERIC_SYS_UTIL_INSTALL_FAILED + - MLC_TMP_GENERIC_SYS_UTIL_PACKAGE_INSTALL_IGNORED + - MLC_GET_GENERIC_SYS_UTIL_INSTALL_FAILED libboost-all-dev: env: - CM_SYS_UTIL_NAME: libboost-all-dev - CM_SYS_UTIL_VERSION_CMD: dpkg -s libboost-dev | grep 'Version' - CM_SYS_UTIL_VERSION_RE: ([0-9]+(\.[0-9]+)+) - CM_TMP_VERSION_DETECT_GROUP_NUMBER: 1 + MLC_SYS_UTIL_NAME: libboost-all-dev + MLC_SYS_UTIL_VERSION_CMD: dpkg -s libboost-dev | grep 'Version' + MLC_SYS_UTIL_VERSION_RE: ([0-9]+(\.[0-9]+)+) + MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 1 new_env_keys: - - CM_LIBBOOST_ALL_DEV_VERSION + - MLC_LIBBOOST_ALL_DEV_VERSION state: libboost-all-dev: apt: libboost-all-dev @@ -214,12 +214,12 @@ variations: yum: boost-devel bzip2: env: - CM_SYS_UTIL_NAME: bzip2 - CM_SYS_UTIL_VERSION_CMD_OVERRIDE: bzcat --version 2>&1 | grep bzip > tmp-ver.out - CM_SYS_UTIL_VERSION_RE: ([0-9]+(\.[0-9]+)+) - CM_TMP_VERSION_DETECT_GROUP_NUMBER: 1 + MLC_SYS_UTIL_NAME: bzip2 + MLC_SYS_UTIL_VERSION_CMD_OVERRIDE: bzcat --version 2>&1 | grep bzip > tmp-ver.out + MLC_SYS_UTIL_VERSION_RE: ([0-9]+(\.[0-9]+)+) + MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 1 new_env_keys: - - CM_BZIP2_VERSION + - MLC_BZIP2_VERSION state: bzip2: apt: bzip2 @@ -228,12 +228,12 @@ variations: yum: bzip2 libbz2-dev: env: - CM_SYS_UTIL_NAME: libbz2_dev - CM_SYS_UTIL_VERSION_CMD: dpkg -s libbz2-dev | grep 'Version' - CM_SYS_UTIL_VERSION_RE: ([0-9]+(\.[0-9]+)+) - CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + MLC_SYS_UTIL_NAME: libbz2_dev + MLC_SYS_UTIL_VERSION_CMD: dpkg -s libbz2-dev | grep 'Version' + MLC_SYS_UTIL_VERSION_RE: ([0-9]+(\.[0-9]+)+) + MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0 new_env_keys: - - CM_LIBBZ2_DEV_VERSION + - MLC_LIBBZ2_DEV_VERSION state: libbz2_dev: apt: libbz2-dev @@ -241,28 +241,28 @@ variations: yum: libbzip2-devel libev-dev: env: - CM_SYS_UTIL_NAME: libev_dev - CM_SYS_UTIL_VERSION_CMD: dpkg -s libev-dev | grep 'Version' - CM_SYS_UTIL_VERSION_RE: ([\d:]+\.[\d\.-]+) - CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + MLC_SYS_UTIL_NAME: libev_dev + MLC_SYS_UTIL_VERSION_CMD: dpkg -s libev-dev | grep 'Version' + MLC_SYS_UTIL_VERSION_RE: ([\d:]+\.[\d\.-]+) + MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0 new_env_keys: - - CM_LIBEV_DEV_VERSION + - MLC_LIBEV_DEV_VERSION state: libev_dev: apt: libev-dev libffi: env: - CM_SYS_UTIL_NAME: libffi + MLC_SYS_UTIL_NAME: libffi new_env_keys: - - CM_LIBFFI_VERSION + - MLC_LIBFFI_VERSION state: libffi: apt: libffi libffi-dev: env: - CM_SYS_UTIL_NAME: libffi_dev + MLC_SYS_UTIL_NAME: libffi_dev new_env_keys: - - CM_LIBFFI_DEV_VERSION + - MLC_LIBFFI_DEV_VERSION state: libffi_dev: apt: libffi-dev @@ -271,40 +271,40 @@ variations: yum: libffi-devel libffi7: env: - CM_SYS_UTIL_NAME: libffi7 - CM_SYS_UTIL_VERSION_CMD: dpkg -l libffi7 2>/dev/null | grep '^ii' | awk '{print + MLC_SYS_UTIL_NAME: libffi7 + MLC_SYS_UTIL_VERSION_CMD: dpkg -l libffi7 2>/dev/null | grep '^ii' | awk '{print $3}' || rpm -q libffi7 2>/dev/null || pacman -Q libffi7 2>/dev/null - CM_SYS_UTIL_VERSION_RE: \d\.\d-[0-9]+ - CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + MLC_SYS_UTIL_VERSION_RE: \d\.\d-[0-9]+ + MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0 new_env_keys: - - CM_LIBFFI7_VERSION + - MLC_LIBFFI7_VERSION state: libffi7: apt: libffi7 libffi8: env: - CM_SYS_UTIL_NAME: libffi8 + MLC_SYS_UTIL_NAME: libffi8 new_env_keys: - - CM_LIBFFI8_VERSION + - MLC_LIBFFI8_VERSION state: libffi8: apt: libffi8 libgdbm-dev: env: - CM_SYS_UTIL_NAME: libgdbm_dev - CM_SYS_UTIL_VERSION_CMD: dpkg -s libgdbm-dev | grep 'Version' - CM_SYS_UTIL_VERSION_RE: 
([\d]+\.[\d\.-]+) - CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + MLC_SYS_UTIL_NAME: libgdbm_dev + MLC_SYS_UTIL_VERSION_CMD: dpkg -s libgdbm-dev | grep 'Version' + MLC_SYS_UTIL_VERSION_RE: ([\d]+\.[\d\.-]+) + MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0 new_env_keys: - - CM_LIBGDBM_DEV_VERSION + - MLC_LIBGDBM_DEV_VERSION state: libgdbm_dev: apt: libgdbm-dev libgmock-dev: env: - CM_SYS_UTIL_NAME: libgmock-dev + MLC_SYS_UTIL_NAME: libgmock-dev new_env_keys: - - CM_LIBGMOCK_DEV_VERSION + - MLC_LIBGMOCK_DEV_VERSION state: libgmock-dev: apt: libgmock-dev @@ -313,20 +313,20 @@ variations: yum: gmock-devel liblzma-dev: env: - CM_SYS_UTIL_NAME: liblzma_dev - CM_SYS_UTIL_VERSION_CMD: xz --version - CM_SYS_UTIL_VERSION_RE: (\d(\.\d)+) - CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + MLC_SYS_UTIL_NAME: liblzma_dev + MLC_SYS_UTIL_VERSION_CMD: xz --version + MLC_SYS_UTIL_VERSION_RE: (\d(\.\d)+) + MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0 new_env_keys: - - CM_LIBLZMA_DEV_VERSION + - MLC_LIBLZMA_DEV_VERSION state: liblzma_dev: apt: liblzma-dev libmkl-dev: env: - CM_SYS_UTIL_NAME: libmkl-dev + MLC_SYS_UTIL_NAME: libmkl-dev new_env_keys: - - CM_LIBMKL_DEV_VERSION + - MLC_LIBMKL_DEV_VERSION state: libmkl-dev: apt: libmkl-dev @@ -335,9 +335,9 @@ variations: yum: '' libmpfr-dev: env: - CM_SYS_UTIL_NAME: libmpfr-dev + MLC_SYS_UTIL_NAME: libmpfr-dev new_env_keys: - - CM_LIBMPFR_DEV_VERSION + - MLC_LIBMPFR_DEV_VERSION state: libmpfr-dev: apt: libmpfr-dev @@ -347,13 +347,13 @@ variations: zypper: mpfr-devel libncurses-dev: env: - CM_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE: 'yes' - CM_SYS_UTIL_NAME: libncurses_dev - CM_SYS_UTIL_VERSION_CMD: ncurses5-config --version - CM_SYS_UTIL_VERSION_RE: ([0-9]+(\.[0-9]+)+) - CM_TMP_VERSION_DETECT_GROUP_NUMBER: 1 + MLC_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE: 'yes' + MLC_SYS_UTIL_NAME: libncurses_dev + MLC_SYS_UTIL_VERSION_CMD: ncurses5-config --version + MLC_SYS_UTIL_VERSION_RE: ([0-9]+(\.[0-9]+)+) + MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 1 new_env_keys: - - CM_LIBNCURSES_DEV_VERSION + - MLC_LIBNCURSES_DEV_VERSION state: libncurses_dev: apt: libncurses-dev @@ -361,9 +361,9 @@ variations: yum: libncurses-devel libnuma-dev: env: - CM_SYS_UTIL_NAME: libnuma-dev + MLC_SYS_UTIL_NAME: libnuma-dev new_env_keys: - - CM_LIBNUMA_DEV_VERSION + - MLC_LIBNUMA_DEV_VERSION state: libnuma-dev: apt: libnuma-dev @@ -372,9 +372,9 @@ variations: yum: numactl-libs libpci-dev: env: - CM_SYS_UTIL_NAME: libpci-dev + MLC_SYS_UTIL_NAME: libpci-dev new_env_keys: - - CM_LIBPCI_DEV_VERSION + - MLC_LIBPCI_DEV_VERSION state: libpci-dev: apt: libpci-dev @@ -383,9 +383,9 @@ variations: yum: pciutils-devel libpng-dev: env: - CM_SYS_UTIL_NAME: libpng-dev + MLC_SYS_UTIL_NAME: libpng-dev new_env_keys: - - CM_LIBPNG_DEV_VERSION + - MLC_LIBPNG_DEV_VERSION state: libpng-dev: apt: libpng-dev @@ -394,9 +394,9 @@ variations: yum: libpng-devel libre2-dev: env: - CM_SYS_UTIL_NAME: libre2-dev + MLC_SYS_UTIL_NAME: libre2-dev new_env_keys: - - CM_LIBRE2_DEV_VERSION + - MLC_LIBRE2_DEV_VERSION state: libre2-dev: apt: libre2-dev @@ -405,9 +405,9 @@ variations: yum: libre-devel libreadline-dev: env: - CM_SYS_UTIL_NAME: libreadline_dev + MLC_SYS_UTIL_NAME: libreadline_dev new_env_keys: - - CM_LIBREADLINE_DEV_VERSION + - MLC_LIBREADLINE_DEV_VERSION state: libreadline_dev: apt: libreadline-dev @@ -415,20 +415,20 @@ variations: yum: readline-devel libsqlite3-dev: env: - CM_SYS_UTIL_NAME: libsqlite3_dev + MLC_SYS_UTIL_NAME: libsqlite3_dev new_env_keys: - - CM_LIBSQLITE3_DEV_VERSION + - MLC_LIBSQLITE3_DEV_VERSION state: libsqlite3_dev: 
        apt: libsqlite3-dev
   libssl-dev:
     env:
-      CM_SYS_UTIL_NAME: libssl_dev
-      CM_SYS_UTIL_VERSION_CMD: openssl version
-      CM_SYS_UTIL_VERSION_RE: OpenSSL\s+([\d.]+)
-      CM_TMP_VERSION_DETECT_GROUP_NUMBER: 1
+      MLC_SYS_UTIL_NAME: libssl_dev
+      MLC_SYS_UTIL_VERSION_CMD: openssl version
+      MLC_SYS_UTIL_VERSION_RE: OpenSSL\s+([\d.]+)
+      MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 1
     new_env_keys:
-    - CM_LIBSSL_DEV_VERSION
+    - MLC_LIBSSL_DEV_VERSION
     state:
       libssl_dev:
         apt: libssl-dev
@@ -437,9 +437,9 @@ variations:
       yum: libssl-devel
   libudev-dev:
     env:
-      CM_SYS_UTIL_NAME: libudev-dev
+      MLC_SYS_UTIL_NAME: libudev-dev
     new_env_keys:
-    - CM_LIBUDEV_DEV_VERSION
+    - MLC_LIBUDEV_DEV_VERSION
     state:
       libudev-dev:
         apt: libudev-dev
@@ -450,32 +450,32 @@ variations:
     deps:
     - tags: detect,os
     env:
-      CM_SYS_UTIL_NAME: linux-tools
+      MLC_SYS_UTIL_NAME: linux-tools
     new_env_keys:
-    - CM_LINUX_TOOLS_VERSION
+    - MLC_LINUX_TOOLS_VERSION
     state:
       linux-tools:
-        apt: linux-tools-<<<CM_HOST_OS_KERNEL_VERSION>>>
+        apt: linux-tools-<<<MLC_HOST_OS_KERNEL_VERSION>>>
   md5sha1sum:
     env:
-      CM_SYS_UTIL_NAME: md5sha1sum
-      CM_SYS_UTIL_VERSION_CMD: md5sum --version | grep sha1sum
-      CM_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b
-      CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+      MLC_SYS_UTIL_NAME: md5sha1sum
+      MLC_SYS_UTIL_VERSION_CMD: md5sum --version | grep sha1sum
+      MLC_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b
+      MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
     new_env_keys:
-    - CM_MD5SHA1SUM_VERSION
+    - MLC_MD5SHA1SUM_VERSION
     state:
       md5sha1sum:
         apt: ''
         brew: md5sha1sum
   ninja-build:
     env:
-      CM_SYS_UTIL_NAME: ninja-build
-      CM_SYS_UTIL_VERSION_CMD: ninja --version
-      CM_SYS_UTIL_VERSION_RE: ([\d.]+)
-      CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+      MLC_SYS_UTIL_NAME: ninja-build
+      MLC_SYS_UTIL_VERSION_CMD: ninja --version
+      MLC_SYS_UTIL_VERSION_RE: ([\d.]+)
+      MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
     new_env_keys:
-    - CM_NINJA_BUILD_VERSION
+    - MLC_NINJA_BUILD_VERSION
     state:
       ninja-build:
         apt: ninja-build
@@ -485,18 +485,18 @@ variations:
       zypper: ninja-build
   nlohmann-json3-dev:
     env:
-      CM_SYS_UTIL_NAME: nlohmann_json3_dev
+      MLC_SYS_UTIL_NAME: nlohmann_json3_dev
     new_env_keys:
-    - CM_NLOHMANN_JSON3_DEV_VERSION
+    - MLC_NLOHMANN_JSON3_DEV_VERSION
     state:
       nlohmann_json3_dev:
         apt: nlohmann-json3-dev
         dnf: nlohmann-json-devel
   ntpdate:
     env:
-      CM_SYS_UTIL_NAME: ntpdate
+      MLC_SYS_UTIL_NAME: ntpdate
     new_env_keys:
-    - CM_NTPDATE_VERSION
+    - MLC_NTPDATE_VERSION
     state:
       ntpdate:
         apt: ntpdate
@@ -506,18 +506,18 @@ variations:
   numactl:
     deps:
     - enable_if_env:
-        CM_HOST_OS_FLAVOR:
+        MLC_HOST_OS_FLAVOR:
         - rhel
-        CM_HOST_OS_VERSION:
+        MLC_HOST_OS_VERSION:
         - '9.1'
         - '9.2'
         - '9.3'
       tags: install,numactl,from.src
     env:
-      CM_SYS_UTIL_NAME: numactl
-      CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+      MLC_SYS_UTIL_NAME: numactl
+      MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
     new_env_keys:
-    - CM_NUMACTL_VERSION
+    - MLC_NUMACTL_VERSION
     state:
       numactl:
         apt: numactl
@@ -525,12 +525,12 @@ variations:
       yum: numactl-devel
   nvidia-cuda-toolkit:
     env:
-      CM_SYS_UTIL_NAME: nvidia-cuda-toolkit
-      CM_SYS_UTIL_VERSION_CMD: nvcc --version
-      CM_SYS_UTIL_VERSION_RE: release ([\d.]+)
-      CM_TMP_VERSION_DETECT_GROUP_NUMBER: 1
+      MLC_SYS_UTIL_NAME: nvidia-cuda-toolkit
+      MLC_SYS_UTIL_VERSION_CMD: nvcc --version
+      MLC_SYS_UTIL_VERSION_RE: release ([\d.]+)
+      MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 1
     new_env_keys:
-    - CM_NVIDIA_CUDA_TOOLKIT_VERSION
+    - MLC_NVIDIA_CUDA_TOOLKIT_VERSION
     state:
       nvidia-cuda-toolkit:
         apt: nvidia-cuda-toolkit
@@ -539,9 +539,9 @@ variations:
       yum: nvidia-cuda-toolkit
   pkg-config:
     env:
-      CM_SYS_UTIL_NAME: pkg_config
-      CM_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b
-      CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+      MLC_SYS_UTIL_NAME: pkg_config
+      MLC_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b
+      MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
     state:
       pkg_config:
         apt: pkg-config
@@ -550,9 +550,9 @@ variations:
       yum: pkg-config
   psmisc:
     env:
-      CM_SYS_UTIL_NAME: psmisc
+      MLC_SYS_UTIL_NAME: psmisc
     new_env_keys:
-    - CM_PSMISC_VERSION
+    - MLC_PSMISC_VERSION
     state:
       psmisc:
         apt: psmisc
@@ -561,9 +561,9 @@ variations:
       yum: psmisc
   rapidjson-dev:
     env:
-      CM_SYS_UTIL_NAME: rapidjson-dev
+      MLC_SYS_UTIL_NAME: rapidjson-dev
     new_env_keys:
-    - CM_RAPIDJSON_DEV_VERSION
+    - MLC_RAPIDJSON_DEV_VERSION
     state:
       rapidjson-dev:
         apt: rapidjson-dev
@@ -572,12 +572,12 @@ variations:
       yum: rapidjson-devel
   rsync:
     env:
-      CM_SYS_UTIL_NAME: rsync
-      CM_SYS_UTIL_VERSION_CMD: rsync --version
-      CM_SYS_UTIL_VERSION_RE: rsync\s+version\s+([\d.]+)
-      CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+      MLC_SYS_UTIL_NAME: rsync
+      MLC_SYS_UTIL_VERSION_CMD: rsync --version
+      MLC_SYS_UTIL_VERSION_RE: rsync\s+version\s+([\d.]+)
+      MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
     new_env_keys:
-    - CM_RSYNC_VERSION
+    - MLC_RSYNC_VERSION
     state:
       rsync:
         apt: rsync
@@ -587,12 +587,12 @@ variations:
       zypper: rsync
   screen:
     env:
-      CM_SYS_UTIL_NAME: screen
-      CM_SYS_UTIL_VERSION_CMD: screen --version
-      CM_SYS_UTIL_VERSION_RE: Screen version ([\d.]+)
-      CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+      MLC_SYS_UTIL_NAME: screen
+      MLC_SYS_UTIL_VERSION_CMD: screen --version
+      MLC_SYS_UTIL_VERSION_RE: Screen version ([\d.]+)
+      MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
     new_env_keys:
-    - CM_SCREEN_VERSION
+    - MLC_SCREEN_VERSION
     state:
       screen:
         apt: screen
@@ -602,12 +602,12 @@ variations:
       zypper: rsync
   sox:
     env:
-      CM_SYS_UTIL_NAME: sox
-      CM_SYS_UTIL_VERSION_CMD: sox --version
-      CM_SYS_UTIL_VERSION_RE: sox:\s+SoX\s+v([\d.]+)
-      CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+      MLC_SYS_UTIL_NAME: sox
+      MLC_SYS_UTIL_VERSION_CMD: sox --version
+      MLC_SYS_UTIL_VERSION_RE: sox:\s+SoX\s+v([\d.]+)
+      MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
     new_env_keys:
-    - CM_SOX_VERSION
+    - MLC_SOX_VERSION
     state:
       sox:
         apt: sox
@@ -615,12 +615,12 @@ variations:
       dnf: sox
   systemd:
     env:
-      CM_SYS_UTIL_NAME: systemd
-      CM_SYS_UTIL_VERSION_CMD: systemctl --version
-      CM_SYS_UTIL_VERSION_RE: systemd ([\d]+)
-      CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+      MLC_SYS_UTIL_NAME: systemd
+      MLC_SYS_UTIL_VERSION_CMD: systemctl --version
+      MLC_SYS_UTIL_VERSION_RE: systemd ([\d]+)
+      MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
     new_env_keys:
-    - CM_SYSTEMD_VERSION
+    - MLC_SYSTEMD_VERSION
     state:
       systemd:
         apt: systemd
@@ -629,24 +629,24 @@ variations:
       yum: systemd
   tk-dev:
     env:
-      CM_SYS_UTIL_NAME: tk_dev
-      CM_SYS_UTIL_VERSION_CMD: dpkg -s tk-dev | grep Version
-      CM_SYS_UTIL_VERSION_RE: ([0-9]+(\.[0-9]+)+)
-      CM_TMP_VERSION_DETECT_GROUP_NUMBER: 1
+      MLC_SYS_UTIL_NAME: tk_dev
+      MLC_SYS_UTIL_VERSION_CMD: dpkg -s tk-dev | grep Version
+      MLC_SYS_UTIL_VERSION_RE: ([0-9]+(\.[0-9]+)+)
+      MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 1
     new_env_keys:
-    - CM_TK_DEV_VERSION
+    - MLC_TK_DEV_VERSION
     state:
       tk_dev:
         apt: tk-dev
   transmission:
     env:
-      CM_SYS_UTIL_NAME: transmission
-      CM_SYS_UTIL_VERSION_CMD: transmission-daemon --version
-      CM_SYS_UTIL_VERSION_CMD_USE_ERROR_STREAM: 'yes'
-      CM_SYS_UTIL_VERSION_RE: transmission-daemon ([\d.]+)
-      CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+      MLC_SYS_UTIL_NAME: transmission
+      MLC_SYS_UTIL_VERSION_CMD: transmission-daemon --version
+      MLC_SYS_UTIL_VERSION_CMD_USE_ERROR_STREAM: 'yes'
+      MLC_SYS_UTIL_VERSION_RE: transmission-daemon ([\d.]+)
+      MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
     new_env_keys:
-    - CM_TRANSMISSION_VERSION
+    - MLC_TRANSMISSION_VERSION
     state:
       transmission:
         apt: transmission-daemon
@@ -655,12 +655,12 @@ variations:
       yum: transmission-daemon
   vim-common:
     env:
-      CM_SYS_UTIL_NAME: vim_common
-      CM_SYS_UTIL_VERSION_CMD: vim --version
-      CM_SYS_UTIL_VERSION_RE: VIM - Vi IMproved ([\d.]+)
-      CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+      MLC_SYS_UTIL_NAME: vim_common
+      MLC_SYS_UTIL_VERSION_CMD: vim --version
+      MLC_SYS_UTIL_VERSION_RE: VIM - Vi IMproved ([\d.]+)
+      MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
     new_env_keys:
-    - CM_VIM_COMMON_VERSION
+    - MLC_VIM_COMMON_VERSION
     state:
       vim_common:
         apt: vim-common
@@ -670,36 +670,36 @@ variations:
       yum: vim-common
   wget:
     env:
-      CM_SYS_UTIL_NAME: wget
-      CM_SYS_UTIL_VERSION_CMD: wget --version
-      CM_SYS_UTIL_VERSION_RE: Wget\s*([\d.]+)
-      CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+      MLC_SYS_UTIL_NAME: wget
+      MLC_SYS_UTIL_VERSION_CMD: wget --version
+      MLC_SYS_UTIL_VERSION_RE: Wget\s*([\d.]+)
+      MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
     new_env_keys:
-    - CM_WGET_VERSION
+    - MLC_WGET_VERSION
     state:
       wget:
         apt: wget
         brew: wget
   wkhtmltopdf:
     env:
-      CM_SYS_UTIL_NAME: wkhtmltopdf
-      CM_SYS_UTIL_VERSION_CMD: wkhtmltopdf --version
-      CM_SYS_UTIL_VERSION_RE: wkhtmltopdf ([\d.]+)
-      CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+      MLC_SYS_UTIL_NAME: wkhtmltopdf
+      MLC_SYS_UTIL_VERSION_CMD: wkhtmltopdf --version
+      MLC_SYS_UTIL_VERSION_RE: wkhtmltopdf ([\d.]+)
+      MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
     new_env_keys:
-    - CM_WKHTMLTOPDF_VERSION
+    - MLC_WKHTMLTOPDF_VERSION
     state:
       wkhtmltopdf:
         apt: wkhtmltopdf
         brew: wkhtmltopdf
   xz:
     env:
-      CM_SYS_UTIL_NAME: xz
-      CM_SYS_UTIL_VERSION_CMD: xz --version
-      CM_SYS_UTIL_VERSION_RE: xz \(XZ Utils\) ([\d.]+)
-      CM_TMP_VERSION_DETECT_GROUP_NUMBER: 1
+      MLC_SYS_UTIL_NAME: xz
+      MLC_SYS_UTIL_VERSION_CMD: xz --version
+      MLC_SYS_UTIL_VERSION_RE: xz \(XZ Utils\) ([\d.]+)
+      MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 1
     new_env_keys:
-    - CM_XZ_VERSION
+    - MLC_XZ_VERSION
     state:
       xz:
         apt: xz-utils
@@ -709,18 +709,18 @@ variations:
       yum: xz
   zlib:
     env:
-      CM_SYS_UTIL_NAME: zlib
+      MLC_SYS_UTIL_NAME: zlib
     new_env_keys:
-    - CM_ZLIB_VERSION
+    - MLC_ZLIB_VERSION
     state:
       zlib:
         apt: zlib1g
         choco: zlib
   zlib1g-dev:
     env:
-      CM_SYS_UTIL_NAME: zlib1g_dev
+      MLC_SYS_UTIL_NAME: zlib1g_dev
     new_env_keys:
-    - CM_ZLIB1G_DEV_VERSION
+    - MLC_ZLIB1G_DEV_VERSION
     state:
       zlib1g_dev:
         apt: zlib1g-dev
diff --git a/script/get-gh-actions-runner/customize.py b/script/get-gh-actions-runner/customize.py
index 5fa54e71f..564065fb4 100644
--- a/script/get-gh-actions-runner/customize.py
+++ b/script/get-gh-actions-runner/customize.py
@@ -13,17 +13,17 @@ def preprocess(i):
 
     automation = i['automation']
 
-    quiet = (env.get('CM_QUIET', False) == 'yes')
+    quiet = (env.get('MLC_QUIET', False) == 'yes')
 
-    cmd = env.get('CM_GH_ACTIONS_RUNNER_COMMAND', '')
+    cmd = env.get('MLC_GH_ACTIONS_RUNNER_COMMAND', '')
     if cmd == "config":
-        run_cmd = f"cd {env['CM_GH_ACTIONS_RUNNER_CODE_PATH']} && ./config.sh --url {env['CM_GH_ACTIONS_RUNNER_URL']} --token {env['CM_GH_ACTIONS_RUNNER_TOKEN']}"
+        run_cmd = f"cd {env['MLC_GH_ACTIONS_RUNNER_CODE_PATH']} && ./config.sh --url {env['MLC_GH_ACTIONS_RUNNER_URL']} --token {env['MLC_GH_ACTIONS_RUNNER_TOKEN']}"
     elif cmd == "remove":
-        run_cmd = f"cd {env['CM_GH_ACTIONS_RUNNER_CODE_PATH']} && ./config.sh remove --token {env['CM_GH_ACTIONS_RUNNER_TOKEN']}"
+        run_cmd = f"cd {env['MLC_GH_ACTIONS_RUNNER_CODE_PATH']} && ./config.sh remove --token {env['MLC_GH_ACTIONS_RUNNER_TOKEN']}"
     elif cmd == "install":
-        run_cmd = f"cd {env['CM_GH_ACTIONS_RUNNER_CODE_PATH']} && sudo ./svc.sh install"
+        run_cmd = f"cd {env['MLC_GH_ACTIONS_RUNNER_CODE_PATH']} && sudo ./svc.sh install"
     elif cmd == "uninstall":
-        run_cmd = f"cd {env['CM_GH_ACTIONS_RUNNER_CODE_PATH']} && sudo ./svc.sh uninstall"
+        run_cmd = f"cd {env['MLC_GH_ACTIONS_RUNNER_CODE_PATH']} && sudo ./svc.sh uninstall"
         cache_rm_tags = "gh,runner,_install"
         r = cm.access({'action': 'rm', 'automation': 'cache', 'tags': cache_rm_tags, 'f': True})
@@ -31,9 +31,9 @@ def preprocess(i):
         if r['return'] != 0 and r['return'] != 16:  # ignore missing ones
             return r
     elif cmd == "start":
-        run_cmd = f"cd {env['CM_GH_ACTIONS_RUNNER_CODE_PATH']} && sudo ./svc.sh start"
+        run_cmd = f"cd {env['MLC_GH_ACTIONS_RUNNER_CODE_PATH']} && sudo ./svc.sh start"
 
-    env['CM_RUN_CMD'] = run_cmd
+    env['MLC_RUN_CMD'] = run_cmd
 
     return {'return': 0}
diff --git a/script/get-gh-actions-runner/meta.yaml b/script/get-gh-actions-runner/meta.yaml
index 287ee254a..67eabf7fb 100644
--- a/script/get-gh-actions-runner/meta.yaml
+++ b/script/get-gh-actions-runner/meta.yaml
@@ -14,11 +14,11 @@ tags:
 - gh-actions-runner
 uid: 5b005c5a76f242a7
 input_mapping:
-  token: CM_GH_ACTIONS_RUNNER_TOKEN
-  url: CM_GH_ACTIONS_RUNNER_URL
+  token: MLC_GH_ACTIONS_RUNNER_TOKEN
+  url: MLC_GH_ACTIONS_RUNNER_URL
 
 new_env_keys:
-- CM_GH_ACTIONS_RUNNER_CODE_PATH
+- MLC_GH_ACTIONS_RUNNER_CODE_PATH
 
 deps:
 - tags: detect-os
@@ -26,33 +26,33 @@ deps:
   force_cache: yes
   extra_cache_tags: gh-actions-runner-code,gh-actions,code
   env:
-    CM_DAE_FINAL_ENV_NAME: CM_GH_ACTIONS_RUNNER_CODE_PATH
+    MLC_DAE_FINAL_ENV_NAME: MLC_GH_ACTIONS_RUNNER_CODE_PATH
 
 variations:
   config:
     group: command
     default: true
     env:
-      CM_GH_ACTIONS_RUNNER_COMMAND: config
+      MLC_GH_ACTIONS_RUNNER_COMMAND: config
   remove:
     group: command
     env:
-      CM_GH_ACTIONS_RUNNER_COMMAND: remove
+      MLC_GH_ACTIONS_RUNNER_COMMAND: remove
   install:
     group: command
     deps:
    - tags: get,gh,actions-runner,_config
      force_cache: yes
     env:
-      CM_GH_ACTIONS_RUNNER_COMMAND: install
+      MLC_GH_ACTIONS_RUNNER_COMMAND: install
   uninstall:
     group: command
     env:
-      CM_GH_ACTIONS_RUNNER_COMMAND: uninstall
+      MLC_GH_ACTIONS_RUNNER_COMMAND: uninstall
   start:
     group: command
     deps:
    - tags: get,gh,actions-runner,_install
      force_cache: yes
     env:
-      CM_GH_ACTIONS_RUNNER_COMMAND: start
+      MLC_GH_ACTIONS_RUNNER_COMMAND: start
diff --git a/script/get-gh-actions-runner/run.sh b/script/get-gh-actions-runner/run.sh
index 547395120..43988f060 100644
--- a/script/get-gh-actions-runner/run.sh
+++ b/script/get-gh-actions-runner/run.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
 
-echo ${CM_RUN_CMD}
-eval ${CM_RUN_CMD}
+echo ${MLC_RUN_CMD}
+eval ${MLC_RUN_CMD}
 
 test $? -eq 0 || exit $?
diff --git a/script/get-git-repo/README-extra.md b/script/get-git-repo/README-extra.md
index 83a368e5f..9ef54386b 100644
--- a/script/get-git-repo/README-extra.md
+++ b/script/get-git-repo/README-extra.md
@@ -13,7 +13,7 @@ where [VARIATION] is one of
 * `no-recurse-submodules:` Only download the main repository
 
 ## Exported Variables
-* `CM_GIT_CHECKOUT_PATH`: Directory path of the cloned git repository
+* `MLC_GIT_CHECKOUT_PATH`: Directory path of the cloned git repository
 
 ## Supported and Tested OS
 1. Ubuntu 18.04, 20.04, 22.04
diff --git a/script/get-git-repo/customize.py b/script/get-git-repo/customize.py
index 603340e47..8f9e3ea31 100644
--- a/script/get-git-repo/customize.py
+++ b/script/get-git-repo/customize.py
@@ -16,37 +16,37 @@ def preprocess(i):
 
     env_key = get_env_key(env)
 
-    cm_git_url = env['CM_GIT_URL']
+    cm_git_url = env['MLC_GIT_URL']
 
-    if 'CM_GIT_REPO_NAME' not in env:
+    if 'MLC_GIT_REPO_NAME' not in env:
         update_env(
             env,
-            'CM_GIT_REPO{}_NAME',
+            'MLC_GIT_REPO{}_NAME',
             env_key,
             os.path.basename(
-                env['CM_GIT_URL']))
+                env['MLC_GIT_URL']))
 
-    if 'CM_GIT_DEPTH' not in env:
-        env['CM_GIT_DEPTH'] = ''
+    if 'MLC_GIT_DEPTH' not in env:
+        env['MLC_GIT_DEPTH'] = ''
 
-    if 'CM_GIT_RECURSE_SUBMODULES' not in env:
-        env['CM_GIT_RECURSE_SUBMODULES'] = ''
+    if 'MLC_GIT_RECURSE_SUBMODULES' not in env:
+        env['MLC_GIT_RECURSE_SUBMODULES'] = ''
 
-    if env.get('CM_GIT_CHECKOUT', '') == '':
-        env['CM_GIT_CHECKOUT'] = env.get(
-            'CM_GIT_SHA', env.get(
-                'CM_GIT_BRANCH', ''))
+    if env.get('MLC_GIT_CHECKOUT', '') == '':
+        env['MLC_GIT_CHECKOUT'] = env.get(
+            'MLC_GIT_SHA', env.get(
+                'MLC_GIT_BRANCH', ''))
 
-    git_checkout_string = " -b " + env['CM_GIT_BRANCH'] if (
-        "CM_GIT_BRANCH" in env and env.get('CM_GIT_SHA', '') == '') else ""
+    git_checkout_string = " -b " + env['MLC_GIT_BRANCH'] if (
+        "MLC_GIT_BRANCH" in env and env.get('MLC_GIT_SHA', '') == '') else ""
 
-    git_clone_cmd = "git clone " + env['CM_GIT_RECURSE_SUBMODULES'] + git_checkout_string + " " + \
-        env['CM_GIT_URL'] + " " + \
-        env.get('CM_GIT_DEPTH', '') + ' ' + env['CM_GIT_CHECKOUT_FOLDER']
+    git_clone_cmd = "git clone " + env['MLC_GIT_RECURSE_SUBMODULES'] + git_checkout_string + " " + \
+        env['MLC_GIT_URL'] + " " + \
+        env.get('MLC_GIT_DEPTH', '') + ' ' + env['MLC_GIT_CHECKOUT_FOLDER']
 
-    env['CM_GIT_CLONE_CMD'] = git_clone_cmd
-    env['CM_TMP_GIT_PATH'] = os.path.join(
-        os.getcwd(), env['CM_GIT_CHECKOUT_FOLDER'], ".gitdone")
+    env['MLC_GIT_CLONE_CMD'] = git_clone_cmd
+    env['MLC_TMP_GIT_PATH'] = os.path.join(
+        os.getcwd(), env['MLC_GIT_CHECKOUT_FOLDER'], ".gitdone")
 
     return {'return': 0}
 
@@ -55,41 +55,41 @@ def postprocess(i):
     env = i['env']
     state = i['state']
 
-    env['CM_GIT_CHECKOUT_PATH'] = os.path.join(
-        os.getcwd(), env['CM_GIT_CHECKOUT_FOLDER'])
-    git_checkout_path = env['CM_GIT_CHECKOUT_PATH']
+    env['MLC_GIT_CHECKOUT_PATH'] = os.path.join(
+        os.getcwd(), env['MLC_GIT_CHECKOUT_FOLDER'])
+    git_checkout_path = env['MLC_GIT_CHECKOUT_PATH']
 
     env_key = get_env_key(env)
 
-    # We remap CM_GIT variables with CM_GIT_REPO prefix so that they don't
+    # We remap MLC_GIT variables with MLC_GIT_REPO prefix so that they don't
     # contaminate the env of the parent script
-    update_env(env, 'CM_GIT_REPO{}_CHECKOUT_PATH',
-               env_key, env['CM_GIT_CHECKOUT_PATH'])
-    update_env(env, 'CM_GIT_REPO{}_URL', env_key, env['CM_GIT_URL'])
-    update_env(env, 'CM_GIT_REPO{}_CHECKOUT', env_key, env['CM_GIT_CHECKOUT'])
-    update_env(env, 'CM_GIT_REPO{}_DEPTH', env_key, env['CM_GIT_DEPTH'])
-    update_env(env, 'CM_GIT_REPO{}_CHECKOUT_FOLDER',
-               env_key, env['CM_GIT_CHECKOUT_FOLDER'])
-    update_env(env, 'CM_GIT_REPO{}_PATCH', env_key, env['CM_GIT_PATCH'])
-    update_env(env, 'CM_GIT_REPO{}_RECURSE_SUBMODULES',
-               env_key, env['CM_GIT_RECURSE_SUBMODULES'])
-
-    if (env.get('CM_GIT_CHECKOUT_PATH_ENV_NAME', '') != ''):
-        env[env['CM_GIT_CHECKOUT_PATH_ENV_NAME']] = git_checkout_path
-
-    env['CM_GET_DEPENDENT_CACHED_PATH'] = git_checkout_path
-
-    if os.path.exists("tmp-cm-git-hash.out"):
-        with open("tmp-cm-git-hash.out", "r") as f:
+    update_env(env, 'MLC_GIT_REPO{}_CHECKOUT_PATH',
+               env_key, env['MLC_GIT_CHECKOUT_PATH'])
+    update_env(env, 'MLC_GIT_REPO{}_URL', env_key, env['MLC_GIT_URL'])
+    update_env(env, 'MLC_GIT_REPO{}_CHECKOUT', env_key, env['MLC_GIT_CHECKOUT'])
+    update_env(env, 'MLC_GIT_REPO{}_DEPTH', env_key, env['MLC_GIT_DEPTH'])
+    update_env(env, 'MLC_GIT_REPO{}_CHECKOUT_FOLDER',
+               env_key, env['MLC_GIT_CHECKOUT_FOLDER'])
+    update_env(env, 'MLC_GIT_REPO{}_PATCH', env_key, env['MLC_GIT_PATCH'])
+    update_env(env, 'MLC_GIT_REPO{}_RECURSE_SUBMODULES',
+               env_key, env['MLC_GIT_RECURSE_SUBMODULES'])
+
+    if (env.get('MLC_GIT_CHECKOUT_PATH_ENV_NAME', '') != ''):
+        env[env['MLC_GIT_CHECKOUT_PATH_ENV_NAME']] = git_checkout_path
+
+    env['MLC_GET_DEPENDENT_CACHED_PATH'] = git_checkout_path
+
+    if os.path.exists("tmp-mlc-git-hash.out"):
+        with open("tmp-mlc-git-hash.out", "r") as f:
             git_hash = f.readline().strip()
-            env['CM_GIT_REPO_CURRENT_HASH'] = git_hash
+            env['MLC_GIT_REPO_CURRENT_HASH'] = git_hash
 
     return {'return': 0}
 
 
 def get_env_key(env):
 
-    env_key = env.get('CM_GIT_ENV_KEY', '')
+    env_key = env.get('MLC_GIT_ENV_KEY', '')
 
     if env_key != '' and not env_key.startswith('_'):
         env_key = '_' + env_key
diff --git a/script/get-git-repo/meta.yaml b/script/get-git-repo/meta.yaml
index eae2ac3e7..962b9281f 100644
--- a/script/get-git-repo/meta.yaml
+++ b/script/get-git-repo/meta.yaml
@@ -4,39 +4,39 @@ automation_uid: 5b4e0237da074764
 cache: true
 category: DevOps automation
 default_env:
-  CM_GIT_CHECKOUT_FOLDER: repo
-  CM_GIT_DEPTH: --depth 4
-  CM_GIT_PATCH: 'no'
-  CM_GIT_RECURSE_SUBMODULES: ' --recurse-submodules'
-  CM_GIT_URL: https://github.com/mlcommons/ck.git
+  MLC_GIT_CHECKOUT_FOLDER: repo
+  MLC_GIT_DEPTH: --depth 4
+  MLC_GIT_PATCH: 'no'
+  MLC_GIT_RECURSE_SUBMODULES: ' --recurse-submodules'
+  MLC_GIT_URL: https://github.com/mlcommons/ck.git
 deps:
 - tags: detect,os
 input_mapping:
-  branch: CM_GIT_CHECKOUT
-  depth: CM_GIT_DEPTH
-  env_key: CM_GIT_ENV_KEY
-  folder: CM_GIT_CHECKOUT_FOLDER
-  patch: CM_GIT_PATCH
-  pull: CM_GIT_REPO_PULL
-  submodules: CM_GIT_RECURSE_SUBMODULES
-  update: CM_GIT_REPO_PULL
+  branch: MLC_GIT_CHECKOUT
+  depth: MLC_GIT_DEPTH
+  env_key: MLC_GIT_ENV_KEY
+  folder: MLC_GIT_CHECKOUT_FOLDER
+  patch: MLC_GIT_PATCH
+  pull: MLC_GIT_REPO_PULL
+  submodules: MLC_GIT_RECURSE_SUBMODULES
+  update: MLC_GIT_REPO_PULL
 new_env_keys:
-- CM_GIT_CHECKOUT_PATH
-- CM_GIT_REPO_*
-- <<<CM_GIT_CHECKOUT_PATH_ENV_NAME>>>
+- MLC_GIT_CHECKOUT_PATH
+- MLC_GIT_REPO_*
+- <<<MLC_GIT_CHECKOUT_PATH_ENV_NAME>>>
 post_deps:
 - dynamic: true
   enable_if_env:
-    CM_GIT_REPO_PULL:
+    MLC_GIT_REPO_PULL:
     - 'yes'
     - 'True'
   force_env_keys:
-  - CM_GIT_CHECKOUT_PATH
+  - MLC_GIT_CHECKOUT_PATH
   names:
  - pull-git-repo
   tags: pull,git,repo
 print_env_at_the_end:
-  CM_GIT_CHECKOUT_PATH: CM cache path to the Git repo
+  MLC_GIT_CHECKOUT_PATH: CM cache path to the Git repo
 tags:
 - get
 - git
@@ -47,48 +47,48 @@ uid: ed603e7292974f10
 variations:
   branch.#:
     env:
-      CM_GIT_BRANCH: '#'
+      MLC_GIT_BRANCH: '#'
     group: checkout
   cherrypicks.#:
     env:
-      CM_GIT_CHERRYPICKS: '#'
+      MLC_GIT_CHERRYPICKS: '#'
  full-history:
    env:
-      CM_GIT_DEPTH: ''
+      MLC_GIT_DEPTH: ''
    group: git-history
  lfs:
    deps:
    - tags: get,generic,sys-util,_git-lfs
    env:
-      CM_GIT_REPO_NEEDS_LFS: 'yes'
+      MLC_GIT_REPO_NEEDS_LFS: 'yes'
  no-recurse-submodules:
    env:
-      CM_GIT_RECURSE_SUBMODULES: ''
+      MLC_GIT_RECURSE_SUBMODULES: ''
  patch:
    env:
-      CM_GIT_PATCH: 'yes'
+      MLC_GIT_PATCH: 'yes'
  pr-to-apply.#:
    env:
-      CM_GIT_PR_TO_APPLY: '#'
+      MLC_GIT_PR_TO_APPLY: '#'
  repo.#:
    env:
-      CM_GIT_URL: '#'
+      MLC_GIT_URL: '#'
    group: repo
  sha.#:
    default_variations:
      git-history: full-history
    env:
-      CM_GIT_SHA: '#'
+      MLC_GIT_SHA: '#'
    group: checkout
  short-history:
    default: true
    env:
-      CM_GIT_DEPTH: --depth 5
+      MLC_GIT_DEPTH: --depth 5
    group: git-history
  submodules.#:
    env:
-      CM_GIT_SUBMODULES: '#'
+      MLC_GIT_SUBMODULES: '#'
  tag.#:
    env:
-      CM_GIT_CHECKOUT_TAG: '#'
+      MLC_GIT_CHECKOUT_TAG: '#'
    group: checkout
diff --git a/script/get-git-repo/run.bat b/script/get-git-repo/run.bat
index d00f32b15..8a8003513 100644
--- a/script/get-git-repo/run.bat
+++ b/script/get-git-repo/run.bat
@@ -1,22 +1,22 @@
 @echo off
 
 rem echo ******************************************************
-rem echo Cloning MLCommons from %CM_GIT_URL% with branch %CM_GIT_CHECKOUT% %CM_GIT_DEPTH% %CM_GIT_RECURSE_SUBMODULES% ...
+rem echo Cloning MLCommons from %MLC_GIT_URL% with branch %MLC_GIT_CHECKOUT% %MLC_GIT_DEPTH% %MLC_GIT_RECURSE_SUBMODULES% ...
-rem git clone %CM_GIT_RECURSE_SUBMODULES% %CM_GIT_URL% %CM_GIT_DEPTH% inference
+rem git clone %MLC_GIT_RECURSE_SUBMODULES% %MLC_GIT_URL% %MLC_GIT_DEPTH% inference
 rem cd inference
-rem git checkout -b "%CM_GIT_CHECKOUT%"
+rem git checkout -b "%MLC_GIT_CHECKOUT%"
 
 rem
 rem Next line allows ERRORLEVEL inside if statements!
 setlocal enabledelayedexpansion
 
 set CUR_DIR=%cd%
-set SCRIPT_DIR=%CM_TMP_CURRENT_SCRIPT_PATH%
+set SCRIPT_DIR=%MLC_TMP_CURRENT_SCRIPT_PATH%
 
-set folder=%CM_GIT_CHECKOUT_FOLDER%
+set folder=%MLC_GIT_CHECKOUT_FOLDER%
 
-if not exist "%CM_TMP_GIT_PATH%" (
+if not exist "%MLC_TMP_GIT_PATH%" (
 
   if exist "%folder%" (
     rmdir /S /Q "%folder%"  rem Use rmdir instead of deltree
@@ -25,20 +25,20 @@ if not exist "%CM_TMP_GIT_PATH%" (
   echo ******************************************************
   echo Current directory: %CUR_DIR%
   echo.
-  echo Cloning %CM_GIT_REPO_NAME% from %CM_GIT_URL%
+  echo Cloning %MLC_GIT_REPO_NAME% from %MLC_GIT_URL%
   echo.
-  echo "%CM_GIT_CLONE_CMD%"
+  echo "%MLC_GIT_CLONE_CMD%"
   echo.
 
-  %CM_GIT_CLONE_CMD%
+  %MLC_GIT_CLONE_CMD%
   IF !ERRORLEVEL! NEQ 0 EXIT !ERRORLEVEL!
 
   cd "%folder%"
 
-  if not "%CM_GIT_SHA%" == "" (
+  if not "%MLC_GIT_SHA%" == "" (
     echo.
     echo.
-    git checkout "%CM_GIT_CHECKOUT%"
+    git checkout "%MLC_GIT_CHECKOUT%"
     IF !ERRORLEVEL! NEQ 0 EXIT !ERRORLEVEL!
   )
@@ -46,8 +46,8 @@ if not exist "%CM_TMP_GIT_PATH%" (
   cd "%folder%"
 )
 
-if not "%CM_GIT_SUBMODULES%" == "" (
-  for /F %%s in ("%CM_GIT_SUBMODULES%") do (
+if not "%MLC_GIT_SUBMODULES%" == "" (
+  for /F %%s in ("%MLC_GIT_SUBMODULES%") do (
    echo.
    echo Initializing submodule %%s
    git submodule update --init %%s
@@ -55,8 +55,8 @@ if not "%CM_GIT_SUBMODULES%" == "" (
  )
 )
 
-if "%CM_GIT_PATCH%" == "yes" (
-  for %%x in (%CM_GIT_PATCH_FILEPATHS%) do (
+if "%MLC_GIT_PATCH%" == "yes" (
+  for %%x in (%MLC_GIT_PATCH_FILEPATHS%) do (
    echo.
    echo Applying patch %%x ...
    git apply %%x
diff --git a/script/get-git-repo/run.sh b/script/get-git-repo/run.sh
index 2a7b0b51c..0e0c19324 100644
--- a/script/get-git-repo/run.sh
+++ b/script/get-git-repo/run.sh
@@ -2,53 +2,53 @@
 
 CUR_DIR=$PWD
 echo "$CUR_DIR"
-SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH}
+SCRIPT_DIR=${MLC_TMP_CURRENT_SCRIPT_PATH}
 
-folder=${CM_GIT_CHECKOUT_FOLDER}
-if [ ! -e "${CM_TMP_GIT_PATH}" ]; then
+folder=${MLC_GIT_CHECKOUT_FOLDER}
+if [ ! -e "${MLC_TMP_GIT_PATH}" ]; then
   cmd="rm -rf ${folder}"
   echo $cmd
   eval $cmd
   echo "******************************************************"
   echo "Current directory: ${CUR_DIR}"
   echo ""
-  echo "Cloning ${CM_GIT_REPO_NAME} from ${CM_GIT_URL}"
+  echo "Cloning ${MLC_GIT_REPO_NAME} from ${MLC_GIT_URL}"
   echo ""
-  echo "${CM_GIT_CLONE_CMD}";
+  echo "${MLC_GIT_CLONE_CMD}";
   echo ""
-  ${CM_GIT_CLONE_CMD}
+  ${MLC_GIT_CLONE_CMD}
   rcode=$?
   if [ ! $rcode -eq 0 ]; then #try once more
    rm -rf $folder
-   ${CM_GIT_CLONE_CMD}
+   ${MLC_GIT_CLONE_CMD}
    test $? -eq 0 || exit $?
  fi
 
  cd ${folder}
 
-  if [ ! -z ${CM_GIT_SHA} ]; then
+  if [ ! -z ${MLC_GIT_SHA} ]; then
    echo ""
-   cmd="git checkout -b ${CM_GIT_SHA} ${CM_GIT_SHA}"
+   cmd="git checkout -b ${MLC_GIT_SHA} ${MLC_GIT_SHA}"
    echo "$cmd"
    eval "$cmd"
    test $? -eq 0 || exit $?
-  elif [ ! -z ${CM_GIT_CHECKOUT_TAG} ]; then
+  elif [ ! -z ${MLC_GIT_CHECKOUT_TAG} ]; then
    echo ""
    cmd="git fetch --all --tags"
    echo "$cmd"
    eval "$cmd"
-   cmd="git checkout tags/${CM_GIT_CHECKOUT_TAG} -b ${CM_GIT_CHECKOUT_TAG}"
+   cmd="git checkout tags/${MLC_GIT_CHECKOUT_TAG} -b ${MLC_GIT_CHECKOUT_TAG}"
    echo "$cmd"
    eval "$cmd"
    test $? -eq 0 || exit $?
  else
-   cmd="git rev-parse HEAD >> ../tmp-cm-git-hash.out"
+   cmd="git rev-parse HEAD >> ../tmp-mlc-git-hash.out"
    echo "$cmd"
    eval "$cmd"
    test $? -eq 0 || exit $?
@@ -58,13 +58,13 @@ else
  cd ${folder}
 fi
 
-if [ ! -z ${CM_GIT_PR_TO_APPLY} ]; then
+if [ ! -z ${MLC_GIT_PR_TO_APPLY} ]; then
  echo ""
-  echo "Fetching from ${CM_GIT_PR_TO_APPLY}"
-  git fetch origin ${CM_GIT_PR_TO_APPLY}:tmp-apply
+  echo "Fetching from ${MLC_GIT_PR_TO_APPLY}"
+  git fetch origin ${MLC_GIT_PR_TO_APPLY}:tmp-apply
 fi
 
-IFS=',' read -r -a cherrypicks <<< "${CM_GIT_CHERRYPICKS}"
+IFS=',' read -r -a cherrypicks <<< "${MLC_GIT_CHERRYPICKS}"
 for cherrypick in "${cherrypicks[@]}"
 do
  echo ""
@@ -73,7 +73,7 @@ do
  test $? -eq 0 || exit $?
 done
 
-IFS=',' read -r -a submodules <<< "${CM_GIT_SUBMODULES}"
+IFS=',' read -r -a submodules <<< "${MLC_GIT_SUBMODULES}"
 
 for submodule in "${submodules[@]}"
 do
@@ -83,8 +83,8 @@ do
  test $? -eq 0 || exit $?
 done
 
-if [ ${CM_GIT_PATCH} == "yes" ]; then
-  IFS=', ' read -r -a patch_files <<< ${CM_GIT_PATCH_FILEPATHS}
+if [ ${MLC_GIT_PATCH} == "yes" ]; then
+  IFS=', ' read -r -a patch_files <<< ${MLC_GIT_PATCH_FILEPATHS}
  for patch_file in "${patch_files[@]}"
  do
    echo ""
diff --git a/script/get-github-cli/customize.py b/script/get-github-cli/customize.py
index bf6a19089..33b2c3cac 100644
--- a/script/get-github-cli/customize.py
+++ b/script/get-github-cli/customize.py
@@ -12,18 +12,18 @@ def preprocess(i):
 
     file_name = 'gh.exe' if os_info['platform'] == 'windows' else 'gh'
 
-    # Will check env['CM_TMP_PATH'] if comes from installation script
+    # Will check env['MLC_TMP_PATH'] if comes from installation script
     r = i['automation'].find_artifact({'file_name': file_name,
                                        'env': env,
                                        'os_info': os_info,
                                        'default_path_env_key': 'PATH',
                                        'detect_version': True,
-                                       'env_path_key': 'CM_GITHUBCLI_BIN_WITH_PATH',
+                                       'env_path_key': 'MLC_GITHUBCLI_BIN_WITH_PATH',
                                        'run_script_input': i['run_script_input'],
                                        'recursion_spaces': recursion_spaces})
     if r['return'] > 0:
         if r['return'] == 16:
-            if env.get('CM_TMP_FAIL_IF_NOT_FOUND', '').lower() == 'yes':
+            if env.get('MLC_TMP_FAIL_IF_NOT_FOUND', '').lower() == 'yes':
                 return r
 
             print(recursion_spaces + '    # {}'.format(r['error']))
@@ -47,7 +47,7 @@ def postprocess(i):
 
     r = i['automation'].parse_version({'match_text': r'gh\s*version\s*([\d.]+)',
                                        'group_number': 1,
-                                       'env_key': 'CM_GITHUBCLI_VERSION',
+                                       'env_key': 'MLC_GITHUBCLI_VERSION',
                                        'which_env': i['env']})
     if r['return'] > 0:
         return r
diff --git a/script/get-go/README-extra.md b/script/get-go/README-extra.md
index d1c4f9caa..327cee0a9 100644
--- a/script/get-go/README-extra.md
+++ b/script/get-go/README-extra.md
@@ -2,7 +2,7 @@
 This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed GO tool on the system.
 
 ## Exported Variables
-* `CM_GO_BIN_WITH_PATH`
+* `MLC_GO_BIN_WITH_PATH`
 * `+PATH`
 
 ## Supported and Tested OS
diff --git a/script/get-go/customize.py b/script/get-go/customize.py
index b3ccee3cc..95ff8630a 100644
--- a/script/get-go/customize.py
+++ b/script/get-go/customize.py
@@ -12,18 +12,18 @@ def preprocess(i):
     file_name = 'go.exe' if os_info['platform'] == 'windows' else 'go'
     env['FILE_NAME'] = file_name
 
-    if 'CM_GO_BIN_WITH_PATH' not in env:
+    if 'MLC_GO_BIN_WITH_PATH' not in env:
         r = i['automation'].find_artifact({'file_name': file_name,
                                            'env': env,
                                            'os_info': os_info,
                                            'default_path_env_key': 'PATH',
                                            'detect_version': True,
-                                           'env_path_key': 'CM_GO_BIN_WITH_PATH',
+                                           'env_path_key': 'MLC_GO_BIN_WITH_PATH',
                                            'run_script_input': i['run_script_input'],
                                            'recursion_spaces': recursion_spaces})
         if r['return'] > 0:
             if r['return'] == 16:
-                env['CM_REQUIRE_INSTALL'] = "yes"
+                env['MLC_REQUIRE_INSTALL'] = "yes"
                 return {'return': 0}
             else:
                 return r
@@ -34,7 +34,7 @@ def preprocess(i):
 def detect_version(i):
     r = i['automation'].parse_version({'match_text': r'\s+go([\d.]+)',
                                        'group_number': 1,
-                                       'env_key': 'CM_GO_VERSION',
+                                       'env_key': 'MLC_GO_VERSION',
                                        'which_env': i['env']})
     if r['return'] > 0:
         return r
@@ -54,11 +54,11 @@ def postprocess(i):
         return r
 
     version = r['version']
-    found_file_path = env['CM_GO_BIN_WITH_PATH']
+    found_file_path = env['MLC_GO_BIN_WITH_PATH']
     found_path = os.path.dirname(found_file_path)
-    env['CM_GO_INSTALLED_PATH'] = found_path
+    env['MLC_GO_INSTALLED_PATH'] = found_path
 
-    env['CM_GO_CACHE_TAGS'] = 'version-' + version
+    env['MLC_GO_CACHE_TAGS'] = 'version-' + version
 
     return {'return': 0, 'version': version}
diff --git a/script/get-go/meta.yaml b/script/get-go/meta.yaml
index f7c5c89d2..8f73f7284 100644
--- a/script/get-go/meta.yaml
+++ b/script/get-go/meta.yaml
@@ -5,13 +5,13 @@ cache: true
 category: Compiler automation
 clean_files: []
 env:
-  CM_REQUIRE_INSTALL: 'no'
+  MLC_REQUIRE_INSTALL: 'no'
 new_env_keys:
-- CM_GO_*
+- MLC_GO_*
 - +PATH
 prehook_deps:
 - enable_if_env:
-    CM_REQUIRE_INSTALL:
+    MLC_REQUIRE_INSTALL:
     - 'yes'
   reuse_version: true
   tags: install,go
diff --git a/script/get-google-saxml/meta.yaml b/script/get-google-saxml/meta.yaml
index 2e2db0f88..015ddddcc 100644
--- a/script/get-google-saxml/meta.yaml
+++ b/script/get-google-saxml/meta.yaml
@@ -20,10 +20,10 @@ deps:
   tags: get,python3
 - env:
-    CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_GOOGLE_SAXML_SRC
+    MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_GOOGLE_SAXML_SRC
   extra_cache_tags: google,saxsml,src
   force_env_keys:
-  - CM_GIT_CHECKOUT
+  - MLC_GIT_CHECKOUT
   names:
  - google-saxml-git-src
   tags: get,git,_repo.https://github.com/google/saxml
@@ -33,11 +33,11 @@ deps:
  - bazel
 
 extra_cache_tags_from_env:
-- env: CM_PYTHON_CACHE_TAGS
+- env: MLC_PYTHON_CACHE_TAGS
   prefix: python-
 
 new_env_keys:
-- CM_GOOGLE_SAXML*
+- MLC_GOOGLE_SAXML*
 
 tags:
 - get
diff --git a/script/get-google-test/customize.py b/script/get-google-test/customize.py
index a22c59349..cb1be8fe5 100644
--- a/script/get-google-test/customize.py
+++ b/script/get-google-test/customize.py
@@ -12,8 +12,8 @@ def preprocess(i):
 
     automation = i['automation']
 
-    env['CM_GIT_CHECKOUT'] = "v" + env['CM_VERSION']
-    quiet = (env.get('CM_QUIET', False) == 'yes')
+    env['MLC_GIT_CHECKOUT'] = "v" + env['MLC_VERSION']
+    quiet = (env.get('MLC_QUIET', False) == 'yes')
 
     return {'return': 0}
 
@@ -27,8 +27,8 @@ def postprocess(i):
         env['+LD_LIBRARY_PATH'] = []
 
     gtest_install_path = os.path.join(os.getcwd(), "install")
-    env['CM_GOOGLE_TEST_SRC_PATH'] = env['CM_GIT_REPO_CHECKOUT_PATH']
-    env['CM_GOOGLE_TEST_INSTALL_PATH'] = gtest_install_path
+    env['MLC_GOOGLE_TEST_SRC_PATH'] = env['MLC_GIT_REPO_CHECKOUT_PATH']
+    env['MLC_GOOGLE_TEST_INSTALL_PATH'] = gtest_install_path
 
     env['+C_INCLUDE_PATH'].append(os.path.join(gtest_install_path, "include"))
     env['+LD_LIBRARY_PATH'].append(os.path.join(gtest_install_path, "lib"))
diff --git a/script/get-google-test/meta.yaml b/script/get-google-test/meta.yaml
index 68de9dbfd..18af534a1 100644
--- a/script/get-google-test/meta.yaml
+++ b/script/get-google-test/meta.yaml
@@ -14,8 +14,8 @@ deps:
 input_description: {}
 input_mapping: {}
 new_env_keys:
-- CM_GOOGLE_TEST_SRC_PATH
-- CM_GOOGLE_TEST_INSTALL_PATH
+- MLC_GOOGLE_TEST_SRC_PATH
+- MLC_GOOGLE_TEST_INSTALL_PATH
 - +C_INCLUDE_PATH
 - +LD_LIBRARY_PATH
 new_state_keys: []
@@ -24,7 +24,7 @@ posthook_deps: []
 prehook_deps:
 - extra_cache_tags: google-test,gtest
   force_env_keys:
-  - CM_GIT_*
+  - MLC_GIT_*
   tags: get,git,repo,_repo.https://github.com/google/googletest.git
 tags:
 - get
diff --git a/script/get-google-test/run.sh b/script/get-google-test/run.sh
index c8a9a4425..eaf2eb367 100644
--- a/script/get-google-test/run.sh
+++ b/script/get-google-test/run.sh
@@ -1,19 +1,19 @@
 #!/bin/bash
 
 function cmake() {
-${CM_CMAKE_BIN_WITH_PATH} $@
+${MLC_CMAKE_BIN_WITH_PATH} $@
 }
 
-export CC=${CM_C_COMPILER_WITH_PATH}
-export CXX=${CM_CXX_COMPILER_WITH_PATH}
+export CC=${MLC_C_COMPILER_WITH_PATH}
+export CXX=${MLC_CXX_COMPILER_WITH_PATH}
 
 CUR=$PWD
 mkdir -p install
 INSTALL_DIR=$CUR/install
 
-cd ${CM_GIT_REPO_CHECKOUT_PATH}
+cd ${MLC_GIT_REPO_CHECKOUT_PATH}
 mkdir build
 cd build
-export MAKEFLAGS=-j${CM_MAKE_CORES}
+export MAKEFLAGS=-j${MLC_MAKE_CORES}
 cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_DIR} ..
 test $? -eq 0 || exit $?
diff --git a/script/get-huggingface-cli/customize.py b/script/get-huggingface-cli/customize.py
index 56fcb0761..03fc5b753 100644
--- a/script/get-huggingface-cli/customize.py
+++ b/script/get-huggingface-cli/customize.py
@@ -4,11 +4,11 @@ def preprocess(i):
     env = i['env']
-    if env.get('CM_HF_TOKEN', '') != '':
-        env['CM_HF_LOGIN_CMD'] = f"""git config --global credential.helper store && huggingface-cli login --token {env['CM_HF_TOKEN']} --add-to-git-credential
+    if env.get('MLC_HF_TOKEN', '') != '':
+        env['MLC_HF_LOGIN_CMD'] = f"""git config --global credential.helper store && huggingface-cli login --token {env['MLC_HF_TOKEN']} --add-to-git-credential
 """
-    elif str(env.get('CM_HF_DO_LOGIN')).lower() in ["yes", "1", "true"]:
-        env['CM_HF_LOGIN_CMD'] = f"""git config --global credential.helper store && huggingface-cli login
+    elif str(env.get('MLC_HF_DO_LOGIN')).lower() in ["yes", "1", "true"]:
+        env['MLC_HF_LOGIN_CMD'] = f"""git config --global credential.helper store && huggingface-cli login
 """
     return {'return': 0}
@@ -18,7 +18,7 @@ def postprocess(i):
 
     r = i['automation'].parse_version({'match_text': r'huggingface_hub\s*version:\s*([\d.]+)',
                                        'group_number': 1,
-                                       'env_key': 'CM_GITHUBCLI_VERSION',
+                                       'env_key': 'MLC_GITHUBCLI_VERSION',
                                        'which_env': i['env']})
     if r['return'] > 0:
         return r
diff --git a/script/get-huggingface-cli/meta.yaml b/script/get-huggingface-cli/meta.yaml
index 6643eb222..aad5f7aa1 100644
--- a/script/get-huggingface-cli/meta.yaml
+++ b/script/get-huggingface-cli/meta.yaml
@@ -14,7 +14,7 @@ tags:
 - huggingface-cli
 - cli
 input_mapping:
-  token: CM_HF_TOKEN
+  token: MLC_HF_TOKEN
 uid: e9488a272f1d4160
 deps:
 - tags: get,generic-python-lib,_package.huggingface_hub[cli]
@@ -23,4 +23,4 @@ variations:
     cache: true
     force_cache: true
     env:
-      CM_HF_DO_LOGIN: yes
+      MLC_HF_DO_LOGIN: yes
diff --git a/script/get-huggingface-cli/run.bat b/script/get-huggingface-cli/run.bat
index 464afe5c7..97c90f089 100644
--- a/script/get-huggingface-cli/run.bat
+++ b/script/get-huggingface-cli/run.bat
@@ -1,8 +1,8 @@
 @echo off
-REM Check if the environment variable CM_HF_LOGIN_CMD is defined and not empty
-IF DEFINED CM_HF_LOGIN_CMD (
-    echo %CM_HF_LOGIN_CMD%
-    call %CM_HF_LOGIN_CMD%
+REM Check if the environment variable MLC_HF_LOGIN_CMD is defined and not empty
+IF DEFINED MLC_HF_LOGIN_CMD (
+    echo %MLC_HF_LOGIN_CMD%
+    call %MLC_HF_LOGIN_CMD%
     IF ERRORLEVEL 1 (
         echo Command failed with error code %ERRORLEVEL%
         exit /b %ERRORLEVEL%
diff --git a/script/get-huggingface-cli/run.sh b/script/get-huggingface-cli/run.sh
index 43d20f367..cb1d022ee 100644
--- a/script/get-huggingface-cli/run.sh
+++ b/script/get-huggingface-cli/run.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
-if [[ -n ${CM_HF_LOGIN_CMD} ]]; then
-  echo "${CM_HF_LOGIN_CMD}"
-  eval ${CM_HF_LOGIN_CMD}
+if [[ -n ${MLC_HF_LOGIN_CMD} ]]; then
+  echo "${MLC_HF_LOGIN_CMD}"
+  eval ${MLC_HF_LOGIN_CMD}
   test $? -eq 0 || exit $?
 fi
 huggingface-cli version > tmp-ver.out
diff --git a/script/get-ipol-src/customize.py b/script/get-ipol-src/customize.py
index 9291872cf..4de9d4f9c 100644
--- a/script/get-ipol-src/customize.py
+++ b/script/get-ipol-src/customize.py
@@ -16,15 +16,15 @@ def preprocess(i):
 
     path = os.getcwd()
 
-    url = env['CM_IPOL_SRC_URL']
+    url = env['MLC_IPOL_SRC_URL']
 
-    year = env.get('CM_IPOL_YEAR', '')
-    number = env.get('CM_IPOL_NUMBER', '')
+    year = env.get('MLC_IPOL_YEAR', '')
+    number = env.get('MLC_IPOL_NUMBER', '')
 
     url = url.replace(
-        '{{CM_IPOL_YEAR}}',
+        '{{MLC_IPOL_YEAR}}',
         year).replace(
-        '{{CM_IPOL_NUMBER}}',
+        '{{MLC_IPOL_NUMBER}}',
         number)
 
     print('Downloading from {}'.format(url))
@@ -54,7 +54,7 @@ def preprocess(i):
 
     subdir = ff[0]
 
-    env['CM_IPOL_PATH'] = os.path.join(path, subdir)
+    env['MLC_IPOL_PATH'] = os.path.join(path, subdir)
 
     # Applying patch
     cmd = 'patch -p0 < {}'.format(os.path.join(script_path,
diff --git a/script/get-ipol-src/meta.yaml b/script/get-ipol-src/meta.yaml
index dd6b6ca0d..ac64d8971 100644
--- a/script/get-ipol-src/meta.yaml
+++ b/script/get-ipol-src/meta.yaml
@@ -4,22 +4,22 @@ automation_uid: 5b4e0237da074764
 cache: true
 category: Reproducibility and artifact evaluation
 env:
-  CM_IPOL_NUMBER: '439'
-  CM_IPOL_SRC_URL: http://www.ipol.im/pub/art/{{CM_IPOL_YEAR}}/{{CM_IPOL_NUMBER}}/{{CM_IPOL_NUMBER}}-master.zip
-  CM_IPOL_YEAR: '2022'
+  MLC_IPOL_NUMBER: '439'
+  MLC_IPOL_SRC_URL: http://www.ipol.im/pub/art/{{MLC_IPOL_YEAR}}/{{MLC_IPOL_NUMBER}}/{{MLC_IPOL_NUMBER}}-master.zip
+  MLC_IPOL_YEAR: '2022'
 extra_cache_tags_from_env:
-- env: CM_IPOL_NUMBER
+- env: MLC_IPOL_NUMBER
   prefix: number-
-- env: CM_IPOL_YEAR
+- env: MLC_IPOL_YEAR
   prefix: year-
 input_description:
   number: IPOL publication number
   year: IPOL publication year
 input_mapping:
-  number: CM_IPOL_NUMBER
-  year: CM_IPOL_YEAR
+  number: MLC_IPOL_NUMBER
+  year: MLC_IPOL_YEAR
 new_env_keys:
-- CM_IPOL_*
+- MLC_IPOL_*
 tags:
 - get
 - ipol
diff --git a/script/get-java/customize.py b/script/get-java/customize.py
index b82010d8d..199d1b23d 100644
--- a/script/get-java/customize.py
+++ b/script/get-java/customize.py
@@ -21,9 +21,9 @@ def preprocess(i):
     meta = i['meta']
 
     found = False
-    install = env.get('CM_JAVA_PREBUILT_INSTALL', '') in ['on', 'True', True]
+    install = env.get('MLC_JAVA_PREBUILT_INSTALL', '') in ['on', 'True', True]
 
-    env_path_key = 'CM_JAVA_BIN_WITH_PATH'
+    env_path_key = 'MLC_JAVA_BIN_WITH_PATH'
 
     # If not force install, search for artifact
     if not install:
@@ -45,27 +45,27 @@ def preprocess(i):
     if not found or install:
 
         if os_info['platform'] == 'windows':
-            env['CM_JAVA_PREBUILT_HOST_OS'] = 'windows'
-            env['CM_JAVA_PREBUILT_EXT'] = '.zip'
+            env['MLC_JAVA_PREBUILT_HOST_OS'] = 'windows'
+            env['MLC_JAVA_PREBUILT_EXT'] = '.zip'
         else:
-            env['CM_JAVA_PREBUILT_HOST_OS'] = 'linux'
-            env['CM_JAVA_PREBUILT_EXT'] = '.tar.gz'
+            env['MLC_JAVA_PREBUILT_HOST_OS'] = 'linux'
+            env['MLC_JAVA_PREBUILT_EXT'] = '.tar.gz'
 
-        url = env['CM_JAVA_PREBUILT_URL']
-        filename = env['CM_JAVA_PREBUILT_FILENAME']
+        url = env['MLC_JAVA_PREBUILT_URL']
+        filename = env['MLC_JAVA_PREBUILT_FILENAME']
 
-        java_prebuilt_version = env['CM_JAVA_PREBUILT_VERSION']
-        java_prebuilt_build = env['CM_JAVA_PREBUILT_BUILD']
+        java_prebuilt_version = env['MLC_JAVA_PREBUILT_VERSION']
+        java_prebuilt_build = env['MLC_JAVA_PREBUILT_BUILD']
 
-        for key in ['CM_JAVA_PREBUILT_VERSION',
-                    'CM_JAVA_PREBUILT_BUILD',
-                    'CM_JAVA_PREBUILT_HOST_OS',
-                    'CM_JAVA_PREBUILT_EXT']:
+        for key in ['MLC_JAVA_PREBUILT_VERSION',
+                    'MLC_JAVA_PREBUILT_BUILD',
+                    'MLC_JAVA_PREBUILT_HOST_OS',
+                    'MLC_JAVA_PREBUILT_EXT']:
             url = url.replace('${' + key + '}', env[key])
             filename = filename.replace('${' + key + '}', env[key])
 
-        env['CM_JAVA_PREBUILT_URL'] = url
-        env['CM_JAVA_PREBUILT_FILENAME'] = filename
+        env['MLC_JAVA_PREBUILT_URL'] = url
+        env['MLC_JAVA_PREBUILT_FILENAME'] = filename
 
         print('')
         print(
@@ -118,7 +118,7 @@ def detect_version(i):
 
     r = i['automation'].parse_version({'match_text': r'\s*"(.*?)"',
                                        'group_number': 1,
-                                       'env_key': 'CM_JAVA_VERSION',
+                                       'env_key': 'MLC_JAVA_VERSION',
                                        'which_env': i['env'],
                                        'debug': True})
     if r['return'] > 0:
@@ -138,13 +138,13 @@ def postprocess(i):
     if r['return'] > 0:
         return r
 
-    version = env['CM_JAVA_VERSION']
-    env['CM_JAVA_CACHE_TAGS'] = 'version-' + version
+    version = env['MLC_JAVA_VERSION']
+    env['MLC_JAVA_CACHE_TAGS'] = 'version-' + version
 
-    found_file_path = env['CM_JAVA_BIN_WITH_PATH']
+    found_file_path = env['MLC_JAVA_BIN_WITH_PATH']
 
     file_name = os.path.basename(found_file_path)
-    env['CM_JAVA_BIN'] = file_name
+    env['MLC_JAVA_BIN'] = file_name
 
     found_path = os.path.dirname(found_file_path)
     java_home_path = os.path.dirname(found_path)
diff --git a/script/get-java/install-prebuilt.bat b/script/get-java/install-prebuilt.bat
index 17b00e5ab..beada62d3 100644
--- a/script/get-java/install-prebuilt.bat
+++ b/script/get-java/install-prebuilt.bat
@@ -1,9 +1,9 @@
-del /Q %CM_JAVA_PREBUILT_FILENAME%.zip
+del /Q %MLC_JAVA_PREBUILT_FILENAME%.zip
 
-wget --no-check-certificate %CM_JAVA_PREBUILT_URL%%CM_JAVA_PREBUILT_FILENAME%.zip
+wget --no-check-certificate %MLC_JAVA_PREBUILT_URL%%MLC_JAVA_PREBUILT_FILENAME%.zip
 IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
 
-unzip %CM_JAVA_PREBUILT_FILENAME%.zip
+unzip %MLC_JAVA_PREBUILT_FILENAME%.zip
 IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
 
-del /Q %CM_JAVA_PREBUILT_FILENAME%.zip
+del /Q %MLC_JAVA_PREBUILT_FILENAME%.zip
diff --git a/script/get-java/install-prebuilt.sh b/script/get-java/install-prebuilt.sh
index 575d0467e..a037c60ef 100644
--- a/script/get-java/install-prebuilt.sh
+++ b/script/get-java/install-prebuilt.sh
@@ -1,15 +1,15 @@
 #!/bin/bash
 
-rm -f ${CM_JAVA_PREBUILT_FILENAME}.tar.gz
-rm -f ${CM_JAVA_PREBUILT_FILENAME}.tar
+rm -f ${MLC_JAVA_PREBUILT_FILENAME}.tar.gz
+rm -f ${MLC_JAVA_PREBUILT_FILENAME}.tar
 
-wget --no-check-certificate ${CM_JAVA_PREBUILT_URL}${CM_JAVA_PREBUILT_FILENAME}.tar.gz
+wget --no-check-certificate ${MLC_JAVA_PREBUILT_URL}${MLC_JAVA_PREBUILT_FILENAME}.tar.gz
 test $? -eq 0 || exit 1
 
-gzip -d ${CM_JAVA_PREBUILT_FILENAME}.tar.gz
+gzip -d ${MLC_JAVA_PREBUILT_FILENAME}.tar.gz
 test $? -eq 0 || exit 1
 
-tar xvf ${CM_JAVA_PREBUILT_FILENAME}.tar
+tar xvf ${MLC_JAVA_PREBUILT_FILENAME}.tar
 test $? -eq 0 || exit 1
 
-rm -f ${CM_JAVA_PREBUILT_FILENAME}.tar
+rm -f ${MLC_JAVA_PREBUILT_FILENAME}.tar
diff --git a/script/get-java/meta.yaml b/script/get-java/meta.yaml
index 07facec86..434d34a00 100644
--- a/script/get-java/meta.yaml
+++ b/script/get-java/meta.yaml
@@ -4,16 +4,16 @@ automation_uid: 5b4e0237da074764
 cache: true
 category: Detection or installation of tools and artifacts
 default_env:
-  CM_JAVA_PREBUILT_BUILD: '36'
-  CM_JAVA_PREBUILT_FILENAME: openjdk-${CM_JAVA_PREBUILT_VERSION}+${CM_JAVA_PREBUILT_BUILD}_${CM_JAVA_PREBUILT_HOST_OS}-x64_bin
-  CM_JAVA_PREBUILT_URL: https://download.java.net/openjdk/jdk${CM_JAVA_PREBUILT_VERSION}/ri/
-  CM_JAVA_PREBUILT_VERSION: '19'
+  MLC_JAVA_PREBUILT_BUILD: '36'
+  MLC_JAVA_PREBUILT_FILENAME: openjdk-${MLC_JAVA_PREBUILT_VERSION}+${MLC_JAVA_PREBUILT_BUILD}_${MLC_JAVA_PREBUILT_HOST_OS}-x64_bin
+  MLC_JAVA_PREBUILT_URL: https://download.java.net/openjdk/jdk${MLC_JAVA_PREBUILT_VERSION}/ri/
+  MLC_JAVA_PREBUILT_VERSION: '19'
 deps:
 - tags: detect,os
 input_mapping:
-  install: CM_JAVA_PREBUILT_INSTALL
+  install: MLC_JAVA_PREBUILT_INSTALL
 new_env_keys:
-- CM_JAVA_*
+- MLC_JAVA_*
 - JAVA_HOME
 - +PATH
 tags:
@@ -23,4 +23,4 @@ uid: 9399d0e785704f8c
 variations:
   install:
     env:
-      CM_JAVA_PREBUILT_INSTALL: 'on'
+      MLC_JAVA_PREBUILT_INSTALL: 'on'
diff --git a/script/get-java/run.bat b/script/get-java/run.bat
index 0a80aa34c..807efafe4 100644
--- a/script/get-java/run.bat
+++ b/script/get-java/run.bat
@@ -1,3 +1,3 @@
-"%CM_JAVA_BIN_WITH_PATH%" -version > tmp-ver.out 2>&1
+"%MLC_JAVA_BIN_WITH_PATH%" -version > tmp-ver.out 2>&1
 IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/get-java/run.sh b/script/get-java/run.sh
index 566a2b569..ddb8cb04b 100644
--- a/script/get-java/run.sh
+++ b/script/get-java/run.sh
@@ -1,3 +1,3 @@
 #!/bin/bash
-${CM_JAVA_BIN_WITH_PATH} -version &> tmp-ver.out
+${MLC_JAVA_BIN_WITH_PATH} -version &> tmp-ver.out
 test $? -eq 0 || exit 1
diff --git a/script/get-javac/customize.py b/script/get-javac/customize.py
index 4a1aa03f9..b039c2483 100644
--- a/script/get-javac/customize.py
+++ b/script/get-javac/customize.py
@@ -21,9 +21,9 @@ def preprocess(i):
     meta = i['meta']
 
     found = False
-    install = env.get('CM_JAVAC_PREBUILT_INSTALL', '') in ['on', 'True', True]
+    install = env.get('MLC_JAVAC_PREBUILT_INSTALL', '') in ['on', 'True', True]
 
-    env_path_key = 'CM_JAVAC_BIN_WITH_PATH'
+    env_path_key = 'MLC_JAVAC_BIN_WITH_PATH'
 
     # If not force install, search for artifact
     if not install:
@@ -45,27 +45,27 @@ def preprocess(i):
     if not found or install:
 
         if os_info['platform'] == 'windows':
-            env['CM_JAVAC_PREBUILT_HOST_OS'] = 'windows'
-            env['CM_JAVAC_PREBUILT_EXT'] = '.zip'
+            env['MLC_JAVAC_PREBUILT_HOST_OS'] = 'windows'
+            env['MLC_JAVAC_PREBUILT_EXT'] = '.zip'
         else:
-            env['CM_JAVAC_PREBUILT_HOST_OS'] = 'linux'
-            env['CM_JAVAC_PREBUILT_EXT'] = '.tar.gz'
+            env['MLC_JAVAC_PREBUILT_HOST_OS'] = 'linux'
+            env['MLC_JAVAC_PREBUILT_EXT'] = '.tar.gz'
 
-        url = env['CM_JAVAC_PREBUILT_URL']
-        filename = env['CM_JAVAC_PREBUILT_FILENAME']
+        url = env['MLC_JAVAC_PREBUILT_URL']
+        filename = env['MLC_JAVAC_PREBUILT_FILENAME']
 
-        javac_prebuilt_version = env['CM_JAVAC_PREBUILT_VERSION']
-        javac_prebuilt_build = env['CM_JAVAC_PREBUILT_BUILD']
+        javac_prebuilt_version = env['MLC_JAVAC_PREBUILT_VERSION']
+        javac_prebuilt_build = env['MLC_JAVAC_PREBUILT_BUILD']
 
-        for key in ['CM_JAVAC_PREBUILT_VERSION',
-                    'CM_JAVAC_PREBUILT_BUILD',
-                    'CM_JAVAC_PREBUILT_HOST_OS',
-                    'CM_JAVAC_PREBUILT_EXT']:
+        for key in ['MLC_JAVAC_PREBUILT_VERSION',
+                    'MLC_JAVAC_PREBUILT_BUILD',
+                    'MLC_JAVAC_PREBUILT_HOST_OS',
+                    'MLC_JAVAC_PREBUILT_EXT']:
             url = url.replace('${' + key + '}', env[key])
             filename = filename.replace('${' + key + '}', env[key])
 
-        env['CM_JAVAC_PREBUILT_URL'] = url
-        env['CM_JAVAC_PREBUILT_FILENAME'] = filename
+        env['MLC_JAVAC_PREBUILT_URL'] = url
+        env['MLC_JAVAC_PREBUILT_FILENAME'] = filename
 
         print('')
         print(
@@ -118,7 +118,7 @@ def detect_version(i):
 
     r = i['automation'].parse_version({'match_text': r'javac\s*([\d.]+)',
                                        'group_number': 1,
-                                       'env_key': 'CM_JAVAC_VERSION',
+                                       'env_key': 'MLC_JAVAC_VERSION',
                                        'which_env': i['env'],
                                        'debug': True})
     if r['return'] > 0:
@@ -140,21 +140,21 @@ def postprocess(i):
     if r['return'] > 0:
         return r
 
-    version = env['CM_JAVAC_VERSION']
-    env['CM_JAVAC_CACHE_TAGS'] = 'version-' + version
+    version = env['MLC_JAVAC_VERSION']
+    env['MLC_JAVAC_CACHE_TAGS'] = 'version-' + version
 
-    found_file_path = env['CM_JAVAC_BIN_WITH_PATH']
+    found_file_path = env['MLC_JAVAC_BIN_WITH_PATH']
 
     file_name = os.path.basename(found_file_path)
     file_path = os.path.dirname(found_file_path)
-    env['CM_JAVAC_BIN'] = file_name
+    env['MLC_JAVAC_BIN'] = file_name
 
     if os_info['platform'] == 'windows':
-        env['CM_JAVA_BIN'] = 'java.exe'
+        env['MLC_JAVA_BIN'] = 'java.exe'
     else:
-        env['CM_JAVA_BIN'] = 'java'
+        env['MLC_JAVA_BIN'] = 'java'
 
-    env['CM_JAVA_BIN_WITH_PATH'] = os.path.join(file_path, env['CM_JAVA_BIN'])
+    env['MLC_JAVA_BIN_WITH_PATH'] = os.path.join(file_path, env['MLC_JAVA_BIN'])
 
     found_path = os.path.dirname(found_file_path)
     javac_home_path = os.path.dirname(found_path)
diff --git a/script/get-javac/install-prebuilt.bat b/script/get-javac/install-prebuilt.bat
index 74b1c4812..fdec0a335 100644
--- a/script/get-javac/install-prebuilt.bat
+++ b/script/get-javac/install-prebuilt.bat
@@ -1,9 +1,9 @@
-del /Q %CM_JAVAC_PREBUILT_FILENAME%.zip
+del /Q %MLC_JAVAC_PREBUILT_FILENAME%.zip
 
-wget --no-check-certificate %CM_JAVAC_PREBUILT_URL%%CM_JAVAC_PREBUILT_FILENAME%.zip
+wget --no-check-certificate %MLC_JAVAC_PREBUILT_URL%%MLC_JAVAC_PREBUILT_FILENAME%.zip
 IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
 
-unzip %CM_JAVAC_PREBUILT_FILENAME%.zip
+unzip %MLC_JAVAC_PREBUILT_FILENAME%.zip
 IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
 
-del /Q %CM_JAVAC_PREBUILT_FILENAME%.zip
+del /Q %MLC_JAVAC_PREBUILT_FILENAME%.zip
diff --git a/script/get-javac/install-prebuilt.sh b/script/get-javac/install-prebuilt.sh
index eed1b8b01..96db5c87d 100644
--- a/script/get-javac/install-prebuilt.sh
+++ b/script/get-javac/install-prebuilt.sh
@@ -1,15 +1,15 @@
 #!/bin/bash
 
-rm -f ${CM_JAVAC_PREBUILT_FILENAME}.tar.gz
-rm -f ${CM_JAVAC_PREBUILT_FILENAME}.tar
+rm -f ${MLC_JAVAC_PREBUILT_FILENAME}.tar.gz
+rm -f ${MLC_JAVAC_PREBUILT_FILENAME}.tar
 
-wget --no-check-certificate ${CM_JAVAC_PREBUILT_URL}${CM_JAVAC_PREBUILT_FILENAME}.tar.gz
+wget --no-check-certificate ${MLC_JAVAC_PREBUILT_URL}${MLC_JAVAC_PREBUILT_FILENAME}.tar.gz
 test $? -eq 0 || exit 1
 
-gzip -d ${CM_JAVAC_PREBUILT_FILENAME}.tar.gz
+gzip -d ${MLC_JAVAC_PREBUILT_FILENAME}.tar.gz
 test $? -eq 0 || exit 1
 
-tar xvf ${CM_JAVAC_PREBUILT_FILENAME}.tar
+tar xvf ${MLC_JAVAC_PREBUILT_FILENAME}.tar
 test $? -eq 0 || exit 1
 
-rm -f ${CM_JAVAC_PREBUILT_FILENAME}.tar
+rm -f ${MLC_JAVAC_PREBUILT_FILENAME}.tar
diff --git a/script/get-javac/meta.yaml b/script/get-javac/meta.yaml
index 89ffaf779..30f20360d 100644
--- a/script/get-javac/meta.yaml
+++ b/script/get-javac/meta.yaml
@@ -4,17 +4,17 @@ automation_uid: 5b4e0237da074764
 cache: true
 category: Detection or installation of tools and artifacts
 default_env:
-  CM_JAVAC_PREBUILT_BUILD: '36'
-  CM_JAVAC_PREBUILT_FILENAME: openjdk-${CM_JAVAC_PREBUILT_VERSION}+${CM_JAVAC_PREBUILT_BUILD}_${CM_JAVAC_PREBUILT_HOST_OS}-x64_bin
-  CM_JAVAC_PREBUILT_URL: https://download.java.net/openjdk/jdk${CM_JAVAC_PREBUILT_VERSION}/ri/
-  CM_JAVAC_PREBUILT_VERSION: '19'
+  MLC_JAVAC_PREBUILT_BUILD: '36'
+  MLC_JAVAC_PREBUILT_FILENAME: openjdk-${MLC_JAVAC_PREBUILT_VERSION}+${MLC_JAVAC_PREBUILT_BUILD}_${MLC_JAVAC_PREBUILT_HOST_OS}-x64_bin
+  MLC_JAVAC_PREBUILT_URL: https://download.java.net/openjdk/jdk${MLC_JAVAC_PREBUILT_VERSION}/ri/
+  MLC_JAVAC_PREBUILT_VERSION: '19'
 deps:
 - tags: detect,os
 input_mapping:
-  install: CM_JAVAC_PREBUILT_INSTALL
+  install: MLC_JAVAC_PREBUILT_INSTALL
 new_env_keys:
-- CM_JAVAC_*
-- CM_JAVA_*
+- MLC_JAVAC_*
+- MLC_JAVA_*
 - JAVA_HOME
 - +PATH
 tags:
@@ -24,4 +24,4 @@ uid: 509280c497b24226
 variations:
   install:
     env:
-      CM_JAVAC_PREBUILT_INSTALL: 'on'
+      MLC_JAVAC_PREBUILT_INSTALL: 'on'
diff --git a/script/get-javac/run.bat b/script/get-javac/run.bat
index 1919f559c..75b6bdcc8 100644
--- a/script/get-javac/run.bat
+++ b/script/get-javac/run.bat
@@ -1,3 +1,3 @@
-"%CM_JAVAC_BIN_WITH_PATH%" -version > tmp-ver.out 2>&1
+"%MLC_JAVAC_BIN_WITH_PATH%" -version > tmp-ver.out 2>&1
 IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/get-javac/run.sh b/script/get-javac/run.sh
index 40f97218d..418a3230f 100644
--- a/script/get-javac/run.sh
+++ b/script/get-javac/run.sh
@@ -1,3 +1,3 @@
 #!/bin/bash
-${CM_JAVAC_BIN_WITH_PATH} -version &> tmp-ver.out
+${MLC_JAVAC_BIN_WITH_PATH} -version &> tmp-ver.out
 test $? -eq 0 || exit 1
diff --git a/script/get-lib-armnn/customize.py b/script/get-lib-armnn/customize.py
index c365543c7..263850444 100644
--- a/script/get-lib-armnn/customize.py
+++ b/script/get-lib-armnn/customize.py
@@ -8,16 +8,16 @@ def preprocess(i):
         return {'return': 1, 'error': 'Windows is not supported in this script yet'}
 
     env = i['env']
-    version = env['CM_LIB_ARMNN_VERSION']
-    if env.get('CM_HOST_PLATFORM_FLAVOR', '') == 'x86_64':
+    version = env['MLC_LIB_ARMNN_VERSION']
+    if env.get('MLC_HOST_PLATFORM_FLAVOR', '') == 'x86_64':
         url = f"https://github.com/ARM-software/armnn/releases/download/{version}/ArmNN-linux-x86_64.tar.gz"
-    elif env.get('CM_HOST_PLATFORM_FLAVOR', '') == 'aarch64':
+    elif env.get('MLC_HOST_PLATFORM_FLAVOR', '') == 'aarch64':
         url = f"https://github.com/ARM-software/armnn/releases/download/{version}/ArmNN-linux-aarch64.tar.gz"
 
-    env['CM_LIB_ARMNN_PREBUILT_BINARY_URL'] = url
-    env['CM_LIB_ARMNN_EXTRACT_FILENAME'] = os.path.basename(url)
+    env['MLC_LIB_ARMNN_PREBUILT_BINARY_URL'] = url
+    env['MLC_LIB_ARMNN_EXTRACT_FILENAME'] = os.path.basename(url)
 
-    env['CM_GIT_CHECKOUT'] = env['CM_TMP_GIT_BRANCH_NAME']
+    env['MLC_GIT_CHECKOUT'] = env['MLC_TMP_GIT_BRANCH_NAME']
 
     return {'return': 0}
 
@@ -37,7 +37,7 @@ def postprocess(i):
             env[key] = []
 
     include_paths = []
-    armnn_src_path = env['CM_GIT_CHECKOUT_PATH']
+    armnn_src_path = env['MLC_GIT_CHECKOUT_PATH']
     include_paths.append(os.path.join(os.getcwd(), 'include'))
     include_paths.append(os.path.join(armnn_src_path, 'include'))
     include_paths.append(os.path.join(armnn_src_path, 'profiling'))
diff --git a/script/get-lib-armnn/meta.yaml b/script/get-lib-armnn/meta.yaml
index df9a42a4d..6c33e97ed 100644
--- a/script/get-lib-armnn/meta.yaml
+++ b/script/get-lib-armnn/meta.yaml
@@ -7,16 +7,16 @@ default_version: '23.11'
 deps:
 - tags: detect,os
 env:
-  CM_GIT_URL: https://github.com/ARM-software/armnn
+  MLC_GIT_URL: https://github.com/ARM-software/armnn
 new_env_keys:
-- CM_LIB_ARMNN_VERSION
-- CM_LIB_DNNL_*
+- MLC_LIB_ARMNN_VERSION
+- MLC_LIB_DNNL_*
 - +C_INCLUDE_PATH
 - +CPLUS_INCLUDE_PATH
 - +LD_LIBRARY_PATH
 prehook_deps:
 - force_env_keys:
-  - CM_GIT_*
+  - MLC_GIT_*
   tags: get,git,repo,_repo.https://github.com/ARM-software/armnn
 tags:
 - get
@@ -27,13 +27,13 @@ uid: 9603a2e90fd44587
 versions:
   '22.11':
     env:
-      CM_LIB_ARMNN_VERSION: v22.11
-      CM_TMP_GIT_BRANCH_NAME: branches/armnn_22_11
+      MLC_LIB_ARMNN_VERSION: v22.11
+      MLC_TMP_GIT_BRANCH_NAME: branches/armnn_22_11
   '23.05':
     env:
-      CM_LIB_ARMNN_VERSION: v23.05
-      CM_TMP_GIT_BRANCH_NAME: branches/armnn_23_05
+      MLC_LIB_ARMNN_VERSION: v23.05
+      MLC_TMP_GIT_BRANCH_NAME: branches/armnn_23_05
   '23.11':
     env:
-      CM_LIB_ARMNN_VERSION: v23.11
-      CM_TMP_GIT_BRANCH_NAME: branches/armnn_23_11
+      MLC_LIB_ARMNN_VERSION: v23.11
+      MLC_TMP_GIT_BRANCH_NAME: branches/armnn_23_11
diff --git a/script/get-lib-armnn/run.sh b/script/get-lib-armnn/run.sh
index 4bb5d182a..1c4d20e9d 100644
--- a/script/get-lib-armnn/run.sh
+++ b/script/get-lib-armnn/run.sh
@@ -2,8 +2,8 @@
 
 CUR_DIR=${PWD:-tmp}
 
-wget -nc ${CM_LIB_ARMNN_PREBUILT_BINARY_URL}
-tar -xvzf ${CM_LIB_ARMNN_EXTRACT_FILENAME}
+wget -nc ${MLC_LIB_ARMNN_PREBUILT_BINARY_URL}
+tar -xvzf ${MLC_LIB_ARMNN_EXTRACT_FILENAME}
 
 echo "******************************************************"
 echo "ArmNN prebuilt binary downloaded to ${CUR_DIR} ..."
diff --git a/script/get-lib-dnnl/customize.py b/script/get-lib-dnnl/customize.py
index 051595167..d02be486e 100644
--- a/script/get-lib-dnnl/customize.py
+++ b/script/get-lib-dnnl/customize.py
@@ -13,7 +13,7 @@ def preprocess(i):
 def postprocess(i):
     env = i['env']
 
-    env['CM_LIB_DNNL_INSTALL_DIR'] = os.getcwd()
+    env['MLC_LIB_DNNL_INSTALL_DIR'] = os.getcwd()
 
     for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', '+LD_LIBRARY_PATH',
                 '+DYLD_FALLBACK_LIBRARY_PATH']:
diff --git a/script/get-lib-dnnl/meta.yaml b/script/get-lib-dnnl/meta.yaml
index 2fdc8cb7c..c625565cd 100644
--- a/script/get-lib-dnnl/meta.yaml
+++ b/script/get-lib-dnnl/meta.yaml
@@ -8,13 +8,13 @@ deps:
 - tags: detect,cpu
 - tags: cmake,get-cmake
 env:
-  CM_DNNL_CLEAN_BUILD: 'yes'
-  CM_GIT_URL: https://github.com/oneapi-src/oneDNN
+  MLC_DNNL_CLEAN_BUILD: 'yes'
+  MLC_GIT_URL: https://github.com/oneapi-src/oneDNN
   DNNL_BUILD_EXAMPLES: 'OFF'
   DNNL_BUILD_TESTS: 'OFF'
   DNNL_CPU_RUNTIME: OMP
 new_env_keys:
-- CM_LIB_DNNL_*
+- MLC_LIB_DNNL_*
 - +C_INCLUDE_PATH
 - +CPLUS_INCLUDE_PATH
 - +LD_LIBRARY_PATH
@@ -27,7 +27,7 @@ uid: 1cd35a6a3b0b4530
 versions:
   2.2.4:
     env:
-      CM_GIT_CHECKOUT: v2.2.4
+      MLC_GIT_CHECKOUT: v2.2.4
   dev:
     env:
-      CM_GIT_CHECKOUT: master
+      MLC_GIT_CHECKOUT: master
diff --git a/script/get-lib-dnnl/run.sh b/script/get-lib-dnnl/run.sh
index ca47ee3b9..eaf887199 100644
--- a/script/get-lib-dnnl/run.sh
+++ b/script/get-lib-dnnl/run.sh
@@ -2,7 +2,7 @@
 
 CUR_DIR=${PWD:-tmp}
 
-git clone --recursive -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} src
+git clone --recursive -b "${MLC_GIT_CHECKOUT}" ${MLC_GIT_URL} src
 
 test $? -eq 0 || exit 1
 
@@ -25,7 +25,7 @@ cmake .. \
 if [ "${?}" != "0" ]; then exit 1; fi
 
 echo "******************************************************"
-cmake --build . -j${CM_CPUINFO_CPUs}
+cmake --build . -j${MLC_CPUINFO_CPUs}
 if [ "${?}" != "0" ]; then exit 1; fi
 
 echo "******************************************************"
@@ -35,7 +35,7 @@ if [ "${?}" != "0" ]; then exit 1; fi
 
 # Clean build directory (too large)
 cd ${INSTALL_DIR}
-if [ "${CM_DNNL_CLEAN_BUILD}" != "no" ]; then
+if [ "${MLC_DNNL_CLEAN_BUILD}" != "no" ]; then
   rm -rf build
 fi
diff --git a/script/get-lib-protobuf/customize.py b/script/get-lib-protobuf/customize.py
index e8e6ea450..4219223dd 100644
--- a/script/get-lib-protobuf/customize.py
+++ b/script/get-lib-protobuf/customize.py
@@ -12,8 +12,8 @@ def preprocess(i):
 
     automation = i['automation']
 
-    env['CM_GIT_CHECKOUT'] = "v" + env['CM_VERSION']
-    quiet = (env.get('CM_QUIET', False) == 'yes')
+    env['MLC_GIT_CHECKOUT'] = "v" + env['MLC_VERSION']
+    quiet = (env.get('MLC_QUIET', False) == 'yes')
 
     return {'return': 0}
 
@@ -26,8 +26,8 @@ def postprocess(i):
         env['+LD_LIBRARY_PATH'] = []
 
     protobuf_install_path = os.path.join(os.getcwd(), "install")
-    env['CM_GOOGLE_PROTOBUF_SRC_PATH'] = env['CM_GIT_REPO_CHECKOUT_PATH']
-    env['CM_GOOGLE_PROTOBUF_INSTALL_PATH'] = protobuf_install_path
+    env['MLC_GOOGLE_PROTOBUF_SRC_PATH'] = env['MLC_GIT_REPO_CHECKOUT_PATH']
+    env['MLC_GOOGLE_PROTOBUF_INSTALL_PATH'] = protobuf_install_path
     env['+C_INCLUDE_PATH'].append(
         os.path.join(
             protobuf_install_path,
diff --git a/script/get-lib-protobuf/meta.yaml b/script/get-lib-protobuf/meta.yaml
index 6f06409fd..551a31a0e 100644
--- a/script/get-lib-protobuf/meta.yaml
+++ b/script/get-lib-protobuf/meta.yaml
@@ -10,8 +10,8 @@ deps:
 input_description: {}
 input_mapping: {}
 new_env_keys:
-- CM_GOOGLE_PROTOBUF_SRC_PATH
-- CM_GOOGLE_PROTOBUF_INSTALL_PATH
+- MLC_GOOGLE_PROTOBUF_SRC_PATH
+- MLC_GOOGLE_PROTOBUF_INSTALL_PATH
 - +C_INCLUDE_PATH
 - +CPLUS_INCLUDE_PATH
 - +LD_LIBRARY_PATH
@@ -21,15 +21,15 @@ posthook_deps: []
 prehook_deps:
 - extra_cache_tags: lib,protobuf,src
   force_env_keys:
-  - CM_GIT_*
+  - MLC_GIT_*
   tags: get,git,repo,_repo.https://github.com/google/protobuf.git
   update_tags_from_env_with_prefix:
     _branch.:
-    - CM_TMP_GIT_CHECKOUT
+    - MLC_TMP_GIT_CHECKOUT
     _repo.:
-    - CM_TMP_GIT_URL
+    - MLC_TMP_GIT_URL
     _tag.:
-    - CM_GIT_CHECKOUT_TAG
+    - MLC_GIT_CHECKOUT_TAG
 tags:
 - get
 - google-protobuf
@@ -41,8 +41,8 @@ uid: db45f1eb73934f91
 variations:
   branch.#:
     env:
-      CM_TMP_GIT_CHECKOUT: '#'
+      MLC_TMP_GIT_CHECKOUT: '#'
   tag.#:
     env:
-      CM_GIT_CHECKOUT_TAG: '#'
+      MLC_GIT_CHECKOUT_TAG: '#'
 versions: {}
diff --git a/script/get-lib-protobuf/run.sh b/script/get-lib-protobuf/run.sh
index 29c0267d1..a44b8f688 100644
--- a/script/get-lib-protobuf/run.sh
+++ b/script/get-lib-protobuf/run.sh
@@ -2,11 +2,11 @@ CUR=$PWD
 mkdir -p install
 INSTALL_DIR=$CUR/install
-cd ${CM_GIT_REPO_CHECKOUT_PATH}
+cd ${MLC_GIT_REPO_CHECKOUT_PATH}
 rm -rf build
 mkdir build
 cd build
-export MAKEFLAGS=-j${CM_MAKE_CORES}
+export MAKEFLAGS=-j${MLC_MAKE_CORES}
 cmake -Dprotobuf_BUILD_TESTS=OFF -DBUILD_SHARED_LIBS=ON -DCMAKE_CXX_STANDARD=14 -DCMAKE_INSTALL_PREFIX=${INSTALL_DIR} ../cmake
 test $? -eq 0 || exit $?
CMD="make install" diff --git a/script/get-lib-qaic-api/customize.py b/script/get-lib-qaic-api/customize.py index 6c829ae86..d94c53325 100644 --- a/script/get-lib-qaic-api/customize.py +++ b/script/get-lib-qaic-api/customize.py @@ -9,7 +9,7 @@ def preprocess(i): env = i['env'] - # env['CM_GIT_CHECKOUT'] = env['CM_TMP_GIT_BRANCH_NAME'] + # env['MLC_GIT_CHECKOUT'] = env['MLC_TMP_GIT_BRANCH_NAME'] return {'return': 0} @@ -28,16 +28,16 @@ def postprocess(i): for key in paths: env[key] = [] - include_paths = [env['CM_TMP_CURRENT_SCRIPT_PATH']] + include_paths = [env['MLC_TMP_CURRENT_SCRIPT_PATH']] for inc_path in include_paths: env['+C_INCLUDE_PATH'].append(inc_path) env['+CPLUS_INCLUDE_PATH'].append(inc_path) version = "master" - env['CM_QAIC_API_SRC_FILE'] = os.path.join( - env['CM_TMP_CURRENT_SCRIPT_PATH'], version, "QAicInfApi.cpp") - env['CM_QAIC_API_INC_FILE'] = os.path.join( - env['CM_TMP_CURRENT_SCRIPT_PATH'], version, "QAicInfApi.h") + env['MLC_QAIC_API_SRC_FILE'] = os.path.join( + env['MLC_TMP_CURRENT_SCRIPT_PATH'], version, "QAicInfApi.cpp") + env['MLC_QAIC_API_INC_FILE'] = os.path.join( + env['MLC_TMP_CURRENT_SCRIPT_PATH'], version, "QAicInfApi.h") return {'return': 0} diff --git a/script/get-lib-qaic-api/meta.yaml b/script/get-lib-qaic-api/meta.yaml index aaf6688dd..65d8ebed3 100644 --- a/script/get-lib-qaic-api/meta.yaml +++ b/script/get-lib-qaic-api/meta.yaml @@ -8,8 +8,8 @@ deps: - tags: detect,os env: {} new_env_keys: -- CM_LIB_QAIC_* -- CM_QAIC_API_* +- MLC_LIB_QAIC_* +- MLC_QAIC_API_* - +C_INCLUDE_PATH - +CPLUS_INCLUDE_PATH - +LD_LIBRARY_PATH @@ -24,4 +24,4 @@ uid: 1e253ae184e44f23 versions: master: env: - CM_LIB_QAIC_VERSION: master + MLC_LIB_QAIC_VERSION: master diff --git a/script/get-llvm/README-extra.md b/script/get-llvm/README-extra.md index 8020e09ba..a57c16f5a 100644 --- a/script/get-llvm/README-extra.md +++ b/script/get-llvm/README-extra.md @@ -2,14 +2,14 @@ This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed llvm on the system and if not found calls the [install script for llvm](../script/install-llvm-prebuilt). ## Exported Variables -* `CM_LLVM_CLANG_BIN` -* `CM_LLVM_CLANG_BIN_WITH_PATH` -* `CM_C_COMPILER_BIN` -* `CM_C_COMPILER_WITH_PATH` -* `CM_CXX_COMPILER_BIN` -* `CM_CXX_COMPILER_WITH_PATH` -* `CM_COMPILER_*` -* `CM_LINKER_*` +* `MLC_LLVM_CLANG_BIN` +* `MLC_LLVM_CLANG_BIN_WITH_PATH` +* `MLC_C_COMPILER_BIN` +* `MLC_C_COMPILER_WITH_PATH` +* `MLC_CXX_COMPILER_BIN` +* `MLC_CXX_COMPILER_WITH_PATH` +* `MLC_COMPILER_*` +* `MLC_LINKER_*` ## Supported and Tested OS 1. 
Ubuntu 18.04, 20.04, 22.04 @@ -83,7 +83,7 @@ cm run script "app image corner-detection" ```bash cm rm cache -f -cm run script "get llvm" --version=13.0.0 --env.CM_LLVM_PACKAGE=clang+llvm-13.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz +cm run script "get llvm" --version=13.0.0 --env.MLC_LLVM_PACKAGE=clang+llvm-13.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz cm run script "app image corner-detection" ``` @@ -91,6 +91,6 @@ cm run script "app image corner-detection" ```bash cm rm cache -f -cm run script "get llvm" --version=12.0.0 --env.CM_LLVM_PACKAGE=clang+llvm-12.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz +cm run script "get llvm" --version=12.0.0 --env.MLC_LLVM_PACKAGE=clang+llvm-12.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz cm run script "app image corner-detection" ``` diff --git a/script/get-llvm/customize.py b/script/get-llvm/customize.py index c8bb004d6..5e1eb0edf 100644 --- a/script/get-llvm/customize.py +++ b/script/get-llvm/customize.py @@ -14,18 +14,18 @@ def preprocess(i): env['FILE_NAME_C'] = file_name_c - if 'CM_LLVM_CLANG_BIN_WITH_PATH' not in env: + if 'MLC_LLVM_CLANG_BIN_WITH_PATH' not in env: r = i['automation'].find_artifact({'file_name': file_name_c, 'env': env, 'os_info': os_info, 'default_path_env_key': 'PATH', 'detect_version': True, - 'env_path_key': 'CM_LLVM_CLANG_BIN_WITH_PATH', + 'env_path_key': 'MLC_LLVM_CLANG_BIN_WITH_PATH', 'run_script_input': i['run_script_input'], 'recursion_spaces': recursion_spaces}) if r['return'] > 0: if r['return'] == 16: - env['CM_REQUIRE_INSTALL'] = "yes" + env['MLC_REQUIRE_INSTALL'] = "yes" return {'return': 0} else: return r @@ -37,7 +37,7 @@ def detect_version(i): r = i['automation'].parse_version({'match_text': r'clang version\s*([\d.]+)', 'group_number': 1, - 'env_key': 'CM_LLVM_CLANG_VERSION', + 'env_key': 'MLC_LLVM_CLANG_VERSION', 'which_env': i['env']}) if r['return'] > 0: return r @@ -56,42 +56,42 @@ def postprocess(i): if r['return'] > 0: return r - version = env['CM_LLVM_CLANG_VERSION'] - env['CM_LLVM_CLANG_CACHE_TAGS'] = 'version-' + version - env['CM_COMPILER_CACHE_TAGS'] = 'version-' + version + ',family-llvm' - env['CM_COMPILER_FAMILY'] = 'LLVM' - env['CM_COMPILER_VERSION'] = env['CM_LLVM_CLANG_VERSION'] + version = env['MLC_LLVM_CLANG_VERSION'] + env['MLC_LLVM_CLANG_CACHE_TAGS'] = 'version-' + version + env['MLC_COMPILER_CACHE_TAGS'] = 'version-' + version + ',family-llvm' + env['MLC_COMPILER_FAMILY'] = 'LLVM' + env['MLC_COMPILER_VERSION'] = env['MLC_LLVM_CLANG_VERSION'] - found_file_path = env['CM_LLVM_CLANG_BIN_WITH_PATH'] + found_file_path = env['MLC_LLVM_CLANG_BIN_WITH_PATH'] found_path = os.path.dirname(found_file_path) file_name_c = os.path.basename(found_file_path) file_name_cpp = file_name_c.replace("clang", "clang++") - env['CM_LLVM_CLANG_BIN'] = file_name_c + env['MLC_LLVM_CLANG_BIN'] = file_name_c # General compiler for general program compilation - env['CM_C_COMPILER_BIN'] = file_name_c - env['CM_C_COMPILER_WITH_PATH'] = found_file_path - env['CM_C_COMPILER_FLAG_OUTPUT'] = '-o ' - env['CM_C_COMPILER_FLAG_VERSION'] = '--version' - env['CM_C_COMPILER_FLAG_INCLUDE'] = '-I' - - env['CM_CXX_COMPILER_BIN'] = file_name_cpp - env['CM_CXX_COMPILER_WITH_PATH'] = os.path.join(found_path, file_name_cpp) - env['CM_CXX_COMPILER_FLAG_OUTPUT'] = '-o ' - env['CM_CXX_COMPILER_FLAG_VERSION'] = '--version' - env['CM_CXX_COMPILER_FLAG_INCLUDE'] = '-I' - - env['CM_COMPILER_FLAGS_FAST'] = "-O4" + env['MLC_C_COMPILER_BIN'] = file_name_c + env['MLC_C_COMPILER_WITH_PATH'] = found_file_path + env['MLC_C_COMPILER_FLAG_OUTPUT'] = '-o ' + 
env['MLC_C_COMPILER_FLAG_VERSION'] = '--version' + env['MLC_C_COMPILER_FLAG_INCLUDE'] = '-I' + + env['MLC_CXX_COMPILER_BIN'] = file_name_cpp + env['MLC_CXX_COMPILER_WITH_PATH'] = os.path.join(found_path, file_name_cpp) + env['MLC_CXX_COMPILER_FLAG_OUTPUT'] = '-o ' + env['MLC_CXX_COMPILER_FLAG_VERSION'] = '--version' + env['MLC_CXX_COMPILER_FLAG_INCLUDE'] = '-I' + + env['MLC_COMPILER_FLAGS_FAST'] = "-O4" # "-flto" - this flag is not always available (requires LLVMgold.so) - env['CM_LINKER_FLAGS_FAST'] = "-O4" - env['CM_COMPILER_FLAGS_DEBUG'] = "-O0" - env['CM_LINKER_FLAGS_DEBUG'] = "-O0" - env['CM_COMPILER_FLAGS_DEFAULT'] = "-O2" - env['CM_LINKER_FLAGS_DEFAULT'] = "-O2" + env['MLC_LINKER_FLAGS_FAST'] = "-O4" + env['MLC_COMPILER_FLAGS_DEBUG'] = "-O0" + env['MLC_LINKER_FLAGS_DEBUG'] = "-O0" + env['MLC_COMPILER_FLAGS_DEFAULT'] = "-O2" + env['MLC_LINKER_FLAGS_DEFAULT'] = "-O2" - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_LLVM_CLANG_BIN_WITH_PATH'] + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_LLVM_CLANG_BIN_WITH_PATH'] return {'return': 0, 'version': version} diff --git a/script/get-llvm/meta.yaml b/script/get-llvm/meta.yaml index 49795a0ba..cd6242287 100644 --- a/script/get-llvm/meta.yaml +++ b/script/get-llvm/meta.yaml @@ -5,25 +5,25 @@ cache: true category: Compiler automation clean_files: [] env: - CM_REQUIRE_INSTALL: 'no' + MLC_REQUIRE_INSTALL: 'no' name: Detect or install LLVM compiler new_env_keys: -- CM_LLVM_* -- CM_C_COMPILER_* -- CM_CXX_COMPILER_* -- CM_COMPILER_* -- CM_LINKER_* +- MLC_LLVM_* +- MLC_C_COMPILER_* +- MLC_CXX_COMPILER_* +- MLC_COMPILER_* +- MLC_LINKER_* - + CFLAGS - + CXXFLAGS - + FFLAGS - + LDFLAGS -- +CM_HOST_OS_DEFAULT_INCLUDE_PATH +- +MLC_HOST_OS_DEFAULT_INCLUDE_PATH - +PATH post_deps: - tags: get,compiler-flags prehook_deps: - enable_if_env: - CM_REQUIRE_INSTALL: + MLC_REQUIRE_INSTALL: - 'yes' names: llvm-install reuse_version: true diff --git a/script/get-llvm/run.bat b/script/get-llvm/run.bat index 632b201da..829bfa2aa 100644 --- a/script/get-llvm/run.bat +++ b/script/get-llvm/run.bat @@ -1,3 +1,3 @@ -%CM_LLVM_CLANG_BIN_WITH_PATH% --version > tmp-ver.out +%MLC_LLVM_CLANG_BIN_WITH_PATH% --version > tmp-ver.out IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/get-llvm/run.sh b/script/get-llvm/run.sh index c24cbb1ad..cb1c45ece 100644 --- a/script/get-llvm/run.sh +++ b/script/get-llvm/run.sh @@ -1,4 +1,4 @@ #!/bin/bash -clang_bin=${CM_LLVM_CLANG_BIN_WITH_PATH} +clang_bin=${MLC_LLVM_CLANG_BIN_WITH_PATH} ${clang_bin} --version > tmp-ver.out test $? -eq 0 || exit 1 diff --git a/script/get-microtvm/README-extra.md b/script/get-microtvm/README-extra.md index 5e8876519..3a27d6e1a 100644 --- a/script/get-microtvm/README-extra.md +++ b/script/get-microtvm/README-extra.md @@ -2,4 +2,4 @@ This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) clones the git repository of [Microtvm](https://github.com/octoml/microtvm) and cache it in CM for reuse across other CM scripts. ## Exported Variables -1. [CN_MICROTVM_SOURCE](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-microtvm/customize.py#L24): Location in CM cache where microtvm git repository is cloned. +1. [MLC_MICROTVM_SOURCE](https://github.com/mlcommons/ck/blob/master/mlc-mlops/script/get-microtvm/customize.py#L24): Location in CM cache where the microtvm git repository is cloned.
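Every hunk above applies the same mechanical rule: a `CM_`-prefixed environment key is renamed to the corresponding `MLC_`-prefixed key, with the rest of the name unchanged. For out-of-tree scripts that still read the legacy names during the transition, a small fallback helper is enough to bridge both spellings. A minimal sketch (hypothetical helper, not part of this patch):

```python
# Hypothetical compatibility shim -- illustrative only, not part of this patch.
# Assumes the rename is a pure prefix swap (CM_X -> MLC_X), which is exactly
# the transformation applied throughout these hunks.
import os


def mlc_env(key: str, default: str = '') -> str:
    """Read an MLC_* variable, falling back to its legacy CM_* spelling."""
    if not key.startswith('MLC_'):
        raise ValueError('expected an MLC_-prefixed key')
    legacy = 'CM_' + key[len('MLC_'):]
    return os.environ.get(key, os.environ.get(legacy, default))


# Example: resolves MLC_GIT_CHECKOUT, or CM_GIT_CHECKOUT on an older setup.
checkout = mlc_env('MLC_GIT_CHECKOUT', 'main')
```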
diff --git a/script/get-microtvm/customize.py b/script/get-microtvm/customize.py index 8572322f4..fbfa55b2f 100644 --- a/script/get-microtvm/customize.py +++ b/script/get-microtvm/customize.py @@ -10,10 +10,10 @@ def preprocess(i): if os_info['platform'] == 'windows': return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] - if 'CM_GIT_DEPTH' not in env: - env['CM_GIT_DEPTH'] = '' - if 'CM_GIT_RECURSE_SUBMODULES' not in env: - env['CM_GIT_RECURSE_SUBMODULES'] = '' + if 'MLC_GIT_DEPTH' not in env: + env['MLC_GIT_DEPTH'] = '' + if 'MLC_GIT_RECURSE_SUBMODULES' not in env: + env['MLC_GIT_RECURSE_SUBMODULES'] = '' return {'return': 0} @@ -23,6 +23,6 @@ def postprocess(i): env = i['env'] state = i['state'] - env['CM_MICROTVM_SOURCE'] = os.path.join(os.getcwd(), 'microtvm') + env['MLC_MICROTVM_SOURCE'] = os.path.join(os.getcwd(), 'microtvm') return {'return': 0} diff --git a/script/get-microtvm/meta.yaml b/script/get-microtvm/meta.yaml index c47a88f31..9540bce1f 100644 --- a/script/get-microtvm/meta.yaml +++ b/script/get-microtvm/meta.yaml @@ -7,16 +7,16 @@ default_version: main deps: - tags: detect,os env: - CM_GIT_AUTH: 'yes' - CM_GIT_DEPTH: '' - CM_GIT_PATCH: 'no' - CM_GIT_URL: https://github.com/mlcommons/tiny_results_v1.0 + MLC_GIT_AUTH: 'yes' + MLC_GIT_DEPTH: '' + MLC_GIT_PATCH: 'no' + MLC_GIT_URL: https://github.com/mlcommons/tiny_results_v1.0 input_mapping: - ssh: CM_GIT_SSH + ssh: MLC_GIT_SSH local_env_keys: -- CM_GIT_* +- MLC_GIT_* new_env_keys: -- CM_MICROTVM_* +- MLC_MICROTVM_* tags: - get - src @@ -27,12 +27,12 @@ uid: a9cad70972a140b9 variations: full-history: env: - CM_GIT_DEPTH: --depth 10 + MLC_GIT_DEPTH: --depth 10 short-history: env: - CM_GIT_DEPTH: --depth 10 + MLC_GIT_DEPTH: --depth 10 versions: custom: {} main: env: - CM_GIT_CHECKOUT: main + MLC_GIT_CHECKOUT: main diff --git a/script/get-microtvm/run.sh b/script/get-microtvm/run.sh index 2bffb48d8..4dae49467 100644 --- a/script/get-microtvm/run.sh +++ b/script/get-microtvm/run.sh @@ -1,12 +1,12 @@ #!/bin/bash CUR_DIR=$PWD -SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH} +SCRIPT_DIR=${MLC_TMP_CURRENT_SCRIPT_PATH} echo "******************************************************" -echo "Cloning microtvm from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT} ${CM_GIT_DEPTH} ${CM_GIT_RECURSE_SUBMODULES}..." +echo "Cloning microtvm from ${MLC_GIT_URL} with branch ${MLC_GIT_CHECKOUT} ${MLC_GIT_DEPTH} ${MLC_GIT_RECURSE_SUBMODULES}..." if [ ! 
-d "microtvm" ]; then - git clone ${CM_GIT_RECURSE_SUBMODULES} -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} ${CM_GIT_DEPTH} microtvm + git clone ${MLC_GIT_RECURSE_SUBMODULES} -b "${MLC_GIT_CHECKOUT}" ${MLC_GIT_URL} ${MLC_GIT_DEPTH} microtvm if [ "${?}" != "0" ]; then exit 1; fi fi diff --git a/script/get-ml-model-3d-unet-kits19/customize.py b/script/get-ml-model-3d-unet-kits19/customize.py index d24b386c9..df4c46806 100644 --- a/script/get-ml-model-3d-unet-kits19/customize.py +++ b/script/get-ml-model-3d-unet-kits19/customize.py @@ -12,14 +12,14 @@ def preprocess(i): cm = automation.action_object - path = os.path.dirname(env['CM_ML_MODEL_FILE_WITH_PATH']) + path = os.path.dirname(env['MLC_ML_MODEL_FILE_WITH_PATH']) - if env.get("CM_DAE_EXTRACT_DOWNLOADED", " ") != " ": - env['CM_ML_MODEL_PATH'] = os.path.join(path, env['CM_ML_MODEL_FILE']) - env['CM_ML_MODEL_FILE_WITH_PATH'] = env['CM_ML_MODEL_PATH'] + if env.get("MLC_DAE_EXTRACT_DOWNLOADED", " ") != " ": + env['MLC_ML_MODEL_PATH'] = os.path.join(path, env['MLC_ML_MODEL_FILE']) + env['MLC_ML_MODEL_FILE_WITH_PATH'] = env['MLC_ML_MODEL_PATH'] else: - env['CM_ML_MODEL_PATH'] = path + env['MLC_ML_MODEL_PATH'] = path - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_PATH'] + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_ML_MODEL_PATH'] return {'return': 0} diff --git a/script/get-ml-model-3d-unet-kits19/meta.yaml b/script/get-ml-model-3d-unet-kits19/meta.yaml index 658f306a7..532152570 100644 --- a/script/get-ml-model-3d-unet-kits19/meta.yaml +++ b/script/get-ml-model-3d-unet-kits19/meta.yaml @@ -4,14 +4,14 @@ automation_uid: 5b4e0237da074764 cache: true category: AI/ML models env: - CM_ML_MODEL: 3d-unet-kits19 - CM_ML_MODEL_DATASET: kits19 - CM_ML_MODEL_RETRAINING: 'no' - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' + MLC_ML_MODEL: 3d-unet-kits19 + MLC_ML_MODEL_DATASET: kits19 + MLC_ML_MODEL_RETRAINING: 'no' + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' new_env_keys: -- CM_ML_MODEL_* +- MLC_ML_MODEL_* print_env_at_the_end: - CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model + MLC_ML_MODEL_FILE_WITH_PATH: Path to the ML model tags: - get - ml-model @@ -24,85 +24,85 @@ variations: fp32: default: true env: - CM_ML_MODEL_INPUT_DATA_TYPES: fp32 - CM_ML_MODEL_PRECISION: fp32 - CM_ML_MODEL_WEIGHT_DATA_TYPES: fp32 + MLC_ML_MODEL_INPUT_DATA_TYPES: fp32 + MLC_ML_MODEL_PRECISION: fp32 + MLC_ML_MODEL_WEIGHT_DATA_TYPES: fp32 group: precision onnx: default: true env: - CM_ML_MODEL_FRAMEWORK: onnx + MLC_ML_MODEL_FRAMEWORK: onnx group: framework onnx,fp32: deps: - env: - CM_DOWNLOAD_CHECKSUM: 82f0618fde78f9839e7c712274019b4a - CM_DOWNLOAD_FILENAME: 3dunet_kits19_128x128x128_dynbatch.onnx - CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH - CM_DOWNLOAD_URL: https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128_dynbatch.onnx?download=1 + MLC_DOWNLOAD_CHECKSUM: 82f0618fde78f9839e7c712274019b4a + MLC_DOWNLOAD_FILENAME: 3dunet_kits19_128x128x128_dynbatch.onnx + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH + MLC_DOWNLOAD_URL: https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128_dynbatch.onnx?download=1 extra_cache_tags: 3d-unet,medical-imaging force-cache: true tags: download,file,download-file,_wget force_env_keys: - - CM_OUTDIRNAME + - MLC_OUTDIRNAME env: - CM_ML_MODEL_ACCURACY: '0.86170' - CM_ML_MODEL_FILE: 3dunet_kits19_128x128x128_dynbatch.onnx + MLC_ML_MODEL_ACCURACY: '0.86170' + MLC_ML_MODEL_FILE: 3dunet_kits19_128x128x128_dynbatch.onnx pytorch: env: - CM_ML_MODEL_FRAMEWORK: pytorch + MLC_ML_MODEL_FRAMEWORK: pytorch 
group: framework pytorch,fp32: deps: - env: - CM_DOWNLOAD_CHECKSUM: 2251109371f408c9f10a4320ffdcaef8 - CM_DOWNLOAD_FILENAME: 3dunet_kits19_pytorch.ptc - CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH - CM_DOWNLOAD_URL: https://zenodo.org/record/5597155/files/3dunet_kits19_pytorch.ptc?download=1 + MLC_DOWNLOAD_CHECKSUM: 2251109371f408c9f10a4320ffdcaef8 + MLC_DOWNLOAD_FILENAME: 3dunet_kits19_pytorch.ptc + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH + MLC_DOWNLOAD_URL: https://zenodo.org/record/5597155/files/3dunet_kits19_pytorch.ptc?download=1 extra_cache_tags: 3d-unet,medical-imaging force-cache: true tags: download,file,download-file,_wget force_env_keys: - - CM_OUTDIRNAME + - MLC_OUTDIRNAME env: - CM_ML_MODEL_ACCURACY: '0.86170' - CM_ML_MODEL_FILE: 3dunet_kits19_pytorch.ptc + MLC_ML_MODEL_ACCURACY: '0.86170' + MLC_ML_MODEL_FILE: 3dunet_kits19_pytorch.ptc pytorch,fp32,weights: deps: - env: - CM_DAE_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH - CM_DAE_URL: https://zenodo.org/record/5597155/files/3dunet_kits19_pytorch_checkpoint.pth?download=1 - CM_DOWNLOAD_CHECKSUM: 09c696e3ec13d83c628498bcd831eb5b - CM_DOWNLOAD_FILENAME: 3dunet_kits19_pytorch_checkpoint.pth + MLC_DAE_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH + MLC_DAE_URL: https://zenodo.org/record/5597155/files/3dunet_kits19_pytorch_checkpoint.pth?download=1 + MLC_DOWNLOAD_CHECKSUM: 09c696e3ec13d83c628498bcd831eb5b + MLC_DOWNLOAD_FILENAME: 3dunet_kits19_pytorch_checkpoint.pth extra_cache_tags: 3d-unet,medical-imaging force-cache: true tags: download-and-extract,_wget,_extract force_env_keys: - - CM_OUTDIRNAME + - MLC_OUTDIRNAME env: - CM_ML_MODEL_ACCURACY: '0.86170' - CM_ML_MODEL_FILE: 3dunet_kits19_pytorch_checkpoint.pth + MLC_ML_MODEL_ACCURACY: '0.86170' + MLC_ML_MODEL_FILE: 3dunet_kits19_pytorch_checkpoint.pth tensorflow: alias: tf tf: env: - CM_ML_MODEL_FRAMEWORK: tensorflow + MLC_ML_MODEL_FRAMEWORK: tensorflow group: framework tf,fp32: deps: - env: - CM_DAE_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH - CM_DAE_URL: https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.tf.zip?download=1 - CM_DOWNLOAD_CHECKSUM: 9497108bd0504ae8f85a764a807b76a9 - CM_DOWNLOAD_FILENAME: 3dunet_kits19_128x128x128.tf.zip + MLC_DAE_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH + MLC_DAE_URL: https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.tf.zip?download=1 + MLC_DOWNLOAD_CHECKSUM: 9497108bd0504ae8f85a764a807b76a9 + MLC_DOWNLOAD_FILENAME: 3dunet_kits19_128x128x128.tf.zip extra_cache_tags: 3d-unet,medical-imaging force-cache: true tags: download-and-extract,_wget,_extract force_env_keys: - - CM_OUTDIRNAME + - MLC_OUTDIRNAME env: - CM_ML_MODEL_ACCURACY: '0.86170' - CM_ML_MODEL_FILE: 3dunet_kits19_128x128x128.tf + MLC_ML_MODEL_ACCURACY: '0.86170' + MLC_ML_MODEL_FILE: 3dunet_kits19_128x128x128.tf weights: env: - CM_MODEL_WEIGHTS_FILE: 'yes' + MLC_MODEL_WEIGHTS_FILE: 'yes' diff --git a/script/get-ml-model-abtf-ssd-pytorch/customize.py b/script/get-ml-model-abtf-ssd-pytorch/customize.py index 7d20952a2..1e3df4acf 100644 --- a/script/get-ml-model-abtf-ssd-pytorch/customize.py +++ b/script/get-ml-model-abtf-ssd-pytorch/customize.py @@ -8,15 +8,15 @@ def preprocess(i): env = i['env'] - if env.get('CM_ML_MODEL_LOCAL', '') == 'yes': - ml_model = env.get('CM_ML_MODEL_FILENAME', '') + if env.get('MLC_ML_MODEL_LOCAL', '') == 'yes': + ml_model = env.get('MLC_ML_MODEL_FILENAME', '') if ml_model == '': return {'return': 1, 'error': '_local.{model name.pth} is not specified'} if not os.path.isabs(ml_model): ml_model = 
os.path.join( env.get( - 'CM_TMP_CURRENT_PATH', + 'MLC_TMP_CURRENT_PATH', ''), ml_model) @@ -24,7 +24,7 @@ def preprocess(i): return {'return': 1, 'error': 'ML model {} is not found'.format(ml_model)} - env['CM_ML_MODEL_FILE_WITH_PATH'] = ml_model + env['MLC_ML_MODEL_FILE_WITH_PATH'] = ml_model return {'return': 0} @@ -33,17 +33,17 @@ def postprocess(i): env = i['env'] - if env.get('CM_ML_MODEL_FILE_WITH_PATH', '') == '': - env['CM_ML_MODEL_FILE_WITH_PATH'] = 'model-weights-skipped' + if env.get('MLC_ML_MODEL_FILE_WITH_PATH', '') == '': + env['MLC_ML_MODEL_FILE_WITH_PATH'] = 'model-weights-skipped' - env['CM_ML_MODEL_FILE'] = os.path.basename( - env['CM_ML_MODEL_FILE_WITH_PATH']) + env['MLC_ML_MODEL_FILE'] = os.path.basename( + env['MLC_ML_MODEL_FILE_WITH_PATH']) - if env.get('CM_ABTF_SSD_PYTORCH', '') == '': - env['CM_ABTF_SSD_PYTORCH'] = 'model-code-skipped' + if env.get('MLC_ABTF_SSD_PYTORCH', '') == '': + env['MLC_ABTF_SSD_PYTORCH'] = 'model-code-skipped' - env['CM_ML_MODEL_CODE_WITH_PATH'] = env['CM_ABTF_SSD_PYTORCH'] + env['MLC_ML_MODEL_CODE_WITH_PATH'] = env['MLC_ABTF_SSD_PYTORCH'] - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_ML_MODEL_FILE_WITH_PATH'] return {'return': 0} diff --git a/script/get-ml-model-abtf-ssd-pytorch/meta.yaml b/script/get-ml-model-abtf-ssd-pytorch/meta.yaml index b346288d2..b9f70ebc3 100644 --- a/script/get-ml-model-abtf-ssd-pytorch/meta.yaml +++ b/script/get-ml-model-abtf-ssd-pytorch/meta.yaml @@ -21,13 +21,13 @@ tags: input_mapping: - model_code_git_url: CM_ABTF_MODEL_CODE_GIT_URL - model_code_git_branch: CM_ABTF_MODEL_CODE_GIT_BRANCH + model_code_git_url: MLC_ABTF_MODEL_CODE_GIT_URL + model_code_git_branch: MLC_ABTF_MODEL_CODE_GIT_BRANCH default_env: - CM_ABTF_MODEL_CODE_GIT_URL: https://github.com/mlcommons/abtf-ssd-pytorch - CM_ABTF_MODEL_CODE_GIT_BRANCH: cognata + MLC_ABTF_MODEL_CODE_GIT_URL: https://github.com/mlcommons/abtf-ssd-pytorch + MLC_ABTF_MODEL_CODE_GIT_BRANCH: cognata deps: @@ -39,122 +39,122 @@ deps: - abtf-ssd-pytorch-git-repo - abtf-ml-model-code-git-repo skip_if_env: - CM_SKIP_MODEL_CODE_DOWNLOAD: + MLC_SKIP_MODEL_CODE_DOWNLOAD: - 'yes' env: - CM_GIT_AUTH: 'yes' - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_ABTF_SSD_PYTORCH + MLC_GIT_AUTH: 'yes' + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_ABTF_SSD_PYTORCH extra_cache_tags: abtf,ssd,pytorch,ml-model,cmc update_tags_from_env_with_prefix: _repo.: - - CM_ABTF_MODEL_CODE_GIT_URL + - MLC_ABTF_MODEL_CODE_GIT_URL _branch.: - - CM_ABTF_MODEL_CODE_GIT_BRANCH + - MLC_ABTF_MODEL_CODE_GIT_BRANCH - tags: download,file env: - CM_DOWNLOAD_CHECKSUM: <<>> - CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH - CM_DOWNLOAD_FILENAME: <<>> - CM_VERIFY_SSL: 'no' + MLC_DOWNLOAD_CHECKSUM: <<>> + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH + MLC_DOWNLOAD_FILENAME: <<>> + MLC_VERIFY_SSL: 'no' force_cache: true names: - abtf-ml-model-weights - abtf-ml-model-weights-download skip_if_env: - CM_SKIP_MODEL_WEIGHTS_DOWNLOAD: + MLC_SKIP_MODEL_WEIGHTS_DOWNLOAD: - 'yes' update_tags_from_env_with_prefix: _url.: - - CM_ML_MODEL_URL + - MLC_ML_MODEL_URL new_env_keys: -- CM_ML_MODEL_* +- MLC_ML_MODEL_* print_env_at_the_end: - CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model weights - CM_ML_MODEL_CODE_WITH_PATH: Path to the ML model code + MLC_ML_MODEL_FILE_WITH_PATH: Path to the ML model weights + MLC_ML_MODEL_CODE_WITH_PATH: Path to the ML model code variations: e01: env: - CM_ML_MODEL: abtf-ssd-pytorch - CM_ML_MODEL_CHECKSUM: 
31d177228308bbe43917c912b01c2d67 - CM_ML_MODEL_DATASET: coco - CM_ML_MODEL_FILENAME: SSD_e1.pth - CM_ML_MODEL_IMAGE_HEIGHT: '300' - CM_ML_MODEL_IMAGE_WIDTH: '300' - CM_ML_MODEL_URL: https://www.dropbox.com/scl/fi/7nqt5z8gplgeaveo933eo/SSD_e1.pth?rlkey=7lyb4qs2hzg491bfprwcuvx54&dl=0 + MLC_ML_MODEL: abtf-ssd-pytorch + MLC_ML_MODEL_CHECKSUM: 31d177228308bbe43917c912b01c2d67 + MLC_ML_MODEL_DATASET: coco + MLC_ML_MODEL_FILENAME: SSD_e1.pth + MLC_ML_MODEL_IMAGE_HEIGHT: '300' + MLC_ML_MODEL_IMAGE_WIDTH: '300' + MLC_ML_MODEL_URL: https://www.dropbox.com/scl/fi/7nqt5z8gplgeaveo933eo/SSD_e1.pth?rlkey=7lyb4qs2hzg491bfprwcuvx54&dl=0 group: model-weights e65: env: - CM_ML_MODEL: abtf-ssd-pytorch - CM_ML_MODEL_CHECKSUM: f769eb0321ac7fc1c16f982db6131d2f - CM_ML_MODEL_DATASET: coco - CM_ML_MODEL_FILENAME: SSD_e65.pth - CM_ML_MODEL_IMAGE_HEIGHT: '300' - CM_ML_MODEL_IMAGE_WIDTH: '300' - CM_ML_MODEL_URL: https://www.dropbox.com/scl/fi/wkegl2qxvm8cefbqq00o3/SSD_e65.pth?rlkey=ez26jafjdcly665npl6pdqxl8&dl=0 + MLC_ML_MODEL: abtf-ssd-pytorch + MLC_ML_MODEL_CHECKSUM: f769eb0321ac7fc1c16f982db6131d2f + MLC_ML_MODEL_DATASET: coco + MLC_ML_MODEL_FILENAME: SSD_e65.pth + MLC_ML_MODEL_IMAGE_HEIGHT: '300' + MLC_ML_MODEL_IMAGE_WIDTH: '300' + MLC_ML_MODEL_URL: https://www.dropbox.com/scl/fi/wkegl2qxvm8cefbqq00o3/SSD_e65.pth?rlkey=ez26jafjdcly665npl6pdqxl8&dl=0 group: model-weights abtf-mvp: env: - CM_ML_MODEL: abtf-ssd-pytorch - CM_ML_MODEL_CHECKSUM: 1ab66f523715f9564603626e94e59c8c - CM_ML_MODEL_DATASET: cognata - CM_ML_MODEL_FILENAME: baseline_8MP_ss_scales_all_ep60.pth - CM_ML_MODEL_IMAGE_SIZE: '8M' - CM_ML_MODEL_URL: https://www.dropbox.com/scl/fi/9un2i2169rgebui4xklnm/baseline_8MP_ss_scales_all_ep60.pth?rlkey=sez3dnjep4waa09s5uy4r3wmk&st=z859czgk&dl=0 + MLC_ML_MODEL: abtf-ssd-pytorch + MLC_ML_MODEL_CHECKSUM: 1ab66f523715f9564603626e94e59c8c + MLC_ML_MODEL_DATASET: cognata + MLC_ML_MODEL_FILENAME: baseline_8MP_ss_scales_all_ep60.pth + MLC_ML_MODEL_IMAGE_SIZE: '8M' + MLC_ML_MODEL_URL: https://www.dropbox.com/scl/fi/9un2i2169rgebui4xklnm/baseline_8MP_ss_scales_all_ep60.pth?rlkey=sez3dnjep4waa09s5uy4r3wmk&st=z859czgk&dl=0 group: model-weights abtf-poc: default_variations: download-tool: rclone env: - CM_ML_MODEL: abtf-ssd-pytorch - CM_ML_MODEL_CHECKSUM: 26845c3b9573ce115ef29dca4ae5be14 - CM_ML_MODEL_DATASET: cognata - CM_ML_MODEL_FILENAME: baseline_8MP_ss_scales_fm1_5x5_all_ep60.pth - CM_ML_MODEL_IMAGE_SIZE: '8M' + MLC_ML_MODEL: abtf-ssd-pytorch + MLC_ML_MODEL_CHECKSUM: 26845c3b9573ce115ef29dca4ae5be14 + MLC_ML_MODEL_DATASET: cognata + MLC_ML_MODEL_FILENAME: baseline_8MP_ss_scales_fm1_5x5_all_ep60.pth + MLC_ML_MODEL_IMAGE_SIZE: '8M' group: model-weights abtf-poc,gdrive: env: - CM_ML_MODEL_URL: https://drive.google.com/file/d/1kfJR_bs54KONprVd51kZu0PYmmh1wZZa/view + MLC_ML_MODEL_URL: https://drive.google.com/file/d/1kfJR_bs54KONprVd51kZu0PYmmh1wZZa/view abtf-poc,rclone: env: - CM_RCLONE_COPY_USING: copyurl - CM_ML_MODEL_URL: https://automotive.mlcommons-storage.org/SSD_ResNet50%2Fbaseline_8MP_ss_scales_fm1_5x5_all_ep60.pth - CM_RCLONE_CONFIG_CMD: '' + MLC_RCLONE_COPY_USING: copyurl + MLC_ML_MODEL_URL: https://automotive.mlcommons-storage.org/SSD_ResNet50%2Fbaseline_8MP_ss_scales_fm1_5x5_all_ep60.pth + MLC_RCLONE_CONFIG_CMD: '' local.#: env: - CM_ML_MODEL_FILENAME: '#' - CM_ML_MODEL_LOCAL: 'yes' - CM_SKIP_MODEL_WEIGHTS_DOWNLOAD: 'yes' + MLC_ML_MODEL_FILENAME: '#' + MLC_ML_MODEL_LOCAL: 'yes' + MLC_SKIP_MODEL_WEIGHTS_DOWNLOAD: 'yes' group: model-weights skip_weights: default: true env: - CM_SKIP_MODEL_WEIGHTS_DOWNLOAD: 'yes' 
+ MLC_SKIP_MODEL_WEIGHTS_DOWNLOAD: 'yes' group: model-weights skip_code: env: - CM_SKIP_MODEL_CODE_DOWNLOAD: 'yes' + MLC_SKIP_MODEL_CODE_DOWNLOAD: 'yes' rclone: group: download-tool env: - CM_RCLONE_COPY_USING: copyurl + MLC_RCLONE_COPY_USING: copyurl adr: abtf-ml-model-weights-download: tags: _rclone @@ -168,7 +168,7 @@ variations: gdown: group: download-tool env: - CM_DOWNLOAD_EXTRA_OPTIONS: " --fuzzy" + MLC_DOWNLOAD_EXTRA_OPTIONS: " --fuzzy" adr: abtf-ml-model-weights-download: tags: _gdown diff --git a/script/get-ml-model-bert-base-squad/meta.yaml b/script/get-ml-model-bert-base-squad/meta.yaml index 477f5570a..11acdde02 100644 --- a/script/get-ml-model-bert-base-squad/meta.yaml +++ b/script/get-ml-model-bert-base-squad/meta.yaml @@ -4,29 +4,29 @@ automation_uid: 5b4e0237da074764 cache: true category: AI/ML models env: - CM_ML_MODEL: BERT - CM_ML_MODEL_DATASET: squad-1.1 - CM_ML_MODEL_MAX_SEQ_LENGTH: '384' - CM_ML_MODEL_NAME: MLPERF BERT Base on SQuAD v1.1 - CM_TMP_ML_MODEL_REQUIRE_DOWNLOAD: 'no' + MLC_ML_MODEL: BERT + MLC_ML_MODEL_DATASET: squad-1.1 + MLC_ML_MODEL_MAX_SEQ_LENGTH: '384' + MLC_ML_MODEL_NAME: MLPERF BERT Base on SQuAD v1.1 + MLC_TMP_ML_MODEL_REQUIRE_DOWNLOAD: 'no' new_env_keys: -- CM_ML_MODEL* +- MLC_ML_MODEL* post_deps: - tags: get,bert,squad,vocab prehook_deps: - enable_if_env: - CM_TMP_ML_MODEL_REQUIRE_DOWNLOAD: 'yes' + MLC_TMP_ML_MODEL_REQUIRE_DOWNLOAD: 'yes' env: - CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH - CM_EXTRACT_EXTRACTED_FILENAME: <<>> - CM_EXTRACT_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH + MLC_EXTRACT_EXTRACTED_FILENAME: <<>> + MLC_EXTRACT_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH tags: download-and-extract update_tags_from_env_with_prefix: _url.: - - CM_PACKAGE_URL + - MLC_PACKAGE_URL print_env_at_the_end: - CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH: Path to the BERT vocab file - CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model + MLC_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH: Path to the BERT vocab file + MLC_ML_MODEL_FILE_WITH_PATH: Path to the ML model tags: - get - ml-model @@ -40,12 +40,12 @@ uid: b3b10b452ce24c5f variations: deepsparse: env: - CM_ML_MODEL_FRAMEWORK: deepsparse - CM_ML_MODEL_INPUT_IDS_NAME: input_ids - CM_ML_MODEL_INPUT_MASK_NAME: input_mask - CM_ML_MODEL_INPUT_SEGMENTS_NAME: segment_ids - CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: output_end_logits - CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: output_start_logits + MLC_ML_MODEL_FRAMEWORK: deepsparse + MLC_ML_MODEL_INPUT_IDS_NAME: input_ids + MLC_ML_MODEL_INPUT_MASK_NAME: input_mask + MLC_ML_MODEL_INPUT_SEGMENTS_NAME: segment_ids + MLC_ML_MODEL_OUTPUT_END_LOGITS_NAME: output_end_logits + MLC_ML_MODEL_OUTPUT_START_LOGITS_NAME: output_start_logits group: framework deepsparse,int8: deps: @@ -53,16 +53,16 @@ variations: - neural-magic-zoo-downloader tags: get,ml-model,zoo,deepsparse,_bert-base-pruned95_obs_quant-none env: - CM_ML_MODEL_F1: '87.89' - CM_ML_MODEL_FILE: model.onnx - CM_PRUNING_PERCENTAGE: '95' + MLC_ML_MODEL_F1: '87.89' + MLC_ML_MODEL_FILE: model.onnx + MLC_PRUNING_PERCENTAGE: '95' fp32: default: true env: - CM_ML_MODEL_PRECISION: fp32 + MLC_ML_MODEL_PRECISION: fp32 group: precision int8: env: - CM_ML_MODEL_PRECISION: int8 - CM_ML_MODEL_QUANTIZED: 'yes' + MLC_ML_MODEL_PRECISION: int8 + MLC_ML_MODEL_QUANTIZED: 'yes' group: precision diff --git a/script/get-ml-model-bert-large-squad/customize.py b/script/get-ml-model-bert-large-squad/customize.py index 76eddaaed..3de4c51d5 100644 --- 
a/script/get-ml-model-bert-large-squad/customize.py +++ b/script/get-ml-model-bert-large-squad/customize.py @@ -7,14 +7,14 @@ def preprocess(i): os_info = i['os_info'] env = i['env'] - if env.get('CM_ML_MODEL_BERT_PACKED', '') == 'yes': + if env.get('MLC_ML_MODEL_BERT_PACKED', '') == 'yes': i['run_script_input']['script_name'] = "run-packed" - env['CM_BERT_CONFIG_PATH'] = os.path.join( - env['CM_MLPERF_INFERENCE_BERT_PATH'], "bert_config.json") - env['CM_BERT_CHECKPOINT_DOWNLOAD_DIR'] = os.getcwd() - env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join( + env['MLC_BERT_CONFIG_PATH'] = os.path.join( + env['MLC_MLPERF_INFERENCE_BERT_PATH'], "bert_config.json") + env['MLC_BERT_CHECKPOINT_DOWNLOAD_DIR'] = os.getcwd() + env['MLC_ML_MODEL_FILE_WITH_PATH'] = os.path.join( os.getcwd(), "model.onnx") - env['CM_ML_MODEL_BERT_PACKED_PATH'] = os.path.join( + env['MLC_ML_MODEL_BERT_PACKED_PATH'] = os.path.join( os.getcwd(), "model.onnx") return {'return': 0} @@ -24,14 +24,14 @@ def postprocess(i): env = i['env'] - env['CM_ML_MODEL_FILE'] = os.path.basename( - env['CM_ML_MODEL_FILE_WITH_PATH']) + env['MLC_ML_MODEL_FILE'] = os.path.basename( + env['MLC_ML_MODEL_FILE_WITH_PATH']) - if env.get('CM_ML_MODEL_PRECISION', '') == "fp32": - env['CM_ML_MODEL_BERT_LARGE_FP32_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] - elif env.get('CM_ML_MODEL_PRECISION', '') == "int8": - env['CM_ML_MODEL_BERT_LARGE_INT8_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] + if env.get('MLC_ML_MODEL_PRECISION', '') == "fp32": + env['MLC_ML_MODEL_BERT_LARGE_FP32_PATH'] = env['MLC_ML_MODEL_FILE_WITH_PATH'] + elif env.get('MLC_ML_MODEL_PRECISION', '') == "int8": + env['MLC_ML_MODEL_BERT_LARGE_INT8_PATH'] = env['MLC_ML_MODEL_FILE_WITH_PATH'] - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_ML_MODEL_FILE_WITH_PATH'] return {'return': 0} diff --git a/script/get-ml-model-bert-large-squad/meta.yaml b/script/get-ml-model-bert-large-squad/meta.yaml index e81819279..51bdd93d3 100644 --- a/script/get-ml-model-bert-large-squad/meta.yaml +++ b/script/get-ml-model-bert-large-squad/meta.yaml @@ -4,36 +4,36 @@ automation_uid: 5b4e0237da074764 cache: true category: AI/ML models env: - CM_ML_MODEL: BERT - CM_ML_MODEL_DATASET: squad-1.1 - CM_ML_MODEL_MAX_SEQ_LENGTH: '384' - CM_ML_MODEL_NAME: MLPERF BERT Large on SQuAD v1.1 - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: <<>> + MLC_ML_MODEL: BERT + MLC_ML_MODEL_DATASET: squad-1.1 + MLC_ML_MODEL_MAX_SEQ_LENGTH: '384' + MLC_ML_MODEL_NAME: MLPERF BERT Large on SQuAD v1.1 + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: <<>> new_env_keys: -- CM_ML_MODEL* +- MLC_ML_MODEL* post_deps: - tags: get,dataset-aux,squad-vocab force_env_keys: - - CM_OUTDIRNAME + - MLC_OUTDIRNAME prehook_deps: - env: - CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH - CM_DOWNLOAD_URL1: <<>> - CM_EXTRACT_EXTRACTED_FILENAME: <<>> - CM_EXTRACT_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH + MLC_DOWNLOAD_URL1: <<>> + MLC_EXTRACT_EXTRACTED_FILENAME: <<>> + MLC_EXTRACT_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH extra_cache_tags: bert-large,ml-model force_cache: true skip_if_env: - CM_ML_MODEL_BERT_PACKED: + MLC_ML_MODEL_BERT_PACKED: - 'yes' tags: download-and-extract force_env_keys: - - CM_OUTDIRNAME + - MLC_OUTDIRNAME update_tags_from_env_with_prefix: _url.: - - CM_PACKAGE_URL + - MLC_PACKAGE_URL print_env_at_the_end: - CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model + MLC_ML_MODEL_FILE_WITH_PATH: Path to the ML model tags: - 
get - ml-model @@ -51,73 +51,73 @@ variations: group: download-source custom-url.#: env: - CM_PACKAGE_URL: '#' + MLC_PACKAGE_URL: '#' group: download-source deepsparse: default_variations: download-source: github env: - CM_ML_MODEL_FRAMEWORK: deepsparse - CM_ML_MODEL_INPUT_IDS_NAME: input_ids - CM_ML_MODEL_INPUT_MASK_NAME: input_mask - CM_ML_MODEL_INPUT_SEGMENTS_NAME: segment_ids - CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: output_end_logits - CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: output_start_logits + MLC_ML_MODEL_FRAMEWORK: deepsparse + MLC_ML_MODEL_INPUT_IDS_NAME: input_ids + MLC_ML_MODEL_INPUT_MASK_NAME: input_mask + MLC_ML_MODEL_INPUT_SEGMENTS_NAME: segment_ids + MLC_ML_MODEL_OUTPUT_END_LOGITS_NAME: output_end_logits + MLC_ML_MODEL_OUTPUT_START_LOGITS_NAME: output_start_logits group: framework deepsparse,int8: env: - CM_DAE_EXTRACT_DOWNLOADED: 'yes' - CM_ML_MODEL_F1: '90.21282641816266' - CM_ML_MODEL_FILE: oBERT-Large_95sparse_block4_qat.onnx + MLC_DAE_EXTRACT_DOWNLOADED: 'yes' + MLC_ML_MODEL_F1: '90.21282641816266' + MLC_ML_MODEL_FILE: oBERT-Large_95sparse_block4_qat.onnx deepsparse,int8,github: env: - CM_PACKAGE_URL: https://github.com/mlcommons/inference_results_v2.1/raw/master/open/NeuralMagic/code/bert/deepsparse/models/oBERT-Large_95sparse_block4_qat.onnx.tar.xz + MLC_PACKAGE_URL: https://github.com/mlcommons/inference_results_v2.1/raw/master/open/NeuralMagic/code/bert/deepsparse/models/oBERT-Large_95sparse_block4_qat.onnx.tar.xz fp32: default: true env: - CM_ML_MODEL_PRECISION: fp32 + MLC_ML_MODEL_PRECISION: fp32 group: precision github: group: download-source int8: env: - CM_ML_MODEL_PRECISION: int8 - CM_ML_MODEL_QUANTIZED: 'yes' + MLC_ML_MODEL_PRECISION: int8 + MLC_ML_MODEL_QUANTIZED: 'yes' group: precision onnx: default: true default_variations: download-source: armi env: - CM_ML_MODEL_FRAMEWORK: onnx - CM_ML_MODEL_INPUT_IDS_NAME: input_ids - CM_ML_MODEL_INPUT_MASK_NAME: input_mask - CM_ML_MODEL_INPUT_SEGMENTS_NAME: segment_ids - CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: output_end_logits - CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: output_start_logits + MLC_ML_MODEL_FRAMEWORK: onnx + MLC_ML_MODEL_INPUT_IDS_NAME: input_ids + MLC_ML_MODEL_INPUT_MASK_NAME: input_mask + MLC_ML_MODEL_INPUT_SEGMENTS_NAME: segment_ids + MLC_ML_MODEL_OUTPUT_END_LOGITS_NAME: output_end_logits + MLC_ML_MODEL_OUTPUT_START_LOGITS_NAME: output_start_logits group: framework onnx,fp32: env: - CM_DOWNLOAD_CHECKSUM: 819b25b19cd8e59080c10892689750ca - CM_ML_MODEL_F1: '90.874' + MLC_DOWNLOAD_CHECKSUM: 819b25b19cd8e59080c10892689750ca + MLC_ML_MODEL_F1: '90.874' onnx,fp32,armi: env: - CM_PACKAGE_URL: https://armi.in/files/model.onnx - CM_PACKAGE_URL1: https://zenodo.org/record/3733910/files/model.onnx + MLC_PACKAGE_URL: https://armi.in/files/model.onnx + MLC_PACKAGE_URL1: https://zenodo.org/record/3733910/files/model.onnx onnx,fp32,zenodo: env: - CM_PACKAGE_URL: https://zenodo.org/record/3733910/files/model.onnx + MLC_PACKAGE_URL: https://zenodo.org/record/3733910/files/model.onnx onnx,int8: env: - CM_DOWNLOAD_CHECKSUM: 45f88ffb2915362242703c85c38ec2d4 - CM_ML_MODEL_F1: '90.067' - CM_PACKAGE_URL: https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx + MLC_DOWNLOAD_CHECKSUM: 45f88ffb2915362242703c85c38ec2d4 + MLC_ML_MODEL_F1: '90.067' + MLC_PACKAGE_URL: https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx onnx,int8,amazon-s3: env: - CM_PACKAGE_URL: https://mlperf-public.s3.us-west-2.amazonaws.com/bert_large_v1_1_fake_quant.onnx + MLC_PACKAGE_URL: 
https://mlperf-public.s3.us-west-2.amazonaws.com/bert_large_v1_1_fake_quant.onnx onnx,int8,zenodo: env: - CM_PACKAGE_URL: https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx + MLC_PACKAGE_URL: https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx onnxruntime: base: - onnx @@ -162,47 +162,47 @@ variations: - inference-src tags: get,mlperf,inference,src env: - CM_ML_MODEL_BERT_PACKED: 'yes' + MLC_ML_MODEL_BERT_PACKED: 'yes' group: packing new_env_keys: - - CM_BERT_ + - MLC_BERT_ prehook_deps: - env: - CM_DOWNLOAD_CHECKSUM: 3089b27c559906a868878741d992ade7 - CM_DOWNLOAD_FILENAME: model.ckpt-5474.data-00000-of-00001 - CM_DOWNLOAD_FINAL_ENV_NAME: CM_BERT_CHECKPOINT_DATA_PATH - CM_DOWNLOAD_PATH: <<>> + MLC_DOWNLOAD_CHECKSUM: 3089b27c559906a868878741d992ade7 + MLC_DOWNLOAD_FILENAME: model.ckpt-5474.data-00000-of-00001 + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_BERT_CHECKPOINT_DATA_PATH + MLC_DOWNLOAD_PATH: <<>> extra_cache_tags: bert,checkpoint,weights,bert-large force_cache: true tags: download,file,_wget,_url.https://zenodo.org/record/3733868/files/model.ckpt-5474.data-00000-of-00001 - env: - CM_DOWNLOAD_CHECKSUM: d23d61572d9404da4dac3363b5bc735b - CM_DOWNLOAD_FILENAME: model.ckpt-5474.index - CM_DOWNLOAD_FINAL_ENV_NAME: CM_BERT_CHECKPOINT_INDEX_PATH - CM_DOWNLOAD_PATH: <<>> + MLC_DOWNLOAD_CHECKSUM: d23d61572d9404da4dac3363b5bc735b + MLC_DOWNLOAD_FILENAME: model.ckpt-5474.index + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_BERT_CHECKPOINT_INDEX_PATH + MLC_DOWNLOAD_PATH: <<>> extra_cache_tags: bert,checkpoint-index,bert-large force_cache: true tags: download,file,_wget,_url.https://zenodo.org/record/3733868/files/model.ckpt-5474.index - env: - CM_DOWNLOAD_CHECKSUM: 83e11e57eea14c9e9a246af74af40d66 - CM_DOWNLOAD_FILENAME: model.ckpt-5474.meta - CM_DOWNLOAD_FINAL_ENV_NAME: CM_BERT_CHECKPOINT_META_PATH - CM_DOWNLOAD_PATH: <<>> + MLC_DOWNLOAD_CHECKSUM: 83e11e57eea14c9e9a246af74af40d66 + MLC_DOWNLOAD_FILENAME: model.ckpt-5474.meta + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_BERT_CHECKPOINT_META_PATH + MLC_DOWNLOAD_PATH: <<>> extra_cache_tags: bert,checkpoint-meta,bert-large force_cache: true tags: download,file,_wget,_url.https://zenodo.org/record/3733868/files/model.ckpt-5474.meta - env: - CM_DOWNLOAD_CHECKSUM: 64800d5d8528ce344256daf115d4965e - CM_DOWNLOAD_FILENAME: vocab.txt - CM_DOWNLOAD_FINAL_ENV_NAME: CM_BERT_VOCAB_PATH - CM_DOWNLOAD_PATH: <<>> + MLC_DOWNLOAD_CHECKSUM: 64800d5d8528ce344256daf115d4965e + MLC_DOWNLOAD_FILENAME: vocab.txt + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_BERT_VOCAB_PATH + MLC_DOWNLOAD_PATH: <<>> extra_cache_tags: bert,vocab,bert-large force_cache: true tags: download,file,_wget,_url.https://zenodo.org/record/3733868/files/vocab.txt - env: - CM_DOWNLOAD_CHECKSUM: 94c91ce422e8f36f9d98b4926e2ad688 - CM_DOWNLOAD_FILENAME: convert_model.py - CM_DOWNLOAD_FINAL_ENV_NAME: CM_BERT_CONVERTER_CODE_PATH + MLC_DOWNLOAD_CHECKSUM: 94c91ce422e8f36f9d98b4926e2ad688 + MLC_DOWNLOAD_FILENAME: convert_model.py + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_BERT_CONVERTER_CODE_PATH extra_cache_tags: bert,checkpoint,converter,code,bert-large force_cache: true tags: download,file,_wget,_url.https://raw.githubusercontent.com/krai/axs2kilt/main/model_onnx_bert_large_packed_recipe/convert_model.py @@ -210,35 +210,35 @@ variations: default_variations: download-source: armi env: - CM_ML_MODEL_FRAMEWORK: pytorch - CM_ML_MODEL_INPUT_IDS_NAME: input_ids - CM_ML_MODEL_INPUT_MASK_NAME: input_mask - CM_ML_MODEL_INPUT_SEGMENTS_NAME: segment_ids - CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: output_end_logits - 
CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: output_start_logits + MLC_ML_MODEL_FRAMEWORK: pytorch + MLC_ML_MODEL_INPUT_IDS_NAME: input_ids + MLC_ML_MODEL_INPUT_MASK_NAME: input_mask + MLC_ML_MODEL_INPUT_SEGMENTS_NAME: segment_ids + MLC_ML_MODEL_OUTPUT_END_LOGITS_NAME: output_end_logits + MLC_ML_MODEL_OUTPUT_START_LOGITS_NAME: output_start_logits group: framework pytorch,fp32: env: - CM_DOWNLOAD_CHECKSUM: 00fbcbfaebfa20d87ac9885120a6e9b4 - CM_ML_MODEL_F1: '90.874' + MLC_DOWNLOAD_CHECKSUM: 00fbcbfaebfa20d87ac9885120a6e9b4 + MLC_ML_MODEL_F1: '90.874' pytorch,fp32,armi: env: - CM_PACKAGE_URL: https://armi.in/files/fp32/model.pytorch - CM_PACKAGE_URL1: https://zenodo.org/record/3733896/files/model.pytorch + MLC_PACKAGE_URL: https://armi.in/files/fp32/model.pytorch + MLC_PACKAGE_URL1: https://zenodo.org/record/3733896/files/model.pytorch pytorch,fp32,zenodo: env: - CM_PACKAGE_URL: https://zenodo.org/record/3733896/files/model.pytorch + MLC_PACKAGE_URL: https://zenodo.org/record/3733896/files/model.pytorch pytorch,int8: env: - CM_DOWNLOAD_CHECKSUM: 0734c580cb53b4b56a3f400771ffcb7c - CM_ML_MODEL_F1: '90.633' + MLC_DOWNLOAD_CHECKSUM: 0734c580cb53b4b56a3f400771ffcb7c + MLC_ML_MODEL_F1: '90.633' pytorch,int8,armi: env: - CM_PACKAGE_URL: https://armi.in/files/int8/pytorch_model.bin - CM_PACKAGE_URL1: https://zenodo.org/record/4792496/files/pytorch_model.bin + MLC_PACKAGE_URL: https://armi.in/files/int8/pytorch_model.bin + MLC_PACKAGE_URL1: https://zenodo.org/record/4792496/files/pytorch_model.bin pytorch,int8,zenodo: env: - CM_PACKAGE_URL: https://zenodo.org/record/4792496/files/pytorch_model.bin + MLC_PACKAGE_URL: https://zenodo.org/record/4792496/files/pytorch_model.bin tensorflow: base: - tf @@ -246,24 +246,24 @@ variations: default_variations: download-source: zenodo env: - CM_ML_MODEL_FRAMEWORK: tf - CM_ML_MODEL_INPUT_IDS_NAME: input_ids - CM_ML_MODEL_INPUT_MASK_NAME: input_mask - CM_ML_MODEL_INPUT_SEGMENTS_NAME: segment_ids - CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: output_end_logits - CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: output_start_logits + MLC_ML_MODEL_FRAMEWORK: tf + MLC_ML_MODEL_INPUT_IDS_NAME: input_ids + MLC_ML_MODEL_INPUT_MASK_NAME: input_mask + MLC_ML_MODEL_INPUT_SEGMENTS_NAME: segment_ids + MLC_ML_MODEL_OUTPUT_END_LOGITS_NAME: output_end_logits + MLC_ML_MODEL_OUTPUT_START_LOGITS_NAME: output_start_logits group: framework tf,fp32: env: - CM_DOWNLOAD_CHECKSUM: dd72de12e8226f25f0128a1a864b97ad - CM_ML_MODEL_F1: '90.874' + MLC_DOWNLOAD_CHECKSUM: dd72de12e8226f25f0128a1a864b97ad + MLC_ML_MODEL_F1: '90.874' tf,fp32,zenodo: env: - CM_PACKAGE_URL: https://zenodo.org/record/3939747/files/model.pb + MLC_PACKAGE_URL: https://zenodo.org/record/3939747/files/model.pb unpacked: default: true env: - CM_ML_MODEL_BERT_PACKED: 'no' + MLC_ML_MODEL_BERT_PACKED: 'no' group: packing zenodo: group: download-source diff --git a/script/get-ml-model-bert-large-squad/run-packed.sh b/script/get-ml-model-bert-large-squad/run-packed.sh index 4c7b016c9..220d1fbc2 100644 --- a/script/get-ml-model-bert-large-squad/run-packed.sh +++ b/script/get-ml-model-bert-large-squad/run-packed.sh @@ -1,6 +1,6 @@ #!/bin/bash -cmd="${CM_PYTHON_BIN_WITH_PATH} ${CM_BERT_CONVERTER_CODE_PATH} --src '${CM_BERT_CHECKPOINT_INDEX_PATH}/../model.ckpt-5474' --dest '$PWD/' --config_path '${CM_BERT_CONFIG_PATH}'" +cmd="${MLC_PYTHON_BIN_WITH_PATH} ${MLC_BERT_CONVERTER_CODE_PATH} --src '${MLC_BERT_CHECKPOINT_INDEX_PATH}/../model.ckpt-5474' --dest '$PWD/' --config_path '${MLC_BERT_CONFIG_PATH}'" echo $cmd eval $cmd test $? -eq 0 || exit $? 
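Most of the Python changes in this patch land in per-script `customize.py` files, which share one calling convention visible in the hunks: `preprocess` and `postprocess` receive a dict `i` carrying `env` (and often `os_info` and `run_script_input`), mutate `i['env']` in place, and report status through the returned dict. A minimal sketch of that contract, inferred from the surrounding hunks rather than taken verbatim from any one script:

```python
# Minimal sketch of the customize.py convention used throughout this patch.
# Inferred from the surrounding hunks; the key names below are illustrative.
import os


def preprocess(i):
    env = i['env']
    if env.get('MLC_ML_MODEL_FILE_WITH_PATH', '') == '':
        # Failures are reported via a non-zero 'return' plus an 'error' string.
        return {'return': 1, 'error': 'MLC_ML_MODEL_FILE_WITH_PATH is not set'}
    return {'return': 0}


def postprocess(i):
    env = i['env']
    # Publish the bare file name and the cached path, mirroring the pattern
    # used by the model-fetching scripts in this patch.
    env['MLC_ML_MODEL_FILE'] = os.path.basename(env['MLC_ML_MODEL_FILE_WITH_PATH'])
    env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_ML_MODEL_FILE_WITH_PATH']
    return {'return': 0}
```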
diff --git a/script/get-ml-model-dlrm-terabyte/meta.yaml b/script/get-ml-model-dlrm-terabyte/meta.yaml index 4d5c93f1b..fed99aaa4 100644 --- a/script/get-ml-model-dlrm-terabyte/meta.yaml +++ b/script/get-ml-model-dlrm-terabyte/meta.yaml @@ -4,34 +4,34 @@ automation_uid: 5b4e0237da074764 cache: true category: AI/ML models env: - CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH - CM_EXTRACT_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH - CM_ML_MODEL: dlrm - CM_ML_MODEL_DATASET: criteo-terabyte - CM_ML_MODEL_RETRAINING: 'no' - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH + MLC_EXTRACT_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH + MLC_ML_MODEL: dlrm + MLC_ML_MODEL_DATASET: criteo-terabyte + MLC_ML_MODEL_RETRAINING: 'no' + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' input_mapping: - dir: CM_DOWNLOAD_PATH - download_path: CM_DOWNLOAD_PATH - to: CM_DOWNLOAD_PATH + dir: MLC_DOWNLOAD_PATH + download_path: MLC_DOWNLOAD_PATH + to: MLC_DOWNLOAD_PATH new_env_keys: -- CM_ML_MODEL_* +- MLC_ML_MODEL_* prehook_deps: - env: - CM_DOWNLOAD_DOWNLOADED_FILENAME: <<>> - CM_EXTRACT_EXTRACTED_FILENAME: <<>> + MLC_DOWNLOAD_DOWNLOADED_FILENAME: <<>> + MLC_EXTRACT_EXTRACTED_FILENAME: <<>> extra_cache_tags: ml-model,dlrm,terabyte,raw,ml-model-dlrm force_cache: true names: - dae tags: download-and-extract force_env_keys: - - CM_OUTDIRNAME + - MLC_OUTDIRNAME update_tags_from_env_with_prefix: _url.: - - CM_PACKAGE_URL + - MLC_PACKAGE_URL print_env_at_the_end: - CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model + MLC_ML_MODEL_FILE_WITH_PATH: Path to the ML model tags: - get - ml-model @@ -45,71 +45,71 @@ uid: 8fa7582c603a4db3 variations: debug: env: - CM_ML_MODEL_DEBUG: 'yes' + MLC_ML_MODEL_DEBUG: 'yes' fp32: default: true env: - CM_ML_MODEL_INPUT_DATA_TYPES: fp32 - CM_ML_MODEL_PRECISION: fp32 - CM_ML_MODEL_WEIGHT_DATA_TYPES: fp32 + MLC_ML_MODEL_INPUT_DATA_TYPES: fp32 + MLC_ML_MODEL_PRECISION: fp32 + MLC_ML_MODEL_WEIGHT_DATA_TYPES: fp32 group: precision onnx: env: - CM_ML_MODEL_FRAMEWORK: onnx + MLC_ML_MODEL_FRAMEWORK: onnx group: framework onnx,fp32: env: - CM_DOWNLOAD_CHECKSUM: 763b964eaffe5f86e92cdcb60c5dc0de - CM_ML_MODEL_ACCURACY: '0.8025' - CM_ML_MODEL_DLRM_MAX_INDEX_RANGE: '40000000' - CM_ML_MODEL_FILE: tb00_40M.onnx - CM_PACKAGE_URL: https://dlrm.s3-us-west-1.amazonaws.com/models/tb00_40M.onnx.tar - CM_UNTAR: 'yes' + MLC_DOWNLOAD_CHECKSUM: 763b964eaffe5f86e92cdcb60c5dc0de + MLC_ML_MODEL_ACCURACY: '0.8025' + MLC_ML_MODEL_DLRM_MAX_INDEX_RANGE: '40000000' + MLC_ML_MODEL_FILE: tb00_40M.onnx + MLC_PACKAGE_URL: https://dlrm.s3-us-west-1.amazonaws.com/models/tb00_40M.onnx.tar + MLC_UNTAR: 'yes' onnx,fp32,debug: env: - CM_DOWNLOAD_CHECKSUM: d11255cd9926cda9181a347861e4d263 - CM_ML_MODEL_ACCURACY: '0.8107' - CM_ML_MODEL_DLRM_MAX_INDEX_RANGE: '10000000' - CM_ML_MODEL_FILE: tb0875_10M.onnx - CM_PACKAGE_URL: https://dlrm.s3-us-west-1.amazonaws.com/models/tb0875_10M.onnx.tar - CM_UNTAR: 'yes' + MLC_DOWNLOAD_CHECKSUM: d11255cd9926cda9181a347861e4d263 + MLC_ML_MODEL_ACCURACY: '0.8107' + MLC_ML_MODEL_DLRM_MAX_INDEX_RANGE: '10000000' + MLC_ML_MODEL_FILE: tb0875_10M.onnx + MLC_PACKAGE_URL: https://dlrm.s3-us-west-1.amazonaws.com/models/tb0875_10M.onnx.tar + MLC_UNTAR: 'yes' pytorch: default: true env: - CM_ML_MODEL_FRAMEWORK: pytorch - CM_TMP_MODEL_ADDITIONAL_NAME: dlrm_terabyte.pytorch + MLC_ML_MODEL_FRAMEWORK: pytorch + MLC_TMP_MODEL_ADDITIONAL_NAME: dlrm_terabyte.pytorch group: framework pytorch,fp32: env: - CM_DOWNLOAD_CHECKSUM: 2d49a5288cddb37c3c64860a06d79bb9 - 
CM_ML_MODEL_ACCURACY: '0.8025' - CM_ML_MODEL_DLRM_MAX_INDEX_RANGE: '40000000' - CM_PACKAGE_URL: https://dlrm.s3-us-west-1.amazonaws.com/models/tb00_40M.pt + MLC_DOWNLOAD_CHECKSUM: 2d49a5288cddb37c3c64860a06d79bb9 + MLC_ML_MODEL_ACCURACY: '0.8025' + MLC_ML_MODEL_DLRM_MAX_INDEX_RANGE: '40000000' + MLC_PACKAGE_URL: https://dlrm.s3-us-west-1.amazonaws.com/models/tb00_40M.pt pytorch,fp32,debug: env: - CM_DOWNLOAD_CHECKSUM: b7cacffcf75f767faa9cb2af397723aa - CM_ML_MODEL_ACCURACY: '0.8107' - CM_ML_MODEL_DLRM_MAX_INDEX_RANGE: '10000000' - CM_PACKAGE_URL: https://dlrm.s3-us-west-1.amazonaws.com/models/tb0875_10M.pt + MLC_DOWNLOAD_CHECKSUM: b7cacffcf75f767faa9cb2af397723aa + MLC_ML_MODEL_ACCURACY: '0.8107' + MLC_ML_MODEL_DLRM_MAX_INDEX_RANGE: '10000000' + MLC_PACKAGE_URL: https://dlrm.s3-us-west-1.amazonaws.com/models/tb0875_10M.pt pytorch,fp32,weight_sharded: default_variations: download-tool: rclone env: - CM_DOWNLOAD_CHECKSUM: '' - CM_ML_MODEL_ACCURACY: '0.8025' - CM_ML_MODEL_DLRM_MAX_INDEX_RANGE: '40000000' - CM_ML_MODEL_FILE: model_weights - CM_TMP_MODEL_ADDITIONAL_NAME: '' + MLC_DOWNLOAD_CHECKSUM: '' + MLC_ML_MODEL_ACCURACY: '0.8025' + MLC_ML_MODEL_DLRM_MAX_INDEX_RANGE: '40000000' + MLC_ML_MODEL_FILE: model_weights + MLC_TMP_MODEL_ADDITIONAL_NAME: '' pytorch,fp32,weight_sharded,rclone: env: - CM_PACKAGE_URL: mlc-inference:mlcommons-inference-wg-public/model_weights - CM_RCLONE_CONFIG_NAME: mlc-inference + MLC_PACKAGE_URL: mlc-inference:mlcommons-inference-wg-public/model_weights + MLC_RCLONE_CONFIG_NAME: mlc-inference pytorch,fp32,weight_sharded,wget: env: - CM_DAE_EXTRACT_DOWNLOADED: 'yes' - CM_DOWNLOAD_FILENAME: download - CM_EXTRACT_UNZIP: 'yes' - CM_PACKAGE_URL: https://cloud.mlcommons.org/index.php/s/XzfSeLgW8FYfR3S/download + MLC_DAE_EXTRACT_DOWNLOADED: 'yes' + MLC_DOWNLOAD_FILENAME: download + MLC_EXTRACT_UNZIP: 'yes' + MLC_PACKAGE_URL: https://cloud.mlcommons.org/index.php/s/XzfSeLgW8FYfR3S/download rclone: ad: dae: @@ -118,7 +118,7 @@ variations: weight_sharded: default: true env: - CM_DLRM_MULTIHOT_MODEL: 'yes' + MLC_DLRM_MULTIHOT_MODEL: 'yes' group: type wget: ad: diff --git a/script/get-ml-model-dlrm-terabyte/run.sh b/script/get-ml-model-dlrm-terabyte/run.sh index d2595b32f..2da188061 100644 --- a/script/get-ml-model-dlrm-terabyte/run.sh +++ b/script/get-ml-model-dlrm-terabyte/run.sh @@ -1,4 +1,4 @@ #/bin/bash -if [[ ${CM_TMP_MODEL_ADDITIONAL_NAME} ]]; then - ln -s ${CM_ML_MODEL_FILE} ${CM_TMP_MODEL_ADDITIONAL_NAME} +if [[ ${MLC_TMP_MODEL_ADDITIONAL_NAME} ]]; then + ln -s ${MLC_ML_MODEL_FILE} ${MLC_TMP_MODEL_ADDITIONAL_NAME} fi diff --git a/script/get-ml-model-efficientnet-lite/customize.py b/script/get-ml-model-efficientnet-lite/customize.py index 59f3c580e..6b8a8fd6b 100644 --- a/script/get-ml-model-efficientnet-lite/customize.py +++ b/script/get-ml-model-efficientnet-lite/customize.py @@ -14,8 +14,8 @@ def preprocess(i): path = os.getcwd() - url = env['CM_PACKAGE_URL'] - env['CM_ML_MODEL_STARTING_WEIGHTS_FILENAME'] = url + url = env['MLC_PACKAGE_URL'] + env['MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME'] = url print('Downloading from {}'.format(url)) @@ -27,30 +27,30 @@ def preprocess(i): filename = r['filename'] - if env.get('CM_UNZIP') == "yes" or env.get('CM_UNTAR') == "yes": - if env.get('CM_UNZIP') == "yes": + if env.get('MLC_UNZIP') == "yes" or env.get('MLC_UNTAR') == "yes": + if env.get('MLC_UNZIP') == "yes": cmd = "unzip " - elif env.get('CM_UNTAR') == "yes": + elif env.get('MLC_UNTAR') == "yes": cmd = "tar -xvzf " os.system(cmd + filename) - filename = env['CM_ML_MODEL_FILE'] 
+ filename = env['MLC_ML_MODEL_FILE'] - extract_folder = env.get('CM_EXTRACT_FOLDER', '') + extract_folder = env.get('MLC_EXTRACT_FOLDER', '') if extract_folder: - env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join( + env['MLC_ML_MODEL_FILE_WITH_PATH'] = os.path.join( path, extract_folder, filename) else: - env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join(path, filename) + env['MLC_ML_MODEL_FILE_WITH_PATH'] = os.path.join(path, filename) else: - env['CM_ML_MODEL_FILE'] = filename - env['CM_ML_MODEL_FILE_WITH_PATH'] = r['path'] + env['MLC_ML_MODEL_FILE'] = filename + env['MLC_ML_MODEL_FILE_WITH_PATH'] = r['path'] - env['CM_ML_MODEL_PATH'] = path + env['MLC_ML_MODEL_PATH'] = path - if not os.path.exists(env['CM_ML_MODEL_FILE_WITH_PATH']): + if not os.path.exists(env['MLC_ML_MODEL_FILE_WITH_PATH']): return { - 'return': 1, 'error': f"Model file path {env['CM_ML_MODEL_FILE_WITH_PATH']} not existing. Probably the model name {env['CM_ML_MODEL_FILE']} in model meta is wrong"} + 'return': 1, 'error': f"Model file path {env['MLC_ML_MODEL_FILE_WITH_PATH']} not existing. Probably the model name {env['MLC_ML_MODEL_FILE']} in model meta is wrong"} return {'return': 0} diff --git a/script/get-ml-model-efficientnet-lite/meta.yaml b/script/get-ml-model-efficientnet-lite/meta.yaml index e40dd196c..df1bbc519 100644 --- a/script/get-ml-model-efficientnet-lite/meta.yaml +++ b/script/get-ml-model-efficientnet-lite/meta.yaml @@ -4,34 +4,34 @@ automation_uid: 5b4e0237da074764 cache: true category: AI/ML models default_env: - CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 - CM_ML_MODEL_PRECISION: fp32 - CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 + MLC_ML_MODEL_INPUTS_DATA_TYPE: fp32 + MLC_ML_MODEL_PRECISION: fp32 + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 env: - CM_EXTRACT_FOLDER: efficientnet-<<>> - CM_ML_MODEL: efficientnet-lite - CM_ML_MODEL_DATASET: imagenet2012-val - CM_ML_MODEL_DATA_LAYOUT: NHWC - CM_ML_MODEL_FILE: efficientnet-<<>>-<<>>.tflite - CM_ML_MODEL_FULL_NAME: efficientnet-<<>>-<<>> - CM_ML_MODEL_GIVEN_CHANNEL_MEANS: '' - CM_ML_MODEL_INPUT_LAYER_NAME: images - CM_ML_MODEL_INPUT_SHAPES: '\"input\": (BATCH_SIZE, 224, 224, 3)' - CM_ML_MODEL_MOBILENET_NAME_SUFFIX: '' - CM_ML_MODEL_NORMALIZE_DATA: 'yes' - CM_ML_MODEL_OUTPUT_LAYER_NAME: Softmax - CM_ML_MODEL_RETRAINING: 'no' - CM_ML_MODEL_SUBTRACT_MEANS: '0' - CM_ML_MODEL_WEIGHTS_ARE_CHECKPOINTS: 'yes' - CM_ML_MODEL_WEIGHTS_FILE: model.ckpt.data-00000-of-00001 - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' - CM_PACKAGE_URL: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/lite/efficientnet-<<>>.tar.gz - CM_UNTAR: 'yes' + MLC_EXTRACT_FOLDER: efficientnet-<<>> + MLC_ML_MODEL: efficientnet-lite + MLC_ML_MODEL_DATASET: imagenet2012-val + MLC_ML_MODEL_DATA_LAYOUT: NHWC + MLC_ML_MODEL_FILE: efficientnet-<<>>-<<>>.tflite + MLC_ML_MODEL_FULL_NAME: efficientnet-<<>>-<<>> + MLC_ML_MODEL_GIVEN_CHANNEL_MEANS: '' + MLC_ML_MODEL_INPUT_LAYER_NAME: images + MLC_ML_MODEL_INPUT_SHAPES: '\"input\": (BATCH_SIZE, 224, 224, 3)' + MLC_ML_MODEL_MOBILENET_NAME_SUFFIX: '' + MLC_ML_MODEL_NORMALIZE_DATA: 'yes' + MLC_ML_MODEL_OUTPUT_LAYER_NAME: Softmax + MLC_ML_MODEL_RETRAINING: 'no' + MLC_ML_MODEL_SUBTRACT_MEANS: '0' + MLC_ML_MODEL_WEIGHTS_ARE_CHECKPOINTS: 'yes' + MLC_ML_MODEL_WEIGHTS_FILE: model.ckpt.data-00000-of-00001 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' + MLC_PACKAGE_URL: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/lite/efficientnet-<<>>.tar.gz + MLC_UNTAR: 'yes' new_env_keys: -- CM_ML_MODEL_* -- CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS +- MLC_ML_MODEL_* +- 
MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS print_env_at_the_end: - CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model + MLC_ML_MODEL_FILE_WITH_PATH: Path to the ML model tags: - get - ml-model @@ -58,10 +58,10 @@ variations: fp32: default: true env: - CM_ML_MODEL_EFFICIENTNET_LITE_PRECISION: fp32 - CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 - CM_ML_MODEL_PRECISION: fp32 - CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 + MLC_ML_MODEL_EFFICIENTNET_LITE_PRECISION: fp32 + MLC_ML_MODEL_INPUTS_DATA_TYPE: fp32 + MLC_ML_MODEL_PRECISION: fp32 + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 group: precision int8: alias: uint8 @@ -70,73 +70,73 @@ variations: - resolution-224 default: true env: - CM_ML_MODEL_EFFICIENTNET_LITE_KIND: lite0 + MLC_ML_MODEL_EFFICIENTNET_LITE_KIND: lite0 group: kind lite1: base: - resolution-240 env: - CM_ML_MODEL_EFFICIENTNET_LITE_KIND: lite1 + MLC_ML_MODEL_EFFICIENTNET_LITE_KIND: lite1 group: kind lite2: base: - resolution-260 env: - CM_ML_MODEL_EFFICIENTNET_LITE_KIND: lite2 + MLC_ML_MODEL_EFFICIENTNET_LITE_KIND: lite2 group: kind lite3: base: - resolution-280 env: - CM_ML_MODEL_EFFICIENTNET_LITE_KIND: lite3 + MLC_ML_MODEL_EFFICIENTNET_LITE_KIND: lite3 group: kind lite4: base: - resolution-300 env: - CM_ML_MODEL_EFFICIENTNET_LITE_KIND: lite4 + MLC_ML_MODEL_EFFICIENTNET_LITE_KIND: lite4 group: kind resolution-224: default: true env: - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.224 - CM_ML_MODEL_IMAGE_HEIGHT: '224' - CM_ML_MODEL_IMAGE_WIDTH: '224' - CM_ML_MODEL_MOBILENET_RESOLUTION: '224' + MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.224 + MLC_ML_MODEL_IMAGE_HEIGHT: '224' + MLC_ML_MODEL_IMAGE_WIDTH: '224' + MLC_ML_MODEL_MOBILENET_RESOLUTION: '224' group: resolution resolution-240: env: - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.240 - CM_ML_MODEL_IMAGE_HEIGHT: '240' - CM_ML_MODEL_IMAGE_WIDTH: '240' - CM_ML_MODEL_MOBILENET_RESOLUTION: '240' + MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.240 + MLC_ML_MODEL_IMAGE_HEIGHT: '240' + MLC_ML_MODEL_IMAGE_WIDTH: '240' + MLC_ML_MODEL_MOBILENET_RESOLUTION: '240' group: resolution resolution-260: env: - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.260 - CM_ML_MODEL_IMAGE_HEIGHT: '260' - CM_ML_MODEL_IMAGE_WIDTH: '260' - CM_ML_MODEL_MOBILENET_RESOLUTION: '260' + MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.260 + MLC_ML_MODEL_IMAGE_HEIGHT: '260' + MLC_ML_MODEL_IMAGE_WIDTH: '260' + MLC_ML_MODEL_MOBILENET_RESOLUTION: '260' group: resolution resolution-280: env: - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.280 - CM_ML_MODEL_IMAGE_HEIGHT: '280' - CM_ML_MODEL_IMAGE_WIDTH: '280' - CM_ML_MODEL_MOBILENET_RESOLUTION: '280' + MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.280 + MLC_ML_MODEL_IMAGE_HEIGHT: '280' + MLC_ML_MODEL_IMAGE_WIDTH: '280' + MLC_ML_MODEL_MOBILENET_RESOLUTION: '280' group: resolution resolution-300: env: - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.300 - CM_ML_MODEL_IMAGE_HEIGHT: '300' - CM_ML_MODEL_IMAGE_WIDTH: '300' - CM_ML_MODEL_MOBILENET_RESOLUTION: '300' + MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.300 + MLC_ML_MODEL_IMAGE_HEIGHT: '300' + MLC_ML_MODEL_IMAGE_WIDTH: '300' + MLC_ML_MODEL_MOBILENET_RESOLUTION: '300' group: resolution tflite: {} uint8: env: - CM_ML_MODEL_EFFICIENTNET_LITE_PRECISION: int8 - CM_ML_MODEL_INPUTS_DATA_TYPE: uint8 - CM_ML_MODEL_PRECISION: uint8 - CM_ML_MODEL_WEIGHTS_DATA_TYPE: uint8 + MLC_ML_MODEL_EFFICIENTNET_LITE_PRECISION: int8 + MLC_ML_MODEL_INPUTS_DATA_TYPE: uint8 + MLC_ML_MODEL_PRECISION: uint8 + 
MLC_ML_MODEL_WEIGHTS_DATA_TYPE: uint8 group: precision diff --git a/script/get-ml-model-gptj/customize.py b/script/get-ml-model-gptj/customize.py index d4fc749e5..b9c640faf 100644 --- a/script/get-ml-model-gptj/customize.py +++ b/script/get-ml-model-gptj/customize.py @@ -7,43 +7,43 @@ def preprocess(i): os_info = i['os_info'] env = i['env'] - if env.get('CM_GPTJ_INTEL_MODEL', '') == 'yes': + if env.get('MLC_GPTJ_INTEL_MODEL', '') == 'yes': i['run_script_input']['script_name'] = 'run-intel' harness_root = os.path.join( - env['CM_MLPERF_INFERENCE_RESULTS_PATH'], + env['MLC_MLPERF_INFERENCE_RESULTS_PATH'], 'closed', 'Intel', 'code', 'gptj-99', 'pytorch-cpu') print(f"Harness Root: {harness_root}") - env['CM_HARNESS_CODE_ROOT'] = harness_root - env['CM_CALIBRATION_CODE_ROOT'] = os.path.join( - env['CM_MLPERF_INFERENCE_RESULTS_PATH'], 'closed', 'Intel', 'calibration') + env['MLC_HARNESS_CODE_ROOT'] = harness_root + env['MLC_CALIBRATION_CODE_ROOT'] = os.path.join( + env['MLC_MLPERF_INFERENCE_RESULTS_PATH'], 'closed', 'Intel', 'calibration') env['CHECKPOINT_DIR'] = env['GPTJ_CHECKPOINT_PATH'] env['QUANTIZED_MODEL_DIR'] = os.getcwd() - if env['CM_ML_MODEL_WEIGHT_DATA_TYPES'] == "int8": + if env['MLC_ML_MODEL_WEIGHT_DATA_TYPES'] == "int8": env['INT8_MODEL_DIR'] = os.getcwd() else: env['INT4_MODEL_DIR'] = os.getcwd() - elif env.get('CM_TMP_ML_MODEL_PROVIDER', '') == 'nvidia': + elif env.get('MLC_TMP_ML_MODEL_PROVIDER', '') == 'nvidia': i['run_script_input']['script_name'] = 'run-nvidia' - if str(env.get('CM_DOCKER_DETACHED_MODE', '') + if str(env.get('MLC_DOCKER_DETACHED_MODE', '') ).lower() in ['yes', 'true', "1"]: env['DOCKER_RUN_OPTS'] = "--rm --ipc=host --ulimit memlock=-1 --ulimit stack=67108864" gpu_arch = int( float( - env['CM_CUDA_DEVICE_PROP_GPU_COMPUTE_CAPABILITY']) * + env['MLC_CUDA_DEVICE_PROP_GPU_COMPUTE_CAPABILITY']) * 10) - env['CM_GPU_ARCH'] = gpu_arch - env['CM_TMP_REQUIRE_DOWNLOAD'] = 'no' + env['MLC_GPU_ARCH'] = gpu_arch + env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'no' else: - is_saxml = env.get('CM_TMP_MODEL_SAXML', '') + is_saxml = env.get('MLC_TMP_MODEL_SAXML', '') if is_saxml == "fp32": i['run_script_input']['script_name'] = 'run-saxml' elif is_saxml == "int8": @@ -52,7 +52,7 @@ def preprocess(i): path = env.get('GPTJ_CHECKPOINT_PATH', '').strip() if path == '' or not os.path.exists(path): - env['CM_TMP_REQUIRE_DOWNLOAD'] = 'yes' + env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'yes' return {'return': 0} @@ -66,12 +66,12 @@ def postprocess(i): env['GPTJ_CHECKPOINT_PATH'] = os.path.join( env['GPTJ_CHECKPOINT_PATH'], "checkpoint-final") - is_saxml = env.get('CM_TMP_MODEL_SAXML', '') + is_saxml = env.get('MLC_TMP_MODEL_SAXML', '') if is_saxml == "fp32": if os.path.exists("pax_gptj_checkpoint"): env['GPTJ_SAXML_CHECKPOINT_PATH'] = os.path.join( os.getcwd(), "pax_gptj_checkpoint") - env['CM_ML_MODEL_FILE_WITH_PATH'] = env['GPTJ_SAXML_CHECKPOINT_PATH'] + env['MLC_ML_MODEL_FILE_WITH_PATH'] = env['GPTJ_SAXML_CHECKPOINT_PATH'] else: return {'return': 1, 'error': 'pax_gptj_checkpoint generation failed'} @@ -79,21 +79,21 @@ def postprocess(i): if os.path.exists("int8_ckpt"): env['GPTJ_SAXML_INT8_CHECKPOINT_PATH'] = os.path.join( os.getcwd(), "int8_ckpt") - env['CM_ML_MODEL_FILE_WITH_PATH'] = env['GPTJ_SAXML_INT8_CHECKPOINT_PATH'] + env['MLC_ML_MODEL_FILE_WITH_PATH'] = env['GPTJ_SAXML_INT8_CHECKPOINT_PATH'] else: return {'return': 1, 'error': 'pax_gptj_checkpoint generation failed'} - elif env.get('CM_TMP_ML_MODEL_PROVIDER', '') == 'nvidia': - env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join( - 
env['CM_NVIDIA_MLPERF_SCRATCH_PATH'], + elif env.get('MLC_TMP_ML_MODEL_PROVIDER', '') == 'nvidia': + env['MLC_ML_MODEL_FILE_WITH_PATH'] = os.path.join( + env['MLC_NVIDIA_MLPERF_SCRATCH_PATH'], 'models', 'GPTJ-6B', 'fp8-quantized-ammo', 'GPTJ-FP8-quantized') else: - env['CM_ML_MODEL_FILE_WITH_PATH'] = env['GPTJ_CHECKPOINT_PATH'] + env['MLC_ML_MODEL_FILE_WITH_PATH'] = env['GPTJ_CHECKPOINT_PATH'] - env['CM_ML_MODEL_FILE'] = os.path.basename( - env['CM_ML_MODEL_FILE_WITH_PATH']) - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] + env['MLC_ML_MODEL_FILE'] = os.path.basename( + env['MLC_ML_MODEL_FILE_WITH_PATH']) + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_ML_MODEL_FILE_WITH_PATH'] return {'return': 0} diff --git a/script/get-ml-model-gptj/meta.yaml b/script/get-ml-model-gptj/meta.yaml index 25b2ef981..9ebaf1524 100644 --- a/script/get-ml-model-gptj/meta.yaml +++ b/script/get-ml-model-gptj/meta.yaml @@ -6,36 +6,36 @@ category: AI/ML models docker: run: false env: - CM_ML_MODEL: GPTJ - CM_ML_MODEL_DATASET: cnndm - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' + MLC_ML_MODEL: GPTJ + MLC_ML_MODEL_DATASET: cnndm + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' input_mapping: checkpoint: GPTJ_CHECKPOINT_PATH - download_path: CM_DOWNLOAD_PATH - to: CM_DOWNLOAD_PATH + download_path: MLC_DOWNLOAD_PATH + to: MLC_DOWNLOAD_PATH new_env_keys: -- CM_ML_MODEL_* +- MLC_ML_MODEL_* - GPTJ_CHECKPOINT_PATH prehook_deps: - enable_if_env: - CM_TMP_REQUIRE_DOWNLOAD: + MLC_TMP_REQUIRE_DOWNLOAD: - 'yes' env: - CM_DOWNLOAD_FINAL_ENV_NAME: GPTJ_CHECKPOINT_PATH - CM_EXTRACT_FINAL_ENV_NAME: GPTJ_CHECKPOINT_PATH - CM_EXTRACT_TO_FOLDER: gpt-j + MLC_DOWNLOAD_FINAL_ENV_NAME: GPTJ_CHECKPOINT_PATH + MLC_EXTRACT_FINAL_ENV_NAME: GPTJ_CHECKPOINT_PATH + MLC_EXTRACT_TO_FOLDER: gpt-j extra_cache_tags: gptj,model force_cache: true names: - dae tags: download-and-extract force_env_keys: - - CM_OUTDIRNAME + - MLC_OUTDIRNAME update_tags_from_env_with_prefix: _url.: - - CM_DOWNLOAD_URL + - MLC_DOWNLOAD_URL print_env_at_the_end: - CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model + MLC_ML_MODEL_FILE_WITH_PATH: Path to the ML model tags: - get - raw @@ -47,28 +47,28 @@ uid: a41166210f294fbf variations: batch_size.#: env: - CM_ML_MODEL_BATCH_SIZE: '#' + MLC_ML_MODEL_BATCH_SIZE: '#' fp32: env: - CM_ML_MODEL_INPUT_DATA_TYPES: fp32 - CM_ML_MODEL_PRECISION: fp32 - CM_ML_MODEL_WEIGHT_DATA_TYPES: fp32 + MLC_ML_MODEL_INPUT_DATA_TYPES: fp32 + MLC_ML_MODEL_PRECISION: fp32 + MLC_ML_MODEL_WEIGHT_DATA_TYPES: fp32 group: precision fp8: env: - CM_ML_MODEL_INPUT_DATA_TYPES: fp8 - CM_ML_MODEL_WEIGHT_DATA_TYPES: fp8 + MLC_ML_MODEL_INPUT_DATA_TYPES: fp8 + MLC_ML_MODEL_WEIGHT_DATA_TYPES: fp8 group: precision int4: env: - CM_ML_MODEL_INPUT_DATA_TYPES: int4 - CM_ML_MODEL_WEIGHT_DATA_TYPES: int4 + MLC_ML_MODEL_INPUT_DATA_TYPES: int4 + MLC_ML_MODEL_WEIGHT_DATA_TYPES: int4 group: precision int8: env: - CM_ML_MODEL_INPUT_DATA_TYPES: int8 - CM_ML_MODEL_PRECISION: int8 - CM_ML_MODEL_WEIGHT_DATA_TYPES: int8 + MLC_ML_MODEL_INPUT_DATA_TYPES: int8 + MLC_ML_MODEL_PRECISION: int8 + MLC_ML_MODEL_WEIGHT_DATA_TYPES: int8 group: precision intel: default_variations: @@ -83,23 +83,23 @@ variations: default_variations: framework: pytorch env: - CM_TMP_ML_MODEL_PROVIDER: nvidia + MLC_TMP_ML_MODEL_PROVIDER: nvidia group: model-provider pytorch: default: true env: - CM_ML_MODEL_DATA_LAYOUT: NCHW - CM_ML_MODEL_FRAMEWORK: pytorch - CM_ML_STARTING_WEIGHTS_FILENAME: <<>> + MLC_ML_MODEL_DATA_LAYOUT: NCHW + MLC_ML_MODEL_FRAMEWORK: pytorch + 
MLC_ML_STARTING_WEIGHTS_FILENAME: <<>> group: framework pytorch,fp32: env: - CM_DOWNLOAD_CHECKSUM_NOT_USED: e677e28aaf03da84584bb3073b7ee315 - CM_DOWNLOAD_EXTRA_OPTIONS: ' --output-document checkpoint.zip' - CM_PACKAGE_URL: https://cloud.mlcommons.org/index.php/s/QAZ2oM94MkFtbQx/download - CM_RCLONE_CONFIG_NAME: mlc-inference - CM_RCLONE_URL: mlc-inference:mlcommons-inference-wg-public/gpt-j - CM_UNZIP: 'yes' + MLC_DOWNLOAD_CHECKSUM_NOT_USED: e677e28aaf03da84584bb3073b7ee315 + MLC_DOWNLOAD_EXTRA_OPTIONS: ' --output-document checkpoint.zip' + MLC_PACKAGE_URL: https://cloud.mlcommons.org/index.php/s/QAZ2oM94MkFtbQx/download + MLC_RCLONE_CONFIG_NAME: mlc-inference + MLC_RCLONE_URL: mlc-inference:mlcommons-inference-wg-public/gpt-j + MLC_UNZIP: 'yes' required_disk_space: 22700 pytorch,fp32,wget: add_deps_recursive: @@ -117,7 +117,7 @@ variations: - tags: get,mlperf,inference,results version: v3.1 - env: - CM_GPTJ_INTEL_MODEL: '' + MLC_GPTJ_INTEL_MODEL: '' force_new_env_keys: - GPTJ_CHECKPOINT_PATH tags: get,ml-model,gpt-j,_fp32,_pytorch @@ -135,13 +135,13 @@ variations: - tags: install,ipex,from.src,_for-intel-mlperf-inference-v3.1-gptj - tags: get,dataset,cnndm,_calibration env: - CM_GPTJ_INTEL_MODEL: 'yes' + MLC_GPTJ_INTEL_MODEL: 'yes' pytorch,nvidia: default_variations: precision: fp8 deps: - env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_TENSORRT_LLM_CHECKOUT_PATH + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_TENSORRT_LLM_CHECKOUT_PATH extra_cache_tags: tensorrt-llm tags: get,git,repo,_lfs,_repo.https://github.com/NVIDIA/TensorRT-LLM.git,_sha.0ab9d17a59c284d2de36889832fe9fc7c8697604 - names: @@ -171,8 +171,8 @@ variations: tags: _rclone default: true env: - CM_DOWNLOAD_FILENAME: checkpoint - CM_DOWNLOAD_URL: <<>> + MLC_DOWNLOAD_FILENAME: checkpoint + MLC_DOWNLOAD_URL: <<>> group: download-tool saxml: group: framework @@ -189,7 +189,7 @@ variations: - tags: get,generic-python-lib,_package.transformers - tags: get,generic-python-lib,_package.accelerate env: - CM_TMP_MODEL_SAXML: fp32 + MLC_TMP_MODEL_SAXML: fp32 new_env_keys: - GPTJ_SAXML_CHECKPOINT_PATH saxml,int8: @@ -203,24 +203,24 @@ variations: - tags: get,generic-python-lib,_package.praxis - tags: get,generic-python-lib,_package.apache-beam - env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_SAXML_REPO_PATH + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_SAXML_REPO_PATH extra_cache_tags: saxml names: - saxml tags: get,git,repo,_repo.https://github.com/google/saxml env: - CM_TMP_MODEL_SAXML: int8 + MLC_TMP_MODEL_SAXML: int8 uint8: env: - CM_ML_MODEL_INPUT_DATA_TYPES: uint8 - CM_ML_MODEL_PRECISION: uint8 - CM_ML_MODEL_WEIGHT_DATA_TYPES: uint8 + MLC_ML_MODEL_INPUT_DATA_TYPES: uint8 + MLC_ML_MODEL_PRECISION: uint8 + MLC_ML_MODEL_WEIGHT_DATA_TYPES: uint8 group: precision wget: add_deps_recursive: dae: tags: _wget env: - CM_DOWNLOAD_FILENAME: checkpoint.zip - CM_DOWNLOAD_URL: <<>> + MLC_DOWNLOAD_FILENAME: checkpoint.zip + MLC_DOWNLOAD_URL: <<>> group: download-tool diff --git a/script/get-ml-model-gptj/run-int4-calibration.sh b/script/get-ml-model-gptj/run-int4-calibration.sh index 45c3669e5..112716bc4 100644 --- a/script/get-ml-model-gptj/run-int4-calibration.sh +++ b/script/get-ml-model-gptj/run-int4-calibration.sh @@ -1,9 +1,9 @@ #!/bin/bash -export PATH=${CM_CONDA_BIN_PATH}:$PATH +export PATH=${MLC_CONDA_BIN_PATH}:$PATH -echo ${CM_CALIBRATION_CODE_ROOT} -cd ${CM_CALIBRATION_CODE_ROOT}/gpt-j/pytorch-cpu/INT4 +echo ${MLC_CALIBRATION_CODE_ROOT} +cd ${MLC_CALIBRATION_CODE_ROOT}/gpt-j/pytorch-cpu/INT4 pip install -r requirements.txt bash run_calibration_int4.sh diff --git 
a/script/get-ml-model-gptj/run-intel.sh b/script/get-ml-model-gptj/run-intel.sh
index f6cb2134d..a83d69bf1 100644
--- a/script/get-ml-model-gptj/run-intel.sh
+++ b/script/get-ml-model-gptj/run-intel.sh
@@ -1,17 +1,17 @@
 #!/bin/bash
-export PATH=${CM_CONDA_BIN_PATH}:$PATH
+export PATH=${MLC_CONDA_BIN_PATH}:$PATH
-export CALIBRATION_DATA_JSON=${CM_CALIBRATION_DATASET_CNNDM_PATH}
+export CALIBRATION_DATA_JSON=${MLC_CALIBRATION_DATASET_CNNDM_PATH}
-if [[ ${CM_ML_MODEL_WEIGHT_DATA_TYPES} == "int4" ]]; then
+if [[ ${MLC_ML_MODEL_WEIGHT_DATA_TYPES} == "int4" ]]; then
   export INT4_CALIBRATION_DIR=${PWD}/quantized-int4-model
-  bash ${CM_TMP_CURRENT_SCRIPT_PATH}/run-int4-calibration.sh
-  cd ${CM_HARNESS_CODE_ROOT}
+  bash ${MLC_TMP_CURRENT_SCRIPT_PATH}/run-int4-calibration.sh
+  cd ${MLC_HARNESS_CODE_ROOT}
   bash run_quantization_int4.sh
 else
-  cd ${CM_HARNESS_CODE_ROOT}
+  cd ${MLC_HARNESS_CODE_ROOT}
   bash run_quantization.sh
 fi
diff --git a/script/get-ml-model-gptj/run-nvidia.sh b/script/get-ml-model-gptj/run-nvidia.sh
index b16ee45da..a81d52f7c 100644
--- a/script/get-ml-model-gptj/run-nvidia.sh
+++ b/script/get-ml-model-gptj/run-nvidia.sh
@@ -1,21 +1,21 @@
 #!/bin/bash
-if [[ ! -e ${CM_NVIDIA_MLPERF_SCRATCH_PATH}/models/GPTJ-6B/checkpoint-final ]]; then
-  mkdir -p ${CM_NVIDIA_MLPERF_SCRATCH_PATH}/models/GPTJ-6B/
-  cp -r ${GPTJ_CHECKPOINT_PATH} ${CM_NVIDIA_MLPERF_SCRATCH_PATH}/models/GPTJ-6B/checkpoint-final
+if [[ ! -e ${MLC_NVIDIA_MLPERF_SCRATCH_PATH}/models/GPTJ-6B/checkpoint-final ]]; then
+  mkdir -p ${MLC_NVIDIA_MLPERF_SCRATCH_PATH}/models/GPTJ-6B/
+  cp -r ${GPTJ_CHECKPOINT_PATH} ${MLC_NVIDIA_MLPERF_SCRATCH_PATH}/models/GPTJ-6B/checkpoint-final
   test $? -eq 0 || exit $?
 fi
-echo "cd ${CM_TENSORRT_LLM_CHECKOUT_PATH}"
-cd ${CM_TENSORRT_LLM_CHECKOUT_PATH}
+echo "cd ${MLC_TENSORRT_LLM_CHECKOUT_PATH}"
+cd ${MLC_TENSORRT_LLM_CHECKOUT_PATH}
 make -C docker build
 test $? -eq 0 || exit $?
-export RUN_CMD="bash -c 'python3 scripts/build_wheel.py -a=${CM_GPU_ARCH} --clean --install --trt_root /usr/local/tensorrt/ && python examples/quantization/quantize.py --dtype=float16 --output_dir=/mnt/models/GPTJ-6B/fp8-quantized-ammo/GPTJ-FP8-quantized --model_dir=/mnt/models/GPTJ-6B/checkpoint-final --qformat=fp8 --kv_cache_dtype=fp8 '"
-export DOCKER_RUN_ARGS=" -v ${CM_NVIDIA_MLPERF_SCRATCH_PATH}:/mnt"
+export RUN_CMD="bash -c 'python3 scripts/build_wheel.py -a=${MLC_GPU_ARCH} --clean --install --trt_root /usr/local/tensorrt/ && python examples/quantization/quantize.py --dtype=float16 --output_dir=/mnt/models/GPTJ-6B/fp8-quantized-ammo/GPTJ-FP8-quantized --model_dir=/mnt/models/GPTJ-6B/checkpoint-final --qformat=fp8 --kv_cache_dtype=fp8 '"
+export DOCKER_RUN_ARGS=" -v ${MLC_NVIDIA_MLPERF_SCRATCH_PATH}:/mnt"
 make -C docker run LOCAL_USER=1
 test $? -eq 0 || exit $?
-PYTHONPATH='' ${CM_PYTHON_BIN_WITH_PATH} ${CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH}/code/gptj/tensorrt/onnx_tune.py --fp8-scalers-path=${CM_NVIDIA_MLPERF_SCRATCH_PATH}/models/GPTJ-6B/fp8-quantized-ammo/GPTJ-FP8-quantized/rank0.safetensors --scaler 1.005 --index 15
+PYTHONPATH='' ${MLC_PYTHON_BIN_WITH_PATH} ${MLC_MLPERF_INFERENCE_NVIDIA_CODE_PATH}/code/gptj/tensorrt/onnx_tune.py --fp8-scalers-path=${MLC_NVIDIA_MLPERF_SCRATCH_PATH}/models/GPTJ-6B/fp8-quantized-ammo/GPTJ-FP8-quantized/rank0.safetensors --scaler 1.005 --index 15
 test $? -eq 0 || exit $?
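
The `-a=${MLC_GPU_ARCH}` value consumed by TensorRT-LLM's build_wheel.py in the run-nvidia.sh hunk above is not set by hand: the corresponding customize.py (earlier in this patch) derives it from the CUDA compute capability, one of the env keys being renamed here. A minimal standalone sketch of that conversion, assuming the same env-key names used in this patch; the helper name and the example value are illustrative only, not part of the patch:

def derive_gpu_arch(env):
    # MLC_CUDA_DEVICE_PROP_GPU_COMPUTE_CAPABILITY carries the CUDA compute
    # capability as a string, e.g. "8.0" (A100) or "9.0" (H100); the scripts
    # multiply it by 10 to get the SM number passed as -a=<arch>.
    capability = float(env['MLC_CUDA_DEVICE_PROP_GPU_COMPUTE_CAPABILITY'])
    env['MLC_GPU_ARCH'] = int(capability * 10)
    return env

# Example: an H100 (compute capability 9.0) yields MLC_GPU_ARCH=90.
assert derive_gpu_arch(
    {'MLC_CUDA_DEVICE_PROP_GPU_COMPUTE_CAPABILITY': '9.0'})['MLC_GPU_ARCH'] == 90
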
diff --git a/script/get-ml-model-gptj/run-saxml-quantized.sh b/script/get-ml-model-gptj/run-saxml-quantized.sh
index e74862be0..019b455c5 100644
--- a/script/get-ml-model-gptj/run-saxml-quantized.sh
+++ b/script/get-ml-model-gptj/run-saxml-quantized.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 CUR=$PWD
-${CM_PYTHON_BIN_WITH_PATH} -m pip install jaxlib==0.4.24
-cd ${CM_TMP_CURRENT_SCRIPT_PATH}
-${CM_PYTHON_BIN_WITH_PATH} ${CM_SAXML_REPO_PATH}/saxml/tools/offline_quantize.py --input_dir ${CM_ML_MODEL_FILE_WITH_PATH}/checkpoint_00000000/state --output_dir ${CUR}/int8_ckpt/checkpoint_00000000/state --quantization_configs "gptj" > offline_quantize2.log
+${MLC_PYTHON_BIN_WITH_PATH} -m pip install jaxlib==0.4.24
+cd ${MLC_TMP_CURRENT_SCRIPT_PATH}
+${MLC_PYTHON_BIN_WITH_PATH} ${MLC_SAXML_REPO_PATH}/saxml/tools/offline_quantize.py --input_dir ${MLC_ML_MODEL_FILE_WITH_PATH}/checkpoint_00000000/state --output_dir ${CUR}/int8_ckpt/checkpoint_00000000/state --quantization_configs "gptj" > offline_quantize2.log
 test $? -eq 0 || exit $?
diff --git a/script/get-ml-model-gptj/run-saxml.sh b/script/get-ml-model-gptj/run-saxml.sh
index 031d736c0..78ad4a92f 100644
--- a/script/get-ml-model-gptj/run-saxml.sh
+++ b/script/get-ml-model-gptj/run-saxml.sh
@@ -1,8 +1,8 @@
 #!/bin/bash
 CUR=$PWD
 rm -rf pax_gptj_checkpoint
-cd ${CM_TMP_CURRENT_SCRIPT_PATH}
-${CM_PYTHON_BIN_WITH_PATH} -m convert_gptj_ckpt --base ${GPTJ_CHECKPOINT_PATH} --pax ${CUR}/pax_gptj_checkpoint
+cd ${MLC_TMP_CURRENT_SCRIPT_PATH}
+${MLC_PYTHON_BIN_WITH_PATH} -m convert_gptj_ckpt --base ${GPTJ_CHECKPOINT_PATH} --pax ${CUR}/pax_gptj_checkpoint
 test $? -eq 0 || exit $?
 cd "$CUR"
diff --git a/script/get-ml-model-huggingface-zoo/customize.py b/script/get-ml-model-huggingface-zoo/customize.py
index e117df4e3..4f57cfb8a 100644
--- a/script/get-ml-model-huggingface-zoo/customize.py
+++ b/script/get-ml-model-huggingface-zoo/customize.py
@@ -14,17 +14,17 @@
     script_path = i['run_script_input']['path']
-    path = env.get('CM_DOWNLOAD_PATH', '')
+    path = env.get('MLC_DOWNLOAD_PATH', '')
     if path == '':
         path = os.getcwd()
-    if env.get('CM_GIT_CLONE_REPO', '') != 'yes':
-        run_cmd = env.get('CM_PYTHON_BIN_WITH_PATH') + " " + \
+    if env.get('MLC_GIT_CLONE_REPO', '') != 'yes':
+        run_cmd = env.get('MLC_PYTHON_BIN_WITH_PATH') + " " + \
             os.path.join(script_path, 'download_model.py')
     else:
         run_cmd = ''
-    env['CM_RUN_CMD'] = run_cmd
+    env['MLC_RUN_CMD'] = run_cmd
     return {'return': 0}
@@ -33,21 +33,21 @@
     env = i['env']
-    env_key = env.get('CM_MODEL_ZOO_ENV_KEY', '')
+    env_key = env.get('MLC_MODEL_ZOO_ENV_KEY', '')
-    path_file = env.get('CM_ML_MODEL_FILE_WITH_PATH', '')
+    path_file = env.get('MLC_ML_MODEL_FILE_WITH_PATH', '')
     if path_file != '':
         path_dir = os.path.dirname(path_file)
-        env['CM_ML_MODEL_PATH'] = path_dir
+        env['MLC_ML_MODEL_PATH'] = path_dir
         if env_key != '':
-            env['CM_ML_MODEL_' + env_key + '_PATH'] = path_dir
+            env['MLC_ML_MODEL_' + env_key + '_PATH'] = path_dir
     else:
-        path_dir = env['CM_ML_MODEL_PATH']
+        path_dir = env['MLC_ML_MODEL_PATH']
         if env_key != '':
-            env['CM_ML_MODEL_' + env_key + '_FILE_WITH_PATH'] = path_dir
+            env['MLC_ML_MODEL_' + env_key + '_FILE_WITH_PATH'] = path_dir
     return {'return': 0}
diff --git a/script/get-ml-model-huggingface-zoo/download_model.py b/script/get-ml-model-huggingface-zoo/download_model.py
index 2f3584278..f993f7ab3 100644
--- a/script/get-ml-model-huggingface-zoo/download_model.py
+++ b/script/get-ml-model-huggingface-zoo/download_model.py
@@ -1,10 +1,10 @@
 from huggingface_hub import 
hf_hub_download import os -model_stub = os.environ.get('CM_MODEL_ZOO_STUB', '') -model_task = os.environ.get('CM_MODEL_TASK', '') +model_stub = os.environ.get('MLC_MODEL_ZOO_STUB', '') +model_task = os.environ.get('MLC_MODEL_TASK', '') -revision = os.environ.get('CM_HF_REVISION', '') +revision = os.environ.get('MLC_HF_REVISION', '') if model_task == "prune": print("Downloading model: " + model_stub) @@ -16,13 +16,13 @@ cache_dir=os.getcwd()) with open('tmp-run-env.out', 'w') as f: - f.write(f"CM_ML_MODEL_FILE_WITH_PATH={os.path.join(os.getcwd(),'')}") + f.write(f"MLC_ML_MODEL_FILE_WITH_PATH={os.path.join(os.getcwd(),'')}") else: - subfolder = os.environ.get('CM_HF_SUBFOLDER', '') - full_subfolder = os.environ.get('CM_HF_FULL_SUBFOLDER', '') + subfolder = os.environ.get('MLC_HF_SUBFOLDER', '') + full_subfolder = os.environ.get('MLC_HF_FULL_SUBFOLDER', '') - model_filename = os.environ.get('CM_MODEL_ZOO_FILENAME', '') + model_filename = os.environ.get('MLC_MODEL_ZOO_FILENAME', '') if model_filename == '': model_filename = 'model.onnx' @@ -102,4 +102,4 @@ def list_hf_files(path): print('') with open('tmp-run-env.out', 'w') as f: - f.write(f"CM_ML_MODEL_FILE_WITH_PATH={base_model_filepath}") + f.write(f"MLC_ML_MODEL_FILE_WITH_PATH={base_model_filepath}") diff --git a/script/get-ml-model-huggingface-zoo/meta.yaml b/script/get-ml-model-huggingface-zoo/meta.yaml index b8235a57d..aef4f541f 100644 --- a/script/get-ml-model-huggingface-zoo/meta.yaml +++ b/script/get-ml-model-huggingface-zoo/meta.yaml @@ -11,17 +11,17 @@ deps: - tags: get,generic-python-lib,_huggingface_hub env: {} input_mapping: - download_path: CM_DOWNLOAD_PATH - env_key: CM_MODEL_ZOO_ENV_KEY - full_subfolder: CM_HF_FULL_SUBFOLDER - model_filename: CM_MODEL_ZOO_FILENAME - revision: CM_HF_REVISION - subfolder: CM_HF_SUBFOLDER + download_path: MLC_DOWNLOAD_PATH + env_key: MLC_MODEL_ZOO_ENV_KEY + full_subfolder: MLC_HF_FULL_SUBFOLDER + model_filename: MLC_MODEL_ZOO_FILENAME + revision: MLC_HF_REVISION + subfolder: MLC_HF_SUBFOLDER new_env_keys: -- CM_ML_MODEL* -- CM_MODEL_ZOO_STUB +- MLC_ML_MODEL* +- MLC_MODEL_ZOO_STUB print_env_at_the_end: - CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model + MLC_ML_MODEL_FILE_WITH_PATH: Path to the ML model tags: - get - ml-model @@ -37,28 +37,28 @@ variations: deps: - tags: get,hf-cli,_with-login enable_if_env: - CM_HF_TOKEN: + MLC_HF_TOKEN: - on - env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_ML_MODEL_PATH + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_ML_MODEL_PATH tags: get,git,repo,_lfs force_env_keys: - - CM_OUTDIRNAME + - MLC_OUTDIRNAME update_tags_from_env_with_prefix: _repo.https://huggingface.co/: - - CM_MODEL_ZOO_STUB + - MLC_MODEL_ZOO_STUB env: - CM_GIT_CLONE_REPO: 'yes' + MLC_GIT_CLONE_REPO: 'yes' group: download-type model-stub.#: env: - CM_MODEL_ZOO_STUB: '#' + MLC_MODEL_ZOO_STUB: '#' onnx-subfolder: env: - CM_HF_SUBFOLDER: onnx + MLC_HF_SUBFOLDER: onnx pierreguillou_bert_base_cased_squad_v1.1_portuguese: env: - CM_MODEL_ZOO_STUB: pierreguillou/bert-base-cased-squad-v1.1-portuguese + MLC_MODEL_ZOO_STUB: pierreguillou/bert-base-cased-squad-v1.1-portuguese prune: env: - CM_MODEL_TASK: prune + MLC_MODEL_TASK: prune diff --git a/script/get-ml-model-huggingface-zoo/run.bat b/script/get-ml-model-huggingface-zoo/run.bat index 6a4faa929..edc748162 100644 --- a/script/get-ml-model-huggingface-zoo/run.bat +++ b/script/get-ml-model-huggingface-zoo/run.bat @@ -1,3 +1,3 @@ -echo %CM_RUN_CMD% -call %CM_RUN_CMD% +echo %MLC_RUN_CMD% +call %MLC_RUN_CMD% IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git 
a/script/get-ml-model-huggingface-zoo/run.sh b/script/get-ml-model-huggingface-zoo/run.sh index 111f4f2c8..ebffc6e22 100644 --- a/script/get-ml-model-huggingface-zoo/run.sh +++ b/script/get-ml-model-huggingface-zoo/run.sh @@ -1,4 +1,4 @@ #!/bin/bash -echo ${CM_RUN_CMD} -eval ${CM_RUN_CMD} +echo ${MLC_RUN_CMD} +eval ${MLC_RUN_CMD} test $? -eq 0 || exit $? diff --git a/script/get-ml-model-llama2/customize.py b/script/get-ml-model-llama2/customize.py index 1c091c12b..7e8e0b4f1 100644 --- a/script/get-ml-model-llama2/customize.py +++ b/script/get-ml-model-llama2/customize.py @@ -7,28 +7,28 @@ def preprocess(i): os_info = i['os_info'] env = i['env'] - if env.get('CM_TMP_ML_MODEL_PROVIDER', '') == 'nvidia': + if env.get('MLC_TMP_ML_MODEL_PROVIDER', '') == 'nvidia': i['run_script_input']['script_name'] = 'run-nvidia' gpu_arch = int( float( - env['CM_CUDA_DEVICE_PROP_GPU_COMPUTE_CAPABILITY']) * + env['MLC_CUDA_DEVICE_PROP_GPU_COMPUTE_CAPABILITY']) * 10) - env['CM_GPU_ARCH'] = gpu_arch - env['CM_TMP_REQUIRE_DOWNLOAD'] = 'no' + env['MLC_GPU_ARCH'] = gpu_arch + env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'no' else: path = env.get('LLAMA2_CHECKPOINT_PATH', '').strip() - if env.get('CM_TMP_ML_MODEL_PROVIDER', '') == 'amd': - env['CM_TMP_REQUIRE_DOWNLOAD'] = 'no' + if env.get('MLC_TMP_ML_MODEL_PROVIDER', '') == 'amd': + env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'no' i['run_script_input']['script_name'] = 'run-amd' env['AMD_CODE_DIR'] = os.path.join( - env['CM_MLPERF_INFERENCE_RESULTS_PATH'], 'closed', 'AMD', 'code') - env['CM_LLAMA2_FINAL_SAFE_TENSORS_ROOT'] = os.getcwd() - env['CM_LLAMA2_FINAL_SAFE_TENSORS_PATH'] = os.path.join( - env['CM_LLAMA2_FINAL_SAFE_TENSORS_ROOT'], "llama.safetensors") + env['MLC_MLPERF_INFERENCE_RESULTS_PATH'], 'closed', 'AMD', 'code') + env['MLC_LLAMA2_FINAL_SAFE_TENSORS_ROOT'] = os.getcwd() + env['MLC_LLAMA2_FINAL_SAFE_TENSORS_PATH'] = os.path.join( + env['MLC_LLAMA2_FINAL_SAFE_TENSORS_ROOT'], "llama.safetensors") else: if path == '' or not os.path.exists(path): - env['CM_TMP_REQUIRE_DOWNLOAD'] = 'yes' + env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'yes' return {'return': 0} @@ -37,10 +37,10 @@ def postprocess(i): env = i['env'] if env.get('LLAMA2_CHECKPOINT_PATH', '') == '': - env['LLAMA2_CHECKPOINT_PATH'] = env['CM_ML_MODEL_PATH'] + env['LLAMA2_CHECKPOINT_PATH'] = env['MLC_ML_MODEL_PATH'] else: - env['CM_ML_MODEL_PATH'] = env['LLAMA2_CHECKPOINT_PATH'] - env['CM_ML_MODEL_LLAMA2_FILE_WITH_PATH'] = env['LLAMA2_CHECKPOINT_PATH'] - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_PATH'] + env['MLC_ML_MODEL_PATH'] = env['LLAMA2_CHECKPOINT_PATH'] + env['MLC_ML_MODEL_LLAMA2_FILE_WITH_PATH'] = env['LLAMA2_CHECKPOINT_PATH'] + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_ML_MODEL_PATH'] return {'return': 0} diff --git a/script/get-ml-model-llama2/meta.yaml b/script/get-ml-model-llama2/meta.yaml index fe082718e..265b66925 100644 --- a/script/get-ml-model-llama2/meta.yaml +++ b/script/get-ml-model-llama2/meta.yaml @@ -6,28 +6,28 @@ category: AI/ML models docker: real_run: false env: - CM_ML_MODEL_DATASET: openorca - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' + MLC_ML_MODEL_DATASET: openorca + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' input_mapping: checkpoint: LLAMA2_CHECKPOINT_PATH new_env_keys: -- CM_ML_MODEL_* +- MLC_ML_MODEL_* - LLAMA2_CHECKPOINT_PATH -- CM_NVIDIA_TP_SIZE -- CM_LLAMA2_FINAL_SAFE_TENSORS_PATH +- MLC_NVIDIA_TP_SIZE +- MLC_LLAMA2_FINAL_SAFE_TENSORS_PATH prehook_deps: - enable_if_env: - CM_TMP_REQUIRE_DOWNLOAD: + MLC_TMP_REQUIRE_DOWNLOAD: - 'yes' env: {} extra_cache_tags: llama2,llama-2 
force_env_keys: - - CM_GIT_CHECKOUT_FOLDER + - MLC_GIT_CHECKOUT_FOLDER names: - hf-zoo tags: get,ml-model,huggingface,zoo,_clone-repo force_env_keys: - - CM_OUTDIRNAME + - MLC_OUTDIRNAME print_env_at_the_end: LLAMA2_CHECKPOINT_PATH: LLAMA2 checkpoint path tags: @@ -42,45 +42,45 @@ uid: 5db97be9f61244c6 variations: L40s: env: - CM_NVIDIA_TP_SIZE: 4 + MLC_NVIDIA_TP_SIZE: 4 group: gpu amd: default_env: - CM_LLAMA2_QUANTIZATION_DEVICE: '' + MLC_LLAMA2_QUANTIZATION_DEVICE: '' default_variations: framework: pytorch precision: fp8 env: - CM_TMP_ML_MODEL_PROVIDER: amd + MLC_TMP_ML_MODEL_PROVIDER: amd group: model-provider new_env_keys: - - CM_LLAMA2_FINAL_SAFE_TENSORS_ROOT - - CM_LLAMA2_FINAL_SAFE_TENSORS_PATH + - MLC_LLAMA2_FINAL_SAFE_TENSORS_ROOT + - MLC_LLAMA2_FINAL_SAFE_TENSORS_PATH batch_size.#: env: - CM_ML_MODEL_BATCH_SIZE: '#' + MLC_ML_MODEL_BATCH_SIZE: '#' fp32: default: true env: - CM_ML_MODEL_INPUT_DATA_TYPES: fp32 - CM_ML_MODEL_PRECISION: fp32 - CM_ML_MODEL_WEIGHT_DATA_TYPES: fp32 + MLC_ML_MODEL_INPUT_DATA_TYPES: fp32 + MLC_ML_MODEL_PRECISION: fp32 + MLC_ML_MODEL_WEIGHT_DATA_TYPES: fp32 group: precision fp8: env: - CM_ML_MODEL_INPUT_DATA_TYPES: fp8 - CM_ML_MODEL_PRECISION: fp8 - CM_ML_MODEL_WEIGHT_DATA_TYPES: fp8 + MLC_ML_MODEL_INPUT_DATA_TYPES: fp8 + MLC_ML_MODEL_PRECISION: fp8 + MLC_ML_MODEL_WEIGHT_DATA_TYPES: fp8 group: precision generic: env: - CM_NVIDIA_TP_SIZE: 2 + MLC_NVIDIA_TP_SIZE: 2 group: gpu int8: env: - CM_ML_MODEL_INPUT_DATA_TYPES: int8 - CM_ML_MODEL_PRECISION: int8 - CM_ML_MODEL_WEIGHT_DATA_TYPES: int8 + MLC_ML_MODEL_INPUT_DATA_TYPES: int8 + MLC_ML_MODEL_PRECISION: int8 + MLC_ML_MODEL_WEIGHT_DATA_TYPES: int8 group: precision meta-llama/Llama-2-70b-chat-hf: adr: @@ -88,27 +88,27 @@ variations: tags: _model-stub.meta-llama/Llama-2-70b-chat-hf default: true env: - CM_GIT_CHECKOUT_FOLDER: Llama-2-70b-chat-hf - CM_MODEL_ZOO_ENV_KEY: LLAMA2 + MLC_GIT_CHECKOUT_FOLDER: Llama-2-70b-chat-hf + MLC_MODEL_ZOO_ENV_KEY: LLAMA2 group: huggingface-stub meta-llama/Llama-2-7b-chat-hf: adr: hf-zoo: tags: _model-stub.meta-llama/Llama-2-7b-chat-hf env: - CM_GIT_CHECKOUT_FOLDER: Llama-2-7b-chat-hf - CM_MODEL_ZOO_ENV_KEY: LLAMA2 + MLC_GIT_CHECKOUT_FOLDER: Llama-2-7b-chat-hf + MLC_MODEL_ZOO_ENV_KEY: LLAMA2 group: huggingface-stub nvidia: default_variations: framework: pytorch env: - CM_TMP_ML_MODEL_PROVIDER: nvidia + MLC_TMP_ML_MODEL_PROVIDER: nvidia group: model-provider pytorch: default: true env: - CM_ML_MODEL_FRAMEWORK: pytorch + MLC_ML_MODEL_FRAMEWORK: pytorch group: framework pytorch,amd: default_variations: @@ -125,9 +125,9 @@ variations: tags: get,ml-model,llama2-70b,_fp32,_pytorch - tags: get,preprocessed,dataset,openorca,_calibration,_mlc - env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_INFERENCE_RESULTS_PATH + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_MLPERF_INFERENCE_RESULTS_PATH extra_cache_tags: inference,results - tags: get,git,repo,_repo.https://github.com/mlcommons/inference_results_v4.1,_branch.cm-code-only + tags: get,git,repo,_repo.https://github.com/mlcommons/inference_results_v4.1,_branch.mlc-code-only - tags: get,generic-python-lib,_quark-amd - tags: get,generic-python-lib,_package.nltk - tags: get,generic-python-lib,_torch_cuda @@ -140,7 +140,7 @@ variations: precision: fp8 deps: - env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_TENSORRT_LLM_CHECKOUT_PATH + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_TENSORRT_LLM_CHECKOUT_PATH extra_cache_tags: tensorrt-llm tags: get,git,repo,_repo.https://github.com/NVIDIA/TensorRT-LLM.git,_sha.0ab9d17a59c284d2de36889832fe9fc7c8697604 - names: @@ -164,15 
+164,15 @@ variations: hf-zoo: tags: _model-stub.# env: - CM_MODEL_ZOO_ENV_KEY: LLAMA2 + MLC_MODEL_ZOO_ENV_KEY: LLAMA2 group: huggingface-stub tp-size.#: env: - CM_NVIDIA_TP_SIZE: '#' + MLC_NVIDIA_TP_SIZE: '#' group: gpu uint8: env: - CM_ML_MODEL_INPUT_DATA_TYPES: uint8 - CM_ML_MODEL_PRECISION: uint8 - CM_ML_MODEL_WEIGHT_DATA_TYPES: uint8 + MLC_ML_MODEL_INPUT_DATA_TYPES: uint8 + MLC_ML_MODEL_PRECISION: uint8 + MLC_ML_MODEL_WEIGHT_DATA_TYPES: uint8 group: precision diff --git a/script/get-ml-model-llama2/run-amd.sh b/script/get-ml-model-llama2/run-amd.sh index 6f3ee48e9..308a5c294 100644 --- a/script/get-ml-model-llama2/run-amd.sh +++ b/script/get-ml-model-llama2/run-amd.sh @@ -3,19 +3,19 @@ code_dir=$AMD_CODE_DIR model_dir=${LLAMA2_CHECKPOINT_PATH} output_dir=$PWD -calib_dataset=${CM_DATASET_OPENORCA_CALIBRATION_PATH} +calib_dataset=${MLC_DATASET_OPENORCA_CALIBRATION_PATH} cmd="cd $code_dir/llama2-70b-99.9/tools/quark-0.1.0+a9827f5-mlperf/examples/torch/language_modeling/" echo $cmd eval $cmd test $? -eq 0 || exit $? -if [[ "x$CM_LLAMA2_QUANTIZATION_DEVICE" == "x" ]]; then +if [[ "x$MLC_LLAMA2_QUANTIZATION_DEVICE" == "x" ]]; then device_str="" else - device_str="--device $CM_LLAMA2_QUANTIZATION_DEVICE" + device_str="--device $MLC_LLAMA2_QUANTIZATION_DEVICE" fi -cmd="${CM_PYTHON_BIN_WITH_PATH} quantize_quark.py --model_dir $model_dir \ +cmd="${MLC_PYTHON_BIN_WITH_PATH} quantize_quark.py --model_dir $model_dir \ --output_dir $output_dir \ --quant_scheme w_fp8_a_fp8_o_fp8 \ --dataset $calib_dataset \ diff --git a/script/get-ml-model-llama2/run-nvidia.sh b/script/get-ml-model-llama2/run-nvidia.sh index 2e576280b..d38e911cb 100644 --- a/script/get-ml-model-llama2/run-nvidia.sh +++ b/script/get-ml-model-llama2/run-nvidia.sh @@ -1,29 +1,29 @@ #!/bin/bash -echo "Set tp size is ${CM_NVIDIA_TP_SIZE}" +echo "Set tp size is ${MLC_NVIDIA_TP_SIZE}" -if [[ ! -e ${CM_NVIDIA_MLPERF_SCRATCH_PATH}/models/Llama2/Llama-2-70b-chat-hf ]]; then - mkdir -p ${CM_NVIDIA_MLPERF_SCRATCH_PATH}/models/Llama2/Llama-2-70b-chat-hf +if [[ ! -e ${MLC_NVIDIA_MLPERF_SCRATCH_PATH}/models/Llama2/Llama-2-70b-chat-hf ]]; then + mkdir -p ${MLC_NVIDIA_MLPERF_SCRATCH_PATH}/models/Llama2/Llama-2-70b-chat-hf cd ${LLAMA2_CHECKPOINT_PATH} - cp -r ${LLAMA2_CHECKPOINT_PATH}/* ${CM_NVIDIA_MLPERF_SCRATCH_PATH}/models/Llama2/Llama-2-70b-chat-hf + cp -r ${LLAMA2_CHECKPOINT_PATH}/* ${MLC_NVIDIA_MLPERF_SCRATCH_PATH}/models/Llama2/Llama-2-70b-chat-hf test $? -eq 0 || exit $? fi -echo "cd ${CM_TENSORRT_LLM_CHECKOUT_PATH}" -cd ${CM_TENSORRT_LLM_CHECKOUT_PATH} +echo "cd ${MLC_TENSORRT_LLM_CHECKOUT_PATH}" +cd ${MLC_TENSORRT_LLM_CHECKOUT_PATH} make -C docker build test $? -eq 0 || exit $? 
-if [ "${CM_NVIDIA_TP_SIZE}" -eq 1 ]; then - RUN_CMD="bash -c 'python3 scripts/build_wheel.py -a=${CM_GPU_ARCH} --clean --install --trt_root /usr/local/tensorrt/ && python examples/quantization/quantize.py --dtype=float16 --output_dir=/mnt/models/Llama2/fp8-quantized-ammo/llama2-70b-chat-hf-tp${CM_NVIDIA_TP_SIZE}pp1-fp8-02072024 --model_dir=/mnt/models/Llama2/Llama-2-70b-chat-hf --qformat=fp8 --kv_cache_dtype=fp8 --tp_size ${CM_NVIDIA_TP_SIZE}'" +if [ "${MLC_NVIDIA_TP_SIZE}" -eq 1 ]; then + RUN_CMD="bash -c 'python3 scripts/build_wheel.py -a=${MLC_GPU_ARCH} --clean --install --trt_root /usr/local/tensorrt/ && python examples/quantization/quantize.py --dtype=float16 --output_dir=/mnt/models/Llama2/fp8-quantized-ammo/llama2-70b-chat-hf-tp${MLC_NVIDIA_TP_SIZE}pp1-fp8-02072024 --model_dir=/mnt/models/Llama2/Llama-2-70b-chat-hf --qformat=fp8 --kv_cache_dtype=fp8 --tp_size ${MLC_NVIDIA_TP_SIZE}'" else - RUN_CMD="bash -c 'python3 scripts/build_wheel.py -a=${CM_GPU_ARCH} --clean --install --trt_root /usr/local/tensorrt/ && python examples/quantization/quantize.py --dtype=float16 --output_dir=/mnt/models/Llama2/fp8-quantized-ammo/llama2-70b-chat-hf-tp${CM_NVIDIA_TP_SIZE}pp1-fp8 --model_dir=/mnt/models/Llama2/Llama-2-70b-chat-hf --qformat=fp8 --kv_cache_dtype=fp8 --tp_size ${CM_NVIDIA_TP_SIZE}'" + RUN_CMD="bash -c 'python3 scripts/build_wheel.py -a=${MLC_GPU_ARCH} --clean --install --trt_root /usr/local/tensorrt/ && python examples/quantization/quantize.py --dtype=float16 --output_dir=/mnt/models/Llama2/fp8-quantized-ammo/llama2-70b-chat-hf-tp${MLC_NVIDIA_TP_SIZE}pp1-fp8 --model_dir=/mnt/models/Llama2/Llama-2-70b-chat-hf --qformat=fp8 --kv_cache_dtype=fp8 --tp_size ${MLC_NVIDIA_TP_SIZE}'" fi -DOCKER_RUN_ARGS=" -v ${CM_NVIDIA_MLPERF_SCRATCH_PATH}:/mnt" +DOCKER_RUN_ARGS=" -v ${MLC_NVIDIA_MLPERF_SCRATCH_PATH}:/mnt" export DOCKER_RUN_ARGS="$DOCKER_RUN_ARGS" export RUN_CMD="$RUN_CMD" make -C docker run LOCAL_USER=1 test $? -eq 0 || exit $? 
-echo "MLPerf Nvidia scratch path is:${CM_NVIDIA_MLPERF_SCRATCH_PATH}" +echo "MLPerf Nvidia scratch path is:${MLC_NVIDIA_MLPERF_SCRATCH_PATH}" diff --git a/script/get-ml-model-llama3/customize.py b/script/get-ml-model-llama3/customize.py index 71d309b3a..2429a1e92 100644 --- a/script/get-ml-model-llama3/customize.py +++ b/script/get-ml-model-llama3/customize.py @@ -9,18 +9,18 @@ def preprocess(i): # skip download and register in cache if the llama3 checkpoint path is # already defined by the user - if env.get('CM_ML_MODEL_LLAMA3_CHECKPOINT_PATH', '') != '': - env['LLAMA3_CHECKPOINT_PATH'] = env['CM_ML_MODEL_LLAMA3_CHECKPOINT_PATH'] + if env.get('MLC_ML_MODEL_LLAMA3_CHECKPOINT_PATH', '') != '': + env['LLAMA3_CHECKPOINT_PATH'] = env['MLC_ML_MODEL_LLAMA3_CHECKPOINT_PATH'] return {'return': 0} - path = env.get('CM_OUTDIRNAME', '').strip() + path = env.get('MLC_OUTDIRNAME', '').strip() if path != "": os.makedirs(path, exist_ok=True) - env['CM_GIT_CHECKOUT_FOLDER'] = os.path.join( - path, env['CM_ML_MODEL_NAME']) + env['MLC_GIT_CHECKOUT_FOLDER'] = os.path.join( + path, env['MLC_ML_MODEL_NAME']) - env['CM_TMP_REQUIRE_DOWNLOAD'] = 'yes' + env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'yes' return {'return': 0} @@ -29,7 +29,7 @@ def postprocess(i): env = i['env'] - env['CM_ML_MODEL_LLAMA3_CHECKPOINT_PATH'] = env['LLAMA3_CHECKPOINT_PATH'] - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_PATH'] + env['MLC_ML_MODEL_LLAMA3_CHECKPOINT_PATH'] = env['LLAMA3_CHECKPOINT_PATH'] + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_ML_MODEL_PATH'] return {'return': 0} diff --git a/script/get-ml-model-llama3/meta.yaml b/script/get-ml-model-llama3/meta.yaml index 376553823..f5432f3ee 100644 --- a/script/get-ml-model-llama3/meta.yaml +++ b/script/get-ml-model-llama3/meta.yaml @@ -4,18 +4,18 @@ automation_uid: 5b4e0237da074764 cache: true category: AI/ML models input_mapping: - outdirname: CM_OUTDIRNAME + outdirname: MLC_OUTDIRNAME new_env_keys: -- CM_ML_MODEL_* +- MLC_ML_MODEL_* - LLAMA3_CHECKPOINT_PATH prehook_deps: - enable_if_env: - CM_TMP_REQUIRE_DOWNLOAD: + MLC_TMP_REQUIRE_DOWNLOAD: - 'yes' env: {} extra_cache_tags: llama3,llama-3 force_env_keys: - - CM_GIT_CHECKOUT_FOLDER + - MLC_GIT_CHECKOUT_FOLDER names: - hf-zoo tags: get,ml-model,huggingface,zoo,_clone-repo @@ -33,9 +33,9 @@ variations: fp16: default: true env: - CM_ML_MODEL_INPUT_DATA_TYPES: fp16 - CM_ML_MODEL_PRECISION: fp16 - CM_ML_MODEL_WEIGHT_DATA_TYPES: fp16 + MLC_ML_MODEL_INPUT_DATA_TYPES: fp16 + MLC_ML_MODEL_PRECISION: fp16 + MLC_ML_MODEL_WEIGHT_DATA_TYPES: fp16 group: precision meta-llama/Llama-3.1-405B-Instruct: adr: @@ -43,26 +43,26 @@ variations: tags: _model-stub.meta-llama/Llama-3.1-405B-Instruct default: true env: - CM_ML_MODEL_NAME: Llama-3-405b-instruct - CM_MODEL_ZOO_ENV_KEY: LLAMA3 + MLC_ML_MODEL_NAME: Llama-3-405b-instruct + MLC_MODEL_ZOO_ENV_KEY: LLAMA3 group: huggingface-stub meta-llama/Llama-3.1-8B-Instruct: adr: hf-zoo: tags: _model-stub.meta-llama/Llama-3.1-8B-Instruct env: - CM_ML_MODEL_NAME: Llama-3-8b-instruct - CM_MODEL_ZOO_ENV_KEY: LLAMA3 + MLC_ML_MODEL_NAME: Llama-3-8b-instruct + MLC_MODEL_ZOO_ENV_KEY: LLAMA3 group: huggingface-stub vllm: default: true env: - CM_ML_MODEL_FRAMEWORK: vllm + MLC_ML_MODEL_FRAMEWORK: vllm group: framework stub.#: adr: hf-zoo: tags: _model-stub.# env: - CM_MODEL_ZOO_ENV_KEY: LLAMA3 + MLC_MODEL_ZOO_ENV_KEY: LLAMA3 group: huggingface-stub diff --git a/script/get-ml-model-mixtral/customize.py b/script/get-ml-model-mixtral/customize.py index 15ca81033..d33656b33 100644 --- 
a/script/get-ml-model-mixtral/customize.py +++ b/script/get-ml-model-mixtral/customize.py @@ -10,7 +10,7 @@ def preprocess(i): path = env.get('MIXTRAL_CHECKPOINT_PATH', '').strip() if path == '' or not os.path.exists(path): - env['CM_TMP_REQUIRE_DOWNLOAD'] = 'yes' + env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'yes' return {'return': 0} @@ -20,9 +20,9 @@ def postprocess(i): env = i['env'] if env.get('MIXTRAL_CHECKPOINT_PATH', '') == '': - env['MIXTRAL_CHECKPOINT_PATH'] = env['CM_ML_MODEL_PATH'] + env['MIXTRAL_CHECKPOINT_PATH'] = env['MLC_ML_MODEL_PATH'] else: - env['CM_ML_MODEL_PATH'] = env['MIXTRAL_CHECKPOINT_PATH'] - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_PATH'] + env['MLC_ML_MODEL_PATH'] = env['MIXTRAL_CHECKPOINT_PATH'] + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_ML_MODEL_PATH'] return {'return': 0} diff --git a/script/get-ml-model-mixtral/meta.yaml b/script/get-ml-model-mixtral/meta.yaml index 358d56318..48ded49db 100644 --- a/script/get-ml-model-mixtral/meta.yaml +++ b/script/get-ml-model-mixtral/meta.yaml @@ -4,28 +4,28 @@ automation_uid: 5b4e0237da074764 cache: true category: AI/ML models env: - CM_ML_MODEL_DATASET: '' - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' + MLC_ML_MODEL_DATASET: '' + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' docker: real_run: False input_mapping: checkpoint: MIXTRAL_CHECKPOINT_PATH new_env_keys: -- CM_ML_MODEL_* +- MLC_ML_MODEL_* - MIXTRAL_CHECKPOINT_PATH prehook_deps: - enable_if_env: - CM_TMP_REQUIRE_DOWNLOAD: + MLC_TMP_REQUIRE_DOWNLOAD: - 'yes' env: {} extra_cache_tags: mixtral force_env_keys: - - CM_GIT_CHECKOUT_FOLDER + - MLC_GIT_CHECKOUT_FOLDER names: - hf-zoo tags: get,ml-model,huggingface,zoo,_clone-repo force_env_keys: - - CM_OUTDIRNAME + - MLC_OUTDIRNAME print_env_at_the_end: MIXTRAL_CHECKPOINT_PATH: MIXTRAL checkpoint path tags: @@ -39,13 +39,13 @@ uid: 0c14127677f34ea2 variations: batch_size.#: env: - CM_ML_MODEL_BATCH_SIZE: '#' + MLC_ML_MODEL_BATCH_SIZE: '#' fp32: default: true env: - CM_ML_MODEL_INPUT_DATA_TYPES: fp32 - CM_ML_MODEL_PRECISION: fp32 - CM_ML_MODEL_WEIGHT_DATA_TYPES: fp32 + MLC_ML_MODEL_INPUT_DATA_TYPES: fp32 + MLC_ML_MODEL_PRECISION: fp32 + MLC_ML_MODEL_WEIGHT_DATA_TYPES: fp32 group: precision mistralai/Mixtral-8x7B-Instruct-v0.1: adr: @@ -53,19 +53,19 @@ variations: tags: _model-stub.mistralai/Mixtral-8x7B-Instruct-v0.1 default: true env: - CM_GIT_CHECKOUT_FOLDER: Mixtral-8x7B-Instruct-v0.1 - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1 - CM_MODEL_ZOO_ENV_KEY: MIXTRAL + MLC_GIT_CHECKOUT_FOLDER: Mixtral-8x7B-Instruct-v0.1 + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1 + MLC_MODEL_ZOO_ENV_KEY: MIXTRAL group: huggingface-stub pytorch: default: true env: - CM_ML_MODEL_FRAMEWORK: pytorch + MLC_ML_MODEL_FRAMEWORK: pytorch group: framework stub.#: adr: hf-zoo: tags: _model-stub.# env: - CM_MODEL_ZOO_ENV_KEY: MIXTRAL + MLC_MODEL_ZOO_ENV_KEY: MIXTRAL group: huggingface-stub diff --git a/script/get-ml-model-mobilenet/README-extra.md b/script/get-ml-model-mobilenet/README-extra.md index 63766e960..24bc0e34f 100644 --- a/script/get-ml-model-mobilenet/README-extra.md +++ b/script/get-ml-model-mobilenet/README-extra.md @@ -9,7 +9,7 @@ where, * `[VARIATION]` is one of `tf-fp32`, `tf-int8`, `onnx-v1-opset-8`, `onnx-v1-opset-11`, `onnx-int8`. 
## Exported Variables -* `CM_ML_MODEL_FILE:` Model filename -* `CM_ML_MODEL_FILE_WITH_PATH:` Full path to model file -* `CM_ML_MODEL_PATH:` Path to folder containing the model file +* `MLC_ML_MODEL_FILE:` Model filename +* `MLC_ML_MODEL_FILE_WITH_PATH:` Full path to model file +* `MLC_ML_MODEL_PATH:` Path to folder containing the model file * More env variables being exported are given in [cm.json file](_cm.json) diff --git a/script/get-ml-model-mobilenet/customize.py b/script/get-ml-model-mobilenet/customize.py index 59f3c580e..6b8a8fd6b 100644 --- a/script/get-ml-model-mobilenet/customize.py +++ b/script/get-ml-model-mobilenet/customize.py @@ -14,8 +14,8 @@ def preprocess(i): path = os.getcwd() - url = env['CM_PACKAGE_URL'] - env['CM_ML_MODEL_STARTING_WEIGHTS_FILENAME'] = url + url = env['MLC_PACKAGE_URL'] + env['MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME'] = url print('Downloading from {}'.format(url)) @@ -27,30 +27,30 @@ def preprocess(i): filename = r['filename'] - if env.get('CM_UNZIP') == "yes" or env.get('CM_UNTAR') == "yes": - if env.get('CM_UNZIP') == "yes": + if env.get('MLC_UNZIP') == "yes" or env.get('MLC_UNTAR') == "yes": + if env.get('MLC_UNZIP') == "yes": cmd = "unzip " - elif env.get('CM_UNTAR') == "yes": + elif env.get('MLC_UNTAR') == "yes": cmd = "tar -xvzf " os.system(cmd + filename) - filename = env['CM_ML_MODEL_FILE'] + filename = env['MLC_ML_MODEL_FILE'] - extract_folder = env.get('CM_EXTRACT_FOLDER', '') + extract_folder = env.get('MLC_EXTRACT_FOLDER', '') if extract_folder: - env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join( + env['MLC_ML_MODEL_FILE_WITH_PATH'] = os.path.join( path, extract_folder, filename) else: - env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join(path, filename) + env['MLC_ML_MODEL_FILE_WITH_PATH'] = os.path.join(path, filename) else: - env['CM_ML_MODEL_FILE'] = filename - env['CM_ML_MODEL_FILE_WITH_PATH'] = r['path'] + env['MLC_ML_MODEL_FILE'] = filename + env['MLC_ML_MODEL_FILE_WITH_PATH'] = r['path'] - env['CM_ML_MODEL_PATH'] = path + env['MLC_ML_MODEL_PATH'] = path - if not os.path.exists(env['CM_ML_MODEL_FILE_WITH_PATH']): + if not os.path.exists(env['MLC_ML_MODEL_FILE_WITH_PATH']): return { - 'return': 1, 'error': f"Model file path {env['CM_ML_MODEL_FILE_WITH_PATH']} not existing. Probably the model name {env['CM_ML_MODEL_FILE']} in model meta is wrong"} + 'return': 1, 'error': f"Model file path {env['MLC_ML_MODEL_FILE_WITH_PATH']} not existing. 
Probably the model name {env['MLC_ML_MODEL_FILE']} in model meta is wrong"} return {'return': 0} diff --git a/script/get-ml-model-mobilenet/meta.yaml b/script/get-ml-model-mobilenet/meta.yaml index d690ffa69..297deb5d3 100644 --- a/script/get-ml-model-mobilenet/meta.yaml +++ b/script/get-ml-model-mobilenet/meta.yaml @@ -4,18 +4,18 @@ automation_uid: 5b4e0237da074764 cache: true category: AI/ML models default_env: - CM_ML_MODEL: mobilenet - CM_ML_MODEL_DATASET: imagenet2012-val - CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 - CM_ML_MODEL_MOBILENET_NAME_SUFFIX: '' - CM_ML_MODEL_RETRAINING: 'no' - CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' + MLC_ML_MODEL: mobilenet + MLC_ML_MODEL_DATASET: imagenet2012-val + MLC_ML_MODEL_INPUTS_DATA_TYPE: fp32 + MLC_ML_MODEL_MOBILENET_NAME_SUFFIX: '' + MLC_ML_MODEL_RETRAINING: 'no' + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' new_env_keys: -- CM_ML_MODEL_* -- CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS +- MLC_ML_MODEL_* +- MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS print_env_at_the_end: - CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model + MLC_ML_MODEL_FILE_WITH_PATH: Path to the ML model tags: - get - ml-model @@ -28,217 +28,217 @@ variations: fp32: default: true env: - CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 - CM_ML_MODEL_MOBILENET_PRECISION: float - CM_ML_MODEL_PRECISION: fp32 - CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 + MLC_ML_MODEL_INPUTS_DATA_TYPE: fp32 + MLC_ML_MODEL_MOBILENET_PRECISION: float + MLC_ML_MODEL_PRECISION: fp32 + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 group: precision from.google: env: - CM_DOWNLOAD_SOURCE: google + MLC_DOWNLOAD_SOURCE: google group: source from.zenodo: env: - CM_DOWNLOAD_SOURCE: zenodo + MLC_DOWNLOAD_SOURCE: zenodo group: source int8: base: - quantized_ env: - CM_ML_MODEL_INPUTS_DATA_TYPE: int8 - CM_ML_MODEL_MOBILENET_PRECISION: int8 - CM_ML_MODEL_PRECISION: int8 - CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + MLC_ML_MODEL_INPUTS_DATA_TYPE: int8 + MLC_ML_MODEL_MOBILENET_PRECISION: int8 + MLC_ML_MODEL_PRECISION: int8 + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8 group: precision large: env: - CM_ML_MODEL_MOBILENET_KIND: large + MLC_ML_MODEL_MOBILENET_KIND: large group: kind large-minimalistic: env: - CM_ML_MODEL_MOBILENET_KIND: large-minimalistic + MLC_ML_MODEL_MOBILENET_KIND: large-minimalistic group: kind multiplier-0.25: env: - CM_ML_MODEL_MOBILENET_MULTIPLIER: '0.25' - CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: '25' + MLC_ML_MODEL_MOBILENET_MULTIPLIER: '0.25' + MLC_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: '25' group: multiplier multiplier-0.35: env: - CM_ML_MODEL_MOBILENET_MULTIPLIER: '0.35' - CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: '35' + MLC_ML_MODEL_MOBILENET_MULTIPLIER: '0.35' + MLC_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: '35' group: multiplier multiplier-0.5: env: - CM_ML_MODEL_MOBILENET_MULTIPLIER: '0.5' - CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: '50' + MLC_ML_MODEL_MOBILENET_MULTIPLIER: '0.5' + MLC_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: '50' group: multiplier multiplier-0.75: env: - CM_ML_MODEL_MOBILENET_MULTIPLIER: '0.75' - CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: '75' + MLC_ML_MODEL_MOBILENET_MULTIPLIER: '0.75' + MLC_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: '75' group: multiplier multiplier-1.0: env: - CM_ML_MODEL_MOBILENET_MULTIPLIER: '1.0' - CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: '100' + MLC_ML_MODEL_MOBILENET_MULTIPLIER: '1.0' + MLC_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: '100' group: multiplier onnx: env: - CM_ML_MODEL_DATA_LAYOUT: NCHW - 
CM_ML_MODEL_FRAMEWORK: onnx + MLC_ML_MODEL_DATA_LAYOUT: NCHW + MLC_ML_MODEL_FRAMEWORK: onnx group: framework onnx,fp32,v1: env: - CM_ML_MODEL_INPUT_LAYER_NAME: input:0 - CM_ML_MODEL_NORMALIZE_DATA: 'yes' - CM_ML_MODEL_OUTPUT_LAYER_NAME: MobilenetV1/Predictions/Reshape_1:0 - CM_ML_MODEL_SUBTRACT_MEANS: 'no' - CM_ML_MODEL_VER: '1_1.0_224' + MLC_ML_MODEL_INPUT_LAYER_NAME: input:0 + MLC_ML_MODEL_NORMALIZE_DATA: 'yes' + MLC_ML_MODEL_OUTPUT_LAYER_NAME: MobilenetV1/Predictions/Reshape_1:0 + MLC_ML_MODEL_SUBTRACT_MEANS: 'no' + MLC_ML_MODEL_VER: '1_1.0_224' onnx,int8,v1: env: - CM_ML_MODEL_FILE: mobilenet_sym_no_bn.onnx - CM_ML_MODEL_GIVEN_CHANNEL_MEANS: 128.0 128.0 128.0 - CM_ML_MODEL_INPUT_LAYER_NAME: '0' - CM_ML_MODEL_NORMALIZE_DATA: 'no' - CM_ML_MODEL_OUTPUT_LAYER_NAME: '169' - CM_ML_MODEL_SUBTRACT_MEANS: 'yes' - CM_ML_MODEL_VER: 1_1.0_224_quant - CM_PACKAGE_URL: https://zenodo.org/record/3353417/files/Quantized%20MobileNet.zip - CM_UNZIP: 'yes' + MLC_ML_MODEL_FILE: mobilenet_sym_no_bn.onnx + MLC_ML_MODEL_GIVEN_CHANNEL_MEANS: 128.0 128.0 128.0 + MLC_ML_MODEL_INPUT_LAYER_NAME: '0' + MLC_ML_MODEL_NORMALIZE_DATA: 'no' + MLC_ML_MODEL_OUTPUT_LAYER_NAME: '169' + MLC_ML_MODEL_SUBTRACT_MEANS: 'yes' + MLC_ML_MODEL_VER: 1_1.0_224_quant + MLC_PACKAGE_URL: https://zenodo.org/record/3353417/files/Quantized%20MobileNet.zip + MLC_UNZIP: 'yes' onnx,opset-11,fp32,v1: env: - CM_PACKAGE_URL: https://zenodo.org/record/4735651/files/mobilenet_v1_1.0_224.onnx + MLC_PACKAGE_URL: https://zenodo.org/record/4735651/files/mobilenet_v1_1.0_224.onnx onnx,opset-8,fp32,v1: env: - CM_PACKAGE_URL: https://zenodo.org/record/3157894/files/mobilenet_v1_1.0_224.onnx + MLC_PACKAGE_URL: https://zenodo.org/record/3157894/files/mobilenet_v1_1.0_224.onnx opset-11: env: - CM_ML_MODEL_ONNX_OPSET: '11' + MLC_ML_MODEL_ONNX_OPSET: '11' group: opset-version opset-8: env: - CM_ML_MODEL_ONNX_OPSET: '8' + MLC_ML_MODEL_ONNX_OPSET: '8' group: opset-version quantized_: env: - CM_ML_MODEL_MOBILENET_NAME_SUFFIX: _quant - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'yes' + MLC_ML_MODEL_MOBILENET_NAME_SUFFIX: _quant + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'yes' resolution-128: env: - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.128 - CM_ML_MODEL_IMAGE_HEIGHT: '128' - CM_ML_MODEL_IMAGE_WIDTH: '128' - CM_ML_MODEL_MOBILENET_RESOLUTION: '128' + MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.128 + MLC_ML_MODEL_IMAGE_HEIGHT: '128' + MLC_ML_MODEL_IMAGE_WIDTH: '128' + MLC_ML_MODEL_MOBILENET_RESOLUTION: '128' group: resolution resolution-160: env: - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.160 - CM_ML_MODEL_IMAGE_HEIGHT: '160' - CM_ML_MODEL_IMAGE_WIDTH: '160' - CM_ML_MODEL_MOBILENET_RESOLUTION: '160' + MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.160 + MLC_ML_MODEL_IMAGE_HEIGHT: '160' + MLC_ML_MODEL_IMAGE_WIDTH: '160' + MLC_ML_MODEL_MOBILENET_RESOLUTION: '160' group: resolution resolution-192: env: - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.192 - CM_ML_MODEL_IMAGE_HEIGHT: '192' - CM_ML_MODEL_IMAGE_WIDTH: '192' - CM_ML_MODEL_MOBILENET_RESOLUTION: '192' + MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.192 + MLC_ML_MODEL_IMAGE_HEIGHT: '192' + MLC_ML_MODEL_IMAGE_WIDTH: '192' + MLC_ML_MODEL_MOBILENET_RESOLUTION: '192' group: resolution resolution-224: env: - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.224 - CM_ML_MODEL_IMAGE_HEIGHT: '224' - CM_ML_MODEL_IMAGE_WIDTH: '224' - CM_ML_MODEL_MOBILENET_RESOLUTION: '224' + MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.224 + 
MLC_ML_MODEL_IMAGE_HEIGHT: '224' + MLC_ML_MODEL_IMAGE_WIDTH: '224' + MLC_ML_MODEL_MOBILENET_RESOLUTION: '224' group: resolution small: env: - CM_ML_MODEL_MOBILENET_KIND: small + MLC_ML_MODEL_MOBILENET_KIND: small group: kind small-minimalistic: default_variations: precision: fp32 env: - CM_ML_MODEL_MOBILENET_KIND: small-minimalistic + MLC_ML_MODEL_MOBILENET_KIND: small-minimalistic group: kind tf: default: true default_variations: source: from.google env: - CM_ML_MODEL_DATA_LAYOUT: NHWC - CM_ML_MODEL_INPUT_LAYER_NAME: input - CM_ML_MODEL_NORMALIZE_DATA: 'yes' - CM_ML_MODEL_SUBTRACT_MEANS: 'no' + MLC_ML_MODEL_DATA_LAYOUT: NHWC + MLC_ML_MODEL_INPUT_LAYER_NAME: input + MLC_ML_MODEL_NORMALIZE_DATA: 'yes' + MLC_ML_MODEL_SUBTRACT_MEANS: 'no' group: framework tf,fp32,v1,resolution-224,multiplier-1.0: env: - CM_ML_MODEL_ACCURACY: '71.676' + MLC_ML_MODEL_ACCURACY: '71.676' tf,from.google,v1: env: - CM_PACKAGE_URL: http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_<<>>_<<>><<>>.tgz - CM_UNTAR: 'yes' + MLC_PACKAGE_URL: http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_<<>>_<<>><<>>.tgz + MLC_UNTAR: 'yes' tf,from.google,v2,fp32: env: - CM_ML_MODEL_FILE: mobilenet_v2_<<>>_<<>>.tflite - CM_ML_MODEL_WEIGHTS_FILE: mobilenet_v2_<<>>_<<>>.ckpt.data-00000-of-00001 - CM_PACKAGE_URL: https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_<<>>_<<>>.tgz - CM_UNTAR: 'yes' + MLC_ML_MODEL_FILE: mobilenet_v2_<<>>_<<>>.tflite + MLC_ML_MODEL_WEIGHTS_FILE: mobilenet_v2_<<>>_<<>>.ckpt.data-00000-of-00001 + MLC_PACKAGE_URL: https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_<<>>_<<>>.tgz + MLC_UNTAR: 'yes' tf,from.google,v2,quantized_: env: - CM_EXTRACT_FOLDER: v2_<<>>_<<>> - CM_ML_MODEL_FILE: model.tflite - CM_ML_MODEL_WEIGHTS_FILE: <<>>_v2_<<>>_<<>>.ckpt.data-00000-of-00001 - CM_PACKAGE_URL: https://storage.googleapis.com/mobilenet_v2/checkpoints/<<>>_v2_<<>>_<<>>.tgz - CM_UNTAR: 'yes' + MLC_EXTRACT_FOLDER: v2_<<>>_<<>> + MLC_ML_MODEL_FILE: model.tflite + MLC_ML_MODEL_WEIGHTS_FILE: <<>>_v2_<<>>_<<>>.ckpt.data-00000-of-00001 + MLC_PACKAGE_URL: https://storage.googleapis.com/mobilenet_v2/checkpoints/<<>>_v2_<<>>_<<>>.tgz + MLC_UNTAR: 'yes' tf,from.google,v3: env: - CM_EXTRACT_FOLDER: v3-<<>>_<<>>_<<>>_<<>> - CM_ML_MODEL_FILE: v3-<<>>_<<>>_<<>>_<<>>.tflite - CM_PACKAGE_URL: https://storage.googleapis.com/mobilenet_v3/checkpoints/v3-<<>>_<<>>_<<>>_<<>>.tgz - CM_UNTAR: 'yes' + MLC_EXTRACT_FOLDER: v3-<<>>_<<>>_<<>>_<<>> + MLC_ML_MODEL_FILE: v3-<<>>_<<>>_<<>>_<<>>.tflite + MLC_PACKAGE_URL: https://storage.googleapis.com/mobilenet_v3/checkpoints/v3-<<>>_<<>>_<<>>_<<>>.tgz + MLC_UNTAR: 'yes' tf,from.zenodo,v1: env: - CM_PACKAGE_URL: https://zenodo.org/record/2269307/files/mobilenet_v1_<<>>_<<>><<>>.tgz - CM_UNTAR: 'yes' + MLC_PACKAGE_URL: https://zenodo.org/record/2269307/files/mobilenet_v1_<<>>_<<>><<>>.tgz + MLC_UNTAR: 'yes' tf,int8,v1,resolution-224,multiplier-1.0: env: - CM_ML_MODEL_ACCURACY: '70.762' + MLC_ML_MODEL_ACCURACY: '70.762' tf,v1: env: - CM_ML_MODEL_FILE: mobilenet_v1_<<>>_<<>><<>>.tflite - CM_ML_MODEL_OUTPUT_LAYER_NAME: MobilenetV1/Predictions/Reshape_1 - CM_ML_MODEL_VER: 1_<<>>_<<>><<>>_2018_08_02 - CM_ML_MODEL_WEIGHTS_FILE: mobilenet_v1_<<>>_<<>><<>>.ckpt.data-00000-of-00001 + MLC_ML_MODEL_FILE: mobilenet_v1_<<>>_<<>><<>>.tflite + MLC_ML_MODEL_OUTPUT_LAYER_NAME: MobilenetV1/Predictions/Reshape_1 + MLC_ML_MODEL_VER: 1_<<>>_<<>><<>>_2018_08_02 + MLC_ML_MODEL_WEIGHTS_FILE: mobilenet_v1_<<>>_<<>><<>>.ckpt.data-00000-of-00001 
tf,v1,fp32: env: - CM_ML_MODEL_MOBILENET_NAME_PREFIX: '' + MLC_ML_MODEL_MOBILENET_NAME_PREFIX: '' tf,v1,int8: env: - CM_ML_MODEL_MOBILENET_NAME_SUFFIX: _quant + MLC_ML_MODEL_MOBILENET_NAME_SUFFIX: _quant tf,v1,uint8: env: - CM_ML_MODEL_MOBILENET_NAME_SUFFIX: _quant + MLC_ML_MODEL_MOBILENET_NAME_SUFFIX: _quant tf,v2,fp32: env: - CM_ML_MODEL_MOBILENET_NAME_PREFIX: '' - CM_ML_MODEL_OUTPUT_LAYER_NAME: MobilenetV2/Predictions/Reshape_1 - CM_ML_MODEL_VER: 2_<<>>_<<>> + MLC_ML_MODEL_MOBILENET_NAME_PREFIX: '' + MLC_ML_MODEL_OUTPUT_LAYER_NAME: MobilenetV2/Predictions/Reshape_1 + MLC_ML_MODEL_VER: 2_<<>>_<<>> tf,v2,int8: env: - CM_ML_MODEL_MOBILENET_NAME_PREFIX: quantized - CM_ML_MODEL_OUTPUT_LAYER_NAME: MobilenetV2/Predictions/Softmax - CM_ML_MODEL_VER: 2_<<>>_<<>> + MLC_ML_MODEL_MOBILENET_NAME_PREFIX: quantized + MLC_ML_MODEL_OUTPUT_LAYER_NAME: MobilenetV2/Predictions/Softmax + MLC_ML_MODEL_VER: 2_<<>>_<<>> tf,v2,uint8: env: - CM_ML_MODEL_MOBILENET_NAME_PREFIX: quantized - CM_ML_MODEL_OUTPUT_LAYER_NAME: MobilenetV2/Predictions/Softmax - CM_ML_MODEL_VER: 2_<<>>_<<>> + MLC_ML_MODEL_MOBILENET_NAME_PREFIX: quantized + MLC_ML_MODEL_OUTPUT_LAYER_NAME: MobilenetV2/Predictions/Softmax + MLC_ML_MODEL_VER: 2_<<>>_<<>> tf,v3: env: - CM_ML_MODEL_OUTPUT_LAYER_NAME: MobilenetV3/Predictions/Softmax - CM_ML_MODEL_VER: 3_<<>>_<<>> + MLC_ML_MODEL_OUTPUT_LAYER_NAME: MobilenetV3/Predictions/Softmax + MLC_ML_MODEL_VER: 3_<<>>_<<>> tflite: base: - tf @@ -246,27 +246,27 @@ variations: base: - quantized_ env: - CM_ML_MODEL_INPUTS_DATA_TYPE: uint8 - CM_ML_MODEL_MOBILENET_PRECISION: uint8 - CM_ML_MODEL_PRECISION: uint8 - CM_ML_MODEL_WEIGHTS_DATA_TYPE: uint8 + MLC_ML_MODEL_INPUTS_DATA_TYPE: uint8 + MLC_ML_MODEL_MOBILENET_PRECISION: uint8 + MLC_ML_MODEL_PRECISION: uint8 + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: uint8 group: precision v1: default_variations: multiplier: multiplier-1.0 resolution: resolution-224 env: - CM_ML_MODEL_FULL_NAME: mobilenet-v1-precision_<<>>-<<>>-<<>> - CM_ML_MODEL_MOBILENET_VERSION: '1' + MLC_ML_MODEL_FULL_NAME: mobilenet-v1-precision_<<>>-<<>>-<<>> + MLC_ML_MODEL_MOBILENET_VERSION: '1' group: version v2: default_variations: multiplier: multiplier-1.0 resolution: resolution-224 env: - CM_ML_MODEL_FULL_NAME: mobilenet-v2-precision_<<>>-<<>>-<<>> - CM_ML_MODEL_MOBILENET_VERSION: '2' - CM_ML_MODEL_VER: '2' + MLC_ML_MODEL_FULL_NAME: mobilenet-v2-precision_<<>>-<<>>-<<>> + MLC_ML_MODEL_MOBILENET_VERSION: '2' + MLC_ML_MODEL_VER: '2' group: version v3: default: true @@ -274,7 +274,7 @@ variations: multiplier: multiplier-1.0 resolution: resolution-224 env: - CM_ML_MODEL_FULL_NAME: mobilenet-v3-precision_<<>>-<<>>-<<>> - CM_ML_MODEL_MOBILENET_VERSION: '3' - CM_ML_MODEL_VER: '3' + MLC_ML_MODEL_FULL_NAME: mobilenet-v3-precision_<<>>-<<>>-<<>> + MLC_ML_MODEL_MOBILENET_VERSION: '3' + MLC_ML_MODEL_VER: '3' group: version diff --git a/script/get-ml-model-neuralmagic-zoo/customize.py b/script/get-ml-model-neuralmagic-zoo/customize.py index 685eaae6f..5aec325b9 100644 --- a/script/get-ml-model-neuralmagic-zoo/customize.py +++ b/script/get-ml-model-neuralmagic-zoo/customize.py @@ -14,7 +14,7 @@ def preprocess(i): path = os.getcwd() - model_stub = env.get('CM_MODEL_ZOO_STUB', '') + model_stub = env.get('MLC_MODEL_ZOO_STUB', '') if model_stub == '': variations = list(i.get('meta', {}).get('variations', {}).keys()) @@ -24,7 +24,7 @@ def preprocess(i): if '#' not in v: variation_models.append(v) - return {'return': 1, 'error': 'ENV CM_MODEL_ZOO_STUB is not set. 
Please select variation from {}'.format( + return {'return': 1, 'error': 'ENV MLC_MODEL_ZOO_STUB is not set. Please select variation from {}'.format( str(variation_models))} return {'return': 0} @@ -36,11 +36,11 @@ def postprocess(i): env = i['env'] - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_ML_MODEL_FILE_WITH_PATH'] - onnx_path = os.path.join(env['CM_ML_MODEL_FILE_WITH_PATH'], "model.onnx") + onnx_path = os.path.join(env['MLC_ML_MODEL_FILE_WITH_PATH'], "model.onnx") if os.path.exists(onnx_path): - env['CM_MLPERF_CUSTOM_MODEL_PATH'] = onnx_path + env['MLC_MLPERF_CUSTOM_MODEL_PATH'] = onnx_path return {'return': 0} diff --git a/script/get-ml-model-neuralmagic-zoo/download_sparse.py b/script/get-ml-model-neuralmagic-zoo/download_sparse.py index b2c9de607..8d726c54a 100644 --- a/script/get-ml-model-neuralmagic-zoo/download_sparse.py +++ b/script/get-ml-model-neuralmagic-zoo/download_sparse.py @@ -1,10 +1,10 @@ from sparsezoo import Model import os -model_stub = os.environ.get('CM_MODEL_ZOO_STUB', '') +model_stub = os.environ.get('MLC_MODEL_ZOO_STUB', '') print(f"Downloading model {model_stub}") stub = f"{model_stub}" model = Model(stub) with open('tmp-run-env.out', 'w') as f: - f.write(f"CM_ML_MODEL_FILE_WITH_PATH={model.path}") + f.write(f"MLC_ML_MODEL_FILE_WITH_PATH={model.path}") diff --git a/script/get-ml-model-neuralmagic-zoo/meta.yaml b/script/get-ml-model-neuralmagic-zoo/meta.yaml index 3c78a1ce9..c0d4d7236 100644 --- a/script/get-ml-model-neuralmagic-zoo/meta.yaml +++ b/script/get-ml-model-neuralmagic-zoo/meta.yaml @@ -13,12 +13,12 @@ deps: - tags: get,generic-python-lib,_sparsezoo env: {} new_env_keys: -- CM_ML_MODEL* -- CM_MODEL_ZOO_STUB -- CM_MLPERF_CUSTOM_MODEL_PATH -- CM_GET_DEPENDENT_CACHED_PATH +- MLC_ML_MODEL* +- MLC_MODEL_ZOO_STUB +- MLC_MLPERF_CUSTOM_MODEL_PATH +- MLC_GET_DEPENDENT_CACHED_PATH print_env_at_the_end: - CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model + MLC_ML_MODEL_FILE_WITH_PATH: Path to the ML model tags: - get - ml-model @@ -33,88 +33,88 @@ uid: adbb3f2525a14f97 variations: bert-base-pruned90-none: env: - CM_ML_MODEL_FULL_NAME: bert-base-pruned90-none-bert-99 - CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 - CM_ML_MODEL_RETRAINING: 'no' - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-base-uncased - CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: unstructured pruning - CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned90-none + MLC_ML_MODEL_FULL_NAME: bert-base-pruned90-none-bert-99 + MLC_ML_MODEL_INPUTS_DATA_TYPE: fp32 + MLC_ML_MODEL_RETRAINING: 'no' + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-base-uncased + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: unstructured pruning + MLC_MODEL_ZOO_STUB: zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned90-none bert-base-pruned95_obs_quant-none: env: - CM_ML_MODEL_FULL_NAME: bert-base-pruned95_obs_quant-none-bert-99 - CM_ML_MODEL_INPUTS_DATA_TYPE: int64 - CM_ML_MODEL_RETRAINING: 'yes' - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-base-uncased - CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, unstructured pruning - CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned95_obs_quant-none + MLC_ML_MODEL_FULL_NAME: bert-base-pruned95_obs_quant-none-bert-99 + MLC_ML_MODEL_INPUTS_DATA_TYPE: int64 + 
MLC_ML_MODEL_RETRAINING: 'yes' + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-base-uncased + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, unstructured pruning + MLC_MODEL_ZOO_STUB: zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned95_obs_quant-none bert-base_cased-pruned90-none: env: - CM_ML_MODEL_FULL_NAME: bert-base_cased-pruned90-none-bert-99 - CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 - CM_ML_MODEL_RETRAINING: 'no' - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-base-cased - CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: unstructured pruning - CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/bert-base_cased/pytorch/huggingface/squad/pruned90-none + MLC_ML_MODEL_FULL_NAME: bert-base_cased-pruned90-none-bert-99 + MLC_ML_MODEL_INPUTS_DATA_TYPE: fp32 + MLC_ML_MODEL_RETRAINING: 'no' + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-base-cased + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: unstructured pruning + MLC_MODEL_ZOO_STUB: zoo:nlp/question_answering/bert-base_cased/pytorch/huggingface/squad/pruned90-none bert-large-base-none: env: - CM_ML_MODEL_FULL_NAME: bert-large-base-none-bert-99 - CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 - CM_ML_MODEL_RETRAINING: 'no' - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-large-uncased - CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: unstructured pruning - CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/base-none + MLC_ML_MODEL_FULL_NAME: bert-large-base-none-bert-99 + MLC_ML_MODEL_INPUTS_DATA_TYPE: fp32 + MLC_ML_MODEL_RETRAINING: 'no' + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-large-uncased + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: unstructured pruning + MLC_MODEL_ZOO_STUB: zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/base-none bert-large-pruned80_quant-none-vnni: env: - CM_ML_MODEL_FULL_NAME: bert-large-pruned80_quant-none-vnni-bert-99 - CM_ML_MODEL_INPUTS_DATA_TYPE: int64 - CM_ML_MODEL_RETRAINING: 'no' - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-large-uncased - CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, unstructured pruning - CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/pruned80_quant-none-vnni + MLC_ML_MODEL_FULL_NAME: bert-large-pruned80_quant-none-vnni-bert-99 + MLC_ML_MODEL_INPUTS_DATA_TYPE: int64 + MLC_ML_MODEL_RETRAINING: 'no' + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-large-uncased + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, unstructured pruning + MLC_MODEL_ZOO_STUB: zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/pruned80_quant-none-vnni mobilebert-14layer_pruned50-none-vnni: env: - CM_ML_MODEL_FULL_NAME: mobilebert-14layer_pruned50-none-vnni-bert-99 - CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 - CM_ML_MODEL_RETRAINING: 'no' - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz - CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: unstructured pruning - CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50-none-vnni + MLC_ML_MODEL_FULL_NAME: 
mobilebert-14layer_pruned50-none-vnni-bert-99 + MLC_ML_MODEL_INPUTS_DATA_TYPE: fp32 + MLC_ML_MODEL_RETRAINING: 'no' + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: unstructured pruning + MLC_MODEL_ZOO_STUB: zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50-none-vnni mobilebert-14layer_pruned50_quant-none-vnni: env: - CM_ML_MODEL_FULL_NAME: mobilebert-14layer_pruned50_quant-none-vnni-bert-99 - CM_ML_MODEL_INPUTS_DATA_TYPE: int64 - CM_ML_MODEL_RETRAINING: 'yes' - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz - CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, unstructured pruning - CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50_quant-none-vnni + MLC_ML_MODEL_FULL_NAME: mobilebert-14layer_pruned50_quant-none-vnni-bert-99 + MLC_ML_MODEL_INPUTS_DATA_TYPE: int64 + MLC_ML_MODEL_RETRAINING: 'yes' + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, unstructured pruning + MLC_MODEL_ZOO_STUB: zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50_quant-none-vnni mobilebert-base_quant-none: env: - CM_ML_MODEL_FULL_NAME: mobilebert-base_quant-none-bert-99 - CM_ML_MODEL_INPUTS_DATA_TYPE: int64 - CM_ML_MODEL_RETRAINING: 'yes' - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz - CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, unstructured pruning - CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base_quant-none + MLC_ML_MODEL_FULL_NAME: mobilebert-base_quant-none-bert-99 + MLC_ML_MODEL_INPUTS_DATA_TYPE: int64 + MLC_ML_MODEL_RETRAINING: 'yes' + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, unstructured pruning + MLC_MODEL_ZOO_STUB: zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base_quant-none mobilebert-none-base-none: env: - CM_ML_MODEL_FULL_NAME: mobilebert-none-base-none-bert-99 - CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 - CM_ML_MODEL_RETRAINING: 'no' - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz - CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: unstructured pruning - CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base-none + MLC_ML_MODEL_FULL_NAME: mobilebert-none-base-none-bert-99 + MLC_ML_MODEL_INPUTS_DATA_TYPE: fp32 + MLC_ML_MODEL_RETRAINING: 'no' + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: unstructured pruning + MLC_MODEL_ZOO_STUB: 
zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base-none model-stub.#: env: - CM_MODEL_ZOO_STUB: '#' + MLC_MODEL_ZOO_STUB: '#' model-stub.zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned90-none: alias: bert-base-pruned90-none model-stub.zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned95_obs_quant-none: @@ -151,73 +151,73 @@ variations: alias: roberta-base-pruned85-quant-none obert-base-pruned90-none: env: - CM_ML_MODEL_FULL_NAME: obert-base-pruned90-none-bert-99 - CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 - CM_ML_MODEL_RETRAINING: 'no' - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-large-uncased - CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: unstructured pruning - CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/obert-base/pytorch/huggingface/squad/pruned90-none + MLC_ML_MODEL_FULL_NAME: obert-base-pruned90-none-bert-99 + MLC_ML_MODEL_INPUTS_DATA_TYPE: fp32 + MLC_ML_MODEL_RETRAINING: 'no' + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-large-uncased + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: unstructured pruning + MLC_MODEL_ZOO_STUB: zoo:nlp/question_answering/obert-base/pytorch/huggingface/squad/pruned90-none obert-large-base-none: env: - CM_ML_MODEL_FULL_NAME: obert-large-base-none-bert-99 - CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 - CM_ML_MODEL_RETRAINING: 'no' - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-large-uncased - CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: unstructured pruning - CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/base-none + MLC_ML_MODEL_FULL_NAME: obert-large-base-none-bert-99 + MLC_ML_MODEL_INPUTS_DATA_TYPE: fp32 + MLC_ML_MODEL_RETRAINING: 'no' + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-large-uncased + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: unstructured pruning + MLC_MODEL_ZOO_STUB: zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/base-none obert-large-pruned95-none-vnni: env: - CM_ML_MODEL_FULL_NAME: obert-large-pruned95-none-vnni-bert-99 - CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 - CM_ML_MODEL_RETRAINING: 'no' - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-large-uncased - CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: unstructured pruning - CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95-none-vnni + MLC_ML_MODEL_FULL_NAME: obert-large-pruned95-none-vnni-bert-99 + MLC_ML_MODEL_INPUTS_DATA_TYPE: fp32 + MLC_ML_MODEL_RETRAINING: 'no' + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-large-uncased + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: unstructured pruning + MLC_MODEL_ZOO_STUB: zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95-none-vnni obert-large-pruned95_quant-none-vnni: env: - CM_ML_MODEL_FULL_NAME: obert-large-pruned95_quant-none-vnni-bert-99 - CM_ML_MODEL_INPUTS_DATA_TYPE: int64 - CM_ML_MODEL_RETRAINING: 'yes' - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-large-uncased - CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, unstructured pruning - CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95_quant-none-vnni + MLC_ML_MODEL_FULL_NAME: 
obert-large-pruned95_quant-none-vnni-bert-99 + MLC_ML_MODEL_INPUTS_DATA_TYPE: int64 + MLC_ML_MODEL_RETRAINING: 'yes' + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-large-uncased + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, unstructured pruning + MLC_MODEL_ZOO_STUB: zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95_quant-none-vnni obert-large-pruned97-none: env: - CM_ML_MODEL_FULL_NAME: obert-large-pruned97-none-bert-99 - CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 - CM_ML_MODEL_RETRAINING: 'no' - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-large-uncased - CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: unstructured pruning - CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97-none + MLC_ML_MODEL_FULL_NAME: obert-large-pruned97-none-bert-99 + MLC_ML_MODEL_INPUTS_DATA_TYPE: fp32 + MLC_ML_MODEL_RETRAINING: 'no' + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-large-uncased + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: unstructured pruning + MLC_MODEL_ZOO_STUB: zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97-none obert-large-pruned97-quant-none: env: - CM_ML_MODEL_FULL_NAME: obert-large-pruned97-quant-none-bert-99 - CM_ML_MODEL_INPUTS_DATA_TYPE: int64 - CM_ML_MODEL_RETRAINING: 'no' - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-large-uncased - CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, unstructured pruning - CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97_quant-none + MLC_ML_MODEL_FULL_NAME: obert-large-pruned97-quant-none-bert-99 + MLC_ML_MODEL_INPUTS_DATA_TYPE: int64 + MLC_ML_MODEL_RETRAINING: 'no' + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-large-uncased + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, unstructured pruning + MLC_MODEL_ZOO_STUB: zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97_quant-none oberta-base-pruned90-quant-none: env: - CM_ML_MODEL_FULL_NAME: oberta-base-pruned90-quant-none-bert-99 - CM_ML_MODEL_INPUTS_DATA_TYPE: int64 - CM_ML_MODEL_RETRAINING: 'no' - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/roberta-base - CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, unstructured pruning - CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/oberta-base/pytorch/huggingface/squad/pruned90_quant-none + MLC_ML_MODEL_FULL_NAME: oberta-base-pruned90-quant-none-bert-99 + MLC_ML_MODEL_INPUTS_DATA_TYPE: int64 + MLC_ML_MODEL_RETRAINING: 'no' + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/roberta-base + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, unstructured pruning + MLC_MODEL_ZOO_STUB: zoo:nlp/question_answering/oberta-base/pytorch/huggingface/squad/pruned90_quant-none roberta-base-pruned85-quant-none: env: - CM_ML_MODEL_FULL_NAME: roberta-base-pruned85-quant-none-bert-99 - CM_ML_MODEL_INPUTS_DATA_TYPE: int64 - CM_ML_MODEL_RETRAINING: 'no' - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/roberta-base - CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, unstructured pruning - CM_MODEL_ZOO_STUB: 
zoo:nlp/question_answering/roberta-base/pytorch/huggingface/squad/pruned85_quant-none + MLC_ML_MODEL_FULL_NAME: roberta-base-pruned85-quant-none-bert-99 + MLC_ML_MODEL_INPUTS_DATA_TYPE: int64 + MLC_ML_MODEL_RETRAINING: 'no' + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/roberta-base + MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, unstructured pruning + MLC_MODEL_ZOO_STUB: zoo:nlp/question_answering/roberta-base/pytorch/huggingface/squad/pruned85_quant-none diff --git a/script/get-ml-model-neuralmagic-zoo/run.bat b/script/get-ml-model-neuralmagic-zoo/run.bat index 854e9b668..8b4501c61 100644 --- a/script/get-ml-model-neuralmagic-zoo/run.bat +++ b/script/get-ml-model-neuralmagic-zoo/run.bat @@ -1,2 +1,2 @@ -%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\download_sparse.py +%MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\download_sparse.py IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/get-ml-model-neuralmagic-zoo/run.sh b/script/get-ml-model-neuralmagic-zoo/run.sh index 9d7d529be..d6970d819 100644 --- a/script/get-ml-model-neuralmagic-zoo/run.sh +++ b/script/get-ml-model-neuralmagic-zoo/run.sh @@ -1,2 +1,2 @@ #!/bin/bash -${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/download_sparse.py +${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/download_sparse.py diff --git a/script/get-ml-model-resnet50/README-extra.md b/script/get-ml-model-resnet50/README-extra.md index 42809e535..87e82b92c 100644 --- a/script/get-ml-model-resnet50/README-extra.md +++ b/script/get-ml-model-resnet50/README-extra.md @@ -9,7 +9,7 @@ where, * `[VARIATION]` is one of `onnx` (alias `onnxruntime`), `pytorch`, `tensorflow` (alias `tf`) , `fp32`, `onnx-1.5-opset-8`, `onnx-1.5-opset-11`. 
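The variables listed under "Exported Variables" below are the script's output interface. A minimal sketch of a downstream consumer reading them after a run; the `onnxruntime` usage here is an illustrative assumption, not a dependency of this script:

```python
import os
import onnxruntime as ort  # assumed consumer library, not required by this script

# Exported by get-ml-model-resnet50 (MLC_ prefix after this patch).
model_file = os.environ["MLC_ML_MODEL_FILE_WITH_PATH"]   # full path to the model file
model_dir = os.environ.get("MLC_ML_MODEL_PATH", os.path.dirname(model_file))

# Load the downloaded ResNet50 ONNX model and report what was picked up.
session = ort.InferenceSession(model_file)
print(f"Loaded {os.environ.get('MLC_ML_MODEL_FILE', model_file)} from {model_dir}")
```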
## Exported Variables -* `CM_ML_MODEL_FILE:` Model filename -* `CM_ML_MODEL_FILE_WITH_PATH:` Full path to model file -* `CM_ML_MODEL_PATH:` Path to folder containing the model file +* `MLC_ML_MODEL_FILE:` Model filename +* `MLC_ML_MODEL_FILE_WITH_PATH:` Full path to model file +* `MLC_ML_MODEL_PATH:` Path to folder containing the model file * More env variables being exported are given in [cm.json file](_cm.json) diff --git a/script/get-ml-model-resnet50/customize.py b/script/get-ml-model-resnet50/customize.py index d4095dc65..a954a3c1d 100644 --- a/script/get-ml-model-resnet50/customize.py +++ b/script/get-ml-model-resnet50/customize.py @@ -8,7 +8,7 @@ def preprocess(i): env = i['env'] - if env.get('CM_ML_MODEL_TF_FIX_INPUT_SHAPE', '') == "yes": + if env.get('MLC_ML_MODEL_TF_FIX_INPUT_SHAPE', '') == "yes": i['run_script_input']['script_name'] = "run-fix-input" return {'return': 0} @@ -18,16 +18,16 @@ def postprocess(i): env = i['env'] - if env.get('CM_ML_MODEL_TF_FIX_INPUT_SHAPE', '') == "yes": - env['CM_ML_MODEL_STARTING_FILE_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] - env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join( + if env.get('MLC_ML_MODEL_TF_FIX_INPUT_SHAPE', '') == "yes": + env['MLC_ML_MODEL_STARTING_FILE_PATH'] = env['MLC_ML_MODEL_FILE_WITH_PATH'] + env['MLC_ML_MODEL_FILE_WITH_PATH'] = os.path.join( os.getcwd(), "resnet50_v1.pb") - env['CM_ML_MODEL_FILE'] = os.path.basename( - env['CM_ML_MODEL_FILE_WITH_PATH']) - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] + env['MLC_ML_MODEL_FILE'] = os.path.basename( + env['MLC_ML_MODEL_FILE_WITH_PATH']) + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_ML_MODEL_FILE_WITH_PATH'] - env['CM_DOWNLOAD_PATH'] = os.path.dirname( - env['CM_ML_MODEL_FILE_WITH_PATH']) + env['MLC_DOWNLOAD_PATH'] = os.path.dirname( + env['MLC_ML_MODEL_FILE_WITH_PATH']) return {'return': 0} diff --git a/script/get-ml-model-resnet50/meta.yaml b/script/get-ml-model-resnet50/meta.yaml index d8637acbb..f9617eca5 100644 --- a/script/get-ml-model-resnet50/meta.yaml +++ b/script/get-ml-model-resnet50/meta.yaml @@ -4,34 +4,34 @@ automation_uid: 5b4e0237da074764 cache: true category: AI/ML models env: - CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH - CM_ML_MODEL: RESNET50 - CM_ML_MODEL_DATASET: imagenet2012-val - CM_ML_MODEL_IMAGE_HEIGHT: '224' - CM_ML_MODEL_IMAGE_WIDTH: '224' - CM_ML_MODEL_NORMALIZE_DATA: '0' - CM_ML_MODEL_RETRAINING: 'no' - CM_ML_MODEL_SUBTRACT_MEANS: 'YES' - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH + MLC_ML_MODEL: RESNET50 + MLC_ML_MODEL_DATASET: imagenet2012-val + MLC_ML_MODEL_IMAGE_HEIGHT: '224' + MLC_ML_MODEL_IMAGE_WIDTH: '224' + MLC_ML_MODEL_NORMALIZE_DATA: '0' + MLC_ML_MODEL_RETRAINING: 'no' + MLC_ML_MODEL_SUBTRACT_MEANS: 'YES' + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' new_env_keys: -- CM_ML_MODEL_* +- MLC_ML_MODEL_* prehook_deps: - env: - CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH - CM_EXTRACT_EXTRACTED_FILENAME: <<>> - CM_EXTRACT_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH - extra_cache_tags: ml-model,resnet50,raw,ml-model-resnet50,_<<>> + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH + MLC_EXTRACT_EXTRACTED_FILENAME: <<>> + MLC_EXTRACT_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH + extra_cache_tags: ml-model,resnet50,raw,ml-model-resnet50,_<<>> force_cache: true force_env_keys: - - CM_OUTDIRNAME + - MLC_OUTDIRNAME names: - model-downloader tags: download-and-extract update_tags_from_env_with_prefix: _url.: - - CM_PACKAGE_URL + - 
MLC_PACKAGE_URL print_env_at_the_end: - CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model + MLC_ML_MODEL_FILE_WITH_PATH: Path to the ML model tags: - get - raw @@ -44,14 +44,14 @@ variations: argmax: default: true env: - CM_ML_MODEL_OUTPUT_LAYER_ARGMAX: 'yes' + MLC_ML_MODEL_OUTPUT_LAYER_ARGMAX: 'yes' group: model-output batch_size.#: env: - CM_ML_MODEL_BATCH_SIZE: '#' + MLC_ML_MODEL_BATCH_SIZE: '#' batch_size.1: env: - CM_ML_MODEL_BATCH_SIZE: '1' + MLC_ML_MODEL_BATCH_SIZE: '1' fix-input-shape: deps: - names: @@ -61,129 +61,129 @@ variations: fp32: default: true env: - CM_ML_MODEL_INPUT_DATA_TYPES: fp32 - CM_ML_MODEL_PRECISION: fp32 - CM_ML_MODEL_WEIGHT_DATA_TYPES: fp32 + MLC_ML_MODEL_INPUT_DATA_TYPES: fp32 + MLC_ML_MODEL_PRECISION: fp32 + MLC_ML_MODEL_WEIGHT_DATA_TYPES: fp32 group: precision from-tf: {} huggingface_default: env: - CM_DOWNLOAD_CHECKSUM: f6a4da60cd5f084d97efc2c1ddb10beb - CM_PACKAGE_URL: https://huggingface.co/ctuning/mlperf-inference-resnet50-onnx-fp32-imagenet2012-v1.0/resolve/main/resnet50_v1.onnx + MLC_DOWNLOAD_CHECKSUM: f6a4da60cd5f084d97efc2c1ddb10beb + MLC_PACKAGE_URL: https://huggingface.co/ctuning/mlperf-inference-resnet50-onnx-fp32-imagenet2012-v1.0/resolve/main/resnet50_v1.onnx int8: env: - CM_ML_MODEL_INPUT_DATA_TYPES: int8 - CM_ML_MODEL_PRECISION: int8 - CM_ML_MODEL_WEIGHT_DATA_TYPES: int8 + MLC_ML_MODEL_INPUT_DATA_TYPES: int8 + MLC_ML_MODEL_PRECISION: int8 + MLC_ML_MODEL_WEIGHT_DATA_TYPES: int8 group: precision ncnn: env: - CM_ML_MODEL_FRAMEWORK: ncnn + MLC_ML_MODEL_FRAMEWORK: ncnn group: framework ncnn,fp32: env: - CM_DOWNLOAD_CHECKSUM: 0360777ab2178a65a8f78c35a7d619e0 - CM_PACKAGE_URL: https://zenodo.org/record/8073420/files/resnet50_v1.bin?download=1 + MLC_DOWNLOAD_CHECKSUM: 0360777ab2178a65a8f78c35a7d619e0 + MLC_PACKAGE_URL: https://zenodo.org/record/8073420/files/resnet50_v1.bin?download=1 post_deps: - env: - CM_DOWNLOAD_CHECKSUM: f9ba6c4d7f66348e6d24c06bfe3f4ae8 - CM_EXTRACT_EXTRACTED_FILENAME: <<>> + MLC_DOWNLOAD_CHECKSUM: f9ba6c4d7f66348e6d24c06bfe3f4ae8 + MLC_EXTRACT_EXTRACTED_FILENAME: <<>> extra_cache_tags: ml-model-params,params,resnet50,ncnn,model-params tags: download-and-extract,_url.https://zenodo.org/record/8073420/files/resnet50_v1.param?download= no-argmax: env: - CM_ML_MODEL_OUTPUT_LAYER_ARGMAX: 'no' + MLC_ML_MODEL_OUTPUT_LAYER_ARGMAX: 'no' group: model-output onnx: default: true default_variations: opset-version: opset-11 env: - CM_ML_MODEL_DATA_LAYOUT: NCHW - CM_ML_MODEL_FRAMEWORK: onnx - CM_ML_MODEL_INPUT_LAYERS: input_tensor:0 - CM_ML_MODEL_INPUT_LAYER_NAME: input_tensor:0 - CM_ML_MODEL_INPUT_SHAPES: '\"input_tensor:0\": (BATCH_SIZE, 3, 224, 224)' - CM_ML_MODEL_OUTPUT_LAYERS: softmax_tensor:0 - CM_ML_MODEL_OUTPUT_LAYER_NAME: softmax_tensor:0 - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: <<>> - CM_ML_MODEL_VER: '1.5' + MLC_ML_MODEL_DATA_LAYOUT: NCHW + MLC_ML_MODEL_FRAMEWORK: onnx + MLC_ML_MODEL_INPUT_LAYERS: input_tensor:0 + MLC_ML_MODEL_INPUT_LAYER_NAME: input_tensor:0 + MLC_ML_MODEL_INPUT_SHAPES: '\"input_tensor:0\": (BATCH_SIZE, 3, 224, 224)' + MLC_ML_MODEL_OUTPUT_LAYERS: softmax_tensor:0 + MLC_ML_MODEL_OUTPUT_LAYER_NAME: softmax_tensor:0 + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: <<>> + MLC_ML_MODEL_VER: '1.5' group: framework onnx,from-tf: env: - CM_DOWNLOAD_CHECKSUM: 7b94a2da05dd30f6c0af23a46bc08886 - CM_ML_MODEL_DATA_LAYOUT: NHWC - CM_ML_MODEL_FRAMEWORK: onnx - CM_ML_MODEL_INPUT_LAYERS: input_tensor - CM_ML_MODEL_INPUT_LAYER_NAME: input_tensor - CM_ML_MODEL_INPUT_SHAPES: '\"input_tensor\": (BATCH_SIZE, 224, 224, 3)' - 
CM_ML_MODEL_OUTPUT_LAYERS: softmax_tensor - CM_ML_MODEL_OUTPUT_LAYER_NAME: softmax_tensor - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://zenodo.org/record/2535873/files/resnet50_v1.pb + MLC_DOWNLOAD_CHECKSUM: 7b94a2da05dd30f6c0af23a46bc08886 + MLC_ML_MODEL_DATA_LAYOUT: NHWC + MLC_ML_MODEL_FRAMEWORK: onnx + MLC_ML_MODEL_INPUT_LAYERS: input_tensor + MLC_ML_MODEL_INPUT_LAYER_NAME: input_tensor + MLC_ML_MODEL_INPUT_SHAPES: '\"input_tensor\": (BATCH_SIZE, 224, 224, 3)' + MLC_ML_MODEL_OUTPUT_LAYERS: softmax_tensor + MLC_ML_MODEL_OUTPUT_LAYER_NAME: softmax_tensor + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://zenodo.org/record/2535873/files/resnet50_v1.pb onnx,from-tf,fp32: adr: model-downloader: tags: _gdown env: - CM_DOWNLOAD_CHECKSUM: 04a510152d9eded924883bdfcf85dd4a - CM_DOWNLOAD_FILENAME: resnet50_v1_modified.onnx - CM_PACKAGE_URL: https://drive.google.com/uc?id=15wZ_8Vt12cb10IEBsln8wksD1zGwlbOM + MLC_DOWNLOAD_CHECKSUM: 04a510152d9eded924883bdfcf85dd4a + MLC_DOWNLOAD_FILENAME: resnet50_v1_modified.onnx + MLC_PACKAGE_URL: https://drive.google.com/uc?id=15wZ_8Vt12cb10IEBsln8wksD1zGwlbOM onnx,opset-11: env: - CM_DOWNLOAD_CHECKSUM: f6a4da60cd5f084d97efc2c1ddb10beb - CM_PACKAGE_URL: https://zenodo.org/record/4735647/files/resnet50_v1.onnx + MLC_DOWNLOAD_CHECKSUM: f6a4da60cd5f084d97efc2c1ddb10beb + MLC_PACKAGE_URL: https://zenodo.org/record/4735647/files/resnet50_v1.onnx onnx,opset-8: env: - CM_DOWNLOAD_CHECKSUM: a638cf028b5870da29e09ccc2f7182e7 - CM_PACKAGE_URL: https://zenodo.org/record/2592612/files/resnet50_v1.onnx + MLC_DOWNLOAD_CHECKSUM: a638cf028b5870da29e09ccc2f7182e7 + MLC_PACKAGE_URL: https://zenodo.org/record/2592612/files/resnet50_v1.onnx onnxruntime: alias: onnx opset-11: env: - CM_ML_MODEL_ONNX_OPSET: '11' + MLC_ML_MODEL_ONNX_OPSET: '11' group: opset-version opset-8: env: - CM_ML_MODEL_ONNX_OPSET: '8' + MLC_ML_MODEL_ONNX_OPSET: '8' group: opset-version pytorch: env: - CM_ML_MODEL_DATA_LAYOUT: NCHW - CM_ML_MODEL_FRAMEWORK: pytorch - CM_ML_MODEL_GIVEN_CHANNEL_MEANS: '?' - CM_ML_MODEL_INPUT_LAYER_NAME: input_tensor:0 - CM_ML_MODEL_INPUT_SHAPES: '\"input_tensor:0\": [BATCH_SIZE, 3, 224, 224]' - CM_ML_MODEL_OUTPUT_LAYERS: output - CM_ML_MODEL_OUTPUT_LAYER_NAME: '?' - CM_ML_STARTING_WEIGHTS_FILENAME: <<>> + MLC_ML_MODEL_DATA_LAYOUT: NCHW + MLC_ML_MODEL_FRAMEWORK: pytorch + MLC_ML_MODEL_GIVEN_CHANNEL_MEANS: '?' + MLC_ML_MODEL_INPUT_LAYER_NAME: input_tensor:0 + MLC_ML_MODEL_INPUT_SHAPES: '\"input_tensor:0\": [BATCH_SIZE, 3, 224, 224]' + MLC_ML_MODEL_OUTPUT_LAYERS: output + MLC_ML_MODEL_OUTPUT_LAYER_NAME: '?' 
+      MLC_ML_STARTING_WEIGHTS_FILENAME: <<>>
     group: framework
   pytorch,fp32:
     env:
-      CM_DOWNLOAD_CHECKSUM: 9e9c86b324d80e65229fab49b8d9a8e8
-      CM_PACKAGE_URL: https://zenodo.org/record/4588417/files/resnet50-19c8e357.pth
+      MLC_DOWNLOAD_CHECKSUM: 9e9c86b324d80e65229fab49b8d9a8e8
+      MLC_PACKAGE_URL: https://zenodo.org/record/4588417/files/resnet50-19c8e357.pth
   pytorch,int8:
     base:
     - int8
     - pytorch
     env:
-      CM_DOWNLOAD_CHECKSUM: 6893ea9769b0afce65bb0ddf002f4438
-      CM_PACKAGE_URL: https://zenodo.org/record/4589637/files/resnet50_INT8bit_quantized.pt
+      MLC_DOWNLOAD_CHECKSUM: 6893ea9769b0afce65bb0ddf002f4438
+      MLC_PACKAGE_URL: https://zenodo.org/record/4589637/files/resnet50_INT8bit_quantized.pt
   tensorflow:
     env:
-      CM_DOWNLOAD_CHECKSUM: 7b94a2da05dd30f6c0af23a46bc08886
-      CM_ML_MODEL_ACCURACY: '76.456'
-      CM_ML_MODEL_DATA_LAYOUT: NHWC
-      CM_ML_MODEL_FRAMEWORK: tensorflow
-      CM_ML_MODEL_GIVEN_CHANNEL_MEANS: 123.68 116.78 103.94
-      CM_ML_MODEL_INPUT_LAYERS: input_tensor
-      CM_ML_MODEL_INPUT_LAYER_NAME: input_tensor
-      CM_ML_MODEL_INPUT_SHAPES: '\"input_tensor:0\": (BATCH_SIZE, 3, 224, 224)'
-      CM_ML_MODEL_NORMALIZE_DATA: '0'
-      CM_ML_MODEL_OUTPUT_LAYERS: softmax_tensor
-      CM_ML_MODEL_OUTPUT_LAYER_NAME: softmax_tensor
-      CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: <<>>
-      CM_ML_MODEL_SUBTRACT_MEANS: 'YES'
-      CM_PACKAGE_URL: https://zenodo.org/record/2535873/files/resnet50_v1.pb
+      MLC_DOWNLOAD_CHECKSUM: 7b94a2da05dd30f6c0af23a46bc08886
+      MLC_ML_MODEL_ACCURACY: '76.456'
+      MLC_ML_MODEL_DATA_LAYOUT: NHWC
+      MLC_ML_MODEL_FRAMEWORK: tensorflow
+      MLC_ML_MODEL_GIVEN_CHANNEL_MEANS: 123.68 116.78 103.94
+      MLC_ML_MODEL_INPUT_LAYERS: input_tensor
+      MLC_ML_MODEL_INPUT_LAYER_NAME: input_tensor
+      MLC_ML_MODEL_INPUT_SHAPES: '\"input_tensor:0\": (BATCH_SIZE, 3, 224, 224)'
+      MLC_ML_MODEL_NORMALIZE_DATA: '0'
+      MLC_ML_MODEL_OUTPUT_LAYERS: softmax_tensor
+      MLC_ML_MODEL_OUTPUT_LAYER_NAME: softmax_tensor
+      MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: <<>>
+      MLC_ML_MODEL_SUBTRACT_MEANS: 'YES'
+      MLC_PACKAGE_URL: https://zenodo.org/record/2535873/files/resnet50_v1.pb
     group: framework
   tensorflow,fix-input-shape:
     deps:
@@ -191,49 +191,49 @@ variations:
       - tensorflow
       tags: get,generic-python-lib,_package.tensorflow
     env:
-      CM_ML_MODEL_TF_FIX_INPUT_SHAPE: 'yes'
+      MLC_ML_MODEL_TF_FIX_INPUT_SHAPE: 'yes'
   tf:
     alias: tensorflow
   tflite:
     env:
-      CM_ML_MODEL_ACCURACY: '76.456'
-      CM_ML_MODEL_DATA_LAYOUT: NHWC
-      CM_ML_MODEL_FRAMEWORK: tflite
-      CM_ML_MODEL_GIVEN_CHANNEL_MEANS: 123.68 116.78 103.94
-      CM_ML_MODEL_INPUT_LAYERS: input_tensor
-      CM_ML_MODEL_INPUT_LAYER_NAME: input_tensor
-      CM_ML_MODEL_INPUT_SHAPES: '\"input_tensor 2\": (BATCH_SIZE, 224, 224, 3)'
-      CM_ML_MODEL_NORMALIZE_DATA: '0'
-      CM_ML_MODEL_OUTPUT_LAYERS: softmax_tensor
-      CM_ML_MODEL_OUTPUT_LAYER_NAME: softmax_tensor
-      CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: <<>>
-      CM_ML_MODEL_SUBTRACT_MEANS: 'YES'
+      MLC_ML_MODEL_ACCURACY: '76.456'
+      MLC_ML_MODEL_DATA_LAYOUT: NHWC
+      MLC_ML_MODEL_FRAMEWORK: tflite
+      MLC_ML_MODEL_GIVEN_CHANNEL_MEANS: 123.68 116.78 103.94
+      MLC_ML_MODEL_INPUT_LAYERS: input_tensor
+      MLC_ML_MODEL_INPUT_LAYER_NAME: input_tensor
+      MLC_ML_MODEL_INPUT_SHAPES: '\"input_tensor 2\": (BATCH_SIZE, 224, 224, 3)'
+      MLC_ML_MODEL_NORMALIZE_DATA: '0'
+      MLC_ML_MODEL_OUTPUT_LAYERS: softmax_tensor
+      MLC_ML_MODEL_OUTPUT_LAYER_NAME: softmax_tensor
+      MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: <<>>
+      MLC_ML_MODEL_SUBTRACT_MEANS: 'YES'
     group: framework
   tflite,argmax:
     env:
-      CM_DAE_EXTRACT_DOWNLOADED: 'yes'
-      CM_DOWNLOAD_CHECKSUM: 92b5ae497e0de5c2d487507953b6e5cc
-      CM_DOWNLOAD_FINAL_ENV_NAME: ''
-      CM_EXTRACT_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH
-      CM_ML_MODEL_FILE: resnet50_v1.tflite
-      CM_ML_MODEL_INPUT_SHAPES: '\"input_tensor 2\": (BATCH_SIZE, 224, 224, 3)'
-      CM_PACKAGE_URL: https://www.dropbox.com/s/cvv2zlfo80h54uz/resnet50_v1.tflite.gz?dl=1
+      MLC_DAE_EXTRACT_DOWNLOADED: 'yes'
+      MLC_DOWNLOAD_CHECKSUM: 92b5ae497e0de5c2d487507953b6e5cc
+      MLC_DOWNLOAD_FINAL_ENV_NAME: ''
+      MLC_EXTRACT_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH
+      MLC_ML_MODEL_FILE: resnet50_v1.tflite
+      MLC_ML_MODEL_INPUT_SHAPES: '\"input_tensor 2\": (BATCH_SIZE, 224, 224, 3)'
+      MLC_PACKAGE_URL: https://www.dropbox.com/s/cvv2zlfo80h54uz/resnet50_v1.tflite.gz?dl=1
   tflite,int8,no-argmax:
     env:
-      CM_DOWNLOAD_CHECKSUM: a657cf1f97545aefd058c1c718cc0e17
-      CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH
-      CM_ML_MODEL_FILE: resnet50_quant_full_mlperf_edgetpu.tflite
-      CM_ML_MODEL_INPUT_SHAPES: '\"input_tensor 2\": (BATCH_SIZE, 224, 224, 3)'
-      CM_PACKAGE_URL: https://zenodo.org/record/8234946/files/resnet50_quant_full_mlperf_edgetpu.tflite?download=1
+      MLC_DOWNLOAD_CHECKSUM: a657cf1f97545aefd058c1c718cc0e17
+      MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH
+      MLC_ML_MODEL_FILE: resnet50_quant_full_mlperf_edgetpu.tflite
+      MLC_ML_MODEL_INPUT_SHAPES: '\"input_tensor 2\": (BATCH_SIZE, 224, 224, 3)'
+      MLC_PACKAGE_URL: https://zenodo.org/record/8234946/files/resnet50_quant_full_mlperf_edgetpu.tflite?download=1
   tflite,no-argmax:
     env:
-      CM_DOWNLOAD_CHECKSUM: 53e81f9f9b459ecfb6d64add3da91564
-      CM_ML_MODEL_FILE: resnet50_v1.no-argmax.tflite
-      CM_ML_MODEL_INPUT_SHAPES: '\"input_tensor 2\": (BATCH_SIZE, 224, 224, 3)'
-      CM_PACKAGE_URL: https://www.dropbox.com/s/vhuqo0wc39lky0a/resnet50_v1.no-argmax.tflite?dl=1
+      MLC_DOWNLOAD_CHECKSUM: 53e81f9f9b459ecfb6d64add3da91564
+      MLC_ML_MODEL_FILE: resnet50_v1.no-argmax.tflite
+      MLC_ML_MODEL_INPUT_SHAPES: '\"input_tensor 2\": (BATCH_SIZE, 224, 224, 3)'
+      MLC_PACKAGE_URL: https://www.dropbox.com/s/vhuqo0wc39lky0a/resnet50_v1.no-argmax.tflite?dl=1
   uint8:
     env:
-      CM_ML_MODEL_INPUT_DATA_TYPES: uint8
-      CM_ML_MODEL_PRECISION: uint8
-      CM_ML_MODEL_WEIGHT_DATA_TYPES: uint8
+      MLC_ML_MODEL_INPUT_DATA_TYPES: uint8
+      MLC_ML_MODEL_PRECISION: uint8
+      MLC_ML_MODEL_WEIGHT_DATA_TYPES: uint8
     group: precision
diff --git a/script/get-ml-model-resnet50/run-fix-input.sh b/script/get-ml-model-resnet50/run-fix-input.sh
index 5364b1233..6a45ac95f 100644
--- a/script/get-ml-model-resnet50/run-fix-input.sh
+++ b/script/get-ml-model-resnet50/run-fix-input.sh
@@ -2,9 +2,9 @@
 wget -nc https://raw.githubusercontent.com/krai/ck-mlperf/master/package/model-tf-mlperf-resnet/fix_input_shape.py
 test $? -eq 0 || exit $?
-${CM_PYTHON_BIN_WITH_PATH} "fix_input_shape.py" \
+${MLC_PYTHON_BIN_WITH_PATH} "fix_input_shape.py" \
 --input_name "input_tensor" \
---input_graph "${CM_ML_MODEL_FILE_WITH_PATH}" \
+--input_graph "${MLC_ML_MODEL_FILE_WITH_PATH}" \
 --output_graph "resnet50_v1.pb" \
 --type b
 test $? -eq 0 || exit $?
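Every hunk above follows the same mechanical rule. A minimal sketch of that rule, written as a hypothetical runtime remapping rather than the source edits this patch actually makes:

```python
import re

def migrate_env_keys(env: dict) -> dict:
    """CM_* keys become MLC_* keys; string values that name other env keys
    (e.g. CM_DOWNLOAD_FINAL_ENV_NAME pointing at CM_ML_MODEL_FILE_WITH_PATH)
    are rewritten the same way, as the hunks above do by hand."""
    migrated = {}
    for key, value in env.items():
        if isinstance(value, str):
            value = re.sub(r'\bCM_([A-Z0-9_]+)', r'MLC_\1', value)
        migrated[re.sub(r'^CM_', 'MLC_', key)] = value
    return migrated

print(migrate_env_keys({
    'CM_DOWNLOAD_FINAL_ENV_NAME': 'CM_ML_MODEL_FILE_WITH_PATH',
    'CM_ML_MODEL': 'RESNET50',
}))
# {'MLC_DOWNLOAD_FINAL_ENV_NAME': 'MLC_ML_MODEL_FILE_WITH_PATH', 'MLC_ML_MODEL': 'RESNET50'}
```

The value rewrite matters: keys such as MLC_DOWNLOAD_FINAL_ENV_NAME and MLC_EXTRACT_FINAL_ENV_NAME carry the names of other variables, so renaming only the keys would leave dangling CM_ references.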
diff --git a/script/get-ml-model-retinanet-nvidia/customize.py b/script/get-ml-model-retinanet-nvidia/customize.py
index 67be9d6a7..efcc67e3d 100644
--- a/script/get-ml-model-retinanet-nvidia/customize.py
+++ b/script/get-ml-model-retinanet-nvidia/customize.py
@@ -11,16 +11,16 @@ def preprocess(i):
     env['+PYTHONPATH'] = []
     env['+PYTHONPATH'].append(
         os.path.join(
-            env['CM_MLPERF_TRAINING_SOURCE'],
+            env['MLC_MLPERF_TRAINING_SOURCE'],
             "single_stage_detector",
             "ssd"))
-    env['CM_ML_MODEL_DYN_BATCHSIZE_PATH'] = os.path.join(
+    env['MLC_ML_MODEL_DYN_BATCHSIZE_PATH'] = os.path.join(
         os.getcwd(), "retinanet_resnext50_32x4d_fpn.opset11.dyn_bs.800x800.onnx")
-    if "CM_NVIDIA_EFFICIENT_NMS" in env:
-        env['CM_NVIDIA_MODEL_PATCHED_PATH'] = os.path.join(
+    if "MLC_NVIDIA_EFFICIENT_NMS" in env:
+        env['MLC_NVIDIA_MODEL_PATCHED_PATH'] = os.path.join(
             os.getcwd(), "fpn_efficientnms_concatall.onnx")
-    env['CM_ML_MODEL_ANCHOR_PATH'] = os.path.join(
-        env['CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH'],
+    env['MLC_ML_MODEL_ANCHOR_PATH'] = os.path.join(
+        env['MLC_MLPERF_INFERENCE_NVIDIA_CODE_PATH'],
         "code",
         "retinanet",
         "tensorrt",
@@ -31,8 +31,8 @@ def preprocess(i):
 def postprocess(i):
     env = i['env']
-    env['CM_NVIDIA_RETINANET_EFFICIENT_NMS_CONCAT_MODEL_WITH_PATH'] = os.path.join(
+    env['MLC_NVIDIA_RETINANET_EFFICIENT_NMS_CONCAT_MODEL_WITH_PATH'] = os.path.join(
         os.getcwd(), "test_fpn_efficientnms_concatall.onnx")
-    if "CM_NVIDIA_EFFICIENT_NMS" in env:
-        env['CM_NVIDIA_RETINANET_EFFICIENT_NMS_CONCAT_MODEL_WITH_PATH'] = env['CM_NVIDIA_MODEL_PATCHED_PATH']
+    if "MLC_NVIDIA_EFFICIENT_NMS" in env:
+        env['MLC_NVIDIA_RETINANET_EFFICIENT_NMS_CONCAT_MODEL_WITH_PATH'] = env['MLC_NVIDIA_MODEL_PATCHED_PATH']
     return {'return': 0}
diff --git a/script/get-ml-model-retinanet-nvidia/meta.yaml b/script/get-ml-model-retinanet-nvidia/meta.yaml
index 7f6880322..261081777 100644
--- a/script/get-ml-model-retinanet-nvidia/meta.yaml
+++ b/script/get-ml-model-retinanet-nvidia/meta.yaml
@@ -4,7 +4,7 @@ automation_uid: 5b4e0237da074764
 cache: true
 category: AI/ML models
 default_env:
-  CM_TORCH_DEVICE: cpu
+  MLC_TORCH_DEVICE: cpu
 deps:
 - tags: detect,os
 - names:
@@ -15,24 +15,24 @@ deps:
 - tags: get,mlperf,inference,src
 - tags: get,ml-model,retinanet,_pytorch,_fp32,_weights
   force_env_keys:
-  - CM_OUTDIRNAME
+  - MLC_OUTDIRNAME
 - enable_if_env:
-    CM_TORCH_DEVICE: cpu
+    MLC_TORCH_DEVICE: cpu
   tags: get,generic-python-lib,_torch
 - tags: get,generic-python-lib,_torchvision
 - tags: get,generic-python-lib,_mlperf_logging
 - enable_if_env:
-    CM_TORCH_DEVICE: cuda
+    MLC_TORCH_DEVICE: cuda
   tags: get,cuda
 - enable_if_env:
-    CM_TORCH_DEVICE: cuda
+    MLC_TORCH_DEVICE: cuda
   tags: get,generic-python-lib,_torch_cuda
 - tags: get,nvidia,mlperf,inference,common-code,-_custom
 new_env_keys:
-- CM_NVIDIA_RETINANET_*
-- CM_ML_MODEL_*
+- MLC_NVIDIA_RETINANET_*
+- MLC_ML_MODEL_*
 print_env_at_the_end:
-  CM_NVIDIA_RETINANET_EFFICIENT_NMS_CONCAT_MODEL_WITH_PATH: Path to the ML model
+  MLC_NVIDIA_RETINANET_EFFICIENT_NMS_CONCAT_MODEL_WITH_PATH: Path to the ML model
 tags:
 - get
 - ml-model
@@ -44,4 +44,4 @@ variations:
     deps:
     - tags: get,generic-python-lib,_polygraphy
     env:
-      CM_NVIDIA_EFFICIENT_NMS: 'yes'
+      MLC_NVIDIA_EFFICIENT_NMS: 'yes'
diff --git a/script/get-ml-model-retinanet-nvidia/nvidia_patch_retinanet_efficientnms.py b/script/get-ml-model-retinanet-nvidia/nvidia_patch_retinanet_efficientnms.py
index d445ef01c..8d571c0f4 100644
--- a/script/get-ml-model-retinanet-nvidia/nvidia_patch_retinanet_efficientnms.py
+++ b/script/get-ml-model-retinanet-nvidia/nvidia_patch_retinanet_efficientnms.py
@@ -25,14 +25,14 @@
 # in_onnx = "/work/code/retinanet/tensorrt/onnx_retina/ref_fpn_transreshapeconcat.onnx"
 in_onnx = os.environ.get(
-    "CM_ML_MODEL_DYN_BATCHSIZE_PATH",
+    "MLC_ML_MODEL_DYN_BATCHSIZE_PATH",
     "build/models/retinanet-resnext50-32x4d/new/retinanet_resnext50_32x4d_fpn.opset11.dyn_bs.800x800.onnx")
 out_onnx = os.environ.get(
-    "CM_NVIDIA_MODEL_PATCHED_PATH",
+    "MLC_NVIDIA_MODEL_PATCHED_PATH",
     "/work/code/retinanet/tensorrt/onnx_generator/test_fpn_efficientnms_concatall.onnx")
 # Anchor at [1, 1]
 anchor_xywh_1x1_npy = os.environ.get(
-    "CM_ML_MODEL_ANCHOR_PATH",
+    "MLC_ML_MODEL_ANCHOR_PATH",
     "/work/code/retinanet/tensorrt/onnx_generator/retinanet_anchor_xywh_1x1.npy")
 graph = gs.import_onnx(onnx.load(in_onnx))
diff --git a/script/get-ml-model-retinanet-nvidia/run.sh b/script/get-ml-model-retinanet-nvidia/run.sh
index 592509b67..55b2d6b72 100644
--- a/script/get-ml-model-retinanet-nvidia/run.sh
+++ b/script/get-ml-model-retinanet-nvidia/run.sh
@@ -1,15 +1,15 @@
 #!/bin/bash
-#${CM_PYTHON_BIN_WITH_PATH} ${CM_MLPERF_INFERENCE_VISION_PATH}/tools/retinanet_pytorch_to_onnx.py --weights ${CM_ML_MODEL_FILE_WITH_PATH}
-cmd="${CM_PYTHON_BIN_WITH_PATH} ${CM_MLPERF_TRAINING_SOURCE}/single_stage_detector/ssd/pth_to_onnx.py --num-classes 264 --image-size 800 800 --input ${CM_ML_MODEL_FILE_WITH_PATH} --output retinanet_resnext50_32x4d_fpn.opset11.dyn_bs.800x800.onnx --device ${CM_TORCH_DEVICE}"
+#${MLC_PYTHON_BIN_WITH_PATH} ${MLC_MLPERF_INFERENCE_VISION_PATH}/tools/retinanet_pytorch_to_onnx.py --weights ${MLC_ML_MODEL_FILE_WITH_PATH}
+cmd="${MLC_PYTHON_BIN_WITH_PATH} ${MLC_MLPERF_TRAINING_SOURCE}/single_stage_detector/ssd/pth_to_onnx.py --num-classes 264 --image-size 800 800 --input ${MLC_ML_MODEL_FILE_WITH_PATH} --output retinanet_resnext50_32x4d_fpn.opset11.dyn_bs.800x800.onnx --device ${MLC_TORCH_DEVICE}"
 echo $cmd
 eval $cmd
 test $? -eq 0 || exit $?
-if [[ ${CM_NVIDIA_EFFICIENT_NMS} == "yes" ]]; then
-  cmd="bash ${CM_TMP_CURRENT_SCRIPT_PATH}/polygraphy_script.sh retinanet_resnext50_32x4d_fpn.opset11.dyn_bs.800x800.onnx folded.onnx backend.onnx nms.onnx"
+if [[ ${MLC_NVIDIA_EFFICIENT_NMS} == "yes" ]]; then
+  cmd="bash ${MLC_TMP_CURRENT_SCRIPT_PATH}/polygraphy_script.sh retinanet_resnext50_32x4d_fpn.opset11.dyn_bs.800x800.onnx folded.onnx backend.onnx nms.onnx"
   echo $cmd
   eval $cmd
   test $? -eq 0 || exit $?
-  cmd="${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/nvidia_patch_retinanet_efficientnms.py"
+  cmd="${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/nvidia_patch_retinanet_efficientnms.py"
   echo $cmd
   eval $cmd
   test $? -eq 0 || exit $?
diff --git a/script/get-ml-model-retinanet/README-extra.md b/script/get-ml-model-retinanet/README-extra.md
index db25a8657..246c54db8 100644
--- a/script/get-ml-model-retinanet/README-extra.md
+++ b/script/get-ml-model-retinanet/README-extra.md
@@ -9,8 +9,8 @@ where,
 * `[VARIATION]` is one of `onnx-fp32`, `pytorch-fp32` or `pytorch-fp32-weights`.
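Beyond the variable list below, the `customize.py` hunk that follows preserves a small indirection: a caller can name, via `MLC_ENV_NAME_ML_MODEL_FILE`, the variable under which the final model path should be exported. A simplified sketch of that pattern (the path shown is hypothetical):

```python
import os

def export_model_path(env: dict) -> None:
    # Simplified from the postprocess below: publish the basename as
    # MLC_ML_MODEL_FILE and, if the caller named a target variable,
    # export the full path under that name as well.
    env['MLC_ML_MODEL_FILE'] = os.path.basename(env['MLC_ML_MODEL_FILE_WITH_PATH'])
    target = env.get('MLC_ENV_NAME_ML_MODEL_FILE', '')
    if target != '':
        env[target] = env['MLC_ML_MODEL_FILE_WITH_PATH']

env = {
    'MLC_ML_MODEL_FILE_WITH_PATH': '/tmp/retinanet.onnx',  # hypothetical path
    'MLC_ENV_NAME_ML_MODEL_FILE': 'MLC_ML_MODEL_RETINANET_PYTORCH_WEIGHTS_FILE_PATH',
}
export_model_path(env)
assert env['MLC_ML_MODEL_RETINANET_PYTORCH_WEIGHTS_FILE_PATH'] == '/tmp/retinanet.onnx'
```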
## Exported Variables -* `CM_ML_MODEL_FILE:` Model filename -* `CM_ML_MODEL_FILE_WITH_PATH:` Full path to model file -* `CM_ML_MODEL_PATH:` Path to folder containing the model file +* `MLC_ML_MODEL_FILE:` Model filename +* `MLC_ML_MODEL_FILE_WITH_PATH:` Full path to model file +* `MLC_ML_MODEL_PATH:` Path to folder containing the model file * More env variables being exported are given in [cm.json file](_cm.json) diff --git a/script/get-ml-model-retinanet/customize.py b/script/get-ml-model-retinanet/customize.py index 71ab2852e..e668ac7b8 100644 --- a/script/get-ml-model-retinanet/customize.py +++ b/script/get-ml-model-retinanet/customize.py @@ -8,9 +8,9 @@ def preprocess(i): env = i['env'] - if env.get('CM_TMP_ML_MODEL_RETINANET_NO_NMS', '') == 'yes': + if env.get('MLC_TMP_ML_MODEL_RETINANET_NO_NMS', '') == 'yes': i['run_script_input']['script_name'] = "run-no-nms" - env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join( + env['MLC_ML_MODEL_FILE_WITH_PATH'] = os.path.join( os.getcwd(), "retinanet.onnx") return {'return': 0} @@ -20,15 +20,15 @@ def postprocess(i): env = i['env'] - env['CM_ML_MODEL_FILE'] = os.path.basename( - env['CM_ML_MODEL_FILE_WITH_PATH']) - if env.get('CM_ENV_NAME_ML_MODEL_FILE', '') != '': - env[env['CM_ENV_NAME_ML_MODEL_FILE']] = env['CM_ML_MODEL_FILE_WITH_PATH'] + env['MLC_ML_MODEL_FILE'] = os.path.basename( + env['MLC_ML_MODEL_FILE_WITH_PATH']) + if env.get('MLC_ENV_NAME_ML_MODEL_FILE', '') != '': + env[env['MLC_ENV_NAME_ML_MODEL_FILE']] = env['MLC_ML_MODEL_FILE_WITH_PATH'] - if env.get("CM_QAIC_PRINT_NODE_PRECISION_INFO", '') == 'yes': - env['CM_ML_MODEL_RETINANET_QAIC_NODE_PRECISION_INFO_FILE_PATH'] = os.path.join( + if env.get("MLC_QAIC_PRINT_NODE_PRECISION_INFO", '') == 'yes': + env['MLC_ML_MODEL_RETINANET_QAIC_NODE_PRECISION_INFO_FILE_PATH'] = os.path.join( os.getcwd(), 'node-precision-info.yaml') - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_ML_MODEL_FILE_WITH_PATH'] return {'return': 0} diff --git a/script/get-ml-model-retinanet/meta.yaml b/script/get-ml-model-retinanet/meta.yaml index 8da05da0e..026853d15 100644 --- a/script/get-ml-model-retinanet/meta.yaml +++ b/script/get-ml-model-retinanet/meta.yaml @@ -4,38 +4,38 @@ automation_uid: 5b4e0237da074764 cache: true category: AI/ML models env: - CM_ML_MODEL: retinanet - CM_ML_MODEL_DATASET: open-images - CM_ML_MODEL_IMAGE_HEIGHT: '800' - CM_ML_MODEL_IMAGE_WIDTH: '800' - CM_ML_MODEL_NORMALIZE_DATA: 'yes' - CM_ML_MODEL_RETRAINING: 'no' - CM_ML_MODEL_SUBTRACT_MEANS: 'yes' - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' + MLC_ML_MODEL: retinanet + MLC_ML_MODEL_DATASET: open-images + MLC_ML_MODEL_IMAGE_HEIGHT: '800' + MLC_ML_MODEL_IMAGE_WIDTH: '800' + MLC_ML_MODEL_NORMALIZE_DATA: 'yes' + MLC_ML_MODEL_RETRAINING: 'no' + MLC_ML_MODEL_SUBTRACT_MEANS: 'yes' + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' new_env_keys: -- CM_ML_MODEL_* -- <<>> +- MLC_ML_MODEL_* +- <<>> prehook_deps: - env: - CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH - CM_EXTRACT_EXTRACTED_FILENAME: <<>> - CM_EXTRACT_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH + MLC_EXTRACT_EXTRACTED_FILENAME: <<>> + MLC_EXTRACT_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH extra_cache_tags: get,ml-model,model-retinanet force_cache: true names: - dae skip_if_env: - CM_TMP_ML_MODEL_RETINANET_NO_NMS: + MLC_TMP_ML_MODEL_RETINANET_NO_NMS: - 'yes' tags: download-and-extract force_env_keys: - - CM_OUTDIRNAME + - MLC_OUTDIRNAME 
update_tags_from_env_with_prefix: _url.: - - CM_PACKAGE_URL + - MLC_PACKAGE_URL print_env_at_the_end: - CM_ML_MODEL_ACCURACY: Model accuracy - CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model + MLC_ML_MODEL_ACCURACY: Model accuracy + MLC_ML_MODEL_FILE_WITH_PATH: Path to the ML model tags: - get - ml-model @@ -48,26 +48,26 @@ variations: fp32: default: true env: - CM_ML_MODEL_INPUT_DATA_TYPES: fp32 - CM_ML_MODEL_PRECISION: fp32 - CM_ML_MODEL_WEIGHT_DATA_TYPES: fp32 + MLC_ML_MODEL_INPUT_DATA_TYPES: fp32 + MLC_ML_MODEL_PRECISION: fp32 + MLC_ML_MODEL_WEIGHT_DATA_TYPES: fp32 group: precision no-nms: env: - CM_ML_MODEL_RETINANET_NO_NMS: 'yes' - CM_QAIC_PRINT_NODE_PRECISION_INFO: 'yes' - CM_TMP_ML_MODEL_RETINANET_NO_NMS: 'yes' + MLC_ML_MODEL_RETINANET_NO_NMS: 'yes' + MLC_QAIC_PRINT_NODE_PRECISION_INFO: 'yes' + MLC_TMP_ML_MODEL_RETINANET_NO_NMS: 'yes' onnx: default: true env: - CM_ML_MODEL_DATA_LAYOUT: NCHW - CM_ML_MODEL_FRAMEWORK: onnx + MLC_ML_MODEL_DATA_LAYOUT: NCHW + MLC_ML_MODEL_FRAMEWORK: onnx group: framework onnx,fp32: env: - CM_DOWNLOAD_CHECKSUM: 4544f4e56e0a4684215831cc937ea45c - CM_ML_MODEL_ACCURACY: '0.3757' - CM_PACKAGE_URL: https://zenodo.org/record/6617879/files/resnext50_32x4d_fpn.onnx + MLC_DOWNLOAD_CHECKSUM: 4544f4e56e0a4684215831cc937ea45c + MLC_ML_MODEL_ACCURACY: '0.3757' + MLC_PACKAGE_URL: https://zenodo.org/record/6617879/files/resnext50_32x4d_fpn.onnx required_disk_space: 150 warning: This model is downloaded from Zenodo.org onnx,no-nms: @@ -78,21 +78,21 @@ variations: - tags: get,generic-python-lib,_package.onnx - tags: get,generic-python-lib,_package.onnxsim - env: - CM_DOWNLOAD_FINAL_ENV_NAME: CM_RETINANET_NO_NMS_PATCH_FILE_PATH + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_RETINANET_NO_NMS_PATCH_FILE_PATH extra_cache_tags: retinanet,training,patch,file force_cache: true tags: download,file,_url.https://raw.githubusercontent.com/arjunsuresh/ck-qaic/main/package/model-onnx-mlperf-retinanet-no-nms/remove-nms-and-extract-priors.patch force_env_keys: - - CM_OUTDIRNAME + - MLC_OUTDIRNAME - env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_TRAINING_REPO_PATCHED_PATH - CM_GIT_PATCH_FILEPATHS: <<>> + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_MLPERF_TRAINING_REPO_PATCHED_PATH + MLC_GIT_PATCH_FILEPATHS: <<>> extra_cache_tags: training,src,mlperf,patched names: - mlperf-training-src tags: get,git,repo,_repo.https://github.com/mlcommons/training.git,_patch - env: - CM_ENV_NAME_ML_MODEL_FILE: CM_ML_MODEL_RETINANET_PYTORCH_WEIGHTS_FILE_PATH + MLC_ENV_NAME_ML_MODEL_FILE: MLC_ML_MODEL_RETINANET_PYTORCH_WEIGHTS_FILE_PATH names: - pytorch-weights tags: get,ml-model,retinanet,_pytorch,_fp32,_weights @@ -104,24 +104,24 @@ variations: env: {} pytorch: env: - CM_ML_MODEL_DATA_LAYOUT: NCHW - CM_ML_MODEL_FRAMEWORK: pytorch + MLC_ML_MODEL_DATA_LAYOUT: NCHW + MLC_ML_MODEL_FRAMEWORK: pytorch group: framework pytorch,fp32: env: - CM_DOWNLOAD_CHECKSUM: a55f6bec3464f605ce8d686da8ac1533 - CM_ML_MODEL_ACCURACY: '0.3755' - CM_PACKAGE_URL: https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth + MLC_DOWNLOAD_CHECKSUM: a55f6bec3464f605ce8d686da8ac1533 + MLC_ML_MODEL_ACCURACY: '0.3755' + MLC_PACKAGE_URL: https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth pytorch,fp32,weights: add_deps_recursive: dae: tags: _extract env: - CM_DOWNLOAD_CHECKSUM: '2037c152a6be18e371ebec654314f7e0 ' - CM_ML_MODEL_ACCURACY: '0.3755' - CM_ML_MODEL_FILE: retinanet_model_10.pth - CM_PACKAGE_URL: https://zenodo.org/record/6605272/files/retinanet_model_10.zip?download=1 - CM_UNZIP: 'yes' + MLC_DOWNLOAD_CHECKSUM: 
'2037c152a6be18e371ebec654314f7e0 ' + MLC_ML_MODEL_ACCURACY: '0.3755' + MLC_ML_MODEL_FILE: retinanet_model_10.pth + MLC_PACKAGE_URL: https://zenodo.org/record/6605272/files/retinanet_model_10.zip?download=1 + MLC_UNZIP: 'yes' weights: env: - CM_MODEL_WEIGHTS_FILE: 'yes' + MLC_MODEL_WEIGHTS_FILE: 'yes' diff --git a/script/get-ml-model-retinanet/run-no-nms.sh b/script/get-ml-model-retinanet/run-no-nms.sh index 48be9d1e6..82e546f62 100644 --- a/script/get-ml-model-retinanet/run-no-nms.sh +++ b/script/get-ml-model-retinanet/run-no-nms.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,19 +17,19 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi } #Add your run commands here... -# run "$CM_RUN_CMD" +# run "$MLC_RUN_CMD" -cmd="PYTHONPATH=${PYTHONPATH}:${CM_MLPERF_TRAINING_REPO_PATCHED_PATH}/single_stage_detector/ssd/ ${CM_PYTHON_BIN_WITH_PATH} ${CM_MLPERF_TRAINING_REPO_PATCHED_PATH}/single_stage_detector/scripts/pth_to_onnx.py --input ${CM_ML_MODEL_RETINANET_PYTORCH_WEIGHTS_FILE_PATH} --output $PWD/retinanet.onnx --image-size 800 800" +cmd="PYTHONPATH=${PYTHONPATH}:${MLC_MLPERF_TRAINING_REPO_PATCHED_PATH}/single_stage_detector/ssd/ ${MLC_PYTHON_BIN_WITH_PATH} ${MLC_MLPERF_TRAINING_REPO_PATCHED_PATH}/single_stage_detector/scripts/pth_to_onnx.py --input ${MLC_ML_MODEL_RETINANET_PYTORCH_WEIGHTS_FILE_PATH} --output $PWD/retinanet.onnx --image-size 800 800" run "$cmd" -if [[ ${CM_QAIC_PRINT_NODE_PRECISION_INFO} == "yes" ]]; then - cmd="${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/node-precision-info.py --input $PWD/retinanet.onnx --output $PWD/node-precision-info.yaml" +if [[ ${MLC_QAIC_PRINT_NODE_PRECISION_INFO} == "yes" ]]; then + cmd="${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/node-precision-info.py --input $PWD/retinanet.onnx --output $PWD/node-precision-info.yaml" run "$cmd" fi diff --git a/script/get-ml-model-rgat/customize.py b/script/get-ml-model-rgat/customize.py index 469cb3943..dbe679243 100644 --- a/script/get-ml-model-rgat/customize.py +++ b/script/get-ml-model-rgat/customize.py @@ -7,7 +7,7 @@ def preprocess(i): os_info = i['os_info'] env = i['env'] - download_dir = env.get('CM_OUTDIRNAME', '') + download_dir = env.get('MLC_OUTDIRNAME', '') path = env.get('RGAT_CHECKPOINT_PATH', '').strip() @@ -17,7 +17,7 @@ def preprocess(i): env['RGAT_CHECKPOINT_PATH'] = os.path.join( download_dir, "RGAT", "RGAT.pt") else: - env['CM_TMP_REQUIRE_DOWNLOAD'] = 'yes' + env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'yes' return {'return': 0} @@ -30,13 +30,13 @@ def postprocess(i): env['RGAT_CHECKPOINT_PATH'] = os.path.join( env['RGAT_DIR_PATH'], "RGAT.pt") - if env.get('CM_ML_MODEL_RGAT_CHECKPOINT_PATH', '') == '': - env['CM_ML_MODEL_RGAT_CHECKPOINT_PATH'] = env['RGAT_CHECKPOINT_PATH'] + if env.get('MLC_ML_MODEL_RGAT_CHECKPOINT_PATH', '') == '': + env['MLC_ML_MODEL_RGAT_CHECKPOINT_PATH'] = env['RGAT_CHECKPOINT_PATH'] - if env.get('CM_ML_MODEL_PATH', '') == '': - env['CM_ML_MODEL_PATH'] = env['CM_ML_MODEL_RGAT_CHECKPOINT_PATH'] + if env.get('MLC_ML_MODEL_PATH', '') == '': + env['MLC_ML_MODEL_PATH'] = 
env['MLC_ML_MODEL_RGAT_CHECKPOINT_PATH'] - env['RGAT_CHECKPOINT_PATH'] = env['CM_ML_MODEL_RGAT_CHECKPOINT_PATH'] - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_RGAT_CHECKPOINT_PATH'] + env['RGAT_CHECKPOINT_PATH'] = env['MLC_ML_MODEL_RGAT_CHECKPOINT_PATH'] + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_ML_MODEL_RGAT_CHECKPOINT_PATH'] return {'return': 0} diff --git a/script/get-ml-model-rgat/meta.yaml b/script/get-ml-model-rgat/meta.yaml index 27a7e39e2..b9ffe25c1 100644 --- a/script/get-ml-model-rgat/meta.yaml +++ b/script/get-ml-model-rgat/meta.yaml @@ -6,31 +6,31 @@ category: AI/ML models docker: fake_run_deps: True env: - CM_ML_MODEL: RGAT + MLC_ML_MODEL: RGAT input_mapping: checkpoint: RGAT_CHECKPOINT_PATH new_env_keys: -- CM_ML_MODEL_* -- CM_ML_MODEL_RGAT_CHECKPOINT_PATH +- MLC_ML_MODEL_* +- MLC_ML_MODEL_RGAT_CHECKPOINT_PATH - RGAT_CHECKPOINT_PATH prehook_deps: - enable_if_env: - CM_DOWNLOAD_TOOL: + MLC_DOWNLOAD_TOOL: - rclone - CM_TMP_REQUIRE_DOWNLOAD: + MLC_TMP_REQUIRE_DOWNLOAD: - 'yes' env: - CM_DOWNLOAD_FINAL_ENV_NAME: RGAT_DIR_PATH + MLC_DOWNLOAD_FINAL_ENV_NAME: RGAT_DIR_PATH extra_cache_tags: rgat,gnn,model,ml-model force_cache: true names: - download-file tags: download,file force_env_keys: - - CM_OUTDIRNAME + - MLC_OUTDIRNAME update_tags_from_env_with_prefix: _url.: - - CM_DOWNLOAD_URL + - MLC_DOWNLOAD_URL print_env_at_the_end: RGAT_CHECKPOINT_PATH: R-GAT checkpoint path tags: @@ -45,9 +45,9 @@ variations: fp32: default: true env: - CM_ML_MODEL_INPUT_DATA_TYPES: fp32 - CM_ML_MODEL_PRECISION: fp32 - CM_ML_MODEL_WEIGHT_DATA_TYPES: fp32 + MLC_ML_MODEL_INPUT_DATA_TYPES: fp32 + MLC_ML_MODEL_PRECISION: fp32 + MLC_ML_MODEL_WEIGHT_DATA_TYPES: fp32 group: precision mlcommons: default: true @@ -59,11 +59,11 @@ variations: download-file: tags: _rclone env: - CM_DOWNLOAD_TOOL: rclone - CM_RCLONE_CONFIG_NAME: mlc-inference + MLC_DOWNLOAD_TOOL: rclone + MLC_RCLONE_CONFIG_NAME: mlc-inference group: download-tool rclone,fp32: env: - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://github.com/mlcommons/inference/tree/master/graph/R-GAT#download-model-using-rclone - CM_DOWNLOAD_URL: mlc-inference:mlcommons-inference-wg-public/R-GAT/RGAT.pt - CM_DOWNLOAD_FILENAME: RGAT + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://github.com/mlcommons/inference/tree/master/graph/R-GAT#download-model-using-rclone + MLC_DOWNLOAD_URL: mlc-inference:mlcommons-inference-wg-public/R-GAT/RGAT.pt + MLC_DOWNLOAD_FILENAME: RGAT diff --git a/script/get-ml-model-rnnt/customize.py b/script/get-ml-model-rnnt/customize.py index 0fc024cdd..b5218f91d 100644 --- a/script/get-ml-model-rnnt/customize.py +++ b/script/get-ml-model-rnnt/customize.py @@ -14,7 +14,7 @@ def preprocess(i): path = os.getcwd() - url = env['CM_PACKAGE_URL'] + url = env['MLC_PACKAGE_URL'] print('Downloading from {}'.format(url)) @@ -26,15 +26,15 @@ def preprocess(i): filename = r['filename'] - if env.get('CM_UNZIP') == "yes": + if env.get('MLC_UNZIP') == "yes": os.system("unzip " + filename) - filename = env['CM_ML_MODEL_FILE'] - env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join(path, filename) + filename = env['MLC_ML_MODEL_FILE'] + env['MLC_ML_MODEL_FILE_WITH_PATH'] = os.path.join(path, filename) else: # Add to path - env['CM_ML_MODEL_FILE'] = filename - env['CM_ML_MODEL_FILE_WITH_PATH'] = r['path'] + env['MLC_ML_MODEL_FILE'] = filename + env['MLC_ML_MODEL_FILE_WITH_PATH'] = r['path'] - env['CM_ML_MODEL_PATH'] = path + env['MLC_ML_MODEL_PATH'] = path return {'return': 0} diff --git a/script/get-ml-model-rnnt/meta.yaml 
b/script/get-ml-model-rnnt/meta.yaml index 913508aaf..0ad9b6895 100644 --- a/script/get-ml-model-rnnt/meta.yaml +++ b/script/get-ml-model-rnnt/meta.yaml @@ -4,14 +4,14 @@ automation_uid: 5b4e0237da074764 cache: true category: AI/ML models env: - CM_ML_MODEL: rnnt - CM_ML_MODEL_DATASET: librispeech - CM_ML_MODEL_RETRAINING: 'no' - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' + MLC_ML_MODEL: rnnt + MLC_ML_MODEL_DATASET: librispeech + MLC_ML_MODEL_RETRAINING: 'no' + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' new_env_keys: -- CM_ML_MODEL_* +- MLC_ML_MODEL_* print_env_at_the_end: - CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model + MLC_ML_MODEL_FILE_WITH_PATH: Path to the ML model tags: - get - ml-model @@ -26,27 +26,27 @@ variations: fp32: default: true env: - CM_ML_MODEL_INPUT_DATA_TYPES: fp32 - CM_ML_MODEL_PRECISION: fp32 - CM_ML_MODEL_WEIGHT_DATA_TYPES: fp32 + MLC_ML_MODEL_INPUT_DATA_TYPES: fp32 + MLC_ML_MODEL_PRECISION: fp32 + MLC_ML_MODEL_WEIGHT_DATA_TYPES: fp32 group: precision pytorch: default: true env: - CM_ML_MODEL_FRAMEWORK: pytorch + MLC_ML_MODEL_FRAMEWORK: pytorch group: framework pytorch,fp32: env: - CM_ML_MODEL_ACCURACY: '0.07452253714852645' - CM_PACKAGE_URL: https://zenodo.org/record/3662521/files/DistributedDataParallel_1576581068.9962234-epoch-100.pt?download=1 + MLC_ML_MODEL_ACCURACY: '0.07452253714852645' + MLC_PACKAGE_URL: https://zenodo.org/record/3662521/files/DistributedDataParallel_1576581068.9962234-epoch-100.pt?download=1 pytorch,fp32,amazon-s3: env: {} pytorch,fp32,zenodo: env: - CM_PACKAGE_URL: https://zenodo.org/record/3662521/files/DistributedDataParallel_1576581068.9962234-epoch-100.pt?download=1 + MLC_PACKAGE_URL: https://zenodo.org/record/3662521/files/DistributedDataParallel_1576581068.9962234-epoch-100.pt?download=1 weights: env: - CM_MODEL_WEIGHTS_FILE: 'yes' + MLC_MODEL_WEIGHTS_FILE: 'yes' zenodo: default: true group: download-src diff --git a/script/get-ml-model-stable-diffusion/customize.py b/script/get-ml-model-stable-diffusion/customize.py index 1049ac703..1eafee100 100644 --- a/script/get-ml-model-stable-diffusion/customize.py +++ b/script/get-ml-model-stable-diffusion/customize.py @@ -10,7 +10,7 @@ def preprocess(i): path = env.get('SDXL_CHECKPOINT_PATH', '').strip() if path == '' or not os.path.exists(path): - env['CM_TMP_REQUIRE_DOWNLOAD'] = 'yes' + env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'yes' return {'return': 0} @@ -20,10 +20,10 @@ def postprocess(i): env = i['env'] if env.get('SDXL_CHECKPOINT_PATH', '') == '': - env['SDXL_CHECKPOINT_PATH'] = env['CM_ML_MODEL_PATH'] - elif env.get('CM_ML_MODEL_PATH', '') == '': - env['CM_ML_MODEL_PATH'] = env['SDXL_CHECKPOINT_PATH'] + env['SDXL_CHECKPOINT_PATH'] = env['MLC_ML_MODEL_PATH'] + elif env.get('MLC_ML_MODEL_PATH', '') == '': + env['MLC_ML_MODEL_PATH'] = env['SDXL_CHECKPOINT_PATH'] - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['SDXL_CHECKPOINT_PATH'] + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['SDXL_CHECKPOINT_PATH'] return {'return': 0} diff --git a/script/get-ml-model-stable-diffusion/meta.yaml b/script/get-ml-model-stable-diffusion/meta.yaml index ae9ee2757..a1c96bd8b 100644 --- a/script/get-ml-model-stable-diffusion/meta.yaml +++ b/script/get-ml-model-stable-diffusion/meta.yaml @@ -4,39 +4,39 @@ automation_uid: 5b4e0237da074764 cache: true category: AI/ML models env: - CM_ML_MODEL: SDXL - CM_ML_MODEL_DATASET: openorca - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' + MLC_ML_MODEL: SDXL + MLC_ML_MODEL_DATASET: openorca + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' input_mapping: checkpoint: SDXL_CHECKPOINT_PATH - 
download_path: CM_DOWNLOAD_PATH - to: CM_DOWNLOAD_PATH + download_path: MLC_DOWNLOAD_PATH + to: MLC_DOWNLOAD_PATH new_env_keys: -- CM_ML_MODEL_* +- MLC_ML_MODEL_* - SDXL_CHECKPOINT_PATH prehook_deps: - enable_if_env: - CM_DOWNLOAD_TOOL: + MLC_DOWNLOAD_TOOL: - git - CM_TMP_REQUIRE_DOWNLOAD: + MLC_TMP_REQUIRE_DOWNLOAD: - 'yes' env: - CM_GIT_CHECKOUT_FOLDER: stable-diffusion-xl-base-1.0 - CM_MODEL_ZOO_ENV_KEY: SDXL + MLC_GIT_CHECKOUT_FOLDER: stable-diffusion-xl-base-1.0 + MLC_MODEL_ZOO_ENV_KEY: SDXL force_env_keys: - - CM_GIT_CHECKOUT_FOLDER + - MLC_GIT_CHECKOUT_FOLDER names: - hf-zoo tags: get,ml-model,huggingface,zoo,_clone-repo,_model-stub.stabilityai/stable-diffusion-xl-base-1.0 force_env_keys: - - CM_OUTDIRNAME + - MLC_OUTDIRNAME - enable_if_env: - CM_DOWNLOAD_TOOL: + MLC_DOWNLOAD_TOOL: - rclone - CM_TMP_REQUIRE_DOWNLOAD: + MLC_TMP_REQUIRE_DOWNLOAD: - 'yes' env: - CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_PATH + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_ML_MODEL_PATH extra_cache_tags: stable-diffusion,sdxl,model force_cache: true names: @@ -44,7 +44,7 @@ prehook_deps: tags: download-and-extract update_tags_from_env_with_prefix: _url.: - - CM_DOWNLOAD_URL + - MLC_DOWNLOAD_URL print_env_at_the_end: SDXL_CHECKPOINT_PATH: Stable diffusion checkpoint path tags: @@ -58,23 +58,23 @@ uid: 22c6516b2d4d4c23 variations: batch_size.#: env: - CM_ML_MODEL_BATCH_SIZE: '#' + MLC_ML_MODEL_BATCH_SIZE: '#' fp16: env: - CM_ML_MODEL_INPUT_DATA_TYPES: fp16 - CM_ML_MODEL_PRECISION: fp16 - CM_ML_MODEL_WEIGHT_DATA_TYPES: fp16 + MLC_ML_MODEL_INPUT_DATA_TYPES: fp16 + MLC_ML_MODEL_PRECISION: fp16 + MLC_ML_MODEL_WEIGHT_DATA_TYPES: fp16 group: precision fp32: default: true env: - CM_ML_MODEL_INPUT_DATA_TYPES: fp32 - CM_ML_MODEL_PRECISION: fp32 - CM_ML_MODEL_WEIGHT_DATA_TYPES: fp32 + MLC_ML_MODEL_INPUT_DATA_TYPES: fp32 + MLC_ML_MODEL_PRECISION: fp32 + MLC_ML_MODEL_WEIGHT_DATA_TYPES: fp32 group: precision git: env: - CM_DOWNLOAD_TOOL: git + MLC_DOWNLOAD_TOOL: git group: download-tool huggingface: default_variations: @@ -82,9 +82,9 @@ variations: group: download-source int8: env: - CM_ML_MODEL_INPUT_DATA_TYPES: int8 - CM_ML_MODEL_PRECISION: int8 - CM_ML_MODEL_WEIGHT_DATA_TYPES: int8 + MLC_ML_MODEL_INPUT_DATA_TYPES: int8 + MLC_ML_MODEL_PRECISION: int8 + MLC_ML_MODEL_WEIGHT_DATA_TYPES: int8 group: precision mlcommons: default: true @@ -94,8 +94,8 @@ variations: pytorch: default: true env: - CM_ML_MODEL_FRAMEWORK: pytorch - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://github.com/mlcommons/inference/tree/master/text_to_image#download-model + MLC_ML_MODEL_FRAMEWORK: pytorch + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://github.com/mlcommons/inference/tree/master/text_to_image#download-model group: framework pytorch,fp16: required_disk_space: 6500 @@ -107,25 +107,25 @@ variations: dae: tags: _rclone env: - CM_DOWNLOAD_TOOL: rclone - CM_RCLONE_CONFIG_NAME: mlc-inference + MLC_DOWNLOAD_TOOL: rclone + MLC_RCLONE_CONFIG_NAME: mlc-inference group: download-tool rclone,fp16: env: - CM_DOWNLOAD_URL: mlc-inference:mlcommons-inference-wg-public/stable_diffusion_fp16 + MLC_DOWNLOAD_URL: mlc-inference:mlcommons-inference-wg-public/stable_diffusion_fp16 rclone,fp32: env: - CM_DOWNLOAD_URL: mlc-inference:mlcommons-inference-wg-public/stable_diffusion_fp32 + MLC_DOWNLOAD_URL: mlc-inference:mlcommons-inference-wg-public/stable_diffusion_fp32 uint8: env: - CM_ML_MODEL_INPUT_DATA_TYPES: uint8 - CM_ML_MODEL_PRECISION: uint8 - CM_ML_MODEL_WEIGHT_DATA_TYPES: uint8 + MLC_ML_MODEL_INPUT_DATA_TYPES: uint8 + MLC_ML_MODEL_PRECISION: uint8 + 
MLC_ML_MODEL_WEIGHT_DATA_TYPES: uint8 group: precision wget: adr: dae: tags: _wget env: - CM_DOWNLOAD_TOOL: wget + MLC_DOWNLOAD_TOOL: wget group: download-tool diff --git a/script/get-ml-model-tiny-resnet/customize.py b/script/get-ml-model-tiny-resnet/customize.py index fac34716f..3fb42da47 100644 --- a/script/get-ml-model-tiny-resnet/customize.py +++ b/script/get-ml-model-tiny-resnet/customize.py @@ -8,12 +8,12 @@ def preprocess(i): env = i['env'] - if env.get("CM_TMP_ML_MODEL_TF2ONNX", "") == "yes": - outputfile = env.get('CM_ML_MODEL_OUTFILE', 'model_quant.onnx') - env['CM_RUN_CMD'] = env['CM_PYTHON_BIN_WITH_PATH'] + " -m tf2onnx.convert --tflite " + \ - env['CM_ML_MODEL_FILE_WITH_PATH'] + " --output " + \ + if env.get("MLC_TMP_ML_MODEL_TF2ONNX", "") == "yes": + outputfile = env.get('MLC_ML_MODEL_OUTFILE', 'model_quant.onnx') + env['MLC_RUN_CMD'] = env['MLC_PYTHON_BIN_WITH_PATH'] + " -m tf2onnx.convert --tflite " + \ + env['MLC_ML_MODEL_FILE_WITH_PATH'] + " --output " + \ outputfile + " --inputs-as-nchw \"input_1_int8\"" - env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join( + env['MLC_ML_MODEL_FILE_WITH_PATH'] = os.path.join( os.getcwd(), outputfile) return {'return': 0} @@ -23,8 +23,8 @@ def postprocess(i): env = i['env'] - env['CM_ML_MODEL_FILE'] = os.path.basename( - env['CM_ML_MODEL_FILE_WITH_PATH']) - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] + env['MLC_ML_MODEL_FILE'] = os.path.basename( + env['MLC_ML_MODEL_FILE_WITH_PATH']) + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_ML_MODEL_FILE_WITH_PATH'] return {'return': 0} diff --git a/script/get-ml-model-tiny-resnet/meta.yaml b/script/get-ml-model-tiny-resnet/meta.yaml index 791ecccee..c1b11f4d6 100644 --- a/script/get-ml-model-tiny-resnet/meta.yaml +++ b/script/get-ml-model-tiny-resnet/meta.yaml @@ -4,31 +4,31 @@ automation_uid: 5b4e0237da074764 cache: true category: AI/ML models env: - CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH - CM_ML_MODEL: RESNET - CM_ML_MODEL_DATASET: cifar-10 - CM_ML_MODEL_IMAGE_HEIGHT: '32' - CM_ML_MODEL_IMAGE_WIDTH: '32' - CM_ML_MODEL_NORMALIZE_DATA: '0' - CM_ML_MODEL_RETRAINING: 'no' - CM_ML_MODEL_SUBTRACT_MEANS: 'YES' - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH + MLC_ML_MODEL: RESNET + MLC_ML_MODEL_DATASET: cifar-10 + MLC_ML_MODEL_IMAGE_HEIGHT: '32' + MLC_ML_MODEL_IMAGE_WIDTH: '32' + MLC_ML_MODEL_NORMALIZE_DATA: '0' + MLC_ML_MODEL_RETRAINING: 'no' + MLC_ML_MODEL_SUBTRACT_MEANS: 'YES' + MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' new_env_keys: -- CM_ML_MODEL_* +- MLC_ML_MODEL_* prehook_deps: - enable_if_env: - CM_PACKAGE_URL: + MLC_PACKAGE_URL: - 'on' env: - CM_EXTRACT_EXTRACTED_FILENAME: <<>> + MLC_EXTRACT_EXTRACTED_FILENAME: <<>> tags: download-and-extract force_env_keys: - - CM_OUTDIRNAME + - MLC_OUTDIRNAME update_tags_from_env_with_prefix: _url.: - - CM_PACKAGE_URL + - MLC_PACKAGE_URL print_env_at_the_end: - CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model + MLC_ML_MODEL_FILE_WITH_PATH: Path to the ML model tags: - get - raw @@ -44,15 +44,15 @@ uid: dd5ec11c3f6e49eb variations: batch_size.#: env: - CM_ML_MODEL_BATCH_SIZE: '#' + MLC_ML_MODEL_BATCH_SIZE: '#' fp32: add_deps_tags: dependent-model: tags: _int8 env: - CM_ML_MODEL_INPUT_DATA_TYPES: fp32 - CM_ML_MODEL_PRECISION: fp32 - CM_ML_MODEL_WEIGHT_DATA_TYPES: fp32 + MLC_ML_MODEL_INPUT_DATA_TYPES: fp32 + MLC_ML_MODEL_PRECISION: fp32 + MLC_ML_MODEL_WEIGHT_DATA_TYPES: fp32 group: precision int8: add_deps_recursive: @@ -60,9 +60,9 @@ variations: tags: _int8 
default: true env: - CM_ML_MODEL_INPUT_DATA_TYPES: int8 - CM_ML_MODEL_PRECISION: int8 - CM_ML_MODEL_WEIGHT_DATA_TYPES: int8 + MLC_ML_MODEL_INPUT_DATA_TYPES: int8 + MLC_ML_MODEL_PRECISION: int8 + MLC_ML_MODEL_WEIGHT_DATA_TYPES: int8 group: precision onnx: deps: @@ -77,34 +77,34 @@ variations: - tf2onnx tags: get,generic-python-lib,_package.tf2onnx env: - CM_TMP_ML_MODEL_TF2ONNX: 'yes' + MLC_TMP_ML_MODEL_TF2ONNX: 'yes' group: framework tflite: default: true env: - CM_ML_MODEL_ACCURACY: '85' - CM_ML_MODEL_DATA_LAYOUT: NHWC - CM_ML_MODEL_FRAMEWORK: tflite - CM_ML_MODEL_GIVEN_CHANNEL_MEANS: '' - CM_ML_MODEL_INPUT_LAYERS: '' - CM_ML_MODEL_INPUT_LAYER_NAME: '' - CM_ML_MODEL_INPUT_SHAPES: '' - CM_ML_MODEL_NORMALIZE_DATA: '0' - CM_ML_MODEL_OUTPUT_LAYERS: '' - CM_ML_MODEL_OUTPUT_LAYER_NAME: '' - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: <<>> - CM_ML_MODEL_SUBTRACT_MEANS: 'YES' + MLC_ML_MODEL_ACCURACY: '85' + MLC_ML_MODEL_DATA_LAYOUT: NHWC + MLC_ML_MODEL_FRAMEWORK: tflite + MLC_ML_MODEL_GIVEN_CHANNEL_MEANS: '' + MLC_ML_MODEL_INPUT_LAYERS: '' + MLC_ML_MODEL_INPUT_LAYER_NAME: '' + MLC_ML_MODEL_INPUT_SHAPES: '' + MLC_ML_MODEL_NORMALIZE_DATA: '0' + MLC_ML_MODEL_OUTPUT_LAYERS: '' + MLC_ML_MODEL_OUTPUT_LAYER_NAME: '' + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: <<>> + MLC_ML_MODEL_SUBTRACT_MEANS: 'YES' group: framework tflite,int8: env: - CM_DOWNLOAD_CHECKSUM: 2d6dd48722471313e4c4528249205ae3 - CM_PACKAGE_URL: https://github.com/mlcommons/tiny/raw/master/benchmark/training/image_classification/trained_models/pretrainedResnet_quant.tflite + MLC_DOWNLOAD_CHECKSUM: 2d6dd48722471313e4c4528249205ae3 + MLC_PACKAGE_URL: https://github.com/mlcommons/tiny/raw/master/benchmark/training/image_classification/trained_models/pretrainedResnet_quant.tflite uint8: add_deps_tags: dependent-model: tags: _int8 env: - CM_ML_MODEL_INPUT_DATA_TYPES: uint8 - CM_ML_MODEL_PRECISION: uint8 - CM_ML_MODEL_WEIGHT_DATA_TYPES: uint8 + MLC_ML_MODEL_INPUT_DATA_TYPES: uint8 + MLC_ML_MODEL_PRECISION: uint8 + MLC_ML_MODEL_WEIGHT_DATA_TYPES: uint8 group: precision diff --git a/script/get-ml-model-tiny-resnet/run.sh b/script/get-ml-model-tiny-resnet/run.sh index e935cf158..1dc364808 100644 --- a/script/get-ml-model-tiny-resnet/run.sh +++ b/script/get-ml-model-tiny-resnet/run.sh @@ -1,4 +1,4 @@ #!/bin/bash -echo ${CM_RUN_CMD} -eval ${CM_RUN_CMD} +echo ${MLC_RUN_CMD} +eval ${MLC_RUN_CMD} test $? 
-eq 0 || exit 1 diff --git a/script/get-ml-model-using-imagenet-from-model-zoo/meta.yaml b/script/get-ml-model-using-imagenet-from-model-zoo/meta.yaml index 3f5b3f648..ab7ebea8f 100644 --- a/script/get-ml-model-using-imagenet-from-model-zoo/meta.yaml +++ b/script/get-ml-model-using-imagenet-from-model-zoo/meta.yaml @@ -4,10 +4,10 @@ automation_uid: 5b4e0237da074764 cache: true category: AI/ML models env: - CM_ML_MODEL: resnet - CM_ML_MODEL_DATASET: imagenet + MLC_ML_MODEL: resnet + MLC_ML_MODEL_DATASET: imagenet new_env_keys: -- CM_ML_MODEL* +- MLC_ML_MODEL* tags: - get - ml-model diff --git a/script/get-mlperf-automotive-scratch-space/customize.py b/script/get-mlperf-automotive-scratch-space/customize.py index 9c3cda605..17f6712dd 100644 --- a/script/get-mlperf-automotive-scratch-space/customize.py +++ b/script/get-mlperf-automotive-scratch-space/customize.py @@ -12,10 +12,10 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') - if env.get('CM_ABTF_SCRATCH_PATH', '') == '': - env['CM_ABTF_SCRATCH_PATH'] = os.getcwd() + if env.get('MLC_ABTF_SCRATCH_PATH', '') == '': + env['MLC_ABTF_SCRATCH_PATH'] = os.getcwd() return {'return': 0} @@ -24,17 +24,17 @@ def postprocess(i): env = i['env'] - env['CM_ABTF_SCRATCH_PATH_MODELS'] = os.path.join( - env['CM_ABTF_SCRATCH_PATH'], "models") - env['CM_ABTF_SCRATCH_PATH_DATASETS'] = os.path.join( - env['CM_ABTF_SCRATCH_PATH'], "datasets") + env['MLC_ABTF_SCRATCH_PATH_MODELS'] = os.path.join( + env['MLC_ABTF_SCRATCH_PATH'], "models") + env['MLC_ABTF_SCRATCH_PATH_DATASETS'] = os.path.join( + env['MLC_ABTF_SCRATCH_PATH'], "datasets") - if not os.path.exists(env['CM_ABTF_SCRATCH_PATH_MODELS']): - os.makedirs(env['CM_ABTF_SCRATCH_PATH_MODELS']) + if not os.path.exists(env['MLC_ABTF_SCRATCH_PATH_MODELS']): + os.makedirs(env['MLC_ABTF_SCRATCH_PATH_MODELS']) - if not os.path.exists(env['CM_ABTF_SCRATCH_PATH_DATASETS']): - os.makedirs(env['CM_ABTF_SCRATCH_PATH_DATASETS']) + if not os.path.exists(env['MLC_ABTF_SCRATCH_PATH_DATASETS']): + os.makedirs(env['MLC_ABTF_SCRATCH_PATH_DATASETS']) - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ABTF_SCRATCH_PATH'] + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_ABTF_SCRATCH_PATH'] return {'return': 0} diff --git a/script/get-mlperf-automotive-scratch-space/meta.yaml b/script/get-mlperf-automotive-scratch-space/meta.yaml index cefe6da4c..249029955 100644 --- a/script/get-mlperf-automotive-scratch-space/meta.yaml +++ b/script/get-mlperf-automotive-scratch-space/meta.yaml @@ -8,12 +8,12 @@ docker: run: false input_description: {} input_mapping: - scratch_path: CM_ABTF_SCRATCH_PATH + scratch_path: MLC_ABTF_SCRATCH_PATH new_env_keys: -- CM_ABTF_SCRATCH_PATH -- CM_ABTF_SCRATCH_PATH_MODELS -- CM_ABTF_SCRATCH_PATH_DATASETS -- CM_ABTF_SCRATCH_VERSION +- MLC_ABTF_SCRATCH_PATH +- MLC_ABTF_SCRATCH_PATH_MODELS +- MLC_ABTF_SCRATCH_PATH_DATASETS +- MLC_ABTF_SCRATCH_VERSION new_state_keys: [] post_deps: [] posthook_deps: [] @@ -28,12 +28,12 @@ uid: c384b7604e5c47d5 variations: version.#: env: - CM_ABTF_SCRATCH_VERSION: '#' + MLC_ABTF_SCRATCH_VERSION: '#' group: version version.4_0: default: true env: - CM_ABTF_SCRATCH_VERSION: '4_0' + MLC_ABTF_SCRATCH_VERSION: '4_0' group: version versions: {} diff --git a/script/get-mlperf-automotive-scratch-space/run.sh b/script/get-mlperf-automotive-scratch-space/run.sh index 3a584c10c..821adb3f9 100644 --- a/script/get-mlperf-automotive-scratch-space/run.sh +++ 
b/script/get-mlperf-automotive-scratch-space/run.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,11 +17,11 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi } #Add your run commands here... -# run "$CM_RUN_CMD" +# run "$MLC_RUN_CMD" diff --git a/script/get-mlperf-inference-intel-scratch-space/customize.py b/script/get-mlperf-inference-intel-scratch-space/customize.py index 8862e1adf..01ea63e3a 100644 --- a/script/get-mlperf-inference-intel-scratch-space/customize.py +++ b/script/get-mlperf-inference-intel-scratch-space/customize.py @@ -12,10 +12,10 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') - if env.get('CM_INTEL_MLPERF_SCRATCH_PATH', '') == '': - env['CM_INTEL_MLPERF_SCRATCH_PATH'] = os.getcwd() + if env.get('MLC_INTEL_MLPERF_SCRATCH_PATH', '') == '': + env['MLC_INTEL_MLPERF_SCRATCH_PATH'] = os.getcwd() return {'return': 0} @@ -24,6 +24,6 @@ def postprocess(i): env = i['env'] - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_INTEL_MLPERF_SCRATCH_PATH'] + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_INTEL_MLPERF_SCRATCH_PATH'] return {'return': 0} diff --git a/script/get-mlperf-inference-intel-scratch-space/meta.yaml b/script/get-mlperf-inference-intel-scratch-space/meta.yaml index 1ddab564e..22924ebd0 100644 --- a/script/get-mlperf-inference-intel-scratch-space/meta.yaml +++ b/script/get-mlperf-inference-intel-scratch-space/meta.yaml @@ -10,8 +10,8 @@ input_description: {} input_mapping: scratch_path: MLPERF_INTEL_SCRATCH_PATH new_env_keys: -- CM_INTEL_MLPERF_SCRATCH_PATH -- CM_INTEL_SCRATCH_SPACE_VERSION +- MLC_INTEL_MLPERF_SCRATCH_PATH +- MLC_INTEL_SCRATCH_SPACE_VERSION new_state_keys: [] post_deps: [] posthook_deps: [] @@ -27,11 +27,11 @@ uid: e83fca30851f45ef variations: version.#: env: - CM_INTEL_SCRATCH_SPACE_VERSION: '#' + MLC_INTEL_SCRATCH_SPACE_VERSION: '#' group: version version.4_0: default: true env: - CM_INTEL_SCRATCH_SPACE_VERSION: '4_0' + MLC_INTEL_SCRATCH_SPACE_VERSION: '4_0' group: version versions: {} diff --git a/script/get-mlperf-inference-intel-scratch-space/run.sh b/script/get-mlperf-inference-intel-scratch-space/run.sh index eb5ce2456..4260c33a9 100644 --- a/script/get-mlperf-inference-intel-scratch-space/run.sh +++ b/script/get-mlperf-inference-intel-scratch-space/run.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,16 +17,16 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi } #Add your run commands here... 
-# run "$CM_RUN_CMD"
+# run "$MLC_RUN_CMD"

-scratch_path=${CM_NVIDIA_MLPERF_SCRATCH_PATH}
+scratch_path=${MLC_INTEL_MLPERF_SCRATCH_PATH}
 mkdir -p ${scratch_path}/data
 mkdir -p ${scratch_path}/preprocessed_data
 mkdir -p ${scratch_path}/models
diff --git a/script/get-mlperf-inference-loadgen/customize.py b/script/get-mlperf-inference-loadgen/customize.py
index c6747af04..6a3802faf 100644
--- a/script/get-mlperf-inference-loadgen/customize.py
+++ b/script/get-mlperf-inference-loadgen/customize.py
@@ -7,7 +7,7 @@ def preprocess(i):
     os_info = i['os_info']
     env = i['env']

-    if env.get('CM_TMP_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP', '') == 'yes':
+    if env.get('MLC_TMP_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP', '') == 'yes':
         i['run_script_input']['script_name'] = "donotrun"

     return {'return': 0}
@@ -18,7 +18,7 @@ def postprocess(i):
     os_info = i['os_info']
     env = i['env']

-    if env.get('CM_TMP_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP', '') == 'yes':
+    if env.get('MLC_TMP_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP', '') == 'yes':
         return {'return': 0}

     for key in ['+PYTHONPATH', '+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH',
@@ -33,11 +33,11 @@ def postprocess(i):

     cur_path = os.getcwd()
     install_path = os.path.join(cur_path, 'install')
-    env['CM_MLPERF_INFERENCE_LOADGEN_INSTALL_PATH'] = install_path
+    env['MLC_MLPERF_INFERENCE_LOADGEN_INSTALL_PATH'] = install_path
     build_path = os.path.join(cur_path, 'build')

     if os.path.exists(build_path):
-        env['CM_MLPERF_INFERENCE_LOADGEN_BUILD_PATH'] = build_path
+        env['MLC_MLPERF_INFERENCE_LOADGEN_BUILD_PATH'] = build_path

     include_path = os.path.join(install_path, 'include')
     lib_path = os.path.join(install_path, 'lib')
@@ -45,13 +45,13 @@ def postprocess(i):

     env['+C_INCLUDE_PATH'].append(include_path)
     env['+CPLUS_INCLUDE_PATH'].append(include_path)
-    env['CM_MLPERF_INFERENCE_LOADGEN_INCLUDE_PATH'] = include_path
+    env['MLC_MLPERF_INFERENCE_LOADGEN_INCLUDE_PATH'] = include_path

     env['+LD_LIBRARY_PATH'].append(lib_path)
     env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path)
-    env['CM_MLPERF_INFERENCE_LOADGEN_LIBRARY_PATH'] = lib_path
+    env['MLC_MLPERF_INFERENCE_LOADGEN_LIBRARY_PATH'] = lib_path

     env['+PYTHONPATH'].append(python_path)
-    env['CM_MLPERF_INFERENCE_LOADGEN_PYTHON_PATH'] = python_path
+    env['MLC_MLPERF_INFERENCE_LOADGEN_PYTHON_PATH'] = python_path

     return {'return': 0}
diff --git a/script/get-mlperf-inference-loadgen/meta.yaml b/script/get-mlperf-inference-loadgen/meta.yaml
index 7b879a448..ad59163fd 100644
--- a/script/get-mlperf-inference-loadgen/meta.yaml
+++ b/script/get-mlperf-inference-loadgen/meta.yaml
@@ -9,7 +9,7 @@ cache: true
 category: MLPerf benchmark support

 default_env:
-  CM_SHARED_BUILD: 'no'
+  MLC_SHARED_BUILD: 'no'

 default_version: master

@@ -20,16 +20,16 @@ deps:
   - python
   tags: get,python3
 - force_env_keys:
-  - CM_GIT_URL
-  - CM_GIT_CHECKOUT
+  - MLC_GIT_URL
+  - MLC_GIT_CHECKOUT
   names:
   - inference-src-loadgen
   skip_if_env:
-    CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD:
+    MLC_MLPERF_INFERENCE_LOADGEN_DOWNLOAD:
     - 'YES'
   tags: get,mlcommons,inference,src
 - enable_if_env:
-    CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD:
+    MLC_MLPERF_INFERENCE_LOADGEN_DOWNLOAD:
     - 'YES'
   force_cache: true
   names:
@@ -37,20 +37,20 @@ deps:
   tags: download-and-extract,file,_wget,_extract
   update_tags_from_env_with_prefix:
     _url.:
-    - CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL
+    - MLC_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL
 - names:
   - compiler
   skip_if_any_env:
-    CM_HOST_OS_TYPE:
+    MLC_HOST_OS_TYPE:
     - windows
-    CM_TMP_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP:
+    
MLC_TMP_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP: - 'yes' tags: get,compiler - enable_if_env: - CM_HOST_OS_TYPE: + MLC_HOST_OS_TYPE: - windows skip_if_env: - CM_TMP_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP: + MLC_TMP_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP: - 'yes' names: - compiler @@ -77,9 +77,9 @@ deps: tags: get,generic-python-lib,_package.setuptools extra_cache_tags_from_env: -- env: CM_PYTHON_CACHE_TAGS +- env: MLC_PYTHON_CACHE_TAGS prefix: python- -- env: CM_COMPILER_CACHE_TAGS +- env: MLC_COMPILER_CACHE_TAGS prefix: compiler- new_env_keys: @@ -88,7 +88,7 @@ new_env_keys: - +CPLUS_INCLUDE_PATH - +LD_LIBRARY_PATH - +DYLD_FALLBACK_LIBRARY_PATH -- CM_MLPERF_INFERENCE_LOADGEN_* +- MLC_MLPERF_INFERENCE_LOADGEN_* tags: - get @@ -101,14 +101,14 @@ tags: variations: from-pip: env: - CM_TMP_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP: 'yes' + MLC_TMP_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP: 'yes' deps: - tags: get,generic-python-lib,_package.mlcommons-loadgen copy: add_deps: inference-src-loadgen: env: - CM_GIT_URL: https://github.com/cknowledge/mlperf-inference-loadgen-copy + MLC_GIT_URL: https://github.com/cknowledge/mlperf-inference-loadgen-copy # You still need to add --version=main since it's forced here to custom-python: ad: @@ -116,40 +116,40 @@ variations: tags: _custom-python python3: skip_if_env: - CM_TMP_USE_CUSTOM_PYTHON: + MLC_TMP_USE_CUSTOM_PYTHON: - 'on' env: - CM_TMP_USE_CUSTOM_PYTHON: 'on' + MLC_TMP_USE_CUSTOM_PYTHON: 'on' keep-build: group: clean-build env: - CM_MLPERF_INFERENCE_LOADGEN_BUILD_CLEAN: 'no' + MLC_MLPERF_INFERENCE_LOADGEN_BUILD_CLEAN: 'no' clean-build: group: clean-build default: true env: - CM_MLPERF_INFERENCE_LOADGEN_BUILD_CLEAN: 'yes' + MLC_MLPERF_INFERENCE_LOADGEN_BUILD_CLEAN: 'yes' download: env: - CM_DOWNLOAD_CHECKSUM: af3f9525965b2c1acc348fb882a5bfd1 - CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: 'YES' - CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL: https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 - CM_MLPERF_INFERENCE_LOADGEN_VERSION: v3.1 - CM_VERIFY_SSL: false + MLC_DOWNLOAD_CHECKSUM: af3f9525965b2c1acc348fb882a5bfd1 + MLC_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: 'YES' + MLC_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL: https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 + MLC_MLPERF_INFERENCE_LOADGEN_VERSION: v3.1 + MLC_VERIFY_SSL: false download_v3.1: env: - CM_DOWNLOAD_CHECKSUM: af3f9525965b2c1acc348fb882a5bfd1 - CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: 'YES' - CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL: https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 - CM_MLPERF_INFERENCE_LOADGEN_VERSION: v3.1 - CM_VERIFY_SSL: false + MLC_DOWNLOAD_CHECKSUM: af3f9525965b2c1acc348fb882a5bfd1 + MLC_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: 'YES' + MLC_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL: https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 + MLC_MLPERF_INFERENCE_LOADGEN_VERSION: v3.1 + MLC_VERIFY_SSL: false download_v4.0: env: - CM_DOWNLOAD_CHECKSUM: b4d97525d9ad0539a64667f2a3ca20c5 - CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: 'YES' - CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL: https://www.dropbox.com/scl/fi/gk5e9kziju5t56umxyzyx/loadgen.zip?rlkey=vsie4xnzml1inpjplm5cg7t54&dl=0 - CM_MLPERF_INFERENCE_LOADGEN_VERSION: v4.0 - CM_VERIFY_SSL: false + MLC_DOWNLOAD_CHECKSUM: b4d97525d9ad0539a64667f2a3ca20c5 + MLC_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: 'YES' + MLC_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL: 
https://www.dropbox.com/scl/fi/gk5e9kziju5t56umxyzyx/loadgen.zip?rlkey=vsie4xnzml1inpjplm5cg7t54&dl=0 + MLC_MLPERF_INFERENCE_LOADGEN_VERSION: v4.0 + MLC_VERIFY_SSL: false no-compilation-warnings: env: '+ CXXFLAGS': @@ -190,4 +190,4 @@ versions: version: r3.1 print_env_at_the_end: - CM_MLPERF_INFERENCE_LOADGEN_INSTALL_PATH: "Path to the tool" + MLC_MLPERF_INFERENCE_LOADGEN_INSTALL_PATH: "Path to the tool" diff --git a/script/get-mlperf-inference-loadgen/run.bat b/script/get-mlperf-inference-loadgen/run.bat index 6d97f12b4..b93166404 100644 --- a/script/get-mlperf-inference-loadgen/run.bat +++ b/script/get-mlperf-inference-loadgen/run.bat @@ -5,29 +5,29 @@ echo ======================================================= set CUR_DIR=%cd% echo Current path in CM script: %CUR_DIR% -if "%CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD%" == "YES" ( - set CM_MLPERF_INFERENCE_SOURCE=%CM_EXTRACT_EXTRACTED_PATH% +if "%MLC_MLPERF_INFERENCE_LOADGEN_DOWNLOAD%" == "YES" ( + set MLC_MLPERF_INFERENCE_SOURCE=%MLC_EXTRACT_EXTRACTED_PATH% ) set INSTALL_DIR=%CUR_DIR%\install echo. -echo Switching to %CM_MLPERF_INFERENCE_SOURCE%\loadgen +echo Switching to %MLC_MLPERF_INFERENCE_SOURCE%\loadgen -cd %CM_MLPERF_INFERENCE_SOURCE%\loadgen +cd %MLC_MLPERF_INFERENCE_SOURCE%\loadgen IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% echo. -echo Running %CM_PYTHON_BIN% setup.py develop +echo Running %MLC_PYTHON_BIN% setup.py develop -%CM_PYTHON_BIN% setup.py develop +%MLC_PYTHON_BIN% setup.py develop IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% echo ======================================================= cmake ^ -DCMAKE_INSTALL_PREFIX=%INSTALL_DIR% ^ - %CM_MLPERF_INFERENCE_SOURCE%\loadgen ^ - -DPYTHON_EXECUTABLE:FILEPATH=%CM_PYTHON_BIN_WITH_PATH% + %MLC_MLPERF_INFERENCE_SOURCE%\loadgen ^ + -DPYTHON_EXECUTABLE:FILEPATH=%MLC_PYTHON_BIN_WITH_PATH% IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% echo ======================================================= diff --git a/script/get-mlperf-inference-loadgen/run.sh b/script/get-mlperf-inference-loadgen/run.sh index 47885f150..c5dd24095 100644 --- a/script/get-mlperf-inference-loadgen/run.sh +++ b/script/get-mlperf-inference-loadgen/run.sh @@ -11,38 +11,38 @@ echo "******************************************************" cd build -if [ "${CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD}" == "YES" ]; then - export CM_MLPERF_INFERENCE_SOURCE="${CM_EXTRACT_EXTRACTED_PATH}" +if [ "${MLC_MLPERF_INFERENCE_LOADGEN_DOWNLOAD}" == "YES" ]; then + export MLC_MLPERF_INFERENCE_SOURCE="${MLC_EXTRACT_EXTRACTED_PATH}" fi -if [ -z "${CM_MLPERF_INFERENCE_SOURCE}" ]; then - echo "Error: env CM_MLPERF_INFERENCE_SOURCE is not defined - something is wrong with script automation!" +if [ -z "${MLC_MLPERF_INFERENCE_SOURCE}" ]; then + echo "Error: env MLC_MLPERF_INFERENCE_SOURCE is not defined - something is wrong with script automation!" exit 1 fi cmake \ -DCMAKE_INSTALL_PREFIX="${INSTALL_DIR}" \ - "${CM_MLPERF_INFERENCE_SOURCE}/loadgen" \ - -DPYTHON_EXECUTABLE:FILEPATH="${CM_PYTHON_BIN_WITH_PATH}" -B . + "${MLC_MLPERF_INFERENCE_SOURCE}/loadgen" \ + -DPYTHON_EXECUTABLE:FILEPATH="${MLC_PYTHON_BIN_WITH_PATH}" -B . test $? -eq 0 || exit $? echo "******************************************************" -CM_MAKE_CORES=${CM_MAKE_CORES:-${CM_HOST_CPU_TOTAL_CORES}} -CM_MAKE_CORES=${CM_MAKE_CORES:-2} +MLC_MAKE_CORES=${MLC_MAKE_CORES:-${MLC_HOST_CPU_TOTAL_CORES}} +MLC_MAKE_CORES=${MLC_MAKE_CORES:-2} -cmake --build . --target install -j "${CM_MAKE_CORES}" +cmake --build . --target install -j "${MLC_MAKE_CORES}" test $? -eq 0 || exit $? 
# Clean build directory (too large) cd "${CUR_DIR}" -if [[ $CM_MLPERF_INFERENCE_LOADGEN_BUILD_CLEAN == "yes" ]]; then +if [[ $MLC_MLPERF_INFERENCE_LOADGEN_BUILD_CLEAN == "yes" ]]; then rm -rf build fi -cd "${CM_MLPERF_INFERENCE_SOURCE}/loadgen" -${CM_PYTHON_BIN_WITH_PATH} -m pip install . --target="${MLPERF_INFERENCE_PYTHON_SITE_BASE}" +cd "${MLC_MLPERF_INFERENCE_SOURCE}/loadgen" +${MLC_PYTHON_BIN_WITH_PATH} -m pip install . --target="${MLPERF_INFERENCE_PYTHON_SITE_BASE}" test $? -eq 0 || exit $? # Clean the built wheel diff --git a/script/get-mlperf-inference-nvidia-common-code/customize.py b/script/get-mlperf-inference-nvidia-common-code/customize.py index a8b61cf0e..7f320265e 100644 --- a/script/get-mlperf-inference-nvidia-common-code/customize.py +++ b/script/get-mlperf-inference-nvidia-common-code/customize.py @@ -14,8 +14,8 @@ def preprocess(i): def postprocess(i): env = i['env'] - env['CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH'] = os.path.join( - env['CM_MLPERF_INFERENCE_RESULTS_PATH'], "closed", "NVIDIA") - env['+PYTHONPATH'] = [env['CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH']] + env['MLC_MLPERF_INFERENCE_NVIDIA_CODE_PATH'] = os.path.join( + env['MLC_MLPERF_INFERENCE_RESULTS_PATH'], "closed", "NVIDIA") + env['+PYTHONPATH'] = [env['MLC_MLPERF_INFERENCE_NVIDIA_CODE_PATH']] return {'return': 0} diff --git a/script/get-mlperf-inference-nvidia-common-code/meta.yaml b/script/get-mlperf-inference-nvidia-common-code/meta.yaml index bb3828b00..771820f8d 100644 --- a/script/get-mlperf-inference-nvidia-common-code/meta.yaml +++ b/script/get-mlperf-inference-nvidia-common-code/meta.yaml @@ -12,7 +12,7 @@ deps: tags: get,mlperf,inference,results,official,_code-only new_env_keys: - +PYTHONPATH -- CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH +- MLC_MLPERF_INFERENCE_NVIDIA_CODE_PATH tags: - get - nvidia diff --git a/script/get-mlperf-inference-nvidia-scratch-space/customize.py b/script/get-mlperf-inference-nvidia-scratch-space/customize.py index 0e21e01ab..5227b9cd3 100644 --- a/script/get-mlperf-inference-nvidia-scratch-space/customize.py +++ b/script/get-mlperf-inference-nvidia-scratch-space/customize.py @@ -12,13 +12,13 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') - if env.get('CM_NVIDIA_MLPERF_SCRATCH_PATH', '') == '': + if env.get('MLC_NVIDIA_MLPERF_SCRATCH_PATH', '') == '': if env.get('MLPERF_SCRATCH_PATH', '') != '': - env['CM_NVIDIA_MLPERF_SCRATCH_PATH'] = env['MLPERF_SCRATCH_PATH'] + env['MLC_NVIDIA_MLPERF_SCRATCH_PATH'] = env['MLPERF_SCRATCH_PATH'] else: - env['CM_NVIDIA_MLPERF_SCRATCH_PATH'] = os.getcwd() + env['MLC_NVIDIA_MLPERF_SCRATCH_PATH'] = os.getcwd() return {'return': 0} @@ -27,7 +27,7 @@ def postprocess(i): env = i['env'] - env['MLPERF_SCRATCH_PATH'] = env['CM_NVIDIA_MLPERF_SCRATCH_PATH'] - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_NVIDIA_MLPERF_SCRATCH_PATH'] + env['MLPERF_SCRATCH_PATH'] = env['MLC_NVIDIA_MLPERF_SCRATCH_PATH'] + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_NVIDIA_MLPERF_SCRATCH_PATH'] return {'return': 0} diff --git a/script/get-mlperf-inference-nvidia-scratch-space/meta.yaml b/script/get-mlperf-inference-nvidia-scratch-space/meta.yaml index b6612265f..826db4016 100644 --- a/script/get-mlperf-inference-nvidia-scratch-space/meta.yaml +++ b/script/get-mlperf-inference-nvidia-scratch-space/meta.yaml @@ -8,11 +8,11 @@ docker: run: false input_description: {} input_mapping: - scratch_path: CM_NVIDIA_MLPERF_SCRATCH_PATH + scratch_path: MLC_NVIDIA_MLPERF_SCRATCH_PATH 
new_env_keys: -- CM_NVIDIA_MLPERF_SCRATCH_PATH +- MLC_NVIDIA_MLPERF_SCRATCH_PATH - MLPERF_SCRATCH_PATH -- CM_NVIDIA_SCRATCH_SPACE_VERSION +- MLC_NVIDIA_SCRATCH_SPACE_VERSION new_state_keys: [] post_deps: [] posthook_deps: [] @@ -28,19 +28,19 @@ uid: 0b2bec8b29fb4ab7 variations: version.#: env: - CM_NVIDIA_SCRATCH_SPACE_VERSION: '#' + MLC_NVIDIA_SCRATCH_SPACE_VERSION: '#' group: version version.4_0: env: - CM_NVIDIA_SCRATCH_SPACE_VERSION: '4_0' + MLC_NVIDIA_SCRATCH_SPACE_VERSION: '4_0' group: version version.4_1: env: - CM_NVIDIA_SCRATCH_SPACE_VERSION: '4_1' + MLC_NVIDIA_SCRATCH_SPACE_VERSION: '4_1' group: version version.4_1-dev: default: true env: - CM_NVIDIA_SCRATCH_SPACE_VERSION: 4_1-dev + MLC_NVIDIA_SCRATCH_SPACE_VERSION: 4_1-dev group: version versions: {} diff --git a/script/get-mlperf-inference-nvidia-scratch-space/run.sh b/script/get-mlperf-inference-nvidia-scratch-space/run.sh index eb5ce2456..4260c33a9 100644 --- a/script/get-mlperf-inference-nvidia-scratch-space/run.sh +++ b/script/get-mlperf-inference-nvidia-scratch-space/run.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,16 +17,16 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi } #Add your run commands here... -# run "$CM_RUN_CMD" +# run "$MLC_RUN_CMD" -scratch_path=${CM_NVIDIA_MLPERF_SCRATCH_PATH} +scratch_path=${MLC_NVIDIA_MLPERF_SCRATCH_PATH} mkdir -p ${scratch_path}/data mkdir -p ${scratch_path}/preprocessed_data mkdir -p ${scratch_path}/models diff --git a/script/get-mlperf-inference-results-dir/customize.py b/script/get-mlperf-inference-results-dir/customize.py index 997a0564d..a49293155 100644 --- a/script/get-mlperf-inference-results-dir/customize.py +++ b/script/get-mlperf-inference-results-dir/customize.py @@ -12,10 +12,10 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') - if env.get('CM_MLPERF_INFERENCE_RESULTS_DIR', '') == '': - env['CM_MLPERF_INFERENCE_RESULTS_DIR'] = os.getcwd() + if env.get('MLC_MLPERF_INFERENCE_RESULTS_DIR', '') == '': + env['MLC_MLPERF_INFERENCE_RESULTS_DIR'] = os.getcwd() return {'return': 0} @@ -24,6 +24,6 @@ def postprocess(i): env = i['env'] - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_MLPERF_INFERENCE_RESULTS_DIR'] + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_MLPERF_INFERENCE_RESULTS_DIR'] return {'return': 0} diff --git a/script/get-mlperf-inference-results-dir/meta.yaml b/script/get-mlperf-inference-results-dir/meta.yaml index 4aad78007..a38c9f7a4 100644 --- a/script/get-mlperf-inference-results-dir/meta.yaml +++ b/script/get-mlperf-inference-results-dir/meta.yaml @@ -8,10 +8,10 @@ docker: run: false input_description: {} input_mapping: - results_dir: CM_MLPERF_INFERENCE_RESULTS_DIR + results_dir: MLC_MLPERF_INFERENCE_RESULTS_DIR new_env_keys: -- CM_MLPERF_INFERENCE_RESULTS_DIR -- CM_MLPERF_INFERENCE_RESULTS_VERSION +- MLC_MLPERF_INFERENCE_RESULTS_DIR +- MLC_MLPERF_INFERENCE_RESULTS_VERSION new_state_keys: [] post_deps: [] posthook_deps: [] @@ -27,18 +27,18 @@ tags: uid: 
84f3c5aad5e1444b
 variations:
   path.#:
-    CM_MLPERF_INFERENCE_RESULTS_DIR: '#'
+    MLC_MLPERF_INFERENCE_RESULTS_DIR: '#'
   version.#:
     env:
-      CM_MLPERF_INFERENCE_RESULTS_VERSION: '#'
+      MLC_MLPERF_INFERENCE_RESULTS_VERSION: '#'
     group: version
   version.4_1:
     env:
-      CM_MLPERF_INFERENCE_RESULTS_VERSION: '4_1'
+      MLC_MLPERF_INFERENCE_RESULTS_VERSION: '4_1'
     group: version
   version.4_1-dev:
     default: true
     env:
-      CM_MLPERF_INFERENCE_RESULTS_VERSION: 4_1-dev
+      MLC_MLPERF_INFERENCE_RESULTS_VERSION: 4_1-dev
     group: version
 versions: {}
diff --git a/script/get-mlperf-inference-results/README-extra.md b/script/get-mlperf-inference-results/README-extra.md
index 8ed3bed39..df428fff8 100644
--- a/script/get-mlperf-inference-results/README-extra.md
+++ b/script/get-mlperf-inference-results/README-extra.md
@@ -11,7 +11,7 @@ cm run script --tags=get,mlperf,inference,results --version=[VERSION]
 * `v2.1:` MLCommons inference 2.1 round results

 ## Exported Variables
-* `CM_MLPERF_INFERENCE_RESULTS_PATH`: Directory path to the inference results repository
+* `MLC_MLPERF_INFERENCE_RESULTS_PATH`: Directory path to the inference results repository

 ## Supported and Tested OS
 1. Ubuntu 18.04, 20.04, 22.04
diff --git a/script/get-mlperf-inference-results/customize.py b/script/get-mlperf-inference-results/customize.py
index b1e054dac..f921067e1 100644
--- a/script/get-mlperf-inference-results/customize.py
+++ b/script/get-mlperf-inference-results/customize.py
@@ -14,23 +14,23 @@ def preprocess(i):
     meta = i['meta']

     if env.get('NVIDIA_ONLY', '') == 'yes':
-        env['CM_GIT_URL'] = "https://github.com/GATEOverflow/nvidia-inference-code.git"
+        env['MLC_GIT_URL'] = "https://github.com/GATEOverflow/nvidia-inference-code.git"

-    if 'GITHUB_REPO_OWNER' in env and '<<<GITHUB_REPO_OWNER>>>' in env['CM_GIT_URL']:
-        env['CM_GIT_URL'] = env['CM_GIT_URL'].replace(
+    if 'GITHUB_REPO_OWNER' in env and '<<<GITHUB_REPO_OWNER>>>' in env['MLC_GIT_URL']:
+        env['MLC_GIT_URL'] = env['MLC_GIT_URL'].replace(
             '<<<GITHUB_REPO_OWNER>>>', env['GITHUB_REPO_OWNER'])

-    if 'CM_GIT_DEPTH' not in env:
-        env['CM_GIT_DEPTH'] = ''
+    if 'MLC_GIT_DEPTH' not in env:
+        env['MLC_GIT_DEPTH'] = ''

-    if 'CM_GIT_RECURSE_SUBMODULES' not in env:
-        env['CM_GIT_RECURSE_SUBMODULES'] = ''
+    if 'MLC_GIT_RECURSE_SUBMODULES' not in env:
+        env['MLC_GIT_RECURSE_SUBMODULES'] = ''

-    need_version = env.get('CM_VERSION', '')
+    need_version = env.get('MLC_VERSION', '')
     versions = meta['versions']

     if need_version != '' and not need_version in versions:
-        env['CM_GIT_CHECKOUT'] = need_version
+        env['MLC_GIT_CHECKOUT'] = need_version

     return {'return': 0}

@@ -40,9 +40,9 @@ def postprocess(i):
     env = i['env']
     state = i['state']

-    if env.get('CM_GIT_REPO_CURRENT_HASH', '') != '':
-        env['CM_VERSION'] += "-git-" + env['CM_GIT_REPO_CURRENT_HASH']
+    if env.get('MLC_GIT_REPO_CURRENT_HASH', '') != '':
+        env['MLC_VERSION'] += "-git-" + env['MLC_GIT_REPO_CURRENT_HASH']

-# env['CM_MLPERF_INFERENCE_RESULTS_PATH'] = os.path.join(os.getcwd(), "inference_results_"+env['CM_MLPERF_INFERENCE_RESULTS_VERSION_NAME'])
+# env['MLC_MLPERF_INFERENCE_RESULTS_PATH'] = os.path.join(os.getcwd(), "inference_results_"+env['MLC_MLPERF_INFERENCE_RESULTS_VERSION_NAME'])

     return {'return': 0}
diff --git a/script/get-mlperf-inference-results/meta.yaml b/script/get-mlperf-inference-results/meta.yaml
index 22ceaa92a..e4fb0067b 100644
--- a/script/get-mlperf-inference-results/meta.yaml
+++ b/script/get-mlperf-inference-results/meta.yaml
@@ -4,25 +4,25 @@ automation_uid: 5b4e0237da074764
 cache: true
 category: MLPerf benchmark support
 default_env:
-  CM_GIT_CHECKOUT: master
-  CM_GIT_DEPTH: --depth 1
-  CM_GIT_PATCH: 'no'
+  MLC_GIT_CHECKOUT: master
+  MLC_GIT_DEPTH: --depth 1
+  MLC_GIT_PATCH: 'no'
 default_version: v4.0
 deps: []
 new_env_keys:
-- CM_MLPERF_INFERENCE_RESULTS_*
+- MLC_MLPERF_INFERENCE_RESULTS_*
 prehook_deps:
 - env:
-    CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_INFERENCE_RESULTS_PATH
+    MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_MLPERF_INFERENCE_RESULTS_PATH
   extra_cache_tags: mlperf,inference,results,official
   force_env_keys:
-  - CM_GIT_*
+  - MLC_GIT_*
   names:
   - inference-results-repo
   tags: get,git,repo
   update_tags_from_env_with_prefix:
     _repo.:
-    - CM_GIT_URL
+    - MLC_GIT_URL
 tags:
 - get
 - results
@@ -36,7 +36,7 @@ variations:
   code-only:
     adr:
       inference-results-repo:
-        tags: _branch.cm-code-only
+        tags: _branch.mlc-code-only
     group: repo-branch
   ctuning:
     env:
@@ -63,21 +63,21 @@ versions:
   v2.1:
     env:
-      CM_GIT_URL: https://github.com/<<<GITHUB_REPO_OWNER>>>/inference_results_v2.1.git
-      CM_MLPERF_INFERENCE_RESULTS_VERSION_NAME: v2.1
+      MLC_GIT_URL: https://github.com/<<<GITHUB_REPO_OWNER>>>/inference_results_v2.1.git
+      MLC_MLPERF_INFERENCE_RESULTS_VERSION_NAME: v2.1
   v3.0:
     env:
-      CM_GIT_URL: https://github.com/<<<GITHUB_REPO_OWNER>>>/inference_results_v3.0.git
-      CM_MLPERF_INFERENCE_RESULTS_VERSION_NAME: v3.0
+      MLC_GIT_URL: https://github.com/<<<GITHUB_REPO_OWNER>>>/inference_results_v3.0.git
+      MLC_MLPERF_INFERENCE_RESULTS_VERSION_NAME: v3.0
   v3.1:
     env:
-      CM_GIT_URL: https://github.com/<<<GITHUB_REPO_OWNER>>>/inference_results_v3.1.git
-      CM_MLPERF_INFERENCE_RESULTS_VERSION_NAME: v3.1
+      MLC_GIT_URL: https://github.com/<<<GITHUB_REPO_OWNER>>>/inference_results_v3.1.git
+      MLC_MLPERF_INFERENCE_RESULTS_VERSION_NAME: v3.1
   v4.0:
     env:
-      CM_GIT_URL: https://github.com/<<<GITHUB_REPO_OWNER>>>/inference_results_v4.0.git
-      CM_MLPERF_INFERENCE_RESULTS_VERSION_NAME: v4.0
+      MLC_GIT_URL: https://github.com/<<<GITHUB_REPO_OWNER>>>/inference_results_v4.0.git
+      MLC_MLPERF_INFERENCE_RESULTS_VERSION_NAME: v4.0
   v4.1:
     env:
-      CM_GIT_URL: https://github.com/<<<GITHUB_REPO_OWNER>>>/inference_results_v4.1.git
-      CM_MLPERF_INFERENCE_RESULTS_VERSION_NAME: v4.1
+      MLC_GIT_URL: https://github.com/<<<GITHUB_REPO_OWNER>>>/inference_results_v4.1.git
+      MLC_MLPERF_INFERENCE_RESULTS_VERSION_NAME: v4.1
diff --git a/script/get-mlperf-inference-src/README-extra.md b/script/get-mlperf-inference-src/README-extra.md
index a96611831..c02697077 100644
--- a/script/get-mlperf-inference-src/README-extra.md
+++ b/script/get-mlperf-inference-src/README-extra.md
@@ -19,10 +19,10 @@ where [VARIATION] is one of
 * `r2.1:` Uses the release branch used for MLCommons inference 2.1 round

 ## Exported Variables
-* `CM_MLPERF_INFERENCE_SOURCE`: Directory path of the cloned inference repository
-* `CM_MLPERF_INFERENCE_VISION_PATH`: Directory path to the vision folder inside the inference repository
+* `MLC_MLPERF_INFERENCE_SOURCE`: Directory path of the cloned inference repository
+* `MLC_MLPERF_INFERENCE_VISION_PATH`: Directory path to the vision folder inside the inference repository
 * `PYTHONPATH`: Is appended with the paths to vision module and the submission tools module
-* `CM_MLPERF_INFERENCE_MODELS`: This `state` variable contains the configuration of the MLPerf models as per the selected version
+* `MLC_MLPERF_INFERENCE_MODELS`: This `state` variable contains the configuration of the MLPerf models as per the selected version

 ## Supported and Tested OS
 1. 
Ubuntu 18.04, 20.04, 22.04 diff --git a/script/get-mlperf-inference-src/customize.py b/script/get-mlperf-inference-src/customize.py index dee74bdaf..d523f6abe 100644 --- a/script/get-mlperf-inference-src/customize.py +++ b/script/get-mlperf-inference-src/customize.py @@ -16,52 +16,52 @@ def preprocess(i): script_path = i['run_script_input']['path'] - if env.get('CM_GIT_CHECKOUT', '') == '' and env.get( - 'CM_GIT_URL', '') == '' and env.get('CM_VERSION', '') == '': - # if custom checkout and url parameters are not set and CM_VERSION is + if env.get('MLC_GIT_CHECKOUT', '') == '' and env.get( + 'MLC_GIT_URL', '') == '' and env.get('MLC_VERSION', '') == '': + # if custom checkout and url parameters are not set and MLC_VERSION is # not specified - env['CM_VERSION'] = "master" - env["CM_GIT_CHECKOUT"] = "master" - env["CM_GIT_URL"] = "https://github.com/mlcommons/inference" - elif env.get('CM_GIT_CHECKOUT', '') != '' and env.get('CM_TMP_GIT_CHECKOUT', '') != '' and env.get('CM_GIT_CHECKOUT', '') != env.get('CM_TMP_GIT_CHECKOUT', ''): + env['MLC_VERSION'] = "master" + env["MLC_GIT_CHECKOUT"] = "master" + env["MLC_GIT_URL"] = "https://github.com/mlcommons/inference" + elif env.get('MLC_GIT_CHECKOUT', '') != '' and env.get('MLC_TMP_GIT_CHECKOUT', '') != '' and env.get('MLC_GIT_CHECKOUT', '') != env.get('MLC_TMP_GIT_CHECKOUT', ''): # if checkout branch is assigned inside version and custom branch is # also specified return { "return": 1, "error": "Conflicting branches between version assigned and user specified."} - elif env.get('CM_GIT_URL', '') != '' and env.get('CM_TMP_GIT_URL', '') != '' and env.get('CM_GIT_URL', '') != env.get('CM_TMP_GIT_URL', ''): + elif env.get('MLC_GIT_URL', '') != '' and env.get('MLC_TMP_GIT_URL', '') != '' and env.get('MLC_GIT_URL', '') != env.get('MLC_TMP_GIT_URL', ''): # if GIT URL is assigned inside version and custom branch is also # specified return { "return": 1, "error": "Conflicting URL's between version assigned and user specified."} - if env.get('CM_VERSION', '') == '': - env['CM_VERSION'] = "custom" + if env.get('MLC_VERSION', '') == '': + env['MLC_VERSION'] = "custom" # check whether branch and url is specified, # if not try to assign the values specified in version parameters, # if version parameters does not have the value to a parameter, set the # default one - if env.get('CM_GIT_CHECKOUT', '') == '' and env.get( - 'CM_GIT_CHECKOUT_TAG', '') == '': - if env.get('CM_TMP_GIT_CHECKOUT', '') != '': - env["CM_GIT_CHECKOUT"] = env["CM_TMP_GIT_CHECKOUT"] + if env.get('MLC_GIT_CHECKOUT', '') == '' and env.get( + 'MLC_GIT_CHECKOUT_TAG', '') == '': + if env.get('MLC_TMP_GIT_CHECKOUT', '') != '': + env["MLC_GIT_CHECKOUT"] = env["MLC_TMP_GIT_CHECKOUT"] else: - env["CM_GIT_CHECKOUT"] = "master" + env["MLC_GIT_CHECKOUT"] = "master" - if env.get('CM_GIT_URL', '') == '': - if env.get('CM_TMP_GIT_URL', '') != '': - env["CM_GIT_URL"] = env["CM_TMP_GIT_URL"] + if env.get('MLC_GIT_URL', '') == '': + if env.get('MLC_TMP_GIT_URL', '') != '': + env["MLC_GIT_URL"] = env["MLC_TMP_GIT_URL"] else: - env["CM_GIT_URL"] = "https://github.com/mlcommons/inference" + env["MLC_GIT_URL"] = "https://github.com/mlcommons/inference" - if env.get("CM_MLPERF_LAST_RELEASE", '') == '': - env["CM_MLPERF_LAST_RELEASE"] = "v5.0" + if env.get("MLC_MLPERF_LAST_RELEASE", '') == '': + env["MLC_MLPERF_LAST_RELEASE"] = "v5.0" - if 'CM_GIT_DEPTH' not in env: - env['CM_GIT_DEPTH'] = '' + if 'MLC_GIT_DEPTH' not in env: + env['MLC_GIT_DEPTH'] = '' - if 'CM_GIT_RECURSE_SUBMODULES' not in env: - 
env['CM_GIT_RECURSE_SUBMODULES'] = '' + if 'MLC_GIT_RECURSE_SUBMODULES' not in env: + env['MLC_GIT_RECURSE_SUBMODULES'] = '' submodules = [] possible_submodules = { "gn": "third_party/gn", @@ -71,21 +71,21 @@ def preprocess(i): } for submodule in possible_submodules: env_name = submodule.upper().replace("-", "_") - if env.get("CM_SUBMODULE_" + env_name) == "yes": + if env.get("MLC_SUBMODULE_" + env_name) == "yes": submodules.append(possible_submodules[submodule]) - env['CM_GIT_SUBMODULES'] = ",".join(submodules) + env['MLC_GIT_SUBMODULES'] = ",".join(submodules) - if env.get('CM_GIT_PATCH_FILENAME', '') != '': - patch_file_name = env['CM_GIT_PATCH_FILENAME'] - env['CM_GIT_PATCH_FILEPATHS'] = os.path.join( + if env.get('MLC_GIT_PATCH_FILENAME', '') != '': + patch_file_name = env['MLC_GIT_PATCH_FILENAME'] + env['MLC_GIT_PATCH_FILEPATHS'] = os.path.join( script_path, 'patch', patch_file_name) - need_version = env.get('CM_VERSION', '') + need_version = env.get('MLC_VERSION', '') versions = meta['versions'] if need_version != '' and not need_version in versions: - env['CM_GIT_CHECKOUT'] = need_version + env['MLC_GIT_CHECKOUT'] = need_version return {'return': 0} @@ -95,62 +95,62 @@ def postprocess(i): env = i['env'] state = i['state'] - inference_root = env['CM_MLPERF_INFERENCE_SOURCE'] - env['CM_MLPERF_INFERENCE_VISION_PATH'] = os.path.join( + inference_root = env['MLC_MLPERF_INFERENCE_SOURCE'] + env['MLC_MLPERF_INFERENCE_VISION_PATH'] = os.path.join( inference_root, 'vision') - env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'] = os.path.join( + env['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'] = os.path.join( inference_root, 'vision', 'classification_and_detection') - env['CM_MLPERF_INFERENCE_BERT_PATH'] = os.path.join( + env['MLC_MLPERF_INFERENCE_BERT_PATH'] = os.path.join( inference_root, 'language', 'bert') - env['CM_MLPERF_INFERENCE_GPTJ_PATH'] = os.path.join( + env['MLC_MLPERF_INFERENCE_GPTJ_PATH'] = os.path.join( inference_root, 'language', 'gpt-j') - env['CM_MLPERF_INFERENCE_RNNT_PATH'] = os.path.join( + env['MLC_MLPERF_INFERENCE_RNNT_PATH'] = os.path.join( inference_root, 'speech_recognition', 'rnnt') - env['CM_MLPERF_INFERENCE_DLRM_PATH'] = os.path.join( + env['MLC_MLPERF_INFERENCE_DLRM_PATH'] = os.path.join( inference_root, 'recommendation', 'dlrm') - env['CM_MLPERF_INFERENCE_DLRM_V2_PATH'] = os.path.join( + env['MLC_MLPERF_INFERENCE_DLRM_V2_PATH'] = os.path.join( inference_root, 'recommendation', 'dlrm_v2') - env['CM_MLPERF_INFERENCE_RGAT_PATH'] = os.path.join( + env['MLC_MLPERF_INFERENCE_RGAT_PATH'] = os.path.join( inference_root, 'graph', 'R-GAT') - env['CM_MLPERF_INFERENCE_3DUNET_PATH'] = os.path.join( + env['MLC_MLPERF_INFERENCE_3DUNET_PATH'] = os.path.join( inference_root, 'vision', 'medical_imaging', '3d-unet-kits19') - env['CM_GET_DEPENDENT_CACHED_PATH'] = inference_root + env['MLC_GET_DEPENDENT_CACHED_PATH'] = inference_root # 20221024: we save and restore env in the main script and can clean env here for determinism # if '+PYTHONPATH' not in env: env['+PYTHONPATH'] = [] env['+PYTHONPATH'] = [] env['+PYTHONPATH'].append( os.path.join( - env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], + env['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], 'python')) if os.path.exists(os.path.join(inference_root, "loadgen", "VERSION.txt")): with open(os.path.join(inference_root, "loadgen", "VERSION.txt")) as f: version_info = f.read().strip() - env['CM_MLPERF_INFERENCE_SOURCE_VERSION'] = version_info + env['MLC_MLPERF_INFERENCE_SOURCE_VERSION'] = 
version_info - if env.get('CM_GET_MLPERF_IMPLEMENTATION_ONLY', '') == "yes": + if env.get('MLC_GET_MLPERF_IMPLEMENTATION_ONLY', '') == "yes": return {'return': 0} - env['CM_MLPERF_INFERENCE_CONF_PATH'] = os.path.join( + env['MLC_MLPERF_INFERENCE_CONF_PATH'] = os.path.join( inference_root, 'mlperf.conf') env['+PYTHONPATH'].append( os.path.join( - env['CM_MLPERF_INFERENCE_SOURCE'], + env['MLC_MLPERF_INFERENCE_SOURCE'], 'tools', 'submission')) valid_models = get_valid_models( - env['CM_MLPERF_LAST_RELEASE'], - env['CM_MLPERF_INFERENCE_SOURCE']) + env['MLC_MLPERF_LAST_RELEASE'], + env['MLC_MLPERF_INFERENCE_SOURCE']) - state['CM_MLPERF_INFERENCE_MODELS'] = valid_models + state['MLC_MLPERF_INFERENCE_MODELS'] = valid_models - if env.get('CM_GIT_REPO_CURRENT_HASH', '') != '': - env['CM_VERSION'] += "-git-" + env['CM_GIT_REPO_CURRENT_HASH'] + if env.get('MLC_GIT_REPO_CURRENT_HASH', '') != '': + env['MLC_VERSION'] += "-git-" + env['MLC_GIT_REPO_CURRENT_HASH'] - return {'return': 0, 'version': env['CM_VERSION']} + return {'return': 0, 'version': env['MLC_VERSION']} def get_valid_models(mlperf_version, mlperf_path): diff --git a/script/get-mlperf-inference-src/meta.yaml b/script/get-mlperf-inference-src/meta.yaml index a9f7410a5..1d2db1989 100644 --- a/script/get-mlperf-inference-src/meta.yaml +++ b/script/get-mlperf-inference-src/meta.yaml @@ -4,10 +4,10 @@ automation_uid: 5b4e0237da074764 cache: true category: MLPerf benchmark support default_env: - CM_GIT_CHECKOUT_FOLDER: inference - CM_GIT_DEPTH: --depth 4 - CM_GIT_PATCH: 'no' - CM_GIT_RECURSE_SUBMODULES: '' + MLC_GIT_CHECKOUT_FOLDER: inference + MLC_GIT_DEPTH: --depth 4 + MLC_GIT_PATCH: 'no' + MLC_GIT_RECURSE_SUBMODULES: '' default_version: master deps: - tags: detect,os @@ -16,41 +16,41 @@ deps: - python3 tags: get,python3 new_env_keys: -- CM_MLPERF_INFERENCE_3DUNET_PATH -- CM_MLPERF_INFERENCE_BERT_PATH -- CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH -- CM_MLPERF_INFERENCE_CONF_PATH -- CM_MLPERF_INFERENCE_DLRM_PATH -- CM_MLPERF_INFERENCE_DLRM_V2_PATH -- CM_MLPERF_INFERENCE_GPTJ_PATH -- CM_MLPERF_INFERENCE_RNNT_PATH -- CM_MLPERF_INFERENCE_RGAT_PATH -- CM_MLPERF_INFERENCE_SOURCE -- CM_MLPERF_INFERENCE_SOURCE_VERSION -- CM_MLPERF_INFERENCE_VERSION -- CM_MLPERF_INFERENCE_VISION_PATH -- CM_MLPERF_LAST_RELEASE +- MLC_MLPERF_INFERENCE_3DUNET_PATH +- MLC_MLPERF_INFERENCE_BERT_PATH +- MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH +- MLC_MLPERF_INFERENCE_CONF_PATH +- MLC_MLPERF_INFERENCE_DLRM_PATH +- MLC_MLPERF_INFERENCE_DLRM_V2_PATH +- MLC_MLPERF_INFERENCE_GPTJ_PATH +- MLC_MLPERF_INFERENCE_RNNT_PATH +- MLC_MLPERF_INFERENCE_RGAT_PATH +- MLC_MLPERF_INFERENCE_SOURCE +- MLC_MLPERF_INFERENCE_SOURCE_VERSION +- MLC_MLPERF_INFERENCE_VERSION +- MLC_MLPERF_INFERENCE_VISION_PATH +- MLC_MLPERF_LAST_RELEASE - +PYTHONPATH prehook_deps: - env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_INFERENCE_SOURCE + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_MLPERF_INFERENCE_SOURCE extra_cache_tags: inference,src force_env_keys: - - CM_GIT_* + - MLC_GIT_* names: - inference-git-repo tags: get,git,repo update_tags_from_env_with_prefix: _branch.: - - CM_GIT_CHECKOUT + - MLC_GIT_CHECKOUT _repo.: - - CM_GIT_URL + - MLC_GIT_URL _sha.: - - CM_GIT_SHA + - MLC_GIT_SHA _submodules.: - - CM_GIT_SUBMODULES + - MLC_GIT_SUBMODULES print_env_at_the_end_disabled: - CM_MLPERF_INFERENCE_SOURCE: Path to MLPerf inference benchmark sources + MLC_MLPERF_INFERENCE_SOURCE: Path to MLPerf inference benchmark sources tags: - get - src @@ -64,126 +64,126 @@ uid: 4b57186581024797 variations: 
3d-unet: env: - CM_SUBMODULE_3D_UNET: 'yes' + MLC_SUBMODULE_3D_UNET: 'yes' branch.#: default_version: custom env: - CM_GIT_CHECKOUT: '#' + MLC_GIT_CHECKOUT: '#' group: checkout deeplearningexamples: env: - CM_SUBMODULE_DEEPLEARNINGEXAMPLES: 'yes' + MLC_SUBMODULE_DEEPLEARNINGEXAMPLES: 'yes' deepsparse: base: - _branch.deepsparse - _repo.https://github.com/neuralmagic/inference full-history: env: - CM_GIT_DEPTH: '' + MLC_GIT_DEPTH: '' group: git-history gn: env: - CM_SUBMODULE_GN: 'yes' + MLC_SUBMODULE_GN: 'yes' no-recurse-submodules: env: - CM_GIT_RECURSE_SUBMODULES: '' + MLC_GIT_RECURSE_SUBMODULES: '' nvidia-pycocotools: base: - patch env: - CM_GIT_PATCH_FILENAME: coco.patch + MLC_GIT_PATCH_FILENAME: coco.patch octoml: base: - short-history - _repo.https://github.com/octoml/inference env: - CM_GIT_URL: https://github.com/octoml/inference + MLC_GIT_URL: https://github.com/octoml/inference openimages-nvidia-pycocotools: base: - patch env: - CM_GIT_PATCH_FILENAME: openimages-pycocotools.patch + MLC_GIT_PATCH_FILENAME: openimages-pycocotools.patch patch: ad: inference-git-repo: tags: _patch env: - CM_GIT_PATCH: 'yes' + MLC_GIT_PATCH: 'yes' pybind: env: - CM_SUBMODULE_PYBIND: 'yes' + MLC_SUBMODULE_PYBIND: 'yes' recurse-submodules: env: - CM_GIT_RECURSE_SUBMODULES: ' --recurse-submodules' + MLC_GIT_RECURSE_SUBMODULES: ' --recurse-submodules' repo.#: env: - CM_GIT_URL: '#' + MLC_GIT_URL: '#' sha.#: env: - CM_GIT_SHA: '#' + MLC_GIT_SHA: '#' group: checkout short-history: default: true env: - CM_GIT_DEPTH: --depth 10 + MLC_GIT_DEPTH: --depth 10 group: git-history submodules.#: env: - CM_GIT_SUBMODULES: '#' + MLC_GIT_SUBMODULES: '#' versions: custom: env: - CM_MLPERF_LAST_RELEASE: v5.0 + MLC_MLPERF_LAST_RELEASE: v5.0 deepsparse: env: - CM_MLPERF_LAST_RELEASE: v5.0 - CM_TMP_GIT_CHECKOUT: deepsparse - CM_TMP_GIT_URL: https://github.com/neuralmagic/inference + MLC_MLPERF_LAST_RELEASE: v5.0 + MLC_TMP_GIT_CHECKOUT: deepsparse + MLC_TMP_GIT_URL: https://github.com/neuralmagic/inference main: env: - CM_MLPERF_LAST_RELEASE: v5.0 - CM_TMP_GIT_CHECKOUT: main + MLC_MLPERF_LAST_RELEASE: v5.0 + MLC_TMP_GIT_CHECKOUT: main master: env: - CM_MLPERF_LAST_RELEASE: v5.0 - CM_TMP_GIT_CHECKOUT: master + MLC_MLPERF_LAST_RELEASE: v5.0 + MLC_TMP_GIT_CHECKOUT: master r2.1: env: - CM_MLPERF_LAST_RELEASE: v2.1 - CM_TMP_GIT_CHECKOUT: v2.1 + MLC_MLPERF_LAST_RELEASE: v2.1 + MLC_TMP_GIT_CHECKOUT: v2.1 r3.0: ad: inference-git-repo: tags: _tag.v3.0 env: - CM_MLPERF_LAST_RELEASE: v3.0 - CM_TMP_GIT_CHECKOUT: '' + MLC_MLPERF_LAST_RELEASE: v3.0 + MLC_TMP_GIT_CHECKOUT: '' r3.1: ad: inference-git-repo: tags: _tag.v3.1 env: - CM_MLPERF_LAST_RELEASE: v3.1 - CM_GIT_CHECKOUT_TAG: 'v3.1' + MLC_MLPERF_LAST_RELEASE: v3.1 + MLC_GIT_CHECKOUT_TAG: 'v3.1' r4.0: ad: inference-git-repo: tags: _tag.v4.0 env: - CM_MLPERF_LAST_RELEASE: v4.0 - CM_GIT_CHECKOUT_TAG: 'v4.0' + MLC_MLPERF_LAST_RELEASE: v4.0 + MLC_GIT_CHECKOUT_TAG: 'v4.0' r4.1: ad: inference-git-repo: tags: _tag.v4.1 env: - CM_MLPERF_LAST_RELEASE: v4.1 - CM_GIT_CHECKOUT_TAG: 'v4.1' + MLC_MLPERF_LAST_RELEASE: v4.1 + MLC_GIT_CHECKOUT_TAG: 'v4.1' r5.0: env: - CM_MLPERF_LAST_RELEASE: v5.0 + MLC_MLPERF_LAST_RELEASE: v5.0 tvm: env: - CM_MLPERF_LAST_RELEASE: v3.1 - CM_TMP_GIT_CHECKOUT: tvm - CM_TMP_GIT_URL: https://github.com/mlcommons/inference + MLC_MLPERF_LAST_RELEASE: v3.1 + MLC_TMP_GIT_CHECKOUT: tvm + MLC_TMP_GIT_URL: https://github.com/mlcommons/inference diff --git a/script/get-mlperf-inference-submission-dir/customize.py b/script/get-mlperf-inference-submission-dir/customize.py index 
e7e7eae85..da58a8af9 100644 --- a/script/get-mlperf-inference-submission-dir/customize.py +++ b/script/get-mlperf-inference-submission-dir/customize.py @@ -12,12 +12,12 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') - if env.get('CM_MLPERF_INFERENCE_SUBMISSION_DIR', '') == '': + if env.get('MLC_MLPERF_INFERENCE_SUBMISSION_DIR', '') == '': if not os.path.exists("mlperf-inference-submission"): os.mkdir("mlperf-inference-submission") - env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'] = os.path.join( + env['MLC_MLPERF_INFERENCE_SUBMISSION_DIR'] = os.path.join( os.getcwd(), "mlperf-inference-submission") return {'return': 0} @@ -27,6 +27,6 @@ def postprocess(i): env = i['env'] - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'] + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_MLPERF_INFERENCE_SUBMISSION_DIR'] return {'return': 0} diff --git a/script/get-mlperf-inference-submission-dir/meta.yaml b/script/get-mlperf-inference-submission-dir/meta.yaml index 84f4b30cc..9590ef7f8 100644 --- a/script/get-mlperf-inference-submission-dir/meta.yaml +++ b/script/get-mlperf-inference-submission-dir/meta.yaml @@ -8,10 +8,10 @@ docker: run: false input_description: {} input_mapping: - submission_dir: CM_MLPERF_INFERENCE_SUBMISSION_DIR + submission_dir: MLC_MLPERF_INFERENCE_SUBMISSION_DIR new_env_keys: -- CM_MLPERF_INFERENCE_SUBMISSION_DIR -- CM_MLPERF_INFERENCE_SUBMISSION_VERSION +- MLC_MLPERF_INFERENCE_SUBMISSION_DIR +- MLC_MLPERF_INFERENCE_SUBMISSION_VERSION new_state_keys: [] post_deps: [] posthook_deps: [] @@ -28,11 +28,11 @@ uid: ddf36a41d6934a7e variations: version.#: env: - CM_MLPERF_INFERENCE_SUBMISSION_VERSION: '#' + MLC_MLPERF_INFERENCE_SUBMISSION_VERSION: '#' group: version version.4_1-dev: default: true env: - CM_MLPERF_INFERENCE_SUBMISSION_VERSION: 4_1-dev + MLC_MLPERF_INFERENCE_SUBMISSION_VERSION: 4_1-dev group: version versions: {} diff --git a/script/get-mlperf-inference-sut-configs/customize.py b/script/get-mlperf-inference-sut-configs/customize.py index 8bc64be4c..fe92bb76a 100644 --- a/script/get-mlperf-inference-sut-configs/customize.py +++ b/script/get-mlperf-inference-sut-configs/customize.py @@ -8,52 +8,52 @@ def postprocess(i): env = i['env'] state = i['state'] - if env.get('CM_HW_NAME', '') == '': - host_name = env.get('CM_HOST_SYSTEM_NAME', 'default').replace("-", "_") - env['CM_HW_NAME'] = host_name + if env.get('MLC_HW_NAME', '') == '': + host_name = env.get('MLC_HOST_SYSTEM_NAME', 'default').replace("-", "_") + env['MLC_HW_NAME'] = host_name - device = env.get('CM_MLPERF_DEVICE', 'cpu') + device = env.get('MLC_MLPERF_DEVICE', 'cpu') - backend = env.get('CM_MLPERF_BACKEND', 'default') - if env.get('CM_MLPERF_BACKEND_VERSION', '') != '': - backend_version = "v" + env.get('CM_MLPERF_BACKEND_VERSION') if not env.get( - 'CM_MLPERF_BACKEND_VERSION').startswith("v") else env.get('CM_MLPERF_BACKEND_VERSION') + backend = env.get('MLC_MLPERF_BACKEND', 'default') + if env.get('MLC_MLPERF_BACKEND_VERSION', '') != '': + backend_version = "v" + env.get('MLC_MLPERF_BACKEND_VERSION') if not env.get( + 'MLC_MLPERF_BACKEND_VERSION').startswith("v") else env.get('MLC_MLPERF_BACKEND_VERSION') else: backend_version = 'vdefault' - if 'CM_SUT_CONFIG' not in state: - state['CM_SUT_CONFIG'] = {} - if 'CM_SUT_CONFIG_PATH' not in state: - state['CM_SUT_CONFIG_PATH'] = {} + if 'MLC_SUT_CONFIG' not in state: + state['MLC_SUT_CONFIG'] = {} + if 'MLC_SUT_CONFIG_PATH' not in state: + 
state['MLC_SUT_CONFIG_PATH'] = {} - implementation_string = env['CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX'] if env.get( - 'CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX', '') != '' else env.get( - 'CM_MLPERF_IMPLEMENTATION', 'default') + implementation_string = env['MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX'] if env.get( + 'MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX', '') != '' else env.get( + 'MLC_MLPERF_IMPLEMENTATION', 'default') run_config = [] for i in range(1, 6): - if env.get(f'CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX{i}', '') != '': + if env.get(f'MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX{i}', '') != '': run_config.append( - env.get(f'CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX{i}')) + env.get(f'MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX{i}')) run_config_string = "_".join( run_config) if run_config else 'default_config' - env['CM_MLPERF_INFERENCE_SUT_RUN_CONFIG'] = run_config_string + env['MLC_MLPERF_INFERENCE_SUT_RUN_CONFIG'] = run_config_string - if env.get('CM_SUT_NAME', '') == '': - env['CM_SUT_NAME'] = env['CM_HW_NAME'] + "-" + implementation_string + "-" + \ + if env.get('MLC_SUT_NAME', '') == '': + env['MLC_SUT_NAME'] = env['MLC_HW_NAME'] + "-" + implementation_string + "-" + \ device + "-" + backend + "-" + backend_version + "-" + run_config_string - if env.get('CM_SUT_CONFIGS_PATH', '') != '': - path = env['CM_SUT_CONFIGS_PATH'] - elif env.get('CM_SUT_USE_EXTERNAL_CONFIG_REPO', '') == "yes": - path = env.get('CM_GIT_CHECKOUT_PATH') + if env.get('MLC_SUT_CONFIGS_PATH', '') != '': + path = env['MLC_SUT_CONFIGS_PATH'] + elif env.get('MLC_SUT_USE_EXTERNAL_CONFIG_REPO', '') == "yes": + path = env.get('MLC_GIT_CHECKOUT_PATH') else: path = os.path.join(os.getcwd(), "configs") config_path = os.path.join( path, - env['CM_HW_NAME'], + env['MLC_HW_NAME'], implementation_string + "-implementation", device + @@ -68,7 +68,7 @@ def postprocess(i): os.makedirs(os.path.dirname(config_path), exist_ok=True) config_path_default = os.path.join( path, - env['CM_HW_NAME'], + env['MLC_HW_NAME'], implementation_string + "-implementation", device + @@ -80,36 +80,36 @@ def postprocess(i): shutil.copy(config_path_default, config_path) else: src_config_full = os.path.join( - env['CM_TMP_CURRENT_SCRIPT_PATH'], + env['MLC_TMP_CURRENT_SCRIPT_PATH'], "configs", - env['CM_HW_NAME'], + env['MLC_HW_NAME'], implementation_string + "-implementation", device + "-device", backend + "-framework", "framework-version-" + backend_version, run_config_string + "-config.yaml") src_config_partial1 = os.path.join( - env['CM_TMP_CURRENT_SCRIPT_PATH'], + env['MLC_TMP_CURRENT_SCRIPT_PATH'], "configs", - env['CM_HW_NAME'], + env['MLC_HW_NAME'], implementation_string + "-implementation", device + "-device", backend + "-framework", "framework-version-" + backend_version, "default-config.yaml") src_config_partial2 = os.path.join( - env['CM_TMP_CURRENT_SCRIPT_PATH'], + env['MLC_TMP_CURRENT_SCRIPT_PATH'], "configs", - env['CM_HW_NAME'], + env['MLC_HW_NAME'], implementation_string + "-implementation", device + "-device", backend + "-framework", "framework-version-default", "default-config.yaml") src_config_partial3 = os.path.join( - env['CM_TMP_CURRENT_SCRIPT_PATH'], + env['MLC_TMP_CURRENT_SCRIPT_PATH'], "configs", - env['CM_HW_NAME'], + env['MLC_HW_NAME'], implementation_string + "-implementation", device + "-device", backend + "-framework", @@ -124,9 +124,9 @@ def postprocess(i): shutil.copy(src_config_partial3, config_path) else: print( - f"Config file missing for given hw_name: '{env['CM_HW_NAME']}', implementation: '{implementation_string}', device: 
'{device}, backend: '{backend}', copying from default") + f"Config file missing for given hw_name: '{env['MLC_HW_NAME']}', implementation: '{implementation_string}', device: '{device}', backend: '{backend}', copying from default") src_config = os.path.join( - env['CM_TMP_CURRENT_SCRIPT_PATH'], + env['MLC_TMP_CURRENT_SCRIPT_PATH'], "configs", "default", "config.yaml") @@ -136,9 +136,9 @@ def postprocess(i): exist_ok=True) shutil.copy(src_config, config_path_default) - state['CM_SUT_CONFIG'][env['CM_SUT_NAME']] = yaml.load( + state['MLC_SUT_CONFIG'][env['MLC_SUT_NAME']] = yaml.load( open(config_path), Loader=yaml.SafeLoader) - state['CM_SUT_CONFIG_NAME'] = env['CM_SUT_NAME'] - state['CM_SUT_CONFIG_PATH'][env['CM_SUT_NAME']] = config_path + state['MLC_SUT_CONFIG_NAME'] = env['MLC_SUT_NAME'] + state['MLC_SUT_CONFIG_PATH'][env['MLC_SUT_NAME']] = config_path return {'return': 0} diff --git a/script/get-mlperf-inference-sut-configs/meta.yaml b/script/get-mlperf-inference-sut-configs/meta.yaml index 8913bdc29..f7d1857fb 100644 --- a/script/get-mlperf-inference-sut-configs/meta.yaml +++ b/script/get-mlperf-inference-sut-configs/meta.yaml @@ -4,23 +4,23 @@ automation_uid: 5b4e0237da074764 cache: false category: MLPerf benchmark support default_env: - CM_GIT_URL: '' - CM_SUT_CONFIGS_PATH: '' + MLC_GIT_URL: '' + MLC_SUT_CONFIGS_PATH: '' deps: - env: - CM_CACHE_DIR_ENV_NAME: CM_SUT_CONFIGS_PATH + MLC_CACHE_DIR_ENV_NAME: MLC_SUT_CONFIGS_PATH extra_cache_tags: mlperf,inference,sut,configs tags: get,cache,dir,_name.mlperf-inference-sut-configs input_mapping: - configs_git_url: CM_GIT_URL - repo_path: CM_SUT_CONFIGS_PATH - run_config: CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX + configs_git_url: MLC_GIT_URL + repo_path: MLC_SUT_CONFIGS_PATH + run_config: MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX new_env_keys: -- CM_HW_* -- CM_SUT_* -- CM_MLPERF_INFERENCE_SUT_RUN_CONFIG +- MLC_HW_* +- MLC_SUT_* +- MLC_MLPERF_INFERENCE_SUT_RUN_CONFIG new_state_keys: -- CM_SUT_* +- MLC_SUT_* tags: - get - mlperf diff --git a/script/get-mlperf-inference-sut-description/customize.py b/script/get-mlperf-inference-sut-description/customize.py index 3490f4580..d93bb2d00 100644 --- a/script/get-mlperf-inference-sut-description/customize.py +++ b/script/get-mlperf-inference-sut-description/customize.py @@ -9,23 +9,23 @@ def preprocess(i): state = i['state'] os_info = i['os_info'] - submitter = env.get('CM_MLPERF_SUBMITTER', 'MLCommons') + submitter = env.get('MLC_MLPERF_SUBMITTER', 'MLCommons') auto_detected_hw_name = False - if env.get('CM_HW_NAME', '') == '': - host_name = env.get('CM_HOST_SYSTEM_NAME', 'default').replace("-", "_") - env['CM_HW_NAME'] = host_name + if env.get('MLC_HW_NAME', '') == '': + host_name = env.get('MLC_HOST_SYSTEM_NAME', 'default').replace("-", "_") + env['MLC_HW_NAME'] = host_name auto_detected_hw_name = True - hw_name = env['CM_HW_NAME'] + hw_name = env['MLC_HW_NAME'] - backend = env.get('CM_MLPERF_BACKEND', '') - backend_version = env.get('CM_MLPERF_BACKEND_VERSION', '') + backend = env.get('MLC_MLPERF_BACKEND', '') + backend_version = env.get('MLC_MLPERF_BACKEND_VERSION', '') sut_suffix = '' backend_name = '' backend_desc = '' if backend: - backend_name = env.get('CM_MLPERF_BACKEND_NAME', backend) + backend_name = env.get('MLC_MLPERF_BACKEND_NAME', backend) sut_suffix = "-" + backend backend_desc = backend_name if backend_version: @@ -34,12 +34,12 @@ def preprocess(i): sut = hw_name + sut_suffix script_path = i['run_script_input']['path'] - sut_desc_path = env['CM_MLPERF_INFERENCE_SUT_DESC_PATH'] + sut_desc_path
= env['MLC_MLPERF_INFERENCE_SUT_DESC_PATH'] sut_path = os.path.join(sut_desc_path, "suts", sut + ".json") - if os.path.exists(sut_path) and env.get('CM_SUT_DESC_CACHE', '') == "yes": + if os.path.exists(sut_path) and env.get('MLC_SUT_DESC_CACHE', '') == "yes": print(f"Reusing SUT description file {sut}") - state['CM_SUT_META'] = json.load(open(sut_path)) + state['MLC_SUT_META'] = json.load(open(sut_path)) else: if not os.path.exists(os.path.dirname(sut_path)): os.makedirs(os.path.dirname(sut_path)) @@ -55,27 +55,27 @@ def preprocess(i): " not found. Copying from default!!!") shutil.copy(default_hw_path, hw_path) - state['CM_HW_META'] = json.load(open(hw_path)) - state['CM_SUT_META'] = state['CM_HW_META'] - state['CM_SUT_META']['framework'] = backend_desc - os_name = env.get('CM_HOST_OS_FLAVOR', '').capitalize() - os_version = env.get('CM_HOST_OS_VERSION', '') + state['MLC_HW_META'] = json.load(open(hw_path)) + state['MLC_SUT_META'] = state['MLC_HW_META'] + state['MLC_SUT_META']['framework'] = backend_desc + os_name = env.get('MLC_HOST_OS_FLAVOR', '').capitalize() + os_version = env.get('MLC_HOST_OS_VERSION', '') if os_name and os_version: os_name_string = os_name + " " + os_version else: os_name_string = '' - os_type = env.get('CM_HOST_OS_TYPE', '') - kernel = env.get('CM_HOST_OS_KERNEL_VERSION', '') + os_type = env.get('MLC_HOST_OS_TYPE', '') + kernel = env.get('MLC_HOST_OS_KERNEL_VERSION', '') if os_type and kernel: os_name_string += " (" + os_type + "-" + kernel - glibc_version = env.get('CM_HOST_OS_GLIBC_VERSION', '') + glibc_version = env.get('MLC_HOST_OS_GLIBC_VERSION', '') if glibc_version: os_name_string += '-glibc' + glibc_version os_name_string += ')' - python_version = env.get('CM_PYTHON_VERSION', '') - compiler = env.get('CM_COMPILER_FAMILY', '') - compiler_version = env.get('CM_COMPILER_VERSION', '') - state['CM_SUT_META']['submitter'] = submitter + python_version = env.get('MLC_PYTHON_VERSION', '') + compiler = env.get('MLC_COMPILER_FAMILY', '') + compiler_version = env.get('MLC_COMPILER_VERSION', '') + state['MLC_SUT_META']['submitter'] = submitter # If Windows and os_name_string is empty, rebuild it: @@ -83,98 +83,98 @@ def preprocess(i): import platform os_name_string = str(platform.platform()) - state['CM_SUT_META']['operating_system'] = os_name_string + state['MLC_SUT_META']['operating_system'] = os_name_string - state['CM_SUT_META']['other_software_stack'] = "Python: " + \ + state['MLC_SUT_META']['other_software_stack'] = "Python: " + \ python_version + ", " + compiler + "-" + compiler_version - if env.get('CM_DOCKER_VERSION', '') != '': - state['CM_SUT_META']['other_software_stack'] += " Docker version:" + \ - env['CM_DOCKER_VERSION'] + if env.get('MLC_DOCKER_VERSION', '') != '': + state['MLC_SUT_META']['other_software_stack'] += " Docker version:" + \ + env['MLC_DOCKER_VERSION'] else: if os.path.exists('/.dockerenv'): - state['CM_SUT_META']['other_software_stack'] += ", Using Docker " + state['MLC_SUT_META']['other_software_stack'] += ", Using Docker " - if state['CM_SUT_META'].get('system_name', '') == '': - system_name = env.get('CM_MLPERF_SYSTEM_NAME') + if state['MLC_SUT_META'].get('system_name', '') == '': + system_name = env.get('MLC_MLPERF_SYSTEM_NAME') if not system_name: - system_name = env.get('CM_HW_NAME') + system_name = env.get('MLC_HW_NAME') if system_name: if auto_detected_hw_name: system_name += " (auto detected)" else: system_name = " (generic)" - state['CM_SUT_META']['system_name'] = system_name + state['MLC_SUT_META']['system_name'] = system_name # 
Add GPU info - if env.get('CM_MLPERF_DEVICE', '') == "gpu" or env.get( - 'CM_MLPERF_DEVICE', '') == "cuda": - if env.get('CM_CUDA_VERSION', '') != '': - cuda_version = " , CUDA " + env['CM_CUDA_VERSION'] - state['CM_SUT_META']['other_software_stack'] += cuda_version + if env.get('MLC_MLPERF_DEVICE', '') == "gpu" or env.get( + 'MLC_MLPERF_DEVICE', '') == "cuda": + if env.get('MLC_CUDA_VERSION', '') != '': + cuda_version = " , CUDA " + env['MLC_CUDA_VERSION'] + state['MLC_SUT_META']['other_software_stack'] += cuda_version if 'cm_cuda_device_prop' in state: - state['CM_SUT_META']['accelerator_frequency'] = state['cm_cuda_device_prop']['Max clock rate'] - state['CM_SUT_META']['accelerator_memory_capacity'] = str(int( + state['MLC_SUT_META']['accelerator_frequency'] = state['cm_cuda_device_prop']['Max clock rate'] + state['MLC_SUT_META']['accelerator_memory_capacity'] = str(int( state['cm_cuda_device_prop']['Global memory']) / (1024 * 1024.0 * 1024)) + " GB" - state['CM_SUT_META']['accelerator_model_name'] = state['cm_cuda_device_prop']['GPU Name'] - num_accelerators = env.get('CM_CUDA_NUM_DEVICES', "1") - state['CM_SUT_META']['accelerators_per_node'] = num_accelerators + state['MLC_SUT_META']['accelerator_model_name'] = state['cm_cuda_device_prop']['GPU Name'] + num_accelerators = env.get('MLC_CUDA_NUM_DEVICES', "1") + state['MLC_SUT_META']['accelerators_per_node'] = num_accelerators - if state['CM_SUT_META'].get('host_processor_core_count', '') == '': + if state['MLC_SUT_META'].get('host_processor_core_count', '') == '': physical_cores_per_node = env.get( - 'CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET') + 'MLC_HOST_CPU_PHYSICAL_CORES_PER_SOCKET') if physical_cores_per_node is None or physical_cores_per_node == '': if os_info['platform'] == 'windows': physical_cores_per_node = '1' - state['CM_SUT_META']['host_processor_core_count'] = physical_cores_per_node + state['MLC_SUT_META']['host_processor_core_count'] = physical_cores_per_node - if state['CM_SUT_META'].get('host_processor_model_name', '') == '': - state['CM_SUT_META']['host_processor_model_name'] = env.get( - 'CM_HOST_CPU_MODEL_NAME', 'undefined') - if state['CM_SUT_META'].get('host_processors_per_node', '') == '': - x = env.get('CM_HOST_CPU_SOCKETS', '') + if state['MLC_SUT_META'].get('host_processor_model_name', '') == '': + state['MLC_SUT_META']['host_processor_model_name'] = env.get( + 'MLC_HOST_CPU_MODEL_NAME', 'undefined') + if state['MLC_SUT_META'].get('host_processors_per_node', '') == '': + x = env.get('MLC_HOST_CPU_SOCKETS', '') if x == '' and os_info['platform'] == 'windows': x = '1' - state['CM_SUT_META']['host_processors_per_node'] = x - - if state['CM_SUT_META'].get('host_processor_caches', '') == '': - state['CM_SUT_META']['host_processor_caches'] = "L1d cache: " + env.get('CM_HOST_CPU_L1D_CACHE_SIZE', ' ') + \ - ", L1i cache: " + env.get('CM_HOST_CPU_L1I_CACHE_SIZE', ' ') + ", L2 cache: " + \ - env.get('CM_HOST_CPU_L2_CACHE_SIZE', ' ') + \ - ", L3 cache: " + env.get('CM_HOST_CPU_L3_CACHE_SIZE', ' ') - - if state['CM_SUT_META'].get('host_processor_frequency', '') == '': - state['CM_SUT_META']['host_processor_frequency'] = env.get( - 'CM_HOST_CPU_MAX_MHZ') if env.get('CM_HOST_CPU_MAX_MHZ', '') != '' else 'undefined' - if state['CM_SUT_META'].get('host_memory_capacity', '') == '': - state['CM_SUT_META']['host_memory_capacity'] = env.get( - 'CM_HOST_MEMORY_CAPACITY') if env.get('CM_HOST_MEMORY_CAPACITY', '') != '' else 'undefined' - if state['CM_SUT_META'].get('host_storage_capacity', '') == '': - 
state['CM_SUT_META']['host_storage_capacity'] = env.get( - 'CM_HOST_DISK_CAPACITY') if env.get('CM_HOST_DISK_CAPACITY', '') != '' else 'undefined' - if 'CM_SUT_SW_NOTES' in env: - sw_notes = env['CM_SUT_SW_NOTES'] + state['MLC_SUT_META']['host_processors_per_node'] = x + + if state['MLC_SUT_META'].get('host_processor_caches', '') == '': + state['MLC_SUT_META']['host_processor_caches'] = "L1d cache: " + env.get('MLC_HOST_CPU_L1D_CACHE_SIZE', ' ') + \ + ", L1i cache: " + env.get('MLC_HOST_CPU_L1I_CACHE_SIZE', ' ') + ", L2 cache: " + \ + env.get('MLC_HOST_CPU_L2_CACHE_SIZE', ' ') + \ + ", L3 cache: " + env.get('MLC_HOST_CPU_L3_CACHE_SIZE', ' ') + + if state['MLC_SUT_META'].get('host_processor_frequency', '') == '': + state['MLC_SUT_META']['host_processor_frequency'] = env.get( + 'MLC_HOST_CPU_MAX_MHZ') if env.get('MLC_HOST_CPU_MAX_MHZ', '') != '' else 'undefined' + if state['MLC_SUT_META'].get('host_memory_capacity', '') == '': + state['MLC_SUT_META']['host_memory_capacity'] = env.get( + 'MLC_HOST_MEMORY_CAPACITY') if env.get('MLC_HOST_MEMORY_CAPACITY', '') != '' else 'undefined' + if state['MLC_SUT_META'].get('host_storage_capacity', '') == '': + state['MLC_SUT_META']['host_storage_capacity'] = env.get( + 'MLC_HOST_DISK_CAPACITY') if env.get('MLC_HOST_DISK_CAPACITY', '') != '' else 'undefined' + if 'MLC_SUT_SW_NOTES' in env: + sw_notes = env['MLC_SUT_SW_NOTES'] else: sw_notes = '' - state['CM_SUT_META']['sw_notes'] = sw_notes + state['MLC_SUT_META']['sw_notes'] = sw_notes - if env.get('CM_SUDO_USER', '') == "yes" and env.get( - 'CM_HOST_OS_TYPE', 'linux'): + if env.get('MLC_SUDO_USER', '') == "yes" and env.get( + 'MLC_HOST_OS_TYPE', 'linux'): r = i['automation'].run_native_script( {'run_script_input': i['run_script_input'], 'env': env, 'script_name': 'detect_memory'}) if r['return'] > 0: return r - if env.get('CM_HOST_MEM_INFO', '') != '': - state['CM_SUT_META']['host_memory_configuration'] = env['CM_HOST_MEM_INFO'] + if env.get('MLC_HOST_MEM_INFO', '') != '': + state['MLC_SUT_META']['host_memory_configuration'] = env['MLC_HOST_MEM_INFO'] - state['CM_SUT_META'] = dict(sorted(state['CM_SUT_META'].items())) + state['MLC_SUT_META'] = dict(sorted(state['MLC_SUT_META'].items())) sut_file = open(sut_path, "w") - json.dump(state['CM_SUT_META'], sut_file, indent=4) + json.dump(state['MLC_SUT_META'], sut_file, indent=4) sut_file.close() return {'return': 0} diff --git a/script/get-mlperf-inference-sut-description/detect_memory.sh b/script/get-mlperf-inference-sut-description/detect_memory.sh index 8a65daa13..4a21653b9 100644 --- a/script/get-mlperf-inference-sut-description/detect_memory.sh +++ b/script/get-mlperf-inference-sut-description/detect_memory.sh @@ -1,7 +1,7 @@ #!/bin/bash -if [[ ${CM_SUDO_USER} == "yes" ]]; then - ${CM_SUDO} dmidecode -t memory > meminfo.out - ${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/get_memory_info.py +if [[ ${MLC_SUDO_USER} == "yes" ]]; then + ${MLC_SUDO} dmidecode -t memory > meminfo.out + ${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/get_memory_info.py fi test $? -eq 0 || return $? 
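The `detect_memory.sh` hunk above and the `get_memory_info.py` hunk below rely on one convention: a native script reports results back to the automation by writing `KEY=value` lines into `tmp-run-env.out` (here, `MLC_HOST_MEM_INFO=...`), and only keys matching the script's `new_env_keys` patterns are exported afterwards. That is why the prefix rename has to land on the shell side, the Python side, and the `meta.yaml` patterns in the same patch; a key still written as `CM_HOST_MEM_INFO` would simply be dropped. A minimal sketch of the read-back half, assuming a hypothetical `parse_run_env` helper and prefix filter in place of the real automation code:

```python
# Illustrative sketch of the tmp-run-env.out exchange; parse_run_env and the
# prefix filter below are assumptions standing in for the actual automation.
import os

def parse_run_env(path="tmp-run-env.out"):
    """Read KEY=value lines written by a native script into a dict."""
    updates = {}
    if not os.path.exists(path):
        return updates
    with open(path) as f:
        for line in f:
            line = line.strip()
            if "=" not in line:
                continue
            key, _, value = line.partition("=")
            updates[key] = value
    return updates

updates = parse_run_env()
# Keep only keys the script declares in new_env_keys; after this patch the
# declared prefix is MLC_, so a stale CM_HOST_MEM_INFO would be filtered out.
exported = {k: v for k, v in updates.items() if k.startswith("MLC_")}
print(exported.get("MLC_HOST_MEM_INFO", "<memory info not detected>"))
```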
diff --git a/script/get-mlperf-inference-sut-description/get_memory_info.py b/script/get-mlperf-inference-sut-description/get_memory_info.py index 27d0f870a..220517576 100644 --- a/script/get-mlperf-inference-sut-description/get_memory_info.py +++ b/script/get-mlperf-inference-sut-description/get_memory_info.py @@ -58,4 +58,4 @@ meminfo_string = ", ".join(meminfo) with open("tmp-run-env.out", "w") as f: - f.write(f"CM_HOST_MEM_INFO={meminfo_string}") + f.write(f"MLC_HOST_MEM_INFO={meminfo_string}") diff --git a/script/get-mlperf-inference-sut-description/meta.yaml b/script/get-mlperf-inference-sut-description/meta.yaml index 9d2139869..c46bf930b 100644 --- a/script/get-mlperf-inference-sut-description/meta.yaml +++ b/script/get-mlperf-inference-sut-description/meta.yaml @@ -4,7 +4,7 @@ automation_uid: 5b4e0237da074764 cache: false category: MLPerf benchmark support default_env: - CM_SUT_DESC_CACHE: 'no' + MLC_SUT_DESC_CACHE: 'no' deps: - tags: detect,os - tags: detect,cpu @@ -15,36 +15,36 @@ deps: - names: - compiler skip_if_env: - CM_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP: + MLC_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP: - 'yes' tags: get,compiler - enable_if_env: - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - gpu - cuda tags: get,cuda-devices,_with-pycuda - enable_if_env: - CM_DETERMINE_MEMORY_CONFIGURATION: + MLC_DETERMINE_MEMORY_CONFIGURATION: - 'yes' - CM_HOST_OS_TYPE: + MLC_HOST_OS_TYPE: - linux tags: detect,sudo - tags: get,generic-python-lib,_package.dmiparser - env: - CM_CACHE_DIR_ENV_NAME: CM_MLPERF_INFERENCE_SUT_DESC_PATH + MLC_CACHE_DIR_ENV_NAME: MLC_MLPERF_INFERENCE_SUT_DESC_PATH extra_cache_tags: mlperf,inference,sut,descriptions tags: get,cache,dir,_name.mlperf-inference-sut-descriptions docker: run: false input_mapping: - name: CM_HW_NAME - submitter: CM_MLPERF_SUBMITTER + name: MLC_HW_NAME + submitter: MLC_MLPERF_SUBMITTER new_env_keys: -- CM_HW_* -- CM_SUT_* +- MLC_HW_* +- MLC_SUT_* new_state_keys: -- CM_SUT_* -- CM_HW_* +- MLC_SUT_* +- MLC_HW_* tags: - get - mlperf diff --git a/script/get-mlperf-inference-utils/customize.py b/script/get-mlperf-inference-utils/customize.py index 2f0310932..dcbe141bb 100644 --- a/script/get-mlperf-inference-utils/customize.py +++ b/script/get-mlperf-inference-utils/customize.py @@ -13,14 +13,14 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') - utils_path = env['CM_TMP_CURRENT_SCRIPT_PATH'] + utils_path = env['MLC_TMP_CURRENT_SCRIPT_PATH'] env['+PYTHONPATH'] = [utils_path] submission_checker_dir = os.path.join( - env['CM_MLPERF_INFERENCE_SOURCE'], "tools", "submission") + env['MLC_MLPERF_INFERENCE_SOURCE'], "tools", "submission") sys.path.append(submission_checker_dir) sys.path.append(utils_path) diff --git a/script/get-mlperf-logging/customize.py b/script/get-mlperf-logging/customize.py index 318a83d0a..4f9f86db5 100644 --- a/script/get-mlperf-logging/customize.py +++ b/script/get-mlperf-logging/customize.py @@ -10,7 +10,7 @@ def preprocess(i): env = i['env'] meta = i['meta'] - env['CM_MLPERF_LOGGING_SRC_PATH'] = env['CM_GIT_REPO_CHECKOUT_PATH'] + env['MLC_MLPERF_LOGGING_SRC_PATH'] = env['MLC_GIT_REPO_CHECKOUT_PATH'] return {'return': 0} @@ -18,6 +18,6 @@ def preprocess(i): def postprocess(i): env = i['env'] - env['+PYTHONPATH'] = [env['CM_MLPERF_LOGGING_SRC_PATH']] + env['+PYTHONPATH'] = [env['MLC_MLPERF_LOGGING_SRC_PATH']] return {'return': 0} diff --git a/script/get-mlperf-logging/meta.yaml b/script/get-mlperf-logging/meta.yaml index 
c173a906a..3422bd2b0 100644 --- a/script/get-mlperf-logging/meta.yaml +++ b/script/get-mlperf-logging/meta.yaml @@ -11,10 +11,10 @@ deps: - python3 tags: get,python3 - env: - CM_GIT_CHECKOUT: master + MLC_GIT_CHECKOUT: master tags: get,git,repo,_repo.https://github.com/mlcommons/logging new_env_keys: -- CM_MLPERF_LOGGING_* +- MLC_MLPERF_LOGGING_* - +PYTHONPATH tags: - get diff --git a/script/get-mlperf-power-dev/customize.py b/script/get-mlperf-power-dev/customize.py index 1cf439730..b6647f13a 100644 --- a/script/get-mlperf-power-dev/customize.py +++ b/script/get-mlperf-power-dev/customize.py @@ -13,10 +13,10 @@ def preprocess(i): def postprocess(i): env = i['env'] - if env.get('CM_VERSION', '') == '': - env['CM_VERSION'] = "master" + if env.get('MLC_VERSION', '') == '': + env['MLC_VERSION'] = "master" - if env.get('CM_GIT_REPO_CURRENT_HASH', '') != '': - env['CM_VERSION'] += "-git-" + env['CM_GIT_REPO_CURRENT_HASH'] + if env.get('MLC_GIT_REPO_CURRENT_HASH', '') != '': + env['MLC_VERSION'] += "-git-" + env['MLC_GIT_REPO_CURRENT_HASH'] - return {'return': 0, 'version': env['CM_VERSION']} + return {'return': 0, 'version': env['MLC_VERSION']} diff --git a/script/get-mlperf-power-dev/meta.yaml b/script/get-mlperf-power-dev/meta.yaml index c97047225..4258e27e0 100644 --- a/script/get-mlperf-power-dev/meta.yaml +++ b/script/get-mlperf-power-dev/meta.yaml @@ -4,30 +4,30 @@ automation_uid: 5b4e0237da074764 cache: true category: MLPerf benchmark support default_env: - CM_GIT_CHECKOUT_FOLDER: power-dev - CM_GIT_DEPTH: --depth 1 - CM_GIT_PATCH: 'no' + MLC_GIT_CHECKOUT_FOLDER: power-dev + MLC_GIT_DEPTH: --depth 1 + MLC_GIT_PATCH: 'no' deps: [] new_env_keys: -- CM_MLPERF_POWER_SOURCE +- MLC_MLPERF_POWER_SOURCE prehook_deps: - env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_POWER_SOURCE + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_MLPERF_POWER_SOURCE extra_cache_tags: mlperf,power,power-dev,src force_env_keys: - - CM_GIT_* + - MLC_GIT_* names: - mlperf-power-dev-git-repo tags: get,git,repo update_tags_from_env_with_prefix: _branch.: - - CM_GIT_CHECKOUT + - MLC_GIT_CHECKOUT _repo.: - - CM_GIT_URL + - MLC_GIT_URL _sha.: - - CM_GIT_SHA + - MLC_GIT_SHA _tag.: - - CM_GIT_CHECKOUT_TAG + - MLC_GIT_CHECKOUT_TAG tags: - get - src @@ -40,26 +40,26 @@ uid: 72aa56768c994bcf variations: branch.#: env: - CM_GIT_CHECKOUT: '#' + MLC_GIT_CHECKOUT: '#' group: checkout mlcommons: default: true env: - CM_GIT_URL: https://github.com/mlcommons/power-dev.git + MLC_GIT_URL: https://github.com/mlcommons/power-dev.git group: repo octoml: env: - CM_GIT_URL: https://github.com/octoml/power-dev.git + MLC_GIT_URL: https://github.com/octoml/power-dev.git group: repo repo.#: env: - CM_GIT_URL: '#' + MLC_GIT_URL: '#' group: repo sha.#: env: - CM_GIT_SHA: '#' + MLC_GIT_SHA: '#' group: checkout tag.#: env: - CM_GIT_CHECKOUT_TAG: '#' + MLC_GIT_CHECKOUT_TAG: '#' group: checkout diff --git a/script/get-mlperf-tiny-eembc-energy-runner-src/customize.py b/script/get-mlperf-tiny-eembc-energy-runner-src/customize.py index 477df85b1..ae9548c8b 100644 --- a/script/get-mlperf-tiny-eembc-energy-runner-src/customize.py +++ b/script/get-mlperf-tiny-eembc-energy-runner-src/customize.py @@ -14,8 +14,8 @@ def preprocess(i): env = i['env'] meta = i['meta'] - if 'CM_GIT_DEPTH' not in env: - env['CM_GIT_DEPTH'] = '' + if 'MLC_GIT_DEPTH' not in env: + env['MLC_GIT_DEPTH'] = '' return {'return': 0} @@ -25,9 +25,9 @@ def postprocess(i): env = i['env'] state = i['state'] - env['CM_EEMBC_ENERGY_RUNNER_SRC'] = os.path.join(os.getcwd(), 'src') + 
env['MLC_EEMBC_ENERGY_RUNNER_SRC'] = os.path.join(os.getcwd(), 'src') datasets_src_path = os.path.join(os.getcwd(), 'src', 'datasets') - env['CM_EEMBC_ENERGY_RUNNER_SRC_DATASETS'] = datasets_src_path + env['MLC_EEMBC_ENERGY_RUNNER_SRC_DATASETS'] = datasets_src_path # Get user directory for EEMBC runner path home_directory = os.path.expanduser('~') @@ -37,7 +37,7 @@ def postprocess(i): print('') print('Path to EEMBC runner sessions: {}'.format(sessions_path)) - env['CM_EEMBC_ENERGY_RUNNER_SESSIONS'] = sessions_path + env['MLC_EEMBC_ENERGY_RUNNER_SESSIONS'] = sessions_path if not os.path.isdir(sessions_path): os.makedirs(sessions_path) @@ -56,7 +56,7 @@ def postprocess(i): if not os.path.isdir(datasets_path): os.makedirs(datasets_path) - env['CM_EEMBC_ENERGY_RUNNER_DATASETS'] = datasets_path + env['MLC_EEMBC_ENERGY_RUNNER_DATASETS'] = datasets_path print('') print('Copying datasets to EEMBC user space ...') diff --git a/script/get-mlperf-tiny-eembc-energy-runner-src/meta.yaml b/script/get-mlperf-tiny-eembc-energy-runner-src/meta.yaml index 32e42c206..1ebe5de54 100644 --- a/script/get-mlperf-tiny-eembc-energy-runner-src/meta.yaml +++ b/script/get-mlperf-tiny-eembc-energy-runner-src/meta.yaml @@ -4,12 +4,12 @@ automation_uid: 5b4e0237da074764 cache: true category: MLPerf benchmark support default_env: - CM_GIT_CHECKOUT: main - CM_GIT_PATCH: 'no' - CM_GIT_RECURSE_SUBMODULES: '' - CM_GIT_URL: https://github.com/eembc/energyrunner + MLC_GIT_CHECKOUT: main + MLC_GIT_PATCH: 'no' + MLC_GIT_RECURSE_SUBMODULES: '' + MLC_GIT_URL: https://github.com/eembc/energyrunner new_env_keys: -- CM_EEMBC_ENERGY_RUNNER_* +- MLC_EEMBC_ENERGY_RUNNER_* - +PYTHONPATH tags: - get diff --git a/script/get-mlperf-tiny-eembc-energy-runner-src/run.bat b/script/get-mlperf-tiny-eembc-energy-runner-src/run.bat index 799902b4d..731272612 100644 --- a/script/get-mlperf-tiny-eembc-energy-runner-src/run.bat +++ b/script/get-mlperf-tiny-eembc-energy-runner-src/run.bat @@ -1,25 +1,25 @@ @echo off set CUR_DIR=%cd% -set SCRIPT_DIR=%CM_TMP_CURRENT_SCRIPT_PATH% +set SCRIPT_DIR=%MLC_TMP_CURRENT_SCRIPT_PATH% echo ****************************************************** -echo Cloning EEMBC Energy Runner from %CM_GIT_URL% with branch %CM_GIT_CHECKOUT% %CM_GIT_DEPTH% %CM_GIT_RECURSE_SUBMODULES% ... +echo Cloning EEMBC Energy Runner from %MLC_GIT_URL% with branch %MLC_GIT_CHECKOUT% %MLC_GIT_DEPTH% %MLC_GIT_RECURSE_SUBMODULES% ... set folder=src if not exist %folder% ( - if not "%CM_GIT_SHA%" == "" ( - git clone %CM_GIT_RECURSE_SUBMODULES% -b "%CM_GIT_CHECKOUT%" %CM_GIT_URL% %CM_GIT_DEPTH% %folder% + if not "%MLC_GIT_SHA%" == "" ( + git clone %MLC_GIT_RECURSE_SUBMODULES% -b "%MLC_GIT_CHECKOUT%" %MLC_GIT_URL% %MLC_GIT_DEPTH% %folder% IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% cd %folder% ) else ( - git clone %CM_GIT_RECURSE_SUBMODULES% %CM_GIT_URL% %CM_GIT_DEPTH% %folder% + git clone %MLC_GIT_RECURSE_SUBMODULES% %MLC_GIT_URL% %MLC_GIT_DEPTH% %folder% IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% cd %folder% - git checkout "%CM_GIT_CHECKOUT%" + git checkout "%MLC_GIT_CHECKOUT%" IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% ) ) else ( @@ -29,8 +29,8 @@ if not exist %folder% ( ) -if not "%CM_GIT_SUBMODULES%" == "" ( - for /F %%s in ("%CM_GIT_SUBMODULES%") do ( +if not "%MLC_GIT_SUBMODULES%" == "" ( + for /F %%s in ("%MLC_GIT_SUBMODULES%") do ( echo. 
echo Initializing submodule %%s git submodule update --init %%s @@ -39,17 +39,17 @@ if not "%CM_GIT_SUBMODULES%" == "" ( ) -if "%CM_GIT_PATCH%" == "yes" ( +if "%MLC_GIT_PATCH%" == "yes" ( echo Git patching is not yet implemented in CM script "get-mlperf-tiny-src" - please add it! pause - rem set patch_filename=%CM_GIT_PATCH_FILENAME% - rem if [ ! -n ${CM_GIT_PATCH_FILENAMES} ]; then - rem patchfile=${CM_GIT_PATCH_FILENAME:-"git.patch"} - rem CM_GIT_PATCH_FILENAMES=$patchfile + rem set patch_filename=%MLC_GIT_PATCH_FILENAME% + rem if [ ! -n ${MLC_GIT_PATCH_FILENAMES} ]; then + rem patchfile=${MLC_GIT_PATCH_FILENAME:-"git.patch"} + rem MLC_GIT_PATCH_FILENAMES=$patchfile rem fi rem - rem IFS=', ' read -r -a patch_files <<< ${CM_GIT_PATCH_FILENAMES} + rem IFS=', ' read -r -a patch_files <<< ${MLC_GIT_PATCH_FILENAMES} rem rem for patch_filename in "${patch_files[@]}" rem do diff --git a/script/get-mlperf-tiny-eembc-energy-runner-src/run.sh b/script/get-mlperf-tiny-eembc-energy-runner-src/run.sh index ea2645f7e..abb91ef88 100644 --- a/script/get-mlperf-tiny-eembc-energy-runner-src/run.sh +++ b/script/get-mlperf-tiny-eembc-energy-runner-src/run.sh @@ -1,26 +1,26 @@ #!/bin/bash CUR_DIR=$PWD -SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH} +SCRIPT_DIR=${MLC_TMP_CURRENT_SCRIPT_PATH} echo "******************************************************" -echo "Cloning EEMBC Energy Runner from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT} ${CM_GIT_DEPTH} ${CM_GIT_RECURSE_SUBMODULES} ..." +echo "Cloning EEMBC Energy Runner from ${MLC_GIT_URL} with branch ${MLC_GIT_CHECKOUT} ${MLC_GIT_DEPTH} ${MLC_GIT_RECURSE_SUBMODULES} ..." if [ ! -d "src" ]; then - if [ -z ${CM_GIT_SHA} ]; then - git clone ${CM_GIT_RECURSE_SUBMODULES} -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} ${CM_GIT_DEPTH} src + if [ -z ${MLC_GIT_SHA} ]; then + git clone ${MLC_GIT_RECURSE_SUBMODULES} -b "${MLC_GIT_CHECKOUT}" ${MLC_GIT_URL} ${MLC_GIT_DEPTH} src cd src else - git clone ${CM_GIT_RECURSE_SUBMODULES} ${CM_GIT_URL} ${CM_GIT_DEPTH} src + git clone ${MLC_GIT_RECURSE_SUBMODULES} ${MLC_GIT_URL} ${MLC_GIT_DEPTH} src cd src - git checkout -b "${CM_GIT_CHECKOUT}" + git checkout -b "${MLC_GIT_CHECKOUT}" fi if [ "${?}" != "0" ]; then exit 1; fi else cd src fi -IFS=',' read -r -a submodules <<< "${CM_GIT_SUBMODULES}" +IFS=',' read -r -a submodules <<< "${MLC_GIT_SUBMODULES}" for submodule in "${submodules[@]}" do @@ -29,8 +29,8 @@ do if [ "${?}" != "0" ]; then exit 1; fi done -if [ ${CM_GIT_PATCH} == "yes" ]; then - patch_filename=${CM_GIT_PATCH_FILENAME:-git.patch} +if [ ${MLC_GIT_PATCH} == "yes" ]; then + patch_filename=${MLC_GIT_PATCH_FILENAME:-git.patch} echo "Applying patch ${SCRIPT_DIR}/patch/$patch_filename" git apply ${SCRIPT_DIR}/patch/"$patch_filename" if [ "${?}" != "0" ]; then exit 1; fi diff --git a/script/get-mlperf-tiny-src/customize.py b/script/get-mlperf-tiny-src/customize.py index 58eba296d..2d61fbacc 100644 --- a/script/get-mlperf-tiny-src/customize.py +++ b/script/get-mlperf-tiny-src/customize.py @@ -14,8 +14,8 @@ def preprocess(i): env = i['env'] meta = i['meta'] - if 'CM_GIT_DEPTH' not in env: - env['CM_GIT_DEPTH'] = '' + if 'MLC_GIT_DEPTH' not in env: + env['MLC_GIT_DEPTH'] = '' return {'return': 0} @@ -25,36 +25,36 @@ def postprocess(i): env = i['env'] state = i['state'] - env['CM_MLPERF_TINY_SRC'] = os.path.join(os.getcwd(), 'src') - env['CM_MLPERF_TINY_BENCHMARK'] = os.path.join( + env['MLC_MLPERF_TINY_SRC'] = os.path.join(os.getcwd(), 'src') + env['MLC_MLPERF_TINY_BENCHMARK'] = os.path.join( os.getcwd(), 'src', 'benchmark') - 
env['CM_MLPERF_TINY_DATASETS'] = os.path.join( + env['MLC_MLPERF_TINY_DATASETS'] = os.path.join( os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets') - env['CM_MLPERF_TINY_DATASETS_AD'] = os.path.join( + env['MLC_MLPERF_TINY_DATASETS_AD'] = os.path.join( os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets', 'ad01') - env['CM_MLPERF_TINY_DATASETS_IC'] = os.path.join( + env['MLC_MLPERF_TINY_DATASETS_IC'] = os.path.join( os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets', 'ic01') - env['CM_MLPERF_TINY_DATASETS_KWS'] = os.path.join( + env['MLC_MLPERF_TINY_DATASETS_KWS'] = os.path.join( os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets', 'kws01') - env['CM_MLPERF_TINY_DATASETS_KWS_OPEN'] = os.path.join( + env['MLC_MLPERF_TINY_DATASETS_KWS_OPEN'] = os.path.join( os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets', 'kws01-open') - env['CM_MLPERF_TINY_DATASETS_VWW'] = os.path.join( + env['MLC_MLPERF_TINY_DATASETS_VWW'] = os.path.join( os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets', 'vww01') - env['CM_MLPERF_TINY_TRAINING'] = os.path.join( + env['MLC_MLPERF_TINY_TRAINING'] = os.path.join( os.getcwd(), 'src', 'benchmark', 'training') - env['CM_MLPERF_TINY_TRAINING_AD'] = os.path.join( + env['MLC_MLPERF_TINY_TRAINING_AD'] = os.path.join( os.getcwd(), 'src', 'benchmark', 'training', 'anomaly_detection') - env['CM_MLPERF_TINY_TRAINING_IC'] = os.path.join( + env['MLC_MLPERF_TINY_TRAINING_IC'] = os.path.join( os.getcwd(), 'src', 'benchmark', 'training', 'image_classification') - env['CM_MLPERF_TINY_TRAINING_KWS'] = os.path.join( + env['MLC_MLPERF_TINY_TRAINING_KWS'] = os.path.join( os.getcwd(), 'src', 'benchmark', 'training', 'keyword_spotting') - env['CM_MLPERF_TINY_TRAINING_VWW'] = os.path.join( + env['MLC_MLPERF_TINY_TRAINING_VWW'] = os.path.join( os.getcwd(), 'src', 'benchmark', 'training', 'visual_wake_words') # 20221024: we save and restore env in the main script and can clean env here for determinism # if '+PYTHONPATH' not in env: env['+PYTHONPATH'] = [] # env['+PYTHONPATH']=[] -# env['+PYTHONPATH'].append(os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], 'python')) -# env['+PYTHONPATH'].append(os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], 'tools', 'submission')) +# env['+PYTHONPATH'].append(os.path.join(env['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], 'python')) +# env['+PYTHONPATH'].append(os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], 'tools', 'submission')) return {'return': 0} diff --git a/script/get-mlperf-tiny-src/meta.yaml b/script/get-mlperf-tiny-src/meta.yaml index 86e859a73..24967f1de 100644 --- a/script/get-mlperf-tiny-src/meta.yaml +++ b/script/get-mlperf-tiny-src/meta.yaml @@ -4,10 +4,10 @@ automation_uid: 5b4e0237da074764 cache: true category: MLPerf benchmark support default_env: - CM_GIT_CHECKOUT: master - CM_GIT_PATCH: 'no' - CM_GIT_RECURSE_SUBMODULES: '' - CM_GIT_URL: https://github.com/mlcommons/tiny.git + MLC_GIT_CHECKOUT: master + MLC_GIT_PATCH: 'no' + MLC_GIT_RECURSE_SUBMODULES: '' + MLC_GIT_URL: https://github.com/mlcommons/tiny.git deps: - tags: detect,os - names: @@ -15,7 +15,7 @@ deps: - python3 tags: get,python3 new_env_keys: -- CM_MLPERF_TINY_* +- MLC_MLPERF_TINY_* - +PYTHONPATH tags: - get diff --git a/script/get-mlperf-tiny-src/run.bat b/script/get-mlperf-tiny-src/run.bat index e94998ad7..9d29beefd 100644 --- a/script/get-mlperf-tiny-src/run.bat +++ b/script/get-mlperf-tiny-src/run.bat @@ -1,25 +1,25 @@ @echo off set CUR_DIR=%cd% -set SCRIPT_DIR=%CM_TMP_CURRENT_SCRIPT_PATH% +set 
SCRIPT_DIR=%MLC_TMP_CURRENT_SCRIPT_PATH% echo ****************************************************** -echo Cloning MLCommons from %CM_GIT_URL% with branch %CM_GIT_CHECKOUT% %CM_GIT_DEPTH% %CM_GIT_RECURSE_SUBMODULES% ... +echo Cloning MLCommons from %MLC_GIT_URL% with branch %MLC_GIT_CHECKOUT% %MLC_GIT_DEPTH% %MLC_GIT_RECURSE_SUBMODULES% ... set folder=src if not exist %folder% ( - if not "%CM_GIT_SHA%" == "" ( - git clone %CM_GIT_RECURSE_SUBMODULES% -b "%CM_GIT_CHECKOUT%" %CM_GIT_URL% %CM_GIT_DEPTH% %folder% + if not "%MLC_GIT_SHA%" == "" ( + git clone %MLC_GIT_RECURSE_SUBMODULES% -b "%MLC_GIT_CHECKOUT%" %MLC_GIT_URL% %MLC_GIT_DEPTH% %folder% IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% cd %folder% ) else ( - git clone %CM_GIT_RECURSE_SUBMODULES% %CM_GIT_URL% %CM_GIT_DEPTH% %folder% + git clone %MLC_GIT_RECURSE_SUBMODULES% %MLC_GIT_URL% %MLC_GIT_DEPTH% %folder% IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% cd %folder% - git checkout "%CM_GIT_CHECKOUT%" + git checkout "%MLC_GIT_CHECKOUT%" IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% ) ) else ( @@ -29,8 +29,8 @@ if not exist %folder% ( ) -if not "%CM_GIT_SUBMODULES%" == "" ( - for /F %%s in ("%CM_GIT_SUBMODULES%") do ( +if not "%MLC_GIT_SUBMODULES%" == "" ( + for /F %%s in ("%MLC_GIT_SUBMODULES%") do ( echo. echo Initializing submodule %%s git submodule update --init %%s @@ -39,17 +39,17 @@ if not "%CM_GIT_SUBMODULES%" == "" ( ) -if "%CM_GIT_PATCH%" == "yes" ( +if "%MLC_GIT_PATCH%" == "yes" ( echo Git patching is not yet implemented in CM script "get-mlperf-tiny-src" - please add it! pause - rem set patch_filename=%CM_GIT_PATCH_FILENAME% - rem if [ ! -n ${CM_GIT_PATCH_FILENAMES} ]; then - rem patchfile=${CM_GIT_PATCH_FILENAME:-"git.patch"} - rem CM_GIT_PATCH_FILENAMES=$patchfile + rem set patch_filename=%MLC_GIT_PATCH_FILENAME% + rem if [ ! -n ${MLC_GIT_PATCH_FILENAMES} ]; then + rem patchfile=${MLC_GIT_PATCH_FILENAME:-"git.patch"} + rem MLC_GIT_PATCH_FILENAMES=$patchfile rem fi rem - rem IFS=', ' read -r -a patch_files <<< ${CM_GIT_PATCH_FILENAMES} + rem IFS=', ' read -r -a patch_files <<< ${MLC_GIT_PATCH_FILENAMES} rem rem for patch_filename in "${patch_files[@]}" rem do diff --git a/script/get-mlperf-tiny-src/run.sh b/script/get-mlperf-tiny-src/run.sh index e625891ac..b408e92bb 100644 --- a/script/get-mlperf-tiny-src/run.sh +++ b/script/get-mlperf-tiny-src/run.sh @@ -1,26 +1,26 @@ #!/bin/bash CUR_DIR=$PWD -SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH} +SCRIPT_DIR=${MLC_TMP_CURRENT_SCRIPT_PATH} echo "******************************************************" -echo "Cloning MLCommons from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT} ${CM_GIT_DEPTH} ${CM_GIT_RECURSE_SUBMODULES} ..." +echo "Cloning MLCommons from ${MLC_GIT_URL} with branch ${MLC_GIT_CHECKOUT} ${MLC_GIT_DEPTH} ${MLC_GIT_RECURSE_SUBMODULES} ..." if [ ! 
-d "src" ]; then - if [ -z ${CM_GIT_SHA} ]; then - git clone ${CM_GIT_RECURSE_SUBMODULES} -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} ${CM_GIT_DEPTH} src + if [ -z ${MLC_GIT_SHA} ]; then + git clone ${MLC_GIT_RECURSE_SUBMODULES} -b "${MLC_GIT_CHECKOUT}" ${MLC_GIT_URL} ${MLC_GIT_DEPTH} src cd src else - git clone ${CM_GIT_RECURSE_SUBMODULES} ${CM_GIT_URL} ${CM_GIT_DEPTH} src + git clone ${MLC_GIT_RECURSE_SUBMODULES} ${MLC_GIT_URL} ${MLC_GIT_DEPTH} src cd src - git checkout -b "${CM_GIT_CHECKOUT}" + git checkout -b "${MLC_GIT_CHECKOUT}" fi if [ "${?}" != "0" ]; then exit 1; fi else cd src fi -IFS=',' read -r -a submodules <<< "${CM_GIT_SUBMODULES}" +IFS=',' read -r -a submodules <<< "${MLC_GIT_SUBMODULES}" for submodule in "${submodules[@]}" do @@ -29,8 +29,8 @@ do if [ "${?}" != "0" ]; then exit 1; fi done -if [ ${CM_GIT_PATCH} == "yes" ]; then - patch_filename=${CM_GIT_PATCH_FILENAME:-git.patch} +if [ ${MLC_GIT_PATCH} == "yes" ]; then + patch_filename=${MLC_GIT_PATCH_FILENAME:-git.patch} echo "Applying patch ${SCRIPT_DIR}/patch/$patch_filename" git apply ${SCRIPT_DIR}/patch/"$patch_filename" if [ "${?}" != "0" ]; then exit 1; fi diff --git a/script/get-mlperf-training-nvidia-code/customize.py b/script/get-mlperf-training-nvidia-code/customize.py index 14c0ba345..b66a3ee67 100644 --- a/script/get-mlperf-training-nvidia-code/customize.py +++ b/script/get-mlperf-training-nvidia-code/customize.py @@ -14,12 +14,12 @@ def preprocess(i): def postprocess(i): env = i['env'] - env['CM_MLPERF_TRAINING_NVIDIA_CODE_PATH'] = os.path.join( - env['CM_MLPERF_TRAINING_RESULTS_PATH'], "NVIDIA") - if not os.path.exists(env['CM_MLPERF_TRAINING_NVIDIA_CODE_PATH']): + env['MLC_MLPERF_TRAINING_NVIDIA_CODE_PATH'] = os.path.join( + env['MLC_MLPERF_TRAINING_RESULTS_PATH'], "NVIDIA") + if not os.path.exists(env['MLC_MLPERF_TRAINING_NVIDIA_CODE_PATH']): return { - 'return': 1, 'error': f'Nvidia code path not found in the repository{env["CM_MLPERF_TRAINING_RESULTS_PATH"]}'} + 'return': 1, 'error': f'Nvidia code path not found in the repository{env["MLC_MLPERF_TRAINING_RESULTS_PATH"]}'} - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_MLPERF_TRAINING_NVIDIA_CODE_PATH'] + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_MLPERF_TRAINING_NVIDIA_CODE_PATH'] return {'return': 0} diff --git a/script/get-mlperf-training-nvidia-code/meta.yaml b/script/get-mlperf-training-nvidia-code/meta.yaml index 02e2ca173..153517149 100644 --- a/script/get-mlperf-training-nvidia-code/meta.yaml +++ b/script/get-mlperf-training-nvidia-code/meta.yaml @@ -7,16 +7,16 @@ clean_files: [] default_version: r3.0 deps: - env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_TRAINING_RESULTS_PATH + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_MLPERF_TRAINING_RESULTS_PATH extra_cache_tags: mlperf,training,results names: - mlperf-training-results tags: get,git,repo update_tags_from_env_with_prefix: _repo.: - - CM_NVIDIA_CODE_DOWNLOAD_URL + - MLC_NVIDIA_CODE_DOWNLOAD_URL new_env_keys: -- CM_MLPERF_TRAINING_NVIDIA_CODE_PATH +- MLC_MLPERF_TRAINING_NVIDIA_CODE_PATH tags: - get - nvidia @@ -28,26 +28,26 @@ uid: fdc630b1d41743c5 variations: ctuning: env: - CM_TMP_TRAINING_SRC: ctuning + MLC_TMP_TRAINING_SRC: ctuning group: repo-owner custom: group: repo-owner mlcommons: default: true env: - CM_TMP_TRAINING_SRC: mlcommons + MLC_TMP_TRAINING_SRC: mlcommons group: repo-owner nvidia-only: env: - CM_TMP_TRAINING_SRC: GATEOverflow + MLC_TMP_TRAINING_SRC: GATEOverflow group: repo-owner versions: r2.1: env: - CM_NVIDIA_CODE_DOWNLOAD_URL: https://github.com/<<>>/training_results_v2.1 + 
MLC_NVIDIA_CODE_DOWNLOAD_URL: https://github.com/<<>>/training_results_v2.1 r3.0: env: - CM_NVIDIA_CODE_DOWNLOAD_URL: https://github.com/<<>>/training_results_v3.0 + MLC_NVIDIA_CODE_DOWNLOAD_URL: https://github.com/<<>>/training_results_v3.0 r3.1: env: - CM_NVIDIA_CODE_DOWNLOAD_URL: https://github.com/<<>>/training_results_v3.1 + MLC_NVIDIA_CODE_DOWNLOAD_URL: https://github.com/<<>>/training_results_v3.1 diff --git a/script/get-mlperf-training-src/README-extra.md b/script/get-mlperf-training-src/README-extra.md index 08293c98b..5ebb33d8d 100644 --- a/script/get-mlperf-training-src/README-extra.md +++ b/script/get-mlperf-training-src/README-extra.md @@ -19,7 +19,7 @@ where [VARIATION] is one of * `r2.1:` Uses the release branch used for MLCommons training 2.1 round ## Exported Variables -* `CM_MLPERF_TRAINING_SOURCE`: Directory path of the cloned inference repository +* `MLC_MLPERF_TRAINING_SOURCE`: Directory path of the cloned training repository * `PYTHONPATH`: Is appended with the paths to vision module and the submission tools module ## Supported and Tested OS diff --git a/script/get-mlperf-training-src/customize.py b/script/get-mlperf-training-src/customize.py index baf805882..a89b5295b 100644 --- a/script/get-mlperf-training-src/customize.py +++ b/script/get-mlperf-training-src/customize.py @@ -9,14 +9,14 @@ def preprocess(i): script_path = i['run_script_input']['path'] - if env.get('CM_GIT_PATCH_FILENAMES', '') != '': - patch_files = env['CM_GIT_PATCH_FILENAMES'].split(",") + if env.get('MLC_GIT_PATCH_FILENAMES', '') != '': + patch_files = env['MLC_GIT_PATCH_FILENAMES'].split(",") patch_files_full_paths = [] for patch_file in patch_files: patch_file_full_path = os.path.join( script_path, "patch", patch_file) patch_files_full_paths.append(patch_file_full_path) - env['CM_GIT_PATCH_FILEPATHS'] = ",".join(patch_files_full_paths) + env['MLC_GIT_PATCH_FILEPATHS'] = ",".join(patch_files_full_paths) return {'return': 0} diff --git a/script/get-mlperf-training-src/meta.yaml b/script/get-mlperf-training-src/meta.yaml index 063e655ff..12a385877 100644 --- a/script/get-mlperf-training-src/meta.yaml +++ b/script/get-mlperf-training-src/meta.yaml @@ -4,34 +4,34 @@ automation_uid: 5b4e0237da074764 cache: true category: MLPerf benchmark support default_env: - CM_GIT_CHECKOUT: master - CM_GIT_CHECKOUT_FOLDER: training - CM_GIT_DEPTH: --depth 4 - CM_GIT_PATCH: 'no' - CM_GIT_RECURSE_SUBMODULES: ' --recurse-submodules' + MLC_GIT_CHECKOUT: master + MLC_GIT_CHECKOUT_FOLDER: training + MLC_GIT_DEPTH: --depth 4 + MLC_GIT_PATCH: 'no' + MLC_GIT_RECURSE_SUBMODULES: ' --recurse-submodules' default_version: master new_env_keys: -- CM_MLPERF_TRAINING_* -- CM_MLPERF_TRAINING_LAST_RELEASE +- MLC_MLPERF_TRAINING_* +- MLC_MLPERF_TRAINING_LAST_RELEASE - +PYTHONPATH prehook_deps: - env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_TRAINING_SOURCE + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_MLPERF_TRAINING_SOURCE extra_cache_tags: mlperf,training,src force_env_keys: - - CM_GIT_* + - MLC_GIT_* names: - mlperf-training-repo tags: get,git,repo update_tags_from_env_with_prefix: _branch.: - - CM_GIT_CHECKOUT + - MLC_GIT_CHECKOUT _repo.: - - CM_GIT_URL + - MLC_GIT_URL _sha.: - - CM_GIT_SHA + - MLC_GIT_SHA _tag.: - - CM_GIT_CHECKOUT_TAG + - MLC_GIT_CHECKOUT_TAG tags: - get - src @@ -45,53 +45,53 @@ uid: dc440bd88e794a28 variations: branch.#: env: - CM_GIT_CHECKOUT: '#' + MLC_GIT_CHECKOUT: '#' group: checkout cknowledge: default: true env: - CM_GIT_URL: https://github.com/cknowledge/training.git + MLC_GIT_URL:
https://github.com/cknowledge/training.git group: src full-history: env: - CM_GIT_DEPTH: '' + MLC_GIT_DEPTH: '' group: git-history mlcommons: env: - CM_GIT_URL: https://github.com/mlcommons/training.git + MLC_GIT_URL: https://github.com/mlcommons/training.git group: src no-recurse-submodules: env: - CM_GIT_RECURSE_SUBMODULES: '' + MLC_GIT_RECURSE_SUBMODULES: '' nvidia-retinanet: base: - patch env: - CM_GIT_PATCH_FILENAMES: nvidia-retinanet.patch,cpu_load.patch + MLC_GIT_PATCH_FILENAMES: nvidia-retinanet.patch,cpu_load.patch patch: env: - CM_GIT_PATCH: 'yes' + MLC_GIT_PATCH: 'yes' repo.#: env: - CM_GIT_URL: '#' + MLC_GIT_URL: '#' group: repo sha.#: env: - CM_GIT_SHA: '#' + MLC_GIT_SHA: '#' group: checkout short-history: default: true env: - CM_GIT_DEPTH: --depth 5 + MLC_GIT_DEPTH: --depth 5 group: git-history tag.#: env: - CM_GIT_CHECKOUT_TAG: '#' + MLC_GIT_CHECKOUT_TAG: '#' group: checkout versions: custom: env: - CM_MLPERF_LAST_RELEASE: custom + MLC_MLPERF_LAST_RELEASE: custom master: env: - CM_MLPERF_LAST_RELEASE: v3.1 + MLC_MLPERF_LAST_RELEASE: v3.1 diff --git a/script/get-nvidia-mitten/meta.yaml b/script/get-nvidia-mitten/meta.yaml index fe0200b5c..3073438bb 100644 --- a/script/get-nvidia-mitten/meta.yaml +++ b/script/get-nvidia-mitten/meta.yaml @@ -13,18 +13,18 @@ deps: - tags: get,generic-python-lib,_pycuda version: 2022.2.2 - env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_NVIDIA_MITTEN_SRC + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_NVIDIA_MITTEN_SRC extra_cache_tags: nvidia,mitten,src force_env_keys: - - CM_GIT_CHECKOUT + - MLC_GIT_CHECKOUT names: - nvidia-mitten-git-src tags: get,git,_repo.https://github.com/NVIDIA/mitten extra_cache_tags_from_env: -- env: CM_PYTHON_CACHE_TAGS +- env: MLC_PYTHON_CACHE_TAGS prefix: python- new_env_keys: -- CM_NVIDIA_MITTEN* +- MLC_NVIDIA_MITTEN* tags: - get - nvidia diff --git a/script/get-nvidia-mitten/run.sh b/script/get-nvidia-mitten/run.sh index 28b1ea4ce..ac0dc16b2 100644 --- a/script/get-nvidia-mitten/run.sh +++ b/script/get-nvidia-mitten/run.sh @@ -1,4 +1,4 @@ #!/bin/bash -cd ${CM_NVIDIA_MITTEN_SRC} -${CM_PYTHON_BIN_WITH_PATH} -m pip install . +cd ${MLC_NVIDIA_MITTEN_SRC} +${MLC_PYTHON_BIN_WITH_PATH} -m pip install . test $? -eq 0 || exit $? 
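The hunks on either side of this point are the same mechanical rename applied file by file, and they are selective about case: uppercase `CM_` environment keys become `MLC_`, while lowercase state keys such as `cm_cuda_device_prop` in the get-mlperf-inference-sut-description hunks are deliberately left untouched. A hypothetical sweep script in that spirit, not the tooling actually used to produce this patch, could look like:

```python
# Hypothetical migration sketch: rewrite uppercase CM_ tokens to MLC_ across
# the script tree while leaving lowercase keys like 'cm_cuda_device_prop'
# and prose mentions of "CM" alone. Not the actual tooling behind this patch.
import re
from pathlib import Path

CM_TOKEN = re.compile(r"\bCM_([A-Z0-9_]+)")  # match only uppercase CM_ keys

def migrate(text: str) -> str:
    return CM_TOKEN.sub(r"MLC_\1", text)

if __name__ == "__main__":
    for path in Path("script").rglob("*"):
        if path.is_file() and path.suffix in {".py", ".yaml", ".sh", ".bat", ".md"}:
            src = path.read_text(encoding="utf-8")
            dst = migrate(src)
            if dst != src:
                path.write_text(dst, encoding="utf-8")
                print(f"updated {path}")
```

Requiring `[A-Z0-9_]+` after the prefix is what keeps bare "CM" in prose and the lowercase state keys out of the rewrite.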
diff --git a/script/get-onnxruntime-prebuilt/customize.py b/script/get-onnxruntime-prebuilt/customize.py index 2184b54de..b6045ddd0 100644 --- a/script/get-onnxruntime-prebuilt/customize.py +++ b/script/get-onnxruntime-prebuilt/customize.py @@ -6,13 +6,13 @@ def preprocess(i): os_info = i['os_info'] env = i['env'] - machine = env.get('CM_HOST_OS_MACHINE', '') + machine = env.get('MLC_HOST_OS_MACHINE', '') if machine == '': machine = 'x86_64' if machine == 'x86_64': machine = 'x64' - hostos = env['CM_HOST_OS_TYPE'] + hostos = env['MLC_HOST_OS_TYPE'] ext = '.tgz' @@ -22,11 +22,11 @@ def preprocess(i): hostos = 'win' ext = '.zip' - device = env.get('CM_ONNXRUNTIME_DEVICE', '') + device = env.get('MLC_ONNXRUNTIME_DEVICE', '') if device != '': machine += '-' + device - version = env['CM_VERSION'] + version = env['MLC_VERSION'] FOLDER = 'onnxruntime-{}-{}-{}'.format(hostos, machine, version) @@ -50,9 +50,9 @@ def postprocess(i): env = i['env'] - hostos = env['CM_HOST_OS_TYPE'] + hostos = env['MLC_HOST_OS_TYPE'] - install_folder = env['CM_TMP_INSTALL_FOLDER'] + install_folder = env['MLC_TMP_INSTALL_FOLDER'] for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: @@ -78,7 +78,7 @@ def postprocess(i): # For dynamic libraries env['+PATH'] = [lib_path] - env['CM_ONNXRUNTIME_LIB_PATH'] = lib_path - env['CM_ONNXRUNTIME_INCLUDE_PATH'] = include_path + env['MLC_ONNXRUNTIME_LIB_PATH'] = lib_path + env['MLC_ONNXRUNTIME_INCLUDE_PATH'] = include_path return {'return': 0} diff --git a/script/get-onnxruntime-prebuilt/meta.yaml b/script/get-onnxruntime-prebuilt/meta.yaml index 22bb2b719..57078077a 100644 --- a/script/get-onnxruntime-prebuilt/meta.yaml +++ b/script/get-onnxruntime-prebuilt/meta.yaml @@ -8,8 +8,8 @@ default_version: 1.16.3 deps: - tags: detect,os new_env_keys: -- CM_ONNXRUNTIME_LIB_PATH -- CM_ONNXRUNTIME_INCLUDE_PATH +- MLC_ONNXRUNTIME_LIB_PATH +- MLC_ONNXRUNTIME_INCLUDE_PATH - +PATH - +C_INCLUDE_PATH - +CPLUS_INCLUDE_PATH @@ -28,9 +28,9 @@ variations: cpu: default: true env: - CM_ONNXRUNTIME_DEVICE: '' + MLC_ONNXRUNTIME_DEVICE: '' group: device cuda: env: - CM_ONNXRUNTIME_DEVICE: gpu + MLC_ONNXRUNTIME_DEVICE: gpu group: device diff --git a/script/get-onnxruntime-prebuilt/run.bat b/script/get-onnxruntime-prebuilt/run.bat index ea9ebc982..84be0ae11 100644 --- a/script/get-onnxruntime-prebuilt/run.bat +++ b/script/get-onnxruntime-prebuilt/run.bat @@ -7,4 +7,4 @@ IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% unzip %FILENAME% -d install IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% -echo CM_TMP_INSTALL_FOLDER=%FOLDER% > tmp-run-env.out +echo MLC_TMP_INSTALL_FOLDER=%FOLDER% > tmp-run-env.out diff --git a/script/get-onnxruntime-prebuilt/run.sh b/script/get-onnxruntime-prebuilt/run.sh index 6be34ea8a..ca6998305 100644 --- a/script/get-onnxruntime-prebuilt/run.sh +++ b/script/get-onnxruntime-prebuilt/run.sh @@ -11,4 +11,4 @@ test $? -eq 0 || exit 1 tar -C install -xzf ${FILENAME} test $? -eq 0 || exit 1 -echo "CM_TMP_INSTALL_FOLDER=$FOLDER" > tmp-run-env.out +echo "MLC_TMP_INSTALL_FOLDER=$FOLDER" > tmp-run-env.out diff --git a/script/get-openssl/README-extra.md b/script/get-openssl/README-extra.md index c4f88f975..cb54103fc 100644 --- a/script/get-openssl/README-extra.md +++ b/script/get-openssl/README-extra.md @@ -2,7 +2,7 @@ This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects openssl installed on the system and if not found calls the [install script for openssl](../script/install-openssl). 
## Exported Variables -* `CM_OPENSSL_BIN_WITH_PATH` +* `MLC_OPENSSL_BIN_WITH_PATH` ## Supported and Tested OS 1. Ubuntu 18.04, 20.04, 22.04 diff --git a/script/get-openssl/customize.py b/script/get-openssl/customize.py index 2cc8ff2e5..6cdb4a657 100644 --- a/script/get-openssl/customize.py +++ b/script/get-openssl/customize.py @@ -11,18 +11,18 @@ def preprocess(i): recursion_spaces = i['recursion_spaces'] file_name = 'openssl' - if 'CM_OPENSSL_BIN_WITH_PATH' not in env: + if 'MLC_OPENSSL_BIN_WITH_PATH' not in env: r = i['automation'].find_artifact({'file_name': file_name, 'env': env, 'os_info': os_info, 'default_path_env_key': 'PATH', 'detect_version': True, - 'env_path_key': 'CM_OPENSSL_BIN_WITH_PATH', + 'env_path_key': 'MLC_OPENSSL_BIN_WITH_PATH', 'run_script_input': i['run_script_input'], 'recursion_spaces': i['recursion_spaces']}) if r['return'] > 0: if r['return'] == 16 and os_info['platform'] != 'windows': - env['CM_REQUIRE_INSTALL'] = "yes" + env['MLC_REQUIRE_INSTALL'] = "yes" return {'return': 0} return r @@ -32,7 +32,7 @@ def preprocess(i): def detect_version(i): r = i['automation'].parse_version({'match_text': r'OpenSSL\s*([\d.]+)', 'group_number': 1, - 'env_key': 'CM_OPENSSL_VERSION', + 'env_key': 'MLC_OPENSSL_VERSION', 'which_env': i['env']}) if r['return'] > 0: return r @@ -50,10 +50,10 @@ def postprocess(i): if r['return'] > 0: return r version = r['version'] - found_file_path = env['CM_OPENSSL_BIN_WITH_PATH'] + found_file_path = env['MLC_OPENSSL_BIN_WITH_PATH'] found_path = os.path.dirname(found_file_path) - env['CM_OPENSSL_INSTALLED_PATH'] = found_path + env['MLC_OPENSSL_INSTALLED_PATH'] = found_path # Save tags that can be used to specialize further dependencies (such as # python packages) diff --git a/script/get-openssl/meta.yaml b/script/get-openssl/meta.yaml index d46266838..d7c898677 100644 --- a/script/get-openssl/meta.yaml +++ b/script/get-openssl/meta.yaml @@ -5,13 +5,13 @@ cache: true category: Detection or installation of tools and artifacts clean_files: [] env: - CM_REQUIRE_INSTALL: 'no' + MLC_REQUIRE_INSTALL: 'no' new_env_keys: -- CM_OPENSSL_* +- MLC_OPENSSL_* - +LD_LIBRARY_PATH prehook_deps: - enable_if_env: - CM_REQUIRE_INSTALL: + MLC_REQUIRE_INSTALL: - 'yes' reuse_version: true tags: install,openssl diff --git a/script/get-openssl/run.sh b/script/get-openssl/run.sh index 14277c91a..a83337265 100644 --- a/script/get-openssl/run.sh +++ b/script/get-openssl/run.sh @@ -1,4 +1,4 @@ #!/bin/bash -openssl_bin=${CM_OPENSSL_BIN_WITH_PATH} +openssl_bin=${MLC_OPENSSL_BIN_WITH_PATH} ${openssl_bin} version > tmp-ver.out 2>/dev/null test $? 
-eq 0 || exit 1 diff --git a/script/get-platform-details/customize.py b/script/get-platform-details/customize.py index 5db915829..a7dbcc4f2 100644 --- a/script/get-platform-details/customize.py +++ b/script/get-platform-details/customize.py @@ -18,18 +18,18 @@ def preprocess(i): env = i['env'] if not check_installation("numactl", os_info): - env['CM_INSTALL_NUMACTL'] = 'True' + env['MLC_INSTALL_NUMACTL'] = 'True' # if not check_installation("cpupower",os_info): - env['CM_INSTALL_CPUPOWER'] = 'True' - - if env.get('CM_PLATFORM_DETAILS_FILE_PATH', '') == '': - if env.get('CM_PLATFORM_DETAILS_DIR_PATH', '') == '': - env['CM_PLATFORM_DETAILS_DIR_PATH'] = os.getcwd() - if env.get('CM_PLATFORM_DETAILS_FILE_NAME', '') == '': - env['CM_PLATFORM_DETAILS_FILE_NAME'] = "system-info.txt" - env['CM_PLATFORM_DETAILS_FILE_PATH'] = os.path.join( - env['CM_PLATFORM_DETAILS_DIR_PATH'], env['CM_PLATFORM_DETAILS_FILE_NAME']) + env['MLC_INSTALL_CPUPOWER'] = 'True' + + if env.get('MLC_PLATFORM_DETAILS_FILE_PATH', '') == '': + if env.get('MLC_PLATFORM_DETAILS_DIR_PATH', '') == '': + env['MLC_PLATFORM_DETAILS_DIR_PATH'] = os.getcwd() + if env.get('MLC_PLATFORM_DETAILS_FILE_NAME', '') == '': + env['MLC_PLATFORM_DETAILS_FILE_NAME'] = "system-info.txt" + env['MLC_PLATFORM_DETAILS_FILE_PATH'] = os.path.join( + env['MLC_PLATFORM_DETAILS_DIR_PATH'], env['MLC_PLATFORM_DETAILS_FILE_NAME']) return {'return': 0} diff --git a/script/get-platform-details/meta.yaml b/script/get-platform-details/meta.yaml index 957d72e96..943b5d68c 100644 --- a/script/get-platform-details/meta.yaml +++ b/script/get-platform-details/meta.yaml @@ -6,52 +6,52 @@ category: Platform information deps: - tags: detect,os - skip_if_env: - CM_HOST_OS_TYPE: + MLC_HOST_OS_TYPE: - windows tags: detect,sudo - skip_if_any_env: - CM_HOST_OS_TYPE: + MLC_HOST_OS_TYPE: - windows skip_if_env: - CM_SUDO_USER: + MLC_SUDO_USER: - 'no' tags: get,sys-util,generic,_psmisc - enable_if_env: - CM_HOST_OS_TYPE: + MLC_HOST_OS_TYPE: - linux skip_if_env: - CM_SUDO_USER: + MLC_SUDO_USER: - 'no' tags: get,sys-util,generic,_systemd - enable_if_env: - CM_HOST_OS_TYPE: + MLC_HOST_OS_TYPE: - linux skip_if_env: - CM_SUDO_USER: + MLC_SUDO_USER: - 'no' tags: get,sys-util,generic,_dmidecode input_mapping: - out_dir_path: CM_PLATFORM_DETAILS_DIR_PATH - out_file_name: CM_PLATFORM_DETAILS_FILE_NAME + out_dir_path: MLC_PLATFORM_DETAILS_DIR_PATH + out_file_name: MLC_PLATFORM_DETAILS_FILE_NAME prehook_deps: - enable_if_env: - CM_HOST_OS_TYPE: + MLC_HOST_OS_TYPE: - linux - CM_INSTALL_NUMACTL: + MLC_INSTALL_NUMACTL: - 'True' skip_if_env: - CM_SUDO_USER: + MLC_SUDO_USER: - 'no' tags: get,sys-util,generic,_numactl - enable_if_env: - CM_HOST_OS_TYPE: + MLC_HOST_OS_TYPE: - linux - CM_INSTALL_CPUPOWER: + MLC_INSTALL_CPUPOWER: - 'True' env: - CM_TMP_FAIL_SAFE: 'yes' + MLC_TMP_FAIL_SAFE: 'yes' skip_if_env: - CM_SUDO_USER: + MLC_SUDO_USER: - 'no' tags: get,sys-util,generic,_linux-tools tags: diff --git a/script/get-platform-details/run.sh b/script/get-platform-details/run.sh index 12b0388a1..8f6e30d49 100644 --- a/script/get-platform-details/run.sh +++ b/script/get-platform-details/run.sh @@ -1,6 +1,6 @@ #!/bin/bash -OUTPUT_FILE="$CM_PLATFORM_DETAILS_FILE_PATH" +OUTPUT_FILE="$MLC_PLATFORM_DETAILS_FILE_PATH" #set -e #echo $OUTPUT_FILE echo "WARNING: sudo permission is needed for some of the below commands" @@ -43,9 +43,9 @@ test $? -eq 0 || exit $? echo "------------------------------------------------------------" >> $OUTPUT_FILE echo "8. 
numactl --hardware" >> $OUTPUT_FILE -if [[ ${CM_SUDO_USER} == "yes" ]]; then - echo "${CM_SUDO} numactl --hardware" - eval "${CM_SUDO} numactl --hardware" >> $OUTPUT_FILE +if [[ ${MLC_SUDO_USER} == "yes" ]]; then + echo "${MLC_SUDO} numactl --hardware" + eval "${MLC_SUDO} numactl --hardware" >> $OUTPUT_FILE test $? -eq 0 || exit $? else echo "Requires SUDO permission" >> $OUTPUT_FILE @@ -83,9 +83,9 @@ test $? -eq 0 || echo "FAILED: cpupower frequency-info" >> $OUTPUT_FILE echo "------------------------------------------------------------" >> $OUTPUT_FILE echo "15. sysctl" >> $OUTPUT_FILE -if [[ ${CM_SUDO_USER} == "yes" ]]; then - echo "${CM_SUDO} sysctl -a" - eval "${CM_SUDO} sysctl -a" >> $OUTPUT_FILE +if [[ ${MLC_SUDO_USER} == "yes" ]]; then + echo "${MLC_SUDO} sysctl -a" + eval "${MLC_SUDO} sysctl -a" >> $OUTPUT_FILE test $? -eq 0 || exit $? else echo "Requires SUDO permission" >> $OUTPUT_FILE @@ -118,8 +118,8 @@ test $? -eq 0 || exit $? echo "------------------------------------------------------------" >> $OUTPUT_FILE echo "21. dmidecode" >> $OUTPUT_FILE -if [[ ${CM_SUDO_USER} == "yes" ]]; then - eval "${CM_SUDO} dmidecode" >> $OUTPUT_FILE +if [[ ${MLC_SUDO_USER} == "yes" ]]; then + eval "${MLC_SUDO} dmidecode" >> $OUTPUT_FILE test $? -eq 0 || echo "FAILED: dmidecode" >> $OUTPUT_FILE else echo "Requires SUDO permission" >> $OUTPUT_FILE @@ -127,8 +127,8 @@ fi echo "------------------------------------------------------------" >> $OUTPUT_FILE echo "22. BIOS" >> $OUTPUT_FILE -if [[ ${CM_SUDO_USER} == "yes" ]]; then - eval "${CM_SUDO} dmidecode -t bios" >> $OUTPUT_FILE +if [[ ${MLC_SUDO_USER} == "yes" ]]; then + eval "${MLC_SUDO} dmidecode -t bios" >> $OUTPUT_FILE test $? -eq 0 || echo "FAILED: dmidecode -t bios" >> $OUTPUT_FILE else echo "Requires SUDO permission" >> $OUTPUT_FILE diff --git a/script/get-preprocessed-dataset-criteo/README-extra.md b/script/get-preprocessed-dataset-criteo/README-extra.md index 7a6f99137..745cf6861 100644 --- a/script/get-preprocessed-dataset-criteo/README-extra.md +++ b/script/get-preprocessed-dataset-criteo/README-extra.md @@ -11,6 +11,6 @@ where, ## Exported Variables -* `[CM_DATASET_PREPROCESSED_PATH]:` Directory where the preprocessed images are stored +* `[MLC_DATASET_PREPROCESSED_PATH]:` Directory where the preprocessed dataset files are stored diff --git a/script/get-preprocessed-dataset-criteo/customize.py b/script/get-preprocessed-dataset-criteo/customize.py index 7200d4795..723e209bf 100644 --- a/script/get-preprocessed-dataset-criteo/customize.py +++ b/script/get-preprocessed-dataset-criteo/customize.py @@ -8,34 +8,34 @@ def preprocess(i): env = i['env'] skip_preprocessing = False - if env.get('CM_DATASET_PREPROCESSED_PATH', '') != '': + if env.get('MLC_DATASET_PREPROCESSED_PATH', '') != '': ''' Path with preprocessed dataset given as input ''' skip_preprocessing = True print("Using preprocessed criteo dataset from '" + - env['CM_DATASET_PREPROCESSED_PATH'] + "'") + env['MLC_DATASET_PREPROCESSED_PATH'] + "'") if not skip_preprocessing and env.get( - 'CM_DATASET_PREPROCESSED_OUTPUT_PATH', '') != '': - env['CM_DATASET_PREPROCESSED_PATH'] = os.getcwd() + 'MLC_DATASET_PREPROCESSED_OUTPUT_PATH', '') != '': + env['MLC_DATASET_PREPROCESSED_PATH'] = os.getcwd() if not skip_preprocessing and env.get( - 'CM_DATASET_CRITEO_MULTIHOT', '') == 'yes': + 'MLC_DATASET_CRITEO_MULTIHOT', '') == 'yes': i['run_script_input']['script_name'] = "run-multihot" - # ${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/preprocess.py - output_dir =
env['CM_DATASET_PREPROCESSED_PATH'] - dataset_path = env['CM_DATASET_PATH'] + # ${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/preprocess.py + output_dir = env['MLC_DATASET_PREPROCESSED_PATH'] + dataset_path = env['MLC_DATASET_PATH'] tmp_dir = os.path.join(output_dir, "tmp") run_dir = os.path.join( - env['CM_MLPERF_TRAINING_SOURCE'], + env['MLC_MLPERF_TRAINING_SOURCE'], "recommendation_v2", "torchrec_dlrm", "scripts") - env['CM_RUN_CMD'] = f'cd {run_dir} && bash ./process_Criteo_1TB_Click_Logs_dataset.sh {dataset_path} {tmp_dir} {output_dir} ' + env['MLC_RUN_CMD'] = f'cd {run_dir} && bash ./process_Criteo_1TB_Click_Logs_dataset.sh {dataset_path} {tmp_dir} {output_dir} ' print("Using MLCommons Training source from '" + - env['CM_MLPERF_TRAINING_SOURCE'] + "'") + env['MLC_MLPERF_TRAINING_SOURCE'] + "'") return {'return': 0} @@ -44,8 +44,8 @@ def postprocess(i): env = i['env'] - env['CM_CRITEO_PREPROCESSED_PATH'] = env['CM_DATASET_PREPROCESSED_PATH'] + env['MLC_CRITEO_PREPROCESSED_PATH'] = env['MLC_DATASET_PREPROCESSED_PATH'] - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_CRITEO_PREPROCESSED_PATH'] + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_CRITEO_PREPROCESSED_PATH'] return {'return': 0} diff --git a/script/get-preprocessed-dataset-criteo/meta.yaml b/script/get-preprocessed-dataset-criteo/meta.yaml index 2f012605d..b4219c7a6 100644 --- a/script/get-preprocessed-dataset-criteo/meta.yaml +++ b/script/get-preprocessed-dataset-criteo/meta.yaml @@ -12,63 +12,63 @@ deps: - original-dataset - criteo-dataset skip_if_env: - CM_DATASET_PREPROCESSED_PATH: + MLC_DATASET_PREPROCESSED_PATH: - 'on' tags: get,dataset,criteo,original - names: - dlrm-src skip_if_env: - CM_DATASET_PREPROCESSED_PATH: + MLC_DATASET_PREPROCESSED_PATH: - 'on' tags: get,dlrm,src - names: - inference-src skip_if_env: - CM_DATASET_PREPROCESSED_PATH: + MLC_DATASET_PREPROCESSED_PATH: - 'on' tags: mlperf,mlcommons,inference,source,src - skip_if_env: - CM_DATASET_PREPROCESSED_PATH: + MLC_DATASET_PREPROCESSED_PATH: - 'on' tags: get,generic-python-lib,_scikit-learn - skip_if_env: - CM_DATASET_PREPROCESSED_PATH: + MLC_DATASET_PREPROCESSED_PATH: - 'on' tags: get,generic-python-lib,_torch - skip_if_env: - CM_DATASET_PREPROCESSED_PATH: + MLC_DATASET_PREPROCESSED_PATH: - 'on' tags: get,generic-python-lib,_opencv-python - skip_if_env: - CM_DATASET_PREPROCESSED_PATH: + MLC_DATASET_PREPROCESSED_PATH: - 'on' tags: get,generic-python-lib,_decorator - skip_if_env: - CM_DATASET_PREPROCESSED_PATH: + MLC_DATASET_PREPROCESSED_PATH: - 'on' tags: get,generic-python-lib,_psutil - skip_if_env: - CM_DATASET_PREPROCESSED_PATH: + MLC_DATASET_PREPROCESSED_PATH: - 'on' tags: get,generic-python-lib,_onnx - skip_if_env: - CM_DATASET_PREPROCESSED_PATH: + MLC_DATASET_PREPROCESSED_PATH: - 'on' tags: get,generic-python-lib,_tqdm - skip_if_env: - CM_DATASET_PREPROCESSED_PATH: + MLC_DATASET_PREPROCESSED_PATH: - 'on' tags: get,generic-python-lib,_mlperf_logging docker: run: false input_mapping: - dir: CM_DATASET_PREPROCESSED_PATH - output_dir: CM_DATASET_PREPROCESSED_OUTPUT_PATH - threads: CM_NUM_PREPROCESS_THREADS + dir: MLC_DATASET_PREPROCESSED_PATH + output_dir: MLC_DATASET_PREPROCESSED_OUTPUT_PATH + threads: MLC_NUM_PREPROCESS_THREADS new_env_keys: -- CM_DATASET_PREPROCESSED_PATH -- CM_DATASET_CRITEO_MULTIHOT -- CM_CRITEO_PREPROCESSED_PATH +- MLC_DATASET_PREPROCESSED_PATH +- MLC_DATASET_CRITEO_MULTIHOT +- MLC_CRITEO_PREPROCESSED_PATH tags: - get - dataset @@ -80,16 +80,16 @@ uid: afa59956272a4ba4 variations: '1': env: - CM_DATASET_SIZE: '1' + 
MLC_DATASET_SIZE: '1' '50': env: - CM_DATASET_SIZE: '50' + MLC_DATASET_SIZE: '50' fake: add_deps_recursive: original-dataset: tags: _fake env: - CM_CRITEO_FAKE: 'yes' + MLC_CRITEO_FAKE: 'yes' full: add_deps_recursive: original-dataset: @@ -98,8 +98,8 @@ variations: mlc: default: true env: - CM_DATASET_PREPROCESSED_CRITEO_FROM_MLC: 'yes' - CM_DATASET_PREPROCESSED_PATH: 'on' + MLC_DATASET_PREPROCESSED_CRITEO_FROM_MLC: 'yes' + MLC_DATASET_PREPROCESSED_PATH: 'on' group: src multihot: default: true @@ -108,41 +108,41 @@ variations: - mlperf-training - training-src skip_if_env: - CM_DATASET_PREPROCESSED_CRITEO_FROM_MLC: + MLC_DATASET_PREPROCESSED_CRITEO_FROM_MLC: - 'yes' tags: get,mlperf,training,src - skip_if_env: - CM_DATASET_PREPROCESSED_CRITEO_FROM_MLC: + MLC_DATASET_PREPROCESSED_CRITEO_FROM_MLC: - 'yes' tags: get,generic-python-lib,_package.typing_inspect - skip_if_env: - CM_DATASET_PREPROCESSED_CRITEO_FROM_MLC: + MLC_DATASET_PREPROCESSED_CRITEO_FROM_MLC: - 'yes' tags: get,generic-python-lib,_package.iopath - skip_if_env: - CM_DATASET_PREPROCESSED_CRITEO_FROM_MLC: + MLC_DATASET_PREPROCESSED_CRITEO_FROM_MLC: - 'yes' tags: get,generic-python-lib,_package.fbgemm_gpu - skip_if_env: - CM_DATASET_PREPROCESSED_CRITEO_FROM_MLC: + MLC_DATASET_PREPROCESSED_CRITEO_FROM_MLC: - 'yes' tags: get,generic-python-lib,_package.torchrec - skip_if_env: - CM_DATASET_PREPROCESSED_CRITEO_FROM_MLC: + MLC_DATASET_PREPROCESSED_CRITEO_FROM_MLC: - 'yes' tags: get,generic-python-lib,_package.pyre_extensions env: - CM_DATASET_CRITEO_MULTIHOT: 'yes' + MLC_DATASET_CRITEO_MULTIHOT: 'yes' group: type multihot,mlc: deps: - env: - CM_DOWNLOAD_CHECKSUM_FILE: <<>>/checksums.txt - CM_DOWNLOAD_FINAL_ENV_NAME: CM_DATASET_PREPROCESSED_PATH - CM_EXTRACT_FINAL_ENV_NAME: CM_DATASET_PREPROCESSED_PATH - CM_EXTRACT_TO_FOLDER: criteo-preprocessed - CM_RCLONE_CONFIG_NAME: mlc-inference - CM_RCLONE_URL: mlc-inference:mlcommons-inference-wg-public/dlrm_preprocessed + MLC_DOWNLOAD_CHECKSUM_FILE: <<>>/checksums.txt + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_DATASET_PREPROCESSED_PATH + MLC_EXTRACT_FINAL_ENV_NAME: MLC_DATASET_PREPROCESSED_PATH + MLC_EXTRACT_TO_FOLDER: criteo-preprocessed + MLC_RCLONE_CONFIG_NAME: mlc-inference + MLC_RCLONE_URL: mlc-inference:mlcommons-inference-wg-public/dlrm_preprocessed extra_cache_tags: criteo,preprocessed,dataset force_cache: true names: diff --git a/script/get-preprocessed-dataset-criteo/preprocess.py b/script/get-preprocessed-dataset-criteo/preprocess.py index 5a5c429c6..71dfddea0 100644 --- a/script/get-preprocessed-dataset-criteo/preprocess.py +++ b/script/get-preprocessed-dataset-criteo/preprocess.py @@ -2,24 +2,24 @@ import criteo import os import sys -mlperf_dlrm_path = os.environ['CM_MLPERF_INFERENCE_DLRM_PATH'] +mlperf_dlrm_path = os.environ['MLC_MLPERF_INFERENCE_DLRM_PATH'] python_path = os.path.join(mlperf_dlrm_path, "pytorch", "python") sys.path.insert(0, python_path) -dataset_name = os.environ['CM_DATASET'] -dataset_path = os.environ['CM_DATASET_PATH'] -dataset_list = os.environ.get('CM_DATASET_IMAGES_LIST', None) +dataset_name = os.environ['MLC_DATASET'] +dataset_path = os.environ['MLC_DATASET_PATH'] +dataset_list = os.environ.get('MLC_DATASET_IMAGES_LIST', None) samples_to_aggregate_fix = os.environ.get( - 'CM_DATASET_SAMPLES_TO_AGGREGATE_FIX', None) + 'MLC_DATASET_SAMPLES_TO_AGGREGATE_FIX', None) samples_to_aggregate_min = os.environ.get( - 'CM_DATASET_SAMPLES_TO_AGGREGATE_MIN', None) + 'MLC_DATASET_SAMPLES_TO_AGGREGATE_MIN', None) samples_to_aggregate_max = os.environ.get( - 
'CM_DATASET_SAMPLES_TO_AGGREGATE_MAX', None) -count = int(os.environ.get('CM_DATASET_SIZE', 0)) or None -max_ind_range = os.environ.get('CM_DATASET_MAX_IND_RANGE', -1) -threads = os.environ.get('CM_NUM_THREADS', os.cpu_count()) -threads = os.environ.get('CM_NUM_PREPROCESS_THREADS', threads) + 'MLC_DATASET_SAMPLES_TO_AGGREGATE_MAX', None) +count = int(os.environ.get('MLC_DATASET_SIZE', 0)) or None +max_ind_range = os.environ.get('MLC_DATASET_MAX_IND_RANGE', -1) +threads = os.environ.get('MLC_NUM_THREADS', os.cpu_count()) +threads = os.environ.get('MLC_NUM_PREPROCESS_THREADS', threads) criteo.Criteo(data_path=dataset_path, name=dataset_name, diff --git a/script/get-preprocessed-dataset-criteo/preprocess_multihot.sh b/script/get-preprocessed-dataset-criteo/preprocess_multihot.sh index 058cd76ee..dadf7566c 100644 --- a/script/get-preprocessed-dataset-criteo/preprocess_multihot.sh +++ b/script/get-preprocessed-dataset-criteo/preprocess_multihot.sh @@ -1,6 +1,6 @@ #!/bin/bash -cd ${CM_MLPERF_TRAINING_SOURCE}/recommendation_v2_torchrec_dlrm/ -${CM_PYTHON_BIN_WITH_PATH} materialize_synthetic_multihot_dataset.py \ +cd ${MLC_MLPERF_TRAINING_SOURCE}/recommendation_v2_torchrec_dlrm/ +${MLC_PYTHON_BIN_WITH_PATH} materialize_synthetic_multihot_dataset.py \ --in_memory_binary_criteo_path $PREPROCESSED_CRITEO_1TB_CLICK_LOGS_DATASET_PATH \ --output_path $MATERIALIZED_DATASET_PATH \ --num_embeddings_per_feature 40000000,39060,17295,7424,20265,3,7122,1543,63,40000000,3067956,405282,10,2209,11938,155,4,976,14,40000000,40000000,40000000,590152,12973,108,36 \ diff --git a/script/get-preprocessed-dataset-criteo/run-multihot.sh b/script/get-preprocessed-dataset-criteo/run-multihot.sh index e4741b41d..be26a7710 100644 --- a/script/get-preprocessed-dataset-criteo/run-multihot.sh +++ b/script/get-preprocessed-dataset-criteo/run-multihot.sh @@ -1,6 +1,6 @@ #!/bin/bash CUR=$PWD -echo ${CM_RUN_CMD} -eval ${CM_RUN_CMD} +echo ${MLC_RUN_CMD} +eval ${MLC_RUN_CMD} test $? -eq 0 || exit $? 
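Every hunk above and below applies the same mechanical rename: a `CM_` prefix becomes `MLC_` while the surrounding logic stays untouched, so each file can be reviewed by scanning its `+` lines for leftover `CM_` names. For environments that still export only the legacy names, a transitional shim along the following lines could bridge the two conventions; this is only a sketch under the assumption of a bash shell with newline-free variable values, and is not something shipped by this patch:

    #!/bin/bash
    # Sketch: mirror every exported legacy CM_* variable to its MLC_* name,
    # unless the MLC_* counterpart is already set.
    while IFS='=' read -r name value; do
      new_name="MLC_${name#CM_}"
      # ${!new_name+x} expands to "x" only if the variable named by
      # $new_name is set, so existing MLC_* values are never clobbered.
      if [ -z "${!new_name+x}" ]; then
        export "$new_name=$value"
      fi
    done < <(env | grep '^CM_')

Sourcing such a shim before an unmigrated script would let it keep reading the old names while the new ones are populated, which is one low-risk way to stage a rename of this size.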
diff --git a/script/get-preprocessed-dataset-criteo/run.sh b/script/get-preprocessed-dataset-criteo/run.sh
index 5c080f4c0..08b380577 100644
--- a/script/get-preprocessed-dataset-criteo/run.sh
+++ b/script/get-preprocessed-dataset-criteo/run.sh
@@ -2,7 +2,7 @@
 
 CUR=$PWD
 
-if [[ ${CM_CRITEO_FAKE} == "yes" ]]; then
+if [[ ${MLC_CRITEO_FAKE} == "yes" ]]; then
   exit 0
 fi
 
-#${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/preprocess.py
+#${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/preprocess.py
diff --git a/script/get-preprocessed-dataset-generic/src/generic_preprocess.py b/script/get-preprocessed-dataset-generic/src/generic_preprocess.py
index 05d65cdd2..033c4113c 100644
--- a/script/get-preprocessed-dataset-generic/src/generic_preprocess.py
+++ b/script/get-preprocessed-dataset-generic/src/generic_preprocess.py
@@ -145,30 +145,30 @@ def int8_to_uint8(image):
 def preprocess():
     import sys
 
-    source_dir = os.environ['CM_DATASET_PATH']
-    destination_dir = os.environ['CM_DATASET_PREPROCESSED_PATH']
-
-    square_side = int(os.environ['CM_DATASET_INPUT_SQUARE_SIDE'])
-    crop_percentage = float(os.environ['CM_DATASET_CROP_FACTOR'])
-    inter_size = int(os.getenv('CM_DATASET_INTERMEDIATE_SIZE', 0))
-    convert_to_bgr = int(os.getenv('CM_DATASET_CONVERT_TO_BGR', 0))
-    offset = int(os.getenv('CM_DATASET_SUBSET_OFFSET', 0))
-    volume = int(os.environ['CM_DATASET_SIZE'])
-    fof_name = os.getenv('CM_DATASET_SUBSET_FOF', 'files.txt')
-    data_type = os.getenv('CM_DATASET_DATA_TYPE_INPUT', 'float32')
-    data_layout = os.getenv('CM_DATASET_DATA_LAYOUT', '').lower()
-    new_file_extension = os.getenv('CM_DATASET_PREPROCESSED_EXTENSION', '')
-    normalize_data = int(os.getenv('CM_DATASET_NORMALIZE_DATA', '0'))
-    subtract_mean = int(os.getenv('CM_DATASET_SUBTRACT_MEANS', '0'))
-    given_channel_means = os.getenv('CM_DATASET_GIVEN_CHANNEL_MEANS', '')
-    given_channel_stds = os.getenv('CM_DATASET_GIVEN_CHANNEL_STDS', '')
-    quant_scale = float(os.environ['CM_DATASET_QUANT_SCALE'])
-    quant_offset = float(os.environ['CM_DATASET_QUANT_OFFSET'])
-    quantize = int(os.environ['CM_DATASET_QUANTIZE'])  # 1 for quantize to int8
+    source_dir = os.environ['MLC_DATASET_PATH']
+    destination_dir = os.environ['MLC_DATASET_PREPROCESSED_PATH']
+
+    square_side = int(os.environ['MLC_DATASET_INPUT_SQUARE_SIDE'])
+    crop_percentage = float(os.environ['MLC_DATASET_CROP_FACTOR'])
+    inter_size = int(os.getenv('MLC_DATASET_INTERMEDIATE_SIZE', 0))
+    convert_to_bgr = int(os.getenv('MLC_DATASET_CONVERT_TO_BGR', 0))
+    offset = int(os.getenv('MLC_DATASET_SUBSET_OFFSET', 0))
+    volume = int(os.environ['MLC_DATASET_SIZE'])
+    fof_name = os.getenv('MLC_DATASET_SUBSET_FOF', 'files.txt')
+    data_type = os.getenv('MLC_DATASET_DATA_TYPE_INPUT', 'float32')
+    data_layout = os.getenv('MLC_DATASET_DATA_LAYOUT', '').lower()
+    new_file_extension = os.getenv('MLC_DATASET_PREPROCESSED_EXTENSION', '')
+    normalize_data = int(os.getenv('MLC_DATASET_NORMALIZE_DATA', '0'))
+    subtract_mean = int(os.getenv('MLC_DATASET_SUBTRACT_MEANS', '0'))
+    given_channel_means = os.getenv('MLC_DATASET_GIVEN_CHANNEL_MEANS', '')
+    given_channel_stds = os.getenv('MLC_DATASET_GIVEN_CHANNEL_STDS', '')
+    quant_scale = float(os.environ['MLC_DATASET_QUANT_SCALE'])
+    quant_offset = float(os.environ['MLC_DATASET_QUANT_OFFSET'])
+    quantize = int(os.environ['MLC_DATASET_QUANTIZE'])  # 1 for quantize to int8
     convert_to_unsigned = int(
-        os.environ['CM_DATASET_CONVERT_TO_UNSIGNED'])  # 1 for int8 to uint8
+        os.environ['MLC_DATASET_CONVERT_TO_UNSIGNED'])  # 1 for int8 to uint8
 
-    images_list = os.getenv('CM_DATASET_IMAGES_LIST')
+    images_list = os.getenv('MLC_DATASET_IMAGES_LIST')
 
     if given_channel_means:
         given_channel_means = [float(x)
@@ -177,7 +177,7 @@ def preprocess():
     if given_channel_stds:
         given_channel_stds = [float(x) for x in given_channel_stds.split(' ')]
 
-    interpolation_method = os.getenv('CM_DATASET_INTERPOLATION_METHOD', '')
+    interpolation_method = os.getenv('MLC_DATASET_INTERPOLATION_METHOD', '')
 
     print(("From: {}, To: {}, Size: {}, Crop: {}, InterSize: {}, 2BGR: {}, OFF: {}, VOL: '{}', FOF: {}," +
            " DTYPE: {}, DLAYOUT: {}, EXT: {}, NORM: {}, SMEAN: {}, GCM: {}, GSTD: {}, QUANTIZE: {}, QUANT_SCALE: {}, QUANT_OFFSET: {}, CONV_UNSIGNED: {}, INTER: {}").format(
diff --git a/script/get-preprocessed-dataset-generic/src/preprocess_object_detection_dataset.py b/script/get-preprocessed-dataset-generic/src/preprocess_object_detection_dataset.py
index 4c6a31dc6..386d6b8b5 100644
--- a/script/get-preprocessed-dataset-generic/src/preprocess_object_detection_dataset.py
+++ b/script/get-preprocessed-dataset-generic/src/preprocess_object_detection_dataset.py
@@ -91,41 +91,41 @@ def preprocess_files(selected_filenames, source_dir, destination_dir, square_sid
 def preprocess():
-    source_directory = os.environ['CM_DATASET_PATH']
-    destination_directory = os.environ['CM_DATASET_PREPROCESSED_PATH']
+    source_directory = os.environ['MLC_DATASET_PATH']
+    destination_directory = os.environ['MLC_DATASET_PREPROCESSED_PATH']
 
     intermediate_data_type = os.environ.get(
-        'CM_DATASET_INTERMEDIATE_DATA_TYPE', np.float32)
-    square_side = int(os.environ['CM_DATASET_INPUT_SQUARE_SIDE'])
-    crop_percentage = float(os.environ['CM_DATASET_CROP_FACTOR'])
-    inter_size = int(os.getenv('CM_DATASET_INTERMEDIATE_SIZE', 0))
-    convert_to_bgr = int(os.getenv('CM_DATASET_CONVERT_TO_BGR', 0))
-    offset = int(os.getenv('CM_DATASET_SUBSET_OFFSET', 0))
-    volume = int(os.environ['CM_DATASET_SIZE'])
-    fof_name = os.getenv('CM_DATASET_SUBSET_FOF', 'files.txt')
-    data_type = os.getenv('CM_DATASET_DATA_TYPE_INPUT', 'float32')
-    input_data_type = os.getenv('CM_DATASET_DATA_TYPE_INPUT', 'float32')
-    data_layout = os.getenv('CM_DATASET_DATA_LAYOUT', '').lower()
-    new_file_extension = os.getenv('CM_DATASET_PREPROCESSED_EXTENSION', '')
-    normalize_data = int(os.getenv('CM_DATASET_NORMALIZE_DATA', '0'))
-    subtract_mean = int(os.getenv('CM_DATASET_SUBTRACT_MEANS', '0'))
-    given_channel_means = os.getenv('CM_DATASET_GIVEN_CHANNEL_MEANS', '')
-    given_channel_stds = os.getenv('CM_DATASET_GIVEN_CHANNEL_STDS', '')
-    quant_scale = float(os.environ['CM_DATASET_QUANT_SCALE'])
-    quant_offset = float(os.environ['CM_DATASET_QUANT_OFFSET'])
-    quantize = int(os.environ['CM_DATASET_QUANTIZE'])  # 1 for quantize to int8
+        'MLC_DATASET_INTERMEDIATE_DATA_TYPE', np.float32)
+    square_side = int(os.environ['MLC_DATASET_INPUT_SQUARE_SIDE'])
+    crop_percentage = float(os.environ['MLC_DATASET_CROP_FACTOR'])
+    inter_size = int(os.getenv('MLC_DATASET_INTERMEDIATE_SIZE', 0))
+    convert_to_bgr = int(os.getenv('MLC_DATASET_CONVERT_TO_BGR', 0))
+    offset = int(os.getenv('MLC_DATASET_SUBSET_OFFSET', 0))
+    volume = int(os.environ['MLC_DATASET_SIZE'])
+    fof_name = os.getenv('MLC_DATASET_SUBSET_FOF', 'files.txt')
+    data_type = os.getenv('MLC_DATASET_DATA_TYPE_INPUT', 'float32')
+    input_data_type = os.getenv('MLC_DATASET_DATA_TYPE_INPUT', 'float32')
+    data_layout = os.getenv('MLC_DATASET_DATA_LAYOUT', '').lower()
+    new_file_extension = os.getenv('MLC_DATASET_PREPROCESSED_EXTENSION', '')
+    normalize_data = int(os.getenv('MLC_DATASET_NORMALIZE_DATA', '0'))
+    subtract_mean = int(os.getenv('MLC_DATASET_SUBTRACT_MEANS', '0'))
+    given_channel_means = os.getenv('MLC_DATASET_GIVEN_CHANNEL_MEANS', '')
+    given_channel_stds = os.getenv('MLC_DATASET_GIVEN_CHANNEL_STDS', '')
+    quant_scale = float(os.environ['MLC_DATASET_QUANT_SCALE'])
+    quant_offset = float(os.environ['MLC_DATASET_QUANT_OFFSET'])
+    quantize = int(os.environ['MLC_DATASET_QUANTIZE'])  # 1 for quantize to int8
     convert_to_unsigned = int(
-        os.environ['CM_DATASET_CONVERT_TO_UNSIGNED'])  # 1 for int8 to uint8
+        os.environ['MLC_DATASET_CONVERT_TO_UNSIGNED'])  # 1 for int8 to uint8
 
-    images_list = os.getenv('CM_DATASET_IMAGES_LIST')
-    interpolation_method = os.getenv('CM_DATASET_INTERPOLATION_METHOD', '')
+    images_list = os.getenv('MLC_DATASET_IMAGES_LIST')
+    interpolation_method = os.getenv('MLC_DATASET_INTERPOLATION_METHOD', '')
 
-    annotations_filepath = os.environ['CM_DATASET_ANNOTATIONS_FILE_PATH']
-    is_calibration = os.environ['CM_DATASET_TYPE'] == "calibration"
-    image_file = os.getenv('CM_IMAGE_FILE', '')
+    annotations_filepath = os.environ['MLC_DATASET_ANNOTATIONS_FILE_PATH']
+    is_calibration = os.environ['MLC_DATASET_TYPE'] == "calibration"
+    image_file = os.getenv('MLC_IMAGE_FILE', '')
 
-    normalize_lower = float(os.getenv('CM_DATASET_NORMALIZE_LOWER', -1.0))
-    normalize_upper = float(os.getenv('CM_DATASET_NORMALIZE_UPPER', 1.0))
+    normalize_lower = float(os.getenv('MLC_DATASET_NORMALIZE_LOWER', -1.0))
+    normalize_upper = float(os.getenv('MLC_DATASET_NORMALIZE_UPPER', 1.0))
 
     if given_channel_means:
         given_channel_means = np.fromstring(
@@ -135,7 +135,7 @@ def preprocess():
         if convert_to_bgr:
             given_channel_means = given_channel_means[::-1]
 
-    given_channel_stds = os.getenv('CM_DATASET_GIVEN_CHANNEL_STDS', '')
+    given_channel_stds = os.getenv('MLC_DATASET_GIVEN_CHANNEL_STDS', '')
     if given_channel_stds:
         given_channel_stds = np.fromstring(
             given_channel_stds,
diff --git a/script/get-preprocessed-dataset-imagenet/README-extra.md b/script/get-preprocessed-dataset-imagenet/README-extra.md
index cc2742fa5..ab184e5a8 100644
--- a/script/get-preprocessed-dataset-imagenet/README-extra.md
+++ b/script/get-preprocessed-dataset-imagenet/README-extra.md
@@ -16,11 +16,11 @@ and the supported [VARIATIONS] (comma separated and beginning with _) are
 *`[NCHW]:` Preprocess the dataset with `Channel` component at beginning
 
 ## Input Variables coming from Dependencies
-* `[CM_DATASET_PATH]:` Folder path to Imagenet dataset
-* `[CM_DATASET_AUX_PATH]:` Folder path to Imagenet auxiliary dataset (to get image list)
-* `[CM_DATASET_IMAGES_LIST]:` File path containing the image names
+* `[MLC_DATASET_PATH]:` Folder path to Imagenet dataset
+* `[MLC_DATASET_AUX_PATH]:` Folder path to Imagenet auxiliary dataset (to get image list)
+* `[MLC_DATASET_IMAGES_LIST]:` File path containing the image names
 
 ## Exported Variables
-* `[CM_DATASET_PREPROCESSED_PATH]:` Directory where the preprocessed images are stored
+* `[MLC_DATASET_PREPROCESSED_PATH]:` Directory where the preprocessed images are stored
diff --git a/script/get-preprocessed-dataset-imagenet/customize.py b/script/get-preprocessed-dataset-imagenet/customize.py
index 0b5a6f0d6..eaa4382f0 100644
--- a/script/get-preprocessed-dataset-imagenet/customize.py
+++ b/script/get-preprocessed-dataset-imagenet/customize.py
@@ -8,41 +8,41 @@ def preprocess(i):
 
     env = i['env']
 
-    if 'CM_IMAGENET_PREPROCESSED_PATH' in env:
+    if 'MLC_IMAGENET_PREPROCESSED_PATH' in env:
         files = glob.glob(
-            env['CM_IMAGENET_PREPROCESSED_PATH'] +
+            env['MLC_IMAGENET_PREPROCESSED_PATH'] +
             "/**/" +
-            env['CM_IMAGENET_PREPROCESSED_FILENAME'],
+            env['MLC_IMAGENET_PREPROCESSED_FILENAME'],
             recursive=True)
         if files:
-            env['CM_DATASET_PREPROCESSED_PATH'] = env['CM_IMAGENET_PREPROCESSED_PATH']
+            env['MLC_DATASET_PREPROCESSED_PATH'] = env['MLC_IMAGENET_PREPROCESSED_PATH']
         else:
             return {'return': 1,
                     'error': 'No preprocessed images found in ' +
-                    env['CM_IMAGENET_PREPROCESSED_PATH']}
+                    env['MLC_IMAGENET_PREPROCESSED_PATH']}
     else:
-        if env.get('CM_DATASET_REFERENCE_PREPROCESSOR', "0") == "1":
+        if env.get('MLC_DATASET_REFERENCE_PREPROCESSOR', "0") == "1":
             print("Using MLCommons Inference source from '" +
-                  env['CM_MLPERF_INFERENCE_SOURCE'] + "'")
+                  env['MLC_MLPERF_INFERENCE_SOURCE'] + "'")
 
-        env['CM_DATASET_PREPROCESSED_PATH'] = os.getcwd()
-        if env['CM_DATASET_TYPE'] == "validation" and not exists(
-                os.path.join(env['CM_DATASET_PATH'], "val_map.txt")):
-            shutil.copy(os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt"), os.path.join(env['CM_DATASET_PATH'],
+        env['MLC_DATASET_PREPROCESSED_PATH'] = os.getcwd()
+        if env['MLC_DATASET_TYPE'] == "validation" and not exists(
+                os.path.join(env['MLC_DATASET_PATH'], "val_map.txt")):
+            shutil.copy(os.path.join(env['MLC_DATASET_AUX_PATH'], "val.txt"), os.path.join(env['MLC_DATASET_PATH'],
                                                                                            "val_map.txt"))
 
-    preprocessed_path = env['CM_DATASET_PREPROCESSED_PATH']
+    preprocessed_path = env['MLC_DATASET_PREPROCESSED_PATH']
 
-    if env.get('CM_DATASET_TYPE', '') == "validation" and not exists(
+    if env.get('MLC_DATASET_TYPE', '') == "validation" and not exists(
             os.path.join(preprocessed_path, "val_map.txt")):
-        shutil.copy(os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt"),
+        shutil.copy(os.path.join(env['MLC_DATASET_AUX_PATH'], "val.txt"),
                     os.path.join(preprocessed_path, "val_map.txt"))
 
-    if env.get('CM_DATASET_TYPE', '') == "calibration":
-        env['CM_DATASET_IMAGES_LIST'] = env['CM_MLPERF_IMAGENET_CALIBRATION_LIST_FILE_WITH_PATH']
-        env['CM_DATASET_SIZE'] = 500
+    if env.get('MLC_DATASET_TYPE', '') == "calibration":
+        env['MLC_DATASET_IMAGES_LIST'] = env['MLC_MLPERF_IMAGENET_CALIBRATION_LIST_FILE_WITH_PATH']
+        env['MLC_DATASET_SIZE'] = 500
 
-    if env.get('CM_DATASET_DATA_TYPE_INPUT', '') == '':
-        env['CM_DATASET_DATA_TYPE_INPUT'] = env['CM_DATASET_DATA_TYPE']
+    if env.get('MLC_DATASET_DATA_TYPE_INPUT', '') == '':
+        env['MLC_DATASET_DATA_TYPE_INPUT'] = env['MLC_DATASET_DATA_TYPE']
 
     return {'return': 0}
 
@@ -52,11 +52,11 @@ def postprocess(i):
     env = i['env']
 
     # finalize path
-    preprocessed_path = env['CM_DATASET_PREPROCESSED_PATH']
+    preprocessed_path = env['MLC_DATASET_PREPROCESSED_PATH']
 
     preprocessed_images_list = []
     preprocessed_imagenames_list = []
 
-    match_text = "/*." + env.get("CM_DATASET_PREPROCESSED_EXTENSION", "*")
+    match_text = "/*." + env.get("MLC_DATASET_PREPROCESSED_EXTENSION", "*")
     for filename in sorted(glob.glob(preprocessed_path + match_text)):
         preprocessed_images_list.append(filename)
         preprocessed_imagenames_list.append(os.path.basename(filename))
@@ -65,9 +65,9 @@ def postprocess(i):
     with open("preprocessed_filenames.txt", "w") as f:
         f.write("\n".join(preprocessed_imagenames_list))
-    env['CM_DATASET_PREPROCESSED_IMAGES_LIST'] = os.path.join(
+    env['MLC_DATASET_PREPROCESSED_IMAGES_LIST'] = os.path.join(
         os.getcwd(), "preprocessed_files.txt")
-    env['CM_DATASET_PREPROCESSED_IMAGENAMES_LIST'] = os.path.join(
+    env['MLC_DATASET_PREPROCESSED_IMAGENAMES_LIST'] = os.path.join(
         os.getcwd(), "preprocessed_filenames.txt")
     return {'return': 0}
diff --git a/script/get-preprocessed-dataset-imagenet/meta.yaml b/script/get-preprocessed-dataset-imagenet/meta.yaml
index fcd514b77..eea817b2f 100644
--- a/script/get-preprocessed-dataset-imagenet/meta.yaml
+++ b/script/get-preprocessed-dataset-imagenet/meta.yaml
@@ -4,64 +4,64 @@ automation_uid: 5b4e0237da074764
 cache: true
 category: AI/ML datasets
 default_env:
-  CM_DATASET_CONVERT_TO_UNSIGNED: '0'
-  CM_DATASET_CROP_FACTOR: '87.5'
-  CM_DATASET_DATA_LAYOUT: NCHW
-  CM_DATASET_DATA_TYPE: float32
-  CM_DATASET_PREPROCESSED_EXTENSION: npy
-  CM_DATASET_QUANTIZE: '0'
-  CM_DATASET_QUANT_OFFSET: '0'
-  CM_DATASET_QUANT_SCALE: '1'
-  CM_DATASET_REFERENCE_PREPROCESSOR: '1'
-  CM_MODEL: resnet50
-  CM_PREPROCESS_VGG: 'yes'
+  MLC_DATASET_CONVERT_TO_UNSIGNED: '0'
+  MLC_DATASET_CROP_FACTOR: '87.5'
+  MLC_DATASET_DATA_LAYOUT: NCHW
+  MLC_DATASET_DATA_TYPE: float32
+  MLC_DATASET_PREPROCESSED_EXTENSION: npy
+  MLC_DATASET_QUANTIZE: '0'
+  MLC_DATASET_QUANT_OFFSET: '0'
+  MLC_DATASET_QUANT_SCALE: '1'
+  MLC_DATASET_REFERENCE_PREPROCESSOR: '1'
+  MLC_MODEL: resnet50
+  MLC_PREPROCESS_VGG: 'yes'
 deps:
 - names:
   - python3
   - python
   skip_if_env:
-    CM_IMAGENET_PREPROCESSED_PATH:
+    MLC_IMAGENET_PREPROCESSED_PATH:
     - 'on'
   tags: get,python3
 - names:
   - original-dataset
   skip_if_env:
-    CM_IMAGENET_PREPROCESSED_PATH:
+    MLC_IMAGENET_PREPROCESSED_PATH:
     - 'on'
   tags: get,dataset,image-classification,original
 - enable_if_env:
-    CM_DATASET_TYPE:
+    MLC_DATASET_TYPE:
     - validation
   skip_if_env:
-    CM_IMAGENET_PREPROCESSED_PATH:
+    MLC_IMAGENET_PREPROCESSED_PATH:
    - 'on'
   tags: get,dataset-aux,image-classification,imagenet-aux
 - enable_if_env:
-    CM_DATASET_TYPE:
+    MLC_DATASET_TYPE:
     - calibration
   tags: get,dataset,imagenet,calibration
 - tags: get,generic-python-lib,_package.opencv-python-headless
 - tags: get,generic-python-lib,_pillow
 - enable_if_env:
-    CM_DATASET_REFERENCE_PREPROCESSOR:
+    MLC_DATASET_REFERENCE_PREPROCESSOR:
     - '1'
   names:
   - inference-src
   skip_if_env:
-    CM_IMAGENET_PREPROCESSED_PATH:
+    MLC_IMAGENET_PREPROCESSED_PATH:
     - 'on'
   tags: mlperf,mlcommons,inference,source,src
 docker:
   run: false
 env:
-  CM_DATASET: imagenet
+  MLC_DATASET: imagenet
 input_mapping:
-  dir: CM_DATASET_PREPROCESSED_PATH
-  imagenet_path: CM_IMAGENET_PATH
-  imagenet_preprocessed_path: CM_IMAGENET_PREPROCESSED_PATH
-  threads: CM_NUM_PREPROCESS_THREADS
+  dir: MLC_DATASET_PREPROCESSED_PATH
+  imagenet_path: MLC_IMAGENET_PATH
+  imagenet_preprocessed_path: MLC_IMAGENET_PREPROCESSED_PATH
+  threads: MLC_NUM_PREPROCESS_THREADS
 new_env_keys:
-- CM_DATASET_*
+- MLC_DATASET_*
 tags:
 - get
 - dataset
@@ -76,14 +76,14 @@ variations:
       original-dataset:
         tags: _2012-1
     env:
-      CM_DATASET_SIZE: '1'
+      MLC_DATASET_SIZE: '1'
     group: size
   '500':
     add_deps:
       original-dataset:
         tags: _2012
     env:
-      CM_DATASET_SIZE: '500'
+      MLC_DATASET_SIZE: '500'
     group: size
   500,validation:
     add_deps:
@@ -92,11 +92,11 @@ variations:
   NCHW:
     default: true
     env:
-      CM_DATASET_DATA_LAYOUT: NCHW
+      MLC_DATASET_DATA_LAYOUT: NCHW
     group: layout
   NHWC:
     env:
-      CM_DATASET_DATA_LAYOUT: NHWC
+      MLC_DATASET_DATA_LAYOUT: NHWC
     group: layout
   calibration:
     add_deps:
@@ -106,14 +106,14 @@ variations:
       calibration-option: mlperf.option1
       preprocessing-source: generic-preprocessor
     env:
-      CM_DATASET_TYPE: calibration
+      MLC_DATASET_TYPE: calibration
     group: dataset-type
   default: {}
   float32:
     env:
-      CM_DATASET_CONVERT_TO_UNSIGNED: '0'
-      CM_DATASET_DATA_TYPE: float32
-      CM_DATASET_QUANTIZE: '0'
+      MLC_DATASET_CONVERT_TO_UNSIGNED: '0'
+      MLC_DATASET_DATA_TYPE: float32
+      MLC_DATASET_QUANTIZE: '0'
     group: precision
   for.mobilenet:
     base:
@@ -122,83 +122,83 @@ variations:
     group: model
   for.mobilenet,float32:
     env:
-      CM_DATASET_GIVEN_CHANNEL_MEANS: ''
-      CM_DATASET_NORMALIZE_DATA: '1'
-      CM_DATASET_QUANTIZE: '0'
-      CM_DATASET_SUBTRACT_MEANS: '0'
+      MLC_DATASET_GIVEN_CHANNEL_MEANS: ''
+      MLC_DATASET_NORMALIZE_DATA: '1'
+      MLC_DATASET_QUANTIZE: '0'
+      MLC_DATASET_SUBTRACT_MEANS: '0'
   for.mobilenet,rgb8:
     env:
-      CM_DATASET_DATA_TYPE: uint8
-      CM_DATASET_GIVEN_CHANNEL_MEANS: ''
-      CM_DATASET_NORMALIZE_DATA: '0'
-      CM_DATASET_QUANTIZE: '0'
-      CM_DATASET_SUBTRACT_MEANS: '0'
+      MLC_DATASET_DATA_TYPE: uint8
+      MLC_DATASET_GIVEN_CHANNEL_MEANS: ''
+      MLC_DATASET_NORMALIZE_DATA: '0'
+      MLC_DATASET_QUANTIZE: '0'
+      MLC_DATASET_SUBTRACT_MEANS: '0'
   for.resnet50:
     base:
    - resnet50_
     env:
-      CM_DATASET_GIVEN_CHANNEL_MEANS: 123.68 116.78 103.94
-      CM_DATASET_INTERPOLATION_METHOD: INTER_AREA
-      CM_DATASET_NORMALIZE_DATA: '0'
-      CM_DATASET_SUBTRACT_MEANS: '1'
+      MLC_DATASET_GIVEN_CHANNEL_MEANS: 123.68 116.78 103.94
+      MLC_DATASET_INTERPOLATION_METHOD: INTER_AREA
+      MLC_DATASET_NORMALIZE_DATA: '0'
+      MLC_DATASET_SUBTRACT_MEANS: '1'
     group: model
   for.resnet50,float32:
     env: {}
   for.resnet50,rgb8:
     env:
-      CM_DATASET_DATA_TYPE: uint8
-      CM_DATASET_GIVEN_CHANNEL_MEANS: ''
-      CM_DATASET_NORMALIZE_DATA: '0'
-      CM_DATASET_QUANTIZE: '0'
-      CM_DATASET_SUBTRACT_MEANS: '0'
+      MLC_DATASET_DATA_TYPE: uint8
+      MLC_DATASET_GIVEN_CHANNEL_MEANS: ''
+      MLC_DATASET_NORMALIZE_DATA: '0'
+      MLC_DATASET_QUANTIZE: '0'
+      MLC_DATASET_SUBTRACT_MEANS: '0'
   for.resnet50,rgb8,uint8:
     env:
-      CM_DATASET_GIVEN_CHANNEL_MEANS: 123.68 116.78 103.94
-      CM_DATASET_QUANTIZE: '1'
-      CM_DATASET_SUBTRACT_MEANS: '1'
+      MLC_DATASET_GIVEN_CHANNEL_MEANS: 123.68 116.78 103.94
+      MLC_DATASET_QUANTIZE: '1'
+      MLC_DATASET_SUBTRACT_MEANS: '1'
   for.resnet50,uint8:
     env:
-      CM_DATASET_QUANT_OFFSET: '0'
-      CM_DATASET_QUANT_SCALE: '1.18944883'
+      MLC_DATASET_QUANT_OFFSET: '0'
+      MLC_DATASET_QUANT_SCALE: '1.18944883'
   full:
     add_deps:
       original-dataset:
         tags: _full
     env:
-      CM_DATASET_SIZE: '50000'
+      MLC_DATASET_SIZE: '50000'
     group: size
   generic-preprocessor:
     env:
-      CM_DATASET_REFERENCE_PREPROCESSOR: '0'
+      MLC_DATASET_REFERENCE_PREPROCESSOR: '0'
     group: preprocessing-source
     prehook_deps:
     - tags: get,generic,image-preprocessor
   int8:
     env:
-      CM_DATASET_CONVERT_TO_UNSIGNED: '0'
-      CM_DATASET_DATA_TYPE: int8
-      CM_DATASET_QUANTIZE: '1'
+      MLC_DATASET_CONVERT_TO_UNSIGNED: '0'
+      MLC_DATASET_DATA_TYPE: int8
+      MLC_DATASET_QUANTIZE: '1'
     group: precision
   inter.area:
     env:
-      CM_DATASET_INTERPOLATION_METHOD: INTER_AREA
+      MLC_DATASET_INTERPOLATION_METHOD: INTER_AREA
     group: interpolation-method
   inter.linear:
     env:
-      CM_DATASET_INTERPOLATION_METHOD: INTER_LINEAR
+      MLC_DATASET_INTERPOLATION_METHOD: INTER_LINEAR
     group: interpolation-method
   mlcommons-reference-preprocessor:
     default: true
     env:
-      CM_DATASET_REFERENCE_PREPROCESSOR: '1'
+      MLC_DATASET_REFERENCE_PREPROCESSOR: '1'
     group: preprocessing-source
   mlperf.option1:
     env:
-      CM_DATASET_CALIBRATION_OPTION: one
+      MLC_DATASET_CALIBRATION_OPTION: one
     group: calibration-option
   mlperf.option2:
     env:
-      CM_DATASET_CALIBRATION_OPTION: two
+      MLC_DATASET_CALIBRATION_OPTION: two
     group: calibration-option
   mobilenet_:
     default_variations:
@@ -207,7 +207,7 @@ variations:
       precision: int8
       preprocessing-source: generic-preprocessor
     env:
-      CM_MODEL: mobilenet
+      MLC_MODEL: mobilenet
   pytorch:
     default_variations:
       preprocessing-source: mlcommons-reference-preprocessor
@@ -216,8 +216,8 @@ variations:
      - torchvision
      tags: get,generic-python-lib,_torchvision
     env:
-      CM_MODEL: resnet50
-      CM_PREPROCESS_PYTORCH: 'yes'
+      MLC_MODEL: resnet50
+      MLC_PREPROCESS_PYTORCH: 'yes'
   resnet50_:
     default_variations:
       extension: rgb32
@@ -225,48 +225,48 @@ variations:
       precision: float32
       preprocessing-source: generic-preprocessor
     env:
-      CM_MODEL: resnet50
+      MLC_MODEL: resnet50
   resolution.#:
     env:
-      CM_DATASET_INPUT_SQUARE_SIDE: '#'
+      MLC_DATASET_INPUT_SQUARE_SIDE: '#'
     group: resolution
   resolution.224:
     default: true
     env:
-      CM_DATASET_INPUT_SQUARE_SIDE: '224'
+      MLC_DATASET_INPUT_SQUARE_SIDE: '224'
     group: resolution
   rgb32:
     env:
-      CM_DATASET_PREPROCESSED_EXTENSION: rgb32
+      MLC_DATASET_PREPROCESSED_EXTENSION: rgb32
     group: extension
   rgb8:
     env:
-      CM_DATASET_PREPROCESSED_EXTENSION: rgb8
+      MLC_DATASET_PREPROCESSED_EXTENSION: rgb8
     group: extension
   size.#:
     add_deps:
       original-dataset:
         tags: _#
     env:
-      CM_DATASET_SIZE: '#'
+      MLC_DATASET_SIZE: '#'
     group: size
   tflite_tpu:
     default_variations:
       preprocessing-source: mlcommons-reference-preprocessor
     env:
-      CM_MODEL: resnet50
-      CM_PREPROCESS_TFLITE_TPU: 'yes'
+      MLC_MODEL: resnet50
+      MLC_PREPROCESS_TFLITE_TPU: 'yes'
   uint8:
     env:
-      CM_DATASET_CONVERT_TO_UNSIGNED: '1'
-      CM_DATASET_DATA_TYPE: uint8
-      CM_DATASET_DATA_TYPE_INPUT: float32
-      CM_DATASET_QUANTIZE: '1'
+      MLC_DATASET_CONVERT_TO_UNSIGNED: '1'
+      MLC_DATASET_DATA_TYPE: uint8
+      MLC_DATASET_DATA_TYPE_INPUT: float32
+      MLC_DATASET_QUANTIZE: '1'
     group: precision
   validation:
     default: 'true'
     default_variations:
       size: '500'
     env:
-      CM_DATASET_TYPE: validation
+      MLC_DATASET_TYPE: validation
     group: dataset-type
diff --git a/script/get-preprocessed-dataset-imagenet/preprocess.py b/script/get-preprocessed-dataset-imagenet/preprocess.py
index beefd1dca..ba696fbd5 100644
--- a/script/get-preprocessed-dataset-imagenet/preprocess.py
+++ b/script/get-preprocessed-dataset-imagenet/preprocess.py
@@ -1,31 +1,31 @@
 import os
 import sys
 
-if os.environ.get('CM_DATASET_REFERENCE_PREPROCESSOR', '1') == "0":
+if os.environ.get('MLC_DATASET_REFERENCE_PREPROCESSOR', '1') == "0":
     import generic_preprocess
     generic_preprocess.preprocess()
 else:
-    mlperf_src_path = os.environ['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH']
+    mlperf_src_path = os.environ['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH']
     python_path = os.path.join(mlperf_src_path, "python")
     sys.path.insert(0, python_path)
     import imagenet
     import dataset
 
-    dataset_path = os.environ['CM_DATASET_PATH']
-    dataset_list = os.environ.get('CM_DATASET_IMAGES_LIST', None)
-    img_format = os.environ.get('CM_DATASET_DATA_LAYOUT', 'NHWC')
-    count = int(os.environ.get('CM_DATASET_SIZE', 1))
+    dataset_path = os.environ['MLC_DATASET_PATH']
+    dataset_list = os.environ.get('MLC_DATASET_IMAGES_LIST', None)
+    img_format = os.environ.get('MLC_DATASET_DATA_LAYOUT', 'NHWC')
+    count = int(os.environ.get('MLC_DATASET_SIZE', 1))
     preprocessed_dir = os.environ.get(
-        'CM_DATASET_PREPROCESSED_PATH', os.getcwd())
-    threads = os.environ.get('CM_NUM_THREADS', os.cpu_count())
-    threads = int(os.environ.get('CM_NUM_PREPROCESS_THREADS', threads))
+        'MLC_DATASET_PREPROCESSED_PATH', os.getcwd())
+    threads = os.environ.get('MLC_NUM_THREADS', os.cpu_count())
+    threads = int(os.environ.get('MLC_NUM_PREPROCESS_THREADS', threads))
 
-    if os.environ.get('CM_MODEL') == 'mobilenet':
+    if os.environ.get('MLC_MODEL') == 'mobilenet':
         pre_process = dataset.pre_process_mobilenet
-    elif os.environ.get('CM_MODEL', 'resnet50') == 'resnet50' and os.environ.get('CM_PREPROCESS_PYTORCH', '') == "yes":
+    elif os.environ.get('MLC_MODEL', 'resnet50') == 'resnet50' and os.environ.get('MLC_PREPROCESS_PYTORCH', '') == "yes":
         pre_process = dataset.pre_process_imagenet_pytorch
-    elif os.environ.get('CM_MODEL', 'resnet50') == 'resnet50' and os.environ.get('CM_PREPROCESS_TFLITE_TPU', '') == "yes":
+    elif os.environ.get('MLC_MODEL', 'resnet50') == 'resnet50' and os.environ.get('MLC_PREPROCESS_TFLITE_TPU', '') == "yes":
         pre_process = dataset.pre_process_imagenet_tflite_tpu
     else:
         pre_process = dataset.pre_process_vgg
diff --git a/script/get-preprocessed-dataset-imagenet/run.bat b/script/get-preprocessed-dataset-imagenet/run.bat
index 7f6036f84..bdeca68fd 100644
--- a/script/get-preprocessed-dataset-imagenet/run.bat
+++ b/script/get-preprocessed-dataset-imagenet/run.bat
@@ -1,4 +1,4 @@
 @echo off
 
-%CM_PYTHON_BIN% %CM_TMP_CURRENT_SCRIPT_PATH%\preprocess.py
+%MLC_PYTHON_BIN% %MLC_TMP_CURRENT_SCRIPT_PATH%\preprocess.py
 IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/get-preprocessed-dataset-imagenet/run.sh b/script/get-preprocessed-dataset-imagenet/run.sh
index c6e17411b..04b4b3cff 100644
--- a/script/get-preprocessed-dataset-imagenet/run.sh
+++ b/script/get-preprocessed-dataset-imagenet/run.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
-if [ ! -z ${CM_IMAGENET_PREPROCESSED_PATH+x} ]; then
+if [ ! -z ${MLC_IMAGENET_PREPROCESSED_PATH+x} ]; then
   exit 0
 fi
-${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/preprocess.py
+${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/preprocess.py
 test $? -eq 0 || exit 1
diff --git a/script/get-preprocessed-dataset-kits19/customize.py b/script/get-preprocessed-dataset-kits19/customize.py
index 6b3ce5c02..dd1d7ebf0 100644
--- a/script/get-preprocessed-dataset-kits19/customize.py
+++ b/script/get-preprocessed-dataset-kits19/customize.py
@@ -8,23 +8,23 @@ def preprocess(i):
     env = i['env']
 
     print("Using MLCommons Inference source from '" +
-          env['CM_MLPERF_INFERENCE_SOURCE'] + "'")
+          env['MLC_MLPERF_INFERENCE_SOURCE'] + "'")
 
     preprocess_src = os.path.join(
-        env['CM_MLPERF_INFERENCE_3DUNET_PATH'],
+        env['MLC_MLPERF_INFERENCE_3DUNET_PATH'],
         'preprocess.py')
-    cmd = 'cd ' + env['CM_MLPERF_INFERENCE_3DUNET_PATH'] + \
-        ' && ${CM_PYTHON_BIN_WITH_PATH} preprocess.py --raw_data_dir ' + \
-        env['CM_DATASET_PATH'] + ' --results_dir ' + \
+    cmd = 'cd ' + env['MLC_MLPERF_INFERENCE_3DUNET_PATH'] + \
+        ' && ${MLC_PYTHON_BIN_WITH_PATH} preprocess.py --raw_data_dir ' + \
+        env['MLC_DATASET_PATH'] + ' --results_dir ' + \
         os.getcwd() + ' --mode preprocess'
-    env['CM_TMP_CMD'] = cmd
+    env['MLC_TMP_CMD'] = cmd
 
     return {'return': 0}
 
 
 def postprocess(i):
     env = i['env']
 
-    if 'CM_DATASET_PREPROCESSED_PATH' not in env:
-        env['CM_DATASET_PREPROCESSED_PATH'] = os.getcwd()
-    env['CM_DATASET_KITS19_PREPROCESSED_PATH'] = env['CM_DATASET_PREPROCESSED_PATH']
+    if 'MLC_DATASET_PREPROCESSED_PATH' not in env:
+        env['MLC_DATASET_PREPROCESSED_PATH'] = os.getcwd()
+    env['MLC_DATASET_KITS19_PREPROCESSED_PATH'] = env['MLC_DATASET_PREPROCESSED_PATH']
 
     return {'return': 0}
diff --git a/script/get-preprocessed-dataset-kits19/meta.yaml b/script/get-preprocessed-dataset-kits19/meta.yaml
index 7c7eeda67..fdf7f120b 100644
--- a/script/get-preprocessed-dataset-kits19/meta.yaml
+++ b/script/get-preprocessed-dataset-kits19/meta.yaml
@@ -4,8 +4,8 @@ automation_uid: 5b4e0237da074764
 cache: true
 category: AI/ML datasets
 default_env:
-  CM_DATASET: kits19
-  CM_DATASET_DTYPE: fp32
+  MLC_DATASET: kits19
+  MLC_DATASET_DTYPE: fp32
 deps:
 - names:
   - python3
@@ -23,10 +23,10 @@ deps:
   - numpy
   tags: get,generic-python-lib,_numpy
 input_mapping:
-  dir: CM_DATASET_PREPROCESSED_PATH
-  threads: CM_NUM_PREPROCESS_THREADS
+  dir: MLC_DATASET_PREPROCESSED_PATH
+  threads: MLC_NUM_PREPROCESS_THREADS
 new_env_keys:
-- CM_DATASET_*
+- MLC_DATASET_*
 tags:
 - get
 - dataset
@@ -40,55 +40,55 @@ variations:
       original-dataset:
         tags: _1
     env:
-      CM_DATASET_SIZE: '1'
+      MLC_DATASET_SIZE: '1'
     group: dataset-count
   '5':
     adr:
       original-dataset:
         tags: _5
     env:
-      CM_DATASET_SIZE: '5'
+      MLC_DATASET_SIZE: '5'
     group: dataset-count
   '50':
     adr:
       original-dataset:
         tags: _50
     env:
-      CM_DATASET_SIZE: '50'
+      MLC_DATASET_SIZE: '50'
     group: dataset-count
   '500':
     adr:
       original-dataset:
         tags: _500
     env:
-      CM_DATASET_SIZE: '500'
+      MLC_DATASET_SIZE: '500'
     group: dataset-count
   calibration:
     add_deps:
       original-dataset:
         tags: _calibration
     env:
-      CM_DATASET_PATH: <<>>
+      MLC_DATASET_PATH: <<>>
     group: dataset-type
   fp32:
     default: true
     env:
-      CM_DATASET_DTYPE: fp32
+      MLC_DATASET_DTYPE: fp32
     group: dataset-precision
   full:
     adr:
       original-dataset:
         tags: _full
     env:
-      CM_DATASET_SIZE: ''
+      MLC_DATASET_SIZE: ''
     group: dataset-count
   int8:
     env:
-      CM_DATASET_DTYPE: int8
+      MLC_DATASET_DTYPE: int8
     group: dataset-precision
   nvidia:
     env:
-      CM_PREPROCESSING_BY_NVIDIA: 'yes'
+      MLC_PREPROCESSING_BY_NVIDIA: 'yes'
   validation:
     add_deps:
       original-dataset:
diff --git a/script/get-preprocessed-dataset-kits19/run.sh b/script/get-preprocessed-dataset-kits19/run.sh
index a9f248c38..7798c842f 100644
--- a/script/get-preprocessed-dataset-kits19/run.sh
+++ b/script/get-preprocessed-dataset-kits19/run.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-cmd=${CM_TMP_CMD}
+cmd=${MLC_TMP_CMD}
 echo $cmd
 eval $cmd
 test $? -eq 0 || exit $?
diff --git a/script/get-preprocessed-dataset-librispeech/customize.py b/script/get-preprocessed-dataset-librispeech/customize.py
index 70389e904..7ae65ec85 100644
--- a/script/get-preprocessed-dataset-librispeech/customize.py
+++ b/script/get-preprocessed-dataset-librispeech/customize.py
@@ -8,25 +8,25 @@ def preprocess(i):
     env = i['env']
 
     print("Using MLCommons Inference source from '" +
-          env['CM_MLPERF_INFERENCE_SOURCE'] + "'")
+          env['MLC_MLPERF_INFERENCE_SOURCE'] + "'")
 
     preprocess_src = os.path.join(
-        env['CM_MLPERF_INFERENCE_RNNT_PATH'],
+        env['MLC_MLPERF_INFERENCE_RNNT_PATH'],
         'pytorch',
         'utils',
         'convert_librispeech.py')
-    cmd = 'cd ' + env['CM_MLPERF_INFERENCE_3DUNET_PATH'] + ' && ${CM_PYTHON_BIN_WITH_PATH} ' + preprocess_src + ' --input_dir ' + env['CM_DATASET_LIBRISPEECH_PATH'] + \
+    cmd = 'cd ' + env['MLC_MLPERF_INFERENCE_RNNT_PATH'] + ' && ${MLC_PYTHON_BIN_WITH_PATH} ' + preprocess_src + ' --input_dir ' + env['MLC_DATASET_LIBRISPEECH_PATH'] + \
         ' --dest_dir ' + os.path.join(os.getcwd(), 'dev-clean-wav') + \
         ' --output_json ' + os.path.join(os.getcwd(), 'dev-clean-wav.json')
-    env['CM_TMP_CMD'] = cmd
+    env['MLC_TMP_CMD'] = cmd
 
     return {'return': 0}
 
 
 def postprocess(i):
     env = i['env']
 
-    env['CM_DATASET_PREPROCESSED_PATH'] = os.path.join(
+    env['MLC_DATASET_PREPROCESSED_PATH'] = os.path.join(
         os.getcwd(), 'dev-clean-wav')
-    env['CM_DATASET_PREPROCESSED_JSON'] = os.path.join(
+    env['MLC_DATASET_PREPROCESSED_JSON'] = os.path.join(
         os.getcwd(), 'dev-clean-wav.json')
 
     return {'return': 0}
diff --git a/script/get-preprocessed-dataset-librispeech/meta.yaml b/script/get-preprocessed-dataset-librispeech/meta.yaml
index 07adafaa5..779b8286f 100644
--- a/script/get-preprocessed-dataset-librispeech/meta.yaml
+++ b/script/get-preprocessed-dataset-librispeech/meta.yaml
@@ -4,8 +4,8 @@ automation_uid: 5b4e0237da074764
 cache: true
 category: AI/ML datasets
 default_env:
-  CM_DATASET: kits19
-  CM_DATASET_DTYPE: fp32
+  MLC_DATASET: librispeech
+  MLC_DATASET_DTYPE: fp32
 deps:
 - names:
   - python3
@@ -22,10 +22,10 @@ deps:
 - tags: get,generic-python-lib,_tqdm
 - tags: get,sys-util,generic,_sox
 input_mapping:
-  dir: CM_DATASET_PREPROCESSED_PATH
-  threads: CM_NUM_PREPROCESS_THREADS
+  dir: MLC_DATASET_PREPROCESSED_PATH
+  threads: MLC_NUM_PREPROCESS_THREADS
 new_env_keys:
-- CM_DATASET_*
+- MLC_DATASET_*
 tags:
 - get
 - dataset
@@ -39,51 +39,51 @@ variations:
      original-dataset:
        tags: _1
     env:
-      CM_DATASET_SIZE: '1'
+      MLC_DATASET_SIZE: '1'
     group: dataset-count
   '5':
     adr:
       original-dataset:
         tags: _5
     env:
-      CM_DATASET_SIZE: '5'
+      MLC_DATASET_SIZE: '5'
     group: dataset-count
   '50':
     adr:
       original-dataset:
         tags: _50
     env:
-      CM_DATASET_SIZE: '50'
+      MLC_DATASET_SIZE: '50'
     group: dataset-count
   '500':
     adr:
       original-dataset:
         tags: _500
     env:
-      CM_DATASET_SIZE: '500'
+      MLC_DATASET_SIZE: '500'
     group: dataset-count
   calibration:
     add_deps:
       original-dataset:
         tags: _calibration
     env:
-      CM_DATASET_PATH: <<>>
+      MLC_DATASET_PATH: <<>>
     group: dataset-type
   fp32:
     default: true
     env:
-      CM_DATASET_DTYPE: fp32
+      MLC_DATASET_DTYPE: fp32
     group: dataset-precision
   full:
     adr:
       original-dataset:
         tags: _full
     env:
-      CM_DATASET_SIZE: ''
+      MLC_DATASET_SIZE: ''
     group: dataset-count
   int8:
     env:
-      CM_DATASET_DTYPE: int8
+      MLC_DATASET_DTYPE: int8
     group: dataset-precision
   validation:
     add_deps:
diff --git a/script/get-preprocessed-dataset-librispeech/run.sh b/script/get-preprocessed-dataset-librispeech/run.sh
index a9f248c38..7798c842f 100644
--- a/script/get-preprocessed-dataset-librispeech/run.sh
+++ b/script/get-preprocessed-dataset-librispeech/run.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-cmd=${CM_TMP_CMD}
+cmd=${MLC_TMP_CMD}
 echo $cmd
 eval $cmd
 test $? -eq 0 || exit $?
diff --git a/script/get-preprocessed-dataset-openimages/README-extra.md b/script/get-preprocessed-dataset-openimages/README-extra.md
index f5c013f9a..ee9878b4a 100644
--- a/script/get-preprocessed-dataset-openimages/README-extra.md
+++ b/script/get-preprocessed-dataset-openimages/README-extra.md
@@ -18,11 +18,11 @@ and the supported [VARIATIONS] (comma separated and beginning with _) are
 *`[NCHW]:` Preprocess the dataset with `Channel` component at beginning
 
 ## Input Variables coming from Dependencies
-* `[CM_DATASET_PATH]:` Folder path to Imagenet dataset
-* `[CM_DATASET_IMAGES_LIST]:` File path containing the image names
-* `[CM_DATASET_OPENIMAGES_RESIZE]:` Image width to resize to (default 800)
+* `[MLC_DATASET_PATH]:` Folder path to the OpenImages dataset
+* `[MLC_DATASET_IMAGES_LIST]:` File path containing the image names
+* `[MLC_DATASET_OPENIMAGES_RESIZE]:` Image width to resize to (default 800)
 
 ## Exported Variables
-* `[CM_DATASET_PREPROCESSED_PATH]:` Directory where the preprocessed images are stored
+* `[MLC_DATASET_PREPROCESSED_PATH]:` Directory where the preprocessed images are stored
diff --git a/script/get-preprocessed-dataset-openimages/customize.py b/script/get-preprocessed-dataset-openimages/customize.py
index f502bf1b2..027df7edb 100644
--- a/script/get-preprocessed-dataset-openimages/customize.py
+++ b/script/get-preprocessed-dataset-openimages/customize.py
@@ -8,19 +8,19 @@ def preprocess(i):
 
     env = i['env']
 
-    if 'CM_DATASET_PREPROCESSED_PATH' not in env:
-        env['CM_DATASET_PREPROCESSED_PATH'] = os.getcwd()
+    if 'MLC_DATASET_PREPROCESSED_PATH' not in env:
+        env['MLC_DATASET_PREPROCESSED_PATH'] = os.getcwd()
 
-    if env.get('CM_DATASET_REFERENCE_PREPROCESSOR', "0") == "1":
+    if env.get('MLC_DATASET_REFERENCE_PREPROCESSOR', "0") == "1":
         print("Using MLCommons Inference source from '" +
-              env['CM_MLPERF_INFERENCE_SOURCE'] + "'")
+              env['MLC_MLPERF_INFERENCE_SOURCE'] + "'")
 
-    if env.get('CM_ML_MODEL_NAME', '') == 'retinanet':
-        if env.get('CM_DATASET_QUANTIZE', '') == '1':
-            if env.get('CM_QAIC_MODEL_RETINANET_IMAGE_SCALE', '') != '':
-                env['CM_DATASET_QUANT_SCALE'] = env['CM_QAIC_MODEL_RETINANET_IMAGE_SCALE']
-            if env.get('CM_QAIC_MODEL_RETINANET_IMAGE_OFFSET', '') != '':
-                env['CM_DATASET_QUANT_OFFSET'] = env['CM_QAIC_MODEL_RETINANET_IMAGE_OFFSET']
+    if env.get('MLC_ML_MODEL_NAME', '') == 'retinanet':
+        if env.get('MLC_DATASET_QUANTIZE', '') == '1':
+            if env.get('MLC_QAIC_MODEL_RETINANET_IMAGE_SCALE', '') != '':
+                env['MLC_DATASET_QUANT_SCALE'] = env['MLC_QAIC_MODEL_RETINANET_IMAGE_SCALE']
+            if env.get('MLC_QAIC_MODEL_RETINANET_IMAGE_OFFSET', '') != '':
+                env['MLC_DATASET_QUANT_OFFSET'] = env['MLC_QAIC_MODEL_RETINANET_IMAGE_OFFSET']
 
     return {'return': 0}
 
@@ -29,18 +29,18 @@ def postprocess(i):
 
     env = i['env']
 
-    if env["CM_DATASET_TYPE"] == "validation":
-        env['CM_DATASET_ANNOTATIONS_DIR_PATH'] = os.path.join(
-            env['CM_DATASET_PREPROCESSED_PATH'], "annotations")
-        env['CM_DATASET_ANNOTATIONS_FILE_PATH'] = os.path.join(
-            env['CM_DATASET_ANNOTATIONS_DIR_PATH'], "openimages-mlperf.json")
+    if env["MLC_DATASET_TYPE"] == "validation":
+        env['MLC_DATASET_ANNOTATIONS_DIR_PATH'] = os.path.join(
+            env['MLC_DATASET_PREPROCESSED_PATH'], "annotations")
+        env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] = os.path.join(
+            env['MLC_DATASET_ANNOTATIONS_DIR_PATH'], "openimages-mlperf.json")
 
     # finalize path
-    preprocessed_path = env['CM_DATASET_PREPROCESSED_PATH']
+    preprocessed_path = env['MLC_DATASET_PREPROCESSED_PATH']
 
     preprocessed_images_list = []
     preprocessed_imagenames_list = []
 
-    match_text = "/*." + env.get("CM_DATASET_PREPROCESSED_EXTENSION", "*")
+    match_text = "/*." + env.get("MLC_DATASET_PREPROCESSED_EXTENSION", "*")
     for filename in sorted(glob.glob(preprocessed_path + match_text)):
         preprocessed_images_list.append(filename)
         preprocessed_imagenames_list.append(os.path.basename(filename))
@@ -49,9 +49,9 @@ def postprocess(i):
     with open("preprocessed_filenames.txt", "w") as f:
         f.write("\n".join(preprocessed_imagenames_list))
-    env['CM_DATASET_PREPROCESSED_IMAGES_LIST'] = os.path.join(
+    env['MLC_DATASET_PREPROCESSED_IMAGES_LIST'] = os.path.join(
         os.getcwd(), "preprocessed_files.txt")
-    env['CM_DATASET_PREPROCESSED_IMAGENAMES_LIST'] = os.path.join(
+    env['MLC_DATASET_PREPROCESSED_IMAGENAMES_LIST'] = os.path.join(
         os.getcwd(), "preprocessed_filenames.txt")
 
     return {'return': 0}
diff --git a/script/get-preprocessed-dataset-openimages/meta.yaml b/script/get-preprocessed-dataset-openimages/meta.yaml
index 75c03137c..20583814d 100644
--- a/script/get-preprocessed-dataset-openimages/meta.yaml
+++ b/script/get-preprocessed-dataset-openimages/meta.yaml
@@ -4,13 +4,13 @@ automation_uid: 5b4e0237da074764
 cache: true
 category: AI/ML datasets
 default_env:
-  CM_DATASET: OPENIMAGES
-  CM_DATASET_CROP_FACTOR: '100.0'
-  CM_DATASET_DTYPE: fp32
-  CM_DATASET_INPUT_SQUARE_SIDE: '800'
-  CM_DATASET_QUANTIZE: '0'
-  CM_DATASET_QUANT_OFFSET: '0'
-  CM_DATASET_QUANT_SCALE: '1'
+  MLC_DATASET: OPENIMAGES
+  MLC_DATASET_CROP_FACTOR: '100.0'
+  MLC_DATASET_DTYPE: fp32
+  MLC_DATASET_INPUT_SQUARE_SIDE: '800'
+  MLC_DATASET_QUANTIZE: '0'
+  MLC_DATASET_QUANT_OFFSET: '0'
+  MLC_DATASET_QUANT_SCALE: '1'
 deps:
 - names:
   - python3
@@ -35,10 +35,10 @@ deps:
   - numpy
   tags: get,generic-python-lib,_numpy
 input_mapping:
-  dir: CM_DATASET_PREPROCESSED_PATH
-  threads: CM_NUM_PREPROCESS_THREADS
+  dir: MLC_DATASET_PREPROCESSED_PATH
+  threads: MLC_NUM_PREPROCESS_THREADS
 new_env_keys:
-- CM_DATASET_*
+- MLC_DATASET_*
 tags:
 - get
 - dataset
@@ -54,23 +54,23 @@ variations:
       original-dataset:
         tags: _50
     default: true
     env:
-      CM_DATASET_SIZE: '50'
+      MLC_DATASET_SIZE: '50'
     group: dataset-count
   '500':
     ad:
       original-dataset:
         tags: _500
     env:
-      CM_DATASET_SIZE: '500'
+      MLC_DATASET_SIZE: '500'
     group: dataset-count
   NCHW:
     default: true
     env:
-      CM_DATASET_DATA_LAYOUT: NCHW
+      MLC_DATASET_DATA_LAYOUT: NCHW
     group: dataset-layout
   NHWC:
     env:
-      CM_DATASET_DATA_LAYOUT: NHWC
+      MLC_DATASET_DATA_LAYOUT: NHWC
     group: dataset-layout
   calibration:
     ad:
@@ -79,9 +79,9 @@ variations:
     default_variations:
       dataset-count: '500'
     env:
-      CM_DATASET_ANNOTATIONS_FILE_PATH: <<>>
-      CM_DATASET_PATH: <<>>
-      CM_DATASET_TYPE: calibration
+      MLC_DATASET_ANNOTATIONS_FILE_PATH: <<>>
+      MLC_DATASET_PATH: <<>>
+      MLC_DATASET_TYPE: calibration
     group: dataset-type
   custom-annotations:
     ad:
@@ -100,7 +100,7 @@ variations:
       tags: _filter
   filter,calibration:
     env:
-      CM_DATASET_CALIBRATION_FILTER: 'yes'
+      MLC_DATASET_CALIBRATION_FILTER: 'yes'
   filter-size.#:
     ad:
       original-dataset:
@@ -112,30 +112,30 @@ variations:
       interpolation-method: inter.linear
       preprocessing-source: generic-preprocessor
     env:
-      CM_DATASET_CONVERT_TO_BGR: '0'
-      CM_DATASET_CROP_FACTOR: '100.0'
-      CM_DATASET_GIVEN_CHANNEL_MEANS: 0.485 0.456 0.406
-      CM_DATASET_GIVEN_CHANNEL_STDS: 0.229 0.224 0.225
-      CM_DATASET_NORMALIZE_DATA: '0'
-      CM_DATASET_NORMALIZE_LOWER: '0.0'
-      CM_DATASET_NORMALIZE_UPPER: '1.0'
-      CM_DATASET_SUBTRACT_MEANS: '1'
-      CM_ML_MODEL_NAME: retinanet
+      MLC_DATASET_CONVERT_TO_BGR: '0'
+      MLC_DATASET_CROP_FACTOR: '100.0'
+      MLC_DATASET_GIVEN_CHANNEL_MEANS: 0.485 0.456 0.406
+      MLC_DATASET_GIVEN_CHANNEL_STDS: 0.229 0.224 0.225
+      MLC_DATASET_NORMALIZE_DATA: '0'
+      MLC_DATASET_NORMALIZE_LOWER: '0.0'
+      MLC_DATASET_NORMALIZE_UPPER: '1.0'
+      MLC_DATASET_SUBTRACT_MEANS: '1'
+      MLC_ML_MODEL_NAME: retinanet
   for.retinanet.onnx,fp32:
     env: {}
   for.retinanet.onnx,uint8:
     env:
-      CM_DATASET_QUANT_OFFSET: '114'
-      CM_DATASET_QUANT_SCALE: '0.0186584499'
+      MLC_DATASET_QUANT_OFFSET: '114'
+      MLC_DATASET_QUANT_SCALE: '0.0186584499'
   fp32:
     default: true
     default_variations:
       extension: raw
     env:
-      CM_DATASET_CONVERT_TO_UNSIGNED: '0'
-      CM_DATASET_DTYPE: fp32
-      CM_DATASET_INPUT_DTYPE: fp32
-      CM_DATASET_QUANTIZE: '0'
+      MLC_DATASET_CONVERT_TO_UNSIGNED: '0'
+      MLC_DATASET_DTYPE: fp32
+      MLC_DATASET_INPUT_DTYPE: fp32
+      MLC_DATASET_QUANTIZE: '0'
     group: dataset-precision
   full:
     group: dataset-count
@@ -144,7 +144,7 @@ variations:
       original-dataset:
         tags: _full
     env:
-      CM_DATASET_SIZE: '24781'
+      MLC_DATASET_SIZE: '24781'
   generic-preprocessor:
     deps:
     - names:
@@ -155,7 +155,7 @@ variations:
       - torchvision
       tags: get,generic-python-lib,_torchvision
     env:
-      CM_DATASET_REFERENCE_PREPROCESSOR: '0'
+      MLC_DATASET_REFERENCE_PREPROCESSOR: '0'
     group: preprocessing-source
     prehook_deps:
     - tags: get,generic,image-preprocessor
@@ -163,64 +163,64 @@ variations:
     default_variations:
       extension: rgb8
     env:
-      CM_DATASET_CONVERT_TO_UNSIGNED: '0'
-      CM_DATASET_DTYPE: int8
-      CM_DATASET_INPUT_DTYPE: fp32
-      CM_DATASET_QUANTIZE: '1'
+      MLC_DATASET_CONVERT_TO_UNSIGNED: '0'
+      MLC_DATASET_DTYPE: int8
+      MLC_DATASET_INPUT_DTYPE: fp32
+      MLC_DATASET_QUANTIZE: '1'
     group: dataset-precision
   inter.area:
     env:
-      CM_DATASET_INTERPOLATION_METHOD: INTER_AREA
+      MLC_DATASET_INTERPOLATION_METHOD: INTER_AREA
     group: interpolation-method
   inter.linear:
     env:
-      CM_DATASET_INTERPOLATION_METHOD: INTER_LINEAR
+      MLC_DATASET_INTERPOLATION_METHOD: INTER_LINEAR
     group: interpolation-method
   mlcommons-reference-preprocessor:
     default: true
     env:
-      CM_DATASET_REFERENCE_PREPROCESSOR: '1'
+      MLC_DATASET_REFERENCE_PREPROCESSOR: '1'
     group: preprocessing-source
   npy:
     env:
-      CM_DATASET_PREPROCESSED_EXTENSION: npy
+      MLC_DATASET_PREPROCESSED_EXTENSION: npy
     group: extension
   nvidia:
     env:
-      CM_PREPROCESSING_BY_NVIDIA: 'yes'
+      MLC_PREPROCESSING_BY_NVIDIA: 'yes'
   quant-offset.#:
     const:
-      CM_DATASET_QUANT_OFFSET: '#'
+      MLC_DATASET_QUANT_OFFSET: '#'
   quant-scale.#:
     const:
-      CM_DATASET_QUANT_SCALE: '#'
+      MLC_DATASET_QUANT_SCALE: '#'
   raw:
     env:
-      CM_DATASET_PREPROCESSED_EXTENSION: raw
+      MLC_DATASET_PREPROCESSED_EXTENSION: raw
     group: extension
   rgb32:
     env:
-      CM_DATASET_PREPROCESSED_EXTENSION: rgb32
+      MLC_DATASET_PREPROCESSED_EXTENSION: rgb32
     group: extension
   rgb8:
     env:
-      CM_DATASET_PREPROCESSED_EXTENSION: rgb8
+      MLC_DATASET_PREPROCESSED_EXTENSION: rgb8
     group: extension
   size.#:
     ad:
       original-dataset:
         tags: _size.#
     env:
-      CM_DATASET_SIZE: '#'
+      MLC_DATASET_SIZE: '#'
     group: dataset-count
   uint8:
     default_variations:
       extension: rgb8
     env:
-      CM_DATASET_CONVERT_TO_UNSIGNED: '1'
-      CM_DATASET_DTYPE: uint8
-      CM_DATASET_INPUT_DTYPE: fp32
-      CM_DATASET_QUANTIZE: '1'
+      MLC_DATASET_CONVERT_TO_UNSIGNED: '1'
+      MLC_DATASET_DTYPE: uint8
+      MLC_DATASET_INPUT_DTYPE: fp32
+      MLC_DATASET_QUANTIZE: '1'
     group: dataset-precision
   validation:
     ad:
@@ -228,5 +228,5 @@ variations:
       tags: _validation
     default: true
     env:
-      CM_DATASET_TYPE: validation
+      MLC_DATASET_TYPE: validation
     group: dataset-type
diff --git a/script/get-preprocessed-dataset-openimages/preprocess.py b/script/get-preprocessed-dataset-openimages/preprocess.py
index c5af0ff04..ba105063e 100644
--- a/script/get-preprocessed-dataset-openimages/preprocess.py
+++ b/script/get-preprocessed-dataset-openimages/preprocess.py
@@ -5,26 +5,26 @@ import sys
 
 import os.path
 
-mlperf_src_path = os.environ['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH']
+mlperf_src_path = os.environ['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH']
 python_path = os.path.join(mlperf_src_path, "python")
 sys.path.insert(0, python_path)
 
-dataset_path = os.environ['CM_DATASET_PATH']
-preprocessed_dir = os.environ.get('CM_DATASET_PREPROCESSED_PATH', os.getcwd())
+dataset_path = os.environ['MLC_DATASET_PATH']
+preprocessed_dir = os.environ.get('MLC_DATASET_PREPROCESSED_PATH', os.getcwd())
 
-if os.environ.get('CM_DATASET_REFERENCE_PREPROCESSOR', '1') == "0":
+if os.environ.get('MLC_DATASET_REFERENCE_PREPROCESSOR', '1') == "0":
     # import generic_preprocess
     # generic_preprocess.preprocess()
     import preprocess_object_detection_dataset as pp
     pp.preprocess()
 else:
-    dataset_list = os.environ.get('CM_DATASET_ANNOTATIONS_FILE_PATH', None)
-    img_format = os.environ.get('CM_DATASET_DATA_LAYOUT', 'NHWC')
-    count = int(os.environ.get('CM_DATASET_SIZE', 0)) or None
-    image_width = int(os.environ.get('CM_DATASET_OPENIMAGES_RESIZE', 800))
-    threads = os.environ.get('CM_NUM_THREADS', os.cpu_count())
-    threads = os.environ.get('CM_NUM_PREPROCESS_THREADS', threads)
+    dataset_list = os.environ.get('MLC_DATASET_ANNOTATIONS_FILE_PATH', None)
+    img_format = os.environ.get('MLC_DATASET_DATA_LAYOUT', 'NHWC')
+    count = int(os.environ.get('MLC_DATASET_SIZE', 0)) or None
+    image_width = int(os.environ.get('MLC_DATASET_OPENIMAGES_RESIZE', 800))
+    threads = os.environ.get('MLC_NUM_THREADS', os.cpu_count())
+    threads = os.environ.get('MLC_NUM_PREPROCESS_THREADS', threads)
 
     name = "openimages-" + str(image_width) + "-retinanet"
 
     openimages.OpenImages(data_path=dataset_path,
@@ -38,9 +38,9 @@
                           threads=threads,
                           preprocessed_dir=preprocessed_dir)
 
-if os.environ["CM_DATASET_TYPE"] == "validation":
+if os.environ["MLC_DATASET_TYPE"] == "validation":
     src_path = os.environ.get(
-        'CM_DATASET_ANNOTATIONS_DIR_PATH',
+        'MLC_DATASET_ANNOTATIONS_DIR_PATH',
         os.path.join(
             dataset_path,
             "annotations"))
diff --git a/script/get-preprocessed-dataset-openimages/run.bat b/script/get-preprocessed-dataset-openimages/run.bat
index f3ccd2da7..9223ab77c 100644
--- a/script/get-preprocessed-dataset-openimages/run.bat
+++ b/script/get-preprocessed-dataset-openimages/run.bat
@@ -1 +1 @@
-%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\preprocess.py
+%MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\preprocess.py
diff --git a/script/get-preprocessed-dataset-openimages/run.sh b/script/get-preprocessed-dataset-openimages/run.sh
index aa660b693..a550dfee0 100644
--- a/script/get-preprocessed-dataset-openimages/run.sh
+++ b/script/get-preprocessed-dataset-openimages/run.sh
@@ -1,3 +1,3 @@
 #!/bin/bash
 
-${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/preprocess.py
+${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/preprocess.py
diff --git a/script/get-preprocessed-dataset-openorca/customize.py b/script/get-preprocessed-dataset-openorca/customize.py
index 49b1a8ae4..4317c1e84 100644
--- a/script/get-preprocessed-dataset-openorca/customize.py
+++ b/script/get-preprocessed-dataset-openorca/customize.py
@@ -7,44 +7,44 @@ def preprocess(i):
 
     env = i['env']
 
-    if str(env.get('CM_DATASET_PREPROCESSED_BY_MLC', '')
+    if str(env.get('MLC_DATASET_PREPROCESSED_BY_MLC', '')
            ).lower() in ["yes", "1", "true"]:
         run_dir = os.getcwd()
-        if env.get('CM_DATASET_CALIBRATION', '') == "yes":
-            env['CM_DATASET_CALIBRATION_PATH'] = os.path.join(
-                env['CM_OPENORCA_PREPROCESSED_ROOT'],
+        if env.get('MLC_DATASET_CALIBRATION', '') == "yes":
+            env['MLC_DATASET_CALIBRATION_PATH'] = os.path.join(
+                env['MLC_OPENORCA_PREPROCESSED_ROOT'],
                 "open_orca_gpt4_tokenized_llama.calibration_1000.pkl.gz")
-            env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_DATASET_CALIBRATION_PATH']
-            env['CM_DATASET_OPENORCA_CALIBRATION_PATH'] = env['CM_DATASET_CALIBRATION_PATH']
+            env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_DATASET_CALIBRATION_PATH']
+            env['MLC_DATASET_OPENORCA_CALIBRATION_PATH'] = env['MLC_DATASET_CALIBRATION_PATH']
         else:
-            env['CM_DATASET_PREPROCESSED_PATH'] = os.path.join(
-                env['CM_OPENORCA_PREPROCESSED_ROOT'],
+            env['MLC_DATASET_PREPROCESSED_PATH'] = os.path.join(
+                env['MLC_OPENORCA_PREPROCESSED_ROOT'],
                 "open_orca_gpt4_tokenized_llama.sampled_24576.pkl.gz")
-            env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_DATASET_PREPROCESSED_PATH']
-            env['CM_DATASET_OPENORCA_PREPROCESSED_PATH'] = env['CM_DATASET_PREPROCESSED_PATH']
-            # run_cmd = f"gunzip -k {env['CM_DATASET_PREPROCESSED_PATH']}"
+            env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_DATASET_PREPROCESSED_PATH']
+            env['MLC_DATASET_OPENORCA_PREPROCESSED_PATH'] = env['MLC_DATASET_PREPROCESSED_PATH']
+            # run_cmd = f"gunzip -k {env['MLC_DATASET_PREPROCESSED_PATH']}"
         run_cmd = ''
     else:
-        inference_src = env['CM_MLPERF_INFERENCE_SOURCE']
+        inference_src = env['MLC_MLPERF_INFERENCE_SOURCE']
         run_dir = os.path.join(inference_src, 'language', 'llama2-70b')
-        model_dir = env['CM_ML_MODEL_PATH']
-        if env.get('CM_DATASET_CALIBRATION', '') == "yes":
+        model_dir = env['MLC_ML_MODEL_PATH']
+        if env.get('MLC_DATASET_CALIBRATION', '') == "yes":
             return {'return': 1, 'error': 'No raw preprocessing information is available for openorca calibration. Please use _mlcommons variation to use the MLCommons shared calibration dataset'}
         else:
-            env['CM_DATASET_PREPROCESSED_PATH'] = os.path.join(
+            env['MLC_DATASET_PREPROCESSED_PATH'] = os.path.join(
                 os.path.join(
                     os.getcwd(),
                     "processed-openorca",
                     'open_orca_gpt4_tokenized_llama.sampled_' +
-                    env['CM_DATASET_SIZE'] +
+                    env['MLC_DATASET_SIZE'] +
                     '.pkl'))
-            run_cmd = env['CM_PYTHON_BIN_WITH_PATH'] + ' processorca.py --dataset_pq_path=' + env['CM_DATASET_OPENORCA_PARQUET'] + ' --model_dir=' + model_dir + \
+            run_cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + ' processorca.py --dataset_pq_path=' + env['MLC_DATASET_OPENORCA_PARQUET'] + ' --model_dir=' + model_dir + \
                 ' --seqlen_limit=2048 --export_dir=' + \
                 os.path.join(os.getcwd(), "processed-openorca") + \
-                ' --num_total_samples=' + env['CM_DATASET_SIZE']
+                ' --num_total_samples=' + env['MLC_DATASET_SIZE']
 
-    env['CM_RUN_DIR'] = run_dir
-    env['CM_RUN_CMD'] = run_cmd
+    env['MLC_RUN_DIR'] = run_dir
+    env['MLC_RUN_CMD'] = run_cmd
 
     return {'return': 0}
diff --git a/script/get-preprocessed-dataset-openorca/meta.yaml b/script/get-preprocessed-dataset-openorca/meta.yaml
index e3055db10..8ddb5f049 100644
--- a/script/get-preprocessed-dataset-openorca/meta.yaml
+++ b/script/get-preprocessed-dataset-openorca/meta.yaml
@@ -5,7 +5,7 @@ cache: true
 category: AI/ML datasets
 category_sort: 8500
 default_env:
-  CM_DATASET_CALIBRATION: 'no'
+  MLC_DATASET_CALIBRATION: 'no'
 deps:
 - tags: get,sys-utils-cm
 - names:
@@ -16,16 +16,16 @@ deps:
   - openorca-original
   - dataset-original
   skip_if_env:
-    CM_DATASET_PREPROCESSED_BY_MLC:
+    MLC_DATASET_PREPROCESSED_BY_MLC:
     - 'on'
     - 'yes'
   tags: get,dataset,original,openorca
 - force_env_keys:
-  - CM_GIT_*
+  - MLC_GIT_*
   names:
   - inference-src
   skip_if_env:
-    CM_DATASET_PREPROCESSED_BY_MLC:
+    MLC_DATASET_PREPROCESSED_BY_MLC:
     - 'on'
     - 'yes'
   tags: mlperf,inference,source
@@ -39,14 +39,14 @@ deps:
   - transformers
   tags: get,generic-python-lib,_package.transformers
 - skip_if_env:
-    CM_DATASET_PREPROCESSED_BY_MLC:
+    MLC_DATASET_PREPROCESSED_BY_MLC:
     - 'on'
     - 'yes'
   tags: get,ml-model,llama2
 docker:
   real_run: false
 env:
-  CM_DATASET: OPENORCA
+  MLC_DATASET: OPENORCA
 tags:
 - get
 - dataset
@@ -64,11 +64,11 @@ variations:
     base:
     - mlcommons
     env:
-      CM_DATASET_CALIBRATION: 'yes'
+      MLC_DATASET_CALIBRATION: 'yes'
     group: dataset-type
     new_env_keys:
-    - CM_DATASET_CALIBRATION_PATH
-    - CM_DATASET_OPENORCA_CALIBRATION_PATH
+    - MLC_DATASET_CALIBRATION_PATH
+    - MLC_DATASET_OPENORCA_CALIBRATION_PATH
   full:
     ad:
       dataset-original:
@@ -80,10 +80,10 @@ variations:
   mlcommons:
     deps:
     - env:
-        CM_DOWNLOAD_FINAL_ENV_NAME: CM_OPENORCA_PREPROCESSED_ROOT
-        CM_EXTRACT_FINAL_ENV_NAME: CM_OPENORCA_PREPROCESSED_ROOT
-        CM_EXTRACT_TO_FOLDER: openorca-preprocessed
-        CM_RCLONE_CONFIG_NAME: mlc-inference
+        MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_OPENORCA_PREPROCESSED_ROOT
+        MLC_EXTRACT_FINAL_ENV_NAME: MLC_OPENORCA_PREPROCESSED_ROOT
+        MLC_EXTRACT_TO_FOLDER: openorca-preprocessed
+        MLC_RCLONE_CONFIG_NAME: mlc-inference
       extra_cache_tags: openorca,preprocessed,dataset
       force_cache: true
      names:
@@ -91,10 +91,10 @@ variations:
       tags: download-and-extract,_rclone
       update_tags_from_env_with_prefix:
         _url.:
-        - CM_RCLONE_URL
+        - MLC_RCLONE_URL
     env:
-      CM_DATASET_PREPROCESSED_BY_MLC: 'yes'
-      CM_RCLONE_URL: mlc-inference:mlcommons-inference-wg-public/open_orca
+      MLC_DATASET_PREPROCESSED_BY_MLC: 'yes'
+      MLC_RCLONE_URL: mlc-inference:mlcommons-inference-wg-public/open_orca
   size.#:
     ad:
       dataset-original:
@@ -103,8 +103,8 @@ variations:
   validation:
     default: true
     env:
-      CM_DATASET_CALIBRATION: 'no'
+      MLC_DATASET_CALIBRATION: 'no'
     group: dataset-type
     new_env_keys:
-    - CM_DATASET_PREPROCESSED_PATH
-    - CM_DATASET_OPENORCA_PREPROCESSED_PATH
+    - MLC_DATASET_PREPROCESSED_PATH
+    - MLC_DATASET_OPENORCA_PREPROCESSED_PATH
diff --git a/script/get-preprocessed-dataset-openorca/run.sh b/script/get-preprocessed-dataset-openorca/run.sh
index 38fe6d64b..aa7be3116 100644
--- a/script/get-preprocessed-dataset-openorca/run.sh
+++ b/script/get-preprocessed-dataset-openorca/run.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
 
-cd ${CM_RUN_DIR}
-echo "${CM_RUN_CMD}"
-eval "${CM_RUN_CMD}"
+cd ${MLC_RUN_DIR}
+echo "${MLC_RUN_CMD}"
+eval "${MLC_RUN_CMD}"
diff --git a/script/get-preprocessed-dataset-squad/customize.py b/script/get-preprocessed-dataset-squad/customize.py
index 7f89c2275..ba1ecf264 100644
--- a/script/get-preprocessed-dataset-squad/customize.py
+++ b/script/get-preprocessed-dataset-squad/customize.py
@@ -12,18 +12,18 @@ def preprocess(i):
 
     automation = i['automation']
 
-    quiet = (env.get('CM_QUIET', False) == 'yes')
+    quiet = (env.get('MLC_QUIET', False) == 'yes')
 
-    if env.get('CM_DATASET_SQUAD_CALIBRATION_SET', '') == "one":
+    if env.get('MLC_DATASET_SQUAD_CALIBRATION_SET', '') == "one":
         env['DATASET_CALIBRATION_FILE'] = os.path.join(
-            env['CM_MLPERF_INFERENCE_SOURCE'],
+            env['MLC_MLPERF_INFERENCE_SOURCE'],
             'calibration',
             'SQuAD-v1.1',
             'bert_calibration_features.txt')
         env['DATASET_CALIBRATION_ID'] = 1
-    elif env.get('CM_DATASET_SQUAD_CALIBRATION_SET', '') == "two":
+    elif env.get('MLC_DATASET_SQUAD_CALIBRATION_SET', '') == "two":
         env['DATASET_CALIBRATION_FILE'] = os.path.join(
-            env['CM_MLPERF_INFERENCE_SOURCE'],
+            env['MLC_MLPERF_INFERENCE_SOURCE'],
             'calibration',
             'SQuAD-v1.1',
             'bert_calibration_qas_ids.txt')
@@ -32,14 +32,14 @@ def preprocess(i):
         env['DATASET_CALIBRATION_FILE'] = "''"
         env['DATASET_CALIBRATION_ID'] = 0
 
-    env['CK_ENV_MLPERF_INFERENCE'] = env['CM_MLPERF_INFERENCE_SOURCE']
+    env['CK_ENV_MLPERF_INFERENCE'] = env['MLC_MLPERF_INFERENCE_SOURCE']
 
-    if env.get('CM_DATASET_SQUAD_PACKED', '') == "yes":
+    if env.get('MLC_DATASET_SQUAD_PACKED', '') == "yes":
         i['run_script_input']['script_name'] = "run-packed"
         if env.get('+PYTHONPATH', '') == '':
             env['+PYTHONPATH'] = []
-        env['+PYTHONPATH'].append(env['CM_MLPERF_INFERENCE_BERT_PATH'])
+        env['+PYTHONPATH'].append(env['MLC_MLPERF_INFERENCE_BERT_PATH'])
 
     return {'return': 0}
 
@@ -49,22 +49,22 @@ def postprocess(i):
     env = i['env']
     cur = os.getcwd()
 
-    if env.get('CM_DATASET_SQUAD_PACKED', '') != "yes":
-        env['CM_DATASET_SQUAD_TOKENIZED_ROOT'] = cur
-        if env.get('CM_DATASET_RAW', '') == "yes":
-            env['CM_DATASET_SQUAD_TOKENIZED_INPUT_IDS'] = os.path.join(
+    if env.get('MLC_DATASET_SQUAD_PACKED', '') != "yes":
+        env['MLC_DATASET_SQUAD_TOKENIZED_ROOT'] = cur
+        if env.get('MLC_DATASET_RAW', '') == "yes":
+            env['MLC_DATASET_SQUAD_TOKENIZED_INPUT_IDS'] = os.path.join(
                 cur, 'bert_tokenized_squad_v1_1_input_ids.raw')
-            env['CM_DATASET_SQUAD_TOKENIZED_SEGMENT_IDS'] = os.path.join(
+            env['MLC_DATASET_SQUAD_TOKENIZED_SEGMENT_IDS'] = os.path.join(
                 cur, 'bert_tokenized_squad_v1_1_segment_ids.raw')
-            env['CM_DATASET_SQUAD_TOKENIZED_INPUT_MASK'] = os.path.join(
+            env['MLC_DATASET_SQUAD_TOKENIZED_INPUT_MASK'] = os.path.join(
                 cur, 'bert_tokenized_squad_v1_1_input_mask.raw')
         else:
-            env['CM_DATASET_SQUAD_TOKENIZED_PICKLE_FILE'] = os.path.join(
+            env['MLC_DATASET_SQUAD_TOKENIZED_PICKLE_FILE'] = os.path.join(
                 cur, 'bert_tokenized_squad_v1_1.pickle')
-        env['CM_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH'] = env['CM_DATASET_MAX_SEQ_LENGTH']
-        env['CM_DATASET_SQUAD_TOKENIZED_DOC_STRIDE'] = env['CM_DATASET_DOC_STRIDE']
-        env['CM_DATASET_SQUAD_TOKENIZED_MAX_QUERY_LENGTH'] = env['CM_DATASET_MAX_QUERY_LENGTH']
+        env['MLC_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH'] = env['MLC_DATASET_MAX_SEQ_LENGTH']
+        env['MLC_DATASET_SQUAD_TOKENIZED_DOC_STRIDE'] = env['MLC_DATASET_DOC_STRIDE']
+        env['MLC_DATASET_SQUAD_TOKENIZED_MAX_QUERY_LENGTH'] = env['MLC_DATASET_MAX_QUERY_LENGTH']
     else:
         with open("packed_filenames.txt", "w") as f:
@@ -91,7 +91,7 @@ def postprocess(i):
                     dirname,
                     "input_position_ids.raw") + "\n")
-    env['CM_DATASET_SQUAD_TOKENIZED_PACKED_FILENAMES_FILE'] = os.path.join(
+    env['MLC_DATASET_SQUAD_TOKENIZED_PACKED_FILENAMES_FILE'] = os.path.join(
         cur, "packed_filenames.txt")
 
     return {'return': 0}
diff --git a/script/get-preprocessed-dataset-squad/meta.yaml b/script/get-preprocessed-dataset-squad/meta.yaml
index cff348c26..1d9ae4bc4 100644
--- a/script/get-preprocessed-dataset-squad/meta.yaml
+++ b/script/get-preprocessed-dataset-squad/meta.yaml
@@ -27,10 +27,10 @@ deps:
 - tags: get,generic-python-lib,_package.tensorflow
 
 env:
-  CM_DATASET_MAX_QUERY_LENGTH: 64
+  MLC_DATASET_MAX_QUERY_LENGTH: 64
 
 new_env_keys:
-  - CM_DATASET_SQUAD_TOKENIZED_*
+  - MLC_DATASET_SQUAD_TOKENIZED_*
 
 tags:
 - get
@@ -43,51 +43,51 @@ variations:
   calib1:
     group: calibration-set
     env:
-      CM_DATASET_SQUAD_CALIBRATION_SET: one
+      MLC_DATASET_SQUAD_CALIBRATION_SET: one
   calib2:
     group: calibration-set
     env:
-      CM_DATASET_SQUAD_CALIBRATION_SET: two
+      MLC_DATASET_SQUAD_CALIBRATION_SET: two
   no-calib:
     group: calibration-set
     default: true
     env:
-      CM_DATASET_SQUAD_CALIBRATION_SET: ''
+      MLC_DATASET_SQUAD_CALIBRATION_SET: ''
   raw:
     group: raw
     default: true
     env:
-      CM_DATASET_RAW: "yes"
+      MLC_DATASET_RAW: "yes"
   pickle:
     group: raw
     env:
-      CM_DATASET_RAW: "no"
+      MLC_DATASET_RAW: "no"
   seq-length.#:
     group: seq-length
     env:
-      CM_DATASET_MAX_SEQ_LENGTH: "#"
+      MLC_DATASET_MAX_SEQ_LENGTH: "#"
   seq-length.384:
     group: seq-length
     default: true
     env:
-      CM_DATASET_MAX_SEQ_LENGTH: 384
+      MLC_DATASET_MAX_SEQ_LENGTH: 384
   doc-stride.#:
     group: doc-stride
     env:
-      CM_DATASET_DOC_STRIDE: "#"
+      MLC_DATASET_DOC_STRIDE: "#"
   doc-stride.128:
     group: doc-stride
     default: true
     env:
-      CM_DATASET_DOC_STRIDE: 128
+      MLC_DATASET_DOC_STRIDE: 128
   packed:
     group: packing
     env:
-      CM_DATASET_SQUAD_PACKED: 'yes'
+      MLC_DATASET_SQUAD_PACKED: 'yes'
     deps:
     - tags: get,preprocessed,squad,_pickle
       env:
-        CM_DATASET_SQUAD_PACKED: ''
+        MLC_DATASET_SQUAD_PACKED: ''
       inherit_variation_tags: true
      skip_inherit_variation_groups:
      - packing
diff --git a/script/get-preprocessed-dataset-squad/run-packed.sh b/script/get-preprocessed-dataset-squad/run-packed.sh
index 776c35142..f42dedcf8 100644
--- a/script/get-preprocessed-dataset-squad/run-packed.sh
+++ b/script/get-preprocessed-dataset-squad/run-packed.sh
@@ -1,11 +1,11 @@
 #!/bin/bash
 
-#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+#MLC Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH}
 
 #To export any variable
 #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
 
-#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
 
@@ -17,16 +17,16 @@ function run() {
   echo "Running: "
   echo "$1"
   echo ""
-  if [[ ${CM_FAKE_RUN} != 'yes' ]]; then
+  if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then
     eval "$1"
     exit_if_error
   fi
 }
 
 #Add your run commands here...
-# run "$CM_RUN_CMD" +# run "$MLC_RUN_CMD" CUR=$PWD run "wget --no-check-certificate -nc https://raw.githubusercontent.com/graphcore/examples/v3.2.0/tutorials/blogs_code/packedBERT/spfhp.py" run "wget --no-check-certificate -nc https://raw.githubusercontent.com/arjunsuresh/ck-qaic/main/package/model-qaic-calibrate-bert/pack.py" -run "${CM_PYTHON_BIN_WITH_PATH} pack.py ${CM_DATASET_SQUAD_TOKENIZED_PICKLE_FILE} ./ ${CM_DATASET_MAX_SEQ_LENGTH}" +run "${MLC_PYTHON_BIN_WITH_PATH} pack.py ${MLC_DATASET_SQUAD_TOKENIZED_PICKLE_FILE} ./ ${MLC_DATASET_MAX_SEQ_LENGTH}" diff --git a/script/get-preprocessed-dataset-squad/run.sh b/script/get-preprocessed-dataset-squad/run.sh index 94b008eac..32c02c034 100644 --- a/script/get-preprocessed-dataset-squad/run.sh +++ b/script/get-preprocessed-dataset-squad/run.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,25 +17,25 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi } #Add your run commands here... -# run "$CM_RUN_CMD" +# run "$MLC_RUN_CMD" CUR=$PWD run "wget --no-check-certificate -nc https://raw.githubusercontent.com/krai/ck-mlperf/master/package/dataset-squad-tokenized_for_bert/tokenize_and_pack.py" -run "${CM_PYTHON_BIN_WITH_PATH} tokenize_and_pack.py \ - ${CM_DATASET_SQUAD_VAL_PATH} \ - ${CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH} \ +run "${MLC_PYTHON_BIN_WITH_PATH} tokenize_and_pack.py \ + ${MLC_DATASET_SQUAD_VAL_PATH} \ + ${MLC_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH} \ ${CUR}/bert_tokenized_squad_v1_1 \ - ${CM_DATASET_MAX_SEQ_LENGTH} \ - ${CM_DATASET_MAX_QUERY_LENGTH} \ - ${CM_DATASET_DOC_STRIDE} \ - ${CM_DATASET_RAW} \ + ${MLC_DATASET_MAX_SEQ_LENGTH} \ + ${MLC_DATASET_MAX_QUERY_LENGTH} \ + ${MLC_DATASET_DOC_STRIDE} \ + ${MLC_DATASET_RAW} \ ${DATASET_CALIBRATION_FILE} \ ${DATASET_CALIBRATION_ID}" diff --git a/script/get-python3/README-extra.md b/script/get-python3/README-extra.md index fcf689078..5f784aa75 100644 --- a/script/get-python3/README-extra.md +++ b/script/get-python3/README-extra.md @@ -2,10 +2,10 @@ ## New ENV -* CM_PYTHON_BIN -* CM_PYTHON_BIN_WITH_PATH -* CM_PYTHON_VERSION -* CM_PYTHON_CACHE_TAGS +* MLC_PYTHON_BIN +* MLC_PYTHON_BIN_WITH_PATH +* MLC_PYTHON_VERSION +* MLC_PYTHON_CACHE_TAGS * PATH * LD_LIBRARY_PATH diff --git a/script/get-python3/customize.py b/script/get-python3/customize.py index e6598804d..a306f9e14 100644 --- a/script/get-python3/customize.py +++ b/script/get-python3/customize.py @@ -8,20 +8,20 @@ def preprocess(i): env = i['env'] - if env.get('CM_PYTHON_CONDA', '') == 'yes' and env.get( - 'CM_CONDA_BIN_PATH', '') != '': - env['CM_PYTHON_BIN_WITH_PATH'] = os.path.join( - env['CM_CONDA_BIN_PATH'], "python") + if env.get('MLC_PYTHON_CONDA', '') == 'yes' and env.get( + 'MLC_CONDA_BIN_PATH', '') != '': + env['MLC_PYTHON_BIN_WITH_PATH'] = os.path.join( + env['MLC_CONDA_BIN_PATH'], "python") recursion_spaces = i['recursion_spaces'] - # we need to understand whether this script is called first and CM_PYTHON_BIN_WITH_PATH is empty + # we need to understand whether this script is called first and MLC_PYTHON_BIN_WITH_PATH is empty # then we 
should search for related artifacts (python in our case) - # or this script is called after install-python* and CM_PYTHON_BIN_WITH_PATH is set there + # or this script is called after install-python* and MLC_PYTHON_BIN_WITH_PATH is set there # then we do not search for an artifact (python) but pick it up from the # installation - if 'CM_PYTHON_BIN_WITH_PATH' not in env: + if 'MLC_PYTHON_BIN_WITH_PATH' not in env: # file_name = 'python.exe' if os_info['platform'] == 'windows' else 'python[0-9|\.]*$' file_name = 'python.exe' if os_info['platform'] == 'windows' else 'python3' extra_paths = {"include": "+C_INCLUDE_PATH", "lib": "+LD_LIBRARY_PATH"} @@ -35,7 +35,7 @@ def preprocess(i): 'detect_version': True, # the next key is used in run.sh to # detect python version - 'env_path_key': 'CM_PYTHON_BIN_WITH_PATH', + 'env_path_key': 'MLC_PYTHON_BIN_WITH_PATH', 'run_script_input': i['run_script_input'], 'recursion_spaces': i['recursion_spaces'], 'extra_paths': extra_paths @@ -45,7 +45,7 @@ def preprocess(i): # If artifact is not found and we are not on windows # we should try to install python from src # in prehook_deps - env['CM_REQUIRE_INSTALL'] = "yes" + env['MLC_REQUIRE_INSTALL'] = "yes" return {'return': 0} else: @@ -57,7 +57,7 @@ def preprocess(i): def detect_version(i): r = i['automation'].parse_version({'match_text': r'Python\s*([\d.]+)', 'group_number': 1, - 'env_key': 'CM_PYTHON_VERSION', + 'env_key': 'MLC_PYTHON_VERSION', 'which_env': i['env']}) if r['return'] > 0: return r @@ -80,12 +80,12 @@ def postprocess(i): version = r['version'] - found_file_path = env['CM_PYTHON_BIN_WITH_PATH'] + found_file_path = env['MLC_PYTHON_BIN_WITH_PATH'] found_path = os.path.dirname(found_file_path) - env['CM_PYTHON_BIN'] = os.path.basename(found_file_path) - env['CM_PYTHON_BIN_PATH'] = os.path.dirname(found_file_path) + env['MLC_PYTHON_BIN'] = os.path.basename(found_file_path) + env['MLC_PYTHON_BIN_PATH'] = os.path.dirname(found_file_path) # Save tags that can be used to specialize further dependencies (such as # python packages) @@ -93,7 +93,7 @@ def postprocess(i): add_extra_cache_tags = [] - extra_tags = env.get('CM_EXTRA_CACHE_TAGS', '') + extra_tags = env.get('MLC_EXTRA_CACHE_TAGS', '') if extra_tags != '': tags += ',' + extra_tags @@ -103,7 +103,7 @@ def postprocess(i): if not from_virtual: tags += ',non-virtual' - env['CM_PYTHON_CACHE_TAGS'] = tags + env['MLC_PYTHON_CACHE_TAGS'] = tags add_extra_cache_tags = tags.split(',') @@ -135,9 +135,9 @@ def postprocess(i): if len(version_split) > 2: python_patch_version = version_split[2] - env['CM_PYTHON_MAJOR_VERSION'] = python_major_version - env['CM_PYTHON_MINOR_VERSION'] = python_minor_version - env['CM_PYTHON_PATCH_VERSION'] = python_patch_version + env['MLC_PYTHON_MAJOR_VERSION'] = python_major_version + env['MLC_PYTHON_MINOR_VERSION'] = python_minor_version + env['MLC_PYTHON_PATCH_VERSION'] = python_patch_version return {'return': 0, 'version': version, 'add_extra_cache_tags': add_extra_cache_tags} diff --git a/script/get-python3/meta.yaml b/script/get-python3/meta.yaml index 57bc286e1..b187346d2 100644 --- a/script/get-python3/meta.yaml +++ b/script/get-python3/meta.yaml @@ -5,10 +5,10 @@ cache: true category: Python automation clean_files: [] extra_cache_tags_from_env: -- env: CM_PYTHON_INSTALL_CACHE_TAGS +- env: MLC_PYTHON_INSTALL_CACHE_TAGS prefix: python- new_env_keys: -- CM_PYTHON_* +- MLC_PYTHON_* - +LD_LIBRARY_PATH - +C_INCLUDE_PATH - +PATH @@ -16,14 +16,14 @@ new_state_keys: - script_prefix prehook_deps: - enable_if_env: - 
CM_REQUIRE_INSTALL: + MLC_REQUIRE_INSTALL: - 'yes' inherit_variation_tags: true reuse_version: true tags: install,python,src print_env_at_the_end: - CM_PYTHON_BIN_WITH_PATH: Path to Python - CM_PYTHON_VERSION: Python version + MLC_PYTHON_BIN_WITH_PATH: Path to Python + MLC_PYTHON_VERSION: Python version tags: - get - python @@ -42,11 +42,11 @@ variations: - conda-python tags: get,generic,conda-package,_name.#,_package.python env: - CM_PYTHON_CONDA: 'yes' - CM_PYTHON_INSTALL_CACHE_TAGS: _conda.# + MLC_PYTHON_CONDA: 'yes' + MLC_PYTHON_INSTALL_CACHE_TAGS: _conda.# custom-path.#: env: - CM_PYTHON_BIN_WITH_PATH: '#' + MLC_PYTHON_BIN_WITH_PATH: '#' lto: {} optimized: {} shared: {} diff --git a/script/get-python3/run.bat b/script/get-python3/run.bat index 515d6849b..273af8634 100644 --- a/script/get-python3/run.bat +++ b/script/get-python3/run.bat @@ -1,2 +1,2 @@ -%CM_PYTHON_BIN_WITH_PATH% --version > tmp-ver.out +%MLC_PYTHON_BIN_WITH_PATH% --version > tmp-ver.out IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/get-python3/run.sh b/script/get-python3/run.sh index 28cf477f2..a44ebb000 100644 --- a/script/get-python3/run.sh +++ b/script/get-python3/run.sh @@ -1,6 +1,6 @@ #!/bin/bash -${CM_PYTHON_BIN_WITH_PATH} --version > tmp-ver.out 2>&1 +${MLC_PYTHON_BIN_WITH_PATH} --version > tmp-ver.out 2>&1 test $? -eq 0 || exit 1 #PYTHON_BIN_PATH="${python_bin%/*}" diff --git a/script/get-qaic-apps-sdk/customize.py b/script/get-qaic-apps-sdk/customize.py index 023b9809d..74e5f5533 100644 --- a/script/get-qaic-apps-sdk/customize.py +++ b/script/get-qaic-apps-sdk/customize.py @@ -15,8 +15,8 @@ def preprocess(i): apps_sdk_path = None - if env.get('CM_INPUT', '').strip() != '': - path = env['CM_INPUT'] + if env.get('MLC_INPUT', '').strip() != '': + path = env['MLC_INPUT'] if os.path.exists(os.path.join(path, "exec", "qaic-exec")): apps_sdk_path = path else: @@ -31,10 +31,10 @@ def preprocess(i): return {'return': 1, 'error': f'qaic-exec not found in the default path: {path}'} - env['CM_QAIC_APPS_SDK_PATH'] = path - env['CM_QAIC_EXEC_PATH'] = os.path.join(path, "exec", "qaic-exec") + env['MLC_QAIC_APPS_SDK_PATH'] = path + env['MLC_QAIC_EXEC_PATH'] = os.path.join(path, "exec", "qaic-exec") - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') return {'return': 0} @@ -42,7 +42,7 @@ def preprocess(i): def detect_version(i): env = i['env'] - sdk_path = env['CM_QAIC_APPS_SDK_PATH'] + sdk_path = env['MLC_QAIC_APPS_SDK_PATH'] version = None version_xml_path = os.path.join(sdk_path, "versions", "apps.xml") version_info = et.parse(version_xml_path) @@ -79,7 +79,7 @@ def postprocess(i): if "+PATH" not in env: env["+PATH"] = [] - env['+PATH'].append(os.path.dirname(env['CM_QAIC_EXEC_PATH'])) + env['+PATH'].append(os.path.dirname(env['MLC_QAIC_EXEC_PATH'])) paths = [ "+C_INCLUDE_PATH", @@ -94,7 +94,7 @@ def postprocess(i): include_paths = [] lib_paths = [] - inc_path = os.path.join(env['CM_QAIC_APPS_SDK_PATH'], "dev", "inc") + inc_path = os.path.join(env['MLC_QAIC_APPS_SDK_PATH'], "dev", "inc") if os.path.exists(inc_path): include_paths.append(inc_path) @@ -103,7 +103,7 @@ def postprocess(i): env['+CPLUS_INCLUDE_PATH'].append(inc_path) lib_path = os.path.join( - env['CM_QAIC_APPS_SDK_PATH'], + env['MLC_QAIC_APPS_SDK_PATH'], "dev", "lib", "x86_64") diff --git a/script/get-qaic-apps-sdk/meta.yaml b/script/get-qaic-apps-sdk/meta.yaml index f448ee4be..083a0bcbe 100644 --- a/script/get-qaic-apps-sdk/meta.yaml +++ b/script/get-qaic-apps-sdk/meta.yaml @@ -8,7 +8,7 @@ 
input_description: {} input_mapping: {} new_env_keys: - +PATH -- CM_QAIC_EXEC_PATH +- MLC_QAIC_EXEC_PATH - +C_INCLUDE_PATH - +CPLUS_INCLUDE_PATH - +LD_LIBRARY_PATH diff --git a/script/get-qaic-platform-sdk/customize.py b/script/get-qaic-platform-sdk/customize.py index 9e2079a98..b0050427c 100644 --- a/script/get-qaic-platform-sdk/customize.py +++ b/script/get-qaic-platform-sdk/customize.py @@ -15,8 +15,8 @@ def preprocess(i): platform_sdk_path = None - if env.get('CM_INPUT', '').strip() != '': - path = env['CM_INPUT'] + if env.get('MLC_INPUT', '').strip() != '': + path = env['MLC_INPUT'] if os.path.exists(os.path.join(path, "exec", "qaic-runner")): platform_sdk_path = path else: @@ -31,11 +31,11 @@ def preprocess(i): return {'return': 1, 'error': f'qaic-runner not found in the default path: {path}'} - env['CM_QAIC_PLATFORM_SDK_PATH'] = path - env['CM_QAIC_RUNNER_PATH'] = os.path.join(path, "exec", "qaic-runner") - env['CM_QAIC_TOOLS_PATH'] = os.path.join(path, "tools") + env['MLC_QAIC_PLATFORM_SDK_PATH'] = path + env['MLC_QAIC_RUNNER_PATH'] = os.path.join(path, "exec", "qaic-runner") + env['MLC_QAIC_TOOLS_PATH'] = os.path.join(path, "tools") - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') return {'return': 0} @@ -43,7 +43,7 @@ def preprocess(i): def detect_version(i): env = i['env'] - sdk_path = env['CM_QAIC_PLATFORM_SDK_PATH'] + sdk_path = env['MLC_QAIC_PLATFORM_SDK_PATH'] version = None version_xml_path = os.path.join(sdk_path, "versions", "platform.xml") version_info = et.parse(version_xml_path) @@ -80,7 +80,7 @@ def postprocess(i): if "+PATH" not in env: env["+PATH"] = [] - env['+PATH'].append(os.path.dirname(env['CM_QAIC_RUNNER_PATH'])) + env['+PATH'].append(os.path.dirname(env['MLC_QAIC_RUNNER_PATH'])) paths = [ "+C_INCLUDE_PATH", @@ -95,7 +95,7 @@ def postprocess(i): include_paths = [] lib_paths = [] - inc_path = os.path.join(env['CM_QAIC_PLATFORM_SDK_PATH'], "dev", "inc") + inc_path = os.path.join(env['MLC_QAIC_PLATFORM_SDK_PATH'], "dev", "inc") if os.path.exists(inc_path): include_paths.append(inc_path) @@ -104,10 +104,10 @@ def postprocess(i): env['+CPLUS_INCLUDE_PATH'].append(inc_path) lib_path = os.path.join( - env['CM_QAIC_PLATFORM_SDK_PATH'], + env['MLC_QAIC_PLATFORM_SDK_PATH'], "dev", "lib", - env['CM_HOST_PLATFORM_FLAVOR']) + env['MLC_HOST_PLATFORM_FLAVOR']) if os.path.exists(lib_path): lib_paths.append(lib_path) diff --git a/script/get-qaic-platform-sdk/meta.yaml b/script/get-qaic-platform-sdk/meta.yaml index d40a7d624..775cb1f7b 100644 --- a/script/get-qaic-platform-sdk/meta.yaml +++ b/script/get-qaic-platform-sdk/meta.yaml @@ -9,8 +9,8 @@ input_description: {} input_mapping: {} new_env_keys: - +PATH -- CM_QAIC_RUNNER_PATH -- CM_QAIC_TOOLS_PATH +- MLC_QAIC_RUNNER_PATH +- MLC_QAIC_TOOLS_PATH - +C_INCLUDE_PATH - +CPLUS_INCLUDE_PATH - +LD_LIBRARY_PATH diff --git a/script/get-qaic-software-kit/customize.py b/script/get-qaic-software-kit/customize.py index 6826e2ea2..e94db4245 100644 --- a/script/get-qaic-software-kit/customize.py +++ b/script/get-qaic-software-kit/customize.py @@ -12,17 +12,17 @@ def preprocess(i): automation = i['automation'] - env['CM_QAIC_SOFTWARE_KIT_PATH'] = env['CM_GIT_CHECKOUT_PATH'] + env['MLC_QAIC_SOFTWARE_KIT_PATH'] = env['MLC_GIT_CHECKOUT_PATH'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') if env.get('+ CXXFLAGS', []) == []: env['+ CXXFLAGS'] = [] if env.get('+ CFLAGS', []) == []: env['+ CFLAGS'] = [] - if env.get('CM_LLVM_CLANG_VERSION', '') != '': 
- clang_version_split = env['CM_LLVM_CLANG_VERSION'].split(".") + if env.get('MLC_LLVM_CLANG_VERSION', '') != '': + clang_version_split = env['MLC_LLVM_CLANG_VERSION'].split(".") clang_major_version = int(clang_version_split[0]) if clang_major_version >= 17: @@ -53,14 +53,14 @@ def preprocess(i): def postprocess(i): env = i['env'] - env['CM_QAIC_RUNNER_PATH'] = os.path.join( - env['CM_QAIC_SOFTWARE_KIT_PATH'], "build", "utils", "qaic-runner") + env['MLC_QAIC_RUNNER_PATH'] = os.path.join( + env['MLC_QAIC_SOFTWARE_KIT_PATH'], "build", "utils", "qaic-runner") if '+PATH' not in env: env['+PATH'] = [] - env['+PATH'].append(env['CM_QAIC_RUNNER_PATH']) - env['CM_QAIC_RUNNER_PATH'] = os.path.join( - env['CM_QAIC_RUNNER_PATH'], "qaic-runner") + env['+PATH'].append(env['MLC_QAIC_RUNNER_PATH']) + env['MLC_QAIC_RUNNER_PATH'] = os.path.join( + env['MLC_QAIC_RUNNER_PATH'], "qaic-runner") return {'return': 0} diff --git a/script/get-qaic-software-kit/meta.yaml b/script/get-qaic-software-kit/meta.yaml index 772fc8b1f..a042fae5d 100644 --- a/script/get-qaic-software-kit/meta.yaml +++ b/script/get-qaic-software-kit/meta.yaml @@ -10,9 +10,9 @@ deps: tags: get,git,repo update_tags_from_env_with_prefix: _branch.: - - CM_GIT_CHECKOUT + - MLC_GIT_CHECKOUT _repo.: - - CM_GIT_URL + - MLC_GIT_URL - tags: get,generic,sys-util,_libudev-dev - tags: get,generic,sys-util,_libpci-dev - tags: get,google,test @@ -27,8 +27,8 @@ input_description: {} input_mapping: {} new_env_keys: - +PATH -- CM_QAIC_SOFTWARE_KIT_PATH -- CM_QAIC_RUNNER_PATH +- MLC_QAIC_SOFTWARE_KIT_PATH +- MLC_QAIC_RUNNER_PATH new_state_keys: [] post_deps: [] posthook_deps: [] @@ -43,14 +43,14 @@ uid: 3344655922694bbb variations: branch.#: env: - CM_GIT_CHECKOUT: '#' + MLC_GIT_CHECKOUT: '#' repo.#: env: - CM_GIT_URL: '#' + MLC_GIT_URL: '#' group: repo-source repo.quic: default: true env: - CM_GIT_URL: https://github.com/quic/software-kit-for-qualcomm-cloud-ai-100 + MLC_GIT_URL: https://github.com/quic/software-kit-for-qualcomm-cloud-ai-100 group: repo-source versions: {} diff --git a/script/get-qaic-software-kit/run.sh b/script/get-qaic-software-kit/run.sh index a00122a35..16c20f1d6 100644 --- a/script/get-qaic-software-kit/run.sh +++ b/script/get-qaic-software-kit/run.sh @@ -1,14 +1,14 @@ #!/bin/bash function cmake() { -${CM_CMAKE_BIN_WITH_PATH} $@ +${MLC_CMAKE_BIN_WITH_PATH} $@ } -export CC=${CM_C_COMPILER_WITH_PATH} -export CXX=${CM_CXX_COMPILER_WITH_PATH} +export CC=${MLC_C_COMPILER_WITH_PATH} +export CXX=${MLC_CXX_COMPILER_WITH_PATH} export -f cmake -cd ${CM_QAIC_SOFTWARE_KIT_PATH} +cd ${MLC_QAIC_SOFTWARE_KIT_PATH} rm -rf build ./bootstrap.sh test $? -eq 0 || exit $? 
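For readers unfamiliar with the `export -f` idiom kept in the run.sh hunk above: bash resolves shell functions before PATH lookups, and exporting the function makes the override visible to child bash processes such as `./bootstrap.sh`. A minimal standalone sketch of the pattern (the cmake path assigned below is a placeholder for illustration, not part of this patch):

```bash
#!/bin/bash
# Placeholder value; in the real script this variable is exported by the
# get-cmake dependency before run.sh executes.
export MLC_CMAKE_BIN_WITH_PATH=/opt/cmake/bin/cmake

# A shell function named `cmake` shadows any cmake binary found in PATH.
function cmake() {
    ${MLC_CMAKE_BIN_WITH_PATH} "$@"
}

# `export -f` publishes the function to child bash shells, so build scripts
# invoked from here (e.g. ./bootstrap.sh) also use the selected binary.
export -f cmake

cmake --version
```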
diff --git a/script/get-rclone-config/customize.py b/script/get-rclone-config/customize.py index 68c1f59a0..f90b972bc 100644 --- a/script/get-rclone-config/customize.py +++ b/script/get-rclone-config/customize.py @@ -12,10 +12,10 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') - if env.get('CM_RCLONE_CONFIG_CMD', '') != '': - env['CM_RUN_CMD'] = env['CM_RCLONE_CONFIG_CMD'] + if env.get('MLC_RCLONE_CONFIG_CMD', '') != '': + env['MLC_RUN_CMD'] = env['MLC_RCLONE_CONFIG_CMD'] return {'return': 0} diff --git a/script/get-rclone-config/meta.yaml b/script/get-rclone-config/meta.yaml index a8fa32218..8ebbe168a 100644 --- a/script/get-rclone-config/meta.yaml +++ b/script/get-rclone-config/meta.yaml @@ -10,4 +10,4 @@ uid: 6c59ddbc6cd046e3 variations: mlc-inference: env: - CM_RCLONE_CONFIG_CMD: 'rclone config create mlc-inference s3 provider=Cloudflare access_key_id=f65ba5eef400db161ea49967de89f47b secret_access_key=fbea333914c292b854f14d3fe232bad6c5407bf0ab1bebf78833c2b359bdfd2b endpoint=https://c2686074cb2caf5cbaf6d134bdba8b47.r2.cloudflarestorage.com' + MLC_RCLONE_CONFIG_CMD: 'rclone config create mlc-inference s3 provider=Cloudflare access_key_id=f65ba5eef400db161ea49967de89f47b secret_access_key=fbea333914c292b854f14d3fe232bad6c5407bf0ab1bebf78833c2b359bdfd2b endpoint=https://c2686074cb2caf5cbaf6d134bdba8b47.r2.cloudflarestorage.com' diff --git a/script/get-rclone-config/run.sh b/script/get-rclone-config/run.sh index 4c23c380e..32cf4d51e 100644 --- a/script/get-rclone-config/run.sh +++ b/script/get-rclone-config/run.sh @@ -1,17 +1,17 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency echo "Running: " -echo "${CM_RUN_CMD}" +echo "${MLC_RUN_CMD}" echo "" -if [[ ${CM_FAKE_RUN} != "yes" ]]; then - eval "${CM_RUN_CMD}" +if [[ ${MLC_FAKE_RUN} != "yes" ]]; then + eval "${MLC_RUN_CMD}" test $? 
-eq 0 || exit 1 fi diff --git a/script/get-rclone/configs/rclone.conf b/script/get-rclone/configs/rclone.conf index 45699a0a2..9ec87f8ef 100644 --- a/script/get-rclone/configs/rclone.conf +++ b/script/get-rclone/configs/rclone.conf @@ -1,4 +1,4 @@ -[cm-team] +[mlc-team] type = drive scope = drive.readonly service_account_file = diff --git a/script/get-rclone/customize.py b/script/get-rclone/customize.py index 197e75f1e..25ff0d703 100644 --- a/script/get-rclone/customize.py +++ b/script/get-rclone/customize.py @@ -17,11 +17,11 @@ def preprocess(i): run_script_input = i['run_script_input'] automation = i['automation'] - need_version = env.get('CM_VERSION', '') + need_version = env.get('MLC_VERSION', '') host_os_machine = '' if os_info['platform'] != 'windows': - host_os_machine = env['CM_HOST_OS_MACHINE'] # ABI + host_os_machine = env['MLC_HOST_OS_MACHINE'] # ABI r = automation.detect_version_using_script({ 'env': env, @@ -32,7 +32,7 @@ def preprocess(i): if r['return'] == 16: install_script = 'install' if os_info['platform'] != 'windows' and env.get( - 'CM_RCLONE_SYSTEM', '') == 'yes': + 'MLC_RCLONE_SYSTEM', '') == 'yes': install_script += '-system' else: if os_info['platform'] != 'windows': @@ -47,19 +47,19 @@ def preprocess(i): elif os_info['platform'] == 'linux': filename = filebase.format(need_version, 'linux', x1) - env['CM_RCLONE_URL'] = urlbase.format( + env['MLC_RCLONE_URL'] = urlbase.format( need_version, filename + '.zip') - env['CM_RCLONE_ARCHIVE'] = filename - env['CM_RCLONE_ARCHIVE_WITH_EXT'] = filename + '.zip' + env['MLC_RCLONE_ARCHIVE'] = filename + env['MLC_RCLONE_ARCHIVE_WITH_EXT'] = filename + '.zip' print( recursion_spaces + 'Downloading {}'.format( - env['CM_RCLONE_URL'])) + env['MLC_RCLONE_URL'])) cur_dir = os.getcwd() path_bin = os.path.join(cur_dir, file_name) - env['CM_RCLONE_BIN_WITH_PATH'] = path_bin + env['MLC_RCLONE_BIN_WITH_PATH'] = path_bin if not env.get('+PATH', []): env['+PATH'] = [] @@ -83,7 +83,7 @@ def preprocess(i): def detect_version(i): r = i['automation'].parse_version({'match_text': r'rclone v([\d.]+)', 'group_number': 1, - 'env_key': 'CM_RCLONE_VERSION', + 'env_key': 'MLC_RCLONE_VERSION', 'which_env': i['env']}) if r['return'] > 0: return r @@ -100,14 +100,14 @@ def postprocess(i): os_info = i['os_info'] env = i['env'] - gdrive = env.get('CM_RCLONE_GDRIVE', '') + gdrive = env.get('MLC_RCLONE_GDRIVE', '') if gdrive == "yes": config = configparser.ConfigParser() config_file_path = os.path.join( - env['CM_TMP_CURRENT_SCRIPT_PATH'], "configs", "rclone.conf") + env['MLC_TMP_CURRENT_SCRIPT_PATH'], "configs", "rclone.conf") config.read(config_file_path) - # config['cm-team']['service_account_file'] = os.path.join(env['CM_TMP_CURRENT_SCRIPT_PATH'], "accessfiles", "rclone-gdrive.json") + # config['mlc-team']['service_account_file'] = os.path.join(env['MLC_TMP_CURRENT_SCRIPT_PATH'], "accessfiles", "rclone-gdrive.json") default_config_path = os.path.join( os.path.expanduser('~'), ".config", "rclone", "rclone.conf") @@ -131,17 +131,17 @@ def postprocess(i): version = r['version'] - env['CM_RCLONE_CACHE_TAGS'] = 'version-' + version + env['MLC_RCLONE_CACHE_TAGS'] = 'version-' + version file_name = 'rclone.exe' if os_info['platform'] == 'windows' else 'rclone' if os_info['platform'] == 'windows' or env.get( - 'CM_RCLONE_SYSTEM', '') != 'yes': + 'MLC_RCLONE_SYSTEM', '') != 'yes': cur_dir = os.getcwd() path_bin = os.path.join(cur_dir, file_name) if os.path.isfile(path_bin): # Was downloaded and extracted by CM - env['CM_RCLONE_BIN_WITH_PATH'] = path_bin + 
env['MLC_RCLONE_BIN_WITH_PATH'] = path_bin env['+PATH'] = [cur_dir] return {'return': 0, 'version': version} diff --git a/script/get-rclone/install.bat b/script/get-rclone/install.bat index 0c12f5c1b..61e830b6d 100644 --- a/script/get-rclone/install.bat +++ b/script/get-rclone/install.bat @@ -1,12 +1,12 @@ -del /Q /S rclone-v%CM_VERSION%-windows-amd64.zip > NUL 2>&1 +del /Q /S rclone-v%MLC_VERSION%-windows-amd64.zip > NUL 2>&1 -wget --no-check-certificate https://downloads.rclone.org/v%CM_VERSION%/rclone-v%CM_VERSION%-windows-amd64.zip +wget --no-check-certificate https://downloads.rclone.org/v%MLC_VERSION%/rclone-v%MLC_VERSION%-windows-amd64.zip IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% -unzip -o rclone-v%CM_VERSION%-windows-amd64.zip +unzip -o rclone-v%MLC_VERSION%-windows-amd64.zip IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% -move /Y rclone-v%CM_VERSION%-windows-amd64\* . +move /Y rclone-v%MLC_VERSION%-windows-amd64\* . -del /Q /S rclone-v%CM_VERSION%-windows-amd64.zip > NUL 2>&1 +del /Q /S rclone-v%MLC_VERSION%-windows-amd64.zip > NUL 2>&1 diff --git a/script/get-rclone/install.sh b/script/get-rclone/install.sh index d3f6ede34..fe4b2fc80 100644 --- a/script/get-rclone/install.sh +++ b/script/get-rclone/install.sh @@ -1,13 +1,13 @@ #!/bin/bash -rm -rf ${CM_RCLONE_ARCHIVE_WITH_EXT} +rm -rf ${MLC_RCLONE_ARCHIVE_WITH_EXT} rm -rf rclone -wget ${CM_RCLONE_URL} --no-check-certificate +wget ${MLC_RCLONE_URL} --no-check-certificate test $? -eq 0 || exit 1 -unzip ${CM_RCLONE_ARCHIVE_WITH_EXT} +unzip ${MLC_RCLONE_ARCHIVE_WITH_EXT} test $? -eq 0 || exit 1 -mv ${CM_RCLONE_ARCHIVE}/rclone . +mv ${MLC_RCLONE_ARCHIVE}/rclone . test $? -eq 0 || exit 1 diff --git a/script/get-rclone/meta.yaml b/script/get-rclone/meta.yaml index e2d8e02fc..b0727d484 100644 --- a/script/get-rclone/meta.yaml +++ b/script/get-rclone/meta.yaml @@ -7,13 +7,13 @@ default_version: 1.65.2 deps: - tags: detect,os - enable_if_env: - CM_HOST_OS_TYPE: + MLC_HOST_OS_TYPE: - windows tags: get,sys-utils-min new_env_keys: -- CM_RCLONE_CACHE_TAGS -- CM_RCLONE_BIN_WITH_PATH -- CM_RCLONE_VERSION +- MLC_RCLONE_CACHE_TAGS +- MLC_RCLONE_BIN_WITH_PATH +- MLC_RCLONE_VERSION - +PATH tags: - get @@ -22,9 +22,9 @@ uid: 22ffb43c49c9419e variations: gdrive: env: - CM_RCLONE_GDRIVE: 'yes' + MLC_RCLONE_GDRIVE: 'yes' system: env: - CM_RCLONE_SYSTEM: 'yes' + MLC_RCLONE_SYSTEM: 'yes' warnings: - This CM script will install rclone using sudo/brew! 
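The two rclone-related scripts above work as a pair: get-rclone-config registers the `mlc-inference` remote, and downstream scripts (such as the `_mlcommons` variation of get-preprocessed-dataset-openorca earlier in this patch) pull artifacts through it via `MLC_RCLONE_URL`. A rough manual equivalent of that flow, assuming rclone is installed and the remote has already been created as in the `mlc-inference` variation:

```bash
# List the top-level folders in the MLPerf inference working-group bucket.
rclone lsd mlc-inference:mlcommons-inference-wg-public

# Fetch the preprocessed OpenOrca artifacts referenced by MLC_RCLONE_URL;
# -P prints transfer progress. The destination folder mirrors the
# MLC_EXTRACT_TO_FOLDER value from the openorca meta.yaml in this patch.
rclone copy mlc-inference:mlcommons-inference-wg-public/open_orca ./openorca-preprocessed -P
```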
diff --git a/script/get-rocm-devices/customize.py b/script/get-rocm-devices/customize.py index 69cb82ff1..76930d30d 100644 --- a/script/get-rocm-devices/customize.py +++ b/script/get-rocm-devices/customize.py @@ -7,7 +7,7 @@ def preprocess(i): env = i['env'] - if str(env.get('CM_DETECT_USING_HIP-PYTHON', '') + if str(env.get('MLC_DETECT_USING_HIP-PYTHON', '') ).lower() in ["1", "yes", "true"]: i['run_script_input']['script_name'] = 'detect' @@ -54,11 +54,11 @@ def postprocess(i): gpu[gpu_id][key] = val p[key] = val - key_env = 'CM_ROCM_DEVICE_PROP_' + key.upper().replace(' ', '_') + key_env = 'MLC_ROCM_DEVICE_PROP_' + key.upper().replace(' ', '_') env[key_env] = val state['cm_rocm_num_devices'] = gpu_id + 1 - env['CM_ROCM_NUM_DEVICES'] = gpu_id + 1 + env['MLC_ROCM_NUM_DEVICES'] = gpu_id + 1 state['cm_rocm_device_prop'] = p state['cm_rocm_devices_prop'] = gpu diff --git a/script/get-rocm-devices/detect.sh b/script/get-rocm-devices/detect.sh index 8f6b93596..9de8aa64b 100644 --- a/script/get-rocm-devices/detect.sh +++ b/script/get-rocm-devices/detect.sh @@ -1,4 +1,4 @@ #!/bin/bash -${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/detect.py +${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/detect.py test $? -eq 0 || exit $? diff --git a/script/get-rocm-devices/run.sh b/script/get-rocm-devices/run.sh index 7b4fa0386..4d9be9008 100644 --- a/script/get-rocm-devices/run.sh +++ b/script/get-rocm-devices/run.sh @@ -24,8 +24,8 @@ echo "" echo "Running program ..." echo "" -cd ${CM_TMP_CURRENT_PATH} +cd ${MLC_TMP_CURRENT_PATH} -python ${CM_TMP_CURRENT_SCRIPT_PATH}/detect.py > tmp-run.out +python ${MLC_TMP_CURRENT_SCRIPT_PATH}/detect.py > tmp-run.out cat tmp-run.out test $? -eq 0 || exit 1 diff --git a/script/get-rocm/customize.py b/script/get-rocm/customize.py index f864221df..351c338aa 100644 --- a/script/get-rocm/customize.py +++ b/script/get-rocm/customize.py @@ -12,20 +12,20 @@ def preprocess(i): file_name = 'rocminfo.exe' if os_info['platform'] == 'windows' else 'rocminfo' env['FILE_NAME'] = file_name - env['CM_TMP_PATH'] = "/opt/rocm/bin" + env['MLC_TMP_PATH'] = "/opt/rocm/bin" - if 'CM_ROCM_BIN_WITH_PATH' not in env: + if 'MLC_ROCM_BIN_WITH_PATH' not in env: r = i['automation'].find_artifact({'file_name': file_name, 'env': env, 'os_info': os_info, 'default_path_env_key': 'PATH', 'detect_version': True, - 'env_path_key': 'CM_ROCM_BIN_WITH_PATH', + 'env_path_key': 'MLC_ROCM_BIN_WITH_PATH', 'run_script_input': i['run_script_input'], 'recursion_spaces': recursion_spaces}) if r['return'] > 0: if r['return'] == 16: - env['CM_REQUIRE_INSTALL'] = "yes" + env['MLC_REQUIRE_INSTALL'] = "yes" return {'return': 0} else: return r @@ -36,7 +36,7 @@ def preprocess(i): def detect_version(i): r = i['automation'].parse_version({'match_text': r'([\d.]+[-\d+]*)', 'group_number': 1, - 'env_key': 'CM_ROCM_VERSION', + 'env_key': 'MLC_ROCM_VERSION', 'which_env': i['env']}) if r['return'] > 0: return r @@ -56,11 +56,11 @@ def postprocess(i): return r version = r['version'] - found_file_path = env['CM_ROCM_BIN_WITH_PATH'] + found_file_path = env['MLC_ROCM_BIN_WITH_PATH'] found_path = os.path.dirname(found_file_path) - env['CM_ROCM_INSTALLED_PATH'] = found_path + env['MLC_ROCM_INSTALLED_PATH'] = found_path - env['CM_ROCM_CACHE_TAGS'] = 'version-' + version + env['MLC_ROCM_CACHE_TAGS'] = 'version-' + version return {'return': 0, 'version': version} diff --git a/script/get-rocm/meta.yaml b/script/get-rocm/meta.yaml index 0390db5a2..6477f9865 100644 --- a/script/get-rocm/meta.yaml +++
b/script/get-rocm/meta.yaml @@ -5,11 +5,11 @@ cache: true category: AI/ML frameworks clean_files: [] new_env_keys: -- CM_ROCM_* +- MLC_ROCM_* - +PATH prehook_deps: - enable_if_env: - CM_REQUIRE_INSTALL: + MLC_REQUIRE_INSTALL: - 'yes' reuse_version: true tags: install,rocm diff --git a/script/get-rocm/run.sh b/script/get-rocm/run.sh index f7c8e888c..7503d8afd 100644 --- a/script/get-rocm/run.sh +++ b/script/get-rocm/run.sh @@ -1,5 +1,5 @@ #!/bin/bash -dir="${CM_ROCM_BIN_WITH_PATH%/*}/../" +dir="${MLC_ROCM_BIN_WITH_PATH%/*}/../" cat ${dir}/.info/version > tmp-ver.out test $? -eq 0 || exit 1 diff --git a/script/get-spec-ptd/README-extra.md b/script/get-spec-ptd/README-extra.md index 4061851ca..1b5f4d7fb 100644 --- a/script/get-spec-ptd/README-extra.md +++ b/script/get-spec-ptd/README-extra.md @@ -8,8 +8,8 @@ cm run script --tags=get,mlperf,power,src ``` ## Exported Variables -* `CM_SPEC_PTD_PATH'`: Path to the PTDaemon -* `CM_MLPERF_PTD_PATH'`: Path to the PTDaemon (same as `CM_SPEC_PTD_DAEMON`) +* `MLC_SPEC_PTD_PATH`: Path to the PTDaemon +* `MLC_MLPERF_PTD_PATH`: Path to the PTDaemon (same as `MLC_SPEC_PTD_PATH`) ## Supported and Tested OS 1. Ubuntu 18.04, 20.04, 22.04 diff --git a/script/get-spec-ptd/customize.py b/script/get-spec-ptd/customize.py index 3607fa621..9cab14690 100644 --- a/script/get-spec-ptd/customize.py +++ b/script/get-spec-ptd/customize.py @@ -16,15 +16,15 @@ def postprocess(i): env = i['env'] state = i['state'] - if env['CM_HOST_OS_TYPE'].lower() == "windows": + if env['MLC_HOST_OS_TYPE'].lower() == "windows": binary_name = "ptd-windows-x86.exe" else: binary_name = "ptd-linux-x86" - if env.get('CM_MLPERF_PTD_PATH', '') == '': - env['CM_MLPERF_PTD_PATH'] = os.path.join( - env['CM_MLPERF_POWER_SOURCE'], 'PTD', 'binaries', binary_name) + if env.get('MLC_MLPERF_PTD_PATH', '') == '': + env['MLC_MLPERF_PTD_PATH'] = os.path.join( + env['MLC_MLPERF_POWER_SOURCE'], 'PTD', 'binaries', binary_name) - file_path = env['CM_MLPERF_PTD_PATH'] + file_path = env['MLC_MLPERF_PTD_PATH'] current_permissions = os.stat(file_path).st_mode # Check if the file already has execute permissions @@ -32,6 +32,6 @@ # Add execute permissions for the user os.chmod(file_path, current_permissions | stat.S_IXUSR) - env['CM_SPEC_PTD_PATH'] = env['CM_MLPERF_PTD_PATH'] + env['MLC_SPEC_PTD_PATH'] = env['MLC_MLPERF_PTD_PATH'] return {'return': 0} diff --git a/script/get-spec-ptd/meta.yaml b/script/get-spec-ptd/meta.yaml index c4d7c8218..f642f7053 100644 --- a/script/get-spec-ptd/meta.yaml +++ b/script/get-spec-ptd/meta.yaml @@ -4,11 +4,11 @@ automation_uid: 5b4e0237da074764 cache: true category: MLPerf benchmark support default_env: - CM_GIT_CHECKOUT: main - CM_GIT_DEPTH: --depth 1 - CM_GIT_PATCH: 'no' - CM_GIT_RECURSE_SUBMODULES: ' ' - CM_GIT_URL: https://github.com/mlcommons/power.git + MLC_GIT_CHECKOUT: main + MLC_GIT_DEPTH: --depth 1 + MLC_GIT_PATCH: 'no' + MLC_GIT_RECURSE_SUBMODULES: ' ' + MLC_GIT_URL: https://github.com/mlcommons/power.git default_version: main deps: - tags: detect,os @@ -17,19 +17,19 @@ deps: - python3 tags: get,python3 - env: - CM_GIT_AUTH: 'yes' - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_POWER_SOURCE + MLC_GIT_AUTH: 'yes' + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_MLPERF_POWER_SOURCE extra_cache_tags: mlperf,power,spec,ptdaemon,ptd force_env_keys: - - CM_GIT_* + - MLC_GIT_* tags: get,git,repo,_repo.https://github.com/mlcommons/power input_description: input: Path to SPEC PTDaemon (Optional) input_mapping: - input: CM_INPUT + input: MLC_INPUT new_env_keys: -
CM_SPEC_PTD_PATH -- CM_MLPERF_PTD_PATH +- MLC_SPEC_PTD_PATH +- MLC_MLPERF_PTD_PATH tags: - get - spec @@ -46,4 +46,4 @@ versions: env: {} main: env: - CM_GIT_CHECKOUT: main + MLC_GIT_CHECKOUT: main diff --git a/script/get-spec-ptd/run.sh b/script/get-spec-ptd/run.sh index f0f2e7eae..9550c4b11 100644 --- a/script/get-spec-ptd/run.sh +++ b/script/get-spec-ptd/run.sh @@ -1,10 +1,10 @@ #!/bin/bash -if [[ -n "${CM_INPUT}" ]]; then +if [[ -n "${MLC_INPUT}" ]]; then exit 0 fi -cd ${CM_MLPERF_POWER_SOURCE} +cd ${MLC_MLPERF_POWER_SOURCE} chmod +x "inference_v1.0/ptd-linux-x86" chmod +x "inference_v1.0/ptd-windows-x86.exe" diff --git a/script/get-sys-utils-cm/customize.py b/script/get-sys-utils-cm/customize.py index 0582e9a5e..06887bc3f 100644 --- a/script/get-sys-utils-cm/customize.py +++ b/script/get-sys-utils-cm/customize.py @@ -11,12 +11,12 @@ def preprocess(i): automation = i['automation'] cm = automation.action_object - if env.get('CM_HOST_OS_FLAVOR', '') == 'amzn': - env['CM_PACKAGE_TOOL'] = "yum" + if env.get('MLC_HOST_OS_FLAVOR', '') == 'amzn': + env['MLC_PACKAGE_TOOL'] = "yum" i['run_script_input']['script_name'] = "run-rhel" # Test (not needed - will be removed) - if str(env.get('CM_SKIP_SYS_UTILS', '')).lower() in [True, 'yes', 'on']: + if str(env.get('MLC_SKIP_SYS_UTILS', '')).lower() in ['true', 'yes', 'on']: return {'return': 0, 'skip': True} @@ -33,7 +33,7 @@ def preprocess(i): # # path = os.getcwd() # -# clean_dirs = env.get('CM_CLEAN_DIRS','').strip() +# clean_dirs = env.get('MLC_CLEAN_DIRS','').strip() # if clean_dirs!='': # import shutil # for cd in clean_dirs.split(','): @@ -42,7 +42,7 @@ # print ('Clearning directory {}'.format(cd)) # shutil.rmtree(cd) # -# url = env['CM_PACKAGE_WIN_URL'] +# url = env['MLC_PACKAGE_WIN_URL'] # # urls = [url] if ';' not in url else url.split(';') # diff --git a/script/get-sys-utils-cm/do_pip_installs.sh b/script/get-sys-utils-cm/do_pip_installs.sh index cbf7e5857..f0a9251aa 100644 --- a/script/get-sys-utils-cm/do_pip_installs.sh +++ b/script/get-sys-utils-cm/do_pip_installs.sh @@ -1,6 +1,6 @@ #!/bin/bash PIP_EXTRA=`python3 -c "import importlib.metadata; print(' --break-system-packages ' if int(importlib.metadata.version('pip').split('.')[0]) >= 23 else '')"` -cmd="python3 -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt ${CM_PYTHON_PIP_USER} ${CM_PYTHON_PIP_COMMON_EXTRA} ${PIP_EXTRA}" +cmd="python3 -m pip install -r ${MLC_TMP_CURRENT_SCRIPT_PATH}/requirements.txt ${MLC_PYTHON_PIP_USER} ${MLC_PYTHON_PIP_COMMON_EXTRA} ${PIP_EXTRA}" echo $cmd eval $cmd diff --git a/script/get-sys-utils-cm/do_pip_installs.sh.old b/script/get-sys-utils-cm/do_pip_installs.sh.old index 55a149249..441f884dc 100644 --- a/script/get-sys-utils-cm/do_pip_installs.sh.old +++ b/script/get-sys-utils-cm/do_pip_installs.sh.old @@ -1,6 +1,6 @@ #!/bin/bash PIP_EXTRA=`python3 -c "import pkg_resources; print(' --break-system-packages ' if int(pkg_resources.get_distribution('pip').version.split('.')[0]) >= 23 else '')"` -cmd="python3 -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt ${CM_PYTHON_PIP_USER} ${CM_PYTHON_PIP_COMMON_EXTRA} ${PIP_EXTRA}" +cmd="python3 -m pip install -r ${MLC_TMP_CURRENT_SCRIPT_PATH}/requirements.txt ${MLC_PYTHON_PIP_USER} ${MLC_PYTHON_PIP_COMMON_EXTRA} ${PIP_EXTRA}" echo $cmd eval $cmd diff --git a/script/get-sys-utils-cm/meta.yaml b/script/get-sys-utils-cm/meta.yaml index 4d3e755ed..b1bfc03ed 100644 --- a/script/get-sys-utils-cm/meta.yaml +++ b/script/get-sys-utils-cm/meta.yaml @@ -12,12 +12,12 @@ deps: -
tags: detect,os env: - CM_CLEAN_DIRS: bin - CM_PACKAGE_WIN_URL: https://zenodo.org/records/13868077/files/cm-artifact-os-windows-32.zip?download=1 - CM_SUDO: sudo + MLC_CLEAN_DIRS: bin + MLC_PACKAGE_WIN_URL: https://zenodo.org/records/13868077/files/mlc-artifact-os-windows-32.zip?download=1 + MLC_SUDO: sudo input_mapping: - skip: CM_SKIP_SYS_UTILS + skip: MLC_SKIP_SYS_UTILS new_env_keys: - +PATH @@ -29,8 +29,8 @@ tags: variations: user: env: - CM_PYTHON_PIP_USER: --user + MLC_PYTHON_PIP_USER: --user skip_python_deps: env: - CM_SKIP_PYTHON_DEPS: "yes" + MLC_SKIP_PYTHON_DEPS: "yes" diff --git a/script/get-sys-utils-cm/run-arch.sh b/script/get-sys-utils-cm/run-arch.sh index eb71848ad..1c2105c62 100644 --- a/script/get-sys-utils-cm/run-arch.sh +++ b/script/get-sys-utils-cm/run-arch.sh @@ -4,17 +4,17 @@ echo "***************************************************" echo "Installing some system dependencies via sudo pacman" -if [[ "$CM_QUIET" != "yes" ]]; then +if [[ "$MLC_QUIET" != "yes" ]]; then echo "Enter skip to skip this step or press enter to continue:" read DUMMY if [[ "$DUMMY" == "skip" ]]; then exit 0; fi fi -CM_PACKAGE_TOOL=${CM_PACKAGE_TOOL:-pacman} +MLC_PACKAGE_TOOL=${MLC_PACKAGE_TOOL:-pacman} -${CM_SUDO} ${CM_PACKAGE_TOOL} -Syu && \ - ${CM_SUDO} ${CM_PACKAGE_TOOL} -Sy \ +${MLC_SUDO} ${MLC_PACKAGE_TOOL} -Syu && \ + ${MLC_SUDO} ${MLC_PACKAGE_TOOL} -Sy \ acl autoconf \ bzip2 \ ca-certificates curl cmake \ @@ -33,7 +33,7 @@ ${CM_SUDO} ${CM_PACKAGE_TOOL} -Syu && \ # Install Python deps though preference is to install them # via cmr "get generic-python-lib _package.{Python PIP package name}" -if [[ "${CM_SKIP_PYTHON_DEPS}" != "yes" ]]; then - . ${CM_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh +if [[ "${MLC_SKIP_PYTHON_DEPS}" != "yes" ]]; then + . ${MLC_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh test $? -eq 0 || exit $? fi diff --git a/script/get-sys-utils-cm/run-debian.sh b/script/get-sys-utils-cm/run-debian.sh index 488da76fb..89112f7f1 100644 --- a/script/get-sys-utils-cm/run-debian.sh +++ b/script/get-sys-utils-cm/run-debian.sh @@ -4,17 +4,17 @@ echo "************************************************" echo "Installing some system dependencies via sudo apt" -if [[ "$CM_QUIET" != "yes" ]]; then +if [[ "$MLC_QUIET" != "yes" ]]; then echo "Enter skip to skip this step or press enter to continue:" read DUMMY if [[ "$DUMMY" == "skip" ]]; then exit 0; fi fi -CM_APT_TOOL=${CM_APT_TOOL:-apt-get} +MLC_APT_TOOL=${MLC_APT_TOOL:-apt-get} -${CM_SUDO} ${CM_APT_TOOL} update && \ - ${CM_SUDO} ${CM_APT_TOOL} install -y --no-install-recommends \ +${MLC_SUDO} ${MLC_APT_TOOL} update && \ + ${MLC_SUDO} ${MLC_APT_TOOL} install -y --no-install-recommends \ apt-utils \ git \ wget \ @@ -54,7 +54,7 @@ ${CM_SUDO} ${CM_APT_TOOL} update && \ # Install Python deps though preference is to install them # via cmr "get generic-python-lib _package.{Python PIP package name}" -if [[ "${CM_SKIP_PYTHON_DEPS}" != "yes" ]]; then - . ${CM_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh +if [[ "${MLC_SKIP_PYTHON_DEPS}" != "yes" ]]; then + . ${MLC_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh test $? -eq 0 || exit $? 
fi diff --git a/script/get-sys-utils-cm/run-macos.sh b/script/get-sys-utils-cm/run-macos.sh index afb2ef8e6..a76c0524d 100644 --- a/script/get-sys-utils-cm/run-macos.sh +++ b/script/get-sys-utils-cm/run-macos.sh @@ -3,7 +3,7 @@ echo "***************************************************" echo "Installing some system dependencies via brew" -if [[ "$CM_QUIET" != "yes" ]]; then +if [[ "$MLC_QUIET" != "yes" ]]; then echo "Enter skip to skip this step or press enter to continue:" read DUMMY @@ -37,7 +37,7 @@ brew update && \ # Install Python deps though preference is to install them # via cmr "get generic-python-lib _package.{Python PIP package name}" -if [[ "${CM_SKIP_PYTHON_DEPS}" != "yes" ]]; then - . ${CM_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh +if [[ "${MLC_SKIP_PYTHON_DEPS}" != "yes" ]]; then + . ${MLC_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh test $? -eq 0 || exit $? fi diff --git a/script/get-sys-utils-cm/run-rhel.sh b/script/get-sys-utils-cm/run-rhel.sh index 87b03b777..05ba53c04 100644 --- a/script/get-sys-utils-cm/run-rhel.sh +++ b/script/get-sys-utils-cm/run-rhel.sh @@ -4,21 +4,21 @@ echo "************************************************" echo "Installing some system dependencies via sudo dnf" -if [[ "$CM_QUIET" != "yes" ]]; then +if [[ "$MLC_QUIET" != "yes" ]]; then echo "Enter skip to skip this step or press enter to continue:" read DUMMY if [[ "$DUMMY" == "skip" ]]; then exit 0; fi fi -if [[ "$CM_HOST_OS_FLAVOR" == "amzn" ]]; then - ${CM_SUDO} yum groupinstall "Development Tools" +if [[ "$MLC_HOST_OS_FLAVOR" == "amzn" ]]; then + ${MLC_SUDO} yum groupinstall "Development Tools" fi -CM_PACKAGE_TOOL=${CM_PACKAGE_TOOL:-dnf} +MLC_PACKAGE_TOOL=${MLC_PACKAGE_TOOL:-dnf} -${CM_SUDO} ${CM_PACKAGE_TOOL} update && \ - ${CM_SUDO} ${CM_PACKAGE_TOOL} --skip-broken install -y \ +${MLC_SUDO} ${MLC_PACKAGE_TOOL} update && \ + ${MLC_SUDO} ${MLC_PACKAGE_TOOL} --skip-broken install -y \ acl autoconf \ bzip2-devel bzip2 \ ca-certificates curl cmake \ @@ -40,7 +40,7 @@ ${CM_SUDO} ${CM_PACKAGE_TOOL} update && \ # Install Python deps though preference is to install them # via cmr "get generic-python-lib _package.{Python PIP package name}" -if [[ "${CM_SKIP_PYTHON_DEPS}" != "yes" ]]; then - . ${CM_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh +if [[ "${MLC_SKIP_PYTHON_DEPS}" != "yes" ]]; then + . ${MLC_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh test $? -eq 0 || exit $? 
fi diff --git a/script/get-sys-utils-cm/run-sles.sh b/script/get-sys-utils-cm/run-sles.sh index 845c0b069..c597387a9 100644 --- a/script/get-sys-utils-cm/run-sles.sh +++ b/script/get-sys-utils-cm/run-sles.sh @@ -4,18 +4,18 @@ echo "***************************************************" echo "Installing some system dependencies via sudo zypper" -if [[ "$CM_QUIET" != "yes" ]]; then +if [[ "$MLC_QUIET" != "yes" ]]; then echo "Enter skip to skip this step or press enter to continue:" read DUMMY if [[ "$DUMMY" == "skip" ]]; then exit 0; fi fi -CM_PACKAGE_TOOL=${CM_PACKAGE_TOOL:-zypper} +MLC_PACKAGE_TOOL=${MLC_PACKAGE_TOOL:-zypper} -${CM_SUDO} ${CM_PACKAGE_TOOL} install -t pattern devel_basis -${CM_SUDO} ${CM_PACKAGE_TOOL} update && \ - ${CM_SUDO} ${CM_PACKAGE_TOOL} install -y \ +${MLC_SUDO} ${MLC_PACKAGE_TOOL} install -t pattern devel_basis +${MLC_SUDO} ${MLC_PACKAGE_TOOL} update && \ + ${MLC_SUDO} ${MLC_PACKAGE_TOOL} install -y \ bzip2-devel bzip2 \ ca-certificates curl cmake \ gcc git \ @@ -36,7 +36,7 @@ ${CM_SUDO} ${CM_PACKAGE_TOOL} update && \ # Install Python deps though preference is to install them # via cmr "get generic-python-lib _package.{Python PIP package name}" -if [[ "${CM_SKIP_PYTHON_DEPS}" != "yes" ]]; then - . ${CM_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh +if [[ "${MLC_SKIP_PYTHON_DEPS}" != "yes" ]]; then + . ${MLC_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh test $? -eq 0 || exit $? fi diff --git a/script/get-sys-utils-cm/run-ubuntu.sh b/script/get-sys-utils-cm/run-ubuntu.sh index 3fd66a552..72f7b6fd1 100644 --- a/script/get-sys-utils-cm/run-ubuntu.sh +++ b/script/get-sys-utils-cm/run-ubuntu.sh @@ -4,17 +4,17 @@ echo "************************************************" echo "Installing some system dependencies via sudo apt" -if [[ "$CM_QUIET" != "yes" ]]; then +if [[ "$MLC_QUIET" != "yes" ]]; then echo "Enter skip to skip this step or press enter to continue:" read DUMMY if [[ "$DUMMY" == "skip" ]]; then exit 0; fi fi -CM_APT_TOOL=${CM_APT_TOOL:-apt-get} +MLC_APT_TOOL=${MLC_APT_TOOL:-apt-get} -${CM_SUDO} ${CM_APT_TOOL} update && \ - ${CM_SUDO} DEBIAN_FRONTEND=noninteractive ${CM_APT_TOOL} install -y --no-install-recommends \ +${MLC_SUDO} ${MLC_APT_TOOL} update && \ + ${MLC_SUDO} DEBIAN_FRONTEND=noninteractive ${MLC_APT_TOOL} install -y --no-install-recommends \ apt-utils \ git \ wget \ @@ -58,7 +58,7 @@ ${CM_SUDO} ${CM_APT_TOOL} update && \ # Install Python deps though preference is to install them # via cmr "get generic-python-lib _package.{Python PIP package name}" -if [[ "${CM_SKIP_PYTHON_DEPS}" != "yes" ]]; then - . ${CM_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh +if [[ "${MLC_SKIP_PYTHON_DEPS}" != "yes" ]]; then + . ${MLC_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh test $? -eq 0 || exit $? 
fi
diff --git a/script/get-sys-utils-min/customize.py b/script/get-sys-utils-min/customize.py
index 9c91e0d90..002d8ca6a 100644
--- a/script/get-sys-utils-min/customize.py
+++ b/script/get-sys-utils-min/customize.py
@@ -16,7 +16,7 @@ def preprocess(i):
 
     path = os.getcwd()
 
-    clean_dirs = env.get('CM_CLEAN_DIRS', '').strip()
+    clean_dirs = env.get('MLC_CLEAN_DIRS', '').strip()
     if clean_dirs != '':
         import shutil
         for cd in clean_dirs.split(','):
@@ -25,7 +25,7 @@ def preprocess(i):
                 print('Clearning directory {}'.format(cd))
                 shutil.rmtree(cd)
 
-    url = env['CM_PACKAGE_WIN_URL']
+    url = env['MLC_PACKAGE_WIN_URL']
 
     urls = [url] if ';' not in url else url.split(';')
 
@@ -38,8 +38,8 @@ def preprocess(i):
         print('')
         print('Downloading from {}'.format(url))
 
-        env['CM_DAE_FINAL_ENV_NAME'] = 'FILENAME'
-        env['CM_OUTDIRNAME'] = os.getcwd()
+        env['MLC_DAE_FINAL_ENV_NAME'] = 'FILENAME'
+        env['MLC_OUTDIRNAME'] = os.getcwd()
 
         r = cm.access({'action': 'run',
                        'target': 'script',
                        'env': env,
diff --git a/script/get-sys-utils-min/meta.yaml b/script/get-sys-utils-min/meta.yaml
index c07f46eb5..7db6d866e 100644
--- a/script/get-sys-utils-min/meta.yaml
+++ b/script/get-sys-utils-min/meta.yaml
@@ -12,18 +12,18 @@ deps:
 - tags: detect,os
 - tags: get,generic,sys-util,_xz
   enable_if_env:
-    CM_HOST_OS_TYPE:
+    MLC_HOST_OS_TYPE:
     - windows
 - tags: get,generic,sys-util,_zlib
   enable_if_env:
-    CM_HOST_OS_TYPE:
+    MLC_HOST_OS_TYPE:
     - windows
 env:
-  CM_CLEAN_DIRS: bin
-  CM_WINDOWS_SYS_UTILS_MIN_INSTALL: yes
-  CM_PACKAGE_WIN_URL: https://zenodo.org/records/13868077/files/cm-artifact-os-windows-32.zip?download=1
-  CM_SUDO: sudo
+  MLC_CLEAN_DIRS: bin
+  MLC_WINDOWS_SYS_UTILS_MIN_INSTALL: yes
+  MLC_PACKAGE_WIN_URL: https://zenodo.org/records/13868077/files/mlc-artifact-os-windows-32.zip?download=1
+  MLC_SUDO: sudo
 new_env_keys:
 - +PATH
diff --git a/script/get-tensorrt/customize.py b/script/get-tensorrt/customize.py
index 2da2a3b2e..17f19d63f 100644
--- a/script/get-tensorrt/customize.py
+++ b/script/get-tensorrt/customize.py
@@ -12,8 +12,8 @@ def preprocess(i):
     env = i['env']
 
     # Not enforcing dev requirement for now
-    if env.get('CM_TENSORRT_TAR_FILE_PATH', '') == '' and env.get(
-            'CM_TENSORRT_REQUIRE_DEV1', '') != 'yes' and env.get('CM_HOST_PLATFORM_FLAVOR_', '') != 'aarch64':
+    if env.get('MLC_TENSORRT_TAR_FILE_PATH', '') == '' and env.get(
+            'MLC_TENSORRT_REQUIRE_DEV1', '') != 'yes' and env.get('MLC_HOST_PLATFORM_FLAVOR_', '') != 'aarch64':
 
         if os_info['platform'] == 'windows':
             extra_pre = ''
@@ -23,20 +23,20 @@ def preprocess(i):
             extra_ext = 'so'
 
         libfilename = extra_pre + 'nvinfer.' + extra_ext
-        env['CM_TENSORRT_VERSION'] = 'vdetected'
+        env['MLC_TENSORRT_VERSION'] = 'vdetected'
 
-        if env.get('CM_TMP_PATH', '').strip() != '':
-            path = env.get('CM_TMP_PATH')
+        if env.get('MLC_TMP_PATH', '').strip() != '':
+            path = env.get('MLC_TMP_PATH')
             if os.path.exists(os.path.join(path, libfilename)):
-                env['CM_TENSORRT_LIB_PATH'] = path
+                env['MLC_TENSORRT_LIB_PATH'] = path
                 return {'return': 0}
 
-        if not env.get('CM_TMP_PATH'):
-            env['CM_TMP_PATH'] = ''
+        if not env.get('MLC_TMP_PATH'):
+            env['MLC_TMP_PATH'] = ''
 
         if os_info['platform'] == 'windows':
-            if env.get('CM_INPUT', '').strip() == '' and env.get(
-                    'CM_TMP_PATH', '').strip() == '':
+            if env.get('MLC_INPUT', '').strip() == '' and env.get(
+                    'MLC_TMP_PATH', '').strip() == '':
                 # Check in "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA"
                 paths = []
                 for path in ["C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA",
@@ -52,30 +52,30 @@ def preprocess(i):
                 tmp_paths = ';'.join(paths)
                 tmp_paths += ';' + os.environ.get('PATH', '')
 
-                env['CM_TMP_PATH'] = tmp_paths
-                env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'
+                env['MLC_TMP_PATH'] = tmp_paths
+                env['MLC_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'
 
         else:
             # paths to cuda are not always in PATH - add a few typical locations to search for
             # (unless forced by a user)
 
-            if env.get('CM_INPUT', '').strip() == '':
-                if env.get('CM_TMP_PATH', '').strip() != '':
-                    env['CM_TMP_PATH'] += ':'
+            if env.get('MLC_INPUT', '').strip() == '':
+                if env.get('MLC_TMP_PATH', '').strip() != '':
+                    env['MLC_TMP_PATH'] += ':'
 
-                env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'
+                env['MLC_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'
 
             for lib_path in env.get(
-                    '+CM_HOST_OS_DEFAULT_LIBRARY_PATH', []):
+                    '+MLC_HOST_OS_DEFAULT_LIBRARY_PATH', []):
                 if (os.path.exists(lib_path)):
-                    env['CM_TMP_PATH'] += ':' + lib_path
+                    env['MLC_TMP_PATH'] += ':' + lib_path
 
         r = i['automation'].find_artifact({'file_name': libfilename,
                                            'env': env,
                                            'os_info': os_info,
                                            'default_path_env_key': 'LD_LIBRARY_PATH',
                                            'detect_version': False,
-                                           'env_path_key': 'CM_TENSORRT_LIB_WITH_PATH',
+                                           'env_path_key': 'MLC_TENSORRT_LIB_WITH_PATH',
                                            'run_script_input': i['run_script_input'],
                                            'recursion_spaces': recursion_spaces})
         if r['return'] > 0:
@@ -87,9 +87,9 @@ def preprocess(i):
     if os_info['platform'] == 'windows':
         return {'return': 1, 'error': 'Windows is currently not supported!'}
 
-    if env.get('CM_TENSORRT_TAR_FILE_PATH', '') == '':
+    if env.get('MLC_TENSORRT_TAR_FILE_PATH', '') == '':
         tags = ["get", "tensorrt"]
-        if env.get('CM_TENSORRT_REQUIRE_DEV', '') != 'yes':
+        if env.get('MLC_TENSORRT_REQUIRE_DEV', '') != 'yes':
             tags.append("_dev")
         return {'return': 1, 'error': 'Please envoke cmr "' +
                 " ".join(tags) + '" --tar_file={full path to the TensorRT tar file}'}
@@ -97,7 +97,7 @@ def preprocess(i):
     print('Untaring file - can take some time ...')
 
     file_name = "trtexec"
-    my_tar = tarfile.open(os.path.expanduser(env['CM_TENSORRT_TAR_FILE_PATH']))
+    my_tar = tarfile.open(os.path.expanduser(env['MLC_TENSORRT_TAR_FILE_PATH']))
     folder_name = my_tar.getnames()[0]
     if not os.path.exists(os.path.join(os.getcwd(), folder_name)):
         my_tar.extractall()
@@ -109,10 +109,10 @@ def preprocess(i):
         return {'return': 1, 'error': 'Extracted TensorRT folder does not seem proper - Version information missing'}
 
     version = version_match.group(1)
-    env['CM_TENSORRT_VERSION'] = version
-    env['CM_TENSORRT_INSTALL_PATH'] = os.path.join(os.getcwd(), folder_name)
-    env['CM_TENSORRT_LIB_PATH'] = os.path.join(os.getcwd(), folder_name, "lib")
-    env['CM_TMP_PATH'] = os.path.join(os.getcwd(), folder_name, "bin")
+    env['MLC_TENSORRT_VERSION'] = version
+    env['MLC_TENSORRT_INSTALL_PATH'] = os.path.join(os.getcwd(), folder_name)
+    env['MLC_TENSORRT_LIB_PATH'] = os.path.join(os.getcwd(), folder_name, "lib")
+    env['MLC_TMP_PATH'] = os.path.join(os.getcwd(), folder_name, "bin")
     env['+CPLUS_INCLUDE_PATH'] = [
         os.path.join(
             os.getcwd(),
@@ -143,13 +143,13 @@ def postprocess(i):
     if '+ LDFLAGS' not in env:
         env['+ LDFLAGS'] = []
 
-    # if 'CM_TENSORRT_LIB_WITH_PATH' in env:
-    # tensorrt_lib_path = os.path.dirname(env['CM_TENSORRT_LIB_WITH_PATH'])
-    if 'CM_TENSORRT_LIB_PATH' in env:
-        env['+LD_LIBRARY_PATH'].append(env['CM_TENSORRT_LIB_PATH'])
-        env['+PATH'].append(env['CM_TENSORRT_LIB_PATH'])  # for cmake
-        env['+ LDFLAGS'].append("-L" + env['CM_TENSORRT_LIB_PATH'])
+    # if 'MLC_TENSORRT_LIB_WITH_PATH' in env:
+    # tensorrt_lib_path = os.path.dirname(env['MLC_TENSORRT_LIB_WITH_PATH'])
+    if 'MLC_TENSORRT_LIB_PATH' in env:
+        env['+LD_LIBRARY_PATH'].append(env['MLC_TENSORRT_LIB_PATH'])
+        env['+PATH'].append(env['MLC_TENSORRT_LIB_PATH'])  # for cmake
+        env['+ LDFLAGS'].append("-L" + env['MLC_TENSORRT_LIB_PATH'])
 
-    version = env['CM_TENSORRT_VERSION']
+    version = env['MLC_TENSORRT_VERSION']
 
     return {'return': 0, 'version': version}
diff --git a/script/get-tensorrt/meta.yaml b/script/get-tensorrt/meta.yaml
index 3ded18c17..5370e37d4 100644
--- a/script/get-tensorrt/meta.yaml
+++ b/script/get-tensorrt/meta.yaml
@@ -17,10 +17,10 @@ input_description:
   tar_file: Full path to the TensorRT Tar file downloaded from the Nvidia website (https://developer.nvidia.com/tensorrt)
 input_mapping:
-  input: CM_INPUT
-  tar_file: CM_TENSORRT_TAR_FILE_PATH
+  input: MLC_INPUT
+  tar_file: MLC_TENSORRT_TAR_FILE_PATH
 new_env_keys:
-- CM_TENSORRT_*
+- MLC_TENSORRT_*
 - +PATH
 - +C_INCLUDE_PATH
 - +CPLUS_INCLUDE_PATH
@@ -35,4 +35,4 @@ uid: 2a84ca505e4c408d
 variations:
   dev:
     env:
-      CM_TENSORRT_REQUIRE_DEV: 'yes'
+      MLC_TENSORRT_REQUIRE_DEV: 'yes'
diff --git a/script/get-tensorrt/run.sh b/script/get-tensorrt/run.sh
index ac3b30a9d..14c78da24 100644
--- a/script/get-tensorrt/run.sh
+++ b/script/get-tensorrt/run.sh
@@ -1,32 +1,32 @@
 #!/bin/bash
 
-if [[ ${CM_TENSORRT_VERSION} == 'vdetected' ]]; then
+if [[ ${MLC_TENSORRT_VERSION} == 'vdetected' ]]; then
   exit 0;
 fi
 
 PIP_EXTRA=`python3 -c "import importlib.metadata; print(' --break-system-packages ' if int(importlib.metadata.version('pip').split('.')[0]) >= 23 else '')"`
 
-version=${CM_TENSORRT_VERSION}
-install_dir=${CM_TENSORRT_INSTALL_PATH}
-python_version=${CM_PYTHON_VERSION}
+version=${MLC_TENSORRT_VERSION}
+install_dir=${MLC_TENSORRT_INSTALL_PATH}
+python_version=${MLC_PYTHON_VERSION}
 python_version_info=(${python_version//./ })
 python_max_version=${python_version_info[0]}
 python_min_version=${python_version_info[1]}
 
 cd ${install_dir}/python
-${CM_PYTHON_BIN_WITH_PATH} -m pip install tensorrt-*-cp${python_max_version}${python_min_version}-none-${CM_HOST_OS_TYPE}_${CM_HOST_OS_MACHINE}.whl $PIP_EXTRA
+${MLC_PYTHON_BIN_WITH_PATH} -m pip install tensorrt-*-cp${python_max_version}${python_min_version}-none-${MLC_HOST_OS_TYPE}_${MLC_HOST_OS_MACHINE}.whl $PIP_EXTRA
 test $? -eq 0 || exit $?
 
 cd ${install_dir}/uff
-${CM_PYTHON_BIN_WITH_PATH} -m pip install uff-0.6.9-py2.py3-none-any.whl $PIP_EXTRA
+${MLC_PYTHON_BIN_WITH_PATH} -m pip install uff-0.6.9-py2.py3-none-any.whl $PIP_EXTRA
 test $? -eq 0 || exit $?
 cd ${install_dir}/graphsurgeon
-${CM_PYTHON_BIN_WITH_PATH} -m pip install graphsurgeon-0.4.6-py2.py3-none-any.whl $PIP_EXTRA
+${MLC_PYTHON_BIN_WITH_PATH} -m pip install graphsurgeon-0.4.6-py2.py3-none-any.whl $PIP_EXTRA
 test $? -eq 0 || exit $?
 
 cd ${install_dir}/onnx_graphsurgeon
-${CM_PYTHON_BIN_WITH_PATH} -m pip install onnx_graphsurgeon-0.3.12-py2.py3-none-any.whl $PIP_EXTRA
+${MLC_PYTHON_BIN_WITH_PATH} -m pip install onnx_graphsurgeon-0.3.12-py2.py3-none-any.whl $PIP_EXTRA
 test $? -eq 0 || exit $?
 
 #create softlinks for libnvinfer.so.7 and libnvinfer_plugin.so.7
diff --git a/script/get-terraform/README-extra.md b/script/get-terraform/README-extra.md
index 0fc57d505..e9cb784f7 100644
--- a/script/get-terraform/README-extra.md
+++ b/script/get-terraform/README-extra.md
@@ -2,7 +2,7 @@
 This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed Terraform on the system and if not found calls the [install script for Terraform](../script/install-terraform-from-src).
 
 ## Exported Variables
-* `CM_TERRAFORM_BIN_WITH_PATH`
+* `MLC_TERRAFORM_BIN_WITH_PATH`
 
 ## Supported and Tested OS
 1. Ubuntu 18.04, 20.04, 22.04
diff --git a/script/get-terraform/customize.py b/script/get-terraform/customize.py
index 8b051f01f..39ae728c2 100644
--- a/script/get-terraform/customize.py
+++ b/script/get-terraform/customize.py
@@ -12,18 +12,18 @@ def preprocess(i):
     file_name = 'terraform.exe' if os_info['platform'] == 'windows' else 'terraform'
     env['FILE_NAME'] = file_name
 
-    if 'CM_TERRAFORM_BIN_WITH_PATH' not in env:
+    if 'MLC_TERRAFORM_BIN_WITH_PATH' not in env:
         r = i['automation'].find_artifact({'file_name': file_name,
                                            'env': env,
                                            'os_info': os_info,
                                            'default_path_env_key': 'PATH',
                                            'detect_version': True,
-                                           'env_path_key': 'CM_TERRAFORM_BIN_WITH_PATH',
+                                           'env_path_key': 'MLC_TERRAFORM_BIN_WITH_PATH',
                                            'run_script_input': i['run_script_input'],
                                            'recursion_spaces': recursion_spaces})
         if r['return'] > 0:
             if r['return'] == 16:
-                env['CM_REQUIRE_INSTALL'] = "yes"
+                env['MLC_REQUIRE_INSTALL'] = "yes"
                 return {'return': 0}
             else:
                 return r
@@ -34,7 +34,7 @@ def preprocess(i):
 def detect_version(i):
     r = i['automation'].parse_version({'match_text': r'Terraform\s*v([\d.]+)',
                                        'group_number': 1,
-                                       'env_key': 'CM_TERRAFORM_VERSION',
+                                       'env_key': 'MLC_TERRAFORM_VERSION',
                                        'which_env': i['env']})
     if r['return'] > 0:
         return r
@@ -54,11 +54,11 @@ def postprocess(i):
         return r
     version = r['version']
 
-    found_file_path = env['CM_TERRAFORM_BIN_WITH_PATH']
+    found_file_path = env['MLC_TERRAFORM_BIN_WITH_PATH']
     found_path = os.path.dirname(found_file_path)
-    env['CM_TERRAFORM_INSTALLED_PATH'] = found_path
+    env['MLC_TERRAFORM_INSTALLED_PATH'] = found_path
 
-    env['CM_TERRAFORM_CACHE_TAGS'] = 'version-' + version
+    env['MLC_TERRAFORM_CACHE_TAGS'] = 'version-' + version
 
     return {'return': 0, 'version': version}
diff --git a/script/get-terraform/meta.yaml b/script/get-terraform/meta.yaml
index 236d5b9f6..ad35e9093 100644
--- a/script/get-terraform/meta.yaml
+++ b/script/get-terraform/meta.yaml
@@ -5,11 +5,11 @@ cache: true
 category: Cloud automation
 clean_files: []
 new_env_keys:
-- CM_TERRAFORM_*
+- MLC_TERRAFORM_*
 - +PATH
 prehook_deps:
 - enable_if_env:
-    CM_REQUIRE_INSTALL:
+    MLC_REQUIRE_INSTALL:
     - 'yes'
   reuse_version: true
   tags: install,terraform
diff --git a/script/get-tvm-model/README-extra.md b/script/get-tvm-model/README-extra.md
index 0815c5b53..e387efd57 100644
--- a/script/get-tvm-model/README-extra.md
+++ b/script/get-tvm-model/README-extra.md
@@ -17,5 +17,5 @@ in 5 and 6 you can insert any suitable value instead of the symbol `#`, e.g. `mo
 ## Notes
 
-For PyTorch and TensorFlow frontends you should specify evironment variable `CM_ML_MODEL_INPUT_SHAPES` with input shapes of the model you want to compile (e.g. `"input": (16, 3, 224, 224)`) or separate variables `CM_ML_MODEL_IMAGE_NUM_CHANNELS`, `CM_ML_MODEL_IMAGE_WIDTH`, `CM_ML_MODEL_IMAGE_HEIGHT` for 2D CV models and `CM_ML_MODEL_MAX_SEQ_LENGTH` for language models.
+For PyTorch and TensorFlow frontends you should specify the environment variable `MLC_ML_MODEL_INPUT_SHAPES` with input shapes of the model you want to compile (e.g. `"input": (16, 3, 224, 224)`) or separate variables `MLC_ML_MODEL_IMAGE_NUM_CHANNELS`, `MLC_ML_MODEL_IMAGE_WIDTH`, `MLC_ML_MODEL_IMAGE_HEIGHT` for 2D CV models and `MLC_ML_MODEL_MAX_SEQ_LENGTH` for language models.
 
 If your model is in ONNX format then all input shapes can be extracted automatically.
diff --git a/script/get-tvm-model/customize.py b/script/get-tvm-model/customize.py
index 5869374bc..44f785bb3 100644
--- a/script/get-tvm-model/customize.py
+++ b/script/get-tvm-model/customize.py
@@ -12,9 +12,9 @@ def preprocess(i):
 
     automation = i['automation']
 
-    quiet = (env.get('CM_QUIET', False) == 'yes')
+    quiet = (env.get('MLC_QUIET', False) == 'yes')
 
-    work_dir = env.get('CM_TUNE_TVM_MODEL_WORKDIR', '')
+    work_dir = env.get('MLC_TUNE_TVM_MODEL_WORKDIR', '')
 
     if work_dir != '':
         if not os.path.exists(work_dir):
@@ -29,9 +29,9 @@ def preprocess(i):
             raise FileNotFoundError(
                 "Error: the found workdir does not contain database_tuning_record.json")
 
-        if env.get('CM_TUNE_TVM_MODEL', '') != '':
+        if env.get('MLC_TUNE_TVM_MODEL', '') != '':
             print("The \"tune-model\" variation is selected, but at the same time the path to the existing \"work_dir\" is also specified. The compiled model will be based on the found existing \"work_dir\".")
-            env["CM_TUNE_TVM_MODEL"] = "no"
+            env["MLC_TUNE_TVM_MODEL"] = "no"
 
     return {'return': 0}
 
@@ -40,15 +40,15 @@ def postprocess(i):
 
     env = i['env']
 
-    env['CM_ML_MODEL_ORIGINAL_FILE_WITH_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH']
-    env['CM_ML_MODEL_FILE'] = 'model-tvm.so'
-    env['CM_ML_MODEL_PATH'] = os.path.join(os.getcwd())
-    env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join(
-        os.getcwd(), env['CM_ML_MODEL_FILE'])
-    env['CM_ML_MODEL_FRAMEWORK'] = "tvm-" + env['CM_ML_MODEL_FRAMEWORK']
-    if 'CM_ML_MODEL_INPUT_SHAPES' in env.keys():
-        env['CM_ML_MODEL_INPUT_SHAPES'] = env['CM_ML_MODEL_INPUT_SHAPES'].replace(
-            "BATCH_SIZE", env['CM_ML_MODEL_MAX_BATCH_SIZE'])
-    if 'CM_TVM_FRONTEND_FRAMEWORK' in env and env['CM_TVM_FRONTEND_FRAMEWORK'] == 'pytorch':
-        env['CM_PREPROCESS_PYTORCH'] = 'yes'
+    env['MLC_ML_MODEL_ORIGINAL_FILE_WITH_PATH'] = env['MLC_ML_MODEL_FILE_WITH_PATH']
+    env['MLC_ML_MODEL_FILE'] = 'model-tvm.so'
+    env['MLC_ML_MODEL_PATH'] = os.path.join(os.getcwd())
+    env['MLC_ML_MODEL_FILE_WITH_PATH'] = os.path.join(
+        os.getcwd(), env['MLC_ML_MODEL_FILE'])
+    env['MLC_ML_MODEL_FRAMEWORK'] = "tvm-" + env['MLC_ML_MODEL_FRAMEWORK']
+    if 'MLC_ML_MODEL_INPUT_SHAPES' in env.keys():
+        env['MLC_ML_MODEL_INPUT_SHAPES'] = env['MLC_ML_MODEL_INPUT_SHAPES'].replace(
+            "BATCH_SIZE", env['MLC_ML_MODEL_MAX_BATCH_SIZE'])
+    if 'MLC_TVM_FRONTEND_FRAMEWORK' in env and env['MLC_TVM_FRONTEND_FRAMEWORK'] == 'pytorch':
+        env['MLC_PREPROCESS_PYTORCH'] = 'yes'
 
     return {'return': 0}
diff --git a/script/get-tvm-model/meta.yaml b/script/get-tvm-model/meta.yaml
index e3dbcface..a76d765e7 100644
--- a/script/get-tvm-model/meta.yaml
+++ b/script/get-tvm-model/meta.yaml
@@ -4,10 +4,10 @@ automation_uid: 5b4e0237da074764
 cache: true
 category: AI/ML models
 default_env:
-  CM_ML_MODEL_MAX_BATCH_SIZE: '1'
-  CM_TUNE_TVM_MODEL: 'no'
-  CM_TVM_FRONTEND_FRAMEWORK: onnx
-  CM_TVM_USE_VM: 'yes'
+  MLC_ML_MODEL_MAX_BATCH_SIZE: '1'
+  MLC_TUNE_TVM_MODEL: 'no'
+  MLC_TVM_FRONTEND_FRAMEWORK: onnx
+  MLC_TVM_USE_VM: 'yes'
 deps:
 - names:
   - python
@@ -21,18 +21,18 @@ deps:
 - tags: get,generic-python-lib,_scipy
 - tags: get,generic-python-lib,_attrs
 new_env_keys:
-- CM_ML_MODEL_*
-- CM_TUNE_TVM_*
-- CM_TVM_*
+- MLC_ML_MODEL_*
+- MLC_TUNE_TVM_*
+- MLC_TVM_*
 prehook_deps:
 - names:
   - original-model
   tags: get,ml-model,raw
   update_tags_from_env:
-  - CM_ML_MODEL
+  - MLC_ML_MODEL
   update_tags_from_env_with_prefix:
     _:
-    - CM_TVM_FRONTEND_FRAMEWORK
+    - MLC_TVM_FRONTEND_FRAMEWORK
 tags:
 - get
 - ml-model-tvm
@@ -41,7 +41,7 @@ uid: c1b7b656b6224307
 variations:
   batch_size.#:
     env:
-      CM_ML_MODEL_MAX_BATCH_SIZE: '#'
+      MLC_ML_MODEL_MAX_BATCH_SIZE: '#'
     group: batchsize
   fp32:
     add_deps_recursive:
@@ -51,7 +51,7 @@ variations:
     group: precision
   graph-executor:
     env:
-      CM_TVM_USE_VM: 'no'
+      MLC_TVM_USE_VM: 'no'
     group: runtime
   int8:
     add_deps_recursive:
@@ -60,7 +60,7 @@ variations:
     group: precision
   model.#:
     env:
-      CM_ML_MODEL: '#'
+      MLC_ML_MODEL: '#'
     group: model
   onnx:
     default: true
@@ -69,7 +69,7 @@ variations:
       - onnx
       tags: get,generic-python-lib,_onnx
     env:
-      CM_TVM_FRONTEND_FRAMEWORK: onnx
+      MLC_TVM_FRONTEND_FRAMEWORK: onnx
     group: frontend
   pytorch:
     deps:
@@ -79,7 +79,7 @@ variations:
       tags: get,generic-python-lib,_torch
     - tags: get,generic-python-lib,_torchvision
     env:
-      CM_TVM_FRONTEND_FRAMEWORK: pytorch
+      MLC_TVM_FRONTEND_FRAMEWORK: pytorch
     group: frontend
   tensorflow:
     deps:
@@ -87,7 +87,7 @@ variations:
      - tensorflow
       tags: get,generic-python-lib,_tensorflow
     env:
-      CM_TVM_FRONTEND_FRAMEWORK: tensorflow
+      MLC_TVM_FRONTEND_FRAMEWORK: tensorflow
     group: frontend
   tf:
     alias: tensorflow
@@ -97,7 +97,7 @@ variations:
       - tflite
       tags: get,generic-python-lib,_tflite
     env:
-      CM_TVM_FRONTEND_FRAMEWORK: tflite
+      MLC_TVM_FRONTEND_FRAMEWORK: tflite
     group: frontend
   torch:
     alias: pytorch
@@ -107,7 +107,7 @@ variations:
     - tags: get,generic-python-lib,_pandas
     - tags: get,generic-python-lib,_tornado
     env:
-      CM_TUNE_TVM_MODEL: 'yes'
+      MLC_TUNE_TVM_MODEL: 'yes'
   uint8:
     add_deps_recursive:
       original-model:
@@ -116,5 +116,5 @@ variations:
   virtual_machine:
     default: true
     env:
-      CM_TVM_USE_VM: 'yes'
+      MLC_TVM_USE_VM: 'yes'
     group: runtime
diff --git a/script/get-tvm-model/process.py b/script/get-tvm-model/process.py
index c7384000d..4ec1fd921 100644
--- a/script/get-tvm-model/process.py
+++ b/script/get-tvm-model/process.py
@@ -2,7 +2,7 @@ import tempfile
 
 from typing import Dict, Tuple, Optional, List, Any, Union
 
-if os.environ.get("CM_TVM_FRONTEND_FRAMEWORK", None) == "pytorch":
+if os.environ.get("MLC_TVM_FRONTEND_FRAMEWORK", None) == "pytorch":
     import torch
     import torchvision
 
@@ -200,7 +200,7 @@ def serialize_vm(
 
 
 def main() -> None:
-    model_path = os.environ.get('CM_ML_MODEL_FILE_WITH_PATH', None)
+    model_path = os.environ.get('MLC_ML_MODEL_FILE_WITH_PATH', None)
     compiled_model = os.path.join(os.getcwd(), 'model-tvm.so')
     print('TVM model: ' + model_path)
     if model_path.endswith('.so') or model_path.endswith('.dylib'):
@@ -212,35 +212,35 @@ def main() -> None:
         )
     else:
         mod, params = get_mod_params(
-            model_path=os.environ.get('CM_ML_MODEL_FILE_WITH_PATH', None),
-            model_name=os.environ.get('CM_ML_MODEL', '').strip().lower(),
-            batch_size=int(os.environ.get('CM_ML_MODEL_MAX_BATCH_SIZE', 1)),
-            frontend=os.environ.get("CM_TVM_FRONTEND_FRAMEWORK", None),
-            input_shapes_str=os.environ.get('CM_ML_MODEL_INPUT_SHAPES', None),
+            model_path=os.environ.get('MLC_ML_MODEL_FILE_WITH_PATH', None),
+            model_name=os.environ.get('MLC_ML_MODEL', '').strip().lower(),
+            batch_size=int(os.environ.get('MLC_ML_MODEL_MAX_BATCH_SIZE', 1)),
+            frontend=os.environ.get("MLC_TVM_FRONTEND_FRAMEWORK", None),
+            input_shapes_str=os.environ.get('MLC_ML_MODEL_INPUT_SHAPES', None),
             input_layer_name=os.environ.get(
-                'CM_ML_MODEL_INPUT_LAYER_NAME', None),
+                'MLC_ML_MODEL_INPUT_LAYER_NAME', None),
             num_channels=int(
                 os.environ.get(
-                    'CM_ML_MODEL_IMAGE_NUM_CHANNELS',
+                    'MLC_ML_MODEL_IMAGE_NUM_CHANNELS',
                     3)),
-            image_width=int(os.environ.get('CM_ML_MODEL_IMAGE_WIDTH', 0)),
-            image_height=int(os.environ.get('CM_ML_MODEL_IMAGE_HEIGHT', 0)),
+            image_width=int(os.environ.get('MLC_ML_MODEL_IMAGE_WIDTH', 0)),
+            image_height=int(os.environ.get('MLC_ML_MODEL_IMAGE_HEIGHT', 0)),
             max_seq_length=int(
                 os.environ.get(
-                    'CM_ML_MODEL_MAX_SEQ_LENGTH', 0)),
+                    'MLC_ML_MODEL_MAX_SEQ_LENGTH', 0)),
         )
 
-    opt_level = int(os.environ.get('CM_MLPERF_TVM_OPT_LEVEL', 3))
+    opt_level = int(os.environ.get('MLC_MLPERF_TVM_OPT_LEVEL', 3))
 
     target = os.environ.get(
-        'CM_MLPERF_TVM_TARGET',
-        f"llvm -num-cores {os.environ.get('CM_HOST_CPU_TOTAL_CORES', '1')}"
+        'MLC_MLPERF_TVM_TARGET',
+        f"llvm -num-cores {os.environ.get('MLC_HOST_CPU_TOTAL_CORES', '1')}"
     )
 
     build_conf = {}
     target_host = None
     tvm_target = tvm.target.Target(target, host=target_host)
 
-    tune_model_flag = os.environ.get('CM_TUNE_TVM_MODEL', 'no') == 'yes'
+    tune_model_flag = os.environ.get('MLC_TUNE_TVM_MODEL', 'no') == 'yes'
     work_dir = ''
     database = None
-    use_vm = os.environ.get('CM_TVM_USE_VM', 'no') == 'yes'
+    use_vm = os.environ.get('MLC_TVM_USE_VM', 'no') == 'yes'
 
     if tune_model_flag:
         work_dir, database = tune_model(
             mod=mod,
@@ -251,7 +251,7 @@ def main() -> None:
             mod=mod,
             params=params,
             work_dir=work_dir if work_dir != '' else os.environ.get(
-                'CM_TUNE_TVM_MODEL_WORKDIR', ''),
+                'MLC_TUNE_TVM_MODEL_WORKDIR', ''),
             target=tvm_target,
             opt_level=opt_level,
             build_conf=build_conf,
diff --git a/script/get-tvm-model/run.sh b/script/get-tvm-model/run.sh
index 6b18e39df..64cada524 100644
--- a/script/get-tvm-model/run.sh
+++ b/script/get-tvm-model/run.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-cmd="${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/process.py"
+cmd="${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/process.py"
 
 echo $cmd
diff --git a/script/get-tvm/customize.py b/script/get-tvm/customize.py
index 9bb912f88..c4053d618 100644
--- a/script/get-tvm/customize.py
+++ b/script/get-tvm/customize.py
@@ -16,7 +16,7 @@ def postprocess(i):
     os_info = i['os_info']
     env = i['env']
 
-    if env.get('CM_TVM_PIP_INSTALL', '') == "yes":
+    if env.get('MLC_TVM_PIP_INSTALL', '') == "yes":
         return {'return': 0}
 
     tvm_home = env['TVM_HOME']
@@ -39,12 +39,12 @@ def postprocess(i):
     env['+C_INCLUDE_PATH'].append(include_path)
     env['+CPLUS_INCLUDE_PATH'].append(include_path)
 
-    env['CM_TVM_PATH_INCLUDE'] = include_path
+    env['MLC_TVM_PATH_INCLUDE'] = include_path
 
     # Lib
     lib_path = os.path.join(tvm_home, 'build')
     env['+LD_LIBRARY_PATH'].append(lib_path)
     env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path)
 
-    env['CM_TVM_PATH_LIB'] = lib_path
+    env['MLC_TVM_PATH_LIB'] = lib_path
 
     return {'return': 0}
diff --git a/script/get-tvm/meta.yaml b/script/get-tvm/meta.yaml
index 75f2a174c..75ba13d59 100644
--- a/script/get-tvm/meta.yaml
+++ b/script/get-tvm/meta.yaml
@@ -4,9 +4,9 @@ automation_uid: 5b4e0237da074764
 cache: true
 category: AI/ML frameworks
 default_env:
-  CM_GIT_CHECKOUT: main
-  CM_GIT_URL: https://github.com/apache/tvm
-  CM_TVM_PIP_INSTALL: 'no'
+  MLC_GIT_CHECKOUT: main
+  MLC_GIT_URL: https://github.com/apache/tvm
+  MLC_TVM_PIP_INSTALL: 'no'
 deps:
 - tags: cmake,get-cmake
   version_min: '3.18'
@@ -17,11 +17,11 @@ deps:
 - tags: get,generic-python-lib,_attrs
 - tags: get,generic-python-lib,_psutil
 extra_cache_tags_from_env:
-- env: CM_LLVM_CACHE_TAGS
+- env: MLC_LLVM_CACHE_TAGS
   prefix: llvm-
 new_env_keys:
 - TVM_HOME
-- CM_TVM_*
+- MLC_TVM_*
 - +LD_LIBRARY_PATH
 - +DYLD_FALLBACK_LIBRARY_PATH
 - +PYTHONPATH
@@ -35,7 +35,7 @@ variations:
     deps:
     - tags: get,cuda
     env:
-      CM_TVM_USE_CUDA: 'yes'
+      MLC_TVM_USE_CUDA: 'yes'
   llvm:
     deps:
     - names:
@@ -43,31 +43,31 @@ variations:
       - llvm
       tags: get,llvm
       version_min: 14.0.0
     env:
-      CM_TVM_USE_LLVM: 'yes'
+      MLC_TVM_USE_LLVM: 'yes'
     group: installation-type
   openmp:
     env:
-      CM_TVM_USE_OPENMP: 'yes'
+      MLC_TVM_USE_OPENMP: 'yes'
   pip-install:
     default: true
    deps:
     - tags: get,generic-python-lib,_apache-tvm
     env:
-      CM_TVM_PIP_INSTALL: 'yes'
+      MLC_TVM_PIP_INSTALL: 'yes'
     group: installation-type
 versions:
   main:
     env:
-      CM_GIT_CHECKOUT: main
+      MLC_GIT_CHECKOUT: main
   v0.10.0:
     env:
-      CM_GIT_CHECKOUT: v0.10.0
+      MLC_GIT_CHECKOUT: v0.10.0
   v0.7.0:
     env:
-      CM_GIT_CHECKOUT: v0.7.0
+      MLC_GIT_CHECKOUT: v0.7.0
   v0.8.0:
     env:
-      CM_GIT_CHECKOUT: v0.8.0
+      MLC_GIT_CHECKOUT: v0.8.0
   v0.9.0:
     env:
-      CM_GIT_CHECKOUT: v0.9.0
+      MLC_GIT_CHECKOUT: v0.9.0
diff --git a/script/get-tvm/run.sh b/script/get-tvm/run.sh
index e7c492058..9ac13bdba 100644
--- a/script/get-tvm/run.sh
+++ b/script/get-tvm/run.sh
@@ -2,7 +2,7 @@
 
 CUR_DIR=$PWD
 
-if [ "${CM_TVM_PIP_INSTALL}" != "no" ]; then
+if [ "${MLC_TVM_PIP_INSTALL}" != "no" ]; then
   exit 0;
 fi
 
@@ -11,15 +11,15 @@ echo "Path for TVM: ${CUR_DIR}"
 echo ""
 
 if [ ! -d "tvm" ]; then
-  echo "git clone --recursive -b ${CM_GIT_CHECKOUT} ${CM_GIT_URL} tvm"
-  git clone --recursive -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} tvm
+  echo "git clone --recursive -b ${MLC_GIT_CHECKOUT} ${MLC_GIT_URL} tvm"
+  git clone --recursive -b "${MLC_GIT_CHECKOUT}" ${MLC_GIT_URL} tvm
   test $? -eq 0 || exit 1
 fi
 
 cd tvm
 
-if [ "${CM_GIT_SHA}" != "" ]; then
-  echo "git checkout ${CM_GIT_SHA}"
-  git checkout ${CM_GIT_SHA}
+if [ "${MLC_GIT_SHA}" != "" ]; then
+  echo "git checkout ${MLC_GIT_SHA}"
+  git checkout ${MLC_GIT_SHA}
   test $? -eq 0 || exit 1
 fi
 
@@ -35,20 +35,20 @@ if [ ! -d "${CUR_DIR}/tvm/build" ]; then
 
   cd ${CUR_DIR}/tvm/build
 
-  if [[ ${CM_TVM_USE_LLVM} == "yes" ]]; then
-    if [[ -z "${CM_LLVM_INSTALLED_PATH}" ]]; then
-      llvm_version=$(echo "${CM_LLVM_CLANG_VERSION}" | cut -d. -f1)
+  if [[ ${MLC_TVM_USE_LLVM} == "yes" ]]; then
+    if [[ -z "${MLC_LLVM_INSTALLED_PATH}" ]]; then
+      llvm_version=$(echo "${MLC_LLVM_CLANG_VERSION}" | cut -d. -f1)
       sed -i.bak "s|set(USE_LLVM OFF)|set(USE_LLVM llvm-config-$llvm_version)|" config.cmake
     else
-      sed -i.bak "s|set(USE_LLVM OFF)|set(USE_LLVM ${CM_LLVM_INSTALLED_PATH}/llvm-config)|" config.cmake
+      sed -i.bak "s|set(USE_LLVM OFF)|set(USE_LLVM ${MLC_LLVM_INSTALLED_PATH}/llvm-config)|" config.cmake
     fi
   fi
 
-  if [[ ${CM_TVM_USE_OPENMP} == "yes" ]]; then
+  if [[ ${MLC_TVM_USE_OPENMP} == "yes" ]]; then
     sed -i.bak 's/set(USE_OPENMP none)/set(USE_OPENMP gnu)/' config.cmake
   fi
 
-  if [[ ${CM_TVM_USE_CUDA} == "yes" ]]; then
+  if [[ ${MLC_TVM_USE_CUDA} == "yes" ]]; then
     sed -i.bak 's/set(USE_CUDA OFF)/set(USE_OPENMP ON)/' config.cmake
     echo 'set(USE_CUDA ON)' >> config.cmake
   fi
@@ -57,16 +57,16 @@ if [ ! -d "${CUR_DIR}/tvm/build" ]; then
   test $? -eq 0 || exit 1
 fi
 
-CM_MAKE_CORES=${CM_MAKE_CORES:-${CM_HOST_CPU_TOTAL_CORES}}
-CM_MAKE_CORES=${CM_MAKE_CORES:-2}
+MLC_MAKE_CORES=${MLC_MAKE_CORES:-${MLC_HOST_CPU_TOTAL_CORES}}
+MLC_MAKE_CORES=${MLC_MAKE_CORES:-2}
 
 echo "******************************************************"
-echo "Building TVM using ${CM_MAKE_CORES} cores ..."
+echo "Building TVM using ${MLC_MAKE_CORES} cores ..."
 echo ""
 
 cd ${CUR_DIR}/tvm/build
 
-make -j${CM_MAKE_CORES}
+make -j${MLC_MAKE_CORES}
 test $? -eq 0 || exit 1
 
 INSTALL_DIR=$PWD
@@ -74,7 +74,7 @@ INSTALL_DIR=$PWD
 cd ../../
 
 echo "TVM_HOME=$PWD/tvm" > tmp-run-env.out
-echo "CM_TVM_INSTALLED_PATH=$PWD/tvm" >> tmp-run-env.out
+echo "MLC_TVM_INSTALLED_PATH=$PWD/tvm" >> tmp-run-env.out
 
 echo "******************************************************"
 echo "TVM was built and installed to ${INSTALL_DIR} ..."
diff --git a/script/get-xilinx-sdk/customize.py b/script/get-xilinx-sdk/customize.py
index f14b9797d..07097d60a 100644
--- a/script/get-xilinx-sdk/customize.py
+++ b/script/get-xilinx-sdk/customize.py
@@ -12,9 +12,9 @@ def preprocess(i):
 
     automation = i['automation']
 
-    quiet = (env.get('CM_QUIET', False) == 'yes')
+    quiet = (env.get('MLC_QUIET', False) == 'yes')
 
-    file_path = env.get("CM_XILINX_SDK_BIN_PATH")
+    file_path = env.get("MLC_XILINX_SDK_BIN_PATH")
     if not file_path or not os.path.exists(file_path):
         return {'return': 1, 'error': 'FILE_PATH does not exist'}
 
diff --git a/script/get-xilinx-sdk/meta.yaml b/script/get-xilinx-sdk/meta.yaml
index 745020214..4a1c2456c 100644
--- a/script/get-xilinx-sdk/meta.yaml
+++ b/script/get-xilinx-sdk/meta.yaml
@@ -7,9 +7,9 @@ default_version: '2019.1'
 deps: []
 input_description: {}
 input_mapping:
-  input: CM_XILINX_SDK_FILE_PATH
+  input: MLC_XILINX_SDK_FILE_PATH
 new_env_keys:
-- CM_XILINX_*
+- MLC_XILINX_*
 - +PATH
 new_state_keys: []
 post_deps: []
@@ -23,5 +23,5 @@ variations: {}
 versions:
   '2019.1':
     env:
-      CM_DOWNLOAD_CHECKSUM: 7ccb3840d36c305a7cb34b314db7d7f2
-      CM_DOWNLOAD_URL: https://www.xilinx.com/member/forms/download/xef.html?filename=Xilinx_SDK_2019.1_0524_1430_Lin64.bin
+      MLC_DOWNLOAD_CHECKSUM: 7ccb3840d36c305a7cb34b314db7d7f2
+      MLC_DOWNLOAD_URL: https://www.xilinx.com/member/forms/download/xef.html?filename=Xilinx_SDK_2019.1_0524_1430_Lin64.bin
diff --git a/script/get-xilinx-sdk/run.sh b/script/get-xilinx-sdk/run.sh
index 3a584c10c..821adb3f9 100644
--- a/script/get-xilinx-sdk/run.sh
+++ b/script/get-xilinx-sdk/run.sh
@@ -1,11 +1,11 @@
 #!/bin/bash
 
-#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH}
 
 #To export any variable
 #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
 
-#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
 
 
@@ -17,11 +17,11 @@ function run() {
   echo "Running: "
   echo "$1"
   echo ""
-  if [[ ${CM_FAKE_RUN} != 'yes' ]]; then
+  if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then
     eval "$1"
     exit_if_error
   fi
 }
 
 #Add your run commands here...
-# run "$CM_RUN_CMD"
+# run "$MLC_RUN_CMD"
diff --git a/script/get-zendnn/customize.py b/script/get-zendnn/customize.py
index 706398a08..97fb859e6 100644
--- a/script/get-zendnn/customize.py
+++ b/script/get-zendnn/customize.py
@@ -12,12 +12,12 @@ def preprocess(i):
 
     automation = i['automation']
 
-    quiet = (env.get('CM_QUIET', False) == 'yes')
+    quiet = (env.get('MLC_QUIET', False) == 'yes')
 
-    env['ZENDNN_BLIS_PATH'] = env['CM_BLIS_INSTALL_PATH']
-    env['ZENDNN_LIBM_PATH'] = env['CM_AOCL_BUILD_PATH']
+    env['ZENDNN_BLIS_PATH'] = env['MLC_BLIS_INSTALL_PATH']
+    env['ZENDNN_LIBM_PATH'] = env['MLC_AOCL_BUILD_PATH']
 
-    env['ZENDNN_SRC_PATH'] = env['CM_GIT_REPO_CHECKOUT_PATH']
+    env['ZENDNN_SRC_PATH'] = env['MLC_GIT_REPO_CHECKOUT_PATH']
 
     return {'return': 0}
 
diff --git a/script/get-zephyr-sdk/README-extra.md b/script/get-zephyr-sdk/README-extra.md
index 3c139b607..ae73f91f8 100644
--- a/script/get-zephyr-sdk/README-extra.md
+++ b/script/get-zephyr-sdk/README-extra.md
@@ -6,8 +6,8 @@ This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/scrip
 cm run script --tags=get,zephyr-sdk --version=0.13.2
 ```
 ## Exported Variables
-1. [ZEPHYR_SDK_INSTALL_DIR](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-zephyr-sdk/customize.py#L13): Location in CM cache where Zephyr SDK is installed.
-2. [ZEPHYR_TOOLCHAIN_VARIANT](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-zephyr-sdk/customize.py#L12)
+1. [ZEPHYR_SDK_INSTALL_DIR](https://github.com/mlcommons/ck/blob/master/mlc-mlops/script/get-zephyr-sdk/customize.py#L13): Location in CM cache where Zephyr SDK is installed.
+2. [ZEPHYR_TOOLCHAIN_VARIANT](https://github.com/mlcommons/ck/blob/master/mlc-mlops/script/get-zephyr-sdk/customize.py#L12)
 
 ## Supported Versions
 1. 0.13.1
diff --git a/script/get-zephyr-sdk/customize.py b/script/get-zephyr-sdk/customize.py
index 950a479ec..143ad8584 100644
--- a/script/get-zephyr-sdk/customize.py
+++ b/script/get-zephyr-sdk/customize.py
@@ -12,6 +12,6 @@ def postprocess(i):
     env = i['env']
     env['ZEPHYR_TOOLCHAIN_VARIANT'] = "zephyr"
     env['ZEPHYR_SDK_INSTALL_DIR'] = os.path.join(
-        os.getcwd(), "zephyr-sdk-" + env['CM_ZEPHYR_SDK_VERSION'])
+        os.getcwd(), "zephyr-sdk-" + env['MLC_ZEPHYR_SDK_VERSION'])
 
     return {'return': 0}
diff --git a/script/get-zephyr-sdk/meta.yaml b/script/get-zephyr-sdk/meta.yaml
index 5a7a97862..1889cc0be 100644
--- a/script/get-zephyr-sdk/meta.yaml
+++ b/script/get-zephyr-sdk/meta.yaml
@@ -16,10 +16,10 @@ uid: c70ae1a7567f4a7b
 versions:
   0.13.1:
     env:
-      CM_ZEPHYR_SDK_VERSION: 0.13.1
+      MLC_ZEPHYR_SDK_VERSION: 0.13.1
   0.13.2:
     env:
-      CM_ZEPHYR_SDK_VERSION: 0.13.2
+      MLC_ZEPHYR_SDK_VERSION: 0.13.2
   0.15.0:
     env:
-      CM_ZEPHYR_SDK_VERSION: 0.15.0
+      MLC_ZEPHYR_SDK_VERSION: 0.15.0
diff --git a/script/get-zephyr-sdk/run.sh b/script/get-zephyr-sdk/run.sh
index 07c55e078..4c969eedd 100644
--- a/script/get-zephyr-sdk/run.sh
+++ b/script/get-zephyr-sdk/run.sh
@@ -1,11 +1,11 @@
 #!/bin/bash
 
-CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD}
-version=${CM_ZEPHYR_SDK_VERSION}
-os=${CM_HOST_OS_TYPE}
+MLC_TMP_CURRENT_SCRIPT_PATH=${MLC_TMP_CURRENT_SCRIPT_PATH:-$PWD}
+version=${MLC_ZEPHYR_SDK_VERSION}
+os=${MLC_HOST_OS_TYPE}
 if [ $os == "darwin" ]; then
-  os=${CM_HOST_OS_FLAVOR}
+  os=${MLC_HOST_OS_FLAVOR}
 fi
-platform=${CM_HOST_OS_MACHINE}
+platform=${MLC_HOST_OS_MACHINE}
 if [ $platform == "arm64" ]; then
   platform=aarch64
 fi
diff --git a/script/get-zephyr/README-extra.md b/script/get-zephyr/README-extra.md
deleted file mode 100644
index 34aa511ce..000000000
--- a/script/get-zephyr/README-extra.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# GET-ZEPHYR
-This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) installs the [Zephyr](https://github.com/zephyrproject-rtos/zephyr) real-time OS including all the needed system and python dependencies using its own command line tool [west](https://docs.zephyrproject.org/latest/develop/west/index.html).
-## Exported Variables
-1. [ZEPHYR_DIR](https://github.com/octoml/ck/blob/master/cm-mlops/script/get-zephyr/customize.py#L15): Location in CM cache where Zephyr is installed.
-
-## Supported and Tested OS
-1. Ubuntu 18.04, 20.04, 22.04
-2. RHEL 9
diff --git a/script/get-zephyr/customize.py b/script/get-zephyr/customize.py
index 4bbcf5316..4db95cecd 100644
--- a/script/get-zephyr/customize.py
+++ b/script/get-zephyr/customize.py
@@ -13,6 +13,6 @@ def preprocess(i):
 def postprocess(i):
     env = i['env']
 
-    env['CM_ZEPHYR_DIR'] = os.path.join(os.getcwd(), "zephyr")
+    env['MLC_ZEPHYR_DIR'] = os.path.join(os.getcwd(), "zephyr")
 
     return {'return': 0}
diff --git a/script/get-zephyr/meta.yaml b/script/get-zephyr/meta.yaml
index 55c59aff5..5c7a85652 100644
--- a/script/get-zephyr/meta.yaml
+++ b/script/get-zephyr/meta.yaml
@@ -15,7 +15,7 @@ deps:
   version_min: 3.20.0
 - tags: get,generic-python-lib,_west
 new_env_keys:
-- CM_ZEPHYR_*
+- MLC_ZEPHYR_*
 tags:
 - get
 - zephyr
@@ -23,4 +23,4 @@ uid: d4105c2cdb044276
 versions:
   v2.7:
     env:
-      CM_ZEPHYR_VERSION: v2.7
+      MLC_ZEPHYR_VERSION: v2.7
diff --git a/script/get-zephyr/run-ubuntu.sh b/script/get-zephyr/run-ubuntu.sh
index a8cb216b4..f2658491b 100644
--- a/script/get-zephyr/run-ubuntu.sh
+++ b/script/get-zephyr/run-ubuntu.sh
@@ -1,4 +1,4 @@
 #!/bin/bash
 
 sudo apt-get install -y --no-install-recommends gcc-multilib g++-multilib libsdl2-dev
-. ${CM_TMP_CURRENT_SCRIPT_PATH}/run.sh
+. ${MLC_TMP_CURRENT_SCRIPT_PATH}/run.sh
diff --git a/script/get-zephyr/run.sh b/script/get-zephyr/run.sh
index 7d5b53245..eea5aaaec 100644
--- a/script/get-zephyr/run.sh
+++ b/script/get-zephyr/run.sh
@@ -1,14 +1,14 @@
 #!/bin/bash
 
-CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD}
-CM_PYTHON_BIN_WITH_PATH=${CM_PYTHON_BIN_WITH_PATH:-python3}
+MLC_TMP_CURRENT_SCRIPT_PATH=${MLC_TMP_CURRENT_SCRIPT_PATH:-$PWD}
+MLC_PYTHON_BIN_WITH_PATH=${MLC_PYTHON_BIN_WITH_PATH:-python3}
 
 CUR=`pwd`
 if [ "${?}" != "0" ]; then exit 1; fi
 
 if [ ! -d "zephyr" ]; then
-  west init --mr ${CM_ZEPHYR_VERSION}-branch $CUR
+  west init --mr ${MLC_ZEPHYR_VERSION}-branch $CUR
   if [ "${?}" != "0" ]; then exit 1; fi
 fi
 
@@ -17,6 +17,6 @@ west update
 if [ "${?}" != "0" ]; then exit 1; fi
 west zephyr-export
 if [ "${?}" != "0" ]; then exit 1; fi
 
-${CM_PYTHON_BIN_WITH_PATH} -m pip install -r $CUR/zephyr/scripts/requirements.txt
+${MLC_PYTHON_BIN_WITH_PATH} -m pip install -r $CUR/zephyr/scripts/requirements.txt
 if [ "${?}" != "0" ]; then exit 1; fi
diff --git a/script/install-apt-package/customize.py b/script/install-apt-package/customize.py
index 911067966..7ecb5d9fa 100644
--- a/script/install-apt-package/customize.py
+++ b/script/install-apt-package/customize.py
@@ -9,19 +9,19 @@ def preprocess(i):
     env = i['env']
     state = i['state']
 
-    package_name = env['CM_APT_PACKAGE_NAME']
+    package_name = env['MLC_APT_PACKAGE_NAME']
 
-    install_cmd = env.get('CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD')
+    install_cmd = env.get('MLC_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD')
     if not install_cmd:
         return {
             'return': 1,
             'error': 'Package manager installation command not detected for the given OS'}
 
-    sudo = env.get('CM_SUDO', '')
+    sudo = env.get('MLC_SUDO', '')
 
-    env['CM_APT_INSTALL_CMD'] = sudo + ' ' + install_cmd + ' ' + package_name
+    env['MLC_APT_INSTALL_CMD'] = sudo + ' ' + install_cmd + ' ' + package_name
 
-    if env.get('CM_APT_CHECK_CMD',
-               '') != '' and env['CM_APT_INSTALL_CMD'] != '':
-        env['CM_APT_INSTALL_CMD'] = f"""{env['CM_APT_CHECK_CMD']} || {env['CM_APT_INSTALL_CMD']}"""
+    if env.get('MLC_APT_CHECK_CMD',
+               '') != '' and env['MLC_APT_INSTALL_CMD'] != '':
+        env['MLC_APT_INSTALL_CMD'] = f"""{env['MLC_APT_CHECK_CMD']} || {env['MLC_APT_INSTALL_CMD']}"""
 
     return {'return': 0}
diff --git a/script/install-apt-package/meta.yaml b/script/install-apt-package/meta.yaml
index d5ba87af7..71f999e09 100644
--- a/script/install-apt-package/meta.yaml
+++ b/script/install-apt-package/meta.yaml
@@ -4,8 +4,8 @@ automation_uid: 5b4e0237da074764
 cache: true
 category: Detection or installation of tools and artifacts
 default_env:
-  CM_CLEAN_DIRS: bin
-  CM_SUDO: sudo
+  MLC_CLEAN_DIRS: bin
+  MLC_SUDO: sudo
 deps:
 - tags: detect,os
 new_env_keys: []
@@ -18,4 +18,4 @@ uid: 3688efcd8f324546
 variations:
   package.#:
     env:
-      CM_APT_PACKAGE_NAME: '#'
+      MLC_APT_PACKAGE_NAME: '#'
diff --git a/script/install-apt-package/run.sh b/script/install-apt-package/run.sh
index d72b2c9d6..ee2a065d2 100644
--- a/script/install-apt-package/run.sh
+++ b/script/install-apt-package/run.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-cmd=${CM_APT_INSTALL_CMD}
+cmd=${MLC_APT_INSTALL_CMD}
 echo $cmd
 eval $cmd
 test $? -eq 0 || exit $?
diff --git a/script/install-aws-cli/meta.yaml b/script/install-aws-cli/meta.yaml
index 6fda1f71e..c4fb3e9e8 100644
--- a/script/install-aws-cli/meta.yaml
+++ b/script/install-aws-cli/meta.yaml
@@ -6,10 +6,10 @@ category: Cloud automation
 deps:
 - tags: detect,os
 env:
-  CM_CURL_URL: https://awscli.amazonaws.com/awscli-exe-[OS]-[PLATFORM].zip
+  MLC_CURL_URL: https://awscli.amazonaws.com/awscli-exe-[OS]-[PLATFORM].zip
 post_deps:
 - skip_if_env:
-    CM_REQUIRE_INSTALL:
+    MLC_REQUIRE_INSTALL:
     - 'yes'
   tags: get,aws-cli
 tags:
diff --git a/script/install-aws-cli/run.sh b/script/install-aws-cli/run.sh
index cc3abf3f9..e60191a2f 100644
--- a/script/install-aws-cli/run.sh
+++ b/script/install-aws-cli/run.sh
@@ -2,13 +2,13 @@
 CUR_DIR=$PWD
 
 echo "******************************************************"
-echo $CM_CURL_URL
-CM_CURL_URL=${CM_CURL_URL//"[OS]"/${CM_HOST_OS_TYPE}}
-CM_CURL_URL=${CM_CURL_URL//"[PLATFORM]"/${CM_HOST_PLATFORM_FLAVOR}}
-echo $CM_CURL_URL
-echo "CM_CURL_URL=${CM_CURL_URL}" >> tmp-run-env.out
+echo $MLC_CURL_URL
+MLC_CURL_URL=${MLC_CURL_URL//"[OS]"/${MLC_HOST_OS_TYPE}}
+MLC_CURL_URL=${MLC_CURL_URL//"[PLATFORM]"/${MLC_HOST_PLATFORM_FLAVOR}}
+echo $MLC_CURL_URL
+echo "MLC_CURL_URL=${MLC_CURL_URL}" >> tmp-run-env.out
 
 FILE="awscliv2.zip"
 rm -rf ${FILE}
-curl "${CM_CURL_URL}" -o "${FILE}"
+curl "${MLC_CURL_URL}" -o "${FILE}"
 unzip ${FILE}
 sudo ./aws/install
diff --git a/script/install-bazel/customize.py b/script/install-bazel/customize.py
index f18929e25..a3d230c47 100644
--- a/script/install-bazel/customize.py
+++ b/script/install-bazel/customize.py
@@ -12,15 +12,15 @@ def preprocess(i):
 
     recursion_spaces = i['recursion_spaces']
 
-    need_version = env.get('CM_VERSION', '')
+    need_version = env.get('MLC_VERSION', '')
     if need_version == '':
         return {'return': 1,
-                'error': 'internal problem - CM_VERSION is not defined in env'}
+                'error': 'internal problem - MLC_VERSION is not defined in env'}
 
     print(recursion_spaces + ' # Requested version: {}'.format(need_version))
 
-#    if 'CM_GIT_CHECKOUT' not in env:
-#        env['CM_GIT_CHECKOUT'] = 'releases/gcc-' + need_version
+#    if 'MLC_GIT_CHECKOUT' not in env:
+#        env['MLC_GIT_CHECKOUT'] = 'releases/gcc-' + need_version
 
     if os_info['platform'] == 'windows':
         prefix = ''
@@ -29,8 +29,8 @@ def preprocess(i):
         ext = '.exe'
     else:
         prefix = 'installer-'
-        xos = env['CM_HOST_OS_TYPE']
-        platform = env['CM_HOST_PLATFORM_FLAVOR']
+        xos = env['MLC_HOST_OS_TYPE']
+        platform = env['MLC_HOST_PLATFORM_FLAVOR']
         ext = '.sh'
 
     filename = 'bazel-{}-{}{}-{}{}'.format(need_version,
@@ -51,12 +51,12 @@ def preprocess(i):
         bazel_bin = 'bazel'
         path = os.path.join(cur_dir, 'install', 'bin')
 
-    env['CM_BAZEL_DOWNLOAD_URL'] = url
-    env['CM_BAZEL_DOWNLOAD_FILE'] = filename
+    env['MLC_BAZEL_DOWNLOAD_URL'] = url
+    env['MLC_BAZEL_DOWNLOAD_FILE'] = filename
 
-    env['CM_BAZEL_INSTALLED_PATH'] = path
-    env['CM_BAZEL_BIN_WITH_PATH'] = os.path.join(path, bazel_bin)
+    env['MLC_BAZEL_INSTALLED_PATH'] = path
+    env['MLC_BAZEL_BIN_WITH_PATH'] = os.path.join(path, bazel_bin)
 
-    env['CM_GET_DEPENDENT_CACHED_PATH'] = os.getcwd()
+    env['MLC_GET_DEPENDENT_CACHED_PATH'] = os.getcwd()
 
     return {'return': 0}
diff --git a/script/install-bazel/meta.yaml b/script/install-bazel/meta.yaml
index 65d913604..7c68d5654 100644
--- a/script/install-bazel/meta.yaml
+++ b/script/install-bazel/meta.yaml
@@ -7,12 +7,12 @@ default_version: 7.0.2
 deps:
 - tags: detect,os
 env:
-  CM_WGET_URL: https://github.com/bazelbuild/bazel/releases/download/[VERSION]/bazel-[VERSION]-installer-[OS]-[PLATFORM].sh
+  MLC_WGET_URL: https://github.com/bazelbuild/bazel/releases/download/[VERSION]/bazel-[VERSION]-installer-[OS]-[PLATFORM].sh
 new_env_keys:
-- CM_BAZEL_*
+- MLC_BAZEL_*
 post_deps:
- skip_if_env:
-    CM_REQUIRE_INSTALL:
+    MLC_REQUIRE_INSTALL:
     - 'yes'
   tags: get,bazel
 tags:
diff --git a/script/install-bazel/run-aarch64.sh b/script/install-bazel/run-aarch64.sh
index 0b8753243..522ffeb0d 100644
--- a/script/install-bazel/run-aarch64.sh
+++ b/script/install-bazel/run-aarch64.sh
@@ -3,23 +3,23 @@
 CUR_DIR=$PWD
 
 echo "******************************************************"
-CM_WGET_URL=${CM_WGET_URL//"[OS]"/${CM_HOST_OS_TYPE}}
-CM_WGET_URL=${CM_WGET_URL//"[PLATFORM]"/arm64}
-CM_WGET_URL=${CM_WGET_URL//"[VERSION]"/${CM_VERSION}}
-CM_WGET_URL=${CM_WGET_URL//"-installer-"/-}
-CM_WGET_URL=${CM_WGET_URL//".sh"/}
-echo "CM_WGET_URL=${CM_WGET_URL}" > tmp-run-env.out
-BAZEL_SCRIPT="bazel-${CM_VERSION}-${CM_HOST_OS_TYPE}-arm64"
+MLC_WGET_URL=${MLC_WGET_URL//"[OS]"/${MLC_HOST_OS_TYPE}}
+MLC_WGET_URL=${MLC_WGET_URL//"[PLATFORM]"/arm64}
+MLC_WGET_URL=${MLC_WGET_URL//"[VERSION]"/${MLC_VERSION}}
+MLC_WGET_URL=${MLC_WGET_URL//"-installer-"/-}
+MLC_WGET_URL=${MLC_WGET_URL//".sh"/}
+echo "MLC_WGET_URL=${MLC_WGET_URL}" > tmp-run-env.out
+BAZEL_SCRIPT="bazel-${MLC_VERSION}-${MLC_HOST_OS_TYPE}-arm64"
 
 INSTALL_DIR=${CUR_DIR}
 
 rm -rf ${INSTALL_DIR}/bin
 
-wget -c ${CM_WGET_URL}
+wget -c ${MLC_WGET_URL}
 if [ "${?}" != "0" ]; then exit 1; fi
 
 chmod +x ${BAZEL_SCRIPT}
 ln -s ${BAZEL_SCRIPT} bazel
 if [ "${?}" != "0" ]; then exit 1; fi
 
-echo "CM_BAZEL_INSTALLED_PATH=${INSTALL_DIR}" >>tmp-run-env.out
-echo "CM_BAZEL_BIN_WITH_PATH=${INSTALL_DIR}/${BAZEL_SCRIPT}" >>tmp-run-env.out
+echo "MLC_BAZEL_INSTALLED_PATH=${INSTALL_DIR}" >>tmp-run-env.out
+echo "MLC_BAZEL_BIN_WITH_PATH=${INSTALL_DIR}/${BAZEL_SCRIPT}" >>tmp-run-env.out
 
 echo "Bazel is installed to ${INSTALL_DIR} ..."
diff --git a/script/install-bazel/run.bat b/script/install-bazel/run.bat
index 7108a4758..56cc42830 100644
--- a/script/install-bazel/run.bat
+++ b/script/install-bazel/run.bat
@@ -1,9 +1,9 @@
 @echo off
 
-del /Q /S %CM_BAZEL_DOWNLOAD_FILE%
+del /Q /S %MLC_BAZEL_DOWNLOAD_FILE%
 del /Q /S bazel.exe
 
-wget -c %CM_BAZEL_DOWNLOAD_URL% -O %CM_BAZEL_DOWNLOAD_FILE% --no-check-certificate
+wget -c %MLC_BAZEL_DOWNLOAD_URL% -O %MLC_BAZEL_DOWNLOAD_FILE% --no-check-certificate
 IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
 
-ren %CM_BAZEL_DOWNLOAD_FILE% bazel.exe
+ren %MLC_BAZEL_DOWNLOAD_FILE% bazel.exe
diff --git a/script/install-bazel/run.sh b/script/install-bazel/run.sh
index e5fe4651d..a4771f47e 100644
--- a/script/install-bazel/run.sh
+++ b/script/install-bazel/run.sh
@@ -4,19 +4,19 @@ CUR_DIR=$PWD
 
 echo "******************************************************"
 
-CM_WGET_URL=${CM_WGET_URL//"[OS]"/${CM_HOST_OS_TYPE}}
-CM_WGET_URL=${CM_WGET_URL//"[PLATFORM]"/${CM_HOST_PLATFORM_FLAVOR}}
-CM_WGET_URL=${CM_WGET_URL//"[VERSION]"/${CM_VERSION}}
+MLC_WGET_URL=${MLC_WGET_URL//"[OS]"/${MLC_HOST_OS_TYPE}}
+MLC_WGET_URL=${MLC_WGET_URL//"[PLATFORM]"/${MLC_HOST_PLATFORM_FLAVOR}}
+MLC_WGET_URL=${MLC_WGET_URL//"[VERSION]"/${MLC_VERSION}}
 
-echo "CM_WGET_URL=${CM_WGET_URL}" >> tmp-run-env.out
+echo "MLC_WGET_URL=${MLC_WGET_URL}" >> tmp-run-env.out
 
-BAZEL_SCRIPT="bazel-${CM_VERSION}-installer-${CM_HOST_OS_TYPE}-${CM_HOST_PLATFORM_FLAVOR}.sh"
+BAZEL_SCRIPT="bazel-${MLC_VERSION}-installer-${MLC_HOST_OS_TYPE}-${MLC_HOST_PLATFORM_FLAVOR}.sh"
 
 INSTALL_DIR=${CUR_DIR}
 
 rm -rf ${INSTALL_DIR}/bin
 
-wget -c ${CM_WGET_URL} --no-check-certificate
+wget -c ${MLC_WGET_URL} --no-check-certificate
 if [ "${?}" != "0" ]; then exit 1; fi
diff --git a/script/install-cmake-prebuilt/customize.py b/script/install-cmake-prebuilt/customize.py
index 5eef0848a..e8ceb4657 100644
--- a/script/install-cmake-prebuilt/customize.py
+++ b/script/install-cmake-prebuilt/customize.py
@@ -13,10 +13,10 @@ def preprocess(i):
 
     recursion_spaces = i['recursion_spaces']
 
-    need_version = env.get('CM_VERSION', '')
+    need_version = env.get('MLC_VERSION', '')
     if need_version == '':
         return {'return': 1,
-                'error': 'internal problem - CM_VERSION is not defined in env'}
+                'error': 'internal problem - MLC_VERSION is not defined in env'}
 
     print(recursion_spaces + ' # Requested version: {}'.format(need_version))
 
@@ -26,10 +26,10 @@ def preprocess(i):
 
     need_version = ".".join(version_split)
 
-    host_os_bits = env['CM_HOST_OS_BITS']
+    host_os_bits = env['MLC_HOST_OS_BITS']
 
     if os_info['platform'] != 'windows':
-        host_os_machine = env['CM_HOST_OS_MACHINE']  # ABI
+        host_os_machine = env['MLC_HOST_OS_MACHINE']  # ABI
 
     # Prepare package name
     if os_info['platform'] == 'darwin':
@@ -104,18 +104,18 @@ def preprocess(i):
     path_bin = os.path.join(os.getcwd(), 'bin')
     path_include = os.path.join(os.getcwd(), 'include')
 
-    env['CM_CMAKE_PACKAGE'] = filename
+    env['MLC_CMAKE_PACKAGE'] = filename
 
-    env['CM_CMAKE_INSTALLED_PATH'] = path_bin
-    env['CM_GET_DEPENDENT_CACHED_PATH'] = os.getcwd()
+    env['MLC_CMAKE_INSTALLED_PATH'] = path_bin
+    env['MLC_GET_DEPENDENT_CACHED_PATH'] = os.getcwd()
 
     bin_name = 'cmake.exe' if os_info['platform'] == 'windows' else 'cmake'
 
-    env['CM_CMAKE_BIN_WITH_PATH'] = os.path.join(path_bin, bin_name)
+    env['MLC_CMAKE_BIN_WITH_PATH'] = os.path.join(path_bin, bin_name)
 
     # We don't need to check default paths here because we force install to
     # cache
-    env['+PATH'] = [env['CM_CMAKE_INSTALLED_PATH']]
+    env['+PATH'] = [env['MLC_CMAKE_INSTALLED_PATH']]
 
     if os.path.isdir(path_include):
         env['+C_INCLUDE_PATH'] = [path_include]
diff --git a/script/install-cmake-prebuilt/meta.yaml b/script/install-cmake-prebuilt/meta.yaml
index 4107931fc..ba7767f97 100644
--- a/script/install-cmake-prebuilt/meta.yaml
+++ b/script/install-cmake-prebuilt/meta.yaml
@@ -7,14 +7,14 @@ default_version: 3.28.3
 deps:
 - tags: detect,os
 new_env_keys:
-- CM_CMAKE_*
-- CM_GET_DEPENDENT_CACHED_PATH
+- MLC_CMAKE_*
+- MLC_GET_DEPENDENT_CACHED_PATH
 - +PATH
 - +LD_LIBRARY_PATH
 - +C_INCLUDE_PATH
 post_deps:
 - skip_if_env:
-    CM_REQUIRE_INSTALL:
+    MLC_REQUIRE_INSTALL:
     - 'yes'
   tags: get,cmake
 tags:
diff --git a/script/install-cmake-prebuilt/run.sh b/script/install-cmake-prebuilt/run.sh
index a7b91ddd2..92431bc42 100644
--- a/script/install-cmake-prebuilt/run.sh
+++ b/script/install-cmake-prebuilt/run.sh
@@ -1,10 +1,10 @@
 #!/bin/bash
 
 echo ""
-echo "Unarchiving ${CM_CMAKE_PACKAGE} ..."
+echo "Unarchiving ${MLC_CMAKE_PACKAGE} ..."
 
-tar --strip 1 -xf ${CM_CMAKE_PACKAGE}
+tar --strip 1 -xf ${MLC_CMAKE_PACKAGE}
 test $? -eq 0 || exit 1
 
-rm -f ${CM_CMAKE_PACKAGE}
+rm -f ${MLC_CMAKE_PACKAGE}
 test $? -eq 0 || exit 1
diff --git a/script/install-cuda-package-manager/customize.py b/script/install-cuda-package-manager/customize.py
index 96100616a..dd0602596 100644
--- a/script/install-cuda-package-manager/customize.py
+++ b/script/install-cuda-package-manager/customize.py
@@ -11,6 +11,6 @@ def preprocess(i):
     automation = i['automation']
     recursion_spaces = i['recursion_spaces']
 
-    env['CM_GET_DEPENDENT_CACHED_PATH'] = os.getcwd()
+    env['MLC_GET_DEPENDENT_CACHED_PATH'] = os.getcwd()
 
     return {'return': 0}
diff --git a/script/install-cuda-package-manager/meta.yaml b/script/install-cuda-package-manager/meta.yaml
index 3acae7227..a00c4cca5 100644
--- a/script/install-cuda-package-manager/meta.yaml
+++ b/script/install-cuda-package-manager/meta.yaml
@@ -7,7 +7,7 @@ deps:
 - tags: detect,os
 post_deps:
 - skip_if_env:
-    CM_REQUIRE_INSTALL:
+    MLC_REQUIRE_INSTALL:
     - 'yes'
   tags: get,cuda
 tags:
diff --git a/script/install-cuda-package-manager/run.sh b/script/install-cuda-package-manager/run.sh
index d52681cbf..ca3bba68d 100644
--- a/script/install-cuda-package-manager/run.sh
+++ b/script/install-cuda-package-manager/run.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
 CUR=${PWD}
 
-echo "Package installation script not available yet for ${CM_HOST_OS_FLAVOR}"
+echo "Package installation script not available yet for ${MLC_HOST_OS_FLAVOR}"
 exit 1
diff --git a/script/install-cuda-prebuilt/customize.py b/script/install-cuda-prebuilt/customize.py
index ddaae605c..efe076217 100644
--- a/script/install-cuda-prebuilt/customize.py
+++ b/script/install-cuda-prebuilt/customize.py
@@ -9,44 +9,44 @@ def preprocess(i):
     env = i['env']
 
     if str(env.get('CUDA_SKIP_SUDO', '')).lower() == 'true':
-        env['CM_SUDO'] = ''
+        env['MLC_SUDO'] = ''
 
     meta = i['meta']
 
     automation = i['automation']
 
-    version = env.get('CM_VERSION')
+    version = env.get('MLC_VERSION')
 
-    if version not in env.get('CM_CUDA_LINUX_FILENAME', ''):
+    if version not in env.get('MLC_CUDA_LINUX_FILENAME', ''):
         supported_versions = list(meta['versions'].keys())
         return {'return': 1, 'error': "Only CUDA versions {} are supported now".format(
             ', '.join(supported_versions))}
 
-    install_prefix = env.get('CM_CUDA_INSTALL_PREFIX', os.getcwd())
+    install_prefix = env.get('MLC_CUDA_INSTALL_PREFIX', os.getcwd())
 
-    env['CM_CUDA_INSTALL_PREFIX'] = install_prefix
+    env['MLC_CUDA_INSTALL_PREFIX'] = install_prefix
 
     extra_install_args = ''
 
-    if str(env.get('CM_CUDA_DRIVER_INSTALL_OVERRIDE', '')) != '':
+    if str(env.get('MLC_CUDA_DRIVER_INSTALL_OVERRIDE', '')) != '':
         extra_install_args += ' --override-driver-check'
 
     recursion_spaces = i['recursion_spaces']
     nvcc_bin = "nvcc"
 
     env['WGET_URL'] = "https://developer.download.nvidia.com/compute/cuda/" + \
-        env['CM_VERSION'] + "/local_installers/" + \
-        env['CM_CUDA_LINUX_FILENAME']
+        env['MLC_VERSION'] + "/local_installers/" + \
+        env['MLC_CUDA_LINUX_FILENAME']
 
     extra_options = env.get('CUDA_ADDITIONAL_INSTALL_OPTIONS', '')
-    if env.get('CM_CUDA_INSTALL_DRIVER', '') == "yes":
+    if env.get('MLC_CUDA_INSTALL_DRIVER', '') == "yes":
         extra_options += " --driver"
     env['CUDA_ADDITIONAL_INSTALL_OPTIONS'] = extra_options
 
-    env['CM_CUDA_INSTALLED_PATH'] = os.path.join(install_prefix, 'install')
-    env['CM_NVCC_BIN_WITH_PATH'] = os.path.join(
+    env['MLC_CUDA_INSTALLED_PATH'] = os.path.join(install_prefix, 'install')
+    env['MLC_NVCC_BIN_WITH_PATH'] = os.path.join(
         install_prefix, 'install', 'bin', nvcc_bin)
-    env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_NVCC_BIN_WITH_PATH']
+    env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_NVCC_BIN_WITH_PATH']
 
-    env['CM_CUDA_EXTRA_INSTALL_ARGS'] = extra_install_args
+    env['MLC_CUDA_EXTRA_INSTALL_ARGS'] = extra_install_args
 
     # Set CUDA_RUN_FILE_LOCAL_PATH to empty if not set for backwards
     # compatibility in download file
diff --git a/script/install-cuda-prebuilt/meta.yaml b/script/install-cuda-prebuilt/meta.yaml
index 4d22a2d0c..a5f06022e 100644
--- a/script/install-cuda-prebuilt/meta.yaml
+++ b/script/install-cuda-prebuilt/meta.yaml
@@ -4,29 +4,29 @@ automation_uid: 5b4e0237da074764
 cache: true
 category: CUDA automation
 default_env:
-  CM_SUDO: sudo
+  MLC_SUDO: sudo
 default_version: 11.8.0
 deps:
 - tags: detect,os
 docker:
   run: true
 input_mapping:
-  install_prefix: CM_CUDA_INSTALL_PREFIX
+  install_prefix: MLC_CUDA_INSTALL_PREFIX
   local_run_file_path: CUDA_RUN_FILE_LOCAL_PATH
-  override-driver-check: CM_CUDA_DRIVER_INSTALL_OVERRIDE
+  override-driver-check: MLC_CUDA_DRIVER_INSTALL_OVERRIDE
   skip_sudo: CUDA_SKIP_SUDO
 new_env_keys:
-- CM_CUDA_*
-- CM_NVCC_*
+- MLC_CUDA_*
+- MLC_NVCC_*
 post_deps:
 - skip_if_env:
-    CM_REQUIRE_INSTALL:
+    MLC_REQUIRE_INSTALL:
     - 'yes'
   tags: get,cuda
 prehook_deps:
 - env:
-    CM_DOWNLOAD_FINAL_ENV_NAME: CM_CUDA_RUN_FILE_PATH
-    CM_DOWNLOAD_LOCAL_FILE_PATH: <<<CUDA_RUN_FILE_LOCAL_PATH>>>
+    MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_CUDA_RUN_FILE_PATH
+    MLC_DOWNLOAD_LOCAL_FILE_PATH: <<<CUDA_RUN_FILE_LOCAL_PATH>>>
   extra_cache_tags: cuda,run,file
   force_cache: true
   names:
@@ -45,47 +45,47 @@ uid: 14eadcd42ba340c3
 variations:
   driver:
     env:
-      CM_CUDA_INSTALL_DRIVER: 'yes'
+      MLC_CUDA_INSTALL_DRIVER: 'yes'
     group: install-driver
   no-driver:
     default: true
     env:
-      CM_CUDA_INSTALL_DRIVER: 'no'
+      MLC_CUDA_INSTALL_DRIVER: 'no'
     group: install-driver
 versions:
   11.7.0:
     env:
-      CM_CUDA_LINUX_FILENAME: cuda_11.7.0_515.43.04_linux.run
+      MLC_CUDA_LINUX_FILENAME: cuda_11.7.0_515.43.04_linux.run
   11.8.0:
     env:
-      CM_CUDA_LINUX_FILENAME: cuda_11.8.0_520.61.05_linux.run
+      MLC_CUDA_LINUX_FILENAME: cuda_11.8.0_520.61.05_linux.run
   12.0.0:
     env:
-      CM_CUDA_LINUX_FILENAME: cuda_12.0.0_525.60.13_linux.run
+      MLC_CUDA_LINUX_FILENAME: cuda_12.0.0_525.60.13_linux.run
   12.1.1:
     env:
-      CM_CUDA_LINUX_FILENAME: cuda_12.1.1_530.30.02_linux.run
+      MLC_CUDA_LINUX_FILENAME: cuda_12.1.1_530.30.02_linux.run
   12.2.0:
     env:
-      CM_CUDA_LINUX_FILENAME: cuda_12.2.0_535.54.03_linux.run
+      MLC_CUDA_LINUX_FILENAME: cuda_12.2.0_535.54.03_linux.run
   12.3.2:
     env:
-      CM_CUDA_LINUX_FILENAME: cuda_12.3.2_545.23.08_linux.run
+      MLC_CUDA_LINUX_FILENAME: cuda_12.3.2_545.23.08_linux.run
   12.4.0:
     env:
-      CM_CUDA_LINUX_FILENAME: cuda_12.4.0_550.54.14_linux.run
+      MLC_CUDA_LINUX_FILENAME: cuda_12.4.0_550.54.14_linux.run
   12.4.1:
     env:
-      CM_CUDA_LINUX_FILENAME: cuda_12.4.1_550.54.15_linux.run
+      MLC_CUDA_LINUX_FILENAME: cuda_12.4.1_550.54.15_linux.run
   12.5.0:
     env:
-      CM_CUDA_LINUX_FILENAME: cuda_12.5.0_555.42.02_linux.run
+      MLC_CUDA_LINUX_FILENAME: cuda_12.5.0_555.42.02_linux.run
   12.5.1:
     env:
-      CM_CUDA_LINUX_FILENAME: cuda_12.5.1_555.42.06_linux.run
+      MLC_CUDA_LINUX_FILENAME: cuda_12.5.1_555.42.06_linux.run
   12.6.0:
     env:
-      CM_CUDA_LINUX_FILENAME: cuda_12.6.0_560.28.03_linux.run
+      MLC_CUDA_LINUX_FILENAME: cuda_12.6.0_560.28.03_linux.run
   12.6.1:
     env:
-      CM_CUDA_LINUX_FILENAME: cuda_12.6.1_560.35.03_linux.run
+      MLC_CUDA_LINUX_FILENAME: cuda_12.6.1_560.35.03_linux.run
diff --git a/script/install-cuda-prebuilt/run.sh b/script/install-cuda-prebuilt/run.sh
index c13e96b3b..84590a9af 100644
--- a/script/install-cuda-prebuilt/run.sh
+++ b/script/install-cuda-prebuilt/run.sh
@@ -1,8 +1,8 @@
 #!/bin/bash
 
-INSTALL_DIR=${CM_CUDA_INSTALL_PREFIX}/install
+INSTALL_DIR=${MLC_CUDA_INSTALL_PREFIX}/install
 
-cmd="${CM_SUDO} bash ${CM_CUDA_RUN_FILE_PATH} --toolkitpath=${INSTALL_DIR} --defaultroot=${INSTALL_DIR} --toolkit ${CUDA_ADDITIONAL_INSTALL_OPTIONS} --silent --override ${CM_CUDA_EXTRA_INSTALL_ARGS}"
+cmd="${MLC_SUDO} bash ${MLC_CUDA_RUN_FILE_PATH} --toolkitpath=${INSTALL_DIR} --defaultroot=${INSTALL_DIR} --toolkit ${CUDA_ADDITIONAL_INSTALL_OPTIONS} --silent --override ${MLC_CUDA_EXTRA_INSTALL_ARGS}"
 echo "${cmd}"
 eval "${cmd}"
 test $? -eq 0 || exit $?
diff --git a/script/install-diffusers-from-src/meta.yaml b/script/install-diffusers-from-src/meta.yaml
index f2194fd66..718301b5c 100644
--- a/script/install-diffusers-from-src/meta.yaml
+++ b/script/install-diffusers-from-src/meta.yaml
@@ -10,32 +10,32 @@ deps:
   - python
   - python3
   skip_if_env:
-    CM_CONDA_ENV:
+    MLC_CONDA_ENV:
     - 'yes'
   tags: get,python3
 - names:
   - compiler
   tags: get,compiler
 - env:
-    CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_DIFFUSERS_SRC_REPO_PATH
+    MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_DIFFUSERS_SRC_REPO_PATH
   extra_cache_tags: diffusers,diffusers-src,src,diffusers-src,diffusers-src-repo
   names:
  - diffusers-src-repo
   tags: get,git,repo
   update_tags_from_env_with_prefix:
     _branch.:
-    - CM_GIT_CHECKOUT
+    - MLC_GIT_CHECKOUT
     _repo.:
-    - CM_GIT_URL
+    - MLC_GIT_URL
     _sha.:
-    - CM_GIT_CHECKOUT_SHA
+    - MLC_GIT_CHECKOUT_SHA
     _tag.:
-    - CM_GIT_CHECKOUT_TAG
+    - MLC_GIT_CHECKOUT_TAG
 env:
-  CM_GIT_URL: https://github.com/huggingface/diffusers.git
+  MLC_GIT_URL: https://github.com/huggingface/diffusers.git
 name: Build diffusers from sources
 new_env_keys:
-- CM_DIFFUSERS_*
+- MLC_DIFFUSERS_*
 prehook_deps: []
 sort: 1000
 tags:
@@ -49,31 +49,31 @@ uid: b2ddda995f63412f
 variations:
   branch.#:
     env:
-      CM_GIT_CHECKOUT: '#'
+      MLC_GIT_CHECKOUT: '#'
   for-intel-mlperf-inference-v4.0-sdxl:
     base:
     - tag.v0.25.1
     env:
-      CM_INTEL_MLPERF_INFERENCE_v4_0_STABLE_DIFFUSION_PATCH: 'yes'
+      MLC_INTEL_MLPERF_INFERENCE_v4_0_STABLE_DIFFUSION_PATCH: 'yes'
   python.#:
     env:
-      CM_PYTHON_BIN_WITH_PATH: '#'
+      MLC_PYTHON_BIN_WITH_PATH: '#'
   repo.#:
     env:
-      CM_GIT_URL: '#'
+      MLC_GIT_URL: '#'
     group: repo
   repo.https://github.com/huggingface/diffusers:
     default: true
     env:
-      CM_GIT_URL: https://github.com/huggingface/diffusers
+      MLC_GIT_URL: https://github.com/huggingface/diffusers
     group: repo
   sha.#:
     env:
-      CM_GIT_CHECKOUT_SHA: '#'
+      MLC_GIT_CHECKOUT_SHA: '#'
   tag.#:
     ad:
       diffusers-src-repo:
         tags: _no-recurse-submodules,_full-history
     env:
-      CM_GIT_CHECKOUT_TAG: '#'
+      MLC_GIT_CHECKOUT_TAG: '#'
 versions: {}
diff --git a/script/install-diffusers-from-src/run.sh b/script/install-diffusers-from-src/run.sh
index 8d5ca084a..c1f6c3be7 100644
--- a/script/install-diffusers-from-src/run.sh
+++ b/script/install-diffusers-from-src/run.sh
@@ -2,17 +2,17 @@ CUR_DIR=$PWD
 
 rm -rf diffusers
-cp -r ${CM_DIFFUSERS_SRC_REPO_PATH} diffusers
+cp -r ${MLC_DIFFUSERS_SRC_REPO_PATH} diffusers
 test "${?}" -eq "0" || exit $?
 cd diffusers
 rm -rf build
 
-if [[ ${CM_INTEL_MLPERF_INFERENCE_v4_0_STABLE_DIFFUSION_PATCH} == "yes" ]]; then
+if [[ ${MLC_INTEL_MLPERF_INFERENCE_v4_0_STABLE_DIFFUSION_PATCH} == "yes" ]]; then
   wget -nc https://raw.githubusercontent.com/mlcommons/inference_results_v4.0/main/closed/Intel/code/stable-diffusion-xl/pytorch-cpu/diffusers.patch
   test "${?}" -eq "0" || exit $?
   git apply diffusers.patch
   test "${?}" -eq "0" || exit $?
 fi
 
-${CM_PYTHON_BIN_WITH_PATH} -m pip install .
+${MLC_PYTHON_BIN_WITH_PATH} -m pip install .
 test "${?}" -eq "0" || exit $?
diff --git a/script/install-gcc-src/customize.py b/script/install-gcc-src/customize.py
index ff13be8e2..5d37ab787 100644
--- a/script/install-gcc-src/customize.py
+++ b/script/install-gcc-src/customize.py
@@ -15,16 +15,16 @@ def preprocess(i):
 
     recursion_spaces = i['recursion_spaces']
 
-    need_version = env.get('CM_VERSION', '')
+    need_version = env.get('MLC_VERSION', '')
     if need_version == '':
         return {'return': 1,
-                'error': 'internal problem - CM_VERSION is not defined in env'}
+                'error': 'internal problem - MLC_VERSION is not defined in env'}
 
     print(recursion_spaces + ' # Requested version: {}'.format(need_version))
 
-    if 'CM_GIT_CHECKOUT' not in env:
-        env['CM_GIT_CHECKOUT'] = 'releases/gcc-' + need_version
+    if 'MLC_GIT_CHECKOUT' not in env:
+        env['MLC_GIT_CHECKOUT'] = 'releases/gcc-' + need_version
 
-    env['CM_GCC_INSTALLED_PATH'] = os.path.join(os.getcwd(), 'install', 'bin')
+    env['MLC_GCC_INSTALLED_PATH'] = os.path.join(os.getcwd(), 'install', 'bin')
 
     return {'return': 0}
diff --git a/script/install-gcc-src/meta.yaml b/script/install-gcc-src/meta.yaml
index 2c72a9734..c463860ed 100644
--- a/script/install-gcc-src/meta.yaml
+++ b/script/install-gcc-src/meta.yaml
@@ -7,10 +7,10 @@ default_version: '12'
 deps:
 - tags: detect,os
 env:
-  CM_GIT_URL: git://gcc.gnu.org/git/gcc.git
+  MLC_GIT_URL: git://gcc.gnu.org/git/gcc.git
 post_deps:
 - skip_if_env:
-    CM_REQUIRE_INSTALL:
+    MLC_REQUIRE_INSTALL:
     - 'yes'
   tags: get,gcc
 tags:
@@ -22,4 +22,4 @@ uid: faae0ebd6e1242db
 versions:
   master:
     env:
-      CM_GIT_CHECKOUT: master
+      MLC_GIT_CHECKOUT: master
diff --git a/script/install-gcc-src/run.sh b/script/install-gcc-src/run.sh
index 472f4e9c1..c3631255d 100644
--- a/script/install-gcc-src/run.sh
+++ b/script/install-gcc-src/run.sh
@@ -5,8 +5,8 @@ CUR_DIR=$PWD
 echo "******************************************************"
 
 if [ ! -d "src" ]; then
-  echo "Cloning GCC from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT}..."
-  git clone -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} src
+  echo "Cloning GCC from ${MLC_GIT_URL} with branch ${MLC_GIT_CHECKOUT}..."
+  git clone -b "${MLC_GIT_CHECKOUT}" ${MLC_GIT_URL} src
   if [ "${?}" != "0" ]; then exit 1; fi
 fi
 
@@ -25,10 +25,10 @@ cd ../build
 if [ "${?}" != "0" ]; then exit 1; fi
 
 echo "******************************************************"
-CM_MAKE_CORES=${CM_MAKE_CORES:-${CM_HOST_CPU_TOTAL_CORES}}
-CM_MAKE_CORES=${CM_MAKE_CORES:-2}
+MLC_MAKE_CORES=${MLC_MAKE_CORES:-${MLC_HOST_CPU_TOTAL_CORES}}
+MLC_MAKE_CORES=${MLC_MAKE_CORES:-2}
 
-make -j${CM_MAKE_CORES}
+make -j${MLC_MAKE_CORES}
 if [ "${?}" != "0" ]; then exit 1; fi
 make install
 if [ "${?}" != "0" ]; then exit 1; fi
diff --git a/script/install-generic-conda-package/customize.py b/script/install-generic-conda-package/customize.py
index 2fe548677..33c2ad57c 100644
--- a/script/install-generic-conda-package/customize.py
+++ b/script/install-generic-conda-package/customize.py
@@ -10,17 +10,17 @@ def preprocess(i):
     automation = i['automation']
     run_script_input = i['run_script_input']
 
-    version_string = env.get('CM_TMP_PIP_VERSION_STRING', '').strip()
-    package_name = env['CM_CONDA_PKG_NAME'].strip()
+    version_string = env.get('MLC_TMP_PIP_VERSION_STRING', '').strip()
+    package_name = env['MLC_CONDA_PKG_NAME'].strip()
 
-    install_cmd = env['CM_CONDA_BIN_WITH_PATH'] + " install -y "
-    if env.get('CM_CONDA_PKG_SRC', '') != '':
-        install_cmd += " -c " + env['CM_CONDA_PKG_SRC'] + " "
+    install_cmd = env['MLC_CONDA_BIN_WITH_PATH'] + " install -y "
+    if env.get('MLC_CONDA_PKG_SRC', '') != '':
+        install_cmd += " -c " + env['MLC_CONDA_PKG_SRC'] + " "
 
     install_cmd += package_name
     install_cmd += version_string
 
-    env['CM_CONDA_PKG_INSTALL_CMD'] = install_cmd
+    env['MLC_CONDA_PKG_INSTALL_CMD'] = install_cmd
 
     return {'return': 0}
 
@@ -36,10 +36,10 @@ def detect_version(i):
 def postprocess(i):
 
     env = i['env']
-    version = env.get('CM_VERSION', '')
+    version = env.get('MLC_VERSION', '')
 
-    if env['CM_CONDA_PKG_NAME'] == "python":
-        env['CM_PYTHON_BIN_WITH_PATH'] = os.path.join(
-            os.path.dirname(env['CM_CONDA_BIN_WITH_PATH']), "python")
+    if env['MLC_CONDA_PKG_NAME'] == "python":
+        env['MLC_PYTHON_BIN_WITH_PATH'] = os.path.join(
+            os.path.dirname(env['MLC_CONDA_BIN_WITH_PATH']), "python")
 
     return {'return': 0, 'version': version}
diff --git a/script/install-generic-conda-package/meta.yaml b/script/install-generic-conda-package/meta.yaml
index 8beed5da9..34f02091e 100644
--- a/script/install-generic-conda-package/meta.yaml
+++ b/script/install-generic-conda-package/meta.yaml
@@ -14,12 +14,12 @@ deps:
   - conda
   tags: get,conda
 extra_cache_tags_from_env:
-- env: CM_PYTHON_CACHE_TAGS
+- env: MLC_PYTHON_CACHE_TAGS
   prefix: python-
 local_env_keys:
-- CM_GENERIC_PYTHON_PACKAGE_VARIANT
+- MLC_GENERIC_PYTHON_PACKAGE_VARIANT
 new_env_keys:
-- CM_PYTHONLIB_*
+- MLC_PYTHONLIB_*
 tags:
 - get
 - install
@@ -36,17 +36,17 @@ variations:
     tags: _name.#
   package.#:
     env:
-      CM_CONDA_PKG_NAME: '#'
+      MLC_CONDA_PKG_NAME: '#'
   package.python:
     env:
-      CM_CONDA_PKG_NAME: python
+      MLC_CONDA_PKG_NAME: python
     new_env_keys:
-    - CM_PYTHON_BIN_WITH_PATH
+    - MLC_PYTHON_BIN_WITH_PATH
   source.#:
     env:
-      CM_CONDA_PKG_SRC: '#'
+      MLC_CONDA_PKG_SRC: '#'
     group: package-source
   source.intel:
     env:
-      CM_CONDA_PKG_SRC: https://software.repos.intel.com/python/conda/
+      MLC_CONDA_PKG_SRC: https://software.repos.intel.com/python/conda/
     group: package-source
diff --git a/script/install-generic-conda-package/run.sh b/script/install-generic-conda-package/run.sh
index 68a48d9ee..e8e175f5e 100644
--- a/script/install-generic-conda-package/run.sh
+++ b/script/install-generic-conda-package/run.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
-cmd="${CM_CONDA_PKG_INSTALL_CMD}" +cmd="${MLC_CONDA_PKG_INSTALL_CMD}" echo $cmd eval $cmd test $? -eq 0 || exit $? diff --git a/script/install-gflags-from-src/customize.py b/script/install-gflags-from-src/customize.py index c7ad1faa0..c5632ed1d 100644 --- a/script/install-gflags-from-src/customize.py +++ b/script/install-gflags-from-src/customize.py @@ -22,7 +22,7 @@ def postprocess(i): env = i['env'] - env['CM_GFLAGS_BUILD_PATH'] = os.path.join(os.getcwd(), "gflags", "build") - env['CM_DEPENDENT_CACHED_PATH'] = env['CM_GFLAGS_BUILD_PATH'] + env['MLC_GFLAGS_BUILD_PATH'] = os.path.join(os.getcwd(), "gflags", "build") + env['MLC_DEPENDENT_CACHED_PATH'] = env['MLC_GFLAGS_BUILD_PATH'] return {'return': 0} diff --git a/script/install-gflags-from-src/meta.yaml b/script/install-gflags-from-src/meta.yaml index 9dabcd42a..db2c38831 100644 --- a/script/install-gflags-from-src/meta.yaml +++ b/script/install-gflags-from-src/meta.yaml @@ -7,7 +7,7 @@ deps: - tags: detect,os - tags: detect,cpu - env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_GFLAGS_SRC_REPO_PATH + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_GFLAGS_SRC_REPO_PATH extra_cache_tags: gflags,src,gflags-src,gflags-src-repo names: - gflags-src-repo @@ -15,17 +15,17 @@ deps: tags: get,git,repo update_tags_from_env_with_prefix: _branch.: - - CM_GIT_CHECKOUT + - MLC_GIT_CHECKOUT _repo.: - - CM_GIT_URL + - MLC_GIT_URL _sha.: - - CM_GIT_CHECKOUT_SHA + - MLC_GIT_CHECKOUT_SHA _tag.: - - CM_GIT_CHECKOUT_TAG + - MLC_GIT_CHECKOUT_TAG env: {} name: Build gflags from sources new_env_keys: -- CM_GFLAGS_* +- MLC_GFLAGS_* prehook_deps: [] sort: 1000 tags: @@ -39,23 +39,23 @@ uid: f311366ff15e4cdf variations: branch.#: env: - CM_GIT_CHECKOUT: '#' + MLC_GIT_CHECKOUT: '#' repo.#: env: - CM_GIT_URL: '#' + MLC_GIT_URL: '#' group: repo repo.https://github.com/gflags/gflags: default: true env: - CM_GIT_URL: https://github.com/gflags/gflags + MLC_GIT_URL: https://github.com/gflags/gflags group: repo sha.#: env: - CM_GIT_CHECKOUT_SHA: '#' + MLC_GIT_CHECKOUT_SHA: '#' tag.#: ad: gflags-src-repo: tags: _no-recurse-submodules,_full-history env: - CM_GIT_CHECKOUT_TAG: '#' + MLC_GIT_CHECKOUT_TAG: '#' versions: {} diff --git a/script/install-gflags-from-src/run.sh b/script/install-gflags-from-src/run.sh index 79cd02608..98e59246f 100644 --- a/script/install-gflags-from-src/run.sh +++ b/script/install-gflags-from-src/run.sh @@ -2,7 +2,7 @@ CUR_DIR=$PWD rm -rf gflags -cp -r ${CM_GFLAGS_SRC_REPO_PATH} gflags +cp -r ${MLC_GFLAGS_SRC_REPO_PATH} gflags cd gflags test "${?}" -eq "0" || exit $? rm -rf build @@ -11,5 +11,5 @@ mkdir build cd build cmake .. test "${?}" -eq "0" || exit $? -make -j${CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET} +make -j${MLC_HOST_CPU_PHYSICAL_CORES_PER_SOCKET} test "${?}" -eq "0" || exit $? 
diff --git a/script/install-gflags/customize.py b/script/install-gflags/customize.py index 97f3d2c06..c92d1d7af 100644 --- a/script/install-gflags/customize.py +++ b/script/install-gflags/customize.py @@ -15,10 +15,10 @@ def preprocess(i): recursion_spaces = i['recursion_spaces'] - need_version = env.get('CM_VERSION', '') + need_version = env.get('MLC_VERSION', '') if need_version == '': return {'return': 1, - 'error': 'internal problem - CM_VERSION is not defined in env'} + 'error': 'internal problem - MLC_VERSION is not defined in env'} print(recursion_spaces + ' # Requested version: {}'.format(need_version)) diff --git a/script/install-gflags/meta.yaml b/script/install-gflags/meta.yaml index 349560315..6eb2ea038 100644 --- a/script/install-gflags/meta.yaml +++ b/script/install-gflags/meta.yaml @@ -20,4 +20,4 @@ uid: 10bb562c29ea459e versions: 2.2.2: env: - CM_VERSION: 2.2.2 + MLC_VERSION: 2.2.2 diff --git a/script/install-gflags/run.sh b/script/install-gflags/run.sh index 881eb6b75..63c559638 100644 --- a/script/install-gflags/run.sh +++ b/script/install-gflags/run.sh @@ -3,16 +3,16 @@ CUR_DIR=$PWD echo "***********************************************************" -CM_MAKE_CORES=${CM_MAKE_CORES:-${CM_HOST_CPU_TOTAL_CORES}} -CM_MAKE_CORES=${CM_MAKE_CORES:-2} -CM_WGET_URL=https://github.com/gflags/gflags/archive/refs/tags/v${CM_VERSION}.tar.gz -wget -nc ${CM_WGET_URL} +MLC_MAKE_CORES=${MLC_MAKE_CORES:-${MLC_HOST_CPU_TOTAL_CORES}} +MLC_MAKE_CORES=${MLC_MAKE_CORES:-2} +MLC_WGET_URL=https://github.com/gflags/gflags/archive/refs/tags/v${MLC_VERSION}.tar.gz +wget -nc ${MLC_WGET_URL} test $? -eq 0 || exit 1 -tar -xzf "v${CM_VERSION}.tar.gz" && cd gflags-${CM_VERSION} +tar -xzf "v${MLC_VERSION}.tar.gz" && cd gflags-${MLC_VERSION} test $? -eq 0 || exit 1 rm -rf build mkdir build && cd build cmake .. -make -j${CM_MAKE_CORES} +make -j${MLC_MAKE_CORES} test $? 
-eq 0 || exit 1 sudo make install diff --git a/script/install-github-cli/customize.py b/script/install-github-cli/customize.py index 08370ecbd..ac6ddfc9e 100644 --- a/script/install-github-cli/customize.py +++ b/script/install-github-cli/customize.py @@ -8,7 +8,7 @@ def preprocess(i): env = i['env'] - env['CM_TMP_PATH'] = os.path.join(os.getcwd(), 'install', 'bin') - env['CM_TMP_FAIL_IF_NOT_FOUND'] = 'yes' + env['MLC_TMP_PATH'] = os.path.join(os.getcwd(), 'install', 'bin') + env['MLC_TMP_FAIL_IF_NOT_FOUND'] = 'yes' return {'return': 0} diff --git a/script/install-intel-neural-speed-from-src/customize.py b/script/install-intel-neural-speed-from-src/customize.py index f6cae5a37..d4ce4a624 100644 --- a/script/install-intel-neural-speed-from-src/customize.py +++ b/script/install-intel-neural-speed-from-src/customize.py @@ -11,8 +11,8 @@ def preprocess(i): env = i['env'] - env['CM_PYTHON_BIN_WITH_PATH'] = os.path.join( - env['CM_CONDA_BIN_PATH'], "python") + env['MLC_PYTHON_BIN_WITH_PATH'] = os.path.join( + env['MLC_CONDA_BIN_PATH'], "python") automation = i['automation'] diff --git a/script/install-intel-neural-speed-from-src/meta.yaml b/script/install-intel-neural-speed-from-src/meta.yaml index da1f041a2..f9ca70fbb 100644 --- a/script/install-intel-neural-speed-from-src/meta.yaml +++ b/script/install-intel-neural-speed-from-src/meta.yaml @@ -7,25 +7,25 @@ deps: - tags: detect,os - tags: detect,cpu - env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_INTEL_NEURAL_SPEED_SRC_REPO_PATH + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_INTEL_NEURAL_SPEED_SRC_REPO_PATH extra_cache_tags: intel-neural-speed,neural-speed-src,src,intel-neural-speed-src,neural-speed-src-repo names: - neural-speed-src-repo tags: get,git,repo update_tags_from_env_with_prefix: _branch.: - - CM_GIT_CHECKOUT + - MLC_GIT_CHECKOUT _repo.: - - CM_GIT_URL + - MLC_GIT_URL _sha.: - - CM_GIT_CHECKOUT_SHA + - MLC_GIT_CHECKOUT_SHA _tag.: - - CM_GIT_CHECKOUT_TAG + - MLC_GIT_CHECKOUT_TAG env: - CM_GIT_URL: https://github.com/intel/neural-speed + MLC_GIT_URL: https://github.com/intel/neural-speed name: Build Intel Neural Speed from sources new_env_keys: -- CM_INTEL_NEURAL_SPEED_* +- MLC_INTEL_NEURAL_SPEED_* sort: 1000 tags: - install @@ -37,7 +37,7 @@ uid: b5477fdc929744ce variations: branch.#: env: - CM_GIT_CHECKOUT: '#' + MLC_GIT_CHECKOUT: '#' for-intel-mlperf-inference-v4.0-gptj: adr: conda-package: @@ -57,20 +57,20 @@ variations: tags: get,generic,conda-package,_package.wheel,_source.conda-forge repo.#: env: - CM_GIT_URL: '#' + MLC_GIT_URL: '#' group: repo repo.https://github.com/intel/neural-speed: default: true env: - CM_GIT_URL: https://github.com/intel/neural-speed + MLC_GIT_URL: https://github.com/intel/neural-speed group: repo sha.#: env: - CM_GIT_CHECKOUT_SHA: '#' + MLC_GIT_CHECKOUT_SHA: '#' tag.#: ad: neural-speed-src-repo: tags: _no-recurse-submodules,_full-history env: - CM_GIT_CHECKOUT_TAG: '#' + MLC_GIT_CHECKOUT_TAG: '#' warnings: [] diff --git a/script/install-intel-neural-speed-from-src/run.sh b/script/install-intel-neural-speed-from-src/run.sh index 7068890f3..8f8473434 100644 --- a/script/install-intel-neural-speed-from-src/run.sh +++ b/script/install-intel-neural-speed-from-src/run.sh @@ -3,12 +3,12 @@ CUR_DIR=$PWD echo $PWD rm -rf neural-speed -cmd="cp -r ${CM_INTEL_NEURAL_SPEED_SRC_REPO_PATH} neural-speed" +cmd="cp -r ${MLC_INTEL_NEURAL_SPEED_SRC_REPO_PATH} neural-speed" echo "$cmd" eval "$cmd" -${CM_PYTHON_BIN_WITH_PATH} -m pip install -r neural-speed/requirements.txt +${MLC_PYTHON_BIN_WITH_PATH} -m pip install -r 
neural-speed/requirements.txt test $? -eq 0 || exit $? -CMAKE_ARGS="-DNS_PROFILING=ON" ${CM_PYTHON_BIN_WITH_PATH} -m pip install -ve ./neural-speed +CMAKE_ARGS="-DNS_PROFILING=ON" ${MLC_PYTHON_BIN_WITH_PATH} -m pip install -ve ./neural-speed test $? -eq 0 || exit $? echo "******************************************************" diff --git a/script/install-ipex-from-src/customize.py b/script/install-ipex-from-src/customize.py index 7521bc3c5..73ded3321 100644 --- a/script/install-ipex-from-src/customize.py +++ b/script/install-ipex-from-src/customize.py @@ -11,17 +11,17 @@ def preprocess(i): env = i['env'] - env['IPEX_DIR'] = env['CM_IPEX_SRC_REPO_PATH'] + env['IPEX_DIR'] = env['MLC_IPEX_SRC_REPO_PATH'] - if env.get('CM_USE_LLVM_FOR_IPEX', '') == 'yes': + if env.get('MLC_USE_LLVM_FOR_IPEX', '') == 'yes': env['DNNL_GRAPH_BUILD_COMPILER_BACKEND'] = 1 - env['USE_LLVM'] = env['CM_LLVM_INSTALLED_PATH'] + env['USE_LLVM'] = env['MLC_LLVM_INSTALLED_PATH'] env['LLVM_DIR'] = os.path.join( - env['CM_LLVM_INSTALLED_PATH'], "lib", "cmake", "llvm") + env['MLC_LLVM_INSTALLED_PATH'], "lib", "cmake", "llvm") run_cmd = "python setup.py clean && python setup.py install" - env['CM_RUN_CMD'] = run_cmd + env['MLC_RUN_CMD'] = run_cmd return {'return': 0} @@ -29,12 +29,12 @@ def preprocess(i): def postprocess(i): env = i['env'] - env['CM_IPEX_BUILD_PATH'] = os.path.join(os.getcwd(), "ipex_src", "build") - env['CM_IPEX_INSTALLED_PATH'] = os.path.join( - env['CM_IPEX_BUILD_PATH'], + env['MLC_IPEX_BUILD_PATH'] = os.path.join(os.getcwd(), "ipex_src", "build") + env['MLC_IPEX_INSTALLED_PATH'] = os.path.join( + env['MLC_IPEX_BUILD_PATH'], "Release", "packages", "intel_extension_for_pytorch") - env['CM_DEPENDENT_CACHED_PATH'] = env['CM_IPEX_INSTALLED_PATH'] + env['MLC_DEPENDENT_CACHED_PATH'] = env['MLC_IPEX_INSTALLED_PATH'] return {'return': 0} diff --git a/script/install-ipex-from-src/meta.yaml b/script/install-ipex-from-src/meta.yaml index 590467ec6..7f7046949 100644 --- a/script/install-ipex-from-src/meta.yaml +++ b/script/install-ipex-from-src/meta.yaml @@ -10,35 +10,35 @@ deps: - python - python3 skip_if_env: - CM_CONDA_ENV: + MLC_CONDA_ENV: - 'yes' tags: get,python3 - names: - pytorch skip_if_env: - CM_IPEX_SKIP_PYTORCH: + MLC_IPEX_SKIP_PYTORCH: - 'yes' tags: get,pytorch,from.src - tags: get,generic,conda-package,_package.ninja - env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_IPEX_SRC_REPO_PATH + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_IPEX_SRC_REPO_PATH extra_cache_tags: ipex,src,ipex-src,ipex-src-repo names: - ipex-src-repo tags: get,git,repo update_tags_from_env_with_prefix: _branch.: - - CM_GIT_CHECKOUT + - MLC_GIT_CHECKOUT _repo.: - - CM_GIT_URL + - MLC_GIT_URL _sha.: - - CM_GIT_CHECKOUT_SHA + - MLC_GIT_CHECKOUT_SHA _tag.: - - CM_GIT_CHECKOUT_TAG + - MLC_GIT_CHECKOUT_TAG env: {} name: Build IPEX from sources new_env_keys: -- CM_IPEX_* +- MLC_IPEX_* prehook_deps: [] sort: 1000 tags: @@ -52,7 +52,7 @@ uid: 09364fff2bf04516 variations: branch.#: env: - CM_GIT_CHECKOUT: '#' + MLC_GIT_CHECKOUT: '#' for-intel-mlperf-inference-3d-unet: adr: conda-package: @@ -94,9 +94,9 @@ variations: version_max: '12.3' version_max_usable: '12.3' env: - CM_CONDA_ENV: 'yes' - CM_INTEL_IPEX_3D_UNET_PATCH: 'yes' - CM_IPEX_SKIP_PYTORCH: 'yes' + MLC_CONDA_ENV: 'yes' + MLC_INTEL_IPEX_3D_UNET_PATCH: 'yes' + MLC_IPEX_SKIP_PYTORCH: 'yes' for-intel-mlperf-inference-resnet50: adr: conda-package: @@ -137,8 +137,8 @@ variations: version_max: '12.3' version_max_usable: '12.3' env: - CM_CONDA_ENV: 'yes' - CM_INTEL_IPEX_RESNET50_PATCH: 'yes' + MLC_CONDA_ENV: 
'yes' + MLC_INTEL_IPEX_RESNET50_PATCH: 'yes' for-intel-mlperf-inference-retinanet: adr: conda-package: @@ -179,8 +179,8 @@ variations: version_max: '12.3' version_max_usable: '12.3' env: - CM_CONDA_ENV: 'yes' - CM_INTEL_IPEX_RETINANET_PATCH: 'yes' + MLC_CONDA_ENV: 'yes' + MLC_INTEL_IPEX_RETINANET_PATCH: 'yes' for-intel-mlperf-inference-sdxl: alias: for-intel-mlperf-inference-v4.0-sdxl for-intel-mlperf-inference-v3.1-3d-unet: @@ -229,9 +229,9 @@ variations: - torch tags: install,pytorch,from-src,_for-intel-mlperf-inference-v3.1-dlrm-v2 env: - CM_CONDA_ENV: 'yes' - CM_INTEL_IPEX_DLRM_V2_PATCH: 'yes' - CM_IPEX_SKIP_PYTORCH: 'yes' + MLC_CONDA_ENV: 'yes' + MLC_INTEL_IPEX_DLRM_V2_PATCH: 'yes' + MLC_IPEX_SKIP_PYTORCH: 'yes' for-intel-mlperf-inference-v3.1-gptj: adr: conda-package: @@ -268,9 +268,9 @@ variations: tags: get,generic,conda-package,_package.sympy,_source.conda-forge - tags: install,llvm,src,_for-intel-mlperf-inference-v3.1-gptj env: - CM_CONDA_ENV: 'yes' - CM_IPEX_SKIP_PYTORCH: 'yes' - CM_USE_LLVM_FOR_IPEX: 'yes' + MLC_CONDA_ENV: 'yes' + MLC_IPEX_SKIP_PYTORCH: 'yes' + MLC_USE_LLVM_FOR_IPEX: 'yes' for-intel-mlperf-inference-v3.1-resnet50: alias: for-intel-mlperf-inference-resnet50 for-intel-mlperf-inference-v3.1-retinanet: @@ -323,24 +323,24 @@ variations: version_max: '12.3' version_max_usable: '12.3' env: - CM_CONDA_ENV: 'yes' - CM_IPEX_SKIP_PYTORCH: 'yes' + MLC_CONDA_ENV: 'yes' + MLC_IPEX_SKIP_PYTORCH: 'yes' repo.#: env: - CM_GIT_URL: '#' + MLC_GIT_URL: '#' group: repo repo.https://github.com/intel/intel-extension-for-pytorch: default: true env: - CM_GIT_URL: https://github.com/intel/intel-extension-for-pytorch + MLC_GIT_URL: https://github.com/intel/intel-extension-for-pytorch group: repo sha.#: env: - CM_GIT_CHECKOUT_SHA: '#' + MLC_GIT_CHECKOUT_SHA: '#' tag.#: ad: ipex-src-repo: tags: _no-recurse-submodules,_full-history env: - CM_GIT_CHECKOUT_TAG: '#' + MLC_GIT_CHECKOUT_TAG: '#' versions: {} diff --git a/script/install-ipex-from-src/run.sh b/script/install-ipex-from-src/run.sh index 572455359..ac84f1d46 100644 --- a/script/install-ipex-from-src/run.sh +++ b/script/install-ipex-from-src/run.sh @@ -1,6 +1,6 @@ #!/bin/bash -#export PATH=${CM_CONDA_BIN_PATH}:${PATH} +#export PATH=${MLC_CONDA_BIN_PATH}:${PATH} #echo $LD_LIBRARY_PATH #exit 1 rm -rf ipex_src @@ -11,23 +11,23 @@ pwd git submodule sync git submodule update --init --recursive -if [[ ${CM_INTEL_IPEX_RESNET50_PATCH} == "yes" ]]; then - bash ${CM_TMP_CURRENT_SCRIPT_PATH}/apply_intel_resnet50_patch.sh +if [[ ${MLC_INTEL_IPEX_RESNET50_PATCH} == "yes" ]]; then + bash ${MLC_TMP_CURRENT_SCRIPT_PATH}/apply_intel_resnet50_patch.sh test "$?" -eq 0 || exit "$?" -elif [[ ${CM_INTEL_IPEX_RETINANET_PATCH} == "yes" ]]; then - bash ${CM_TMP_CURRENT_SCRIPT_PATH}/apply_intel_retinanet_patch.sh +elif [[ ${MLC_INTEL_IPEX_RETINANET_PATCH} == "yes" ]]; then + bash ${MLC_TMP_CURRENT_SCRIPT_PATH}/apply_intel_retinanet_patch.sh test "$?" -eq 0 || exit "$?" -elif [[ ${CM_INTEL_IPEX_3D_UNET_PATCH} == "yes" ]]; then +elif [[ ${MLC_INTEL_IPEX_3D_UNET_PATCH} == "yes" ]]; then cd third_party/mkl-dnn git fetch --tags && git checkout v2.7 test "$?" -eq 0 || exit "$?" cd ../../ - bash ${CM_TMP_CURRENT_SCRIPT_PATH}/apply_intel_3d-unet_patch.sh + bash ${MLC_TMP_CURRENT_SCRIPT_PATH}/apply_intel_3d-unet_patch.sh test "$?" -eq 0 || exit "$?" 
-elif [[ ${CM_INTEL_IPEX_DLRM_V2_PATCH} == "yes" ]]; then +elif [[ ${MLC_INTEL_IPEX_DLRM_V2_PATCH} == "yes" ]]; then export LD_LIBRARY_PATH="" wget https://raw.githubusercontent.com/mlcommons/inference_results_v3.1/main/closed/Intel/code/dlrm-v2-99/pytorch-cpu-int8/ipex.patch test "$?" -eq 0 || exit "$?" @@ -49,8 +49,8 @@ elif [[ ${CM_INTEL_IPEX_DLRM_V2_PATCH} == "yes" ]]; then fi rm -rf build -echo ${CM_RUN_CMD} -eval ${CM_RUN_CMD} +echo ${MLC_RUN_CMD} +eval ${MLC_RUN_CMD} test "$?" -eq 0 || exit "$?" diff --git a/script/install-llvm-prebuilt/README-extra.md b/script/install-llvm-prebuilt/README-extra.md index 1ad1e122b..992fd3cf1 100644 --- a/script/install-llvm-prebuilt/README-extra.md +++ b/script/install-llvm-prebuilt/README-extra.md @@ -2,13 +2,13 @@ This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed llvm on the system and if not found calls the [install script for llvm](../script/install-llvm-prebuilt). ## Exported Variables -* `CM_LLVM_CLANG_BIN` -* `CM_LLVM_CLANG_BIN_WITH_PATH` -* `CM_C_COMPILER_BIN` -* `CM_C_COMPILER_WITH_PATH` -* `CM_CXX_COMPILER_BIN` -* `CM_CXX_COMPILER_WITH_PATH` -* `CM_COMPILER_*` +* `MLC_LLVM_CLANG_BIN` +* `MLC_LLVM_CLANG_BIN_WITH_PATH` +* `MLC_C_COMPILER_BIN` +* `MLC_C_COMPILER_WITH_PATH` +* `MLC_CXX_COMPILER_BIN` +* `MLC_CXX_COMPILER_WITH_PATH` +* `MLC_COMPILER_*` ## Supported and Tested OS 1. Ubuntu 18.04, 20.04, 22.04 @@ -86,7 +86,7 @@ cm run script "app image corner-detection" ```bash cm rm cache -f -cm run script "install llvm prebuilt" --version=13.0.0 --env.CM_LLVM_PACKAGE=clang+llvm-13.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz +cm run script "install llvm prebuilt" --version=13.0.0 --env.MLC_LLVM_PACKAGE=clang+llvm-13.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz cm run script "app image corner-detection" ``` @@ -94,6 +94,6 @@ cm run script "app image corner-detection" ```bash cm rm cache -f -cm run script "install llvm prebuilt" --version=12.0.0 --env.CM_LLVM_PACKAGE=clang+llvm-12.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz +cm run script "install llvm prebuilt" --version=12.0.0 --env.MLC_LLVM_PACKAGE=clang+llvm-12.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz cm run script "app image corner-detection" ``` diff --git a/script/install-llvm-prebuilt/customize.py b/script/install-llvm-prebuilt/customize.py index 058714522..45417febc 100644 --- a/script/install-llvm-prebuilt/customize.py +++ b/script/install-llvm-prebuilt/customize.py @@ -12,22 +12,22 @@ def preprocess(i): recursion_spaces = i['recursion_spaces'] - need_version = env.get('CM_VERSION', '') + need_version = env.get('MLC_VERSION', '') clang_file_name = "clang" if need_version == '': return {'return': 1, - 'error': 'internal problem - CM_VERSION is not defined in env'} + 'error': 'internal problem - MLC_VERSION is not defined in env'} print(recursion_spaces + ' # Requested version: {}'.format(need_version)) - host_os_bits = env['CM_HOST_OS_BITS'] + host_os_bits = env['MLC_HOST_OS_BITS'] if os_info['platform'] != 'windows': - host_os_machine = env['CM_HOST_OS_MACHINE'] # ABI + host_os_machine = env['MLC_HOST_OS_MACHINE'] # ABI # Prepare package name # First check if it is forced by external environment - package_name = env.get('CM_LLVM_PACKAGE', '').strip() + package_name = env.get('MLC_LLVM_PACKAGE', '').strip() if package_name == '': need_version_split = need_version.split('.') @@ -35,12 +35,12 @@ def preprocess(i): # and arch if os_info['platform'] == 'darwin': force_arch = env.get( - 'CM_LLVM_PACKAGE_FORCE_ARCH', + 
'MLC_LLVM_PACKAGE_FORCE_ARCH', '') # To allow x86_64 if needed if force_arch == '': force_arch = 'arm64' force_darwin_version = env.get( - 'CM_LLVM_PACKAGE_FORCE_DARWIN_VERSION', '') + 'MLC_LLVM_PACKAGE_FORCE_DARWIN_VERSION', '') if force_darwin_version == '': if len(need_version_split) > 0: hver = 0 @@ -76,9 +76,9 @@ def preprocess(i): else: package_name = 'clang+llvm-' + need_version + '-armv7a-linux-gnueabihf.tar.xz' else: - host_os_flavor = env['CM_HOST_OS_FLAVOR'] + host_os_flavor = env['MLC_HOST_OS_FLAVOR'] - host_os_version = env['CM_HOST_OS_VERSION'] + host_os_version = env['MLC_HOST_OS_VERSION'] # if 'debian' in host_os_flavor: # return {'return':1, 'error':'debian is not supported yet'} @@ -183,15 +183,15 @@ def preprocess(i): # 'clang+llvm-12.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz' # f['filename'] filename = r['filename'] - env['CM_LLVM_PACKAGE'] = filename - env['CM_LLVM_INSTALLED_PATH'] = os.path.join(os.getcwd(), 'bin') - env['CM_LLVM_CLANG_BIN_WITH_PATH'] = os.path.join( + env['MLC_LLVM_PACKAGE'] = filename + env['MLC_LLVM_INSTALLED_PATH'] = os.path.join(os.getcwd(), 'bin') + env['MLC_LLVM_CLANG_BIN_WITH_PATH'] = os.path.join( os.getcwd(), 'bin', clang_file_name) - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_LLVM_CLANG_BIN_WITH_PATH'] + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_LLVM_CLANG_BIN_WITH_PATH'] # We don't need to check default paths here because we force install to # cache - env['+PATH'] = [env['CM_LLVM_INSTALLED_PATH']] + env['+PATH'] = [env['MLC_LLVM_INSTALLED_PATH']] path_include = os.path.join(os.getcwd(), 'include') if os.path.isdir(path_include): @@ -203,7 +203,7 @@ def preprocess(i): def postprocess(i): env = i['env'] - version = env['CM_VERSION'] + version = env['MLC_VERSION'] os_info = i['os_info'] # cur_dir = os.getcwd() diff --git a/script/install-llvm-prebuilt/meta.yaml b/script/install-llvm-prebuilt/meta.yaml index a389387a0..bef81d139 100644 --- a/script/install-llvm-prebuilt/meta.yaml +++ b/script/install-llvm-prebuilt/meta.yaml @@ -8,15 +8,15 @@ deps: - tags: detect,os name: Install prebuilt LLVM compiler new_env_keys: -- CM_LLVM_* -- CM_COMPILER_NAME +- MLC_LLVM_* +- MLC_COMPILER_NAME - +PATH - +LD_LIBRARY_PATH - +C_INCLUDE_PATH - +CPLUS_INCLUDE_PATH post_deps: - skip_if_env: - CM_REQUIRE_INSTALL: + MLC_REQUIRE_INSTALL: - 'yes' tags: get,llvm tags: diff --git a/script/install-llvm-prebuilt/run.bat b/script/install-llvm-prebuilt/run.bat index 922a0d8ed..cfacd1d0f 100644 --- a/script/install-llvm-prebuilt/run.bat +++ b/script/install-llvm-prebuilt/run.bat @@ -1,3 +1,3 @@ -echo Running %CM_LLVM_PACKAGE% ... +echo Running %MLC_LLVM_PACKAGE% ... -%CM_LLVM_PACKAGE% --help +%MLC_LLVM_PACKAGE% --help diff --git a/script/install-llvm-prebuilt/run.sh b/script/install-llvm-prebuilt/run.sh index 1ace2bb27..f7008c1d3 100644 --- a/script/install-llvm-prebuilt/run.sh +++ b/script/install-llvm-prebuilt/run.sh @@ -1,10 +1,10 @@ #!/bin/bash echo "" -echo "Unarchiving ${CM_LLVM_PACKAGE} ..." +echo "Unarchiving ${MLC_LLVM_PACKAGE} ..." -tar --strip 1 -xf ${CM_LLVM_PACKAGE} +tar --strip 1 -xf ${MLC_LLVM_PACKAGE} test $? -eq 0 || exit 1 -rm -f ${CM_LLVM_PACKAGE} +rm -f ${MLC_LLVM_PACKAGE} test $? 
-eq 0 || exit 1 diff --git a/script/install-llvm-src/customize.py b/script/install-llvm-src/customize.py index c3ec8e0c8..843e2027d 100644 --- a/script/install-llvm-src/customize.py +++ b/script/install-llvm-src/customize.py @@ -16,43 +16,43 @@ def preprocess(i): install_prefix = os.path.join(os.getcwd(), "install") - if env.get('CM_LLVM_CONDA_ENV', '') == "yes": - install_prefix = env['CM_CONDA_PREFIX'] + if env.get('MLC_LLVM_CONDA_ENV', '') == "yes": + install_prefix = env['MLC_CONDA_PREFIX'] extra_cmake_options = f"-DCMAKE_SHARED_LINKER_FLAGS=-L{install_prefix} -Wl,-rpath,{install_prefix}" - if env.get('CM_LLVM_16_INTEL_MLPERF_INFERENCE', '') == "yes": - env['CM_REQUIRE_INSTALL'] = 'yes' + if env.get('MLC_LLVM_16_INTEL_MLPERF_INFERENCE', '') == "yes": + env['MLC_REQUIRE_INSTALL'] = 'yes' i['run_script_input']['script_name'] = "install-llvm-16-intel-mlperf-inference" clang_file_name = "llvm-link" # env['USE_LLVM'] = install_prefix # env['LLVM_DIR'] = os.path.join(env['USE_LLVM'], "lib", "cmake", "llvm") else: - if env.get('CM_LLVM_ENABLE_RUNTIMES', '') != '': - enable_runtimes = env['CM_LLVM_ENABLE_RUNTIMES'].replace(":", ";") + if env.get('MLC_LLVM_ENABLE_RUNTIMES', '') != '': + enable_runtimes = env['MLC_LLVM_ENABLE_RUNTIMES'].replace(":", ";") else: enable_runtimes = '' - if env.get('CM_LLVM_ENABLE_PROJECTS', '') != '': - enable_projects = env['CM_LLVM_ENABLE_PROJECTS'].replace(":", ";") + if env.get('MLC_LLVM_ENABLE_PROJECTS', '') != '': + enable_projects = env['MLC_LLVM_ENABLE_PROJECTS'].replace(":", ";") else: enable_projects = '' - llvm_build_type = env['CM_LLVM_BUILD_TYPE'] + llvm_build_type = env['MLC_LLVM_BUILD_TYPE'] - cmake_cmd = "cmake " + os.path.join(env["CM_LLVM_SRC_REPO_PATH"], "llvm") + " -GNinja -DCMAKE_BUILD_TYPE=" + llvm_build_type + " -DLLVM_ENABLE_PROJECTS=" + enable_projects + " -DLLVM_ENABLE_RUNTIMES='" + \ + cmake_cmd = "cmake " + os.path.join(env["MLC_LLVM_SRC_REPO_PATH"], "llvm") + " -GNinja -DCMAKE_BUILD_TYPE=" + llvm_build_type + " -DLLVM_ENABLE_PROJECTS=" + enable_projects + " -DLLVM_ENABLE_RUNTIMES='" + \ enable_runtimes + "' -DCMAKE_INSTALL_PREFIX=" + install_prefix + \ " -DLLVM_ENABLE_RTTI=ON -DLLVM_INSTALL_UTILS=ON -DLLVM_TARGETS_TO_BUILD=X86 " + \ extra_cmake_options - env['CM_LLVM_CMAKE_CMD'] = cmake_cmd + env['MLC_LLVM_CMAKE_CMD'] = cmake_cmd - need_version = env.get('CM_VERSION', '') + need_version = env.get('MLC_VERSION', '') # print(cmake_cmd) - env['CM_LLVM_INSTALLED_PATH'] = install_prefix - env['CM_LLVM_CLANG_BIN_WITH_PATH'] = os.path.join( - env['CM_LLVM_INSTALLED_PATH'], "bin", clang_file_name) + env['MLC_LLVM_INSTALLED_PATH'] = install_prefix + env['MLC_LLVM_CLANG_BIN_WITH_PATH'] = os.path.join( + env['MLC_LLVM_INSTALLED_PATH'], "bin", clang_file_name) # env['+PATH'] = [] return {'return': 0} @@ -62,14 +62,14 @@ def postprocess(i): env = i['env'] - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_LLVM_CLANG_BIN_WITH_PATH'] + env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_LLVM_CLANG_BIN_WITH_PATH'] - if env.get('CM_LLVM_CONDA_ENV', '') != "yes": + if env.get('MLC_LLVM_CONDA_ENV', '') != "yes": # We don't need to check default paths here because we force install to # cache - env['+PATH'] = [os.path.join(env['CM_LLVM_INSTALLED_PATH'], "bin")] + env['+PATH'] = [os.path.join(env['MLC_LLVM_INSTALLED_PATH'], "bin")] - path_include = os.path.join(env['CM_LLVM_INSTALLED_PATH'], 'include') + path_include = os.path.join(env['MLC_LLVM_INSTALLED_PATH'], 'include') if os.path.isdir(path_include): env['+C_INCLUDE_PATH'] = [path_include] diff --git 
a/script/install-llvm-src/install-llvm-16-intel-mlperf-inference.sh b/script/install-llvm-src/install-llvm-16-intel-mlperf-inference.sh index 30b612b2b..0d092aad3 100644 --- a/script/install-llvm-src/install-llvm-16-intel-mlperf-inference.sh +++ b/script/install-llvm-src/install-llvm-16-intel-mlperf-inference.sh @@ -1,7 +1,7 @@ #!/bin/bash -export PATH=${CM_CONDA_BIN_PATH}:${PATH} -#export LD_LIBRARY_PATH=${CM_CONDA_LIB_PATH}:${LD_LIBRARY_PATH} +export PATH=${MLC_CONDA_BIN_PATH}:${PATH} +#export LD_LIBRARY_PATH=${MLC_CONDA_LIB_PATH}:${LD_LIBRARY_PATH} ABI=$(python -c "import torch; print(int(torch._C._GLIBCXX_USE_CXX11_ABI))") test $? -eq 0 || exit $? export ABI=$ABI @@ -17,8 +17,8 @@ rm -rf build mkdir -p build cd build export DEB_BUILD_MAINT_OPTIONS=hardening=-format -export CC=${CM_C_COMPILER_WITH_PATH} -export CXX=${CM_CXX_COMPILER_WITH_PATH} +export CC=${MLC_C_COMPILER_WITH_PATH} +export CXX=${MLC_CXX_COMPILER_WITH_PATH} cmake -G "Unix Makefiles" -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-D_GLIBCXX_USE_CXX11_ABI=${ABI}" -DLLVM_TARGETS_TO_BUILD=X86 -DLLVM_ENABLE_TERMINFO=OFF -DLLVM_INCLUDE_TESTS=OFF -DLLVM_INCLUDE_EXAMPLES=OFF -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_INCLUDE_BENCHMARKS=OFF ../llvm/ test $? -eq 0 || exit $? diff --git a/script/install-llvm-src/meta.yaml b/script/install-llvm-src/meta.yaml index 91589e8ce..f1dd3657d 100644 --- a/script/install-llvm-src/meta.yaml +++ b/script/install-llvm-src/meta.yaml @@ -7,43 +7,43 @@ deps: - tags: detect,os - tags: detect,cpu - skip_if_env: - CM_LLVM_CONDA_ENV: + MLC_LLVM_CONDA_ENV: - 'yes' tags: get,cmake - skip_if_env: - CM_LLVM_CONDA_ENV: + MLC_LLVM_CONDA_ENV: - 'yes' tags: get,generic-sys-util,_ninja-build - env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_LLVM_SRC_REPO_PATH + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_LLVM_SRC_REPO_PATH extra_cache_tags: llvm,src,llvm-src,llvm-src-repo force_env_keys: - - CM_GIT_* + - MLC_GIT_* names: - llvm-src-repo tags: get,git,repo update_tags_from_env_with_prefix: _branch.: - - CM_GIT_CHECKOUT + - MLC_GIT_CHECKOUT _repo.: - - CM_GIT_URL + - MLC_GIT_URL _sha.: - - CM_GIT_CHECKOUT_SHA + - MLC_GIT_CHECKOUT_SHA _tag.: - - CM_GIT_CHECKOUT_TAG + - MLC_GIT_CHECKOUT_TAG _tag.llvmorg-: - - CM_VERSION + - MLC_VERSION env: - CM_GIT_URL: https://github.com/llvm/llvm-project + MLC_GIT_URL: https://github.com/llvm/llvm-project name: Build LLVM compiler from sources (can take >30 min) new_env_keys: -- CM_LLVM_* -- CM_GET_DEPENDENT_CACHED_PATH +- MLC_LLVM_* +- MLC_GET_DEPENDENT_CACHED_PATH - +PATH - +C_INCLUDE_PATH post_deps: - skip_if_env: - CM_REQUIRE_INSTALL: + MLC_REQUIRE_INSTALL: - 'yes' tags: get,llvm prehook_deps: [] @@ -58,15 +58,15 @@ uid: 2af16e9a6c5f4702 variations: branch.#: env: - CM_GIT_CHECKOUT: '#' + MLC_GIT_CHECKOUT: '#' clang: default: true env: - CM_LLVM_ENABLE_PROJECTS: clang + MLC_LLVM_ENABLE_PROJECTS: clang group: clang debug: env: - CM_LLVM_BUILD_TYPE: debug + MLC_LLVM_BUILD_TYPE: debug group: build-type for-intel-mlperf-inference-v3.1-bert: adr: @@ -100,7 +100,7 @@ variations: - libstdcxx-ng tags: get,generic,conda-package,_package.libstdcxx-ng,_source.conda-forge env: - CM_LLVM_CONDA_ENV: 'yes' + MLC_LLVM_CONDA_ENV: 'yes' for-intel-mlperf-inference-v3.1-gptj: adr: conda-package: @@ -160,7 +160,7 @@ variations: - -Wno-maybe-uninitialized - -Wno-uninitialized - -Wno-free-nonheap-object - CM_PYTHON_BIN_WITH_PATH: <<<CM_CONDA_BIN_PATH>>>/python3 + MLC_PYTHON_BIN_WITH_PATH: <<<MLC_CONDA_BIN_PATH>>>/python3 tags:
get,generic-python-lib,_custom-python,_package.torch,_url.git+https://github.com/pytorch/pytorch.git@927dc662386af052018212c7d01309a506fc94cd - names: - conda-package - @@ -171,16 +171,16 @@ - sympy tags: get,generic,conda-package,_package.sympy,_source.conda-forge - env: - CM_PYTHON_BIN_WITH_PATH: <<<CM_CONDA_BIN_PATH>>>/python3 + MLC_PYTHON_BIN_WITH_PATH: <<<MLC_CONDA_BIN_PATH>>>/python3 tags: get,generic-python-lib,_custom-python,_package.setuptools version_max: 69.9.999 version_max_usable: 58.2.0 - env: - CM_PYTHON_BIN_WITH_PATH: <<<CM_CONDA_BIN_PATH>>>/python3 + MLC_PYTHON_BIN_WITH_PATH: <<<MLC_CONDA_BIN_PATH>>>/python3 tags: get,generic-python-lib,_custom-python,_package.neural-compressor,_url.git+https://github.com/intel/neural-compressor.git@a2931eaa4052eec195be3c79a13f7bfa23e54473 env: - CM_LLVM_16_INTEL_MLPERF_INFERENCE: 'yes' - CM_LLVM_CONDA_ENV: 'yes' + MLC_LLVM_16_INTEL_MLPERF_INFERENCE: 'yes' + MLC_LLVM_CONDA_ENV: 'yes' CUDA_VISIBLE_DEVICES: '' USE_CUDA: '0' full-history: @@ -190,21 +190,21 @@ variations: release: default: true env: - CM_LLVM_BUILD_TYPE: release + MLC_LLVM_BUILD_TYPE: release group: build-type repo.#: env: - CM_GIT_URL: '#' + MLC_GIT_URL: '#' group: repo runtimes.#: env: - CM_LLVM_ENABLE_RUNTIMES: '#' + MLC_LLVM_ENABLE_RUNTIMES: '#' sha.#: env: - CM_GIT_CHECKOUT_SHA: '#' + MLC_GIT_CHECKOUT_SHA: '#' tag.#: base: - full-history env: - CM_GIT_CHECKOUT_TAG: '#' + MLC_GIT_CHECKOUT_TAG: '#' versions: {} diff --git a/script/install-llvm-src/run.sh b/script/install-llvm-src/run.sh index 60c0efea6..02d80dac5 100644 --- a/script/install-llvm-src/run.sh +++ b/script/install-llvm-src/run.sh @@ -2,32 +2,32 @@ CUR_DIR=$PWD -INSTALL_DIR="${CM_LLVM_INSTALLED_PATH}" +INSTALL_DIR="${MLC_LLVM_INSTALLED_PATH}" echo "INSTALL_DIR=${INSTALL_DIR}" -if [[ ${CM_LLVM_CONDA_ENV} != "yes" ]]; then +if [[ ${MLC_LLVM_CONDA_ENV} != "yes" ]]; then cmd="rm -rf ${INSTALL_DIR}" echo "$cmd" eval "$cmd" else - export PATH=${CM_CONDA_BIN_PATH}:$PATH + export PATH=${MLC_CONDA_BIN_PATH}:$PATH fi -if [[ ${CM_CLEAN_BUILD} == "yes" ]]; then +if [[ ${MLC_CLEAN_BUILD} == "yes" ]]; then rm -rf build fi mkdir -p build # If install exist, then configure was done -if [ ! -d "${INSTALL_DIR}" ] || [ ${CM_LLVM_CONDA_ENV} == "yes" ]; then +if [ !
-d "${INSTALL_DIR}" ] || [ ${MLC_LLVM_CONDA_ENV} == "yes" ]; then echo "******************************************************" cd build if [ "${?}" != "0" ]; then exit 1; fi - echo "${CM_LLVM_CMAKE_CMD}" - eval "${CM_LLVM_CMAKE_CMD}" + echo "${MLC_LLVM_CMAKE_CMD}" + eval "${MLC_LLVM_CMAKE_CMD}" ninja if [ "${?}" != "0" ]; then exit 1; fi ninja install diff --git a/script/install-mlperf-logging-from-src/customize.py b/script/install-mlperf-logging-from-src/customize.py index 3df1d0660..d6f5dab44 100644 --- a/script/install-mlperf-logging-from-src/customize.py +++ b/script/install-mlperf-logging-from-src/customize.py @@ -12,7 +12,7 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') return {'return': 0} diff --git a/script/install-mlperf-logging-from-src/meta.yaml b/script/install-mlperf-logging-from-src/meta.yaml index c4d8f86bf..a41c3de26 100644 --- a/script/install-mlperf-logging-from-src/meta.yaml +++ b/script/install-mlperf-logging-from-src/meta.yaml @@ -11,11 +11,11 @@ deps: - tags: get,git,repo,_repo.https://github.com/mlcommons/logging extra_cache_tags: mlperf_logging env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_LOGGING_REPO_PATH + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_MLPERF_LOGGING_REPO_PATH docker_input_mapping: input_description: new_env_keys: - - CM_MLPERF_LOGGING_REPO_PATH + - MLC_MLPERF_LOGGING_REPO_PATH new_state_keys: [] post_deps: [] posthook_deps: [] @@ -30,7 +30,7 @@ variations: {} versions: master: env: - CM_MLPERF_LOGGING_VERSION: master + MLC_MLPERF_LOGGING_VERSION: master v3.1: env: - CM_MLPERF_LOGGING_VERSION: v3.1 + MLC_MLPERF_LOGGING_VERSION: v3.1 diff --git a/script/install-mlperf-logging-from-src/run.sh b/script/install-mlperf-logging-from-src/run.sh index de622c9f3..0d5d73c4c 100644 --- a/script/install-mlperf-logging-from-src/run.sh +++ b/script/install-mlperf-logging-from-src/run.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,13 +17,13 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi } #Add your run commands here... 
-# run "$CM_RUN_CMD" +# run "$MLC_RUN_CMD" -run "${CM_PYTHON_BIN_WITH_PATH} -m pip install -e ${CM_MLPERF_LOGGING_REPO_PATH}" +run "${MLC_PYTHON_BIN_WITH_PATH} -m pip install -e ${MLC_MLPERF_LOGGING_REPO_PATH}" diff --git a/script/install-nccl-libs/customize.py b/script/install-nccl-libs/customize.py index 3df1d0660..d6f5dab44 100644 --- a/script/install-nccl-libs/customize.py +++ b/script/install-nccl-libs/customize.py @@ -12,7 +12,7 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') return {'return': 0} diff --git a/script/install-nccl-libs/run-ubuntu.sh b/script/install-nccl-libs/run-ubuntu.sh index e56074a51..13d7cb3ec 100644 --- a/script/install-nccl-libs/run-ubuntu.sh +++ b/script/install-nccl-libs/run-ubuntu.sh @@ -1,2 +1,2 @@ -CM_SUDO=${CM_SUDO:-sudo} -${CM_SUDO} apt install -y --allow-downgrades libnccl2=2.18.3-1+cuda${CM_CUDA_VERSION} libnccl-dev=2.18.3-1+cuda${CM_CUDA_VERSION} +MLC_SUDO=${MLC_SUDO:-sudo} +${MLC_SUDO} apt install -y --allow-downgrades libnccl2=2.18.3-1+cuda${MLC_CUDA_VERSION} libnccl-dev=2.18.3-1+cuda${MLC_CUDA_VERSION} diff --git a/script/install-nccl-libs/run.sh b/script/install-nccl-libs/run.sh index 3a584c10c..821adb3f9 100644 --- a/script/install-nccl-libs/run.sh +++ b/script/install-nccl-libs/run.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,11 +17,11 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi } #Add your run commands here... 
-# run "$CM_RUN_CMD" +# run "$MLC_RUN_CMD" diff --git a/script/install-numactl-from-src/customize.py b/script/install-numactl-from-src/customize.py index 9f662f3a8..51ac72495 100644 --- a/script/install-numactl-from-src/customize.py +++ b/script/install-numactl-from-src/customize.py @@ -13,7 +13,7 @@ def preprocess(i): run_cmd = "python setup.py install" - env['CM_RUN_CMD'] = run_cmd + env['MLC_RUN_CMD'] = run_cmd automation = i['automation'] diff --git a/script/install-numactl-from-src/meta.yaml b/script/install-numactl-from-src/meta.yaml index 3257c2cc4..a03250cdf 100644 --- a/script/install-numactl-from-src/meta.yaml +++ b/script/install-numactl-from-src/meta.yaml @@ -7,25 +7,25 @@ deps: - tags: detect,os - tags: detect,cpu - env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_NUMACTL_SRC_REPO_PATH + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_NUMACTL_SRC_REPO_PATH extra_cache_tags: numactl,src,numactl-src,numactl-src-repo names: - numactl-src-repo tags: get,git,repo update_tags_from_env_with_prefix: _branch.: - - CM_GIT_CHECKOUT + - MLC_GIT_CHECKOUT _repo.: - - CM_GIT_URL + - MLC_GIT_URL _sha.: - - CM_GIT_CHECKOUT_SHA + - MLC_GIT_CHECKOUT_SHA _tag.: - - CM_GIT_CHECKOUT_TAG + - MLC_GIT_CHECKOUT_TAG env: - CM_GIT_URL: https://github.com/numactl/numactl + MLC_GIT_URL: https://github.com/numactl/numactl name: Build numactl from sources new_env_keys: -- CM_NUMACTL_* +- MLC_NUMACTL_* - +PATH sort: 1000 tags: @@ -38,24 +38,24 @@ uid: 4f355ae8ca1948b2 variations: branch.#: env: - CM_GIT_CHECKOUT: '#' + MLC_GIT_CHECKOUT: '#' repo.#: env: - CM_GIT_URL: '#' + MLC_GIT_URL: '#' group: repo repo.https://github.com/numactl/numactl: default: true env: - CM_GIT_URL: https://github.com/numactl/numactl + MLC_GIT_URL: https://github.com/numactl/numactl group: repo sha.#: env: - CM_GIT_CHECKOUT_SHA: '#' + MLC_GIT_CHECKOUT_SHA: '#' tag.#: ad: pytorch-src-repo: tags: _no-recurse-submodules,_full-history env: - CM_GIT_CHECKOUT_TAG: '#' + MLC_GIT_CHECKOUT_TAG: '#' warnings: - This CM script will need sudo to install numactl! 
diff --git a/script/install-numactl-from-src/run.sh b/script/install-numactl-from-src/run.sh index 606b5d965..286ef416d 100644 --- a/script/install-numactl-from-src/run.sh +++ b/script/install-numactl-from-src/run.sh @@ -3,7 +3,7 @@ CUR_DIR=$PWD echo $PWD rm -rf numactl -cmd="cp -r ${CM_NUMACTL_SRC_REPO_PATH} numactl" +cmd="cp -r ${MLC_NUMACTL_SRC_REPO_PATH} numactl" echo "$cmd" eval "$cmd" cd numactl diff --git a/script/install-onednn-from-src/customize.py b/script/install-onednn-from-src/customize.py index 18444917d..2bdf25c14 100644 --- a/script/install-onednn-from-src/customize.py +++ b/script/install-onednn-from-src/customize.py @@ -13,10 +13,10 @@ def preprocess(i): run_cmd = "" - env['CM_RUN_CMD'] = run_cmd - env['CM_ONEDNN_INSTALLED_PATH'] = os.path.join(os.getcwd(), "onednn") + env['MLC_RUN_CMD'] = run_cmd + env['MLC_ONEDNN_INSTALLED_PATH'] = os.path.join(os.getcwd(), "onednn") - if env.get('CM_FOR_INTEL_MLPERF_INFERENCE_BERT', '') == "yes": + if env.get('MLC_FOR_INTEL_MLPERF_INFERENCE_BERT', '') == "yes": i['run_script_input']['script_name'] = "run-intel-mlperf-inference-bert" automation = i['automation'] diff --git a/script/install-onednn-from-src/meta.yaml b/script/install-onednn-from-src/meta.yaml index 6a86bde63..49155e6e6 100644 --- a/script/install-onednn-from-src/meta.yaml +++ b/script/install-onednn-from-src/meta.yaml @@ -10,29 +10,29 @@ deps: - python - python3 skip_if_env: - CM_CONDA_ENV: + MLC_CONDA_ENV: - 'yes' tags: get,python3 - env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_ONEDNN_SRC_REPO_PATH + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_ONEDNN_SRC_REPO_PATH extra_cache_tags: onednn,src,onednn-src,onednn-src-repo names: - onednn-src-repo tags: get,git,repo update_tags_from_env_with_prefix: _branch.: - - CM_GIT_CHECKOUT + - MLC_GIT_CHECKOUT _repo.: - - CM_GIT_URL + - MLC_GIT_URL _sha.: - - CM_GIT_CHECKOUT_SHA + - MLC_GIT_CHECKOUT_SHA _tag.: - - CM_GIT_CHECKOUT_TAG + - MLC_GIT_CHECKOUT_TAG env: - CM_GIT_URL: https://github.com/oneapi-src/oneDNN + MLC_GIT_URL: https://github.com/oneapi-src/oneDNN name: Build oneDNN from sources new_env_keys: -- CM_ONEDNN_* +- MLC_ONEDNN_* prehook_deps: [] sort: 1000 tags: @@ -46,7 +46,7 @@ uid: fe3a652e315f4c8f variations: branch.#: env: - CM_GIT_CHECKOUT: '#' + MLC_GIT_CHECKOUT: '#' for-intel-mlperf-inference-v3.1-bert: adr: conda-package: @@ -56,24 +56,24 @@ variations: base: - tag.v2.6 env: - CM_CONDA_ENV: 'yes' - CM_FOR_INTEL_MLPERF_INFERENCE_BERT: 'yes' + MLC_CONDA_ENV: 'yes' + MLC_FOR_INTEL_MLPERF_INFERENCE_BERT: 'yes' repo.#: env: - CM_GIT_URL: '#' + MLC_GIT_URL: '#' group: repo repo.https://github.com/oneapi-src/oneDNN: default: true env: - CM_GIT_URL: https://github.com/oneapi-src/oneDNN + MLC_GIT_URL: https://github.com/oneapi-src/oneDNN group: repo sha.#: env: - CM_GIT_CHECKOUT_SHA: '#' + MLC_GIT_CHECKOUT_SHA: '#' tag.#: ad: onednn-src-repo: tags: _no-recurse-submodules,_full-history env: - CM_GIT_CHECKOUT_TAG: '#' + MLC_GIT_CHECKOUT_TAG: '#' versions: {} diff --git a/script/install-onednn-from-src/run-intel-mlperf-inference-bert.sh b/script/install-onednn-from-src/run-intel-mlperf-inference-bert.sh index 77bff6883..c866b69b3 100644 --- a/script/install-onednn-from-src/run-intel-mlperf-inference-bert.sh +++ b/script/install-onednn-from-src/run-intel-mlperf-inference-bert.sh @@ -1,10 +1,10 @@ #!/bin/bash -export PATH=${CM_CONDA_BIN_PATH}:$PATH +export PATH=${MLC_CONDA_BIN_PATH}:$PATH CUR_DIR=$PWD rm -rf onednn -cp -r ${CM_ONEDNN_SRC_REPO_PATH} onednn +cp -r ${MLC_ONEDNN_SRC_REPO_PATH} onednn cd onednn rm -rf build pwd diff --git 
a/script/install-onednn-from-src/run-intel-mlperf-inference.sh b/script/install-onednn-from-src/run-intel-mlperf-inference.sh index 77bff6883..c866b69b3 100644 --- a/script/install-onednn-from-src/run-intel-mlperf-inference.sh +++ b/script/install-onednn-from-src/run-intel-mlperf-inference.sh @@ -1,10 +1,10 @@ #!/bin/bash -export PATH=${CM_CONDA_BIN_PATH}:$PATH +export PATH=${MLC_CONDA_BIN_PATH}:$PATH CUR_DIR=$PWD rm -rf onednn -cp -r ${CM_ONEDNN_SRC_REPO_PATH} onednn +cp -r ${MLC_ONEDNN_SRC_REPO_PATH} onednn cd onednn rm -rf build pwd diff --git a/script/install-onednn-from-src/run.sh b/script/install-onednn-from-src/run.sh index fbdd90f92..ed220c8c2 100644 --- a/script/install-onednn-from-src/run.sh +++ b/script/install-onednn-from-src/run.sh @@ -2,7 +2,7 @@ CUR_DIR=$PWD rm -rf onednn -cp -r ${CM_ONEDNN_SRC_REPO_PATH} onednn +cp -r ${MLC_ONEDNN_SRC_REPO_PATH} onednn cd onednn test "${?}" -eq "0" || exit $? rm -rf build @@ -11,5 +11,5 @@ mkdir build cd build cmake .. test "${?}" -eq "0" || exit $? -make -j${CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET} +make -j${MLC_HOST_CPU_PHYSICAL_CORES_PER_SOCKET} test "${?}" -eq "0" || exit $? diff --git a/script/install-onnxruntime-from-src/customize.py b/script/install-onnxruntime-from-src/customize.py index 23a0b15de..1ee0e39d9 100644 --- a/script/install-onnxruntime-from-src/customize.py +++ b/script/install-onnxruntime-from-src/customize.py @@ -13,11 +13,11 @@ def preprocess(i): run_cmd = "./build.sh --config RelWithDebInfo --build_wheel --parallel --allow_running_as_root --skip_tests " - if env.get('CM_ONNXRUNTIME_GPU', '') == "yes": + if env.get('MLC_ONNXRUNTIME_GPU', '') == "yes": cuda_home = env['CUDA_HOME'] run_cmd += f"--use_cuda --cuda_home {cuda_home} --cudnn_home {cuda_home}" - env['CM_RUN_DIR'] = env['CM_ONNXRUNTIME_SRC_REPO_PATH'] - env['CM_RUN_CMD'] = run_cmd + env['MLC_RUN_DIR'] = env['MLC_ONNXRUNTIME_SRC_REPO_PATH'] + env['MLC_RUN_CMD'] = run_cmd return {'return': 0} diff --git a/script/install-onnxruntime-from-src/meta.yaml b/script/install-onnxruntime-from-src/meta.yaml index 20c58c5b6..a5bdbb7a0 100644 --- a/script/install-onnxruntime-from-src/meta.yaml +++ b/script/install-onnxruntime-from-src/meta.yaml @@ -11,7 +11,7 @@ deps: - python - python3 skip_if_env: - CM_CONDA_ENV: + MLC_CONDA_ENV: - 'yes' tags: get,python3 - tags: get,cmake @@ -20,24 +20,24 @@ deps: version_max: 11.9.999 version_max_usable: '11.0' - env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_ONNXRUNTIME_SRC_REPO_PATH + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_ONNXRUNTIME_SRC_REPO_PATH extra_cache_tags: onnxruntime,src,onnxruntime-src,onnxruntime-src-repo names: - onnxruntime-src-repo tags: get,git,repo update_tags_from_env_with_prefix: _branch.: - - CM_GIT_CHECKOUT + - MLC_GIT_CHECKOUT _repo.: - - CM_GIT_URL + - MLC_GIT_URL _sha.: - - CM_GIT_CHECKOUT_SHA + - MLC_GIT_CHECKOUT_SHA _tag.: - - CM_GIT_CHECKOUT_TAG + - MLC_GIT_CHECKOUT_TAG env: {} name: Build onnxruntime from sources new_env_keys: -- CM_ONNXRUNTIME_* +- MLC_ONNXRUNTIME_* prehook_deps: [] sort: 1000 tags: @@ -51,26 +51,26 @@ uid: 9798c7e7a5944cee variations: branch.#: env: - CM_GIT_CHECKOUT: '#' + MLC_GIT_CHECKOUT: '#' cuda: deps: - names: - cuda tags: get,cuda,_cudnn env: - CM_ONNXRUNTIME_GPU: 'yes' + MLC_ONNXRUNTIME_GPU: 'yes' repo.https://github.com/Microsoft/onnxruntime: default: true env: - CM_GIT_URL: https://github.com/Microsoft/onnxruntime + MLC_GIT_URL: https://github.com/Microsoft/onnxruntime group: repo sha.#: env: - CM_GIT_CHECKOUT_SHA: '#' + MLC_GIT_CHECKOUT_SHA: '#' tag.#: ad: onnxruntime-src-repo: tags: 
_no-recurse-submodules,_full-history env: - CM_GIT_CHECKOUT_TAG: '#' + MLC_GIT_CHECKOUT_TAG: '#' versions: {} diff --git a/script/install-onnxruntime-from-src/run.sh b/script/install-onnxruntime-from-src/run.sh index 4a2381af7..d3d60b3ef 100644 --- a/script/install-onnxruntime-from-src/run.sh +++ b/script/install-onnxruntime-from-src/run.sh @@ -1,15 +1,15 @@ #!/bin/bash -export CC=${CM_GCC_BIN_WITH_PATH} -export CXX=${CM_GCC_INSTALLED_PATH}/g++ +export CC=${MLC_GCC_BIN_WITH_PATH} +export CXX=${MLC_GCC_INSTALLED_PATH}/g++ -echo "cd ${CM_RUN_DIR}" -cd ${CM_RUN_DIR} +echo "cd ${MLC_RUN_DIR}" +cd ${MLC_RUN_DIR} test $? -eq 0 || exit $? rm -rf build -echo ${CM_RUN_CMD} -eval ${CM_RUN_CMD} +echo ${MLC_RUN_CMD} +eval ${MLC_RUN_CMD} test $? -eq 0 || exit $? exit 1 diff --git a/script/install-opencv-from-src/customize.py b/script/install-opencv-from-src/customize.py index 459536d29..640123876 100644 --- a/script/install-opencv-from-src/customize.py +++ b/script/install-opencv-from-src/customize.py @@ -22,7 +22,7 @@ def postprocess(i): env = i['env'] - env['CM_OPENCV_BUILD_PATH'] = os.path.join(os.getcwd(), "opencv", "build") - env['CM_DEPENDENT_CACHED_PATH'] = env['CM_OPENCV_BUILD_PATH'] + env['MLC_OPENCV_BUILD_PATH'] = os.path.join(os.getcwd(), "opencv", "build") + env['MLC_DEPENDENT_CACHED_PATH'] = env['MLC_OPENCV_BUILD_PATH'] return {'return': 0} diff --git a/script/install-opencv-from-src/meta.yaml b/script/install-opencv-from-src/meta.yaml index 83b253f2a..10a34e8ec 100644 --- a/script/install-opencv-from-src/meta.yaml +++ b/script/install-opencv-from-src/meta.yaml @@ -7,7 +7,7 @@ deps: - tags: detect,os - tags: detect,cpu - env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_OPENCV_SRC_REPO_PATH + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_OPENCV_SRC_REPO_PATH extra_cache_tags: opencv,src,opencv-src,opencv-src-repo names: - opencv-src-repo @@ -15,18 +15,18 @@ deps: tags: get,git,repo update_tags_from_env_with_prefix: _branch.: - - CM_GIT_CHECKOUT + - MLC_GIT_CHECKOUT _repo.: - - CM_GIT_URL + - MLC_GIT_URL _sha.: - - CM_GIT_CHECKOUT_SHA + - MLC_GIT_CHECKOUT_SHA _tag.: - - CM_GIT_CHECKOUT_TAG + - MLC_GIT_CHECKOUT_TAG env: - CM_GIT_URL: https://github.com/opencv/opencv + MLC_GIT_URL: https://github.com/opencv/opencv name: Build opencv from sources new_env_keys: -- CM_OPENCV_* +- MLC_OPENCV_* prehook_deps: [] sort: 1000 tags: @@ -41,23 +41,23 @@ uid: 98552486a0bc4214 variations: branch.#: env: - CM_GIT_CHECKOUT: '#' + MLC_GIT_CHECKOUT: '#' repo.#: env: - CM_GIT_URL: '#' + MLC_GIT_URL: '#' group: repo repo.https://github.com/opencv/opencv: default: true env: - CM_GIT_URL: https://github.com/opencv/opencv + MLC_GIT_URL: https://github.com/opencv/opencv group: repo sha.#: env: - CM_GIT_CHECKOUT_SHA: '#' + MLC_GIT_CHECKOUT_SHA: '#' tag.#: ad: opencv-src-repo: tags: _no-recurse-submodules,_full-history env: - CM_GIT_CHECKOUT_TAG: '#' + MLC_GIT_CHECKOUT_TAG: '#' versions: {} diff --git a/script/install-opencv-from-src/run.sh b/script/install-opencv-from-src/run.sh index 34c2b4dba..d1ca6cbc0 100644 --- a/script/install-opencv-from-src/run.sh +++ b/script/install-opencv-from-src/run.sh @@ -2,7 +2,7 @@ CUR_DIR=$PWD rm -rf opencv -cp -r ${CM_OPENCV_SRC_REPO_PATH} opencv +cp -r ${MLC_OPENCV_SRC_REPO_PATH} opencv cd opencv test "${?}" -eq "0" || exit $? rm -rf build @@ -11,5 +11,5 @@ mkdir build cd build cmake .. test "${?}" -eq "0" || exit $? -make -j${CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET} +make -j${MLC_HOST_CPU_PHYSICAL_CORES_PER_SOCKET} test "${?}" -eq "0" || exit $? 
diff --git a/script/install-openssl/customize.py b/script/install-openssl/customize.py index d2c747014..208ca60f6 100644 --- a/script/install-openssl/customize.py +++ b/script/install-openssl/customize.py @@ -15,10 +15,10 @@ def preprocess(i): recursion_spaces = i['recursion_spaces'] - need_version = env.get('CM_VERSION', '') + need_version = env.get('MLC_VERSION', '') if need_version == '': return {'return': 1, - 'error': 'internal problem - CM_VERSION is not defined in env'} + 'error': 'internal problem - MLC_VERSION is not defined in env'} print(recursion_spaces + ' # Requested version: {}'.format(need_version)) @@ -33,7 +33,7 @@ def postprocess(i): install_path = os.path.join( os.getcwd(), 'openssl-' + - env['CM_VERSION'] + + env['MLC_VERSION'] + 'g', 'install') path_lib = os.path.join(install_path, 'lib') @@ -42,6 +42,6 @@ def postprocess(i): env['+LD_LIBRARY_PATH'].append(path_lib) bin_name = "openssl" path_bin = os.path.join(install_path, 'bin') - env['CM_OPENSSL_INSTALLED_PATH'] = path_bin - env['CM_OPENSSL_BIN_WITH_PATH'] = os.path.join(path_bin, bin_name) + env['MLC_OPENSSL_INSTALLED_PATH'] = path_bin + env['MLC_OPENSSL_BIN_WITH_PATH'] = os.path.join(path_bin, bin_name) return {'return': 0} diff --git a/script/install-openssl/meta.yaml b/script/install-openssl/meta.yaml index e478a2ecf..fe1561736 100644 --- a/script/install-openssl/meta.yaml +++ b/script/install-openssl/meta.yaml @@ -9,11 +9,11 @@ deps: - tags: detect,cpu env: {} new_env_keys: -- CM_OPENSSL_* +- MLC_OPENSSL_* - +LD_LIBRARY_PATH post_deps: - skip_if_env: - CM_REQUIRE_INSTALL: + MLC_REQUIRE_INSTALL: - 'yes' tags: get,openssl tags: @@ -25,4 +25,4 @@ uid: be472d3b1d014169 versions: 1.1.1: env: - CM_VERSION: 1.1.1 + MLC_VERSION: 1.1.1 diff --git a/script/install-openssl/run.sh b/script/install-openssl/run.sh index 2e6502c07..3f05b699c 100644 --- a/script/install-openssl/run.sh +++ b/script/install-openssl/run.sh @@ -3,15 +3,15 @@ CUR_DIR=$PWD echo "***********************************************************" -CM_MAKE_CORES=${CM_MAKE_CORES:-${CM_HOST_CPU_TOTAL_CORES}} -CM_MAKE_CORES=${CM_MAKE_CORES:-2} -CM_WGET_URL=https://www.openssl.org/source/openssl-${CM_VERSION}g.tar.gz -wget -nc ${CM_WGET_URL} +MLC_MAKE_CORES=${MLC_MAKE_CORES:-${MLC_HOST_CPU_TOTAL_CORES}} +MLC_MAKE_CORES=${MLC_MAKE_CORES:-2} +MLC_WGET_URL=https://www.openssl.org/source/openssl-${MLC_VERSION}g.tar.gz +wget -nc ${MLC_WGET_URL} test $? -eq 0 || exit 1 -tar -xzf openssl-${CM_VERSION}g.tar.gz && cd openssl-${CM_VERSION}g +tar -xzf openssl-${MLC_VERSION}g.tar.gz && cd openssl-${MLC_VERSION}g test $? -eq 0 || exit 1 mkdir -p install ./config --prefix=`pwd`/install -make -j${CM_MAKE_CORES} +make -j${MLC_MAKE_CORES} test $? 
-eq 0 || exit 1 make install diff --git a/script/install-pip-package-for-cmind-python/customize.py b/script/install-pip-package-for-cmind-python/customize.py index be33639d1..d1a865326 100644 --- a/script/install-pip-package-for-cmind-python/customize.py +++ b/script/install-pip-package-for-cmind-python/customize.py @@ -27,8 +27,8 @@ def preprocess(i): os_info = i['os_info'] env = i['env'] - if env.get('CM_PIP_PACKAGE_NAME', '') != '': - r = install(env['CM_PIP_PACKAGE_NAME']) + if env.get('MLC_PIP_PACKAGE_NAME', '') != '': + r = install(env['MLC_PIP_PACKAGE_NAME']) if r['return'] > 0: return r diff --git a/script/install-pip-package-for-cmind-python/meta.yaml b/script/install-pip-package-for-cmind-python/meta.yaml index 765500d91..feb2dcdd8 100644 --- a/script/install-pip-package-for-cmind-python/meta.yaml +++ b/script/install-pip-package-for-cmind-python/meta.yaml @@ -14,4 +14,4 @@ uid: b16ed087abab459c variations: package.#: env: - CM_PIP_PACKAGE_NAME: "#" + MLC_PIP_PACKAGE_NAME: "#" diff --git a/script/install-python-src/customize.py b/script/install-python-src/customize.py index fe0901aa3..cf5f95f88 100644 --- a/script/install-python-src/customize.py +++ b/script/install-python-src/customize.py @@ -15,16 +15,16 @@ def preprocess(i): recursion_spaces = i['recursion_spaces'] - need_version = env.get('CM_VERSION', '') + need_version = env.get('MLC_VERSION', '') if need_version == '': return {'return': 1, - 'error': 'internal problem - CM_VERSION is not defined in env'} + 'error': 'internal problem - MLC_VERSION is not defined in env'} print(recursion_spaces + ' # Requested version: {}'.format(need_version)) path_bin = os.path.join(os.getcwd(), 'install', 'bin') - env['CM_PYTHON_INSTALLED_PATH'] = path_bin + env['MLC_PYTHON_INSTALLED_PATH'] = path_bin return {'return': 0} @@ -37,14 +37,14 @@ def postprocess(i): path_lib = os.path.join(os.getcwd(), 'install', 'lib') env['+LD_LIBRARY_PATH'] = [path_lib] - env['CM_GET_DEPENDENT_CACHED_PATH'] = os.getcwd() + env['MLC_GET_DEPENDENT_CACHED_PATH'] = os.getcwd() - env['CM_PYTHON_BIN_WITH_PATH'] = os.path.join( - env['CM_PYTHON_INSTALLED_PATH'], 'python3') + env['MLC_PYTHON_BIN_WITH_PATH'] = os.path.join( + env['MLC_PYTHON_INSTALLED_PATH'], 'python3') # We don't need to check default paths here because we force install to # cache - env['+PATH'] = [env['CM_PYTHON_INSTALLED_PATH']] + env['+PATH'] = [env['MLC_PYTHON_INSTALLED_PATH']] path_include = os.path.join(os.getcwd(), 'install', 'include') env['+C_INCLUDE_PATH'] = [path_include] diff --git a/script/install-python-src/meta.yaml b/script/install-python-src/meta.yaml index 5aeed2a6b..99ed1df15 100644 --- a/script/install-python-src/meta.yaml +++ b/script/install-python-src/meta.yaml @@ -4,12 +4,12 @@ automation_uid: 5b4e0237da074764 cache: true category: Python automation default_env: - CM_CUSTOM_SSL: 'no' - CM_ENABLE_SSL: 'no' - CM_PYTHON_LTO_FLAG: '' - CM_PYTHON_OPTIMIZATION_FLAG: '' - CM_SHARED_BUILD: 'no' - CM_WGET_URL: https://www.python.org/ftp/python/[PYTHON_VERSION]/Python-[PYTHON_VERSION].tgz + MLC_CUSTOM_SSL: 'no' + MLC_ENABLE_SSL: 'no' + MLC_PYTHON_LTO_FLAG: '' + MLC_PYTHON_OPTIMIZATION_FLAG: '' + MLC_SHARED_BUILD: 'no' + MLC_WGET_URL: https://www.python.org/ftp/python/[PYTHON_VERSION]/Python-[PYTHON_VERSION].tgz default_version: 3.10.13 deps: - tags: detect,os @@ -17,28 +17,28 @@ deps: - tags: get,generic-sys-util,_libffi-dev - tags: get,generic-sys-util,_libbz2-dev enable_if_env: - CM_HOST_OS_FLAVOR: + MLC_HOST_OS_FLAVOR: - ubuntu - tags: get,generic-sys-util,_libssl-dev - 
enable_if_env: - CM_HOST_OS_FLAVOR: + MLC_HOST_OS_FLAVOR: - ubuntu tags: get,generic-sys-util,_liblzma-dev - enable_if_env: - CM_HOST_OS_FLAVOR: + MLC_HOST_OS_FLAVOR: - ubuntu tags: get,generic-sys-util,_libncurses-dev - enable_if_env: - CM_HOST_OS_FLAVOR: + MLC_HOST_OS_FLAVOR: - ubuntu tags: get,generic-sys-util,_libreadline-dev - enable_if_env: - CM_HOST_OS_FLAVOR: + MLC_HOST_OS_FLAVOR: - ubuntu tags: get,generic-sys-util,_libsqlite3-dev new_env_keys: -- CM_PYTHON_INSTALL_PATH -- CM_PYTHON_BIN_WITH_PATH +- MLC_PYTHON_INSTALL_PATH +- MLC_PYTHON_BIN_WITH_PATH - +PATH - +LD_LIBRARY_PATH - +C_INCLUDE_PATH @@ -49,7 +49,7 @@ post_deps: - python3 reuse_version: true skip_if_env: - CM_REQUIRE_INSTALL: + MLC_REQUIRE_INSTALL: - 'yes' tags: get,python3 tags: @@ -63,24 +63,24 @@ uid: 12d3a608afe14a1e variations: lto: env: - CM_PYTHON_INSTALL_CACHE_TAGS: with-lto - CM_PYTHON_LTO_FLAG: ' --lto' + MLC_PYTHON_INSTALL_CACHE_TAGS: with-lto + MLC_PYTHON_LTO_FLAG: ' --lto' optimized: env: - CM_PYTHON_INSTALL_CACHE_TAGS: optimized - CM_PYTHON_OPTIMIZATION_FLAG: ' --enable-optimizations' + MLC_PYTHON_INSTALL_CACHE_TAGS: optimized + MLC_PYTHON_OPTIMIZATION_FLAG: ' --enable-optimizations' shared: env: - CM_PYTHON_INSTALL_CACHE_TAGS: shared - CM_SHARED_BUILD: 'yes' + MLC_PYTHON_INSTALL_CACHE_TAGS: shared + MLC_SHARED_BUILD: 'yes' with-custom-ssl: deps: - tags: get,openssl env: - CM_CUSTOM_SSL: 'yes' - CM_PYTHON_INSTALL_CACHE_TAGS: with-custom-ssl + MLC_CUSTOM_SSL: 'yes' + MLC_PYTHON_INSTALL_CACHE_TAGS: with-custom-ssl with-ssl: env: - CM_ENABLE_SSL: 'yes' - CM_PYTHON_INSTALL_CACHE_TAGS: with-ssl + MLC_ENABLE_SSL: 'yes' + MLC_PYTHON_INSTALL_CACHE_TAGS: with-ssl group: ssl diff --git a/script/install-python-src/run.sh b/script/install-python-src/run.sh index d151283e7..bf821963e 100644 --- a/script/install-python-src/run.sh +++ b/script/install-python-src/run.sh @@ -3,16 +3,16 @@ CUR_DIR=$PWD echo "***********************************************************" -export PYTHON_VERSION=${CM_VERSION} -CM_WGET_URL="${CM_WGET_URL//"[PYTHON_VERSION]"/$PYTHON_VERSION}" +export PYTHON_VERSION=${MLC_VERSION} +MLC_WGET_URL="${MLC_WGET_URL//"[PYTHON_VERSION]"/$PYTHON_VERSION}" -echo "CM_WGET_URL=${CM_WGET_URL}" >> tmp-run-env.out -echo "wget Python src from ${CM_WGET_URL} for version ${PYTHON_VERSION}..." +echo "MLC_WGET_URL=${MLC_WGET_URL}" >> tmp-run-env.out +echo "wget Python src from ${MLC_WGET_URL} for version ${PYTHON_VERSION}..." 
-CM_MAKE_CORES=${CM_MAKE_CORES:-${CM_HOST_CPU_TOTAL_CORES}} -CM_MAKE_CORES=${CM_MAKE_CORES:-2} +MLC_MAKE_CORES=${MLC_MAKE_CORES:-${MLC_HOST_CPU_TOTAL_CORES}} +MLC_MAKE_CORES=${MLC_MAKE_CORES:-2} -if [[ ${CM_SHARED_BUILD} == "yes" ]]; then +if [[ ${MLC_SHARED_BUILD} == "yes" ]]; then SHARED_BUILD_FLAGS=" --enable-shared" else SHARED_BUILD_FLAGS="" @@ -20,13 +20,13 @@ fi EXTRA_FLAGS="" -if [[ ${CM_ENABLE_SSL} == "yes" ]]; then +if [[ ${MLC_ENABLE_SSL} == "yes" ]]; then EXTRA_FLAGS="${EXTRA_FLAGS} --enable-ssl" fi -if [[ ${CM_CUSTOM_SSL} == "yes" ]]; then - EXTRA_FLAGS="${EXTRA_FLAGS} --with-openssl=${CM_OPENSSL_INSTALLED_PATH} --with-openssl-rpath=auto" +if [[ ${MLC_CUSTOM_SSL} == "yes" ]]; then + EXTRA_FLAGS="${EXTRA_FLAGS} --with-openssl=${MLC_OPENSSL_INSTALLED_PATH} --with-openssl-rpath=auto" fi rm -rf src @@ -38,7 +38,7 @@ mkdir install cd src pwd -wget -nc ${CM_WGET_URL} +wget -nc ${MLC_WGET_URL} if [ "${?}" != "0" ]; then exit 1; fi @@ -51,11 +51,11 @@ if [ "${?}" != "0" ]; then exit 1; fi cd Python-${PYTHON_VERSION} -./configure ${CM_PYTHON_OPTIMIZATION_FLAG} ${CM_PYTHON_LTO_FLAG} ${SHARED_BUILD_FLAGS} ${EXTRA_FLAGS} --with-ensurepip=install --prefix="${CUR_DIR}/install" +./configure ${MLC_PYTHON_OPTIMIZATION_FLAG} ${MLC_PYTHON_LTO_FLAG} ${SHARED_BUILD_FLAGS} ${EXTRA_FLAGS} --with-ensurepip=install --prefix="${CUR_DIR}/install" if [ "${?}" != "0" ]; then exit 1; fi -make -j${CM_MAKE_CORES} -make -j${CM_MAKE_CORES} install +make -j${MLC_MAKE_CORES} +make -j${MLC_MAKE_CORES} install if [ "${?}" != "0" ]; then exit 1; fi echo "Removing src files" diff --git a/script/install-python-venv/customize.py b/script/install-python-venv/customize.py index b73e5ac7d..7e3b85454 100644 --- a/script/install-python-venv/customize.py +++ b/script/install-python-venv/customize.py @@ -10,7 +10,7 @@ def preprocess(i): meta = i['meta'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') automation = i['automation'] @@ -20,7 +20,7 @@ def preprocess(i): add_extra_cache_tags = [] # for this script add_python_extra_cache_tags = ['virtual'] # for get-python script - name = env.get('CM_NAME', '') + name = env.get('MLC_NAME', '') if not quiet and name == '': print('') x = input( @@ -38,15 +38,15 @@ def preprocess(i): add_extra_cache_tags.append(name_tag) add_python_extra_cache_tags.append(name_tag) - env['CM_VIRTUAL_ENV_DIR'] = directory_name - env['CM_VIRTUAL_ENV_PATH'] = os.path.join(os.getcwd(), directory_name) + env['MLC_VIRTUAL_ENV_DIR'] = directory_name + env['MLC_VIRTUAL_ENV_PATH'] = os.path.join(os.getcwd(), directory_name) s = 'Scripts' if os_info['platform'] == 'windows' else 'bin' - env['CM_VIRTUAL_ENV_SCRIPTS_PATH'] = os.path.join( - env['CM_VIRTUAL_ENV_PATH'], s) + env['MLC_VIRTUAL_ENV_SCRIPTS_PATH'] = os.path.join( + env['MLC_VIRTUAL_ENV_PATH'], s) - env['CM_TMP_PATH'] = env['CM_VIRTUAL_ENV_SCRIPTS_PATH'] - env['CM_TMP_FAIL_IF_NOT_FOUND'] = 'yes' + env['MLC_TMP_PATH'] = env['MLC_VIRTUAL_ENV_SCRIPTS_PATH'] + env['MLC_TMP_FAIL_IF_NOT_FOUND'] = 'yes' r = automation.update_deps({'deps': meta['post_deps'], 'update_deps': {'register-python': @@ -54,7 +54,7 @@ def preprocess(i): if r['return'] > 0: return r - env['CM_PYTHON_INSTALLED_PATH'] = env['CM_VIRTUAL_ENV_SCRIPTS_PATH'] + env['MLC_PYTHON_INSTALLED_PATH'] = env['MLC_VIRTUAL_ENV_SCRIPTS_PATH'] return {'return': 0, 'add_extra_cache_tags': add_extra_cache_tags} @@ -70,7 +70,7 @@ def postprocess(i): script_prefix = state.get('script_prefix', []) path_to_activate = os.path.join( - 
env['CM_VIRTUAL_ENV_SCRIPTS_PATH'], 'activate') + env['MLC_VIRTUAL_ENV_SCRIPTS_PATH'], 'activate') # If windows, download here otherwise use run.sh if os_info['platform'] == 'windows': @@ -84,7 +84,7 @@ def postprocess(i): python_name = 'python.exe' if os_info['platform'] == 'windows' else 'python3' # Will be passed to get-python to finalize registering of the new python - env['CM_PYTHON_BIN_WITH_PATH'] = os.path.join( - env['CM_PYTHON_INSTALLED_PATH'], python_name) + env['MLC_PYTHON_BIN_WITH_PATH'] = os.path.join( + env['MLC_PYTHON_INSTALLED_PATH'], python_name) return {'return': 0} diff --git a/script/install-python-venv/meta.yaml b/script/install-python-venv/meta.yaml index f914d7d57..1d3733eee 100644 --- a/script/install-python-venv/meta.yaml +++ b/script/install-python-venv/meta.yaml @@ -9,8 +9,8 @@ deps: reuse_version: true tags: get,python,-virtual new_env_keys: -- CM_VIRTUAL_ENV_* -- CM_PYTHON_BIN_WITH_PATH +- MLC_VIRTUAL_ENV_* +- MLC_PYTHON_BIN_WITH_PATH new_state_keys: - script_prefix post_deps: diff --git a/script/install-python-venv/run.bat b/script/install-python-venv/run.bat index 6c48e1bdc..75cf28649 100644 --- a/script/install-python-venv/run.bat +++ b/script/install-python-venv/run.bat @@ -1,5 +1,5 @@ -%CM_PYTHON_BIN_WITH_PATH% -m pip install virtualenv +%MLC_PYTHON_BIN_WITH_PATH% -m pip install virtualenv IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% -%CM_PYTHON_BIN_WITH_PATH% -m venv %CM_VIRTUAL_ENV_DIR% +%MLC_PYTHON_BIN_WITH_PATH% -m venv %MLC_VIRTUAL_ENV_DIR% IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/install-python-venv/run.sh b/script/install-python-venv/run.sh index 87dfcaf10..223b6a888 100644 --- a/script/install-python-venv/run.sh +++ b/script/install-python-venv/run.sh @@ -1,10 +1,10 @@ #!/bin/bash -#PIP_EXTRA=`${CM_PYTHON_BIN} -c "import pkg_resources; print(' --break-system-packages ' if int(pkg_resources.get_distribution('pip').version.split('.')[0]) >= 23 else '')"` -PIP_EXTRA=`${CM_PYTHON_BIN} -c "import importlib.metadata; print(' --break-system-packages ' if int(importlib.metadata.version('pip').split('.')[0]) >= 23 else '')"` +#PIP_EXTRA=`${MLC_PYTHON_BIN} -c "import pkg_resources; print(' --break-system-packages ' if int(pkg_resources.get_distribution('pip').version.split('.')[0]) >= 23 else '')"` +PIP_EXTRA=`${MLC_PYTHON_BIN} -c "import importlib.metadata; print(' --break-system-packages ' if int(importlib.metadata.version('pip').split('.')[0]) >= 23 else '')"` -${CM_PYTHON_BIN_WITH_PATH} -m pip install virtualenv ${PIP_EXTRA} +${MLC_PYTHON_BIN_WITH_PATH} -m pip install virtualenv ${PIP_EXTRA} test $? -eq 0 || exit 1 -${CM_PYTHON_BIN_WITH_PATH} -m venv ${CM_VIRTUAL_ENV_DIR} +${MLC_PYTHON_BIN_WITH_PATH} -m venv ${MLC_VIRTUAL_ENV_DIR} test $? -eq 0 || exit 1 diff --git a/script/install-pytorch-from-src/customize.py b/script/install-pytorch-from-src/customize.py index 3f8735654..acc104efd 100644 --- a/script/install-pytorch-from-src/customize.py +++ b/script/install-pytorch-from-src/customize.py @@ -11,16 +11,16 @@ def preprocess(i): env = i['env'] - if env.get('CM_MLPERF_INFERENCE_INTEL_LANGUAGE_MODEL', '') == "yes": + if env.get('MLC_MLPERF_INFERENCE_INTEL_LANGUAGE_MODEL', '') == "yes": i['run_script_input']['script_name'] = "run-intel-mlperf-inference-v3_1" run_cmd = "CC=clang CXX=clang++ USE_CUDA=OFF python -m pip install -e . 
" - env['CM_RUN_CMD'] = run_cmd - elif env.get('CM_MLPERF_INFERENCE_INTEL_MODEL', '') in ["resnet50", "retinanet"]: + env['MLC_RUN_CMD'] = run_cmd + elif env.get('MLC_MLPERF_INFERENCE_INTEL_MODEL', '') in ["resnet50", "retinanet"]: i['run_script_input']['script_name'] = "run-intel-mlperf-inference-vision" - run_cmd = f"CC={env['CM_C_COMPILER_WITH_PATH']} CXX={env['CM_CXX_COMPILER_WITH_PATH']} USE_CUDA=OFF python -m pip install -e . " + run_cmd = f"CC={env['MLC_C_COMPILER_WITH_PATH']} CXX={env['MLC_CXX_COMPILER_WITH_PATH']} USE_CUDA=OFF python -m pip install -e . " - env['CM_RUN_CMD'] = run_cmd + env['MLC_RUN_CMD'] = run_cmd if not env.get('+ CFLAGS', []): env['+ CFLAGS'] = [] diff --git a/script/install-pytorch-from-src/meta.yaml b/script/install-pytorch-from-src/meta.yaml index 479fa9e01..95d1cc1bf 100644 --- a/script/install-pytorch-from-src/meta.yaml +++ b/script/install-pytorch-from-src/meta.yaml @@ -10,32 +10,32 @@ deps: - python - python3 skip_if_env: - CM_CONDA_ENV: + MLC_CONDA_ENV: - 'yes' tags: get,python3 - names: - compiler tags: get,compiler - env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_PYTORCH_SRC_REPO_PATH + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_PYTORCH_SRC_REPO_PATH extra_cache_tags: pytorch,src,pytorch-src,pytorch-src-repo names: - pytorch-src-repo tags: get,git,repo update_tags_from_env_with_prefix: _branch.: - - CM_GIT_CHECKOUT + - MLC_GIT_CHECKOUT _repo.: - - CM_GIT_URL + - MLC_GIT_URL _sha.: - - CM_GIT_CHECKOUT_SHA + - MLC_GIT_CHECKOUT_SHA _tag.: - - CM_GIT_CHECKOUT_TAG + - MLC_GIT_CHECKOUT_TAG env: - CM_GIT_URL: https://github.com/pytorch/pytorch + MLC_GIT_URL: https://github.com/pytorch/pytorch name: Build pytorch from sources new_env_keys: -- CM_PYTORCH_* +- MLC_PYTORCH_* prehook_deps: [] sort: 1000 tags: @@ -50,7 +50,7 @@ uid: 64eaf3e81de94f41 variations: branch.#: env: - CM_GIT_CHECKOUT: '#' + MLC_GIT_CHECKOUT: '#' cherrypicks.#: ad: pytorch-src-repo: @@ -62,10 +62,10 @@ variations: - cuda tags: get,cuda,_cudnn env: - CUDA_HOME: <<>> - CUDA_NVCC_EXECUTABLE: <<>> - CUDNN_INCLUDE_PATH: <<>> - CUDNN_LIBRARY_PATH: <<>> + CUDA_HOME: <<>> + CUDA_NVCC_EXECUTABLE: <<>> + CUDNN_INCLUDE_PATH: <<>> + CUDNN_LIBRARY_PATH: <<>> TORCH_CUDA_ARCH_LIST: Ampere Ada Hopper TORCH_CXX_FLAGS: -D_GLIBCXX_USE_CXX11_ABI=1 USE_CUDA: '1' @@ -108,9 +108,9 @@ variations: - libstdcxx-ng tags: get,generic,conda-package,_package.libstdcxx-ng,_source.conda-forge env: - CM_CONDA_ENV: 'yes' - CM_MLPERF_INFERENCE_INTEL: 'yes' - CM_MLPERF_INFERENCE_INTEL_MODEL: resnet50 + MLC_CONDA_ENV: 'yes' + MLC_MLPERF_INFERENCE_INTEL: 'yes' + MLC_MLPERF_INFERENCE_INTEL_MODEL: resnet50 USE_CUDA: '0' for-intel-mlperf-inference-retinanet: adr: @@ -151,9 +151,9 @@ variations: - libstdcxx-ng tags: get,generic,conda-package,_package.libstdcxx-ng,_source.conda-forge env: - CM_CONDA_ENV: 'yes' - CM_MLPERF_INFERENCE_INTEL: 'yes' - CM_MLPERF_INFERENCE_INTEL_MODEL: retinanet + MLC_CONDA_ENV: 'yes' + MLC_MLPERF_INFERENCE_INTEL: 'yes' + MLC_MLPERF_INFERENCE_INTEL_MODEL: retinanet USE_CUDA: '0' for-intel-mlperf-inference-v3.1-bert: adr: @@ -225,9 +225,9 @@ variations: - libstdcxx-ng tags: get,generic,conda-package,_package.libstdcxx-ng,_source.conda-forge env: - CM_CONDA_ENV: 'yes' - CM_MLPERF_INFERENCE_INTEL: 'yes' - CM_MLPERF_INFERENCE_INTEL_LANGUAGE_MODEL: 'yes' + MLC_CONDA_ENV: 'yes' + MLC_MLPERF_INFERENCE_INTEL: 'yes' + MLC_MLPERF_INFERENCE_INTEL_LANGUAGE_MODEL: 'yes' USE_CUDA: '0' for-intel-mlperf-inference-v3.1-dlrm-v2: ad: @@ -269,20 +269,20 @@ variations: env: {} repo.#: env: - CM_GIT_URL: '#' + MLC_GIT_URL: '#' group: repo 
repo.https://github.com/pytorch/pytorch: default: true env: - CM_GIT_URL: https://github.com/pytorch/pytorch + MLC_GIT_URL: https://github.com/pytorch/pytorch group: repo sha.#: env: - CM_GIT_CHECKOUT_SHA: '#' + MLC_GIT_CHECKOUT_SHA: '#' tag.#: ad: pytorch-src-repo: tags: _no-recurse-submodules,_full-history env: - CM_GIT_CHECKOUT_TAG: '#' + MLC_GIT_CHECKOUT_TAG: '#' versions: {} diff --git a/script/install-pytorch-from-src/run-intel-mlperf-inference-v3_1.sh b/script/install-pytorch-from-src/run-intel-mlperf-inference-v3_1.sh index 7ad6fbd61..c831a9271 100644 --- a/script/install-pytorch-from-src/run-intel-mlperf-inference-v3_1.sh +++ b/script/install-pytorch-from-src/run-intel-mlperf-inference-v3_1.sh @@ -1,10 +1,10 @@ #!/bin/bash -export PATH=${CM_CONDA_BIN_PATH}:$PATH +export PATH=${MLC_CONDA_BIN_PATH}:$PATH CUR_DIR=$PWD rm -rf pytorch -cp -r ${CM_PYTORCH_SRC_REPO_PATH} pytorch +cp -r ${MLC_PYTORCH_SRC_REPO_PATH} pytorch cd pytorch rm -rf build @@ -31,7 +31,7 @@ git apply pytorch_official_1_12.patch if [ "${?}" != "0" ]; then exit 1; fi pip install -r requirements.txt -cmd="${CM_RUN_CMD}" +cmd="${MLC_RUN_CMD}" echo ${cmd} eval ${cmd} diff --git a/script/install-pytorch-from-src/run-intel-mlperf-inference-vision.sh b/script/install-pytorch-from-src/run-intel-mlperf-inference-vision.sh index f3bd3d771..20a29d82f 100644 --- a/script/install-pytorch-from-src/run-intel-mlperf-inference-vision.sh +++ b/script/install-pytorch-from-src/run-intel-mlperf-inference-vision.sh @@ -1,11 +1,11 @@ #!/bin/bash -#export PATH=${CM_CONDA_BIN_PATH}:$PATH -#export LIBRARY_PATH=${CM_CONDA_LIB_PATH}:$LIBRARY_PATH +#export PATH=${MLC_CONDA_BIN_PATH}:$PATH +#export LIBRARY_PATH=${MLC_CONDA_LIB_PATH}:$LIBRARY_PATH CUR_DIR=$PWD rm -rf pytorch -cp -r ${CM_PYTORCH_SRC_REPO_PATH} pytorch +cp -r ${MLC_PYTORCH_SRC_REPO_PATH} pytorch cd pytorch rm -rf build @@ -14,7 +14,7 @@ git submodule update --init --recursive if [ "${?}" != "0" ]; then exit 1; fi pip install -r requirements.txt -cmd="${CM_RUN_CMD}" +cmd="${MLC_RUN_CMD}" echo ${cmd} eval ${cmd} diff --git a/script/install-pytorch-from-src/run.sh b/script/install-pytorch-from-src/run.sh index 08ddde105..fe651d4bc 100644 --- a/script/install-pytorch-from-src/run.sh +++ b/script/install-pytorch-from-src/run.sh @@ -2,27 +2,27 @@ gcc() { - ${CM_GCC_BIN_WITH_PATH} "$@" + ${MLC_GCC_BIN_WITH_PATH} "$@" } export -f gcc CUR_DIR=$PWD if [[ ! -e pytorch/dist/torch*.whl ]]; then rm -rf pytorch - cp -r ${CM_PYTORCH_SRC_REPO_PATH} pytorch + cp -r ${MLC_PYTORCH_SRC_REPO_PATH} pytorch cd pytorch git submodule sync git submodule update --init --recursive rm -rf build - ${CM_PYTHON_BIN_WITH_PATH} -m pip install -r requirements.txt + ${MLC_PYTHON_BIN_WITH_PATH} -m pip install -r requirements.txt test $? -eq 0 || exit $? - ${CM_PYTHON_BIN_WITH_PATH} setup.py bdist_wheel + ${MLC_PYTHON_BIN_WITH_PATH} setup.py bdist_wheel test $? -eq 0 || exit $? else cd pytorch fi cd dist -${CM_PYTHON_BIN_WITH_PATH} -m pip install torch-2.*linux_x86_64.whl +${MLC_PYTHON_BIN_WITH_PATH} -m pip install torch-2.*linux_x86_64.whl test $? -eq 0 || exit $? 
diff --git a/script/install-pytorch-kineto-from-src/meta.yaml b/script/install-pytorch-kineto-from-src/meta.yaml index 11a5dd8ff..80f60ca35 100644 --- a/script/install-pytorch-kineto-from-src/meta.yaml +++ b/script/install-pytorch-kineto-from-src/meta.yaml @@ -10,31 +10,31 @@ deps: - python - python3 skip_if_env: - CM_CONDA_ENV: + MLC_CONDA_ENV: - 'yes' tags: get,python3 - tags: get,cmake version_min: 3.25.0 - env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_PYTORCH_KINETO_SRC_REPO_PATH + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_PYTORCH_KINETO_SRC_REPO_PATH extra_cache_tags: pytorch-kineto,kineto,src,pytorch-kineto-src,pytorch-kineto-src-repo names: - pytorch-kineto-src-repo tags: get,git,repo update_tags_from_env_with_prefix: _branch.: - - CM_GIT_CHECKOUT + - MLC_GIT_CHECKOUT _repo.: - - CM_GIT_URL + - MLC_GIT_URL _sha.: - - CM_GIT_CHECKOUT_SHA + - MLC_GIT_CHECKOUT_SHA _tag.: - - CM_GIT_CHECKOUT_TAG + - MLC_GIT_CHECKOUT_TAG env: - CM_GIT_URL: https://github.com/pytorch/kineto + MLC_GIT_URL: https://github.com/pytorch/kineto name: Build pytorch kineto from sources new_env_keys: -- CM_PYTORCH_KINETO_* +- MLC_PYTORCH_KINETO_* prehook_deps: [] sort: 1000 tags: @@ -49,37 +49,37 @@ uid: 98a4b061712d4483 variations: branch.#: env: - CM_GIT_CHECKOUT: '#' + MLC_GIT_CHECKOUT: '#' cuda: deps: - names: - cuda tags: get,cuda,_cudnn env: - CUDA_HOME: <<>> - CUDA_NVCC_EXECUTABLE: <<>> - CUDNN_INCLUDE_PATH: <<>> - CUDNN_LIBRARY_PATH: <<>> + CUDA_HOME: <<>> + CUDA_NVCC_EXECUTABLE: <<>> + CUDNN_INCLUDE_PATH: <<>> + CUDNN_LIBRARY_PATH: <<>> TORCH_CUDA_ARCH_LIST: Ampere Ada Hopper TORCH_CXX_FLAGS: -D_GLIBCXX_USE_CXX11_ABI=1 USE_CUDA: '1' USE_CUDNN: '1' repo.#: env: - CM_GIT_URL: '#' + MLC_GIT_URL: '#' group: repo repo.https://github.com/pytorch/kineto: default: true env: - CM_GIT_URL: https://github.com/pytorch/kineto + MLC_GIT_URL: https://github.com/pytorch/kineto group: repo sha.#: env: - CM_GIT_CHECKOUT_SHA: '#' + MLC_GIT_CHECKOUT_SHA: '#' tag.#: ad: pytorch-src-repo: tags: _full-history env: - CM_GIT_CHECKOUT_TAG: '#' + MLC_GIT_CHECKOUT_TAG: '#' versions: {} diff --git a/script/install-pytorch-kineto-from-src/run.sh b/script/install-pytorch-kineto-from-src/run.sh index bd162e7f8..96e817e64 100644 --- a/script/install-pytorch-kineto-from-src/run.sh +++ b/script/install-pytorch-kineto-from-src/run.sh @@ -2,7 +2,7 @@ CUR_DIR=$PWD rm -rf kineto -cp -r ${CM_PYTORCH_KINETO_SRC_REPO_PATH} kineto +cp -r ${MLC_PYTORCH_KINETO_SRC_REPO_PATH} kineto cd kineto rm -rf libkineto/build diff --git a/script/install-qaic-compute-sdk-from-src/customize.py b/script/install-qaic-compute-sdk-from-src/customize.py index ec0a42185..30371e4b1 100644 --- a/script/install-qaic-compute-sdk-from-src/customize.py +++ b/script/install-qaic-compute-sdk-from-src/customize.py @@ -12,18 +12,18 @@ def preprocess(i): automation = i['automation'] - env['CM_QAIC_COMPUTE_SDK_PATH'] = env['CM_GIT_CHECKOUT_PATH'] + env['MLC_QAIC_COMPUTE_SDK_PATH'] = env['MLC_GIT_CHECKOUT_PATH'] ''' if env.get('+PATH', []) == []: env['+PATH'] = [] - env['+PATH'].append(env['CM_LLVM_INSTALLED_PATH']) + env['+PATH'].append(env['MLC_LLVM_INSTALLED_PATH']) if env.get('+LD_LIBRARY_PATH', []) == []: env['+LD_LIBRARY_PATH'] = [] - env['+LD_LIBRARY_PATH'].append(os.path.join(env['CM_LLVM_INSTALLED_PATH'], "..", "lib")) + env['+LD_LIBRARY_PATH'].append(os.path.join(env['MLC_LLVM_INSTALLED_PATH'], "..", "lib")) ''' - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') return {'return': 0} @@ -31,20 +31,20 @@ def preprocess(i): def 
postprocess(i): env = i['env'] - # env['CM_QAIC_RUNNER_PATH'] = os.path.join(env['CM_QAIC_SOFTWARE_KIT_PATH'], "build", "utils", "qaic-runner") + # env['MLC_QAIC_RUNNER_PATH'] = os.path.join(env['MLC_QAIC_SOFTWARE_KIT_PATH'], "build", "utils", "qaic-runner") if '+PATH' not in env: env['+PATH'] = [] - env['CM_QAIC_COMPUTE_SDK_INSTALL_PATH'] = os.path.join( + env['MLC_QAIC_COMPUTE_SDK_INSTALL_PATH'] = os.path.join( os.getcwd(), "src", "install", "qaic-compute-" + - env['CM_QAIC_COMPUTE_SDK_INSTALL_MODE']) + env['MLC_QAIC_COMPUTE_SDK_INSTALL_MODE']) - env['QAIC_COMPUTE_INSTALL_DIR'] = env['CM_QAIC_COMPUTE_SDK_INSTALL_PATH'] + env['QAIC_COMPUTE_INSTALL_DIR'] = env['MLC_QAIC_COMPUTE_SDK_INSTALL_PATH'] - env['+PATH'].append(os.path.join(env['CM_QAIC_COMPUTE_SDK_INSTALL_PATH'], "exec")) + env['+PATH'].append(os.path.join(env['MLC_QAIC_COMPUTE_SDK_INSTALL_PATH'], "exec")) return {'return': 0} diff --git a/script/install-qaic-compute-sdk-from-src/meta.yaml b/script/install-qaic-compute-sdk-from-src/meta.yaml index de3024209..404f46103 100644 --- a/script/install-qaic-compute-sdk-from-src/meta.yaml +++ b/script/install-qaic-compute-sdk-from-src/meta.yaml @@ -10,9 +10,9 @@ deps: tags: get,git,repo,_repo.https://github.com/quic/software-kit-for-qualcomm-cloud-ai-100-cc update_tags_from_env_with_prefix: _branch.: - - CM_GIT_CHECKOUT + - MLC_GIT_CHECKOUT _repo.: - - CM_GIT_URL + - MLC_GIT_URL - names: - cmake tags: get,cmake @@ -26,7 +26,7 @@ deps: - tags: get,generic-sys-util,_ninja-build - tags: get,generic-sys-util,_rsync - env: - CM_EXTRACT_FINAL_ENV_NAME: CM_HEXAGON_TOOLS_INSTALLED_DIR + MLC_EXTRACT_FINAL_ENV_NAME: MLC_HEXAGON_TOOLS_INSTALLED_DIR extra_cache_tags: hexagon-compiler force_cache: true names: @@ -36,7 +36,7 @@ input_description: {} input_mapping: {} new_env_keys: - +PATH -- CM_QAIC_COMPUTE_SDK_PATH +- MLC_QAIC_COMPUTE_SDK_PATH new_state_keys: [] post_deps: [] posthook_deps: [] @@ -54,27 +54,27 @@ uid: 9701bdda97fa4045 variations: branch.#: env: - CM_GIT_CHECKOUT: '#' + MLC_GIT_CHECKOUT: '#' debug: env: - CM_QAIC_COMPUTE_SDK_INSTALL_MODE: debug + MLC_QAIC_COMPUTE_SDK_INSTALL_MODE: debug group: installation-mode release: default: true env: - CM_QAIC_COMPUTE_SDK_INSTALL_MODE: release + MLC_QAIC_COMPUTE_SDK_INSTALL_MODE: release group: installation-mode release-assert: env: - CM_QAIC_COMPUTE_SDK_INSTALL_MODE: release-assert + MLC_QAIC_COMPUTE_SDK_INSTALL_MODE: release-assert group: installation-mode repo.#: env: - CM_GIT_URL: '#' + MLC_GIT_URL: '#' group: repo-source repo.quic: default: true env: - CM_GIT_URL: https://github.com/quic/software-kit-for-qualcomm-cloud-ai-100-cc + MLC_GIT_URL: https://github.com/quic/software-kit-for-qualcomm-cloud-ai-100-cc group: repo-source versions: {} diff --git a/script/install-qaic-compute-sdk-from-src/run.sh b/script/install-qaic-compute-sdk-from-src/run.sh index 734fe01b9..164d31814 100644 --- a/script/install-qaic-compute-sdk-from-src/run.sh +++ b/script/install-qaic-compute-sdk-from-src/run.sh @@ -1,24 +1,24 @@ #!/bin/bash function cmake() { -${CM_CMAKE_BIN_WITH_PATH} $@ +${MLC_CMAKE_BIN_WITH_PATH} $@ } -export CC=${CM_C_COMPILER_WITH_PATH} -export CXX=${CM_CXX_COMPILER_WITH_PATH} +export CC=${MLC_C_COMPILER_WITH_PATH} +export CXX=${MLC_CXX_COMPILER_WITH_PATH} export -f cmake -export HEXAGON_TOOLS_DIR=${CM_HEXAGON_TOOLS_INSTALLED_DIR}/clang+llvm-15.0.5-cross-hexagon-unknown-linux-musl/x86_64-linux-gnu +export HEXAGON_TOOLS_DIR=${MLC_HEXAGON_TOOLS_INSTALLED_DIR}/clang+llvm-15.0.5-cross-hexagon-unknown-linux-musl/x86_64-linux-gnu mkdir -p src -rsync 
-avz --exclude=.git ${CM_QAIC_COMPUTE_SDK_PATH}/ src/ +rsync -avz --exclude=.git ${MLC_QAIC_COMPUTE_SDK_PATH}/ src/ cd src -if [[ ${CM_CLEAN_BUILD} == "yes" ]]; then +if [[ ${MLC_CLEAN_BUILD} == "yes" ]]; then rm -rf build fi -./scripts/build.sh --${CM_QAIC_COMPUTE_SDK_INSTALL_MODE} --install +./scripts/build.sh --${MLC_QAIC_COMPUTE_SDK_INSTALL_MODE} --install test $? -eq 0 || exit $? cd - diff --git a/script/install-rapidjson-from-src/meta.yaml b/script/install-rapidjson-from-src/meta.yaml index b754a02d6..4ceb31d05 100644 --- a/script/install-rapidjson-from-src/meta.yaml +++ b/script/install-rapidjson-from-src/meta.yaml @@ -7,7 +7,7 @@ deps: - tags: detect,os - tags: detect,cpu - env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_RAPIDJSON_SRC_REPO_PATH + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_RAPIDJSON_SRC_REPO_PATH extra_cache_tags: rapidjson,src,rapidjson-src,rapidjson-src-repo names: - rapidjson-src-repo @@ -15,17 +15,17 @@ deps: tags: get,git,repo update_tags_from_env_with_prefix: _branch.: - - CM_GIT_CHECKOUT + - MLC_GIT_CHECKOUT _repo.: - - CM_GIT_URL + - MLC_GIT_URL _sha.: - - CM_GIT_CHECKOUT_SHA + - MLC_GIT_CHECKOUT_SHA _tag.: - - CM_GIT_CHECKOUT_TAG + - MLC_GIT_CHECKOUT_TAG env: {} name: Build rapidjson from sources new_env_keys: -- CM_RAPIDJSON_* +- MLC_RAPIDJSON_* prehook_deps: [] sort: 1000 tags: @@ -39,23 +39,23 @@ uid: 5171e69b4bb94989 variations: branch.#: env: - CM_GIT_CHECKOUT: '#' + MLC_GIT_CHECKOUT: '#' repo.#: env: - CM_GIT_URL: '#' + MLC_GIT_URL: '#' group: repo repo.https://github.com/Tencent/rapidjson: default: true env: - CM_GIT_URL: https://github.com/Tencent/rapidjson + MLC_GIT_URL: https://github.com/Tencent/rapidjson group: repo sha.#: env: - CM_GIT_CHECKOUT_SHA: '#' + MLC_GIT_CHECKOUT_SHA: '#' tag.#: ad: rapidjson-src-repo: tags: _no-recurse-submodules,_full-history env: - CM_GIT_CHECKOUT_TAG: '#' + MLC_GIT_CHECKOUT_TAG: '#' versions: {} diff --git a/script/install-rapidjson-from-src/run.sh b/script/install-rapidjson-from-src/run.sh index 4a6b2ec7d..0172d28cf 100644 --- a/script/install-rapidjson-from-src/run.sh +++ b/script/install-rapidjson-from-src/run.sh @@ -2,7 +2,7 @@ CUR_DIR=$PWD rm -rf rapidjson -cp -r ${CM_RAPIDJSON_SRC_REPO_PATH} rapidjson +cp -r ${MLC_RAPIDJSON_SRC_REPO_PATH} rapidjson cd rapidjson test "${?}" -eq "0" || exit $? rm -rf build @@ -11,5 +11,5 @@ mkdir build cd build cmake .. test "${?}" -eq "0" || exit $? -make -j${CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET} +make -j${MLC_HOST_CPU_PHYSICAL_CORES_PER_SOCKET} test "${?}" -eq "0" || exit $? 
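The rapidjson run.sh above repeats the status-check idiom used throughout these run scripts: every command is followed immediately by a test of $?. One subtlety worth knowing when extending these scripts, shown in a short sketch:

#!/bin/bash
# The check must be the very next statement: any command in between
# overwrites $?.
cmake ..
test "${?}" -eq "0" || exit $?

# Caveat: when the || branch runs, $? already holds the result of `test`
# itself (1), so the original failure code is lost (the script still aborts,
# just with a generic code). Capturing the status first preserves it:
make -j"${MLC_HOST_CPU_PHYSICAL_CORES_PER_SOCKET:-2}"
rc=$?
test "${rc}" -eq 0 || exit "${rc}"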
diff --git a/script/install-rocm/customize.py b/script/install-rocm/customize.py index 9891f8e62..103150312 100644 --- a/script/install-rocm/customize.py +++ b/script/install-rocm/customize.py @@ -14,8 +14,8 @@ def postprocess(i): env = i['env'] installed_path = "/opt/rocm/bin" - env['CM_ROCM_INSTALLED_PATH'] = installed_path - env['CM_ROCM_BIN_WITH_PATH'] = os.path.join(installed_path, "rocminfo") + env['MLC_ROCM_INSTALLED_PATH'] = installed_path + env['MLC_ROCM_BIN_WITH_PATH'] = os.path.join(installed_path, "rocminfo") env['+PATH'] = [installed_path] return {'return': 0} diff --git a/script/install-rocm/meta.yaml b/script/install-rocm/meta.yaml index 395ed8764..5a0fc8a39 100644 --- a/script/install-rocm/meta.yaml +++ b/script/install-rocm/meta.yaml @@ -9,7 +9,7 @@ deps: - tags: detect,os env: {} new_env_keys: -- CM_ROCM_* +- MLC_ROCM_* - +PATH tags: - install diff --git a/script/install-rocm/run-rhel.sh b/script/install-rocm/run-rhel.sh index 10f8a6789..49d4e11ee 100644 --- a/script/install-rocm/run-rhel.sh +++ b/script/install-rocm/run-rhel.sh @@ -1,7 +1,7 @@ # Add the amdgpu module repository for RHEL repo1="[amdgpu] name=amdgpu -baseurl=https://repo.radeon.com/amdgpu/${CM_VERSION}/rhel/${CM_HOST_OS_VERSION}/main/x86_64 +baseurl=https://repo.radeon.com/amdgpu/${MLC_VERSION}/rhel/${MLC_HOST_OS_VERSION}/main/x86_64 enabled=1 gpgcheck=1 gpgkey=https://repo.radeon.com/rocm/rocm.gpg.key @@ -9,7 +9,7 @@ gpgkey=https://repo.radeon.com/rocm/rocm.gpg.key echo "${repo1}" | sudo tee /etc/yum.repos.d/amdgpu.repo # Add the rocm repository for RHEL -mainversion="${CM_HOST_OS_VERSION%%.*}" +mainversion="${MLC_HOST_OS_VERSION%%.*}" repo2="[rocm] name=rocm baseurl=https://repo.radeon.com/rocm/rhel${mainversion}/latest/main diff --git a/script/install-rocm/run-ubuntu.sh b/script/install-rocm/run-ubuntu.sh index 400ba5fa7..6aaeeed36 100644 --- a/script/install-rocm/run-ubuntu.sh +++ b/script/install-rocm/run-ubuntu.sh @@ -8,14 +8,14 @@ wget https://repo.radeon.com/rocm/rocm.gpg.key -O - | \ gpg --dearmor | sudo tee /etc/apt/keyrings/rocm.gpg > /dev/null ubuntuflavor="jammy" -if [[ ${CM_HOST_OS_VERSION} == "22.04" ]]; then +if [[ ${MLC_HOST_OS_VERSION} == "22.04" ]]; then ubuntuflavor="jammy" -elif [[ ${CM_HOST_OS_VERSION} == "20.04" ]]; then +elif [[ ${MLC_HOST_OS_VERSION} == "20.04" ]]; then ubuntuflavor="focal" fi # Kernel driver repository -deb1="deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/amdgpu/${CM_VERSION}/ubuntu ${ubuntuflavor} main" +deb1="deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/amdgpu/${MLC_VERSION}/ubuntu ${ubuntuflavor} main" echo $deb1 | sudo tee /etc/apt/sources.list.d/amdgpu.list # ROCm repository diff --git a/script/install-rocm/run.sh b/script/install-rocm/run.sh deleted file mode 100644 index 05a7907cf..000000000 --- a/script/install-rocm/run.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/bash - diff --git a/script/install-tensorflow-for-c/run.sh b/script/install-tensorflow-for-c/run.sh index 2f7c3957b..cab1b6cba 100644 --- a/script/install-tensorflow-for-c/run.sh +++ b/script/install-tensorflow-for-c/run.sh @@ -1,12 +1,12 @@ #!/bin/bash -CM_VERSION=${CM_VERSION:-2.8.0} -if [[ ${CM_HOST_PLATFORM_FLAVOR} != 'x86_64' ]]; then - echo "Platform ${CM_HOST_PLATFORM_FLAVOR} is not supported yet!"; +MLC_VERSION=${MLC_VERSION:-2.8.0} +if [[ ${MLC_HOST_PLATFORM_FLAVOR} != 'x86_64' ]]; then + echo "Platform ${MLC_HOST_PLATFORM_FLAVOR} is not supported yet!"; exit 1 fi mkdir install
-FILENAME=libtensorflow-cpu-${CM_HOST_OS_TYPE}-x86_64-${CM_VERSION}.tar.gz +FILENAME=libtensorflow-cpu-${MLC_HOST_OS_TYPE}-x86_64-${MLC_VERSION}.tar.gz wget -q --no-check-certificate https://storage.googleapis.com/tensorflow/libtensorflow/${FILENAME} tar -C install -xzf ${FILENAME} diff --git a/script/install-tensorflow-from-src/customize.py b/script/install-tensorflow-from-src/customize.py index e99d0b5a2..f3706f941 100644 --- a/script/install-tensorflow-from-src/customize.py +++ b/script/install-tensorflow-from-src/customize.py @@ -9,7 +9,7 @@ def preprocess(i): env = i['env'] - env['CC'] = env['CM_C_COMPILER_WITH_PATH'] + env['CC'] = env['MLC_C_COMPILER_WITH_PATH'] return {'return': 0} @@ -60,7 +60,7 @@ def postprocess(i): env['+C_INCLUDE_PATH'] = inc_paths env['+CPLUS_INCLUDE_PATH'] = inc_paths - tflite_lib = env.get("CM_TFLITE", "") + tflite_lib = env.get("MLC_TFLITE", "") if tflite_lib == "on": lib_path = os.path.join(bazel_install_bin, 'tensorflow', 'lite') else: diff --git a/script/install-tensorflow-from-src/meta.yaml b/script/install-tensorflow-from-src/meta.yaml index 31542404e..30821bb38 100644 --- a/script/install-tensorflow-from-src/meta.yaml +++ b/script/install-tensorflow-from-src/meta.yaml @@ -5,22 +5,22 @@ cache: true category: AI/ML frameworks clean_files: [] default_env: - CM_GIT_DEPTH: '1' - CM_GIT_URL: https://github.com/tensorflow/tensorflow - CM_TFLITE: 'off' + MLC_GIT_DEPTH: '1' + MLC_GIT_URL: https://github.com/tensorflow/tensorflow + MLC_TFLITE: 'off' default_version: master deps: - tags: detect,cpu - tags: detect,os - enable_if_env: - CM_HOST_OS_FLAVOR: + MLC_HOST_OS_FLAVOR: - ubuntu - CM_HOST_OS_VERSION: + MLC_HOST_OS_VERSION: - '18.04' tags: get,generic-sys-util,_zlib - tags: get,generic-python-lib,_package.numpy extra_cache_tags_from_env: -- env: CM_PYTHON_CACHE_TAGS +- env: MLC_PYTHON_CACHE_TAGS prefix: python- new_env_keys: - +C_INCLUDE_PATH @@ -41,7 +41,7 @@ uid: a974533c4c854597 variations: tflite: env: - CM_TFLITE: 'on' + MLC_TFLITE: 'on' versions: master: deps: @@ -58,7 +58,7 @@ versions: - tags: get,bazel version: 6.5.0 env: - CM_GIT_CHECKOUT: master + MLC_GIT_CHECKOUT: master v1.15.0: deps: - names: @@ -73,7 +73,7 @@ versions: - tags: get,bazel version: 0.26.1 env: - CM_GIT_CHECKOUT: v1.15.0 + MLC_GIT_CHECKOUT: v1.15.0 v2.0.0: deps: - names: @@ -89,7 +89,7 @@ versions: - tags: get,bazel version: 0.26.1 env: - CM_GIT_CHECKOUT: v2.0.0 + MLC_GIT_CHECKOUT: v2.0.0 v2.1.0: deps: - names: @@ -105,7 +105,7 @@ versions: - tags: get,bazel version: 0.27.1 env: - CM_GIT_CHECKOUT: v2.1.0 + MLC_GIT_CHECKOUT: v2.1.0 v2.10.0: deps: - names: @@ -121,7 +121,7 @@ versions: - tags: get,bazel version: 5.1.1 env: - CM_GIT_CHECKOUT: v2.10.0 + MLC_GIT_CHECKOUT: v2.10.0 v2.11.0: deps: - names: @@ -137,7 +137,7 @@ versions: - tags: get,bazel version: 5.3.0 env: - CM_GIT_CHECKOUT: v2.11.0 + MLC_GIT_CHECKOUT: v2.11.0 v2.12.0: deps: - names: @@ -153,7 +153,7 @@ versions: - tags: get,bazel version: 5.3.0 env: - CM_GIT_CHECKOUT: v2.12.0 + MLC_GIT_CHECKOUT: v2.12.0 v2.13.0: deps: - names: @@ -168,7 +168,7 @@ versions: - tags: get,bazel version: 5.3.0 env: - CM_GIT_CHECKOUT: v2.13.0 + MLC_GIT_CHECKOUT: v2.13.0 v2.14.0: deps: - names: @@ -183,7 +183,7 @@ versions: - tags: get,bazel version: 6.1.0 env: - CM_GIT_CHECKOUT: v2.14.0 + MLC_GIT_CHECKOUT: v2.14.0 v2.15.0: deps: - names: @@ -198,7 +198,7 @@ versions: - tags: get,bazel version: 6.1.0 env: - CM_GIT_CHECKOUT: v2.15.0 + MLC_GIT_CHECKOUT: v2.15.0 v2.16.1: deps: - names: @@ -215,7 +215,7 @@ versions: - tags: get,bazel version: 6.5.0 
env: - CM_GIT_CHECKOUT: v2.16.1 + MLC_GIT_CHECKOUT: v2.16.1 v2.2.0: deps: - names: @@ -231,7 +231,7 @@ versions: - tags: get,bazel version: 2.0.0 env: - CM_GIT_CHECKOUT: v2.2.0 + MLC_GIT_CHECKOUT: v2.2.0 v2.3.0: deps: - names: @@ -247,7 +247,7 @@ versions: - tags: get,bazel version: 3.1.0 env: - CM_GIT_CHECKOUT: v2.3.0 + MLC_GIT_CHECKOUT: v2.3.0 v2.4.0: deps: - names: @@ -263,7 +263,7 @@ versions: - tags: get,bazel version: 3.1.0 env: - CM_GIT_CHECKOUT: v2.4.0 + MLC_GIT_CHECKOUT: v2.4.0 v2.5.0: deps: - names: @@ -279,7 +279,7 @@ versions: - tags: get,bazel version: 3.7.2 env: - CM_GIT_CHECKOUT: v2.5.0 + MLC_GIT_CHECKOUT: v2.5.0 v2.6.0: deps: - names: @@ -295,7 +295,7 @@ versions: - tags: get,bazel version: 3.7.2 env: - CM_GIT_CHECKOUT: v2.6.0 + MLC_GIT_CHECKOUT: v2.6.0 v2.7.0: deps: - names: @@ -311,7 +311,7 @@ versions: - tags: get,bazel version: 3.7.2 env: - CM_GIT_CHECKOUT: v2.7.0 + MLC_GIT_CHECKOUT: v2.7.0 v2.8.0: deps: - names: @@ -327,7 +327,7 @@ versions: - tags: get,bazel version: 4.2.1 env: - CM_GIT_CHECKOUT: v2.8.0 + MLC_GIT_CHECKOUT: v2.8.0 v2.9.0: deps: - names: @@ -343,4 +343,4 @@ versions: - tags: get,bazel version: 5.0.0 env: - CM_GIT_CHECKOUT: v2.9.0 + MLC_GIT_CHECKOUT: v2.9.0 diff --git a/script/install-tensorflow-from-src/run.sh b/script/install-tensorflow-from-src/run.sh index d9090bf7d..b768d8656 100644 --- a/script/install-tensorflow-from-src/run.sh +++ b/script/install-tensorflow-from-src/run.sh @@ -2,10 +2,10 @@ CUR_DIR=${PWD:-tmp} if [ ! -d "src" ]; then - echo "Cloning Tensorflow from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT} --depth ${CM_GIT_DEPTH}..." - git clone --recursive -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} --depth ${CM_GIT_DEPTH} src + echo "Cloning Tensorflow from ${MLC_GIT_URL} with branch ${MLC_GIT_CHECKOUT} --depth ${MLC_GIT_DEPTH}..." 
+ git clone --recursive -b "${MLC_GIT_CHECKOUT}" ${MLC_GIT_URL} --depth ${MLC_GIT_DEPTH} src fi -CM_PYTHON_BIN=${CM_PYTHON_BIN:-python3} +MLC_PYTHON_BIN=${MLC_PYTHON_BIN:-python3} INSTALL_DIR="${CUR_DIR}" @@ -14,8 +14,8 @@ cd src #./configure #if [ "${?}" != "0" ]; then exit 1; fi -if [ "${CM_TFLITE}" == "on" ]; then - cmd="${CM_BAZEL_BIN_WITH_PATH} build -c opt --define tflite_with_xnnpack=true //tensorflow/lite:libtensorflowlite.so" +if [ "${MLC_TFLITE}" == "on" ]; then + cmd="${MLC_BAZEL_BIN_WITH_PATH} build -c opt --define tflite_with_xnnpack=true //tensorflow/lite:libtensorflowlite.so" echo $cmd eval $cmd if [ "${?}" != "0" ]; then exit 1; fi @@ -24,7 +24,7 @@ fi ./configure if [ "${?}" != "0" ]; then exit 1; fi echo "******************************************************" -cmd="${CM_BAZEL_BIN_WITH_PATH} build //tensorflow/tools/pip_package:build_pip_package" +cmd="${MLC_BAZEL_BIN_WITH_PATH} build //tensorflow/tools/pip_package:build_pip_package" echo $cmd eval $cmd if [ "${?}" != "0" ]; then exit 1; fi @@ -36,7 +36,7 @@ if [ "${?}" != "0" ]; then exit 1; fi # Clean build directory (too large) cd ${INSTALL_DIR} -if [ "${CM_TENSORFLOW_CLEAN_BUILD}" != "no" ]; then +if [ "${MLC_TENSORFLOW_CLEAN_BUILD}" != "no" ]; then rm -rf build fi diff --git a/script/install-terraform-from-src/customize.py b/script/install-terraform-from-src/customize.py index 475c0037c..cb01640bb 100644 --- a/script/install-terraform-from-src/customize.py +++ b/script/install-terraform-from-src/customize.py @@ -14,8 +14,8 @@ def postprocess(i): env = i['env'] installed_path = os.path.join(os.getcwd(), 'bin') - env['CM_TERRAFORM_INSTALLED_PATH'] = installed_path - env['CM_TERRAFORM_BIN_WITH_PATH'] = os.path.join( + env['MLC_TERRAFORM_INSTALLED_PATH'] = installed_path + env['MLC_TERRAFORM_BIN_WITH_PATH'] = os.path.join( installed_path, "terraform") env['+PATH'] = [installed_path] diff --git a/script/install-terraform-from-src/meta.yaml b/script/install-terraform-from-src/meta.yaml index a2cb2e446..d0bad2c47 100644 --- a/script/install-terraform-from-src/meta.yaml +++ b/script/install-terraform-from-src/meta.yaml @@ -9,9 +9,9 @@ deps: - tags: detect,cpu - tags: get,tool,go env: - CM_GIT_URL: https://github.com/hashicorp/terraform.git + MLC_GIT_URL: https://github.com/hashicorp/terraform.git new_env_keys: -- CM_TERRAFORM_* +- MLC_TERRAFORM_* - +PATH tags: - install @@ -21,4 +21,4 @@ uid: d79d47a074f34428 versions: main: env: - CM_GIT_CHECKOUT: main + MLC_GIT_CHECKOUT: main diff --git a/script/install-terraform-from-src/run.sh b/script/install-terraform-from-src/run.sh index 8cdb88302..9fbef576f 100644 --- a/script/install-terraform-from-src/run.sh +++ b/script/install-terraform-from-src/run.sh @@ -2,8 +2,8 @@ CUR_DIR=${PWD} if [ ! -d "terraform" ]; then - echo "Cloning Terraform from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT}..." - git clone -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} terraform + echo "Cloning Terraform from ${MLC_GIT_URL} with branch ${MLC_GIT_CHECKOUT}..." + git clone -b "${MLC_GIT_CHECKOUT}" ${MLC_GIT_URL} terraform fi test $? 
-eq 0 || exit 1 diff --git a/script/install-tflite-from-src/meta.yaml b/script/install-tflite-from-src/meta.yaml index 6825db569..f2c723a27 100644 --- a/script/install-tflite-from-src/meta.yaml +++ b/script/install-tflite-from-src/meta.yaml @@ -5,7 +5,7 @@ cache: true category: AI/ML frameworks clean_files: [] default_env: - CM_GIT_DEPTH: '1' + MLC_GIT_DEPTH: '1' default_version: master deps: - tags: detect,cpu @@ -14,9 +14,9 @@ deps: tags: get,compiler - tags: get,cmake env: - CM_GIT_URL: https://github.com/tensorflow/tensorflow + MLC_GIT_URL: https://github.com/tensorflow/tensorflow extra_cache_tags_from_env: -- env: CM_PYTHON_CACHE_TAGS +- env: MLC_PYTHON_CACHE_TAGS prefix: python- new_env_keys: - +C_INCLUDE_PATH @@ -37,4 +37,4 @@ versions: tags: gcc version_min: 10.0.0 env: - CM_GIT_CHECKOUT: master + MLC_GIT_CHECKOUT: master diff --git a/script/install-tflite-from-src/run.sh b/script/install-tflite-from-src/run.sh index fb453f2e6..c9e4aac2d 100644 --- a/script/install-tflite-from-src/run.sh +++ b/script/install-tflite-from-src/run.sh @@ -2,8 +2,8 @@ CUR_DIR=${PWD:-tmp} if [ ! -d "src" ]; then - echo "Cloning Tensorflow from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT} --depth ${CM_GIT_DEPTH}..." - git clone --recursive -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} --depth ${CM_GIT_DEPTH} src + echo "Cloning Tensorflow from ${MLC_GIT_URL} with branch ${MLC_GIT_CHECKOUT} --depth ${MLC_GIT_DEPTH}..." + git clone --recursive -b "${MLC_GIT_CHECKOUT}" ${MLC_GIT_URL} --depth ${MLC_GIT_DEPTH} src fi INSTALL_DIR="${CUR_DIR}" @@ -19,7 +19,7 @@ cmake ../src/tensorflow/lite/c if [ "${?}" != "0" ]; then exit 1; fi echo "******************************************************" -cmake --build . -j${CM_MAKE_CORES} +cmake --build . -j${MLC_MAKE_CORES} if [ "${?}" != "0" ]; then exit 1; fi diff --git a/script/install-torchvision-from-src/meta.yaml b/script/install-torchvision-from-src/meta.yaml index 5e6bf9681..30af395b6 100644 --- a/script/install-torchvision-from-src/meta.yaml +++ b/script/install-torchvision-from-src/meta.yaml @@ -10,18 +10,18 @@ deps: - python - python3 skip_if_env: - CM_CONDA_ENV: + MLC_CONDA_ENV: - 'yes' tags: get,python3 - names: - compiler tags: get,compiler - enable_if_env: - CM_TORCHVISION_NEEDS_PNG: + MLC_TORCHVISION_NEEDS_PNG: - 'yes' tags: get,generic-sys-util,_libpng-dev - env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_PYTORCH_VISION_SRC_REPO_PATH + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_PYTORCH_VISION_SRC_REPO_PATH extra_cache_tags: pytorchvision,torchvision,torchvision-src,src,pytorchvision-src,pytorchvision-src-repo names: - pytorchision-src-repo @@ -29,18 +29,18 @@ deps: tags: get,git,repo update_tags_from_env_with_prefix: _branch.: - - CM_GIT_CHECKOUT + - MLC_GIT_CHECKOUT _repo.: - - CM_GIT_URL + - MLC_GIT_URL _sha.: - - CM_GIT_CHECKOUT_SHA + - MLC_GIT_CHECKOUT_SHA _tag.: - - CM_GIT_CHECKOUT_TAG + - MLC_GIT_CHECKOUT_TAG env: - CM_GIT_URL: https://github.com/pytorch/vision + MLC_GIT_URL: https://github.com/pytorch/vision name: Build pytorchvision from sources new_env_keys: -- CM_PYTORCHVISION_* +- MLC_PYTORCHVISION_* prehook_deps: [] sort: 1000 tags: @@ -55,17 +55,17 @@ uid: 68b855780d474546 variations: branch.#: env: - CM_GIT_CHECKOUT: '#' + MLC_GIT_CHECKOUT: '#' cuda: deps: - names: - cuda tags: get,cuda,_cudnn env: - CUDA_HOME: <<>> - CUDA_NVCC_EXECUTABLE: <<>> - CUDNN_INCLUDE_PATH: <<>> - CUDNN_LIBRARY_PATH: <<>> + CUDA_HOME: <<>> + CUDA_NVCC_EXECUTABLE: <<>> + CUDNN_INCLUDE_PATH: <<>> + CUDNN_LIBRARY_PATH: <<>> TORCH_CUDA_ARCH_LIST: Ampere Ada Hopper TORCH_CXX_FLAGS: 
-D_GLIBCXX_USE_CXX11_ABI=1 USE_CUDA: '1' @@ -86,23 +86,23 @@ variations: env: {} python.#: env: - CM_PYTHON_BIN_WITH_PATH: '#' + MLC_PYTHON_BIN_WITH_PATH: '#' repo.#: env: - CM_GIT_URL: '#' + MLC_GIT_URL: '#' group: repo repo.https://github.com/pytorch/vision: default: true env: - CM_GIT_URL: https://github.com/pytorch/vision + MLC_GIT_URL: https://github.com/pytorch/vision group: repo sha.#: env: - CM_GIT_CHECKOUT_SHA: '#' + MLC_GIT_CHECKOUT_SHA: '#' tag.#: ad: pytorch-src-repo: tags: _no-recurse-submodules,_full-history env: - CM_GIT_CHECKOUT_TAG: '#' + MLC_GIT_CHECKOUT_TAG: '#' versions: {} diff --git a/script/install-torchvision-from-src/run.sh b/script/install-torchvision-from-src/run.sh index 3ba73deee..2f528cc3f 100644 --- a/script/install-torchvision-from-src/run.sh +++ b/script/install-torchvision-from-src/run.sh @@ -2,13 +2,13 @@ CUR_DIR=$PWD rm -rf pytorchvision -cp -r ${CM_PYTORCH_VISION_SRC_REPO_PATH} pytorchvision +cp -r ${MLC_PYTORCH_VISION_SRC_REPO_PATH} pytorchvision cd pytorchvision test "${?}" -eq "0" || exit $? rm -rf build -${CM_PYTHON_BIN_WITH_PATH} setup.py bdist_wheel +${MLC_PYTHON_BIN_WITH_PATH} setup.py bdist_wheel test "${?}" -eq "0" || exit $? cd dist -${CM_PYTHON_BIN_WITH_PATH} -m pip install torchvision*linux_x86_64.whl +${MLC_PYTHON_BIN_WITH_PATH} -m pip install torchvision*linux_x86_64.whl test "${?}" -eq "0" || exit $? diff --git a/script/install-tpp-pytorch-extension/customize.py b/script/install-tpp-pytorch-extension/customize.py index dfc6e52df..16a59d0aa 100644 --- a/script/install-tpp-pytorch-extension/customize.py +++ b/script/install-tpp-pytorch-extension/customize.py @@ -11,15 +11,15 @@ def preprocess(i): env = i['env'] - env['TPP_PEX_DIR'] = env['CM_TPP_PEX_SRC_REPO_PATH'] + env['TPP_PEX_DIR'] = env['MLC_TPP_PEX_SRC_REPO_PATH'] env['DNNL_GRAPH_BUILD_COMPILER_BACKEND'] = 1 - env['USE_LLVM'] = env['CM_LLVM_INSTALLED_PATH'] + env['USE_LLVM'] = env['MLC_LLVM_INSTALLED_PATH'] env['LLVM_DIR'] = os.path.join( - env['CM_LLVM_INSTALLED_PATH'], "lib", "cmake", "llvm") + env['MLC_LLVM_INSTALLED_PATH'], "lib", "cmake", "llvm") run_cmd = "python setup.py clean && python setup.py install" - env['CM_RUN_DIR'] = env['TPP_PEX_DIR'] - env['CM_RUN_CMD'] = run_cmd + env['MLC_RUN_DIR'] = env['TPP_PEX_DIR'] + env['MLC_RUN_CMD'] = run_cmd return {'return': 0} diff --git a/script/install-tpp-pytorch-extension/meta.yaml b/script/install-tpp-pytorch-extension/meta.yaml index 07ac48e4e..682718fa4 100644 --- a/script/install-tpp-pytorch-extension/meta.yaml +++ b/script/install-tpp-pytorch-extension/meta.yaml @@ -10,34 +10,34 @@ deps: - python - python3 skip_if_env: - CM_CONDA_ENV: + MLC_CONDA_ENV: - 'yes' tags: get,python3 - names: - pytorch skip_if_env: - CM_CONDA_ENV: + MLC_CONDA_ENV: - 'yes' tags: get,pytorch,from.src - env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_TPP_PEX_SRC_REPO_PATH + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_TPP_PEX_SRC_REPO_PATH extra_cache_tags: tpp,tpp-pex,src,tpp-pex-src,tpp-pex-src-repo names: - tpp-pex-src-repo tags: get,git,repo update_tags_from_env_with_prefix: _branch.: - - CM_GIT_CHECKOUT + - MLC_GIT_CHECKOUT _repo.: - - CM_GIT_URL + - MLC_GIT_URL _sha.: - - CM_GIT_CHECKOUT_SHA + - MLC_GIT_CHECKOUT_SHA _tag.: - - CM_GIT_CHECKOUT_TAG + - MLC_GIT_CHECKOUT_TAG env: {} name: Build TPP-PEX from sources new_env_keys: -- CM_TPP_PEX_* +- MLC_TPP_PEX_* prehook_deps: [] sort: 1000 tags: @@ -51,7 +51,7 @@ uid: 1701d2f5f4e84d42 variations: branch.#: env: - CM_GIT_CHECKOUT: '#' + MLC_GIT_CHECKOUT: '#' for-intel-mlperf-inference-v3.1-gptj: adr: conda-package: @@ 
-80,23 +80,23 @@ variations: version: 69.5.1 - tags: install,llvm,src,_for-intel-mlperf-inference-v3.1-gptj env: - CM_CONDA_ENV: 'yes' + MLC_CONDA_ENV: 'yes' repo.#: env: - CM_GIT_URL: '#' + MLC_GIT_URL: '#' group: repo repo.https://github.com/libxsmm/tpp-pytorch-extension: default: true env: - CM_GIT_URL: https://github.com/libxsmm/tpp-pytorch-extension + MLC_GIT_URL: https://github.com/libxsmm/tpp-pytorch-extension group: repo sha.#: env: - CM_GIT_CHECKOUT_SHA: '#' + MLC_GIT_CHECKOUT_SHA: '#' tag.#: ad: pytorch-src-repo: tags: _no-recurse-submodules,_full-history env: - CM_GIT_CHECKOUT_TAG: '#' + MLC_GIT_CHECKOUT_TAG: '#' versions: {} diff --git a/script/install-tpp-pytorch-extension/run.sh b/script/install-tpp-pytorch-extension/run.sh index d426d4004..098a7ab05 100644 --- a/script/install-tpp-pytorch-extension/run.sh +++ b/script/install-tpp-pytorch-extension/run.sh @@ -1,10 +1,10 @@ #!/bin/bash -export PATH=${CM_CONDA_BIN_PATH}:${PATH} +export PATH=${MLC_CONDA_BIN_PATH}:${PATH} -cd ${CM_RUN_DIR} -echo ${CM_RUN_CMD} -eval ${CM_RUN_CMD} +cd ${MLC_RUN_DIR} +echo ${MLC_RUN_CMD} +eval ${MLC_RUN_CMD} if [ "${?}" != "0" ]; then exit 1; fi diff --git a/script/install-transformers-from-src/customize.py b/script/install-transformers-from-src/customize.py index d83e374e3..9af8e1dd6 100644 --- a/script/install-transformers-from-src/customize.py +++ b/script/install-transformers-from-src/customize.py @@ -13,7 +13,7 @@ def preprocess(i): run_cmd = "python setup.py install" - env['CM_RUN_CMD'] = run_cmd + env['MLC_RUN_CMD'] = run_cmd automation = i['automation'] diff --git a/script/install-transformers-from-src/meta.yaml b/script/install-transformers-from-src/meta.yaml index d2c411c97..5de2098e2 100644 --- a/script/install-transformers-from-src/meta.yaml +++ b/script/install-transformers-from-src/meta.yaml @@ -10,35 +10,35 @@ deps: - python - python3 skip_if_env: - CM_CONDA_ENV: + MLC_CONDA_ENV: - 'yes' tags: get,python3 - names: - pytorch skip_if_env: - CM_CONDA_ENV: + MLC_CONDA_ENV: - 'yes' tags: get,pytorch,from.src - env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_TRANSFORMERS_SRC_REPO_PATH + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_TRANSFORMERS_SRC_REPO_PATH extra_cache_tags: transformers,src,transformers-src,transformers-src-repo names: - transformers-src-repo tags: get,git,repo update_tags_from_env_with_prefix: _branch.: - - CM_GIT_CHECKOUT + - MLC_GIT_CHECKOUT _repo.: - - CM_GIT_URL + - MLC_GIT_URL _sha.: - - CM_GIT_CHECKOUT_SHA + - MLC_GIT_CHECKOUT_SHA _tag.: - - CM_GIT_CHECKOUT_TAG + - MLC_GIT_CHECKOUT_TAG env: - CM_GIT_URL: https://github.com/huggingface/transformers + MLC_GIT_URL: https://github.com/huggingface/transformers name: Build transformers from sources new_env_keys: -- CM_TRANSFORMERS_* +- MLC_TRANSFORMERS_* prehook_deps: [] sort: 1000 tags: @@ -51,7 +51,7 @@ uid: 88512c48ea5c4186 variations: branch.#: env: - CM_GIT_CHECKOUT: '#' + MLC_GIT_CHECKOUT: '#' for-intel-mlperf-inference-v3.1-bert: adr: conda-package: @@ -78,23 +78,23 @@ variations: - setuptools tags: get,generic,conda-package,_package.setuptools,_source.conda-forge env: - CM_CONDA_ENV: 'yes' + MLC_CONDA_ENV: 'yes' repo.#: env: - CM_GIT_URL: '#' + MLC_GIT_URL: '#' group: repo repo.https://github.com/pytorch/pytorch: default: true env: - CM_GIT_URL: https://github.com/huggingface/transformers + MLC_GIT_URL: https://github.com/huggingface/transformers group: repo sha.#: env: - CM_GIT_CHECKOUT_SHA: '#' + MLC_GIT_CHECKOUT_SHA: '#' tag.#: ad: pytorch-src-repo: tags: _no-recurse-submodules,_full-history env: - CM_GIT_CHECKOUT_TAG: '#' + 
MLC_GIT_CHECKOUT_TAG: '#' versions: {} diff --git a/script/install-transformers-from-src/run.sh b/script/install-transformers-from-src/run.sh index 8af8c6c77..ff31ed71e 100644 --- a/script/install-transformers-from-src/run.sh +++ b/script/install-transformers-from-src/run.sh @@ -1,11 +1,11 @@ #!/bin/bash -export PATH=${CM_CONDA_BIN_PATH}:$PATH +export PATH=${MLC_CONDA_BIN_PATH}:$PATH CUR_DIR=$PWD echo $PWD rm -rf transformers -cmd="cp -r ${CM_TRANSFORMERS_SRC_REPO_PATH} transformers" +cmd="cp -r ${MLC_TRANSFORMERS_SRC_REPO_PATH} transformers" echo "$cmd" eval "$cmd" cd transformers @@ -16,8 +16,8 @@ if [ "${?}" != "0" ]; then exit 1; fi git apply transformers.patch if [ "${?}" != "0" ]; then exit 1; fi -echo ${CM_RUN_CMD} -eval ${CM_RUN_CMD} +echo ${MLC_RUN_CMD} +eval ${MLC_RUN_CMD} if [ "${?}" != "0" ]; then exit 1; fi diff --git a/script/plug-prebuilt-cudnn-to-cuda/customize.py b/script/plug-prebuilt-cudnn-to-cuda/customize.py index 4f495e035..ec3598416 100644 --- a/script/plug-prebuilt-cudnn-to-cuda/customize.py +++ b/script/plug-prebuilt-cudnn-to-cuda/customize.py @@ -13,11 +13,11 @@ def preprocess(i): env = i['env'] if str(env.get('CUDA_SKIP_SUDO', '')).lower() == 'true': - env['CM_SUDO'] = '' + env['MLC_SUDO'] = '' meta = i['meta'] automation = i['automation'] - version = env.get('CM_VERSION') + version = env.get('MLC_VERSION') supported_versions = list(meta['versions'].keys()) @@ -25,17 +25,17 @@ def preprocess(i): return {'return': 1, 'error': "Only cuDNN versions {} are supported now".format( ', '.join(supported_versions))} - env['CM_CUDNN_VERSION'] = version + env['MLC_CUDNN_VERSION'] = version - filename = env['CM_CUDNN_TAR_FILE_NAME_TEMPLATE'] - cudnn_md5sum = env.get('CM_CUDNN_TAR_MD5SUM', '') + filename = env['MLC_CUDNN_TAR_FILE_NAME_TEMPLATE'] + cudnn_md5sum = env.get('MLC_CUDNN_TAR_MD5SUM', '') - cuda_version_split = env['CM_CUDA_VERSION'].split('.') + cuda_version_split = env['MLC_CUDA_VERSION'].split('.') cuda_version_major = cuda_version_split[0] filename = filename.replace('{{CUDA_MAJOR_VERSION}}', cuda_version_major) - env['CM_CUDNN_TAR_FILE_NAME'] = filename + env['MLC_CUDNN_TAR_FILE_NAME'] = filename cudnn_dir = filename[:-7] @@ -44,9 +44,9 @@ def preprocess(i): print('') print(f'URL to download cuDNN: {cudnn_url}') - env['CM_CUDNN_TAR_DIR'] = cudnn_dir - env['CM_CUDNN_UNTAR_PATH'] = os.path.join(cur_dir, cudnn_dir) + env['MLC_CUDNN_TAR_DIR'] = cudnn_dir + env['MLC_CUDNN_UNTAR_PATH'] = os.path.join(cur_dir, cudnn_dir) env['WGET_URL'] = cudnn_url - env['CM_DOWNLOAD_CHECKSUM'] = cudnn_md5sum + env['MLC_DOWNLOAD_CHECKSUM'] = cudnn_md5sum return {'return': 0} diff --git a/script/plug-prebuilt-cudnn-to-cuda/meta.yaml b/script/plug-prebuilt-cudnn-to-cuda/meta.yaml index da6f26635..555fc777e 100644 --- a/script/plug-prebuilt-cudnn-to-cuda/meta.yaml +++ b/script/plug-prebuilt-cudnn-to-cuda/meta.yaml @@ -14,7 +14,7 @@ cache: true category: CUDA automation default_env: - CM_SUDO: sudo + MLC_SUDO: sudo default_version: 9.3.0 @@ -31,23 +31,23 @@ input_description: desc: Full path to the cuDNN Tar file downloaded from Nvidia website (https://developer.nvidia.com/cudnn) input_mapping: - tar_file: CM_CUDNN_TAR_FILE_PATH + tar_file: MLC_CUDNN_TAR_FILE_PATH skip_sudo: CUDA_SKIP_SUDO new_env_keys: -- CM_CUDNN_* +- MLC_CUDNN_* prehook_deps: #- tags: get,generic-sys-util,_xz - tags: download,file env: - CM_DOWNLOAD_FINAL_ENV_NAME: CM_CUDNN_TAR_FILE_PATH + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_CUDNN_TAR_FILE_PATH extra_cache_tags: cudnn,archive force_cache: true names: - download-script 
skip_if_env: - CM_CUDNN_TAR_FILE_PATH: + MLC_CUDNN_TAR_FILE_PATH: - True update_tags_from_env_with_prefix: _url.: @@ -56,5 +56,5 @@ prehook_deps: versions: 9.3.0: env: - CM_CUDNN_TAR_FILE_NAME_TEMPLATE: cudnn-linux-x86_64-9.3.0.75_cuda{{CUDA_MAJOR_VERSION}}-archive.tar.xz - CM_CUDNN_TAR_MD5SUM: 2fa73268de8bbdab5560f4aa1a5a73ab + MLC_CUDNN_TAR_FILE_NAME_TEMPLATE: cudnn-linux-x86_64-9.3.0.75_cuda{{CUDA_MAJOR_VERSION}}-archive.tar.xz + MLC_CUDNN_TAR_MD5SUM: 2fa73268de8bbdab5560f4aa1a5a73ab diff --git a/script/plug-prebuilt-cudnn-to-cuda/run.sh b/script/plug-prebuilt-cudnn-to-cuda/run.sh index bf6e72ec3..4bcd029f7 100644 --- a/script/plug-prebuilt-cudnn-to-cuda/run.sh +++ b/script/plug-prebuilt-cudnn-to-cuda/run.sh @@ -5,33 +5,33 @@ INSTALL_DIR=${CUR}/install echo "******************************************" echo "${CUR}" -echo "${CM_CUDNN_TAR_FILE_PATH}" -echo "${CM_CUDNN_TAR_DIR}" -echo "${CM_CUDNN_UNTAR_PATH}" +echo "${MLC_CUDNN_TAR_FILE_PATH}" +echo "${MLC_CUDNN_TAR_DIR}" +echo "${MLC_CUDNN_UNTAR_PATH}" echo "${CUDA_HOME}" -echo "${CM_CUDA_PATH_INCLUDE}" -echo "${CM_CUDA_PATH_LIB}" +echo "${MLC_CUDA_PATH_INCLUDE}" +echo "${MLC_CUDA_PATH_LIB}" echo "******************************************" echo "Untaring file ..." echo "" -tar -xf ${CM_CUDNN_TAR_FILE_PATH} +tar -xf ${MLC_CUDNN_TAR_FILE_PATH} test $? -eq 0 || exit $? echo "Copying include files ..." echo "" -${CM_SUDO} cp -P ${CM_CUDNN_TAR_DIR}/include/cudnn*.h ${CM_CUDA_PATH_INCLUDE} -${CM_SUDO} chmod a+r ${CM_CUDA_PATH_INCLUDE}/cudnn*.h +${MLC_SUDO} cp -P ${MLC_CUDNN_TAR_DIR}/include/cudnn*.h ${MLC_CUDA_PATH_INCLUDE} +${MLC_SUDO} chmod a+r ${MLC_CUDA_PATH_INCLUDE}/cudnn*.h echo "Copying lib files ..." echo "" -${CM_SUDO} cp -P ${CM_CUDNN_TAR_DIR}/lib/libcudnn* ${CM_CUDA_PATH_LIB} -${CM_SUDO} chmod a+r ${CM_CUDA_PATH_LIB}/libcudnn* +${MLC_SUDO} cp -P ${MLC_CUDNN_TAR_DIR}/lib/libcudnn* ${MLC_CUDA_PATH_LIB} +${MLC_SUDO} chmod a+r ${MLC_CUDA_PATH_LIB}/libcudnn* echo "Adding file that cuDNN is installed ..." 
echo "" -if [ "${CM_SUDO}" == "sudo" ]; then - ${CM_SUDO} sh -c "echo '${CM_VERSION}' > ${CUDA_HOME}/cm_installed_cudnn.txt" +if [ "${MLC_SUDO}" == "sudo" ]; then + ${MLC_SUDO} sh -c "echo '${MLC_VERSION}' > ${CUDA_HOME}/cm_installed_cudnn.txt" else - echo "${CM_VERSION}" > ${CUDA_HOME}/cm_installed_cudnn.txt + echo "${MLC_VERSION}" > ${CUDA_HOME}/cm_installed_cudnn.txt fi diff --git a/script/plug-prebuilt-cusparselt-to-cuda/customize.py b/script/plug-prebuilt-cusparselt-to-cuda/customize.py index 20980c022..f26246f09 100644 --- a/script/plug-prebuilt-cusparselt-to-cuda/customize.py +++ b/script/plug-prebuilt-cusparselt-to-cuda/customize.py @@ -13,11 +13,11 @@ def preprocess(i): env = i['env'] if str(env.get('CUDA_SKIP_SUDO', '')).lower() == 'true': - env['CM_SUDO'] = '' + env['MLC_SUDO'] = '' meta = i['meta'] automation = i['automation'] - version = env.get('CM_VERSION') + version = env.get('MLC_VERSION') supported_versions = list(meta['versions'].keys()) @@ -25,17 +25,17 @@ def preprocess(i): return {'return': 1, 'error': "Only CUSPARSELT versions {} are supported now".format( ', '.join(supported_versions))} - env['CM_CUSPARSELT_VERSION'] = version + env['MLC_CUSPARSELT_VERSION'] = version - filename = env['CM_CUSPARSELT_TAR_FILE_NAME_TEMPLATE'] - cusparselt_md5sum = env.get('CM_CUSPARSELT_TAR_MD5SUM', '') + filename = env['MLC_CUSPARSELT_TAR_FILE_NAME_TEMPLATE'] + cusparselt_md5sum = env.get('MLC_CUSPARSELT_TAR_MD5SUM', '') - cuda_version_split = env['CM_CUDA_VERSION'].split('.') + cuda_version_split = env['MLC_CUDA_VERSION'].split('.') cuda_version_major = cuda_version_split[0] filename = filename.replace('{{CUDA_MAJOR_VERSION}}', cuda_version_major) - env['CM_CUSPARSELT_TAR_FILE_NAME'] = filename + env['MLC_CUSPARSELT_TAR_FILE_NAME'] = filename cusparselt_dir = filename[:-7] @@ -44,9 +44,9 @@ def preprocess(i): print('') print(f'URL to download CUSPARSELT: {cusparselt_url}') - env['CM_CUSPARSELT_TAR_DIR'] = cusparselt_dir - env['CM_CUSPARSELT_UNTAR_PATH'] = os.path.join(cur_dir, cusparselt_dir) + env['MLC_CUSPARSELT_TAR_DIR'] = cusparselt_dir + env['MLC_CUSPARSELT_UNTAR_PATH'] = os.path.join(cur_dir, cusparselt_dir) env['WGET_URL'] = cusparselt_url - env['CM_DOWNLOAD_CHECKSUM'] = cusparselt_md5sum + env['MLC_DOWNLOAD_CHECKSUM'] = cusparselt_md5sum return {'return': 0} diff --git a/script/plug-prebuilt-cusparselt-to-cuda/meta.yaml b/script/plug-prebuilt-cusparselt-to-cuda/meta.yaml index b542a31f6..a45f65c48 100644 --- a/script/plug-prebuilt-cusparselt-to-cuda/meta.yaml +++ b/script/plug-prebuilt-cusparselt-to-cuda/meta.yaml @@ -14,7 +14,7 @@ cache: true category: CUDA automation default_env: - CM_SUDO: sudo + MLC_SUDO: sudo default_version: 0.6.2.3 @@ -31,23 +31,23 @@ input_description: desc: Full path to the cuSPARSELt Tar file downloaded from Nvidia website (https://developer.nvidia.com/cusparselt-downloads) input_mapping: - tar_file: CM_CUSPARSELT_TAR_FILE_PATH + tar_file: MLC_CUSPARSELT_TAR_FILE_PATH skip_sudo: CUDA_SKIP_SUDO new_env_keys: -- CM_CUSPARSELT_* +- MLC_CUSPARSELT_* prehook_deps: #- tags: get,generic-sys-util,_xz - tags: download,file env: - CM_DOWNLOAD_FINAL_ENV_NAME: CM_CUSPARSELT_TAR_FILE_PATH + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_CUSPARSELT_TAR_FILE_PATH extra_cache_tags: cusparselt,archive force_cache: true names: - download-script skip_if_env: - CM_CUSPARSELT_TAR_FILE_PATH: + MLC_CUSPARSELT_TAR_FILE_PATH: - True update_tags_from_env_with_prefix: _url.: @@ -56,5 +56,5 @@ prehook_deps: versions: 0.6.2.3: env: - CM_CUSPARSELT_TAR_FILE_NAME_TEMPLATE: 
libcusparse_lt-linux-x86_64-0.6.2.3-archive.tar.xz - CM_CUSPARSELT_TAR_MD5SUM: 2fa73268de8bbdab5560f4aa1a5a73ab + MLC_CUSPARSELT_TAR_FILE_NAME_TEMPLATE: libcusparse_lt-linux-x86_64-0.6.2.3-archive.tar.xz + MLC_CUSPARSELT_TAR_MD5SUM: 2fa73268de8bbdab5560f4aa1a5a73ab diff --git a/script/plug-prebuilt-cusparselt-to-cuda/run.sh b/script/plug-prebuilt-cusparselt-to-cuda/run.sh index d500f56c1..e11a9596a 100644 --- a/script/plug-prebuilt-cusparselt-to-cuda/run.sh +++ b/script/plug-prebuilt-cusparselt-to-cuda/run.sh @@ -5,33 +5,33 @@ INSTALL_DIR=${CUR}/install echo "******************************************" echo "${CUR}" -echo "${CM_CUSPARSELT_TAR_FILE_PATH}" -echo "${CM_CUSPARSELT_TAR_DIR}" -echo "${CM_CUSPARSELT_UNTAR_PATH}" +echo "${MLC_CUSPARSELT_TAR_FILE_PATH}" +echo "${MLC_CUSPARSELT_TAR_DIR}" +echo "${MLC_CUSPARSELT_UNTAR_PATH}" echo "${CUDA_HOME}" -echo "${CM_CUDA_PATH_INCLUDE}" -echo "${CM_CUDA_PATH_LIB}" +echo "${MLC_CUDA_PATH_INCLUDE}" +echo "${MLC_CUDA_PATH_LIB}" echo "******************************************" echo "Untaring file ..." echo "" -tar -xf ${CM_CUSPARSELT_TAR_FILE_PATH} +tar -xf ${MLC_CUSPARSELT_TAR_FILE_PATH} test $? -eq 0 || exit $? echo "Copying include files ..." echo "" -${CM_SUDO} cp -P ${CM_CUSPARSELT_TAR_DIR}/include/cusparseLt*.h ${CM_CUDA_PATH_INCLUDE} -${CM_SUDO} chmod a+r ${CM_CUDA_PATH_INCLUDE}/cusparseLt*.h +${MLC_SUDO} cp -P ${MLC_CUSPARSELT_TAR_DIR}/include/cusparseLt*.h ${MLC_CUDA_PATH_INCLUDE} +${MLC_SUDO} chmod a+r ${MLC_CUDA_PATH_INCLUDE}/cusparseLt*.h echo "Copying lib files ..." echo "" -${CM_SUDO} cp -P ${CM_CUSPARSELT_TAR_DIR}/lib/libcusparseLt* ${CM_CUDA_PATH_LIB} -${CM_SUDO} chmod a+r ${CM_CUDA_PATH_LIB}/libcusparseLt* +${MLC_SUDO} cp -P ${MLC_CUSPARSELT_TAR_DIR}/lib/libcusparseLt* ${MLC_CUDA_PATH_LIB} +${MLC_SUDO} chmod a+r ${MLC_CUDA_PATH_LIB}/libcusparseLt* echo "Adding file that CUSPARSELT is installed ..." 
echo "" -if [ "${CM_SUDO}" == "sudo" ]; then - ${CM_SUDO} sh -c "echo '${CM_VERSION}' > ${CUDA_HOME}/cm_installed_cusparselt.txt" +if [ "${MLC_SUDO}" == "sudo" ]; then + ${MLC_SUDO} sh -c "echo '${MLC_VERSION}' > ${CUDA_HOME}/cm_installed_cusparselt.txt" else - echo "${CM_VERSION}" > ${CUDA_HOME}/cm_installed_cusparselt.txt + echo "${MLC_VERSION}" > ${CUDA_HOME}/cm_installed_cusparselt.txt fi diff --git a/script/prepare-training-data-bert/customize.py b/script/prepare-training-data-bert/customize.py index 1b0de8e90..63bf5b110 100644 --- a/script/prepare-training-data-bert/customize.py +++ b/script/prepare-training-data-bert/customize.py @@ -12,34 +12,34 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') - datadir = env.get('CM_DATA_DIR', os.getcwd()) - env['CM_DATA_DIR'] = datadir + datadir = env.get('MLC_DATA_DIR', os.getcwd()) + env['MLC_DATA_DIR'] = datadir - env['CM_BERT_CONFIG_DOWNLOAD_DIR'] = os.path.join(datadir, "phase1") - env['CM_BERT_VOCAB_DOWNLOAD_DIR'] = os.path.join(datadir, "phase1") - env['CM_BERT_DATA_DOWNLOAD_DIR'] = os.path.join(datadir, "download") + env['MLC_BERT_CONFIG_DOWNLOAD_DIR'] = os.path.join(datadir, "phase1") + env['MLC_BERT_VOCAB_DOWNLOAD_DIR'] = os.path.join(datadir, "phase1") + env['MLC_BERT_DATA_DOWNLOAD_DIR'] = os.path.join(datadir, "download") - env['CM_BERT_CHECKPOINT_DOWNLOAD_DIR'] = os.path.join(datadir, "phase1") + env['MLC_BERT_CHECKPOINT_DOWNLOAD_DIR'] = os.path.join(datadir, "phase1") - if env.get("CM_TMP_VARIATION", "") == "nvidia": + if env.get("MLC_TMP_VARIATION", "") == "nvidia": code_path = os.path.join( - env['CM_GIT_REPO_CHECKOUT_PATH'], + env['MLC_GIT_REPO_CHECKOUT_PATH'], 'NVIDIA', 'benchmarks', 'bert', 'implementations', 'pytorch-22.09') - env['CM_RUN_DIR'] = code_path - elif env.get("CM_TMP_VARIATION", "") == "reference": + env['MLC_RUN_DIR'] = code_path + elif env.get("MLC_TMP_VARIATION", "") == "reference": code_path = os.path.join( - env['CM_MLPERF_TRAINING_SOURCE'], + env['MLC_MLPERF_TRAINING_SOURCE'], 'language_model', 'tensorflow', 'bert', 'cleanup_scripts') - env['CM_RUN_DIR'] = code_path + env['MLC_RUN_DIR'] = code_path return {'return': 0} @@ -48,19 +48,19 @@ def postprocess(i): env = i['env'] - data_dir = env['CM_DATA_DIR'] - env['CM_MLPERF_TRAINING_BERT_DATA_PATH'] = data_dir + data_dir = env['MLC_DATA_DIR'] + env['MLC_MLPERF_TRAINING_BERT_DATA_PATH'] = data_dir - if env.get("CM_TMP_VARIATION", "") == "nvidia": - env['CM_GET_DEPENDENT_CACHED_PATH'] = os.path.join( + if env.get("MLC_TMP_VARIATION", "") == "nvidia": + env['MLC_GET_DEPENDENT_CACHED_PATH'] = os.path.join( data_dir, "hdf5", "eval", "eval_all.hdf5") - elif env.get("CM_TMP_VARIATION", "") == "reference": - env['CM_GET_DEPENDENT_CACHED_PATH'] = os.path.join( + elif env.get("MLC_TMP_VARIATION", "") == "reference": + env['MLC_GET_DEPENDENT_CACHED_PATH'] = os.path.join( data_dir, "tfrecords", "eval_10k") - env['CM_MLPERF_TRAINING_BERT_TFRECORDS_PATH'] = os.path.join( + env['MLC_MLPERF_TRAINING_BERT_TFRECORDS_PATH'] = os.path.join( data_dir, "tfrecords") - env['CM_MLPERF_TRAINING_BERT_VOCAB_PATH'] = env['CM_BERT_VOCAB_FILE_PATH'] - env['CM_MLPERF_TRAINING_BERT_CONFIG_PATH'] = env['CM_BERT_CONFIG_FILE_PATH'] + env['MLC_MLPERF_TRAINING_BERT_VOCAB_PATH'] = env['MLC_BERT_VOCAB_FILE_PATH'] + env['MLC_MLPERF_TRAINING_BERT_CONFIG_PATH'] = env['MLC_BERT_CONFIG_FILE_PATH'] return {'return': 0} diff --git a/script/prepare-training-data-bert/meta.yaml 
b/script/prepare-training-data-bert/meta.yaml index de1a41141..c8032e2ff 100644 --- a/script/prepare-training-data-bert/meta.yaml +++ b/script/prepare-training-data-bert/meta.yaml @@ -6,68 +6,68 @@ category: MLPerf benchmark support deps: [] input_description: {} input_mapping: - clean: CM_MLPERF_TRAINING_CLEAN_TFRECORDS - data_dir: CM_DATA_DIR + clean: MLC_MLPERF_TRAINING_CLEAN_TFRECORDS + data_dir: MLC_DATA_DIR new_env_keys: -- CM_MLPERF_TRAINING_BERT_* +- MLC_MLPERF_TRAINING_BERT_* new_state_keys: [] post_deps: [] posthook_deps: [] prehook_deps: - env: - CM_DOWNLOAD_CHECKSUM: 7f59165e21b7d566db610ff6756c926b - CM_DOWNLOAD_FILENAME: bert_config.json - CM_DOWNLOAD_FINAL_ENV_NAME: CM_BERT_CONFIG_FILE_PATH - CM_DOWNLOAD_PATH: <<>> + MLC_DOWNLOAD_CHECKSUM: 7f59165e21b7d566db610ff6756c926b + MLC_DOWNLOAD_FILENAME: bert_config.json + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_BERT_CONFIG_FILE_PATH + MLC_DOWNLOAD_PATH: <<>> extra_cache_tags: mlperf,training,bert,config force_cache: true tags: download,file,_gdown,_url.https://drive.google.com/uc?id=1fbGClQMi2CoMv7fwrwTC5YYPooQBdcFW - env: - CM_DOWNLOAD_CHECKSUM: 64800d5d8528ce344256daf115d4965e - CM_DOWNLOAD_FILENAME: vocab.txt - CM_DOWNLOAD_FINAL_ENV_NAME: CM_BERT_VOCAB_FILE_PATH - CM_DOWNLOAD_PATH: <<>> + MLC_DOWNLOAD_CHECKSUM: 64800d5d8528ce344256daf115d4965e + MLC_DOWNLOAD_FILENAME: vocab.txt + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_BERT_VOCAB_FILE_PATH + MLC_DOWNLOAD_PATH: <<>> extra_cache_tags: bert,vocab force_cache: true tags: download,file,_gdown,_url.https://drive.google.com/uc?id=1USK108J6hMM_d27xCHi738qBL8_BT1u1 - env: - CM_DOWNLOAD_CHECKSUM: 7d3a0619cb8bf7e829af99fa5c29daa8 - CM_DOWNLOAD_FILENAME: bert_reference_results_text_md5.txt - CM_DOWNLOAD_FINAL_ENV_NAME: CM_BERT_REFERENCE_RESULTS_TEXT_MD5_FILE_PATH - CM_DOWNLOAD_PATH: <<>> + MLC_DOWNLOAD_CHECKSUM: 7d3a0619cb8bf7e829af99fa5c29daa8 + MLC_DOWNLOAD_FILENAME: bert_reference_results_text_md5.txt + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_BERT_REFERENCE_RESULTS_TEXT_MD5_FILE_PATH + MLC_DOWNLOAD_PATH: <<>> extra_cache_tags: bert,data,results,md5 force_cache: true tags: download,file,_gdown,_url.https://drive.google.com/uc?id=1tmMgLwoBvbEJEHXh77sqrXYw5RpqT8R_ - env: - CM_DOWNLOAD_CHECKSUM: '' - CM_DOWNLOAD_FILENAME: results_text.tar.gz - CM_DOWNLOAD_PATH: <<>> - CM_EXTRACT_EXTRACTED_CHECKSUM_FILE: <<>> - CM_EXTRACT_EXTRACTED_FILENAME: results4 - CM_EXTRACT_FINAL_ENV_NAME: CM_BERT_TRAINING_DATA_PATH - CM_EXTRACT_PATH: <<>> + MLC_DOWNLOAD_CHECKSUM: '' + MLC_DOWNLOAD_FILENAME: results_text.tar.gz + MLC_DOWNLOAD_PATH: <<>> + MLC_EXTRACT_EXTRACTED_CHECKSUM_FILE: <<>> + MLC_EXTRACT_EXTRACTED_FILENAME: results4 + MLC_EXTRACT_FINAL_ENV_NAME: MLC_BERT_TRAINING_DATA_PATH + MLC_EXTRACT_PATH: <<>> extra_cache_tags: bert,data,results force_cache: true tags: download-and-extract,file,_gdown,_extract,_url.https://drive.google.com/uc?id=14xV2OUGSQDG_yDBrmbSdcDC-QGeqpfs_ - env: - CM_DOWNLOAD_CHECKSUM: 50797acd537880bfb5a7ade80d976129 - CM_DOWNLOAD_FILENAME: model.ckpt-28252.data-00000-of-00001 - CM_DOWNLOAD_FINAL_ENV_NAME: CM_BERT_CHECKPOINT_FILE_PATH - CM_DOWNLOAD_PATH: <<>> + MLC_DOWNLOAD_CHECKSUM: 50797acd537880bfb5a7ade80d976129 + MLC_DOWNLOAD_FILENAME: model.ckpt-28252.data-00000-of-00001 + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_BERT_CHECKPOINT_FILE_PATH + MLC_DOWNLOAD_PATH: <<>> extra_cache_tags: bert,checkpoint,data force_cache: true tags: download,file,_gdown,_url.https://drive.google.com/uc?id=1chiTBljF0Eh1U5pKs6ureVHgSbtU8OG_ - env: - CM_DOWNLOAD_CHECKSUM: f97de3ae180eb8d479555c939d50d048 - CM_DOWNLOAD_FILENAME: 
model.ckpt-28252.index - CM_DOWNLOAD_PATH: <<>> + MLC_DOWNLOAD_CHECKSUM: f97de3ae180eb8d479555c939d50d048 + MLC_DOWNLOAD_FILENAME: model.ckpt-28252.index + MLC_DOWNLOAD_PATH: <<>> extra_cache_tags: bert,checkpoint,index force_cache: true tags: download,file,_gdown,_url.https://drive.google.com/uc?id=1Q47V3K3jFRkbJ2zGCrKkKk-n0fvMZsa0 - env: - CM_DOWNLOAD_CHECKSUM: dbd16c731e8a8113bc08eeed0326b8e7 - CM_DOWNLOAD_FILENAME: model.ckpt-28252.meta - CM_DOWNLOAD_PATH: <<>> + MLC_DOWNLOAD_CHECKSUM: dbd16c731e8a8113bc08eeed0326b8e7 + MLC_DOWNLOAD_FILENAME: model.ckpt-28252.meta + MLC_DOWNLOAD_PATH: <<>> extra_cache_tags: bert,checkpoint,meta force_cache: true tags: download,file,_gdown,_url.https://drive.google.com/uc?id=1vAcVmXSLsLeQ1q7gvHnQUSth5W_f_pwv @@ -86,7 +86,7 @@ variations: - extra_cache_tags: mlperf,training,results tags: get,git,repo,_repo.https://github.com/wchen61/training_results_v2.1,_branch.fix_bert_prepare_data env: - CM_TMP_VARIATION: nvidia + MLC_TMP_VARIATION: nvidia group: implementation reference: deps: @@ -102,6 +102,6 @@ variations: version_max: 3.20.1 version_max_usable: 3.20.1 env: - CM_TMP_VARIATION: reference + MLC_TMP_VARIATION: reference group: implementation versions: {} diff --git a/script/prepare-training-data-bert/run-nvidia.sh b/script/prepare-training-data-bert/run-nvidia.sh index 23cd41289..016f6e07d 100644 --- a/script/prepare-training-data-bert/run-nvidia.sh +++ b/script/prepare-training-data-bert/run-nvidia.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,17 +17,17 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi } #Add your run commands here... -# run "$CM_RUN_CMD" +# run "$MLC_RUN_CMD" -CUR=${CM_DATA_DIR:-"$PWD/data"} -run "cd \"${CM_RUN_DIR}\"" +CUR=${MLC_DATA_DIR:-"$PWD/data"} +run "cd \"${MLC_RUN_DIR}\"" run "docker build --pull -t mlperf-nvidia:language_model ." run "ID=`docker run -dt --runtime=nvidia --ipc=host -v $CUR:/workspace/bert_data mlperf-nvidia:language_model bash`" run "docker exec $ID bash -c 'cd /workspace/bert && ./input_preprocessing/prepare_data.sh -s --outputdir /workspace/bert_data'" diff --git a/script/prepare-training-data-bert/run-reference.sh b/script/prepare-training-data-bert/run-reference.sh index 97524312f..a72f3adfd 100644 --- a/script/prepare-training-data-bert/run-reference.sh +++ b/script/prepare-training-data-bert/run-reference.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,30 +17,30 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi } #Add your run commands here... 
-# run "$CM_RUN_CMD" +# run "$MLC_RUN_CMD" CUR=$PWD -DATA_DIR=${CM_DATA_DIR:-"$PWD/data"} +DATA_DIR=${MLC_DATA_DIR:-"$PWD/data"} -cd ${CM_RUN_DIR} +cd ${MLC_RUN_DIR} mkdir -p ${DATA_DIR}/tfrecords for i in $(seq -f "%05g" 0 499) do FILENAME="${DATA_DIR}/tfrecords/part-${i}-of-00500" - if [[ ${CM_MLPERF_TRAINING_CLEAN_TFRECORDS} != "yes" && -f ${FILENAME} && $(stat -c%s "$FILENAME") -gt 500000000 ]] ; then + if [[ ${MLC_MLPERF_TRAINING_CLEAN_TFRECORDS} != "yes" && -f ${FILENAME} && $(stat -c%s "$FILENAME") -gt 500000000 ]] ; then echo "Skipping regenerating existing ${FILENAME}" continue; fi cmd="python3 create_pretraining_data.py \ - --input_file=${CM_BERT_DATA_DOWNLOAD_DIR}/results4/part-${i}-of-00500 \ + --input_file=${MLC_BERT_DATA_DOWNLOAD_DIR}/results4/part-${i}-of-00500 \ --output_file=${DATA_DIR}/tfrecords/part-${i}-of-00500 \ - --vocab_file=${CM_BERT_VOCAB_FILE_PATH} \ + --vocab_file=${MLC_BERT_VOCAB_FILE_PATH} \ --do_lower_case=True \ --max_seq_length=512 \ --max_predictions_per_seq=76 \ @@ -51,13 +51,13 @@ do done FILENAME="${DATA_DIR}/eval_intermediate" -if [[ ${CM_MLPERF_TRAINING_CLEAN_TFRECORDS} != "yes" && -f ${FILENAME} && $(stat -c%s "$FILENAME") -gt 800000000 ]] ; then +if [[ ${MLC_MLPERF_TRAINING_CLEAN_TFRECORDS} != "yes" && -f ${FILENAME} && $(stat -c%s "$FILENAME") -gt 800000000 ]] ; then echo "Skipping regenerating existing ${FILENAME}" else cmd="python3 create_pretraining_data.py \ - --input_file=${CM_BERT_DATA_DOWNLOAD_DIR}/results4/eval.txt \ + --input_file=${MLC_BERT_DATA_DOWNLOAD_DIR}/results4/eval.txt \ --output_file=${DATA_DIR}/eval_intermediate \ - --vocab_file=${CM_BERT_VOCAB_FILE_PATH} \ + --vocab_file=${MLC_BERT_VOCAB_FILE_PATH} \ --do_lower_case=True \ --max_seq_length=512 \ --max_predictions_per_seq=76 \ @@ -69,7 +69,7 @@ else fi FILENAME=${DATA_DIR}/tfrecords/eval_10k -if [[ ${CM_MLPERF_TRAINING_CLEAN_TFRECORDS} != "yes" && -f ${FILENAME} && $(stat -c%s "$FILENAME") -gt 25000000 ]] ; then +if [[ ${MLC_MLPERF_TRAINING_CLEAN_TFRECORDS} != "yes" && -f ${FILENAME} && $(stat -c%s "$FILENAME") -gt 25000000 ]] ; then echo "Skipping regenerating existing ${FILENAME}" else cmd="python3 pick_eval_samples.py \ diff --git a/script/prepare-training-data-bert/run.sh b/script/prepare-training-data-bert/run.sh index ea6fd8aca..194b53e80 100644 --- a/script/prepare-training-data-bert/run.sh +++ b/script/prepare-training-data-bert/run.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,20 +17,20 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi } #Add your run commands here... 
-# run "$CM_RUN_CMD" +# run "$MLC_RUN_CMD" -CUR=${CM_DATA_DIR:-"$PWD/data"} -cd ${CM_RUN_DIR} +CUR=${MLC_DATA_DIR:-"$PWD/data"} +cd ${MLC_RUN_DIR} -if [[ ${CM_TMP_VARIATION} == "nvidia" ]]; then - bash ${CM_TMP_CURRENT_SCRIPT_PATH}/run-nvidia.sh -elif [[ ${CM_TMP_VARIATION} == "reference" ]]; then - bash ${CM_TMP_CURRENT_SCRIPT_PATH}/run-reference.sh +if [[ ${MLC_TMP_VARIATION} == "nvidia" ]]; then + bash ${MLC_TMP_CURRENT_SCRIPT_PATH}/run-nvidia.sh +elif [[ ${MLC_TMP_VARIATION} == "reference" ]]; then + bash ${MLC_TMP_CURRENT_SCRIPT_PATH}/run-reference.sh fi diff --git a/script/prepare-training-data-bert/run_config.yml b/script/prepare-training-data-bert/run_config.yml index e39692ebc..88b2e84f1 100644 --- a/script/prepare-training-data-bert/run_config.yml +++ b/script/prepare-training-data-bert/run_config.yml @@ -4,7 +4,7 @@ docker: docker_os_version: "22.04" fake_run_deps: true mounts: - - ${{ CM_DATA_DIR }}:${{ CM_DATA_DIR }} + - ${{ MLC_DATA_DIR }}:${{ MLC_DATA_DIR }} run_with_default_inputs: true #if false the script won't run automatic tests diff --git a/script/prepare-training-data-resnet/customize.py b/script/prepare-training-data-resnet/customize.py index 36fdba4e9..3d7e16815 100644 --- a/script/prepare-training-data-resnet/customize.py +++ b/script/prepare-training-data-resnet/customize.py @@ -12,30 +12,30 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') - datadir = env.get('CM_DATA_DIR', os.getcwd()) - env['CM_DATA_DIR'] = datadir + datadir = env.get('MLC_DATA_DIR', os.getcwd()) + env['MLC_DATA_DIR'] = datadir - env['MXNET_VER'] = env.get('CM_MXNET_VER', '22.08').replace("-", ".") + env['MXNET_VER'] = env.get('MLC_MXNET_VER', '22.08').replace("-", ".") - env['CM_IMAGENET_LABELS_DOWNLOAD_DIR'] = env['CM_DATASET_IMAGENET_TRAIN_PATH'] + env['MLC_IMAGENET_LABELS_DOWNLOAD_DIR'] = env['MLC_DATASET_IMAGENET_TRAIN_PATH'] - if env.get("CM_TMP_VARIATION", "") == "nvidia": + if env.get("MLC_TMP_VARIATION", "") == "nvidia": code_path = os.path.join( - env['CM_NVIDIA_DEEPLEARNING_EXAMPLES_REPO_PATH'], + env['MLC_NVIDIA_DEEPLEARNING_EXAMPLES_REPO_PATH'], 'MxNet', 'Classification', 'RN50v1.5') - env['CM_RUN_DIR'] = code_path + env['MLC_RUN_DIR'] = code_path i['run_script_input']['script_name'] = "run-nvidia" - elif env.get("CM_TMP_VARIATION", "") == "reference": + elif env.get("MLC_TMP_VARIATION", "") == "reference": code_path = os.path.join( - env['CM_MLPERF_TRAINING_SOURCE'], + env['MLC_MLPERF_TRAINING_SOURCE'], 'image_classification', 'tensorflow2') - env['CM_RUN_DIR'] = code_path + env['MLC_RUN_DIR'] = code_path i['run_script_input']['script_name'] = "run-reference" return {'return': 0} @@ -45,19 +45,19 @@ def postprocess(i): env = i['env'] - data_dir = env['CM_DATA_DIR'] - env['CM_MLPERF_TRAINING_RESNET_DATA_PATH'] = data_dir + data_dir = env['MLC_DATA_DIR'] + env['MLC_MLPERF_TRAINING_RESNET_DATA_PATH'] = data_dir - env['CM_MLPERF_TRAINING_IMAGENET_PATH'] = env['CM_DATASET_IMAGENET_TRAIN_PATH'] + env['MLC_MLPERF_TRAINING_IMAGENET_PATH'] = env['MLC_DATASET_IMAGENET_TRAIN_PATH'] - if env.get("CM_TMP_VARIATION", "") == "nvidia": - env['CM_GET_DEPENDENT_CACHED_PATH'] = data_dir - env['CM_MLPERF_TRAINING_NVIDIA_RESNET_PREPROCESSED_PATH'] = data_dir + if env.get("MLC_TMP_VARIATION", "") == "nvidia": + env['MLC_GET_DEPENDENT_CACHED_PATH'] = data_dir + env['MLC_MLPERF_TRAINING_NVIDIA_RESNET_PREPROCESSED_PATH'] = data_dir - elif env.get("CM_TMP_VARIATION", "") == "reference": - 
env['CM_GET_DEPENDENT_CACHED_PATH'] = os.path.join( + elif env.get("MLC_TMP_VARIATION", "") == "reference": + env['MLC_GET_DEPENDENT_CACHED_PATH'] = os.path.join( data_dir, "tfrecords") - env['CM_MLPERF_TRAINING_RESNET_TFRECORDS_PATH'] = os.path.join( + env['MLC_MLPERF_TRAINING_RESNET_TFRECORDS_PATH'] = os.path.join( data_dir, "tfrecords") return {'return': 0} diff --git a/script/prepare-training-data-resnet/meta.yaml b/script/prepare-training-data-resnet/meta.yaml index 25925f56d..b53014420 100644 --- a/script/prepare-training-data-resnet/meta.yaml +++ b/script/prepare-training-data-resnet/meta.yaml @@ -13,27 +13,27 @@ deps: - tags: get,generic-sys-util,_rsync input_description: {} input_mapping: - data_dir: CM_DATA_DIR + data_dir: MLC_DATA_DIR new_env_keys: -- CM_MLPERF_TRAINING_RESNET_* -- CM_MLPERF_TRAINING_NVIDIA_RESNET_PREPROCESSED_PATH +- MLC_MLPERF_TRAINING_RESNET_* +- MLC_MLPERF_TRAINING_NVIDIA_RESNET_PREPROCESSED_PATH new_state_keys: [] post_deps: [] posthook_deps: [] prehook_deps: - env: - CM_DOWNLOAD_CHECKSUM: '' - CM_DOWNLOAD_FINAL_ENV_NAME: CM_IMAGENET_LABELS_FILE_PATH - CM_DOWNLOAD_PATH: <<>> - CM_DOWNLOAD_RENAME_FILE: synset_labels.txt + MLC_DOWNLOAD_CHECKSUM: '' + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_IMAGENET_LABELS_FILE_PATH + MLC_DOWNLOAD_PATH: <<>> + MLC_DOWNLOAD_RENAME_FILE: synset_labels.txt extra_cache_tags: imagenet,val,labels force_cache: true tags: download,file,_wget,_url.https://raw.githubusercontent.com/tensorflow/models/master/research/slim/datasets/imagenet_2012_validation_synset_labels.txt - enable_if_env: - CM_TMP_VARIATION: + MLC_TMP_VARIATION: - reference env: - CM_DOWNLOAD_FINAL_ENV_NAME: CM_IMAGENET_TO_GCS_SCRIPT_PATH + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_IMAGENET_TO_GCS_SCRIPT_PATH extra_cache_tags: imagenet_to_gcs,script force_cache: true tags: download,file,_wget,_url.https://raw.githubusercontent.com/tensorflow/tpu/master/tools/datasets/imagenet_to_gcs.py @@ -48,7 +48,7 @@ uid: d42a8a8ca2704f9f variations: mxnet.#: env: - CM_MXNET_VERSION: '#' + MLC_MXNET_VERSION: '#' nvidia: default: true deps: @@ -56,11 +56,11 @@ variations: - nvidia-training-code tags: get,mlperf,training,nvidia,code - env: - CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_NVIDIA_DEEPLEARNING_EXAMPLES_REPO_PATH + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_NVIDIA_DEEPLEARNING_EXAMPLES_REPO_PATH extra_cache_tags: nvidia,deeplearning_examples tags: get,git,repo,_repo.https://github.com/NVIDIA/DeepLearningExamples,_sha.81ee705868a11d6fe18c12d237abe4a08aab5fd6 env: - CM_TMP_VARIATION: nvidia + MLC_TMP_VARIATION: nvidia group: implementation reference: deps: @@ -73,6 +73,6 @@ variations: - tags: get,generic-python-lib,_tensorflow - tags: get,generic-python-lib,_protobuf env: - CM_TMP_VARIATION: reference + MLC_TMP_VARIATION: reference group: implementation versions: {} diff --git a/script/prepare-training-data-resnet/run-nvidia.sh b/script/prepare-training-data-resnet/run-nvidia.sh index e7ffdb741..18fa70634 100644 --- a/script/prepare-training-data-resnet/run-nvidia.sh +++ b/script/prepare-training-data-resnet/run-nvidia.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,18 +17,18 @@ function run() { echo "Running: " echo "$1" echo "" 
- if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi } #Add your run commands here... -# run "$CM_RUN_CMD" +# run "$MLC_RUN_CMD" mkdir -p train_data/train mkdir -p train_data/val -rsync -avz ${CM_DATASET_IMAGENET_TRAIN_PATH}/ train_data/train/ -rsync -avz ${CM_DATASET_IMAGENET_VAL_PATH}/ train_data/val/ +rsync -avz ${MLC_DATASET_IMAGENET_TRAIN_PATH}/ train_data/train/ +rsync -avz ${MLC_DATASET_IMAGENET_VAL_PATH}/ train_data/val/ cd train_data/train find . -name "*.tar" | while read NAME ; do mkdir -p "${NAME%.tar}"; tar -xvf "${NAME}" -C "${NAME%.tar}"; rm -f "${NAME}"; done cd ../val @@ -36,8 +36,8 @@ run "wget --no-check-certificate -qO- https://raw.githubusercontent.com/soumith cd ../../ DATA_DIR=`pwd`/train_data -CUR=${CM_DATA_DIR} -run "cd \"${CM_RUN_DIR}\"" +CUR=${MLC_DATA_DIR} +run "cd \"${MLC_RUN_DIR}\"" run "docker build --build-arg FROM_IMAGE_NAME=nvcr.io/nvidia/mxnet:${MXNET_VER}-py3 -t nvidia_rn50_mx ." run "ID=`docker run -dt --gpus all --runtime=nvidia --ipc=host -v ${DATA_DIR}:/data -v ${CUR}:/preprocessed nvidia_rn50_mx bash`" run "docker exec $ID bash -c './scripts/prepare_imagenet.sh /data /preprocessed'" diff --git a/script/prepare-training-data-resnet/run-reference.sh b/script/prepare-training-data-resnet/run-reference.sh index 332da70cc..a2f4a5eda 100644 --- a/script/prepare-training-data-resnet/run-reference.sh +++ b/script/prepare-training-data-resnet/run-reference.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,21 +17,21 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi } #Add your run commands here... 
-# run "$CM_RUN_CMD" +# run "$MLC_RUN_CMD" CUR=$PWD -DATA_DIR=${CM_DATA_DIR:-"$PWD/data"} +DATA_DIR=${MLC_DATA_DIR:-"$PWD/data"} -cd ${CM_RUN_DIR} +cd ${MLC_RUN_DIR} mkdir -p ${DATA_DIR}/tfrecords -cmd="python3 ${CM_IMAGENET_TO_GCS_SCRIPT_PATH} \ - --raw_data_dir=${CM_DATASET_IMAGENET_TRAIN_PATH} \ +cmd="python3 ${MLC_IMAGENET_TO_GCS_SCRIPT_PATH} \ + --raw_data_dir=${MLC_DATASET_IMAGENET_TRAIN_PATH} \ --local_scratch_dir=${DATA_DIR}/tfrecords \ --nogcs_upload" run "$cmd" diff --git a/script/prepare-training-data-resnet/run_config.yml b/script/prepare-training-data-resnet/run_config.yml index 688f811ea..2f8a5028f 100644 --- a/script/prepare-training-data-resnet/run_config.yml +++ b/script/prepare-training-data-resnet/run_config.yml @@ -4,7 +4,7 @@ docker: docker_os_version: "22.04" fake_run_deps: true mounts: - - ${{ CM_DATA_DIR }}:${{ CM_DATA_DIR }} + - ${{ MLC_DATA_DIR }}:${{ MLC_DATA_DIR }} run_with_default_inputs: true #if false the script won't run automatic tests diff --git a/script/preprocess-mlperf-inference-submission/customize.py b/script/preprocess-mlperf-inference-submission/customize.py index 40a00bf39..f4caaf11d 100644 --- a/script/preprocess-mlperf-inference-submission/customize.py +++ b/script/preprocess-mlperf-inference-submission/customize.py @@ -8,32 +8,32 @@ def preprocess(i): os_info = i['os_info'] env = i['env'] - submission_dir = env.get("CM_MLPERF_INFERENCE_SUBMISSION_DIR", "") + submission_dir = env.get("MLC_MLPERF_INFERENCE_SUBMISSION_DIR", "") if submission_dir == "": - print("Please set --env.CM_MLPERF_INFERENCE_SUBMISSION_DIR") + print("Please set --env.MLC_MLPERF_INFERENCE_SUBMISSION_DIR") return {'return': 1, - 'error': 'CM_MLPERF_INFERENCE_SUBMISSION_DIR is not specified'} + 'error': 'MLC_MLPERF_INFERENCE_SUBMISSION_DIR is not specified'} if not os.path.exists(submission_dir): - print("Please set --env.CM_MLPERF_INFERENCE_SUBMISSION_DIR to a valid submission directory") + print("Please set --env.MLC_MLPERF_INFERENCE_SUBMISSION_DIR to a valid submission directory") return {'return': 1, - 'error': 'CM_MLPERF_INFERENCE_SUBMISSION_DIR is not existing'} + 'error': 'MLC_MLPERF_INFERENCE_SUBMISSION_DIR is not existing'} submission_dir = submission_dir.rstrip(os.path.sep) - submitter = env.get("CM_MLPERF_SUBMITTER", "MLCommons") + submitter = env.get("MLC_MLPERF_SUBMITTER", "MLCommons") submission_processed = f"{submission_dir}_processed" if os.path.exists(submission_processed): print(f"Cleaning {submission_processed}") shutil.rmtree(submission_processed) - version = env.get('CM_MLPERF_SUBMISSION_CHECKER_VERSION', '') + version = env.get('MLC_MLPERF_SUBMISSION_CHECKER_VERSION', '') x_version = ' --version ' + version + ' ' if version != '' else '' - CMD = env['CM_PYTHON_BIN'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "tools", "submission", + CMD = env['MLC_PYTHON_BIN'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "tools", "submission", "preprocess_submission.py") + "' --input '" + submission_dir + "' --submitter '" + submitter + "' --output '" + submission_processed + "'" + x_version - env['CM_RUN_CMD'] = CMD + env['MLC_RUN_CMD'] = CMD return {'return': 0} @@ -41,7 +41,7 @@ def preprocess(i): def postprocess(i): env = i['env'] - submission_dir = env["CM_MLPERF_INFERENCE_SUBMISSION_DIR"] + submission_dir = env["MLC_MLPERF_INFERENCE_SUBMISSION_DIR"] import datetime submission_backup = submission_dir + "_backup_" + \ '{date:%Y-%m-%d_%H:%M:%S}'.format(date=datetime.datetime.now()) diff --git 
a/script/preprocess-mlperf-inference-submission/meta.yaml b/script/preprocess-mlperf-inference-submission/meta.yaml index eb5f959b4..e9645c06e 100644 --- a/script/preprocess-mlperf-inference-submission/meta.yaml +++ b/script/preprocess-mlperf-inference-submission/meta.yaml @@ -16,14 +16,14 @@ deps: - names: - get-mlperf-submission-dir skip_if_env: - CM_MLPERF_INFERENCE_SUBMISSION_DIR: + MLC_MLPERF_INFERENCE_SUBMISSION_DIR: - 'on' tags: get,mlperf,submission,dir input_mapping: - input: CM_MLPERF_INFERENCE_SUBMISSION_DIR - submission_dir: CM_MLPERF_INFERENCE_SUBMISSION_DIR - version: CM_MLPERF_SUBMISSION_CHECKER_VERSION - submitter: CM_MLPERF_SUBMITTER + input: MLC_MLPERF_INFERENCE_SUBMISSION_DIR + submission_dir: MLC_MLPERF_INFERENCE_SUBMISSION_DIR + version: MLC_MLPERF_SUBMISSION_CHECKER_VERSION + submitter: MLC_MLPERF_SUBMITTER tags: - run - mlc diff --git a/script/preprocess-mlperf-inference-submission/run.sh b/script/preprocess-mlperf-inference-submission/run.sh index 1b3c5c3c0..7feafdf44 100644 --- a/script/preprocess-mlperf-inference-submission/run.sh +++ b/script/preprocess-mlperf-inference-submission/run.sh @@ -1,5 +1,5 @@ #!/bin/bash -cmd=${CM_RUN_CMD} +cmd=${MLC_RUN_CMD} echo "${cmd}" eval "${cmd}" test $? -eq 0 || exit $? diff --git a/script/print-any-text/COPYRIGHT.md b/script/print-any-text/COPYRIGHT.md deleted file mode 100644 index a059b0c49..000000000 --- a/script/print-any-text/COPYRIGHT.md +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright Notice - -© 2024-2025 MLCommons. All Rights Reserved. - -This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at: - -[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) - -Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. diff --git a/script/print-any-text/README.md b/script/print-any-text/README.md deleted file mode 100644 index ae23369cd..000000000 --- a/script/print-any-text/README.md +++ /dev/null @@ -1 +0,0 @@ -Please see [https://docs.mlcommons.org/cm4mlops/scripts/Tests/print-any-text](https://docs.mlcommons.org/cm4mlops/scripts/Tests/print-any-text) for the documentation of this CM script. 
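For orientation, the preprocess-mlperf-inference-submission hunks above follow the standard MLC script contract: customize.py's preprocess() validates its inputs from the env dict, composes a shell command, and exports it as MLC_RUN_CMD, which the generic run.sh then simply evals. A minimal sketch of that flow, assuming the env dict shape these scripts receive from the automation runtime:

    # Minimal sketch of the preprocess() -> MLC_RUN_CMD -> run.sh contract.
    # Hypothetical standalone form; the real hook receives `i` from the MLC runtime.
    import os

    def preprocess(i):
        env = i['env']
        submission_dir = env.get("MLC_MLPERF_INFERENCE_SUBMISSION_DIR", "")
        if submission_dir == "":
            # A non-zero 'return' aborts the script and surfaces 'error'.
            return {'return': 1,
                    'error': 'MLC_MLPERF_INFERENCE_SUBMISSION_DIR is not specified'}
        checker = os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'],
                               "tools", "submission", "preprocess_submission.py")
        # run.sh later executes: cmd=${MLC_RUN_CMD}; eval "${cmd}"
        env['MLC_RUN_CMD'] = f"{env['MLC_PYTHON_BIN']} '{checker}' --input '{submission_dir}'"
        return {'return': 0}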
diff --git a/script/print-any-text/customize.py b/script/print-any-text/customize.py deleted file mode 100644 index ea05b88fc..000000000 --- a/script/print-any-text/customize.py +++ /dev/null @@ -1,30 +0,0 @@ -# Developer(s): Grigori Fursin - -from mlc import utils -import os - - -def postprocess(i): - - env = i['env'] - - cm_env_keys = env.get('CM_PRINT_ANY_CM_ENV_KEYS', '').strip() - os_env_keys = env.get('CM_PRINT_ANY_OS_ENV_KEYS', '').strip() - - printed = False - for k, e, t in [(cm_env_keys, env, 'CM_ENV'), - (os_env_keys, os.environ, 'OS_ENV')]: - - if k != '': - for kk in k.split(','): - kk = kk.strip() - if kk != '': - vv = e.get(kk) - - print('{}[{}]: {}'.format(t, kk, vv)) - printed = True - - if printed: - print('') - - return {'return': 0} diff --git a/script/print-any-text/meta.yaml b/script/print-any-text/meta.yaml deleted file mode 100644 index 2fd9bba2c..000000000 --- a/script/print-any-text/meta.yaml +++ /dev/null @@ -1,34 +0,0 @@ -alias: print-any-text -uid: f4bf2d1d33c24e31 - -automation_alias: script -automation_uid: 5b4e0237da074764 - -category: Tests - -developers: "Grigori Fursin" - -default_env: - CM_PRINT_ANY_TEXT: '' - -input_mapping: - text: CM_PRINT_ANY_TEXT - cm_env_keys: CM_PRINT_ANY_CM_ENV_KEYS - os_env_keys: CM_PRINT_ANY_OS_ENV_KEYS - -tags: -- print -- any-text - -variations: - text.#: - env: - CM_PRINT_ANY_TEXT: "#" - - cm_env.#: - env: - CM_PRINT_ANY_CM_ENV_KEYS: "#" - - os_env.#: - env: - CM_PRINT_ANY_OS_ENV_KEYS: "#" diff --git a/script/print-any-text/run.bat b/script/print-any-text/run.bat deleted file mode 100644 index be97ff0a2..000000000 --- a/script/print-any-text/run.bat +++ /dev/null @@ -1,5 +0,0 @@ -if "%CM_PRINT_ANY_TEXT%" == "" ( - echo. -) else ( - echo %CM_PRINT_ANY_TEXT% -) diff --git a/script/print-any-text/run.sh b/script/print-any-text/run.sh deleted file mode 100644 index 7f04767d6..000000000 --- a/script/print-any-text/run.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -echo "${CM_PRINT_ANY_TEXT}" diff --git a/script/print-croissant-desc/COPYRIGHT.md b/script/print-croissant-desc/COPYRIGHT.md deleted file mode 100644 index a059b0c49..000000000 --- a/script/print-croissant-desc/COPYRIGHT.md +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright Notice - -© 2024-2025 MLCommons. All Rights Reserved. - -This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at: - -[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) - -Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. 
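The print-any-text helper removed above implemented a small but reusable pattern: take comma-separated key lists and print the matching values from both the script env and the OS environment. Reduced to a standalone sketch (a hypothetical free function; the original lived in postprocess()):

    # Standalone reduction of the removed print-any-text lookup loop.
    import os

    def print_env_keys(cm_keys, os_keys, script_env):
        # cm_keys / os_keys are comma-separated lists, e.g. "MLC_VAR1,MLC_VAR2".
        for keys, source, label in [(cm_keys, script_env, 'CM_ENV'),
                                    (os_keys, os.environ, 'OS_ENV')]:
            for key in (k.strip() for k in keys.split(',') if k.strip()):
                print('{}[{}]: {}'.format(label, key, source.get(key)))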
diff --git a/script/print-croissant-desc/README-extra.md b/script/print-croissant-desc/README-extra.md deleted file mode 100644 index a3c638caa..000000000 --- a/script/print-croissant-desc/README-extra.md +++ /dev/null @@ -1,16 +0,0 @@ -# MLCommons CM automation recipe - -## Print [Croissant](https://github.com/mlcommons/croissant) description from metadata URL - -```bash -pip intstall cmind - -cm pull repo ctuning@mlcommons-ck - -cmr "print croissant desc" --url="https://raw.githubusercontent.com/mlcommons/croissant/main/datasets/1.0/gpt-3/metadata.json" -``` - -## About - -* Code snippet taken from https://github.com/mlcommons/croissant/pull/564/files ([@mkuchnik](https://github.com/mkuchnik)) -* CM automation recipe added by [@gfursin](https://github.com/gfursin). \ No newline at end of file diff --git a/script/print-croissant-desc/README.md b/script/print-croissant-desc/README.md deleted file mode 100644 index 3b4b1561b..000000000 --- a/script/print-croissant-desc/README.md +++ /dev/null @@ -1 +0,0 @@ -Please see [https://docs.mlcommons.org/cm4mlops/scripts/Tests/print-croissant-desc](https://docs.mlcommons.org/cm4mlops/scripts/Tests/print-croissant-desc) for the documentation of this CM script. diff --git a/script/print-croissant-desc/code.py b/script/print-croissant-desc/code.py deleted file mode 100644 index c53ad5fdf..000000000 --- a/script/print-croissant-desc/code.py +++ /dev/null @@ -1,29 +0,0 @@ -# Taken from https://github.com/mlcommons/croissant/pull/564/files (@mkuchnik) - -import os -import mlcroissant as mlc - - -def main(): - - url = os.environ.get('CM_PRINT_CROISSANT_URL', '') - - if url == '': - print('Error: --url is not specified') - exit(1) - - ds = mlc.Dataset(url) - metadata = ds.metadata.to_json() - - print('') - print('Croissant meta data URL: {}'.format(url)) - print('') - print(f"{metadata['name']}: {metadata['description']}") - - print('') - for x in ds.records(record_set="default"): - print(x) - - -if __name__ == '__main__': - main() diff --git a/script/print-croissant-desc/meta.yaml b/script/print-croissant-desc/meta.yaml deleted file mode 100644 index ef4d2a7ba..000000000 --- a/script/print-croissant-desc/meta.yaml +++ /dev/null @@ -1,29 +0,0 @@ -alias: print-croissant-desc -uid: 59116d5c98a04d4f - -automation_alias: script -automation_uid: 5b4e0237da074764 - -category: Tests - -input_mapping: - url: CM_PRINT_CROISSANT_URL - -default_env: - CM_PRINT_CROISSANT_URL: "https://raw.githubusercontent.com/mlcommons/croissant/main/datasets/1.0/gpt-3/metadata.json" - -deps: -- tags: detect,os -- tags: get,sys-utils-cm -- names: - - python - - python3 - tags: get,python3 -- names: - - croissant - tags: get,croissant - -tags: -- print -- croissant -- desc diff --git a/script/print-croissant-desc/run.bat b/script/print-croissant-desc/run.bat deleted file mode 100644 index 37f249b0f..000000000 --- a/script/print-croissant-desc/run.bat +++ /dev/null @@ -1,2 +0,0 @@ -%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\code.py -IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/print-croissant-desc/run.sh b/script/print-croissant-desc/run.sh deleted file mode 100644 index 9b94917d9..000000000 --- a/script/print-croissant-desc/run.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/code.py -test $? -eq 0 || exit $? 
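The print-croissant-desc recipe deleted above was the only consumer of mlcroissant in this tree. Its essential logic is kept here as a reference sketch; the calls mirror the removed code.py and are not re-verified against current mlcroissant releases:

    # Reference sketch of the removed print-croissant-desc/code.py logic.
    import os
    import mlcroissant as mlc  # third-party; pip install mlcroissant

    url = os.environ.get('CM_PRINT_CROISSANT_URL', '')
    if not url:
        raise SystemExit('Error: --url is not specified')

    ds = mlc.Dataset(url)             # load Croissant metadata from the URL
    metadata = ds.metadata.to_json()
    print(f"{metadata['name']}: {metadata['description']}")
    for record in ds.records(record_set="default"):
        print(record)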
diff --git a/script/print-hello-world-java/COPYRIGHT.md b/script/print-hello-world-java/COPYRIGHT.md deleted file mode 100644 index 9e44ad290..000000000 --- a/script/print-hello-world-java/COPYRIGHT.md +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright Notice - -© 2022-2025 MLCommons. All Rights Reserved. - -This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at: - -[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) - -Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. diff --git a/script/print-hello-world-java/README.md b/script/print-hello-world-java/README.md deleted file mode 100644 index 063f8afda..000000000 --- a/script/print-hello-world-java/README.md +++ /dev/null @@ -1 +0,0 @@ -Please see [https://docs.mlcommons.org/cm4mlops/scripts/Tests/print-hello-world-java](https://docs.mlcommons.org/cm4mlops/scripts/Tests/print-hello-world-java) for the documentation of this CM script. diff --git a/script/print-hello-world-java/code.java b/script/print-hello-world-java/code.java deleted file mode 100644 index 4bb917c9e..000000000 --- a/script/print-hello-world-java/code.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - Developer: Grigori Fursin -*/ - -//Import libraries... -import java.io.*; - -public class hello_world -{ - static int N=16; - static double[][] A=new double [N][N]; - static double[][] B=new double [N][N]; - static double[][] C=new double [N][N]; - - // ******************************************************************* - public static void main(String args[]) - { - System.out.println("Hello world!"); - System.out.println(""); - - String env=System.getenv("CM_VAR1"); - System.out.println("CM_VAR1="+env); - - env=System.getenv("CM_VAR2"); - System.out.println("CM_VAR2="+env); - } -} diff --git a/script/print-hello-world-java/meta.yaml b/script/print-hello-world-java/meta.yaml deleted file mode 100644 index b38990817..000000000 --- a/script/print-hello-world-java/meta.yaml +++ /dev/null @@ -1,17 +0,0 @@ -alias: print-hello-world-java -automation_alias: script -automation_uid: 5b4e0237da074764 -category: Tests -deps: -- tags: detect,os -- names: - - java - tags: get,java -tags: -- print -- hello world -- hello-world -- hello -- world -- java -uid: 3b62dc46cce3489c diff --git a/script/print-hello-world-java/run.bat b/script/print-hello-world-java/run.bat deleted file mode 100644 index f57f2084b..000000000 --- a/script/print-hello-world-java/run.bat +++ /dev/null @@ -1,4 +0,0 @@ -echo %CM_JAVA_BIN_WITH_PATH% - -%CM_JAVA_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\code.java -IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/print-hello-world-java/run.sh b/script/print-hello-world-java/run.sh deleted file mode 100644 index 7c5ab3f6a..000000000 --- a/script/print-hello-world-java/run.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -which ${CM_JAVA_BIN_WITH_PATH} - -${CM_JAVA_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/code.java -test $? -eq 0 || exit $? 
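The removed Java demo above read CM_VAR1 and CM_VAR2 directly from the process environment; direct reads like these are exactly what a CM_-to-MLC_ rename breaks. This patch opts for a hard rename, but a transition period could instead use a compatibility lookup. A hypothetical shim, not part of this patch:

    # Hypothetical helper for a CM_ -> MLC_ transition period; the repo
    # performs a hard rename instead, so nothing below exists in the tree.
    import os

    def get_env(name, default=''):
        # Prefer the new MLC_ spelling, fall back to the legacy CM_ one.
        legacy = 'CM_' + name[len('MLC_'):] if name.startswith('MLC_') else name
        return os.environ.get(name, os.environ.get(legacy, default))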
diff --git a/script/print-hello-world-javac/COPYRIGHT.md b/script/print-hello-world-javac/COPYRIGHT.md deleted file mode 100644 index 9e44ad290..000000000 --- a/script/print-hello-world-javac/COPYRIGHT.md +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright Notice - -© 2022-2025 MLCommons. All Rights Reserved. - -This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at: - -[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) - -Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. diff --git a/script/print-hello-world-javac/README.md b/script/print-hello-world-javac/README.md deleted file mode 100644 index e07f0c290..000000000 --- a/script/print-hello-world-javac/README.md +++ /dev/null @@ -1 +0,0 @@ -Please see [https://docs.mlcommons.org/cm4mlops/scripts/Tests/print-hello-world-javac](https://docs.mlcommons.org/cm4mlops/scripts/Tests/print-hello-world-javac) for the documentation of this CM script. diff --git a/script/print-hello-world-javac/code.java b/script/print-hello-world-javac/code.java deleted file mode 100644 index 9eb859cda..000000000 --- a/script/print-hello-world-javac/code.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - Developer: Grigori Fursin -*/ - -//Import libraries... -import java.io.*; - -public class code -{ - static int N=16; - static double[][] A=new double [N][N]; - static double[][] B=new double [N][N]; - static double[][] C=new double [N][N]; - - // ******************************************************************* - public static void main(String args[]) - { - System.out.println("Hello world!"); - System.out.println(""); - - String env=System.getenv("CM_VAR1"); - System.out.println("CM_VAR1="+env); - - env=System.getenv("CM_VAR2"); - System.out.println("CM_VAR2="+env); - } -} diff --git a/script/print-hello-world-javac/meta.yaml b/script/print-hello-world-javac/meta.yaml deleted file mode 100644 index 8afdf4d26..000000000 --- a/script/print-hello-world-javac/meta.yaml +++ /dev/null @@ -1,17 +0,0 @@ -alias: print-hello-world-javac -automation_alias: script -automation_uid: 5b4e0237da074764 -category: Tests -deps: -- tags: detect,os -- names: - - javac - tags: get,javac -tags: -- print -- hello world -- hello-world -- hello -- world -- javac -uid: 040fafd538104819 diff --git a/script/print-hello-world-javac/run.bat b/script/print-hello-world-javac/run.bat deleted file mode 100644 index 583b89804..000000000 --- a/script/print-hello-world-javac/run.bat +++ /dev/null @@ -1,8 +0,0 @@ -echo "%CM_JAVA_BIN_WITH_PATH%" -echo. - -"%CM_JAVAC_BIN_WITH_PATH%" %CM_TMP_CURRENT_SCRIPT_PATH%\code.java -IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% - -"%CM_JAVA_BIN_WITH_PATH%" -classpath "%CM_TMP_CURRENT_SCRIPT_PATH%" code -IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/print-hello-world-javac/run.sh b/script/print-hello-world-javac/run.sh deleted file mode 100644 index c7fb26cbc..000000000 --- a/script/print-hello-world-javac/run.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -echo "${CM_JAVAC_BIN_WITH_PATH}" -echo "" - -${CM_JAVAC_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/code.java -test $? 
-eq 0 || exit 1 - -${CM_JAVA_BIN_WITH_PATH} -classpath "${CM_TMP_CURRENT_SCRIPT_PATH}" code -test $? -eq 0 || exit 1 diff --git a/script/print-hello-world-py/COPYRIGHT.md b/script/print-hello-world-py/COPYRIGHT.md deleted file mode 100644 index 9e44ad290..000000000 --- a/script/print-hello-world-py/COPYRIGHT.md +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright Notice - -© 2022-2025 MLCommons. All Rights Reserved. - -This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at: - -[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) - -Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. diff --git a/script/print-hello-world-py/README.md b/script/print-hello-world-py/README.md deleted file mode 100644 index 8bfc479e6..000000000 --- a/script/print-hello-world-py/README.md +++ /dev/null @@ -1 +0,0 @@ -Please see [https://docs.mlcommons.org/cm4mlops/scripts/Tests/print-hello-world-py](https://docs.mlcommons.org/cm4mlops/scripts/Tests/print-hello-world-py) for the documentation of this CM script. diff --git a/script/print-hello-world-py/app.py b/script/print-hello-world-py/app.py deleted file mode 100644 index 12382ac80..000000000 --- a/script/print-hello-world-py/app.py +++ /dev/null @@ -1,20 +0,0 @@ -def main(): - print('') - - # Import cmind to test break points - import cmind.utils - import os - if os.environ.get('CM_TMP_DEBUG_UID', '') == 'f52670e5f3f345a2': - cmind.utils.debug_here( - __file__, - port=5678, - text='Debugging main.py!').breakpoint() - - print('HELLO WORLD from Python') - - x = 1 - print(x) - - -if __name__ == '__main__': - main() diff --git a/script/print-hello-world-py/customize.py b/script/print-hello-world-py/customize.py deleted file mode 100644 index 3ee63ecb1..000000000 --- a/script/print-hello-world-py/customize.py +++ /dev/null @@ -1,20 +0,0 @@ -# Developer(s): Grigori Fursin - -import os - - -def preprocess(i): - - os_info = i['os_info'] - env = i['env'] - meta = i['meta'] - - return {'return': 0} - - -def postprocess(i): - - env = i['env'] - state = i['state'] - - return {'return': 0} diff --git a/script/print-hello-world-py/meta.yaml b/script/print-hello-world-py/meta.yaml deleted file mode 100644 index b927a85dd..000000000 --- a/script/print-hello-world-py/meta.yaml +++ /dev/null @@ -1,24 +0,0 @@ -alias: print-hello-world-py -automation_alias: script -automation_uid: 5b4e0237da074764 -category: Tests -deps: -- tags: detect,os -- names: - - python - - python3 - tags: get,python3 -- skip_if_env: - CM_SKIP_PRINT: - - 'True' - CM_SKIP_PRINT2: - - 'True' - tags: print,python-version -tags: -- print -- hello world -- hello-world -- hello -- world -- python -uid: d83274c7eb754d90 diff --git a/script/print-hello-world-py/run.bat b/script/print-hello-world-py/run.bat deleted file mode 100644 index c0980c59b..000000000 --- a/script/print-hello-world-py/run.bat +++ /dev/null @@ -1,8 +0,0 @@ -IF NOT DEFINED CM_TMP_CURRENT_SCRIPT_PATH SET CM_TMP_CURRENT_SCRIPT_PATH=%CD% - -rem %CM_PYTHON_BIN_WITH_PATH% --version - -%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\app.py -IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% - -echo CM_NEW_VAR_FROM_RUN=XYZ > tmp-run-env.out diff --git 
a/script/print-hello-world-py/run.sh b/script/print-hello-world-py/run.sh deleted file mode 100644 index bc7e2c301..000000000 --- a/script/print-hello-world-py/run.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD} - -#which ${CM_PYTHON_BIN_WITH_PATH} -#${CM_PYTHON_BIN_WITH_PATH} --version - -${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/app.py -test $? -eq 0 || exit $? - -echo "CM_NEW_VAR_FROM_RUN=$MLPERF_XYZ" > tmp-run-env.out diff --git a/script/print-hello-world/COPYRIGHT.md b/script/print-hello-world/COPYRIGHT.md deleted file mode 100644 index 9e44ad290..000000000 --- a/script/print-hello-world/COPYRIGHT.md +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright Notice - -© 2022-2025 MLCommons. All Rights Reserved. - -This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at: - -[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) - -Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. diff --git a/script/print-hello-world/README.md b/script/print-hello-world/README.md deleted file mode 100644 index 8fad99c80..000000000 --- a/script/print-hello-world/README.md +++ /dev/null @@ -1 +0,0 @@ -Please see [https://docs.mlcommons.org/cm4mlops/scripts/Tests/print-hello-world](https://docs.mlcommons.org/cm4mlops/scripts/Tests/print-hello-world) for the documentation of this CM script. diff --git a/script/print-hello-world/meta.yaml b/script/print-hello-world/meta.yaml deleted file mode 100644 index 12b26efd4..000000000 --- a/script/print-hello-world/meta.yaml +++ /dev/null @@ -1,48 +0,0 @@ -alias: print-hello-world -uid: b9f0acba4aca4baa - -automation_alias: script -automation_uid: 5b4e0237da074764 - -category: Tests - -default_env: - CM_ENV_TEST1: TEST1 - -env: - CM_ENV_TEST2: TEST2 - -input_mapping: - test1: CM_ENV_TEST1 - -new_env_keys: -- CM_ENV_TEST* - -new_state_keys: -- hello_world* - -tags: -- print -- hello-world -- hello world -- hello -- world -- native-script -- native -- script - -variations: - text.#: - env: - CM_PRINT_HELLO_WORLD_TEXT: "#" - - skip_print_env: - env: - CM_PRINT_HELLO_WORLD_SKIP_PRINT_ENV: 'yes' - -docker: - skip_run_cmd: 'yes' - skip_cm_sys_upgrade: 'yes' - cm_repo_flags: '--checkout=dev' - use_host_group_id: 'yes' - image_tag_extra: '-cm-dev' diff --git a/script/print-hello-world/run.bat b/script/print-hello-world/run.bat deleted file mode 100644 index 8ce95fc1a..000000000 --- a/script/print-hello-world/run.bat +++ /dev/null @@ -1,16 +0,0 @@ -if not "%CM_PRINT_HELLO_WORLD_SKIP_PRINT_ENV%" == "yes" ( - echo. - echo CM_ENV_TEST1 = %CM_ENV_TEST1% - echo CM_ENV_TEST2 = %CM_ENV_TEST2% - echo CM_ENV_TEST3 = %CM_ENV_TEST3% -) - -echo. -echo HELLO WORLD! -if not "%CM_PRINT_HELLO_WORLD_TEXT%" == "" ( - - echo. - echo %CM_PRINT_HELLO_WORLD_TEXT% - -) -echo. 
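The deleted print-hello-world meta.yaml above is a compact illustration of env propagation: only keys matching the patterns under new_env_keys (here CM_ENV_TEST*) survive past the script run. A minimal sketch of that filtering step, assuming simple glob semantics rather than the automation's exact matching rules:

    # Illustrative new_env_keys filter; fnmatch globbing is an assumption.
    from fnmatch import fnmatch

    def filter_new_env(env, new_env_keys):
        # Keep only keys matching one of the declared patterns.
        return {k: v for k, v in env.items()
                if any(fnmatch(k, pat) for pat in new_env_keys)}

    # filter_new_env({'CM_ENV_TEST1': 'TEST1', 'OTHER': 'x'}, ['CM_ENV_TEST*'])
    # -> {'CM_ENV_TEST1': 'TEST1'}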
diff --git a/script/print-hello-world/run.sh b/script/print-hello-world/run.sh deleted file mode 100644 index fcb42d00e..000000000 --- a/script/print-hello-world/run.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -if [[ ${CM_PRINT_HELLO_WORLD_SKIP_PRINT_ENV} != "yes" ]]; then - echo "" - echo "CM_ENV_TEST1 = ${CM_ENV_TEST1}" - echo "CM_ENV_TEST2 = ${CM_ENV_TEST2}" - echo "CM_ENV_TEST3 = ${CM_ENV_TEST3}" -fi - -echo "" -echo "HELLO WORLD!" -if [[ ${CM_PRINT_HELLO_WORLD_TEXT} != "" ]]; then - - echo "" - echo "${CM_PRINT_HELLO_WORLD_TEXT}" - -fi -echo "" diff --git a/script/print-python-version/COPYRIGHT.md b/script/print-python-version/COPYRIGHT.md deleted file mode 100644 index 9e44ad290..000000000 --- a/script/print-python-version/COPYRIGHT.md +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright Notice - -© 2022-2025 MLCommons. All Rights Reserved. - -This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at: - -[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) - -Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. diff --git a/script/print-python-version/README.md b/script/print-python-version/README.md deleted file mode 100644 index b0794039f..000000000 --- a/script/print-python-version/README.md +++ /dev/null @@ -1 +0,0 @@ -Please see [https://docs.mlcommons.org/cm4mlops/scripts/Tests/print-python-version](https://docs.mlcommons.org/cm4mlops/scripts/Tests/print-python-version) for the documentation of this CM script. diff --git a/script/print-python-version/meta.yaml b/script/print-python-version/meta.yaml deleted file mode 100644 index bc2497ea5..000000000 --- a/script/print-python-version/meta.yaml +++ /dev/null @@ -1,15 +0,0 @@ -alias: print-python-version -automation_alias: script -automation_uid: 5b4e0237da074764 -category: Tests -deps: -- names: - - python - - python3 - tags: get,python3 -tags: -- print -- python -- version -- python-version -uid: d3a538fa4abb464b diff --git a/script/print-python-version/run.bat b/script/print-python-version/run.bat deleted file mode 100644 index e79030343..000000000 --- a/script/print-python-version/run.bat +++ /dev/null @@ -1,8 +0,0 @@ -echo. - -echo CM_PYTHON_BIN = %CM_PYTHON_BIN% -echo CM_PYTHON_BIN_WITH_PATH = %CM_PYTHON_BIN_WITH_PATH% - -echo . 
- -%CM_PYTHON_BIN_WITH_PATH% --version diff --git a/script/print-python-version/run.sh b/script/print-python-version/run.sh deleted file mode 100644 index 3c54cd68e..000000000 --- a/script/print-python-version/run.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -echo "" - -echo "CM_PYTHON_BIN = ${CM_PYTHON_BIN}" -echo "CM_PYTHON_BIN_WITH_PATH = ${CM_PYTHON_BIN_WITH_PATH}" - -echo "" - -${CM_PYTHON_BIN_WITH_PATH} --version - diff --git a/script/process-mlperf-accuracy/customize.py b/script/process-mlperf-accuracy/customize.py index 9e3a99885..b9da75a19 100644 --- a/script/process-mlperf-accuracy/customize.py +++ b/script/process-mlperf-accuracy/customize.py @@ -9,26 +9,26 @@ def preprocess(i): xsep = ';' if os_info['platform'] == 'windows' else ':' env = i['env'] - results_dir = env.get("CM_MLPERF_ACCURACY_RESULTS_DIR", "") + results_dir = env.get("MLC_MLPERF_ACCURACY_RESULTS_DIR", "") if results_dir == "": - print("Please set CM_MLPERF_ACCURACY_RESULTS_DIR") + print("Please set MLC_MLPERF_ACCURACY_RESULTS_DIR") return {'return': -1} # In fact, we expect only 1 command line here run_cmds = [] - if env.get('CM_MAX_EXAMPLES', '') != '' and env.get( - 'CM_MLPERF_RUN_STYLE', '') != 'valid': - max_examples_string = " --max_examples " + env['CM_MAX_EXAMPLES'] + if env.get('MLC_MAX_EXAMPLES', '') != '' and env.get( + 'MLC_MLPERF_RUN_STYLE', '') != 'valid': + max_examples_string = " --max_examples " + env['MLC_MAX_EXAMPLES'] else: max_examples_string = "" results_dir_split = results_dir.split(xsep) - dataset = env['CM_DATASET'] + dataset = env['MLC_DATASET'] regenerate_accuracy_file = env.get( - 'CM_MLPERF_REGENERATE_ACCURACY_FILE', env.get( - 'CM_RERUN', False)) + 'MLC_MLPERF_REGENERATE_ACCURACY_FILE', env.get( + 'MLC_RERUN', False)) for result_dir in results_dir_split: @@ -39,168 +39,168 @@ def preprocess(i): continue if dataset == "openimages": - if env.get('CM_DATASET_PATH_ROOT', '') != '': - dataset_dir = env['CM_DATASET_PATH_ROOT'] + if env.get('MLC_DATASET_PATH_ROOT', '') != '': + dataset_dir = env['MLC_DATASET_PATH_ROOT'] if 'DATASET_ANNOTATIONS_FILE_PATH' in env: del (env['DATASET_ANNOTATIONS_FILE_PATH']) else: - env['DATASET_ANNOTATIONS_FILE_PATH'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH'] + env['DATASET_ANNOTATIONS_FILE_PATH'] = env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] dataset_dir = os.getcwd() # not used, just to keep the script happy - CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " " + "'" + os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " " + "'" + os.path.join(env['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", "accuracy-openimages.py") + "'" + " --mlperf-accuracy-file " + "'" + os.path.join(result_dir, "mlperf_log_accuracy.json") + "'" + " --openimages-dir " + "'" + dataset_dir + "'" + " --verbose > " + "'" + \ out_file + "'" elif dataset == "imagenet": - CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", "accuracy-imagenet.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir, - "mlperf_log_accuracy.json") + "' --imagenet-val-file '" + os.path.join(env['CM_DATASET_AUX_PATH'], - "val.txt") + "' --dtype " + env.get('CM_ACCURACY_DTYPE', "float32") + " > '" + out_file + "'" + "mlperf_log_accuracy.json") + "' --imagenet-val-file '" + os.path.join(env['MLC_DATASET_AUX_PATH'], + 
"val.txt") + "' --dtype " + env.get('MLC_ACCURACY_DTYPE', "float32") + " > '" + out_file + "'" elif dataset == "squad": - CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_BERT_PATH'], - "accuracy-squad.py") + "' --val_data '" + env['CM_DATASET_SQUAD_VAL_PATH'] + \ + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_BERT_PATH'], + "accuracy-squad.py") + "' --val_data '" + env['MLC_DATASET_SQUAD_VAL_PATH'] + \ "' --log_file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ - "' --vocab_file '" + env['CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH'] + \ + "' --vocab_file '" + env['MLC_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH'] + \ "' --out_file '" + os.path.join(result_dir, 'predictions.json') + \ - "' --features_cache_file '" + os.path.join(env['CM_MLPERF_INFERENCE_BERT_PATH'], 'eval_features.pickle') + \ - "' --output_dtype " + env['CM_ACCURACY_DTYPE'] + env.get( - 'CM_OUTPUT_TRANSPOSED', '') + max_examples_string + " > '" + out_file + "'" + "' --features_cache_file '" + os.path.join(env['MLC_MLPERF_INFERENCE_BERT_PATH'], 'eval_features.pickle') + \ + "' --output_dtype " + env['MLC_ACCURACY_DTYPE'] + env.get( + 'MLC_OUTPUT_TRANSPOSED', '') + max_examples_string + " > '" + out_file + "'" elif dataset == "cnndm": - if env.get('CM_MLPERF_IMPLEMENTATION', '') == 'intel': - accuracy_checker_file = env['CM_MLPERF_INFERENCE_INTEL_GPTJ_ACCURACY_FILE_WITH_PATH'] - env['+PYTHONPATH'] = [os.path.dirname(env['CM_MLPERF_INFERENCE_INTEL_GPTJ_DATASET_FILE_WITH_PATH'])] + [ - os.path.dirname(env['CM_MLPERF_INFERENCE_INTEL_GPTJ_DATASET_ITEM_FILE_WITH_PATH'])] + env['+PYTHONPATH'] + if env.get('MLC_MLPERF_IMPLEMENTATION', '') == 'intel': + accuracy_checker_file = env['MLC_MLPERF_INFERENCE_INTEL_GPTJ_ACCURACY_FILE_WITH_PATH'] + env['+PYTHONPATH'] = [os.path.dirname(env['MLC_MLPERF_INFERENCE_INTEL_GPTJ_DATASET_FILE_WITH_PATH'])] + [ + os.path.dirname(env['MLC_MLPERF_INFERENCE_INTEL_GPTJ_DATASET_ITEM_FILE_WITH_PATH'])] + env['+PYTHONPATH'] suffix_string = " --model-name-or-path '" + \ env['GPTJ_CHECKPOINT_PATH'] + "'" else: - accuracy_checker_file = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "language", "gpt-j", + accuracy_checker_file = os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "language", "gpt-j", "evaluation.py") suffix_string = " --dtype " + \ - env.get('CM_ACCURACY_DTYPE', "float32") - CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + accuracy_checker_file + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ + env.get('MLC_ACCURACY_DTYPE', "float32") + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + accuracy_checker_file + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ "' --dataset-file '" + \ - env['CM_DATASET_EVAL_PATH'] + "'" + \ + env['MLC_DATASET_EVAL_PATH'] + "'" + \ suffix_string + " > '" + out_file + "'" elif dataset == "openorca": - accuracy_checker_file = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "language", "llama2-70b", + accuracy_checker_file = os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "language", "llama2-70b", "evaluate-accuracy.py") - if env.get('CM_VLLM_SERVER_MODEL_NAME', '') == '': - checkpoint_path = env['CM_ML_MODEL_LLAMA2_FILE_WITH_PATH'] + if env.get('MLC_VLLM_SERVER_MODEL_NAME', '') == '': + checkpoint_path = env['MLC_ML_MODEL_LLAMA2_FILE_WITH_PATH'] else: - checkpoint_path = env['CM_VLLM_SERVER_MODEL_NAME'] - CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + accuracy_checker_file + "' --checkpoint-path '" + checkpoint_path 
+ "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ - "' --dataset-file '" + env['CM_DATASET_PREPROCESSED_PATH'] + "'" + " --dtype " + env.get( - 'CM_ACCURACY_DTYPE', "int32") + " > '" + out_file + "'" + checkpoint_path = env['MLC_VLLM_SERVER_MODEL_NAME'] + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + accuracy_checker_file + "' --checkpoint-path '" + checkpoint_path + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ + "' --dataset-file '" + env['MLC_DATASET_PREPROCESSED_PATH'] + "'" + " --dtype " + env.get( + 'MLC_ACCURACY_DTYPE', "int32") + " > '" + out_file + "'" elif dataset == "openorca-gsm8k-mbxp-combined": - accuracy_checker_file = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "language", "mixtral-8x7b", + accuracy_checker_file = os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "language", "mixtral-8x7b", "evaluate-accuracy.py") - CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + accuracy_checker_file + "' --checkpoint-path '" + env['MIXTRAL_CHECKPOINT_PATH'] + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ - "' --dataset-file '" + env['CM_DATASET_MIXTRAL_PREPROCESSED_PATH'] + "'" + \ - " --dtype " + env.get('CM_ACCURACY_DTYPE', + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + accuracy_checker_file + "' --checkpoint-path '" + env['MIXTRAL_CHECKPOINT_PATH'] + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ + "' --dataset-file '" + env['MLC_DATASET_MIXTRAL_PREPROCESSED_PATH'] + "'" + \ + " --dtype " + env.get('MLC_ACCURACY_DTYPE', "float32") + " > '" + out_file + "'" elif dataset == "coco2014": env['+PYTHONPATH'] = [ os.path.join( - env['CM_MLPERF_INFERENCE_SOURCE'], + env['MLC_MLPERF_INFERENCE_SOURCE'], "text_to_image", "tools"), os.path.join( - env['CM_MLPERF_INFERENCE_SOURCE'], + env['MLC_MLPERF_INFERENCE_SOURCE'], "text_to_image", "tools", "fid")] extra_options = "" - if env.get('CM_SDXL_STATISTICS_FILE_PATH', '') != '': + if env.get('MLC_SDXL_STATISTICS_FILE_PATH', '') != '': extra_options += ( f""" --statistics-path '{ - env['CM_SDXL_STATISTICS_FILE_PATH']}'""" + env['MLC_SDXL_STATISTICS_FILE_PATH']}'""" ) - if env.get('CM_SDXL_COMPLIANCE_IMAGES_PATH', '') != '': + if env.get('MLC_SDXL_COMPLIANCE_IMAGES_PATH', '') != '': extra_options += ( f""" --compliance-images-path '{ - env['CM_SDXL_COMPLIANCE_IMAGES_PATH']}' """ + env['MLC_SDXL_COMPLIANCE_IMAGES_PATH']}' """ ) else: extra_options += f""" --compliance-images-path '{ os.path.join( result_dir, "images")}' """ - if env.get('CM_COCO2014_SAMPLE_ID_PATH', '') != '': + if env.get('MLC_COCO2014_SAMPLE_ID_PATH', '') != '': extra_options += ( - f" --ids-path '{env['CM_COCO2014_SAMPLE_ID_PATH']}' " + f" --ids-path '{env['MLC_COCO2014_SAMPLE_ID_PATH']}' " ) - if env.get('CM_SDXL_ACCURACY_RUN_DEVICE', '') != '': + if env.get('MLC_SDXL_ACCURACY_RUN_DEVICE', '') != '': extra_options += ( - f" --device '{env['CM_SDXL_ACCURACY_RUN_DEVICE']}' " + f" --device '{env['MLC_SDXL_ACCURACY_RUN_DEVICE']}' " ) - # env['DATASET_ANNOTATIONS_FILE_PATH'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH'] - CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "text_to_image", "tools", + # env['DATASET_ANNOTATIONS_FILE_PATH'] = env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "text_to_image", "tools", "accuracy_coco.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir, 
"mlperf_log_accuracy.json") + \ "' --caption-path '" + os.path.join( - env['CM_MLPERF_INFERENCE_SOURCE'], + env['MLC_MLPERF_INFERENCE_SOURCE'], "text_to_image", "coco2014", "captions", "captions_source.tsv") + "'" + extra_options + " > '" + out_file + "'" elif dataset == "kits19": - CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_3DUNET_PATH'], + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_3DUNET_PATH'], "accuracy_kits.py") + \ - "' --preprocessed_data_dir '" + env['CM_DATASET_PREPROCESSED_PATH'] +\ + "' --preprocessed_data_dir '" + env['MLC_DATASET_PREPROCESSED_PATH'] +\ "' --postprocessed_data_dir '" + result_dir +\ "' --log_file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ "' --output_dtype " + \ - env['CM_ACCURACY_DTYPE'] + " > '" + out_file + "'" + env['MLC_ACCURACY_DTYPE'] + " > '" + out_file + "'" elif dataset == "librispeech": - CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_RNNT_PATH'], + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_RNNT_PATH'], "accuracy_eval.py") + \ - "' --dataset_dir '" + os.path.join(env['CM_DATASET_PREPROCESSED_PATH'], "..") +\ - "' --manifest '" + env['CM_DATASET_PREPROCESSED_JSON'] +\ + "' --dataset_dir '" + os.path.join(env['MLC_DATASET_PREPROCESSED_PATH'], "..") +\ + "' --manifest '" + env['MLC_DATASET_PREPROCESSED_JSON'] +\ "' --log_dir '" + result_dir + \ "' --output_dtype " + \ - env['CM_ACCURACY_DTYPE'] + " > '" + out_file + "'" + env['MLC_ACCURACY_DTYPE'] + " > '" + out_file + "'" elif dataset == "terabyte": extra_options = "" - if env.get('CM_DLRM_V2_AGGREGATION_TRACE_FILE_PATH', '') != '': + if env.get('MLC_DLRM_V2_AGGREGATION_TRACE_FILE_PATH', '') != '': extra_options += ( f""" --aggregation-trace-file '{ - env['CM_DLRM_V2_AGGREGATION_TRACE_FILE_PATH']}' """ + env['MLC_DLRM_V2_AGGREGATION_TRACE_FILE_PATH']}' """ ) - if env.get('CM_DLRM_V2_DAY23_FILE_PATH', '') != '': + if env.get('MLC_DLRM_V2_DAY23_FILE_PATH', '') != '': extra_options += ( f""" --day-23-file '{ - env['CM_DLRM_V2_DAY23_FILE_PATH']}' """ + env['MLC_DLRM_V2_DAY23_FILE_PATH']}' """ ) - CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_DLRM_V2_PATH'], "pytorch", "tools", + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_DLRM_V2_PATH'], "pytorch", "tools", "accuracy-dlrm.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + "'" + extra_options + \ - " --dtype " + env.get('CM_ACCURACY_DTYPE', + " --dtype " + env.get('MLC_ACCURACY_DTYPE', "float32") + " > '" + out_file + "'" elif dataset == "igbh": - if env.get('CM_DATASET_IGBH_SIZE', '') == '': - if env.get('CM_MLPERF_SUBMISSION_GENERATION_STYLE', + if env.get('MLC_DATASET_IGBH_SIZE', '') == '': + if env.get('MLC_MLPERF_SUBMISSION_GENERATION_STYLE', '') == "full": - env['CM_DATASET_IGBH_SIZE'] = "full" + env['MLC_DATASET_IGBH_SIZE'] = "full" else: - env['CM_DATASET_IGBH_SIZE'] = "tiny" - CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "graph", "R-GAT", "tools", "accuracy_igbh.py") + "' --mlperf-accuracy-file '" + os.path.join( - result_dir, "mlperf_log_accuracy.json") + "' --dataset-path '" + env['CM_DATASET_IGBH_PATH'] + "' --dataset-size '" + env['CM_DATASET_IGBH_SIZE'] + "' --output-file '" + out_file + "'" + env['MLC_DATASET_IGBH_SIZE'] = "tiny" + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + 
os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "graph", "R-GAT", "tools", "accuracy_igbh.py") + "' --mlperf-accuracy-file '" + os.path.join( + result_dir, "mlperf_log_accuracy.json") + "' --dataset-path '" + env['MLC_DATASET_IGBH_PATH'] + "' --dataset-size '" + env['MLC_DATASET_IGBH_SIZE'] + "' --output-file '" + out_file + "'" elif dataset == "dataset_llama3": - CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "language", "llama3.1-405b", "evaluate-accuracy.py") + "' --checkpoint-path '" + env['CM_ML_MODEL_LLAMA3_CHECKPOINT_PATH'] + "' --mlperf-accuracy-file '" + os.path.join( - result_dir, "mlperf_log_accuracy.json") + "' --dtype '" + env['CM_ACCURACY_DTYPE'] + "' --dataset-file '" + env['CM_DATASET_LLAMA3_PATH'] + "' > '" + out_file + "'" + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "language", "llama3.1-405b", "evaluate-accuracy.py") + "' --checkpoint-path '" + env['MLC_ML_MODEL_LLAMA3_CHECKPOINT_PATH'] + "' --mlperf-accuracy-file '" + os.path.join( + result_dir, "mlperf_log_accuracy.json") + "' --dtype '" + env['MLC_ACCURACY_DTYPE'] + "' --dataset-file '" + env['MLC_DATASET_LLAMA3_PATH'] + "' > '" + out_file + "'" else: return {'return': 1, 'error': 'Unsupported dataset'} @@ -208,14 +208,14 @@ def preprocess(i): run_cmds.append(CMD) if os_info['platform'] == 'windows': - env['CM_RUN_CMDS'] = ( + env['MLC_RUN_CMDS'] = ( '\n'.join(run_cmds)).replace( "'", '"').replace( '>', '^>') else: - env['CM_RUN_CMDS'] = "??".join(run_cmds) + env['MLC_RUN_CMDS'] = "??".join(run_cmds) return {'return': 0} @@ -228,7 +228,7 @@ def postprocess(i): xsep = ';' if os_info['platform'] == 'windows' else ':' - results_dir = env.get("CM_MLPERF_ACCURACY_RESULTS_DIR", "") + results_dir = env.get("MLC_MLPERF_ACCURACY_RESULTS_DIR", "") results_dir_split = results_dir.split(xsep) diff --git a/script/process-mlperf-accuracy/meta.yaml b/script/process-mlperf-accuracy/meta.yaml index 3b80194d4..f45e3f485 100644 --- a/script/process-mlperf-accuracy/meta.yaml +++ b/script/process-mlperf-accuracy/meta.yaml @@ -14,8 +14,8 @@ deps: - accuracy-check-src tags: get,mlcommons,inference,src input_mapping: - rerun: CM_RERUN - result_dir: CM_MLPERF_ACCURACY_RESULTS_DIR + rerun: MLC_RERUN + result_dir: MLC_MLPERF_ACCURACY_RESULTS_DIR new_state_keys: - app_mlperf_inference_accuracy* tags: @@ -54,35 +54,35 @@ variations: - absl-py tags: get,generic-python-lib,_package.absl-py - enable_if_env: - CM_MLPERF_IMPLEMENTATION: + MLC_MLPERF_IMPLEMENTATION: - intel env: - CM_DOWNLOAD_FINAL_ENV_NAME: CM_MLPERF_INFERENCE_INTEL_GPTJ_ACCURACY_FILE_WITH_PATH + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_MLPERF_INFERENCE_INTEL_GPTJ_ACCURACY_FILE_WITH_PATH extra_cache_tags: intel,accuracy,file,gptj,mlperf,inference force_cache: true tags: download,file,_url.https://raw.githubusercontent.com/mlcommons/inference_results_v4.0/main/closed/Intel/code/gptj-99/ITREX/evaluation.py - enable_if_env: - CM_MLPERF_IMPLEMENTATION: + MLC_MLPERF_IMPLEMENTATION: - intel env: - CM_DOWNLOAD_FINAL_ENV_NAME: CM_MLPERF_INFERENCE_INTEL_GPTJ_DATASET_FILE_WITH_PATH + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_MLPERF_INFERENCE_INTEL_GPTJ_DATASET_FILE_WITH_PATH extra_cache_tags: intel,dataset,file,gptj,mlperf,inference force_cache: true tags: download,file,_url.https://raw.githubusercontent.com/mlcommons/inference_results_v4.0/main/closed/Intel/code/gptj-99/ITREX/dataset.py - enable_if_env: - CM_MLPERF_IMPLEMENTATION: + MLC_MLPERF_IMPLEMENTATION: - intel env: - CM_DOWNLOAD_FINAL_ENV_NAME: 
CM_MLPERF_INFERENCE_INTEL_GPTJ_DATASET_ITEM_FILE_WITH_PATH + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_MLPERF_INFERENCE_INTEL_GPTJ_DATASET_ITEM_FILE_WITH_PATH extra_cache_tags: intel,dataset,item,file,gptj,mlperf,inference force_cache: true tags: download,file,_url.https://raw.githubusercontent.com/mlcommons/inference_results_v4.0/main/closed/Intel/code/gptj-99/ITREX/item.py - enable_if_env: - CM_MLPERF_IMPLEMENTATION: + MLC_MLPERF_IMPLEMENTATION: - intel tags: get,ml-model,gptj,_fp32,_pytorch env: - CM_DATASET: cnndm + MLC_DATASET: cnndm group: dataset coco2014: deps: @@ -101,7 +101,7 @@ variations: - numpy tags: get,generic-python-lib,_package.numpy env: - CM_DATASET: coco2014 + MLC_DATASET: coco2014 group: dataset default-pycocotools: default: true @@ -115,16 +115,16 @@ variations: tags: get,mlcommons,mlperf,inference,src,-_openimages-nvidia-pycocotools float16: env: - CM_ACCURACY_DTYPE: float16 + MLC_ACCURACY_DTYPE: float16 group: precision float32: default: 'true' env: - CM_ACCURACY_DTYPE: float32 + MLC_ACCURACY_DTYPE: float32 group: precision float64: env: - CM_ACCURACY_DTYPE: float64 + MLC_ACCURACY_DTYPE: float64 group: precision imagenet: default: 'true' @@ -132,23 +132,23 @@ variations: - tags: get,dataset-aux,image-classification,imagenet-aux - tags: get,generic-python-lib,_numpy env: - CM_DATASET: imagenet + MLC_DATASET: imagenet group: dataset int16: env: - CM_ACCURACY_DTYPE: int16 + MLC_ACCURACY_DTYPE: int16 group: precision int32: env: - CM_ACCURACY_DTYPE: int32 + MLC_ACCURACY_DTYPE: int32 group: precision int64: env: - CM_ACCURACY_DTYPE: int64 + MLC_ACCURACY_DTYPE: int64 group: precision int8: env: - CM_ACCURACY_DTYPE: int8 + MLC_ACCURACY_DTYPE: int8 group: precision kits19: deps: @@ -157,13 +157,13 @@ variations: version_max: 1.53.0 version_max_usable: 1.53.0 env: - CM_DATASET: kits19 + MLC_DATASET: kits19 group: dataset librispeech: deps: - tags: get,dataset,preprocessed,speech-recognition,librispeech env: - CM_DATASET: librispeech + MLC_DATASET: librispeech group: dataset nvidia-pycocotools: group: coco-evaluation-tool @@ -182,27 +182,27 @@ variations: - names: - llama2-model skip_if_env: - CM_MLPERF_INFERENCE_API_SERVER: + MLC_MLPERF_INFERENCE_API_SERVER: - 'on' tags: get,ml-model,llama2 env: - CM_DATASET: openorca + MLC_DATASET: openorca group: dataset openimages: deps: - enable_if_env: - CM_MLPERF_RUN_STYLE: + MLC_MLPERF_RUN_STYLE: - valid tags: get,dataset-aux,openimages,annotations - names: - openimages-original skip_if_env: - CM_MLPERF_RUN_STYLE: + MLC_MLPERF_RUN_STYLE: - valid tags: get,dataset,openimages,original - tags: get,generic-python-lib,_package.kiwisolver env: - CM_DATASET: openimages + MLC_DATASET: openimages group: dataset openorca-gsm8k-mbxp: deps: @@ -213,17 +213,17 @@ variations: - names: - openorca-gsm8k-mbxp-combined skip_if_env: - CM_MLPERF_DATASET_MIXTRAL_8X7B_DOWNLOAD_TO_HOST: + MLC_MLPERF_DATASET_MIXTRAL_8X7B_DOWNLOAD_TO_HOST: - 'yes' tags: get,dataset-mixtral,openorca-mbxp-gsm8k-combined - names: - mixtral-8x7b-model skip_if_env: - CM_MLPERF_MODEL_MIXTRAL_8X7B_DOWNLOAD_TO_HOST: + MLC_MLPERF_MODEL_MIXTRAL_8X7B_DOWNLOAD_TO_HOST: - 'yes' tags: get,ml-model,mixtral env: - CM_DATASET: openorca-gsm8k-mbxp-combined + MLC_DATASET: openorca-gsm8k-mbxp-combined group: dataset squad: add_deps_recursive: @@ -233,25 +233,25 @@ variations: - tags: get,generic-python-lib,_boto3 - tags: get,generic-python-lib,_package.transformers - skip_if_env: - CM_DATASET_SQUAD_VAL_PATH: [] + MLC_DATASET_SQUAD_VAL_PATH: [] tags: get,dataset,squad,language-processing - skip_if_env: - 
CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH: + MLC_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH: - 'on' tags: get,dataset-aux,squad-vocab - skip_if_env: - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - cuda - gpu tags: get,generic-python-lib,_torch - enable_if_env: - CM_MLPERF_DEVICE: + MLC_MLPERF_DEVICE: - cuda - gpu tags: get,generic-python-lib,_torch_cuda - tags: get,generic-python-lib,_tokenization env: - CM_DATASET: squad + MLC_DATASET: squad group: dataset terabyte: deps: @@ -259,13 +259,13 @@ variations: - tags: get,generic-python-lib,_scikit-learn - tags: get,generic-python-lib,_numpy env: - CM_DATASET: terabyte + MLC_DATASET: terabyte group: dataset igbh: env: - CM_DATASET: igbh + MLC_DATASET: igbh group: dataset dataset_llama3: env: - CM_DATASET: dataset_llama3 + MLC_DATASET: dataset_llama3 group: dataset diff --git a/script/process-mlperf-accuracy/run.bat b/script/process-mlperf-accuracy/run.bat index 82705126d..022cb0e12 100644 --- a/script/process-mlperf-accuracy/run.bat +++ b/script/process-mlperf-accuracy/run.bat @@ -1,8 +1,8 @@ echo Running command: echo. -echo %CM_RUN_CMDS% +echo %MLC_RUN_CMDS% echo. -%CM_RUN_CMDS% +%MLC_RUN_CMDS% IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/process-mlperf-accuracy/run.sh b/script/process-mlperf-accuracy/run.sh index 6268860cb..70764578f 100644 --- a/script/process-mlperf-accuracy/run.sh +++ b/script/process-mlperf-accuracy/run.sh @@ -1,6 +1,6 @@ #!/bin/bash -IFS="??" read -r -a cmd_array <<< "$CM_RUN_CMDS" +IFS="??" read -r -a cmd_array <<< "$MLC_RUN_CMDS" for cmd in "${cmd_array[@]}" do echo "${cmd}" diff --git a/script/prune-bert-models/customize.py b/script/prune-bert-models/customize.py index eac06855d..9928625f5 100644 --- a/script/prune-bert-models/customize.py +++ b/script/prune-bert-models/customize.py @@ -8,9 +8,9 @@ def preprocess(i): env = i['env'] - ckpt_path = env.get('CM_BERT_PRUNE_CKPT_PATH', '') + ckpt_path = env.get('MLC_BERT_PRUNE_CKPT_PATH', '') if ckpt_path == '': - p = env['CM_ML_MODEL_FILE_WITH_PATH'] + p = env['MLC_ML_MODEL_FILE_WITH_PATH'] x = os.listdir(p) for y in x: if y.startswith('models--'): @@ -22,21 +22,21 @@ def preprocess(i): if len(z2) > 0: ckpt_path = os.path.join(z1, z2[0]) - env['CM_BERT_PRUNE_CKPT_PATH'] = ckpt_path + env['MLC_BERT_PRUNE_CKPT_PATH'] = ckpt_path - out_dir = env.get('CM_BERT_PRUNE_OUTPUT_DIR', '') + out_dir = env.get('MLC_BERT_PRUNE_OUTPUT_DIR', '') if out_dir == '': out_dir = os.path.join(os.getcwd(), 'pruned-model-output') - env['CM_BERT_PRUNE_OUTPUT_DIR'] = out_dir + env['MLC_BERT_PRUNE_OUTPUT_DIR'] = out_dir print('') print( 'Local CM cache path to the updated BERT pruner src from NeurIPS 2022: ' + - env['CM_GIT_REPO_BERT_PRUNER_NEURIPS_2022_CHECKOUT_PATH']) + env['MLC_GIT_REPO_BERT_PRUNER_NEURIPS_2022_CHECKOUT_PATH']) print('') - for k in ["CM_ML_MODEL_FILE_WITH_PATH", - "CM_BERT_PRUNE_CKPT_PATH", "CM_BERT_PRUNE_OUTPUT_DIR"]: + for k in ["MLC_ML_MODEL_FILE_WITH_PATH", + "MLC_BERT_PRUNE_CKPT_PATH", "MLC_BERT_PRUNE_OUTPUT_DIR"]: print('ENV["{}"]: {}'.format(k, env[k])) print('') diff --git a/script/prune-bert-models/meta.yaml b/script/prune-bert-models/meta.yaml index 0c9f63297..59ec44d6a 100644 --- a/script/prune-bert-models/meta.yaml +++ b/script/prune-bert-models/meta.yaml @@ -3,10 +3,10 @@ automation_alias: script automation_uid: 5b4e0237da074764 category: AI/ML optimization default_env: - CM_BERT_PRUNE_CONSTRAINT: '0.5' - CM_BERT_PRUNE_MODEL_NAME: bert-large-uncased - CM_BERT_PRUNE_TASK: squad - CM_MODEL_ZOO_STUB: bert-large-uncased + MLC_BERT_PRUNE_CONSTRAINT: '0.5' + 
MLC_BERT_PRUNE_MODEL_NAME: bert-large-uncased + MLC_BERT_PRUNE_TASK: squad + MLC_MODEL_ZOO_STUB: bert-large-uncased deps: - tags: get,python3 - tags: get,generic-python-lib,_numpy @@ -18,14 +18,14 @@ deps: - tags: get,generic-python-lib,_transformers - tags: get,generic-python-lib,_scikit-learn - env: - CM_GIT_ENV_KEY: BERT_PRUNER_NEURIPS_2022 + MLC_GIT_ENV_KEY: BERT_PRUNER_NEURIPS_2022 tags: get,git,repo,_repo.https://github.com/cknowledge/retraining-free-pruning - names: - get-model tags: get,ml-model,model,zoo,model-zoo,huggingface,_prune input_mapping: - constraint: CM_BERT_PRUNE_CONSTRAINT - output_dir: CM_BERT_PRUNE_OUTPUT_DIR + constraint: MLC_BERT_PRUNE_CONSTRAINT + output_dir: MLC_BERT_PRUNE_OUTPUT_DIR tags: - prune - bert-models @@ -38,11 +38,11 @@ variations: get-model: tags: _model-stub.# env: - CM_BERT_PRUNE_MODEL_NAME: '#' - CM_MODEL_ZOO_STUB: '#' + MLC_BERT_PRUNE_MODEL_NAME: '#' + MLC_MODEL_ZOO_STUB: '#' path.#: env: - CM_BERT_PRUNE_CKPT_PATH: '#' + MLC_BERT_PRUNE_CKPT_PATH: '#' task.#: env: - CM_BERT_PRUNE_TASK: '#' + MLC_BERT_PRUNE_TASK: '#' diff --git a/script/prune-bert-models/run.sh b/script/prune-bert-models/run.sh index 68c077968..6788a303b 100644 --- a/script/prune-bert-models/run.sh +++ b/script/prune-bert-models/run.sh @@ -4,15 +4,15 @@ echo "====================================================================" echo "Start pruning ..." echo "" -CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD} +MLC_TMP_CURRENT_SCRIPT_PATH=${MLC_TMP_CURRENT_SCRIPT_PATH:-$PWD} -time ${CM_PYTHON_BIN_WITH_PATH} \ - ${CM_GIT_REPO_BERT_PRUNER_NEURIPS_2022_CHECKOUT_PATH}/main.py \ - --model_name ${CM_BERT_PRUNE_MODEL_NAME} \ - --task_name ${CM_BERT_PRUNE_TASK} \ - --ckpt_dir ${CM_BERT_PRUNE_CKPT_PATH} \ - --constraint ${CM_BERT_PRUNE_CONSTRAINT} \ - --output_dir ${CM_BERT_PRUNE_OUTPUT_DIR} +time ${MLC_PYTHON_BIN_WITH_PATH} \ + ${MLC_GIT_REPO_BERT_PRUNER_NEURIPS_2022_CHECKOUT_PATH}/main.py \ + --model_name ${MLC_BERT_PRUNE_MODEL_NAME} \ + --task_name ${MLC_BERT_PRUNE_TASK} \ + --ckpt_dir ${MLC_BERT_PRUNE_CKPT_PATH} \ + --constraint ${MLC_BERT_PRUNE_CONSTRAINT} \ + --output_dir ${MLC_BERT_PRUNE_OUTPUT_DIR} test $? -eq 0 || exit $? 
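Note on the `prune-bert-models/customize.py` hunk above: when `MLC_BERT_PRUNE_CKPT_PATH` is unset, the script derives a checkpoint path from `MLC_ML_MODEL_FILE_WITH_PATH` by scanning a HuggingFace-style model cache. The hunk elides some context lines, so the following is only a minimal standalone sketch of that lookup, assuming the usual `models--<org>--<name>/snapshots/<revision>` cache layout (`find_hf_checkpoint` is an illustrative name, not part of the script):

```python
import os

def find_hf_checkpoint(cache_dir):
    # Scan a HuggingFace-style cache for the first model entry and return
    # its first snapshot directory as the checkpoint path ('' if none found).
    for entry in sorted(os.listdir(cache_dir)):
        if not entry.startswith('models--'):
            continue
        snapshots = os.path.join(cache_dir, entry, 'snapshots')
        if os.path.isdir(snapshots):
            revisions = sorted(os.listdir(snapshots))
            if revisions:
                # e.g. <cache>/models--bert-large-uncased/snapshots/<rev>
                return os.path.join(snapshots, revisions[0])
    return ''
```

The resulting path is what `run.sh` above passes to the pruner as `--ckpt_dir`.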
diff --git a/script/publish-results-to-dashboard/code.py b/script/publish-results-to-dashboard/code.py index 2ce02a9df..bcd1ff23a 100644 --- a/script/publish-results-to-dashboard/code.py +++ b/script/publish-results-to-dashboard/code.py @@ -31,13 +31,13 @@ def main(): env = os.environ - dashboard_user = env.get('CM_MLPERF_DASHBOARD_WANDB_USER', '') + dashboard_user = env.get('MLC_MLPERF_DASHBOARD_WANDB_USER', '') if dashboard_user == '': dashboard_user = 'cmind' - dashboard_project = env.get('CM_MLPERF_DASHBOARD_WANDB_PROJECT', '') + dashboard_project = env.get('MLC_MLPERF_DASHBOARD_WANDB_PROJECT', '') if dashboard_project == '': - dashboard_project = 'cm-mlperf-dse-testing' + dashboard_project = 'mlc-mlperf-dse-testing' for k in results: @@ -69,20 +69,20 @@ def main(): # Check extra env variables x = { - "lang": "CM_MLPERF_LANG", - "device": "CM_MLPERF_DEVICE", - "submitter": "CM_MLPERF_SUBMITTER", - "backend": "CM_MLPERF_BACKEND", - "model": "CM_MLPERF_MODEL", - "run_style": "CM_MLPERF_RUN_STYLE", - "rerun": "CM_RERUN", - "hw_name": "CM_HW_NAME", - "max_batchsize": "CM_MLPERF_LOADGEN_MAX_BATCHSIZE", - "num_threads": "CM_NUM_THREADS", - "scenario": "CM_MLPERF_LOADGEN_SCENARIO", - "test_query_count": "CM_TEST_QUERY_COUNT", - "run_checker": "CM_RUN_SUBMISSION_CHECKER", - "skip_truncation": "CM_SKIP_TRUNCATE_ACCURACY" + "lang": "MLC_MLPERF_LANG", + "device": "MLC_MLPERF_DEVICE", + "submitter": "MLC_MLPERF_SUBMITTER", + "backend": "MLC_MLPERF_BACKEND", + "model": "MLC_MLPERF_MODEL", + "run_style": "MLC_MLPERF_RUN_STYLE", + "rerun": "MLC_RERUN", + "hw_name": "MLC_HW_NAME", + "max_batchsize": "MLC_MLPERF_LOADGEN_MAX_BATCHSIZE", + "num_threads": "MLC_NUM_THREADS", + "scenario": "MLC_MLPERF_LOADGEN_SCENARIO", + "test_query_count": "MLC_TEST_QUERY_COUNT", + "run_checker": "MLC_RUN_SUBMISSION_CHECKER", + "skip_truncation": "MLC_SKIP_TRUNCATE_ACCURACY" } for k in x: diff --git a/script/publish-results-to-dashboard/run.bat b/script/publish-results-to-dashboard/run.bat index 37f249b0f..7086d33dd 100644 --- a/script/publish-results-to-dashboard/run.bat +++ b/script/publish-results-to-dashboard/run.bat @@ -1,2 +1,2 @@ -%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\code.py +%MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\code.py IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/publish-results-to-dashboard/run.sh b/script/publish-results-to-dashboard/run.sh index 288833adb..fd1684ceb 100644 --- a/script/publish-results-to-dashboard/run.sh +++ b/script/publish-results-to-dashboard/run.sh @@ -3,5 +3,5 @@ # For now login to WANDB anonymously wandb login --anonymously --relogin -${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/code.py +${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/code.py test $? -eq 0 || exit $? 
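For orientation, the `publish-results-to-dashboard` hunks above only rename the metadata keys that `code.py` reads from the environment before pushing results to a Weights & Biases dashboard; the upload logic itself is not shown in the diff. The snippet below is a hedged sketch of that pattern under the defaults visible in the hunk — the `wandb.init`/`finish` calls are illustrative, not a copy of `code.py`:

```python
import os
import wandb  # run.sh above performs an anonymous `wandb login` first

# Subset of the env-to-field mapping from the hunk above.
x = {
    "device": "MLC_MLPERF_DEVICE",
    "backend": "MLC_MLPERF_BACKEND",
    "scenario": "MLC_MLPERF_LOADGEN_SCENARIO",
}

# Keep only the variables that are actually set in the environment.
meta = {k: os.environ[v] for k, v in x.items() if os.environ.get(v, '') != ''}

run = wandb.init(
    project=os.environ.get('MLC_MLPERF_DASHBOARD_WANDB_PROJECT',
                           'mlc-mlperf-dse-testing'),  # default from code.py
    config=meta,
    anonymous='allow',
)
run.finish()
```

This mirrors how the renamed `MLC_*` variables end up as run metadata on the dashboard.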
diff --git a/script/pull-git-repo/customize.py b/script/pull-git-repo/customize.py index 2135a1941..8a9c71f5c 100644 --- a/script/pull-git-repo/customize.py +++ b/script/pull-git-repo/customize.py @@ -10,10 +10,10 @@ def preprocess(i): env = i['env'] meta = i['meta'] - if 'CM_GIT_CHECKOUT_PATH' not in env: - return {'return': 1, 'error': 'CM_GIT_CHECKOUT_PATH is not set'} + if 'MLC_GIT_CHECKOUT_PATH' not in env: + return {'return': 1, 'error': 'MLC_GIT_CHECKOUT_PATH is not set'} - env['CM_GIT_PULL_CMD'] = "git pull --rebase" + env['MLC_GIT_PULL_CMD'] = "git pull --rebase" return {'return': 0} diff --git a/script/pull-git-repo/meta.yaml b/script/pull-git-repo/meta.yaml index f6d85da88..5f545fc2f 100644 --- a/script/pull-git-repo/meta.yaml +++ b/script/pull-git-repo/meta.yaml @@ -6,7 +6,7 @@ default_env: {} deps: - tags: detect,os input_mapping: - path: CM_GIT_CHECKOUT_PATH + path: MLC_GIT_CHECKOUT_PATH new_env_keys: [] tags: - pull diff --git a/script/pull-git-repo/run.bat b/script/pull-git-repo/run.bat index 8642fce0e..830588501 100644 --- a/script/pull-git-repo/run.bat +++ b/script/pull-git-repo/run.bat @@ -3,10 +3,10 @@ setlocal enabledelayedexpansion REM Save the current directory set "CUR_DIR=%CD%" -set "SCRIPT_DIR=%CM_TMP_CURRENT_SCRIPT_PATH%" +set "SCRIPT_DIR=%MLC_TMP_CURRENT_SCRIPT_PATH%" REM Change to the specified path -set "path=%CM_GIT_CHECKOUT_PATH%" +set "path=%MLC_GIT_CHECKOUT_PATH%" echo cd %path% cd /d "%path%" @@ -16,8 +16,8 @@ if errorlevel 1 ( ) REM Execute the Git pull command -echo %CM_GIT_PULL_CMD% -call %CM_GIT_PULL_CMD% +echo %MLC_GIT_PULL_CMD% +call %MLC_GIT_PULL_CMD% REM Don't fail if there are local changes REM if errorlevel 1 exit /b %errorlevel% diff --git a/script/pull-git-repo/run.sh b/script/pull-git-repo/run.sh index db8612d56..c75f5b45d 100644 --- a/script/pull-git-repo/run.sh +++ b/script/pull-git-repo/run.sh @@ -1,16 +1,16 @@ #!/bin/bash CUR_DIR=$PWD -SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH} +SCRIPT_DIR=${MLC_TMP_CURRENT_SCRIPT_PATH} -path=${CM_GIT_CHECKOUT_PATH} +path=${MLC_GIT_CHECKOUT_PATH} echo "cd $path" cd $path test $? -eq 0 || exit $? -echo ${CM_GIT_PULL_CMD} -eval ${CM_GIT_PULL_CMD} +echo ${MLC_GIT_PULL_CMD} +eval ${MLC_GIT_PULL_CMD} #don't fail if there are local changes #test $? -eq 0 || exit $? diff --git a/script/push-csv-to-spreadsheet/google_api.py b/script/push-csv-to-spreadsheet/google_api.py index 24926daed..b8a66926e 100644 --- a/script/push-csv-to-spreadsheet/google_api.py +++ b/script/push-csv-to-spreadsheet/google_api.py @@ -13,7 +13,7 @@ SCOPES = ['https://www.googleapis.com/auth/spreadsheets'] # The ID of a sample document. 
-DOCUMENT_ID = os.environ['CM_GOOGLE_SPREADSHEET_ID'] +DOCUMENT_ID = os.environ['MLC_GOOGLE_SPREADSHEET_ID'] def main(): @@ -40,8 +40,8 @@ def main(): try: service = build("sheets", "v4", credentials=creds) - sheet_name = os.environ.get('CM_GOOGLE_SHEET_NAME', 'Sheet1') - csv_file = os.environ['CM_CSV_FILE_PATH'] + sheet_name = os.environ.get('MLC_GOOGLE_SHEET_NAME', 'Sheet1') + csv_file = os.environ['MLC_CSV_FILE_PATH'] f = open(csv_file, "r") values = [r for r in csv.reader(f)] diff --git a/script/push-csv-to-spreadsheet/meta.yaml b/script/push-csv-to-spreadsheet/meta.yaml index 028275906..d00dbec7f 100644 --- a/script/push-csv-to-spreadsheet/meta.yaml +++ b/script/push-csv-to-spreadsheet/meta.yaml @@ -3,7 +3,7 @@ automation_alias: script automation_uid: 5b4e0237da074764 category: DevOps automation default_env: - CM_GOOGLE_SPREADSHEET_ID: 1gMHjXmFmwZR4-waPPyxy5Pc3VARqX3kKUWxkP97Xa6Y + MLC_GOOGLE_SPREADSHEET_ID: 1gMHjXmFmwZR4-waPPyxy5Pc3VARqX3kKUWxkP97Xa6Y deps: - names: - python3 @@ -12,9 +12,9 @@ deps: - tags: get,generic-python-lib,_google-api-python-client - tags: get,generic-python-lib,_google-auth-oauthlib input_mapping: - csv_file: CM_CSV_FILE_PATH - sheet_name: CM_GOOGLE_SHEET_NAME - spreadsheet_id: CM_GOOGLE_SPREADSHEET_ID + csv_file: MLC_CSV_FILE_PATH + sheet_name: MLC_GOOGLE_SHEET_NAME + spreadsheet_id: MLC_GOOGLE_SPREADSHEET_ID tags: - push - google-spreadsheet diff --git a/script/push-csv-to-spreadsheet/run.sh b/script/push-csv-to-spreadsheet/run.sh index 5ba4257d5..2ce02b4d1 100644 --- a/script/push-csv-to-spreadsheet/run.sh +++ b/script/push-csv-to-spreadsheet/run.sh @@ -1,3 +1,3 @@ #!/bin/bash -${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/google_api.py +${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/google_api.py diff --git a/script/push-mlperf-inference-results-to-github/customize.py b/script/push-mlperf-inference-results-to-github/customize.py index 7abbc77b0..d056eec77 100644 --- a/script/push-mlperf-inference-results-to-github/customize.py +++ b/script/push-mlperf-inference-results-to-github/customize.py @@ -10,11 +10,11 @@ def preprocess(i): meta = i['meta'] automation = i['automation'] - repo = env.get('CM_MLPERF_RESULTS_GIT_REPO_URL', '') + repo = env.get('MLC_MLPERF_RESULTS_GIT_REPO_URL', '') if repo.strip() == '': return {'return': 1, 'error': 'Invalid GIT_REPO_URL for MLPERF results'} - branch = env.get('CM_GIT_BRANCH', '') + branch = env.get('MLC_GIT_BRANCH', '') if branch: extra_tags_string = f",_branch.{branch}" else: @@ -29,17 +29,17 @@ def preprocess(i): }) if r['return'] > 0: return r - env['CM_MLPERF_RESULTS_REPO_COMMIT_MESSAGE'] = env.get( - 'CM_MLPERF_RESULTS_REPO_COMMIT_MESSAGE', 'Added new results') + env['MLC_MLPERF_RESULTS_REPO_COMMIT_MESSAGE'] = env.get( + 'MLC_MLPERF_RESULTS_REPO_COMMIT_MESSAGE', 'Added new results') - if env.get('CM_GITHUB_PAT', '') != '': + if env.get('MLC_GITHUB_PAT', '') != '': p = parse(repo) - token = env['CM_GITHUB_PAT'] + token = env['MLC_GITHUB_PAT'] if token == 'pat': token = "$PAT" - env['CM_GIT_PUSH_CMD'] = f"""git push https://x-access-token:{token}@{p.host}/{p.owner}/{p.repo}""" + env['MLC_GIT_PUSH_CMD'] = f"""git push https://x-access-token:{token}@{p.host}/{p.owner}/{p.repo}""" else: - env['CM_GIT_PUSH_CMD'] = "git push" + env['MLC_GIT_PUSH_CMD'] = "git push" return {'return': 0} diff --git a/script/push-mlperf-inference-results-to-github/meta.yaml b/script/push-mlperf-inference-results-to-github/meta.yaml index 9efeb0e24..2e92bf7c8 100644 --- 
a/script/push-mlperf-inference-results-to-github/meta.yaml +++ b/script/push-mlperf-inference-results-to-github/meta.yaml @@ -3,7 +3,7 @@ automation_alias: script automation_uid: 5b4e0237da074764 category: MLPerf benchmark support default_env: - CM_MLPERF_RESULTS_GIT_REPO_URL: https://github.com/mlcommons/mlperf_inference_submissions_v4.0 + MLC_MLPERF_RESULTS_GIT_REPO_URL: https://github.com/mlcommons/mlperf_inference_submissions_v4.0 deps: - names: - python3 @@ -13,15 +13,15 @@ deps: - names: - get-mlperf-submission-dir skip_if_env: - CM_MLPERF_INFERENCE_SUBMISSION_DIR: + MLC_MLPERF_INFERENCE_SUBMISSION_DIR: - 'on' tags: get,mlperf,submission,dir input_mapping: - branch: CM_GIT_BRANCH - commit_message: CM_MLPERF_RESULTS_REPO_COMMIT_MESSAGE - repo_branch: CM_GIT_BRANCH - repo_url: CM_MLPERF_RESULTS_GIT_REPO_URL - submission_dir: CM_MLPERF_INFERENCE_SUBMISSION_DIR + branch: MLC_GIT_BRANCH + commit_message: MLC_MLPERF_RESULTS_REPO_COMMIT_MESSAGE + repo_branch: MLC_GIT_BRANCH + repo_url: MLC_MLPERF_RESULTS_GIT_REPO_URL + submission_dir: MLC_MLPERF_INFERENCE_SUBMISSION_DIR prehook_deps: - names: - get-git-repo diff --git a/script/push-mlperf-inference-results-to-github/run.bat b/script/push-mlperf-inference-results-to-github/run.bat index 085727d19..385235737 100644 --- a/script/push-mlperf-inference-results-to-github/run.bat +++ b/script/push-mlperf-inference-results-to-github/run.bat @@ -1,35 +1,35 @@ @echo off -REM Check if CM_GIT_REPO_CHECKOUT_PATH is set -if not defined CM_GIT_REPO_CHECKOUT_PATH ( - echo "Error: CM_GIT_REPO_CHECKOUT_PATH is not set." +REM Check if MLC_GIT_REPO_CHECKOUT_PATH is set +if not defined MLC_GIT_REPO_CHECKOUT_PATH ( + echo "Error: MLC_GIT_REPO_CHECKOUT_PATH is not set." exit /b 1 ) -cd /d "%CM_GIT_REPO_CHECKOUT_PATH%" +cd /d "%MLC_GIT_REPO_CHECKOUT_PATH%" if %errorlevel% neq 0 ( - echo "Error: Failed to change directory to %CM_GIT_REPO_CHECKOUT_PATH%" + echo "Error: Failed to change directory to %MLC_GIT_REPO_CHECKOUT_PATH%" exit /b 1 ) git pull git add * -REM Check if the CM_MLPERF_INFERENCE_SUBMISSION_DIR variable is set -if defined CM_MLPERF_INFERENCE_SUBMISSION_DIR ( - robocopy "%CM_MLPERF_INFERENCE_SUBMISSION_DIR%" "%CM_GIT_REPO_CHECKOUT_PATH%" /E /COPYALL /DCOPY:DAT +REM Check if the MLC_MLPERF_INFERENCE_SUBMISSION_DIR variable is set +if defined MLC_MLPERF_INFERENCE_SUBMISSION_DIR ( + robocopy "%MLC_MLPERF_INFERENCE_SUBMISSION_DIR%" "%MLC_GIT_REPO_CHECKOUT_PATH%" /E /COPYALL /DCOPY:DAT git add * ) REM Check if the previous command was successful if %errorlevel% neq 0 exit /b %errorlevel% -git commit -a -m "%CM_MLPERF_RESULTS_REPO_COMMIT_MESSAGE%" +git commit -a -m "%MLC_MLPERF_RESULTS_REPO_COMMIT_MESSAGE%" -if defined CM_MLPERF_INFERENCE_SUBMISSION_DIR call %CM_SET_REMOTE_URL_CMD% +if defined MLC_MLPERF_INFERENCE_SUBMISSION_DIR call %MLC_SET_REMOTE_URL_CMD% -echo "%CM_GIT_PUSH_CMD%" -%CM_GIT_PUSH_CMD% +echo "%MLC_GIT_PUSH_CMD%" +%MLC_GIT_PUSH_CMD% REM Check if the previous command was successful if %errorlevel% neq 0 exit /b %errorlevel% diff --git a/script/push-mlperf-inference-results-to-github/run.sh b/script/push-mlperf-inference-results-to-github/run.sh index 795838f68..90408efc1 100644 --- a/script/push-mlperf-inference-results-to-github/run.sh +++ b/script/push-mlperf-inference-results-to-github/run.sh @@ -1,24 +1,24 @@ #!/bin/bash -# Check if CM_GIT_REPO_CHECKOUT_PATH is set -if [ -z "${CM_GIT_REPO_CHECKOUT_PATH}" ]; then - echo "Error: CM_GIT_REPO_CHECKOUT_PATH is not set." 
+# Check if MLC_GIT_REPO_CHECKOUT_PATH is set +if [ -z "${MLC_GIT_REPO_CHECKOUT_PATH}" ]; then + echo "Error: MLC_GIT_REPO_CHECKOUT_PATH is not set." exit 1 fi -cd "${CM_GIT_REPO_CHECKOUT_PATH}" +cd "${MLC_GIT_REPO_CHECKOUT_PATH}" git pull git add * -if [[ -n ${CM_MLPERF_INFERENCE_SUBMISSION_DIR} ]]; then - rsync -avz "${CM_MLPERF_INFERENCE_SUBMISSION_DIR}/" "${CM_GIT_REPO_CHECKOUT_PATH}/" +if [[ -n ${MLC_MLPERF_INFERENCE_SUBMISSION_DIR} ]]; then + rsync -avz "${MLC_MLPERF_INFERENCE_SUBMISSION_DIR}/" "${MLC_GIT_REPO_CHECKOUT_PATH}/" git add * fi test $? -eq 0 || exit $? -git commit -a -m "${CM_MLPERF_RESULTS_REPO_COMMIT_MESSAGE}" +git commit -a -m "${MLC_MLPERF_RESULTS_REPO_COMMIT_MESSAGE}" -echo ${CM_GIT_PUSH_CMD} -${CM_GIT_PUSH_CMD} +echo ${MLC_GIT_PUSH_CMD} +${MLC_GIT_PUSH_CMD} test $? -eq 0 || exit $? diff --git a/script/remote-run-commands/customize.py b/script/remote-run-commands/customize.py index 28f8379a9..e62a50762 100644 --- a/script/remote-run-commands/customize.py +++ b/script/remote-run-commands/customize.py @@ -10,10 +10,10 @@ def preprocess(i): cmd_string = '' - # pre_run_cmds = env.get('CM_SSH_PRE_RUN_CMDS', ['source $HOME/cm/bin/activate']) - pre_run_cmds = env.get('CM_SSH_PRE_RUN_CMDS', []) + # pre_run_cmds = env.get('MLC_SSH_PRE_RUN_CMDS', ['source $HOME/cm/bin/activate']) + pre_run_cmds = env.get('MLC_SSH_PRE_RUN_CMDS', []) - run_cmds = env.get('CM_SSH_RUN_COMMANDS', []) + run_cmds = env.get('MLC_SSH_RUN_COMMANDS', []) run_cmds = pre_run_cmds + run_cmds @@ -24,23 +24,23 @@ def preprocess(i): run_cmds[i] = cmd cmd_string += " ; ".join(run_cmds) - user = env.get('CM_SSH_USER') - password = env.get('CM_SSH_PASSWORD', None) - host = env.get('CM_SSH_HOST') + user = env.get('MLC_SSH_USER') + password = env.get('MLC_SSH_PASSWORD', None) + host = env.get('MLC_SSH_HOST') if password: password_string = " -p " + password else: password_string = "" cmd_extra = '' - if env.get("CM_SSH_SKIP_HOST_VERIFY"): + if env.get("MLC_SSH_SKIP_HOST_VERIFY"): cmd_extra += " -o StrictHostKeyChecking=no" - if env.get("CM_SSH_KEY_FILE"): - cmd_extra += " -i " + env.get("CM_SSH_KEY_FILE") + if env.get("MLC_SSH_KEY_FILE"): + cmd_extra += " -i " + env.get("MLC_SSH_KEY_FILE") ssh_command = "ssh " + user + "@" + host + \ password_string + cmd_extra + " '" + cmd_string + "'" - env['CM_SSH_CMD'] = ssh_command + env['MLC_SSH_CMD'] = ssh_command return {'return': 0} diff --git a/script/remote-run-commands/meta.yaml b/script/remote-run-commands/meta.yaml index dd49f650b..5927a457f 100644 --- a/script/remote-run-commands/meta.yaml +++ b/script/remote-run-commands/meta.yaml @@ -3,20 +3,20 @@ automation_alias: script automation_uid: 5b4e0237da074764 category: Remote automation default_env: - CM_SSH_CLIENT_REFRESH: '10' - CM_SSH_HOST: localhost - CM_SSH_KEY_FILE: $HOME/.ssh/id_rsa - CM_SSH_PORT: '22' - CM_SSH_USER: $USER + MLC_SSH_CLIENT_REFRESH: '10' + MLC_SSH_HOST: localhost + MLC_SSH_KEY_FILE: $HOME/.ssh/id_rsa + MLC_SSH_PORT: '22' + MLC_SSH_USER: $USER input_mapping: - client_refresh: CM_SSH_CLIENT_REFRESH - host: CM_SSH_HOST - password: CM_SSH_PASSWORD - port: CM_SSH_PORT - run_cmds: CM_SSH_RUN_COMMANDS - skip_host_verify: CM_SSH_SKIP_HOST_VERIFY - ssh_key_file: CM_SSH_KEY_FILE - user: CM_SSH_USER + client_refresh: MLC_SSH_CLIENT_REFRESH + host: MLC_SSH_HOST + password: MLC_SSH_PASSWORD + port: MLC_SSH_PORT + run_cmds: MLC_SSH_RUN_COMMANDS + skip_host_verify: MLC_SSH_SKIP_HOST_VERIFY + ssh_key_file: MLC_SSH_KEY_FILE + user: MLC_SSH_USER tags: - remote - run diff --git a/script/remote-run-commands/run.sh 
b/script/remote-run-commands/run.sh index f9fac760b..410ba197b 100644 --- a/script/remote-run-commands/run.sh +++ b/script/remote-run-commands/run.sh @@ -1,4 +1,4 @@ #!/bin/bash -cmd=$CM_SSH_CMD +cmd=$MLC_SSH_CMD echo $cmd eval $cmd diff --git a/script/reproduce-mlperf-inference-dummy/README.md b/script/reproduce-mlperf-inference-dummy/README.md deleted file mode 100644 index 36f245ef7..000000000 --- a/script/reproduce-mlperf-inference-dummy/README.md +++ /dev/null @@ -1,381 +0,0 @@ -
-Click here to see the table of contents. - -* [About](#about) -* [Summary](#summary) -* [Reuse this script in your project](#reuse-this-script-in-your-project) - * [ Install CM automation language](#install-cm-automation-language) - * [ Check CM script flags](#check-cm-script-flags) - * [ Run this script from command line](#run-this-script-from-command-line) - * [ Run this script from Python](#run-this-script-from-python) - * [ Run this script via GUI](#run-this-script-via-gui) - * [ Run this script via Docker (beta)](#run-this-script-via-docker-(beta)) -* [Customization](#customization) - * [ Variations](#variations) - * [ Script flags mapped to environment](#script-flags-mapped-to-environment) - * [ Default environment](#default-environment) -* [Script workflow, dependencies and native scripts](#script-workflow-dependencies-and-native-scripts) -* [Script output](#script-output) -* [New environment keys (filter)](#new-environment-keys-(filter)) -* [New environment keys auto-detected from customize](#new-environment-keys-auto-detected-from-customize) -* [Maintainers](#maintainers) - -
- -*Note that this README is automatically generated - don't edit!* - -### About - -#### Summary - -* Category: *Modular MLPerf benchmarks.* -* CM GitHub repository: *[mlcommons@cm4mlops](https://github.com/mlcommons/cm4mlops)* -* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-inference-dummy)* -* CM meta description for this script: *[_cm.yaml](_cm.yaml)* -* CM "database" tags to find this script: *reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy,dummy-harness,dummy* -* Output cached? *False* -___ -### Reuse this script in your project - -#### Install CM automation language - -* [Installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md) -* [CM intro](https://doi.org/10.5281/zenodo.8105339) - -#### Pull CM repository with this automation - -```cm pull repo mlcommons@ck``` - - -#### Run this script from command line - -1. `cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy,dummy-harness,dummy[,variations] [--input_flags]` - -2. `cmr "reproduce mlcommons mlperf inference harness dummy-harness dummy dummy-harness dummy[ variations]" [--input_flags]` - -* `variations` can be seen [here](#variations) - -* `input_flags` can be seen [here](#script-flags-mapped-to-environment) - -#### Run this script from Python - -
-Click here to expand this section. - -```python - -import cmind - -r = cmind.access({'action':'run', - 'automation':'script', - 'tags':'reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy,dummy-harness,dummy', - 'out':'con', - ... - (other input keys for this script) - ... - }) - -if r['return']>0: - print (r['error']) - -``` - -
- - -#### Run this script via GUI - -```cmr "cm gui" --script="reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy,dummy-harness,dummy"``` - -Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy,dummy-harness,dummy) to generate CM CMD. - -#### Run this script via Docker (beta) - -`cm docker script "reproduce mlcommons mlperf inference harness dummy-harness dummy dummy-harness dummy[ variations]" [--input_flags]` - -___ -### Customization - - -#### Variations - - * *Internal group (variations should not be selected manually)* -
- Click here to expand this section. - - * `_bert_` - - Workflow: - * `_gptj_` - - Workflow: - 1. ***Read "deps" on other CM scripts*** - * get,ml-model,gptj - * CM names: `--adr.['gptj-model']...` - - CM script: [get-ml-model-gptj](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-gptj) - * get,dataset,cnndm,_validation - - CM script: [get-dataset-cnndm](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-cnndm) - * `_llama2-70b_` - - Workflow: - -
- - - * *No group (any variation can be selected)* -
- Click here to expand this section. - - * `_pytorch,cpu` - - Workflow: - 1. ***Read "deps" on other CM scripts*** - * get,generic-python-lib,_torch - - CM script: [get-generic-python-lib](https://github.com/mlcommons/cm4mlops/tree/main/script/get-generic-python-lib) - * `_pytorch,cuda` - - Workflow: - 1. ***Read "deps" on other CM scripts*** - * get,generic-python-lib,_torch_cuda - - CM script: [get-generic-python-lib](https://github.com/mlcommons/cm4mlops/tree/main/script/get-generic-python-lib) - * `_singlestream,resnet50` - - Workflow: - * `_singlestream,retinanet` - - Workflow: - -
- - - * Group "**backend**" -
- Click here to expand this section. - - * **`_pytorch`** (default) - - Environment variables: - - *CM_MLPERF_BACKEND*: `pytorch` - - Workflow: - -
- - - * Group "**batch-size**" -
- Click here to expand this section. - - * `_bs.#` - - Workflow: - -
- - - * Group "**device**" -
- Click here to expand this section. - - * **`_cpu`** (default) - - Environment variables: - - *CM_MLPERF_DEVICE*: `cpu` - - Workflow: - * `_cuda` - - Environment variables: - - *CM_MLPERF_DEVICE*: `gpu` - - *CM_MLPERF_DEVICE_LIB_NAMESPEC*: `cudart` - - Workflow: - -
- - - * Group "**loadgen-scenario**" -
- Click here to expand this section. - - * `_multistream` - - Environment variables: - - *CM_MLPERF_LOADGEN_SCENARIO*: `MultiStream` - - Workflow: - * `_offline` - - Environment variables: - - *CM_MLPERF_LOADGEN_SCENARIO*: `Offline` - - Workflow: - * `_server` - - Environment variables: - - *CM_MLPERF_LOADGEN_SCENARIO*: `Server` - - Workflow: - * `_singlestream` - - Environment variables: - - *CM_MLPERF_LOADGEN_SCENARIO*: `SingleStream` - - Workflow: - -
- - - * Group "**model**" -
- Click here to expand this section. - - * `_bert-99` - - Environment variables: - - *CM_MODEL*: `bert-99` - - *CM_SQUAD_ACCURACY_DTYPE*: `float32` - - Workflow: - * `_bert-99.9` - - Environment variables: - - *CM_MODEL*: `bert-99.9` - - Workflow: - * `_gptj-99` - - Environment variables: - - *CM_MODEL*: `gptj-99` - - *CM_SQUAD_ACCURACY_DTYPE*: `float32` - - Workflow: - * `_gptj-99.9` - - Environment variables: - - *CM_MODEL*: `gptj-99.9` - - Workflow: - * `_llama2-70b-99` - - Environment variables: - - *CM_MODEL*: `llama2-70b-99` - - Workflow: - * `_llama2-70b-99.9` - - Environment variables: - - *CM_MODEL*: `llama2-70b-99.9` - - Workflow: - * **`_resnet50`** (default) - - Environment variables: - - *CM_MODEL*: `resnet50` - - Workflow: - * `_retinanet` - - Environment variables: - - *CM_MODEL*: `retinanet` - - Workflow: - -
- - - * Group "**precision**" -
- Click here to expand this section. - - * `_fp16` - - Environment variables: - - *CM_MLPERF_MODEL_PRECISION*: `float16` - - Workflow: - * **`_fp32`** (default) - - Environment variables: - - *CM_MLPERF_MODEL_PRECISION*: `float32` - - Workflow: - * `_uint8` - - Environment variables: - - *CM_MLPERF_MODEL_PRECISION*: `uint8` - - Workflow: - -
- - -#### Default variations - -`_cpu,_fp32,_pytorch,_resnet50` - -#### Script flags mapped to environment -
-Click here to expand this section. - -* `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value` -* `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value` -* `--mlperf_conf=value` → `CM_MLPERF_CONF=value` -* `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value` -* `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value` -* `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value` -* `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value` -* `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value` -* `--rerun=value` → `CM_RERUN=value` -* `--results_repo=value` → `CM_MLPERF_INFERENCE_RESULTS_REPO=value` -* `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value` -* `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value` -* `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value` -* `--skip_preprocess=value` → `CM_SKIP_PREPROCESS_DATASET=value` -* `--skip_preprocessing=value` → `CM_SKIP_PREPROCESS_DATASET=value` -* `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value` -* `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value` -* `--user_conf=value` → `CM_MLPERF_USER_CONF=value` - -**Above CLI flags can be used in the Python CM API as follows:** - -```python -r=cm.access({... , "count":...}) -``` - -
- -#### Default environment - -
-Click here to expand this section. - -These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - -* CM_MLPERF_LOADGEN_SCENARIO: `Offline` -* CM_MLPERF_LOADGEN_MODE: `performance` -* CM_SKIP_PREPROCESS_DATASET: `no` -* CM_SKIP_MODEL_DOWNLOAD: `no` -* CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `dummy` -* CM_MLPERF_SKIP_RUN: `no` - -
- -___ -### Script workflow, dependencies and native scripts - -
-Click here to expand this section. - - 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-inference-dummy/_cm.yaml)*** - * detect,os - - CM script: [detect-os](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-os) - * detect,cpu - - CM script: [detect-cpu](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-cpu) - * get,sys-utils-cm - - CM script: [get-sys-utils-cm](https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-cm) - * get,mlcommons,inference,src - * CM names: `--adr.['inference-src']...` - - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-src) - * get,mlcommons,inference,loadgen - * CM names: `--adr.['inference-loadgen']...` - - CM script: [get-mlperf-inference-loadgen](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-loadgen) - * generate,user-conf,mlperf,inference - * CM names: `--adr.['user-conf-generator']...` - - CM script: [generate-mlperf-inference-user-conf](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-inference-user-conf) - * get,generic-python-lib,_mlperf_logging - * CM names: `--adr.['mlperf-logging']...` - - CM script: [get-generic-python-lib](https://github.com/mlcommons/cm4mlops/tree/main/script/get-generic-python-lib) - * get,git,repo - * CM names: `--adr.inference-results...` - - CM script: [get-git-repo](https://github.com/mlcommons/cm4mlops/tree/main/script/get-git-repo) - 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-inference-dummy/customize.py)*** - 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-inference-dummy/_cm.yaml) - 1. ***Run native script if exists*** - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-inference-dummy/run.sh) - 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-inference-dummy/_cm.yaml) - 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-inference-dummy/customize.py)*** - 1. ***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-inference-dummy/_cm.yaml)*** - * benchmark-mlperf - * `if (CM_MLPERF_SKIP_RUN not in ['yes', True])` - * CM names: `--adr.['runner', 'mlperf-runner']...` - - CM script: [benchmark-program-mlperf](https://github.com/mlcommons/cm4mlops/tree/main/script/benchmark-program-mlperf) - * save,mlperf,inference,state - * CM names: `--adr.['save-mlperf-inference-state']...` - - CM script: [save-mlperf-inference-implementation-state](https://github.com/mlcommons/cm4mlops/tree/main/script/save-mlperf-inference-implementation-state) -
- -___ -### Script output -`cmr "reproduce mlcommons mlperf inference harness dummy-harness dummy dummy-harness dummy[,variations]" [--input_flags] -j` -#### New environment keys (filter) - -* `CM_DATASET_*` -* `CM_HW_NAME` -* `CM_IMAGENET_ACCURACY_DTYPE` -* `CM_MAX_EXAMPLES` -* `CM_MLPERF_*` -* `CM_ML_MODEL_*` -* `CM_SQUAD_ACCURACY_DTYPE` -#### New environment keys auto-detected from customize - -___ -### Maintainers - -* [Open MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) \ No newline at end of file diff --git a/script/reproduce-mlperf-octoml-tinyml-results/customize.py b/script/reproduce-mlperf-octoml-tinyml-results/customize.py index 8f579e8f3..0f62f2085 100644 --- a/script/reproduce-mlperf-octoml-tinyml-results/customize.py +++ b/script/reproduce-mlperf-octoml-tinyml-results/customize.py @@ -7,10 +7,10 @@ def preprocess(i): os_info = i['os_info'] env = i['env'] - if 'CM_MICROTVM_VARIANT' not in env: - env['CM_MICROTVM_VARIANT'] = 'microtvm_cmsis_nn' - if 'CM_TINY_MODEL' not in env: - env['CM_TINY_MODEL'] = 'ic' + if 'MLC_MICROTVM_VARIANT' not in env: + env['MLC_MICROTVM_VARIANT'] = 'microtvm_cmsis_nn' + if 'MLC_TINY_MODEL' not in env: + env['MLC_TINY_MODEL'] = 'ic' if os_info['platform'] == 'windows': return {'return': 1, 'error': 'Windows is not supported in this script yet'} diff --git a/script/reproduce-mlperf-octoml-tinyml-results/meta.yaml b/script/reproduce-mlperf-octoml-tinyml-results/meta.yaml index 5dbee3b43..055bb03dd 100644 --- a/script/reproduce-mlperf-octoml-tinyml-results/meta.yaml +++ b/script/reproduce-mlperf-octoml-tinyml-results/meta.yaml @@ -30,15 +30,15 @@ deps: version_min: 3.20.0 - tags: get,gcc input_mapping: - flash: CM_FLASH_BOARD - recreate_binary: CM_RECREATE_BINARY + flash: MLC_FLASH_BOARD + recreate_binary: MLC_RECREATE_BINARY local_env_keys: -- CM_* +- MLC_* new_env_keys: -- CM_TINY_* +- MLC_TINY_* post_deps: - enable_if_env: - CM_FLASH_BOARD: + MLC_FLASH_BOARD: - 'True' tags: flash,tiny,mlperf tags: @@ -52,28 +52,28 @@ uid: a63803a707d04332 variations: NRF: env: - CM_TINY_BOARD: NRF5340DK + MLC_TINY_BOARD: NRF5340DK NUCLEO: env: - CM_TINY_BOARD: NUCLEO_L4R5ZI + MLC_TINY_BOARD: NUCLEO_L4R5ZI ad: env: - CM_TINY_MODEL: ad + MLC_TINY_MODEL: ad cmsis_nn: env: - CM_MICROTVM_VARIANT: microtvm_cmsis_nn + MLC_MICROTVM_VARIANT: microtvm_cmsis_nn ic: env: - CM_TINY_MODEL: ic + MLC_TINY_MODEL: ic kws: env: - CM_TINY_MODEL: kws + MLC_TINY_MODEL: kws native: env: - CM_MICROTVM_VARIANT: microtvm_native + MLC_MICROTVM_VARIANT: microtvm_native vww: env: - CM_TINY_MODEL: vww + MLC_TINY_MODEL: vww versions: r1.0: add_deps_recursive: diff --git a/script/reproduce-mlperf-octoml-tinyml-results/run.sh b/script/reproduce-mlperf-octoml-tinyml-results/run.sh index c8d2f077f..d0b0f4436 100644 --- a/script/reproduce-mlperf-octoml-tinyml-results/run.sh +++ b/script/reproduce-mlperf-octoml-tinyml-results/run.sh @@ -2,30 +2,30 @@ CUR_DIR=$PWD -code=${CM_MICROTVM_SOURCE}/closed/OctoML/code -model=${CM_TINY_MODEL:-ad} -microtvm_variant=${CM_MICROTVM_VARIANT} -board=${CM_TINY_BOARD:-NUCLEO_L4R5ZI} +code=${MLC_MICROTVM_SOURCE}/closed/OctoML/code +model=${MLC_TINY_MODEL:-ad} +microtvm_variant=${MLC_MICROTVM_VARIANT} +board=${MLC_TINY_BOARD:-NUCLEO_L4R5ZI} source=${code}/${microtvm_variant} path_suffix="${board}/${model}" cmake_src=${source}/${path_suffix} build_path=${CUR_DIR}/${path_suffix} -echo "CM_TINY_BUILD_DIR=${build_path}/build" > tmp-run-env.out +echo "MLC_TINY_BUILD_DIR=${build_path}/build" > tmp-run-env.out 
mkdir -p ${build_path} cd ${build_path} binary_path=${build_path}/build/zephyr/zephyr.elf -if [ -f "${binary_path}" ] && [ "${CM_RECREATE_BINARY}" != "True" ]; then +if [ -f "${binary_path}" ] && [ "${MLC_RECREATE_BINARY}" != "True" ]; then echo "ELF binary existing at ${binary_path}. Skipping regeneration." cd build else rm -rf build mkdir -p build cd build - CM_MAKE_CORES=${CM_MAKE_CORES:-${CM_HOST_CPU_TOTAL_CORES:-2}} + MLC_MAKE_CORES=${MLC_MAKE_CORES:-${MLC_HOST_CPU_TOTAL_CORES:-2}} cmake ${cmake_src} test $? -eq 0 || exit 1 - make -j${CM_MAKE_CORES} + make -j${MLC_MAKE_CORES} test $? -eq 0 || exit 1 cd ../ echo "ELF binary created at ${build_path}/build/zephyr/zephyr.elf" diff --git a/script/reproduce-mlperf-training-nvidia/customize.py b/script/reproduce-mlperf-training-nvidia/customize.py index cd78bd8b4..2c9bccaa4 100644 --- a/script/reproduce-mlperf-training-nvidia/customize.py +++ b/script/reproduce-mlperf-training-nvidia/customize.py @@ -11,7 +11,7 @@ def preprocess(i): return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] - conf = env.get('CM_MLPERF_NVIDIA_TRAINING_SYSTEM_CONF_NAME', '') + conf = env.get('MLC_MLPERF_NVIDIA_TRAINING_SYSTEM_CONF_NAME', '') if conf == "": return {'return': 1, 'error': 'Please provide --system_conf_name='} @@ -19,7 +19,7 @@ def preprocess(i): if not conf.endswith(".sh"): conf = conf + ".sh" - if env.get('CM_MLPERF_TRAINING_BENCHMARK', '') == "resnet": + if env.get('MLC_MLPERF_TRAINING_BENCHMARK', '') == "resnet": i['run_script_input']['script_name'] = "run-resnet" env['CONFIG_FILE'] = conf diff --git a/script/reproduce-mlperf-training-nvidia/meta.yaml b/script/reproduce-mlperf-training-nvidia/meta.yaml index a118ee3f7..263ab9d63 100644 --- a/script/reproduce-mlperf-training-nvidia/meta.yaml +++ b/script/reproduce-mlperf-training-nvidia/meta.yaml @@ -22,12 +22,12 @@ tags: # Map script inputs to environment variables input_mapping: - system_conf_name: CM_MLPERF_NVIDIA_TRAINING_SYSTEM_CONF_NAME - results_dir: CM_MLPERF_RESULTS_DIR + system_conf_name: MLC_MLPERF_NVIDIA_TRAINING_SYSTEM_CONF_NAME + results_dir: MLC_MLPERF_RESULTS_DIR new_state_keys: - mlperf-training-implementation - - CM_SUT_* + - MLC_SUT_* # Dependencies on other CM scripts @@ -42,7 +42,7 @@ deps: # Install system dependencies on a given host - tags: get,nvidia-docker skip_if_env: - CM_SKIP_GET_NVIDIA_DOCKER: + MLC_SKIP_GET_NVIDIA_DOCKER: - yes # Detect CUDA @@ -55,7 +55,7 @@ variations: resnet: group: benchmark env: - CM_MLPERF_TRAINING_BENCHMARK: resnet + MLC_MLPERF_TRAINING_BENCHMARK: resnet deps: - tags: prepare,mlperf,training,resnet,_nvidia names: diff --git a/script/reproduce-mlperf-training-nvidia/run-resnet.sh b/script/reproduce-mlperf-training-nvidia/run-resnet.sh index d64cf068c..ea039badf 100644 --- a/script/reproduce-mlperf-training-nvidia/run-resnet.sh +++ b/script/reproduce-mlperf-training-nvidia/run-resnet.sh @@ -1,15 +1,15 @@ #!/bin/bash benchmark_implementation=${benchmark_implementation:-"mxnet-22.04"} -echo "cd ${CM_MLPERF_TRAINING_NVIDIA_CODE_PATH}/benchmarks/resnet/implementations/${benchmark_implementation}" -cd ${CM_MLPERF_TRAINING_NVIDIA_CODE_PATH}/benchmarks/resnet/implementations/${benchmark_implementation} +echo "cd ${MLC_MLPERF_TRAINING_NVIDIA_CODE_PATH}/benchmarks/resnet/implementations/${benchmark_implementation}" +cd ${MLC_MLPERF_TRAINING_NVIDIA_CODE_PATH}/benchmarks/resnet/implementations/${benchmark_implementation} docker build --pull -t mlperf-nvidia:image_classification . test $? -eq 0 || exit $? 
echo "source ${CONFIG_FILE}" source ${CONFIG_FILE} test $? -eq 0 || exit $? -DATADIR=${CM_MLPERF_TRAINING_NVIDIA_RESNET_PREPROCESSED_PATH} -echo "DATADIR=${CM_MLPERF_TRAINING_NVIDIA_RESNET_PREPROCESSED_PATH}" +DATADIR=${MLC_MLPERF_TRAINING_NVIDIA_RESNET_PREPROCESSED_PATH} +echo "DATADIR=${MLC_MLPERF_TRAINING_NVIDIA_RESNET_PREPROCESSED_PATH}" echo "CONT=mlperf-nvidia:image_classification DATADIR=${DATADIR} LOGDIR=${RESULTS_DIR} ./run_with_docker.sh" CONT=mlperf-nvidia:image_classification DATADIR=${DATADIR} LOGDIR=${RESULTS_DIR} ./run_with_docker.sh diff --git a/script/reproduce-mlperf-training-nvidia/run.sh b/script/reproduce-mlperf-training-nvidia/run.sh index ddcd0b550..0c6a8fc4a 100644 --- a/script/reproduce-mlperf-training-nvidia/run.sh +++ b/script/reproduce-mlperf-training-nvidia/run.sh @@ -1,7 +1,7 @@ #!/bin/bash -if [[ ${CM_CALL_MLPERF_RUNNER} == "no" ]]; then - cd ${CM_RUN_DIR} - cmd=${CM_RUN_CMD} +if [[ ${MLC_CALL_MLPERF_RUNNER} == "no" ]]; then + cd ${MLC_RUN_DIR} + cmd=${MLC_RUN_CMD} echo "${cmd}" eval "${cmd}" test $? -eq 0 || exit $? diff --git a/script/run-all-mlperf-models/README.md b/script/run-all-mlperf-models/README.md deleted file mode 100644 index 01f5427b1..000000000 --- a/script/run-all-mlperf-models/README.md +++ /dev/null @@ -1,237 +0,0 @@ -
-Click here to see the table of contents. - -* [About](#about) -* [Summary](#summary) -* [Reuse this script in your project](#reuse-this-script-in-your-project) - * [ Install CM automation language](#install-cm-automation-language) - * [ Check CM script flags](#check-cm-script-flags) - * [ Run this script from command line](#run-this-script-from-command-line) - * [ Run this script from Python](#run-this-script-from-python) - * [ Run this script via GUI](#run-this-script-via-gui) - * [ Run this script via Docker (beta)](#run-this-script-via-docker-(beta)) -* [Customization](#customization) - * [ Variations](#variations) - * [ Default environment](#default-environment) -* [Script workflow, dependencies and native scripts](#script-workflow-dependencies-and-native-scripts) -* [Script output](#script-output) -* [New environment keys (filter)](#new-environment-keys-(filter)) -* [New environment keys auto-detected from customize](#new-environment-keys-auto-detected-from-customize) -* [Maintainers](#maintainers) - -
- -*Note that this README is automatically generated - don't edit!* - -### About - -#### Summary - -* Category: *MLPerf benchmark support.* -* CM GitHub repository: *[mlcommons@cm4mlops](https://github.com/mlcommons/cm4mlops)* -* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models)* -* CM meta description for this script: *[_cm.yaml](_cm.yaml)* -* CM "database" tags to find this script: *run,natively,all,mlperf-models* -* Output cached? *False* -___ -### Reuse this script in your project - -#### Install CM automation language - -* [Installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md) -* [CM intro](https://doi.org/10.5281/zenodo.8105339) - -#### Pull CM repository with this automation - -```cm pull repo mlcommons@cm4mlops --checkout=dev``` - - -#### Run this script from command line - -1. `cm run script --tags=run,natively,all,mlperf-models[,variations] ` - -2. `cmr "run natively all mlperf-models[ variations]" ` - -* `variations` can be seen [here](#variations) - -#### Run this script from Python - -
-Click here to expand this section. - -```python - -import cmind - -r = cmind.access({'action':'run', - 'automation':'script', - 'tags':'run,natively,all,mlperf-models', - 'out':'con', - ... - (other input keys for this script) - ... - }) - -if r['return']>0: - print (r['error']) - -``` - -
- - -#### Run this script via GUI - -```cmr "cm gui" --script="run,natively,all,mlperf-models"``` - -Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=run,natively,all,mlperf-models) to generate CM CMD. - -#### Run this script via Docker (beta) - -`cm docker script "run natively all mlperf-models[ variations]" ` - -___ -### Customization - - -#### Variations - - * *No group (any variation can be selected)* -
- Click here to expand this section. - - * `_phoenix,reference` - - Workflow: - -
- - - * Group "**implementation**" -
- Click here to expand this section. - - * `_deepsparse` - - Environment variables: - - *DIVISION*: `open` - - *IMPLEMENTATION*: `deepsparse` - - Workflow: - * `_intel` - - Environment variables: - - *IMPLEMENTATION*: `intel` - - Workflow: - * `_mil` - - Environment variables: - - *IMPLEMENTATION*: `mil` - - Workflow: - * `_nvidia` - - Environment variables: - - *IMPLEMENTATION*: `nvidia` - - Workflow: - * `_qualcomm` - - Environment variables: - - *IMPLEMENTATION*: `qualcomm` - - Workflow: - * `_reference` - - Environment variables: - - *IMPLEMENTATION*: `reference` - - Workflow: - * `_tflite-cpp` - - Environment variables: - - *IMPLEMENTATION*: `tflite_cpp` - - Workflow: - -
- - - * Group "**power**" -
- Click here to expand this section. - - * **`_performance-only`** (default) - - Workflow: - * `_power` - - Environment variables: - - *POWER*: `True` - - Workflow: - -
- - - * Group "**sut**" -
- Click here to expand this section. - - * `_macbookpro-m1` - - Environment variables: - - *CATEGORY*: `edge` - - *DIVISION*: `closed` - - Workflow: - * `_orin.32g` - - Environment variables: - - *CATEGORY*: `edge` - - *DIVISION*: `closed` - - Workflow: - * `_phoenix` - - Environment variables: - - *CATEGORY*: `edge,datacenter` - - *DIVISION*: `closed` - - Workflow: - * `_sapphire-rapids.24c` - - Environment variables: - - *CATEGORY*: `edge,datacenter` - - *DIVISION*: `closed` - - Workflow: - -
- - -#### Default variations - -`_performance-only` -#### Default environment - -
-Click here to expand this section. - -These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - -
- -___ -### Script workflow, dependencies and native scripts - -
-Click here to expand this section. - - 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/_cm.yaml) - 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/customize.py)*** - 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/_cm.yaml) - 1. ***Run native script if exists*** - * [run-bert-macos.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/run-bert-macos.sh) - * [run-bert.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/run-bert.sh) - * [run-cpp-implementation.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/run-cpp-implementation.sh) - * [run-mobilenet-models.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/run-mobilenet-models.sh) - * [run-nvidia-4090.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/run-nvidia-4090.sh) - * [run-nvidia-a100.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/run-nvidia-a100.sh) - * [run-nvidia-t4.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/run-nvidia-t4.sh) - * [run-pruned-bert.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/run-pruned-bert.sh) - * [run-reference-models.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/run-reference-models.sh) - * [run-resnet50-macos.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/run-resnet50-macos.sh) - * [run-resnet50.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/run-resnet50.sh) - 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/_cm.yaml) - 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/customize.py)*** - 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/_cm.yaml) -
- -___ -### Script output -`cmr "run natively all mlperf-models[,variations]" -j` -#### New environment keys (filter) - -#### New environment keys auto-detected from customize - -___ -### Maintainers - -* [Open MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) \ No newline at end of file diff --git a/script/run-all-mlperf-models/customize.py b/script/run-all-mlperf-models/customize.py index 07460c6be..3ae4f54dd 100644 --- a/script/run-all-mlperf-models/customize.py +++ b/script/run-all-mlperf-models/customize.py @@ -13,7 +13,7 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') models = env['MODELS'].split(",") diff --git a/script/run-all-mlperf-models/run-bert-macos.sh b/script/run-all-mlperf-models/run-bert-macos.sh index 5d46fd113..192248f78 100644 --- a/script/run-all-mlperf-models/run-bert-macos.sh +++ b/script/run-all-mlperf-models/run-bert-macos.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,7 +17,7 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi @@ -61,7 +61,7 @@ readme_cmd='cm run script --tags=generate-run-cmds,inference,_populate-readme,_a --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -# run "$CM_RUN_CMD" +# run "$MLC_RUN_CMD" run_test "onnxruntime" "100" "reference" "cpu" "$find_performance_cmd" run_test "tf" "100" "reference" "cpu" "$find_performance_cmd" run_test "pytorch" "200" "reference" "cpu" "$find_performance_cmd" diff --git a/script/run-all-mlperf-models/run-bert.sh b/script/run-all-mlperf-models/run-bert.sh index 08cddadde..bc9bbf16d 100644 --- a/script/run-all-mlperf-models/run-bert.sh +++ b/script/run-all-mlperf-models/run-bert.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,7 +17,7 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi @@ -62,7 +62,7 @@ readme_cmd='cm run script --tags=generate-run-cmds,inference,_populate-readme,_a --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -# run "$CM_RUN_CMD" +# run "$MLC_RUN_CMD" run_test "onnxruntime" "20" "reference" "cpu" "$find_performance_cmd" run_test "tf" "20" "reference" "cpu" "$find_performance_cmd" run_test "pytorch" "200" "reference" "cpu" "$find_performance_cmd" diff --git a/script/run-all-mlperf-models/run-cpp-implementation.sh 
b/script/run-all-mlperf-models/run-cpp-implementation.sh index 704abff2d..c93bfdfa9 100644 --- a/script/run-all-mlperf-models/run-cpp-implementation.sh +++ b/script/run-all-mlperf-models/run-cpp-implementation.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,14 +17,14 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi } division="closed" #Add your run commands here... -# run "$CM_RUN_CMD" +# run "$MLC_RUN_CMD" POWER=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 " POWER="" diff --git a/script/run-all-mlperf-models/run-mobilenet-models.sh b/script/run-all-mlperf-models/run-mobilenet-models.sh index 41497d56d..1ae46f66b 100644 --- a/script/run-all-mlperf-models/run-mobilenet-models.sh +++ b/script/run-all-mlperf-models/run-mobilenet-models.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,7 +17,7 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi @@ -31,7 +31,7 @@ extra_tags="" #Add your run commands here... -# run "$CM_RUN_CMD" +# run "$MLC_RUN_CMD" run "cm run script --tags=run,mobilenet-models,_tflite,_accuracy-only$extra_tags \ --adr.compiler.tags=gcc \ ${extra_option} \ diff --git a/script/run-all-mlperf-models/run-nvidia-4090.sh b/script/run-all-mlperf-models/run-nvidia-4090.sh index 033fa9d9e..deb6884bd 100644 --- a/script/run-all-mlperf-models/run-nvidia-4090.sh +++ b/script/run-all-mlperf-models/run-nvidia-4090.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,7 +17,7 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi @@ -37,7 +37,7 @@ category="edge" power="" power=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15" #Add your run commands here... 
-# run "$CM_RUN_CMD" +# run "$MLC_RUN_CMD" find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count' @@ -58,4 +58,4 @@ run_model "resnet50" "10" "${submission_cmd} --offline_target_qps=45000 --server run_model "rnnt" "10" "${submission_cmd} --offline_target_qps=15200 --server_target_qps=14150 --singlestream_target_latency=23" run_model "retinanet" "10" "${submission_cmd} --offline_target_qps=620 --server_target_qps=590 --singlestream_target_latency=2 --multistream_target_latency=14" run_model "bert-99" "10" "${submission_cmd} --offline_target_qps=4100 --server_target_qps=3950 --singlestream_target_latency=1" -run_model "3d-unet-99.9" "10" "${submission_cmd} --offline_target_qps=4 --singlestream_target_latency=433 --env.CM_MLPERF_USE_MAX_DURATION=no" +run_model "3d-unet-99.9" "10" "${submission_cmd} --offline_target_qps=4 --singlestream_target_latency=433 --env.MLC_MLPERF_USE_MAX_DURATION=no" diff --git a/script/run-all-mlperf-models/run-nvidia-a100.sh b/script/run-all-mlperf-models/run-nvidia-a100.sh index 4b5fb40fc..e793a1fb5 100644 --- a/script/run-all-mlperf-models/run-nvidia-a100.sh +++ b/script/run-all-mlperf-models/run-nvidia-a100.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,7 +17,7 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi @@ -36,7 +36,7 @@ power="" connection_type="sxm" #Add your run commands here... 
-# run "$CM_RUN_CMD" +# run "$MLC_RUN_CMD" find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count' @@ -56,4 +56,4 @@ run "resnet50" "10" "${submission_cmd}" run "retinanet" "10" "${submission_cmd}" run "rnnt" "10" "${submission_cmd}" run "bert-99" "10" "${submission_cmd}" -run "3d-unet-99.9" "10" "${submission_cmd} --env.CM_MLPERF_USE_MAX_DURATION='no'" +run "3d-unet-99.9" "10" "${submission_cmd} --env.MLC_MLPERF_USE_MAX_DURATION='no'" diff --git a/script/run-all-mlperf-models/run-nvidia-t4.sh b/script/run-all-mlperf-models/run-nvidia-t4.sh index 835c1adad..4a9176ab6 100644 --- a/script/run-all-mlperf-models/run-nvidia-t4.sh +++ b/script/run-all-mlperf-models/run-nvidia-t4.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,7 +17,7 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi @@ -34,7 +34,7 @@ implementation="nvidia-original" category="edge,datacenter" #Add your run commands here... -# run "$CM_RUN_CMD" +# run "$MLC_RUN_CMD" find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count' diff --git a/script/run-all-mlperf-models/run-pruned-bert.sh b/script/run-all-mlperf-models/run-pruned-bert.sh index 16444e0db..e44c6480c 100644 --- a/script/run-all-mlperf-models/run-pruned-bert.sh +++ b/script/run-all-mlperf-models/run-pruned-bert.sh @@ -35,7 +35,7 @@ zoo_stub_list=( \ ) rerun="" -power=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --env.CM_MLPERF_SKIP_POWER_CHECKS=yes" +power=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --env.MLC_MLPERF_SKIP_POWER_CHECKS=yes" power=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15" power="" max_batchsize=1 @@ -55,7 +55,7 @@ cmd="cm run script --tags=run,mlperf,inference,generate-run-cmds,_find-performan --scenario=Offline \ --test_query_count=15000 \ --adr.mlperf-inference-implementation.max_batchsize=$max_batchsize \ - --env.CM_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB=$stub \ + --env.MLC_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB=$stub \ ${rerun} \ --quiet" echo ${cmd} @@ -76,7 +76,7 @@ for stub in ${zoo_stub_list[@]}; do --execution_mode=valid \ --adr.mlperf-inference-implementation.max_batchsize=$max_batchsize \ ${power} \ - --env.CM_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB=$stub \ + --env.MLC_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB=$stub \ --quiet" echo ${cmd} eval ${cmd} diff --git a/script/run-all-mlperf-models/run-reference-models.sh b/script/run-all-mlperf-models/run-reference-models.sh index 41898f145..e01ac97e6 100644 --- a/script/run-all-mlperf-models/run-reference-models.sh +++ b/script/run-all-mlperf-models/run-reference-models.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: 
${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,14 +17,14 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi } division="closed" #Add your run commands here... -# run "$CM_RUN_CMD" +# run "$MLC_RUN_CMD" run "cm run script --tags=generate-run-cmds,inference,_find-performance \ --model=resnet50 --implementation=reference --device=cpu --backend=onnxruntime \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=100" diff --git a/script/run-all-mlperf-models/run-resnet50-macos.sh b/script/run-all-mlperf-models/run-resnet50-macos.sh index 8d00ddc79..191907397 100644 --- a/script/run-all-mlperf-models/run-resnet50-macos.sh +++ b/script/run-all-mlperf-models/run-resnet50-macos.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,7 +17,7 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi @@ -61,7 +61,7 @@ readme_cmd='cm run script --tags=generate-run-cmds,inference,_populate-readme,_a --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -# run "$CM_RUN_CMD" +# run "$MLC_RUN_CMD" run_test "onnxruntime" "6000" "reference" "cpu" "$find_performance_cmd --rerun" run_test "tf" "6000" "reference" "cpu" "$find_performance_cmd --rerun" diff --git a/script/run-all-mlperf-models/run-resnet50.sh b/script/run-all-mlperf-models/run-resnet50.sh index df2789d8c..e3f3077c0 100644 --- a/script/run-all-mlperf-models/run-resnet50.sh +++ b/script/run-all-mlperf-models/run-resnet50.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,7 +17,7 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi @@ -61,7 +61,7 @@ readme_cmd='cm run script --tags=generate-run-cmds,inference,_populate-readme,_a --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -# run "$CM_RUN_CMD" +# run "$MLC_RUN_CMD" run_test "onnxruntime" "200" "reference" "cpu" "$find_performance_cmd" run_test "tf" "200" "reference" "cpu" "$find_performance_cmd" run_test "onnxruntime" "10000" "reference" "cuda" "$find_performance_cmd" diff --git 
a/script/run-all-mlperf-models/run-retinanet-sh b/script/run-all-mlperf-models/run-retinanet-sh index 6f0bac9c5..b3151d068 100644 --- a/script/run-all-mlperf-models/run-retinanet-sh +++ b/script/run-all-mlperf-models/run-retinanet-sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,7 +17,7 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi @@ -61,7 +61,7 @@ readme_cmd='cm run script --tags=generate-run-cmds,inference,_populate-readme,_a --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -# run "$CM_RUN_CMD" +# run "$MLC_RUN_CMD" run_test "onnxruntime" "50" "reference" "cpu" "$find_performance_cmd" run_test "pytorch" "100" "reference" "cpu" "$find_performance_cmd" run_test "onnxruntime" "1000" "reference" "cuda" "$find_performance_cmd" diff --git a/script/run-all-mlperf-models/template.sh b/script/run-all-mlperf-models/template.sh index 42ecda5ad..4af440f10 100644 --- a/script/run-all-mlperf-models/template.sh +++ b/script/run-all-mlperf-models/template.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,7 +17,7 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi diff --git a/script/run-docker-container/customize.py b/script/run-docker-container/customize.py index f35f22f57..f7fd846cc 100644 --- a/script/run-docker-container/customize.py +++ b/script/run-docker-container/customize.py @@ -12,39 +12,39 @@ def preprocess(i): env = i['env'] - interactive = env.get('CM_DOCKER_INTERACTIVE_MODE', '') + interactive = env.get('MLC_DOCKER_INTERACTIVE_MODE', '') if str(interactive).lower() in ['yes', 'true', '1']: - env['CM_DOCKER_DETACHED_MODE'] = 'no' + env['MLC_DOCKER_DETACHED_MODE'] = 'no' - if 'CM_DOCKER_RUN_SCRIPT_TAGS' not in env: - env['CM_DOCKER_RUN_SCRIPT_TAGS'] = "run,docker,container" - CM_RUN_CMD = "mlc version" + if 'MLC_DOCKER_RUN_SCRIPT_TAGS' not in env: + env['MLC_DOCKER_RUN_SCRIPT_TAGS'] = "run,docker,container" + MLC_RUN_CMD = "mlc version" else: - CM_RUN_CMD = "mlc run script --tags=" + \ - env['CM_DOCKER_RUN_SCRIPT_TAGS'] + ' --quiet' + MLC_RUN_CMD = "mlc run script --tags=" + \ + env['MLC_DOCKER_RUN_SCRIPT_TAGS'] + ' --quiet' r = mlc.access({'action': 'search', 'automation': 'script', - 'tags': env['CM_DOCKER_RUN_SCRIPT_TAGS']}) + 'tags': env['MLC_DOCKER_RUN_SCRIPT_TAGS']}) if len(r['list']) < 1: raise Exception( 'CM script with tags ' + - env['CM_DOCKER_RUN_SCRIPT_TAGS'] + + env['MLC_DOCKER_RUN_SCRIPT_TAGS'] + ' not found!') PATH = r['list'][0].path os.chdir(PATH) - env['CM_DOCKER_RUN_CMD'] = CM_RUN_CMD + 
env['MLC_DOCKER_RUN_CMD'] = MLC_RUN_CMD # Updating Docker info update_docker_info(env) - docker_image_repo = env['CM_DOCKER_IMAGE_REPO'] - docker_image_base = env['CM_DOCKER_IMAGE_BASE'] - docker_image_name = env['CM_DOCKER_IMAGE_NAME'] - docker_image_tag = env['CM_DOCKER_IMAGE_TAG'] + docker_image_repo = env['MLC_DOCKER_IMAGE_REPO'] + docker_image_base = env['MLC_DOCKER_IMAGE_BASE'] + docker_image_name = env['MLC_DOCKER_IMAGE_NAME'] + docker_image_tag = env['MLC_DOCKER_IMAGE_TAG'] DOCKER_CONTAINER = docker_image_repo + "/" + \ docker_image_name + ":" + docker_image_tag @@ -52,7 +52,7 @@ def preprocess(i): print('') print('Checking existing Docker container:') print('') - CMD = f"""{env['CM_CONTAINER_TOOL']} ps --format=json --filter "ancestor={DOCKER_CONTAINER}" """ + CMD = f"""{env['MLC_CONTAINER_TOOL']} ps --format=json --filter "ancestor={DOCKER_CONTAINER}" """ if os_info['platform'] == 'windows': CMD += " 2> nul" else: @@ -69,7 +69,7 @@ def preprocess(i): 'error': 'Unexpected error occurred with docker run:\n{}'.format(e) } - if len(out) > 0 and str(env.get('CM_DOCKER_REUSE_EXISTING_CONTAINER', + if len(out) > 0 and str(env.get('MLC_DOCKER_REUSE_EXISTING_CONTAINER', '')).lower() in ["1", "true", "yes"]: # container exists # print(out) out_split = out.splitlines() @@ -87,13 +87,13 @@ def preprocess(i): if isinstance(out_json, list) and len(out_json) > 0: existing_container_id = out_json[0]['Id'] print(f"Reusing existing container {existing_container_id}") - env['CM_DOCKER_CONTAINER_ID'] = existing_container_id + env['MLC_DOCKER_CONTAINER_ID'] = existing_container_id else: - if env.get('CM_DOCKER_CONTAINER_ID', '') != '': - del (env['CM_DOCKER_CONTAINER_ID']) # not valid ID + if env.get('MLC_DOCKER_CONTAINER_ID', '') != '': + del (env['MLC_DOCKER_CONTAINER_ID']) # not valid ID - CMD = f"""{env['CM_CONTAINER_TOOL']} images -q """ + DOCKER_CONTAINER + CMD = f"""{env['MLC_CONTAINER_TOOL']} images -q """ + DOCKER_CONTAINER if os_info['platform'] == 'windows': CMD += " 2> nul" @@ -113,15 +113,15 @@ def preprocess(i): return { 'return': 1, 'error': 'Docker is either not installed or not started:\n{}'.format(e)} - recreate_image = env.get('CM_DOCKER_IMAGE_RECREATE', '') + recreate_image = env.get('MLC_DOCKER_IMAGE_RECREATE', '') if recreate_image != 'yes': if docker_image: print("Docker image exists with ID: " + docker_image) - env['CM_DOCKER_IMAGE_EXISTS'] = "yes" + env['MLC_DOCKER_IMAGE_EXISTS'] = "yes" # elif recreate_image == "yes": - # env['CM_DOCKER_IMAGE_RECREATE'] = "no" + # env['MLC_DOCKER_IMAGE_RECREATE'] = "no" return {'return': 0} @@ -135,10 +135,10 @@ def postprocess(i): # Updating Docker info update_docker_info(env) - docker_image_repo = env['CM_DOCKER_IMAGE_REPO'] - docker_image_base = env['CM_DOCKER_IMAGE_BASE'] - docker_image_name = env['CM_DOCKER_IMAGE_NAME'] - docker_image_tag = env['CM_DOCKER_IMAGE_TAG'] + docker_image_repo = env['MLC_DOCKER_IMAGE_REPO'] + docker_image_base = env['MLC_DOCKER_IMAGE_BASE'] + docker_image_name = env['MLC_DOCKER_IMAGE_NAME'] + docker_image_tag = env['MLC_DOCKER_IMAGE_TAG'] run_cmds = [] mount_cmds = [] @@ -147,54 +147,54 @@ def postprocess(i): # not completed as su command breaks the execution sequence # - # if env.get('CM_DOCKER_PASS_USER_ID', '') != '': + # if env.get('MLC_DOCKER_PASS_USER_ID', '') != '': # run_opts += " --user 0 " # run_cmds.append(f"(usermod -u {os.getuid()} cmuser || echo pass)") # run_cmds.append(f"(chown -R {os.getuid()}:{os.getuid()} /home/cmuser || echo pass)") # run_cmds.append(" ( su cmuser )") # 
run_cmds.append('export PATH="/home/cmuser/venv/cm/bin:$PATH"') - if env.get('CM_DOCKER_PRE_RUN_COMMANDS', []): - for pre_run_cmd in env['CM_DOCKER_PRE_RUN_COMMANDS']: + if env.get('MLC_DOCKER_PRE_RUN_COMMANDS', []): + for pre_run_cmd in env['MLC_DOCKER_PRE_RUN_COMMANDS']: run_cmds.append(pre_run_cmd) - if env.get('CM_DOCKER_VOLUME_MOUNTS', []): - for mounts in env['CM_DOCKER_VOLUME_MOUNTS']: + if env.get('MLC_DOCKER_VOLUME_MOUNTS', []): + for mounts in env['MLC_DOCKER_VOLUME_MOUNTS']: mount_cmds.append(mounts) - if env.get('CM_DOCKER_PASS_USER_GROUP', '') != '': + if env.get('MLC_DOCKER_PASS_USER_GROUP', '') != '': run_opts += " --group-add $(id -g $USER) " - if env.get('CM_DOCKER_ADD_DEVICE', '') != '': - run_opts += " --device=" + env['CM_DOCKER_ADD_DEVICE'] + if env.get('MLC_DOCKER_ADD_DEVICE', '') != '': + run_opts += " --device=" + env['MLC_DOCKER_ADD_DEVICE'] - if env.get('CM_DOCKER_PRIVILEGED_MODE', '') == 'yes': + if env.get('MLC_DOCKER_PRIVILEGED_MODE', '') == 'yes': run_opts += " --privileged " - if env.get('CM_DOCKER_ADD_NUM_GPUS', '') != '': - run_opts += " --gpus={}".format(env['CM_DOCKER_ADD_NUM_GPUS']) - elif env.get('CM_DOCKER_ADD_ALL_GPUS', '') != '': + if env.get('MLC_DOCKER_ADD_NUM_GPUS', '') != '': + run_opts += " --gpus={}".format(env['MLC_DOCKER_ADD_NUM_GPUS']) + elif env.get('MLC_DOCKER_ADD_ALL_GPUS', '') != '': run_opts += " --gpus=all" - if env.get('CM_DOCKER_SHM_SIZE', '') != '': - run_opts += " --shm-size={}".format(env['CM_DOCKER_SHM_SIZE']) + if env.get('MLC_DOCKER_SHM_SIZE', '') != '': + run_opts += " --shm-size={}".format(env['MLC_DOCKER_SHM_SIZE']) - if env.get('CM_DOCKER_EXTRA_RUN_ARGS', '') != '': - run_opts += env['CM_DOCKER_EXTRA_RUN_ARGS'] + if env.get('MLC_DOCKER_EXTRA_RUN_ARGS', '') != '': + run_opts += env['MLC_DOCKER_EXTRA_RUN_ARGS'] - if env.get('CM_CONTAINER_TOOL', '') == 'podman' and env.get( - 'CM_PODMAN_MAP_USER_ID', '').lower() not in ["no", "0", "false"]: + if env.get('MLC_CONTAINER_TOOL', '') == 'podman' and env.get( + 'MLC_PODMAN_MAP_USER_ID', '').lower() not in ["no", "0", "false"]: run_opts += " --userns=keep-id" - if env.get('CM_DOCKER_PORT_MAPS', []): - for ports in env['CM_DOCKER_PORT_MAPS']: + if env.get('MLC_DOCKER_PORT_MAPS', []): + for ports in env['MLC_DOCKER_PORT_MAPS']: port_map_cmds.append(ports) - run_cmd = env['CM_DOCKER_RUN_CMD'] + " " + \ - env.get('CM_DOCKER_RUN_CMD_EXTRA', '').replace(":", "=") + run_cmd = env['MLC_DOCKER_RUN_CMD'] + " " + \ + env.get('MLC_DOCKER_RUN_CMD_EXTRA', '').replace(":", "=") run_cmds.append(run_cmd) - if 'CM_DOCKER_POST_RUN_COMMANDS' in env: - for post_run_cmd in env['CM_DOCKER_POST_RUN_COMMANDS']: + if 'MLC_DOCKER_POST_RUN_COMMANDS' in env: + for post_run_cmd in env['MLC_DOCKER_POST_RUN_COMMANDS']: run_cmds.append(post_run_cmd) run_cmd = " && ".join(run_cmds) @@ -237,7 +237,7 @@ def postprocess(i): # Currently have problem running Docker in detached mode on Windows: detached = str( env.get( - 'CM_DOCKER_DETACHED_MODE', + 'MLC_DOCKER_DETACHED_MODE', '')).lower() in [ 'yes', 'true', @@ -248,16 +248,16 @@ def postprocess(i): return { 'return': 1, 'error': 'Currently we don\'t support running Docker containers in detached mode on Windows - TBD'} - existing_container_id = env.get('CM_DOCKER_CONTAINER_ID', '') + existing_container_id = env.get('MLC_DOCKER_CONTAINER_ID', '') if existing_container_id: - CMD = f"""ID={existing_container_id} && {env['CM_CONTAINER_TOOL']} exec $ID bash -c '""" + run_cmd + "'" + CMD = f"""ID={existing_container_id} && {env['MLC_CONTAINER_TOOL']} exec $ID bash -c '""" + 
run_cmd + "'" else: - CONTAINER = f"""{env['CM_CONTAINER_TOOL']} run -dt {run_opts} --rm {docker_image_repo}/{docker_image_name}:{docker_image_tag} bash""" - CMD = f"""ID=`{CONTAINER}` && {env['CM_CONTAINER_TOOL']} exec $ID bash -c '{run_cmd}'""" + CONTAINER = f"""{env['MLC_CONTAINER_TOOL']} run -dt {run_opts} --rm {docker_image_repo}/{docker_image_name}:{docker_image_tag} bash""" + CMD = f"""ID=`{CONTAINER}` && {env['MLC_CONTAINER_TOOL']} exec $ID bash -c '{run_cmd}'""" - if False and str(env.get('CM_KEEP_DETACHED_CONTAINER', '')).lower() not in [ + if False and str(env.get('MLC_KEEP_DETACHED_CONTAINER', '')).lower() not in [ 'yes', "1", 'true']: - CMD += f""" && {env['CM_CONTAINER_TOOL']} kill $ID >/dev/null""" + CMD += f""" && {env['MLC_CONTAINER_TOOL']} kill $ID >/dev/null""" CMD += ' && echo "ID=$ID"' @@ -269,7 +269,7 @@ def postprocess(i): print( "Running " + run_cmd + - f""" inside {env['CM_CONTAINER_TOOL']} container""") + f""" inside {env['MLC_CONTAINER_TOOL']} container""") record_script({'cmd': CMD, 'env': env}) @@ -293,7 +293,7 @@ def postprocess(i): docker_out = result.stdout # if docker_out != 0: - # return {'return': docker_out, 'error': f""{env['CM_CONTAINER_TOOL']} + # return {'return': docker_out, 'error': f""{env['MLC_CONTAINER_TOOL']} # run failed""} lines = docker_out.split("\n") @@ -301,7 +301,7 @@ def postprocess(i): for line in lines: if line.startswith("ID="): ID = line[3:] - env['CM_DOCKER_CONTAINER_ID'] = ID + env['MLC_DOCKER_CONTAINER_ID'] = ID print(docker_out) @@ -313,12 +313,12 @@ def postprocess(i): x1 = '' x2 = '' run_cmd_prefix = "" - if env.get('CM_DOCKER_INTERACTIVE_MODE', '') in ['yes', 'True', True]: + if env.get('MLC_DOCKER_INTERACTIVE_MODE', '') in ['yes', 'True', True]: run_cmd_prefix = "(" x1 = '-it' x2 = " && bash ) || bash" - CONTAINER = f"{env['CM_CONTAINER_TOOL']} run " + x1 + " --entrypoint " + x + x + " " + run_opts + \ + CONTAINER = f"{env['MLC_CONTAINER_TOOL']} run " + x1 + " --entrypoint " + x + x + " " + run_opts + \ " " + docker_image_repo + "/" + docker_image_name + ":" + docker_image_tag CMD = CONTAINER + " bash -c " + x + run_cmd_prefix + run_cmd + x2 + x @@ -335,7 +335,7 @@ def postprocess(i): if docker_out % 256 == 0: docker_out = 1 return {'return': docker_out, - 'error': f"""{env['CM_CONTAINER_TOOL']} run failed"""} + 'error': f"""{env['MLC_CONTAINER_TOOL']} run failed"""} return {'return': 0} @@ -347,12 +347,12 @@ def record_script(i): files = [] - dockerfile_path = env.get('CM_DOCKERFILE_WITH_PATH', '') + dockerfile_path = env.get('MLC_DOCKERFILE_WITH_PATH', '') if dockerfile_path != '' and os.path.isfile(dockerfile_path): files.append(dockerfile_path + '.run.bat') files.append(dockerfile_path + '.run.sh') - save_script = env.get('CM_DOCKER_SAVE_SCRIPT', '') + save_script = env.get('MLC_DOCKER_SAVE_SCRIPT', '') if save_script != '': if save_script.endswith('.bat') or save_script.endswith('.sh'): files.append(save_script) @@ -370,31 +370,31 @@ def record_script(i): def update_docker_info(env): # Updating Docker info - docker_image_repo = env.get('CM_DOCKER_IMAGE_REPO', 'localhost/local') - env['CM_DOCKER_IMAGE_REPO'] = docker_image_repo + docker_image_repo = env.get('MLC_DOCKER_IMAGE_REPO', 'localhost/local') + env['MLC_DOCKER_IMAGE_REPO'] = docker_image_repo - docker_image_base = env.get('CM_DOCKER_IMAGE_BASE') + docker_image_base = env.get('MLC_DOCKER_IMAGE_BASE') if not docker_image_base: - if env.get("CM_DOCKER_OS", '') != '': - docker_image_base = env["CM_DOCKER_OS"] + \ - ":" + env["CM_DOCKER_OS_VERSION"] + if 
env.get("MLC_DOCKER_OS", '') != '': + docker_image_base = env["MLC_DOCKER_OS"] + \ + ":" + env["MLC_DOCKER_OS_VERSION"] else: docker_image_base = "ubuntu:22.04" - env['CM_DOCKER_IMAGE_BASE'] = docker_image_base + env['MLC_DOCKER_IMAGE_BASE'] = docker_image_base - if env.get('CM_DOCKER_IMAGE_NAME', '') != '': - docker_image_name = env['CM_DOCKER_IMAGE_NAME'] + if env.get('MLC_DOCKER_IMAGE_NAME', '') != '': + docker_image_name = env['MLC_DOCKER_IMAGE_NAME'] else: docker_image_name = 'mlc-script-' + \ - env['CM_DOCKER_RUN_SCRIPT_TAGS'].replace( + env['MLC_DOCKER_RUN_SCRIPT_TAGS'].replace( ',', '-').replace('_', '-').replace('+', 'plus') - env['CM_DOCKER_IMAGE_NAME'] = docker_image_name + env['MLC_DOCKER_IMAGE_NAME'] = docker_image_name - docker_image_tag_extra = env.get('CM_DOCKER_IMAGE_TAG_EXTRA', '-latest') + docker_image_tag_extra = env.get('MLC_DOCKER_IMAGE_TAG_EXTRA', '-latest') - docker_image_tag = env.get('CM_DOCKER_IMAGE_TAG', docker_image_base.replace( + docker_image_tag = env.get('MLC_DOCKER_IMAGE_TAG', docker_image_base.replace( ':', '-').replace('_', '').replace("/", "-") + docker_image_tag_extra) - env['CM_DOCKER_IMAGE_TAG'] = docker_image_tag + env['MLC_DOCKER_IMAGE_TAG'] = docker_image_tag return diff --git a/script/run-docker-container/meta.yaml b/script/run-docker-container/meta.yaml index 3bc5ac184..f3a9a5c40 100644 --- a/script/run-docker-container/meta.yaml +++ b/script/run-docker-container/meta.yaml @@ -14,49 +14,49 @@ cache: false category: Docker automation default_env: - CM_DOCKER_DETACHED_MODE: 'yes' - CM_DOCKER_REUSE_EXISTING_CONTAINER: 'no' - CM_DOCKER_PRIVILEGED_MODE: 'no' - CM_PODMAN_MAP_USER_ID: 'no' + MLC_DOCKER_DETACHED_MODE: 'yes' + MLC_DOCKER_REUSE_EXISTING_CONTAINER: 'no' + MLC_DOCKER_PRIVILEGED_MODE: 'no' + MLC_PODMAN_MAP_USER_ID: 'no' input_mapping: - all_gpus: CM_DOCKER_ADD_ALL_GPUS - num_gpus: CM_DOCKER_ADD_NUM_GPUS - base: CM_DOCKER_IMAGE_BASE - cache: CM_DOCKER_CACHE - cm_repo: CM_MLOPS_REPO - detached: CM_DOCKER_DETACHED_MODE - device: CM_DOCKER_ADD_DEVICE - docker_image_base: CM_DOCKER_IMAGE_BASE - docker_base_image: CM_DOCKER_IMAGE_BASE - keep_detached: CM_KEEP_DETACHED_CONTAINER - docker_os: CM_DOCKER_OS - docker_os_version: CM_DOCKER_OS_VERSION - extra_run_args: CM_DOCKER_EXTRA_RUN_ARGS - fake_run_option: CM_DOCKER_FAKE_RUN_OPTION - gh_token: CM_GH_TOKEN - image_name: CM_DOCKER_IMAGE_NAME - image_repo: CM_DOCKER_IMAGE_REPO - image_tag: CM_DOCKER_IMAGE_TAG - image_tag_extra: CM_DOCKER_IMAGE_TAG_EXTRA - interactive: CM_DOCKER_INTERACTIVE_MODE - it: CM_DOCKER_INTERACTIVE - mounts: CM_DOCKER_VOLUME_MOUNTS - pass_user_id: CM_DOCKER_PASS_USER_ID - pass_user_group: CM_DOCKER_PASS_USER_GROUP - port_maps: CM_DOCKER_PORT_MAPS - post_run_cmds: CM_DOCKER_POST_RUN_COMMANDS - pre_run_cmds: CM_DOCKER_PRE_RUN_COMMANDS - real_run: CM_REAL_RUN - recreate: CM_DOCKER_IMAGE_RECREATE - run_cmd: CM_DOCKER_RUN_CMD - run_cmd_extra: CM_DOCKER_RUN_CMD_EXTRA - save_script: CM_DOCKER_SAVE_SCRIPT - script_tags: CM_DOCKER_RUN_SCRIPT_TAGS - shm_size: CM_DOCKER_SHM_SIZE + all_gpus: MLC_DOCKER_ADD_ALL_GPUS + num_gpus: MLC_DOCKER_ADD_NUM_GPUS + base: MLC_DOCKER_IMAGE_BASE + cache: MLC_DOCKER_CACHE + cm_repo: MLC_MLOPS_REPO + detached: MLC_DOCKER_DETACHED_MODE + device: MLC_DOCKER_ADD_DEVICE + docker_image_base: MLC_DOCKER_IMAGE_BASE + docker_base_image: MLC_DOCKER_IMAGE_BASE + keep_detached: MLC_KEEP_DETACHED_CONTAINER + docker_os: MLC_DOCKER_OS + docker_os_version: MLC_DOCKER_OS_VERSION + extra_run_args: MLC_DOCKER_EXTRA_RUN_ARGS + fake_run_option: MLC_DOCKER_FAKE_RUN_OPTION + 
gh_token: MLC_GH_TOKEN + image_name: MLC_DOCKER_IMAGE_NAME + image_repo: MLC_DOCKER_IMAGE_REPO + image_tag: MLC_DOCKER_IMAGE_TAG + image_tag_extra: MLC_DOCKER_IMAGE_TAG_EXTRA + interactive: MLC_DOCKER_INTERACTIVE_MODE + it: MLC_DOCKER_INTERACTIVE + mounts: MLC_DOCKER_VOLUME_MOUNTS + pass_user_id: MLC_DOCKER_PASS_USER_ID + pass_user_group: MLC_DOCKER_PASS_USER_GROUP + port_maps: MLC_DOCKER_PORT_MAPS + post_run_cmds: MLC_DOCKER_POST_RUN_COMMANDS + pre_run_cmds: MLC_DOCKER_PRE_RUN_COMMANDS + real_run: MLC_REAL_RUN + recreate: MLC_DOCKER_IMAGE_RECREATE + run_cmd: MLC_DOCKER_RUN_CMD + run_cmd_extra: MLC_DOCKER_RUN_CMD_EXTRA + save_script: MLC_DOCKER_SAVE_SCRIPT + script_tags: MLC_DOCKER_RUN_SCRIPT_TAGS + shm_size: MLC_DOCKER_SHM_SIZE new_env_keys: - - 'CM_DOCKER_CONTAINER_ID' + - 'MLC_DOCKER_CONTAINER_ID' deps: - tags: get,docker @@ -65,11 +65,11 @@ prehook_deps: - names: - build-docker-image skip_if_any_env: - CM_DOCKER_IMAGE_EXISTS: + MLC_DOCKER_IMAGE_EXISTS: - 'yes' - CM_DOCKER_SKIP_BUILD: + MLC_DOCKER_SKIP_BUILD: - 'yes' - CM_DOCKER_CONTAINER_ID: + MLC_DOCKER_CONTAINER_ID: - on tags: build,docker,image - tags: get,docker diff --git a/script/run-mlperf-automotive-app/customize.py b/script/run-mlperf-automotive-app/customize.py index 4133b60ed..c714a75bc 100644 --- a/script/run-mlperf-automotive-app/customize.py +++ b/script/run-mlperf-automotive-app/customize.py @@ -21,15 +21,15 @@ def preprocess(i): state = i['state'] script_path = i['run_script_input']['path'] - if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes": + if env.get('MLC_RUN_DOCKER_CONTAINER', '') == "yes": return {'return': 0} - dump_version_info = env.get('CM_DUMP_VERSION_INFO', True) - system_meta = state.get('CM_SUT_META', {}) + dump_version_info = env.get('MLC_DUMP_VERSION_INFO', True) + system_meta = state.get('MLC_SUT_META', {}) if system_meta: - env['CM_SUT_META_EXISTS'] = "yes" + env['MLC_SUT_META_EXISTS'] = "yes" - env['CM_MODEL'] = env['CM_MLPERF_MODEL'] + env['MLC_MODEL'] = env['MLC_MLPERF_MODEL'] # Clean MLPerf inference output tar file if non-standard x = env.get('MLPERF_INFERENCE_SUBMISSION_TAR_FILE', '') @@ -44,91 +44,91 @@ def preprocess(i): if os.path.isfile(z): os.remove(z) - if env.get('CM_MLPERF_SUBMISSION_SYSTEM_TYPE', '') != '': - system_type = env['CM_MLPERF_SUBMISSION_SYSTEM_TYPE'] + if env.get('MLC_MLPERF_SUBMISSION_SYSTEM_TYPE', '') != '': + system_type = env['MLC_MLPERF_SUBMISSION_SYSTEM_TYPE'] system_meta['system_type'] = system_type - if env.get('CM_MLPERF_SUBMISSION_DIVISION', '') != '': - division = env['CM_MLPERF_SUBMISSION_DIVISION'] + if env.get('MLC_MLPERF_SUBMISSION_DIVISION', '') != '': + division = env['MLC_MLPERF_SUBMISSION_DIVISION'] system_meta['division'] = division if system_meta.get('division', '') != "closed": # no compliance runs needed for open division - env["CM_MLPERF_LOADGEN_COMPLIANCE"] = "no" + env["MLC_MLPERF_LOADGEN_COMPLIANCE"] = "no" clean = False - if 'CM_MLPERF_CLEAN_ALL' in env: + if 'MLC_MLPERF_CLEAN_ALL' in env: clean = True - if 'CM_MLPERF_CLEAN_SUBMISSION_DIR' not in env: - env['CM_MLPERF_CLEAN_SUBMISSION_DIR'] = "yes" - if 'CM_RERUN' not in env: - env['CM_RERUN'] = "yes" + if 'MLC_MLPERF_CLEAN_SUBMISSION_DIR' not in env: + env['MLC_MLPERF_CLEAN_SUBMISSION_DIR'] = "yes" + if 'MLC_RERUN' not in env: + env['MLC_RERUN'] = "yes" - if str(env.get('CM_SYSTEM_POWER', 'no')).lower( - ) != "no" or env.get('CM_MLPERF_POWER', '') == "yes": + if str(env.get('MLC_SYSTEM_POWER', 'no')).lower( + ) != "no" or env.get('MLC_MLPERF_POWER', '') == "yes": power_variation = ",_power" - 
env['CM_MLPERF_POWER'] = "yes" + env['MLC_MLPERF_POWER'] = "yes" else: power_variation = "" - if env.get('CM_RUN_STYLE', - '') == "valid" and 'CM_RUN_MLPERF_ACCURACY' not in env: - env['CM_RUN_MLPERF_ACCURACY'] = "on" + if env.get('MLC_RUN_STYLE', + '') == "valid" and 'MLC_RUN_MLPERF_ACCURACY' not in env: + env['MLC_RUN_MLPERF_ACCURACY'] = "on" - if env.get('CM_MLPERF_INFERENCE_SOURCE', '') != '': + if env.get('MLC_MLPERF_INFERENCE_SOURCE', '') != '': print( "Using MLCommons Inference source from " + - env['CM_MLPERF_INFERENCE_SOURCE']) + env['MLC_MLPERF_INFERENCE_SOURCE']) - if 'CM_MLPERF_LOADGEN_EXTRA_OPTIONS' not in env: - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] = "" + if 'MLC_MLPERF_LOADGEN_EXTRA_OPTIONS' not in env: + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] = "" - if 'CM_MLPERF_LOADGEN_MODES' not in env: - if 'CM_MLPERF_LOADGEN_MODE' not in env: - env['CM_MLPERF_LOADGEN_MODE'] = "performance" + if 'MLC_MLPERF_LOADGEN_MODES' not in env: + if 'MLC_MLPERF_LOADGEN_MODE' not in env: + env['MLC_MLPERF_LOADGEN_MODE'] = "performance" - if 'CM_MLPERF_LOADGEN_SCENARIOS' not in env: - if 'CM_MLPERF_LOADGEN_SCENARIO' not in env: - env['CM_MLPERF_LOADGEN_SCENARIO'] = "Offline" + if 'MLC_MLPERF_LOADGEN_SCENARIOS' not in env: + if 'MLC_MLPERF_LOADGEN_SCENARIO' not in env: + env['MLC_MLPERF_LOADGEN_SCENARIO'] = "Offline" - if env.get('CM_MLPERF_LOADGEN_ALL_SCENARIOS', '') == "yes": - env['CM_MLPERF_LOADGEN_SCENARIOS'] = get_valid_scenarios( - env['CM_MODEL'], + if env.get('MLC_MLPERF_LOADGEN_ALL_SCENARIOS', '') == "yes": + env['MLC_MLPERF_LOADGEN_SCENARIOS'] = get_valid_scenarios( + env['MLC_MODEL'], system_meta['system_type'], - env['CM_MLPERF_LAST_RELEASE'], - env['CM_MLPERF_INFERENCE_SOURCE']) + env['MLC_MLPERF_LAST_RELEASE'], + env['MLC_MLPERF_INFERENCE_SOURCE']) else: system_meta = {} - env['CM_MLPERF_LOADGEN_SCENARIOS'] = [ - env['CM_MLPERF_LOADGEN_SCENARIO']] + env['MLC_MLPERF_LOADGEN_SCENARIOS'] = [ + env['MLC_MLPERF_LOADGEN_SCENARIO']] - if env.get('CM_MLPERF_LOADGEN_ALL_MODES', '') == "yes": - env['CM_MLPERF_LOADGEN_MODES'] = ["performance", "accuracy"] + if env.get('MLC_MLPERF_LOADGEN_ALL_MODES', '') == "yes": + env['MLC_MLPERF_LOADGEN_MODES'] = ["performance", "accuracy"] else: - env['CM_MLPERF_LOADGEN_MODES'] = [env['CM_MLPERF_LOADGEN_MODE']] + env['MLC_MLPERF_LOADGEN_MODES'] = [env['MLC_MLPERF_LOADGEN_MODE']] if env.get('OUTPUT_BASE_DIR', '') == '': env['OUTPUT_BASE_DIR'] = env.get( - 'CM_MLPERF_INFERENCE_RESULTS_DIR', os.getcwd()) + 'MLC_MLPERF_INFERENCE_RESULTS_DIR', os.getcwd()) test_list = [] variation_implementation = "_" + \ - env.get("CM_MLPERF_IMPLEMENTATION", "reference") - variation_model = ",_" + env["CM_MLPERF_MODEL"] + env.get("MLC_MLPERF_IMPLEMENTATION", "reference") + variation_model = ",_" + env["MLC_MLPERF_MODEL"] variation_backend = ",_" + \ - env["CM_MLPERF_BACKEND"] if env.get( - "CM_MLPERF_BACKEND", "") != "" else "" + env["MLC_MLPERF_BACKEND"] if env.get( + "MLC_MLPERF_BACKEND", "") != "" else "" variation_device = ",_" + \ - env["CM_MLPERF_DEVICE"] if env.get( - "CM_MLPERF_DEVICE", "") != "" else "" - variation_run_style = ",_" + env.get("CM_MLPERF_RUN_STYLE", "test") - variation_reproducibility = ",_" + env["CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS"] if env.get( - "CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS", "") != "" else "" - - if env.get("CM_MLPERF_MODEL_PRECISION", '') != '': - variation_quantization_string = ",_" + env["CM_MLPERF_MODEL_PRECISION"] + env["MLC_MLPERF_DEVICE"] if env.get( + "MLC_MLPERF_DEVICE", "") != "" else "" + variation_run_style = ",_" + 
env.get("MLC_MLPERF_RUN_STYLE", "test") + variation_reproducibility = ",_" + env["MLC_RUN_MLPERF_INFERENCE_APP_DEFAULTS"] if env.get( + "MLC_RUN_MLPERF_INFERENCE_APP_DEFAULTS", "") != "" else "" + + if env.get("MLC_MLPERF_MODEL_PRECISION", '') != '': + variation_quantization_string = ",_" + env["MLC_MLPERF_MODEL_PRECISION"] else: variation_quantization_string = "" @@ -147,7 +147,7 @@ def preprocess(i): for key in adr_from_meta: add_deps_recursive[key] = adr_from_meta[key] - if env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '') != '': + if env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '') != '': if not add_deps_recursive.get('mlperf-inference-implementation', {}): add_deps_recursive['mlperf-inference-implementation'] = {} if add_deps_recursive['mlperf-inference-implementation'].get( @@ -156,9 +156,9 @@ def preprocess(i): else: add_deps_recursive['mlperf-inference-implementation']['tags'] += ',' add_deps_recursive['mlperf-inference-implementation']['tags'] += "_batch_size." + \ - env['CM_MLPERF_LOADGEN_MAX_BATCHSIZE'] + env['MLC_MLPERF_LOADGEN_MAX_BATCHSIZE'] - if env.get('CM_MLPERF_INFERENCE_SUT_VARIATION', '') != '': + if env.get('MLC_MLPERF_INFERENCE_SUT_VARIATION', '') != '': if not add_deps_recursive.get('mlperf-inference-implementation', {}): add_deps_recursive['mlperf-inference-implementation'] = {} if add_deps_recursive['mlperf-inference-implementation'].get( @@ -167,12 +167,12 @@ def preprocess(i): else: add_deps_recursive['mlperf-inference-implementation']['tags'] += ',' add_deps_recursive['mlperf-inference-implementation']['tags'] += "_" + \ - env['CM_MLPERF_INFERENCE_SUT_VARIATION'] + env['MLC_MLPERF_INFERENCE_SUT_VARIATION'] - if env.get('CM_NETWORK_LOADGEN', '') != '': + if env.get('MLC_NETWORK_LOADGEN', '') != '': if not add_deps_recursive.get('mlperf-inference-implementation', {}): add_deps_recursive['mlperf-inference-implementation'] = {} - network_variation_tag = f"_network-{env['CM_NETWORK_LOADGEN']}" + network_variation_tag = f"_network-{env['MLC_NETWORK_LOADGEN']}" if add_deps_recursive['mlperf-inference-implementation'].get( 'tags', '') == '': add_deps_recursive['mlperf-inference-implementation']['tags'] = '' @@ -180,12 +180,12 @@ def preprocess(i): add_deps_recursive['mlperf-inference-implementation']['tags'] += ',' add_deps_recursive['mlperf-inference-implementation']['tags'] += network_variation_tag - if env.get('CM_OUTPUT_FOLDER_NAME', '') == '': - env['CM_OUTPUT_FOLDER_NAME'] = env['CM_MLPERF_RUN_STYLE'] + "_results" + if env.get('MLC_OUTPUT_FOLDER_NAME', '') == '': + env['MLC_OUTPUT_FOLDER_NAME'] = env['MLC_MLPERF_RUN_STYLE'] + "_results" output_dir = os.path.join( env['OUTPUT_BASE_DIR'], - env['CM_OUTPUT_FOLDER_NAME']) + env['MLC_OUTPUT_FOLDER_NAME']) if clean: path_to_clean = output_dir @@ -196,15 +196,15 @@ def preprocess(i): print('=========================================================') - if str(env.get('CM_MLPERF_USE_DOCKER', '') + if str(env.get('MLC_MLPERF_USE_DOCKER', '') ).lower() in ["1", "true", "yes"]: action = "docker" del (env['OUTPUT_BASE_DIR']) state = {} docker_extra_input = {} - if env.get('CM_HW_NAME'): - del (env['CM_HW_NAME']) + if env.get('MLC_HW_NAME'): + del (env['MLC_HW_NAME']) for k in inp: if k.startswith("docker_"): @@ -213,32 +213,32 @@ def preprocess(i): else: action = "run" - # local_keys = [ 'CM_MLPERF_SKIP_RUN', 'CM_MLPERF_LOADGEN_QUERY_COUNT', - # 'CM_MLPERF_LOADGEN_TARGET_QPS', 'CM_MLPERF_LOADGEN_TARGET_LATENCY' ] + # local_keys = [ 'MLC_MLPERF_SKIP_RUN', 'MLC_MLPERF_LOADGEN_QUERY_COUNT', + # 'MLC_MLPERF_LOADGEN_TARGET_QPS', 
'MLC_MLPERF_LOADGEN_TARGET_LATENCY' ] - for scenario in env['CM_MLPERF_LOADGEN_SCENARIOS']: + for scenario in env['MLC_MLPERF_LOADGEN_SCENARIOS']: scenario_tags = tags + ",_" + scenario.lower() - env['CM_MLPERF_LOADGEN_SCENARIO'] = scenario + env['MLC_MLPERF_LOADGEN_SCENARIO'] = scenario if scenario == "Offline": - if env.get('CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS'): - env['CM_MLPERF_LOADGEN_TARGET_QPS'] = env['CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS'] + if env.get('MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS'): + env['MLC_MLPERF_LOADGEN_TARGET_QPS'] = env['MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS'] elif scenario == "Server": - if env.get('CM_MLPERF_LOADGEN_SERVER_TARGET_QPS'): - env['CM_MLPERF_LOADGEN_TARGET_QPS'] = env['CM_MLPERF_LOADGEN_SERVER_TARGET_QPS'] + if env.get('MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS'): + env['MLC_MLPERF_LOADGEN_TARGET_QPS'] = env['MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS'] elif scenario == "SingleStream": - if env.get('CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY'): - env['CM_MLPERF_LOADGEN_TARGET_LATENCY'] = env['CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY'] + if env.get('MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY'): + env['MLC_MLPERF_LOADGEN_TARGET_LATENCY'] = env['MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY'] elif scenario == "MultiStream": - if env.get('CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY'): - env['CM_MLPERF_LOADGEN_TARGET_LATENCY'] = env['CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY'] + if env.get('MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY'): + env['MLC_MLPERF_LOADGEN_TARGET_LATENCY'] = env['MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY'] - for mode in env['CM_MLPERF_LOADGEN_MODES']: + for mode in env['MLC_MLPERF_LOADGEN_MODES']: env_copy = copy.deepcopy(env) - env_copy['CM_MLPERF_LOADGEN_MODE'] = mode + env_copy['MLC_MLPERF_LOADGEN_MODE'] = mode for key in env_copy: if isinstance(env_copy[key], str) and env_copy[key].startswith( - "CM_TMP_"): + "MLC_TMP_"): del env_copy[key] print(f"\nRunning loadgen scenario: {scenario} and mode: {mode}") @@ -256,22 +256,22 @@ def preprocess(i): # We run commands interactively inside the docker container return {'return': 0} - if env_copy.get('CM_OUTPUT_PREDICTIONS_PATH'): + if env_copy.get('MLC_OUTPUT_PREDICTIONS_PATH'): print( - f"\nOutput predictions can be seen by opening the images inside {env_copy['CM_OUTPUT_PREDICTIONS_PATH']}\n") + f"\nOutput predictions can be seen by opening the images inside {env_copy['MLC_OUTPUT_PREDICTIONS_PATH']}\n") if state.get('docker', {}): del (state['docker']) - if env.get("CM_MLPERF_LOADGEN_COMPLIANCE", "") == "yes": + if env.get("MLC_MLPERF_LOADGEN_COMPLIANCE", "") == "yes": for test in test_list: env_copy = copy.deepcopy(env) for key in env_copy: if isinstance(env_copy[key], str) and env_copy[key].startswith( - "CM_TMP_"): + "MLC_TMP_"): del env_copy[key] - env_copy['CM_MLPERF_LOADGEN_COMPLIANCE_TEST'] = test - env_copy['CM_MLPERF_LOADGEN_MODE'] = "compliance" + env_copy['MLC_MLPERF_LOADGEN_COMPLIANCE_TEST'] = test + env_copy['MLC_MLPERF_LOADGEN_MODE'] = "compliance" ii = {'action': action, 'automation': 'script', 'tags': scenario_tags, 'quiet': 'true', 'env': env_copy, 'input': inp, 'state': state, 'add_deps': copy.deepcopy(add_deps), 'add_deps_recursive': copy.deepcopy(add_deps_recursive), 'adr': copy.deepcopy(adr), 'ad': ad, 'v': verbose, 'print_env': print_env, 'print_deps': print_deps, 'dump_version_info': dump_version_info} @@ -348,9 +348,9 @@ def postprocess(i): env = i['env'] state = i['state'] - if env.get('CM_MLPERF_IMPLEMENTATION', '') == 'reference': - x1 = 
env.get('CM_MLPERF_INFERENCE_SOURCE', '') - x2 = env.get('CM_MLPERF_INFERENCE_CONF_PATH', '') + if env.get('MLC_MLPERF_IMPLEMENTATION', '') == 'reference': + x1 = env.get('MLC_MLPERF_INFERENCE_SOURCE', '') + x2 = env.get('MLC_MLPERF_INFERENCE_CONF_PATH', '') if x1 != '' and x2 != '': print('') diff --git a/script/run-mlperf-automotive-app/meta.yaml b/script/run-mlperf-automotive-app/meta.yaml index 83bf6a1cb..906102ad5 100644 --- a/script/run-mlperf-automotive-app/meta.yaml +++ b/script/run-mlperf-automotive-app/meta.yaml @@ -25,67 +25,67 @@ tags: tags_help: "run-abtf,inference" default_env: - CM_MLPERF_IMPLEMENTATION: reference - CM_MLPERF_MODEL: retinanet - CM_MLPERF_RUN_STYLE: test + MLC_MLPERF_IMPLEMENTATION: reference + MLC_MLPERF_MODEL: retinanet + MLC_MLPERF_RUN_STYLE: test input_mapping: - backend: CM_MLPERF_BACKEND - clean: CM_MLPERF_CLEAN_ALL - compliance: CM_MLPERF_LOADGEN_COMPLIANCE - dashboard_wb_project: CM_MLPERF_DASHBOARD_WANDB_PROJECT - dashboard_wb_user: CM_MLPERF_DASHBOARD_WANDB_USER - debug: CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM - device: CM_MLPERF_DEVICE - division: CM_MLPERF_SUBMISSION_DIVISION - docker: CM_MLPERF_USE_DOCKER - dump_version_info: CM_DUMP_VERSION_INFO - save_console_log: CM_SAVE_CONSOLE_LOG - execution_mode: CM_MLPERF_RUN_STYLE - find_performance: CM_MLPERF_FIND_PERFORMANCE_MODE - gh_token: CM_GH_TOKEN - gpu_name: CM_NVIDIA_GPU_NAME - hw_name: CM_HW_NAME - hw_notes_extra: CM_MLPERF_SUT_SW_NOTES_EXTRA + backend: MLC_MLPERF_BACKEND + clean: MLC_MLPERF_CLEAN_ALL + compliance: MLC_MLPERF_LOADGEN_COMPLIANCE + dashboard_wb_project: MLC_MLPERF_DASHBOARD_WANDB_PROJECT + dashboard_wb_user: MLC_MLPERF_DASHBOARD_WANDB_USER + debug: MLC_DEBUG_SCRIPT_BENCHMARK_PROGRAM + device: MLC_MLPERF_DEVICE + division: MLC_MLPERF_SUBMISSION_DIVISION + docker: MLC_MLPERF_USE_DOCKER + dump_version_info: MLC_DUMP_VERSION_INFO + save_console_log: MLC_SAVE_CONSOLE_LOG + execution_mode: MLC_MLPERF_RUN_STYLE + find_performance: MLC_MLPERF_FIND_PERFORMANCE_MODE + gh_token: MLC_GH_TOKEN + gpu_name: MLC_NVIDIA_GPU_NAME + hw_name: MLC_HW_NAME + hw_notes_extra: MLC_MLPERF_SUT_SW_NOTES_EXTRA imagenet_path: IMAGENET_PATH - implementation: CM_MLPERF_IMPLEMENTATION - lang: CM_MLPERF_IMPLEMENTATION - mode: CM_MLPERF_LOADGEN_MODE - model: CM_MLPERF_MODEL - multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY - offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS + implementation: MLC_MLPERF_IMPLEMENTATION + lang: MLC_MLPERF_IMPLEMENTATION + mode: MLC_MLPERF_LOADGEN_MODE + model: MLC_MLPERF_MODEL + multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY + offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS output_dir: OUTPUT_BASE_DIR output_summary: MLPERF_INFERENCE_SUBMISSION_SUMMARY output_tar: MLPERF_INFERENCE_SUBMISSION_TAR_FILE - performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT - power: CM_SYSTEM_POWER - precision: CM_MLPERF_MODEL_PRECISION - preprocess_submission: CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR - push_to_github: CM_MLPERF_RESULT_PUSH_TO_GITHUB - readme: CM_MLPERF_README - regenerate_accuracy_file: CM_MLPERF_REGENERATE_ACCURACY_FILE - regenerate_files: CM_REGENERATE_MEASURE_FILES - rerun: CM_RERUN + performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT + power: MLC_SYSTEM_POWER + precision: MLC_MLPERF_MODEL_PRECISION + preprocess_submission: MLC_RUN_MLPERF_SUBMISSION_PREPROCESSOR + push_to_github: MLC_MLPERF_RESULT_PUSH_TO_GITHUB + readme: MLC_MLPERF_README + regenerate_accuracy_file: 
MLC_MLPERF_REGENERATE_ACCURACY_FILE + regenerate_files: MLC_REGENERATE_MEASURE_FILES + rerun: MLC_RERUN results_dir: OUTPUT_BASE_DIR - results_git_url: CM_MLPERF_RESULTS_GIT_REPO_URL - run_checker: CM_RUN_SUBMISSION_CHECKER - run_style: CM_MLPERF_RUN_STYLE - scenario: CM_MLPERF_LOADGEN_SCENARIO - server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS - singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY - skip_submission_generation: CM_MLPERF_SKIP_SUBMISSION_GENERATION - skip_truncation: CM_SKIP_TRUNCATE_ACCURACY - submission_dir: CM_MLPERF_INFERENCE_SUBMISSION_DIR - submitter: CM_MLPERF_SUBMITTER - sut_servers: CM_NETWORK_LOADGEN_SUT_SERVERS - sw_notes_extra: CM_MLPERF_SUT_SW_NOTES_EXTRA - system_type: CM_MLPERF_SUBMISSION_SYSTEM_TYPE - target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY - target_qps: CM_MLPERF_LOADGEN_TARGET_QPS - test_query_count: CM_TEST_QUERY_COUNT - threads: CM_NUM_THREADS - batch_size: CM_MLPERF_LOADGEN_MAX_BATCHSIZE - sut: CM_MLPERF_INFERENCE_SUT_VARIATION + results_git_url: MLC_MLPERF_RESULTS_GIT_REPO_URL + run_checker: MLC_RUN_SUBMISSION_CHECKER + run_style: MLC_MLPERF_RUN_STYLE + scenario: MLC_MLPERF_LOADGEN_SCENARIO + server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS + singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY + skip_submission_generation: MLC_MLPERF_SKIP_SUBMISSION_GENERATION + skip_truncation: MLC_SKIP_TRUNCATE_ACCURACY + submission_dir: MLC_MLPERF_INFERENCE_SUBMISSION_DIR + submitter: MLC_MLPERF_SUBMITTER + sut_servers: MLC_NETWORK_LOADGEN_SUT_SERVERS + sw_notes_extra: MLC_MLPERF_SUT_SW_NOTES_EXTRA + system_type: MLC_MLPERF_SUBMISSION_SYSTEM_TYPE + target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY + target_qps: MLC_MLPERF_LOADGEN_TARGET_QPS + test_query_count: MLC_TEST_QUERY_COUNT + threads: MLC_NUM_THREADS + batch_size: MLC_MLPERF_LOADGEN_MAX_BATCHSIZE + sut: MLC_MLPERF_INFERENCE_SUT_VARIATION new_state_keys: - app_mlperf_inference_* @@ -94,72 +94,72 @@ new_state_keys: deps: - tags: detect,os skip_if_env: - CM_MLPERF_USE_DOCKER: [ on ] + MLC_MLPERF_USE_DOCKER: [ on ] - tags: detect,cpu skip_if_env: - CM_MLPERF_USE_DOCKER: [ on ] + MLC_MLPERF_USE_DOCKER: [ on ] - names: - python - python3 tags: get,python3 skip_if_env: - CM_MLPERF_USE_DOCKER: [ on ] + MLC_MLPERF_USE_DOCKER: [ on ] - names: - inference-src tags: get,mlcommons,inference,src skip_if_env: - CM_MLPERF_USE_DOCKER: [ on ] + MLC_MLPERF_USE_DOCKER: [ on ] - tags: get,sut,description skip_if_env: - CM_MLPERF_USE_DOCKER: [ on ] + MLC_MLPERF_USE_DOCKER: [ on ] - tags: get,mlperf,inference,results,dir names: - get-mlperf-inference-results-dir enable_if_env: - CM_MLPERF_USE_DOCKER: [ off ] + MLC_MLPERF_USE_DOCKER: [ off ] skip_if_env: OUTPUT_BASE_DIR: [ on ] - tags: install,pip-package,for-cmind-python,_package.tabulate - tags: get,mlperf,inference,utils skip_if_env: - CM_MLPERF_USE_DOCKER: [ on ] + MLC_MLPERF_USE_DOCKER: [ on ] variations: accuracy-only: default_variations: submission-generation-style: full env: - CM_MLPERF_LOADGEN_MODE: accuracy - CM_MLPERF_SUBMISSION_RUN: 'yes' - CM_RUN_MLPERF_ACCURACY: 'on' - CM_RUN_SUBMISSION_CHECKER: 'no' + MLC_MLPERF_LOADGEN_MODE: accuracy + MLC_MLPERF_SUBMISSION_RUN: 'yes' + MLC_RUN_MLPERF_ACCURACY: 'on' + MLC_RUN_SUBMISSION_CHECKER: 'no' group: submission-generation all-modes: env: - CM_MLPERF_LOADGEN_ALL_MODES: 'yes' + MLC_MLPERF_LOADGEN_ALL_MODES: 'yes' group: mode all-scenarios: env: - CM_MLPERF_LOADGEN_ALL_SCENARIOS: 'yes' + MLC_MLPERF_LOADGEN_ALL_SCENARIOS: 'yes' compliance: env: - 
CM_MLPERF_LOADGEN_COMPLIANCE: 'yes' + MLC_MLPERF_LOADGEN_COMPLIANCE: 'yes' dashboard: default_gui: false env: - CM_MLPERF_DASHBOARD: 'on' + MLC_MLPERF_DASHBOARD: 'on' find-performance: env: - CM_MLPERF_FIND_PERFORMANCE_MODE: 'yes' - CM_MLPERF_LOADGEN_ALL_MODES: 'no' - CM_MLPERF_LOADGEN_MODE: performance - CM_MLPERF_RESULT_PUSH_TO_GITHUB: false + MLC_MLPERF_FIND_PERFORMANCE_MODE: 'yes' + MLC_MLPERF_LOADGEN_ALL_MODES: 'no' + MLC_MLPERF_LOADGEN_MODE: performance + MLC_MLPERF_RESULT_PUSH_TO_GITHUB: false group: submission-generation full: @@ -169,30 +169,30 @@ variations: coco2014-preprocessed: tags: _full env: - CM_MLPERF_SUBMISSION_GENERATION_STYLE: full - CM_MLPERF_SKIP_SUBMISSION_GENERATION: 'yes' + MLC_MLPERF_SUBMISSION_GENERATION_STYLE: full + MLC_MLPERF_SKIP_SUBMISSION_GENERATION: 'yes' group: submission-generation-style performance-only: default_variations: submission-generation-style: full env: - CM_MLPERF_LOADGEN_MODE: performance - CM_MLPERF_SUBMISSION_RUN: 'yes' - CM_RUN_SUBMISSION_CHECKER: 'no' + MLC_MLPERF_LOADGEN_MODE: performance + MLC_MLPERF_SUBMISSION_RUN: 'yes' + MLC_RUN_SUBMISSION_CHECKER: 'no' group: submission-generation mvp-demo: default_env: - CM_MLPERF_DEVICE: cpu + MLC_MLPERF_DEVICE: cpu env: - CM_MLPERF_INFERENCE_VERSION: mvp-demo - CM_MLPERF_MODEL: abtf-demo-model - CM_MLPERF_BACKEND: pytorch - CM_MLPERF_IMPLEMENTATION: mlcommons-python - CM_MLPERF_LOADGEN_SCENARIO: SingleStream - CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: mvp-demo + MLC_MLPERF_INFERENCE_VERSION: mvp-demo + MLC_MLPERF_MODEL: abtf-demo-model + MLC_MLPERF_BACKEND: pytorch + MLC_MLPERF_IMPLEMENTATION: mlcommons-python + MLC_MLPERF_LOADGEN_SCENARIO: SingleStream + MLC_RUN_MLPERF_INFERENCE_APP_DEFAULTS: mvp-demo adr: compiler: tags: gcc @@ -200,16 +200,16 @@ variations: poc-demo: default_env: - CM_MLPERF_DEVICE: cpu - CM_TEST_QUERY_COUNT: "20" + MLC_MLPERF_DEVICE: cpu + MLC_TEST_QUERY_COUNT: "20" env: - CM_MLPERF_INFERENCE_VERSION: poc-demo - CM_MLPERF_MODEL: abtf-poc-model - CM_MLPERF_BACKEND: pytorch - CM_MLPERF_IMPLEMENTATION: mlcommons-python - CM_MLPERF_LOADGEN_SCENARIO: SingleStream - CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: poc-demo + MLC_MLPERF_INFERENCE_VERSION: poc-demo + MLC_MLPERF_MODEL: abtf-poc-model + MLC_MLPERF_BACKEND: pytorch + MLC_MLPERF_IMPLEMENTATION: mlcommons-python + MLC_MLPERF_LOADGEN_SCENARIO: SingleStream + MLC_RUN_MLPERF_INFERENCE_APP_DEFAULTS: poc-demo adr: compiler: tags: gcc @@ -230,17 +230,17 @@ variations: default_variations: submission-generation-style: full env: - CM_MLPERF_LOADGEN_COMPLIANCE: 'yes' - CM_MLPERF_SUBMISSION_RUN: 'yes' - CM_RUN_MLPERF_ACCURACY: 'on' - CM_RUN_SUBMISSION_CHECKER: 'yes' - CM_TAR_SUBMISSION_DIR: 'yes' + MLC_MLPERF_LOADGEN_COMPLIANCE: 'yes' + MLC_MLPERF_SUBMISSION_RUN: 'yes' + MLC_RUN_MLPERF_ACCURACY: 'on' + MLC_RUN_SUBMISSION_CHECKER: 'yes' + MLC_TAR_SUBMISSION_DIR: 'yes' group: submission-generation post_deps: - names: - submission-generator enable_if_env: - CM_MLPERF_SKIP_SUBMISSION_GENERATION: + MLC_MLPERF_SKIP_SUBMISSION_GENERATION: - 'no' - 'false' - 'False' diff --git a/script/run-mlperf-inference-app/customize.py b/script/run-mlperf-inference-app/customize.py index 82d493993..56cbf56d6 100644 --- a/script/run-mlperf-inference-app/customize.py +++ b/script/run-mlperf-inference-app/customize.py @@ -22,22 +22,22 @@ def preprocess(i): state = i['state'] script_path = i['run_script_input']['path'] - if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes": + if env.get('MLC_RUN_DOCKER_CONTAINER', '') == "yes": return {'return': 0} - if 
env.get('CM_DOCKER_IMAGE_NAME', '') == 'scc24': - if env.get("CM_MLPERF_IMPLEMENTATION", "reference") == "reference": - env['CM_DOCKER_IMAGE_NAME'] = "scc24-reference" - elif "nvidia" in env.get("CM_MLPERF_IMPLEMENTATION", "reference"): - env['CM_DOCKER_IMAGE_NAME'] = "scc24-nvidia" + if env.get('MLC_DOCKER_IMAGE_NAME', '') == 'scc24': + if env.get("MLC_MLPERF_IMPLEMENTATION", "reference") == "reference": + env['MLC_DOCKER_IMAGE_NAME'] = "scc24-reference" + elif "nvidia" in env.get("MLC_MLPERF_IMPLEMENTATION", "reference"): + env['MLC_DOCKER_IMAGE_NAME'] = "scc24-nvidia" - dump_version_info = env.get('CM_DUMP_VERSION_INFO', True) + dump_version_info = env.get('MLC_DUMP_VERSION_INFO', True) - system_meta = state.get('CM_SUT_META', {}) + system_meta = state.get('MLC_SUT_META', {}) if system_meta: - env['CM_SUT_META_EXISTS'] = "yes" + env['MLC_SUT_META_EXISTS'] = "yes" - env['CM_MODEL'] = env['CM_MLPERF_MODEL'] + env['MLC_MODEL'] = env['MLC_MLPERF_MODEL'] # Clean MLPerf inference output tar file if non-standard x = env.get('MLPERF_INFERENCE_SUBMISSION_TAR_FILE', '') @@ -52,102 +52,102 @@ def preprocess(i): if os.path.isfile(z): os.remove(z) - if env.get('CM_MLPERF_SUBMISSION_SYSTEM_TYPE', '') != '': - system_type = env['CM_MLPERF_SUBMISSION_SYSTEM_TYPE'] + if env.get('MLC_MLPERF_SUBMISSION_SYSTEM_TYPE', '') != '': + system_type = env['MLC_MLPERF_SUBMISSION_SYSTEM_TYPE'] system_meta['system_type'] = system_type - if env.get('CM_MLPERF_SUBMISSION_DIVISION', '') != '': - division = env['CM_MLPERF_SUBMISSION_DIVISION'] + if env.get('MLC_MLPERF_SUBMISSION_DIVISION', '') != '': + division = env['MLC_MLPERF_SUBMISSION_DIVISION'] system_meta['division'] = division if system_meta.get('division', '') != "closed": # no compliance runs needed for open division - env["CM_MLPERF_LOADGEN_COMPLIANCE"] = "no" + env["MLC_MLPERF_LOADGEN_COMPLIANCE"] = "no" clean = False - if 'CM_MLPERF_CLEAN_ALL' in env: + if 'MLC_MLPERF_CLEAN_ALL' in env: clean = True - if 'CM_MLPERF_CLEAN_SUBMISSION_DIR' not in env: - env['CM_MLPERF_CLEAN_SUBMISSION_DIR'] = "yes" - if 'CM_RERUN' not in env: - env['CM_RERUN'] = "yes" + if 'MLC_MLPERF_CLEAN_SUBMISSION_DIR' not in env: + env['MLC_MLPERF_CLEAN_SUBMISSION_DIR'] = "yes" + if 'MLC_RERUN' not in env: + env['MLC_RERUN'] = "yes" - if str(env.get('CM_SYSTEM_POWER', 'no')).lower( - ) != "no" or env.get('CM_MLPERF_POWER', '') == "yes": + if str(env.get('MLC_SYSTEM_POWER', 'no')).lower( + ) != "no" or env.get('MLC_MLPERF_POWER', '') == "yes": power_variation = ",_power" - env['CM_MLPERF_POWER'] = "yes" + env['MLC_MLPERF_POWER'] = "yes" else: power_variation = "" - if env.get('CM_RUN_STYLE', - '') == "valid" and 'CM_RUN_MLPERF_ACCURACY' not in env: - env['CM_RUN_MLPERF_ACCURACY'] = "on" + if env.get('MLC_RUN_STYLE', + '') == "valid" and 'MLC_RUN_MLPERF_ACCURACY' not in env: + env['MLC_RUN_MLPERF_ACCURACY'] = "on" print("Using MLCommons Inference source from " + - env['CM_MLPERF_INFERENCE_SOURCE']) + env['MLC_MLPERF_INFERENCE_SOURCE']) - if 'CM_MLPERF_LOADGEN_EXTRA_OPTIONS' not in env: - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] = "" + if 'MLC_MLPERF_LOADGEN_EXTRA_OPTIONS' not in env: + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] = "" - if 'CM_MLPERF_LOADGEN_MODES' not in env: - if 'CM_MLPERF_LOADGEN_MODE' not in env: - env['CM_MLPERF_LOADGEN_MODE'] = "performance" + if 'MLC_MLPERF_LOADGEN_MODES' not in env: + if 'MLC_MLPERF_LOADGEN_MODE' not in env: + env['MLC_MLPERF_LOADGEN_MODE'] = "performance" - if 'CM_MLPERF_LOADGEN_SCENARIOS' not in env: - if 'CM_MLPERF_LOADGEN_SCENARIO' not in env: - 
env['CM_MLPERF_LOADGEN_SCENARIO'] = "Offline" + if 'MLC_MLPERF_LOADGEN_SCENARIOS' not in env: + if 'MLC_MLPERF_LOADGEN_SCENARIO' not in env: + env['MLC_MLPERF_LOADGEN_SCENARIO'] = "Offline" - if env.get('CM_MLPERF_LOADGEN_ALL_SCENARIOS', '') == "yes": - env['CM_MLPERF_LOADGEN_SCENARIOS'] = get_valid_scenarios( - env['CM_MODEL'], + if env.get('MLC_MLPERF_LOADGEN_ALL_SCENARIOS', '') == "yes": + env['MLC_MLPERF_LOADGEN_SCENARIOS'] = get_valid_scenarios( + env['MLC_MODEL'], system_meta.get( 'system_type', 'edge'), - env['CM_MLPERF_LAST_RELEASE'], - env['CM_MLPERF_INFERENCE_SOURCE']) + env['MLC_MLPERF_LAST_RELEASE'], + env['MLC_MLPERF_INFERENCE_SOURCE']) else: system_meta = {} - env['CM_MLPERF_LOADGEN_SCENARIOS'] = [ - env['CM_MLPERF_LOADGEN_SCENARIO']] + env['MLC_MLPERF_LOADGEN_SCENARIOS'] = [ + env['MLC_MLPERF_LOADGEN_SCENARIO']] - if env.get('CM_MLPERF_LOADGEN_ALL_MODES', '') == "yes": - env['CM_MLPERF_LOADGEN_MODES'] = ["performance", "accuracy"] + if env.get('MLC_MLPERF_LOADGEN_ALL_MODES', '') == "yes": + env['MLC_MLPERF_LOADGEN_MODES'] = ["performance", "accuracy"] else: - env['CM_MLPERF_LOADGEN_MODES'] = [env['CM_MLPERF_LOADGEN_MODE']] + env['MLC_MLPERF_LOADGEN_MODES'] = [env['MLC_MLPERF_LOADGEN_MODE']] if env.get('OUTPUT_BASE_DIR', '') == '': env['OUTPUT_BASE_DIR'] = env.get( - 'CM_MLPERF_INFERENCE_RESULTS_DIR', os.getcwd()) + 'MLC_MLPERF_INFERENCE_RESULTS_DIR', os.getcwd()) test_list = ["TEST01"] - if env['CM_MODEL'] in ["resnet50", "sdxl"]: + if env['MLC_MODEL'] in ["resnet50", "sdxl"]: test_list.append("TEST04") - if "gpt" in env['CM_MODEL'] or "llama2-70b" in env['CM_MODEL'] or "mixtral-8x7b" in env['CM_MODEL']: + if "gpt" in env['MLC_MODEL'] or "llama2-70b" in env['MLC_MODEL'] or "mixtral-8x7b" in env['MLC_MODEL']: test_list.remove("TEST01") # test_list.remove("TEST05") - if "llama2" in env['CM_MODEL'].lower( - ) or "mixtral-8x7b" in env['CM_MODEL']: + if "llama2" in env['MLC_MODEL'].lower( + ) or "mixtral-8x7b" in env['MLC_MODEL']: test_list.append("TEST06") variation_implementation = "_" + \ - env.get("CM_MLPERF_IMPLEMENTATION", "reference") - variation_model = ",_" + env["CM_MLPERF_MODEL"] + env.get("MLC_MLPERF_IMPLEMENTATION", "reference") + variation_model = ",_" + env["MLC_MLPERF_MODEL"] variation_backend = ",_" + \ - env["CM_MLPERF_BACKEND"] if env.get( - "CM_MLPERF_BACKEND", "") != "" else "" + env["MLC_MLPERF_BACKEND"] if env.get( + "MLC_MLPERF_BACKEND", "") != "" else "" variation_device = ",_" + \ - env["CM_MLPERF_DEVICE"] if env.get( - "CM_MLPERF_DEVICE", "") != "" else "" - variation_run_style = ",_" + env.get("CM_MLPERF_RUN_STYLE", "test") - variation_reproducibility = ",_" + env["CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS"] if env.get( - "CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS", "") != "" else "" + env["MLC_MLPERF_DEVICE"] if env.get( + "MLC_MLPERF_DEVICE", "") != "" else "" + variation_run_style = ",_" + env.get("MLC_MLPERF_RUN_STYLE", "test") + variation_reproducibility = ",_" + env["MLC_RUN_MLPERF_INFERENCE_APP_DEFAULTS"] if env.get( + "MLC_RUN_MLPERF_INFERENCE_APP_DEFAULTS", "") != "" else "" variation_all_models = ",_all-models" if env.get( - "CM_MLPERF_ALL_MODELS", "") == "yes" else "" + "MLC_MLPERF_ALL_MODELS", "") == "yes" else "" - if env.get("CM_MLPERF_MODEL_PRECISION", '') != '': - variation_quantization_string = ",_" + env["CM_MLPERF_MODEL_PRECISION"] + if env.get("MLC_MLPERF_MODEL_PRECISION", '') != '': + variation_quantization_string = ",_" + env["MLC_MLPERF_MODEL_PRECISION"] else: variation_quantization_string = "" @@ -168,7 +168,7 @@ def preprocess(i): for 
key in adr_from_meta: add_deps_recursive[key] = adr_from_meta[key] - if env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '') != '': + if env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '') != '': if not add_deps_recursive.get('mlperf-inference-implementation', {}): add_deps_recursive['mlperf-inference-implementation'] = {} if add_deps_recursive['mlperf-inference-implementation'].get( @@ -177,9 +177,9 @@ def preprocess(i): else: add_deps_recursive['mlperf-inference-implementation']['tags'] += ',' add_deps_recursive['mlperf-inference-implementation']['tags'] += "_batch_size." + \ - env['CM_MLPERF_LOADGEN_MAX_BATCHSIZE'] + env['MLC_MLPERF_LOADGEN_MAX_BATCHSIZE'] - if env.get('CM_MLPERF_INFERENCE_SUT_VARIATION', '') != '': + if env.get('MLC_MLPERF_INFERENCE_SUT_VARIATION', '') != '': if not add_deps_recursive.get('mlperf-inference-implementation', {}): add_deps_recursive['mlperf-inference-implementation'] = {} if add_deps_recursive['mlperf-inference-implementation'].get( @@ -188,12 +188,12 @@ def preprocess(i): else: add_deps_recursive['mlperf-inference-implementation']['tags'] += ',' add_deps_recursive['mlperf-inference-implementation']['tags'] += "_" + \ - env['CM_MLPERF_INFERENCE_SUT_VARIATION'] + env['MLC_MLPERF_INFERENCE_SUT_VARIATION'] - if env.get('CM_NETWORK_LOADGEN', '') != '': + if env.get('MLC_NETWORK_LOADGEN', '') != '': if not add_deps_recursive.get('mlperf-inference-implementation', {}): add_deps_recursive['mlperf-inference-implementation'] = {} - network_variation_tag = f"_network-{env['CM_NETWORK_LOADGEN']}" + network_variation_tag = f"_network-{env['MLC_NETWORK_LOADGEN']}" if add_deps_recursive['mlperf-inference-implementation'].get( 'tags', '') == '': add_deps_recursive['mlperf-inference-implementation']['tags'] = '' @@ -201,12 +201,12 @@ def preprocess(i): add_deps_recursive['mlperf-inference-implementation']['tags'] += ',' add_deps_recursive['mlperf-inference-implementation']['tags'] += network_variation_tag - if env.get('CM_OUTPUT_FOLDER_NAME', '') == '': - env['CM_OUTPUT_FOLDER_NAME'] = env['CM_MLPERF_RUN_STYLE'] + "_results" + if env.get('MLC_OUTPUT_FOLDER_NAME', '') == '': + env['MLC_OUTPUT_FOLDER_NAME'] = env['MLC_MLPERF_RUN_STYLE'] + "_results" output_dir = os.path.join( env['OUTPUT_BASE_DIR'], - env['CM_OUTPUT_FOLDER_NAME']) + env['MLC_OUTPUT_FOLDER_NAME']) if clean: path_to_clean = output_dir @@ -217,15 +217,15 @@ def preprocess(i): print('=========================================================') - if str(env.get('CM_MLPERF_USE_DOCKER', '') + if str(env.get('MLC_MLPERF_USE_DOCKER', '') ).lower() in ["1", "true", "yes"]: action = "docker" # del(env['OUTPUT_BASE_DIR']) state = {} docker_extra_input = {} - # if env.get('CM_HW_NAME'): - # del(env['CM_HW_NAME']) + # if env.get('MLC_HW_NAME'): + # del(env['MLC_HW_NAME']) for k in inp: if k.startswith("docker_"): @@ -233,36 +233,36 @@ def preprocess(i): inp = {} if str(docker_dt).lower() in ["yes", "true", "1"]: # turning it off for the first run and after that we turn it on - if env.get('CM_DOCKER_REUSE_EXISTING_CONTAINER', '') == '': - env['CM_DOCKER_REUSE_EXISTING_CONTAINER'] = 'no' - env['CM_DOCKER_DETACHED_MODE'] = 'yes' + if env.get('MLC_DOCKER_REUSE_EXISTING_CONTAINER', '') == '': + env['MLC_DOCKER_REUSE_EXISTING_CONTAINER'] = 'no' + env['MLC_DOCKER_DETACHED_MODE'] = 'yes' - if env.get('CM_DOCKER_IMAGE_NAME', '') != '': - docker_extra_input['docker_image_name'] = env['CM_DOCKER_IMAGE_NAME'] + if env.get('MLC_DOCKER_IMAGE_NAME', '') != '': + docker_extra_input['docker_image_name'] = env['MLC_DOCKER_IMAGE_NAME'] else: action 
= "run" - # local_keys = [ 'CM_MLPERF_SKIP_RUN', 'CM_MLPERF_LOADGEN_QUERY_COUNT', 'CM_MLPERF_LOADGEN_TARGET_QPS', 'CM_MLPERF_LOADGEN_TARGET_LATENCY' ] + # local_keys = [ 'MLC_MLPERF_SKIP_RUN', 'MLC_MLPERF_LOADGEN_QUERY_COUNT', 'MLC_MLPERF_LOADGEN_TARGET_QPS', 'MLC_MLPERF_LOADGEN_TARGET_LATENCY' ] - for scenario in env['CM_MLPERF_LOADGEN_SCENARIOS']: + for scenario in env['MLC_MLPERF_LOADGEN_SCENARIOS']: scenario_tags = tags + ",_" + scenario.lower() - env['CM_MLPERF_LOADGEN_SCENARIO'] = scenario + env['MLC_MLPERF_LOADGEN_SCENARIO'] = scenario if scenario == "Offline": - if env.get('CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS'): - env['CM_MLPERF_LOADGEN_TARGET_QPS'] = env['CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS'] + if env.get('MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS'): + env['MLC_MLPERF_LOADGEN_TARGET_QPS'] = env['MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS'] elif scenario == "Server": - if env.get('CM_MLPERF_LOADGEN_SERVER_TARGET_QPS'): - env['CM_MLPERF_LOADGEN_TARGET_QPS'] = env['CM_MLPERF_LOADGEN_SERVER_TARGET_QPS'] + if env.get('MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS'): + env['MLC_MLPERF_LOADGEN_TARGET_QPS'] = env['MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS'] elif scenario == "SingleStream": - if env.get('CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY'): - env['CM_MLPERF_LOADGEN_TARGET_LATENCY'] = env['CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY'] + if env.get('MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY'): + env['MLC_MLPERF_LOADGEN_TARGET_LATENCY'] = env['MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY'] elif scenario == "MultiStream": - if env.get('CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY'): - env['CM_MLPERF_LOADGEN_TARGET_LATENCY'] = env['CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY'] + if env.get('MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY'): + env['MLC_MLPERF_LOADGEN_TARGET_LATENCY'] = env['MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY'] - for mode in env['CM_MLPERF_LOADGEN_MODES']: - env['CM_MLPERF_LOADGEN_MODE'] = mode + for mode in env['MLC_MLPERF_LOADGEN_MODES']: + env['MLC_MLPERF_LOADGEN_MODE'] = mode env_copy = copy.deepcopy(env) const_copy = copy.deepcopy(const) @@ -279,11 +279,11 @@ def preprocess(i): if r['return'] > 0: return r - if env_copy.get('CM_MLPERF_INFERENCE_FINAL_RESULTS_DIR', '') != '': - env['CM_MLPERF_INFERENCE_RESULTS_DIR_'] = env_copy['CM_MLPERF_INFERENCE_FINAL_RESULTS_DIR'] + if env_copy.get('MLC_MLPERF_INFERENCE_FINAL_RESULTS_DIR', '') != '': + env['MLC_MLPERF_INFERENCE_RESULTS_DIR_'] = env_copy['MLC_MLPERF_INFERENCE_FINAL_RESULTS_DIR'] else: - env['CM_MLPERF_INFERENCE_RESULTS_DIR_'] = os.path.join( - env['OUTPUT_BASE_DIR'], f"{env['CM_MLPERF_RUN_STYLE']}_results") + env['MLC_MLPERF_INFERENCE_RESULTS_DIR_'] = os.path.join( + env['OUTPUT_BASE_DIR'], f"{env['MLC_MLPERF_RUN_STYLE']}_results") if action == "docker": if str(docker_dt).lower() not in ["yes", "true", "1"]: @@ -292,16 +292,16 @@ def preprocess(i): # We run commands interactively inside the docker container return {'return': 0} else: - env['CM_DOCKER_REUSE_EXISTING_CONTAINER'] = 'yes' - container_id = env_copy['CM_DOCKER_CONTAINER_ID'] - env['CM_DOCKER_CONTAINER_ID'] = container_id + env['MLC_DOCKER_REUSE_EXISTING_CONTAINER'] = 'yes' + container_id = env_copy['MLC_DOCKER_CONTAINER_ID'] + env['MLC_DOCKER_CONTAINER_ID'] = container_id if state.get('docker', {}): del (state['docker']) - if env.get("CM_MLPERF_LOADGEN_COMPLIANCE", "") == "yes": + if env.get("MLC_MLPERF_LOADGEN_COMPLIANCE", "") == "yes": for test in test_list: - env['CM_MLPERF_LOADGEN_COMPLIANCE_TEST'] = test - env['CM_MLPERF_LOADGEN_MODE'] = 
"compliance" + env['MLC_MLPERF_LOADGEN_COMPLIANCE_TEST'] = test + env['MLC_MLPERF_LOADGEN_MODE'] = "compliance" ii = {'action': action, 'automation': 'script', 'tags': scenario_tags, 'quiet': 'true', 'env': copy.deepcopy(env), 'const': copy.deepcopy(const), 'input': inp, 'state': state, 'add_deps': copy.deepcopy(add_deps), 'add_deps_recursive': copy.deepcopy(add_deps_recursive), 'adr': copy.deepcopy(adr), 'ad': ad, 'v': verbose, 'print_env': print_env, 'print_deps': print_deps, 'dump_version_info': dump_version_info} @@ -314,9 +314,9 @@ def preprocess(i): if state.get('docker', {}): del (state['docker']) - if env.get('CM_DOCKER_CONTAINER_ID', '') != '' and str(env.get( - 'CM_DOCKER_CONTAINER_KEEP_ALIVE', '')).lower() not in ["yes", "1", "true"]: - container_id = env['CM_DOCKER_CONTAINER_ID'] + if env.get('MLC_DOCKER_CONTAINER_ID', '') != '' and str(env.get( + 'MLC_DOCKER_CONTAINER_KEEP_ALIVE', '')).lower() not in ["yes", "1", "true"]: + container_id = env['MLC_DOCKER_CONTAINER_ID'] CMD = f"docker kill {container_id}" docker_out = subprocess.check_output(CMD, shell=True).decode("utf-8") @@ -384,9 +384,9 @@ def postprocess(i): env = i['env'] state = i['state'] - if env.get('CM_MLPERF_IMPLEMENTATION', '') == 'reference': - x1 = env.get('CM_MLPERF_INFERENCE_SOURCE', '') - x2 = env.get('CM_MLPERF_INFERENCE_CONF_PATH', '') + if env.get('MLC_MLPERF_IMPLEMENTATION', '') == 'reference': + x1 = env.get('MLC_MLPERF_INFERENCE_SOURCE', '') + x2 = env.get('MLC_MLPERF_INFERENCE_CONF_PATH', '') if x1 != '' and x2 != '': print('') diff --git a/script/run-mlperf-inference-app/meta.yaml b/script/run-mlperf-inference-app/meta.yaml index 812e9eeba..c2f64bb3f 100644 --- a/script/run-mlperf-inference-app/meta.yaml +++ b/script/run-mlperf-inference-app/meta.yaml @@ -29,95 +29,95 @@ tags_help: "run-mlperf,inference" predeps: False default_env: - CM_MLPERF_IMPLEMENTATION: reference - CM_MLPERF_MODEL: resnet50 - CM_MLPERF_RUN_STYLE: test - CM_MLPERF_SKIP_SUBMISSION_GENERATION: no - CM_DOCKER_PRIVILEGED_MODE: yes - CM_MLPERF_SUBMISSION_DIVISION: open - CM_MLPERF_INFERENCE_TP_SIZE: 1 + MLC_MLPERF_IMPLEMENTATION: reference + MLC_MLPERF_MODEL: resnet50 + MLC_MLPERF_RUN_STYLE: test + MLC_MLPERF_SKIP_SUBMISSION_GENERATION: no + MLC_DOCKER_PRIVILEGED_MODE: yes + MLC_MLPERF_SUBMISSION_DIVISION: open + MLC_MLPERF_INFERENCE_TP_SIZE: 1 input_mapping: - api_server: CM_MLPERF_INFERENCE_API_SERVER - backend: CM_MLPERF_BACKEND - batch_size: CM_MLPERF_LOADGEN_MAX_BATCHSIZE + api_server: MLC_MLPERF_INFERENCE_API_SERVER + backend: MLC_MLPERF_BACKEND + batch_size: MLC_MLPERF_LOADGEN_MAX_BATCHSIZE beam_size: GPTJ_BEAM_SIZE - category: CM_MLPERF_SUBMISSION_SYSTEM_TYPE - clean: CM_MLPERF_CLEAN_ALL - compliance: CM_MLPERF_LOADGEN_COMPLIANCE - custom_system_nvidia: CM_CUSTOM_SYSTEM_NVIDIA - dashboard_wb_project: CM_MLPERF_DASHBOARD_WANDB_PROJECT - dashboard_wb_user: CM_MLPERF_DASHBOARD_WANDB_USER - debug: CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM - device: CM_MLPERF_DEVICE - division: CM_MLPERF_SUBMISSION_DIVISION + category: MLC_MLPERF_SUBMISSION_SYSTEM_TYPE + clean: MLC_MLPERF_CLEAN_ALL + compliance: MLC_MLPERF_LOADGEN_COMPLIANCE + custom_system_nvidia: MLC_CUSTOM_SYSTEM_NVIDIA + dashboard_wb_project: MLC_MLPERF_DASHBOARD_WANDB_PROJECT + dashboard_wb_user: MLC_MLPERF_DASHBOARD_WANDB_USER + debug: MLC_DEBUG_SCRIPT_BENCHMARK_PROGRAM + device: MLC_MLPERF_DEVICE + division: MLC_MLPERF_SUBMISSION_DIVISION dlrm_data_path: DLRM_DATA_PATH - docker: CM_MLPERF_USE_DOCKER - dump_version_info: CM_DUMP_VERSION_INFO - save_console_log: CM_SAVE_CONSOLE_LOG - 
execution_mode: CM_MLPERF_RUN_STYLE - find_performance: CM_MLPERF_FIND_PERFORMANCE_MODE - framework: CM_MLPERF_BACKEND - docker_keep_alive: CM_DOCKER_CONTAINER_KEEP_ALIVE - get_platform_details: CM_GET_PLATFORM_DETAILS - gpu_name: CM_NVIDIA_GPU_NAME - hw_name: CM_HW_NAME - pip_loadgen: CM_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP - hw_notes_extra: CM_MLPERF_SUT_SW_NOTES_EXTRA + docker: MLC_MLPERF_USE_DOCKER + dump_version_info: MLC_DUMP_VERSION_INFO + save_console_log: MLC_SAVE_CONSOLE_LOG + execution_mode: MLC_MLPERF_RUN_STYLE + find_performance: MLC_MLPERF_FIND_PERFORMANCE_MODE + framework: MLC_MLPERF_BACKEND + docker_keep_alive: MLC_DOCKER_CONTAINER_KEEP_ALIVE + get_platform_details: MLC_GET_PLATFORM_DETAILS + gpu_name: MLC_NVIDIA_GPU_NAME + hw_name: MLC_HW_NAME + pip_loadgen: MLC_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP + hw_notes_extra: MLC_MLPERF_SUT_SW_NOTES_EXTRA imagenet_path: IMAGENET_PATH - implementation: CM_MLPERF_IMPLEMENTATION - lang: CM_MLPERF_IMPLEMENTATION - min_query_count: CM_MLPERF_INFERENCE_MIN_QUERY_COUNT - max_query_count: CM_MLPERF_INFERENCE_MAX_QUERY_COUNT - mode: CM_MLPERF_LOADGEN_MODE - model: CM_MLPERF_MODEL - multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY - network: CM_NETWORK_LOADGEN - nvidia_system_name: CM_NVIDIA_SYSTEM_NAME - offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS + implementation: MLC_MLPERF_IMPLEMENTATION + lang: MLC_MLPERF_IMPLEMENTATION + min_query_count: MLC_MLPERF_INFERENCE_MIN_QUERY_COUNT + max_query_count: MLC_MLPERF_INFERENCE_MAX_QUERY_COUNT + mode: MLC_MLPERF_LOADGEN_MODE + model: MLC_MLPERF_MODEL + multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY + network: MLC_NETWORK_LOADGEN + nvidia_system_name: MLC_NVIDIA_SYSTEM_NAME + offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS output_dir: OUTPUT_BASE_DIR output_summary: MLPERF_INFERENCE_SUBMISSION_SUMMARY output_tar: MLPERF_INFERENCE_SUBMISSION_TAR_FILE - performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT - power: CM_SYSTEM_POWER - precision: CM_MLPERF_MODEL_PRECISION - preprocess_submission: CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR - push_to_github: CM_MLPERF_RESULT_PUSH_TO_GITHUB - pull_changes: CM_MLPERF_INFERENCE_PULL_CODE_CHANGES - pull_inference_changes: CM_MLPERF_INFERENCE_PULL_SRC_CHANGES - readme: CM_MLPERF_README - regenerate_accuracy_file: CM_MLPERF_REGENERATE_ACCURACY_FILE - regenerate_files: CM_REGENERATE_MEASURE_FILES - rerun: CM_RERUN + performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT + power: MLC_SYSTEM_POWER + precision: MLC_MLPERF_MODEL_PRECISION + preprocess_submission: MLC_RUN_MLPERF_SUBMISSION_PREPROCESSOR + push_to_github: MLC_MLPERF_RESULT_PUSH_TO_GITHUB + pull_changes: MLC_MLPERF_INFERENCE_PULL_CODE_CHANGES + pull_inference_changes: MLC_MLPERF_INFERENCE_PULL_SRC_CHANGES + readme: MLC_MLPERF_README + regenerate_accuracy_file: MLC_MLPERF_REGENERATE_ACCURACY_FILE + regenerate_files: MLC_REGENERATE_MEASURE_FILES + rerun: MLC_RERUN results_dir: OUTPUT_BASE_DIR - results_git_url: CM_MLPERF_RESULTS_GIT_REPO_URL - run_checker: CM_RUN_SUBMISSION_CHECKER - run_style: CM_MLPERF_RUN_STYLE - scenario: CM_MLPERF_LOADGEN_SCENARIO - server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS - singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY - skip_submission_generation: CM_MLPERF_SKIP_SUBMISSION_GENERATION - skip_truncation: CM_SKIP_TRUNCATE_ACCURACY - submission_dir: CM_MLPERF_INFERENCE_SUBMISSION_DIR - submitter: CM_MLPERF_SUBMITTER - sut_servers: 
CM_NETWORK_LOADGEN_SUT_SERVERS - sw_notes_extra: CM_MLPERF_SUT_SW_NOTES_EXTRA - system_type: CM_MLPERF_SUBMISSION_SYSTEM_TYPE - target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY - target_qps: CM_MLPERF_LOADGEN_TARGET_QPS - test_query_count: CM_TEST_QUERY_COUNT - threads: CM_NUM_THREADS - sut: CM_MLPERF_INFERENCE_SUT_VARIATION - nvidia_llama2_dataset_file_path: CM_NVIDIA_LLAMA_DATASET_FILE_PATH - tp_size: CM_NVIDIA_TP_SIZE - vllm_tp_size: CM_MLPERF_INFERENCE_TP_SIZE - vllm_model_name: CM_VLLM_SERVER_MODEL_NAME - num_workers: CM_MLPERF_INFERENCE_NUM_WORKERS - max_test_duration: CM_MLPERF_MAX_DURATION_TEST - all_models: CM_MLPERF_ALL_MODELS - criteo_day23_raw_data_path: CM_CRITEO_DAY23_RAW_DATA_PATH - use_dataset_from_host: CM_USE_DATASET_FROM_HOST - use_model_from_host: CM_USE_MODEL_FROM_HOST + results_git_url: MLC_MLPERF_RESULTS_GIT_REPO_URL + run_checker: MLC_RUN_SUBMISSION_CHECKER + run_style: MLC_MLPERF_RUN_STYLE + scenario: MLC_MLPERF_LOADGEN_SCENARIO + server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS + singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY + skip_submission_generation: MLC_MLPERF_SKIP_SUBMISSION_GENERATION + skip_truncation: MLC_SKIP_TRUNCATE_ACCURACY + submission_dir: MLC_MLPERF_INFERENCE_SUBMISSION_DIR + submitter: MLC_MLPERF_SUBMITTER + sut_servers: MLC_NETWORK_LOADGEN_SUT_SERVERS + sw_notes_extra: MLC_MLPERF_SUT_SW_NOTES_EXTRA + system_type: MLC_MLPERF_SUBMISSION_SYSTEM_TYPE + target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY + target_qps: MLC_MLPERF_LOADGEN_TARGET_QPS + test_query_count: MLC_TEST_QUERY_COUNT + threads: MLC_NUM_THREADS + sut: MLC_MLPERF_INFERENCE_SUT_VARIATION + nvidia_llama2_dataset_file_path: MLC_NVIDIA_LLAMA_DATASET_FILE_PATH + tp_size: MLC_NVIDIA_TP_SIZE + vllm_tp_size: MLC_MLPERF_INFERENCE_TP_SIZE + vllm_model_name: MLC_VLLM_SERVER_MODEL_NAME + num_workers: MLC_MLPERF_INFERENCE_NUM_WORKERS + max_test_duration: MLC_MLPERF_MAX_DURATION_TEST + all_models: MLC_MLPERF_ALL_MODELS + criteo_day23_raw_data_path: MLC_CRITEO_DAY23_RAW_DATA_PATH + use_dataset_from_host: MLC_USE_DATASET_FROM_HOST + use_model_from_host: MLC_USE_MODEL_FROM_HOST rgat_checkpoint_path: RGAT_CHECKPOINT_PATH new_state_keys: @@ -127,34 +127,34 @@ new_state_keys: deps: - tags: detect,os skip_if_env: - CM_MLPERF_USE_DOCKER: [ on ] + MLC_MLPERF_USE_DOCKER: [ on ] - tags: detect,cpu skip_if_env: - CM_MLPERF_USE_DOCKER: [ on ] + MLC_MLPERF_USE_DOCKER: [ on ] - names: - python - python3 tags: get,python3 skip_if_env: - CM_MLPERF_USE_DOCKER: [ on ] + MLC_MLPERF_USE_DOCKER: [ on ] - names: - inference-src tags: get,mlcommons,inference,src - tags: pull,git,repo env: - CM_GIT_CHECKOUT_PATH: '<<>>' + MLC_GIT_CHECKOUT_PATH: '<<>>' enable_if_env: - CM_MLPERF_INFERENCE_PULL_SRC_CHANGES: + MLC_MLPERF_INFERENCE_PULL_SRC_CHANGES: - 'yes' - tags: get,sut,description skip_if_env: - CM_MLPERF_USE_DOCKER: [ on ] + MLC_MLPERF_USE_DOCKER: [ on ] - tags: get,mlperf,inference,results,dir names: - get-mlperf-inference-results-dir enable_if_env: - CM_MLPERF_USE_DOCKER: [ off ] + MLC_MLPERF_USE_DOCKER: [ off ] skip_if_env: OUTPUT_BASE_DIR: [ on ] - tags: install,pip-package,for-cmind-python,_package.tabulate @@ -165,7 +165,7 @@ docker_off: mounts: - ${{ INSTALL_DATA_PATH }}:/install_data - ${{ DATA_PATH }}:/data - - ${{ CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH }}:${{ CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH }} + - ${{ MLC_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH }}:${{ MLC_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH }} - ${{ GPTJ_CHECKPOINT_PATH }}:${{ 
GPTJ_CHECKPOINT_PATH }} skip_run_cmd: 'no' shm_size: '32gb' @@ -181,7 +181,7 @@ docker_off: results_dir: RESULTS_DIR submission_dir: SUBMISSION_DIR dlrm_data_path: DLRM_DATA_PATH - intel_gptj_int8_model_path: CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH + intel_gptj_int8_model_path: MLC_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH variations: @@ -189,31 +189,31 @@ variations: default_variations: submission-generation-style: full env: - CM_MLPERF_LOADGEN_MODE: accuracy - CM_MLPERF_SUBMISSION_RUN: 'yes' - CM_RUN_MLPERF_ACCURACY: 'on' - CM_RUN_SUBMISSION_CHECKER: 'no' + MLC_MLPERF_LOADGEN_MODE: accuracy + MLC_MLPERF_SUBMISSION_RUN: 'yes' + MLC_RUN_MLPERF_ACCURACY: 'on' + MLC_RUN_SUBMISSION_CHECKER: 'no' group: submission-generation all-modes: env: - CM_MLPERF_LOADGEN_ALL_MODES: 'yes' + MLC_MLPERF_LOADGEN_ALL_MODES: 'yes' group: mode all-scenarios: env: - CM_MLPERF_LOADGEN_ALL_SCENARIOS: 'yes' + MLC_MLPERF_LOADGEN_ALL_SCENARIOS: 'yes' compliance: env: - CM_MLPERF_LOADGEN_COMPLIANCE: 'yes' + MLC_MLPERF_LOADGEN_COMPLIANCE: 'yes' find-performance: env: - CM_MLPERF_FIND_PERFORMANCE_MODE: 'yes' - CM_MLPERF_LOADGEN_ALL_MODES: 'no' - CM_MLPERF_LOADGEN_MODE: performance - CM_MLPERF_RESULT_PUSH_TO_GITHUB: false + MLC_MLPERF_FIND_PERFORMANCE_MODE: 'yes' + MLC_MLPERF_LOADGEN_ALL_MODES: 'no' + MLC_MLPERF_LOADGEN_MODE: performance + MLC_MLPERF_RESULT_PUSH_TO_GITHUB: false group: submission-generation full: @@ -239,16 +239,16 @@ variations: igbh-dataset: tags: _full env: - CM_MLPERF_SUBMISSION_GENERATION_STYLE: full + MLC_MLPERF_SUBMISSION_GENERATION_STYLE: full group: submission-generation-style performance-only: default_variations: submission-generation-style: full env: - CM_MLPERF_LOADGEN_MODE: performance - CM_MLPERF_SUBMISSION_RUN: 'yes' - CM_RUN_SUBMISSION_CHECKER: 'no' + MLC_MLPERF_LOADGEN_MODE: performance + MLC_MLPERF_SUBMISSION_RUN: 'yes' + MLC_RUN_SUBMISSION_CHECKER: 'no' group: submission-generation populate-readme: @@ -257,18 +257,18 @@ variations: default_variations: submission-generation-style: full env: - CM_MLPERF_README: 'yes' - CM_MLPERF_SUBMISSION_RUN: 'yes' - CM_RUN_SUBMISSION_CHECKER: 'no' + MLC_MLPERF_README: 'yes' + MLC_MLPERF_SUBMISSION_RUN: 'yes' + MLC_RUN_SUBMISSION_CHECKER: 'no' group: submission-generation scc24-base: base: - short env: - CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX4: scc24-base - CM_DOCKER_IMAGE_NAME: scc24 - CM_MLPERF_INFERENCE_MIN_QUERY_COUNT: 50 + MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX4: scc24-base + MLC_DOCKER_IMAGE_NAME: scc24 + MLC_MLPERF_INFERENCE_MIN_QUERY_COUNT: 50 adr: coco2014-preprocessed: tags: _size.50,_with-sample-ids @@ -291,41 +291,41 @@ variations: nvidia-preprocess-data: extra_cache_tags: "scc24-main" env: - CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX4: scc24-main - CM_DOCKER_IMAGE_NAME: scc24 - CM_MLPERF_INFERENCE_MIN_QUERY_COUNT: 500 + MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX4: scc24-main + MLC_DOCKER_IMAGE_NAME: scc24 + MLC_MLPERF_INFERENCE_MIN_QUERY_COUNT: 500 deps: - tags: clean,nvidia,scratch,_sdxl,_downloaded-data extra_cache_rm_tags: scc24-base r2.1: env: - CM_MLPERF_INFERENCE_VERSION: '2.1' - CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r2.1_default + MLC_MLPERF_INFERENCE_VERSION: '2.1' + MLC_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r2.1_default group: benchmark-version r3.0: env: - CM_MLPERF_INFERENCE_VERSION: '3.0' - CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r3.0_default + MLC_MLPERF_INFERENCE_VERSION: '3.0' + MLC_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r3.0_default group: benchmark-version r3.1: env: - CM_MLPERF_INFERENCE_VERSION: '3.1' - 
CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r3.1_default + MLC_MLPERF_INFERENCE_VERSION: '3.1' + MLC_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r3.1_default group: benchmark-version r4.0-dev: env: - CM_MLPERF_INFERENCE_VERSION: '4.0-dev' - CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r4.0-dev_default + MLC_MLPERF_INFERENCE_VERSION: '4.0-dev' + MLC_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r4.0-dev_default group: benchmark-version r4.0: env: - CM_MLPERF_INFERENCE_VERSION: '4.0' - CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r4.0_default + MLC_MLPERF_INFERENCE_VERSION: '4.0' + MLC_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r4.0_default group: benchmark-version adr: get-mlperf-inference-results-dir: @@ -337,8 +337,8 @@ variations: r4.1-dev: env: - CM_MLPERF_INFERENCE_VERSION: '4.1-dev' - CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r4.1-dev_default + MLC_MLPERF_INFERENCE_VERSION: '4.1-dev' + MLC_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r4.1-dev_default group: benchmark-version adr: get-mlperf-inference-results-dir: @@ -350,9 +350,9 @@ variations: r4.1: env: - CM_MLPERF_INFERENCE_VERSION: '4.1' - CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r4.1_default - CM_MLPERF_SUBMISSION_CHECKER_VERSION: v4.1 + MLC_MLPERF_INFERENCE_VERSION: '4.1' + MLC_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r4.1_default + MLC_MLPERF_SUBMISSION_CHECKER_VERSION: v4.1 adr: get-mlperf-inference-results-dir: tags: _version.r4_1 @@ -365,9 +365,9 @@ variations: r5.0-dev: default: true env: - CM_MLPERF_INFERENCE_VERSION: '5.0-dev' - CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r5.0-dev_default - CM_MLPERF_SUBMISSION_CHECKER_VERSION: v5.0 + MLC_MLPERF_INFERENCE_VERSION: '5.0-dev' + MLC_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r5.0-dev_default + MLC_MLPERF_SUBMISSION_CHECKER_VERSION: v5.0 group: benchmark-version adr: get-mlperf-inference-results-dir: @@ -383,9 +383,9 @@ variations: tags: _short-run default: 'true' env: - CM_MLPERF_SUBMISSION_DIVISION: open - CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR: off - CM_MLPERF_SUBMISSION_GENERATION_STYLE: short + MLC_MLPERF_SUBMISSION_DIVISION: open + MLC_RUN_MLPERF_SUBMISSION_PREPROCESSOR: off + MLC_MLPERF_SUBMISSION_GENERATION_STYLE: short group: submission-generation-style performance-and-accuracy: @@ -402,17 +402,17 @@ variations: default_variations: submission-generation-style: full env: - CM_MLPERF_LOADGEN_COMPLIANCE: 'yes' - CM_MLPERF_SUBMISSION_RUN: 'yes' - CM_RUN_MLPERF_ACCURACY: 'on' - CM_RUN_SUBMISSION_CHECKER: 'yes' - CM_TAR_SUBMISSION_DIR: 'yes' + MLC_MLPERF_LOADGEN_COMPLIANCE: 'yes' + MLC_MLPERF_SUBMISSION_RUN: 'yes' + MLC_RUN_MLPERF_ACCURACY: 'on' + MLC_RUN_SUBMISSION_CHECKER: 'yes' + MLC_TAR_SUBMISSION_DIR: 'yes' group: submission-generation post_deps: - names: - submission-generator skip_if_env: - CM_MLPERF_SKIP_SUBMISSION_GENERATION: + MLC_MLPERF_SKIP_SUBMISSION_GENERATION: - 'yes' tags: generate,mlperf,inference,submission @@ -546,11 +546,11 @@ input_description: adr.compiler.tags: desc: Compiler for loadgen and any C/C++ part of implementation - adr.inference-src-loadgen.env.CM_GIT_URL: + adr.inference-src-loadgen.env.MLC_GIT_URL: default: '' desc: Git URL for MLPerf inference sources to build LoadGen (to enable non-reference implementations) - adr.inference-src.env.CM_GIT_URL: + adr.inference-src.env.MLC_GIT_URL: default: '' desc: Git URL for MLPerf inference sources to run benchmarks (to enable non-reference implementations) diff --git a/script/run-mlperf-inference-mobilenet-models/README-about.md b/script/run-mlperf-inference-mobilenet-models/README-about.md deleted file mode 100644 index beaa467a8..000000000 --- 
a/script/run-mlperf-inference-mobilenet-models/README-about.md
+++ /dev/null
@@ -1,107 +0,0 @@
-## Set up
-
-We need to get the full ImageNet dataset to make image-classification submissions for MLPerf inference. Since this dataset is not publicly available via a URL, please follow the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md) to download the dataset and register it in CM.
-
-### Docker Setup (Optional)
-
-CM commands are expected to run natively, but if you prefer not to modify the host system, you can run the command below to set up a docker container.
-
-```
-cm docker script --tags=run,mobilenet-models,_tflite,_accuracy-only \
---adr.compiler.tags=gcc \
---docker_cm_repo=mlcommons@cm4mlops \
---imagenet_path=$HOME/imagenet-2012-val \
---results_dir=$HOME/mobilenet_results \
---submission_dir=$HOME/inference_submission_3.1 \
---docker_skip_run_cmd
-```
-
-This command builds a docker container and gives you an interactive shell from which you can execute the CM run commands below.
-* `results_dir`, `submission_dir` and `imagenet_path` are mounted from the host system.
-* `results_dir` and `submission_dir` are expected to be empty directories that will be populated by the docker container.
-* `imagenet_path` should point to the ImageNet folder containing the 50000 validation images.
-
-## Run Commands
-
-Since the runs can take many hours, you can install `screen` as follows in case you are running remotely. You may omit "screen" from all commands if you are running directly on the host system.
-```
-cmr "get generic-sys-util _screen"
-```
-### Default tflite
-
-#### Do a full accuracy run for all the models (can take almost a day)
-
-```
-screen cmr "run mobilenet-models _tflite _accuracy-only" \
---adr.compiler.tags=gcc \
---results_dir=$HOME/mobilenet_results
-```
-
-#### Do a full performance run for all the models (can take almost a day)
-```
-screen cmr "run mobilenet-models _tflite _performance-only" \
---adr.compiler.tags=gcc \
---results_dir=$HOME/mobilenet_results
-```
-
-#### Generate README files for all the runs
-```
-cmr "run mobilenet-models _tflite _populate-readme" \
---adr.compiler.tags=gcc \
---results_dir=$HOME/mobilenet_results
-```
-
-#### Generate the actual submission tree
-
-We should use the master branch of the MLCommons inference repo for the submission checker. You can use the `--hw_notes_extra` option to add your name to the notes.
-```
-cmr "generate inference submission" \
---results_dir=$HOME/mobilenet_results/valid_results \
---submission_dir=$HOME/mobilenet_submission_tree \
---clean \
---infer_scenario_results=yes \
---adr.compiler.tags=gcc --adr.inference-src.version=master \
---run-checker \
---submitter=cTuning \
---hw_notes_extra="Result taken by NAME"
-```
-* Use `--hw_name="My system name"` to give a meaningful system name. Examples can be seen [here](https://github.com/mlcommons/inference_results_v3.0/tree/main/open/cTuning/systems).
-
-#### Push the results to the GitHub repo
-
-First, create a fork of [this repo](https://github.com/ctuning/mlperf_inference_submissions_v3.1/). Then run the following command after replacing `--repo_url` with your fork URL.
-```
-cmr "push github mlperf inference submission" \
---submission_dir=$HOME/mobilenet_submission_tree \
---repo_url=https://github.com/ctuning/mlperf_inference_submissions_v3.1/ \
---commit_message="Mobilenet results added"
-```
-
-Create a PR to the [cTuning repo](https://github.com/ctuning/mlperf_inference_submissions_v3.1/).
-
-### Using ARMNN with NEON
-
-Follow the same procedure as above, but add `_armnn,_neon` to the tags for the first three experiment runs. For example:
-```
-cmr "run mobilenet-models _tflite _armnn _neon _accuracy-only" \
---adr.compiler.tags=gcc \
---results_dir=$HOME/mobilenet_results
-```
-
-`results_dir` and `submission_dir` can be the same as before, as the results will go to different subfolders.
-
-### Using ARMNN with OpenCL
-
-Follow the same procedure as above, but add `_armnn,_opencl` to the tags for the first three experiment runs. For example:
-```
-cmr "run mobilenet-models _tflite _armnn _opencl _accuracy-only" \
---adr.compiler.tags=gcc \
---results_dir=$HOME/mobilenet_results
-```
-
-`results_dir` and `submission_dir` can be the same as before, as the results will go to different subfolders.
diff --git a/script/run-mlperf-inference-mobilenet-models/customize.py b/script/run-mlperf-inference-mobilenet-models/customize.py index 4101f3d52..322f5078a 100644 --- a/script/run-mlperf-inference-mobilenet-models/customize.py +++ b/script/run-mlperf-inference-mobilenet-models/customize.py @@ -18,8 +18,8 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') - verbose = (env.get('CM_VERBOSE', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') + verbose = (env.get('MLC_VERBOSE', False) == 'yes') models_all = { "mobilenet": { @@ -49,18 +49,18 @@ def preprocess(i): } models = {} - if env.get('CM_MLPERF_RUN_MOBILENET_V1', '') == "yes": + if env.get('MLC_MLPERF_RUN_MOBILENET_V1', '') == "yes": models['mobilenet'] = {} models['mobilenet']['v1'] = models_all['mobilenet']['v1'] - elif env.get('CM_MLPERF_RUN_MOBILENET_V2', '') == "yes": + elif env.get('MLC_MLPERF_RUN_MOBILENET_V2', '') == "yes": models['mobilenet'] = {} models['mobilenet']['v2'] = models_all['mobilenet']['v2'] - elif env.get('CM_MLPERF_RUN_MOBILENET_V3', '') == "yes": + elif env.get('MLC_MLPERF_RUN_MOBILENET_V3', '') == "yes": models['mobilenet'] = {} models['mobilenet']['v3'] = models_all['mobilenet']['v3'] - elif env.get('CM_MLPERF_RUN_MOBILENETS', '') == "yes": + elif env.get('MLC_MLPERF_RUN_MOBILENETS', '') == "yes": models['mobilenet'] = models_all['mobilenet'] - elif env.get('CM_MLPERF_RUN_EFFICIENTNETS', '') == "yes": + elif env.get('MLC_MLPERF_RUN_EFFICIENTNETS', '') == "yes": models['efficientnet'] = models_all['efficientnet'] variation_strings = {} @@ -89,16 +89,16 @@ def preprocess(i): variation_list.append("_" + k3) variation_strings[t1].append(",".join(variation_list)) - if env.get('CM_MLPERF_SUBMISSION_MODE', '') == "yes": + if env.get('MLC_MLPERF_SUBMISSION_MODE', '') == "yes": var = "_submission" execution_mode = "valid" - elif env.get('CM_MLPERF_ACCURACY_MODE', '') == "yes" and env.get('CM_MLPERF_PERFORMANCE_MODE', '') == "yes": + elif env.get('MLC_MLPERF_ACCURACY_MODE', '') == "yes" and env.get('MLC_MLPERF_PERFORMANCE_MODE', '') == "yes": var = "_full,_performance-and-accuracy" execution_mode = "valid" - elif env.get('CM_MLPERF_ACCURACY_MODE', '') == "yes": + elif env.get('MLC_MLPERF_ACCURACY_MODE', '') == "yes": var = "_full,_accuracy-only" execution_mode = "valid" - elif env.get('CM_MLPERF_PERFORMANCE_MODE', '') == "yes": + elif env.get('MLC_MLPERF_PERFORMANCE_MODE', '') == "yes": var = "_full,_performance-only" execution_mode = "valid" else: @@ -106,17 +106,17 @@ def preprocess(i): execution_mode = "test" precisions = [] - if env.get('CM_MLPERF_RUN_FP32', '') == "yes": + if env.get('MLC_MLPERF_RUN_FP32', '') == "yes": precisions.append("fp32") - if env.get('CM_MLPERF_RUN_INT8', '') == "yes": + if env.get('MLC_MLPERF_RUN_INT8', '') == "yes": precisions.append("uint8") implementation_tags = [] - if env.get('CM_MLPERF_USE_ARMNN_LIBRARY', '') == "yes": + if env.get('MLC_MLPERF_USE_ARMNN_LIBRARY', '') == "yes": implementation_tags.append("_armnn") - if env.get('CM_MLPERF_TFLITE_ARMNN_NEON', '') == "yes": + if env.get('MLC_MLPERF_TFLITE_ARMNN_NEON', '') == "yes": implementation_tags.append("_use-neon") - if env.get('CM_MLPERF_TFLITE_ARMNN_OPENCL', '') == "yes": + if env.get('MLC_MLPERF_TFLITE_ARMNN_OPENCL', '') == "yes": implementation_tags.append("_use-opencl") implementation_tags_string = ",".join(implementation_tags) @@ -163,27 +163,27 @@ def preprocess(i): utils.merge_dicts( {'dict1': mlc_input['adr'], 'dict2': adr, 'append_lists': True, 
'append_unique': True}) - if env.get('CM_MLPERF_INFERENCE_RESULTS_DIR', '') != '': - mlc_input['results_dir'] = env['CM_MLPERF_INFERENCE_RESULTS_DIR'] + if env.get('MLC_MLPERF_INFERENCE_RESULTS_DIR', '') != '': + mlc_input['results_dir'] = env['MLC_MLPERF_INFERENCE_RESULTS_DIR'] - if env.get('CM_MLPERF_INFERENCE_SUBMISSION_DIR', '') != '': - mlc_input['submission_dir'] = env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'] + if env.get('MLC_MLPERF_INFERENCE_SUBMISSION_DIR', '') != '': + mlc_input['submission_dir'] = env['MLC_MLPERF_INFERENCE_SUBMISSION_DIR'] - if env.get('CM_MLPERF_FIND_PERFORMANCE_MODE', '') == "yes" and env.get( - 'CM_MLPERF_NO_RERUN', '') != 'yes': + if env.get('MLC_MLPERF_FIND_PERFORMANCE_MODE', '') == "yes" and env.get( + 'MLC_MLPERF_NO_RERUN', '') != 'yes': mlc_input['rerun'] = True - if env.get('CM_MLPERF_POWER', '') == "yes": + if env.get('MLC_MLPERF_POWER', '') == "yes": mlc_input['power'] = 'yes' - if env.get('CM_MLPERF_ACCURACY_MODE', '') == "yes": + if env.get('MLC_MLPERF_ACCURACY_MODE', '') == "yes": mlc_input['mode'] = 'accuracy' print(mlc_input) r = cmind.access(mlc_input) if r['return'] > 0: return r - if env.get('CM_MLPERF_PERFORMANCE_MODE', '') == "yes": + if env.get('MLC_MLPERF_PERFORMANCE_MODE', '') == "yes": mlc_input['mode'] = 'performance' print(mlc_input) @@ -191,7 +191,7 @@ def preprocess(i): if r['return'] > 0: return r - if env.get('CM_TEST_ONE_RUN', '') == "yes": + if env.get('MLC_TEST_ONE_RUN', '') == "yes": return {'return': 0} clean_input = { diff --git a/script/run-mlperf-inference-mobilenet-models/meta.yaml b/script/run-mlperf-inference-mobilenet-models/meta.yaml index fb28250a9..7fae10c70 100644 --- a/script/run-mlperf-inference-mobilenet-models/meta.yaml +++ b/script/run-mlperf-inference-mobilenet-models/meta.yaml @@ -3,11 +3,11 @@ automation_alias: script automation_uid: 5b4e0237da074764 category: MLPerf benchmark support default_env: - CM_MLPERF_NO_RERUN: 'no' - CM_MLPERF_RUN_EFFICIENTNETS: 'no' - CM_MLPERF_RUN_FP32: 'yes' - CM_MLPERF_RUN_INT8: 'yes' - CM_MLPERF_RUN_MOBILENETS: 'no' + MLC_MLPERF_NO_RERUN: 'no' + MLC_MLPERF_RUN_EFFICIENTNETS: 'no' + MLC_MLPERF_RUN_FP32: 'yes' + MLC_MLPERF_RUN_INT8: 'yes' + MLC_MLPERF_RUN_MOBILENETS: 'no' deps: - tags: get,sys-utils-cm docker: @@ -25,13 +25,13 @@ docker: - ${{ SUBMISSION_DIR }}:/home/cmuser/inference_submission_3.1 run: true input_mapping: - find-performance: CM_MLPERF_FIND_PERFORMANCE_MODE + find-performance: MLC_MLPERF_FIND_PERFORMANCE_MODE imagenet_path: IMAGENET_PATH - no-rerun: CM_MLPERF_NO_RERUN - power: CM_MLPERF_POWER - results_dir: CM_MLPERF_INFERENCE_RESULTS_DIR - submission: CM_MLPERF_SUBMISSION_MODE - submission_dir: CM_MLPERF_INFERENCE_SUBMISSION_DIR + no-rerun: MLC_MLPERF_NO_RERUN + power: MLC_MLPERF_POWER + results_dir: MLC_MLPERF_INFERENCE_RESULTS_DIR + submission: MLC_MLPERF_SUBMISSION_MODE + submission_dir: MLC_MLPERF_INFERENCE_SUBMISSION_DIR tags: - run - mobilenet @@ -44,91 +44,91 @@ uid: f21cc993a8b14a58 variations: accuracy-only: env: - CM_MLPERF_ACCURACY_MODE: 'yes' - CM_MLPERF_FIND_PERFORMANCE_MODE: 'no' - CM_MLPERF_SUBMISSION_MODE: 'no' + MLC_MLPERF_ACCURACY_MODE: 'yes' + MLC_MLPERF_FIND_PERFORMANCE_MODE: 'no' + MLC_MLPERF_SUBMISSION_MODE: 'no' group: run-mode all-models: default: true env: - CM_MLPERF_RUN_EFFICIENTNETS: 'yes' - CM_MLPERF_RUN_MOBILENETS: 'yes' + MLC_MLPERF_RUN_EFFICIENTNETS: 'yes' + MLC_MLPERF_RUN_MOBILENETS: 'yes' group: model-selection armnn: env: - CM_MLPERF_USE_ARMNN_LIBRARY: 'yes' + MLC_MLPERF_USE_ARMNN_LIBRARY: 'yes' efficientnet: env: - 
CM_MLPERF_RUN_EFFICIENTNETS: 'yes' + MLC_MLPERF_RUN_EFFICIENTNETS: 'yes' group: model-selection find-performance: env: - CM_MLPERF_FIND_PERFORMANCE_MODE: 'yes' - CM_MLPERF_SUBMISSION_MODE: 'no' + MLC_MLPERF_FIND_PERFORMANCE_MODE: 'yes' + MLC_MLPERF_SUBMISSION_MODE: 'no' group: run-mode mobilenet: env: - CM_MLPERF_RUN_MOBILENETS: 'yes' + MLC_MLPERF_RUN_MOBILENETS: 'yes' group: model-selection mobilenet-v1: env: - CM_MLPERF_RUN_MOBILENET_V1: 'yes' + MLC_MLPERF_RUN_MOBILENET_V1: 'yes' group: model-selection mobilenet-v2: env: - CM_MLPERF_RUN_MOBILENET_V2: 'yes' + MLC_MLPERF_RUN_MOBILENET_V2: 'yes' group: model-selection mobilenet-v3: env: - CM_MLPERF_RUN_MOBILENET_V3: 'yes' + MLC_MLPERF_RUN_MOBILENET_V3: 'yes' group: model-selection neon: env: - CM_MLPERF_USE_NEON: 'yes' + MLC_MLPERF_USE_NEON: 'yes' only-fp32: env: - CM_MLPERF_RUN_INT8: 'no' + MLC_MLPERF_RUN_INT8: 'no' only-int8: env: - CM_MLPERF_RUN_FP32: 'no' + MLC_MLPERF_RUN_FP32: 'no' opencl: env: - CM_MLPERF_USE_OPENCL: 'yes' + MLC_MLPERF_USE_OPENCL: 'yes' performance-and-accuracy: default: 'true' env: - CM_MLPERF_ACCURACY_MODE: 'yes' - CM_MLPERF_FIND_PERFORMANCE_MODE: 'no' - CM_MLPERF_PERFORMANCE_MODE: 'yes' - CM_MLPERF_SUBMISSION_MODE: 'no' + MLC_MLPERF_ACCURACY_MODE: 'yes' + MLC_MLPERF_FIND_PERFORMANCE_MODE: 'no' + MLC_MLPERF_PERFORMANCE_MODE: 'yes' + MLC_MLPERF_SUBMISSION_MODE: 'no' group: run-mode performance-only: env: - CM_MLPERF_FIND_PERFORMANCE_MODE: 'no' - CM_MLPERF_PERFORMANCE_MODE: 'yes' - CM_MLPERF_SUBMISSION_MODE: 'no' + MLC_MLPERF_FIND_PERFORMANCE_MODE: 'no' + MLC_MLPERF_PERFORMANCE_MODE: 'yes' + MLC_MLPERF_SUBMISSION_MODE: 'no' group: run-mode submission: env: - CM_MLPERF_FIND_PERFORMANCE_MODE: 'no' - CM_MLPERF_SUBMISSION_MODE: 'yes' + MLC_MLPERF_FIND_PERFORMANCE_MODE: 'no' + MLC_MLPERF_SUBMISSION_MODE: 'yes' group: run-mode tflite: default: true group: base-framework tflite,armnn: env: - CM_MLPERF_TFLITE_ARMNN: 'yes' + MLC_MLPERF_TFLITE_ARMNN: 'yes' tflite,armnn,neon: env: - CM_MLPERF_TFLITE_ARMNN_NEON: 'yes' + MLC_MLPERF_TFLITE_ARMNN_NEON: 'yes' tflite,armnn,opencl: env: - CM_MLPERF_TFLITE_ARMNN_OPENCL: 'yes' + MLC_MLPERF_TFLITE_ARMNN_OPENCL: 'yes' tflite-default: default: true env: - CM_MLPERF_TFLITE_DEFAULT_MODE: 'yes' + MLC_MLPERF_TFLITE_DEFAULT_MODE: 'yes' group: optimization use-neon: alias: neon diff --git a/script/run-mlperf-inference-submission-checker/customize.py b/script/run-mlperf-inference-submission-checker/customize.py index ddb6f7c60..4d22d4867 100644 --- a/script/run-mlperf-inference-submission-checker/customize.py +++ b/script/run-mlperf-inference-submission-checker/customize.py @@ -9,28 +9,28 @@ def preprocess(i): env = i['env'] q = '"' if os_info['platform'] == 'windows' else "'" - submission_dir = env.get("CM_MLPERF_INFERENCE_SUBMISSION_DIR", "") + submission_dir = env.get("MLC_MLPERF_INFERENCE_SUBMISSION_DIR", "") - version = env.get('CM_MLPERF_SUBMISSION_CHECKER_VERSION', '') + version = env.get('MLC_MLPERF_SUBMISSION_CHECKER_VERSION', '') if submission_dir == "": return {'return': 1, - 'error': 'Please set --env.CM_MLPERF_INFERENCE_SUBMISSION_DIR'} + 'error': 'Please set --env.MLC_MLPERF_INFERENCE_SUBMISSION_DIR'} - submitter = env.get("CM_MLPERF_SUBMITTER", "") # "default") + submitter = env.get("MLC_MLPERF_SUBMITTER", "") # "default") if ' ' in submitter: return { - 'return': 1, 'error': 'CM_MLPERF_SUBMITTER cannot contain a space. Please provide a name without space using --submitter input. 
Given value: {}'.format(submitter)} + 'return': 1, 'error': 'MLC_MLPERF_SUBMITTER cannot contain a space. Please provide a name without space using --submitter input. Given value: {}'.format(submitter)} - if 'CM_MLPERF_SKIP_COMPLIANCE' in env: + if 'MLC_MLPERF_SKIP_COMPLIANCE' in env: skip_compliance = " --skip_compliance" else: skip_compliance = "" - submission_checker_file = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "tools", "submission", + submission_checker_file = os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "tools", "submission", "submission_checker.py") - if env['CM_MLPERF_SHORT_RUN'] == "yes": + if env['MLC_MLPERF_SHORT_RUN'] == "yes": import shutil new_submission_checker_file = os.path.join( os.path.dirname(submission_checker_file), @@ -45,24 +45,24 @@ def preprocess(i): file.write(data) submission_checker_file = new_submission_checker_file - if env.get('CM_MLPERF_EXTRA_MODEL_MAPPING', '') != '': + if env.get('MLC_MLPERF_EXTRA_MODEL_MAPPING', '') != '': extra_map = ' --extra_model_benchmark_map "' + \ - env['CM_MLPERF_EXTRA_MODEL_MAPPING'] + '"' + env['MLC_MLPERF_EXTRA_MODEL_MAPPING'] + '"' else: extra_map = "" - if env.get('CM_MLPERF_SKIP_POWER_CHECK', 'no') == "yes": + if env.get('MLC_MLPERF_SKIP_POWER_CHECK', 'no') == "yes": power_check = " --skip-power-check" else: power_check = "" - extra_args = ' ' + env.get('CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS', '') + extra_args = ' ' + env.get('MLC_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS', '') x_submitter = ' --submitter ' + q + submitter + q if submitter != '' else '' x_version = ' --version ' + version + ' ' if version != '' else '' - CMD = env['CM_PYTHON_BIN_WITH_PATH'] + ' ' + q + submission_checker_file + q + ' --input ' + q + submission_dir + q + \ + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + ' ' + q + submission_checker_file + q + ' --input ' + q + submission_dir + q + \ x_submitter + \ x_version + \ skip_compliance + extra_map + power_check + extra_args @@ -73,18 +73,18 @@ def preprocess(i): x_submission_repo_owner = '' x_submission_repo_branch = '' - if env.get('CM_MLPERF_RESULTS_GIT_REPO_NAME', '') != '': - x_submission_repo_name = f""" --repository {env['CM_MLPERF_RESULTS_GIT_REPO_NAME']}""" - if env.get('CM_MLPERF_RESULTS_GIT_REPO_OWNER', '') != '': - x_submission_repo_owner = f""" --repository-owner {env['CM_MLPERF_RESULTS_GIT_REPO_OWNER']}""" - if env.get('CM_MLPERF_RESULTS_GIT_REPO_BRANCH', '') != '': - x_submission_repo_branch = f""" --repository-branch {env['CM_MLPERF_RESULTS_GIT_REPO_BRANCH']}""" + if env.get('MLC_MLPERF_RESULTS_GIT_REPO_NAME', '') != '': + x_submission_repo_name = f""" --repository {env['MLC_MLPERF_RESULTS_GIT_REPO_NAME']}""" + if env.get('MLC_MLPERF_RESULTS_GIT_REPO_OWNER', '') != '': + x_submission_repo_owner = f""" --repository-owner {env['MLC_MLPERF_RESULTS_GIT_REPO_OWNER']}""" + if env.get('MLC_MLPERF_RESULTS_GIT_REPO_BRANCH', '') != '': + x_submission_repo_branch = f""" --repository-branch {env['MLC_MLPERF_RESULTS_GIT_REPO_BRANCH']}""" - report_generator_file = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "tools", "submission", + report_generator_file = os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "tools", "submission", "generate_final_report.py") - env['CM_RUN_CMD'] = CMD + env['MLC_RUN_CMD'] = CMD print(CMD) - env['CM_POST_RUN_CMD'] = env['CM_PYTHON_BIN_WITH_PATH'] + ' ' + q + report_generator_file + q + ' --input summary.csv ' + \ + env['MLC_POST_RUN_CMD'] = env['MLC_PYTHON_BIN_WITH_PATH'] + ' ' + q + report_generator_file + q + ' --input summary.csv ' + \ x_version + \ 
x_submission_repo_name + \ x_submission_repo_owner + \ @@ -96,15 +96,15 @@ def preprocess(i): def postprocess(i): env = i['env'] - if env.get('CM_TAR_SUBMISSION_DIR', ''): - env['CM_TAR_INPUT_DIR'] = env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'] + if env.get('MLC_TAR_SUBMISSION_DIR', ''): + env['MLC_TAR_INPUT_DIR'] = env['MLC_MLPERF_INFERENCE_SUBMISSION_DIR'] x = env.get('MLPERF_INFERENCE_SUBMISSION_TAR_FILE', '') if x != '': - env['CM_TAR_OUTFILE'] = x + env['MLC_TAR_OUTFILE'] = x - if env.get('CM_MLPERF_INFERENCE_SUBMISSION_BASE_DIR', '') != '': - env['CM_TAR_OUTPUT_DIR'] = env['CM_MLPERF_INFERENCE_SUBMISSION_BASE_DIR'] + if env.get('MLC_MLPERF_INFERENCE_SUBMISSION_BASE_DIR', '') != '': + env['MLC_TAR_OUTPUT_DIR'] = env['MLC_MLPERF_INFERENCE_SUBMISSION_BASE_DIR'] x = env.get('MLPERF_INFERENCE_SUBMISSION_SUMMARY', '') if x != '': diff --git a/script/run-mlperf-inference-submission-checker/meta.yaml b/script/run-mlperf-inference-submission-checker/meta.yaml index 0bb2079b0..93faed4c9 100644 --- a/script/run-mlperf-inference-submission-checker/meta.yaml +++ b/script/run-mlperf-inference-submission-checker/meta.yaml @@ -5,7 +5,7 @@ cache: false category: MLPerf benchmark support clean_files: [] default_env: - CM_MLPERF_SHORT_RUN: 'no' + MLC_MLPERF_SHORT_RUN: 'no' default_version: master deps: - names: @@ -27,53 +27,53 @@ deps: - names: - get-mlperf-submission-dir skip_if_env: - CM_MLPERF_INFERENCE_SUBMISSION_DIR: + MLC_MLPERF_INFERENCE_SUBMISSION_DIR: - 'on' tags: get,mlperf,submission,dir - enable_if_env: - CM_TMP_MLPERF_INFERENCE_PREPROCESS_SUBMISSION: + MLC_TMP_MLPERF_INFERENCE_PREPROCESS_SUBMISSION: - 'on' tags: preprocess,mlperf,inference,submission input_mapping: - extra_args: CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS - extra_checker_args: CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS - extra_model_benchmark_map: CM_MLPERF_EXTRA_MODEL_MAPPING - input: CM_MLPERF_INFERENCE_SUBMISSION_DIR - power: CM_MLPERF_POWER - preprocess: CM_TMP_MLPERF_INFERENCE_PREPROCESS_SUBMISSION - preprocess_submission: CM_TMP_MLPERF_INFERENCE_PREPROCESS_SUBMISSION - push_to_github: CM_MLPERF_RESULT_PUSH_TO_GITHUB - repo_branch: CM_MLPERF_RESULTS_GIT_REPO_BRANCH - repo_name: CM_MLPERF_RESULTS_GIT_REPO_NAME - repo_owner: CM_MLPERF_RESULTS_GIT_REPO_OWNER - skip_compliance: CM_MLPERF_SKIP_COMPLIANCE - skip_power_check: CM_MLPERF_SKIP_POWER_CHECK - src_version: CM_MLPERF_SUBMISSION_CHECKER_VERSION - submission_dir: CM_MLPERF_INFERENCE_SUBMISSION_DIR - submitter: CM_MLPERF_SUBMITTER - submitter_id: CM_MLPERF_SUBMITTER_ID - tar: CM_TAR_SUBMISSION_DIR + extra_args: MLC_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS + extra_checker_args: MLC_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS + extra_model_benchmark_map: MLC_MLPERF_EXTRA_MODEL_MAPPING + input: MLC_MLPERF_INFERENCE_SUBMISSION_DIR + power: MLC_MLPERF_POWER + preprocess: MLC_TMP_MLPERF_INFERENCE_PREPROCESS_SUBMISSION + preprocess_submission: MLC_TMP_MLPERF_INFERENCE_PREPROCESS_SUBMISSION + push_to_github: MLC_MLPERF_RESULT_PUSH_TO_GITHUB + repo_branch: MLC_MLPERF_RESULTS_GIT_REPO_BRANCH + repo_name: MLC_MLPERF_RESULTS_GIT_REPO_NAME + repo_owner: MLC_MLPERF_RESULTS_GIT_REPO_OWNER + skip_compliance: MLC_MLPERF_SKIP_COMPLIANCE + skip_power_check: MLC_MLPERF_SKIP_POWER_CHECK + src_version: MLC_MLPERF_SUBMISSION_CHECKER_VERSION + submission_dir: MLC_MLPERF_INFERENCE_SUBMISSION_DIR + submitter: MLC_MLPERF_SUBMITTER + submitter_id: MLC_MLPERF_SUBMITTER_ID + tar: MLC_TAR_SUBMISSION_DIR post_deps: - enable_if_env: - CM_MLPERF_DASHBOARD: + MLC_MLPERF_DASHBOARD: - 'on' tags: publish-results,dashboard - 
enable_if_env: - CM_MLPERF_RESULT_PUSH_TO_GITHUB: + MLC_MLPERF_RESULT_PUSH_TO_GITHUB: - 'on' names: - push-to-github tags: publish-results,github - enable_if_env: - CM_TAR_SUBMISSION_DIR: + MLC_TAR_SUBMISSION_DIR: - 'yes' tags: run,tar - enable_if_env: - CM_SUBMITTER_ID: + MLC_SUBMITTER_ID: - 'yes' tags: submit,mlperf,results,_inference env: - CM_MLPERF_SUBMISSION_FILE: <<>> + MLC_MLPERF_SUBMISSION_FILE: <<>> tags: - run - mlc @@ -89,7 +89,7 @@ uid: 15d03ec2c1af4297 variations: short-run: env: - CM_MLPERF_SHORT_RUN: 'yes' + MLC_MLPERF_SHORT_RUN: 'yes' versions: master: adr: diff --git a/script/run-mlperf-inference-submission-checker/run.bat b/script/run-mlperf-inference-submission-checker/run.bat index 5cbc264a2..65d64eee5 100644 --- a/script/run-mlperf-inference-submission-checker/run.bat +++ b/script/run-mlperf-inference-submission-checker/run.bat @@ -1,6 +1,6 @@ -echo "%CM_RUN_CMD%" -%CM_RUN_CMD% +echo "%MLC_RUN_CMD%" +%MLC_RUN_CMD% IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% -%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\code.py +%MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\code.py IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/run-mlperf-inference-submission-checker/run.sh b/script/run-mlperf-inference-submission-checker/run.sh index 82434a83b..4a70055dc 100644 --- a/script/run-mlperf-inference-submission-checker/run.sh +++ b/script/run-mlperf-inference-submission-checker/run.sh @@ -1,13 +1,13 @@ #!/bin/bash -cmd=${CM_RUN_CMD} +cmd=${MLC_RUN_CMD} echo "${cmd}" eval "${cmd}" test $? -eq 0 || exit $? -cmd=${CM_POST_RUN_CMD} +cmd=${MLC_POST_RUN_CMD} echo "${cmd}" eval "${cmd}" test $? -eq 0 || exit $? -${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/code.py +${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/code.py test $? -eq 0 || exit $? 
diff --git a/script/run-mlperf-power-client/customize.py b/script/run-mlperf-power-client/customize.py index b93d9e121..f7d3cd989 100644 --- a/script/run-mlperf-power-client/customize.py +++ b/script/run-mlperf-power-client/customize.py @@ -8,36 +8,36 @@ def preprocess(i): os_info = i['os_info'] env = i['env'] - if not env['CM_MLPERF_RUN_CMD']: - env['CM_MLPERF_RUN_CMD'] = os.path.join( + if not env['MLC_MLPERF_RUN_CMD']: + env['MLC_MLPERF_RUN_CMD'] = os.path.join( i['run_script_input']['path'], "dummy.sh") - if 'CM_MLPERF_POWER_TIMESTAMP' in env: + if 'MLC_MLPERF_POWER_TIMESTAMP' in env: timestamp = "" else: timestamp = " --no-timestamp-path" - if 'CM_MLPERF_LOADGEN_LOGS_DIR' not in env: - env['CM_MLPERF_LOADGEN_LOGS_DIR'] = os.path.join( + if 'MLC_MLPERF_LOADGEN_LOGS_DIR' not in env: + env['MLC_MLPERF_LOADGEN_LOGS_DIR'] = os.path.join( os.getcwd(), "loadgen_logs") - run_cmd = env['CM_MLPERF_RUN_CMD'].replace("'", '"') + run_cmd = env['MLC_MLPERF_RUN_CMD'].replace("'", '"') run_cmd = run_cmd.replace('"', '\\"') - cmd = env['CM_PYTHON_BIN_WITH_PATH'] + ' ' +\ - os.path.join(env['CM_MLPERF_POWER_SOURCE'], 'ptd_client_server', 'client.py') + \ - " -a " + env['CM_MLPERF_POWER_SERVER_ADDRESS'] + \ - " -p " + env.get('CM_MLPERF_POWER_SERVER_PORT', "4950") + \ + cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + ' ' +\ + os.path.join(env['MLC_MLPERF_POWER_SOURCE'], 'ptd_client_server', 'client.py') + \ + " -a " + env['MLC_MLPERF_POWER_SERVER_ADDRESS'] + \ + " -p " + env.get('MLC_MLPERF_POWER_SERVER_PORT', "4950") + \ " -w '" + run_cmd + \ - "' -L " + env['CM_MLPERF_LOADGEN_LOGS_DIR'] + \ - " -o " + env['CM_MLPERF_POWER_LOG_DIR'] + \ - " -n " + env['CM_MLPERF_POWER_NTP_SERVER'] + \ + "' -L " + env['MLC_MLPERF_LOADGEN_LOGS_DIR'] + \ + " -o " + env['MLC_MLPERF_POWER_LOG_DIR'] + \ + " -n " + env['MLC_MLPERF_POWER_NTP_SERVER'] + \ timestamp - if 'CM_MLPERF_POWER_MAX_AMPS' in env and 'CM_MLPERF_POWER_MAX_VOLTS' in env: - cmd = cmd + " --max-amps " + env['CM_MLPERF_POWER_MAX_AMPS'] + \ - " --max-volts " + env['CM_MLPERF_POWER_MAX_VOLTS'] + if 'MLC_MLPERF_POWER_MAX_AMPS' in env and 'MLC_MLPERF_POWER_MAX_VOLTS' in env: + cmd = cmd + " --max-amps " + env['MLC_MLPERF_POWER_MAX_AMPS'] + \ + " --max-volts " + env['MLC_MLPERF_POWER_MAX_VOLTS'] - env['CM_MLPERF_POWER_RUN_CMD'] = cmd + env['MLC_MLPERF_POWER_RUN_CMD'] = cmd return {'return': 0} diff --git a/script/run-mlperf-power-client/meta.yaml b/script/run-mlperf-power-client/meta.yaml index 40604e1e3..12e98bec8 100644 --- a/script/run-mlperf-power-client/meta.yaml +++ b/script/run-mlperf-power-client/meta.yaml @@ -5,10 +5,10 @@ cache: false category: MLPerf benchmark support clean_files: [] default_env: - CM_MLPERF_POWER_LOG_DIR: logs - CM_MLPERF_POWER_NTP_SERVER: time.google.com - CM_MLPERF_POWER_SERVER_ADDRESS: localhost - CM_MLPERF_RUN_CMD: '' + MLC_MLPERF_POWER_LOG_DIR: logs + MLC_MLPERF_POWER_NTP_SERVER: time.google.com + MLC_MLPERF_POWER_SERVER_ADDRESS: localhost + MLC_MLPERF_RUN_CMD: '' deps: - names: - python @@ -19,17 +19,17 @@ deps: tags: get,mlperf,power,src - tags: get,generic-sys-util,_ntpdate input_mapping: - loadgen_logs_dir: CM_MLPERF_LOADGEN_LOGS_DIR - log_dir: CM_MLPERF_POWER_LOG_DIR - max_amps: CM_MLPERF_POWER_MAX_AMPS - max_volts: CM_MLPERF_POWER_MAX_VOLTS - ntp_server: CM_MLPERF_POWER_NTP_SERVER - port: CM_MLPERF_POWER_SERVER_PORT - power_server: CM_MLPERF_POWER_SERVER_ADDRESS - run_cmd: CM_MLPERF_RUN_CMD - server: CM_MLPERF_POWER_SERVER_ADDRESS - server_port: CM_MLPERF_POWER_SERVER_PORT - timestamp: CM_MLPERF_POWER_TIMESTAMP + loadgen_logs_dir: 
MLC_MLPERF_LOADGEN_LOGS_DIR + log_dir: MLC_MLPERF_POWER_LOG_DIR + max_amps: MLC_MLPERF_POWER_MAX_AMPS + max_volts: MLC_MLPERF_POWER_MAX_VOLTS + ntp_server: MLC_MLPERF_POWER_NTP_SERVER + port: MLC_MLPERF_POWER_SERVER_PORT + power_server: MLC_MLPERF_POWER_SERVER_ADDRESS + run_cmd: MLC_MLPERF_RUN_CMD + server: MLC_MLPERF_POWER_SERVER_ADDRESS + server_port: MLC_MLPERF_POWER_SERVER_PORT + timestamp: MLC_MLPERF_POWER_TIMESTAMP tags: - run - mlc diff --git a/script/run-mlperf-power-client/run.sh b/script/run-mlperf-power-client/run.sh index 19805cb5b..e77dc8085 100644 --- a/script/run-mlperf-power-client/run.sh +++ b/script/run-mlperf-power-client/run.sh @@ -1,14 +1,14 @@ #!/bin/bash -if [[ -n ${CM_RUN_DIR} ]]; then - cur_dir=${CM_RUN_DIR}; +if [[ -n ${MLC_RUN_DIR} ]]; then + cur_dir=${MLC_RUN_DIR}; cd $cur_dir else cur_dir=`pwd` fi echo "Running power client from $cur_dir" -cmd="${CM_MLPERF_POWER_RUN_CMD}" +cmd="${MLC_MLPERF_POWER_RUN_CMD}" echo $cmd eval $cmd test $? -eq 0 || exit $? diff --git a/script/run-mlperf-power-server/customize.py b/script/run-mlperf-power-server/customize.py index 0593bc170..e4117c93d 100644 --- a/script/run-mlperf-power-server/customize.py +++ b/script/run-mlperf-power-server/customize.py @@ -11,11 +11,11 @@ def preprocess(i): # Initialize ConfigParser config = configparser.ConfigParser() - if env.get('CM_MLPERF_POWER_SERVER_CONF_FILE', '') != '': - server_config_file = env['CM_MLPERF_POWER_SERVER_CONF_FILE'] + if env.get('MLC_MLPERF_POWER_SERVER_CONF_FILE', '') != '': + server_config_file = env['MLC_MLPERF_POWER_SERVER_CONF_FILE'] else: server_config_file = os.path.join( - env.get('CM_MLPERF_POWER_SOURCE', ''), + env.get('MLC_MLPERF_POWER_SOURCE', ''), 'ptd_client_server', 'server.template.conf' ) @@ -28,23 +28,23 @@ def preprocess(i): config.read(server_config_file) # Update the server section try: - config['server']['ntpServer'] = env['CM_MLPERF_POWER_NTP_SERVER'] - config['server']['listen'] = f"{env['CM_MLPERF_POWER_SERVER_ADDRESS']} {env['CM_MLPERF_POWER_SERVER_PORT']}" + config['server']['ntpServer'] = env['MLC_MLPERF_POWER_NTP_SERVER'] + config['server']['listen'] = f"{env['MLC_MLPERF_POWER_SERVER_ADDRESS']} {env['MLC_MLPERF_POWER_SERVER_PORT']}" except KeyError as e: raise KeyError(f"Missing required environment variable: {e}") # Define number of analyzers and network port start - num_analyzers = int(env.get('CM_MLPERF_POWER_NUM_ANALYZERS', 1)) + num_analyzers = int(env.get('MLC_MLPERF_POWER_NUM_ANALYZERS', 1)) network_port_start = int( env.get( - 'CM_MLPERF_POWER_NETWORK_PORT_START', + 'MLC_MLPERF_POWER_NETWORK_PORT_START', 8888)) # Ensure 'ptd' section exists if 'ptd' not in config: config.add_section('ptd') - config['ptd']['ptd'] = str(env.get('CM_MLPERF_PTD_PATH', '')) + config['ptd']['ptd'] = str(env.get('MLC_MLPERF_PTD_PATH', '')) config['ptd']['analyzercount'] = str(num_analyzers) # Add analyzers to the configuration @@ -55,11 +55,11 @@ def preprocess(i): # Add the analyzer subsection as keys under the 'ptd' section config[f'{analyzer_section}']['interfaceFlag'] = str( - env.get('CM_MLPERF_POWER_INTERFACE_FLAG', '')) + env.get('MLC_MLPERF_POWER_INTERFACE_FLAG', '')) config[f'{analyzer_section}']['deviceType'] = str( - env.get('CM_MLPERF_POWER_DEVICE_TYPE', '')) + env.get('MLC_MLPERF_POWER_DEVICE_TYPE', '')) config[f'{analyzer_section}']['devicePort'] = str( - env.get('CM_MLPERF_POWER_DEVICE_PORT', '')) + env.get('MLC_MLPERF_POWER_DEVICE_PORT', '')) config[f'{analyzer_section}']['networkPort'] = str( network_port_start + aid - 1) @@ -68,16 +68,16 @@ 
def preprocess(i): print({section: dict(config[section]) for section in config.sections()}) - if env['CM_HOST_OS_TYPE'] == "windows": + if env['MLC_HOST_OS_TYPE'] == "windows": cmd_prefix = "" else: cmd_prefix = "sudo " - cmd = env['CM_PYTHON_BIN_WITH_PATH'] + ' ' + os.path.join( - env['CM_MLPERF_POWER_SOURCE'], + cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + ' ' + os.path.join( + env['MLC_MLPERF_POWER_SOURCE'], 'ptd_client_server', 'server.py') + ' -c tmp-power-server.conf' - if env.get('CM_MLPERF_POWER_SERVER_USE_SCREEN', 'no') == 'yes': + if env.get('MLC_MLPERF_POWER_SERVER_USE_SCREEN', 'no') == 'yes': cmd = cmd_prefix + ' screen -d -m ' + cmd + ' ' else: cmd = cmd_prefix + cmd diff --git a/script/run-mlperf-power-server/meta.yaml b/script/run-mlperf-power-server/meta.yaml index c4c4546b1..428df42ff 100644 --- a/script/run-mlperf-power-server/meta.yaml +++ b/script/run-mlperf-power-server/meta.yaml @@ -5,13 +5,13 @@ cache: false category: MLPerf benchmark support clean_files: [] default_env: - CM_MLPERF_POWER_DEVICE_PORT: /dev/usbtmc0 - CM_MLPERF_POWER_DEVICE_TYPE: '49' - CM_MLPERF_POWER_INTERFACE_FLAG: '' - CM_MLPERF_POWER_NTP_SERVER: time.google.com - CM_MLPERF_POWER_SERVER_ADDRESS: 0.0.0.0 - CM_MLPERF_POWER_SERVER_PORT: '4950' - CM_MLPERF_POWER_SERVER_USE_SCREEN: 'no' + MLC_MLPERF_POWER_DEVICE_PORT: /dev/usbtmc0 + MLC_MLPERF_POWER_DEVICE_TYPE: '49' + MLC_MLPERF_POWER_INTERFACE_FLAG: '' + MLC_MLPERF_POWER_NTP_SERVER: time.google.com + MLC_MLPERF_POWER_SERVER_ADDRESS: 0.0.0.0 + MLC_MLPERF_POWER_SERVER_PORT: '4950' + MLC_MLPERF_POWER_SERVER_USE_SCREEN: 'no' deps: - names: - python @@ -27,10 +27,10 @@ deps: - names: - screen skip_if_env: - CM_HOST_OS_TYPE: windows + MLC_HOST_OS_TYPE: windows tags: get,generic,sys-util,_screen - enable_if_env: - CM_HOST_OS_TYPE: windows + MLC_HOST_OS_TYPE: windows names: - win32 tags: get,generic-python-lib,_package.pypiwin32 @@ -39,13 +39,13 @@ docker: port_maps: - 4950:4950 input_mapping: - device_port: CM_MLPERF_POWER_DEVICE_PORT - device_type: CM_MLPERF_POWER_DEVICE_TYPE - interface_flag: CM_MLPERF_POWER_INTERFACE_FLAG - ntp_server: CM_MLPERF_POWER_NTP_SERVER - conf_file: CM_MLPERF_POWER_SERVER_CONF_FILE - screen: CM_MLPERF_POWER_SERVER_USE_SCREEN - num_analyzers: CM_MLPERF_POWER_NUM_ANALYZERS + device_port: MLC_MLPERF_POWER_DEVICE_PORT + device_type: MLC_MLPERF_POWER_DEVICE_TYPE + interface_flag: MLC_MLPERF_POWER_INTERFACE_FLAG + ntp_server: MLC_MLPERF_POWER_NTP_SERVER + conf_file: MLC_MLPERF_POWER_SERVER_CONF_FILE + screen: MLC_MLPERF_POWER_SERVER_USE_SCREEN + num_analyzers: MLC_MLPERF_POWER_NUM_ANALYZERS tags: - run - mlc diff --git a/script/run-mlperf-training-submission-checker/customize.py b/script/run-mlperf-training-submission-checker/customize.py index 3dba45eb6..97c0d31d9 100644 --- a/script/run-mlperf-training-submission-checker/customize.py +++ b/script/run-mlperf-training-submission-checker/customize.py @@ -7,28 +7,28 @@ def preprocess(i): os_info = i['os_info'] env = i['env'] - submission_dir = env.get("CM_MLPERF_SUBMISSION_DIR", "") + submission_dir = env.get("MLC_MLPERF_SUBMISSION_DIR", "") - version = env.get('CM_MLPERF_SUBMISSION_CHECKER_VERSION', 'v3.1') + version = env.get('MLC_MLPERF_SUBMISSION_CHECKER_VERSION', 'v3.1') if submission_dir == "": - return {'return': 1, 'error': 'Please set CM_MLPERF_SUBMISSION_DIR'} + return {'return': 1, 'error': 'Please set MLC_MLPERF_SUBMISSION_DIR'} - submitter = env.get("CM_MLPERF_SUBMITTER", "") # "default") + submitter = env.get("MLC_MLPERF_SUBMITTER", "") # "default") if ' ' in submitter: return { 
- 'return': 1, 'error': 'CM_MLPERF_SUBMITTER cannot contain a space. Please provide a name without space using --submitter input. Given value: {}'.format(submitter)} + 'return': 1, 'error': 'MLC_MLPERF_SUBMITTER cannot contain a space. Please provide a name without space using --submitter input. Given value: {}'.format(submitter)} submission_checker_file = os.path.join( - env['CM_MLPERF_LOGGING_REPO_PATH'], + env['MLC_MLPERF_LOGGING_REPO_PATH'], "scripts", "verify_for_" + version + "_training.sh") - extra_args = ' ' + env.get('CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS', '') + extra_args = ' ' + env.get('MLC_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS', '') CMD = submission_checker_file + " " + submission_dir - env['CM_RUN_CMD'] = CMD + env['MLC_RUN_CMD'] = CMD return {'return': 0} @@ -36,7 +36,7 @@ def preprocess(i): def postprocess(i): env = i['env'] - if env.get('CM_TAR_SUBMISSION_DIR'): - env['CM_TAR_INPUT_DIR'] = env.get('CM_MLPERF_SUBMISSION_DIR', '$HOME') + if env.get('MLC_TAR_SUBMISSION_DIR'): + env['MLC_TAR_INPUT_DIR'] = env.get('MLC_MLPERF_SUBMISSION_DIR', '$HOME') return {'return': 0} diff --git a/script/run-mlperf-training-submission-checker/meta.yaml b/script/run-mlperf-training-submission-checker/meta.yaml index 661e1ed17..1d74db7f3 100644 --- a/script/run-mlperf-training-submission-checker/meta.yaml +++ b/script/run-mlperf-training-submission-checker/meta.yaml @@ -5,7 +5,7 @@ cache: false category: MLPerf benchmark support clean_files: [] default_env: - CM_MLPERF_SHORT_RUN: 'no' + MLC_MLPERF_SHORT_RUN: 'no' default_version: master deps: - names: @@ -18,25 +18,25 @@ deps: tags: get,mlcommons,inference,src - tags: install,mlperf,logging,from.src input_mapping: - extra_args: CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS - input: CM_MLPERF_SUBMISSION_DIR - power: CM_MLPERF_POWER - push_to_github: CM_MLPERF_RESULT_PUSH_TO_GITHUB - skip_compliance: CM_MLPERF_SKIP_COMPLIANCE - skip_power_check: CM_MLPERF_SKIP_POWER_CHECK - src_version: CM_MLPERF_SUBMISSION_CHECKER_VERSION - submission_dir: CM_MLPERF_SUBMISSION_DIR - submitter: CM_MLPERF_SUBMITTER - tar: CM_TAR_SUBMISSION_DIR + extra_args: MLC_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS + input: MLC_MLPERF_SUBMISSION_DIR + power: MLC_MLPERF_POWER + push_to_github: MLC_MLPERF_RESULT_PUSH_TO_GITHUB + skip_compliance: MLC_MLPERF_SKIP_COMPLIANCE + skip_power_check: MLC_MLPERF_SKIP_POWER_CHECK + src_version: MLC_MLPERF_SUBMISSION_CHECKER_VERSION + submission_dir: MLC_MLPERF_SUBMISSION_DIR + submitter: MLC_MLPERF_SUBMITTER + tar: MLC_TAR_SUBMISSION_DIR post_deps: - enable_if_env: - CM_MLPERF_RESULT_PUSH_TO_GITHUB: + MLC_MLPERF_RESULT_PUSH_TO_GITHUB: - 'on' names: - push-to-github tags: publish-results,github - enable_if_env: - CM_TAR_SUBMISSION_DIR: + MLC_TAR_SUBMISSION_DIR: - 'yes' tags: run,tar tags: @@ -55,7 +55,7 @@ uid: cb5cb60ac9a74d09 variations: short-run: env: - CM_MLPERF_SHORT_RUN: 'yes' + MLC_MLPERF_SHORT_RUN: 'yes' versions: master: adr: diff --git a/script/run-mlperf-training-submission-checker/run.sh b/script/run-mlperf-training-submission-checker/run.sh index 8784f3504..2f95ba060 100644 --- a/script/run-mlperf-training-submission-checker/run.sh +++ b/script/run-mlperf-training-submission-checker/run.sh @@ -1,10 +1,10 @@ #!/bin/bash -cmd=${CM_RUN_CMD} +cmd=${MLC_RUN_CMD} echo "${cmd}" eval "${cmd}" test $? -eq 0 || exit $? -cmd=${CM_POST_RUN_CMD} +cmd=${MLC_POST_RUN_CMD} echo "${cmd}" eval "${cmd}" test $? -eq 0 || exit $? 
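Everything in the power-client, power-server, and training-submission-checker hunks above is a mechanical prefix rename: each CM_* environment key in customize.py, meta.yaml, and the shell wrappers becomes the matching MLC_* key, with no change to the surrounding control flow. Because the mapping is a pure prefix substitution, it is easy to smoke-test after the fact. The sketch below is illustrative only and not part of this patch; the file-extension filter and the assumption that no CM_* token should survive the rename are mine:

import os
import re
import sys

# Any CM_-prefixed, upper-case identifier; after a complete rename the
# scripts should contain only MLC_* keys.
CM_VAR = re.compile(r"\bCM_[A-Z0-9_]+\b")

def leftover_cm_vars(root):
    """Yield (path, line_no, token) for every CM_* token under root."""
    for dirpath, _, files in os.walk(root):
        for name in files:
            if not name.endswith((".py", ".yaml", ".sh", ".bat")):
                continue
            path = os.path.join(dirpath, name)
            with open(path, encoding="utf-8", errors="replace") as f:
                for no, line in enumerate(f, 1):
                    for token in CM_VAR.findall(line):
                        yield path, no, token

if __name__ == "__main__":
    root = sys.argv[1] if len(sys.argv) > 1 else "script"
    hits = list(leftover_cm_vars(root))
    for path, no, token in hits:
        print(f"{path}:{no}: {token}")
    sys.exit(1 if hits else 0)

Run against the script/ tree, an empty report is a reasonable pre-merge check; any hit is a key (or a comment referring to one) that still carries the old prefix.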
diff --git a/script/run-python/meta.yaml b/script/run-python/meta.yaml index 7901f3de8..5eaea5dee 100644 --- a/script/run-python/meta.yaml +++ b/script/run-python/meta.yaml @@ -9,7 +9,7 @@ deps: - python3 tags: get,python3 input_mapping: - command: CM_RUN_PYTHON_CMD + command: MLC_RUN_PYTHON_CMD tags: - run - python diff --git a/script/run-python/run.bat b/script/run-python/run.bat index 95d32d577..c7c9eace2 100644 --- a/script/run-python/run.bat +++ b/script/run-python/run.bat @@ -1,2 +1,2 @@ -%CM_PYTHON_BIN_WITH_PATH% %CM_RUN_PYTHON_CMD% +%MLC_PYTHON_BIN_WITH_PATH% %MLC_RUN_PYTHON_CMD% IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/run-python/run.sh b/script/run-python/run.sh index 641095ae8..d1902e9ad 100644 --- a/script/run-python/run.sh +++ b/script/run-python/run.sh @@ -1,4 +1,4 @@ #!/bin/bash -${CM_PYTHON_BIN_WITH_PATH} ${CM_RUN_PYTHON_CMD} +${MLC_PYTHON_BIN_WITH_PATH} ${MLC_RUN_PYTHON_CMD} test $? -eq 0 || exit $? diff --git a/script/run-terraform/customize.py b/script/run-terraform/customize.py index 5d9aa83a5..5728ce687 100644 --- a/script/run-terraform/customize.py +++ b/script/run-terraform/customize.py @@ -12,21 +12,21 @@ def preprocess(i): script_dir = i['run_script_input']['path'] config_dir = os.path.join( script_dir, env.get( - 'CM_TERRAFORM_CONFIG_DIR_NAME', '')) - env['CM_TERRAFORM_CONFIG_DIR'] = config_dir + 'MLC_TERRAFORM_CONFIG_DIR_NAME', '')) + env['MLC_TERRAFORM_CONFIG_DIR'] = config_dir cache_dir = os.getcwd() print(f"Running terraform from {cache_dir}") shutil.copy(os.path.join(config_dir, "main.tf"), cache_dir) - env['CM_TERRAFORM_RUN_DIR'] = cache_dir + env['MLC_TERRAFORM_RUN_DIR'] = cache_dir return {'return': 0} def postprocess(i): env = i['env'] - if env.get('CM_DESTROY_TERRAFORM'): + if env.get('MLC_DESTROY_TERRAFORM'): return {'return': 0} state = i['state'] with open("terraform.tfstate") as f: @@ -38,14 +38,14 @@ def postprocess(i): aws_resource = resource break instances_state = aws_resource['instances'] - state['CM_TF_NEW_INSTANCES_STATE'] = [] - ssh_key_file = env.get('CM_SSH_KEY_FILE') + state['MLC_TF_NEW_INSTANCES_STATE'] = [] + ssh_key_file = env.get('MLC_SSH_KEY_FILE') user = 'ubuntu' for instance_state in instances_state: instance_attributes = instance_state['attributes'] - state['CM_TF_NEW_INSTANCES_STATE'].append(instance_attributes) + state['MLC_TF_NEW_INSTANCES_STATE'].append(instance_attributes) public_ip = instance_attributes['public_ip'] - if env.get('CM_TERRAFORM_CM_INIT'): + if env.get('MLC_TERRAFORM_MLC_INIT'): run_input = { 'automation': 'script', 'action': 'run', @@ -66,8 +66,8 @@ def postprocess(i): "source ~/.profile" ] } - if env.get('CM_TERRAFORM_RUN_COMMANDS'): - run_cmds = env.get('CM_TERRAFORM_RUN_COMMANDS') + if env.get('MLC_TERRAFORM_RUN_COMMANDS'): + run_cmds = env.get('MLC_TERRAFORM_RUN_COMMANDS') for cmd in run_cmds: cmd = cmd.replace(":", "=") cmd = cmd.replace(";;", ",") diff --git a/script/run-terraform/meta.yaml b/script/run-terraform/meta.yaml index 6e00051e0..255503c6b 100644 --- a/script/run-terraform/meta.yaml +++ b/script/run-terraform/meta.yaml @@ -10,21 +10,21 @@ default_env: deps: - tags: get,terraform input_mapping: - cminit: CM_TERRAFORM_CM_INIT - destroy: CM_DESTROY_TERRAFORM - gcp_credentials_json_file: CM_GCP_CREDENTIALS_JSON_PATH - key_file: CM_SSH_KEY_FILE - run_cmds: CM_TERRAFORM_RUN_COMMANDS - ssh_key_file: CM_SSH_KEY_FILE + cminit: MLC_TERRAFORM_MLC_INIT + destroy: MLC_DESTROY_TERRAFORM + gcp_credentials_json_file: MLC_GCP_CREDENTIALS_JSON_PATH + key_file: MLC_SSH_KEY_FILE + run_cmds: 
MLC_TERRAFORM_RUN_COMMANDS + ssh_key_file: MLC_SSH_KEY_FILE new_env_keys: -- CM_TERRAFORM_RUN_DIR -- CM_TERRAFORM_CONFIG_DIR +- MLC_TERRAFORM_RUN_DIR +- MLC_TERRAFORM_CONFIG_DIR new_state_keys: -- CM_TF_NEW_INSTANCES_STATE +- MLC_TF_NEW_INSTANCES_STATE post_deps: - dynamic: true enable_if_env: - CM_DESTROY_TERRAFORM: + MLC_DESTROY_TERRAFORM: - 'on' names: - destroy-cmd @@ -64,7 +64,7 @@ variations: group: aws-instance-image arm64: env: - CM_INSTANCE_PLATFORM: arm64 + MLC_INSTANCE_PLATFORM: arm64 group: platform aws: default: true @@ -72,7 +72,7 @@ variations: aws-instance-type: t2.micro region: us-west-2 env: - CM_TERRAFORM_CONFIG_DIR_NAME: aws + MLC_TERRAFORM_CONFIG_DIR_NAME: aws group: cloud-provider aws_instance_image.#: env: @@ -136,7 +136,7 @@ variations: storage-size: storage_size.120 zone: zone.us-west1-a env: - CM_TERRAFORM_CONFIG_DIR_NAME: gcp + MLC_TERRAFORM_CONFIG_DIR_NAME: gcp group: cloud-provider gcp_instance_image.#: env: @@ -154,7 +154,7 @@ variations: default_variations: platform: arm64 env: - CM_TERRAFORM_AWS_GRAVITON_INSTANCE: 'yes' + MLC_TERRAFORM_AWS_GRAVITON_INSTANCE: 'yes' inf1.2xlarge: base: - aws @@ -187,7 +187,7 @@ variations: default_variations: platform: arm64 env: - CM_TERRAFORM_AWS_INFERENTIA_INSTANCE: 'yes' + MLC_TERRAFORM_AWS_INFERENTIA_INSTANCE: 'yes' inferentia,amazon-linux-2-kernel.510: default_variations: aws-instance-image: amazon-linux-2-kernel.510,arm64,us-west-2 @@ -312,7 +312,7 @@ variations: x86: default: true env: - CM_INSTANCE_PLATFORM: x86 + MLC_INSTANCE_PLATFORM: x86 group: platform zone.#: env: diff --git a/script/run-terraform/run.sh b/script/run-terraform/run.sh index 094cffcd9..14c3c0021 100644 --- a/script/run-terraform/run.sh +++ b/script/run-terraform/run.sh @@ -1,11 +1,11 @@ #!/bin/bash -if [[ ${CM_TERRAFORM_CONFIG_DIR} == "aws" ]]; then - source ${CM_TERRAFORM_CONFIG_DIR}/credentials.sh - source ${CM_TERRAFORM_CONFIG_DIR}/apply_credentials.sh +if [[ ${MLC_TERRAFORM_CONFIG_DIR} == "aws" ]]; then + source ${MLC_TERRAFORM_CONFIG_DIR}/credentials.sh + source ${MLC_TERRAFORM_CONFIG_DIR}/apply_credentials.sh fi -if [[ -z $CM_DESTROY_TERRAFORM ]]; then +if [[ -z $MLC_DESTROY_TERRAFORM ]]; then terraform init -input=false terraform plan -out=tfplan -input=false terraform apply -input=false tfplan diff --git a/script/run-vllm-server/customize.py b/script/run-vllm-server/customize.py index f8abd76c0..6946dd7ac 100644 --- a/script/run-vllm-server/customize.py +++ b/script/run-vllm-server/customize.py @@ -15,422 +15,422 @@ def preprocess(i): cmd_args = "" - model_name = env.get("CM_VLLM_SERVER_MODEL_NAME", False) + model_name = env.get("MLC_VLLM_SERVER_MODEL_NAME", False) if not model_name: return {'return': 1, 'error': 'Model name not specified'} else: - cmd_args += f" --model {env['CM_ML_MODEL_PATH']} --served-model-name {model_name}" + cmd_args += f" --model {env['MLC_ML_MODEL_PATH']} --served-model-name {model_name}" - tp_size = env.get("CM_VLLM_SERVER_TP_SIZE", False) + tp_size = env.get("MLC_VLLM_SERVER_TP_SIZE", False) if tp_size: cmd_args += f" --tensor-parallel-size {tp_size}" - pp_size = env.get("CM_VLLM_SERVER_PP_SIZE", False) + pp_size = env.get("MLC_VLLM_SERVER_PP_SIZE", False) if pp_size: cmd_args += f" --pipeline-parallel-size {pp_size}" - api_key = env.get("CM_VLLM_SERVER_API_KEY", "root") + api_key = env.get("MLC_VLLM_SERVER_API_KEY", "root") if pp_size: cmd_args += f" --api-key {api_key}" distributed_executor_backend = env.get( - "CM_VLLM_SERVER_DIST_EXEC_BACKEND", False) + "MLC_VLLM_SERVER_DIST_EXEC_BACKEND", False) if 
distributed_executor_backend: cmd_args += f" --distributed-executor-backend {distributed_executor_backend}" - host = env.get("CM_VLLM_SERVER_HOST", False) + host = env.get("MLC_VLLM_SERVER_HOST", False) if host: cmd_args += f" --host {host}" - port = env.get("CM_VLLM_SERVER_PORT", False) + port = env.get("MLC_VLLM_SERVER_PORT", False) if port: cmd_args += f" --port {port}" - uvicorn_log_level = env.get("CM_VLLM_SERVER_UVICORN_LOG_LEVEL", False) + uvicorn_log_level = env.get("MLC_VLLM_SERVER_UVICORN_LOG_LEVEL", False) if uvicorn_log_level: cmd_args += f" --uvicorn-log-level {uvicorn_log_level}" - allow_credentials = env.get("CM_VLLM_SERVER_ALLOW_CREDENTIALS", False) + allow_credentials = env.get("MLC_VLLM_SERVER_ALLOW_CREDENTIALS", False) if allow_credentials: cmd_args += f" --allow-credentials" - allowed_origins = env.get("CM_VLLM_SERVER_ALLOWED_ORIGINS", False) + allowed_origins = env.get("MLC_VLLM_SERVER_ALLOWED_ORIGINS", False) if allowed_origins: cmd_args += f" --allowed-origins {allowed_origins}" - allowed_methods = env.get("CM_VLLM_SERVER_ALLOWED_METHODS", False) + allowed_methods = env.get("MLC_VLLM_SERVER_ALLOWED_METHODS", False) if allowed_methods: cmd_args += f" --allowed-methods {allowed_methods}" - allowed_headers = env.get("CM_VLLM_SERVER_ALLOWED_HEADERS", False) + allowed_headers = env.get("MLC_VLLM_SERVER_ALLOWED_HEADERS", False) if allowed_headers: cmd_args += f" --allowed-headers {allowed_headers}" - lora_modules = env.get("CM_VLLM_SERVER_LORA_MODULES", False) + lora_modules = env.get("MLC_VLLM_SERVER_LORA_MODULES", False) if lora_modules: cmd_args += f" --lora-modules {lora_modules}" - prompt_adapters = env.get("CM_VLLM_SERVER_PROMPT_ADAPTERS", False) + prompt_adapters = env.get("MLC_VLLM_SERVER_PROMPT_ADAPTERS", False) if prompt_adapters: cmd_args += f" --prompt-adapters {prompt_adapters}" - chat_template = env.get("CM_VLLM_SERVER_CHAT_TEMPLATE", False) + chat_template = env.get("MLC_VLLM_SERVER_CHAT_TEMPLATE", False) if chat_template: cmd_args += f" --chat-template {chat_template}" - response_role = env.get("CM_VLLM_SERVER_RESPONSE_ROLE", False) + response_role = env.get("MLC_VLLM_SERVER_RESPONSE_ROLE", False) if response_role: cmd_args += f" --response-role {response_role}" - ssl_keyfile = env.get("CM_VLLM_SERVER_SSL_KEYFILE", False) + ssl_keyfile = env.get("MLC_VLLM_SERVER_SSL_KEYFILE", False) if ssl_keyfile: cmd_args += f" --ssl-keyfile {ssl_keyfile}" - ssl_certfile = env.get("CM_VLLM_SERVER_SSL_CERTFILE", False) + ssl_certfile = env.get("MLC_VLLM_SERVER_SSL_CERTFILE", False) if ssl_certfile: cmd_args += f" --ssl-certfile {ssl_certfile}" - ssl_ca_certs = env.get("CM_VLLM_SERVER_SSL_CA_CERTS", False) + ssl_ca_certs = env.get("MLC_VLLM_SERVER_SSL_CA_CERTS", False) if ssl_ca_certs: cmd_args += f" --ssl-ca-certs {ssl_ca_certs}" - ssl_cert_reqs = env.get("CM_VLLM_SERVER_SSL_CERT_REQS", False) + ssl_cert_reqs = env.get("MLC_VLLM_SERVER_SSL_CERT_REQS", False) if ssl_cert_reqs: cmd_args += f" --ssl-cert-reqs {ssl_cert_reqs}" - root_path = env.get("CM_VLLM_SERVER_ROOT_PATH", False) + root_path = env.get("MLC_VLLM_SERVER_ROOT_PATH", False) if root_path: cmd_args += f" --root-path {root_path}" - middleware = env.get("CM_VLLM_SERVER_MIDDLEWARE", False) + middleware = env.get("MLC_VLLM_SERVER_MIDDLEWARE", False) if middleware: cmd_args += f" --middleware {middleware}" - tokenizer = env.get("CM_VLLM_SERVER_TOKENIZER", False) + tokenizer = env.get("MLC_VLLM_SERVER_TOKENIZER", False) if tokenizer: cmd_args += f" --tokenizer {tokenizer}" - skip_tokenizer_init = 
env.get("CM_VLLM_SERVER_SKIP_TOKENIZER_INIT", False) + skip_tokenizer_init = env.get("MLC_VLLM_SERVER_SKIP_TOKENIZER_INIT", False) if skip_tokenizer_init: cmd_args += f" --skip-tokenizer-init" - revision = env.get("CM_VLLM_SERVER_REVISION", False) + revision = env.get("MLC_VLLM_SERVER_REVISION", False) if revision: cmd_args += f" --revision {revision}" - code_revision = env.get("CM_VLLM_SERVER_CODE_REVISION", False) + code_revision = env.get("MLC_VLLM_SERVER_CODE_REVISION", False) if code_revision: cmd_args += f" --code-revision {code_revision}" - tokenizer_revision = env.get("CM_VLLM_SERVER_TOKENIZER_REVISION", False) + tokenizer_revision = env.get("MLC_VLLM_SERVER_TOKENIZER_REVISION", False) if tokenizer_revision: cmd_args += f" --tokenizer-revision {tokenizer_revision}" - tokenizer_mode = env.get("CM_VLLM_SERVER_TOKENIZER_MODE", False) + tokenizer_mode = env.get("MLC_VLLM_SERVER_TOKENIZER_MODE", False) if tokenizer_mode: cmd_args += f" --tokenizer-mode {tokenizer_mode}" - trust_remote_code = env.get("CM_VLLM_SERVER_TRUST_REMOTE_CODE", False) + trust_remote_code = env.get("MLC_VLLM_SERVER_TRUST_REMOTE_CODE", False) if trust_remote_code: cmd_args += f" --trust-remote-code" - download_dir = env.get("CM_VLLM_SERVER_DOWNLOAD_DIR", False) + download_dir = env.get("MLC_VLLM_SERVER_DOWNLOAD_DIR", False) if download_dir: cmd_args += f" --download-dir {download_dir}" - load_format = env.get("CM_VLLM_SERVER_LOAD_FORMAT", False) + load_format = env.get("MLC_VLLM_SERVER_LOAD_FORMAT", False) if load_format: cmd_args += f" --load-format {load_format}" - dtype = env.get("CM_VLLM_SERVER_DTYPE", False) + dtype = env.get("MLC_VLLM_SERVER_DTYPE", False) if dtype: cmd_args += f" --dtype {dtype}" - kv_cache_dtype = env.get("CM_VLLM_SERVER_KV_CACHE_DTYPE", False) + kv_cache_dtype = env.get("MLC_VLLM_SERVER_KV_CACHE_DTYPE", False) if kv_cache_dtype: cmd_args += f" --kv-cache-dtype {kv_cache_dtype}" quantization_param_path = env.get( - "CM_VLLM_SERVER_QUANTIZATION_PARAM_PATH", False) + "MLC_VLLM_SERVER_QUANTIZATION_PARAM_PATH", False) if quantization_param_path: cmd_args += f" --quantization-param-path {quantization_param_path}" - max_model_len = env.get("CM_VLLM_SERVER_MAX_MODEL_LEN", False) + max_model_len = env.get("MLC_VLLM_SERVER_MAX_MODEL_LEN", False) if max_model_len: cmd_args += f" --max-model-len {max_model_len}" guided_decoding_backend = env.get( - "CM_VLLM_SERVER_GUIDED_DECODING_BACKEND", False) + "MLC_VLLM_SERVER_GUIDED_DECODING_BACKEND", False) if guided_decoding_backend: cmd_args += f" --guided-decoding-backend {guided_decoding_backend}" - worker_use_ray = env.get("CM_VLLM_SERVER_WORKER_USE_RAY", False) + worker_use_ray = env.get("MLC_VLLM_SERVER_WORKER_USE_RAY", False) if worker_use_ray: cmd_args += f" --worker-use-ray" max_parallel_loading_workers = env.get( - "CM_VLLM_SERVER_MAX_PARALLEL_LOADING_WORKERS", False) + "MLC_VLLM_SERVER_MAX_PARALLEL_LOADING_WORKERS", False) if max_parallel_loading_workers: cmd_args += f" --max-parallel-loading-workers {max_parallel_loading_workers}" ray_workers_use_nsight = env.get( - "CM_VLLM_SERVER_RAY_WORKERS_USE_NSIGHT", False) + "MLC_VLLM_SERVER_RAY_WORKERS_USE_NSIGHT", False) if ray_workers_use_nsight: cmd_args += f" --ray-workers-use-nsight" - block_size = env.get("CM_VLLM_SERVER_BLOCK_SIZE", False) + block_size = env.get("MLC_VLLM_SERVER_BLOCK_SIZE", False) if block_size: cmd_args += f" --block-size {block_size}" enable_prefix_caching = env.get( - "CM_VLLM_SERVER_ENABLE_PREFIX_CACHING", False) + "MLC_VLLM_SERVER_ENABLE_PREFIX_CACHING", False) if 
enable_prefix_caching: cmd_args += f" --enable-prefix-caching" disable_sliding_window = env.get( - "CM_VLLM_SERVER_DISABLE_SLIDING_WINDOW", False) + "MLC_VLLM_SERVER_DISABLE_SLIDING_WINDOW", False) if disable_sliding_window: cmd_args += f" --disable-sliding-window" use_v2_block_manager = env.get( - "CM_VLLM_SERVER_USE_V2_BLOCK_MANAGER", False) + "MLC_VLLM_SERVER_USE_V2_BLOCK_MANAGER", False) if use_v2_block_manager: cmd_args += f" --use-v2-block-manager" - num_lookahead_slots = env.get("CM_VLLM_SERVER_NUM_LOOKAHEAD_SLOTS", False) + num_lookahead_slots = env.get("MLC_VLLM_SERVER_NUM_LOOKAHEAD_SLOTS", False) if num_lookahead_slots: cmd_args += f" --num-lookahead-slots {num_lookahead_slots}" - seed = env.get("CM_VLLM_SERVER_SEED", False) + seed = env.get("MLC_VLLM_SERVER_SEED", False) if seed: cmd_args += f" --seed {seed}" - swap_space = env.get("CM_VLLM_SERVER_SWAP_SPACE", False) + swap_space = env.get("MLC_VLLM_SERVER_SWAP_SPACE", False) if swap_space: cmd_args += f" --swap-space {swap_space}" gpu_memory_utilization = env.get( - "CM_VLLM_SERVER_GPU_MEMORY_UTILIZATION", False) + "MLC_VLLM_SERVER_GPU_MEMORY_UTILIZATION", False) if gpu_memory_utilization: cmd_args += f" --gpu-memory-utilization {gpu_memory_utilization}" num_gpu_blocks_override = env.get( - "CM_VLLM_SERVER_NUM_GPU_BLOCKS_OVERRIDE", False) + "MLC_VLLM_SERVER_NUM_GPU_BLOCKS_OVERRIDE", False) if num_gpu_blocks_override: cmd_args += f" --num-gpu-blocks-override {num_gpu_blocks_override}" max_num_batched_tokens = env.get( - "CM_VLLM_SERVER_MAX_NUM_BATCHED_TOKENS", False) + "MLC_VLLM_SERVER_MAX_NUM_BATCHED_TOKENS", False) if max_num_batched_tokens: cmd_args += f" --max-num-batched-tokens {max_num_batched_tokens}" - max_num_seqs = env.get("CM_VLLM_SERVER_MAX_NUM_SEQS", False) + max_num_seqs = env.get("MLC_VLLM_SERVER_MAX_NUM_SEQS", False) if max_num_seqs: cmd_args += f" --max-num-seqs {max_num_seqs}" - max_logprobs = env.get("CM_VLLM_SERVER_MAX_LOGPROBS", False) + max_logprobs = env.get("MLC_VLLM_SERVER_MAX_LOGPROBS", False) if max_logprobs: cmd_args += f" --max-logprobs {max_logprobs}" - disable_log_stats = env.get("CM_VLLM_SERVER_DISABLE_LOG_STATS", False) + disable_log_stats = env.get("MLC_VLLM_SERVER_DISABLE_LOG_STATS", False) if disable_log_stats: cmd_args += f" --disable-log-stats" - quantization = env.get("CM_VLLM_SERVER_QUANTIZATION", False) + quantization = env.get("MLC_VLLM_SERVER_QUANTIZATION", False) if quantization: cmd_args += f" --quantization {quantization}" - rope_scaling = env.get("CM_VLLM_SERVER_ROPE_SCALING", False) + rope_scaling = env.get("MLC_VLLM_SERVER_ROPE_SCALING", False) if rope_scaling: cmd_args += f" --rope-scaling {rope_scaling}" - rope_theta = env.get("CM_VLLM_SERVER_ROPE_THETA", False) + rope_theta = env.get("MLC_VLLM_SERVER_ROPE_THETA", False) if rope_theta: cmd_args += f" --rope-theta {rope_theta}" - enforce_eager = env.get("CM_VLLM_SERVER_ENFORCE_EAGER", False) + enforce_eager = env.get("MLC_VLLM_SERVER_ENFORCE_EAGER", False) if enforce_eager: cmd_args += f" --enforce-eager" max_context_len_to_capture = env.get( - "CM_VLLM_SERVER_MAX_CONTEXT_LEN_TO_CAPTURE", False) + "MLC_VLLM_SERVER_MAX_CONTEXT_LEN_TO_CAPTURE", False) if max_context_len_to_capture: cmd_args += f" --max-context-len-to-capture {max_context_len_to_capture}" max_seq_len_to_capture = env.get( - "CM_VLLM_SERVER_MAX_SEQ_LEN_TO_CAPTURE", False) + "MLC_VLLM_SERVER_MAX_SEQ_LEN_TO_CAPTURE", False) if max_seq_len_to_capture: cmd_args += f" --max-seq-len-to-capture {max_seq_len_to_capture}" disable_custom_all_reduce = env.get( - 
"CM_VLLM_SERVER_DISABLE_CUSTOM_ALL_REDUCE", False) + "MLC_VLLM_SERVER_DISABLE_CUSTOM_ALL_REDUCE", False) if disable_custom_all_reduce: cmd_args += f" --disable-custom-all-reduce" - tokenizer_pool_size = env.get("CM_VLLM_SERVER_TOKENIZER_POOL_SIZE", False) + tokenizer_pool_size = env.get("MLC_VLLM_SERVER_TOKENIZER_POOL_SIZE", False) if tokenizer_pool_size: cmd_args += f" --tokenizer-pool-size {tokenizer_pool_size}" - tokenizer_pool_type = env.get("CM_VLLM_SERVER_TOKENIZER_POOL_TYPE", False) + tokenizer_pool_type = env.get("MLC_VLLM_SERVER_TOKENIZER_POOL_TYPE", False) if tokenizer_pool_type: cmd_args += f" --tokenizer-pool-type {tokenizer_pool_type}" tokenizer_pool_extra_config = env.get( - "CM_VLLM_SERVER_TOKENIZER_POOL_EXTRA_CONFIG", False) + "MLC_VLLM_SERVER_TOKENIZER_POOL_EXTRA_CONFIG", False) if tokenizer_pool_extra_config: cmd_args += f" --tokenizer-pool-extra-config {tokenizer_pool_extra_config}" - enable_lora = env.get("CM_VLLM_SERVER_ENABLE_LORA", False) + enable_lora = env.get("MLC_VLLM_SERVER_ENABLE_LORA", False) if enable_lora: cmd_args += f" --enable-lora" - max_loras = env.get("CM_VLLM_SERVER_MAX_LORAS", False) + max_loras = env.get("MLC_VLLM_SERVER_MAX_LORAS", False) if max_loras: cmd_args += f" --max-loras {max_loras}" - max_lora_rank = env.get("CM_VLLM_SERVER_MAX_LORA_RANK", False) + max_lora_rank = env.get("MLC_VLLM_SERVER_MAX_LORA_RANK", False) if max_lora_rank: cmd_args += f" --max-lora-rank {max_lora_rank}" lora_extra_vocab_size = env.get( - "CM_VLLM_SERVER_LORA_EXTRA_VOCAB_SIZE", False) + "MLC_VLLM_SERVER_LORA_EXTRA_VOCAB_SIZE", False) if lora_extra_vocab_size: cmd_args += f" --lora-extra-vocab-size {lora_extra_vocab_size}" - lora_dtype = env.get("CM_VLLM_SERVER_LORA_DTYPE", False) + lora_dtype = env.get("MLC_VLLM_SERVER_LORA_DTYPE", False) if lora_dtype: cmd_args += f" --lora-dtype {lora_dtype}" long_lora_scaling_factors = env.get( - "CM_VLLM_SERVER_LONG_LORA_SCALING_FACTORS", False) + "MLC_VLLM_SERVER_LONG_LORA_SCALING_FACTORS", False) if long_lora_scaling_factors: cmd_args += f" --long-lora-scaling-factors {long_lora_scaling_factors}" - max_cpu_loras = env.get("CM_VLLM_SERVER_MAX_CPU_LORAS", False) + max_cpu_loras = env.get("MLC_VLLM_SERVER_MAX_CPU_LORAS", False) if max_cpu_loras: cmd_args += f" --max-cpu-loras {max_cpu_loras}" - fully_sharded_loras = env.get("CM_VLLM_SERVER_FULLY_SHARDED_LORAS", False) + fully_sharded_loras = env.get("MLC_VLLM_SERVER_FULLY_SHARDED_LORAS", False) if fully_sharded_loras: cmd_args += f" --fully-sharded-loras" enable_prompt_adapter = env.get( - "CM_VLLM_SERVER_ENABLE_PROMPT_ADAPTER", False) + "MLC_VLLM_SERVER_ENABLE_PROMPT_ADAPTER", False) if enable_prompt_adapter: cmd_args += f" --enable-prompt-adapter" - max_prompt_adapters = env.get("CM_VLLM_SERVER_MAX_PROMPT_ADAPTERS", False) + max_prompt_adapters = env.get("MLC_VLLM_SERVER_MAX_PROMPT_ADAPTERS", False) if max_prompt_adapters: cmd_args += f" --max-prompt-adapters {max_prompt_adapters}" max_prompt_adapter_token = env.get( - "CM_VLLM_SERVER_MAX_PROMPT_ADAPTER_TOKEN", False) + "MLC_VLLM_SERVER_MAX_PROMPT_ADAPTER_TOKEN", False) if max_prompt_adapter_token: cmd_args += f" --max-prompt-adapter-token {max_prompt_adapter_token}" - device = env.get("CM_VLLM_SERVER_DEVICE", False) + device = env.get("MLC_VLLM_SERVER_DEVICE", False) if device: cmd_args += f" --device {device}" scheduler_delay_factor = env.get( - "CM_VLLM_SERVER_SCHEDULER_DELAY_FACTOR", False) + "MLC_VLLM_SERVER_SCHEDULER_DELAY_FACTOR", False) if scheduler_delay_factor: cmd_args += f" --scheduler-delay-factor 
{scheduler_delay_factor}" enable_chunked_prefill = env.get( - "CM_VLLM_SERVER_ENABLE_CHUNKED_PREFILL", False) + "MLC_VLLM_SERVER_ENABLE_CHUNKED_PREFILL", False) if enable_chunked_prefill: cmd_args += f" --enable-chunked-prefill" - speculative_model = env.get("CM_VLLM_SERVER_SPECULATIVE_MODEL", False) + speculative_model = env.get("MLC_VLLM_SERVER_SPECULATIVE_MODEL", False) if speculative_model: cmd_args += f" --speculative-model {speculative_model}" num_speculative_tokens = env.get( - "CM_VLLM_SERVER_NUM_SPECULATIVE_TOKENS", False) + "MLC_VLLM_SERVER_NUM_SPECULATIVE_TOKENS", False) if num_speculative_tokens: cmd_args += f" --num-speculative-tokens {num_speculative_tokens}" speculative_draft_tensor_parallel_size = env.get( - "CM_VLLM_SERVER_SPECULATIVE_DRAFT_TENSOR_PARALLEL_SIZE", False) + "MLC_VLLM_SERVER_SPECULATIVE_DRAFT_TENSOR_PARALLEL_SIZE", False) if speculative_draft_tensor_parallel_size: cmd_args += f" --speculative-draft-tensor-parallel-size {speculative_draft_tensor_parallel_size}" speculative_max_model_len = env.get( - "CM_VLLM_SERVER_SPECULATIVE_MAX_MODEL_LEN", False) + "MLC_VLLM_SERVER_SPECULATIVE_MAX_MODEL_LEN", False) if speculative_max_model_len: cmd_args += f" --speculative-max-model-len {speculative_max_model_len}" speculative_disable_by_batch_size = env.get( - "CM_VLLM_SERVER_SPECULATIVE_DISABLE_BY_BATCH_SIZE", False) + "MLC_VLLM_SERVER_SPECULATIVE_DISABLE_BY_BATCH_SIZE", False) if speculative_disable_by_batch_size: cmd_args += f" --speculative-disable-by-batch-size {speculative_disable_by_batch_size}" ngram_prompt_lookup_max = env.get( - "CM_VLLM_SERVER_NGRAM_PROMPT_LOOKUP_MAX", False) + "MLC_VLLM_SERVER_NGRAM_PROMPT_LOOKUP_MAX", False) if ngram_prompt_lookup_max: cmd_args += f" --ngram-prompt-lookup-max {ngram_prompt_lookup_max}" ngram_prompt_lookup_min = env.get( - "CM_VLLM_SERVER_NGRAM_PROMPT_LOOKUP_MIN", False) + "MLC_VLLM_SERVER_NGRAM_PROMPT_LOOKUP_MIN", False) if ngram_prompt_lookup_min: cmd_args += f" --ngram-prompt-lookup-min {ngram_prompt_lookup_min}" spec_decoding_acceptance_method = env.get( - "CM_VLLM_SERVER_SPEC_DECODING_ACCEPTANCE_METHOD", False) + "MLC_VLLM_SERVER_SPEC_DECODING_ACCEPTANCE_METHOD", False) if spec_decoding_acceptance_method: cmd_args += f" --spec-decoding-acceptance-method {spec_decoding_acceptance_method}" typical_acceptance_sampler_posterior_threshold = env.get( - "CM_VLLM_SERVER_TYPICAL_ACCEPTANCE_SAMPLER_POSTERIOR_THRESHOLD", False) + "MLC_VLLM_SERVER_TYPICAL_ACCEPTANCE_SAMPLER_POSTERIOR_THRESHOLD", False) if typical_acceptance_sampler_posterior_threshold: cmd_args += f" --typical-acceptance-sampler-posterior-threshold {typical_acceptance_sampler_posterior_threshold}" typical_acceptance_sampler_posterior_alpha = env.get( - "CM_VLLM_SERVER_TYPICAL_ACCEPTANCE_SAMPLER_POSTERIOR_ALPHA", False) + "MLC_VLLM_SERVER_TYPICAL_ACCEPTANCE_SAMPLER_POSTERIOR_ALPHA", False) if typical_acceptance_sampler_posterior_alpha: cmd_args += f" --typical-acceptance-sampler-posterior-alpha {typical_acceptance_sampler_posterior_alpha}" model_loader_extra_config = env.get( - "CM_VLLM_SERVER_MODEL_LOADER_EXTRA_CONFIG", False) + "MLC_VLLM_SERVER_MODEL_LOADER_EXTRA_CONFIG", False) if model_loader_extra_config: cmd_args += f" --model-loader-extra-config {model_loader_extra_config}" - preemption_mode = env.get("CM_VLLM_SERVER_PREEMPTION_MODE", False) + preemption_mode = env.get("MLC_VLLM_SERVER_PREEMPTION_MODE", False) if preemption_mode: cmd_args += f" --preemption-mode {preemption_mode}" - served_model_name = env.get("CM_VLLM_SERVER_SERVED_MODEL_NAME", False) + 
served_model_name = env.get("MLC_VLLM_SERVER_SERVED_MODEL_NAME", False) if served_model_name: cmd_args += f" --served-model-name {served_model_name}" qlora_adapter_name_or_path = env.get( - "CM_VLLM_SERVER_QLORA_ADAPTER_NAME_OR_PATH", False) + "MLC_VLLM_SERVER_QLORA_ADAPTER_NAME_OR_PATH", False) if qlora_adapter_name_or_path: cmd_args += f" --qlora-adapter-name-or-path {qlora_adapter_name_or_path}" otlp_traces_endpoint = env.get( - "CM_VLLM_SERVER_OTLP_TRACES_ENDPOINT", False) + "MLC_VLLM_SERVER_OTLP_TRACES_ENDPOINT", False) if otlp_traces_endpoint: cmd_args += f" --otlp-traces-endpoint {otlp_traces_endpoint}" - engine_use_ray = env.get("CM_VLLM_SERVER_ENGINE_USE_RAY", False) + engine_use_ray = env.get("MLC_VLLM_SERVER_ENGINE_USE_RAY", False) if engine_use_ray: cmd_args += f" --engine-use-ray" disable_log_requests = env.get( - "CM_VLLM_SERVER_DISABLE_LOG_REQUESTS", False) + "MLC_VLLM_SERVER_DISABLE_LOG_REQUESTS", False) if disable_log_requests: cmd_args += f" --disable-log-requests" - max_log_len = env.get("CM_VLLM_SERVER_MAX_LOG_LEN", False) + max_log_len = env.get("MLC_VLLM_SERVER_MAX_LOG_LEN", False) if max_log_len: cmd_args += f" --max-log-len {max_log_len}" - cmd = f"{env['CM_PYTHON_BIN_WITH_PATH']} -m vllm.entrypoints.openai.api_server {cmd_args}" + cmd = f"{env['MLC_PYTHON_BIN_WITH_PATH']} -m vllm.entrypoints.openai.api_server {cmd_args}" print(cmd) - env['CM_VLLM_RUN_CMD'] = cmd + env['MLC_VLLM_RUN_CMD'] = cmd return {'return': 0} diff --git a/script/run-vllm-server/meta.yaml b/script/run-vllm-server/meta.yaml index f75a3d9fe..bb49dc9be 100644 --- a/script/run-vllm-server/meta.yaml +++ b/script/run-vllm-server/meta.yaml @@ -15,102 +15,102 @@ tags: - vllm-server input_mapping: - model: CM_VLLM_SERVER_MODEL_NAME - tp_size: CM_VLLM_SERVER_TP_SIZE - pp_size: CM_VLLM_SERVER_PP_SIZE - distributed-executor-backend: CM_VLLM_SERVER_DIST_EXEC_BACKEND - api_key: CM_VLLM_SERVER_API_KEY - skip_docker_model_download: CM_VLLM_SKIP_DOCKER_MODEL_DOWNLOAD - host: CM_VLLM_SERVER_HOST - port: CM_VLLM_SERVER_PORT - uvicorn_log_level: CM_VLLM_SERVER_UVICORN_LOG_LEVEL - allow_credentials: CM_VLLM_SERVER_ALLOW_CREDENTIALS - allowed_origins: CM_VLLM_SERVER_ALLOWED_ORIGINS - allowed_methods: CM_VLLM_SERVER_ALLOWED_METHODS - allowed_headers: CM_VLLM_SERVER_ALLOWED_HEADERS - lora_modules: CM_VLLM_SERVER_LORA_MODULES - prompt_adapters: CM_VLLM_SERVER_PROMPT_ADAPTERS - chat_template: CM_VLLM_SERVER_CHAT_TEMPLATE - response_role: CM_VLLM_SERVER_RESPONSE_ROLE - ssl_keyfile: CM_VLLM_SERVER_SSL_KEYFILE - ssl_certfile: CM_VLLM_SERVER_SSL_CERTFILE - ssl_ca_certs: CM_VLLM_SERVER_SSL_CA_CERTS - ssl_cert_reqs: CM_VLLM_SERVER_SSL_CERT_REQS - root_path: CM_VLLM_SERVER_ROOT_PATH - middleware: CM_VLLM_SERVER_MIDDLEWARE - tokenizer: CM_VLLM_SERVER_TOKENIZER - skip_tokenizer_init: CM_VLLM_SERVER_SKIP_TOKENIZER_INIT - revision: CM_VLLM_SERVER_REVISION - code_revision: CM_VLLM_SERVER_CODE_REVISION - tokenizer_revision: CM_VLLM_SERVER_TOKENIZER_REVISION - tokenizer_mode: CM_VLLM_SERVER_TOKENIZER_MODE - trust_remote_code: CM_VLLM_SERVER_TRUST_REMOTE_CODE - download_dir: CM_VLLM_SERVER_DOWNLOAD_DIR - load_format: CM_VLLM_SERVER_LOAD_FORMAT - dtype: CM_VLLM_SERVER_DTYPE - kv_cache_dtype: CM_VLLM_SERVER_KV_CACHE_DTYPE - quantization_param_path: CM_VLLM_SERVER_QUANTIZATION_PARAM_PATH - max_model_len: CM_VLLM_SERVER_MAX_MODEL_LEN - guided_decoding_backend: CM_VLLM_SERVER_GUIDED_DECODING_BACKEND - worker_use_ray: CM_VLLM_SERVER_WORKER_USE_RAY - pipeline_parallel_size: CM_VLLM_SERVER_PIPELINE_PARALLEL_SIZE - 
max_parallel_loading_workers: CM_VLLM_SERVER_MAX_PARALLEL_LOADING_WORKERS - ray_workers_use_nsight: CM_VLLM_SERVER_RAY_WORKERS_USE_NSIGHT - block_size: CM_VLLM_SERVER_BLOCK_SIZE - enable_prefix_caching: CM_VLLM_SERVER_ENABLE_PREFIX_CACHING - disable_sliding_window: CM_VLLM_SERVER_DISABLE_SLIDING_WINDOW - use_v2_block_manager: CM_VLLM_SERVER_USE_V2_BLOCK_MANAGER - num_lookahead_slots: CM_VLLM_SERVER_NUM_LOOKAHEAD_SLOTS - seed: CM_VLLM_SERVER_SEED - swap_space: CM_VLLM_SERVER_SWAP_SPACE - gpu_memory_utilization: CM_VLLM_SERVER_GPU_MEMORY_UTILIZATION - num_gpu_blocks_override: CM_VLLM_SERVER_NUM_GPU_BLOCKS_OVERRIDE - max_num_batched_tokens: CM_VLLM_SERVER_MAX_NUM_BATCHED_TOKENS - max_num_seqs: CM_VLLM_SERVER_MAX_NUM_SEQS - max_logprobs: CM_VLLM_SERVER_MAX_LOGPROBS - disable_log_stats: CM_VLLM_SERVER_DISABLE_LOG_STATS - quantization: CM_VLLM_SERVER_QUANTIZATION - rope_scaling: CM_VLLM_SERVER_ROPE_SCALING - rope_theta: CM_VLLM_SERVER_ROPE_THETA - enforce_eager: CM_VLLM_SERVER_ENFORCE_EAGER - max_context_len_to_capture: CM_VLLM_SERVER_MAX_CONTEXT_LEN_TO_CAPTURE - max_seq_len_to_capture: CM_VLLM_SERVER_MAX_SEQ_LEN_TO_CAPTURE - disable_custom_all_reduce: CM_VLLM_SERVER_DISABLE_CUSTOM_ALL_REDUCE - tokenizer_pool_size: CM_VLLM_SERVER_TOKENIZER_POOL_SIZE - tokenizer_pool_type: CM_VLLM_SERVER_TOKENIZER_POOL_TYPE - tokenizer_pool_extra_config: CM_VLLM_SERVER_TOKENIZER_POOL_EXTRA_CONFIG - enable_lora: CM_VLLM_SERVER_ENABLE_LORA - max_loras: CM_VLLM_SERVER_MAX_LORAS - max_lora_rank: CM_VLLM_SERVER_MAX_LORA_RANK - lora_extra_vocab_size: CM_VLLM_SERVER_LORA_EXTRA_VOCAB_SIZE - lora_dtype: CM_VLLM_SERVER_LORA_DTYPE - long_lora_scaling_factors: CM_VLLM_SERVER_LONG_LORA_SCALING_FACTORS - max_cpu_loras: CM_VLLM_SERVER_MAX_CPU_LORAS - fully_sharded_loras: CM_VLLM_SERVER_FULLY_SHARDED_LORAS - enable_prompt_adapter: CM_VLLM_SERVER_ENABLE_PROMPT_ADAPTER - max_prompt_adapters: CM_VLLM_SERVER_MAX_PROMPT_ADAPTERS - max_prompt_adapter_token: CM_VLLM_SERVER_MAX_PROMPT_ADAPTER_TOKEN - device: CM_VLLM_SERVER_DEVICE - scheduler_delay_factor: CM_VLLM_SERVER_SCHEDULER_DELAY_FACTOR - enable_chunked_prefill: CM_VLLM_SERVER_ENABLE_CHUNKED_PREFILL - speculative_model: CM_VLLM_SERVER_SPECULATIVE_MODEL - num_speculative_tokens: CM_VLLM_SERVER_NUM_SPECULATIVE_TOKENS - speculative_draft_tensor_parallel_size: CM_VLLM_SERVER_SPECULATIVE_DRAFT_TENSOR_PARALLEL_SIZE - speculative_max_model_len: CM_VLLM_SERVER_SPECULATIVE_MAX_MODEL_LEN - speculative_disable_by_batch_size: CM_VLLM_SERVER_SPECULATIVE_DISABLE_BY_BATCH_SIZE - ngram_prompt_lookup_max: CM_VLLM_SERVER_NGRAM_PROMPT_LOOKUP_MAX - ngram_prompt_lookup_min: CM_VLLM_SERVER_NGRAM_PROMPT_LOOKUP_MIN - spec_decoding_acceptance_method: CM_VLLM_SERVER_SPEC_DECODING_ACCEPTANCE_METHOD - typical_acceptance_sampler_posterior_threshold: CM_VLLM_SERVER_TYPICAL_ACCEPTANCE_SAMPLER_POSTERIOR_THRESHOLD - typical_acceptance_sampler_posterior_alpha: CM_VLLM_SERVER_TYPICAL_ACCEPTANCE_SAMPLER_POSTERIOR_ALPHA - model_loader_extra_config: CM_VLLM_SERVER_MODEL_LOADER_EXTRA_CONFIG - preemption_mode: CM_VLLM_SERVER_PREEMPTION_MODE - served_model_name: CM_VLLM_SERVER_SERVED_MODEL_NAME - qlora_adapter_name_or_path: CM_VLLM_SERVER_QLORA_ADAPTER_NAME_OR_PATH - otlp_traces_endpoint: CM_VLLM_SERVER_OTLP_TRACES_ENDPOINT - engine_use_ray: CM_VLLM_SERVER_ENGINE_USE_RAY - disable_log_requests: CM_VLLM_SERVER_DISABLE_LOG_REQUESTS - max_log_len: CM_VLLM_SERVER_MAX_LOG_LEN + model: MLC_VLLM_SERVER_MODEL_NAME + tp_size: MLC_VLLM_SERVER_TP_SIZE + pp_size: MLC_VLLM_SERVER_PP_SIZE + distributed-executor-backend: 
MLC_VLLM_SERVER_DIST_EXEC_BACKEND + api_key: MLC_VLLM_SERVER_API_KEY + skip_docker_model_download: MLC_VLLM_SKIP_DOCKER_MODEL_DOWNLOAD + host: MLC_VLLM_SERVER_HOST + port: MLC_VLLM_SERVER_PORT + uvicorn_log_level: MLC_VLLM_SERVER_UVICORN_LOG_LEVEL + allow_credentials: MLC_VLLM_SERVER_ALLOW_CREDENTIALS + allowed_origins: MLC_VLLM_SERVER_ALLOWED_ORIGINS + allowed_methods: MLC_VLLM_SERVER_ALLOWED_METHODS + allowed_headers: MLC_VLLM_SERVER_ALLOWED_HEADERS + lora_modules: MLC_VLLM_SERVER_LORA_MODULES + prompt_adapters: MLC_VLLM_SERVER_PROMPT_ADAPTERS + chat_template: MLC_VLLM_SERVER_CHAT_TEMPLATE + response_role: MLC_VLLM_SERVER_RESPONSE_ROLE + ssl_keyfile: MLC_VLLM_SERVER_SSL_KEYFILE + ssl_certfile: MLC_VLLM_SERVER_SSL_CERTFILE + ssl_ca_certs: MLC_VLLM_SERVER_SSL_CA_CERTS + ssl_cert_reqs: MLC_VLLM_SERVER_SSL_CERT_REQS + root_path: MLC_VLLM_SERVER_ROOT_PATH + middleware: MLC_VLLM_SERVER_MIDDLEWARE + tokenizer: MLC_VLLM_SERVER_TOKENIZER + skip_tokenizer_init: MLC_VLLM_SERVER_SKIP_TOKENIZER_INIT + revision: MLC_VLLM_SERVER_REVISION + code_revision: MLC_VLLM_SERVER_CODE_REVISION + tokenizer_revision: MLC_VLLM_SERVER_TOKENIZER_REVISION + tokenizer_mode: MLC_VLLM_SERVER_TOKENIZER_MODE + trust_remote_code: MLC_VLLM_SERVER_TRUST_REMOTE_CODE + download_dir: MLC_VLLM_SERVER_DOWNLOAD_DIR + load_format: MLC_VLLM_SERVER_LOAD_FORMAT + dtype: MLC_VLLM_SERVER_DTYPE + kv_cache_dtype: MLC_VLLM_SERVER_KV_CACHE_DTYPE + quantization_param_path: MLC_VLLM_SERVER_QUANTIZATION_PARAM_PATH + max_model_len: MLC_VLLM_SERVER_MAX_MODEL_LEN + guided_decoding_backend: MLC_VLLM_SERVER_GUIDED_DECODING_BACKEND + worker_use_ray: MLC_VLLM_SERVER_WORKER_USE_RAY + pipeline_parallel_size: MLC_VLLM_SERVER_PIPELINE_PARALLEL_SIZE + max_parallel_loading_workers: MLC_VLLM_SERVER_MAX_PARALLEL_LOADING_WORKERS + ray_workers_use_nsight: MLC_VLLM_SERVER_RAY_WORKERS_USE_NSIGHT + block_size: MLC_VLLM_SERVER_BLOCK_SIZE + enable_prefix_caching: MLC_VLLM_SERVER_ENABLE_PREFIX_CACHING + disable_sliding_window: MLC_VLLM_SERVER_DISABLE_SLIDING_WINDOW + use_v2_block_manager: MLC_VLLM_SERVER_USE_V2_BLOCK_MANAGER + num_lookahead_slots: MLC_VLLM_SERVER_NUM_LOOKAHEAD_SLOTS + seed: MLC_VLLM_SERVER_SEED + swap_space: MLC_VLLM_SERVER_SWAP_SPACE + gpu_memory_utilization: MLC_VLLM_SERVER_GPU_MEMORY_UTILIZATION + num_gpu_blocks_override: MLC_VLLM_SERVER_NUM_GPU_BLOCKS_OVERRIDE + max_num_batched_tokens: MLC_VLLM_SERVER_MAX_NUM_BATCHED_TOKENS + max_num_seqs: MLC_VLLM_SERVER_MAX_NUM_SEQS + max_logprobs: MLC_VLLM_SERVER_MAX_LOGPROBS + disable_log_stats: MLC_VLLM_SERVER_DISABLE_LOG_STATS + quantization: MLC_VLLM_SERVER_QUANTIZATION + rope_scaling: MLC_VLLM_SERVER_ROPE_SCALING + rope_theta: MLC_VLLM_SERVER_ROPE_THETA + enforce_eager: MLC_VLLM_SERVER_ENFORCE_EAGER + max_context_len_to_capture: MLC_VLLM_SERVER_MAX_CONTEXT_LEN_TO_CAPTURE + max_seq_len_to_capture: MLC_VLLM_SERVER_MAX_SEQ_LEN_TO_CAPTURE + disable_custom_all_reduce: MLC_VLLM_SERVER_DISABLE_CUSTOM_ALL_REDUCE + tokenizer_pool_size: MLC_VLLM_SERVER_TOKENIZER_POOL_SIZE + tokenizer_pool_type: MLC_VLLM_SERVER_TOKENIZER_POOL_TYPE + tokenizer_pool_extra_config: MLC_VLLM_SERVER_TOKENIZER_POOL_EXTRA_CONFIG + enable_lora: MLC_VLLM_SERVER_ENABLE_LORA + max_loras: MLC_VLLM_SERVER_MAX_LORAS + max_lora_rank: MLC_VLLM_SERVER_MAX_LORA_RANK + lora_extra_vocab_size: MLC_VLLM_SERVER_LORA_EXTRA_VOCAB_SIZE + lora_dtype: MLC_VLLM_SERVER_LORA_DTYPE + long_lora_scaling_factors: MLC_VLLM_SERVER_LONG_LORA_SCALING_FACTORS + max_cpu_loras: MLC_VLLM_SERVER_MAX_CPU_LORAS + fully_sharded_loras: MLC_VLLM_SERVER_FULLY_SHARDED_LORAS + 
enable_prompt_adapter: MLC_VLLM_SERVER_ENABLE_PROMPT_ADAPTER + max_prompt_adapters: MLC_VLLM_SERVER_MAX_PROMPT_ADAPTERS + max_prompt_adapter_token: MLC_VLLM_SERVER_MAX_PROMPT_ADAPTER_TOKEN + device: MLC_VLLM_SERVER_DEVICE + scheduler_delay_factor: MLC_VLLM_SERVER_SCHEDULER_DELAY_FACTOR + enable_chunked_prefill: MLC_VLLM_SERVER_ENABLE_CHUNKED_PREFILL + speculative_model: MLC_VLLM_SERVER_SPECULATIVE_MODEL + num_speculative_tokens: MLC_VLLM_SERVER_NUM_SPECULATIVE_TOKENS + speculative_draft_tensor_parallel_size: MLC_VLLM_SERVER_SPECULATIVE_DRAFT_TENSOR_PARALLEL_SIZE + speculative_max_model_len: MLC_VLLM_SERVER_SPECULATIVE_MAX_MODEL_LEN + speculative_disable_by_batch_size: MLC_VLLM_SERVER_SPECULATIVE_DISABLE_BY_BATCH_SIZE + ngram_prompt_lookup_max: MLC_VLLM_SERVER_NGRAM_PROMPT_LOOKUP_MAX + ngram_prompt_lookup_min: MLC_VLLM_SERVER_NGRAM_PROMPT_LOOKUP_MIN + spec_decoding_acceptance_method: MLC_VLLM_SERVER_SPEC_DECODING_ACCEPTANCE_METHOD + typical_acceptance_sampler_posterior_threshold: MLC_VLLM_SERVER_TYPICAL_ACCEPTANCE_SAMPLER_POSTERIOR_THRESHOLD + typical_acceptance_sampler_posterior_alpha: MLC_VLLM_SERVER_TYPICAL_ACCEPTANCE_SAMPLER_POSTERIOR_ALPHA + model_loader_extra_config: MLC_VLLM_SERVER_MODEL_LOADER_EXTRA_CONFIG + preemption_mode: MLC_VLLM_SERVER_PREEMPTION_MODE + served_model_name: MLC_VLLM_SERVER_SERVED_MODEL_NAME + qlora_adapter_name_or_path: MLC_VLLM_SERVER_QLORA_ADAPTER_NAME_OR_PATH + otlp_traces_endpoint: MLC_VLLM_SERVER_OTLP_TRACES_ENDPOINT + engine_use_ray: MLC_VLLM_SERVER_ENGINE_USE_RAY + disable_log_requests: MLC_VLLM_SERVER_DISABLE_LOG_REQUESTS + max_log_len: MLC_VLLM_SERVER_MAX_LOG_LEN deps: - tags: get,python3,get-python3 @@ -124,11 +124,11 @@ deps: - tags: get,ml-model,huggingface,zoo,_clone-repo update_tags_from_env_with_prefix: _model-stub.: - - CM_VLLM_SERVER_MODEL_NAME + - MLC_VLLM_SERVER_MODEL_NAME enable_if_env: - CM_VLLM_SERVER_MODEL_NAME: [ on ] + MLC_VLLM_SERVER_MODEL_NAME: [ on ] skip_if_env: - CM_VLLM_SKIP_DOCKER_MODEL_DOWNLOAD: [ on ] + MLC_VLLM_SKIP_DOCKER_MODEL_DOWNLOAD: [ on ] - tags: get,generic-python-lib,_package.vllm diff --git a/script/run-vllm-server/run.sh b/script/run-vllm-server/run.sh index 176c323c5..cc54e9ac0 100644 --- a/script/run-vllm-server/run.sh +++ b/script/run-vllm-server/run.sh @@ -1,6 +1,6 @@ #!/bin/bash -echo ${CM_VLLM_RUN_CMD} +echo ${MLC_VLLM_RUN_CMD} -${CM_VLLM_RUN_CMD} +${MLC_VLLM_RUN_CMD} test $? 
-eq 0 || exit 1 diff --git a/script/runtime-system-infos/customize.py b/script/runtime-system-infos/customize.py index c271bbda2..b54abbdac 100644 --- a/script/runtime-system-infos/customize.py +++ b/script/runtime-system-infos/customize.py @@ -37,14 +37,14 @@ def preprocess(i): env = i['env'] - if env.get("CM_RUN_DIR", "") == "": - env['CM_RUN_DIR'] = os.getcwd() + if env.get("MLC_RUN_DIR", "") == "": + env['MLC_RUN_DIR'] = os.getcwd() - logs_dir = env.get('CM_LOGS_DIR', env['CM_RUN_DIR']) + logs_dir = env.get('MLC_LOGS_DIR', env['MLC_RUN_DIR']) log_json_file_path = os.path.join(logs_dir, 'sys_utilisation_info.txt') - interval = int(env.get('CM_SYSTEM_INFO_MEASUREMENT_INTERVAL', '2')) + interval = int(env.get('MLC_SYSTEM_INFO_MEASUREMENT_INTERVAL', '2')) print(f"The system dumps are created to the folder:{logs_dir}") diff --git a/script/runtime-system-infos/meta.yaml b/script/runtime-system-infos/meta.yaml index 4bf8b8b17..3e3186c4b 100644 --- a/script/runtime-system-infos/meta.yaml +++ b/script/runtime-system-infos/meta.yaml @@ -17,8 +17,8 @@ tags: - infos input_mapping: - log_dir: CM_LOGS_DIR - interval: CM_SYSTEM_INFO_MEASUREMENT_INTERVAL + log_dir: MLC_LOGS_DIR + interval: MLC_SYSTEM_INFO_MEASUREMENT_INTERVAL # Dependencies on other CM scripts diff --git a/script/save-mlperf-inference-implementation-state/customize.py b/script/save-mlperf-inference-implementation-state/customize.py index a7d920cd8..dd0129cd1 100644 --- a/script/save-mlperf-inference-implementation-state/customize.py +++ b/script/save-mlperf-inference-implementation-state/customize.py @@ -13,13 +13,13 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') if not state.get( 'mlperf-inference-implementation'): # No state information. 
Just returning return {'return': 0} - if env.get('CM_MLPERF_README', "") == "yes": + if env.get('MLC_MLPERF_README', "") == "yes": import mlc inp = i['input'] @@ -46,7 +46,7 @@ def preprocess(i): state['mlperf-inference-implementation']['print_deps'] = r['new_state']['print_deps'] - if env.get('CM_DUMP_VERSION_INFO', True): + if env.get('MLC_DUMP_VERSION_INFO', True): if state['mlperf-inference-implementation'].get('script_id', '') == '': state['mlperf-inference-implementation']['script_id'] = '' diff --git a/script/set-device-settings-qaic/customize.py b/script/set-device-settings-qaic/customize.py index 1e87f2dad..427dab6df 100644 --- a/script/set-device-settings-qaic/customize.py +++ b/script/set-device-settings-qaic/customize.py @@ -12,11 +12,11 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') - if env.get('CM_QAIC_ECC', '') == 'yes': + if env.get('MLC_QAIC_ECC', '') == 'yes': import json - for device in env['CM_QAIC_DEVICES'].split(","): + for device in env['MLC_QAIC_DEVICES'].split(","): ecc_template = {} ecc_template['request'] = [] ecc_template['request'].append({}) @@ -31,8 +31,8 @@ def preprocess(i): with open("request_" + device + ".json", "w") as f: f.write(json.dumps(ecc_template)) - if env.get('CM_QAIC_VC', '') != '': - env['CM_QAIC_VC_HEX'] = hex(int(env['CM_QAIC_VC'])) + if env.get('MLC_QAIC_VC', '') != '': + env['MLC_QAIC_VC_HEX'] = hex(int(env['MLC_QAIC_VC'])) return {'return': 0} diff --git a/script/set-device-settings-qaic/meta.yaml b/script/set-device-settings-qaic/meta.yaml index ad88ba330..1599f0067 100644 --- a/script/set-device-settings-qaic/meta.yaml +++ b/script/set-device-settings-qaic/meta.yaml @@ -4,7 +4,7 @@ automation_uid: 5b4e0237da074764 cache: false category: DevOps automation default_env: - CM_QAIC_DEVICES: '0' + MLC_QAIC_DEVICES: '0' deps: - tags: detect-os - tags: get,qaic,platform,sdk @@ -12,7 +12,7 @@ docker_input_mapping: {} input_description: {} input_mapping: {} new_env_keys: -- CM_QAIC_DEVICE_* +- MLC_QAIC_DEVICE_* new_state_keys: [] post_deps: [] posthook_deps: [] @@ -33,7 +33,7 @@ uid: 408a1a1563b44780 variations: ecc: env: - CM_QAIC_ECC: 'yes' + MLC_QAIC_ECC: 'yes' vc.#: env: - CM_QAIC_VC: '#' + MLC_QAIC_VC: '#' diff --git a/script/set-device-settings-qaic/run.sh b/script/set-device-settings-qaic/run.sh index cdc11ac73..dab1a87d4 100644 --- a/script/set-device-settings-qaic/run.sh +++ b/script/set-device-settings-qaic/run.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,27 +17,27 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi } #Add your run commands here... 
-# run "$CM_RUN_CMD" -IFS="," read -r -a devices <<< "$CM_QAIC_DEVICES" +# run "$MLC_RUN_CMD" +IFS="," read -r -a devices <<< "$MLC_QAIC_DEVICES" -if [[ -n ${CM_QAIC_VC} ]]; then +if [[ -n ${MLC_QAIC_VC} ]]; then for device in ${devices[@]} do - run "sudo ${CM_QAIC_TOOLS_PATH}/qaic-diag -d $device -m 0x4B 0x66 0x05 0x1 ${CM_QAIC_VC_HEX}" + run "sudo ${MLC_QAIC_TOOLS_PATH}/qaic-diag -d $device -m 0x4B 0x66 0x05 0x1 ${MLC_QAIC_VC_HEX}" done fi -if [[ ${CM_QAIC_ECC} == "yes" ]]; then +if [[ ${MLC_QAIC_ECC} == "yes" ]]; then for device in ${devices} do - run "sudo ${CM_QAIC_TOOLS_PATH}/qaic-monitor-json -i request_$device.json" + run "sudo ${MLC_QAIC_TOOLS_PATH}/qaic-monitor-json -i request_$device.json" run "rm request_$device.json" done fi diff --git a/script/set-echo-off-win/customize.py b/script/set-echo-off-win/customize.py index ee9f4ef57..bfe238512 100644 --- a/script/set-echo-off-win/customize.py +++ b/script/set-echo-off-win/customize.py @@ -21,6 +21,6 @@ def preprocess(i): # Test to skip next dependency # env = i['env'] - # env['CM_SKIP_SYS_UTILS'] = 'YES' + # env['MLC_SKIP_SYS_UTILS'] = 'YES' return {'return': 0} diff --git a/script/set-performance-mode/customize.py b/script/set-performance-mode/customize.py index 61c4f844c..032f29b05 100644 --- a/script/set-performance-mode/customize.py +++ b/script/set-performance-mode/customize.py @@ -13,7 +13,7 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') return {'return': 0} diff --git a/script/set-performance-mode/meta.yaml b/script/set-performance-mode/meta.yaml index 6a820e286..9d954ce60 100644 --- a/script/set-performance-mode/meta.yaml +++ b/script/set-performance-mode/meta.yaml @@ -26,17 +26,17 @@ variations: cpu: default: 'true' env: - CM_SET_PERFORMANCE_MODE_OF: cpu + MLC_SET_PERFORMANCE_MODE_OF: cpu group: device performance: default: true env: - CM_SET_PERFORMANCE_MODE: performance + MLC_SET_PERFORMANCE_MODE: performance group: performance-mode power: env: - CM_SET_PERFORMANCE_MODE: power + MLC_SET_PERFORMANCE_MODE: power group: power reproducibility: env: - CM_SET_OS_PERFORMANCE_REPRODUCIBILITY_MODE: 'yes' + MLC_SET_OS_PERFORMANCE_REPRODUCIBILITY_MODE: 'yes' diff --git a/script/set-performance-mode/run-ubuntu.sh b/script/set-performance-mode/run-ubuntu.sh index fcec44246..71b8ce802 100644 --- a/script/set-performance-mode/run-ubuntu.sh +++ b/script/set-performance-mode/run-ubuntu.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,20 +17,20 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi } -CM_SUDO="sudo" +MLC_SUDO="sudo" #Add your run commands here... 
-# run "$CM_RUN_CMD" -run "${CM_SUDO} apt-get install -y linux-tools-common linux-tools-generic linux-tools-`uname -r`" -run "${CM_SUDO} cpupower frequency-set -g performance" -if [[ ${CM_SET_OS_PERFORMANCE_REPRODUCIBILITY_MODE} != "no" ]]; then - run "${CM_SUDO} sysctl -w vm.dirty_ratio=8" - run "${CM_SUDO} sysctl -w vm.swappiness=1" - run "${CM_SUDO} sysctl -w vm.zone_reclaim_mode=1" - run "${CM_SUDO} sync; sysctl -w vm.drop_caches=3" - run "${CM_SUDO} sysctl -w kernel.randomize_va_space=0" +# run "$MLC_RUN_CMD" +run "${MLC_SUDO} apt-get install -y linux-tools-common linux-tools-generic linux-tools-`uname -r`" +run "${MLC_SUDO} cpupower frequency-set -g performance" +if [[ ${MLC_SET_OS_PERFORMANCE_REPRODUCIBILITY_MODE} != "no" ]]; then + run "${MLC_SUDO} sysctl -w vm.dirty_ratio=8" + run "${MLC_SUDO} sysctl -w vm.swappiness=1" + run "${MLC_SUDO} sysctl -w vm.zone_reclaim_mode=1" + run "${MLC_SUDO} sync; sysctl -w vm.drop_caches=3" + run "${MLC_SUDO} sysctl -w kernel.randomize_va_space=0" fi diff --git a/script/set-performance-mode/run.sh b/script/set-performance-mode/run.sh index 3a584c10c..821adb3f9 100644 --- a/script/set-performance-mode/run.sh +++ b/script/set-performance-mode/run.sh @@ -1,11 +1,11 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency @@ -17,11 +17,11 @@ function run() { echo "Running: " echo "$1" echo "" - if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then eval "$1" exit_if_error fi } #Add your run commands here... -# run "$CM_RUN_CMD" +# run "$MLC_RUN_CMD" diff --git a/script/set-sqlite-dir/customize.py b/script/set-sqlite-dir/customize.py index 21ab396e4..828ac188e 100644 --- a/script/set-sqlite-dir/customize.py +++ b/script/set-sqlite-dir/customize.py @@ -5,6 +5,6 @@ def postprocess(i): env = i['env'] - env['CM_SQLITE_PATH'] = os.getcwd() + env['MLC_SQLITE_PATH'] = os.getcwd() return {'return': 0} diff --git a/script/set-sqlite-dir/meta.yaml b/script/set-sqlite-dir/meta.yaml index 37f348259..48231f573 100644 --- a/script/set-sqlite-dir/meta.yaml +++ b/script/set-sqlite-dir/meta.yaml @@ -11,9 +11,9 @@ deps: tags: get,python3 env: {} input_mapping: - path: CM_SQLITE_PATH + path: MLC_SQLITE_PATH new_env_keys: -- CM_SQLITE_PATH +- MLC_SQLITE_PATH tags: - set - sqlite diff --git a/script/set-sqlite-dir/run.bat b/script/set-sqlite-dir/run.bat index 37f249b0f..7086d33dd 100644 --- a/script/set-sqlite-dir/run.bat +++ b/script/set-sqlite-dir/run.bat @@ -1,2 +1,2 @@ -%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\code.py +%MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\code.py IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/set-sqlite-dir/run.sh b/script/set-sqlite-dir/run.sh index 9b94917d9..ad0d08ec5 100644 --- a/script/set-sqlite-dir/run.sh +++ b/script/set-sqlite-dir/run.sh @@ -1,4 +1,4 @@ #!/bin/bash -${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/code.py +${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/code.py test $? -eq 0 || exit $? 
diff --git a/script/set-user-limits/customize.py b/script/set-user-limits/customize.py index 22a38fa91..8574257b9 100644 --- a/script/set-user-limits/customize.py +++ b/script/set-user-limits/customize.py @@ -12,14 +12,14 @@ def preprocess(i): automation = i['automation'] - quiet = (env.get('CM_QUIET', False) == 'yes') + quiet = (env.get('MLC_QUIET', False) == 'yes') cmds = [] - if env.get('CM_ULIMIT_NOFILE', '') != '': - cmds.append(f"ulimit -n {env['CM_ULIMIT_NOFILE']}") + if env.get('MLC_ULIMIT_NOFILE', '') != '': + cmds.append(f"ulimit -n {env['MLC_ULIMIT_NOFILE']}") - env['CM_RUN_CMD'] = " && ".join(cmds) + env['MLC_RUN_CMD'] = " && ".join(cmds) return {'return': 0} diff --git a/script/set-user-limits/meta.yaml b/script/set-user-limits/meta.yaml index 6097298c2..5bc2ac9f3 100644 --- a/script/set-user-limits/meta.yaml +++ b/script/set-user-limits/meta.yaml @@ -11,4 +11,4 @@ uid: 49dd1856b37342ac variations: large-nofile: env: - CM_ULIMIT_NOFILE: 9999 + MLC_ULIMIT_NOFILE: 9999 diff --git a/script/set-user-limits/run.sh b/script/set-user-limits/run.sh index 4c23c380e..32cf4d51e 100644 --- a/script/set-user-limits/run.sh +++ b/script/set-user-limits/run.sh @@ -1,17 +1,17 @@ #!/bin/bash -#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} #To export any variable #echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out -#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency echo "Running: " -echo "${CM_RUN_CMD}" +echo "${MLC_RUN_CMD}" echo "" -if [[ ${CM_FAKE_RUN} != "yes" ]]; then - eval "${CM_RUN_CMD}" +if [[ ${MLC_FAKE_RUN} != "yes" ]]; then + eval "${MLC_RUN_CMD}" test $? 
diff --git a/script/set-venv/customize.py b/script/set-venv/customize.py
index 4bb0912d4..c9f5e129c 100644
--- a/script/set-venv/customize.py
+++ b/script/set-venv/customize.py
@@ -12,12 +12,12 @@ def preprocess(i):

     automation = i['automation']

-    quiet = (env.get('CM_QUIET', False) == 'yes')
+    quiet = (env.get('MLC_QUIET', False) == 'yes')

     ############################################################
     cur_dir = os.getcwd()

-    name = env.get('CM_NAME', '')
+    name = env.get('MLC_NAME', '')
     if name == '':
         artifacts = i.get('input', {}).get('artifacts', [])
         if len(artifacts) > 0:
@@ -33,7 +33,7 @@ def preprocess(i):
         activate_script2 = os.path.join(name, activate_script)

         if not os.path.isfile(activate_script2):
-            force_python_path = env.get('CM_SET_VENV_PYTHON', '')
+            force_python_path = env.get('MLC_SET_VENV_PYTHON', '')

             if force_python_path != '' and not os.path.isfile(force_python_path):
                 return {'return': 1, 'error': 'python executable not found: {}'.format(
@@ -69,17 +69,17 @@ def preprocess(i):
             os.makedirs(work_dir)

         if os_info['platform'] == 'windows':
-            shell = os.environ.get('CM_SET_VENV_SHELL', '')
+            shell = os.environ.get('MLC_SET_VENV_SHELL', '')
             if shell == '':
-                shell = env.get('CM_SET_VENV_SHELL', '')
+                shell = env.get('MLC_SET_VENV_SHELL', '')
                 if shell != '':
-                    shell = shell.replace('CM_SET_VENV_WORK', 'work')
+                    shell = shell.replace('MLC_SET_VENV_WORK', 'work')
             if shell == '':
                 shell = 'cmd'

-            cmd = 'cd {} & call {} & set CM_REPOS=%CD%\\{}\\CM & {}\n'.format(
+            cmd = 'cd {} & call {} & set MLC_REPOS=%CD%\\{}\\CM & {}\n'.format(
                 name, activate_script, name, shell)
         else:
-            cmd = '#!/bin/bash\n\ncd {} ; source {} ; export CM_REPOS=$PWD/CM ; cd work\n'.format(
+            cmd = '#!/bin/bash\n\ncd {} ; source {} ; export MLC_REPOS=$PWD/CM ; cd work\n'.format(
                 name, activate_script)

         with open(script_file, 'w') as f:
diff --git a/script/set-venv/meta.yaml b/script/set-venv/meta.yaml
index 40b08b9f1..9dc6a4bd3 100644
--- a/script/set-venv/meta.yaml
+++ b/script/set-venv/meta.yaml
@@ -5,7 +5,7 @@ automation_alias: script
 automation_uid: 5b4e0237da074764

 input_mapping:
-  python: CM_SET_VENV_PYTHON
+  python: MLC_SET_VENV_PYTHON

 cache: false
diff --git a/script/submit-mlperf-results/customize.py b/script/submit-mlperf-results/customize.py
index 918b16c4b..be2125325 100644
--- a/script/submit-mlperf-results/customize.py
+++ b/script/submit-mlperf-results/customize.py
@@ -11,10 +11,10 @@ def preprocess(i):
     meta = i['meta']
     automation = i['automation']

-    server = env['CM_MLPERF_SUBMISSION_URL']
-    benchmark = env['CM_MLPERF_BENCHMARK']
-    submitter_id = env['CM_MLPERF_SUBMITTER_ID']
-    file_path = env['CM_MLPERF_SUBMISSION_FILE']
+    server = env['MLC_MLPERF_SUBMISSION_URL']
+    benchmark = env['MLC_MLPERF_BENCHMARK']
+    submitter_id = env['MLC_MLPERF_SUBMITTER_ID']
+    file_path = env['MLC_MLPERF_SUBMISSION_FILE']

     r = get_signed_url(server, benchmark, submitter_id, file_path)
     if r['return'] > 0:
diff --git a/script/submit-mlperf-results/meta.yaml b/script/submit-mlperf-results/meta.yaml
index b7a10ce83..83e2faeb2 100644
--- a/script/submit-mlperf-results/meta.yaml
+++ b/script/submit-mlperf-results/meta.yaml
@@ -3,11 +3,11 @@ automation_alias: script
 automation_uid: 5b4e0237da074764
 category: MLPerf benchmark support
 default_env:
-  CM_MLPERF_SUBMISSION_URL: https://submissions-ui.mlcommons.org
+  MLC_MLPERF_SUBMISSION_URL: https://submissions-ui.mlcommons.org
 input_mapping:
-  input: CM_MLPERF_SUBMISSION_FILE
-  submitter_id: CM_MLPERF_SUBMITTER_ID
+  input: MLC_MLPERF_SUBMISSION_FILE
+  submitter_id: MLC_MLPERF_SUBMITTER_ID
 tags:
 - submit
 - mlperf
@@ -19,4 +19,4 @@ uid: cc01f0a82bef4216
 variations:
   inference:
     env:
-      CM_MLPERF_BENCHMARK: "Inference"
+      MLC_MLPERF_BENCHMARK: "Inference"
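set-venv and submit-mlperf-results both lean on meta.yaml's input_mapping, so the rename has to touch the mapping values as well as the code that reads them (python maps to MLC_SET_VENV_PYTHON, submitter_id to MLC_MLPERF_SUBMITTER_ID). A sketch of how such a mapping can be applied, assuming the automation resolves it roughly like this:

    def apply_input_mapping(inputs, mapping, env):
        # Copy each recognized CLI flag into its mapped MLC_ env key.
        for flag, env_key in mapping.items():
            if flag in inputs:
                env[env_key] = inputs[flag]
        return env

    # e.g. a submit-mlperf-results invocation with --submitter_id and --input:
    env = apply_input_mapping(
        {'submitter_id': '42', 'input': 'submission.tar.gz'},
        {'input': 'MLC_MLPERF_SUBMISSION_FILE',
         'submitter_id': 'MLC_MLPERF_SUBMITTER_ID'},
        {})
    assert env['MLC_MLPERF_SUBMITTER_ID'] == '42'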
diff --git a/script/tar-my-folder/customize.py b/script/tar-my-folder/customize.py
index 37dfd2271..e5a8eca9b 100644
--- a/script/tar-my-folder/customize.py
+++ b/script/tar-my-folder/customize.py
@@ -8,13 +8,13 @@ def preprocess(i):

     os_info = i['os_info']
     env = i['env']
-    input_dir = env.get("CM_TAR_INPUT_DIR", "")
+    input_dir = env.get("MLC_TAR_INPUT_DIR", "")
     if input_dir == "":
-        return {'return': 1, 'error': 'Please set CM_TAR_INPUT_DIR'}
-    output_dir = env.get("CM_TAR_OUTPUT_DIR", "")
+        return {'return': 1, 'error': 'Please set MLC_TAR_INPUT_DIR'}
+    output_dir = env.get("MLC_TAR_OUTPUT_DIR", "")
     if output_dir == "":
         output_dir = os.getcwd()
-    output_file = env.get("CM_TAR_OUTFILE", "")
+    output_file = env.get("MLC_TAR_OUTFILE", "")
     input_dirname = os.path.basename(input_dir)
     if output_file == "":
         output_file = input_dirname + ".tar.gz"
diff --git a/script/tar-my-folder/meta.yaml b/script/tar-my-folder/meta.yaml
index 100e27eb7..505b3452b 100644
--- a/script/tar-my-folder/meta.yaml
+++ b/script/tar-my-folder/meta.yaml
@@ -6,9 +6,9 @@ category: DevOps automation
 clean_files: []
 deps: []
 input_mapping:
-  input_dir: CM_TAR_INPUT_DIR
-  outfile: CM_TAR_OUTFILE
-  output_dir: CM_TAR_OUTPUT_DIR
+  input_dir: MLC_TAR_INPUT_DIR
+  outfile: MLC_TAR_OUTFILE
+  output_dir: MLC_TAR_OUTPUT_DIR
 tags:
 - run
 - tar
diff --git a/script/truncate-mlperf-inference-accuracy-log/customize.py b/script/truncate-mlperf-inference-accuracy-log/customize.py
index 264fb72c9..ec59f3697 100644
--- a/script/truncate-mlperf-inference-accuracy-log/customize.py
+++ b/script/truncate-mlperf-inference-accuracy-log/customize.py
@@ -8,18 +8,18 @@ def preprocess(i):

     os_info = i['os_info']
     env = i['env']
-    submission_dir = env.get("CM_MLPERF_INFERENCE_SUBMISSION_DIR", "")
+    submission_dir = env.get("MLC_MLPERF_INFERENCE_SUBMISSION_DIR", "")

     if submission_dir == "":
-        print("Please set CM_MLPERF_INFERENCE_SUBMISSION_DIR")
-        return {'return': 1, 'error': 'CM_MLPERF_INFERENCE_SUBMISSION_DIR is not specified in env in run-mlperf-accuracy-log-truncator'}
+        print("Please set MLC_MLPERF_INFERENCE_SUBMISSION_DIR")
+        return {'return': 1, 'error': 'MLC_MLPERF_INFERENCE_SUBMISSION_DIR is not specified in env in run-mlperf-accuracy-log-truncator'}

-    submitter = env.get("CM_MLPERF_SUBMITTER", "CTuning")
+    submitter = env.get("MLC_MLPERF_SUBMITTER", "CTuning")

     os.system("rm -rf " + submission_dir + "_logs")

-    CMD = env['CM_PYTHON_BIN'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "tools", "submission",
+    CMD = env['MLC_PYTHON_BIN'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "tools", "submission",
                                                      "truncate_accuracy_log.py") + "' --input '" + submission_dir + "' --submitter '" + submitter + "' --backup '" + submission_dir + "_logs'"
-    env['CM_RUN_CMD'] = CMD
+    env['MLC_RUN_CMD'] = CMD

     return {'return': 0}
diff --git a/script/truncate-mlperf-inference-accuracy-log/meta.yaml b/script/truncate-mlperf-inference-accuracy-log/meta.yaml
index 6e5887266..c0f02f6d3 100644
--- a/script/truncate-mlperf-inference-accuracy-log/meta.yaml
+++ b/script/truncate-mlperf-inference-accuracy-log/meta.yaml
@@ -15,13 +15,13 @@ deps:
 - names:
   - get-mlperf-submission-dir
   skip_if_env:
-    CM_MLPERF_INFERENCE_SUBMISSION_DIR:
+    MLC_MLPERF_INFERENCE_SUBMISSION_DIR:
     - 'on'
   tags: get,mlperf,submission,dir
 input_mapping:
-  input: CM_MLPERF_INFERENCE_SUBMISSION_DIR
-  submission_dir: CM_MLPERF_INFERENCE_SUBMISSION_DIR
-  submitter: CM_MLPERF_SUBMITTER
+  input: MLC_MLPERF_INFERENCE_SUBMISSION_DIR
+  submission_dir: MLC_MLPERF_INFERENCE_SUBMISSION_DIR
+  submitter: MLC_MLPERF_SUBMITTER
 tags:
 - run
 - mlc
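The truncate-mlperf-inference-accuracy-log preprocess splices quoted paths into one command string by hand, and the rename keeps that construction intact. For reference, the same command built with shlex.quote would survive paths containing spaces or quotes (a sketch of an alternative, not something this patch changes):

    import os
    import shlex

    def build_truncate_cmd(env):
        # Same command the patch builds, with each path quoted defensively.
        script = os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'],
                              'tools', 'submission', 'truncate_accuracy_log.py')
        submission_dir = env['MLC_MLPERF_INFERENCE_SUBMISSION_DIR']
        submitter = env.get('MLC_MLPERF_SUBMITTER', 'CTuning')
        return ' '.join([
            env['MLC_PYTHON_BIN'], shlex.quote(script),
            '--input', shlex.quote(submission_dir),
            '--submitter', shlex.quote(submitter),
            '--backup', shlex.quote(submission_dir + '_logs'),
        ])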
diff --git a/script/truncate-mlperf-inference-accuracy-log/run.sh b/script/truncate-mlperf-inference-accuracy-log/run.sh
index 1b3c5c3c0..7feafdf44 100644
--- a/script/truncate-mlperf-inference-accuracy-log/run.sh
+++ b/script/truncate-mlperf-inference-accuracy-log/run.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-cmd=${CM_RUN_CMD}
+cmd=${MLC_RUN_CMD}
 echo "${cmd}"
 eval "${cmd}"
 test $? -eq 0 || exit $?
diff --git a/script/upgrade-python-pip/run.bat b/script/upgrade-python-pip/run.bat
index b6cc1b374..5baf63a1d 100644
--- a/script/upgrade-python-pip/run.bat
+++ b/script/upgrade-python-pip/run.bat
@@ -1,2 +1,2 @@
-%CM_PYTHON_BIN_WITH_PATH% -m pip install --upgrade pip
+%MLC_PYTHON_BIN_WITH_PATH% -m pip install --upgrade pip
 IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/upgrade-python-pip/run.sh b/script/upgrade-python-pip/run.sh
index 389a212e4..47a0df526 100644
--- a/script/upgrade-python-pip/run.sh
+++ b/script/upgrade-python-pip/run.sh
@@ -1,4 +1,4 @@
 #!/bin/bash

-${CM_PYTHON_BIN_WITH_PATH} -m pip install --upgrade pip
+${MLC_PYTHON_BIN_WITH_PATH} -m pip install --upgrade pip
 test $? -eq 0 || exit $?
diff --git a/script/wrapper-reproduce-octoml-tinyml-submission/README-extra.md b/script/wrapper-reproduce-octoml-tinyml-submission/README-extra.md
index 836b025dd..e0901f6c1 100644
--- a/script/wrapper-reproduce-octoml-tinyml-submission/README-extra.md
+++ b/script/wrapper-reproduce-octoml-tinyml-submission/README-extra.md
@@ -1,4 +1,4 @@
-This is a wrapper script to [Reproduce MLPerf OctoML TinyML Results](https://github.com/octoml/ck/tree/master/cm-mlops/script/reproduce-mlperf-octoml-tinyml-results)
+This is a wrapper script to [Reproduce MLPerf OctoML TinyML Results](https://github.com/octoml/ck/tree/master/mlc-mlops/script/reproduce-mlperf-octoml-tinyml-results)
 which runs the script for the two microtvm variants and their supported models.

 ## Install
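The thin run.sh wrappers above (truncate-mlperf-inference-accuracy-log, upgrade-python-pip) share one convention: run the MLC_ command, then propagate the child's exit status verbatim via test $? -eq 0 || exit $?. The equivalent contract in Python, should one of these steps be driven from a script (the function name is illustrative):

    import subprocess
    import sys

    def run_and_propagate(cmd):
        # Echo, run under bash, and exit with the child's status,
        # mirroring the `eval "${cmd}"; test $? -eq 0 || exit $?` idiom.
        print(cmd)
        rc = subprocess.call(['bash', '-c', cmd])
        if rc != 0:
            sys.exit(rc)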
diff --git a/script/wrapper-reproduce-octoml-tinyml-submission/customize.py b/script/wrapper-reproduce-octoml-tinyml-submission/customize.py
index e1dfaf976..27907b617 100644
--- a/script/wrapper-reproduce-octoml-tinyml-submission/customize.py
+++ b/script/wrapper-reproduce-octoml-tinyml-submission/customize.py
@@ -9,7 +9,7 @@ def preprocess(i):
     env = i['env']
     state = i['state']
     inp = i['input']
-    if 'CM_FLASH_BOARD' in env:
+    if 'MLC_FLASH_BOARD' in env:
         script_tags = "flash,tiny"
     else:
         script_tags = "reproduce,tiny,mlperf,octoml"
@@ -25,7 +25,7 @@ def preprocess(i):
         for model in microtvm_variants[microtvm_variant]:
             variation_tags_string = "_" + board + ",_" + microtvm_variant + ",_" + model
             tags = script_tags + "," + variation_tags_string
-            if 'CM_RECREATE_BINARY' in env:
+            if 'MLC_RECREATE_BINARY' in env:
                 r = mlc.access(
                     {'action': 'rm', 'automation': 'cache', 'tags': tags, 'force': 'true'})
                 if r['return'] > 0:
diff --git a/script/wrapper-reproduce-octoml-tinyml-submission/meta.yaml b/script/wrapper-reproduce-octoml-tinyml-submission/meta.yaml
index f8f24d444..4afcec333 100644
--- a/script/wrapper-reproduce-octoml-tinyml-submission/meta.yaml
+++ b/script/wrapper-reproduce-octoml-tinyml-submission/meta.yaml
@@ -10,8 +10,8 @@ deps:
   tags: get,python3
 env: {}
 input_mapping:
-  flash: CM_FLASH_BOARD
-  recreate_binary: CM_RECREATE_BINARY
+  flash: MLC_FLASH_BOARD
+  recreate_binary: MLC_RECREATE_BINARY
 tags:
 - run
 - generate-tiny
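The wrapper's preprocess fans out over every board/microtvm-variant/model combination and, when MLC_RECREATE_BINARY is set, purges the matching cache entry before re-running. A trimmed sketch of that loop (the mlc handle and the microtvm_variants structure come from the surrounding script; the boards argument is illustrative):

    def purge_cached_binaries(mlc, env, script_tags, boards, microtvm_variants):
        # Tags mirror the patch: script tags + ",_board,_variant,_model".
        for board in boards:
            for variant, models in microtvm_variants.items():
                for model in models:
                    tags = f"{script_tags},_{board},_{variant},_{model}"
                    if 'MLC_RECREATE_BINARY' in env:
                        # Drop any cached artifact so the binary is rebuilt.
                        r = mlc.access({'action': 'rm', 'automation': 'cache',
                                        'tags': tags, 'force': 'true'})
                        if r['return'] > 0:
                            return r
        return {'return': 0}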