diff --git a/.github/scripts/process_individual_tests.py b/.github/scripts/process_individual_tests.py
index d328aad63..848656d50 100644
--- a/.github/scripts/process_individual_tests.py
+++ b/.github/scripts/process_individual_tests.py
@@ -25,10 +25,10 @@
ii = {
'action': 'test', 'target': 'script', 'item': uid, 'quiet': 'yes', 'out': 'con'
}
- if os.environ.get('DOCKER_CM_REPO', '') != '':
- ii['docker_cm_repo'] = os.environ['DOCKER_CM_REPO']
- if os.environ.get('DOCKER_CM_REPO_BRANCH', '') != '':
- ii['docker_cm_repo_branch'] = os.environ['DOCKER_CM_REPO_BRANCH']
+ if os.environ.get('DOCKER_MLC_REPO', '') != '':
+ ii['docker_cm_repo'] = os.environ['DOCKER_MLC_REPO']
+ if os.environ.get('DOCKER_MLC_REPO_BRANCH', '') != '':
+ ii['docker_cm_repo_branch'] = os.environ['DOCKER_MLC_REPO_BRANCH']
if os.environ.get('TEST_INPUT_INDEX', '') != '':
ii['test_input_index'] = os.environ['TEST_INPUT_INDEX']
print(ii)
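Note that the rename above keeps the script's input key `docker_cm_repo` while switching the environment variable it reads to `DOCKER_MLC_REPO`. During the transition, a helper that also accepts the legacy variable could avoid breaking older pipelines; a minimal sketch (the `env_get` helper and the `CM_*` fallback are assumptions, not part of this patch):

```python
import os

def env_get(name, legacy=None, default=''):
    # Hypothetical helper: prefer the new MLC_* variable and fall back
    # to the legacy CM_* one so older pipelines keep working.
    value = os.environ.get(name, '')
    if value == '' and legacy is not None:
        value = os.environ.get(legacy, '')
    return value if value != '' else default

ii = {'action': 'test', 'target': 'script', 'quiet': 'yes', 'out': 'con'}
repo = env_get('DOCKER_MLC_REPO', legacy='DOCKER_CM_REPO')
if repo != '':
    ii['docker_cm_repo'] = repo  # the input key itself is unchanged by this patch
```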
diff --git a/.github/workflows/run-individual-script-tests.yml b/.github/workflows/run-individual-script-tests.yml
index cd0f2712d..b9c31990f 100644
--- a/.github/workflows/run-individual-script-tests.yml
+++ b/.github/workflows/run-individual-script-tests.yml
@@ -34,4 +34,4 @@ jobs:
done
pip install mlcflow
mlc pull repo ${{ github.event.pull_request.head.repo.html_url }} --branch=${{ github.event.pull_request.head.ref }}
- DOCKER_CM_REPO=${{ github.event.pull_request.head.repo.html_url }} DOCKER_CM_REPO_BRANCH=${{ github.event.pull_request.head.ref }} TEST_INPUT_INDEX=${{ matrix.test-input-index }} python3 .github/scripts/process_individual_tests.py ${{ steps.getfile.outputs.files }}
+ DOCKER_MLC_REPO=${{ github.event.pull_request.head.repo.html_url }} DOCKER_MLC_REPO_BRANCH=${{ github.event.pull_request.head.ref }} TEST_INPUT_INDEX=${{ matrix.test-input-index }} python3 .github/scripts/process_individual_tests.py ${{ steps.getfile.outputs.files }}
diff --git a/.github/workflows/test-amd-mlperf-inference-implementations.yml b/.github/workflows/test-amd-mlperf-inference-implementations.yml
index 2e140c32e..512a2af8e 100644
--- a/.github/workflows/test-amd-mlperf-inference-implementations.yml
+++ b/.github/workflows/test-amd-mlperf-inference-implementations.yml
@@ -19,7 +19,7 @@ jobs:
if [ -f "gh_action/bin/deactivate" ]; then source gh_action/bin/deactivate; fi
python3 -m venv gh_action
source gh_action/bin/activate
- export CM_REPOS=$HOME/GH_CM
+ export MLC_REPOS=$HOME/GH_MLC
pip install --upgrade cm4mlops
cm pull repo
cm run script --tags=run-mlperf,inference,_all-scenarios,_full,_r4.1-dev --execution_mode=valid --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=IntelSPR.24c --implementation=amd --backend=pytorch --category=datacenter --division=open --scenario=Offline --docker_dt=yes --docker_it=no --docker_cm_repo=gateoverflow@mlperf-automations --docker_cm_repo_branch=dev --adr.compiler.tags=gcc --device=rocm --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean --docker --quiet --docker_skip_run_cmd=yes
diff --git a/.github/workflows/test-intel-mlperf-inference-implementations.yml b/.github/workflows/test-intel-mlperf-inference-implementations.yml
index 166a1a77c..c70e5bb22 100644
--- a/.github/workflows/test-intel-mlperf-inference-implementations.yml
+++ b/.github/workflows/test-intel-mlperf-inference-implementations.yml
@@ -19,7 +19,7 @@ jobs:
if [ -f "gh_action_conda/bin/deactivate" ]; then source gh_action_conda/bin/deactivate; fi
python3 -m venv gh_action_conda
source gh_action_conda/bin/activate
- export CM_REPOS=$HOME/GH_CM
+ export MLC_REPOS=$HOME/GH_MLC
pip install --upgrade cm4mlops
pip install tabulate
cm run script --tags=run-mlperf,inference,_all-scenarios,_submission,_full,_r4.1-dev --preprocess_submission=yes --execution_mode=valid --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=IntelSPR.24c --implementation=intel --backend=pytorch --category=datacenter --division=open --scenario=Offline --docker_dt=yes --docker_it=no --docker_cm_repo=mlcommons@mlperf-automations --docker_cm_repo_branch=dev --adr.compiler.tags=gcc --device=cpu --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean --docker --quiet
diff --git a/.github/workflows/test-mlc-based-submission-generation.yml b/.github/workflows/test-mlc-based-submission-generation.yml
index c409e955f..5c97a2d3e 100644
--- a/.github/workflows/test-mlc-based-submission-generation.yml
+++ b/.github/workflows/test-mlc-based-submission-generation.yml
@@ -67,21 +67,21 @@ jobs:
extra_run_args=" --category=datacenter"
description="Submission generation (system_meta.json not found in results folder)"
elif [ "${{ matrix.case }}" == "closed" ]; then
- extra_run_args=" --env.CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS="--skip-extra-accuracy-files-check""
+ extra_run_args=" --env.MLC_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS="--skip-extra-accuracy-files-check""
description="Test submission - contains closed edge and datacenter"
elif [ "${{ matrix.case }}" == "closed-no-compliance" ]; then
- extra_run_args=" --env.CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS="--skip-extra-accuracy-files-check""
+ extra_run_args=" --env.MLC_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS="--skip-extra-accuracy-files-check""
description="Test submission - contains closed edge and datacenter with no compliance tests"
elif [ "${{ matrix.case }}" == "closed-power" ]; then
- extra_run_args=" --env.CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS="--skip-extra-accuracy-files-check""
+ extra_run_args=" --env.MLC_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS="--skip-extra-accuracy-files-check""
description="Test submission - contains closed-power edge and datacenter results"
elif [ "${{ matrix.case }}" == "closed-failed-power-logs" ]; then
- extra_run_args=" --env.CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS="--skip-extra-accuracy-files-check""
+ extra_run_args=" --env.MLC_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS="--skip-extra-accuracy-files-check""
description="Test submission - contains closed-power edge and datacenter results with failed power logs"
fi
# Dynamically set the log group to simulate a dynamic step name
echo "::group::$description"
- mlc ${{ matrix.action }} script --tags=generate,inference,submission --version=v4.1 --clean --preprocess_submission=yes --results_dir=$PWD/submission_generation_tests/${{ matrix.case }}/ --run-checker --submitter=MLCommons --tar=yes --env.CM_TAR_OUTFILE=submission.tar.gz --division=${{ matrix.division }} --env.CM_DETERMINE_MEMORY_CONFIGURATION=yes --quiet $extra_run_args
+ mlc ${{ matrix.action }} script --tags=generate,inference,submission --version=v4.1 --clean --preprocess_submission=yes --results_dir=$PWD/submission_generation_tests/${{ matrix.case }}/ --run-checker --submitter=MLCommons --tar=yes --env.MLC_TAR_OUTFILE=submission.tar.gz --division=${{ matrix.division }} --env.MLC_DETERMINE_MEMORY_CONFIGURATION=yes --quiet $extra_run_args
exit_status=$?
echo "Exit status for the job ${description} ${exit_status}"
if [[ "${{ matrix.case }}" == "case-5" || "${{ matrix.case }}" == "case-6" ]]; then
diff --git a/.github/workflows/test-mlc-script-features.yml b/.github/workflows/test-mlc-script-features.yml
index 8ee512bca..96dabf921 100644
--- a/.github/workflows/test-mlc-script-features.yml
+++ b/.github/workflows/test-mlc-script-features.yml
@@ -63,10 +63,10 @@ jobs:
- name: Run docker container from dockerhub on linux
if: runner.os == 'linux'
run: |
- mlc run script --tags=run,docker,container --adr.compiler.tags=gcc --docker_cm_repo=mlcommons@mlperf-automations --image_name=cm-script-app-image-classification-onnx-py --env.CM_DOCKER_RUN_SCRIPT_TAGS=app,image-classification,onnx,python --env.CM_DOCKER_IMAGE_BASE=ubuntu:22.04 --env.CM_DOCKER_IMAGE_REPO=cknowledge --quiet
+ mlc run script --tags=run,docker,container --adr.compiler.tags=gcc --docker_cm_repo=mlcommons@mlperf-automations --image_name=cm-script-app-image-classification-onnx-py --env.MLC_DOCKER_RUN_SCRIPT_TAGS=app,image-classification,onnx,python --env.MLC_DOCKER_IMAGE_BASE=ubuntu:22.04 --env.MLC_DOCKER_IMAGE_REPO=cknowledge --quiet
- name: Run docker container locally on linux
if: runner.os == 'linux'
run: |
- mlc run script --tags=run,docker,container --adr.compiler.tags=gcc --docker_cm_repo=mlcommons@mlperf-automations --image_name=mlc-script-app-image-classification-onnx-py --env.CM_DOCKER_RUN_SCRIPT_TAGS=app,image-classification,onnx,python --env.CM_DOCKER_IMAGE_BASE=ubuntu:22.04 --env.CM_DOCKER_IMAGE_REPO=local --quiet
+ mlc run script --tags=run,docker,container --adr.compiler.tags=gcc --docker_cm_repo=mlcommons@mlperf-automations --image_name=mlc-script-app-image-classification-onnx-py --env.MLC_DOCKER_RUN_SCRIPT_TAGS=app,image-classification,onnx,python --env.MLC_DOCKER_IMAGE_BASE=ubuntu:22.04 --env.MLC_DOCKER_IMAGE_REPO=local --quiet
diff --git a/.github/workflows/test-mlperf-inference-dlrm.yml b/.github/workflows/test-mlperf-inference-dlrm.yml
index f18b51b4d..749849842 100644
--- a/.github/workflows/test-mlperf-inference-dlrm.yml
+++ b/.github/workflows/test-mlperf-inference-dlrm.yml
@@ -22,7 +22,7 @@ jobs:
run: |
source gh_action/bin/deactivate || python3 -m venv gh_action
source gh_action/bin/activate
- export CM_REPOS=$HOME/GH_CM
+ export MLC_REPOS=$HOME/GH_MLC
python3 -m pip install cm4mlops
cm pull repo
cm run script --tags=run-mlperf,inference,_performance-only --pull_changes=yes --pull_inference_changes=yes --submitter="MLCommons" --model=dlrm-v2-99 --implementation=reference --backend=pytorch --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=1 --docker_it=no --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --clean
@@ -42,7 +42,7 @@ jobs:
run: |
source gh_action/bin/deactivate || python3 -m venv gh_action
source gh_action/bin/activate
- export CM_REPOS=$HOME/GH_CM
+ export MLC_REPOS=$HOME/GH_MLC
python3 -m pip install cm4mlops
cm pull repo
cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --model=dlrm-v2-99 --implementation=intel --batch_size=1 --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=1 --docker_it=no --docker_cm_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean
diff --git a/.github/workflows/test-mlperf-inference-gptj.yml b/.github/workflows/test-mlperf-inference-gptj.yml
index bf2921bd2..26543d98e 100644
--- a/.github/workflows/test-mlperf-inference-gptj.yml
+++ b/.github/workflows/test-mlperf-inference-gptj.yml
@@ -23,7 +23,7 @@ jobs:
run: |
source gh_action/bin/deactivate || python3 -m venv gh_action
source gh_action/bin/activate
- export CM_REPOS=$HOME/GH_CM
+ export MLC_REPOS=$HOME/GH_MLC
python3 -m pip install cm4mlops
cm pull repo
cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --docker --pull_changes=yes --pull_inference_changes=yes --model=gptj-99 --backend=${{ matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --target_qps=1 --quiet --docker_it=no --docker_cm_repo=gateoverflow@mlperf-automations --docker_cm_repo_branch=dev --adr.compiler.tags=gcc --beam_size=1 --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --get_platform_details=yes --implementation=reference --clean
diff --git a/.github/workflows/test-mlperf-inference-llama2.yml b/.github/workflows/test-mlperf-inference-llama2.yml
index 986ee21be..ec52d6b06 100644
--- a/.github/workflows/test-mlperf-inference-llama2.yml
+++ b/.github/workflows/test-mlperf-inference-llama2.yml
@@ -24,12 +24,12 @@ jobs:
run: |
source gh_action/bin/deactivate || python3 -m venv gh_action
source gh_action/bin/activate
- export CM_REPOS=$HOME/GH_CM
+ export MLC_REPOS=$HOME/GH_MLC
pip install cm4mlops
pip install tabulate
cm pull repo
pip install "huggingface_hub[cli]"
git config --global credential.helper store
huggingface-cli login --token ${{ secrets.HF_TOKEN }} --add-to-git-credential
- cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --model=llama2-70b-99 --implementation=reference --backend=${{ matrix.backend }} --precision=${{ matrix.precision }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=0.001 --docker_it=no --docker_cm_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --env.CM_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST=yes --clean
+ cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --model=llama2-70b-99 --implementation=reference --backend=${{ matrix.backend }} --precision=${{ matrix.precision }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=0.001 --docker_it=no --docker_cm_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --env.MLC_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST=yes --clean
cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions" --quiet --submission_dir=$HOME/gh_action_submissions
diff --git a/.github/workflows/test-mlperf-inference-mixtral.yml b/.github/workflows/test-mlperf-inference-mixtral.yml
index c234d464e..174ae82a6 100644
--- a/.github/workflows/test-mlperf-inference-mixtral.yml
+++ b/.github/workflows/test-mlperf-inference-mixtral.yml
@@ -25,11 +25,11 @@ jobs:
run: |
source gh_action/bin/deactivate || python3 -m venv gh_action
source gh_action/bin/activate
- export CM_REPOS=$HOME/GH_CM
+ export MLC_REPOS=$HOME/GH_MLC
pip install cm4mlops
pip install "huggingface_hub[cli]"
git config --global credential.helper store
huggingface-cli login --token ${{ secrets.HF_TOKEN }} --add-to-git-credential
cm pull repo
- cm run script --tags=run-mlperf,inference,_submission,_short --adr.inference-src.tags=_branch.dev --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --model=mixtral-8x7b --implementation=reference --batch_size=1 --precision=${{ matrix.precision }} --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker_it=no --docker_cm_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --docker --quiet --test_query_count=3 --target_qps=0.001 --clean --env.CM_MLPERF_MODEL_MIXTRAL_8X7B_DOWNLOAD_TO_HOST=yes --env.CM_MLPERF_DATASET_MIXTRAL_8X7B_DOWNLOAD_TO_HOST=yes --adr.openorca-mbxp-gsm8k-combined-preprocessed.tags=_size.1
+ cm run script --tags=run-mlperf,inference,_submission,_short --adr.inference-src.tags=_branch.dev --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --model=mixtral-8x7b --implementation=reference --batch_size=1 --precision=${{ matrix.precision }} --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker_it=no --docker_cm_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --docker --quiet --test_query_count=3 --target_qps=0.001 --clean --env.MLC_MLPERF_MODEL_MIXTRAL_8X7B_DOWNLOAD_TO_HOST=yes --env.MLC_MLPERF_DATASET_MIXTRAL_8X7B_DOWNLOAD_TO_HOST=yes --adr.openorca-mbxp-gsm8k-combined-preprocessed.tags=_size.1
cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions - GO-phoenix" --quiet --submission_dir=$HOME/gh_action_submissions
diff --git a/.github/workflows/test-mlperf-inference-resnet50.yml b/.github/workflows/test-mlperf-inference-resnet50.yml
index 6114d1dff..5ee480e21 100644
--- a/.github/workflows/test-mlperf-inference-resnet50.yml
+++ b/.github/workflows/test-mlperf-inference-resnet50.yml
@@ -13,7 +13,7 @@ jobs:
mlperf-inference-r50:
runs-on: ${{ matrix.os }}
env:
- CM_INDEX: "on"
+ MLC_INDEX: "on"
strategy:
fail-fast: false
matrix:
@@ -74,4 +74,4 @@ jobs:
git commit -a -m "Test commit"
git push https://x-access-token:${{ env.PAT }}@github.com/mlcommons/mlperf_inference_test_submissions_v5.0
- # mlcr --tags=push,github,mlperf,inference,submission --env.CM_GITHUB_PAT=pat --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet
+ # mlcr --tags=push,github,mlperf,inference,submission --env.MLC_GITHUB_PAT=pat --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet
diff --git a/.github/workflows/test-mlperf-inference-rnnt.yml b/.github/workflows/test-mlperf-inference-rnnt.yml
index 9937f9260..1d77ee282 100644
--- a/.github/workflows/test-mlperf-inference-rnnt.yml
+++ b/.github/workflows/test-mlperf-inference-rnnt.yml
@@ -30,7 +30,7 @@ jobs:
python-version: ${{ matrix.python-version }}
- name: Install dependencies on Unix Platforms
run: |
- CM_PULL_DEFAULT_MLOPS_REPO=no pip install cm4mlops
+ MLC_PULL_DEFAULT_MLOPS_REPO=no pip install cm4mlops
- name: Pull MLOps repository
run: |
cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }}
diff --git a/.github/workflows/test-mlperf-inference-sdxl.yaml b/.github/workflows/test-mlperf-inference-sdxl.yaml
index 402424b54..cbdb0bd04 100644
--- a/.github/workflows/test-mlperf-inference-sdxl.yaml
+++ b/.github/workflows/test-mlperf-inference-sdxl.yaml
@@ -18,8 +18,8 @@ jobs:
run: |
source gh_action/bin/deactivate || python3 -m venv gh_action
source gh_action/bin/activate
- export CM_REPOS=$HOME/GH_CM
+ export MLC_REPOS=$HOME/GH_MLC
python3 -m pip install cm4mlops
cm pull repo
- cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --docker --model=sdxl --backend=${{ matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --quiet --docker_it=no --docker_cm_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --env.CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean
+ cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --docker --model=sdxl --backend=${{ matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --quiet --docker_it=no --docker_cm_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean
cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/gh_action_submissions
diff --git a/.github/workflows/test-mlperf-inference-tvm.yml b/.github/workflows/test-mlperf-inference-tvm.yml
index fa363d65c..8ecf27fa2 100644
--- a/.github/workflows/test-mlperf-inference-tvm.yml
+++ b/.github/workflows/test-mlperf-inference-tvm.yml
@@ -27,7 +27,7 @@ jobs:
python-version: ${{ matrix.python-version }}
- name: Install dependencies on Unix Platforms
run: |
- CM_PULL_DEFAULT_MLOPS_REPO=no pip install cm4mlops
+ MLC_PULL_DEFAULT_MLOPS_REPO=no pip install cm4mlops
- name: Pull MLOps repository
run: |
cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }}
diff --git a/.github/workflows/test-nvidia-mlperf-inference-implementations.yml b/.github/workflows/test-nvidia-mlperf-inference-implementations.yml
index 23cd33e54..5ff906bbd 100644
--- a/.github/workflows/test-nvidia-mlperf-inference-implementations.yml
+++ b/.github/workflows/test-nvidia-mlperf-inference-implementations.yml
@@ -47,8 +47,8 @@ jobs:
if [ -f "gh_action/bin/deactivate" ]; then source gh_action/bin/deactivate; fi
python3 -m venv gh_action
source gh_action/bin/activate
- export CM_REPOS=$HOME/GH_CM
- CM_PULL_DEFAULT_MLOPS_REPO=no pip install --upgrade cm4mlops
+ export MLC_REPOS=$HOME/GH_MLC
+ MLC_PULL_DEFAULT_MLOPS_REPO=no pip install --upgrade cm4mlops
cm pull repo
cm run script --tags=run-mlperf,inference,_all-scenarios,_submission,_full,_r4.1-dev --preprocess_submission=yes --pull_changes=yes --pull_inference_changes=yes --execution_mode=valid --gpu_name=$gpu_name --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=$hw_name --implementation=nvidia --backend=tensorrt --category=datacenter,edge --division=closed --docker_dt=yes --docker_it=no --docker_cm_repo=mlcommons@mlperf-automations --docker_cm_repo_branch=dev --adr.compiler.tags=gcc --device=cuda --use_model_from_host=yes --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean $docker_string --quiet
diff --git a/.github/workflows/test-qaic-compute-sdk-build.yml b/.github/workflows/test-qaic-compute-sdk-build.yml
index 6bb069685..6eb901fed 100644
--- a/.github/workflows/test-qaic-compute-sdk-build.yml
+++ b/.github/workflows/test-qaic-compute-sdk-build.yml
@@ -26,7 +26,7 @@ jobs:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
- CM_PULL_DEFAULT_MLOPS_REPO=no pip install cm4mlops
+ MLC_PULL_DEFAULT_MLOPS_REPO=no pip install cm4mlops
cm run script --tags=get,sys-utils-cm --quiet
- name: Test QAIC Compute SDK for compilation
diff --git a/.github/workflows/test-scc24-sdxl.yaml b/.github/workflows/test-scc24-sdxl.yaml
index 50849e7b1..61fac51db 100644
--- a/.github/workflows/test-scc24-sdxl.yaml
+++ b/.github/workflows/test-scc24-sdxl.yaml
@@ -9,8 +9,8 @@ jobs:
if: github.repository_owner == 'gateoverflow'
runs-on: [ self-hosted, linux, x64, GO-spr ]
env:
- CM_DOCKER_REPO: mlcommons@mlperf-automations
- CM_DOCKER_REPO_BRANCH: dev
+ MLC_DOCKER_REPO: mlcommons@mlperf-automations
+ MLC_DOCKER_REPO_BRANCH: dev
strategy:
fail-fast: false
matrix:
@@ -24,21 +24,21 @@ jobs:
if [ -f "gh_action/bin/deactivate" ]; then source gh_action/bin/deactivate; fi
python3 -m venv gh_action
source gh_action/bin/activate
- export CM_REPOS=$HOME/GH_CM
+ export MLC_REPOS=$HOME/GH_MLC
pip install --upgrade mlcflow
pip install tabulate
mlc pull repo
- mlcr --tags=run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --pull_changes=yes --pull_inference_changes=yes --model=sdxl --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_cm_repo=$CM_DOCKER_REPO --docker_cm_repo_branch=$CM_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean
- mlcr --tags=run-mlperf,inference,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_cm_repo=$CM_DOCKER_REPO --docker_cm_repo_branch=$CM_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean
- mlcr --tags=generate,inference,submission --clean --run-checker --tar=yes --env.CM_TAR_OUTFILE=submission.tar.gz --division=open --category=datacenter --run_style=test --adr.submission-checker.tags=_short-run --quiet --submitter=MLCommons --submission_dir=$HOME/scc_gh_action_submissions --results_dir=$HOME/scc_gh_action_results/test_results
+ mlcr --tags=run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --pull_changes=yes --pull_inference_changes=yes --model=sdxl --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_cm_repo=$MLC_DOCKER_REPO --docker_cm_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean
+ mlcr --tags=run-mlperf,inference,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_cm_repo=$MLC_DOCKER_REPO --docker_cm_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean
+ mlcr --tags=generate,inference,submission --clean --run-checker --tar=yes --env.MLC_TAR_OUTFILE=submission.tar.gz --division=open --category=datacenter --run_style=test --adr.submission-checker.tags=_short-run --quiet --submitter=MLCommons --submission_dir=$HOME/scc_gh_action_submissions --results_dir=$HOME/scc_gh_action_results/test_results
mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/cm4mlperf-inference --repo_branch=mlperf-inference-results-scc24 --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/scc_gh_action_submissions
build_nvidia:
if: github.repository_owner == 'gateoverflow'
runs-on: [ self-hosted, linux, x64, GO-spr]
env:
- CM_DOCKER_REPO: mlcommons@mlperf-automations
- CM_DOCKER_REPO_BRANCH: dev
+ MLC_DOCKER_REPO: mlcommons@mlperf-automations
+ MLC_DOCKER_REPO_BRANCH: dev
strategy:
fail-fast: false
matrix:
@@ -52,11 +52,11 @@ jobs:
if [ -f "gh_action/bin/deactivate" ]; then source gh_action/bin/deactivate; fi
python3 -m venv gh_action
source gh_action/bin/activate
- export CM_REPOS=$HOME/GH_CM
+ export MLC_REPOS=$HOME/GH_MLC
pip install --upgrade mlcflow
pip install tabulate
mlc pull repo
- mlcr --tags=run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --pull_changes=yes --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_cm_repo=$CM_DOCKER_REPO --docker_cm_repo_branch=$CM_DOCKER_REPO_BRANCH --docker_dt=yes --pull_changes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --hw_name=go-spr --custom_system_nvidia=yes --clean
- mlcr --tags=run-mlperf,inference,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_cm_repo=$CM_DOCKER_REPO --docker_cm_repo_branch=$CM_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean
- mlcr --tags=generate,inference,submission --clean --run-checker --tar=yes --env.CM_TAR_OUTFILE=submission.tar.gz --division=open --category=datacenter --run_style=test --adr.submission-checker.tags=_short-run --quiet --submitter=MLCommons --submission_dir=$HOME/scc_gh_action_submissions --results_dir=$HOME/scc_gh_action_results/test_results
+ mlcr --tags=run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --pull_changes=yes --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_cm_repo=$MLC_DOCKER_REPO --docker_cm_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --pull_changes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --hw_name=go-spr --custom_system_nvidia=yes --clean
+ mlcr --tags=run-mlperf,inference,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_cm_repo=$MLC_DOCKER_REPO --docker_cm_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean
+ mlcr --tags=generate,inference,submission --clean --run-checker --tar=yes --env.MLC_TAR_OUTFILE=submission.tar.gz --division=open --category=datacenter --run_style=test --adr.submission-checker.tags=_short-run --quiet --submitter=MLCommons --submission_dir=$HOME/scc_gh_action_submissions --results_dir=$HOME/scc_gh_action_results/test_results
mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/cm4mlperf-inference --repo_branch=mlperf-inference-results-scc24 --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/scc_gh_action_submissions
diff --git a/automation/cache/module_misc.py b/automation/cache/module_misc.py
index 91ecbd5a1..d4eb76d1c 100644
--- a/automation/cache/module_misc.py
+++ b/automation/cache/module_misc.py
@@ -86,7 +86,7 @@ def copy_to_remote(i):
new_env = cm_cached_state['new_env']
new_state = cm_cached_state['new_state'] # Todo fix new state
cm_repos_path = os.environ.get(
- 'CM_REPOS', os.path.join(
+ 'MLC_REPOS', os.path.join(
os.path.expanduser("~"), "CM", "repos"))
cm_cache_path = os.path.realpath(
os.path.join(cm_repos_path, "local", "cache"))
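For reference, the lookup in `copy_to_remote` resolves the repos root from the environment with a home-relative fallback and then derives the cache path. A self-contained sketch of that pattern (it mirrors the code above; the `print` is only for illustration):

```python
import os

# Resolve the repos root: prefer the MLC_REPOS environment variable,
# otherwise fall back to a directory under the user's home.
repos_path = os.environ.get(
    'MLC_REPOS', os.path.join(os.path.expanduser('~'), 'CM', 'repos'))

# The local cache lives under <repos>/local/cache; realpath canonicalizes
# symlinks so remote copies see a stable location.
cache_path = os.path.realpath(os.path.join(repos_path, 'local', 'cache'))
print(cache_path)
```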
diff --git a/automation/cfg/README-extra.md b/automation/cfg/README-extra.md
deleted file mode 100644
index cc94030ab..000000000
--- a/automation/cfg/README-extra.md
+++ /dev/null
@@ -1,8 +0,0 @@
-Examples:
-
-```bash
-cm set cfg default
-cm set cfg default --key.script.silent
-cm set cfg default --key.script.silent-
-
-```
diff --git a/automation/cfg/README.md b/automation/cfg/README.md
deleted file mode 100644
index 3c82852c8..000000000
--- a/automation/cfg/README.md
+++ /dev/null
@@ -1,27 +0,0 @@
-*This README is automatically generated - don't edit! Use `README-extra.md` for extra notes!*
-
-### Automation actions
-
-#### test
-
- * CM CLI: ```cm test cfg``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cfg/module.py#L15))
- * CM CLI with UID: ```cm test cfg,88dce9c160324c5d``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cfg/module.py#L15))
- * CM Python API:
- ```python
- import cmind
-
- r=cmind.access({
-      'action':'test',
-      'automation':'cfg,88dce9c160324c5d',
-      'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cfg/module.py#L15)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-### Maintainers
-
-* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
\ No newline at end of file
diff --git a/automation/cfg/_cm.json b/automation/cfg/_cm.json
deleted file mode 100644
index 9a1dc030e..000000000
--- a/automation/cfg/_cm.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "action_substitutions": {
- "set":"xset"
- },
- "alias": "cfg",
- "automation_alias": "automation",
- "automation_uid": "bbeb15d8f0a944a4",
- "tags": [
- "automation"
- ],
- "uid": "88dce9c160324c5d"
-}
diff --git a/automation/cfg/module.py b/automation/cfg/module.py
deleted file mode 100644
index 6fff7d802..000000000
--- a/automation/cfg/module.py
+++ /dev/null
@@ -1,259 +0,0 @@
-# Universal cfg for CM automations
-#
-# Written by Grigori Fursin
-
-import os
-
-from cmind.automation import Automation
-from cmind import utils
-
-
-class CAutomation(Automation):
- """
- Automation actions
- """
-
- ############################################################
- def __init__(self, cmind, automation_file):
- super().__init__(cmind, __file__)
-
- ############################################################
- def test(self, i):
- """
- Test automation
-
- Args:
- (CM input dict):
-
- (out) (str): if 'con', output to console
-
- automation (str): automation as CM string object
-
- parsed_automation (list): prepared in CM CLI or CM access function
- [ (automation alias, automation UID) ] or
- [ (automation alias, automation UID), (automation repo alias, automation repo UID) ]
-
- (artifact) (str): artifact as CM string object
-
- (parsed_artifact) (list): prepared in CM CLI or CM access function
- [ (artifact alias, artifact UID) ] or
- [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
-
- ...
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
-
- * Output from this automation action
-
- """
-
- import json
- print(json.dumps(i, indent=2))
-
- return {'return': 0}
-
- ############################################################
- def xset(self, i):
- """
- Set keys in configuration
-
- Args:
- (CM input dict):
-
- (out) (str): if 'con', output to console
-
- (artifact) (str): CM artifact with configuration
- (tags) (str): list of tags to find CM artifact with configuration
-
- (key) (dict): updating config
- ...
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
-
- * Output from this automation action
-
- """
-
- import json
-
- r = self._find_cfg_artifact(i)
- if r['return'] > 0:
- return r
-
- # Path to cfg
- path = r['path']
- path_to_config = r['path_to_config']
- config = r['config']
-
- # Clean input to leave only keys for the configuration
- new_config = i.get('key', {})
-
- # If new config is empty, just print existing config
- if len(new_config) > 0:
- # Check if need to delete some
- def check_to_delete(d):
-
- for k in list(d.keys()):
- v = d[k]
- if isinstance(v, dict):
- check_to_delete(v)
- else:
- if k.endswith('-'):
- if k[:-1] in d:
- del (d[k[:-1]])
- del (d[k])
- else:
- vsl = str(v).lower()
- if vsl == 'none':
- v = None
- elif vsl == 'false':
- v = False
- elif vsl == 'true':
- v = True
-
- d[k] = v
-
- utils.merge_dicts({'dict1': config,
- 'dict2': new_config,
- 'append_lists': True,
- 'append_unique': True})
-
- check_to_delete(config)
-
- r = utils.save_json(path_to_config, config)
- if r['return'] > 0:
- return r
-
- # Print config
- print('Config:')
- print('')
- print(json.dumps(config, indent=2))
-
- return {'return': 0}
-
- ############################################################
- def load(self, i):
- """
- Load configuration
-
- Args:
- (CM input dict):
-
- (out) (str): if 'con', output to console
-
- (artifact) (str): CM artifact with configuration
- (tags) (str): list of tags to find CM artifact with configuration
- ...
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
-
- * Output from this automation action
-
- """
-
- return self._find_cfg_artifact(i)
-
- ############################################################
- def _find_cfg_artifact(self, i):
- """
- Args:
- (CM input dict):
-
- (out) (str): if 'con', output to console
-
- (artifact) (str): CM artifact with configuration
- (tags) (str): list of tags to find CM artifact with configuration
- ...
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
-
- * Output from this automation action
-
- """
-
- # Clean input to find artifact
- ii = utils.sub_input(i, self.cmind.cfg['artifact_keys'] + ['tags'])
-
- parsed_artifact = i.get('parsed_artifact', [])
-
- artifact_obj = parsed_artifact[0] if len(parsed_artifact) > 0 else None
- artifact_repo = parsed_artifact[1] if len(
- parsed_artifact) > 1 else None
-
- artifact = i.get('artifact', '')
-
- if artifact == '':
- ii['artifact'] = 'default'
-
- tags = ii.get('tags', '')
-
- if 'cm-universal-cfg' not in tags:
- if tags != '':
- tags += ','
- tags += 'cm-universal-cfg'
-
- ii['tags'] = tags
-
- automation = ii['automation']
- if automation != '.' and ',' not in automation:
- ii['automation'] = automation + ',' + self.meta['uid']
-
- # Add placeholder (use common action)
-
- ii['action'] = 'find'
- ii['out'] = ''
- # Avoid recursion - use internal CM add function to add the script
- # artifact
- ii['common'] = True
-
- r = self.cmind.access(ii)
- if r['return'] > 0:
- return r
-
- lst = r['list']
-
- if len(lst) == 0:
- ii['action'] = 'add'
- ii['meta'] = {}
-
- # Tags must be unique for default
- r = self.cmind.access(ii)
- if r['return'] > 0:
- return r
-
- path = r['path']
- elif len(lst) > 1:
- return {
- 'return': 1, 'error': 'ambiguity in cfg name - more than 1 CM artifact found'}
- else:
- path = lst[0].path
-
- # Check if has config
- path_to_cfg = os.path.join(path, 'config.json')
-
- config = {}
- if os.path.isfile(path_to_cfg):
- r = utils.load_json(path_to_cfg)
- if r['return'] > 0:
- return r
-
- config = r['meta']
-
- return {'return': 0, 'path': path,
- 'path_to_config': path_to_cfg, 'config': config}
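The removed `xset` action merged user keys into `config.json` and then post-processed them: a key ending in `-` deleted both itself and the key it names, and the strings `none`, `false` and `true` were coerced to Python values. A compact sketch of that behavior for reference (the function name is ours):

```python
def apply_updates(config: dict) -> None:
    # Mirrors the removed cfg.xset post-processing: recurse into nested
    # dicts, honor the trailing-dash deletion convention, and coerce
    # 'none'/'false'/'true' strings to None/False/True.
    for k in list(config.keys()):
        v = config[k]
        if isinstance(v, dict):
            apply_updates(v)
        elif k.endswith('-'):
            config.pop(k[:-1], None)  # delete the named key if present
            del config[k]             # and the marker key itself
        else:
            s = str(v).lower()
            if s == 'none':
                config[k] = None
            elif s in ('false', 'true'):
                config[k] = (s == 'true')

cfg = {'script': {'silent': 'true', 'verbose-': ''}}
apply_updates(cfg)
print(cfg)  # {'script': {'silent': True}}
```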
diff --git a/automation/experiment/README-extra.md b/automation/experiment/README-extra.md
deleted file mode 100644
index 454c8d6ac..000000000
--- a/automation/experiment/README-extra.md
+++ /dev/null
@@ -1,315 +0,0 @@
-[ [Back to index](../../../docs/README.md) ]
-
-
-Table of contents:
-
-* [CM "experiment" automation](#cm-"experiment"-automation)
- * [Introducing CM experiment automation](#introducing-cm-experiment-automation)
- * [Installing CM with ResearchOps/DevOps/MLOps automations](#installing-cm-with-researchops/devops/mlops-automations)
- * [Understanding CM experiments](#understanding-cm-experiments)
- * [Exploring combinations of parameters (autotuning, design space exploration)](#exploring-combinations-of-parameters-autotuning-design-space-exploration)
- * [Aggregating and unifying results](#aggregating-and-unifying-results)
- * [Visualizing results](#visualizing-results)
- * [Sharing experiments with the community](#sharing-experiments-with-the-community)
- * [Running CM experiments with CM scripts](#running-cm-experiments-with-cm-scripts)
- * [Further community developments](#further-community-developments)
-
-
-
-# CM "experiment" automation
-
-*We suggest you check [CM introduction](https://github.com/mlcommons/ck/blob/master/docs/introduction-cm.md),
- [CM CLI/API](https://github.com/mlcommons/ck/blob/master/docs/interface.md)
- and [CM scripts](../script/README-extra.md) to understand CM motivation and concepts.
- You can also try [CM tutorials](https://github.com/mlcommons/ck/blob/master/docs/tutorials/README.md)
- to run some applications and benchmarks on your platform using CM scripts.*
-
-## Introducing CM experiment automation
-
-
-Researchers, engineers and students spend a considerable amount of their time experimenting with
-many different settings of applications, tools, compilers, software and hardware
-to find the optimal combination suitable for their use cases.
-
-Based on their feedback, our [MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
-started developing a CM automation called "experiment".
-The goal is to provide a common interface to run, record, share, visualize and reproduce experiments
-on any platform with any software, hardware and data.
-
-The community helped us test a prototype of our "experiment" automation to record results in a unified CM format
-from [several MLPerf benchmarks](https://github.com/mlcommons/cm4mlperf-results)
-including [MLPerf inference](https://github.com/mlcommons/inference) and [MLPerf Tiny](https://github.com/mlcommons/tiny),
-visualize them at the [MLCommons CM platform](https://access.cknowledge.org/playground/?action=experiments&tags=all),
-and improve them via [public benchmarking, optimization and reproducibility challenges](https://access.cknowledge.org/playground/?action=challenges).
-
-
-
-## Installing CM with ResearchOps/DevOps/MLOps automations
-
-This CM automation is available in the most commonly used `mlcommons@cm4mlops` repository.
-
-First, install CM automation language as described [here](https://github.com/mlcommons/ck/blob/master/docs/installation.md).
-Then, install or update this repository as follows:
-```bash
-cm pull repo mlcommons@cm4mlops
-```
-
-You can now test that CM experiment automation is available as follows:
-```bash
-cm run experiment --help
-```
-or using the `cme` shortcut in CM V1.4.1+
-```bash
-cme --help
-```
-
-
-
-## Understanding CM experiments
-
-CM experiment simply wraps any user command line, creates an associated CM `experiment` artifact with a random ID (16 lowercase hex characters)
-and some user tags in `_cm.json`, creates an extra `{date}{time}` subdirectory with a `cm-input.json` file capturing the CM input,
-and executes the user command line inside an extra subdirectory with another random ID, as shown below.
-
-The following command will print "Hello World!" while recording all the provenance in CM format in the local CM repository:
-
-```bash
-cme --tags=my,experiment,hello-world -- echo "Hello World!"
-```
-or
-```bash
-cm run experiment --tags=my,experiment,hello-world -- echo "Hello World!"
-```
-
-You should see the output similar to the following:
-```bash
-
-Path to CM experiment artifact: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945
-Path to experiment: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945\2023-06-09.09-58-02.863466
-================================================================
-Experiment step: 1 out of 1
-
-Path to experiment step: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945\2023-06-09.09-58-02.863466\7ed0ea0edd6b4dd7
-
-"Hello World!"
-```
-
-You can find and explore the newly created CM artifact as follows:
-```bash
-cm find experiment --tags=my,experiment,hello-world
-```
-or using UID
-```bash
-cm find experiment b83a1fb24dbf4945
-```
-
-When running the same experiment again, CM will find the existing artifact by tags and create a new `{date}{time}` directory there:
-```bash
-cme --tags=my,experiment,hello-world -- echo "Hello World!"
-
-Path to CM experiment artifact: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945
-Path to experiment: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945\2023-06-09.10-02-08.911210
-================================================================
-Experiment step: 1 out of 1
-
-Path to experiment step: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945\2023-06-09.10-02-08.911210\7ed0ea0edd6b4dd7
-
-"Hello World!"
-```
-
-You can now replay this experiment as follows:
-```bash
-cm replay experiment --tags=my,experiment,hello-world
-```
-
-Note that you can obtain the current directory where you called CM
-(rather than the CM experiment artifact directory) via the {{CD}} variable as follows:
-```bash
-cme --tags=my,experiment,hello-world -- echo {{CD}}
-```
-
-You can also record experiments in another CM repository instead of the `local` one as follows:
-```bash
-cm list repo
-cme {CM repository from above list}: --tags=my,experiment,hello-world -- echo {{CD}}
-```
-
-Finally, you can force a specific artifact name instead of some random ID as follows:
-```bash
-cme {my experiment artifact name} --tags=my,experiment,hello-world -- echo {{CD}}
-```
-or with given repository
-```bash
-cme {CM repository from above list}:{my experiment artifact name} --tags=my,experiment,hello-world -- echo {{CD}}
-```
-
-## Exploring combinations of parameters (autotuning, design space exploration)
-
-One of the most common tasks in computer engineering (and other sciences)
-is to explore various combinations of parameters of some applications
-and systems to select the optimal ones to trade off performance, accuracy,
-power consumption, memory usage and other characteristics.
-
-As a starting point, we have implemented a very simple explorer as a Cartesian product
-of any number of specified variables that are passed to a user command line via double curly braces `{{VAR}}` similar to GitHub.
-
-You just need to create a simple JSON file `cm-input.json` to describe sets/ranges for each variable as follows:
-```json
-{
- "explore": {
- "VAR1": [
- 1,
- 2,
- 3
- ],
- "VAR2": [
- "a",
- "b"
- ],
- "VAR3": "[2**i for i in range(0,6)]"
- }
-}
-```
-
-or YAML `cm-input.yaml`:
-
-```yaml
-explore:
- VAR1: [1,2,3]
- VAR2: ["a","b"]
- VAR3: "[2**i for i in range(0,6)]"
-```
-
-You can then run the following example to see all iterations:
-```bash
-cm run experiment --tags=my,experiment,hello-world @test_input.yaml \
- -- echo %VAR1% --batch_size={{VAR1}} {{VAR2}} {{VAR4{['xx','yy','zz']}}}-%%VAR3%%
-```
-
-Note that you can also define a Python list or range for other variables
-directly in the command line, as demonstrated in the above example for `VAR4` - `{{VAR4{['xx','yy','zz']}}}`.
-
-CM will create or reuse an experiment artifact with tags `my,experiment,hello-world`
-and will then iterate in a Cartesian product of all detected variables.
-
-For each iteration, CM will create a `{date}{time}` subdirectory in a given experiment artifact
-and will then run a user command line with substituted variables there.
-
-You can then replay any of these exploration experiments as follows:
-```bash
-cm replay experiment --tags={tags} --dir={sub directory}
-```
-
-
-
-## Aggregating and unifying results
-
-Users can expose any information, such as measured characteristics of their applications and/or systems (performance,
-hardware or OS state, accuracy, internal parameters, etc.), to CM for further analysis and visualization
-by generating a JSON `cm-result.json` file with any dictionary.
-
-If this file exists after executing a user command, CM will load it after each experiment or exploration step
-and merge it into a list in a common `cm-result.json` in the `{date}{time}` directory for this experiment.
-
-
-
-## Visualizing results
-
-Users can now visualize multiple experiments using the CM GUI script as follows:
-```bash
-cm run script "gui _graph" --exp_tags=my,experiment,hello-world
-```
-
-This script will search for all CM experiment entries with these tags, read all `cm-result.json` files,
-detect all keys used in result dictionaries, let users select these keys for X and Y axes
-to prepare a 2D graph using the popular [Streamlit](https://streamlit.io) library, add derived metrics and set constraints
-as shown in the following example for one of the official [Tiny MLPerf submissions](https://github.com/mlcommons/tiny):
-
-
-
-
-
-
-
-
-## Sharing experiments with the community
-
-It is possible to share experiments with a common automation interface
-in your own GitHub/GitLab repository, container, or zip/tar file
-in a non-intrusive way.
-
-You need to go to the root directory of your project and initialize a CM repository there
-with a unique name "my-cool-project" as follows:
-
-```bash
-cm init repo my-cool-project --path=. --prefix=cmr
-```
-
-This command will create a `cmr.yaml` file with a description and unique ID of this repository,
-and will register it with CM. Note that all CM automations and artifacts will be located
-in the `cmr` sub-directory to avoid contaminating your project. They can be deleted
-or moved to another project at any time.
-
-You can now record new experiments in this repository by adding `my-cool-project:` to the cm experiment command line as follows:
-```bash
-cm run experiment my-cool-project: --tags=my,experiment,hello-world -- echo "Hello World!"
-```
-
-You can also move a set of existing experiments from the `local` CM repository to the new one as follows:
-```bash
-cm move experiment my-cool-project: --tags=my,experiment,hello-world
-```
-
-You can continue replaying these experiments in the same way, no matter which CM repository they are in:
-```bash
-cm replay experiment --tags=my,experiment,hello-world
-```
-
-or you can enforce a specific repository as follows:
-```bash
-cm replay experiment my-cool-project: --tags=my,experiment,hello-world
-```
-
-
-
-
-
-## Running CM experiments with CM scripts
-
-User scripts and tools may contain hardwired local paths that prevent replaying them on another platform.
-In such cases, we suggest using [CM scripts](../script/README-extra.md).
-
-CM scripts solve this problem by wrapping existing user scripts and tools and detecting/resolving paths
-to specific tools and artifacts on a given user platform.
-
-You can find an example of using CM scripts with CM experiments in [this directory](tests) - see `test3.bat` or `test3.sh`:
-```bash
-cm run experiment --tags=test @test3_input.yaml -- cm run script "print hello-world native" --env.CM_ENV_TEST1={{VAR1}} --const.CM_ENV_TEST2={{VAR2}}
-```
-
-You can use the following environment variables to pass the current path,
-the paths to experiment entries, and the experiment step number to your CM script:
-* {{CD}}
-* {{CM_EXPERIMENT_STEP}}
-* {{CM_EXPERIMENT_PATH}}
-* {{CM_EXPERIMENT_PATH2}}
-* {{CM_EXPERIMENT_PATH3}}
-
-
-Feel free to check [this tutorial](../../../docs/tutorials/common-interface-to-reproduce-research-projects.md)
-to add CM scripts for your own applications, tools and native scripts.
-
-We are currently extending CM experiments and CM scripts for MLPerf benchmarks
-to automate benchmarking, optimization and design space exploration of ML/AI systems
-on any software and hardware - please stay tuned via our [Discord server](https://discord.gg/JjWNWXKxwT).
-
-
-
-## Further community developments
-
-We are developing this experiment automation in CM to help the community share, reproduce and reuse experiments
-using a common, simple, human-readable, and portable [automation language](../../../docs/README.md).
-
-Join our [Discord server](https://discord.gg/JjWNWXKxwT) from the [MLCommons task force on automation and reproducibility](../taskforce.md)
-to participate in the unification and extension of this interface and CM scripts for diverse research projects and tools.
-
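The aggregation contract described in this removed README amounts to "write a `cm-result.json` with any dictionary next to your run", which CM then loaded and merged after each step. A minimal producer sketch under that assumption (the measured values are invented for illustration):

```python
import json

# Expose measured characteristics of a run to the experiment automation
# by dumping any dictionary into cm-result.json in the working directory.
result = {
    'latency_ms': 12.4,   # illustrative numbers only
    'accuracy': 0.761,
    'batch_size': 8,
}

with open('cm-result.json', 'w') as f:
    json.dump(result, f, indent=2)
```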
diff --git a/automation/experiment/README.md b/automation/experiment/README.md
deleted file mode 100644
index 13ea6ec1a..000000000
--- a/automation/experiment/README.md
+++ /dev/null
@@ -1,87 +0,0 @@
-*This README is automatically generated - don't edit! See [extra README](README-extra.md) for extra notes!*
-
-### Automation actions
-
-#### test
-
- * CM CLI: ```cm test experiment``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L22))
- * CM CLI with UID: ```cm test experiment,a0a2d123ef064bcb``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L22))
- * CM Python API:
- ```python
- import cmind
-
- r=cmind.access({
-      'action':'test',
-      'automation':'experiment,a0a2d123ef064bcb',
-      'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L22)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### run
-
- * CM CLI: ```cm run experiment``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L64))
- * CM CLI with UID: ```cm run experiment,a0a2d123ef064bcb``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L64))
- * CM Python API:
- ```python
- import cmind
-
- r=cmind.access({
-      'action':'run',
-      'automation':'experiment,a0a2d123ef064bcb',
-      'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L64)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### rerun
-
- * CM CLI: ```cm rerun experiment``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L428))
- * CM CLI with UID: ```cm rerun experiment,a0a2d123ef064bcb``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L428))
- * CM Python API:
- ```python
- import cmind
-
- r=cmind.access({
-      'action':'rerun',
-      'automation':'experiment,a0a2d123ef064bcb',
-      'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L428)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### replay
-
- * CM CLI: ```cm replay experiment``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L451))
- * CM CLI with UID: ```cm replay experiment,a0a2d123ef064bcb``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L451))
- * CM Python API:
- ```python
- import cmind
-
- r=cmind.access({
-      'action':'replay',
-      'automation':'experiment,a0a2d123ef064bcb',
-      'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L451)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-### Maintainers
-
-* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
\ No newline at end of file
diff --git a/automation/experiment/_cm.json b/automation/experiment/_cm.json
deleted file mode 100644
index 49bb0e616..000000000
--- a/automation/experiment/_cm.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "alias": "experiment",
- "automation_alias": "automation",
- "automation_uid": "bbeb15d8f0a944a4",
- "desc": "Managing and reproducing experiments (under development)",
- "developers": "[Grigori Fursin](https://cKnowledge.org/gfursin)",
- "tags": [
- "automation"
- ],
- "uid": "a0a2d123ef064bcb"
-}
diff --git a/automation/experiment/module.py b/automation/experiment/module.py
deleted file mode 100644
index 6e98029d5..000000000
--- a/automation/experiment/module.py
+++ /dev/null
@@ -1,844 +0,0 @@
-# Universal experiment automation to support universal benchmarking
-# and optimization of apps and systems
-#
-# Written by Grigori Fursin
-
-import os
-import itertools
-import copy
-import json
-
-from cmind.automation import Automation
-from cmind import utils
-
-
-class CAutomation(Automation):
- """
- CM "experiment" automation actions
- """
-
- CM_RESULT_FILE = 'cm-result.json'
- CM_INPUT_FILE = 'cm-input.json'
- CM_OUTPUT_FILE = 'cm-output.json'
-
- ############################################################
- def __init__(self, cmind, automation_file):
- super().__init__(cmind, __file__)
-
- ############################################################
- def test(self, i):
- """
- Test automation
-
- Args:
- (CM input dict):
-
- (out) (str): if 'con', output to console
-
- automation (str): automation as CM string object
-
- parsed_automation (list): prepared in CM CLI or CM access function
- [ (automation alias, automation UID) ] or
- [ (automation alias, automation UID), (automation repo alias, automation repo UID) ]
-
- (artifact) (str): artifact as CM string object
-
- (parsed_artifact) (list): prepared in CM CLI or CM access function
- [ (artifact alias, artifact UID) ] or
- [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
-
- ...
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
-
- * Output from this automation action
- """
-
- import json
- print(json.dumps(i, indent=2))
-
- return {'return': 0}
-
- ############################################################
-
- def run(self, i):
- """
- Run experiment
-
- Args:
- (CM input dict):
-
- (out) (str): if 'con', output to console
-
- (artifact) (str): experiment artifact name (can include repository separated by :)
- (tags) (str): experiment tags separated by comma
-
- (dir) (str): force recording into a specific directory
-
-
- (script) (str): find and run CM script by name
- (s)
-
- (script_tags) (str): find and run CM script by tags
- (stags)
-
- (rerun) (bool): if True, rerun experiment in a given entry/directory instead of creating a new one...
-
- (explore) (dict): exploration dictionary
-
- ...
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
-
- * Output from this automation action
- """
-
- # Copy of original input
- ii_copy = copy.deepcopy(i)
- cur_dir = os.getcwd()
-
- # Find or add artifact based on repo/alias/tags
- r = self._find_or_add_artifact(i)
- if r['return'] > 0:
- return r
-
- experiment = r['experiment']
-
- console = i.get('out', '') == 'con'
-
- # Print experiment folder
- experiment_path = experiment.path
-
- if console:
- print('')
- print('Path to CM experiment artifact: {}'.format(experiment_path))
-
- # Get directory with datetime
- datetime = i.get('dir', '')
-
- if datetime == '' and i.get('rerun', False):
- # Check if already some dir exist
-
- directories = os.listdir(experiment_path)
-
- datetimes = sorted([f for f in directories if os.path.isfile(
- os.path.join(experiment_path, f, self.CM_RESULT_FILE))], reverse=True)
-
- if len(datetimes) == 1:
- datetime = datetimes[0]
- elif len(datetimes) > 1:
- print('')
- print('Select experiment:')
-
- datetimes = sorted(datetimes)
-
- num = 0
- print('')
- for d in datetimes:
- print('{}) {}'.format(num, d.replace('.', ' ')))
- num += 1
-
- if not console:
- return {
- 'return': 1, 'error': 'more than 1 experiment found.\nPlease use "cm rerun experiment --dir={date and time}"'}
-
- print('')
- x = input('Make your selection or press Enter for 0: ')
-
- x = x.strip()
- if x == '':
- x = '0'
-
- selection = int(x)
-
- if selection < 0 or selection >= num:
- selection = 0
-
- datetime = datetimes[selection]
-
- if datetime != '':
- experiment_path2 = os.path.join(experiment_path, datetime)
- else:
- num = 0
- found = False
-
- while not found:
- r = utils.get_current_date_time({})
- if r['return'] > 0:
- return r
-
- datetime = r['iso_datetime'].replace(
- ':', '-').replace('T', '.')
-
- if num > 0:
- datetime += '.' + str(num)
-
- experiment_path2 = os.path.join(experiment_path, datetime)
-
- if not os.path.isdir(experiment_path2):
- found = True
- break
-
- num += 1
-
- # Check/create directory with date_time
- if not os.path.isdir(experiment_path2):
- os.makedirs(experiment_path2)
-
- # Change current path
- print('Path to experiment: {}'.format(experiment_path2))
-
- os.chdir(experiment_path2)
-
- # Record experiment input with possible exploration
- experiment_input_file = os.path.join(
- experiment_path2, self.CM_INPUT_FILE)
- experiment_result_file = os.path.join(
- experiment_path2, self.CM_RESULT_FILE)
-
- # Clean original input
- for k in ['parsed_artifact', 'parsed_automation', 'cmd']:
- if k in ii_copy:
- del (ii_copy[k])
-
- r = utils.save_json(file_name=experiment_input_file, meta=ii_copy)
- if r['return'] > 0:
- return r
-
- # Prepare run command
- cmd = ''
-
- unparsed = i.get('unparsed_cmd', [])
- if len(unparsed) > 0:
- for u in unparsed:
- if ' ' in u:
- u = '"' + u + '"'
- cmd += ' ' + u
-
- cmd = cmd.strip()
-
- # Prepare script run
- env = i.get('env', {})
-
- ii = {'action': 'native-run',
- 'automation': 'script,5b4e0237da074764',
- 'env': env}
-
- # Prepare exploration
- # Note that from Python 3.7, dictionaries are ordered so we can define order for exploration in json/yaml
- # ${{XYZ}} ${{ABC(range(1,2,3))}}
-
- # Extract exploration expressions from {{VAR{expression}}}
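-        # Example (based on tests/test2.sh): in the command
-        #   echo {{VAR1}} {{VAR4{['xx','yy','zz']}}}
-        # the expression {['xx','yy','zz']} is removed from the command and
-        # recorded as explore['VAR4'], leaving {{VAR4}} for later substitution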
- explore = i.get('explore', {})
-
- j = 1
- k = 0
- while j >= 0:
- j = cmd.find('}}}', k)
- if j >= 0:
- k = j + 1
-
- l = cmd.rfind('{{', 0, j)
-
- if l >= 0:
- l2 = cmd.find('{', l + 2, j)
- if l2 >= 0:
- k = l2 + 1
-
- var = cmd[l + 2:l2]
- expr = cmd[l2 + 1:j]
-
- explore[var] = expr
-
- cmd = cmd[:l2] + cmd[j + 1:]
-
- # Separate Design Space Exploration into var and range
- explore_keys = []
- explore_dimensions = []
-
- for k in explore:
- v = explore[k]
-
- explore_keys.append(k)
-
-            # String values such as "[2**i for i in range(0,6)]" are
-            # evaluated into lists to form an exploration dimension
-            if not isinstance(v, list):
-                v = eval(v)
-
- explore_dimensions.append(v)
-
-        # Count the total number of steps (len(list(...)) consumes the
-        # iterator, so we recreate it below before the actual loop)
- step = 0
-
- steps = itertools.product(*explore_dimensions)
-
- num_steps = len(list(steps))
-
- steps = itertools.product(*explore_dimensions)
-
- ii_copy = copy.deepcopy(ii)
-
- for dimensions in steps:
-
- step += 1
-
- print('================================================================')
- print('Experiment step: {} out of {}'.format(step, num_steps))
-
- print('')
-
- ii = copy.deepcopy(ii_copy)
-
- env = ii.get('env', {})
-
- l_dimensions = len(dimensions)
- if l_dimensions > 0:
- print(' Updating ENV variables during exploration:')
-
- print('')
- for j in range(l_dimensions):
- v = dimensions[j]
- k = explore_keys[j]
- print(' - Dimension {}: "{}" = {}'.format(j, k, v))
-
- env[k] = str(v)
-
- print('')
-
- # Generate UID and prepare extra directory:
- r = utils.gen_uid()
- if r['return'] > 0:
- return r
-
- uid = r['uid']
-
- experiment_path3 = os.path.join(experiment_path2, uid)
- if not os.path.isdir(experiment_path3):
- os.makedirs(experiment_path3)
-
- # Get date time of experiment
- r = utils.get_current_date_time({})
- if r['return'] > 0:
- return r
-
- current_datetime = r['iso_datetime']
-
- # Change current path
- print('Path to experiment step: {}'.format(experiment_path3))
- print('')
- os.chdir(experiment_path3)
-
- # Prepare and run experiment in a given placeholder directory
- os.chdir(experiment_path3)
-
- ii['env'] = env
-
- # Change only in CMD
- env_local = {'CD': cur_dir,
- 'CM_EXPERIMENT_STEP': str(step),
- 'CM_EXPERIMENT_PATH': experiment_path,
- 'CM_EXPERIMENT_PATH2': experiment_path2,
- 'CM_EXPERIMENT_PATH3': experiment_path3}
-
- # Update {{}} in CMD
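-            # e.g. "echo {{VAR1}} {{CM_EXPERIMENT_PATH3}}" becomes
-            # "echo 1 <path to this experiment step>" using values
-            # from env / env_local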
- cmd_step = cmd
-
- j = 1
- k = 0
- while j >= 0:
- j = cmd_step.find('{{', k)
- if j >= 0:
- k = j
- l = cmd_step.find('}}', j + 2)
- if l >= 0:
- var = cmd_step[j + 2:l]
-
- # Such vars must be in env
- if var not in env and var not in env_local:
- return {
- 'return': 1, 'error': 'key "{}" is not in env during exploration'.format(var)}
-
- if var in env:
- value = env[var]
- else:
- value = env_local[var]
-
- cmd_step = cmd_step[:j] + str(value) + cmd_step[l + 2:]
-
- ii['command'] = cmd_step
-
- print('Generated CMD:')
- print('')
- print(cmd_step)
- print('')
-
- # Prepare experiment step input
- experiment_step_input_file = os.path.join(
- experiment_path3, self.CM_INPUT_FILE)
-
- r = utils.save_json(file_name=experiment_step_input_file, meta=ii)
- if r['return'] > 0:
- return r
-
- experiment_step_output_file = os.path.join(
- experiment_path3, self.CM_OUTPUT_FILE)
- if os.path.isfile(experiment_step_output_file):
-                os.remove(experiment_step_output_file)
-
- # Run CMD
- rr = self.cmind.access(ii)
- if rr['return'] > 0:
- return rr
-
- # Record output
- result = {}
-
- if os.path.isfile(experiment_step_output_file):
- r = utils.load_json(file_name=experiment_step_output_file)
- if r['return'] > 0:
- return r
-
- result = r['meta']
-
- # Try to flatten
- try:
- flatten_result = flatten_dict(result)
- result = flatten_result
- except BaseException:
- pass
-
- # Add extra info
- result['uid'] = uid
- result['iso_datetime'] = current_datetime
-
- # Attempt to append to the main file ...
- all_results = []
-
- if os.path.isfile(experiment_result_file):
- r = utils.load_json(file_name=experiment_result_file)
- if r['return'] > 0:
- return r
-
- all_results = r['meta']
-
- all_results.append(result)
-
- r = utils.save_json(
- file_name=experiment_result_file,
- meta=all_results)
- if r['return'] > 0:
- return r
-
- rr = {'return': 0,
- 'experiment_path': experiment_path,
- 'experiment_path2': experiment_path2}
-
- return rr
-
- ############################################################
-
- def rerun(self, i):
- """
- Rerun experiment
-
- cm run experiment --rerun=True ...
- """
-
- i['rerun'] = True
-
- return self.run(i)
-
- ############################################################
-
- def replay(self, i):
- """
- Replay experiment
-
- Args:
- (CM input dict):
-
- (out) (str): if 'con', output to console
-
- (artifact) (str): experiment artifact
-
- (tags) (str): experiment tags separated by comma
-
- (dir) (str): experiment directory (often date time)
- (uid) (str): unique ID of an experiment
-
- ...
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
-
- * Output from this automation action
- """
-
- # Find or add artifact based on repo/alias/tags
- i['fail_if_not_found'] = True
- r = self._find_or_add_artifact(i)
- if r['return'] > 0:
- return r
-
- experiment = r['experiment']
-
- console = i.get('out', '') == 'con'
-
- # Print experiment folder
- experiment_path = experiment.path
-
- if console:
- print('')
- print('Path to CM experiment artifact: {}'.format(experiment_path))
-
- # Check date and time folder
- uid = i.get('uid', '')
- datetime = i.get('dir', '')
-
- if datetime != '':
- datetimes = [datetime]
- else:
- directories = os.listdir(experiment_path)
-
- datetimes = sorted([f for f in directories if os.path.isfile(
- os.path.join(experiment_path, f, self.CM_RESULT_FILE))], reverse=True)
-
- if len(datetimes) == 0:
- return {'return': 1, 'error': 'experiment(s) not found in {}'.format(
- experiment_path)}
-
- # Check datetime directory
- found_result = {}
-
- if uid != '':
- for d in datetimes:
- r = self._find_uid({'path': experiment_path, 'datetime': d, 'uid': uid})
- if r['return'] > 0:
- return r
-
- if len(r.get('result', {})) > 0:
- found_result = r['result']
- datetime = d
- experiment_path2 = os.path.join(experiment_path, datetime)
- break
-
- if len(found_result) == 0:
- return {'return': 1, 'error': 'couldn\'t find result with UID {} in {}'.format(
- uid, experiment_path)}
-
- else:
- if len(datetimes) == 1:
- datetime = datetimes[0]
- else:
- print('')
- print('Available experiments:')
-
- datetimes = sorted(datetimes)
-
- num = 0
- print('')
- for d in datetimes:
- print('{}) {}'.format(num, d.replace('.', ' ')))
- num += 1
-
- if not console:
- return {
- 'return': 1, 'error': 'more than 1 experiment found.\nPlease use "cm run experiment --dir={date and time}"'}
-
- print('')
- x = input('Make your selection or press Enter for 0: ')
-
- x = x.strip()
- if x == '':
- x = '0'
-
- selection = int(x)
-
- if selection < 0 or selection >= num:
- selection = 0
-
- datetime = datetimes[selection]
-
- # Final path to experiment
- experiment_path2 = os.path.join(experiment_path, datetime)
-
- if not os.path.isdir(experiment_path2):
- return {'return': 1, 'error': 'experiment path not found {}'.format(
- experiment_path2)}
-
- r = self._find_uid({'path': experiment_path, 'datetime': datetime})
- if r['return'] > 0:
- return r
-
- results = r['meta']
-
- if len(results) == 0:
- return {'return': 1, 'error': 'results not found in {}'.format(
- experiment_path2)}
-
- elif len(results) == 1:
- selection = 0
-
- else:
- print('')
- print('Available Unique IDs of results:')
-
- results = sorted(results, key=lambda x: x.get('uid', ''))
-
- num = 0
- print('')
- for r in results:
- print('{}) {}'.format(num, r.get('uid', '')))
- num += 1
-
- if not console:
- return {
- 'return': 1, 'error': 'more than 1 result found.\nPlease use "cm run experiment --uid={result UID}"'}
-
- print('')
- x = input('Make your selection or press Enter for 0: ')
-
- x = x.strip()
- if x == '':
- x = '0'
-
- selection = int(x)
-
- if selection < 0 or selection >= num:
- selection = 0
-
- found_result = results[selection]
- uid = found_result['uid']
-
- # Final info
- if console:
- print('')
- print('Path to experiment: {}'.format(experiment_path2))
-
- print('')
- print('Result UID: {}'.format(uid))
-
- # Attempt to load cm-input.json
- experiment_input_file = os.path.join(
- experiment_path2, self.CM_INPUT_FILE)
-
- if not os.path.isfile(experiment_input_file):
- return {
- 'return': 1, 'error': '{} not found - can\'t replay'.format(self.CM_INPUT_FILE)}
-
- r = utils.load_json(experiment_input_file)
- if r['return'] > 0:
- return r
-
- cm_input = r['meta']
-
- tags = cm_input.get('tags', '').strip()
- if 'replay' not in tags:
- if tags != '':
- tags += ','
- tags += 'replay'
- cm_input['tags'] = tags
-
- if console:
- print('')
- print('Experiment input:')
- print('')
- print(json.dumps(cm_input, indent=2))
- print('')
-
- # Run experiment again
- r = self.cmind.access(cm_input)
- if r['return'] > 0:
- return r
-
- # TBA - validate experiment, etc ...
-
- return {'return': 0}
-
- ############################################################
-
- def _find_or_add_artifact(self, i):
- """
-        Find or add experiment artifact (reused in run and replay)
-
- Args:
- (CM input dict):
-
- (fail_if_not_found) (bool) - if True, fail if experiment is not found
-
- ...
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
-
- experiment (CM artifact class): Experiment artifact
-
- """
-
- console = i.get('out', '') == 'con'
-
- # Try to find experiment artifact by alias and/or tags
- ii = utils.sub_input(i, self.cmind.cfg['artifact_keys'] + ['tags'])
- ii['action'] = 'find'
-
- ii_copy = copy.deepcopy(ii)
-
- # If artifact is specified, remove tags
- artifact = ii.get('artifact', '').strip()
- if artifact != '' and not artifact.endswith(':') \
- and '*' not in artifact and '?' not in artifact:
- if 'tags' in ii:
- del (ii['tags'])
-
- r = self.cmind.access(ii)
- if r['return'] > 0:
- return r
-
- lst = r['list']
-
- if len(lst) > 1:
- print('More than 1 experiment artifact found:')
-
- lst = sorted(lst, key=lambda x: x.path)
-
- num = 0
- print('')
- for e in lst:
- print('{}) {}'.format(num, e.path))
- print(
- ' Tags: {}'.format(
- ','.join(
- e.meta.get(
- 'tags',
- []))))
- num += 1
-
- if not console:
- return {'return': 1, 'error': 'more than 1 experiment artifact found.\nPlease use "cm run experiment {name}" or "cm run experiment --tags={tags separated by comma}"'}
-
- print('')
- x = input('Make your selection or press Enter for 0: ')
-
- x = x.strip()
- if x == '':
- x = '0'
-
- selection = int(x)
-
- if selection < 0 or selection >= num:
- selection = 0
-
- experiment = lst[selection]
-
- elif len(lst) == 1:
- experiment = lst[0]
- else:
- # Create new entry
- if i.get('fail_if_not_found', False):
- return {'return': 1, 'error': 'experiment not found'}
-
- ii = copy.deepcopy(ii_copy)
- ii['action'] = 'add'
- r = self.cmind.access(ii)
- if r['return'] > 0:
- return r
-
- experiment_uid = r['meta']['uid']
-
- r = self.cmind.access({'action': 'find',
- 'automation': 'experiment,a0a2d123ef064bcb',
- 'artifact': experiment_uid})
- if r['return'] > 0:
- return r
-
- lst = r['list']
-            if len(lst) == 0 or len(lst) > 1:
- return {
- 'return': 1, 'error': 'created experiment artifact with UID {} but can\'t find it - weird'.format(experiment_uid)}
-
- experiment = lst[0]
-
- return {'return': 0, 'experiment': experiment}
-
- ############################################################
- def _find_uid(self, i):
- """
- Find experiment result with a given UID
-
- Args:
- (CM input dict):
-
- path (str): path to experiment artifact
- datetime (str): sub-path to experiment
- (uid) (str): experiment UID
-
- ...
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
-
- path_to_file (str): path to experiment result file
- meta (dict): complete list of all results
- result (dict): result dictionary with a given UID
-
- """
-
- path = i['path']
- datetime = i['datetime']
- uid = i.get('uid', '').strip()
-
- path_to_experiment_result_file = os.path.join(
- path, datetime, self.CM_RESULT_FILE)
-
- rr = {'return': 0, 'path_to_file': path_to_experiment_result_file}
-
- if os.path.isfile(path_to_experiment_result_file):
- r = utils.load_json(file_name=path_to_experiment_result_file)
- if r['return'] > 0:
- return r
-
- meta = r['meta']
-
- rr['meta'] = meta
-
- # Searching for UID
- if uid != '':
- for result in meta:
- ruid = result.get('uid', '').strip()
-                    if ruid != '' and ruid == uid:
- rr['result'] = result
- break
-
- return rr
-
-############################################################################
-
-
-def flatten_dict(d, flat_dict=None, prefix=''):
-    # Use None instead of a mutable default argument so that repeated
-    # calls do not share (and leak results into) the same dictionary
-    if flat_dict is None:
-        flat_dict = {}
-
- for k in d:
- v = d[k]
-
- if type(v) is dict:
- flatten_dict(v, flat_dict, prefix + k + '.')
- else:
- flat_dict[prefix + k] = v
-
- return flat_dict
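-
-# Example: flatten_dict({'x': {'y': 1}, 'z': 2}) returns {'x.y': 1, 'z': 2}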
diff --git a/automation/experiment/tests/test2.bat b/automation/experiment/tests/test2.bat
deleted file mode 100644
index 5ecb3a0d8..000000000
--- a/automation/experiment/tests/test2.bat
+++ /dev/null
@@ -1 +0,0 @@
-cm run experiment --tags=test @test_input.yaml -- echo %VAR1% --batch_size={{VAR1}} {{VAR2}} {{VAR4{['xx','yy','zz']}}}-%%VAR3%%
diff --git a/automation/experiment/tests/test2.sh b/automation/experiment/tests/test2.sh
deleted file mode 100644
index 40d60a25a..000000000
--- a/automation/experiment/tests/test2.sh
+++ /dev/null
@@ -1 +0,0 @@
-cm run experiment --tags=test @test_input.yaml -- echo "\${VAR1} --batch_size={{VAR1}} {{VAR2}} {{VAR4{['xx','yy','zz']}}}-\${VAR3}"
\ No newline at end of file
diff --git a/automation/experiment/tests/test3.bat b/automation/experiment/tests/test3.bat
deleted file mode 100644
index 800e36076..000000000
--- a/automation/experiment/tests/test3.bat
+++ /dev/null
@@ -1 +0,0 @@
-cm run experiment --tags=test @test3_input.yaml -- cm run script "print hello-world native" --env.CM_ENV_TEST1={{VAR1}} --const.CM_ENV_TEST2={{VAR2}}
diff --git a/automation/experiment/tests/test3.sh b/automation/experiment/tests/test3.sh
deleted file mode 100644
index 148e56433..000000000
--- a/automation/experiment/tests/test3.sh
+++ /dev/null
@@ -1 +0,0 @@
-cm run experiment --tags=test @test3_input.yaml -- cm run script "print hello-world native" --env.CM_ENV_TEST1={{VAR1}} --const.CM_ENV_TEST2={{VAR2}}
diff --git a/automation/experiment/tests/test3_input.yaml b/automation/experiment/tests/test3_input.yaml
deleted file mode 100644
index 1c789f52a..000000000
--- a/automation/experiment/tests/test3_input.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-explore:
- VAR1: [1,2,3]
- VAR2: ["a","b"]
- CM_ENV_TEST3: "[2**i for i in range(0,6)]"
diff --git a/automation/experiment/tests/test__json.bat b/automation/experiment/tests/test__json.bat
deleted file mode 100644
index 16eb9184b..000000000
--- a/automation/experiment/tests/test__json.bat
+++ /dev/null
@@ -1 +0,0 @@
-cm run experiment --tags=test @test_input.json -- {{CD}}\test_run.bat
diff --git a/automation/experiment/tests/test__json.sh b/automation/experiment/tests/test__json.sh
deleted file mode 100644
index a46cb98f5..000000000
--- a/automation/experiment/tests/test__json.sh
+++ /dev/null
@@ -1 +0,0 @@
-cm run experiment --tags=test @test_input.json -- {{CD}}/test_run.sh
diff --git a/automation/experiment/tests/test__yaml.bat b/automation/experiment/tests/test__yaml.bat
deleted file mode 100644
index e583f209b..000000000
--- a/automation/experiment/tests/test__yaml.bat
+++ /dev/null
@@ -1 +0,0 @@
-cm run experiment --tags=test @test_input.yaml -- {{CD}}\test_run.bat
diff --git a/automation/experiment/tests/test__yaml.sh b/automation/experiment/tests/test__yaml.sh
deleted file mode 100644
index 60c2f7a80..000000000
--- a/automation/experiment/tests/test__yaml.sh
+++ /dev/null
@@ -1 +0,0 @@
-cm run experiment --tags=test @test_input.yaml -- {{CD}}/test_run.sh
diff --git a/automation/experiment/tests/test_input.json b/automation/experiment/tests/test_input.json
deleted file mode 100644
index f682f5a34..000000000
--- a/automation/experiment/tests/test_input.json
+++ /dev/null
@@ -1,14 +0,0 @@
-{
- "explore": {
- "VAR1": [
- 1,
- 2,
- 3
- ],
- "VAR2": [
- "a",
- "b"
- ],
- "VAR3": "[2**i for i in range(0,6)]"
- }
-}
diff --git a/automation/experiment/tests/test_input.yaml b/automation/experiment/tests/test_input.yaml
deleted file mode 100644
index a621c5ef9..000000000
--- a/automation/experiment/tests/test_input.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-explore:
- VAR1: [1,2,3]
- VAR2: ["a","b"]
- VAR3: "[2**i for i in range(0,6)]"
diff --git a/automation/experiment/tests/test_run.bat b/automation/experiment/tests/test_run.bat
deleted file mode 100644
index b3aa91028..000000000
--- a/automation/experiment/tests/test_run.bat
+++ /dev/null
@@ -1,3 +0,0 @@
-echo %VAR1% --batch_size=%VAR3% %VAR2%
-
-echo {"x":%VAR1%, "y":"%VAR2%", "z":%VAR3%} > cm-output.json
diff --git a/automation/experiment/tests/test_run.sh b/automation/experiment/tests/test_run.sh
deleted file mode 100644
index 7ed1b472e..000000000
--- a/automation/experiment/tests/test_run.sh
+++ /dev/null
@@ -1 +0,0 @@
-echo $VAR1 --batch_size=$VAR3 $VAR2
diff --git a/automation/script/README-extra.md b/automation/script/README-extra.md
deleted file mode 100644
index 7fc982067..000000000
--- a/automation/script/README-extra.md
+++ /dev/null
@@ -1,1035 +0,0 @@
-[ [Back to index](../../../docs/README.md) ]
-
-# CM "script" automation
-
-
-
-Click here to see the table of contents.
-
- * [Motivation](#motivation)
- * [Obtaining shared CM scripts](#obtaining-shared-cm-scripts)
- * [Getting started with CM scripts](#getting-started-with-cm-scripts)
- * [Understanding CM scripts](#understanding-cm-scripts)
- * [Wrapping native scripts](#wrapping-native-scripts)
- * [Modifying environment variables](#modifying-environment-variables)
- * [Understanding unified output dictionary](#understanding-unified-output-dictionary)
- * [Modifying state dictionary](#modifying-state-dictionary)
- * [Running CM scripts via CM Python API](#running-cm-scripts-via-cm-python-api)
- * [Assembling pipelines (workflows) of CM scripts](#assembling-pipelines-workflows-of-cm-scripts)
- * [Customizing CM script execution flow](#customizing-cm-script-execution-flow)
- * [Caching output of CM scripts](#caching-output-of-cm-scripts)
- * [Assembling pipeline to compile and run image corner detection](#assembling-pipeline-to-compile-and-run-image-corner-detection)
- * [Customizing sub-dependencies in a pipeline](#customizing-sub-dependencies-in-a-pipeline)
- * [Using Python virtual environments](#using-python-virtual-environments)
- * [Assembling pipelines with other artifacts included](#assembling-pipelines-with-other-artifacts-included)
- * [Unifying host OS and CPU detection](#unifying-host-os-and-cpu-detection)
- * [Detecting, installing and caching system dependencies](#detecting-installing-and-caching-system-dependencies)
- * [Using variations](#using-variations)
- * [Running CM scripts inside containers](#running-cm-scripts-inside-containers)
- * [Getting help about other script automation flags](#getting-help-about-other-script-automation-flags)
- * [Further reading](#further-reading)
-
-
-
-*We suggest checking the [CM introduction](https://github.com/mlcommons/ck/blob/master/docs/introduction-cm.md)
- and [CM CLI/API](https://github.com/mlcommons/ck/blob/master/docs/interface.md) to understand CM motivation and concepts.
- You can also try [CM tutorials](https://github.com/mlcommons/ck/blob/master/docs/tutorials/README.md)
- to run some applications and benchmarks on your platform using CM scripts.*
-
-## Motivation
-
-While helping the community reproduce [150+ research papers](https://learning.acm.org/techtalks/reproducibility),
-we have noticed that researchers always create their own ad-hoc scripts, environment variables and files
-to perform *exactly the same steps (actions) across all papers* to prepare, run and reproduce their experiments
-across different software, hardware, models and data.
-
-
-
-This experience motivated us to create a CM automation called "script" to wrap native scripts
-from research and industrial projects with a common, simple and unified CM Command Line Interface and Python API.
-
-Such non-intrusive wrapping helps to make numerous native scripts and tools more reusable, interoperable, portable, findable
-and deterministic across different projects with different artifacts based on [FAIR principles](https://www.go-fair.org/fair-principles).
-
-CM scripts can be embedded into existing projects with minimal or no modifications at all, and they can be connected
-into powerful and portable pipelines and workflows using simple JSON or YAML files
-to prepare, run and reproduce experiments across continuously changing technology.
-
-Importantly, CM scripts can be executed in the same way in a native user environment,
-Python virtual environments (to avoid messing up native environment) and containers
-while automatically adapting to a given environment!
-
-
-
-
-
-
-
-## Obtaining shared CM scripts
-
-In order to reuse some CM scripts embedded into shared projects,
-you need to install these projects via the CM interface.
-
-For example, to use automation scripts developed by the
-[MLCommons task force on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
-and shared via GitHub, you just need to pull this repository via CM:
-
-```bash
-cm pull repo --url=https://github.com/mlcommons/cm4mlops --checkout=dev
-```
-
-or
-
-```bash
-cm pull repo mlcommons@cm4mlops --checkout=dev
-```
-
-You can now see all available CM scripts in your system as follows:
-
-```bash
-cm find script
-cm find script install* | sort
-
-```
-
-
-## Getting started with CM scripts
-
-You can run any of the above CM scripts on any platform as follows:
-```bash
-cm run script "tags separated by space" --keys=values --env.KEY=VALUE
-cm run script --tags="tags separated by comma" --keys=values --env.KEY=VALUE
-```
-or using a shortcut `cmr` available in CM V1.4.0+:
-```bash
-cmr "tags separated by space" --keys=values --env.KEY=VALUE
-```
-
-You can also use the `-j` flag to print JSON output at the end of the script execution
-and the `-v` flag to show extra debug information during script execution.
-
-For example, you can download a ResNet-50 model in ONNX format from Zenodo using the following script:
-```bash
-cmr "download file" --url=https://zenodo.org/record/4735647/files/resnet50_v1.onnx
-```
-
-You can also obtain info about your OS (Linux, Windows, MacOS) in a unified way and print JSON output
-as well as CM debug info as follows:
-```bash
-cmr "detect os" -j -v
-```
-
-You can turn on silent mode using CM cfg automation:
-```bash
-cm set cfg --key.script.silent
-```
-or
-```bash
-cm set cfg default --key.script.silent
-```
-
-
-## Understanding CM scripts
-
-CM scripts are treated as standard CM artifacts with the associated CM automation ["script"](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script),
-CM action ["run"](https://github.com/mlcommons/ck/blob/master/cm-mlops/automation/script/module.py#L73),
-and JSON and/or YAML meta descriptions.
-
-CM scripts can be invoked by using their alias, unique ID and human-readable tags (preferred method).
-
-For example, the [CM "Print Hello World" script](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world)
-simply wraps 2 native `run.sh` and `run.bat` scripts to print "Hello World" on Linux, MacOS or Windows
-together with a few environment variables:
-
-```bash
-ls `cm find script print-hello-world`
-
-README.md _cm.json run.bat run.sh
-```
-
-It is described by this [_cm.json meta description file](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/print-hello-world/_cm.json)
-with the following alias, UID and tags:
-
-```json
-{
- "automation_alias": "script",
- "automation_uid": "5b4e0237da074764",
-
- "alias": "print-hello-world",
- "uid": "b9f0acba4aca4baa",
-
- "default_env": {
- "CM_ENV_TEST1": "TEST1"
- },
-
- "env": {
- "CM_ENV_TEST2": "TEST2"
- },
-
- "input_mapping": {
- "test1": "CM_ENV_TEST1"
- },
-
- "new_env_keys": [
- "CM_ENV_TEST*"
- ],
-
- "new_state_keys": [
- "hello_test*"
- ],
-
- "tags": [
- "print",
- "hello-world",
- "hello world",
- "hello",
- "world",
- "native-script",
- "native",
- "script"
- ]
-}
-```
-
-The `automation_alias` and `automation_uid` keys tell CM that this artifact can be used with the CM "script" automation.
-
-Therefore, this script can be executed from the command line in any of the following ways:
-
-```bash
-cm run script print-hello-world
-cm run script b9f0acba4aca4baa
-cm run script --tags=print,native-script,hello-world
-cm run script "print native-script hello-world"
-```
-
-The same script can also be executed using the CM Python API as follows:
-```python
-import cmind
-
-output = cmind.access({'action':'run', 'automation':'script', 'tags':'print,native-script,hello-world'})
-if output['return']>0:
- cmind.error(output)
-
-import json
-print (json.dumps(output, indent=2))
-```
-
-Normally you should see the following output along with some debug information (that will be removed soon):
-
-```bash
-
-...
-
-CM_ENV_TEST1 = TEST1
-CM_ENV_TEST2 = TEST2
-
-HELLO WORLD!
-...
-```
-
-### Wrapping native scripts
-
-*run.bat* and *run.sh* are native scripts that will be executed by this CM script in a unified way on Linux, MacOS and Windows:
-
-```bash
-echo ""
-echo "CM_ENV_TEST1 = ${CM_ENV_TEST1}"
-echo "CM_ENV_TEST2 = ${CM_ENV_TEST2}"
-
-echo ""
-echo "HELLO WORLD!"
-```
-
-The idea of using native scripts is to make it easier for researchers and engineers to reuse their existing automation scripts
-while providing a common CM wrapper with a unified CLI, Python API and extensible meta descriptions.
-
-
-
-
-### Modifying environment variables
-
-The CM script automation CLI uses the flag `--env.VAR=VALUE` to set an environment variable and pass it to a native script
-as shown in this example:
-
-```bash
-cm run script "print native-script hello-world" \
- --env.CM_ENV_TEST1=ABC1 --env.CM_ENV_TEST2=ABC2
-
-...
-
-CM_ENV_TEST1 = ABC1
-CM_ENV_TEST2 = TEST2
-
-HELLO WORLD!
-```
-
-Note that *CM_ENV_TEST2* did not change. This happened because the `env` dictionary in *_cm.json* forces *CM_ENV_TEST2* to *TEST2*,
-while the `default_env` dictionary allows environment variables to be updated externally.
-
-You can still force an environment variable to a given value externally using a `--const` flag as follows:
-
-```bash
-cm run script "print native-script hello-world" \
- --env.CM_ENV_TEST1=ABC1 --const.CM_ENV_TEST2=ABC2
-
-...
-
-CM_ENV_TEST1 = ABC1
-CM_ENV_TEST2 = ABC2
-
-HELLO WORLD!
-
-```
-
-You can also use a JSON file instead of flags. Create *input.json* (or any other filename):
-```json
-{
- "tags":"print,native-script,hello-world",
- "env":{
- "CM_ENV_TEST1":"ABC1"
- }
-}
-```
-
-and run the CM script with this input file as follows:
-```
-cm run script @input.json
-```
-
-
-You can also use a YAML file instead of CLI flags. Create *input.yaml* (or any other filename):
-```yaml
-tags: "print,hello-world,script"
-env:
- CM_ENV_TEST1: "ABC1"
-```
-
-and run the CM script with this input file as follows:
-```
-cm run script @input.yaml
-```
-
-Finally, you can map any other flag from the script CLI to an environment variable
-using the key `input_mapping` in the `_cm.json` meta description of this script:
-
-```bash
-cm run script "print native-script hello-world" --test1=ABC1
-
-...
-
-CM_ENV_TEST1 = ABC1
-CM_ENV_TEST2 = TEST2
-
-HELLO WORLD!
-
-```
-
-
-### Understanding unified output dictionary
-
-You can see the output of a given CM script in the JSON format by adding `--out=json` flag as follows:
-
-```bash
-cm run script --tags=print,hello-world,script --env.CM_ENV_TEST1=ABC1 --out=json
-
-...
-
-CM_ENV_TEST1 = ABC1
-CM_ENV_TEST2 = ABC2
-
-HELLO WORLD!
-
-{
- "deps": [],
- "env": {
- "CM_ENV_TEST1": "ABC1",
- "CM_ENV_TEST2": "TEST2"
- },
- "new_env": {
- "CM_ENV_TEST1": "ABC1",
- "CM_ENV_TEST2": "TEST2"
- },
- "new_state": {},
- "return": 0,
- "state": {}
-}
-```
-
-Note that `new_env` shows the new environment variables produced and explicitly exposed by this script
-via the `new_env_keys` key in the `_cm.json` meta description of this script.
-
-This is needed to assemble automation pipelines and workflows while avoiding their contamination
-with temporary environment variables. A CM script must explicitly expose the environment variables that will
-go to the next stage of a pipeline.
-
-In the following example, `CM_ENV_TEST3` will be added to the `new_env` while `CM_XYZ` will not
-since it is not included in `"new_env_keys":["CM_ENV_TEST*"]`:
-
-```bash
-cm run script --tags=print,hello-world,script --env.CM_ENV_TEST1=ABC1 --out=json --env.CM_ENV_TEST3=ABC3 --env.CM_XYZ=XYZ
-```
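-
-The same filtering can be checked via the CM Python API shown earlier; a minimal sketch (the assertions only illustrate the behavior described above):
-
-```python
-import cmind
-
-r = cmind.access({'action': 'run',
-                  'automation': 'script',
-                  'tags': 'print,hello-world,script',
-                  'env': {'CM_ENV_TEST3': 'ABC3', 'CM_XYZ': 'XYZ'}})
-if r['return'] > 0:
-    cmind.error(r)
-
-# CM_ENV_TEST3 matches "CM_ENV_TEST*" from "new_env_keys" ...
-assert r['new_env'].get('CM_ENV_TEST3') == 'ABC3'
-# ... while CM_XYZ does not, so it is not exposed to the next pipeline stage
-assert 'CM_XYZ' not in r['new_env']
-```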
-
-### Modifying state dictionary
-
-Sometimes scripts and workflows need more complex structures than environment variables.
-For this, we use a `state` dictionary that can be updated and exposed by a given script via the `new_state_keys` key
-in the `_cm.json` meta description of this script.
-
-In the following example, the `hello_world` key will be updated in the `new_state` dictionary,
-while the `hello` key will not be updated because it does not match the wildcard in `"new_state_keys":["hello_world*"]`:
-
-```bash
-cm run script --tags=print,hello-world,script --out=json \
- --state.hello=xyz1 --state.hello_world=xyz2
-
-...
-
-{
- "deps": [],
- "env": {
- "CM_ENV_TEST1": "TEST1",
- "CM_ENV_TEST2": "TEST2"
- },
- "new_env": {
- "CM_ENV_TEST1": "TEST1",
- "CM_ENV_TEST2": "TEST2"
- },
- "new_state": {
- "hello_world": "xyz2"
- },
- "return": 0,
- "state": {
- "hello": "xyz1",
- "hello_world": "xyz2"
- }
-}
-```
-
-### Running CM scripts via CM Python API
-
-You can run a given CM script from Python or Jupyter notebooks as follows:
-
-```python
-
-import cmind
-
-r = cmind.access({'action':'run',
- 'automation':'script',
- 'tags':'print,hello-world,script',
- 'const':{
- 'CM_ENV_TEST1':'ABC1',
- },
- 'env':{
- 'CM_ENV_TEST2':'ABC2'
- },
- 'state': {
- 'hello':'xyz1',
- 'hello_world':'xyz2'
- }
- })
-
-print (r)
-
-```
-
-```bash
-...
-
-CM_ENV_TEST1 = ABC1
-CM_ENV_TEST2 = ABC2
-
-HELLO WORLD!
-
-{'return': 0,
- 'env': {'CM_ENV_TEST2': 'TEST2', 'CM_ENV_TEST1': 'ABC1'},
- 'new_env': {'CM_ENV_TEST2': 'TEST2', 'CM_ENV_TEST1': 'ABC1'},
- 'state': {'hello': 'xyz1', 'hello_world': 'xyz2'},
- 'new_state': {'hello_world': 'xyz2'},
- 'deps': []}
-
-```
-
-
-
-### Assembling pipelines (workflows) of CM scripts
-
-We've added a simple mechanism to chain reusable CM scripts into complex pipelines
-without the need for specialized workflow frameworks.
-
-Simply add the following "deps" list to the `_cm.json` or `_cm.yaml` of your script:
-
-```json
-
-{
- "deps": [
- {
- "tags": "a string of tags separated by comma to find and execute the 1st CM script"
- },
- {
- "tags": "a string of tags separated by comma to find and execute the 1st CM script"
- },
- ...
- ]
-}
-
-```
-
-This CM script will run all dependent scripts in the above sequence, aggregate environment variables and the `state` dictionary,
-and will then run its native scripts.
-
-You can also turn on specific dependencies based on the values of specific environment variables or on a min/max version (if supported)
-in this pipeline as follows:
-
-```json
-
-{
- "deps": [
- {
- "tags": "a string of tags separated by comma to find and execute the 1st CM script",
- "enable_if_env": { "USE_CUDA" : ["yes", "YES", "true"] }
- },
- {
- "tags": "a string of tags separated by comma to find and execute the 1st CM script"
- "enable_if_env": { "USE_CPU" : ["yes", "YES", "true"] },
- "version_min": "3.10"
- },
- ...
- ]
-}
-
-```
-
-You can also specify dependencies to be invoked after executing the native scripts
-using a `"post_deps"` list with the same format as `"deps"`.
-
-
-You can see an example of such dependencies in the [_cm.json](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/print-hello-world-py/_cm.json)
-of the ["print-hello-world-py" CM script](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/print-hello-world-py)
-that detects and unifies OS parameters using the ["detect-os" CM script](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-os),
-detects or builds Python using the ["get-python3" CM script](https://github.com/mlcommons/cm4mlops/tree/main/script/get-python3)
-and then runs `code.py` with "Hello World" from `run.sh` or `run.bat`:
-
-```bash
-cm run script "print python hello-world"
-```
-
-
-
-
-
-
-### Customizing CM script execution flow
-
-If a developer adds a `customize.py` file inside a given CM script,
-it can be used to update environment variables, prepare input scripts
-and even invoke other scripts programmatically using Python.
-
-If a function `preprocess` exists in this file, the CM script automation will call it before
-invoking a native script.
-
-If this function returns `{"skip":True}` in the output,
-further execution of this script will be skipped.
-
-After executing the preprocess function, the CM script automation will record the global state dictionary
-into *tmp-state.json* and the local state dictionary from this CM script into *tmp-state-new.json*.
-
-The CM script automation will then run a native script (run.sh on Linux/MacOS or run.bat on Windows)
-with all merged environment variables from previous scripts.
-
-Note that native scripts can also create 2 files that will be automatically picked up and processed by the CM script automation:
-* *tmp-run-env.out* - list of environment variables to update the "new_env" of a given CM script
-* *tmp-run-state.json* - the state dictionary to update the "new_state" of a given CM script
-
-If a `postprocess` function exists in the *customize.py* file, the CM script automation will call it
-to finalize the postprocessing of files, environment variables, and the state dictionary.
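-
-Below is a minimal sketch of such a `customize.py` (hypothetical names such as `CM_MY_TOOL_SKIP` and `CM_MY_TOOL_OPTS`; it assumes the conventional interface where both functions receive a dictionary holding `env` and `state` and return a dictionary with a `return` code):
-
-```python
-def preprocess(i):
-    env = i['env']
-
-    # Returning {'skip': True} tells the script automation
-    # to skip further execution of this script
-    if env.get('CM_MY_TOOL_SKIP', '') == 'yes':
-        return {'return': 0, 'skip': True}
-
-    # Prepare environment variables for run.sh / run.bat
-    env['CM_MY_TOOL_OPTS'] = '--fast'
-
-    return {'return': 0}
-
-
-def postprocess(i):
-    env = i['env']
-    state = i['state']
-
-    # Finalize environment variables and the state dictionary
-    env['CM_MY_TOOL_DONE'] = 'yes'
-    state['my_tool'] = {'status': 'ok'}
-
-    return {'return': 0}
-```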
-
-You can see an [example of such `customize.py` module](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-python3/customize.py) in the CM script
-to [detect or install/build Python interpreter](https://github.com/mlcommons/cm4mlops/tree/main/script/get-python3) in a unified way on any machine.
-
-This script exposes a number of environment variables for a detected Python
-in the [`postprocess` function](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-python3/customize.py#L60):
-
-* `CM_PYTHON_BIN` - python3.10 or python.exe or any other name of a Python interpreter on a given system
-* `CM_PYTHON_BIN_PATH` - path to a detected or installed python
-* `CM_PYTHON_BIN_WITH_PATH` - full path to a detected or installed python
-* `LD_LIBRARY_PATH` - updated LD_LIBRARY_PATH to python
-* `PATH` - updated PATH to python
-
-These environment variables can be reused by other CM scripts or external tools
-while decoupling them from specific python versions and paths, and even allowing
-multiple versions of tools and artifacts to co-exist on the same system
-and be plugged into CM scripts:
-
-```bash
-cm run script "get python3" --out=json
-```
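-
-The same script can be invoked from Python via the CM API shown earlier; a brief sketch that reuses the detected interpreter:
-
-```python
-import cmind
-
-r = cmind.access({'action': 'run',
-                  'automation': 'script',
-                  'tags': 'get,python3'})
-if r['return'] > 0:
-    cmind.error(r)
-
-# Path to the detected or installed Python interpreter
-print(r['new_env'].get('CM_PYTHON_BIN_WITH_PATH'))
-```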
-
-
-
-### Caching output of CM scripts
-
-By default, CM scripts run wrapped scripts and tools, update environment variables and produce new files in the current directory.
-
-In many cases, we want to cache the output and environment variables when we run the same CM script with the same input again
-to avoid potentially lengthy detections, downloads, builds and data pre/post processing.
-
-That's why we have developed another CM automation called ["cache"](../cache/README-extra.md)
-to cache the output of scripts in the "cache" artifacts in the "local" CM repository
-that can be found by tags or unique IDs like any other CM artifact.
-
-Our convention is to use names *get-{tool or artifact}* for CM scripts that detect already installed artifacts,
-prepare their environment and cache them in the *local* CM repository using the "cache" automation.
-
-If an installed artifact doesn't exist, we either enhance the above scripts to include the download, installation and even building
-of a given artifact (if it's a tool) or we create extra CM scripts *install-{tool or artifact}*
-that download and prepare tools and artifacts (install, build, preprocess, etc).
-
-For example, the CM script [*get-python3*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-python3)
-has *customize.py* with *preprocess* function that implements the search for python3 on Linux
-or python.exe on Windows, 2 native scripts *run.sh* and *run.bat* to obtain the version of the detected python installation,
-and *postprocess* function to prepare environment variables *CM_PYTHON_BIN* and *CM_PYTHON_BIN_WITH_PATH*
-that can be used by other CM scripts:
-
-```bash
-cm run script "get python" --out=json
-```
-
-If you run it for the first time and CM script detects multiple versions of python co-existing on your system,
-it will ask you to select one. CM will then cache the output in the *cache* artifact of the CM repository.
-You can see all *cache* CM entries for other tools and artifacts as follows:
-
-```bash
-cm show cache
-```
-or
-```bash
-cm show cache --tags=get,python
-```
-
-You can see the cached files as follows:
-```bash
-ls `cm find cache --tags=get,python`
-```
-
-* _cm.json - CM meta description of this "cache" artifact with its unique ID, tags and other meta information
-* cm-cached-state.json - dictionary with the new environment variables and the new state dictionary
-* tmp-env-all.sh - all environment variables used during CM script execution
-* tmp-env.sh - only new environment variables produced after CM script execution (it can be used directly by external tools)
-* tmp-run.sh - all environment variables and a call to the native script (useful for reproducibility)
-* tmp-state.json - the state before running native script - it can be loaded and used by native scripts and tools instead of using environment variables
-* tmp-ver.out - the output of the --version command parsed by `postprocess` and `detect_version` functions in `customize.py`
-
-
-If you (or another CM script) run this CM script to get the python tool a second time, it will reuse the cached output:
-```bash
-cm run script "get python" --out=json
-```
-
-This also allows us to install multiple tool versions into different CM cache entries (python virtual environments,
-LLVM compiler, etc) and use them separately without the need to change higher-level CM scripts - these tools
-will be automatically plugged in:
-
-```bash
-cm run script "install prebuilt llvm" --version=14.0.0
-cm run script "install prebuilt llvm" --version=16.0.0
-cm run script "install src llvm"
-```
-
-
-Such an approach allows us to "probe" the user environment, detect different tools and artifacts, unify them
-and adapt complex applications to a user environment in an automatic, transparent and non-intrusive way
-as shown in the next example.
-
-
-
-
-
-
-## Assembling pipeline to compile and run image corner detection
-
-We can use a compiler automatically detected by a CM script to create simple and technology-neutral compilation and execution pipelines
-in CM scripts.
-
-For example, we have implemented a simple [image corner detection CM script]( https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-corner-detection )
-with [this meta description](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/app-image-corner-detection/_cm.json).
-
-It uses two other reusable CM scripts to compile a given program using a detected/installed and cached compiler via CM (such as LLVM),
-and then run it with some input image.
-
-First, let's detect an installed LLVM via CM:
-
-```bash
-cm run script "get llvm"
-```
-or install a prebuilt version on Linux, MacOS or Windows:
-```bash
-cm run script "install prebuilt llvm" --version=14.0.0
-```
-
-We can then run this CM script to compile and run image corner detection as follows:
-```bash
-cm run script "app image corner-detection" --input=`cm find script --tags=app,image,corner-detection`/computer_mouse.pgm
-```
-
-This CM script will preset environment variables for a detected/installed compiler,
-compile our C program, run it via `run.sh` (Linux/MacOS) or `run.bat` (Windows)
-and generate an output image *output_image_with_corners.pgm* in the `output` directory of this script:
-
-```bash
-ls `cm find script --tags=app,image,corner-detection`/output
-
-image-corner output_image_with_corners.pgm
-
-```
-
-Note that this directory also contains the compiled tool "image-corner" that can now be used independently from CM if necessary.
-
-
-
-
-### Customizing sub-dependencies in a pipeline
-
-When running a CM script with many sub-dependencies, similar to the above example,
-we may want to specify some version constraints on sub-dependencies such as LLVM.
-
-One can use the key `"names"` in the "deps" list of any CM script meta description
-to specify multiple names for a given dependency.
-
-For example, a dependency to "get compiler" in CM script "compile-program"
-has `"names":["compiler"]` as shown [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/compile-program/_cm.json#L15).
-
-We can now use the CM script flag `--add_deps_recursive.{some name}.{some key}={some value}` or
-`--adr.{above name}.{some key}={some value}` to update the dictionary of every sub-dependency
-that has `some name`.
-
-For example, we can now specify to use LLVM 16.0.0 for image corner detection as follows:
-```bash
-cm run script "app image corner-detection" --adr.compiler.tags=llvm --adr.compiler.version=16.0.0
-```
-
-If this compiler was not yet detected or installed by CM, it will find related scripts
-to install either a prebuilt version of LLVM or build it from sources.
-
-
-## Using Python virtual environments
-
-By default, CM scripts will install python dependencies into user space.
-This can influence other existing projects and may not be desirable.
-CM can be used inside virtual Python environments without any changes,
-but a user still needs to do some manual steps to set up such an environment.
-That's why we've developed a [CM script](https://github.com/mlcommons/cm4mlops/tree/main/script/install-python-venv)
-to automate creation of multiple Python virtual environments with different names:
-
-```bash
-cm run script "install python-venv" --name={some name}
-```
-
-CM will create a virtual environment using the default Python and save it in the CM cache.
-It is possible to create a python virtual environment with a minimal required version
-or a specific one on Linux and MacOS as follows:
-
-```bash
-cm run script "install python-venv" --version_min=3.8 --name=mlperf
-cm run script "install python-venv" --version=3.10.8 --name=mlperf2
-```
-
-In this case, CM will attempt to detect Python 3.10.8 on the system.
-If CM can't detect it, CM will then automatically download and build it
-using [this script](https://github.com/mlcommons/cm4mlops/tree/main/script/install-python-src).
-
-Now, when a user runs pipelines that install Python dependencies, CM will detect
-the virtual environment in the CM cache as well as the native Python and will ask the user
-which one to use.
-
-It is possible to avoid such questions by using the flag `--adr.python.name=mlperf`.
-In such a case, CM will propagate the name of the virtual environment to all sub-dependencies
-as shown in the next example.
-
-Instead of adding this flag to all scripts, you can specify it
-using the `CM_SCRIPT_EXTRA_CMD` environment variable as follows:
-```bash
-export CM_SCRIPT_EXTRA_CMD="--adr.python.name=mlperf"
-```
-
-You can even specify the minimum required Python version as follows:
-```bash
-export CM_SCRIPT_EXTRA_CMD="--adr.python.name=mlperf --adr.python.version_min=3.9"
-```
-
-## Assembling pipelines with other artifacts included
-
-We can now use existing CM scripts as "LEGO" blocks to assemble more complex automation pipelines and workflows
-while automatically downloading, plugging in
-and pre-/post-processing all necessary artifacts (models, data sets, frameworks, compilers, etc)
-on any supported platform (Linux, MacOS, Windows).
-
-For example, we have implemented a simple image classification application automated by the following CM script:
-[*app-image-classification-onnx-py*]( https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-onnx-py ).
-
-It is described by the following [`_cm.yaml`](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/app-image-classification-onnx-py/_cm.yaml) meta description:
-
-```yaml
-alias: app-image-classification-onnx-py
-uid: 3d5e908e472b417e
-
-automation_alias: script
-automation_uid: 5b4e0237da074764
-
-category: "Modular ML/AI applications"
-
-tags:
-- app
-- image-classification
-- onnx
-- python
-
-default_env:
- CM_BATCH_COUNT: '1'
- CM_BATCH_SIZE: '1'
-
-deps:
-- tags: detect,os
-- tags: get,sys-utils-cm
-- names:
- - python
- - python3
- tags: get,python3
-- tags: get,cuda
- names:
- - cuda
- enable_if_env:
- USE_CUDA:
- - yes
-- tags: get,dataset,imagenet,image-classification,original
-- tags: get,dataset-aux,imagenet-aux,image-classification
-- tags: get,ml-model,resnet50,_onnx,image-classification
-
-- tags: get,generic-python-lib,_onnxruntime
- skip_if_env:
- USE_CUDA:
- - yes
-- tags: get,generic-python-lib,_onnxruntime_gpu
- enable_if_env:
- USE_CUDA:
- - yes
-
-variations:
- cuda:
- env:
- USE_CUDA: yes
-```
-
-
-Its `deps` pipeline runs other CM scripts to detect OS parameters, detect or install Python,
-install the latest ONNX runtime, download the ResNet-50 model and a minimal ImageNet dataset (500 images).
-
-It also contains [`run.sh`](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/app-image-classification-onnx-py/run.sh)
-and [`run.bat`](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/app-image-classification-onnx-py/run.bat)
-to install extra Python requirements (not yet unified by CM scripts)
-and run a Python script that classifies an image from ImageNet
-or an image provided by the user.
-
-Before running it, let us install a Python virtual environment via CM to avoid altering
-the native Python installation:
-```bash
-cm run script "install python-venv" --name=my-test
-cm show cache --tags=python
-```
-
-You can run it on any system as follows:
-
-```bash
-cm run script "python app image-classification onnx"
-
-```
-
-
-To avoid CM asking which python to use, you can force the use of the Python virtual environment
-as follows:
-
-```bash
-cm run script "python app image-classification onnx" --adr.python.name=my-test
-```
-
-
-
-If you run this CM script for the first time, it may take several minutes because it will detect, download, build and cache all dependencies.
-
-When you run it again, it will plug in all cached dependencies:
-
-```bash
-cm run script "python app image-classification onnx" --adr.python.name.my-test
-
-```
-
-You can then run it with your own image as follows:
-```bash
-cm run script --tags=app,image-classification,onnx,python \
-     --adr.python.name=my-test --input={path to my JPEG image}
-```
-
-
-
-## Unifying host OS and CPU detection
-
-In order to make experiments more portable and interoperable, we need to unify
-the information about host OS and CPU across different systems.
-We are gradually improving the following two CM scripts:
-
-* [`detect-os`](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-os)
-* [`detect-cpu`](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-cpu)
-
-These two CM scripts have *customize.py* with preprocess and postprocess functions
-and a native run script to detect OS info and update environment variables
-and the state dictionary needed by all other CM scripts.
-
-You can run them on your platform as follows:
-
-```bash
-cm run script "detect os" --out=json
-
-...
-
-cm run script "detect cpu" --out=json
-```
-
-If some information is missing or not consistent across different platforms,
-you can improve it in a backwards compatible way. You can then submit a PR [here](https://github.com/mlcommons/ck/pulls)
-to let the community reuse your knowledge and collaboratively enhance common automation scripts, pipelines and workflows -
-that's why we called our project "Collective Knowledge".
-
-
-## Detecting, installing and caching system dependencies
-
-Many projects require installation of some system dependencies. Unfortunately, the procedure
-is different across different systems.
-
-That's why we have developed two other CM scripts to unify and automate this process on any system.
-
-* [`get-sys-utils-cm`]( https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-cm )
-* [`get-sys-utils-min`]( https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-min )
-
-They will install (minimal) system dependencies based on the OS and CPU info detected by CM scripts mentioned above.
-
-The last script is particularly useful for making applications compatible with Windows,
-where many typical tools like "wget", "patch", etc. are missing - they will be automatically
-downloaded by that script.
-
-You can use them as follows:
-```bash
-cm run script "get sys-utils-min" --out=json
-cm run script "get sys-utils-cm"
-```
-
-
-
-
-## Using variations
-
-In some cases, we want the same CM script to download some artifact in different formats.
-
-For example, we may want to download and cache the ResNet-50 model in the ONNX, PyTorch, TensorFlow or TFLite format.
-
-In such cases, we use so-called `variations` in the meta description of a given CM script.
-
-For example, the CM script [`get-ml-model-resnet50`](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-resnet50) has many variations and combinations separated by comma
-to download this model in multiple formats:
-
-* `onnx`
-* `onnx,opset-11`
-* `onnx,opset-8`
-* `pytorch`
-* `pytorch,fp32`
-* `pytorch,int8`
-* `tflite`
-* `tflite,argmax`
-* `tflite,no-argmax`
-* `tensorflow`
-* `batch_size.1`
-* `batch_size.#`
-
-These variations simply update environment variables and add more dependencies on other CM scripts
-before running `customize.py` and native scripts as described in [_cm.json]( https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-ml-model-resnet50/_cm.json#L30 ).
-
-It is possible to specify a required variation or multiple variations when running a given CM script by adding extra tags with the "_" prefix.
-
-For example, you can install a quantized ResNet-50 model in the PyTorch int8 format as follows:
-
-```bash
-cm run script "get ml-model resnet50 _pytorch _int8" --out=json
-```
-
-You can install another FP32 variation of this model at the same time:
-```bash
-cm run script "get ml-model resnet50 _pytorch _fp32" --out=json
-```
-
-You can now find them in cache by tags and variations as follows:
-```bash
-cm show cache --tags=get,ml-model,resnet50
-cm show cache --tags=get,ml-model,resnet50,_pytorch
-cm show cache --tags=get,ml-model,resnet50,_pytorch,_fp32
-```
-
-
-
-
-
-
-
-
-
-
-
-## Running CM scripts inside containers
-
-One of the important ideas behind using a common automation language
-is to use it inside and outside containers, thus avoiding the need to create
-ad-hoc manual containers and README files.
-
-We can just use base containers and let the CM automation language
-detect installed tools and connect external data with the automation pipelines and workflows.
-
-See examples of modular containers with CM language to automate the MLPerf inference benchmark from MLCommons
-[here](https://github.com/mlcommons/ck/tree/master/docker).
-
-Note that we continue working on CM functionality to automatically generate
-Docker containers and README files when executing CM scripts
-(a prototype was successfully validated in the MLPerf inference v3.0 submission):
-
-* https://github.com/mlcommons/cm4mlops/tree/main/script/build-dockerfile
-* https://github.com/mlcommons/cm4mlops/tree/main/script/build-docker-image
-
-
-
-
-## Getting help about other script automation flags
-
-You can get help about all flags used to customize execution
-of a given CM script from the command line as follows:
-
-```bash
-cm run script --help
-```
-
-Some flags are useful to make it easier to debug scripts and save output in files.
-
-You can find more info about CM script execution flow in this [document](README-specs.md).
-
-
-
-
-
-
-
-
-
-
-
-
-## Further reading
-
-* [CM "script" automation specification](README-specs.md)
-* [MLCommons CM script sources](https://github.com/mlcommons/cm4mlops/tree/main/script)
-* [List of portable and reusable CM scripts from MLCommons](https://access.cknowledge.org/playground/?action=scripts)
-* [CM "cache" automation](../cache/README-extra.md)
diff --git a/automation/script/README-specs.md b/automation/script/README-specs.md
deleted file mode 100644
index 4b40feeba..000000000
--- a/automation/script/README-specs.md
+++ /dev/null
@@ -1,79 +0,0 @@
-# CM "script" automation specification
-
-Please check the [CM documentation](https://github.com/mlcommons/ck/tree/master/docs#collective-mind-language-cm)
-for more details about the CM automation language.
-
-See the CM script introduction [here](README-extra.md).
-
-See the [automatically generated catalog](https://github.com/mlcommons/ck/blob/master/docs/list_of_scripts.md) of all CM scripts from MLCommons.
-
-## Getting started with CM scripts
-
-* A CM script is identified by a set of tags and by unique ID.
-* Further each CM script can have multiple variations and they are identified by variation tags which are treated in the same way as tags and identified by a `_` prefix.
-
-### CM script execution flow
-* When a CM script is invoked (either by tags or by unique ID), its `_cm.json` is processed first which will check for any `deps` script and if there are, then they are executed in order.
-* Once all the `deps` scripts are executed, `customize.py` file is checked and if existing `preprocess` function inside it is executed if present.
-* Then any `prehook_deps` CM scripts mentioned in `_cm.json` are executed similar to `deps`
-* After this, keys in `env` dictionary is exported as `ENV` variables and `run` file if exists is executed.
-* Once run file execution is done, any `posthook_deps` CM scripts mentioned in `_cm.json` are executed similar to `deps`
-* Then `postprocess` function inside customize.py is executed if present.
-* After this stage any `post_deps` CM scripts mentioned in `_cm.json` is executed.
-
-** If a script is already cached, then the `preprocess`, `run file` and `postprocess` executions won't happen and only the dependencies marked as `dynamic` will be executed from `deps`, `prehook_deps`, `posthook_deps` and `postdeps`.
-
-### Input flags
-When we run a CM script we can also pass inputs to it and any input added in `input_mapping` dictionary inside `_cm.json` gets converted to the corresponding `ENV` variable.
-
-### Conditional execution of any `deps`, `post_deps`
-We can use `skip_if_env` dictionary inside any `deps`, `prehook_deps`, `posthook_deps` or `post_deps` to make its execution conditional
-
-### Versions
-We can specify any specific version of a script using `version`. `version_max` and `version_min` are also possible options.
-* When `version_min` is given, any version above this if present in the cache or detected in the system can be chosen. If nothing is detected `default_version` if present and if above `version_min` will be used for installation. Otherwise `version_min` will be used as `version`.
-* When `version_max` is given, any version below this if present in the cache or detected in the system can be chosen. If nothing is detected `default_version` if present and if below `version_max` will be used for installation. Otherwise `version_max_usable` (additional needed input for `version_max`) will be used as `version`.
-
-### Variations
-* Variations are used to customize CM script and each unique combination of variations uses a unique cache entry. Each variation can turn on `env` keys also any other meta including dependencies specific to it. Variations are turned on like tags but with a `_` prefix. For example, if a script is having tags `"get,myscript"`, to call the variation `"test"` inside it, we have to use tags `"get,myscript,_test"`.
-
-#### Variation groups
-`group` is a key to map variations into a group and at any time only one variation from a group can be used in the variation tags. For example, both `cpu` and `cuda` can be two variations under the `device` group, but user can at any time use either `cpu` or `cuda` as variation tags but not both.
-
-#### Dynamic variations
-Sometimes it is difficult to add all variations needed for a script like say `batch_size` which can take many different values. To handle this case, we support dynamic variations using '#' where '#' can be dynamically replaced by any string. For example, `"_batch_size.8"` can be used as a tag to turn on the dynamic variation `"_batch_size.#"`.
-
-### ENV flow during CM script execution
-* [TBD] Issue added [here](https://github.com/mlcommons/ck/issues/382)
-* During a given script execution incoming `env` dictionary is saved `(saved_env)` and all the updates happens on a copy of it.
-* Once a script execution is over (which includes all the dependent script executions as well), newly created keys and any updated keys are merged with the `saved_env` provided the keys are mentioned in `new_env_keys`
-* Same behaviour applies to `state` dictionary.
-
-#### Special env keys
-* Any env key with a prefix `CM_TMP_*` and `CM_GIT_*` are not passed by default to any dependency. These can be force passed by adding the key(s) to the `force_env_keys` list of the concerned dependency.
-* Similarly we can avoid any env key from being passed to a given dependency by adding the prefix of the key in the `clean_env_keys` list of the concerned dependency.
-* `--input` is automatically converted to `CM_INPUT` env key
-* `version` is converted to `CM_VERSION`, ``version_min` to `CM_VERSION_MIN` and `version_max` to `CM_VERSION_MAX`
-* If `env['CM_GH_TOKEN']=TOKEN_VALUE` is set then git URLs (specified by `CM_GIT_URL`) are changed to add this token.
-* If `env['CM_GIT_SSH']=yes`, then git URLs are changed to SSH from HTTPS.
-
-### Script Meta
-#### Special keys in script meta
-* TBD: `reuse_version`, `inherit_variation_tags`, `update_env_tags_from_env`
-
-### How cache works?
-* If `cache=true` is set in a script meta, the result of the script execution is cached for further use.
-* For a cached script, `env` and `state` updates are done using `new_env` and `new_state` dictionaries which are stored in the `cm-cached.json` file inside the cached folder.
-* By using `--new` input, a new cache entry can be forced even when an old one exist.
-* By default no depndencies are run for a cached entry unless `dynamic` key is set for it.
-
-### Updating ENV from inside the run script
-* [TBD]
-
-
-### Script workflow (env, deps, native scripts)
-
-
-
-
-© 2022-24 [MLCommons](https://mlcommons.org)
diff --git a/automation/script/README.md b/automation/script/README.md
index d4a4c62bc..bbedf887d 100644
--- a/automation/script/README.md
+++ b/automation/script/README.md
@@ -1,427 +1,77 @@
-*This README is automatically generated - don't edit! See [extra README](README-extra.md) for extra notes!*
+# CM "script" automation specification
-### Automation actions
+Please check the [CM documentation](https://github.com/mlcommons/ck/tree/master/docs#collective-mind-language-cm)
+for more details about the CM automation language.
-#### run
- * CM CLI: ```cm run script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L77))
- * CM CLI with UID: ```cm run script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L77))
- * CM Python API:
- ```python
- import cmind
- r=cm.access({
- 'action':'run'
- 'automation':'script,5b4e0237da074764'
- 'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L77)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
+## Getting started with CM scripts
-#### version
+* A CM script is identified by a set of tags and by a unique ID.
+* Each CM script can also have multiple variations, identified by variation tags; these are treated in the same way as ordinary tags but carry a `_` prefix.
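+
+For example, a script can be invoked from Python by tags or by its unique ID. Below is a minimal sketch using the `cmind` package's `access` API (the same interface used throughout this automation); the tags are illustrative:
+
+```python
+import cmind
+
+# Run a script selected by tags (CLI equivalent: cm run script "get sys-utils-min")
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,sys-utils-min',
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```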
- * CM CLI: ```cm version script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2199))
- * CM CLI with UID: ```cm version script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2199))
- * CM Python API:
- ```python
- import cmind
+### CM script execution flow
+* When a CM script is invoked (either by tags or by its unique ID), its `_cm.json` is processed first; any scripts listed under `deps` are executed in order.
+* Once all the `deps` scripts have run, the `customize.py` file is checked and, if it exists, its `preprocess` function is executed.
+* Then any `prehook_deps` CM scripts mentioned in `_cm.json` are executed, in the same way as `deps`.
+* After this, the keys in the `env` dictionary are exported as `ENV` variables, and the `run` file, if it exists, is executed.
+* Once the run file has finished, any `posthook_deps` CM scripts mentioned in `_cm.json` are executed, again like `deps`.
+* Then the `postprocess` function inside `customize.py` is executed, if present.
+* After this stage, any `post_deps` CM scripts mentioned in `_cm.json` are executed.
- r=cm.access({
- 'action':'version'
- 'automation':'script,5b4e0237da074764'
- 'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2199)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
+**Note:** If a script is already cached, the `preprocess`, run-file and `postprocess` steps are skipped; only the dependencies marked as `dynamic` are executed from `deps`, `prehook_deps`, `posthook_deps` and `post_deps`.
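+
+The whole flow can be summarized with a small sketch; this is plain illustrative Python, not the actual implementation, and the helper names are made up:
+
+```python
+import os
+
+def run_deps(deps): pass       # would invoke each dependent CM script in order
+def run_native_script(): pass  # would execute the 'run' file if present
+
+def run_script(meta, preprocess, postprocess, cached=False):
+    if cached:
+        # For a cached script, only dependencies marked 'dynamic' are re-run
+        for key in ('deps', 'prehook_deps', 'posthook_deps', 'post_deps'):
+            run_deps([d for d in meta.get(key, []) if d.get('dynamic')])
+        return
+    run_deps(meta.get('deps', []))           # 1. deps
+    preprocess()                             # 2. customize.py: preprocess
+    run_deps(meta.get('prehook_deps', []))   # 3. prehook_deps
+    os.environ.update(meta.get('env', {}))   # 4. env keys exported as ENV variables
+    run_native_script()                      # 5. run file
+    run_deps(meta.get('posthook_deps', []))  # 6. posthook_deps
+    postprocess()                            # 7. customize.py: postprocess
+    run_deps(meta.get('post_deps', []))      # 8. post_deps
+```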
-#### search
+### Input flags
+When we run a CM script, we can also pass inputs to it; any input listed in the `input_mapping` dictionary inside `_cm.json` is converted to the corresponding `ENV` variable.
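+
+A sketch of the mechanism (the flag and env names are hypothetical, not taken from a real script):
+
+```python
+# From a hypothetical _cm.json: {"input_mapping": {"model_dir": "MLC_ML_MODEL_PATH"}}
+input_mapping = {'model_dir': 'MLC_ML_MODEL_PATH'}
+
+inputs = {'model_dir': '/data/models/resnet50'}  # e.g. --model_dir=/data/models/resnet50
+
+env = {flag_env: inputs[flag]
+       for flag, flag_env in input_mapping.items() if flag in inputs}
+print(env)  # {'MLC_ML_MODEL_PATH': '/data/models/resnet50'}
+```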
- * CM CLI: ```cm search script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2227))
- * CM CLI with UID: ```cm search script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2227))
- * CM Python API:
- ```python
- import cmind
+### Conditional execution of any `deps`, `post_deps`
+We can use the `skip_if_env` dictionary inside any `deps`, `prehook_deps`, `posthook_deps` or `post_deps` entry to make its execution conditional.
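+
+A minimal sketch of such a conditional dependency (the tag and env names are hypothetical):
+
+```python
+# This dependency is skipped when MLC_SKIP_COMPILER is set to 'yes'
+dep = {
+    'tags': 'get,compiler',
+    'skip_if_env': {'MLC_SKIP_COMPILER': ['yes']}
+}
+
+def should_skip(dep, env):
+    # Skip when every listed key holds one of its listed values
+    conditions = dep.get('skip_if_env', {})
+    return bool(conditions) and all(
+        env.get(key) in values for key, values in conditions.items())
+
+print(should_skip(dep, {'MLC_SKIP_COMPILER': 'yes'}))  # True -> dependency skipped
+print(should_skip(dep, {}))                            # False -> dependency runs
+```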
- r=cm.access({
- 'action':'search'
- 'automation':'script,5b4e0237da074764'
- 'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2227)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
+### Versions
+We can request a specific version of a script using `version`; `version_max` and `version_min` are also supported.
+* When `version_min` is given, any version above it that is present in the cache or detected on the system can be chosen. If nothing is detected, `default_version` (when present and above `version_min`) is used for installation; otherwise `version_min` itself is used as `version`.
+* When `version_max` is given, any version below it that is present in the cache or detected on the system can be chosen. If nothing is detected, `default_version` (when present and below `version_max`) is used for installation; otherwise `version_max_usable` (an additional input required alongside `version_max`) is used as `version`.
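+
+The `version_min` rule can be sketched as follows (simplified; the real logic lives in `automation/script/module.py`):
+
+```python
+def parse(v):
+    # Parse '3.10' -> (3, 10) so comparisons are numeric, not lexicographic
+    return tuple(int(x) for x in v.split('.'))
+
+def resolve_version(detected, version_min, default_version=None):
+    if detected and parse(detected) >= parse(version_min):
+        return detected            # cached/system version is acceptable
+    if default_version and parse(default_version) >= parse(version_min):
+        return default_version     # fall back to default_version
+    return version_min             # otherwise install version_min itself
+
+print(resolve_version('3.10', '3.8'))       # '3.10'
+print(resolve_version(None, '3.8', '3.9'))  # '3.9'
+print(resolve_version(None, '3.8'))         # '3.8'
+```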
-#### test
+### Variations
+* Variations are used to customize a CM script, and each unique combination of variations gets its own cache entry. A variation can turn on `env` keys as well as any other meta, including dependencies specific to it. Variations are turned on like tags but with a `_` prefix. For example, if a script has the tags `"get,myscript"`, calling its `"test"` variation requires the tags `"get,myscript,_test"`.
+
+#### Variation groups
+`group` is a key that maps variations into a group; at any time, only one variation from a given group can be used in the variation tags. For example, `cpu` and `cuda` can be two variations under the `device` group: a user can select either `cpu` or `cuda` as a variation tag, but not both.
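+
+A hypothetical `variations` meta fragment, written as the Python dictionary it is loaded into, with a `device` group:
+
+```python
+variations = {
+    'cpu':  {'group': 'device', 'env': {'MLC_DEVICE': 'cpu'}},
+    'cuda': {'group': 'device', 'env': {'MLC_DEVICE': 'gpu'},
+             'deps': [{'tags': 'get,cuda'}]},
+    'test': {'env': {'MLC_TEST': 'yes'}},
+}
+
+def no_group_conflict(selected, variations):
+    """Reject tag sets that pick two variations from the same group."""
+    groups = [variations[v]['group'] for v in selected if 'group' in variations[v]]
+    return len(groups) == len(set(groups))
+
+print(no_group_conflict(['cpu', 'test'], variations))  # True  (allowed)
+print(no_group_conflict(['cpu', 'cuda'], variations))  # False (both in 'device')
+```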
- * CM CLI: ```cm test script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2346))
- * CM CLI with UID: ```cm test script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2346))
- * CM Python API:
- ```python
- import cmind
+#### Dynamic variations
+Sometimes it is impractical to enumerate all the variations a script may need; `batch_size`, for example, can take many different values. To handle this case, we support dynamic variations using `#`, where `#` can be replaced by any string at run time. For example, `"_batch_size.8"` can be used as a tag to turn on the dynamic variation `"_batch_size.#"`.
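+
+Resolving a concrete tag such as `_batch_size.8` against a declared `_batch_size.#` can be sketched like this:
+
+```python
+def match_dynamic(tag, declared):
+    """Return (pattern, value) if tag instantiates a declared '#' variation."""
+    for pattern in declared:
+        prefix = pattern[:-1]                  # '_batch_size.#' -> '_batch_size.'
+        if pattern.endswith('.#') and tag.startswith(prefix):
+            return pattern, tag[len(prefix):]  # the part substituted for '#'
+    return None, None
+
+print(match_dynamic('_batch_size.8', ['_batch_size.#']))  # ('_batch_size.#', '8')
+```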
- r=cm.access({
- 'action':'test'
- 'automation':'script,5b4e0237da074764'
- 'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2346)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
+### ENV flow during CM script execution
+* [TBD] Issue added [here](https://github.com/mlcommons/ck/issues/382)
+* During a given script execution, the incoming `env` dictionary is saved (`saved_env`) and all updates happen on a copy of it.
+* Once a script execution is over (including all dependent script executions), newly created keys and updated keys are merged back into `saved_env`, provided the keys are listed in `new_env_keys`.
+* The same behaviour applies to the `state` dictionary.
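+
+A simplified sketch of this merge (the `new_env_keys` patterns are illustrative):
+
+```python
+from fnmatch import fnmatch
+
+def merge_new_env(saved_env, updated_env, new_env_keys):
+    """Merge back only the keys whitelisted by new_env_keys patterns."""
+    for key, value in updated_env.items():
+        if any(fnmatch(key, pattern) for pattern in new_env_keys):
+            saved_env[key] = value
+    return saved_env
+
+saved = {'MLC_QUIET': 'yes'}
+updated = {'MLC_QUIET': 'yes', 'MLC_ML_MODEL_PATH': '/tmp/model', 'MLC_TMP_X': '1'}
+print(merge_new_env(saved, updated, ['MLC_ML_MODEL_*']))
+# {'MLC_QUIET': 'yes', 'MLC_ML_MODEL_PATH': '/tmp/model'}
+```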
-#### native_run
+#### Special env keys
+* Any env key with the prefix `MLC_TMP_` or `MLC_GIT_` is not passed to dependencies by default. Such keys can be force-passed by adding them to the `force_env_keys` list of the concerned dependency.
+* Similarly, we can prevent any env key from being passed to a given dependency by adding its prefix to the `clean_env_keys` list of that dependency.
+* `--input` is automatically converted to the `MLC_INPUT` env key.
+* `version` is converted to `MLC_VERSION`, `version_min` to `MLC_VERSION_MIN` and `version_max` to `MLC_VERSION_MAX`.
+* If `env['MLC_GH_TOKEN']=TOKEN_VALUE` is set, git URLs (specified by `MLC_GIT_URL`) are rewritten to include this token.
+* If `env['MLC_GIT_SSH']=yes`, git URLs are changed from HTTPS to SSH.
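+
+The first two rules can be sketched as a filter applied to the env passed to a dependency (a simplification of the real behaviour):
+
+```python
+from fnmatch import fnmatch
+
+def env_for_dependency(env, force_env_keys=(), clean_env_keys=()):
+    """Drop MLC_TMP_*/MLC_GIT_* keys unless force-passed; honour clean_env_keys."""
+    passed = {}
+    for key, value in env.items():
+        if any(fnmatch(key, p) for p in clean_env_keys):
+            continue                          # explicitly withheld from this dep
+        if key.startswith(('MLC_TMP_', 'MLC_GIT_')) and \
+                not any(fnmatch(key, p) for p in force_env_keys):
+            continue                          # local by default
+        passed[key] = value
+    return passed
+
+env = {'MLC_QUIET': 'yes', 'MLC_TMP_PATH': '/x', 'MLC_GIT_SSH': 'yes'}
+print(env_for_dependency(env))                                # {'MLC_QUIET': 'yes'}
+print(env_for_dependency(env, force_env_keys=['MLC_GIT_*']))  # keeps MLC_GIT_SSH too
+```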
- * CM CLI: ```cm native_run script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2412))
- * CM CLI with UID: ```cm native_run script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2412))
- * CM Python API:
- ```python
- import cmind
+### Script Meta
+#### Special keys in script meta
+* TBD: `reuse_version`, `inherit_variation_tags`, `update_env_tags_from_env`
- r=cm.access({
- 'action':'native_run'
- 'automation':'script,5b4e0237da074764'
- 'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2412)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
+### How caching works
+* If `cache=true` is set in a script's meta, the result of the script execution is cached for further use.
+* For a cached script, `env` and `state` updates are applied using the `new_env` and `new_state` dictionaries stored in the `mlc-cached-state.json` file inside the cached folder.
+* By using the `--new` input, a new cache entry can be forced even when an old one exists.
+* By default, no dependencies are run for a cached entry unless the `dynamic` key is set for them.
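+
+In pseudo-Python, reusing a cached entry then looks roughly like this (file name per `automation/script/module.py` in this change; everything else is illustrative):
+
+```python
+import json
+import os
+
+def load_cached_state(cache_dir, env, state):
+    """Re-apply new_env/new_state from the cached state file instead of re-running."""
+    with open(os.path.join(cache_dir, 'mlc-cached-state.json')) as f:
+        cached = json.load(f)
+    env.update(cached.get('new_env', {}))
+    state.update(cached.get('new_state', {}))
+    return env, state
+```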
-#### add
+### Updating ENV from inside the run script
+* [TBD]
- * CM CLI: ```cm add script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2485))
- * CM CLI with UID: ```cm add script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2485))
- * CM Python API:
- ```python
- import cmind
- r=cm.access({
- 'action':'add'
- 'automation':'script,5b4e0237da074764'
- 'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2485)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
+### Script workflow (env, deps, native scripts)
-#### run_native_script
+
- * CM CLI: ```cm run_native_script script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3270))
- * CM CLI with UID: ```cm run_native_script script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3270))
- * CM Python API:
- ```python
- import cmind
- r=cm.access({
- 'action':'run_native_script'
- 'automation':'script,5b4e0237da074764'
- 'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3270)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### find_file_in_paths
-
- * CM CLI: ```cm find_file_in_paths script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3314))
- * CM CLI with UID: ```cm find_file_in_paths script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3314))
- * CM Python API:
- ```python
- import cmind
-
- r=cm.access({
- 'action':'find_file_in_paths'
- 'automation':'script,5b4e0237da074764'
- 'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3314)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### detect_version_using_script
-
- * CM CLI: ```cm detect_version_using_script script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3533))
- * CM CLI with UID: ```cm detect_version_using_script script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3533))
- * CM Python API:
- ```python
- import cmind
-
- r=cm.access({
- 'action':'detect_version_using_script'
- 'automation':'script,5b4e0237da074764'
- 'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3533)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### find_artifact
-
- * CM CLI: ```cm find_artifact script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3606))
- * CM CLI with UID: ```cm find_artifact script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3606))
- * CM Python API:
- ```python
- import cmind
-
- r=cm.access({
- 'action':'find_artifact'
- 'automation':'script,5b4e0237da074764'
- 'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3606)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### find_file_deep
-
- * CM CLI: ```cm find_file_deep script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3764))
- * CM CLI with UID: ```cm find_file_deep script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3764))
- * CM Python API:
- ```python
- import cmind
-
- r=cm.access({
- 'action':'find_file_deep'
- 'automation':'script,5b4e0237da074764'
- 'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3764)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### find_file_back
-
- * CM CLI: ```cm find_file_back script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3822))
- * CM CLI with UID: ```cm find_file_back script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3822))
- * CM Python API:
- ```python
- import cmind
-
- r=cm.access({
- 'action':'find_file_back'
- 'automation':'script,5b4e0237da074764'
- 'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3822)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### parse_version
-
- * CM CLI: ```cm parse_version script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3863))
- * CM CLI with UID: ```cm parse_version script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3863))
- * CM Python API:
- ```python
- import cmind
-
- r=cm.access({
- 'action':'parse_version'
- 'automation':'script,5b4e0237da074764'
- 'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3863)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### update_deps
-
- * CM CLI: ```cm update_deps script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3917))
- * CM CLI with UID: ```cm update_deps script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3917))
- * CM Python API:
- ```python
- import cmind
-
- r=cm.access({
- 'action':'update_deps'
- 'automation':'script,5b4e0237da074764'
- 'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3917)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### get_default_path_list
-
- * CM CLI: ```cm get_default_path_list script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3937))
- * CM CLI with UID: ```cm get_default_path_list script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3937))
- * CM Python API:
- ```python
- import cmind
-
- r=cm.access({
- 'action':'get_default_path_list'
- 'automation':'script,5b4e0237da074764'
- 'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3937)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### doc
-
- * CM CLI: ```cm doc script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3948))
- * CM CLI with UID: ```cm doc script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3948))
- * CM Python API:
- ```python
- import cmind
-
- r=cm.access({
- 'action':'doc'
- 'automation':'script,5b4e0237da074764'
- 'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3948)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### gui
-
- * CM CLI: ```cm gui script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3976))
- * CM CLI with UID: ```cm gui script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3976))
- * CM Python API:
- ```python
- import cmind
-
- r=cm.access({
- 'action':'gui'
- 'automation':'script,5b4e0237da074764'
- 'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3976)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### dockerfile
-
- * CM CLI: ```cm dockerfile script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4013))
- * CM CLI with UID: ```cm dockerfile script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4013))
- * CM Python API:
- ```python
- import cmind
-
- r=cm.access({
- 'action':'dockerfile'
- 'automation':'script,5b4e0237da074764'
- 'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4013)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### docker
-
- * CM CLI: ```cm docker script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4041))
- * CM CLI with UID: ```cm docker script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4041))
- * CM Python API:
- ```python
- import cmind
-
- r=cm.access({
- 'action':'docker'
- 'automation':'script,5b4e0237da074764'
- 'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4041)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### prepare
-
- * CM CLI: ```cm prepare script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4095))
- * CM CLI with UID: ```cm prepare script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4095))
- * CM Python API:
- ```python
- import cmind
-
- r=cm.access({
- 'action':'prepare'
- 'automation':'script,5b4e0237da074764'
- 'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4095)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### clean_some_tmp_files
-
- * CM CLI: ```cm clean_some_tmp_files script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4106))
- * CM CLI with UID: ```cm clean_some_tmp_files script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4106))
- * CM Python API:
- ```python
- import cmind
-
- r=cm.access({
- 'action':'clean_some_tmp_files'
- 'automation':'script,5b4e0237da074764'
- 'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4106)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-### Maintainers
-
-* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
\ No newline at end of file
+© 2022-25 [MLCommons](https://mlcommons.org)
diff --git a/automation/script/docker.py b/automation/script/docker.py
index bd511d7e3..99fa619cf 100644
--- a/automation/script/docker.py
+++ b/automation/script/docker.py
@@ -95,7 +95,7 @@ def dockerfile(self_module, input_params):
# Set Docker-specific configurations
docker_settings = state_data.get('docker', {})
docker_settings['dockerfile_env'] = dockerfile_environment_vars
- dockerfile_environment_vars['CM_RUN_STATE_DOCKER'] = True
+ dockerfile_environment_vars['MLC_RUN_STATE_DOCKER'] = True
if not docker_settings.get('run', True) and not input_params.get(
'docker_run_override', False):
@@ -115,7 +115,7 @@ def dockerfile(self_module, input_params):
# Prune temporary environment variables
run_command = copy.deepcopy(run_command_arc)
for key in list(run_command.get('env', {}).keys()):
- if key.startswith("CM_TMP_"):
+ if key.startswith("MLC_TMP_"):
del run_command['env'][key]
# Regenerate script command
@@ -177,7 +177,7 @@ def dockerfile(self_module, input_params):
# Push Docker image if specified
if input_params.get('docker_push_image') in [True, 'True', 'yes']:
- environment_vars['CM_DOCKER_PUSH_IMAGE'] = 'yes'
+ environment_vars['MLC_DOCKER_PUSH_IMAGE'] = 'yes'
# Generate Dockerfile
mlc_docker_input = {
@@ -227,7 +227,7 @@ def docker_run(self_module, i):
if i.get('docker_skip_build', False):
noregenerate_docker_file = True
norecreate_docker_image = True
- env['CM_DOCKER_SKIP_BUILD'] = 'yes'
+ env['MLC_DOCKER_SKIP_BUILD'] = 'yes'
# Prune unnecessary Docker-related input keys
r = prune_input({'input': i, 'extra_keys_starts_with': ['docker_']})
@@ -249,7 +249,7 @@ def docker_run(self_module, i):
if not lst:
return {'return': 1, 'error': 'No scripts were found'}
- env['CM_RUN_STATE_DOCKER'] = False
+ env['MLC_RUN_STATE_DOCKER'] = False
state, const, const_state = i.get(
'state', {}), i.get(
'const', {}), i.get(
@@ -259,7 +259,7 @@ def docker_run(self_module, i):
docker_cache = i.get('docker_cache', "yes")
if docker_cache.lower() in ["no", "false"]:
- env.setdefault('CM_DOCKER_CACHE', docker_cache)
+ env.setdefault('MLC_DOCKER_CACHE', docker_cache)
image_repo = i.get('docker_image_repo', '')
add_deps_recursive = i.get('add_deps_recursive')
diff --git a/automation/script/module.py b/automation/script/module.py
index bceca05d3..71ea0c9ad 100644
--- a/automation/script/module.py
+++ b/automation/script/module.py
@@ -37,7 +37,7 @@ def __init__(self, action_object, automation_file):
self.run_state['parent'] = None
self.run_state['version_info'] = []
self.run_state['cache'] = False
- self.file_with_cached_state = 'cm-cached-state.json'
+ self.file_with_cached_state = 'mlc-cached-state.json'
self.tmp_file_env = 'tmp-env'
self.tmp_file_env_all = 'tmp-env-all'
@@ -50,20 +50,20 @@ def __init__(self, action_object, automation_file):
self.__version__ = "1.3.2"
- self.local_env_keys = ['CM_VERSION',
- 'CM_VERSION_MIN',
- 'CM_VERSION_MAX',
- 'CM_VERSION_MAX_USABLE',
- 'CM_DETECTED_VERSION',
- 'CM_INPUT',
- 'CM_OUTPUT',
- 'CM_OUTBASENAME',
- 'CM_OUTDIRNAME',
- 'CM_NAME',
- 'CM_EXTRA_CACHE_TAGS',
- 'CM_TMP_*',
- 'CM_GIT_*',
- 'CM_RENEW_CACHE_ENTRY']
+ self.local_env_keys = ['MLC_VERSION',
+ 'MLC_VERSION_MIN',
+ 'MLC_VERSION_MAX',
+ 'MLC_VERSION_MAX_USABLE',
+ 'MLC_DETECTED_VERSION',
+ 'MLC_INPUT',
+ 'MLC_OUTPUT',
+ 'MLC_OUTBASENAME',
+ 'MLC_OUTDIRNAME',
+ 'MLC_NAME',
+ 'MLC_EXTRA_CACHE_TAGS',
+ 'MLC_TMP_*',
+ 'MLC_GIT_*',
+ 'MLC_RENEW_CACHE_ENTRY']
self.input_flags_converted_to_tmp_env = ['path']
@@ -105,33 +105,33 @@ def run(self, i):
(add_deps) (dict): {"name": {"tag": "tag(s)"}, "name": {"version": "version_no"}, ...}
(add_deps_recursive) (dict): same as add_deps but is passed recursively onto dependencies as well
- (version) (str): version to be added to env.CM_VERSION to specialize this flow
- (version_min) (str): min version to be added to env.CM_VERSION_MIN to specialize this flow
- (version_max) (str): max version to be added to env.CM_VERSION_MAX to specialize this flow
- (version_max_usable) (str): max USABLE version to be added to env.CM_VERSION_MAX_USABLE
+ (version) (str): version to be added to env.MLC_VERSION to specialize this flow
+ (version_min) (str): min version to be added to env.MLC_VERSION_MIN to specialize this flow
+ (version_max) (str): max version to be added to env.MLC_VERSION_MAX to specialize this flow
+ (version_max_usable) (str): max USABLE version to be added to env.MLC_VERSION_MAX_USABLE
- (path) (str): list of paths to be added to env.CM_TMP_PATH to specialize this flow
+ (path) (str): list of paths to be added to env.MLC_TMP_PATH to specialize this flow
- (input) (str): converted to env.CM_INPUT (local env)
- (output) (str): converted to env.CM_OUTPUT (local env)
+ (input) (str): converted to env.MLC_INPUT (local env)
+ (output) (str): converted to env.MLC_OUTPUT (local env)
- (outbasename) (str): converted to env.CM_OUTBASENAME (local env)
- (outdirname) (str): converted to env.CM_OUTDIRNAME (local env)
+ (outbasename) (str): converted to env.MLC_OUTBASENAME (local env)
+ (outdirname) (str): converted to env.MLC_OUTDIRNAME (local env)
- (extra_cache_tags) (str): converted to env.CM_EXTRA_CACHE_TAGS and used to add to caching (local env)
+ (extra_cache_tags) (str): converted to env.MLC_EXTRA_CACHE_TAGS and used to add to caching (local env)
- (name) (str): taken from env.CM_NAME and/or converted to env.CM_NAME (local env)
+ (name) (str): taken from env.MLC_NAME and/or converted to env.MLC_NAME (local env)
Added to extra_cache_tags with "name-" prefix .
Useful for python virtual env (to create multiple entries)
- (quiet) (bool): if True, set env.CM_QUIET to "yes" and attempt to skip questions
+ (quiet) (bool): if True, set env.MLC_QUIET to "yes" and attempt to skip questions
(the developers have to support it in pre/post processing and scripts)
(skip_cache) (bool): if True, skip caching and run in current directory
(force_cache) (bool): if True, force caching if can_force_cache=true in script meta
(skip_remembered_selections) (bool): if True, skip remembered selections
- (uses or sets env.CM_TMP_SKIP_REMEMBERED_SELECTIONS to "yes")
+ (uses or sets env.MLC_TMP_SKIP_REMEMBERED_SELECTIONS to "yes")
(new) (bool): if True, skip search for cached and run again
(renew) (bool): if True, rewrite cache entry if exists
@@ -160,7 +160,7 @@ def run(self, i):
inside a script specified by these tags
(debug_script) (bool): if True, debug current script (set debug_script_tags to the tags of a current script)
- (debug_uid) (str): if True, set CM_TMP_DEBUG_UID to this number to enable
+ (debug_uid) (str): if True, set MLC_TMP_DEBUG_UID to this number to enable
remote python debugging of scripts and wrapped apps/tools
(detected_versions) (dict): All the used scripts and their detected_versions
@@ -178,12 +178,12 @@ def run(self, i):
(pause) (bool): if True, pause at the end of the main script (Press Enter to continue)
- (repro) (bool): if True, dump cm-run-script-input.json, cm-run_script_output.json,
- cm-run-script-state.json, cm-run-script-info.json
+ (repro) (bool): if True, dump mlc-run-script-input.json, mlc-run_script_output.json,
+ mlc-run-script-state.json, mlc-run-script-info.json
to improve the reproducibility of results
(repro_prefix) (str): if !='', use it to record above files {repro-prefix)-input.json ...
- (repro_dir) (str): if !='', use this directory to dump info (default = 'cm-repro')
+ (repro_dir) (str): if !='', use this directory to dump info (default = 'mlc-repro')
(dump_version_info) (bool): dump info about resolved versions of tools in dependencies
@@ -193,13 +193,13 @@ def run(self, i):
(script_call_prefix) (str): how to call script in logs and READMEs (mlc run script)
- (skip_sys_utils) (bool): if True, set env['CM_SKIP_SYS_UTILS']='yes'
+ (skip_sys_utils) (bool): if True, set env['MLC_SKIP_SYS_UTILS']='yes'
to skip CM sys installation
- (skip_sudo) (bool): if True, set env['CM_TMP_SKIP_SUDO']='yes'
+ (skip_sudo) (bool): if True, set env['MLC_TMP_SKIP_SUDO']='yes'
to let scripts deal with that
(silent) (bool): if True, attempt to suppress all info if supported
- (sets CM_TMP_SILENT=yes)
+ (sets MLC_TMP_SILENT=yes)
(s) (bool): the same as 'silent'
...
@@ -239,11 +239,11 @@ def _run(self, i):
if repro:
repro_prefix = i.get('repro_prefix', '')
if repro_prefix == '':
- repro_prefix = 'cm-run-script'
+ repro_prefix = 'mlc-run-script'
repro_dir = i.get('repro_dir', '')
if repro_dir == '':
- repro_dir = os.path.join(os.getcwd(), 'cm-repro')
+ repro_dir = os.path.join(os.getcwd(), 'mlc-repro')
if not os.path.isdir(repro_dir):
os.makedirs(repro_dir)
@@ -277,9 +277,9 @@ def _run(self, i):
start_time = time.time()
- # Check extra input from environment variable CM_SCRIPT_EXTRA_CMD
+ # Check extra input from environment variable MLC_SCRIPT_EXTRA_CMD
# Useful to set up default flags such as the name of virtual enviroment
- extra_cli = os.environ.get('CM_SCRIPT_EXTRA_CMD', '').strip()
+ extra_cli = os.environ.get('MLC_SCRIPT_EXTRA_CMD', '').strip()
if extra_cli != '':
from cmind import cli
r = cli.parse(extra_cli)
@@ -369,22 +369,22 @@ def _run(self, i):
'prepare',
False)
if fake_run:
- env['CM_TMP_FAKE_RUN'] = 'yes'
+ env['MLC_TMP_FAKE_RUN'] = 'yes'
debug_uid = i.get('debug_uid', '')
if debug_uid != '':
- r = _update_env(env, 'CM_TMP_DEBUG_UID', debug_uid)
+ r = _update_env(env, 'MLC_TMP_DEBUG_UID', debug_uid)
if r['return'] > 0:
return r
fake_deps = i.get('fake_deps', False)
if fake_deps:
- env['CM_TMP_FAKE_DEPS'] = 'yes'
+ env['MLC_TMP_FAKE_DEPS'] = 'yes'
if str(i.get('skip_sys_utils', '')).lower() in ['true', 'yes']:
- env['CM_SKIP_SYS_UTILS'] = 'yes'
+ env['MLC_SKIP_SYS_UTILS'] = 'yes'
if str(i.get('skip_sudo', '')).lower() in ['true', 'yes']:
- env['CM_TMP_SKIP_SUDO'] = 'yes'
+ env['MLC_TMP_SKIP_SUDO'] = 'yes'
run_state = i.get('run_state', self.run_state)
if not run_state.get('version_info', []):
@@ -409,7 +409,7 @@ def _run(self, i):
del (i['verbose'])
if 'v' in i:
del (i['v'])
- env['CM_TMP_SILENT'] = 'yes'
+ env['MLC_TMP_SILENT'] = 'yes'
run_state['tmp_silent'] = True
if 'verbose' in i:
@@ -418,7 +418,7 @@ def _run(self, i):
verbose = i['v']
if verbose:
- env['CM_VERBOSE'] = 'yes'
+ env['MLC_VERBOSE'] = 'yes'
run_state['tmp_verbose'] = True
logging.getLogger().setLevel(logging.DEBUG)
@@ -445,7 +445,7 @@ def _run(self, i):
# Detect current path and record in env for further use in native
# scripts
current_path = os.path.abspath(os.getcwd())
- r = _update_env(env, 'CM_TMP_CURRENT_PATH', current_path)
+ r = _update_env(env, 'MLC_TMP_CURRENT_PATH', current_path)
if r['return'] > 0:
return r
@@ -454,15 +454,15 @@ def _run(self, i):
'quiet',
False) if 'quiet' in i else (
env.get(
- 'CM_QUIET',
+ 'MLC_QUIET',
'').lower() == 'yes')
if quiet:
- env['CM_QUIET'] = 'yes'
+ env['MLC_QUIET'] = 'yes'
skip_remembered_selections = i.get('skip_remembered_selections', False) if 'skip_remembered_selections' in i \
- else (env.get('CM_SKIP_REMEMBERED_SELECTIONS', '').lower() == 'yes')
+ else (env.get('MLC_SKIP_REMEMBERED_SELECTIONS', '').lower() == 'yes')
if skip_remembered_selections:
- env['CM_SKIP_REMEMBERED_SELECTIONS'] = 'yes'
+ env['MLC_SKIP_REMEMBERED_SELECTIONS'] = 'yes'
# Prepare debug info
parsed_script = i.get('parsed_artifact')
@@ -484,7 +484,7 @@ def _run(self, i):
# Bat extension for this host OS
bat_ext = os_info['bat_ext']
- # Add permanent env from OS (such as CM_WINDOWS:"yes" on Windows)
+ # Add permanent env from OS (such as MLC_WINDOWS:"yes" on Windows)
env_from_os_info = os_info.get('env', {})
if len(env_from_os_info) > 0:
env.update(env_from_os_info)
@@ -790,8 +790,8 @@ def _run(self, i):
script_repo_path_with_prefix = os.path.join(
script_repo_path, script_artifact.repo.meta['prefix'])
- env['CM_TMP_CURRENT_SCRIPT_REPO_PATH'] = script_repo_path
- env['CM_TMP_CURRENT_SCRIPT_REPO_PATH_WITH_PREFIX'] = script_repo_path_with_prefix
+ env['MLC_TMP_CURRENT_SCRIPT_REPO_PATH'] = script_repo_path
+ env['MLC_TMP_CURRENT_SCRIPT_REPO_PATH_WITH_PREFIX'] = script_repo_path_with_prefix
# Check if has --help
if i.get('help', False):
@@ -936,7 +936,7 @@ def _run(self, i):
explicit_variation_tags = r['explicit_variation_tags']
# USE CASE:
- # HERE we may have versions in script input and env['CM_VERSION_*']
+ # HERE we may have versions in script input and env['MLC_VERSION_*']
# STEP 900: Get version, min, max, usable from env (priority if passed from another script to force version),
# then script input, then script meta
@@ -951,14 +951,14 @@ def _run(self, i):
# Second, take from env
if version == '':
- version = env.get('CM_VERSION', '')
+ version = env.get('MLC_VERSION', '')
if version_min == '':
- version_min = env.get('CM_VERSION_MIN', '')
+ version_min = env.get('MLC_VERSION_MIN', '')
if version_max == '':
- version_max = env.get('CM_VERSION_MAX', '')
+ version_max = env.get('MLC_VERSION_MAX', '')
if version_max_usable == '':
version_max_usable = env.get(
- 'CM_VERSION_MAX_USABLE', '')
+ 'MLC_VERSION_MAX_USABLE', '')
# Third, take from meta
if version == '':
@@ -973,10 +973,10 @@ def _run(self, i):
# Update env with resolved versions
notes = []
- for version_index in [(version, 'CM_VERSION', ' == {}'),
- (version_min, 'CM_VERSION_MIN', ' >= {}'),
- (version_max, 'CM_VERSION_MAX', ' <= {}'),
- (version_max_usable, 'CM_VERSION_MAX_USABLE', '({})')]:
+ for version_index in [(version, 'MLC_VERSION', ' == {}'),
+ (version_min, 'MLC_VERSION_MIN', ' >= {}'),
+ (version_max, 'MLC_VERSION_MAX', ' <= {}'),
+ (version_max_usable, 'MLC_VERSION_MAX_USABLE', '({})')]:
version_value = version_index[0]
key = version_index[1]
note = version_index[2]
@@ -996,7 +996,7 @@ def _run(self, i):
' '.join(notes))
# STEP 900 output: version* set
- # env['CM_VERSION*] set
+ # env['MLC_VERSION*] set
# STEP 1000: Update version only if in "versions" (not obligatory)
# can be useful when handling complex Git revisions
@@ -1043,7 +1043,7 @@ def _run(self, i):
if r['return'] > 0:
return r
- if str(env.get('CM_RUN_STATE_DOCKER', False)
+ if str(env.get('MLC_RUN_STATE_DOCKER', False)
).lower() in ['true', '1', 'yes']:
if state.get('docker'):
if str(state['docker'].get('run', True)
@@ -1075,11 +1075,11 @@ def _run(self, i):
recursion_spaces +
' - Doing fake run for script::{} as we are inside docker'.format(found_script_artifact))
fake_run = True
- env['CM_TMP_FAKE_RUN'] = 'yes'
+ env['MLC_TMP_FAKE_RUN'] = 'yes'
#######################################################################
# Check extra cache tags
- x = env.get('CM_EXTRA_CACHE_TAGS', '').strip()
+ x = env.get('MLC_EXTRA_CACHE_TAGS', '').strip()
extra_cache_tags = [] if x == '' else x.split(',')
if i.get('extra_cache_tags', '') != '':
@@ -1094,8 +1094,8 @@ def _run(self, i):
if x not in extra_cache_tags:
extra_cache_tags.append(x)
- if env.get('CM_NAME', '') != '':
- extra_cache_tags.append('name-' + env['CM_NAME'].strip().lower())
+ if env.get('MLC_NAME', '') != '':
+ extra_cache_tags.append('name-' + env['MLC_NAME'].strip().lower())
#######################################################################
# Check if need to clean output files
@@ -1445,7 +1445,7 @@ def _run(self, i):
found_cached = False
remove_tmp_tag = True
- env['CM_RENEW_CACHE_ENTRY'] = 'yes'
+ env['MLC_RENEW_CACHE_ENTRY'] = 'yes'
# Prepare files to be cleaned
clean_files = [self.tmp_file_run_state,
@@ -1501,7 +1501,7 @@ def _run(self, i):
recursion_spaces +
' - Version is not specified - use either default_version from meta or min/max/usable: {}'.format(version))
- r = _update_env(env, 'CM_VERSION', version)
+ r = _update_env(env, 'MLC_VERSION', version)
if r['return'] > 0:
return r
@@ -1531,7 +1531,7 @@ def _run(self, i):
self._merge_dicts_with_tags(
add_deps_recursive, versions_meta['add_deps_recursive'])
- r = _update_env(env, 'CM_TMP_CURRENT_SCRIPT_PATH', path)
+ r = _update_env(env, 'MLC_TMP_CURRENT_SCRIPT_PATH', path)
if r['return'] > 0:
return r
@@ -1730,9 +1730,9 @@ def _run(self, i):
# Assemble PIP versions
pip_version_string = ''
- pip_version = env.get('CM_VERSION', '')
- pip_version_min = env.get('CM_VERSION_MIN', '')
- pip_version_max = env.get('CM_VERSION_MAX', '')
+ pip_version = env.get('MLC_VERSION', '')
+ pip_version_min = env.get('MLC_VERSION_MIN', '')
+ pip_version_max = env.get('MLC_VERSION_MAX', '')
if pip_version != '':
pip_version_string = '==' + pip_version
@@ -1751,7 +1751,7 @@ def _run(self, i):
r = _update_env(
env,
- 'CM_TMP_PIP_VERSION_STRING',
+ 'MLC_TMP_PIP_VERSION_STRING',
pip_version_string)
if r['return'] > 0:
return r
@@ -1763,13 +1763,13 @@ def _run(self, i):
pip_version_string)
tmp_curdir = os.getcwd()
- if env.get('CM_OUTDIRNAME', '') != '':
- if os.path.isabs(env['CM_OUTDIRNAME']) or recursion:
- c_outdirname = env['CM_OUTDIRNAME']
+ if env.get('MLC_OUTDIRNAME', '') != '':
+ if os.path.isabs(env['MLC_OUTDIRNAME']) or recursion:
+ c_outdirname = env['MLC_OUTDIRNAME']
else:
c_outdirname = os.path.join(
- env['CM_TMP_CURRENT_PATH'], env['CM_OUTDIRNAME'])
- env['CM_OUTDIRNAME'] = c_outdirname
+ env['MLC_TMP_CURRENT_PATH'], env['MLC_OUTDIRNAME'])
+ env['MLC_OUTDIRNAME'] = c_outdirname
if not os.path.exists(c_outdirname):
os.makedirs(c_outdirname)
@@ -1916,13 +1916,13 @@ def _run(self, i):
if x not in cached_tags:
cached_tags.append(x)
- if env.get('CM_OUTDIRNAME', '') != '':
+ if env.get('MLC_OUTDIRNAME', '') != '':
os.chdir(tmp_curdir)
detected_version = env.get(
- 'CM_DETECTED_VERSION', env.get(
- 'CM_VERSION', ''))
- dependent_cached_path = env.get('CM_GET_DEPENDENT_CACHED_PATH', '')
+ 'MLC_DETECTED_VERSION', env.get(
+ 'MLC_VERSION', ''))
+ dependent_cached_path = env.get('MLC_GET_DEPENDENT_CACHED_PATH', '')
#######################################################################
# Finalize script
@@ -2251,7 +2251,7 @@ def _update_env_from_input(self, env, i):
for key in self.input_flags_converted_to_tmp_env:
value = i.get(key, '').strip()
if value != '':
- env['CM_TMP_' + key.upper()] = value
+ env['MLC_TMP_' + key.upper()] = value
for key in self.input_flags_converted_to_env:
value = i.get(
@@ -2264,7 +2264,7 @@ def _update_env_from_input(self, env, i):
key,
'')
if value:
- env[f"CM_{key.upper()}"] = value
+ env[f"MLC_{key.upper()}"] = value
r = update_env_with_values(env)
if r['return'] > 0:
@@ -2276,7 +2276,7 @@ def _update_env_from_input(self, env, i):
def _fix_cache_paths(self, env):
'''
cm_repos_path = os.environ.get(
- 'CM_REPOS', os.path.join(
+ 'MLC_REPOS', os.path.join(
os.path.expanduser("~"), "CM", "repos"))
current_cache_path = os.path.realpath(
os.path.join(cm_repos_path, "local", "cache"))
@@ -2324,7 +2324,7 @@ def _dump_version_info_for_script(
if not quiet and not silent:
pass
- for f in ['cm-run-script-versions.json', 'version_info.json']:
+ for f in ['mlc-run-script-versions.json', 'version_info.json']:
if not quiet and not silent:
logging.info('Dumping versions to {}'.format(f))
r = utils.save_json(f, self.run_state.get('version_info', []))
@@ -3626,7 +3626,7 @@ def _run_deps(self, deps, clean_env_keys_deps, env, state, const, const_state, a
if d.get("reuse_version", False):
for k in tmp_env:
- if k.startswith('CM_VERSION'):
+ if k.startswith('MLC_VERSION'):
env[k] = tmp_env[k]
update_tags_from_env = d.get("update_tags_from_env", [])
@@ -4097,9 +4097,9 @@ def find_file_in_paths(self, i):
run_script_input = i['run_script_input']
env_path_key = i['env_path_key']
- version = env.get('CM_VERSION', '')
- version_min = env.get('CM_VERSION_MIN', '')
- version_max = env.get('CM_VERSION_MAX', '')
+ version = env.get('MLC_VERSION', '')
+ version_min = env.get('MLC_VERSION_MIN', '')
+ version_max = env.get('MLC_VERSION_MAX', '')
x = ''
@@ -4230,9 +4230,9 @@ def detect_version_using_script(self, i):
run_script_input = i['run_script_input']
- version = env.get('CM_VERSION', '')
- version_min = env.get('CM_VERSION_MIN', '')
- version_max = env.get('CM_VERSION_MAX', '')
+ version = env.get('MLC_VERSION', '')
+ version_min = env.get('MLC_VERSION_MIN', '')
+ version_max = env.get('MLC_VERSION_MAX', '')
x = ''
@@ -4339,10 +4339,10 @@ def find_artifact(self, i):
# Check if forced to search in a specific path or multiple paths
# separated by OS var separator (usually : or ;)
- path = env.get('CM_TMP_PATH', '')
+ path = env.get('MLC_TMP_PATH', '')
if path != '' and env.get(
- 'CM_TMP_PATH_IGNORE_NON_EXISTANT', '') != 'yes':
+ 'MLC_TMP_PATH_IGNORE_NON_EXISTANT', '') != 'yes':
# Can be a list of paths
path_list_tmp = path.split(os_info['env_separator'])
for path_tmp in path_list_tmp:
@@ -4350,9 +4350,9 @@ def find_artifact(self, i):
return {'return': 1,
'error': 'path {} doesn\'t exist'.format(path_tmp)}
- # Check if forced path and file name from --input (CM_INPUT - local env
+ # Check if forced path and file name from --input (MLC_INPUT - local env
# - will not be visible for higher-level script)
- forced_file = env.get('CM_INPUT', '').strip()
+ forced_file = env.get('MLC_INPUT', '').strip()
if forced_file != '':
if not os.path.isfile(forced_file):
return {'return': 1,
@@ -4391,7 +4391,7 @@ def find_artifact(self, i):
path_list.append(os.path.dirname(path_tmp))
# Check if quiet
- select_default = True if env.get('CM_QUIET', '') == 'yes' else False
+ select_default = True if env.get('MLC_QUIET', '') == 'yes' else False
# Prepare paths to search
r = self.find_file_in_paths({'paths': path_list,
@@ -4602,7 +4602,7 @@ def parse_version(self, i):
which_env[env_key] = version
# to be recorded in the cache meta
- which_env['CM_DETECTED_VERSION'] = version
+ which_env['MLC_DETECTED_VERSION'] = version
return {'return': 0, 'version': version, 'string': string}
@@ -4754,7 +4754,7 @@ def clean_some_tmp_files(self, i):
env = i.get('env', {})
- cur_work_dir = env.get('CM_TMP_CURRENT_SCRIPT_WORK_PATH', '')
+ cur_work_dir = env.get('MLC_TMP_CURRENT_SCRIPT_WORK_PATH', '')
if cur_work_dir != '' and os.path.isdir(cur_work_dir):
for x in ['tmp-run.bat', 'tmp-state.json']:
xx = os.path.join(cur_work_dir, x)
@@ -5147,12 +5147,12 @@ def update_env_with_values(env, fail_on_not_found=False, extra_env=None):
# No placeholders found
if not placeholders:
- # Special handling for CM_GIT_URL
- if key == 'CM_GIT_URL' and env.get('CM_GIT_AUTH', "no") == "yes":
- if env.get('CM_GH_TOKEN', '') and '@' not in env['CM_GIT_URL']:
- params = {"token": env['CM_GH_TOKEN']}
+ # Special handling for MLC_GIT_URL
+ if key == 'MLC_GIT_URL' and env.get('MLC_GIT_AUTH', "no") == "yes":
+ if env.get('MLC_GH_TOKEN', '') and '@' not in env['MLC_GIT_URL']:
+ params = {"token": env['MLC_GH_TOKEN']}
value = get_git_url("token", value, params)
- elif 'CM_GIT_SSH' in env:
+ elif 'MLC_GIT_SSH' in env:
value = get_git_url("ssh", value)
env[key] = value
continue
@@ -5313,11 +5313,11 @@ def prepare_and_run_script_with_postprocessing(i, postprocess="postprocess"):
cur_dir = os.getcwd()
- r = _update_env(env, 'CM_TMP_CURRENT_SCRIPT_PATH', path)
+ r = _update_env(env, 'MLC_TMP_CURRENT_SCRIPT_PATH', path)
if r['return'] > 0:
return r
- r = _update_env(env, 'CM_TMP_CURRENT_SCRIPT_WORK_PATH', cur_dir)
+ r = _update_env(env, 'MLC_TMP_CURRENT_SCRIPT_WORK_PATH', cur_dir)
if r['return'] > 0:
return r
@@ -5586,9 +5586,9 @@ def get_script_name(env, path, script_name='run'):
from os.path import exists
- tmp_suff1 = env.get('CM_HOST_OS_FLAVOR', '')
- tmp_suff2 = env.get('CM_HOST_OS_VERSION', '')
- tmp_suff3 = env.get('CM_HOST_PLATFORM_FLAVOR', '')
+ tmp_suff1 = env.get('MLC_HOST_OS_FLAVOR', '')
+ tmp_suff2 = env.get('MLC_HOST_OS_VERSION', '')
+ tmp_suff3 = env.get('MLC_HOST_PLATFORM_FLAVOR', '')
if exists(os.path.join(path, script_name + '-' + tmp_suff1 +
'-' + tmp_suff2 + '-' + tmp_suff3 + '.sh')):
@@ -5869,7 +5869,7 @@ def is_dep_tobe_skipped(d, env):
Internal: check if this dependency is to be skipped
"""
if d.get('skip_if_fake_run', False) and env.get(
- 'CM_TMP_FAKE_RUN', '') == 'yes':
+ 'MLC_TMP_FAKE_RUN', '') == 'yes':
return True
if "enable_if_env" in d:
@@ -6311,7 +6311,7 @@ def dump_repro_start(repro_prefix, ii):
import json
# Clean reproducibility and experiment files
- for f in ['cm-output.json', 'version_info.json', '-input.json',
+ for f in ['mlc-output.json', 'version_info.json', '-input.json',
'-info.json', '-output.json', '-run-state.json']:
ff = repro_prefix + f if f.startswith('-') else f
if os.path.isfile(ff):
@@ -6363,7 +6363,7 @@ def dump_repro_start(repro_prefix, ii):
cm_output['input'] = ii
try:
- with open('cm-output.json', 'w', encoding='utf-8') as f:
+ with open('mlc-output.json', 'w', encoding='utf-8') as f:
json.dump(cm_output, f, ensure_ascii=False, indent=2)
except BaseException:
pass
@@ -6394,7 +6394,7 @@ def dump_repro(repro_prefix, rr, run_state):
# Attempt to read
try:
- r = utils.load_json('cm-output.json')
+ r = utils.load_json('mlc-output.json')
if r['return'] == 0:
cm_output = r['meta']
except BaseException:
@@ -6434,7 +6434,7 @@ def dump_repro(repro_prefix, rr, run_state):
cm_output['acm_ctuning_repro_badge_functional'] = True
try:
- with open('cm-output.json', 'w', encoding='utf-8') as f:
+ with open('mlc-output.json', 'w', encoding='utf-8') as f:
json.dump(
cm_output,
f,
diff --git a/automation/script/module_misc.py b/automation/script/module_misc.py
deleted file mode 100644
index 336073969..000000000
--- a/automation/script/module_misc.py
+++ /dev/null
@@ -1,2522 +0,0 @@
-import os
-from cmind import utils
-
-# Meta deps
-
-
-def process_deps(self_module, meta, meta_url, md_script_readme,
- key, extra_space='', skip_from_meta=False, skip_if_empty=False):
-
- x = ''
- y = []
- if len(meta.get(key, {})) > 0:
- x = '***'
-
- for d in meta[key]:
- d_tags = d.get('tags', '')
-
- z = extra_space + ' * ' + d_tags
- y.append(z)
-
- names = d.get('names', [])
-
- for kk in [
- ('enable_if_env', 'Enable this dependency only if all ENV vars are set'),
- ('enable_if_any_env',
- 'Enable this dependency only if any of ENV vars are set'),
- ('skip_if_env',
- 'Skip this dependenecy only if all ENV vars are set'),
- ('skip_if_any_env',
- 'Skip this dependenecy only if any of ENV vars are set')
- ]:
-
- k1 = kk[0]
- k2 = kk[1]
-
- conditions = d.get(k1, {})
- if len(conditions) > 0:
-                    y.append(extra_space +
-                             ' * {}:\n`{}`'.format(k2, str(conditions)))
-
- if len(names) > 0:
- y.append(
- extra_space +
- ' * CM names: `--adr.' +
- str(names) +
- '...`')
-
- # Attempt to find related CM scripts
- r = self_module.cmind.access({'action': 'find',
- 'automation': 'script',
- 'tags': d_tags})
- if r['return'] == 0:
- lst = r['list']
-
- if len(lst) == 0:
- y.append(extra_space +
- ' - *Warning: no scripts found*')
- else:
- for s in lst:
- s_repo_meta = s.repo_meta
-
- s_repo_alias = s_repo_meta.get('alias', '')
- s_repo_uid = s_repo_meta.get('uid', '')
-
- # Check URL
- s_url = ''
- s_url_repo = ''
- if s_repo_alias == 'internal':
- s_url_repo = 'https://github.com/mlcommons/ck/tree/master/cm/cmind/repo'
- s_url = s_url_repo + '/script/'
- elif '@' in s_repo_alias:
- s_url_repo = 'https://github.com/' + \
- s_repo_alias.replace('@', '/') + '/tree/master'
- if s_repo_meta.get('prefix', '') != '':
- s_url_repo += '/' + s_repo_meta['prefix']
- s_url = s_url_repo + '/script/'
-
- s_alias = s.meta['alias']
- y.append(
- extra_space + ' - CM script: [{}]({})'.format(s_alias, s_url + s_alias))
-
- z = ''
- if not skip_from_meta:
- z = ' from [meta]({})'.format(meta_url)
-
- if not skip_if_empty or len(y) > 0:
- md_script_readme.append(
- (extra_space +
- ' 1. ' +
- x +
- 'Read "{}" on other CM scripts' +
- z +
- x).format(key))
- md_script_readme += y
-
-############################################################
-
-
-def doc(i):
- """
- Add CM automation.
-
- Args:
- (CM input dict):
-
- (out) (str): if 'con', output to console
-
- parsed_artifact (list): prepared in CM CLI or CM access function
- [ (artifact alias, artifact UID) ] or
- [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
-
- (repos) (str): list of repositories to search for automations
-
- (output_dir) (str): output directory (../docs by default)
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
-
- """
-
- self_module = i['self_module']
-
- cur_dir = os.getcwd()
-
- template_file = 'template_list_of_scripts.md'
- list_file = 'list_of_scripts.md'
-
- public_taskforce = '[Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)'
-
- console = i.get('out') == 'con'
-
- repos = i.get('repos', '')
- if repos == '':
- repos = 'internal,a4705959af8e447a'
-
- parsed_artifact = i.get('parsed_artifact', [])
-
- if len(parsed_artifact) < 1:
- parsed_artifact = [('', ''), ('', '')]
- elif len(parsed_artifact) < 2:
- parsed_artifact.append(('', ''))
- else:
- repos = parsed_artifact[1][0]
-
- list_of_repos = repos.split(',') if ',' in repos else [repos]
-
- ii = utils.sub_input(i, self_module.cmind.cfg['artifact_keys'] + ['tags'])
-
- ii['out'] = None
-
- # Search for automations in repos
- lst = []
-
- for repo in list_of_repos:
- parsed_artifact[1] = (
- '', repo) if utils.is_cm_uid(repo) else (
- repo, '')
- ii['parsed_artifact'] = parsed_artifact
- r = self_module.search(ii)
- if r['return'] > 0:
- return r
- lst += r['list']
-
- md = []
-
- toc = []
-
- toc_category = {}
- toc_category_sort = {}
- script_meta = {}
- urls = {}
-
- for artifact in sorted(lst, key=lambda x: x.meta.get('alias', '')):
-
- toc_readme = []
-
- # Common index for all scripts
- md_script = []
-
- path = artifact.path
- meta = artifact.meta
- original_meta = artifact.original_meta
-
- print('Documenting {}'.format(path))
-
- alias = meta.get('alias', '')
- uid = meta.get('uid', '')
-
- script_meta[alias] = meta
-
- name = meta.get('name', '')
- developers = meta.get('developers', '')
-
-        # Use tags_help if present, otherwise use all tags
- tags = meta.get('tags_help', '').strip()
- if tags == '':
- tags = meta.get('tags', [])
- else:
- tags = tags.split(' ')
-
- variations = meta.get('variations', {})
-
- variation_keys = sorted(list(variations.keys()))
- version_keys = sorted(list(meta.get('versions', {}).keys()))
-
- default_variation = meta.get('default_variation', '')
- default_version = meta.get('default_version', '')
-
- input_mapping = meta.get('input_mapping', {})
- input_description = meta.get('input_description', {})
-
- category = meta.get('category', '').strip()
- category_sort = meta.get('category_sort', 0)
- if category != '':
- if category not in toc_category:
- toc_category[category] = []
-
- if category not in toc_category_sort or category_sort > 0:
- toc_category_sort[category] = category_sort
-
- if alias not in toc_category[category]:
- toc_category[category].append(alias)
-
- repo_path = artifact.repo_path
- repo_meta = artifact.repo_meta
-
- repo_alias = repo_meta.get('alias', '')
- repo_uid = repo_meta.get('uid', '')
-
- # Check URL
- url = ''
- url_repo = ''
- if repo_alias == 'internal':
- url_repo = 'https://github.com/mlcommons/ck/tree/dev/cm/cmind/repo'
- url = url_repo + '/script/'
- elif '@' in repo_alias:
- url_repo = 'https://github.com/' + \
- repo_alias.replace('@', '/') + '/tree/dev'
- if repo_meta.get('prefix', '') != '':
- url_repo += '/' + repo_meta['prefix']
- url = url_repo + '/script/'
-
- if url != '':
- url += alias
-
- urls[alias] = url
-
- # Check if there is about doc
- path_readme = os.path.join(path, 'README.md')
- path_readme_extra = os.path.join(path, 'README-extra.md')
- path_readme_about = os.path.join(path, 'README-about.md')
-
- readme_about = ''
- if os.path.isfile(path_readme_about):
- r = utils.load_txt(path_readme_about, split=True)
- if r['return'] > 0:
-                return r
-
- s = r['string']
- readme_about = r['list']
-
- #######################################################################
- # Start automatically generated README
- md_script_readme = [
-            # '<details>',
-            # '<summary>Click here to see the table of contents.</summary>',
-            # '{{CM_README_TOC}}',
-            # '</details>',
-            # '',
- 'Automatically generated README for this automation recipe: **{}**'.format(
- meta['alias']),
- ]
-
- md_script.append('## ' + alias)
- md_script.append('')
-
-# x = 'About'
-# md_script_readme.append('___')
-# md_script_readme.append('### '+x)
-# md_script_readme.append('')
-# toc_readme.append(x)
-
-# x = 'About'
-# md_script_readme.append('#### '+x)
-# md_script_readme.append('')
-# toc_readme.append(' '+x)
-
- if name != '':
- name += '.'
- md_script.append('*' + name + '*')
- md_script.append('')
-
-# md_script_readme.append('*'+name+'*')
-# md_script_readme.append('')
-
- if os.path.isfile(path_readme):
- r = utils.load_txt(path_readme, split=True)
- if r['return'] > 0:
-                return r
-
- s = r['string']
- readme = r['list']
-
-            if 'automatically generated' not in s.lower():
- found_path_readme_extra = True
-
- # Attempt to rename to README-extra.md
- if os.path.isfile(path_readme_extra):
- return {
- 'return': 1, 'error': 'README.md is not auto-generated and README-extra.md already exists - can\'t rename'}
-
- os.rename(path_readme, path_readme_extra)
-
- # Add to Git (if in git)
- os.chdir(path)
- os.system('git add README-extra.md')
- os.chdir(cur_dir)
-
- if category != '':
- md_script_readme.append('')
- md_script_readme.append('Category: **{}**'.format(category))
-
- md_script_readme.append('')
- md_script_readme.append('License: **Apache 2.0**')
-
- md_script_readme.append('')
-
- if developers == '':
- md_script_readme.append('Maintainers: ' + public_taskforce)
- else:
- md_script_readme.append('Developers: ' + developers)
-
- x = '* [{}]({})'.format(alias, url)
- if name != '':
- x += ' *(' + name + ')*'
- toc.append(x)
-
- cm_readme_extra = '[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name={},{}) ] '.format(
- alias, uid)
-
- if os.path.isfile(path_readme_extra):
- readme_extra_url = url + '/README-extra.md'
-
- x = '* Notes from the authors, contributors and users: [*GitHub*]({})'.format(
- readme_extra_url)
- md_script.append(x)
-
- cm_readme_extra += '[ [Notes from the authors, contributors and users](README-extra.md) ] '
-
- md_script_readme.append('')
- md_script_readme.append('---')
- md_script_readme.append('*' + cm_readme_extra.strip() + '*')
-
- if readme_about != '':
- md_script_readme += ['', '---', ''] + readme_about
-
- x = 'Summary'
- md_script_readme.append('')
- md_script_readme.append('---')
- md_script_readme += [
-            # '<details>',
-            # '<summary>Click to see the summary</summary>',
- '#### Summary',
- ''
- ]
- toc_readme.append(x)
-
-
-# if category != '':
-# x = 'Category'
-# md_script_readme.append('___')
-# md_script_readme.append('#### '+x)
-# md_script_readme.append(' ')
-# md_script_readme.append(category+'.')
-# toc_readme.append(x)
-
-# x = '* Category: *{}*'.format(category + '.')
-# md_script_readme.append(x)
-
-
-# x = 'Origin'
-# md_script_readme.append('___')
-# md_script_readme.append('#### '+x)
-# md_script_readme.append('')
-# toc_readme.append(x)
-
- x = '* CM GitHub repository: *[{}]({})*'.format(repo_alias, url_repo)
- md_script.append(x)
- md_script_readme.append(x)
-
- x = '* GitHub directory for this script: *[GitHub]({})*'.format(url)
- md_script.append(x)
- md_script_readme.append(x)
-
- # Check meta
- meta_file = self_module.cmind.cfg['file_cmeta']
- meta_path = os.path.join(path, meta_file)
-
- meta_file += '.yaml' if os.path.isfile(
- meta_path + '.yaml') else '.json'
-
- meta_url = url + '/' + meta_file
-
- x = '* CM meta description of this script: *[GitHub]({})*'.format(
- meta_url)
- md_script.append(x)
-
-# x = '* CM automation "script": *[Docs]({})*'.format('https://github.com/octoml/ck/blob/master/docs/list_of_automations.md#script')
-# md_script.append(x)
-# md_script_readme.append(x)
-
- if len(variation_keys) > 0:
- variation_pointer = "[,variations]"
- variation_pointer2 = "[variations]"
- else:
- variation_pointer = ''
- variation_pointer2 = ''
-
- if len(input_mapping) > 0:
- input_mapping_pointer = "[--input_flags]"
- else:
- input_mapping_pointer = ''
-
- cli_all_tags = '`cm run script --tags={}`'.format(','.join(tags))
- cli_all_tags3 = '`cm run script --tags={}{} {}`'.format(
- ','.join(tags), variation_pointer, input_mapping_pointer)
- x = '* CM CLI with all tags: {}*'.format(cli_all_tags)
- md_script.append(x)
-
- cli_help_tags_alternative = '`cmr "{}" --help`'.format(' '.join(tags))
-
- cli_all_tags_alternative = '`cmr "{}"`'.format(' '.join(tags))
- cli_all_tags_alternative3 = '`cmr "{} {}" {}`'.format(
- ' '.join(tags), variation_pointer2, input_mapping_pointer)
- cli_all_tags_alternative_j = '`cmr "{} {}" {} -j`'.format(
- ' '.join(tags), variation_pointer, input_mapping_pointer)
- x = '* CM CLI alternative: {}*'.format(cli_all_tags_alternative)
- md_script.append(x)
-
- cli_all_tags_alternative_docker = '`cm docker script "{}{}" {}`'.format(
- ' '.join(tags), variation_pointer2, input_mapping_pointer)
-
-
-# cli_uid = '`cm run script {} {}`'.format(meta['uid'], input_mapping_pointer)
-# x = '* CM CLI with alias and UID: {}*'.format(cli_uid)
-# md_script.append(x)
-
- if len(variation_keys) > 0:
- x = ''
- for variation in variation_keys:
- if x != '':
- x += '; '
- x += '_' + variation
- md_script.append('* Variations: *{}*'.format(x))
-
- if default_variation != '':
- md_script.append(
- '* Default variation: *{}*'.format(default_variation))
-
- if len(version_keys) > 0:
- md_script.append(
- '* Versions: *{}*'.format('; '.join(version_keys)))
-
- if default_version != '':
- md_script.append('* Default version: *{}*'.format(default_version))
-
- md_script.append('')
-# md_script_readme.append('')
-
- # Add extra to README
- x = 'Meta description'
-# md_script_readme.append('___')
-# md_script_readme.append('### '+x)
- md_script_readme.append(
- '* CM meta description for this script: *[{}]({})*'.format(meta_file, meta_file))
-# md_script_readme.append('')
-# toc_readme.append(x)
-
- x = 'Tags'
-# md_script_readme.append('___')
-# md_script_readme.append('### '+x)
- md_script_readme.append(
- '* All CM tags to find and reuse this script (see in above meta description): *{}*'.format(','.join(tags)))
-# md_script_readme.append('')
-# toc_readme.append(x)
-
- cache = meta.get('cache', False)
- md_script_readme.append('* Output cached? *{}*'.format(str(cache)))
-
- md_script_readme.append(
- '* See [pipeline of dependencies]({}) on other CM scripts'.format('#dependencies-on-other-cm-scripts'))
-
- md_script_readme += ['',
- # ' '
- ]
-
- # Add usage
- x1 = 'Reuse this script in your project'
- x1a = 'Install MLCommons CM automation meta-framework'
- x1aa = 'Pull CM repository with this automation recipe (CM script)'
- x1b = 'Print CM help from the command line'
- x2 = 'Customize and run this script from the command line with different variations and flags'
- x3 = 'Run this script from Python'
- x3a = 'Run this script via GUI'
- x4 = 'Run this script via Docker (beta)'
- md_script_readme += [
- '',
- '---',
- '### ' + x1,
- '',
- '#### ' + x1a,
- '',
- '* [Install CM](https://access.cknowledge.org/playground/?action=install)',
- '* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)',
- '',
- '#### ' + x1aa,
- '',
- '```cm pull repo {}```'.format(repo_alias),
- '',
- '#### ' + x1b,
- '',
- '```{}```'.format(cli_help_tags_alternative),
- '',
- '#### ' + x2,
- '',
- '{}'.format(cli_all_tags),
- '',
- '{}'.format(cli_all_tags3),
- '',
- '*or*',
- '',
- '{}'.format(cli_all_tags_alternative),
- '',
- '{}'.format(cli_all_tags_alternative3),
- '',
- # '3. {}'.format(cli_uid),
- '']
-
-        x = ' and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.'
- if len(variation_keys) > 0:
- md_script_readme += ['* *See the list of `variations` [here](#variations)' + x + '*',
- ''
- ]
-
- if input_description and len(input_description) > 0:
- x = 'Input Flags'
- md_script_readme.append('')
- md_script_readme.append('#### ' + x)
- toc_readme.append(' ' + x)
-
- md_script_readme.append('')
- key0 = ''
- for key in input_description:
- if key0 == '':
- key0 = key
-
- value = input_description[key]
- desc = value
-
- if isinstance(value, dict):
- desc = value['desc']
-
- choices = value.get('choices', [])
- if len(choices) > 0:
- desc += ' {' + ','.join(choices) + '}'
-
- default = value.get('default', '')
- if default != '':
- desc += ' (*' + str(default) + '*)'
-
- md_script_readme.append('* --**{}**={}'.format(key, desc))
-
- md_script_readme.append('')
- md_script_readme.append(
-                '**The above CLI flags can be used in the Python CM API as follows:**')
- md_script_readme.append('')
-
-            x = '```python\nr=cm.access({... , "' + key0 + '":...})\n```'
- md_script_readme.append(x)
-
- md_script_readme += ['#### ' + x3,
- '',
-                                 '<details>',
-                                 '<summary>Click here to expand this section.</summary>',
- '',
- '```python',
- '',
- 'import cmind',
- '',
- "r = cmind.access({'action':'run'",
- " 'automation':'script',",
- " 'tags':'{}'".format(
- ','.join(tags)),
- " 'out':'con',",
- " ...",
- " (other input keys for this script)",
- " ...",
- " })",
- "",
- "if r['return']>0:",
- " print (r['error'])",
- '',
- '```',
- '',
-                                 '</details>',
- '',
-
- '',
- '#### ' + x3a,
- '',
- '```cmr "cm gui" --script="' +
- ','.join(tags) + '"```',
- '',
- # 'Use this [online GUI](https://cKnowledge.org/cm-gui/?tags={}) to generate CM CMD.'.format(','.join(tags)),
- # '',
- '#### ' + x4,
- '',
- '{}'.format(cli_all_tags_alternative_docker),
- ''
- ]
- toc_readme.append(x1)
- toc_readme.append(' ' + x1a)
- toc_readme.append(' ' + x1b)
- toc_readme.append(' ' + x2)
- toc_readme.append(' ' + x3)
- toc_readme.append(' ' + x3a)
- toc_readme.append(' ' + x4)
-
- x = 'Customization'
- md_script_readme.append('___')
- md_script_readme.append('### ' + x)
- md_script_readme.append('')
- toc_readme.append(x)
-
- if len(variation_keys) > 0:
- # x = 'Variation groups'
- # md_script_readme.append('___')
- # md_script_readme.append('### '+x)
- # toc_readme.append(x)
-
- variation_groups = {}
- default_variations = []
- variation_md = {}
- variation_alias = {}
-
-            # Normally should not be used anymore; use default:true inside
-            # individual variations instead.
- default_variation = meta.get('default_variation', '')
-
- for variation_key in sorted(variation_keys):
- variation = variations[variation_key]
-
- alias = variation.get('alias', '').strip()
-
- if alias != '':
- aliases = variation_alias.get(alias, [])
- if variation_key not in aliases:
- aliases.append(variation_key)
- variation_alias[alias] = aliases
-
-                    # This variation is an alias; skip the rest of this iteration
- continue
-
- default = variation.get('default', False)
-
- if not default:
- # Check outdated
- if default_variation == variation_key:
- default = True
-
- extra1 = ''
- extra2 = ''
- if default:
- extra1 = '**'
- extra2 = '** (default)'
-
- default_variations.append(variation_key)
-
- md_var = []
-
- md_var.append(
- '* {}`_{}`{}'.format(extra1, variation_key, extra2))
-
- variation_md[variation_key] = md_var
-
-# md_script_readme+=md_var
-
- group = variation.get('group', '')
-
- if variation_key.endswith('_'):
- group = '*Internal group (variations should not be selected manually)*'
- elif group == '':
- group = '*No group (any variation can be selected)*'
-
- if group not in variation_groups:
- variation_groups[group] = []
-
- variation_groups[group].append(variation_key)
-
- x = 'Variations'
- md_script_readme.append('')
- md_script_readme.append('#### ' + x)
- toc_readme.append(' ' + x)
-
- variation_groups_order = meta.get('variation_groups_order', [])
- for variation in sorted(variation_groups):
- if variation not in variation_groups_order:
- variation_groups_order.append(variation)
-
- for group_key in variation_groups_order:
- md_script_readme.append('')
-
- if not group_key.startswith('*'):
- md_script_readme.append(
- ' * Group "**{}**"'.format(group_key))
- else:
- md_script_readme.append(' * {}'.format(group_key))
-
- md_script_readme += [
-                    '     <details>',
-                    '     <summary>Click here to expand this section.</summary>',
- ''
- ]
-
- for variation_key in sorted(variation_groups[group_key]):
- variation = variations[variation_key]
-
- xmd = variation_md[variation_key]
-
- aliases = variation_alias.get(variation_key, [])
- aliases2 = ['_' + v for v in aliases]
-
- if len(aliases) > 0:
- xmd.append(
- ' - Aliases: `{}`'.format(','.join(aliases2)))
-
- if len(variation.get('env', {})) > 0:
- xmd.append(' - Environment variables:')
- for key in variation['env']:
- xmd.append(
- ' - *{}*: `{}`'.format(key, variation['env'][key]))
-
- xmd.append(' - Workflow:')
-
- for dep in ['deps', 'prehook_deps',
- 'posthook_deps', 'post_deps']:
- process_deps(
- self_module,
- variation,
- meta_url,
- xmd,
- dep,
- ' ',
- True,
- True)
-
- for x in xmd:
- md_script_readme.append(' ' + x)
-
- md_script_readme.append('')
-                md_script_readme.append('     </details>')
- md_script_readme.append('')
-
- # Check if has invalid_variation_combinations
- vvc = meta.get('invalid_variation_combinations', [])
- if len(vvc) > 0:
- x = 'Unsupported or invalid variation combinations'
- md_script_readme.append('')
- md_script_readme.append('#### ' + x)
- md_script_readme.append('')
- md_script_readme.append('')
- md_script_readme.append('')
- toc_readme.append(' ' + x)
-
- for v in vvc:
- vv = ['_' + x for x in v]
- md_script_readme.append('* `' + ','.join(vv) + '`')
-
- if len(default_variations) > 0:
- md_script_readme.append('')
- md_script_readme.append('#### Default variations')
- md_script_readme.append('')
-
- dv = ['_' + x for x in sorted(default_variations)]
-
- md_script_readme.append('`{}`'.format(','.join(dv)))
-
- # Check if has valid_variation_combinations
- vvc = meta.get('valid_variation_combinations', [])
- if len(vvc) > 0:
- x = 'Valid variation combinations checked by the community'
- md_script_readme.append('')
- md_script_readme.append('#### ' + x)
- md_script_readme.append('')
- md_script_readme.append('')
- md_script_readme.append('')
- toc_readme.append(' ' + x)
-
- for v in vvc:
- vv = ['_' + x for x in v]
- md_script_readme.append('* `' + ','.join(vv) + '`')
-
- # Check input flags
- if input_mapping and len(input_mapping) > 0:
- x = 'Script flags mapped to environment'
- md_script_readme.append('')
- md_script_readme.append('#### ' + x)
- toc_readme.append(' ' + x)
-
- md_script_readme.append('')
-            md_script_readme.append('<details>')
-            md_script_readme.append(
-                '<summary>Click here to expand this section.</summary>')
-
- md_script_readme.append('')
- key0 = ''
- for key in sorted(input_mapping):
- if key0 == '':
- key0 = key
- value = input_mapping[key]
- md_script_readme.append(
- '* `--{}=value` → `{}=value`'.format(key, value))
-
- md_script_readme.append('')
- md_script_readme.append(
-                '**The above CLI flags can be used in the Python CM API as follows:**')
- md_script_readme.append('')
-
-            x = '```python\nr=cm.access({... , "' + key0 + '":...})\n```'
- md_script_readme.append(x)
-
- md_script_readme.append('')
-            md_script_readme.append('</details>')
- md_script_readme.append('')
-
- # Default environment
- default_env = meta.get('default_env', {})
-
- x = 'Default environment'
-# md_script_readme.append('___')
- md_script_readme.append('#### ' + x)
- toc_readme.append(' ' + x)
-
- md_script_readme.append('')
- md_script_readme.append('')
-        md_script_readme.append('<details>')
-        md_script_readme.append(
-            '<summary>Click here to expand this section.</summary>')
- md_script_readme.append('')
- md_script_readme.append(
- 'These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags.')
- md_script_readme.append('')
-
- for key in default_env:
- value = default_env[key]
- md_script_readme.append('* {}: `{}`'.format(key, value))
-
- md_script_readme.append('')
-        md_script_readme.append('</details>')
- md_script_readme.append('')
-
- if len(version_keys) > 0 or default_version != '':
- x = 'Versions'
-# md_script_readme.append('___')
- md_script_readme.append('#### ' + x)
- toc_readme.append(x)
-
- if default_version != '':
- md_script_readme.append(
- 'Default version: `{}`'.format(default_version))
- md_script_readme.append('')
-
- if len(version_keys) > 0:
- for version in version_keys:
- md_script_readme.append('* `{}`'.format(version))
-
- # Add workflow
- x = 'Dependencies on other CM scripts'
- md_script_readme += ['___',
- '### ' + x,
- '']
- toc_readme.append(x)
-
-# md_script_readme.append('')
-# md_script_readme.append('<details>')
-# md_script_readme.append('<summary>Click here to expand this section.</summary>')
-
- md_script_readme.append('')
-
- # Check customize.py file
- path_customize = os.path.join(path, 'customize.py')
- found_customize = False
- found_customize_preprocess = False
- found_customize_postprocess = False
- found_output_env = []
-
- if os.path.isfile(path_customize):
- found_customize = True
-
- r = utils.load_txt(path_customize, split=True)
- if r['return'] > 0:
- return r
-
- customize = r['string']
- customize_l = r['list']
-
- if 'def preprocess(' in customize:
- found_customize_preprocess = True
-
- if 'def postprocess(' in customize:
- found_customize_postprocess = True
-
- # Ugly attempt to get output env
- found_postprocess = False
- for l in customize_l:
- # if not found_postprocess:
- # if 'def postprocess' in l:
- # found_postprocess = True
- # else:
- j = l.find(' env[')
- if j >= 0:
- j1 = l.find(']', j + 4)
- if j1 >= 0:
- j2 = l.find('=', j1 + 1)
- if j2 >= 0:
- key2 = l[j + 5:j1].strip()
- key = key2[1:-1]
-
- if key.startswith(
- 'CM_') and 'TMP' not in key and key not in found_output_env:
- found_output_env.append(key)
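-            # Illustrative match for the scan above (hypothetical line): a
-            # customize.py statement "    env['CM_FOO'] = 'value'" yields the
-            # key 'CM_FOO'; keys containing 'TMP' or not starting with 'CM_'
-            # are skipped.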
-
- process_deps(self_module, meta, meta_url, md_script_readme, 'deps')
-
- x = ''
- y = 'customize.py'
- if found_customize_preprocess:
- x = '***'
- y = '[' + y + '](' + url + '/' + y + ')'
- md_script_readme.append(
- (' 1. ' + x + 'Run "preprocess" function from {}' + x).format(y))
-
- process_deps(
- self_module,
- meta,
- meta_url,
- md_script_readme,
- 'prehook_deps')
-
- # Check scripts
- files = os.listdir(path)
- x = ''
- y = []
- for f in sorted(files):
- x = '***'
- if f.startswith('run') and (
- f.endswith('.sh') or f.endswith('.bat')):
- f_url = url + '/' + f
- y.append(' * [{}]({})'.format(f, f_url))
-
- md_script_readme.append(
-            (' 1. ' + x + 'Run native script if it exists' + x).format(y))
- md_script_readme += y
-
- process_deps(
- self_module,
- meta,
- meta_url,
- md_script_readme,
- 'posthook_deps')
-
- x = ''
- y = 'customize.py'
- if found_customize_postprocess:
- x = '***'
- y = '[' + y + '](' + url + '/' + y + ')'
- md_script_readme.append(
- (' 1. ' + x + 'Run "postrocess" function from {}' + x).format(y))
-
- process_deps(
- self_module,
- meta,
- meta_url,
- md_script_readme,
- 'post_deps')
- # md_script_readme.append(' ')
- md_script_readme.append('')
-
- # New environment
- new_env_keys = meta.get('new_env_keys', [])
-
- x = 'Script output'
- md_script_readme.append('___')
- md_script_readme.append('### ' + x)
- toc_readme.append(x)
-
- md_script_readme.append(cli_all_tags_alternative_j)
-
- x = 'New environment keys (filter)'
- md_script_readme.append('#### ' + x)
- toc_readme.append(x)
-
- md_script_readme.append('')
- for key in sorted(new_env_keys):
- md_script_readme.append('* `{}`'.format(key))
-
- # Pass found_output_env through above filter
- found_output_env_filtered = []
-
- import fnmatch
-
- for key in found_output_env:
- add = False
-
- for f in new_env_keys:
- if fnmatch.fnmatch(key, f):
- add = True
- break
-
- if add:
- found_output_env_filtered.append(key)
-
- x = 'New environment keys auto-detected from customize'
- md_script_readme.append('#### ' + x)
- toc_readme.append(x)
-
- md_script_readme.append('')
- for key in sorted(found_output_env_filtered):
- md_script_readme.append('* `{}`'.format(key))
-
- # Add maintainers
-# x = 'Maintainers'
-# md_script_readme.append('___')
-# md_script_readme.append('### '+x)
-# md_script_readme.append('')
-# md_script_readme.append('* ' + public_taskforce)
-# toc_readme.append(x)
-
- # Process TOC
- toc_readme_string = '\n'
- for x in toc_readme:
- x2 = x
- prefix = ''
-
- if x.startswith(' '):
- prefix = ' '
- x2 = x[1:]
-
- x2 = x2.lower().replace(' ', '-').replace(',', '')
- toc_readme_string += prefix + '* [{}](#{})\n'.format(x, x2)
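-        # Illustrative slug produced by the loop above:
-        #   'Script flags mapped to environment' -> '#script-flags-mapped-to-environment'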
-
- # Add to the total list
- md += md_script
-
- s = '\n'.join(md_script_readme)
-
- s = s.replace('{{CM_README_EXTRA}}', cm_readme_extra)
-# s = s.replace('{{CM_SEE_README_EXTRA}}', cm_see_readme_extra)
- s = s.replace('{{CM_README_TOC}}', toc_readme_string)
-
- r = utils.save_txt(path_readme, s)
- if r['return'] > 0:
- return r
-
- # Add to Git (if in git)
- os.chdir(path)
- os.system('git add README.md')
- os.chdir(cur_dir)
-
- # Recreate TOC with categories
- toc2 = []
-
- # , key = lambda x: -toc_category_sort[x]):
- for category in sorted(toc_category):
- toc2.append('### ' + category)
- toc2.append('')
-
- for script in sorted(toc_category[category]):
-
- meta = script_meta[script]
-
- name = meta.get('name', '')
-
- url = urls[script]
-
- x = '* [{}]({})'.format(script, url)
- if name != '':
- x += ' *(' + name + ')*'
-
- toc2.append(x)
-
- toc2.append('')
-
- toc_category_string = ''
- for category in sorted(toc_category):
- category_link = category.lower().replace(' ', '-').replace('/', '')
- toc_category_string += '* [{}](#{})\n'.format(category, category_link)
-
- # Load template
- r = utils.load_txt(os.path.join(self_module.path, template_file))
- if r['return'] > 0:
- return r
-
- s = r['string']
-
- s = s.replace('{{CM_TOC2}}', '\n'.join(toc2))
- s = s.replace('{{CM_TOC}}', '\n'.join(toc))
-# s = s.replace('{{CM_MAIN}}', '\n'.join(md))
- s = s.replace('{{CM_MAIN}}', '')
- s = s.replace('{{CM_TOC_CATEGORIES}}', toc_category_string)
-
- # Output
- output_dir = i.get('output_dir', '')
-
- if output_dir == '':
- output_dir = '..'
-
- output_file = os.path.join(output_dir, list_file)
-
- r = utils.save_txt(output_file, s)
- if r['return'] > 0:
- return r
-
- out_docs_file = os.path.join(
- "..",
- "docs",
- "scripts",
- category,
- alias,
- "index.md")
- r = utils.save_txt(out_docs_file, s)
- if r['return'] > 0:
- return r
-
- return {'return': 0}
-
-
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# This function takes in a host path and returns the absolute path on the host
-# and in the container. If mounts is passed, the function appends the host path
-# and the container path to mounts in the form "host_path:container_path".
-def update_path_for_docker(path, mounts=None, force_path_target=''):
-
- path_orig = ''
- path_target = ''
-
- if path != '': # and (os.path.isfile(path) or os.path.isdir(path)):
- path = os.path.abspath(path)
-
- path_target = path
- path_orig = path
-
- if os.name == 'nt':
- from pathlib import PureWindowsPath, PurePosixPath
-
- x = PureWindowsPath(path_orig)
- path_target = str(PurePosixPath('/', *x.parts[1:]))
-
- if not path_target.startswith('/'):
- path_target = '/' + path_target
-
- path_target = '/cm-mount' + \
- path_target if force_path_target == '' else force_path_target
-
- # If file, mount directory
- if os.path.isfile(path) or not os.path.isdir(path):
- x = os.path.dirname(path_orig) + ':' + os.path.dirname(path_target)
- else:
- x = path_orig + ':' + path_target
-
-    # Check that there are no duplicates
- if mounts is not None:
- to_add = True
- for y in mounts:
- if y.lower() == x.lower():
- to_add = False
- break
- if to_add:
- mounts.append(x)
-
- return (path_orig, path_target)
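-
-# Illustrative usage sketch (hypothetical paths; assumes the file exists on the host):
-#
-#   mounts = []
-#   host, target = update_path_for_docker('/data/models/resnet50.onnx', mounts)
-#   # host   == '/data/models/resnet50.onnx'
-#   # target == '/cm-mount/data/models/resnet50.onnx'
-#   # mounts == ['/data/models:/cm-mount/data/models']  (for a file, its parent dir is mounted)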
-
-############################################################
-
-
-def process_inputs(i):
-
- import copy
-
- i_run_cmd_arc = i['run_cmd_arc']
- docker_settings = i['docker_settings']
- mounts = i['mounts']
-
- # Check if need to update/map/mount inputs and env
- i_run_cmd = copy.deepcopy(i_run_cmd_arc)
-
- def get_value_using_key_with_dots(d, k):
- v = None
- j = k.find('.')
- if j >= 0:
- k1 = k[:j]
- k2 = k[j + 1:]
-
- if k1 in d:
- v = d[k1]
-
- if '.' in k2:
- v, d, k = get_value_using_key_with_dots(v, k2)
- else:
- d = v
- k = k2
- if isinstance(v, dict):
- v = v.get(k2)
- else:
- v = None
- else:
- if k == '':
- v = d
- else:
- v = d.get(k)
-
- return v, d, k
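-    # Illustrative lookup (hypothetical input):
-    #
-    #   d = {'env': {'CM_DATASET_PATH': '/data'}}
-    #   v, d2, k2 = get_value_using_key_with_dots(d, 'env.CM_DATASET_PATH')
-    #   # v == '/data'; d2 is the parent dict and k2 == 'CM_DATASET_PATH',
-    #   # so the caller can rewrite the leaf value in place via d2[k2]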
-
- docker_input_paths = docker_settings.get('input_paths', [])
- if len(i_run_cmd) > 0:
- for k in docker_input_paths:
- v2, i_run_cmd2, k2 = get_value_using_key_with_dots(i_run_cmd, k)
-
- if v2 is not None:
- v = i_run_cmd2[k2]
-
- path_orig, path_target = update_path_for_docker(v, mounts)
-
- if path_target != '':
- i_run_cmd2[k2] = path_target
-
- return {'return': 0, 'run_cmd': i_run_cmd}
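-
-# Sketch of the overall flow (hypothetical values): if docker_settings declares
-# input_paths=['tarball'] and the caller passed --tarball=/tmp/x.tar.gz, the
-# returned run_cmd has run_cmd['tarball'] rewritten to the container path
-# (e.g. '/cm-mount/tmp/x.tar.gz') while mounts gains '/tmp:/cm-mount/tmp'.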
-
-
-############################################################
-def regenerate_script_cmd(i):
-
- script_uid = i['script_uid']
- script_alias = i['script_alias']
- tags = i['tags']
- docker_settings = i['docker_settings']
- fake_run = i.get('fake_run', False)
-
- i_run_cmd = i['run_cmd']
-
-    # Remove from env everything that has a host path value
- if i_run_cmd.get('env'):
- for key in list(i_run_cmd.get('env')):
- if isinstance(i_run_cmd['env'][key], str) and ((os.path.join("local", "cache", "") in i_run_cmd['env'][key]) or (
- os.path.join("CM", "repos", "") in i_run_cmd['env'][key])):
- del (i_run_cmd['env'][key])
- elif isinstance(i_run_cmd['env'][key], list):
- values_to_remove = []
- for val in i_run_cmd['env'][key]:
- if isinstance(val, str) and ((os.path.join("local", "cache", "") in val) or (
- os.path.join("CM", "repos", "") in val)):
- values_to_remove.append(val)
- if values_to_remove == i_run_cmd['env'][key]:
- del (i_run_cmd['env'][key])
- else:
- for val in values_to_remove:
- i_run_cmd['env'][key].remove(val)
-
- docker_run_cmd_prefix = i['docker_run_cmd_prefix']
-
- # Regenerate command from dictionary input
- run_cmd = 'cm run script'
-
- x = ''
-
- # Check if there are some tags without variation
- requested_tags = i_run_cmd.get('tags', [])
-
- tags_without_variation = False
- for t in requested_tags:
- if not t.startswith('_'):
- tags_without_variation = True
- break
-
- if not tags_without_variation:
- # If no tags without variation, add script alias and UID explicitly
- if script_uid != '':
- x = script_uid
- if script_alias != '':
- if x != '':
- x = ',' + x
- x = script_alias + x
-
- if x != '':
- run_cmd += ' ' + x + ' '
-
- skip_input_for_fake_run = docker_settings.get(
- 'skip_input_for_fake_run', [])
- add_quotes_to_keys = docker_settings.get('add_quotes_to_keys', [])
-
- def rebuild_flags(i_run_cmd, fake_run,
- skip_input_for_fake_run, add_quotes_to_keys, key_prefix):
-
- run_cmd = ''
-
- keys = list(i_run_cmd.keys())
-
- if 'tags' in keys:
- # Move tags first
- tags_position = keys.index('tags')
- del (keys[tags_position])
- keys = ['tags'] + keys
-
- for k in keys:
- # Assemble long key if dictionary
- long_key = key_prefix
- if long_key != '':
- long_key += '.'
- long_key += k
-
- if fake_run and long_key in skip_input_for_fake_run:
- continue
-
- v = i_run_cmd[k]
-
- q = '\\"' if long_key in add_quotes_to_keys else ''
-
- if isinstance(v, dict):
- run_cmd += rebuild_flags(v,
- fake_run,
- skip_input_for_fake_run,
- add_quotes_to_keys,
- long_key)
- elif isinstance(v, list):
- x = ''
- for vv in v:
- if x != '':
- x += ','
- x += q + str(vv) + q
- run_cmd += ' --' + long_key + ',=' + x
- else:
- run_cmd += ' --' + long_key + '=' + q + str(v) + q
-
- return run_cmd
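-
-    # Illustrative rebuild (hypothetical input):
-    #
-    #   rebuild_flags({'tags': 'run,mlperf', 'env': {'CM_X': '1'}, 'quiet': True},
-    #                 False, [], [], '')
-    #   # == " --tags=run,mlperf --env.CM_X=1 --quiet=True"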
-
- run_cmd += rebuild_flags(i_run_cmd,
- fake_run,
- skip_input_for_fake_run,
- add_quotes_to_keys,
- '')
-
- run_cmd = docker_run_cmd_prefix + ' && ' + \
- run_cmd if docker_run_cmd_prefix != '' else run_cmd
-
- return {'return': 0, 'run_cmd_string': run_cmd}
-
-
-############################################################
-def aux_search(i):
-
- self_module = i['self_module']
-
- inp = i['input']
-
- repos = inp.get('repos', '')
-# Commented out by Grigori Fursin on 20240412 because this line prevents
-# searching for scripts in other public or private repositories.
-# Not sure why we enforce just 2 repositories.
-#
-# if repos == '': repos='internal,a4705959af8e447a'
-
- parsed_artifact = inp.get('parsed_artifact', [])
-
- if len(parsed_artifact) < 1:
- parsed_artifact = [('', ''), ('', '')]
- elif len(parsed_artifact) < 2:
- parsed_artifact.append(('', ''))
- else:
- repos = parsed_artifact[1][0]
-
- list_of_repos = repos.split(',') if ',' in repos else [repos]
-
- ii = utils.sub_input(
- inp,
- self_module.cmind.cfg['artifact_keys'] +
- ['tags'])
-
- ii['out'] = None
-
- # Search for automations in repos
- lst = []
- for repo in list_of_repos:
- parsed_artifact[1] = (
- '', repo) if utils.is_cm_uid(repo) else (
- repo, '')
- ii['parsed_artifact'] = parsed_artifact
- r = self_module.search(ii)
- if r['return'] > 0:
- return r
- lst += r['list']
-
- return {'return': 0, 'list': lst}
-
-
-############################################################
-def dockerfile(i):
- """
-    Generate a Dockerfile to run a CM script via Docker.
-
- Args:
- (CM input dict):
-
- (out) (str): if 'con', output to console
- (repos) (str): list of repositories to search for automations
- (output_dir) (str): output directory (./ by default)
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
-
- """
-
- import copy
-
- # Check simplified CMD: cm docker script "python app image-classification onnx"
- # If artifact has spaces, treat them as tags!
- self_module = i['self_module']
- self_module.cmind.access(
- {'action': 'detect_tags_in_artifact', 'automation': 'utils', 'input': i})
-
- # Prepare "clean" input to replicate command
- r = self_module.cmind.access({'action': 'prune_input',
- 'automation': 'utils',
- 'input': i,
- 'extra_keys_starts_with': ['docker_']})
- i_run_cmd_arc = r['new_input']
-
- cur_dir = os.getcwd()
-
- quiet = i.get('quiet', False)
-
- console = i.get('out') == 'con'
-
- # Search for script(s)
- r = aux_search({'self_module': self_module, 'input': i})
- if r['return'] > 0:
- return r
-
- lst = r['list']
-
- if len(lst) == 0:
- return {'return': 1, 'error': 'no scripts were found'}
-
-
-# if i.get('cmd'):
-# run_cmd = "cm run script " + " ".join( a for a in i['cmd'] if not a.startswith('--docker_') )
-# elif i.get('artifact'):
-# run_cmd = "cm run script "+i['artifact']
-# elif i.get('tags'):
-# run_cmd = "cm run script \""+" "+" ".join(i['tags']) + "\""
-# else:
-# run_cmd = ""
-#
-# run_cmd = i.get('docker_run_cmd_prefix') + ' && ' + run_cmd if i.get('docker_run_cmd_prefix') else run_cmd
-
- env = i.get('env', {})
- state = i.get('state', {})
- const = i.get('const', {})
- const_state = i.get('const_state', {})
- script_automation = i['self_module']
-
- dockerfile_env = i.get('dockerfile_env', {})
-
- tags_split = i.get('tags', '').split(",")
- variation_tags = [t[1:] for t in tags_split if t.startswith("_")]
-
- for artifact in sorted(lst, key=lambda x: x.meta.get('alias', '')):
-
- meta = artifact.meta
-
- script_path = artifact.path
-
- tags = meta.get("tags", [])
- tag_string = ",".join(tags)
-
- script_alias = meta.get('alias', '')
- script_uid = meta.get('uid', '')
-
- verbose = i.get('v', False)
- show_time = i.get('show_time', False)
-
- run_state = {'deps': [], 'fake_deps': [], 'parent': None}
- run_state['script_id'] = script_alias + "," + script_uid
- run_state['script_variation_tags'] = variation_tags
- variations = meta.get('variations', {})
- docker_settings = meta.get('docker', {})
- docker_settings['dockerfile_env'] = dockerfile_env
- state['docker'] = docker_settings
- add_deps_recursive = i.get('add_deps_recursive', {})
-
- r = script_automation.update_state_from_meta(
- meta,
- env,
- state,
- const,
- const_state,
- deps=[],
- post_deps=[],
- prehook_deps=[],
- posthook_deps=[],
- new_env_keys=[],
- new_state_keys=[],
- run_state=run_state,
- i=i)
- if r['return'] > 0:
- return r
-
- r = script_automation._update_state_from_variations(
- i,
- meta,
- variation_tags,
- variations,
- env,
- state,
- const,
- const_state,
- deps=[],
- post_deps=[],
- prehook_deps=[],
- posthook_deps=[],
- new_env_keys_from_meta=[],
- new_state_keys_from_meta=[],
- add_deps_recursive=add_deps_recursive,
- run_state=run_state,
- recursion_spaces='',
- verbose=False)
- if r['return'] > 0:
- return r
-
- docker_settings = state['docker']
- dockerfile_env = docker_settings['dockerfile_env']
- dockerfile_env['CM_RUN_STATE_DOCKER'] = True
-
- if not docker_settings.get('run', True) and not i.get(
- 'docker_run_override', False):
- print("docker.run set to False in _cm.json")
- continue
- '''run_config_path = os.path.join(script_path,'run_config.yml')
- if not os.path.exists(run_config_path):
- print("No run_config.yml file present in {}".format(script_path))
- continue
- import yaml
- with open(run_config_path, 'r') as run_config_file:
- run_config = yaml.safe_load(run_config_file)
- docker_settings = run_config.get('docker')
- if not docker_settings or not docker_settings.get('build') or not run_config.get('run_with_default_inputs'):
- print("Run config is not configured for docker run in {}".format(run_config_path))
- continue
- '''
-
- deps = docker_settings.get('build_deps', [])
- if deps:
- r = script_automation._run_deps(
- deps,
- [],
- env,
- {},
- {},
- {},
- {},
- '',
- [],
- '',
- False,
- '',
- verbose,
- show_time,
- ' ',
- run_state)
- if r['return'] > 0:
- return r
- # For updating meta from update_meta_if_env
- r = script_automation.update_state_from_meta(
- meta,
- env,
- state,
- const,
- const_state,
- deps=[],
- post_deps=[],
- prehook_deps=[],
- posthook_deps=[],
- new_env_keys=[],
- new_state_keys=[],
- run_state=run_state,
- i=i)
- if r['return'] > 0:
- return r
- docker_settings = state['docker']
-
- d_env = i_run_cmd_arc.get('env', {})
- for key in list(d_env.keys()):
- if key.startswith("CM_TMP_"):
- del (d_env[key])
-
- # Check if need to update/map/mount inputs and env
- r = process_inputs({'run_cmd_arc': i_run_cmd_arc,
- 'docker_settings': docker_settings,
- 'mounts': []})
- if r['return'] > 0:
- return r
-
- i_run_cmd = r['run_cmd']
-
- docker_run_cmd_prefix = i.get(
- 'docker_run_cmd_prefix', docker_settings.get(
- 'run_cmd_prefix', ''))
-
- r = regenerate_script_cmd({'script_uid': script_uid,
- 'script_alias': script_alias,
- 'run_cmd': i_run_cmd,
- 'tags': tags,
- 'fake_run': True,
- 'docker_settings': docker_settings,
- 'docker_run_cmd_prefix': docker_run_cmd_prefix})
- if r['return'] > 0:
- return r
-
- run_cmd = r['run_cmd_string']
-
- cm_repo = i.get(
- 'docker_cm_repo',
- docker_settings.get(
- 'cm_repo',
- 'mlcommons@mlperf-automations'))
- cm_repo_branch = i.get(
- 'docker_cm_repo_branch',
- docker_settings.get(
- 'cm_repo_branch',
- 'main'))
-
- cm_repo_flags = i.get(
- 'docker_cm_repo_flags',
- docker_settings.get(
- 'cm_repo_flags',
- ''))
-
- docker_base_image = i.get(
- 'docker_base_image',
- docker_settings.get('base_image'))
- docker_os = i.get(
- 'docker_os', docker_settings.get(
- 'docker_os', 'ubuntu'))
- docker_os_version = i.get(
- 'docker_os_version', docker_settings.get(
- 'docker_os_version', '22.04'))
-
- docker_cm_repos = i.get(
- 'docker_cm_repos',
- docker_settings.get(
- 'cm_repos',
- ''))
-
- docker_skip_cm_sys_upgrade = i.get(
- 'docker_skip_cm_sys_upgrade', docker_settings.get(
- 'skip_cm_sys_upgrade', ''))
-
- docker_extra_sys_deps = i.get('docker_extra_sys_deps', '')
-
- if not docker_base_image:
- dockerfilename_suffix = docker_os + '_' + docker_os_version
- else:
- if os.name == 'nt':
- dockerfilename_suffix = docker_base_image.replace(
- '/', '-').replace(':', '-')
- else:
- dockerfilename_suffix = docker_base_image.split("/")
- dockerfilename_suffix = dockerfilename_suffix[len(
- dockerfilename_suffix) - 1]
-
- fake_run_deps = i.get(
- 'fake_run_deps', docker_settings.get(
- 'fake_run_deps', False))
- docker_run_final_cmds = docker_settings.get(
- 'docker_run_final_cmds', [])
-
- r = check_gh_token(i, docker_settings, quiet)
- if r['return'] > 0:
- return r
- gh_token = r['gh_token']
- i['docker_gh_token'] = gh_token # To pass to docker function if needed
-
- if i.get('docker_real_run', docker_settings.get(
- 'docker_real_run', False)):
- fake_run_option = " "
- fake_run_deps = False
- else:
- fake_run_option = " --fake_run"
-
- docker_copy_files = i.get(
- 'docker_copy_files',
- docker_settings.get(
- 'copy_files',
- []))
-
- env['CM_DOCKER_PRE_RUN_COMMANDS'] = docker_run_final_cmds
-
- docker_path = i.get('docker_path', '').strip()
- if docker_path == '':
- docker_path = script_path
-
- dockerfile_path = os.path.join(
- docker_path,
- 'dockerfiles',
- dockerfilename_suffix +
- '.Dockerfile')
-
- if i.get('print_deps'):
- cm_input = {'action': 'run',
- 'automation': 'script',
- 'tags': f"""{i.get('tags')}""",
- 'print_deps': True,
- 'quiet': True,
- 'silent': True,
- 'fake_run': True,
- 'fake_deps': True
- }
- r = self_module.cmind.access(cm_input)
- if r['return'] > 0:
- return r
- print_deps = r['new_state']['print_deps']
- comments = ["#RUN " + dep for dep in print_deps]
- comments.append("")
- comments.append("# Run CM workflow")
- else:
- comments = []
-
- if i.get('docker_push_image', '') in ['True', True, 'yes']:
- env['CM_DOCKER_PUSH_IMAGE'] = 'yes'
-
- cm_docker_input = {'action': 'run',
- 'automation': 'script',
- 'tags': 'build,dockerfile',
- 'cm_repo': cm_repo,
- 'cm_repo_branch': cm_repo_branch,
- 'cm_repo_flags': cm_repo_flags,
- 'docker_base_image': docker_base_image,
- 'docker_os': docker_os,
- 'docker_os_version': docker_os_version,
- 'skip_cm_sys_upgrade': docker_skip_cm_sys_upgrade,
- 'file_path': dockerfile_path,
- 'fake_run_option': fake_run_option,
- 'comments': comments,
- 'run_cmd': f'{run_cmd} --quiet',
- 'script_tags': f"""{i.get('tags')}""",
- 'copy_files': docker_copy_files,
- 'quiet': True,
- 'env': env,
- 'dockerfile_env': dockerfile_env,
- 'v': i.get('v', False),
- 'fake_docker_deps': fake_run_deps,
- 'print_deps': True,
- 'real_run': True
- }
-
- if docker_cm_repos != '':
- cm_docker_input['cm_repos'] = docker_cm_repos
-
- if gh_token != '':
- cm_docker_input['gh_token'] = gh_token
-
- if docker_extra_sys_deps != '':
- cm_docker_input['extra_sys_deps'] = docker_extra_sys_deps
-
- r = self_module.cmind.access(cm_docker_input)
- if r['return'] > 0:
- return r
-
- print('')
- print("Dockerfile generated at " + dockerfile_path)
-
- return {'return': 0}
-
-# we mount the main folder of the CM cache entry in case any file/folder
-# in that cache entry is needed inside the container
-
-
-def get_host_path(value):
- path_split = value.split(os.sep)
- if len(path_split) == 1:
- return value
-
- new_value = ''
- if "cache" in path_split and "local":
- repo_entry_index = path_split.index("local")
- if len(path_split) >= repo_entry_index + 3:
- return os.sep.join(path_split[0:repo_entry_index + 3])
-
- return value
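-
-# Sketch (hypothetical path): for a file inside a CM cache entry such as
-#   /home/user/CM/repos/local/cache/abc123/data/file.txt
-# get_host_path() returns the cache-entry root
-# /home/user/CM/repos/local/cache/abc123 so that the whole entry can be
-# mounted rather than a single file.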
-
-
-def get_container_path_script(i):
- tmp_dep_cached_path = i['tmp_dep_cached_path']
- value_mnt, value_env = get_container_path(tmp_dep_cached_path)
- return {'return': 0, 'value_mnt': value_mnt, 'value_env': value_env}
-
-
-def get_container_path(value):
- path_split = value.split(os.sep)
- if len(path_split) == 1:
- return value
-
- new_value = ''
- if "cache" in path_split and "local" in path_split:
- new_path_split = ["", "home", "cmuser", "CM", "repos"]
- repo_entry_index = path_split.index("local")
- if len(path_split) >= repo_entry_index + 3:
- new_path_split1 = new_path_split + \
- path_split[repo_entry_index:repo_entry_index + 3]
- new_path_split2 = new_path_split + path_split[repo_entry_index:]
- return "/".join(new_path_split1), "/".join(new_path_split2)
- else:
- orig_path, target_path = update_path_for_docker(path=value)
- return target_path, target_path
-
- # return value, value
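-
-# Sketch (hypothetical path): a host cache entry file
-#   /home/user/CM/repos/local/cache/abc123/model.onnx
-# maps to the mount root '/home/cmuser/CM/repos/local/cache/abc123' and to the
-# in-container env value '/home/cmuser/CM/repos/local/cache/abc123/model.onnx'.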
-
-
-############################################################
-def docker(i):
- """
- CM automation to run CM scripts via Docker
-
- Args:
- (CM input dict):
-
- (out) (str): if 'con', output to console
-
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
-
- """
-
- import copy
- import re
-
- from cmind import __version__ as current_cm_version
-
- self_module = i['self_module']
-
- if isinstance(i.get('docker', None), dict):
- # Grigori started cleaning and refactoring this code on 20240929
- #
- # 1. use --docker dictionary instead of --docker_{keys}
-
- if utils.compare_versions(current_cm_version, '2.3.8.1') >= 0:
- docker_params = utils.convert_dictionary(i['docker'], 'docker')
- i.update(docker_params)
- del (i['docker'])
-
- quiet = i.get('quiet', False)
-
- detached = i.get('docker_detached', '')
- if detached == '':
- detached = i.get('docker_dt', '')
- if detached == '':
- detached = 'no'
-
- interactive = i.get('docker_interactive', '')
- if interactive == '':
- interactive = i.get('docker_it', '')
-
- verbose = i.get('v', False)
- show_time = i.get('show_time', False)
-
- # Check simplified CMD: cm docker script "python app image-classification onnx"
- # If artifact has spaces, treat them as tags!
- self_module.cmind.access(
- {'action': 'detect_tags_in_artifact', 'automation': 'utils', 'input': i})
-
- # CAREFUL -> artifacts and parsed_artifacts are not supported in input
- # (and should not be?)
- if 'artifacts' in i:
- del (i['artifacts'])
- if 'parsed_artifacts' in i:
- del (i['parsed_artifacts'])
-
- # Prepare "clean" input to replicate command
- r = self_module.cmind.access({'action': 'prune_input',
- 'automation': 'utils',
- 'input': i,
- 'extra_keys_starts_with': ['docker_']})
- i_run_cmd_arc = r['new_input']
-
- env = i.get('env', {})
-
- noregenerate_docker_file = i.get('docker_noregenerate', False)
- norecreate_docker_image = i.get('docker_norecreate', True)
- recreate_docker_image = i.get('docker_recreate', False)
- if recreate_docker_image: # force recreate
- norecreate_docker_image = False
-
- if i.get('docker_skip_build', False):
- noregenerate_docker_file = True
- norecreate_docker_image = True
- env['CM_DOCKER_SKIP_BUILD'] = 'yes'
-
- # Check available configurations
- docker_cfg = i.get('docker_cfg', '')
- docker_cfg_uid = i.get('docker_cfg_uid', '')
-
- if docker_cfg != '' or docker_cfg_uid != '':
- # Check if docker_cfg is turned on but not selected
- if isinstance(docker_cfg, bool) or str(
- docker_cfg).lower() in ['true', 'yes']:
- docker_cfg = ''
-
- r = self_module.cmind.access({'action': 'select_cfg',
- 'automation': 'utils,dc2743f8450541e3',
- 'tags': 'basic,docker,configurations',
- 'title': 'docker',
- 'alias': docker_cfg,
- 'uid': docker_cfg_uid})
- if r['return'] > 0:
- if r['return'] == 16:
- return {'return': 1, 'error': 'Docker configuration {} was not found'.format(
- docker_cfg)}
- return r
-
- selection = r['selection']
-
- docker_input_update = selection['meta']['input']
-
- i.update(docker_input_update)
-
- ##########################################################################
- # Run dockerfile
- if not noregenerate_docker_file:
- r = utils.call_internal_module(
- self_module, __file__, 'module_misc', 'dockerfile', i)
- if r['return'] > 0:
- return r
-
- # Save current directory
- cur_dir = os.getcwd()
-
- console = i.get('out') == 'con'
-
- # Search for script(s)
- r = aux_search({'self_module': self_module, 'input': i})
- if r['return'] > 0:
- return r
-
- lst = r['list']
-
- if len(lst) == 0:
- return {'return': 1, 'error': 'no scripts were found'}
-
- env['CM_RUN_STATE_DOCKER'] = False
- script_automation = i['self_module']
- state = i.get('state', {})
- const = i.get('const', {})
- const_state = i.get('const_state', {})
-
- tags_split = i.get('tags', '').split(",")
- variation_tags = [t[1:] for t in tags_split if t.startswith("_")]
-
- docker_cache = i.get('docker_cache', "yes")
- if docker_cache in ["no", False, "False"]:
- if 'CM_DOCKER_CACHE' not in env:
- env['CM_DOCKER_CACHE'] = docker_cache
-
- image_repo = i.get('docker_image_repo', '')
-
- # Host system needs to have docker
- r = self_module.cmind.access({'action': 'run',
- 'automation': 'script',
- 'tags': "get,docker"})
- if r['return'] > 0:
- return r
-
- for artifact in sorted(lst, key=lambda x: x.meta.get('alias', '')):
-
- meta = artifact.meta
-
- if i.get('help', False):
- return utils.call_internal_module(self_module, __file__, 'module_help', 'print_help', {
- 'meta': meta, 'path': artifact.path})
-
- script_path = artifact.path
-
- tags = meta.get("tags", [])
- tag_string = ",".join(tags)
-
- script_alias = meta.get('alias', '')
- script_uid = meta.get('uid', '')
-
- mounts = copy.deepcopy(i.get('docker_mounts', []))
-
- '''run_config_path = os.path.join(script_path,'run_config.yml')
- if not os.path.exists(run_config_path):
- print("No run_config.yml file present in {}".format(script_path))
- continue
- import yaml
- with open(run_config_path, 'r') as run_config_file:
- run_config = yaml.safe_load(run_config_file)
- '''
-
- variations = meta.get('variations', {})
- docker_settings = meta.get('docker', {})
- state['docker'] = docker_settings
- # Todo: Support state, const and add_deps_recursive
- run_state = {'deps': [], 'fake_deps': [], 'parent': None}
- run_state['script_id'] = script_alias + "," + script_uid
- run_state['script_variation_tags'] = variation_tags
- add_deps_recursive = i.get('add_deps_recursive', {})
-
- r = script_automation.update_state_from_meta(
- meta,
- env,
- state,
- const,
- const_state,
- deps=[],
- post_deps=[],
- prehook_deps=[],
- posthook_deps=[],
- new_env_keys=[],
- new_state_keys=[],
- run_state=run_state,
- i=i)
- if r['return'] > 0:
- return r
-
- r = script_automation._update_state_from_variations(
- i,
- meta,
- variation_tags,
- variations,
- env,
- state,
- const,
- const_state,
- deps=[],
- post_deps=[],
- prehook_deps=[],
- posthook_deps=[],
- new_env_keys_from_meta=[],
- new_state_keys_from_meta=[],
- add_deps_recursive=add_deps_recursive,
- run_state=run_state,
- recursion_spaces='',
- verbose=False)
- if r['return'] > 0:
- return r
-
- docker_settings = state['docker']
-
- if not docker_settings.get('run', True) and not i.get(
- 'docker_run_override', False):
- print("docker.run set to False in _cm.json")
- continue
- '''
- if not docker_settings or not docker_settings.get('build') or not run_config.get('run_with_default_inputs'):
- print("Run config is not configured for docker run in {}".format(run_config_path))
- continue
- '''
-
- r = script_automation._update_env_from_input(env, i)
- if r['return'] > 0:
- return r
-
- # mount outdirname path
- if env.get('CM_OUTDIRNAME', '') != '':
- mounts.append(f"""{env['CM_OUTDIRNAME']}:{env['CM_OUTDIRNAME']}""")
-
- # Check if need to update/map/mount inputs and env
- r = process_inputs({'run_cmd_arc': i_run_cmd_arc,
- 'docker_settings': docker_settings,
- 'mounts': mounts})
- if r['return'] > 0:
- return r
-
- i_run_cmd = r['run_cmd']
-
- # Check if need to mount home directory
- current_path_target = '/cm-mount/current'
- if docker_settings.get('mount_current_dir', '') == 'yes':
- update_path_for_docker(
- '.', mounts, force_path_target=current_path_target)
-
- _os = i.get('docker_os', docker_settings.get('os', 'ubuntu'))
- version = i.get(
- 'docker_os_version',
- docker_settings.get(
- 'os_version',
- '22.04'))
-
- build_deps = docker_settings.get('deps', [])
- deps = docker_settings.get('deps', [])
- deps = build_deps + deps
- if deps:
- r = script_automation._run_deps(
- deps,
- [],
- env,
- {},
- {},
- {},
- {},
- '',
- [],
- '',
- False,
- '',
- verbose,
- show_time,
- ' ',
- run_state)
- if r['return'] > 0:
- return r
-
- # For updating meta from update_meta_if_env
- r = script_automation.update_state_from_meta(
- meta,
- env,
- state,
- const,
- const_state,
- deps=[],
- post_deps=[],
- prehook_deps=[],
- posthook_deps=[],
- new_env_keys=[],
- new_state_keys=[],
- run_state=run_state,
- i=i)
- if r['return'] > 0:
- return r
-
- docker_settings = state['docker']
-
- for key in docker_settings.get('mounts', []):
- mounts.append(key)
-
- # Updating environment variables from CM input based on input_mapping
- # from meta
- input_mapping = meta.get('input_mapping', {})
-
- for c_input in input_mapping:
- if c_input in i:
- env[input_mapping[c_input]] = i[c_input]
- # del(i[c_input])
-
- # Updating environment variables from CM input based on
- # docker_input_mapping from meta
-
- docker_input_mapping = docker_settings.get('docker_input_mapping', {})
-
- for c_input in docker_input_mapping:
- if c_input in i:
- env[docker_input_mapping[c_input]] = i[c_input]
- # del(i[c_input])
-
- # env keys corresponding to container mounts are explicitly passed to
- # the container run cmd
- container_env = {}
- for index in range(len(mounts)):
- mount = mounts[index]
-            # Since Windows paths may contain a second ':', search for the separator from the right
- j = mount.rfind(':')
- if j > 0:
- mount_parts = [mount[:j], mount[j + 1:]]
- else:
- return {
- 'return': 1, 'error': 'Can\'t find separator : in a mount string: {}'.format(mount)}
-
-# mount_parts = mount.split(":")
-# if len(mount_parts) != 2:
-# return {'return': 1, 'error': f'Invalid mount specified in docker
-# settings'}
-
- host_mount = mount_parts[0]
- new_host_mount = host_mount
- container_mount = mount_parts[1]
- new_container_mount = container_mount
-
- tmp_values = re.findall(r'\${{ (.*?) }}', str(host_mount))
- skip = False
- host_env_key = None
- if tmp_values:
- for tmp_value in tmp_values:
- if tmp_value in env:
- host_env_key = tmp_value
- new_host_mount = get_host_path(env[tmp_value])
- else: # we skip those mounts
- mounts[index] = None
- skip = True
- break
-
- tmp_values = re.findall(r'\${{ (.*?) }}', str(container_mount))
- if tmp_values:
- for tmp_value in tmp_values:
- container_env_key = tmp_value
- if tmp_value in env:
- new_container_mount, new_container_mount_env = get_container_path(
- env[tmp_value])
- container_env_key = new_container_mount_env
- else: # we skip those mounts
- mounts[index] = None
- skip = True
- break
- else:
- container_env_key = str(container_mount)
-
- if skip:
- continue
- mounts[index] = new_host_mount + ":" + new_container_mount
- if host_env_key:
- container_env[host_env_key] = container_env_key
-
- for v in docker_input_mapping:
- if docker_input_mapping[v] == host_env_key:
- i[v] = container_env_key
- i_run_cmd[v] = container_env_key
-
- mounts = list(filter(lambda item: item is not None, mounts))
-
- mount_string = "" if len(mounts) == 0 else ",".join(mounts)
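-        # Illustrative resolution of the templates above (hypothetical values):
-        # a mount '${{ CM_DATASET_PATH }}:/data' with env CM_DATASET_PATH=/host/ds
-        # becomes '/host/ds:/data'; mounts whose env key is unset are dropped.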
-
- # check for proxy settings and pass onto the docker
- proxy_keys = [
- "ftp_proxy",
- "FTP_PROXY",
- "http_proxy",
- "HTTP_PROXY",
- "https_proxy",
- "HTTPS_PROXY",
- "no_proxy",
- "NO_PROXY",
- "socks_proxy",
- "SOCKS_PROXY",
- "GH_TOKEN"]
-
- if env.get('+ CM_DOCKER_BUILD_ARGS', []) == []:
- env['+ CM_DOCKER_BUILD_ARGS'] = []
-
- for key in proxy_keys:
- if os.environ.get(key, '') != '':
- value = os.environ[key]
- container_env[key] = value
- env['+ CM_DOCKER_BUILD_ARGS'].append(
- "{}={}".format(key, value))
-
- if container_env:
- if not i_run_cmd.get('env'):
- i_run_cmd['env'] = container_env
- else:
- i_run_cmd['env'] = {**i_run_cmd['env'], **container_env}
-
- docker_use_host_group_id = i.get(
- 'docker_use_host_group_id',
- docker_settings.get('use_host_group_id'))
- if str(docker_use_host_group_id).lower() not in [
- 'false', 'no', '0'] and os.name != 'nt':
- env['+ CM_DOCKER_BUILD_ARGS'].append(
- "{}={}".format('GID', '\\" $(id -g $USER) \\"'))
-
- docker_use_host_user_id = i.get(
- 'docker_use_host_user_id',
- docker_settings.get('use_host_user_id'))
- if str(docker_use_host_user_id).lower() not in [
- 'false', 'no', '0'] and os.name != 'nt':
- env['+ CM_DOCKER_BUILD_ARGS'].append(
- "{}={}".format('UID', '\\" $(id -u $USER) \\"'))
-
- docker_base_image = i.get(
- 'docker_base_image',
- docker_settings.get('base_image'))
- docker_os = i.get('docker_os', docker_settings.get('os', 'ubuntu'))
- docker_os_version = i.get(
- 'docker_os_version', docker_settings.get(
- 'os_version', '22.04'))
- image_tag_extra = i.get(
- 'docker_image_tag_extra',
- docker_settings.get(
- 'image_tag_extra',
- '-latest'))
-
- if not docker_base_image:
- dockerfilename_suffix = docker_os + '_' + docker_os_version
- else:
- if os.name == 'nt':
- dockerfilename_suffix = docker_base_image.replace(
- '/', '-').replace(':', '-')
- else:
- dockerfilename_suffix = docker_base_image.split("/")
- dockerfilename_suffix = dockerfilename_suffix[len(
- dockerfilename_suffix) - 1]
-
- cm_repo = i.get(
- 'docker_cm_repo',
- docker_settings.get(
- 'cm_repo',
- 'mlcommons@mlperf-automations'))
-
- docker_path = i.get('docker_path', '').strip()
- if docker_path == '':
- docker_path = script_path
-
- dockerfile_path = os.path.join(
- docker_path,
- 'dockerfiles',
- dockerfilename_suffix +
- '.Dockerfile')
-
- # Skips docker run cmd and gives an interactive shell to the user
- docker_skip_run_cmd = i.get(
- 'docker_skip_run_cmd', docker_settings.get(
- 'skip_run_cmd', False))
-
- docker_pre_run_cmds = i.get(
- 'docker_pre_run_cmds', []) + docker_settings.get('pre_run_cmds', [])
-
- docker_run_cmd_prefix = i.get(
- 'docker_run_cmd_prefix', docker_settings.get(
- 'run_cmd_prefix', ''))
-
- all_gpus = i.get('docker_all_gpus', docker_settings.get('all_gpus'))
-
- num_gpus = i.get('docker_num_gpus', docker_settings.get('num_gpus'))
-
- device = i.get('docker_device', docker_settings.get('device'))
-
- image_name = i.get(
- 'docker_image_name',
- docker_settings.get(
- 'image_name',
- ''))
-
- r = check_gh_token(i, docker_settings, quiet)
- if r['return'] > 0:
- return r
- gh_token = r['gh_token']
-
- port_maps = i.get(
- 'docker_port_maps',
- docker_settings.get(
- 'port_maps',
- []))
-
- shm_size = i.get(
- 'docker_shm_size',
- docker_settings.get(
- 'shm_size',
- ''))
-
- pass_user_id = i.get(
- 'docker_pass_user_id',
- docker_settings.get(
- 'pass_user_id',
- ''))
- pass_user_group = i.get(
- 'docker_pass_user_group',
- docker_settings.get(
- 'pass_user_group',
- ''))
-
- extra_run_args = i.get(
- 'docker_extra_run_args',
- docker_settings.get(
- 'extra_run_args',
- ''))
-
- if detached == '':
- detached = docker_settings.get('detached', '')
-
- if str(docker_skip_run_cmd).lower() in ['true', '1', 'yes']:
- interactive = 'yes'
- elif interactive == '':
- interactive = docker_settings.get('interactive', '')
-
-
-# # Regenerate run_cmd
-# if i.get('cmd'):
-# run_cmd = "cm run script " + " ".join( a for a in i['cmd'] if not a.startswith('--docker_') )
-# elif i.get('artifact'):
-# run_cmd = "cm run script "+i['artifact']
-# elif i.get('tags'):
-# run_cmd = "cm run script \""+" "+" ".join(i['tags']) + "\""
-# else:
-# run_cmd = ""
-
- r = regenerate_script_cmd({'script_uid': script_uid,
- 'script_alias': script_alias,
- 'tags': tags,
- 'run_cmd': i_run_cmd,
- 'docker_settings': docker_settings,
- 'docker_run_cmd_prefix': i.get('docker_run_cmd_prefix', '')})
- if r['return'] > 0:
- return r
- run_cmd = r['run_cmd_string'] + ' ' + ' --docker_run_deps '
-
- env['CM_RUN_STATE_DOCKER'] = True
-
- if docker_settings.get('mount_current_dir', '') == 'yes':
- run_cmd = 'cd ' + current_path_target + ' && ' + run_cmd
-
- final_run_cmd = run_cmd if docker_skip_run_cmd not in [
- 'yes', True, 'True'] else 'cm version'
-
- print('')
- print('CM command line regenerated to be used inside Docker:')
- print('')
- print(final_run_cmd)
- print('')
-
- docker_recreate_image = 'yes' if str(norecreate_docker_image).lower() not in [
- "yes", "true", "1"] else 'no'
-
- if i.get('docker_push_image', '') in ['True', True, 'yes']:
- env['CM_DOCKER_PUSH_IMAGE'] = 'yes'
-
- cm_docker_input = {'action': 'run',
- 'automation': 'script',
- 'tags': 'run,docker,container',
- 'recreate': docker_recreate_image,
- 'docker_base_image': docker_base_image,
- 'docker_os': docker_os,
- 'docker_os_version': docker_os_version,
- 'cm_repo': cm_repo,
- 'env': env,
- 'interactive': interactive,
- 'mounts': mounts,
- # 'image_tag': script_alias,
- 'image_tag_extra': image_tag_extra,
- 'detached': detached,
- 'script_tags': f"""{i.get('tags')}""",
- 'run_cmd': final_run_cmd,
- 'v': i.get('v', False),
- 'quiet': True,
- 'pre_run_cmds': docker_pre_run_cmds,
- 'real_run': True,
- 'add_deps_recursive': {
- 'build-docker-image': {
- 'dockerfile': dockerfile_path
- }
- }
- }
-
- if image_repo:
- cm_docker_input['image_repo'] = image_repo
-
- if image_name:
- cm_docker_input['image_name'] = image_name
-
- if all_gpus:
- cm_docker_input['all_gpus'] = True
-
- if num_gpus:
- cm_docker_input['num_gpus'] = str(num_gpus)
-
- if device:
- cm_docker_input['device'] = device
-
- if gh_token != '':
- cm_docker_input['gh_token'] = gh_token
-
- if port_maps:
- cm_docker_input['port_maps'] = port_maps
-
- if shm_size != '':
- cm_docker_input['shm_size'] = shm_size
-
- if pass_user_id != '':
- cm_docker_input['pass_user_id'] = pass_user_id
-
- if pass_user_group != '':
- cm_docker_input['pass_user_group'] = pass_user_group
-
- if extra_run_args != '':
- cm_docker_input['extra_run_args'] = extra_run_args
-
- if i.get('docker_save_script', ''):
- cm_docker_input['save_script'] = i['docker_save_script']
-
- print('')
-
- r = self_module.cmind.access(cm_docker_input)
- if r['return'] > 0:
- return r
-
- return {'return': 0}
-
-############################################################
-
-
-def check_gh_token(i, docker_settings, quiet):
- gh_token = i.get('docker_gh_token', '')
-
- if docker_settings.get('gh_token_required', False) and gh_token == '':
- rx = {
- 'return': 1,
- 'error': 'GH token is required but not provided. Use --docker_gh_token to set it'}
-
- if quiet:
- return rx
-
- print('')
- gh_token = input(
- 'Enter GitHub token to access private CM repositories required for this CM script: ')
-
- if gh_token == '':
- return rx
-
- return {'return': 0, 'gh_token': gh_token}
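
Throughout the removed helper above, every Docker option follows the same precedence: an explicit CLI input (`docker_*`) wins over the script's `docker` settings, which in turn fall back to a hard default. A minimal sketch of that rule; `resolve_option` is an illustrative name that exists in neither codebase:

```python
# Sketch of the input-first / metadata-fallback pattern used by the
# removed helper; resolve_option is illustrative, not a real API.
def resolve_option(i, docker_settings, key, default=None):
    return i.get('docker_' + key, docker_settings.get(key, default))

i = {'docker_os': 'ubuntu'}                 # CLI input
docker_settings = {'os_version': '22.04'}   # script metadata

assert resolve_option(i, docker_settings, 'os') == 'ubuntu'         # input wins
assert resolve_option(i, docker_settings, 'os_version') == '22.04'  # metadata fallback
assert resolve_option(i, docker_settings, 'shm_size', '') == ''     # hard default
```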
diff --git a/automation/script/template-ae-python/_cm.yaml b/automation/script/template-ae-python/_cm.yaml
index 8019b3647..261e4cf75 100644
--- a/automation/script/template-ae-python/_cm.yaml
+++ b/automation/script/template-ae-python/_cm.yaml
@@ -13,10 +13,10 @@ deps:
script_name: run
input_mapping:
- experiment: CM_EXPERIMENT
+ experiment: MLC_EXPERIMENT
default_env:
- CM_EXPERIMENT: '1'
+ MLC_EXPERIMENT: '1'
variations:
install_deps:
diff --git a/automation/script/template-ae-python/analyze.bat b/automation/script/template-ae-python/analyze.bat
index 7e786771a..375cfaebf 100644
--- a/automation/script/template-ae-python/analyze.bat
+++ b/automation/script/template-ae-python/analyze.bat
@@ -4,9 +4,9 @@ set CUR_DIR=%cd%
echo.
echo Current execution path: %CUR_DIR%
-echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH%
-echo ENV CM_EXPERIMENT: %CM_EXPERIMENT%
+echo Path to script: %MLC_TMP_CURRENT_SCRIPT_PATH%
+echo ENV MLC_EXPERIMENT: %MLC_EXPERIMENT%
rem echo.
-rem %CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py
+rem %MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\main.py
rem IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/automation/script/template-ae-python/analyze.sh b/automation/script/template-ae-python/analyze.sh
index 630c3db3d..53c10c73c 100644
--- a/automation/script/template-ae-python/analyze.sh
+++ b/automation/script/template-ae-python/analyze.sh
@@ -4,9 +4,9 @@ CUR_DIR=${PWD}
echo ""
echo "Current execution path: ${CUR_DIR}"
-echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}"
-echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}"
+echo "Path to script: ${MLC_TMP_CURRENT_SCRIPT_PATH}"
+echo "ENV MLC_EXPERIMENT: ${MLC_EXPERIMENT}"
#echo ""
-#${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py
+#${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/main.py
#test $? -eq 0 || exit 1
diff --git a/automation/script/template-ae-python/customize.py b/automation/script/template-ae-python/customize.py
index 273999d46..bd7c12dd3 100644
--- a/automation/script/template-ae-python/customize.py
+++ b/automation/script/template-ae-python/customize.py
@@ -12,7 +12,7 @@ def preprocess(i):
automation = i['automation']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
return {'return': 0}
diff --git a/automation/script/template-ae-python/install_deps.bat b/automation/script/template-ae-python/install_deps.bat
index 47f7e7ce2..3419d9511 100644
--- a/automation/script/template-ae-python/install_deps.bat
+++ b/automation/script/template-ae-python/install_deps.bat
@@ -4,15 +4,15 @@ set CUR_DIR=%cd%
echo.
echo Current execution path: %CUR_DIR%
-echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH%
-echo ENV CM_EXPERIMENT: %CM_EXPERIMENT%
+echo Path to script: %MLC_TMP_CURRENT_SCRIPT_PATH%
+echo ENV MLC_EXPERIMENT: %MLC_EXPERIMENT%
-if exist "%CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt" (
+if exist "%MLC_TMP_CURRENT_SCRIPT_PATH%\requirements.txt" (
echo.
echo Installing requirements.txt ...
echo.
- %CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt
+ %MLC_PYTHON_BIN_WITH_PATH% -m pip install -r %MLC_TMP_CURRENT_SCRIPT_PATH%\requirements.txt
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
)
diff --git a/automation/script/template-ae-python/install_deps.sh b/automation/script/template-ae-python/install_deps.sh
index cb7c44c2b..5e8c50a20 100644
--- a/automation/script/template-ae-python/install_deps.sh
+++ b/automation/script/template-ae-python/install_deps.sh
@@ -4,14 +4,14 @@ CUR_DIR=${PWD}
echo ""
echo "Current execution path: ${CUR_DIR}"
-echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}"
-echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}"
+echo "Path to script: ${MLC_TMP_CURRENT_SCRIPT_PATH}"
+echo "ENV MLC_EXPERIMENT: ${MLC_EXPERIMENT}"
-if test -f "${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt"; then
+if test -f "${MLC_TMP_CURRENT_SCRIPT_PATH}/requirements.txt"; then
echo ""
echo "Installing requirements.txt ..."
echo ""
- ${CM_PYTHON_BIN_WITH_PATH} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt
+ ${MLC_PYTHON_BIN_WITH_PATH} -m pip install -r ${MLC_TMP_CURRENT_SCRIPT_PATH}/requirements.txt
test $? -eq 0 || exit 1
fi
diff --git a/automation/script/template-ae-python/main.py b/automation/script/template-ae-python/main.py
index caa499bf0..48b974b7f 100644
--- a/automation/script/template-ae-python/main.py
+++ b/automation/script/template-ae-python/main.py
@@ -4,7 +4,7 @@
print('')
print('Main script:')
- print('Experiment: {}'.format(os.environ.get('CM_EXPERIMENT', '')))
+ print('Experiment: {}'.format(os.environ.get('MLC_EXPERIMENT', '')))
print('')
exit(0)
diff --git a/automation/script/template-ae-python/plot.bat b/automation/script/template-ae-python/plot.bat
index 7e786771a..375cfaebf 100644
--- a/automation/script/template-ae-python/plot.bat
+++ b/automation/script/template-ae-python/plot.bat
@@ -4,9 +4,9 @@ set CUR_DIR=%cd%
echo.
echo Current execution path: %CUR_DIR%
-echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH%
-echo ENV CM_EXPERIMENT: %CM_EXPERIMENT%
+echo Path to script: %MLC_TMP_CURRENT_SCRIPT_PATH%
+echo ENV MLC_EXPERIMENT: %MLC_EXPERIMENT%
rem echo.
-rem %CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py
+rem %MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\main.py
rem IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/automation/script/template-ae-python/plot.sh b/automation/script/template-ae-python/plot.sh
index 630c3db3d..53c10c73c 100644
--- a/automation/script/template-ae-python/plot.sh
+++ b/automation/script/template-ae-python/plot.sh
@@ -4,9 +4,9 @@ CUR_DIR=${PWD}
echo ""
echo "Current execution path: ${CUR_DIR}"
-echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}"
-echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}"
+echo "Path to script: ${MLC_TMP_CURRENT_SCRIPT_PATH}"
+echo "ENV MLC_EXPERIMENT: ${MLC_EXPERIMENT}"
#echo ""
-#${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py
+#${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/main.py
#test $? -eq 0 || exit 1
diff --git a/automation/script/template-ae-python/reproduce.bat b/automation/script/template-ae-python/reproduce.bat
index 7e786771a..375cfaebf 100644
--- a/automation/script/template-ae-python/reproduce.bat
+++ b/automation/script/template-ae-python/reproduce.bat
@@ -4,9 +4,9 @@ set CUR_DIR=%cd%
echo.
echo Current execution path: %CUR_DIR%
-echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH%
-echo ENV CM_EXPERIMENT: %CM_EXPERIMENT%
+echo Path to script: %MLC_TMP_CURRENT_SCRIPT_PATH%
+echo ENV MLC_EXPERIMENT: %MLC_EXPERIMENT%
rem echo.
-rem %CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py
+rem %MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\main.py
rem IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/automation/script/template-ae-python/reproduce.sh b/automation/script/template-ae-python/reproduce.sh
index 630c3db3d..53c10c73c 100644
--- a/automation/script/template-ae-python/reproduce.sh
+++ b/automation/script/template-ae-python/reproduce.sh
@@ -4,9 +4,9 @@ CUR_DIR=${PWD}
echo ""
echo "Current execution path: ${CUR_DIR}"
-echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}"
-echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}"
+echo "Path to script: ${MLC_TMP_CURRENT_SCRIPT_PATH}"
+echo "ENV MLC_EXPERIMENT: ${MLC_EXPERIMENT}"
#echo ""
-#${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py
+#${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/main.py
#test $? -eq 0 || exit 1
diff --git a/automation/script/template-ae-python/run.bat b/automation/script/template-ae-python/run.bat
index 6c1274ce6..f1b69d26d 100644
--- a/automation/script/template-ae-python/run.bat
+++ b/automation/script/template-ae-python/run.bat
@@ -4,9 +4,9 @@ set CUR_DIR=%cd%
echo.
echo Current execution path: %CUR_DIR%
-echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH%
-echo ENV CM_EXPERIMENT: %CM_EXPERIMENT%
+echo Path to script: %MLC_TMP_CURRENT_SCRIPT_PATH%
+echo ENV MLC_EXPERIMENT: %MLC_EXPERIMENT%
echo.
-%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py
+%MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\main.py
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/automation/script/template-ae-python/run.sh b/automation/script/template-ae-python/run.sh
index 2150b45dc..a4b86e69a 100644
--- a/automation/script/template-ae-python/run.sh
+++ b/automation/script/template-ae-python/run.sh
@@ -4,9 +4,9 @@ CUR_DIR=${PWD}
echo ""
echo "Current execution path: ${CUR_DIR}"
-echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}"
-echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}"
+echo "Path to script: ${MLC_TMP_CURRENT_SCRIPT_PATH}"
+echo "ENV MLC_EXPERIMENT: ${MLC_EXPERIMENT}"
echo ""
-${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py
+${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/main.py
test $? -eq 0 || exit 1
diff --git a/automation/script/template-ae-python/validate.bat b/automation/script/template-ae-python/validate.bat
index 7e786771a..375cfaebf 100644
--- a/automation/script/template-ae-python/validate.bat
+++ b/automation/script/template-ae-python/validate.bat
@@ -4,9 +4,9 @@ set CUR_DIR=%cd%
echo.
echo Current execution path: %CUR_DIR%
-echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH%
-echo ENV CM_EXPERIMENT: %CM_EXPERIMENT%
+echo Path to script: %MLC_TMP_CURRENT_SCRIPT_PATH%
+echo ENV MLC_EXPERIMENT: %MLC_EXPERIMENT%
rem echo.
-rem %CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py
+rem %MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\main.py
rem IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/automation/script/template-ae-python/validate.sh b/automation/script/template-ae-python/validate.sh
index 630c3db3d..53c10c73c 100644
--- a/automation/script/template-ae-python/validate.sh
+++ b/automation/script/template-ae-python/validate.sh
@@ -4,9 +4,9 @@ CUR_DIR=${PWD}
echo ""
echo "Current execution path: ${CUR_DIR}"
-echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}"
-echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}"
+echo "Path to script: ${MLC_TMP_CURRENT_SCRIPT_PATH}"
+echo "ENV MLC_EXPERIMENT: ${MLC_EXPERIMENT}"
#echo ""
-#${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py
+#${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/main.py
#test $? -eq 0 || exit 1
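
The template hunks above and below rename every `CM_*` variable to `MLC_*` with no transition period. A script that has to run against both generations could read both spellings; the following compatibility shim is hypothetical and ships with neither automation:

```python
import os

def getenv_compat(name, default=''):
    """Read an MLC_* variable, falling back to the legacy CM_* spelling.
    Hypothetical helper for mixed environments, not part of either codebase."""
    if name.startswith('MLC_'):
        legacy = 'CM_' + name[len('MLC_'):]
        return os.environ.get(name, os.environ.get(legacy, default))
    return os.environ.get(name, default)

print(getenv_compat('MLC_EXPERIMENT', '1'))
```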
diff --git a/automation/script/template-python/_cm.yaml b/automation/script/template-python/_cm.yaml
index adbb8d4e7..11f646860 100644
--- a/automation/script/template-python/_cm.yaml
+++ b/automation/script/template-python/_cm.yaml
@@ -11,11 +11,11 @@ deps:
- python3
input_mapping:
- var1: CM_VAR1
+ var1: MLC_VAR1
req: PIP_REQUIREMENTS
default_env:
- CM_VAR1: 'something'
+ MLC_VAR1: 'something'
variations:
req:
diff --git a/automation/script/template-python/customize.py b/automation/script/template-python/customize.py
index 625b643d4..8961ab5ca 100644
--- a/automation/script/template-python/customize.py
+++ b/automation/script/template-python/customize.py
@@ -15,9 +15,9 @@ def preprocess(i):
automation = i['automation']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
- print(' ENV CM_VAR1: {}'.format(env.get('CM_VAR1', '')))
+ print(' ENV MLC_VAR1: {}'.format(env.get('MLC_VAR1', '')))
return {'return': 0}
diff --git a/automation/script/template-python/main.py b/automation/script/template-python/main.py
index e3302f36f..68245e7bd 100644
--- a/automation/script/template-python/main.py
+++ b/automation/script/template-python/main.py
@@ -4,7 +4,7 @@
print('')
print('Main script:')
- print('ENV CM_VAR1: {}'.format(os.environ.get('CM_VAR1', '')))
+ print('ENV MLC_VAR1: {}'.format(os.environ.get('MLC_VAR1', '')))
print('')
exit(0)
diff --git a/automation/script/template-python/run.bat b/automation/script/template-python/run.bat
index f9e1264bc..11e897362 100644
--- a/automation/script/template-python/run.bat
+++ b/automation/script/template-python/run.bat
@@ -4,22 +4,22 @@ set CUR_DIR=%cd%
echo.
echo Current execution path: %CUR_DIR%
-echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH%
+echo Path to script: %MLC_TMP_CURRENT_SCRIPT_PATH%
echo ENV PIP_REQUIREMENTS: %PIP_REQUIREMENTS%
-echo ENV CM_VAR1: %CM_VAR1%
+echo ENV MLC_VAR1: %MLC_VAR1%
if "%PIP_REQUIREMENTS%" == "True" (
- if exist "%CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt" (
+ if exist "%MLC_TMP_CURRENT_SCRIPT_PATH%\requirements.txt" (
echo.
echo Installing requirements.txt ...
echo.
- %CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt
+ %MLC_PYTHON_BIN_WITH_PATH% -m pip install -r %MLC_TMP_CURRENT_SCRIPT_PATH%\requirements.txt
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
)
)
echo.
-%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py
+%MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\main.py
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/automation/script/template-python/run.sh b/automation/script/template-python/run.sh
index a1a6aec2e..a3e2021b9 100644
--- a/automation/script/template-python/run.sh
+++ b/automation/script/template-python/run.sh
@@ -4,21 +4,21 @@ CUR_DIR=${PWD}
echo ""
echo "Current execution path: ${CUR_DIR}"
-echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}"
+echo "Path to script: ${MLC_TMP_CURRENT_SCRIPT_PATH}"
echo "ENV PIP_REQUIREMENTS: ${PIP_REQUIREMENTS}"
-echo "ENV CM_VAR1: ${CM_VAR1}"
+echo "ENV MLC_VAR1: ${MLC_VAR1}"
if [ "${PIP_REQUIREMENTS}" == "True" ]; then
- if test -f "${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt"; then
+ if test -f "${MLC_TMP_CURRENT_SCRIPT_PATH}/requirements.txt"; then
echo ""
echo "Installing requirements.txt ..."
echo ""
- ${CM_PYTHON_BIN_WITH_PATH} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt
+ ${MLC_PYTHON_BIN_WITH_PATH} -m pip install -r ${MLC_TMP_CURRENT_SCRIPT_PATH}/requirements.txt
test $? -eq 0 || exit 1
fi
fi
echo ""
-${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py
+${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/main.py
test $? -eq 0 || exit 1
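
In `_cm.yaml` above, `input_mapping` routes CLI flags into environment variables and `default_env` supplies a value when the flag is absent, so `--var1=hello` surfaces as `MLC_VAR1=hello` inside `run.sh`. A sketch of that resolution, assuming this is the whole mechanism:

```python
# Assumed semantics of input_mapping + default_env:
# explicit flags win, defaults fill the gaps.
def apply_input_mapping(cli_input, input_mapping, default_env):
    env = dict(default_env)
    for flag, env_key in input_mapping.items():
        if flag in cli_input:
            env[env_key] = cli_input[flag]
    return env

env = apply_input_mapping(
    {'var1': 'hello'},
    {'var1': 'MLC_VAR1', 'req': 'PIP_REQUIREMENTS'},
    {'MLC_VAR1': 'something'})
assert env == {'MLC_VAR1': 'hello'}
```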
diff --git a/automation/script/template-pytorch/_cm.yaml b/automation/script/template-pytorch/_cm.yaml
index eaff95e47..22cd7a635 100644
--- a/automation/script/template-pytorch/_cm.yaml
+++ b/automation/script/template-pytorch/_cm.yaml
@@ -24,11 +24,11 @@ deps:
input_mapping:
- var1: CM_VAR1
+ var1: MLC_VAR1
req: PIP_REQUIREMENTS
default_env:
- CM_VAR1: 'something'
+ MLC_VAR1: 'something'
variations:
req:
diff --git a/automation/script/template-pytorch/customize.py b/automation/script/template-pytorch/customize.py
index 625b643d4..8961ab5ca 100644
--- a/automation/script/template-pytorch/customize.py
+++ b/automation/script/template-pytorch/customize.py
@@ -15,9 +15,9 @@ def preprocess(i):
automation = i['automation']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
- print(' ENV CM_VAR1: {}'.format(env.get('CM_VAR1', '')))
+ print(' ENV MLC_VAR1: {}'.format(env.get('MLC_VAR1', '')))
return {'return': 0}
diff --git a/automation/script/template-pytorch/main.py b/automation/script/template-pytorch/main.py
index 217aed3b9..3bfcd7572 100644
--- a/automation/script/template-pytorch/main.py
+++ b/automation/script/template-pytorch/main.py
@@ -6,7 +6,7 @@
print('')
print('Main script:')
- print('ENV CM_VAR1: {}'.format(os.environ.get('CM_VAR1', '')))
+ print('ENV MLC_VAR1: {}'.format(os.environ.get('MLC_VAR1', '')))
print('ENV USE_CUDA: {}'.format(os.environ.get('USE_CUDA', '')))
print('')
print('PyTorch version: {}'.format(torch.__version__))
diff --git a/automation/script/template-pytorch/run.bat b/automation/script/template-pytorch/run.bat
index f9e1264bc..11e897362 100644
--- a/automation/script/template-pytorch/run.bat
+++ b/automation/script/template-pytorch/run.bat
@@ -4,22 +4,22 @@ set CUR_DIR=%cd%
echo.
echo Current execution path: %CUR_DIR%
-echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH%
+echo Path to script: %MLC_TMP_CURRENT_SCRIPT_PATH%
echo ENV PIP_REQUIREMENTS: %PIP_REQUIREMENTS%
-echo ENV CM_VAR1: %CM_VAR1%
+echo ENV MLC_VAR1: %MLC_VAR1%
if "%PIP_REQUIREMENTS%" == "True" (
- if exist "%CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt" (
+ if exist "%MLC_TMP_CURRENT_SCRIPT_PATH%\requirements.txt" (
echo.
echo Installing requirements.txt ...
echo.
- %CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt
+ %MLC_PYTHON_BIN_WITH_PATH% -m pip install -r %MLC_TMP_CURRENT_SCRIPT_PATH%\requirements.txt
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
)
)
echo.
-%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py
+%MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\main.py
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/automation/script/template-pytorch/run.sh b/automation/script/template-pytorch/run.sh
index a1a6aec2e..a3e2021b9 100644
--- a/automation/script/template-pytorch/run.sh
+++ b/automation/script/template-pytorch/run.sh
@@ -4,21 +4,21 @@ CUR_DIR=${PWD}
echo ""
echo "Current execution path: ${CUR_DIR}"
-echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}"
+echo "Path to script: ${MLC_TMP_CURRENT_SCRIPT_PATH}"
echo "ENV PIP_REQUIREMENTS: ${PIP_REQUIREMENTS}"
-echo "ENV CM_VAR1: ${CM_VAR1}"
+echo "ENV MLC_VAR1: ${MLC_VAR1}"
if [ "${PIP_REQUIREMENTS}" == "True" ]; then
- if test -f "${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt"; then
+ if test -f "${MLC_TMP_CURRENT_SCRIPT_PATH}/requirements.txt"; then
echo ""
echo "Installing requirements.txt ..."
echo ""
- ${CM_PYTHON_BIN_WITH_PATH} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt
+ ${MLC_PYTHON_BIN_WITH_PATH} -m pip install -r ${MLC_TMP_CURRENT_SCRIPT_PATH}/requirements.txt
test $? -eq 0 || exit 1
fi
fi
echo ""
-${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py
+${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/main.py
test $? -eq 0 || exit 1
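
The pytorch template's `main.py` only consumes environment state: `MLC_VAR1` arrives via `input_mapping`, while `USE_CUDA` is presumably set by a variation (how it is set is an assumption here). A standalone approximation of what the script prints:

```python
import os

print('ENV MLC_VAR1:', os.environ.get('MLC_VAR1', ''))
print('ENV USE_CUDA:', os.environ.get('USE_CUDA', ''))  # origin assumed

try:
    import torch
    print('PyTorch version:', torch.__version__)
    print('CUDA available:', torch.cuda.is_available())
except ImportError:
    print('PyTorch is not installed')
```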
diff --git a/automation/script/template/customize.py b/automation/script/template/customize.py
index 273999d46..bd7c12dd3 100644
--- a/automation/script/template/customize.py
+++ b/automation/script/template/customize.py
@@ -12,7 +12,7 @@ def preprocess(i):
automation = i['automation']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
return {'return': 0}
diff --git a/automation/script/template/run.sh b/automation/script/template/run.sh
index 4c23c380e..32cf4d51e 100644
--- a/automation/script/template/run.sh
+++ b/automation/script/template/run.sh
@@ -1,17 +1,17 @@
#!/bin/bash
-#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH}
#To export any variable
#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
-#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
echo "Running: "
-echo "${CM_RUN_CMD}"
+echo "${MLC_RUN_CMD}"
echo ""
-if [[ ${CM_FAKE_RUN} != "yes" ]]; then
- eval "${CM_RUN_CMD}"
+if [[ ${MLC_FAKE_RUN} != "yes" ]]; then
+ eval "${MLC_RUN_CMD}"
test $? -eq 0 || exit 1
fi
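
The generic template's `run.sh` is a thin wrapper: echo `MLC_RUN_CMD`, then execute it unless `MLC_FAKE_RUN=yes`. The same logic rendered in Python, as a sketch rather than the shipped wrapper:

```python
import os
import subprocess

run_cmd = os.environ.get('MLC_RUN_CMD', '')
print('Running: ')
print(run_cmd)
print('')

# MLC_FAKE_RUN=yes prints the command without executing it
if os.environ.get('MLC_FAKE_RUN', '') != 'yes' and run_cmd:
    rc = subprocess.call(run_cmd, shell=True)
    if rc != 0:
        raise SystemExit(rc)
```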
diff --git a/automation/script/template_list_of_scripts.md b/automation/script/template_list_of_scripts.md
index 198a500f1..07fb95cb7 100644
--- a/automation/script/template_list_of_scripts.md
+++ b/automation/script/template_list_of_scripts.md
@@ -17,11 +17,11 @@ via CM command line, Python API or GUI.
CM scripts can easily chained together into automation workflows using `deps` and `tags` keys
while automatically updating all environment variables and paths
-for a given task and platform [using simple JSON or YAML](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/app-image-classification-onnx-py/_cm.yaml).
+for a given task and platform [using simple JSON or YAML](https://github.com/mlcommons/ck/blob/master/mlc-mlops/script/app-image-classification-onnx-py/_cm.yaml).
*Note that CM is a community project being developed and extended by [MLCommons members and individual contributors](../CONTRIBUTING.md) -
- you can find source code of CM scripts maintained by MLCommons [here](../cm-mlops/script).
+ you can find source code of CM scripts maintained by MLCommons [here](../mlc-mlops/script).
Please join [Discord server](https://discord.gg/JjWNWXKxwT) to participate in collaborative developments or provide your feedback.*
@@ -40,13 +40,13 @@ for a given task and platform [using simple JSON or YAML](https://github.com/mlc
# List of CM scripts by categories
-{{CM_TOC_CATEGORIES}}
+{{MLC_TOC_CATEGORIES}}
-{{CM_TOC2}}
+{{MLC_TOC2}}
# List of all sorted CM scripts
-{{CM_TOC}}
+{{MLC_TOC}}
-{{CM_MAIN}}
+{{MLC_MAIN}}
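
`template_list_of_scripts.md` is itself a template: the `{{MLC_*}}` placeholders are filled in by a documentation generator. The substitution step presumably amounts to plain string replacement, roughly:

```python
# Guess at the generator's substitution step: plain replacement of
# {{KEY}} placeholders; the values shown here are made up.
template = '# List of CM scripts by categories\n\n{{MLC_TOC_CATEGORIES}}\n\n{{MLC_TOC}}\n'
substitutions = {
    'MLC_TOC_CATEGORIES': '* [Benchmarking](#benchmarking)',
    'MLC_TOC': '* [app-image-classification-onnx-py](#app-image-classification-onnx-py)',
}
for key, value in substitutions.items():
    template = template.replace('{{' + key + '}}', value)
print(template)
```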
diff --git a/automation/utils.py b/automation/utils.py
index 95aa0b2e9..61cc08e28 100644
--- a/automation/utils.py
+++ b/automation/utils.py
@@ -44,7 +44,7 @@ def get_host_os_info(i={}):
info['run_bat'] = 'call ${bat_file}'
info['start_script'] = ['@echo off', '']
info['env'] = {
- "CM_WINDOWS": "yes"
+ "MLC_WINDOWS": "yes"
}
else:
if platform.system().lower().startswith('darwin'):
@@ -121,7 +121,7 @@ def download_file(i):
(chunk_size) (int): chunk size in bytes (65536 by default)
(text) (str): print text before downloaded status ("Downloaded: " by default)
(verify) (bool): verify SSL certificate if True (True by default)
- can be switched by global env CM_UTILS_DOWNLOAD_VERIFY_SSL = no
+ can be switched by global env MLC_UTILS_DOWNLOAD_VERIFY_SSL = no
Returns:
(CM return dict):
@@ -170,8 +170,8 @@ def download_file(i):
text = i.get('text', 'Downloaded: ')
- if 'CM_UTILS_DOWNLOAD_VERIFY_SSL' in os.environ:
- verify = os.environ['CM_UTILS_DOWNLOAD_VERIFY_SSL'] == 'yes'
+ if 'MLC_UTILS_DOWNLOAD_VERIFY_SSL' in os.environ:
+ verify = os.environ['MLC_UTILS_DOWNLOAD_VERIFY_SSL'] == 'yes'
else:
verify = i.get('verify', True)
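
The hunk above keeps the same override semantics under the new name: when the `MLC_UTILS_DOWNLOAD_VERIFY_SSL` environment variable is present, it beats the per-call `verify` input. The decision isolated for clarity:

```python
import os

def resolve_verify(i):
    # Environment override wins over the per-call 'verify' input
    if 'MLC_UTILS_DOWNLOAD_VERIFY_SSL' in os.environ:
        return os.environ['MLC_UTILS_DOWNLOAD_VERIFY_SSL'] == 'yes'
    return i.get('verify', True)

os.environ['MLC_UTILS_DOWNLOAD_VERIFY_SSL'] = 'no'
assert resolve_verify({'verify': True}) is False
```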
diff --git a/automation/utils/README.md b/automation/utils/README.md
deleted file mode 100644
index 9a844c656..000000000
--- a/automation/utils/README.md
+++ /dev/null
@@ -1,387 +0,0 @@
-*This README is automatically generated - don't edit! Use `README-extra.md` for extra notes!*
-
-### Automation actions
-
-#### test
-
- * CM CLI: ```cm test utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L15))
- * CM CLI with UID: ```cm test utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L15))
- * CM Python API:
- ```python
- import cmind
-
- r=cmind.access({
-    'action':'test',
-    'automation':'utils,dc2743f8450541e3',
-    'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L15)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### get_host_os_info
-
- * CM CLI: ```cm get_host_os_info utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L54))
- * CM CLI with UID: ```cm get_host_os_info utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L54))
- * CM Python API:
- ```python
- import cmind
-
- r=cmind.access({
-    'action':'get_host_os_info',
-    'automation':'utils,dc2743f8450541e3',
-    'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L54)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### download_file
-
- * CM CLI: ```cm download_file utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L156))
- * CM CLI with UID: ```cm download_file utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L156))
- * CM Python API:
- ```python
- import cmind
-
- r=cmind.access({
-    'action':'download_file',
-    'automation':'utils,dc2743f8450541e3',
-    'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L156)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### unzip_file
-
- * CM CLI: ```cm unzip_file utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L265))
- * CM CLI with UID: ```cm unzip_file utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L265))
- * CM Python API:
- ```python
- import cmind
-
- r=cmind.access({
-    'action':'unzip_file',
-    'automation':'utils,dc2743f8450541e3',
-    'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L265)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### compare_versions
-
- * CM CLI: ```cm compare_versions utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L343))
- * CM CLI with UID: ```cm compare_versions utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L343))
- * CM Python API:
- ```python
- import cmind
-
- r=cmind.access({
-    'action':'compare_versions',
-    'automation':'utils,dc2743f8450541e3',
-    'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L343)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### json2yaml
-
- * CM CLI: ```cm json2yaml utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L391))
- * CM CLI with UID: ```cm json2yaml utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L391))
- * CM Python API:
- ```python
- import cmind
-
- r=cmind.access({
-    'action':'json2yaml',
-    'automation':'utils,dc2743f8450541e3',
-    'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L391)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### yaml2json
-
- * CM CLI: ```cm yaml2json utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L429))
- * CM CLI with UID: ```cm yaml2json utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L429))
- * CM Python API:
- ```python
- import cmind
-
- r=cmind.access({
-    'action':'yaml2json',
-    'automation':'utils,dc2743f8450541e3',
-    'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L429)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### sort_json
-
- * CM CLI: ```cm sort_json utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L467))
- * CM CLI with UID: ```cm sort_json utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L467))
- * CM Python API:
- ```python
- import cmind
-
- r=cmind.access({
-    'action':'sort_json',
-    'automation':'utils,dc2743f8450541e3',
-    'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L467)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### dos2unix
-
- * CM CLI: ```cm dos2unix utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L504))
- * CM CLI with UID: ```cm dos2unix utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L504))
- * CM Python API:
- ```python
- import cmind
-
- r=cmind.access({
-    'action':'dos2unix',
-    'automation':'utils,dc2743f8450541e3',
-    'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L504)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### replace_string_in_file
-
- * CM CLI: ```cm replace_string_in_file utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L541))
- * CM CLI with UID: ```cm replace_string_in_file utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L541))
- * CM Python API:
- ```python
- import cmind
-
- r=cmind.access({
-    'action':'replace_string_in_file',
-    'automation':'utils,dc2743f8450541e3',
-    'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L541)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### create_toc_from_md
-
- * CM CLI: ```cm create_toc_from_md utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L591))
- * CM CLI with UID: ```cm create_toc_from_md utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L591))
- * CM Python API:
- ```python
- import cmind
-
- r=cmind.access({
-    'action':'create_toc_from_md',
-    'automation':'utils,dc2743f8450541e3',
-    'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L591)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### copy_to_clipboard
-
- * CM CLI: ```cm copy_to_clipboard utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L659))
- * CM CLI with UID: ```cm copy_to_clipboard utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L659))
- * CM Python API:
- ```python
- import cmind
-
- r=cmind.access({
-    'action':'copy_to_clipboard',
-    'automation':'utils,dc2743f8450541e3',
-    'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L659)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### list_files_recursively
-
- * CM CLI: ```cm list_files_recursively utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L737))
- * CM CLI with UID: ```cm list_files_recursively utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L737))
- * CM Python API:
- ```python
- import cmind
-
- r=cmind.access({
-    'action':'list_files_recursively',
-    'automation':'utils,dc2743f8450541e3',
-    'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L737)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### generate_secret
-
- * CM CLI: ```cm generate_secret utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L770))
- * CM CLI with UID: ```cm generate_secret utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L770))
- * CM Python API:
- ```python
- import cmind
-
- r=cmind.access({
-    'action':'generate_secret',
-    'automation':'utils,dc2743f8450541e3',
-    'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L770)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### detect_tags_in_artifact
-
- * CM CLI: ```cm detect_tags_in_artifact utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L793))
- * CM CLI with UID: ```cm detect_tags_in_artifact utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L793))
- * CM Python API:
- ```python
- import cmind
-
- r=cmind.access({
-    'action':'detect_tags_in_artifact',
-    'automation':'utils,dc2743f8450541e3',
-    'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L793)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### prune_input
-
- * CM CLI: ```cm prune_input utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L822))
- * CM CLI with UID: ```cm prune_input utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L822))
- * CM Python API:
- ```python
- import cmind
-
- r=cmind.access({
-    'action':'prune_input',
-    'automation':'utils,dc2743f8450541e3',
-    'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L822)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### uid
-
- * CM CLI: ```cm uid utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L864))
- * CM CLI with UID: ```cm uid utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L864))
- * CM Python API:
- ```python
- import cmind
-
- r=cmind.access({
-    'action':'uid',
-    'automation':'utils,dc2743f8450541e3',
-    'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L864)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### system
-
- * CM CLI: ```cm system utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L891))
- * CM CLI with UID: ```cm system utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L891))
- * CM Python API:
- ```python
- import cmind
-
- r=cmind.access({
-    'action':'system',
-    'automation':'utils,dc2743f8450541e3',
-    'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L891)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-#### load_cfg
-
- * CM CLI: ```cm load_cfg utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L969))
- * CM CLI with UID: ```cm load_cfg utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L969))
- * CM Python API:
- ```python
- import cmind
-
- r=cmind.access({
-    'action':'load_cfg',
-    'automation':'utils,dc2743f8450541e3',
-    'out':'con'
- ```
- [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L969)
- ```python
- })
- if r['return']>0:
- print(r['error'])
- ```
-
-### Maintainers
-
-* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
\ No newline at end of file
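
For reference, a well-formed version of the API calls from the deleted README, based on the `cmind` package's `access()` entry point (the `utils` automation itself is removed by this patch, so the call below is illustrative only):

```python
import cmind

r = cmind.access({
    'action': 'uid',
    'automation': 'utils,dc2743f8450541e3',
    'out': 'con',
})
if r['return'] > 0:
    print(r['error'])
```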
diff --git a/automation/utils/_cm.json b/automation/utils/_cm.json
deleted file mode 100644
index f2dc9c5b6..000000000
--- a/automation/utils/_cm.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "alias": "utils",
- "automation_alias": "automation",
- "automation_uid": "bbeb15d8f0a944a4",
- "desc": "Accessing various CM utils",
- "developers": "[Grigori Fursin](https://cKnowledge.org/gfursin)",
- "sort": 800,
- "tags": [
- "automation"
- ],
- "uid": "dc2743f8450541e3"
-}
diff --git a/automation/utils/module.py b/automation/utils/module.py
deleted file mode 100644
index 2a4851b0a..000000000
--- a/automation/utils/module.py
+++ /dev/null
@@ -1,1108 +0,0 @@
-import os
-
-from cmind.automation import Automation
-from cmind import utils
-
-
-class CAutomation(Automation):
- """
- Automation actions
- """
-
- ############################################################
- def __init__(self, cmind, automation_file):
- super().__init__(cmind, __file__)
-
- ############################################################
- def test(self, i):
- """
- Test automation
-
- Args:
- (CM input dict):
-
- (out) (str): if 'con', output to console
-
- automation (str): automation as CM string object
-
- parsed_automation (list): prepared in CM CLI or CM access function
- [ (automation alias, automation UID) ] or
- [ (automation alias, automation UID), (automation repo alias, automation repo UID) ]
-
- (artifact) (str): artifact as CM string object
-
- (parsed_artifact) (list): prepared in CM CLI or CM access function
- [ (artifact alias, artifact UID) ] or
- [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
-
- ...
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
-
- * Output from this automation action
-
- """
-
- import json
- print(json.dumps(i, indent=2))
-
- return {'return': 0}
-
- ##########################################################################
- def get_host_os_info(self, i):
- """
-        Get host platform name (windows, linux or darwin) and OS bits
-
- Args:
- (CM input dict):
-
- (bits) (str): force host platform bits
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
-
- * info (dict):
- * platform (str): "windows", "linux" or "darwin"
- * bat_ext (str): ".bat" or ".sh"
- * bits (str): 32 or 64 bits
-            * python_bits (str): python bits
-
- """
-
- import os
- import platform
- import struct
-
- info = {}
-
- pbits = str(8 * struct.calcsize("P"))
-
- if platform.system().lower().startswith('win'):
- platform = 'windows'
- info['bat_ext'] = '.bat'
- info['set_env'] = 'set ${key}=${value}'
- info['env_separator'] = ';'
- info['env_var'] = '%env_var%'
- info['bat_rem'] = 'rem ${rem}'
- info['run_local_bat'] = 'call ${bat_file}'
- info['run_local_bat_from_python'] = 'call ${bat_file}'
- info['run_bat'] = 'call ${bat_file}'
- info['start_script'] = ['@echo off', '']
- info['env'] = {
- "CM_WINDOWS": "yes"
- }
- else:
- if platform.system().lower().startswith('darwin'):
- platform = 'darwin'
- else:
- platform = 'linux'
-
- info['bat_ext'] = '.sh'
- info['set_env'] = 'export ${key}="${value}"'
- info['env_separator'] = ':'
- info['env_var'] = '${env_var}'
- info['set_exec_file'] = 'chmod 755 "${file_name}"'
- info['bat_rem'] = '# ${rem}'
- info['run_local_bat'] = '. ./${bat_file}'
- info['run_local_bat_from_python'] = 'bash -c ". ./${bat_file}"'
- info['run_bat'] = '. ${bat_file}'
- info['start_script'] = ['#!/bin/bash', '']
- info['env'] = {}
-
- info['platform'] = platform
-
- obits = i.get('bits', '')
- if obits == '':
- obits = '32'
- if platform == 'windows':
- # Trying to get fast way to detect bits
- if os.environ.get('ProgramW6432', '') != '' or os.environ.get(
- 'ProgramFiles(x86)', '') != '': # pragma: no cover
- obits = '64'
- else:
- # On Linux use first getconf LONG_BIT and if doesn't work use
- # python bits
-
- obits = pbits
-
- r = utils.gen_tmp_file({})
- if r['return'] > 0:
- return r
-
- fn = r['file_name']
-
- cmd = 'getconf LONG_BIT > ' + fn
- rx = os.system(cmd)
-
- if rx == 0:
- r = utils.load_txt(file_name=fn, remove_after_read=True)
-
- if r['return'] == 0:
- s = r['string'].strip()
- if len(s) > 0 and len(s) < 4:
- obits = s
- else:
- if os.path.isfile(fn):
- os.remove(fn)
-
- info['bits'] = obits
- info['python_bits'] = pbits
-
- return {'return': 0, 'info': info}
-
- ##########################################################################
- def download_file(self, i):
- """
- Download file using requests
-
- Args:
- (CM input dict):
-
- url (str): URL with file
- (filename) (str): explicit file name
- (path) (str): path to record file (or current if empty)
-            (chunk_size) (int): chunk size in bytes (65536 by default)
- (text) (str): print text before downloaded status ("Downloaded: " by default)
- (verify) (bool): verify SSL certificate if True (True by default)
- can be switched by global env CM_UTILS_DOWNLOAD_VERIFY_SSL = no
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
-
- * filename (str): file name
- * path (str): path to file
- * size (int): file size
-
- """
-
- import requests
- import time
- import sys
- from urllib import parse
-
- # Get URL
- url = i['url']
-
- # Check file name
- file_name = i.get('filename', '')
- if file_name == '':
- parsed_url = parse.urlparse(url)
- file_name = os.path.basename(parsed_url.path)
-
- # Check path
- path = i.get('path', '')
- if path is None or path == '':
- path = os.getcwd()
-
- # Output file
- path_to_file = os.path.join(path, file_name)
-
- if os.path.isfile(path_to_file):
- os.remove(path_to_file)
-
- print('Downloading to {}'.format(path_to_file))
- print('')
-
- # Download
- size = -1
- downloaded = 0
- chunk_size = i.get('chunk_size', 65536)
-
- text = i.get('text', 'Downloaded: ')
-
- if 'CM_UTILS_DOWNLOAD_VERIFY_SSL' in os.environ:
- verify = os.environ['CM_UTILS_DOWNLOAD_VERIFY_SSL'] == 'yes'
- else:
- verify = i.get('verify', True)
-
- try:
- with requests.get(url, stream=True, allow_redirects=True, verify=verify) as download:
- download.raise_for_status()
-
- size_string = download.headers.get('Content-Length')
-
- if size_string is None:
- transfer_encoding = download.headers.get(
- 'Transfer-Encoding', '')
- if transfer_encoding != 'chunked':
- return {'return': 1, 'error': 'did not receive file'}
- else:
- size_string = "0"
-
- size = int(size_string)
-
- with open(path_to_file, 'wb') as output:
- for chunk in download.iter_content(chunk_size=chunk_size):
-
- if chunk:
- output.write(chunk)
- if size == 0:
- continue
- downloaded += 1
- percent = downloaded * chunk_size * 100 / size
-
- sys.stdout.write("\r{}{:3.0f}%".format(text, percent))
- sys.stdout.flush()
-
- sys.stdout.write("\r{}{:3.0f}%".format(text, 100))
- sys.stdout.flush()
-
- except Exception as e:
- return {'return': 1, 'error': format(e)}
-
- print('')
- if size == 0:
- file_stats = os.stat(path_to_file)
- size = file_stats.st_size
-
- return {'return': 0, 'filename': file_name,
- 'path': path_to_file, 'size': size}
-
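
The progress line in `download_file` counts whole chunks, so the running percentage overshoots on a final partial chunk; that is why the loop is followed by a hard-coded 100% line. A worked check of the arithmetic:

```python
size = 100_000        # Content-Length in bytes
chunk_size = 65_536
downloaded = 0
for received in (65_536, 34_464):   # two chunks; the second is partial
    downloaded += 1
    percent = downloaded * chunk_size * 100 / size
    print('Downloaded: {:3.0f}%'.format(percent))   # 66%, then 131%
```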
- ##########################################################################
- def unzip_file(self, i):
- """
- Unzip file
-
- Args:
- (CM input dict):
-
- filename (str): explicit file name
- (path) (str): path where to unzip file (current path otherwise)
- (strip_folders) (int): strip first folders
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
-
- """
-
- import zipfile
-
- # Check file name
- file_name = i['filename']
-
- if not os.path.isfile(file_name):
- return {'return': 1,
- 'error': 'file {} not found'.format(file_name)}
-
- console = i.get('out') == 'con'
-
- # Attempt to read cmr.json
- file_name_handle = open(file_name, 'rb')
- file_name_zip = zipfile.ZipFile(file_name_handle)
-
- info_files = file_name_zip.infolist()
-
- path = i.get('path', '')
- if path is None or path == '':
- path = os.getcwd()
-
- strip_folders = i.get('strip_folders', 0)
-
- # Unpacking zip
- for info in info_files:
- f = info.filename
- permissions = info.external_attr
-
- if not f.startswith('..') and not f.startswith(
- '/') and not f.startswith('\\'):
- f_zip = f
-
- if strip_folders > 0:
- fsplit = f.split('/') # Zip standard on all OS
- f = '/'.join(fsplit[strip_folders:])
-
- file_path = os.path.join(path, f)
-
- if f.endswith('/'):
- # create directory
- if not os.path.exists(file_path):
- os.makedirs(file_path)
- else:
- dir_name = os.path.dirname(file_path)
- if not os.path.exists(dir_name):
- os.makedirs(dir_name)
-
- # extract file
- file_out = open(file_path, 'wb')
- file_out.write(file_name_zip.read(f_zip))
- file_out.close()
-
- if permissions > 0xffff:
- os.chmod(file_path, permissions >> 16)
-
- file_name_zip.close()
- file_name_handle.close()
-
- return {'return': 0}
-
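
`unzip_file` restores Unix permissions from `ZipInfo.external_attr`, which packs the mode bits into its high 16 bits; the `permissions > 0xffff` guard skips entries that carry no mode at all. The encoding in isolation:

```python
import stat

external_attr = 0o755 << 16          # how a zip tool encodes rwxr-xr-x
assert external_attr > 0xffff        # the guard used above passes
assert stat.S_IMODE(external_attr >> 16) == 0o755
```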
- ##########################################################################
- def compare_versions(self, i):
- """
- Compare versions
-
- Args:
-
- version1 (str): version 1
- version2 (str): version 2
-
- Returns:
- (CM return dict):
-
- * comparison (int): 1 - version 1 > version 2
- 0 - version 1 == version 2
- -1 - version 1 < version 2
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
- """
-
- version1 = i['version1']
- version2 = i['version2']
-
- l_version1 = version1.split('.')
- l_version2 = version2.split('.')
-
- # 3.9.6 vs 3.9
- # 3.9 vs 3.9.6
-
- i_version1 = [int(v) if v.isdigit() else v for v in l_version1]
- i_version2 = [int(v) if v.isdigit() else v for v in l_version2]
-
- comparison = 0
-
- for index in range(max(len(i_version1), len(i_version2))):
- v1 = i_version1[index] if index < len(i_version1) else 0
- v2 = i_version2[index] if index < len(i_version2) else 0
-
- if v1 > v2:
- comparison = 1
- break
- elif v1 < v2:
- comparison = -1
- break
-
- return {'return': 0, 'comparison': comparison}
-
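
`compare_versions` pads the shorter version with zeros, so `3.9` behaves like `3.9.0` and compares less than `3.9.6`. The core loop, run on that example:

```python
v1, v2 = [3, 9], [3, 9, 6]
for index in range(max(len(v1), len(v2))):
    a = v1[index] if index < len(v1) else 0   # missing parts count as 0
    b = v2[index] if index < len(v2) else 0
    if a != b:
        print(-1 if a < b else 1)   # prints -1: '3.9' < '3.9.6'
        break
else:
    print(0)
```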
- ##########################################################################
- def json2yaml(self, i):
- """
- Convert JSON file to YAML
-
- Args:
-
- input (str): input file (.json)
- (output) (str): output file (.yaml)
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
- """
-
- input_file = i.get('input', '')
-
- if input_file == '':
- return {'return': 1, 'error': 'please specify --input={json file}'}
-
- output_file = i.get('output', '')
-
- r = utils.load_json(input_file, check_if_exists=True)
- if r['return'] > 0:
- return r
-
- meta = r['meta']
-
- if output_file == '':
-            output_file = input_file[:-5] if input_file.endswith('.json') else input_file
- output_file += '.yaml'
-
- r = utils.save_yaml(output_file, meta)
- if r['return'] > 0:
- return r
-
- return {'return': 0}
-
- ##########################################################################
- def yaml2json(self, i):
- """
- Convert YAML file to JSON
-
- Args:
-
- input (str): input file (.yaml)
- (output) (str): output file (.json)
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
- """
-
- input_file = i.get('input', '')
-
- if input_file == '':
- return {'return': 1, 'error': 'please specify --input={yaml file}'}
-
- output_file = i.get('output', '')
-
- r = utils.load_yaml(input_file, check_if_exists=True)
- if r['return'] > 0:
- return r
-
- meta = r['meta']
-
- if output_file == '':
-            output_file = input_file[:-5] if input_file.endswith('.yaml') else input_file
- output_file += '.json'
-
- r = utils.save_json(output_file, meta)
- if r['return'] > 0:
- return r
-
- return {'return': 0}
-
- ##########################################################################
- def sort_json(self, i):
- """
- Sort JSON file
-
- Args:
-
- input (str): input file (.json)
- (output) (str): output file
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
- """
-
- input_file = i.get('input', '')
-
- if input_file == '':
- return {'return': 1, 'error': 'please specify --input={json file}'}
-
- r = utils.load_json(input_file, check_if_exists=True)
- if r['return'] > 0:
- return r
-
- meta = r['meta']
-
- output_file = i.get('output', '')
-
- if output_file == '':
- output_file = input_file
-
- r = utils.save_json(output_file, meta, sort_keys=True)
- if r['return'] > 0:
- return r
-
- return {'return': 0}
-
- ##########################################################################
- def dos2unix(self, i):
- """
- Convert DOS file to UNIX (remove \r)
-
- Args:
-
- input (str): input file (.txt)
- (output) (str): output file
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
- """
-
- input_file = i.get('input', '')
-
- if input_file == '':
- return {'return': 1, 'error': 'please specify --input={txt file}'}
-
- r = utils.load_txt(input_file, check_if_exists=True)
- if r['return'] > 0:
- return r
-
- s = r['string'].replace('\r', '')
-
- output_file = i.get('output', '')
-
- if output_file == '':
- output_file = input_file
-
- r = utils.save_txt(output_file, s)
- if r['return'] > 0:
- return r
-
- return {'return': 0}
-
- ##########################################################################
- def replace_string_in_file(self, i):
- """
-        Replace a string in a file
-
- Args:
-
- input (str): input file (.txt)
- (output) (str): output file
- string (str): string to replace
- replacement (str): replacement string
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
-
-             (update) (bool): True if file was updated
- """
-
- input_file = i.get('input', '')
- if input_file == '':
- return {'return': 1, 'error': 'please specify --input={txt file}'}
-
- string = i.get('string', '')
- if string == '':
- return {'return': 1,
- 'error': 'please specify --string={string to replace}'}
-
- replacement = i.get('replacement', '')
- if replacement == '':
- return {'return': 1,
-                    'error': 'please specify --replacement={replacement string}'}
-
- output_file = i.get('output', '')
-
- if output_file == '':
- output_file = input_file
-
- r = utils.load_txt(input_file, check_if_exists=True)
- if r['return'] > 0:
- return r
-
- s = r['string'].replace('\r', '')
-
- s = s.replace(string, replacement)
-
- r = utils.save_txt(output_file, s)
- if r['return'] > 0:
- return r
-
- return {'return': 0}
-
- ##########################################################################
- def create_toc_from_md(self, i):
- """
-        Create a table of contents from a Markdown (.md) file
-
- Args:
-
- input (str): input file (.md)
-           (output) (str): output file (input + '.toc' by default)
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
- """
-
- input_file = i.get('input', '')
- if input_file == '':
- return {'return': 1, 'error': 'please specify --input={txt file}'}
-
- output_file = i.get('output', '')
-
- if output_file == '':
- output_file = input_file + '.toc'
-
- r = utils.load_txt(input_file, check_if_exists=True)
- if r['return'] > 0:
- return r
-
- lines = r['string'].split('\n')
-
- toc = []
-
-        toc.append('<details>')
-        toc.append('<summary>Click here to see the table of contents.</summary>')
-        toc.append('')
-
- for line in lines:
- line = line.strip()
-
- if line.startswith('#'):
- j = line.find(' ')
- if j >= 0:
- title = line[j:].strip()
-
- x = title.lower().replace(' ', '-')
-
- for k in range(0, 2):
- if x.startswith('*'):
- x = x[1:]
- if x.endswith('*'):
- x = x[:-1]
-
- for z in [':', '+', '.', '(', ')', ',']:
- x = x.replace(z, '')
-
- y = ' ' * (2 * (j - 1)) + '* [' + title + '](#' + x + ')'
-
- toc.append(y)
-
-        toc.append('')
-        toc.append('</details>')
-
- r = utils.save_txt(output_file, '\n'.join(toc) + '\n')
- if r['return'] > 0:
- return r
-
- return {'return': 0}
-
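
The anchor slugs built in `create_toc_from_md` follow GitHub's heading convention: lowercase, spaces to hyphens, punctuation stripped. Applied to a single heading:

```python
title = 'Run CM scripts (Linux, Windows)'
x = title.lower().replace(' ', '-')
for z in [':', '+', '.', '(', ')', ',']:
    x = x.replace(z, '')
print('* [' + title + '](#' + x + ')')
# -> * [Run CM scripts (Linux, Windows)](#run-cm-scripts-linux-windows)
```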
- ##########################################################################
- def copy_to_clipboard(self, i):
- """
- Copy string to a clipboard
-
- Args:
-
- string (str): string to copy to a clipboard
- (add_quotes) (bool): add quotes to the string in a clipboard
- (skip_fail) (bool): if True, do not fail
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
- """
-
- s = i.get('string', '')
-
- if i.get('add_quotes', False):
- s = '"' + s + '"'
-
- failed = False
- warning = ''
-
- # Try to load pyperclip (seems to work fine on Windows)
- try:
- import pyperclip
- except Exception as e:
- warning = format(e)
- failed = True
- pass
-
- if not failed:
- pyperclip.copy(s)
- else:
- failed = False
-
- # Try to load Tkinter
- try:
- from Tkinter import Tk
- except ImportError as e:
- warning = format(e)
- failed = True
- pass
-
- if failed:
- failed = False
- try:
- from tkinter import Tk
- except ImportError as e:
- warning = format(e)
- failed = True
- pass
-
- if not failed:
- # Copy to clipboard
- try:
- r = Tk()
- r.withdraw()
- r.clipboard_clear()
- r.clipboard_append(s)
- r.update()
- r.destroy()
- except Exception as e:
- failed = True
- warning = format(e)
-
- rr = {'return': 0}
-
- if failed:
- if not i.get('skip_fail', False):
- return {'return': 1, 'error': warning}
-
- rr['warning'] = warning
-
- return rr
-
- ##########################################################################
- def list_files_recursively(self, i):
- """
- List files and concatenate into string separate by comma
-
- Args:
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
- """
-
- files = os.walk('.')
-
- s = ''
-
- for (dir_path, dir_names, file_names) in files:
- for f in file_names:
- if s != '':
- s += ','
-
- if dir_path == '.':
- dir_path2 = ''
- else:
- dir_path2 = dir_path[2:].replace('\\', '/') + '/'
-
- s += dir_path2 + f
-
- print(s)
-
- return {'return': 0}
-
- ##########################################################################
- def generate_secret(self, i):
- """
- Generate secret for web apps
-
- Args:
-
- Returns:
- (CM return dict):
-
- secret (str): secret
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
- """
-
- import secrets
- s = secrets.token_urlsafe(16)
-
- print(s)
-
- return {'return': 0, 'secret': s}
-
- ##########################################################################
- def detect_tags_in_artifact(self, i):
- """
-        Detect tags in an artifact name (separated by spaces) and update the input accordingly
-
- Args:
-
- input (dict) : original input
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
- """
-
- inp = i['input']
-
- artifact = inp.get('artifact', '')
- if artifact == '.':
- del (inp['artifact'])
- elif ' ' in artifact: # or ',' in artifact:
- del (inp['artifact'])
- if 'parsed_artifact' in inp:
- del (inp['parsed_artifact'])
- # Force substitute tags
- inp['tags'] = artifact.replace(' ', ',')
-
- return {'return': 0}
-
- ##########################################################################
- def prune_input(self, i):
- """
- Leave only input keys and remove the rest (to regenerate CM commands)
-
- Args:
-
- input (dict) : original input
-          (extra_keys_starts_with) (list): remove keys that start
-                                           with any of the prefixes in this list
-
- Returns:
- (CM return dict):
-
- new_input (dict): pruned input
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
- """
-
- import copy
-
- inp = i['input']
- extra_keys = i.get('extra_keys_starts_with', [])
-
- i_run_cmd_arc = copy.deepcopy(inp)
- for k in inp:
- remove = False
- if k in ['action', 'automation', 'cmd', 'out',
- 'parsed_automation', 'parsed_artifact', 'self_module']:
- remove = True
- if not remove:
- for ek in extra_keys:
- if k.startswith(ek):
- remove = True
- break
-
- if remove:
- del (i_run_cmd_arc[k])
-
- return {'return': 0, 'new_input': i_run_cmd_arc}
-
- ##########################################################################
-
- def uid(self, i):
- """
- Generate CM UID.
-
- Args:
- (CM input dict): empty dict
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
-
- * uid (str): CM UID
- """
-
- console = i.get('out') == 'con'
-
- r = utils.gen_uid()
-
- if console:
- print(r['uid'])
-
- return r
-
- ##########################################################################
-
- def system(self, i):
- """
- Run system command and redirect output to string.
-
- Args:
- (CM input dict):
-
- * cmd (str): command line
- * (path) (str): go to this directory and return back to current
- * (stdout) (str): stdout file
- * (stderr) (str): stderr file
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
-
- * ret (int): return code
- * std (str): stdout + stderr
- * stdout (str): stdout
- * stderr (str): stderr
- """
-
- cmd = i['cmd']
-
- if cmd == '':
- return {'return': 1, 'error': 'cmd is empty'}
-
- path = i.get('path', '')
- if path != '' and os.path.isdir(path):
- cur_dir = os.getcwd()
- os.chdir(path)
-
- if i.get('stdout', '') != '':
- fn1 = i['stdout']
- fn1_delete = False
- else:
- r = utils.gen_tmp_file({})
- if r['return'] > 0:
- return r
- fn1 = r['file_name']
- fn1_delete = True
-
- if i.get('stderr', '') != '':
- fn2 = i['stderr']
- fn2_delete = False
- else:
- r = utils.gen_tmp_file({})
- if r['return'] > 0:
- return r
- fn2 = r['file_name']
- fn2_delete = True
-
- cmd += ' > ' + fn1 + ' 2> ' + fn2
- rx = os.system(cmd)
-
- std = ''
- stdout = ''
- stderr = ''
-
- if os.path.isfile(fn1):
- r = utils.load_txt(file_name=fn1, remove_after_read=fn1_delete)
- if r['return'] == 0:
- stdout = r['string'].strip()
-
- if os.path.isfile(fn2):
- r = utils.load_txt(file_name=fn2, remove_after_read=fn2_delete)
- if r['return'] == 0:
- stderr = r['string'].strip()
-
- std = stdout
- if stderr != '':
- if std != '':
- std += '\n'
- std += stderr
-
- if path != '' and os.path.isdir(path):
- os.chdir(cur_dir)
-
- return {'return': 0, 'ret': rx, 'stdout': stdout,
- 'stderr': stderr, 'std': std}
-
- ############################################################
- def load_cfg(self, i):
- """
- Load configuration artifacts and files
-
- Args:
- (CM input dict):
-
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
-
- """
-
- return utils.call_internal_module(
- self, __file__, 'module_cfg', 'load_cfg', i)
-
- ############################################################
- def select_cfg(self, i):
- """
- Select cfg interactively
-
- Args:
- (CM input dict):
- tags (str): list of tags to find cfg
- alias (str): alias of a cfg file
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
-
- """
-
- i['self_module'] = self
-
- return utils.call_internal_module(
- self, __file__, 'module_cfg', 'select_cfg', i)
-
- ############################################################
- def print_yaml(self, i):
- """
- Print YAML file
-
- Args:
- (CM input dict):
- file (str): input file
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
-
- """
-
- filename = i.get('file', '')
- if filename == '':
- return {'return': 1, 'error': 'please specify --file={YAML file}'}
-
- r = utils.load_yaml(filename, check_if_exists=True)
- if r['return'] > 0:
- return r
-
- meta = r['meta']
-
- import json
- print(json.dumps(meta, indent=2))
-
- return {'return': 0}
-
- ############################################################
- def print_json(self, i):
- """
-        Print JSON file
-
- Args:
- (CM input dict):
- file (str): input file
-
- Returns:
- (CM return dict):
-
- * return (int): return code == 0 if no error and >0 if error
- * (error) (str): error string if return>0
-
- """
-
- filename = i.get('file', '')
- if filename == '':
- return {'return': 1, 'error': 'please specify --file={JSON file}'}
-
- r = utils.load_json(filename, check_if_exists=True)
- if r['return'] > 0:
- return r
-
- meta = r['meta']
-
- import json
- print(json.dumps(meta, indent=2))
-
- return {'return': 0}
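The `create_toc_from_md` helper removed above derives a GitHub-style anchor from each Markdown heading (lowercase, spaces to dashes, punctuation stripped). A minimal standalone sketch of that logic, re-implemented here for reference; the function name and the final `print` are ours, not part of the patch:

```python
# Sketch of the TOC/anchor logic deleted above (illustrative only).
def md_toc(markdown_text):
    toc = []
    for line in markdown_text.split('\n'):
        line = line.strip()
        if not line.startswith('#'):
            continue
        j = line.find(' ')              # '#' count encodes the heading level
        if j < 0:
            continue
        title = line[j:].strip()
        anchor = title.lower().replace(' ', '-').strip('*')
        for ch in ':+.(),':
            anchor = anchor.replace(ch, '')
        toc.append(' ' * (2 * (j - 1)) + '* [' + title + '](#' + anchor + ')')
    return '\n'.join(toc)

print(md_toc('# Intro\n## Getting started'))
```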
diff --git a/automation/utils/module_cfg.py b/automation/utils/module_cfg.py
deleted file mode 100644
index 36ec30915..000000000
--- a/automation/utils/module_cfg.py
+++ /dev/null
@@ -1,339 +0,0 @@
-import os
-import cmind
-import copy
-
-base_path = {}
-base_path_meta = {}
-
-##########################################################################
-
-
-def load_cfg(i):
-
- tags = i.get('tags', '')
- artifact = i.get('artifact', '')
-
- key = i.get('key', '')
- key_end = i.get('key_end', [])
-
- ii = {'action': 'find',
- 'automation': 'cfg'}
- if artifact != '':
- ii['artifact'] = artifact
- elif tags != '':
- ii['tags'] = tags
-
- r = cmind.access(ii)
- if r['return'] > 0:
- return r
-
- lst = r['list']
-
- prune = i.get('prune', {})
- prune_key = prune.get('key', '')
- prune_key_uid = prune.get('key_uid', '')
- prune_meta_key = prune.get('meta_key', '')
- prune_meta_key_uid = prune.get('meta_key_uid', '')
- prune_uid = prune.get('uid', '')
- prune_list = prune.get('list', [])
-
- # Checking individual files inside CM entry
- selection = []
-
- if i.get('skip_files', False):
- for l in lst:
- meta = l.meta
- full_path = l.path
-
- meta['full_path'] = full_path
-
- add = True
-
- if prune_key != '' and prune_key_uid != '':
- if prune_key_uid not in meta.get(prune_key, []):
- add = False
-
- if add:
- selection.append(meta)
- else:
- for l in lst:
- path = l.path
-
- main_meta = l.meta
-
- skip = False
-
- if prune_meta_key != '' and prune_meta_key_uid != '':
- if prune_meta_key_uid not in main_meta.get(prune_meta_key, []):
- skip = True
-
- if skip:
- continue
-
- all_tags = main_meta.get('tags', [])
-
- files = os.listdir(path)
-
- for f in files:
- if key != '' and not f.startswith(key):
- continue
-
- if f.startswith('_') or (not f.endswith(
- '.json') and not f.endswith('.yaml')):
- continue
-
- if len(key_end) > 0:
- skip = True
- for ke in key_end:
- if f.endswith(ke):
- skip = False
- break
- if skip:
- continue
-
- full_path = os.path.join(path, f)
-
- full_path_without_ext = full_path[:-5]
-
- r = cmind.utils.load_yaml_and_json(full_path_without_ext)
- if r['return'] > 0:
- print('Warning: problem loading file {}'.format(full_path))
- else:
- meta = r['meta']
-
- # Check base
- r = process_base(meta, full_path)
- if r['return'] > 0:
- return r
- meta = r['meta']
-
- uid = meta['uid']
-
- # Check pruning
- add = True
-
- if len(prune) > 0:
- if prune_uid != '' and uid != prune_uid:
- add = False
-
- if add and len(
- prune_list) > 0 and uid not in prune_list:
- add = False
-
- if add and prune_key != '' and prune_key_uid != '' and prune_key_uid != meta.get(
- prune_key, None):
- add = False
-
- if add:
- meta['full_path'] = full_path
-
- add_all_tags = copy.deepcopy(all_tags)
-
- name = meta.get('name', '')
- if name == '':
- name = ' '.join(meta.get('tags', []))
- name = name.strip()
- meta['name'] = name
-
- file_tags = meta.get('tags', '').strip()
- if file_tags == '':
- if name != '':
- add_all_tags += [v.lower()
- for v in name.split(' ')]
- else:
- add_all_tags += file_tags.split(',')
-
- meta['all_tags'] = add_all_tags
-
- meta['main_meta'] = main_meta
-
- selection.append(meta)
-
- return {'return': 0, 'lst': lst, 'selection': selection}
-
-##########################################################################
-
-
-def process_base(meta, full_path):
-
- global base_path, base_path_meta
-
- _base = meta.get('_base', '')
- if _base != '':
- name = ''
-
- filename = _base
- full_path_base = os.path.dirname(full_path)
-
- if not filename.endswith('.yaml') and not filename.endswith('.json'):
- return {'return': 1, 'error': '_base file {} in {} must be .yaml or .json'.format(
- filename, full_path)}
-
- if ':' in _base:
- x = _base.split(':')
- name = x[0]
-
- full_path_base = base_path.get(name, '')
- if full_path_base == '':
-
- # Find artifact
- r = cmind.access({'action': 'find',
- 'automation': 'cfg',
- 'artifact': name})
- if r['return'] > 0:
- return r
-
- lst = r['list']
-
- if len(lst) == 0:
- if not os.path.isfile(path):
- return {'return': 1, 'error': '_base artifact {} not found in {}'.format(
- name, full_path)}
-
- full_path_base = lst[0].path
-
- base_path[name] = full_path_base
-
- filename = x[1]
-
- # Load base
- path = os.path.join(full_path_base, filename)
-
- if not os.path.isfile(path):
- return {'return': 1, 'error': '_base file {} not found in {}'.format(
- filename, full_path)}
-
- if path in base_path_meta:
- base = copy.deepcopy(base_path_meta[path])
- else:
- path_without_ext = path[:-5]
-
- r = cmind.utils.load_yaml_and_json(path_without_ext)
- if r['return'] > 0:
- return r
-
- base = r['meta']
-
- base_path_meta[path] = copy.deepcopy(base)
-
- for k in meta:
- v = meta[k]
-
- if k not in base:
- base[k] = v
- else:
- if isinstance(v, str):
- # Only merge a few special keys and overwrite the rest
- if k in ['tags', 'name']:
- base[k] += meta[k]
- else:
- base[k] = meta[k]
-
-
- elif isinstance(v, list):
- for vv in v:
- base[k].append(vv)
- elif isinstance(v, dict):
- base[k].merge(v)
-
- meta = base
-
- return {'return': 0, 'meta':meta}
-
-##########################################################################
-
-def select_cfg(i):
-
- self_module = i['self_module']
- tags = i['tags']
- alias = i.get('alias', '')
- uid = i.get('uid', '')
- title = i.get('title', '')
-
- # Check if alias is not provided
- r = self_module.cmind.access({'action': 'find', 'automation':'cfg', 'tags':'basic,docker,configurations'})
- if r['return'] > 0:
- return r
-
- lst = r['list']
-
- selector = []
-
- # Do coarse-grain search for CM artifacts
- for l in lst:
- p = l.path
-
- if alias != '':
- for ext in ['.json', '.yaml']:
- p1 = os.path.join(p, alias +ext)
- if os.path.isfile(p1):
- selector.append({'path': p1, 'alias':alias})
- break
-
- else:
- files = os.listdir(p)
-
- for f in files:
- if not f.startswith('_cm') and (
- f.endswith('.json') or f.endswith('.yaml')):
- selector.append({'path': os.path.join(p, f), 'alias':f[:-5]})
-
- # Load meta for name and UID
- selector_with_meta = []
- for s in range(0, len(selector)):
- ss = selector[s]
-
- path = ss['path']
-
- full_path_without_ext = path[:-5]
-
- r = cmind.utils.load_yaml_and_json(full_path_without_ext)
- if r['return'] >0:
- print('Warning: problem loading configuration file {}'.format(path))
-
- meta = r['meta']
-
- if uid == '' or meta.get('uid', '') == uid:
- ss['meta'] = meta
- selector_with_meta.append(ss)
-
- # Quit if no configurations found
- if len(selector_with_meta) == 0:
- return {'return': 16, 'error':'configuration was not found'}
-
- select = 0
- if len(selector_with_meta) > 1:
- xtitle = ' ' + title if title != '' else ''
- print('')
- print('Available{} configurations:'.format(xtitle))
-
- print('')
-
- selector_with_meta = sorted(selector_with_meta, key = lambda x: x['meta'].get('name', ''))
- s = 0
- for ss in selector_with_meta:
- alias = ss['alias']
- uid = ss['meta'].get('uid', '')
- name = ss['meta'].get('name', '')
-
- x = name
- if x!='':
- x+=' '
- x += '(' + uid + ')'
-
- print(f'{s}) {x}')
-
- s += 1
-
- print('')
- select = input('Enter configuration number or press Enter for 0: ')
-
- if select.strip() == '':
- select = '0'
-
- select = int(select)
-
- if select < 0 or select >= len(selector_with_meta):
- return {'return': 1, 'error':'selection is out of range'}
-
- ss = selector_with_meta[select]
-
- return {'return': 0, 'selection':ss}
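For reference, the `_base` inheritance rule implemented by the deleted `process_base` can be stated as a small pure function. A hedged sketch with our own naming (the deleted code mutates `base` in place and concatenates only the `tags` and `name` string keys):

```python
# Hedged sketch of the `_base` merge rule from the deleted module_cfg.py.
def merge_with_base(base, child, concat_keys=('tags', 'name')):
    out = dict(base)
    for k, v in child.items():
        if k not in out:
            out[k] = v                    # new key: take the child's value
        elif isinstance(v, str):
            # only a few special string keys are concatenated, rest overwritten
            out[k] = out[k] + v if k in concat_keys else v
        elif isinstance(v, list):
            out[k] = list(out[k]) + v     # lists are appended
        elif isinstance(v, dict):
            merged = dict(out[k])
            merged.update(v)              # dicts are merged key by key
            out[k] = merged
    return out
```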
diff --git a/script/activate-python-venv/customize.py b/script/activate-python-venv/customize.py
index 1d0e96c3c..2740bb9d8 100644
--- a/script/activate-python-venv/customize.py
+++ b/script/activate-python-venv/customize.py
@@ -12,9 +12,9 @@ def preprocess(i):
automation = i['automation']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
- name = env.get('CM_NAME', '')
+ name = env.get('MLC_NAME', '')
if name != '':
name = name.strip().lower()
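Renames such as `CM_QUIET` to `MLC_QUIET` silently break callers that still export the old name. One possible transitional pattern, purely illustrative and not something this patch implements, is to prefer the new variable and fall back to the legacy spelling:

```python
import os

# Hypothetical migration helper (not part of this PR): read MLC_<name>,
# falling back to the legacy CM_<name> while both spellings are in use.
def getenv_migrated(name, default=''):
    return os.environ.get('MLC_' + name,
                          os.environ.get('CM_' + name, default))

quiet = (getenv_migrated('QUIET') == 'yes')
```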
diff --git a/script/activate-python-venv/run.bat b/script/activate-python-venv/run.bat
index 5ca2ac0ed..76b2bfe18 100644
--- a/script/activate-python-venv/run.bat
+++ b/script/activate-python-venv/run.bat
@@ -1,7 +1,7 @@
echo.
-echo call "%CM_VIRTUAL_ENV_SCRIPTS_PATH%\activate.bat && cmd"
+echo call "%MLC_VIRTUAL_ENV_SCRIPTS_PATH%\activate.bat && cmd"
echo.
echo Enter exit to exit virtual env.
echo.
-call %CM_VIRTUAL_ENV_SCRIPTS_PATH%\activate.bat && cmd
+call %MLC_VIRTUAL_ENV_SCRIPTS_PATH%\activate.bat && cmd
diff --git a/script/activate-python-venv/run.sh b/script/activate-python-venv/run.sh
index 6569b07e5..0753ad888 100644
--- a/script/activate-python-venv/run.sh
+++ b/script/activate-python-venv/run.sh
@@ -1,9 +1,9 @@
#!/bin/bash
echo ""
-echo " bash --init-file ${CM_VIRTUAL_ENV_SCRIPTS_PATH}/activate"
+echo " bash --init-file ${MLC_VIRTUAL_ENV_SCRIPTS_PATH}/activate"
echo ""
echo " Enter exit to exit virtual env."
echo ""
-bash --init-file ${CM_VIRTUAL_ENV_SCRIPTS_PATH}/activate
+bash --init-file ${MLC_VIRTUAL_ENV_SCRIPTS_PATH}/activate
diff --git a/script/add-custom-nvidia-system/customize.py b/script/add-custom-nvidia-system/customize.py
index 714ce821d..b711c5664 100644
--- a/script/add-custom-nvidia-system/customize.py
+++ b/script/add-custom-nvidia-system/customize.py
@@ -19,6 +19,6 @@ def postprocess(i):
env = i['env']
- env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH']
+ env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_MLPERF_INFERENCE_NVIDIA_CODE_PATH']
return {'return': 0}
diff --git a/script/add-custom-nvidia-system/run.sh b/script/add-custom-nvidia-system/run.sh
index b89617f7f..8ce1d0b64 100644
--- a/script/add-custom-nvidia-system/run.sh
+++ b/script/add-custom-nvidia-system/run.sh
@@ -1,5 +1,5 @@
#!/bin/bash
CUR=$PWD
-cd ${CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH}
-${CM_PYTHON_BIN_WITH_PATH} scripts/custom_systems/add_custom_system.py
+cd ${MLC_MLPERF_INFERENCE_NVIDIA_CODE_PATH}
+${MLC_PYTHON_BIN_WITH_PATH} scripts/custom_systems/add_custom_system.py
test $? -eq 0 || exit $?
diff --git a/script/app-image-classification-onnx-py/customize.py b/script/app-image-classification-onnx-py/customize.py
index e3371f217..338986722 100644
--- a/script/app-image-classification-onnx-py/customize.py
+++ b/script/app-image-classification-onnx-py/customize.py
@@ -29,10 +29,10 @@ def postprocess(i):
data = state.get('cm_app_image_classification_onnx_py', {})
- fjson = 'cm-image-classification-onnx-py.json'
- fyaml = 'cm-image-classification-onnx-py.yaml'
+ fjson = 'mlc-image-classification-onnx-py.json'
+ fyaml = 'mlc-image-classification-onnx-py.yaml'
- output = env.get('CM_APP_IMAGE_CLASSIFICATION_ONNX_PY_OUTPUT', '')
+ output = env.get('MLC_APP_IMAGE_CLASSIFICATION_ONNX_PY_OUTPUT', '')
if output != '':
if not os.path.exists(output):
os.makedirs(output)
@@ -56,7 +56,7 @@ def postprocess(i):
top_classification = data.get('top_classification', '')
- if env.get('CM_TMP_SILENT', '') != 'yes':
+ if env.get('MLC_TMP_SILENT', '') != 'yes':
if top_classification != '':
print('')
x = 'Top classification: {}'.format(top_classification)
diff --git a/script/app-image-classification-onnx-py/meta.yaml b/script/app-image-classification-onnx-py/meta.yaml
index e53b91ec2..82a559f8f 100644
--- a/script/app-image-classification-onnx-py/meta.yaml
+++ b/script/app-image-classification-onnx-py/meta.yaml
@@ -16,8 +16,8 @@ tags:
tags_help: "modular python app image-classification onnx"
default_env:
- CM_BATCH_COUNT: '1'
- CM_BATCH_SIZE: '1'
+ MLC_BATCH_COUNT: '1'
+ MLC_BATCH_SIZE: '1'
deps:
@@ -81,12 +81,12 @@ variations:
USE_CPU: yes
input_mapping:
- input: CM_IMAGE
- output: CM_APP_IMAGE_CLASSIFICATION_ONNX_PY_OUTPUT
+ input: MLC_IMAGE
+ output: MLC_APP_IMAGE_CLASSIFICATION_ONNX_PY_OUTPUT
new_env_keys:
- - CM_APP_IMAGE_CLASSIFICATION_ONNX_PY*
+ - MLC_APP_IMAGE_CLASSIFICATION_ONNX_PY*
new_state_keys:
@@ -107,13 +107,13 @@ docker:
skip_cm_sys_upgrade: 'yes'
cm_repo_flags: '--branch=dev'
use_host_group_id: 'yes'
- image_tag_extra: '-cm-dev'
+ image_tag_extra: '-mlc-dev'
input_paths:
- input
- - env.CM_IMAGE
+ - env.MLC_IMAGE
- output
skip_input_for_fake_run:
- input
- - env.CM_IMAGE
+ - env.MLC_IMAGE
- output
- j
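The `input_mapping` block above translates script inputs such as `--input` into environment variables such as `MLC_IMAGE`. A sketch of how such a mapping is applied; `apply_input_mapping` is a hypothetical helper, not the framework's actual implementation:

```python
# Hypothetical sketch of applying an input_mapping block: copy recognized
# script inputs into the environment under their mapped variable names.
def apply_input_mapping(inputs, mapping, env):
    for key, env_name in mapping.items():
        if key in inputs:
            env[env_name] = inputs[key]
    return env

env = apply_input_mapping({'input': 'computer_mouse.jpg'},
                          {'input': 'MLC_IMAGE'}, {})
print(env)   # {'MLC_IMAGE': 'computer_mouse.jpg'}
```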
diff --git a/script/app-image-classification-onnx-py/run.bat b/script/app-image-classification-onnx-py/run.bat
index ee7db9867..c3aa5611a 100644
--- a/script/app-image-classification-onnx-py/run.bat
+++ b/script/app-image-classification-onnx-py/run.bat
@@ -1,29 +1,29 @@
-rem echo %CM_PYTHON_BIN%
-rem echo %CM_DATASET_PATH%
-rem echo %CM_DATASET_AUX_PATH%
-rem echo %CM_ML_MODEL_FILE_WITH_PATH%
+rem echo %MLC_PYTHON_BIN%
+rem echo %MLC_DATASET_PATH%
+rem echo %MLC_DATASET_AUX_PATH%
+rem echo %MLC_ML_MODEL_FILE_WITH_PATH%
rem connect CM intelligent components with CK env
-set CK_ENV_ONNX_MODEL_ONNX_FILEPATH=%CM_ML_MODEL_FILE_WITH_PATH%
+set CK_ENV_ONNX_MODEL_ONNX_FILEPATH=%MLC_ML_MODEL_FILE_WITH_PATH%
set CK_ENV_ONNX_MODEL_INPUT_LAYER_NAME=input_tensor:0
set CK_ENV_ONNX_MODEL_OUTPUT_LAYER_NAME=softmax_tensor:0
-set CK_ENV_DATASET_IMAGENET_VAL=%CM_DATASET_PATH%
-set CK_CAFFE_IMAGENET_SYNSET_WORDS_TXT=%CM_DATASET_AUX_PATH%\synset_words.txt
+set CK_ENV_DATASET_IMAGENET_VAL=%MLC_DATASET_PATH%
+set CK_CAFFE_IMAGENET_SYNSET_WORDS_TXT=%MLC_DATASET_AUX_PATH%\synset_words.txt
set ML_MODEL_DATA_LAYOUT=NCHW
-set CK_BATCH_SIZE=%CM_BATCH_SIZE%
-set CK_BATCH_COUNT=%CM_BATCH_COUNT%
+set CK_BATCH_SIZE=%MLC_BATCH_SIZE%
+set CK_BATCH_COUNT=%MLC_BATCH_COUNT%
-IF NOT DEFINED CM_TMP_CURRENT_SCRIPT_PATH SET CM_TMP_CURRENT_SCRIPT_PATH=%CD%
+IF NOT DEFINED MLC_TMP_CURRENT_SCRIPT_PATH SET MLC_TMP_CURRENT_SCRIPT_PATH=%CD%
-IF DEFINED CM_INPUT SET CM_IMAGE=%CM_INPUT%
+IF DEFINED MLC_INPUT SET MLC_IMAGE=%MLC_INPUT%
echo.
-%CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt
+%MLC_PYTHON_BIN_WITH_PATH% -m pip install -r %MLC_TMP_CURRENT_SCRIPT_PATH%\requirements.txt
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
echo.
-%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\src\onnx_classify.py
+%MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\src\onnx_classify.py
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
rem Just a demo to pass environment variables from native scripts back to CM workflows
-echo CM_APP_IMAGE_CLASSIFICATION_ONNX_PY=sucess > tmp-run-env.out
+echo MLC_APP_IMAGE_CLASSIFICATION_ONNX_PY=sucess > tmp-run-env.out
diff --git a/script/app-image-classification-onnx-py/run.sh b/script/app-image-classification-onnx-py/run.sh
index 62b07e1f1..4325faf5a 100644
--- a/script/app-image-classification-onnx-py/run.sh
+++ b/script/app-image-classification-onnx-py/run.sh
@@ -1,37 +1,37 @@
#!/bin/bash
-if [[ ${CM_RUN_DOCKER_CONTAINER} == "yes" ]]; then
+if [[ ${MLC_RUN_DOCKER_CONTAINER} == "yes" ]]; then
exit 0
fi
-#echo ${CM_PYTHON_BIN}
-#echo ${CM_DATASET_PATH}
-#echo ${CM_DATASET_AUX_PATH}
-#echo ${CM_ML_MODEL_FILE_WITH_PATH}
-CM_PYTHON_BIN=${CM_PYTHON_BIN_WITH_PATH:-python3}
-CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD}
+#echo ${MLC_PYTHON_BIN}
+#echo ${MLC_DATASET_PATH}
+#echo ${MLC_DATASET_AUX_PATH}
+#echo ${MLC_ML_MODEL_FILE_WITH_PATH}
+MLC_PYTHON_BIN=${MLC_PYTHON_BIN_WITH_PATH:-python3}
+MLC_TMP_CURRENT_SCRIPT_PATH=${MLC_TMP_CURRENT_SCRIPT_PATH:-$PWD}
# connect CM intelligent components with CK env
-export CK_ENV_ONNX_MODEL_ONNX_FILEPATH=${CM_ML_MODEL_FILE_WITH_PATH}
+export CK_ENV_ONNX_MODEL_ONNX_FILEPATH=${MLC_ML_MODEL_FILE_WITH_PATH}
export CK_ENV_ONNX_MODEL_INPUT_LAYER_NAME="input_tensor:0"
export CK_ENV_ONNX_MODEL_OUTPUT_LAYER_NAME="softmax_tensor:0"
-export CK_ENV_DATASET_IMAGENET_VAL=${CM_DATASET_PATH}
-export CK_CAFFE_IMAGENET_SYNSET_WORDS_TXT=${CM_DATASET_AUX_PATH}/synset_words.txt
+export CK_ENV_DATASET_IMAGENET_VAL=${MLC_DATASET_PATH}
+export CK_CAFFE_IMAGENET_SYNSET_WORDS_TXT=${MLC_DATASET_AUX_PATH}/synset_words.txt
export ML_MODEL_DATA_LAYOUT="NCHW"
-export CK_BATCH_SIZE=${CM_BATCH_SIZE}
-export CK_BATCH_COUNT=${CM_BATCH_COUNT}
+export CK_BATCH_SIZE=${MLC_BATCH_SIZE}
+export CK_BATCH_COUNT=${MLC_BATCH_COUNT}
-if [[ "${CM_INPUT}" != "" ]]; then export CM_IMAGE=${CM_INPUT}; fi
+if [[ "${MLC_INPUT}" != "" ]]; then export MLC_IMAGE=${MLC_INPUT}; fi
-PIP_EXTRA=`${CM_PYTHON_BIN} -c "import importlib.metadata; print(' --break-system-packages ' if int(importlib.metadata.version('pip').split('.')[0]) >= 23 else '')"`
+PIP_EXTRA=`${MLC_PYTHON_BIN} -c "import importlib.metadata; print(' --break-system-packages ' if int(importlib.metadata.version('pip').split('.')[0]) >= 23 else '')"`
echo ""
-${CM_PYTHON_BIN} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt ${PIP_EXTRA}
+${MLC_PYTHON_BIN} -m pip install -r ${MLC_TMP_CURRENT_SCRIPT_PATH}/requirements.txt ${PIP_EXTRA}
test $? -eq 0 || exit 1
echo ""
-${CM_PYTHON_BIN} ${CM_TMP_CURRENT_SCRIPT_PATH}/src/onnx_classify.py
+${MLC_PYTHON_BIN} ${MLC_TMP_CURRENT_SCRIPT_PATH}/src/onnx_classify.py
test $? -eq 0 || exit 1
# Just a demo to pass environment variables from native scripts back to CM workflows
-echo "CM_APP_IMAGE_CLASSIFICATION_ONNX_PY=sucess" > tmp-run-env.out
+echo "MLC_APP_IMAGE_CLASSIFICATION_ONNX_PY=sucess" > tmp-run-env.out
diff --git a/script/app-image-classification-onnx-py/src/onnx_classify.py b/script/app-image-classification-onnx-py/src/onnx_classify.py
index c2c5a6ceb..5ce1dd4db 100644
--- a/script/app-image-classification-onnx-py/src/onnx_classify.py
+++ b/script/app-image-classification-onnx-py/src/onnx_classify.py
@@ -156,8 +156,8 @@ def load_a_batch(batch_filenames):
i) for i in range(batch_size)]
# Grigori: trick to test models:
- if os.environ.get('CM_IMAGE', '') != '':
- batch_filenames = [os.environ['CM_IMAGE']]
+ if os.environ.get('MLC_IMAGE', '') != '':
+ batch_filenames = [os.environ['MLC_IMAGE']]
batch_data = load_a_batch(batch_filenames)
# print(batch_data.shape)
diff --git a/script/app-image-classification-onnx-py/tests/README.md b/script/app-image-classification-onnx-py/tests/README.md
index 899509cb7..15254aa91 100644
--- a/script/app-image-classification-onnx-py/tests/README.md
+++ b/script/app-image-classification-onnx-py/tests/README.md
@@ -1,9 +1,9 @@
```bash
docker system prune -a -f
-cmr "download file _wget" --url=https://cKnowledge.org/ai/data/computer_mouse.jpg --verify=no --env.CM_DOWNLOAD_CHECKSUM=45ae5c940233892c2f860efdf0b66e7e
+cmr "download file _wget" --url=https://cKnowledge.org/ai/data/computer_mouse.jpg --verify=no --env.MLC_DOWNLOAD_CHECKSUM=45ae5c940233892c2f860efdf0b66e7e
-cm docker script "python app image-classification onnx" --docker_cm_repo=ctuning@mlcommons-ck --env.CM_IMAGE=computer_mouse.jpg
+cm docker script "python app image-classification onnx" --docker_cm_repo=ctuning@mlcommons-ck --env.MLC_IMAGE=computer_mouse.jpg
cm docker script "python app image-classification onnx" --docker_cm_repo=ctuning@mlcommons-ck --input=computer_mouse.jpg
cmrd "python app image-classification onnx" --docker_cm_repo=ctuning@mlcommons-ck --input=computer_mouse.jpg -j --docker_it
diff --git a/script/app-image-classification-tf-onnx-cpp/include/benchmark.h b/script/app-image-classification-tf-onnx-cpp/include/benchmark.h
index 42b0418fc..4951aa232 100644
--- a/script/app-image-classification-tf-onnx-cpp/include/benchmark.h
+++ b/script/app-image-classification-tf-onnx-cpp/include/benchmark.h
@@ -124,7 +124,7 @@ class BenchmarkSettings {
const int num_classes = 1000;
const bool normalize_img = getenv_s("CK_ENV_TENSORFLOW_MODEL_NORMALIZE_DATA") == "YES";
const bool subtract_mean = getenv_s("CK_ENV_TENSORFLOW_MODEL_SUBTRACT_MEAN") == "YES";
- const char *given_channel_means_str = getenv("CM_ML_MODEL_GIVEN_CHANNEL_MEANS");
+ const char *given_channel_means_str = getenv("MLC_ML_MODEL_GIVEN_CHANNEL_MEANS");
const bool full_report = getenv_i("CK_SILENT_MODE") == 0;
diff --git a/script/app-image-classification-tf-onnx-cpp/meta.yaml b/script/app-image-classification-tf-onnx-cpp/meta.yaml
index c7ee8b560..957a0d28f 100644
--- a/script/app-image-classification-tf-onnx-cpp/meta.yaml
+++ b/script/app-image-classification-tf-onnx-cpp/meta.yaml
@@ -3,8 +3,8 @@ automation_alias: script
automation_uid: 5b4e0237da074764
category: Modular AI/ML application pipeline
default_env:
- CM_BATCH_COUNT: '1'
- CM_BATCH_SIZE: '1'
+ MLC_BATCH_COUNT: '1'
+ MLC_BATCH_SIZE: '1'
deps:
- tags: detect,os
- tags: get,sys-utils-cm
diff --git a/script/app-image-classification-tf-onnx-cpp/run.sh b/script/app-image-classification-tf-onnx-cpp/run.sh
index b4a46853b..8133da599 100644
--- a/script/app-image-classification-tf-onnx-cpp/run.sh
+++ b/script/app-image-classification-tf-onnx-cpp/run.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD}
-${CM_CXX_COMPILER_WITH_PATH} -O3 ${CM_TMP_CURRENT_SCRIPT_PATH}/src/classification.cpp -o classification.exe -ltensorflow
+MLC_TMP_CURRENT_SCRIPT_PATH=${MLC_TMP_CURRENT_SCRIPT_PATH:-$PWD}
+${MLC_CXX_COMPILER_WITH_PATH} -O3 ${MLC_TMP_CURRENT_SCRIPT_PATH}/src/classification.cpp -o classification.exe -ltensorflow
test $? -eq 0 || exit 1
diff --git a/script/app-image-classification-torch-py/meta.yaml b/script/app-image-classification-torch-py/meta.yaml
index 6684bb737..44736f7a2 100644
--- a/script/app-image-classification-torch-py/meta.yaml
+++ b/script/app-image-classification-torch-py/meta.yaml
@@ -3,8 +3,8 @@ automation_alias: script
automation_uid: 5b4e0237da074764
category: Modular AI/ML application pipeline
default_env:
- CM_BATCH_COUNT: '1'
- CM_BATCH_SIZE: '1'
+ MLC_BATCH_COUNT: '1'
+ MLC_BATCH_SIZE: '1'
deps:
- tags: detect,os
- names:
diff --git a/script/app-image-classification-torch-py/run.bat b/script/app-image-classification-torch-py/run.bat
index 1415d4265..f4ec7e5d0 100644
--- a/script/app-image-classification-torch-py/run.bat
+++ b/script/app-image-classification-torch-py/run.bat
@@ -1,20 +1,20 @@
rem connect CM portable scripts with CK env
-set CM_ML_TORCH_MODEL_NAME=resnet50
-set CM_ML_MODEL_INPUT_DATA_TYPE=float32
-set CM_ML_MODEL_IMAGE_HEIGHT=224
-set CM_ML_MODEL_IMAGE_WIDTH=224
+set MLC_ML_TORCH_MODEL_NAME=resnet50
+set MLC_ML_MODEL_INPUT_DATA_TYPE=float32
+set MLC_ML_MODEL_IMAGE_HEIGHT=224
+set MLC_ML_MODEL_IMAGE_WIDTH=224
-rem set CM_DATASET_IMAGENET_PREPROCESSED_DIR=%CM_DATASET_PREPROCESSED_PATH%
+rem set MLC_DATASET_IMAGENET_PREPROCESSED_DIR=%MLC_DATASET_PREPROCESSED_PATH%
-set CM_DATASET_IMAGENET_PREPROCESSED_DIR=%CM_DATASET_PREPROCESSED_FULL_PATH%
-set CM_CAFFE_IMAGENET_SYNSET_WORDS_TXT=%CM_DATASET_AUX_PATH%\synset_words.txt
-set CM_DATASET_IMAGENET_PREPROCESSED_DATA_TYPE=float32
-set CM_RESULTS_DIR=%CM_TMP_CURRENT_SCRIPT_PATH%\results
+set MLC_DATASET_IMAGENET_PREPROCESSED_DIR=%MLC_DATASET_PREPROCESSED_FULL_PATH%
+set MLC_CAFFE_IMAGENET_SYNSET_WORDS_TXT=%MLC_DATASET_AUX_PATH%\synset_words.txt
+set MLC_DATASET_IMAGENET_PREPROCESSED_DATA_TYPE=float32
+set MLC_RESULTS_DIR=%MLC_TMP_CURRENT_SCRIPT_PATH%\results
set ML_MODEL_DATA_LAYOUT=NCHW
-%CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt
+%MLC_PYTHON_BIN_WITH_PATH% -m pip install -r %MLC_TMP_CURRENT_SCRIPT_PATH%\requirements.txt
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
-%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\src\pytorch_classify_preprocessed.py
+%MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\src\pytorch_classify_preprocessed.py
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/app-image-classification-torch-py/run.sh b/script/app-image-classification-torch-py/run.sh
index b50b79eb4..478332299 100644
--- a/script/app-image-classification-torch-py/run.sh
+++ b/script/app-image-classification-torch-py/run.sh
@@ -1,20 +1,20 @@
#!/bin/bash
-CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD}
+MLC_TMP_CURRENT_SCRIPT_PATH=${MLC_TMP_CURRENT_SCRIPT_PATH:-$PWD}
# connect CM intelligent components with CK env
-export CM_ML_TORCH_MODEL_NAME=resnet50
-export CM_ML_MODEL_INPUT_DATA_TYPE=float32
-export CM_ML_MODEL_IMAGE_HEIGHT=224
-export CM_ML_MODEL_IMAGE_WIDTH=224
-export CM_DATASET_IMAGENET_PREPROCESSED_DIR=${CM_DATASET_PREPROCESSED_FULL_PATH}
-export CM_CAFFE_IMAGENET_SYNSET_WORDS_TXT=${CM_DATASET_AUX_PATH}/synset_words.txt
-export CM_DATASET_IMAGENET_PREPROCESSED_DATA_TYPE=float32
-export CM_RESULTS_DIR=${CM_TMP_CURRENT_SCRIPT_PATH}/results
+export MLC_ML_TORCH_MODEL_NAME=resnet50
+export MLC_ML_MODEL_INPUT_DATA_TYPE=float32
+export MLC_ML_MODEL_IMAGE_HEIGHT=224
+export MLC_ML_MODEL_IMAGE_WIDTH=224
+export MLC_DATASET_IMAGENET_PREPROCESSED_DIR=${MLC_DATASET_PREPROCESSED_FULL_PATH}
+export MLC_CAFFE_IMAGENET_SYNSET_WORDS_TXT=${MLC_DATASET_AUX_PATH}/synset_words.txt
+export MLC_DATASET_IMAGENET_PREPROCESSED_DATA_TYPE=float32
+export MLC_RESULTS_DIR=${MLC_TMP_CURRENT_SCRIPT_PATH}/results
export ML_MODEL_DATA_LAYOUT=NCHW
-${CM_PYTHON_BIN} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt
+${MLC_PYTHON_BIN} -m pip install -r ${MLC_TMP_CURRENT_SCRIPT_PATH}/requirements.txt
test $? -eq 0 || exit 1
-${CM_PYTHON_BIN} ${CM_TMP_CURRENT_SCRIPT_PATH}/src/pytorch_classify_preprocessed.py
+${MLC_PYTHON_BIN} ${MLC_TMP_CURRENT_SCRIPT_PATH}/src/pytorch_classify_preprocessed.py
test $? -eq 0 || exit 1
diff --git a/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py b/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py
index 863b3a651..ff20972c6 100644
--- a/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py
+++ b/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py
@@ -19,9 +19,9 @@
# Writing the results out:
#
-RESULTS_DIR = os.getenv('CM_RESULTS_DIR')
+RESULTS_DIR = os.getenv('MLC_RESULTS_DIR')
FULL_REPORT = os.getenv(
- 'CM_SILENT_MODE',
+ 'MLC_SILENT_MODE',
'0') in (
'NO',
'no',
@@ -31,14 +31,14 @@
# Processing by batches:
#
-BATCH_COUNT = int(os.getenv('CM_BATCH_COUNT', 1))
+BATCH_COUNT = int(os.getenv('MLC_BATCH_COUNT', 1))
# Enabling GPU if available and not disabled:
#
USE_CUDA = (os.getenv('USE_CUDA', '').strip() == 'yes')
-labels_path = os.environ['CM_CAFFE_IMAGENET_SYNSET_WORDS_TXT']
+labels_path = os.environ['MLC_CAFFE_IMAGENET_SYNSET_WORDS_TXT']
def load_labels(labels_filepath):
@@ -69,7 +69,7 @@ def main():
os.mkdir(RESULTS_DIR)
# Load the [cached] Torch model
- path_to_model_pth = os.environ['CM_ML_MODEL_FILE_WITH_PATH']
+ path_to_model_pth = os.environ['MLC_ML_MODEL_FILE_WITH_PATH']
model = models.resnet50(pretrained=False)
model.load_state_dict(torch.load(path_to_model_pth))
@@ -90,7 +90,7 @@ def main():
first_classification_time = 0
images_loaded = 0
- image_path = os.environ.get('CM_INPUT', '')
+ image_path = os.environ.get('MLC_INPUT', '')
if image_path != '':
normalize_data_bool = True
diff --git a/script/app-image-classification-tvm-onnx-py/meta.yaml b/script/app-image-classification-tvm-onnx-py/meta.yaml
index 2b5cc9cca..c0abe6398 100644
--- a/script/app-image-classification-tvm-onnx-py/meta.yaml
+++ b/script/app-image-classification-tvm-onnx-py/meta.yaml
@@ -7,8 +7,8 @@ automation_uid: 5b4e0237da074764
category: Modular AI/ML application pipeline
default_env:
- CM_BATCH_COUNT: '1'
- CM_BATCH_SIZE: '1'
+ MLC_BATCH_COUNT: '1'
+ MLC_BATCH_SIZE: '1'
deps:
- tags: detect,os
diff --git a/script/app-image-classification-tvm-onnx-py/run.sh b/script/app-image-classification-tvm-onnx-py/run.sh
index 8eb066077..145a6c799 100644
--- a/script/app-image-classification-tvm-onnx-py/run.sh
+++ b/script/app-image-classification-tvm-onnx-py/run.sh
@@ -1,9 +1,9 @@
#!/bin/bash
-CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD}
+MLC_TMP_CURRENT_SCRIPT_PATH=${MLC_TMP_CURRENT_SCRIPT_PATH:-$PWD}
-#if [[ ${CM_HOST_PLATFORM_FLAVOR} == "arm64" ]]; then
-# ${CM_PYTHON_BIN} -m pip install -i https://test.pypi.org/simple/ onnxruntime==1.9.0.dev174552
+#if [[ ${MLC_HOST_PLATFORM_FLAVOR} == "arm64" ]]; then
+# ${MLC_PYTHON_BIN} -m pip install -i https://test.pypi.org/simple/ onnxruntime==1.9.0.dev174552
#fi
export USE_TVM=yes
@@ -12,15 +12,15 @@ export USE_TVM=yes
wget -nc https://raw.githubusercontent.com/mlcommons/ck-mlops/main/program/ml-task-image-classification-tvm-onnx-cpu/synset.txt
test $? -eq 0 || exit 1
-${CM_PYTHON_BIN} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt
+${MLC_PYTHON_BIN} -m pip install -r ${MLC_TMP_CURRENT_SCRIPT_PATH}/requirements.txt
test $? -eq 0 || exit 1
-if [[ "${CM_INPUT}" != "" ]]; then
- export CM_IMAGE=${CM_INPUT}
+if [[ "${MLC_INPUT}" != "" ]]; then
+ export MLC_IMAGE=${MLC_INPUT}
else
- export CM_IMAGE=${CM_DATASET_PATH}/ILSVRC2012_val_00000001.JPEG
+ export MLC_IMAGE=${MLC_DATASET_PATH}/ILSVRC2012_val_00000001.JPEG
fi
-${CM_PYTHON_BIN} ${CM_TMP_CURRENT_SCRIPT_PATH}/src/classify.py --image ${CM_IMAGE}
+${MLC_PYTHON_BIN} ${MLC_TMP_CURRENT_SCRIPT_PATH}/src/classify.py --image ${MLC_IMAGE}
test $? -eq 0 || exit 1
diff --git a/script/app-image-classification-tvm-onnx-py/src/classify.py b/script/app-image-classification-tvm-onnx-py/src/classify.py
index 20c164288..058c42bfa 100644
--- a/script/app-image-classification-tvm-onnx-py/src/classify.py
+++ b/script/app-image-classification-tvm-onnx-py/src/classify.py
@@ -107,9 +107,9 @@ def run_case(dtype, image, target):
# plt.show()
plt.savefig('pre-processed-image.png')
# Load model
- model_path = os.environ.get('CM_ML_MODEL_FILE_WITH_PATH', '')
+ model_path = os.environ.get('MLC_ML_MODEL_FILE_WITH_PATH', '')
if model_path == '':
- print('Error: environment variable CM_ML_MODEL_FILE_WITH_PATH is not defined')
+ print('Error: environment variable MLC_ML_MODEL_FILE_WITH_PATH is not defined')
exit(1)
opt = rt.SessionOptions()
@@ -154,9 +154,9 @@ def run_case(dtype, image, target):
build_conf = {'relay.backend.use_auto_scheduler': False}
opt_lvl = int(os.environ.get('TVM_OPT_LEVEL', 3))
- host = os.environ.get('CM_HOST_PLATFORM_FLAVOR')
+ host = os.environ.get('MLC_HOST_PLATFORM_FLAVOR')
if host == 'x86_64' and 'AMD' in os.environ.get(
- 'CM_HOST_CPU_VENDOR_ID', ''):
+ 'MLC_HOST_CPU_VENDOR_ID', ''):
target = os.environ.get('TVM_TARGET', 'llvm -mcpu=znver2')
else:
target = os.environ.get('TVM_TARGET', 'llvm')
@@ -283,7 +283,7 @@ def run_case(dtype, image, target):
args = parser.parse_args()
if args.image.strip().lower() == '':
- print('Please specify path to an image using CM_IMAGE environment variable!')
+ print('Please specify path to an image using MLC_IMAGE environment variable!')
exit(1)
# set parameter
@@ -296,7 +296,7 @@ def run_case(dtype, image, target):
out_shape = (batch_size, num_classes)
dtype = 'float32'
- if os.environ.get('CM_TVM_DTYPE', '') != '':
- dtype = os.environ['CM_TVM_DTYPE']
+ if os.environ.get('MLC_TVM_DTYPE', '') != '':
+ dtype = os.environ['MLC_TVM_DTYPE']
run_case(dtype, args.image, args.target)
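The hunk above selects the TVM target from the host flavor and CPU vendor. Extracted as a standalone sketch (the function name is ours; the environment variable names mirror the script):

```python
import os

# Sketch of the target-selection rule shown above: prefer znver2 tuning on
# AMD x86_64 hosts, otherwise fall back to a generic llvm target.
def pick_tvm_target():
    host = os.environ.get('MLC_HOST_PLATFORM_FLAVOR', '')
    vendor = os.environ.get('MLC_HOST_CPU_VENDOR_ID', '')
    if host == 'x86_64' and 'AMD' in vendor:
        return os.environ.get('TVM_TARGET', 'llvm -mcpu=znver2')
    return os.environ.get('TVM_TARGET', 'llvm')
```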
diff --git a/script/app-image-corner-detection/customize.py b/script/app-image-corner-detection/customize.py
index f27ee028a..7b37eb663 100644
--- a/script/app-image-corner-detection/customize.py
+++ b/script/app-image-corner-detection/customize.py
@@ -8,28 +8,28 @@ def preprocess(i):
env = i['env']
script_path = i['run_script_input']['path']
- env["CM_SOURCE_FOLDER_PATH"] = script_path
- env['CM_C_SOURCE_FILES'] = "susan.c"
+ env["MLC_SOURCE_FOLDER_PATH"] = script_path
+ env['MLC_C_SOURCE_FILES'] = "susan.c"
- if 'CM_INPUT' not in env:
- env['CM_INPUT'] = os.path.join(script_path, 'data.pgm')
+ if 'MLC_INPUT' not in env:
+ env['MLC_INPUT'] = os.path.join(script_path, 'data.pgm')
- if 'CM_OUTPUT' not in env:
- env['CM_OUTPUT'] = 'output_image_with_corners.pgm'
+ if 'MLC_OUTPUT' not in env:
+ env['MLC_OUTPUT'] = 'output_image_with_corners.pgm'
- if 'CM_RUN_DIR' not in env:
+ if 'MLC_RUN_DIR' not in env:
output_path = os.path.join(script_path, "output")
if output_path != '' and not os.path.isdir(output_path):
os.makedirs(output_path)
- env['CM_RUN_DIR'] = output_path
+ env['MLC_RUN_DIR'] = output_path
- env['CM_RUN_SUFFIX'] = env['CM_INPUT'] + ' ' + env['CM_OUTPUT'] + ' -c'
+ env['MLC_RUN_SUFFIX'] = env['MLC_INPUT'] + ' ' + env['MLC_OUTPUT'] + ' -c'
if os_info['platform'] == 'windows':
- env['CM_BIN_NAME'] = 'image-corner.exe'
+ env['MLC_BIN_NAME'] = 'image-corner.exe'
else:
- env['CM_BIN_NAME'] = 'image-corner'
+ env['MLC_BIN_NAME'] = 'image-corner'
env['+ LDCFLAGS'] = ["-lm"]
return {'return': 0}
@@ -38,6 +38,6 @@ def preprocess(i):
def postprocess(i):
env = i['env']
- print(env['CM_OUTPUT'] + " generated in " + env['CM_RUN_DIR'])
+ print(env['MLC_OUTPUT'] + " generated in " + env['MLC_RUN_DIR'])
return {'return': 0}
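The `preprocess()` above wires up the binary name, run directory, and argument suffix through `MLC_*` variables. Purely as an illustration of how those pieces combine into the native command (the real runner lives in the benchmark-program script, and `build_run_cmd` is our own name):

```python
import os

# Illustrative only: combine the env keys set in preprocess() above into
# the native command line executed later by the generic runner.
def build_run_cmd(env):
    binary = os.path.join(env['MLC_RUN_DIR'], env['MLC_BIN_NAME'])
    return binary + ' ' + env['MLC_RUN_SUFFIX']
```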
diff --git a/script/app-image-corner-detection/meta.yaml b/script/app-image-corner-detection/meta.yaml
index 1fd27d9b6..2deedbde5 100644
--- a/script/app-image-corner-detection/meta.yaml
+++ b/script/app-image-corner-detection/meta.yaml
@@ -18,11 +18,11 @@ deps:
posthook_deps:
- skip_if_env:
- CM_SKIP_COMPILE:
+ MLC_SKIP_COMPILE:
- 'on'
tags: compile,cpp-program
- skip_if_env:
- CM_SKIP_RUN:
+ MLC_SKIP_RUN:
- 'on'
tags: benchmark-program
diff --git a/script/app-image-corner-detection/run.sh b/script/app-image-corner-detection/run.sh
index 30cfbdd00..033e2f3aa 100644
--- a/script/app-image-corner-detection/run.sh
+++ b/script/app-image-corner-detection/run.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-CUR=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD}
+CUR=${MLC_TMP_CURRENT_SCRIPT_PATH:-$PWD}
mkdir -p $CUR"/output"
test $? -eq 0 || exit 1
diff --git a/script/app-loadgen-generic-python/README-extra.md b/script/app-loadgen-generic-python/README-extra.md
index cdd08ef41..6222b6574 100644
--- a/script/app-loadgen-generic-python/README-extra.md
+++ b/script/app-loadgen-generic-python/README-extra.md
@@ -49,7 +49,7 @@ including the above one, any time a script with python dependency is run. To avo
can set up the following environment variable with the name of the current virtual environment:
```bash
-export CM_SCRIPT_EXTRA_CMD="--adr.python.name=loadgen"
+export MLC_SCRIPT_EXTRA_CMD="--adr.python.name=loadgen"
```
The `--adr` flag stands for "Add to all Dependencies Recursively" and will find all sub-dependencies on other CM scripts
@@ -250,16 +250,16 @@ Available variations:
Available flags mapped to environment variables:
- --concurrency -> --env.CM_MLPERF_CONCURRENCY
- --ep -> --env.CM_MLPERF_EXECUTION_PROVIDER
- --execmode -> --env.CM_MLPERF_EXEC_MODE
- --interop -> --env.CM_MLPERF_INTEROP
- --intraop -> --env.CM_MLPERF_INTRAOP
- --modelpath -> --env.CM_ML_MODEL_FILE_WITH_PATH
- --output_dir -> --env.CM_MLPERF_OUTPUT_DIR
- --runner -> --env.CM_MLPERF_RUNNER
- --samples -> --env.CM_MLPERF_LOADGEN_SAMPLES
- --scenario -> --env.CM_MLPERF_LOADGEN_SCENARIO
+ --concurrency -> --env.MLC_MLPERF_CONCURRENCY
+ --ep -> --env.MLC_MLPERF_EXECUTION_PROVIDER
+ --execmode -> --env.MLC_MLPERF_EXEC_MODE
+ --interop -> --env.MLC_MLPERF_INTEROP
+ --intraop -> --env.MLC_MLPERF_INTRAOP
+ --modelpath -> --env.MLC_ML_MODEL_FILE_WITH_PATH
+ --output_dir -> --env.MLC_MLPERF_OUTPUT_DIR
+ --runner -> --env.MLC_MLPERF_RUNNER
+ --samples -> --env.MLC_MLPERF_LOADGEN_SAMPLES
+ --scenario -> --env.MLC_MLPERF_LOADGEN_SCENARIO
```
@@ -272,8 +272,8 @@ cm docker script "python app loadgen-generic _onnxruntime _custom _huggingface _
## Tuning CPU performance via CM experiment
```bash
-cm run experiment --tags=loadgen,python,llama2 -- cmr script "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.steerapi/Llama-2-7b-chat-hf-onnx-awq-w8" --adr.hf-downloader.model_filename=onnx/decoder_model_merged_quantized.onnx,onnx/decoder_model_merged_quantized.onnx_data --samples=2 --intraop={{CM_OPT_INTRAOP{[1,2,4]}}} --interop={{CM_OPT_INTEROP{[1,2,4]}}} --quiet
-cm run experiment --tags=loadgen,python,llama2 -- cmr "python app loadgen-generic _onnxruntime" --modelpath={PATH TO ONNX MODEL} --samples=2 --intraop={{CM_OPT_INTRAOP{[1,2,4]}}} --interop={{CM_OPT_INTEROP{[1,2,4]}}} --quiet
+cm run experiment --tags=loadgen,python,llama2 -- cmr script "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.steerapi/Llama-2-7b-chat-hf-onnx-awq-w8" --adr.hf-downloader.model_filename=onnx/decoder_model_merged_quantized.onnx,onnx/decoder_model_merged_quantized.onnx_data --samples=2 --intraop={{MLC_OPT_INTRAOP{[1,2,4]}}} --interop={{MLC_OPT_INTEROP{[1,2,4]}}} --quiet
+cm run experiment --tags=loadgen,python,llama2 -- cmr "python app loadgen-generic _onnxruntime" --modelpath={PATH TO ONNX MODEL} --samples=2 --intraop={{MLC_OPT_INTRAOP{[1,2,4]}}} --interop={{MLC_OPT_INTEROP{[1,2,4]}}} --quiet
```
diff --git a/script/app-loadgen-generic-python/customize.py b/script/app-loadgen-generic-python/customize.py
index 55050fadb..9a4a6104e 100644
--- a/script/app-loadgen-generic-python/customize.py
+++ b/script/app-loadgen-generic-python/customize.py
@@ -11,81 +11,81 @@ def preprocess(i):
env = i['env']
- if 'CM_ML_MODEL_FILE_WITH_PATH' not in env:
+ if 'MLC_ML_MODEL_FILE_WITH_PATH' not in env:
return {
'return': 1, 'error': 'Please select a variation specifying the model to run'}
- run_opts = env.get('CM_RUN_OPTS', '')
+ run_opts = env.get('MLC_RUN_OPTS', '')
- if env.get('CM_MLPERF_BACKEND', '') != '':
- run_opts += " -b " + env['CM_MLPERF_BACKEND']
+ if env.get('MLC_MLPERF_BACKEND', '') != '':
+ run_opts += " -b " + env['MLC_MLPERF_BACKEND']
- if env.get('CM_MLPERF_RUNNER', '') != '':
- run_opts += " -r " + env['CM_MLPERF_RUNNER']
+ if env.get('MLC_MLPERF_RUNNER', '') != '':
+ run_opts += " -r " + env['MLC_MLPERF_RUNNER']
- if env.get('CM_MLPERF_CONCURRENCY', '') != '':
- run_opts += " --concurrency " + env['CM_MLPERF_CONCURRENCY']
+ if env.get('MLC_MLPERF_CONCURRENCY', '') != '':
+ run_opts += " --concurrency " + env['MLC_MLPERF_CONCURRENCY']
- if env.get('CM_MLPERF_EXECUTION_PROVIDER', '') != '':
- run_opts += " --ep " + env['CM_MLPERF_EXECUTION_PROVIDER']
+ if env.get('MLC_MLPERF_EXECUTION_PROVIDER', '') != '':
+ run_opts += " --ep " + env['MLC_MLPERF_EXECUTION_PROVIDER']
- if env.get('CM_MLPERF_INTRAOP', '') != '':
- run_opts += " --intraop " + env['CM_MLPERF_INTRAOP']
+ if env.get('MLC_MLPERF_INTRAOP', '') != '':
+ run_opts += " --intraop " + env['MLC_MLPERF_INTRAOP']
- if env.get('CM_MLPERF_INTEROP', '') != '':
- run_opts += " --interop " + env['CM_MLPERF_INTEROP']
+ if env.get('MLC_MLPERF_INTEROP', '') != '':
+ run_opts += " --interop " + env['MLC_MLPERF_INTEROP']
- if env.get('CM_MLPERF_EXECMODE', '') != '':
- run_opts += " --execmode " + env['CM_MLPERF_EXECUTION_MODE']
+ if env.get('MLC_MLPERF_EXECMODE', '') != '':
+ run_opts += " --execmode " + env['MLC_MLPERF_EXECUTION_MODE']
- if env.get('CM_MLPERF_LOADGEN_SAMPLES', '') != '':
- run_opts += " --samples " + env['CM_MLPERF_LOADGEN_SAMPLES']
+ if env.get('MLC_MLPERF_LOADGEN_SAMPLES', '') != '':
+ run_opts += " --samples " + env['MLC_MLPERF_LOADGEN_SAMPLES']
- if env.get('CM_MLPERF_LOADGEN_EXPECTED_QPS', '') != '':
+ if env.get('MLC_MLPERF_LOADGEN_EXPECTED_QPS', '') != '':
run_opts += " --loadgen_expected_qps " + \
- env['CM_MLPERF_LOADGEN_EXPECTED_QPS']
+ env['MLC_MLPERF_LOADGEN_EXPECTED_QPS']
- if env.get('CM_MLPERF_LOADGEN_DURATION_SEC', '') != '':
+ if env.get('MLC_MLPERF_LOADGEN_DURATION_SEC', '') != '':
run_opts += " --loadgen_duration_sec " + \
- env['CM_MLPERF_LOADGEN_DURATION_SEC']
+ env['MLC_MLPERF_LOADGEN_DURATION_SEC']
- if env.get('CM_MLPERF_OUTPUT_DIR', '') != '':
- run_opts += " --output " + env['CM_MLPERF_OUTPUT_DIR']
+ if env.get('MLC_MLPERF_OUTPUT_DIR', '') != '':
+ run_opts += " --output " + env['MLC_MLPERF_OUTPUT_DIR']
- if env.get('CM_ML_MODEL_CODE_WITH_PATH', '') != '':
- run_opts += " --model_code " + env['CM_ML_MODEL_CODE_WITH_PATH']
+ if env.get('MLC_ML_MODEL_CODE_WITH_PATH', '') != '':
+ run_opts += " --model_code " + env['MLC_ML_MODEL_CODE_WITH_PATH']
- if env.get('CM_ML_MODEL_CFG_WITH_PATH', '') != '':
- run_opts += " --model_cfg " + env['CM_ML_MODEL_CFG_WITH_PATH']
+ if env.get('MLC_ML_MODEL_CFG_WITH_PATH', '') != '':
+ run_opts += " --model_cfg " + env['MLC_ML_MODEL_CFG_WITH_PATH']
else:
# Check cfg from command line
- cfg = env.get('CM_ML_MODEL_CFG', {})
+ cfg = env.get('MLC_ML_MODEL_CFG', {})
if len(cfg) > 0:
- del (env['CM_ML_MODEL_CFG'])
+ del (env['MLC_ML_MODEL_CFG'])
import json
import tempfile
tfile = tempfile.NamedTemporaryFile(mode="w+", suffix='.json')
- fd, tfile = tempfile.mkstemp(suffix='.json', prefix='cm-cfg-')
+ fd, tfile = tempfile.mkstemp(suffix='.json', prefix='mlc-cfg-')
os.close(fd)
with open(tfile, 'w') as fd:
json.dump(cfg, fd)
- env['CM_APP_LOADGEN_GENERIC_PYTHON_TMP_CFG_FILE'] = tfile
+ env['MLC_APP_LOADGEN_GENERIC_PYTHON_TMP_CFG_FILE'] = tfile
run_opts += " --model_cfg " + tfile
- if env.get('CM_ML_MODEL_SAMPLE_WITH_PATH', '') != '':
+ if env.get('MLC_ML_MODEL_SAMPLE_WITH_PATH', '') != '':
run_opts += " --model_sample_pickle " + \
- env['CM_ML_MODEL_SAMPLE_WITH_PATH']
+ env['MLC_ML_MODEL_SAMPLE_WITH_PATH']
# Add path to file model weights at the end of command line
- run_opts += ' ' + env['CM_ML_MODEL_FILE_WITH_PATH']
+ run_opts += ' ' + env['MLC_ML_MODEL_FILE_WITH_PATH']
- env['CM_RUN_OPTS'] = run_opts
+ env['MLC_RUN_OPTS'] = run_opts
print('')
print('Assembled flags: {}'.format(run_opts))
@@ -98,7 +98,7 @@ def postprocess(i):
env = i['env']
- tfile = env.get('CM_APP_LOADGEN_GENERIC_PYTHON_TMP_CFG_FILE', '')
+ tfile = env.get('MLC_APP_LOADGEN_GENERIC_PYTHON_TMP_CFG_FILE', '')
if tfile != '' and os.path.isfile(tfile):
os.remove(tfile)
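The long `if env.get(...)` chain in this `customize.py` maps environment variables onto CLI flags one at a time. A table-driven equivalent is sketched below; the map is abbreviated and illustrative, not what the patch ships:

```python
# Hedged sketch: a table-driven alternative to the flag-assembly chain above.
FLAG_MAP = {
    'MLC_MLPERF_BACKEND': '-b',
    'MLC_MLPERF_RUNNER': '-r',
    'MLC_MLPERF_CONCURRENCY': '--concurrency',
    'MLC_MLPERF_EXECUTION_PROVIDER': '--ep',
    'MLC_MLPERF_INTRAOP': '--intraop',
    'MLC_MLPERF_INTEROP': '--interop',
    'MLC_MLPERF_LOADGEN_SAMPLES': '--samples',
}

def assemble_run_opts(env):
    parts = []
    for var, flag in FLAG_MAP.items():
        value = env.get(var, '')
        if value != '':
            parts += [flag, str(value)]
    return ' '.join(parts)
```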
diff --git a/script/app-loadgen-generic-python/meta.yaml b/script/app-loadgen-generic-python/meta.yaml
index 3e5fe56e1..ba5a3c616 100644
--- a/script/app-loadgen-generic-python/meta.yaml
+++ b/script/app-loadgen-generic-python/meta.yaml
@@ -23,31 +23,31 @@ tags_help: "python app generic loadgen"
# Default environment
default_env:
- CM_MLPERF_EXECUTION_MODE: parallel
- CM_MLPERF_BACKEND: onnxruntime
+ MLC_MLPERF_EXECUTION_MODE: parallel
+ MLC_MLPERF_BACKEND: onnxruntime
# Map script inputs to environment variables
input_mapping:
- modelpath: CM_ML_MODEL_FILE_WITH_PATH
- modelcodepath: CM_ML_MODEL_CODE_WITH_PATH
- modelcfgpath: CM_ML_MODEL_CFG_WITH_PATH
- modelcfg: CM_ML_MODEL_CFG
- modelsamplepath: CM_ML_MODEL_SAMPLE_WITH_PATH
- output_dir: CM_MLPERF_OUTPUT_DIR
- scenario: CM_MLPERF_LOADGEN_SCENARIO
- runner: CM_MLPERF_RUNNER
- concurrency: CM_MLPERF_CONCURRENCY
- ep: CM_MLPERF_EXECUTION_PROVIDER
- intraop: CM_MLPERF_INTRAOP
- interop: CM_MLPERF_INTEROP
- execmode: CM_MLPERF_EXEC_MODE
- samples: CM_MLPERF_LOADGEN_SAMPLES
- loadgen_expected_qps: CM_MLPERF_LOADGEN_EXPECTED_QPS
- loadgen_duration_sec: CM_MLPERF_LOADGEN_DURATION_SEC
+ modelpath: MLC_ML_MODEL_FILE_WITH_PATH
+ modelcodepath: MLC_ML_MODEL_CODE_WITH_PATH
+ modelcfgpath: MLC_ML_MODEL_CFG_WITH_PATH
+ modelcfg: MLC_ML_MODEL_CFG
+ modelsamplepath: MLC_ML_MODEL_SAMPLE_WITH_PATH
+ output_dir: MLC_MLPERF_OUTPUT_DIR
+ scenario: MLC_MLPERF_LOADGEN_SCENARIO
+ runner: MLC_MLPERF_RUNNER
+ concurrency: MLC_MLPERF_CONCURRENCY
+ ep: MLC_MLPERF_EXECUTION_PROVIDER
+ intraop: MLC_MLPERF_INTRAOP
+ interop: MLC_MLPERF_INTEROP
+ execmode: MLC_MLPERF_EXEC_MODE
+ samples: MLC_MLPERF_LOADGEN_SAMPLES
+ loadgen_expected_qps: MLC_MLPERF_LOADGEN_EXPECTED_QPS
+ loadgen_duration_sec: MLC_MLPERF_LOADGEN_DURATION_SEC
# New env keys exported from this script
new_env_keys:
- - CM_MLPERF_*
+ - MLC_MLPERF_*
# Dependencies on other CM scripts
@@ -73,7 +73,7 @@ deps:
# Detect CUDA if required
- tags: get,cuda
enable_if_env:
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- gpu
# Install loadgen
@@ -85,25 +85,25 @@ deps:
# Install ML engines via CM
# ONNX
- enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- onnxruntime
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- cpu
tags: get,generic-python-lib,_onnxruntime
names:
- onnxruntime
- enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- onnxruntime
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- gpu
tags: get,generic-python-lib,_onnxruntime_gpu
names:
- onnxruntime
- enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- onnxruntime
tags: get,generic-python-lib,_onnx
names:
@@ -116,18 +116,18 @@ deps:
# CPU
- enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- pytorch
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- cpu
tags: get,generic-python-lib,_torch
names:
- torch
- enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- pytorch
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- cpu
tags: get,generic-python-lib,_torchvision
names:
@@ -136,18 +136,18 @@ deps:
# CUDA/GPU
- enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- pytorch
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- gpu
tags: get,generic-python-lib,_torch_cuda
names:
- torch
- enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- pytorch
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- gpu
tags: get,generic-python-lib,_torchvision_cuda
names:
@@ -158,17 +158,17 @@ deps:
########################################################################
# Install MLPerf models
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- resnet50
tags: get,ml-model,resnet50,_onnx
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- retinanet
tags: get,ml-model,retinanet,_onnx,_fp32
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- retinanet
tags: get,ml-model,retinanet,_onnx,_fp32
@@ -181,14 +181,14 @@ variations:
pytorch:
group: backend
env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
pytorch
onnxruntime:
group: backend
default: true
env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
onnxruntime
@@ -199,9 +199,9 @@ variations:
default:
true
env:
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
cpu
- CM_MLPERF_EXECUTION_PROVIDER:
+ MLC_MLPERF_EXECUTION_PROVIDER:
CPUExecutionProvider
cuda:
@@ -211,9 +211,9 @@ variations:
group:
device
env:
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
gpu
- CM_MLPERF_EXECUTION_PROVIDER:
+ MLC_MLPERF_EXECUTION_PROVIDER:
CUDAExecutionProvider
@@ -222,25 +222,25 @@ variations:
group:
models
env:
- CM_MODEL: retinanet
+ MLC_MODEL: retinanet
resnet50:
group:
models
env:
- CM_MODEL: resnet50
+ MLC_MODEL: resnet50
custom:
group:
models
env:
- CM_MODEL: custom
+ MLC_MODEL: custom
huggingface:
env:
- CM_CUSTOM_MODEL_SOURCE: huggingface
+ MLC_CUSTOM_MODEL_SOURCE: huggingface
custom,huggingface:
deps:
@@ -249,16 +249,16 @@ variations:
- hf-downloader
update_tags_from_env_with_prefix:
"_model-stub.":
- - CM_ML_MODEL_STUB
+ - MLC_ML_MODEL_STUB
model-stub.#:
env:
- CM_ML_MODEL_STUB: "#"
+ MLC_ML_MODEL_STUB: "#"
cmc:
env:
- CM_CUSTOM_MODEL_CMC: yes
+ MLC_CUSTOM_MODEL_CMC: yes
custom,cmc:
@@ -303,15 +303,15 @@ docker:
input_paths:
- modelpath
- modelsamplepath
- - env.CM_ML_MODEL_FILE_WITH_PATH
- - env.CM_ML_MODEL_CODE_WITH_PATH
+ - env.MLC_ML_MODEL_FILE_WITH_PATH
+ - env.MLC_ML_MODEL_CODE_WITH_PATH
- output_dir
- repro_dir
skip_input_for_fake_run:
- modelpath
- modelsamplepath
- - env.CM_ML_MODEL_FILE_WITH_PATH
- - env.CM_ML_MODEL_CODE_WITH_PATH
+ - env.MLC_ML_MODEL_FILE_WITH_PATH
+ - env.MLC_ML_MODEL_CODE_WITH_PATH
- output_dir
- scenario
- runner
diff --git a/script/app-loadgen-generic-python/run.bat b/script/app-loadgen-generic-python/run.bat
index 3d4b5d58b..921853c60 100644
--- a/script/app-loadgen-generic-python/run.bat
+++ b/script/app-loadgen-generic-python/run.bat
@@ -1,4 +1,4 @@
rem native script
-%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\src\main.py %CM_RUN_OPTS%
+%MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\src\main.py %MLC_RUN_OPTS%
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/app-loadgen-generic-python/run.sh b/script/app-loadgen-generic-python/run.sh
index 2a13312f0..843ecb749 100644
--- a/script/app-loadgen-generic-python/run.sh
+++ b/script/app-loadgen-generic-python/run.sh
@@ -1,4 +1,4 @@
#!/bin/bash
-${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/src/main.py ${CM_RUN_OPTS}
+${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/src/main.py ${MLC_RUN_OPTS}
test $? -eq 0 || exit 1
diff --git a/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.bat b/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.bat
deleted file mode 100644
index c7154832f..000000000
--- a/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.bat
+++ /dev/null
@@ -1,7 +0,0 @@
-rem set CM_CACHE=--no-cache
-
-set CM_DOCKER_ORG=modularcm
-set CM_DOCKER_NAME=loadgen-generic-python
-set CM_OS_NAME=ubuntu
-set CM_HW_TARGET=cpu
-set CM_OS_VERSION=22.04
diff --git a/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.sh b/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.sh
deleted file mode 100644
index 5f49d3be9..000000000
--- a/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#! /bin/bash
-
-#export CM_CACHE="--no-cache"
-
-export CM_DOCKER_ORG=modularcm
-export CM_DOCKER_NAME="loadgen-generic-python"
-export CM_OS_NAME="ubuntu"
-export CM_HW_TARGET="cpu"
-export CM_OS_VERSION="22.04"
-
diff --git a/script/app-loadgen-generic-python/tests/modular-cm-containers/build.bat b/script/app-loadgen-generic-python/tests/modular-cm-containers/build.bat
deleted file mode 100644
index f51ea46b6..000000000
--- a/script/app-loadgen-generic-python/tests/modular-cm-containers/build.bat
+++ /dev/null
@@ -1,16 +0,0 @@
-call _common.bat
-
-docker build -f %CM_DOCKER_NAME%--%CM_OS_NAME%-%CM_HW_TARGET%.Dockerfile ^
- -t %CM_DOCKER_ORG%/%CM_DOCKER_NAME%-%CM_HW_TARGET%:%CM_OS_NAME%-%CM_OS_VERSION% ^
- --build-arg cm_os_name=%CM_OS_NAME% ^
- --build-arg cm_hw_target=%CM_HW_TARGET% ^
- --build-arg cm_os_version=%CM_OS_VERSION% ^
- --build-arg cm_version="" ^
- --build-arg cm_automation_repo="ctuning@mlcommons-ck" ^
- --build-arg cm_automation_checkout="" ^
- --build-arg cm_python_version="3.10.8" ^
- --build-arg cm_mlperf_inference_loadgen_version="" ^
- --build-arg cm_mlperf_inference_src_tags="" ^
- --build-arg cm_mlperf_inference_src_version="" ^
- --build-arg CM_ONNXRUNTIME_VERSION="1.13.1" ^
- %CM_CACHE% .
diff --git a/script/app-loadgen-generic-python/tests/modular-cm-containers/build.sh b/script/app-loadgen-generic-python/tests/modular-cm-containers/build.sh
deleted file mode 100644
index 186a0eae9..000000000
--- a/script/app-loadgen-generic-python/tests/modular-cm-containers/build.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#! /bin/bash
-
-. ./_common.sh
-
-time docker build -f ${CM_DOCKER_NAME}--${CM_OS_NAME}-${CM_HW_TARGET}.Dockerfile \
- -t ${CM_DOCKER_ORG}/${CM_DOCKER_NAME}-${CM_HW_TARGET}:${CM_OS_NAME}-${CM_OS_VERSION} \
- --build-arg cm_os_name=${CM_OS_NAME} \
- --build-arg cm_hw_target=${CM_HW_TARGET} \
- --build-arg cm_os_version=${CM_OS_VERSION} \
- --build-arg cm_version="" \
- --build-arg cm_automation_repo="ctuning@mlcommons-ck" \
- --build-arg cm_automation_checkout="" \
- --build-arg cm_python_version="3.10.8" \
- --build-arg cm_mlperf_inference_loadgen_version="" \
- --build-arg cm_mlperf_inference_src_tags="" \
- --build-arg cm_mlperf_inference_src_version="" \
- --build-arg CM_ONNXRUNTIME_VERSION="1.13.1" \
- ${CM_CACHE} .
diff --git a/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python--ubuntu-cpu.Dockerfile b/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python--ubuntu-cpu.Dockerfile
deleted file mode 100644
index c82296c66..000000000
--- a/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python--ubuntu-cpu.Dockerfile
+++ /dev/null
@@ -1,96 +0,0 @@
-# Modular MLPerf container with the MLCommons CM automation meta-framework
-
-# Preparing OS
-ARG cm_os_name="ubuntu"
-ARG cm_os_version="22.04"
-
-FROM ${cm_os_name}:${cm_os_version}
-
-# Maintained by the MLCommons taskforce on automation and reproducibility and OctoML
-LABEL github="https://github.com/mlcommons/ck"
-LABEL maintainer="https://cKnowledge.org/mlcommons-taskforce"
-
-# Customization
-ARG CM_GH_TOKEN
-
-# Prepare shell and entry point
-SHELL ["/bin/bash", "-c"]
-ENTRYPOINT ["/bin/bash", "-c"]
-
-# Install system dependencies
-# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes
-RUN apt-get update -y
-RUN apt-get install -y lsb-release
-RUN apt-get install -y python3 python3-pip git wget sudo
-
-# Extra python deps
-RUN python3 -m pip install requests
-
-# CM version
-ARG cm_version=""
-ENV CM_VERSION="${cm_version}"
-RUN if [ "${CM_VERSION}" != "" ] ; then \
- python3 -m pip install cmind==${CM_VERSION} ; \
- else \
- python3 -m pip install cmind ; \
- fi
-
-# Setup docker environment
-ENTRYPOINT ["/bin/bash", "-c"]
-ENV TZ=US/Pacific
-RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone
-
-# Setup docker user
-# See example in https://github.com/mlcommons/GaNDLF/blob/master/Dockerfile-CPU
-RUN groupadd --gid 10001 cm
-RUN useradd --uid 10000 -g cm --create-home --shell /bin/bash cmuser
-RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
-
-USER cmuser:cm
-WORKDIR /home/cmuser
-
-# Check CM installation
-RUN lsb_release -a > sys-version-os.log
-RUN uname -a > sys-version-kernel.log
-RUN python3 --version > sys-version-python3.log
-RUN cm version > sys-version-cm.log
-
-################################################################################
-# Get CM automation repository
-ARG cm_automation_repo="mlcommons@ck"
-ARG cm_automation_repo_checkout=""
-ENV CM_AUTOMATION_REPO=${cm_automation_repo}
-ENV CM_AUTOMATION_REPO_CHECKOUT=${cm_automation_repo_checkout}
-RUN echo ${CM_AUTOMATION_REPO}
-RUN cm pull repo ${CM_AUTOMATION_REPO} --checkout=${CM_AUTOMATION_REPO_CHECKOUT}
-
-################################################################################
-# Install CM system dependencies
-RUN cm run script "get sys-utils-cm" --quiet
-
-# Detect/install python
-ARG cm_python_version=""
-RUN cm run script "get python3" --version=${cm_python_version}
-
-################################################################################
-# Build MLPerf loadgen
-ARG cm_mlperf_inference_loadgen_version=""
-RUN cm run script "get mlperf loadgen" --adr.compiler.tags=gcc --version=${cm_mlperf_inference_loadgen_version} --adr.inference-src-loadgen.version=${cm_mlperf_inference_loadgen_version} -v
-
-################################################################################
-# Install ONNX runtime
-ARG CM_ONNXRUNTIME_VERSION=""
-RUN cm run script "get generic-python-lib _onnxruntime" --version=${CM_ONNXRUNTIME_VERSION}
-
-ARG CM_MLPERF_CHOICE_BACKEND="onnxruntime"
-ARG CM_MLPERF_CHOICE_DEVICE="cpu"
-
-RUN cm run script --tags=python,app,loadgen-generic,_onnxruntime,_resnet50 \
- --adr.compiler.tags=gcc \
- --adr.python.version_min=3.8 \
- --quiet \
- --fake_run
-
-################################################################################
-# CMD entry point
-CMD /bin/bash
diff --git a/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python-auto.Dockerfile b/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python-auto.Dockerfile
deleted file mode 100644
index 195acdec6..000000000
--- a/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python-auto.Dockerfile
+++ /dev/null
@@ -1,33 +0,0 @@
-FROM ubuntu:20.04
-SHELL ["/bin/bash", "-c"]
-ARG CM_GH_TOKEN
-
-# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes
-# Install system dependencies
-RUN apt-get update -y
-RUN apt-get install -y python3 python3-pip git sudo wget
-
-# Install python packages
-RUN python3 -m pip install cmind requests
-
-# Setup docker environment
-ENTRYPOINT ["/bin/bash", "-c"]
-ENV TZ=US/Pacific
-ENV PATH=${PATH}:$HOME/.local/bin
-RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone
-
-# Setup docker user
-RUN groupadd cm
-RUN useradd -g cm --create-home --shell /bin/bash cmuser
-RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
-USER cmuser:cm
-WORKDIR /home/cmuser
-
-# Download CM repo for scripts
-RUN cm pull repo ctuning@mlcommons-ck
-
-# Install all system dependencies
-RUN cm run script --quiet --tags=get,sys-utils-cm
-
-# Run commands
-RUN cm run script --quiet --tags=python,app,loadgen-generic,_onnxruntime,_resnet50 --fake_run
diff --git a/script/app-loadgen-generic-python/tests/modular-cm-containers/run.bat b/script/app-loadgen-generic-python/tests/modular-cm-containers/run.bat
deleted file mode 100644
index 171aeecab..000000000
--- a/script/app-loadgen-generic-python/tests/modular-cm-containers/run.bat
+++ /dev/null
@@ -1,3 +0,0 @@
-call _common.bat
-
-docker run -it %CM_DOCKER_ORG%/%CM_DOCKER_NAME%-%CM_HW_TARGET%:%CM_OS_NAME%-%CM_OS_VERSION%
diff --git a/script/app-loadgen-generic-python/tests/modular-cm-containers/run.sh b/script/app-loadgen-generic-python/tests/modular-cm-containers/run.sh
deleted file mode 100644
index c82d4b7b1..000000000
--- a/script/app-loadgen-generic-python/tests/modular-cm-containers/run.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-. ./_common.sh
-
-docker run -it ${CM_DOCKER_ORG}/${CM_DOCKER_NAME}-%CM_HW_TARGET%:${CM_OS_NAME}-${CM_OS_VERSION}
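
The deletions above retire the legacy modular-cm-containers test harness outright instead of renaming it. For the files that are kept, the CM_ to MLC_ change is mechanical; a hypothetical sketch of such a prefix rewrite, not the tool actually used to produce this PR:

import pathlib
import re

# Rewrite CM_-prefixed identifiers to MLC_ in place. \bCM_ only matches at a
# word start, so identifiers whose CM_ sits mid-name are left alone.
CM_PREFIX = re.compile(r"\bCM_(?=[A-Z0-9_])")

def rename_prefixes(root):
    for path in pathlib.Path(root).rglob("*"):
        if path.is_file() and path.suffix in {".py", ".sh", ".bat", ".yaml"}:
            text = path.read_text(encoding="utf-8")
            updated = CM_PREFIX.sub("MLC_", text)
            if updated != text:
                path.write_text(updated, encoding="utf-8")

# Example: rename_prefixes("script/app-mlperf-automotive")
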
diff --git a/script/app-mlperf-automotive-mlcommons-python/customize.py b/script/app-mlperf-automotive-mlcommons-python/customize.py
index 70ae33af2..bfce993cd 100644
--- a/script/app-mlperf-automotive-mlcommons-python/customize.py
+++ b/script/app-mlperf-automotive-mlcommons-python/customize.py
@@ -12,147 +12,147 @@ def preprocess(i):
state = i['state']
script_path = i['run_script_input']['path']
- if env.get('CM_MLPERF_SKIP_RUN', '') == "yes":
+ if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes":
return {'return': 0}
- if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes":
+ if env.get('MLC_RUN_DOCKER_CONTAINER', '') == "yes":
return {'return': 0}
- if env.get('CM_MLPERF_POWER', '') == "yes":
+ if env.get('MLC_MLPERF_POWER', '') == "yes":
power = "yes"
else:
power = "no"
- rerun = True if env.get("CM_RERUN", "") != '' else False
+ rerun = True if env.get("MLC_RERUN", "") != '' else False
- if 'CM_MLPERF_LOADGEN_SCENARIO' not in env:
- env['CM_MLPERF_LOADGEN_SCENARIO'] = "Offline"
+ if 'MLC_MLPERF_LOADGEN_SCENARIO' not in env:
+ env['MLC_MLPERF_LOADGEN_SCENARIO'] = "Offline"
- if 'CM_MLPERF_LOADGEN_MODE' not in env:
- env['CM_MLPERF_LOADGEN_MODE'] = "accuracy"
+ if 'MLC_MLPERF_LOADGEN_MODE' not in env:
+ env['MLC_MLPERF_LOADGEN_MODE'] = "accuracy"
- if 'CM_MODEL' not in env:
+ if 'MLC_MODEL' not in env:
return {
'return': 1, 'error': "Please select a variation specifying the model to run"}
- # if env['CM_MODEL'] == "resnet50":
- # cmd = "cp " + os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt") + " " + os.path.join(env['CM_DATASET_PATH'],
+ # if env['MLC_MODEL'] == "resnet50":
+ # cmd = "cp " + os.path.join(env['MLC_DATASET_AUX_PATH'], "val.txt") + " " + os.path.join(env['MLC_DATASET_PATH'],
# "val_map.txt")
# ret = os.system(cmd)
- env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] = " " + \
- env.get('CM_MLPERF_LOADGEN_EXTRA_OPTIONS', '') + " "
+ env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] = " " + \
+ env.get('MLC_MLPERF_LOADGEN_EXTRA_OPTIONS', '') + " "
- if 'CM_MLPERF_LOADGEN_QPS' not in env:
- env['CM_MLPERF_LOADGEN_QPS_OPT'] = ""
+ if 'MLC_MLPERF_LOADGEN_QPS' not in env:
+ env['MLC_MLPERF_LOADGEN_QPS_OPT'] = ""
else:
- env['CM_MLPERF_LOADGEN_QPS_OPT'] = " --qps " + \
- env['CM_MLPERF_LOADGEN_QPS']
+ env['MLC_MLPERF_LOADGEN_QPS_OPT'] = " --qps " + \
+ env['MLC_MLPERF_LOADGEN_QPS']
- env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += env['CM_MLPERF_LOADGEN_QPS_OPT']
+ env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += env['MLC_MLPERF_LOADGEN_QPS_OPT']
- if 'CM_NUM_THREADS' not in env:
- if 'CM_MINIMIZE_THREADS' in env:
- env['CM_NUM_THREADS'] = str(int(env['CM_HOST_CPU_TOTAL_CORES']) //
- (int(env.get('CM_HOST_CPU_SOCKETS', '1')) * int(env.get('CM_HOST_CPU_TOTAL_CORES', '1'))))
+ if 'MLC_NUM_THREADS' not in env:
+ if 'MLC_MINIMIZE_THREADS' in env:
+ env['MLC_NUM_THREADS'] = str(int(env['MLC_HOST_CPU_TOTAL_CORES']) //
+ (int(env.get('MLC_HOST_CPU_SOCKETS', '1')) * int(env.get('MLC_HOST_CPU_TOTAL_CORES', '1'))))
else:
- env['CM_NUM_THREADS'] = env.get('CM_HOST_CPU_TOTAL_CORES', '1')
+ env['MLC_NUM_THREADS'] = env.get('MLC_HOST_CPU_TOTAL_CORES', '1')
- if env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '') != '' and not env.get(
- 'CM_MLPERF_MODEL_SKIP_BATCHING', False):
- env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --max-batchsize " + \
- str(env['CM_MLPERF_LOADGEN_MAX_BATCHSIZE'])
+ if env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '') != '' and not env.get(
+ 'MLC_MLPERF_MODEL_SKIP_BATCHING', False):
+ env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --max-batchsize " + \
+ str(env['MLC_MLPERF_LOADGEN_MAX_BATCHSIZE'])
- if env.get('CM_MLPERF_LOADGEN_BATCH_SIZE', '') != '':
- env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --batch-size " + \
- str(env['CM_MLPERF_LOADGEN_BATCH_SIZE'])
+ if env.get('MLC_MLPERF_LOADGEN_BATCH_SIZE', '') != '':
+ env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --batch-size " + \
+ str(env['MLC_MLPERF_LOADGEN_BATCH_SIZE'])
- if env.get('CM_MLPERF_LOADGEN_QUERY_COUNT', '') != '' and not env.get(
- 'CM_TMP_IGNORE_MLPERF_QUERY_COUNT', False) and env.get('CM_MLPERF_RUN_STYLE', '') != "valid":
- env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --count " + \
- env['CM_MLPERF_LOADGEN_QUERY_COUNT']
+ if env.get('MLC_MLPERF_LOADGEN_QUERY_COUNT', '') != '' and not env.get(
+ 'MLC_TMP_IGNORE_MLPERF_QUERY_COUNT', False) and env.get('MLC_MLPERF_RUN_STYLE', '') != "valid":
+ env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --count " + \
+ env['MLC_MLPERF_LOADGEN_QUERY_COUNT']
print("Using MLCommons Inference source from '" +
- env['CM_MLPERF_INFERENCE_SOURCE'] + "'")
+ env['MLC_MLPERF_INFERENCE_SOURCE'] + "'")
- if 'CM_MLPERF_CONF' not in env:
- env['CM_MLPERF_CONF'] = os.path.join(
- env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf")
+ if 'MLC_MLPERF_CONF' not in env:
+ env['MLC_MLPERF_CONF'] = os.path.join(
+ env['MLC_MLPERF_INFERENCE_SOURCE'], "mlperf.conf")
x = "" if os_info['platform'] == 'windows' else "'"
- if "llama2-70b" in env['CM_MODEL']:
- env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf-conf " + \
- x + env['CM_MLPERF_CONF'] + x
+ if "llama2-70b" in env['MLC_MODEL']:
+ env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf-conf " + \
+ x + env['MLC_MLPERF_CONF'] + x
else:
- env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf_conf " + \
- x + env['CM_MLPERF_CONF'] + x
+ env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf_conf " + \
+ x + env['MLC_MLPERF_CONF'] + x
- env['MODEL_DIR'] = env.get('CM_ML_MODEL_PATH')
+ env['MODEL_DIR'] = env.get('MLC_ML_MODEL_PATH')
if not env['MODEL_DIR']:
env['MODEL_DIR'] = os.path.dirname(
env.get(
- 'CM_MLPERF_CUSTOM_MODEL_PATH',
- env.get('CM_ML_MODEL_FILE_WITH_PATH')))
+ 'MLC_MLPERF_CUSTOM_MODEL_PATH',
+ env.get('MLC_ML_MODEL_FILE_WITH_PATH')))
RUN_CMD = ""
- scenario = env['CM_MLPERF_LOADGEN_SCENARIO']
+ scenario = env['MLC_MLPERF_LOADGEN_SCENARIO']
scenario_extra_options = ''
- NUM_THREADS = env['CM_NUM_THREADS']
- if int(NUM_THREADS) > 2 and env['CM_MLPERF_DEVICE'] == "gpu":
+ NUM_THREADS = env['MLC_NUM_THREADS']
+ if int(NUM_THREADS) > 2 and env['MLC_MLPERF_DEVICE'] == "gpu":
NUM_THREADS = "2" # Don't use more than 2 threads when run on GPU
- if env['CM_MODEL'] in ['resnet50', 'retinanet', 'stable-diffusion-xl']:
+ if env['MLC_MODEL'] in ['resnet50', 'retinanet', 'stable-diffusion-xl']:
scenario_extra_options += " --threads " + NUM_THREADS
- ml_model_name = env['CM_MODEL']
- if 'CM_MLPERF_USER_CONF' in env:
- user_conf_path = env['CM_MLPERF_USER_CONF']
+ ml_model_name = env['MLC_MODEL']
+ if 'MLC_MLPERF_USER_CONF' in env:
+ user_conf_path = env['MLC_MLPERF_USER_CONF']
x = "" if os_info['platform'] == 'windows' else "'"
scenario_extra_options += " --user_conf " + x + user_conf_path + x
- mode = env['CM_MLPERF_LOADGEN_MODE']
+ mode = env['MLC_MLPERF_LOADGEN_MODE']
mode_extra_options = ""
# Grigori disabled this block for ABTF to preprocess the data set on the fly for now;
# we can later move it to a separate data set preprocessing script
-# if 'CM_DATASET_PREPROCESSED_PATH' in env and env['CM_MODEL'] in [ 'resnet50', 'retinanet' ]:
-# #dataset_options = " --use_preprocessed_dataset --preprocessed_dir "+env['CM_DATASET_PREPROCESSED_PATH']
-# if env.get('CM_MLPERF_LAST_RELEASE') not in [ "v2.0", "v2.1" ]:
-# dataset_options = " --use_preprocessed_dataset --cache_dir "+env['CM_DATASET_PREPROCESSED_PATH']
+# if 'MLC_DATASET_PREPROCESSED_PATH' in env and env['MLC_MODEL'] in [ 'resnet50', 'retinanet' ]:
+# #dataset_options = " --use_preprocessed_dataset --preprocessed_dir "+env['MLC_DATASET_PREPROCESSED_PATH']
+# if env.get('MLC_MLPERF_LAST_RELEASE') not in [ "v2.0", "v2.1" ]:
+# dataset_options = " --use_preprocessed_dataset --cache_dir "+env['MLC_DATASET_PREPROCESSED_PATH']
# else:
# dataset_options = ""
-# if env['CM_MODEL'] == "retinanet":
-# dataset_options += " --dataset-list "+ env['CM_DATASET_ANNOTATIONS_FILE_PATH']
-# elif env['CM_MODEL'] == "resnet50":
-# dataset_options += " --dataset-list "+ os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt")
-# env['DATA_DIR'] = env.get('CM_DATASET_PREPROCESSED_PATH')
+# if env['MLC_MODEL'] == "retinanet":
+# dataset_options += " --dataset-list "+ env['MLC_DATASET_ANNOTATIONS_FILE_PATH']
+# elif env['MLC_MODEL'] == "resnet50":
+# dataset_options += " --dataset-list "+ os.path.join(env['MLC_DATASET_AUX_PATH'], "val.txt")
+# env['DATA_DIR'] = env.get('MLC_DATASET_PREPROCESSED_PATH')
# else:
-# if 'CM_DATASET_PREPROCESSED_PATH' in env:
-# env['DATA_DIR'] = env.get('CM_DATASET_PREPROCESSED_PATH')
+# if 'MLC_DATASET_PREPROCESSED_PATH' in env:
+# env['DATA_DIR'] = env.get('MLC_DATASET_PREPROCESSED_PATH')
# else:
-# env['DATA_DIR'] = env.get('CM_DATASET_PATH')
+# env['DATA_DIR'] = env.get('MLC_DATASET_PATH')
# dataset_options = ''
# Grigori added for ABTF
-# dataset_path = env.get('CM_DATASET_PATH')
+# dataset_path = env.get('MLC_DATASET_PATH')
# env['DATA_DIR'] = dataset_path
-# dataset_options = " --dataset-list " + env['CM_DATASET_ANNOTATIONS_FILE_PATH']
+# dataset_options = " --dataset-list " + env['MLC_DATASET_ANNOTATIONS_FILE_PATH']
# dataset_options += " --cache_dir " + os.path.join(script_path, 'preprocessed-dataset')
dataset_options = ''
- if env.get('CM_MLPERF_EXTRA_DATASET_ARGS', '') != '':
- dataset_options += " " + env['CM_MLPERF_EXTRA_DATASET_ARGS']
+ if env.get('MLC_MLPERF_EXTRA_DATASET_ARGS', '') != '':
+ dataset_options += " " + env['MLC_MLPERF_EXTRA_DATASET_ARGS']
if mode == "accuracy":
mode_extra_options += " --accuracy"
- env['CM_OUTPUT_PREDICTIONS_PATH'] = os.path.join(
- env['CM_DATASET_MLCOMMONS_COGNATA_PATH'],
- env['CM_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS'],
+ env['MLC_OUTPUT_PREDICTIONS_PATH'] = os.path.join(
+ env['MLC_DATASET_MLCOMMONS_COGNATA_PATH'],
+ env['MLC_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS'],
'Cognata_Camera_01_8M_png',
'output')
@@ -161,13 +161,13 @@ def preprocess(i):
elif mode == "compliance":
- audit_full_path = env['CM_MLPERF_INFERENCE_AUDIT_PATH']
+ audit_full_path = env['MLC_MLPERF_INFERENCE_AUDIT_PATH']
mode_extra_options = " --audit '" + audit_full_path + "'"
- if env.get('CM_MLPERF_OUTPUT_DIR', '') == '':
- env['CM_MLPERF_OUTPUT_DIR'] = os.getcwd()
+ if env.get('MLC_MLPERF_OUTPUT_DIR', '') == '':
+ env['MLC_MLPERF_OUTPUT_DIR'] = os.getcwd()
- mlperf_implementation = env.get('CM_MLPERF_IMPLEMENTATION', 'reference')
+ mlperf_implementation = env.get('MLC_MLPERF_IMPLEMENTATION', 'reference')
# Generate CMD
@@ -176,25 +176,25 @@ def preprocess(i):
cmd, run_dir = get_run_cmd_reference(
os_info, env, scenario_extra_options, mode_extra_options, dataset_options, script_path)
- if env.get('CM_NETWORK_LOADGEN', '') == "lon":
+ if env.get('MLC_NETWORK_LOADGEN', '') == "lon":
run_cmd = i['state']['mlperf_inference_run_cmd']
- env['CM_SSH_RUN_COMMANDS'] = []
- env['CM_SSH_RUN_COMMANDS'].append(
+ env['MLC_SSH_RUN_COMMANDS'] = []
+ env['MLC_SSH_RUN_COMMANDS'].append(
run_cmd.replace(
"--network=lon",
"--network=sut") + " &")
- env['CM_MLPERF_RUN_CMD'] = cmd
- env['CM_RUN_DIR'] = run_dir
- env['CM_RUN_CMD'] = cmd
- env['CK_PROGRAM_TMP_DIR'] = env.get('CM_ML_MODEL_PATH') # for tvm
+ env['MLC_MLPERF_RUN_CMD'] = cmd
+ env['MLC_RUN_DIR'] = run_dir
+ env['MLC_RUN_CMD'] = cmd
+ env['CK_PROGRAM_TMP_DIR'] = env.get('MLC_ML_MODEL_PATH') # for tvm
- if env.get('CM_HOST_PLATFORM_FLAVOR', '') == "arm64":
- env['CM_HOST_PLATFORM_FLAVOR'] = "aarch64"
+ if env.get('MLC_HOST_PLATFORM_FLAVOR', '') == "arm64":
+ env['MLC_HOST_PLATFORM_FLAVOR'] = "aarch64"
- if not env.get('CM_COGNATA_ACCURACY_DUMP_FILE'):
- env['CM_COGNATA_ACCURACY_DUMP_FILE'] = os.path.join(
+ if not env.get('MLC_COGNATA_ACCURACY_DUMP_FILE'):
+ env['MLC_COGNATA_ACCURACY_DUMP_FILE'] = os.path.join(
env['OUTPUT_DIR'], "accuracy.txt")
return {'return': 0}
@@ -208,33 +208,33 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options,
##########################################################################
# Grigori added for ABTF demo
- if env['CM_MODEL'] in ['retinanet']:
+ if env['MLC_MODEL'] in ['retinanet']:
run_dir = os.path.join(script_path, 'ref')
env['RUN_DIR'] = run_dir
- env['OUTPUT_DIR'] = env['CM_MLPERF_OUTPUT_DIR']
+ env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR']
- cognata_dataset_path = env['CM_DATASET_MLCOMMONS_COGNATA_PATH']
-# cognata_dataset_path = env['CM_DATASET_PATH'] # Using open images
+ cognata_dataset_path = env['MLC_DATASET_MLCOMMONS_COGNATA_PATH']
+# cognata_dataset_path = env['MLC_DATASET_PATH'] # Using open images
# dataset for some tests
path_to_model = env.get(
- 'CM_MLPERF_CUSTOM_MODEL_PATH',
+ 'MLC_MLPERF_CUSTOM_MODEL_PATH',
env.get(
- 'CM_ML_MODEL_FILE_WITH_PATH',
- env.get('CM_ML_MODEL_CODE_WITH_PATH')))
+ 'MLC_ML_MODEL_FILE_WITH_PATH',
+ env.get('MLC_ML_MODEL_CODE_WITH_PATH')))
env['MODEL_FILE'] = path_to_model
- cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " " + os.path.join(run_dir, "python", "main.py") + " --profile " + env['CM_MODEL'] + "-" + env['CM_MLPERF_BACKEND'] + \
+ cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " " + os.path.join(run_dir, "python", "main.py") + " --profile " + env['MLC_MODEL'] + "-" + env['MLC_MLPERF_BACKEND'] + \
" --model=" + q + path_to_model + q + \
- " --dataset=" + env["CM_MLPERF_VISION_DATASET_OPTION"] + \
+ " --dataset=" + env["MLC_MLPERF_VISION_DATASET_OPTION"] + \
" --dataset-path=" + q + cognata_dataset_path + q + \
" --cache_dir=" + q + os.path.join(script_path, 'tmp-preprocessed-dataset') + q + \
- " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + \
+ " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + " " + \
" --output " + q + env['OUTPUT_DIR'] + q + " " + \
- env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
+ env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
scenario_extra_options + mode_extra_options + dataset_options
##########################################################################
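
After this rename, callers that still export the legacy CM_* names would silently lose their settings, since preprocess only consults MLC_* keys. A compatibility shim along these lines (hypothetical, not part of this PR) could map old keys forward first:

import os

def adopt_legacy_cm_env(env):
    # Copy each legacy CM_* key to its MLC_* equivalent without clobbering
    # values that were already set under the new name.
    for key, value in list(env.items()):
        if key.startswith("CM_"):
            env.setdefault("MLC_" + key[len("CM_"):], value)
    return env

env = adopt_legacy_cm_env(dict(os.environ))
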
diff --git a/script/app-mlperf-automotive-mlcommons-python/meta.yaml b/script/app-mlperf-automotive-mlcommons-python/meta.yaml
index b22f119d6..e5567ac27 100644
--- a/script/app-mlperf-automotive-mlcommons-python/meta.yaml
+++ b/script/app-mlperf-automotive-mlcommons-python/meta.yaml
@@ -17,70 +17,70 @@ tags:
# Default environment
default_env:
- CM_MLPERF_LOADGEN_MODE: accuracy
- CM_MLPERF_LOADGEN_SCENARIO: Offline
- CM_MLPERF_LOADGEN_BUILD_FROM_SRC: 'on'
- CM_OUTPUT_FOLDER_NAME: test_results
- CM_MLPERF_RUN_STYLE: test
- CM_TEST_QUERY_COUNT: '10'
- CM_MLPERF_QUANTIZATION: off
- CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: reference
- CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX: ''
+ MLC_MLPERF_LOADGEN_MODE: accuracy
+ MLC_MLPERF_LOADGEN_SCENARIO: Offline
+ MLC_MLPERF_LOADGEN_BUILD_FROM_SRC: 'on'
+ MLC_OUTPUT_FOLDER_NAME: test_results
+ MLC_MLPERF_RUN_STYLE: test
+ MLC_TEST_QUERY_COUNT: '10'
+ MLC_MLPERF_QUANTIZATION: off
+ MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: reference
+ MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX: ''
# Map script inputs to environment variables
input_mapping:
- device: CM_MLPERF_DEVICE
- count: CM_MLPERF_LOADGEN_QUERY_COUNT
- docker: CM_RUN_DOCKER_CONTAINER
- hw_name: CM_HW_NAME
+ device: MLC_MLPERF_DEVICE
+ count: MLC_MLPERF_LOADGEN_QUERY_COUNT
+ docker: MLC_RUN_DOCKER_CONTAINER
+ hw_name: MLC_HW_NAME
imagenet_path: IMAGENET_PATH
- max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE
- mode: CM_MLPERF_LOADGEN_MODE
- num_threads: CM_NUM_THREADS
- threads: CM_NUM_THREADS
- dataset: CM_MLPERF_VISION_DATASET_OPTION
- model: CM_MLPERF_CUSTOM_MODEL_PATH
+ max_batchsize: MLC_MLPERF_LOADGEN_MAX_BATCHSIZE
+ mode: MLC_MLPERF_LOADGEN_MODE
+ num_threads: MLC_NUM_THREADS
+ threads: MLC_NUM_THREADS
+ dataset: MLC_MLPERF_VISION_DATASET_OPTION
+ model: MLC_MLPERF_CUSTOM_MODEL_PATH
output_dir: OUTPUT_BASE_DIR
- power: CM_MLPERF_POWER
- power_server: CM_MLPERF_POWER_SERVER_ADDRESS
- ntp_server: CM_MLPERF_POWER_NTP_SERVER
- max_amps: CM_MLPERF_POWER_MAX_AMPS
- max_volts: CM_MLPERF_POWER_MAX_VOLTS
- regenerate_files: CM_REGENERATE_MEASURE_FILES
- rerun: CM_RERUN
- scenario: CM_MLPERF_LOADGEN_SCENARIO
- test_query_count: CM_TEST_QUERY_COUNT
- clean: CM_MLPERF_CLEAN_SUBMISSION_DIR
- dataset_args: CM_MLPERF_EXTRA_DATASET_ARGS
- target_qps: CM_MLPERF_LOADGEN_TARGET_QPS
- target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY
- offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
- server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS
- singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
- multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
- output: CM_MLPERF_OUTPUT_DIR
+ power: MLC_MLPERF_POWER
+ power_server: MLC_MLPERF_POWER_SERVER_ADDRESS
+ ntp_server: MLC_MLPERF_POWER_NTP_SERVER
+ max_amps: MLC_MLPERF_POWER_MAX_AMPS
+ max_volts: MLC_MLPERF_POWER_MAX_VOLTS
+ regenerate_files: MLC_REGENERATE_MEASURE_FILES
+ rerun: MLC_RERUN
+ scenario: MLC_MLPERF_LOADGEN_SCENARIO
+ test_query_count: MLC_TEST_QUERY_COUNT
+ clean: MLC_MLPERF_CLEAN_SUBMISSION_DIR
+ dataset_args: MLC_MLPERF_EXTRA_DATASET_ARGS
+ target_qps: MLC_MLPERF_LOADGEN_TARGET_QPS
+ target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY
+ offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
+ server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS
+ singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
+ multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
+ output: MLC_MLPERF_OUTPUT_DIR
# Duplicate CM environment variables to the ones used in native apps
env_key_mappings:
- CM_HOST_: HOST_
- CM_ML_: ML_
- CM_MLPERF_TVM: MLPERF_TVM
- CM_MLPERF_DELETE: MLPERF_DELETE
+ MLC_HOST_: HOST_
+ MLC_ML_: ML_
+ MLC_MLPERF_TVM: MLPERF_TVM
+ MLC_MLPERF_DELETE: MLPERF_DELETE
# Env keys which are exposed to higher level scripts
new_env_keys:
- - CM_MLPERF_*
- - CM_DATASET_*
- - CM_HW_NAME
- - CM_COGNATA_ACCURACY_DUMP_FILE
- - CM_OUTPUT_PREDICTIONS_PATH
- - CM_ML_MODEL_*
- - CM_MAX_EXAMPLES
+ - MLC_MLPERF_*
+ - MLC_DATASET_*
+ - MLC_HW_NAME
+ - MLC_COGNATA_ACCURACY_DUMP_FILE
+ - MLC_OUTPUT_PREDICTIONS_PATH
+ - MLC_ML_MODEL_*
+ - MLC_MAX_EXAMPLES
new_state_keys:
- mlperf-inference-implementation
- - CM_SUT_*
+ - MLC_SUT_*
# Dependencies on other CM scripts
deps:
@@ -123,10 +123,10 @@ deps:
- ml-engine-onnxruntime
- onnxruntime
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- onnxruntime
- tvm-onnx
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- cpu
- rocm
@@ -135,36 +135,36 @@ deps:
names:
- ml-engine-onnxruntime-cuda
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- onnxruntime
- tvm-onnx
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- gpu
skip_if_env:
- CM_MODEL:
+ MLC_MODEL:
- 3d-unet-99
- 3d-unet-99.9
## resnet50 and 3d-unet need both onnxruntime and onnxruntime_gpu on cuda
- tags: get,generic-python-lib,_onnxruntime
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- onnxruntime
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- gpu
- CM_MODEL:
+ MLC_MODEL:
- 3d-unet-99
- 3d-unet-99.9
- resnet50
- tags: get,generic-python-lib,_onnxruntime_gpu
env:
- CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: ""
+ MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: ""
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- onnxruntime
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- gpu
- CM_MODEL:
+ MLC_MODEL:
- 3d-unet-99
- 3d-unet-99.9
- resnet50
@@ -175,10 +175,10 @@ deps:
- ml-engine-pytorch
- pytorch
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- pytorch
- tvm-pytorch
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- cpu
- rocm
@@ -188,11 +188,11 @@ deps:
- ml-engine-pytorch
- pytorch
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- pytorch
- tvm-pytorch
- ray
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- gpu
## Torchvision (CPU)
@@ -200,10 +200,10 @@ deps:
names:
- ml-engine-torchvision
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- pytorch
- tvm-pytorch
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- cpu
## Torchvision (CUDA)
@@ -211,11 +211,11 @@ deps:
names:
- ml-engine-torchvision
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- pytorch
- tvm-pytorch
- ray
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- gpu
## tensorrt
@@ -223,7 +223,7 @@ deps:
names:
- ml-engine-tensorrt
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- ray
## torch_tensorrt
@@ -231,7 +231,7 @@ deps:
names:
- ml-engine-torch_tensorrt
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- ray
## Ray
@@ -239,7 +239,7 @@ deps:
names:
- ray
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- ray
@@ -250,7 +250,7 @@ deps:
- ml-engine-tensorflow
- tensorflow
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- tf
- tflite
@@ -267,7 +267,7 @@ deps:
# Install MLPerf loadgen
- tags: get,generic-python-lib,_package.mlcommons-loadgen
enable_if_env:
- CM_MLPERF_LOADGEN_BUILD_FROM_SRC:
+ MLC_MLPERF_LOADGEN_BUILD_FROM_SRC:
- "off"
names:
- loadgen
@@ -275,7 +275,7 @@ deps:
- tags: get,loadgen
enable_if_any_env:
- CM_MLPERF_LOADGEN_BUILD_FROM_SRC:
+ MLC_MLPERF_LOADGEN_BUILD_FROM_SRC:
- "on"
names:
- loadgen
@@ -287,7 +287,7 @@ deps:
# # Download MLPerf inference source
# - tags: get,mlcommons,inference,src
# env:
-# CM_GET_MLPERF_IMPLEMENTATION_ONLY: 'yes'
+# MLC_GET_MLPERF_IMPLEMENTATION_ONLY: 'yes'
# names:
# - mlperf-implementation
@@ -301,7 +301,7 @@ prehook_deps:
- remote-run-cmds
tags: remote,run,cmds
enable_if_env:
- CM_ASSH_RUN_COMMANDS:
+ MLC_ASSH_RUN_COMMANDS:
- "on"
@@ -311,7 +311,7 @@ posthook_deps:
- mlperf-runner
tags: benchmark-mlperf
skip_if_env:
- CM_MLPERF_SKIP_RUN:
+ MLC_MLPERF_SKIP_RUN:
- "on"
@@ -331,23 +331,23 @@ variations:
group: implementation
default: true
env:
- CM_MLPERF_PYTHON: 'yes'
- CM_MLPERF_IMPLEMENTATION: reference
+ MLC_MLPERF_PYTHON: 'yes'
+ MLC_MLPERF_IMPLEMENTATION: reference
# ML engine
onnxruntime:
group: framework
env:
- CM_MLPERF_BACKEND: onnxruntime
+ MLC_MLPERF_BACKEND: onnxruntime
onnxruntime,cpu:
env:
- CM_MLPERF_BACKEND_VERSION: <<<CM_ONNXRUNTIME_VERSION>>>
+ MLC_MLPERF_BACKEND_VERSION: <<<MLC_ONNXRUNTIME_VERSION>>>
onnxruntime,cuda:
env:
- CM_MLPERF_BACKEND_VERSION: <<<CM_ONNXRUNTIME_GPU_VERSION>>>
+ MLC_MLPERF_BACKEND_VERSION: <<<MLC_ONNXRUNTIME_GPU_VERSION>>>
ONNXRUNTIME_PREFERRED_EXECUTION_PROVIDER: "CUDAExecutionProvider"
@@ -362,8 +362,8 @@ variations:
ml-model:
tags: raw,_pytorch
env:
- CM_MLPERF_BACKEND: pytorch
- CM_MLPERF_BACKEND_VERSION: <<<CM_TORCH_VERSION>>>
+ MLC_MLPERF_BACKEND: pytorch
+ MLC_MLPERF_BACKEND_VERSION: <<<MLC_TORCH_VERSION>>>
@@ -376,9 +376,9 @@ variations:
# - tags: get,generic-python-lib,_pycocotools
#
# env:
-# CM_MODEL: retinanet
-# CM_MLPERF_USE_MLCOMMONS_RUN_SCRIPT: 'yes'
-# CM_MLPERF_LOADGEN_MAX_BATCHSIZE: '1'
+# MLC_MODEL: retinanet
+# MLC_MLPERF_USE_MLCOMMONS_RUN_SCRIPT: 'yes'
+# MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: '1'
abtf-demo-model:
@@ -401,7 +401,7 @@ variations:
- ml-model-abtf
env:
- CM_MODEL: retinanet
+ MLC_MODEL: retinanet
abtf-poc-model:
group: models
@@ -420,7 +420,7 @@ variations:
- cocoeval
- tags: get,dataset,raw,mlcommons-cognata,_abtf-poc
skip_if_env:
- CM_RUN_STATE_DOCKER:
+ MLC_RUN_STATE_DOCKER:
- 'yes'
names:
- raw-dataset-mlcommons-cognata
@@ -429,14 +429,14 @@ variations:
- ml-model-abtf
env:
- CM_MODEL: retinanet
+ MLC_MODEL: retinanet
# Target devices
cpu:
group: device
default: true
env:
- CM_MLPERF_DEVICE: cpu
+ MLC_MLPERF_DEVICE: cpu
CUDA_VISIBLE_DEVICES: ''
USE_CUDA: no
USE_GPU: no
@@ -444,7 +444,7 @@ variations:
cuda:
group: device
env:
- CM_MLPERF_DEVICE: gpu
+ MLC_MLPERF_DEVICE: gpu
USE_CUDA: yes
USE_GPU: yes
@@ -453,17 +453,17 @@ variations:
# Loadgen scenarios
offline:
env:
- CM_MLPERF_LOADGEN_SCENARIO: Offline
+ MLC_MLPERF_LOADGEN_SCENARIO: Offline
multistream:
env:
- CM_MLPERF_LOADGEN_SCENARIO: MultiStream
+ MLC_MLPERF_LOADGEN_SCENARIO: MultiStream
singlestream:
env:
- CM_MLPERF_LOADGEN_SCENARIO: SingleStream
- CM_MLPERF_LOADGEN_MAX_BATCHSIZE: 1
+ MLC_MLPERF_LOADGEN_SCENARIO: SingleStream
+ MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: 1
server:
env:
- CM_MLPERF_LOADGEN_SCENARIO: Server
+ MLC_MLPERF_LOADGEN_SCENARIO: Server
mvp_demo:
env:
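
The input_mapping block above is what turns script flags such as --scenario into MLC_* environment variables before customize.py runs. A small sketch of that translation; the mapping excerpt comes from the diff, the helper function is hypothetical:

# Excerpt of the mapping above; the full table in meta.yaml is much longer.
INPUT_MAPPING = {
    "device": "MLC_MLPERF_DEVICE",
    "mode": "MLC_MLPERF_LOADGEN_MODE",
    "scenario": "MLC_MLPERF_LOADGEN_SCENARIO",
    "test_query_count": "MLC_TEST_QUERY_COUNT",
}

def map_inputs(flags):
    # Flags without a mapping are ignored here; the real automation routes
    # them elsewhere.
    return {INPUT_MAPPING[k]: str(v) for k, v in flags.items() if k in INPUT_MAPPING}

# map_inputs({"scenario": "Offline", "mode": "accuracy"}) returns
# {"MLC_MLPERF_LOADGEN_SCENARIO": "Offline", "MLC_MLPERF_LOADGEN_MODE": "accuracy"}
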
diff --git a/script/app-mlperf-automotive-mlcommons-python/ref/python/backend_pytorch_native.py b/script/app-mlperf-automotive-mlcommons-python/ref/python/backend_pytorch_native.py
index ec5401979..063cb1ce4 100644
--- a/script/app-mlperf-automotive-mlcommons-python/ref/python/backend_pytorch_native.py
+++ b/script/app-mlperf-automotive-mlcommons-python/ref/python/backend_pytorch_native.py
@@ -36,15 +36,15 @@ def image_format(self):
def load(self, model_path, inputs=None, outputs=None):
# From ABTF code
- sys.path.insert(0, os.environ['CM_ML_MODEL_CODE_WITH_PATH'])
+ sys.path.insert(0, os.environ['MLC_ML_MODEL_CODE_WITH_PATH'])
from src.transform import SSDTransformer
from src.utils import generate_dboxes, Encoder, colors, coco_classes
from src.model import SSD, ResNet
- abtf_model_config = os.environ.get('CM_ABTF_ML_MODEL_CONFIG', '')
+ abtf_model_config = os.environ.get('MLC_ABTF_ML_MODEL_CONFIG', '')
- num_classes_str = os.environ.get('CM_ABTF_NUM_CLASSES', '').strip()
+ num_classes_str = os.environ.get('MLC_ABTF_NUM_CLASSES', '').strip()
self.num_classes = int(
num_classes_str) if num_classes_str != '' else 15
diff --git a/script/app-mlperf-automotive-mlcommons-python/ref/python/cognata.py b/script/app-mlperf-automotive-mlcommons-python/ref/python/cognata.py
index 005fa4e2d..752f6dc77 100644
--- a/script/app-mlperf-automotive-mlcommons-python/ref/python/cognata.py
+++ b/script/app-mlperf-automotive-mlcommons-python/ref/python/cognata.py
@@ -74,13 +74,13 @@ def __init__(self, data_path, image_list, name, use_cache=0, image_size=None,
# Grigori added for tests
# Check if overridden by external environment for tests
x = os.environ.get(
- 'CM_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS',
+ 'MLC_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS',
'').strip()
if x != '':
folders = x.split(';') if ';' in x else [x]
x = os.environ.get(
- 'CM_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES',
+ 'MLC_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES',
'').strip()
if x != '':
cameras = x.split(';') if ';' in x else [x]
@@ -103,7 +103,7 @@ def __init__(self, data_path, image_list, name, use_cache=0, image_size=None,
print(' Time: {:.2f} sec.'.format(time.time() - start))
if os.environ.get(
- 'CM_ABTF_ML_MODEL_TRAINING_FORCE_COGNATA_LABELS', '') == 'yes':
+ 'MLC_ABTF_ML_MODEL_TRAINING_FORCE_COGNATA_LABELS', '') == 'yes':
label_map = cognata_labels.label_map
label_info = cognata_labels.label_info
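
The override parsing above splits semicolon-separated lists from the environment. Note that str.split already returns a single-element list when no separator is present, so the conditional in the source is defensive rather than required; a standalone sketch of the same parsing:

def parse_override_list(value):
    # "A;B" yields ["A", "B"], "A" yields ["A"], and "" yields [].
    value = value.strip()
    if not value:
        return []
    return value.split(";")

# parse_override_list("10002_Urban_Clear_Morning") == ["10002_Urban_Clear_Morning"]
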
diff --git a/script/app-mlperf-automotive-mlcommons-python/ref/python/main.py b/script/app-mlperf-automotive-mlcommons-python/ref/python/main.py
index e4462da8c..255554f82 100644
--- a/script/app-mlperf-automotive-mlcommons-python/ref/python/main.py
+++ b/script/app-mlperf-automotive-mlcommons-python/ref/python/main.py
@@ -378,8 +378,8 @@ def add_results(final_results, name, result_dict,
if "mAP" in result_dict:
result["mAP"] = 100. * result_dict["mAP"]
acc_str += ", mAP={:.3f}%".format(result["mAP"])
- if os.environ.get('CM_COGNATA_ACCURACY_DUMP_FILE', '') != '':
- accuracy_file = os.environ['CM_COGNATA_ACCURACY_DUMP_FILE']
+ if os.environ.get('MLC_COGNATA_ACCURACY_DUMP_FILE', '') != '':
+ accuracy_file = os.environ['MLC_COGNATA_ACCURACY_DUMP_FILE']
with open(accuracy_file, "w") as f:
f.write("{:.3f}%".format(result["mAP"]))
@@ -489,7 +489,7 @@ def main():
count = ds.get_item_count()
# warmup
- if os.environ.get('CM_ABTF_ML_MODEL_SKIP_WARMUP',
+ if os.environ.get('MLC_ABTF_ML_MODEL_SKIP_WARMUP',
'').strip().lower() != 'yes':
ds.load_query_samples([0])
for _ in range(5):
diff --git a/script/app-mlperf-automotive/customize.py b/script/app-mlperf-automotive/customize.py
index a9ea896d6..4a7600b14 100644
--- a/script/app-mlperf-automotive/customize.py
+++ b/script/app-mlperf-automotive/customize.py
@@ -39,16 +39,16 @@ def postprocess(i):
env['CMD'] = ''
- # if env.get('CM_MLPERF_USER_CONF', '') == '':
+ # if env.get('MLC_MLPERF_USER_CONF', '') == '':
# return {'return': 0}
- output_dir = env['CM_MLPERF_OUTPUT_DIR']
- mode = env['CM_MLPERF_LOADGEN_MODE']
+ output_dir = env['MLC_MLPERF_OUTPUT_DIR']
+ mode = env['MLC_MLPERF_LOADGEN_MODE']
- model = env['CM_MODEL']
- model_full_name = env.get('CM_ML_MODEL_FULL_NAME', model)
+ model = env['MLC_MODEL']
+ model_full_name = env.get('MLC_ML_MODEL_FULL_NAME', model)
- scenario = env['CM_MLPERF_LOADGEN_SCENARIO']
+ scenario = env['MLC_MLPERF_LOADGEN_SCENARIO']
if not os.path.exists(output_dir) or not os.path.exists(
os.path.join(output_dir, "mlperf_log_summary.txt")):
@@ -60,12 +60,12 @@ def postprocess(i):
result = mlperf_log['result_mean_latency_ns'] / 1000000
elif mode == "accuracy":
if not env.get(
- 'CM_COGNATA_ACCURACY_DUMP_FILE'): # can happen while reusing old runs
- env['CM_COGNATA_ACCURACY_DUMP_FILE'] = os.path.join(
+ 'MLC_COGNATA_ACCURACY_DUMP_FILE'): # can happen while reusing old runs
+ env['MLC_COGNATA_ACCURACY_DUMP_FILE'] = os.path.join(
output_dir, "accuracy.txt")
acc = ""
- if os.path.exists(env['CM_COGNATA_ACCURACY_DUMP_FILE']):
- with open(env['CM_COGNATA_ACCURACY_DUMP_FILE'], "r") as f:
+ if os.path.exists(env['MLC_COGNATA_ACCURACY_DUMP_FILE']):
+ with open(env['MLC_COGNATA_ACCURACY_DUMP_FILE'], "r") as f:
acc = f.readline()
result = acc
else:
@@ -74,26 +74,26 @@ def postprocess(i):
valid = {'performance': True, 'accuracy': True} # it's a POC
power_result = None # No power measurement in POC
- # result, valid, power_result = mlperf_utils.get_result_from_log(env['CM_MLPERF_LAST_RELEASE'], model, scenario, output_dir, mode)
+ # result, valid, power_result = mlperf_utils.get_result_from_log(env['MLC_MLPERF_LAST_RELEASE'], model, scenario, output_dir, mode)
if not state.get('mlc-mlperf-inference-results'):
state['mlc-mlperf-inference-results'] = {}
if not state.get('mlc-mlperf-inference-results-last'):
state['mlc-mlperf-inference-results-last'] = {}
if not state['mlc-mlperf-inference-results'].get(
- state['CM_SUT_CONFIG_NAME']):
- state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']] = {}
- if not state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']
+ state['MLC_SUT_CONFIG_NAME']):
+ state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']] = {}
+ if not state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']
].get(model):
- state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']][model] = {}
- if not state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']
+ state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']][model] = {}
+ if not state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']
][model].get(scenario):
- state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']
+ state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']
][model][scenario] = {}
- state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']
+ state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']
][model][scenario][mode] = result
- state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']
+ state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']
][model][scenario][mode + '_valid'] = valid.get(mode, False)
state['mlc-mlperf-inference-results-last'][mode] = result
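
The chain of guarded "if not state[...].get(...)" checks above grows the nested results dictionary one level at a time. The same structure can be written more compactly with setdefault; an equivalent sketch, not a proposed change, where sut stands for state['MLC_SUT_CONFIG_NAME']:

def record_result(state, sut, model, scenario, mode, result, valid):
    # Each setdefault creates the level only when it is missing, matching
    # the guarded assignments above.
    entry = (state.setdefault("mlc-mlperf-inference-results", {})
                  .setdefault(sut, {})
                  .setdefault(model, {})
                  .setdefault(scenario, {}))
    entry[mode] = result
    entry[mode + "_valid"] = valid.get(mode, False)
    state.setdefault("mlc-mlperf-inference-results-last", {})[mode] = result
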
diff --git a/script/app-mlperf-automotive/meta.yaml b/script/app-mlperf-automotive/meta.yaml
index a999c0ee0..5f39eaac5 100644
--- a/script/app-mlperf-automotive/meta.yaml
+++ b/script/app-mlperf-automotive/meta.yaml
@@ -18,53 +18,53 @@ predeps: no
# Default environment
default_env:
- CM_MLPERF_LOADGEN_MODE: accuracy
- CM_MLPERF_LOADGEN_SCENARIO: Offline
- CM_OUTPUT_FOLDER_NAME: test_results
- CM_MLPERF_RUN_STYLE: test
- CM_TEST_QUERY_COUNT: '10'
- CM_MLPERF_QUANTIZATION: off
- CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: reference
- CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX: ''
+ MLC_MLPERF_LOADGEN_MODE: accuracy
+ MLC_MLPERF_LOADGEN_SCENARIO: Offline
+ MLC_OUTPUT_FOLDER_NAME: test_results
+ MLC_MLPERF_RUN_STYLE: test
+ MLC_TEST_QUERY_COUNT: '10'
+ MLC_MLPERF_QUANTIZATION: off
+ MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: reference
+ MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX: ''
# Map script inputs to environment variables
input_mapping:
- device: CM_MLPERF_DEVICE
- count: CM_MLPERF_LOADGEN_QUERY_COUNT
- docker: CM_RUN_DOCKER_CONTAINER
- hw_name: CM_HW_NAME
+ device: MLC_MLPERF_DEVICE
+ count: MLC_MLPERF_LOADGEN_QUERY_COUNT
+ docker: MLC_RUN_DOCKER_CONTAINER
+ hw_name: MLC_HW_NAME
imagenet_path: IMAGENET_PATH
- max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE
- mode: CM_MLPERF_LOADGEN_MODE
- num_threads: CM_NUM_THREADS
- threads: CM_NUM_THREADS
- dataset: CM_MLPERF_VISION_DATASET_OPTION
- model: CM_MLPERF_CUSTOM_MODEL_PATH
+ max_batchsize: MLC_MLPERF_LOADGEN_MAX_BATCHSIZE
+ mode: MLC_MLPERF_LOADGEN_MODE
+ num_threads: MLC_NUM_THREADS
+ threads: MLC_NUM_THREADS
+ dataset: MLC_MLPERF_VISION_DATASET_OPTION
+ model: MLC_MLPERF_CUSTOM_MODEL_PATH
output_dir: OUTPUT_BASE_DIR
- power: CM_MLPERF_POWER
- power_server: CM_MLPERF_POWER_SERVER_ADDRESS
- ntp_server: CM_MLPERF_POWER_NTP_SERVER
- max_amps: CM_MLPERF_POWER_MAX_AMPS
- max_volts: CM_MLPERF_POWER_MAX_VOLTS
- regenerate_files: CM_REGENERATE_MEASURE_FILES
- rerun: CM_RERUN
- scenario: CM_MLPERF_LOADGEN_SCENARIO
- test_query_count: CM_TEST_QUERY_COUNT
- clean: CM_MLPERF_CLEAN_SUBMISSION_DIR
- dataset_args: CM_MLPERF_EXTRA_DATASET_ARGS
- target_qps: CM_MLPERF_LOADGEN_TARGET_QPS
- target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY
- offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
- server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS
- singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
- multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
- output: CM_MLPERF_OUTPUT_DIR
+ power: MLC_MLPERF_POWER
+ power_server: MLC_MLPERF_POWER_SERVER_ADDRESS
+ ntp_server: MLC_MLPERF_POWER_NTP_SERVER
+ max_amps: MLC_MLPERF_POWER_MAX_AMPS
+ max_volts: MLC_MLPERF_POWER_MAX_VOLTS
+ regenerate_files: MLC_REGENERATE_MEASURE_FILES
+ rerun: MLC_RERUN
+ scenario: MLC_MLPERF_LOADGEN_SCENARIO
+ test_query_count: MLC_TEST_QUERY_COUNT
+ clean: MLC_MLPERF_CLEAN_SUBMISSION_DIR
+ dataset_args: MLC_MLPERF_EXTRA_DATASET_ARGS
+ target_qps: MLC_MLPERF_LOADGEN_TARGET_QPS
+ target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY
+ offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
+ server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS
+ singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
+ multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
+ output: MLC_MLPERF_OUTPUT_DIR
# Env keys which are exposed to higher level scripts
new_env_keys:
- - CM_MLPERF_*
- - CM_OUTPUT_PREDICTIONS_PATH
+ - MLC_MLPERF_*
+ - MLC_OUTPUT_PREDICTIONS_PATH
new_state_keys:
- mlc-mlperf-inference-results*
@@ -103,7 +103,7 @@ docker:
deps:
- tags: get,abtf,scratch,space
mounts:
- - "${{ CM_ABTF_SCRATCH_PATH_DATASETS }}:${{ CM_ABTF_SCRATCH_PATH_DATASETS }}"
+ - "${{ MLC_ABTF_SCRATCH_PATH_DATASETS }}:${{ MLC_ABTF_SCRATCH_PATH_DATASETS }}"
# Variations to customize dependencies
@@ -114,15 +114,15 @@ variations:
group: implementation
default: true
env:
- CM_MLPERF_PYTHON: 'yes'
- CM_MLPERF_IMPLEMENTATION: reference
+ MLC_MLPERF_PYTHON: 'yes'
+ MLC_MLPERF_IMPLEMENTATION: reference
prehook_deps:
- names:
- python-reference-abtf-inference
- abtf-inference-implementation
tags: run-mlperf-inference,demo,abtf-model
skip_if_env:
- CM_SKIP_RUN:
+ MLC_SKIP_RUN:
- yes
@@ -130,29 +130,29 @@ variations:
fast:
group: execution-mode
env:
- CM_FAST_FACTOR: '5'
- CM_OUTPUT_FOLDER_NAME: fast_results
- CM_MLPERF_RUN_STYLE: fast
+ MLC_FAST_FACTOR: '5'
+ MLC_OUTPUT_FOLDER_NAME: fast_results
+ MLC_MLPERF_RUN_STYLE: fast
test:
group: execution-mode
default: true
env:
- CM_OUTPUT_FOLDER_NAME: test_results
- CM_MLPERF_RUN_STYLE: test
+ MLC_OUTPUT_FOLDER_NAME: test_results
+ MLC_MLPERF_RUN_STYLE: test
valid:
group: execution-mode
env:
- CM_OUTPUT_FOLDER_NAME: valid_results
- CM_MLPERF_RUN_STYLE: valid
+ MLC_OUTPUT_FOLDER_NAME: valid_results
+ MLC_MLPERF_RUN_STYLE: valid
# ML engine
onnxruntime:
group: framework
env:
- CM_MLPERF_BACKEND: onnxruntime
+ MLC_MLPERF_BACKEND: onnxruntime
add_deps_recursive:
abtf-inference-implementation:
tags: _onnxruntime
@@ -160,11 +160,11 @@ variations:
onnxruntime,cpu:
env:
- CM_MLPERF_BACKEND_VERSION: <<<CM_ONNXRUNTIME_VERSION>>>
+ MLC_MLPERF_BACKEND_VERSION: <<<MLC_ONNXRUNTIME_VERSION>>>
onnxruntime,cuda:
env:
- CM_MLPERF_BACKEND_VERSION: <<<CM_ONNXRUNTIME_GPU_VERSION>>>
+ MLC_MLPERF_BACKEND_VERSION: <<<MLC_ONNXRUNTIME_GPU_VERSION>>>
ONNXRUNTIME_PREFERRED_EXECUTION_PROVIDER: "CUDAExecutionProvider"
@@ -172,8 +172,8 @@ variations:
group: framework
default: true
env:
- CM_MLPERF_BACKEND: pytorch
- CM_MLPERF_BACKEND_VERSION: <<<CM_TORCH_VERSION>>>
+ MLC_MLPERF_BACKEND: pytorch
+ MLC_MLPERF_BACKEND_VERSION: <<<MLC_TORCH_VERSION>>>
add_deps_recursive:
abtf-inference-implementation:
tags: _pytorch
@@ -181,7 +181,7 @@ variations:
abtf-demo-model:
env:
- CM_MODEL: retinanet
+ MLC_MODEL: retinanet
group: models
add_deps_recursive:
abtf-inference-implementation:
@@ -189,7 +189,7 @@ variations:
abtf-poc-model:
env:
- CM_MODEL: retinanet
+ MLC_MODEL: retinanet
default: true
group: models
add_deps_recursive:
@@ -201,11 +201,11 @@ variations:
names:
- raw-dataset-mlcommons-cognata
enable_if_env:
- CM_DATASET_MLCOMMONS_COGNATA_DOWNLOAD_IN_HOST:
+ MLC_DATASET_MLCOMMONS_COGNATA_DOWNLOAD_IN_HOST:
- yes
mounts:
- - "${{ CM_DATASET_MLCOMMONS_COGNATA_PATH }}:${{ CM_DATASET_MLCOMMONS_COGNATA_PATH }}"
+ - "${{ MLC_DATASET_MLCOMMONS_COGNATA_PATH }}:${{ MLC_DATASET_MLCOMMONS_COGNATA_PATH }}"
# Target devices
@@ -213,7 +213,7 @@ variations:
group: device
default: true
env:
- CM_MLPERF_DEVICE: cpu
+ MLC_MLPERF_DEVICE: cpu
CUDA_VISIBLE_DEVICES: ''
USE_CUDA: no
USE_GPU: no
@@ -224,7 +224,7 @@ variations:
cuda:
group: device
env:
- CM_MLPERF_DEVICE: gpu
+ MLC_MLPERF_DEVICE: gpu
USE_CUDA: yes
USE_GPU: yes
add_deps_recursive:
@@ -239,13 +239,13 @@ variations:
# Loadgen scenarios
offline:
env:
- CM_MLPERF_LOADGEN_SCENARIO: Offline
+ MLC_MLPERF_LOADGEN_SCENARIO: Offline
add_deps_recursive:
abtf-inference-implementation:
tags: _offline
multistream:
env:
- CM_MLPERF_LOADGEN_SCENARIO: MultiStream
+ MLC_MLPERF_LOADGEN_SCENARIO: MultiStream
add_deps_recursive:
abtf-inference-implementation:
tags: _multistream
@@ -253,35 +253,35 @@ variations:
group: loadgen-scenario
default: true
env:
- CM_MLPERF_LOADGEN_SCENARIO: SingleStream
+ MLC_MLPERF_LOADGEN_SCENARIO: SingleStream
add_deps_recursive:
abtf-inference-implementation:
tags: _singlestream
server:
env:
- CM_MLPERF_LOADGEN_SCENARIO: Server
+ MLC_MLPERF_LOADGEN_SCENARIO: Server
add_deps_recursive:
abtf-inference-implementation:
tags: _server
mvp-demo:
env:
- CM_ABTF_MVP_DEMO: yes
- CM_MLPERF_VISION_DATASET_OPTION: cognata-8mp-pt
- CM_ABTF_ML_MODEL_CONFIG: baseline_8MP_ss_scales_all
- CM_ABTF_NUM_CLASSES: 15
- CM_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS: 10002_Urban_Clear_Morning
- CM_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES: Cognata_Camera_01_8M
- CM_ABTF_ML_MODEL_TRAINING_FORCE_COGNATA_LABELS: 'yes'
- CM_ABTF_ML_MODEL_SKIP_WARMUP: 'yes'
+ MLC_ABTF_MVP_DEMO: yes
+ MLC_MLPERF_VISION_DATASET_OPTION: cognata-8mp-pt
+ MLC_ABTF_ML_MODEL_CONFIG: baseline_8MP_ss_scales_all
+ MLC_ABTF_NUM_CLASSES: 15
+ MLC_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS: 10002_Urban_Clear_Morning
+ MLC_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES: Cognata_Camera_01_8M
+ MLC_ABTF_ML_MODEL_TRAINING_FORCE_COGNATA_LABELS: 'yes'
+ MLC_ABTF_ML_MODEL_SKIP_WARMUP: 'yes'
poc-demo:
env:
- CM_ABTF_POC_DEMO: yes
- CM_MLPERF_VISION_DATASET_OPTION: cognata-8mp-pt
- CM_ABTF_ML_MODEL_CONFIG: baseline_8MP_ss_scales_fm1_5x5_all
- CM_ABTF_NUM_CLASSES: 15
- CM_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS: 10002_Urban_Clear_Morning
- CM_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES: Cognata_Camera_01_8M
- CM_ABTF_ML_MODEL_TRAINING_FORCE_COGNATA_LABELS: 'yes'
- CM_ABTF_ML_MODEL_SKIP_WARMUP: 'yes'
+ MLC_ABTF_POC_DEMO: yes
+ MLC_MLPERF_VISION_DATASET_OPTION: cognata-8mp-pt
+ MLC_ABTF_ML_MODEL_CONFIG: baseline_8MP_ss_scales_fm1_5x5_all
+ MLC_ABTF_NUM_CLASSES: 15
+ MLC_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS: 10002_Urban_Clear_Morning
+ MLC_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES: Cognata_Camera_01_8M
+ MLC_ABTF_ML_MODEL_TRAINING_FORCE_COGNATA_LABELS: 'yes'
+ MLC_ABTF_ML_MODEL_SKIP_WARMUP: 'yes'
diff --git a/script/app-mlperf-inference-amd/customize.py b/script/app-mlperf-inference-amd/customize.py
index 16d6245ee..c2945f45c 100644
--- a/script/app-mlperf-inference-amd/customize.py
+++ b/script/app-mlperf-inference-amd/customize.py
@@ -11,31 +11,31 @@ def preprocess(i):
return {'return': 1, 'error': 'Windows is not supported in this script yet'}
env = i['env']
- if env.get('CM_MLPERF_SKIP_RUN', '') == "yes":
+ if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes":
return {'return': 0}
- env['CM_MLPERF_AMD_SCRIPT_PATH'] = env['CM_TMP_CURRENT_SCRIPT_PATH']
- env['CM_MLPERF_AMD_CODE_PATH'] = os.path.join(
- env['CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO'], "closed", "AMD")
+ env['MLC_MLPERF_AMD_SCRIPT_PATH'] = env['MLC_TMP_CURRENT_SCRIPT_PATH']
+ env['MLC_MLPERF_AMD_CODE_PATH'] = os.path.join(
+ env['MLC_MLPERF_INFERENCE_IMPLEMENTATION_REPO'], "closed", "AMD")
- if 'CM_MODEL' not in env:
+ if 'MLC_MODEL' not in env:
return {
'return': 1, 'error': 'Please select a variation specifying the model to run'}
- if 'CM_MLPERF_BACKEND' not in env:
+ if 'MLC_MLPERF_BACKEND' not in env:
return {'return': 1,
'error': 'Please select a variation specifying the backend'}
- if 'CM_MLPERF_DEVICE' not in env:
+ if 'MLC_MLPERF_DEVICE' not in env:
return {
'return': 1, 'error': 'Please select a variation specifying the device to run on'}
- if "llama2" in env['CM_MODEL']:
- env['CM_RUN_DIR'] = i['run_script_input']['path']
- env['CM_MLPERF_AMD_LLAMA2_CODE_PATH'] = os.path.join(
- env['CM_MLPERF_AMD_CODE_PATH'], "llama2-70b-99.9/VllmFp8")
- env['CM_RUN_CMD'] = "bash run-llama2.sh "
+ if "llama2" in env['MLC_MODEL']:
+ env['MLC_RUN_DIR'] = i['run_script_input']['path']
+ env['MLC_MLPERF_AMD_LLAMA2_CODE_PATH'] = os.path.join(
+ env['MLC_MLPERF_AMD_CODE_PATH'], "llama2-70b-99.9/VllmFp8")
+ env['MLC_RUN_CMD'] = "bash run-llama2.sh "
else:
return {'return': 1, 'error': 'Model {} not supported'.format(
- env['CM_MODEL'])}
+ env['MLC_MODEL'])}
return {'return': 0}
# return {'return':1, 'error': 'Run command needs to be tested'}
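
preprocess above fails fast when the model, backend, or device variation is missing. A usage sketch of that validation (the input shape follows the diff; every path and value below is made up):

# Hypothetical invocation showing the validation behaviour of preprocess().
i = {
    "os_info": {"platform": "linux"},
    "env": {
        "MLC_MODEL": "llama2-70b-99.9",
        "MLC_MLPERF_BACKEND": "pytorch",
        "MLC_MLPERF_DEVICE": "rocm",
        "MLC_TMP_CURRENT_SCRIPT_PATH": "/tmp/script",
        "MLC_MLPERF_INFERENCE_IMPLEMENTATION_REPO": "/tmp/inference_results",
    },
    "run_script_input": {"path": "/tmp/script"},
}
# preprocess(i) returns {'return': 0} and sets MLC_RUN_CMD; removing
# MLC_MLPERF_BACKEND instead yields
# {'return': 1, 'error': 'Please select a variation specifying the backend'}
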
diff --git a/script/app-mlperf-inference-amd/meta.yaml b/script/app-mlperf-inference-amd/meta.yaml
index f073011f8..2c3b6d063 100644
--- a/script/app-mlperf-inference-amd/meta.yaml
+++ b/script/app-mlperf-inference-amd/meta.yaml
@@ -21,51 +21,51 @@ tags:
# Default environment
default_env:
- CM_MLPERF_LOADGEN_SCENARIO: Offline
- CM_MLPERF_LOADGEN_MODE: performance
- CM_SKIP_PREPROCESS_DATASET: 'no'
- CM_SKIP_MODEL_DOWNLOAD: 'no'
- CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: redhat_harness
- CM_MLPERF_SKIP_RUN: 'no'
+ MLC_MLPERF_LOADGEN_SCENARIO: Offline
+ MLC_MLPERF_LOADGEN_MODE: performance
+ MLC_SKIP_PREPROCESS_DATASET: 'no'
+ MLC_SKIP_MODEL_DOWNLOAD: 'no'
+ MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: redhat_harness
+ MLC_MLPERF_SKIP_RUN: 'no'
env:
- CM_CALL_MLPERF_RUNNER: 'no'
+ MLC_CALL_MLPERF_RUNNER: 'no'
# Map script inputs to environment variables
input_mapping:
- count: CM_MLPERF_LOADGEN_QUERY_COUNT
- max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE
- mlperf_conf: CM_MLPERF_CONF
- mode: CM_MLPERF_LOADGEN_MODE
- output_dir: CM_MLPERF_OUTPUT_DIR
- performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
- scenario: CM_MLPERF_LOADGEN_SCENARIO
- user_conf: CM_MLPERF_USER_CONF
- skip_preprocess: CM_SKIP_PREPROCESS_DATASET
- skip_preprocessing: CM_SKIP_PREPROCESS_DATASET
- target_qps: CM_MLPERF_LOADGEN_TARGET_QPS
- offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
- server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS
- target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY
- singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
- multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
- performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
- rerun: CM_RERUN
- results_repo: CM_MLPERF_INFERENCE_RESULTS_REPO
+ count: MLC_MLPERF_LOADGEN_QUERY_COUNT
+ max_batchsize: MLC_MLPERF_LOADGEN_MAX_BATCHSIZE
+ mlperf_conf: MLC_MLPERF_CONF
+ mode: MLC_MLPERF_LOADGEN_MODE
+ output_dir: MLC_MLPERF_OUTPUT_DIR
+ performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
+ scenario: MLC_MLPERF_LOADGEN_SCENARIO
+ user_conf: MLC_MLPERF_USER_CONF
+ skip_preprocess: MLC_SKIP_PREPROCESS_DATASET
+ skip_preprocessing: MLC_SKIP_PREPROCESS_DATASET
+ target_qps: MLC_MLPERF_LOADGEN_TARGET_QPS
+ offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
+ server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS
+ target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY
+ singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
+ multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
+ performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
+ rerun: MLC_RERUN
+ results_repo: MLC_MLPERF_INFERENCE_RESULTS_REPO
new_state_keys:
- mlperf-inference-implementation
- - CM_SUT_*
+ - MLC_SUT_*
# Env keys which are exposed to higher level scripts
new_env_keys:
- - CM_MLPERF_*
- - CM_DATASET_*
- - CM_HW_NAME
- - CM_ML_MODEL_*
- - CM_MAX_EXAMPLES
- - CM_IMAGENET_ACCURACY_DTYPE
- - CM_SQUAD_ACCURACY_DTYPE
+ - MLC_MLPERF_*
+ - MLC_DATASET_*
+ - MLC_HW_NAME
+ - MLC_ML_MODEL_*
+ - MLC_MAX_EXAMPLES
+ - MLC_IMAGENET_ACCURACY_DTYPE
+ - MLC_SQUAD_ACCURACY_DTYPE
# Dependencies on other CM scripts
@@ -111,9 +111,9 @@ deps:
- inference-code
update_tags_from_env_with_prefix:
_repo.:
- - CM_MLPERF_INFERENCE_RESULTS_REPO
+ - MLC_MLPERF_INFERENCE_RESULTS_REPO
env:
- CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO
+ MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_MLPERF_INFERENCE_IMPLEMENTATION_REPO
extra_cache_tags: results,repo,mlperf
# Post dependencies to run this app including for power measurement
@@ -123,7 +123,7 @@ post_deps:
- runner
- mlperf-runner
skip_if_env:
- CM_MLPERF_SKIP_RUN:
+ MLC_MLPERF_SKIP_RUN:
- 'yes'
- yes
tags: benchmark-mlperf
@@ -139,29 +139,29 @@ variations:
group: device
default: true
env:
- CM_MLPERF_DEVICE: cpu
+ MLC_MLPERF_DEVICE: cpu
cuda:
group: device
env:
- CM_MLPERF_DEVICE: gpu
- CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart
+ MLC_MLPERF_DEVICE: gpu
+ MLC_MLPERF_DEVICE_LIB_NAMESPEC: cudart
rocm:
group: device
env:
- CM_MLPERF_DEVICE: rocm
- CM_MLPERF_DEVICE_LIB_NAMESPEC: rocm
+ MLC_MLPERF_DEVICE: rocm
+ MLC_MLPERF_DEVICE_LIB_NAMESPEC: rocm
openshift:
group: backend
default: true
env:
- CM_MLPERF_BACKEND: openshift
+ MLC_MLPERF_BACKEND: openshift
pytorch:
group: backend
env:
- CM_MLPERF_BACKEND: pytorch
+ MLC_MLPERF_BACKEND: pytorch
pytorch,cuda:
deps:
@@ -184,14 +184,14 @@ variations:
group: model
default: true
env:
- CM_MODEL: resnet50
+ MLC_MODEL: resnet50
retinanet:
group: model
base:
- bs.1
env:
- CM_MODEL: retinanet
+ MLC_MODEL: retinanet
bert_:
{}
@@ -201,15 +201,15 @@ variations:
base:
- bert_
env:
- CM_MODEL: bert-99
- CM_SQUAD_ACCURACY_DTYPE: float32
+ MLC_MODEL: bert-99
+ MLC_SQUAD_ACCURACY_DTYPE: float32
bert-99.9:
group: model
base:
- bert_
env:
- CM_MODEL: bert-99.9
+ MLC_MODEL: bert-99.9
bert_:
{}
@@ -219,15 +219,15 @@ variations:
base:
- bert_
env:
- CM_MODEL: bert-99
- CM_SQUAD_ACCURACY_DTYPE: float32
+ MLC_MODEL: bert-99
+ MLC_SQUAD_ACCURACY_DTYPE: float32
bert-99.9:
group: model
base:
- bert_
env:
- CM_MODEL: bert-99.9
+ MLC_MODEL: bert-99.9
gptj_:
deps:
@@ -241,15 +241,15 @@ variations:
base:
- gptj_
env:
- CM_MODEL: gptj-99
- CM_SQUAD_ACCURACY_DTYPE: float32
+ MLC_MODEL: gptj-99
+ MLC_SQUAD_ACCURACY_DTYPE: float32
gptj-99.9:
group: model
base:
- gptj_
env:
- CM_MODEL: gptj-99.9
+ MLC_MODEL: gptj-99.9
llama2-70b_:
deps:
@@ -259,42 +259,42 @@ variations:
- tags: get,preprocessed,dataset,openorca,_mlc,_validation
- tags: get,ml-model,llama2,_amd,_pytorch
skip_if_env:
- CM_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST:
+ MLC_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST:
- 'yes'
- CM_RUN_STATE_DOCKER:
+ MLC_RUN_STATE_DOCKER:
- 'yes'
- tags: get,preprocessed,dataset,openorca,_mlc,_validation
- tags: download,file,_url.https://github.com/vllm-project/vllm/blob/38c4b7e863570a045308af814c72f4504297222e/tests/fp8_kv/llama2-70b-fp8-kv/kv_cache_scales.json
extra_cache_tags: llama2-scales,kv-cache
force_cache: true
env:
- CM_DOWNLOAD_FINAL_ENV_NAME: QUANTIZATION_PARAM_PATH
+ MLC_DOWNLOAD_FINAL_ENV_NAME: QUANTIZATION_PARAM_PATH
- tags: get,generic-python-lib,_package.vllm
names:
- vllm
- tags: get,git,repo,_repo.https://github.com/mlcommons/inference_results_v4.1,_branch.cm-code-only
extra_cache_tags: inference,results
env:
- CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_INFERENCE_RESULTS_PATH
+ MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_MLPERF_INFERENCE_RESULTS_PATH
llama2-70b-99:
group: model
base:
- llama2-70b_
env:
- CM_MODEL: llama2-70b-99
+ MLC_MODEL: llama2-70b-99
llama2-70b-99.9:
group: model
base:
- llama2-70b_
env:
- CM_MODEL: llama2-70b-99.9
+ MLC_MODEL: llama2-70b-99.9
singlestream:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: SingleStream
+ MLC_MLPERF_LOADGEN_SCENARIO: SingleStream
singlestream,resnet50:
default_variations:
@@ -307,17 +307,17 @@ variations:
multistream:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: MultiStream
+ MLC_MLPERF_LOADGEN_SCENARIO: MultiStream
offline:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: Offline
+ MLC_MLPERF_LOADGEN_SCENARIO: Offline
server:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: Server
+ MLC_MLPERF_LOADGEN_SCENARIO: Server
uint8:
group: precision
@@ -330,12 +330,12 @@ variations:
group: version
default: true
env:
- CM_MLPERF_INFERENCE_RESULTS_REPO: https://github.com/mlcommons/inference_results_v4.0
+ MLC_MLPERF_INFERENCE_RESULTS_REPO: https://github.com/mlcommons/inference_results_v4.0
r4.1_default:
group: version
env:
- CM_MLPERF_INFERENCE_RESULTS_REPO: https://github.com/mlcommons/inference_results_v4.1
+ MLC_MLPERF_INFERENCE_RESULTS_REPO: https://github.com/mlcommons/inference_results_v4.1
docker:
real_run: False
diff --git a/script/app-mlperf-inference-amd/run-llama2.sh b/script/app-mlperf-inference-amd/run-llama2.sh
index 10f36f8ca..a056a713b 100644
--- a/script/app-mlperf-inference-amd/run-llama2.sh
+++ b/script/app-mlperf-inference-amd/run-llama2.sh
@@ -17,21 +17,21 @@ export HARNESS_DISABLE_VLLM_LOGS=1
export VLLM_LOGGING_LEVEL=ERROR
MODEL_PATH=${LLAMA2_CHECKPOINT_PATH:-/data/llm/llama2-70b-chat/}
-DATASET_PATH=${CM_DATASET_OPENORCA_PREPROCESSED_PATH:-/data/open_orca/open_orca_gpt4_tokenized_llama.sampled_24576.pkl.gz}
-QUANTIZED_WEIGHTS_PATH=${CM_LLAMA2_FINAL_SAFE_TENSORS_PATH:-quantized/quark_share/modelzoo/llama2_70b_wfp8_afp8_ofp8_nomerge/json-safetensors/llama.safetensors}
+DATASET_PATH=${MLC_DATASET_OPENORCA_PREPROCESSED_PATH:-/data/open_orca/open_orca_gpt4_tokenized_llama.sampled_24576.pkl.gz}
+QUANTIZED_WEIGHTS_PATH=${MLC_LLAMA2_FINAL_SAFE_TENSORS_PATH:-quantized/quark_share/modelzoo/llama2_70b_wfp8_afp8_ofp8_nomerge/json-safetensors/llama.safetensors}
QUANTIZATION_PARAM_PATH=${QUANTIZATION_PARAM_PATH:-/app/kv_cache_scales.json}
-MLPERF_CONF="${CM_MLPERF_CONF:-/app/mlperf_inference/mlperf.conf}"
-USER_CONF="${CM_MLPERF_USER_CONF:-/lab-mlperf-inference/code/llama2-70b-99.9/mlperf_config_VllmFp8/user.conf}"
+MLPERF_CONF="${MLC_MLPERF_CONF:-/app/mlperf_inference/mlperf.conf}"
+USER_CONF="${MLC_MLPERF_USER_CONF:-/lab-mlperf-inference/code/llama2-70b-99.9/mlperf_config_VllmFp8/user.conf}"
SUBMISSION=${SUBMISSION:-0}
-LOG_DIR=${CM_MLPERF_OUTPUT_DIR}
+LOG_DIR=${MLC_MLPERF_OUTPUT_DIR}
cp $USER_CONF ${LOG_DIR}/user.conf
COMMON_CMD_OPTIONS="\
- --scenario ${CM_MLPERF_LOADGEN_SCENARIO} \
+ --scenario ${MLC_MLPERF_LOADGEN_SCENARIO} \
--output-log-dir ${LOG_DIR} \
--model-path $MODEL_PATH \
--mlperf-conf $MLPERF_CONF \
@@ -48,16 +48,16 @@ COMMON_CMD_OPTIONS="\
--quantized-weights-path ${QUANTIZED_WEIGHTS_PATH} \
--quantization-param-path ${QUANTIZATION_PARAM_PATH}"
-if [ "${CM_MLPERF_LOADGEN_MODE}" == "accuracy" ]; then
+if [ "${MLC_MLPERF_LOADGEN_MODE}" == "accuracy" ]; then
COMMON_CMD_OPTIONS+=" --accuracy"
fi
-if [ "${CM_MLPERF_LOADGEN_SCENARIO}" == "Offline" ]; then
+if [ "${MLC_MLPERF_LOADGEN_SCENARIO}" == "Offline" ]; then
WD=${WD:-0}
SORTING=${SORTING:-descending}  # options: ascending, descending, lexicographic, skip
export VLLM_SCHED_PREFILL_KVC_FREEPCT=31.0
# generate run command
- cmd="${CM_PYTHON_BIN_WITH_PATH} ${CM_MLPERF_AMD_LLAMA2_CODE_PATH}/mainVllmFp8_Offline.py \
+ cmd="${MLC_PYTHON_BIN_WITH_PATH} ${MLC_MLPERF_AMD_LLAMA2_CODE_PATH}/mainVllmFp8_Offline.py \
${COMMON_CMD_OPTIONS} \
--warmup-duration ${WD} \
--sorting ${SORTING} \
@@ -65,7 +65,7 @@ if [ "${CM_MLPERF_LOADGEN_SCENARIO}" == "Offline" ]; then
--gpu-memory-utilization 0.99"
else
# generate run command
- cmd="${CM_PYTHON_BIN_WITH_PATH} ${CM_MLPERF_AMD_LLAMA2_CODE_PATH}/mainVllmFp8_SyncServer.py \
+ cmd="${MLC_PYTHON_BIN_WITH_PATH} ${MLC_MLPERF_AMD_LLAMA2_CODE_PATH}/mainVllmFp8_SyncServer.py \
${COMMON_CMD_OPTIONS} \
--enable-warm-up \
--enable-batcher"
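
Note: the renamed scripts above read only the MLC_-prefixed variables, so anything that still exports the legacy CM_* names (older wrappers, pre-built Docker images) will silently fall through to the defaults. A minimal, hypothetical shim that mirrors legacy values onto the new names — not part of this PR, and the prefix mapping is the only assumption:

    import os

    def mirror_legacy_env(prefix_old="CM_", prefix_new="MLC_"):
        """Copy legacy CM_* variables to their MLC_* names without
        overwriting values that are already set. Illustrative only --
        not part of this PR."""
        for key, value in list(os.environ.items()):
            if key.startswith(prefix_old):
                new_key = prefix_new + key[len(prefix_old):]
                os.environ.setdefault(new_key, value)

    if __name__ == "__main__":
        os.environ["CM_MLPERF_LOADGEN_SCENARIO"] = "Offline"
        mirror_legacy_env()
        print(os.environ["MLC_MLPERF_LOADGEN_SCENARIO"])  # -> Offline
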
diff --git a/script/app-mlperf-inference-amd/run.sh b/script/app-mlperf-inference-amd/run.sh
index ddcd0b550..0c6a8fc4a 100644
--- a/script/app-mlperf-inference-amd/run.sh
+++ b/script/app-mlperf-inference-amd/run.sh
@@ -1,7 +1,7 @@
#!/bin/bash
-if [[ ${CM_CALL_MLPERF_RUNNER} == "no" ]]; then
- cd ${CM_RUN_DIR}
- cmd=${CM_RUN_CMD}
+if [[ ${MLC_CALL_MLPERF_RUNNER} == "no" ]]; then
+ cd ${MLC_RUN_DIR}
+ cmd=${MLC_RUN_CMD}
echo "${cmd}"
eval "${cmd}"
test $? -eq 0 || exit $?
diff --git a/script/app-mlperf-inference-ctuning-cpp-tflite/armnn/classification.cpp b/script/app-mlperf-inference-ctuning-cpp-tflite/armnn/classification.cpp
index c641e9d1e..5ba78b0ca 100644
--- a/script/app-mlperf-inference-ctuning-cpp-tflite/armnn/classification.cpp
+++ b/script/app-mlperf-inference-ctuning-cpp-tflite/armnn/classification.cpp
@@ -56,10 +56,10 @@ class Program {
public:
Program () : runtime( armnn::IRuntime::Create(options) ) {
- bool use_neon = getenv_b("CM_MLPERF_TFLITE_USE_NEON");
- bool use_opencl = getenv_b("CM_MLPERF_TFLITE_USE_OPENCL");
- string input_layer_name = getenv_s("CM_ML_MODEL_INPUT_LAYER_NAME");
- string output_layer_name = getenv_s("CM_ML_MODEL_OUTPUT_LAYER_NAME");
+ bool use_neon = getenv_b("MLC_MLPERF_TFLITE_USE_NEON");
+ bool use_opencl = getenv_b("MLC_MLPERF_TFLITE_USE_OPENCL");
+ string input_layer_name = getenv_s("MLC_ML_MODEL_INPUT_LAYER_NAME");
+ string output_layer_name = getenv_s("MLC_ML_MODEL_OUTPUT_LAYER_NAME");
settings = new BenchmarkSettings(MODEL_TYPE::LITE);
@@ -333,14 +333,14 @@ void TestSingleStream(Program *prg) {
SystemUnderTestSingleStream sut(prg);
QuerySampleLibrarySingleStream qsl(prg);
- const std::string mlperf_conf_path = getenv_s("CM_MLPERF_CONF");
- const std::string user_conf_path = getenv_s("CM_MLPERF_USER_CONF");
+ const std::string mlperf_conf_path = getenv_s("MLC_MLPERF_CONF");
+ const std::string user_conf_path = getenv_s("MLC_MLPERF_USER_CONF");
- std::string model_name = getenv_opt_s("CM_MODEL", "unknown_model");
- std::string logs_dir = getenv_opt_s("CM_MLPERF_LOADGEN_LOGS_DIR", "");
+ std::string model_name = getenv_opt_s("MLC_MODEL", "unknown_model");
+ std::string logs_dir = getenv_opt_s("MLC_MLPERF_LOADGEN_LOGS_DIR", "");
- const std::string scenario_string = getenv_s("CM_MLPERF_LOADGEN_SCENARIO");
- const std::string mode_string = getenv_s("CM_MLPERF_LOADGEN_MODE");
+ const std::string scenario_string = getenv_s("MLC_MLPERF_LOADGEN_SCENARIO");
+ const std::string mode_string = getenv_s("MLC_MLPERF_LOADGEN_MODE");
std::cout << "Path to mlperf.conf : " << mlperf_conf_path << std::endl;
std::cout << "Path to user.conf : " << user_conf_path << std::endl;
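
Note: the armnn harness distinguishes required lookups (getenv_s) from optional ones with defaults (getenv_opt_s), plus typed variants (getenv_b, getenv_i). A rough Python sketch of those helper semantics, assuming getenv_s fails fast on a missing variable, getenv_b accepts the usual truthy spellings, and getenv_i yields 0 when unset:

    import os

    def getenv_s(name: str) -> str:
        """Required string variable; fail fast if unset (assumed semantics)."""
        value = os.environ.get(name)
        if value is None:
            raise RuntimeError(f"Required environment variable {name} is not set")
        return value

    def getenv_opt_s(name: str, default: str) -> str:
        """Optional string variable with a default."""
        return os.environ.get(name, default)

    def getenv_b(name: str) -> bool:
        """Boolean flag; '1'/'yes'/'on'/'true' count as true (an assumption)."""
        return os.environ.get(name, "").strip().lower() in ("1", "yes", "on", "true")

    def getenv_i(name: str) -> int:
        """Integer variable; 0 when unset or empty (an assumption)."""
        raw = os.environ.get(name, "").strip()
        return int(raw) if raw else 0
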
diff --git a/script/app-mlperf-inference-ctuning-cpp-tflite/customize.py b/script/app-mlperf-inference-ctuning-cpp-tflite/customize.py
index 8589a8241..8bca479d2 100644
--- a/script/app-mlperf-inference-ctuning-cpp-tflite/customize.py
+++ b/script/app-mlperf-inference-ctuning-cpp-tflite/customize.py
@@ -11,30 +11,30 @@ def preprocess(i):
return {'return': 1, 'error': 'Windows is not supported in this script yet'}
env = i['env']
- if env.get('CM_MLPERF_SKIP_RUN', '') == "yes":
+ if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes":
return {'return': 0}
- if 'CM_MODEL' not in env:
+ if 'MLC_MODEL' not in env:
return {
'return': 1, 'error': 'Please select a variation specifying the model to run'}
- if 'CM_MLPERF_BACKEND' not in env:
+ if 'MLC_MLPERF_BACKEND' not in env:
return {'return': 1,
'error': 'Please select a variation specifying the backend'}
- if 'CM_MLPERF_DEVICE' not in env:
+ if 'MLC_MLPERF_DEVICE' not in env:
return {
'return': 1, 'error': 'Please select a variation specifying the device to run on'}
source_files = []
script_path = i['run_script_input']['path']
- env['CM_SOURCE_FOLDER_PATH'] = os.path.join(
- script_path, env['CM_TMP_SRC_FOLDER'])
+ env['MLC_SOURCE_FOLDER_PATH'] = os.path.join(
+ script_path, env['MLC_TMP_SRC_FOLDER'])
- for file in os.listdir(env['CM_SOURCE_FOLDER_PATH']):
+ for file in os.listdir(env['MLC_SOURCE_FOLDER_PATH']):
if file.endswith(".c") or file.endswith(".cpp"):
source_files.append(file)
- env['CM_CXX_SOURCE_FILES'] = ";".join(source_files)
+ env['MLC_CXX_SOURCE_FILES'] = ";".join(source_files)
if '+CPLUS_INCLUDE_PATH' not in env:
env['+CPLUS_INCLUDE_PATH'] = []
@@ -43,24 +43,24 @@ def preprocess(i):
env['+C_INCLUDE_PATH'].append(os.path.join(script_path, "inc"))
# TODO: get cuda path ugly fix
- if env['CM_MLPERF_DEVICE'] == 'gpu':
- env['+C_INCLUDE_PATH'].append(env['CM_CUDA_PATH_INCLUDE'])
- env['+CPLUS_INCLUDE_PATH'].append(env['CM_CUDA_PATH_INCLUDE'])
- env['+LD_LIBRARY_PATH'].append(env['CM_CUDA_PATH_LIB'])
- env['+DYLD_FALLBACK_LIBRARY_PATH'].append(env['CM_CUDA_PATH_INCLUDE'])
+ if env['MLC_MLPERF_DEVICE'] == 'gpu':
+ env['+C_INCLUDE_PATH'].append(env['MLC_CUDA_PATH_INCLUDE'])
+ env['+CPLUS_INCLUDE_PATH'].append(env['MLC_CUDA_PATH_INCLUDE'])
+ env['+LD_LIBRARY_PATH'].append(env['MLC_CUDA_PATH_LIB'])
+ env['+DYLD_FALLBACK_LIBRARY_PATH'].append(env['MLC_CUDA_PATH_INCLUDE'])
if '+ CXXFLAGS' not in env:
env['+ CXXFLAGS'] = []
env['+ CXXFLAGS'].append("-std=c++17")
- # add preprocessor flag like "#define CM_MODEL_RESNET50"
- env['+ CXXFLAGS'].append('-DCM_MODEL_' + env['CM_MODEL'].upper())
- # add preprocessor flag like "#define CM_MLPERF_BACKEND_ONNXRUNTIME"
- env['+ CXXFLAGS'].append('-DCM_MLPERF_BACKEND_' +
- env['CM_MLPERF_BACKEND'].upper())
- # add preprocessor flag like "#define CM_MLPERF_DEVICE_CPU"
- env['+ CXXFLAGS'].append('-DCM_MLPERF_DEVICE_' +
- env['CM_MLPERF_DEVICE'].upper())
+ # add preprocessor flag like "#define MLC_MODEL_RESNET50"
+ env['+ CXXFLAGS'].append('-DMLC_MODEL_' + env['MLC_MODEL'].upper())
+ # add preprocessor flag like "#define MLC_MLPERF_BACKEND_ONNXRUNTIME"
+ env['+ CXXFLAGS'].append('-DMLC_MLPERF_BACKEND_' +
+ env['MLC_MLPERF_BACKEND'].upper())
+ # add preprocessor flag like "#define MLC_MLPERF_DEVICE_CPU"
+ env['+ CXXFLAGS'].append('-DMLC_MLPERF_DEVICE_' +
+ env['MLC_MLPERF_DEVICE'].upper())
if '+ LDCXXFLAGS' not in env:
env['+ LDCXXFLAGS'] = []
@@ -70,33 +70,33 @@ def preprocess(i):
"-lpthread"
]
# e.g. -lonnxruntime
- if 'CM_MLPERF_BACKEND_LIB_NAMESPEC' in env:
+ if 'MLC_MLPERF_BACKEND_LIB_NAMESPEC' in env:
env['+ LDCXXFLAGS'].append('-l' +
- env['CM_MLPERF_BACKEND_LIB_NAMESPEC'])
+ env['MLC_MLPERF_BACKEND_LIB_NAMESPEC'])
# e.g. -lcudart
- if 'CM_MLPERF_DEVICE_LIB_NAMESPEC' in env:
- env['+ LDCXXFLAGS'].append('-l' + env['CM_MLPERF_DEVICE_LIB_NAMESPEC'])
+ if 'MLC_MLPERF_DEVICE_LIB_NAMESPEC' in env:
+ env['+ LDCXXFLAGS'].append('-l' + env['MLC_MLPERF_DEVICE_LIB_NAMESPEC'])
- if env.get('CM_TMP_LINK_LIBS', []):
- libs = env['CM_TMP_LINK_LIBS'].split(",")
+ if env.get('MLC_TMP_LINK_LIBS', []):
+ libs = env['MLC_TMP_LINK_LIBS'].split(",")
for lib in libs:
env['+ LDCXXFLAGS'].append(' -l' + lib)
- env['CM_LINKER_LANG'] = 'CXX'
- env['CM_RUN_DIR'] = os.getcwd()
+ env['MLC_LINKER_LANG'] = 'CXX'
+ env['MLC_RUN_DIR'] = os.getcwd()
- if 'CM_MLPERF_CONF' not in env:
- env['CM_MLPERF_CONF'] = os.path.join(
- env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf")
- if 'CM_MLPERF_USER_CONF' not in env:
- env['CM_MLPERF_USER_CONF'] = os.path.join(
- env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf")
+ if 'MLC_MLPERF_CONF' not in env:
+ env['MLC_MLPERF_CONF'] = os.path.join(
+ env['MLC_MLPERF_INFERENCE_SOURCE'], "mlperf.conf")
+ if 'MLC_MLPERF_USER_CONF' not in env:
+ env['MLC_MLPERF_USER_CONF'] = os.path.join(
+ env['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf")
- if env.get('CM_DATASET_COMPRESSED', "no").lower() in [
- "yes", "on", "true"] and "float" in env.get('CM_MLPERF_MODEL_PRECISION', ''):
+ if env.get('MLC_DATASET_COMPRESSED', "no").lower() in [
+ "yes", "on", "true"] and "float" in env.get('MLC_MLPERF_MODEL_PRECISION', ''):
# Use all cores for input preprocessing
- env['CM_HOST_USE_ALL_CORES'] = "yes"
- env['CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX2'] = "with_live_preprocessing"
+ env['MLC_HOST_USE_ALL_CORES'] = "yes"
+ env['MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX2'] = "with_live_preprocessing"
return {'return': 0}
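
Note: the CXXFLAGS hunk above derives compile-time defines from three env values, which is how the C++ sources branch per model, backend, and device. The same derivation restated as a standalone snippet (the env values are examples):

    # Mirror of the define derivation in customize.py above.
    env = {
        "MLC_MODEL": "resnet50",
        "MLC_MLPERF_BACKEND": "tflite",
        "MLC_MLPERF_DEVICE": "cpu",
    }
    cxxflags = ["-std=c++17"]
    cxxflags.append("-DMLC_MODEL_" + env["MLC_MODEL"].upper())
    cxxflags.append("-DMLC_MLPERF_BACKEND_" + env["MLC_MLPERF_BACKEND"].upper())
    cxxflags.append("-DMLC_MLPERF_DEVICE_" + env["MLC_MLPERF_DEVICE"].upper())
    print(" ".join(cxxflags))
    # -std=c++17 -DMLC_MODEL_RESNET50 -DMLC_MLPERF_BACKEND_TFLITE -DMLC_MLPERF_DEVICE_CPU
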
diff --git a/script/app-mlperf-inference-ctuning-cpp-tflite/inc/benchmark.h b/script/app-mlperf-inference-ctuning-cpp-tflite/inc/benchmark.h
index 76f1209a8..c63a4e221 100644
--- a/script/app-mlperf-inference-ctuning-cpp-tflite/inc/benchmark.h
+++ b/script/app-mlperf-inference-ctuning-cpp-tflite/inc/benchmark.h
@@ -98,25 +98,25 @@ class Accumulator {
class BenchmarkSettings {
public:
- const std::string images_dir = getenv_s("CM_DATASET_PREPROCESSED_PATH");
- const std::string available_images_file = getenv_s("CM_DATASET_PREPROCESSED_IMAGES_LIST");
- const bool skip_internal_preprocessing = (getenv_opt_s("CM_DATASET_COMPRESSED", "off") == "off");
- const std::string result_dir = getenv_s("CM_MLPERF_OUTPUT_DIR");
- const std::string input_layer_name = getenv_s("CM_ML_MODEL_INPUT_LAYER_NAME");
- const std::string output_layer_name = getenv_s("CM_ML_MODEL_OUTPUT_LAYER_NAME");
- const int images_in_memory_max = getenv_i("CM_LOADGEN_BUFFER_SIZE");
- const int image_size = getenv_i("CM_DATASET_INPUT_SQUARE_SIDE");
+ const std::string images_dir = getenv_s("MLC_DATASET_PREPROCESSED_PATH");
+ const std::string available_images_file = getenv_s("MLC_DATASET_PREPROCESSED_IMAGES_LIST");
+ const bool skip_internal_preprocessing = (getenv_opt_s("MLC_DATASET_COMPRESSED", "off") == "off");
+ const std::string result_dir = getenv_s("MLC_MLPERF_OUTPUT_DIR");
+ const std::string input_layer_name = getenv_s("MLC_ML_MODEL_INPUT_LAYER_NAME");
+ const std::string output_layer_name = getenv_s("MLC_ML_MODEL_OUTPUT_LAYER_NAME");
+ const int images_in_memory_max = getenv_i("MLC_LOADGEN_BUFFER_SIZE");
+ const int image_size = getenv_i("MLC_DATASET_INPUT_SQUARE_SIDE");
const int batch_size = 1;
const int num_channels = 3;
const int num_classes = 1000;
- const bool normalize_img = getenv_b("CM_ML_MODEL_NORMALIZE_DATA");
+ const bool normalize_img = getenv_b("MLC_ML_MODEL_NORMALIZE_DATA");
- const bool subtract_mean = getenv_b("CM_ML_MODEL_SUBTRACT_MEANS");
- const char *given_channel_means_str = getenv("CM_ML_MODEL_GIVEN_CHANNEL_MEANS");
+ const bool subtract_mean = getenv_b("MLC_ML_MODEL_SUBTRACT_MEANS");
+ const char *given_channel_means_str = getenv("MLC_ML_MODEL_GIVEN_CHANNEL_MEANS");
- const bool trigger_cold_run = getenv_b("CM_MLPERF_LOADGEN_TRIGGER_COLD_RUN");
+ const bool trigger_cold_run = getenv_b("MLC_MLPERF_LOADGEN_TRIGGER_COLD_RUN");
- const int verbosity_level = getenv_i("CM_VERBOSE");
+ const int verbosity_level = getenv_i("MLC_VERBOSE");
BenchmarkSettings(enum MODEL_TYPE mode = MODEL_TYPE::LITE) {
@@ -130,11 +130,11 @@ class BenchmarkSettings {
switch (mode)
{
case MODEL_TYPE::LITE:
- _graph_file = getenv_s("CM_ML_MODEL_FILE_WITH_PATH");
+ _graph_file = getenv_s("MLC_ML_MODEL_FILE_WITH_PATH");
break;
case MODEL_TYPE::TF_FROZEN:
- _graph_file = getenv_s("CM_ML_MODEL_FILE_WITH_PATH");
+ _graph_file = getenv_s("MLC_ML_MODEL_FILE_WITH_PATH");
break;
default:
@@ -144,13 +144,13 @@ class BenchmarkSettings {
};
_number_of_threads = std::thread::hardware_concurrency();
- if (getenv_opt_s("CM_HOST_USE_ALL_CORES", "no") != "yes") {
+ if (getenv_opt_s("MLC_HOST_USE_ALL_CORES", "no") != "yes") {
_number_of_threads = _number_of_threads < 1 ? 1 : _number_of_threads;
- _number_of_threads = !getenv("CM_HOST_CPU_TOTAL_CORES")
+ _number_of_threads = !getenv("MLC_HOST_CPU_TOTAL_CORES")
? _number_of_threads
- : getenv_i("CM_HOST_CPU_TOTAL_CORES");
- if (getenv_i("CM_HOST_CPU_TOTAL_CORES") && getenv_i("CM_HOST_CPU_THREADS_PER_CORE")) {
- _number_of_threads = getenv_i("CM_HOST_CPU_TOTAL_CORES") / getenv_i("CM_HOST_CPU_THREADS_PER_CORE");
+ : getenv_i("MLC_HOST_CPU_TOTAL_CORES");
+ if (getenv_i("MLC_HOST_CPU_TOTAL_CORES") && getenv_i("MLC_HOST_CPU_THREADS_PER_CORE")) {
+ _number_of_threads = getenv_i("MLC_HOST_CPU_TOTAL_CORES") / getenv_i("MLC_HOST_CPU_THREADS_PER_CORE");
}
}
// Print settings
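
Note: the thread-count selection above is easy to misread in C++. Restated: MLC_HOST_USE_ALL_CORES=yes keeps hardware concurrency untouched; otherwise MLC_HOST_CPU_TOTAL_CORES overrides it, and when MLC_HOST_CPU_THREADS_PER_CORE is also set the core count is divided by it, pinning one thread per physical core. A Python restatement of the same precedence:

    import os

    def pick_thread_count() -> int:
        """Restates the BenchmarkSettings thread logic from benchmark.h above."""
        n = os.cpu_count() or 1  # std::thread::hardware_concurrency() analogue
        if os.environ.get("MLC_HOST_USE_ALL_CORES", "no") == "yes":
            return n
        total = int(os.environ.get("MLC_HOST_CPU_TOTAL_CORES", "0") or 0)
        per_core = int(os.environ.get("MLC_HOST_CPU_THREADS_PER_CORE", "0") or 0)
        if total:
            n = total
        if total and per_core:
            n = total // per_core  # one thread per physical core
        return max(n, 1)
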
diff --git a/script/app-mlperf-inference-ctuning-cpp-tflite/meta.yaml b/script/app-mlperf-inference-ctuning-cpp-tflite/meta.yaml
index e66ae2bac..815a2a152 100644
--- a/script/app-mlperf-inference-ctuning-cpp-tflite/meta.yaml
+++ b/script/app-mlperf-inference-ctuning-cpp-tflite/meta.yaml
@@ -3,27 +3,27 @@ automation_alias: script
automation_uid: 5b4e0237da074764
category: Modular MLPerf inference benchmark pipeline
default_env:
- CM_DATASET_COMPRESSED: 'off'
- CM_DATASET_INPUT_SQUARE_SIDE: '224'
- CM_FAST_COMPILATION: 'yes'
- CM_LOADGEN_BUFFER_SIZE: '1024'
- CM_MLPERF_LOADGEN_MODE: accuracy
- CM_MLPERF_LOADGEN_SCENARIO: SingleStream
- CM_MLPERF_LOADGEN_TRIGGER_COLD_RUN: '0'
- CM_MLPERF_OUTPUT_DIR: .
- CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: tflite_cpp
- CM_MLPERF_TFLITE_USE_NEON: '0'
- CM_MLPERF_TFLITE_USE_OPENCL: '0'
- CM_ML_MODEL_GIVEN_CHANNEL_MEANS: 123.68 116.78 103.94
- CM_ML_MODEL_NORMALIZE_DATA: '0'
- CM_ML_MODEL_SUBTRACT_MEANS: '1'
- CM_VERBOSE: '0'
+ MLC_DATASET_COMPRESSED: 'off'
+ MLC_DATASET_INPUT_SQUARE_SIDE: '224'
+ MLC_FAST_COMPILATION: 'yes'
+ MLC_LOADGEN_BUFFER_SIZE: '1024'
+ MLC_MLPERF_LOADGEN_MODE: accuracy
+ MLC_MLPERF_LOADGEN_SCENARIO: SingleStream
+ MLC_MLPERF_LOADGEN_TRIGGER_COLD_RUN: '0'
+ MLC_MLPERF_OUTPUT_DIR: .
+ MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: tflite_cpp
+ MLC_MLPERF_TFLITE_USE_NEON: '0'
+ MLC_MLPERF_TFLITE_USE_OPENCL: '0'
+ MLC_ML_MODEL_GIVEN_CHANNEL_MEANS: 123.68 116.78 103.94
+ MLC_ML_MODEL_NORMALIZE_DATA: '0'
+ MLC_ML_MODEL_SUBTRACT_MEANS: '1'
+ MLC_VERBOSE: '0'
deps:
- tags: detect,os
- tags: detect,cpu
- tags: get,sys-utils-cm
- enable_if_env:
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- gpu
tags: get,cuda
- names:
@@ -33,10 +33,10 @@ deps:
- inference-src
tags: get,mlcommons,inference,src
- enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- tflite
- armnn_tflite
- CM_MODEL:
+ MLC_MODEL:
- mobilenet
names:
- ml-model
@@ -44,10 +44,10 @@ deps:
- mobilenet-model
tags: get,ml-model,mobilenet,raw,_tflite
- enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- tflite
- armnn_tflite
- CM_MODEL:
+ MLC_MODEL:
- resnet50
names:
- ml-model
@@ -55,9 +55,9 @@ deps:
- resnet50-model
tags: get,ml-model,resnet50,raw,_tflite,_no-argmax
- enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- tf
- CM_MODEL:
+ MLC_MODEL:
- resnet50
names:
- ml-model
@@ -65,10 +65,10 @@ deps:
- resnet50-model
tags: get,ml-model,resnet50,raw,_tf
- enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- tflite
- armnn_tflite
- CM_MODEL:
+ MLC_MODEL:
- efficientnet
names:
- ml-model
@@ -80,39 +80,39 @@ deps:
- tflite
tags: get,tensorflow,lib,_tflite
- enable_if_env:
- CM_MLPERF_TFLITE_USE_ARMNN:
+ MLC_MLPERF_TFLITE_USE_ARMNN:
- 'yes'
names:
- armnn
- lib-armnn
tags: get,lib,armnn
input_mapping:
- compressed_dataset: CM_DATASET_COMPRESSED
- count: CM_MLPERF_LOADGEN_QUERY_COUNT
- mlperf_conf: CM_MLPERF_CONF
- mode: CM_MLPERF_LOADGEN_MODE
- output_dir: CM_MLPERF_OUTPUT_DIR
- performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
- scenario: CM_MLPERF_LOADGEN_SCENARIO
- user_conf: CM_MLPERF_USER_CONF
- verbose: CM_VERBOSE
+ compressed_dataset: MLC_DATASET_COMPRESSED
+ count: MLC_MLPERF_LOADGEN_QUERY_COUNT
+ mlperf_conf: MLC_MLPERF_CONF
+ mode: MLC_MLPERF_LOADGEN_MODE
+ output_dir: MLC_MLPERF_OUTPUT_DIR
+ performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
+ scenario: MLC_MLPERF_LOADGEN_SCENARIO
+ user_conf: MLC_MLPERF_USER_CONF
+ verbose: MLC_VERBOSE
new_env_keys:
-- CM_MLPERF_*
-- CM_ML_MODEL_*
-- CM_HW_NAME
+- MLC_MLPERF_*
+- MLC_ML_MODEL_*
+- MLC_HW_NAME
new_state_keys:
-- CM_SUT_*
+- MLC_SUT_*
post_deps:
- names:
- compiler-program
skip_if_env:
- CM_MLPERF_SKIP_RUN:
+ MLC_MLPERF_SKIP_RUN:
- 'yes'
tags: compile,program
- names:
- mlperf-runner
skip_if_env:
- CM_MLPERF_SKIP_RUN:
+ MLC_MLPERF_SKIP_RUN:
- 'yes'
tags: benchmark-mlperf
- names:
@@ -123,40 +123,40 @@ prehook_deps:
- user-conf-generator
tags: generate,user-conf,mlperf,inference
- enable_if_env:
- CM_MLPERF_SKIP_RUN:
+ MLC_MLPERF_SKIP_RUN:
- 'no'
- CM_MODEL:
+ MLC_MODEL:
- resnet50
names:
- imagenet-preprocessed
- preprocessed-dataset
skip_if_env:
- CM_DATASET_COMPRESSED:
+ MLC_DATASET_COMPRESSED:
- 'on'
tags: get,dataset,preprocessed,imagenet,_for.resnet50,_rgb32,_NHWC
update_tags_from_env:
- - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS
+ - MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS
- enable_if_env:
- CM_MLPERF_SKIP_RUN:
+ MLC_MLPERF_SKIP_RUN:
- 'no'
- CM_MODEL:
+ MLC_MODEL:
- mobilenet
- efficientnet
names:
- imagenet-preprocessed
- preprocessed-dataset
skip_if_env:
- CM_DATASET_COMPRESSED:
+ MLC_DATASET_COMPRESSED:
- 'on'
tags: get,dataset,preprocessed,imagenet,_for.mobilenet,_rgb32,_NHWC
update_tags_from_env:
- - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS
+ - MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS
- enable_if_env:
- CM_DATASET_COMPRESSED:
+ MLC_DATASET_COMPRESSED:
- 'on'
- CM_MLPERF_SKIP_RUN:
+ MLC_MLPERF_SKIP_RUN:
- 'no'
- CM_MODEL:
+ MLC_MODEL:
- mobilenet
- efficientnet
names:
@@ -164,20 +164,20 @@ prehook_deps:
- preprocessed-dataset
tags: get,dataset,preprocessed,imagenet,_for.mobilenet,_rgb8,_NHWC
update_tags_from_env:
- - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS
+ - MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS
- enable_if_env:
- CM_DATASET_COMPRESSED:
+ MLC_DATASET_COMPRESSED:
- 'on'
- CM_MLPERF_SKIP_RUN:
+ MLC_MLPERF_SKIP_RUN:
- 'no'
- CM_MODEL:
+ MLC_MODEL:
- resnet50
names:
- imagenet-preprocessed
- preprocessed-dataset
tags: get,dataset,preprocessed,imagenet,_for.resnet50,_rgb8,_NHWC
update_tags_from_env:
- - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS
+ - MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS
tags:
- app
- mlcommons
@@ -191,23 +191,23 @@ variations:
default_variations:
optimization-target: use-neon
env:
- CM_MLPERF_TFLITE_USE_ARMNN: 'yes'
- CM_TMP_LINK_LIBS: tensorflowlite,armnn
+ MLC_MLPERF_TFLITE_USE_ARMNN: 'yes'
+ MLC_TMP_LINK_LIBS: tensorflowlite,armnn
armnn,tflite:
env:
- CM_MLPERF_BACKEND: armnn_tflite
- CM_MLPERF_BACKEND_VERSION: <<>>
- CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: tflite_armnn_cpp
- CM_TMP_LINK_LIBS: tensorflowlite,armnn,armnnTfLiteParser
- CM_TMP_SRC_FOLDER: armnn
+ MLC_MLPERF_BACKEND: armnn_tflite
+ MLC_MLPERF_BACKEND_VERSION: <<>>
+ MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: tflite_armnn_cpp
+ MLC_TMP_LINK_LIBS: tensorflowlite,armnn,armnnTfLiteParser
+ MLC_TMP_SRC_FOLDER: armnn
cpu:
default: true
env:
- CM_MLPERF_DEVICE: cpu
+ MLC_MLPERF_DEVICE: cpu
group: device
efficientnet:
env:
- CM_MODEL: efficientnet
+ MLC_MODEL: efficientnet
group: model
fp32:
adr:
@@ -217,12 +217,12 @@ variations:
tags: _float32
default: true
env:
- CM_MLPERF_MODEL_PRECISION: float32
+ MLC_MLPERF_MODEL_PRECISION: float32
group: precision
gpu:
env:
- CM_MLPERF_DEVICE: gpu
- CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart
+ MLC_MLPERF_DEVICE: gpu
+ MLC_MLPERF_DEVICE_LIB_NAMESPEC: cudart
group: device
int8:
adr:
@@ -231,34 +231,34 @@ variations:
preprocessed-dataset:
tags: _int8
env:
- CM_DATASET_COMPRESSED: 'on'
- CM_MLPERF_MODEL_PRECISION: int8
+ MLC_DATASET_COMPRESSED: 'on'
+ MLC_MLPERF_MODEL_PRECISION: int8
group: precision
mobilenet:
env:
- CM_MODEL: mobilenet
+ MLC_MODEL: mobilenet
group: model
resnet50:
default: true
env:
- CM_MODEL: resnet50
+ MLC_MODEL: resnet50
group: model
singlestream:
default: true
env:
- CM_MLPERF_LOADGEN_SCENARIO: SingleStream
+ MLC_MLPERF_LOADGEN_SCENARIO: SingleStream
group: loadgen-scenario
tf:
env:
- CM_MLPERF_BACKEND: tf
+ MLC_MLPERF_BACKEND: tf
group: backend
tflite:
default: true
env:
- CM_MLPERF_BACKEND: tflite
- CM_MLPERF_BACKEND_VERSION: master
- CM_TMP_LINK_LIBS: tensorflowlite
- CM_TMP_SRC_FOLDER: src
+ MLC_MLPERF_BACKEND: tflite
+ MLC_MLPERF_BACKEND_VERSION: master
+ MLC_TMP_LINK_LIBS: tensorflowlite
+ MLC_TMP_SRC_FOLDER: src
group: backend
uint8:
adr:
@@ -267,16 +267,16 @@ variations:
preprocessed-dataset:
tags: _int8
env:
- CM_DATASET_COMPRESSED: 'on'
- CM_MLPERF_MODEL_PRECISION: uint8
+ MLC_DATASET_COMPRESSED: 'on'
+ MLC_MLPERF_MODEL_PRECISION: uint8
group: precision
use-neon:
env:
- CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: using_neon
- CM_MLPERF_TFLITE_USE_NEON: '1'
+ MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: using_neon
+ MLC_MLPERF_TFLITE_USE_NEON: '1'
group: optimization-target
use-opencl:
env:
- CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: using_opencl
- CM_MLPERF_TFLITE_USE_OPENCL: '1'
+ MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: using_opencl
+ MLC_MLPERF_TFLITE_USE_OPENCL: '1'
group: optimization-target
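
Note: the variations in this meta.yaml behave as env overlays: default_env is the base, the selected group variations (backend, device, model, precision, ...) layer their env on top, and combined variations such as armnn,tflite apply last — which is how MLC_TMP_SRC_FOLDER flips from src to armnn. A deliberately simplified sketch of that merge; the real framework also handles base:, default_variations, and tag updates, all omitted here:

    # Illustrative only: how variation env overlays could compose.
    default_env = {"MLC_MLPERF_LOADGEN_SCENARIO": "SingleStream", "MLC_VERBOSE": "0"}
    variations = {
        "tflite": {"MLC_MLPERF_BACKEND": "tflite", "MLC_TMP_SRC_FOLDER": "src"},
        "armnn": {"MLC_MLPERF_TFLITE_USE_ARMNN": "yes"},
        "armnn,tflite": {"MLC_MLPERF_BACKEND": "armnn_tflite",
                         "MLC_TMP_SRC_FOLDER": "armnn"},
    }

    def resolve(selected):
        env = dict(default_env)
        for name in selected:                  # single variations first
            env.update(variations.get(name, {}))
        combo = ",".join(sorted(selected))     # then the combined override
        env.update(variations.get(combo, {}))
        return env

    print(resolve({"armnn", "tflite"})["MLC_TMP_SRC_FOLDER"])  # -> armnn
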
diff --git a/script/app-mlperf-inference-ctuning-cpp-tflite/src/classification.cpp b/script/app-mlperf-inference-ctuning-cpp-tflite/src/classification.cpp
index 9493f5430..dbe464a9e 100644
--- a/script/app-mlperf-inference-ctuning-cpp-tflite/src/classification.cpp
+++ b/script/app-mlperf-inference-ctuning-cpp-tflite/src/classification.cpp
@@ -295,15 +295,15 @@ void TestSingleStream(Program *prg) {
SystemUnderTestSingleStream sut(prg);
QuerySampleLibrarySingleStream qsl(prg);
- const std::string mlperf_conf_path = getenv_s("CM_MLPERF_CONF");
- const std::string user_conf_path = getenv_s("CM_MLPERF_USER_CONF");
- const std::string audit_conf_path = getenv_opt_s("CM_MLPERF_INFERENCE_AUDIT_PATH","");
+ const std::string mlperf_conf_path = getenv_s("MLC_MLPERF_CONF");
+ const std::string user_conf_path = getenv_s("MLC_MLPERF_USER_CONF");
+ const std::string audit_conf_path = getenv_opt_s("MLC_MLPERF_INFERENCE_AUDIT_PATH","");
- std::string model_name = getenv_opt_s("CM_MODEL", "unknown_model");
- std::string logs_dir = getenv_opt_s("CM_MLPERF_LOADGEN_LOGS_DIR", "");
+ std::string model_name = getenv_opt_s("MLC_MODEL", "unknown_model");
+ std::string logs_dir = getenv_opt_s("MLC_MLPERF_LOADGEN_LOGS_DIR", "");
- const std::string scenario_string = getenv_s("CM_MLPERF_LOADGEN_SCENARIO");
- const std::string mode_string = getenv_s("CM_MLPERF_LOADGEN_MODE");
+ const std::string scenario_string = getenv_s("MLC_MLPERF_LOADGEN_SCENARIO");
+ const std::string mode_string = getenv_s("MLC_MLPERF_LOADGEN_MODE");
std::cout << "Path to mlperf.conf : " << mlperf_conf_path << std::endl;
std::cout << "Path to user.conf : " << user_conf_path << std::endl;
diff --git a/script/app-mlperf-inference-dummy/customize.py b/script/app-mlperf-inference-dummy/customize.py
index 40e41f738..f200e915b 100644
--- a/script/app-mlperf-inference-dummy/customize.py
+++ b/script/app-mlperf-inference-dummy/customize.py
@@ -11,20 +11,20 @@ def preprocess(i):
return {'return': 1, 'error': 'Windows is not supported in this script yet'}
env = i['env']
- if env.get('CM_MLPERF_SKIP_RUN', '') == "yes":
+ if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes":
return {'return': 0}
- if 'CM_MODEL' not in env:
+ if 'MLC_MODEL' not in env:
return {
'return': 1, 'error': 'Please select a variation specifying the model to run'}
- if 'CM_MLPERF_BACKEND' not in env:
+ if 'MLC_MLPERF_BACKEND' not in env:
return {'return': 1,
'error': 'Please select a variation specifying the backend'}
- if 'CM_MLPERF_DEVICE' not in env:
+ if 'MLC_MLPERF_DEVICE' not in env:
return {
'return': 1, 'error': 'Please select a variation specifying the device to run on'}
- r = get_run_cmd(env['CM_MODEL'], i)
+ r = get_run_cmd(env['MLC_MODEL'], i)
if r['return'] > 0:
return r
run_cmd = r['run_cmd']
@@ -37,16 +37,16 @@ def preprocess(i):
def get_run_cmd(model, i):
env = i['env']
if "gptj" in model:
- scenario = env['CM_MLPERF_LOADGEN_SCENARIO']
- device = env['CM_MLPERF_DEVICE']
- mode = env['CM_MLPERF_LOADGEN_MODE']
- outdir = env['CM_MLPERF_OUTPUT_DIR']
- mlperf_conf_path = env['CM_MLPERF_CONF']
- user_conf_path = env['CM_MLPERF_USER_CONF']
- api_server = env.get('CM_MLPERF_INFERENCE_API_SERVER', 'localhost')
+ scenario = env['MLC_MLPERF_LOADGEN_SCENARIO']
+ device = env['MLC_MLPERF_DEVICE']
+ mode = env['MLC_MLPERF_LOADGEN_MODE']
+ outdir = env['MLC_MLPERF_OUTPUT_DIR']
+ mlperf_conf_path = env['MLC_MLPERF_CONF']
+ user_conf_path = env['MLC_MLPERF_USER_CONF']
+ api_server = env.get('MLC_MLPERF_INFERENCE_API_SERVER', 'localhost')
model_path = env['GPTJ_CHECKPOINT_PATH']
- dataset_path = env['CM_DATASET_CNNDM_EVAL_PATH']
- precision = env['CM_MLPERF_MODEL_PRECISION']
+ dataset_path = env['MLC_DATASET_CNNDM_EVAL_PATH']
+ precision = env['MLC_MLPERF_MODEL_PRECISION']
if mode == "accuracy":
accuracy_string = " --accuracy "
else:
@@ -55,7 +55,7 @@ def get_run_cmd(model, i):
run_cmd = f"python3 -u main.py --scenario {scenario} --model-path {model_path} --api-server {api_server} --api-model-name gpt-j-cnn --mlperf-conf {mlperf_conf_path} {accuracy_string} --vllm --user-conf {user_conf_path} --dataset-path {dataset_path} --output-log-dir {outdir} --dtype float32 --device {device} "
submitter = "CTuning"
run_dir = os.path.join(
- env['CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO'],
+ env['MLC_MLPERF_INFERENCE_IMPLEMENTATION_REPO'],
"open",
submitter,
"code",
diff --git a/script/app-mlperf-inference-dummy/meta.yaml b/script/app-mlperf-inference-dummy/meta.yaml
index 1343835b6..f8876eb81 100644
--- a/script/app-mlperf-inference-dummy/meta.yaml
+++ b/script/app-mlperf-inference-dummy/meta.yaml
@@ -21,51 +21,51 @@ tags:
# Default environment
default_env:
- CM_MLPERF_LOADGEN_SCENARIO: Offline
- CM_MLPERF_LOADGEN_MODE: performance
- CM_SKIP_PREPROCESS_DATASET: 'no'
- CM_SKIP_MODEL_DOWNLOAD: 'no'
- CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: dummy_harness
- CM_MLPERF_SKIP_RUN: 'no'
+ MLC_MLPERF_LOADGEN_SCENARIO: Offline
+ MLC_MLPERF_LOADGEN_MODE: performance
+ MLC_SKIP_PREPROCESS_DATASET: 'no'
+ MLC_SKIP_MODEL_DOWNLOAD: 'no'
+ MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: dummy_harness
+ MLC_MLPERF_SKIP_RUN: 'no'
env:
- CM_CALL_MLPERF_RUNNER: 'no'
+ MLC_CALL_MLPERF_RUNNER: 'no'
# Map script inputs to environment variables
input_mapping:
- count: CM_MLPERF_LOADGEN_QUERY_COUNT
- max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE
- mlperf_conf: CM_MLPERF_CONF
- mode: CM_MLPERF_LOADGEN_MODE
- output_dir: CM_MLPERF_OUTPUT_DIR
- performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
- scenario: CM_MLPERF_LOADGEN_SCENARIO
- user_conf: CM_MLPERF_USER_CONF
- skip_preprocess: CM_SKIP_PREPROCESS_DATASET
- skip_preprocessing: CM_SKIP_PREPROCESS_DATASET
- target_qps: CM_MLPERF_LOADGEN_TARGET_QPS
- offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
- server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS
- target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY
- singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
- multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
- performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
- rerun: CM_RERUN
- results_repo: CM_MLPERF_INFERENCE_RESULTS_REPO
+ count: MLC_MLPERF_LOADGEN_QUERY_COUNT
+ max_batchsize: MLC_MLPERF_LOADGEN_MAX_BATCHSIZE
+ mlperf_conf: MLC_MLPERF_CONF
+ mode: MLC_MLPERF_LOADGEN_MODE
+ output_dir: MLC_MLPERF_OUTPUT_DIR
+ performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
+ scenario: MLC_MLPERF_LOADGEN_SCENARIO
+ user_conf: MLC_MLPERF_USER_CONF
+ skip_preprocess: MLC_SKIP_PREPROCESS_DATASET
+ skip_preprocessing: MLC_SKIP_PREPROCESS_DATASET
+ target_qps: MLC_MLPERF_LOADGEN_TARGET_QPS
+ offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
+ server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS
+ target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY
+ singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
+ multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
+ performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
+ rerun: MLC_RERUN
+ results_repo: MLC_MLPERF_INFERENCE_RESULTS_REPO
new_state_keys:
- mlperf-inference-implementation
- - CM_SUT_*
+ - MLC_SUT_*
# Env keys which are exposed to higher level scripts
new_env_keys:
- - CM_MLPERF_*
- - CM_DATASET_*
- - CM_HW_NAME
- - CM_ML_MODEL_*
- - CM_MAX_EXAMPLES
- - CM_IMAGENET_ACCURACY_DTYPE
- - CM_SQUAD_ACCURACY_DTYPE
+ - MLC_MLPERF_*
+ - MLC_DATASET_*
+ - MLC_HW_NAME
+ - MLC_ML_MODEL_*
+ - MLC_MAX_EXAMPLES
+ - MLC_IMAGENET_ACCURACY_DTYPE
+ - MLC_SQUAD_ACCURACY_DTYPE
# Dependencies on other CM scripts
@@ -110,9 +110,9 @@ deps:
inference-results
inference-code
update_tags_from_env_with_prefix:
- _repo.: CM_MLPERF_INFERENCE_RESULTS_REPO
+ _repo.: MLC_MLPERF_INFERENCE_RESULTS_REPO
env:
- CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO
+ MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_MLPERF_INFERENCE_IMPLEMENTATION_REPO
extra_cache_tags: inference-implementation,mlperf
# Post dependencies to run this app including for power measurement
@@ -122,7 +122,7 @@ post_deps:
- runner
- mlperf-runner
skip_if_env:
- CM_MLPERF_SKIP_RUN:
+ MLC_MLPERF_SKIP_RUN:
- 'yes'
- yes
tags: benchmark-mlperf
@@ -138,18 +138,18 @@ variations:
group: device
default: true
env:
- CM_MLPERF_DEVICE: cpu
+ MLC_MLPERF_DEVICE: cpu
cuda:
group: device
env:
- CM_MLPERF_DEVICE: gpu
- CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart
+ MLC_MLPERF_DEVICE: gpu
+ MLC_MLPERF_DEVICE_LIB_NAMESPEC: cudart
pytorch:
group: backend
default: true
env:
- CM_MLPERF_BACKEND: pytorch
+ MLC_MLPERF_BACKEND: pytorch
pytorch,cuda:
deps:
@@ -168,14 +168,14 @@ variations:
group: model
default: true
env:
- CM_MODEL: resnet50
+ MLC_MODEL: resnet50
retinanet:
group: model
base:
- bs.1
env:
- CM_MODEL: retinanet
+ MLC_MODEL: retinanet
bert_:
{}
@@ -185,15 +185,15 @@ variations:
base:
- bert_
env:
- CM_MODEL: bert-99
- CM_SQUAD_ACCURACY_DTYPE: float32
+ MLC_MODEL: bert-99
+ MLC_SQUAD_ACCURACY_DTYPE: float32
bert-99.9:
group: model
base:
- bert_
env:
- CM_MODEL: bert-99.9
+ MLC_MODEL: bert-99.9
bert_:
{}
@@ -203,15 +203,15 @@ variations:
base:
- bert_
env:
- CM_MODEL: bert-99
- CM_SQUAD_ACCURACY_DTYPE: float32
+ MLC_MODEL: bert-99
+ MLC_SQUAD_ACCURACY_DTYPE: float32
bert-99.9:
group: model
base:
- bert_
env:
- CM_MODEL: bert-99.9
+ MLC_MODEL: bert-99.9
gptj_:
deps:
@@ -225,15 +225,15 @@ variations:
base:
- gptj_
env:
- CM_MODEL: gptj-99
- CM_SQUAD_ACCURACY_DTYPE: float32
+ MLC_MODEL: gptj-99
+ MLC_SQUAD_ACCURACY_DTYPE: float32
gptj-99.9:
group: model
base:
- gptj_
env:
- CM_MODEL: gptj-99.9
+ MLC_MODEL: gptj-99.9
llama2-70b_:
{}
@@ -243,19 +243,19 @@ variations:
base:
- llama2-70b_
env:
- CM_MODEL: llama2-70b-99
+ MLC_MODEL: llama2-70b-99
llama2-70b-99.9:
group: model
base:
- llama2-70b_
env:
- CM_MODEL: llama2-70b-99.9
+ MLC_MODEL: llama2-70b-99.9
singlestream:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: SingleStream
+ MLC_MLPERF_LOADGEN_SCENARIO: SingleStream
singlestream,resnet50:
default_variations:
@@ -268,17 +268,17 @@ variations:
multistream:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: MultiStream
+ MLC_MLPERF_LOADGEN_SCENARIO: MultiStream
offline:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: Offline
+ MLC_MLPERF_LOADGEN_SCENARIO: Offline
server:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: Server
+ MLC_MLPERF_LOADGEN_SCENARIO: Server
uint8:
group: precision
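
Note: input_mapping is the bridge between CLI flags and the env: --scenario=Offline on the command line becomes MLC_MLPERF_LOADGEN_SCENARIO=Offline before run.sh executes. A minimal illustrative translation, assuming plain --key=value flags:

    input_mapping = {
        "scenario": "MLC_MLPERF_LOADGEN_SCENARIO",
        "mode": "MLC_MLPERF_LOADGEN_MODE",
        "output_dir": "MLC_MLPERF_OUTPUT_DIR",
    }

    def map_inputs(argv, mapping):
        """Translate --key=value flags into env assignments (illustrative)."""
        env = {}
        for arg in argv:
            if arg.startswith("--") and "=" in arg:
                key, value = arg[2:].split("=", 1)
                if key in mapping:
                    env[mapping[key]] = value
        return env

    print(map_inputs(["--scenario=Offline", "--mode=accuracy"], input_mapping))
    # {'MLC_MLPERF_LOADGEN_SCENARIO': 'Offline', 'MLC_MLPERF_LOADGEN_MODE': 'accuracy'}
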
diff --git a/script/app-mlperf-inference-dummy/run.sh b/script/app-mlperf-inference-dummy/run.sh
index ddcd0b550..0c6a8fc4a 100644
--- a/script/app-mlperf-inference-dummy/run.sh
+++ b/script/app-mlperf-inference-dummy/run.sh
@@ -1,7 +1,7 @@
#!/bin/bash
-if [[ ${CM_CALL_MLPERF_RUNNER} == "no" ]]; then
- cd ${CM_RUN_DIR}
- cmd=${CM_RUN_CMD}
+if [[ ${MLC_CALL_MLPERF_RUNNER} == "no" ]]; then
+ cd ${MLC_RUN_DIR}
+ cmd=${MLC_RUN_CMD}
echo "${cmd}"
eval "${cmd}"
test $? -eq 0 || exit $?
diff --git a/script/app-mlperf-inference-intel/build_bert_harness.sh b/script/app-mlperf-inference-intel/build_bert_harness.sh
index 4a2b957a9..bb2477caa 100644
--- a/script/app-mlperf-inference-intel/build_bert_harness.sh
+++ b/script/app-mlperf-inference-intel/build_bert_harness.sh
@@ -1,21 +1,21 @@
#!/bin/bash
-export PATH=${CM_CONDA_BIN_PATH}:$PATH
+export PATH=${MLC_CONDA_BIN_PATH}:$PATH
echo $PWD
if [ ! -d harness ]; then
mkdir -p harness
fi
-rm -rf ${CM_CONDA_LIB_PATH}/cmake/mkl/*
+rm -rf ${MLC_CONDA_LIB_PATH}/cmake/mkl/*
-rsync -avz --exclude=".git" ${CM_HARNESS_CODE_ROOT}/ harness/
+rsync -avz --exclude=".git" ${MLC_HARNESS_CODE_ROOT}/ harness/
pushd harness
-rsync -avz --exclude=".git" ${CM_MLPERF_INFERENCE_SOURCE}/ inference/
+rsync -avz --exclude=".git" ${MLC_MLPERF_INFERENCE_SOURCE}/ inference/
test $? -eq 0 || exit $?
pushd mlperf_plugins
rm -rf onednn
-rsync -avz --exclude=".git" ${CM_ONEDNN_INSTALLED_PATH}/ onednn/
+rsync -avz --exclude=".git" ${MLC_ONEDNN_INSTALLED_PATH}/ onednn/
test $? -eq 0 || exit $?
popd
@@ -30,13 +30,13 @@ test $? -eq 0 || exit $?
mkdir -p bert/dataset
cd bert
-ln -sf ${CM_DATASET_SQUAD_VAL_PATH} dataset/dev-v1.1.json
+ln -sf ${MLC_DATASET_SQUAD_VAL_PATH} dataset/dev-v1.1.json
test $? -eq 0 || exit $?
if [ ! -d model ]; then
git clone https://huggingface.co/bert-large-uncased model
cd model
rm pytorch_model.bin
- ln -sf ${CM_ML_MODEL_FILE_WITH_PATH} pytorch_model.bin
+ ln -sf ${MLC_ML_MODEL_FILE_WITH_PATH} pytorch_model.bin
test $? -eq 0 || exit $?
cd ..
fi
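
Note: the harness build scripts lean on ln -sf so that stale dataset and model links from a previous build are replaced rather than erroring out. The same idempotent behaviour in Python, for anything assembling the tree outside bash (illustrative):

    import os

    def force_symlink(target: str, link_name: str) -> None:
        """Equivalent of `ln -sf target link_name` (illustrative)."""
        if os.path.islink(link_name) or os.path.exists(link_name):
            os.remove(link_name)
        os.symlink(target, link_name)
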
diff --git a/script/app-mlperf-inference-intel/build_gptj_harness.sh b/script/app-mlperf-inference-intel/build_gptj_harness.sh
index 3c2f26dc4..5175f4ede 100644
--- a/script/app-mlperf-inference-intel/build_gptj_harness.sh
+++ b/script/app-mlperf-inference-intel/build_gptj_harness.sh
@@ -1,14 +1,14 @@
#!/bin/bash
-export PATH=${CM_CONDA_BIN_PATH}:$PATH
+export PATH=${MLC_CONDA_BIN_PATH}:$PATH
echo $PWD
if [ ! -d harness ]; then
mkdir -p harness
fi
-echo ${CM_HARNESS_CODE_ROOT}
-cd ${CM_HARNESS_CODE_ROOT}
+echo ${MLC_HARNESS_CODE_ROOT}
+cd ${MLC_HARNESS_CODE_ROOT}
cd utils
python -m pip install .
test $? -eq 0 || exit $?
diff --git a/script/app-mlperf-inference-intel/build_resnet50_harness.sh b/script/app-mlperf-inference-intel/build_resnet50_harness.sh
index 92ef96243..3a27e4d3b 100644
--- a/script/app-mlperf-inference-intel/build_resnet50_harness.sh
+++ b/script/app-mlperf-inference-intel/build_resnet50_harness.sh
@@ -1,11 +1,11 @@
-export PATH=${CM_CONDA_BIN_PATH}:$PATH
+export PATH=${MLC_CONDA_BIN_PATH}:$PATH
echo $PWD
export DATA_CAL_DIR=calibration_dataset
-export CHECKPOINT=${CM_ML_MODEL_FILE_WITH_PATH}
+export CHECKPOINT=${MLC_ML_MODEL_FILE_WITH_PATH}
-cd ${CM_HARNESS_CODE_ROOT}
+cd ${MLC_HARNESS_CODE_ROOT}
cd src/ckernels/ && mkdir -p 3rdparty && \
cd 3rdparty && \
@@ -16,7 +16,7 @@ cd src/ckernels/ && mkdir -p 3rdparty && \
export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}
-export IPEX_PATH=${CM_IPEX_INSTALLED_PATH}
+export IPEX_PATH=${MLC_IPEX_INSTALLED_PATH}
export TORCH_PATH=`python -c 'import torch;print(torch.utils.cmake_prefix_path)'`
if [[ -z ${TORCH_PATH} ]]; then
@@ -24,11 +24,11 @@ if [[ -z ${TORCH_PATH} ]]; then
exit 1
fi
-export LOADGEN_DIR="${CM_MLPERF_INFERENCE_LOADGEN_INSTALL_PATH}/../"
-export OPENCV_DIR=${CM_OPENCV_BUILD_PATH}
-export RAPIDJSON_INCLUDE_DIR=${CM_RAPIDJSON_SRC_REPO_PATH}/include
-export GFLAGS_DIR=${CM_GFLAGS_BUILD_PATH}
-export ONEDNN_DIR=${CM_ONEDNN_INSTALLED_PATH}
+export LOADGEN_DIR="${MLC_MLPERF_INFERENCE_LOADGEN_INSTALL_PATH}/../"
+export OPENCV_DIR=${MLC_OPENCV_BUILD_PATH}
+export RAPIDJSON_INCLUDE_DIR=${MLC_RAPIDJSON_SRC_REPO_PATH}/include
+export GFLAGS_DIR=${MLC_GFLAGS_BUILD_PATH}
+export ONEDNN_DIR=${MLC_ONEDNN_INSTALLED_PATH}
export USE_CUDA=0
BUILD_DIR=${PWD}/build
diff --git a/script/app-mlperf-inference-intel/build_retinanet_harness.sh b/script/app-mlperf-inference-intel/build_retinanet_harness.sh
index 0d577b26b..225e6bdc4 100644
--- a/script/app-mlperf-inference-intel/build_retinanet_harness.sh
+++ b/script/app-mlperf-inference-intel/build_retinanet_harness.sh
@@ -1,11 +1,11 @@
-export PATH=${CM_CONDA_BIN_PATH}:$PATH
+export PATH=${MLC_CONDA_BIN_PATH}:$PATH
-cd ${CM_HARNESS_CODE_ROOT}
+cd ${MLC_HARNESS_CODE_ROOT}
export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}
-export IPEX_PATH=${CM_IPEX_INSTALLED_PATH}
+export IPEX_PATH=${MLC_IPEX_INSTALLED_PATH}
export TORCH_PATH=`python -c 'import torch;print(torch.utils.cmake_prefix_path)'`
if [[ -z ${TORCH_PATH} ]]; then
@@ -13,10 +13,10 @@ if [[ -z ${TORCH_PATH} ]]; then
exit 1
fi
-export LOADGEN_DIR="${CM_MLPERF_INFERENCE_LOADGEN_INSTALL_PATH}/../"
-export OPENCV_DIR=${CM_OPENCV_BUILD_PATH}
-export RAPIDJSON_INCLUDE_DIR=${CM_RAPIDJSON_SRC_REPO_PATH}/include
-export GFLAGS_DIR=${CM_GFLAGS_BUILD_PATH}
+export LOADGEN_DIR="${MLC_MLPERF_INFERENCE_LOADGEN_INSTALL_PATH}/../"
+export OPENCV_DIR=${MLC_OPENCV_BUILD_PATH}
+export RAPIDJSON_INCLUDE_DIR=${MLC_RAPIDJSON_SRC_REPO_PATH}/include
+export GFLAGS_DIR=${MLC_GFLAGS_BUILD_PATH}
export USE_CUDA=0
BUILD_DIR=${PWD}/build
diff --git a/script/app-mlperf-inference-intel/build_sdxl_harness.sh b/script/app-mlperf-inference-intel/build_sdxl_harness.sh
index a0817e495..1fdebbf55 100644
--- a/script/app-mlperf-inference-intel/build_sdxl_harness.sh
+++ b/script/app-mlperf-inference-intel/build_sdxl_harness.sh
@@ -1,4 +1,4 @@
-cd ${CM_HARNESS_CODE_ROOT}
+cd ${MLC_HARNESS_CODE_ROOT}
cd utils
cmd=" python -m pip install ."
diff --git a/script/app-mlperf-inference-intel/calibrate_dlrm_v2_model.sh b/script/app-mlperf-inference-intel/calibrate_dlrm_v2_model.sh
index 82aa6906c..7c95d0d29 100644
--- a/script/app-mlperf-inference-intel/calibrate_dlrm_v2_model.sh
+++ b/script/app-mlperf-inference-intel/calibrate_dlrm_v2_model.sh
@@ -1,9 +1,9 @@
#!/bin/bash
-export MODEL_DIR=${CM_ML_MODEL_FILE_WITH_PATH}
+export MODEL_DIR=${MLC_ML_MODEL_FILE_WITH_PATH}
export DATA_DIR=/mnt/dlrm_data
-echo ${CM_HARNESS_CODE_ROOT}
-cd ${CM_HARNESS_CODE_ROOT}
+echo ${MLC_HARNESS_CODE_ROOT}
+cd ${MLC_HARNESS_CODE_ROOT}
python -m pip install scikit-learn==1.3.0 torchsnapshot torchrec==0.3.2
test $? -eq 0 || exit $?
python -m pip install fbgemm-gpu==0.3.2 --index-url https://download.pytorch.org/whl/cpu
diff --git a/script/app-mlperf-inference-intel/calibrate_gptj_int4_model.sh b/script/app-mlperf-inference-intel/calibrate_gptj_int4_model.sh
index 75a0774d5..6e112a681 100644
--- a/script/app-mlperf-inference-intel/calibrate_gptj_int4_model.sh
+++ b/script/app-mlperf-inference-intel/calibrate_gptj_int4_model.sh
@@ -1,8 +1,8 @@
#!/bin/bash
-export PATH=${CM_CONDA_BIN_PATH}:$PATH
+export PATH=${MLC_CONDA_BIN_PATH}:$PATH
-cd ${CM_MLPERF_INFERENCE_INTEL_CALIBRATION_PATH}
+cd ${MLC_MLPERF_INFERENCE_INTEL_CALIBRATION_PATH}
CUR_DIR=$(pwd)
export WORKLOAD_DATA=${CUR_DIR}/data
mkdir -p ${WORKLOAD_DATA}
diff --git a/script/app-mlperf-inference-intel/compile_resnet50.sh b/script/app-mlperf-inference-intel/compile_resnet50.sh
index ee81956ec..8ba7f4812 100644
--- a/script/app-mlperf-inference-intel/compile_resnet50.sh
+++ b/script/app-mlperf-inference-intel/compile_resnet50.sh
@@ -1,9 +1,9 @@
-export PATH=${CM_CONDA_BIN_PATH}:$PATH
+export PATH=${MLC_CONDA_BIN_PATH}:$PATH
-export DATA_CAL_DIR=${CM_HARNESS_CODE_ROOT}/calibration_dataset
-export CHECKPOINT=${CM_ML_MODEL_FILE_WITH_PATH}
+export DATA_CAL_DIR=${MLC_HARNESS_CODE_ROOT}/calibration_dataset
+export CHECKPOINT=${MLC_ML_MODEL_FILE_WITH_PATH}
-cd ${CM_HARNESS_CODE_ROOT}
+cd ${MLC_HARNESS_CODE_ROOT}
bash generate_torch_model.sh
test "$?" -eq 0 || exit "$?"
diff --git a/script/app-mlperf-inference-intel/compile_retinanet.sh b/script/app-mlperf-inference-intel/compile_retinanet.sh
index 7e23b889a..933311523 100644
--- a/script/app-mlperf-inference-intel/compile_retinanet.sh
+++ b/script/app-mlperf-inference-intel/compile_retinanet.sh
@@ -1,11 +1,11 @@
-export PATH=${CM_CONDA_BIN_PATH}:$PATH
+export PATH=${MLC_CONDA_BIN_PATH}:$PATH
-export CALIBRATION_ANNOTATIONS=${CM_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH}
+export CALIBRATION_ANNOTATIONS=${MLC_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH}
-export CALIBRATION_DATA_DIR=${CM_CALIBRATION_DATASET_PATH}
-export MODEL_CHECKPOINT=${CM_ML_MODEL_FILE_WITH_PATH}
+export CALIBRATION_DATA_DIR=${MLC_CALIBRATION_DATASET_PATH}
+export MODEL_CHECKPOINT=${MLC_ML_MODEL_FILE_WITH_PATH}
-cd ${CM_HARNESS_CODE_ROOT}
+cd ${MLC_HARNESS_CODE_ROOT}
bash run_calibration.sh
test "$?" -eq 0 || exit "$?"
diff --git a/script/app-mlperf-inference-intel/customize.py b/script/app-mlperf-inference-intel/customize.py
index 667e5fb86..932817163 100644
--- a/script/app-mlperf-inference-intel/customize.py
+++ b/script/app-mlperf-inference-intel/customize.py
@@ -11,68 +11,68 @@ def preprocess(i):
return {'return': 1, 'error': 'Windows is not supported in this script yet'}
env = i['env']
- if env.get('CM_MLPERF_SKIP_RUN', '') == "yes":
+ if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes":
return {'return': 0}
import json
- if 'CM_MODEL' not in env:
+ if 'MLC_MODEL' not in env:
return {
'return': 1, 'error': 'Please select a variation specifying the model to run'}
- if 'CM_MLPERF_BACKEND' not in env:
+ if 'MLC_MLPERF_BACKEND' not in env:
return {'return': 1,
'error': 'Please select a variation specifying the backend'}
- if 'CM_MLPERF_DEVICE' not in env:
+ if 'MLC_MLPERF_DEVICE' not in env:
return {
'return': 1, 'error': 'Please select a variation specifying the device to run on'}
- ml_model = env['CM_MODEL']
+ ml_model = env['MLC_MODEL']
master_model = ml_model.replace("-99.9", "").replace("-99", "")
master_model = master_model.replace("gptj", "gpt-j")
- backend = env['CM_MLPERF_BACKEND']
- device = env['CM_MLPERF_DEVICE']
+ backend = env['MLC_MLPERF_BACKEND']
+ device = env['MLC_MLPERF_DEVICE']
code_base_folder = backend + '-' + device
- if env.get('CM_MLPERF_INFERENCE_CODE_VERSION', '') == 'v4.0':
+ if env.get('MLC_MLPERF_INFERENCE_CODE_VERSION', '') == 'v4.0':
if 'gptj' in ml_model:
code_base_folder = "ITREX"
if 'dlrm-v2' in ml_model:
code_base_folder = "pytorch-cpu-int8"
harness_root = os.path.join(
- env['CM_MLPERF_INFERENCE_RESULTS_PATH'],
+ env['MLC_MLPERF_INFERENCE_RESULTS_PATH'],
'closed',
'Intel',
'code',
ml_model,
code_base_folder)
- env['CM_HARNESS_CODE_ROOT'] = harness_root
+ env['MLC_HARNESS_CODE_ROOT'] = harness_root
- if env.get('CM_MODEL') == "resnet50":
+ if env.get('MLC_MODEL') == "resnet50":
pass
- elif "bert" in env.get('CM_MODEL'):
+ elif "bert" in env.get('MLC_MODEL'):
pass
- elif "retinanet" in env.get('CM_MODEL'):
+ elif "retinanet" in env.get('MLC_MODEL'):
pass
- elif "gptj" in env.get('CM_MODEL'):
+ elif "gptj" in env.get('MLC_MODEL'):
env['CHECKPOINT_DIR'] = env['GPTJ_CHECKPOINT_PATH']
script_path = i['run_script_input']['path']
- if env['CM_MODEL'] == "retinanet":
- env['CM_DATASET_LIST'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH']
+ if env['MLC_MODEL'] == "retinanet":
+ env['MLC_DATASET_LIST'] = env['MLC_DATASET_ANNOTATIONS_FILE_PATH']
- if 'CM_MLPERF_CONF' not in env:
- env['CM_MLPERF_CONF'] = os.path.join(
- env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf")
- if 'CM_MLPERF_USER_CONF' not in env:
- env['CM_MLPERF_USER_CONF'] = os.path.join(
- env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf")
+ if 'MLC_MLPERF_CONF' not in env:
+ env['MLC_MLPERF_CONF'] = os.path.join(
+ env['MLC_MLPERF_INFERENCE_SOURCE'], "mlperf.conf")
+ if 'MLC_MLPERF_USER_CONF' not in env:
+ env['MLC_MLPERF_USER_CONF'] = os.path.join(
+ env['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf")
- loadgen_mode = env['CM_MLPERF_LOADGEN_MODE']
- env['CONDA_PREFIX'] = env['CM_CONDA_PREFIX']
+ loadgen_mode = env['MLC_MLPERF_LOADGEN_MODE']
+ env['CONDA_PREFIX'] = env['MLC_CONDA_PREFIX']
- if env['CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "calibration":
+ if env['MLC_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "calibration":
if master_model == "resnet50":
i['run_script_input']['script_name'] = "prepare_imagenet_calibration"
elif master_model == "3d-unet":
@@ -81,55 +81,55 @@ def preprocess(i):
i['run_script_input']['script_name'] = "calibrate_dlrm_v2_model"
else:
calibration_root = os.path.join(
- env['CM_MLPERF_INFERENCE_RESULTS_PATH'],
+ env['MLC_MLPERF_INFERENCE_RESULTS_PATH'],
'closed',
'Intel',
'calibration',
master_model,
backend + "-" + device)
- if "gpt" in env['CM_MODEL']:
+ if "gpt" in env['MLC_MODEL']:
i['run_script_input']['script_name'] = "calibrate_gptj_int4_model"
calibration_path = os.path.join(calibration_root, "INT4")
- env['CM_MLPERF_INFERENCE_INTEL_CALIBRATION_PATH'] = calibration_path
+ env['MLC_MLPERF_INFERENCE_INTEL_CALIBRATION_PATH'] = calibration_path
env['INT4_CALIBRATION_DIR'] = os.path.join(
calibration_path, "data", "quantized-int4-model")
- elif env['CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "compilation":
+ elif env['MLC_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "compilation":
if master_model == "resnet50":
i['run_script_input']['script_name'] = "compile_resnet50"
elif master_model == "retinanet":
i['run_script_input']['script_name'] = "compile_retinanet"
- env['CM_ML_MODEL_RETINANET_INT8_FILE_WITH_PATH'] = os.path.join(
- os.path.dirname(env['CM_ML_MODEL_FILE_WITH_PATH']), 'retinanet-int8-model.pth')
+ env['MLC_ML_MODEL_RETINANET_INT8_FILE_WITH_PATH'] = os.path.join(
+ os.path.dirname(env['MLC_ML_MODEL_FILE_WITH_PATH']), 'retinanet-int8-model.pth')
- elif env['CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "build_harness":
+ elif env['MLC_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "build_harness":
print(f"Harness Root: {harness_root}")
- if "bert" in env['CM_MODEL']:
+ if "bert" in env['MLC_MODEL']:
i['run_script_input']['script_name'] = "build_bert_harness"
- env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join(
+ env['MLC_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join(
os.getcwd(), "harness", "build", "bert_inference")
env['DATA_PATH'] = os.path.join(os.getcwd(), "harness", "bert")
- elif "stable-diffusion" in env['CM_MODEL']:
+ elif "stable-diffusion" in env['MLC_MODEL']:
i['run_script_input']['script_name'] = "build_sdxl_harness"
- elif "resnet50" in env['CM_MODEL']:
+ elif "resnet50" in env['MLC_MODEL']:
i['run_script_input']['script_name'] = "build_resnet50_harness"
- env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join(
+ env['MLC_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join(
os.getcwd(), "harness", "build", "resnet50_inference")
env['DATA_PATH'] = os.path.join(os.getcwd(), "harness", "resnet50")
- elif "retinanet" in env['CM_MODEL']:
+ elif "retinanet" in env['MLC_MODEL']:
i['run_script_input']['script_name'] = "build_retinanet_harness"
- env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join(
+ env['MLC_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join(
os.getcwd(), "harness", "build", "retinanet_inference")
- elif "gpt" in env['CM_MODEL']:
+ elif "gpt" in env['MLC_MODEL']:
i['run_script_input']['script_name'] = "build_gptj_harness"
- env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join(
+ env['MLC_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join(
os.getcwd(), "harness", "build", "gptj_inference")
env['DATA_PATH'] = os.path.join(os.getcwd(), "harness", "gptj")
- env['MLPERF_INFERENCE_ROOT'] = env['CM_MLPERF_INFERENCE_SOURCE']
+ env['MLPERF_INFERENCE_ROOT'] = env['MLC_MLPERF_INFERENCE_SOURCE']
if env.get('INTEL_GPTJ_INT4', '') == 'yes':
model_precision = "int4"
- if env.get('CM_MLPERF_INFERENCE_CODE_VERSION', '') == 'v3.1':
+ if env.get('MLC_MLPERF_INFERENCE_CODE_VERSION', '') == 'v3.1':
env['RUN_QUANTIZATION_CMD'] = "bash run_quantization_int4.sh"
else:
env['FILE_TAG'] = "final"
@@ -138,7 +138,7 @@ def preprocess(i):
else:
model_precision = "int8"
env['RUN_QUANTIZATION_CMD'] = "bash run_quantization.sh"
- if env.get('CM_MLPERF_INFERENCE_CODE_VERSION', '') == "v3.1":
+ if env.get('MLC_MLPERF_INFERENCE_CODE_VERSION', '') == "v3.1":
final_model_path = os.path.join(
harness_root, "data", f"gpt-j-{model_precision}-model", "best_model.pt")
else:
@@ -148,93 +148,93 @@ def preprocess(i):
env[model_dir_name] = os.path.dirname(final_model_path)
if not os.path.exists(env[model_dir_name]):
os.makedirs(env[model_dir_name])
- env['CM_ML_MODEL_PATH'] = env[model_dir_name]
- env['CM_ML_MODEL_FILE_WITH_PATH'] = final_model_path
- if env.get('CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH',
+ env['MLC_ML_MODEL_PATH'] = env[model_dir_name]
+ env['MLC_ML_MODEL_FILE_WITH_PATH'] = final_model_path
+ if env.get('MLC_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH',
'') != '' and env.get('INT8_MODEL_DIR', '') != '':
shutil.copy(
- env['CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH'],
+ env['MLC_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH'],
env[model_dir_name])
- if env.get('CM_MLPERF_INFERENCE_INTEL_GPTJ_INT4_MODEL_PATH',
+ if env.get('MLC_MLPERF_INFERENCE_INTEL_GPTJ_INT4_MODEL_PATH',
'') != '' and env.get('INT4_MODEL_DIR', '') != '':
shutil.copy(
- env['CM_MLPERF_INFERENCE_INTEL_GPTJ_INT4_MODEL_PATH'],
+ env['MLC_MLPERF_INFERENCE_INTEL_GPTJ_INT4_MODEL_PATH'],
env[model_dir_name])
- elif env['CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "run_harness":
+ elif env['MLC_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "run_harness":
print(f"Harness Root: {harness_root}")
- if env.get('CM_MLPERF_LOADGEN_MODE', '') == "compliance":
- audit_path = env['CM_MLPERF_INFERENCE_AUDIT_PATH']
- shutil.copy(audit_path, env['CM_RUN_DIR'])
+ if env.get('MLC_MLPERF_LOADGEN_MODE', '') == "compliance":
+ audit_path = env['MLC_MLPERF_INFERENCE_AUDIT_PATH']
+ shutil.copy(audit_path, env['MLC_RUN_DIR'])
- if env['CM_MLPERF_LOADGEN_MODE'] == "accuracy":
+ if env['MLC_MLPERF_LOADGEN_MODE'] == "accuracy":
env['LOADGEN_MODE'] = 'Accuracy'
else:
env['LOADGEN_MODE'] = 'Performance'
- if 'bert' in env['CM_MODEL']:
+ if 'bert' in env['MLC_MODEL']:
env['MODEL_PATH'] = os.path.dirname(os.path.dirname(
- env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH']))
+ env['MLC_MLPERF_INFERENCE_INTEL_HARNESS_PATH']))
env['DATASET_PATH'] = os.path.dirname(os.path.dirname(
- env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH']))
- env['CM_RUN_DIR'] = i['run_script_input']['path']
- env['CM_RUN_CMD'] = "bash run_bert_harness.sh " + \
- ("--accuracy" if env['CM_MLPERF_LOADGEN_MODE']
+ env['MLC_MLPERF_INFERENCE_INTEL_HARNESS_PATH']))
+ env['MLC_RUN_DIR'] = i['run_script_input']['path']
+ env['MLC_RUN_CMD'] = "bash run_bert_harness.sh " + \
+ ("--accuracy" if env['MLC_MLPERF_LOADGEN_MODE']
== "accuracy" else "")
- elif 'resnet50' in env['CM_MODEL']:
+ elif 'resnet50' in env['MLC_MODEL']:
env['MODEL_PATH'] = os.path.dirname(os.path.dirname(
- env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH']))
+ env['MLC_MLPERF_INFERENCE_INTEL_HARNESS_PATH']))
env['DATASET_PATH'] = os.path.dirname(os.path.dirname(
- env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH']))
- env['CM_RUN_DIR'] = env['CM_MLPERF_OUTPUT_DIR']
- env['CM_RUN_CMD'] = f"bash {os.path.join(i['run_script_input']['path'],'run_resnet50_harness.sh')} "
-
- elif 'retinanet' in env['CM_MODEL']:
- env['MODEL_PATH'] = env['CM_ML_MODEL_RETINANET_INT8_FILE_WITH_PATH']
- env['DATA_DIR'] = env['CM_DATASET_PATH_ROOT']
- env['CM_RUN_DIR'] = env['CM_MLPERF_OUTPUT_DIR']
- env['CM_RUN_CMD'] = f"bash {os.path.join(i['run_script_input']['path'],'run_retinanet_harness.sh')} "
-
- elif '3d-unet' in env['CM_MODEL']:
- env['CM_RUN_DIR'] = env['CM_MLPERF_OUTPUT_DIR']
- env['CM_RUN_CMD'] = f"bash {os.path.join(i['run_script_input']['path'],'run_3d-unet_harness.sh')} "
-
- elif 'dlrm' in env['CM_MODEL']:
- env['CM_RUN_DIR'] = i['run_script_input']['path']
- env['CM_RUN_CMD'] = f"bash {os.path.join(i['run_script_input']['path'],'run_dlrm_v2_harness.sh')} "
-
- elif 'stable-diffusion' in env['CM_MODEL']:
- env['CM_RUN_DIR'] = i['run_script_input']['path']
- env['CM_RUN_CMD'] = "bash run_sdxl_harness.sh " + \
- ("--accuracy" if env['CM_MLPERF_LOADGEN_MODE']
+ env['MLC_MLPERF_INFERENCE_INTEL_HARNESS_PATH']))
+ env['MLC_RUN_DIR'] = env['MLC_MLPERF_OUTPUT_DIR']
+ env['MLC_RUN_CMD'] = f"bash {os.path.join(i['run_script_input']['path'],'run_resnet50_harness.sh')} "
+
+ elif 'retinanet' in env['MLC_MODEL']:
+ env['MODEL_PATH'] = env['MLC_ML_MODEL_RETINANET_INT8_FILE_WITH_PATH']
+ env['DATA_DIR'] = env['MLC_DATASET_PATH_ROOT']
+ env['MLC_RUN_DIR'] = env['MLC_MLPERF_OUTPUT_DIR']
+ env['MLC_RUN_CMD'] = f"bash {os.path.join(i['run_script_input']['path'],'run_retinanet_harness.sh')} "
+
+ elif '3d-unet' in env['MLC_MODEL']:
+ env['MLC_RUN_DIR'] = env['MLC_MLPERF_OUTPUT_DIR']
+ env['MLC_RUN_CMD'] = f"bash {os.path.join(i['run_script_input']['path'],'run_3d-unet_harness.sh')} "
+
+ elif 'dlrm' in env['MLC_MODEL']:
+ env['MLC_RUN_DIR'] = i['run_script_input']['path']
+ env['MLC_RUN_CMD'] = f"bash {os.path.join(i['run_script_input']['path'],'run_dlrm_v2_harness.sh')} "
+
+ elif 'stable-diffusion' in env['MLC_MODEL']:
+ env['MLC_RUN_DIR'] = i['run_script_input']['path']
+ env['MLC_RUN_CMD'] = "bash run_sdxl_harness.sh " + \
+ ("--accuracy" if env['MLC_MLPERF_LOADGEN_MODE']
== "accuracy" else "")
- elif "gptj" in env['CM_MODEL']:
- env['CM_RUN_DIR'] = i['run_script_input']['path']
- if env.get('CM_MLPERF_INFERENCE_CODE_VERSION', '') == "v3.1":
- env['CM_RUN_CMD'] = "bash run_gptj_harness_v3_1.sh "
+ elif "gptj" in env['MLC_MODEL']:
+ env['MLC_RUN_DIR'] = i['run_script_input']['path']
+ if env.get('MLC_MLPERF_INFERENCE_CODE_VERSION', '') == "v3.1":
+ env['MLC_RUN_CMD'] = "bash run_gptj_harness_v3_1.sh "
if env.get('INTEL_GPTJ_INT4', '') == 'yes':
model_precision = "int4"
- env['INT4_MODEL_DIR'] = env['CM_ML_MODEL_PATH']
+ env['INT4_MODEL_DIR'] = env['MLC_ML_MODEL_PATH']
env['QUANTIZED_MODEL'] = os.path.join(
env['INT4_MODEL_DIR'], "best_int4_model.pt")
env['PRECISION'] = "int4_bf16_mixed"
else:
- env['INT8_MODEL_DIR'] = env['CM_ML_MODEL_PATH']
+ env['INT8_MODEL_DIR'] = env['MLC_ML_MODEL_PATH']
env['QUANTIZED_MODEL'] = os.path.join(
env["INT8_MODEL_DIR"], "best_model.pt")
env['PRECISION'] = "int8"
- elif env.get('CM_MLPERF_INFERENCE_CODE_VERSION', '') == "v4.0":
- env['CM_RUN_CMD'] = "bash run_gptj_harness_v4_0.sh "
+ elif env.get('MLC_MLPERF_INFERENCE_CODE_VERSION', '') == "v4.0":
+ env['MLC_RUN_CMD'] = "bash run_gptj_harness_v4_0.sh "
- if env['CM_MLPERF_RUN_STYLE'] == "test":
- env['TOTAL_SAMPLE_COUNT'] = env['CM_TEST_QUERY_COUNT']
+ if env['MLC_MLPERF_RUN_STYLE'] == "test":
+ env['TOTAL_SAMPLE_COUNT'] = env['MLC_TEST_QUERY_COUNT']
else:
env['TOTAL_SAMPLE_COUNT'] = env.get(
- 'CM_MLPERF_MAX_QUERY_COUNT', env['CM_TEST_QUERY_COUNT'])
+ 'MLC_MLPERF_MAX_QUERY_COUNT', env['MLC_TEST_QUERY_COUNT'])
- if env['CM_MLPERF_LOADGEN_SCENARIO'] == "Offline":
+ if env['MLC_MLPERF_LOADGEN_SCENARIO'] == "Offline":
env['WORKERS_PER_PROC'] = 4
else:
env['WORKERS_PER_PROC'] = 1
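
Note: the closing logic of the Intel customize.py packs two decisions: test runs take MLC_TEST_QUERY_COUNT, full runs prefer MLC_MLPERF_MAX_QUERY_COUNT but fall back to the test count, and the Offline scenario gets four workers per process versus one for the latency-bound scenarios. The same decisions as a standalone sketch:

    def gptj_run_params(env: dict) -> dict:
        """Restates the TOTAL_SAMPLE_COUNT / WORKERS_PER_PROC logic above."""
        if env["MLC_MLPERF_RUN_STYLE"] == "test":
            total = env["MLC_TEST_QUERY_COUNT"]
        else:
            total = env.get("MLC_MLPERF_MAX_QUERY_COUNT", env["MLC_TEST_QUERY_COUNT"])
        workers = 4 if env["MLC_MLPERF_LOADGEN_SCENARIO"] == "Offline" else 1
        return {"TOTAL_SAMPLE_COUNT": total, "WORKERS_PER_PROC": workers}

    print(gptj_run_params({"MLC_MLPERF_RUN_STYLE": "test",
                           "MLC_TEST_QUERY_COUNT": "20",
                           "MLC_MLPERF_LOADGEN_SCENARIO": "Offline"}))
    # {'TOTAL_SAMPLE_COUNT': '20', 'WORKERS_PER_PROC': 4}
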
diff --git a/script/app-mlperf-inference-intel/meta.yaml b/script/app-mlperf-inference-intel/meta.yaml
index 9a7c042d7..86a2806eb 100644
--- a/script/app-mlperf-inference-intel/meta.yaml
+++ b/script/app-mlperf-inference-intel/meta.yaml
@@ -24,47 +24,47 @@ tags:
# Default environment
default_env:
- CM_BATCH_COUNT: '1'
- CM_BATCH_SIZE: '1'
- CM_FAST_COMPILATION: 'yes'
- CM_MLPERF_LOADGEN_SCENARIO: Offline
- CM_MLPERF_LOADGEN_MODE: performance
- CM_SKIP_PREPROCESS_DATASET: 'no'
- CM_SKIP_MODEL_DOWNLOAD: 'no'
- CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: intel
- CM_MLPERF_SKIP_RUN: 'no'
+ MLC_BATCH_COUNT: '1'
+ MLC_BATCH_SIZE: '1'
+ MLC_FAST_COMPILATION: 'yes'
+ MLC_MLPERF_LOADGEN_SCENARIO: Offline
+ MLC_MLPERF_LOADGEN_MODE: performance
+ MLC_SKIP_PREPROCESS_DATASET: 'no'
+ MLC_SKIP_MODEL_DOWNLOAD: 'no'
+ MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: intel
+ MLC_MLPERF_SKIP_RUN: 'no'
verbosity: 1
loadgen_trigger_cold_run: 0
env:
- CM_CALL_MLPERF_RUNNER: 'no'
+ MLC_CALL_MLPERF_RUNNER: 'no'
CUDA_VISIBLE_DEVICES: ''
USE_CUDA: '0'
# Map script inputs to environment variables
input_mapping:
- count: CM_MLPERF_LOADGEN_QUERY_COUNT
- max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE
- mlperf_conf: CM_MLPERF_CONF
- mode: CM_MLPERF_LOADGEN_MODE
- output_dir: CM_MLPERF_OUTPUT_DIR
- performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
- scenario: CM_MLPERF_LOADGEN_SCENARIO
- user_conf: CM_MLPERF_USER_CONF
- skip_preprocess: CM_SKIP_PREPROCESS_DATASET
- skip_preprocessing: CM_SKIP_PREPROCESS_DATASET
- target_qps: CM_MLPERF_LOADGEN_TARGET_QPS
- offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
- server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS
- target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY
- singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
- multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
- performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
- rerun: CM_RERUN
+ count: MLC_MLPERF_LOADGEN_QUERY_COUNT
+ max_batchsize: MLC_MLPERF_LOADGEN_MAX_BATCHSIZE
+ mlperf_conf: MLC_MLPERF_CONF
+ mode: MLC_MLPERF_LOADGEN_MODE
+ output_dir: MLC_MLPERF_OUTPUT_DIR
+ performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
+ scenario: MLC_MLPERF_LOADGEN_SCENARIO
+ user_conf: MLC_MLPERF_USER_CONF
+ skip_preprocess: MLC_SKIP_PREPROCESS_DATASET
+ skip_preprocessing: MLC_SKIP_PREPROCESS_DATASET
+ target_qps: MLC_MLPERF_LOADGEN_TARGET_QPS
+ offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
+ server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS
+ target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY
+ singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
+ multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
+ performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
+ rerun: MLC_RERUN
new_state_keys:
- mlperf-inference-implementation
- - CM_SUT_*
+ - MLC_SUT_*
@@ -92,7 +92,7 @@ deps:
# Install ResNet50 model (ONNX) and ImageNet
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- resnet50
names:
- resnet50-model
@@ -100,7 +100,7 @@ deps:
tags: get,ml-model,resnet50,_fp32,_pytorch
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- resnet50
names:
- imagenet-original
@@ -114,7 +114,7 @@ deps:
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- retinanet
names:
- openimages-original
@@ -122,7 +122,7 @@ deps:
tags: get,dataset,original,openimages,_validation,_custom-annotations,_full
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- retinanet
names:
- openimages-calibration
@@ -139,11 +139,11 @@ post_deps:
- runner
- mlperf-runner
skip_if_env:
- CM_MLPERF_SKIP_RUN:
+ MLC_MLPERF_SKIP_RUN:
- 'yes'
- yes
enable_if_env:
- CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE:
+ MLC_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE:
- run_harness
tags: benchmark-mlperf
@@ -158,7 +158,7 @@ variations:
group: version
default: true
env:
- CM_MLPERF_INFERENCE_CODE_VERSION: "v4.0"
+ MLC_MLPERF_INFERENCE_CODE_VERSION: "v4.0"
deps:
- tags: get,mlperf,inference,results,_go
names:
@@ -175,7 +175,7 @@ variations:
v3.1:
group: version
env:
- CM_MLPERF_INFERENCE_CODE_VERSION: "v3.1"
+ MLC_MLPERF_INFERENCE_CODE_VERSION: "v3.1"
deps:
- tags: get,mlperf,inference,results,_ctuning
names:
@@ -200,15 +200,15 @@ variations:
group: device
default: true
env:
- CM_MLPERF_DEVICE: cpu
+ MLC_MLPERF_DEVICE: cpu
# ML engine
pytorch:
group: framework
default: true
env:
- CM_MLPERF_BACKEND: pytorch
- CM_MLPERF_BACKEND_LIB_NAMESPEC: pytorch
+ MLC_MLPERF_BACKEND: pytorch
+ MLC_MLPERF_BACKEND_LIB_NAMESPEC: pytorch
bs.#:
env:
@@ -219,12 +219,12 @@ variations:
group: model
default: true
env:
- CM_MODEL: resnet50
- CM_BENCHMARK: STANDALONE_CLASSIFICATION
+ MLC_MODEL: resnet50
+ MLC_BENCHMARK: STANDALONE_CLASSIFICATION
resnet50,int8:
env:
- CM_IMAGENET_ACCURACY_DTYPE: int8
+ MLC_IMAGENET_ACCURACY_DTYPE: int8
bert-99:
deps:
@@ -232,15 +232,15 @@ variations:
names:
- bert-99-compiler
env:
- CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8
- CM_ML_MODEL_INPUTS_DATA_TYPE: int8
+ MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8
+ MLC_ML_MODEL_INPUTS_DATA_TYPE: int8
retinanet:
group: model
env:
- CM_MODEL: retinanet
- CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth"
- CM_BENCHMARK: STANDALONE_OBJECT_DETECTION
+ MLC_MODEL: retinanet
+ MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth"
+ MLC_BENCHMARK: STANDALONE_OBJECT_DETECTION
deps:
- tags: get,generic-python-lib,_numpy
@@ -254,25 +254,25 @@ variations:
base:
- 3d-unet_
env:
- CM_MODEL: 3d-unet-99
+ MLC_MODEL: 3d-unet-99
3d-unet-99.9:
group: model
base:
- 3d-unet_
env:
- CM_MODEL: 3d-unet-99.9
+ MLC_MODEL: 3d-unet-99.9
3d-unet_:
env:
- CM_BENCHMARK: MEDICAL_IMAGING
+ MLC_BENCHMARK: MEDICAL_IMAGING
deps:
- tags: get,dataset,kits19,preprocessed
- tags: get,ml-model,medical-imaging,3d-unet,_pytorch,_weights
bert_:
env:
- CM_BENCHMARK: STANDALONE_BERT
+ MLC_BENCHMARK: STANDALONE_BERT
bert_,pytorch:
deps:
@@ -308,7 +308,7 @@ variations:
gptj_:
env:
- CM_BENCHMARK: STANDALONE_GPTJ
+ MLC_BENCHMARK: STANDALONE_GPTJ
int4,gptj_,build-harness:
deps:
@@ -322,7 +322,7 @@ variations:
- sut
- loadgen-batchsize
enable_if_env:
- CM_MLPERF_INFERENCE_CODE_VERSION:
+ MLC_MLPERF_INFERENCE_CODE_VERSION:
- v3.1
force_cache: true
- tags: get,generic-python-lib,_package.optimum
@@ -333,8 +333,8 @@ variations:
sdxl:
group: model
env:
- CM_BENCHMARK: STANDALONE_SDXL
- CM_MODEL: stable-diffusion-xl
+ MLC_BENCHMARK: STANDALONE_SDXL
+ MLC_MODEL: stable-diffusion-xl
sdxl,pytorch:
adr:
@@ -489,7 +489,7 @@ variations:
names:
- rapidjson-src
env:
- CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_RAPIDJSON_SRC_REPO_PATH
+ MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_RAPIDJSON_SRC_REPO_PATH
- tags: install,gflags,from.src
names:
- gflags-from-src
@@ -588,7 +588,7 @@ variations:
- tags: get,torchvision,from.src,_sha.8e078971b8aebdeb1746fea58851e3754f103053
update_tags_from_env_with_prefix:
"_python.":
- - CM_PYTHON_BIN_WITH_PATH
+ - MLC_PYTHON_BIN_WITH_PATH
names:
- torchvision
- tags: install,opencv,from.src,_branch.4.x
@@ -598,7 +598,7 @@ variations:
names:
- rapidjson-src
env:
- CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_RAPIDJSON_SRC_REPO_PATH
+ MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_RAPIDJSON_SRC_REPO_PATH
- tags: install,gflags,from.src
names:
- gflags-from-src
@@ -641,13 +641,13 @@ variations:
- run-mode
- loadgen-scenario
new_env_keys:
- - CM_ML_MODEL_RETINANET_INT8_FILE_WITH_PATH
+ - MLC_ML_MODEL_RETINANET_INT8_FILE_WITH_PATH
retinanet,compile-model:
deps:
- tags: get,ml-model,retinanet,_pytorch,_fp32
new_env_keys:
- - CM_ML_MODEL_RETINANET_INT8_FILE_WITH_PATH
+ - MLC_ML_MODEL_RETINANET_INT8_FILE_WITH_PATH
3d-unet_,pytorch:
adr:
@@ -737,28 +737,28 @@ variations:
- mkl
tags: get,generic,conda-package,_package.mkl,_source.conda-forge
enable_if_env:
- CM_MLPERF_INFERENCE_CODE_VERSION:
+ MLC_MLPERF_INFERENCE_CODE_VERSION:
- v4.0
- names:
- conda-package
- mkl-include
tags: get,generic,conda-package,_package.mkl-include,_source.conda-forge
enable_if_env:
- CM_MLPERF_INFERENCE_CODE_VERSION:
+ MLC_MLPERF_INFERENCE_CODE_VERSION:
- v4.0
- names:
- conda-package
- llvm-openmp
tags: get,generic,conda-package,_package.llvm-openmp,_source.conda-forge
enable_if_env:
- CM_MLPERF_INFERENCE_CODE_VERSION:
+ MLC_MLPERF_INFERENCE_CODE_VERSION:
- v4.0
- names:
- conda-package
- pybind11
tags: get,generic,conda-package,_package.pybind11,_source.conda-forge
enable_if_env:
- CM_MLPERF_INFERENCE_CODE_VERSION:
+ MLC_MLPERF_INFERENCE_CODE_VERSION:
- v4.0
- names:
- conda-package
@@ -768,7 +768,7 @@ variations:
names:
- llvm-from-src
enable_if_env:
- CM_MLPERF_INFERENCE_CODE_VERSION:
+ MLC_MLPERF_INFERENCE_CODE_VERSION:
- v3.1
- names:
- conda-package
@@ -783,7 +783,7 @@ variations:
names:
- ipex-from-src
enable_if_env:
- CM_MLPERF_INFERENCE_CODE_VERSION:
+ MLC_MLPERF_INFERENCE_CODE_VERSION:
- v3.1
- tags: get,generic,conda-package,_package.ninja
names:
@@ -798,7 +798,7 @@ variations:
enable_if_env:
INTEL_GPTJ_INT4:
- 'yes'
- CM_MLPERF_INFERENCE_CODE_VERSION:
+ MLC_MLPERF_INFERENCE_CODE_VERSION:
- v3.1
- tags: get,generic-python-lib,_package.transformers
names:
@@ -826,7 +826,7 @@ variations:
- accelerate
- tags: get,generic-python-lib,_custom-python,_package.torch,_url.git+https://github.com/pytorch/pytorch.git@927dc662386af052018212c7d01309a506fc94cd
enable_if_env:
- CM_MLPERF_INFERENCE_CODE_VERSION:
+ MLC_MLPERF_INFERENCE_CODE_VERSION:
- v3.1
env:
"+ CXXFLAGS":
@@ -836,13 +836,13 @@ variations:
- "-Wno-free-nonheap-object"
- tags: get,generic-python-lib,_custom-python,_package.torch
env:
- CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu
+ MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu
enable_if_env:
- CM_MLPERF_INFERENCE_CODE_VERSION:
+ MLC_MLPERF_INFERENCE_CODE_VERSION:
- 'v4.0'
- tags: install,intel-neural-speed,_for-intel-mlperf-inference-v4.0-gptj,_branch.mlperf-v4-0
enable_if_env:
- CM_MLPERF_INFERENCE_CODE_VERSION:
+ MLC_MLPERF_INFERENCE_CODE_VERSION:
- 'v4.0'
@@ -851,18 +851,18 @@ variations:
base:
- gptj_
env:
- CM_MODEL: gptj-99
- CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx"
- CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8
- CM_ML_MODEL_INPUTS_DATA_TYPE: int8
+ MLC_MODEL: gptj-99
+ MLC_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx"
+ MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8
+ MLC_ML_MODEL_INPUTS_DATA_TYPE: int8
gptj-99.9:
group: model
base:
- gptj_
env:
- CM_MODEL: gptj-99.9
- CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx"
+ MLC_MODEL: gptj-99.9
+ MLC_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx"
dlrm-v2_,build-harness:
deps:
@@ -896,14 +896,14 @@ variations:
- llvm-openmp
tags: get,generic,conda-package,_package.llvm-openmp,_source.conda-forge
enable_if_env:
- CM_MLPERF_INFERENCE_CODE_VERSION:
+ MLC_MLPERF_INFERENCE_CODE_VERSION:
- v4.0
- names:
- conda-package
- pybind11
tags: get,generic,conda-package,_package.pybind11,_source.conda-forge
enable_if_env:
- CM_MLPERF_INFERENCE_CODE_VERSION:
+ MLC_MLPERF_INFERENCE_CODE_VERSION:
- v4.0
- names:
- conda-package
@@ -952,62 +952,62 @@ variations:
base:
- dlrm-v2_
env:
- CM_MODEL: dlrm-v2-99
- CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8
- CM_ML_MODEL_INPUTS_DATA_TYPE: int8
+ MLC_MODEL: dlrm-v2-99
+ MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8
+ MLC_ML_MODEL_INPUTS_DATA_TYPE: int8
dlrm-v2-99.9:
group: model
base:
- dlrm-v2_
env:
- CM_MODEL: dlrm-v2-99.9
+ MLC_MODEL: dlrm-v2-99.9
standalone:
group: network-mode
default: true
env:
- CM_MLPERF_NETWORK_RUN_MODE: standalone
+ MLC_MLPERF_NETWORK_RUN_MODE: standalone
network-server:
group: network-mode
env:
- CM_MLPERF_NETWORK_RUN_MODE: network-server
+ MLC_MLPERF_NETWORK_RUN_MODE: network-server
network-client:
group: network-run-mode
env:
- CM_MLPERF_NETWORK_RUN_MODE: network-client
+ MLC_MLPERF_NETWORK_RUN_MODE: network-client
bert_,network-server:
env:
- CM_BENCHMARK: NETWORK_BERT_SERVER
+ MLC_BENCHMARK: NETWORK_BERT_SERVER
bert_,network-client:
env:
- CM_BENCHMARK: NETWORK_BERT_CLIENT
+ MLC_BENCHMARK: NETWORK_BERT_CLIENT
bert-99:
group: model
base:
- bert_
env:
- CM_MODEL: bert-99
- CM_SQUAD_ACCURACY_DTYPE: float32
- CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx"
+ MLC_MODEL: bert-99
+ MLC_SQUAD_ACCURACY_DTYPE: float32
+ MLC_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx"
bert-99.9:
group: model
base:
- bert_
env:
- CM_MODEL: bert-99.9
- CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx"
+ MLC_MODEL: bert-99.9
+ MLC_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx"
batch_size.#:
group: loadgen-batchsize
env:
- CM_MLPERF_LOADGEN_BATCH_SIZE: "#"
+ MLC_MLPERF_LOADGEN_BATCH_SIZE: "#"
build-harness:
@@ -1015,23 +1015,23 @@ variations:
real_run: false
group: run-mode
env:
- CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: build_harness
+ MLC_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: build_harness
new_env_keys:
- - CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH
- - CM_ML_MODEL_*
+ - MLC_MLPERF_INFERENCE_INTEL_HARNESS_PATH
+ - MLC_ML_MODEL_*
- DATA_PATH
compile-model:
group: run-mode
env:
- CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: compilation
+ MLC_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: compilation
calibration:
group: run-mode
env:
- CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: calibration
+ MLC_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: calibration
new_env_keys:
- - CM_ML_MODEL_*
+ - MLC_ML_MODEL_*
- INT4_CALIBRATION_DIR
calibration,gptj_:
@@ -1080,47 +1080,47 @@ variations:
- tags: get,generic-sys-util,_rsync
env:
- CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: run_harness
+ MLC_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: run_harness
# Env keys which are exposed to higher level scripts
new_env_keys:
- - CM_MLPERF_*
- - CM_DATASET_*
- - CM_HW_NAME
- - CM_ML_MODEL_*
- - CM_MAX_EXAMPLES
- - CM_IMAGENET_ACCURACY_DTYPE
- - CM_SQUAD_ACCURACY_DTYPE
+ - MLC_MLPERF_*
+ - MLC_DATASET_*
+ - MLC_HW_NAME
+ - MLC_ML_MODEL_*
+ - MLC_MAX_EXAMPLES
+ - MLC_IMAGENET_ACCURACY_DTYPE
+ - MLC_SQUAD_ACCURACY_DTYPE
maxq:
group: power-mode
env:
- CM_MLPERF_NVIDIA_HARNESS_MAXQ: yes
+ MLC_MLPERF_NVIDIA_HARNESS_MAXQ: yes
maxn:
group: power-mode
env:
- CM_MLPERF_NVIDIA_HARNESS_MAXN: yes
+ MLC_MLPERF_NVIDIA_HARNESS_MAXN: yes
singlestream:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: SingleStream
+ MLC_MLPERF_LOADGEN_SCENARIO: SingleStream
multistream:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: MultiStream
+ MLC_MLPERF_LOADGEN_SCENARIO: MultiStream
offline:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: Offline
+ MLC_MLPERF_LOADGEN_SCENARIO: Offline
server:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: Server
+ MLC_MLPERF_LOADGEN_SCENARIO: Server
int4:
group: precision
@@ -1148,7 +1148,7 @@ variations:
dataset-preprocessed:
tags: _float32,_rgb32
env:
- CM_IMAGENET_ACCURACY_DTYPE: float32
+ MLC_IMAGENET_ACCURACY_DTYPE: float32
sapphire-rapids.112c:
group: sut
@@ -1163,21 +1163,21 @@ variations:
KMP_BLOCKTIME: 10
WORKERS_PER_PROC: 1
default_env:
- CM_MLPERF_LOADGEN_BATCH_SIZE: 8
+ MLC_MLPERF_LOADGEN_BATCH_SIZE: 8
sapphire-rapids.24c,gptj-99,offline,int4:
env:
KMP_BLOCKTIME: 10
WORKERS_PER_PROC: 1
default_env:
- CM_MLPERF_LOADGEN_BATCH_SIZE: 8
+ MLC_MLPERF_LOADGEN_BATCH_SIZE: 8
sapphire-rapids.112c,gptj-99,offline,int8:
env:
KMP_BLOCKTIME: 1
WORKERS_PER_PROC: 2
default_env:
- CM_MLPERF_LOADGEN_BATCH_SIZE: 14
+ MLC_MLPERF_LOADGEN_BATCH_SIZE: 14
sapphire-rapids.112c,gptj-99,offline,int4:
env:
@@ -1185,21 +1185,21 @@ variations:
KMP_BLOCKTIME: 1
WORKERS_PER_PROC: 3
default_env:
- CM_MLPERF_LOADGEN_BATCH_SIZE: 8
+ MLC_MLPERF_LOADGEN_BATCH_SIZE: 8
sapphire-rapids.112c,gptj-99,server,int8:
env:
KMP_BLOCKTIME: 1
WORKERS_PER_PROC: 2
default_env:
- CM_MLPERF_LOADGEN_BATCH_SIZE: 1
+ MLC_MLPERF_LOADGEN_BATCH_SIZE: 1
sapphire-rapids.112c,gptj-99,server,int4:
env:
KMP_BLOCKTIME: 1
WORKERS_PER_PROC: 4
default_env:
- CM_MLPERF_LOADGEN_BATCH_SIZE: 1
+ MLC_MLPERF_LOADGEN_BATCH_SIZE: 1
sapphire-rapids.24c,bert_:
env:
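Note: throughout this meta.yaml, `enable_if_env` and `skip_if_env` gate dependencies on the renamed variables. A hedged Python sketch of the assumed matching semantics (inferred from how the keys are used here, not taken from the mlcflow source): a dependency runs only if no skip rule matches and every enable rule does.

def env_matches(rules, env):
    """True when every variable in `rules` holds one of its allowed
    values, e.g. {'MLC_MODEL': ['resnet50']}."""
    return all(str(env.get(var, '')) in [str(v) for v in allowed]
               for var, allowed in rules.items())

def dep_is_active(dep, env):
    # A matching skip_if_env rule disables the dependency outright.
    skip = dep.get('skip_if_env')
    if skip and env_matches(skip, env):
        return False
    # enable_if_env, when present, must match for the dep to run.
    enable = dep.get('enable_if_env')
    return env_matches(enable, env) if enable else True

# Example: the benchmark-mlperf post_dep above.
dep = {'enable_if_env': {'MLC_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE': ['run_harness']},
       'skip_if_env': {'MLC_MLPERF_SKIP_RUN': ['yes']}}
print(dep_is_active(dep, {'MLC_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE': 'run_harness',
                          'MLC_MLPERF_SKIP_RUN': 'no'}))  # True
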
diff --git a/script/app-mlperf-inference-intel/prepare_3d-unet_data_model.sh b/script/app-mlperf-inference-intel/prepare_3d-unet_data_model.sh
index 263388147..10cf123f1 100644
--- a/script/app-mlperf-inference-intel/prepare_3d-unet_data_model.sh
+++ b/script/app-mlperf-inference-intel/prepare_3d-unet_data_model.sh
@@ -1,13 +1,13 @@
#!/bin/bash
-export DOWNLOAD_DATA_DIR=${CM_DATASET_PATH}
-cd ${CM_HARNESS_CODE_ROOT}
+export DOWNLOAD_DATA_DIR=${MLC_DATASET_PATH}
+cd ${MLC_HARNESS_CODE_ROOT}
mkdir -p build
-ln -sf ${CM_DATASET_PREPROCESSED_PATH} build/preprocessed_data
+ln -sf ${MLC_DATASET_PREPROCESSED_PATH} build/preprocessed_data
mkdir -p build/model
-ln -sf ${CM_ML_MODEL_FILE_WITH_PATH} build/model/3dunet_kits19_pytorch_checkpoint.pth
+ln -sf ${MLC_ML_MODEL_FILE_WITH_PATH} build/model/3dunet_kits19_pytorch_checkpoint.pth
#make setup
#make duplicate_kits19_case_00185
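Note: this prepare step is directory scaffolding plus forced symlinks. A rough Python equivalent, assuming the same MLC_ env keys, for anyone porting these helpers off bash:

import os

def force_symlink(src, dst):
    """Rough equivalent of `ln -sf` for files and links."""
    if os.path.lexists(dst):
        os.remove(dst)
    os.symlink(src, dst)

env = os.environ
os.makedirs('build/model', exist_ok=True)
force_symlink(env['MLC_DATASET_PREPROCESSED_PATH'], 'build/preprocessed_data')
force_symlink(env['MLC_ML_MODEL_FILE_WITH_PATH'],
              'build/model/3dunet_kits19_pytorch_checkpoint.pth')
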
diff --git a/script/app-mlperf-inference-intel/prepare_imagenet_calibration.sh b/script/app-mlperf-inference-intel/prepare_imagenet_calibration.sh
index e8a4fc61f..a3cd92bec 100644
--- a/script/app-mlperf-inference-intel/prepare_imagenet_calibration.sh
+++ b/script/app-mlperf-inference-intel/prepare_imagenet_calibration.sh
@@ -1,6 +1,6 @@
-cd ${CM_HARNESS_CODE_ROOT}
+cd ${MLC_HARNESS_CODE_ROOT}
if [ ! -e ILSVRC2012_img_val ]; then
- ln -s ${CM_DATASET_IMAGENET_VAL_PATH} ILSVRC2012_img_val
+ ln -s ${MLC_DATASET_IMAGENET_VAL_PATH} ILSVRC2012_img_val
fi
bash prepare_calibration_dataset.sh
diff --git a/script/app-mlperf-inference-intel/run_3d-unet_harness.sh b/script/app-mlperf-inference-intel/run_3d-unet_harness.sh
index 78f44fb2b..725986abd 100644
--- a/script/app-mlperf-inference-intel/run_3d-unet_harness.sh
+++ b/script/app-mlperf-inference-intel/run_3d-unet_harness.sh
@@ -1,8 +1,8 @@
#!/bin/bash
-scenario=${CM_MLPERF_LOADGEN_SCENARIO}
-OUTDIR="${CM_MLPERF_OUTPUT_DIR}"
+scenario=${MLC_MLPERF_LOADGEN_SCENARIO}
+OUTDIR="${MLC_MLPERF_OUTPUT_DIR}"
#python ../../user_config.py
@@ -14,11 +14,11 @@ number_threads=`nproc --all`
export number_cores=`lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l`
num_numa=$(numactl --hardware|grep available|awk -F' ' '{ print $2 }')
num_instance=$((number_cores/CPUS_PER_INSTANCE))
-export PYTHONPATH=${CM_HARNESS_CODE_ROOT}/common:$PYTHONPATH
-cp -r ${CM_HARNESS_CODE_ROOT}/meta $OUTDIR/
-cp ${CM_HARNESS_CODE_ROOT}/unet3d_jit_model.pt $OUTDIR/
-cp ${CM_HARNESS_CODE_ROOT}/calibration_result.json $OUTDIR/
-ln -sf ${CM_HARNESS_CODE_ROOT}/build $OUTDIR/build
+export PYTHONPATH=${MLC_HARNESS_CODE_ROOT}/common:$PYTHONPATH
+cp -r ${MLC_HARNESS_CODE_ROOT}/meta $OUTDIR/
+cp ${MLC_HARNESS_CODE_ROOT}/unet3d_jit_model.pt $OUTDIR/
+cp ${MLC_HARNESS_CODE_ROOT}/calibration_result.json $OUTDIR/
+ln -sf ${MLC_HARNESS_CODE_ROOT}/build $OUTDIR/build
#the log path is hardcoded in the intel implementation. This is a hack to get them to where we want
rm -rf $OUTDIR/output_logs
ln -sf $OUTDIR $OUTDIR/output_logs
@@ -31,13 +31,13 @@ export LD_PRELOAD=$CONDA_PREFIX/lib/libjemalloc.so:$LD_PRELOAD
export MALLOC_CONF="oversize_threshold:1,background_thread:true,percpu_arena:percpu,metadata_thp:always,dirty_decay_ms:9000000000,muzzy_decay_ms:9000000000";
-#cd ${CM_HARNESS_CODE_ROOT}
-cmd="python ${CM_HARNESS_CODE_ROOT}/run.py \
+#cd ${MLC_HARNESS_CODE_ROOT}
+cmd="python ${MLC_HARNESS_CODE_ROOT}/run.py \
--mode ${LOADGEN_MODE} \
--workload-name 3dunet \
- --mlperf-conf ${CM_MLPERF_CONF} \
- --user-conf ${CM_MLPERF_USER_CONF} \
- --workload-config ${CM_HARNESS_CODE_ROOT}/config.json \
+ --mlperf-conf ${MLC_MLPERF_CONF} \
+ --user-conf ${MLC_MLPERF_USER_CONF} \
+ --workload-config ${MLC_HARNESS_CODE_ROOT}/config.json \
--num-instance $num_instance \
--cpus-per-instance $CPUS_PER_INSTANCE \
--scenario $scenario \
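Note: the instance math shared by these harness scripts (physical cores from `lscpu`, NUMA nodes from `numactl`) can be reproduced in a few lines of Python, assuming Linux with both tools on PATH; `cpus_per_instance` stands in for the CPUS_PER_INSTANCE value the harness sets elsewhere:

import subprocess

def physical_cores():
    # Unique (core, socket) pairs, same as the `lscpu -b -p=Core,Socket`
    # pipeline piped through `sort -u | wc -l` above.
    out = subprocess.check_output(['lscpu', '-b', '-p=Core,Socket'], text=True)
    return len({line for line in out.splitlines() if not line.startswith('#')})

def numa_nodes():
    # First line of `numactl --hardware` reads e.g. "available: 2 nodes (0-1)".
    out = subprocess.check_output(['numactl', '--hardware'], text=True)
    for line in out.splitlines():
        if 'available' in line:
            return int(line.split()[1])
    return 1

cpus_per_instance = 8  # illustrative; the harness exports its own value
print(physical_cores() // cpus_per_instance, 'instances')
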
diff --git a/script/app-mlperf-inference-intel/run_bert_harness.sh b/script/app-mlperf-inference-intel/run_bert_harness.sh
index b49783c6f..de10f4458 100644
--- a/script/app-mlperf-inference-intel/run_bert_harness.sh
+++ b/script/app-mlperf-inference-intel/run_bert_harness.sh
@@ -1,7 +1,7 @@
#!/bin/bash
WORKERS_PER_PROC=${WORKERS_PER_PROC:-4}
-THREADS_PER_INSTANCE=$((( ${WORKERS_PER_PROC} * ${CM_HOST_CPU_THREADS_PER_CORE}) / ${CM_HOST_CPU_SOCKETS}))
+THREADS_PER_INSTANCE=$((( ${WORKERS_PER_PROC} * ${MLC_HOST_CPU_THREADS_PER_CORE}) / ${MLC_HOST_CPU_SOCKETS}))
export LD_PRELOAD=${CONDA_PREFIX}/lib/libjemalloc.so
export MALLOC_CONF="oversize_threshold:1,background_thread:true,percpu_arena:percpu,metadata_thp:always,dirty_decay_ms:9000000000,muzzy_decay_ms:9000000000";
@@ -14,13 +14,13 @@ num_numa=$(numactl --hardware|grep available|awk -F' ' '{ print $2 }')
num_instance=$(($number_cores / $THREADS_PER_INSTANCE))
sut_dir=${MODEL_PATH}
-executable=${CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH}
-mode=${CM_MLPERF_LOADGEN_SCENARIO}
-OUTDIR="${CM_MLPERF_OUTPUT_DIR}"
+executable=${MLC_MLPERF_INFERENCE_INTEL_HARNESS_PATH}
+mode=${MLC_MLPERF_LOADGEN_SCENARIO}
+OUTDIR="${MLC_MLPERF_OUTPUT_DIR}"
#python ../../user_config.py
-USER_CONF="${CM_MLPERF_USER_CONF}"
+USER_CONF="${MLC_MLPERF_USER_CONF}"
-CONFIG="-n ${num_numa} -i ${num_instance} -j ${THREADS_PER_INSTANCE} --test_scenario=${mode} --model_file=${sut_dir}/bert.pt --sample_file=${sut_dir}/squad.pt --mlperf_config=${CM_MLPERF_CONF} --user_config=${USER_CONF} -o ${OUTDIR} -w 1300 --warmup ${accuracy}"
+CONFIG="-n ${num_numa} -i ${num_instance} -j ${THREADS_PER_INSTANCE} --test_scenario=${mode} --model_file=${sut_dir}/bert.pt --sample_file=${sut_dir}/squad.pt --mlperf_config=${MLC_MLPERF_CONF} --user_config=${USER_CONF} -o ${OUTDIR} -w 1300 --warmup ${accuracy}"
${executable} ${CONFIG}
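Note: the thread partitioning here is plain env-var arithmetic: workers per process times threads per core, divided across sockets. A minimal sketch using the renamed host-topology variables (the defaults are illustrative):

import os

def threads_per_instance(workers_per_proc=4):
    """(workers per proc * threads per core) / sockets, as in the
    bert/resnet50/retinanet harnesses."""
    threads_per_core = int(os.environ.get('MLC_HOST_CPU_THREADS_PER_CORE', '2'))
    sockets = int(os.environ.get('MLC_HOST_CPU_SOCKETS', '2'))
    return (workers_per_proc * threads_per_core) // sockets
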
diff --git a/script/app-mlperf-inference-intel/run_dlrm_v2_harness.sh b/script/app-mlperf-inference-intel/run_dlrm_v2_harness.sh
index 65530c621..3e4d0adfa 100644
--- a/script/app-mlperf-inference-intel/run_dlrm_v2_harness.sh
+++ b/script/app-mlperf-inference-intel/run_dlrm_v2_harness.sh
@@ -1,9 +1,9 @@
#!/bin/bash
-export MODEL_DIR=${CM_ML_MODEL_FILE_WITH_PATH}
+export MODEL_DIR=${MLC_ML_MODEL_FILE_WITH_PATH}
export DATA_DIR=/mnt/dlrm_data
-NUM_SOCKETS=${CM_HOST_CPU_SOCKETS:-2}
+NUM_SOCKETS=${MLC_HOST_CPU_SOCKETS:-2}
export NUM_SOCKETS=$NUM_SOCKETS
export num_physical_cores=`lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l`
export CPUS_PER_SOCKET=$((num_physical_cores/NUM_SOCKETS))
@@ -15,7 +15,7 @@ export CPUS_FOR_LOADGEN=1
export BATCH_SIZE=100
export DNNL_MAX_CPU_ISA=AVX512_CORE_AMX
-export LD_PRELOAD=${CM_CONDA_LIB_PATH}/libiomp5.so
+export LD_PRELOAD=${MLC_CONDA_LIB_PATH}/libiomp5.so
export KMP_BLOCKTIME=1
export OMP_NUM_THREADS=$CPUS_PER_INSTANCE
@@ -38,21 +38,21 @@ export EXTRA_OPS="$extra_option"
model_path="$MODEL_DIR/dlrm-multihot-pytorch.pt"
profile=dlrm-multihot-pytorch
-cd ${CM_HARNESS_CODE_ROOT}
-OUTPUT_DIR="${CM_MLPERF_OUTPUT_DIR}"
+cd ${MLC_HARNESS_CODE_ROOT}
+OUTPUT_DIR="${MLC_MLPERF_OUTPUT_DIR}"
-if [[ "${CM_MLPERF_LOADGEN_MODE}" == "accuracy" ]]; then
+if [[ "${MLC_MLPERF_LOADGEN_MODE}" == "accuracy" ]]; then
accuracy_opt=" --accuracy"
else
accuracy_opt=""
fi
-USER_CONF="${CM_MLPERF_USER_CONF}"
+USER_CONF="${MLC_MLPERF_USER_CONF}"
cmd="python -u python/runner.py --profile $profile $common_opt --model dlrm --model-path $model_path \
---config ${CM_MLPERF_CONF} --user-config ${CM_MLPERF_USER_CONF} \
+--config ${MLC_MLPERF_CONF} --user-config ${MLC_MLPERF_USER_CONF} \
--dataset multihot-criteo --dataset-path $DATA_DIR --output $OUTPUT_DIR $EXTRA_OPS \
--max-ind-range=40000000 --samples-to-aggregate-quantile-file=${PWD}/tools/dist_quantile.txt \
---max-batchsize=$BATCH_SIZE --scenario=${CM_MLPERF_LOADGEN_SCENARIO} ${accuracy_opt}"
+--max-batchsize=$BATCH_SIZE --scenario=${MLC_MLPERF_LOADGEN_SCENARIO} ${accuracy_opt}"
echo "$cmd"
diff --git a/script/app-mlperf-inference-intel/run_gptj_harness_v3_1.sh b/script/app-mlperf-inference-intel/run_gptj_harness_v3_1.sh
index 74988df28..77a099e0f 100644
--- a/script/app-mlperf-inference-intel/run_gptj_harness_v3_1.sh
+++ b/script/app-mlperf-inference-intel/run_gptj_harness_v3_1.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-export PATH=${CM_CONDA_BIN_PATH}:$PATH
+export PATH=${MLC_CONDA_BIN_PATH}:$PATH
KMP_BLOCKTIME=${KMP_BLOCKTIME:-10}
@@ -15,30 +15,30 @@ NUM_PROC=${NUM_PROC:-$num_numa}
CPUS_PER_PROC=$((num_physical_cores/num_numa))
WORKERS_PER_PROC=${WORKERS_PER_PROC:-1}
TOTAL_SAMPLE_COUNT=13368
-BATCH_SIZE=${CM_MLPERF_LOADGEN_BATCH_SIZE:-8}
+BATCH_SIZE=${MLC_MLPERF_LOADGEN_BATCH_SIZE:-8}
TIMESTAMP=$(date +%m-%d-%H-%M)
HOSTNAME=$(hostname)
#OUTPUT_DIR=offline-output-${HOSTNAME}-batch-${BATCH_SIZE}-procs-${NUM_PROC}-ins-per-proc-${WORKERS_PER_PROC}-${TIMESTAMP}
-export WORKLOAD_DATA=${CM_HARNESS_CODE_ROOT}/data
+export WORKLOAD_DATA=${MLC_HARNESS_CODE_ROOT}/data
export VALIDATION_DATA_JSON=${WORKLOAD_DATA}/validation-data/cnn_dailymail_validation.json
-cd ${CM_HARNESS_CODE_ROOT}
-OUTPUT_DIR="${CM_MLPERF_OUTPUT_DIR}"
+cd ${MLC_HARNESS_CODE_ROOT}
+OUTPUT_DIR="${MLC_MLPERF_OUTPUT_DIR}"
-USER_CONF="${CM_MLPERF_USER_CONF}"
+USER_CONF="${MLC_MLPERF_USER_CONF}"
cmd="python runner.py --workload-name gptj \
- --scenario ${CM_MLPERF_LOADGEN_SCENARIO} \
+ --scenario ${MLC_MLPERF_LOADGEN_SCENARIO} \
--mode ${LOADGEN_MODE} \
--num-proc ${NUM_PROC} \
--cpus-per-proc ${CPUS_PER_PROC} \
--model-checkpoint-path ${CHECKPOINT_DIR} \
--dataset-path ${VALIDATION_DATA_JSON} \
--batch-size ${BATCH_SIZE} \
- --mlperf-conf ${CM_MLPERF_CONF} \
- --user-conf ${CM_MLPERF_USER_CONF} \
+ --mlperf-conf ${MLC_MLPERF_CONF} \
+ --user-conf ${MLC_MLPERF_USER_CONF} \
--precision ${PRECISION} \
--pad-inputs \
--quantized-model ${QUANTIZED_MODEL} \
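Note: the proc layout at the top of this script splits physical cores evenly across NUMA nodes, one process per node by default. A compact sketch of that defaulting behaviour; the core and NUMA counts would come from the `lscpu`/`numactl` probes shown earlier:

import os

def gptj_proc_layout(num_physical_cores, num_numa):
    """One process per NUMA node by default; each gets an equal slice
    of the physical cores (integer division, as in the bash $(( ))."""
    num_proc = int(os.environ.get('NUM_PROC', num_numa))
    cpus_per_proc = num_physical_cores // num_numa
    workers_per_proc = int(os.environ.get('WORKERS_PER_PROC', '1'))
    batch_size = int(os.environ.get('MLC_MLPERF_LOADGEN_BATCH_SIZE', '8'))
    return num_proc, cpus_per_proc, workers_per_proc, batch_size

print(gptj_proc_layout(112, 2))  # e.g. a 2-socket host -> (2, 56, 1, 8)
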
diff --git a/script/app-mlperf-inference-intel/run_gptj_harness_v4_0.sh b/script/app-mlperf-inference-intel/run_gptj_harness_v4_0.sh
index 9186f733a..881926060 100644
--- a/script/app-mlperf-inference-intel/run_gptj_harness_v4_0.sh
+++ b/script/app-mlperf-inference-intel/run_gptj_harness_v4_0.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-export PATH=${CM_CONDA_BIN_PATH}:$PATH
+export PATH=${MLC_CONDA_BIN_PATH}:$PATH
export KMP_BLOCKTIME=1
export KMP_AFFINITY=granularity=fine,compact,1,0
@@ -7,7 +7,7 @@ export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libiomp5.so
# export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libtcmalloc.so
#
-BATCH_SIZE=${CM_MLPERF_LOADGEN_BATCH_SIZE}
+BATCH_SIZE=${MLC_MLPERF_LOADGEN_BATCH_SIZE}
DIR_SCRIPT=$(dirname "${BASH_SOURCE[0]}")
[ -z $DIR_NS ] && DIR_NS="$DIR_SCRIPT/gpt-j-env/neural-speed"
@@ -36,10 +36,10 @@ CPUS_PER_PROC=$((num_physical_cores / num_numa))
[ -z $BATCH_SIZE ] && BATCH_SIZE=12
[ -z $BEAM_SIZE ] && BEAM_SIZE=4
-OUTPUT_DIR="${CM_MLPERF_OUTPUT_DIR}"
-MODEL_PATH="${CM_ML_MODEL_FILE_WITH_PATH}"
-cd ${CM_HARNESS_CODE_ROOT}
-export WORKLOAD_DATA=${CM_HARNESS_CODE_ROOT}/data
+OUTPUT_DIR="${MLC_MLPERF_OUTPUT_DIR}"
+MODEL_PATH="${MLC_ML_MODEL_FILE_WITH_PATH}"
+cd ${MLC_HARNESS_CODE_ROOT}
+export WORKLOAD_DATA=${MLC_HARNESS_CODE_ROOT}/data
export VALIDATION_DATA_JSON=${WORKLOAD_DATA}/validation-data/cnn_dailymail_validation.json
@@ -49,7 +49,7 @@ done
echo "Start time: $(date)"
cmd="python runner.py --workload-name gptj \
- --scenario ${CM_MLPERF_LOADGEN_SCENARIO} \
+ --scenario ${MLC_MLPERF_LOADGEN_SCENARIO} \
--mode ${LOADGEN_MODE} \
--num-proc ${NUM_PROC} \
--cpus-per-proc ${CPUS_PER_PROC} \
@@ -58,8 +58,8 @@ cmd="python runner.py --workload-name gptj \
--model-checkpoint ${CHECKPOINT_DIR} \
--batch-size ${BATCH_SIZE} \
--beam-size ${BEAM_SIZE} \
- --mlperf-conf ${CM_MLPERF_CONF} \
- --user-conf ${CM_MLPERF_USER_CONF} \
+ --mlperf-conf ${MLC_MLPERF_CONF} \
+ --user-conf ${MLC_MLPERF_USER_CONF} \
--workers-per-proc ${WORKERS_PER_PROC} \
--total-sample-count ${TOTAL_SAMPLE_COUNT} \
--output-dir ${OUTPUT_DIR} \
diff --git a/script/app-mlperf-inference-intel/run_resnet50_harness.sh b/script/app-mlperf-inference-intel/run_resnet50_harness.sh
index 861d891aa..22782a615 100644
--- a/script/app-mlperf-inference-intel/run_resnet50_harness.sh
+++ b/script/app-mlperf-inference-intel/run_resnet50_harness.sh
@@ -7,7 +7,7 @@ number_sockets=`grep physical.id /proc/cpuinfo | sort -u | wc -l`
cpu_per_socket=$((number_cores/number_sockets))
WORKERS_PER_PROC=${WORKERS_PER_PROC:-4}
-THREADS_PER_INSTANCE=$((( ${WORKERS_PER_PROC} * ${CM_HOST_CPU_THREADS_PER_CORE}) / ${CM_HOST_CPU_SOCKETS}))
+THREADS_PER_INSTANCE=$((( ${WORKERS_PER_PROC} * ${MLC_HOST_CPU_THREADS_PER_CORE}) / ${MLC_HOST_CPU_SOCKETS}))
export LD_PRELOAD=${CONDA_PREFIX}/lib/libjemalloc.so
export LD_PRELOAD=${CONDA_PREFIX}/lib/libiomp5.so
@@ -18,23 +18,23 @@ export KMP_BLOCKTIME=1
export $KMP_SETTING
-export DATA_DIR=${CM_HARNESS_CODE_ROOT}/ILSVRC2012_img_val
-export RN50_START=${CM_HARNESS_CODE_ROOT}/models/resnet50-start-int8-model.pth
-export RN50_END=${CM_HARNESS_CODE_ROOT}/models/resnet50-end-int8-model.pth
-export RN50_FULL=${CM_HARNESS_CODE_ROOT}/models/resnet50-full.pth
+export DATA_DIR=${MLC_HARNESS_CODE_ROOT}/ILSVRC2012_img_val
+export RN50_START=${MLC_HARNESS_CODE_ROOT}/models/resnet50-start-int8-model.pth
+export RN50_END=${MLC_HARNESS_CODE_ROOT}/models/resnet50-end-int8-model.pth
+export RN50_FULL=${MLC_HARNESS_CODE_ROOT}/models/resnet50-full.pth
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${CONDA_PREFIX}/lib
-rsync -avz ${CM_HARNESS_CODE_ROOT}/val_data/ ${DATA_DIR}/
-executable="${CM_HARNESS_CODE_ROOT}/build/bin/mlperf_runner"
+rsync -avz ${MLC_HARNESS_CODE_ROOT}/val_data/ ${DATA_DIR}/
+executable="${MLC_HARNESS_CODE_ROOT}/build/bin/mlperf_runner"
number_threads=`nproc --all`
export number_cores=`lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l`
num_numa=$(numactl --hardware|grep available|awk -F' ' '{ print $2 }')
num_instance=$(($number_cores / $THREADS_PER_INSTANCE))
-scenario=${CM_MLPERF_LOADGEN_SCENARIO}
-OUTDIR="${CM_MLPERF_OUTPUT_DIR}"
+scenario=${MLC_MLPERF_LOADGEN_SCENARIO}
+OUTDIR="${MLC_MLPERF_OUTPUT_DIR}"
scenario="Offline"
#python ../../user_config.py
@@ -42,7 +42,7 @@ scenario="Offline"
CONFIG=" --scenario ${scenario} --mode ${LOADGEN_MODE} --model_name resnet50 \
--rn50-part1 ${RN50_START} --rn50-part3 ${RN50_END} --rn50-full-model ${RN50_FULL} \
--data_path ${DATA_DIR} \
- --mlperf_conf ${CM_MLPERF_CONF} --user_conf ${CM_MLPERF_USER_CONF} \
+ --mlperf_conf ${MLC_MLPERF_CONF} --user_conf ${MLC_MLPERF_USER_CONF} \
--cpus_per_instance $CPUS_PER_INSTANCE \
--num_instance $number_cores \
--total_sample_count 50000 \
diff --git a/script/app-mlperf-inference-intel/run_retinanet_harness.sh b/script/app-mlperf-inference-intel/run_retinanet_harness.sh
index 98ca3a5b2..d2e507508 100644
--- a/script/app-mlperf-inference-intel/run_retinanet_harness.sh
+++ b/script/app-mlperf-inference-intel/run_retinanet_harness.sh
@@ -8,7 +8,7 @@ cpu_per_socket=$((number_cores/number_sockets))
number_instance=$((number_cores/CPUS_PER_INSTANCE))
WORKERS_PER_PROC=${WORKERS_PER_PROC:-4}
-THREADS_PER_INSTANCE=$((( ${WORKERS_PER_PROC} * ${CM_HOST_CPU_THREADS_PER_CORE}) / ${CM_HOST_CPU_SOCKETS}))
+THREADS_PER_INSTANCE=$((( ${WORKERS_PER_PROC} * ${MLC_HOST_CPU_THREADS_PER_CORE}) / ${MLC_HOST_CPU_SOCKETS}))
export LD_PRELOAD=${CONDA_PREFIX}/lib/libjemalloc.so
export LD_PRELOAD=${CONDA_PREFIX}/lib/libiomp5.so
@@ -21,15 +21,15 @@ export $KMP_SETTING
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${CONDA_PREFIX}/lib
-executable="${CM_HARNESS_CODE_ROOT}/build/bin/mlperf_runner"
+executable="${MLC_HARNESS_CODE_ROOT}/build/bin/mlperf_runner"
number_threads=`nproc --all`
export number_cores=`lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l`
num_numa=$(numactl --hardware|grep available|awk -F' ' '{ print $2 }')
num_instance=$(($number_cores / $THREADS_PER_INSTANCE))
-scenario=${CM_MLPERF_LOADGEN_SCENARIO}
-OUTDIR="${CM_MLPERF_OUTPUT_DIR}"
+scenario=${MLC_MLPERF_LOADGEN_SCENARIO}
+OUTDIR="${MLC_MLPERF_OUTPUT_DIR}"
scenario="Offline"
#python ../../user_config.py
@@ -37,7 +37,7 @@ scenario="Offline"
CONFIG=" --scenario ${scenario} --mode ${LOADGEN_MODE} --model_name retinanet \
--model_path ${MODEL_PATH} \
--data_path ${DATA_DIR} \
- --mlperf_conf ${CM_MLPERF_CONF} --user_conf ${CM_MLPERF_USER_CONF} \
+ --mlperf_conf ${MLC_MLPERF_CONF} --user_conf ${MLC_MLPERF_USER_CONF} \
--cpus_per_instance $CPUS_PER_INSTANCE \
--num_instance $number_instance \
--total_sample_count 24781 \
diff --git a/script/app-mlperf-inference-intel/run_sdxl_harness.sh b/script/app-mlperf-inference-intel/run_sdxl_harness.sh
index 3dd71ec83..1b3c7bc20 100644
--- a/script/app-mlperf-inference-intel/run_sdxl_harness.sh
+++ b/script/app-mlperf-inference-intel/run_sdxl_harness.sh
@@ -6,16 +6,16 @@ export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libiomp5.so
# export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libtcmalloc.so
#
-BATCH_SIZE=${CM_MLPERF_LOADGEN_BATCH_SIZE}
+BATCH_SIZE=${MLC_MLPERF_LOADGEN_BATCH_SIZE}
export num_physical_cores=$(lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l)
num_numa=$(numactl --hardware|grep available|awk -F' ' '{ print $2 }')
-OUTPUT_DIR="${CM_MLPERF_OUTPUT_DIR}"
+OUTPUT_DIR="${MLC_MLPERF_OUTPUT_DIR}"
MODEL_PATH="${SDXL_CHECKPOINT_PATH}"
-cd ${CM_HARNESS_CODE_ROOT}
+cd ${MLC_HARNESS_CODE_ROOT}
NUM_PROC=1
CPUS_PER_PROC=16
@@ -30,14 +30,14 @@ echo "Start time: $(date)"
cmd="python -u main.py \
--dtype bfloat16 \
--device 'cpu' \
- --scenario ${CM_MLPERF_LOADGEN_SCENARIO} \
+ --scenario ${MLC_MLPERF_LOADGEN_SCENARIO} \
--mode ${LOADGEN_MODE} \
--num-proc ${NUM_PROC} \
--cpus-per-proc ${CPUS_PER_PROC} \
--model-path ${MODEL_PATH} \
--batch-size ${BATCH_SIZE} \
- --mlperf-conf ${CM_MLPERF_CONF} \
- --user-conf ${CM_MLPERF_USER_CONF} \
+ --mlperf-conf ${MLC_MLPERF_CONF} \
+ --user-conf ${MLC_MLPERF_USER_CONF} \
--workers-per-proc ${WORKERS_PER_PROC} \
--total-sample-count ${TOTAL_SAMPLE_COUNT} \
--log-dir ${OUTPUT_DIR} "
diff --git a/script/app-mlperf-inference-mlcommons-cpp/customize.py b/script/app-mlperf-inference-mlcommons-cpp/customize.py
index 1884a0798..c71d90f27 100644
--- a/script/app-mlperf-inference-mlcommons-cpp/customize.py
+++ b/script/app-mlperf-inference-mlcommons-cpp/customize.py
@@ -22,30 +22,30 @@ def preprocess(i):
env = i['env']
- if env.get('CM_MLPERF_SKIP_RUN', '') == "yes":
+ if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes":
return {'return': 0}
- if 'CM_MODEL' not in env:
+ if 'MLC_MODEL' not in env:
return {
'return': 1, 'error': 'Please select a variation specifying the model to run'}
- if 'CM_MLPERF_BACKEND' not in env:
+ if 'MLC_MLPERF_BACKEND' not in env:
return {'return': 1,
'error': 'Please select a variation specifying the backend'}
- if 'CM_MLPERF_DEVICE' not in env:
+ if 'MLC_MLPERF_DEVICE' not in env:
return {
'return': 1, 'error': 'Please select a variation specifying the device to run on'}
source_files = []
script_path = i['run_script_input']['path']
- if env['CM_MODEL'] == "retinanet":
- env['CM_DATASET_LIST'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH']
- env['CM_SOURCE_FOLDER_PATH'] = os.path.join(script_path, "src")
+ if env['MLC_MODEL'] == "retinanet":
+ env['MLC_DATASET_LIST'] = env['MLC_DATASET_ANNOTATIONS_FILE_PATH']
+ env['MLC_SOURCE_FOLDER_PATH'] = os.path.join(script_path, "src")
- for file in os.listdir(env['CM_SOURCE_FOLDER_PATH']):
+ for file in os.listdir(env['MLC_SOURCE_FOLDER_PATH']):
if file.endswith(".c") or file.endswith(".cpp"):
source_files.append(file)
- env['CM_CXX_SOURCE_FILES'] = ";".join(source_files)
+ env['MLC_CXX_SOURCE_FILES'] = ";".join(source_files)
if '+CPLUS_INCLUDE_PATH' not in env:
env['+CPLUS_INCLUDE_PATH'] = []
@@ -53,24 +53,24 @@ def preprocess(i):
env['+CPLUS_INCLUDE_PATH'].append(os.path.join(script_path, "inc"))
env['+C_INCLUDE_PATH'].append(os.path.join(script_path, "inc"))
- if env['CM_MLPERF_DEVICE'] == 'gpu':
- env['+C_INCLUDE_PATH'].append(env['CM_CUDA_PATH_INCLUDE'])
- env['+CPLUS_INCLUDE_PATH'].append(env['CM_CUDA_PATH_INCLUDE'])
- env['+LD_LIBRARY_PATH'].append(env['CM_CUDA_PATH_LIB'])
- env['+DYLD_FALLBACK_LIBRARY_PATH'].append(env['CM_CUDA_PATH_INCLUDE'])
+ if env['MLC_MLPERF_DEVICE'] == 'gpu':
+ env['+C_INCLUDE_PATH'].append(env['MLC_CUDA_PATH_INCLUDE'])
+ env['+CPLUS_INCLUDE_PATH'].append(env['MLC_CUDA_PATH_INCLUDE'])
+ env['+LD_LIBRARY_PATH'].append(env['MLC_CUDA_PATH_LIB'])
+ env['+DYLD_FALLBACK_LIBRARY_PATH'].append(env['MLC_CUDA_PATH_INCLUDE'])
if '+ CXXFLAGS' not in env:
env['+ CXXFLAGS'] = []
env['+ CXXFLAGS'].append("-std=c++14")
- # add preprocessor flag like "#define CM_MODEL_RESNET50"
- env['+ CXXFLAGS'].append('-DCM_MODEL_' + env['CM_MODEL'].upper())
- # add preprocessor flag like "#define CM_MLPERF_BACKEND_ONNXRUNTIME"
- env['+ CXXFLAGS'].append('-DCM_MLPERF_BACKEND_' +
- env['CM_MLPERF_BACKEND'].upper())
- # add preprocessor flag like "#define CM_MLPERF_DEVICE_CPU"
- env['+ CXXFLAGS'].append('-DCM_MLPERF_DEVICE_' +
- env['CM_MLPERF_DEVICE'].upper())
+ # add preprocessor flag like "#define MLC_MODEL_RESNET50"
+ env['+ CXXFLAGS'].append('-DMLC_MODEL_' + env['MLC_MODEL'].upper())
+ # add preprocessor flag like "#define MLC_MLPERF_BACKEND_ONNXRUNTIME"
+ env['+ CXXFLAGS'].append('-DMLC_MLPERF_BACKEND_' +
+ env['MLC_MLPERF_BACKEND'].upper())
+ # add preprocessor flag like "#define MLC_MLPERF_DEVICE_CPU"
+ env['+ CXXFLAGS'].append('-DMLC_MLPERF_DEVICE_' +
+ env['MLC_MLPERF_DEVICE'].upper())
if '+ LDCXXFLAGS' not in env:
env['+ LDCXXFLAGS'] = []
@@ -80,22 +80,22 @@ def preprocess(i):
"-lpthread"
]
# e.g. -lonnxruntime
- if 'CM_MLPERF_BACKEND_LIB_NAMESPEC' in env:
+ if 'MLC_MLPERF_BACKEND_LIB_NAMESPEC' in env:
env['+ LDCXXFLAGS'].append('-l' +
- env['CM_MLPERF_BACKEND_LIB_NAMESPEC'])
+ env['MLC_MLPERF_BACKEND_LIB_NAMESPEC'])
# e.g. -lcudart
- if 'CM_MLPERF_DEVICE_LIB_NAMESPEC' in env:
- env['+ LDCXXFLAGS'].append('-l' + env['CM_MLPERF_DEVICE_LIB_NAMESPEC'])
-
- env['CM_LINKER_LANG'] = 'CXX'
- env['CM_RUN_DIR'] = os.getcwd()
-
- if 'CM_MLPERF_CONF' not in env:
- env['CM_MLPERF_CONF'] = os.path.join(
- env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf")
- if 'CM_MLPERF_USER_CONF' not in env:
- env['CM_MLPERF_USER_CONF'] = os.path.join(
- env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf")
+ if 'MLC_MLPERF_DEVICE_LIB_NAMESPEC' in env:
+ env['+ LDCXXFLAGS'].append('-l' + env['MLC_MLPERF_DEVICE_LIB_NAMESPEC'])
+
+ env['MLC_LINKER_LANG'] = 'CXX'
+ env['MLC_RUN_DIR'] = os.getcwd()
+
+ if 'MLC_MLPERF_CONF' not in env:
+ env['MLC_MLPERF_CONF'] = os.path.join(
+ env['MLC_MLPERF_INFERENCE_SOURCE'], "mlperf.conf")
+ if 'MLC_MLPERF_USER_CONF' not in env:
+ env['MLC_MLPERF_USER_CONF'] = os.path.join(
+ env['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf")
return {'return': 0}
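Note: the `-D` flags built above are what drive the `#ifdef MLC_...` guards in main.cpp. The flag generation in isolation (in the script itself these strings are appended to `env['+ CXXFLAGS']`):

def mlperf_defines(env):
    """Turn the chosen model/backend/device into -D flags, e.g.
    -DMLC_MLPERF_DEVICE_GPU, matching the #ifdef guards in main.cpp."""
    return ['-DMLC_MODEL_' + env['MLC_MODEL'].upper(),
            '-DMLC_MLPERF_BACKEND_' + env['MLC_MLPERF_BACKEND'].upper(),
            '-DMLC_MLPERF_DEVICE_' + env['MLC_MLPERF_DEVICE'].upper()]

print(mlperf_defines({'MLC_MODEL': 'resnet50',
                      'MLC_MLPERF_BACKEND': 'onnxruntime',
                      'MLC_MLPERF_DEVICE': 'cpu'}))
# ['-DMLC_MODEL_RESNET50', '-DMLC_MLPERF_BACKEND_ONNXRUNTIME', '-DMLC_MLPERF_DEVICE_CPU']
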
diff --git a/script/app-mlperf-inference-mlcommons-cpp/meta.yaml b/script/app-mlperf-inference-mlcommons-cpp/meta.yaml
index e13bab985..dda32e172 100644
--- a/script/app-mlperf-inference-mlcommons-cpp/meta.yaml
+++ b/script/app-mlperf-inference-mlcommons-cpp/meta.yaml
@@ -21,32 +21,32 @@ tags:
# Default environment
default_env:
- CM_BATCH_COUNT: '1'
- CM_BATCH_SIZE: '1'
- CM_FAST_COMPILATION: "yes"
- CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: cpp
+ MLC_BATCH_COUNT: '1'
+ MLC_BATCH_SIZE: '1'
+ MLC_FAST_COMPILATION: "yes"
+ MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: cpp
# Map script inputs to environment variables
input_mapping:
- count: CM_MLPERF_LOADGEN_QUERY_COUNT
- max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE
- mlperf_conf: CM_MLPERF_CONF
- mode: CM_MLPERF_LOADGEN_MODE
- output_dir: CM_MLPERF_OUTPUT_DIR
- performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
- scenario: CM_MLPERF_LOADGEN_SCENARIO
- user_conf: CM_MLPERF_USER_CONF
+ count: MLC_MLPERF_LOADGEN_QUERY_COUNT
+ max_batchsize: MLC_MLPERF_LOADGEN_MAX_BATCHSIZE
+ mlperf_conf: MLC_MLPERF_CONF
+ mode: MLC_MLPERF_LOADGEN_MODE
+ output_dir: MLC_MLPERF_OUTPUT_DIR
+ performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
+ scenario: MLC_MLPERF_LOADGEN_SCENARIO
+ user_conf: MLC_MLPERF_USER_CONF
new_env_keys:
- - CM_MLPERF_*
- - CM_DATASET_*
- - CM_ML_MODEL_*
- - CM_HW_NAME
+ - MLC_MLPERF_*
+ - MLC_DATASET_*
+ - MLC_ML_MODEL_*
+ - MLC_HW_NAME
new_state_keys:
- mlperf-inference-implementation
- - CM_SUT_*
+ - MLC_SUT_*
# Dependencies on other CM scripts
@@ -64,7 +64,7 @@ deps:
# Detect CUDA if required
- tags: get,cuda,_cudnn
enable_if_env:
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- gpu
########################################################################
@@ -83,16 +83,16 @@ deps:
########################################################################
# Install ML engines via CM
- enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- onnxruntime
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- cpu
tags: get,lib,onnxruntime,lang-cpp,_cpu
- enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- onnxruntime
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- gpu
tags: get,lib,onnxruntime,lang-cpp,_cuda
@@ -101,14 +101,14 @@ deps:
# Install ResNet50 model (ONNX) and ImageNet
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- resnet50
names:
- imagenet-preprocessed
tags: get,dataset,preprocessed,imagenet,_NCHW
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- resnet50
tags: get,ml-model,raw,resnet50,_onnx
@@ -117,14 +117,14 @@ deps:
# Install RetinaNet model (ONNX) and OpenImages
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- retinanet
names:
- openimages-preprocessed
tags: get,dataset,preprocessed,openimages,_validation,_NCHW
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- retinanet
tags: get,ml-model,retinanet,_onnx,_fp32
@@ -141,14 +141,14 @@ post_deps:
- compile-program
tags: compile,cpp-program
skip_if_env:
- CM_MLPERF_SKIP_RUN:
+ MLC_MLPERF_SKIP_RUN:
- "yes"
- names:
- mlperf-runner
tags: benchmark-mlperf
skip_if_env:
- CM_MLPERF_SKIP_RUN:
+ MLC_MLPERF_SKIP_RUN:
- "yes"
- tags: save,mlperf,inference,state
@@ -162,90 +162,90 @@ variations:
group: device
default: true
env:
- CM_MLPERF_DEVICE: cpu
+ MLC_MLPERF_DEVICE: cpu
cuda:
group: device
env:
- CM_MLPERF_DEVICE: gpu
- CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart
+ MLC_MLPERF_DEVICE: gpu
+ MLC_MLPERF_DEVICE_LIB_NAMESPEC: cudart
# ML engine
onnxruntime:
group: framework
default: true
env:
- CM_MLPERF_BACKEND: onnxruntime
- CM_MLPERF_BACKEND_LIB_NAMESPEC: onnxruntime
+ MLC_MLPERF_BACKEND: onnxruntime
+ MLC_MLPERF_BACKEND_LIB_NAMESPEC: onnxruntime
pytorch:
group: framework
env:
- CM_MLPERF_BACKEND: pytorch
+ MLC_MLPERF_BACKEND: pytorch
tf:
group: framework
env:
- CM_MLPERF_BACKEND: tf
+ MLC_MLPERF_BACKEND: tf
tflite:
group: framework
env:
- CM_MLPERF_BACKEND: tflite
+ MLC_MLPERF_BACKEND: tflite
tvm-onnx:
group: framework
env:
- CM_MLPERF_BACKEND: tvm-onnx
+ MLC_MLPERF_BACKEND: tvm-onnx
# Reference MLPerf models
resnet50:
group: model
default: true
env:
- CM_MODEL: resnet50
+ MLC_MODEL: resnet50
retinanet:
group: model
default_env:
- CM_MLPERF_LOADGEN_MAX_BATCHSIZE: 1
+ MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: 1
env:
- CM_MODEL: retinanet
+ MLC_MODEL: retinanet
resnet50,offline:
default_env:
- CM_MLPERF_LOADGEN_MAX_BATCHSIZE: 32
+ MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: 32
resnet50,server:
default_env:
- CM_MLPERF_LOADGEN_MAX_BATCHSIZE: 32
+ MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: 32
resnet50,multistream:
default_env:
- CM_MLPERF_LOADGEN_MAX_BATCHSIZE: 8
+ MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: 8
batch-size.#:
group: batch-size
env:
- CM_MLPERF_LOADGEN_MAX_BATCHSIZE: "#"
+ MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: "#"
offline:
group: loadgen-scenario
default: true
env:
- CM_MLPERF_LOADGEN_SCENARIO: Offline
+ MLC_MLPERF_LOADGEN_SCENARIO: Offline
multistream:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: MultiStream
+ MLC_MLPERF_LOADGEN_SCENARIO: MultiStream
singlestream:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: SingleStream
- CM_MLPERF_LOADGEN_MAX_BATCHSIZE: 1
+ MLC_MLPERF_LOADGEN_SCENARIO: SingleStream
+ MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: 1
server:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: Server
+ MLC_MLPERF_LOADGEN_SCENARIO: Server
multistream,resnet50:
default_variations:
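Note the split between `env` and `default_env` in these variations: per-scenario values such as `MLC_MLPERF_LOADGEN_MAX_BATCHSIZE` live in `default_env` so user input can override them, while `env` values are forced. A sketch of that assumed precedence (the actual resolution order lives in mlcflow):

def resolve_env(user_env, variation):
    """Assumed precedence: user input beats default_env; env always wins."""
    resolved = dict(variation.get('default_env', {}))
    resolved.update(user_env)                  # user overrides defaults
    resolved.update(variation.get('env', {}))  # forced variation values
    return resolved

# Illustrative combination of the resnet50,offline defaults above.
offline_resnet50 = {'env': {'MLC_MLPERF_LOADGEN_SCENARIO': 'Offline'},
                    'default_env': {'MLC_MLPERF_LOADGEN_MAX_BATCHSIZE': 32}}
print(resolve_env({'MLC_MLPERF_LOADGEN_MAX_BATCHSIZE': 8}, offline_resnet50))
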
diff --git a/script/app-mlperf-inference-mlcommons-cpp/src/main.cpp b/script/app-mlperf-inference-mlcommons-cpp/src/main.cpp
index c5a3c809e..bf3fe86b6 100644
--- a/script/app-mlperf-inference-mlcommons-cpp/src/main.cpp
+++ b/script/app-mlperf-inference-mlcommons-cpp/src/main.cpp
@@ -11,11 +11,11 @@
#include "model.h"
#include "sample_library.h"
#include "system.h"
-#ifdef CM_MLPERF_DEVICE_GPU
+#ifdef MLC_MLPERF_DEVICE_GPU
#include "gpu_device.h"
#endif
-#ifdef CM_MLPERF_BACKEND_ONNXRUNTIME
+#ifdef MLC_MLPERF_BACKEND_ONNXRUNTIME
#include "onnxruntime_backend.h"
#endif
@@ -23,28 +23,28 @@ class InputSettings {
public:
InputSettings() {
- mlperf_conf_path = getenv("CM_MLPERF_CONF", "../inference/mlperf.conf");
- user_conf_path = getenv("CM_MLPERF_USER_CONF", "../inference/vision/classification_and_detection/user.conf");
- audit_conf_path = getenv("CM_MLPERF_INFERENCE_AUDIT_PATH", "");
- output_dir = getenv("CM_MLPERF_OUTPUT_DIR", ".");
- backend_name = getenv("CM_MLPERF_BACKEND", "onnxruntime");
- device_name = getenv("CM_MLPERF_DEVICE", "cpu");
- model_name = getenv("CM_MODEL", "resnet50");
- model_path = getenv("CM_ML_MODEL_FILE_WITH_PATH", "");
- dataset_preprocessed_path = getenv("CM_DATASET_PREPROCESSED_PATH", "");
- dataset_path = getenv("CM_DATASET_PATH", "");
- dataset_list = getenv("CM_DATASET_LIST", "");
- imagenet_val_path = getenv("CM_DATASET_AUX_PATH", "") + "/val.txt";
- scenario_name = getenv("CM_MLPERF_LOADGEN_SCENARIO", "Offline");
- mode_name = getenv("CM_MLPERF_LOADGEN_MODE", "PerformanceOnly");
+ mlperf_conf_path = getenv("MLC_MLPERF_CONF", "../inference/mlperf.conf");
+ user_conf_path = getenv("MLC_MLPERF_USER_CONF", "../inference/vision/classification_and_detection/user.conf");
+ audit_conf_path = getenv("MLC_MLPERF_INFERENCE_AUDIT_PATH", "");
+ output_dir = getenv("MLC_MLPERF_OUTPUT_DIR", ".");
+ backend_name = getenv("MLC_MLPERF_BACKEND", "onnxruntime");
+ device_name = getenv("MLC_MLPERF_DEVICE", "cpu");
+ model_name = getenv("MLC_MODEL", "resnet50");
+ model_path = getenv("MLC_ML_MODEL_FILE_WITH_PATH", "");
+ dataset_preprocessed_path = getenv("MLC_DATASET_PREPROCESSED_PATH", "");
+ dataset_path = getenv("MLC_DATASET_PATH", "");
+ dataset_list = getenv("MLC_DATASET_LIST", "");
+ imagenet_val_path = getenv("MLC_DATASET_AUX_PATH", "") + "/val.txt";
+ scenario_name = getenv("MLC_MLPERF_LOADGEN_SCENARIO", "Offline");
+ mode_name = getenv("MLC_MLPERF_LOADGEN_MODE", "PerformanceOnly");
if (mode_name == "accuracy")
mode_name = "AccuracyOnly";
if (mode_name == "performance")
mode_name = "PerformanceOnly";
- query_count_override = std::stol(getenv("CM_MLPERF_LOADGEN_QUERY_COUNT", "0"));
+ query_count_override = std::stol(getenv("MLC_MLPERF_LOADGEN_QUERY_COUNT", "0"));
query_count_override = 0;
- performance_sample_count = std::stol(getenv("CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT", "0"));
- batch_size = std::stol(getenv("CM_MLPERF_LOADGEN_MAX_BATCHSIZE", "32"));
+ performance_sample_count = std::stol(getenv("MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT", "0"));
+ batch_size = std::stol(getenv("MLC_MLPERF_LOADGEN_MAX_BATCHSIZE", "32"));
std::cout << "MLPerf Conf path: " << mlperf_conf_path << std::endl;
std::cout << "User Conf path: " << user_conf_path << std::endl;
std::cout << "Dataset Preprocessed path: " << dataset_preprocessed_path << std::endl;
@@ -133,7 +133,7 @@ int main(int argc, const char *argv[]) {
if (input_settings.device_name == "cpu") {
device.reset(new CPUDevice());
} else if (input_settings.device_name == "gpu") {
-#ifdef CM_MLPERF_DEVICE_GPU
+#ifdef MLC_MLPERF_DEVICE_GPU
device.reset(new GPUDevice());
#endif
} else {
@@ -161,7 +161,7 @@ int main(int argc, const char *argv[]) {
// build backend
     std::shared_ptr<Backend> backend;
if (input_settings.backend_name == "onnxruntime") {
-#ifdef CM_MLPERF_BACKEND_ONNXRUNTIME
+#ifdef MLC_MLPERF_BACKEND_ONNXRUNTIME
backend.reset(new OnnxRuntimeBackend(
model, device, performance_sample_count, input_settings.batch_size,
input_settings.device_name == "gpu"));
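Note: main.cpp also normalizes the loadgen mode strings it receives (`accuracy` becomes `AccuracyOnly`, `performance` becomes `PerformanceOnly`). The same normalization in Python, handy when generating `MLC_MLPERF_LOADGEN_MODE` values for this harness:

def normalize_mode(mode):
    """Map the short CLI-style mode names onto loadgen's enum names."""
    return {'accuracy': 'AccuracyOnly',
            'performance': 'PerformanceOnly'}.get(mode, mode)

assert normalize_mode('accuracy') == 'AccuracyOnly'
assert normalize_mode('PerformanceOnly') == 'PerformanceOnly'
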
diff --git a/script/app-mlperf-inference-mlcommons-python/customize.py b/script/app-mlperf-inference-mlcommons-python/customize.py
index 0a4d22cdb..dc0e29942 100644
--- a/script/app-mlperf-inference-mlcommons-python/customize.py
+++ b/script/app-mlperf-inference-mlcommons-python/customize.py
@@ -12,76 +12,76 @@ def preprocess(i):
state = i['state']
script_path = i['run_script_input']['path']
- if env.get('CM_MLPERF_SKIP_RUN', '') == "yes":
+ if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes":
return {'return': 0}
- if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes":
+ if env.get('MLC_RUN_DOCKER_CONTAINER', '') == "yes":
return {'return': 0}
- if env.get('CM_MLPERF_POWER', '') == "yes":
+ if env.get('MLC_MLPERF_POWER', '') == "yes":
power = "yes"
else:
power = "no"
- rerun = True if env.get("CM_RERUN", "") != '' else False
+ rerun = True if env.get("MLC_RERUN", "") != '' else False
- if 'CM_MLPERF_LOADGEN_SCENARIO' not in env:
- env['CM_MLPERF_LOADGEN_SCENARIO'] = "Offline"
+ if 'MLC_MLPERF_LOADGEN_SCENARIO' not in env:
+ env['MLC_MLPERF_LOADGEN_SCENARIO'] = "Offline"
- if 'CM_MLPERF_LOADGEN_MODE' not in env:
- env['CM_MLPERF_LOADGEN_MODE'] = "accuracy"
+ if 'MLC_MLPERF_LOADGEN_MODE' not in env:
+ env['MLC_MLPERF_LOADGEN_MODE'] = "accuracy"
- if 'CM_MODEL' not in env:
+ if 'MLC_MODEL' not in env:
return {
'return': 1, 'error': "Please select a variation specifying the model to run"}
- # if env['CM_MODEL'] == "resnet50":
- # cmd = "cp " + os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt") + " " + os.path.join(env['CM_DATASET_PATH'],
+ # if env['MLC_MODEL'] == "resnet50":
+ # cmd = "cp " + os.path.join(env['MLC_DATASET_AUX_PATH'], "val.txt") + " " + os.path.join(env['MLC_DATASET_PATH'],
# "val_map.txt")
# ret = os.system(cmd)
- env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] = " " + \
- env.get('CM_MLPERF_LOADGEN_EXTRA_OPTIONS', '') + " "
+ env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] = " " + \
+ env.get('MLC_MLPERF_LOADGEN_EXTRA_OPTIONS', '') + " "
- if 'CM_MLPERF_LOADGEN_QPS' not in env:
- env['CM_MLPERF_LOADGEN_QPS_OPT'] = ""
+ if 'MLC_MLPERF_LOADGEN_QPS' not in env:
+ env['MLC_MLPERF_LOADGEN_QPS_OPT'] = ""
else:
- env['CM_MLPERF_LOADGEN_QPS_OPT'] = " --qps " + \
- env['CM_MLPERF_LOADGEN_QPS']
+ env['MLC_MLPERF_LOADGEN_QPS_OPT'] = " --qps " + \
+ env['MLC_MLPERF_LOADGEN_QPS']
- env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += env['CM_MLPERF_LOADGEN_QPS_OPT']
+ env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += env['MLC_MLPERF_LOADGEN_QPS_OPT']
- if 'CM_NUM_THREADS' not in env:
- if 'CM_MINIMIZE_THREADS' in env:
- env['CM_NUM_THREADS'] = str(int(env['CM_HOST_CPU_TOTAL_CORES']) //
- (int(env.get('CM_HOST_CPU_SOCKETS', '1')) * int(env.get('CM_HOST_CPU_TOTAL_CORES', '1'))))
+ if 'MLC_NUM_THREADS' not in env:
+ if 'MLC_MINIMIZE_THREADS' in env:
+ env['MLC_NUM_THREADS'] = str(int(env['MLC_HOST_CPU_TOTAL_CORES']) //
+ (int(env.get('MLC_HOST_CPU_SOCKETS', '1')) * int(env.get('MLC_HOST_CPU_TOTAL_CORES', '1'))))
else:
- env['CM_NUM_THREADS'] = env.get('CM_HOST_CPU_TOTAL_CORES', '1')
+ env['MLC_NUM_THREADS'] = env.get('MLC_HOST_CPU_TOTAL_CORES', '1')
- if env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '') != '' and str(env.get(
- 'CM_MLPERF_MODEL_SKIP_BATCHING', False)).lower() not in ["true", "1", "yes"]:
- env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --max-batchsize " + \
- str(env['CM_MLPERF_LOADGEN_MAX_BATCHSIZE'])
+ if env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '') != '' and str(env.get(
+ 'MLC_MLPERF_MODEL_SKIP_BATCHING', False)).lower() not in ["true", "1", "yes"]:
+ env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --max-batchsize " + \
+ str(env['MLC_MLPERF_LOADGEN_MAX_BATCHSIZE'])
- if env.get('CM_MLPERF_LOADGEN_BATCH_SIZE', '') != '':
- env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --batch-size " + \
- str(env['CM_MLPERF_LOADGEN_BATCH_SIZE'])
+ if env.get('MLC_MLPERF_LOADGEN_BATCH_SIZE', '') != '':
+ env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --batch-size " + \
+ str(env['MLC_MLPERF_LOADGEN_BATCH_SIZE'])
- if env.get('CM_MLPERF_LOADGEN_QUERY_COUNT', '') != '' and not env.get('CM_TMP_IGNORE_MLPERF_QUERY_COUNT', False) and (
- env['CM_MLPERF_LOADGEN_MODE'] == 'accuracy' or 'gptj' in env['CM_MODEL'] or 'llama2' in env['CM_MODEL'] or 'mixtral' in env['CM_MODEL'] or 'llama3' in env['CM_MODEL']) and env.get('CM_MLPERF_RUN_STYLE', '') != "valid":
- env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --count " + \
- env['CM_MLPERF_LOADGEN_QUERY_COUNT']
+ if env.get('MLC_MLPERF_LOADGEN_QUERY_COUNT', '') != '' and not env.get('MLC_TMP_IGNORE_MLPERF_QUERY_COUNT', False) and (
+ env['MLC_MLPERF_LOADGEN_MODE'] == 'accuracy' or 'gptj' in env['MLC_MODEL'] or 'llama2' in env['MLC_MODEL'] or 'mixtral' in env['MLC_MODEL'] or 'llama3' in env['MLC_MODEL']) and env.get('MLC_MLPERF_RUN_STYLE', '') != "valid":
+ env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --count " + \
+ env['MLC_MLPERF_LOADGEN_QUERY_COUNT']
print("Using MLCommons Inference source from '" +
- env['CM_MLPERF_INFERENCE_SOURCE'] + "'")
+ env['MLC_MLPERF_INFERENCE_SOURCE'] + "'")
- if 'CM_MLPERF_CONF' not in env:
- env['CM_MLPERF_CONF'] = os.path.join(
- env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf")
+ if 'MLC_MLPERF_CONF' not in env:
+ env['MLC_MLPERF_CONF'] = os.path.join(
+ env['MLC_MLPERF_INFERENCE_SOURCE'], "mlperf.conf")
x = "" if os_info['platform'] == 'windows' else "'"
- inference_src_version = env.get('CM_MLPERF_INFERENCE_SOURCE_VERSION', '')
+ inference_src_version = env.get('MLC_MLPERF_INFERENCE_SOURCE_VERSION', '')
version_tuple = None
if inference_src_version:
version_tuple = tuple(map(int, inference_src_version.split('.')))
@@ -89,80 +89,80 @@ def preprocess(i):
if version_tuple and version_tuple >= (4, 1, 1):
pass # mlperf_conf is automatically loaded by the loadgen
else:
- if "llama2-70b" in env['CM_MODEL'] or "mixtral-8x7b" in env["CM_MODEL"]:
- env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf-conf " + \
- x + env['CM_MLPERF_CONF'] + x
+ if "llama2-70b" in env['MLC_MODEL'] or "mixtral-8x7b" in env["MLC_MODEL"]:
+ env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf-conf " + \
+ x + env['MLC_MLPERF_CONF'] + x
else:
- env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf_conf " + \
- x + env['CM_MLPERF_CONF'] + x
+ env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf_conf " + \
+ x + env['MLC_MLPERF_CONF'] + x
- if env.get('CM_NETWORK_LOADGEN', '') != "lon" and env.get(
- 'CM_MLPERF_INFERENCE_API_SERVER', '') == '' and "llama2-70b" not in env['CM_MODEL']:
- env['MODEL_DIR'] = env.get('CM_ML_MODEL_PATH')
+ if env.get('MLC_NETWORK_LOADGEN', '') != "lon" and env.get(
+ 'MLC_MLPERF_INFERENCE_API_SERVER', '') == '' and "llama2-70b" not in env['MLC_MODEL']:
+ env['MODEL_DIR'] = env.get('MLC_ML_MODEL_PATH')
if not env['MODEL_DIR']:
env['MODEL_DIR'] = os.path.dirname(
env.get(
- 'CM_MLPERF_CUSTOM_MODEL_PATH',
+ 'MLC_MLPERF_CUSTOM_MODEL_PATH',
env.get(
- 'CM_ML_MODEL_FILE_WITH_PATH',
+ 'MLC_ML_MODEL_FILE_WITH_PATH',
'')))
RUN_CMD = ""
state['RUN'] = {}
- scenario = env['CM_MLPERF_LOADGEN_SCENARIO']
+ scenario = env['MLC_MLPERF_LOADGEN_SCENARIO']
state['RUN'][scenario] = {}
scenario_extra_options = ''
- NUM_THREADS = env['CM_NUM_THREADS']
+ NUM_THREADS = env['MLC_NUM_THREADS']
if int(
- NUM_THREADS) > 2 and env['CM_MLPERF_DEVICE'] == "gpu" and env['CM_MODEL'] != "rgat":
+ NUM_THREADS) > 2 and env['MLC_MLPERF_DEVICE'] == "gpu" and env['MLC_MODEL'] != "rgat":
NUM_THREADS = "2" # Don't use more than 2 threads when run on GPU
- if env['CM_MODEL'] in ['resnet50', 'retinanet',
+ if env['MLC_MODEL'] in ['resnet50', 'retinanet',
'stable-diffusion-xl', 'rgat']:
scenario_extra_options += " --threads " + NUM_THREADS
- ml_model_name = env['CM_MODEL']
- if 'CM_MLPERF_USER_CONF' in env:
- user_conf_path = env['CM_MLPERF_USER_CONF']
+ ml_model_name = env['MLC_MODEL']
+ if 'MLC_MLPERF_USER_CONF' in env:
+ user_conf_path = env['MLC_MLPERF_USER_CONF']
x = "" if os_info['platform'] == 'windows' else "'"
- if 'llama2-70b' in env['CM_MODEL'] or "mixtral-8x7b" in env["CM_MODEL"] or "llama3" in env["CM_MODEL"]:
+ if 'llama2-70b' in env['MLC_MODEL'] or "mixtral-8x7b" in env["MLC_MODEL"] or "llama3" in env["MLC_MODEL"]:
scenario_extra_options += " --user-conf " + x + user_conf_path + x
else:
scenario_extra_options += " --user_conf " + x + user_conf_path + x
- mode = env['CM_MLPERF_LOADGEN_MODE']
+ mode = env['MLC_MLPERF_LOADGEN_MODE']
mode_extra_options = ""
- if 'CM_DATASET_PREPROCESSED_PATH' in env and env['CM_MODEL'] in [
+ if 'MLC_DATASET_PREPROCESSED_PATH' in env and env['MLC_MODEL'] in [
'resnet50', 'retinanet']:
- # dataset_options = " --use_preprocessed_dataset --preprocessed_dir "+env['CM_DATASET_PREPROCESSED_PATH']
- if env.get('CM_MLPERF_LAST_RELEASE') not in ["v2.0", "v2.1"]:
+ # dataset_options = " --use_preprocessed_dataset --preprocessed_dir "+env['MLC_DATASET_PREPROCESSED_PATH']
+ if env.get('MLC_MLPERF_LAST_RELEASE') not in ["v2.0", "v2.1"]:
dataset_options = " --use_preprocessed_dataset --cache_dir " + \
- env['CM_DATASET_PREPROCESSED_PATH']
+ env['MLC_DATASET_PREPROCESSED_PATH']
else:
dataset_options = ""
- if env['CM_MODEL'] == "retinanet":
+ if env['MLC_MODEL'] == "retinanet":
dataset_options += " --dataset-list " + \
- env['CM_DATASET_ANNOTATIONS_FILE_PATH']
- elif env['CM_MODEL'] == "resnet50":
+ env['MLC_DATASET_ANNOTATIONS_FILE_PATH']
+ elif env['MLC_MODEL'] == "resnet50":
dataset_options += " --dataset-list " + \
- os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt")
- env['DATA_DIR'] = env.get('CM_DATASET_PREPROCESSED_PATH')
+ os.path.join(env['MLC_DATASET_AUX_PATH'], "val.txt")
+ env['DATA_DIR'] = env.get('MLC_DATASET_PREPROCESSED_PATH')
else:
- if 'CM_DATASET_PREPROCESSED_PATH' in env:
- env['DATA_DIR'] = env.get('CM_DATASET_PREPROCESSED_PATH')
+ if 'MLC_DATASET_PREPROCESSED_PATH' in env:
+ env['DATA_DIR'] = env.get('MLC_DATASET_PREPROCESSED_PATH')
else:
- env['DATA_DIR'] = env.get('CM_DATASET_PATH')
+ env['DATA_DIR'] = env.get('MLC_DATASET_PATH')
- if "dlrm" in env['CM_MODEL']:
- env['DATA_DIR'] = env['CM_CRITEO_PREPROCESSED_PATH']
+ if "dlrm" in env['MLC_MODEL']:
+ env['DATA_DIR'] = env['MLC_CRITEO_PREPROCESSED_PATH']
dataset_options = ''
- if env.get('CM_MLPERF_EXTRA_DATASET_ARGS', '') != '':
- dataset_options += " " + env['CM_MLPERF_EXTRA_DATASET_ARGS']
+ if env.get('MLC_MLPERF_EXTRA_DATASET_ARGS', '') != '':
+ dataset_options += " " + env['MLC_MLPERF_EXTRA_DATASET_ARGS']
if mode == "accuracy":
mode_extra_options += " --accuracy"
@@ -172,32 +172,32 @@ def preprocess(i):
elif mode == "compliance":
- audit_full_path = env['CM_MLPERF_INFERENCE_AUDIT_PATH']
+ audit_full_path = env['MLC_MLPERF_INFERENCE_AUDIT_PATH']
mode_extra_options = " --audit '" + audit_full_path + "'"
- if env.get('CM_MLPERF_OUTPUT_DIR', '') == '':
- env['CM_MLPERF_OUTPUT_DIR'] = os.getcwd()
+ if env.get('MLC_MLPERF_OUTPUT_DIR', '') == '':
+ env['MLC_MLPERF_OUTPUT_DIR'] = os.getcwd()
- mlperf_implementation = env.get('CM_MLPERF_IMPLEMENTATION', 'reference')
+ mlperf_implementation = env.get('MLC_MLPERF_IMPLEMENTATION', 'reference')
cmd, run_dir = get_run_cmd(os_info, env, scenario_extra_options,
mode_extra_options, dataset_options, mlperf_implementation)
- if env.get('CM_NETWORK_LOADGEN', '') == "lon":
+ if env.get('MLC_NETWORK_LOADGEN', '') == "lon":
run_cmd = i['state']['mlperf_inference_run_cmd']
- env['CM_SSH_RUN_COMMANDS'] = []
- env['CM_SSH_RUN_COMMANDS'].append(
+ env['MLC_SSH_RUN_COMMANDS'] = []
+ env['MLC_SSH_RUN_COMMANDS'].append(
run_cmd.replace(
"--network=lon",
"--network=sut") + " &")
- env['CM_MLPERF_RUN_CMD'] = cmd
- env['CM_RUN_DIR'] = run_dir
- env['CM_RUN_CMD'] = cmd
- env['CK_PROGRAM_TMP_DIR'] = env.get('CM_ML_MODEL_PATH') # for tvm
+ env['MLC_MLPERF_RUN_CMD'] = cmd
+ env['MLC_RUN_DIR'] = run_dir
+ env['MLC_RUN_CMD'] = cmd
+ env['CK_PROGRAM_TMP_DIR'] = env.get('MLC_ML_MODEL_PATH') # for tvm
- if env.get('CM_HOST_PLATFORM_FLAVOR', '') == "arm64":
- env['CM_HOST_PLATFORM_FLAVOR'] = "aarch64"
+ if env.get('MLC_HOST_PLATFORM_FLAVOR', '') == "arm64":
+ env['MLC_HOST_PLATFORM_FLAVOR'] = "aarch64"
return {'return': 0}
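The hunks above are a mechanical rename: every `CM_`-prefixed key read from or written to `env` becomes `MLC_`-prefixed, with the surrounding logic untouched. Scripts that must run against both naming schemes during the transition can hide the prefix behind a small lookup helper. A minimal sketch, assuming only the prefix differs between the two schemes (`getenv_compat` is an illustrative name, not part of this patch):

```python
def getenv_compat(env, key, default=None):
    # Look up a key by its unprefixed name, e.g. "MLPERF_DEVICE",
    # preferring the new MLC_ prefix and falling back to legacy CM_.
    for prefix in ("MLC_", "CM_"):
        value = env.get(prefix + key)
        if value is not None:
            return value
    return default

# Works with pre- and post-rename environments alike.
env = {"MLC_MLPERF_DEVICE": "gpu"}
assert getenv_compat(env, "MLPERF_DEVICE") == "gpu"
```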
@@ -216,318 +216,318 @@ def get_run_cmd(os_info, env, scenario_extra_options,
def get_run_cmd_reference(
os_info, env, scenario_extra_options, mode_extra_options, dataset_options):
- if env['CM_MODEL'] in ["gptj-99", "gptj-99.9"]:
+ if env['MLC_MODEL'] in ["gptj-99", "gptj-99.9"]:
env['RUN_DIR'] = os.path.join(
- env['CM_MLPERF_INFERENCE_SOURCE'], "language", "gpt-j")
- if env.get('CM_NETWORK_LOADGEN', '') != "lon":
- cmd = env['CM_PYTHON_BIN_WITH_PATH'] + \
- " main.py --model-path=" + env['CM_ML_MODEL_FILE_WITH_PATH'] + ' --dataset-path=' + env['CM_DATASET_EVAL_PATH'] + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
- ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION'] + \
+ env['MLC_MLPERF_INFERENCE_SOURCE'], "language", "gpt-j")
+ if env.get('MLC_NETWORK_LOADGEN', '') != "lon":
+ cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + \
+ " main.py --model-path=" + env['MLC_ML_MODEL_FILE_WITH_PATH'] + ' --dataset-path=' + env['MLC_DATASET_EVAL_PATH'] + " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + " " + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
+ ' --dtype ' + env['MLC_MLPERF_MODEL_PRECISION'] + \
scenario_extra_options + mode_extra_options + dataset_options
else:
- cmd = env['CM_PYTHON_BIN_WITH_PATH'] + \
- " main.py" + ' --dataset-path=' + env['CM_DATASET_EVAL_PATH'] + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
- ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION'] + \
+ cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + \
+ " main.py" + ' --dataset-path=' + env['MLC_DATASET_EVAL_PATH'] + " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + " " + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
+ ' --dtype ' + env['MLC_MLPERF_MODEL_PRECISION'] + \
scenario_extra_options + mode_extra_options + dataset_options
cmd = cmd.replace("--count", "--max_examples")
- if env['CM_MLPERF_DEVICE'] == "gpu":
+ if env['MLC_MLPERF_DEVICE'] == "gpu":
gpu_options = " --gpu"
env['CUDA_VISIBLE_DEVICES'] = "0"
else:
gpu_options = ""
cmd = cmd + gpu_options
- env['LOG_PATH'] = env['CM_MLPERF_OUTPUT_DIR']
+ env['LOG_PATH'] = env['MLC_MLPERF_OUTPUT_DIR']
- if env['CM_MODEL'] in ["resnet50", "retinanet"]:
+ if env['MLC_MODEL'] in ["resnet50", "retinanet"]:
env['RUN_DIR'] = os.path.join(
- env['CM_MLPERF_INFERENCE_SOURCE'],
+ env['MLC_MLPERF_INFERENCE_SOURCE'],
"vision",
"classification_and_detection")
- env['OUTPUT_DIR'] = env['CM_MLPERF_OUTPUT_DIR']
- if env.get('CM_MLPERF_VISION_DATASET_OPTION', '') == '' and env.get(
- 'CM_MLPERF_DEVICE') != "tpu":
+ env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR']
+ if env.get('MLC_MLPERF_VISION_DATASET_OPTION', '') == '' and env.get(
+ 'MLC_MLPERF_DEVICE') != "tpu":
if os_info['platform'] == 'windows':
- cmd = "python python/main.py --profile " + env['CM_MODEL'] + "-" + env['CM_MLPERF_BACKEND'] + \
- " --model=" + env['CM_ML_MODEL_FILE_WITH_PATH'] + ' --dataset-path=' + env['CM_DATASET_PREPROCESSED_PATH'] + \
- " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + \
+ cmd = "python python/main.py --profile " + env['MLC_MODEL'] + "-" + env['MLC_MLPERF_BACKEND'] + \
+ " --model=" + env['MLC_ML_MODEL_FILE_WITH_PATH'] + ' --dataset-path=' + env['MLC_DATASET_PREPROCESSED_PATH'] + \
+ " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + " " + \
" --output " + env['OUTPUT_DIR'] + " " + \
- env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
+ env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
scenario_extra_options + mode_extra_options + dataset_options
else:
- cmd = "./run_local.sh " + env['CM_MLPERF_BACKEND'] + ' ' + \
- env['CM_MODEL'] + ' ' + env['CM_MLPERF_DEVICE'] + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
+ cmd = "./run_local.sh " + env['MLC_MLPERF_BACKEND'] + ' ' + \
+ env['MLC_MODEL'] + ' ' + env['MLC_MLPERF_DEVICE'] + " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + " " + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
scenario_extra_options + mode_extra_options + dataset_options
return cmd, env['RUN_DIR']
- if env['CM_MLPERF_BACKEND'] == "ncnn":
+ if env['MLC_MLPERF_BACKEND'] == "ncnn":
env['MODEL_FILE'] = os.path.join(
os.path.dirname(
- env.get('CM_ML_MODEL_FILE_WITH_PATH')),
+ env.get('MLC_ML_MODEL_FILE_WITH_PATH')),
"resnet50_v1")
else:
env['MODEL_FILE'] = env.get(
- 'CM_MLPERF_CUSTOM_MODEL_PATH',
- env.get('CM_ML_MODEL_FILE_WITH_PATH'))
+ 'MLC_MLPERF_CUSTOM_MODEL_PATH',
+ env.get('MLC_ML_MODEL_FILE_WITH_PATH'))
if not env['MODEL_FILE']:
return {'return': 1, 'error': 'No valid model file found!'}
- env['LOG_PATH'] = env['CM_MLPERF_OUTPUT_DIR']
+ env['LOG_PATH'] = env['MLC_MLPERF_OUTPUT_DIR']
- extra_options = " --output " + env['CM_MLPERF_OUTPUT_DIR'] + " --model-name resnet50 --dataset " + env['CM_MLPERF_VISION_DATASET_OPTION'] + ' --max-batchsize ' + env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '1') + \
- " --dataset-path " + env['CM_DATASET_PREPROCESSED_PATH'] + " --model " + env['MODEL_FILE'] + \
- " --preprocessed_dir " + env['CM_DATASET_PREPROCESSED_PATH']
+ extra_options = " --output " + env['MLC_MLPERF_OUTPUT_DIR'] + " --model-name resnet50 --dataset " + env['MLC_MLPERF_VISION_DATASET_OPTION'] + ' --max-batchsize ' + env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '1') + \
+ " --dataset-path " + env['MLC_DATASET_PREPROCESSED_PATH'] + " --model " + env['MODEL_FILE'] + \
+ " --preprocessed_dir " + env['MLC_DATASET_PREPROCESSED_PATH']
- if env.get('CM_MLPERF_DEVICE') == "tpu":
- cmd = "cd '" + os.path.join(env['RUN_DIR'], "python") + "' && " + env.get('CM_SUDO', "") + " " + env['CM_PYTHON_BIN_WITH_PATH'] + " main.py " +\
- "--backend " + env['CM_MLPERF_BACKEND'] + " --scenario=" + env['CM_MLPERF_LOADGEN_SCENARIO'] + " --device tpu " + \
- env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + scenario_extra_options + \
+ if env.get('MLC_MLPERF_DEVICE') == "tpu":
+ cmd = "cd '" + os.path.join(env['RUN_DIR'], "python") + "' && " + env.get('MLC_SUDO', "") + " " + env['MLC_PYTHON_BIN_WITH_PATH'] + " main.py " +\
+ "--backend " + env['MLC_MLPERF_BACKEND'] + " --scenario=" + env['MLC_MLPERF_LOADGEN_SCENARIO'] + " --device tpu " + \
+ env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + scenario_extra_options + \
mode_extra_options + dataset_options + extra_options
else:
- cmd = "cd '" + os.path.join(env['RUN_DIR'], "python") + "' && " + env['CM_PYTHON_BIN_WITH_PATH'] + " main.py " +\
- "--backend " + env['CM_MLPERF_BACKEND'] + " --scenario=" + env['CM_MLPERF_LOADGEN_SCENARIO'] + \
- env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + scenario_extra_options + \
+ cmd = "cd '" + os.path.join(env['RUN_DIR'], "python") + "' && " + env['MLC_PYTHON_BIN_WITH_PATH'] + " main.py " +\
+ "--backend " + env['MLC_MLPERF_BACKEND'] + " --scenario=" + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \
+ env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + scenario_extra_options + \
mode_extra_options + dataset_options + extra_options
env['SKIP_VERIFY_ACCURACY'] = True
- elif "bert" in env['CM_MODEL']:
+ elif "bert" in env['MLC_MODEL']:
env['RUN_DIR'] = os.path.join(
- env['CM_MLPERF_INFERENCE_SOURCE'], "language", "bert")
+ env['MLC_MLPERF_INFERENCE_SOURCE'], "language", "bert")
env['MODEL_FILE'] = env.get(
- 'CM_MLPERF_CUSTOM_MODEL_PATH',
- env.get('CM_ML_MODEL_FILE_WITH_PATH'))
+ 'MLC_MLPERF_CUSTOM_MODEL_PATH',
+ env.get('MLC_ML_MODEL_FILE_WITH_PATH'))
if not env['MODEL_FILE']:
return {'return': 1, 'error': 'No valid model file found!'}
- if env.get('CM_MLPERF_QUANTIZATION') in ["on", True, "1", "True"]:
+ if env.get('MLC_MLPERF_QUANTIZATION') in ["on", True, "1", "True"]:
quantization_options = " --quantized"
else:
quantization_options = ""
- cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " run.py --backend=" + env['CM_MLPERF_BACKEND'] + " --scenario=" + env['CM_MLPERF_LOADGEN_SCENARIO'] + \
- env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + scenario_extra_options + \
+ cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " run.py --backend=" + env['MLC_MLPERF_BACKEND'] + " --scenario=" + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \
+ env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + scenario_extra_options + \
mode_extra_options + dataset_options + quantization_options
- if env['CM_MLPERF_BACKEND'] == "deepsparse":
+ if env['MLC_MLPERF_BACKEND'] == "deepsparse":
cmd += " --batch_size=" + \
- env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '1') + \
+ env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '1') + \
" --model_path=" + env['MODEL_FILE']
- if env.get('CM_MLPERF_CUSTOM_MODEL_PATH', '') != '':
- env['CM_ML_MODEL_FILE_WITH_PATH'] = env['MODEL_FILE']
+ if env.get('MLC_MLPERF_CUSTOM_MODEL_PATH', '') != '':
+ env['MLC_ML_MODEL_FILE_WITH_PATH'] = env['MODEL_FILE']
cmd = cmd.replace("--count", "--max_examples")
- env['VOCAB_FILE'] = env['CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH']
- env['DATASET_FILE'] = env['CM_DATASET_SQUAD_VAL_PATH']
- env['LOG_PATH'] = env['CM_MLPERF_OUTPUT_DIR']
+ env['VOCAB_FILE'] = env['MLC_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH']
+ env['DATASET_FILE'] = env['MLC_DATASET_SQUAD_VAL_PATH']
+ env['LOG_PATH'] = env['MLC_MLPERF_OUTPUT_DIR']
env['SKIP_VERIFY_ACCURACY'] = True
- elif "rnnt" in env['CM_MODEL']:
+ elif "rnnt" in env['MLC_MODEL']:
- env['RUN_DIR'] = env['CM_MLPERF_INFERENCE_RNNT_PATH']
- cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " run.py --backend " + env['CM_MLPERF_BACKEND'] + \
- " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + \
- " --manifest " + env['CM_DATASET_PREPROCESSED_JSON'] + \
- " --dataset_dir " + os.path.join(env['CM_DATASET_PREPROCESSED_PATH'], "..") + \
+ env['RUN_DIR'] = env['MLC_MLPERF_INFERENCE_RNNT_PATH']
+ cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " run.py --backend " + env['MLC_MLPERF_BACKEND'] + \
+ " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \
+ " --manifest " + env['MLC_DATASET_PREPROCESSED_JSON'] + \
+ " --dataset_dir " + os.path.join(env['MLC_DATASET_PREPROCESSED_PATH'], "..") + \
" --pytorch_config_toml " + os.path.join("pytorch", "configs", "rnnt.toml") + \
- " --pytorch_checkpoint " + env['CM_ML_MODEL_FILE_WITH_PATH'] + \
- " --log_dir " + env['CM_MLPERF_OUTPUT_DIR'] + \
- env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
+ " --pytorch_checkpoint " + env['MLC_ML_MODEL_FILE_WITH_PATH'] + \
+ " --log_dir " + env['MLC_MLPERF_OUTPUT_DIR'] + \
+ env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
scenario_extra_options + mode_extra_options + dataset_options
env['SKIP_VERIFY_ACCURACY'] = True
- elif "stable-diffusion-xl" in env['CM_MODEL']:
+ elif "stable-diffusion-xl" in env['MLC_MODEL']:
env['RUN_DIR'] = os.path.join(
- env['CM_MLPERF_INFERENCE_SOURCE'], "text_to_image")
+ env['MLC_MLPERF_INFERENCE_SOURCE'], "text_to_image")
if env.get('+PYTHONPATH', '') == '':
env['+PYTHONPATH'] = []
env['+PYTHONPATH'].append(
os.path.join(
- env['CM_MLPERF_INFERENCE_SOURCE'],
+ env['MLC_MLPERF_INFERENCE_SOURCE'],
"text_to_image",
"tools",
"fid"))
- backend = env['CM_MLPERF_BACKEND']
- device = env['CM_MLPERF_DEVICE'] if env['CM_MLPERF_DEVICE'] not in [
+ backend = env['MLC_MLPERF_BACKEND']
+ device = env['MLC_MLPERF_DEVICE'] if env['MLC_MLPERF_DEVICE'] not in [
"gpu", "rocm"] else "cuda"
- max_batchsize = env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '1')
- cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " main.py " \
- " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + \
+ max_batchsize = env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '1')
+ cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " main.py " \
+ " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \
" --profile " + 'stable-diffusion-xl-pytorch ' + \
" --dataset " + 'coco-1024' + \
- " --dataset-path " + env['CM_DATASET_PATH_ROOT'] + \
- ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION'].replace("bfloat", "bf").replace("float", "fp") + \
+ " --dataset-path " + env['MLC_DATASET_PATH_ROOT'] + \
+ ' --dtype ' + env['MLC_MLPERF_MODEL_PRECISION'].replace("bfloat", "bf").replace("float", "fp") + \
" --device " + device + \
- env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
+ env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
scenario_extra_options + mode_extra_options + \
- " --output " + env['CM_MLPERF_OUTPUT_DIR'] + \
- " --model-path " + env['CM_ML_MODEL_PATH']
+ " --output " + env['MLC_MLPERF_OUTPUT_DIR'] + \
+ " --model-path " + env['MLC_ML_MODEL_PATH']
if "--max-batchsize" not in cmd:
cmd += " --max-batchsize " + max_batchsize
- if env.get('CM_COCO2014_SAMPLE_ID_PATH', '') != '':
- cmd += " --ids-path " + env['CM_COCO2014_SAMPLE_ID_PATH']
+ if env.get('MLC_COCO2014_SAMPLE_ID_PATH', '') != '':
+ cmd += " --ids-path " + env['MLC_COCO2014_SAMPLE_ID_PATH']
- elif "llama2-70b" in env['CM_MODEL']:
+ elif "llama2-70b" in env['MLC_MODEL']:
env['RUN_DIR'] = os.path.join(
- env['CM_MLPERF_INFERENCE_SOURCE'],
+ env['MLC_MLPERF_INFERENCE_SOURCE'],
"language",
"llama2-70b")
- backend = env['CM_MLPERF_BACKEND']
- device = env['CM_MLPERF_DEVICE'] if env['CM_MLPERF_DEVICE'] != "gpu" else "cuda"
+ backend = env['MLC_MLPERF_BACKEND']
+ device = env['MLC_MLPERF_DEVICE'] if env['MLC_MLPERF_DEVICE'] != "gpu" else "cuda"
- cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " main.py " \
- " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + \
- " --dataset-path " + env['CM_DATASET_PREPROCESSED_PATH'] + \
+ cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " main.py " \
+ " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \
+ " --dataset-path " + env['MLC_DATASET_PREPROCESSED_PATH'] + \
" --device " + device.replace("cuda", "cuda:0") + \
- env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
+ env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
scenario_extra_options + mode_extra_options + \
- " --output-log-dir " + env['CM_MLPERF_OUTPUT_DIR'] + \
- ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION']
-
- if env.get('CM_MLPERF_INFERENCE_API_SERVER', '') != '':
- env['CM_VLLM_SERVER_MODEL_NAME'] = env.get(
- "CM_VLLM_SERVER_MODEL_NAME") or "NousResearch/Meta-Llama-3-8B-Instruct"
- # env['CM_MLPERF_INFERENCE_API_SERVER'] = "http://localhost:8000"
- cmd += f""" --api-server {env['CM_MLPERF_INFERENCE_API_SERVER']} \
- --model-path {env['CM_VLLM_SERVER_MODEL_NAME']} \
- --api-model-name {env['CM_VLLM_SERVER_MODEL_NAME']} --vllm """
+ " --output-log-dir " + env['MLC_MLPERF_OUTPUT_DIR'] + \
+ ' --dtype ' + env['MLC_MLPERF_MODEL_PRECISION']
+
+ if env.get('MLC_MLPERF_INFERENCE_API_SERVER', '') != '':
+ env['MLC_VLLM_SERVER_MODEL_NAME'] = env.get(
+ "MLC_VLLM_SERVER_MODEL_NAME") or "NousResearch/Meta-Llama-3-8B-Instruct"
+ # env['MLC_MLPERF_INFERENCE_API_SERVER'] = "http://localhost:8000"
+ cmd += f""" --api-server {env['MLC_MLPERF_INFERENCE_API_SERVER']} \
+ --model-path {env['MLC_VLLM_SERVER_MODEL_NAME']} \
+ --api-model-name {env['MLC_VLLM_SERVER_MODEL_NAME']} --vllm """
else:
cmd += f" --model-path {env['LLAMA2_CHECKPOINT_PATH']}"
- if env.get('CM_MLPERF_INFERENCE_NUM_WORKERS', '') != '':
- cmd += f" --num-workers {env['CM_MLPERF_INFERENCE_NUM_WORKERS']}"
+ if env.get('MLC_MLPERF_INFERENCE_NUM_WORKERS', '') != '':
+ cmd += f" --num-workers {env['MLC_MLPERF_INFERENCE_NUM_WORKERS']}"
cmd = cmd.replace("--count", "--total-sample-count")
cmd = cmd.replace("--max-batchsize", "--batch-size")
- elif "mixtral-8x7b" in env['CM_MODEL']:
+ elif "mixtral-8x7b" in env['MLC_MODEL']:
env['RUN_DIR'] = os.path.join(
- env['CM_MLPERF_INFERENCE_SOURCE'],
+ env['MLC_MLPERF_INFERENCE_SOURCE'],
"language",
"mixtral-8x7b")
- backend = env['CM_MLPERF_BACKEND']
- device = env['CM_MLPERF_DEVICE'] if env['CM_MLPERF_DEVICE'] != "gpu" else "cuda"
- cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " main.py " \
- " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + \
- " --dataset-path " + env['CM_DATASET_MIXTRAL_PREPROCESSED_PATH'] + \
+ backend = env['MLC_MLPERF_BACKEND']
+ device = env['MLC_MLPERF_DEVICE'] if env['MLC_MLPERF_DEVICE'] != "gpu" else "cuda"
+ cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " main.py " \
+ " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \
+ " --dataset-path " + env['MLC_DATASET_MIXTRAL_PREPROCESSED_PATH'] + \
" --device " + device.replace("cuda", "cuda:0") + \
- env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
+ env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
scenario_extra_options + mode_extra_options + \
- " --output-log-dir " + env['CM_MLPERF_OUTPUT_DIR'] + \
- ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION'] + \
+ " --output-log-dir " + env['MLC_MLPERF_OUTPUT_DIR'] + \
+ ' --dtype ' + env['MLC_MLPERF_MODEL_PRECISION'] + \
" --model-path " + env['MIXTRAL_CHECKPOINT_PATH']
cmd = cmd.replace("--count", "--total-sample-count")
cmd = cmd.replace("--max-batchsize", "--batch-size")
- elif "3d-unet" in env['CM_MODEL']:
+ elif "3d-unet" in env['MLC_MODEL']:
- env['RUN_DIR'] = env['CM_MLPERF_INFERENCE_3DUNET_PATH']
- backend = env['CM_MLPERF_BACKEND'] if env['CM_MLPERF_BACKEND'] != 'tf' else 'tensorflow'
- cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " run.py --backend=" + backend + " --scenario=" + env['CM_MLPERF_LOADGEN_SCENARIO'] + \
- env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
- " --model=" + env['CM_ML_MODEL_FILE_WITH_PATH'] + \
- " --preprocessed_data_dir=" + env['CM_DATASET_KITS19_PREPROCESSED_PATH'] + \
+ env['RUN_DIR'] = env['MLC_MLPERF_INFERENCE_3DUNET_PATH']
+ backend = env['MLC_MLPERF_BACKEND'] if env['MLC_MLPERF_BACKEND'] != 'tf' else 'tensorflow'
+ cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " run.py --backend=" + backend + " --scenario=" + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \
+ env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
+ " --model=" + env['MLC_ML_MODEL_FILE_WITH_PATH'] + \
+ " --preprocessed_data_dir=" + env['MLC_DATASET_KITS19_PREPROCESSED_PATH'] + \
scenario_extra_options + mode_extra_options + dataset_options
- env['LOG_PATH'] = env['CM_MLPERF_OUTPUT_DIR']
+ env['LOG_PATH'] = env['MLC_MLPERF_OUTPUT_DIR']
env['SKIP_VERIFY_ACCURACY'] = True
- elif "dlrm" in env['CM_MODEL']: # DLRM is in draft stage
+ elif "dlrm" in env['MLC_MODEL']: # DLRM is in draft stage
env['RUN_DIR'] = os.path.join(
- env['CM_MLPERF_INFERENCE_DLRM_V2_PATH'], "pytorch")
- if 'multihot-criteo-sample' in env['CM_ML_MODEL_DATASET_TYPE']:
+ env['MLC_MLPERF_INFERENCE_DLRM_V2_PATH'], "pytorch")
+ if 'multihot-criteo-sample' in env['MLC_ML_MODEL_DATASET_TYPE']:
dataset = "multihot-criteo-sample"
- elif 'multihot-criteo' in env['CM_ML_MODEL_DATASET_TYPE']:
+ elif 'multihot-criteo' in env['MLC_ML_MODEL_DATASET_TYPE']:
dataset = "multihot-criteo"
env['MODEL_DIR'] = os.path.join(env['MODEL_DIR'], "model_weights")
- if env.get('CM_MLPERF_BIN_LOADER', '') == 'yes':
+ if env.get('MLC_MLPERF_BIN_LOADER', '') == 'yes':
mlperf_bin_loader_string = " --mlperf-bin-loader"
else:
mlperf_bin_loader_string = ""
- if env.get('CM_ML_MODEL_DEBUG', '') == 'yes':
+ if env.get('MLC_ML_MODEL_DEBUG', '') == 'yes':
config = " --max-ind-range=10000000 --data-sub-sample-rate=0.875 "
else:
config = " --max-ind-range=40000000 "
- if env['CM_MLPERF_DEVICE'] == "gpu":
+ if env['MLC_MLPERF_DEVICE'] == "gpu":
gpu_options = ""
env['CUDA_VISIBLE_DEVICES'] = "0"
else:
gpu_options = ""
env['WORLD_SIZE'] = "1"
- if env['CM_MLPERF_LOADGEN_MODE'] == "accuracy" and env['CM_MLPERF_LOADGEN_SCENARIO'] == "Offline":
+ if env['MLC_MLPERF_LOADGEN_MODE'] == "accuracy" and env['MLC_MLPERF_LOADGEN_SCENARIO'] == "Offline":
mode_extra_options += " --samples-per-query-offline=1"
- cmd = " ./run_local.sh " + env['CM_MLPERF_BACKEND'] + \
- ' dlrm ' + dataset + ' ' + env['CM_MLPERF_DEVICE'] + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + \
- env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
+ cmd = " ./run_local.sh " + env['MLC_MLPERF_BACKEND'] + \
+ ' dlrm ' + dataset + ' ' + env['MLC_MLPERF_DEVICE'] + " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + " " + \
+ env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
config + mlperf_bin_loader_string + \
' --samples-to-aggregate-quantile-file=./tools/dist_quantile.txt ' + \
scenario_extra_options + mode_extra_options + dataset_options + gpu_options
cmd = cmd.replace("--count", "--count-queries")
- env['OUTPUT_DIR'] = env['CM_MLPERF_OUTPUT_DIR']
+ env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR']
- elif "rgat" in env['CM_MODEL']:
+ elif "rgat" in env['MLC_MODEL']:
env['RUN_DIR'] = os.path.join(
- env['CM_MLPERF_INFERENCE_SOURCE'],
+ env['MLC_MLPERF_INFERENCE_SOURCE'],
"graph",
"R-GAT")
- backend = env['CM_MLPERF_BACKEND']
+ backend = env['MLC_MLPERF_BACKEND']
- dtype_rgat = env['CM_MLPERF_MODEL_PRECISION'].replace("float", "fp")
+ dtype_rgat = env['MLC_MLPERF_MODEL_PRECISION'].replace("float", "fp")
- if env.get('CM_MLPERF_SUBMISSION_GENERATION_STYLE', '') == "full":
+ if env.get('MLC_MLPERF_SUBMISSION_GENERATION_STYLE', '') == "full":
mode_extra_options += " --dataset igbh-dgl --profile rgat-dgl-full "
else:
mode_extra_options += " --dataset igbh-dgl-tiny --profile debug-dgl "
- device = env['CM_MLPERF_DEVICE'] if env['CM_MLPERF_DEVICE'] != "gpu" else "cuda"
+ device = env['MLC_MLPERF_DEVICE'] if env['MLC_MLPERF_DEVICE'] != "gpu" else "cuda"
-        # have to add the condition for running in debug mode or real run mode
+        # TODO: add a condition to switch between debug mode and a real run
- cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " main.py " \
- " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + \
- " --dataset-path " + env['CM_DATASET_IGBH_PATH'] + \
+ cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " main.py " \
+ " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \
+ " --dataset-path " + env['MLC_DATASET_IGBH_PATH'] + \
" --device " + device.replace("cuda", "gpu") + \
- env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
+ env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
scenario_extra_options + mode_extra_options + \
- " --output " + env['CM_MLPERF_OUTPUT_DIR'] + \
+ " --output " + env['MLC_MLPERF_OUTPUT_DIR'] + \
' --dtype ' + dtype_rgat + \
" --model-path " + env['RGAT_CHECKPOINT_PATH']
- if env.get('CM_ACTIVATE_RGAT_IN_MEMORY', '') == "yes":
+ if env.get('MLC_ACTIVATE_RGAT_IN_MEMORY', '') == "yes":
cmd += " --in-memory "
- elif "llama3" in env['CM_MODEL']:
+ elif "llama3" in env['MLC_MODEL']:
env['RUN_DIR'] = os.path.join(
- env['CM_MLPERF_INFERENCE_SOURCE'],
+ env['MLC_MLPERF_INFERENCE_SOURCE'],
"language",
"llama3.1-405b")
- if int(env.get('CM_MLPERF_INFERENCE_TP_SIZE', '')) > 1:
+        if int(env.get('MLC_MLPERF_INFERENCE_TP_SIZE', '1')) > 1:
env['VLLM_WORKER_MULTIPROC_METHOD'] = "spawn"
- cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " main.py " \
- " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + \
- " --dataset-path " + env['CM_DATASET_LLAMA3_PATH'] + \
- " --output-log-dir " + env['CM_MLPERF_OUTPUT_DIR'] + \
- ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION'] + \
- " --model-path " + env['CM_ML_MODEL_LLAMA3_CHECKPOINT_PATH'] + \
- " --tensor-parallel-size " + env['CM_MLPERF_INFERENCE_TP_SIZE'] + \
+ cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " main.py " \
+ " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \
+ " --dataset-path " + env['MLC_DATASET_LLAMA3_PATH'] + \
+ " --output-log-dir " + env['MLC_MLPERF_OUTPUT_DIR'] + \
+ ' --dtype ' + env['MLC_MLPERF_MODEL_PRECISION'] + \
+ " --model-path " + env['MLC_ML_MODEL_LLAMA3_CHECKPOINT_PATH'] + \
+ " --tensor-parallel-size " + env['MLC_MLPERF_INFERENCE_TP_SIZE'] + \
" --vllm "
- if env.get('CM_MLPERF_INFERENCE_NUM_WORKERS', '') != '':
- cmd += f" --num-workers {env['CM_MLPERF_INFERENCE_NUM_WORKERS']}"
+ if env.get('MLC_MLPERF_INFERENCE_NUM_WORKERS', '') != '':
+ cmd += f" --num-workers {env['MLC_MLPERF_INFERENCE_NUM_WORKERS']}"
cmd = cmd.replace("--count", "--total-sample-count")
cmd = cmd.replace("--max-batchsize", "--batch-size")
- if env.get('CM_NETWORK_LOADGEN', '') in ["lon", "sut"]:
- cmd = cmd + " " + "--network " + env['CM_NETWORK_LOADGEN']
- if env.get('CM_NETWORK_LOADGEN_SUT_SERVERS', []):
- sut_servers = env['CM_NETWORK_LOADGEN_SUT_SERVERS']
+ if env.get('MLC_NETWORK_LOADGEN', '') in ["lon", "sut"]:
+ cmd = cmd + " " + "--network " + env['MLC_NETWORK_LOADGEN']
+ if env.get('MLC_NETWORK_LOADGEN_SUT_SERVERS', []):
+ sut_servers = env['MLC_NETWORK_LOADGEN_SUT_SERVERS']
cmd += " --sut_server '" + "','".join(sut_servers) + "' "
return cmd, env['RUN_DIR']
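`get_run_cmd_reference` hands back a fully assembled shell string plus the directory it must execute from; `preprocess` stores them in `MLC_RUN_CMD` and `MLC_RUN_DIR` (see the first hunk) for the run stage to pick up. A rough sketch of that final execution step, assuming a plain `subprocess` runner rather than the automation's own wrappers:

```python
import subprocess

def execute_run_cmd(cmd: str, run_dir: str) -> int:
    # shell=True because `cmd` is a pre-assembled shell string, e.g.
    # "./run_local.sh onnxruntime resnet50 cpu --scenario Offline ...".
    result = subprocess.run(cmd, shell=True, cwd=run_dir)
    return result.returncode
```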
diff --git a/script/app-mlperf-inference-mlcommons-python/meta.yaml b/script/app-mlperf-inference-mlcommons-python/meta.yaml
index 250d2dc86..a23acee4d 100644
--- a/script/app-mlperf-inference-mlcommons-python/meta.yaml
+++ b/script/app-mlperf-inference-mlcommons-python/meta.yaml
@@ -22,69 +22,69 @@ tags:
# Default environment
default_env:
- CM_MLPERF_LOADGEN_MODE: accuracy
- CM_MLPERF_LOADGEN_SCENARIO: Offline
- CM_OUTPUT_FOLDER_NAME: test_results
- CM_MLPERF_RUN_STYLE: test
- CM_TEST_QUERY_COUNT: "10"
- CM_MLPERF_QUANTIZATION: off
- CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: reference
- CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX: ""
+ MLC_MLPERF_LOADGEN_MODE: accuracy
+ MLC_MLPERF_LOADGEN_SCENARIO: Offline
+ MLC_OUTPUT_FOLDER_NAME: test_results
+ MLC_MLPERF_RUN_STYLE: test
+ MLC_TEST_QUERY_COUNT: "10"
+ MLC_MLPERF_QUANTIZATION: off
+ MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: reference
+ MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX: ""
docker:
real_run: False
# Map script inputs to environment variables
input_mapping:
- count: CM_MLPERF_LOADGEN_QUERY_COUNT
- docker: CM_RUN_DOCKER_CONTAINER
- hw_name: CM_HW_NAME
+ count: MLC_MLPERF_LOADGEN_QUERY_COUNT
+ docker: MLC_RUN_DOCKER_CONTAINER
+ hw_name: MLC_HW_NAME
imagenet_path: IMAGENET_PATH
- max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE
- mode: CM_MLPERF_LOADGEN_MODE
- num_threads: CM_NUM_THREADS
- threads: CM_NUM_THREADS
- dataset: CM_MLPERF_VISION_DATASET_OPTION
- model: CM_MLPERF_CUSTOM_MODEL_PATH
+ max_batchsize: MLC_MLPERF_LOADGEN_MAX_BATCHSIZE
+ mode: MLC_MLPERF_LOADGEN_MODE
+ num_threads: MLC_NUM_THREADS
+ threads: MLC_NUM_THREADS
+ dataset: MLC_MLPERF_VISION_DATASET_OPTION
+ model: MLC_MLPERF_CUSTOM_MODEL_PATH
output_dir: OUTPUT_BASE_DIR
- power: CM_MLPERF_POWER
- power_server: CM_MLPERF_POWER_SERVER_ADDRESS
- ntp_server: CM_MLPERF_POWER_NTP_SERVER
- max_amps: CM_MLPERF_POWER_MAX_AMPS
- max_volts: CM_MLPERF_POWER_MAX_VOLTS
- regenerate_files: CM_REGENERATE_MEASURE_FILES
- rerun: CM_RERUN
- scenario: CM_MLPERF_LOADGEN_SCENARIO
- test_query_count: CM_TEST_QUERY_COUNT
- clean: CM_MLPERF_CLEAN_SUBMISSION_DIR
- dataset_args: CM_MLPERF_EXTRA_DATASET_ARGS
- target_qps: CM_MLPERF_LOADGEN_TARGET_QPS
- target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY
- offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
- server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS
- singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
- multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
- network: CM_NETWORK_LOADGEN
- sut_servers: CM_NETWORK_LOADGEN_SUT_SERVERS
+ power: MLC_MLPERF_POWER
+ power_server: MLC_MLPERF_POWER_SERVER_ADDRESS
+ ntp_server: MLC_MLPERF_POWER_NTP_SERVER
+ max_amps: MLC_MLPERF_POWER_MAX_AMPS
+ max_volts: MLC_MLPERF_POWER_MAX_VOLTS
+ regenerate_files: MLC_REGENERATE_MEASURE_FILES
+ rerun: MLC_RERUN
+ scenario: MLC_MLPERF_LOADGEN_SCENARIO
+ test_query_count: MLC_TEST_QUERY_COUNT
+ clean: MLC_MLPERF_CLEAN_SUBMISSION_DIR
+ dataset_args: MLC_MLPERF_EXTRA_DATASET_ARGS
+ target_qps: MLC_MLPERF_LOADGEN_TARGET_QPS
+ target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY
+ offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
+ server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS
+ singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
+ multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
+ network: MLC_NETWORK_LOADGEN
+ sut_servers: MLC_NETWORK_LOADGEN_SUT_SERVERS
-# Duplicate CM environment variables to the ones used in native apps
+# Duplicate MLC environment variables to the ones used in native apps
env_key_mappings:
- CM_HOST_: HOST_
- CM_ML_: ML_
- CM_MLPERF_TVM: MLPERF_TVM
- CM_MLPERF_DELETE: MLPERF_DELETE
+ MLC_HOST_: HOST_
+ MLC_ML_: ML_
+ MLC_MLPERF_TVM: MLPERF_TVM
+ MLC_MLPERF_DELETE: MLPERF_DELETE
# Env keys which are exposed to higher level scripts
new_env_keys:
- - CM_MLPERF_*
- - CM_DATASET_*
- - CM_HW_NAME
- - CM_ML_MODEL_*
- - CM_MAX_EXAMPLES
- - CM_VLLM_*
+ - MLC_MLPERF_*
+ - MLC_DATASET_*
+ - MLC_HW_NAME
+ - MLC_ML_MODEL_*
+ - MLC_MAX_EXAMPLES
+ - MLC_VLLM_*
new_state_keys:
- mlperf-inference-implementation
- - CM_SUT_*
+ - MLC_SUT_*
-# Dependencies on other CM scripts
+# Dependencies on other MLC scripts
deps:
@@ -108,9 +108,9 @@ deps:
names:
- cuda
enable_if_env:
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- gpu
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- onnxruntime
- tf
- tflite
@@ -119,7 +119,7 @@ deps:
# Detect TensorRT if required
- tags: get,nvidia,tensorrt
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- tensorrt
########################################################################
@@ -131,10 +131,10 @@ deps:
- ml-engine-onnxruntime
- onnxruntime
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- onnxruntime
- tvm-onnx
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- cpu
- rocm
@@ -143,36 +143,36 @@ deps:
names:
- ml-engine-onnxruntime-cuda
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- onnxruntime
- tvm-onnx
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- gpu
skip_if_env:
- CM_MODEL:
+ MLC_MODEL:
- 3d-unet-99
- 3d-unet-99.9
## resnet50 and 3d-unet need both onnxruntime and onnxruntime_gpu on cuda
- tags: get,generic-python-lib,_onnxruntime
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- onnxruntime
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- gpu
- CM_MODEL:
+ MLC_MODEL:
- 3d-unet-99
- 3d-unet-99.9
- resnet50
- tags: get,generic-python-lib,_onnxruntime_gpu
env:
- CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: ""
+ MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: ""
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- onnxruntime
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- gpu
- CM_MODEL:
+ MLC_MODEL:
- 3d-unet-99
- 3d-unet-99.9
- resnet50
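Each dependency in `meta.yaml` is gated by `enable_if_env` / `skip_if_env` blocks that now test the `MLC_`-prefixed keys: a dep is taken only when every `enable_if_env` key holds one of its listed values, and dropped when the skip block matches. A rough Python sketch of that matching rule, under the assumption that all listed keys must match (the actual evaluator in the automation framework may differ in precedence):

```python
def env_matches(env: dict, conditions: dict) -> bool:
    # True when every key is present with one of its allowed values.
    return all(str(env.get(key)) in [str(v) for v in values]
               for key, values in conditions.items())

def dep_is_active(env, enable_if_env=None, skip_if_env=None) -> bool:
    if enable_if_env and not env_matches(env, enable_if_env):
        return False
    if skip_if_env and env_matches(env, skip_if_env):
        return False
    return True

# Example: the onnxruntime-on-GPU dependency above.
env = {"MLC_MLPERF_BACKEND": "onnxruntime",
       "MLC_MLPERF_DEVICE": "gpu",
       "MLC_MODEL": "resnet50"}
print(dep_is_active(env, enable_if_env={
    "MLC_MLPERF_BACKEND": ["onnxruntime"],
    "MLC_MLPERF_DEVICE": ["gpu"],
    "MLC_MODEL": ["3d-unet-99", "3d-unet-99.9", "resnet50"]}))  # True
```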
@@ -184,14 +184,14 @@ deps:
- ml-engine-pytorch
- pytorch
skip_if_env:
- CM_MODEL:
+ MLC_MODEL:
- dlrm-v2-99
- dlrm-v2-99.9
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- pytorch
- tvm-pytorch
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- cpu
- rocm
@@ -201,11 +201,11 @@ deps:
- ml-engine-pytorch
- pytorch
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- pytorch
- tvm-pytorch
- ray
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- gpu
## Torchvision (CPU)
@@ -214,15 +214,15 @@ deps:
- ml-engine-torchvision
- torchvision
skip_if_env:
- CM_MODEL:
+ MLC_MODEL:
- dlrm-v2-99
- dlrm-v2-99.9
- rgat
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- pytorch
- tvm-pytorch
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- cpu
## Torchvision (CUDA)
@@ -231,16 +231,16 @@ deps:
- ml-engine-torchvision
- torchvision
skip_if_env:
- CM_MODEL:
+ MLC_MODEL:
- dlrm-v2-99
- dlrm-v2-99.9
- rgat
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- pytorch
- tvm-pytorch
- ray
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- gpu
## tensorrt
@@ -248,7 +248,7 @@ deps:
names:
- ml-engine-tensorrt
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- ray
## torch_tensorrt
@@ -256,7 +256,7 @@ deps:
names:
- ml-engine-torch_tensorrt
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- ray
## Ray
@@ -264,7 +264,7 @@ deps:
names:
- ray
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- ray
## async_timeout (for multi-node)
@@ -274,7 +274,7 @@ deps:
names:
- async_timeout
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- ray
## Transformers
@@ -282,7 +282,7 @@ deps:
names:
- ml-engine-transformers
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- bert-99
- bert-99.9
- gptj-99
@@ -294,7 +294,7 @@ deps:
- ml-engine-tensorflow
- tensorflow
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- tf
## NCNN
@@ -302,29 +302,29 @@ deps:
names:
- ml-engine-ncnn
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- ncnn
- tags: get,tensorflow,lib,_tflite
names:
- ml-engine-tflite
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- tflite
########################################################################
# Install ML models
- tags: get,ml-model,neural-magic,zoo
- # sets CM_MLPERF_CUSTOM_MODEL_PATH
+ # sets MLC_MLPERF_CUSTOM_MODEL_PATH
names:
- custom-ml-model
enable_if_env:
- CM_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB:
+ MLC_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB:
- "on"
update_tags_from_env_with_prefix:
"_model-stub.":
- - CM_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB
+ - MLC_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB
## ResNet50
- tags: get,ml-model,image-classification,resnet50
@@ -332,10 +332,10 @@ deps:
- ml-model
- resnet50-model
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- resnet50
skip_if_env:
- CM_MLPERF_CUSTOM_MODEL_PATH:
+ MLC_MLPERF_CUSTOM_MODEL_PATH:
- "on"
## RetinaNet
@@ -344,7 +344,7 @@ deps:
- ml-model
- retinanet-model
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- retinanet
## GPT-J
@@ -354,11 +354,11 @@ deps:
- gptj-model
- gpt-j-model
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- gptj-99
- gptj-99.9
skip_if_env:
- CM_NETWORK_LOADGEN:
+ MLC_NETWORK_LOADGEN:
- lon
## RetinaNet (PyTorch weights, FP32)
@@ -367,11 +367,11 @@ deps:
- ml-model
- retinanet-model
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- pytorch
- CM_MLPERF_IMPLEMENTATION:
+ MLC_MLPERF_IMPLEMENTATION:
- nvidia
- CM_MODEL:
+ MLC_MODEL:
- retinanet
## BERT
@@ -380,11 +380,11 @@ deps:
- ml-model
- bert-model
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- bert-99
- bert-99.9
skip_if_env:
- CM_MLPERF_CUSTOM_MODEL_PATH:
+ MLC_MLPERF_CUSTOM_MODEL_PATH:
- "on"
## SDXL
@@ -394,15 +394,15 @@ deps:
- sdxl-model
- ml-model-float16
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- stable-diffusion-xl
skip_if_any_env:
- CM_MLPERF_CUSTOM_MODEL_PATH:
+ MLC_MLPERF_CUSTOM_MODEL_PATH:
- "on"
skip_if_env:
- CM_RUN_STATE_DOCKER:
+ MLC_RUN_STATE_DOCKER:
- "yes"
- CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST:
+ MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST:
- "yes"
## LLAMA2-70B
@@ -411,18 +411,18 @@ deps:
- ml-model
- llama2-model
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- llama2-70b-99
- llama2-70b-99.9
skip_if_any_env:
- CM_MLPERF_CUSTOM_MODEL_PATH:
+ MLC_MLPERF_CUSTOM_MODEL_PATH:
- "on"
- CM_MLPERF_INFERENCE_API_SERVER:
+ MLC_MLPERF_INFERENCE_API_SERVER:
- "on"
skip_if_env:
- CM_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST:
+ MLC_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST:
- "yes"
- CM_RUN_STATE_DOCKER:
+ MLC_RUN_STATE_DOCKER:
- "yes"
## mixtral-8x7b
@@ -431,15 +431,15 @@ deps:
- ml-model
- mixtral-model
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- mixtral-8x7b
skip_if_any_env:
- CM_MLPERF_CUSTOM_MODEL_PATH:
+ MLC_MLPERF_CUSTOM_MODEL_PATH:
- "on"
skip_if_env:
- CM_MLPERF_MODEL_MIXTRAL_8X7B_DOWNLOAD_TO_HOST:
+ MLC_MLPERF_MODEL_MIXTRAL_8X7B_DOWNLOAD_TO_HOST:
- "yes"
- CM_RUN_STATE_DOCKER:
+ MLC_RUN_STATE_DOCKER:
- "yes"
## 3d-unet
@@ -448,7 +448,7 @@ deps:
- ml-model
- 3d-unet-model
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- 3d-unet-99
- 3d-unet-99.9
@@ -458,7 +458,7 @@ deps:
- ml-model
- rnnt-model
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- rnnt
## Dlrm
@@ -467,13 +467,13 @@ deps:
- ml-model
- dlrm-model
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- dlrm-99
- dlrm-99.9
- dlrm-v2-99
- dlrm-v2-99.9
skip_if_env:
- CM_ML_MODEL_FILE_WITH_PATH:
+ MLC_ML_MODEL_FILE_WITH_PATH:
- "on"
## RGAT
@@ -481,7 +481,7 @@ deps:
names:
- rgat-model
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- rgat
skip_if_env:
RGAT_CHECKPOINT_PATH:
@@ -493,13 +493,13 @@ deps:
- llama3-405b-model
- llama3-402b-model
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- llama3_1-405b
- llama3-405b
skip_if_env:
- CM_USE_MODEL_FROM_HOST:
+ MLC_USE_MODEL_FROM_HOST:
- "yes"
- CM_RUN_STATE_DOCKER:
+ MLC_RUN_STATE_DOCKER:
- "yes"
########################################################################
@@ -510,24 +510,24 @@ deps:
names:
- imagenet-preprocessed
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- resnet50
skip_if_env:
- CM_MLPERF_VISION_DATASET_OPTION:
+ MLC_MLPERF_VISION_DATASET_OPTION:
- on
- tags: get,dataset,image-classification,imagenet,preprocessed,_pytorch
names:
- imagenet-preprocessed
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- resnet50
- CM_MLPERF_VISION_DATASET_OPTION:
+ MLC_MLPERF_VISION_DATASET_OPTION:
- imagenet_pytorch
- tags: get,dataset-aux,image-classification,imagenet-aux
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- resnet50
## Open Images for RetinaNet
@@ -535,7 +535,7 @@ deps:
names:
- openimages-preprocessed
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- retinanet
## CNNDM for Large Language Model
@@ -543,7 +543,7 @@ deps:
names:
- cnndm-original
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- gptj-99
- gptj-99.9
@@ -552,13 +552,13 @@ deps:
names:
- squad-original
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- bert-99
- bert-99.9
- tags: get,dataset-aux,squad-vocab
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- bert-99
- bert-99.9
@@ -568,7 +568,7 @@ deps:
- coco2014-preprocessed
- coco2014-dataset
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- stable-diffusion-xl
## OpenOrca for LLAMA2-70b
@@ -576,7 +576,7 @@ deps:
names:
- openorca-preprocessed
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- llama2-70b-99
- llama2-70b-99.9
@@ -585,12 +585,12 @@ deps:
names:
- openorca-mbxp-gsm8k-combined-preprocessed
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- mixtral-8x7b
skip_if_env:
- CM_MLPERF_DATASET_MIXTRAL_8X7B_DOWNLOAD_TO_HOST:
+ MLC_MLPERF_DATASET_MIXTRAL_8X7B_DOWNLOAD_TO_HOST:
- "yes"
- CM_RUN_STATE_DOCKER:
+ MLC_RUN_STATE_DOCKER:
- "yes"
## Kits19 for 3d-unet
@@ -598,13 +598,13 @@ deps:
names:
- kits19-preprocessed
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- 3d-unet-99
- 3d-unet-99.9
skip_if_env:
- CM_MLPERF_DATASET_3DUNET_DOWNLOAD_TO_HOST:
+ MLC_MLPERF_DATASET_3DUNET_DOWNLOAD_TO_HOST:
- "yes"
- CM_RUN_STATE_DOCKER:
+ MLC_RUN_STATE_DOCKER:
- "yes"
## Librispeech for rnnt
@@ -612,7 +612,7 @@ deps:
names:
- librispeech-preprocessed
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- rnnt
## Criteo for dlrm
@@ -620,11 +620,11 @@ deps:
names:
- criteo-preprocessed
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- dlrm-v2-99
- dlrm-v2-99.9
skip_if_env:
- CM_CRITEO_PREPROCESSED_PATH:
+ MLC_CRITEO_PREPROCESSED_PATH:
- on
## igbh for rgat
@@ -633,12 +633,12 @@ deps:
- igbh-dataset
- illinois-graph-benchmark-heterogeneous
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- rgat
skip_if_env:
- CM_RUN_STATE_DOCKER:
+ MLC_RUN_STATE_DOCKER:
- "yes"
- CM_USE_DATASET_FROM_HOST:
+ MLC_USE_DATASET_FROM_HOST:
- "yes"
## llama3_1 dataset
@@ -647,13 +647,13 @@ deps:
- llama3_1-dataset
- llama3-dataset
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- llama3_1-405b
- llama3-402b
skip_if_env:
- CM_USE_DATASET_FROM_HOST:
+ MLC_USE_DATASET_FROM_HOST:
- "yes"
- CM_RUN_STATE_DOCKER:
+ MLC_RUN_STATE_DOCKER:
- "yes"
########################################################################
@@ -664,7 +664,7 @@ deps:
names:
- user-conf-generator
skip_if_env:
- CM_RUN_STATE_DOCKER:
+ MLC_RUN_STATE_DOCKER:
- "yes"
# Install MLPerf loadgen
@@ -681,7 +681,7 @@ deps:
# Download MLPerf inference source
- tags: get,mlcommons,inference,src
env:
- CM_GET_MLPERF_IMPLEMENTATION_ONLY: "yes"
+ MLC_GET_MLPERF_IMPLEMENTATION_ONLY: "yes"
names:
- mlperf-implementation
@@ -692,7 +692,7 @@ prehook_deps:
- remote-run-cmds
tags: remote,run,cmds
enable_if_env:
- CM_ASSH_RUN_COMMANDS:
+ MLC_ASSH_RUN_COMMANDS:
- "on"
posthook_deps:
@@ -700,7 +700,7 @@ posthook_deps:
- mlperf-runner
tags: benchmark-mlperf
skip_if_env:
- CM_MLPERF_SKIP_RUN:
+ MLC_MLPERF_SKIP_RUN:
- "on"
post_deps:
@@ -717,8 +717,8 @@ variations:
imagenet-accuracy-script:
tags: _float32
env:
- CM_MLPERF_PYTHON: "yes"
- CM_MLPERF_IMPLEMENTATION: reference
+ MLC_MLPERF_PYTHON: "yes"
+ MLC_MLPERF_IMPLEMENTATION: reference
# ML engine
onnxruntime:
@@ -735,15 +735,15 @@ variations:
version_max: "1.26.4"
version_max_usable: "1.26.4"
env:
- CM_MLPERF_BACKEND: onnxruntime
+ MLC_MLPERF_BACKEND: onnxruntime
onnxruntime,cpu:
env:
- CM_MLPERF_BACKEND_VERSION: <<>>
+ MLC_MLPERF_BACKEND_VERSION: <<>>
onnxruntime,cuda:
env:
- CM_MLPERF_BACKEND_VERSION: <<>>
+ MLC_MLPERF_BACKEND_VERSION: <<>>
ONNXRUNTIME_PREFERRED_EXECUTION_PROVIDER: "CUDAExecutionProvider"
pytorch:
@@ -759,8 +759,8 @@ variations:
version_max: "1.26.4"
version_max_usable: "1.26.4"
env:
- CM_MLPERF_BACKEND: pytorch
- CM_MLPERF_BACKEND_VERSION: <<>>
+ MLC_MLPERF_BACKEND: pytorch
+ MLC_MLPERF_BACKEND_VERSION: <<>>
pytorch,rocm:
add_deps_recursive:
@@ -779,15 +779,15 @@ variations:
ml-model:
tags: raw,_pytorch
env:
- CM_MLPERF_BACKEND: ray
- CM_MLPERF_BACKEND_VERSION: <<>>
+ MLC_MLPERF_BACKEND: ray
+ MLC_MLPERF_BACKEND_VERSION: <<>>
tf,rocm:
add_deps_recursive:
tensorflow:
tags: _rocm
env:
- CM_MLPERF_BACKEND_VERSION: <<>>
+ MLC_MLPERF_BACKEND_VERSION: <<>>
onnxruntime,rocm:
add_deps_recursive:
@@ -795,7 +795,7 @@ variations:
tags: _rocm
env:
ONNXRUNTIME_PREFERRED_EXECUTION_PROVIDER: "ROCMExecutionProvider"
- CM_MLPERF_BACKEND_VERSION: <<>>
+ MLC_MLPERF_BACKEND_VERSION: <<>>
ncnn:
group: framework
@@ -805,9 +805,9 @@ variations:
ml-model:
tags: raw,_ncnn
env:
- CM_MLPERF_BACKEND: ncnn
- CM_MLPERF_BACKEND_VERSION: <<>>
- CM_MLPERF_VISION_DATASET_OPTION: imagenet_pytorch
+ MLC_MLPERF_BACKEND: ncnn
+ MLC_MLPERF_BACKEND_VERSION: <<>>
+ MLC_MLPERF_VISION_DATASET_OPTION: imagenet_pytorch
tflite:
group: framework
@@ -817,9 +817,9 @@ variations:
ml-model:
tags: raw,_tflite,_no-argmax
env:
- CM_MLPERF_BACKEND: tflite
- CM_MLPERF_BACKEND_VERSION: <<>>
- CM_MLPERF_VISION_DATASET_OPTION: imagenet_tflite_tpu
+ MLC_MLPERF_BACKEND: tflite
+ MLC_MLPERF_BACKEND_VERSION: <<>>
+ MLC_MLPERF_VISION_DATASET_OPTION: imagenet_tflite_tpu
tf:
group: framework
@@ -829,8 +829,8 @@ variations:
ml-model:
tags: raw,_tf
env:
- CM_MLPERF_BACKEND: tf
- CM_MLPERF_BACKEND_VERSION: <<>>
+ MLC_MLPERF_BACKEND: tf
+ MLC_MLPERF_BACKEND_VERSION: <<>>
tensorflow:
alias: tf
@@ -838,16 +838,16 @@ variations:
deepsparse:
group: framework
env:
- CM_MLPERF_BACKEND: deepsparse
- CM_MLPERF_BACKEND_VERSION: <<>>
+ MLC_MLPERF_BACKEND: deepsparse
+ MLC_MLPERF_BACKEND_VERSION: <<>>
deps:
- tags: get,generic-python-lib,_deepsparse
skip_if_env:
- CM_HOST_PLATFORM_FLAVOR:
+ MLC_HOST_PLATFORM_FLAVOR:
- aarch64
- tags: get,generic-python-lib,_package.deepsparse-nightly
enable_if_env:
- CM_HOST_PLATFORM_FLAVOR:
+ MLC_HOST_PLATFORM_FLAVOR:
- aarch64
add_deps_recursive:
mlperf-implementation:
@@ -858,8 +858,8 @@ variations:
tvm-onnx:
group: framework
env:
- CM_MLPERF_BACKEND: tvm-onnx
- CM_MLPERF_BACKEND_VERSION: <<>>
+ MLC_MLPERF_BACKEND: tvm-onnx
+ MLC_MLPERF_BACKEND_VERSION: <<>>
deps:
- tags: get,generic-python-lib,_onnx
- tags: get,generic-python-lib,_numpy
@@ -873,13 +873,13 @@ variations:
- tvm-model
update_tags_from_env_with_prefix:
_model.:
- - CM_MODEL
+ - MLC_MODEL
tvm-tflite:
group: framework
env:
- CM_MLPERF_BACKEND: tvm-tflite
- CM_MLPERF_BACKEND_VERSION: <<>>
+ MLC_MLPERF_BACKEND: tvm-tflite
+ MLC_MLPERF_BACKEND_VERSION: <<>>
deps:
- tags: get,generic-python-lib,_tflite
- tags: get,tvm
@@ -890,14 +890,14 @@ variations:
- tvm-model
update_tags_from_env_with_prefix:
_model.:
- - CM_MODEL
+ - MLC_MODEL
tvm-pytorch:
group: framework
env:
- CM_MLPERF_BACKEND: tvm-pytorch
- CM_MLPERF_BACKEND_VERSION: <<>>
- CM_PREPROCESS_PYTORCH: "yes"
+ MLC_MLPERF_BACKEND: tvm-pytorch
+ MLC_MLPERF_BACKEND_VERSION: <<>>
+ MLC_PREPROCESS_PYTORCH: "yes"
MLPERF_TVM_TORCH_QUANTIZED_ENGINE: qnnpack
deps:
- tags: get,generic-python-lib,_torch
@@ -912,7 +912,7 @@ variations:
- tvm-model
update_tags_from_env_with_prefix:
_model.:
- - CM_MODEL
+ - MLC_MODEL
# Reference MLPerf models
gptj-99.9:
@@ -920,14 +920,14 @@ variations:
base:
- gptj_
env:
- CM_MODEL: gptj-99.9
+ MLC_MODEL: gptj-99.9
gptj-99:
group: models
base:
- gptj_
env:
- CM_MODEL: gptj-99
+ MLC_MODEL: gptj-99
gptj_:
deps:
@@ -940,18 +940,18 @@ variations:
base:
- bert
env:
- CM_MODEL: bert-99.9
+ MLC_MODEL: bert-99.9
bert-99:
group: models
base:
- bert
env:
- CM_MODEL: bert-99
+ MLC_MODEL: bert-99
bert:
env:
- CM_MLPERF_MODEL_SKIP_BATCHING: true
+ MLC_MLPERF_MODEL_SKIP_BATCHING: true
deps:
- tags: get,generic-python-lib,_package.pydantic
- tags: get,generic-python-lib,_tokenization
@@ -962,19 +962,19 @@ variations:
- protobuf
version_max: "3.19"
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- tf
- tflite
- tags: get,generic-python-lib,_boto3
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- pytorch
- tags: get,generic-python-lib,_torch
names:
- ml-engine-pytorch
- pytorch
skip_if_env:
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- gpu
add_deps_recursive:
inference-src:
@@ -983,8 +983,8 @@ variations:
sdxl:
group: models
env:
- CM_MODEL: stable-diffusion-xl
- CM_NUM_THREADS: "1"
+ MLC_MODEL: stable-diffusion-xl
+ MLC_NUM_THREADS: "1"
deps:
- tags: get,generic-python-lib,_package.diffusers
names:
@@ -1019,8 +1019,8 @@ variations:
llama2-70b_:
env:
- CM_MLPERF_MODEL_SKIP_BATCHING: false
- CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://github.com/mlcommons/cm4mlops/blob/b18ff890ff559e21d2e27a3b54cd26467ac1fd9e/script/get-ml-model-llama2/_cm.json#L51"
+ MLC_MLPERF_MODEL_SKIP_BATCHING: false
+ MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://github.com/mlcommons/cm4mlops/blob/b18ff890ff559e21d2e27a3b54cd26467ac1fd9e/script/get-ml-model-llama2/_cm.json#L51"
deps:
- tags: get,generic-python-lib,_package.transformers
names:
@@ -1064,25 +1064,25 @@ variations:
llama2-70b-99:
group: models
env:
- CM_MODEL: llama2-70b-99
+ MLC_MODEL: llama2-70b-99
base:
- llama2-70b_
llama2-70b_,cuda:
default_env:
- CM_MLPERF_LOADGEN_MAX_BATCHSIZE: 8
+ MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: 8
llama2-70b-99.9:
group: models
env:
- CM_MODEL: llama2-70b-99.9
+ MLC_MODEL: llama2-70b-99.9
base:
- llama2-70b_
mixtral-8x7b:
group: models
env:
- CM_MODEL: mixtral-8x7b
+ MLC_MODEL: mixtral-8x7b
deps:
- tags: get,rust-compiler
names:
@@ -1123,26 +1123,26 @@ variations:
mixtral-8x7b,cuda:
default_env:
- CM_MLPERF_LOADGEN_BATCH_SIZE: 1
+ MLC_MLPERF_LOADGEN_BATCH_SIZE: 1
3d-unet-99.9:
group: models
base:
- 3d-unet
env:
- CM_MODEL: 3d-unet-99.9
+ MLC_MODEL: 3d-unet-99.9
3d-unet-99:
group: models
base:
- 3d-unet
env:
- CM_MODEL: 3d-unet-99
+ MLC_MODEL: 3d-unet-99
3d-unet:
env:
- CM_TMP_IGNORE_MLPERF_QUERY_COUNT: true
- CM_MLPERF_MODEL_SKIP_BATCHING: true
+ MLC_TMP_IGNORE_MLPERF_QUERY_COUNT: true
+ MLC_MLPERF_MODEL_SKIP_BATCHING: true
deps:
- tags: get,generic-python-lib,_package.nibabel
- tags: get,generic-python-lib,_package.scipy
@@ -1155,19 +1155,19 @@ variations:
base:
- dlrm-v2_
env:
- CM_MODEL: dlrm-v2-99.9
+ MLC_MODEL: dlrm-v2-99.9
dlrm-v2-99:
group: models
base:
- dlrm-v2_
env:
- CM_MODEL: dlrm-v2-99
+ MLC_MODEL: dlrm-v2-99
dlrm-v2_:
env:
- CM_MLPERF_MODEL_SKIP_BATCHING: true
- CM_ML_MODEL_DATASET_TYPE: multihot-criteo
+ MLC_MLPERF_MODEL_SKIP_BATCHING: true
+ MLC_ML_MODEL_DATASET_TYPE: multihot-criteo
dlrm-v2_,pytorch:
deps:
@@ -1202,9 +1202,9 @@ variations:
rnnt:
group: models
env:
- CM_MODEL: rnnt
- CM_MLPERF_MODEL_SKIP_BATCHING: true
- CM_TMP_IGNORE_MLPERF_QUERY_COUNT: true
+ MLC_MODEL: rnnt
+ MLC_MLPERF_MODEL_SKIP_BATCHING: true
+ MLC_TMP_IGNORE_MLPERF_QUERY_COUNT: true
deps:
- tags: get,generic-python-lib,_package.pydantic
version_max: "1.10.9"
@@ -1225,16 +1225,16 @@ variations:
- tags: get,generic-python-lib,_pycocotools
env:
- CM_MODEL: retinanet
- CM_MLPERF_USE_MLCOMMONS_RUN_SCRIPT: "yes"
- CM_MLPERF_LOADGEN_MAX_BATCHSIZE: "1"
+ MLC_MODEL: retinanet
+ MLC_MLPERF_USE_MLCOMMONS_RUN_SCRIPT: "yes"
+ MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: "1"
resnet50:
group: models
default: true
env:
- CM_MODEL: resnet50
- CM_MLPERF_USE_MLCOMMONS_RUN_SCRIPT: "yes"
+ MLC_MODEL: resnet50
+ MLC_MLPERF_USE_MLCOMMONS_RUN_SCRIPT: "yes"
deps:
- tags: get,generic-python-lib,_opencv-python
- tags: get,generic-python-lib,_numpy
@@ -1248,14 +1248,14 @@ variations:
version_max: "4.23.4"
version_max_usable: "4.23.4"
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- tf
- tflite
rgat:
group: models
env:
- CM_MODEL: rgat
+ MLC_MODEL: rgat
add_deps_recursive:
pytorch:
version_max: "2.4.0"
@@ -1275,38 +1275,38 @@ variations:
- tags: get,generic-python-lib,_package.torch-geometric
update_tags_from_env_with_prefix:
_find_links_url.:
- - CM_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL
+ - MLC_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL
- tags: get,generic-python-lib,_package.torch-scatter
update_tags_from_env_with_prefix:
_find_links_url.:
- - CM_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL
+ - MLC_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL
- tags: get,generic-python-lib,_package.torch-sparse
update_tags_from_env_with_prefix:
_find_links_url.:
- - CM_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL
+ - MLC_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL
- tags: get,generic-python-lib,_package.dgl
update_tags_from_env_with_prefix:
_find_links_url.:
- - CM_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL_DGL
+ - MLC_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL_DGL
rgat,cuda:
env:
- CM_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL: "https://data.pyg.org/whl/torch-<<>>.html"
- CM_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL_DGL: "https://data.dgl.ai/wheels/torch-<<>>/cu121/repo.html"
+ MLC_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL: "https://data.pyg.org/whl/torch-<<>>.html"
+ MLC_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL_DGL: "https://data.dgl.ai/wheels/torch-<<>>/cu121/repo.html"
rgat,cpu:
env:
- CM_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL: "https://data.pyg.org/whl/torch-<<>>+cpu.html"
- CM_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL_DGL: "https://data.dgl.ai/wheels/torch-<<>>/repo.html"
+ MLC_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL: "https://data.pyg.org/whl/torch-<<>>+cpu.html"
+ MLC_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL_DGL: "https://data.dgl.ai/wheels/torch-<<>>/repo.html"
llama3_1-405b:
group: models
env:
- CM_MODEL: llama3_1-405b
+ MLC_MODEL: llama3_1-405b
adr:
pytorch:
version_max: 2.5.1
- CM_MODEL: llama3-402b
+ MLC_MODEL: llama3-402b
deps:
- tags: get,generic-python-lib,_package.torchvision
- tags: get,generic-python-lib,_package.torchaudio
@@ -1316,25 +1316,25 @@ variations:
- tags: get,generic-python-lib,_package.accelerate
- tags: get,generic-python-lib,_package.vllm
env:
- CM_GENERIC_PYTHON_PIP_EXTRA: "--upgrade"
+ MLC_GENERIC_PYTHON_PIP_EXTRA: "--upgrade"
- tags: get,generic-python-lib,_package.pybind11
- tags: get,generic-python-lib,_package.pandas
version_max: 2.2.1
llama3_1-405b,cuda:
env:
- CM_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL: "https://data.pyg.org/whl/torch-<<>>.html"
+ MLC_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL: "https://data.pyg.org/whl/torch-<<>>.html"
llama3_1-405b,cpu:
env:
- CM_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL: "https://data.pyg.org/whl/torch-<<>>+cpu.html"
+ MLC_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL: "https://data.pyg.org/whl/torch-<<>>+cpu.html"
# Target devices
cpu:
group: device
default: true
env:
- CM_MLPERF_DEVICE: cpu
+ MLC_MLPERF_DEVICE: cpu
CUDA_VISIBLE_DEVICES: ""
USE_CUDA: no
USE_GPU: no
@@ -1342,20 +1342,20 @@ variations:
cuda:
group: device
env:
- CM_MLPERF_DEVICE: gpu
+ MLC_MLPERF_DEVICE: gpu
USE_CUDA: yes
USE_GPU: yes
rocm:
group: device
env:
- CM_MLPERF_DEVICE: rocm
+ MLC_MLPERF_DEVICE: rocm
USE_GPU: yes
tpu:
group: device
env:
- CM_MLPERF_DEVICE: tpu
+ MLC_MLPERF_DEVICE: tpu
tpu,tflite:
add_deps_recursive:
@@ -1365,16 +1365,16 @@ variations:
# Loadgen scenarios
offline:
env:
- CM_MLPERF_LOADGEN_SCENARIO: Offline
+ MLC_MLPERF_LOADGEN_SCENARIO: Offline
multistream:
env:
- CM_MLPERF_LOADGEN_SCENARIO: MultiStream
+ MLC_MLPERF_LOADGEN_SCENARIO: MultiStream
singlestream:
env:
- CM_MLPERF_LOADGEN_SCENARIO: SingleStream
+ MLC_MLPERF_LOADGEN_SCENARIO: SingleStream
server:
env:
- CM_MLPERF_LOADGEN_SCENARIO: Server
+ MLC_MLPERF_LOADGEN_SCENARIO: Server
# Model precision
fp32:
@@ -1384,8 +1384,8 @@ variations:
ml-model:
tags: _fp32
env:
- CM_MLPERF_QUANTIZATION: off
- CM_MLPERF_MODEL_PRECISION: float32
+ MLC_MLPERF_QUANTIZATION: off
+ MLC_MLPERF_MODEL_PRECISION: float32
# Model precision
float16:
@@ -1394,8 +1394,8 @@ variations:
ml-model-float16:
tags: _fp16
env:
- CM_MLPERF_QUANTIZATION: off
- CM_MLPERF_MODEL_PRECISION: float16
+ MLC_MLPERF_QUANTIZATION: off
+ MLC_MLPERF_MODEL_PRECISION: float16
# Model precision
bfloat16:
@@ -1404,14 +1404,14 @@ variations:
ml-model-float16:
tags: _fp16
env:
- CM_MLPERF_QUANTIZATION: off
- CM_MLPERF_MODEL_PRECISION: bfloat16
+ MLC_MLPERF_QUANTIZATION: off
+ MLC_MLPERF_MODEL_PRECISION: bfloat16
int8:
group: precision
env:
- CM_MLPERF_QUANTIZATION: on
- CM_MLPERF_MODEL_PRECISION: int8
+ MLC_MLPERF_QUANTIZATION: on
+ MLC_MLPERF_MODEL_PRECISION: int8
add_deps_recursive:
ml-model:
tags: _int8
@@ -1422,7 +1422,7 @@ variations:
batch_size.#:
group: batch-size
env:
- CM_MLPERF_LOADGEN_MAX_BATCHSIZE: "#"
+ MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: "#"
add_deps_recursive:
ml-model:
tags: _batch_size.#
@@ -1436,14 +1436,14 @@ variations:
names:
- flask
env:
- CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: network_sut
- CM_NETWORK_LOADGEN: sut
+ MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: network_sut
+ MLC_NETWORK_LOADGEN: sut
network-lon:
group: network
env:
- CM_NETWORK_LOADGEN: lon
- CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: network_loadgen
+ MLC_NETWORK_LOADGEN: lon
+ MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: network_loadgen
beam_size.#:
env:
@@ -1459,6 +1459,6 @@ variations:
loadgen:
version: r2.1
env:
- CM_RERUN: "yes"
- CM_SKIP_SYS_UTILS: "yes"
- CM_TEST_QUERY_COUNT: "100"
+ MLC_RERUN: "yes"
+ MLC_SKIP_SYS_UTILS: "yes"
+ MLC_TEST_QUERY_COUNT: "100"
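The `variations` renamed above are organized into mutually exclusive groups (device, precision, loadgen scenario, models), and selecting one variation per group injects its `env` block before the script runs. A compact sketch of that selection rule, using an illustrative subset of the table (the group names and helper are assumptions, not the framework's API):

```python
VARIATIONS = {
    "cpu":     {"group": "device",   "env": {"MLC_MLPERF_DEVICE": "cpu"}},
    "cuda":    {"group": "device",   "env": {"MLC_MLPERF_DEVICE": "gpu",
                                             "USE_CUDA": "yes"}},
    "offline": {"group": "scenario", "env": {"MLC_MLPERF_LOADGEN_SCENARIO": "Offline"}},
    "server":  {"group": "scenario", "env": {"MLC_MLPERF_LOADGEN_SCENARIO": "Server"}},
}

def select_variations(names, env):
    # One variation per group; the last one named in a group wins.
    winners = {}
    for name in names:
        winners[VARIATIONS[name]["group"]] = name
    for name in winners.values():
        env.update(VARIATIONS[name]["env"])
    return winners

env = {}
select_variations(["cuda", "offline"], env)
# env: MLC_MLPERF_DEVICE=gpu, USE_CUDA=yes, MLC_MLPERF_LOADGEN_SCENARIO=Offline
```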
diff --git a/script/app-mlperf-inference-mlcommons-python/nvidia/retinanet.py b/script/app-mlperf-inference-mlcommons-python/nvidia/retinanet.py
index 090d1b072..dd2a3e016 100644
--- a/script/app-mlperf-inference-mlcommons-python/nvidia/retinanet.py
+++ b/script/app-mlperf-inference-mlcommons-python/nvidia/retinanet.py
@@ -55,14 +55,14 @@
G_OPENIMAGE_CALMAP_PATH = "data_maps/open-images-v6-mlperf/cal_map.txt"
G_OPENIMAGE_VALSET_PATH = os.path.join(
os.environ.get(
- "CM_DATASET_PATH",
+ "MLC_DATASET_PATH",
"build/data/open-images-v6-mlperf"),
"validation",
"data")
G_OPENIMAGE_VALMAP_PATH = "data_maps/open-images-v6-mlperf/val_map.txt"
G_OPENIMAGE_ANNO_PATH = os.path.join(
os.environ.get(
- "CM_DATASET_PATH",
+ "MLC_DATASET_PATH",
"build/data/open-images-v6-mlperf"),
"annotations",
"openimages-mlperf.json")
diff --git a/script/app-mlperf-inference-nvidia/README-about.md b/script/app-mlperf-inference-nvidia/README-about.md
deleted file mode 100644
index b78d64b62..000000000
--- a/script/app-mlperf-inference-nvidia/README-about.md
+++ /dev/null
@@ -1,137 +0,0 @@
-This script is a CM wrapper to the official [Nvidia submission code](https://github.com/mlcommons/inference_results_v3.0/tree/master/closed/NVIDIA) used for MLPerf inference submissions.
-
-
-
-## Download the needed files
-
-* Please ask privately in [this discord channel](https://discord.gg/y7hupJsUNb) if you would like to get access to an Amazon S3 bucket containing all the needed files for easiness. Otherwise, you can download them from the below links.
-
-For x86 machines, please download the latest install tar files from the below sites
-1. [cuDNN](https://developer.nvidia.com/cudnn) (for cuda 11)
-2. [TensorRT](https://developer.nvidia.com/tensorrt)
-3. Imagenet validation set (unfortunately not available via public URL) following the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md)
-
-
-
-
-
-## Using Docker (Recommended on x86 systems)
-
-
-Assuming all the downloaded files are to the user home directory please do the following steps:
-
-1. Download CUDA 11.8
- ```
- wget https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run
- ```
-2. [Install docker](https://docs.docker.com/engine/install/) and [Nvidia container toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)
-
-3. Give docker permission to the current user
- ```
- sudo usermod -aG docker $USER
- ```
- Logout and login
- Restart docker if required and confirm that Nvidia container toolkit is working by
- ```
- nvidia-ctk --version
- ```
-4. Check if Nvidia driver is working properly on the host.
- ```
- nvidia-smi
- ```
- If the above command produces any error you'll need to install Nvidia drivers on the host. You can do this via CM if you have sudo access
- ```
- cmr "install cuda prebuilt _driver" --version=11.8.0
- ```
-5. Build the docker container and mount the paths from the host machine.
- ** You may want to change the `scratch_path` location as it can take 100s of GBs.**
- ```bash
- cm docker script --tags=build,nvidia,inference,server \
- --cuda_run_file_path=$HOME/cuda_11.8.0_520.61.05_linux.run \
- --tensorrt_tar_file_path=$HOME/TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-11.8.tar.gz \
- --cudnn_tar_file_path=$HOME/cudnn-linux-x86_64-8.9.2.26_cuda11-archive.tar.xz \
- --imagenet_path=$HOME/imagenet-2012-val \
- --scratch_path=$HOME/mlperf_scratch \
- --docker_cm_repo=mlcommons@cm4mlops \
- --results_dir=$HOME/results_dir \
- --submission_dir=$HOME/submission_dir \
- --adr.compiler.tags=gcc
- ```
- * Use `--docker_cache=no` to turn off docker caching
- * Use `--docker_run_cmd_prefix="cm pull repo mlcommons@cm4mlops --checkout=dev"` to update the CK repository when docker caching is used
- * Use `--custom_system=no` if you are using a similar system to the [Nvidia submission systems for MLPerf inference 3.0](https://github.com/mlcommons/inference_results_v3.0/tree/main/closed/NVIDIA/systems).
-
-6. At the end of the build you'll get the following prompt unless you have chosen `--custom_system=no`. Please give a system name and say yes to generating the configuration files
- ### Example output
- ```
- ============================================
- => A system ID is a string containing only letters, numbers, and underscores
- => that is used as the human-readable name of the system. It is also used as
- => the system name when creating the measurements/ and results/ entries.
- => This string should also start with a letter to be a valid Python enum member name.
- => Specify the system ID to use for the current system: phoenix
- => Reloaded system list. MATCHED_SYSTEM: KnownSystem.phoenix
- => This script will generate Benchmark Configuration stubs for the detected system.
- Continue? [y/n]: y
- ```
- Now you'll be inside the CM Nvidia docker container and can run further scripts.
-
-7. Once the build is complete, you can proceed with any further CM scripts like for MLPerf inference. You can also save the container at this stage using [docker commit](https://docs.docker.com/engine/reference/commandline/commit/) so that it can be launched later without having to go through the previous steps.
-
-
-
-
-
-
-
-## Without Docker
-
-
-1. Install CUDA
- If CUDA is not detected, CM should download and install it automatically when you run the workflow.
- ** Nvidia drivers are expected to be installed on the system **
-
-2. Install cuDNN
- ```bash
- cmr "get cudnn" --tar_file=
- ```
-3. Install TensorRT
- ```bash
- cmr "get tensorrt _dev" --tar_file=
- ```
- On non x86 systems like Nvidia Orin, you can do a package manager install and then CM should pick up the installation automatically during the workflow run.
-
-4. Build the Nvidia inference server
- ```
- cmr "build nvidia inference server" \
- --adr.install-cuda-prebuilt.local_run_file_path=/data/cuda_11.8.0_520.61.05_linux.run \
- --adr.tensorrt.tar_file=/data/TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-11.8.tar.gz \
- --adr.cudnn.tar_file=/data/cudnn-linux-x86_64-8.9.2.26_cuda11-archive.tar.xz \
- --adr.compiler.tags=gcc \
- [--custom_system=no]
- ```
- Use `--custom_system=no` if you are using a similar system to the [Nvidia submission systems for MLPerf inference 3.0](https://github.com/mlcommons/inference_results_v3.0/tree/main/closed/NVIDIA/systems).
-
-5. At the end of the build you'll get the following prompt unless you have chosen `--custom_system=no`. Please give a system name and say yes to generating the configuration files
-
- ### Example output
- ```
- ============================================
- => A system ID is a string containing only letters, numbers, and underscores
- => that is used as the human-readable name of the system. It is also used as
- => the system name when creating the measurements/ and results/ entries.
- => This string should also start with a letter to be a valid Python enum member name.
- => Specify the system ID to use for the current system: phoenix
- => Reloaded system list. MATCHED_SYSTEM: KnownSystem.phoenix
- => This script will generate Benchmark Configuration stubs for the detected system.
- Continue? [y/n]: y
- ```
-
-
-
-## Acknowledgments
-
-* A common CM interface and automation for MLPerf inference benchmarks was developed by Arjun Suresh and Grigori Fursin
- sponsored by the [cTuning foundation](https://cTuning.org) and [cKnowledge.org](https://cKnowledge.org).
-* Nvidia's MLPerf inference implementation was developed by Zhihan Jiang, Ethan Cheng, Yiheng Zhang and Jinho Suh.
-
diff --git a/script/app-mlperf-inference-nvidia/customize.py b/script/app-mlperf-inference-nvidia/customize.py
index 851f9fa84..36b3a575e 100644
--- a/script/app-mlperf-inference-nvidia/customize.py
+++ b/script/app-mlperf-inference-nvidia/customize.py
@@ -11,40 +11,40 @@ def preprocess(i):
return {'return': 1, 'error': 'Windows is not supported in this script yet'}
env = i['env']
- if str(env.get('CM_RUN_STATE_DOCKER', '')).lower() in ['1', 'true', 'yes']:
+ if str(env.get('MLC_RUN_STATE_DOCKER', '')).lower() in ['1', 'true', 'yes']:
return {'return': 0}
- if env.get('CM_MODEL', '') == '':
+ if env.get('MLC_MODEL', '') == '':
return {
'return': 1, 'error': 'Please select a variation specifying the model to run'}
make_command = env['MLPERF_NVIDIA_RUN_COMMAND']
- if env.get('CM_MLPERF_DEVICE', '') == '':
+ if env.get('MLC_MLPERF_DEVICE', '') == '':
return {
'return': 1, 'error': 'Please select a variation specifying the device to run on'}
- if env.get('CM_MLPERF_SKIP_RUN',
+ if env.get('MLC_MLPERF_SKIP_RUN',
'') == "yes" and make_command == "run_harness":
return {'return': 0}
- env['MLPERF_SCRATCH_PATH'] = env['CM_NVIDIA_MLPERF_SCRATCH_PATH']
+ env['MLPERF_SCRATCH_PATH'] = env['MLC_NVIDIA_MLPERF_SCRATCH_PATH']
cmds = []
- scenario = env['CM_MLPERF_LOADGEN_SCENARIO']
- mode = env['CM_MLPERF_LOADGEN_MODE']
+ scenario = env['MLC_MLPERF_LOADGEN_SCENARIO']
+ mode = env['MLC_MLPERF_LOADGEN_MODE']
make_command = env['MLPERF_NVIDIA_RUN_COMMAND']
if make_command == "prebuild":
cmds.append(f"""make prebuild NETWORK_NODE=SUT""")
- if env['CM_MODEL'] == "resnet50":
+ if env['MLC_MODEL'] == "resnet50":
target_data_path = os.path.join(
env['MLPERF_SCRATCH_PATH'], 'data', 'imagenet')
if not os.path.exists(target_data_path):
cmds.append(
- f"""ln -sf {env['CM_DATASET_IMAGENET_PATH']} {target_data_path}""")
+ f"""ln -sf {env['MLC_DATASET_IMAGENET_PATH']} {target_data_path}""")
model_path = os.path.join(
env['MLPERF_SCRATCH_PATH'],
@@ -57,10 +57,10 @@ def preprocess(i):
if not os.path.exists(model_path):
cmds.append(
- f"""ln -sf {env['CM_ML_MODEL_FILE_WITH_PATH']} {model_path}""")
+ f"""ln -sf {env['MLC_ML_MODEL_FILE_WITH_PATH']} {model_path}""")
model_name = "resnet50"
- elif "bert" in env['CM_MODEL']:
+ elif "bert" in env['MLC_MODEL']:
target_data_path = os.path.join(
env['MLPERF_SCRATCH_PATH'], 'data', 'squad')
if not os.path.exists(target_data_path):
@@ -87,34 +87,34 @@ def preprocess(i):
if not os.path.exists(fp32_model_path):
cmds.append(
- f"""cp -r --remove-destination {env['CM_ML_MODEL_BERT_LARGE_FP32_PATH']} {fp32_model_path}""")
+ f"""cp -r --remove-destination {env['MLC_ML_MODEL_BERT_LARGE_FP32_PATH']} {fp32_model_path}""")
if not os.path.exists(int8_model_path):
cmds.append(
- f"""cp -r --remove-destination {env['CM_ML_MODEL_BERT_LARGE_INT8_PATH']} {int8_model_path}""")
+ f"""cp -r --remove-destination {env['MLC_ML_MODEL_BERT_LARGE_INT8_PATH']} {int8_model_path}""")
if not os.path.exists(vocab_path):
cmds.append(
- f"""cp -r --remove-destination {env['CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH']} {vocab_path}""")
+ f"""cp -r --remove-destination {env['MLC_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH']} {vocab_path}""")
model_name = "bert"
model_path = fp32_model_path
- elif "stable-diffusion" in env["CM_MODEL"]:
+ elif "stable-diffusion" in env["MLC_MODEL"]:
target_data_path = os.path.join(
env['MLPERF_SCRATCH_PATH'], 'data', 'coco', 'SDXL')
tsv_file = os.path.join(target_data_path, "captions_5k_final.tsv")
if os.path.exists(tsv_file):
with open(tsv_file, "r") as file:
line_count = sum(1 for line in file)
- if env.get('CM_MLPERF_SUBMISSION_GENERATION_STYLE', '') == 'full':
+ if env.get('MLC_MLPERF_SUBMISSION_GENERATION_STYLE', '') == 'full':
if line_count < 5000:
shutil.rmtree(target_data_path)
if not os.path.exists(tsv_file):
os.makedirs(target_data_path, exist_ok=True)
# cmds.append("make download_data BENCHMARKS='stable-diffusion-xl'")
- env['CM_REQUIRE_COCO2014_DOWNLOAD'] = 'yes'
+ env['MLC_REQUIRE_COCO2014_DOWNLOAD'] = 'yes'
cmds.append(
- f"""cp -r \\$CM_DATASET_PATH_ROOT/captions/captions.tsv {target_data_path}/captions_5k_final.tsv""")
+ f"""cp -r \\$MLC_DATASET_PATH_ROOT/captions/captions.tsv {target_data_path}/captions_5k_final.tsv""")
cmds.append(
- f"""cp -r \\$CM_DATASET_PATH_ROOT/latents/latents.pt {target_data_path}/latents.pt""")
+ f"""cp -r \\$MLC_DATASET_PATH_ROOT/latents/latents.pt {target_data_path}/latents.pt""")
fp16_model_path = os.path.join(
env['MLPERF_SCRATCH_PATH'],
'models',
@@ -129,13 +129,13 @@ def preprocess(i):
if not os.path.exists(fp16_model_path):
if os.path.islink(fp16_model_path):
cmds.append(f"rm -f {fp16_model_path}")
- env['CM_REQUIRE_SDXL_MODEL_DOWNLOAD'] = 'yes'
+ env['MLC_REQUIRE_SDXL_MODEL_DOWNLOAD'] = 'yes'
cmds.append(f"cp -r \\$SDXL_CHECKPOINT_PATH {fp16_model_path}")
model_name = "stable-diffusion-xl"
model_path = fp16_model_path
- elif "3d-unet" in env['CM_MODEL']:
+ elif "3d-unet" in env['MLC_MODEL']:
target_data_path = os.path.join(
env['MLPERF_SCRATCH_PATH'],
'data',
@@ -153,7 +153,7 @@ def preprocess(i):
if not os.path.exists(target_data_path) or not os.path.exists(
inference_cases_json_path) or not os.path.exists(calibration_cases_json_path):
- # cmds.append(f"ln -sf {env['CM_DATASET_PATH']} {target_data_path}")
+ # cmds.append(f"ln -sf {env['MLC_DATASET_PATH']} {target_data_path}")
cmds.append("make download_data BENCHMARKS='3d-unet'")
model_path = os.path.join(
@@ -163,7 +163,7 @@ def preprocess(i):
'3dUNetKiTS19.onnx')
model_name = "3d-unet"
- elif "rnnt" in env['CM_MODEL']:
+ elif "rnnt" in env['MLC_MODEL']:
target_data_path = os.path.join(
env['MLPERF_SCRATCH_PATH'],
'data',
@@ -173,7 +173,7 @@ def preprocess(i):
if not os.path.exists(target_data_path_base_dir):
cmds.append(f"mkdir -p {target_data_path_base_dir}")
if not os.path.exists(target_data_path):
- # cmds.append(f"ln -sf {env['CM_DATASET_LIBRISPEECH_PATH']} {target_data_path}")
+ # cmds.append(f"ln -sf {env['MLC_DATASET_LIBRISPEECH_PATH']} {target_data_path}")
cmds.append("make download_data BENCHMARKS='rnnt'")
model_path = os.path.join(
@@ -183,12 +183,12 @@ def preprocess(i):
'DistributedDataParallel_1576581068.9962234-epoch-100.pt')
model_name = "rnnt"
- elif "pdlrm" in env['CM_MODEL']:
+ elif "pdlrm" in env['MLC_MODEL']:
target_data_path = os.path.join(
env['MLPERF_SCRATCH_PATH'], 'data', 'criteo')
if not os.path.exists(target_data_path):
cmds.append(
- f"ln -sf {env['CM_DATASET_PREPROCESSED_PATH']} {target_data_path}")
+ f"ln -sf {env['MLC_DATASET_PREPROCESSED_PATH']} {target_data_path}")
model_path = os.path.join(
env['MLPERF_SCRATCH_PATH'],
@@ -200,18 +200,18 @@ def preprocess(i):
if not os.path.exists(model_path):
cmds.append(
- f"ln -sf {env['CM_ML_MODEL_FILE_WITH_PATH']} {model_path}")
+ f"ln -sf {env['MLC_ML_MODEL_FILE_WITH_PATH']} {model_path}")
model_name = "dlrm"
- elif "dlrm-v2" in env['CM_MODEL']:
+ elif "dlrm-v2" in env['MLC_MODEL']:
model_name = "dlrm-v2"
- elif env['CM_MODEL'] == "retinanet":
+ elif env['MLC_MODEL'] == "retinanet":
# print(env)
- dataset_path = env['CM_DATASET_OPENIMAGES_PATH']
+ dataset_path = env['MLC_DATASET_OPENIMAGES_PATH']
# return {'return': 1, 'error': 'error'}
- annotations_path = env['CM_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH']
+ annotations_path = env['MLC_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH']
target_data_path_dir = os.path.join(
env['MLPERF_SCRATCH_PATH'], 'data', 'open-images-v6-mlperf')
if not os.path.exists(target_data_path_dir):
@@ -231,7 +231,7 @@ def preprocess(i):
if not os.path.exists(target_data_path):
cmds.append(f"ln -sf {dataset_path} {target_data_path}")
- calibration_dataset_path = env['CM_OPENIMAGES_CALIBRATION_DATASET_PATH']
+ calibration_dataset_path = env['MLC_OPENIMAGES_CALIBRATION_DATASET_PATH']
target_data_path_dir = os.path.join(
env['MLPERF_SCRATCH_PATH'],
'data',
@@ -264,7 +264,7 @@ def preprocess(i):
model_name = "retinanet"
- elif "gptj" in env['CM_MODEL']:
+ elif "gptj" in env['MLC_MODEL']:
target_data_path = os.path.join(
env['MLPERF_SCRATCH_PATH'],
'data',
@@ -283,7 +283,7 @@ def preprocess(i):
'models',
'GPTJ-6B',
'fp8-quantized-ammo',
- env['CM_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX'])
+ env['MLC_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX'])
vocab_path = os.path.join(
env['MLPERF_SCRATCH_PATH'],
'models',
@@ -297,15 +297,15 @@ def preprocess(i):
if not os.path.exists(fp32_model_path):
# download via prehook_deps
- env['CM_REQUIRE_GPTJ_MODEL_DOWNLOAD'] = 'yes'
+ env['MLC_REQUIRE_GPTJ_MODEL_DOWNLOAD'] = 'yes'
if make_command == "build_engine":
cmds.append(
- f"cp -r $CM_ML_MODEL_FILE_WITH_PATH {fp32_model_path}")
+ f"cp -r $MLC_ML_MODEL_FILE_WITH_PATH {fp32_model_path}")
model_name = "gptj"
model_path = fp8_model_path
- elif "llama2" in env["CM_MODEL"]:
+ elif "llama2" in env["MLC_MODEL"]:
# path to which the data file is present
target_data_path = os.path.join(
env['MLPERF_SCRATCH_PATH'],
@@ -317,7 +317,7 @@ def preprocess(i):
'preprocessed_data',
'open_orca',
'open_orca_gpt4_tokenized_llama.sampled_24576.pkl')
- tmp_tp_size = env['CM_NVIDIA_TP_SIZE']
+ tmp_tp_size = env['MLC_NVIDIA_TP_SIZE']
if tmp_tp_size == "1":
fp8_model_path = os.path.join(
env['MLPERF_SCRATCH_PATH'],
@@ -333,13 +333,13 @@ def preprocess(i):
'fp8-quantized-ammo',
f'llama2-70b-chat-hf-tp{tmp_tp_size}pp1-fp8')
if not os.path.exists(target_data_file_path):
- if env.get('CM_NVIDIA_LLAMA_DATASET_FILE_PATH', '') == '':
+ if env.get('MLC_NVIDIA_LLAMA_DATASET_FILE_PATH', '') == '':
return {
'return': 1, 'error': 'Please specify the path to LLAMA2 dataset (pickle file)'}
if not os.path.exists(target_data_path):
cmds.append(f"mkdir {target_data_path}")
cmds.append(
- f"ln -sf {env['CM_NVIDIA_LLAMA_DATASET_FILE_PATH']} {target_data_file_path}")
+ f"ln -sf {env['MLC_NVIDIA_LLAMA_DATASET_FILE_PATH']} {target_data_file_path}")
model_name = "llama2-70b"
model_path = fp8_model_path
@@ -347,13 +347,13 @@ def preprocess(i):
# cmds.append(f"make prebuild")
if make_command == "download_model":
if not os.path.exists(model_path):
- if "llama2" in env['CM_MODEL']:
+ if "llama2" in env['MLC_MODEL']:
if not os.path.exists(os.path.join(model_path, 'config.json')):
return {
'return': 1, 'error': f'Quantised model absent - did not detect config.json in path {model_path}'}
else:
cmds.append(f"make download_model BENCHMARKS='{model_name}'")
- elif "stable-diffusion" in env['CM_MODEL']:
+ elif "stable-diffusion" in env['MLC_MODEL']:
folders = ["clip1", "clip2", "unetxl", "vae"]
for folder in folders:
onnx_model_path = os.path.join(
@@ -364,7 +364,7 @@ def preprocess(i):
folder,
'model.onnx')
if not os.path.exists(onnx_model_path):
- env['CM_REQUIRE_SDXL_MODEL_DOWNLOAD'] = 'yes'
+ env['MLC_REQUIRE_SDXL_MODEL_DOWNLOAD'] = 'yes'
cmds.append(
f"make download_model BENCHMARKS='{model_name}'")
break
@@ -377,19 +377,19 @@ def preprocess(i):
'unetxl.int8',
'unet.onnx')
if not os.path.exists(ammo_model_path):
- env['CM_REQUIRE_SDXL_MODEL_DOWNLOAD'] = 'yes'
+ env['MLC_REQUIRE_SDXL_MODEL_DOWNLOAD'] = 'yes'
cmds.append(
f"make download_model BENCHMARKS='{model_name}'")
else:
return {'return': 0}
elif make_command == "preprocess_data":
- if env['CM_MODEL'] == "rnnt":
+ if env['MLC_MODEL'] == "rnnt":
cmds.append(
f"rm -rf {os.path.join(env['MLPERF_SCRATCH_PATH'], 'preprocessed_data', 'rnnt_dev_clean_500_raw')}")
cmds.append(
f"rm -rf {os.path.join(env['MLPERF_SCRATCH_PATH'], 'preprocessed_data', 'rnnt_train_clean_512_wav')}")
- if "llama2" in env["CM_MODEL"]:
+ if "llama2" in env["MLC_MODEL"]:
# Preprocessing script in the inference results repo is not checking whether the preprocessed
# file is already there, so we are handling it here.
target_preprocessed_data_path = os.path.join(
@@ -405,27 +405,27 @@ def preprocess(i):
else:
scenario = scenario.lower()
- if env['CM_MLPERF_LOADGEN_MODE'] == "accuracy":
+ if env['MLC_MLPERF_LOADGEN_MODE'] == "accuracy":
test_mode = "AccuracyOnly"
- elif env['CM_MLPERF_LOADGEN_MODE'] == "performance":
+ elif env['MLC_MLPERF_LOADGEN_MODE'] == "performance":
test_mode = "PerformanceOnly"
- elif env['CM_MLPERF_LOADGEN_MODE'] == "compliance":
+ elif env['MLC_MLPERF_LOADGEN_MODE'] == "compliance":
test_mode = ""
test_name = env.get(
- 'CM_MLPERF_LOADGEN_COMPLIANCE_TEST',
+ 'MLC_MLPERF_LOADGEN_COMPLIANCE_TEST',
'test01').lower()
- env['CM_MLPERF_NVIDIA_RUN_COMMAND'] = "run_audit_{}_once".format(
+ env['MLC_MLPERF_NVIDIA_RUN_COMMAND'] = "run_audit_{}_once".format(
test_name)
make_command = "run_audit_{}_once".format(test_name)
else:
return {'return': 1, 'error': 'Unsupported mode: {}'.format(
- env['CM_MLPERF_LOADGEN_MODE'])}
+ env['MLC_MLPERF_LOADGEN_MODE'])}
run_config = ''
- target_qps = env.get('CM_MLPERF_LOADGEN_TARGET_QPS')
- offline_target_qps = env.get('CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS')
- server_target_qps = env.get('CM_MLPERF_LOADGEN_SERVER_TARGET_QPS')
+ target_qps = env.get('MLC_MLPERF_LOADGEN_TARGET_QPS')
+ offline_target_qps = env.get('MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS')
+ server_target_qps = env.get('MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS')
if target_qps:
target_qps = int(float(target_qps))
if scenario == "offline" and not offline_target_qps:
@@ -440,11 +440,11 @@ def preprocess(i):
server_target_qps = int(float(server_target_qps))
run_config += f" --server_target_qps={server_target_qps}"
- target_latency = env.get('CM_MLPERF_LOADGEN_TARGET_LATENCY')
+ target_latency = env.get('MLC_MLPERF_LOADGEN_TARGET_LATENCY')
singlestream_target_latency = env.get(
- 'CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY')
+ 'MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY')
multistream_target_latency = env.get(
- 'CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY')
+ 'MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY')
if target_latency:
target_latency_ns = int(float(target_latency) * 1000000)
if scenario == "singlestream" and not singlestream_target_latency:
@@ -461,23 +461,23 @@ def preprocess(i):
float(multistream_target_latency) * 1000000)
run_config += f" --multi_stream_expected_latency_ns={multistream_target_latency_ns}"
- high_accuracy = "99.9" in env['CM_MODEL']
+ high_accuracy = "99.9" in env['MLC_MODEL']
config_ver_list = []
- use_lon = env.get('CM_MLPERF_NVIDIA_HARNESS_LON')
+ use_lon = env.get('MLC_MLPERF_NVIDIA_HARNESS_LON')
if use_lon:
config_ver_list.append("lon_node")
# run_config += " --lon_node"
- maxq = env.get('CM_MLPERF_NVIDIA_HARNESS_MAXQ')
+ maxq = env.get('MLC_MLPERF_NVIDIA_HARNESS_MAXQ')
if maxq:
config_ver_list.append("maxq")
if high_accuracy:
config_ver_list.append("high_accuracy")
- use_triton = env.get('CM_MLPERF_NVIDIA_HARNESS_USE_TRITON')
+ use_triton = env.get('MLC_MLPERF_NVIDIA_HARNESS_USE_TRITON')
if use_triton:
run_config += " --use_triton "
config_ver_list.append("triton")
@@ -485,114 +485,114 @@ def preprocess(i):
if config_ver_list:
run_config += f" --config_ver={'_'.join(config_ver_list)}"
- user_conf_path = env.get('CM_MLPERF_USER_CONF')
- if user_conf_path and env['CM_MLPERF_NVIDIA_HARNESS_RUN_MODE'] == "run_harness":
+ user_conf_path = env.get('MLC_MLPERF_USER_CONF')
+ if user_conf_path and env['MLC_MLPERF_NVIDIA_HARNESS_RUN_MODE'] == "run_harness":
run_config += f" --user_conf_path={user_conf_path}"
- mlperf_conf_path = env.get('CM_MLPERF_INFERENCE_CONF_PATH')
- if mlperf_conf_path and env['CM_MLPERF_NVIDIA_HARNESS_RUN_MODE'] == "run_harness":
+ mlperf_conf_path = env.get('MLC_MLPERF_INFERENCE_CONF_PATH')
+ if mlperf_conf_path and env['MLC_MLPERF_NVIDIA_HARNESS_RUN_MODE'] == "run_harness":
run_config += f" --mlperf_conf_path={mlperf_conf_path}"
- power_setting = env.get('CM_MLPERF_NVIDIA_HARNESS_POWER_SETTING')
- if power_setting and env['CM_MLPERF_NVIDIA_HARNESS_RUN_MODE'] == "run_harness":
+ power_setting = env.get('MLC_MLPERF_NVIDIA_HARNESS_POWER_SETTING')
+ if power_setting and env['MLC_MLPERF_NVIDIA_HARNESS_RUN_MODE'] == "run_harness":
run_config += f" --power_setting={power_setting}"
- gpu_copy_streams = env.get('CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS')
+ gpu_copy_streams = env.get('MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS')
if gpu_copy_streams:
run_config += f" --gpu_copy_streams={gpu_copy_streams}"
gpu_inference_streams = env.get(
- 'CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS')
+ 'MLC_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS')
if gpu_inference_streams:
run_config += f" --gpu_inference_streams={gpu_inference_streams}"
- dla_copy_streams = env.get('CM_MLPERF_NVIDIA_HARNESS_DLA_COPY_STREAMS')
+ dla_copy_streams = env.get('MLC_MLPERF_NVIDIA_HARNESS_DLA_COPY_STREAMS')
if dla_copy_streams:
run_config += f" --dla_copy_streams={dla_copy_streams}"
dla_inference_streams = env.get(
- 'CM_MLPERF_NVIDIA_HARNESS_DLA_INFERENCE_STREAMS')
+ 'MLC_MLPERF_NVIDIA_HARNESS_DLA_INFERENCE_STREAMS')
if dla_inference_streams:
run_config += f" --dla_inference_streams={dla_inference_streams}"
- gpu_batch_size = env.get('CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE')
+ gpu_batch_size = env.get('MLC_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE')
if gpu_batch_size:
run_config += f" --gpu_batch_size={gpu_batch_size}"
- dla_batch_size = env.get('CM_MLPERF_NVIDIA_HARNESS_DLA_BATCH_SIZE')
+ dla_batch_size = env.get('MLC_MLPERF_NVIDIA_HARNESS_DLA_BATCH_SIZE')
if dla_batch_size:
run_config += f" --dla_batch_size={dla_batch_size}"
- input_format = env.get('CM_MLPERF_NVIDIA_HARNESS_INPUT_FORMAT')
+ input_format = env.get('MLC_MLPERF_NVIDIA_HARNESS_INPUT_FORMAT')
if input_format:
run_config += f" --input_format={input_format}"
performance_sample_count = env.get(
- 'CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT')
+ 'MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT')
if performance_sample_count:
run_config += f" --performance_sample_count={performance_sample_count}"
- devices = env.get('CM_MLPERF_NVIDIA_HARNESS_DEVICES')
+ devices = env.get('MLC_MLPERF_NVIDIA_HARNESS_DEVICES')
if devices:
run_config += f" --devices={devices}"
- audio_batch_size = env.get('CM_MLPERF_NVIDIA_HARNESS_AUDIO_BATCH_SIZE')
+ audio_batch_size = env.get('MLC_MLPERF_NVIDIA_HARNESS_AUDIO_BATCH_SIZE')
if audio_batch_size:
run_config += f" --audio_batch_size={audio_batch_size}"
disable_encoder_plugin = str(
- env.get('CM_MLPERF_NVIDIA_HARNESS_DISABLE_ENCODER_PLUGIN', ''))
+ env.get('MLC_MLPERF_NVIDIA_HARNESS_DISABLE_ENCODER_PLUGIN', ''))
if disable_encoder_plugin and disable_encoder_plugin.lower() not in [
"no", "false", "0", ""]:
run_config += " --disable_encoder_plugin"
disable_beta1_smallk = str(
- env.get('CM_MLPERF_NVIDIA_HARNESS_DISABLE_BETA1_SMALLK', ''))
+ env.get('MLC_MLPERF_NVIDIA_HARNESS_DISABLE_BETA1_SMALLK', ''))
if disable_beta1_smallk and disable_beta1_smallk.lower() in [
"yes", "true", "1"]:
run_config += " --disable_beta1_smallk"
- workspace_size = env.get('CM_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE')
+ workspace_size = env.get('MLC_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE')
if workspace_size:
run_config += f" --workspace_size={workspace_size}"
- if env.get('CM_MLPERF_LOADGEN_LOGS_DIR'):
- env['MLPERF_LOADGEN_LOGS_DIR'] = env['CM_MLPERF_LOADGEN_LOGS_DIR']
+ if env.get('MLC_MLPERF_LOADGEN_LOGS_DIR'):
+ env['MLPERF_LOADGEN_LOGS_DIR'] = env['MLC_MLPERF_LOADGEN_LOGS_DIR']
- log_dir = env.get('CM_MLPERF_NVIDIA_HARNESS_LOG_DIR')
+ log_dir = env.get('MLC_MLPERF_NVIDIA_HARNESS_LOG_DIR')
if log_dir:
run_config += f" --log_dir={log_dir}"
- use_graphs = str(env.get('CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS', ''))
+ use_graphs = str(env.get('MLC_MLPERF_NVIDIA_HARNESS_USE_GRAPHS', ''))
if use_graphs and use_graphs.lower() not in ["no", "false", "0", ""]:
run_config += " --use_graphs"
use_deque_limit = str(
- env.get('CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT'))
+ env.get('MLC_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT'))
if use_deque_limit and use_deque_limit.lower() not in [
"no", "false", "0"]:
run_config += " --use_deque_limit"
deque_timeout_usec = env.get(
- 'CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC')
+ 'MLC_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC')
if deque_timeout_usec:
run_config += f" --deque_timeout_usec={deque_timeout_usec}"
use_cuda_thread_per_device = str(
- env.get('CM_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE', ''))
+ env.get('MLC_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE', ''))
if use_cuda_thread_per_device and use_cuda_thread_per_device.lower() not in [
"no", "false", "0", ""]:
run_config += " --use_cuda_thread_per_device"
run_infer_on_copy_streams = str(
- env.get('CM_MLPERF_NVIDIA_HARNESS_RUN_INFER_ON_COPY_STREAMS', ''))
+ env.get('MLC_MLPERF_NVIDIA_HARNESS_RUN_INFER_ON_COPY_STREAMS', ''))
if run_infer_on_copy_streams and run_infer_on_copy_streams.lower() not in [
"no", "false", "0", ""]:
run_config += " --run_infer_on_copy_streams"
start_from_device = str(
env.get(
- 'CM_MLPERF_NVIDIA_HARNESS_START_FROM_DEVICE',
+ 'MLC_MLPERF_NVIDIA_HARNESS_START_FROM_DEVICE',
''))
if start_from_device and start_from_device.lower() not in [
"no", "false", "0", ""]:
@@ -600,75 +600,75 @@ def preprocess(i):
end_on_device = str(
env.get(
- 'CM_MLPERF_NVIDIA_HARNESS_END_ON_DEVICE',
+ 'MLC_MLPERF_NVIDIA_HARNESS_END_ON_DEVICE',
''))
if end_on_device and end_on_device.lower() not in [
"no", "false", "0", ""]:
run_config += " --end_on_device"
- max_dlas = env.get('CM_MLPERF_NVIDIA_HARNESS_MAX_DLAS')
+ max_dlas = env.get('MLC_MLPERF_NVIDIA_HARNESS_MAX_DLAS')
if max_dlas:
run_config += f" --max_dlas={max_dlas}"
graphs_max_seqlen = env.get(
- 'CM_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN')
+ 'MLC_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN')
if graphs_max_seqlen:
run_config += f" --graphs_max_seqlen={graphs_max_seqlen}"
num_issue_query_threads = env.get(
- 'CM_MLPERF_NVIDIA_HARNESS_NUM_ISSUE_QUERY_THREADS')
+ 'MLC_MLPERF_NVIDIA_HARNESS_NUM_ISSUE_QUERY_THREADS')
if num_issue_query_threads:
run_config += f" --num_issue_query_threads={num_issue_query_threads}"
- soft_drop = env.get('CM_MLPERF_NVIDIA_HARNESS_SOFT_DROP')
+ soft_drop = env.get('MLC_MLPERF_NVIDIA_HARNESS_SOFT_DROP')
if soft_drop:
run_config += f" --soft_drop={soft_drop}"
use_small_tile_gemm_plugin = str(
- env.get('CM_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN', ''))
+ env.get('MLC_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN', ''))
if use_small_tile_gemm_plugin and use_small_tile_gemm_plugin.lower() not in [
"no", "false", "0", ""]:
run_config += f" --use_small_tile_gemm_plugin"
audio_buffer_num_lines = env.get(
- 'CM_MLPERF_NVIDIA_HARNESS_AUDIO_BUFFER_NUM_LINES')
+ 'MLC_MLPERF_NVIDIA_HARNESS_AUDIO_BUFFER_NUM_LINES')
if audio_buffer_num_lines:
run_config += f" --audio_buffer_num_lines={audio_buffer_num_lines}"
- use_fp8 = str(env.get('CM_MLPERF_NVIDIA_HARNESS_USE_FP8', ''))
+ use_fp8 = str(env.get('MLC_MLPERF_NVIDIA_HARNESS_USE_FP8', ''))
if use_fp8 and use_fp8.lower() not in ["no", "false", "0", ""]:
run_config += f" --use_fp8"
- if "llama2" in env["CM_MODEL"]:
+ if "llama2" in env["MLC_MODEL"]:
run_config += f" --fp8_quant_model_path={fp8_model_path}"
run_config += f" --tensor_parallelism={tmp_tp_size}"
- enable_sort = env.get('CM_MLPERF_NVIDIA_HARNESS_ENABLE_SORT')
+ enable_sort = env.get('MLC_MLPERF_NVIDIA_HARNESS_ENABLE_SORT')
if enable_sort and enable_sort.lower() not in ["no", "false", "0"]:
run_config += f" --enable_sort"
sdxl_server_batcher_time_limit = env.get(
- 'CM_MLPERF_NVIDIA_HARNESS_ENABLE_SORT')
+ 'MLC_MLPERF_NVIDIA_HARNESS_ENABLE_SORT')
if sdxl_server_batcher_time_limit:
run_config += f" --sdxl_batcher_time_limit {sdxl_server_batcher_time_limit}"
num_sort_segments = env.get(
- 'CM_MLPERF_NVIDIA_HARNESS_NUM_SORT_SEGMENTS')
+ 'MLC_MLPERF_NVIDIA_HARNESS_NUM_SORT_SEGMENTS')
if num_sort_segments:
run_config += f" --num_sort_segments={num_sort_segments}"
embedding_weights_on_gpu_part = env.get(
- 'CM_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART', '')
+ 'MLC_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART', '')
if embedding_weights_on_gpu_part != '':
run_config += f" --embedding_weights_on_gpu_part={embedding_weights_on_gpu_part}"
- num_warmups = env.get('CM_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS', '')
+ num_warmups = env.get('MLC_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS', '')
if num_warmups != '':
run_config += f" --num_warmups={num_warmups}"
skip_postprocess = str(
env.get(
- 'CM_MLPERF_NVIDIA_HARNESS_SKIP_POSTPROCESS',
+ 'MLC_MLPERF_NVIDIA_HARNESS_SKIP_POSTPROCESS',
''))
if skip_postprocess and skip_postprocess.lower() not in [
"no", "false", "0", ""]:
@@ -680,13 +680,13 @@ def preprocess(i):
test_mode_string = ""
extra_build_engine_options_string = env.get(
- 'CM_MLPERF_NVIDIA_HARNESS_EXTRA_BUILD_ENGINE_OPTIONS', '')
+ 'MLC_MLPERF_NVIDIA_HARNESS_EXTRA_BUILD_ENGINE_OPTIONS', '')
extra_run_options_string = env.get(
- 'CM_MLPERF_NVIDIA_HARNESS_EXTRA_RUN_OPTIONS',
+ 'MLC_MLPERF_NVIDIA_HARNESS_EXTRA_RUN_OPTIONS',
'') # will be ignored during build engine
- if "stable-diffusion" in env["CM_MODEL"]:
+ if "stable-diffusion" in env["MLC_MODEL"]:
extra_build_engine_options_string += f""" --model_path {
os.path.join(
env['MLPERF_SCRATCH_PATH'],
@@ -698,9 +698,9 @@ def preprocess(i):
cmds.append(f"""make {make_command} RUN_ARGS=' --benchmarks={model_name} --scenarios={scenario} {test_mode_string} {run_config} {extra_build_engine_options_string} {extra_run_options_string}'""")
run_cmd = " && ".join(cmds)
- env['CM_MLPERF_RUN_CMD'] = run_cmd
- env['CM_RUN_CMD'] = run_cmd
- env['CM_RUN_DIR'] = env['CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH']
+ env['MLC_MLPERF_RUN_CMD'] = run_cmd
+ env['MLC_RUN_CMD'] = run_cmd
+ env['MLC_RUN_DIR'] = env['MLC_MLPERF_INFERENCE_NVIDIA_CODE_PATH']
# print(env)
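Nearly every rename in `customize.py` follows the same shape: read one optional `MLC_` variable and, if it is set (or truthy), append one flag to `run_config`. A condensed, self-contained sketch of that pattern, assuming a plain dict in place of the script's `env` (the `build_run_config` helper is ours, not the project's):

```python
def build_run_config(env):
    """Append one RUN_ARGS flag per MLC_ harness variable that is set."""
    run_config = ''
    gpu_batch_size = env.get('MLC_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE')
    if gpu_batch_size:
        run_config += f" --gpu_batch_size={gpu_batch_size}"
    # Boolean-ish toggles are compared against a small deny-list of strings.
    use_graphs = str(env.get('MLC_MLPERF_NVIDIA_HARNESS_USE_GRAPHS', ''))
    if use_graphs.lower() not in ["no", "false", "0", ""]:
        run_config += " --use_graphs"
    return run_config

print(build_run_config({'MLC_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE': '64'}))
# -> " --gpu_batch_size=64"
```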
diff --git a/script/app-mlperf-inference-nvidia/meta.yaml b/script/app-mlperf-inference-nvidia/meta.yaml
index 5b96c7f65..473c336c6 100644
--- a/script/app-mlperf-inference-nvidia/meta.yaml
+++ b/script/app-mlperf-inference-nvidia/meta.yaml
@@ -22,72 +22,72 @@ tags:
# Default environment
default_env:
- CM_BATCH_COUNT: '1'
- CM_BATCH_SIZE: '1'
- CM_FAST_COMPILATION: 'yes'
- CM_MLPERF_LOADGEN_SCENARIO: Offline
- CM_MLPERF_LOADGEN_MODE: performance
+ MLC_BATCH_COUNT: '1'
+ MLC_BATCH_SIZE: '1'
+ MLC_FAST_COMPILATION: 'yes'
+ MLC_MLPERF_LOADGEN_SCENARIO: Offline
+ MLC_MLPERF_LOADGEN_MODE: performance
# SKIP_POLICIES: '1'
- CM_SKIP_PREPROCESS_DATASET: 'no'
- CM_SKIP_MODEL_DOWNLOAD: 'no'
- CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: nvidia_original
- CM_MLPERF_SKIP_RUN: 'no'
+ MLC_SKIP_PREPROCESS_DATASET: 'no'
+ MLC_SKIP_MODEL_DOWNLOAD: 'no'
+ MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: nvidia_original
+ MLC_MLPERF_SKIP_RUN: 'no'
env:
- CM_CALL_MLPERF_RUNNER: 'no'
+ MLC_CALL_MLPERF_RUNNER: 'no'
# Map script inputs to environment variables
input_mapping:
- count: CM_MLPERF_LOADGEN_QUERY_COUNT
- max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE
- mlperf_conf: CM_MLPERF_CONF
- mode: CM_MLPERF_LOADGEN_MODE
- output_dir: CM_MLPERF_OUTPUT_DIR
- scenario: CM_MLPERF_LOADGEN_SCENARIO
- user_conf: CM_MLPERF_USER_CONF
- devices: CM_MLPERF_NVIDIA_HARNESS_DEVICES
- skip_preprocess: CM_SKIP_PREPROCESS_DATASET
- skip_preprocessing: CM_SKIP_PREPROCESS_DATASET
- target_qps: CM_MLPERF_LOADGEN_TARGET_QPS
- offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
- server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS
- target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY
- singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
- multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
- use_triton: CM_MLPERF_NVIDIA_HARNESS_USE_TRITON
- gpu_copy_streams: CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS
- gpu_inference_streams: CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS
- gpu_batch_size: CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE
- dla_copy_streams: CM_MLPERF_NVIDIA_HARNESS_DLA_COPY_STREAMS
- dla_inference_streams: CM_MLPERF_NVIDIA_HARNESS_DLA_INFERENCE_STREAMS
- dla_batch_size: CM_MLPERF_NVIDIA_HARNESS_DLA_BATCH_SIZE
- input_format: CM_MLPERF_NVIDIA_HARNESS_INPUT_FORMAT
- performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
- workspace_size: CM_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE
- log_dir: CM_MLPERF_NVIDIA_HARNESS_LOG_DIR
- use_graphs: CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS
- run_infer_on_copy_streams: CM_MLPERF_NVIDIA_HARNESS_RUN_INFER_ON_COPY_STREAMS
- start_from_device: CM_MLPERF_NVIDIA_HARNESS_START_FROM_DEVICE
- end_on_device: CM_MLPERF_NVIDIA_HARNESS_END_ON_DEVICE
- max_dlas: CM_MLPERF_NVIDIA_HARNESS_MAX_DLAS
- power_setting: CM_MLPERF_NVIDIA_HARNESS_POWER_SETTING
+ count: MLC_MLPERF_LOADGEN_QUERY_COUNT
+ max_batchsize: MLC_MLPERF_LOADGEN_MAX_BATCHSIZE
+ mlperf_conf: MLC_MLPERF_CONF
+ mode: MLC_MLPERF_LOADGEN_MODE
+ output_dir: MLC_MLPERF_OUTPUT_DIR
+ scenario: MLC_MLPERF_LOADGEN_SCENARIO
+ user_conf: MLC_MLPERF_USER_CONF
+ devices: MLC_MLPERF_NVIDIA_HARNESS_DEVICES
+ skip_preprocess: MLC_SKIP_PREPROCESS_DATASET
+ skip_preprocessing: MLC_SKIP_PREPROCESS_DATASET
+ target_qps: MLC_MLPERF_LOADGEN_TARGET_QPS
+ offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
+ server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS
+ target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY
+ singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
+ multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
+ use_triton: MLC_MLPERF_NVIDIA_HARNESS_USE_TRITON
+ gpu_copy_streams: MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS
+ gpu_inference_streams: MLC_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS
+ gpu_batch_size: MLC_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE
+ dla_copy_streams: MLC_MLPERF_NVIDIA_HARNESS_DLA_COPY_STREAMS
+ dla_inference_streams: MLC_MLPERF_NVIDIA_HARNESS_DLA_INFERENCE_STREAMS
+ dla_batch_size: MLC_MLPERF_NVIDIA_HARNESS_DLA_BATCH_SIZE
+ input_format: MLC_MLPERF_NVIDIA_HARNESS_INPUT_FORMAT
+ performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
+ workspace_size: MLC_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE
+ log_dir: MLC_MLPERF_NVIDIA_HARNESS_LOG_DIR
+ use_graphs: MLC_MLPERF_NVIDIA_HARNESS_USE_GRAPHS
+ run_infer_on_copy_streams: MLC_MLPERF_NVIDIA_HARNESS_RUN_INFER_ON_COPY_STREAMS
+ start_from_device: MLC_MLPERF_NVIDIA_HARNESS_START_FROM_DEVICE
+ end_on_device: MLC_MLPERF_NVIDIA_HARNESS_END_ON_DEVICE
+ max_dlas: MLC_MLPERF_NVIDIA_HARNESS_MAX_DLAS
+ power_setting: MLC_MLPERF_NVIDIA_HARNESS_POWER_SETTING
make_cmd: MLPERF_NVIDIA_RUN_COMMAND
- rerun: CM_RERUN
- extra_run_options: CM_MLPERF_NVIDIA_HARNESS_EXTRA_RUN_OPTIONS
- use_deque_limit: CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT
- deque_timeout_usec: CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC
- use_cuda_thread_per_device: CM_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE
- num_warmups: CM_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS
- graphs_max_seqlen: CM_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN
- num_issue_query_threads: CM_MLPERF_NVIDIA_HARNESS_NUM_ISSUE_QUERY_THREADS
- soft_drop: CM_MLPERF_NVIDIA_HARNESS_SOFT_DROP
- use_small_tile_gemm_plugin: CM_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN
- audio_buffer_num_lines: CM_MLPERF_NVIDIA_HARNESS_AUDIO_BUFFER_NUM_LINES
- use_fp8: CM_MLPERF_NVIDIA_HARNESS_USE_FP8
- enable_sort: CM_MLPERF_NVIDIA_HARNESS_ENABLE_SORT
- num_sort_segments: CM_MLPERF_NVIDIA_HARNESS_NUM_SORT_SEGMENTS
- skip_postprocess: CM_MLPERF_NVIDIA_HARNESS_SKIP_POSTPROCESS
- embedding_weights_on_gpu_part: CM_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART
- sdxl_batcher_time_limit: CM_MLPERF_NVIDIA_HARNESS_SDXL_SERVER_BATCHER_TIME_LIMIT
+ rerun: MLC_RERUN
+ extra_run_options: MLC_MLPERF_NVIDIA_HARNESS_EXTRA_RUN_OPTIONS
+ use_deque_limit: MLC_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT
+ deque_timeout_usec: MLC_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC
+ use_cuda_thread_per_device: MLC_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE
+ num_warmups: MLC_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS
+ graphs_max_seqlen: MLC_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN
+ num_issue_query_threads: MLC_MLPERF_NVIDIA_HARNESS_NUM_ISSUE_QUERY_THREADS
+ soft_drop: MLC_MLPERF_NVIDIA_HARNESS_SOFT_DROP
+ use_small_tile_gemm_plugin: MLC_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN
+ audio_buffer_num_lines: MLC_MLPERF_NVIDIA_HARNESS_AUDIO_BUFFER_NUM_LINES
+ use_fp8: MLC_MLPERF_NVIDIA_HARNESS_USE_FP8
+ enable_sort: MLC_MLPERF_NVIDIA_HARNESS_ENABLE_SORT
+ num_sort_segments: MLC_MLPERF_NVIDIA_HARNESS_NUM_SORT_SEGMENTS
+ skip_postprocess: MLC_MLPERF_NVIDIA_HARNESS_SKIP_POSTPROCESS
+ embedding_weights_on_gpu_part: MLC_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART
+ sdxl_batcher_time_limit: MLC_MLPERF_NVIDIA_HARNESS_SDXL_SERVER_BATCHER_TIME_LIMIT
# Dependencies on other CM scripts
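Each entry in the `input_mapping` table renamed above pairs one script input with exactly one environment variable, which is why the rename has to touch every value in the table. A hedged sketch of how such a table is applied, assuming plain dicts (the `apply_input_mapping` helper is illustrative, not the framework's actual loader):

```python
input_mapping = {
    'count': 'MLC_MLPERF_LOADGEN_QUERY_COUNT',
    'mode': 'MLC_MLPERF_LOADGEN_MODE',
    'gpu_batch_size': 'MLC_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE',
}

def apply_input_mapping(inputs, mapping, env):
    # Copy each recognised script input into its mapped environment variable.
    for key, value in inputs.items():
        if key in mapping:
            env[mapping[key]] = str(value)
    return env

env = apply_input_mapping({'gpu_batch_size': 64, 'mode': 'performance'},
                          input_mapping, {})
# env == {'MLC_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE': '64',
#         'MLC_MLPERF_LOADGEN_MODE': 'performance'}
```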
@@ -116,19 +116,19 @@ deps:
# Install ResNet50 model (ONNX) and ImageNet
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- resnet50
skip_if_env:
- CM_USE_DATASET_FROM_HOST:
+ MLC_USE_DATASET_FROM_HOST:
- 'yes'
- CM_RUN_STATE_DOCKER:
+ MLC_RUN_STATE_DOCKER:
- 'yes'
names:
- imagenet-original
tags: get,dataset,original,imagenet,_full
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- resnet50
names:
- resnet50-model
@@ -139,7 +139,7 @@ deps:
# Install kits19 dataset
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- 3d-unet-99-disabled
- 3d-unet-99.9-disabled
names:
@@ -151,7 +151,7 @@ deps:
# Install librispeech dataset
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- rnnt
names:
- librispeech-original
@@ -161,13 +161,13 @@ deps:
# Install criteo dataset
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- dlrm-v2-99
- dlrm-v2-99.9
skip_if_any_env:
DLRM_DATA_PATH:
- 'on'
- CM_RUN_STATE_DOCKER:
+ MLC_RUN_STATE_DOCKER:
- 'yes'
names:
- criteo-preprocessed
@@ -176,13 +176,13 @@ deps:
########################################################################
# Install dlrm model
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- dlrm-v2-99
- dlrm-v2-99.9
skip_if_any_env:
DLRM_DATA_PATH:
- on
- CM_RUN_STATE_DOCKER:
+ MLC_RUN_STATE_DOCKER:
- 'yes'
names:
- dlrm-model
@@ -191,7 +191,7 @@ deps:
########################################################################
# Install bert models
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- bert-99
- bert-99.9
names:
@@ -200,7 +200,7 @@ deps:
tags: get,ml-model,bert,_onnx,_fp32
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- bert-99
- bert-99.9
names:
@@ -209,7 +209,7 @@ deps:
tags: get,ml-model,bert,_onnx,_int8
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- bert-99
- bert-99.9
names:
@@ -220,24 +220,24 @@ deps:
# Install OpenImages
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- retinanet
skip_if_env:
- CM_USE_DATASET_FROM_HOST:
+ MLC_USE_DATASET_FROM_HOST:
- 'yes'
- CM_RUN_STATE_DOCKER:
+ MLC_RUN_STATE_DOCKER:
- 'yes'
names:
- openimages-original
tags: get,dataset,original,openimages,_validation,_full,_custom-annotations
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- retinanet
skip_if_env:
- CM_USE_DATASET_FROM_HOST:
+ MLC_USE_DATASET_FROM_HOST:
- 'yes'
- CM_RUN_STATE_DOCKER:
+ MLC_RUN_STATE_DOCKER:
- 'yes'
names:
- openimages-calibration
@@ -258,9 +258,9 @@ deps:
- tags: pull,git,repo
env:
- CM_GIT_CHECKOUT_PATH: '<<<CM_MLPERF_INFERENCE_SOURCE>>>'
+ MLC_GIT_CHECKOUT_PATH: '<<<MLC_MLPERF_INFERENCE_SOURCE>>>'
enable_if_env:
- CM_MLPERF_INFERENCE_PULL_CODE_CHANGES:
+ MLC_MLPERF_INFERENCE_PULL_CODE_CHANGES:
- 'yes'
# Creates user conf for given SUT
@@ -268,7 +268,7 @@ deps:
names:
- user-conf-generator
enable_if_env:
- CM_MLPERF_NVIDIA_HARNESS_RUN_MODE:
+ MLC_MLPERF_NVIDIA_HARNESS_RUN_MODE:
- run_harness
- tags: get,generic-python-lib,_package.pycuda
@@ -277,16 +277,16 @@ deps:
- tags: get,generic-python-lib,_package.nvmitten
update_tags_from_env_with_prefix:
_path.:
- - CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH
+ - MLC_ENV_NVMITTEN_DOCKER_WHEEL_PATH
enable_if_env:
- CM_RUN_STATE_DOCKER:
+ MLC_RUN_STATE_DOCKER:
- 'yes'
- True
- 'True'
- tags: get,nvidia,mitten
skip_if_env:
- CM_RUN_STATE_DOCKER:
+ MLC_RUN_STATE_DOCKER:
- 'yes'
- True
- 'True'
@@ -295,9 +295,9 @@ prehook_deps:
########################################################################
# Install GPTJ-6B model
- enable_if_env:
- CM_REQUIRE_GPTJ_MODEL_DOWNLOAD:
+ MLC_REQUIRE_GPTJ_MODEL_DOWNLOAD:
- 'yes'
- CM_MLPERF_NVIDIA_HARNESS_RUN_MODE:
+ MLC_MLPERF_NVIDIA_HARNESS_RUN_MODE:
- download_model
- preprocess_data
names:
@@ -306,9 +306,9 @@ prehook_deps:
# Download model for sdxl
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- stable-diffusion-xl
- CM_REQUIRE_SDXL_MODEL_DOWNLOAD:
+ MLC_REQUIRE_SDXL_MODEL_DOWNLOAD:
- 'yes'
names:
- stable-diffusion-xl
@@ -316,16 +316,16 @@ prehook_deps:
- ml-model
tags: get,ml-model,sdxl,_fp16,_rclone
skip_if_env:
- CM_RUN_STATE_DOCKER:
+ MLC_RUN_STATE_DOCKER:
- 'yes'
- CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST:
+ MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST:
- 'yes'
# Install coco2014 dataset
- enable_if_env:
- CM_REQUIRE_COCO2014_DOWNLOAD:
+ MLC_REQUIRE_COCO2014_DOWNLOAD:
- 'yes'
- CM_MLPERF_NVIDIA_HARNESS_RUN_MODE:
+ MLC_MLPERF_NVIDIA_HARNESS_RUN_MODE:
- preprocess_data
names:
- coco2014-dataset
@@ -338,12 +338,12 @@ post_deps:
- runner
- mlperf-runner
skip_if_env:
- CM_MLPERF_SKIP_RUN:
+ MLC_MLPERF_SKIP_RUN:
- 'yes'
- yes
tags: benchmark-mlperf
enable_if_env:
- CM_CALL_MLPERF_RUNNER:
+ MLC_CALL_MLPERF_RUNNER:
- yes
- tags: save,mlperf,inference,state
names:
@@ -355,8 +355,8 @@ variations:
v4.1:
group: version
env:
- CM_MLPERF_INFERENCE_CODE_VERSION: "v4.1"
- CM_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX: GPTJ-FP8-quantized
+ MLC_MLPERF_INFERENCE_CODE_VERSION: "v4.1"
+ MLC_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX: GPTJ-FP8-quantized
adr:
pytorch:
tags: _for-nvidia-mlperf-inference-v4.1
@@ -365,8 +365,8 @@ variations:
group: version
default: true
env:
- CM_MLPERF_INFERENCE_CODE_VERSION: "v4.0"
- CM_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX: GPTJ-FP8-quantized
+ MLC_MLPERF_INFERENCE_CODE_VERSION: "v4.0"
+ MLC_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX: GPTJ-FP8-quantized
adr:
pytorch:
tags: _for-nvidia-mlperf-inference-v4.0
@@ -374,15 +374,15 @@ variations:
v4.0:
group: version
env:
- CM_MLPERF_INFERENCE_CODE_VERSION: "v4.0"
- CM_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX: GPTJ-FP8-quantized
+ MLC_MLPERF_INFERENCE_CODE_VERSION: "v4.0"
+ MLC_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX: GPTJ-FP8-quantized
adr:
pytorch:
tags: _for-nvidia-mlperf-inference-v4.0
v3.1:
env:
- CM_MLPERF_INFERENCE_CODE_VERSION: "v3.1"
- CM_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX: GPTJ-07142023.pth
+ MLC_MLPERF_INFERENCE_CODE_VERSION: "v3.1"
+ MLC_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX: GPTJ-07142023.pth
adr:
pytorch:
tags: _for-nvidia-mlperf-inference-v3.1
@@ -391,31 +391,31 @@ variations:
cpu:
group: device
env:
- CM_MLPERF_DEVICE: cpu
+ MLC_MLPERF_DEVICE: cpu
cuda:
group: device
default: true
env:
- CM_MLPERF_DEVICE: gpu
- CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart
+ MLC_MLPERF_DEVICE: gpu
+ MLC_MLPERF_DEVICE_LIB_NAMESPEC: cudart
tensorrt:
group: backend
default: true
env:
- CM_MLPERF_BACKEND: tensorrt
- CM_MLPERF_BACKEND_NAME: TensorRT
+ MLC_MLPERF_BACKEND: tensorrt
+ MLC_MLPERF_BACKEND_NAME: TensorRT
# Reference MLPerf models
resnet50:
group: model
default: true
env:
- CM_MODEL: resnet50
- CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion
- CM_ML_MODEL_INPUTS_DATA_TYPE: int8
- CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8
- CM_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS: 10
+ MLC_MODEL: resnet50
+ MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion
+ MLC_ML_MODEL_INPUTS_DATA_TYPE: int8
+ MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8
+ MLC_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS: 10
deps:
- tags: get,generic-python-lib,_onnx-graphsurgeon
version: 0.3.27
@@ -425,11 +425,11 @@ variations:
retinanet:
group: model
env:
- CM_MODEL: retinanet
- CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth"
- CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion
- CM_ML_MODEL_INPUTS_DATA_TYPE: int8
- CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8
+ MLC_MODEL: retinanet
+ MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth"
+ MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion
+ MLC_ML_MODEL_INPUTS_DATA_TYPE: int8
+ MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8
deps:
- tags: get,generic-python-lib,_Pillow
- tags: get,generic-python-lib,_opencv-python
@@ -442,15 +442,15 @@ variations:
sdxl:
new_env_keys:
- - CM_SDXL_ACCURACY_RUN_DEVICE
+ - MLC_SDXL_ACCURACY_RUN_DEVICE
group: model
env:
- CM_MODEL: stable-diffusion-xl
- CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://github.com/mlcommons/cm4mlops/blob/main/script/get-ml-model-stable-diffusion/_cm.json#L174"
- CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: "quantization, affine fusion"
- CM_ML_MODEL_INPUTS_DATA_TYPE: int32
- CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8
- CM_SDXL_ACCURACY_RUN_DEVICE: "gpu"
+ MLC_MODEL: stable-diffusion-xl
+ MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://github.com/mlcommons/cm4mlops/blob/main/script/get-ml-model-stable-diffusion/_cm.json#L174"
+ MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: "quantization, affine fusion"
+ MLC_ML_MODEL_INPUTS_DATA_TYPE: int32
+ MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8
+ MLC_SDXL_ACCURACY_RUN_DEVICE: "gpu"
deps:
- tags: get,generic-python-lib,_package.diffusers
names:
@@ -498,8 +498,8 @@ variations:
- nvidia-ammo
version: 0.7.4
env:
- CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: "https://pypi.nvidia.com"
- CM_GENERIC_PYTHON_PIP_EXTRA: "--no-cache-dir"
+ MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: "https://pypi.nvidia.com"
+ MLC_GENERIC_PYTHON_PIP_EXTRA: "--no-cache-dir"
- tags: get,generic-python-lib,_package.optimum
names:
- optimum
@@ -538,22 +538,22 @@ variations:
base:
- bert_
env:
- CM_MODEL: bert-99
- CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx"
- CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion
- CM_ML_MODEL_INPUTS_DATA_TYPE: int32
- CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8
+ MLC_MODEL: bert-99
+ MLC_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx"
+ MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion
+ MLC_ML_MODEL_INPUTS_DATA_TYPE: int32
+ MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8
bert-99.9:
group: model
base:
- bert_
env:
- CM_MODEL: bert-99.9
- CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx"
- CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion
- CM_ML_MODEL_INPUTS_DATA_TYPE: int32
- CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp16
+ MLC_MODEL: bert-99.9
+ MLC_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx"
+ MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion
+ MLC_ML_MODEL_INPUTS_DATA_TYPE: int32
+ MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp16
3d-unet_:
deps:
@@ -571,31 +571,31 @@ variations:
base:
- 3d-unet_
env:
- CM_MODEL: 3d-unet-99
- CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.onnx"
- CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion
- CM_ML_MODEL_INPUTS_DATA_TYPE: int8
- CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8
+ MLC_MODEL: 3d-unet-99
+ MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.onnx"
+ MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion
+ MLC_ML_MODEL_INPUTS_DATA_TYPE: int8
+ MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8
3d-unet-99.9:
group: model
base:
- 3d-unet_
env:
- CM_MODEL: 3d-unet-99.9
- CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.onnx"
- CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion
- CM_ML_MODEL_INPUTS_DATA_TYPE: int8
- CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8
+ MLC_MODEL: 3d-unet-99.9
+ MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.onnx"
+ MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion
+ MLC_ML_MODEL_INPUTS_DATA_TYPE: int8
+ MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8
rnnt:
group: model
env:
- CM_MODEL: rnnt
- CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3662521/files/DistributedDataParallel_1576581068.9962234-epoch-100.pt"
- CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion
- CM_ML_MODEL_INPUTS_DATA_TYPE: fp16
- CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp16
+ MLC_MODEL: rnnt
+ MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3662521/files/DistributedDataParallel_1576581068.9962234-epoch-100.pt"
+ MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion
+ MLC_ML_MODEL_INPUTS_DATA_TYPE: fp16
+ MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp16
deps:
- tags: get,generic-python-lib,_toml
- tags: get,generic-python-lib,_torchvision_cuda
@@ -613,8 +613,8 @@ variations:
dlrm_:
new_env_keys:
- - CM_DLRM_V2_DAY23_FILE_PATH
- - CM_DLRM_V2_AGGREGATION_TRACE_FILE_PATH
+ - MLC_DLRM_V2_DAY23_FILE_PATH
+ - MLC_DLRM_V2_AGGREGATION_TRACE_FILE_PATH
deps:
- tags: get,dlrm,data,mlperf,inference,_nvidia
- tags: get,generic-python-lib,_package.torchsnapshot
@@ -630,20 +630,20 @@ variations:
base:
- dlrm_
env:
- CM_MODEL: dlrm-v2-99
- CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: affine fusion
- CM_ML_MODEL_INPUTS_DATA_TYPE: fp32
- CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp16
+ MLC_MODEL: dlrm-v2-99
+ MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: affine fusion
+ MLC_ML_MODEL_INPUTS_DATA_TYPE: fp32
+ MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp16
dlrm-v2-99.9:
group: model
base:
- dlrm_
env:
- CM_MODEL: dlrm-v2-99.9
- CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: affine fusion
- CM_ML_MODEL_INPUTS_DATA_TYPE: fp32
- CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp16
+ MLC_MODEL: dlrm-v2-99.9
+ MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: affine fusion
+ MLC_ML_MODEL_INPUTS_DATA_TYPE: fp32
+ MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp16
llama2-70b_:
deps:
@@ -678,24 +678,24 @@ variations:
names:
- rouge-score
env:
- CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://github.com/mlcommons/cm4mlops/blob/b18ff890ff559e21d2e27a3b54cd26467ac1fd9e/script/get-ml-model-llama2/_cm.json#L51"
- CM_ML_MODEL_INPUTS_DATA_TYPE: int32
- CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp16
- CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion
+ MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://github.com/mlcommons/cm4mlops/blob/b18ff890ff559e21d2e27a3b54cd26467ac1fd9e/script/get-ml-model-llama2/_cm.json#L51"
+ MLC_ML_MODEL_INPUTS_DATA_TYPE: int32
+ MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp16
+ MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion
llama2-70b-99:
group: model
base:
- llama2-70b_
env:
- CM_MODEL: llama2-70b-99
+ MLC_MODEL: llama2-70b-99
llama2-70b-99.9:
group: model
base:
- llama2-70b_
env:
- CM_MODEL: llama2-70b-99.9
+ MLC_MODEL: llama2-70b-99.9
gptj_:
deps:
@@ -706,7 +706,7 @@ variations:
- tags: get,generic-python-lib,_onnx-graphsurgeon
- tags: get,generic-python-lib,_package.sympy
env:
- CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://cloud.mlcommons.org/index.php/s/QAZ2oM94MkFtbQx/download"
+ MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://cloud.mlcommons.org/index.php/s/QAZ2oM94MkFtbQx/download"
gptj_,build:
deps:
@@ -729,33 +729,33 @@ variations:
base:
- gptj_
env:
- CM_MODEL: gptj-99
- CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion
- CM_ML_MODEL_INPUTS_DATA_TYPE: int32
- CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp16
+ MLC_MODEL: gptj-99
+ MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion
+ MLC_ML_MODEL_INPUTS_DATA_TYPE: int32
+ MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp16
gptj-99.9:
group: model
base:
- gptj_
env:
- CM_MODEL: gptj-99.9
- CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion
- CM_ML_MODEL_INPUTS_DATA_TYPE: int32
- CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp16
+ MLC_MODEL: gptj-99.9
+ MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion
+ MLC_ML_MODEL_INPUTS_DATA_TYPE: int32
+ MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp16
batch_size.#:
group: batch-size
env:
- CM_MODEL_BATCH_SIZE: "#"
- CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE: "#"
- #CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: "gpu_batch_size.#"
+ MLC_MODEL_BATCH_SIZE: "#"
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE: "#"
+ #MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: "gpu_batch_size.#"
dla_batch_size.#:
group: dla-batch-size
env:
- CM_MLPERF_NVIDIA_HARNESS_DLA_BATCH_SIZE: "#"
- CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX2: "dla_batch_size.#"
+ MLC_MLPERF_NVIDIA_HARNESS_DLA_BATCH_SIZE: "#"
+ MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX2: "dla_batch_size.#"
adr:
build-engine:
tags: _dla_batch_size.#
@@ -763,25 +763,25 @@ variations:
use_triton:
group: triton
env:
- CM_MLPERF_NVIDIA_HARNESS_USE_TRITON: "yes"
- CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX3: "using_triton"
+ MLC_MLPERF_NVIDIA_HARNESS_USE_TRITON: "yes"
+ MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX3: "using_triton"
use-graphs:
group: graphs
env:
- CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: "yes"
+ MLC_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: "yes"
prebuild:
group: run-mode
env:
MLPERF_NVIDIA_RUN_COMMAND: prebuild
- CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: prebuild
+ MLC_MLPERF_NVIDIA_HARNESS_RUN_MODE: prebuild
build:
group: run-mode
env:
MLPERF_NVIDIA_RUN_COMMAND: build
- CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: build
+ MLC_MLPERF_NVIDIA_HARNESS_RUN_MODE: build
deps:
- tags: get,cmake
version_min: "3.18"
@@ -826,12 +826,12 @@ variations:
maxq:
group: power-mode
env:
- CM_MLPERF_NVIDIA_HARNESS_MAXQ: yes
+ MLC_MLPERF_NVIDIA_HARNESS_MAXQ: yes
maxn:
group: power-mode
env:
- CM_MLPERF_NVIDIA_HARNESS_MAXN: yes
+ MLC_MLPERF_NVIDIA_HARNESS_MAXN: yes
preprocess-data:
alias: preprocess_data
@@ -840,7 +840,7 @@ variations:
group: run-mode
env:
MLPERF_NVIDIA_RUN_COMMAND: preprocess_data
- CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: preprocess_data
+ MLC_MLPERF_NVIDIA_HARNESS_RUN_MODE: preprocess_data
download-model:
    alias: download_model
@@ -849,18 +849,18 @@ variations:
group: run-mode
env:
MLPERF_NVIDIA_RUN_COMMAND: download_model
- CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: download_model
+ MLC_MLPERF_NVIDIA_HARNESS_RUN_MODE: download_model
deps:
- tags: get,generic-python-lib,_torch_cuda
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- retinanet
calibrate:
group: run-mode
env:
MLPERF_NVIDIA_RUN_COMMAND: calibrate
- CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: calibrate
+ MLC_MLPERF_NVIDIA_HARNESS_RUN_MODE: calibrate
deps:
- tags: reproduce,mlperf,inference,nvidia,harness,_download_model
inherit_variation_tags: true
@@ -874,7 +874,7 @@ variations:
- batch-size
- triton
skip_if_env:
- CM_MODEL:
+ MLC_MODEL:
- retinanet_old
- resnet50
- bert-99
@@ -891,7 +891,7 @@ variations:
loadgen-scenario: offline
env:
MLPERF_NVIDIA_RUN_COMMAND: generate_engines
- CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: generate_engines
+ MLC_MLPERF_NVIDIA_HARNESS_RUN_MODE: generate_engines
deps:
# Detect CUDA
- names:
@@ -923,7 +923,7 @@ variations:
- triton
- build-engine-options
skip_if_env:
- CM_MODEL:
+ MLC_MODEL:
- dlrm-v2-99
- dlrm-v2-99.9
@@ -941,7 +941,7 @@ variations:
- power-mode
- build-engine-options
skip_if_env:
- CM_MODEL:
+ MLC_MODEL:
- retinanet_old
- resnet50
- bert-99
@@ -952,7 +952,7 @@ variations:
- tags: reproduce,mlperf,inference,nvidia,harness,_calibrate
inherit_variation_tags: true
enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- retinanet
force_cache: true
skip_inherit_variation_groups:
@@ -971,20 +971,20 @@ variations:
singlestream:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: SingleStream
+ MLC_MLPERF_LOADGEN_SCENARIO: SingleStream
CUDA_VISIBLE_DEVICES_NOT_USED: "0"
multistream:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: MultiStream
+ MLC_MLPERF_LOADGEN_SCENARIO: MultiStream
offline:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: Offline
+ MLC_MLPERF_LOADGEN_SCENARIO: Offline
server:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: Server
+ MLC_MLPERF_LOADGEN_SCENARIO: Server
run-harness:
    alias: run_harness
@@ -1036,7 +1036,7 @@ variations:
- build-engine-options
force_cache: true
skip_if_env:
- CM_MODEL:
+ MLC_MODEL:
- dlrm-v2-99
- dlrm-v2-99.9
@@ -1053,7 +1053,7 @@ variations:
- build-engine-options
force_cache: true
skip_if_env:
- CM_MODEL:
+ MLC_MODEL:
- retinanet
- resnet50
- bert-99
@@ -1062,73 +1062,73 @@ variations:
- dlrm-v2-99.9
- stable-diffusion-xl
env:
- CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: run_harness
+ MLC_MLPERF_NVIDIA_HARNESS_RUN_MODE: run_harness
MLPERF_NVIDIA_RUN_COMMAND: run_harness
- CM_CALL_MLPERF_RUNNER: 'yes'
+ MLC_CALL_MLPERF_RUNNER: 'yes'
new_env_keys:
- - CM_MLPERF_*
- - CM_DATASET_*
- - CM_ML_MODEL_*
- - CM_HW_NAME
- - CM_MAX_EXAMPLES
+ - MLC_MLPERF_*
+ - MLC_DATASET_*
+ - MLC_ML_MODEL_*
+ - MLC_HW_NAME
+ - MLC_MAX_EXAMPLES
new_state_keys:
- mlperf-inference-implementation
- - CM_SUT_*
+ - MLC_SUT_*
build_engine_options.#:
group: build-engine-options
env:
- CM_MLPERF_NVIDIA_HARNESS_EXTRA_BUILD_ENGINE_OPTIONS: "#"
+ MLC_MLPERF_NVIDIA_HARNESS_EXTRA_BUILD_ENGINE_OPTIONS: "#"
gpu_memory.16:
group: device-memory
env:
- CM_NVIDIA_GPU_MEMORY: "16"
+ MLC_NVIDIA_GPU_MEMORY: "16"
gpu_memory.24:
group: device-memory
env:
- CM_NVIDIA_GPU_MEMORY: "24"
+ MLC_NVIDIA_GPU_MEMORY: "24"
gpu_memory.8:
group: device-memory
env:
- CM_NVIDIA_GPU_MEMORY: "8"
+ MLC_NVIDIA_GPU_MEMORY: "8"
gpu_memory.32:
group: device-memory
env:
- CM_NVIDIA_GPU_MEMORY: "32"
+ MLC_NVIDIA_GPU_MEMORY: "32"
gpu_memory.40:
group: device-memory
env:
- CM_NVIDIA_GPU_MEMORY: "40"
+ MLC_NVIDIA_GPU_MEMORY: "40"
gpu_memory.48:
group: device-memory
env:
- CM_NVIDIA_GPU_MEMORY: "48"
+ MLC_NVIDIA_GPU_MEMORY: "48"
gpu_memory.80:
group: device-memory
env:
- CM_NVIDIA_GPU_MEMORY: "80"
+ MLC_NVIDIA_GPU_MEMORY: "80"
gpu_memory.#:
group: device-memory
env:
- CM_NVIDIA_GPU_MEMORY: "#"
+ MLC_NVIDIA_GPU_MEMORY: "#"
singlestream,resnet50:
env:
- CM_MLPERF_NVIDIA_HARNESS_DISABLE_BETA1_SMALLK: yes
+ MLC_MLPERF_NVIDIA_HARNESS_DISABLE_BETA1_SMALLK: yes
      SKIP_POLICIES: '0' # skip_policies used to give better latency but does not work with NVIDIA code 4.0 and later
server,resnet50:
env:
- CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 2000
- CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: True
- CM_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE: True
- CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: 9
- CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: 2
+ MLC_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 2000
+ MLC_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: True
+ MLC_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE: True
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: 9
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: 2
multistream,resnet50:
env:
- CM_MLPERF_NVIDIA_HARNESS_DISABLE_BETA1_SMALLK: yes
+ MLC_MLPERF_NVIDIA_HARNESS_DISABLE_BETA1_SMALLK: yes
SKIP_POLICIES: '0'
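
Note on the comma-separated keys above (`server,resnet50`, `multistream,resnet50`): such a key applies only when every named variation is active, and its env entries are layered over those contributed by the individual variations. A minimal sketch of that precedence, using a hypothetical resolver (the automation framework's real logic is more involved):

    # Hypothetical sketch of combined-variation env merging.
    # Later (more specific, comma-joined) entries override earlier ones.
    def resolve_env(active, variations):
        env = {}
        # single variations first, then combined keys such as "server,resnet50"
        for key, meta in sorted(variations.items(),
                                key=lambda kv: kv[0].count(',')):
            parts = set(key.split(','))
            if parts.issubset(active):
                env.update(meta.get('env', {}))
        return env

    variations = {
        'resnet50': {'env': {'MLC_MODEL': 'resnet50'}},
        'server': {'env': {'MLC_MLPERF_LOADGEN_SCENARIO': 'Server'}},
        'server,resnet50': {'env': {'MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS': 9}},
    }
    print(resolve_env({'resnet50', 'server'}, variations))
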
singlestream,run_harness:
@@ -1137,7 +1137,7 @@ variations:
llama2-70b_,run_harness:
env:
- CM_MLPERF_NVIDIA_HARNESS_USE_FP8: 'True'
+ MLC_MLPERF_NVIDIA_HARNESS_USE_FP8: 'True'
gptj_,run_harness:
deps:
@@ -1147,10 +1147,10 @@ variations:
- tags: get,cmake
version_min: "3.25.0"
env:
- CM_MLPERF_NVIDIA_HARNESS_USE_FP8: 'True'
- CM_MLPERF_NVIDIA_HARNESS_ENABLE_SORT: 'True'
- CM_MLPERF_NVIDIA_HARNESS_NUM_SORT_SEGMENTS: '2'
- CM_MLPERF_NVIDIA_HARNESS_SKIP_POSTPROCESS: True
+ MLC_MLPERF_NVIDIA_HARNESS_USE_FP8: 'True'
+ MLC_MLPERF_NVIDIA_HARNESS_ENABLE_SORT: 'True'
+ MLC_MLPERF_NVIDIA_HARNESS_NUM_SORT_SEGMENTS: '2'
+ MLC_MLPERF_NVIDIA_HARNESS_SKIP_POSTPROCESS: True
gpu_memory.80,num-gpus.2,llama2-70b,offline,run_harness:
default_variations:
@@ -1244,13 +1244,13 @@ variations:
default_variations:
batch-size: batch_size.64
env:
- CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4"
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4"
gpu_memory.16,resnet50,offline,run_harness:
default_variations:
batch-size: batch_size.1024
env:
- CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4"
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4"
gpu_memory.40,resnet50,offline,run_harness:
default_variations:
@@ -1275,13 +1275,13 @@ variations:
num-gpus.#:
group: num-gpus
env:
- CM_NVIDIA_NUM_GPUS: "#"
+ MLC_NVIDIA_NUM_GPUS: "#"
num-gpus.1:
group: num-gpus
default: true
env:
- CM_NVIDIA_NUM_GPUS: "1"
+ MLC_NVIDIA_NUM_GPUS: "1"
resnet50,server,run_harness:
default_variations:
@@ -1323,8 +1323,8 @@ variations:
default_variations:
batch-size: batch_size.2
env:
- CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2"
- CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2"
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2"
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2"
gpu_memory.80,retinanet,offline,run_harness:
default_variations:
@@ -1334,8 +1334,8 @@ variations:
default_variations:
batch-size: batch_size.8
env:
- CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2"
- CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2"
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2"
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2"
gpu_memory.8,rnnt,offline,run_harness:
default_variations:
@@ -1401,13 +1401,13 @@ variations:
default_variations:
batch-size: batch_size.1400
env:
- CM_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART: "0.40"
+ MLC_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART: "0.40"
gpu_memory.24,dlrm_,offline,run_harness:
default_variations:
batch-size: batch_size.1400
env:
- CM_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART: "0.30"
+ MLC_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART: "0.30"
gpu_memory.32,dlrm_,offline,run_harness:
default_variations:
@@ -1417,7 +1417,7 @@ variations:
default_variations:
batch-size: batch_size.1400
env:
- CM_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART: "0.50"
+ MLC_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART: "0.50"
gpu_memory.80,dlrm_,offline,run_harness:
default_variations:
@@ -1426,13 +1426,13 @@ variations:
orin:
group: gpu-name
env:
- CM_NVIDIA_CUSTOM_GPU: "yes"
- CM_MODEL_BATCH_SIZE: "" #we pick from nvidia config
- CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE: "<<>>"
+ MLC_NVIDIA_CUSTOM_GPU: "yes"
+ MLC_MODEL_BATCH_SIZE: "" #we pick from nvidia config
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE: "<<>>"
orin,rnnt,singlestream,run_harness:
env:
- CM_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS: "1"
+ MLC_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS: "1"
orin,sdxl,offline,run_harness:
default_variations:
@@ -1441,7 +1441,7 @@ variations:
rtx_4090:
group: gpu-name
env:
- CM_NVIDIA_CUSTOM_GPU: "yes"
+ MLC_NVIDIA_CUSTOM_GPU: "yes"
rtx_4090,sdxl,offline,run_harness:
default_variations:
@@ -1466,15 +1466,15 @@ variations:
default_variations:
batch-size: batch_size.2
env:
- CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2"
- CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2"
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2"
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2"
rtx_4090,retinanet,server,run_harness:
default_variations:
batch-size: batch_size.2
env:
- CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2"
- CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2"
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2"
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2"
rtx_4090,bert_,offline,run_harness:
default_variations:
@@ -1512,12 +1512,12 @@ variations:
default_variations:
batch-size: batch_size.1400
env:
- CM_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART: "0.30"
+ MLC_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART: "0.30"
a6000:
group: gpu-name
env:
- CM_NVIDIA_CUSTOM_GPU: "yes"
+ MLC_NVIDIA_CUSTOM_GPU: "yes"
rtx_a6000,resnet50,offline,run_harness:
default_variations:
@@ -1566,7 +1566,7 @@ variations:
rtx_6000_ada:
group: gpu-name
env:
- CM_NVIDIA_CUSTOM_GPU: "yes"
+ MLC_NVIDIA_CUSTOM_GPU: "yes"
rtx_6000_ada,resnet50,offline,run_harness:
default_variations:
@@ -1615,63 +1615,63 @@ variations:
l4:
group: gpu-name
env:
- CM_NVIDIA_CUSTOM_GPU: "yes"
+ MLC_NVIDIA_CUSTOM_GPU: "yes"
l4,sdxl,offline,run_harness:
default_variations:
batch-size: batch_size.1
env:
- CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True'
- CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS: 0.6
+ MLC_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True'
+ MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS: 0.6
  l4,sdxl,offline,run_harness,num-gpus.8:
default_variations:
batch-size: batch_size.1
env:
- CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True'
- CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS: 4.8
+ MLC_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True'
+ MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS: 4.8
  l4,sdxl,server,run_harness,num-gpus.1:
default_variations:
batch-size: batch_size.1
env:
- CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True'
- CM_MLPERF_LOADGEN_SERVER_TARGET_QPS: 0.55
- CM_MLPERF_NVIDIA_HARNESS_SDXL_SERVER_BATCHER_TIME_LIMIT: 0
+ MLC_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True'
+ MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS: 0.55
+ MLC_MLPERF_NVIDIA_HARNESS_SDXL_SERVER_BATCHER_TIME_LIMIT: 0
  l4,sdxl,server,run_harness,num-gpus.8:
default_variations:
batch-size: batch_size.1
env:
- CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True'
- CM_MLPERF_LOADGEN_SERVER_TARGET_QPS: 5.05
- CM_MLPERF_NVIDIA_HARNESS_SDXL_SERVER_BATCHER_TIME_LIMIT: 0
+ MLC_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True'
+ MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS: 5.05
+ MLC_MLPERF_NVIDIA_HARNESS_SDXL_SERVER_BATCHER_TIME_LIMIT: 0
l4,resnet50:
default_env:
- CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS: 10500
- CM_MLPERF_LOADGEN_SERVER_TARGET_QPS: 9000
- CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY: 0.35
- CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY: 1
+ MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS: 10500
+ MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS: 9000
+ MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY: 0.35
+ MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY: 1
l4,resnet50,offline,run_harness:
default_variations:
batch-size: batch_size.32
env:
- CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2"
- CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "1"
- CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True'
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2"
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "1"
+ MLC_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True'
l4,resnet50,server,run_harness:
default_variations:
batch-size: batch_size.16
env:
- CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "9"
- CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2"
- CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True'
- CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: 'True'
- CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 2000
- CM_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE: 'True'
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "9"
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2"
+ MLC_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True'
+ MLC_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: 'True'
+ MLC_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 2000
+ MLC_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE: 'True'
l4,retinanet,offline,run_harness:
default_variations:
@@ -1681,11 +1681,11 @@ variations:
default_variations:
batch-size: batch_size.2
env:
- CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2"
- CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2"
- CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: 'True'
- CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 30000
- CM_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE: 20000000000
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2"
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2"
+ MLC_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: 'True'
+ MLC_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 30000
+ MLC_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE: 20000000000
l4,bert_,offline,run_harness:
default_variations:
@@ -1695,10 +1695,10 @@ variations:
default_variations:
batch-size: batch_size.16
env:
- CM_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN: "200"
- CM_MLPERF_NVIDIA_HARNESS_SERVER_NUM_ISSUE_QUERY_THREADS: "1"
- CM_MLPERF_NVIDIA_HARNESS_SOFT_DROP: "1.0"
- CM_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN: "True"
+ MLC_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN: "200"
+ MLC_MLPERF_NVIDIA_HARNESS_SERVER_NUM_ISSUE_QUERY_THREADS: "1"
+ MLC_MLPERF_NVIDIA_HARNESS_SOFT_DROP: "1.0"
+ MLC_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN: "True"
l4,3d-unet_,offline,run_harness:
default_variations:
@@ -1712,9 +1712,9 @@ variations:
default_variations:
batch-size: batch_size.512
env:
- CM_MLPERF_NVIDIA_HARNESS_AUDIO_BATCH_SIZE: "64"
- CM_MLPERF_NVIDIA_HARNESS_AUDIO_BUFFER_NUM_LINES: "1024"
- CM_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS: "1024"
+ MLC_MLPERF_NVIDIA_HARNESS_AUDIO_BATCH_SIZE: "64"
+ MLC_MLPERF_NVIDIA_HARNESS_AUDIO_BUFFER_NUM_LINES: "1024"
+ MLC_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS: "1024"
l4,dlrm_,offline,run_harness:
default_variations:
@@ -1722,30 +1722,30 @@ variations:
t4:
group: gpu-name
env:
- CM_NVIDIA_CUSTOM_GPU: "yes"
+ MLC_NVIDIA_CUSTOM_GPU: "yes"
t4,resnet50:
default_env:
- CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS: 4900
- CM_MLPERF_LOADGEN_SERVER_TARGET_QPS: 4000
- CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY: 0.6
- CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY: 2
+ MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS: 4900
+ MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS: 4000
+ MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY: 0.6
+ MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY: 2
t4,resnet50,offline,run_harness:
default_variations:
batch-size: batch_size.256
env:
- CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4"
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4"
t4,resnet50,server,run_harness:
default_variations:
batch-size: batch_size.26
env:
- CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2"
- CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4"
- CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: True
- CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 2000
- CM_MLPERF_NVIDIA_HARNESS_SOFT_DROP: "0.993"
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2"
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4"
+ MLC_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: True
+ MLC_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 2000
+ MLC_MLPERF_NVIDIA_HARNESS_SOFT_DROP: "0.993"
t4,retinanet,offline,run_harness:
default_variations:
@@ -1755,11 +1755,11 @@ variations:
default_variations:
batch-size: batch_size.2
env:
- CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2"
- CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2"
- CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: 'True'
- CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 20000
- CM_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE: 20000000000
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2"
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2"
+ MLC_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: 'True'
+ MLC_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 20000
+ MLC_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE: 20000000000
t4,bert_,offline,run_harness:
default_variations:
@@ -1769,9 +1769,9 @@ variations:
default_variations:
batch-size: batch_size.4
env:
- CM_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN: "240"
- CM_MLPERF_NVIDIA_HARNESS_SERVER_NUM_ISSUE_QUERY_THREADS: "0"
- CM_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN: "no"
+ MLC_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN: "240"
+ MLC_MLPERF_NVIDIA_HARNESS_SERVER_NUM_ISSUE_QUERY_THREADS: "0"
+ MLC_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN: "no"
t4,3d-unet_,offline,run_harness:
default_variations:
@@ -1781,19 +1781,19 @@ variations:
default_variations:
batch-size: batch_size.2048
env:
- CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4"
- CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True'
- CM_MLPERF_NVIDIA_HARNESS_AUDIO_BATCH_SIZE: "128"
- CM_MLPERF_NVIDIA_HARNESS_DISABLE_ENCODER_PLUGIN: "True"
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4"
+ MLC_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True'
+ MLC_MLPERF_NVIDIA_HARNESS_AUDIO_BATCH_SIZE: "128"
+ MLC_MLPERF_NVIDIA_HARNESS_DISABLE_ENCODER_PLUGIN: "True"
t4,rnnt,server,run_harness:
default_variations:
batch-size: batch_size.2048
env:
- CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4"
- CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True'
- CM_MLPERF_NVIDIA_HARNESS_AUDIO_BATCH_SIZE: "128"
- CM_MLPERF_NVIDIA_HARNESS_DISABLE_ENCODER_PLUGIN: "True"
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4"
+ MLC_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True'
+ MLC_MLPERF_NVIDIA_HARNESS_AUDIO_BATCH_SIZE: "128"
+ MLC_MLPERF_NVIDIA_HARNESS_DISABLE_ENCODER_PLUGIN: "True"
t4,dlrm_,offline,run_harness:
default_variations:
@@ -1808,30 +1808,30 @@ variations:
custom:
group: gpu-name
env:
- CM_NVIDIA_CUSTOM_GPU: "yes"
- CM_MODEL_BATCH_SIZE: "" #we pick from nvidia config
- CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE: "<<>>"
+ MLC_NVIDIA_CUSTOM_GPU: "yes"
+ MLC_MODEL_BATCH_SIZE: "" #we pick from nvidia config
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE: "<<>>"
a100:
    default_variations:
gpu-connection: sxm
group: gpu-name
env:
- CM_NVIDIA_CUSTOM_GPU: "yes"
+ MLC_NVIDIA_CUSTOM_GPU: "yes"
a100,sxm,resnet50,offline,run_harness:
default_variations:
batch-size: batch_size.2048
env:
- CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT: "2048"
+ MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT: "2048"
a100,sxm,retinanet,offline,run_harness:
default_variations:
batch-size: batch_size.32
env:
- CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2"
- CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2"
- CM_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE: "300000000000"
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2"
+ MLC_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2"
+ MLC_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE: "300000000000"
a100,sxm,bert_,offline,run_harness:
default_variations:
diff --git a/script/app-mlperf-inference-nvidia/run.sh b/script/app-mlperf-inference-nvidia/run.sh
index ddcd0b550..0c6a8fc4a 100644
--- a/script/app-mlperf-inference-nvidia/run.sh
+++ b/script/app-mlperf-inference-nvidia/run.sh
@@ -1,7 +1,7 @@
#!/bin/bash
-if [[ ${CM_CALL_MLPERF_RUNNER} == "no" ]]; then
- cd ${CM_RUN_DIR}
- cmd=${CM_RUN_CMD}
+if [[ ${MLC_CALL_MLPERF_RUNNER} == "no" ]]; then
+ cd ${MLC_RUN_DIR}
+ cmd=${MLC_RUN_CMD}
echo "${cmd}"
eval "${cmd}"
test $? -eq 0 || exit $?
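
Every file in this patch applies the same mechanical rename of the `CM_` env prefix to `MLC_`. During a staged migration it can help to accept both spellings when reading the env; the helper below is a hypothetical compatibility shim, not part of this repository:

    # Hypothetical back-compat lookup for the CM_ -> MLC_ prefix migration:
    # prefer the new MLC_ name, fall back to the legacy CM_ name.
    def get_mlc_env(env, name, default=''):
        assert name.startswith('MLC_')
        legacy = 'CM_' + name[len('MLC_'):]
        return env.get(name, env.get(legacy, default))

    env = {'CM_RUN_DIR': '/tmp/run'}  # an old-style entry
    print(get_mlc_env(env, 'MLC_RUN_DIR'))  # -> /tmp/run
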
diff --git a/script/app-mlperf-inference-qualcomm/customize.py b/script/app-mlperf-inference-qualcomm/customize.py
index e14de6d5a..8b64acb6d 100644
--- a/script/app-mlperf-inference-qualcomm/customize.py
+++ b/script/app-mlperf-inference-qualcomm/customize.py
@@ -11,33 +11,33 @@ def preprocess(i):
return {'return': 1, 'error': 'Windows is not supported in this script yet'}
env = i['env']
- if env.get('CM_MLPERF_SKIP_RUN', '') == "yes":
+ if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes":
return {'return': 0}
- if 'CM_MODEL' not in env:
+ if 'MLC_MODEL' not in env:
return {
'return': 1, 'error': 'Please select a variation specifying the model to run'}
- if 'CM_MLPERF_BACKEND' not in env:
+ if 'MLC_MLPERF_BACKEND' not in env:
return {'return': 1,
'error': 'Please select a variation specifying the backend'}
- if 'CM_MLPERF_DEVICE' not in env:
+ if 'MLC_MLPERF_DEVICE' not in env:
return {
'return': 1, 'error': 'Please select a variation specifying the device to run on'}
- kilt_root = env['CM_KILT_CHECKOUT_PATH']
+ kilt_root = env['MLC_KILT_CHECKOUT_PATH']
print(f"Harness Root: {kilt_root}")
source_files = []
- env['CM_SOURCE_FOLDER_PATH'] = env['CM_KILT_CHECKOUT_PATH']
+ env['MLC_SOURCE_FOLDER_PATH'] = env['MLC_KILT_CHECKOUT_PATH']
- env['kilt_model_root'] = env.get('CM_ML_MODEL_FILE_WITH_PATH')
+ env['kilt_model_root'] = env.get('MLC_ML_MODEL_FILE_WITH_PATH')
- if env.get('CM_MLPERF_LOADGEN_BATCH_SIZE', '') != '':
- env['kilt_model_batch_size'] = env['CM_MLPERF_LOADGEN_BATCH_SIZE']
+ if env.get('MLC_MLPERF_LOADGEN_BATCH_SIZE', '') != '':
+ env['kilt_model_batch_size'] = env['MLC_MLPERF_LOADGEN_BATCH_SIZE']
- if env.get('CM_QAIC_DEVICES', '') != '':
- env['kilt_device_ids'] = env['CM_QAIC_DEVICES']
+ if env.get('MLC_QAIC_DEVICES', '') != '':
+ env['kilt_device_ids'] = env['MLC_QAIC_DEVICES']
if '+ CXXFLAGS' not in env:
env['+ CXXFLAGS'] = []
@@ -45,40 +45,40 @@ def preprocess(i):
if '+CPLUS_INCLUDE_PATH' not in env:
env['+CPLUS_INCLUDE_PATH'] = []
- if env['CM_MLPERF_DEVICE'] == "qaic":
+ if env['MLC_MLPERF_DEVICE'] == "qaic":
env['kilt_model_root'] = os.path.dirname(
- env['CM_QAIC_MODEL_COMPILED_BINARY_WITH_PATH'])
+ env['MLC_QAIC_MODEL_COMPILED_BINARY_WITH_PATH'])
- if env.get('CM_MODEL') == "resnet50":
- env['dataset_imagenet_preprocessed_subset_fof'] = env['CM_DATASET_PREPROCESSED_IMAGENAMES_LIST']
- env['dataset_imagenet_preprocessed_dir'] = env['CM_DATASET_PREPROCESSED_PATH']
+ if env.get('MLC_MODEL') == "resnet50":
+ env['dataset_imagenet_preprocessed_subset_fof'] = env['MLC_DATASET_PREPROCESSED_IMAGENAMES_LIST']
+ env['dataset_imagenet_preprocessed_dir'] = env['MLC_DATASET_PREPROCESSED_PATH']
- elif "bert" in env.get('CM_MODEL'):
- env['dataset_squad_tokenized_max_seq_length'] = env['CM_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH']
- env['dataset_squad_tokenized_root'] = env['CM_DATASET_SQUAD_TOKENIZED_ROOT']
+ elif "bert" in env.get('MLC_MODEL'):
+ env['dataset_squad_tokenized_max_seq_length'] = env['MLC_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH']
+ env['dataset_squad_tokenized_root'] = env['MLC_DATASET_SQUAD_TOKENIZED_ROOT']
env['dataset_squad_tokenized_input_ids'] = os.path.basename(
- env['CM_DATASET_SQUAD_TOKENIZED_INPUT_IDS'])
+ env['MLC_DATASET_SQUAD_TOKENIZED_INPUT_IDS'])
env['dataset_squad_tokenized_input_mask'] = os.path.basename(
- env['CM_DATASET_SQUAD_TOKENIZED_INPUT_MASK'])
+ env['MLC_DATASET_SQUAD_TOKENIZED_INPUT_MASK'])
env['dataset_squad_tokenized_segment_ids'] = os.path.basename(
- env['CM_DATASET_SQUAD_TOKENIZED_SEGMENT_IDS'])
+ env['MLC_DATASET_SQUAD_TOKENIZED_SEGMENT_IDS'])
- elif "retinanet" in env.get('CM_MODEL'):
+ elif "retinanet" in env.get('MLC_MODEL'):
env['kilt_prior_bin_path'] = os.path.join(
kilt_root, "plugins", "nms-abp", "data")
env['kilt_object_detection_preprocessed_subset_fof'] = os.path.basename(
- env['CM_DATASET_PREPROCESSED_IMAGENAMES_LIST'])
- env['kilt_object_detection_preprocessed_dir'] = env['CM_DATASET_PREPROCESSED_PATH']
+ env['MLC_DATASET_PREPROCESSED_IMAGENAMES_LIST'])
+ env['kilt_object_detection_preprocessed_dir'] = env['MLC_DATASET_PREPROCESSED_PATH']
env['+ CXXFLAGS'].append("-DMODEL_RX50")
env['+ CXXFLAGS'].append("-DSDK_1_11_X")
- loc_offset = env.get('CM_QAIC_MODEL_RETINANET_LOC_OFFSET')
+ loc_offset = env.get('MLC_QAIC_MODEL_RETINANET_LOC_OFFSET')
if loc_offset:
env['+ CXXFLAGS'].append("-DMODEL_RX50")
keys = ['LOC_OFFSET', 'LOC_SCALE', 'CONF_OFFSET', 'CONF_SCALE']
- if env.get('CM_RETINANET_USE_MULTIPLE_SCALES_OFFSETS', '') == 'yes':
+ if env.get('MLC_RETINANET_USE_MULTIPLE_SCALES_OFFSETS', '') == 'yes':
env['+ CXXFLAGS'].append("-DUSE_MULTIPLE_SCALES_OFFSETS=1")
for j in range(0, 4):
keys.append(f'LOC_OFFSET{j}')
@@ -87,11 +87,11 @@ def preprocess(i):
keys.append(f'CONF_SCALE{j}')
for key in keys:
- value = env.get('CM_QAIC_MODEL_RETINANET_' + key, '')
+ value = env.get('MLC_QAIC_MODEL_RETINANET_' + key, '')
if value != '':
env['+ CXXFLAGS'].append(f" -D{key}_={value} ")
- if env.get('CM_BENCHMARK', '') == 'NETWORK_BERT_SERVER':
+ if env.get('MLC_BENCHMARK', '') == 'NETWORK_BERT_SERVER':
source_files.append(
os.path.join(
kilt_root,
@@ -109,12 +109,12 @@ def preprocess(i):
"server",
"server.cpp"))
env['+ CXXFLAGS'].append("-DNETWORK_DIVISION=1")
- elif env.get('CM_BENCHMARK', '') == 'NETWORK_BERT_CLIENT':
+ elif env.get('MLC_BENCHMARK', '') == 'NETWORK_BERT_CLIENT':
# source_files.append(os.path.join(kilt_root, "benchmarks", "network", "bert", "client", "pack.cpp"))
# env['+CPLUS_INCLUDE_PATH'].append(kilt_root)
# source_files.append(os.path.join(kilt_root, "benchmarks", "network", "bert", "client", "client.cpp"))
env['+ CXXFLAGS'].append("-DNETWORK_DIVISION")
- elif env.get('CM_BENCHMARK', '') == 'STANDALONE_BERT':
+ elif env.get('MLC_BENCHMARK', '') == 'STANDALONE_BERT':
source_files.append(
os.path.join(
kilt_root,
@@ -124,14 +124,14 @@ def preprocess(i):
"pack.cpp"))
script_path = i['run_script_input']['path']
- if env['CM_MODEL'] == "retinanet":
- env['CM_DATASET_LIST'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH']
+ if env['MLC_MODEL'] == "retinanet":
+ env['MLC_DATASET_LIST'] = env['MLC_DATASET_ANNOTATIONS_FILE_PATH']
- for file in os.listdir(env['CM_SOURCE_FOLDER_PATH']):
+ for file in os.listdir(env['MLC_SOURCE_FOLDER_PATH']):
if file.endswith(".c") or file.endswith(".cpp"):
source_files.append(file)
- if 'SERVER' not in env.get('CM_BENCHMARK', ''):
+ if 'SERVER' not in env.get('MLC_BENCHMARK', ''):
source_files.append(
os.path.join(
kilt_root,
@@ -139,18 +139,18 @@ def preprocess(i):
"harness",
"harness.cpp"))
- # source_files.append(env['CM_QAIC_API_SRC_FILE'])
+ # source_files.append(env['MLC_QAIC_API_SRC_FILE'])
env['+CPLUS_INCLUDE_PATH'].append(kilt_root)
env['+C_INCLUDE_PATH'].append(kilt_root)
- if env['CM_MLPERF_DEVICE'] == 'gpu':
- env['+C_INCLUDE_PATH'].append(env['CM_CUDA_PATH_INCLUDE'])
- env['+CPLUS_INCLUDE_PATH'].append(env['CM_CUDA_PATH_INCLUDE'])
- env['+LD_LIBRARY_PATH'].append(env['CM_CUDA_PATH_LIB'])
- env['+DYLD_FALLBACK_LIBRARY_PATH'].append(env['CM_CUDA_PATH_INCLUDE'])
+ if env['MLC_MLPERF_DEVICE'] == 'gpu':
+ env['+C_INCLUDE_PATH'].append(env['MLC_CUDA_PATH_INCLUDE'])
+ env['+CPLUS_INCLUDE_PATH'].append(env['MLC_CUDA_PATH_INCLUDE'])
+ env['+LD_LIBRARY_PATH'].append(env['MLC_CUDA_PATH_LIB'])
+ env['+DYLD_FALLBACK_LIBRARY_PATH'].append(env['MLC_CUDA_PATH_INCLUDE'])
- elif env['CM_MLPERF_DEVICE'] == 'qaic':
+ elif env['MLC_MLPERF_DEVICE'] == 'qaic':
source_files.append(
os.path.join(
kilt_root,
@@ -161,24 +161,24 @@ def preprocess(i):
"QAicInfApi.cpp"))
print(f"Compiling the source files: {source_files}")
- env['CM_CXX_SOURCE_FILES'] = ";".join(source_files)
+ env['MLC_CXX_SOURCE_FILES'] = ";".join(source_files)
env['+ CXXFLAGS'].append("-std=c++17")
env['+ CXXFLAGS'].append("-fpermissive")
env['+ CXXFLAGS'].append("-DKILT_CONFIG_FROM_ENV")
env['+ CXXFLAGS'].append("-DKILT_CONFIG_TRANSLATE_X")
- env['+ CXXFLAGS'].append("-DKILT_BENCHMARK_" + env['CM_BENCHMARK'])
+ env['+ CXXFLAGS'].append("-DKILT_BENCHMARK_" + env['MLC_BENCHMARK'])
env['+ CXXFLAGS'].append("-DKILT_DEVICE_" + env['device'].upper())
- # add preprocessor flag like "#define CM_MODEL_RESNET50"
- # env['+ CXXFLAGS'].append('-DCM_MODEL_' + env['CM_MODEL'].upper())
- # add preprocessor flag like "#define CM_MLPERF_BACKEND_ONNXRUNTIME"
- env['+ CXXFLAGS'].append('-DCM_MLPERF_BACKEND_' +
- env['CM_MLPERF_BACKEND'].upper())
- # add preprocessor flag like "#define CM_MLPERF_DEVICE_CPU"
- env['+ CXXFLAGS'].append('-DCM_MLPERF_DEVICE_' +
- env['CM_MLPERF_DEVICE'].upper())
+ # add preprocessor flag like "#define MLC_MODEL_RESNET50"
+ # env['+ CXXFLAGS'].append('-DMLC_MODEL_' + env['MLC_MODEL'].upper())
+ # add preprocessor flag like "#define MLC_MLPERF_BACKEND_ONNXRUNTIME"
+ env['+ CXXFLAGS'].append('-DMLC_MLPERF_BACKEND_' +
+ env['MLC_MLPERF_BACKEND'].upper())
+ # add preprocessor flag like "#define MLC_MLPERF_DEVICE_CPU"
+ env['+ CXXFLAGS'].append('-DMLC_MLPERF_DEVICE_' +
+ env['MLC_MLPERF_DEVICE'].upper())
if '+ LDCXXFLAGS' not in env:
env['+ LDCXXFLAGS'] = []
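
The two `append` calls a few lines above turn env values into KILT preprocessor definitions, e.g. backend `onnxruntime` on device `cpu` yields `-DMLC_MLPERF_BACKEND_ONNXRUNTIME` and `-DMLC_MLPERF_DEVICE_CPU`. A worked example with assumed values:

    # Worked example of the -D flag derivation shown above (values assumed).
    env = {'MLC_MLPERF_BACKEND': 'onnxruntime', 'MLC_MLPERF_DEVICE': 'cpu'}
    flags = []
    flags.append('-DMLC_MLPERF_BACKEND_' + env['MLC_MLPERF_BACKEND'].upper())
    flags.append('-DMLC_MLPERF_DEVICE_' + env['MLC_MLPERF_DEVICE'].upper())
    print(flags)  # ['-DMLC_MLPERF_BACKEND_ONNXRUNTIME', '-DMLC_MLPERF_DEVICE_CPU']
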
@@ -189,33 +189,33 @@ def preprocess(i):
"-ldl"
]
# e.g. -lonnxruntime
- if 'CM_MLPERF_BACKEND_LIB_NAMESPEC' in env:
+ if 'MLC_MLPERF_BACKEND_LIB_NAMESPEC' in env:
env['+ LDCXXFLAGS'].append('-l' +
- env['CM_MLPERF_BACKEND_LIB_NAMESPEC'])
+ env['MLC_MLPERF_BACKEND_LIB_NAMESPEC'])
# e.g. -lcudart
- if 'CM_MLPERF_DEVICE_LIB_NAMESPEC' in env:
- env['+ LDCXXFLAGS'].append('-l' + env['CM_MLPERF_DEVICE_LIB_NAMESPEC'])
+ if 'MLC_MLPERF_DEVICE_LIB_NAMESPEC' in env:
+ env['+ LDCXXFLAGS'].append('-l' + env['MLC_MLPERF_DEVICE_LIB_NAMESPEC'])
if '-DPRINT_NETWORK_DESCRIPTOR' in env['+ CXXFLAGS']:
env['+ LDCXXFLAGS'].append('-lprotobuf')
- env['CM_LINKER_LANG'] = 'CXX'
- env['CM_RUN_DIR'] = env.get('CM_MLPERF_OUTPUT_DIR', os.getcwd())
+ env['MLC_LINKER_LANG'] = 'CXX'
+ env['MLC_RUN_DIR'] = env.get('MLC_MLPERF_OUTPUT_DIR', os.getcwd())
- if 'CM_MLPERF_CONF' not in env:
- env['CM_MLPERF_CONF'] = os.path.join(
- env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf")
- if 'CM_MLPERF_USER_CONF' not in env:
- env['CM_MLPERF_USER_CONF'] = os.path.join(
- env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf")
+ if 'MLC_MLPERF_CONF' not in env:
+ env['MLC_MLPERF_CONF'] = os.path.join(
+ env['MLC_MLPERF_INFERENCE_SOURCE'], "mlperf.conf")
+ if 'MLC_MLPERF_USER_CONF' not in env:
+ env['MLC_MLPERF_USER_CONF'] = os.path.join(
+ env['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf")
# to LOADGEN_MLPERF_CONF
- env['loadgen_mlperf_conf_path'] = env['CM_MLPERF_CONF']
+ env['loadgen_mlperf_conf_path'] = env['MLC_MLPERF_CONF']
# to LOADGEN_USER_CONF
- env['loadgen_user_conf_path'] = env['CM_MLPERF_USER_CONF']
- env['loadgen_scenario'] = env['CM_MLPERF_LOADGEN_SCENARIO']
+ env['loadgen_user_conf_path'] = env['MLC_MLPERF_USER_CONF']
+ env['loadgen_scenario'] = env['MLC_MLPERF_LOADGEN_SCENARIO']
- loadgen_mode = env['CM_MLPERF_LOADGEN_MODE']
+ loadgen_mode = env['MLC_MLPERF_LOADGEN_MODE']
if loadgen_mode == 'performance':
kilt_loadgen_mode = 'PerformanceOnly'
elif loadgen_mode == 'accuracy':
diff --git a/script/app-mlperf-inference-qualcomm/meta.yaml b/script/app-mlperf-inference-qualcomm/meta.yaml
index 5e3de4302..1e508e0e6 100644
--- a/script/app-mlperf-inference-qualcomm/meta.yaml
+++ b/script/app-mlperf-inference-qualcomm/meta.yaml
@@ -24,59 +24,59 @@ tags:
# Default environment
default_env:
- CM_BATCH_COUNT: '1'
- CM_BATCH_SIZE: '1'
- CM_FAST_COMPILATION: 'yes'
- CM_MLPERF_LOADGEN_SCENARIO: Offline
- CM_MLPERF_LOADGEN_MODE: performance
- CM_SKIP_PREPROCESS_DATASET: 'no'
- CM_SKIP_MODEL_DOWNLOAD: 'no'
- CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: kilt
- CM_MLPERF_SKIP_RUN: 'no'
- CM_KILT_REPO_URL: https://github.com/GATEOverflow/kilt-mlperf
- CM_QAIC_DEVICES: "0"
+ MLC_BATCH_COUNT: '1'
+ MLC_BATCH_SIZE: '1'
+ MLC_FAST_COMPILATION: 'yes'
+ MLC_MLPERF_LOADGEN_SCENARIO: Offline
+ MLC_MLPERF_LOADGEN_MODE: performance
+ MLC_SKIP_PREPROCESS_DATASET: 'no'
+ MLC_SKIP_MODEL_DOWNLOAD: 'no'
+ MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: kilt
+ MLC_MLPERF_SKIP_RUN: 'no'
+ MLC_KILT_REPO_URL: https://github.com/GATEOverflow/kilt-mlperf
+ MLC_QAIC_DEVICES: "0"
kilt_max_wait_abs: 10000
verbosity: 0
loadgen_trigger_cold_run: 0
env:
- CM_CALL_MLPERF_RUNNER: 'no'
+ MLC_CALL_MLPERF_RUNNER: 'no'
# Map script inputs to environment variables
input_mapping:
- count: CM_MLPERF_LOADGEN_QUERY_COUNT
- max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE
- mlperf_conf: CM_MLPERF_CONF
- mode: CM_MLPERF_LOADGEN_MODE
- output_dir: CM_MLPERF_OUTPUT_DIR
- performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
- scenario: CM_MLPERF_LOADGEN_SCENARIO
- user_conf: CM_MLPERF_USER_CONF
- devices: CM_QAIC_DEVICES
- skip_preprocess: CM_SKIP_PREPROCESS_DATASET
- skip_preprocessing: CM_SKIP_PREPROCESS_DATASET
- target_qps: CM_MLPERF_LOADGEN_TARGET_QPS
- offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
- server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS
- target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY
- singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
- multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
- performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
- rerun: CM_RERUN
+ count: MLC_MLPERF_LOADGEN_QUERY_COUNT
+ max_batchsize: MLC_MLPERF_LOADGEN_MAX_BATCHSIZE
+ mlperf_conf: MLC_MLPERF_CONF
+ mode: MLC_MLPERF_LOADGEN_MODE
+ output_dir: MLC_MLPERF_OUTPUT_DIR
+ performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
+ scenario: MLC_MLPERF_LOADGEN_SCENARIO
+ user_conf: MLC_MLPERF_USER_CONF
+ devices: MLC_QAIC_DEVICES
+ skip_preprocess: MLC_SKIP_PREPROCESS_DATASET
+ skip_preprocessing: MLC_SKIP_PREPROCESS_DATASET
+ target_qps: MLC_MLPERF_LOADGEN_TARGET_QPS
+ offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
+ server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS
+ target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY
+ singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
+ multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
+ performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
+ rerun: MLC_RERUN
new_state_keys:
- mlperf-inference-implementation
- - CM_SUT_*
+ - MLC_SUT_*
# Env keys which are exposed to higher level scripts
new_env_keys:
- - CM_MLPERF_*
- - CM_DATASET_*
- - CM_HW_NAME
- - CM_ML_MODEL_*
- - CM_MAX_EXAMPLES
- - CM_IMAGENET_ACCURACY_DTYPE
- - CM_SQUAD_ACCURACY_DTYPE
+ - MLC_MLPERF_*
+ - MLC_DATASET_*
+ - MLC_HW_NAME
+ - MLC_ML_MODEL_*
+ - MLC_MAX_EXAMPLES
+ - MLC_IMAGENET_ACCURACY_DTYPE
+ - MLC_SQUAD_ACCURACY_DTYPE
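
Entries in `new_env_keys` such as `MLC_MLPERF_*` are glob patterns controlling which env keys this script exports to its callers. A sketch of the filtering semantics (illustrative only, not the framework's implementation):

    # Illustrative filter for new_env_keys globs (not the framework's code).
    from fnmatch import fnmatch

    def exported(env, patterns):
        return {k: v for k, v in env.items()
                if any(fnmatch(k, p) for p in patterns)}

    env = {'MLC_MLPERF_LOADGEN_MODE': 'performance', 'MLC_INTERNAL_TMP': 'x'}
    print(exported(env, ['MLC_MLPERF_*', 'MLC_HW_NAME']))
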
# Dependencies on other CM scripts
@@ -97,10 +97,10 @@ deps:
- kilt-repo
update_tags_from_env_with_prefix:
_repo.:
- - CM_KILT_REPO_URL
+ - MLC_KILT_REPO_URL
extra_cache_tags: kilt,kilt-repo
env:
- CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_KILT_CHECKOUT_PATH
+ MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_KILT_CHECKOUT_PATH
########################################################################
# Install MLPerf inference dependencies
@@ -129,10 +129,10 @@ deps:
# Install ResNet50 model (ONNX) and ImageNet
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- resnet50
skip_if_env:
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- qaic
names:
- resnet50-model
@@ -140,27 +140,27 @@ deps:
tags: get,ml-model,resnet50,_fp32,_onnx,_from-tf
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- resnet50
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- qaic
tags: compile,qaic,model,_resnet50
names:
- qaic-model-compiler
- resnet50-compiler
skip_if_env:
- CM_MLPERF_SKIP_RUN:
+ MLC_MLPERF_SKIP_RUN:
- yes
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- resnet50
names:
- imagenet-preprocessed
- dataset-preprocessed
tags: get,dataset,imagenet,preprocessed,_for.resnet50,_NHWC,_full
skip_if_env:
- CM_MLPERF_SKIP_RUN:
+ MLC_MLPERF_SKIP_RUN:
- yes
@@ -169,45 +169,45 @@ deps:
# Install bert dependencies
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- bert-99
- bert-99.9
names:
- bert-vocab
tags: get,squad-vocab
skip_if_env:
- CM_MLPERF_SKIP_RUN:
+ MLC_MLPERF_SKIP_RUN:
- yes
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- bert-99
- bert-99.9
names:
- squad-tokenized
tags: get,dataset,tokenized,squad,_raw
skip_if_env:
- CM_MLPERF_SKIP_RUN:
+ MLC_MLPERF_SKIP_RUN:
- yes
########################################################################
# Install OpenImages
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- retinanet
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- qaic
tags: compile,qaic,model,_retinanet
names:
- qaic-model-compiler
- retinanet-compiler
skip_if_env:
- CM_MLPERF_SKIP_RUN:
+ MLC_MLPERF_SKIP_RUN:
- yes
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- retinanet
names:
- openimages-preprocessed
@@ -215,11 +215,11 @@ deps:
tags: get,dataset,preprocessed,openimages,_for.retinanet.onnx,_NCHW,_validation,_custom-annotations
    update_tags_from_env_with_prefix1: # disabled for now to prevent unnecessary preprocessing
_quant-scale.:
- - CM_QAIC_MODEL_RETINANET_IMAGE_OFFSET
+ - MLC_QAIC_MODEL_RETINANET_IMAGE_OFFSET
_quant-offset.:
- - CM_QAIC_MODEL_RETINANET_IMAGE_SCALE
+ - MLC_QAIC_MODEL_RETINANET_IMAGE_SCALE
skip_if_env:
- CM_MLPERF_SKIP_RUN:
+ MLC_MLPERF_SKIP_RUN:
- yes
@@ -228,16 +228,16 @@ deps:
########################################################################
# Install ML engines via CM
- enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- onnxruntime
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- cpu
tags: get,lib,onnxruntime,lang-cpp,_cpu
- enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- onnxruntime
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- gpu
tags: get,lib,onnxruntime,lang-cpp,_cuda
@@ -249,14 +249,14 @@ post_deps:
- compile-program
tags: compile,cpp-program
skip_if_env:
- CM_MLPERF_SKIP_RUN:
+ MLC_MLPERF_SKIP_RUN:
- yes
- names:
- runner
- mlperf-runner
skip_if_env:
- CM_MLPERF_SKIP_RUN:
+ MLC_MLPERF_SKIP_RUN:
- 'yes'
- yes
tags: benchmark-mlperf
@@ -272,47 +272,47 @@ variations:
group: device
default: true
env:
- CM_MLPERF_DEVICE: cpu
+ MLC_MLPERF_DEVICE: cpu
kilt_backend_type: cpu
cuda:
group: device
env:
- CM_MLPERF_DEVICE: gpu
- CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart
+ MLC_MLPERF_DEVICE: gpu
+ MLC_MLPERF_DEVICE_LIB_NAMESPEC: cudart
kilt_backend_type: gpu
qaic:
group: device
env:
- CM_MLPERF_DEVICE: qaic
- CM_MLPERF_DEVICE_LIB_NAMESPEC: QAic
+ MLC_MLPERF_DEVICE: qaic
+ MLC_MLPERF_DEVICE_LIB_NAMESPEC: QAic
kilt_backend_type: qaic
deps:
- tags: get,qaic,platform,sdk
skip_if_env:
- CM_MLPERF_SKIP_RUN:
+ MLC_MLPERF_SKIP_RUN:
- yes
- tags: get,lib,protobuf,_tag.v3.11.4
skip_if_env:
- CM_MLPERF_SKIP_RUN:
+ MLC_MLPERF_SKIP_RUN:
- yes
- tags: set,device,mode,qaic
enable_if_env:
- CM_QAIC_VC:
+ MLC_QAIC_VC:
"on"
update_tags_from_env_with_prefix":
_vc.:
- - CM_QAIC_VC
+ - MLC_QAIC_VC
- tags: set,device,mode,qaic,_ecc
enable_if_env:
- CM_QAIC_ECC:
+ MLC_QAIC_ECC:
"yes"
tensorrt:
group: framework
env:
- CM_MLPERF_BACKEND: tensorrt
+ MLC_MLPERF_BACKEND: tensorrt
device: tensorrt
- CM_MLPERF_BACKEND_NAME: TensorRT
+ MLC_MLPERF_BACKEND_NAME: TensorRT
# ML engine
onnxruntime:
@@ -320,15 +320,15 @@ variations:
default: true
env:
device: onnxrt
- CM_MLPERF_BACKEND: onnxruntime
- CM_MLPERF_BACKEND_LIB_NAMESPEC: onnxruntime
+ MLC_MLPERF_BACKEND: onnxruntime
+ MLC_MLPERF_BACKEND_LIB_NAMESPEC: onnxruntime
glow:
group: framework
env:
device: qaic
- CM_MLPERF_BACKEND: glow
- CM_MLPERF_BACKEND_LIB_NAMESPEC: QAic
+ MLC_MLPERF_BACKEND: glow
+ MLC_MLPERF_BACKEND_LIB_NAMESPEC: QAic
bs.#:
group: batch-size
@@ -348,7 +348,7 @@ variations:
group: model
default: true
env:
- CM_MODEL: resnet50
+ MLC_MODEL: resnet50
kilt_model_name: resnet50
kilt_input_count: 1
kilt_output_count: 1
@@ -359,16 +359,16 @@ variations:
ml_model_image_height: 224
loadgen_buffer_size: 1024
loadgen_dataset_size: 50000
- CM_BENCHMARK: STANDALONE_CLASSIFICATION
+ MLC_BENCHMARK: STANDALONE_CLASSIFICATION
resnet50,uint8:
env:
kilt_input_format: "UINT8,-1,224,224,3"
kilt_device_qaic_skip_stage: convert
- CM_IMAGENET_ACCURACY_DTYPE: int8
- CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: "https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Qualcomm/calibration.md"
- CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8
- CM_ML_MODEL_INPUTS_DATA_TYPE: int8
+ MLC_IMAGENET_ACCURACY_DTYPE: int8
+ MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: "https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Qualcomm/calibration.md"
+ MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8
+ MLC_ML_MODEL_INPUTS_DATA_TYPE: int8
bert-99,qaic:
deps:
@@ -377,12 +377,12 @@ variations:
- qaic-model-compiler
- bert-99-compiler
skip_if_env:
- CM_MLPERF_SKIP_RUN:
+ MLC_MLPERF_SKIP_RUN:
- yes
env:
- CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: "https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Qualcomm/calibration.md"
- CM_ML_MODEL_WEIGHTS_DATA_TYPE: int32
- CM_ML_MODEL_INPUTS_DATA_TYPE: int8,fp16
+ MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: "https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Qualcomm/calibration.md"
+ MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int32
+ MLC_ML_MODEL_INPUTS_DATA_TYPE: int8,fp16
bert-99.9,qaic:
deps:
@@ -391,20 +391,20 @@ variations:
- qaic-model-compiler
- bert-99.9-compiler
skip_if_env:
- CM_MLPERF_SKIP_RUN:
+ MLC_MLPERF_SKIP_RUN:
- yes
env:
- CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: "https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Qualcomm/calibration.md"
- CM_ML_MODEL_WEIGHTS_DATA_TYPE: int32
- CM_ML_MODEL_INPUTS_DATA_TYPE: fp16
+ MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: "https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Qualcomm/calibration.md"
+ MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int32
+ MLC_ML_MODEL_INPUTS_DATA_TYPE: fp16
retinanet:
group: model
base:
- bs.1
env:
- CM_MODEL: retinanet
- CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth"
+ MLC_MODEL: retinanet
+ MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth"
kilt_model_name: retinanet
kilt_input_count: 1
#kilt_model_disable_nms: ''
@@ -417,7 +417,7 @@ variations:
ml_model_image_width: 800
loadgen_buffer_size: 64
loadgen_dataset_size: 24576
- CM_BENCHMARK: STANDALONE_OBJECT_DETECTION
+ MLC_BENCHMARK: STANDALONE_OBJECT_DETECTION
deps:
- tags: get,generic-python-lib,_Pillow
@@ -432,9 +432,9 @@ variations:
kilt_device_qaic_skip_stage: 'convert'
kilt_input_format: "UINT8,1,3,800,800"
kilt_output_format: "INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,4,1000:INT8,14,1000:INT8,1,4,1000:INT8,1,4,1000:INT8,1,4,1000"
- CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: "https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Qualcomm/calibration.md"
- CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8
- CM_ML_MODEL_INPUTS_DATA_TYPE: int8
+ MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: "https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Qualcomm/calibration.md"
+ MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8
+ MLC_ML_MODEL_INPUTS_DATA_TYPE: int8
bert_:
@@ -443,7 +443,7 @@ variations:
- tags: get,generic-python-lib,_safetensors
- tags: get,generic-python-lib,_onnx
env:
- CM_BENCHMARK: STANDALONE_BERT
+ MLC_BENCHMARK: STANDALONE_BERT
kilt_model_name: bert
kilt_model_seq_length: 384
kilt_model_bert_variant: BERT_PACKED
@@ -467,25 +467,25 @@ variations:
group: run-mode
default: true
env:
- CM_RUN_MODE: standalone
+ MLC_RUN_MODE: standalone
network-server:
group: run-mode
env:
- CM_RUN_MODE: network-server
+ MLC_RUN_MODE: network-server
network-client:
group: run-mode
env:
- CM_RUN_MODE: network-client
+ MLC_RUN_MODE: network-client
bert_,network-server:
env:
- CM_BENCHMARK: NETWORK_BERT_SERVER
+ MLC_BENCHMARK: NETWORK_BERT_SERVER
bert_,network-client:
env:
- CM_BENCHMARK: NETWORK_BERT_CLIENT
+ MLC_BENCHMARK: NETWORK_BERT_CLIENT
bert_,singlestream:
env:
@@ -496,22 +496,22 @@ variations:
base:
- bert_
env:
- CM_MODEL: bert-99
- CM_SQUAD_ACCURACY_DTYPE: float32
- CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx"
+ MLC_MODEL: bert-99
+ MLC_SQUAD_ACCURACY_DTYPE: float32
+ MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx"
bert-99.9:
group: model
base:
- bert_
env:
- CM_MODEL: bert-99.9
- CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx"
+ MLC_MODEL: bert-99.9
+ MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx"
loadgen-batch-size.#:
group: loadgen-batch-size
env:
- CM_MLPERF_LOADGEN_BATCH_SIZE: "#"
+ MLC_MLPERF_LOADGEN_BATCH_SIZE: "#"
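
Variation names ending in `.#` take an arbitrary suffix that replaces the `#` placeholder in the env value, so `_loadgen-batch-size.32` sets `MLC_MLPERF_LOADGEN_BATCH_SIZE=32`. A hypothetical sketch of the substitution:

    # Hypothetical sketch of '#' wildcard substitution in variation env blocks.
    def expand_wildcard(env_template, suffix):
        return {k: v.replace('#', suffix) if isinstance(v, str) else v
                for k, v in env_template.items()}

    template = {'MLC_MLPERF_LOADGEN_BATCH_SIZE': '#'}
    print(expand_wildcard(template, '32'))  # {'MLC_MLPERF_LOADGEN_BATCH_SIZE': '32'}
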
bert-99,offline:
default_variations:
@@ -523,23 +523,23 @@ variations:
activation-count.#:
env:
- CM_MLPERF_QAIC_ACTIVATION_COUNT: "#"
- #CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: "activation_count.#"
+ MLC_MLPERF_QAIC_ACTIVATION_COUNT: "#"
+ #MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: "activation_count.#"
maxq:
group: power-mode
env:
- CM_MLPERF_NVIDIA_HARNESS_MAXQ: yes
+ MLC_MLPERF_NVIDIA_HARNESS_MAXQ: yes
maxn:
group: power-mode
env:
- CM_MLPERF_NVIDIA_HARNESS_MAXN: yes
+ MLC_MLPERF_NVIDIA_HARNESS_MAXN: yes
singlestream:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: SingleStream
+ MLC_MLPERF_LOADGEN_SCENARIO: SingleStream
adr:
qaic-model-compiler:
tags: _singlestream
@@ -554,21 +554,21 @@ variations:
multistream:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: MultiStream
+ MLC_MLPERF_LOADGEN_SCENARIO: MultiStream
adr:
qaic-model-compiler:
tags: _multistream
offline:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: Offline
+ MLC_MLPERF_LOADGEN_SCENARIO: Offline
adr:
qaic-model-compiler:
tags: _offline
server:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: Server
+ MLC_MLPERF_LOADGEN_SCENARIO: Server
adr:
qaic-model-compiler:
tags: _server
@@ -586,7 +586,7 @@ variations:
dataset-preprocessed:
tags: _float32,_rgb32
env:
- CM_IMAGENET_ACCURACY_DTYPE: float32
+ MLC_IMAGENET_ACCURACY_DTYPE: float32
nsp.14:
group: nsp
@@ -614,12 +614,12 @@ variations:
base:
- nsp.14
env:
- CM_QAIC_DEVICES: "0,1,2,3,4,5,6,7"
+ MLC_QAIC_DEVICES: "0,1,2,3,4,5,6,7"
qaic_queue_length: 4
dl2q.24xlarge,singlestream:
env:
- CM_QAIC_DEVICES: 0
+ MLC_QAIC_DEVICES: 0
qaic_activation_count: "1"
dl2q.24xlarge,resnet50,offline:
@@ -668,11 +668,11 @@ variations:
num-devices.4:
env:
- CM_QAIC_DEVICES: "0,1,2,3"
+ MLC_QAIC_DEVICES: "0,1,2,3"
pro,num-devices.4,singlestream:
env:
- CM_QAIC_DEVICES: "0"
+ MLC_QAIC_DEVICES: "0"
qaic_activation_count: "1"
pro,num-devices.4,resnet50,offline:
@@ -740,7 +740,7 @@ variations:
base:
- nsp.9
env:
- CM_QAIC_DEVICES: "0"
+ MLC_QAIC_DEVICES: "0"
qaic_queue_length: 6
rb6,singlestream:
diff --git a/script/app-mlperf-inference-qualcomm/run.sh b/script/app-mlperf-inference-qualcomm/run.sh
index ddcd0b550..0c6a8fc4a 100644
--- a/script/app-mlperf-inference-qualcomm/run.sh
+++ b/script/app-mlperf-inference-qualcomm/run.sh
@@ -1,7 +1,7 @@
#!/bin/bash
-if [[ ${CM_CALL_MLPERF_RUNNER} == "no" ]]; then
- cd ${CM_RUN_DIR}
- cmd=${CM_RUN_CMD}
+if [[ ${MLC_CALL_MLPERF_RUNNER} == "no" ]]; then
+ cd ${MLC_RUN_DIR}
+ cmd=${MLC_RUN_CMD}
echo "${cmd}"
eval "${cmd}"
test $? -eq 0 || exit $?
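
All three run.sh files in this patch share the guard above: when `MLC_CALL_MLPERF_RUNNER` is `no`, the script changes into `MLC_RUN_DIR`, evals `MLC_RUN_CMD`, and propagates the exit code. A rough Python equivalent, assuming those three variables are set:

    # Rough Python equivalent of the shared run.sh guard (for reference only).
    import os
    import subprocess
    import sys

    if os.environ.get('MLC_CALL_MLPERF_RUNNER') == 'no':
        os.chdir(os.environ['MLC_RUN_DIR'])
        cmd = os.environ['MLC_RUN_CMD']
        print(cmd)
        rc = subprocess.call(cmd, shell=True)
        if rc != 0:
            sys.exit(rc)
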
diff --git a/script/app-mlperf-inference-redhat/customize.py b/script/app-mlperf-inference-redhat/customize.py
index d5d4ee85d..7278f89a2 100644
--- a/script/app-mlperf-inference-redhat/customize.py
+++ b/script/app-mlperf-inference-redhat/customize.py
@@ -11,29 +11,29 @@ def preprocess(i):
return {'return': 1, 'error': 'Windows is not supported in this script yet'}
env = i['env']
- if env.get('CM_MLPERF_SKIP_RUN', '') == "yes":
+ if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes":
return {'return': 0}
- if 'CM_MODEL' not in env:
+ if 'MLC_MODEL' not in env:
return {
'return': 1, 'error': 'Please select a variation specifying the model to run'}
- if 'CM_MLPERF_BACKEND' not in env:
+ if 'MLC_MLPERF_BACKEND' not in env:
return {'return': 1,
'error': 'Please select a variation specifying the backend'}
- if 'CM_MLPERF_DEVICE' not in env:
+ if 'MLC_MLPERF_DEVICE' not in env:
return {
'return': 1, 'error': 'Please select a variation specifying the device to run on'}
- r = get_run_cmd(env['CM_MODEL'], i)
+ r = get_run_cmd(env['MLC_MODEL'], i)
if r['return'] > 0:
return r
run_cmd = r['run_cmd']
run_dir = r['run_dir']
print(run_cmd)
print(run_dir)
- env['CM_MLPERF_RUN_CMD'] = run_cmd
- env['CM_RUN_DIR'] = run_dir
- env['CM_RUN_CMD'] = run_cmd
+ env['MLC_MLPERF_RUN_CMD'] = run_cmd
+ env['MLC_RUN_DIR'] = run_dir
+ env['MLC_RUN_CMD'] = run_cmd
return {'return': 0}
# return {'return':1, 'error': 'Run command needs to be tested'}
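
`preprocess` delegates to `get_run_cmd`, which picks a command builder by substring-matching the model name and returns both the command and the directory to run it from. A condensed sketch of that dispatch shape (branch bodies abbreviated; the real builders appear below):

    # Condensed sketch of the get_run_cmd dispatch shape (bodies abbreviated).
    def get_run_cmd_sketch(model, env):
        if 'gptj' in model:
            return {'return': 0, 'run_cmd': 'python3 -u main.py ...',
                    'run_dir': '<gptj code dir>'}
        if 'llama2' in model:
            return {'return': 0, 'run_cmd': "python3 -u 'main.py' ...",
                    'run_dir': '<llama2 code dir>'}
        return {'return': 1, 'error': f'unsupported model: {model}'}
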
@@ -42,16 +42,16 @@ def preprocess(i):
def get_run_cmd(model, i):
env = i['env']
if "gptj" in model:
- scenario = env['CM_MLPERF_LOADGEN_SCENARIO']
- device = env['CM_MLPERF_DEVICE']
- mode = env['CM_MLPERF_LOADGEN_MODE']
- outdir = env['CM_MLPERF_OUTPUT_DIR']
- mlperf_conf_path = env['CM_MLPERF_CONF']
- user_conf_path = env['CM_MLPERF_USER_CONF']
- api_server = env.get('CM_MLPERF_INFERENCE_API_SERVER', 'localhost')
+ scenario = env['MLC_MLPERF_LOADGEN_SCENARIO']
+ device = env['MLC_MLPERF_DEVICE']
+ mode = env['MLC_MLPERF_LOADGEN_MODE']
+ outdir = env['MLC_MLPERF_OUTPUT_DIR']
+ mlperf_conf_path = env['MLC_MLPERF_CONF']
+ user_conf_path = env['MLC_MLPERF_USER_CONF']
+ api_server = env.get('MLC_MLPERF_INFERENCE_API_SERVER', 'localhost')
model_path = env['GPTJ_CHECKPOINT_PATH']
- dataset_path = env['CM_DATASET_CNNDM_EVAL_PATH']
- precision = env['CM_MLPERF_MODEL_PRECISION']
+ dataset_path = env['MLC_DATASET_CNNDM_EVAL_PATH']
+ precision = env['MLC_MLPERF_MODEL_PRECISION']
if mode == "accuracy":
accuracy_string = " --accuracy "
else:
@@ -60,7 +60,7 @@ def get_run_cmd(model, i):
run_cmd = f"python3 -u main.py --scenario {scenario} --model-path {model_path} --api-server {api_server} --api-model-name gpt-j-cnn --mlperf-conf {mlperf_conf_path} {accuracy_string} --vllm --user-conf {user_conf_path} --dataset-path {dataset_path} --output-log-dir {outdir} --dtype float32 --device {device} "
submitter = "CTuning"
run_dir = os.path.join(
- env['CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO'],
+ env['MLC_MLPERF_INFERENCE_IMPLEMENTATION_REPO'],
"open",
submitter,
"code",
@@ -69,18 +69,18 @@ def get_run_cmd(model, i):
return {'return': 0, 'run_cmd': run_cmd, 'run_dir': run_dir}
if "llama2" in model:
- scenario = env['CM_MLPERF_LOADGEN_SCENARIO']
- device = env['CM_MLPERF_DEVICE']
- mode = env['CM_MLPERF_LOADGEN_MODE']
- outdir = env['CM_MLPERF_OUTPUT_DIR']
- mlperf_conf_path = env['CM_MLPERF_CONF']
- user_conf_path = env['CM_MLPERF_USER_CONF']
+ scenario = env['MLC_MLPERF_LOADGEN_SCENARIO']
+ device = env['MLC_MLPERF_DEVICE']
+ mode = env['MLC_MLPERF_LOADGEN_MODE']
+ outdir = env['MLC_MLPERF_OUTPUT_DIR']
+ mlperf_conf_path = env['MLC_MLPERF_CONF']
+ user_conf_path = env['MLC_MLPERF_USER_CONF']
api_server = env.get(
- 'CM_MLPERF_INFERENCE_API_SERVER',
+ 'MLC_MLPERF_INFERENCE_API_SERVER',
'localhost:8000/v1')
- api_model_name = env['CM_VLLM_SERVER_MODEL_NAME']
- dataset_path = env['CM_DATASET_OPENORCA_PATH']
- precision = env['CM_MLPERF_MODEL_PRECISION']
+ api_model_name = env['MLC_VLLM_SERVER_MODEL_NAME']
+ dataset_path = env['MLC_DATASET_OPENORCA_PATH']
+ precision = env['MLC_MLPERF_MODEL_PRECISION']
if mode == "accuracy":
accuracy_string = " --accuracy "
else:
@@ -89,7 +89,7 @@ def get_run_cmd(model, i):
run_cmd = f"python3 -u 'main.py' --scenario {scenario} --model-path {api_model_name} --api-model-name {api_model_name} --api-server {api_server} --mlperf-conf {mlperf_conf_path} {accuracy_string} --vllm --user-conf {user_conf_path} --dataset-path {dataset_path} --output-log-dir {outdir} --dtype float32 --device {device} "
submitter = "RedHat-Supermicro"
run_dir = os.path.join(
- env['CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO'],
+ env['MLC_MLPERF_INFERENCE_IMPLEMENTATION_REPO'],
"open",
submitter,
"code",
diff --git a/script/app-mlperf-inference-redhat/meta.yaml b/script/app-mlperf-inference-redhat/meta.yaml
index 2c7011bd5..55af68d65 100644
--- a/script/app-mlperf-inference-redhat/meta.yaml
+++ b/script/app-mlperf-inference-redhat/meta.yaml
@@ -21,51 +21,51 @@ tags:
# Default environment
default_env:
- CM_MLPERF_LOADGEN_SCENARIO: Offline
- CM_MLPERF_LOADGEN_MODE: performance
- CM_SKIP_PREPROCESS_DATASET: 'no'
- CM_SKIP_MODEL_DOWNLOAD: 'no'
- CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: redhat_harness
- CM_MLPERF_SKIP_RUN: 'no'
+ MLC_MLPERF_LOADGEN_SCENARIO: Offline
+ MLC_MLPERF_LOADGEN_MODE: performance
+ MLC_SKIP_PREPROCESS_DATASET: 'no'
+ MLC_SKIP_MODEL_DOWNLOAD: 'no'
+ MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: redhat_harness
+ MLC_MLPERF_SKIP_RUN: 'no'
env:
- CM_CALL_MLPERF_RUNNER: 'no'
+ MLC_CALL_MLPERF_RUNNER: 'no'
# Map script inputs to environment variables
input_mapping:
- count: CM_MLPERF_LOADGEN_QUERY_COUNT
- max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE
- mlperf_conf: CM_MLPERF_CONF
- mode: CM_MLPERF_LOADGEN_MODE
- output_dir: CM_MLPERF_OUTPUT_DIR
- performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
- scenario: CM_MLPERF_LOADGEN_SCENARIO
- user_conf: CM_MLPERF_USER_CONF
- skip_preprocess: CM_SKIP_PREPROCESS_DATASET
- skip_preprocessing: CM_SKIP_PREPROCESS_DATASET
- target_qps: CM_MLPERF_LOADGEN_TARGET_QPS
- offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
- server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS
- target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY
- singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
- multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
- performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
- rerun: CM_RERUN
- results_repo: CM_MLPERF_INFERENCE_RESULTS_REPO
+ count: MLC_MLPERF_LOADGEN_QUERY_COUNT
+ max_batchsize: MLC_MLPERF_LOADGEN_MAX_BATCHSIZE
+ mlperf_conf: MLC_MLPERF_CONF
+ mode: MLC_MLPERF_LOADGEN_MODE
+ output_dir: MLC_MLPERF_OUTPUT_DIR
+ performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
+ scenario: MLC_MLPERF_LOADGEN_SCENARIO
+ user_conf: MLC_MLPERF_USER_CONF
+ skip_preprocess: MLC_SKIP_PREPROCESS_DATASET
+ skip_preprocessing: MLC_SKIP_PREPROCESS_DATASET
+ target_qps: MLC_MLPERF_LOADGEN_TARGET_QPS
+ offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
+ server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS
+ target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY
+ singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
+ multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
+ performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
+ rerun: MLC_RERUN
+ results_repo: MLC_MLPERF_INFERENCE_RESULTS_REPO
new_state_keys:
- mlperf-inference-implementation
- - CM_SUT_*
+ - MLC_SUT_*
# Env keys which are exposed to higher level scripts
new_env_keys:
- - CM_MLPERF_*
- - CM_DATASET_*
- - CM_HW_NAME
- - CM_ML_MODEL_*
- - CM_MAX_EXAMPLES
- - CM_IMAGENET_ACCURACY_DTYPE
- - CM_SQUAD_ACCURACY_DTYPE
+ - MLC_MLPERF_*
+ - MLC_DATASET_*
+ - MLC_HW_NAME
+ - MLC_ML_MODEL_*
+ - MLC_MAX_EXAMPLES
+ - MLC_IMAGENET_ACCURACY_DTYPE
+ - MLC_SQUAD_ACCURACY_DTYPE
# Dependencies on other CM scripts
@@ -111,9 +111,9 @@ deps:
- inference-code
update_tags_from_env_with_prefix:
_repo.:
- - CM_MLPERF_INFERENCE_RESULTS_REPO
+ - MLC_MLPERF_INFERENCE_RESULTS_REPO
env:
- CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO
+ MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_MLPERF_INFERENCE_IMPLEMENTATION_REPO
extra_cache_tags: results,repo,mlperf
# Post dependencies to run this app including for power measurement
@@ -123,7 +123,7 @@ post_deps:
- runner
- mlperf-runner
skip_if_env:
- CM_MLPERF_SKIP_RUN:
+ MLC_MLPERF_SKIP_RUN:
- 'yes'
- yes
tags: benchmark-mlperf
@@ -139,23 +139,23 @@ variations:
group: device
default: true
env:
- CM_MLPERF_DEVICE: cpu
+ MLC_MLPERF_DEVICE: cpu
cuda:
group: device
env:
- CM_MLPERF_DEVICE: gpu
- CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart
+ MLC_MLPERF_DEVICE: gpu
+ MLC_MLPERF_DEVICE_LIB_NAMESPEC: cudart
openshift:
group: backend
default: true
env:
- CM_MLPERF_BACKEND: openshift
+ MLC_MLPERF_BACKEND: openshift
pytorch:
group: backend
env:
- CM_MLPERF_BACKEND: pytorch
+ MLC_MLPERF_BACKEND: pytorch
pytorch,cuda:
deps:
@@ -174,14 +174,14 @@ variations:
group: model
default: true
env:
- CM_MODEL: resnet50
+ MLC_MODEL: resnet50
retinanet:
group: model
base:
- bs.1
env:
- CM_MODEL: retinanet
+ MLC_MODEL: retinanet
bert_:
{}
@@ -191,15 +191,15 @@ variations:
base:
- bert_
env:
- CM_MODEL: bert-99
- CM_SQUAD_ACCURACY_DTYPE: float32
+ MLC_MODEL: bert-99
+ MLC_SQUAD_ACCURACY_DTYPE: float32
bert-99.9:
group: model
base:
- bert_
env:
- CM_MODEL: bert-99.9
+ MLC_MODEL: bert-99.9
bert_:
{}
@@ -209,15 +209,15 @@ variations:
base:
- bert_
env:
- CM_MODEL: bert-99
- CM_SQUAD_ACCURACY_DTYPE: float32
+ MLC_MODEL: bert-99
+ MLC_SQUAD_ACCURACY_DTYPE: float32
bert-99.9:
group: model
base:
- bert_
env:
- CM_MODEL: bert-99.9
+ MLC_MODEL: bert-99.9
gptj_:
deps:
@@ -231,42 +231,42 @@ variations:
base:
- gptj_
env:
- CM_MODEL: gptj-99
- CM_SQUAD_ACCURACY_DTYPE: float32
+ MLC_MODEL: gptj-99
+ MLC_SQUAD_ACCURACY_DTYPE: float32
gptj-99.9:
group: model
base:
- gptj_
env:
- CM_MODEL: gptj-99.9
+ MLC_MODEL: gptj-99.9
llama2-70b_:
deps:
- tags: get,dataset,openorca,language-processing,original,_redhat
env:
- CM_MLPERF_IMPLEMENTATION: redhat
+ MLC_MLPERF_IMPLEMENTATION: redhat
env:
- CM_VLLM_SERVER_MODEL_NAME: NousResearch/Meta-Llama-3-8B-Instruct # assigned just for testing purpose
+ MLC_VLLM_SERVER_MODEL_NAME: NousResearch/Meta-Llama-3-8B-Instruct # assigned just for testing purpose
llama2-70b-99:
group: model
base:
- llama2-70b_
env:
- CM_MODEL: llama2-70b-99
+ MLC_MODEL: llama2-70b-99
llama2-70b-99.9:
group: model
base:
- llama2-70b_
env:
- CM_MODEL: llama2-70b-99.9
+ MLC_MODEL: llama2-70b-99.9
singlestream:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: SingleStream
+ MLC_MLPERF_LOADGEN_SCENARIO: SingleStream
singlestream,resnet50:
default_variations:
@@ -279,17 +279,17 @@ variations:
multistream:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: MultiStream
+ MLC_MLPERF_LOADGEN_SCENARIO: MultiStream
offline:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: Offline
+ MLC_MLPERF_LOADGEN_SCENARIO: Offline
server:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: Server
+ MLC_MLPERF_LOADGEN_SCENARIO: Server
uint8:
group: precision
@@ -302,7 +302,7 @@ variations:
group: version
default: true
env:
- CM_MLPERF_INFERENCE_RESULTS_REPO: https://github.com/mlcommons/inference_results_v4.0
+ MLC_MLPERF_INFERENCE_RESULTS_REPO: https://github.com/mlcommons/inference_results_v4.0
docker:
real_run: False
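
The `input_mapping` block above is what turns script inputs such as `--scenario=Offline` into the renamed env vars (`MLC_MLPERF_LOADGEN_SCENARIO=Offline`). An illustration of the semantics, not the framework's implementation:

    # Illustration of input_mapping semantics (not the framework's code).
    input_mapping = {
        'scenario': 'MLC_MLPERF_LOADGEN_SCENARIO',
        'mode': 'MLC_MLPERF_LOADGEN_MODE',
        'output_dir': 'MLC_MLPERF_OUTPUT_DIR',
    }

    def apply_input_mapping(inputs, mapping, env):
        for key, value in inputs.items():
            if key in mapping:
                env[mapping[key]] = value
        return env

    print(apply_input_mapping({'scenario': 'Offline'}, input_mapping, {}))
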
diff --git a/script/app-mlperf-inference-redhat/run.sh b/script/app-mlperf-inference-redhat/run.sh
index ddcd0b550..0c6a8fc4a 100644
--- a/script/app-mlperf-inference-redhat/run.sh
+++ b/script/app-mlperf-inference-redhat/run.sh
@@ -1,7 +1,7 @@
#!/bin/bash
-if [[ ${CM_CALL_MLPERF_RUNNER} == "no" ]]; then
- cd ${CM_RUN_DIR}
- cmd=${CM_RUN_CMD}
+if [[ ${MLC_CALL_MLPERF_RUNNER} == "no" ]]; then
+ cd ${MLC_RUN_DIR}
+ cmd=${MLC_RUN_CMD}
echo "${cmd}"
eval "${cmd}"
test $? -eq 0 || exit $?
diff --git a/script/app-mlperf-inference/README-extra.md b/script/app-mlperf-inference/README-extra.md
index e661f3e53..f412c3c8f 100644
--- a/script/app-mlperf-inference/README-extra.md
+++ b/script/app-mlperf-inference/README-extra.md
@@ -56,7 +56,7 @@ The first run of this CM script takes around 25 minutes on a GCP instance with 1
CM will automatically detect, install and cache all the necessary ML components
while adapting them to your system using [portable CM scripts](https://github.com/mlcommons/cm4mlops/tree/main/script).
-These dependencies are described using [this simple YAML file](https://github.com/octoml/ck/blob/master/cm-mlops/script/app-mlperf-inference-reference/_cm.yaml#L57)
+These dependencies are described using [this simple YAML file](https://github.com/octoml/ck/blob/master/mlc-mlops/script/app-mlperf-inference-reference/_cm.yaml#L57)
and can be turned on or off using different environment variables passed to this CM script using `--env.KEY=VALUE`.
You should see the following output in the end:
diff --git a/script/app-mlperf-inference/customize.py b/script/app-mlperf-inference/customize.py
index 976f5124b..21e34e98a 100644
--- a/script/app-mlperf-inference/customize.py
+++ b/script/app-mlperf-inference/customize.py
@@ -18,19 +18,19 @@ def preprocess(i):
env = i['env']
state = i['state']
- if env.get('CM_MLPERF_IMPLEMENTATION', '') == 'nvidia':
- if env.get('CM_NVIDIA_GPU_NAME', '') in [
+ if env.get('MLC_MLPERF_IMPLEMENTATION', '') == 'nvidia':
+ if env.get('MLC_NVIDIA_GPU_NAME', '') in [
"rtx_4090", "a100", "t4", "l4", "orin", "custom"]:
- env['CM_NVIDIA_HARNESS_GPU_VARIATION'] = "_" + \
- env['CM_NVIDIA_GPU_NAME']
- env['CM_NVIDIA_GPU_MEMORY'] = ''
+ env['MLC_NVIDIA_HARNESS_GPU_VARIATION'] = "_" + \
+ env['MLC_NVIDIA_GPU_NAME']
+ env['MLC_NVIDIA_GPU_MEMORY'] = ''
else:
gpu_memory = i['state'].get(
'cm_cuda_device_prop', '').get('Global memory')
gpu_memory_size = str(
int((float(gpu_memory) / (1024 * 1024 * 1024) + 7) / 8) * 8)
- env['CM_NVIDIA_GPU_MEMORY'] = gpu_memory_size
- env['CM_NVIDIA_HARNESS_GPU_VARIATION'] = ''
+ env['MLC_NVIDIA_GPU_MEMORY'] = gpu_memory_size
+ env['MLC_NVIDIA_HARNESS_GPU_VARIATION'] = ''
if 'cmd' in i['input']:
state['mlperf_inference_run_cmd'] = "cm run script " + \
@@ -42,9 +42,9 @@ def preprocess(i):
state['mlperf-inference-implementation']['script_id'] = run_state['script_id'] + \
":" + ",".join(run_state['script_variation_tags'])
- if env.get('CM_VLLM_SERVER_MODEL_NAME', '') != '' and env.get(
- 'CM_ML_MODEL_FULL_NAME', '') == '':
- env['CM_ML_MODEL_FULL_NAME'] = env['CM_VLLM_SERVER_MODEL_NAME'].replace(
+ if env.get('MLC_VLLM_SERVER_MODEL_NAME', '') != '' and env.get(
+ 'MLC_ML_MODEL_FULL_NAME', '') == '':
+ env['MLC_ML_MODEL_FULL_NAME'] = env['MLC_VLLM_SERVER_MODEL_NAME'].replace(
"/", "_")
return {'return': 0}
@@ -61,14 +61,14 @@ def postprocess(i):
env['CMD'] = ''
state = i['state']
- # if env.get('CM_MLPERF_USER_CONF', '') == '':
+ # if env.get('MLC_MLPERF_USER_CONF', '') == '':
# return {'return': 0}
- output_dir = env['CM_MLPERF_OUTPUT_DIR']
+ output_dir = env['MLC_MLPERF_OUTPUT_DIR']
- result_sut_folder_path = env['CM_MLPERF_INFERENCE_RESULTS_SUT_PATH']
+ result_sut_folder_path = env['MLC_MLPERF_INFERENCE_RESULTS_SUT_PATH']
- mode = env['CM_MLPERF_LOADGEN_MODE']
+ mode = env['MLC_MLPERF_LOADGEN_MODE']
if not os.path.exists(output_dir) or not os.path.exists(
os.path.join(output_dir, "mlperf_log_summary.txt")):
@@ -76,62 +76,62 @@ def postprocess(i):
return {'return': 0}
# in power mode copy the log files from tmp_power directory
- if env.get('CM_MLPERF_POWER', '') == "yes" and mode == "performance":
+ if env.get('MLC_MLPERF_POWER', '') == "yes" and mode == "performance":
mlperf_power_logs_dir = os.path.join(
- env['CM_MLPERF_OUTPUT_DIR'], "..", "power")
+ env['MLC_MLPERF_OUTPUT_DIR'], "..", "power")
mlperf_ranging_logs_dir = os.path.join(
- env['CM_MLPERF_OUTPUT_DIR'], "..", "ranging")
+ env['MLC_MLPERF_OUTPUT_DIR'], "..", "ranging")
if os.path.exists(os.path.join(
- env['CM_MLPERF_POWER_LOG_DIR'], "power")):
+ env['MLC_MLPERF_POWER_LOG_DIR'], "power")):
if os.path.exists(mlperf_power_logs_dir):
shutil.rmtree(mlperf_power_logs_dir)
shutil.copytree(
os.path.join(
- env['CM_MLPERF_POWER_LOG_DIR'],
+ env['MLC_MLPERF_POWER_LOG_DIR'],
"power"),
mlperf_power_logs_dir)
if os.path.exists(os.path.join(
- env['CM_MLPERF_POWER_LOG_DIR'], "ranging")):
+ env['MLC_MLPERF_POWER_LOG_DIR'], "ranging")):
if os.path.exists(mlperf_ranging_logs_dir):
shutil.rmtree(mlperf_ranging_logs_dir)
shutil.copytree(
os.path.join(
- env['CM_MLPERF_POWER_LOG_DIR'],
+ env['MLC_MLPERF_POWER_LOG_DIR'],
"ranging"),
mlperf_ranging_logs_dir)
if os.path.exists(os.path.join(
- env['CM_MLPERF_POWER_LOG_DIR'], "run_1", "spl.txt")):
+ env['MLC_MLPERF_POWER_LOG_DIR'], "run_1", "spl.txt")):
shutil.copyfile(
os.path.join(
- env['CM_MLPERF_POWER_LOG_DIR'],
+ env['MLC_MLPERF_POWER_LOG_DIR'],
"run_1",
"spl.txt"),
os.path.join(
- env['CM_MLPERF_OUTPUT_DIR'],
+ env['MLC_MLPERF_OUTPUT_DIR'],
"spl.txt"))
- model = env['CM_MODEL']
- model_full_name = env.get('CM_ML_MODEL_FULL_NAME', model)
+ model = env['MLC_MODEL']
+ model_full_name = env.get('MLC_ML_MODEL_FULL_NAME', model)
if mode == "accuracy" or mode == "compliance" and env[
- 'CM_MLPERF_LOADGEN_COMPLIANCE_TEST'] == "TEST01":
+ 'MLC_MLPERF_LOADGEN_COMPLIANCE_TEST'] == "TEST01":
out_baseline_accuracy_string = f"""> {os.path.join(output_dir, "accuracy", "baseline_accuracy.txt")} """
out_compliance_accuracy_string = f"""> {os.path.join(output_dir, "accuracy", "compliance_accuracy.txt")} """
if model == "resnet50":
accuracy_filename = "accuracy-imagenet.py"
- accuracy_filepath = os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools",
+ accuracy_filepath = os.path.join(env['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools",
accuracy_filename)
dataset_args = " --imagenet-val-file " + \
- os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt")
+ os.path.join(env['MLC_DATASET_AUX_PATH'], "val.txt")
accuracy_log_file_option_name = " --mlperf-accuracy-file "
- datatype_option = " --dtype " + env['CM_IMAGENET_ACCURACY_DTYPE']
+ datatype_option = " --dtype " + env['MLC_IMAGENET_ACCURACY_DTYPE']
elif model == "retinanet":
accuracy_filename = "accuracy-openimages.py"
- accuracy_filepath = os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools",
+ accuracy_filepath = os.path.join(env['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools",
accuracy_filename)
dataset_args = " --openimages-dir " + \
os.getcwd() # just to make the script happy
@@ -141,20 +141,20 @@ def postprocess(i):
elif 'bert' in model:
accuracy_filename = "accuracy-squad.py"
accuracy_filepath = os.path.join(
- env['CM_MLPERF_INFERENCE_BERT_PATH'], accuracy_filename)
- dataset_args = " --val_data '" + env['CM_DATASET_SQUAD_VAL_PATH'] + "' --vocab_file '" + \
- env['CM_DATASET_SQUAD_VOCAB_PATH'] + \
+ env['MLC_MLPERF_INFERENCE_BERT_PATH'], accuracy_filename)
+ dataset_args = " --val_data '" + env['MLC_DATASET_SQUAD_VAL_PATH'] + "' --vocab_file '" + \
+ env['MLC_DATASET_SQUAD_VOCAB_PATH'] + \
"' --out_file predictions.json "
accuracy_log_file_option_name = " --log_file "
datatype_option = " --output_dtype " + \
- env['CM_SQUAD_ACCURACY_DTYPE']
+ env['MLC_SQUAD_ACCURACY_DTYPE']
elif 'rgat' in model:
accuracy_filename = "accuracy_igbh.py"
accuracy_filepath = os.path.join(
- env['CM_MLPERF_INFERENCE_RGAT_PATH'], "tools", accuracy_filename)
- dataset_args = " --dataset-path '" + env['CM_DATASET_IGBH_PATH'] + "' --dataset-size '" + \
- env['CM_DATASET_IGBH_SIZE'] + "'"
+ env['MLC_MLPERF_INFERENCE_RGAT_PATH'], "tools", accuracy_filename)
+ dataset_args = " --dataset-path '" + env['MLC_DATASET_IGBH_PATH'] + "' --dataset-size '" + \
+ env['MLC_DATASET_IGBH_SIZE'] + "'"
accuracy_log_file_option_name = " --mlperf-accuracy-file "
datatype_option = ""
out_baseline_accuracy_string = f""" --output-file {os.path.join(output_dir, "accuracy", "baseline_accuracy.txt")} """
@@ -172,24 +172,24 @@ def postprocess(i):
pass # Not giving an error now. But accuracy paths need to be done for other benchmarks which may need the non-determinism test
# return {'return': 1, 'error': f'Accuracy paths not done for model
# {model}'}
- scenario = env['CM_MLPERF_LOADGEN_SCENARIO']
+ scenario = env['MLC_MLPERF_LOADGEN_SCENARIO']
if not state.get('mlc-mlperf-inference-results'):
state['mlc-mlperf-inference-results'] = {}
if not state.get('mlc-mlperf-inference-results-last'):
state['mlc-mlperf-inference-results-last'] = {}
if not state['mlc-mlperf-inference-results'].get(
- state['CM_SUT_CONFIG_NAME']):
- state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']] = {}
- if not state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']
+ state['MLC_SUT_CONFIG_NAME']):
+ state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']] = {}
+ if not state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']
].get(model):
- state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']][model] = {}
- if not state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']
+ state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']][model] = {}
+ if not state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']
][model].get(scenario):
- state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']
+ state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']
][model][scenario] = {}
- # if env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes" and mode ==
+ # if env.get("MLC_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes" and mode ==
# "performance" and scenario != "Server":
if mode == "performance" and scenario != "Server":
os.chdir(output_dir)
@@ -224,9 +224,9 @@ def postprocess(i):
if "\\(ns\\)" in pattern[scenario]:
value = str(float(value) / 1000000) # convert to milliseconds
- sut_name = state['CM_SUT_CONFIG_NAME']
- sut_config = state['CM_SUT_CONFIG'][sut_name]
- sut_config_path = state['CM_SUT_CONFIG_PATH'][sut_name]
+ sut_name = state['MLC_SUT_CONFIG_NAME']
+ sut_config = state['MLC_SUT_CONFIG'][sut_name]
+ sut_config_path = state['MLC_SUT_CONFIG_PATH'][sut_name]
if scenario not in sut_config[model_full_name]:
sut_config[model_full_name][scenario] = {}
sut_config[model_full_name][scenario][metric] = value
@@ -245,20 +245,20 @@ def postprocess(i):
else:
measurements = {}
measurements['starting_weights_filename'] = env.get(
- 'CM_ML_MODEL_STARTING_WEIGHTS_FILENAME', env.get(
- 'CM_ML_MODEL_FILE', measurements.get(
+ 'MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME', env.get(
+ 'MLC_ML_MODEL_FILE', measurements.get(
'starting_weights_filename', '')))
measurements['retraining'] = env.get(
- 'CM_ML_MODEL_RETRAINING', measurements.get(
+ 'MLC_ML_MODEL_RETRAINING', measurements.get(
'retraining', 'no'))
measurements['input_data_types'] = env.get(
- 'CM_ML_MODEL_INPUTS_DATA_TYPE', measurements.get(
+ 'MLC_ML_MODEL_INPUTS_DATA_TYPE', measurements.get(
'input_data_types', 'fp32'))
measurements['weight_data_types'] = env.get(
- 'CM_ML_MODEL_WEIGHTS_DATA_TYPE', measurements.get(
+ 'MLC_ML_MODEL_WEIGHTS_DATA_TYPE', measurements.get(
'weight_data_types', 'fp32'))
measurements['weight_transformations'] = env.get(
- 'CM_ML_MODEL_WEIGHT_TRANSFORMATIONS', measurements.get(
+ 'MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS', measurements.get(
'weight_transformations', 'none'))
os.chdir(output_dir)
@@ -279,7 +279,7 @@ def postprocess(i):
state['app_mlperf_inference_log_summary'][y[0].strip().lower()
] = y[1].strip()
- if env.get("CM_MLPERF_PRINT_SUMMARY", "").lower() not in [
+ if env.get("MLC_MLPERF_PRINT_SUMMARY", "").lower() not in [
"no", "0", "false"]:
print("\n")
print(mlperf_log_summary)
@@ -288,15 +288,15 @@ def postprocess(i):
json.dump(measurements, fp, indent=2)
cm_sut_info = {}
- cm_sut_info['system_name'] = state['CM_SUT_META']['system_name']
- cm_sut_info['implementation'] = env['CM_MLPERF_IMPLEMENTATION']
- cm_sut_info['device'] = env['CM_MLPERF_DEVICE']
- cm_sut_info['framework'] = state['CM_SUT_META']['framework']
- cm_sut_info['run_config'] = env['CM_MLPERF_INFERENCE_SUT_RUN_CONFIG']
- with open(os.path.join(result_sut_folder_path, "cm-sut-info.json"), "w") as fp:
+ cm_sut_info['system_name'] = state['MLC_SUT_META']['system_name']
+ cm_sut_info['implementation'] = env['MLC_MLPERF_IMPLEMENTATION']
+ cm_sut_info['device'] = env['MLC_MLPERF_DEVICE']
+ cm_sut_info['framework'] = state['MLC_SUT_META']['framework']
+ cm_sut_info['run_config'] = env['MLC_MLPERF_INFERENCE_SUT_RUN_CONFIG']
+ with open(os.path.join(result_sut_folder_path, "mlc-sut-info.json"), "w") as fp:
json.dump(cm_sut_info, fp, indent=2)
- system_meta = state['CM_SUT_META']
+ system_meta = state['MLC_SUT_META']
with open("system_meta.json", "w") as fp:
json.dump(system_meta, fp, indent=2)
@@ -312,14 +312,14 @@ def postprocess(i):
state['app_mlperf_inference_measurements'] = copy.deepcopy(
measurements)
- if os.path.exists(env['CM_MLPERF_CONF']):
- shutil.copy(env['CM_MLPERF_CONF'], 'mlperf.conf')
+ if os.path.exists(env['MLC_MLPERF_CONF']):
+ shutil.copy(env['MLC_MLPERF_CONF'], 'mlperf.conf')
- if os.path.exists(env['CM_MLPERF_USER_CONF']):
- shutil.copy(env['CM_MLPERF_USER_CONF'], 'user.conf')
+ if os.path.exists(env['MLC_MLPERF_USER_CONF']):
+ shutil.copy(env['MLC_MLPERF_USER_CONF'], 'user.conf')
result, valid, power_result = mlperf_utils.get_result_from_log(
- env['CM_MLPERF_LAST_RELEASE'], model, scenario, output_dir, mode, env.get('CM_MLPERF_INFERENCE_SOURCE_VERSION'))
+ env['MLC_MLPERF_LAST_RELEASE'], model, scenario, output_dir, mode, env.get('MLC_MLPERF_INFERENCE_SOURCE_VERSION'))
power = None
power_efficiency = None
if power_result:
@@ -328,9 +328,9 @@ def postprocess(i):
power = power_result_split[0]
power_efficiency = power_result_split[1]
- state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']
+ state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']
][model][scenario][mode] = result
- state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']
+ state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']
][model][scenario][mode + '_valid'] = valid.get(mode, False)
state['mlc-mlperf-inference-results-last'][mode] = result
@@ -338,14 +338,14 @@ def postprocess(i):
'_valid'] = valid.get(mode, False)
if power:
- state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']
+ state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']
][model][scenario]['power'] = power
- state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']
+ state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']
][model][scenario]['power_valid'] = valid['power']
state['mlc-mlperf-inference-results-last']['power'] = power
state['mlc-mlperf-inference-results-last']['power_valid'] = valid['power']
if power_efficiency:
- state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']
+ state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']
][model][scenario]['power_efficiency'] = power_efficiency
state['mlc-mlperf-inference-results-last']['power_efficiency'] = power_efficiency
@@ -358,15 +358,15 @@ def postprocess(i):
}
x = ''
- if env.get('CM_HOST_OS_FLAVOR', '') != '':
- x += env['CM_HOST_OS_FLAVOR']
- if env.get('CM_HOST_OS_VERSION', '') != '':
- x += ' ' + env['CM_HOST_OS_VERSION']
+ if env.get('MLC_HOST_OS_FLAVOR', '') != '':
+ x += env['MLC_HOST_OS_FLAVOR']
+ if env.get('MLC_HOST_OS_VERSION', '') != '':
+ x += ' ' + env['MLC_HOST_OS_VERSION']
if x != '':
host_info['os_version_sys'] = x
- if env.get('CM_HOST_SYSTEM_NAME', '') != '':
- host_info['system_name'] = env['CM_HOST_SYSTEM_NAME']
+ if env.get('MLC_HOST_SYSTEM_NAME', '') != '':
+ host_info['system_name'] = env['MLC_HOST_SYSTEM_NAME']
# Check CM automation repository
repo_name = 'mlcommons@mlperf-automations'
@@ -471,22 +471,22 @@ def postprocess(i):
elif mode == "compliance":
- test = env.get("CM_MLPERF_LOADGEN_COMPLIANCE_TEST", "TEST01")
+ test = env.get("MLC_MLPERF_LOADGEN_COMPLIANCE_TEST", "TEST01")
RESULT_DIR = os.path.split(output_dir)[0]
COMPLIANCE_DIR = output_dir
OUTPUT_DIR = os.path.dirname(COMPLIANCE_DIR)
SCRIPT_PATH = os.path.join(
- env['CM_MLPERF_INFERENCE_SOURCE'],
+ env['MLC_MLPERF_INFERENCE_SOURCE'],
"compliance",
"nvidia",
test,
"run_verification.py")
if test == "TEST06":
- cmd = f"{env['CM_PYTHON_BIN_WITH_PATH']} {SCRIPT_PATH} -c {COMPLIANCE_DIR} -o {OUTPUT_DIR} --scenario {scenario} --dtype int32"
+ cmd = f"{env['MLC_PYTHON_BIN_WITH_PATH']} {SCRIPT_PATH} -c {COMPLIANCE_DIR} -o {OUTPUT_DIR} --scenario {scenario} --dtype int32"
else:
- cmd = f"{env['CM_PYTHON_BIN_WITH_PATH']} {SCRIPT_PATH} -r {RESULT_DIR} -c {COMPLIANCE_DIR} -o {OUTPUT_DIR}"
+ cmd = f"{env['MLC_PYTHON_BIN_WITH_PATH']} {SCRIPT_PATH} -r {RESULT_DIR} -c {COMPLIANCE_DIR} -o {OUTPUT_DIR}"
print(cmd)
os.system(cmd)
@@ -496,7 +496,7 @@ def postprocess(i):
run_script_input = i['run_script_input']
automation = i['automation']
- SCRIPT_PATH = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "compliance", "nvidia", test,
+ SCRIPT_PATH = os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "compliance", "nvidia", test,
"create_accuracy_baseline.sh")
TEST01_DIR = os.path.join(OUTPUT_DIR, "TEST01")
OUTPUT_DIR = os.path.join(OUTPUT_DIR, "TEST01", "accuracy")
@@ -529,7 +529,7 @@ def postprocess(i):
baseline_accuracy_file = os.path.join(
TEST01_DIR, "mlperf_log_accuracy_baseline.json")
- CMD = "cd " + ACCURACY_DIR + " && " + env['CM_PYTHON_BIN_WITH_PATH'] + ' ' + accuracy_filepath + accuracy_log_file_option_name + \
+ CMD = "cd " + ACCURACY_DIR + " && " + env['MLC_PYTHON_BIN_WITH_PATH'] + ' ' + accuracy_filepath + accuracy_log_file_option_name + \
baseline_accuracy_file + ' ' + dataset_args + \
datatype_option + out_baseline_accuracy_string
@@ -543,7 +543,7 @@ def postprocess(i):
return {'return': 1,
'error': f"{baseline_accuracy_file} is empty"}
- CMD = "cd " + ACCURACY_DIR + " && " + env['CM_PYTHON_BIN_WITH_PATH'] + ' ' + accuracy_filepath + accuracy_log_file_option_name + \
+ CMD = "cd " + ACCURACY_DIR + " && " + env['MLC_PYTHON_BIN_WITH_PATH'] + ' ' + accuracy_filepath + accuracy_log_file_option_name + \
os.path.join(TEST01_DIR, "mlperf_log_accuracy.json") + \
dataset_args + datatype_option + out_compliance_accuracy_string
@@ -555,17 +555,17 @@ def postprocess(i):
import submission_checker as checker
is_valid = checker.check_compliance_perf_dir(
COMPLIANCE_DIR) if test != "TEST06" else True
- state['mlc-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']
+ state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']
][model][scenario][test] = "passed" if is_valid else "failed"
# portion of the code where the average utilisation and system information are extracted
# NOTE: The section is under development and print statements are added
# for further debugging
- if env.get('CM_PROFILE_NVIDIA_POWER', '') == "on":
+ if env.get('MLC_PROFILE_NVIDIA_POWER', '') == "on":
import pandas as pd
system_utilisation_info_dump = {}
logs_dir = output_dir
- # logs_dir = env.get('CM_LOGS_DIR', env['CM_RUN_DIR'])
+ # logs_dir = env.get('MLC_LOGS_DIR', env['MLC_RUN_DIR'])
sys_utilisation_log = pd.read_csv(
os.path.join(
logs_dir,
@@ -609,11 +609,11 @@ def postprocess(i):
if state.get(
'mlperf-inference-implementation') and state['mlperf-inference-implementation'].get('version_info'):
- env['CM_MLPERF_RUN_JSON_VERSION_INFO_FILE'] = os.path.join(
+ env['MLC_MLPERF_RUN_JSON_VERSION_INFO_FILE'] = os.path.join(
output_dir, "mlc-version-info.json")
- env['CM_MLPERF_RUN_DEPS_GRAPH'] = os.path.join(
+ env['MLC_MLPERF_RUN_DEPS_GRAPH'] = os.path.join(
output_dir, "mlc-deps.png")
- env['CM_MLPERF_RUN_DEPS_MERMAID'] = os.path.join(
+ env['MLC_MLPERF_RUN_DEPS_MERMAID'] = os.path.join(
output_dir, "mlc-deps.mmd")
with open(os.path.join(output_dir, "mlc-version-info.json"), "w") as f:
f.write(
@@ -621,7 +621,7 @@ def postprocess(i):
state['mlperf-inference-implementation']['version_info'],
indent=2))
- if env.get('CM_DUMP_SYSTEM_INFO', True):
+ if env.get('MLC_DUMP_SYSTEM_INFO', True):
dump_script_output(
"detect,os",
env,
@@ -638,8 +638,8 @@ def postprocess(i):
os.path.join(
output_dir,
"cpu_info.json"))
- env['CM_DUMP_RAW_PIP_FREEZE_FILE_PATH'] = os.path.join(
- env['CM_MLPERF_OUTPUT_DIR'], "pip_freeze.raw")
+ env['MLC_DUMP_RAW_PIP_FREEZE_FILE_PATH'] = os.path.join(
+ env['MLC_MLPERF_OUTPUT_DIR'], "pip_freeze.raw")
dump_script_output(
"dump,pip,freeze",
env,
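Scripts like the customize.py above read dozens of renamed keys, so a transition shim that tolerates the legacy spellings is one way to stage such a migration. Below is a minimal sketch over a plain `env` dict; note that this PR performs a hard rename and ships no such fallback:

```python
def get_env(env, key, default=''):
    # Migration shim sketch: prefer the new MLC_ spelling, but fall back to
    # the legacy CM_ spelling if a caller still exports it. Hypothetical;
    # this PR performs a hard rename instead of shipping such a fallback.
    if key in env:
        return env[key]
    if key.startswith("MLC_"):
        return env.get("CM_" + key[len("MLC_"):], default)
    return default

# Usage: mode = get_env(env, 'MLC_MLPERF_LOADGEN_MODE', 'accuracy')
```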
diff --git a/script/app-mlperf-inference/meta.yaml b/script/app-mlperf-inference/meta.yaml
index bf5057814..305535b49 100644
--- a/script/app-mlperf-inference/meta.yaml
+++ b/script/app-mlperf-inference/meta.yaml
@@ -21,62 +21,62 @@ tags:
# Default environment
default_env:
- CM_MLPERF_LOADGEN_MODE: accuracy
- CM_MLPERF_LOADGEN_SCENARIO: Offline
- CM_OUTPUT_FOLDER_NAME: test_results
- CM_MLPERF_RUN_STYLE: test
- CM_TEST_QUERY_COUNT: '10'
- CM_MLPERF_QUANTIZATION: off
- CM_GET_PLATFORM_DETAILS: yes
+ MLC_MLPERF_LOADGEN_MODE: accuracy
+ MLC_MLPERF_LOADGEN_SCENARIO: Offline
+ MLC_OUTPUT_FOLDER_NAME: test_results
+ MLC_MLPERF_RUN_STYLE: test
+ MLC_TEST_QUERY_COUNT: '10'
+ MLC_MLPERF_QUANTIZATION: off
+ MLC_GET_PLATFORM_DETAILS: yes
env:
- CM_MLPERF_PRINT_SUMMARY: "no"
- CM_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'no'
+ MLC_MLPERF_PRINT_SUMMARY: "no"
+ MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'no'
# Map script inputs to environment variables
input_mapping:
- count: CM_MLPERF_LOADGEN_QUERY_COUNT
- docker: CM_RUN_DOCKER_CONTAINER
- hw_name: CM_HW_NAME
+ count: MLC_MLPERF_LOADGEN_QUERY_COUNT
+ docker: MLC_RUN_DOCKER_CONTAINER
+ hw_name: MLC_HW_NAME
imagenet_path: IMAGENET_PATH
- max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE
- mode: CM_MLPERF_LOADGEN_MODE
- num_threads: CM_NUM_THREADS
+ max_batchsize: MLC_MLPERF_LOADGEN_MAX_BATCHSIZE
+ mode: MLC_MLPERF_LOADGEN_MODE
+ num_threads: MLC_NUM_THREADS
output_dir: OUTPUT_BASE_DIR
- power: CM_MLPERF_POWER
- power_server: CM_MLPERF_POWER_SERVER_ADDRESS
- ntp_server: CM_MLPERF_POWER_NTP_SERVER
- max_amps: CM_MLPERF_POWER_MAX_AMPS
- max_volts: CM_MLPERF_POWER_MAX_VOLTS
- regenerate_files: CM_REGENERATE_MEASURE_FILES
- rerun: CM_RERUN
- scenario: CM_MLPERF_LOADGEN_SCENARIO
- test_query_count: CM_TEST_QUERY_COUNT
- clean: CM_MLPERF_CLEAN_SUBMISSION_DIR
- target_qps: CM_MLPERF_LOADGEN_TARGET_QPS
- target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY
- offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
- server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS
- singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
- multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
- readme: CM_MLPERF_README
- debug: CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM
- gpu_name: CM_NVIDIA_GPU_NAME
- nvidia_llama2_dataset_file_path: CM_NVIDIA_LLAMA_DATASET_FILE_PATH
- tp_size: CM_NVIDIA_TP_SIZE
- use_dataset_from_host: CM_USE_DATASET_FROM_HOST
+ power: MLC_MLPERF_POWER
+ power_server: MLC_MLPERF_POWER_SERVER_ADDRESS
+ ntp_server: MLC_MLPERF_POWER_NTP_SERVER
+ max_amps: MLC_MLPERF_POWER_MAX_AMPS
+ max_volts: MLC_MLPERF_POWER_MAX_VOLTS
+ regenerate_files: MLC_REGENERATE_MEASURE_FILES
+ rerun: MLC_RERUN
+ scenario: MLC_MLPERF_LOADGEN_SCENARIO
+ test_query_count: MLC_TEST_QUERY_COUNT
+ clean: MLC_MLPERF_CLEAN_SUBMISSION_DIR
+ target_qps: MLC_MLPERF_LOADGEN_TARGET_QPS
+ target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY
+ offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
+ server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS
+ singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
+ multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
+ readme: MLC_MLPERF_README
+ debug: MLC_DEBUG_SCRIPT_BENCHMARK_PROGRAM
+ gpu_name: MLC_NVIDIA_GPU_NAME
+ nvidia_llama2_dataset_file_path: MLC_NVIDIA_LLAMA_DATASET_FILE_PATH
+ tp_size: MLC_NVIDIA_TP_SIZE
+ use_dataset_from_host: MLC_USE_DATASET_FROM_HOST
predeps: False
# Duplicate CM environment variables to the ones used in native apps
env_key_mappings:
- CM_HOST_: HOST_
- CM_ML_: ML_
- CM_MLPERF_TVM: MLPERF_TVM
+ MLC_HOST_: HOST_
+ MLC_ML_: ML_
+ MLC_MLPERF_TVM: MLPERF_TVM
# Env keys which are exposed to higher level scripts
new_env_keys:
- - CM_MLPERF_*
+ - MLC_MLPERF_*
new_state_keys:
- app_mlperf_inference_*
@@ -107,38 +107,38 @@ deps:
- inference-src
- tags: pull,git,repo
env:
- CM_GIT_CHECKOUT_PATH: '<<>>'
+ MLC_GIT_CHECKOUT_PATH: '<<>>'
enable_if_env:
- CM_MLPERF_INFERENCE_PULL_SRC_CHANGES:
+ MLC_MLPERF_INFERENCE_PULL_SRC_CHANGES:
- 'yes'
- tags: get,mlperf,inference,utils
- tags: install,pip-package,for-cmind-python,_package.pandas
enable_if_env:
- CM_PROFILE_NVIDIA_POWER:
+ MLC_PROFILE_NVIDIA_POWER:
- on
posthook_deps:
- tags: get,mlperf,sut,description #populate system meta information like framework
- tags: get,platform,details
enable_if_any_env:
- CM_GET_PLATFORM_DETAILS:
+ MLC_GET_PLATFORM_DETAILS:
- yes
skip_if_env:
- CM_MLPERF_LOADGEN_MODE:
+ MLC_MLPERF_LOADGEN_MODE:
- accuracy
env:
- CM_PLATFORM_DETAILS_FILE_PATH: '<<>>/system_info.txt'
+ MLC_PLATFORM_DETAILS_FILE_PATH: '<<>>/system_info.txt'
post_deps:
- tags: draw,graph,from-json
enable_if_env:
- CM_MLPERF_RUN_JSON_VERSION_INFO_FILE:
+ MLC_MLPERF_RUN_JSON_VERSION_INFO_FILE:
- on
env:
- CM_JSON_INPUT_FILE: <<>>
- CM_OUTPUT_IMAGE_PATH: <<>>
- CM_OUTPUT_MERMAID_PATH: <<>>
+ MLC_JSON_INPUT_FILE: <<>>
+ MLC_OUTPUT_IMAGE_PATH: <<>>
+ MLC_OUTPUT_MERMAID_PATH: <<>>
# Order of variations for documentation
variation_groups_order:
@@ -160,17 +160,17 @@ variations:
imagenet-accuracy-script:
tags: _int64
env:
- CM_MLPERF_CPP: 'yes'
- CM_MLPERF_IMPLEMENTATION: mlcommons_cpp
- CM_IMAGENET_ACCURACY_DTYPE: float32
- CM_OPENIMAGES_ACCURACY_DTYPE: float32
+ MLC_MLPERF_CPP: 'yes'
+ MLC_MLPERF_IMPLEMENTATION: mlcommons_cpp
+ MLC_IMAGENET_ACCURACY_DTYPE: float32
+ MLC_OPENIMAGES_ACCURACY_DTYPE: float32
prehook_deps:
- names:
- cpp-mlperf-inference
- mlperf-inference-implementation
tags: app,mlperf,cpp,inference
skip_if_env:
- CM_SKIP_RUN:
+ MLC_SKIP_RUN:
- yes
mil:
@@ -192,17 +192,17 @@ variations:
imagenet-accuracy-script:
tags: _float32
env:
- CM_MLPERF_TFLITE_CPP: 'yes'
- CM_MLPERF_CPP: 'yes'
- CM_MLPERF_IMPLEMENTATION: ctuning_cpp_tflite
- CM_IMAGENET_ACCURACY_DTYPE: float32
+ MLC_MLPERF_TFLITE_CPP: 'yes'
+ MLC_MLPERF_CPP: 'yes'
+ MLC_MLPERF_IMPLEMENTATION: ctuning_cpp_tflite
+ MLC_IMAGENET_ACCURACY_DTYPE: float32
prehook_deps:
- names:
- tflite-cpp-mlperf-inference
- mlperf-inference-implementation
tags: app,mlperf,tflite-cpp,inference
skip_if_env:
- CM_SKIP_RUN:
+ MLC_SKIP_RUN:
- yes
reference:
@@ -224,20 +224,20 @@ variations:
llama3_1-405b-accuracy-script:
tags: _int32
env:
- CM_MLPERF_PYTHON: 'yes'
- CM_MLPERF_IMPLEMENTATION: mlcommons_python
- CM_SQUAD_ACCURACY_DTYPE: float32
- CM_IMAGENET_ACCURACY_DTYPE: float32
- CM_OPENIMAGES_ACCURACY_DTYPE: float32
- CM_LIBRISPEECH_ACCURACY_DTYPE: float32
- CM_CNNDM_ACCURACY_DTYPE: int32
+ MLC_MLPERF_PYTHON: 'yes'
+ MLC_MLPERF_IMPLEMENTATION: mlcommons_python
+ MLC_SQUAD_ACCURACY_DTYPE: float32
+ MLC_IMAGENET_ACCURACY_DTYPE: float32
+ MLC_OPENIMAGES_ACCURACY_DTYPE: float32
+ MLC_LIBRISPEECH_ACCURACY_DTYPE: float32
+ MLC_CNNDM_ACCURACY_DTYPE: int32
prehook_deps:
- names:
- python-reference-mlperf-inference
- mlperf-inference-implementation
tags: app,mlperf,reference,inference
skip_if_env:
- CM_SKIP_RUN:
+ MLC_SKIP_RUN:
- yes
neuralmagic:
@@ -299,36 +299,36 @@ variations:
deps:
- tags: get,ml-model,gptj,raw
skip_if_env:
- CM_MLPERF_NVIDIA_SKIP_GPTJ:
+ MLC_MLPERF_NVIDIA_SKIP_GPTJ:
- "yes"
- tags: get,ml-model,gptj,_nvidia,_fp8
skip_if_env:
- CM_MLPERF_NVIDIA_SKIP_GPTJ:
+ MLC_MLPERF_NVIDIA_SKIP_GPTJ:
- "yes"
- tags: get,ml-model,llama2-70b,_nvidia,_fp8
update_tags_from_env_with_prefix:
_tp-size.:
- - CM_NVIDIA_TP_SIZE
+ - MLC_NVIDIA_TP_SIZE
skip_if_env:
- CM_MLPERF_NVIDIA_SKIP_LLAMA2_70B:
+ MLC_MLPERF_NVIDIA_SKIP_LLAMA2_70B:
- "yes"
- tags: get,dataset,imagenet,validation,original,_full
names:
- imagenet-original
- dataset-original
skip_if_env:
- CM_MLPERF_NVIDIA_SKIP_RESNET50:
+ MLC_MLPERF_NVIDIA_SKIP_RESNET50:
- "yes"
- tags: get,dlrm,data,mlperf,inference,_nvidia
skip_if_env:
- CM_MLPERF_NVIDIA_SKIP_DLRM:
+ MLC_MLPERF_NVIDIA_SKIP_DLRM:
- "yes"
- enable_if_env:
- CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST:
+ MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST:
- 'yes'
tags: get,ml-model,sdxl,_fp16,_rclone
skip_if_env:
- CM_MLPERF_NVIDIA_SKIP_SDXL:
+ MLC_MLPERF_NVIDIA_SKIP_SDXL:
- "yes"
env:
BUILD_TRTLLM: 1
@@ -340,20 +340,20 @@ variations:
image_name: mlperf-inference-nvidia-v4.1-dev-common
update_meta_if_env:
- enable_if_env:
- CM_HOST_PLATFORM_FLAVOR:
+ MLC_HOST_PLATFORM_FLAVOR:
- x86_64
docker:
base_image: nvcr.io/nvidia/mlperf/mlperf-inference:mlpinf-v4.0-cuda12.2-cudnn8.9-x86_64-ubuntu20.04-public
env:
- CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3b0-cp38-cp38-linux_x86_64.whl'
+ MLC_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3b0-cp38-cp38-linux_x86_64.whl'
- skip_if_env:
- CM_HOST_PLATFORM_FLAVOR:
+ MLC_HOST_PLATFORM_FLAVOR:
- x86_64
docker:
base_image: nvcr.io/nvidia/mlperf/mlperf-inference:mlpinf-v4.1-cuda12.4-pytorch24.04-ubuntu22.04-aarch64-GraceHopper-release
env:
- CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3b0-cp310-cp310-linux_aarch64.whl'
+ MLC_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3b0-cp310-cp310-linux_aarch64.whl'
@@ -376,7 +376,7 @@ variations:
- tags: get,ml-model,gptj,_nvidia,_fp8
update_tags_from_env_with_prefix:
_tp-size.:
- - CM_NVIDIA_TP_SIZE
+ - MLC_NVIDIA_TP_SIZE
nvidia-original,r4.1_default:
docker:
@@ -388,7 +388,7 @@ variations:
- tags: get,ml-model,gptj,_nvidia,_fp8
update_tags_from_env_with_prefix:
_tp-size.:
- - CM_NVIDIA_TP_SIZE
+ - MLC_NVIDIA_TP_SIZE
nvidia-original,r4.1-dev_default,llama2-70b_:
@@ -398,7 +398,7 @@ variations:
- tags: get,ml-model,llama2-70b,_nvidia,_fp8
update_tags_from_env_with_prefix:
_tp-size.:
- - CM_NVIDIA_TP_SIZE
+ - MLC_NVIDIA_TP_SIZE
env:
BUILD_TRTLLM: 1
@@ -408,7 +408,7 @@ variations:
- tags: get,ml-model,llama2-70b,_nvidia,_fp8
update_tags_from_env_with_prefix:
_tp-size.:
- - CM_NVIDIA_TP_SIZE
+ - MLC_NVIDIA_TP_SIZE
env:
BUILD_TRTLLM: 1
@@ -425,19 +425,19 @@ variations:
- mlperf-inference-nvidia-scratch-space
- tags: get,nvidia-docker
skip_if_env:
- CM_SKIP_GET_NVIDIA_DOCKER:
+ MLC_SKIP_GET_NVIDIA_DOCKER:
- yes
mounts:
- - "${{ CM_CUDNN_TAR_FILE_PATH }}:${{ CM_CUDNN_TAR_FILE_PATH }}"
- - "${{ CM_TENSORRT_TAR_FILE_PATH }}:${{ CM_TENSORRT_TAR_FILE_PATH }}"
+ - "${{ MLC_CUDNN_TAR_FILE_PATH }}:${{ MLC_CUDNN_TAR_FILE_PATH }}"
+ - "${{ MLC_TENSORRT_TAR_FILE_PATH }}:${{ MLC_TENSORRT_TAR_FILE_PATH }}"
- "${{ CUDA_RUN_FILE_LOCAL_PATH }}:${{ CUDA_RUN_FILE_LOCAL_PATH }}"
- "${{ MLPERF_SCRATCH_PATH }}:${{ MLPERF_SCRATCH_PATH }}"
update_meta_if_env:
- enable_if_env:
- CM_HOST_OS_FLAVOR:
+ MLC_HOST_OS_FLAVOR:
- ubuntu
- CM_HOST_OS_VERSION:
+ MLC_HOST_OS_VERSION:
- 20.04
docker:
extra_run_args: ' --runtime=nvidia --cap-add SYS_ADMIN --cap-add SYS_TIME --security-opt apparmor=unconfined --security-opt seccomp=unconfined'
@@ -459,12 +459,12 @@ variations:
tags: _int32
env:
BUILD_TRTLLM: 0
- CM_MLPERF_IMPLEMENTATION: nvidia
- CM_SQUAD_ACCURACY_DTYPE: float16
- CM_IMAGENET_ACCURACY_DTYPE: int32
- CM_CNNDM_ACCURACY_DTYPE: int32
- CM_LIBRISPEECH_ACCURACY_DTYPE: int8
- CM_DOCKER_USE_VIRTUAL_PYTHON: no
+ MLC_MLPERF_IMPLEMENTATION: nvidia
+ MLC_SQUAD_ACCURACY_DTYPE: float16
+ MLC_IMAGENET_ACCURACY_DTYPE: int32
+ MLC_CNNDM_ACCURACY_DTYPE: int32
+ MLC_LIBRISPEECH_ACCURACY_DTYPE: int8
+ MLC_DOCKER_USE_VIRTUAL_PYTHON: no
prehook_deps:
- names:
- nvidia-original-mlperf-inference
@@ -472,13 +472,13 @@ variations:
- mlperf-inference-implementation
tags: reproduce,mlperf,nvidia,inference,_run_harness
skip_if_env:
- CM_SKIP_RUN:
+ MLC_SKIP_RUN:
- yes
update_tags_from_env_with_prefix:
"_gpu_memory." :
- - CM_NVIDIA_GPU_MEMORY
+ - MLC_NVIDIA_GPU_MEMORY
update_tags_from_env:
- - CM_NVIDIA_HARNESS_GPU_VARIATION
+ - MLC_NVIDIA_HARNESS_GPU_VARIATION
intel:
alias: intel-original
@@ -490,7 +490,7 @@ variations:
interactive: True
extra_run_args: ' --privileged'
mounts:
- - "${{ CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH }}:${{ CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH }}"
+ - "${{ MLC_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH }}:${{ MLC_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH }}"
- "${{ GPTJ_CHECKPOINT_PATH }}:${{ GPTJ_CHECKPOINT_PATH }}"
skip_run_cmd: 'no'
shm_size: '32gb'
@@ -500,7 +500,7 @@ variations:
docker_input_mapping:
criteo_preprocessed_path: CRITEO_PREPROCESSED_PATH
dlrm_data_path: DLRM_DATA_PATH
- intel_gptj_int8_model_path: CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH
+ intel_gptj_int8_model_path: MLC_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH
default_variations:
device: cpu
backend: pytorch
@@ -512,10 +512,10 @@ variations:
- mlperf-inference-implementation
tags: reproduce,mlperf,inference,intel
skip_if_env:
- CM_SKIP_RUN:
+ MLC_SKIP_RUN:
- yes
env:
- CM_MLPERF_IMPLEMENTATION: intel
+ MLC_MLPERF_IMPLEMENTATION: intel
intel-original,gptj_:
adr:
@@ -552,10 +552,10 @@ variations:
- mlperf-inference-implementation
tags: reproduce,mlperf,inference,amd
skip_if_env:
- CM_SKIP_RUN:
+ MLC_SKIP_RUN:
- yes
env:
- CM_MLPERF_IMPLEMENTATION: amd
+ MLC_MLPERF_IMPLEMENTATION: amd
redhat:
group:
@@ -571,10 +571,10 @@ variations:
- mlperf-inference-implementation
tags: reproduce,mlperf,inference,redhat
skip_if_env:
- CM_SKIP_RUN:
+ MLC_SKIP_RUN:
- yes
env:
- CM_MLPERF_IMPLEMENTATION: redhat
+ MLC_MLPERF_IMPLEMENTATION: redhat
docker:
interactive: True
@@ -595,10 +595,10 @@ variations:
- mlperf-inference-implementation
tags: reproduce,mlperf,inference,kilt
skip_if_env:
- CM_SKIP_RUN:
+ MLC_SKIP_RUN:
- yes
env:
- CM_MLPERF_IMPLEMENTATION: qualcomm
+ MLC_MLPERF_IMPLEMENTATION: qualcomm
docker:
interactive: True
@@ -651,7 +651,7 @@ variations:
default:
true
env:
- CM_MODEL:
+ MLC_MODEL:
resnet50
deps:
- tags: get,dataset-aux,imagenet-aux
@@ -660,10 +660,10 @@ variations:
tags: _resnet50
posthook_deps:
- enable_if_env:
- CM_MLPERF_LOADGEN_MODE:
+ MLC_MLPERF_LOADGEN_MODE:
- accuracy
- all
- CM_MLPERF_ACCURACY_RESULTS_DIR:
+ MLC_MLPERF_ACCURACY_RESULTS_DIR:
- 'on'
names:
- mlperf-accuracy-script
@@ -673,7 +673,7 @@ variations:
deps:
- tags: get,dataset,imagenet,validation,original,_full
enable_if_env:
- CM_USE_DATASET_FROM_HOST:
+ MLC_USE_DATASET_FROM_HOST:
- 'yes'
names:
- imagenet-original
@@ -683,17 +683,17 @@ variations:
group:
model
env:
- CM_MODEL:
+ MLC_MODEL:
retinanet
add_deps_recursive:
mlperf-inference-implementation:
tags: _retinanet
posthook_deps:
- enable_if_env:
- CM_MLPERF_LOADGEN_MODE:
+ MLC_MLPERF_LOADGEN_MODE:
- accuracy
- all
- CM_MLPERF_ACCURACY_RESULTS_DIR:
+ MLC_MLPERF_ACCURACY_RESULTS_DIR:
- 'on'
names:
- mlperf-accuracy-script
@@ -705,13 +705,13 @@ variations:
- names:
- openimages-original
enable_if_env:
- CM_USE_DATASET_FROM_HOST:
+ MLC_USE_DATASET_FROM_HOST:
- 'yes'
tags: get,dataset,original,openimages,_validation,_full,_custom-annotations
- names:
- openimages-calibration
enable_if_env:
- CM_USE_DATASET_FROM_HOST:
+ MLC_USE_DATASET_FROM_HOST:
- 'yes'
tags: get,dataset,original,openimages,_calibration
@@ -721,7 +721,7 @@ variations:
base:
- 3d-unet_
env:
- CM_MODEL:
+ MLC_MODEL:
3d-unet-99
add_deps_recursive:
mlperf-inference-implementation:
@@ -733,7 +733,7 @@ variations:
base:
- 3d-unet_
env:
- CM_MODEL:
+ MLC_MODEL:
3d-unet-99.9
add_deps_recursive:
mlperf-inference-implementation:
@@ -741,18 +741,18 @@ variations:
3d-unet_:
default_env:
- CM_MLPERF_INFERENCE_TEST_QPS: "0.01"
+ MLC_MLPERF_INFERENCE_TEST_QPS: "0.01"
env:
- CM_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'yes'
+ MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'yes'
posthook_deps:
- enable_if_env:
- CM_MLPERF_LOADGEN_MODE:
+ MLC_MLPERF_LOADGEN_MODE:
- accuracy
- all
- CM_MLPERF_ACCURACY_RESULTS_DIR:
+ MLC_MLPERF_ACCURACY_RESULTS_DIR:
- 'on'
skip_if_env:
- CM_MLPERF_IMPLEMENTATION:
+ MLC_MLPERF_IMPLEMENTATION:
- nvidia
names:
- mlperf-accuracy-script
@@ -764,7 +764,7 @@ variations:
image_name: mlperf-inference-mlcommons-python-implementation-3d-unet
deps:
- enable_if_env:
- CM_MLPERF_DATASET_3DUNET_DOWNLOAD_TO_HOST:
+ MLC_MLPERF_DATASET_3DUNET_DOWNLOAD_TO_HOST:
- 'yes'
tags: get,dataset,kits19,preprocessed
@@ -775,17 +775,17 @@ variations:
mlperf-inference-implementation:
tags: _rgat
env:
- CM_MODEL:
+ MLC_MODEL:
rgat
posthook_deps:
- enable_if_env:
- CM_MLPERF_LOADGEN_MODE:
+ MLC_MLPERF_LOADGEN_MODE:
- accuracy
- all
- CM_MLPERF_ACCURACY_RESULTS_DIR:
+ MLC_MLPERF_ACCURACY_RESULTS_DIR:
- 'on'
skip_if_env:
- CM_MLPERF_IMPLEMENTATION:
+ MLC_MLPERF_IMPLEMENTATION:
- nvidia
names:
- mlperf-accuracy-script
@@ -795,7 +795,7 @@ variations:
deps:
- tags: get,dataset,igbh
enable_if_env:
- CM_USE_DATASET_FROM_HOST:
+ MLC_USE_DATASET_FROM_HOST:
- 'yes'
names:
- igbh-original
@@ -808,17 +808,17 @@ variations:
mlperf-inference-implementation:
tags: _llama3_1-405b
env:
- CM_MODEL:
+ MLC_MODEL:
llama3_1-405b
posthook_deps:
- enable_if_env:
- CM_MLPERF_LOADGEN_MODE:
+ MLC_MLPERF_LOADGEN_MODE:
- accuracy
- all
- CM_MLPERF_ACCURACY_RESULTS_DIR:
+ MLC_MLPERF_ACCURACY_RESULTS_DIR:
- 'on'
skip_if_env:
- CM_MLPERF_IMPLEMENTATION:
+ MLC_MLPERF_IMPLEMENTATION:
- nvidia
names:
- mlperf-accuracy-script
@@ -828,7 +828,7 @@ variations:
deps:
- tags: get,ml-model,llama3
enable_if_env:
- CM_USE_DATASET_FROM_HOST:
+ MLC_USE_DATASET_FROM_HOST:
- 'yes'
names:
- llama3_1-405b
@@ -839,8 +839,8 @@ variations:
group:
model
env:
- CM_MODEL: stable-diffusion-xl
- CM_MLPERF_INFERENCE_TEST_QPS: "0.05"
+ MLC_MODEL: stable-diffusion-xl
+ MLC_MLPERF_INFERENCE_TEST_QPS: "0.05"
default_variations:
precision: float32
add_deps_recursive:
@@ -848,10 +848,10 @@ variations:
tags: _sdxl
posthook_deps:
- enable_if_env:
- CM_MLPERF_LOADGEN_MODE:
+ MLC_MLPERF_LOADGEN_MODE:
- accuracy
- all
- CM_MLPERF_ACCURACY_RESULTS_DIR:
+ MLC_MLPERF_ACCURACY_RESULTS_DIR:
- 'on'
names:
- mlperf-accuracy-script
@@ -862,9 +862,9 @@ variations:
docker:
deps:
- enable_if_any_env:
- CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST:
+ MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST:
- 'yes'
- CM_USE_MODEL_FROM_HOST:
+ MLC_USE_MODEL_FROM_HOST:
- 'yes'
tags: get,ml-model,sdxl,_fp16,_rclone
@@ -873,9 +873,9 @@ variations:
image_name: mlperf-inference-mlcommons-python-implementation-sdxl-float16
deps:
- enable_if_any_env:
- CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST:
+ MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST:
- 'yes'
- CM_USE_MODEL_FROM_HOST:
+ MLC_USE_MODEL_FROM_HOST:
- 'yes'
tags: get,ml-model,sdxl,_fp16,_rclone
@@ -884,9 +884,9 @@ variations:
image_name: mlperf-inference-mlcommons-python-implementation-sdxl-bfloat16
deps:
- enable_if_any_env:
- CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST:
+ MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST:
- 'yes'
- CM_USE_MODEL_FROM_HOST:
+ MLC_USE_MODEL_FROM_HOST:
- 'yes'
tags: get,ml-model,sdxl,_fp16,_rclone
@@ -895,26 +895,26 @@ variations:
image_name: mlperf-inference-mlcommons-python-implementation-sdxl-float32
deps:
- enable_if_any_env:
- CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST:
+ MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST:
- 'yes'
- CM_USE_MODEL_FROM_HOST:
+ MLC_USE_MODEL_FROM_HOST:
- 'yes'
tags: get,ml-model,sdxl,_fp32,_rclone
llama2-70b_:
default_env:
- CM_MLPERF_INFERENCE_TEST_QPS: "0.01"
+ MLC_MLPERF_INFERENCE_TEST_QPS: "0.01"
env:
- CM_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'yes'
+ MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'yes'
posthook_deps:
- enable_if_env:
- CM_MLPERF_LOADGEN_MODE:
+ MLC_MLPERF_LOADGEN_MODE:
- accuracy
- all
- CM_MLPERF_ACCURACY_RESULTS_DIR:
+ MLC_MLPERF_ACCURACY_RESULTS_DIR:
- 'on'
skip_if_env:
- CM_MLPERF_IMPLEMENTATION:
+ MLC_MLPERF_IMPLEMENTATION:
- nvidia
names:
- mlperf-accuracy-script
@@ -927,7 +927,7 @@ variations:
base:
- llama2-70b_
env:
- CM_MODEL:
+ MLC_MODEL:
llama2-70b-99
add_deps_recursive:
mlperf-inference-implementation:
@@ -939,7 +939,7 @@ variations:
base:
- llama2-70b_
env:
- CM_MODEL:
+ MLC_MODEL:
llama2-70b-99.9
add_deps_recursive:
mlperf-inference-implementation:
@@ -950,9 +950,9 @@ variations:
image_name: mlperf-inference-mlcommons-python-implementation-llama2-70b
deps:
- enable_if_any_env:
- CM_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST:
+ MLC_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST:
- 'yes'
- CM_USE_MODEL_FROM_HOST:
+ MLC_USE_MODEL_FROM_HOST:
- 'yes'
tags: get,ml-model,llama2
@@ -960,12 +960,12 @@ variations:
docker:
image_name: mlperf-inference-amd-python-implementation-llama2-70b
mounts:
- - "${{ CM_LLAMA2_FINAL_SAFE_TENSORS_PATH }}:${{ CM_LLAMA2_FINAL_SAFE_TENSORS_PATH }"
+ - "${{ MLC_LLAMA2_FINAL_SAFE_TENSORS_PATH }}:${{ MLC_LLAMA2_FINAL_SAFE_TENSORS_PATH }"
deps:
- enable_if_any_env:
- CM_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST:
+ MLC_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST:
- 'yes'
- CM_USE_MODEL_FROM_HOST:
+ MLC_USE_MODEL_FROM_HOST:
- 'yes'
tags: get,ml-model,llama2,_amd,_pytorch
@@ -975,22 +975,22 @@ variations:
base:
- mixtral-8x7b
env:
- CM_MODEL:
+ MLC_MODEL:
mixtral-8x7b
add_deps_recursive:
mlperf-inference-implementation:
tags: _mixtral-8x7b
env:
- CM_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'yes'
+ MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'yes'
posthook_deps:
- enable_if_env:
- CM_MLPERF_LOADGEN_MODE:
+ MLC_MLPERF_LOADGEN_MODE:
- accuracy
- all
- CM_MLPERF_ACCURACY_RESULTS_DIR:
+ MLC_MLPERF_ACCURACY_RESULTS_DIR:
- 'on'
skip_if_env:
- CM_MLPERF_IMPLEMENTATION:
+ MLC_MLPERF_IMPLEMENTATION:
- nvidia
names:
- mlperf-accuracy-script
@@ -1006,38 +1006,38 @@ variations:
- ml-model
- mixtral-model
enable_if_any_env:
- CM_MLPERF_MODEL_MIXTRAL_8X7B_DOWNLOAD_TO_HOST:
+ MLC_MLPERF_MODEL_MIXTRAL_8X7B_DOWNLOAD_TO_HOST:
- 'yes'
- CM_USE_MODEL_FROM_HOST:
+ MLC_USE_MODEL_FROM_HOST:
- 'yes'
- tags: get,dataset-mixtral,openorca-mbxp-gsm8k-combined
names:
- openorca-mbxp-gsm8k-combined-preprocessed
enable_if_env:
- CM_MLPERF_DATASET_MIXTRAL_8X7B_DOWNLOAD_TO_HOST:
+ MLC_MLPERF_DATASET_MIXTRAL_8X7B_DOWNLOAD_TO_HOST:
- 'yes'
mounts:
- "${{ MIXTRAL_CHECKPOINT_PATH }}:${{ MIXTRAL_CHECKPOINT_PATH }}"
- - "${{ CM_DATASET_MIXTRAL_PREPROCESSED_PATH }}:${{ CM_DATASET_MIXTRAL_PREPROCESSED_PATH }}"
+ - "${{ MLC_DATASET_MIXTRAL_PREPROCESSED_PATH }}:${{ MLC_DATASET_MIXTRAL_PREPROCESSED_PATH }}"
rnnt:
group:
model
env:
- CM_MODEL:
+ MLC_MODEL:
rnnt
add_deps_recursive:
mlperf-inference-implementation:
tags: _rnnt
posthook_deps:
- enable_if_env:
- CM_MLPERF_LOADGEN_MODE:
+ MLC_MLPERF_LOADGEN_MODE:
- accuracy
- all
- CM_MLPERF_ACCURACY_RESULTS_DIR:
+ MLC_MLPERF_ACCURACY_RESULTS_DIR:
- 'on'
skip_if_env:
- CM_MLPERF_IMPLEMENTATION:
+ MLC_MLPERF_IMPLEMENTATION:
- nvidia
names:
- mlperf-accuracy-script
@@ -1046,7 +1046,7 @@ variations:
rnnt,reference:
env:
- CM_MLPERF_PRINT_SUMMARY: "no"
+ MLC_MLPERF_PRINT_SUMMARY: "no"
gptj-99:
group:
@@ -1054,7 +1054,7 @@ variations:
base:
- gptj_
env:
- CM_MODEL:
+ MLC_MODEL:
gptj-99
add_deps_recursive:
mlperf-inference-implementation:
@@ -1066,7 +1066,7 @@ variations:
base:
- gptj_
env:
- CM_MODEL:
+ MLC_MODEL:
gptj-99.9
add_deps_recursive:
mlperf-inference-implementation:
@@ -1080,13 +1080,13 @@ variations:
deps:
- tags: get,ml-model,gptj,raw
env:
- CM_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'yes'
+ MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'yes'
posthook_deps:
- enable_if_env:
- CM_MLPERF_LOADGEN_MODE:
+ MLC_MLPERF_LOADGEN_MODE:
- accuracy
- all
- CM_MLPERF_ACCURACY_RESULTS_DIR:
+ MLC_MLPERF_ACCURACY_RESULTS_DIR:
- 'on'
names:
- cnndm-accuracy-script
@@ -1096,17 +1096,17 @@ variations:
bert_:
deps:
- skip_if_env:
- CM_DATASET_SQUAD_VAL_PATH: "on"
+ MLC_DATASET_SQUAD_VAL_PATH: "on"
tags: get,dataset,squad,language-processing
- skip_if_env:
- CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH: "on"
+ MLC_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH: "on"
tags: get,dataset-aux,squad-vocab
posthook_deps:
- enable_if_env:
- CM_MLPERF_LOADGEN_MODE:
+ MLC_MLPERF_LOADGEN_MODE:
- accuracy
- all
- CM_MLPERF_ACCURACY_RESULTS_DIR:
+ MLC_MLPERF_ACCURACY_RESULTS_DIR:
- 'on'
names:
- squad-accuracy-script
@@ -1122,7 +1122,7 @@ variations:
base:
- bert_
env:
- CM_MODEL:
+ MLC_MODEL:
bert-99
add_deps_recursive:
mlperf-inference-implementation:
@@ -1134,7 +1134,7 @@ variations:
base:
- bert_
env:
- CM_MODEL:
+ MLC_MODEL:
bert-99.9
add_deps_recursive:
mlperf-inference-implementation:
@@ -1143,10 +1143,10 @@ variations:
dlrm_:
posthook_deps:
- enable_if_env:
- CM_MLPERF_LOADGEN_MODE:
+ MLC_MLPERF_LOADGEN_MODE:
- accuracy
- all
- CM_MLPERF_ACCURACY_RESULTS_DIR:
+ MLC_MLPERF_ACCURACY_RESULTS_DIR:
- 'on'
names:
- terabyte-accuracy-script
@@ -1159,7 +1159,7 @@ variations:
base:
- dlrm_
env:
- CM_MODEL:
+ MLC_MODEL:
dlrm-v2-99
add_deps_recursive:
mlperf-inference-implementation:
@@ -1171,7 +1171,7 @@ variations:
base:
- dlrm_
env:
- CM_MODEL:
+ MLC_MODEL:
dlrm-v2-99.9
add_deps_recursive:
mlperf-inference-implementation:
@@ -1197,17 +1197,17 @@ variations:
- tags: get,preprocessed,dataset,criteo,_mlc
- tags: get,ml-model,dlrm,_pytorch,_fp32
mounts:
- - "${{ CM_ML_MODEL_FILE_WITH_PATH }}:${{ CM_ML_MODEL_FILE_WITH_PATH }}"
+ - "${{ MLC_ML_MODEL_FILE_WITH_PATH }}:${{ MLC_ML_MODEL_FILE_WITH_PATH }}"
- "${{ DLRM_DATA_PATH }}:${{ DLRM_DATA_PATH }}"
dockerfile_env:
- CM_ML_MODEL_FILE_WITH_PATH: "on"
+ MLC_ML_MODEL_FILE_WITH_PATH: "on"
mobilenet:
group:
model
env:
- CM_MODEL:
+ MLC_MODEL:
mobilenet
add_deps_recursive:
mlperf-inference-implementation:
@@ -1216,10 +1216,10 @@ variations:
- tags: get,dataset-aux,imagenet-aux
posthook_deps:
- enable_if_env:
- CM_MLPERF_LOADGEN_MODE:
+ MLC_MLPERF_LOADGEN_MODE:
- accuracy
- all
- CM_MLPERF_ACCURACY_RESULTS_DIR:
+ MLC_MLPERF_ACCURACY_RESULTS_DIR:
- 'on'
names:
- mlperf-accuracy-script
@@ -1230,7 +1230,7 @@ variations:
group:
model
env:
- CM_MODEL:
+ MLC_MODEL:
efficientnet
add_deps_recursive:
mlperf-inference-implementation:
@@ -1239,10 +1239,10 @@ variations:
- tags: get,dataset-aux,imagenet-aux
posthook_deps:
- enable_if_env:
- CM_MLPERF_LOADGEN_MODE:
+ MLC_MLPERF_LOADGEN_MODE:
- accuracy
- all
- CM_MLPERF_ACCURACY_RESULTS_DIR:
+ MLC_MLPERF_ACCURACY_RESULTS_DIR:
- 'on'
names:
- mlperf-accuracy-script
@@ -1252,7 +1252,7 @@ variations:
onnxruntime:
group: backend
env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
onnxruntime
add_deps_recursive:
mlperf-inference-implementation:
@@ -1261,7 +1261,7 @@ variations:
tensorrt:
group: backend
env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
tensorrt
add_deps_recursive:
mlperf-inference-implementation:
@@ -1273,7 +1273,7 @@ variations:
tf:
group: backend
env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
tf
add_deps_recursive:
mlperf-inference-implementation:
@@ -1282,7 +1282,7 @@ variations:
pytorch:
group: backend
env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
pytorch
add_deps_recursive:
mlperf-inference-implementation:
@@ -1291,7 +1291,7 @@ variations:
openshift:
group: backend
env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
openshift
add_deps_recursive:
mlperf-inference-implementation:
@@ -1300,7 +1300,7 @@ variations:
ncnn:
group: backend
env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
ncnn
add_deps_recursive:
mlperf-inference-implementation:
@@ -1311,7 +1311,7 @@ variations:
default_variations:
precision: int8
env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
deepsparse
add_deps_recursive:
mlperf-inference-implementation:
@@ -1320,7 +1320,7 @@ variations:
tflite:
group: backend
env:
- CM_MLPERF_BACKEND: tflite
+ MLC_MLPERF_BACKEND: tflite
add_deps_recursive:
mlperf-inference-implementation:
tags: _tflite
@@ -1328,7 +1328,7 @@ variations:
glow:
group: backend
env:
- CM_MLPERF_BACKEND: glow
+ MLC_MLPERF_BACKEND: glow
add_deps_recursive:
mlperf-inference-implementation:
tags: _glow
@@ -1338,7 +1338,7 @@ variations:
base:
- batch_size.1
env:
- CM_MLPERF_BACKEND: tvm-onnx
+ MLC_MLPERF_BACKEND: tvm-onnx
add_deps_recursive:
mlperf-inference-implementation:
tags: _tvm-onnx
@@ -1348,7 +1348,7 @@ variations:
base:
- batch_size.1
env:
- CM_MLPERF_BACKEND: tvm-pytorch
+ MLC_MLPERF_BACKEND: tvm-pytorch
add_deps_recursive:
mlperf-inference-implementation:
tags: _tvm-pytorch
@@ -1358,7 +1358,7 @@ variations:
base:
- batch_size.1
env:
- CM_MLPERF_BACKEND: tvm-tflite
+ MLC_MLPERF_BACKEND: tvm-tflite
add_deps_recursive:
mlperf-inference-implementation:
tags: _tvm-tflite
@@ -1366,7 +1366,7 @@ variations:
ray:
group: backend
env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
ray
add_deps_recursive:
mlperf-inference-implementation:
@@ -1378,7 +1378,7 @@ variations:
default:
True
env:
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
cpu
add_deps_recursive:
mlperf-inference-implementation:
@@ -1394,12 +1394,12 @@ variations:
deps:
- tags: get,nvidia-docker
skip_if_env:
- CM_SKIP_GET_NVIDIA_DOCKER:
+ MLC_SKIP_GET_NVIDIA_DOCKER:
- yes
group:
device
env:
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
gpu
add_deps_recursive:
mlperf-inference-implementation:
@@ -1407,7 +1407,7 @@ variations:
deps:
- tags: get,cuda-devices,_with-pycuda
skip_if_env:
- CM_CUDA_DEVICE_PROP_GLOBAL_MEMORY:
+ MLC_CUDA_DEVICE_PROP_GLOBAL_MEMORY:
- "yes"
- "on"
rocm:
@@ -1416,7 +1416,7 @@ variations:
group:
device
env:
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
rocm
add_deps_recursive:
mlperf-inference-implementation:
@@ -1425,7 +1425,7 @@ variations:
group:
device
env:
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
qaic
add_deps_recursive:
mlperf-inference-implementation:
@@ -1435,7 +1435,7 @@ variations:
group:
device
env:
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
tpu
add_deps_recursive:
mlperf-inference-implementation:
@@ -1445,16 +1445,16 @@ variations:
fast:
group: execution-mode
env:
- CM_FAST_FACTOR: '5'
- CM_OUTPUT_FOLDER_NAME: fast_results
- CM_MLPERF_RUN_STYLE: fast
+ MLC_FAST_FACTOR: '5'
+ MLC_OUTPUT_FOLDER_NAME: fast_results
+ MLC_MLPERF_RUN_STYLE: fast
test:
group: execution-mode
default: true
env:
- CM_OUTPUT_FOLDER_NAME: test_results
- CM_MLPERF_RUN_STYLE: test
+ MLC_OUTPUT_FOLDER_NAME: test_results
+ MLC_MLPERF_RUN_STYLE: test
valid,retinanet:
adr:
@@ -1464,8 +1464,8 @@ variations:
valid:
group: execution-mode
env:
- CM_OUTPUT_FOLDER_NAME: valid_results
- CM_MLPERF_RUN_STYLE: valid
+ MLC_OUTPUT_FOLDER_NAME: valid_results
+ MLC_MLPERF_RUN_STYLE: valid
# Model precision
quantized:
@@ -1478,8 +1478,8 @@ variations:
group: precision
default: true
env:
- CM_MLPERF_QUANTIZATION: off
- CM_MLPERF_MODEL_PRECISION: float32
+ MLC_MLPERF_QUANTIZATION: off
+ MLC_MLPERF_MODEL_PRECISION: float32
add_deps_recursive:
python-reference-mlperf-inference:
tags: _fp32
@@ -1489,8 +1489,8 @@ variations:
float16:
group: precision
env:
- CM_MLPERF_QUANTIZATION: off
- CM_MLPERF_MODEL_PRECISION: float16
+ MLC_MLPERF_QUANTIZATION: off
+ MLC_MLPERF_MODEL_PRECISION: float16
add_deps_recursive:
python-reference-mlperf-inference:
tags: _float16
@@ -1500,8 +1500,8 @@ variations:
bfloat16:
group: precision
env:
- CM_MLPERF_QUANTIZATION: off
- CM_MLPERF_MODEL_PRECISION: bfloat16
+ MLC_MLPERF_QUANTIZATION: off
+ MLC_MLPERF_MODEL_PRECISION: bfloat16
add_deps_recursive:
python-reference-mlperf-inference:
tags: _bfloat16
@@ -1509,16 +1509,16 @@ variations:
int4:
group: precision
env:
- CM_MLPERF_QUANTIZATION: on
- CM_MLPERF_MODEL_PRECISION: int4
+ MLC_MLPERF_QUANTIZATION: on
+ MLC_MLPERF_MODEL_PRECISION: int4
add_deps_recursive:
mlperf-inference-implementation:
tags: _int4
int8:
group: precision
env:
- CM_MLPERF_QUANTIZATION: on
- CM_MLPERF_MODEL_PRECISION: int8
+ MLC_MLPERF_QUANTIZATION: on
+ MLC_MLPERF_MODEL_PRECISION: int8
add_deps_recursive:
mlperf-inference-implementation:
tags: _int8
@@ -1528,8 +1528,8 @@ variations:
uint8:
group: precision
env:
- CM_MLPERF_QUANTIZATION: on
- CM_MLPERF_MODEL_PRECISION: uint8
+ MLC_MLPERF_QUANTIZATION: on
+ MLC_MLPERF_MODEL_PRECISION: uint8
add_deps_recursive:
mlperf-inference-implementation:
tags: _uint8
@@ -1540,36 +1540,36 @@ variations:
group: loadgen-scenario
default: true
env:
- CM_MLPERF_LOADGEN_SCENARIO: Offline
+ MLC_MLPERF_LOADGEN_SCENARIO: Offline
add_deps_recursive:
mlperf-inference-implementation:
tags: _offline
multistream:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: MultiStream
+ MLC_MLPERF_LOADGEN_SCENARIO: MultiStream
add_deps_recursive:
mlperf-inference-implementation:
tags: _multistream
singlestream:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: SingleStream
+ MLC_MLPERF_LOADGEN_SCENARIO: SingleStream
add_deps_recursive:
mlperf-inference-implementation:
tags: _singlestream
server:
group: loadgen-scenario
env:
- CM_MLPERF_LOADGEN_SCENARIO: Server
+ MLC_MLPERF_LOADGEN_SCENARIO: Server
add_deps_recursive:
mlperf-inference-implementation:
tags: _server
power:
env:
- CM_MLPERF_POWER: 'yes'
- CM_SYSTEM_POWER: 'yes'
+ MLC_MLPERF_POWER: 'yes'
+ MLC_SYSTEM_POWER: 'yes'
add_deps_recursive:
mlperf-runner:
tags:
@@ -1578,7 +1578,7 @@ variations:
batch_size.#:
group: batch_size
env:
- CM_MLPERF_LOADGEN_MAX_BATCHSIZE: '#'
+ MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: '#'
add_deps_recursive:
mlperf-inference-implementation:
tags: _batch_size.#
@@ -1601,8 +1601,8 @@ variations:
version: r2.1
tags: _custom
env:
- CM_SKIP_SYS_UTILS: 'yes'
- CM_TEST_QUERY_COUNT: '100'
+ MLC_SKIP_SYS_UTILS: 'yes'
+ MLC_TEST_QUERY_COUNT: '100'
r3.0_default:
group:
@@ -1619,7 +1619,7 @@ variations:
version: r2.1
tags: _custom
env:
- CM_SKIP_SYS_UTILS: 'yes'
+ MLC_SKIP_SYS_UTILS: 'yes'
r3.1_default:
group:
@@ -1632,10 +1632,10 @@ variations:
version: r3.0
tags: _nvidia-only
default_env:
- CM_SKIP_SYS_UTILS: 'yes'
- CM_REGENERATE_MEASURE_FILES: 'yes'
+ MLC_SKIP_SYS_UTILS: 'yes'
+ MLC_REGENERATE_MEASURE_FILES: 'yes'
env:
- CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3-cp38-cp38-linux_x86_64.whl'
+ MLC_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3-cp38-cp38-linux_x86_64.whl'
r4.0-dev_default:
group:
@@ -1650,10 +1650,10 @@ variations:
intel-harness:
tags: _v3.1
default_env:
- CM_SKIP_SYS_UTILS: 'yes'
- CM_REGENERATE_MEASURE_FILES: 'yes'
+ MLC_SKIP_SYS_UTILS: 'yes'
+ MLC_REGENERATE_MEASURE_FILES: 'yes'
env:
- CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3-cp38-cp38-linux_x86_64.whl'
+ MLC_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3-cp38-cp38-linux_x86_64.whl'
r4.0_default:
group:
@@ -1668,10 +1668,10 @@ variations:
intel-harness:
tags: _v3.1
default_env:
- CM_SKIP_SYS_UTILS: 'yes'
- CM_REGENERATE_MEASURE_FILES: 'yes'
+ MLC_SKIP_SYS_UTILS: 'yes'
+ MLC_REGENERATE_MEASURE_FILES: 'yes'
env:
- CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3b0-cp38-cp38-linux_x86_64.whl'
+ MLC_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3b0-cp38-cp38-linux_x86_64.whl'
# Uses public code for inference v4.1
@@ -1688,8 +1688,8 @@ variations:
intel-harness:
tags: _v4.0
default_env:
- CM_SKIP_SYS_UTILS: 'yes'
- CM_REGENERATE_MEASURE_FILES: 'yes'
+ MLC_SKIP_SYS_UTILS: 'yes'
+ MLC_REGENERATE_MEASURE_FILES: 'yes'
r4.1_default:
group:
@@ -1704,11 +1704,11 @@ variations:
intel-harness:
tags: _v4.1
default_env:
- CM_SKIP_SYS_UTILS: 'yes'
- CM_REGENERATE_MEASURE_FILES: 'yes'
+ MLC_SKIP_SYS_UTILS: 'yes'
+ MLC_REGENERATE_MEASURE_FILES: 'yes'
env:
- CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3b0-cp38-cp38-linux_x86_64.whl'
- CM_MLPERF_INFERENCE_VERSION: '4.1'
+ MLC_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3b0-cp38-cp38-linux_x86_64.whl'
+ MLC_MLPERF_INFERENCE_VERSION: '4.1'
r5.0-dev_default:
group:
@@ -1725,10 +1725,10 @@ variations:
inference-src:
version: r5.0
default_env:
- CM_SKIP_SYS_UTILS: 'yes'
- CM_REGENERATE_MEASURE_FILES: 'yes'
+ MLC_SKIP_SYS_UTILS: 'yes'
+ MLC_REGENERATE_MEASURE_FILES: 'yes'
env:
- CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3b0-cp38-cp38-linux_x86_64.whl'
+ MLC_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3b0-cp38-cp38-linux_x86_64.whl'
invalid_variation_combinations:
@@ -1804,9 +1804,9 @@ input_description:
adr.compiler.tags:
desc: "Compiler for loadgen"
default: gcc
- adr.inference-src-loadgen.env.CM_GIT_URL:
+ adr.inference-src-loadgen.env.MLC_GIT_URL:
desc: "Git URL for MLPerf inference sources to build LoadGen (to enable non-reference implementations)"
- adr.inference-src.env.CM_GIT_URL:
+ adr.inference-src.env.MLC_GIT_URL:
desc: "Git URL for MLPerf inference sources to run benchmarks (to enable non-reference implementations)"
quiet:
desc: "Quiet run (select default values for all questions)"
@@ -1819,7 +1819,7 @@ input_description:
update_meta_if_env:
- enable_if_env:
- CM_CONTAINER_TOOL:
+ MLC_CONTAINER_TOOL:
- podman
# podman maps the host userid to the root user inside the container
docker:
@@ -1827,16 +1827,16 @@ update_meta_if_env:
use_host_user_id: False
pass_user_group: False #useful if docker is run by a different user from the one who built it and under the same group
default_env:
- CM_DOCKER_USE_DEFAULT_USER: 'yes'
+ MLC_DOCKER_USE_DEFAULT_USER: 'yes'
- skip_if_env:
- CM_CONTAINER_TOOL:
+ MLC_CONTAINER_TOOL:
- podman
docker:
use_host_group_id: True
use_host_user_id: True
pass_user_group: True #useful if docker is run by a different user from the one who built it and under the same group
- enable_if_env:
- CM_HOST_OS_TYPE:
+ MLC_HOST_OS_TYPE:
- linux
adr:
compiler:
@@ -1853,27 +1853,27 @@ docker:
names:
- get-mlperf-inference-submission-dir
skip_if_env:
- CM_MLPERF_INFERENCE_SUBMISSION_DIR: [ on ]
+ MLC_MLPERF_INFERENCE_SUBMISSION_DIR: [ on ]
pre_run_cmds:
#- cm pull repo && cm run script --tags=get,git,repo,_repo.https://github.com/GATEOverflow/inference_results_v4.0.git --update
- cm pull repo
mounts:
- - "${{ CM_DATASET_IMAGENET_PATH }}:${{ CM_DATASET_IMAGENET_PATH }}"
- - "${{ CM_DATASET_OPENIMAGES_PATH }}:${{ CM_DATASET_OPENIMAGES_PATH }}"
- - "${{ CM_OPENIMAGES_CALIBRATION_DATASET_PATH }}:${{ CM_OPENIMAGES_CALIBRATION_DATASET_PATH }}"
- - "${{ CM_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH }}:${{ CM_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH }}"
- - "${{ CM_MLPERF_INFERENCE_RESULTS_DIR }}:${{ CM_MLPERF_INFERENCE_RESULTS_DIR }}"
+ - "${{ MLC_DATASET_IMAGENET_PATH }}:${{ MLC_DATASET_IMAGENET_PATH }}"
+ - "${{ MLC_DATASET_OPENIMAGES_PATH }}:${{ MLC_DATASET_OPENIMAGES_PATH }}"
+ - "${{ MLC_OPENIMAGES_CALIBRATION_DATASET_PATH }}:${{ MLC_OPENIMAGES_CALIBRATION_DATASET_PATH }}"
+ - "${{ MLC_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH }}:${{ MLC_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH }}"
+ - "${{ MLC_MLPERF_INFERENCE_RESULTS_DIR }}:${{ MLC_MLPERF_INFERENCE_RESULTS_DIR }}"
- "${{ OUTPUT_BASE_DIR }}:${{ OUTPUT_BASE_DIR }}"
- - "${{ CM_MLPERF_INFERENCE_SUBMISSION_DIR }}:${{ CM_MLPERF_INFERENCE_SUBMISSION_DIR }}"
+ - "${{ MLC_MLPERF_INFERENCE_SUBMISSION_DIR }}:${{ MLC_MLPERF_INFERENCE_SUBMISSION_DIR }}"
- "${{ GPTJ_CHECKPOINT_PATH }}:${{ GPTJ_CHECKPOINT_PATH }}"
- - "${{ CM_CRITEO_PREPROCESSED_PATH }}:${{ CM_CRITEO_PREPROCESSED_PATH }}"
+ - "${{ MLC_CRITEO_PREPROCESSED_PATH }}:${{ MLC_CRITEO_PREPROCESSED_PATH }}"
- "${{ LLAMA2_CHECKPOINT_PATH }}:${{ LLAMA2_CHECKPOINT_PATH }}"
- - "${{ CM_NVIDIA_LLAMA_DATASET_FILE_PATH }}:${{ CM_NVIDIA_LLAMA_DATASET_FILE_PATH }}"
+ - "${{ MLC_NVIDIA_LLAMA_DATASET_FILE_PATH }}:${{ MLC_NVIDIA_LLAMA_DATASET_FILE_PATH }}"
- "${{ SDXL_CHECKPOINT_PATH }}:${{ SDXL_CHECKPOINT_PATH }}"
- - "${{ CM_DATASET_KITS19_PREPROCESSED_PATH }}:${{ CM_DATASET_KITS19_PREPROCESSED_PATH }}"
- - "${{ CM_DATASET_IGBH_PATH }}:${{ CM_DATASET_IGBH_PATH }}"
- - "${{ CM_ML_MODEL_RGAT_CHECKPOINT_PATH }}:${{ CM_ML_MODEL_RGAT_CHECKPOINT_PATH }}"
+ - "${{ MLC_DATASET_KITS19_PREPROCESSED_PATH }}:${{ MLC_DATASET_KITS19_PREPROCESSED_PATH }}"
+ - "${{ MLC_DATASET_IGBH_PATH }}:${{ MLC_DATASET_IGBH_PATH }}"
+ - "${{ MLC_ML_MODEL_RGAT_CHECKPOINT_PATH }}:${{ MLC_ML_MODEL_RGAT_CHECKPOINT_PATH }}"
skip_run_cmd: 'no'
shm_size: '32gb'
interactive: True
@@ -1890,6 +1890,6 @@ docker:
results_dir: RESULTS_DIR
submission_dir: SUBMISSION_DIR
dlrm_data_path: DLRM_DATA_PATH
- intel_gptj_int8_model_path: CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH
- nvidia_llama2_dataset_file_path: CM_NVIDIA_LLAMA_DATASET_FILE_PATH
- tp_size: CM_NVIDIA_TP_SIZE
+ intel_gptj_int8_model_path: MLC_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH
+ nvidia_llama2_dataset_file_path: MLC_NVIDIA_LLAMA_DATASET_FILE_PATH
+ tp_size: MLC_NVIDIA_TP_SIZE
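
The `mounts` entries above use `${{ VAR }}` placeholders that the automation expands from the script environment before starting the container; mounts whose variables are unset are typically dropped rather than mounted empty. A minimal sketch of that expansion, assuming a plain dict env (`expand_mounts` and its skip rule are illustrative, not the automation's real resolver):

```python
import re

MOUNT_RE = re.compile(r"\$\{\{ *([A-Za-z0-9_]+) *\}\}")

def expand_mounts(mounts, env):
    # Replace ${{ VAR }} with env values; skip mounts with undefined variables.
    expanded = []
    for mount in mounts:
        if any(not env.get(v) for v in MOUNT_RE.findall(mount)):
            continue  # host path unknown, nothing to bind-mount
        expanded.append(MOUNT_RE.sub(lambda m: env[m.group(1)], mount))
    return expanded

env = {"MLC_DATASET_IMAGENET_PATH": "/data/imagenet"}
print(expand_mounts(
    ["${{ MLC_DATASET_IMAGENET_PATH }}:${{ MLC_DATASET_IMAGENET_PATH }}",
     "${{ GPTJ_CHECKPOINT_PATH }}:${{ GPTJ_CHECKPOINT_PATH }}"], env))
# -> ['/data/imagenet:/data/imagenet']
```
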
diff --git a/script/app-mlperf-training-nvidia/customize.py b/script/app-mlperf-training-nvidia/customize.py
index e1613cd75..890daa60b 100644
--- a/script/app-mlperf-training-nvidia/customize.py
+++ b/script/app-mlperf-training-nvidia/customize.py
@@ -12,54 +12,54 @@ def preprocess(i):
state = i['state']
script_path = i['run_script_input']['path']
- if env.get('CM_MLPERF_SKIP_RUN', '') == "yes":
+ if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes":
return {'return': 0}
- if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes":
+ if env.get('MLC_RUN_DOCKER_CONTAINER', '') == "yes":
return {'return': 0}
- if env.get('CM_MLPERF_POWER', '') == "yes":
+ if env.get('MLC_MLPERF_POWER', '') == "yes":
power = "yes"
else:
power = "no"
- rerun = True if env.get("CM_RERUN", "") != '' else False
+ rerun = True if env.get("MLC_RERUN", "") != '' else False
- if 'CM_MLPERF_MODEL' not in env:
+ if 'MLC_MLPERF_MODEL' not in env:
return {
'return': 1, 'error': "Please select a variation specifying the model to run"}
- if 'CM_NUM_THREADS' not in env:
- if 'CM_MINIMIZE_THREADS' in env:
- env['CM_NUM_THREADS'] = str(int(env['CM_HOST_CPU_TOTAL_CORES']) //
- (int(env.get('CM_HOST_CPU_SOCKETS', '1')) * int(env.get('CM_HOST_CPU_TOTAL_CORES', '1'))))
+ if 'MLC_NUM_THREADS' not in env:
+ if 'MLC_MINIMIZE_THREADS' in env:
+ env['MLC_NUM_THREADS'] = str(int(env['MLC_HOST_CPU_TOTAL_CORES']) //
+ (int(env.get('MLC_HOST_CPU_SOCKETS', '1')) * int(env.get('MLC_HOST_CPU_TOTAL_CORES', '1'))))
else:
- env['CM_NUM_THREADS'] = env.get('CM_HOST_CPU_TOTAL_CORES', '1')
+ env['MLC_NUM_THREADS'] = env.get('MLC_HOST_CPU_TOTAL_CORES', '1')
print("Using MLCommons Training source from '" +
- env['CM_MLPERF_TRAINING_SOURCE'] + "'")
+ env['MLC_MLPERF_TRAINING_SOURCE'] + "'")
- NUM_THREADS = env['CM_NUM_THREADS']
+ NUM_THREADS = env['MLC_NUM_THREADS']
- if "bert" in env['CM_MLPERF_MODEL']:
- env['CM_RUN_DIR'] = os.path.join(
- env['CM_GIT_REPO_CHECKOUT_PATH'],
+ if "bert" in env['MLC_MLPERF_MODEL']:
+ env['MLC_RUN_DIR'] = os.path.join(
+ env['MLC_GIT_REPO_CHECKOUT_PATH'],
"NVIDIA",
"benchmarks",
"bert",
"implementations",
"pytorch-22.09")
- if "resnet" in env['CM_MLPERF_MODEL']:
- env['CM_RUN_DIR'] = os.path.join(
- env['CM_GIT_REPO_CHECKOUT_PATH'],
+ if "resnet" in env['MLC_MLPERF_MODEL']:
+ env['MLC_RUN_DIR'] = os.path.join(
+ env['MLC_GIT_REPO_CHECKOUT_PATH'],
"NVIDIA",
"benchmarks",
"resnet",
"implementations",
"mxnet-22.04")
- env['CM_RESULTS_DIR'] = os.getcwd()
+ env['MLC_RESULTS_DIR'] = os.getcwd()
return {'return': 0}
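
The per-model branches in `preprocess` above reduce to a lookup table keyed by model name. A table-driven restatement, for reference (standalone sketch; `/work/training_results` is an assumed checkout path, not part of the script):

```python
import os

# Model name -> NVIDIA implementation directory, mirroring the branches above.
RUN_SUBDIRS = {
    "bert": ("NVIDIA", "benchmarks", "bert", "implementations", "pytorch-22.09"),
    "resnet": ("NVIDIA", "benchmarks", "resnet", "implementations", "mxnet-22.04"),
}

def resolve_run_dir(env):
    model = env.get("MLC_MLPERF_MODEL", "")
    for key, parts in RUN_SUBDIRS.items():
        if key in model:
            return os.path.join(env["MLC_GIT_REPO_CHECKOUT_PATH"], *parts)
    raise ValueError("Please select a variation specifying the model to run")

print(resolve_run_dir({"MLC_MLPERF_MODEL": "bert",
                       "MLC_GIT_REPO_CHECKOUT_PATH": "/work/training_results"}))
```
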
diff --git a/script/app-mlperf-training-nvidia/meta.yaml b/script/app-mlperf-training-nvidia/meta.yaml
index a2fad3584..abf4e7dd9 100644
--- a/script/app-mlperf-training-nvidia/meta.yaml
+++ b/script/app-mlperf-training-nvidia/meta.yaml
@@ -19,29 +19,29 @@ tags:
# Default environment
default_env:
- CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: nvidia
+ MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: nvidia
# Map script inputs to environment variables
input_mapping:
- docker: CM_RUN_DOCKER_CONTAINER
- hw_name: CM_HW_NAME
- num_threads: CM_NUM_THREADS
- model: CM_MLPERF_CUSTOM_MODEL_PATH
+ docker: MLC_RUN_DOCKER_CONTAINER
+ hw_name: MLC_HW_NAME
+ num_threads: MLC_NUM_THREADS
+ model: MLC_MLPERF_CUSTOM_MODEL_PATH
output_dir: OUTPUT_BASE_DIR
- rerun: CM_RERUN
- clean: CM_MLPERF_CLEAN_SUBMISSION_DIR
+ rerun: MLC_RERUN
+ clean: MLC_MLPERF_CLEAN_SUBMISSION_DIR
# Env keys which are exposed to higher level scripts
new_env_keys:
- - CM_MLPERF_*
- - CM_DATASET_*
- - CM_HW_NAME
- - CM_ML_MODEL_*
+ - MLC_MLPERF_*
+ - MLC_DATASET_*
+ - MLC_HW_NAME
+ - MLC_ML_MODEL_*
new_state_keys:
- mlperf-inference-implementation
- - CM_SUT_*
+ - MLC_SUT_*
# Dependencies on other CM scripts
deps:
@@ -77,7 +77,7 @@ deps:
# Detect CUDA if required
- tags: get,cuda
enable_if_env:
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- cuda
@@ -86,9 +86,9 @@ deps:
names:
- ml-engine-torchvision
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- pytorch
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- cuda
- tags: get,generic-python-lib,_mlperf_logging
@@ -103,7 +103,7 @@ deps:
- prepare-data
- bert-model
enable_if_env:
- CM_MLPERF_MODEL:
+ MLC_MLPERF_MODEL:
- bert
@@ -113,14 +113,14 @@ variations:
pytorch:
group: framework
env:
- CM_MLPERF_BACKEND: pytorch
- CM_MLPERF_BACKEND_VERSION: <<>>
+ MLC_MLPERF_BACKEND: pytorch
+ MLC_MLPERF_BACKEND_VERSION: <<>>
tf:
group: framework
env:
- CM_MLPERF_BACKEND: tf
- CM_MLPERF_BACKEND_VERSION: <<>>
+ MLC_MLPERF_BACKEND: tf
+ MLC_MLPERF_BACKEND_VERSION: <<>>
tensorflow:
alias: tf
@@ -128,14 +128,14 @@ variations:
# Reference MLPerf models
bert:
env:
- CM_MLPERF_MODEL: bert
+ MLC_MLPERF_MODEL: bert
deps:
- tags: get,generic-python-lib,_protobuf
names:
- protobuf
version_max: "3.19"
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- tf
- tflite
- tags: get,generic-python-lib,_torch
@@ -144,7 +144,7 @@ variations:
tpu:
group: device
env:
- CM_MLPERF_DEVICE: tpu
+ MLC_MLPERF_DEVICE: tpu
CUDA_VISIBLE_DEVICES: ''
USE_CUDA: no
@@ -152,5 +152,5 @@ variations:
group: device
default: true
env:
- CM_MLPERF_DEVICE: cuda
+ MLC_MLPERF_DEVICE: cuda
USE_CUDA: yes
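
For context, the `input_mapping` section above is what lets a CLI input such as `--num_threads=8` or `--docker=yes` land in the env that `customize.py` reads. A minimal sketch of that translation, assuming inputs arrive as a flat dict (illustrative only):

```python
INPUT_MAPPING = {
    "docker": "MLC_RUN_DOCKER_CONTAINER",
    "hw_name": "MLC_HW_NAME",
    "num_threads": "MLC_NUM_THREADS",
    "model": "MLC_MLPERF_CUSTOM_MODEL_PATH",
    "output_dir": "OUTPUT_BASE_DIR",
    "rerun": "MLC_RERUN",
    "clean": "MLC_MLPERF_CLEAN_SUBMISSION_DIR",
}

def apply_input_mapping(inputs, env):
    # Copy recognized CLI inputs into their mapped environment variables.
    for key, value in inputs.items():
        if key in INPUT_MAPPING:
            env[INPUT_MAPPING[key]] = value
    return env

print(apply_input_mapping({"num_threads": "8", "docker": "yes"}, {}))
# -> {'MLC_NUM_THREADS': '8', 'MLC_RUN_DOCKER_CONTAINER': 'yes'}
```
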
diff --git a/script/app-mlperf-training-nvidia/run-bert-training.sh b/script/app-mlperf-training-nvidia/run-bert-training.sh
index 1515404f3..69daeebda 100644
--- a/script/app-mlperf-training-nvidia/run-bert-training.sh
+++ b/script/app-mlperf-training-nvidia/run-bert-training.sh
@@ -1,8 +1,8 @@
#!/bin/bash
source ./config_DGXA100_1x8x56x1.sh
-results_dir=${CM_RESULTS_DIR}
-cmd="CONT=mlperf-nvidia:language_model DATADIR=${CM_MLPERF_TRAINING_BERT_DATA_PATH}/hdf5/training-4320/hdf5_4320_shards_varlength DATADIR_PHASE2=${CM_MLPERF_TRAINING_BERT_DATA_PATH}/hdf5/training-4320/hdf5_4320_shards_varlength EVALDIR=${CM_MLPERF_TRAINING_BERT_DATA_PATH}/hdf5/eval_varlength/ CHECKPOINTDIR=${results_dir} CHECKPOINTDIR_PHASE1=${CM_MLPERF_TRAINING_BERT_DATA_PATH}/phase1 ./run_with_docker.sh"
+results_dir=${MLC_RESULTS_DIR}
+cmd="CONT=mlperf-nvidia:language_model DATADIR=${MLC_MLPERF_TRAINING_BERT_DATA_PATH}/hdf5/training-4320/hdf5_4320_shards_varlength DATADIR_PHASE2=${MLC_MLPERF_TRAINING_BERT_DATA_PATH}/hdf5/training-4320/hdf5_4320_shards_varlength EVALDIR=${MLC_MLPERF_TRAINING_BERT_DATA_PATH}/hdf5/eval_varlength/ CHECKPOINTDIR=${results_dir} CHECKPOINTDIR_PHASE1=${MLC_MLPERF_TRAINING_BERT_DATA_PATH}/phase1 ./run_with_docker.sh"
echo "$cmd"
eval "$cmd"
test $? -eq 0 || exit $?
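
The one-liner above encodes several data-layout assumptions (shard, eval, and phase-1 checkpoint paths) into env assignments before `./run_with_docker.sh`. A hedged pre-flight sketch that derives the same paths and verifies them before launching (the subprocess call and the validation step are assumptions for illustration, not part of the script):

```python
import os
import subprocess

def launch_bert_training(data_path, results_dir):
    shards = os.path.join(data_path, "hdf5", "training-4320",
                          "hdf5_4320_shards_varlength")
    launch_env = dict(
        os.environ,
        CONT="mlperf-nvidia:language_model",
        DATADIR=shards,
        DATADIR_PHASE2=shards,
        EVALDIR=os.path.join(data_path, "hdf5", "eval_varlength"),
        CHECKPOINTDIR=results_dir,
        CHECKPOINTDIR_PHASE1=os.path.join(data_path, "phase1"),
    )
    # Fail early if the expected data layout is missing.
    for key in ("DATADIR", "EVALDIR", "CHECKPOINTDIR_PHASE1"):
        if not os.path.isdir(launch_env[key]):
            raise FileNotFoundError(f"{key} not found: {launch_env[key]}")
    subprocess.run(["./run_with_docker.sh"], env=launch_env, check=True)
```
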
diff --git a/script/app-mlperf-training-nvidia/run.sh b/script/app-mlperf-training-nvidia/run.sh
index 2f15ea73b..24500651c 100644
--- a/script/app-mlperf-training-nvidia/run.sh
+++ b/script/app-mlperf-training-nvidia/run.sh
@@ -1,10 +1,10 @@
#!/bin/bash
-cmd="cd ${CM_RUN_DIR}"
+cmd="cd ${MLC_RUN_DIR}"
echo "$cmd"
eval "$cmd"
-if [[ ${CM_MLPERF_MODEL} == "bert" ]]; then
- bash ${CM_TMP_CURRENT_SCRIPT_PATH}/run-bert-training.sh
+if [[ ${MLC_MLPERF_MODEL} == "bert" ]]; then
+ bash ${MLC_TMP_CURRENT_SCRIPT_PATH}/run-bert-training.sh
test $? -eq 0 || exit $?
fi
diff --git a/script/app-mlperf-training-reference/customize.py b/script/app-mlperf-training-reference/customize.py
index 54a544fcb..fb2d0c709 100644
--- a/script/app-mlperf-training-reference/customize.py
+++ b/script/app-mlperf-training-reference/customize.py
@@ -12,38 +12,38 @@ def preprocess(i):
state = i['state']
script_path = i['run_script_input']['path']
- if env.get('CM_MLPERF_SKIP_RUN', '') == "yes":
+ if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes":
return {'return': 0}
- if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes":
+ if env.get('MLC_RUN_DOCKER_CONTAINER', '') == "yes":
return {'return': 0}
- if env.get('CM_MLPERF_POWER', '') == "yes":
+ if env.get('MLC_MLPERF_POWER', '') == "yes":
power = "yes"
else:
power = "no"
- rerun = True if env.get("CM_RERUN", "") != '' else False
+ rerun = True if env.get("MLC_RERUN", "") != '' else False
- if 'CM_MLPERF_MODEL' not in env:
+ if 'MLC_MLPERF_MODEL' not in env:
return {
'return': 1, 'error': "Please select a variation specifying the model to run"}
- if 'CM_NUM_THREADS' not in env:
- if 'CM_MINIMIZE_THREADS' in env:
- env['CM_NUM_THREADS'] = str(int(env['CM_HOST_CPU_TOTAL_CORES']) //
- (int(env.get('CM_HOST_CPU_SOCKETS', '1')) * int(env.get('CM_HOST_CPU_TOTAL_CORES', '1'))))
+ if 'MLC_NUM_THREADS' not in env:
+ if 'MLC_MINIMIZE_THREADS' in env:
+ env['MLC_NUM_THREADS'] = str(int(env['MLC_HOST_CPU_TOTAL_CORES']) //
+ (int(env.get('MLC_HOST_CPU_SOCKETS', '1')) * int(env.get('MLC_HOST_CPU_TOTAL_CORES', '1'))))
else:
- env['CM_NUM_THREADS'] = env.get('CM_HOST_CPU_TOTAL_CORES', '1')
+ env['MLC_NUM_THREADS'] = env.get('MLC_HOST_CPU_TOTAL_CORES', '1')
print("Using MLCommons Training source from '" +
- env['CM_MLPERF_TRAINING_SOURCE'] + "'")
+ env['MLC_MLPERF_TRAINING_SOURCE'] + "'")
- NUM_THREADS = env['CM_NUM_THREADS']
+ NUM_THREADS = env['MLC_NUM_THREADS']
- if "bert" in env['CM_MLPERF_MODEL']:
- env['CM_RUN_DIR'] = os.path.join(
- env['CM_MLPERF_TRAINING_SOURCE'],
+ if "bert" in env['MLC_MLPERF_MODEL']:
+ env['MLC_RUN_DIR'] = os.path.join(
+ env['MLC_MLPERF_TRAINING_SOURCE'],
"language_model",
"tensorflow",
"bert")
diff --git a/script/app-mlperf-training-reference/meta.yaml b/script/app-mlperf-training-reference/meta.yaml
index 56b4ad05d..45b0633be 100644
--- a/script/app-mlperf-training-reference/meta.yaml
+++ b/script/app-mlperf-training-reference/meta.yaml
@@ -20,30 +20,30 @@ tags:
# Default environment
default_env:
- CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: reference
- CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX: ''
+ MLC_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: reference
+ MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX: ''
# Map script inputs to environment variables
input_mapping:
- docker: CM_RUN_DOCKER_CONTAINER
- hw_name: CM_HW_NAME
- num_threads: CM_NUM_THREADS
- model: CM_MLPERF_CUSTOM_MODEL_PATH
+ docker: MLC_RUN_DOCKER_CONTAINER
+ hw_name: MLC_HW_NAME
+ num_threads: MLC_NUM_THREADS
+ model: MLC_MLPERF_CUSTOM_MODEL_PATH
output_dir: OUTPUT_BASE_DIR
- rerun: CM_RERUN
- clean: CM_MLPERF_CLEAN_SUBMISSION_DIR
+ rerun: MLC_RERUN
+ clean: MLC_MLPERF_CLEAN_SUBMISSION_DIR
# Env keys which are exposed to higher level scripts
new_env_keys:
- - CM_MLPERF_*
- - CM_DATASET_*
- - CM_HW_NAME
- - CM_ML_MODEL_*
+ - MLC_MLPERF_*
+ - MLC_DATASET_*
+ - MLC_HW_NAME
+ - MLC_ML_MODEL_*
new_state_keys:
- mlperf-inference-implementation
- - CM_SUT_*
+ - MLC_SUT_*
# Dependencies on other CM scripts
deps:
@@ -71,7 +71,7 @@ deps:
# Detect CUDA if required
- tags: get,cuda
enable_if_env:
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- cuda
@@ -80,9 +80,9 @@ deps:
names:
- ml-engine-torchvision
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- pytorch
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- cuda
- tags: get,generic-python-lib,_mlperf_logging
@@ -97,7 +97,7 @@ deps:
- prepare-data
- bert-model
enable_if_env:
- CM_MLPERF_MODEL:
+ MLC_MLPERF_MODEL:
- bert
@@ -107,14 +107,14 @@ variations:
pytorch:
group: framework
env:
- CM_MLPERF_BACKEND: pytorch
- CM_MLPERF_BACKEND_VERSION: <<>>
+ MLC_MLPERF_BACKEND: pytorch
+ MLC_MLPERF_BACKEND_VERSION: <<>>
tf:
group: framework
env:
- CM_MLPERF_BACKEND: tf
- CM_MLPERF_BACKEND_VERSION: <<>>
+ MLC_MLPERF_BACKEND: tf
+ MLC_MLPERF_BACKEND_VERSION: <<>>
tensorflow:
alias: tf
@@ -122,14 +122,14 @@ variations:
# Reference MLPerf models
bert:
env:
- CM_MLPERF_MODEL: bert
+ MLC_MLPERF_MODEL: bert
deps:
- tags: get,generic-python-lib,_protobuf
names:
- protobuf
version_max: "3.19"
enable_if_env:
- CM_MLPERF_BACKEND:
+ MLC_MLPERF_BACKEND:
- tf
- tflite
- tags: get,generic-python-lib,_torch
@@ -138,7 +138,7 @@ variations:
tpu:
group: device
env:
- CM_MLPERF_DEVICE: tpu
+ MLC_MLPERF_DEVICE: tpu
CUDA_VISIBLE_DEVICES: ''
USE_CUDA: no
@@ -146,5 +146,5 @@ variations:
group: device
default: true
env:
- CM_MLPERF_DEVICE: cuda
+ MLC_MLPERF_DEVICE: cuda
USE_CUDA: yes
diff --git a/script/app-mlperf-training-reference/run-bert-training.sh b/script/app-mlperf-training-reference/run-bert-training.sh
index 08ed5b70a..1ba44bebb 100644
--- a/script/app-mlperf-training-reference/run-bert-training.sh
+++ b/script/app-mlperf-training-reference/run-bert-training.sh
@@ -3,14 +3,14 @@
export TF_XLA_FLAGS='--tf_xla_auto_jit=2'
train_batch_size=24
cmd="python run_pretraining.py \
- --bert_config_file=${CM_MLPERF_TRAINING_BERT_CONFIG_PATH} \
+ --bert_config_file=${MLC_MLPERF_TRAINING_BERT_CONFIG_PATH} \
--output_dir=/tmp/output/ \
- --input_file=${CM_MLPERF_TRAINING_BERT_TFRECORDS_PATH}/part* \
+ --input_file=${MLC_MLPERF_TRAINING_BERT_TFRECORDS_PATH}/part* \
--nodo_eval \
--do_train \
--eval_batch_size=8 \
--learning_rate=0.0001 \
- --init_checkpoint=${CM_MLPERF_TRAINING_BERT_DATA_PATH}/phase1/model.ckpt-28252 \
+ --init_checkpoint=${MLC_MLPERF_TRAINING_BERT_DATA_PATH}/phase1/model.ckpt-28252 \
--iterations_per_loop=1000 \
--max_predictions_per_seq=76 \
--max_seq_length=512 \
diff --git a/script/app-mlperf-training-reference/run.sh b/script/app-mlperf-training-reference/run.sh
index 2f15ea73b..24500651c 100644
--- a/script/app-mlperf-training-reference/run.sh
+++ b/script/app-mlperf-training-reference/run.sh
@@ -1,10 +1,10 @@
#!/bin/bash
-cmd="cd ${CM_RUN_DIR}"
+cmd="cd ${MLC_RUN_DIR}"
echo "$cmd"
eval "$cmd"
-if [[ ${CM_MLPERF_MODEL} == "bert" ]]; then
- bash ${CM_TMP_CURRENT_SCRIPT_PATH}/run-bert-training.sh
+if [[ ${MLC_MLPERF_MODEL} == "bert" ]]; then
+ bash ${MLC_TMP_CURRENT_SCRIPT_PATH}/run-bert-training.sh
test $? -eq 0 || exit $?
fi
diff --git a/script/app-stable-diffusion-onnx-py/README-extra.md b/script/app-stable-diffusion-onnx-py/README-extra.md
index ecab8070e..de321d158 100644
--- a/script/app-stable-diffusion-onnx-py/README-extra.md
+++ b/script/app-stable-diffusion-onnx-py/README-extra.md
@@ -12,7 +12,7 @@ cm run script "python app stable-diffusion onnx" --adr.python.name=sd-test --tex
cm rm cache -f
cm run script "python app stable-diffusion onnx _cuda" --adr.python.name=sd-test --text="crazy programmer"
-cm docker script "python app stable-diffusion onnx" --text="crazy programmer" --output=. --docker_cm_repo=ctuning@mlcommons-ck --env.CM_DOCKER_ADD_FLAG_TO_CM_MLOPS_REPO=xyz4
+cm docker script "python app stable-diffusion onnx" --text="crazy programmer" --output=. --docker_cm_repo=ctuning@mlcommons-ck --env.MLC_DOCKER_ADD_FLAG_TO_MLC_MLOPS_REPO=xyz4
```
diff --git a/script/app-stable-diffusion-onnx-py/meta.yaml b/script/app-stable-diffusion-onnx-py/meta.yaml
index 306bebbb5..4aacbe801 100644
--- a/script/app-stable-diffusion-onnx-py/meta.yaml
+++ b/script/app-stable-diffusion-onnx-py/meta.yaml
@@ -77,18 +77,18 @@ variations:
group: target
env:
USE_CUDA: yes
- CM_DEVICE: cuda:0
+ MLC_DEVICE: cuda:0
cpu:
group: target
default: yes
env:
USE_CPU: yes
- CM_DEVICE: cpu
+ MLC_DEVICE: cpu
input_mapping:
- text: CM_APP_STABLE_DIFFUSION_ONNX_PY_TEXT
- output: CM_APP_STABLE_DIFFUSION_ONNX_PY_OUTPUT
+ text: MLC_APP_STABLE_DIFFUSION_ONNX_PY_TEXT
+ output: MLC_APP_STABLE_DIFFUSION_ONNX_PY_OUTPUT
input_description:
@@ -107,4 +107,4 @@ docker:
skip_input_for_fake_run:
- text
- output
- - env.CM_DOCKER_ADD_FLAG_TO_CM_MLOPS_REPO
+ - env.MLC_DOCKER_ADD_FLAG_TO_MLC_MLOPS_REPO
diff --git a/script/app-stable-diffusion-onnx-py/process.py b/script/app-stable-diffusion-onnx-py/process.py
index 86bbd3c3b..86a59ef19 100644
--- a/script/app-stable-diffusion-onnx-py/process.py
+++ b/script/app-stable-diffusion-onnx-py/process.py
@@ -4,24 +4,24 @@
from optimum.onnxruntime import ORTStableDiffusionPipeline
-output = os.environ.get('CM_APP_STABLE_DIFFUSION_ONNX_PY_OUTPUT', '')
+output = os.environ.get('MLC_APP_STABLE_DIFFUSION_ONNX_PY_OUTPUT', '')
f = os.path.join(output, 'output.png')
if os.path.isfile(f):
os.remove(f)
-cm_model_path = os.environ.get('CM_ML_MODEL_PATH', '')
+cm_model_path = os.environ.get('MLC_ML_MODEL_PATH', '')
if cm_model_path == '':
- print('Error: CM_ML_MODEL_PATH env is not defined')
+ print('Error: MLC_ML_MODEL_PATH env is not defined')
exit(1)
-device = os.environ.get('CM_DEVICE', '')
+device = os.environ.get('MLC_DEVICE', '')
pipeline = ORTStableDiffusionPipeline.from_pretrained(
cm_model_path, local_files_only=True).to(device)
-text = os.environ.get('CM_APP_STABLE_DIFFUSION_ONNX_PY_TEXT', '')
+text = os.environ.get('MLC_APP_STABLE_DIFFUSION_ONNX_PY_TEXT', '')
if text == '':
text = "a photo of an astronaut riding a horse on mars"
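
The hunk ends before the generation step, which presumably runs the pipeline and writes `output.png`. A standalone, hedged restatement of the whole script including that assumed tail (env names mirror the diff; the `.images[0].save(...)` step is the conventional diffusers-style API, treated here as an assumption):

```python
import os
from optimum.onnxruntime import ORTStableDiffusionPipeline

model_path = os.environ.get("MLC_ML_MODEL_PATH", "")
device = os.environ.get("MLC_DEVICE", "cpu")
text = os.environ.get("MLC_APP_STABLE_DIFFUSION_ONNX_PY_TEXT",
                      "a photo of an astronaut riding a horse on mars")
out = os.path.join(os.environ.get("MLC_APP_STABLE_DIFFUSION_ONNX_PY_OUTPUT", ""),
                   "output.png")

pipe = ORTStableDiffusionPipeline.from_pretrained(
    model_path, local_files_only=True).to(device)
pipe(text).images[0].save(out)  # assumed final step: render and save the image
```
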
diff --git a/script/app-stable-diffusion-onnx-py/run.bat b/script/app-stable-diffusion-onnx-py/run.bat
index fbcf3a07e..03fa74bd9 100644
--- a/script/app-stable-diffusion-onnx-py/run.bat
+++ b/script/app-stable-diffusion-onnx-py/run.bat
@@ -1,2 +1,2 @@
-%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\process.py
+%MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\process.py
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/app-stable-diffusion-onnx-py/run.sh b/script/app-stable-diffusion-onnx-py/run.sh
index efffec67f..b2cd262a4 100644
--- a/script/app-stable-diffusion-onnx-py/run.sh
+++ b/script/app-stable-diffusion-onnx-py/run.sh
@@ -1,4 +1,4 @@
#!/bin/bash
-${CM_PYTHON_BIN} ${CM_TMP_CURRENT_SCRIPT_PATH}/process.py
+${MLC_PYTHON_BIN} ${MLC_TMP_CURRENT_SCRIPT_PATH}/process.py
test $? -eq 0 || exit 1
diff --git a/script/authenticate-github-cli/customize.py b/script/authenticate-github-cli/customize.py
index a4fb19772..631b93f58 100644
--- a/script/authenticate-github-cli/customize.py
+++ b/script/authenticate-github-cli/customize.py
@@ -13,16 +13,16 @@ def preprocess(i):
automation = i['automation']
cmd = "gh auth login"
- if env.get('CM_GH_AUTH_TOKEN', '') != '':
+ if env.get('MLC_GH_AUTH_TOKEN', '') != '':
if os_info['platform'] == 'windows':
with open("token", "w") as f:
- f.write(env['CM_GH_AUTH_TOKEN'])
+ f.write(env['MLC_GH_AUTH_TOKEN'])
cmd = f"{cmd} --with-token < token"
else:
- cmd = f" echo {env['CM_GH_AUTH_TOKEN']} | {cmd} --with-token"
+ cmd = f" echo {env['MLC_GH_AUTH_TOKEN']} | {cmd} --with-token"
- env['CM_RUN_CMD'] = cmd
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ env['MLC_RUN_CMD'] = cmd
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
return {'return': 0}
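
On the non-Windows path above the token is echoed through a shell pipe, which can leak it into process listings and shell history. A hedged alternative for comparison, feeding the token to `gh auth login --with-token` over stdin with `subprocess` (a safer pattern, not what the script currently does):

```python
import subprocess

def gh_auth_login(token: str) -> None:
    # stdin delivery keeps the token out of the command line and ps output.
    subprocess.run(["gh", "auth", "login", "--with-token"],
                   input=token.encode(), check=True)
```
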
diff --git a/script/authenticate-github-cli/meta.yaml b/script/authenticate-github-cli/meta.yaml
index 605cc955f..1a69bff6e 100644
--- a/script/authenticate-github-cli/meta.yaml
+++ b/script/authenticate-github-cli/meta.yaml
@@ -3,8 +3,8 @@ automation_alias: script
automation_uid: 5b4e0237da074764
cache: true
input_mapping:
- with_token: CM_GH_AUTH_TOKEN
- with-token: CM_GH_AUTH_TOKEN
+ with_token: MLC_GH_AUTH_TOKEN
+ with-token: MLC_GH_AUTH_TOKEN
tags:
- auth
- authenticate
diff --git a/script/authenticate-github-cli/run.bat b/script/authenticate-github-cli/run.bat
index 2366ffc07..73a07ec19 100644
--- a/script/authenticate-github-cli/run.bat
+++ b/script/authenticate-github-cli/run.bat
@@ -1,15 +1,15 @@
@echo off
echo Running gh auth:
-REM Not printing CM_RUN_CMD as it can contain secret
-REM echo %CM_RUN_CMD%
+REM Not printing MLC_RUN_CMD as it can contain secret
+REM echo %MLC_RUN_CMD%
echo.
-REM Check if CM_FAKE_RUN is not equal to "yes"
-if not "%CM_FAKE_RUN%"=="yes" (
+REM Check if MLC_FAKE_RUN is not equal to "yes"
+if not "%MLC_FAKE_RUN%"=="yes" (
- REM Execute the command stored in CM_RUN_CMD
- REM %CM_RUN_CMD%
- echo %CM_GH_AUTH_TOKEN% | gh auth login --with-token
+ REM Execute the command stored in MLC_RUN_CMD
+ REM %MLC_RUN_CMD%
+ echo %MLC_GH_AUTH_TOKEN% | gh auth login --with-token
REM Check the exit code and exit with error if non-zero
if %ERRORLEVEL% neq 0 (
diff --git a/script/authenticate-github-cli/run.sh b/script/authenticate-github-cli/run.sh
index 58c52dad6..ad1472f09 100644
--- a/script/authenticate-github-cli/run.sh
+++ b/script/authenticate-github-cli/run.sh
@@ -1,18 +1,18 @@
#!/bin/bash
-#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+#MLC Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH}
#To export any variable
#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
-#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
echo "Running gh auth: " #Not printing as it can contain secret
-#echo "${CM_RUN_CMD}"
+#echo "${MLC_RUN_CMD}"
echo ""
-if [[ ${CM_FAKE_RUN} != "yes" ]]; then
- eval "${CM_RUN_CMD}"
+if [[ ${MLC_FAKE_RUN} != "yes" ]]; then
+ eval "${MLC_RUN_CMD}"
test $? -eq 0 || exit 1
fi
diff --git a/script/benchmark-any-mlperf-inference-implementation/customize.py b/script/benchmark-any-mlperf-inference-implementation/customize.py
index b5961dbae..644de05b2 100644
--- a/script/benchmark-any-mlperf-inference-implementation/customize.py
+++ b/script/benchmark-any-mlperf-inference-implementation/customize.py
@@ -13,7 +13,7 @@ def preprocess(i):
automation = i['automation']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
models = env['MODELS'].split(",")
@@ -68,7 +68,7 @@ def preprocess(i):
cmds.append(cmd)
assemble_tflite_cmds(cmds)
- if env.get('CM_HOST_CPU_ARCHITECTURE', '') == "aarch64":
+ if env.get('MLC_HOST_CPU_ARCHITECTURE', '') == "aarch64":
extra_tags = ",_armnn,_use-neon"
cmd = f'export extra_tags="{extra_tags}"'
cmds.append(cmd)
diff --git a/script/benchmark-any-mlperf-inference-implementation/meta.yaml b/script/benchmark-any-mlperf-inference-implementation/meta.yaml
index 5f1ae4ad6..2e176a5b4 100644
--- a/script/benchmark-any-mlperf-inference-implementation/meta.yaml
+++ b/script/benchmark-any-mlperf-inference-implementation/meta.yaml
@@ -302,7 +302,7 @@ variations:
rb6,qualcomm:
default_env:
- EXTRA_ARGS: " --adr.mlperf-inference-implementation.tags=_rb6 --env.CM_MLPERF_SHORT_RANGING_RUN=no"
+ EXTRA_ARGS: " --adr.mlperf-inference-implementation.tags=_rb6 --env.MLC_MLPERF_SHORT_RANGING_RUN=no"
state:
resnet50:
qaic:
diff --git a/script/benchmark-any-mlperf-inference-implementation/run-template.sh b/script/benchmark-any-mlperf-inference-implementation/run-template.sh
index 17c1ffa00..8556de945 100644
--- a/script/benchmark-any-mlperf-inference-implementation/run-template.sh
+++ b/script/benchmark-any-mlperf-inference-implementation/run-template.sh
@@ -1,11 +1,11 @@
#!/bin/bash
-#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+#MLC Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH}
#To export any variable
#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
-#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
@@ -17,7 +17,7 @@ function run() {
echo "Running: "
echo "$1"
echo ""
- if [[ ${CM_FAKE_RUN} != 'yes' ]]; then
+ if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then
eval "$1"
exit_if_error
fi
diff --git a/script/benchmark-program-mlperf/customize.py b/script/benchmark-program-mlperf/customize.py
index fa8c7d627..3f92511eb 100644
--- a/script/benchmark-program-mlperf/customize.py
+++ b/script/benchmark-program-mlperf/customize.py
@@ -14,51 +14,51 @@ def postprocess(i):
os_info = i['os_info']
env = i['env']
- env['CM_MLPERF_RUN_CMD'] = env.get('CM_RUN_CMD')
+ env['MLC_MLPERF_RUN_CMD'] = env.get('MLC_RUN_CMD')
- if env.get('CM_MLPERF_POWER', '') == "yes":
+ if env.get('MLC_MLPERF_POWER', '') == "yes":
- if env.get('CM_MLPERF_SHORT_RANGING_RUN', '') != 'no':
- # Write '0' to the count.txt file in CM_RUN_DIR
- count_file = os.path.join(env.get('CM_RUN_DIR', ''), 'count.txt')
+ if env.get('MLC_MLPERF_SHORT_RANGING_RUN', '') != 'no':
+ # Write '0' to the count.txt file in MLC_RUN_DIR
+ count_file = os.path.join(env.get('MLC_RUN_DIR', ''), 'count.txt')
with open(count_file, 'w') as f:
f.write('0')
if os_info['platform'] != 'windows':
# Construct the shell command with proper escaping
- env['CM_MLPERF_RUN_CMD'] = r"""
-CM_MLPERF_RUN_COUNT=\$(cat \${CM_RUN_DIR}/count.txt);
-echo \${CM_MLPERF_RUN_COUNT};
-CM_MLPERF_RUN_COUNT=\$((CM_MLPERF_RUN_COUNT+1));
-echo \${CM_MLPERF_RUN_COUNT} > \${CM_RUN_DIR}/count.txt;
+ env['MLC_MLPERF_RUN_CMD'] = r"""
+MLC_MLPERF_RUN_COUNT=\$(cat \${MLC_RUN_DIR}/count.txt);
+echo \${MLC_MLPERF_RUN_COUNT};
+MLC_MLPERF_RUN_COUNT=\$((MLC_MLPERF_RUN_COUNT+1));
+echo \${MLC_MLPERF_RUN_COUNT} > \${MLC_RUN_DIR}/count.txt;
-if [ \${CM_MLPERF_RUN_COUNT} -eq 1 ]; then
-export CM_MLPERF_USER_CONF="${CM_MLPERF_RANGING_USER_CONF}";
+if [ \${MLC_MLPERF_RUN_COUNT} -eq 1 ]; then
+export MLC_MLPERF_USER_CONF="${MLC_MLPERF_RANGING_USER_CONF}";
else
-export CM_MLPERF_USER_CONF="${CM_MLPERF_TESTING_USER_CONF}";
+export MLC_MLPERF_USER_CONF="${MLC_MLPERF_TESTING_USER_CONF}";
fi
;
- """ + env.get('CM_RUN_CMD', '').strip()
+ """ + env.get('MLC_RUN_CMD', '').strip()
else:
- env['CM_MLPERF_RUN_CMD'] = r"""
+ env['MLC_MLPERF_RUN_CMD'] = r"""
:: Read the current count from the file
-set /p CM_MLPERF_RUN_COUNT=<%CM_RUN_DIR%\count.txt
-echo !CM_MLPERF_RUN_COUNT!
+set /p MLC_MLPERF_RUN_COUNT=<%MLC_RUN_DIR%\count.txt
+echo !MLC_MLPERF_RUN_COUNT!
:: Increment the count
-set /a CM_MLPERF_RUN_COUNT=!CM_MLPERF_RUN_COUNT! + 1
-echo !CM_MLPERF_RUN_COUNT! > %CM_RUN_DIR%\count.txt
+set /a MLC_MLPERF_RUN_COUNT=!MLC_MLPERF_RUN_COUNT! + 1
+echo !MLC_MLPERF_RUN_COUNT! > %MLC_RUN_DIR%\count.txt
:: Check the value and set the environment variable accordingly
-if !CM_MLPERF_RUN_COUNT! EQU 1 (
- set CM_MLPERF_USER_CONF=%CM_MLPERF_RANGING_USER_CONF%
+if !MLC_MLPERF_RUN_COUNT! EQU 1 (
+ set MLC_MLPERF_USER_CONF=%MLC_MLPERF_RANGING_USER_CONF%
) else (
- set CM_MLPERF_USER_CONF=%CM_MLPERF_TESTING_USER_CONF%
+ set MLC_MLPERF_USER_CONF=%MLC_MLPERF_TESTING_USER_CONF%
)
- """ + env.get('CM_RUN_CMD', '').strip()
+ """ + env.get('MLC_RUN_CMD', '').strip()
else:
- # Just use the existing CM_RUN_CMD if no ranging run is needed
- env['CM_MLPERF_RUN_CMD'] = env.get('CM_RUN_CMD', '').strip()
+ # Just use the existing MLC_RUN_CMD if no ranging run is needed
+ env['MLC_MLPERF_RUN_CMD'] = env.get('MLC_RUN_CMD', '').strip()
return {'return': 0}
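
The shell and batch fragments assembled above implement a two-pass power run: a counter file in the run directory selects the ranging user.conf on the first invocation and the testing user.conf afterwards. The same selection logic, condensed into Python (function name and layout are illustrative):

```python
import os

def select_user_conf(run_dir, ranging_conf, testing_conf):
    """First invocation returns the ranging conf, later ones the testing conf."""
    count_file = os.path.join(run_dir, "count.txt")
    count = 0
    if os.path.isfile(count_file):
        with open(count_file) as f:
            count = int(f.read().strip() or 0)
    count += 1
    with open(count_file, "w") as f:
        f.write(str(count))
    return ranging_conf if count == 1 else testing_conf
```
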
diff --git a/script/benchmark-program-mlperf/meta.yaml b/script/benchmark-program-mlperf/meta.yaml
index ed532f8bc..d5ffe62f3 100644
--- a/script/benchmark-program-mlperf/meta.yaml
+++ b/script/benchmark-program-mlperf/meta.yaml
@@ -17,14 +17,14 @@ variations:
tags: benchmark-program,program
power:
env:
- CM_MLPERF_POWER: 'yes'
- CM_SAVE_CONSOLE_LOG: 'no'
+ MLC_MLPERF_POWER: 'yes'
+ MLC_SAVE_CONSOLE_LOG: 'no'
group: power-mode
new_env_keys:
- - CM_MLPERF_*
+ - MLC_MLPERF_*
post_deps:
- enable_if_env:
- CM_MLPERF_LOADGEN_MODE:
+ MLC_MLPERF_LOADGEN_MODE:
- performance
names:
- mlperf-power-client
diff --git a/script/benchmark-program/customize.py b/script/benchmark-program/customize.py
index 08e15863c..d0286557a 100644
--- a/script/benchmark-program/customize.py
+++ b/script/benchmark-program/customize.py
@@ -8,109 +8,109 @@ def preprocess(i):
q = '"' if os_info['platform'] == 'windows' else "'"
- if env.get('CM_RUN_CMD', '') == '':
- if env.get('CM_BIN_NAME', '') == '':
+ if env.get('MLC_RUN_CMD', '') == '':
+ if env.get('MLC_BIN_NAME', '') == '':
x = 'run.exe' if os_info['platform'] == 'windows' else 'run.out'
- env['CM_BIN_NAME'] = x
+ env['MLC_BIN_NAME'] = x
if os_info['platform'] == 'windows':
- env['CM_RUN_CMD'] = env.get(
- 'CM_RUN_PREFIX', '') + env['CM_BIN_NAME']
- if env.get('CM_RUN_SUFFIX', '') != '':
- env['CM_RUN_CMD'] += ' ' + env['CM_RUN_SUFFIX']
+ env['MLC_RUN_CMD'] = env.get(
+ 'MLC_RUN_PREFIX', '') + env['MLC_BIN_NAME']
+ if env.get('MLC_RUN_SUFFIX', '') != '':
+ env['MLC_RUN_CMD'] += ' ' + env['MLC_RUN_SUFFIX']
else:
- if env['CM_ENABLE_NUMACTL'].lower() in ["on", "1", "true", "yes"]:
- env['CM_ENABLE_NUMACTL'] = "1"
- CM_RUN_PREFIX = "numactl " + env['CM_NUMACTL_MEMBIND'] + ' '
+ if env['MLC_ENABLE_NUMACTL'].lower() in ["on", "1", "true", "yes"]:
+ env['MLC_ENABLE_NUMACTL'] = "1"
+ MLC_RUN_PREFIX = "numactl " + env['MLC_NUMACTL_MEMBIND'] + ' '
else:
- CM_RUN_PREFIX = ''
+ MLC_RUN_PREFIX = ''
- CM_RUN_PREFIX += env.get('CM_RUN_PREFIX', '')
+ MLC_RUN_PREFIX += env.get('MLC_RUN_PREFIX', '')
- env['CM_RUN_PREFIX'] = CM_RUN_PREFIX
+ env['MLC_RUN_PREFIX'] = MLC_RUN_PREFIX
- CM_RUN_SUFFIX = (
- env['CM_REDIRECT_OUT'] +
- ' ') if 'CM_REDIRECT_OUT' in env else ''
- CM_RUN_SUFFIX += (env['CM_REDIRECT_ERR'] +
- ' ') if 'CM_REDIRECT_ERR' in env else ''
+ MLC_RUN_SUFFIX = (
+ env['MLC_REDIRECT_OUT'] +
+ ' ') if 'MLC_REDIRECT_OUT' in env else ''
+ MLC_RUN_SUFFIX += (env['MLC_REDIRECT_ERR'] +
+ ' ') if 'MLC_REDIRECT_ERR' in env else ''
- env['CM_RUN_SUFFIX'] = env['CM_RUN_SUFFIX'] + \
- CM_RUN_SUFFIX if 'CM_RUN_SUFFIX' in env else CM_RUN_SUFFIX
+ env['MLC_RUN_SUFFIX'] = env['MLC_RUN_SUFFIX'] + \
+ MLC_RUN_SUFFIX if 'MLC_RUN_SUFFIX' in env else MLC_RUN_SUFFIX
- if env.get('CM_RUN_DIR', '') == '':
- env['CM_RUN_DIR'] = os.getcwd()
+ if env.get('MLC_RUN_DIR', '') == '':
+ env['MLC_RUN_DIR'] = os.getcwd()
- env['CM_RUN_CMD'] = CM_RUN_PREFIX + ' ' + os.path.join(
- env['CM_RUN_DIR'], env['CM_BIN_NAME']) + ' ' + env['CM_RUN_SUFFIX']
+ env['MLC_RUN_CMD'] = MLC_RUN_PREFIX + ' ' + os.path.join(
+ env['MLC_RUN_DIR'], env['MLC_BIN_NAME']) + ' ' + env['MLC_RUN_SUFFIX']
- x = env.get('CM_RUN_PREFIX0', '')
+ x = env.get('MLC_RUN_PREFIX0', '')
if x != '':
- env['CM_RUN_CMD'] = x + ' ' + env.get('CM_RUN_CMD', '')
+ env['MLC_RUN_CMD'] = x + ' ' + env.get('MLC_RUN_CMD', '')
if os_info['platform'] != 'windows' and str(
- env.get('CM_SAVE_CONSOLE_LOG', True)).lower() not in ["no", "false", "0"]:
- logs_dir = env.get('CM_LOGS_DIR', env['CM_RUN_DIR'])
- env['CM_RUN_CMD'] += r" 2>&1 | tee " + q + os.path.join(
+ env.get('MLC_SAVE_CONSOLE_LOG', True)).lower() not in ["no", "false", "0"]:
+ logs_dir = env.get('MLC_LOGS_DIR', env['MLC_RUN_DIR'])
+ env['MLC_RUN_CMD'] += r" 2>&1 | tee " + q + os.path.join(
logs_dir, "console.out") + q + r"; echo \${PIPESTATUS[0]} > exitstatus"
    # additional arguments and tags for measuring system information (only if
- # 'CM_PROFILE_NVIDIA_POWER' is 'on')
- if env.get('CM_PROFILE_NVIDIA_POWER', '') == "on":
- env['CM_SYS_UTILISATION_SCRIPT_TAGS'] = ''
+ # 'MLC_PROFILE_NVIDIA_POWER' is 'on')
+ if env.get('MLC_PROFILE_NVIDIA_POWER', '') == "on":
+ env['MLC_SYS_UTILISATION_SCRIPT_TAGS'] = ''
# this section is for selecting the variation
- if env.get('CM_MLPERF_DEVICE', '') == "gpu":
- env['CM_SYS_UTILISATION_SCRIPT_TAGS'] += ',_cuda'
- elif env.get('CM_MLPERF_DEVICE', '') == "cpu":
- env['CM_SYS_UTILISATION_SCRIPT_TAGS'] += ',_cpu'
+ if env.get('MLC_MLPERF_DEVICE', '') == "gpu":
+ env['MLC_SYS_UTILISATION_SCRIPT_TAGS'] += ',_cuda'
+ elif env.get('MLC_MLPERF_DEVICE', '') == "cpu":
+ env['MLC_SYS_UTILISATION_SCRIPT_TAGS'] += ',_cpu'
# this section is for supplying the input arguments/tags
- env['CM_SYS_UTILISATION_SCRIPT_TAGS'] += ' --log_dir=\'' + \
+ env['MLC_SYS_UTILISATION_SCRIPT_TAGS'] += ' --log_dir=\'' + \
logs_dir + '\'' # specify the logs directory
# specifying the interval in which the system information should be
# measured
- if env.get('CM_SYSTEM_INFO_MEASUREMENT_INTERVAL', '') != '':
- env['CM_SYS_UTILISATION_SCRIPT_TAGS'] += ' --interval=\"' + \
- env['CM_SYSTEM_INFO_MEASUREMENT_INTERVAL'] + '\"'
+ if env.get('MLC_SYSTEM_INFO_MEASUREMENT_INTERVAL', '') != '':
+ env['MLC_SYS_UTILISATION_SCRIPT_TAGS'] += ' --interval=\"' + \
+ env['MLC_SYSTEM_INFO_MEASUREMENT_INTERVAL'] + '\"'
# generate the pre run cmd - recording runtime system infos
pre_run_cmd = ""
- if env.get('CM_PRE_RUN_CMD_EXTERNAL', '') != '':
- pre_run_cmd += env['CM_PRE_RUN_CMD_EXTERNAL']
+ if env.get('MLC_PRE_RUN_CMD_EXTERNAL', '') != '':
+ pre_run_cmd += env['MLC_PRE_RUN_CMD_EXTERNAL']
- if env.get('CM_PROFILE_NVIDIA_POWER', '') == "on":
+ if env.get('MLC_PROFILE_NVIDIA_POWER', '') == "on":
if pre_run_cmd != '':
pre_run_cmd += ' && '
# running the script as a process in background
pre_run_cmd = pre_run_cmd + 'cm run script --tags=runtime,system,utilisation' + \
- env['CM_SYS_UTILISATION_SCRIPT_TAGS'] + ' --quiet & '
+ env['MLC_SYS_UTILISATION_SCRIPT_TAGS'] + ' --quiet & '
    # obtain the pid of the background process
pre_run_cmd += r" cmd_pid=\$! && echo CMD_PID=\$cmd_pid"
print(
f"Pre run command for recording the runtime system information: {pre_run_cmd}")
- env['CM_PRE_RUN_CMD'] = pre_run_cmd
+ env['MLC_PRE_RUN_CMD'] = pre_run_cmd
# generate the post run cmd - for killing the process that records runtime
# system infos
post_run_cmd = ""
- if env.get('CM_PROFILE_NVIDIA_POWER', '') == "on":
+ if env.get('MLC_PROFILE_NVIDIA_POWER', '') == "on":
post_run_cmd += r"echo killing process \$cmd_pid && kill -TERM \${cmd_pid}"
print(
f"Post run command for killing the process that measures the runtime system information: {post_run_cmd}")
- env['CM_POST_RUN_CMD'] = post_run_cmd
+ env['MLC_POST_RUN_CMD'] = post_run_cmd
# Print info
print('***************************************************************************')
print('CM script::benchmark-program/run.sh')
print('')
- print('Run Directory: {}'.format(env.get('CM_RUN_DIR', '')))
+ print('Run Directory: {}'.format(env.get('MLC_RUN_DIR', '')))
print('')
- print('CMD: {}'.format(env.get('CM_RUN_CMD', '')))
+ print('CMD: {}'.format(env.get('MLC_RUN_CMD', '')))
print('')
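
`preprocess` above builds the final command from three pieces: an optional `numactl` prefix, the binary path, and redirection suffixes, then appends a `tee` pipeline so the console log and the program's real exit status are both preserved. A condensed sketch of that assembly (truthiness handling simplified; the run dir default is illustrative):

```python
import os

def build_run_cmd(env, run_dir="/tmp/run"):
    prefix = ""
    if env.get("MLC_ENABLE_NUMACTL", "0").lower() in ("on", "1", "true", "yes"):
        prefix = "numactl " + env.get("MLC_NUMACTL_MEMBIND", "--localalloc") + " "
    cmd = prefix + os.path.join(run_dir, env.get("MLC_BIN_NAME", "run.out"))
    log = os.path.join(env.get("MLC_LOGS_DIR", run_dir), "console.out")
    # tee keeps the console log; PIPESTATUS[0] preserves the program's status.
    return cmd + f" 2>&1 | tee '{log}'; echo ${{PIPESTATUS[0]}} > exitstatus"

print(build_run_cmd({"MLC_ENABLE_NUMACTL": "yes"}))
```
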
diff --git a/script/benchmark-program/meta.yaml b/script/benchmark-program/meta.yaml
index 4abb48d60..73d59e6c8 100644
--- a/script/benchmark-program/meta.yaml
+++ b/script/benchmark-program/meta.yaml
@@ -3,19 +3,19 @@ automation_alias: script
automation_uid: 5b4e0237da074764
category: DevOps automation
default_env:
- CM_ENABLE_NUMACTL: '0'
- CM_ENABLE_PROFILING: '0'
+ MLC_ENABLE_NUMACTL: '0'
+ MLC_ENABLE_PROFILING: '0'
deps:
- tags: detect,cpu
- enable_if_env:
- CM_SET_PERFORMANCE_MODE:
+ MLC_SET_PERFORMANCE_MODE:
- 'on'
- 'yes'
- 'True'
- true
tags: set,performance,mode,_performance
new_env_keys:
-- CM_RUN_CMD
+- MLC_RUN_CMD
tags:
- program
- benchmark
@@ -25,14 +25,14 @@ uid: 19f369ef47084895
variations:
numactl:
default_env:
- CM_ENABLE_NUMACTL: 1
- CM_NUMACTL_MEMBIND: --localalloc
+ MLC_ENABLE_NUMACTL: 1
+ MLC_NUMACTL_MEMBIND: --localalloc
numactl-interleave:
default_env:
- CM_ENABLE_NUMACTL: 1
- CM_NUMACTL_MEMBIND: --interleave=all
+ MLC_ENABLE_NUMACTL: 1
+ MLC_NUMACTL_MEMBIND: --interleave=all
profile:
default_env:
- CM_ENABLE_PROFILING: 1
+ MLC_ENABLE_PROFILING: 1
deps:
- tags: get,profiler
diff --git a/script/benchmark-program/run-ubuntu.sh b/script/benchmark-program/run-ubuntu.sh
index 1f19ed80b..dfca75282 100644
--- a/script/benchmark-program/run-ubuntu.sh
+++ b/script/benchmark-program/run-ubuntu.sh
@@ -1,9 +1,9 @@
#!/bin/bash
-CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD}
+MLC_TMP_CURRENT_SCRIPT_PATH=${MLC_TMP_CURRENT_SCRIPT_PATH:-$PWD}
-cd ${CM_TMP_CURRENT_SCRIPT_PATH}
-if [ ${CM_ENABLE_NUMACTL} == "1" ]; then
+cd ${MLC_TMP_CURRENT_SCRIPT_PATH}
+if [ ${MLC_ENABLE_NUMACTL} == "1" ]; then
sudo apt-get install numactl
fi
diff --git a/script/benchmark-program/run.bat b/script/benchmark-program/run.bat
index d15449355..ccc797361 100644
--- a/script/benchmark-program/run.bat
+++ b/script/benchmark-program/run.bat
@@ -1,21 +1,21 @@
@echo off
-if "%CM_RUN_DIR%" == "" (
- echo CM_RUN_DIR is not set
+if "%MLC_RUN_DIR%" == "" (
+ echo MLC_RUN_DIR is not set
exit 1
)
-cd %CM_RUN_DIR%
+cd %MLC_RUN_DIR%
-if "%CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM%" == "True" (
+if "%MLC_DEBUG_SCRIPT_BENCHMARK_PROGRAM%" == "True" (
echo *****************************************************
echo You are now in Debug shell with pre-set CM env and can run the following command line manually:
echo.
- if not "%CM_RUN_CMD0%" == "" (
- echo %CM_RUN_CMD0%
+ if not "%MLC_RUN_CMD0%" == "" (
+ echo %MLC_RUN_CMD0%
) else (
- echo %CM_RUN_CMD%
+ echo %MLC_RUN_CMD%
)
echo.
@@ -27,13 +27,13 @@ if "%CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM%" == "True" (
exit 0
)
-rem Check CM_RUN_CMD0
-if not "%CM_RUN_CMD0%" == "" (
+rem Check MLC_RUN_CMD0
+if not "%MLC_RUN_CMD0%" == "" (
echo.
- %CM_RUN_CMD0%
+ %MLC_RUN_CMD0%
) else (
echo.
- %CM_RUN_CMD%
+ %MLC_RUN_CMD%
)
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/benchmark-program/run.sh b/script/benchmark-program/run.sh
index 6eb39d333..011e6a8fe 100755
--- a/script/benchmark-program/run.sh
+++ b/script/benchmark-program/run.sh
@@ -2,8 +2,8 @@
# function to safely exit the background process
safe_exit() {
- if [[ "${CM_POST_RUN_CMD}" != "" ]]; then
- eval ${CM_POST_RUN_CMD}
+ if [[ "${MLC_POST_RUN_CMD}" != "" ]]; then
+ eval ${MLC_POST_RUN_CMD}
if [ $? -eq 0 ]; then
exit 0
else
@@ -15,27 +15,27 @@ safe_exit() {
# trap signals to redirect the execution flow to safe_exit
trap safe_exit SIGINT SIGTERM
-if [[ ${CM_MLPERF_POWER} == "yes" && ${CM_MLPERF_LOADGEN_MODE} == "performance" ]]; then
+if [[ ${MLC_MLPERF_POWER} == "yes" && ${MLC_MLPERF_LOADGEN_MODE} == "performance" ]]; then
exit 0
fi
# Run
-if [ -z ${CM_RUN_DIR} ]; then
- echo "CM_RUN_DIR is not set"
+if [ -z ${MLC_RUN_DIR} ]; then
+ echo "MLC_RUN_DIR is not set"
exit 1
fi
-cd ${CM_RUN_DIR}
+cd ${MLC_RUN_DIR}
-if [[ "${CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM}" == "True" ]]; then
+if [[ "${MLC_DEBUG_SCRIPT_BENCHMARK_PROGRAM}" == "True" ]]; then
echo "*****************************************************"
echo "You are now in Debug shell with pre-set CM env and can run the following command line manually:"
echo ""
- if [[ "${CM_RUN_CMD0}" != "" ]]; then
- echo "${CM_RUN_CMD0}"
+ if [[ "${MLC_RUN_CMD0}" != "" ]]; then
+ echo "${MLC_RUN_CMD0}"
else
- echo "${CM_RUN_CMD}"
+ echo "${MLC_RUN_CMD}"
fi
echo ""
@@ -46,7 +46,7 @@ if [[ "${CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM}" == "True" ]]; then
#
# cp -f tmp-run.sh debug-script-benchmark-program.sh
#
-# sed -e 's/CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM="True"/CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM="False"/g' -i debug-script-benchmark-program.sh
+# sed -e 's/MLC_DEBUG_SCRIPT_BENCHMARK_PROGRAM="True"/MLC_DEBUG_SCRIPT_BENCHMARK_PROGRAM="False"/g' -i debug-script-benchmark-program.sh
bash
@@ -54,8 +54,8 @@ if [[ "${CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM}" == "True" ]]; then
exit 0
fi
-echo $CM_PRE_RUN_CMD
-eval ${CM_PRE_RUN_CMD}
+echo $MLC_PRE_RUN_CMD
+eval ${MLC_PRE_RUN_CMD}
# Function to run command and check exit status
run_command() {
@@ -78,17 +78,17 @@ run_command() {
fi
}
-# Run CM_RUN_CMD0 if it exists, otherwise run CM_RUN_CMD
-if [[ -n "$CM_RUN_CMD0" ]]; then
- run_command "$CM_RUN_CMD0"
+# Run MLC_RUN_CMD0 if it exists, otherwise run MLC_RUN_CMD
+if [[ -n "$MLC_RUN_CMD0" ]]; then
+ run_command "$MLC_RUN_CMD0"
fi
-run_command "$CM_RUN_CMD"
+run_command "$MLC_RUN_CMD"
# Run post-run command if it exists
-if [[ -n "$CM_POST_RUN_CMD" ]]; then
- eval "$CM_POST_RUN_CMD"
+if [[ -n "$MLC_POST_RUN_CMD" ]]; then
+ eval "$MLC_POST_RUN_CMD"
post_exitstatus=$?
# Exit if post-run command fails
if [[ $post_exitstatus -ne 0 ]]; then
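
`run.sh` above traps SIGINT/SIGTERM into `safe_exit` so the background monitor started by `MLC_PRE_RUN_CMD` is torn down even on interruption. The same pattern in Python, for comparison (a sketch; `sleep` stands in for the monitor process):

```python
import signal
import subprocess
import sys

monitor = subprocess.Popen(["sleep", "3600"])  # stand-in for the background monitor

def safe_exit(signum, frame):
    # Mirror MLC_POST_RUN_CMD: stop the monitor before exiting.
    monitor.terminate()
    monitor.wait()
    sys.exit(0)

signal.signal(signal.SIGINT, safe_exit)
signal.signal(signal.SIGTERM, safe_exit)
```
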
diff --git a/script/build-docker-image/customize.py b/script/build-docker-image/customize.py
index a231b6b09..be7c33035 100644
--- a/script/build-docker-image/customize.py
+++ b/script/build-docker-image/customize.py
@@ -8,57 +8,57 @@ def preprocess(i):
os_info = i['os_info']
env = i['env']
- dockerfile_path = env.get('CM_DOCKERFILE_WITH_PATH', '')
+ dockerfile_path = env.get('MLC_DOCKERFILE_WITH_PATH', '')
if dockerfile_path != '' and os.path.exists(dockerfile_path):
build_dockerfile = False
- env['CM_BUILD_DOCKERFILE'] = "no"
+ env['MLC_BUILD_DOCKERFILE'] = "no"
os.chdir(os.path.dirname(dockerfile_path))
else:
build_dockerfile = True
- env['CM_BUILD_DOCKERFILE'] = "yes"
- env['CM_DOCKERFILE_BUILD_FROM_IMAGE_SCRIPT'] = "yes"
+ env['MLC_BUILD_DOCKERFILE'] = "yes"
+ env['MLC_DOCKERFILE_BUILD_FROM_IMAGE_SCRIPT'] = "yes"
- CM_DOCKER_BUILD_ARGS = env.get('+ CM_DOCKER_BUILD_ARGS', [])
+ MLC_DOCKER_BUILD_ARGS = env.get('+ MLC_DOCKER_BUILD_ARGS', [])
- if env.get('CM_GH_TOKEN', '') != '':
- CM_DOCKER_BUILD_ARGS.append("CM_GH_TOKEN=" + env['CM_GH_TOKEN'])
+ if env.get('MLC_GH_TOKEN', '') != '':
+ MLC_DOCKER_BUILD_ARGS.append("MLC_GH_TOKEN=" + env['MLC_GH_TOKEN'])
- if CM_DOCKER_BUILD_ARGS:
+ if MLC_DOCKER_BUILD_ARGS:
build_args = "--build-arg " + \
- " --build-arg ".join(CM_DOCKER_BUILD_ARGS)
+ " --build-arg ".join(MLC_DOCKER_BUILD_ARGS)
else:
build_args = ""
- env['CM_DOCKER_BUILD_ARGS'] = build_args
+ env['MLC_DOCKER_BUILD_ARGS'] = build_args
-# if 'CM_DOCKERFILE_WITH_PATH' not in env or not exists(env['CM_DOCKERFILE_WITH_PATH']):
-# env['CM_BUILD_DOCKERFILE'] = "yes"
+# if 'MLC_DOCKERFILE_WITH_PATH' not in env or not exists(env['MLC_DOCKERFILE_WITH_PATH']):
+# env['MLC_BUILD_DOCKERFILE'] = "yes"
# else:
-# env['CM_BUILD_DOCKERFILE'] = "no"
+# env['MLC_BUILD_DOCKERFILE'] = "no"
#
- if env.get("CM_DOCKER_IMAGE_REPO", "") == '':
- env['CM_DOCKER_IMAGE_REPO'] = "localhost/local"
+ if env.get("MLC_DOCKER_IMAGE_REPO", "") == '':
+ env['MLC_DOCKER_IMAGE_REPO'] = "localhost/local"
- docker_image_name = env.get('CM_DOCKER_IMAGE_NAME', '')
+ docker_image_name = env.get('MLC_DOCKER_IMAGE_NAME', '')
if docker_image_name == '':
- docker_image_name = "cm-script-" + \
- env.get('CM_DOCKER_RUN_SCRIPT_TAGS', '').replace(
+ docker_image_name = "mlc-script-" + \
+ env.get('MLC_DOCKER_RUN_SCRIPT_TAGS', '').replace(
',', '-').replace('_', '-')
- env['CM_DOCKER_IMAGE_NAME'] = docker_image_name.lower()
+ env['MLC_DOCKER_IMAGE_NAME'] = docker_image_name.lower()
- if env.get("CM_DOCKER_IMAGE_TAG", "") == '':
- env['CM_DOCKER_IMAGE_TAG'] = "latest"
+ if env.get("MLC_DOCKER_IMAGE_TAG", "") == '':
+ env['MLC_DOCKER_IMAGE_TAG'] = "latest"
- if str(env.get("CM_DOCKER_CACHE", "yes")).lower() in ["no", "false", "0"]:
- env["CM_DOCKER_CACHE_ARG"] = " --no-cache"
+ if str(env.get("MLC_DOCKER_CACHE", "yes")).lower() in ["no", "false", "0"]:
+ env["MLC_DOCKER_CACHE_ARG"] = " --no-cache"
CMD = ''
image_name = get_image_name(env)
if build_dockerfile:
- dockerfile_path = r"\${CM_DOCKERFILE_WITH_PATH}"
+ dockerfile_path = r"\${MLC_DOCKERFILE_WITH_PATH}"
# Write .dockerignore
with open('.dockerignore', 'w') as f:
@@ -66,8 +66,8 @@ def preprocess(i):
# Prepare CMD to build image
XCMD = [
- f'{env["CM_CONTAINER_TOOL"]} build ' +
- env.get('CM_DOCKER_CACHE_ARG', ''),
+ f'{env["MLC_CONTAINER_TOOL"]} build ' +
+ env.get('MLC_DOCKER_CACHE_ARG', ''),
' ' + build_args,
' -f "' + dockerfile_path + '"',
' -t "' + image_name,
@@ -89,16 +89,16 @@ def preprocess(i):
print('')
- env['CM_DOCKER_BUILD_CMD'] = CMD
+ env['MLC_DOCKER_BUILD_CMD'] = CMD
return {'return': 0}
def get_image_name(env):
- image_name = env.get('CM_DOCKER_IMAGE_REPO', '') + '/' + \
- env.get('CM_DOCKER_IMAGE_NAME', '') + ':' + \
- env.get('CM_DOCKER_IMAGE_TAG', '') + '"'
+ image_name = env.get('MLC_DOCKER_IMAGE_REPO', '') + '/' + \
+ env.get('MLC_DOCKER_IMAGE_NAME', '') + ':' + \
+ env.get('MLC_DOCKER_IMAGE_TAG', '') + '"'
return image_name
@@ -108,13 +108,13 @@ def postprocess(i):
env = i['env']
# Check if need to push docker image to the Docker Hub
- if env.get('CM_DOCKER_PUSH_IMAGE', '') in ['True', True, 'yes']:
+ if env.get('MLC_DOCKER_PUSH_IMAGE', '') in ['True', True, 'yes']:
image_name = get_image_name(env)
# Prepare CMD to build image
PCMD = 'docker image push ' + image_name
- dockerfile_path = env.get('CM_DOCKERFILE_WITH_PATH', '')
+ dockerfile_path = env.get('MLC_DOCKERFILE_WITH_PATH', '')
if dockerfile_path != '' and os.path.isfile(dockerfile_path):
with open(dockerfile_path + '.push.sh', 'w') as f:
f.write(PCMD + '\n')
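
Note that `get_image_name` above appends a stray closing double quote because the build command opens one before the name, a subtle coupling between the two functions. A hedged sketch that keeps the `repo/name:tag` string clean and leaves quoting to the command assembler instead (defaults are illustrative):

```python
import shlex

def image_ref(env):
    repo = env.get("MLC_DOCKER_IMAGE_REPO", "localhost/local")
    name = env.get("MLC_DOCKER_IMAGE_NAME", "mlc-script").lower()
    tag = env.get("MLC_DOCKER_IMAGE_TAG", "latest")
    return f"{repo}/{name}:{tag}"

def build_cmd(env, dockerfile, build_args=()):
    # Quote each fragment once, at assembly time.
    args = " ".join(f"--build-arg {shlex.quote(a)}" for a in build_args)
    return (f'{env.get("MLC_CONTAINER_TOOL", "docker")} build {args} '
            f'-f {shlex.quote(dockerfile)} -t {shlex.quote(image_ref(env))} .')

print(build_cmd({}, "Dockerfile", ["MLC_GH_TOKEN=***"]))
```
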
diff --git a/script/build-docker-image/examples/0-common.bat b/script/build-docker-image/examples/0-common.bat
deleted file mode 100644
index 721cc1b5d..000000000
--- a/script/build-docker-image/examples/0-common.bat
+++ /dev/null
@@ -1,21 +0,0 @@
-set DOCKER_IMAGE_REPO=cknowledge
-
-set DOCKER_OS=ubuntu
-
-rem set DOCKER_OS_VER=22.04
-set DOCKER_OS_VER=23.04
-set DOCKER_PIP_EXTRA_FLAGS=--break-system-packages
-
-rem set DOCKER_IMAGE_NAME=cm-base
-set DOCKER_IMAGE_NAME=cm-script-app-image-classification-onnx-py
-set DOCKER_IMAGE_POST_FILE=%CD%\extra-cmd.cm-script-app-image-classification-onnx-py
-
-rem set DOCKER_IMAGE_TAG=%DOCKER_OS%-%DOCKER_OS_VER%-20230804
-
-set DOCKER_IMAGE_TAG=%DOCKER_OS%-%DOCKER_OS_VER%-latest
-set DOCKERFILE_EXT=%DOCKER_IMAGE_NAME%-%DOCKER_IMAGE_TAG%
-
-set DOCKER_PACKAGE_MANAGER_UPDATE_CMD="apt-get update -y && apt-get upgrade -y"
-
-set DOCKER_CM_MLOPS_REPO="ctuning@mlcommons-ck"
-rem set DOCKER_CM_MLOPS_REPO="mlcommons@ck"
diff --git a/script/build-docker-image/examples/0-generate.bat b/script/build-docker-image/examples/0-generate.bat
deleted file mode 100644
index 443d029ae..000000000
--- a/script/build-docker-image/examples/0-generate.bat
+++ /dev/null
@@ -1,9 +0,0 @@
-call 0-common.bat
-
-cmr "build dockerfile" --file_path=%CD%\Dockerfile.%DOCKERFILE_EXT% ^
- --docker_os=%DOCKER_OS% ^
- --docker_os_version=%DOCKER_OS_VER% ^
- --package_manager_update_cmd=%DOCKER_PACKAGE_MANAGER_UPDATE_CMD% ^
- --pip_extra_flags=%DOCKER_PIP_EXTRA_FLAGS% ^
- --post_file=%DOCKER_IMAGE_POST_FILE% ^
- --cm_repo=%DOCKER_CM_MLOPS_REPO%
diff --git a/script/build-docker-image/examples/1-build.bat b/script/build-docker-image/examples/1-build.bat
deleted file mode 100644
index 2356eb032..000000000
--- a/script/build-docker-image/examples/1-build.bat
+++ /dev/null
@@ -1,8 +0,0 @@
-call 0-common.bat
-
-cmr "build docker image" --dockerfile=%CD%\Dockerfile.%DOCKERFILE_EXT% ^
- --docker_os=%DOCKER_OS% ^
- --docker_os_version=%DOCKER_OS_VER% ^
- --image_repo=%DOCKER_IMAGE_REPO% ^
- --image_name=%DOCKER_IMAGE_NAME% ^
- --image_tag=%DOCKER_IMAGE_TAG%
diff --git a/script/build-docker-image/examples/2-run-cm-command1.bat b/script/build-docker-image/examples/2-run-cm-command1.bat
deleted file mode 100644
index eeeadd311..000000000
--- a/script/build-docker-image/examples/2-run-cm-command1.bat
+++ /dev/null
@@ -1,3 +0,0 @@
-call 0-common.bat
-
-docker run -it %DOCKER_IMAGE_REPO%/%DOCKER_IMAGE_NAME%:%DOCKER_IMAGE_TAG% "cmr 'detect os' -j"
diff --git a/script/build-docker-image/examples/2-run-cm-command2.bat b/script/build-docker-image/examples/2-run-cm-command2.bat
deleted file mode 100644
index ac1c8a3a6..000000000
--- a/script/build-docker-image/examples/2-run-cm-command2.bat
+++ /dev/null
@@ -1,3 +0,0 @@
-call 0-common.bat
-
-cmr "run docker container" --image_repo=%DOCKER_IMAGE_REPO% --image_name=%DOCKER_IMAGE_NAME% --image_tag=%DOCKER_IMAGE_TAG% --run_cmd="cmr 'detect os' -j"
diff --git a/script/build-docker-image/examples/2-run-cm-command3.bat b/script/build-docker-image/examples/2-run-cm-command3.bat
deleted file mode 100644
index e690f093c..000000000
--- a/script/build-docker-image/examples/2-run-cm-command3.bat
+++ /dev/null
@@ -1,3 +0,0 @@
-call 0-common.bat
-
-cmr "run docker container" --image_repo=%DOCKER_IMAGE_REPO% --image_tag=%DOCKER_IMAGE_TAG% --script_tags=detect,os
diff --git a/script/build-docker-image/examples/2-run-cm-command4.bat b/script/build-docker-image/examples/2-run-cm-command4.bat
deleted file mode 100644
index c2e6f801c..000000000
--- a/script/build-docker-image/examples/2-run-cm-command4.bat
+++ /dev/null
@@ -1,3 +0,0 @@
-call 0-common.bat
-
-cmr "run docker container" --image_repo=%DOCKER_IMAGE_REPO% --image_tag=%DOCKER_IMAGE_TAG% --script_tags=detect,os --it
diff --git a/script/build-docker-image/examples/2-run-cm-command5.bat b/script/build-docker-image/examples/2-run-cm-command5.bat
deleted file mode 100644
index d153437f1..000000000
--- a/script/build-docker-image/examples/2-run-cm-command5.bat
+++ /dev/null
@@ -1,3 +0,0 @@
-call 0-common.bat
-
-cm docker script --tags=detect,os -j
diff --git a/script/build-docker-image/examples/2-run-interactive1.bat b/script/build-docker-image/examples/2-run-interactive1.bat
deleted file mode 100644
index 917dda930..000000000
--- a/script/build-docker-image/examples/2-run-interactive1.bat
+++ /dev/null
@@ -1,3 +0,0 @@
-call 0-common.bat
-
-docker run -it %DOCKER_IMAGE_REPO%/%DOCKER_IMAGE_NAME%:%DOCKER_IMAGE_TAG% -c bash
diff --git a/script/build-docker-image/examples/2-run-interactive2.bat b/script/build-docker-image/examples/2-run-interactive2.bat
deleted file mode 100644
index 67dd22650..000000000
--- a/script/build-docker-image/examples/2-run-interactive2.bat
+++ /dev/null
@@ -1,3 +0,0 @@
-call 0-common.bat
-
-cmr "run docker container" --image_repo=%DOCKER_IMAGE_REPO% --image_name=%DOCKER_IMAGE_NAME% --image_tag=%DOCKER_IMAGE_TAG% --it
diff --git a/script/build-docker-image/examples/3-push-to-docker-hub.bat b/script/build-docker-image/examples/3-push-to-docker-hub.bat
deleted file mode 100644
index 2c9eb634d..000000000
--- a/script/build-docker-image/examples/3-push-to-docker-hub.bat
+++ /dev/null
@@ -1,3 +0,0 @@
-call 0-common.bat
-
-docker push %DOCKER_IMAGE_REPO%/%DOCKER_IMAGE_NAME%:%DOCKER_IMAGE_TAG%
diff --git a/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-22.04-20230804 b/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-22.04-20230804
deleted file mode 100644
index 418e73363..000000000
--- a/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-22.04-20230804
+++ /dev/null
@@ -1,38 +0,0 @@
-FROM ubuntu:22.04
-
-# Maintained by the MLCommons taskforce on automation and reproducibility
-LABEL github="https://github.com/mlcommons/ck"
-LABEL maintainer="https://cKnowledge.org/mlcommons-taskforce"
-
-SHELL ["/bin/bash", "-c"]
-ARG CM_GH_TOKEN
-
-# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes
-# Install system dependencies
-RUN apt-get update -y && apt-get upgrade -y
-RUN apt-get install -y python3 python3-pip git sudo wget
-
-# Install python packages
-RUN python3 -m pip install cmind requests
-
-# Setup docker environment
-ENTRYPOINT ["/bin/bash", "-c"]
-ENV TZ="US/Pacific"
-ENV PATH="${PATH}:/home/cmuser/.local/bin"
-RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone
-
-# Setup docker user
-RUN groupadd cm
-RUN useradd -g cm --create-home --shell /bin/bash cmuser
-RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
-USER cmuser:cm
-WORKDIR /home/cmuser
-
-# Download CM repo for scripts
-RUN cm pull repo mlcommons@ck --dummy
-
-# Install all system dependencies
-RUN cm run script --quiet --tags=get,sys-utils-cm
-
-# Run commands
-RUN cm version
diff --git a/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-20230804 b/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-20230804
deleted file mode 100644
index 478e155f6..000000000
--- a/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-20230804
+++ /dev/null
@@ -1,38 +0,0 @@
-FROM ubuntu:23.04
-
-# Maintained by the MLCommons taskforce on automation and reproducibility
-LABEL github="https://github.com/mlcommons/ck"
-LABEL maintainer="https://cKnowledge.org/mlcommons-taskforce"
-
-SHELL ["/bin/bash", "-c"]
-ARG CM_GH_TOKEN
-
-# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes
-# Install system dependencies
-RUN apt-get update -y && apt-get upgrade -y
-RUN apt-get install -y python3 python3-pip git sudo wget
-
-# Install python packages
-RUN python3 -m pip install cmind requests --break-system-packages
-
-# Setup docker environment
-ENTRYPOINT ["/bin/bash", "-c"]
-ENV TZ="US/Pacific"
-ENV PATH="${PATH}:/home/cmuser/.local/bin"
-RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone
-
-# Setup docker user
-RUN groupadd cm
-RUN useradd -g cm --create-home --shell /bin/bash cmuser
-RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
-USER cmuser:cm
-WORKDIR /home/cmuser
-
-# Download CM repo for scripts
-RUN cm pull repo mlcommons@ck --dummy
-
-# Install all system dependencies
-RUN cm run script --quiet --tags=get,sys-utils-cm
-
-# Run commands
-RUN cm version
diff --git a/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-latest b/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-latest
deleted file mode 100644
index 832a37669..000000000
--- a/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-latest
+++ /dev/null
@@ -1,38 +0,0 @@
-FROM ubuntu:23.04
-
-# Maintained by the MLCommons taskforce on automation and reproducibility
-LABEL github="https://github.com/mlcommons/ck"
-LABEL maintainer="https://cKnowledge.org/mlcommons-taskforce"
-
-SHELL ["/bin/bash", "-c"]
-ARG CM_GH_TOKEN
-
-# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes
-# Install system dependencies
-RUN apt-get update -y && apt-get upgrade -y
-RUN apt-get install -y python3 python3-pip git sudo wget
-
-# Install python packages
-RUN python3 -m pip install cmind requests --break-system-packages
-
-# Setup docker environment
-ENTRYPOINT ["/bin/bash", "-c"]
-ENV TZ="US/Pacific"
-ENV PATH="${PATH}:/home/cmuser/.local/bin"
-RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone
-
-# Setup docker user
-RUN groupadd cm
-RUN useradd -g cm --create-home --shell /bin/bash cmuser
-RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
-USER cmuser:cm
-WORKDIR /home/cmuser
-
-# Download CM repo for scripts
-RUN cm pull repo ctuning@mlcommons-ck
-
-# Install all system dependencies
-RUN cm run script --quiet --tags=get,sys-utils-cm
-
-# Run commands
-RUN cm version
diff --git a/script/build-docker-image/examples/Dockerfile.cm-script-app-image-classification-onnx-py-ubuntu-23.04-latest b/script/build-docker-image/examples/Dockerfile.cm-script-app-image-classification-onnx-py-ubuntu-23.04-latest
deleted file mode 100644
index 7ce0af2fb..000000000
--- a/script/build-docker-image/examples/Dockerfile.cm-script-app-image-classification-onnx-py-ubuntu-23.04-latest
+++ /dev/null
@@ -1,45 +0,0 @@
-FROM ubuntu:23.04
-
-# Maintained by the MLCommons taskforce on automation and reproducibility
-LABEL github="https://github.com/mlcommons/ck"
-LABEL maintainer="https://cKnowledge.org/mlcommons-taskforce"
-
-SHELL ["/bin/bash", "-c"]
-ARG CM_GH_TOKEN
-
-# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes
-# Install system dependencies
-RUN apt-get update -y && apt-get upgrade -y
-RUN apt-get install -y python3 python3-pip git sudo wget
-
-# Install python packages
-RUN python3 -m pip install cmind requests --break-system-packages
-
-# Setup docker environment
-ENTRYPOINT ["/bin/bash", "-c"]
-ENV TZ="US/Pacific"
-ENV PATH="${PATH}:/home/cmuser/.local/bin"
-RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone
-
-# Setup docker user
-RUN groupadd cm
-RUN useradd -g cm --create-home --shell /bin/bash cmuser
-RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
-USER cmuser:cm
-WORKDIR /home/cmuser
-
-# Download CM repo for scripts
-RUN cm pull repo ctuning@mlcommons-ck
-
-# Install all system dependencies
-RUN cm run script --quiet --tags=get,sys-utils-cm
-
-# Run commands
-RUN cm version
-
-# Create virtual python environment
-RUN cmr "install python-venv" --name=cm --quiet
-
-# Run image classification and install all related CM components automatically
-RUN cmr "python app image-classification onnx" --adr.python.name=cm --quiet
-
diff --git a/script/build-docker-image/examples/README.md b/script/build-docker-image/examples/README.md
deleted file mode 100644
index 8035bc429..000000000
--- a/script/build-docker-image/examples/README.md
+++ /dev/null
@@ -1 +0,0 @@
-https://hub.docker.com/r/cknowledge/cm-base/tags
diff --git a/script/build-docker-image/examples/computer_mouse.jpg b/script/build-docker-image/examples/computer_mouse.jpg
deleted file mode 100644
index e7f8abb6f..000000000
Binary files a/script/build-docker-image/examples/computer_mouse.jpg and /dev/null differ
diff --git a/script/build-docker-image/examples/extra-cmd.cm-script-app-image-classification-onnx-py b/script/build-docker-image/examples/extra-cmd.cm-script-app-image-classification-onnx-py
deleted file mode 100644
index 981f9b94d..000000000
--- a/script/build-docker-image/examples/extra-cmd.cm-script-app-image-classification-onnx-py
+++ /dev/null
@@ -1,6 +0,0 @@
-
-# Create virtual python environment
-RUN cmr "install python-venv" --name=cm --quiet
-
-# Run image classification and install all related CM components automatically
-RUN cmr "python app image-classification onnx" --adr.python.name=cm --quiet
diff --git a/script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.bat b/script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.bat
deleted file mode 100644
index c4f8e2204..000000000
--- a/script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.bat
+++ /dev/null
@@ -1,6 +0,0 @@
-rem call this script with computer_mouse.jpg as input
-
-call 0-common.bat
-
-rem docker run -v %CD%:/tmp/host -it --rm cknowledge/cm-script-app-image-classification-onnx-py:ubuntu-23.04-latest -c "time cmr 'python app image-classification onnx' --adr.python.name=cm --input=/tmp/host/%1"
-docker run -v %CD%:/tmp/host -it --rm %DOCKER_IMAGE_REPO%/%DOCKER_IMAGE_NAME%:%DOCKER_IMAGE_TAG% -c "time cmr 'python app image-classification onnx' --adr.python.name=cm --input=/tmp/host/%1"
\ No newline at end of file
diff --git a/script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.sh b/script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.sh
deleted file mode 100644
index 55314e9e4..000000000
--- a/script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/bash
-
-# call this script with computer_mouse.jpg as input
-
-docker run -v $PWD:/tmp/host -it --rm cknowledge/cm-script-app-image-classification-onnx-py:ubuntu-23.04-latest -c "time cmr 'python app image-classification onnx' --adr.python.name=cm --input=/tmp/host/$1"
diff --git a/script/build-docker-image/examples/run-cm-image-classification-python-onnx.bat b/script/build-docker-image/examples/run-cm-image-classification-python-onnx.bat
deleted file mode 100644
index 762ed99fd..000000000
--- a/script/build-docker-image/examples/run-cm-image-classification-python-onnx.bat
+++ /dev/null
@@ -1 +0,0 @@
-docker run -it --rm cknowledge/cm-script-app-image-classification-onnx-py:ubuntu-23.04-latest -c "time cmr 'python app image-classification onnx' --adr.python.name=cm"
diff --git a/script/build-docker-image/examples/run-cm-image-classification-python-onnx.sh b/script/build-docker-image/examples/run-cm-image-classification-python-onnx.sh
deleted file mode 100644
index a24a06ed9..000000000
--- a/script/build-docker-image/examples/run-cm-image-classification-python-onnx.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash
-
-docker run -it --rm cknowledge/cm-script-app-image-classification-onnx-py:ubuntu-23.04-latest -c "time cmr 'python app image-classification onnx' --adr.python.name=cm"
diff --git a/script/build-docker-image/meta.yaml b/script/build-docker-image/meta.yaml
index 8fd7c2571..d1d86083a 100644
--- a/script/build-docker-image/meta.yaml
+++ b/script/build-docker-image/meta.yaml
@@ -16,33 +16,33 @@ tags:
- dockerimage
default_env:
- CM_DOCKER_IMAGE_REPO: local
- CM_DOCKER_IMAGE_TAG: latest
+ MLC_DOCKER_IMAGE_REPO: local
+ MLC_DOCKER_IMAGE_TAG: latest
input_mapping:
- cache: CM_DOCKER_CACHE
- cm_repo: CM_MLOPS_REPO
- docker_os: CM_DOCKER_OS
- docker_os_version: CM_DOCKER_OS_VERSION
- dockerfile: CM_DOCKERFILE_WITH_PATH
- gh_token: CM_GH_TOKEN
- image_name: CM_DOCKER_IMAGE_NAME
- image_repo: CM_DOCKER_IMAGE_REPO
- image_tag: CM_DOCKER_IMAGE_TAG
- post_run_cmds: CM_DOCKER_POST_RUN_COMMANDS
- pre_run_cmds: CM_DOCKER_PRE_RUN_COMMANDS
- real_run: CM_REAL_RUN
- script_tags: CM_DOCKER_RUN_SCRIPT_TAGS
- push_image: CM_DOCKER_PUSH_IMAGE
+ cache: MLC_DOCKER_CACHE
+ cm_repo: MLC_MLOPS_REPO
+ docker_os: MLC_DOCKER_OS
+ docker_os_version: MLC_DOCKER_OS_VERSION
+ dockerfile: MLC_DOCKERFILE_WITH_PATH
+ gh_token: MLC_GH_TOKEN
+ image_name: MLC_DOCKER_IMAGE_NAME
+ image_repo: MLC_DOCKER_IMAGE_REPO
+ image_tag: MLC_DOCKER_IMAGE_TAG
+ post_run_cmds: MLC_DOCKER_POST_RUN_COMMANDS
+ pre_run_cmds: MLC_DOCKER_PRE_RUN_COMMANDS
+ real_run: MLC_REAL_RUN
+ script_tags: MLC_DOCKER_RUN_SCRIPT_TAGS
+ push_image: MLC_DOCKER_PUSH_IMAGE
new_env_keys:
-- CM_DOCKER_*
+- MLC_DOCKER_*
deps:
- tags: get,docker
prehook_deps:
- enable_if_env:
- CM_BUILD_DOCKERFILE:
+ MLC_BUILD_DOCKERFILE:
- 'yes'
tags: build,dockerfile
diff --git a/script/build-docker-image/run.bat b/script/build-docker-image/run.bat
index d3a1b061d..ab5be88c7 100644
--- a/script/build-docker-image/run.bat
+++ b/script/build-docker-image/run.bat
@@ -1,13 +1,13 @@
-if exist %CM_DOCKERFILE_WITH_PATH% (
+if exist %MLC_DOCKERFILE_WITH_PATH% (
rem echo .git > .dockerignore
rem echo.
-rem echo docker build %CM_DOCKER_CACHE_ARG% %CM_DOCKER_BUILD_ARGS% -f %CM_DOCKERFILE_WITH_PATH% -t %CM_DOCKER_IMAGE_REPO%/%CM_DOCKER_IMAGE_NAME%:%CM_DOCKER_IMAGE_TAG% .
+rem echo docker build %MLC_DOCKER_CACHE_ARG% %MLC_DOCKER_BUILD_ARGS% -f %MLC_DOCKERFILE_WITH_PATH% -t %MLC_DOCKER_IMAGE_REPO%/%MLC_DOCKER_IMAGE_NAME%:%MLC_DOCKER_IMAGE_TAG% .
rem echo.
-rem docker build %CM_DOCKER_CACHE_ARG% %CM_DOCKER_BUILD_ARGS% -f "%CM_DOCKERFILE_WITH_PATH%" -t "%CM_DOCKER_IMAGE_REPO%/%CM_DOCKER_IMAGE_NAME%:%CM_DOCKER_IMAGE_TAG%" .
+rem docker build %MLC_DOCKER_CACHE_ARG% %MLC_DOCKER_BUILD_ARGS% -f "%MLC_DOCKERFILE_WITH_PATH%" -t "%MLC_DOCKER_IMAGE_REPO%/%MLC_DOCKER_IMAGE_NAME%:%MLC_DOCKER_IMAGE_TAG%" .
- %CM_DOCKER_BUILD_CMD%
+ %MLC_DOCKER_BUILD_CMD%
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
echo.
diff --git a/script/build-docker-image/run.sh b/script/build-docker-image/run.sh
index 6aa2390c0..38eeb399e 100644
--- a/script/build-docker-image/run.sh
+++ b/script/build-docker-image/run.sh
@@ -1,14 +1,14 @@
#!/bin/bash
-if [ -f "${CM_DOCKERFILE_WITH_PATH}" ]; then
+if [ -f "${MLC_DOCKERFILE_WITH_PATH}" ]; then
# echo ".git" > .dockerignore
# echo ""
-# echo "docker build ${CM_DOCKER_CACHE_ARG} ${CM_DOCKER_BUILD_ARGS} -f ${CM_DOCKERFILE_WITH_PATH} -t ${CM_DOCKER_IMAGE_REPO}/${CM_DOCKER_IMAGE_NAME}:${CM_DOCKER_IMAGE_TAG} ."
+# echo "docker build ${MLC_DOCKER_CACHE_ARG} ${MLC_DOCKER_BUILD_ARGS} -f ${MLC_DOCKERFILE_WITH_PATH} -t ${MLC_DOCKER_IMAGE_REPO}/${MLC_DOCKER_IMAGE_NAME}:${MLC_DOCKER_IMAGE_TAG} ."
-# docker build ${CM_DOCKER_CACHE_ARG} ${CM_DOCKER_BUILD_ARGS} -f "${CM_DOCKERFILE_WITH_PATH}" -t "${CM_DOCKER_IMAGE_REPO}/${CM_DOCKER_IMAGE_NAME}:${CM_DOCKER_IMAGE_TAG}" .
+# docker build ${MLC_DOCKER_CACHE_ARG} ${MLC_DOCKER_BUILD_ARGS} -f "${MLC_DOCKERFILE_WITH_PATH}" -t "${MLC_DOCKER_IMAGE_REPO}/${MLC_DOCKER_IMAGE_NAME}:${MLC_DOCKER_IMAGE_TAG}" .
- eval "${CM_DOCKER_BUILD_CMD}"
+ eval "${MLC_DOCKER_BUILD_CMD}"
test $? -eq 0 || exit 1
echo ""
diff --git a/script/build-dockerfile/README-extra.md b/script/build-dockerfile/README-extra.md
deleted file mode 100644
index 992fee4b3..000000000
--- a/script/build-dockerfile/README-extra.md
+++ /dev/null
@@ -1,27 +0,0 @@
-# Build CM Dockerfile
-This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) builds a Dockerfile for using CM.
-
-## How to use
-```bash
-cm run script --tags=build,dockerfile --docker_os=[DOCKER_OS] --docker_os_version=[DOCKER_OS_VERSION] --build --image_repo=[IMAGE_REPO] --image_tag=[IMAGE_TAG] --gh_token=[GITHUB_AUTH_TOKEN] --script_tags=[CM_SCRIPT_TAGS]
-```
-where
-* `[DOCKER_OS]` is one of `ubuntu` or `rhel`. Default is `ubuntu`.
-* `[DOCKER_OS_VERSION]` is one of `18.04`, `20.04`, `22.04` for `ubuntu` and `9` for `rhel`. Default is `20.04`.
-* `--build` option calls the [CM docker image build script](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/build-docker-image) to build a docker image from the generated Dockerfile. Default is off.
-* `[GITHUB_AUTH_TOKEN]`: GitHub auth token passed to `docker build` as a build argument. Optional.
-* `[CM_SCRIPT_TAGS]`: Tags for the CM script to run as the last command inside the Dockerfile. This script does a fake run and sets up all of its dependencies in the docker image once built.
-* `[IMAGE_REPO]`: Repo name to add the docker image. Default is `local`.
-* `[IMAGE_TAG]`: Tag for the docker image. Default is `latest`.
-
-
-## Supported and Tested OS
-1. Ubuntu 18.04, 20.04, 22.04
-2. RHEL 9
-
-## Sample dockerfiles
-1. [Ubuntu 18.04](dockerfiles/ubuntu_18.04.Dockerfile)
-2. [Ubuntu 20.04](dockerfiles/ubuntu_20.04.Dockerfile)
-3. [Ubuntu 22.04](dockerfiles/ubuntu_22.04.Dockerfile)
-4. [RHEL 9](dockerfiles/rhel_9.Dockerfile)
-
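The defaults listed above (OS, version, base image) are resolved by the `get_value()` helper shown at the end of the `customize.py` diff below: an explicit `MLC_*` env value wins, otherwise the per-distro config supplies the default. A minimal sketch, assuming a simplified `config` layout (the real one lives in the script's config file):

```python
config = {
    "distros": {
        "ubuntu": {"versions": {"20.04": {"FROM": "ubuntu:20.04"}}}
    }
}

def get_value(env, config, key, env_key=None):
    # An explicit MLC_* env value takes precedence over the config default.
    if env_key and env.get(env_key) is not None:
        return env[env_key]
    distro = config["distros"][env["MLC_DOCKER_OS"]]
    version_meta = distro["versions"].get(env["MLC_DOCKER_OS_VERSION"], {})
    return version_meta.get(key)

env = {"MLC_DOCKER_OS": "ubuntu", "MLC_DOCKER_OS_VERSION": "20.04"}
print(get_value(env, config, "FROM", "MLC_DOCKER_IMAGE_BASE"))  # ubuntu:20.04
env["MLC_DOCKER_IMAGE_BASE"] = "custom/base:latest"             # user override
print(get_value(env, config, "FROM", "MLC_DOCKER_IMAGE_BASE"))  # custom/base:latest
```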
diff --git a/script/build-dockerfile/customize.py b/script/build-dockerfile/customize.py
index c95c2f96f..a91853185 100644
--- a/script/build-dockerfile/customize.py
+++ b/script/build-dockerfile/customize.py
@@ -11,9 +11,9 @@ def preprocess(i):
os_info = i['os_info']
env = i['env']
- if env["CM_DOCKER_OS"] not in ["ubuntu", "rhel", "arch"]:
+ if env["MLC_DOCKER_OS"] not in ["ubuntu", "rhel", "arch"]:
return {
- 'return': 1, 'error': f"Specified docker OS: {env['CM_DOCKER_OS']}. Currently only ubuntu, rhel and arch are supported in CM docker"}
+ 'return': 1, 'error': f"Specified docker OS: {env['MLC_DOCKER_OS']}. Currently only ubuntu, rhel and arch are supported in CM docker"}
path = i['run_script_input']['path']
@@ -26,9 +26,9 @@ def preprocess(i):
copy_files = []
automation = i['automation']
- # print(env['CM_DOCKER_RUN_SCRIPT_TAGS'])
- if env.get('CM_DOCKER_RUN_SCRIPT_TAGS', '') != '':
- script_tags = env['CM_DOCKER_RUN_SCRIPT_TAGS']
+ # print(env['MLC_DOCKER_RUN_SCRIPT_TAGS'])
+ if env.get('MLC_DOCKER_RUN_SCRIPT_TAGS', '') != '':
+ script_tags = env['MLC_DOCKER_RUN_SCRIPT_TAGS']
found_scripts = automation.action_object.access(
{'action': 'search', 'automation': 'script', 'tags': script_tags})
scripts_list = found_scripts['list']
@@ -54,13 +54,13 @@ def preprocess(i):
# build_args.append(arg)
# input_args.append("--"+input_+"="+"$"+env_)
- if "CM_DOCKER_OS_VERSION" not in env:
- env["CM_DOCKER_OS_VERSION"] = "20.04"
+ if "MLC_DOCKER_OS_VERSION" not in env:
+ env["MLC_DOCKER_OS_VERSION"] = "20.04"
- docker_image_base = get_value(env, config, 'FROM', 'CM_DOCKER_IMAGE_BASE')
+ docker_image_base = get_value(env, config, 'FROM', 'MLC_DOCKER_IMAGE_BASE')
if not docker_image_base:
return {
- 'return': 1, 'error': f"Version \"{env['CM_DOCKER_OS_VERSION']}\" is not supported yet for \"{env['CM_DOCKER_OS']}\" "}
+ 'return': 1, 'error': f"Version \"{env['MLC_DOCKER_OS_VERSION']}\" is not supported yet for \"{env['MLC_DOCKER_OS']}\" "}
# Handle cm_mlops Repository
if env.get("MLC_REPO_PATH", "") != "":
@@ -79,7 +79,7 @@ def preprocess(i):
# Define the build context directory (where the Dockerfile will be)
build_context_dir = os.path.dirname(
env.get(
- 'CM_DOCKERFILE_WITH_PATH',
+ 'MLC_DOCKERFILE_WITH_PATH',
os.path.join(
os.getcwd(),
"Dockerfile")))
@@ -118,8 +118,8 @@ def preprocess(i):
# MLC_REPO_PATH is not set; use mlc pull repo as before
use_copy_repo = False
- if env.get("CM_MLOPS_REPO", "") != "":
- cm_mlops_repo = env["CM_MLOPS_REPO"]
+ if env.get("MLC_MLOPS_REPO", "") != "":
+ cm_mlops_repo = env["MLC_MLOPS_REPO"]
# the below pattern matches both the HTTPS and SSH git link formats
git_link_pattern = r'^(https?://github\.com/([^/]+)/([^/]+)(?:\.git)?|git@github\.com:([^/]+)/([^/]+)(?:\.git)?)$'
if match := re.match(git_link_pattern, cm_mlops_repo):
@@ -131,17 +131,17 @@ def preprocess(i):
repo_name = match.group(5)
cm_mlops_repo = f"{repo_owner}@{repo_name}"
print(
- f"Converted repo format from {env['CM_MLOPS_REPO']} to {cm_mlops_repo}")
+ f"Converted repo format from {env['MLC_MLOPS_REPO']} to {cm_mlops_repo}")
else:
cm_mlops_repo = "mlcommons@mlperf-automations"
- cm_mlops_repo_branch_string = f" --branch={env['CM_MLOPS_REPO_BRANCH']}"
+ cm_mlops_repo_branch_string = f" --branch={env['MLC_MLOPS_REPO_BRANCH']}"
- if env.get('CM_DOCKERFILE_WITH_PATH', '') == '':
- env['CM_DOCKERFILE_WITH_PATH'] = os.path.join(
+ if env.get('MLC_DOCKERFILE_WITH_PATH', '') == '':
+ env['MLC_DOCKERFILE_WITH_PATH'] = os.path.join(
os.getcwd(), "Dockerfile")
- dockerfile_with_path = env['CM_DOCKERFILE_WITH_PATH']
+ dockerfile_with_path = env['MLC_DOCKERFILE_WITH_PATH']
dockerfile_dir = os.path.dirname(dockerfile_with_path)
extra_dir = os.path.dirname(dockerfile_with_path)
@@ -150,7 +150,7 @@ def preprocess(i):
os.makedirs(extra_dir, exist_ok=True)
f = open(dockerfile_with_path, "w")
- EOL = env['CM_DOCKER_IMAGE_EOL']
+ EOL = env['MLC_DOCKER_IMAGE_EOL']
f.write('FROM ' + docker_image_base + EOL)
# Maintainers
@@ -166,12 +166,12 @@ def preprocess(i):
f.write(EOL)
- image_label = get_value(env, config, 'LABEL', 'CM_DOCKER_IMAGE_LABEL')
+ image_label = get_value(env, config, 'LABEL', 'MLC_DOCKER_IMAGE_LABEL')
if image_label:
f.write('LABEL ' + image_label + EOL)
f.write(EOL)
- shell = get_value(env, config, 'SHELL', 'CM_DOCKER_IMAGE_SHELL')
+ shell = get_value(env, config, 'SHELL', 'MLC_DOCKER_IMAGE_SHELL')
if shell:
# f.write('SHELL ' + shell + EOL)
f.write(EOL)
@@ -192,8 +192,8 @@ def preprocess(i):
f.write(EOL)
copy_cmds = []
- if 'CM_DOCKER_COPY_FILES' in env:
- for copy_file in env['CM_DOCKER_COPY_FILES']:
+ if 'MLC_DOCKER_COPY_FILES' in env:
+ for copy_file in env['MLC_DOCKER_COPY_FILES']:
copy_split = copy_file.split(":")
if len(copy_split) != 2:
return {
@@ -217,20 +217,20 @@ def preprocess(i):
env,
config,
'package-manager-update-cmd',
- 'CM_PACKAGE_MANAGER_UPDATE_CMD') +
+ 'MLC_PACKAGE_MANAGER_UPDATE_CMD') +
EOL)
f.write('RUN ' + get_value(env, config, 'package-manager-get-cmd') + " " + " ".join(get_value(env, config,
'packages')) + EOL)
- if env.get('CM_DOCKER_EXTRA_SYS_DEPS', '') != '':
- f.write('RUN ' + env['CM_DOCKER_EXTRA_SYS_DEPS'] + EOL)
+ if env.get('MLC_DOCKER_EXTRA_SYS_DEPS', '') != '':
+ f.write('RUN ' + env['MLC_DOCKER_EXTRA_SYS_DEPS'] + EOL)
- if env['CM_DOCKER_OS'] == "ubuntu":
- if int(env['CM_DOCKER_OS_VERSION'].split('.')[0]) >= 23:
+ if env['MLC_DOCKER_OS'] == "ubuntu":
+ if int(env['MLC_DOCKER_OS_VERSION'].split('.')[0]) >= 23:
if "--break-system-packages" not in env.get(
- 'CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS', ''):
- env['CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS'] = " --break-system-packages"
- pip_extra_flags = env.get('CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS', '')
+ 'MLC_DOCKER_PIP_INSTALL_EXTRA_FLAGS', ''):
+ env['MLC_DOCKER_PIP_INSTALL_EXTRA_FLAGS'] = " --break-system-packages"
+ pip_extra_flags = env.get('MLC_DOCKER_PIP_INSTALL_EXTRA_FLAGS', '')
f.write(EOL + '# Setup docker environment' + EOL)
@@ -238,7 +238,7 @@ def preprocess(i):
env,
config,
'ENTRYPOINT',
- 'CM_DOCKER_IMAGE_ENTRYPOINT')
+ 'MLC_DOCKER_IMAGE_ENTRYPOINT')
if entry_point:
f.write('ENTRYPOINT ' + entry_point + EOL)
@@ -251,11 +251,11 @@ def preprocess(i):
docker_user = get_value(env, config, 'USER', 'MLC_DOCKER_USER')
docker_group = get_value(env, config, 'GROUP', 'MLC_DOCKER_GROUP')
- if env.get('CM_CONTAINER_TOOL', '') == 'podman' and env.get(
- 'CM_DOCKER_USE_DEFAULT_USER', '') == '':
- env['CM_DOCKER_USE_DEFAULT_USER'] = 'yes'
+ if env.get('MLC_CONTAINER_TOOL', '') == 'podman' and env.get(
+ 'MLC_DOCKER_USE_DEFAULT_USER', '') == '':
+ env['MLC_DOCKER_USE_DEFAULT_USER'] = 'yes'
- if docker_user and str(env.get('CM_DOCKER_USE_DEFAULT_USER', '')).lower() not in [
+ if docker_user and str(env.get('MLC_DOCKER_USE_DEFAULT_USER', '')).lower() not in [
"yes", "1", "true"]:
f.write('RUN groupadd -g $GID -o ' + docker_group + EOL)
@@ -277,21 +277,21 @@ def preprocess(i):
else:
f.write('ENV HOME=/root' + EOL)
- dockerfile_env = env.get('CM_DOCKERFILE_ENV', {})
+ dockerfile_env = env.get('MLC_DOCKERFILE_ENV', {})
dockerfile_env_input_string = ""
for docker_env_key in dockerfile_env:
dockerfile_env_input_string = dockerfile_env_input_string + " --env." + \
docker_env_key + "=" + str(dockerfile_env[docker_env_key])
- workdir = get_value(env, config, 'WORKDIR', 'CM_DOCKER_WORKDIR')
- if workdir and ("/home/mlcuser" not in workdir or str(env.get('CM_DOCKER_USE_DEFAULT_USER', '')).lower() not in [
+ workdir = get_value(env, config, 'WORKDIR', 'MLC_DOCKER_WORKDIR')
+ if workdir and ("/home/mlcuser" not in workdir or str(env.get('MLC_DOCKER_USE_DEFAULT_USER', '')).lower() not in [
"yes", "1", "true"]):
f.write('WORKDIR ' + workdir + EOL)
f.write(EOL + '# Install python packages' + EOL)
- python = get_value(env, config, 'PYTHON', 'CM_DOCKERFILE_PYTHON')
+ python = get_value(env, config, 'PYTHON', 'MLC_DOCKERFILE_PYTHON')
- docker_use_virtual_python = env.get('CM_DOCKER_USE_VIRTUAL_PYTHON', "yes")
+ docker_use_virtual_python = env.get('MLC_DOCKER_USE_VIRTUAL_PYTHON', "yes")
if str(docker_use_virtual_python).lower() not in ["no", "0", "false"]:
f.write('RUN {} -m venv $HOME/venv/mlc'.format(python) + " " + EOL)
f.write('ENV PATH="$HOME/venv/mlc/bin:$PATH"' + EOL)
@@ -323,7 +323,7 @@ def preprocess(i):
else:
# Use mlc pull repo as before
- x = env.get('CM_DOCKER_ADD_FLAG_TO_CM_MLOPS_REPO', '')
+ x = env.get('MLC_DOCKER_ADD_FLAG_TO_MLC_MLOPS_REPO', '')
if x != '':
x = ' ' + x
@@ -335,55 +335,55 @@ def preprocess(i):
EOL)
# Check extra repositories
- x = env.get('CM_DOCKER_EXTRA_CM_REPOS', '')
+ x = env.get('MLC_DOCKER_EXTRA_MLC_REPOS', '')
if x != '':
for y in x.split(','):
f.write('RUN ' + y + EOL)
- if str(env.get('CM_DOCKER_SKIP_CM_SYS_UPGRADE', False)
+ if str(env.get('MLC_DOCKER_SKIP_MLC_SYS_UPGRADE', False)
).lower() not in ["true", "1", "yes"]:
f.write(EOL + '# Install all system dependencies' + EOL)
f.write('RUN mlc run script --tags=get,sys-utils-cm --quiet' + EOL)
- if 'CM_DOCKER_PRE_RUN_COMMANDS' in env:
- for pre_run_cmd in env['CM_DOCKER_PRE_RUN_COMMANDS']:
+ if 'MLC_DOCKER_PRE_RUN_COMMANDS' in env:
+ for pre_run_cmd in env['MLC_DOCKER_PRE_RUN_COMMANDS']:
f.write('RUN ' + pre_run_cmd + EOL)
run_cmd_extra = " " + \
- env.get('CM_DOCKER_RUN_CMD_EXTRA', '').replace(":", "=")
- gh_token = get_value(env, config, "GH_TOKEN", "CM_GH_TOKEN")
+ env.get('MLC_DOCKER_RUN_CMD_EXTRA', '').replace(":", "=")
+ gh_token = get_value(env, config, "GH_TOKEN", "MLC_GH_TOKEN")
if gh_token:
- run_cmd_extra = " --env.CM_GH_TOKEN=$CM_GH_TOKEN"
+ run_cmd_extra = " --env.MLC_GH_TOKEN=$MLC_GH_TOKEN"
f.write(EOL + '# Run commands' + EOL)
- for comment in env.get('CM_DOCKER_RUN_COMMENTS', []):
+ for comment in env.get('MLC_DOCKER_RUN_COMMENTS', []):
f.write(comment + EOL)
skip_extra = False
- if 'CM_DOCKER_RUN_CMD' not in env:
- env['CM_DOCKER_RUN_CMD'] = ""
- if 'CM_DOCKER_RUN_SCRIPT_TAGS' not in env:
- env['CM_DOCKER_RUN_CMD'] += "mlc version"
+ if 'MLC_DOCKER_RUN_CMD' not in env:
+ env['MLC_DOCKER_RUN_CMD'] = ""
+ if 'MLC_DOCKER_RUN_SCRIPT_TAGS' not in env:
+ env['MLC_DOCKER_RUN_CMD'] += "mlc version"
skip_extra = True
else:
- if str(env.get('CM_DOCKER_NOT_PULL_UPDATE', 'False')
+ if str(env.get('MLC_DOCKER_NOT_PULL_UPDATE', 'False')
).lower() not in ["yes", "1", "true"]:
- env['CM_DOCKER_RUN_CMD'] += "mlc pull repo && "
- env['CM_DOCKER_RUN_CMD'] += "mlc run script --tags=" + \
- env['CM_DOCKER_RUN_SCRIPT_TAGS'] + ' --quiet'
+ env['MLC_DOCKER_RUN_CMD'] += "mlc pull repo && "
+ env['MLC_DOCKER_RUN_CMD'] += "mlc run script --tags=" + \
+ env['MLC_DOCKER_RUN_SCRIPT_TAGS'] + ' --quiet'
else:
- if str(env.get('CM_DOCKER_NOT_PULL_UPDATE', 'False')
+ if str(env.get('MLC_DOCKER_NOT_PULL_UPDATE', 'False')
).lower() not in ["yes", "1", "true"]:
- env['CM_DOCKER_RUN_CMD'] = "mlc pull repo && " + \
- env['CM_DOCKER_RUN_CMD']
+ env['MLC_DOCKER_RUN_CMD'] = "mlc pull repo && " + \
+ env['MLC_DOCKER_RUN_CMD']
- print(env['CM_DOCKER_RUN_CMD'])
- fake_run = env.get("CM_DOCKER_FAKE_RUN_OPTION",
+ print(env['MLC_DOCKER_RUN_CMD'])
+ fake_run = env.get("MLC_DOCKER_FAKE_RUN_OPTION",
" --fake_run") + dockerfile_env_input_string
fake_run = fake_run + \
- " --fake_deps" if env.get('CM_DOCKER_FAKE_DEPS') else fake_run
+ " --fake_deps" if env.get('MLC_DOCKER_FAKE_DEPS') else fake_run
- x = 'RUN ' + env['CM_DOCKER_RUN_CMD']
+ x = 'RUN ' + env['MLC_DOCKER_RUN_CMD']
if not skip_extra:
x += fake_run
@@ -392,11 +392,11 @@ def preprocess(i):
if run_cmd_extra != '':
x += ' ' + run_cmd_extra
- if env.get('CM_DOCKER_RUN_SCRIPT_TAGS', '') != '' and str(env.get(
- 'CM_DOCKER_ADD_DEPENDENT_SCRIPTS_RUN_COMMANDS', '')).lower() in ["yes", "1", "true"]:
+ if env.get('MLC_DOCKER_RUN_SCRIPT_TAGS', '') != '' and str(env.get(
+ 'MLC_DOCKER_ADD_DEPENDENT_SCRIPTS_RUN_COMMANDS', '')).lower() in ["yes", "1", "true"]:
mlc_input = {'action': 'run',
'automation': 'script',
- 'tags': f"""{env['CM_DOCKER_RUN_SCRIPT_TAGS']}""",
+ 'tags': f"""{env['MLC_DOCKER_RUN_SCRIPT_TAGS']}""",
'print_deps': True,
'quiet': True,
'silent': True,
@@ -407,7 +407,7 @@ def preprocess(i):
if r['return'] > 0:
return r
print_deps = r['new_state']['print_deps']
- fake_run_str = " --fake_run" if env.get('CM_DOCKER_FAKE_DEPS') else ""
+ fake_run_str = " --fake_run" if env.get('MLC_DOCKER_FAKE_DEPS') else ""
cmds = ["RUN " + dep for dep in print_deps]
for cmd in cmds:
f.write(cmd + fake_run_str + EOL)
@@ -415,19 +415,19 @@ def preprocess(i):
f.write(x + EOL)
# fake_run to install the dependent scripts and caching them
- if not "run" in env['CM_DOCKER_RUN_CMD'] and str(
- env.get('CM_REAL_RUN', False)).lower() in ["false", "0", "no"]:
+ if not "run" in env['MLC_DOCKER_RUN_CMD'] and str(
+ env.get('MLC_REAL_RUN', False)).lower() in ["false", "0", "no"]:
fake_run = dockerfile_env_input_string
- x = 'RUN ' + env['CM_DOCKER_RUN_CMD'] + fake_run + run_cmd_extra
+ x = 'RUN ' + env['MLC_DOCKER_RUN_CMD'] + fake_run + run_cmd_extra
if '--quiet' not in x:
x += ' --quiet '
x += EOL
f.write(x)
- if 'CM_DOCKER_POST_RUN_COMMANDS' in env:
- for post_run_cmd in env['CM_DOCKER_POST_RUN_COMMANDS']:
+ if 'MLC_DOCKER_POST_RUN_COMMANDS' in env:
+ for post_run_cmd in env['MLC_DOCKER_POST_RUN_COMMANDS']:
f.write('RUN ' + post_run_cmd + EOL)
post_file = env.get('DOCKER_IMAGE_POST_FILE', '')
@@ -443,7 +443,7 @@ def preprocess(i):
f.close()
- # f = open(env['CM_DOCKERFILE_WITH_PATH'], "r")
+ # f = open(env['MLC_DOCKERFILE_WITH_PATH'], "r")
# print(f.read())
return {'return': 0}
@@ -456,8 +456,8 @@ def get_value(env, config, key, env_key=None):
if env.get(env_key, None) is not None:
return env[env_key]
- docker_os = env['CM_DOCKER_OS']
- docker_os_version = env['CM_DOCKER_OS_VERSION']
+ docker_os = env['MLC_DOCKER_OS']
+ docker_os_version = env['MLC_DOCKER_OS_VERSION']
version_meta = config['distros'][docker_os]['versions'].get(
docker_os_version, '')
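The owner/repo conversion renamed in this file is easiest to see in isolation: the hunk around `git_link_pattern` turns both `https://github.com/owner/repo` and `git@github.com:owner/repo` into the `owner@repo` form that `mlc pull repo` expects. A standalone sketch using the same pattern, with only the surrounding MLC plumbing omitted:

```python
import re

# Same pattern as in build-dockerfile/customize.py: matches both the HTTPS
# and SSH GitHub link formats.
git_link_pattern = r'^(https?://github\.com/([^/]+)/([^/]+)(?:\.git)?|git@github\.com:([^/]+)/([^/]+)(?:\.git)?)$'

def to_owner_repo(repo: str) -> str:
    if match := re.match(git_link_pattern, repo):
        if match.group(2):  # HTTPS form: owner/repo in groups 2 and 3
            return f"{match.group(2)}@{match.group(3)}"
        return f"{match.group(4)}@{match.group(5)}"  # SSH form: groups 4 and 5
    return repo  # already owner@repo, or an unrecognized format

print(to_owner_repo("https://github.com/mlcommons/mlperf-automations"))
# mlcommons@mlperf-automations
print(to_owner_repo("git@github.com:mlcommons/mlperf-automations"))
# mlcommons@mlperf-automations
# Note: a trailing ".git" is swallowed by the greedy [^/]+ group, so
# "repo.git" comes back as "owner@repo.git" with this pattern.
```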
diff --git a/script/build-dockerfile/meta.yaml b/script/build-dockerfile/meta.yaml
index cc81fe3c5..a8acb0c30 100644
--- a/script/build-dockerfile/meta.yaml
+++ b/script/build-dockerfile/meta.yaml
@@ -13,49 +13,49 @@ cache: false
category: Docker automation
default_env:
- CM_DOCKER_BUILD_SLIM: 'no'
- CM_DOCKER_IMAGE_EOL: '
+ MLC_DOCKER_BUILD_SLIM: 'no'
+ MLC_DOCKER_IMAGE_EOL: '
'
- CM_DOCKER_OS: ubuntu
- CM_DOCKER_NOT_PULL_UPDATE: False
- CM_MLOPS_REPO_BRANCH: mlc
+ MLC_DOCKER_OS: ubuntu
+ MLC_DOCKER_NOT_PULL_UPDATE: False
+ MLC_MLOPS_REPO_BRANCH: mlc
input_mapping:
- build: CM_BUILD_DOCKER_IMAGE
- cache: CM_DOCKER_CACHE
- cm_repo: CM_MLOPS_REPO
- cm_repo_flags: CM_DOCKER_ADD_FLAG_TO_CM_MLOPS_REPO
- cm_repos: CM_DOCKER_EXTRA_CM_REPOS
- cm_repo_branch: CM_MLOPS_REPO_BRANCH
- comments: CM_DOCKER_RUN_COMMENTS
- copy_files: CM_DOCKER_COPY_FILES
- docker_base_image: CM_DOCKER_IMAGE_BASE
- docker_os: CM_DOCKER_OS
- docker_os_version: CM_DOCKER_OS_VERSION
- dockerfile_env: CM_DOCKERFILE_ENV
- extra_sys_deps: CM_DOCKER_EXTRA_SYS_DEPS
- fake_docker_deps: CM_DOCKER_FAKE_DEPS
- fake_run_option: CM_DOCKER_FAKE_RUN_OPTION
- file_path: CM_DOCKERFILE_WITH_PATH
- gh_token: CM_GH_TOKEN
- image_repo: CM_DOCKER_IMAGE_REPO
- image_tag: CM_DOCKER_IMAGE_TAG
- package_manager_update_cmd: CM_PACKAGE_MANAGER_UPDATE_CMD
- pip_extra_flags: CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS
+ build: MLC_BUILD_DOCKER_IMAGE
+ cache: MLC_DOCKER_CACHE
+ cm_repo: MLC_MLOPS_REPO
+ cm_repo_flags: MLC_DOCKER_ADD_FLAG_TO_MLC_MLOPS_REPO
+ cm_repos: MLC_DOCKER_EXTRA_MLC_REPOS
+ cm_repo_branch: MLC_MLOPS_REPO_BRANCH
+ comments: MLC_DOCKER_RUN_COMMENTS
+ copy_files: MLC_DOCKER_COPY_FILES
+ docker_base_image: MLC_DOCKER_IMAGE_BASE
+ docker_os: MLC_DOCKER_OS
+ docker_os_version: MLC_DOCKER_OS_VERSION
+ dockerfile_env: MLC_DOCKERFILE_ENV
+ extra_sys_deps: MLC_DOCKER_EXTRA_SYS_DEPS
+ fake_docker_deps: MLC_DOCKER_FAKE_DEPS
+ fake_run_option: MLC_DOCKER_FAKE_RUN_OPTION
+ file_path: MLC_DOCKERFILE_WITH_PATH
+ gh_token: MLC_GH_TOKEN
+ image_repo: MLC_DOCKER_IMAGE_REPO
+ image_tag: MLC_DOCKER_IMAGE_TAG
+ package_manager_update_cmd: MLC_PACKAGE_MANAGER_UPDATE_CMD
+ pip_extra_flags: MLC_DOCKER_PIP_INSTALL_EXTRA_FLAGS
post_file: DOCKER_IMAGE_POST_FILE
- post_run_cmds: CM_DOCKER_POST_RUN_COMMANDS
- pre_run_cmds: CM_DOCKER_PRE_RUN_COMMANDS
- real_run: CM_REAL_RUN
- run_cmd: CM_DOCKER_RUN_CMD
- run_cmd_extra: CM_DOCKER_RUN_CMD_EXTRA
- script_tags: CM_DOCKER_RUN_SCRIPT_TAGS
- skip_cm_sys_upgrade: CM_DOCKER_SKIP_CM_SYS_UPGRADE
- push_image: CM_DOCKER_PUSH_IMAGE
- docker_not_pull_update: CM_DOCKER_NOT_PULL_UPDATE
+ post_run_cmds: MLC_DOCKER_POST_RUN_COMMANDS
+ pre_run_cmds: MLC_DOCKER_PRE_RUN_COMMANDS
+ real_run: MLC_REAL_RUN
+ run_cmd: MLC_DOCKER_RUN_CMD
+ run_cmd_extra: MLC_DOCKER_RUN_CMD_EXTRA
+ script_tags: MLC_DOCKER_RUN_SCRIPT_TAGS
+ skip_cm_sys_upgrade: MLC_DOCKER_SKIP_MLC_SYS_UPGRADE
+ push_image: MLC_DOCKER_PUSH_IMAGE
+ docker_not_pull_update: MLC_DOCKER_NOT_PULL_UPDATE
new_env_keys:
-- CM_DOCKERFILE_*
+- MLC_DOCKERFILE_*
deps:
- tags: get,docker
@@ -64,7 +64,7 @@ deps:
post_deps:
- enable_if_env:
- CM_BUILD_DOCKER_IMAGE:
+ MLC_BUILD_DOCKER_IMAGE:
- 'yes'
names:
- build-docker-image
@@ -73,4 +73,4 @@ post_deps:
variations:
slim:
env:
- CM_DOCKER_BUILD_SLIM: 'yes'
+ MLC_DOCKER_BUILD_SLIM: 'yes'
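The `input_mapping` block renamed above is what lets a flag such as `--image_repo=local` surface inside `customize.py` as `MLC_DOCKER_IMAGE_REPO`. A minimal sketch of that translation, assuming only the mapping semantics (this is not the MLC engine itself):

```python
# Excerpt of the mapping from build-dockerfile/meta.yaml.
input_mapping = {
    "image_repo": "MLC_DOCKER_IMAGE_REPO",
    "image_tag": "MLC_DOCKER_IMAGE_TAG",
    "docker_os": "MLC_DOCKER_OS",
}

def map_inputs(inputs: dict, mapping: dict) -> dict:
    # Each recognized script input becomes an env entry under its MLC_* name.
    return {mapping[k]: v for k, v in inputs.items() if k in mapping}

print(map_inputs({"image_repo": "local", "image_tag": "latest"}, input_mapping))
# {'MLC_DOCKER_IMAGE_REPO': 'local', 'MLC_DOCKER_IMAGE_TAG': 'latest'}
```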
diff --git a/script/build-mlperf-inference-server-nvidia/customize.py b/script/build-mlperf-inference-server-nvidia/customize.py
index f150d930b..5fa70aa45 100644
--- a/script/build-mlperf-inference-server-nvidia/customize.py
+++ b/script/build-mlperf-inference-server-nvidia/customize.py
@@ -14,25 +14,25 @@ def preprocess(i):
if '+LIBRARY_PATH' not in env:
env['+LIBRARY_PATH'] = []
- if 'CM_TENSORRT_INSTALL_PATH' in env:
+ if 'MLC_TENSORRT_INSTALL_PATH' in env:
env['+LIBRARY_PATH'].append(os.path.join(
- env['CM_TENSORRT_INSTALL_PATH'], "lib"))
+ env['MLC_TENSORRT_INSTALL_PATH'], "lib"))
cxxflags = [
"-Wno-error=switch",
"-DDALI_1_15=1",
"-Wno-error=maybe-uninitialized"]
- if env.get('CM_GCC_VERSION', '') != '':
- gcc_major_version = env['CM_GCC_VERSION'].split(".")[0]
+ if env.get('MLC_GCC_VERSION', '') != '':
+ gcc_major_version = env['MLC_GCC_VERSION'].split(".")[0]
if int(gcc_major_version) > 10:
- if env.get('CM_MLPERF_INFERENCE_VERSION', '') != "4.1":
+ if env.get('MLC_MLPERF_INFERENCE_VERSION', '') != "4.1":
cxxflags.append("-Wno-error=range-loop-construct")
- if env.get('CM_MLPERF_DEVICE', '') == "inferentia":
+ if env.get('MLC_MLPERF_DEVICE', '') == "inferentia":
env['USE_INFERENTIA'] = "1"
env['USE_NIGHTLY'] = "0"
- env['CM_MAKE_BUILD_COMMAND'] = "build"
+ env['MLC_MAKE_BUILD_COMMAND'] = "build"
if '+ CXXFLAGS' not in env:
env['+ CXXFLAGS'] = []
diff --git a/script/build-mlperf-inference-server-nvidia/meta.yaml b/script/build-mlperf-inference-server-nvidia/meta.yaml
index c5003f67c..d7f005bb4 100644
--- a/script/build-mlperf-inference-server-nvidia/meta.yaml
+++ b/script/build-mlperf-inference-server-nvidia/meta.yaml
@@ -22,16 +22,16 @@ tags:
new_env_keys:
- - CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH
+ - MLC_MLPERF_INFERENCE_NVIDIA_CODE_PATH
default_env:
- CM_MAKE_BUILD_COMMAND: build
- CM_MAKE_CLEAN: "no"
- CM_CUSTOM_SYSTEM_NVIDIA: "yes"
+ MLC_MAKE_BUILD_COMMAND: build
+ MLC_MAKE_CLEAN: "no"
+ MLC_CUSTOM_SYSTEM_NVIDIA: "yes"
input_mapping:
- custom_system: CM_CUSTOM_SYSTEM_NVIDIA
- clean: CM_MAKE_CLEAN
+ custom_system: MLC_CUSTOM_SYSTEM_NVIDIA
+ clean: MLC_MAKE_CLEAN
# Dependencies on other CM scripts
@@ -57,7 +57,7 @@ deps:
names:
- cuda
enable_if_env:
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- cuda
- inferentia
@@ -66,11 +66,11 @@ deps:
names:
- tensorrt
enable_if_env:
- CM_MLPERF_DEVICE:
+ MLC_MLPERF_DEVICE:
- cuda
- inferentia
skip_if_env:
- CM_TENSORRT_SYSTEM_DETECT:
+ MLC_TENSORRT_SYSTEM_DETECT:
- yes
# Detect gcc
@@ -113,7 +113,7 @@ deps:
- tags: get,generic-python-lib,_pycuda
version: "2022.2.2"
skip_if_env:
- CM_RUN_STATE_DOCKER:
+ MLC_RUN_STATE_DOCKER:
- 'yes'
- True
- 'True'
@@ -137,7 +137,7 @@ post_deps:
- custom-system-nvidia
- nvidia-inference-common-code
skip_if_env:
- CM_CUSTOM_SYSTEM_NVIDIA:
+ MLC_CUSTOM_SYSTEM_NVIDIA:
- "no"
- False
- "False"
@@ -147,17 +147,17 @@ variations:
cpu:
group: device
env:
- CM_MLPERF_DEVICE: cpu
+ MLC_MLPERF_DEVICE: cpu
inferentia:
group: device
env:
- CM_MLPERF_DEVICE: inferentia
+ MLC_MLPERF_DEVICE: inferentia
cuda:
group: device
default: true
env:
- CM_MLPERF_DEVICE: cuda
- CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart
+ MLC_MLPERF_DEVICE: cuda
+ MLC_MLPERF_DEVICE_LIB_NAMESPEC: cudart
ctuning:
group: code
@@ -200,15 +200,15 @@ variations:
- pytorch
- torch
skip_if_env:
- CM_HOST_PLATFORM_FLAVOR:
+ MLC_HOST_PLATFORM_FLAVOR:
- x86_64
- CM_PYTHON_MINOR_VERSION:
+ MLC_PYTHON_MINOR_VERSION:
- 8
- tags: get,generic-python-lib,_whl-url.https://github.com/mlcommons/cm4mlperf-inference/releases/download/mlperf-inference-v4.0/torch-2.1.0a0+git32f93b1-cp38-cp38-linux_x86_64.whl
enable_if_env:
- CM_HOST_PLATFORM_FLAVOR:
+ MLC_HOST_PLATFORM_FLAVOR:
- x86_64
- CM_PYTHON_MINOR_VERSION:
+ MLC_PYTHON_MINOR_VERSION:
- 8
- tags: install,torchvision,from.src,_for-nvidia-mlperf-inference-v4.0
@@ -216,15 +216,15 @@ variations:
- pytorchvision
- torchvision
skip_if_env:
- CM_HOST_PLATFORM_FLAVOR:
+ MLC_HOST_PLATFORM_FLAVOR:
- x86_64
- CM_PYTHON_MINOR_VERSION:
+ MLC_PYTHON_MINOR_VERSION:
- 8
- tags: get,generic-python-lib,_whl-url.https://github.com/mlcommons/cm4mlperf-inference/releases/download/mlperf-inference-v4.0/torchvision-0.16.0a0+657027f-cp38-cp38-linux_x86_64.whl
enable_if_env:
- CM_HOST_PLATFORM_FLAVOR:
+ MLC_HOST_PLATFORM_FLAVOR:
- x86_64
- CM_PYTHON_MINOR_VERSION:
+ MLC_PYTHON_MINOR_VERSION:
- 8
versions:
@@ -274,30 +274,30 @@ versions:
- pytorch
- torch
skip_if_env:
- CM_HOST_PLATFORM_FLAVOR:
+ MLC_HOST_PLATFORM_FLAVOR:
- x86_64
- CM_PYTHON_MINOR_VERSION:
+ MLC_PYTHON_MINOR_VERSION:
- 8
- tags: get,generic-python-lib,_package.torch,_whl-url.https://github.com/mlcommons/cm4mlperf-inference/releases/download/mlperf-inference-v4.0/torch-2.1.0a0+git32f93b1-cp38-cp38-linux_x86_64.whl
enable_if_env:
- CM_HOST_PLATFORM_FLAVOR:
+ MLC_HOST_PLATFORM_FLAVOR:
- x86_64
- CM_PYTHON_MINOR_VERSION:
+ MLC_PYTHON_MINOR_VERSION:
- 8
- tags: install,torchvision,from.src,_for-nvidia-mlperf-inference-v4.0
names:
- pytorchvision
- torchvision
skip_if_env:
- CM_HOST_PLATFORM_FLAVOR:
+ MLC_HOST_PLATFORM_FLAVOR:
- x86_64
- CM_PYTHON_MINOR_VERSION:
+ MLC_PYTHON_MINOR_VERSION:
- 8
- tags: get,generic-python-lib,_package.torchvision,_whl-url.https://github.com/mlcommons/cm4mlperf-inference/releases/download/mlperf-inference-v4.0/torchvision-0.16.0a0+657027f-cp38-cp38-linux_x86_64.whl
enable_if_env:
- CM_HOST_PLATFORM_FLAVOR:
+ MLC_HOST_PLATFORM_FLAVOR:
- x86_64
- CM_PYTHON_MINOR_VERSION:
+ MLC_PYTHON_MINOR_VERSION:
- 8
r4.1-dev:
@@ -316,30 +316,30 @@ versions:
- pytorch
- torch
skip_if_env:
- CM_HOST_PLATFORM_FLAVOR:
+ MLC_HOST_PLATFORM_FLAVOR:
- x86_64
- CM_PYTHON_MINOR_VERSION:
+ MLC_PYTHON_MINOR_VERSION:
- 8
- tags: get,generic-python-lib,_package.torch,_whl-url.https://github.com/mlcommons/cm4mlperf-inference/releases/download/mlperf-inference-v4.0/torch-2.1.0a0+git32f93b1-cp38-cp38-linux_x86_64.whl
enable_if_env:
- CM_HOST_PLATFORM_FLAVOR:
+ MLC_HOST_PLATFORM_FLAVOR:
- x86_64
- CM_PYTHON_MINOR_VERSION:
+ MLC_PYTHON_MINOR_VERSION:
- 8
- tags: install,torchvision,from.src,_for-nvidia-mlperf-inference-v4.0
names:
- pytorchvision
- torchvision
skip_if_env:
- CM_HOST_PLATFORM_FLAVOR:
+ MLC_HOST_PLATFORM_FLAVOR:
- x86_64
- CM_PYTHON_MINOR_VERSION:
+ MLC_PYTHON_MINOR_VERSION:
- 8
- tags: get,generic-python-lib,_package.torchvision,_whl-url.https://github.com/mlcommons/cm4mlperf-inference/releases/download/mlperf-inference-v4.0/torchvision-0.16.0a0+657027f-cp38-cp38-linux_x86_64.whl
enable_if_env:
- CM_HOST_PLATFORM_FLAVOR:
+ MLC_HOST_PLATFORM_FLAVOR:
- x86_64
- CM_PYTHON_MINOR_VERSION:
+ MLC_PYTHON_MINOR_VERSION:
- 8
r4.1:
@@ -368,8 +368,8 @@ docker:
criteo_preprocessed_path: CRITEO_PREPROCESSED_PATH
results_dir: RESULTS_DIR
submission_dir: SUBMISSION_DIR
- cudnn_tar_file_path: CM_CUDNN_TAR_FILE_PATH
- tensorrt_tar_file_path: CM_TENSORRT_TAR_FILE_PATH
+ cudnn_tar_file_path: MLC_CUDNN_TAR_FILE_PATH
+ tensorrt_tar_file_path: MLC_TENSORRT_TAR_FILE_PATH
cuda_run_file_path: CUDA_RUN_FILE_LOCAL_PATH
dlrm_data_path: DLRM_DATA_PATH
scratch_path: MLPERF_SCRATCH_PATH
@@ -379,7 +379,7 @@ docker:
- tags: get,mlperf,inference,submission,dir,local
- tags: get,nvidia-docker
skip_if_env:
- CM_SKIP_GET_NVIDIA_DOCKER:
+ MLC_SKIP_GET_NVIDIA_DOCKER:
- yes
pre_run_cmds:
@@ -387,12 +387,12 @@ docker:
run_cmd_prefix: sudo apt remove -y cmake
mounts:
- "${{ IMAGENET_PATH }}:/data/imagenet-val"
- - "${{ CM_MLPERF_INFERENCE_RESULTS_DIR }}:${{ CM_MLPERF_INFERENCE_RESULTS_DIR }}"
- - "${{ CM_MLPERF_INFERENCE_SUBMISSION_DIR }}:${{ CM_MLPERF_INFERENCE_SUBMISSION_DIR }}"
+ - "${{ MLC_MLPERF_INFERENCE_RESULTS_DIR }}:${{ MLC_MLPERF_INFERENCE_RESULTS_DIR }}"
+ - "${{ MLC_MLPERF_INFERENCE_SUBMISSION_DIR }}:${{ MLC_MLPERF_INFERENCE_SUBMISSION_DIR }}"
- "${{ RESULTS_DIR }}:/home/cmuser/results_dir"
- "${{ SUBMISSION_DIR }}:/home/cmuser/submission_dir"
- - "${{ CM_CUDNN_TAR_FILE_PATH }}:${{ CM_CUDNN_TAR_FILE_PATH }}"
- - "${{ CM_TENSORRT_TAR_FILE_PATH }}:${{ CM_TENSORRT_TAR_FILE_PATH }}"
+ - "${{ MLC_CUDNN_TAR_FILE_PATH }}:${{ MLC_CUDNN_TAR_FILE_PATH }}"
+ - "${{ MLC_TENSORRT_TAR_FILE_PATH }}:${{ MLC_TENSORRT_TAR_FILE_PATH }}"
- "${{ CUDA_RUN_FILE_LOCAL_PATH }}:${{ CUDA_RUN_FILE_LOCAL_PATH }}"
- "${{ MLPERF_SCRATCH_PATH }}:${{ MLPERF_SCRATCH_PATH }}"
- "${{ DLRM_DATA_PATH }}:/home/mlperf_inf_dlrmv2"
diff --git a/script/build-mlperf-inference-server-nvidia/run.sh b/script/build-mlperf-inference-server-nvidia/run.sh
index e03aaa72b..ac990aa62 100644
--- a/script/build-mlperf-inference-server-nvidia/run.sh
+++ b/script/build-mlperf-inference-server-nvidia/run.sh
@@ -1,16 +1,16 @@
#!/bin/bash
CUR=$PWD
-cd ${CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH}
+cd ${MLC_MLPERF_INFERENCE_NVIDIA_CODE_PATH}
-if [[ ${CM_MAKE_CLEAN} == "yes" ]]; then
+if [[ ${MLC_MAKE_CLEAN} == "yes" ]]; then
make clean
fi
-if [[ ${CM_MLPERF_DEVICE} == "inferentia" ]]; then
+if [[ ${MLC_MLPERF_DEVICE} == "inferentia" ]]; then
make prebuild
fi
-SKIP_DRIVER_CHECK=1 make ${CM_MAKE_BUILD_COMMAND}
+SKIP_DRIVER_CHECK=1 make ${MLC_MAKE_BUILD_COMMAND}
test $? -eq 0 || exit $?
diff --git a/script/calibrate-model-for.qaic/customize.py b/script/calibrate-model-for.qaic/customize.py
index 32ff19353..6e09b1a2f 100644
--- a/script/calibrate-model-for.qaic/customize.py
+++ b/script/calibrate-model-for.qaic/customize.py
@@ -14,9 +14,9 @@ def preprocess(i):
automation = i['automation']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
- if env.get('CM_CREATE_INPUT_BATCH', '') == 'yes':
+ if env.get('MLC_CREATE_INPUT_BATCH', '') == 'yes':
r = create_batched_inputs(env)
if r['return'] > 0:
return r
@@ -28,14 +28,14 @@ def preprocess(i):
print("Profiling from " + os.getcwd())
- env['CM_RUN_CMD'] = cmd
+ env['MLC_RUN_CMD'] = cmd
return {'return': 0}
def create_batched_inputs(env):
- original_images_file = env['CM_DATASET_PREPROCESSED_IMAGES_LIST']
- batchsize = env['CM_QAIC_MODEL_BATCH_SIZE']
+ original_images_file = env['MLC_DATASET_PREPROCESSED_IMAGES_LIST']
+ batchsize = env['MLC_QAIC_MODEL_BATCH_SIZE']
file_paths = []
with open(original_images_file) as f:
@@ -71,13 +71,13 @@ def create_batched_inputs(env):
def construct_calibration_cmd(env):
- compiler_params = env['CM_QAIC_COMPILER_PARAMS']
- batchsize = env.get('CM_QAIC_MODEL_BATCH_SIZE', "1")
- cmd = env['CM_QAIC_EXEC_PATH'] + " "
- if env.get('CM_CREATE_INPUT_BATCH', '') == 'yes':
+ compiler_params = env['MLC_QAIC_COMPILER_PARAMS']
+ batchsize = env.get('MLC_QAIC_MODEL_BATCH_SIZE', "1")
+ cmd = env['MLC_QAIC_EXEC_PATH'] + " "
+ if env.get('MLC_CREATE_INPUT_BATCH', '') == 'yes':
cmd += " -input-list-file=batched_input_files -batchsize=" + batchsize + " "
cmd += compiler_params + " -dump-profile=profile.yaml -model=" + \
- env['CM_ML_MODEL_FILE_WITH_PATH']
+ env['MLC_ML_MODEL_FILE_WITH_PATH']
return {'return': 0, 'cmd': cmd}
@@ -86,10 +86,10 @@ def postprocess(i):
env = i['env']
profile_file_path = os.path.join(os.getcwd(), "profile.yaml")
- env['CM_QAIC_MODEL_PROFILE_WITH_PATH'] = profile_file_path
+ env['MLC_QAIC_MODEL_PROFILE_WITH_PATH'] = profile_file_path
- if env.get('CM_ML_MODEL_INPUT_LAYER_NAME', '') != '':
- input_layer_names = [env.get('CM_ML_MODEL_INPUT_LAYER_NAME')]
+ if env.get('MLC_ML_MODEL_INPUT_LAYER_NAME', '') != '':
+ input_layer_names = [env.get('MLC_ML_MODEL_INPUT_LAYER_NAME')]
else:
input_layer_names = ["images:0", "images/:0"]
@@ -128,7 +128,7 @@ def postprocess(i):
"TopK_578/:0"
]
- if env.get('CM_QAIC_MODEL_NAME', '') == "retinanet":
+ if env.get('MLC_QAIC_MODEL_NAME', '') == "retinanet":
with open(profile_file_path, "r") as stream:
try:
output_min_val_loc = sys.maxsize
@@ -158,8 +158,8 @@ def postprocess(i):
max_val = k['Max']
scale, offset = get_scale_offset(
min_val, max_val)
- env['CM_QAIC_MODEL_RETINANET_IMAGE_SCALE'] = scale
- env['CM_QAIC_MODEL_RETINANET_IMAGE_OFFSET'] = offset
+ env['MLC_QAIC_MODEL_RETINANET_IMAGE_SCALE'] = scale
+ env['MLC_QAIC_MODEL_RETINANET_IMAGE_OFFSET'] = offset
if k["NodeOutputName"] in output_layer_names_loc[oindex]:
min_val = k['Min']
@@ -172,9 +172,9 @@ def postprocess(i):
min_val, max_val)
index = output_layer_names_loc[oindex].index(
k["NodeOutputName"])
- env[f'CM_QAIC_MODEL_RETINANET_LOC_SCALE{index}'] = loc_scale
+ env[f'MLC_QAIC_MODEL_RETINANET_LOC_SCALE{index}'] = loc_scale
# to uint8 is done in NMS code
- env[f'CM_QAIC_MODEL_RETINANET_LOC_OFFSET{index}'] = loc_offset - 128
+ env[f'MLC_QAIC_MODEL_RETINANET_LOC_OFFSET{index}'] = loc_offset - 128
total_range = max_val - min_val
scale = total_range / 256.0
@@ -191,9 +191,9 @@ def postprocess(i):
min_val, max_val)
index = output_layer_names_conf[oindex].index(
k["NodeOutputName"])
- env[f'CM_QAIC_MODEL_RETINANET_CONF_SCALE{index}'] = conf_scale
+ env[f'MLC_QAIC_MODEL_RETINANET_CONF_SCALE{index}'] = conf_scale
# to uint8 is done in NMS code
- env[f'CM_QAIC_MODEL_RETINANET_CONF_OFFSET{index}'] = conf_offset - 128
+ env[f'MLC_QAIC_MODEL_RETINANET_CONF_OFFSET{index}'] = conf_offset - 128
total_range = max_val - min_val
scale = total_range / 256.0
offset = round(-min_val / scale)
@@ -202,10 +202,10 @@ def postprocess(i):
output_min_val_loc, output_max_val_loc)
conf_scale, conf_offset = get_scale_offset(
output_min_val_conf, output_max_val_conf)
- env['CM_QAIC_MODEL_RETINANET_LOC_SCALE'] = loc_scale
- env['CM_QAIC_MODEL_RETINANET_LOC_OFFSET'] = loc_offset - 128 # to uint8 is done in NMS code
- env['CM_QAIC_MODEL_RETINANET_CONF_SCALE'] = conf_scale
- env['CM_QAIC_MODEL_RETINANET_CONF_OFFSET'] = conf_offset - 128 # to uint8 is done in NMS code
+ env['MLC_QAIC_MODEL_RETINANET_LOC_SCALE'] = loc_scale
+ env['MLC_QAIC_MODEL_RETINANET_LOC_OFFSET'] = loc_offset - 128 # to uint8 is done in NMS code
+ env['MLC_QAIC_MODEL_RETINANET_CONF_SCALE'] = conf_scale
+ env['MLC_QAIC_MODEL_RETINANET_CONF_OFFSET'] = conf_offset - 128 # to uint8 is done in NMS code
except yaml.YAMLError as exc:
return {'return': 1, 'error': exc}
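The retinanet post-processing above calls `get_scale_offset()`, which is defined outside this hunk. Based on the inline arithmetic beside those calls (`total_range / 256.0` and `round(-min_val / scale)`), a plausible reconstruction follows; treat it as an assumption, not the verified definition. The `- 128` adjustments in the hunk then re-center the stored offset, with the conversion to uint8 happening in the NMS code per the source comments.

```python
def get_scale_offset(min_val: float, max_val: float):
    # Assumed reconstruction: map the calibrated [min, max] activation range
    # onto 256 quantization levels, mirroring the inline arithmetic in
    # calibrate-model-for.qaic/customize.py.
    total_range = max_val - min_val
    scale = total_range / 256.0
    offset = round(-min_val / scale)
    return scale, offset

scale, offset = get_scale_offset(-4.0, 4.0)
print(scale, offset)  # 0.03125 128: a symmetric range puts the zero point mid-scale
```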
diff --git a/script/calibrate-model-for.qaic/meta.yaml b/script/calibrate-model-for.qaic/meta.yaml
index 958d9f846..e86389a9f 100644
--- a/script/calibrate-model-for.qaic/meta.yaml
+++ b/script/calibrate-model-for.qaic/meta.yaml
@@ -9,21 +9,21 @@ deps:
- qaic-apps-sdk
tags: get,qaic,apps,sdk
- enable_if_env:
- CM_CALIBRATE_OPENIMAGES:
+ MLC_CALIBRATE_OPENIMAGES:
- 'yes'
names:
- openimages-cal
- preprocessed-dataset
tags: get,preprocessed,dataset,_calibration,openimages,_for.retinanet.onnx,_NCHW,_fp32,_custom-annotations
- enable_if_env:
- CM_CALIBRATE_IMAGENET:
+ MLC_CALIBRATE_IMAGENET:
- 'yes'
names:
- imagenet-cal
- preprocessed-calibration-dataset
tags: get,dataset,imagenet,preprocessed,_calibration,_for.resnet50,_float32,_rgb32
- enable_if_env:
- CM_CALIBRATE_SQUAD:
+ MLC_CALIBRATE_SQUAD:
- 'on'
names:
- squad-cal
@@ -33,7 +33,7 @@ deps:
- model-src
tags: get,ml-model
new_env_keys:
-- CM_QAIC_MODEL_PROFILE_*
+- MLC_QAIC_MODEL_PROFILE_*
tags:
- qaic
- calibrate
@@ -46,12 +46,12 @@ variations:
base:
- bert_
env:
- CM_CALIBRATE_SQUAD: 'yes'
- CM_QAIC_COMPILER_ARGS: ''
- CM_QAIC_COMPILER_PARAMS: -onnx-define-symbol=batch_size,1 -onnx-define-symbol=seg_length,<<>>
- -input-list-file=<<>> -num-histogram-bins=512
- -profiling-threads=<<>>
- CM_QAIC_MODEL_TO_CONVERT: calibrate_bert_mlperf
+ MLC_CALIBRATE_SQUAD: 'yes'
+ MLC_QAIC_COMPILER_ARGS: ''
+ MLC_QAIC_COMPILER_PARAMS: -onnx-define-symbol=batch_size,1 -onnx-define-symbol=seg_length,<<>>
+ -input-list-file=<<>> -num-histogram-bins=512
+ -profiling-threads=<<>>
+ MLC_QAIC_MODEL_TO_CONVERT: calibrate_bert_mlperf
group: model
bert_:
adr:
@@ -60,17 +60,17 @@ variations:
default-variations:
seq-length: seq.384
env:
- CM_CREATE_INPUT_BATCH: 'no'
- CM_QAIC_MODEL_NAME: bert-large
+ MLC_CREATE_INPUT_BATCH: 'no'
+ MLC_QAIC_MODEL_NAME: bert-large
bs.#:
env:
- CM_CREATE_INPUT_BATCH: 'yes'
- CM_QAIC_MODEL_BATCH_SIZE: '#'
+ MLC_CREATE_INPUT_BATCH: 'yes'
+ MLC_QAIC_MODEL_BATCH_SIZE: '#'
group: batch-size
bs.1:
env:
- CM_CREATE_INPUT_BATCH: 'yes'
- CM_QAIC_MODEL_BATCH_SIZE: '1'
+ MLC_CREATE_INPUT_BATCH: 'yes'
+ MLC_QAIC_MODEL_BATCH_SIZE: '1'
group: batch-size
filter-size.#:
ad:
@@ -99,12 +99,12 @@ variations:
calibration-option: mlperf.option1
model-framework: tf
env:
- CM_CALIBRATE_IMAGENET: 'yes'
- CM_QAIC_COMPILER_ARGS: ''
- CM_QAIC_COMPILER_PARAMS: -output-node-name=ArgMax -profiling-threads=<<>>
- CM_QAIC_MODEL_NAME: resnet50
- CM_QAIC_MODEL_TO_CONVERT: calibrate_resnet50_tf
- CM_QAIC_OUTPUT_NODE_NAME: -output-node-name=ArgMax
+ MLC_CALIBRATE_IMAGENET: 'yes'
+ MLC_QAIC_COMPILER_ARGS: ''
+ MLC_QAIC_COMPILER_PARAMS: -output-node-name=ArgMax -profiling-threads=<<>>
+ MLC_QAIC_MODEL_NAME: resnet50
+ MLC_QAIC_MODEL_TO_CONVERT: calibrate_resnet50_tf
+ MLC_QAIC_OUTPUT_NODE_NAME: -output-node-name=ArgMax
group: model
resnet50,tf:
adr:
@@ -113,34 +113,34 @@ variations:
preprocessed-dataset:
tags: _NHWC
env:
- CM_QAIC_MODEL_TO_CONVERT: calibrate_resnet50_tf
+ MLC_QAIC_MODEL_TO_CONVERT: calibrate_resnet50_tf
retinanet:
adr:
model-src:
tags: retinanet,_no-nms,_onnx
env:
- CM_CALIBRATE_OPENIMAGES: 'yes'
- CM_QAIC_COMPILER_ARGS: ''
- CM_QAIC_COMPILER_PARAMS: -enable-channelwise -profiling-threads=<<>>
- -onnx-define-symbol=batch_size,<<>> -node-precision-info=<<>>
- CM_QAIC_MODEL_NAME: retinanet
- CM_QAIC_MODEL_TO_CONVERT: calibrate_retinanet_no_nms_mlperf
+ MLC_CALIBRATE_OPENIMAGES: 'yes'
+ MLC_QAIC_COMPILER_ARGS: ''
+ MLC_QAIC_COMPILER_PARAMS: -enable-channelwise -profiling-threads=<<>>
+ -onnx-define-symbol=batch_size,<<>> -node-precision-info=<<>>
+ MLC_QAIC_MODEL_NAME: retinanet
+ MLC_QAIC_MODEL_TO_CONVERT: calibrate_retinanet_no_nms_mlperf
group: model
new_env_keys:
- - CM_QAIC_MODEL_RETINANET_*
+ - MLC_QAIC_MODEL_RETINANET_*
seq.#:
ad:
squad-preprocessed:
tags: _seq.#
env:
- CM_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH: '#'
+ MLC_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH: '#'
group: seq-length
seq.384:
ad:
squad-preprocessed:
tags: _seq.384
env:
- CM_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH: '#'
+ MLC_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH: '#'
group: seq-length
tf:
group: model-framework
diff --git a/script/calibrate-model-for.qaic/run.sh b/script/calibrate-model-for.qaic/run.sh
index 59b1aed3d..7da7962b9 100644
--- a/script/calibrate-model-for.qaic/run.sh
+++ b/script/calibrate-model-for.qaic/run.sh
@@ -1,11 +1,11 @@
#!/bin/bash
-#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH}
#To export any variable
#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
-#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
@@ -17,12 +17,12 @@ function run() {
echo "Running: "
echo "$1"
echo ""
- if [[ ${CM_FAKE_RUN} != 'yes' ]]; then
+ if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then
eval "$1"
exit_if_error
fi
}
#Add your run commands here...
-run "$CM_RUN_CMD"
+run "$MLC_RUN_CMD"
diff --git a/script/clean-nvidia-mlperf-inference-scratch-space/customize.py b/script/clean-nvidia-mlperf-inference-scratch-space/customize.py
index 149fe34e7..2e3d4bc64 100644
--- a/script/clean-nvidia-mlperf-inference-scratch-space/customize.py
+++ b/script/clean-nvidia-mlperf-inference-scratch-space/customize.py
@@ -12,21 +12,21 @@ def preprocess(i):
automation = i['automation']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
clean_cmd = ''
cache_rm_tags = ''
- extra_cache_rm_tags = env.get('CM_CLEAN_EXTRA_CACHE_RM_TAGS', '')
+ extra_cache_rm_tags = env.get('MLC_CLEAN_EXTRA_CACHE_RM_TAGS', '')
- if env.get('CM_MODEL', '') == 'sdxl':
- if env.get('CM_CLEAN_ARTIFACT_NAME', '') == 'downloaded_data':
- clean_cmd = f"""rm -rf {os.path.join(env['CM_NVIDIA_MLPERF_SCRATCH_PATH'], "data", "coco", "SDXL")} """
+ if env.get('MLC_MODEL', '') == 'sdxl':
+ if env.get('MLC_CLEAN_ARTIFACT_NAME', '') == 'downloaded_data':
+ clean_cmd = f"""rm -rf {os.path.join(env['MLC_NVIDIA_MLPERF_SCRATCH_PATH'], "data", "coco", "SDXL")} """
cache_rm_tags = "nvidia-harness,_preprocess_data,_sdxl"
- if env.get('CM_CLEAN_ARTIFACT_NAME', '') == 'preprocessed_data':
- clean_cmd = f"""rm -rf {os.path.join(env['CM_NVIDIA_MLPERF_SCRATCH_PATH'], "preprocessed_data", "coco2014-tokenized-sdxl")} """
+ if env.get('MLC_CLEAN_ARTIFACT_NAME', '') == 'preprocessed_data':
+ clean_cmd = f"""rm -rf {os.path.join(env['MLC_NVIDIA_MLPERF_SCRATCH_PATH'], "preprocessed_data", "coco2014-tokenized-sdxl")} """
cache_rm_tags = "nvidia-harness,_preprocess_data,_sdxl"
- if env.get('CM_CLEAN_ARTIFACT_NAME', '') == 'downloaded_model':
- clean_cmd = f"""rm -rf {os.path.join(env['CM_NVIDIA_MLPERF_SCRATCH_PATH'], "models", "SDXL")} """
+ if env.get('MLC_CLEAN_ARTIFACT_NAME', '') == 'downloaded_model':
+ clean_cmd = f"""rm -rf {os.path.join(env['MLC_NVIDIA_MLPERF_SCRATCH_PATH'], "models", "SDXL")} """
cache_rm_tags = "nvidia-harness,_download_model,_sdxl"
cache_rm_tags = cache_rm_tags + extra_cache_rm_tags
@@ -39,10 +39,10 @@ def preprocess(i):
return r
if r['return'] == 0: # cache entry found
if clean_cmd != '':
- env['CM_RUN_CMD'] = clean_cmd
+ env['MLC_RUN_CMD'] = clean_cmd
else:
if clean_cmd != '':
- env['CM_RUN_CMD'] = clean_cmd
+ env['MLC_RUN_CMD'] = clean_cmd
return {'return': 0}
diff --git a/script/clean-nvidia-mlperf-inference-scratch-space/meta.yaml b/script/clean-nvidia-mlperf-inference-scratch-space/meta.yaml
index 079fe309d..1909c3a07 100644
--- a/script/clean-nvidia-mlperf-inference-scratch-space/meta.yaml
+++ b/script/clean-nvidia-mlperf-inference-scratch-space/meta.yaml
@@ -11,7 +11,7 @@ tags:
- inference
uid: bb41f6e3608e4e8a
input_mapping:
- extra_cache_rm_tags: CM_CLEAN_EXTRA_CACHE_RM_TAGS
+ extra_cache_rm_tags: MLC_CLEAN_EXTRA_CACHE_RM_TAGS
deps:
# Get Nvidia scratch space where data and models get downloaded
- tags: get,mlperf,inference,nvidia,scratch,space
@@ -22,24 +22,24 @@ variations:
sdxl:
group: model
env:
- CM_MODEL: sdxl
+ MLC_MODEL: sdxl
downloaded-data:
group: artifact
env:
- CM_CLEAN_ARTIFACT_NAME: downloaded_data
+ MLC_CLEAN_ARTIFACT_NAME: downloaded_data
preprocessed-data:
group: artifact
env:
- CM_CLEAN_ARTIFACT_NAME: preprocessed_data
+ MLC_CLEAN_ARTIFACT_NAME: preprocessed_data
downloaded-model:
group: artifact
env:
- CM_CLEAN_ARTIFACT_NAME: downloaded_model
+ MLC_CLEAN_ARTIFACT_NAME: downloaded_model
v4.1:
group: version
env:
- CM_NVIDIA_MLPERF_INFERENCE_CODE_VERSION: v4.1
+ MLC_NVIDIA_MLPERF_INFERENCE_CODE_VERSION: v4.1
v4.0:
group: version
env:
- CM_NVIDIA_MLPERF_INFERENCE_CODE_VERSION: v4.0
+ MLC_NVIDIA_MLPERF_INFERENCE_CODE_VERSION: v4.0
diff --git a/script/clean-nvidia-mlperf-inference-scratch-space/run.sh b/script/clean-nvidia-mlperf-inference-scratch-space/run.sh
index 4c23c380e..32cf4d51e 100644
--- a/script/clean-nvidia-mlperf-inference-scratch-space/run.sh
+++ b/script/clean-nvidia-mlperf-inference-scratch-space/run.sh
@@ -1,17 +1,17 @@
#!/bin/bash
-#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH}
#To export any variable
#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
-#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
echo "Running: "
-echo "${CM_RUN_CMD}"
+echo "${MLC_RUN_CMD}"
echo ""
-if [[ ${CM_FAKE_RUN} != "yes" ]]; then
- eval "${CM_RUN_CMD}"
+if [[ ${MLC_FAKE_RUN} != "yes" ]]; then
+ eval "${MLC_RUN_CMD}"
test $? -eq 0 || exit 1
fi
diff --git a/script/compile-model-for.qaic/customize.py b/script/compile-model-for.qaic/customize.py
index 8f9f5717c..c055cfb07 100644
--- a/script/compile-model-for.qaic/customize.py
+++ b/script/compile-model-for.qaic/customize.py
@@ -12,9 +12,9 @@ def preprocess(i):
automation = i['automation']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
- if env.get('CM_REGISTER_CACHE', '') == '':
+ if env.get('MLC_REGISTER_CACHE', '') == '':
r = construct_compilation_cmd(env)
if r['return'] > 0:
@@ -23,18 +23,18 @@ def preprocess(i):
print("Compiling from " + os.getcwd())
- env['CM_QAIC_MODEL_FINAL_COMPILATION_CMD'] = cmd
+ env['MLC_QAIC_MODEL_FINAL_COMPILATION_CMD'] = cmd
- env['CM_RUN_CMD'] = cmd
+ env['MLC_RUN_CMD'] = cmd
else:
import shutil
print(
"Creating cache entry from " +
- env['CM_REGISTER_CACHE'] +
+ env['MLC_REGISTER_CACHE'] +
" to " +
os.getcwd())
r = shutil.copytree(
- env['CM_REGISTER_CACHE'],
+ env['MLC_REGISTER_CACHE'],
os.path.join(
os.getcwd(),
"elfs"))
@@ -44,14 +44,14 @@ def preprocess(i):
def construct_compilation_cmd(env):
- compiler_params_base = env['CM_QAIC_MODEL_COMPILER_PARAMS_BASE']
- compiler_args = env['CM_QAIC_MODEL_COMPILER_ARGS'] + \
- ' ' + env.get('CM_QAIC_MODEL_COMPILER_ARGS_SUT', '')
- batchsize = env.get('CM_QAIC_MODEL_BATCH_SIZE')
+ compiler_params_base = env['MLC_QAIC_MODEL_COMPILER_PARAMS_BASE']
+ compiler_args = env['MLC_QAIC_MODEL_COMPILER_ARGS'] + \
+ ' ' + env.get('MLC_QAIC_MODEL_COMPILER_ARGS_SUT', '')
+ batchsize = env.get('MLC_QAIC_MODEL_BATCH_SIZE')
- if env.get('CM_QAIC_MODEL_QUANTIZATION', '') == 'yes':
+ if env.get('MLC_QAIC_MODEL_QUANTIZATION', '') == 'yes':
profile_string = " -load-profile=" + \
- env['CM_QAIC_MODEL_PROFILE_WITH_PATH']
+ env['MLC_QAIC_MODEL_PROFILE_WITH_PATH']
else:
profile_string = ''
@@ -61,14 +61,14 @@ def construct_compilation_cmd(env):
compiler_params += " -batchsize=" + batchsize
percentile_calibration_params = env.get(
- 'CM_QAIC_MODEL_COMPILER_QUANTIZATION_PARAMS')
+ 'MLC_QAIC_MODEL_COMPILER_QUANTIZATION_PARAMS')
if percentile_calibration_params:
compiler_params += " " + percentile_calibration_params
aic_binary_dir = os.path.join(os.getcwd(), "elfs")
- cmd = env['CM_QAIC_EXEC_PATH'] + \
- " -model=" + env['CM_ML_MODEL_FILE_WITH_PATH'] + \
+ cmd = env['MLC_QAIC_EXEC_PATH'] + \
+ " -model=" + env['MLC_ML_MODEL_FILE_WITH_PATH'] + \
profile_string + ' -aic-binary-dir=' + aic_binary_dir + ' ' \
+ compiler_params
@@ -78,12 +78,12 @@ def construct_compilation_cmd(env):
def postprocess(i):
env = i['env']
- env['CM_QAIC_MODEL_COMPILED_BINARY_WITH_PATH'] = os.path.join(
+ env['MLC_QAIC_MODEL_COMPILED_BINARY_WITH_PATH'] = os.path.join(
os.getcwd(), "elfs", "programqpc.bin")
if not os.path.isdir(os.path.join(os.getcwd(), "elfs")):
return {
'return': 1, 'error': 'elfs directory not found inside the compiled directory'}
- env['CM_ML_MODEL_FILE_WITH_PATH'] = env['CM_QAIC_MODEL_COMPILED_BINARY_WITH_PATH']
+ env['MLC_ML_MODEL_FILE_WITH_PATH'] = env['MLC_QAIC_MODEL_COMPILED_BINARY_WITH_PATH']
return {'return': 0}
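For reference, `construct_compilation_cmd()` above reduces to a concatenation of the renamed `MLC_*` values, with `-load-profile` added only for quantized models. A condensed sketch, where the env values are illustrative placeholders rather than real defaults:

```python
import os

def build_compile_cmd(env: dict) -> str:
    # -load-profile is only appended when the model variant is quantized.
    profile = ""
    if env.get("MLC_QAIC_MODEL_QUANTIZATION") == "yes":
        profile = " -load-profile=" + env["MLC_QAIC_MODEL_PROFILE_WITH_PATH"]
    aic_binary_dir = os.path.join(os.getcwd(), "elfs")
    return (env["MLC_QAIC_EXEC_PATH"]
            + " -model=" + env["MLC_ML_MODEL_FILE_WITH_PATH"]
            + profile
            + " -aic-binary-dir=" + aic_binary_dir
            + " " + env["MLC_QAIC_MODEL_COMPILER_PARAMS_BASE"]
            + " " + env["MLC_QAIC_MODEL_COMPILER_ARGS"])

env = {
    "MLC_QAIC_EXEC_PATH": "qaic-exec",            # illustrative
    "MLC_ML_MODEL_FILE_WITH_PATH": "model.onnx",  # illustrative
    "MLC_QAIC_MODEL_QUANTIZATION": "yes",
    "MLC_QAIC_MODEL_PROFILE_WITH_PATH": "profile.yaml",
    "MLC_QAIC_MODEL_COMPILER_PARAMS_BASE": "-aic-hw -aic-hw-version=2.0",
    "MLC_QAIC_MODEL_COMPILER_ARGS": "-aic-num-cores=4",
}
print(build_compile_cmd(env))
```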
diff --git a/script/compile-model-for.qaic/meta.yaml b/script/compile-model-for.qaic/meta.yaml
index 634bb948b..b2b667c94 100644
--- a/script/compile-model-for.qaic/meta.yaml
+++ b/script/compile-model-for.qaic/meta.yaml
@@ -8,34 +8,34 @@ deps:
- names:
- qaic-apps-sdk
skip_if_env:
- CM_REGISTER_CACHE:
+ MLC_REGISTER_CACHE:
- 'on'
tags: get,qaic,apps,sdk
- enable_if_env:
- CM_COMPILE_RETINANET:
+ MLC_COMPILE_RETINANET:
- 'yes'
names:
- retinanet-profile
- qaic-profile
tags: qaic,calibrate,_retinanet
- enable_if_env:
- CM_COMPILE_RESNET:
+ MLC_COMPILE_RESNET:
- 'on'
names:
- resnet-profile
- qaic-profile
skip_if_env:
- CM_REGISTER_CACHE:
+ MLC_REGISTER_CACHE:
- 'on'
tags: qaic,calibrate,_resnet50
- names:
- model-src
tags: get,ml-model
input_mapping:
- register: CM_REGISTER_CACHE
+ register: MLC_REGISTER_CACHE
new_env_keys:
-- CM_QAIC_MODEL*
-- CM_ML_MODEL_FILE_WITH_PATH
+- MLC_QAIC_MODEL*
+- MLC_ML_MODEL_FILE_WITH_PATH
tags:
- qaic
- compile
@@ -54,39 +54,39 @@ variations:
- qaic-profile
tags: calibrate,qaic,_bert-99
env:
- CM_COMPILE_BERT: 'on'
- CM_QAIC_MODEL_COMPILER_ARGS: ''
- CM_QAIC_MODEL_COMPILER_PARAMS_BASE: -aic-hw -aic-hw-version=2.0 -execute-nodes-in-fp16=Add,Div,Erf,Softmax
+ MLC_COMPILE_BERT: 'on'
+ MLC_QAIC_MODEL_COMPILER_ARGS: ''
+ MLC_QAIC_MODEL_COMPILER_PARAMS_BASE: -aic-hw -aic-hw-version=2.0 -execute-nodes-in-fp16=Add,Div,Erf,Softmax
-quantization-schema=symmetric_with_uint8 -quantization-precision=Int8 -quantization-precision-bias=Int32
-vvv -compile-only -onnx-define-symbol=batch_size,1 -onnx-define-symbol=seg_length,384
-multicast-weights -combine-inputs=false -combine-outputs=false
- CM_QAIC_MODEL_TO_CONVERT: calibrate_bert_mlperf
+ MLC_QAIC_MODEL_TO_CONVERT: calibrate_bert_mlperf
bert-99,offline:
env:
- CM_QAIC_MODEL_COMPILER_ARGS: -allocator-dealloc-delay=2 -size-split-granularity=1536
+ MLC_QAIC_MODEL_COMPILER_ARGS: -allocator-dealloc-delay=2 -size-split-granularity=1536
-vtcm-working-set-limit-ratio=1
- CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=2
+ MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=2
bert-99,offline,nsp.14:
env:
- CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=3
+ MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=3
bert-99,offline,nsp.16:
env:
- CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=2
+ MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=2
bert-99,server:
env:
- CM_QAIC_MODEL_COMPILER_ARGS: -allocator-dealloc-delay=2 -size-split-granularity=1536
+ MLC_QAIC_MODEL_COMPILER_ARGS: -allocator-dealloc-delay=2 -size-split-granularity=1536
-vtcm-working-set-limit-ratio=1
- CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=3
+ MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=3
bert-99,server,nsp.14:
env:
- CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=3
+ MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=3
bert-99,singlestream:
env:
- CM_QAIC_MODEL_COMPILER_ARGS: ''
- CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=8 -ols=1
+ MLC_QAIC_MODEL_COMPILER_ARGS: ''
+ MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=8 -ols=1
bert-99,singlestream,nsp.14:
env:
- CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=8 -ols=1
+ MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=8 -ols=1
bert-99.9:
adr:
model-src:
@@ -94,40 +94,40 @@ variations:
base:
- no-quantized
env:
- CM_COMPILE_BERT: 'on'
- CM_QAIC_MODEL_COMPILER_ARGS: ''
- CM_QAIC_MODEL_COMPILER_PARAMS_BASE: -aic-hw -aic-hw-version=2.0 -convert-to-fp16
+ MLC_COMPILE_BERT: 'on'
+ MLC_QAIC_MODEL_COMPILER_ARGS: ''
+ MLC_QAIC_MODEL_COMPILER_PARAMS_BASE: -aic-hw -aic-hw-version=2.0 -convert-to-fp16
-vvv -compile-only -onnx-define-symbol=batch_size,1 -onnx-define-symbol=seg_length,384
-combine-inputs=false -combine-outputs=false
- CM_QAIC_MODEL_TO_CONVERT: bert_mlperf
+ MLC_QAIC_MODEL_TO_CONVERT: bert_mlperf
bert-99.9,offline:
env:
- CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=2 -mos=1 -ols=2
+ MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=2 -mos=1 -ols=2
bert-99.9,offline,nsp.14:
env:
- CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=2 -mos=1 -ols=2
+ MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=2 -mos=1 -ols=2
bert-99.9,offline,nsp.16:
env:
- CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=2 -mos=1 -ols=2
+ MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=2 -mos=1 -ols=2
bert-99.9,server:
env:
- CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=2
+ MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=2
bert-99.9,server,nsp.14:
env:
- CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=2
+ MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=2
bs.#:
adr:
qaic-profile:
tags: _bs.#
env:
- CM_QAIC_MODEL_BATCH_SIZE: '#'
+ MLC_QAIC_MODEL_BATCH_SIZE: '#'
group: batch-size
bs.1:
adr:
qaic-profile:
tags: _bs.1
env:
- CM_QAIC_MODEL_BATCH_SIZE: '1'
+ MLC_QAIC_MODEL_BATCH_SIZE: '1'
group: batch-size
filter-size.#:
ad:
@@ -138,7 +138,7 @@ variations:
group: mlperf-scenario
no-quantized:
env:
- CM_QAIC_MODEL_QUANTIZATION: 'no'
+ MLC_QAIC_MODEL_QUANTIZATION: 'no'
group: quantization
nsp.14:
group: nsp
@@ -152,13 +152,13 @@ variations:
group: mlperf-scenario
pc.#:
env:
- CM_QAIC_MODEL_COMPILER_PERCENTILE_CALIBRATION_VALUE: '#'
- CM_QAIC_MODEL_COMPILER_QUANTIZATION_PARAMS: -quantization-calibration=Percentile -percentile-calibration-value=<<>>
+ MLC_QAIC_MODEL_COMPILER_PERCENTILE_CALIBRATION_VALUE: '#'
+ MLC_QAIC_MODEL_COMPILER_QUANTIZATION_PARAMS: -quantization-calibration=Percentile -percentile-calibration-value=<<>>
group: percentile-calibration
quantized:
default: true
env:
- CM_QAIC_MODEL_QUANTIZATION: 'yes'
+ MLC_QAIC_MODEL_QUANTIZATION: 'yes'
group: quantization
resnet50:
adr:
@@ -167,87 +167,87 @@ variations:
default_variations:
model-framework: tf
env:
- CM_COMPILE_RESNET: 'on'
- CM_QAIC_MODEL_COMPILER_PARAMS_BASE: -aic-hw -aic-hw-version=2.0 -quantization-schema=symmetric_with_uint8
+ MLC_COMPILE_RESNET: 'on'
+ MLC_QAIC_MODEL_COMPILER_PARAMS_BASE: -aic-hw -aic-hw-version=2.0 -quantization-schema=symmetric_with_uint8
-quantization-precision=Int8 -output-node-name=ArgMax -vvv -compile-only -use-producer-dma=1
- CM_QAIC_MODEL_TO_CONVERT: compile_resnet50_tf
+ MLC_QAIC_MODEL_TO_CONVERT: compile_resnet50_tf
resnet50,multistream:
env:
- CM_QAIC_MODEL_COMPILER_ARGS: ''
- CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4 -mos=1 -ols=1
+ MLC_QAIC_MODEL_COMPILER_ARGS: ''
+ MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4 -mos=1 -ols=1
resnet50,multistream,nsp.14:
default_variations:
batch-size: bs.1
env:
- CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4
+ MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4
resnet50,offline:
env:
- CM_QAIC_MODEL_COMPILER_ARGS: -sdp-cluster-sizes=2,2 -multicast-weights
- CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4 -mos=1,2 -ols=4
+ MLC_QAIC_MODEL_COMPILER_ARGS: -sdp-cluster-sizes=2,2 -multicast-weights
+ MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4 -mos=1,2 -ols=4
resnet50,offline,nsp.14:
default_variations:
batch-size: bs.8
env:
- CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4 -mos=1,2 -ols=4
+ MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4 -mos=1,2 -ols=4
resnet50,server:
env: {}
resnet50,server,nsp.14:
default_variations:
batch-size: bs.8
env:
- CM_QAIC_MODEL_COMPILER_ARGS: -sdp-cluster-sizes=2,2 -mos=1,2 -multicast-weights
- CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4 -ols=4
+ MLC_QAIC_MODEL_COMPILER_ARGS: -sdp-cluster-sizes=2,2 -mos=1,2 -multicast-weights
+ MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4 -ols=4
resnet50,server,nsp.16:
default_variations:
batch-size: bs.8
env:
- CM_QAIC_MODEL_COMPILER_ARGS: -sdp-cluster-sizes=4,4 -mos=1,4
- CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4 -ols=4
+ MLC_QAIC_MODEL_COMPILER_ARGS: -sdp-cluster-sizes=4,4 -mos=1,4
+ MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4 -ols=4
resnet50,singlestream:
env:
- CM_QAIC_MODEL_COMPILER_ARGS: -aic-num-of-instances=1
- CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=1 -ols=1
+ MLC_QAIC_MODEL_COMPILER_ARGS: -aic-num-of-instances=1
+ MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=1 -ols=1
resnet50,singlestream,nsp.14:
default_variations:
batch-size: bs.1
env:
- CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=1 -ols=1
+ MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=1 -ols=1
resnet50,tf:
ad:
model-src:
tags: _fix-input-shape
env:
- CM_QAIC_MODEL_TO_CONVERT: calibrate_resnet50_tf
+ MLC_QAIC_MODEL_TO_CONVERT: calibrate_resnet50_tf
retinanet:
adr:
model-src:
tags: retinanet,_no-nms
env:
- CM_COMPILE_RETINANET: 'on'
- CM_QAIC_MODEL_COMPILER_ARGS: -aic-enable-depth-first
- CM_QAIC_MODEL_COMPILER_PARAMS_BASE: -aic-hw -aic-hw-version=2.0 -compile-only
- -enable-channelwise -onnx-define-symbol=batch_size,1 -node-precision-info=<<>>
+ MLC_COMPILE_RETINANET: 'on'
+ MLC_QAIC_MODEL_COMPILER_ARGS: -aic-enable-depth-first
+ MLC_QAIC_MODEL_COMPILER_PARAMS_BASE: -aic-hw -aic-hw-version=2.0 -compile-only
+ -enable-channelwise -onnx-define-symbol=batch_size,1 -node-precision-info=<<>>
-quantization-schema-constants=symmetric_with_uint8 -quantization-schema-activations=asymmetric
-quantization-calibration=None
- CM_QAIC_MODEL_TO_CONVERT: calibrate_retinanet_no_nms_mlperf
+ MLC_QAIC_MODEL_TO_CONVERT: calibrate_retinanet_no_nms_mlperf
new_env_keys:
- - CM_QAIC_MODEL_RETINANET_*
+ - MLC_QAIC_MODEL_RETINANET_*
retinanet,multistream: {}
retinanet,nsp.14:
env: {}
retinanet,offline:
env:
- CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=1
+ MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=1
retinanet,offline,nsp.14: {}
retinanet,server: {}
retinanet,server,nsp.14: {}
retinanet,singlestream:
env:
- CM_QAIC_MODEL_COMPILER_ARGS: ''
- CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=1 -ols=1
+ MLC_QAIC_MODEL_COMPILER_ARGS: ''
+ MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=1 -ols=1
retinanet,singlestream,nsp.14:
env:
- CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=1 -ols=1
+ MLC_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=1 -ols=1
server:
group: mlperf-scenario
singlestream:
diff --git a/script/compile-model-for.qaic/run.sh b/script/compile-model-for.qaic/run.sh
index c5c3c04cb..d20c3a705 100644
--- a/script/compile-model-for.qaic/run.sh
+++ b/script/compile-model-for.qaic/run.sh
@@ -1,11 +1,11 @@
#!/bin/bash
-#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH}
#To export any variable
#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
-#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
@@ -17,7 +17,7 @@ function run() {
echo "Running: "
echo "$1"
echo ""
- if [[ ${CM_FAKE_RUN} != 'yes' ]]; then
+ if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then
eval "$1"
exit_if_error
fi
@@ -25,4 +25,4 @@ function run() {
#Add your run commands here...
run "rm -rf elfs"
-run "$CM_RUN_CMD"
+run "$MLC_RUN_CMD"
diff --git a/script/compile-program/customize.py b/script/compile-program/customize.py
index 9fa65ee54..681f88ca7 100644
--- a/script/compile-program/customize.py
+++ b/script/compile-program/customize.py
@@ -7,56 +7,56 @@ def preprocess(i):
env = i['env']
CPPFLAGS = env.get('+ CPPFLAGS', [])
- env['CM_C_COMPILER_FLAGS'] = " ".join(env.get('+ CFLAGS', []) + CPPFLAGS)
- env['CM_CXX_COMPILER_FLAGS'] = " ".join(
+ env['MLC_C_COMPILER_FLAGS'] = " ".join(env.get('+ CFLAGS', []) + CPPFLAGS)
+ env['MLC_CXX_COMPILER_FLAGS'] = " ".join(
env.get('+ CXXFLAGS', []) + CPPFLAGS)
- env['CM_F_COMPILER_FLAGS'] = " ".join(env.get('+ FFLAGS', []))
+ env['MLC_F_COMPILER_FLAGS'] = " ".join(env.get('+ FFLAGS', []))
CPATH = env.get('+CPATH', [])
- env['CM_C_INCLUDE_PATH'] = " -I".join([" "] +
+ env['MLC_C_INCLUDE_PATH'] = " -I".join([" "] +
env.get('+C_INCLUDE_PATH', []) +
CPATH)
- env['CM_CPLUS_INCLUDE_PATH'] = " -I".join(
+ env['MLC_CPLUS_INCLUDE_PATH'] = " -I".join(
[" "] + env.get('+CPLUS_INCLUDE_PATH', []) + CPATH)
- env['CM_F_INCLUDE_PATH'] = " -I".join([" "] +
+ env['MLC_F_INCLUDE_PATH'] = " -I".join([" "] +
env.get('+F_INCLUDE_PATH', []) +
CPATH)
# If windows, need to extend it more ...
if os_info['platform'] == 'windows' and env.get(
- 'CM_COMPILER_FAMILY', '') != 'LLVM':
+ 'MLC_COMPILER_FAMILY', '') != 'LLVM':
print("WARNING: compile-program script should be extended to support flags for non-LLVM compilers on Windows")
return {'return': 0}
LDFLAGS = env.get('+ LDFLAGS', [])
- env['CM_C_LINKER_FLAGS'] = " ".join(env.get('+ LDCFLAGS', []) + LDFLAGS)
- env['CM_CXX_LINKER_FLAGS'] = " ".join(
+ env['MLC_C_LINKER_FLAGS'] = " ".join(env.get('+ LDCFLAGS', []) + LDFLAGS)
+ env['MLC_CXX_LINKER_FLAGS'] = " ".join(
env.get('+ LDCXXFLAGS', []) + LDFLAGS)
- env['CM_F_LINKER_FLAGS'] = " ".join(env.get('+ LDFFLAGS', []) + LDFLAGS)
+ env['MLC_F_LINKER_FLAGS'] = " ".join(env.get('+ LDFFLAGS', []) + LDFLAGS)
- if env.get('CM_LINKER_LANG', 'C') == "C":
- env['CM_LINKER_BIN'] = env['CM_C_COMPILER_BIN']
- env['CM_LINKER_WITH_PATH'] = env['CM_C_COMPILER_WITH_PATH']
- env['CM_LINKER_COMPILE_FLAGS'] = env['CM_C_COMPILER_FLAGS']
- env['CM_LINKER_FLAGS'] = env['CM_C_LINKER_FLAGS']
+ if env.get('MLC_LINKER_LANG', 'C') == "C":
+ env['MLC_LINKER_BIN'] = env['MLC_C_COMPILER_BIN']
+ env['MLC_LINKER_WITH_PATH'] = env['MLC_C_COMPILER_WITH_PATH']
+ env['MLC_LINKER_COMPILE_FLAGS'] = env['MLC_C_COMPILER_FLAGS']
+ env['MLC_LINKER_FLAGS'] = env['MLC_C_LINKER_FLAGS']
- elif env.get('CM_LINKER_LANG', 'C') == "CXX":
- env['CM_LINKER_BIN'] = env['CM_CXX_COMPILER_BIN']
- env['CM_LINKER_WITH_PATH'] = env['CM_CXX_COMPILER_WITH_PATH']
- env['CM_LINKER_COMPILE_FLAGS'] = env['CM_CXX_COMPILER_FLAGS']
- env['CM_LINKER_FLAGS'] = env['CM_CXX_LINKER_FLAGS']
+ elif env.get('MLC_LINKER_LANG', 'C') == "CXX":
+ env['MLC_LINKER_BIN'] = env['MLC_CXX_COMPILER_BIN']
+ env['MLC_LINKER_WITH_PATH'] = env['MLC_CXX_COMPILER_WITH_PATH']
+ env['MLC_LINKER_COMPILE_FLAGS'] = env['MLC_CXX_COMPILER_FLAGS']
+ env['MLC_LINKER_FLAGS'] = env['MLC_CXX_LINKER_FLAGS']
- elif env.get('CM_LINKER_LANG', 'C') == "F":
- env['CM_LINKER_BIN'] = env['CM_F_COMPILER_BIN']
- env['CM_LINKER_WITH_PATH'] = env['CM_F_COMPILER_WITH_PATH']
- env['CM_LINKER_COMPILE_FLAGS'] = env['CM_F_COMPILER_FLAGS']
- env['CM_LINKER_FLAGS'] = env['CM_F_LINKER_FLAGS']
+ elif env.get('MLC_LINKER_LANG', 'C') == "F":
+ env['MLC_LINKER_BIN'] = env['MLC_F_COMPILER_BIN']
+ env['MLC_LINKER_WITH_PATH'] = env['MLC_F_COMPILER_WITH_PATH']
+ env['MLC_LINKER_COMPILE_FLAGS'] = env['MLC_F_COMPILER_FLAGS']
+ env['MLC_LINKER_FLAGS'] = env['MLC_F_LINKER_FLAGS']
- env['CM_LD_LIBRARY_PATH'] = " -L".join([" "] +
+ env['MLC_LD_LIBRARY_PATH'] = " -L".join([" "] +
env.get('+LD_LIBRARY_PATH', []))
- env['CM_SOURCE_FOLDER_PATH'] = env['CM_SOURCE_FOLDER_PATH'] if 'CM_SOURCE_FOLDER_PATH' in env else env[
- 'CM_TMP_CURRENT_SCRIPT_PATH'] if 'CM_TMP_CURRENT_SCRIPT_PATH' in env else ''
+ env['MLC_SOURCE_FOLDER_PATH'] = env['MLC_SOURCE_FOLDER_PATH'] if 'MLC_SOURCE_FOLDER_PATH' in env else env[
+ 'MLC_TMP_CURRENT_SCRIPT_PATH'] if 'MLC_TMP_CURRENT_SCRIPT_PATH' in env else ''
return {'return': 0}
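
Two idioms in the customize.py above are easy to misread: the `" -I".join([" "] + paths)` trick that prefixes every include path with `-I`, and the if/elif chain that copies per-language compiler settings into the generic MLC_LINKER_* keys. A small self-contained sketch — the env values below are made up for illustration:

```python
# Hypothetical env values, for illustration only.
env = {
    "MLC_LINKER_LANG": "CXX",
    "MLC_CXX_COMPILER_BIN": "g++",
    "MLC_CXX_COMPILER_WITH_PATH": "/usr/bin/g++",
    "MLC_CXX_COMPILER_FLAGS": "-O3",
    "MLC_CXX_LINKER_FLAGS": "-lm",
}

# Idiom 1: a leading list element gives join() a leading separator,
# so every path ends up with its own -I prefix.
paths = ["/usr/include", "/opt/include"]
assert " -I".join([" "] + paths) == "  -I/usr/include -I/opt/include"

# Idiom 2: the if/elif chain above, collapsed into one loop
# (lang is one of "C", "CXX", "F").
lang = env.get("MLC_LINKER_LANG", "C")
for dst, src in [("BIN", "COMPILER_BIN"),
                 ("WITH_PATH", "COMPILER_WITH_PATH"),
                 ("COMPILE_FLAGS", "COMPILER_FLAGS"),
                 ("FLAGS", "LINKER_FLAGS")]:
    env[f"MLC_LINKER_{dst}"] = env[f"MLC_{lang}_{src}"]

assert env["MLC_LINKER_BIN"] == "g++"
```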
diff --git a/script/compile-program/run.bat b/script/compile-program/run.bat
index ece5d9e9c..8a9e5436d 100644
--- a/script/compile-program/run.bat
+++ b/script/compile-program/run.bat
@@ -1,16 +1,16 @@
rem Compile program
-set BIN_NAME=%CM_BIN_NAME%
-IF NOT DEFINED CM_BIN_NAME SET BIN_NAME=run.exe
+set BIN_NAME=%MLC_BIN_NAME%
+IF NOT DEFINED MLC_BIN_NAME SET BIN_NAME=run.exe
-set RUN_DIR=%CM_RUN_DIR%
-IF NOT DEFINED CM_RUN_DIR SET RUN_DIR=.
+set RUN_DIR=%MLC_RUN_DIR%
+IF NOT DEFINED MLC_RUN_DIR SET RUN_DIR=.
echo.
echo Checking compiler version ...
echo.
-"%CM_C_COMPILER_WITH_PATH%" %CM_C_COMPILER_FLAG_VERSION%
+"%MLC_C_COMPILER_WITH_PATH%" %MLC_C_COMPILER_FLAG_VERSION%
echo.
echo Compiling source files ...
@@ -18,18 +18,18 @@ echo.
if not exist %RUN_DIR% mkdir %RUN_DIR%
-cd %CM_SOURCE_FOLDER_PATH%
+cd %MLC_SOURCE_FOLDER_PATH%
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
-if not "%CM_C_SOURCE_FILES%" == "" (
- echo %CM_C_COMPILER_WITH_PATH% %CM_C_COMPILER_FLAGS% %CM_C_INCLUDE_PATH% %CM_C_SOURCE_FILES% %CM_LD_LIBRARY_PATH% %LDCFLAGS% %CM_C_COMPILER_FLAG_OUTPUT%"%RUN_DIR%\%BIN_NAME%"
- "%CM_C_COMPILER_WITH_PATH%" %CM_C_COMPILER_FLAGS% %CM_C_INCLUDE_PATH% %CM_C_SOURCE_FILES% %CM_LD_LIBRARY_PATH% %LDCFLAGS% %CM_C_COMPILER_FLAG_OUTPUT%"%RUN_DIR%\%BIN_NAME%"
+if not "%MLC_C_SOURCE_FILES%" == "" (
+ echo %MLC_C_COMPILER_WITH_PATH% %MLC_C_COMPILER_FLAGS% %MLC_C_INCLUDE_PATH% %MLC_C_SOURCE_FILES% %MLC_LD_LIBRARY_PATH% %LDCFLAGS% %MLC_C_COMPILER_FLAG_OUTPUT%"%RUN_DIR%\%BIN_NAME%"
+ "%MLC_C_COMPILER_WITH_PATH%" %MLC_C_COMPILER_FLAGS% %MLC_C_INCLUDE_PATH% %MLC_C_SOURCE_FILES% %MLC_LD_LIBRARY_PATH% %LDCFLAGS% %MLC_C_COMPILER_FLAG_OUTPUT%"%RUN_DIR%\%BIN_NAME%"
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
)
-if not "%CM_CXX_SOURCE_FILES%" == "" (
- echo %CM_CXX_COMPILER_WITH_PATH% %CM_CXX_SOURCE_FILES% %CM_CXX_COMPILER_FLAGS% %CM_CPLUS_INCLUDE_PATH% %CM_LD_LIBRARY_PATH% %LDCXXFLAGS% %CM_CXX_COMPILER_FLAG_OUTPUT%"%RUN_DIR%\%BIN_NAME%"
- "%CM_CXX_COMPILER_WITH_PATH%" %CM_CXX_SOURCE_FILES% %CM_CXX_COMPILER_FLAGS% %CM_CPLUS_INCLUDE_PATH% %CM_LD_LIBRARY_PATH% %LDCXXFLAGS% %CM_CXX_COMPILER_FLAG_OUTPUT%"%RUN_DIR%\%BIN_NAME%"
+if not "%MLC_CXX_SOURCE_FILES%" == "" (
+ echo %MLC_CXX_COMPILER_WITH_PATH% %MLC_CXX_SOURCE_FILES% %MLC_CXX_COMPILER_FLAGS% %MLC_CPLUS_INCLUDE_PATH% %MLC_LD_LIBRARY_PATH% %LDCXXFLAGS% %MLC_CXX_COMPILER_FLAG_OUTPUT%"%RUN_DIR%\%BIN_NAME%"
+ "%MLC_CXX_COMPILER_WITH_PATH%" %MLC_CXX_SOURCE_FILES% %MLC_CXX_COMPILER_FLAGS% %MLC_CPLUS_INCLUDE_PATH% %MLC_LD_LIBRARY_PATH% %LDCXXFLAGS% %MLC_CXX_COMPILER_FLAG_OUTPUT%"%RUN_DIR%\%BIN_NAME%"
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
)
diff --git a/script/compile-program/run.sh b/script/compile-program/run.sh
index 7e98bc47d..68045945d 100644
--- a/script/compile-program/run.sh
+++ b/script/compile-program/run.sh
@@ -2,11 +2,11 @@
# Compile
-BIN_NAME=${CM_BIN_NAME:-run.out}
-RUN_DIR=${CM_RUN_DIR:-.}
+BIN_NAME=${MLC_BIN_NAME:-run.out}
+RUN_DIR=${MLC_RUN_DIR:-.}
echo "RUN_DIR=$RUN_DIR"
-if [[ ${CM_SKIP_RECOMPILE} == "yes" ]]; then
+if [[ ${MLC_SKIP_RECOMPILE} == "yes" ]]; then
if [ -f ${RUN_DIR}/${BIN_NAME} ]; then
exit 0
fi
@@ -14,13 +14,13 @@ fi
rm -f ${RUN_DIR}/${BIN_NAME}
-if [ -z "${CM_SOURCE_FOLDER_PATH}" ]; then
- echo "No source directory (CM_SOURCE_FOLDER_PATH} specified"
+if [ -z "${MLC_SOURCE_FOLDER_PATH}" ]; then
+ echo "No source directory (MLC_SOURCE_FOLDER_PATH} specified"
exit 1
fi
-if [[ -z "${CM_C_SOURCE_FILES}" && -z "${CM_CXX_SOURCE_FILES}" && -z "${CM_F_SOURCE_FILES}" ]]; then
- echo "No source files (CM_C_SOURCE_FILES or CM_CXX_SOURCE_FILES or CM_F_SOURCE_FILES) specified"
+if [[ -z "${MLC_C_SOURCE_FILES}" && -z "${MLC_CXX_SOURCE_FILES}" && -z "${MLC_F_SOURCE_FILES}" ]]; then
+ echo "No source files (MLC_C_SOURCE_FILES or MLC_CXX_SOURCE_FILES or MLC_F_SOURCE_FILES) specified"
exit 1
fi
@@ -28,34 +28,34 @@ echo ""
echo "Checking compiler version ..."
echo ""
-${CM_C_COMPILER_WITH_PATH} ${CM_C_COMPILER_FLAG_VERSION}
+${MLC_C_COMPILER_WITH_PATH} ${MLC_C_COMPILER_FLAG_VERSION}
echo ""
echo "Compiling source files ..."
echo ""
-cd ${CM_SOURCE_FOLDER_PATH}
+cd ${MLC_SOURCE_FOLDER_PATH}
test $? -eq 0 || exit 1
-IFS=';' read -ra FILES <<< "${CM_C_SOURCE_FILES}"
+IFS=';' read -ra FILES <<< "${MLC_C_SOURCE_FILES}"
for file in "${FILES[@]}"; do
base="$(basename -- $file)"
base_name=${base%.*}
echo $base
echo $basename
- CMD="${CM_C_COMPILER_WITH_PATH} -c ${CM_C_COMPILER_FLAGS} ${CM_C_INCLUDE_PATH} $file ${CM_C_COMPILER_FLAG_OUTPUT}$base_name.o"
+ CMD="${MLC_C_COMPILER_WITH_PATH} -c ${MLC_C_COMPILER_FLAGS} ${MLC_C_INCLUDE_PATH} $file ${MLC_C_COMPILER_FLAG_OUTPUT}$base_name.o"
echo $CMD
eval $CMD
test $? -eq 0 || exit 1
done
-IFS=';' read -ra FILES <<< "${CM_CXX_SOURCE_FILES}"
+IFS=';' read -ra FILES <<< "${MLC_CXX_SOURCE_FILES}"
for file in "${FILES[@]}"; do
base="$(basename -- $file)"
base_name=${base%.*}
echo $base
echo $basename
- CMD="${CM_CXX_COMPILER_WITH_PATH} -c ${CM_CXX_COMPILER_FLAGS} ${CM_CPLUS_INCLUDE_PATH} $file ${CM_CXX_COMPILER_FLAG_OUTPUT}$base_name.o"
+ CMD="${MLC_CXX_COMPILER_WITH_PATH} -c ${MLC_CXX_COMPILER_FLAGS} ${MLC_CPLUS_INCLUDE_PATH} $file ${MLC_CXX_COMPILER_FLAG_OUTPUT}$base_name.o"
echo $CMD
eval $CMD
test $? -eq 0 || exit 1
@@ -65,7 +65,7 @@ done
echo ""
echo "Linking ..."
echo ""
-CMD="${CM_LINKER_WITH_PATH} ${CM_LINKER_COMPILE_FLAGS} *.o -o ${RUN_DIR}/${BIN_NAME} ${CM_LD_LIBRARY_PATH} ${CM_LINKER_FLAGS}"
+CMD="${MLC_LINKER_WITH_PATH} ${MLC_LINKER_COMPILE_FLAGS} *.o -o ${RUN_DIR}/${BIN_NAME} ${MLC_LD_LIBRARY_PATH} ${MLC_LINKER_FLAGS}"
echo $CMD
eval $CMD
diff --git a/script/convert-csv-to-md/customize.py b/script/convert-csv-to-md/customize.py
index 872c2b678..e547a6219 100644
--- a/script/convert-csv-to-md/customize.py
+++ b/script/convert-csv-to-md/customize.py
@@ -12,14 +12,14 @@ def preprocess(i):
automation = i['automation']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
- csv_file = env.get('CM_CSV_FILE', '')
- md_file = env.get('CM_MD_FILE', '')
+ csv_file = env.get('MLC_CSV_FILE', '')
+ md_file = env.get('MLC_MD_FILE', '')
process_file = os.path.join(i['run_script_input']['path'], "process.py")
- env['CM_RUN_CMD'] = '{} {} {} {} '.format(
- env["CM_PYTHON_BIN_WITH_PATH"], process_file, csv_file, md_file)
+ env['MLC_RUN_CMD'] = '{} {} {} {} '.format(
+ env["MLC_PYTHON_BIN_WITH_PATH"], process_file, csv_file, md_file)
return {'return': 0}
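
The customize.py above composes MLC_RUN_CMD so that run.sh executes `python process.py <csv> <md>`. The contents of process.py are not part of this diff, so the following is a hypothetical sketch of such a converter, assuming pandas (plus the tabulate package that `DataFrame.to_markdown()` requires):

```python
# Hypothetical sketch of a CSV -> Markdown converter like the process.py
# invoked above; the real script's contents are not shown in this diff.
import sys

import pandas as pd  # to_markdown() also needs the 'tabulate' package

def csv_to_md(csv_file: str, md_file: str) -> None:
    df = pd.read_csv(csv_file)
    with open(md_file, "w") as f:
        f.write(df.to_markdown(index=False))

if __name__ == "__main__":
    csv_to_md(sys.argv[1], sys.argv[2])
```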
diff --git a/script/convert-csv-to-md/meta.yaml b/script/convert-csv-to-md/meta.yaml
index 962601431..e1ed6f82e 100644
--- a/script/convert-csv-to-md/meta.yaml
+++ b/script/convert-csv-to-md/meta.yaml
@@ -17,8 +17,8 @@ deps:
docker_input_mapping: {}
input_description: {}
input_mapping:
- csv_file: CM_CSV_FILE
- md_file: CM_MD_FILE
+ csv_file: MLC_CSV_FILE
+ md_file: MLC_MD_FILE
new_env_keys: []
new_state_keys: []
post_deps: []
diff --git a/script/convert-csv-to-md/run.sh b/script/convert-csv-to-md/run.sh
index 59b1aed3d..7da7962b9 100644
--- a/script/convert-csv-to-md/run.sh
+++ b/script/convert-csv-to-md/run.sh
@@ -1,11 +1,11 @@
#!/bin/bash
-#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH}
#To export any variable
#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
-#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
@@ -17,12 +17,12 @@ function run() {
echo "Running: "
echo "$1"
echo ""
- if [[ ${CM_FAKE_RUN} != 'yes' ]]; then
+ if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then
eval "$1"
exit_if_error
fi
}
#Add your run commands here...
-run "$CM_RUN_CMD"
+run "$MLC_RUN_CMD"
diff --git a/script/convert-ml-model-huggingface-to-onnx/customize.py b/script/convert-ml-model-huggingface-to-onnx/customize.py
index 90950b120..2c3959044 100644
--- a/script/convert-ml-model-huggingface-to-onnx/customize.py
+++ b/script/convert-ml-model-huggingface-to-onnx/customize.py
@@ -8,8 +8,8 @@ def preprocess(i):
env = i['env']
- if env.get("CM_MODEL_HUGG_PATH", "") == "":
- return {'return': 1, 'error': 'CM_MODEL_HUGG_PATH is not set'}
+ if env.get("MLC_MODEL_HUGG_PATH", "") == "":
+ return {'return': 1, 'error': 'MLC_MODEL_HUGG_PATH is not set'}
automation = i['automation']
diff --git a/script/convert-ml-model-huggingface-to-onnx/meta.yaml b/script/convert-ml-model-huggingface-to-onnx/meta.yaml
index 54f54b0e5..53b6c6474 100644
--- a/script/convert-ml-model-huggingface-to-onnx/meta.yaml
+++ b/script/convert-ml-model-huggingface-to-onnx/meta.yaml
@@ -12,8 +12,8 @@ deps:
- tags: get,generic-python-lib,_onnxruntime
env: {}
new_env_keys:
-- CM_ML_MODEL*
-- CM_MODEL_HUGG_PATH
+- MLC_ML_MODEL*
+- MLC_MODEL_HUGG_PATH
- HUGGINGFACE_ONNX_FILE_PATH
tags:
- ml-model
@@ -26,4 +26,4 @@ uid: eacb01655d7e49ac
variations:
model-path.#:
env:
- CM_MODEL_HUGG_PATH: '#'
+ MLC_MODEL_HUGG_PATH: '#'
diff --git a/script/convert-ml-model-huggingface-to-onnx/run.sh b/script/convert-ml-model-huggingface-to-onnx/run.sh
index 56be76db9..04a94bbcf 100644
--- a/script/convert-ml-model-huggingface-to-onnx/run.sh
+++ b/script/convert-ml-model-huggingface-to-onnx/run.sh
@@ -1,2 +1,2 @@
#!/bin/bash
-python -m transformers.onnx --model=${CM_MODEL_HUGG_PATH} ${PWD}
\ No newline at end of file
+python -m transformers.onnx --model=${MLC_MODEL_HUGG_PATH} ${PWD}
\ No newline at end of file
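
The run.sh above shells out to the transformers.onnx CLI. A Python sketch of the same invocation, with the MLC_MODEL_HUGG_PATH check from customize.py folded in:

```python
import os
import subprocess
import sys

model = os.environ.get("MLC_MODEL_HUGG_PATH", "")
if model == "":
    sys.exit("MLC_MODEL_HUGG_PATH is not set")

# Same call as run.sh: export the Hugging Face model to ONNX
# in the current working directory.
subprocess.run(
    [sys.executable, "-m", "transformers.onnx", f"--model={model}", os.getcwd()],
    check=True,
)
```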
diff --git a/script/copy-to-clipboard/code.py b/script/copy-to-clipboard/code.py
index 0a1aa014a..ce7fe265a 100644
--- a/script/copy-to-clipboard/code.py
+++ b/script/copy-to-clipboard/code.py
@@ -1,10 +1,10 @@
import os
import pyperclip as pc
-text = os.environ.get('CM_COPY_TO_CLIPBOARD_TEXT', '')
+text = os.environ.get('MLC_COPY_TO_CLIPBOARD_TEXT', '')
add_quotes = os.environ.get(
- 'CM_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES', '') in [
+ 'MLC_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES', '') in [
True, 'True', 'yes']
if add_quotes:
diff --git a/script/copy-to-clipboard/meta.yaml b/script/copy-to-clipboard/meta.yaml
index de631040b..f10ed5d9a 100644
--- a/script/copy-to-clipboard/meta.yaml
+++ b/script/copy-to-clipboard/meta.yaml
@@ -26,7 +26,7 @@ deps:
- tags: get,generic-python-lib,_package.pyperclip
input_mapping:
- text: CM_COPY_TO_CLIPBOARD_TEXT
- t: CM_COPY_TO_CLIPBOARD_TEXT
- add_quotes: CM_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES
- q: CM_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES
+ text: MLC_COPY_TO_CLIPBOARD_TEXT
+ t: MLC_COPY_TO_CLIPBOARD_TEXT
+ add_quotes: MLC_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES
+ q: MLC_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES
diff --git a/script/copy-to-clipboard/run.bat b/script/copy-to-clipboard/run.bat
index 545178f20..0e1db36fe 100644
--- a/script/copy-to-clipboard/run.bat
+++ b/script/copy-to-clipboard/run.bat
@@ -1,4 +1,4 @@
rem native script
-%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\code.py
+%MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\code.py
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/copy-to-clipboard/run.sh b/script/copy-to-clipboard/run.sh
index fa6f579f7..88087983d 100644
--- a/script/copy-to-clipboard/run.sh
+++ b/script/copy-to-clipboard/run.sh
@@ -1,4 +1,4 @@
#!/bin/bash
-${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/code.py
+${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/code.py
test $? -eq 0 || exit 1
diff --git a/script/create-conda-env/customize.py b/script/create-conda-env/customize.py
index ddc74fab9..e22b25b6c 100644
--- a/script/create-conda-env/customize.py
+++ b/script/create-conda-env/customize.py
@@ -12,7 +12,7 @@ def preprocess(i):
recursion_spaces = i['recursion_spaces']
- if env.get('CM_CONDA_ENV_NAME', '') == '':
+ if env.get('MLC_CONDA_ENV_NAME', '') == '':
return {'return': 1, 'error': 'Please use "_name." variation'}
return {'return': 0}
@@ -23,11 +23,11 @@ def postprocess(i):
conda_prefix = os.getcwd()
env['CONDA_PREFIX'] = conda_prefix
- env['CM_CONDA_PREFIX'] = conda_prefix
- env['CM_CONDA_BIN_PATH'] = os.path.join(conda_prefix, "bin")
- env['CM_CONDA_LIB_PATH'] = os.path.join(conda_prefix, "lib")
+ env['MLC_CONDA_PREFIX'] = conda_prefix
+ env['MLC_CONDA_BIN_PATH'] = os.path.join(conda_prefix, "bin")
+ env['MLC_CONDA_LIB_PATH'] = os.path.join(conda_prefix, "lib")
- env['+PATH'] = [env['CM_CONDA_BIN_PATH']]
- env['+LD_LIBRARY_PATH'] = [env['CM_CONDA_LIB_PATH']]
+ env['+PATH'] = [env['MLC_CONDA_BIN_PATH']]
+ env['+LD_LIBRARY_PATH'] = [env['MLC_CONDA_LIB_PATH']]
return {'return': 0}
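
postprocess() above exports the conda locations and prepends them via the list-valued `+PATH` and `+LD_LIBRARY_PATH` keys. How those `+` keys are merged is handled by the automation core, which is not shown here; the sketch below assumes the convention that a `+KEY` list is prepended to the existing value of `KEY`:

```python
import os

def apply_env(env: dict) -> dict:
    """Assumption: '+KEY' entries hold lists that are prepended to KEY."""
    merged = dict(os.environ)
    for key, value in env.items():
        if key.startswith("+"):
            name = key[1:]
            old = merged.get(name, "")
            merged[name] = os.pathsep.join(list(value) + ([old] if old else []))
        else:
            merged[key] = str(value)
    return merged

new_env = apply_env({"MLC_CONDA_PREFIX": "/tmp/conda",
                     "+PATH": ["/tmp/conda/bin"]})
assert new_env["PATH"].split(os.pathsep)[0] == "/tmp/conda/bin"
```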
diff --git a/script/create-conda-env/meta.yaml b/script/create-conda-env/meta.yaml
index 56a61f0f1..672843771 100644
--- a/script/create-conda-env/meta.yaml
+++ b/script/create-conda-env/meta.yaml
@@ -12,10 +12,10 @@ deps:
new_env_keys:
- +PATH
- +LD_LIBRARY_PATH
-- CM_CONDA_PREFIX
+- MLC_CONDA_PREFIX
- CONDA_PREFIX
-- CM_CONDA_BIN_PATH
-- CM_CONDA_LIB_PATH
+- MLC_CONDA_BIN_PATH
+- MLC_CONDA_LIB_PATH
tags:
- create
- get
@@ -27,4 +27,4 @@ uid: e39e0b04c86a40f2
variations:
name.#:
env:
- CM_CONDA_ENV_NAME: '#'
+ MLC_CONDA_ENV_NAME: '#'
diff --git a/script/create-conda-env/run.sh b/script/create-conda-env/run.sh
index 540dde9b1..f608722aa 100644
--- a/script/create-conda-env/run.sh
+++ b/script/create-conda-env/run.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-cmd="${CM_CONDA_BIN_WITH_PATH} create -p ${PWD}"
+cmd="${MLC_CONDA_BIN_WITH_PATH} create -p ${PWD}"
echo "$cmd"
eval "$cmd"
test $? -eq 0 || exit $?
diff --git a/script/create-custom-cache-entry/customize.py b/script/create-custom-cache-entry/customize.py
index ce3e0a988..a0299f50d 100644
--- a/script/create-custom-cache-entry/customize.py
+++ b/script/create-custom-cache-entry/customize.py
@@ -9,7 +9,7 @@ def preprocess(i):
env = i['env']
extra_cache_tags = []
- if env.get('CM_EXTRA_CACHE_TAGS', '').strip() == '':
+ if env.get('MLC_EXTRA_CACHE_TAGS', '').strip() == '':
print('')
extra_cache_tags_str = input(
'Enter extra tags for the custom CACHE entry separated by comma: ')
@@ -23,7 +23,7 @@ def postprocess(i):
env = i['env']
- path = env.get('CM_CUSTOM_CACHE_ENTRY_PATH', '').strip()
+ path = env.get('MLC_CUSTOM_CACHE_ENTRY_PATH', '').strip()
if path != '':
if not os.path.isdir(path):
@@ -32,17 +32,17 @@ def postprocess(i):
path = os.getcwd()
x = ''
- env_key = env.get('CM_CUSTOM_CACHE_ENTRY_ENV_KEY', '')
+ env_key = env.get('MLC_CUSTOM_CACHE_ENTRY_ENV_KEY', '')
if env_key != '':
x = env_key + '_'
- env['CM_CUSTOM_CACHE_ENTRY_{}PATH'.format(x)] = path
- env['CM_CUSTOM_CACHE_ENTRY_PATH'] = path
+ env['MLC_CUSTOM_CACHE_ENTRY_{}PATH'.format(x)] = path
+ env['MLC_CUSTOM_CACHE_ENTRY_PATH'] = path
- env_key2 = env.get('CM_CUSTOM_CACHE_ENTRY_ENV_KEY2', '')
+ env_key2 = env.get('MLC_CUSTOM_CACHE_ENTRY_ENV_KEY2', '')
v = env.get(env_key2, '')
real_path = v if v != '' else path
- env['CM_CUSTOM_CACHE_ENTRY_{}REAL_PATH'.format(x)] = real_path
+ env['MLC_CUSTOM_CACHE_ENTRY_{}REAL_PATH'.format(x)] = real_path
return {'return': 0}
diff --git a/script/create-custom-cache-entry/meta.yaml b/script/create-custom-cache-entry/meta.yaml
index 7272bb99a..d7f876fae 100644
--- a/script/create-custom-cache-entry/meta.yaml
+++ b/script/create-custom-cache-entry/meta.yaml
@@ -15,13 +15,13 @@ category: CM automation
cache: true
input_mapping:
- env_key: CM_CUSTOM_CACHE_ENTRY_ENV_KEY
- env_key2: CM_CUSTOM_CACHE_ENTRY_ENV_KEY2
- path: CM_CUSTOM_CACHE_ENTRY_PATH
- to: CM_CUSTOM_CACHE_ENTRY_PATH
+ env_key: MLC_CUSTOM_CACHE_ENTRY_ENV_KEY
+ env_key2: MLC_CUSTOM_CACHE_ENTRY_ENV_KEY2
+ path: MLC_CUSTOM_CACHE_ENTRY_PATH
+ to: MLC_CUSTOM_CACHE_ENTRY_PATH
new_env_keys:
-- CM_CUSTOM_CACHE_ENTRY*
+- MLC_CUSTOM_CACHE_ENTRY*
print_env_at_the_end:
- CM_CUSTOM_CACHE_ENTRY_PATH: "Path to custom cache entry"
+ MLC_CUSTOM_CACHE_ENTRY_PATH: "Path to custom cache entry"
diff --git a/script/create-fpgaconvnet-app-tinyml/customize.py b/script/create-fpgaconvnet-app-tinyml/customize.py
index a12f17f36..ae8668d89 100644
--- a/script/create-fpgaconvnet-app-tinyml/customize.py
+++ b/script/create-fpgaconvnet-app-tinyml/customize.py
@@ -12,15 +12,15 @@ def preprocess(i):
automation = i['automation']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
- network_env_name = env['CM_TINY_FPGACONVNET_NETWORK_ENV_NAME']
- run_dir = env['CM_TINY_FPGACONVNET_' + network_env_name + '_RUN_DIR']
+ network_env_name = env['MLC_TINY_FPGACONVNET_NETWORK_ENV_NAME']
+ run_dir = env['MLC_TINY_FPGACONVNET_' + network_env_name + '_RUN_DIR']
run_cmd = "cd " + run_dir + " && xsct create_boot_image.tcl"
- env['CM_RUN_CMD'] = run_cmd
- env['CM_RUN_DIR'] = run_dir
+ env['MLC_RUN_CMD'] = run_cmd
+ env['MLC_RUN_DIR'] = run_dir
return {'return': 0}
@@ -30,10 +30,10 @@ def postprocess(i):
env = i['env']
return {'return': 1}
- network = env['CM_TINY_NETWORK_NAME']
+ network = env['MLC_TINY_NETWORK_NAME']
json_location = os.path.join(
- env['CM_RUN_DIR'],
- env['CM_TINY_NETWORK_NAME'] + ".json")
+ env['MLC_RUN_DIR'],
+ env['MLC_TINY_NETWORK_NAME'] + ".json")
if os.path.exists(json_location):
print(
f"JSON configuration file for {network} created at {json_location}")
diff --git a/script/create-fpgaconvnet-app-tinyml/meta.yaml b/script/create-fpgaconvnet-app-tinyml/meta.yaml
index 3ad1cdc9b..ad63685de 100644
--- a/script/create-fpgaconvnet-app-tinyml/meta.yaml
+++ b/script/create-fpgaconvnet-app-tinyml/meta.yaml
@@ -39,6 +39,6 @@ variations:
tags: _zc706
default: true
env:
- CM_TINY_BOARD: zc706
+ MLC_TINY_BOARD: zc706
group: board
versions: {}
diff --git a/script/create-fpgaconvnet-app-tinyml/run.sh b/script/create-fpgaconvnet-app-tinyml/run.sh
index fe67c233c..35de74bab 100644
--- a/script/create-fpgaconvnet-app-tinyml/run.sh
+++ b/script/create-fpgaconvnet-app-tinyml/run.sh
@@ -1,11 +1,11 @@
#!/bin/bash
-#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH}
#To export any variable
#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
-#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
function exit_if_error() {
test $? -eq 0 || exit $?
@@ -15,12 +15,12 @@ function run() {
echo "Running: "
echo "$1"
echo ""
- if [[ ${CM_FAKE_RUN} != 'yes' ]]; then
+ if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then
eval "$1"
fi
exit_if_error
}
#Add your run commands here...
-run "${CM_RUN_CMD}"
+run "${MLC_RUN_CMD}"
diff --git a/script/create-fpgaconvnet-config-tinyml/customize.py b/script/create-fpgaconvnet-config-tinyml/customize.py
index 90ed2d6f0..853ff007f 100644
--- a/script/create-fpgaconvnet-config-tinyml/customize.py
+++ b/script/create-fpgaconvnet-config-tinyml/customize.py
@@ -12,30 +12,30 @@ def preprocess(i):
automation = i['automation']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
code_path = os.path.join(
- env['CM_GIT_REPO_CHECKOUT_PATH'],
+ env['MLC_GIT_REPO_CHECKOUT_PATH'],
"closed",
"fpgaconvnet",
"code")
- network_env_name = env['CM_TINY_NETWORK_NAME'].replace("-", "_").upper()
- env['CM_TINY_FPGACONVNET_NETWORK_ENV_NAME'] = network_env_name
- env['CM_TINY_FPGACONVNET_' + network_env_name + '_CODE_PATH'] = code_path
+ network_env_name = env['MLC_TINY_NETWORK_NAME'].replace("-", "_").upper()
+ env['MLC_TINY_FPGACONVNET_NETWORK_ENV_NAME'] = network_env_name
+ env['MLC_TINY_FPGACONVNET_' + network_env_name + '_CODE_PATH'] = code_path
- board = env.get('CM_TINY_BOARD', 'zc706')
+ board = env.get('MLC_TINY_BOARD', 'zc706')
- benchmark = env.get('CM_TINY_BENCHMARK', 'ic')
+ benchmark = env.get('MLC_TINY_BENCHMARK', 'ic')
run_dir = os.path.join(code_path, board, benchmark)
- env['CM_TINY_FPGACONVNET_' + network_env_name + '_RUN_DIR'] = run_dir
+ env['MLC_TINY_FPGACONVNET_' + network_env_name + '_RUN_DIR'] = run_dir
run_cmd = "cd " + run_dir + " && " + \
- env['CM_PYTHON_BIN_WITH_PATH'] + " " + "create_config.py"
+ env['MLC_PYTHON_BIN_WITH_PATH'] + " " + "create_config.py"
- env['ML_MODEL_FILE_WITH_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH']
- env['CM_RUN_CMD'] = run_cmd
- env['CM_RUN_DIR'] = run_dir
+ env['ML_MODEL_FILE_WITH_PATH'] = env['MLC_ML_MODEL_FILE_WITH_PATH']
+ env['MLC_RUN_CMD'] = run_cmd
+ env['MLC_RUN_DIR'] = run_dir
return {'return': 0}
@@ -44,21 +44,21 @@ def postprocess(i):
env = i['env']
- network = env['CM_TINY_NETWORK_NAME']
- env['CM_TINY_FPGACONVNET_NETWORK_NAME'] = network
- network_env_name = env['CM_TINY_FPGACONVNET_NETWORK_ENV_NAME']
+ network = env['MLC_TINY_NETWORK_NAME']
+ env['MLC_TINY_FPGACONVNET_NETWORK_NAME'] = network
+ network_env_name = env['MLC_TINY_FPGACONVNET_NETWORK_ENV_NAME']
json_location = os.path.join(
- env['CM_RUN_DIR'],
- env['CM_TINY_NETWORK_NAME'] + ".json")
+ env['MLC_RUN_DIR'],
+ env['MLC_TINY_NETWORK_NAME'] + ".json")
if os.path.exists(json_location):
print(
f"JSON configuration file for {network} created at {json_location}")
else:
return {'return': 1, 'error': "JSON configuration file generation failed"}
- env['CM_TINY_FPGACONVNET_CONFIG_FILE_' +
+ env['MLC_TINY_FPGACONVNET_CONFIG_FILE_' +
network_env_name + '_PATH'] = json_location
- env['CM_GET_DEPENDENT_CACHED_PATH'] = json_location
+ env['MLC_GET_DEPENDENT_CACHED_PATH'] = json_location
return {'return': 0}
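
The config script derives per-network env key names from MLC_TINY_NETWORK_NAME. A worked example using the zc706-resnet name set in the meta.yaml below:

```python
# Worked example of the key derivation in preprocess() above,
# using the zc706-resnet network name from meta.yaml.
network = "zc706-resnet"
network_env_name = network.replace("-", "_").upper()
assert network_env_name == "ZC706_RESNET"

# Keys the script then reads and writes for this network:
run_dir_key = "MLC_TINY_FPGACONVNET_" + network_env_name + "_RUN_DIR"
config_key = "MLC_TINY_FPGACONVNET_CONFIG_FILE_" + network_env_name + "_PATH"
print(run_dir_key)  # MLC_TINY_FPGACONVNET_ZC706_RESNET_RUN_DIR
print(config_key)   # MLC_TINY_FPGACONVNET_CONFIG_FILE_ZC706_RESNET_PATH
```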
diff --git a/script/create-fpgaconvnet-config-tinyml/meta.yaml b/script/create-fpgaconvnet-config-tinyml/meta.yaml
index f74a3165d..b22316e2f 100644
--- a/script/create-fpgaconvnet-config-tinyml/meta.yaml
+++ b/script/create-fpgaconvnet-config-tinyml/meta.yaml
@@ -12,7 +12,7 @@ deps:
input_description: {}
input_mapping: {}
new_env_keys:
-- CM_TINY_FPGACONVNET*
+- MLC_TINY_FPGACONVNET*
new_state_keys: []
post_deps: []
posthook_deps: []
@@ -32,9 +32,9 @@ variations:
zc706:
default: true
env:
- CM_TINY_BOARD: zc706
+ MLC_TINY_BOARD: zc706
group: board
zc706,ic:
env:
- CM_TINY_NETWORK_NAME: zc706-resnet
+ MLC_TINY_NETWORK_NAME: zc706-resnet
versions: {}
diff --git a/script/create-fpgaconvnet-config-tinyml/run.sh b/script/create-fpgaconvnet-config-tinyml/run.sh
index fe67c233c..35de74bab 100644
--- a/script/create-fpgaconvnet-config-tinyml/run.sh
+++ b/script/create-fpgaconvnet-config-tinyml/run.sh
@@ -1,11 +1,11 @@
#!/bin/bash
-#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH}
#To export any variable
#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
-#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
function exit_if_error() {
test $? -eq 0 || exit $?
@@ -15,12 +15,12 @@ function run() {
echo "Running: "
echo "$1"
echo ""
- if [[ ${CM_FAKE_RUN} != 'yes' ]]; then
+ if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then
eval "$1"
fi
exit_if_error
}
#Add your run commands here...
-run "${CM_RUN_CMD}"
+run "${MLC_RUN_CMD}"
diff --git a/script/create-patch/customize.py b/script/create-patch/customize.py
index 14c64623d..573e6735e 100644
--- a/script/create-patch/customize.py
+++ b/script/create-patch/customize.py
@@ -12,23 +12,23 @@ def preprocess(i):
automation = i['automation']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
- new_dir = env.get('CM_CREATE_PATCH_NEW', '')
+ new_dir = env.get('MLC_CREATE_PATCH_NEW', '')
if new_dir == '':
return {'return': 1, 'error': 'specify NEW directory using --new'}
if not os.path.isdir(new_dir):
return {'return': 1,
'error': 'NEW directory doesn\'t exist {}'.format(new_dir)}
- old_dir = env.get('CM_CREATE_PATCH_OLD', '')
+ old_dir = env.get('MLC_CREATE_PATCH_OLD', '')
if old_dir == '':
return {'return': 1, 'error': 'specify OLD directory using --old'}
if not os.path.isdir(old_dir):
return {'return': 1,
'error': 'OLD directory doesn\'t exist {}'.format(old_dir)}
- exclude = env.get('CM_CREATE_PATCH_EXCLUDE', '').strip()
+ exclude = env.get('MLC_CREATE_PATCH_EXCLUDE', '').strip()
x_exclude = ''
if exclude != '':
diff --git a/script/create-patch/meta.yaml b/script/create-patch/meta.yaml
index cbcedb648..d5c83fbd0 100644
--- a/script/create-patch/meta.yaml
+++ b/script/create-patch/meta.yaml
@@ -12,9 +12,9 @@ deps:
- tags: detect,os
input_mapping:
- new: CM_CREATE_PATCH_NEW
- old: CM_CREATE_PATCH_OLD
- exclude: CM_CREATE_PATCH_EXCLUDE
+ new: MLC_CREATE_PATCH_NEW
+ old: MLC_CREATE_PATCH_OLD
+ exclude: MLC_CREATE_PATCH_EXCLUDE
tags:
- create
diff --git a/script/destroy-terraform/run.sh b/script/destroy-terraform/run.sh
index 9e0ae31ac..25e40a3a0 100644
--- a/script/destroy-terraform/run.sh
+++ b/script/destroy-terraform/run.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-source ${CM_TERRAFORM_CONFIG_DIR}/credentials.sh
-source ${CM_TERRAFORM_CONFIG_DIR}/apply_credentials.sh
-cd ${CM_TERRAFORM_RUN_DIR}
+source ${MLC_TERRAFORM_CONFIG_DIR}/credentials.sh
+source ${MLC_TERRAFORM_CONFIG_DIR}/apply_credentials.sh
+cd ${MLC_TERRAFORM_RUN_DIR}
terraform destroy --auto-approve
test $? -eq 0 || exit 1
diff --git a/script/detect-cpu/README-extra.md b/script/detect-cpu/README-extra.md
index c2326c281..3e6e1dad0 100644
--- a/script/detect-cpu/README-extra.md
+++ b/script/detect-cpu/README-extra.md
@@ -2,14 +2,14 @@
This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the host CPU details and exports them in a unified list of environment variables to be reused across the supported operating systems.
## Exported Variables
-* `CM_HOST_CPU_L1I_CACHE_SIZE`
-* `CM_HOST_CPU_L2_CACHE_SIZE`
-* `CM_HOST_CPU_MEMSIZE`
-* `CM_HOST_CPU_SOCKETS`
-* `CM_HOST_CPU_THREADS_PER_CORE`
-* `CM_HOST_CPU_TOTAL_CORES`
-* `CM_HOST_CPU_TOTAL_LOGICAL_CORES`
-* `CM_HOST_CPU_TOTAL_PHYSICAL_CORES`
+* `MLC_HOST_CPU_L1I_CACHE_SIZE`
+* `MLC_HOST_CPU_L2_CACHE_SIZE`
+* `MLC_HOST_CPU_MEMSIZE`
+* `MLC_HOST_CPU_SOCKETS`
+* `MLC_HOST_CPU_THREADS_PER_CORE`
+* `MLC_HOST_CPU_TOTAL_CORES`
+* `MLC_HOST_CPU_TOTAL_LOGICAL_CORES`
+* `MLC_HOST_CPU_TOTAL_PHYSICAL_CORES`
## Supported and Tested OS
1. Ubuntu 18.04, 20.04, 22.04
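
After detect-cpu runs, downstream scripts read the renamed variables like any other environment variables. A minimal consumer sketch, with variable names taken from the exported list above:

```python
import os

# Values are exported as strings, so convert before doing arithmetic.
cores = int(os.environ.get("MLC_HOST_CPU_TOTAL_CORES", "1"))
threads = int(os.environ.get("MLC_HOST_CPU_THREADS_PER_CORE", "1"))
print(f"{cores} cores, {threads} thread(s) per core")
```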
diff --git a/script/detect-cpu/customize.py b/script/detect-cpu/customize.py
index cd0c6dc95..6ca244cf7 100644
--- a/script/detect-cpu/customize.py
+++ b/script/detect-cpu/customize.py
@@ -119,36 +119,36 @@ def postprocess(i):
# Unifying some CPU info across different platforms
unified_env = {
- 'CM_CPUINFO_CPUs': 'CM_HOST_CPU_TOTAL_CORES',
- 'CM_CPUINFO_L1d_cache': 'CM_HOST_CPU_L1D_CACHE_SIZE',
- 'CM_CPUINFO_L1i_cache': 'CM_HOST_CPU_L1I_CACHE_SIZE',
- 'CM_CPUINFO_L2_cache': 'CM_HOST_CPU_L2_CACHE_SIZE',
- 'CM_CPUINFO_L3_cache': 'CM_HOST_CPU_L3_CACHE_SIZE',
- 'CM_CPUINFO_Sockets': 'CM_HOST_CPU_SOCKETS',
- 'CM_CPUINFO_NUMA_nodes': 'CM_HOST_CPU_NUMA_NODES',
- 'CM_CPUINFO_Cores_per_socket': 'CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET',
- 'CM_CPUINFO_Cores_per_cluster': 'CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET',
- 'CM_CPUINFO_Threads_per_core': 'CM_HOST_CPU_THREADS_PER_CORE',
- 'CM_CPUINFO_Architecture': 'CM_HOST_CPU_ARCHITECTURE',
- 'CM_CPUINFO_CPU_family': 'CM_HOST_CPU_FAMILY',
- 'CM_CPUINFO_CPU_max_MHz': 'CM_HOST_CPU_MAX_MHZ',
- 'CM_CPUINFO_Model_name': 'CM_HOST_CPU_MODEL_NAME',
- 'CM_CPUINFO_On_line_CPUs_list': 'CM_HOST_CPU_ON_LINE_CPUS_LIST',
- 'CM_CPUINFO_Vendor_ID': 'CM_HOST_CPU_VENDOR_ID',
- 'CM_CPUINFO_hw_physicalcpu': 'CM_HOST_CPU_TOTAL_PHYSICAL_CORES',
- 'CM_CPUINFO_hw_logicalcpu': 'CM_HOST_CPU_TOTAL_CORES',
- 'CM_CPUINFO_hw_packages': 'CM_HOST_CPU_SOCKETS',
- 'CM_CPUINFO_hw_memsize': 'CM_HOST_CPU_MEMSIZE',
- 'CM_CPUINFO_hw_l1icachesize': 'CM_HOST_CPU_L1I_CACHE_SIZE',
- 'CM_CPUINFO_hw_l1dcachesize': 'CM_HOST_CPU_L1D_CACHE_SIZE',
- 'CM_CPUINFO_hw_l2cachesize': 'CM_HOST_CPU_L2_CACHE_SIZE'
+ 'MLC_CPUINFO_CPUs': 'MLC_HOST_CPU_TOTAL_CORES',
+ 'MLC_CPUINFO_L1d_cache': 'MLC_HOST_CPU_L1D_CACHE_SIZE',
+ 'MLC_CPUINFO_L1i_cache': 'MLC_HOST_CPU_L1I_CACHE_SIZE',
+ 'MLC_CPUINFO_L2_cache': 'MLC_HOST_CPU_L2_CACHE_SIZE',
+ 'MLC_CPUINFO_L3_cache': 'MLC_HOST_CPU_L3_CACHE_SIZE',
+ 'MLC_CPUINFO_Sockets': 'MLC_HOST_CPU_SOCKETS',
+ 'MLC_CPUINFO_NUMA_nodes': 'MLC_HOST_CPU_NUMA_NODES',
+ 'MLC_CPUINFO_Cores_per_socket': 'MLC_HOST_CPU_PHYSICAL_CORES_PER_SOCKET',
+ 'MLC_CPUINFO_Cores_per_cluster': 'MLC_HOST_CPU_PHYSICAL_CORES_PER_SOCKET',
+ 'MLC_CPUINFO_Threads_per_core': 'MLC_HOST_CPU_THREADS_PER_CORE',
+ 'MLC_CPUINFO_Architecture': 'MLC_HOST_CPU_ARCHITECTURE',
+ 'MLC_CPUINFO_CPU_family': 'MLC_HOST_CPU_FAMILY',
+ 'MLC_CPUINFO_CPU_max_MHz': 'MLC_HOST_CPU_MAX_MHZ',
+ 'MLC_CPUINFO_Model_name': 'MLC_HOST_CPU_MODEL_NAME',
+ 'MLC_CPUINFO_On_line_CPUs_list': 'MLC_HOST_CPU_ON_LINE_CPUS_LIST',
+ 'MLC_CPUINFO_Vendor_ID': 'MLC_HOST_CPU_VENDOR_ID',
+ 'MLC_CPUINFO_hw_physicalcpu': 'MLC_HOST_CPU_TOTAL_PHYSICAL_CORES',
+ 'MLC_CPUINFO_hw_logicalcpu': 'MLC_HOST_CPU_TOTAL_CORES',
+ 'MLC_CPUINFO_hw_packages': 'MLC_HOST_CPU_SOCKETS',
+ 'MLC_CPUINFO_hw_memsize': 'MLC_HOST_CPU_MEMSIZE',
+ 'MLC_CPUINFO_hw_l1icachesize': 'MLC_HOST_CPU_L1I_CACHE_SIZE',
+ 'MLC_CPUINFO_hw_l1dcachesize': 'MLC_HOST_CPU_L1D_CACHE_SIZE',
+ 'MLC_CPUINFO_hw_l2cachesize': 'MLC_HOST_CPU_L2_CACHE_SIZE'
}
- if env['CM_HOST_OS_TYPE'] == 'linux':
+ if env['MLC_HOST_OS_TYPE'] == 'linux':
vkeys = ['Architecture', 'Model name', 'Vendor ID', 'CPU family', 'NUMA node(s)', 'CPU(s)',
'On-line CPU(s) list', 'Socket(s)', 'Core(s) per socket', 'Core(s) per cluster', 'Thread(s) per core', 'L1d cache', 'L1i cache', 'L2 cache',
'L3 cache', 'CPU max MHz']
- elif env['CM_HOST_OS_FLAVOR'] == 'macos':
+ elif env['MLC_HOST_OS_FLAVOR'] == 'macos':
vkeys = ['hw.physicalcpu', 'hw.logicalcpu', 'hw.packages', 'hw.ncpu', 'hw.memsize', 'hw.l1icachesize',
'hw.l2cachesize']
if vkeys:
@@ -156,7 +156,7 @@ def postprocess(i):
v = s.split(':')
key = v[0]
if key in vkeys:
- env_key = 'CM_CPUINFO_' + key.replace(
+ env_key = 'MLC_CPUINFO_' + key.replace(
" ",
"_").replace(
'(',
@@ -172,21 +172,21 @@ def postprocess(i):
else:
env[env_key] = v[1].strip()
- if env.get('CM_HOST_CPU_SOCKETS', '') == '-': # assume as 1
- env['CM_HOST_CPU_SOCKETS'] = '1'
+ if env.get('MLC_HOST_CPU_SOCKETS', '') == '-': # assume as 1
+ env['MLC_HOST_CPU_SOCKETS'] = '1'
- if env.get('CM_HOST_CPU_TOTAL_CORES', '') != '' and env.get(
- 'CM_HOST_CPU_TOTAL_LOGICAL_CORES', '') == '':
- env['CM_HOST_CPU_TOTAL_LOGICAL_CORES'] = env['CM_HOST_CPU_TOTAL_CORES']
+ if env.get('MLC_HOST_CPU_TOTAL_CORES', '') != '' and env.get(
+ 'MLC_HOST_CPU_TOTAL_LOGICAL_CORES', '') == '':
+ env['MLC_HOST_CPU_TOTAL_LOGICAL_CORES'] = env['MLC_HOST_CPU_TOTAL_CORES']
- if env.get('CM_HOST_CPU_TOTAL_LOGICAL_CORES', '') != '' and env.get(
- 'CM_HOST_CPU_TOTAL_PHYSICAL_CORES', '') != '' and env.get('CM_HOST_CPU_THREADS_PER_CORE', '') == '':
- env['CM_HOST_CPU_THREADS_PER_CORE'] = str(int(int(env['CM_HOST_CPU_TOTAL_LOGICAL_CORES']) //
- int(env['CM_HOST_CPU_TOTAL_PHYSICAL_CORES'])))
+ if env.get('MLC_HOST_CPU_TOTAL_LOGICAL_CORES', '') != '' and env.get(
+ 'MLC_HOST_CPU_TOTAL_PHYSICAL_CORES', '') != '' and env.get('MLC_HOST_CPU_THREADS_PER_CORE', '') == '':
+ env['MLC_HOST_CPU_THREADS_PER_CORE'] = str(int(int(env['MLC_HOST_CPU_TOTAL_LOGICAL_CORES']) //
+ int(env['MLC_HOST_CPU_TOTAL_PHYSICAL_CORES'])))
- if env.get('CM_HOST_CPU_SOCKETS', '') != '' and env.get('CM_HOST_CPU_TOTAL_PHYSICAL_CORES',
- '') != '' and env.get('CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET', '') == '':
- env['CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET'] = str(
- int(env['CM_HOST_CPU_TOTAL_PHYSICAL_CORES']) // int(env['CM_HOST_CPU_SOCKETS']))
+ if env.get('MLC_HOST_CPU_SOCKETS', '') != '' and env.get('MLC_HOST_CPU_TOTAL_PHYSICAL_CORES',
+ '') != '' and env.get('MLC_HOST_CPU_PHYSICAL_CORES_PER_SOCKET', '') == '':
+ env['MLC_HOST_CPU_PHYSICAL_CORES_PER_SOCKET'] = str(
+ int(env['MLC_HOST_CPU_TOTAL_PHYSICAL_CORES']) // int(env['MLC_HOST_CPU_SOCKETS']))
return {'return': 0}
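
The fallback logic at the end of postprocess() derives missing counts with integer division. As plain arithmetic:

```python
# The derivations in postprocess() above, as plain arithmetic.
logical, physical, sockets = 16, 8, 2

threads_per_core = logical // physical   # MLC_HOST_CPU_THREADS_PER_CORE
cores_per_socket = physical // sockets   # MLC_HOST_CPU_PHYSICAL_CORES_PER_SOCKET

assert threads_per_core == 2
assert cores_per_socket == 4
```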
diff --git a/script/detect-cpu/meta.yaml b/script/detect-cpu/meta.yaml
index 1da7a920e..7befa2ece 100644
--- a/script/detect-cpu/meta.yaml
+++ b/script/detect-cpu/meta.yaml
@@ -9,9 +9,9 @@ clean_files:
deps:
- tags: detect,os
new_env_keys:
-- CM_HOST_CPU_*
-- CM_HOST_MEMORY_CAPACITY
-- CM_HOST_DISK_CAPACITY
+- MLC_HOST_CPU_*
+- MLC_HOST_MEMORY_CAPACITY
+- MLC_HOST_DISK_CAPACITY
new_state_keys:
- host_device_raw_info
tags:
diff --git a/script/detect-cpu/run.sh b/script/detect-cpu/run.sh
index 2ca2fcc9b..b57ecc367 100644
--- a/script/detect-cpu/run.sh
+++ b/script/detect-cpu/run.sh
@@ -11,37 +11,37 @@ extract_field() {
echo "${value:-$default}"
}
-if [[ ${CM_HOST_OS_FLAVOR} == "macos" ]]; then
+if [[ ${MLC_HOST_OS_FLAVOR} == "macos" ]]; then
sysctl -a | grep hw > tmp-lscpu.out
else
lscpu > tmp-lscpu.out
memory_capacity=`free -h --si | grep Mem: | tr -s ' ' | cut -d' ' -f2`
- echo "CM_HOST_MEMORY_CAPACITY=$memory_capacity">>tmp-run-env.out
+ echo "MLC_HOST_MEMORY_CAPACITY=$memory_capacity">>tmp-run-env.out
disk_capacity=`df -h --total -l |grep total |tr -s ' '|cut -d' ' -f2`
- echo "CM_HOST_DISK_CAPACITY=$disk_capacity">>tmp-run-env.out
+ echo "MLC_HOST_DISK_CAPACITY=$disk_capacity">>tmp-run-env.out
# extract cpu information which are not there in lscpu
- CM_HOST_CPU_WRITE_PROTECT_SUPPORT=$(extract_field "wp" "Not Found")
- CM_HOST_CPU_MICROCODE=$(extract_field "microcode" "Not Found")
- CM_HOST_CPU_FPU_SUPPORT=$(extract_field "fpu" "Not Found")
- CM_HOST_CPU_FPU_EXCEPTION_SUPPORT=$(extract_field "fpu_exception" "Not Found")
- CM_HOST_CPU_BUGS=$(extract_field "bugs" "Not Found")
- CM_HOST_CPU_TLB_SIZE=$(extract_field "TLB size" "Not Found")
- CM_HOST_CPU_CFLUSH_SIZE=$(extract_field "clflush size" "Not Found")
- CM_HOST_CACHE_ALIGNMENT_SIZE=$(extract_field "cache_alignment" "Not Found")
- CM_HOST_POWER_MANAGEMENT=$(extract_field "power management" "Not Found")
+ MLC_HOST_CPU_WRITE_PROTECT_SUPPORT=$(extract_field "wp" "Not Found")
+ MLC_HOST_CPU_MICROCODE=$(extract_field "microcode" "Not Found")
+ MLC_HOST_CPU_FPU_SUPPORT=$(extract_field "fpu" "Not Found")
+ MLC_HOST_CPU_FPU_EXCEPTION_SUPPORT=$(extract_field "fpu_exception" "Not Found")
+ MLC_HOST_CPU_BUGS=$(extract_field "bugs" "Not Found")
+ MLC_HOST_CPU_TLB_SIZE=$(extract_field "TLB size" "Not Found")
+ MLC_HOST_CPU_CFLUSH_SIZE=$(extract_field "clflush size" "Not Found")
+ MLC_HOST_CACHE_ALIGNMENT_SIZE=$(extract_field "cache_alignment" "Not Found")
+ MLC_HOST_POWER_MANAGEMENT=$(extract_field "power management" "Not Found")
# Write results to a file
{
- echo "CM_HOST_CPU_WRITE_PROTECT_SUPPORT=$CM_HOST_CPU_WRITE_PROTECT_SUPPORT"
- echo "CM_HOST_CPU_MICROCODE=$CM_HOST_CPU_MICROCODE"
- echo "CM_HOST_CPU_FPU_SUPPORT=$CM_HOST_CPU_FPU_SUPPORT"
- echo "CM_HOST_CPU_FPU_EXCEPTION_SUPPORT=$CM_HOST_CPU_FPU_EXCEPTION_SUPPORT"
- echo "CM_HOST_CPU_BUGS=$CM_HOST_CPU_BUGS"
- echo "CM_HOST_CPU_TLB_SIZE=$CM_HOST_CPU_TLB_SIZE"
- echo "CM_HOST_CPU_CFLUSH_SIZE=$CM_HOST_CPU_CFLUSH_SIZE"
- echo "CM_HOST_CACHE_ALIGNMENT_SIZE=$CM_HOST_CACHE_ALIGNMENT_SIZE"
- echo "CM_HOST_POWER_MANAGEMENT=$CM_HOST_POWER_MANAGEMENT"
+ echo "MLC_HOST_CPU_WRITE_PROTECT_SUPPORT=$MLC_HOST_CPU_WRITE_PROTECT_SUPPORT"
+ echo "MLC_HOST_CPU_MICROCODE=$MLC_HOST_CPU_MICROCODE"
+ echo "MLC_HOST_CPU_FPU_SUPPORT=$MLC_HOST_CPU_FPU_SUPPORT"
+ echo "MLC_HOST_CPU_FPU_EXCEPTION_SUPPORT=$MLC_HOST_CPU_FPU_EXCEPTION_SUPPORT"
+ echo "MLC_HOST_CPU_BUGS=$MLC_HOST_CPU_BUGS"
+ echo "MLC_HOST_CPU_TLB_SIZE=$MLC_HOST_CPU_TLB_SIZE"
+ echo "MLC_HOST_CPU_CFLUSH_SIZE=$MLC_HOST_CPU_CFLUSH_SIZE"
+ echo "MLC_HOST_CACHE_ALIGNMENT_SIZE=$MLC_HOST_CACHE_ALIGNMENT_SIZE"
+ echo "MLC_HOST_POWER_MANAGEMENT=$MLC_HOST_POWER_MANAGEMENT"
} >> tmp-run-env.out
fi
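
run.sh fills the gaps that lscpu leaves by pulling fields out of /proc/cpuinfo with an extract_field helper, whose body is truncated in the hunk above. A hedged Python equivalent, assuming `key : value` lines as in /proc/cpuinfo:

```python
def extract_field(text: str, field: str, default: str = "Not Found") -> str:
    """Return the value of the first 'field : value' line, mirroring the
    shell helper in run.sh (whose body is truncated in this diff)."""
    for line in text.splitlines():
        key, _, value = line.partition(":")
        if key.strip() == field:
            return value.strip() or default
    return default

sample = "fpu\t\t: yes\nmicrocode\t: 0xde\n"
assert extract_field(sample, "fpu") == "yes"
assert extract_field(sample, "wp") == "Not Found"
```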
diff --git a/script/detect-os/customize.py b/script/detect-os/customize.py
index 82ee00d7a..fb87b1220 100644
--- a/script/detect-os/customize.py
+++ b/script/detect-os/customize.py
@@ -11,9 +11,9 @@ def preprocess(i):
os_info = i['os_info']
# Update env variables
- env['CM_HOST_OS_TYPE'] = os_info['platform']
- env['CM_HOST_OS_BITS'] = os_info['bits']
- env['CM_HOST_PYTHON_BITS'] = os_info['python_bits']
+ env['MLC_HOST_OS_TYPE'] = os_info['platform']
+ env['MLC_HOST_OS_BITS'] = os_info['bits']
+ env['MLC_HOST_PYTHON_BITS'] = os_info['python_bits']
# Update state (demo)
# state['os_info'] = os_info
@@ -43,7 +43,7 @@ def postprocess(i):
for _dir in dirs:
if _dir != '' and _dir not in lib_dir:
lib_dir.append(_dir)
- env['+CM_HOST_OS_DEFAULT_LIBRARY_PATH'] = lib_dir
+ env['+MLC_HOST_OS_DEFAULT_LIBRARY_PATH'] = lib_dir
r = utils.load_txt(file_name='tmp-run.out',
check_if_exists=True,
@@ -56,54 +56,54 @@ def postprocess(i):
state['os_uname_machine'] = s[0]
state['os_uname_all'] = s[1]
- env['CM_HOST_OS_MACHINE'] = state['os_uname_machine']
+ env['MLC_HOST_OS_MACHINE'] = state['os_uname_machine']
else:
- env['CM_HOST_OS_PACKAGE_MANAGER'] = "choco"
+ env['MLC_HOST_OS_PACKAGE_MANAGER'] = "choco"
import platform
- env['CM_HOST_SYSTEM_NAME'] = platform.node()
-
- if 'CM_HOST_OS_PACKAGE_MANAGER' not in env:
- if env.get('CM_HOST_OS_FLAVOR', '') == "ubuntu" or \
- "debian" in env.get('CM_HOST_OS_FLAVOR_LIKE', '') or \
- env.get('CM_HOST_OS_FLAVOR', '') == "debian":
- env['CM_HOST_OS_PACKAGE_MANAGER'] = "apt"
- if env.get('CM_HOST_OS_FLAVOR', '') == "rhel" or \
- "rhel" in env.get('CM_HOST_OS_FLAVOR_LIKE', ''):
- env['CM_HOST_OS_PACKAGE_MANAGER'] = "dnf"
- if env.get('CM_HOST_OS_FLAVOR', '') == "amzn":
- env['CM_HOST_OS_PACKAGE_MANAGER'] = "yum"
- if env.get('CM_HOST_OS_FLAVOR_LIKE', '') == "arch":
- env['CM_HOST_OS_PACKAGE_MANAGER'] = "arch"
- if env.get('CM_HOST_OS_FLAVOR', '') == "macos":
- env['CM_HOST_OS_PACKAGE_MANAGER'] = "brew"
- if env.get('CM_HOST_OS_FLAVOR', '') == "sles":
- env['CM_HOST_OS_PACKAGE_MANAGER'] = "zypper"
- if env.get('CM_HOST_OS_PACKAGE_MANAGER', '') == "apt":
- env['CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "DEBIAN_FRONTEND=noninteractive apt-get install -y"
- env['CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "apt-get update -y"
- elif env.get('CM_HOST_OS_PACKAGE_MANAGER', '') == "dnf":
- env['CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "dnf install -y"
- env['CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "dnf update -y"
- elif env.get('CM_HOST_OS_PACKAGE_MANAGER', '') == "pacman":
- env['CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "pacman -Sy --noconfirm"
- env['CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "pacman -Syu"
- elif env.get('CM_HOST_OS_PACKAGE_MANAGER', '') == "brew":
- env['CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "brew install"
- env['CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "brew update"
- elif env.get('CM_HOST_OS_PACKAGE_MANAGER', '') == "yum":
- env['CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "yum install -y --skip-broken"
- env['CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "yum update -y"
- elif env.get('CM_HOST_OS_PACKAGE_MANAGER', '') == "zypper":
- env['CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "zypper install -y"
- env['CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "zypper update -y"
- elif env.get('CM_HOST_OS_PACKAGE_MANAGER', '') == "choco":
- env['CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "choco install -y"
- env['CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "choco upgrade -y"
+ env['MLC_HOST_SYSTEM_NAME'] = platform.node()
+
+ if 'MLC_HOST_OS_PACKAGE_MANAGER' not in env:
+ if env.get('MLC_HOST_OS_FLAVOR', '') == "ubuntu" or \
+ "debian" in env.get('MLC_HOST_OS_FLAVOR_LIKE', '') or \
+ env.get('MLC_HOST_OS_FLAVOR', '') == "debian":
+ env['MLC_HOST_OS_PACKAGE_MANAGER'] = "apt"
+ if env.get('MLC_HOST_OS_FLAVOR', '') == "rhel" or \
+ "rhel" in env.get('MLC_HOST_OS_FLAVOR_LIKE', ''):
+ env['MLC_HOST_OS_PACKAGE_MANAGER'] = "dnf"
+ if env.get('MLC_HOST_OS_FLAVOR', '') == "amzn":
+ env['MLC_HOST_OS_PACKAGE_MANAGER'] = "yum"
+ if env.get('MLC_HOST_OS_FLAVOR_LIKE', '') == "arch":
+ env['MLC_HOST_OS_PACKAGE_MANAGER'] = "arch"
+ if env.get('MLC_HOST_OS_FLAVOR', '') == "macos":
+ env['MLC_HOST_OS_PACKAGE_MANAGER'] = "brew"
+ if env.get('MLC_HOST_OS_FLAVOR', '') == "sles":
+ env['MLC_HOST_OS_PACKAGE_MANAGER'] = "zypper"
+ if env.get('MLC_HOST_OS_PACKAGE_MANAGER', '') == "apt":
+ env['MLC_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "DEBIAN_FRONTEND=noninteractive apt-get install -y"
+ env['MLC_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "apt-get update -y"
+ elif env.get('MLC_HOST_OS_PACKAGE_MANAGER', '') == "dnf":
+ env['MLC_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "dnf install -y"
+ env['MLC_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "dnf update -y"
+ elif env.get('MLC_HOST_OS_PACKAGE_MANAGER', '') == "pacman":
+ env['MLC_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "pacman -Sy --noconfirm"
+ env['MLC_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "pacman -Syu"
+ elif env.get('MLC_HOST_OS_PACKAGE_MANAGER', '') == "brew":
+ env['MLC_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "brew install"
+ env['MLC_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "brew update"
+ elif env.get('MLC_HOST_OS_PACKAGE_MANAGER', '') == "yum":
+ env['MLC_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "yum install -y --skip-broken"
+ env['MLC_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "yum update -y"
+ elif env.get('MLC_HOST_OS_PACKAGE_MANAGER', '') == "zypper":
+ env['MLC_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "zypper install -y"
+ env['MLC_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "zypper update -y"
+ elif env.get('MLC_HOST_OS_PACKAGE_MANAGER', '') == "choco":
+ env['MLC_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "choco install -y"
+ env['MLC_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "choco upgrade -y"
if os.path.exists("/.dockerenv"):
- env['CM_RUN_INSIDE_DOCKER'] = "yes"
+ env['MLC_RUN_INSIDE_DOCKER'] = "yes"
return {'return': 0}
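
The flavor-to-package-manager chain in customize.py is effectively a lookup table. The same mapping expressed as data, with the commands copied verbatim from the code above:

```python
FLAVOR_TO_MANAGER = {
    "ubuntu": "apt", "debian": "apt", "rhel": "dnf",
    "amzn": "yum", "arch": "arch", "macos": "brew", "sles": "zypper",
}
MANAGER_CMDS = {  # (install command, update command)
    "apt": ("DEBIAN_FRONTEND=noninteractive apt-get install -y",
            "apt-get update -y"),
    "dnf": ("dnf install -y", "dnf update -y"),
    "yum": ("yum install -y --skip-broken", "yum update -y"),
    "zypper": ("zypper install -y", "zypper update -y"),
    "brew": ("brew install", "brew update"),
    "choco": ("choco install -y", "choco upgrade -y"),
    "pacman": ("pacman -Sy --noconfirm", "pacman -Syu"),
}

manager = FLAVOR_TO_MANAGER.get("ubuntu", "")
install_cmd, update_cmd = MANAGER_CMDS[manager]
print(install_cmd)
```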
diff --git a/script/detect-os/meta.yaml b/script/detect-os/meta.yaml
index 12266a02a..8f9070cac 100644
--- a/script/detect-os/meta.yaml
+++ b/script/detect-os/meta.yaml
@@ -5,21 +5,21 @@ category: Platform information
clean_files:
- tmp-run.out
new_env_keys:
-- CM_HOST_OS_*
-- +CM_HOST_OS_*
-- CM_HOST_PLATFORM_*
-- CM_HOST_PYTHON_*
-- CM_HOST_SYSTEM_NAME
-- CM_RUN_STATE_DOCKER
+- MLC_HOST_OS_*
+- +MLC_HOST_OS_*
+- MLC_HOST_PLATFORM_*
+- MLC_HOST_PYTHON_*
+- MLC_HOST_SYSTEM_NAME
+- MLC_RUN_STATE_DOCKER
- +PATH
new_state_keys:
- os_uname_*
post_deps:
- enable_if_env:
- CM_HOST_OS_TYPE:
+ MLC_HOST_OS_TYPE:
- windows
skip_if_env:
- CM_WINDOWS_SYS_UTILS_MIN_INSTALL:
+ MLC_WINDOWS_SYS_UTILS_MIN_INSTALL:
- 'yes'
tags: get,sys-utils-min
tags:
diff --git a/script/detect-os/run.sh b/script/detect-os/run.sh
index 9e3c56cd9..a3ff776e2 100644
--- a/script/detect-os/run.sh
+++ b/script/detect-os/run.sh
@@ -3,19 +3,19 @@
uname -m > tmp-run.out
uname -a >> tmp-run.out
if test -f "/etc/os-release"; then
- echo "CM_HOST_OS_FLAVOR=`cat /etc/os-release | grep '^ID=' | cut -d'=' -f2 | cut -d'"' -f2 | tr '[:upper:]' '[:lower:]'`" >> tmp-run-env.out
- echo "CM_HOST_OS_FLAVOR_LIKE=`cat /etc/os-release | grep '^ID_LIKE=' | cut -d'=' -f2 | cut -d'"' -f2 | tr '[:upper:]' '[:lower:]'`" >> tmp-run-env.out
- echo "CM_HOST_OS_VERSION=`cat /etc/os-release | grep '^VERSION_ID=' | cut -d'=' -f2 | cut -d'"' -f2 | cut -d'"' -f2 | tr '[:upper:]' '[:lower:]'`" >> tmp-run-env.out
- echo "CM_HOST_OS_KERNEL_VERSION=`uname -r`" >> tmp-run-env.out
- echo "CM_HOST_PLATFORM_FLAVOR=`uname -m`" >> tmp-run-env.out
- echo "CM_HOST_OS_GLIBC_VERSION=`ldd --version | tail -n +1 | head -1 | cut -d')' -f2 | cut -d' ' -f2`" >> tmp-run-env.out
+ echo "MLC_HOST_OS_FLAVOR=`cat /etc/os-release | grep '^ID=' | cut -d'=' -f2 | cut -d'"' -f2 | tr '[:upper:]' '[:lower:]'`" >> tmp-run-env.out
+ echo "MLC_HOST_OS_FLAVOR_LIKE=`cat /etc/os-release | grep '^ID_LIKE=' | cut -d'=' -f2 | cut -d'"' -f2 | tr '[:upper:]' '[:lower:]'`" >> tmp-run-env.out
+ echo "MLC_HOST_OS_VERSION=`cat /etc/os-release | grep '^VERSION_ID=' | cut -d'=' -f2 | cut -d'"' -f2 | cut -d'"' -f2 | tr '[:upper:]' '[:lower:]'`" >> tmp-run-env.out
+ echo "MLC_HOST_OS_KERNEL_VERSION=`uname -r`" >> tmp-run-env.out
+ echo "MLC_HOST_PLATFORM_FLAVOR=`uname -m`" >> tmp-run-env.out
+ echo "MLC_HOST_OS_GLIBC_VERSION=`ldd --version | tail -n +1 | head -1 | cut -d')' -f2 | cut -d' ' -f2`" >> tmp-run-env.out
else
- CM_HOST_OS_FLAVOR=`sw_vers | grep '^ProductName:' | cut -f2 | tr '[:upper:]' '[:lower:]'`
- if [ -z ${CM_HOST_OS_FLAVOR} ]; then
- CM_HOST_OS_FLAVOR=`sw_vers | grep '^ProductName:' | cut -f3 | tr '[:upper:]' '[:lower:]' `
+ MLC_HOST_OS_FLAVOR=`sw_vers | grep '^ProductName:' | cut -f2 | tr '[:upper:]' '[:lower:]'`
+        if [ -z "${MLC_HOST_OS_FLAVOR}" ]; then
+ MLC_HOST_OS_FLAVOR=`sw_vers | grep '^ProductName:' | cut -f3 | tr '[:upper:]' '[:lower:]' `
fi
- echo "CM_HOST_OS_FLAVOR=${CM_HOST_OS_FLAVOR}" >> tmp-run-env.out
- echo "CM_HOST_OS_VERSION=`sw_vers | grep '^ProductVersion:' | cut -f2 | tr '[:upper:]' '[:lower:]' `" >> tmp-run-env.out
- echo "CM_HOST_OS_KERNEL_VERSION=`uname -r`" >> tmp-run-env.out
- echo "CM_HOST_PLATFORM_FLAVOR=`uname -m `" >> tmp-run-env.out
+ echo "MLC_HOST_OS_FLAVOR=${MLC_HOST_OS_FLAVOR}" >> tmp-run-env.out
+ echo "MLC_HOST_OS_VERSION=`sw_vers | grep '^ProductVersion:' | cut -f2 | tr '[:upper:]' '[:lower:]' `" >> tmp-run-env.out
+ echo "MLC_HOST_OS_KERNEL_VERSION=`uname -r`" >> tmp-run-env.out
+ echo "MLC_HOST_PLATFORM_FLAVOR=`uname -m `" >> tmp-run-env.out
fi
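
The Linux branch above scrapes /etc/os-release with grep and cut. The same parse in Python, for comparison (Linux only):

```python
# A Python equivalent of the /etc/os-release parsing in run.sh.
def parse_os_release(path: str = "/etc/os-release") -> dict:
    info = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if "=" in line and not line.startswith("#"):
                key, _, value = line.partition("=")
                info[key] = value.strip('"').lower()
    return info

info = parse_os_release()
print("MLC_HOST_OS_FLAVOR=" + info.get("ID", ""))
print("MLC_HOST_OS_VERSION=" + info.get("VERSION_ID", ""))
```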
diff --git a/script/detect-sudo/customize.py b/script/detect-sudo/customize.py
index bc5e92296..844a2b328 100644
--- a/script/detect-sudo/customize.py
+++ b/script/detect-sudo/customize.py
@@ -16,19 +16,19 @@ def preprocess(i):
automation = i['automation']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
if os.geteuid() == 0:
- env['CM_SUDO'] = '' # root user does not need sudo
- env['CM_SUDO_USER'] = "yes"
+ env['MLC_SUDO'] = '' # root user does not need sudo
+ env['MLC_SUDO_USER'] = "yes"
else:
if can_execute_sudo_without_password() or prompt_sudo() == 0:
- env['CM_SUDO_USER'] = "yes"
- env['CM_SUDO'] = 'sudo'
+ env['MLC_SUDO_USER'] = "yes"
+ env['MLC_SUDO'] = 'sudo'
else:
- env['CM_SUDO_USER'] = "no"
- env['CM_SUDO'] = ''
+ env['MLC_SUDO_USER'] = "no"
+ env['MLC_SUDO'] = ''
return {'return': 0}
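
preprocess() above calls a can_execute_sudo_without_password() helper that this hunk does not show. A common way to implement such a check is sudo's non-interactive flag; the sketch below is therefore an assumption, not the script's actual helper:

```python
import subprocess

def can_execute_sudo_without_password() -> bool:
    """'sudo -n' fails instead of prompting, so a zero exit code
    means passwordless sudo is available."""
    try:
        r = subprocess.run(["sudo", "-n", "true"],
                           capture_output=True, timeout=10)
        return r.returncode == 0
    except (subprocess.TimeoutExpired, FileNotFoundError):
        return False

print(can_execute_sudo_without_password())
```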
diff --git a/script/detect-sudo/meta.yaml b/script/detect-sudo/meta.yaml
index 64b60a5f6..6e3cb29fb 100644
--- a/script/detect-sudo/meta.yaml
+++ b/script/detect-sudo/meta.yaml
@@ -9,7 +9,7 @@ cache: false
category: DevOps automation
new_env_keys:
- - CM_SUDO*
+ - MLC_SUDO*
tags:
- detect
diff --git a/script/detect-sudo/run.sh b/script/detect-sudo/run.sh
index 3a584c10c..821adb3f9 100644
--- a/script/detect-sudo/run.sh
+++ b/script/detect-sudo/run.sh
@@ -1,11 +1,11 @@
#!/bin/bash
-#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH}
#To export any variable
#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
-#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
@@ -17,11 +17,11 @@ function run() {
echo "Running: "
echo "$1"
echo ""
- if [[ ${CM_FAKE_RUN} != 'yes' ]]; then
+ if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then
eval "$1"
exit_if_error
fi
}
#Add your run commands here...
-# run "$CM_RUN_CMD"
+# run "$MLC_RUN_CMD"
diff --git a/script/download-and-extract/README-extra.md b/script/download-and-extract/README-extra.md
index 6573ab848..91d015ee4 100644
--- a/script/download-and-extract/README-extra.md
+++ b/script/download-and-extract/README-extra.md
@@ -4,7 +4,7 @@
### Use internal CM download function
-This script will use [internal CM download function](https://github.com/mlcommons/ck/blob/master/cm-mlops/automation/utils/module.py#L157)
+This script will use [internal CM download function](https://github.com/mlcommons/ck/blob/master/mlc-mlops/automation/utils/module.py#L157)
to download and extract a given file to the current directory:
```bash
@@ -26,25 +26,25 @@ cmr "dae file _extract _url.https://cKnowledge.org/test/coco-2017-val-annotation
```json
"new_env": {
- "CM_DOWNLOAD_DOWNLOADED_PATH": "D:\\Work\\coco-2017-val-annotations.zip",
- "CM_EXTRACT_EXTRACTED_PATH": "D:\\Work",
- "CM_GET_DEPENDENT_CACHED_PATH": "D:\\Work"
+ "MLC_DOWNLOAD_DOWNLOADED_PATH": "D:\\Work\\coco-2017-val-annotations.zip",
+ "MLC_EXTRACT_EXTRACTED_PATH": "D:\\Work",
+ "MLC_GET_DEPENDENT_CACHED_PATH": "D:\\Work"
},
```
#### Input flags and equivalent environment variables
-* `--url` or `--env.CM_DAE_URL` - URL to download file
-* `--verify` or `--env.CM_VERIFY_SSL` - set to `no` to skip SSL certificate verification
-* `--download_path` or `--store` or `--env.CM_DOWNLOAD_PATH` - where to download file
-* `--local_path` or `--from` or `--env.CM_DOWNLOAD_LOCAL_FILE_PATH` - where to take file from instead of downloading
-* `--extract_path` or `--to` or `--env.CM_EXTRACT_PATH` - where to extract files (--input should have full path then)
-* `--extra_folder` or `--env.CM_EXTRACT_TO_FOLDER` - extra directory when extracting file (to avoid messing up current directory)
+* `--url` or `--env.MLC_DAE_URL` - URL to download file
+* `--verify` or `--env.MLC_VERIFY_SSL` - set to `no` to skip SSL certificate verification
+* `--download_path` or `--store` or `--env.MLC_DOWNLOAD_PATH` - where to download file
+* `--local_path` or `--from` or `--env.MLC_DOWNLOAD_LOCAL_FILE_PATH` - where to take file from instead of downloading
+* `--extract_path` or `--to` or `--env.MLC_EXTRACT_PATH` - where to extract files (--input should have full path then)
+* `--extra_folder` or `--env.MLC_EXTRACT_TO_FOLDER` - extra directory when extracting file (to avoid messing up current directory)
#### Variations
-* `_keep` or `_no-remove-extracted` or `--env.CM_EXTRACT_REMOVE_EXTRACTED=no` - keep archive file (it will be deleted by default)
+* `_keep` or `_no-remove-extracted` or `--env.MLC_EXTRACT_REMOVE_EXTRACTED=no` - keep archive file (it will be deleted by default)
@@ -63,25 +63,25 @@ cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-anno
### Check MD5SUM
```bash
-cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56
+cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56
```
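
Under the hood this flag drives an `md5sum -c` shell pipeline (visible in `download-file/customize.py` further below). A pure-Python equivalent of that check, as a minimal sketch:

```python
import hashlib

def md5_matches(path: str, expected: str) -> bool:
    # Equivalent of `echo <md5> <file> | md5sum -c -`: hash in 1 MiB
    # chunks so large downloads never have to fit in memory.
    h = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            h.update(chunk)
    return h.hexdigest() == expected.lower()

# md5_matches('coco-2017-val-annotations.zip',
#             'bbe2f8874ee9e33cf5d6906338027a56')
```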
### Save to another file
```bash
-cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.CM_DOWNLOAD_FILENAME=xyz --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56
+cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.MLC_DOWNLOAD_FILENAME=xyz --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56
```
### Save to another place
```bash
-cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --download_path=D:\Work --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56
+cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --download_path=D:\Work --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56
```
### Reuse local file instead of downloading a file
```bash
-cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --local_path="D:\Work\coco-2017-val-annotations.zip" --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 -j
+cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --local_path="D:\Work\coco-2017-val-annotations.zip" --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 -j
```
@@ -89,7 +89,7 @@ cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-anno
```bash
-cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 --store=$HOME/dir1 --to=$HOME/dir2
+cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 --store=$HOME/dir1 --to=$HOME/dir2
```
@@ -100,7 +100,7 @@ You can use all above commands with `--force_cache` and `--extra_cache_tags` fla
In such case, a given file will be downloaded to CM cache and can be reused by other CM scripts and workflows:
```bash
-cmr "dae file _extract _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 --force_cache --extra_cache_tags=coco,2017,val,annotations
+cmr "dae file _extract _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 --force_cache --extra_cache_tags=coco,2017,val,annotations
```
You can find it in CM cache using extra cache tags as follows:
diff --git a/script/download-and-extract/customize.py b/script/download-and-extract/customize.py
index 32cac6476..86f8b1d77 100644
--- a/script/download-and-extract/customize.py
+++ b/script/download-and-extract/customize.py
@@ -13,7 +13,7 @@ def preprocess(i):
automation = i['automation']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
if i['input'].get('force_cache'):
extra_cache_tags = i['input'].get('extra_cache_tags', '')
@@ -33,20 +33,20 @@ def preprocess(i):
if r['return'] > 0:
return r
- if env.get('CM_DOWNLOAD_LOCAL_FILE_PATH'):
- filepath = env['CM_DOWNLOAD_LOCAL_FILE_PATH']
+ if env.get('MLC_DOWNLOAD_LOCAL_FILE_PATH'):
+ filepath = env['MLC_DOWNLOAD_LOCAL_FILE_PATH']
if not os.path.exists(filepath):
return {'return': 1,
'error': 'Local file {} doesn\'t exist'.format(filepath)}
- env['CM_EXTRACT_REMOVE_EXTRACTED'] = 'no'
+ env['MLC_EXTRACT_REMOVE_EXTRACTED'] = 'no'
- if str(env.get('CM_DAE_EXTRACT_DOWNLOADED')
+ if str(env.get('MLC_DAE_EXTRACT_DOWNLOADED')
).lower() in ["yes", "1", "true"]:
- if (env.get('CM_EXTRACT_FINAL_ENV_NAME', '') == '') and (
- env.get('CM_DAE_FINAL_ENV_NAME', '') != ''):
- env['CM_EXTRACT_FINAL_ENV_NAME'] = env['CM_DAE_FINAL_ENV_NAME']
+ if (env.get('MLC_EXTRACT_FINAL_ENV_NAME', '') == '') and (
+ env.get('MLC_DAE_FINAL_ENV_NAME', '') != ''):
+ env['MLC_EXTRACT_FINAL_ENV_NAME'] = env['MLC_DAE_FINAL_ENV_NAME']
return {'return': 0}
@@ -54,20 +54,20 @@ def preprocess(i):
def postprocess(i):
env = i['env']
- filepath = env.get('CM_EXTRACT_EXTRACTED_PATH', '')
+ filepath = env.get('MLC_EXTRACT_EXTRACTED_PATH', '')
if filepath == '':
- filepath = env.get('CM_DOWNLOAD_DOWNLOADED_PATH', '')
+ filepath = env.get('MLC_DOWNLOAD_DOWNLOADED_PATH', '')
if filepath == '':
return {'return': 1,
- 'error': 'No extracted path set in "CM_EXTRACT_EXTRACTED_PATH"'}
+ 'error': 'No extracted path set in "MLC_EXTRACT_EXTRACTED_PATH"'}
if not os.path.exists(filepath):
return {'return': 1,
'error': 'Extracted path doesn\'t exist: {}'.format(filepath)}
- if env.get('CM_DAE_FINAL_ENV_NAME'):
- env[env['CM_DAE_FINAL_ENV_NAME']] = filepath
+ if env.get('MLC_DAE_FINAL_ENV_NAME'):
+ env[env['MLC_DAE_FINAL_ENV_NAME']] = filepath
- env['CM_GET_DEPENDENT_CACHED_PATH'] = filepath
+ env['MLC_GET_DEPENDENT_CACHED_PATH'] = filepath
return {'return': 0}
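
The `MLC_DAE_FINAL_ENV_NAME` lines above are an indirection: the caller names the environment variable under which the result path should be exported. A minimal sketch of the pattern, with a hypothetical key name and a plain dict standing in for the automation's env:

```python
env = {
    'MLC_DAE_FINAL_ENV_NAME': 'MLC_DATASET_PATH',    # hypothetical caller choice
    'MLC_EXTRACT_EXTRACTED_PATH': '/tmp/extracted',  # hypothetical path
}

filepath = env.get('MLC_EXTRACT_EXTRACTED_PATH', '')
if env.get('MLC_DAE_FINAL_ENV_NAME'):
    # export the path under whatever name the caller asked for
    env[env['MLC_DAE_FINAL_ENV_NAME']] = filepath

assert env['MLC_DATASET_PATH'] == '/tmp/extracted'
```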
diff --git a/script/download-and-extract/meta.yaml b/script/download-and-extract/meta.yaml
index bd4003b91..1d02bfbae 100644
--- a/script/download-and-extract/meta.yaml
+++ b/script/download-and-extract/meta.yaml
@@ -7,51 +7,51 @@ category: DevOps automation
deps: []
input_description: {}
input_mapping:
- download_path: CM_DOWNLOAD_PATH
- extra_folder: CM_EXTRACT_TO_FOLDER
- extract_path: CM_EXTRACT_PATH
- from: CM_DOWNLOAD_LOCAL_FILE_PATH
- local_path: CM_DOWNLOAD_LOCAL_FILE_PATH
- store: CM_DOWNLOAD_PATH
- to: CM_EXTRACT_PATH
- url: CM_DAE_URL
- verify: CM_VERIFY_SSL
+ download_path: MLC_DOWNLOAD_PATH
+ extra_folder: MLC_EXTRACT_TO_FOLDER
+ extract_path: MLC_EXTRACT_PATH
+ from: MLC_DOWNLOAD_LOCAL_FILE_PATH
+ local_path: MLC_DOWNLOAD_LOCAL_FILE_PATH
+ store: MLC_DOWNLOAD_PATH
+ to: MLC_EXTRACT_PATH
+ url: MLC_DAE_URL
+ verify: MLC_VERIFY_SSL
new_env_keys:
-- CM_DOWNLOAD_DOWNLOADED_PATH*
-- CM_EXTRACT_EXTRACTED_PATH
-- <<<CM_DAE_FINAL_ENV_NAME>>>
-- <<<CM_DOWNLOAD_FINAL_ENV_NAME>>>
-- <<<CM_EXTRACT_FINAL_ENV_NAME>>>
-- CM_GET_DEPENDENT_CACHED_PATH
+- MLC_DOWNLOAD_DOWNLOADED_PATH*
+- MLC_EXTRACT_EXTRACTED_PATH
+- <<<MLC_DAE_FINAL_ENV_NAME>>>
+- <<<MLC_DOWNLOAD_FINAL_ENV_NAME>>>
+- <<<MLC_EXTRACT_FINAL_ENV_NAME>>>
+- MLC_GET_DEPENDENT_CACHED_PATH
new_state_keys: []
post_deps: []
posthook_deps:
- enable_if_env:
- CM_DAE_EXTRACT_DOWNLOADED:
+ MLC_DAE_EXTRACT_DOWNLOADED:
- 'yes'
- 'True'
names:
- extract-script
tags: extract,file
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
update_tags_from_env_with_prefix:
_path.:
- - CM_DOWNLOAD_DOWNLOADED_PATH
- - CM_TORRENT_DOWNLOADED_PATH
+ - MLC_DOWNLOAD_DOWNLOADED_PATH
+ - MLC_TORRENT_DOWNLOADED_PATH
prehook_deps:
- names:
- download-script
skip_if_env:
- CM_DAE_DOWNLOAD_USING_TORRENT:
+ MLC_DAE_DOWNLOAD_USING_TORRENT:
- 'yes'
- 'True'
tags: download,file
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
update_tags_from_env_with_prefix:
_url.:
- - CM_DAE_URL
+ - MLC_DAE_URL
tags:
- dae
- file
@@ -72,7 +72,7 @@ variations:
group: download-tool
extract:
env:
- CM_DAE_EXTRACT_DOWNLOADED: 'yes'
+ MLC_DAE_EXTRACT_DOWNLOADED: 'yes'
gdown:
add_deps:
download-script:
@@ -81,11 +81,11 @@ variations:
keep:
default: 'true'
env:
- CM_EXTRACT_REMOVE_EXTRACTED: 'no'
+ MLC_EXTRACT_REMOVE_EXTRACTED: 'no'
group: keep
no-remove-extracted:
env:
- CM_EXTRACT_REMOVE_EXTRACTED: 'no'
+ MLC_EXTRACT_REMOVE_EXTRACTED: 'no'
group: keep
rclone:
add_deps:
@@ -94,21 +94,21 @@ variations:
group: download-tool
torrent:
env:
- CM_DAE_DOWNLOAD_USING_TORRENT: 'yes'
- CM_TORRENT_DOWNLOADED_FILE_NAME: <<>>
- CM_TORRENT_DOWNLOADED_PATH_ENV_KEY: CM_DAE_FILEPATH
- CM_TORRENT_WAIT_UNTIL_COMPLETED: 'yes'
+ MLC_DAE_DOWNLOAD_USING_TORRENT: 'yes'
+ MLC_TORRENT_DOWNLOADED_FILE_NAME: <<>>
+ MLC_TORRENT_DOWNLOADED_PATH_ENV_KEY: MLC_DAE_FILEPATH
+ MLC_TORRENT_WAIT_UNTIL_COMPLETED: 'yes'
group: download-tool
new_env_keys:
- - CM_TORRENT_DOWNLOADED_PATH
+ - MLC_TORRENT_DOWNLOADED_PATH
prehook_deps:
- tags: download,torrent
update_tags_from_env_with_prefix:
_torrent.:
- - CM_DAE_TORRENT_PATH
+ - MLC_DAE_TORRENT_PATH
url.#:
env:
- CM_DAE_URL: '#'
+ MLC_DAE_URL: '#'
wget:
add_deps:
download-script:
diff --git a/script/download-and-extract/tests/download-and-extract-file.bat b/script/download-and-extract/tests/download-and-extract-file.bat
index 0688461de..ecb28f0c7 100644
--- a/script/download-and-extract/tests/download-and-extract-file.bat
+++ b/script/download-and-extract/tests/download-and-extract-file.bat
@@ -1 +1 @@
-cmr "download-and-extract file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _wget _extract _no-remove-extracted" -j --env.CM_VERIFY_SSL=False --env.CM_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1
+cmr "download-and-extract file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _wget _extract _no-remove-extracted" -j --env.MLC_VERIFY_SSL=False --env.MLC_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1
diff --git a/script/download-and-extract/tests/download-and-extract-file2.bat b/script/download-and-extract/tests/download-and-extract-file2.bat
index af344b927..f2806eb1a 100644
--- a/script/download-and-extract/tests/download-and-extract-file2.bat
+++ b/script/download-and-extract/tests/download-and-extract-file2.bat
@@ -1 +1 @@
-cmr "download-and-extract file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _wget _extract" -j --env.CM_VERIFY_SSL=False --env.CM_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1
+cmr "download-and-extract file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _wget _extract" -j --env.MLC_VERIFY_SSL=False --env.MLC_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1
diff --git a/script/download-file/README-extra.md b/script/download-file/README-extra.md
index b645b0419..09c9d065b 100644
--- a/script/download-file/README-extra.md
+++ b/script/download-file/README-extra.md
@@ -4,7 +4,7 @@
### Use internal CM download function
-This script will use [internal CM download function](https://github.com/mlcommons/ck/blob/master/cm-mlops/automation/utils/module.py#L157)
+This script will use [internal CM download function](https://github.com/mlcommons/ck/blob/master/mlc-mlops/automation/utils/module.py#L157)
to download a given file to the current directory:
```bash
@@ -26,17 +26,17 @@ cmr "download file" _url.https://cKnowledge.org/test/coco-2017-val-annotations.z
```json
"new_env": {
- "CM_DOWNLOAD_DOWNLOADED_PATH": "D:\\Downloads\\coco-2017-val-annotations.zip",
- "CM_GET_DEPENDENT_CACHED_PATH": "D:\\Downloads\\coco-2017-val-annotations.zip"
+ "MLC_DOWNLOAD_DOWNLOADED_PATH": "D:\\Downloads\\coco-2017-val-annotations.zip",
+ "MLC_GET_DEPENDENT_CACHED_PATH": "D:\\Downloads\\coco-2017-val-annotations.zip"
},
```
#### Input flags and equivalent environment variables
-* `--url` or `--env.CM_DAE_URL` - URL to download file
-* `--download_path` or `--to` or `--env.CM_DOWNLOAD_PATH` - where to download file
-* `--local_path` or `--from` or `--env.CM_DOWNLOAD_LOCAL_FILE_PATH` - where to take file from instead of downloading
-* `--verify` or `--env.CM_VERIFY_SSL` - set to `no` to skip SSL certificate verification
+* `--url` or `--env.MLC_DAE_URL` - URL to download file
+* `--download_path` or `--to` or `--env.MLC_DOWNLOAD_PATH` - where to download file
+* `--local_path` or `--from` or `--env.MLC_DOWNLOAD_LOCAL_FILE_PATH` - where to take file from instead of downloading
+* `--verify` or `--env.MLC_VERIFY_SSL` - set to `no` to skip SSL certificate verification
### Use wget without SSL certificate verification
@@ -54,32 +54,32 @@ cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zi
### Check MD5SUM
```bash
-cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56
+cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56
```
### Save to another file
```bash
-cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.CM_DOWNLOAD_FILENAME=xyz --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56
+cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.MLC_DOWNLOAD_FILENAME=xyz --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56
```
### Save to another place
```bash
-cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --download_path=D:\Work --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56
+cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --download_path=D:\Work --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56
```
### Reuse local file instead of downloading a file
```bash
-cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --local_path="D:\Work\coco-2017-val-annotations.zip" --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 -j
+cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --local_path="D:\Work\coco-2017-val-annotations.zip" --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 -j
```
Output environment variables produced by this CM script:
```json
"new_env": {
- "CM_DOWNLOAD_DOWNLOADED_PATH": "D:\\Work\\coco-2017-val-annotations.zip",
- "CM_GET_DEPENDENT_CACHED_PATH": "D:\\Work\\coco-2017-val-annotations.zip"
+ "MLC_DOWNLOAD_DOWNLOADED_PATH": "D:\\Work\\coco-2017-val-annotations.zip",
+ "MLC_GET_DEPENDENT_CACHED_PATH": "D:\\Work\\coco-2017-val-annotations.zip"
}
```
@@ -89,7 +89,7 @@ You can use all above commands with `--force_cache` and `--extra_cache_tags` fla
In such case, a given file will be downloaded to CM cache and can be reused by other CM scripts and workflows:
```bash
-cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 --force_cache --extra_cache_tags=coco,2017,val,annotations
+cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.MLC_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 --force_cache --extra_cache_tags=coco,2017,val,annotations
```
You can find it in CM cache using extra cache tags as follows:
diff --git a/script/download-file/customize.py b/script/download-file/customize.py
index fc237635c..f72034d5f 100644
--- a/script/download-file/customize.py
+++ b/script/download-file/customize.py
@@ -33,10 +33,10 @@ def preprocess(i):
automation = i['automation']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
- tool = env.get('CM_DOWNLOAD_TOOL', '')
- pre_clean = env.get('CM_PRE_DOWNLOAD_CLEAN', False)
+ tool = env.get('MLC_DOWNLOAD_TOOL', '')
+ pre_clean = env.get('MLC_PRE_DOWNLOAD_CLEAN', False)
# xsep = '^&^&' if windows else '&&'
xsep = '&&'
@@ -52,26 +52,26 @@ def preprocess(i):
else:
del_cmd = "rm -f"
- if env.get('CM_DOWNLOAD_LOCAL_FILE_PATH'):
- filepath = env['CM_DOWNLOAD_LOCAL_FILE_PATH']
+ if env.get('MLC_DOWNLOAD_LOCAL_FILE_PATH'):
+ filepath = env['MLC_DOWNLOAD_LOCAL_FILE_PATH']
if not os.path.exists(filepath):
return {'return': 1,
'error': 'Local file {} doesn\'t exist'.format(filepath)}
- env['CM_DOWNLOAD_CMD'] = ""
+ env['MLC_DOWNLOAD_CMD'] = ""
- env['CM_DOWNLOAD_FILENAME'] = filepath
+ env['MLC_DOWNLOAD_FILENAME'] = filepath
if not quiet:
print('')
print('Using local file: {}'.format(filepath))
else:
- url = env.get('CM_DOWNLOAD_URL', '')
+ url = env.get('MLC_DOWNLOAD_URL', '')
if url == '':
return {
- 'return': 1, 'error': 'please specify URL using --url={URL} or --env.CM_DOWNLOAD_URL={URL}'}
+ 'return': 1, 'error': 'please specify URL using --url={URL} or --env.MLC_DOWNLOAD_URL={URL}'}
print('')
print('Downloading from {}'.format(url))
@@ -82,42 +82,42 @@ def preprocess(i):
else:
url = url.replace('&', '\\&')
- extra_download_options = env.get('CM_DOWNLOAD_EXTRA_OPTIONS', '')
+ extra_download_options = env.get('MLC_DOWNLOAD_EXTRA_OPTIONS', '')
- verify_ssl = env.get('CM_VERIFY_SSL', "True")
+ verify_ssl = env.get('MLC_VERIFY_SSL', "True")
if str(verify_ssl).lower() in [
"no", "false"] or os_info['platform'] == 'windows':
verify_ssl = False
else:
verify_ssl = True
- if env.get('CM_DOWNLOAD_PATH', '') != '':
- download_path = env['CM_DOWNLOAD_PATH']
+ if env.get('MLC_DOWNLOAD_PATH', '') != '':
+ download_path = env['MLC_DOWNLOAD_PATH']
if not os.path.exists(download_path):
os.makedirs(download_path, exist_ok=True)
os.chdir(download_path)
- if env.get('CM_DOWNLOAD_FILENAME', '') == '':
- urltail = os.path.basename(env['CM_DOWNLOAD_URL'])
- urlhead = os.path.dirname(env['CM_DOWNLOAD_URL'])
+ if env.get('MLC_DOWNLOAD_FILENAME', '') == '':
+ urltail = os.path.basename(env['MLC_DOWNLOAD_URL'])
+ urlhead = os.path.dirname(env['MLC_DOWNLOAD_URL'])
if "." in urltail and "/" in urlhead:
# Check if ? after filename
j = urltail.find('?')
if j > 0:
urltail = urltail[:j]
- env['CM_DOWNLOAD_FILENAME'] = urltail
- elif env.get('CM_DOWNLOAD_TOOL', '') == "rclone":
- env['CM_DOWNLOAD_FILENAME'] = urltail
+ env['MLC_DOWNLOAD_FILENAME'] = urltail
+ elif env.get('MLC_DOWNLOAD_TOOL', '') == "rclone":
+ env['MLC_DOWNLOAD_FILENAME'] = urltail
else:
- env['CM_DOWNLOAD_FILENAME'] = "index.html"
+ env['MLC_DOWNLOAD_FILENAME'] = "index.html"
if tool == "cmutil":
cmutil_require_download = 0
- if env.get('CM_DOWNLOAD_CHECKSUM_FILE', '') != '':
+ if env.get('MLC_DOWNLOAD_CHECKSUM_FILE', '') != '':
if os_info['platform'] == 'windows':
- checksum_cmd = f"cd {q}{filepath}{q} {xsep} md5sum -c{x_c} {x}{escape_special_chars(env['CM_DOWNLOAD_CHECKSUM_FILE'])}"
+ checksum_cmd = f"cd {q}{filepath}{q} {xsep} md5sum -c{x_c} {x}{escape_special_chars(env['MLC_DOWNLOAD_CHECKSUM_FILE'])}"
else:
- checksum_cmd = f"cd {q}{filepath}{q} {xsep} md5sum -c{x_c} {x}{q}{env['CM_DOWNLOAD_CHECKSUM_FILE']}{q}"
+ checksum_cmd = f"cd {q}{filepath}{q} {xsep} md5sum -c{x_c} {x}{q}{env['MLC_DOWNLOAD_CHECKSUM_FILE']}{q}"
checksum_result = subprocess.run(
checksum_cmd,
cwd=f'{q}{filepath}{q}',
@@ -125,45 +125,45 @@ def preprocess(i):
text=True,
shell=True,
env=subprocess_env)
- elif env.get('CM_DOWNLOAD_CHECKSUM', '') != '':
+ elif env.get('MLC_DOWNLOAD_CHECKSUM', '') != '':
if os_info['platform'] == 'windows':
- checksum_cmd = f"echo {env.get('CM_DOWNLOAD_CHECKSUM')} {x}{escape_special_chars(env['CM_DOWNLOAD_FILENAME'])} | md5sum -c{x_c} -"
+ checksum_cmd = f"echo {env.get('MLC_DOWNLOAD_CHECKSUM')} {x}{escape_special_chars(env['MLC_DOWNLOAD_FILENAME'])} | md5sum -c{x_c} -"
else:
- checksum_cmd = f"echo {env.get('CM_DOWNLOAD_CHECKSUM')} {x}{q}{env['CM_DOWNLOAD_FILENAME']}{q} | md5sum -c{x_c} -"
+ checksum_cmd = f"echo {env.get('MLC_DOWNLOAD_CHECKSUM')} {x}{q}{env['MLC_DOWNLOAD_FILENAME']}{q} | md5sum -c{x_c} -"
checksum_result = subprocess.run(
checksum_cmd,
capture_output=True,
text=True,
shell=True,
env=subprocess_env)
- if env.get('CM_DOWNLOAD_CHECKSUM_FILE', '') != '' or env.get(
- 'CM_DOWNLOAD_CHECKSUM', '') != '':
+ if env.get('MLC_DOWNLOAD_CHECKSUM_FILE', '') != '' or env.get(
+ 'MLC_DOWNLOAD_CHECKSUM', '') != '':
# print(checksum_result) #for debugging
if "checksum did not match" in checksum_result.stderr.lower():
computed_checksum = subprocess.run(
- f"md5sum {env['CM_DOWNLOAD_FILENAME']}",
+ f"md5sum {env['MLC_DOWNLOAD_FILENAME']}",
capture_output=True,
text=True,
shell=True).stdout.split(" ")[0]
print(
- f"WARNING: File already present, mismatch between original checksum({env.get('CM_DOWNLOAD_CHECKSUM')}) and computed checksum({computed_checksum}). Deleting the already present file and downloading new.")
+ f"WARNING: File already present, mismatch between original checksum({env.get('MLC_DOWNLOAD_CHECKSUM')}) and computed checksum({computed_checksum}). Deleting the already present file and downloading new.")
try:
- os.remove(env['CM_DOWNLOAD_FILENAME'])
+ os.remove(env['MLC_DOWNLOAD_FILENAME'])
print(
- f"File {env['CM_DOWNLOAD_FILENAME']} deleted successfully.")
+ f"File {env['MLC_DOWNLOAD_FILENAME']} deleted successfully.")
except PermissionError:
return {
- "return": 1, "error": f"Permission denied to delete file {env['CM_DOWNLOAD_FILENAME']}."}
+ "return": 1, "error": f"Permission denied to delete file {env['MLC_DOWNLOAD_FILENAME']}."}
cmutil_require_download = 1
elif "no such file" in checksum_result.stderr.lower():
- # print(f"No file {env['CM_DOWNLOAD_FILENAME']}. Downloading through cmutil.")
+ # print(f"No file {env['MLC_DOWNLOAD_FILENAME']}. Downloading through cmutil.")
cmutil_require_download = 1
elif checksum_result.returncode > 0:
return {
"return": 1, "error": f"Error while checking checksum: {checksum_result.stderr}"}
else:
print(
- f"File {env['CM_DOWNLOAD_FILENAME']} already present, original checksum and computed checksum matches! Skipping Download..")
+ f"File {env['MLC_DOWNLOAD_FILENAME']} already present, original checksum and computed checksum matches! Skipping Download..")
else:
cmutil_require_download = 1
@@ -176,7 +176,7 @@ def preprocess(i):
if r['return'] == 0:
break
oldurl = url
- url = env.get('CM_DOWNLOAD_URL' + str(i), '')
+ url = env.get('MLC_DOWNLOAD_URL' + str(i), '')
if url == '':
break
print(f"Download from {oldurl} failed, trying from {url}")
@@ -184,123 +184,123 @@ def preprocess(i):
if r['return'] > 0:
return r
- env['CM_DOWNLOAD_CMD'] = ""
- env['CM_DOWNLOAD_FILENAME'] = r['filename']
+ env['MLC_DOWNLOAD_CMD'] = ""
+ env['MLC_DOWNLOAD_FILENAME'] = r['filename']
elif tool == "wget":
- if env.get('CM_DOWNLOAD_FILENAME', '') != '':
- extra_download_options += f" --tries=3 -O {q}{env['CM_DOWNLOAD_FILENAME']}{q} "
+ if env.get('MLC_DOWNLOAD_FILENAME', '') != '':
+ extra_download_options += f" --tries=3 -O {q}{env['MLC_DOWNLOAD_FILENAME']}{q} "
if not verify_ssl:
extra_download_options += "--no-check-certificate "
- env['CM_DOWNLOAD_CMD'] = f"wget -nc {extra_download_options} {url}"
+ env['MLC_DOWNLOAD_CMD'] = f"wget -nc {extra_download_options} {url}"
for i in range(1, 5):
- url = env.get('CM_DOWNLOAD_URL' + str(i), '')
+ url = env.get('MLC_DOWNLOAD_URL' + str(i), '')
if url == '':
break
- env['CM_DOWNLOAD_CMD'] += f" || (({del_cmd} {env['CM_DOWNLOAD_FILENAME']} || true) && wget -nc {extra_download_options} {url})"
- print(env['CM_DOWNLOAD_CMD'])
+ env['MLC_DOWNLOAD_CMD'] += f" || (({del_cmd} {env['MLC_DOWNLOAD_FILENAME']} || true) && wget -nc {extra_download_options} {url})"
+ print(env['MLC_DOWNLOAD_CMD'])
elif tool == "curl":
- if env.get('CM_DOWNLOAD_FILENAME', '') != '':
- extra_download_options += f" --output {q}{env['CM_DOWNLOAD_FILENAME']}{q} "
+ if env.get('MLC_DOWNLOAD_FILENAME', '') != '':
+ extra_download_options += f" --output {q}{env['MLC_DOWNLOAD_FILENAME']}{q} "
- env['CM_DOWNLOAD_CMD'] = f"curl {extra_download_options} {url}"
+ env['MLC_DOWNLOAD_CMD'] = f"curl {extra_download_options} {url}"
for i in range(1, 5):
- url = env.get('CM_DOWNLOAD_URL' + str(i), '')
+ url = env.get('MLC_DOWNLOAD_URL' + str(i), '')
if url == '':
break
- env['CM_DOWNLOAD_CMD'] += f" || (({del_cmd} {env['CM_DOWNLOAD_FILENAME']} || true) && curl {extra_download_options} {url})"
+ env['MLC_DOWNLOAD_CMD'] += f" || (({del_cmd} {env['MLC_DOWNLOAD_FILENAME']} || true) && curl {extra_download_options} {url})"
elif tool == "gdown":
if not verify_ssl:
extra_download_options += "--no-check-certificate "
- env['CM_DOWNLOAD_CMD'] = f"gdown {extra_download_options} {url}"
+ env['MLC_DOWNLOAD_CMD'] = f"gdown {extra_download_options} {url}"
for i in range(1, 5):
- url = env.get('CM_DOWNLOAD_URL' + str(i), '')
+ url = env.get('MLC_DOWNLOAD_URL' + str(i), '')
if url == '':
break
- env['CM_DOWNLOAD_CMD'] += f" || (({del_cmd} {env['CM_DOWNLOAD_FILENAME']} || true) && gdown {extra_download_options} {url})"
+ env['MLC_DOWNLOAD_CMD'] += f" || (({del_cmd} {env['MLC_DOWNLOAD_FILENAME']} || true) && gdown {extra_download_options} {url})"
elif tool == "rclone":
# keeping this for backward compatibility. Ideally should be done
# via get,rclone-config script
- if env.get('CM_RCLONE_CONFIG_CMD', '') != '':
- env['CM_DOWNLOAD_CONFIG_CMD'] = env['CM_RCLONE_CONFIG_CMD']
- rclone_copy_using = env.get('CM_RCLONE_COPY_USING', 'sync')
+ if env.get('MLC_RCLONE_CONFIG_CMD', '') != '':
+ env['MLC_DOWNLOAD_CONFIG_CMD'] = env['MLC_RCLONE_CONFIG_CMD']
+ rclone_copy_using = env.get('MLC_RCLONE_COPY_USING', 'sync')
if rclone_copy_using == "sync":
pre_clean = False
- if env["CM_HOST_OS_TYPE"] == "windows":
+ if env["MLC_HOST_OS_TYPE"] == "windows":
# have to modify the variable from url to temp_url if it is
# going to be used anywhere after this point
url = url.replace("%", "%%")
- temp_download_file = env['CM_DOWNLOAD_FILENAME'].replace(
+ temp_download_file = env['MLC_DOWNLOAD_FILENAME'].replace(
"%", "%%")
- env['CM_DOWNLOAD_CMD'] = f"rclone {rclone_copy_using} {q}{url}{q} {q}{os.path.join(os.getcwd(), temp_download_file)}{q} -P --error-on-no-transfer"
+ env['MLC_DOWNLOAD_CMD'] = f"rclone {rclone_copy_using} {q}{url}{q} {q}{os.path.join(os.getcwd(), temp_download_file)}{q} -P --error-on-no-transfer"
else:
- env['CM_DOWNLOAD_CMD'] = f"rclone {rclone_copy_using} {q}{url}{q} {q}{os.path.join(os.getcwd(), env['CM_DOWNLOAD_FILENAME'])}{q} -P --error-on-no-transfer"
+ env['MLC_DOWNLOAD_CMD'] = f"rclone {rclone_copy_using} {q}{url}{q} {q}{os.path.join(os.getcwd(), env['MLC_DOWNLOAD_FILENAME'])}{q} -P --error-on-no-transfer"
- filename = env['CM_DOWNLOAD_FILENAME']
- env['CM_DOWNLOAD_DOWNLOADED_FILENAME'] = filename
+ filename = env['MLC_DOWNLOAD_FILENAME']
+ env['MLC_DOWNLOAD_DOWNLOADED_FILENAME'] = filename
- filename = os.path.basename(env['CM_DOWNLOAD_FILENAME'])
+ filename = os.path.basename(env['MLC_DOWNLOAD_FILENAME'])
filepath = os.path.join(os.getcwd(), filename)
- env['CM_DOWNLOAD_DOWNLOADED_PATH'] = filepath
+ env['MLC_DOWNLOAD_DOWNLOADED_PATH'] = filepath
# verify checksum if file already present
- if env.get('CM_DOWNLOAD_CHECKSUM_FILE', '') != '':
- env['CM_DOWNLOAD_CHECKSUM_CMD'] = f"cd {q}{filepath}{q} {xsep} md5sum -c {x_c} {x}{q}{env['CM_DOWNLOAD_CHECKSUM_FILE']}{q}"
- elif env.get('CM_DOWNLOAD_CHECKSUM', '') != '':
+ if env.get('MLC_DOWNLOAD_CHECKSUM_FILE', '') != '':
+ env['MLC_DOWNLOAD_CHECKSUM_CMD'] = f"cd {q}{filepath}{q} {xsep} md5sum -c {x_c} {x}{q}{env['MLC_DOWNLOAD_CHECKSUM_FILE']}{q}"
+ elif env.get('MLC_DOWNLOAD_CHECKSUM', '') != '':
if os_info['platform'] == 'windows':
- env['CM_DOWNLOAD_CHECKSUM_CMD'] = "echo {} {}{} | md5sum {} -c -".format(
- env.get('CM_DOWNLOAD_CHECKSUM'), x, escape_special_chars(
- env['CM_DOWNLOAD_FILENAME']), x_c)
+ env['MLC_DOWNLOAD_CHECKSUM_CMD'] = "echo {} {}{} | md5sum {} -c -".format(
+ env.get('MLC_DOWNLOAD_CHECKSUM'), x, escape_special_chars(
+ env['MLC_DOWNLOAD_FILENAME']), x_c)
else:
- env['CM_DOWNLOAD_CHECKSUM_CMD'] = "echo {} {}{}{}{} | md5sum {} -c -".format(
- env.get('CM_DOWNLOAD_CHECKSUM'), x, q, env['CM_DOWNLOAD_FILENAME'], q, x_c)
+ env['MLC_DOWNLOAD_CHECKSUM_CMD'] = "echo {} {}{}{}{} | md5sum {} -c -".format(
+ env.get('MLC_DOWNLOAD_CHECKSUM'), x, q, env['MLC_DOWNLOAD_FILENAME'], q, x_c)
for i in range(1, 5):
- if env.get('CM_DOWNLOAD_CHECKSUM' + str(i), '') == '':
+ if env.get('MLC_DOWNLOAD_CHECKSUM' + str(i), '') == '':
break
if os_info['platform'] == 'windows':
- env['CM_DOWNLOAD_CHECKSUM_CMD'] += " || echo {} {}{} | md5sum {} -c -".format(
+ env['MLC_DOWNLOAD_CHECKSUM_CMD'] += " || echo {} {}{} | md5sum {} -c -".format(
env.get(
- 'CM_DOWNLOAD_CHECKSUM' +
+ 'MLC_DOWNLOAD_CHECKSUM' +
str(i)),
x,
escape_special_chars(
- env['CM_DOWNLOAD_FILENAME']),
+ env['MLC_DOWNLOAD_FILENAME']),
x_c)
else:
- env['CM_DOWNLOAD_CHECKSUM_CMD'] += " || echo {} {}{}{}{} | md5sum {} -c -".format(
+ env['MLC_DOWNLOAD_CHECKSUM_CMD'] += " || echo {} {}{}{}{} | md5sum {} -c -".format(
env.get(
- 'CM_DOWNLOAD_CHECKSUM' +
+ 'MLC_DOWNLOAD_CHECKSUM' +
str(i)),
x,
q,
- env['CM_DOWNLOAD_FILENAME'].replace(
+ env['MLC_DOWNLOAD_FILENAME'].replace(
"%",
"%%"),
q,
x_c)
- # print(env['CM_DOWNLOAD_CHECKSUM_CMD'])
+ # print(env['MLC_DOWNLOAD_CHECKSUM_CMD'])
else:
- env['CM_DOWNLOAD_CHECKSUM_CMD'] = ""
+ env['MLC_DOWNLOAD_CHECKSUM_CMD'] = ""
if not pre_clean:
- env['CM_PRE_DOWNLOAD_CMD'] = ''
+ env['MLC_PRE_DOWNLOAD_CMD'] = ''
if os_info['platform'] == 'windows' and env.get(
- 'CM_DOWNLOAD_CMD', '') != '':
- env['CM_DOWNLOAD_CMD'] = escape_special_chars(
- env['CM_DOWNLOAD_CMD'], tool)
+ 'MLC_DOWNLOAD_CMD', '') != '':
+ env['MLC_DOWNLOAD_CMD'] = escape_special_chars(
+ env['MLC_DOWNLOAD_CMD'], tool)
if pre_clean:
- env['CM_PRE_DOWNLOAD_CLEAN_CMD'] = "del /Q %CM_DOWNLOAD_FILENAME%"
+ env['MLC_PRE_DOWNLOAD_CLEAN_CMD'] = "del /Q %MLC_DOWNLOAD_FILENAME%"
# Check that if empty CMD, should add ""
- for x in ['CM_DOWNLOAD_CMD', 'CM_DOWNLOAD_CHECKSUM_CMD']:
+ for x in ['MLC_DOWNLOAD_CMD', 'MLC_DOWNLOAD_CHECKSUM_CMD']:
env[x + '_USED'] = 'YES' if env.get(x, '') != '' else 'NO'
else:
- env['CM_PRE_DOWNLOAD_CLEAN_CMD'] = "rm -f {}".format(
- env['CM_DOWNLOAD_FILENAME'])
+ env['MLC_PRE_DOWNLOAD_CLEAN_CMD'] = "rm -f {}".format(
+ env['MLC_DOWNLOAD_FILENAME'])
return {'return': 0}
@@ -311,23 +311,23 @@ def postprocess(i):
env = i['env']
- filepath = env['CM_DOWNLOAD_DOWNLOADED_PATH']
+ filepath = env['MLC_DOWNLOAD_DOWNLOADED_PATH']
if not os.path.exists(filepath):
return {
- 'return': 1, 'error': 'Downloaded path {} does not exist. Probably CM_DOWNLOAD_FILENAME is not set and CM_DOWNLOAD_URL given is not pointing to a file'.format(filepath)}
+ 'return': 1, 'error': 'Downloaded path {} does not exist. Probably MLC_DOWNLOAD_FILENAME is not set and MLC_DOWNLOAD_URL given is not pointing to a file'.format(filepath)}
- if env.get('CM_DOWNLOAD_RENAME_FILE', '') != '':
+ if env.get('MLC_DOWNLOAD_RENAME_FILE', '') != '':
file_dir = os.path.dirname(filepath)
- new_file_name = env['CM_DOWNLOAD_RENAME_FILE']
+ new_file_name = env['MLC_DOWNLOAD_RENAME_FILE']
new_file_path = os.path.join(file_dir, new_file_name)
os.rename(filepath, new_file_path)
filepath = new_file_path
- if env.get('CM_DOWNLOAD_FINAL_ENV_NAME', '') != '':
- env[env['CM_DOWNLOAD_FINAL_ENV_NAME']] = filepath
+ if env.get('MLC_DOWNLOAD_FINAL_ENV_NAME', '') != '':
+ env[env['MLC_DOWNLOAD_FINAL_ENV_NAME']] = filepath
- env['CM_GET_DEPENDENT_CACHED_PATH'] = filepath
+ env['MLC_GET_DEPENDENT_CACHED_PATH'] = filepath
# Since may change directory, check if need to clean some temporal files
automation.clean_some_tmp_files({'env': env})
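
The wget, curl, and gdown branches above all build the same mirror-fallback shell chain out of the optional `MLC_DOWNLOAD_URL1..4` variables. A self-contained sketch of that construction (hypothetical URLs; the real code additionally handles quoting and Windows escaping):

```python
del_cmd = 'rm -f'  # the script picks `del /Q` on Windows instead
env = {
    'MLC_DOWNLOAD_URL': 'https://example.com/a.zip',          # hypothetical
    'MLC_DOWNLOAD_URL1': 'https://mirror.example.com/a.zip',  # hypothetical
    'MLC_DOWNLOAD_FILENAME': 'a.zip',
}

cmd = f"wget -nc {env['MLC_DOWNLOAD_URL']}"
for i in range(1, 5):
    url = env.get('MLC_DOWNLOAD_URL' + str(i), '')
    if url == '':
        break
    # on failure, delete any partial file, then retry from the next mirror
    cmd += f" || (({del_cmd} {env['MLC_DOWNLOAD_FILENAME']} || true) && wget -nc {url})"

print(cmd)
# wget -nc https://example.com/a.zip || ((rm -f a.zip || true) && wget -nc https://mirror.example.com/a.zip)
```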
diff --git a/script/download-file/meta.yaml b/script/download-file/meta.yaml
index aedf0ab58..832275c1c 100644
--- a/script/download-file/meta.yaml
+++ b/script/download-file/meta.yaml
@@ -5,30 +5,30 @@ cache: false
can_force_cache: true
category: DevOps automation
default_env:
- CM_RCLONE_COPY_USING: sync
+ MLC_RCLONE_COPY_USING: sync
deps:
- tags: detect,os
- enable_if_env:
- CM_DOWNLOAD_CHECKSUM:
+ MLC_DOWNLOAD_CHECKSUM:
- 'on'
- CM_HOST_OS_FLAVOR:
+ MLC_HOST_OS_FLAVOR:
- macos
tags: get,generic-sys-util,_md5sha1sum
input_description: {}
input_mapping:
- download_path: CM_DOWNLOAD_PATH
- from: CM_DOWNLOAD_LOCAL_FILE_PATH
- local_path: CM_DOWNLOAD_LOCAL_FILE_PATH
- md5sum: CM_DOWNLOAD_CHECKSUM
- output_file: CM_DOWNLOAD_FILENAME
- store: CM_DOWNLOAD_PATH
- url: CM_DOWNLOAD_URL
- verify: CM_VERIFY_SSL
- verify_ssl: CM_VERIFY_SSL
+ download_path: MLC_DOWNLOAD_PATH
+ from: MLC_DOWNLOAD_LOCAL_FILE_PATH
+ local_path: MLC_DOWNLOAD_LOCAL_FILE_PATH
+ md5sum: MLC_DOWNLOAD_CHECKSUM
+ output_file: MLC_DOWNLOAD_FILENAME
+ store: MLC_DOWNLOAD_PATH
+ url: MLC_DOWNLOAD_URL
+ verify: MLC_VERIFY_SSL
+ verify_ssl: MLC_VERIFY_SSL
new_env_keys:
-- CM_DOWNLOAD_DOWNLOADED_PATH
-- <<<CM_DOWNLOAD_FINAL_ENV_NAME>>>
-- CM_GET_DEPENDENT_CACHED_PATH
+- MLC_DOWNLOAD_DOWNLOADED_PATH
+- <<<MLC_DOWNLOAD_FINAL_ENV_NAME>>>
+- MLC_GET_DEPENDENT_CACHED_PATH
new_state_keys: []
post_deps: []
prehook_deps: []
@@ -42,38 +42,38 @@ variations:
cmutil:
default: true
env:
- CM_DOWNLOAD_TOOL: cmutil
+ MLC_DOWNLOAD_TOOL: cmutil
group: download-tool
curl:
default_env:
- CM_DOWNLOAD_CURL_EMULATE_BROWSER: 'no'
+ MLC_DOWNLOAD_CURL_EMULATE_BROWSER: 'no'
env:
- CM_DOWNLOAD_TOOL: curl
+ MLC_DOWNLOAD_TOOL: curl
group: download-tool
gdown:
deps:
- tags: get,generic-python-lib,_package.gdown
env:
- CM_DOWNLOAD_TOOL: gdown
+ MLC_DOWNLOAD_TOOL: gdown
group: download-tool
rclone:
deps:
- tags: get,rclone
- enable_if_env:
- CM_RCLONE_CONFIG_NAME:
+ MLC_RCLONE_CONFIG_NAME:
- 'on'
tags: get,rclone-config
update_tags_from_env_with_prefix:
_:
- - CM_RCLONE_CONFIG_NAME
+ - MLC_RCLONE_CONFIG_NAME
env:
- CM_DOWNLOAD_TOOL: rclone
+ MLC_DOWNLOAD_TOOL: rclone
group: download-tool
url.#:
env:
- CM_DOWNLOAD_URL: '#'
+ MLC_DOWNLOAD_URL: '#'
wget:
env:
- CM_DOWNLOAD_TOOL: wget
+ MLC_DOWNLOAD_TOOL: wget
group: download-tool
versions: {}
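
`input_mapping` is what lets a CLI flag such as `--md5sum=...` become `MLC_DOWNLOAD_CHECKSUM` in the script's environment. A minimal sketch of that translation, assuming a flat dict of parsed flags rather than the real automation API:

```python
# subset of the mapping above
input_mapping = {
    'url': 'MLC_DOWNLOAD_URL',
    'md5sum': 'MLC_DOWNLOAD_CHECKSUM',
    'output_file': 'MLC_DOWNLOAD_FILENAME',
    'verify': 'MLC_VERIFY_SSL',
}

def apply_input_mapping(flags: dict, env: dict) -> None:
    """Copy recognized CLI flags into their env-variable equivalents."""
    for flag, env_key in input_mapping.items():
        if flag in flags:
            env[env_key] = flags[flag]

env = {}
apply_input_mapping({'url': 'https://example.com/f.zip', 'verify': 'no'}, env)
print(env)  # {'MLC_DOWNLOAD_URL': 'https://example.com/f.zip', 'MLC_VERIFY_SSL': 'no'}
```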
diff --git a/script/download-file/run.bat b/script/download-file/run.bat
index 5449c9ecf..dcd7603c9 100644
--- a/script/download-file/run.bat
+++ b/script/download-file/run.bat
@@ -5,33 +5,33 @@ rem If MD5 is wrong, download again!
rem Next line allows ERRORLEVEL inside if statements!
setlocal enabledelayedexpansion
-if NOT "%CM_DOWNLOAD_CONFIG_CMD%" == "" (
+if NOT "%MLC_DOWNLOAD_CONFIG_CMD%" == "" (
echo.
- echo %CM_DOWNLOAD_CONFIG_CMD%
+ echo %MLC_DOWNLOAD_CONFIG_CMD%
echo.
- %CM_DOWNLOAD_CONFIG_CMD%
+ %MLC_DOWNLOAD_CONFIG_CMD%
IF !ERRORLEVEL! NEQ 0 EXIT !ERRORLEVEL!
)
set require_download=1
-if not "%CM_DOWNLOAD_LOCAL_FILE_PATH%" == "" (
+if not "%MLC_DOWNLOAD_LOCAL_FILE_PATH%" == "" (
set require_download=0
)
-if "%CM_DOWNLOAD_TOOL%" == "cmutil" (
+if "%MLC_DOWNLOAD_TOOL%" == "cmutil" (
set require_download=0
)
-if exist "%CM_DOWNLOAD_DOWNLOADED_PATH%" (
- if "%CM_DOWNLOAD_CHECKSUM_CMD_USED%" == "YES" (
+if exist "%MLC_DOWNLOAD_DOWNLOADED_PATH%" (
+ if "%MLC_DOWNLOAD_CHECKSUM_CMD_USED%" == "YES" (
echo.
- echo %CM_DOWNLOAD_CHECKSUM_CMD%
- cmd /c %CM_DOWNLOAD_CHECKSUM_CMD%
+ echo %MLC_DOWNLOAD_CHECKSUM_CMD%
+ cmd /c %MLC_DOWNLOAD_CHECKSUM_CMD%
IF !ERRORLEVEL! NEQ 0 (
- if NOT "%CM_DOWNLOAD_LOCAL_FILE_PATH%" == "" exit 1
- if "%CM_DOWNLOAD_CMD_USED%" == "NO" exit 1
+ if NOT "%MLC_DOWNLOAD_LOCAL_FILE_PATH%" == "" exit 1
+ if "%MLC_DOWNLOAD_CMD_USED%" == "NO" exit 1
) else (
set require_download=0
)
@@ -40,17 +40,17 @@ if exist "%CM_DOWNLOAD_DOWNLOADED_PATH%" (
if "!require_download!" == "1" (
echo.
- cmd /c %CM_PRE_DOWNLOAD_CLEAN_CMD%
+ cmd /c %MLC_PRE_DOWNLOAD_CLEAN_CMD%
echo.
- echo %CM_DOWNLOAD_CMD%
- cmd /c %CM_DOWNLOAD_CMD%
+ echo %MLC_DOWNLOAD_CMD%
+ cmd /c %MLC_DOWNLOAD_CMD%
IF !ERRORLEVEL! NEQ 0 EXIT !ERRORLEVEL!
- if "%CM_DOWNLOAD_CHECKSUM_CMD_USED%" == "YES" (
+ if "%MLC_DOWNLOAD_CHECKSUM_CMD_USED%" == "YES" (
echo.
- echo %CM_DOWNLOAD_CHECKSUM_CMD%
- cmd /c %CM_DOWNLOAD_CHECKSUM_CMD%
+ echo %MLC_DOWNLOAD_CHECKSUM_CMD%
+ cmd /c %MLC_DOWNLOAD_CHECKSUM_CMD%
IF !ERRORLEVEL! NEQ 0 EXIT 1
)
)
diff --git a/script/download-file/run.sh b/script/download-file/run.sh
index c02e44f00..b737ea34e 100644
--- a/script/download-file/run.sh
+++ b/script/download-file/run.sh
@@ -1,32 +1,32 @@
#!/bin/bash
# Execute config command if it exists
-if [[ -n ${CM_DOWNLOAD_CONFIG_CMD} ]]; then
- echo -e "\nExecuting: ${CM_DOWNLOAD_CONFIG_CMD}"
- eval "${CM_DOWNLOAD_CONFIG_CMD}" || exit $?
+if [[ -n ${MLC_DOWNLOAD_CONFIG_CMD} ]]; then
+ echo -e "\nExecuting: ${MLC_DOWNLOAD_CONFIG_CMD}"
+ eval "${MLC_DOWNLOAD_CONFIG_CMD}" || exit $?
fi
# Assume download is required by default
require_download=1
# No download needed if a local file path is specified or the tool is 'cmutil'
-if [[ -n "${CM_DOWNLOAD_LOCAL_FILE_PATH}" || ${CM_DOWNLOAD_TOOL} == "cmutil" ]]; then
+if [[ -n "${MLC_DOWNLOAD_LOCAL_FILE_PATH}" || ${MLC_DOWNLOAD_TOOL} == "cmutil" ]]; then
require_download=0
fi
# If the file exists, check the checksum if necessary
-if [[ -e "${CM_DOWNLOAD_DOWNLOADED_PATH}" && -n "${CM_DOWNLOAD_CHECKSUM_CMD}" ]]; then
- echo -e "\nChecking checksum: ${CM_DOWNLOAD_CHECKSUM_CMD}"
- eval "${CM_DOWNLOAD_CHECKSUM_CMD}"
+if [[ -e "${MLC_DOWNLOAD_DOWNLOADED_PATH}" && -n "${MLC_DOWNLOAD_CHECKSUM_CMD}" ]]; then
+ echo -e "\nChecking checksum: ${MLC_DOWNLOAD_CHECKSUM_CMD}"
+ eval "${MLC_DOWNLOAD_CHECKSUM_CMD}"
if [[ $? -ne 0 ]]; then
# If the checksum fails, handle errors based on whether the file is local
- if [[ -n "${CM_DOWNLOAD_LOCAL_FILE_PATH}" ]]; then
+ if [[ -n "${MLC_DOWNLOAD_LOCAL_FILE_PATH}" ]]; then
echo "Checksum failed for local file. Exiting."
exit 1
else
echo "Checksum failed. Marking for re-download."
- CM_PRE_DOWNLOAD_CLEAN=true
+ MLC_PRE_DOWNLOAD_CLEAN=true
fi
else
# If checksum succeeds, no download is required
@@ -39,20 +39,20 @@ if [[ ${require_download} == 1 ]]; then
echo ""
# If a pre-download clean command is specified and needed, execute it
- if [[ -n "${CM_PRE_DOWNLOAD_CLEAN}" && "${CM_PRE_DOWNLOAD_CLEAN,,}" != "false" ]]; then
- echo "Executing pre-download clean: ${CM_PRE_DOWNLOAD_CLEAN_CMD}"
- eval "${CM_PRE_DOWNLOAD_CLEAN_CMD}" || exit $?
+ if [[ -n "${MLC_PRE_DOWNLOAD_CLEAN}" && "${MLC_PRE_DOWNLOAD_CLEAN,,}" != "false" ]]; then
+ echo "Executing pre-download clean: ${MLC_PRE_DOWNLOAD_CLEAN_CMD}"
+ eval "${MLC_PRE_DOWNLOAD_CLEAN_CMD}" || exit $?
fi
# Execute the download command
- echo "Downloading: ${CM_DOWNLOAD_CMD}"
- eval "${CM_DOWNLOAD_CMD}" || exit $?
+ echo "Downloading: ${MLC_DOWNLOAD_CMD}"
+ eval "${MLC_DOWNLOAD_CMD}" || exit $?
fi
# Verify checksum again if necessary
-if [[ ${CM_DOWNLOAD_TOOL} == "cmutil" || ${require_download} == 1 ]]; then
- if [[ -n "${CM_DOWNLOAD_CHECKSUM_CMD}" ]]; then
- echo -e "\nVerifying checksum after download: ${CM_DOWNLOAD_CHECKSUM_CMD}"
- eval "${CM_DOWNLOAD_CHECKSUM_CMD}" || exit $?
+if [[ ${MLC_DOWNLOAD_TOOL} == "cmutil" || ${require_download} == 1 ]]; then
+ if [[ -n "${MLC_DOWNLOAD_CHECKSUM_CMD}" ]]; then
+ echo -e "\nVerifying checksum after download: ${MLC_DOWNLOAD_CHECKSUM_CMD}"
+ eval "${MLC_DOWNLOAD_CHECKSUM_CMD}" || exit $?
fi
fi
diff --git a/script/download-file/tests/download-file.bat b/script/download-file/tests/download-file.bat
index 442150282..dbfcfc5ce 100644
--- a/script/download-file/tests/download-file.bat
+++ b/script/download-file/tests/download-file.bat
@@ -1,2 +1,2 @@
-cmr "download file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _wget" -j --env.CM_VERIFY_SSL=False --env.CM_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1
+cmr "download file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _wget" -j --env.MLC_VERIFY_SSL=False --env.MLC_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1
diff --git a/script/download-file/tests/download-file2.bat b/script/download-file/tests/download-file2.bat
index 2032bc177..6d919c8c1 100644
--- a/script/download-file/tests/download-file2.bat
+++ b/script/download-file/tests/download-file2.bat
@@ -1 +1 @@
-cmr "download file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _cmutil" -j --env.CM_VERIFY_SSL=False --env.CM_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1
+cmr "download file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _cmutil" -j --env.MLC_VERIFY_SSL=False --env.MLC_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1
diff --git a/script/download-torrent/customize.py b/script/download-torrent/customize.py
index e194e7ff7..0006d5680 100644
--- a/script/download-torrent/customize.py
+++ b/script/download-torrent/customize.py
@@ -12,10 +12,10 @@ def preprocess(i):
automation = i['automation']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
- if not env.get('CM_TORRENT_DOWNLOADED_FILE_NAME'):
- return {'return': 1, 'error': 'CM_TORRENT_DOWNLOADED_FILE_NAME is not set'}
+ if not env.get('MLC_TORRENT_DOWNLOADED_FILE_NAME'):
+ return {'return': 1, 'error': 'MLC_TORRENT_DOWNLOADED_FILE_NAME is not set'}
return {'return': 0}
@@ -24,14 +24,14 @@ def postprocess(i):
env = i['env']
torrent_downloaded_path = os.path.join(
- env['CM_TORRENT_DOWNLOADED_DIR'],
- env['CM_TORRENT_DOWNLOADED_NAME'])
- env['CM_TORRENT_DOWNLOADED_PATH'] = torrent_downloaded_path
+ env['MLC_TORRENT_DOWNLOADED_DIR'],
+ env['MLC_TORRENT_DOWNLOADED_NAME'])
+ env['MLC_TORRENT_DOWNLOADED_PATH'] = torrent_downloaded_path
- if 'CM_TORRENT_DOWNLOADED_PATH_ENV_KEY' in env:
- key = env['CM_TORRENT_DOWNLOADED_PATH_ENV_KEY']
+ if 'MLC_TORRENT_DOWNLOADED_PATH_ENV_KEY' in env:
+ key = env['MLC_TORRENT_DOWNLOADED_PATH_ENV_KEY']
env[key] = torrent_downloaded_path
- env['CM_GET_DEPENDENT_CACHED_PATH'] = torrent_downloaded_path
+ env['MLC_GET_DEPENDENT_CACHED_PATH'] = torrent_downloaded_path
return {'return': 0}
diff --git a/script/download-torrent/meta.yaml b/script/download-torrent/meta.yaml
index d2e83b8fe..632f007f6 100644
--- a/script/download-torrent/meta.yaml
+++ b/script/download-torrent/meta.yaml
@@ -4,15 +4,15 @@ automation_uid: 5b4e0237da074764
cache: true
category: DevOps automation
default_env:
- CM_TORRENT_WAIT_UNTIL_COMPLETED: 'no'
+ MLC_TORRENT_WAIT_UNTIL_COMPLETED: 'no'
deps:
- tags: get,generic-sys-util,_transmission
input_description: {}
input_mapping:
- wait: CM_TORRENT_WAIT_UNTIL_COMPLETED
+ wait: MLC_TORRENT_WAIT_UNTIL_COMPLETED
new_env_keys:
-- CM_TORRENT_DOWNLOADED_PATH
-- <<<CM_TORRENT_DOWNLOADED_PATH_ENV_KEY>>>
+- MLC_TORRENT_DOWNLOADED_PATH
+- <<<MLC_TORRENT_DOWNLOADED_PATH_ENV_KEY>>>
new_state_keys: []
post_deps: []
posthook_deps: []
@@ -25,5 +25,5 @@ uid: 69b752c5618e45bb
variations:
torrent.#:
env:
- CM_TORRENT_FILE: '#'
+ MLC_TORRENT_FILE: '#'
versions: {}
diff --git a/script/download-torrent/run.sh b/script/download-torrent/run.sh
index c3d639ff1..c00afb96d 100644
--- a/script/download-torrent/run.sh
+++ b/script/download-torrent/run.sh
@@ -1,7 +1,7 @@
#!/bin/bash
chmod 777 ${PWD}
-#transmission-remote --no-auth --download-dir ${PWD} -a ${CM_TORRENT_FILE}
-cmd="transmission-remote --download-dir ${PWD} -a ${CM_TORRENT_FILE}"
+#transmission-remote --no-auth --download-dir ${PWD} -a ${MLC_TORRENT_FILE}
+cmd="transmission-remote --download-dir ${PWD} -a ${MLC_TORRENT_FILE}"
echo $cmd
eval $cmd
test $? -eq 0 || exit $?
@@ -11,10 +11,10 @@ echo $cmd
eval $cmd
test $? -eq 0 || exit $?
-if [[ ${CM_TORRENT_WAIT_UNTIL_COMPLETED} == "yes" ]]; then
+if [[ ${MLC_TORRENT_WAIT_UNTIL_COMPLETED} == "yes" ]]; then
while true;
do
- out=`transmission-remote -l |grep ${CM_TORRENT_DOWNLOADED_FILE_NAME} | grep "100%"`
+ out=`transmission-remote -l |grep ${MLC_TORRENT_DOWNLOADED_FILE_NAME} | grep "100%"`
if [[ -z $out ]]; then
transmission-remote -l
sleep 6
@@ -24,11 +24,11 @@ if [[ ${CM_TORRENT_WAIT_UNTIL_COMPLETED} == "yes" ]]; then
done
fi
-id=`transmission-remote -l |grep ${CM_TORRENT_DOWNLOADED_FILE_NAME} |tr -s ' ' | cut -d' ' -f2`
+id=`transmission-remote -l |grep ${MLC_TORRENT_DOWNLOADED_FILE_NAME} |tr -s ' ' | cut -d' ' -f2`
test $? -eq 0 || exit $?
location=`transmission-remote -t${id} -i |grep Location |cut -d':' -f2 |tr -d ' '`
test $? -eq 0 || exit $?
-echo "CM_TORRENT_DOWNLOADED_DIR=$location">> tmp-run-env.out
+echo "MLC_TORRENT_DOWNLOADED_DIR=$location">> tmp-run-env.out
name=`transmission-remote -t${id} -i |grep Name |cut -d':' -f2 |tr -d ' '`
test $? -eq 0 || exit $?
-echo "CM_TORRENT_DOWNLOADED_NAME=$name">> tmp-run-env.out
+echo "MLC_TORRENT_DOWNLOADED_NAME=$name">> tmp-run-env.out
diff --git a/script/draw-graph-from-json-data/customize.py b/script/draw-graph-from-json-data/customize.py
index 8fafad78f..77affa7e3 100644
--- a/script/draw-graph-from-json-data/customize.py
+++ b/script/draw-graph-from-json-data/customize.py
@@ -12,15 +12,15 @@ def preprocess(i):
automation = i['automation']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
- env['CM_RUN_CMD'] = f"""{env['CM_PYTHON_BIN_WITH_PATH']} {os.path.join(env['CM_TMP_CURRENT_SCRIPT_PATH'],"process-cm-deps.py")} {env['CM_JSON_INPUT_FILE']}"""
+ env['MLC_RUN_CMD'] = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(env['MLC_TMP_CURRENT_SCRIPT_PATH'],"process-mlc-deps.py")} {env['MLC_JSON_INPUT_FILE']}"""
- if env.get('CM_OUTPUT_IMAGE_PATH', '') != '':
- env['CM_RUN_CMD'] += f""" --output_image {env['CM_OUTPUT_IMAGE_PATH']}"""
+ if env.get('MLC_OUTPUT_IMAGE_PATH', '') != '':
+ env['MLC_RUN_CMD'] += f""" --output_image {env['MLC_OUTPUT_IMAGE_PATH']}"""
- if env.get('CM_OUTPUT_MERMAID_PATH', '') != '':
- env['CM_RUN_CMD'] += f""" --output_mermaid {env['CM_OUTPUT_MERMAID_PATH']}"""
+ if env.get('MLC_OUTPUT_MERMAID_PATH', '') != '':
+ env['MLC_RUN_CMD'] += f""" --output_mermaid {env['MLC_OUTPUT_MERMAID_PATH']}"""
return {'return': 0}
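
The f-string above splices three env values into one shell command. A runnable sketch with hypothetical stand-ins for the values the automation injects:

```python
import os

env = {
    'MLC_PYTHON_BIN_WITH_PATH': '/usr/bin/python3',  # hypothetical values
    'MLC_TMP_CURRENT_SCRIPT_PATH': '/opt/scripts/draw-graph-from-json-data',
    'MLC_JSON_INPUT_FILE': 'deps.json',
    'MLC_OUTPUT_IMAGE_PATH': 'graph.png',
}

cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(env['MLC_TMP_CURRENT_SCRIPT_PATH'], 'process-mlc-deps.py')} {env['MLC_JSON_INPUT_FILE']}"""
if env.get('MLC_OUTPUT_IMAGE_PATH', '') != '':
    cmd += f" --output_image {env['MLC_OUTPUT_IMAGE_PATH']}"

print(cmd)
```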
diff --git a/script/draw-graph-from-json-data/meta.yaml b/script/draw-graph-from-json-data/meta.yaml
index eb1d1a157..971fe1027 100644
--- a/script/draw-graph-from-json-data/meta.yaml
+++ b/script/draw-graph-from-json-data/meta.yaml
@@ -9,9 +9,9 @@ tags:
- from-json-data
uid: 2ed1ebcb6be548fd
input_mapping:
- input: CM_JSON_INPUT_FILE
- json_input_file: CM_JSON_INPUT_FILE
- output_image_path: CM_OUTPUT_IMAGE_PATH
+ input: MLC_JSON_INPUT_FILE
+ json_input_file: MLC_JSON_INPUT_FILE
+ output_image_path: MLC_OUTPUT_IMAGE_PATH
deps:
- tags: get,python3
names:
diff --git a/script/draw-graph-from-json-data/run.sh b/script/draw-graph-from-json-data/run.sh
index 4c23c380e..32cf4d51e 100644
--- a/script/draw-graph-from-json-data/run.sh
+++ b/script/draw-graph-from-json-data/run.sh
@@ -1,17 +1,17 @@
#!/bin/bash
-#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH}
#To export any variable
#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
-#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
echo "Running: "
-echo "${CM_RUN_CMD}"
+echo "${MLC_RUN_CMD}"
echo ""
-if [[ ${CM_FAKE_RUN} != "yes" ]]; then
- eval "${CM_RUN_CMD}"
+if [[ ${MLC_FAKE_RUN} != "yes" ]]; then
+ eval "${MLC_RUN_CMD}"
test $? -eq 0 || exit 1
fi
diff --git a/script/dump-pip-freeze/customize.py b/script/dump-pip-freeze/customize.py
index 9c2940d1e..617e387df 100644
--- a/script/dump-pip-freeze/customize.py
+++ b/script/dump-pip-freeze/customize.py
@@ -12,11 +12,11 @@ def preprocess(i):
automation = i['automation']
- if env.get('CM_DUMP_RAW_PIP_FREEZE_FILE_PATH', '') == '':
- env['CM_DUMP_RAW_PIP_FREEZE_FILE_PATH'] = os.path.join(
+ if env.get('MLC_DUMP_RAW_PIP_FREEZE_FILE_PATH', '') == '':
+ env['MLC_DUMP_RAW_PIP_FREEZE_FILE_PATH'] = os.path.join(
os.getcwd(), "tmp-pip-freeze")
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
return {'return': 0}
@@ -31,7 +31,7 @@ def postprocess(i):
automation = i['automation']
pip_freeze = {}
- pip_freeze_file = env['CM_DUMP_RAW_PIP_FREEZE_FILE_PATH']
+ pip_freeze_file = env['MLC_DUMP_RAW_PIP_FREEZE_FILE_PATH']
if not os.path.isfile(pip_freeze_file):
# If was not created, sometimes issues on Windows
# There is another workaround
diff --git a/script/dump-pip-freeze/dump.py b/script/dump-pip-freeze/dump.py
index c6d4dc2ea..9fe8e3ebd 100644
--- a/script/dump-pip-freeze/dump.py
+++ b/script/dump-pip-freeze/dump.py
@@ -2,7 +2,7 @@
from pip._internal.operations import freeze
pip_freeze_out = os.environ.get(
- 'CM_DUMP_RAW_PIP_FREEZE_FILE_PATH',
+ 'MLC_DUMP_RAW_PIP_FREEZE_FILE_PATH',
'tmp-pip-freeze')
if os.path.isfile(pip_freeze_out):
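
Note that dump.py reaches into pip's private `pip._internal` API, which can break across pip releases. A more defensive sketch shells out to the public CLI instead (an alternative, not what the script does):

```python
import subprocess
import sys

def pip_freeze_lines() -> list:
    """Return `pip freeze` output via the stable CLI interface."""
    out = subprocess.run([sys.executable, '-m', 'pip', 'freeze'],
                         capture_output=True, text=True, check=True).stdout
    return out.splitlines()

# e.g. write them to the same file the script uses:
# with open('tmp-pip-freeze', 'w') as f:
#     f.write('\n'.join(pip_freeze_lines()))
```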
diff --git a/script/dump-pip-freeze/run.bat b/script/dump-pip-freeze/run.bat
index b323ddc22..18f6b56e5 100644
--- a/script/dump-pip-freeze/run.bat
+++ b/script/dump-pip-freeze/run.bat
@@ -1,4 +1,4 @@
-if not "%CM_FAKE_RUN%" == "yes" (
- %CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\dump.py
+if not "%MLC_FAKE_RUN%" == "yes" (
+ %MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\dump.py
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
)
diff --git a/script/dump-pip-freeze/run.sh b/script/dump-pip-freeze/run.sh
index a1cdb52eb..8d4d76e1a 100644
--- a/script/dump-pip-freeze/run.sh
+++ b/script/dump-pip-freeze/run.sh
@@ -1,11 +1,11 @@
#!/bin/bash
-#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH}
#To export any variable
#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
-#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
@@ -17,12 +17,12 @@ function run() {
echo "Running: "
echo "$1"
echo ""
- if [[ ${CM_FAKE_RUN} != 'yes' ]]; then
+ if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then
eval "$1"
exit_if_error
fi
}
#Add your run commands here...
-# run "$CM_RUN_CMD"
-run "${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/dump.py"
+# run "$MLC_RUN_CMD"
+run "${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/dump.py"
diff --git a/script/extract-file/README-extra.md b/script/extract-file/README-extra.md
index fbd8ccaf4..b227eadca 100644
--- a/script/extract-file/README-extra.md
+++ b/script/extract-file/README-extra.md
@@ -37,20 +37,20 @@ cmr "extract file _keep" --input=coco-2017-val-annotations.zip -j
```json
"new_env": {
- "CM_EXTRACT_EXTRACTED_PATH": "D:\\Work99.3 readme\\xyz",
- "CM_GET_DEPENDENT_CACHED_PATH": "D:\\Work99.3 readme\\xyz"
+ "MLC_EXTRACT_EXTRACTED_PATH": "D:\\Work99.3 readme\\xyz",
+ "MLC_GET_DEPENDENT_CACHED_PATH": "D:\\Work99.3 readme\\xyz"
},
```
#### Input flags and equivalent environment variables
-* `--input` or `--env.CM_EXTRACT_FILEPATH` - input file
-* `--extract_path` or `--to` or `--env.CM_EXTRACT_PATH` - where to extract files (--input should have full path then)
-* `--extra_folder` or `--env.CM_EXTRACT_TO_FOLDER` - extra directory when extracting file (to avoid messing up current directory)
+* `--input` or `--env.MLC_EXTRACT_FILEPATH` - input file
+* `--extract_path` or `--to` or `--env.MLC_EXTRACT_PATH` - where to extract files (--input should have full path then)
+* `--extra_folder` or `--env.MLC_EXTRACT_TO_FOLDER` - extra directory when extracting file (to avoid messing up current directory)
#### Variations
-* `_keep` or `_no-remove-extracted` or `--env.CM_EXTRACT_REMOVE_EXTRACTED=no` - keep archive file (it will be deleted by default)
+* `_keep` or `_no-remove-extracted` or `--env.MLC_EXTRACT_REMOVE_EXTRACTED=no` - keep archive file (it will be deleted by default)
@@ -59,7 +59,7 @@ cmr "extract file _keep" --input=coco-2017-val-annotations.zip -j
Note that you need to provide a full path to the archive file if you want to extract it to some directory:
```bash
-cmr "extract file _keep" --input="$PWD/coco-2017-val-annotations.zip" --extract_path="$HOME/cm-test"
+cmr "extract file _keep" --input="$PWD/coco-2017-val-annotations.zip" --extract_path="$HOME/mlc-test"
```
### Add extra folder to extracted files
@@ -85,7 +85,7 @@ cmr "download file _url.https://cKnowledge.org/test/captions_val2017.json.gz"
Then extract it and test MD5SUM as follows:
```bash
-cmr "extract file _keep _path.captions_val2017.json.gz" --env.CM_EXTRACT_EXTRACTED_CHECKSUM=b7bec29ab7bd8971ae4cafc2390a658f -j
+cmr "extract file _keep _path.captions_val2017.json.gz" --env.MLC_EXTRACT_EXTRACTED_CHECKSUM=b7bec29ab7bd8971ae4cafc2390a658f -j
```
@@ -93,7 +93,7 @@ cmr "extract file _keep _path.captions_val2017.json.gz" --env.CM_EXTRACT_EXTRACT
Some workflows may need to use a different filename than original. You can change it as follows:
```bash
-cmr "extract file _keep _path.captions_val2017.json.gz" --env.CM_EXTRACT_EXTRACTED_FILENAME=new-file.json --env.CM_EXTRACT_EXTRACTED_CHECKSUM=b7bec29ab7bd8971ae4cafc2390a658f
+cmr "extract file _keep _path.captions_val2017.json.gz" --env.MLC_EXTRACT_EXTRACTED_FILENAME=new-file.json --env.MLC_EXTRACT_EXTRACTED_CHECKSUM=b7bec29ab7bd8971ae4cafc2390a658f
```
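
For `.gz` inputs, `_keep` amounts to `gzip -d -k` plus an optional rename of the output (as in the `MLC_EXTRACT_EXTRACTED_FILENAME` example above). A pure-Python sketch of the same behavior:

```python
import gzip
import shutil

def gunzip_keep(src: str, dst: str) -> None:
    # Decompress `src` to `dst` while leaving the archive in place,
    # mirroring `gzip -d -k ... > dst`.
    with gzip.open(src, 'rb') as fin, open(dst, 'wb') as fout:
        shutil.copyfileobj(fin, fout)

# gunzip_keep('captions_val2017.json.gz', 'new-file.json')
```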
diff --git a/script/extract-file/customize.py b/script/extract-file/customize.py
index f4489105d..9e15efbfa 100644
--- a/script/extract-file/customize.py
+++ b/script/extract-file/customize.py
@@ -21,21 +21,21 @@ def preprocess(i):
automation = i['automation']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
- filename = env.get('CM_EXTRACT_FILEPATH', '')
+ filename = env.get('MLC_EXTRACT_FILEPATH', '')
if filename == '':
return {
- 'return': 1, 'error': 'Extract with no download requested and CM_EXTRACT_FILEPATH is not set'}
+ 'return': 1, 'error': 'Extract with no download requested and MLC_EXTRACT_FILEPATH is not set'}
if windows:
filename = filename.replace("%", "%%")
- env['CM_EXTRACT_FILENAME'] = filename
+ env['MLC_EXTRACT_FILENAME'] = filename
# Check if extract to some path outside CM cache (to reuse large files
# later if cache is cleaned)
- extract_path = env.get('CM_EXTRACT_PATH', '')
+ extract_path = env.get('MLC_EXTRACT_PATH', '')
if extract_path != '':
if not os.path.exists(extract_path):
os.makedirs(extract_path, exist_ok=True)
@@ -44,114 +44,114 @@ def preprocess(i):
# By default remove archive after extraction
remove_extracted = False if env.get(
- 'CM_EXTRACT_REMOVE_EXTRACTED',
+ 'MLC_EXTRACT_REMOVE_EXTRACTED',
'').lower() == 'no' else True
if filename.endswith(".zip") or filename.endswith(".pth"):
- env['CM_EXTRACT_TOOL'] = "unzip"
+ env['MLC_EXTRACT_TOOL'] = "unzip"
elif filename.endswith(".tar.gz"):
if windows:
x = '"' if ' ' in filename else ''
- env['CM_EXTRACT_CMD0'] = 'gzip -d ' + x + filename + x
+ env['MLC_EXTRACT_CMD0'] = 'gzip -d ' + x + filename + x
filename = filename[:-3] # leave only .tar
- env['CM_EXTRACT_TOOL_OPTIONS'] = ' -xvf'
- env['CM_EXTRACT_TOOL'] = 'tar '
+ env['MLC_EXTRACT_TOOL_OPTIONS'] = ' -xvf'
+ env['MLC_EXTRACT_TOOL'] = 'tar '
elif os_info['platform'] == 'darwin':
- env['CM_EXTRACT_TOOL_OPTIONS'] = ' -xvzf '
- env['CM_EXTRACT_TOOL'] = 'tar '
+ env['MLC_EXTRACT_TOOL_OPTIONS'] = ' -xvzf '
+ env['MLC_EXTRACT_TOOL'] = 'tar '
else:
- env['CM_EXTRACT_TOOL_OPTIONS'] = ' --skip-old-files -xvzf '
- env['CM_EXTRACT_TOOL'] = 'tar '
+ env['MLC_EXTRACT_TOOL_OPTIONS'] = ' --skip-old-files -xvzf '
+ env['MLC_EXTRACT_TOOL'] = 'tar '
elif filename.endswith(".tar.xz"):
if windows:
x = '"' if ' ' in filename else ''
- env['CM_EXTRACT_CMD0'] = 'xz -d ' + x + filename + x
+ env['MLC_EXTRACT_CMD0'] = 'xz -d ' + x + filename + x
filename = filename[:-3] # leave only .tar
- env['CM_EXTRACT_TOOL_OPTIONS'] = ' -xvf'
- env['CM_EXTRACT_TOOL'] = 'tar '
+ env['MLC_EXTRACT_TOOL_OPTIONS'] = ' -xvf'
+ env['MLC_EXTRACT_TOOL'] = 'tar '
else:
- env['CM_EXTRACT_TOOL_OPTIONS'] = ' -xvJf'
- env['CM_EXTRACT_TOOL'] = 'tar '
+ env['MLC_EXTRACT_TOOL_OPTIONS'] = ' -xvJf'
+ env['MLC_EXTRACT_TOOL'] = 'tar '
elif filename.endswith(".tar"):
- env['CM_EXTRACT_TOOL_OPTIONS'] = ' -xvf'
- env['CM_EXTRACT_TOOL'] = 'tar '
+ env['MLC_EXTRACT_TOOL_OPTIONS'] = ' -xvf'
+ env['MLC_EXTRACT_TOOL'] = 'tar '
elif filename.endswith(".gz"):
# Check target filename
- extracted_filename = env.get('CM_EXTRACT_EXTRACTED_FILENAME', '')
+ extracted_filename = env.get('MLC_EXTRACT_EXTRACTED_FILENAME', '')
if extracted_filename == '':
extracted_filename = os.path.basename(filename)[:-3]
- env['CM_EXTRACT_EXTRACTED_FILENAME'] = extracted_filename
+ env['MLC_EXTRACT_EXTRACTED_FILENAME'] = extracted_filename
x = '-c' if windows else '-k'
- env['CM_EXTRACT_TOOL_OPTIONS'] = ' -d ' + \
+ env['MLC_EXTRACT_TOOL_OPTIONS'] = ' -d ' + \
(x + ' ' if not remove_extracted else '') + \
' > ' + q + extracted_filename + q + ' < '
- env['CM_EXTRACT_TOOL'] = 'gzip '
- elif env.get('CM_EXTRACT_UNZIP', '') == 'yes':
- env['CM_EXTRACT_TOOL'] = 'unzip '
- elif env.get('CM_EXTRACT_UNTAR', '') == 'yes':
- env['CM_EXTRACT_TOOL_OPTIONS'] = ' -xvf'
- env['CM_EXTRACT_TOOL'] = 'tar '
- elif env.get('CM_EXTRACT_GZIP', '') == 'yes':
- env['CM_EXTRACT_CMD'] = 'gzip '
- env['CM_EXTRACT_TOOL_OPTIONS'] = ' -d ' + \
+ env['MLC_EXTRACT_TOOL'] = 'gzip '
+ elif env.get('MLC_EXTRACT_UNZIP', '') == 'yes':
+ env['MLC_EXTRACT_TOOL'] = 'unzip '
+ elif env.get('MLC_EXTRACT_UNTAR', '') == 'yes':
+ env['MLC_EXTRACT_TOOL_OPTIONS'] = ' -xvf'
+ env['MLC_EXTRACT_TOOL'] = 'tar '
+ elif env.get('MLC_EXTRACT_GZIP', '') == 'yes':
+ env['MLC_EXTRACT_CMD'] = 'gzip '
+ env['MLC_EXTRACT_TOOL_OPTIONS'] = ' -d ' + \
('-k ' if not remove_extracted else '')
else:
return {'return': 1,
- 'error': 'Neither CM_EXTRACT_UNZIP nor CM_EXTRACT_UNTAR is yes'}
+ 'error': 'Neither MLC_EXTRACT_UNZIP nor MLC_EXTRACT_UNTAR is yes'}
- env['CM_EXTRACT_PRE_CMD'] = ''
+ env['MLC_EXTRACT_PRE_CMD'] = ''
- extract_to_folder = env.get('CM_EXTRACT_TO_FOLDER', '')
+ extract_to_folder = env.get('MLC_EXTRACT_TO_FOLDER', '')
# Check if extract to additional folder in the current directory (or external path)
# to avoid messing up other files and keep clean directory structure
# particularly if archive has many sub-directories and files
if extract_to_folder != '':
- if 'tar ' in env['CM_EXTRACT_TOOL']:
+ if 'tar ' in env['MLC_EXTRACT_TOOL']:
x = '' if windows else '-p'
y = '"' if ' ' in extract_to_folder else ''
- # env['CM_EXTRACT_TOOL_OPTIONS'] = ' --one-top-level='+ env['CM_EXTRACT_TO_FOLDER'] + env.get('CM_EXTRACT_TOOL_OPTIONS', '')
- env['CM_EXTRACT_TOOL_OPTIONS'] = ' -C ' + y + extract_to_folder + \
- y + ' ' + env.get('CM_EXTRACT_TOOL_OPTIONS', '')
- env['CM_EXTRACT_PRE_CMD'] = 'mkdir ' + x + ' ' + \
+ # env['MLC_EXTRACT_TOOL_OPTIONS'] = ' --one-top-level='+ env['MLC_EXTRACT_TO_FOLDER'] + env.get('MLC_EXTRACT_TOOL_OPTIONS', '')
+ env['MLC_EXTRACT_TOOL_OPTIONS'] = ' -C ' + y + extract_to_folder + \
+ y + ' ' + env.get('MLC_EXTRACT_TOOL_OPTIONS', '')
+ env['MLC_EXTRACT_PRE_CMD'] = 'mkdir ' + x + ' ' + \
y + extract_to_folder + y + ' ' + xsep + ' '
- env['CM_EXTRACT_EXTRACTED_FILENAME'] = extract_to_folder
+ env['MLC_EXTRACT_EXTRACTED_FILENAME'] = extract_to_folder
- elif 'unzip' in env['CM_EXTRACT_TOOL']:
- env['CM_EXTRACT_TOOL_OPTIONS'] = ' -d ' + q + extract_to_folder + q
- env['CM_EXTRACT_EXTRACTED_FILENAME'] = extract_to_folder
+ elif 'unzip' in env['MLC_EXTRACT_TOOL']:
+ env['MLC_EXTRACT_TOOL_OPTIONS'] = ' -d ' + q + extract_to_folder + q
+ env['MLC_EXTRACT_EXTRACTED_FILENAME'] = extract_to_folder
x = '"' if ' ' in filename else ''
- env['CM_EXTRACT_CMD'] = env['CM_EXTRACT_PRE_CMD'] + env['CM_EXTRACT_TOOL'] + ' ' + \
- env.get('CM_EXTRACT_TOOL_EXTRA_OPTIONS', '') + \
- ' ' + env.get('CM_EXTRACT_TOOL_OPTIONS', '') + ' ' + x + filename + x
+ env['MLC_EXTRACT_CMD'] = env['MLC_EXTRACT_PRE_CMD'] + env['MLC_EXTRACT_TOOL'] + ' ' + \
+ env.get('MLC_EXTRACT_TOOL_EXTRA_OPTIONS', '') + \
+ ' ' + env.get('MLC_EXTRACT_TOOL_OPTIONS', '') + ' ' + x + filename + x
print('')
print('Current directory: {}'.format(os.getcwd()))
- print('Command line: "{}"'.format(env['CM_EXTRACT_CMD']))
+ print('Command line: "{}"'.format(env['MLC_EXTRACT_CMD']))
print('')
- final_file = env.get('CM_EXTRACT_EXTRACTED_FILENAME', '')
+ final_file = env.get('MLC_EXTRACT_EXTRACTED_FILENAME', '')
if final_file != '':
- if env.get('CM_EXTRACT_EXTRACTED_CHECKSUM_FILE', '') != '':
- env['CM_EXTRACT_EXTRACTED_CHECKSUM_CMD'] = f"cd {q}{final_file}{q} {xsep} md5sum -c {q}{env['CM_EXTRACT_EXTRACTED_CHECKSUM_FILE']}{q}"
- elif env.get('CM_EXTRACT_EXTRACTED_CHECKSUM', '') != '':
+ if env.get('MLC_EXTRACT_EXTRACTED_CHECKSUM_FILE', '') != '':
+ env['MLC_EXTRACT_EXTRACTED_CHECKSUM_CMD'] = f"cd {q}{final_file}{q} {xsep} md5sum -c {q}{env['MLC_EXTRACT_EXTRACTED_CHECKSUM_FILE']}{q}"
+ elif env.get('MLC_EXTRACT_EXTRACTED_CHECKSUM', '') != '':
x = '*' if os_info['platform'] == 'windows' else ''
- env['CM_EXTRACT_EXTRACTED_CHECKSUM_CMD'] = "echo {} {}{q}{}{q} | md5sum -c".format(
- env.get('CM_EXTRACT_EXTRACTED_CHECKSUM'), x, env['CM_EXTRACT_EXTRACTED_FILENAME'])
+ env['MLC_EXTRACT_EXTRACTED_CHECKSUM_CMD'] = "echo {} {}{q}{}{q} | md5sum -c".format(
+ env.get('MLC_EXTRACT_EXTRACTED_CHECKSUM'), x, env['MLC_EXTRACT_EXTRACTED_FILENAME'])
else:
- env['CM_EXTRACT_EXTRACTED_CHECKSUM_CMD'] = ""
+ env['MLC_EXTRACT_EXTRACTED_CHECKSUM_CMD'] = ""
else:
- env['CM_EXTRACT_EXTRACTED_CHECKSUM_CMD'] = ""
+ env['MLC_EXTRACT_EXTRACTED_CHECKSUM_CMD'] = ""
# Not needed - can be simpler with cmd /c {empty}
# if os_info['platform'] == 'windows':
# # Check that if empty CMD, should add ""
-# for x in ['CM_EXTRACT_CMD', 'CM_EXTRACT_EXTRACTED_CHECKSUM_CMD']:
+# for x in ['MLC_EXTRACT_CMD', 'MLC_EXTRACT_EXTRACTED_CHECKSUM_CMD']:
# env[x+'_USED']='YES' if env.get(x,'')!='' else 'NO'
# If force cache, add filepath to tag unless _path is used ...
@@ -170,10 +170,10 @@ def postprocess(i):
env = i['env']
- extract_to_folder = env.get('CM_EXTRACT_TO_FOLDER', '')
- extract_path = env.get('CM_EXTRACT_PATH', '')
+ extract_to_folder = env.get('MLC_EXTRACT_TO_FOLDER', '')
+ extract_path = env.get('MLC_EXTRACT_PATH', '')
- extracted_file = env.get('CM_EXTRACT_EXTRACTED_FILENAME', '')
+ extracted_file = env.get('MLC_EXTRACT_EXTRACTED_FILENAME', '')
# Preparing filepath
# Can be either full extracted filename (such as model) or folder
@@ -182,7 +182,7 @@ def postprocess(i):
filename = os.path.basename(extracted_file)
# We do not use this env variable anymore
-# folderpath = env.get('CM_EXTRACT_EXTRACT_TO_PATH', '')
+# folderpath = env.get('MLC_EXTRACT_EXTRACT_TO_PATH', '')
folderpath = extract_path if extract_path != '' else os.getcwd()
filepath = os.path.join(folderpath, filename)
@@ -193,21 +193,21 @@ def postprocess(i):
if not os.path.exists(filepath):
return {
'return': 1, 'error': 'Path {} was not created or doesn\'t exist'.format(filepath)}
-# return {'return':1, 'error': 'CM_EXTRACT_EXTRACTED_FILENAME and
-# CM_EXTRACT_TO_FOLDER are not set'}
+# return {'return':1, 'error': 'MLC_EXTRACT_EXTRACTED_FILENAME and
+# MLC_EXTRACT_TO_FOLDER are not set'}
- env['CM_EXTRACT_EXTRACTED_PATH'] = filepath
+ env['MLC_EXTRACT_EXTRACTED_PATH'] = filepath
# Set external environment variable with the final path
- if env.get('CM_EXTRACT_FINAL_ENV_NAME', '') != '':
- env[env['CM_EXTRACT_FINAL_ENV_NAME']] = filepath
+ if env.get('MLC_EXTRACT_FINAL_ENV_NAME', '') != '':
+ env[env['MLC_EXTRACT_FINAL_ENV_NAME']] = filepath
# Detect if this file will be deleted or moved
- env['CM_GET_DEPENDENT_CACHED_PATH'] = filepath
+ env['MLC_GET_DEPENDENT_CACHED_PATH'] = filepath
# Check if need to remove archive after extraction
- if env.get('CM_EXTRACT_REMOVE_EXTRACTED', '').lower() != 'no':
- archive_filepath = env.get('CM_EXTRACT_FILEPATH', '')
+ if env.get('MLC_EXTRACT_REMOVE_EXTRACTED', '').lower() != 'no':
+ archive_filepath = env.get('MLC_EXTRACT_FILEPATH', '')
if archive_filepath != '' and os.path.isfile(archive_filepath):
os.remove(archive_filepath)
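
Note: the customize.py hunks above reduce to an extension-to-tool dispatch driven by the renamed MLC_EXTRACT_* keys. A minimal runnable sketch of that dispatch (illustrative only; the real script also handles Windows quoting, .gz output names, and the explicit MLC_EXTRACT_UNZIP/UNTAR/GZIP overrides):

def pick_extract_tool(filename):
    """Return (tool, options) for a given archive name."""
    if filename.endswith(('.zip', '.pth')):
        return 'unzip', ''
    if filename.endswith('.tar.gz'):
        return 'tar', ' --skip-old-files -xvzf '
    if filename.endswith('.tar.xz'):
        return 'tar', ' -xvJf'
    if filename.endswith('.tar'):
        return 'tar', ' -xvf'
    if filename.endswith('.gz'):
        return 'gzip', ' -d -k '
    raise ValueError('Neither MLC_EXTRACT_UNZIP nor MLC_EXTRACT_UNTAR is yes')

env = {'MLC_EXTRACT_FILEPATH': 'model.tar.gz'}  # example input
tool, opts = pick_extract_tool(env['MLC_EXTRACT_FILEPATH'])
env['MLC_EXTRACT_CMD'] = tool + opts + env['MLC_EXTRACT_FILEPATH']
print(env['MLC_EXTRACT_CMD'])  # tar --skip-old-files -xvzf model.tar.gz
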
diff --git a/script/extract-file/meta.yaml b/script/extract-file/meta.yaml
index 3cee898a0..56f29fe1d 100644
--- a/script/extract-file/meta.yaml
+++ b/script/extract-file/meta.yaml
@@ -7,24 +7,24 @@ category: DevOps automation
deps:
- tags: detect,os
- enable_if_env:
- CM_HOST_OS_FLAVOR:
+ MLC_HOST_OS_FLAVOR:
- macos
skip_if_any_env:
- CM_EXTRACT_EXTRACTED_CHECKSUM:
+ MLC_EXTRACT_EXTRACTED_CHECKSUM:
- 'off'
- CM_EXTRACT_EXTRACTED_CHECKSUM_FILE:
+ MLC_EXTRACT_EXTRACTED_CHECKSUM_FILE:
- 'off'
tags: get,generic-sys-util,_md5sha1sum
input_description: {}
input_mapping:
- extra_folder: CM_EXTRACT_TO_FOLDER
- extract_path: CM_EXTRACT_PATH
- input: CM_EXTRACT_FILEPATH
- to: CM_EXTRACT_PATH
+ extra_folder: MLC_EXTRACT_TO_FOLDER
+ extract_path: MLC_EXTRACT_PATH
+ input: MLC_EXTRACT_FILEPATH
+ to: MLC_EXTRACT_PATH
new_env_keys:
-- CM_EXTRACT_EXTRACTED_PATH
-- <<<CM_EXTRACT_FINAL_ENV_NAME>>>
-- CM_GET_DEPENDENT_CACHED_PATH
+- MLC_EXTRACT_EXTRACTED_PATH
+- <<<MLC_EXTRACT_FINAL_ENV_NAME>>>
+- MLC_GET_DEPENDENT_CACHED_PATH
new_state_keys: []
post_deps: []
posthook_deps: []
@@ -37,11 +37,11 @@ uid: 3f0b76219d004817
variations:
keep:
env:
- CM_EXTRACT_REMOVE_EXTRACTED: 'no'
+ MLC_EXTRACT_REMOVE_EXTRACTED: 'no'
no-remove-extracted:
env:
- CM_EXTRACT_REMOVE_EXTRACTED: 'no'
+ MLC_EXTRACT_REMOVE_EXTRACTED: 'no'
path.#:
env:
- CM_EXTRACT_FILEPATH: '#'
+ MLC_EXTRACT_FILEPATH: '#'
versions: {}
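
Note: the input_mapping block above is what lets a caller pass `--input=... --to=...` instead of exporting env variables by hand. A sketch of that resolution (hypothetical driver code, not mlcflow's actual API):

input_mapping = {
    'extra_folder': 'MLC_EXTRACT_TO_FOLDER',
    'extract_path': 'MLC_EXTRACT_PATH',
    'input': 'MLC_EXTRACT_FILEPATH',
    'to': 'MLC_EXTRACT_PATH',
}

def map_inputs(inp):
    """Translate script inputs into the env keys the script reads."""
    return {input_mapping[k]: v for k, v in inp.items() if k in input_mapping}

print(map_inputs({'input': '/tmp/model.zip', 'to': '/data'}))
# {'MLC_EXTRACT_FILEPATH': '/tmp/model.zip', 'MLC_EXTRACT_PATH': '/data'}
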
diff --git a/script/extract-file/run.bat b/script/extract-file/run.bat
index 530ebbd2c..2a2727965 100644
--- a/script/extract-file/run.bat
+++ b/script/extract-file/run.bat
@@ -7,33 +7,33 @@ setlocal enabledelayedexpansion
set require_extract=1
-if exist "%CM_EXTRACT_EXTRACTED_FILENAME%" (
+if exist "%MLC_EXTRACT_EXTRACTED_FILENAME%" (
set require_extract=0
echo.
- echo %CM_EXTRACT_EXTRACTED_CHECKSUM_CMD%
- cmd /c %CM_EXTRACT_EXTRACTED_CHECKSUM_CMD%
+ echo %MLC_EXTRACT_EXTRACTED_CHECKSUM_CMD%
+ cmd /c %MLC_EXTRACT_EXTRACTED_CHECKSUM_CMD%
IF !ERRORLEVEL! NEQ 0 (
set require_extract=1
- del /Q %CM_EXTRACT_EXTRACTED_FILENAME%
+ del /Q %MLC_EXTRACT_EXTRACTED_FILENAME%
)
)
if "!require_extract!" == "1" (
- if not "%CM_EXTRACT_CMD0%" == "" (
+ if not "%MLC_EXTRACT_CMD0%" == "" (
echo.
- echo %CM_EXTRACT_CMD0%
- cmd /c %CM_EXTRACT_CMD0%
+ echo %MLC_EXTRACT_CMD0%
+ cmd /c %MLC_EXTRACT_CMD0%
IF !ERRORLEVEL! NEQ 0 EXIT 1
)
echo.
- echo %CM_EXTRACT_CMD%
- cmd /c %CM_EXTRACT_CMD%
+ echo %MLC_EXTRACT_CMD%
+ cmd /c %MLC_EXTRACT_CMD%
IF !ERRORLEVEL! NEQ 0 EXIT 1
echo.
- echo %CM_EXTRACT_EXTRACTED_CHECKSUM_CMD%
- cmd /c %CM_EXTRACT_EXTRACTED_CHECKSUM_CMD%
+ echo %MLC_EXTRACT_EXTRACTED_CHECKSUM_CMD%
+ cmd /c %MLC_EXTRACT_EXTRACTED_CHECKSUM_CMD%
IF !ERRORLEVEL! NEQ 0 EXIT 1
)
diff --git a/script/extract-file/run.sh b/script/extract-file/run.sh
index 4ee4f8512..29627a196 100644
--- a/script/extract-file/run.sh
+++ b/script/extract-file/run.sh
@@ -1,20 +1,20 @@
#!/bin/bash
-if [ -e "${CM_EXTRACT_EXTRACTED_FILENAME}" ] ; then
- CMD=${CM_EXTRACT_EXTRACTED_CHECKSUM_CMD}
+if [ -e "${MLC_EXTRACT_EXTRACTED_FILENAME}" ] ; then
+ CMD=${MLC_EXTRACT_EXTRACTED_CHECKSUM_CMD}
echo ""
echo "${CMD}"
eval "${CMD}"
test $? -eq 0 && exit 0
fi
-CMD=${CM_EXTRACT_CMD}
+CMD=${MLC_EXTRACT_CMD}
echo ""
echo "${CMD}"
eval "${CMD}"
test $? -eq 0 || exit $?
-CMD=${CM_EXTRACT_EXTRACTED_CHECKSUM_CMD}
+CMD=${MLC_EXTRACT_EXTRACTED_CHECKSUM_CMD}
echo ""
echo "${CMD}"
eval "${CMD}"
diff --git a/script/fail/customize.py b/script/fail/customize.py
index 69ccec77e..3f826ffe2 100644
--- a/script/fail/customize.py
+++ b/script/fail/customize.py
@@ -12,10 +12,10 @@ def preprocess(i):
automation = i['automation']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
# Checking conditions
- if env.get('CM_FAIL_WINDOWS', '').lower() == 'true':
+ if env.get('MLC_FAIL_WINDOWS', '').lower() == 'true':
if os_info['platform'] == 'windows':
return {'return': 1,
'error': 'CM detected fail condition: running on Windows'}
diff --git a/script/fail/meta.yaml b/script/fail/meta.yaml
index 9c5d8fcfc..a68d75749 100644
--- a/script/fail/meta.yaml
+++ b/script/fail/meta.yaml
@@ -15,4 +15,4 @@ tags:
variations:
windows:
env:
- CM_FAIL_WINDOWS: true
+ MLC_FAIL_WINDOWS: true
diff --git a/script/flash-tinyml-binary/customize.py b/script/flash-tinyml-binary/customize.py
index ab0d7e5a3..80690ce16 100644
--- a/script/flash-tinyml-binary/customize.py
+++ b/script/flash-tinyml-binary/customize.py
@@ -9,9 +9,9 @@ def preprocess(i):
if os_info['platform'] == 'windows':
return {'return': 1, 'error': 'Windows is not supported in this script yet'}
- if 'CM_TINY_BUILD_DIR' not in env:
+ if 'MLC_TINY_BUILD_DIR' not in env:
return {
- 'return': 1, 'error': 'Please set CM_TINY_BUILD_DIR to the build directory of the model'}
+ 'return': 1, 'error': 'Please set MLC_TINY_BUILD_DIR to the build directory of the model'}
return {'return': 0}
diff --git a/script/flash-tinyml-binary/meta.yaml b/script/flash-tinyml-binary/meta.yaml
index 42ebb7ae7..92cab05ae 100644
--- a/script/flash-tinyml-binary/meta.yaml
+++ b/script/flash-tinyml-binary/meta.yaml
@@ -14,13 +14,13 @@ deps:
tags: get,zephyr-sdk
- inherit_variation_tags: 'True'
skip_if_env:
- CM_TINY_BUILD_DIR:
+ MLC_TINY_BUILD_DIR:
- 'on'
tags: reproduce,tiny,mlperf
input_mapping:
- build_dir: CM_TINY_BUILD_DIR
+ build_dir: MLC_TINY_BUILD_DIR
local_env_keys:
-- CM_*
+- MLC_*
tags:
- flash
- tiny
diff --git a/script/flash-tinyml-binary/run.sh b/script/flash-tinyml-binary/run.sh
index 962dc74d5..9d8231794 100644
--- a/script/flash-tinyml-binary/run.sh
+++ b/script/flash-tinyml-binary/run.sh
@@ -1,7 +1,7 @@
#!/bin/bash
-build_dir=${CM_TINY_BUILD_DIR}
-cmd="cd ${CM_ZEPHYR_DIR}"
+build_dir=${MLC_TINY_BUILD_DIR}
+cmd="cd ${MLC_ZEPHYR_DIR}"
echo $cmd
eval $cmd
cmd="west flash --build-dir ${build_dir}"
diff --git a/script/generate-mlperf-inference-submission/customize.py b/script/generate-mlperf-inference-submission/customize.py
index a6f5167aa..384b0c9b8 100644
--- a/script/generate-mlperf-inference-submission/customize.py
+++ b/script/generate-mlperf-inference-submission/customize.py
@@ -60,39 +60,39 @@ def generate_submission(env, state, inp, submission_division):
# Save current user directory
cur_dir = os.getcwd()
- if env.get('CM_MLPERF_INFERENCE_RESULTS_DIR_', '') == '':
+ if env.get('MLC_MLPERF_INFERENCE_RESULTS_DIR_', '') == '':
results_dir = os.path.join(
- env['CM_MLPERF_INFERENCE_RESULTS_DIR'],
- f"{env['CM_MLPERF_RUN_STYLE']}_results")
+ env['MLC_MLPERF_INFERENCE_RESULTS_DIR'],
+ f"{env['MLC_MLPERF_RUN_STYLE']}_results")
else:
- results_dir = env['CM_MLPERF_INFERENCE_RESULTS_DIR_']
+ results_dir = env['MLC_MLPERF_INFERENCE_RESULTS_DIR_']
- mlperf_path = env['CM_MLPERF_INFERENCE_SOURCE']
+ mlperf_path = env['MLC_MLPERF_INFERENCE_SOURCE']
submission_checker_dir = os.path.join(mlperf_path, "tools", "submission")
sys.path.append(submission_checker_dir)
- if env.get('CM_MLPERF_INFERENCE_SUBMISSION_DIR', '') == '':
+ if env.get('MLC_MLPERF_INFERENCE_SUBMISSION_DIR', '') == '':
from pathlib import Path
user_home = str(Path.home())
- env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'] = os.path.join(
+ env['MLC_MLPERF_INFERENCE_SUBMISSION_DIR'] = os.path.join(
user_home, "mlperf_submission")
- submission_dir = env.get('CM_MLPERF_INFERENCE_SUBMISSION_DIR', '')
+ submission_dir = env.get('MLC_MLPERF_INFERENCE_SUBMISSION_DIR', '')
if submission_dir == '':
submission_base_dir = env.get(
- 'CM_MLPERF_INFERENCE_SUBMISSION_BASE_DIR', '')
+ 'MLC_MLPERF_INFERENCE_SUBMISSION_BASE_DIR', '')
if submission_base_dir == '':
- return {'return': 1, 'error': f"Both CM_MLPERF_INFERENCE_SUBMISSION_DIR and CM_MLPERF_INFERENCE_SUBMISSION_BASE_DIR can not be empty!"}
+ return {'return': 1, 'error': f"Both MLC_MLPERF_INFERENCE_SUBMISSION_DIR and MLC_MLPERF_INFERENCE_SUBMISSION_BASE_DIR can not be empty!"}
else:
submission_dir = os.path.join(
submission_base_dir, "mlperf_inference_submission")
- env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'] = submission_dir
+ env['MLC_MLPERF_INFERENCE_SUBMISSION_DIR'] = submission_dir
- if env.get('CM_MLPERF_CLEAN_SUBMISSION_DIR', '') != '':
+ if env.get('MLC_MLPERF_CLEAN_SUBMISSION_DIR', '') != '':
print('=================================================')
print(
'Cleaning {} ...'.format(
- env['CM_MLPERF_INFERENCE_SUBMISSION_DIR']))
+ env['MLC_MLPERF_INFERENCE_SUBMISSION_DIR']))
if os.path.exists(submission_dir):
shutil.rmtree(submission_dir)
print('=================================================')
@@ -100,7 +100,7 @@ def generate_submission(env, state, inp, submission_division):
if not os.path.isdir(submission_dir):
os.makedirs(submission_dir)
- if str(env.get('CM_MLPERF_SUBMISSION_DIR_SHARED', '')
+ if str(env.get('MLC_MLPERF_SUBMISSION_DIR_SHARED', '')
).lower() in ["yes", "true", "1"]:
os.chmod(submission_dir, 0o2775)
@@ -112,15 +112,15 @@ def generate_submission(env, state, inp, submission_division):
results_dir,
f))]
- system_meta_default = state['CM_SUT_META']
+ system_meta_default = state['MLC_SUT_META']
# set pytorch as the default framework
if system_meta_default['framework'] == '':
system_meta_default['framework'] = "pytorch"
system_meta_tmp = {}
- if 'CM_MLPERF_SUBMISSION_SYSTEM_TYPE' in env:
- system_meta_tmp['system_type'] = env['CM_MLPERF_SUBMISSION_SYSTEM_TYPE']
+ if 'MLC_MLPERF_SUBMISSION_SYSTEM_TYPE' in env:
+ system_meta_tmp['system_type'] = env['MLC_MLPERF_SUBMISSION_SYSTEM_TYPE']
if submission_division != "":
system_meta_tmp['division'] = submission_division
@@ -128,13 +128,13 @@ def generate_submission(env, state, inp, submission_division):
else:
division = system_meta_default['division']
- if 'CM_MLPERF_SUBMISSION_CATEGORY' in env:
- system_meta_tmp['system_type'] = env['CM_MLPERF_SUBMISSION_CATEGORY'].replace(
+ if 'MLC_MLPERF_SUBMISSION_CATEGORY' in env:
+ system_meta_tmp['system_type'] = env['MLC_MLPERF_SUBMISSION_CATEGORY'].replace(
"-", ",")
duplicate = (
env.get(
- 'CM_MLPERF_DUPLICATE_SCENARIO_RESULTS',
+ 'MLC_MLPERF_DUPLICATE_SCENARIO_RESULTS',
'no') in [
"yes",
"True"])
@@ -150,25 +150,25 @@ def generate_submission(env, state, inp, submission_division):
os.makedirs(path_submission_division)
# Check submitter
- if env.get('CM_MLPERF_SUBMITTER'):
- submitter = env['CM_MLPERF_SUBMITTER']
+ if env.get('MLC_MLPERF_SUBMITTER'):
+ submitter = env['MLC_MLPERF_SUBMITTER']
system_meta_tmp['submitter'] = submitter
else:
submitter = system_meta_default['submitter']
- env['CM_MLPERF_SUBMITTER'] = submitter
+ env['MLC_MLPERF_SUBMITTER'] = submitter
print('* MLPerf inference submitter: {}'.format(submitter))
- if env.get('CM_MLPERF_SUT_SW_NOTES_EXTRA', '') != '':
+ if env.get('MLC_MLPERF_SUT_SW_NOTES_EXTRA', '') != '':
sw_notes = f"""{
system_meta_tmp['sw_notes']} {
- env['CM_MLPERF_SUT_SW_NOTES_EXTRA']}"""
+ env['MLC_MLPERF_SUT_SW_NOTES_EXTRA']}"""
system_meta_tmp['sw_notes'] = sw_notes
- if env.get('CM_MLPERF_SUT_HW_NOTES_EXTRA', '') != '':
+ if env.get('MLC_MLPERF_SUT_HW_NOTES_EXTRA', '') != '':
hw_notes = f"""{
system_meta_tmp['hw_notes']} {
- env['CM_MLPERF_SUT_HW_NOTES_EXTRA']}"""
+ env['MLC_MLPERF_SUT_HW_NOTES_EXTRA']}"""
system_meta_tmp['hw_notes'] = hw_notes
path_submission = os.path.join(path_submission_division, submitter)
@@ -176,7 +176,7 @@ def generate_submission(env, state, inp, submission_division):
os.makedirs(path_submission)
# SUT base
- system = env.get('CM_HW_NAME', 'default').replace(' ', '_')
+ system = env.get('MLC_HW_NAME', 'default').replace(' ', '_')
code_path = os.path.join(path_submission, "code")
@@ -237,7 +237,7 @@ def generate_submission(env, state, inp, submission_division):
if division == "open" and len(model_mapping_combined) == 0:
for model in models:
is_valid, returned_model_name = model_in_valid_models(
- model, env.get('CM_MLPERF_LAST_RELEASE', 'v4.1'))
+ model, env.get('MLC_MLPERF_LAST_RELEASE', 'v4.1'))
if not is_valid:
result_model_path = os.path.join(result_path, model)
scenarios = [
@@ -276,7 +276,7 @@ def generate_submission(env, state, inp, submission_division):
{model: returned_model_name})
if check_dict_filled(sut_info.keys(), sut_info):
- system = env.get('CM_HW_NAME', sut_info["system_name"])
+ system = env.get('MLC_HW_NAME', sut_info["system_name"])
implementation = sut_info["implementation"]
device = sut_info["device"]
framework = sut_info["framework"].replace(" ", "_")
@@ -431,11 +431,11 @@ def generate_submission(env, state, inp, submission_division):
submission_power_path, f))
analyzer_settings_file = env.get(
- 'CM_MLPERF_POWER_ANALYZER_SETTINGS_FILE_PATH', os.path.join(
- env['CM_TMP_CURRENT_SCRIPT_PATH'], "default_files", "analyzer_table.md"))
+ 'MLC_MLPERF_POWER_ANALYZER_SETTINGS_FILE_PATH', os.path.join(
+ env['MLC_TMP_CURRENT_SCRIPT_PATH'], "default_files", "analyzer_table.md"))
power_settings_file = env.get(
- 'CM_MLPERF_POWER_SETTINGS_FILE_PATH', os.path.join(
- env['CM_TMP_CURRENT_SCRIPT_PATH'], "default_files", "power_settings.md"))
+ 'MLC_MLPERF_POWER_SETTINGS_FILE_PATH', os.path.join(
+ env['MLC_TMP_CURRENT_SCRIPT_PATH'], "default_files", "power_settings.md"))
shutil.copy(
analyzer_settings_file, os.path.join(
@@ -651,7 +651,7 @@ def generate_submission(env, state, inp, submission_division):
readme_suffix = ""
result_string, result = mlperf_utils.get_result_string(
- env['CM_MLPERF_LAST_RELEASE'], model, scenario, result_scenario_path, power_run, sub_res, division, system_file, model_precision, env.get('CM_MLPERF_INFERENCE_SOURCE_VERSION'))
+ env['MLC_MLPERF_LAST_RELEASE'], model, scenario, result_scenario_path, power_run, sub_res, division, system_file, model_precision, env.get('MLC_MLPERF_INFERENCE_SOURCE_VERSION'))
for key in result:
results[model][scenario][key] = result[key]
@@ -693,11 +693,11 @@ def generate_submission(env, state, inp, submission_division):
measurement_path,
"system_info.txt"))
else:
- if env.get('CM_GET_PLATFORM_DETAILS', '') == "yes":
+ if env.get('MLC_GET_PLATFORM_DETAILS', '') == "yes":
mlc_input = {'action': 'run',
'automation': 'script',
'tags': 'get,platform,details',
- 'env': {'CM_PLATFORM_DETAILS_FILE_PATH': os.path.join(measurement_path, "system_info.txt")},
+ 'env': {'MLC_PLATFORM_DETAILS_FILE_PATH': os.path.join(measurement_path, "system_info.txt")},
'quiet': True
}
r = mlc.access(mlc_input)
@@ -725,15 +725,15 @@ def postprocess(i):
submission_divisions = []
- if env.get('CM_MLPERF_SUBMISSION_DIVISION', '') in [
+ if env.get('MLC_MLPERF_SUBMISSION_DIVISION', '') in [
"open-closed", "closed-open"]:
submission_divisions = ["open", "closed"]
- elif env.get('CM_MLPERF_SUBMISSION_DIVISION', '') != '':
- submission_divisions.append(env['CM_MLPERF_SUBMISSION_DIVISION'])
+ elif env.get('MLC_MLPERF_SUBMISSION_DIVISION', '') != '':
+ submission_divisions.append(env['MLC_MLPERF_SUBMISSION_DIVISION'])
# if submission division is not assigned, default value would be taken in
# submission_generation function
- if env.get('CM_MLPERF_SUBMISSION_DIVISION', '') == '':
+ if env.get('MLC_MLPERF_SUBMISSION_DIVISION', '') == '':
r = generate_submission(env, state, inp, submission_division="")
if r['return'] > 0:
return r
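
Note: the submission-directory logic above has a two-level fallback: an explicit MLC_MLPERF_INFERENCE_SUBMISSION_DIR wins, otherwise a "mlperf_inference_submission" folder is derived from the base dir. A compact sketch of that resolution:

import os

def resolve_submission_dir(env):
    submission_dir = env.get('MLC_MLPERF_INFERENCE_SUBMISSION_DIR', '')
    if submission_dir:
        return submission_dir
    base = env.get('MLC_MLPERF_INFERENCE_SUBMISSION_BASE_DIR', '')
    if not base:
        raise ValueError('Both MLC_MLPERF_INFERENCE_SUBMISSION_DIR and '
                         'MLC_MLPERF_INFERENCE_SUBMISSION_BASE_DIR can not be empty!')
    return os.path.join(base, 'mlperf_inference_submission')
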
diff --git a/script/generate-mlperf-inference-submission/meta.yaml b/script/generate-mlperf-inference-submission/meta.yaml
index 013997df9..4c5a0ab34 100644
--- a/script/generate-mlperf-inference-submission/meta.yaml
+++ b/script/generate-mlperf-inference-submission/meta.yaml
@@ -4,9 +4,9 @@ automation_uid: 5b4e0237da074764
cache: false
category: MLPerf benchmark support
default_env:
- CM_MLPERF_RUN_STYLE: valid
- CM_MLPERF_SUBMISSION_DIR_SHARED: 'yes'
- CM_RUN_MLPERF_ACCURACY: 'on'
+ MLC_MLPERF_RUN_STYLE: valid
+ MLC_MLPERF_SUBMISSION_DIR_SHARED: 'yes'
+ MLC_RUN_MLPERF_ACCURACY: 'on'
predeps: False
deps:
- names:
@@ -22,13 +22,13 @@ deps:
- names:
- get-mlperf-results-dir
skip_if_env:
- CM_MLPERF_INFERENCE_RESULTS_DIR_:
+ MLC_MLPERF_INFERENCE_RESULTS_DIR_:
- 'on'
tags: get,mlperf,results,dir,local
- names:
- get-mlperf-submission-dir
skip_if_env:
- CM_MLPERF_INFERENCE_SUBMISSION_DIR:
+ MLC_MLPERF_INFERENCE_SUBMISSION_DIR:
- 'on'
tags: get,mlperf,submission,dir
docker:
@@ -37,22 +37,22 @@ docker:
deps:
- names: get-mlperf-inference-results-dir
skip_if_env:
- CM_MLPERF_INFERENCE_RESULTS_DIR_:
+ MLC_MLPERF_INFERENCE_RESULTS_DIR_:
- 'on'
tags: get,mlperf,inference,results,dir,local
- names: get-mlperf-inference-submission-dir
skip_if_any_env:
- CM_MLPERF_INFERENCE_SUBMISSION_BASE_DIR:
+ MLC_MLPERF_INFERENCE_SUBMISSION_BASE_DIR:
- 'on'
tags: get,mlperf,inference,submission,dir,local
docker_input_mapping:
- results_dir: CM_MLPERF_INFERENCE_RESULTS_DIR_
- submission_base_dir: CM_MLPERF_INFERENCE_SUBMISSION_BASE_DIR
+ results_dir: MLC_MLPERF_INFERENCE_RESULTS_DIR_
+ submission_base_dir: MLC_MLPERF_INFERENCE_SUBMISSION_BASE_DIR
extra_run_args: ' --cap-add SYS_ADMIN'
mounts:
- - ${{ CM_MLPERF_INFERENCE_SUBMISSION_BASE_DIR }}:${{ CM_MLPERF_INFERENCE_SUBMISSION_BASE_DIR
+ - ${{ MLC_MLPERF_INFERENCE_SUBMISSION_BASE_DIR }}:${{ MLC_MLPERF_INFERENCE_SUBMISSION_BASE_DIR
}}
- - ${{ CM_MLPERF_INFERENCE_RESULTS_DIR_ }}:${{ CM_MLPERF_INFERENCE_RESULTS_DIR_ }}
+ - ${{ MLC_MLPERF_INFERENCE_RESULTS_DIR_ }}:${{ MLC_MLPERF_INFERENCE_RESULTS_DIR_ }}
os: ubuntu
os_version: '22.04'
pre_run_cmds:
@@ -61,49 +61,49 @@ docker:
use_host_group_id: true
use_host_user_id: true
input_mapping:
- analyzer_settings_file: CM_MLPERF_POWER_ANALYZER_SETTINGS_FILE_PATH
- category: CM_MLPERF_SUBMISSION_CATEGORY
- clean: CM_MLPERF_CLEAN_SUBMISSION_DIR
- dashboard: CM_MLPERF_DASHBOARD
- dashboard_wb_project: CM_MLPERF_DASHBOARD_WANDB_PROJECT
- device: CM_MLPERF_DEVICE
- division: CM_MLPERF_SUBMISSION_DIVISION
- duplicate: CM_MLPERF_DUPLICATE_SCENARIO_RESULTS
- extra_checker_args: CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARG
- hw_name: CM_HW_NAME
- hw_notes_extra: CM_MLPERF_SUT_HW_NOTES_EXTRA
- infer_scenario_results: CM_MLPERF_DUPLICATE_SCENARIO_RESULTS
- power_settings_file: CM_MLPERF_POWER_SETTINGS_FILE_PATH
- preprocess: CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR
- preprocess_submission: CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR
- results_dir: CM_MLPERF_INFERENCE_RESULTS_DIR_
- run_checker: CM_RUN_SUBMISSION_CHECKER
- run_style: CM_MLPERF_RUN_STYLE
- skip_truncation: CM_SKIP_TRUNCATE_ACCURACY
- submission_base_dir: CM_MLPERF_INFERENCE_SUBMISSION_BASE_DIR
- submission_dir: CM_MLPERF_INFERENCE_SUBMISSION_DIR
- submitter: CM_MLPERF_SUBMITTER
- sw_notes_extra: CM_MLPERF_SUT_SW_NOTES_EXTRA
- tar: CM_TAR_SUBMISSION_DIR
- get_platform_details: CM_GET_PLATFORM_DETAILS
- version: CM_MLPERF_SUBMISSION_CHECKER_VERSION
+ analyzer_settings_file: MLC_MLPERF_POWER_ANALYZER_SETTINGS_FILE_PATH
+ category: MLC_MLPERF_SUBMISSION_CATEGORY
+ clean: MLC_MLPERF_CLEAN_SUBMISSION_DIR
+ dashboard: MLC_MLPERF_DASHBOARD
+ dashboard_wb_project: MLC_MLPERF_DASHBOARD_WANDB_PROJECT
+ device: MLC_MLPERF_DEVICE
+ division: MLC_MLPERF_SUBMISSION_DIVISION
+ duplicate: MLC_MLPERF_DUPLICATE_SCENARIO_RESULTS
+ extra_checker_args: MLC_MLPERF_SUBMISSION_CHECKER_EXTRA_ARG
+ hw_name: MLC_HW_NAME
+ hw_notes_extra: MLC_MLPERF_SUT_HW_NOTES_EXTRA
+ infer_scenario_results: MLC_MLPERF_DUPLICATE_SCENARIO_RESULTS
+ power_settings_file: MLC_MLPERF_POWER_SETTINGS_FILE_PATH
+ preprocess: MLC_RUN_MLPERF_SUBMISSION_PREPROCESSOR
+ preprocess_submission: MLC_RUN_MLPERF_SUBMISSION_PREPROCESSOR
+ results_dir: MLC_MLPERF_INFERENCE_RESULTS_DIR_
+ run_checker: MLC_RUN_SUBMISSION_CHECKER
+ run_style: MLC_MLPERF_RUN_STYLE
+ skip_truncation: MLC_SKIP_TRUNCATE_ACCURACY
+ submission_base_dir: MLC_MLPERF_INFERENCE_SUBMISSION_BASE_DIR
+ submission_dir: MLC_MLPERF_INFERENCE_SUBMISSION_DIR
+ submitter: MLC_MLPERF_SUBMITTER
+ sw_notes_extra: MLC_MLPERF_SUT_SW_NOTES_EXTRA
+ tar: MLC_TAR_SUBMISSION_DIR
+ get_platform_details: MLC_GET_PLATFORM_DETAILS
+ version: MLC_MLPERF_SUBMISSION_CHECKER_VERSION
post_deps:
- enable_if_env:
- CM_RUN_MLPERF_ACCURACY:
+ MLC_RUN_MLPERF_ACCURACY:
- 'on'
skip_if_env:
- CM_SKIP_TRUNCATE_ACCURACY:
+ MLC_SKIP_TRUNCATE_ACCURACY:
- 'yes'
tags: accuracy,truncate,mlc
- enable_if_env:
- CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR:
+ MLC_RUN_MLPERF_SUBMISSION_PREPROCESSOR:
- 'on'
- 'True'
- 'yes'
- true
tags: preprocess,mlperf,submission
- skip_if_env:
- CM_RUN_SUBMISSION_CHECKER:
+ MLC_RUN_SUBMISSION_CHECKER:
- 'no'
names:
- mlperf-inference-submission-checker
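
Note: the docker mounts above use ${{ VAR }} templating on both sides of the colon. Assuming the obvious substitution semantics (each ${{ NAME }} is replaced by the value of that env key), the expansion looks like:

import re

def expand(template, env):
    return re.sub(r'\$\{\{\s*(\w+)\s*\}\}',
                  lambda m: env.get(m.group(1), ''), template)

print(expand('${{ MLC_MLPERF_INFERENCE_RESULTS_DIR_ }}:'
             '${{ MLC_MLPERF_INFERENCE_RESULTS_DIR_ }}',
             {'MLC_MLPERF_INFERENCE_RESULTS_DIR_': '/data/results'}))
# /data/results:/data/results
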
diff --git a/script/generate-mlperf-inference-user-conf/customize.py b/script/generate-mlperf-inference-user-conf/customize.py
index 88255718c..068a4161f 100644
--- a/script/generate-mlperf-inference-user-conf/customize.py
+++ b/script/generate-mlperf-inference-user-conf/customize.py
@@ -13,77 +13,77 @@ def preprocess(i):
state = i['state']
script_path = i['run_script_input']['path']
- rerun = True if env.get("CM_RERUN", "") != '' else False
+ rerun = True if env.get("MLC_RERUN", "") != '' else False
- env['CM_MLPERF_SKIP_RUN'] = env.get('CM_MLPERF_SKIP_RUN', "no")
+ env['MLC_MLPERF_SKIP_RUN'] = env.get('MLC_MLPERF_SKIP_RUN', "no")
- mlperf_path = env['CM_MLPERF_INFERENCE_SOURCE']
+ mlperf_path = env['MLC_MLPERF_INFERENCE_SOURCE']
submission_checker_dir = os.path.join(mlperf_path, "tools", "submission")
sys.path.append(submission_checker_dir)
- version = env.get('CM_MLPERF_INFERENCE_VERSION', "4.1")
+ version = env.get('MLC_MLPERF_INFERENCE_VERSION', "4.1")
required_files = []
required_files = get_checker_files()
- if 'CM_MLPERF_LOADGEN_SCENARIO' not in env:
- env['CM_MLPERF_LOADGEN_SCENARIO'] = "Offline"
+ if 'MLC_MLPERF_LOADGEN_SCENARIO' not in env:
+ env['MLC_MLPERF_LOADGEN_SCENARIO'] = "Offline"
- if 'CM_MLPERF_LOADGEN_MODE' not in env:
+ if 'MLC_MLPERF_LOADGEN_MODE' not in env:
print("\nNo mode given. Using accuracy as default\n")
- env['CM_MLPERF_LOADGEN_MODE'] = "accuracy"
+ env['MLC_MLPERF_LOADGEN_MODE'] = "accuracy"
if env.get('OUTPUT_BASE_DIR', '') == '':
env['OUTPUT_BASE_DIR'] = env.get(
- 'CM_MLPERF_INFERENCE_RESULTS_DIR', os.getcwd())
+ 'MLC_MLPERF_INFERENCE_RESULTS_DIR', os.getcwd())
- if 'CM_NUM_THREADS' not in env:
- if 'CM_MINIMIZE_THREADS' in env:
- env['CM_NUM_THREADS'] = str(int(env['CM_HOST_CPU_TOTAL_CORES']) //
- (int(env.get('CM_HOST_CPU_SOCKETS', '1')) * int(env.get('CM_HOST_CPU_TOTAL_CORES', '1'))))
+ if 'MLC_NUM_THREADS' not in env:
+ if 'MLC_MINIMIZE_THREADS' in env:
+ env['MLC_NUM_THREADS'] = str(int(env['MLC_HOST_CPU_TOTAL_CORES']) //
+ (int(env.get('MLC_HOST_CPU_SOCKETS', '1')) * int(env.get('MLC_HOST_CPU_TOTAL_CORES', '1'))))
else:
- env['CM_NUM_THREADS'] = env.get('CM_HOST_CPU_TOTAL_CORES', '1')
+ env['MLC_NUM_THREADS'] = env.get('MLC_HOST_CPU_TOTAL_CORES', '1')
print("Using MLCommons Inference source from '" +
- env['CM_MLPERF_INFERENCE_SOURCE'] + "'")
+ env['MLC_MLPERF_INFERENCE_SOURCE'] + "'")
- if 'CM_MLPERF_CONF' not in env:
- env['CM_MLPERF_CONF'] = os.path.join(
- env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf")
+ if 'MLC_MLPERF_CONF' not in env:
+ env['MLC_MLPERF_CONF'] = os.path.join(
+ env['MLC_MLPERF_INFERENCE_SOURCE'], "mlperf.conf")
RUN_CMD = ""
state['RUN'] = {}
- scenario = env['CM_MLPERF_LOADGEN_SCENARIO']
+ scenario = env['MLC_MLPERF_LOADGEN_SCENARIO']
state['RUN'][scenario] = {}
- model_full_name = env.get('CM_ML_MODEL_FULL_NAME', env['CM_MODEL'])
+ model_full_name = env.get('MLC_ML_MODEL_FULL_NAME', env['MLC_MODEL'])
- if model_full_name != env['CM_MODEL']:
- if 'model_mapping' not in state['CM_SUT_CONFIG']:
- state['CM_SUT_CONFIG']['model_mappings'] = {}
- state['CM_SUT_CONFIG']['model_mappings'][model_full_name] = env['CM_MODEL']
+ if model_full_name != env['MLC_MODEL']:
+ if 'model_mapping' not in state['MLC_SUT_CONFIG']:
+ state['MLC_SUT_CONFIG']['model_mappings'] = {}
+ state['MLC_SUT_CONFIG']['model_mappings'][model_full_name] = env['MLC_MODEL']
- if model_full_name not in i['state']['CM_SUT_CONFIG'][env['CM_SUT_NAME']]:
- i['state']['CM_SUT_CONFIG'][env['CM_SUT_NAME']][model_full_name] = {}
+ if model_full_name not in i['state']['MLC_SUT_CONFIG'][env['MLC_SUT_NAME']]:
+ i['state']['MLC_SUT_CONFIG'][env['MLC_SUT_NAME']][model_full_name] = {}
- if scenario not in i['state']['CM_SUT_CONFIG'][env['CM_SUT_NAME']
+ if scenario not in i['state']['MLC_SUT_CONFIG'][env['MLC_SUT_NAME']
][model_full_name]:
- i['state']['CM_SUT_CONFIG'][env['CM_SUT_NAME']
+ i['state']['MLC_SUT_CONFIG'][env['MLC_SUT_NAME']
][model_full_name][scenario] = {}
- conf = i['state']['CM_SUT_CONFIG'][env['CM_SUT_NAME']
+ conf = i['state']['MLC_SUT_CONFIG'][env['MLC_SUT_NAME']
][model_full_name][scenario]
- mode = env['CM_MLPERF_LOADGEN_MODE']
+ mode = env['MLC_MLPERF_LOADGEN_MODE']
user_conf = ''
- if env['CM_MLPERF_RUN_STYLE'] == "fast":
- fast_factor = int(env['CM_FAST_FACTOR'])
+ if env['MLC_MLPERF_RUN_STYLE'] == "fast":
+ fast_factor = int(env['MLC_FAST_FACTOR'])
else:
fast_factor = 1
- ml_model_name = env['CM_MODEL']
+ ml_model_name = env['MLC_MODEL']
if 'bert' in ml_model_name:
ml_model_name = "bert"
if 'dlrm' in ml_model_name:
@@ -101,19 +101,19 @@ def preprocess(i):
if scenario in ['Offline', 'Server']:
metric = "target_qps"
tolerance = 1.01
- # value = env.get('CM_MLPERF_LOADGEN_SERVER_TARGET_QPS') if scenario == "Server" else env.get('CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS')
- value = env.get('CM_MLPERF_LOADGEN_TARGET_QPS')
+ # value = env.get('MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS') if scenario == "Server" else env.get('MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS')
+ value = env.get('MLC_MLPERF_LOADGEN_TARGET_QPS')
elif scenario in ['SingleStream', 'MultiStream']:
metric = "target_latency"
- value = env.get('CM_MLPERF_LOADGEN_TARGET_LATENCY')
+ value = env.get('MLC_MLPERF_LOADGEN_TARGET_LATENCY')
if value:
if scenario == "SingleStream" and (
1000 / float(value) * 660 < 100):
- env['CM_MLPERF_USE_MAX_DURATION'] = 'no'
+ env['MLC_MLPERF_USE_MAX_DURATION'] = 'no'
elif scenario == "MultiStream" and (1000 / float(value) * 660 < 662):
- env['CM_MLPERF_USE_MAX_DURATION'] = 'no'
- if env.get('CM_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() not in ["yes", "1", "true"] and env.get(
- 'CM_MLPERF_USE_MAX_DURATION', "yes").lower() not in ["no", "false", "0"]:
+ env['MLC_MLPERF_USE_MAX_DURATION'] = 'no'
+ if env.get('MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() not in ["yes", "1", "true"] and env.get(
+ 'MLC_MLPERF_USE_MAX_DURATION', "yes").lower() not in ["no", "false", "0"]:
tolerance = 0.4 # much lower because we have max_duration
else:
tolerance = 0.9
@@ -136,26 +136,26 @@ def preprocess(i):
"Adjusted configuration value {} {}".format(
metric_value, metric))
else:
- # if env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes":
+ # if env.get("MLC_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes":
if metric == "target_qps":
- if env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes":
+ if env.get("MLC_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes":
print("In find performance mode: using 1 as target_qps")
else:
print("No target_qps specified. Using 1 as target_qps")
conf[metric] = 1
if metric == "target_latency":
- if env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes":
+ if env.get("MLC_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes":
print("In find performance mode: using 0.5ms as target_latency")
else:
print("No target_latency specified. Using default")
- if env.get('CM_MLPERF_USE_MAX_DURATION', 'yes').lower() in ["no", "false", "0"] or env.get(
- 'CM_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() in ["yes", "1", "true"]:
+ if env.get('MLC_MLPERF_USE_MAX_DURATION', 'yes').lower() in ["no", "false", "0"] or env.get(
+ 'MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() in ["yes", "1", "true"]:
# Total number of queries needed is a multiple of dataset
# size. So we don't use max_duration and so we need to be
# careful with the input latency
- if '3d-unet' in env['CM_MODEL']:
+ if '3d-unet' in env['MLC_MODEL']:
conf[metric] = 400
- elif 'gptj' in env['CM_MODEL']:
+ elif 'gptj' in env['MLC_MODEL']:
conf[metric] = 1000
else:
conf[metric] = 100
@@ -164,93 +164,93 @@ def preprocess(i):
metric_value = conf[metric]
# else:
# return {'return': 1, 'error': f"Config details missing for
- # SUT:{env['CM_SUT_NAME']}, Model:{env['CM_MODEL']}, Scenario:
+ # SUT:{env['MLC_SUT_NAME']}, Model:{env['MLC_MODEL']}, Scenario:
# {scenario}. Please input {metric} value"}
# Pass the modified performance metrics to the implementation
- if env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes":
+ if env.get("MLC_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes":
if metric == "target_latency" and env.get(
- 'CM_MLPERF_LOADGEN_TARGET_LATENCY', '') == '':
- env['CM_MLPERF_LOADGEN_TARGET_LATENCY'] = conf[metric]
- elif metric == "target_qps" and env.get('CM_MLPERF_LOADGEN_TARGET_QPS', '') == '':
- env['CM_MLPERF_LOADGEN_TARGET_QPS'] = conf[metric]
+ 'MLC_MLPERF_LOADGEN_TARGET_LATENCY', '') == '':
+ env['MLC_MLPERF_LOADGEN_TARGET_LATENCY'] = conf[metric]
+ elif metric == "target_qps" and env.get('MLC_MLPERF_LOADGEN_TARGET_QPS', '') == '':
+ env['MLC_MLPERF_LOADGEN_TARGET_QPS'] = conf[metric]
- if env['CM_MLPERF_RUN_STYLE'] == "fast":
+ if env['MLC_MLPERF_RUN_STYLE'] == "fast":
if scenario == "Offline":
metric_value = float(metric_value) / fast_factor
if scenario in ["SingleStream", "MultiStream"]:
metric_value = float(metric_value) * fast_factor
- elif env['CM_MLPERF_RUN_STYLE'] == "test":
+ elif env['MLC_MLPERF_RUN_STYLE'] == "test":
if scenario == "Offline":
- metric_value = float(env.get('CM_MLPERF_INFERENCE_TEST_QPS', 1))
+ metric_value = float(env.get('MLC_MLPERF_INFERENCE_TEST_QPS', 1))
if scenario in ["SingleStream"]:
metric_value = 1000
- elif env['CM_MLPERF_RUN_STYLE'] == "valid":
+ elif env['MLC_MLPERF_RUN_STYLE'] == "valid":
if scenario == "Offline":
required_min_queries_offline = {}
required_min_queries_offline = get_required_min_queries_offline(
- env['CM_MODEL'], version)
+ env['MLC_MODEL'], version)
if mode == "compliance" and scenario == "Server": # Adjust the server_target_qps
- test = env.get("CM_MLPERF_LOADGEN_COMPLIANCE_TEST", "TEST01")
+ test = env.get("MLC_MLPERF_LOADGEN_COMPLIANCE_TEST", "TEST01")
if test == "TEST01":
metric_value = str(
float(metric_value) *
float(
env.get(
- "CM_MLPERF_TEST01_SERVER_ADJUST_FACTOR",
+ "MLC_MLPERF_TEST01_SERVER_ADJUST_FACTOR",
0.96)))
# if test == "TEST05":
- # metric_value = str(float(metric_value) * float(env.get("CM_MLPERF_TEST05_SERVER_ADJUST_FACTOR", 0.97)))
+ # metric_value = str(float(metric_value) * float(env.get("MLC_MLPERF_TEST05_SERVER_ADJUST_FACTOR", 0.97)))
if test == "TEST04":
metric_value = str(
float(metric_value) *
float(
env.get(
- "CM_MLPERF_TEST04_SERVER_ADJUST_FACTOR",
+ "MLC_MLPERF_TEST04_SERVER_ADJUST_FACTOR",
0.97)))
conf[metric] = metric_value
user_conf += ml_model_name + "." + scenario + \
"." + metric + " = " + str(metric_value) + "\n"
- if env.get('CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT', '') != '':
- performance_sample_count = env['CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT']
+ if env.get('MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT', '') != '':
+ performance_sample_count = env['MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT']
user_conf += ml_model_name + ".*.performance_sample_count_override = " + \
performance_sample_count + "\n"
log_mode = mode
- if 'CM_MLPERF_POWER' in env and mode == "performance":
+ if 'MLC_MLPERF_POWER' in env and mode == "performance":
log_mode = "performance_power"
- env['CM_MLPERF_INFERENCE_FINAL_RESULTS_DIR'] = os.path.join(
- env['OUTPUT_BASE_DIR'], env['CM_OUTPUT_FOLDER_NAME'])
+ env['MLC_MLPERF_INFERENCE_FINAL_RESULTS_DIR'] = os.path.join(
+ env['OUTPUT_BASE_DIR'], env['MLC_OUTPUT_FOLDER_NAME'])
sut_name = env.get(
- 'CM_SUT_NAME',
- env['CM_MLPERF_BACKEND'] +
+ 'MLC_SUT_NAME',
+ env['MLC_MLPERF_BACKEND'] +
"-" +
- env['CM_MLPERF_DEVICE'])
- OUTPUT_DIR = os.path.join(env['CM_MLPERF_INFERENCE_FINAL_RESULTS_DIR'], sut_name,
+ env['MLC_MLPERF_DEVICE'])
+ OUTPUT_DIR = os.path.join(env['MLC_MLPERF_INFERENCE_FINAL_RESULTS_DIR'], sut_name,
model_full_name, scenario.lower(), mode)
- env['CM_MLPERF_INFERENCE_RESULTS_SUT_PATH'] = os.path.join(
- env['CM_MLPERF_INFERENCE_FINAL_RESULTS_DIR'], sut_name)
+ env['MLC_MLPERF_INFERENCE_RESULTS_SUT_PATH'] = os.path.join(
+ env['MLC_MLPERF_INFERENCE_FINAL_RESULTS_DIR'], sut_name)
- if 'CM_MLPERF_POWER' in env and mode == "performance":
- env['CM_MLPERF_POWER_LOG_DIR'] = os.path.join(OUTPUT_DIR, "tmp_power")
+ if 'MLC_MLPERF_POWER' in env and mode == "performance":
+ env['MLC_MLPERF_POWER_LOG_DIR'] = os.path.join(OUTPUT_DIR, "tmp_power")
if mode == "accuracy":
pass
elif mode == "performance":
OUTPUT_DIR = os.path.join(OUTPUT_DIR, "run_1")
elif mode == "compliance":
- test = env.get("CM_MLPERF_LOADGEN_COMPLIANCE_TEST", "TEST01")
+ test = env.get("MLC_MLPERF_LOADGEN_COMPLIANCE_TEST", "TEST01")
OUTPUT_DIR = os.path.join(
env['OUTPUT_BASE_DIR'],
- env['CM_OUTPUT_FOLDER_NAME'],
+ env['MLC_OUTPUT_FOLDER_NAME'],
sut_name,
model_full_name,
scenario.lower(),
@@ -261,12 +261,12 @@ def preprocess(i):
audit_path = test
audit_full_path = os.path.join(
- env['CM_MLPERF_INFERENCE_SOURCE'],
+ env['MLC_MLPERF_INFERENCE_SOURCE'],
"compliance",
"nvidia",
audit_path,
"audit.config")
- env['CM_MLPERF_INFERENCE_AUDIT_PATH'] = audit_full_path
+ env['MLC_MLPERF_INFERENCE_AUDIT_PATH'] = audit_full_path
# copy the audit conf to the run directory in case the implementation is
# not supporting the audit-conf path
if not os.path.exists(OUTPUT_DIR):
@@ -277,20 +277,20 @@ def preprocess(i):
OUTPUT_DIR,
"audit.config"))
- env['CM_MLPERF_OUTPUT_DIR'] = OUTPUT_DIR
- env['CM_LOGS_DIR'] = OUTPUT_DIR
- env['CM_MLPERF_LOADGEN_LOGS_DIR'] = OUTPUT_DIR
+ env['MLC_MLPERF_OUTPUT_DIR'] = OUTPUT_DIR
+ env['MLC_LOGS_DIR'] = OUTPUT_DIR
+ env['MLC_MLPERF_LOADGEN_LOGS_DIR'] = OUTPUT_DIR
if mode == "accuracy":
- output_dir = env['CM_MLPERF_OUTPUT_DIR']
- env['CM_MLPERF_ACCURACY_RESULTS_DIR'] = output_dir
+ output_dir = env['MLC_MLPERF_OUTPUT_DIR']
+ env['MLC_MLPERF_ACCURACY_RESULTS_DIR'] = output_dir
else:
- env['CM_MLPERF_ACCURACY_RESULTS_DIR'] = ''
+ env['MLC_MLPERF_ACCURACY_RESULTS_DIR'] = ''
run_exists = run_files_exist(log_mode, OUTPUT_DIR, required_files, env)
- if 'CM_MLPERF_POWER' in env and env.get(
- 'CM_MLPERF_SHORT_RANGING_RUN', '') != 'no' and env['CM_MLPERF_RUN_STYLE'] == "valid" and mode == "performance":
+ if 'MLC_MLPERF_POWER' in env and env.get(
+ 'MLC_MLPERF_SHORT_RANGING_RUN', '') != 'no' and env['MLC_MLPERF_RUN_STYLE'] == "valid" and mode == "performance":
short_ranging = True
else:
short_ranging = False
@@ -301,18 +301,18 @@ def preprocess(i):
ranging_user_conf += ml_model_name + "." + \
scenario + ".min_duration = 300000" + "\n"
- if env['CM_MLPERF_RUN_STYLE'] == "test":
- max_duration_test_s = int(env.get('CM_MLPERF_MAX_DURATION_TEST', 30))
+ if env['MLC_MLPERF_RUN_STYLE'] == "test":
+ max_duration_test_s = int(env.get('MLC_MLPERF_MAX_DURATION_TEST', 30))
max_duration_test = str(max_duration_test_s * 1000) # in milliseconds
- query_count = int(env.get('CM_TEST_QUERY_COUNT', 5))
+ query_count = int(env.get('MLC_TEST_QUERY_COUNT', 5))
min_query_count = int(
env.get(
- 'CM_MLPERF_INFERENCE_MIN_QUERY_COUNT',
+ 'MLC_MLPERF_INFERENCE_MIN_QUERY_COUNT',
query_count))
max_query_count = max(
min_query_count, int(
env.get(
- 'CM_MLPERF_INFERENCE_MAX_QUERY_COUNT', query_count)))
+ 'MLC_MLPERF_INFERENCE_MAX_QUERY_COUNT', query_count)))
user_conf += ml_model_name + "." + scenario + \
".max_query_count = " + str(max_query_count) + "\n"
user_conf += ml_model_name + "." + scenario + \
@@ -320,19 +320,19 @@ def preprocess(i):
user_conf += ml_model_name + "." + scenario + ".min_duration = 0" + "\n"
user_conf += ml_model_name + "." + scenario + \
".sample_concatenate_permutation = 0" + "\n"
- env['CM_MLPERF_MAX_QUERY_COUNT'] = max_query_count
+ env['MLC_MLPERF_MAX_QUERY_COUNT'] = max_query_count
# max_duration is effective for all scenarios except the Offline
- if env.get('CM_MLPERF_USE_MAX_DURATION', 'yes').lower() not in [
+ if env.get('MLC_MLPERF_USE_MAX_DURATION', 'yes').lower() not in [
"no", "false", "0"]:
if scenario != "Offline":
user_conf += ml_model_name + "." + scenario + \
f".max_duration = {max_duration_test}" + "\n"
- elif env['CM_MLPERF_RUN_STYLE'] == "fast":
+ elif env['MLC_MLPERF_RUN_STYLE'] == "fast":
user_conf += ml_model_name + "." + scenario + \
".sample_concatenate_permutation = 0" + "\n"
- max_duration_fast_s = int(env.get('CM_MLPERF_MAX_DURATION_FAST', 120))
+ max_duration_fast_s = int(env.get('MLC_MLPERF_MAX_DURATION_FAST', 120))
max_duration_fast = str(max_duration_fast_s * 1000) # in milliseconds
if scenario == "Server":
user_conf += ml_model_name + "." + scenario + \
@@ -341,31 +341,31 @@ def preprocess(i):
query_count = str(int((660 / fast_factor) * (float(target_qps))))
user_conf += ml_model_name + "." + scenario + \
".max_query_count = " + query_count + "\n"
- env['CM_MLPERF_MAX_QUERY_COUNT'] = query_count
+ env['MLC_MLPERF_MAX_QUERY_COUNT'] = query_count
else:
max_duration_valid_s = int(
- env.get('CM_MLPERF_MAX_DURATION_VALID', 660))
+ env.get('MLC_MLPERF_MAX_DURATION_VALID', 660))
max_duration_valid = str(
max_duration_valid_s *
1000) # in milliseconds
max_duration_ranging_s = int(
- env.get('CM_MLPERF_MAX_DURATION_RANGING', 300))
+ env.get('MLC_MLPERF_MAX_DURATION_RANGING', 300))
max_duration_ranging = str(
max_duration_ranging_s *
1000) # in milliseconds
if scenario == "MultiStream" or scenario == "SingleStream":
- if env.get('CM_MLPERF_USE_MAX_DURATION', 'yes').lower() not in ["no", "false", "0"] and env.get(
- 'CM_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() not in ["yes", "1", "true"]:
+ if env.get('MLC_MLPERF_USE_MAX_DURATION', 'yes').lower() not in ["no", "false", "0"] and env.get(
+ 'MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() not in ["yes", "1", "true"]:
user_conf += ml_model_name + "." + scenario + \
f".max_duration = {max_duration_valid}" + "\n"
- elif env.get('CM_MLPERF_INFERENCE_MIN_DURATION', '') != '':
+ elif env.get('MLC_MLPERF_INFERENCE_MIN_DURATION', '') != '':
user_conf += ml_model_name + "." + scenario + ".min_duration = " + \
- env['CM_MLPERF_INFERENCE_MIN_DURATION'] + " \n"
+ env['MLC_MLPERF_INFERENCE_MIN_DURATION'] + " \n"
if scenario == "MultiStream":
user_conf += ml_model_name + "." + scenario + ".min_query_count = " + \
env.get(
- 'CM_MLPERF_INFERENCE_MULTISTREAM_MIN_QUERY_COUNT',
+ 'MLC_MLPERF_INFERENCE_MULTISTREAM_MIN_QUERY_COUNT',
"662") + "\n"
if short_ranging:
ranging_user_conf += ml_model_name + "." + scenario + \
@@ -383,7 +383,7 @@ def preprocess(i):
if query_count:
# needed for squad accuracy checker
- env['CM_MAX_EXAMPLES'] = str(query_count)
+ env['MLC_MAX_EXAMPLES'] = str(query_count)
import uuid
from pathlib import Path
@@ -399,44 +399,44 @@ def preprocess(i):
ranging_user_conf_file = Path(ranging_user_conf_path)
ranging_user_conf_file.write_text(ranging_user_conf)
- if (env.get('CM_MLPERF_LOADGEN_QUERY_COUNT', '') == '') and query_count and (
- (mode != "accuracy") or (env['CM_MLPERF_RUN_STYLE'] != "valid")):
- env['CM_MLPERF_LOADGEN_QUERY_COUNT'] = str(query_count)
+ if (env.get('MLC_MLPERF_LOADGEN_QUERY_COUNT', '') == '') and query_count and (
+ (mode != "accuracy") or (env['MLC_MLPERF_RUN_STYLE'] != "valid")):
+ env['MLC_MLPERF_LOADGEN_QUERY_COUNT'] = str(query_count)
if not run_exists or rerun:
print("Output Dir: '" + OUTPUT_DIR + "'")
print(user_conf)
- if env.get('CM_MLPERF_POWER', '') == "yes" and os.path.exists(
- env.get('CM_MLPERF_POWER_LOG_DIR', '')):
- shutil.rmtree(env['CM_MLPERF_POWER_LOG_DIR'])
+ if env.get('MLC_MLPERF_POWER', '') == "yes" and os.path.exists(
+ env.get('MLC_MLPERF_POWER_LOG_DIR', '')):
+ shutil.rmtree(env['MLC_MLPERF_POWER_LOG_DIR'])
else:
- if not env.get('CM_MLPERF_COMPLIANCE_RUN_POSTPONED', False):
+ if not env.get('MLC_MLPERF_COMPLIANCE_RUN_POSTPONED', False):
print("Run files exist, skipping run...\n")
- env['CM_MLPERF_SKIP_RUN'] = "yes"
+ env['MLC_MLPERF_SKIP_RUN'] = "yes"
if not run_exists or rerun or not measure_files_exist(OUTPUT_DIR,
- required_files[4]) or env.get("CM_MLPERF_LOADGEN_COMPLIANCE", "") == "yes" or env.get("CM_REGENERATE_MEASURE_FILES", False):
+ required_files[4]) or env.get("MLC_MLPERF_LOADGEN_COMPLIANCE", "") == "yes" or env.get("MLC_REGENERATE_MEASURE_FILES", False):
- env['CM_MLPERF_TESTING_USER_CONF'] = os.path.join(
+ env['MLC_MLPERF_TESTING_USER_CONF'] = os.path.join(
os.path.dirname(user_conf_path), key + ".conf") # user_conf_path
- env['CM_MLPERF_RANGING_USER_CONF'] = os.path.join(
+ env['MLC_MLPERF_RANGING_USER_CONF'] = os.path.join(
os.path.dirname(user_conf_path),
"ranging_" + key + ".conf") # ranging_user_conf_path for a shorter run
if short_ranging:
- env['CM_MLPERF_USER_CONF'] = r"\${CM_MLPERF_USER_CONF}"
+ env['MLC_MLPERF_USER_CONF'] = r"\${MLC_MLPERF_USER_CONF}"
else:
- env['CM_MLPERF_USER_CONF'] = os.path.join(
+ env['MLC_MLPERF_USER_CONF'] = os.path.join(
os.path.dirname(user_conf_path), key + ".conf") # user_conf_path
else:
print(
f"Measure files exist at {OUTPUT_DIR}. Skipping regeneration...\n")
- env['CM_MLPERF_USER_CONF'] = ''
+ env['MLC_MLPERF_USER_CONF'] = ''
os.makedirs(OUTPUT_DIR, exist_ok=True)
- if str(env.get('CM_MLPERF_RESULTS_DIR_SHARED', '')
+ if str(env.get('MLC_MLPERF_RESULTS_DIR_SHARED', '')
).lower() in ["yes", "true", "1"]:
os.chmod(OUTPUT_DIR, 0o2775)
@@ -500,37 +500,37 @@ def run_files_exist(mode, OUTPUT_DIR, run_files, env):
"result_validity" not in mlperf_log.get_keys()
or mlperf_log["result_validity"] != "VALID"
):
- env['CM_MLPERF_COMPLIANCE_RUN_POSTPONED'] = True
+ env['MLC_MLPERF_COMPLIANCE_RUN_POSTPONED'] = True
return True
- test = env['CM_MLPERF_LOADGEN_COMPLIANCE_TEST']
+ test = env['MLC_MLPERF_LOADGEN_COMPLIANCE_TEST']
SCRIPT_PATH = os.path.join(
- env['CM_MLPERF_INFERENCE_SOURCE'],
+ env['MLC_MLPERF_INFERENCE_SOURCE'],
"compliance",
"nvidia",
test,
"run_verification.py")
if test == "TEST06":
- cmd = f"{env['CM_PYTHON_BIN_WITH_PATH']} {SCRIPT_PATH} -c {COMPLIANCE_DIR} -o {OUTPUT_DIR} --scenario {scenario} --dtype int32"
+ cmd = f"{env['MLC_PYTHON_BIN_WITH_PATH']} {SCRIPT_PATH} -c {COMPLIANCE_DIR} -o {OUTPUT_DIR} --scenario {scenario} --dtype int32"
else:
- cmd = f"{env['CM_PYTHON_BIN_WITH_PATH']} {SCRIPT_PATH} -r {RESULT_DIR} -c {COMPLIANCE_DIR} -o {OUTPUT_DIR}"
+ cmd = f"{env['MLC_PYTHON_BIN_WITH_PATH']} {SCRIPT_PATH} -r {RESULT_DIR} -c {COMPLIANCE_DIR} -o {OUTPUT_DIR}"
print(cmd)
os.system(cmd)
is_valid = checker.check_compliance_perf_dir(COMPLIANCE_DIR)
- if not is_valid and 'Stream' in env['CM_MLPERF_LOADGEN_SCENARIO']:
+ if not is_valid and 'Stream' in env['MLC_MLPERF_LOADGEN_SCENARIO']:
# We have the determined latency, compliance test failed, so let's
# not use max duration
- env['CM_MLPERF_USE_MAX_DURATION'] = 'no'
- env['CM_MLPERF_INFERENCE_MIN_DURATION'] = '990000' # Try a longer run
+ env['MLC_MLPERF_USE_MAX_DURATION'] = 'no'
+ env['MLC_MLPERF_INFERENCE_MIN_DURATION'] = '990000' # Try a longer run
return is_valid
if "power" in mode and env.get(
- 'CM_MLPERF_SKIP_POWER_CHECKS', 'no').lower() not in ["yes", "true", "on"]:
+ 'MLC_MLPERF_SKIP_POWER_CHECKS', 'no').lower() not in ["yes", "true", "on"]:
from power.power_checker import check as check_power_more
try:
is_valid = check_power_more(os.path.dirname(OUTPUT_DIR)) == 0
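
Note: the user_conf string assembled above is plain loadgen configuration, one "model.scenario.key = value" line per setting. A sketch of the format with illustrative values:

def user_conf_lines(model, scenario, settings):
    return ''.join(f"{model}.{scenario}.{k} = {v}\n"
                   for k, v in settings.items())

print(user_conf_lines('resnet50', 'Offline',
                      {'target_qps': 1, 'max_query_count': 10,
                       'min_duration': 0}), end='')
# resnet50.Offline.target_qps = 1
# resnet50.Offline.max_query_count = 10
# resnet50.Offline.min_duration = 0
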
diff --git a/script/generate-mlperf-inference-user-conf/meta.yaml b/script/generate-mlperf-inference-user-conf/meta.yaml
index c19bdcba3..fbba97b33 100644
--- a/script/generate-mlperf-inference-user-conf/meta.yaml
+++ b/script/generate-mlperf-inference-user-conf/meta.yaml
@@ -19,48 +19,48 @@ tags:
# Default environment
default_env:
- CM_MLPERF_LOADGEN_MODE: accuracy
- CM_MLPERF_LOADGEN_SCENARIO: Offline
- CM_OUTPUT_FOLDER_NAME: test_results
- CM_MLPERF_RUN_STYLE: test
- CM_TEST_QUERY_COUNT: '10'
- CM_FAST_FACTOR: '5'
- CM_MLPERF_QUANTIZATION: off
- CM_MLPERF_RESULTS_DIR_SHARED: yes
+ MLC_MLPERF_LOADGEN_MODE: accuracy
+ MLC_MLPERF_LOADGEN_SCENARIO: Offline
+ MLC_OUTPUT_FOLDER_NAME: test_results
+ MLC_MLPERF_RUN_STYLE: test
+ MLC_TEST_QUERY_COUNT: '10'
+ MLC_FAST_FACTOR: '5'
+ MLC_MLPERF_QUANTIZATION: off
+ MLC_MLPERF_RESULTS_DIR_SHARED: yes
docker:
real_run: False
# Map script inputs to environment variables
input_mapping:
- count: CM_MLPERF_LOADGEN_QUERY_COUNT
- hw_name: CM_HW_NAME
- mode: CM_MLPERF_LOADGEN_MODE
- num_threads: CM_NUM_THREADS
+ count: MLC_MLPERF_LOADGEN_QUERY_COUNT
+ hw_name: MLC_HW_NAME
+ mode: MLC_MLPERF_LOADGEN_MODE
+ num_threads: MLC_NUM_THREADS
output_dir: OUTPUT_BASE_DIR
- power: CM_MLPERF_POWER
- regenerate_files: CM_REGENERATE_MEASURE_FILES
- rerun: CM_RERUN
- scenario: CM_MLPERF_LOADGEN_SCENARIO
- test_query_count: CM_TEST_QUERY_COUNT
- target_qps: CM_MLPERF_LOADGEN_TARGET_QPS
- target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY
- offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
- server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS
- singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
- multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
- performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
+ power: MLC_MLPERF_POWER
+ regenerate_files: MLC_REGENERATE_MEASURE_FILES
+ rerun: MLC_RERUN
+ scenario: MLC_MLPERF_LOADGEN_SCENARIO
+ test_query_count: MLC_TEST_QUERY_COUNT
+ target_qps: MLC_MLPERF_LOADGEN_TARGET_QPS
+ target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY
+ offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
+ server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS
+ singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
+ multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
+ performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
# Env keys which are exposed to higher level scripts
new_env_keys:
- - CM_MLPERF_*
- - CM_LOGS_DIR
- - CM_HW_*
- - CM_SUT_*
- - CM_MAX_EXAMPLES
+ - MLC_MLPERF_*
+ - MLC_LOGS_DIR
+ - MLC_HW_*
+ - MLC_SUT_*
+ - MLC_MAX_EXAMPLES
new_state_keys:
- - CM_SUT_*
+ - MLC_SUT_*
# Dependencies on other CM scripts
deps:
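
Note: the default_env block above supplies fallbacks only; a key the caller already set is left alone (assumed merge semantics). Sketch:

def apply_defaults(env, default_env):
    for k, v in default_env.items():
        env.setdefault(k, v)  # keep any caller-provided value
    return env

print(apply_defaults({'MLC_MLPERF_RUN_STYLE': 'valid'},
                     {'MLC_MLPERF_RUN_STYLE': 'test',
                      'MLC_TEST_QUERY_COUNT': '10'}))
# {'MLC_MLPERF_RUN_STYLE': 'valid', 'MLC_TEST_QUERY_COUNT': '10'}
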
diff --git a/script/generate-mlperf-tiny-report/customize.py b/script/generate-mlperf-tiny-report/customize.py
index 6efd29616..32bc3701d 100644
--- a/script/generate-mlperf-tiny-report/customize.py
+++ b/script/generate-mlperf-tiny-report/customize.py
@@ -13,7 +13,7 @@ def preprocess(i):
cur_dir = os.getcwd()
# Query cache for results dirs
- env_repo_tags = env.get('CM_IMPORT_TINYMLPERF_REPO_TAGS', '').strip()
+ env_repo_tags = env.get('MLC_IMPORT_TINYMLPERF_REPO_TAGS', '').strip()
xtags = '' if env_repo_tags == '' else ',version-' + env_repo_tags
r = mlc.access({'action': 'find',
@@ -45,9 +45,9 @@ def preprocess(i):
run_script_input = i['run_script_input']
automation = i['automation']
- env['CM_TINYMLPERF_REPO_PATH'] = path
- env['CM_TINYMLPERF_CURRENT_DIR'] = cur_dir
- env['CM_TINYMLPERF_REPO_VERSION'] = version
+ env['MLC_TINYMLPERF_REPO_PATH'] = path
+ env['MLC_TINYMLPERF_CURRENT_DIR'] = cur_dir
+ env['MLC_TINYMLPERF_REPO_VERSION'] = version
print('')
print('Repo path: {}'.format(path))
@@ -65,9 +65,9 @@ def postprocess(i):
env = i['env']
- path = env['CM_TINYMLPERF_REPO_PATH']
- cur_dir = env['CM_TINYMLPERF_CURRENT_DIR']
- version = env['CM_TINYMLPERF_REPO_VERSION']
+ path = env['MLC_TINYMLPERF_REPO_PATH']
+ cur_dir = env['MLC_TINYMLPERF_CURRENT_DIR']
+ version = env['MLC_TINYMLPERF_REPO_VERSION']
for ext in ['.csv', '.xlsx']:
diff --git a/script/generate-mlperf-tiny-report/meta.yaml b/script/generate-mlperf-tiny-report/meta.yaml
index 3af0906f7..467226c1b 100644
--- a/script/generate-mlperf-tiny-report/meta.yaml
+++ b/script/generate-mlperf-tiny-report/meta.yaml
@@ -10,7 +10,7 @@ category: "MLPerf benchmark support"
developers: "[Grigori Fursin](https://cKnowledge.org/gfursin)"
default_env:
- CM_IMPORT_TINYMLPERF_REPO_TAGS: "1.1-private"
+ MLC_IMPORT_TINYMLPERF_REPO_TAGS: "1.1-private"
# User-friendly tags to find this CM script
tags:
@@ -21,7 +21,7 @@ tags:
- report
input_mapping:
- repo_tags: CM_IMPORT_TINYMLPERF_REPO_TAGS
+ repo_tags: MLC_IMPORT_TINYMLPERF_REPO_TAGS
# Dependencies on other CM scripts
deps:
diff --git a/script/generate-mlperf-tiny-report/run_submission_checker.bat b/script/generate-mlperf-tiny-report/run_submission_checker.bat
index 5d9a6fbaf..5cd8a781a 100644
--- a/script/generate-mlperf-tiny-report/run_submission_checker.bat
+++ b/script/generate-mlperf-tiny-report/run_submission_checker.bat
@@ -1,10 +1,10 @@
-cd %CM_TINYMLPERF_REPO_PATH%
+cd %MLC_TINYMLPERF_REPO_PATH%
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
echo.
-%CM_PYTHON_BIN_WITH_PATH% submission_checker.py --input .
+%MLC_PYTHON_BIN_WITH_PATH% submission_checker.py --input .
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
echo.
-%CM_PYTHON_BIN_WITH_PATH% generate_final_report.py --input summary.csv
+%MLC_PYTHON_BIN_WITH_PATH% generate_final_report.py --input summary.csv
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/generate-mlperf-tiny-report/run_submission_checker.sh b/script/generate-mlperf-tiny-report/run_submission_checker.sh
index d858c9b22..115b52365 100644
--- a/script/generate-mlperf-tiny-report/run_submission_checker.sh
+++ b/script/generate-mlperf-tiny-report/run_submission_checker.sh
@@ -1,12 +1,12 @@
#!/bin/bash
-cd ${CM_TINYMLPERF_REPO_PATH}
+cd ${MLC_TINYMLPERF_REPO_PATH}
test $? -eq 0 || exit $?
echo ""
-${CM_PYTHON_BIN_WITH_PATH} submission_checker.py --input .
+${MLC_PYTHON_BIN_WITH_PATH} submission_checker.py --input .
test $? -eq 0 || exit $?
echo ""
-${CM_PYTHON_BIN_WITH_PATH} generate_final_report.py --input summary.csv
+${MLC_PYTHON_BIN_WITH_PATH} generate_final_report.py --input summary.csv
test $? -eq 0 || exit $?
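
Note: a Python equivalent of the fail-fast chain in run_submission_checker.sh above (sketch mirroring the two commands and their exit-code checks):

import os
import subprocess

def run_checker(env):
    os.chdir(env['MLC_TINYMLPERF_REPO_PATH'])
    py = env['MLC_PYTHON_BIN_WITH_PATH']
    subprocess.run([py, 'submission_checker.py', '--input', '.'], check=True)
    subprocess.run([py, 'generate_final_report.py', '--input', 'summary.csv'],
                   check=True)
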
diff --git a/script/generate-mlperf-tiny-submission/customize.py b/script/generate-mlperf-tiny-submission/customize.py
index 45e7f2d84..32a97ef28 100644
--- a/script/generate-mlperf-tiny-submission/customize.py
+++ b/script/generate-mlperf-tiny-submission/customize.py
@@ -17,11 +17,11 @@ def generate_submission(i):
env = i['env']
state = i['state']
inp = i['input']
- results_dir = env['CM_MLPERF_RESULTS_DIR']
+ results_dir = env['MLC_MLPERF_RESULTS_DIR']
- if 'CM_MLPERF_SUBMISSION_DIR' not in env:
- env['CM_MLPERF_SUBMISSION_DIR'] = os.path.join(cur_dir, "results")
- submission_dir = env['CM_MLPERF_SUBMISSION_DIR']
+ if 'MLC_MLPERF_SUBMISSION_DIR' not in env:
+ env['MLC_MLPERF_SUBMISSION_DIR'] = os.path.join(cur_dir, "results")
+ submission_dir = env['MLC_MLPERF_SUBMISSION_DIR']
if not os.path.isdir(submission_dir):
os.makedirs(submission_dir)
@@ -37,7 +37,7 @@ def generate_submission(i):
if division not in ['open', 'closed']:
return {'return': 1, 'error': '"division" must be "open" or "closed"'}
- system_meta = state['CM_SUT_META']
+ system_meta = state['MLC_SUT_META']
division = system_meta['division']
print('* MLPerf tiny division: {}'.format(division))
@@ -49,7 +49,7 @@ def generate_submission(i):
# Check submitter
submitter = system_meta['submitter']
- env['CM_MLPERF_SUBMITTER'] = submitter
+ env['MLC_MLPERF_SUBMITTER'] = submitter
print('* MLPerf tiny submitter: {}'.format(submitter))
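
[Note] The hunk above keeps the existing default: when MLC_MLPERF_SUBMISSION_DIR is unset, submissions land in a results folder under cur_dir, which is assigned earlier in generate_submission(), outside this hunk. A minimal sketch of that behavior:

    import os

    env = {}               # illustrative: no submission dir configured
    cur_dir = os.getcwd()  # assigned earlier in generate_submission()
    if 'MLC_MLPERF_SUBMISSION_DIR' not in env:
        env['MLC_MLPERF_SUBMISSION_DIR'] = os.path.join(cur_dir, 'results')
    submission_dir = env['MLC_MLPERF_SUBMISSION_DIR']
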
diff --git a/script/generate-mlperf-tiny-submission/meta.yaml b/script/generate-mlperf-tiny-submission/meta.yaml
index e6f112c42..5b6bce128 100644
--- a/script/generate-mlperf-tiny-submission/meta.yaml
+++ b/script/generate-mlperf-tiny-submission/meta.yaml
@@ -11,7 +11,7 @@ deps:
- tags: get,sut,system-description
post_deps:
- enable_if_env:
- CM_MLPERF_RUN_STYLE:
+ MLC_MLPERF_RUN_STYLE:
- valid
tags:
- generate
diff --git a/script/generate-nvidia-engine/customize.py b/script/generate-nvidia-engine/customize.py
index 832e32e6c..11a97df9c 100644
--- a/script/generate-nvidia-engine/customize.py
+++ b/script/generate-nvidia-engine/customize.py
@@ -10,21 +10,21 @@ def preprocess(i):
if os_info['platform'] == 'windows':
return {'return': 1, 'error': 'Windows is not supported in this script yet'}
- if 'CM_MODEL' not in env:
+ if 'MLC_MODEL' not in env:
return {
'return': 1, 'error': 'Please select a variation specifying the model to run'}
- if 'CM_MLPERF_DEVICE' not in env:
+ if 'MLC_MLPERF_DEVICE' not in env:
return {
'return': 1, 'error': 'Please select a variation specifying the device to run on'}
# will later extend to other scenarios
- scenarios = env['CM_LOADGEN_SCENARIO']
+ scenarios = env['MLC_LOADGEN_SCENARIO']
cmd = " --action generate_engines " +\
- " --benchmarks " + env['CM_MODEL'] + \
+ " --benchmarks " + env['MLC_MODEL'] + \
" --scenarios " + scenarios + \
- " --gpu_batch_size=" + env['CM_MODEL_BATCH_SIZE'] +\
- " --gpu_copy_streams=" + env['CM_GPU_COPY_STREAMS'] +\
- " --workspace_size=" + env['CM_TENSORRT_WORKSPACE_SIZE']
+ " --gpu_batch_size=" + env['MLC_MODEL_BATCH_SIZE'] +\
+ " --gpu_copy_streams=" + env['MLC_GPU_COPY_STREAMS'] +\
+ " --workspace_size=" + env['MLC_TENSORRT_WORKSPACE_SIZE']
diff --git a/script/generate-nvidia-engine/meta.yaml b/script/generate-nvidia-engine/meta.yaml
index 7a6852447..b63ba77e1 100644
--- a/script/generate-nvidia-engine/meta.yaml
+++ b/script/generate-nvidia-engine/meta.yaml
@@ -18,19 +18,19 @@ tags:
# Default environment
default_env:
- CM_BATCH_COUNT: '1'
- CM_BATCH_SIZE: '1'
- CM_LOADGEN_SCENARIO: 'Offline'
- CM_GPU_COPY_STREAMS: '1'
- CM_TENSORRT_WORKSPACE_SIZE: '4194304'
+ MLC_BATCH_COUNT: '1'
+ MLC_BATCH_SIZE: '1'
+ MLC_LOADGEN_SCENARIO: 'Offline'
+ MLC_GPU_COPY_STREAMS: '1'
+ MLC_TENSORRT_WORKSPACE_SIZE: '4194304'
# Map script inputs to environment variables
input_mapping:
- output_dir: CM_MLPERF_OUTPUT_DIR
+ output_dir: MLC_MLPERF_OUTPUT_DIR
new_env_keys:
- - CM_MLPERF_*
- - CM_DATASET_*
+ - MLC_MLPERF_*
+ - MLC_DATASET_*
# Dependencies on other CM scripts
@@ -81,14 +81,14 @@ deps:
# Install ResNet50 model (ONNX) and ImageNet
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- resnet50
names:
- imagenet-preprocessed
tags: get,dataset,preprocessed,imagenet,_NCHW
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- resnet50
names:
- ml-model
@@ -100,14 +100,14 @@ deps:
# Install RetinaNet model (ONNX) and OpenImages
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- retinanet
names:
- openimages-preprocessed
tags: get,dataset,preprocessed,openimages,_validation,_NCHW
- enable_if_env:
- CM_MODEL:
+ MLC_MODEL:
- retinanet
names:
- ml-model
@@ -124,11 +124,11 @@ variations:
group: device
default: true
env:
- CM_MLPERF_DEVICE: cpu
+ MLC_MLPERF_DEVICE: cpu
cuda:
env:
- CM_MLPERF_DEVICE: gpu
- CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart
+ MLC_MLPERF_DEVICE: gpu
+ MLC_MLPERF_DEVICE_LIB_NAMESPEC: cudart
# Reference MLPerf models
@@ -136,17 +136,17 @@ variations:
group: model
default: true
env:
- CM_MODEL: resnet50
+ MLC_MODEL: resnet50
retinanet:
group: model
env:
- CM_MODEL: retinanet
+ MLC_MODEL: retinanet
batch_size.#:
env:
- CM_MODEL_BATCH_SIZE: #
+ MLC_MODEL_BATCH_SIZE: #
copy_streams.#:
env:
- CM_GPU_COPY_STREAMS: #
+ MLC_GPU_COPY_STREAMS: #
diff --git a/script/generate-nvidia-engine/run.sh b/script/generate-nvidia-engine/run.sh
index c5dd2d9a4..4372d5023 100644
--- a/script/generate-nvidia-engine/run.sh
+++ b/script/generate-nvidia-engine/run.sh
@@ -1,7 +1,7 @@
#!/bin/bash
-nvidia_code_path=${CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH}
+nvidia_code_path=${MLC_MLPERF_INFERENCE_NVIDIA_CODE_PATH}
cd ${nvidia_code_path}
-scenarios=${CM_TMP_LOADGEN_SCENARIOS}
+scenarios=${MLC_TMP_LOADGEN_SCENARIOS}
#batchsize=$
python3 code/main.py --action generate_engines --benchmarks resnet50 --scenarios $scenarios --gpu_batch_size=256 --gpu_copy_streams=1 --workspace_size=4194304
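
[Note] run.sh above still hardcodes resnet50, batch size 256, one copy stream and the 4194304-byte workspace, while customize.py assembles the same command from the renamed env keys. A sketch of that assembly, mirroring the concatenation in the customize.py hunk (values are taken from meta.yaml defaults; MLC_MODEL_BATCH_SIZE here assumes the batch_size.256 variation):

    env = {
        'MLC_MODEL': 'resnet50',
        'MLC_LOADGEN_SCENARIO': 'Offline',
        'MLC_MODEL_BATCH_SIZE': '256',
        'MLC_GPU_COPY_STREAMS': '1',
        'MLC_TENSORRT_WORKSPACE_SIZE': '4194304',
    }
    scenarios = env['MLC_LOADGEN_SCENARIO']
    cmd = (" --action generate_engines " +
           " --benchmarks " + env['MLC_MODEL'] +
           " --scenarios " + scenarios +
           " --gpu_batch_size=" + env['MLC_MODEL_BATCH_SIZE'] +
           " --gpu_copy_streams=" + env['MLC_GPU_COPY_STREAMS'] +
           " --workspace_size=" + env['MLC_TENSORRT_WORKSPACE_SIZE'])
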
diff --git a/script/get-android-sdk/customize.py b/script/get-android-sdk/customize.py
index c1f7aea5d..38598bc5b 100644
--- a/script/get-android-sdk/customize.py
+++ b/script/get-android-sdk/customize.py
@@ -24,7 +24,7 @@ def preprocess(i):
if android_home == '':
android_home = cur_dir
- env['CM_ANDROID_HOME'] = android_home
+ env['MLC_ANDROID_HOME'] = android_home
env['ANDROID_HOME'] = android_home
paths = []
@@ -61,19 +61,19 @@ def preprocess(i):
os.chdir(new_path)
- cmdline_tools_version = env.get('CM_ANDROID_CMDLINE_TOOLS_VERSION', '')
+ cmdline_tools_version = env.get('MLC_ANDROID_CMDLINE_TOOLS_VERSION', '')
- env['CM_ANDROID_CMDLINE_TOOLS_VERSION'] = cmdline_tools_version
+ env['MLC_ANDROID_CMDLINE_TOOLS_VERSION'] = cmdline_tools_version
- package_url = env['CM_ANDROID_CMDLINE_TOOLS_URL']
+ package_url = env['MLC_ANDROID_CMDLINE_TOOLS_URL']
package_url = package_url.replace(
- '${CM_ANDROID_CMDLINE_TOOLS_OS}',
+ '${MLC_ANDROID_CMDLINE_TOOLS_OS}',
host_os_for_android)
package_url = package_url.replace(
- '${CM_ANDROID_CMDLINE_TOOLS_VERSION}',
+ '${MLC_ANDROID_CMDLINE_TOOLS_VERSION}',
cmdline_tools_version)
- env['CM_ANDROID_CMDLINE_TOOLS_URL'] = package_url
+ env['MLC_ANDROID_CMDLINE_TOOLS_URL'] = package_url
print('')
print('Downloading from {} ...'.format(package_url))
@@ -114,10 +114,10 @@ def preprocess(i):
sdk_manager_dir = os.path.dirname(sdk_manager_path)
- env['CM_ANDROID_SDK_MANAGER_BIN'] = sdk_manager_file
- env['CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH'] = sdk_manager_path
+ env['MLC_ANDROID_SDK_MANAGER_BIN'] = sdk_manager_file
+ env['MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH'] = sdk_manager_path
- env['CM_GET_DEPENDENT_CACHED_PATH'] = cur_dir
+ env['MLC_GET_DEPENDENT_CACHED_PATH'] = cur_dir
paths.append(sdk_manager_dir)
@@ -129,41 +129,41 @@ def preprocess(i):
if r['return'] > 0:
return r
- build_tools_version = env['CM_ANDROID_BUILD_TOOLS_VERSION']
+ build_tools_version = env['MLC_ANDROID_BUILD_TOOLS_VERSION']
path_build_tools = os.path.join(
android_home, 'build-tools', build_tools_version)
- env['CM_ANDROID_BUILD_TOOLS_PATH'] = path_build_tools
+ env['MLC_ANDROID_BUILD_TOOLS_PATH'] = path_build_tools
paths.append(path_build_tools)
- cmake_version = env['CM_ANDROID_CMAKE_VERSION']
+ cmake_version = env['MLC_ANDROID_CMAKE_VERSION']
path_cmake = os.path.join(android_home, 'cmake', cmake_version, 'bin')
- env['CM_ANDROID_CMAKE_PATH'] = path_cmake
+ env['MLC_ANDROID_CMAKE_PATH'] = path_cmake
paths.append(path_cmake)
path_emulator = os.path.join(android_home, 'emulator')
- env['CM_ANDROID_EMULATOR_PATH'] = path_emulator
+ env['MLC_ANDROID_EMULATOR_PATH'] = path_emulator
paths.append(path_emulator)
path_platform_tools = os.path.join(android_home, 'platform-tools')
- env['CM_ANDROID_PLATFORM_TOOLS_PATH'] = path_platform_tools
+ env['MLC_ANDROID_PLATFORM_TOOLS_PATH'] = path_platform_tools
paths.append(path_platform_tools)
- android_version = env['CM_ANDROID_VERSION']
+ android_version = env['MLC_ANDROID_VERSION']
path_platforms = os.path.join(android_home, 'platforms', android_version)
- env['CM_ANDROID_PLATFORMS_PATH'] = path_platforms
+ env['MLC_ANDROID_PLATFORMS_PATH'] = path_platforms
path_tools = os.path.join(android_home, 'tools')
- env['CM_ANDROID_TOOLS_PATH'] = path_tools
+ env['MLC_ANDROID_TOOLS_PATH'] = path_tools
paths.append(path_tools)
- android_ndk_version = env['CM_ANDROID_NDK_VERSION']
+ android_ndk_version = env['MLC_ANDROID_NDK_VERSION']
# Check Android NDK
path_ndk = os.path.join(android_home, 'ndk', android_ndk_version)
- env['CM_ANDROID_NDK_PATH'] = path_ndk
+ env['MLC_ANDROID_NDK_PATH'] = path_ndk
env['ANDROID_NDK_HOME'] = path_ndk
path_ndk_compiler = os.path.join(
@@ -173,8 +173,8 @@ def preprocess(i):
'prebuilt',
host_os_for_ndk,
'bin')
- env['CM_ANDROID_LLVM_PATH'] = path_ndk_compiler
- env['CM_ANDROID_LLVM_CLANG_BIN_WITH_PATH'] = os.path.join(
+ env['MLC_ANDROID_LLVM_PATH'] = path_ndk_compiler
+ env['MLC_ANDROID_LLVM_CLANG_BIN_WITH_PATH'] = os.path.join(
path_ndk_compiler, 'clang.exe')
paths.append(path_ndk_compiler)
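
[Note] The URL expansion in the hunk above substitutes literal ${MLC_...} tokens in the default template from meta.yaml with values from env. A minimal sketch, using the default version from meta.yaml and an assumed 'linux' value for host_os_for_android:

    url = ('https://dl.google.com/android/repository/commandlinetools-'
           '${MLC_ANDROID_CMDLINE_TOOLS_OS}-'
           '${MLC_ANDROID_CMDLINE_TOOLS_VERSION}_latest.zip')
    url = url.replace('${MLC_ANDROID_CMDLINE_TOOLS_OS}', 'linux')       # assumed host OS
    url = url.replace('${MLC_ANDROID_CMDLINE_TOOLS_VERSION}', '9123335')
    print(url)
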
diff --git a/script/get-android-sdk/meta.yaml b/script/get-android-sdk/meta.yaml
index a4da9f94f..f8d1edb6d 100644
--- a/script/get-android-sdk/meta.yaml
+++ b/script/get-android-sdk/meta.yaml
@@ -4,23 +4,23 @@ automation_uid: 5b4e0237da074764
cache: true
category: Detection or installation of tools and artifacts
default_env:
- CM_ANDROID_BUILD_TOOLS_VERSION: 29.0.3
- CM_ANDROID_CMAKE_VERSION: 3.6.4111459
- CM_ANDROID_CMDLINE_TOOLS_URL: https://dl.google.com/android/repository/commandlinetools-${CM_ANDROID_CMDLINE_TOOLS_OS}-${CM_ANDROID_CMDLINE_TOOLS_VERSION}_latest.zip
- CM_ANDROID_CMDLINE_TOOLS_VERSION: '9123335'
- CM_ANDROID_NDK_VERSION: 21.3.6528147
- CM_ANDROID_VERSION: '30'
+ MLC_ANDROID_BUILD_TOOLS_VERSION: 29.0.3
+ MLC_ANDROID_CMAKE_VERSION: 3.6.4111459
+ MLC_ANDROID_CMDLINE_TOOLS_URL: https://dl.google.com/android/repository/commandlinetools-${MLC_ANDROID_CMDLINE_TOOLS_OS}-${MLC_ANDROID_CMDLINE_TOOLS_VERSION}_latest.zip
+ MLC_ANDROID_CMDLINE_TOOLS_VERSION: '9123335'
+ MLC_ANDROID_NDK_VERSION: 21.3.6528147
+ MLC_ANDROID_VERSION: '30'
deps:
- tags: detect,os
- tags: get,java
input_mapping:
- android_cmake_version: CM_ANDROID_CMAKE_VERSION
- android_ndk_version: CM_ANDROID_NDK_VERSION
- android_version: CM_ANDROID_VERSION
- build_tools_version: CM_ANDROID_BUILD_TOOLS_VERSION
- cmdline_tools_version: CM_ANDROID_CMDLINE_TOOLS_VERSION
+ android_cmake_version: MLC_ANDROID_CMAKE_VERSION
+ android_ndk_version: MLC_ANDROID_NDK_VERSION
+ android_version: MLC_ANDROID_VERSION
+ build_tools_version: MLC_ANDROID_BUILD_TOOLS_VERSION
+ cmdline_tools_version: MLC_ANDROID_CMDLINE_TOOLS_VERSION
new_env_keys:
-- CM_ANDROID_HOME
+- MLC_ANDROID_HOME
- ANDROID_HOME
- ANDROID_NDK_HOME
- +PATH
diff --git a/script/get-android-sdk/prepare-sdk-manager.bat b/script/get-android-sdk/prepare-sdk-manager.bat
index 5b1add122..33814d57f 100644
--- a/script/get-android-sdk/prepare-sdk-manager.bat
+++ b/script/get-android-sdk/prepare-sdk-manager.bat
@@ -1,27 +1,27 @@
-echo %CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH%
+echo %MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH%
-call %CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH% --version > tmp-ver.out
+call %MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH% --version > tmp-ver.out
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
more tmp-ver.out
-call %CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH% --licenses
+call %MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH% --licenses
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
-call %CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH% ^
+call %MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH% ^
"tools" ^
"platform-tools" ^
"extras;android;m2repository" ^
"extras;google;m2repository" ^
"extras;google;google_play_services" ^
- "build-tools;%CM_ANDROID_BUILD_TOOLS_VERSION%"
+ "build-tools;%MLC_ANDROID_BUILD_TOOLS_VERSION%"
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
-call %CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH% "platforms;android-%CM_ANDROID_VERSION%"
+call %MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH% "platforms;android-%MLC_ANDROID_VERSION%"
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
-call %CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH% "cmake;%CM_ANDROID_CMAKE_VERSION%"
+call %MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH% "cmake;%MLC_ANDROID_CMAKE_VERSION%"
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
-call %CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH% "ndk;%CM_ANDROID_NDK_VERSION%"
+call %MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH% "ndk;%MLC_ANDROID_NDK_VERSION%"
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/get-android-sdk/prepare-sdk-manager.sh b/script/get-android-sdk/prepare-sdk-manager.sh
index 8613a43b1..9161e1355 100644
--- a/script/get-android-sdk/prepare-sdk-manager.sh
+++ b/script/get-android-sdk/prepare-sdk-manager.sh
@@ -1,26 +1,26 @@
echo ${JAVA_HOME}
-echo ${CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH}
+echo ${MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH}
-${CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH} --version > tmp-ver.out
+${MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH} --version > tmp-ver.out
cat tmp-ver.out
-${CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH} --licenses
+${MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH} --licenses
test $? -eq 0 || exit 1
-${CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH} \
+${MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH} \
"tools" \
"platform-tools" \
"extras;android;m2repository" \
"extras;google;m2repository" \
"extras;google;google_play_services" \
- "build-tools;${CM_ANDROID_BUILD_TOOLS_VERSION}"
+ "build-tools;${MLC_ANDROID_BUILD_TOOLS_VERSION}"
test $? -eq 0 || exit 1
-${CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH} "platforms;android-${CM_ANDROID_VERSION}"
+${MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH} "platforms;android-${MLC_ANDROID_VERSION}"
test $? -eq 0 || exit 1
-${CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH} "cmake;${CM_ANDROID_CMAKE_VERSION}"
+${MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH} "cmake;${MLC_ANDROID_CMAKE_VERSION}"
test $? -eq 0 || exit 1
-${CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH} "ndk;${CM_ANDROID_NDK_VERSION}"
+${MLC_ANDROID_SDK_MANAGER_BIN_WITH_PATH} "ndk;${MLC_ANDROID_NDK_VERSION}"
test $? -eq 0 || exit 1
diff --git a/script/get-aocl/customize.py b/script/get-aocl/customize.py
index 67b95ed28..a9ad06398 100644
--- a/script/get-aocl/customize.py
+++ b/script/get-aocl/customize.py
@@ -12,7 +12,7 @@ def preprocess(i):
automation = i['automation']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
return {'return': 0}
@@ -21,15 +21,15 @@ def postprocess(i):
env = i['env']
- env['CM_AOCL_SRC_PATH'] = env['CM_GIT_REPO_CHECKOUT_PATH']
- env['CM_AOCL_BUILD_PATH'] = os.path.join(
- env['CM_GIT_REPO_CHECKOUT_PATH'], "build")
+ env['MLC_AOCL_SRC_PATH'] = env['MLC_GIT_REPO_CHECKOUT_PATH']
+ env['MLC_AOCL_BUILD_PATH'] = os.path.join(
+ env['MLC_GIT_REPO_CHECKOUT_PATH'], "build")
aocl_lib_path = os.path.join(
- env['CM_GIT_REPO_CHECKOUT_PATH'],
+ env['MLC_GIT_REPO_CHECKOUT_PATH'],
"build",
"aocl-release",
"src")
- env['CM_AOCL_LIB_PATH'] = aocl_lib_path
+ env['MLC_AOCL_LIB_PATH'] = aocl_lib_path
env['+LIBRARY_PATH'] = [aocl_lib_path] if '+LIBRARY_PATH' not in env else env['+LIBRARY_PATH'] + [aocl_lib_path]
env['+LD_LIBRARY_PATH'] = [aocl_lib_path] if '+LD_LIBRARY_PATH' not in env else env['+LD_LIBRARY_PATH'] + [aocl_lib_path]
diff --git a/script/get-aocl/meta.yaml b/script/get-aocl/meta.yaml
index 061d6829e..66bd6b660 100644
--- a/script/get-aocl/meta.yaml
+++ b/script/get-aocl/meta.yaml
@@ -8,12 +8,12 @@ deps:
- tags: get,generic,sys-util,_libmpfr-dev
- tags: get,generic-python-lib,_scons
- force_env_keys:
- - CM_GIT_CHECKOUT
+ - MLC_GIT_CHECKOUT
tags: get,git,_repo.https://github.com/amd/aocl-libm-ose
new_env_keys:
-- CM_AOCL_BUILD_PATH
-- CM_AOCL_SRC_PATH
-- CM_AOCL_LIB_PATH
+- MLC_AOCL_BUILD_PATH
+- MLC_AOCL_SRC_PATH
+- MLC_AOCL_LIB_PATH
- +LD_LIBRARY_PATH
- +LIBRARY_PATH
tags:
@@ -27,7 +27,7 @@ variations: {}
versions:
'4.0':
env:
- CM_GIT_CHECKOUT: aocl-4.0
+ MLC_GIT_CHECKOUT: aocl-4.0
master:
env:
- CM_GIT_CHECKOUT: master
+ MLC_GIT_CHECKOUT: master
diff --git a/script/get-aocl/run.sh b/script/get-aocl/run.sh
index 1b00dd9fd..d36d37f4a 100644
--- a/script/get-aocl/run.sh
+++ b/script/get-aocl/run.sh
@@ -1,9 +1,9 @@
#!/bin/bash
-if [[ -z ${CM_GIT_REPO_CHECKOUT_PATH} ]]; then
+if [[ -z ${MLC_GIT_REPO_CHECKOUT_PATH} ]]; then
echo "Git repository not found!"
exit 1
fi
-cd ${CM_GIT_REPO_CHECKOUT_PATH}
+cd ${MLC_GIT_REPO_CHECKOUT_PATH}
scons
test $? -eq 0 || exit $?
diff --git a/script/get-aria2/customize.py b/script/get-aria2/customize.py
index c45449430..f52b1d3bf 100644
--- a/script/get-aria2/customize.py
+++ b/script/get-aria2/customize.py
@@ -16,7 +16,7 @@ def preprocess(i):
file_name = file_name_core + \
'.exe' if os_info['platform'] == 'windows' else file_name_core
- force_install = env.get('CM_FORCE_INSTALL', False) == True
+ force_install = env.get('MLC_FORCE_INSTALL', False) == True
if not force_install:
r = i['automation'].find_artifact({'file_name': file_name,
@@ -24,7 +24,7 @@ def preprocess(i):
'os_info': os_info,
'default_path_env_key': 'PATH',
'detect_version': True,
- 'env_path_key': 'CM_ARIA2_BIN_WITH_PATH',
+ 'env_path_key': 'MLC_ARIA2_BIN_WITH_PATH',
'run_script_input': i['run_script_input'],
'recursion_spaces': recursion_spaces})
if r['return'] > 0:
@@ -37,9 +37,9 @@ def preprocess(i):
# Force install
if force_install:
# Attempt to run installer
- version = env.get('CM_VERSION', '')
+ version = env.get('MLC_VERSION', '')
if version == '':
- version = env['CM_ARIA2_DEFAULT_INSTALL_VERSION']
+ version = env['MLC_ARIA2_DEFAULT_INSTALL_VERSION']
if os_info['platform'] == 'windows':
archive = 'aria2-{}-win-64bit-build1'
@@ -53,15 +53,15 @@ def preprocess(i):
archive = archive.format(version)
archive_with_ext = archive + ext
- env['CM_ARIA2_DOWNLOAD_DIR'] = archive
+ env['MLC_ARIA2_DOWNLOAD_DIR'] = archive
- env['CM_ARIA2_DOWNLOAD_FILE'] = archive_with_ext
+ env['MLC_ARIA2_DOWNLOAD_FILE'] = archive_with_ext
if ext2 != '':
- env['CM_ARIA2_DOWNLOAD_FILE2'] = archive + ext2
+ env['MLC_ARIA2_DOWNLOAD_FILE2'] = archive + ext2
url = 'https://github.com/aria2/aria2/releases/download/release-{}/{}'.format(
version, archive_with_ext)
- env['CM_ARIA2_DOWNLOAD_URL'] = url
+ env['MLC_ARIA2_DOWNLOAD_URL'] = url
print('URL to download ARIA2: {}'.format(url))
@@ -71,7 +71,7 @@ def preprocess(i):
return r
if os_info['platform'] == 'windows' or env.get(
- 'CM_ARIA2_BUILD_FROM_SRC', '').lower() == 'true':
+ 'MLC_ARIA2_BUILD_FROM_SRC', '').lower() == 'true':
install_path = os.path.join(os.getcwd(), archive)
path_to_file = os.path.join(install_path, file_name)
@@ -79,18 +79,18 @@ def preprocess(i):
return {'return': 1,
'error': 'file not found: {}'.format(path_to_file)}
- env['CM_ARIA2_BIN_WITH_PATH'] = path_to_file
- env['CM_ARIA2_INSTALLED_TO_CACHE'] = 'yes'
+ env['MLC_ARIA2_BIN_WITH_PATH'] = path_to_file
+ env['MLC_ARIA2_INSTALLED_TO_CACHE'] = 'yes'
else:
- path_to_bin = r['env_tmp'].get('CM_ARIA2_BIN_WITH_PATH', '')
- env['CM_ARIA2_BIN_WITH_PATH'] = path_to_bin
+ path_to_bin = r['env_tmp'].get('MLC_ARIA2_BIN_WITH_PATH', '')
+ env['MLC_ARIA2_BIN_WITH_PATH'] = path_to_bin
r = i['automation'].find_artifact({'file_name': file_name,
'env': env,
'os_info': os_info,
'default_path_env_key': 'PATH',
'detect_version': True,
- 'env_path_key': 'CM_ARIA2_BIN_WITH_PATH',
+ 'env_path_key': 'MLC_ARIA2_BIN_WITH_PATH',
'run_script_input': i['run_script_input'],
'recursion_spaces': recursion_spaces})
if r['return'] > 0:
@@ -104,7 +104,7 @@ def detect_version(i):
r = i['automation'].parse_version({'match_text': r'aria2 version\s*([\d.]+)',
'group_number': 1,
- 'env_key': 'CM_ARIA2_VERSION',
+ 'env_key': 'MLC_ARIA2_VERSION',
'which_env': i['env']})
if r['return'] > 0:
return r
@@ -123,13 +123,13 @@ def postprocess(i):
return r
version = r['version']
- found_file_path = env['CM_ARIA2_BIN_WITH_PATH']
+ found_file_path = env['MLC_ARIA2_BIN_WITH_PATH']
found_path = os.path.dirname(found_file_path)
- env['CM_ARIA2_INSTALLED_PATH'] = found_path
+ env['MLC_ARIA2_INSTALLED_PATH'] = found_path
- if env.get('CM_ARIA2_INSTALLED_TO_CACHE', '') == 'yes':
- env['+PATH'] = [env['CM_ARIA2_INSTALLED_PATH']]
+ if env.get('MLC_ARIA2_INSTALLED_TO_CACHE', '') == 'yes':
+ env['+PATH'] = [env['MLC_ARIA2_INSTALLED_PATH']]
return {'return': 0, 'version': version}
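
[Note] During a transition like this one, callers that still export legacy CM_-prefixed variables would stop being picked up. A hypothetical compatibility helper, not part of this patch, could read the new spelling with a fallback to the old one:

    # Hypothetical helper, not an existing API in this repo.
    def getenv_compat(env, suffix, default=''):
        return env.get('MLC_' + suffix, env.get('CM_' + suffix, default))

    # e.g. getenv_compat(env, 'ARIA2_BIN_WITH_PATH')
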
diff --git a/script/get-aria2/install.bat b/script/get-aria2/install.bat
index 6255f0caf..baeca0e3f 100644
--- a/script/get-aria2/install.bat
+++ b/script/get-aria2/install.bat
@@ -1,9 +1,9 @@
echo.
-del /Q /S %CM_ARIA2_DOWNLOAD_FILE%
+del /Q /S %MLC_ARIA2_DOWNLOAD_FILE%
-wget --no-check-certificate %CM_ARIA2_DOWNLOAD_URL%
+wget --no-check-certificate %MLC_ARIA2_DOWNLOAD_URL%
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
-unzip -o -q %CM_ARIA2_DOWNLOAD_FILE%
+unzip -o -q %MLC_ARIA2_DOWNLOAD_FILE%
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/get-aria2/install.sh b/script/get-aria2/install.sh
index d9424732d..aa865ed74 100644
--- a/script/get-aria2/install.sh
+++ b/script/get-aria2/install.sh
@@ -2,24 +2,24 @@
echo ""
-if [[ "${CM_ARIA2_BUILD_FROM_SRC}" == "True" ]]; then
+if [[ "${MLC_ARIA2_BUILD_FROM_SRC}" == "True" ]]; then
echo "Building from sources ..."
echo ""
- rm -rf ${CM_ARIA2_DOWNLOAD_FILE}
- rm -rf ${CM_ARIA2_DOWNLOAD_FILE2}
+ rm -rf ${MLC_ARIA2_DOWNLOAD_FILE}
+ rm -rf ${MLC_ARIA2_DOWNLOAD_FILE2}
- wget --no-check-certificate ${CM_ARIA2_DOWNLOAD_URL}
+ wget --no-check-certificate ${MLC_ARIA2_DOWNLOAD_URL}
test $? -eq 0 || exit $?
- bzip2 -d ${CM_ARIA2_DOWNLOAD_FILE}
+ bzip2 -d ${MLC_ARIA2_DOWNLOAD_FILE}
test $? -eq 0 || exit $?
- tar xvf ${CM_ARIA2_DOWNLOAD_FILE2}
+ tar xvf ${MLC_ARIA2_DOWNLOAD_FILE2}
test $? -eq 0 || exit $?
- cd ${CM_ARIA2_DOWNLOAD_DIR}
+ cd ${MLC_ARIA2_DOWNLOAD_DIR}
test $? -eq 0 || exit $?
./configure --prefix=$PWD/bin
@@ -35,13 +35,13 @@ else
echo "Installing binary via sudo ..."
echo ""
- cmd="sudo ${CM_HOST_OS_PACKAGE_MANAGER} install aria2"
+ cmd="sudo ${MLC_HOST_OS_PACKAGE_MANAGER} install aria2"
echo "$cmd"
$cmd
test $? -eq 0 || exit $?
path_to_bin=`which aria2c`
- echo "CM_ARIA2_BIN_WITH_PATH=$path_to_bin" > tmp-run-env.out
+ echo "MLC_ARIA2_BIN_WITH_PATH=$path_to_bin" > tmp-run-env.out
fi
diff --git a/script/get-aria2/meta.yaml b/script/get-aria2/meta.yaml
index 6fdd8bb17..79981d1d8 100644
--- a/script/get-aria2/meta.yaml
+++ b/script/get-aria2/meta.yaml
@@ -9,8 +9,8 @@ cache: true
category: Detection or installation of tools and artifacts
input_mapping:
- install: CM_FORCE_INSTALL
- src: CM_ARIA2_BUILD_FROM_SRC
+ install: MLC_FORCE_INSTALL
+ src: MLC_ARIA2_BUILD_FROM_SRC
deps:
- tags: detect,cpu
@@ -21,15 +21,15 @@ deps:
# - tags: print,native,hello-world
env:
- CM_REQUIRE_INSTALL: no
- CM_ARIA2_DEFAULT_INSTALL_VERSION: "1.37.0"
+ MLC_REQUIRE_INSTALL: no
+ MLC_ARIA2_DEFAULT_INSTALL_VERSION: "1.37.0"
new_env_keys:
- - CM_ARIA2_*
+ - MLC_ARIA2_*
- +PATH
print_env_at_the_end:
- CM_ARIA2_INSTALLED_PATH: Path to the tool
+ MLC_ARIA2_INSTALLED_PATH: Path to the tool
tags:
- get
diff --git a/script/get-aria2/run.bat b/script/get-aria2/run.bat
index 625b7edc0..eb4f33ef4 100644
--- a/script/get-aria2/run.bat
+++ b/script/get-aria2/run.bat
@@ -1,4 +1,4 @@
rem Detect version
-%CM_ARIA2_BIN_WITH_PATH% --version > tmp-ver.out
+%MLC_ARIA2_BIN_WITH_PATH% --version > tmp-ver.out
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/get-aria2/run.sh b/script/get-aria2/run.sh
index 85ba9421a..e44fb6e3d 100644
--- a/script/get-aria2/run.sh
+++ b/script/get-aria2/run.sh
@@ -2,5 +2,5 @@
# Detect version
-${CM_ARIA2_BIN_WITH_PATH} --version > tmp-ver.out
+${MLC_ARIA2_BIN_WITH_PATH} --version > tmp-ver.out
test $? -eq 0 || exit 1
diff --git a/script/get-aws-cli/README-extra.md b/script/get-aws-cli/README-extra.md
index 7c8475871..94c96ea86 100644
--- a/script/get-aws-cli/README-extra.md
+++ b/script/get-aws-cli/README-extra.md
@@ -2,7 +2,7 @@
This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed aws-cli on the system and if not found calls the [install script for aws-cli](../script/install-aws-cli).
## Exported Variables
-* `CM_AWS_BIN_WITH_PATH`
+* `MLC_AWS_BIN_WITH_PATH`
## Supported and Tested OS
1. Ubuntu 18.04, 20.04, 22.04
diff --git a/script/get-aws-cli/customize.py b/script/get-aws-cli/customize.py
index 281127a9b..1da3b4d97 100644
--- a/script/get-aws-cli/customize.py
+++ b/script/get-aws-cli/customize.py
@@ -12,18 +12,18 @@ def preprocess(i):
file_name = 'aws.exe' if os_info['platform'] == 'windows' else 'aws'
env['FILE_NAME'] = file_name
- if 'CM_AWS_BIN_WITH_PATH' not in env:
+ if 'MLC_AWS_BIN_WITH_PATH' not in env:
r = i['automation'].find_artifact({'file_name': file_name,
'env': env,
'os_info': os_info,
'default_path_env_key': 'PATH',
'detect_version': True,
- 'env_path_key': 'CM_AWS_BIN_WITH_PATH',
+ 'env_path_key': 'MLC_AWS_BIN_WITH_PATH',
'run_script_input': i['run_script_input'],
'recursion_spaces': recursion_spaces})
if r['return'] > 0:
if r['return'] == 16:
- env['CM_REQUIRE_INSTALL'] = "yes"
+ env['MLC_REQUIRE_INSTALL'] = "yes"
return {'return': 0}
else:
return r
@@ -34,7 +34,7 @@ def preprocess(i):
def detect_version(i):
r = i['automation'].parse_version({'match_text': r'aws-cli/([\d.]+)\s',
'group_number': 1,
- 'env_key': 'CM_AWS_VERSION',
+ 'env_key': 'MLC_AWS_VERSION',
'which_env': i['env']})
if r['return'] > 0:
return r
@@ -54,11 +54,11 @@ def postprocess(i):
return r
version = r['version']
- found_file_path = env['CM_AWS_BIN_WITH_PATH']
+ found_file_path = env['MLC_AWS_BIN_WITH_PATH']
found_path = os.path.dirname(found_file_path)
- env['CM_AWS_INSTALLED_PATH'] = found_path
+ env['MLC_AWS_INSTALLED_PATH'] = found_path
- env['CM_AWS_CACHE_TAGS'] = 'version-' + version
+ env['MLC_AWS_CACHE_TAGS'] = 'version-' + version
return {'return': 0, 'version': version}
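
[Note] parse_version in the hunk above extracts group 1 of the given regex from the tool's version output. A self-contained sketch of that extraction, with an illustrative output line standing in for the real `aws --version` string:

    import re

    out = 'aws-cli/2.15.30 Python/3.11.6 Linux/6.5.0 exe/x86_64'  # illustrative
    m = re.search(r'aws-cli/([\d.]+)\s', out)
    version = m.group(1) if m else ''   # -> '2.15.30'
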
diff --git a/script/get-aws-cli/meta.yaml b/script/get-aws-cli/meta.yaml
index a8017278c..63f621344 100644
--- a/script/get-aws-cli/meta.yaml
+++ b/script/get-aws-cli/meta.yaml
@@ -5,10 +5,10 @@ cache: true
category: Cloud automation
clean_files: []
new_env_keys:
-- CM_AWS_*
+- MLC_AWS_*
prehook_deps:
- enable_if_env:
- CM_REQUIRE_INSTALL:
+ MLC_REQUIRE_INSTALL:
- 'yes'
reuse_version: true
tags: install,aws-cli
diff --git a/script/get-bazel/README-extra.md b/script/get-bazel/README-extra.md
index 8e11a61bc..a0cc8d963 100644
--- a/script/get-bazel/README-extra.md
+++ b/script/get-bazel/README-extra.md
@@ -2,7 +2,7 @@
This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed bazel on the system and if not found calls the [install script for bazel](../script/install-bazel).
## Exported Variables
-* `CM_BAZEL_BIN_WITH_PATH`
+* `MLC_BAZEL_BIN_WITH_PATH`
## Supported and Tested OS
1. Ubuntu 18.04, 20.04, 22.04
diff --git a/script/get-bazel/customize.py b/script/get-bazel/customize.py
index 416a20477..32a629ea3 100644
--- a/script/get-bazel/customize.py
+++ b/script/get-bazel/customize.py
@@ -12,18 +12,18 @@ def preprocess(i):
file_name = 'bazel.exe' if os_info['platform'] == 'windows' else 'bazel'
env['FILE_NAME'] = file_name
- if 'CM_BAZEL_BIN_WITH_PATH' not in env:
+ if 'MLC_BAZEL_BIN_WITH_PATH' not in env:
r = i['automation'].find_artifact({'file_name': file_name,
'env': env,
'os_info': os_info,
'default_path_env_key': 'PATH',
'detect_version': True,
- 'env_path_key': 'CM_BAZEL_BIN_WITH_PATH',
+ 'env_path_key': 'MLC_BAZEL_BIN_WITH_PATH',
'run_script_input': i['run_script_input'],
'recursion_spaces': recursion_spaces})
if r['return'] > 0:
if r['return'] == 16:
- env['CM_REQUIRE_INSTALL'] = "yes"
+ env['MLC_REQUIRE_INSTALL'] = "yes"
return {'return': 0}
else:
return r
@@ -34,7 +34,7 @@ def preprocess(i):
def detect_version(i):
r = i['automation'].parse_version({'match_text': r'bazel\s*([\d.]+)',
'group_number': 1,
- 'env_key': 'CM_BAZEL_VERSION',
+ 'env_key': 'MLC_BAZEL_VERSION',
'which_env': i['env']})
if r['return'] > 0:
return r
@@ -54,12 +54,12 @@ def postprocess(i):
return r
version = r['version']
- found_file_path = env['CM_BAZEL_BIN_WITH_PATH']
+ found_file_path = env['MLC_BAZEL_BIN_WITH_PATH']
found_path = os.path.dirname(found_file_path)
- env['CM_BAZEL_INSTALLED_PATH'] = found_path
+ env['MLC_BAZEL_INSTALLED_PATH'] = found_path
env['+PATH'] = [found_path]
- env['CM_BAZEL_CACHE_TAGS'] = 'version-' + version
+ env['MLC_BAZEL_CACHE_TAGS'] = 'version-' + version
return {'return': 0, 'version': version}
diff --git a/script/get-bazel/meta.yaml b/script/get-bazel/meta.yaml
index ee5b19581..574651236 100644
--- a/script/get-bazel/meta.yaml
+++ b/script/get-bazel/meta.yaml
@@ -4,11 +4,11 @@ automation_uid: 5b4e0237da074764
cache: true
category: Detection or installation of tools and artifacts
new_env_keys:
-- CM_BAZEL_*
+- MLC_BAZEL_*
- +PATH
prehook_deps:
- enable_if_env:
- CM_REQUIRE_INSTALL:
+ MLC_REQUIRE_INSTALL:
- 'yes'
reuse_version: true
tags: install,bazel
diff --git a/script/get-bazel/run.bat b/script/get-bazel/run.bat
index 1e8da4b27..9eba886b3 100644
--- a/script/get-bazel/run.bat
+++ b/script/get-bazel/run.bat
@@ -1,2 +1,2 @@
-%CM_BAZEL_BIN_WITH_PATH% --version > tmp-ver.out
+%MLC_BAZEL_BIN_WITH_PATH% --version > tmp-ver.out
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/get-bazel/run.sh b/script/get-bazel/run.sh
index e145f4638..b5084b2eb 100644
--- a/script/get-bazel/run.sh
+++ b/script/get-bazel/run.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-bazel_bin=${CM_BAZEL_BIN_WITH_PATH}
-if [[ ${CM_VERSION} == "0.26.1" ]]; then
+bazel_bin=${MLC_BAZEL_BIN_WITH_PATH}
+if [[ ${MLC_VERSION} == "0.26.1" ]]; then
${bazel_bin} version |grep "Build label" |sed 's/Build label:/bazel/' > tmp-ver.out
else
${bazel_bin} --version > tmp-ver.out
diff --git a/script/get-blis/customize.py b/script/get-blis/customize.py
index 3bfe968fc..19f524ef8 100644
--- a/script/get-blis/customize.py
+++ b/script/get-blis/customize.py
@@ -12,9 +12,9 @@ def preprocess(i):
automation = i['automation']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
- env['CM_BLIS_SRC_PATH'] = env['CM_GIT_CHECKOUT_PATH']
+ env['MLC_BLIS_SRC_PATH'] = env['MLC_GIT_CHECKOUT_PATH']
return {'return': 0}
@@ -22,11 +22,11 @@ def preprocess(i):
def postprocess(i):
env = i['env']
- install_dir = os.path.join(env['CM_BLIS_SRC_PATH'], "install")
+ install_dir = os.path.join(env['MLC_BLIS_SRC_PATH'], "install")
- env['CM_BLIS_INSTALL_PATH'] = install_dir
- env['CM_BLIS_INC'] = os.path.join(install_dir, 'include', 'blis')
- env['CM_BLIS_LIB'] = os.path.join(install_dir, 'lib', 'libblis.a')
+ env['MLC_BLIS_INSTALL_PATH'] = install_dir
+ env['MLC_BLIS_INC'] = os.path.join(install_dir, 'include', 'blis')
+ env['MLC_BLIS_LIB'] = os.path.join(install_dir, 'lib', 'libblis.a')
blis_lib_path = os.path.join(install_dir, 'lib')
diff --git a/script/get-blis/meta.yaml b/script/get-blis/meta.yaml
index 8f90c9e9d..dab16ffb9 100644
--- a/script/get-blis/meta.yaml
+++ b/script/get-blis/meta.yaml
@@ -6,7 +6,7 @@ category: Detection or installation of tools and artifacts
default_version: master
deps:
- force_env_keys:
- - CM_GIT_CHECKOUT
+ - MLC_GIT_CHECKOUT
names:
- blis-source-repo
tags: get,git
@@ -14,11 +14,11 @@ deps:
input_description: {}
input_mapping: {}
new_env_keys:
-- CM_BLIS_SRC_PATH
+- MLC_BLIS_SRC_PATH
- +LD_LIBRARY_PATH
-- CM_BLIS_INSTALL_PATH
-- CM_BLIS_INC
-- CM_BLIS_LIB
+- MLC_BLIS_INSTALL_PATH
+- MLC_BLIS_INC
+- MLC_BLIS_LIB
new_state_keys: []
post_deps: []
posthook_deps: []
@@ -43,7 +43,7 @@ variations:
versions:
0.9.0:
env:
- CM_GIT_CHECKOUT: 0.9.0
+ MLC_GIT_CHECKOUT: 0.9.0
master:
env:
- CM_GIT_CHECKOUT: master
+ MLC_GIT_CHECKOUT: master
diff --git a/script/get-blis/run.sh b/script/get-blis/run.sh
index 4c6d91d78..756795b82 100644
--- a/script/get-blis/run.sh
+++ b/script/get-blis/run.sh
@@ -3,10 +3,10 @@ CUR=$PWD
mkdir -p install
test $? -eq 0 || exit $?
INSTALL_DIR=$PWD/install
-cd ${CM_BLIS_SRC_PATH}
+cd ${MLC_BLIS_SRC_PATH}
./configure --prefix=$INSTALL_DIR auto
test $? -eq 0 || exit $?
-make -j${CM_HOST_CPU_TOTAL_PHYSICAL_CORES}
+make -j${MLC_HOST_CPU_TOTAL_PHYSICAL_CORES}
test $? -eq 0 || exit $?
make install
test $? -eq 0 || exit $?
diff --git a/script/get-cache-dir/customize.py b/script/get-cache-dir/customize.py
index bd5bd1468..41ac52d30 100644
--- a/script/get-cache-dir/customize.py
+++ b/script/get-cache-dir/customize.py
@@ -12,7 +12,7 @@ def preprocess(i):
automation = i['automation']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
return {'return': 0}
@@ -22,10 +22,10 @@ def postprocess(i):
env = i['env']
cache_dir = os.getcwd()
- if env.get('CM_CACHE_DIR_ENV_NAME', '') != '':
- env[env['CM_CACHE_DIR_ENV_NAME']] = cache_dir
+ if env.get('MLC_CACHE_DIR_ENV_NAME', '') != '':
+ env[env['MLC_CACHE_DIR_ENV_NAME']] = cache_dir
- env['CM_CACHE_DIR'] = cache_dir
- env['CM_GET_DEPENDENT_CACHED_PATH'] = cache_dir
+ env['MLC_CACHE_DIR'] = cache_dir
+ env['MLC_GET_DEPENDENT_CACHED_PATH'] = cache_dir
return {'return': 0}
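
[Note] The postprocess hunk above exports the cache path under a dynamically named key: MLC_CACHE_DIR_ENV_NAME holds the name of the variable to set, and the <<<MLC_CACHE_DIR_ENV_NAME>>> entry in meta.yaml's new_env_keys is the template that lets that dynamically named key propagate. A minimal sketch (the caller-chosen key name is hypothetical):

    import os

    env = {'MLC_CACHE_DIR_ENV_NAME': 'MLC_DOWNLOAD_CACHE_DIR'}  # hypothetical key
    cache_dir = os.getcwd()
    if env.get('MLC_CACHE_DIR_ENV_NAME', '') != '':
        env[env['MLC_CACHE_DIR_ENV_NAME']] = cache_dir
    env['MLC_CACHE_DIR'] = cache_dir
    # cache_dir is now exposed under both MLC_CACHE_DIR and the caller's key
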
diff --git a/script/get-cache-dir/meta.yaml b/script/get-cache-dir/meta.yaml
index ad9695f53..e02b9a7cb 100644
--- a/script/get-cache-dir/meta.yaml
+++ b/script/get-cache-dir/meta.yaml
@@ -8,8 +8,8 @@ docker:
run: false
input_description: {}
new_env_keys:
-- CM_CACHE_DIR
-- <<<CM_CACHE_DIR_ENV_NAME>>>
+- MLC_CACHE_DIR
+- <<<MLC_CACHE_DIR_ENV_NAME>>>
new_state_keys: []
post_deps: []
posthook_deps: []
@@ -23,5 +23,5 @@ uid: 48f4622e059b45ce
variations:
name.#:
env:
- CM_CACHE_DIR_NAME: '#'
+ MLC_CACHE_DIR_NAME: '#'
versions: {}
diff --git a/script/get-ck/COPYRIGHT.md b/script/get-ck/COPYRIGHT.md
deleted file mode 100644
index 9e44ad290..000000000
--- a/script/get-ck/COPYRIGHT.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright Notice
-
-© 2022-2025 MLCommons. All Rights Reserved.
-
-This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at:
-
-[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)
-
-Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License.
diff --git a/script/get-ck/README.md b/script/get-ck/README.md
deleted file mode 100644
index 0f8f829cf..000000000
--- a/script/get-ck/README.md
+++ /dev/null
@@ -1 +0,0 @@
-Please see [https://docs.mlcommons.org/cm4mlops/scripts/Legacy-CK-support/get-ck](https://docs.mlcommons.org/cm4mlops/scripts/Legacy-CK-support/get-ck) for the documentation of this CM script.
diff --git a/script/get-ck/meta.yaml b/script/get-ck/meta.yaml
deleted file mode 100644
index 2dbb1fb66..000000000
--- a/script/get-ck/meta.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-alias: get-ck
-automation_alias: script
-automation_uid: 5b4e0237da074764
-cache: true
-category: Legacy CK support
-tags:
-- get
-- ck
-- ck-framework
-uid: 5575126797174cac
diff --git a/script/get-ck/run.bat b/script/get-ck/run.bat
deleted file mode 100644
index 75d92799e..000000000
--- a/script/get-ck/run.bat
+++ /dev/null
@@ -1 +0,0 @@
-pip install ck
diff --git a/script/get-ck/run.sh b/script/get-ck/run.sh
deleted file mode 100644
index eae526fd3..000000000
--- a/script/get-ck/run.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash
-
-python3 -m pip install ck ${CM_CK_FRAMEWORK_INSTALL_CLI}
diff --git a/script/get-cl/customize.py b/script/get-cl/customize.py
index 2bae685a2..cd6123150 100644
--- a/script/get-cl/customize.py
+++ b/script/get-cl/customize.py
@@ -17,13 +17,13 @@ def preprocess(i):
file_name = 'cl.exe'
- # Will check env['CM_TMP_PATH'] if comes from installation script
+ # Will check env['MLC_TMP_PATH'] if it comes from an installation script
ii = {'file_name': file_name,
'env': env,
'os_info': os_info,
'default_path_env_key': 'PATH',
'detect_version': True,
- 'env_path_key': 'CM_CL_BIN_WITH_PATH',
+ 'env_path_key': 'MLC_CL_BIN_WITH_PATH',
'run_script_input': i['run_script_input'],
'recursion_spaces': recursion_spaces}
@@ -33,8 +33,8 @@ def preprocess(i):
if rr['return'] != 16:
return rr
- if env.get('CM_INPUT', '').strip() == '' and env.get(
- 'CM_TMP_PATH', '').strip() == '':
+ if env.get('MLC_INPUT', '').strip() == '' and env.get(
+ 'MLC_TMP_PATH', '').strip() == '':
print(
i['recursion_spaces'] +
@@ -59,8 +59,8 @@ def preprocess(i):
tmp_paths = ';'.join(found_paths)
- env['CM_TMP_PATH'] = tmp_paths
- env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'
+ env['MLC_TMP_PATH'] = tmp_paths
+ env['MLC_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'
ii['env'] = env
@@ -96,19 +96,19 @@ def preprocess(i):
state['script_prefix'] = script_prefix
- env['CM_CL_BIN'] = file_name
- env['CM_CL_BIN_WITH_PATH'] = os.path.join(found_path, file_name)
+ env['MLC_CL_BIN'] = file_name
+ env['MLC_CL_BIN_WITH_PATH'] = os.path.join(found_path, file_name)
# General compiler for general program compilation
- env['CM_C_COMPILER_BIN'] = file_name
- env['CM_C_COMPILER_WITH_PATH'] = os.path.join(found_path, file_name)
- env['CM_C_COMPILER_FLAG_OUTPUT'] = '/Fe:'
- env['CM_C_COMPILER_FLAG_VERSION'] = ''
+ env['MLC_C_COMPILER_BIN'] = file_name
+ env['MLC_C_COMPILER_WITH_PATH'] = os.path.join(found_path, file_name)
+ env['MLC_C_COMPILER_FLAG_OUTPUT'] = '/Fe:'
+ env['MLC_C_COMPILER_FLAG_VERSION'] = ''
- env['CM_CXX_COMPILER_BIN'] = env['CM_C_COMPILER_BIN']
- env['CM_CXX_COMPILER_WITH_PATH'] = env['CM_C_COMPILER_WITH_PATH']
- env['CM_CXX_COMPILER_FLAG_OUTPUT'] = '/Fe:'
- env['CM_CXX_COMPILER_FLAG_VERSION'] = ''
+ env['MLC_CXX_COMPILER_BIN'] = env['MLC_C_COMPILER_BIN']
+ env['MLC_CXX_COMPILER_WITH_PATH'] = env['MLC_C_COMPILER_WITH_PATH']
+ env['MLC_CXX_COMPILER_FLAG_OUTPUT'] = '/Fe:'
+ env['MLC_CXX_COMPILER_FLAG_VERSION'] = ''
return {'return': 0}
@@ -116,7 +116,7 @@ def preprocess(i):
def detect_version(i):
r = i['automation'].parse_version({'match_text': r'Version\s*([\d.]+)',
'group_number': 1,
- 'env_key': 'CM_CL_VERSION',
+ 'env_key': 'MLC_CL_VERSION',
'which_env': i['env']})
if r['return'] > 0:
return r
@@ -139,9 +139,9 @@ def postprocess(i):
version = r['version']
- env['CM_CL_CACHE_TAGS'] = 'version-' + version
- env['CM_COMPILER_CACHE_TAGS'] = 'version-' + version + ',family-msvc'
- env['CM_COMPILER_FAMILY'] = 'MSVC'
- env['CM_COMPILER_VERSION'] = env['CM_CL_VERSION']
+ env['MLC_CL_CACHE_TAGS'] = 'version-' + version
+ env['MLC_COMPILER_CACHE_TAGS'] = 'version-' + version + ',family-msvc'
+ env['MLC_COMPILER_FAMILY'] = 'MSVC'
+ env['MLC_COMPILER_VERSION'] = env['MLC_CL_VERSION']
return {'return': 0, 'version': version}
diff --git a/script/get-cl/meta.yaml b/script/get-cl/meta.yaml
index 2bc7741d7..8938f3463 100644
--- a/script/get-cl/meta.yaml
+++ b/script/get-cl/meta.yaml
@@ -6,11 +6,11 @@ category: Compiler automation
clean_files: []
name: Detect or install Microsoft C compiler
new_env_keys:
-- CM_CL_*
-- CM_C_COMPILER_*
-- CM_CXX_COMPILER_*
-- CM_COMPILER_*
-- CM_LINKER_*
+- MLC_CL_*
+- MLC_C_COMPILER_*
+- MLC_CXX_COMPILER_*
+- MLC_COMPILER_*
+- MLC_LINKER_*
- +PATH
new_state_keys:
- script_prefix
diff --git a/script/get-cl/run.bat b/script/get-cl/run.bat
index 2a5fc7c9b..e56cee4a2 100644
--- a/script/get-cl/run.bat
+++ b/script/get-cl/run.bat
@@ -1,3 +1,3 @@
-"%CM_CL_BIN_WITH_PATH%" > tmp-ver.out 2>&1
+"%MLC_CL_BIN_WITH_PATH%" > tmp-ver.out 2>&1
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/get-cmake/customize.py b/script/get-cmake/customize.py
index b0201bed6..f276ab1bf 100644
--- a/script/get-cmake/customize.py
+++ b/script/get-cmake/customize.py
@@ -12,18 +12,18 @@ def preprocess(i):
file_name = 'cmake.exe' if os_info['platform'] == 'windows' else 'cmake'
- if 'CM_CMAKE_BIN_WITH_PATH' not in env:
+ if 'MLC_CMAKE_BIN_WITH_PATH' not in env:
r = i['automation'].find_artifact({'file_name': file_name,
'env': env,
'os_info': os_info,
'default_path_env_key': 'PATH',
'detect_version': True,
- 'env_path_key': 'CM_CMAKE_BIN_WITH_PATH',
+ 'env_path_key': 'MLC_CMAKE_BIN_WITH_PATH',
'run_script_input': i['run_script_input'],
'recursion_spaces': recursion_spaces})
if r['return'] > 0:
if r['return'] == 16:
- env['CM_REQUIRE_INSTALL'] = "yes"
+ env['MLC_REQUIRE_INSTALL'] = "yes"
return {'return': 0}
else:
return r
@@ -34,7 +34,7 @@ def preprocess(i):
def detect_version(i):
r = i['automation'].parse_version({'match_text': r'cmake version\s*([\d.]+)',
'group_number': 1,
- 'env_key': 'CM_CMAKE_VERSION',
+ 'env_key': 'MLC_CMAKE_VERSION',
'which_env': i['env']})
if r['return'] > 0:
return r
@@ -53,13 +53,13 @@ def postprocess(i):
return r
version = r['version']
- found_file_path = env['CM_CMAKE_BIN_WITH_PATH']
+ found_file_path = env['MLC_CMAKE_BIN_WITH_PATH']
found_path = os.path.dirname(found_file_path)
- env['CM_CMAKE_CACHE_TAGS'] = 'version-' + version
+ env['MLC_CMAKE_CACHE_TAGS'] = 'version-' + version
- if 'CM_HOST_CPU_TOTAL_CORES' in env:
- env['CM_MAKE_CORES'] = env['CM_HOST_CPU_TOTAL_CORES']
+ if 'MLC_HOST_CPU_TOTAL_CORES' in env:
+ env['MLC_MAKE_CORES'] = env['MLC_HOST_CPU_TOTAL_CORES']
return {'return': 0, 'version': version}
diff --git a/script/get-cmake/meta.yaml b/script/get-cmake/meta.yaml
index ae051d22a..5545297f1 100644
--- a/script/get-cmake/meta.yaml
+++ b/script/get-cmake/meta.yaml
@@ -6,19 +6,19 @@ category: Detection or installation of tools and artifacts
deps:
- tags: detect,cpu
env:
- CM_REQUIRE_INSTALL: 'no'
+ MLC_REQUIRE_INSTALL: 'no'
new_env_keys:
-- CM_CMAKE_*
-- CM_MAKE_CORES
+- MLC_CMAKE_*
+- MLC_MAKE_CORES
- +PATH
prehook_deps:
- enable_if_env:
- CM_REQUIRE_INSTALL:
+ MLC_REQUIRE_INSTALL:
- 'yes'
reuse_version: true
tags: install,cmake,prebuilt
print_env_at_the_end:
- CM_CMAKE_BIN_WITH_PATH: Path to the tool
+ MLC_CMAKE_BIN_WITH_PATH: Path to the tool
tags:
- get
- cmake
diff --git a/script/get-cmake/run.bat b/script/get-cmake/run.bat
index 0802ae828..940bd06d2 100644
--- a/script/get-cmake/run.bat
+++ b/script/get-cmake/run.bat
@@ -1,2 +1,2 @@
-%CM_CMAKE_BIN_WITH_PATH% --version > tmp-ver.out
+%MLC_CMAKE_BIN_WITH_PATH% --version > tmp-ver.out
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/get-cmake/run.sh b/script/get-cmake/run.sh
index 6d2aeff97..9d9230232 100644
--- a/script/get-cmake/run.sh
+++ b/script/get-cmake/run.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-cmake_bin=${CM_CMAKE_BIN_WITH_PATH}
+cmake_bin=${MLC_CMAKE_BIN_WITH_PATH}
${cmake_bin} --version > tmp-ver.out
test $? -eq 0 || exit 1
diff --git a/script/get-cmsis_5/README-extra.md b/script/get-cmsis_5/README-extra.md
deleted file mode 100644
index 1f052e7ea..000000000
--- a/script/get-cmsis_5/README-extra.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# GET-CMSIS_5
-This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) clones the git repository of [CMSIS Version 5](https://github.com/ARM-software/CMSIS_5) and cache it in CM for reuse across other CM scripts.
-
-## Exported Variables
-1. [CMSIS_PATH](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-cmsis_5/customize.py#L23): Location in CM cache where CMSIS_5 git repository is cloned.
diff --git a/script/get-cmsis_5/customize.py b/script/get-cmsis_5/customize.py
index e5fac8d7e..099629649 100644
--- a/script/get-cmsis_5/customize.py
+++ b/script/get-cmsis_5/customize.py
@@ -10,10 +10,10 @@ def preprocess(i):
if os_info['platform'] == 'windows':
return {'return': 1, 'error': 'Windows is not supported in this script yet'}
env = i['env']
- if 'CM_GIT_DEPTH' not in env:
- env['CM_GIT_DEPTH'] = ''
- if 'CM_GIT_RECURSE_SUBMODULES' not in env:
- env['CM_GIT_RECURSE_SUBMODULES'] = ''
+ if 'MLC_GIT_DEPTH' not in env:
+ env['MLC_GIT_DEPTH'] = ''
+ if 'MLC_GIT_RECURSE_SUBMODULES' not in env:
+ env['MLC_GIT_RECURSE_SUBMODULES'] = ''
return {'return': 0}
diff --git a/script/get-cmsis_5/meta.yaml b/script/get-cmsis_5/meta.yaml
index e28a2d5aa..95ac1ef3a 100644
--- a/script/get-cmsis_5/meta.yaml
+++ b/script/get-cmsis_5/meta.yaml
@@ -4,9 +4,9 @@ automation_uid: 5b4e0237da074764
cache: true
category: Detection or installation of tools and artifacts
default_env:
- CM_GIT_DEPTH: ''
- CM_GIT_PATCH: 'no'
- CM_GIT_URL: https://github.com/ARM-software/CMSIS_5.git
+ MLC_GIT_DEPTH: ''
+ MLC_GIT_PATCH: 'no'
+ MLC_GIT_URL: https://github.com/ARM-software/CMSIS_5.git
default_version: custom
deps:
- tags: detect,os
@@ -21,18 +21,18 @@ uid: 2258c212b11443f5
variations:
recurse-submodules:
env:
- CM_GIT_RECURSE_SUBMODULES: --recurse-submodules
+ MLC_GIT_RECURSE_SUBMODULES: --recurse-submodules
short-history:
env:
- CM_GIT_DEPTH: --depth 10
+ MLC_GIT_DEPTH: --depth 10
versions:
custom:
env:
- CM_GIT_CHECKOUT: e5dc19182f6084de32d8dc5a22c84e01210f4995
- CM_GIT_SHA: 'yes'
+ MLC_GIT_CHECKOUT: e5dc19182f6084de32d8dc5a22c84e01210f4995
+ MLC_GIT_SHA: 'yes'
develop:
env:
- CM_GIT_CHECKOUT: develop
+ MLC_GIT_CHECKOUT: develop
master:
env:
- CM_GIT_CHECKOUT: master
+ MLC_GIT_CHECKOUT: master
diff --git a/script/get-cmsis_5/run.sh b/script/get-cmsis_5/run.sh
index 9093c093b..47d1e2554 100644
--- a/script/get-cmsis_5/run.sh
+++ b/script/get-cmsis_5/run.sh
@@ -1,21 +1,21 @@
#!/bin/bash
CUR_DIR=$PWD
-SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH}
+SCRIPT_DIR=${MLC_TMP_CURRENT_SCRIPT_PATH}
echo "******************************************************"
if [ ! -d "cmsis" ]; then
- if [ -z ${CM_GIT_SHA} ]; then
- echo "Cloning CMSIS_5 from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT} ${CM_GIT_DEPTH} ${CM_GIT_RECURSE_SUBMODULES}..."
- git clone ${CM_GIT_RECURSE_SUBMODULES} -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} ${CM_GIT_DEPTH} cmsis
+ if [ -z ${MLC_GIT_SHA} ]; then
+ echo "Cloning CMSIS_5 from ${MLC_GIT_URL} with branch ${MLC_GIT_CHECKOUT} ${MLC_GIT_DEPTH} ${MLC_GIT_RECURSE_SUBMODULES}..."
+ git clone ${MLC_GIT_RECURSE_SUBMODULES} -b "${MLC_GIT_CHECKOUT}" ${MLC_GIT_URL} ${MLC_GIT_DEPTH} cmsis
if [ "${?}" != "0" ]; then exit 1; fi
else
- echo "Cloning CMSIS_5 from ${CM_GIT_URL} with default branch and checkout ${CM_GIT_CHECKOUT} ${CM_GIT_DEPTH} ${CM_GIT_RECURSE_SUBMODULES}..."
- git clone ${CM_GIT_RECURSE_SUBMODULES} ${CM_GIT_URL} ${CM_GIT_DEPTH} cmsis
+ echo "Cloning CMSIS_5 from ${MLC_GIT_URL} with default branch and checkout ${MLC_GIT_CHECKOUT} ${MLC_GIT_DEPTH} ${MLC_GIT_RECURSE_SUBMODULES}..."
+ git clone ${MLC_GIT_RECURSE_SUBMODULES} ${MLC_GIT_URL} ${MLC_GIT_DEPTH} cmsis
if [ "${?}" != "0" ]; then exit 1; fi
cd cmsis
- git checkout "${CM_GIT_CHECKOUT}"
+ git checkout "${MLC_GIT_CHECKOUT}"
if [ "${?}" != "0" ]; then exit 1; fi
fi
fi
diff --git a/script/get-compiler-flags/customize.py b/script/get-compiler-flags/customize.py
index dd7ee775a..96463c054 100644
--- a/script/get-compiler-flags/customize.py
+++ b/script/get-compiler-flags/customize.py
@@ -16,16 +16,16 @@ def preprocess(i):
if os_info['platform'] == 'windows':
return {'return': 0}
- if env.get("CM_FAST_COMPILATION") in ["yes", "on", "1"]:
- DEFAULT_COMPILER_FLAGS = env.get("CM_COMPILER_FLAGS_FAST", "-O3")
+ if env.get("MLC_FAST_COMPILATION") in ["yes", "on", "1"]:
+ DEFAULT_COMPILER_FLAGS = env.get("MLC_COMPILER_FLAGS_FAST", "-O3")
# -flto") - this flag is not always available
- DEFAULT_LINKER_FLAGS = env.get("CM_LINKER_FLAGS_FAST", "-O3")
- elif env.get("CM_DEBUG_COMPILATION") in ["yes", "on", "1"]:
- DEFAULT_COMPILER_FLAGS = env.get("CM_COMPILER_FLAGS_DEBUG", "-O0")
- DEFAULT_LINKER_FLAGS = env.get("CM_LINKER_FLAGS_DEBUG", "-O0")
+ DEFAULT_LINKER_FLAGS = env.get("MLC_LINKER_FLAGS_FAST", "-O3")
+ elif env.get("MLC_DEBUG_COMPILATION") in ["yes", "on", "1"]:
+ DEFAULT_COMPILER_FLAGS = env.get("MLC_COMPILER_FLAGS_DEBUG", "-O0")
+ DEFAULT_LINKER_FLAGS = env.get("MLC_LINKER_FLAGS_DEBUG", "-O0")
else:
- DEFAULT_COMPILER_FLAGS = env.get("CM_COMPILER_FLAGS_DEFAULT", "-O2")
- DEFAULT_LINKER_FLAGS = env.get("CM_LINKER_FLAGS_DEFAULT", "-O2")
+ DEFAULT_COMPILER_FLAGS = env.get("MLC_COMPILER_FLAGS_DEFAULT", "-O2")
+ DEFAULT_LINKER_FLAGS = env.get("MLC_LINKER_FLAGS_DEFAULT", "-O2")
env['+ CFLAGS'] += DEFAULT_COMPILER_FLAGS.split(" ")
env['+ CXXFLAGS'] += DEFAULT_COMPILER_FLAGS.split(" ")
@@ -51,15 +51,15 @@ def preprocess(i):
break
if 'gcc' not in out:
inc_dir.append(out.strip())
- env['+CM_HOST_OS_DEFAULT_INCLUDE_PATH'] = inc_dir
+ env['+MLC_HOST_OS_DEFAULT_INCLUDE_PATH'] = inc_dir
-# if env['CM_C_COMPILER_BIN'] == 'icc':
-# if env['CM_CPUINFO_Vendor_ID'] == 'GenuineIntel':
-# if int(env['CM_CPUINFO_CPU_family']) >= 0:
+# if env['MLC_C_COMPILER_BIN'] == 'icc':
+# if env['MLC_CPUINFO_Vendor_ID'] == 'GenuineIntel':
+# if int(env['MLC_CPUINFO_CPU_family']) >= 0:
# env['+ CFLAGS'] += ["-ipo"]
-# if env['CM_C_COMPILER_BIN'] == 'gcc':
-# if env['CM_HOST_CPU_VENDOR_ID'] == 'AMD':
-# if int(env['CM_HOST_CPU_FAMILY']) >= 0:
+# if env['MLC_C_COMPILER_BIN'] == 'gcc':
+# if env['MLC_HOST_CPU_VENDOR_ID'] == 'AMD':
+# if int(env['MLC_HOST_CPU_FAMILY']) >= 0:
# env['+ CFLAGS'] += ["-march=znver2", "-flto"]
return {'return': 0}
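
[Note] The hunk above selects one of three flag sets from the env toggles before splitting and appending them to CFLAGS/CXXFLAGS. A minimal sketch of just that selection:

    def pick_compiler_flags(env):
        # Mirrors the three-way selection in the hunk above.
        if env.get('MLC_FAST_COMPILATION') in ['yes', 'on', '1']:
            return env.get('MLC_COMPILER_FLAGS_FAST', '-O3')
        if env.get('MLC_DEBUG_COMPILATION') in ['yes', 'on', '1']:
            return env.get('MLC_COMPILER_FLAGS_DEBUG', '-O0')
        return env.get('MLC_COMPILER_FLAGS_DEFAULT', '-O2')

    # e.g. pick_compiler_flags({'MLC_FAST_COMPILATION': 'yes'}) -> '-O3'
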
diff --git a/script/get-compiler-flags/meta.yaml b/script/get-compiler-flags/meta.yaml
index 080020d0d..c70bc6161 100644
--- a/script/get-compiler-flags/meta.yaml
+++ b/script/get-compiler-flags/meta.yaml
@@ -7,7 +7,7 @@ deps:
- names:
- compiler
skip_if_env:
- CM_C_COMPILER_BIN:
+ MLC_C_COMPILER_BIN:
- 'on'
tags: get,compiler
new_env_keys:
@@ -15,7 +15,7 @@ new_env_keys:
- + CXXFLAGS
- + FFLAGS
- + LDFLAGS
-- +CM_HOST_OS_DEFAULT_INCLUDE_PATH
+- +MLC_HOST_OS_DEFAULT_INCLUDE_PATH
tags:
- get
- compiler-flags
diff --git a/script/get-compiler-rust/customize.py b/script/get-compiler-rust/customize.py
index 01bf84b37..7481e6527 100644
--- a/script/get-compiler-rust/customize.py
+++ b/script/get-compiler-rust/customize.py
@@ -12,7 +12,7 @@ def preprocess(i):
automation = i['automation']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
return {'return': 0}
diff --git a/script/get-compiler-rust/run.sh b/script/get-compiler-rust/run.sh
index 4651e2fd0..28a25ced9 100644
--- a/script/get-compiler-rust/run.sh
+++ b/script/get-compiler-rust/run.sh
@@ -1,7 +1,7 @@
-CM_PYTHON_BIN=${CM_PYTHON_BIN_WITH_PATH:-python3}
+MLC_PYTHON_BIN=${MLC_PYTHON_BIN_WITH_PATH:-python3}
-${CM_PYTHON_BIN} -m pip install --upgrade pip ${CM_PYTHON_PIP_COMMON_EXTRA}
-${CM_PYTHON_BIN} -m pip install setuptools testresources wheel h5py --user --upgrade --ignore-installed ${CM_PYTHON_PIP_COMMON_EXTRA}
+${MLC_PYTHON_BIN} -m pip install --upgrade pip ${MLC_PYTHON_PIP_COMMON_EXTRA}
+${MLC_PYTHON_BIN} -m pip install setuptools testresources wheel h5py --user --upgrade --ignore-installed ${MLC_PYTHON_PIP_COMMON_EXTRA}
curl https://sh.rustup.rs -sSf -o tmp.sh
sh tmp.sh -y
diff --git a/script/get-conda/customize.py b/script/get-conda/customize.py
index 3c44af161..691b19a1d 100644
--- a/script/get-conda/customize.py
+++ b/script/get-conda/customize.py
@@ -12,11 +12,11 @@ def preprocess(i):
recursion_spaces = i['recursion_spaces']
- conda_prefix_name = env.get('CM_CONDA_PREFIX_NAME', '')
+ conda_prefix_name = env.get('MLC_CONDA_PREFIX_NAME', '')
r = None
file_name = 'conda.exe' if os_info['platform'] == 'windows' else 'conda'
if conda_prefix_name == '':
- tmp_path = env.get('CM_CONDA_INSTALL_PATH', env.get('CM_TMP_PATH', ''))
+ tmp_path = env.get('MLC_CONDA_INSTALL_PATH', env.get('MLC_TMP_PATH', ''))
if tmp_path:
x = ';' if os_info['platform'] == 'windows' else ':'
tmp_path += x
@@ -24,26 +24,26 @@ def preprocess(i):
if os.path.exists(conda_path):
tmp_path += os.path.join(os.path.expanduser("~"),
"miniconda3", "bin")
- env['CM_TMP_PATH'] = tmp_path
+ env['MLC_TMP_PATH'] = tmp_path
r = i['automation'].find_artifact({'file_name': file_name,
'env': env,
'os_info': os_info,
'default_path_env_key': 'PATH',
'detect_version': True,
- 'env_path_key': 'CM_CONDA_BIN_WITH_PATH',
+ 'env_path_key': 'MLC_CONDA_BIN_WITH_PATH',
'run_script_input': i['run_script_input'],
'recursion_spaces': recursion_spaces})
else:
- env['CM_CONDA_INSTALL_PATH'] = os.path.join(os.getcwd(), "miniconda3")
+ env['MLC_CONDA_INSTALL_PATH'] = os.path.join(os.getcwd(), "miniconda3")
bin_dir = 'Scripts' if os_info['platform'] == 'windows' else 'bin'
- env['CM_CONDA_BIN_WITH_PATH'] = os.path.join(
- env['CM_CONDA_INSTALL_PATH'], bin_dir, file_name)
+ env['MLC_CONDA_BIN_WITH_PATH'] = os.path.join(
+ env['MLC_CONDA_INSTALL_PATH'], bin_dir, file_name)
if conda_prefix_name != '' or r['return'] > 0:
if conda_prefix_name != '' or r['return'] == 16:
if conda_prefix_name == '':
- if env.get('CM_TMP_FAIL_IF_NOT_FOUND', '').lower() == 'yes':
+ if env.get('MLC_TMP_FAIL_IF_NOT_FOUND', '').lower() == 'yes':
return r
print(recursion_spaces + ' # {}'.format(r['error']))
@@ -56,11 +56,11 @@ def preprocess(i):
# Grigori: temporal fix - should be generalized/improved above
if os_info['platform'] == 'windows' and env.get(
- 'CM_CONDA_BIN_WITH_PATH', '') == '':
- env['CM_CONDA_INSTALL_PATH'] = os.path.join(
+ 'MLC_CONDA_BIN_WITH_PATH', '') == '':
+ env['MLC_CONDA_INSTALL_PATH'] = os.path.join(
os.getcwd(), "miniconda3")
- env['CM_CONDA_BIN_WITH_PATH'] = os.path.join(
- env['CM_CONDA_INSTALL_PATH'], 'Scripts', file_name)
+ env['MLC_CONDA_BIN_WITH_PATH'] = os.path.join(
+ env['MLC_CONDA_INSTALL_PATH'], 'Scripts', file_name)
else:
found_path = r['found_path']
@@ -72,7 +72,7 @@ def preprocess(i):
def detect_version(i):
r = i['automation'].parse_version({'match_text': r'conda\s*([\d.]+)',
'group_number': 1,
- 'env_key': 'CM_CONDA_VERSION',
+ 'env_key': 'MLC_CONDA_VERSION',
'which_env': i['env']})
if r['return'] > 0:
return r
@@ -86,19 +86,19 @@ def postprocess(i):
if r['return'] > 0:
return r
- conda_bin_path = os.path.dirname(env['CM_CONDA_BIN_WITH_PATH'])
- env['CM_CONDA_BIN_PATH'] = conda_bin_path
+ conda_bin_path = os.path.dirname(env['MLC_CONDA_BIN_WITH_PATH'])
+ env['MLC_CONDA_BIN_PATH'] = conda_bin_path
env['+PATH'] = [conda_bin_path]
conda_prefix = os.path.dirname(conda_bin_path)
- env['CM_CONDA_PREFIX'] = conda_prefix
+ env['MLC_CONDA_PREFIX'] = conda_prefix
env['CONDA_PREFIX'] = conda_prefix
conda_lib_path = os.path.join(conda_prefix, "lib")
if os.path.exists(conda_lib_path):
- env['CM_CONDA_LIB_PATH'] = conda_lib_path
+ env['MLC_CONDA_LIB_PATH'] = conda_lib_path
env['+LD_LIBRARY_PATH'] = [conda_lib_path]
env['+LIBRARY_PATH'] = [conda_lib_path]
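
[Note] The postprocess hunk above derives the conda prefix and lib directory from the detected conda binary location. A minimal sketch with an illustrative path:

    import os

    conda_bin_with_path = '/home/user/miniconda3/bin/conda'  # illustrative
    conda_bin_path = os.path.dirname(conda_bin_with_path)    # .../miniconda3/bin
    conda_prefix = os.path.dirname(conda_bin_path)           # .../miniconda3
    conda_lib_path = os.path.join(conda_prefix, 'lib')       # .../miniconda3/lib
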
diff --git a/script/get-conda/install.sh b/script/get-conda/install.sh
index 6d1888285..17bd859aa 100644
--- a/script/get-conda/install.sh
+++ b/script/get-conda/install.sh
@@ -4,14 +4,14 @@ curl -fsSL -v -o ~/miniconda.sh -O https://repo.anaconda.com/miniconda/Minicond
test $? -eq 0 || exit $?
chmod +x ~/miniconda.sh
-if [ ! -z ${CM_CONDA_PREFIX_NAME} ]; then
- CM_CONDA_INSTALL_PATH=$PWD/miniconda3
- rm -rf ${CM_CONDA_INSTALL_PATH}
+if [ ! -z ${MLC_CONDA_PREFIX_NAME} ]; then
+ MLC_CONDA_INSTALL_PATH=$PWD/miniconda3
+ rm -rf ${MLC_CONDA_INSTALL_PATH}
fi
-if [ ! -z ${CM_CONDA_INSTALL_PATH} ]; then
- ~/miniconda.sh -b -p ${CM_CONDA_INSTALL_PATH}
+if [ ! -z ${MLC_CONDA_INSTALL_PATH} ]; then
+ ~/miniconda.sh -b -p ${MLC_CONDA_INSTALL_PATH}
else
~/miniconda.sh -b
fi
diff --git a/script/get-conda/meta.yaml b/script/get-conda/meta.yaml
index 8e34801fa..1bb33b194 100644
--- a/script/get-conda/meta.yaml
+++ b/script/get-conda/meta.yaml
@@ -10,11 +10,11 @@ new_env_keys:
- +PATH
- +LD_LIBRARY_PATH
- +LIBRARY_PATH
-- CM_CONDA_PREFIX
+- MLC_CONDA_PREFIX
- CONDA_PREFIX
-- CM_CONDA_BIN_PATH
-- CM_CONDA_BIN_WITH_PATH
-- CM_CONDA_LIB_PATH
+- MLC_CONDA_BIN_PATH
+- MLC_CONDA_BIN_WITH_PATH
+- MLC_CONDA_LIB_PATH
tags:
- get
- conda
@@ -26,12 +26,12 @@ variations:
conda-package:
tags: _name.#
env:
- CM_CONDA_PREFIX_NAME: '#'
+ MLC_CONDA_PREFIX_NAME: '#'
python-3.#:
env:
- CM_CONDA_PYTHON_VERSION: 3.#
+ MLC_CONDA_PYTHON_VERSION: 3.#
group: conda-python
python-3.8:
env:
- CM_CONDA_PYTHON_VERSION: '3.8'
+ MLC_CONDA_PYTHON_VERSION: '3.8'
group: conda-python
diff --git a/script/get-conda/run.bat b/script/get-conda/run.bat
index 99b9d97d2..2cbb75627 100644
--- a/script/get-conda/run.bat
+++ b/script/get-conda/run.bat
@@ -1 +1 @@
-%CM_CONDA_BIN_WITH_PATH% --version > tmp-ver.out
+%MLC_CONDA_BIN_WITH_PATH% --version > tmp-ver.out
diff --git a/script/get-conda/run.sh b/script/get-conda/run.sh
index 5d61f106f..e37ec0ddc 100644
--- a/script/get-conda/run.sh
+++ b/script/get-conda/run.sh
@@ -1,3 +1,3 @@
#!/bin/bash
-${CM_CONDA_BIN_WITH_PATH} --version > tmp-ver.out
+${MLC_CONDA_BIN_WITH_PATH} --version > tmp-ver.out
diff --git a/script/get-croissant/meta.yaml b/script/get-croissant/meta.yaml
index a024189d2..f53583122 100644
--- a/script/get-croissant/meta.yaml
+++ b/script/get-croissant/meta.yaml
@@ -18,7 +18,7 @@ deps:
version_min: '3.10'
- env:
- CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLCOMMONS_CROISSANT_PATH
+ MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_MLCOMMONS_CROISSANT_PATH
extra_cache_tags: mlcommons,croissant
names:
- git-mlcommons-croissant
diff --git a/script/get-croissant/run.bat b/script/get-croissant/run.bat
index 3177de9f6..f23b67b8f 100644
--- a/script/get-croissant/run.bat
+++ b/script/get-croissant/run.bat
@@ -2,13 +2,13 @@
echo =======================================================
-cd %CM_MLCOMMONS_CROISSANT_PATH%\python\mlcroissant
+cd %MLC_MLCOMMONS_CROISSANT_PATH%\python\mlcroissant
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
echo.
-echo Running %CM_PYTHON_BIN_WITH_PATH% -m pip install -e .[git]
+echo Running %MLC_PYTHON_BIN_WITH_PATH% -m pip install -e .[git]
-%CM_PYTHON_BIN_WITH_PATH% -m pip install -e .[git]
+%MLC_PYTHON_BIN_WITH_PATH% -m pip install -e .[git]
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
echo.
diff --git a/script/get-croissant/run.sh b/script/get-croissant/run.sh
index dd2c67bb2..3fadc239d 100644
--- a/script/get-croissant/run.sh
+++ b/script/get-croissant/run.sh
@@ -2,13 +2,13 @@
echo "======================================================="
-cd ${CM_MLCOMMONS_CROISSANT_PATH}/python/mlcroissant
+cd ${MLC_MLCOMMONS_CROISSANT_PATH}/python/mlcroissant
if [ "${?}" != "0" ]; then exit 1; fi
echo ""
-echo "Running ${CM_PYTHON_BIN_WITH_PATH} -m pip install -e .[git]"
+echo "Running ${MLC_PYTHON_BIN_WITH_PATH} -m pip install -e .[git]"
-${CM_PYTHON_BIN_WITH_PATH} -m pip install -e .[git]
+${MLC_PYTHON_BIN_WITH_PATH} -m pip install -e .[git]
if [ "${?}" != "0" ]; then exit 1; fi
echo ""
diff --git a/script/get-cuda-devices/customize.py b/script/get-cuda-devices/customize.py
index 26969185d..62832a6e7 100644
--- a/script/get-cuda-devices/customize.py
+++ b/script/get-cuda-devices/customize.py
@@ -7,7 +7,7 @@ def preprocess(i):
env = i['env']
- if str(env.get('CM_DETECT_USING_PYCUDA', '')
+ if str(env.get('MLC_DETECT_USING_PYCUDA', '')
).lower() in ["1", "yes", "true"]:
i['run_script_input']['script_name'] = 'detect'
@@ -54,11 +54,11 @@ def postprocess(i):
gpu[gpu_id][key] = val
p[key] = val
- key_env = 'CM_CUDA_DEVICE_PROP_' + key.upper().replace(' ', '_')
+ key_env = 'MLC_CUDA_DEVICE_PROP_' + key.upper().replace(' ', '_')
env[key_env] = val
state['cm_cuda_num_devices'] = gpu_id + 1
- env['CM_CUDA_NUM_DEVICES'] = gpu_id + 1
+ env['MLC_CUDA_NUM_DEVICES'] = gpu_id + 1
state['cm_cuda_device_prop'] = p
state['cm_cuda_devices_prop'] = gpu
diff --git a/script/get-cuda-devices/detect.sh b/script/get-cuda-devices/detect.sh
index 8f6b93596..9de8aa64b 100644
--- a/script/get-cuda-devices/detect.sh
+++ b/script/get-cuda-devices/detect.sh
@@ -1,4 +1,4 @@
#!/bin/bash
-${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/detect.py
+${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/detect.py
test $? -eq 0 || exit $?
diff --git a/script/get-cuda-devices/meta.yaml b/script/get-cuda-devices/meta.yaml
index 2d4869286..a340263e4 100644
--- a/script/get-cuda-devices/meta.yaml
+++ b/script/get-cuda-devices/meta.yaml
@@ -29,15 +29,15 @@ docker:
skip_cm_sys_upgrade: 'yes'
cm_repo_flags: '--checkout=dev'
use_host_group_id: 'yes'
- image_tag_extra: '-cm-dev'
+ image_tag_extra: '-mlc-dev'
env:
- CM_DETECT_USING_PYCUDA: 'no'
+ MLC_DETECT_USING_PYCUDA: 'no'
new_env_keys:
-- CM_CUDA_DEVICE_*
-- CM_CUDA_NUM_DEVICES
-- CM_CUDA_VERSION
+- MLC_CUDA_DEVICE_*
+- MLC_CUDA_NUM_DEVICES
+- MLC_CUDA_VERSION
new_state_keys:
- cm_cuda_device_prop
@@ -50,7 +50,7 @@ print_files_if_script_error:
variations:
with-pycuda:
env:
- CM_DETECT_USING_PYCUDA: 'yes'
+ MLC_DETECT_USING_PYCUDA: 'yes'
deps:
- tags: get,python3
names:
diff --git a/script/get-cuda-devices/run.bat b/script/get-cuda-devices/run.bat
index 4f1467c19..2b2c03d5c 100644
--- a/script/get-cuda-devices/run.bat
+++ b/script/get-cuda-devices/run.bat
@@ -3,22 +3,22 @@ rem Compile
del a.exe
echo.
-echo NVCC path: %CM_NVCC_BIN_WITH_PATH%
+echo NVCC path: %MLC_NVCC_BIN_WITH_PATH%
echo.
echo.
echo Checking compiler version ...
echo.
-"%CM_NVCC_BIN_WITH_PATH%" -V
+"%MLC_NVCC_BIN_WITH_PATH%" -V
echo.
echo Compiling program ...
echo.
-cd %CM_TMP_CURRENT_SCRIPT_PATH%
+cd %MLC_TMP_CURRENT_SCRIPT_PATH%
-"%CM_NVCC_BIN_WITH_PATH%" print_cuda_devices.cu -allow-unsupported-compiler -DWINDOWS
+"%MLC_NVCC_BIN_WITH_PATH%" print_cuda_devices.cu -allow-unsupported-compiler -DWINDOWS
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
rem Return to the original path obtained in CM
@@ -27,7 +27,7 @@ echo.
echo Running program ...
echo.
-cd %CM_TMP_CURRENT_PATH%
+cd %MLC_TMP_CURRENT_PATH%
-%CM_TMP_CURRENT_SCRIPT_PATH%\a.exe > tmp-run.out
+%MLC_TMP_CURRENT_SCRIPT_PATH%\a.exe > tmp-run.out
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/get-cuda-devices/run.sh b/script/get-cuda-devices/run.sh
index 3d208dd6b..2ee43d856 100644
--- a/script/get-cuda-devices/run.sh
+++ b/script/get-cuda-devices/run.sh
@@ -5,22 +5,22 @@
rm a.out
echo ""
-echo "NVCC path: ${CM_NVCC_BIN_WITH_PATH}"
+echo "NVCC path: ${MLC_NVCC_BIN_WITH_PATH}"
echo ""
echo ""
echo "Checking compiler version ..."
echo ""
-${CM_NVCC_BIN_WITH_PATH} -V
+${MLC_NVCC_BIN_WITH_PATH} -V
echo ""
echo "Compiling program ..."
echo ""
-cd ${CM_TMP_CURRENT_SCRIPT_PATH}
+cd ${MLC_TMP_CURRENT_SCRIPT_PATH}
-${CM_NVCC_BIN_WITH_PATH} -allow-unsupported-compiler print_cuda_devices.cu
+${MLC_NVCC_BIN_WITH_PATH} -allow-unsupported-compiler print_cuda_devices.cu
test $? -eq 0 || exit 1
# Return to the original path obtained in CM
@@ -29,7 +29,7 @@ echo ""
echo "Running program ..."
echo ""
-cd ${CM_TMP_CURRENT_PATH}
+cd ${MLC_TMP_CURRENT_PATH}
-${CM_TMP_CURRENT_SCRIPT_PATH}/a.out > tmp-run.out
+${MLC_TMP_CURRENT_SCRIPT_PATH}/a.out > tmp-run.out
test $? -eq 0 || exit 1
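Note: when the `_with-pycuda` variation sets `MLC_DETECT_USING_PYCUDA=yes`, the customize.py hunk above switches the script name to `detect`, so detection runs `detect.py` instead of compiling `print_cuda_devices.cu`. A rough sketch of what a pycuda-based enumeration can look like follows; it assumes pycuda is installed and is an illustration, not a copy of the repository's `detect.py`.

```python
# Illustrative pycuda-based device enumeration (assumes pycuda and a
# working CUDA driver); prints "key: value" lines similar to what
# customize.py folds into MLC_CUDA_DEVICE_PROP_* env keys.
import pycuda.driver as cuda

cuda.init()
for gpu_id in range(cuda.Device.count()):
    device = cuda.Device(gpu_id)
    major, minor = device.compute_capability()
    print(f"GPU Device ID: {gpu_id}")
    print(f"GPU Name: {device.name()}")
    print(f"Compute capability: {major}.{minor}")
    print(f"Global memory: {device.total_memory()}")
```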
diff --git a/script/get-cuda/README-extra.md b/script/get-cuda/README-extra.md
index c075711ff..d1d37c98c 100644
--- a/script/get-cuda/README-extra.md
+++ b/script/get-cuda/README-extra.md
@@ -4,9 +4,9 @@ This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/scrip
and if not found calls the [install script for CUDA](../script/install-cuda-prebuilt).
## Exported Variables
-* `CM_CUDA_INSTALLED_PATH`
-* `CM_CUDA_VERSION`
-* `CM_NVCC_BIN_WITH_PATH`
+* `MLC_CUDA_INSTALLED_PATH`
+* `MLC_CUDA_VERSION`
+* `MLC_NVCC_BIN_WITH_PATH`
* `CUDA_HOME`
* `CUDA_PATH`
diff --git a/script/get-cuda/customize.py b/script/get-cuda/customize.py
index c8a68c4a7..2c9ae7915 100644
--- a/script/get-cuda/customize.py
+++ b/script/get-cuda/customize.py
@@ -10,15 +10,15 @@ def preprocess(i):
env = i['env']
if str(env.get('CUDA_SKIP_SUDO', '')).lower() == 'true':
- env['CM_SUDO'] = ''
+ env['MLC_SUDO'] = ''
recursion_spaces = i['recursion_spaces']
if os_info['platform'] == 'windows':
- file_name = env['CM_TMP_FILE_TO_CHECK_WINDOWS']
+ file_name = env['MLC_TMP_FILE_TO_CHECK_WINDOWS']
- if env.get('CM_INPUT', '').strip() == '' and env.get(
- 'CM_TMP_PATH', '').strip() == '':
+ if env.get('MLC_INPUT', '').strip() == '' and env.get(
+ 'MLC_TMP_PATH', '').strip() == '':
# Check in "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA"
paths = []
for path in ["C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA",
@@ -34,31 +34,31 @@ def preprocess(i):
tmp_paths = ';'.join(paths)
tmp_paths += ';' + os.environ.get('PATH', '')
- env['CM_TMP_PATH'] = tmp_paths
- env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'
+ env['MLC_TMP_PATH'] = tmp_paths
+ env['MLC_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'
else:
- file_name = env['CM_TMP_FILE_TO_CHECK_UNIX']
+ file_name = env['MLC_TMP_FILE_TO_CHECK_UNIX']
# paths to cuda are not always in PATH - add a few typical locations to search for
# (unless forced by a user)
- if env.get('CM_INPUT', '').strip() == '' and env.get(
- 'CM_TMP_PATH', '').strip() == '':
+ if env.get('MLC_INPUT', '').strip() == '' and env.get(
+ 'MLC_TMP_PATH', '').strip() == '':
system_path = os.environ.get('PATH')
if system_path:
system_path = system_path + ":"
- env['CM_TMP_PATH'] = system_path + \
+ env['MLC_TMP_PATH'] = system_path + \
'/usr/local/cuda/bin:/usr/cuda/bin:/usr/local/cuda-11/bin:/usr/cuda-11/bin:/usr/local/cuda-12/bin:/usr/cuda-12/bin:/usr/local/packages/cuda'
- env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'
+ env['MLC_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'
- if env['CM_CUDA_FULL_TOOLKIT_INSTALL'] == "yes":
- env_key = 'CM_NVCC_BIN_WITH_PATH'
+ if env['MLC_CUDA_FULL_TOOLKIT_INSTALL'] == "yes":
+ env_key = 'MLC_NVCC_BIN_WITH_PATH'
path_env_key = 'PATH'
else:
- env_key = 'CM_CUDA_RT_WITH_PATH'
+ env_key = 'MLC_CUDA_RT_WITH_PATH'
path_env_key = 'LD_LIBRARY_PATH'
- env['CM_TMP_ENV_KEY'] = env_key
+ env['MLC_TMP_ENV_KEY'] = env_key
if env_key not in env:
r = i['automation'].find_artifact({'file_name': file_name,
@@ -73,8 +73,8 @@ def preprocess(i):
if os_info['platform'] == 'windows':
return r
- if r['return'] == 16 and env['CM_CUDA_FULL_TOOLKIT_INSTALL'] == "yes":
- env['CM_REQUIRE_INSTALL'] = "yes"
+ if r['return'] == 16 and env['MLC_CUDA_FULL_TOOLKIT_INSTALL'] == "yes":
+ env['MLC_REQUIRE_INSTALL'] = "yes"
return {'return': 0}
else:
return r
@@ -84,7 +84,7 @@ def preprocess(i):
def detect_version(i):
env = i['env']
- if env['CM_CUDA_FULL_TOOLKIT_INSTALL'] == "yes":
+ if env['MLC_CUDA_FULL_TOOLKIT_INSTALL'] == "yes":
return detect_version_nvcc(i)
else:
return detect_version_cuda_lib(i)
@@ -93,7 +93,7 @@ def detect_version(i):
def detect_version_nvcc(i):
r = i['automation'].parse_version({'match_text': r'release\s*([\d.]+)',
'group_number': 1,
- 'env_key': 'CM_CUDA_VERSION',
+ 'env_key': 'MLC_CUDA_VERSION',
'which_env': i['env']})
if r['return'] > 0:
return r
@@ -109,7 +109,7 @@ def detect_version_cuda_lib(i):
env = i['env']
print(env)
- cuda_rt_file_path = env['CM_CUDA_RT_WITH_PATH']
+ cuda_rt_file_path = env['MLC_CUDA_RT_WITH_PATH']
cuda_lib_path = os.path.dirname(cuda_rt_file_path)
cuda_path = os.path.abspath(os.path.join(cuda_lib_path, os.pardir))
@@ -123,7 +123,7 @@ def detect_version_cuda_lib(i):
if cuda_version_info:
cuda_version = cuda_version_info.get('version')
- env['CM_CUDA_VERSION'] = cuda_version
+ env['MLC_CUDA_VERSION'] = cuda_version
version = cuda_version
print(i['recursion_spaces'] + ' Detected version: {}'.format(version))
@@ -142,35 +142,35 @@ def postprocess(i):
return r
version = r['version']
- env['CM_CUDA_CACHE_TAGS'] = 'version-' + version
+ env['MLC_CUDA_CACHE_TAGS'] = 'version-' + version
- found_file_path = env[env['CM_TMP_ENV_KEY']]
+ found_file_path = env[env['MLC_TMP_ENV_KEY']]
- if env['CM_CUDA_FULL_TOOLKIT_INSTALL'] == "yes":
+ if env['MLC_CUDA_FULL_TOOLKIT_INSTALL'] == "yes":
cuda_path_bin = os.path.dirname(found_file_path)
- env['CM_CUDA_PATH_BIN'] = cuda_path_bin
+ env['MLC_CUDA_PATH_BIN'] = cuda_path_bin
cuda_path = os.path.dirname(cuda_path_bin)
- env['CM_CUDA_INSTALLED_PATH'] = cuda_path
- env['CM_NVCC_BIN'] = os.path.basename(found_file_path)
+ env['MLC_CUDA_INSTALLED_PATH'] = cuda_path
+ env['MLC_NVCC_BIN'] = os.path.basename(found_file_path)
else:
# We traverse backwards until we find a path with include dir
parent_path = os.path.dirname(found_file_path)
- env['CM_CUDA_PATH_LIB'] = parent_path
+ env['MLC_CUDA_PATH_LIB'] = parent_path
parent_path = os.path.dirname(parent_path)
while os.path.isdir(parent_path):
if os.path.exists(os.path.join(parent_path, "include")):
print("Path is " + parent_path)
found_path = parent_path
cuda_path = found_path
- env['CM_CUDA_INSTALLED_PATH'] = cuda_path
+ env['MLC_CUDA_INSTALLED_PATH'] = cuda_path
break
else:
parent_path = os.path.dirname(parent_path)
- if 'CM_CUDA_INSTALLED_PATH' not in env:
+ if 'MLC_CUDA_INSTALLED_PATH' not in env:
return {
'return': 1, 'error': "No CUDA installation path with an include directory is found"}
@@ -194,7 +194,7 @@ def postprocess(i):
env['+C_INCLUDE_PATH'].append(cuda_path_include)
env['+CPLUS_INCLUDE_PATH'].append(cuda_path_include)
- env['CM_CUDA_PATH_INCLUDE'] = cuda_path_include
+ env['MLC_CUDA_PATH_INCLUDE'] = cuda_path_include
# Lib
if os_info['platform'] == 'windows':
@@ -213,19 +213,19 @@ def postprocess(i):
env['+LD_LIBRARY_PATH'].append(cuda_path_lib)
env['+DYLD_FALLBACK_LIBRARY_PATH'].append(cuda_path_lib)
- env['CM_CUDA_PATH_LIB'] = cuda_path_lib
+ env['MLC_CUDA_PATH_LIB'] = cuda_path_lib
break
if '+ LDFLAGS' not in env:
env['+ LDFLAGS'] = []
- if 'CM_CUDA_PATH_LIB' in env and not cuda_system_path_install:
- x = env['CM_CUDA_PATH_LIB']
+ if 'MLC_CUDA_PATH_LIB' in env and not cuda_system_path_install:
+ x = env['MLC_CUDA_PATH_LIB']
if ' ' in x:
x = '"' + x + '"'
env['+ LDFLAGS'].append("-L" + x)
- env['CM_CUDA_VERSION_STRING'] = "cu" + \
- env['CM_CUDA_VERSION'].replace(".", "")
- env['CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX5'] = env['CM_CUDA_VERSION_STRING']
+ env['MLC_CUDA_VERSION_STRING'] = "cu" + \
+ env['MLC_CUDA_VERSION'].replace(".", "")
+ env['MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX5'] = env['MLC_CUDA_VERSION_STRING']
return {'return': 0, 'version': version}
diff --git a/script/get-cuda/meta.yaml b/script/get-cuda/meta.yaml
index db5a30b0b..ec5e26b69 100644
--- a/script/get-cuda/meta.yaml
+++ b/script/get-cuda/meta.yaml
@@ -21,32 +21,32 @@ cache: true
category: CUDA automation
default_env:
- CM_CUDA_PATH_LIB_CUDNN_EXISTS: 'no'
- CM_REQUIRE_INSTALL: 'no'
+ MLC_CUDA_PATH_LIB_CUDNN_EXISTS: 'no'
+ MLC_REQUIRE_INSTALL: 'no'
deps:
- tags: detect,os
- enable_if_env:
- CM_CUDA_FULL_TOOLKIT_INSTALL:
+ MLC_CUDA_FULL_TOOLKIT_INSTALL:
- 'yes'
- CM_HOST_OS_TYPE:
+ MLC_HOST_OS_TYPE:
- windows
names:
- compiler
tags: get,cl
input_mapping:
- cudnn_tar_file: CM_CUDNN_TAR_FILE_PATH
- cudnn_tar_path: CM_CUDNN_TAR_FILE_PATH
+ cudnn_tar_file: MLC_CUDNN_TAR_FILE_PATH
+ cudnn_tar_path: MLC_CUDNN_TAR_FILE_PATH
skip_sudo: CUDA_SKIP_SUDO
- skip_cudnn_install: CM_CUDA_SKIP_CUDNN_INSTALL
+ skip_cudnn_install: MLC_CUDA_SKIP_CUDNN_INSTALL
new_env_keys:
- CUDA_HOME
- CUDA_PATH
-- CM_CUDA_*
-- CM_NVCC_*
-- CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX5
+- MLC_CUDA_*
+- MLC_NVCC_*
+- MLC_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX5
- +PATH
- +C_INCLUDE_PATH
- +CPLUS_INCLUDE_PATH
@@ -56,22 +56,22 @@ new_env_keys:
prehook_deps:
- enable_if_env:
- CM_REQUIRE_INSTALL:
+ MLC_REQUIRE_INSTALL:
- 'yes'
names:
- install-cuda-prebuilt
reuse_version: true
tags: install,cuda,prebuilt
- enable_if_env:
- CM_CUDA_PACKAGE_MANAGER_INSTALL:
+ MLC_CUDA_PACKAGE_MANAGER_INSTALL:
- 'yes'
tags: get,generic-sys-util,_nvidia-cuda-toolkit
print_env_at_the_end:
- CM_CUDA_PATH_LIB_CUDNN_EXISTS: ''
- CM_CUDA_VERSION: ''
- CM_CUDA_VERSION_STRING: ''
- CM_NVCC_BIN_WITH_PATH: ''
+ MLC_CUDA_PATH_LIB_CUDNN_EXISTS: ''
+ MLC_CUDA_VERSION: ''
+ MLC_CUDA_VERSION_STRING: ''
+ MLC_NVCC_BIN_WITH_PATH: ''
CUDA_HOME: ''
print_files_if_script_error:
@@ -80,31 +80,31 @@ print_files_if_script_error:
variations:
cudnn:
env:
- CM_CUDA_NEEDS_CUDNN: 'yes'
+ MLC_CUDA_NEEDS_CUDNN: 'yes'
post_deps:
- names:
- cudnn
tags: get,nvidia,cudnn
skip_if_env:
- CM_CUDA_SKIP_CUDNN_INSTALL:
+ MLC_CUDA_SKIP_CUDNN_INSTALL:
- yes
lib-only:
env:
- CM_CUDA_FULL_TOOLKIT_INSTALL: 'no'
- CM_TMP_FILE_TO_CHECK_UNIX: libcudart.so
- CM_TMP_FILE_TO_CHECK_WINDOWS: libcudart.dll
+ MLC_CUDA_FULL_TOOLKIT_INSTALL: 'no'
+ MLC_TMP_FILE_TO_CHECK_UNIX: libcudart.so
+ MLC_TMP_FILE_TO_CHECK_WINDOWS: libcudart.dll
group: installation-mode
package-manager:
env:
- CM_CUDA_PACKAGE_MANAGER_INSTALL: 'yes'
+ MLC_CUDA_PACKAGE_MANAGER_INSTALL: 'yes'
prebuilt:
env:
- CM_REQUIRE_INSTALL: 'yes'
+ MLC_REQUIRE_INSTALL: 'yes'
toolkit:
default: true
env:
- CM_CUDA_FULL_TOOLKIT_INSTALL: 'yes'
- CM_TMP_FILE_TO_CHECK_UNIX: nvcc
- CM_TMP_FILE_TO_CHECK_WINDOWS: nvcc.exe
+ MLC_CUDA_FULL_TOOLKIT_INSTALL: 'yes'
+ MLC_TMP_FILE_TO_CHECK_UNIX: nvcc
+ MLC_TMP_FILE_TO_CHECK_WINDOWS: nvcc.exe
group: installation-mode
diff --git a/script/get-cuda/run.bat b/script/get-cuda/run.bat
index 89af970ac..38ed97dc7 100644
--- a/script/get-cuda/run.bat
+++ b/script/get-cuda/run.bat
@@ -1,3 +1,3 @@
-"%CM_NVCC_BIN_WITH_PATH%" -V > tmp-ver.out
+"%MLC_NVCC_BIN_WITH_PATH%" -V > tmp-ver.out
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/get-cuda/run.sh b/script/get-cuda/run.sh
index aac0fee36..2ba9d511c 100644
--- a/script/get-cuda/run.sh
+++ b/script/get-cuda/run.sh
@@ -1,14 +1,14 @@
#!/bin/bash
-if [[ ${CM_CUDA_FULL_TOOLKIT_INSTALL} == "no" ]]; then
+if [[ ${MLC_CUDA_FULL_TOOLKIT_INSTALL} == "no" ]]; then
exit 0
fi
-nvcc_bin=${CM_NVCC_BIN_WITH_PATH:-nvcc}
+nvcc_bin=${MLC_NVCC_BIN_WITH_PATH:-nvcc}
${nvcc_bin} -V > tmp-ver.out
test $? -eq 0 || exit 1
if [[ ${nvcc_bin} == "nvcc" ]]; then
nvcc_path=`which nvcc`
- echo "CM_NVCC_BIN_WITH_PATH=${nvcc_path}" >> tmp-run-env.out
+ echo "MLC_NVCC_BIN_WITH_PATH=${nvcc_path}" >> tmp-run-env.out
test $? -eq 0 || exit 1
fi
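Note: the `detect_version_nvcc` hunk above matches `release\s*([\d.]+)` against the `nvcc -V` output that run.sh captures in `tmp-ver.out`. A standalone sketch of the same extraction, assuming `nvcc` is on `PATH`:

```python
import re
import subprocess

# Run "nvcc -V" and pull out the toolkit version exactly as the
# parse_version call above does (group 1 of the release regex).
output = subprocess.run(["nvcc", "-V"], capture_output=True,
                        text=True).stdout
match = re.search(r"release\s*([\d.]+)", output)
if match:
    print("MLC_CUDA_VERSION=" + match.group(1))  # e.g. 12.4
```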
diff --git a/script/get-cudnn/customize.py b/script/get-cudnn/customize.py
index ed64cd241..097c4342d 100644
--- a/script/get-cudnn/customize.py
+++ b/script/get-cudnn/customize.py
@@ -12,12 +12,12 @@ def preprocess(i):
env = i['env']
- env['CM_TMP_RUN_COPY_SCRIPT'] = "no"
+ env['MLC_TMP_RUN_COPY_SCRIPT'] = "no"
# If TAR file is not explicitly specified, search
- if env.get('CM_CUDNN_TAR_FILE_PATH', '') == '':
+ if env.get('MLC_CUDNN_TAR_FILE_PATH', '') == '':
- cuda_path_lib = env.get('CM_CUDA_PATH_LIB')
+ cuda_path_lib = env.get('MLC_CUDA_PATH_LIB')
if os_info['platform'] == 'windows':
extra_pre = ''
@@ -27,21 +27,21 @@ def preprocess(i):
extra_ext = 'so'
libfilename = extra_pre + 'cudnn.' + extra_ext
- env['CM_CUDNN_VERSION'] = 'vdetected'
+ env['MLC_CUDNN_VERSION'] = 'vdetected'
if os.path.exists(os.path.join(cuda_path_lib, libfilename)):
- env['CM_CUDA_PATH_LIB_CUDNN'] = env['CM_CUDA_PATH_LIB']
+ env['MLC_CUDA_PATH_LIB_CUDNN'] = env['MLC_CUDA_PATH_LIB']
return {'return': 0}
- if env.get('CM_TMP_PATH', '').strip() != '':
- path = env.get('CM_TMP_PATH')
+ if env.get('MLC_TMP_PATH', '').strip() != '':
+ path = env.get('MLC_TMP_PATH')
if os.path.exists(os.path.join(path, libfilename)):
- env['CM_CUDA_PATH_LIB_CUDNN'] = path
+ env['MLC_CUDA_PATH_LIB_CUDNN'] = path
return {'return': 0}
- if env.get('CM_INPUT', '').strip() == '':
+ if env.get('MLC_INPUT', '').strip() == '':
if os_info['platform'] == 'windows':
- if env.get('CM_TMP_PATH', '').strip() == '':
+ if env.get('MLC_TMP_PATH', '').strip() == '':
# Check in "C:\Program Files\NVIDIA GPU Computing
# Toolkit\CUDA"
paths = []
@@ -58,32 +58,32 @@ def preprocess(i):
tmp_paths = ';'.join(paths)
tmp_paths += ';' + os.environ.get('PATH', '')
- env['CM_TMP_PATH'] = tmp_paths
- env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'
+ env['MLC_TMP_PATH'] = tmp_paths
+ env['MLC_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'
else:
# paths to cuda are not always in PATH - add a few typical locations to search for
# (unless forced by a user)
- cm_tmp_path = env.get('CM_TMP_PATH', '').strip()
+ cm_tmp_path = env.get('MLC_TMP_PATH', '').strip()
if cm_tmp_path != '':
cm_tmp_path += ':'
cm_tmp_path += '/usr/local/cuda/lib64:/usr/cuda/lib64:/usr/local/cuda/lib:/usr/cuda/lib:/usr/local/cuda-11/lib64:/usr/cuda-11/lib:/usr/local/cuda-12/lib:/usr/cuda-12/lib:/usr/local/packages/cuda/lib'
cm_tmp_path += os.path.expandvars(':$CUDNN_ROOT/lib')
- env['CM_TMP_PATH'] = cm_tmp_path
- env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'
+ env['MLC_TMP_PATH'] = cm_tmp_path
+ env['MLC_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'
for lib_path in env.get(
- '+CM_HOST_OS_DEFAULT_LIBRARY_PATH', []):
+ '+MLC_HOST_OS_DEFAULT_LIBRARY_PATH', []):
if (os.path.exists(lib_path)):
- env['CM_TMP_PATH'] += ':' + lib_path
+ env['MLC_TMP_PATH'] += ':' + lib_path
r = i['automation'].find_artifact({'file_name': libfilename,
'env': env,
'os_info': os_info,
'default_path_env_key': 'LD_LIBRARY_PATH',
'detect_version': False,
- 'env_path_key': 'CM_CUDA_PATH_LIB_CUDNN',
+ 'env_path_key': 'MLC_CUDA_PATH_LIB_CUDNN',
'run_script_input': i['run_script_input'],
'recursion_spaces': recursion_spaces})
if r['return'] > 0:
@@ -91,23 +91,23 @@ def preprocess(i):
return r
if r['return'] == 16:
- env['CM_TMP_REQUIRE_INSTALL'] = "yes"
+ env['MLC_TMP_REQUIRE_INSTALL'] = "yes"
else:
return r
else:
# On Linux we may detected file instead of path to cudnn
- if os.path.isfile(env['CM_CUDA_PATH_LIB_CUDNN']):
- env['CM_CUDA_PATH_LIB_CUDNN'] = os.path.dirname(
- env['CM_CUDA_PATH_LIB_CUDNN'])
+ if os.path.isfile(env['MLC_CUDA_PATH_LIB_CUDNN']):
+ env['MLC_CUDA_PATH_LIB_CUDNN'] = os.path.dirname(
+ env['MLC_CUDA_PATH_LIB_CUDNN'])
return {'return': 0}
- if env.get('CM_CUDNN_TAR_FILE_PATH', '') == '':
+ if env.get('MLC_CUDNN_TAR_FILE_PATH', '') == '':
return {'return': 1, 'error': 'Please envoke cm run script "get cudnn" --tar_file={full path to the cuDNN tar file}'}
print('Untaring file - can take some time ...')
- my_tar = tarfile.open(os.path.expanduser(env['CM_CUDNN_TAR_FILE_PATH']))
+ my_tar = tarfile.open(os.path.expanduser(env['MLC_CUDNN_TAR_FILE_PATH']))
folder_name = my_tar.getnames()[0]
if not os.path.exists(os.path.join(os.getcwd(), folder_name)):
my_tar.extractall()
@@ -119,14 +119,14 @@ def preprocess(i):
return {
'return': 1, 'error': 'Extracted CUDNN folder does not seem proper - Version information missing'}
version = version_match.group(1)
- env['CM_CUDNN_VERSION'] = version
+ env['MLC_CUDNN_VERSION'] = version
inc_path = os.path.join(os.getcwd(), folder_name, "include")
lib_path = os.path.join(os.getcwd(), folder_name, "lib")
- cuda_inc_path = env['CM_CUDA_PATH_INCLUDE']
- cuda_lib_path = env['CM_CUDA_PATH_LIB']
- env['CM_CUDA_PATH_LIB_CUDNN'] = env['CM_CUDA_PATH_LIB']
- env['CM_CUDA_PATH_INCLUDE_CUDNN'] = env['CM_CUDA_PATH_INCLUDE']
+ cuda_inc_path = env['MLC_CUDA_PATH_INCLUDE']
+ cuda_lib_path = env['MLC_CUDA_PATH_LIB']
+ env['MLC_CUDA_PATH_LIB_CUDNN'] = env['MLC_CUDA_PATH_LIB']
+ env['MLC_CUDA_PATH_INCLUDE_CUDNN'] = env['MLC_CUDA_PATH_INCLUDE']
try:
print(
@@ -136,9 +136,9 @@ def preprocess(i):
shutil.copytree(lib_path, cuda_lib_path, dirs_exist_ok=True)
except BaseException:
# Need to copy to system path via run.sh
- env['CM_TMP_RUN_COPY_SCRIPT'] = "yes"
- env['CM_TMP_INC_PATH'] = inc_path
- env['CM_TMP_LIB_PATH'] = lib_path
+ env['MLC_TMP_RUN_COPY_SCRIPT'] = "yes"
+ env['MLC_TMP_INC_PATH'] = inc_path
+ env['MLC_TMP_LIB_PATH'] = lib_path
return {'return': 0}
@@ -149,10 +149,10 @@ def postprocess(i):
env = i['env']
- version = env['CM_CUDNN_VERSION']
+ version = env['MLC_CUDNN_VERSION']
if version == 'vdetected':
- path_to_cudnn = env.get('CM_CUDA_PATH_LIB_CUDNN', '')
+ path_to_cudnn = env.get('MLC_CUDA_PATH_LIB_CUDNN', '')
if os.path.isdir(path_to_cudnn):
path_to_include = path_to_cudnn
path_to_include_file = ''
@@ -169,7 +169,7 @@ def postprocess(i):
path_to_include_file = x
if path_to_include_file != '':
- env['CM_CUDA_PATH_INCLUDE_CUDNN'] = os.path.dirname(
+ env['MLC_CUDA_PATH_INCLUDE_CUDNN'] = os.path.dirname(
path_to_include_file)
r = utils.load_txt(path_to_include_file, split=True)
@@ -195,8 +195,8 @@ def postprocess(i):
if xversion != '':
version = xversion
- env['CM_CUDNN_VERSION'] = xversion
+ env['MLC_CUDNN_VERSION'] = xversion
- env['CM_CUDA_PATH_LIB_CUDNN_EXISTS'] = 'yes'
+ env['MLC_CUDA_PATH_LIB_CUDNN_EXISTS'] = 'yes'
return {'return': 0, 'version': version}
diff --git a/script/get-cudnn/meta.yaml b/script/get-cudnn/meta.yaml
index fa5ccd2c7..b4f459bee 100644
--- a/script/get-cudnn/meta.yaml
+++ b/script/get-cudnn/meta.yaml
@@ -14,8 +14,8 @@ cache: true
category: CUDA automation
default_env:
- CM_INPUT: ''
- CM_SUDO: sudo
+ MLC_INPUT: ''
+ MLC_SUDO: sudo
deps:
- tags: detect,os
@@ -23,9 +23,9 @@ deps:
- names:
- cuda
skip_if_env:
- CM_CUDA_PATH_INCLUDE:
+ MLC_CUDA_PATH_INCLUDE:
- 'on'
- CM_CUDA_PATH_LIB:
+ MLC_CUDA_PATH_LIB:
- 'on'
tags: get,cuda
@@ -36,14 +36,14 @@ input_description:
desc: Full path to the cuDNN Tar file downloaded from Nvidia website (https://developer.nvidia.com/cudnn)
input_mapping:
- input: CM_INPUT
- tar_file: CM_CUDNN_TAR_FILE_PATH
+ input: MLC_INPUT
+ tar_file: MLC_CUDNN_TAR_FILE_PATH
new_env_keys:
-- CM_CUDNN_*
-- CM_CUDA_PATH_LIB_CUDNN
-- CM_CUDA_PATH_INCLUDE_CUDNN
-- CM_CUDA_PATH_LIB_CUDNN_EXISTS
+- MLC_CUDNN_*
+- MLC_CUDA_PATH_LIB_CUDNN
+- MLC_CUDA_PATH_INCLUDE_CUDNN
+- MLC_CUDA_PATH_LIB_CUDNN_EXISTS
- +PATH
- +C_INCLUDE_PATH
- +CPLUS_INCLUDE_PATH
@@ -51,6 +51,6 @@ new_env_keys:
- +DYLD_FALLBACK_LIBRARY_PATH
print_env_at_the_end:
- CM_CUDA_PATH_LIB_CUDNN: ''
- CM_CUDA_PATH_INCLUDE_CUDNN: ''
- CM_CUDNN_VERSION: ''
+ MLC_CUDA_PATH_LIB_CUDNN: ''
+ MLC_CUDA_PATH_INCLUDE_CUDNN: ''
+ MLC_CUDNN_VERSION: ''
diff --git a/script/get-cudnn/run.sh b/script/get-cudnn/run.sh
index e2cb00fb0..0ac138303 100644
--- a/script/get-cudnn/run.sh
+++ b/script/get-cudnn/run.sh
@@ -1,11 +1,11 @@
#!/bin/bash
-if [ ${CM_TMP_RUN_COPY_SCRIPT} == "yes" ]; then
- cmd="${CM_SUDO} cp ${CM_TMP_INC_PATH}/*.h ${CM_CUDA_PATH_INCLUDE}/"
+if [ ${MLC_TMP_RUN_COPY_SCRIPT} == "yes" ]; then
+ cmd="${MLC_SUDO} cp ${MLC_TMP_INC_PATH}/*.h ${MLC_CUDA_PATH_INCLUDE}/"
echo $cmd
eval $cmd
test $? -eq 0 || exit 1
- cmd="${CM_SUDO} cp -P ${CM_TMP_LIB_PATH}/libcudnn* ${CM_CUDA_PATH_LIB}/"
+ cmd="${MLC_SUDO} cp -P ${MLC_TMP_LIB_PATH}/libcudnn* ${MLC_CUDA_PATH_LIB}/"
echo $cmd
eval $cmd
test $? -eq 0 || exit 1
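Note: when only a library path is found (`MLC_CUDNN_VERSION` stays `vdetected`), the postprocess hunk above walks the detected directory and reads the real version back out of a cuDNN header. A simplified sketch of that idea, assuming the header uses the conventional `CUDNN_MAJOR`/`CUDNN_MINOR`/`CUDNN_PATCHLEVEL` macros:

```python
import re

# Parse "#define CUDNN_MAJOR 8" style macros out of a cuDNN header;
# the macro names are the conventional ones and are assumed here.
def cudnn_version_from_header(header_path: str) -> str:
    parts = {}
    with open(header_path) as f:
        for line in f:
            m = re.match(r"#define\s+CUDNN_(MAJOR|MINOR|PATCHLEVEL)\s+(\d+)",
                         line)
            if m:
                parts[m.group(1)] = m.group(2)
    return ".".join(parts.get(k, "0")
                    for k in ("MAJOR", "MINOR", "PATCHLEVEL"))
```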
diff --git a/script/get-dataset-cifar10/meta.yaml b/script/get-dataset-cifar10/meta.yaml
index 1be5ef644..dfc05ab15 100644
--- a/script/get-dataset-cifar10/meta.yaml
+++ b/script/get-dataset-cifar10/meta.yaml
@@ -6,7 +6,7 @@ category: AI/ML datasets
deps:
- tags: detect,os
new_env_keys:
-- CM_DATASET_*
+- MLC_DATASET_*
tags:
- get
- dataset
@@ -19,10 +19,10 @@ variations:
python:
default: true
env:
- CM_DATASET: CIFAR10
- CM_DATASET_CIFAR10: https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
- CM_DATASET_FILENAME: cifar-10-python.tar.gz
- CM_DATASET_FILENAME1: cifar-10-python.tar
+ MLC_DATASET: CIFAR10
+ MLC_DATASET_CIFAR10: https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
+ MLC_DATASET_FILENAME: cifar-10-python.tar.gz
+ MLC_DATASET_FILENAME1: cifar-10-python.tar
group: data_format
tiny:
deps:
@@ -33,4 +33,4 @@ variations:
- tags: get,tinymlperf,src
- tags: get,src,eembc,energy-runner
env:
- CM_DATASET_CONVERT_TO_TINYMLPERF: 'yes'
+ MLC_DATASET_CONVERT_TO_TINYMLPERF: 'yes'
diff --git a/script/get-dataset-cifar10/run.bat b/script/get-dataset-cifar10/run.bat
index 8f54fb86e..621dbd632 100644
--- a/script/get-dataset-cifar10/run.bat
+++ b/script/get-dataset-cifar10/run.bat
@@ -1,48 +1,48 @@
-wget -nc %CM_DATASET_CIFAR10% --no-check-certificate
+wget -nc %MLC_DATASET_CIFAR10% --no-check-certificate
IF %ERRORLEVEL% NEQ 0 EXIT 1
-del /Q /S %CM_DATASET_FILENAME1%
+del /Q /S %MLC_DATASET_FILENAME1%
-gzip -d %CM_DATASET_FILENAME%
+gzip -d %MLC_DATASET_FILENAME%
IF %ERRORLEVEL% NEQ 0 EXIT 1
-tar -xvf %CM_DATASET_FILENAME1%
+tar -xvf %MLC_DATASET_FILENAME1%
IF %ERRORLEVEL% NEQ 0 EXIT 1
-del /Q /S %CM_DATASET_FILENAME1%
+del /Q /S %MLC_DATASET_FILENAME1%
-echo CM_DATASET_PATH=%CD%\cifar-10-batches-py > tmp-run-env.out
-echo CM_DATASET_CIFAR10_PATH=%CD%\cifar-10-batches-py >> tmp-run-env.out
+echo MLC_DATASET_PATH=%CD%\cifar-10-batches-py > tmp-run-env.out
+echo MLC_DATASET_CIFAR10_PATH=%CD%\cifar-10-batches-py >> tmp-run-env.out
-if "%CM_DATASET_CONVERT_TO_TINYMLPERF%" == "yes" (
+if "%MLC_DATASET_CONVERT_TO_TINYMLPERF%" == "yes" (
echo.
echo Copying TinyMLPerf convertor ...
echo.
- copy /B /Y %CM_MLPERF_TINY_TRAINING_IC%\* .
+ copy /B /Y %MLC_MLPERF_TINY_TRAINING_IC%\* .
echo.
echo Installing Python requirements ...
echo.
- %CM_PYTHON_BIN% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt
+ %MLC_PYTHON_BIN% -m pip install -r %MLC_TMP_CURRENT_SCRIPT_PATH%\requirements.txt
IF %ERRORLEVEL% NEQ 0 EXIT 1
echo.
echo Converting ...
echo.
- %CM_PYTHON_BIN% perf_samples_loader.py
+ %MLC_PYTHON_BIN% perf_samples_loader.py
IF %ERRORLEVEL% NEQ 0 EXIT 1
copy /B /Y y_labels.csv perf_samples
- echo CM_DATASET_CIFAR10_TINYMLPERF_PATH=%CD%\perf_samples >> tmp-run-env.out
+ echo MLC_DATASET_CIFAR10_TINYMLPERF_PATH=%CD%\perf_samples >> tmp-run-env.out
echo.
echo Copying to EEMBC runner user space ...
echo.
- copy /B /Y perf_samples\* %CM_EEMBC_ENERGY_RUNNER_DATASETS%\ic01
+ copy /B /Y perf_samples\* %MLC_EEMBC_ENERGY_RUNNER_DATASETS%\ic01
)
diff --git a/script/get-dataset-cifar10/run.sh b/script/get-dataset-cifar10/run.sh
index a113a2e4d..814177d52 100644
--- a/script/get-dataset-cifar10/run.sh
+++ b/script/get-dataset-cifar10/run.sh
@@ -1,50 +1,50 @@
#!/bin/bash
-wget -nc ${CM_DATASET_CIFAR10} --no-check-certificate
+wget -nc ${MLC_DATASET_CIFAR10} --no-check-certificate
test $? -eq 0 || exit 1
-rm -rf ${CM_DATASET_FILENAME1}
+rm -rf ${MLC_DATASET_FILENAME1}
-gzip -d ${CM_DATASET_FILENAME}
+gzip -d ${MLC_DATASET_FILENAME}
test $? -eq 0 || exit 1
-tar -xvf ${CM_DATASET_FILENAME1}
+tar -xvf ${MLC_DATASET_FILENAME1}
test $? -eq 0 || exit 1
-rm -rf ${CM_DATASET_FILENAME}
+rm -rf ${MLC_DATASET_FILENAME}
-echo "CM_DATASET_PATH=$PWD/cifar-10-batches-py" > tmp-run-env.out
-echo "CM_DATASET_CIFAR10_PATH=$PWD/cifar-10-batches-py" >> tmp-run-env.out
+echo "MLC_DATASET_PATH=$PWD/cifar-10-batches-py" > tmp-run-env.out
+echo "MLC_DATASET_CIFAR10_PATH=$PWD/cifar-10-batches-py" >> tmp-run-env.out
-if [ "${CM_DATASET_CONVERT_TO_TINYMLPERF}" == "yes" ]; then
+if [ "${MLC_DATASET_CONVERT_TO_TINYMLPERF}" == "yes" ]; then
echo ""
echo "Copying TinyMLPerf convertor ..."
echo ""
- cp -rf ${CM_MLPERF_TINY_TRAINING_IC}/* .
+ cp -rf ${MLC_MLPERF_TINY_TRAINING_IC}/* .
echo ""
echo "Installing Python requirements ..."
echo ""
- ${CM_PYTHON_BIN} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt
+ ${MLC_PYTHON_BIN} -m pip install -r ${MLC_TMP_CURRENT_SCRIPT_PATH}/requirements.txt
if [ "${?}" != "0" ]; then exit 1; fi
echo ""
echo "Converting in $PWD ..."
echo ""
- ${CM_PYTHON_BIN} perf_samples_loader.py
+ ${MLC_PYTHON_BIN} perf_samples_loader.py
if [ "${?}" != "0" ]; then exit 1; fi
cp -rf y_labels.csv perf_samples
- echo "CM_DATASET_CIFAR10_TINYMLPERF_PATH=$PWD/perf_samples" >> tmp-run-env.out
+ echo "MLC_DATASET_CIFAR10_TINYMLPERF_PATH=$PWD/perf_samples" >> tmp-run-env.out
echo ""
echo "Copying to EEMBC runner user space ..."
echo ""
- cp -rf perf_samples/* ${CM_EEMBC_ENERGY_RUNNER_DATASETS}/ic01
+ cp -rf perf_samples/* ${MLC_EEMBC_ENERGY_RUNNER_DATASETS}/ic01
fi
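Note: both run scripts above export results by writing `KEY=value` lines to `tmp-run-env.out`, which the automation reads back into the environment after the script exits. A minimal sketch of such a reader (the real loader lives in the automation framework, not in this patch):

```python
# Minimal sketch of folding tmp-run-env.out back into an env dict;
# splits at the first "=" so values may themselves contain "=".
def load_run_env(path: str = "tmp-run-env.out") -> dict:
    env = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if line and "=" in line:
                key, _, value = line.partition("=")
                env[key] = value
    return env

# e.g. load_run_env()["MLC_DATASET_CIFAR10_PATH"]
```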
diff --git a/script/get-dataset-cnndm/customize.py b/script/get-dataset-cnndm/customize.py
index a6cf2d476..34726734f 100644
--- a/script/get-dataset-cnndm/customize.py
+++ b/script/get-dataset-cnndm/customize.py
@@ -7,11 +7,11 @@ def preprocess(i):
env = i['env']
- if env.get('CM_CNNDM_INTEL_VARIATION', '') == 'yes':
+ if env.get('MLC_CNNDM_INTEL_VARIATION', '') == 'yes':
i['run_script_input']['script_name'] = "run-intel"
else:
print("Using MLCommons Inference source from '" +
- env['CM_MLPERF_INFERENCE_SOURCE'] + "'")
+ env['MLC_MLPERF_INFERENCE_SOURCE'] + "'")
return {'return': 0}
@@ -19,18 +19,18 @@ def preprocess(i):
def postprocess(i):
env = i['env']
- if env.get('CM_DATASET_CALIBRATION', '') == "no":
- env['CM_DATASET_PATH'] = os.path.join(os.getcwd(), 'install')
- env['CM_DATASET_EVAL_PATH'] = os.path.join(
+ if env.get('MLC_DATASET_CALIBRATION', '') == "no":
+ env['MLC_DATASET_PATH'] = os.path.join(os.getcwd(), 'install')
+ env['MLC_DATASET_EVAL_PATH'] = os.path.join(
os.getcwd(), 'install', 'cnn_eval.json')
- env['CM_DATASET_CNNDM_EVAL_PATH'] = os.path.join(
+ env['MLC_DATASET_CNNDM_EVAL_PATH'] = os.path.join(
os.getcwd(), 'install', 'cnn_eval.json')
- env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_DATASET_PATH']
+ env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_DATASET_PATH']
else:
- env['CM_CALIBRATION_DATASET_PATH'] = os.path.join(
+ env['MLC_CALIBRATION_DATASET_PATH'] = os.path.join(
os.getcwd(), 'install', 'cnn_dailymail_calibration.json')
- env['CM_CALIBRATION_DATASET_CNNDM_PATH'] = os.path.join(
+ env['MLC_CALIBRATION_DATASET_CNNDM_PATH'] = os.path.join(
os.getcwd(), 'install', 'cnn_dailymail_calibration.json')
- env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_CALIBRATION_DATASET_PATH']
+ env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_CALIBRATION_DATASET_PATH']
return {'return': 0}
diff --git a/script/get-dataset-cnndm/meta.yaml b/script/get-dataset-cnndm/meta.yaml
index 91b2af381..bdc27957b 100644
--- a/script/get-dataset-cnndm/meta.yaml
+++ b/script/get-dataset-cnndm/meta.yaml
@@ -4,7 +4,7 @@ automation_uid: 5b4e0237da074764
cache: true
category: AI/ML datasets
default_env:
- CM_DATASET_CALIBRATION: 'no'
+ MLC_DATASET_CALIBRATION: 'no'
deps:
- tags: get,sys-utils-cm
- names:
@@ -15,7 +15,7 @@ deps:
- names:
- inference-src
skip_if_env:
- CM_CNNDM_INTEL_VARIATION:
+ MLC_CNNDM_INTEL_VARIATION:
- 'yes'
tags: mlperf,inference,source
- tags: get,generic-python-lib,_package.simplejson
@@ -23,7 +23,7 @@ deps:
- tags: get,generic-python-lib,_package.tokenizers
- tags: get,generic-python-lib,_numpy
env:
- CM_DATASET: CNNDM
+ MLC_DATASET: CNNDM
tags:
- get
- dataset
@@ -35,21 +35,21 @@ uid: aed298c156e24257
variations:
calibration:
env:
- CM_DATASET_CALIBRATION: 'yes'
+ MLC_DATASET_CALIBRATION: 'yes'
group: dataset-type
new_env_keys:
- - CM_CALIBRATION_DATASET_PATH
- - CM_CALIBRATION_DATASET_CNNDM_PATH
+ - MLC_CALIBRATION_DATASET_PATH
+ - MLC_CALIBRATION_DATASET_CNNDM_PATH
intel: {}
intel,validation:
env:
- CM_CNNDM_INTEL_VARIATION: 'yes'
+ MLC_CNNDM_INTEL_VARIATION: 'yes'
validation:
default: true
env:
- CM_DATASET_CALIBRATION: 'no'
+ MLC_DATASET_CALIBRATION: 'no'
group: dataset-type
new_env_keys:
- - CM_DATASET_PATH
- - CM_DATASET_EVAL_PATH
- - CM_DATASET_CNNDM_EVAL_PATH
+ - MLC_DATASET_PATH
+ - MLC_DATASET_EVAL_PATH
+ - MLC_DATASET_CNNDM_EVAL_PATH
diff --git a/script/get-dataset-cnndm/run-intel.sh b/script/get-dataset-cnndm/run-intel.sh
index 067f158a5..36976e282 100644
--- a/script/get-dataset-cnndm/run-intel.sh
+++ b/script/get-dataset-cnndm/run-intel.sh
@@ -9,7 +9,7 @@ export DATASET_CNNDM_PATH=${CUR}/install
wget -nc https://raw.githubusercontent.com/mlcommons/inference_results_v3.1/main/closed/Intel/code/gptj-99/pytorch-cpu/download-dataset.py
test $? -eq 0 || exit 1
-cmd="${CM_PYTHON_BIN_WITH_PATH} download-dataset.py --split validation --output-dir ${DATASET_CNNDM_PATH}"
+cmd="${MLC_PYTHON_BIN_WITH_PATH} download-dataset.py --split validation --output-dir ${DATASET_CNNDM_PATH}"
echo "$cmd"
eval "$cmd"
test $? -eq 0 || exit 1
diff --git a/script/get-dataset-cnndm/run.sh b/script/get-dataset-cnndm/run.sh
index f9aa3864b..48e3050e0 100644
--- a/script/get-dataset-cnndm/run.sh
+++ b/script/get-dataset-cnndm/run.sh
@@ -4,16 +4,16 @@ CUR=${PWD}
mkdir -p install
export DATASET_CNNDM_PATH=${CUR}/install
-cd ${CM_MLPERF_INFERENCE_SOURCE}
+cd ${MLC_MLPERF_INFERENCE_SOURCE}
cd language/gpt-j
-if [[ ${CM_DATASET_CALIBRATION} == "no" ]]; then
- cmd="${CM_PYTHON_BIN_WITH_PATH} download_cnndm.py"
+if [[ ${MLC_DATASET_CALIBRATION} == "no" ]]; then
+ cmd="${MLC_PYTHON_BIN_WITH_PATH} download_cnndm.py"
echo $cmd
eval $cmd
test $? -eq 0 || exit 1
else
- cmd="${CM_PYTHON_BIN_WITH_PATH} prepare-calibration.py --calibration-list-file calibration-list.txt --output-dir ${DATASET_CNNDM_PATH}"
+ cmd="${MLC_PYTHON_BIN_WITH_PATH} prepare-calibration.py --calibration-list-file calibration-list.txt --output-dir ${DATASET_CNNDM_PATH}"
echo $cmd
eval $cmd
test $? -eq 0 || exit 1
diff --git a/script/get-dataset-coco/README-extra.md b/script/get-dataset-coco/README-extra.md
index 9f19d2e8d..2bf3a5321 100644
--- a/script/get-dataset-coco/README-extra.md
+++ b/script/get-dataset-coco/README-extra.md
@@ -36,23 +36,23 @@ cmr "get coco dataset _val _2017" -j
```json
"new_env": {
- "CM_DATASET_COCO_URL_ANNOTATIONS": "http://images.cocodataset.org/annotations",
- "CM_DATASET_COCO_URL_DATA": "http://images.cocodataset.org/zips",
- "CM_DATASET_COCO_VERSION": "2017",
- "CM_DATASET_COCO_TYPE": "val",
- "CM_DATASET_COCO_SIZE": "complete",
- "CM_DATASET_COCO_ANNOTATIONS_DOWNLOAD_PATH": "d:\\Work2\\COCO-2017-val\\annotations_trainval2017.zip",
- "CM_DATASET_COCO_ANNOTATIONS_PATH": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07\\annotations",
- "CM_DATASET_COCO_DATA_DOWNLOAD_PATH": "d:\\Work2\\COCO-2017-val\\val2017.zip",
- "CM_DATASET_COCO_DATA_PATH": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07\\val2017",
- "CM_DATASET_COCO_MD5SUM_ANN": "f4bbac642086de4f52a3fdda2de5fa2c",
- "CM_DATASET_COCO_MD5SUM_DATA": "442b8da7639aecaf257c1dceb8ba8c80",
- "CM_DATASET_COCO_PATH": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07",
- "CM_DATASET_COCO_TYPE_AND_VERSION": "val2017",
- "CM_DATASET_COCO_URL_ANNOTATIONS_FULL": "http://images.cocodataset.org/annotations/annotations_trainval2017.zip",
- "CM_DATASET_COCO_URL_DATA_FULL": "http://images.cocodataset.org/zips/val2017.zip",
- "CM_DATASET_PATH": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07",
- "CM_DATASET_PATH_ROOT": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07"
+ "MLC_DATASET_COCO_URL_ANNOTATIONS": "http://images.cocodataset.org/annotations",
+ "MLC_DATASET_COCO_URL_DATA": "http://images.cocodataset.org/zips",
+ "MLC_DATASET_COCO_VERSION": "2017",
+ "MLC_DATASET_COCO_TYPE": "val",
+ "MLC_DATASET_COCO_SIZE": "complete",
+ "MLC_DATASET_COCO_ANNOTATIONS_DOWNLOAD_PATH": "d:\\Work2\\COCO-2017-val\\annotations_trainval2017.zip",
+ "MLC_DATASET_COCO_ANNOTATIONS_PATH": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07\\annotations",
+ "MLC_DATASET_COCO_DATA_DOWNLOAD_PATH": "d:\\Work2\\COCO-2017-val\\val2017.zip",
+ "MLC_DATASET_COCO_DATA_PATH": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07\\val2017",
+ "MLC_DATASET_COCO_MD5SUM_ANN": "f4bbac642086de4f52a3fdda2de5fa2c",
+ "MLC_DATASET_COCO_MD5SUM_DATA": "442b8da7639aecaf257c1dceb8ba8c80",
+ "MLC_DATASET_COCO_PATH": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07",
+ "MLC_DATASET_COCO_TYPE_AND_VERSION": "val2017",
+ "MLC_DATASET_COCO_URL_ANNOTATIONS_FULL": "http://images.cocodataset.org/annotations/annotations_trainval2017.zip",
+ "MLC_DATASET_COCO_URL_DATA_FULL": "http://images.cocodataset.org/zips/val2017.zip",
+ "MLC_DATASET_PATH": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07",
+ "MLC_DATASET_PATH_ROOT": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07"
},
```
diff --git a/script/get-dataset-coco/customize.py b/script/get-dataset-coco/customize.py
index b7ee135ee..029967fc4 100644
--- a/script/get-dataset-coco/customize.py
+++ b/script/get-dataset-coco/customize.py
@@ -10,11 +10,11 @@ def preprocess(i):
automation = i['automation']
env = i['env']
meta = i['meta']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
# Check if path is there to detect existing data set
detected = False
- path = env.get('CM_TMP_PATH', '')
+ path = env.get('MLC_TMP_PATH', '')
if path != '':
if not os.path.isdir(path):
return {'return': 1,
@@ -40,14 +40,14 @@ def preprocess(i):
print('')
print('Detected COCO dataset {} {}'.format(tp, ver))
- env['CM_DATASET_COCO_DETECTED'] = 'yes'
- env['CM_DATASET_COCO_PATH'] = path
+ env['MLC_DATASET_COCO_DETECTED'] = 'yes'
+ env['MLC_DATASET_COCO_PATH'] = path
else:
- ver = env['CM_DATASET_COCO_VERSION']
- tp = env['CM_DATASET_COCO_TYPE']
+ ver = env['MLC_DATASET_COCO_VERSION']
+ tp = env['MLC_DATASET_COCO_TYPE']
# Prepare URL
- size = env.get('CM_DATASET_COCO_SIZE', '')
+ size = env.get('MLC_DATASET_COCO_SIZE', '')
if size == 'small' and tp == 'val' and ver == '2017':
# We prepared a small version with 50 images for val 2017
@@ -60,8 +60,8 @@ def preprocess(i):
filename_annotation)
else:
- url_data = env['CM_DATASET_COCO_URL_DATA']
- url_ann = env['CM_DATASET_COCO_URL_ANNOTATIONS']
+ url_data = env['MLC_DATASET_COCO_URL_DATA']
+ url_ann = env['MLC_DATASET_COCO_URL_ANNOTATIONS']
filename_data = tp + ver + '.zip'
filename_annotation = 'annotations_trainval' + ver + '.zip'
@@ -80,7 +80,7 @@ def preprocess(i):
'extra_cache_tags': download_extra_cache_tags
}
- path_from = env.get('CM_FROM', '')
+ path_from = env.get('MLC_FROM', '')
if path_from != '':
path_from_data = os.path.join(path_from, filename_data)
if not os.path.isfile(path_from_data):
@@ -94,12 +94,12 @@ def preprocess(i):
path_from_annotation)}
dae_input_annotation['local_path'] = path_from_annotation
- path_to = env.get('CM_TO', '')
+ path_to = env.get('MLC_TO', '')
if path_to != '':
dae_input_data['extract_path'] = path_to
dae_input_annotation['extract_path'] = path_to
- path_store = env.get('CM_STORE', '')
+ path_store = env.get('MLC_STORE', '')
if path_store != '':
dae_input_data['download_path'] = path_store
dae_input_data['tags'] = '_keep'
@@ -116,11 +116,11 @@ def preprocess(i):
return r
# Prepare environment variables
- env['CM_DATASET_COCO_VERSION'] = ver
- env['CM_DATASET_COCO_TYPE'] = tp
- env['CM_DATASET_COCO_TYPE_AND_VERSION'] = tp + ver
- env['CM_DATASET_COCO_URL_DATA_FULL'] = url_data_full
- env['CM_DATASET_COCO_URL_ANNOTATIONS_FULL'] = url_ann_full
+ env['MLC_DATASET_COCO_VERSION'] = ver
+ env['MLC_DATASET_COCO_TYPE'] = tp
+ env['MLC_DATASET_COCO_TYPE_AND_VERSION'] = tp + ver
+ env['MLC_DATASET_COCO_URL_DATA_FULL'] = url_data_full
+ env['MLC_DATASET_COCO_URL_ANNOTATIONS_FULL'] = url_ann_full
# Check MD5SUM
md5sum_data = ''
@@ -136,9 +136,9 @@ def preprocess(i):
md5sum_ann = 'f4bbac642086de4f52a3fdda2de5fa2c'
if md5sum_data != '':
- env['CM_DATASET_COCO_MD5SUM_DATA'] = md5sum_data
+ env['MLC_DATASET_COCO_MD5SUM_DATA'] = md5sum_data
if md5sum_ann != '':
- env['CM_DATASET_COCO_MD5SUM_ANN'] = md5sum_ann
+ env['MLC_DATASET_COCO_MD5SUM_ANN'] = md5sum_ann
if not detected:
print('')
@@ -160,25 +160,25 @@ def postprocess(i):
env = i['env']
- tp_ver = env['CM_DATASET_COCO_TYPE_AND_VERSION']
+ tp_ver = env['MLC_DATASET_COCO_TYPE_AND_VERSION']
- path_to = env.get('CM_TO', '')
+ path_to = env.get('MLC_TO', '')
# Check if detected or downloaded
- if env.get('CM_DATASET_COCO_DETECTED',
+ if env.get('MLC_DATASET_COCO_DETECTED',
'').lower() == 'yes' or path_to != '':
- path_all = env['CM_DATASET_COCO_PATH'] if path_to == '' else path_to
+ path_all = env['MLC_DATASET_COCO_PATH'] if path_to == '' else path_to
- env['CM_DATASET_COCO_DATA_PATH'] = os.path.join(path_all, tp_ver)
- env['CM_DATASET_COCO_ANNOTATIONS_PATH'] = os.path.join(
+ env['MLC_DATASET_COCO_DATA_PATH'] = os.path.join(path_all, tp_ver)
+ env['MLC_DATASET_COCO_ANNOTATIONS_PATH'] = os.path.join(
path_all, 'annotations')
else:
path_all = os.getcwd()
# Moving 2 directories to 1 place
- path_data = env['CM_DATASET_COCO_DATA_PATH']
- path_ann = env['CM_DATASET_COCO_ANNOTATIONS_PATH']
+ path_data = env['MLC_DATASET_COCO_DATA_PATH']
+ path_ann = env['MLC_DATASET_COCO_ANNOTATIONS_PATH']
print('')
print(path_all)
@@ -192,8 +192,8 @@ def postprocess(i):
command1 = ' move /y ' + path_data_full + ' ' + tp_ver
command2 = ' move /y ' + path_ann_full + ' annotations'
- env['CM_DATASET_COCO_DATA_PATH'] = os.path.join(path_all, tp_ver)
- env['CM_DATASET_COCO_ANNOTATIONS_PATH'] = os.path.join(
+ env['MLC_DATASET_COCO_DATA_PATH'] = os.path.join(path_all, tp_ver)
+ env['MLC_DATASET_COCO_ANNOTATIONS_PATH'] = os.path.join(
path_all, 'annotations')
else:
# Make soft links from data and annotations into 1 directory
@@ -206,8 +206,8 @@ def postprocess(i):
print(command)
os.system(command)
- env['CM_DATASET_COCO_PATH'] = path_all
- env['CM_DATASET_PATH'] = path_all
- env['CM_DATASET_PATH_ROOT'] = path_all
+ env['MLC_DATASET_COCO_PATH'] = path_all
+ env['MLC_DATASET_PATH'] = path_all
+ env['MLC_DATASET_PATH_ROOT'] = path_all
return {'return': 0}
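Note: the MD5 sums set above feed the `MLC_DOWNLOAD_CHECKSUM` fields of the download-and-extract dependencies in the meta.yaml that follows. A small sketch of the verification this implies, using only the standard library; the helper name is illustrative.

```python
import hashlib

# Verify a downloaded archive against the expected MD5, reading in
# 1 MiB chunks so large zips don't need to fit in memory.
def md5_matches(path: str, expected: str) -> bool:
    h = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == expected

# Expected value taken from the hunk above (val2017 data archive):
# md5_matches("val2017.zip", "442b8da7639aecaf257c1dceb8ba8c80")
```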
diff --git a/script/get-dataset-coco/meta.yaml b/script/get-dataset-coco/meta.yaml
index 301d76951..bf842a486 100644
--- a/script/get-dataset-coco/meta.yaml
+++ b/script/get-dataset-coco/meta.yaml
@@ -17,53 +17,53 @@ docker:
- to
skip_run_cmd: 'no'
env:
- CM_DATASET: COCO
- CM_DATASET_COCO_URL_ANNOTATIONS: http://images.cocodataset.org/annotations
- CM_DATASET_COCO_URL_DATA: http://images.cocodataset.org/zips
+ MLC_DATASET: COCO
+ MLC_DATASET_COCO_URL_ANNOTATIONS: http://images.cocodataset.org/annotations
+ MLC_DATASET_COCO_URL_DATA: http://images.cocodataset.org/zips
input_mapping:
- from: CM_FROM
- home: CM_HOME_DIR
- store: CM_STORE
- to: CM_TO
+ from: MLC_FROM
+ home: MLC_HOME_DIR
+ store: MLC_STORE
+ to: MLC_TO
new_env_keys:
-- CM_DATASET_COCO*
-- CM_DATASET_PATH
-- CM_DATASET_PATH_ROOT
+- MLC_DATASET_COCO*
+- MLC_DATASET_PATH
+- MLC_DATASET_PATH_ROOT
prehook_deps:
- env:
- CM_DOWNLOAD_CHECKSUM: <<>>
- CM_DOWNLOAD_FINAL_ENV_NAME: CM_DATASET_COCO_DATA_DOWNLOAD_PATH
- CM_EXTRACT_FINAL_ENV_NAME: CM_DATASET_COCO_DATA_PATH
+ MLC_DOWNLOAD_CHECKSUM: <<>>
+ MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_DATASET_COCO_DATA_DOWNLOAD_PATH
+ MLC_EXTRACT_FINAL_ENV_NAME: MLC_DATASET_COCO_DATA_PATH
force_cache: true
names:
- get-dataset-coco-data
- 746e5dad5e784ad6
skip_if_env:
- CM_DATASET_COCO_DETECTED:
+ MLC_DATASET_COCO_DETECTED:
- 'yes'
skip_if_fake_run: true
tags: download-and-extract,file,_wget,_extract
update_tags_from_env_with_prefix:
_url.:
- - CM_DATASET_COCO_URL_DATA_FULL
+ - MLC_DATASET_COCO_URL_DATA_FULL
verify: false
- env:
- CM_DOWNLOAD_CHECKSUM: <<>>
- CM_DOWNLOAD_FINAL_ENV_NAME: CM_DATASET_COCO_ANNOTATIONS_DOWNLOAD_PATH
- CM_DOWNLOAD_PATH: <<>>
- CM_EXTRACT_FINAL_ENV_NAME: CM_DATASET_COCO_ANNOTATIONS_PATH
+ MLC_DOWNLOAD_CHECKSUM: <<>>
+ MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_DATASET_COCO_ANNOTATIONS_DOWNLOAD_PATH
+ MLC_DOWNLOAD_PATH: <<>>
+ MLC_EXTRACT_FINAL_ENV_NAME: MLC_DATASET_COCO_ANNOTATIONS_PATH
force_cache: true
names:
- get-dataset-coco-annotations
- edb6cd092ff64171
skip_if_env:
- CM_DATASET_COCO_DETECTED:
+ MLC_DATASET_COCO_DETECTED:
- 'yes'
skip_if_fake_run: true
tags: download-and-extract,file,_wget,_extract
update_tags_from_env_with_prefix:
_url.:
- - CM_DATASET_COCO_URL_ANNOTATIONS_FULL
+ - MLC_DATASET_COCO_URL_ANNOTATIONS_FULL
verify: false
tags:
- get
@@ -75,23 +75,23 @@ variations:
'2017':
default: true
env:
- CM_DATASET_COCO_VERSION: '2017'
+ MLC_DATASET_COCO_VERSION: '2017'
group: version
complete:
default: true
env:
- CM_DATASET_COCO_SIZE: complete
+ MLC_DATASET_COCO_SIZE: complete
group: size
small:
env:
- CM_DATASET_COCO_SIZE: small
+ MLC_DATASET_COCO_SIZE: small
group: size
train:
env:
- CM_DATASET_COCO_TYPE: train
+ MLC_DATASET_COCO_TYPE: train
group: type
val:
default: true
env:
- CM_DATASET_COCO_TYPE: val
+ MLC_DATASET_COCO_TYPE: val
group: type
diff --git a/script/get-dataset-coco2014/customize.py b/script/get-dataset-coco2014/customize.py
index 90a502219..837539efc 100644
--- a/script/get-dataset-coco2014/customize.py
+++ b/script/get-dataset-coco2014/customize.py
@@ -8,33 +8,33 @@ def preprocess(i):
env = i['env']
print("Using MLCommons Inference source from '" +
- env['CM_MLPERF_INFERENCE_SOURCE'] + "'")
+ env['MLC_MLPERF_INFERENCE_SOURCE'] + "'")
run_dir = os.path.join(
- env['CM_MLPERF_INFERENCE_SOURCE'],
+ env['MLC_MLPERF_INFERENCE_SOURCE'],
"text_to_image",
"tools")
- env['CM_RUN_DIR'] = run_dir
+ env['MLC_RUN_DIR'] = run_dir
return {'return': 0}
def postprocess(i):
env = i['env']
- if env.get('CM_GENERATE_SAMPLE_ID', '') == "yes":
- env['CM_COCO2014_SAMPLE_ID_PATH'] = os.path.join(
+ if env.get('MLC_GENERATE_SAMPLE_ID', '') == "yes":
+ env['MLC_COCO2014_SAMPLE_ID_PATH'] = os.path.join(
os.getcwd(), 'sample_ids.txt')
- print(env['CM_COCO2014_SAMPLE_ID_PATH'])
- if env.get('CM_DATASET_CALIBRATION', '') == "no":
- env['CM_DATASET_PATH_ROOT'] = os.getcwd()
- # env['CM_DATASET_PATH'] = os.path.join(os.getcwd(), 'install', 'validation', 'data')
- env['CM_DATASET_CAPTIONS_DIR_PATH'] = os.path.join(
+ print(env['MLC_COCO2014_SAMPLE_ID_PATH'])
+ if env.get('MLC_DATASET_CALIBRATION', '') == "no":
+ env['MLC_DATASET_PATH_ROOT'] = os.getcwd()
+ # env['MLC_DATASET_PATH'] = os.path.join(os.getcwd(), 'install', 'validation', 'data')
+ env['MLC_DATASET_CAPTIONS_DIR_PATH'] = os.path.join(
os.getcwd(), 'captions')
- env['CM_DATASET_LATENTS_DIR_PATH'] = os.path.join(
+ env['MLC_DATASET_LATENTS_DIR_PATH'] = os.path.join(
os.getcwd(), 'latents')
else:
- env['CM_CALIBRATION_DATASET_PATH'] = os.path.join(
+ env['MLC_CALIBRATION_DATASET_PATH'] = os.path.join(
os.getcwd(), 'calibration', 'data')
return {'return': 0}
diff --git a/script/get-dataset-coco2014/meta.yaml b/script/get-dataset-coco2014/meta.yaml
index 39c603642..fa3724f83 100644
--- a/script/get-dataset-coco2014/meta.yaml
+++ b/script/get-dataset-coco2014/meta.yaml
@@ -17,7 +17,7 @@ tags:
- original
default_env:
- CM_DATASET_CALIBRATION: 'no'
+ MLC_DATASET_CALIBRATION: 'no'
deps:
@@ -30,26 +30,26 @@ deps:
- tags: get,generic-python-lib,_package.pandas
- force_env_keys:
- - CM_GIT_*
+ - MLC_GIT_*
names:
- inference-src
tags: mlperf,inference,source
version: master
env:
- CM_DATASET: COCO2014
+ MLC_DATASET: COCO2014
new_env_keys:
-- CM_DATASET_PATH
-- CM_DATASET_PATH_ROOT
-- CM_DATASET_ANNOTATIONS_DIR_PATH
-- CM_DATASET_ANNOTATIONS_FILE_PATH
-- CM_CALIBRATION_DATASET_PATH
-- CM_COCO2014_SAMPLE_ID_PATH
+- MLC_DATASET_PATH
+- MLC_DATASET_PATH_ROOT
+- MLC_DATASET_ANNOTATIONS_DIR_PATH
+- MLC_DATASET_ANNOTATIONS_FILE_PATH
+- MLC_CALIBRATION_DATASET_PATH
+- MLC_COCO2014_SAMPLE_ID_PATH
posthook_deps:
- enable_if_env:
- CM_DATASET_COCO2014_CUSTOM_ANNOTATIONS:
+ MLC_DATASET_COCO2014_CUSTOM_ANNOTATIONS:
- 'yes'
tags: get,coco2014,annotations
@@ -57,38 +57,38 @@ variations:
'50':
default: true
env:
- CM_DATASET_SIZE: '50'
+ MLC_DATASET_SIZE: '50'
group: size
'500':
env:
- CM_DATASET_SIZE: '500'
+ MLC_DATASET_SIZE: '500'
group: size
calibration:
env:
- CM_DATASET_CALIBRATION: 'yes'
+ MLC_DATASET_CALIBRATION: 'yes'
group: dataset-type
custom-annotations:
env:
- CM_DATASET_COCO2014_CUSTOM_ANNOTATIONS: 'yes'
+ MLC_DATASET_COCO2014_CUSTOM_ANNOTATIONS: 'yes'
group: annotations
default-annotations:
default: true
env:
- CM_DATASET_COCO2014_CUSTOM_ANNOTATIONS: 'no'
+ MLC_DATASET_COCO2014_CUSTOM_ANNOTATIONS: 'no'
group: annotations
full:
env:
- CM_DATASET_SIZE: ''
+ MLC_DATASET_SIZE: ''
group: size
size.#:
env:
- CM_DATASET_SIZE: '#'
+ MLC_DATASET_SIZE: '#'
group: size
with-sample-ids:
env:
- CM_GENERATE_SAMPLE_ID: 'yes'
+ MLC_GENERATE_SAMPLE_ID: 'yes'
validation:
default: true
env:
- CM_DATASET_CALIBRATION: 'no'
+ MLC_DATASET_CALIBRATION: 'no'
group: dataset-type
diff --git a/script/get-dataset-coco2014/run.bat b/script/get-dataset-coco2014/run.bat
index 9ac62e6ad..b0aa60f13 100644
--- a/script/get-dataset-coco2014/run.bat
+++ b/script/get-dataset-coco2014/run.bat
@@ -1,21 +1,21 @@
@echo off
set CUR_DIR=%cd%
-set SCRIPT_DIR=%CM_TMP_CURRENT_SCRIPT_PATH%
+set SCRIPT_DIR=%MLC_TMP_CURRENT_SCRIPT_PATH%
if not exist install mkdir install
set INSTALL_DIR=%CUR_DIR%\install
-cd %CM_RUN_DIR%
+cd %MLC_RUN_DIR%
-if not "%CM_DATASET_SIZE%" == "" (
- set MAX_IMAGES=--max-images %CM_DATASET_SIZE% --seed 42
+if not "%MLC_DATASET_SIZE%" == "" (
+ set MAX_IMAGES=--max-images %MLC_DATASET_SIZE% --seed 42
) else (
set MAX_IMAGES=
)
rem TBD - next file doesn't exist in the latest inference - need to check/fix ...
-%CM_PYTHON_BIN% download-coco-2014.py %MAX_IMAGES% --dataset-dir=%INSTALL_DIR% --output-labels=openimages-mlperf.json
+%MLC_PYTHON_BIN% download-coco-2014.py %MAX_IMAGES% --dataset-dir=%INSTALL_DIR% --output-labels=openimages-mlperf.json
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/get-dataset-coco2014/run.sh b/script/get-dataset-coco2014/run.sh
index 3685b161c..a891b2330 100644
--- a/script/get-dataset-coco2014/run.sh
+++ b/script/get-dataset-coco2014/run.sh
@@ -1,17 +1,17 @@
#!/bin/bash
python3() {
- ${CM_PYTHON_BIN_WITH_PATH} "$@"
+ ${MLC_PYTHON_BIN_WITH_PATH} "$@"
}
export -f python3
CUR=${PWD}
INSTALL_DIR=${CUR}
-cd ${CM_RUN_DIR}
+cd ${MLC_RUN_DIR}
-if [[ ${CM_DATASET_CALIBRATION} == "no" ]]; then
- if [ ! -z ${CM_DATASET_SIZE} ]; then
- max_images=" -m ${CM_DATASET_SIZE}"
+if [[ ${MLC_DATASET_CALIBRATION} == "no" ]]; then
+ if [ ! -z ${MLC_DATASET_SIZE} ]; then
+ max_images=" -m ${MLC_DATASET_SIZE}"
else
max_images=""
fi
@@ -32,7 +32,7 @@ else
eval $cmd
test $? -eq 0 || exit $?
fi
-if [[ ${CM_GENERATE_SAMPLE_ID} == "yes" ]]; then
+if [[ ${MLC_GENERATE_SAMPLE_ID} == "yes" ]]; then
cmd="python3 sample_ids.py --tsv-path ${INSTALL_DIR}/captions/captions.tsv --output-path ${INSTALL_DIR}/sample_ids.txt"
echo $cmd
eval $cmd
diff --git a/script/get-dataset-cognata-mlcommons/customize.py b/script/get-dataset-cognata-mlcommons/customize.py
index 471a44cbc..dbb18802f 100644
--- a/script/get-dataset-cognata-mlcommons/customize.py
+++ b/script/get-dataset-cognata-mlcommons/customize.py
@@ -8,7 +8,7 @@ def preprocess(i):
env = i['env']
cm_cache_dataset_path = env.get(
- 'CM_CUSTOM_CACHE_ENTRY_DATASET_MLCOMMONS_COGNATA_PATH', '').strip()
+ 'MLC_CUSTOM_CACHE_ENTRY_DATASET_MLCOMMONS_COGNATA_PATH', '').strip()
res = utils.load_json(
os.path.join(
@@ -16,42 +16,42 @@ def preprocess(i):
'cfg.json'))
cfg = res.get('meta', {})
if cfg.get('imported', False):
- env['CM_DATASET_MLCOMMONS_COGNATA_IMPORTED'] = 'yes'
+ env['MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED'] = 'yes'
- if env.get('CM_ABTF_SCRATCH_PATH_DATASETS', '') != '':
- env['CM_ABTF_SCRATCH_PATH_DATASET_COGNATA'] = os.path.join(
- env['CM_ABTF_SCRATCH_PATH_DATASETS'], "cognata")
- env['CM_ABTF_SCRATCH_PATH_DATASET_COGNATA_TMP'] = os.path.join(
- env['CM_ABTF_SCRATCH_PATH_DATASETS'], "cognata_tmp")
+ if env.get('MLC_ABTF_SCRATCH_PATH_DATASETS', '') != '':
+ env['MLC_ABTF_SCRATCH_PATH_DATASET_COGNATA'] = os.path.join(
+ env['MLC_ABTF_SCRATCH_PATH_DATASETS'], "cognata")
+ env['MLC_ABTF_SCRATCH_PATH_DATASET_COGNATA_TMP'] = os.path.join(
+ env['MLC_ABTF_SCRATCH_PATH_DATASETS'], "cognata_tmp")
- env['CM_DATASET_COGNATA_POC_TEXT_MD5_FILE_PATH'] = os.path.join(
+ env['MLC_DATASET_COGNATA_POC_TEXT_MD5_FILE_PATH'] = os.path.join(
i['run_script_input']['path'], 'checksums', 'cognata_poc.txt')
# Check if user requests path not in CM cache
#
- # --path (env CM_TMP_PATH) shows where to store Cognata data set instead of CM cahe
+ # --path (env MLC_TMP_PATH) shows where to store Cognata data set instead of CM cache
# --import tells CM to import existing Cognata from a given path and skip further download/processing
#
import_path = env.get(
- 'CM_DATASET_MLCOMMONS_COGNATA_IMPORT_PATH',
+ 'MLC_DATASET_MLCOMMONS_COGNATA_IMPORT_PATH',
'').strip()
if import_path != '':
if not os.path.isdir(import_path):
return {'return': 1, 'error': 'directory to import this dataset doesn\'t exist: {}'.format(
import_path)}
- env['CM_DATASET_MLCOMMONS_COGNATA_IMPORTED'] = 'yes'
- env['CM_DATASET_MLCOMMONS_COGNATA_PATH'] = import_path
+ env['MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED'] = 'yes'
+ env['MLC_DATASET_MLCOMMONS_COGNATA_PATH'] = import_path
else:
- path = env.get('CM_TMP_PATH', '')
+ path = env.get('MLC_TMP_PATH', '')
if path != '':
- env['CM_DATASET_MLCOMMONS_COGNATA_IMPORTED'] = 'no'
+ env['MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED'] = 'no'
if not os.path.isdir(path):
os.makedirs(path)
- env['CM_DATASET_MLCOMMONS_COGNATA_PATH'] = path
+ env['MLC_DATASET_MLCOMMONS_COGNATA_PATH'] = path
return {'return': 0}
@@ -65,29 +65,29 @@ def postprocess(i):
cur_dir = os.getcwd()
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
cm_cache_dataset_path = env.get(
- 'CM_CUSTOM_CACHE_ENTRY_DATASET_MLCOMMONS_COGNATA_PATH', '').strip()
+ 'MLC_CUSTOM_CACHE_ENTRY_DATASET_MLCOMMONS_COGNATA_PATH', '').strip()
if not os.path.isdir(cm_cache_dataset_path):
return {
'return': 1, 'error': 'Dataset corrupted - CM cache path not found: {}'.format(cm_cache_dataset_path)}
- if env.get('CM_DATASET_MLCOMMONS_COGNATA_FILE_NAMES', '') == '':
- env['CM_DATASET_MLCOMMONS_COGNATA_PATH'] = os.path.dirname(
- env['CM_CUSTOM_CACHE_ENTRY_DATASET_MLCOMMONS_COGNATA_PATH'])
- env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_DATASET_MLCOMMONS_COGNATA_PATH']
+ if env.get('MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES', '') == '':
+ env['MLC_DATASET_MLCOMMONS_COGNATA_PATH'] = os.path.dirname(
+ env['MLC_CUSTOM_CACHE_ENTRY_DATASET_MLCOMMONS_COGNATA_PATH'])
+ env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_DATASET_MLCOMMONS_COGNATA_PATH']
return {'return': 0}
cm_cache_dataset_cfg_file = os.path.join(cm_cache_dataset_path, 'cfg.json')
- env['CM_DATASET_MLCOMMONS_COGNATA_CFG_FILE'] = cm_cache_dataset_cfg_file
+ env['MLC_DATASET_MLCOMMONS_COGNATA_CFG_FILE'] = cm_cache_dataset_cfg_file
res = utils.load_json(cm_cache_dataset_cfg_file)
cfg = res.get('meta', {})
dataset_path = cfg.get('real_path', '')
- dataset_path_requested = env.get('CM_DATASET_MLCOMMONS_COGNATA_PATH', '')
+ dataset_path_requested = env.get('MLC_DATASET_MLCOMMONS_COGNATA_PATH', '')
if dataset_path == '':
if dataset_path_requested != '':
dataset_path = dataset_path_requested
@@ -102,10 +102,10 @@ def postprocess(i):
print('')
print('Used dataset path: {}'.format(dataset_path))
- env['CM_DATASET_MLCOMMONS_COGNATA_PATH'] = dataset_path
+ env['MLC_DATASET_MLCOMMONS_COGNATA_PATH'] = dataset_path
# If imported, don't process further
- if env.get('CM_DATASET_MLCOMMONS_COGNATA_IMPORTED', '') == 'yes':
+ if env.get('MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED', '') == 'yes':
cfg['imported'] = True
else:
cfg['imported'] = False
@@ -118,7 +118,7 @@ def postprocess(i):
# If processed once, don't process unless forced
if cfg.get('processed', False):
if not utils.check_if_true_yes_on(
- env, 'CM_DATASET_MLCOMMONS_COGNATA_UPDATE'):
+ env, 'MLC_DATASET_MLCOMMONS_COGNATA_UPDATE'):
print('')
print('Already processed: use --update to update this dataset')
@@ -146,7 +146,7 @@ def postprocess(i):
first_url = dataset_meta.get('first_url', '').strip()
if first_url == '':
- x = env.get('CM_DATASET_MLCOMMONS_COGNATA_PRIVATE_URL', '').strip()
+ x = env.get('MLC_DATASET_MLCOMMONS_COGNATA_PRIVATE_URL', '').strip()
if x != '':
first_url = x
else:
@@ -196,12 +196,12 @@ def postprocess(i):
# Parse XLSX and check serial number
serial_numbers = []
for s in env.get(
- 'CM_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS', '').strip().split(','):
+ 'MLC_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS', '').strip().split(','):
s = s.strip()
if s != '' and s not in serial_numbers:
serial_numbers.append(s)
- dataset_key = env['CM_DATASET_MLCOMMONS_COGNATA_KEY1']
+ dataset_key = env['MLC_DATASET_MLCOMMONS_COGNATA_KEY1']
url_key = 'Link to Excel File (Download Links)'
serial_key = 'Serial Number'
@@ -269,14 +269,14 @@ def postprocess(i):
print('Processing subsets ...')
group_names = []
- for s in env.get('CM_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES',
+ for s in env.get('MLC_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES',
'').strip().split(','):
s = s.strip()
if s != '' and s not in group_names:
group_names.append(s)
# Check if force some filenames
- x = env.get('CM_DATASET_MLCOMMONS_COGNATA_FILE_NAMES', '').strip()
+ x = env.get('MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES', '').strip()
file_names = []
if x != '':
file_names = x.split(';') if ';' in x else [x]
@@ -339,7 +339,7 @@ def postprocess(i):
continue
if os.name == 'nt':
- aria2_tool = env['CM_ARIA2_BIN_WITH_PATH']
+ aria2_tool = env['MLC_ARIA2_BIN_WITH_PATH']
else:
aria2_tool = 'aria2c'
@@ -394,7 +394,7 @@ def postprocess(i):
cfg['processed'] = True
utils.save_json(cm_cache_dataset_cfg_file, cfg)
- env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_DATASET_MLCOMMONS_COGNATA_PATH']
+ env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_DATASET_MLCOMMONS_COGNATA_PATH']
return {'return': 0}
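
The postprocess flow above keeps a small `cfg.json` marker inside the cached dataset entry: it loads the file, skips re-processing when `cfg['processed']` is already true (unless `--update` is passed), and writes the flag back once processing succeeds. A minimal standalone sketch of that idempotency pattern, using the stdlib `json` module in place of the `utils.load_json`/`utils.save_json` helpers seen above:

```python
import json
import os

def load_cfg(cfg_file):
    # Mirrors utils.load_json: a missing file just means "never processed".
    if not os.path.isfile(cfg_file):
        return {}
    with open(cfg_file) as f:
        return json.load(f)

def process_once(cache_dir, update=False):
    cfg_file = os.path.join(cache_dir, 'cfg.json')
    cfg = load_cfg(cfg_file)
    if cfg.get('processed', False) and not update:
        print('Already processed: use --update to update this dataset')
        return cfg
    # ... download / unpack work would happen here ...
    cfg['processed'] = True
    with open(cfg_file, 'w') as f:
        json.dump(cfg, f, indent=2)
    return cfg
```
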
diff --git a/script/get-dataset-cognata-mlcommons/meta.yaml b/script/get-dataset-cognata-mlcommons/meta.yaml
index 1b8155d7b..b59662b22 100644
--- a/script/get-dataset-cognata-mlcommons/meta.yaml
+++ b/script/get-dataset-cognata-mlcommons/meta.yaml
@@ -28,16 +28,16 @@ category_sort: 8500
input_mapping:
- update: CM_DATASET_MLCOMMONS_COGNATA_UPDATE
- import: CM_DATASET_MLCOMMONS_COGNATA_IMPORT_PATH
- private_url: CM_DATASET_MLCOMMONS_COGNATA_PRIVATE_URL
- serial_numbers: CM_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS
- group_names: CM_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES
- file_names: CM_DATASET_MLCOMMONS_COGNATA_FILE_NAMES
+ update: MLC_DATASET_MLCOMMONS_COGNATA_UPDATE
+ import: MLC_DATASET_MLCOMMONS_COGNATA_IMPORT_PATH
+ private_url: MLC_DATASET_MLCOMMONS_COGNATA_PRIVATE_URL
+ serial_numbers: MLC_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS
+ group_names: MLC_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES
+ file_names: MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES
env:
- CM_DATASET: MLCOMMONS_COGNATA
- CM_DATASET_MLCOMMONS_COGNATA_KEY1: "Dataset 1.0"
+ MLC_DATASET: MLCOMMONS_COGNATA
+ MLC_DATASET_MLCOMMONS_COGNATA_KEY1: "Dataset 1.0"
@@ -49,51 +49,51 @@ deps:
tags: create,custom,cache,entry
extra_cache_tags: dataset,cognata,mlcommons-cognata
skip_if_env:
- CM_DATASET_MLCOMMONS_COGNATA_FILE_NAMES:
+ MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES:
- 'off'
env_key: DATASET_MLCOMMONS_COGNATA
- # this script will prepare env CM_CUSTOM_CACHE_ENTRY_{env_key}_PATH
+ # this script will prepare env MLC_CUSTOM_CACHE_ENTRY_{env_key}_PATH
prehook_deps:
- names:
- gdrive-downloader-cognata
skip_if_env:
- CM_DATASET_MLCOMMONS_COGNATA_FILE_NAMES:
+ MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES:
- 'on'
enable_if_env:
- CM_DATASET_MLCOMMONS_COGNATA_IMPORTED:
+ MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED:
- 'no'
- CM_DATASET_MLCOMMONS_COGNATA_DOWNLOAD_TOOL:
+ MLC_DATASET_MLCOMMONS_COGNATA_DOWNLOAD_TOOL:
- gdrive
tags: download,file,_gdown,_url.https://drive.google.com/drive/folders/1FS-qLbzB5htgMnfry6z4gx8J_ZH_7MsJ?usp=drive_link
env:
- CM_DOWNLOAD_EXTRA_OPTIONS: " --folder"
- CM_DOWNLOAD_FILENAME: 10002_Urban_Clear_Morning
- CM_DOWNLOAD_FINAL_ENV_NAME: CM_CUSTOM_CACHE_ENTRY_DATASET_MLCOMMONS_COGNATA_PATH
+ MLC_DOWNLOAD_EXTRA_OPTIONS: " --folder"
+ MLC_DOWNLOAD_FILENAME: 10002_Urban_Clear_Morning
+ MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_CUSTOM_CACHE_ENTRY_DATASET_MLCOMMONS_COGNATA_PATH
force_cache: true
extra_cache_tags: abtf,cognata,poc,dataset
- names:
- rclone-downloader-cognata
skip_if_env:
- CM_DATASET_MLCOMMONS_COGNATA_FILE_NAMES:
+ MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES:
- 'on'
enable_if_env:
- CM_DATASET_MLCOMMONS_COGNATA_IMPORTED:
+ MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED:
- 'no'
- CM_DATASET_MLCOMMONS_COGNATA_DOWNLOAD_TOOL:
+ MLC_DATASET_MLCOMMONS_COGNATA_DOWNLOAD_TOOL:
- rclone
tags: download-and-extract,file,_extract,_rclone,_url.https://automotive.mlcommons-storage.org/Cognata_Dataset_PoC_Demo%2F10002_Urban_Clear_Morning.zip
env:
- CM_RCLONE_COPY_USING: copyurl
- CM_RCLONE_CONFIG_CMD: ''
- CM_DOWNLOAD_CHECKSUM: '76389b05b0ee1e08d354d3c1b696b8c0'
- CM_EXTRACT_EXTRACTED_CHECKSUM_FILE: "<<>>"
- CM_DOWNLOAD_PATH: <<>>
- CM_EXTRACT_PATH: <<>>
- CM_EXTRACT_EXTRACTED_FILENAME: 10002_Urban_Clear_Morning
- CM_DAE_FINAL_ENV_NAME: CM_CUSTOM_CACHE_ENTRY_DATASET_MLCOMMONS_COGNATA_PATH
+ MLC_RCLONE_COPY_USING: copyurl
+ MLC_RCLONE_CONFIG_CMD: ''
+ MLC_DOWNLOAD_CHECKSUM: '76389b05b0ee1e08d354d3c1b696b8c0'
+ MLC_EXTRACT_EXTRACTED_CHECKSUM_FILE: "<<>>"
+ MLC_DOWNLOAD_PATH: <<>>
+ MLC_EXTRACT_PATH: <<>>
+ MLC_EXTRACT_EXTRACTED_FILENAME: 10002_Urban_Clear_Morning
+ MLC_DAE_FINAL_ENV_NAME: MLC_CUSTOM_CACHE_ENTRY_DATASET_MLCOMMONS_COGNATA_PATH
force_cache: true
extra_cache_tags: abtf,cognata,poc,dataset
@@ -102,28 +102,28 @@ prehook_deps:
- python3
tags: get,python3
skip_if_env:
- CM_DATASET_MLCOMMONS_COGNATA_IMPORTED:
+ MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED:
- 'yes'
enable_if_env:
- CM_DATASET_MLCOMMONS_COGNATA_FILE_NAMES:
+ MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES:
- 'on'
# Python package to read/write Excel files
- tags: get,generic-python-lib,_package.openpyxl
skip_if_env:
- CM_DATASET_MLCOMMONS_COGNATA_IMPORTED:
+ MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED:
- 'yes'
enable_if_env:
- CM_DATASET_MLCOMMONS_COGNATA_FILE_NAMES:
+ MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES:
- 'on'
# Tool to download large files
- tags: get,aria2
skip_if_env:
- CM_DATASET_MLCOMMONS_COGNATA_IMPORTED:
+ MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED:
- 'yes'
enable_if_env:
- CM_DATASET_MLCOMMONS_COGNATA_FILE_NAMES:
+ MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES:
- 'on'
@@ -131,31 +131,31 @@ variations:
abtf-demo:
group: dataset-type
env:
- CM_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS: "10002_Urban_Clear_Morning"
- CM_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES: "Cognata_Camera_01_8M"
- CM_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: "Cognata_Camera_01_8M_ann.zip;Cognata_Camera_01_8M_ann_laneline.zip;Cognata_Camera_01_8M.zip"
+ MLC_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS: "10002_Urban_Clear_Morning"
+ MLC_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES: "Cognata_Camera_01_8M"
+ MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: "Cognata_Camera_01_8M_ann.zip;Cognata_Camera_01_8M_ann_laneline.zip;Cognata_Camera_01_8M.zip"
abtf-poc:
group: dataset-type
default: true
env:
- CM_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS: "10002_Urban_Clear_Morning"
- CM_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES: "Cognata_Camera_01_8M"
- CM_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: ""
+ MLC_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS: "10002_Urban_Clear_Morning"
+ MLC_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES: "Cognata_Camera_01_8M"
+ MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: ""
rclone:
group: download-tool
default: true
env:
- CM_DATASET_MLCOMMONS_COGNATA_DOWNLOAD_TOOL: rclone
+ MLC_DATASET_MLCOMMONS_COGNATA_DOWNLOAD_TOOL: rclone
gdrive:
group: download-tool
env:
- CM_DATASET_MLCOMMONS_COGNATA_DOWNLOAD_TOOL: gdrive
+ MLC_DATASET_MLCOMMONS_COGNATA_DOWNLOAD_TOOL: gdrive
new_env_keys:
-- CM_DATASET_MLCOMMONS_COGNATA*
+- MLC_DATASET_MLCOMMONS_COGNATA*
print_env_at_the_end:
- CM_DATASET_MLCOMMONS_COGNATA_PATH: Path to Cognata dataset
+ MLC_DATASET_MLCOMMONS_COGNATA_PATH: Path to Cognata dataset
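
Worth noting in the meta above: each downloader dep carries paired `skip_if_env`/`enable_if_env` blocks, so for a given `MLC_DATASET_MLCOMMONS_COGNATA_DOWNLOAD_TOOL` value at most one of the gdrive/rclone branches fires. A rough sketch of that gating semantics, assuming (as the YAML suggests) that a dep runs only when every `enable_if_env` key matches one of its listed values and no `skip_if_env` key does:

```python
def dep_enabled(env, enable_if_env=None, skip_if_env=None):
    # Every enable_if_env key must hold one of the allowed values.
    for key, allowed in (enable_if_env or {}).items():
        if str(env.get(key, '')) not in [str(v) for v in allowed]:
            return False
    # Any skip_if_env match disables the dep.
    for key, blocked in (skip_if_env or {}).items():
        if str(env.get(key, '')) in [str(v) for v in blocked]:
            return False
    return True

env = {'MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED': 'no',
       'MLC_DATASET_MLCOMMONS_COGNATA_DOWNLOAD_TOOL': 'rclone'}
print(dep_enabled(env,
                  enable_if_env={'MLC_DATASET_MLCOMMONS_COGNATA_DOWNLOAD_TOOL': ['rclone']},
                  skip_if_env={'MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES': ['on']}))  # True
```
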
diff --git a/script/get-dataset-criteo/README-extra.md b/script/get-dataset-criteo/README-extra.md
index 345a59cfe..efe669715 100644
--- a/script/get-dataset-criteo/README-extra.md
+++ b/script/get-dataset-criteo/README-extra.md
@@ -2,7 +2,7 @@
This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) downloads the 24 days of Criteo dataset for MLPerf inference using DLRM.
## Exported Variables
-* `CM_DATASET_PATH`
+* `MLC_DATASET_PATH`
## Supported and Tested OS
1. Ubuntu 18.04, 20.04, 22.04
diff --git a/script/get-dataset-criteo/meta.yaml b/script/get-dataset-criteo/meta.yaml
index 06bdd335c..772e48fb8 100644
--- a/script/get-dataset-criteo/meta.yaml
+++ b/script/get-dataset-criteo/meta.yaml
@@ -4,13 +4,13 @@ automation_uid: 5b4e0237da074764
cache: true
category: AI/ML datasets
default_env:
- CM_BACKUP_ZIPS: 'no'
+ MLC_BACKUP_ZIPS: 'no'
env:
- CM_DATASET: terabyte
+ MLC_DATASET: terabyte
input_mapping:
- criteo_path: CM_CRITEO_PATH
+ criteo_path: MLC_CRITEO_PATH
new_env_keys:
-- CM_DATASET*
+- MLC_DATASET*
tags:
- get
- dataset
@@ -20,7 +20,7 @@ uid: 194a47d908714897
variations:
backup:
env:
- CM_BACKUP_ZIPS: 'yes'
+ MLC_BACKUP_ZIPS: 'yes'
fake:
env:
- CM_CRITEO_FAKE: 'yes'
+ MLC_CRITEO_FAKE: 'yes'
diff --git a/script/get-dataset-criteo/run.sh b/script/get-dataset-criteo/run.sh
index 32a1c777f..b6f321d2f 100644
--- a/script/get-dataset-criteo/run.sh
+++ b/script/get-dataset-criteo/run.sh
@@ -1,14 +1,14 @@
#!/bin/bash
-if [ ! -z ${CM_CRITEO_PATH+x} ]; then
- echo "CM_DATASET_PATH=${CM_CRITEO_PATH}" > tmp-run-env.out
+if [ ! -z ${MLC_CRITEO_PATH+x} ]; then
+ echo "MLC_DATASET_PATH=${MLC_CRITEO_PATH}" > tmp-run-env.out
test $? -eq 0 || exit 1
exit 0
fi
CUR=$PWD
-if [[ ${CM_CRITEO_FAKE} == "yes" ]]; then
- cd ${CM_MLPERF_INFERENCE_DLRM_PATH}/pytorch/tools
+if [[ ${MLC_CRITEO_FAKE} == "yes" ]]; then
+ cd ${MLC_MLPERF_INFERENCE_DLRM_PATH}/pytorch/tools
bash ./make_fake_criteo.sh terabyte
mv ./fake_criteo/* $CUR/
cd $CUR
@@ -16,11 +16,11 @@ else
curl -O -C - https://storage.googleapis.com/criteo-cail-datasets/day_{`seq -s "," 0 23`}.gz
test $? -eq 0 || exit 1
- if [ ${CM_BACKUP_ZIPS:-no} == "yes" ]; then
+ if [ ${MLC_BACKUP_ZIPS:-no} == "yes" ]; then
mkdir backup
cp -r *.gz backup/
fi
yes n | gunzip -k day_{0..23}.gz
fi
-echo "CM_DATASET_PATH=$PWD" > tmp-run-env.out
+echo "MLC_DATASET_PATH=$PWD" > tmp-run-env.out
diff --git a/script/get-dataset-igbh/customize.py b/script/get-dataset-igbh/customize.py
index c454d415d..d64d701ba 100644
--- a/script/get-dataset-igbh/customize.py
+++ b/script/get-dataset-igbh/customize.py
@@ -12,46 +12,46 @@ def preprocess(i):
return {'return': 1, 'error': 'Script not supported in windows yet!'}
print("Using MLCommons Inference source from '" +
- env['CM_MLPERF_INFERENCE_SOURCE'] + "'")
+ env['MLC_MLPERF_INFERENCE_SOURCE'] + "'")
# run cmd
run_cmd = ""
graph_folder = os.path.join(
- env['CM_MLPERF_INFERENCE_SOURCE'], 'graph', 'R-GAT')
+ env['MLC_MLPERF_INFERENCE_SOURCE'], 'graph', 'R-GAT')
- if env.get('CM_DATASET_IGBH_PATH',
+ if env.get('MLC_DATASET_IGBH_PATH',
'') != '': # skip download, just register in cache
- env['CM_DATASET_IGBH_OUT_PATH'] = env['CM_DATASET_IGBH_PATH']
+ env['MLC_DATASET_IGBH_OUT_PATH'] = env['MLC_DATASET_IGBH_PATH']
return {'return': 0}
- download_loc = env.get('CM_DATASET_IGBH_OUT_PATH', os.getcwd())
+ download_loc = env.get('MLC_DATASET_IGBH_OUT_PATH', os.getcwd())
- env['CM_DATASET_IGBH_DOWNLOAD_LOCATION'] = download_loc
+ env['MLC_DATASET_IGBH_DOWNLOAD_LOCATION'] = download_loc
run_cmd += f"cd {graph_folder} "
x_sep = " && "
# download the model
- if env['CM_DATASET_IGBH_TYPE'] == "debug":
- run_cmd += x_sep + env['CM_PYTHON_BIN_WITH_PATH'] + \
+ if env['MLC_DATASET_IGBH_TYPE'] == "debug":
+ run_cmd += x_sep + env['MLC_PYTHON_BIN_WITH_PATH'] + \
f" tools/download_igbh_test.py --target-path {download_loc} "
else:
- env['CM_DATASET_IGBH_FULL_DOWNLOAD'] = 'yes'
+ env['MLC_DATASET_IGBH_FULL_DOWNLOAD'] = 'yes'
# split seeds
run_cmd += x_sep + \
f"""{
- env['CM_PYTHON_BIN_WITH_PATH']} tools/split_seeds.py --path {download_loc} --dataset_size {
- env['CM_DATASET_IGBH_SIZE']} """
+ env['MLC_PYTHON_BIN_WITH_PATH']} tools/split_seeds.py --path {download_loc} --dataset_size {
+ env['MLC_DATASET_IGBH_SIZE']} """
# compress graph(for glt implementation)
- if env.get('CM_IGBH_GRAPH_COMPRESS', '') == "yes":
+ if env.get('MLC_IGBH_GRAPH_COMPRESS', '') == "yes":
run_cmd += x_sep + \
- f"""{env['CM_PYTHON_BIN_WITH_PATH']} tools/compress_graph.py --path {download_loc} --dataset_size {env['CM_DATASET_IGBH_SIZE']} --layout {env['CM_IGBH_GRAPH_COMPRESS_LAYOUT']}
+ f"""{env['MLC_PYTHON_BIN_WITH_PATH']} tools/compress_graph.py --path {download_loc} --dataset_size {env['MLC_DATASET_IGBH_SIZE']} --layout {env['MLC_IGBH_GRAPH_COMPRESS_LAYOUT']}
"""
- env['CM_RUN_CMD'] = run_cmd
+ env['MLC_RUN_CMD'] = run_cmd
return {'return': 0}
@@ -60,10 +60,10 @@ def postprocess(i):
env = i['env']
- env['CM_DATASET_IGBH_PATH'] = env.get(
- 'CM_DATASET_IGBH_OUT_PATH', os.getcwd())
+ env['MLC_DATASET_IGBH_PATH'] = env.get(
+ 'MLC_DATASET_IGBH_OUT_PATH', os.getcwd())
print(
- f"Path to the IGBH dataset: {os.path.join(env['CM_DATASET_IGBH_PATH'], env['CM_DATASET_IGBH_SIZE'])}")
+ f"Path to the IGBH dataset: {os.path.join(env['MLC_DATASET_IGBH_PATH'], env['MLC_DATASET_IGBH_SIZE'])}")
return {'return': 0}
diff --git a/script/get-dataset-igbh/meta.yaml b/script/get-dataset-igbh/meta.yaml
index 8e5c7b4cd..430fd3075 100644
--- a/script/get-dataset-igbh/meta.yaml
+++ b/script/get-dataset-igbh/meta.yaml
@@ -11,10 +11,10 @@ tags:
- inference
uid: 824e61316c074253
new_env_keys:
- - CM_DATASET_IGBH_PATH
- - CM_DATASET_IGBH_SIZE
+ - MLC_DATASET_IGBH_PATH
+ - MLC_DATASET_IGBH_SIZE
input_mapping:
- out_path: CM_DATASET_IGBH_OUT_PATH
+ out_path: MLC_DATASET_IGBH_OUT_PATH
env:
SKIP_USER_PROMPT: yes
deps:
@@ -32,16 +32,16 @@ deps:
prehook_deps:
#paper
- env:
- CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper/node_feat.npy
- CM_DOWNLOAD_CHECKSUM: 71058b9ac8011bafa1c5467504452d13
- CM_DOWNLOAD_FILENAME: node_feat.npy
- CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/paper/
+ MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper/node_feat.npy
+ MLC_DOWNLOAD_CHECKSUM: 71058b9ac8011bafa1c5467504452d13
+ MLC_DOWNLOAD_FILENAME: node_feat.npy
+ MLC_DOWNLOAD_PATH: <<<MLC_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/paper/
extra_cache_tags: dataset,igbh,paper,node_feat
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
force_cache: true
enable_if_env:
- CM_DATASET_IGBH_FULL_DOWNLOAD:
+ MLC_DATASET_IGBH_FULL_DOWNLOAD:
- 'yes'
names:
- dae
@@ -49,18 +49,18 @@ prehook_deps:
tags: download-and-extract,_wget
update_tags_from_env_with_prefix:
_url.:
- - CM_PACKAGE_URL
+ - MLC_PACKAGE_URL
- env:
- CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper/node_label_19.npy
- CM_DOWNLOAD_CHECKSUM: be6fda45566e679bdb05ebea98ad16d4
- CM_DOWNLOAD_FILENAME: node_label_19.npy
- CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/paper/
+ MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper/node_label_19.npy
+ MLC_DOWNLOAD_CHECKSUM: be6fda45566e679bdb05ebea98ad16d4
+ MLC_DOWNLOAD_FILENAME: node_label_19.npy
+ MLC_DOWNLOAD_PATH: <<<MLC_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/paper/
extra_cache_tags: dataset,igbh,paper,node_label_19
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
force_cache: true
enable_if_env:
- CM_DATASET_IGBH_FULL_DOWNLOAD:
+ MLC_DATASET_IGBH_FULL_DOWNLOAD:
- 'yes'
names:
- dae
@@ -68,18 +68,18 @@ prehook_deps:
tags: download-and-extract,_wget
update_tags_from_env_with_prefix:
_url.:
- - CM_PACKAGE_URL
+ - MLC_PACKAGE_URL
- env:
- CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper/node_label_2K.npy
- CM_DOWNLOAD_CHECKSUM: 6eccab9a14f92f42be5b367c39002031
- CM_DOWNLOAD_FILENAME: node_label_2K.npy
- CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/paper/
+ MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper/node_label_2K.npy
+ MLC_DOWNLOAD_CHECKSUM: 6eccab9a14f92f42be5b367c39002031
+ MLC_DOWNLOAD_FILENAME: node_label_2K.npy
+ MLC_DOWNLOAD_PATH: <<<MLC_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/paper/
extra_cache_tags: dataset,igbh,paper,node_label_2K
force_cache: true
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
enable_if_env:
- CM_DATASET_IGBH_FULL_DOWNLOAD:
+ MLC_DATASET_IGBH_FULL_DOWNLOAD:
- 'yes'
names:
- dae
@@ -87,18 +87,18 @@ prehook_deps:
tags: download-and-extract,_wget
update_tags_from_env_with_prefix:
_url.:
- - CM_PACKAGE_URL
+ - MLC_PACKAGE_URL
- env:
- CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper/paper_id_index_mapping.npy
- CM_DOWNLOAD_CHECKSUM: f70dd642a4f7e41d926c91c8c054fc4c
- CM_DOWNLOAD_FILENAME: paper_id_index_mapping.npy
- CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/paper/
+ MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper/paper_id_index_mapping.npy
+ MLC_DOWNLOAD_CHECKSUM: f70dd642a4f7e41d926c91c8c054fc4c
+ MLC_DOWNLOAD_FILENAME: paper_id_index_mapping.npy
+ MLC_DOWNLOAD_PATH: <<<MLC_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/paper/
extra_cache_tags: dataset,igbh,paper,paper_id_index_mapping
force_cache: true
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
enable_if_env:
- CM_DATASET_IGBH_FULL_DOWNLOAD:
+ MLC_DATASET_IGBH_FULL_DOWNLOAD:
- 'yes'
names:
- dae
@@ -106,19 +106,19 @@ prehook_deps:
tags: download-and-extract,_wget
update_tags_from_env_with_prefix:
_url.:
- - CM_PACKAGE_URL
+ - MLC_PACKAGE_URL
#paper_cites_paper
- env:
- CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__cites__paper/edge_index.npy
- CM_DOWNLOAD_CHECKSUM: f4897f53636c04a9c66f6063ec635c16
- CM_DOWNLOAD_FILENAME: edge_index.npy
- CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/paper__cites__paper/
+ MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__cites__paper/edge_index.npy
+ MLC_DOWNLOAD_CHECKSUM: f4897f53636c04a9c66f6063ec635c16
+ MLC_DOWNLOAD_FILENAME: edge_index.npy
+ MLC_DOWNLOAD_PATH: <<<MLC_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/paper__cites__paper/
extra_cache_tags: dataset,igbh,paper_cites_paper,edge_index
force_cache: true
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
enable_if_env:
- CM_DATASET_IGBH_FULL_DOWNLOAD:
+ MLC_DATASET_IGBH_FULL_DOWNLOAD:
- 'yes'
names:
- dae
@@ -126,19 +126,19 @@ prehook_deps:
tags: download-and-extract,_wget
update_tags_from_env_with_prefix:
_url.:
- - CM_PACKAGE_URL
+ - MLC_PACKAGE_URL
# author
- env:
- CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/author/author_id_index_mapping.npy
- CM_DOWNLOAD_CHECKSUM: 58c15aab7dae03bbd57e6a4ac5e61bd9
- CM_DOWNLOAD_FILENAME: author_id_index_mapping.npy
- CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/author/
+ MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/author/author_id_index_mapping.npy
+ MLC_DOWNLOAD_CHECKSUM: 58c15aab7dae03bbd57e6a4ac5e61bd9
+ MLC_DOWNLOAD_FILENAME: author_id_index_mapping.npy
+ MLC_DOWNLOAD_PATH: <<<MLC_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/author/
extra_cache_tags: dataset,igbh,author,author_id_index_mapping
force_cache: true
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
enable_if_env:
- CM_DATASET_IGBH_FULL_DOWNLOAD:
+ MLC_DATASET_IGBH_FULL_DOWNLOAD:
- 'yes'
names:
- dae
@@ -146,18 +146,18 @@ prehook_deps:
tags: download-and-extract,_wget
update_tags_from_env_with_prefix:
_url.:
- - CM_PACKAGE_URL
+ - MLC_PACKAGE_URL
- env:
- CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/author/node_feat.npy
- CM_DOWNLOAD_CHECKSUM: 2ec2512b554088381c04ec013e893c8d
- CM_DOWNLOAD_FILENAME: node_feat.npy
- CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/author/
+ MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/author/node_feat.npy
+ MLC_DOWNLOAD_CHECKSUM: 2ec2512b554088381c04ec013e893c8d
+ MLC_DOWNLOAD_FILENAME: node_feat.npy
+ MLC_DOWNLOAD_PATH: <<<MLC_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/author/
extra_cache_tags: dataset,igbh,author,node_feat
force_cache: true
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
enable_if_env:
- CM_DATASET_IGBH_FULL_DOWNLOAD:
+ MLC_DATASET_IGBH_FULL_DOWNLOAD:
- 'yes'
names:
- dae
@@ -165,19 +165,19 @@ prehook_deps:
tags: download-and-extract,_wget
update_tags_from_env_with_prefix:
_url.:
- - CM_PACKAGE_URL
+ - MLC_PACKAGE_URL
# conference
- env:
- CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/conference/conference_id_index_mapping.npy
- CM_DOWNLOAD_CHECKSUM: 0bf7c555d8c697b31b6af6c4cb6b6612
- CM_DOWNLOAD_FILENAME: conference_id_index_mapping.npy
- CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/conference/
+ MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/conference/conference_id_index_mapping.npy
+ MLC_DOWNLOAD_CHECKSUM: 0bf7c555d8c697b31b6af6c4cb6b6612
+ MLC_DOWNLOAD_FILENAME: conference_id_index_mapping.npy
+ MLC_DOWNLOAD_PATH: <<<MLC_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/conference/
extra_cache_tags: dataset,igbh,conference,conference_id_index_mapping
force_cache: true
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
enable_if_env:
- CM_DATASET_IGBH_FULL_DOWNLOAD:
+ MLC_DATASET_IGBH_FULL_DOWNLOAD:
- 'yes'
names:
- dae
@@ -185,18 +185,18 @@ prehook_deps:
tags: download-and-extract,_wget
update_tags_from_env_with_prefix:
_url.:
- - CM_PACKAGE_URL
+ - MLC_PACKAGE_URL
- env:
- CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/conference/node_feat.npy
- CM_DOWNLOAD_CHECKSUM: 898ff529b8cf972261fedd50df6377f8
- CM_DOWNLOAD_FILENAME: node_feat.npy
- CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/conference/
+ MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/conference/node_feat.npy
+ MLC_DOWNLOAD_CHECKSUM: 898ff529b8cf972261fedd50df6377f8
+ MLC_DOWNLOAD_FILENAME: node_feat.npy
+ MLC_DOWNLOAD_PATH: <<<MLC_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/conference/
extra_cache_tags: dataset,igbh,conference,node_feat
force_cache: true
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
enable_if_env:
- CM_DATASET_IGBH_FULL_DOWNLOAD:
+ MLC_DATASET_IGBH_FULL_DOWNLOAD:
- 'yes'
names:
- dae
@@ -204,19 +204,19 @@ prehook_deps:
tags: download-and-extract,_wget
update_tags_from_env_with_prefix:
_url.:
- - CM_PACKAGE_URL
+ - MLC_PACKAGE_URL
# institute
- env:
- CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/institute/institute_id_index_mapping.npy
- CM_DOWNLOAD_CHECKSUM: 03fb45eafb7bd35875ef4c7cd2a299a9
- CM_DOWNLOAD_FILENAME: institute_id_index_mapping.npy
- CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/institute/
+ MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/institute/institute_id_index_mapping.npy
+ MLC_DOWNLOAD_CHECKSUM: 03fb45eafb7bd35875ef4c7cd2a299a9
+ MLC_DOWNLOAD_FILENAME: institute_id_index_mapping.npy
+ MLC_DOWNLOAD_PATH: <<<MLC_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/institute/
extra_cache_tags: dataset,igbh,institute,institute_id_index_mapping
force_cache: true
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
enable_if_env:
- CM_DATASET_IGBH_FULL_DOWNLOAD:
+ MLC_DATASET_IGBH_FULL_DOWNLOAD:
- 'yes'
names:
- dae
@@ -224,18 +224,18 @@ prehook_deps:
tags: download-and-extract,_wget
update_tags_from_env_with_prefix:
_url.:
- - CM_PACKAGE_URL
+ - MLC_PACKAGE_URL
- env:
- CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/institute/node_feat.npy
- CM_DOWNLOAD_CHECKSUM: 12eaeced22d17b4e97d4b4742331c819
- CM_DOWNLOAD_FILENAME: node_feat.npy
- CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/institute/
+ MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/institute/node_feat.npy
+ MLC_DOWNLOAD_CHECKSUM: 12eaeced22d17b4e97d4b4742331c819
+ MLC_DOWNLOAD_FILENAME: node_feat.npy
+ MLC_DOWNLOAD_PATH: <<<MLC_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/institute/
extra_cache_tags: dataset,igbh,institute,node_feat
force_cache: true
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
enable_if_env:
- CM_DATASET_IGBH_FULL_DOWNLOAD:
+ MLC_DATASET_IGBH_FULL_DOWNLOAD:
- 'yes'
names:
- dae
@@ -243,19 +243,19 @@ prehook_deps:
tags: download-and-extract,_wget
update_tags_from_env_with_prefix:
_url.:
- - CM_PACKAGE_URL
+ - MLC_PACKAGE_URL
# journal
- env:
- CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/journal/journal_id_index_mapping.npy
- CM_DOWNLOAD_CHECKSUM: b630c20852b76d17a5c9c37b39176f69
- CM_DOWNLOAD_FILENAME: journal_id_index_mapping.npy
- CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/journal/
+ MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/journal/journal_id_index_mapping.npy
+ MLC_DOWNLOAD_CHECKSUM: b630c20852b76d17a5c9c37b39176f69
+ MLC_DOWNLOAD_FILENAME: journal_id_index_mapping.npy
+ MLC_DOWNLOAD_PATH: <<<MLC_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/journal/
extra_cache_tags: dataset,igbh,journal,journal_id_index_mapping
force_cache: true
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
enable_if_env:
- CM_DATASET_IGBH_FULL_DOWNLOAD:
+ MLC_DATASET_IGBH_FULL_DOWNLOAD:
- 'yes'
names:
- dae
@@ -263,18 +263,18 @@ prehook_deps:
tags: download-and-extract,_wget
update_tags_from_env_with_prefix:
_url.:
- - CM_PACKAGE_URL
+ - MLC_PACKAGE_URL
- env:
- CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/journal/node_feat.npy
- CM_DOWNLOAD_CHECKSUM: 49d51b554b3004f10bee19d1c7f9b416
- CM_DOWNLOAD_FILENAME: node_feat.npy
- CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/journal/
+ MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/journal/node_feat.npy
+ MLC_DOWNLOAD_CHECKSUM: 49d51b554b3004f10bee19d1c7f9b416
+ MLC_DOWNLOAD_FILENAME: node_feat.npy
+ MLC_DOWNLOAD_PATH: <<<MLC_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/journal/
extra_cache_tags: dataset,igbh,journal,node_feat
force_cache: true
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
enable_if_env:
- CM_DATASET_IGBH_FULL_DOWNLOAD:
+ MLC_DATASET_IGBH_FULL_DOWNLOAD:
- 'yes'
names:
- dae
@@ -282,19 +282,19 @@ prehook_deps:
tags: download-and-extract,_wget
update_tags_from_env_with_prefix:
_url.:
- - CM_PACKAGE_URL
+ - MLC_PACKAGE_URL
# fos
- env:
- CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/fos/fos_id_index_mapping.npy
- CM_DOWNLOAD_CHECKSUM: 0f0cfde619361cde35d3be9f201d081a
- CM_DOWNLOAD_FILENAME: fos_id_index_mapping.npy
- CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/fos/
+ MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/fos/fos_id_index_mapping.npy
+ MLC_DOWNLOAD_CHECKSUM: 0f0cfde619361cde35d3be9f201d081a
+ MLC_DOWNLOAD_FILENAME: fos_id_index_mapping.npy
+ MLC_DOWNLOAD_PATH: <<<MLC_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/fos/
extra_cache_tags: dataset,igbh,fos,fos_id_index_mapping
force_cache: true
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
enable_if_env:
- CM_DATASET_IGBH_FULL_DOWNLOAD:
+ MLC_DATASET_IGBH_FULL_DOWNLOAD:
- 'yes'
names:
- dae
@@ -302,18 +302,18 @@ prehook_deps:
tags: download-and-extract,_wget
update_tags_from_env_with_prefix:
_url.:
- - CM_PACKAGE_URL
+ - MLC_PACKAGE_URL
- env:
- CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/fos/node_feat.npy
- CM_DOWNLOAD_CHECKSUM: 3ef3df19e2475c387fec10bac82773df
- CM_DOWNLOAD_FILENAME: node_feat.npy
- CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/fos/
+ MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/fos/node_feat.npy
+ MLC_DOWNLOAD_CHECKSUM: 3ef3df19e2475c387fec10bac82773df
+ MLC_DOWNLOAD_FILENAME: node_feat.npy
+ MLC_DOWNLOAD_PATH: <<<MLC_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/fos/
extra_cache_tags: dataset,igbh,fos,node_feat
force_cache: true
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
enable_if_env:
- CM_DATASET_IGBH_FULL_DOWNLOAD:
+ MLC_DATASET_IGBH_FULL_DOWNLOAD:
- 'yes'
names:
- dae
@@ -321,19 +321,19 @@ prehook_deps:
tags: download-and-extract,_wget
update_tags_from_env_with_prefix:
_url.:
- - CM_PACKAGE_URL
+ - MLC_PACKAGE_URL
# author__affiliated_to__institute
- env:
- CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/author__affiliated_to__institute/edge_index.npy
- CM_DOWNLOAD_CHECKSUM: e35dba208f81e0987207f78787c75711
- CM_DOWNLOAD_FILENAME: edge_index.npy
- CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/author__affiliated_to__institute/
+ MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/author__affiliated_to__institute/edge_index.npy
+ MLC_DOWNLOAD_CHECKSUM: e35dba208f81e0987207f78787c75711
+ MLC_DOWNLOAD_FILENAME: edge_index.npy
+ MLC_DOWNLOAD_PATH: <<<MLC_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/author__affiliated_to__institute/
extra_cache_tags: dataset,igbh,author_affiliated_to_institute,edge_index
force_cache: true
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
enable_if_env:
- CM_DATASET_IGBH_FULL_DOWNLOAD:
+ MLC_DATASET_IGBH_FULL_DOWNLOAD:
- 'yes'
names:
- dae
@@ -341,19 +341,19 @@ prehook_deps:
tags: download-and-extract,_wget
update_tags_from_env_with_prefix:
_url.:
- - CM_PACKAGE_URL
+ - MLC_PACKAGE_URL
# paper__published__journal
- env:
- CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__published__journal/edge_index.npy
- CM_DOWNLOAD_CHECKSUM: 38505e83bde8e5cf94ae0a85afa60e13
- CM_DOWNLOAD_FILENAME: edge_index.npy
- CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/paper__published__journal/
+ MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__published__journal/edge_index.npy
+ MLC_DOWNLOAD_CHECKSUM: 38505e83bde8e5cf94ae0a85afa60e13
+ MLC_DOWNLOAD_FILENAME: edge_index.npy
+ MLC_DOWNLOAD_PATH: <<<MLC_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/paper__published__journal/
extra_cache_tags: dataset,igbh,paper_published_journal,edge_index
force_cache: true
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
enable_if_env:
- CM_DATASET_IGBH_FULL_DOWNLOAD:
+ MLC_DATASET_IGBH_FULL_DOWNLOAD:
- 'yes'
names:
- dae
@@ -361,19 +361,19 @@ prehook_deps:
tags: download-and-extract,_wget
update_tags_from_env_with_prefix:
_url.:
- - CM_PACKAGE_URL
+ - MLC_PACKAGE_URL
# paper__topic__fos
- env:
- CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__topic__fos/edge_index.npy
- CM_DOWNLOAD_CHECKSUM: 427fb350a248ee6eaa8c21cde942fda4
- CM_DOWNLOAD_FILENAME: edge_index.npy
- CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/paper__topic__fos/
+ MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__topic__fos/edge_index.npy
+ MLC_DOWNLOAD_CHECKSUM: 427fb350a248ee6eaa8c21cde942fda4
+ MLC_DOWNLOAD_FILENAME: edge_index.npy
+ MLC_DOWNLOAD_PATH: <<<MLC_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/paper__topic__fos/
extra_cache_tags: dataset,igbh,paper_topic_fos,edge_index
force_cache: true
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
enable_if_env:
- CM_DATASET_IGBH_FULL_DOWNLOAD:
+ MLC_DATASET_IGBH_FULL_DOWNLOAD:
- 'yes'
names:
- dae
@@ -381,19 +381,19 @@ prehook_deps:
tags: download-and-extract,_wget
update_tags_from_env_with_prefix:
_url.:
- - CM_PACKAGE_URL
+ - MLC_PACKAGE_URL
# paper__venue__conference
- env:
- CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__venue__conference/edge_index.npy
- CM_DOWNLOAD_CHECKSUM: 541b8d43cd93579305cfb71961e10a7d
- CM_DOWNLOAD_FILENAME: edge_index.npy
- CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/paper__venue__conference/
+ MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__venue__conference/edge_index.npy
+ MLC_DOWNLOAD_CHECKSUM: 541b8d43cd93579305cfb71961e10a7d
+ MLC_DOWNLOAD_FILENAME: edge_index.npy
+ MLC_DOWNLOAD_PATH: <<<MLC_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/paper__venue__conference/
extra_cache_tags: dataset,igbh,paper_venue_conference,edge_index
force_cache: true
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
enable_if_env:
- CM_DATASET_IGBH_FULL_DOWNLOAD:
+ MLC_DATASET_IGBH_FULL_DOWNLOAD:
- 'yes'
names:
- dae
@@ -401,19 +401,19 @@ prehook_deps:
tags: download-and-extract,_wget
update_tags_from_env_with_prefix:
_url.:
- - CM_PACKAGE_URL
+ - MLC_PACKAGE_URL
# paper__written_by__author
- env:
- CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__written_by__author/edge_index.npy
- CM_DOWNLOAD_CHECKSUM: df39fe44bbcec93a640400e6d81ffcb5
- CM_DOWNLOAD_FILENAME: edge_index.npy
- CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/paper__written_by__author/
+ MLC_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__written_by__author/edge_index.npy
+ MLC_DOWNLOAD_CHECKSUM: df39fe44bbcec93a640400e6d81ffcb5
+ MLC_DOWNLOAD_FILENAME: edge_index.npy
+ MLC_DOWNLOAD_PATH: <<<MLC_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/paper__written_by__author/
extra_cache_tags: dataset,igbh,paper_written_by_author,edge_index
force_cache: true
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
enable_if_env:
- CM_DATASET_IGBH_FULL_DOWNLOAD:
+ MLC_DATASET_IGBH_FULL_DOWNLOAD:
- 'yes'
names:
- dae
@@ -421,28 +421,28 @@ prehook_deps:
tags: download-and-extract,_wget
update_tags_from_env_with_prefix:
_url.:
- - CM_PACKAGE_URL
+ - MLC_PACKAGE_URL
variations:
debug:
default: true
group: dataset-type
env:
- CM_DATASET_IGBH_TYPE: debug
- CM_DATASET_IGBH_SIZE: tiny
+ MLC_DATASET_IGBH_TYPE: debug
+ MLC_DATASET_IGBH_SIZE: tiny
full:
group: dataset-type
env:
- CM_DATASET_IGBH_TYPE: full
- CM_DATASET_IGBH_SIZE: full
+ MLC_DATASET_IGBH_TYPE: full
+ MLC_DATASET_IGBH_SIZE: full
glt:
env:
- CM_IGBH_GRAPH_COMPRESS: yes
+ MLC_IGBH_GRAPH_COMPRESS: yes
csc:
group: compressed-layout
default: true
env:
- CM_IGBH_GRAPH_COMPRESS_LAYOUT: csc
+ MLC_IGBH_GRAPH_COMPRESS_LAYOUT: csc
csr:
group: compressed-layout
env:
- CM_IGBH_GRAPH_COMPRESS_LAYOUT: csr
+ MLC_IGBH_GRAPH_COMPRESS_LAYOUT: csr
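
A recurring pattern in the meta above: every download dep declares `update_tags_from_env_with_prefix` with `_url.` pointing at MLC_PACKAGE_URL, which (as far as this diff shows) appends a `_url.<value>` variation tag to the generic `download-and-extract,_wget` dep when it is resolved. A hedged sketch of that tag expansion:

```python
def expand_tags(base_tags, env, update_tags_from_env_with_prefix):
    # base_tags: list like ['download-and-extract', '_wget'].
    tags = list(base_tags)
    for prefix, env_keys in update_tags_from_env_with_prefix.items():
        for key in env_keys:
            value = env.get(key, '')
            if value != '':
                tags.append(prefix + value)
    return tags

env = {'MLC_PACKAGE_URL':
       'https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper/node_feat.npy'}
print(expand_tags(['download-and-extract', '_wget'], env,
                  {'_url.': ['MLC_PACKAGE_URL']}))
# ['download-and-extract', '_wget', '_url.https://igb-public.s3...node_feat.npy']
```
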
diff --git a/script/get-dataset-igbh/run.sh b/script/get-dataset-igbh/run.sh
index 238652160..edb705045 100644
--- a/script/get-dataset-igbh/run.sh
+++ b/script/get-dataset-igbh/run.sh
@@ -1,11 +1,11 @@
#!/bin/bash
-#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+#MLC Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH}
#To export any variable
#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
-#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
@@ -21,4 +21,4 @@ function run() {
exit_if_error
}
-run "$CM_RUN_CMD"
+run "$MLC_RUN_CMD"
diff --git a/script/get-dataset-imagenet-aux/meta.yaml b/script/get-dataset-imagenet-aux/meta.yaml
index c5944aedf..00036303f 100644
--- a/script/get-dataset-imagenet-aux/meta.yaml
+++ b/script/get-dataset-imagenet-aux/meta.yaml
@@ -4,21 +4,21 @@ automation_uid: 5b4e0237da074764
cache: true
category: AI/ML datasets
new_env_keys:
-- CM_DATASET_AUX_*
+- MLC_DATASET_AUX_*
prehook_deps:
- env:
- CM_DOWNLOAD_URL: <<<CM_PACKAGE_URL>>>
- CM_DOWNLOAD_URL1: <<<CM_PACKAGE_URL1>>>
- CM_EXTRACT_EXTRACTED_FILENAME: <<>>
- CM_EXTRACT_FINAL_ENV_NAME: CM_DATASET_AUX_PATH
+ MLC_DOWNLOAD_URL: <<<MLC_PACKAGE_URL>>>
+ MLC_DOWNLOAD_URL1: <<<MLC_PACKAGE_URL1>>>
+ MLC_EXTRACT_EXTRACTED_FILENAME: <<>>
+ MLC_EXTRACT_FINAL_ENV_NAME: MLC_DATASET_AUX_PATH
extra_cache_tags: imagenet-aux,dataset-aux
force_cache: true
tags: download-and-extract,_extract,_wget
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
update_tags_from_env_with_prefix:
_url.:
- - CM_PACKAGE_URL
+ - MLC_PACKAGE_URL
tags:
- get
- aux
@@ -29,26 +29,26 @@ uid: bb2c6dd8c8c64217
variations:
'2012':
env:
- CM_DATASET_AUX_VER: '2012'
+ MLC_DATASET_AUX_VER: '2012'
from.berkeleyvision:
base:
- '2012'
default: true
env:
- CM_DOWNLOAD_CHECKSUM: f963098ea0e785a968ca1eb634003a90
- CM_DOWNLOAD_CHECKSUM1: ee346d67141e476df9c1a3f813552503
- CM_PACKAGE_URL: http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz
- CM_PACKAGE_URL1: https://www.dropbox.com/s/92n2fyej3lzy3s3/caffe_ilsvrc12.tar.gz
+ MLC_DOWNLOAD_CHECKSUM: f963098ea0e785a968ca1eb634003a90
+ MLC_DOWNLOAD_CHECKSUM1: ee346d67141e476df9c1a3f813552503
+ MLC_PACKAGE_URL: http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz
+ MLC_PACKAGE_URL1: https://www.dropbox.com/s/92n2fyej3lzy3s3/caffe_ilsvrc12.tar.gz
group: download-source
from.dropbox:
base:
- '2012'
env:
- CM_DOWNLOAD_CHECKSUM: ee346d67141e476df9c1a3f813552503
- CM_DOWNLOAD_CHECKSUM1: f963098ea0e785a968ca1eb634003a90
- CM_PACKAGE_URL: https://www.dropbox.com/s/92n2fyej3lzy3s3/caffe_ilsvrc12.tar.gz
- CM_PACKAGE_URL1: http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz
+ MLC_DOWNLOAD_CHECKSUM: ee346d67141e476df9c1a3f813552503
+ MLC_DOWNLOAD_CHECKSUM1: f963098ea0e785a968ca1eb634003a90
+ MLC_PACKAGE_URL: https://www.dropbox.com/s/92n2fyej3lzy3s3/caffe_ilsvrc12.tar.gz
+ MLC_PACKAGE_URL1: http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz
group: download-source
skip_ssl_verification:
env:
- CM_VERIFY_SSL: 'False'
+ MLC_VERIFY_SSL: 'False'
diff --git a/script/get-dataset-imagenet-calibration/meta.yaml b/script/get-dataset-imagenet-calibration/meta.yaml
index 7e499146a..dd61e9976 100644
--- a/script/get-dataset-imagenet-calibration/meta.yaml
+++ b/script/get-dataset-imagenet-calibration/meta.yaml
@@ -11,16 +11,16 @@ category: "AI/ML datasets"
deps:
- tags: download,file
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
force_cache: true
extra_cache_tags: imagenet-calibration,imagenet,calibration
names:
- calibration-file-downloader
env:
- CM_DOWNLOAD_FINAL_ENV_NAME: CM_MLPERF_IMAGENET_CALIBRATION_LIST_FILE_WITH_PATH
+ MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_MLPERF_IMAGENET_CALIBRATION_LIST_FILE_WITH_PATH
new_env_keys:
-- CM_MLPERF_IMAGENET_CALIBRATION_LIST_FILE_WITH_PATH
+- MLC_MLPERF_IMAGENET_CALIBRATION_LIST_FILE_WITH_PATH
tags:
- get
@@ -33,16 +33,16 @@ variations:
group: calibration-option
default: true
env:
- CM_MLPERF_IMAGENET_CALIBRATION_OPTION: one
- CM_DOWNLOAD_CHECKSUM: f09719174af3553119e2c621157773a6
+ MLC_MLPERF_IMAGENET_CALIBRATION_OPTION: one
+ MLC_DOWNLOAD_CHECKSUM: f09719174af3553119e2c621157773a6
adr:
calibration-file-downloader:
tags: _url.https://github.com/mlcommons/inference/raw/master/calibration/ImageNet/cal_image_list_option_1.txt
mlperf.option2:
group: calibration-option
env:
- CM_MLPERF_IMAGENET_CALIBRATION_OPTION: two
- CM_DOWNLOAD_CHECKSUM: e44582af00e3b4fc3fac30efd6bdd05f
+ MLC_MLPERF_IMAGENET_CALIBRATION_OPTION: two
+ MLC_DOWNLOAD_CHECKSUM: e44582af00e3b4fc3fac30efd6bdd05f
adr:
calibration-file-downloader:
tags: _url.https://github.com/mlcommons/inference/raw/master/calibration/ImageNet/cal_image_list_option_2.txt
diff --git a/script/get-dataset-imagenet-helper/customize.py b/script/get-dataset-imagenet-helper/customize.py
index 911411db4..05ab824cd 100644
--- a/script/get-dataset-imagenet-helper/customize.py
+++ b/script/get-dataset-imagenet-helper/customize.py
@@ -5,9 +5,9 @@
def postprocess(i):
env = i['env']
- script_path = env['CM_TMP_CURRENT_SCRIPT_PATH']
+ script_path = env['MLC_TMP_CURRENT_SCRIPT_PATH']
- env['CM_DATASET_IMAGENET_HELPER_PATH'] = script_path
+ env['MLC_DATASET_IMAGENET_HELPER_PATH'] = script_path
env['+PYTHONPATH'] = [script_path]
return {'return': 0}
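
One detail above: `+PYTHONPATH` is list-valued, and the leading `+` appears to mark env keys whose entries are aggregated across scripts rather than overwritten (the helper also exports it via new_env_keys below). A small sketch of that merge rule, under the assumption that `+`-prefixed keys concatenate unique entries:

```python
def merge_env(base, update):
    # '+KEY' entries extend a list; plain keys overwrite.
    merged = dict(base)
    for key, value in update.items():
        if key.startswith('+'):
            existing = merged.get(key, [])
            merged[key] = existing + [v for v in value if v not in existing]
        else:
            merged[key] = value
    return merged

print(merge_env({'+PYTHONPATH': ['/opt/helpers']},
                {'+PYTHONPATH': ['/cache/get-dataset-imagenet-helper']}))
# {'+PYTHONPATH': ['/opt/helpers', '/cache/get-dataset-imagenet-helper']}
```
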
diff --git a/script/get-dataset-imagenet-helper/imagenet_helper/__init__.py b/script/get-dataset-imagenet-helper/imagenet_helper/__init__.py
index aa90deefd..83e5dbded 100644
--- a/script/get-dataset-imagenet-helper/imagenet_helper/__init__.py
+++ b/script/get-dataset-imagenet-helper/imagenet_helper/__init__.py
@@ -6,35 +6,35 @@
# Processing in batches:
#
-BATCH_SIZE = int(os.getenv('CM_BATCH_SIZE', 1))
+BATCH_SIZE = int(os.getenv('MLC_BATCH_SIZE', 1))
# Model properties:
#
-MODEL_IMAGE_HEIGHT = int(os.getenv('CM_ML_MODEL_IMAGE_HEIGHT',
- os.getenv('CM_ONNX_MODEL_IMAGE_HEIGHT',
- os.getenv('CM_TENSORFLOW_MODEL_IMAGE_HEIGHT',
+MODEL_IMAGE_HEIGHT = int(os.getenv('MLC_ML_MODEL_IMAGE_HEIGHT',
+ os.getenv('MLC_ONNX_MODEL_IMAGE_HEIGHT',
+ os.getenv('MLC_TENSORFLOW_MODEL_IMAGE_HEIGHT',
''))))
-MODEL_IMAGE_WIDTH = int(os.getenv('CM_ML_MODEL_IMAGE_WIDTH',
- os.getenv('CM_ONNX_MODEL_IMAGE_WIDTH',
- os.getenv('CM_TENSORFLOW_MODEL_IMAGE_WIDTH',
+MODEL_IMAGE_WIDTH = int(os.getenv('MLC_ML_MODEL_IMAGE_WIDTH',
+ os.getenv('MLC_ONNX_MODEL_IMAGE_WIDTH',
+ os.getenv('MLC_TENSORFLOW_MODEL_IMAGE_WIDTH',
''))))
-MODEL_IMAGE_CHANNELS = int(os.getenv('CM_ML_MODEL_IMAGE_CHANNELS', 3))
-MODEL_DATA_LAYOUT = os.getenv('CM_ML_MODEL_DATA_LAYOUT', 'NCHW')
+MODEL_IMAGE_CHANNELS = int(os.getenv('MLC_ML_MODEL_IMAGE_CHANNELS', 3))
+MODEL_DATA_LAYOUT = os.getenv('MLC_ML_MODEL_DATA_LAYOUT', 'NCHW')
MODEL_COLOURS_BGR = os.getenv(
- 'CM_ML_MODEL_COLOUR_CHANNELS_BGR', 'NO') in (
+ 'MLC_ML_MODEL_COLOUR_CHANNELS_BGR', 'NO') in (
'YES', 'yes', 'ON', 'on', '1')
-MODEL_INPUT_DATA_TYPE = os.getenv('CM_ML_MODEL_INPUT_DATA_TYPE', 'float32')
-MODEL_DATA_TYPE = os.getenv('CM_ML_MODEL_DATA_TYPE', '(unknown)')
+MODEL_INPUT_DATA_TYPE = os.getenv('MLC_ML_MODEL_INPUT_DATA_TYPE', 'float32')
+MODEL_DATA_TYPE = os.getenv('MLC_ML_MODEL_DATA_TYPE', '(unknown)')
MODEL_USE_DLA = os.getenv(
- 'CM_ML_MODEL_USE_DLA',
+ 'MLC_ML_MODEL_USE_DLA',
'NO') in (
'YES',
'yes',
'ON',
'on',
'1')
-MODEL_MAX_BATCH_SIZE = int(os.getenv('CM_ML_MODEL_MAX_BATCH_SIZE', BATCH_SIZE))
+MODEL_MAX_BATCH_SIZE = int(os.getenv('MLC_ML_MODEL_MAX_BATCH_SIZE', BATCH_SIZE))
# Internal processing:
@@ -45,14 +45,14 @@
# Image normalization:
#
-MODEL_NORMALIZE_DATA = os.getenv('CM_ML_MODEL_NORMALIZE_DATA') in (
+MODEL_NORMALIZE_DATA = os.getenv('MLC_ML_MODEL_NORMALIZE_DATA') in (
'YES', 'yes', 'ON', 'on', '1')
-MODEL_NORMALIZE_LOWER = float(os.getenv('CM_ML_MODEL_NORMALIZE_LOWER', -1.0))
-MODEL_NORMALIZE_UPPER = float(os.getenv('CM_ML_MODEL_NORMALIZE_UPPER', 1.0))
+MODEL_NORMALIZE_LOWER = float(os.getenv('MLC_ML_MODEL_NORMALIZE_LOWER', -1.0))
+MODEL_NORMALIZE_UPPER = float(os.getenv('MLC_ML_MODEL_NORMALIZE_UPPER', 1.0))
SUBTRACT_MEAN = os.getenv(
- 'CM_ML_MODEL_SUBTRACT_MEANS', 'YES') in (
+ 'MLC_ML_MODEL_SUBTRACT_MEANS', 'YES') in (
'YES', 'yes', 'ON', 'on', '1')
-GIVEN_CHANNEL_MEANS = os.getenv('CM_ML_MODEL_GIVEN_CHANNEL_MEANS', '')
+GIVEN_CHANNEL_MEANS = os.getenv('MLC_ML_MODEL_GIVEN_CHANNEL_MEANS', '')
if GIVEN_CHANNEL_MEANS:
GIVEN_CHANNEL_MEANS = np.fromstring(
GIVEN_CHANNEL_MEANS,
@@ -62,7 +62,7 @@
# swapping Red and Blue colour channels
GIVEN_CHANNEL_MEANS = GIVEN_CHANNEL_MEANS[::-1]
-GIVEN_CHANNEL_STDS = os.getenv('CM_ML_MODEL_GIVEN_CHANNEL_STDS', '')
+GIVEN_CHANNEL_STDS = os.getenv('MLC_ML_MODEL_GIVEN_CHANNEL_STDS', '')
if GIVEN_CHANNEL_STDS:
GIVEN_CHANNEL_STDS = np.fromstring(
GIVEN_CHANNEL_STDS,
@@ -75,13 +75,13 @@
# ImageNet dataset properties:
#
-LABELS_PATH = os.environ['CM_CAFFE_IMAGENET_SYNSET_WORDS_TXT']
+LABELS_PATH = os.environ['MLC_CAFFE_IMAGENET_SYNSET_WORDS_TXT']
# Preprocessed input images' properties:
#
-IMAGE_DIR = os.getenv('CM_DATASET_PREPROCESSED_PATH')
-IMAGE_DATA_TYPE = os.getenv('CM_DATASET_PREPROCESSED_DATA_TYPE', 'float32')
+IMAGE_DIR = os.getenv('MLC_DATASET_PREPROCESSED_PATH')
+IMAGE_DATA_TYPE = os.getenv('MLC_DATASET_PREPROCESSED_DATA_TYPE', 'float32')
def load_labels(labels_filepath):
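
The renamed module above resolves most knobs through nested `os.getenv` fallbacks (model-generic, then ONNX-specific, then TensorFlow-specific), which raise ValueError via `int('')` when nothing is set. The same chain reads more clearly with a small helper; a sketch (`getenv_first` is hypothetical, not part of the module, and defaults to 0 instead of raising):

```python
import os

def getenv_first(names, default=''):
    # Return the first environment variable in `names` that is set and non-empty.
    for name in names:
        value = os.getenv(name, '')
        if value != '':
            return value
    return default

MODEL_IMAGE_HEIGHT = int(getenv_first(['MLC_ML_MODEL_IMAGE_HEIGHT',
                                       'MLC_ONNX_MODEL_IMAGE_HEIGHT',
                                       'MLC_TENSORFLOW_MODEL_IMAGE_HEIGHT'],
                                      default='0'))
```
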
diff --git a/script/get-dataset-imagenet-helper/meta.yaml b/script/get-dataset-imagenet-helper/meta.yaml
index a6ab0e7c8..e81aa6e1a 100644
--- a/script/get-dataset-imagenet-helper/meta.yaml
+++ b/script/get-dataset-imagenet-helper/meta.yaml
@@ -5,7 +5,7 @@ cache: true
category: AI/ML datasets
new_env_keys:
- +PYTHONPATH
-- CM_DATASET_IMAGENET_HELPER_PATH
+- MLC_DATASET_IMAGENET_HELPER_PATH
tags:
- get
- imagenet
diff --git a/script/get-dataset-imagenet-train/customize.py b/script/get-dataset-imagenet-train/customize.py
index 5a760c895..fb6c67f66 100644
--- a/script/get-dataset-imagenet-train/customize.py
+++ b/script/get-dataset-imagenet-train/customize.py
@@ -13,16 +13,16 @@ def preprocess(i):
if os_info['platform'] == 'windows':
return {'return': 0}
- env['CM_DATASET_IMAGENET_TRAIN_REQUIRE_DAE'] = 'no'
+ env['MLC_DATASET_IMAGENET_TRAIN_REQUIRE_DAE'] = 'no'
- path = env.get('CM_INPUT', env.get('IMAGENET_TRAIN_PATH', '')).strip()
+ path = env.get('MLC_INPUT', env.get('IMAGENET_TRAIN_PATH', '')).strip()
if path == '':
- if env.get('CM_DATASET_IMAGENET_TRAIN_TORRENT_PATH'):
- path = env['CM_DATASET_IMAGENET_TRAIN_TORRENT_PATH']
- env['CM_DAE_EXTRA_TAGS'] = "_torrent"
- env['CM_DAE_TORRENT_PATH'] = path
- env['CM_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'yes'
+ if env.get('MLC_DATASET_IMAGENET_TRAIN_TORRENT_PATH'):
+ path = env['MLC_DATASET_IMAGENET_TRAIN_TORRENT_PATH']
+ env['MLC_DAE_EXTRA_TAGS'] = "_torrent"
+ env['MLC_DAE_TORRENT_PATH'] = path
+ env['MLC_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'yes'
return {'return': 0}
@@ -31,15 +31,15 @@ def preprocess(i):
elif not os.path.isdir(path):
if path.endswith(".tar"):
- # env['CM_DAE_FILEPATH'] = path
- env['CM_EXTRACT_FILEPATH'] = path
- env['CM_DAE_ONLY_EXTRACT'] = 'yes'
+ # env['MLC_DAE_FILEPATH'] = path
+ env['MLC_EXTRACT_FILEPATH'] = path
+ env['MLC_DAE_ONLY_EXTRACT'] = 'yes'
return {'return': 0}
else:
return {'return': 1,
'error': 'Path {} doesn\'t exist'.format(path)}
else:
- env['CM_EXTRACT_EXTRACTED_PATH'] = path
+ env['MLC_EXTRACT_EXTRACTED_PATH'] = path
return {'return': 0}
@@ -52,7 +52,7 @@ def postprocess(i):
env = i['env']
- path = env['CM_EXTRACT_EXTRACTED_PATH']
+ path = env['MLC_EXTRACT_EXTRACTED_PATH']
path_tar = os.path.join(path, 'n01440764.tar')
@@ -60,10 +60,10 @@ def postprocess(i):
return {'return': 1,
'error': 'ImageNet file {} not found'.format(path_tar)}
- env['CM_DATASET_PATH'] = path
- env['CM_DATASET_IMAGENET_PATH'] = path
- env['CM_DATASET_IMAGENET_TRAIN_PATH'] = path
+ env['MLC_DATASET_PATH'] = path
+ env['MLC_DATASET_IMAGENET_PATH'] = path
+ env['MLC_DATASET_IMAGENET_TRAIN_PATH'] = path
- env['CM_GET_DEPENDENT_CACHED_PATH'] = path
+ env['MLC_GET_DEPENDENT_CACHED_PATH'] = path
return {'return': 0}
diff --git a/script/get-dataset-imagenet-train/meta.yaml b/script/get-dataset-imagenet-train/meta.yaml
index 3d1a1bd22..1b9d88575 100644
--- a/script/get-dataset-imagenet-train/meta.yaml
+++ b/script/get-dataset-imagenet-train/meta.yaml
@@ -7,36 +7,36 @@ deps: []
input_description: {}
input_mapping:
input: IMAGENET_TRAIN_PATH
- torrent: CM_DATASET_IMAGENET_TRAIN_TORRENT_PATH
+ torrent: MLC_DATASET_IMAGENET_TRAIN_TORRENT_PATH
new_env_keys:
-- CM_DATASET_PATH
-- CM_DATASET_IMAGENET_*
+- MLC_DATASET_PATH
+- MLC_DATASET_IMAGENET_*
new_state_keys: []
post_deps: []
posthook_deps: []
prehook_deps:
- enable_if_env:
- CM_DATASET_IMAGENET_VAL_REQUIRE_DAE:
+ MLC_DATASET_IMAGENET_VAL_REQUIRE_DAE:
- 'yes'
- 'True'
env:
- CM_EXTRACT_TO_FOLDER: imagenet-2012-train
+ MLC_EXTRACT_TO_FOLDER: imagenet-2012-train
tags: download-and-extract,file,_extract
update_tags_from_env:
- - CM_DAE_EXTRA_TAGS
+ - MLC_DAE_EXTRA_TAGS
update_tags_from_env_with_prefix:
_url.:
- - CM_DAE_URL
+ - MLC_DAE_URL
- enable_if_env:
- CM_DAE_ONLY_EXTRACT:
+ MLC_DAE_ONLY_EXTRACT:
- 'yes'
- 'True'
env:
- CM_EXTRACT_TO_FOLDER: imagenet-2012-train
+ MLC_EXTRACT_TO_FOLDER: imagenet-2012-train
tags: file,extract
update_tags_from_env_with_prefix:
_path.:
- - CM_EXTRACT_PATH
+ - MLC_EXTRACT_PATH
tags:
- get
- imagenet
diff --git a/script/get-dataset-imagenet-val/customize.py b/script/get-dataset-imagenet-val/customize.py
index e11648adf..b2f9a389d 100644
--- a/script/get-dataset-imagenet-val/customize.py
+++ b/script/get-dataset-imagenet-val/customize.py
@@ -11,32 +11,32 @@ def preprocess(i):
meta = i['meta']
os_info = i['os_info']
- env['CM_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'no'
+ env['MLC_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'no'
- full = env.get('CM_IMAGENET_FULL', '').strip() == 'yes'
+ full = env.get('MLC_IMAGENET_FULL', '').strip() == 'yes'
path = env.get(
- 'CM_INPUT',
+ 'MLC_INPUT',
env.get(
'IMAGENET_PATH',
env.get(
- 'CM_DATASET_IMAGENET_PATH',
+ 'MLC_DATASET_IMAGENET_PATH',
''))).strip()
if path == '':
if full:
- if env.get('CM_DATASET_IMAGENET_VAL_TORRENT_PATH'):
- path = env['CM_DATASET_IMAGENET_VAL_TORRENT_PATH']
- env['CM_DAE_EXTRA_TAGS'] = "_torrent"
- env['CM_DAE_TORRENT_PATH'] = path
- env['CM_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'yes'
+ if env.get('MLC_DATASET_IMAGENET_VAL_TORRENT_PATH'):
+ path = env['MLC_DATASET_IMAGENET_VAL_TORRENT_PATH']
+ env['MLC_DAE_EXTRA_TAGS'] = "_torrent"
+ env['MLC_DAE_TORRENT_PATH'] = path
+ env['MLC_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'yes'
return {'return': 0}
else:
- env['CM_DAE_URL'] = 'https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar'
- env['CM_DAE_FILENAME'] = 'ILSVRC2012_img_val.tar'
- env['CM_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'yes'
+ env['MLC_DAE_URL'] = 'https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar'
+ env['MLC_DAE_FILENAME'] = 'ILSVRC2012_img_val.tar'
+ env['MLC_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'yes'
return {'return': 0}
# return {'return':1, 'error':'Please rerun the last CM command
@@ -46,18 +46,18 @@ def preprocess(i):
# images}'}
else:
- env['CM_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'yes'
+ env['MLC_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'yes'
elif not os.path.isdir(path):
if path.endswith(".tar"):
- env['CM_EXTRACT_FILEPATH'] = path
- env['CM_DAE_ONLY_EXTRACT'] = 'yes'
+ env['MLC_EXTRACT_FILEPATH'] = path
+ env['MLC_DAE_ONLY_EXTRACT'] = 'yes'
return {'return': 0}
else:
return {'return': 1,
'error': 'Path {} doesn\'t exist'.format(path)}
else:
- env['CM_EXTRACT_EXTRACTED_PATH'] = path
+ env['MLC_EXTRACT_EXTRACTED_PATH'] = path
return {'return': 0}
@@ -67,7 +67,7 @@ def postprocess(i):
os_info = i['os_info']
env = i['env']
- path = env['CM_EXTRACT_EXTRACTED_PATH']
+ path = env['MLC_EXTRACT_EXTRACTED_PATH']
path1 = os.path.join(path, 'imagenet-2012-val')
if os.path.isdir(path1):
path = path1
@@ -79,14 +79,14 @@ def postprocess(i):
'error': 'ImageNet file {} not found'.format(path_image)}
files = os.listdir(path)
- if len(files) < int(env.get('CM_DATASET_SIZE', 0)):
+ if len(files) < int(env.get('MLC_DATASET_SIZE', 0)):
return {'return': 1, 'error': 'Only {} files found in {}. {} expected'.format(
- len(files), path, env.get('CM_DATASET_SIZE'))}
+ len(files), path, env.get('MLC_DATASET_SIZE'))}
- env['CM_DATASET_PATH'] = path
- env['CM_DATASET_IMAGENET_PATH'] = path
- env['CM_DATASET_IMAGENET_VAL_PATH'] = path
+ env['MLC_DATASET_PATH'] = path
+ env['MLC_DATASET_IMAGENET_PATH'] = path
+ env['MLC_DATASET_IMAGENET_VAL_PATH'] = path
- env['CM_GET_DEPENDENT_CACHED_PATH'] = path
+ env['MLC_GET_DEPENDENT_CACHED_PATH'] = path
return {'return': 0}
diff --git a/script/get-dataset-imagenet-val/meta.yaml b/script/get-dataset-imagenet-val/meta.yaml
index 0a23afac1..f1db2f01c 100644
--- a/script/get-dataset-imagenet-val/meta.yaml
+++ b/script/get-dataset-imagenet-val/meta.yaml
@@ -14,44 +14,44 @@ docker:
run: false
env:
- CM_DATASET: IMAGENET
+ MLC_DATASET: IMAGENET
input_mapping:
imagenet_path: IMAGENET_PATH
- torrent: CM_DATASET_IMAGENET_VAL_TORRENT_PATH
+ torrent: MLC_DATASET_IMAGENET_VAL_TORRENT_PATH
new_env_keys:
-- CM_DATASET_PATH
-- CM_DATASET_IMAGENET_PATH
-- CM_DATASET_IMAGENET_VAL_PATH
-- CM_DATASET_SIZE
-- CM_DATASET_VER
+- MLC_DATASET_PATH
+- MLC_DATASET_IMAGENET_PATH
+- MLC_DATASET_IMAGENET_VAL_PATH
+- MLC_DATASET_SIZE
+- MLC_DATASET_VER
prehook_deps:
- enable_if_env:
- CM_DATASET_IMAGENET_VAL_REQUIRE_DAE:
+ MLC_DATASET_IMAGENET_VAL_REQUIRE_DAE:
- 'yes'
- 'True'
env:
- CM_EXTRACT_TO_FOLDER: imagenet-2012-val
+ MLC_EXTRACT_TO_FOLDER: imagenet-2012-val
tags: download-and-extract,file,_extract
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
update_tags_from_env:
- - CM_DAE_EXTRA_TAGS
+ - MLC_DAE_EXTRA_TAGS
update_tags_from_env_with_prefix:
_url.:
- - CM_DAE_URL
+ - MLC_DAE_URL
- enable_if_env:
- CM_DAE_ONLY_EXTRACT:
+ MLC_DAE_ONLY_EXTRACT:
- 'yes'
- 'True'
env:
- CM_EXTRACT_TO_FOLDER: imagenet-2012-val
+ MLC_EXTRACT_TO_FOLDER: imagenet-2012-val
tags: file,extract,_no-remove-extracted
update_tags_from_env_with_prefix:
_path.:
- - CM_EXTRACT_PATH
+ - MLC_EXTRACT_PATH
tags:
- get
@@ -67,7 +67,7 @@ variations:
'2012':
default: true
env:
- CM_DATASET_VER: '2012'
+ MLC_DATASET_VER: '2012'
group: dataset-version
2012-500:
base:
@@ -79,25 +79,25 @@ variations:
- '2012'
full:
env:
- CM_DAE_FILENAME: ILSVRC2012_img_val.tar
- CM_DATASET_SIZE: '50000'
- CM_DOWNLOAD_CHECKSUM: 29b22e2961454d5413ddabcf34fc5622
- CM_IMAGENET_FULL: 'yes'
+ MLC_DAE_FILENAME: ILSVRC2012_img_val.tar
+ MLC_DATASET_SIZE: '50000'
+ MLC_DOWNLOAD_CHECKSUM: 29b22e2961454d5413ddabcf34fc5622
+ MLC_IMAGENET_FULL: 'yes'
group: count
run-during-docker-build:
docker:
run: true
size.#:
env:
- CM_DATASET_SIZE: '#'
+ MLC_DATASET_SIZE: '#'
group: count
size.500:
default: true
env:
- CM_DAE_FILENAME: ILSVRC2012_img_val_500.tar
- CM_DAE_URL: http://cKnowledge.org/ai/data/ILSVRC2012_img_val_500.tar
- CM_DOWNLOAD_URL1: https://www.dropbox.com/scl/fi/a7fhjnzxi6x3ceapxh5bm/ILSVRC2012_img_val_500.tar?rlkey=hz4rabo9ve43co3c303y9r6l7&st=ulcgb3av&dl=1
- CM_DATASET_SIZE: '500'
- CM_DOWNLOAD_CHECKSUM: 8627befdd8c2bcf305729020e9db354e
- CM_DOWNLOAD_FILENAME: ILSVRC2012_img_val_500.tar
+ MLC_DAE_FILENAME: ILSVRC2012_img_val_500.tar
+ MLC_DAE_URL: http://cKnowledge.org/ai/data/ILSVRC2012_img_val_500.tar
+ MLC_DOWNLOAD_URL1: https://www.dropbox.com/scl/fi/a7fhjnzxi6x3ceapxh5bm/ILSVRC2012_img_val_500.tar?rlkey=hz4rabo9ve43co3c303y9r6l7&st=ulcgb3av&dl=1
+ MLC_DATASET_SIZE: '500'
+ MLC_DOWNLOAD_CHECKSUM: 8627befdd8c2bcf305729020e9db354e
+ MLC_DOWNLOAD_FILENAME: ILSVRC2012_img_val_500.tar
group: count
diff --git a/script/get-dataset-imagenet-val/run.bat b/script/get-dataset-imagenet-val/run.bat
index 94625b7e5..6f481b0e2 100644
--- a/script/get-dataset-imagenet-val/run.bat
+++ b/script/get-dataset-imagenet-val/run.bat
@@ -1,4 +1,4 @@
-if "%CM_EXTRACT_EXTRACTED_PATH%" == "" (
+if "%MLC_EXTRACT_EXTRACTED_PATH%" == "" (
echo.
wget -nc https://www.dropbox.com/s/57s11df6pts3z69/ILSVRC2012_img_val_500.tar --no-check-certificate
@@ -11,7 +11,7 @@ if "%CM_EXTRACT_EXTRACTED_PATH%" == "" (
del /Q /S ILSVRC2012_img_val_500.tar
- echo CM_DATASET_PATH=%CD%\images > tmp-run-env.out
- echo CM_DATASET_IMAGENET_PATH=%CD%\images >> tmp-run-env.out
- echo CM_DATASET_IMAGENET_VAL_PATH=%CD%\images >> tmp-run-env.out
+ echo MLC_DATASET_PATH=%CD%\images > tmp-run-env.out
+ echo MLC_DATASET_IMAGENET_PATH=%CD%\images >> tmp-run-env.out
+ echo MLC_DATASET_IMAGENET_VAL_PATH=%CD%\images >> tmp-run-env.out
)
diff --git a/script/get-dataset-kits19/customize.py b/script/get-dataset-kits19/customize.py
index 5f95125c0..8b0a48fea 100644
--- a/script/get-dataset-kits19/customize.py
+++ b/script/get-dataset-kits19/customize.py
@@ -13,21 +13,21 @@ def preprocess(i):
env = i['env']
meta = i['meta']
- if not env.get('CM_GIT_CHECKOUT', ''):
+ if not env.get('MLC_GIT_CHECKOUT', ''):
return {
- 'return': 1, 'error': 'Please provide a valid CM_GIT_SHA inside the custom variation of _cm.json'}
+ 'return': 1, 'error': 'Please provide a valid MLC_GIT_SHA inside the custom variation of _cm.json'}
- if 'CM_GIT_DEPTH' not in env:
- env['CM_GIT_DEPTH'] = ''
+ if 'MLC_GIT_DEPTH' not in env:
+ env['MLC_GIT_DEPTH'] = ''
- if 'CM_GIT_RECURSE_SUBMODULES' not in env:
- env['CM_GIT_RECURSE_SUBMODULES'] = ''
+ if 'MLC_GIT_RECURSE_SUBMODULES' not in env:
+ env['MLC_GIT_RECURSE_SUBMODULES'] = ''
- need_version = env.get('CM_VERSION', '')
+ need_version = env.get('MLC_VERSION', '')
versions = meta['versions']
if need_version != '' and need_version not in versions:
- env['CM_GIT_CHECKOUT'] = need_version
+ env['MLC_GIT_CHECKOUT'] = need_version
return {'return': 0}
@@ -35,7 +35,7 @@ def preprocess(i):
def postprocess(i):
env = i['env']
- env['CM_DATASET_PATH'] = os.path.join(os.getcwd(), 'kits19', 'data')
+ env['MLC_DATASET_PATH'] = os.path.join(os.getcwd(), 'kits19', 'data')
state = i['state']
return {'return': 0}
diff --git a/script/get-dataset-kits19/meta.yaml b/script/get-dataset-kits19/meta.yaml
index eddb6a9a8..7c5716558 100644
--- a/script/get-dataset-kits19/meta.yaml
+++ b/script/get-dataset-kits19/meta.yaml
@@ -4,11 +4,11 @@ automation_uid: 5b4e0237da074764
cache: true
category: AI/ML datasets
default_env:
- CM_GIT_CHECKOUT: master
- CM_GIT_DEPTH: --depth 2
- CM_GIT_PATCH: 'no'
- CM_GIT_RECURSE_SUBMODULES: ''
- CM_GIT_URL: https://github.com/neheller/kits19
+ MLC_GIT_CHECKOUT: master
+ MLC_GIT_DEPTH: --depth 2
+ MLC_GIT_PATCH: 'no'
+ MLC_GIT_RECURSE_SUBMODULES: ''
+ MLC_GIT_URL: https://github.com/neheller/kits19
default_version: master
deps:
- tags: detect,os
@@ -17,7 +17,7 @@ deps:
- python
tags: get,python3
new_env_keys:
-- CM_DATASET_*
+- MLC_DATASET_*
tags:
- get
- dataset
@@ -29,32 +29,32 @@ uid: 79992bb221024ac5
variations:
calibration:
env:
- CM_DATASET_CALIBRATION: 'yes'
+ MLC_DATASET_CALIBRATION: 'yes'
default:
base:
- short-history
env:
- CM_GIT_PATCH: 'no'
+ MLC_GIT_PATCH: 'no'
full-history:
env:
- CM_GIT_DEPTH: ''
+ MLC_GIT_DEPTH: ''
no-recurse-submodules:
env:
- CM_GIT_RECURSE_SUBMODULES: ''
+ MLC_GIT_RECURSE_SUBMODULES: ''
patch:
env:
- CM_GIT_PATCH: 'yes'
+ MLC_GIT_PATCH: 'yes'
short-history:
env:
- CM_GIT_DEPTH: --depth 5
+ MLC_GIT_DEPTH: --depth 5
validation:
env:
- CM_DATASET_VALIDATION: 'yes'
+ MLC_DATASET_VALIDATION: 'yes'
versions:
custom:
env:
- CM_GIT_CHECKOUT: ''
- CM_GIT_SHA: 'yes'
+ MLC_GIT_CHECKOUT: ''
+ MLC_GIT_SHA: 'yes'
master:
env:
- CM_GIT_CHECKOUT: master
+ MLC_GIT_CHECKOUT: master
diff --git a/script/get-dataset-kits19/run.sh b/script/get-dataset-kits19/run.sh
index f5bf0617a..aa164daf2 100644
--- a/script/get-dataset-kits19/run.sh
+++ b/script/get-dataset-kits19/run.sh
@@ -1,34 +1,34 @@
#!/bin/bash
CUR_DIR=$PWD
-SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH}
+SCRIPT_DIR=${MLC_TMP_CURRENT_SCRIPT_PATH}
echo "******************************************************"
-echo "Cloning kits19 from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT} ${CM_GIT_DEPTH} ${CM_GIT_RECURSE_SUBMODULES}..."
+echo "Cloning kits19 from ${MLC_GIT_URL} with branch ${MLC_GIT_CHECKOUT} ${MLC_GIT_DEPTH} ${MLC_GIT_RECURSE_SUBMODULES}..."
if [ ! -d "kits19" ]; then
- if [ -z ${CM_GIT_SHA} ]; then
- cmd="git clone ${CM_GIT_RECURSE_SUBMODULES} -b ${CM_GIT_CHECKOUT} ${CM_GIT_URL} ${CM_GIT_DEPTH} kits19"
+ if [ -z ${MLC_GIT_SHA} ]; then
+ cmd="git clone ${MLC_GIT_RECURSE_SUBMODULES} -b ${MLC_GIT_CHECKOUT} ${MLC_GIT_URL} ${MLC_GIT_DEPTH} kits19"
echo $cmd
eval $cmd
cd kits19
else
- git clone ${CM_GIT_RECURSE_SUBMODULES} ${CM_GIT_URL} ${CM_GIT_DEPTH} kits19
+ git clone ${MLC_GIT_RECURSE_SUBMODULES} ${MLC_GIT_URL} ${MLC_GIT_DEPTH} kits19
cd kits19
- git checkout -b "${CM_GIT_CHECKOUT}"
+ git checkout -b "${MLC_GIT_CHECKOUT}"
fi
if [ "${?}" != "0" ]; then exit 1; fi
else
cd kits19
fi
-if [ ${CM_GIT_PATCH} == "yes" ]; then
- patch_filename=${CM_GIT_PATCH_FILENAME}
- if [ ! -n ${CM_GIT_PATCH_FILENAMES} ]; then
- patchfile=${CM_GIT_PATCH_FILENAME:-"git.patch"}
- CM_GIT_PATCH_FILENAMES=$patchfile
+if [ ${MLC_GIT_PATCH} == "yes" ]; then
+ patch_filename=${MLC_GIT_PATCH_FILENAME}
+ if [ ! -n ${MLC_GIT_PATCH_FILENAMES} ]; then
+ patchfile=${MLC_GIT_PATCH_FILENAME:-"git.patch"}
+ MLC_GIT_PATCH_FILENAMES=$patchfile
fi
- IFS=', ' read -r -a patch_files <<< ${CM_GIT_PATCH_FILENAMES}
+ IFS=', ' read -r -a patch_files <<< ${MLC_GIT_PATCH_FILENAMES}
for patch_filename in "${patch_files[@]}"
do
echo "Applying patch ${SCRIPT_DIR}/patch/$patch_filename"
@@ -37,7 +37,7 @@ if [ ${CM_GIT_PATCH} == "yes" ]; then
done
fi
cd ${CUR_DIR}/kits19
-${CM_PYTHON_BIN_WITH_PATH} -m starter_code.get_imaging
+${MLC_PYTHON_BIN_WITH_PATH} -m starter_code.get_imaging
cd data
cp -rf case_00185 case_00400
cd "$CUR_DIR"
diff --git a/script/get-dataset-librispeech/README-extra.md b/script/get-dataset-librispeech/README-extra.md
index 265902c92..d5d937fa3 100644
--- a/script/get-dataset-librispeech/README-extra.md
+++ b/script/get-dataset-librispeech/README-extra.md
@@ -16,10 +16,10 @@ where [VERSION] is one of
* `train-other-500`
## Exported Variables
-* `CM_DATASET_ARCHIVE:`
-* `CM_DATASET_LIBRISPEECH_PATH:`
-* `CM_DATASET_MD5:`
-* `CM_DATASET_NAME:`
+* `MLC_DATASET_ARCHIVE:`
+* `MLC_DATASET_LIBRISPEECH_PATH:`
+* `MLC_DATASET_MD5:`
+* `MLC_DATASET_NAME:`
## Supported and Tested OS
1. Ubuntu 18.04, 20.04, 22.04
diff --git a/script/get-dataset-librispeech/customize.py b/script/get-dataset-librispeech/customize.py
index 969b4b4ad..cf65e55c4 100644
--- a/script/get-dataset-librispeech/customize.py
+++ b/script/get-dataset-librispeech/customize.py
@@ -13,10 +13,10 @@ def preprocess(i):
def postprocess(i):
env = i['env']
- folder_name = env['CM_DATASET_ARCHIVE'].split(".")[0]
- env['CM_DATASET_LIBRISPEECH_PATH'] = os.path.join(
+ folder_name = env['MLC_DATASET_ARCHIVE'].split(".")[0]
+ env['MLC_DATASET_LIBRISPEECH_PATH'] = os.path.join(
os.getcwd(), "LibriSpeech", folder_name)
- env['CM_DATASET_PATH'] = os.path.join(
+ env['MLC_DATASET_PATH'] = os.path.join(
os.getcwd(), "LibriSpeech", folder_name)
return {'return': 0}
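postprocess derives the extraction folder from the archive name by splitting at the first dot, so dev-clean.tar.gz maps to LibriSpeech/dev-clean. A quick check of that derivation (it assumes archive stems never contain dots, which holds for all versions declared below):

```python
import os

def librispeech_paths(archive, root='/tmp/work'):
    folder = archive.split('.')[0]  # 'dev-clean.tar.gz' -> 'dev-clean'
    path = os.path.join(root, 'LibriSpeech', folder)
    return {'MLC_DATASET_LIBRISPEECH_PATH': path, 'MLC_DATASET_PATH': path}

print(librispeech_paths('dev-clean.tar.gz'))
# both keys point at /tmp/work/LibriSpeech/dev-clean
```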
diff --git a/script/get-dataset-librispeech/meta.yaml b/script/get-dataset-librispeech/meta.yaml
index ead114f4a..c696b3e25 100644
--- a/script/get-dataset-librispeech/meta.yaml
+++ b/script/get-dataset-librispeech/meta.yaml
@@ -9,10 +9,10 @@ deps:
- sys-utils
tags: get,sys-utils-cm
env:
- CM_DATASET: LIBRISPEECH
- CM_WGET_URL: http://www.openslr.org/resources/12/<<>>
+ MLC_DATASET: LIBRISPEECH
+ MLC_WGET_URL: http://www.openslr.org/resources/12/<<>>
new_env_keys:
-- CM_DATASET_*
+- MLC_DATASET_*
tags:
- get
- dataset
@@ -27,29 +27,29 @@ uid: 09f29df607e0415d
versions:
dev-clean:
env:
- CM_DATASET_ARCHIVE: dev-clean.tar.gz
- CM_DATASET_MD5: 42e2234ba48799c1f50f24a7926300a1
- CM_DATASET_NAME: LibriSpeech Dev Clean dataset
+ MLC_DATASET_ARCHIVE: dev-clean.tar.gz
+ MLC_DATASET_MD5: 42e2234ba48799c1f50f24a7926300a1
+ MLC_DATASET_NAME: LibriSpeech Dev Clean dataset
dev-other:
env:
- CM_DATASET_ARCHIVE: dev-other.tar.gz
- CM_DATASET_MD5: c8d0bcc9cca99d4f8b62fcc847357931
- CM_DATASET_NAME: LibriSpeech Dev Other dataset
+ MLC_DATASET_ARCHIVE: dev-other.tar.gz
+ MLC_DATASET_MD5: c8d0bcc9cca99d4f8b62fcc847357931
+ MLC_DATASET_NAME: LibriSpeech Dev Other dataset
test-clean:
env:
- CM_DATASET_ARCHIVE: test-clean.tar.gz
- CM_DATASET_MD5: 32fa31d27d2e1cad72775fee3f4849a9
- CM_DATASET_NAME: LibriSpeech Test Clean dataset
+ MLC_DATASET_ARCHIVE: test-clean.tar.gz
+ MLC_DATASET_MD5: 32fa31d27d2e1cad72775fee3f4849a9
+ MLC_DATASET_NAME: LibriSpeech Test Clean dataset
test-other:
env:
- CM_DATASET_ARCHIVE: test-other.tar.gz
- CM_DATASET_MD5: fb5a50374b501bb3bac4815ee91d3135
- CM_DATASET_NAME: LibriSpeech Test Other dataset
+ MLC_DATASET_ARCHIVE: test-other.tar.gz
+ MLC_DATASET_MD5: fb5a50374b501bb3bac4815ee91d3135
+ MLC_DATASET_NAME: LibriSpeech Test Other dataset
train-clean-100:
env:
- CM_DATASET_ARCHIVE: train-clean-100.tar.gz
- CM_DATASET_MD5: 2a93770f6d5c6c964bc36631d331a522
- CM_DATASET_NAME: LibriSpeech Train Clean 100 dataset
+ MLC_DATASET_ARCHIVE: train-clean-100.tar.gz
+ MLC_DATASET_MD5: 2a93770f6d5c6c964bc36631d331a522
+ MLC_DATASET_NAME: LibriSpeech Train Clean 100 dataset
train-clean-360:
env:
DATASET_ARCHIVE: train-clean-360.tar.gz
diff --git a/script/get-dataset-librispeech/run.sh b/script/get-dataset-librispeech/run.sh
index 9c2fc2660..99bd174fe 100644
--- a/script/get-dataset-librispeech/run.sh
+++ b/script/get-dataset-librispeech/run.sh
@@ -1,8 +1,8 @@
#!/bin/bash
-wget -nc ${CM_WGET_URL} --no-check-certificate
+wget -nc ${MLC_WGET_URL} --no-check-certificate
test $? -eq 0 || exit 1
-tar -x --skip-old-files -vf ${CM_DATASET_ARCHIVE}
+tar -x --skip-old-files -vf ${MLC_DATASET_ARCHIVE}
test $? -eq 0 || exit 1
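run.sh downloads and unpacks without verifying the archive, but the per-version MLC_DATASET_MD5 values in meta.yaml make an integrity check easy to bolt on; a sketch of such a check (not part of the script itself):

```python
import hashlib

def md5sum(path, chunk=1 << 20):
    # Stream the file so large archives are not loaded into memory at once.
    h = hashlib.md5()
    with open(path, 'rb') as f:
        for block in iter(lambda: f.read(chunk), b''):
            h.update(block)
    return h.hexdigest()

# Compare against MLC_DATASET_MD5, e.g. for the dev-clean version:
# assert md5sum('dev-clean.tar.gz') == '42e2234ba48799c1f50f24a7926300a1'
```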
diff --git a/script/get-dataset-mlperf-inference-llama3/customize.py b/script/get-dataset-mlperf-inference-llama3/customize.py
index 827dcd2cd..c501a6e60 100644
--- a/script/get-dataset-mlperf-inference-llama3/customize.py
+++ b/script/get-dataset-mlperf-inference-llama3/customize.py
@@ -11,11 +11,11 @@ def preprocess(i):
if os_info['platform'] == "windows":
return {'return': 1, 'error': 'Script not supported in windows yet!'}
- if env.get('CM_DATASET_LLAMA3_PATH', '') == '':
- env['CM_TMP_REQUIRE_DOWNLOAD'] = "yes"
+ if env.get('MLC_DATASET_LLAMA3_PATH', '') == '':
+ env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes"
- if env.get('CM_OUTDIRNAME', '') != '':
- env['CM_DOWNLOAD_PATH'] = env['CM_OUTDIRNAME']
+ if env.get('MLC_OUTDIRNAME', '') != '':
+ env['MLC_DOWNLOAD_PATH'] = env['MLC_OUTDIRNAME']
return {'return': 0}
@@ -24,8 +24,8 @@ def postprocess(i):
env = i['env']
- if env.get('CM_TMP_REQUIRE_DOWNLOAD', '') == "yes":
- env['CM_DATASET_LLAMA3_PATH'] = os.path.join(
- env['CM_DATASET_LLAMA3_PATH'], env['CM_DATASET_FILE_NAME'])
+ if env.get('MLC_TMP_REQUIRE_DOWNLOAD', '') == "yes":
+ env['MLC_DATASET_LLAMA3_PATH'] = os.path.join(
+ env['MLC_DATASET_LLAMA3_PATH'], env['MLC_DATASET_FILE_NAME'])
return {'return': 0}
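The gate here is a recurring pattern in these scripts: an empty MLC_DATASET_LLAMA3_PATH asks the prehook dependency to download (MLC_TMP_REQUIRE_DOWNLOAD), --outdirname optionally redirects the download, and postprocess appends the file name once the directory is populated. Both phases condensed into one sketch:

```python
import os

def llama3_request_download(env):
    # preprocess side: decide whether the prehook download must run.
    if env.get('MLC_DATASET_LLAMA3_PATH', '') == '':
        env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'yes'
    if env.get('MLC_OUTDIRNAME', '') != '':
        env['MLC_DOWNLOAD_PATH'] = env['MLC_OUTDIRNAME']
    return env

def llama3_finalize(env):
    # postprocess side: the downloader left a directory; point at the file.
    if env.get('MLC_TMP_REQUIRE_DOWNLOAD', '') == 'yes':
        env['MLC_DATASET_LLAMA3_PATH'] = os.path.join(
            env['MLC_DATASET_LLAMA3_PATH'], env['MLC_DATASET_FILE_NAME'])
    return env
```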
diff --git a/script/get-dataset-mlperf-inference-llama3/meta.yaml b/script/get-dataset-mlperf-inference-llama3/meta.yaml
index d8af83b88..1028ab0cf 100644
--- a/script/get-dataset-mlperf-inference-llama3/meta.yaml
+++ b/script/get-dataset-mlperf-inference-llama3/meta.yaml
@@ -10,47 +10,47 @@ tags:
- inference
uid: c3bc69599cbc4db7
new_env_keys:
- - CM_DATASET_LLAMA3_PATH
+ - MLC_DATASET_LLAMA3_PATH
input_mapping:
- outdirname: CM_OUTDIRNAME
+ outdirname: MLC_OUTDIRNAME
prehook_deps:
- env:
- CM_DOWNLOAD_FINAL_ENV_NAME: CM_DATASET_LLAMA3_PATH
- CM_EXTRACT_TO_FOLDER: llama-3-dataset
+ MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_DATASET_LLAMA3_PATH
+ MLC_EXTRACT_TO_FOLDER: llama-3-dataset
extra_cache_tags: dataset,llama3
force_cache: true
enable_if_env:
- CM_TMP_REQUIRE_DOWNLOAD:
+ MLC_TMP_REQUIRE_DOWNLOAD:
- 'yes'
names:
- dae
tags: download-and-extract
update_tags_from_env_with_prefix:
_url.:
- - CM_DOWNLOAD_URL
+ - MLC_DOWNLOAD_URL
variations:
validation:
default: true
group: dataset-type
env:
- CM_RCLONE_URL: mlc-inference:mlcommons-inference-wg-public/llama3_405b/mlperf_llama3.1_405b_dataset_8313_processed_fp16_eval.pkl
- CM_DATASET_TYPE: validation
- CM_DATASET_FILE_NAME: mlperf_llama3.1_405b_dataset_8313_processed_fp16_eval.pkl
+ MLC_RCLONE_URL: mlc-inference:mlcommons-inference-wg-public/llama3_405b/mlperf_llama3.1_405b_dataset_8313_processed_fp16_eval.pkl
+ MLC_DATASET_TYPE: validation
+ MLC_DATASET_FILE_NAME: mlperf_llama3.1_405b_dataset_8313_processed_fp16_eval.pkl
calibration:
group: dataset-type
env:
- CM_RCLONE_URL: mlc-inference:mlcommons-inference-wg-public/llama3_405b/mlperf_llama3.1_405b_calibration_dataset_512_processed_fp16_eval.pkl
- CM_DATASET_TYPE: calibration
- CM_DATASET_FILE_NAME: mlperf_llama3.1_405b_calibration_dataset_512_processed_fp16_eval.pkl
+ MLC_RCLONE_URL: mlc-inference:mlcommons-inference-wg-public/llama3_405b/mlperf_llama3.1_405b_calibration_dataset_512_processed_fp16_eval.pkl
+ MLC_DATASET_TYPE: calibration
+ MLC_DATASET_FILE_NAME: mlperf_llama3.1_405b_calibration_dataset_512_processed_fp16_eval.pkl
rclone:
add_deps_recursive:
dae:
tags: _rclone
default: true
env:
- CM_DOWNLOAD_FILENAME: checkpoint
- CM_DOWNLOAD_URL: <<>>
- CM_RCLONE_CONFIG_NAME: mlc-inference
+ MLC_DOWNLOAD_FILENAME: checkpoint
+ MLC_DOWNLOAD_URL: <<>>
+ MLC_RCLONE_CONFIG_NAME: mlc-inference
group: download-tool
print_env_at_the_end:
- CM_DATASET_LLAMA3_PATH: Path to the dataset
+ MLC_DATASET_LLAMA3_PATH: Path to the dataset
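update_tags_from_env_with_prefix is what ties the renamed keys to dependency resolution: the value of MLC_DOWNLOAD_URL is prepended with '_url.' and appended to the download-and-extract tags, which is how the rclone variation's templated MLC_DOWNLOAD_URL reaches the downloader. A sketch with expand_dep_tags as a hypothetical stand-in for that mechanism:

```python
def expand_dep_tags(base_tags, prefix_map, env):
    # prefix_map mirrors the meta.yaml block: {'_url.': ['MLC_DOWNLOAD_URL']}
    tags = list(base_tags)
    for prefix, keys in prefix_map.items():
        for key in keys:
            if env.get(key, ''):
                tags.append(prefix + env[key])
    return tags

env = {'MLC_DOWNLOAD_URL': 'mlc-inference:mlcommons-inference-wg-public/'
       'llama3_405b/mlperf_llama3.1_405b_dataset_8313_processed_fp16_eval.pkl'}
print(expand_dep_tags(['download-and-extract'],
                      {'_url.': ['MLC_DOWNLOAD_URL']}, env))
```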
diff --git a/script/get-dataset-mlperf-inference-mixtral/customize.py b/script/get-dataset-mlperf-inference-mixtral/customize.py
index dc46a6661..bcfb39259 100644
--- a/script/get-dataset-mlperf-inference-mixtral/customize.py
+++ b/script/get-dataset-mlperf-inference-mixtral/customize.py
@@ -8,8 +8,8 @@ def preprocess(i):
env = i['env']
- if env.get('CM_DATASET_MIXTRAL_GENERATE_TEST_DATA', '') == "yes":
- env['CM_DATASET_MIXTRAL_TEST_DATA_GENERATED_PATH'] = os.path.join(
+ if env.get('MLC_DATASET_MIXTRAL_GENERATE_TEST_DATA', '') == "yes":
+ env['MLC_DATASET_MIXTRAL_TEST_DATA_GENERATED_PATH'] = os.path.join(
os.getcwd(), "mixtral-test-dataset.pkl")
return {'return': 0}
@@ -18,9 +18,9 @@ def preprocess(i):
def postprocess(i):
env = i['env']
- env['CM_DATASET_MIXTRAL_PREPROCESSED_PATH'] = env['CM_DATASET_PREPROCESSED_PATH']
+ env['MLC_DATASET_MIXTRAL_PREPROCESSED_PATH'] = env['MLC_DATASET_PREPROCESSED_PATH']
- if env.get('CM_DATASET_MIXTRAL_GENERATE_TEST_DATA', '') == "yes":
- env['CM_DATASET_MIXTRAL_PREPROCESSED_PATH'] = env['CM_DATASET_MIXTRAL_TEST_DATA_GENERATED_PATH']
+ if env.get('MLC_DATASET_MIXTRAL_GENERATE_TEST_DATA', '') == "yes":
+ env['MLC_DATASET_MIXTRAL_PREPROCESSED_PATH'] = env['MLC_DATASET_MIXTRAL_TEST_DATA_GENERATED_PATH']
return {'return': 0}
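When MLC_DATASET_MIXTRAL_GENERATE_TEST_DATA is 'yes', the generated subset silently replaces the full preprocessed dataset as the exported path, so downstream consumers need no special casing. The swap in isolation:

```python
def resolve_mixtral_path(env):
    # Mirrors postprocess above: the test subset, when requested, wins.
    path = env['MLC_DATASET_PREPROCESSED_PATH']
    if env.get('MLC_DATASET_MIXTRAL_GENERATE_TEST_DATA', '') == 'yes':
        path = env['MLC_DATASET_MIXTRAL_TEST_DATA_GENERATED_PATH']
    env['MLC_DATASET_MIXTRAL_PREPROCESSED_PATH'] = path
    return env

env = resolve_mixtral_path({
    'MLC_DATASET_PREPROCESSED_PATH': '/cache/2024.06.06_mixtral_15k_v4.pkl',
    'MLC_DATASET_MIXTRAL_GENERATE_TEST_DATA': 'yes',
    'MLC_DATASET_MIXTRAL_TEST_DATA_GENERATED_PATH': '/cache/mixtral-test-dataset.pkl',
})
assert env['MLC_DATASET_MIXTRAL_PREPROCESSED_PATH'].endswith('test-dataset.pkl')
```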
diff --git a/script/get-dataset-mlperf-inference-mixtral/meta.yaml b/script/get-dataset-mlperf-inference-mixtral/meta.yaml
index 566f7bb05..99b8e0c2f 100644
--- a/script/get-dataset-mlperf-inference-mixtral/meta.yaml
+++ b/script/get-dataset-mlperf-inference-mixtral/meta.yaml
@@ -4,18 +4,18 @@ automation_uid: 5b4e0237da074764
cache: true
category: AI/ML datasets
new_env_keys:
-- CM_DATASET_*
+- MLC_DATASET_*
prehook_deps:
- env:
- CM_DOWNLOAD_FINAL_ENV_NAME: CM_DATASET_PREPROCESSED_PATH
+ MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_DATASET_PREPROCESSED_PATH
extra_cache_tags: mixtral,get-mixtral-dataset
force_cache: true
tags: download-and-extract
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
update_tags_from_env_with_prefix:
_url.:
- - CM_PACKAGE_URL
+ - MLC_PACKAGE_URL
tags:
- get
- dataset-mixtral
@@ -25,9 +25,9 @@ variations:
mlcommons-storage:
default: true
env:
- CM_DOWNLOAD_CHECKSUM: 78823c13e0e73e518872105c4b09628b
- CM_DOWNLOAD_FILENAME: 2024.06.06_mixtral_15k_v4.pkl
- CM_PACKAGE_URL: https://inference.mlcommons-storage.org/mixtral_8x7b%2F2024.06.06_mixtral_15k_v4.pkl
+ MLC_DOWNLOAD_CHECKSUM: 78823c13e0e73e518872105c4b09628b
+ MLC_DOWNLOAD_FILENAME: 2024.06.06_mixtral_15k_v4.pkl
+ MLC_PACKAGE_URL: https://inference.mlcommons-storage.org/mixtral_8x7b%2F2024.06.06_mixtral_15k_v4.pkl
group: download-source
size.#:
base:
@@ -36,5 +36,5 @@ variations:
- tags: get,generic-python-lib,_package.pandas
- tags: get,python3
env:
- CM_DATASET_MIXTRAL_GENERATE_TEST_DATA: 'yes'
- CM_DATASET_MIXTRAL_TEST_DATA_SIZE: '#'
+ MLC_DATASET_MIXTRAL_GENERATE_TEST_DATA: 'yes'
+ MLC_DATASET_MIXTRAL_TEST_DATA_SIZE: '#'
diff --git a/script/get-dataset-mlperf-inference-mixtral/run.sh b/script/get-dataset-mlperf-inference-mixtral/run.sh
index 91ad97a53..2bd2955b9 100644
--- a/script/get-dataset-mlperf-inference-mixtral/run.sh
+++ b/script/get-dataset-mlperf-inference-mixtral/run.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-if [[ "$CM_DATASET_MIXTRAL_GENERATE_TEST_DATA" == "yes" ]]; then
- ${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/generate-test-dataset.py --dataset-path ${CM_DATASET_PREPROCESSED_PATH} --output-path ${CM_DATASET_MIXTRAL_TEST_DATA_GENERATED_PATH} --samples ${CM_DATASET_MIXTRAL_TEST_DATA_SIZE}
+if [[ "$MLC_DATASET_MIXTRAL_GENERATE_TEST_DATA" == "yes" ]]; then
+ ${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/generate-test-dataset.py --dataset-path ${MLC_DATASET_PREPROCESSED_PATH} --output-path ${MLC_DATASET_MIXTRAL_TEST_DATA_GENERATED_PATH} --samples ${MLC_DATASET_MIXTRAL_TEST_DATA_SIZE}
fi
diff --git a/script/get-dataset-openimages-annotations/customize.py b/script/get-dataset-openimages-annotations/customize.py
index 286ba3fa8..3b4160d4e 100644
--- a/script/get-dataset-openimages-annotations/customize.py
+++ b/script/get-dataset-openimages-annotations/customize.py
@@ -14,11 +14,11 @@ def preprocess(i):
def postprocess(i):
env = i['env']
- env['CM_DATASET_ANNOTATIONS_FILE_PATH'] = os.path.join(
- env['CM_DATASET_ANNOTATIONS_FILE_PATH'], 'openimages-mlperf.json')
- env['CM_DATASET_ANNOTATIONS_DIR_PATH'] = os.path.dirname(
- env['CM_DATASET_ANNOTATIONS_FILE_PATH'])
- env['CM_DATASET_OPENIMAGES_ANNOTATIONS_FILE_PATH'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH']
- env['CM_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH'] = env['CM_DATASET_ANNOTATIONS_DIR_PATH']
+ env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] = os.path.join(
+ env['MLC_DATASET_ANNOTATIONS_FILE_PATH'], 'openimages-mlperf.json')
+ env['MLC_DATASET_ANNOTATIONS_DIR_PATH'] = os.path.dirname(
+ env['MLC_DATASET_ANNOTATIONS_FILE_PATH'])
+ env['MLC_DATASET_OPENIMAGES_ANNOTATIONS_FILE_PATH'] = env['MLC_DATASET_ANNOTATIONS_FILE_PATH']
+ env['MLC_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH'] = env['MLC_DATASET_ANNOTATIONS_DIR_PATH']
return {'return': 0}
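A subtlety worth noting in this postprocess: on entry, MLC_DATASET_ANNOTATIONS_FILE_PATH holds a directory (the downloader wrote it there via MLC_DAE_FINAL_ENV_NAME); the function converts it into the JSON file path and then recovers the directory alongside. In isolation:

```python
import os

def finalize_annotations(env):
    # Directory in, file path out; the dir is re-derived for convenience.
    env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] = os.path.join(
        env['MLC_DATASET_ANNOTATIONS_FILE_PATH'], 'openimages-mlperf.json')
    env['MLC_DATASET_ANNOTATIONS_DIR_PATH'] = os.path.dirname(
        env['MLC_DATASET_ANNOTATIONS_FILE_PATH'])
    return env

env = finalize_annotations({'MLC_DATASET_ANNOTATIONS_FILE_PATH': '/cache/annotations'})
print(env)  # FILE_PATH .../annotations/openimages-mlperf.json, DIR_PATH .../annotations
```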
diff --git a/script/get-dataset-openimages-annotations/meta.yaml b/script/get-dataset-openimages-annotations/meta.yaml
index 16158cef6..03e44e436 100644
--- a/script/get-dataset-openimages-annotations/meta.yaml
+++ b/script/get-dataset-openimages-annotations/meta.yaml
@@ -4,21 +4,21 @@ automation_uid: 5b4e0237da074764
cache: true
category: AI/ML datasets
new_env_keys:
-- CM_DATASET_OPENIMAGES_ANNOTATIONS_*
-- CM_DATASET_ANNOTATIONS_*
+- MLC_DATASET_OPENIMAGES_ANNOTATIONS_*
+- MLC_DATASET_ANNOTATIONS_*
prehook_deps:
- env:
- CM_DAE_FINAL_ENV_NAME: CM_DATASET_ANNOTATIONS_FILE_PATH
+ MLC_DAE_FINAL_ENV_NAME: MLC_DATASET_ANNOTATIONS_FILE_PATH
extra_cache_tags: retinanet,get,dataset-openimages-annotations
force_cache: true
tags: download-and-extract,_wget,_extract
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
update_tags_from_env_with_prefix:
_url.:
- - CM_PACKAGE_URL
+ - MLC_PACKAGE_URL
print_env_at_the_end:
- CM_DATASET_ANNOTATIONS_FILE_PATH: Path to OpenImages annotation file
+ MLC_DATASET_ANNOTATIONS_FILE_PATH: Path to OpenImages annotation file
tags:
- get
- aux
@@ -31,7 +31,7 @@ variations:
from.github:
default: true
env:
- CM_DOWNLOAD_CHECKSUM: 817fd8da3aeeb0575f1e2d2926b15e68
- CM_DOWNLOAD_FILENAME: openimages-mlperf_annotations_2.1.json.zip
- CM_PACKAGE_URL: https://github.com/mlcommons/inference/releases/download/v2.1/openimages-mlperf_annotations_2.1.json.zip
+ MLC_DOWNLOAD_CHECKSUM: 817fd8da3aeeb0575f1e2d2926b15e68
+ MLC_DOWNLOAD_FILENAME: openimages-mlperf_annotations_2.1.json.zip
+ MLC_PACKAGE_URL: https://github.com/mlcommons/inference/releases/download/v2.1/openimages-mlperf_annotations_2.1.json.zip
group: download-source
diff --git a/script/get-dataset-openimages-calibration/customize.py b/script/get-dataset-openimages-calibration/customize.py
index 5fc459075..fc8466566 100644
--- a/script/get-dataset-openimages-calibration/customize.py
+++ b/script/get-dataset-openimages-calibration/customize.py
@@ -12,13 +12,13 @@ def preprocess(i):
automation = i['automation']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
- if env.get("CM_CALIBRATE_FILTER", "") == "yes":
+ if env.get("MLC_CALIBRATE_FILTER", "") == "yes":
i['run_script_input']['script_name'] = "run-filter"
- env['CM_MLPERF_OPENIMAGES_CALIBRATION_FILTERED_LIST'] = os.path.join(
+ env['MLC_MLPERF_OPENIMAGES_CALIBRATION_FILTERED_LIST'] = os.path.join(
os.getcwd(), "filtered.txt")
- env['CM_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH'] = env['CM_MLPERF_OPENIMAGES_CALIBRATION_FILTERED_LIST']
+ env['MLC_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH'] = env['MLC_MLPERF_OPENIMAGES_CALIBRATION_FILTERED_LIST']
return {'return': 0}
diff --git a/script/get-dataset-openimages-calibration/filter.py b/script/get-dataset-openimages-calibration/filter.py
index d8d2638b5..66edc90b1 100644
--- a/script/get-dataset-openimages-calibration/filter.py
+++ b/script/get-dataset-openimages-calibration/filter.py
@@ -19,7 +19,7 @@
data['images'],
key=lambda x: x['num_boxes'],
reverse=os.environ.get(
- 'CM_CALIBRATION_FILTER_ORDER_BY_NUM_BOXES_ASC',
+ 'MLC_CALIBRATION_FILTER_ORDER_BY_NUM_BOXES_ASC',
'') == "yes")
for image in data['images']:
print(image['file_name'])
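filter.py prints one file name per line in box-count order and run-filter.sh keeps the first MLC_CALIBRATION_FILTER_SIZE lines; the whole pipeline fits in a single sketch (the reverse flag corresponds to the MLC_CALIBRATION_FILTER_ORDER_BY_NUM_BOXES_ASC environment toggle above):

```python
import json

def calibration_subset(annotations_file, size, reverse=False):
    # filter.py ordering plus the 'head -n' from run-filter.sh in one step.
    with open(annotations_file) as f:
        data = json.load(f)
    ordered = sorted(data['images'], key=lambda x: x['num_boxes'], reverse=reverse)
    return [img['file_name'] for img in ordered[:size]]

# e.g. calibration_subset(env['MLC_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH'], 400)
```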
diff --git a/script/get-dataset-openimages-calibration/meta.yaml b/script/get-dataset-openimages-calibration/meta.yaml
index 6edd3716c..08585665b 100644
--- a/script/get-dataset-openimages-calibration/meta.yaml
+++ b/script/get-dataset-openimages-calibration/meta.yaml
@@ -11,16 +11,16 @@ category: "AI/ML datasets"
deps:
- tags: download,file
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
force_cache: true
extra_cache_tags: openimages-calibration,openimages,calibration
names:
- calibration-file-downloader
env:
- CM_DOWNLOAD_FINAL_ENV_NAME: CM_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH
+ MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH
new_env_keys:
-- CM_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH
+- MLC_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH
tags:
- get
@@ -33,13 +33,13 @@ variations:
group: calibration-option
default: true
env:
- CM_MLPERF_OPENIMAGES_CALIBRATION_OPTION: one
- CM_DOWNLOAD_CHECKSUM: 5c3196ddcec4605c6a9fcf004d9615e6
+ MLC_MLPERF_OPENIMAGES_CALIBRATION_OPTION: one
+ MLC_DOWNLOAD_CHECKSUM: 5c3196ddcec4605c6a9fcf004d9615e6
adr:
calibration-file-downloader:
tags: _url.https://github.com/mlcommons/inference/raw/master/calibration/openimages/openimages_cal_images_list.txt
env:
- CM_DOWNLOAD_CHECKSUM: 5c3196ddcec4605c6a9fcf004d9615e6
+ MLC_DOWNLOAD_CHECKSUM: 5c3196ddcec4605c6a9fcf004d9615e6
filter:
default_variations:
filter-size: filter_size.400
@@ -50,15 +50,15 @@ variations:
tags: get,python3
- tags: get,openimages,dataset,original,_calibration
env:
- CM_CALIBRATE_FILTER: ''
+ MLC_CALIBRATE_FILTER: ''
env:
- CM_CALIBRATE_FILTER: 'yes'
+ MLC_CALIBRATE_FILTER: 'yes'
filter-size.#:
group: filter-size
env:
- CM_CALIBRATION_FILTER_SIZE: "#"
+ MLC_CALIBRATION_FILTER_SIZE: "#"
filter-size.400:
group: filter-size
env:
- CM_CALIBRATION_FILTER_SIZE: 400
+ MLC_CALIBRATION_FILTER_SIZE: 400
diff --git a/script/get-dataset-openimages-calibration/run-filter.sh b/script/get-dataset-openimages-calibration/run-filter.sh
index 9b1a90c68..7517475fe 100644
--- a/script/get-dataset-openimages-calibration/run-filter.sh
+++ b/script/get-dataset-openimages-calibration/run-filter.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/filter.py ${CM_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH} > ordered.txt
+${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/filter.py ${MLC_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH} > ordered.txt
test $? -eq 0 || exit $?
-head -n ${CM_CALIBRATION_FILTER_SIZE} ordered.txt >filtered.txt
+head -n ${MLC_CALIBRATION_FILTER_SIZE} ordered.txt >filtered.txt
test $? -eq 0 || exit $?
diff --git a/script/get-dataset-openimages/customize.py b/script/get-dataset-openimages/customize.py
index 0f68a45c2..6eb686d97 100644
--- a/script/get-dataset-openimages/customize.py
+++ b/script/get-dataset-openimages/customize.py
@@ -10,7 +10,7 @@ def preprocess(i):
print("")
print("Using MLCommons Inference source from '" +
- env['CM_MLPERF_INFERENCE_SOURCE'] + "'")
+ env['MLC_MLPERF_INFERENCE_SOURCE'] + "'")
print("")
if os_info['platform'] == 'windows':
@@ -58,7 +58,7 @@ def preprocess(i):
if x != '':
x += ' '
x += '"' + v + '"'
- env['CM_DATASET_OPENIMAGES_CLASSES'] = x
+ env['MLC_DATASET_OPENIMAGES_CLASSES'] = x
return {'return': 0}
@@ -66,36 +66,36 @@ def preprocess(i):
def postprocess(i):
env = i['env']
- env['CM_DATASET_ANNOTATIONS_DIR_PATH'] = os.path.join(
+ env['MLC_DATASET_ANNOTATIONS_DIR_PATH'] = os.path.join(
os.getcwd(), 'install', 'annotations')
- if env.get('CM_DATASET_CALIBRATION', '') == "no":
- env['CM_DATASET_PATH_ROOT'] = os.path.join(os.getcwd(), 'install')
- env['CM_DATASET_PATH'] = os.path.join(
+ if env.get('MLC_DATASET_CALIBRATION', '') == "no":
+ env['MLC_DATASET_PATH_ROOT'] = os.path.join(os.getcwd(), 'install')
+ env['MLC_DATASET_PATH'] = os.path.join(
os.getcwd(), 'install', 'validation', 'data')
annotations_file_path = os.path.join(
- env['CM_DATASET_ANNOTATIONS_DIR_PATH'],
+ env['MLC_DATASET_ANNOTATIONS_DIR_PATH'],
"openimages-mlperf.json")
- env['CM_DATASET_VALIDATION_ANNOTATIONS_FILE_PATH'] = annotations_file_path
- env['CM_DATASET_ANNOTATIONS_FILE_PATH'] = annotations_file_path
- env['CM_DATASET_OPENIMAGES_VALIDATION_ANNOTATIONS_FILE_PATH'] = annotations_file_path
- if env.get("CM_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS", '') == "yes":
- annotations_file_src = env['CM_DATASET_OPENIMAGES_ANNOTATIONS_FILE_PATH']
+ env['MLC_DATASET_VALIDATION_ANNOTATIONS_FILE_PATH'] = annotations_file_path
+ env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] = annotations_file_path
+ env['MLC_DATASET_OPENIMAGES_VALIDATION_ANNOTATIONS_FILE_PATH'] = annotations_file_path
+ if env.get("MLC_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS", '') == "yes":
+ annotations_file_src = env['MLC_DATASET_OPENIMAGES_ANNOTATIONS_FILE_PATH']
shutil.copy(
annotations_file_src,
- env['CM_DATASET_ANNOTATIONS_DIR_PATH'])
- env['CM_DATASET_OPENIMAGES_PATH'] = env['CM_DATASET_PATH']
- env['CM_DATASET_OPENIMAGES_PATH_ROOT'] = env['CM_DATASET_PATH_ROOT']
+ env['MLC_DATASET_ANNOTATIONS_DIR_PATH'])
+ env['MLC_DATASET_OPENIMAGES_PATH'] = env['MLC_DATASET_PATH']
+ env['MLC_DATASET_OPENIMAGES_PATH_ROOT'] = env['MLC_DATASET_PATH_ROOT']
else:
- env['CM_CALIBRATION_DATASET_PATH'] = os.path.join(
+ env['MLC_CALIBRATION_DATASET_PATH'] = os.path.join(
os.getcwd(), 'install', 'calibration', 'data')
- env['CM_OPENIMAGES_CALIBRATION_DATASET_PATH'] = os.path.join(
+ env['MLC_OPENIMAGES_CALIBRATION_DATASET_PATH'] = os.path.join(
os.getcwd(), 'install', 'calibration', 'data')
- env['CM_CALIBRATION_DATASET_PATH_ROOT'] = os.path.join(
+ env['MLC_CALIBRATION_DATASET_PATH_ROOT'] = os.path.join(
os.getcwd(), 'install')
annotations_file_path = os.path.join(
- env['CM_DATASET_ANNOTATIONS_DIR_PATH'],
+ env['MLC_DATASET_ANNOTATIONS_DIR_PATH'],
"openimages-calibration-mlperf.json")
- env['CM_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH'] = annotations_file_path
+ env['MLC_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH'] = annotations_file_path
return {'return': 0}
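postprocess publishes two disjoint export sets depending on MLC_DATASET_CALIBRATION, which is also why the meta.yaml below narrows new_env_keys per variation. The branch structure, reduced to the essential paths:

```python
import os

def openimages_exports(root, calibration):
    # Condensed sketch of the branch above; the real code exports more aliases.
    ann = os.path.join(root, 'install', 'annotations')
    if not calibration:
        return {
            'MLC_DATASET_PATH_ROOT': os.path.join(root, 'install'),
            'MLC_DATASET_PATH': os.path.join(root, 'install', 'validation', 'data'),
            'MLC_DATASET_ANNOTATIONS_FILE_PATH':
                os.path.join(ann, 'openimages-mlperf.json'),
        }
    return {
        'MLC_CALIBRATION_DATASET_PATH':
            os.path.join(root, 'install', 'calibration', 'data'),
        'MLC_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH':
            os.path.join(ann, 'openimages-calibration-mlperf.json'),
    }
```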
diff --git a/script/get-dataset-openimages/meta.yaml b/script/get-dataset-openimages/meta.yaml
index 2e0189183..e885d8585 100644
--- a/script/get-dataset-openimages/meta.yaml
+++ b/script/get-dataset-openimages/meta.yaml
@@ -5,7 +5,7 @@ cache: true
category: AI/ML datasets
category_sort: 8500
default_env:
- CM_DATASET_CALIBRATION: 'no'
+ MLC_DATASET_CALIBRATION: 'no'
deps:
- names:
- python
@@ -13,7 +13,7 @@ deps:
tags: get,python3
- tags: get,generic-python-lib,_requests
- force_env_keys:
- - CM_GIT_*
+ - MLC_GIT_*
names:
- inference-src
tags: mlperf,inference,source
@@ -26,25 +26,25 @@ deps:
- pycocotools
tags: get,generic-python-lib,_pycocotools
env:
- CM_DATASET: OPENIMAGES
+ MLC_DATASET: OPENIMAGES
new_env_keys:
-- CM_DATASET_PATH
-- CM_DATASET_PATH_ROOT
-- CM_DATASET_OPENIMAGES_PATH
-- CM_DATASET_OPENIMAGES_DATASET_PATH
-- CM_DATASET_OPENIMAGES_DATASET_PATH_ROOT
-- CM_DATASET_ANNOTATIONS_DIR_PATH
-- CM_DATASET_ANNOTATIONS_FILE_PATH
-- CM_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH
-- CM_DATASET_VALIDATION_ANNOTATIONS_FILE_PATH
-- CM_CALIBRATION_DATASET_PATH
-- CM_CALIBRATION_DATASET_PATH_ROOT
-- CM_OPENIMAGES_CALIBRATION_DATASET_PATH
-- CM_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH
-- CM_DATASET_OPENIMAGES_VALIDATION_ANNOTATIONS_FILE_PATH
+- MLC_DATASET_PATH
+- MLC_DATASET_PATH_ROOT
+- MLC_DATASET_OPENIMAGES_PATH
+- MLC_DATASET_OPENIMAGES_DATASET_PATH
+- MLC_DATASET_OPENIMAGES_DATASET_PATH_ROOT
+- MLC_DATASET_ANNOTATIONS_DIR_PATH
+- MLC_DATASET_ANNOTATIONS_FILE_PATH
+- MLC_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH
+- MLC_DATASET_VALIDATION_ANNOTATIONS_FILE_PATH
+- MLC_CALIBRATION_DATASET_PATH
+- MLC_CALIBRATION_DATASET_PATH_ROOT
+- MLC_OPENIMAGES_CALIBRATION_DATASET_PATH
+- MLC_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH
+- MLC_DATASET_OPENIMAGES_VALIDATION_ANNOTATIONS_FILE_PATH
posthook_deps:
- enable_if_env:
- CM_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS:
+ MLC_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS:
- 'yes'
tags: get,openimages,annotations
tags:
@@ -59,11 +59,11 @@ variations:
'50':
default: true
env:
- CM_DATASET_SIZE: '50'
+ MLC_DATASET_SIZE: '50'
group: size
'500':
env:
- CM_DATASET_SIZE: '500'
+ MLC_DATASET_SIZE: '500'
group: size
calibration:
deps:
@@ -71,20 +71,20 @@ variations:
- openimages-calibration
tags: get,openimages,calibration
env:
- CM_DATASET_CALIBRATION: 'yes'
+ MLC_DATASET_CALIBRATION: 'yes'
group: dataset-type
new_env_keys:
- - CM_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH
- - CM_CALIBRATION_DATASET_PATH
- - CM_CALIBRATION_DATASET_PATH_ROOT
+ - MLC_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH
+ - MLC_CALIBRATION_DATASET_PATH
+ - MLC_CALIBRATION_DATASET_PATH_ROOT
custom-annotations:
env:
- CM_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS: 'yes'
+ MLC_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS: 'yes'
group: annotations
default-annotations:
default: true
env:
- CM_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS: 'no'
+ MLC_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS: 'no'
group: annotations
filter: {}
filter,calibration:
@@ -97,11 +97,11 @@ variations:
tags: _filter-size.#
full:
env:
- CM_DATASET_SIZE: ''
+ MLC_DATASET_SIZE: ''
group: size
size.#:
env:
- CM_DATASET_SIZE: '#'
+ MLC_DATASET_SIZE: '#'
group: size
using-fiftyone:
add_deps_recursive:
@@ -114,13 +114,13 @@ variations:
validation:
default: true
env:
- CM_DATASET_CALIBRATION: 'no'
+ MLC_DATASET_CALIBRATION: 'no'
group: dataset-type
new_env_keys:
- - CM_DATASET_PATH
- - CM_DATASET_PATH_ROOT
- - CM_DATASET_OPENIMAGES_DATASET_PATH
- - CM_DATASET_OPENIMAGES_DATASET_PATH_ROOT
- - CM_DATASET_ANNOTATIONS_DIR_PATH
- - CM_DATASET_ANNOTATIONS_FILE_PATH
- - CM_DATASET_VALIDATION_ANNOTATIONS_FILE_PATH
+ - MLC_DATASET_PATH
+ - MLC_DATASET_PATH_ROOT
+ - MLC_DATASET_OPENIMAGES_DATASET_PATH
+ - MLC_DATASET_OPENIMAGES_DATASET_PATH_ROOT
+ - MLC_DATASET_ANNOTATIONS_DIR_PATH
+ - MLC_DATASET_ANNOTATIONS_FILE_PATH
+ - MLC_DATASET_VALIDATION_ANNOTATIONS_FILE_PATH
diff --git a/script/get-dataset-openimages/run.bat b/script/get-dataset-openimages/run.bat
index 742542d25..017eac131 100644
--- a/script/get-dataset-openimages/run.bat
+++ b/script/get-dataset-openimages/run.bat
@@ -1,21 +1,21 @@
@echo off
set CUR_DIR=%cd%
-set SCRIPT_DIR=%CM_TMP_CURRENT_SCRIPT_PATH%
+set SCRIPT_DIR=%MLC_TMP_CURRENT_SCRIPT_PATH%
if not exist install mkdir install
set INSTALL_DIR=%CUR_DIR%\install
-cd %CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH%
+cd %MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH%
-if not "%CM_DATASET_SIZE%" == "" (
- set MAX_IMAGES=--max-images %CM_DATASET_SIZE% --seed 42
+if not "%MLC_DATASET_SIZE%" == "" (
+ set MAX_IMAGES=--max-images %MLC_DATASET_SIZE% --seed 42
) else (
set MAX_IMAGES=
)
-%CM_PYTHON_BIN% tools\openimages.py %MAX_IMAGES% --dataset-dir=%INSTALL_DIR% --output-labels=openimages-mlperf.json --classes %CM_DATASET_OPENIMAGES_CLASSES%
+%MLC_PYTHON_BIN% tools\openimages.py %MAX_IMAGES% --dataset-dir=%INSTALL_DIR% --output-labels=openimages-mlperf.json --classes %MLC_DATASET_OPENIMAGES_CLASSES%
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
rem Next is a hack to support MLPerf inference on Windows
diff --git a/script/get-dataset-openimages/run.sh b/script/get-dataset-openimages/run.sh
index 2fc6eaddf..2a034ed31 100644
--- a/script/get-dataset-openimages/run.sh
+++ b/script/get-dataset-openimages/run.sh
@@ -1,6 +1,6 @@
#!/bin/bash
python3() {
- ${CM_PYTHON_BIN_WITH_PATH} "$@"
+ ${MLC_PYTHON_BIN_WITH_PATH} "$@"
}
export -f python3
@@ -8,11 +8,11 @@ CUR=${PWD}
mkdir -p install
INSTALL_DIR=${CUR}/install
-cd ${CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH}
+cd ${MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH}
cd tools
-if [[ ${CM_DATASET_CALIBRATION} == "no" ]]; then
- if [ ! -z ${CM_DATASET_SIZE} ]; then
- max_images=" -m ${CM_DATASET_SIZE}"
+if [[ ${MLC_DATASET_CALIBRATION} == "no" ]]; then
+ if [ ! -z ${MLC_DATASET_SIZE} ]; then
+ max_images=" -m ${MLC_DATASET_SIZE}"
else
max_images=""
fi
@@ -21,8 +21,8 @@ if [[ ${CM_DATASET_CALIBRATION} == "no" ]]; then
eval $cmd
test $? -eq 0 || exit 1
else
- if [ -n ${CM_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH} ]; then
- calibration_file_string=" --calibration-file ${CM_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH}"
+ if [ -n ${MLC_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH} ]; then
+ calibration_file_string=" --calibration-file ${MLC_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH}"
else
calibration_file_string=""
fi
diff --git a/script/get-dataset-openorca/customize.py b/script/get-dataset-openorca/customize.py
index 6daca4bf6..3bb95817b 100644
--- a/script/get-dataset-openorca/customize.py
+++ b/script/get-dataset-openorca/customize.py
@@ -12,13 +12,13 @@ def preprocess(i):
def postprocess(i):
env = i['env']
- if env.get('CM_DATASET_CALIBRATION', '') == "no":
- env['CM_DATASET_PATH_ROOT'] = env['CM_DATASET_OPENORCA_PATH']
- env['CM_DATASET_PATH'] = env['CM_DATASET_OPENORCA_PATH']
- env['CM_DATASET_OPENORCA_PARQUET'] = os.path.join(
- env['CM_DATASET_OPENORCA_PATH'], '1M-GPT4-Augmented.parquet')
+ if env.get('MLC_DATASET_CALIBRATION', '') == "no":
+ env['MLC_DATASET_PATH_ROOT'] = env['MLC_DATASET_OPENORCA_PATH']
+ env['MLC_DATASET_PATH'] = env['MLC_DATASET_OPENORCA_PATH']
+ env['MLC_DATASET_OPENORCA_PARQUET'] = os.path.join(
+ env['MLC_DATASET_OPENORCA_PATH'], '1M-GPT4-Augmented.parquet')
else:
- env['CM_CALIBRATION_DATASET_PATH'] = os.path.join(
+ env['MLC_CALIBRATION_DATASET_PATH'] = os.path.join(
os.getcwd(), 'install', 'calibration', 'data')
return {'return': 0}
diff --git a/script/get-dataset-openorca/meta.yaml b/script/get-dataset-openorca/meta.yaml
index c860b0213..2e4856f4c 100644
--- a/script/get-dataset-openorca/meta.yaml
+++ b/script/get-dataset-openorca/meta.yaml
@@ -5,22 +5,22 @@ cache: true
category: AI/ML datasets
category_sort: 8500
default_env:
- CM_DATASET_CALIBRATION: 'no'
+ MLC_DATASET_CALIBRATION: 'no'
deps:
- env:
- CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_DATASET_OPENORCA_PATH
+ MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_DATASET_OPENORCA_PATH
extra_cache_tags: openorca,repo,src
force_env_keys:
- - CM_GIT_*
+ - MLC_GIT_*
names:
- openorca-src
tags: get,git,repo,_lfs,_repo.https://huggingface.co/datasets/Open-Orca/OpenOrca
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
env:
- CM_DATASET: OPENORCA
+ MLC_DATASET: OPENORCA
new_env_keys:
-- CM_DATASET_*
+- MLC_DATASET_*
tags:
- get
- dataset
@@ -31,27 +31,27 @@ uid: 9252c4d90d5940b7
variations:
'500':
env:
- CM_DATASET_SIZE: '500'
+ MLC_DATASET_SIZE: '500'
group: size
'60':
env:
- CM_DATASET_SIZE: '60'
+ MLC_DATASET_SIZE: '60'
group: size
calibration:
env:
- CM_DATASET_CALIBRATION: 'yes'
+ MLC_DATASET_CALIBRATION: 'yes'
group: dataset-type
full:
default: true
env:
- CM_DATASET_SIZE: '24576'
+ MLC_DATASET_SIZE: '24576'
group: size
size.#:
env:
- CM_DATASET_SIZE: '#'
+ MLC_DATASET_SIZE: '#'
group: size
validation:
default: true
env:
- CM_DATASET_CALIBRATION: 'no'
+ MLC_DATASET_CALIBRATION: 'no'
group: dataset-type
diff --git a/script/get-dataset-squad-vocab/customize.py b/script/get-dataset-squad-vocab/customize.py
index 538250b19..5808e00b9 100644
--- a/script/get-dataset-squad-vocab/customize.py
+++ b/script/get-dataset-squad-vocab/customize.py
@@ -14,6 +14,6 @@ def preprocess(i):
def postprocess(i):
env = i['env']
- env['CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH'] = env['CM_DATASET_SQUAD_VOCAB_PATH']
+ env['MLC_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH'] = env['MLC_DATASET_SQUAD_VOCAB_PATH']
return {'return': 0}
diff --git a/script/get-dataset-squad-vocab/meta.yaml b/script/get-dataset-squad-vocab/meta.yaml
index aa1bad21c..f06c24282 100644
--- a/script/get-dataset-squad-vocab/meta.yaml
+++ b/script/get-dataset-squad-vocab/meta.yaml
@@ -4,21 +4,21 @@ automation_uid: 5b4e0237da074764
cache: true
category: AI/ML datasets
new_env_keys:
-- CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH
-- CM_DATASET_SQUAD_VOCAB_PATH
+- MLC_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH
+- MLC_DATASET_SQUAD_VOCAB_PATH
prehook_deps:
- env:
- CM_DOWNLOAD_FINAL_ENV_NAME: CM_DATASET_SQUAD_VOCAB_PATH
+ MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_DATASET_SQUAD_VOCAB_PATH
extra_cache_tags: bert,get,dataset-squad-vocab
force_cache: true
tags: download-and-extract,_wget
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
update_tags_from_env_with_prefix:
_url.:
- - CM_PACKAGE_URL
+ - MLC_PACKAGE_URL
print_env_at_the_end:
- CM_DATASET_SQUAD_VOCAB_PATH: Path to SQUAD vocab file
+ MLC_DATASET_SQUAD_VOCAB_PATH: Path to SQUAD vocab file
tags:
- get
- aux
@@ -33,7 +33,7 @@ variations:
from.zenodo:
default: true
env:
- CM_DOWNLOAD_CHECKSUM: 64800d5d8528ce344256daf115d4965e
- CM_DOWNLOAD_FILENAME: vocab.txt
- CM_PACKAGE_URL: https://zenodo.org/record/3733868/files/vocab.txt
+ MLC_DOWNLOAD_CHECKSUM: 64800d5d8528ce344256daf115d4965e
+ MLC_DOWNLOAD_FILENAME: vocab.txt
+ MLC_PACKAGE_URL: https://zenodo.org/record/3733868/files/vocab.txt
group: download-source
diff --git a/script/get-dataset-squad/README-extra.md b/script/get-dataset-squad/README-extra.md
index 4497abe6b..deb677c5f 100644
--- a/script/get-dataset-squad/README-extra.md
+++ b/script/get-dataset-squad/README-extra.md
@@ -11,9 +11,9 @@ where [VERSION] is one of
* `2.0`
## Exported Variables
-* `CM_DATASET_SQUAD_PATH:` Directory path to SQUAD dataset
-* `CM_DATASET_SQUAD_TRAIN_PATH:` JSON file path to SQUAD training dataset
-* `CM_DATASET_SQUAD_VAL_PATH:` JSON file path to SQUAD validation dataset
+* `MLC_DATASET_SQUAD_PATH:` Directory path to SQUAD dataset
+* `MLC_DATASET_SQUAD_TRAIN_PATH:` JSON file path to SQUAD training dataset
+* `MLC_DATASET_SQUAD_VAL_PATH:` JSON file path to SQUAD validation dataset
## Supported and Tested OS
1. Ubuntu 18.04, 20.04, 22.04
diff --git a/script/get-dataset-squad/customize.py b/script/get-dataset-squad/customize.py
index c372a75d8..655b7df97 100644
--- a/script/get-dataset-squad/customize.py
+++ b/script/get-dataset-squad/customize.py
@@ -14,9 +14,9 @@ def preprocess(i):
def postprocess(i):
env = i['env']
- env['CM_DATASET_SQUAD_PATH'] = os.path.dirname(
- env['CM_DATASET_SQUAD_VAL_PATH'])
- env['CM_DATASET_PATH'] = os.path.dirname(env['CM_DATASET_SQUAD_VAL_PATH'])
- # env['CM_DATASET_SQUAD_VAL_PATH'] = os.path.join(os.getcwd(), env['CM_VAL_FILENAME'])
+ env['MLC_DATASET_SQUAD_PATH'] = os.path.dirname(
+ env['MLC_DATASET_SQUAD_VAL_PATH'])
+ env['MLC_DATASET_PATH'] = os.path.dirname(env['MLC_DATASET_SQUAD_VAL_PATH'])
+ # env['MLC_DATASET_SQUAD_VAL_PATH'] = os.path.join(os.getcwd(), env['MLC_VAL_FILENAME'])
return {'return': 0}
diff --git a/script/get-dataset-squad/meta.yaml b/script/get-dataset-squad/meta.yaml
index d47fc9ce3..cc55e3b50 100644
--- a/script/get-dataset-squad/meta.yaml
+++ b/script/get-dataset-squad/meta.yaml
@@ -7,22 +7,22 @@ default_version: '1.1'
deps:
- tags: get,sys-utils-cm
env:
- CM_DATASET: SQUAD
+ MLC_DATASET: SQUAD
new_env_keys:
-- CM_DATASET_*
+- MLC_DATASET_*
prehook_deps:
- env:
- CM_DOWNLOAD_FINAL_ENV_NAME: CM_DATASET_SQUAD_VAL_PATH
+ MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_DATASET_SQUAD_VAL_PATH
extra_cache_tags: bert,get,dataset-squad
force_cache: true
tags: download-and-extract,_wget
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
update_tags_from_env_with_prefix:
_url.:
- - CM_PACKAGE_URL
+ - MLC_PACKAGE_URL
print_env_at_the_end:
- CM_DATASET_SQUAD_VAL_PATH: Path to SQUAD dataset
+ MLC_DATASET_SQUAD_VAL_PATH: Path to SQUAD dataset
tags:
- get
- dataset
@@ -34,15 +34,15 @@ uid: 6651c119c3ae49b3
versions:
'1.1':
env:
- CM_DOWNLOAD_CHECKSUM: 3e85deb501d4e538b6bc56f786231552
- CM_DOWNLOAD_FILENAME: dev-v1.1.json
- CM_PACKAGE_URL: https://raw.githubusercontent.com/rajpurkar/SQuAD-explorer/master/dataset/dev-v1.1.json
- CM_TRAIN_FILENAME: train-v1.1.json
- CM_VAL_FILENAME: dev-v1.1.json
+ MLC_DOWNLOAD_CHECKSUM: 3e85deb501d4e538b6bc56f786231552
+ MLC_DOWNLOAD_FILENAME: dev-v1.1.json
+ MLC_PACKAGE_URL: https://raw.githubusercontent.com/rajpurkar/SQuAD-explorer/master/dataset/dev-v1.1.json
+ MLC_TRAIN_FILENAME: train-v1.1.json
+ MLC_VAL_FILENAME: dev-v1.1.json
'2.0':
env:
- CM_DOWNLOAD_CHECKSUM: 246adae8b7002f8679c027697b0b7cf8
- CM_DOWNLOAD_FILENAME: dev-v2.0.json
- CM_PACKAGE_URL: https://raw.githubusercontent.com/rajpurkar/SQuAD-explorer/master/dataset/dev-v2.0.json
- CM_TRAIN_FILENAME: train-v2.0.json
- CM_VAL_FILENAME: dev-v2.0.json
+ MLC_DOWNLOAD_CHECKSUM: 246adae8b7002f8679c027697b0b7cf8
+ MLC_DOWNLOAD_FILENAME: dev-v2.0.json
+ MLC_PACKAGE_URL: https://raw.githubusercontent.com/rajpurkar/SQuAD-explorer/master/dataset/dev-v2.0.json
+ MLC_TRAIN_FILENAME: train-v2.0.json
+ MLC_VAL_FILENAME: dev-v2.0.json
diff --git a/script/get-dlrm-data-mlperf-inference/customize.py b/script/get-dlrm-data-mlperf-inference/customize.py
index 2684d2594..366a3425d 100644
--- a/script/get-dlrm-data-mlperf-inference/customize.py
+++ b/script/get-dlrm-data-mlperf-inference/customize.py
@@ -9,7 +9,7 @@ def preprocess(i):
env = i['env']
dlrm_data_path = env.get(
- 'CM_DLRM_DATA_PATH', env.get(
+ 'MLC_DLRM_DATA_PATH', env.get(
'DLRM_DATA_PATH', ''))
if dlrm_data_path == '':
print(
@@ -35,48 +35,48 @@ def preprocess(i):
automation = i['automation']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
- variation = env['CM_DLRM_DATA_VARIATION']
+ variation = env['MLC_DLRM_DATA_VARIATION']
if variation == "nvidia":
if not os.path.exists(os.path.join(dlrm_data_path, "model")):
print(f'model directory is missing inside {dlrm_data_path}')
- env['CM_DLRM_MODEL_DOWNLOAD'] = True
+ env['MLC_DLRM_MODEL_DOWNLOAD'] = True
if not os.path.exists(os.path.join(dlrm_data_path, "criteo")):
print(f'criteo directory is missing inside {dlrm_data_path}')
- env['CM_DLRM_DATASET_DOWNLOAD'] = True
+ env['MLC_DLRM_DATASET_DOWNLOAD'] = True
if not os.path.exists(os.path.join(
dlrm_data_path, "model", "model_weights")):
print(
f'model_weights directory is missing inside {dlrm_data_path}/model')
- env['CM_DLRM_MODEL_DOWNLOAD'] = True
+ env['MLC_DLRM_MODEL_DOWNLOAD'] = True
if not os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23")):
print(f'day23 directory is missing inside {dlrm_data_path}/day23')
- env['CM_DLRM_DATASET_DOWNLOAD'] = True
+ env['MLC_DLRM_DATASET_DOWNLOAD'] = True
if not os.path.exists(os.path.join(
dlrm_data_path, "criteo", "day23", "fp32")):
print(
f'fp32 directory is missing inside {dlrm_data_path}/criteo/day23')
- env['CM_DLRM_DATASET_DOWNLOAD'] = True
+ env['MLC_DLRM_DATASET_DOWNLOAD'] = True
if not os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_sparse_multi_hot.npz")) and not os.path.exists(
os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_sparse_multi_hot_unpacked")):
print(
f'day_23_sparse_multi_hot.npz or day_23_sparse_multi_hot_unpacked is missing inside {dlrm_data_path}/criteo/day23/fp32')
- env['CM_DLRM_DATASET_DOWNLOAD'] = True
+ env['MLC_DLRM_DATASET_DOWNLOAD'] = True
if not os.path.exists(os.path.join(
dlrm_data_path, "criteo", "day23", "fp32", "day_23_dense.npy")):
print(
f'day_23_dense.npy is missing inside {dlrm_data_path}/criteo/day23/fp32')
- env['CM_DLRM_DATASET_DOWNLOAD'] = True
+ env['MLC_DLRM_DATASET_DOWNLOAD'] = True
if not os.path.exists(os.path.join(
dlrm_data_path, "criteo", "day23", "fp32", "day_23_labels.npy")):
print(
f'day_23_labels.npy is missing inside {dlrm_data_path}/criteo/day23/fp32')
- env['CM_DLRM_DATASET_DOWNLOAD'] = True
+ env['MLC_DLRM_DATASET_DOWNLOAD'] = True
if not os.path.exists(os.path.join(
dlrm_data_path, "criteo", "day23", "raw_data")):
- if env.get('CM_CRITEO_DAY23_RAW_DATA_PATH', '') == '':
+ if env.get('MLC_CRITEO_DAY23_RAW_DATA_PATH', '') == '':
return {
'return': 1, 'error': 'Raw data missing inside {dlrm_data_path}/criteo/day23. Specify the target folder through input mapping(--criteo_day23_raw_data_path="path to raw criteo dataset")'}
@@ -84,14 +84,14 @@ def preprocess(i):
xsep = ' && '
# addition of run command to download the datasets and model
- if env.get('CM_DLRM_DATASET_DOWNLOAD', False) == True:
- run_cmd += 'cp -r "$CM_CRITEO_PREPROCESSED_PATH"/. ' + \
+ if env.get('MLC_DLRM_DATASET_DOWNLOAD', False) == True:
+ run_cmd += 'cp -r "$MLC_CRITEO_PREPROCESSED_PATH"/. ' + \
os.path.join(dlrm_data_path, "criteo", "day23", "fp32") + xsep
- if env.get('CM_DLRM_MODEL_DOWNLOAD', False) == True:
- run_cmd += 'cp -r "$CM_ML_MODEL_FILE_WITH_PATH"/. ' + \
+ if env.get('MLC_DLRM_MODEL_DOWNLOAD', False) == True:
+ run_cmd += 'cp -r "$MLC_ML_MODEL_FILE_WITH_PATH"/. ' + \
os.path.join(dlrm_data_path, "model") + xsep
- if env.get('CM_DLRM_DATASET_DOWNLOAD', '') != True:
+ if env.get('MLC_DLRM_DATASET_DOWNLOAD', '') != True:
if not os.path.exists(os.path.join(
dlrm_data_path, "criteo", "day23", "fp32", "day_23_sparse_multi_hot_unpacked")):
os.system(f"unzip {os.path.join(dlrm_data_path, 'criteo', 'day23', 'fp32', 'day_23_sparse_multi_hot.npz')} -d {os.path.join(dlrm_data_path, 'criteo', 'day23', 'fp32', 'day_23_sparse_multi_hot_unpacked')}")
@@ -99,7 +99,7 @@ def preprocess(i):
run_cmd += f"unzip {os.path.join(dlrm_data_path, 'criteo', 'day23', 'fp32', 'day_23_sparse_multi_hot.npz')} -d {os.path.join(dlrm_data_path, 'criteo', 'day23', 'fp32', 'day_23_sparse_multi_hot_unpacked')}" + xsep
if os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23", "fp32",
- "day_23_sparse_multi_hot.npz")) or env['CM_DLRM_DATASET_DOWNLOAD'] == True:
+ "day_23_sparse_multi_hot.npz")) or env['MLC_DLRM_DATASET_DOWNLOAD'] == True:
file_path = os.path.join(
dlrm_data_path,
"criteo",
@@ -131,12 +131,12 @@ def preprocess(i):
run_cmd += ("cd {}; md5sum -c {}").format(dir_path,
os.path.join(script_path, "checksums.txt"))
- env['CM_DLRM_V2_DAY23_FILE_PATH'] = os.path.join(
+ env['MLC_DLRM_V2_DAY23_FILE_PATH'] = os.path.join(
dlrm_data_path, "criteo", "day23", "raw_data")
- env['CM_DLRM_V2_AGGREGATION_TRACE_FILE_PATH'] = os.path.join(
+ env['MLC_DLRM_V2_AGGREGATION_TRACE_FILE_PATH'] = os.path.join(
dlrm_data_path, "criteo", "day23", "sample_partition.txt")
- env['CM_RUN_CMD'] = run_cmd
+ env['MLC_RUN_CMD'] = run_cmd
return {'return': 0}
@@ -145,11 +145,11 @@ def postprocess(i):
env = i['env']
- if env.get('CM_DLRM_DATA_PATH', '') == '' and env.get(
+ if env.get('MLC_DLRM_DATA_PATH', '') == '' and env.get(
'DLRM_DATA_PATH', '') == '':
- env['CM_DLRM_DATA_PATH'] = os.getcwd()
+ env['MLC_DLRM_DATA_PATH'] = os.getcwd()
else:
- env['CM_GET_DEPENDENT_CACHED_PATH'] = env.get(
- 'CM_DLRM_DATA_PATH', env['DLRM_DATA_PATH'])
+ env['MLC_GET_DEPENDENT_CACHED_PATH'] = env.get(
+ 'MLC_DLRM_DATA_PATH', env['DLRM_DATA_PATH'])
return {'return': 0}
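preprocess builds one long shell command by appending ' && '-joined fragments whenever a download flag was set earlier in the checks; run.sh then executes the accumulated MLC_RUN_CMD. The accumulation pattern in isolation:

```python
import os

def build_fixup_cmd(env, dlrm_data_path):
    # Sketch of the run_cmd assembly above: copy downloaded pieces into place.
    xsep = ' && '
    fragments = []
    if env.get('MLC_DLRM_DATASET_DOWNLOAD'):
        fragments.append('cp -r "$MLC_CRITEO_PREPROCESSED_PATH"/. ' +
                         os.path.join(dlrm_data_path, 'criteo', 'day23', 'fp32'))
    if env.get('MLC_DLRM_MODEL_DOWNLOAD'):
        fragments.append('cp -r "$MLC_ML_MODEL_FILE_WITH_PATH"/. ' +
                         os.path.join(dlrm_data_path, 'model'))
    return xsep.join(fragments)

print(build_fixup_cmd({'MLC_DLRM_DATASET_DOWNLOAD': True}, '/data/dlrm'))
```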
diff --git a/script/get-dlrm-data-mlperf-inference/meta.yaml b/script/get-dlrm-data-mlperf-inference/meta.yaml
index f287e37db..839a97ecf 100644
--- a/script/get-dlrm-data-mlperf-inference/meta.yaml
+++ b/script/get-dlrm-data-mlperf-inference/meta.yaml
@@ -12,30 +12,30 @@ uid: 34bdfcd9c8364935
docker:
real_run: false
new_env_keys:
- - CM_DLRM_DATA_PATH
+ - MLC_DLRM_DATA_PATH
- DLRM_DATA_PATH
input_mapping:
- dlrm_data_path: CM_DLRM_DATA_PATH
- criteo_day23_raw_data_path: CM_CRITEO_DAY23_RAW_DATA_PATH
+ dlrm_data_path: MLC_DLRM_DATA_PATH
+ criteo_day23_raw_data_path: MLC_CRITEO_DAY23_RAW_DATA_PATH
prehook_deps:
- tags: get,ml-model,dlrm,_pytorch
enable_if_env:
- CM_DLRM_MODEL_DOWNLOAD:
+ MLC_DLRM_MODEL_DOWNLOAD:
- "on"
- tags: get,dataset,preprocessed,criteo,_mlc
enable_if_env:
- CM_DLRM_DATASET_DOWNLOAD:
+ MLC_DLRM_DATASET_DOWNLOAD:
- "on"
variations:
nvidia:
group: implementation
default: true
new_env_keys:
- - CM_DLRM_V2_DAY23_FILE_PATH
- - CM_DLRM_V2_AGGREGATION_TRACE_FILE_PATH
+ - MLC_DLRM_V2_DAY23_FILE_PATH
+ - MLC_DLRM_V2_AGGREGATION_TRACE_FILE_PATH
env:
- CM_DLRM_DATA_VARIATION: nvidia
+ MLC_DLRM_DATA_VARIATION: nvidia
intel:
group: implementation
env:
- CM_DLRM_DATA_VARIATION: intel
+ MLC_DLRM_DATA_VARIATION: intel
diff --git a/script/get-dlrm-data-mlperf-inference/run.sh b/script/get-dlrm-data-mlperf-inference/run.sh
index d1cb7df69..180056e2f 100644
--- a/script/get-dlrm-data-mlperf-inference/run.sh
+++ b/script/get-dlrm-data-mlperf-inference/run.sh
@@ -1,11 +1,11 @@
#!/bin/bash
-#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+#MLC Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH}

#To export any variable
#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
-#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
@@ -17,11 +17,11 @@ function run() {
echo "Running: "
echo "$1"
echo ""
- if [[ ${CM_FAKE_RUN} != 'yes' ]]; then
+ if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then
eval "$1"
exit_if_error
fi
}
#Add your run commands here...
-run "$CM_RUN_CMD"
+run "$MLC_RUN_CMD"
diff --git a/script/get-dlrm/customize.py b/script/get-dlrm/customize.py
index e7c634f3c..33a8ca2ca 100644
--- a/script/get-dlrm/customize.py
+++ b/script/get-dlrm/customize.py
@@ -13,17 +13,17 @@ def preprocess(i):
env = i['env']
meta = i['meta']
- if 'CM_GIT_DEPTH' not in env:
- env['CM_GIT_DEPTH'] = ''
+ if 'MLC_GIT_DEPTH' not in env:
+ env['MLC_GIT_DEPTH'] = ''
- if 'CM_GIT_RECURSE_SUBMODULES' not in env:
- env['CM_GIT_RECURSE_SUBMODULES'] = ''
+ if 'MLC_GIT_RECURSE_SUBMODULES' not in env:
+ env['MLC_GIT_RECURSE_SUBMODULES'] = ''
- need_version = env.get('CM_VERSION', '')
+ need_version = env.get('MLC_VERSION', '')
versions = meta['versions']
if need_version != '' and not need_version in versions:
- env['CM_GIT_CHECKOUT'] = need_version
+ env['MLC_GIT_CHECKOUT'] = need_version
return {'return': 0}
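The version fallback above is shared by several get-* scripts: a requested MLC_VERSION that matches a key in meta['versions'] picks up that entry's env (for example MLC_GIT_CHECKOUT: main), while anything else is passed through as a raw git ref. In isolation:

```python
def resolve_checkout(env, meta):
    # Unknown versions become the checkout target directly.
    need_version = env.get('MLC_VERSION', '')
    if need_version != '' and need_version not in meta['versions']:
        env['MLC_GIT_CHECKOUT'] = need_version
    return env

# 'v1.0-dlrm' is a hypothetical ref used only for illustration:
env = resolve_checkout({'MLC_VERSION': 'v1.0-dlrm'}, {'versions': {'main': {}}})
assert env['MLC_GIT_CHECKOUT'] == 'v1.0-dlrm'
```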
diff --git a/script/get-dlrm/meta.yaml b/script/get-dlrm/meta.yaml
index cc598990f..a5f7e11f1 100644
--- a/script/get-dlrm/meta.yaml
+++ b/script/get-dlrm/meta.yaml
@@ -4,9 +4,9 @@ automation_uid: 5b4e0237da074764
cache: true
category: AI/ML models
default_env:
- CM_GIT_DEPTH: --depth 10
- CM_GIT_PATCH: 'no'
- CM_GIT_URL: https://github.com/facebookresearch/dlrm.git
+ MLC_GIT_DEPTH: --depth 10
+ MLC_GIT_PATCH: 'no'
+ MLC_GIT_URL: https://github.com/facebookresearch/dlrm.git
default_version: main
deps:
- tags: detect,os
@@ -20,8 +20,8 @@ uid: 63680ac2449a4241
variations:
full-history:
env:
- CM_GIT_DEPTH: ''
+ MLC_GIT_DEPTH: ''
versions:
main:
env:
- CM_GIT_CHECKOUT: main
+ MLC_GIT_CHECKOUT: main
diff --git a/script/get-dlrm/run.sh b/script/get-dlrm/run.sh
index 37e9e59a7..5aefd4511 100644
--- a/script/get-dlrm/run.sh
+++ b/script/get-dlrm/run.sh
@@ -1,12 +1,12 @@
#!/bin/bash
CUR_DIR=$PWD
-SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH}
+SCRIPT_DIR=${MLC_TMP_CURRENT_SCRIPT_PATH}
echo "******************************************************"
-echo "Cloning DLRM from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT} ${CM_GIT_DEPTH} ${CM_GIT_RECURSE_SUBMODULES}..."
+echo "Cloning DLRM from ${MLC_GIT_URL} with branch ${MLC_GIT_CHECKOUT} ${MLC_GIT_DEPTH} ${MLC_GIT_RECURSE_SUBMODULES}..."
if [ ! -d "dlrm" ]; then
- git clone ${CM_GIT_RECURSE_SUBMODULES} -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} ${CM_GIT_DEPTH} dlrm
+ git clone ${MLC_GIT_RECURSE_SUBMODULES} -b "${MLC_GIT_CHECKOUT}" ${MLC_GIT_URL} ${MLC_GIT_DEPTH} dlrm
if [ "${?}" != "0" ]; then exit 1; fi
fi
diff --git a/script/get-docker/customize.py b/script/get-docker/customize.py
index 08975cf52..c8aaf7376 100644
--- a/script/get-docker/customize.py
+++ b/script/get-docker/customize.py
@@ -15,13 +15,13 @@ def preprocess(i):
file_name = 'docker.exe' if os_info['platform'] == 'windows' else 'docker'
env['FILE_NAME'] = file_name
- if 'CM_DOCKER_BIN_WITH_PATH' not in env:
+ if 'MLC_DOCKER_BIN_WITH_PATH' not in env:
r = i['automation'].find_artifact({'file_name': file_name,
'env': env,
'os_info': os_info,
'default_path_env_key': 'PATH',
'detect_version': True,
- 'env_path_key': 'CM_DOCKER_BIN_WITH_PATH',
+ 'env_path_key': 'MLC_DOCKER_BIN_WITH_PATH',
'run_script_input': i['run_script_input'],
'recursion_spaces': recursion_spaces})
if r['return'] > 0:
@@ -40,7 +40,7 @@ def preprocess(i):
def detect_version(i):
r = i['automation'].parse_version({'match_text': r'[Docker|podman] version\s*([\d.]+)',
'group_number': 1,
- 'env_key': 'CM_DOCKER_VERSION',
+ 'env_key': 'MLC_DOCKER_VERSION',
'which_env': i['env']})
if r['return'] > 0:
return r
@@ -66,16 +66,16 @@ def postprocess(i):
version = r['version']
tool = r['tool']
- found_file_path = env['CM_DOCKER_BIN_WITH_PATH']
+ found_file_path = env['MLC_DOCKER_BIN_WITH_PATH']
found_path = os.path.dirname(found_file_path)
- env['CM_DOCKER_INSTALLED_PATH'] = found_path
+ env['MLC_DOCKER_INSTALLED_PATH'] = found_path
env['+PATH'] = [found_path]
- env['CM_DOCKER_CACHE_TAGS'] = 'version-' + version
+ env['MLC_DOCKER_CACHE_TAGS'] = 'version-' + version
- env['CM_DOCKER_VERSION'] = version
+ env['MLC_DOCKER_VERSION'] = version
- env['CM_CONTAINER_TOOL'] = tool
+ env['MLC_CONTAINER_TOOL'] = tool
return {'return': 0, 'version': version}
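detect_version feeds the banner of docker --version (or podman) through parse_version with the regex shown; the effective anchor is 'version' plus the following digits, since the leading '[Docker|podman]' bracket is a character class matching a single character rather than an alternation. A standalone check of the digit extraction:

```python
import re

def parse_tool_version(banner):
    # Same capture group as the pattern above: digits and dots after 'version'.
    m = re.search(r'version\s*([\d.]+)', banner)
    return m.group(1) if m else None

assert parse_tool_version('Docker version 24.0.7, build afdd53b') == '24.0.7'
assert parse_tool_version('podman version 4.9.3') == '4.9.3'
```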
diff --git a/script/get-docker/meta.yaml b/script/get-docker/meta.yaml
index 881039852..b3a5f1f89 100644
--- a/script/get-docker/meta.yaml
+++ b/script/get-docker/meta.yaml
@@ -9,8 +9,8 @@ docker_input_mapping: {}
input_description: {}
input_mapping: {}
new_env_keys: [
- "CM_DOCKER_VERSION",
- "CM_CONTAINER_TOOL"
+ "MLC_DOCKER_VERSION",
+ "MLC_CONTAINER_TOOL"
]
new_state_keys: []
post_deps: []
diff --git a/script/get-gcc/README-extra.md b/script/get-gcc/README-extra.md
index bb9d97694..a20669f48 100644
--- a/script/get-gcc/README-extra.md
+++ b/script/get-gcc/README-extra.md
@@ -2,13 +2,13 @@
This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed gcc on the system.
## Exported Variables
-* `CM_GCC_BIN`
-* `CM_GCC_BIN_WITH_PATH`
-* `CM_C_COMPILER_BIN`
-* `CM_C_COMPILER_WITH_PATH`
-* `CM_CXX_COMPILER_BIN`
-* `CM_CXX_COMPILER_WITH_PATH`
-* `CM_COMPILER_*`
+* `MLC_GCC_BIN`
+* `MLC_GCC_BIN_WITH_PATH`
+* `MLC_C_COMPILER_BIN`
+* `MLC_C_COMPILER_WITH_PATH`
+* `MLC_CXX_COMPILER_BIN`
+* `MLC_CXX_COMPILER_WITH_PATH`
+* `MLC_COMPILER_*`
## Supported and Tested OS
1. Ubuntu 18.04, 20.04, 22.04
diff --git a/script/get-gcc/customize.py b/script/get-gcc/customize.py
index 2203b8a48..a8c8b3099 100644
--- a/script/get-gcc/customize.py
+++ b/script/get-gcc/customize.py
@@ -11,26 +11,26 @@ def preprocess(i):
recursion_spaces = i['recursion_spaces']
file_name_c = 'gcc.exe' if os_info['platform'] == 'windows' else 'gcc'
- if env.get('CM_HOST_OS_FLAVOR', '') == 'rhel':
- if "12" in env.get('CM_VERSION', '') or "12" in env.get(
- 'CM_VERSION_MIN', ''):
- if env.get('CM_TMP_PATH', '') == '':
- env['CM_TMP_PATH'] = ''
- env['CM_TMP_PATH'] += "/opt/rh/gcc-toolset-12/root/usr/bin"
- env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'
-
- if 'CM_GCC_BIN_WITH_PATH' not in env:
+ if env.get('MLC_HOST_OS_FLAVOR', '') == 'rhel':
+ if "12" in env.get('MLC_VERSION', '') or "12" in env.get(
+ 'MLC_VERSION_MIN', ''):
+ if env.get('MLC_TMP_PATH', '') == '':
+ env['MLC_TMP_PATH'] = ''
+ env['MLC_TMP_PATH'] += "/opt/rh/gcc-toolset-12/root/usr/bin"
+ env['MLC_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'
+
+ if 'MLC_GCC_BIN_WITH_PATH' not in env:
r = i['automation'].find_artifact({'file_name': file_name_c,
'env': env,
'os_info': os_info,
'default_path_env_key': 'PATH',
'detect_version': True,
- 'env_path_key': 'CM_GCC_BIN_WITH_PATH',
+ 'env_path_key': 'MLC_GCC_BIN_WITH_PATH',
'run_script_input': i['run_script_input'],
'recursion_spaces': recursion_spaces})
if r['return'] > 0:
# if r['return'] == 16:
- # if env.get('CM_TMP_FAIL_IF_NOT_FOUND','').lower() == 'yes':
+ # if env.get('MLC_TMP_FAIL_IF_NOT_FOUND','').lower() == 'yes':
# return r
#
# print (recursion_spaces+' # {}'.format(r['error']))
@@ -46,7 +46,7 @@ def preprocess(i):
def detect_version(i):
r = i['automation'].parse_version({'match_text': r'\s+([\d.]+)',
'group_number': 1,
- 'env_key': 'CM_GCC_VERSION',
+ 'env_key': 'MLC_GCC_VERSION',
'which_env': i['env']})
if r['return'] > 0:
if 'clang' in r['error']:
@@ -66,41 +66,41 @@ def postprocess(i):
if r['return'] > 0:
return r
- env['CM_COMPILER_FAMILY'] = 'GCC'
+ env['MLC_COMPILER_FAMILY'] = 'GCC'
version = r['version']
- env['CM_COMPILER_VERSION'] = env['CM_GCC_VERSION']
- env['CM_GCC_CACHE_TAGS'] = 'version-' + version
- env['CM_COMPILER_CACHE_TAGS'] = 'version-' + version + ',family-gcc'
+ env['MLC_COMPILER_VERSION'] = env['MLC_GCC_VERSION']
+ env['MLC_GCC_CACHE_TAGS'] = 'version-' + version
+ env['MLC_COMPILER_CACHE_TAGS'] = 'version-' + version + ',family-gcc'
- found_file_path = env['CM_GCC_BIN_WITH_PATH']
+ found_file_path = env['MLC_GCC_BIN_WITH_PATH']
found_path = os.path.dirname(found_file_path)
- env['CM_GCC_INSTALLED_PATH'] = found_path
+ env['MLC_GCC_INSTALLED_PATH'] = found_path
file_name_c = os.path.basename(found_file_path)
# G: changed next line to handle cases like gcc-8
file_name_cpp = file_name_c.replace('gcc', 'g++')
env['FILE_NAME_CPP'] = file_name_cpp
- env['CM_GCC_BIN'] = file_name_c
+ env['MLC_GCC_BIN'] = file_name_c
# General compiler for general program compilation
- env['CM_C_COMPILER_BIN'] = file_name_c
- env['CM_C_COMPILER_FLAG_OUTPUT'] = '-o '
- env['CM_C_COMPILER_WITH_PATH'] = found_file_path
- env['CM_C_COMPILER_FLAG_VERSION'] = '--version'
-
- env['CM_CXX_COMPILER_BIN'] = file_name_cpp
- env['CM_CXX_COMPILER_WITH_PATH'] = os.path.join(found_path, file_name_cpp)
- env['CM_CXX_COMPILER_FLAG_OUTPUT'] = '-o '
- env['CM_CXX_COMPILER_FLAG_VERSION'] = '--version'
-
- env['CM_COMPILER_FLAGS_FAST'] = "-O3"
- env['CM_LINKER_FLAGS_FAST'] = "-O3"
- env['CM_COMPILER_FLAGS_DEBUG'] = "-O0"
- env['CM_LINKER_FLAGS_DEBUG'] = "-O0"
- env['CM_COMPILER_FLAGS_DEFAULT'] = "-O2"
- env['CM_LINKER_FLAGS_DEFAULT'] = "-O2"
+ env['MLC_C_COMPILER_BIN'] = file_name_c
+ env['MLC_C_COMPILER_FLAG_OUTPUT'] = '-o '
+ env['MLC_C_COMPILER_WITH_PATH'] = found_file_path
+ env['MLC_C_COMPILER_FLAG_VERSION'] = '--version'
+
+ env['MLC_CXX_COMPILER_BIN'] = file_name_cpp
+ env['MLC_CXX_COMPILER_WITH_PATH'] = os.path.join(found_path, file_name_cpp)
+ env['MLC_CXX_COMPILER_FLAG_OUTPUT'] = '-o '
+ env['MLC_CXX_COMPILER_FLAG_VERSION'] = '--version'
+
+ env['MLC_COMPILER_FLAGS_FAST'] = "-O3"
+ env['MLC_LINKER_FLAGS_FAST'] = "-O3"
+ env['MLC_COMPILER_FLAGS_DEBUG'] = "-O0"
+ env['MLC_LINKER_FLAGS_DEBUG'] = "-O0"
+ env['MLC_COMPILER_FLAGS_DEFAULT'] = "-O2"
+ env['MLC_LINKER_FLAGS_DEFAULT'] = "-O2"
return {'return': 0, 'version': version}
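One detail preserved through the rename: the C++ compiler binary is derived from the detected C compiler by name substitution, so version-suffixed toolchains stay paired (gcc-12 maps to g++-12, per the comment in the hunk). In isolation:

```python
import os

def compiler_pair(found_file_path):
    # 'gcc' -> 'g++' in the basename keeps suffixes like '-12' intact.
    file_name_c = os.path.basename(found_file_path)
    file_name_cpp = file_name_c.replace('gcc', 'g++')
    return file_name_c, os.path.join(os.path.dirname(found_file_path), file_name_cpp)

print(compiler_pair('/usr/bin/gcc-12'))  # ('gcc-12', '/usr/bin/g++-12')
```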
diff --git a/script/get-gcc/meta.yaml b/script/get-gcc/meta.yaml
index f67a59d2e..27a3b6feb 100644
--- a/script/get-gcc/meta.yaml
+++ b/script/get-gcc/meta.yaml
@@ -8,16 +8,16 @@ deps:
- tags: detect,os
name: Detect or install GCC compiler
new_env_keys:
-- CM_GCC_*
-- CM_C_COMPILER_*
-- CM_CXX_COMPILER_*
-- CM_COMPILER_*
-- CM_LINKER_*
+- MLC_GCC_*
+- MLC_C_COMPILER_*
+- MLC_CXX_COMPILER_*
+- MLC_COMPILER_*
+- MLC_LINKER_*
- + CFLAGS
- + CXXFLAGS
- + FFLAGS
- + LDFLAGS
-- +CM_HOST_OS_DEFAULT_INCLUDE_PATH
+- +MLC_HOST_OS_DEFAULT_INCLUDE_PATH
- +PATH
post_deps:
- tags: get,compiler-flags
diff --git a/script/get-gcc/run.bat b/script/get-gcc/run.bat
index fac96d834..c459d6218 100644
--- a/script/get-gcc/run.bat
+++ b/script/get-gcc/run.bat
@@ -1,3 +1,3 @@
-%CM_GCC_BIN_WITH_PATH% --version > tmp-ver.out
+%MLC_GCC_BIN_WITH_PATH% --version > tmp-ver.out
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/get-gcc/run.sh b/script/get-gcc/run.sh
index 08be81f21..e5b397bf6 100644
--- a/script/get-gcc/run.sh
+++ b/script/get-gcc/run.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-gcc_bin=${CM_GCC_BIN_WITH_PATH}
+gcc_bin=${MLC_GCC_BIN_WITH_PATH}
echo "${gcc_bin} --version"
${gcc_bin} --version > tmp-ver.out
diff --git a/script/get-generic-python-lib/customize.py b/script/get-generic-python-lib/customize.py
index 2b259feca..85b7326cc 100644
--- a/script/get-generic-python-lib/customize.py
+++ b/script/get-generic-python-lib/customize.py
@@ -9,9 +9,9 @@ def preprocess(i):
meta = i['meta']
automation = i['automation']
run_script_input = i['run_script_input']
- pip_version = env.get('CM_PIP_VERSION', '').strip().split('.')
+ pip_version = env.get('MLC_PIP_VERSION', '').strip().split('.')
- package_name = env.get('CM_GENERIC_PYTHON_PACKAGE_NAME', '').strip()
+ package_name = env.get('MLC_GENERIC_PYTHON_PACKAGE_NAME', '').strip()
if package_name == '':
return automation._available_variations({'meta': meta})
@@ -20,37 +20,37 @@ def preprocess(i):
# 20240214: ONNXRuntime 1.17.0 now support CUDA 12 so we remove next check
# TBD: if we have explicit version for ONNX < 17.0.0 and CUDA is >= 12,
# we should add a check to fail ...
- cuda_version = env.get('CM_CUDA_VERSION', '').strip()
+ cuda_version = env.get('MLC_CUDA_VERSION', '').strip()
# if cuda_version!='':
# cuda_version_split = cuda_version.split('.')
# if int(cuda_version_split[0]) >= 12:
-# # env['CM_INSTALL_ONNXRUNTIME_GPU_FROM_SRC'] = "yes"
+# # env['MLC_INSTALL_ONNXRUNTIME_GPU_FROM_SRC'] = "yes"
# return {'return': 1, 'error':'at this moment, PIP package
# "onnxruntime_gpu" needs CUDA < 12'}
- extra = env.get('CM_GENERIC_PYTHON_PIP_EXTRA', '')
+ extra = env.get('MLC_GENERIC_PYTHON_PIP_EXTRA', '')
if (pip_version and len(pip_version) > 1 and int(pip_version[0]) >= 23) and (
'--break-system-packages' not in extra):
extra += ' --break-system-packages '
- env['CM_PYTHON_PIP_COMMON_EXTRA'] = " --break-system-packages"
+ env['MLC_PYTHON_PIP_COMMON_EXTRA'] = " --break-system-packages"
- if env.get('CM_GENERIC_PYTHON_PACKAGE_INSTALL_DEPS', '') == "no":
- env['CM_PYTHON_PIP_COMMON_EXTRA'] = " --no-deps"
+ if env.get('MLC_GENERIC_PYTHON_PACKAGE_INSTALL_DEPS', '') == "no":
+ env['MLC_PYTHON_PIP_COMMON_EXTRA'] = " --no-deps"
- if env.get('CM_PIP_INSTALL_NEEDS_USER', '') == "yes":
- env['CM_PYTHON_PIP_COMMON_EXTRA'] = " --user"
+ if env.get('MLC_PIP_INSTALL_NEEDS_USER', '') == "yes":
+ env['MLC_PYTHON_PIP_COMMON_EXTRA'] = " --user"
- if env.get('CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS', '') != '':
+ if env.get('MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS', '') != '':
r = automation.run_native_script(
{'run_script_input': run_script_input, 'env': env, 'script_name': 'uninstall_deps'})
if r['return'] > 0:
return r
- prepare_env_key = env.get('CM_GENERIC_PYTHON_PACKAGE_NAME', '')
+ prepare_env_key = env.get('MLC_GENERIC_PYTHON_PACKAGE_NAME', '')
for x in ["-", "[", "]"]:
prepare_env_key = prepare_env_key.replace(x, "_")
- env['CM_TMP_PYTHON_PACKAGE_NAME_ENV'] = prepare_env_key.upper()
+ env['MLC_TMP_PYTHON_PACKAGE_NAME_ENV'] = prepare_env_key.upper()
recursion_spaces = i['recursion_spaces']
@@ -61,7 +61,7 @@ def preprocess(i):
force_install = (
env.get(
- 'CM_TMP_PYTHON_PACKAGE_FORCE_INSTALL',
+ 'MLC_TMP_PYTHON_PACKAGE_FORCE_INSTALL',
'') in [
'yes',
'true',
@@ -72,8 +72,8 @@ def preprocess(i):
if r['return'] == 16 or force_install:
        # Clean the detected version env if it exists, so the version is
        # re-detected (for example, when we reinstall a generic python lib package)
- env_version_key = 'CM_' + \
- env['CM_TMP_PYTHON_PACKAGE_NAME_ENV'].upper() + '_VERSION'
+ env_version_key = 'MLC_' + \
+ env['MLC_TMP_PYTHON_PACKAGE_NAME_ENV'].upper() + '_VERSION'
if env.get(env_version_key, '') != '':
del (env[env_version_key])
@@ -82,36 +82,36 @@ def preprocess(i):
extra += ' --upgrade --no-deps --force-reinstall'
# Check index URL
- index_url = env.get('CM_GENERIC_PYTHON_PIP_INDEX_URL', '').strip()
+ index_url = env.get('MLC_GENERIC_PYTHON_PIP_INDEX_URL', '').strip()
if index_url != '':
# Check special cases
- if '${CM_TORCH_CUDA}' in index_url:
+ if '${MLC_TORCH_CUDA}' in index_url:
index_url = index_url.replace(
- '${CM_TORCH_CUDA}', env.get('CM_TORCH_CUDA'))
+ '${MLC_TORCH_CUDA}', env.get('MLC_TORCH_CUDA'))
extra += ' --index-url ' + index_url
# Check extra index URL
extra_index_url = env.get(
- 'CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL', '').strip()
+ 'MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL', '').strip()
if extra_index_url != '':
# Check special cases
- if '${CM_TORCH_CUDA}' in extra_index_url:
+ if '${MLC_TORCH_CUDA}' in extra_index_url:
extra_index_url = extra_index_url.replace(
- '${CM_TORCH_CUDA}', env.get('CM_TORCH_CUDA'))
+ '${MLC_TORCH_CUDA}', env.get('MLC_TORCH_CUDA'))
extra += ' --extra-index-url ' + extra_index_url
# check find-links
find_links_url = env.get(
- 'CM_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL', '').strip()
+ 'MLC_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL', '').strip()
if find_links_url != '':
extra += ' -f ' + find_links_url
# Check update
- if env.get('CM_GENERIC_PYTHON_PIP_UPDATE', '') in [
+ if env.get('MLC_GENERIC_PYTHON_PIP_UPDATE', '') in [
True, 'true', 'yes', 'on']:
extra += ' -U'
@@ -119,7 +119,7 @@ def preprocess(i):
print(recursion_spaces + ' Extra PIP CMD: ' + extra)
print('')
- env['CM_GENERIC_PYTHON_PIP_EXTRA'] = extra
+ env['MLC_GENERIC_PYTHON_PIP_EXTRA'] = extra
r = automation.run_native_script(
{'run_script_input': run_script_input, 'env': env, 'script_name': 'install'})
@@ -134,11 +134,11 @@ def detect_version(i):
env = i['env']
- if env.get('CM_TMP_PYTHON_PACKAGE_NAME_ENV', '') != '':
- env_version_key = 'CM_' + \
- env['CM_TMP_PYTHON_PACKAGE_NAME_ENV'].upper() + '_VERSION'
+ if env.get('MLC_TMP_PYTHON_PACKAGE_NAME_ENV', '') != '':
+ env_version_key = 'MLC_' + \
+ env['MLC_TMP_PYTHON_PACKAGE_NAME_ENV'].upper() + '_VERSION'
else:
- env_version_key = 'CM_CACHE_TMP_VERSION'
+ env_version_key = 'MLC_CACHE_TMP_VERSION'
r = i['automation'].parse_version({'match_text': r'\s*([\d.a-z\-]+)',
'group_number': 1,
@@ -150,7 +150,7 @@ def detect_version(i):
version = r['version']
current_detected_version = version
- if env.get('CM_TMP_SILENT', '') != 'yes':
+ if env.get('MLC_TMP_SILENT', '') != 'yes':
print(
i['recursion_spaces'] +
' Detected version: {}'.format(version))
@@ -162,8 +162,8 @@ def postprocess(i):
env = i['env']
- env_version_key = 'CM_' + \
- env['CM_TMP_PYTHON_PACKAGE_NAME_ENV'].upper() + '_VERSION'
+ env_version_key = 'MLC_' + \
+ env['MLC_TMP_PYTHON_PACKAGE_NAME_ENV'].upper() + '_VERSION'
if env.get(env_version_key, '') != '':
version = env[env_version_key]
@@ -174,19 +174,19 @@ def postprocess(i):
version = r['version']
- env['CM_PYTHONLIB_' + env['CM_TMP_PYTHON_PACKAGE_NAME_ENV'] +
+ env['MLC_PYTHONLIB_' + env['MLC_TMP_PYTHON_PACKAGE_NAME_ENV'] +
'_CACHE_TAGS'] = 'version-' + version
import pkgutil
- package_name = env.get('CM_GENERIC_PYTHON_PACKAGE_NAME', '').strip()
+ package_name = env.get('MLC_GENERIC_PYTHON_PACKAGE_NAME', '').strip()
package = pkgutil.get_loader(package_name)
if package:
installed_file_path = package.get_filename()
- env['CM_GET_DEPENDENT_CACHED_PATH'] = installed_file_path
+ env['MLC_GET_DEPENDENT_CACHED_PATH'] = installed_file_path
- pip_version = env.get('CM_PIP_VERSION', '').strip().split('.')
+ pip_version = env.get('MLC_PIP_VERSION', '').strip().split('.')
if pip_version and len(pip_version) > 1 and int(pip_version[0]) >= 23:
- env['CM_PYTHON_PIP_COMMON_EXTRA'] = " --break-system-packages"
+ env['MLC_PYTHON_PIP_COMMON_EXTRA'] = " --break-system-packages"
if version.count('.') > 1:
env[f"{env_version_key}_MAJOR_MINOR"] = ".".join(
diff --git a/script/get-generic-python-lib/detect-version.py b/script/get-generic-python-lib/detect-version.py
index fc879f04e..98e0ee022 100644
--- a/script/get-generic-python-lib/detect-version.py
+++ b/script/get-generic-python-lib/detect-version.py
@@ -1,7 +1,7 @@
import os
import sys
-package_name = os.environ.get('CM_GENERIC_PYTHON_PACKAGE_NAME', '')
+package_name = os.environ.get('MLC_GENERIC_PYTHON_PACKAGE_NAME', '')
package_name = package_name.split("[")[0]
filename = 'tmp-ver.out'
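
The rest of detect-version.py is not shown in this diff; one plausible shape for the detection step, continuing the fragment above (package_name, filename, and sys are defined there), is to resolve the installed version via importlib.metadata and write it to tmp-ver.out for the caller's parse_version() to pick up — a sketch, not the file's actual body:

    # Hypothetical completion: resolve the installed version and persist it.
    from importlib.metadata import version, PackageNotFoundError

    try:
        ver = version(package_name)       # e.g. "1.26.4"
        with open(filename, 'w') as f:
            f.write(ver)
    except PackageNotFoundError:
        sys.exit(1)                       # signals "not installed" to the caller
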
diff --git a/script/get-generic-python-lib/install.bat b/script/get-generic-python-lib/install.bat
index 0a5967462..e74450c72 100644
--- a/script/get-generic-python-lib/install.bat
+++ b/script/get-generic-python-lib/install.bat
@@ -1,13 +1,13 @@
echo.
-if NOT "%CM_GENERIC_PYTHON_PIP_URL%" == "" (
+if NOT "%MLC_GENERIC_PYTHON_PIP_URL%" == "" (
- %CM_PYTHON_BIN_WITH_PATH% -m pip install %CM_GENERIC_PYTHON_PIP_URL% %CM_GENERIC_PYTHON_PIP_EXTRA%
+ %MLC_PYTHON_BIN_WITH_PATH% -m pip install %MLC_GENERIC_PYTHON_PIP_URL% %MLC_GENERIC_PYTHON_PIP_EXTRA%
IF %ERRORLEVEL% NEQ 0 EXIT 1
) else (
- %CM_PYTHON_BIN_WITH_PATH% -m pip install %CM_GENERIC_PYTHON_PACKAGE_NAME%%CM_TMP_PIP_VERSION_STRING% %CM_GENERIC_PYTHON_PIP_EXTRA%
+ %MLC_PYTHON_BIN_WITH_PATH% -m pip install %MLC_GENERIC_PYTHON_PACKAGE_NAME%%MLC_TMP_PIP_VERSION_STRING% %MLC_GENERIC_PYTHON_PIP_EXTRA%
IF %ERRORLEVEL% NEQ 0 EXIT 1
)
diff --git a/script/get-generic-python-lib/install.sh b/script/get-generic-python-lib/install.sh
index b79aa8146..655c6d869 100644
--- a/script/get-generic-python-lib/install.sh
+++ b/script/get-generic-python-lib/install.sh
@@ -2,11 +2,11 @@
echo ""
-if [[ ${CM_GENERIC_PYTHON_PACKAGE_VARIANT} == "nvidia-apex-depreciated" ]]; then
- cd ${CM_GIT_REPO_CHECKOUT_PATH}
- cmd="${CM_PYTHON_BIN_WITH_PATH} -m pip install -v --disable-pip-version-check --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext\" ./"
+if [[ ${MLC_GENERIC_PYTHON_PACKAGE_VARIANT} == "nvidia-apex-depreciated" ]]; then
+ cd ${MLC_GIT_REPO_CHECKOUT_PATH}
+ cmd="${MLC_PYTHON_BIN_WITH_PATH} -m pip install -v --disable-pip-version-check --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext\" ./"
echo $cmd
- if [[ -n ${CM_PIP_ERROR_SKIP} ]]; then
+ if [[ -n ${MLC_PIP_ERROR_SKIP} ]]; then
eval $cmd
else
eval $cmd
@@ -15,31 +15,31 @@ if [[ ${CM_GENERIC_PYTHON_PACKAGE_VARIANT} == "nvidia-apex-depreciated" ]]; then
exit 0
fi
-if [[ ${CM_GENERIC_PYTHON_PACKAGE_NAME} == "tensorflow_old" ]]; then
- if [[ ${CM_HOST_OS_FLAVOR} == "macos" ]]; then
- if [[ -n ${CM_PIP_ERROR_SKIP} ]]; then
- . ${CM_TMP_CURRENT_SCRIPT_PATH}/tensorflow/run-macos.sh
+if [[ ${MLC_GENERIC_PYTHON_PACKAGE_NAME} == "tensorflow_old" ]]; then
+ if [[ ${MLC_HOST_OS_FLAVOR} == "macos" ]]; then
+ if [[ -n ${MLC_PIP_ERROR_SKIP} ]]; then
+ . ${MLC_TMP_CURRENT_SCRIPT_PATH}/tensorflow/run-macos.sh
else
- . ${CM_TMP_CURRENT_SCRIPT_PATH}/tensorflow/run-macos.sh
+ . ${MLC_TMP_CURRENT_SCRIPT_PATH}/tensorflow/run-macos.sh
test $? -eq 0 || exit $?
fi
exit 0
fi
- if [[ ${CM_HOST_PLATFORM_FLAVOR} == "aarch64" ]]; then
- if [[ -n ${CM_PIP_ERROR_SKIP} ]]; then
- . ${CM_TMP_CURRENT_SCRIPT_PATH}/tensorflow/run-aarch64.sh
+ if [[ ${MLC_HOST_PLATFORM_FLAVOR} == "aarch64" ]]; then
+ if [[ -n ${MLC_PIP_ERROR_SKIP} ]]; then
+ . ${MLC_TMP_CURRENT_SCRIPT_PATH}/tensorflow/run-aarch64.sh
else
- . ${CM_TMP_CURRENT_SCRIPT_PATH}/tensorflow/run-aarch64.sh
+ . ${MLC_TMP_CURRENT_SCRIPT_PATH}/tensorflow/run-aarch64.sh
test $? -eq 0 || exit $?
fi
exit 0
fi
fi
-if [[ -n ${CM_GENERIC_PYTHON_PIP_URL} ]]; then
- cmd="${CM_PYTHON_BIN_WITH_PATH} -m pip install \"${CM_GENERIC_PYTHON_PIP_URL}\" ${CM_GENERIC_PYTHON_PIP_EXTRA}"
+if [[ -n ${MLC_GENERIC_PYTHON_PIP_URL} ]]; then
+ cmd="${MLC_PYTHON_BIN_WITH_PATH} -m pip install \"${MLC_GENERIC_PYTHON_PIP_URL}\" ${MLC_GENERIC_PYTHON_PIP_EXTRA}"
echo $cmd
- if [[ -n ${CM_PIP_ERROR_SKIP} ]]; then
+ if [[ -n ${MLC_PIP_ERROR_SKIP} ]]; then
eval $cmd
else
eval $cmd
@@ -48,10 +48,10 @@ if [[ -n ${CM_GENERIC_PYTHON_PIP_URL} ]]; then
exit 0
fi
-cmd="${CM_PYTHON_BIN_WITH_PATH} -m pip install \"${CM_GENERIC_PYTHON_PACKAGE_NAME}${CM_TMP_PIP_VERSION_STRING}\" ${CM_GENERIC_PYTHON_PIP_EXTRA}"
+cmd="${MLC_PYTHON_BIN_WITH_PATH} -m pip install \"${MLC_GENERIC_PYTHON_PACKAGE_NAME}${MLC_TMP_PIP_VERSION_STRING}\" ${MLC_GENERIC_PYTHON_PIP_EXTRA}"
echo $cmd
-if [[ -n ${CM_PIP_ERROR_SKIP} ]]; then
+if [[ -n ${MLC_PIP_ERROR_SKIP} ]]; then
eval $cmd
else
eval $cmd
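
install.sh implements a fail-soft pattern: when MLC_PIP_ERROR_SKIP is set, a failed pip install is logged but not fatal; otherwise the exit code propagates. A Python rendering of the same policy, as a sketch rather than the automation's actual runner (it assumes the MLC_* variables are already set by the automation):

    import os
    import subprocess
    import sys

    # Mirror the shell: package name plus optional pinned-version suffix.
    pkg = (os.environ['MLC_GENERIC_PYTHON_PACKAGE_NAME']
           + os.environ.get('MLC_TMP_PIP_VERSION_STRING', ''))
    cmd = [os.environ['MLC_PYTHON_BIN_WITH_PATH'], '-m', 'pip', 'install', pkg]

    result = subprocess.run(cmd)
    if result.returncode != 0:
        if os.environ.get('MLC_PIP_ERROR_SKIP'):
            print('pip install failed, continuing (MLC_PIP_ERROR_SKIP set)')
        else:
            sys.exit(result.returncode)
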
diff --git a/script/get-generic-python-lib/meta.yaml b/script/get-generic-python-lib/meta.yaml
index ee0a4cdd1..6eb0b0a78 100644
--- a/script/get-generic-python-lib/meta.yaml
+++ b/script/get-generic-python-lib/meta.yaml
@@ -11,7 +11,7 @@ deps:
- python
- python3
skip_if_env:
- CM_TMP_USE_CUSTOM_PYTHON:
+ MLC_TMP_USE_CUSTOM_PYTHON:
- 'on'
tags: get,python3
dynamic: true
@@ -19,23 +19,23 @@ deps:
- python-pip
- pip
skip_if_env:
- CM_GENERIC_PYTHON_PACKAGE_NAME:
+ MLC_GENERIC_PYTHON_PACKAGE_NAME:
- pip
tags: get,generic-python-lib,_pip
extra_cache_tags_from_env:
-- env: CM_PYTHON_CACHE_TAGS
+- env: MLC_PYTHON_CACHE_TAGS
prefix: python-
input_mapping:
- extra_index_url: CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL
- force_install: CM_TMP_PYTHON_PACKAGE_FORCE_INSTALL
- index_url: CM_GENERIC_PYTHON_PIP_INDEX_URL
+ extra_index_url: MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL
+ force_install: MLC_TMP_PYTHON_PACKAGE_FORCE_INSTALL
+ index_url: MLC_GENERIC_PYTHON_PIP_INDEX_URL
local_env_keys:
-- CM_GENERIC_PYTHON_PACKAGE_VARIANT
+- MLC_GENERIC_PYTHON_PACKAGE_VARIANT
new_env_keys:
-- CM_PYTHONLIB_*
+- MLC_PYTHONLIB_*
prehook_deps:
- enable_if_env:
- CM_INSTALL_ONNXRUNTIME_GPU_FROM_SRC:
+ MLC_INSTALL_ONNXRUNTIME_GPU_FROM_SRC:
- 'yes'
tags: install,onnxruntime,from.src,_cuda
tags:
@@ -49,63 +49,63 @@ uid: 94b62a682bc44791
variations:
Pillow:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: Pillow
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: Pillow
new_env_keys:
- - CM_PILLOW_VERSION
+ - MLC_PILLOW_VERSION
anthropic:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: anthropic
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: anthropic
new_env_keys:
- - CM_ANTHROPIC_VERSION
+ - MLC_ANTHROPIC_VERSION
apache-tvm:
deps:
- tags: get,generic-python-lib,_typing_extensions
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: apache-tvm
- CM_GENERIC_PYTHON_PIP_EXTRA: ' --pre'
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: apache-tvm
+ MLC_GENERIC_PYTHON_PIP_EXTRA: ' --pre'
new_env_keys:
- - CM_APACHE_TVM_VERSION
+ - MLC_APACHE_TVM_VERSION
apex:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: apex
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: apex
new_env_keys:
- - CM_APEX_VERSION
+ - MLC_APEX_VERSION
async_timeout:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: async_timeout
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: async_timeout
new_env_keys:
- - CM_ASYNC_TIMEOUT_VERSION
+ - MLC_ASYNC_TIMEOUT_VERSION
attr:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: attr
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: attr
new_env_keys:
- - CM_ATTR_VERSION
+ - MLC_ATTR_VERSION
attrs:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: attrs
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: attrs
new_env_keys:
- - CM_ATTRS_VERSION
+ - MLC_ATTRS_VERSION
boto3:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: boto3
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: boto3
new_env_keys:
- - CM_BOTO3_VERSION
+ - MLC_BOTO3_VERSION
cloudpickle:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: cloudpickle
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: cloudpickle
new_env_keys:
- - CM_CLOUDPICKLE_VERSION
+ - MLC_CLOUDPICKLE_VERSION
cmind:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: cmind
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: cmind
new_env_keys:
- - CM_CMIND_VERSION
+ - MLC_CMIND_VERSION
colored:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: colored
- CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://pypi.ngc.nvidia.com
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: colored
+ MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://pypi.ngc.nvidia.com
new_env_keys:
- - CM_COLORED_VERSION
+ - MLC_COLORED_VERSION
conda.#:
ad:
python-pip:
@@ -118,135 +118,135 @@ variations:
- cuda
tags: get,cuda
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: cupy
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: cupy
new_env_keys:
- - CM_CUPY_VERSION
+ - MLC_CUPY_VERSION
custom-python:
ad:
python-pip:
tags: _custom-python
env:
- CM_TMP_USE_CUSTOM_PYTHON: 'on'
+ MLC_TMP_USE_CUSTOM_PYTHON: 'on'
cxx11-abi:
env: {}
datasets:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: datasets
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: datasets
new_env_keys:
- - CM_DATASETS_VERSION
+ - MLC_DATASETS_VERSION
decorator:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: decorator
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: decorator
new_env_keys:
- - CM_DECORATOR_VERSION
+ - MLC_DECORATOR_VERSION
deepsparse:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: deepsparse
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: deepsparse
new_env_keys:
- - CM_DEEPSPARSE_VERSION
+ - MLC_DEEPSPARSE_VERSION
dllogger:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: dllogger
- CM_GENERIC_PYTHON_PIP_URL: git+https://github.com/NVIDIA/dllogger#egg=dllogger
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: dllogger
+ MLC_GENERIC_PYTHON_PIP_URL: git+https://github.com/NVIDIA/dllogger#egg=dllogger
extra-index-url.#:
env:
- CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: '#'
+ MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: '#'
fiftyone:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: fiftyone
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: fiftyone
new_env_keys:
- - CM_FIFTYONE_VERSION
+ - MLC_FIFTYONE_VERSION
google-api-python-client:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: google_api_python_client
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: google_api_python_client
new_env_keys:
- - CM_GOOGLE_API_PYTHON_CLIENT_VERSION
+ - MLC_GOOGLE_API_PYTHON_CLIENT_VERSION
google-auth-oauthlib:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: google_auth_oauthlib
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: google_auth_oauthlib
new_env_keys:
- - CM_GOOGLE_AUTH_OAUTHLIB_VERSION
+ - MLC_GOOGLE_AUTH_OAUTHLIB_VERSION
huggingface_hub:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: huggingface_hub
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: huggingface_hub
new_env_keys:
- - CM_HUGGINGFACE_HUB_VERSION
+ - MLC_HUGGINGFACE_HUB_VERSION
index-url.#:
env:
- CM_GENERIC_PYTHON_PIP_INDEX_URL: '#'
+ MLC_GENERIC_PYTHON_PIP_INDEX_URL: '#'
inflect:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: inflect
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: inflect
new_env_keys:
- - CM_INFLECT_VERSION
+ - MLC_INFLECT_VERSION
jax:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: jax
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: jax
new_env_keys:
- - CM_JAX_VERSION*
+ - MLC_JAX_VERSION*
jax_cuda:
deps:
- names:
- cuda
tags: get,cuda
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: jax[cuda]
- CM_GENERIC_PYTHON_PIP_EXTRA: -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
- CM_JAX_VERSION_EXTRA: CUDA
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: jax[cuda]
+ MLC_GENERIC_PYTHON_PIP_EXTRA: -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
+ MLC_JAX_VERSION_EXTRA: CUDA
new_env_keys:
- - CM_JAX_VERSION*
+ - MLC_JAX_VERSION*
librosa:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: librosa
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: librosa
new_env_keys:
- - CM_LIBROSA_VERSION
+ - MLC_LIBROSA_VERSION
matplotlib:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: matplotlib
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: matplotlib
new_env_keys:
- - CM_MATPLOTLIB_VERSION
+ - MLC_MATPLOTLIB_VERSION
mlperf_loadgen:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: mlperf_loadgen
- CM_GENERIC_PYTHON_PIP_URL: git+https://github.com/mlcommons/inference.git#subdirectory=loadgen
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: mlperf_loadgen
+ MLC_GENERIC_PYTHON_PIP_URL: git+https://github.com/mlcommons/inference.git#subdirectory=loadgen
new_env_keys:
- - CM_MLPERF_LOADGEN_VERSION
+ - MLC_MLPERF_LOADGEN_VERSION
mlperf_logging:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: mlperf_logging
- CM_GENERIC_PYTHON_PIP_URL: git+https://github.com/mlperf/logging.git
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: mlperf_logging
+ MLC_GENERIC_PYTHON_PIP_URL: git+https://github.com/mlperf/logging.git
new_env_keys:
- - CM_MLPERF_LOGGING_VERSION
+ - MLC_MLPERF_LOGGING_VERSION
mpld3:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: mpld3
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: mpld3
new_env_keys:
- - CM_MPLD3_VERSION
+ - MLC_MPLD3_VERSION
mxeval:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: mxeval
- CM_GENERIC_PYTHON_PIP_URL: git+https://github.com/amazon-science/mxeval.git
- CM_PIP_ERROR_SKIP: 'true'
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: mxeval
+ MLC_GENERIC_PYTHON_PIP_URL: git+https://github.com/amazon-science/mxeval.git
+ MLC_PIP_ERROR_SKIP: 'true'
nibabel:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: nibabel
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: nibabel
new_env_keys:
- - CM_NIBABEL_VERSION
+ - MLC_NIBABEL_VERSION
no-deps:
env:
- CM_GENERIC_PYTHON_PACKAGE_INSTALL_DEPS: 'no'
+ MLC_GENERIC_PYTHON_PACKAGE_INSTALL_DEPS: 'no'
numpy:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: numpy
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: numpy
new_env_keys:
- - CM_NUMPY_VERSION
+ - MLC_NUMPY_VERSION
nvidia-apex:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: apex
- CM_GENERIC_PYTHON_PACKAGE_VARIANT: nvidia-apex
- CM_GENERIC_PYTHON_PIP_URL: git+https://github.com/nvidia/apex@0da3ffb92ee6fbe5336602f0e3989db1cd16f880
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: apex
+ MLC_GENERIC_PYTHON_PACKAGE_VARIANT: nvidia-apex
+ MLC_GENERIC_PYTHON_PIP_URL: git+https://github.com/nvidia/apex@0da3ffb92ee6fbe5336602f0e3989db1cd16f880
new_env_keys:
- - CM_NVIDIA_APEX_VERSION
+ - MLC_NVIDIA_APEX_VERSION
nvidia-apex-from-src:
deps:
- names:
@@ -256,25 +256,25 @@ variations:
- torch
tags: get,generic-python-lib,_torch_cuda
- env:
- CM_GIT_CHECKOUT_FOLDER: apex
+ MLC_GIT_CHECKOUT_FOLDER: apex
extra_cache_tags: nvidia-apex
tags: get,git,repo,_repo.https://github.com/NVIDIA/apex,_tag.23.05
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: apex
- CM_GENERIC_PYTHON_PACKAGE_VARIANT: nvidia-apex
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: apex
+ MLC_GENERIC_PYTHON_PACKAGE_VARIANT: nvidia-apex
new_env_keys:
- - CM_NVIDIA_APEX_VERSION
+ - MLC_NVIDIA_APEX_VERSION
nvidia-dali:
deps:
- names:
- cuda
tags: get,cuda
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: nvidia-dali-cuda120
- CM_GENERIC_PYTHON_PIP_EXTRA: ' --upgrade --default-timeout=900'
- CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://developer.download.nvidia.com/compute/redist
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: nvidia-dali-cuda120
+ MLC_GENERIC_PYTHON_PIP_EXTRA: ' --upgrade --default-timeout=900'
+ MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://developer.download.nvidia.com/compute/redist
new_env_keys:
- - CM_NVIDIA_DALI_VERSION
+ - MLC_NVIDIA_DALI_VERSION
nvidia-pycocotools:
base:
- pycocotools
@@ -286,149 +286,149 @@ variations:
- numpy
tags: get,generic-python-lib,_package.numpy
env:
- CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: pycocotools
- CM_GENERIC_PYTHON_PIP_URL: pycocotools@git+https://github.com/NVIDIA/cocoapi#subdirectory=PythonAPI
+ MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: pycocotools
+ MLC_GENERIC_PYTHON_PIP_URL: pycocotools@git+https://github.com/NVIDIA/cocoapi#subdirectory=PythonAPI
nvidia-pyindex:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: nvidia-pyindex
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: nvidia-pyindex
new_env_keys:
- - CM_NVIDIA_PYINDEX_VERSION
+ - MLC_NVIDIA_PYINDEX_VERSION
nvidia-tensorrt:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: nvidia-tensorrt
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: nvidia-tensorrt
new_env_keys:
- - CM_NVIDIA_TENSORRT_VERSION
+ - MLC_NVIDIA_TENSORRT_VERSION
onnx:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: onnx
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: onnx
new_env_keys:
- - CM_ONNX_VERSION
+ - MLC_ONNX_VERSION
onnx-graphsurgeon:
deps:
- tags: get,generic-python-lib,_package.nvidia-pyindex
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: onnx_graphsurgeon
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: onnx_graphsurgeon
new_env_keys:
- - CM_ONNX_GRAPHSURGEON_VERSION
+ - MLC_ONNX_GRAPHSURGEON_VERSION
onnxruntime:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: onnxruntime
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: onnxruntime
new_env_keys:
- - CM_ONNXRUNTIME_VERSION
+ - MLC_ONNXRUNTIME_VERSION
onnxruntime,rocm:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: onnxruntime-training
- CM_GENERIC_PYTHON_PIP_URL: https://download.onnxruntime.ai/onnxruntime_training-1.16.0%2Brocm56-cp3<<>>-cp3<<>>-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: onnxruntime-training
+ MLC_GENERIC_PYTHON_PIP_URL: https://download.onnxruntime.ai/onnxruntime_training-1.16.0%2Brocm56-cp3<<>>-cp3<<>>-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
new_env_keys:
- - CM_ONNXRUNTIME_TRAINING_VERSION*
+ - MLC_ONNXRUNTIME_TRAINING_VERSION*
onnxruntime_gpu:
default_env:
- CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: onnxruntime
+ MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: onnxruntime
deps:
- names:
- cuda
tags: get,cuda
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: onnxruntime_gpu
- CM_ONNXRUNTIME_VERSION_EXTRA: GPU
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: onnxruntime_gpu
+ MLC_ONNXRUNTIME_VERSION_EXTRA: GPU
new_env_keys:
- - CM_ONNXRUNTIME_GPU_VERSION*
+ - MLC_ONNXRUNTIME_GPU_VERSION*
openai:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: openai
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: openai
new_env_keys:
- - CM_OPENAI_VERSION
+ - MLC_OPENAI_VERSION
opencv-python:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: opencv-python
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: opencv-python
new_env_keys:
- - CM_OPENCV_PYTHON_VERSION
+ - MLC_OPENCV_PYTHON_VERSION
package.#:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: '#'
- CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: ''
- CM_GENERIC_PYTHON_PIP_URL: ''
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: '#'
+ MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: ''
+ MLC_GENERIC_PYTHON_PIP_URL: ''
find_links_url.#:
env:
- CM_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL: '#'
+ MLC_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL: '#'
package.torch,cxx11-abi:
env:
- CM_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/nightly/cpu-cxx11-abi
+ MLC_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/nightly/cpu-cxx11-abi
pandas:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: pandas
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: pandas
new_env_keys:
- - CM_PANDAS_VERSION
+ - MLC_PANDAS_VERSION
path.#:
env:
- CM_GENERIC_PYTHON_PIP_URL: '#'
+ MLC_GENERIC_PYTHON_PIP_URL: '#'
pillow:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: Pillow
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: Pillow
new_env_keys:
- - CM_PILLOW_VERSION
+ - MLC_PILLOW_VERSION
pip:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: pip
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: pip
new_env_keys:
- - CM_PIP_VERSION
- - CM_PYTHON_PIP_COMMON_EXTRA
+ - MLC_PIP_VERSION
+ - MLC_PYTHON_PIP_COMMON_EXTRA
polygraphy:
deps:
- tags: get,generic-python-lib,_colored
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: polygraphy
- CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://pypi.ngc.nvidia.com
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: polygraphy
+ MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://pypi.ngc.nvidia.com
new_env_keys:
- - CM_POLYGRAPHY_VERSION
+ - MLC_POLYGRAPHY_VERSION
pre:
env:
- CM_GENERIC_PYTHON_DEV_VERSION: 'yes'
+ MLC_GENERIC_PYTHON_DEV_VERSION: 'yes'
protobuf:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: protobuf
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: protobuf
new_env_keys:
- - CM_PROTOBUF_VERSION
+ - MLC_PROTOBUF_VERSION
psutil:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: psutil
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: psutil
new_env_keys:
- - CM_PSUTIL_VERSION
+ - MLC_PSUTIL_VERSION
pycocotools:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: pycocotools
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: pycocotools
new_env_keys:
- - CM_PYCOCOTOOLS_VERSION
+ - MLC_PYCOCOTOOLS_VERSION
pycuda:
deps:
- names:
- cuda
tags: get,cuda
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: pycuda
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: pycuda
new_env_keys:
- - CM_PYCUDA_VERSION
+ - MLC_PYCUDA_VERSION
quark-amd:
deps:
- env:
- CM_DOWNLOAD_FILENAME: quark-0.1.0+a9827f5-py39-none-any.whl
- CM_DOWNLOAD_FINAL_ENV_NAME: CM_QUARK_AMD_WHL_PATH
+ MLC_DOWNLOAD_FILENAME: quark-0.1.0+a9827f5-py39-none-any.whl
+ MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_QUARK_AMD_WHL_PATH
extra_cache_tags: quark-amd
force_cache: true
tags: download,file,_wget,_url.https://www.xilinx.com/bin/public/openDownload?filename=quark-0.1.0+a9827f5-py39-none-any.whl
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: quark
- CM_GENERIC_PYTHON_PIP_URL: <<>>
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: quark
+ MLC_GENERIC_PYTHON_PIP_URL: <<>>
ray:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: ray[default]
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: ray[default]
new_env_keys:
- - CM_RAY_VERSION
+ - MLC_RAY_VERSION
requests:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: requests
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: requests
new_env_keys:
- - CM_REQUESTS_VERSION
+ - MLC_REQUESTS_VERSION
rocm:
deps:
- names:
@@ -438,142 +438,142 @@ variations:
safetensors:
deps:
- skip_if_env:
- CM_HOST_PLATFORM_FLAVOR:
+ MLC_HOST_PLATFORM_FLAVOR:
- x86_64
tags: get,rust-compiler
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: safetensors
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: safetensors
new_env_keys:
- - CM_SAFETENSORS_VERSION
+ - MLC_SAFETENSORS_VERSION
scikit-learn:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: scikit-learn
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: scikit-learn
new_env_keys:
- - CM_SCIKIT_LEARN_VERSION
+ - MLC_SCIKIT_LEARN_VERSION
scipy:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: scipy
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: scipy
new_env_keys:
- - CM_SCIPY_VERSION
+ - MLC_SCIPY_VERSION
scons:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: scons
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: scons
new_env_keys:
- - CM_SCONS_VERSION
+ - MLC_SCONS_VERSION
setfit:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: setfit
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: setfit
new_env_keys:
- - CM_SETFIT_VERSION
+ - MLC_SETFIT_VERSION
setuptools:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: setuptools
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: setuptools
new_env_keys:
- - CM_SETUPTOOL_VERSION
+ - MLC_SETUPTOOL_VERSION
six:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: six
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: six
new_env_keys:
- - CM_SIX_VERSION
+ - MLC_SIX_VERSION
sklearn:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: sklearn
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: sklearn
new_env_keys:
- - CM_SKLEARN_VERSION
+ - MLC_SKLEARN_VERSION
sox:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: sox
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: sox
new_env_keys:
- - CM_SOX_VERSION
+ - MLC_SOX_VERSION
sparsezoo:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: sparsezoo
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: sparsezoo
new_env_keys:
- - CM_SPARSEZOO_VERSION
+ - MLC_SPARSEZOO_VERSION
streamlit:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: streamlit
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: streamlit
new_env_keys:
- - CM_STREAMLIT_VERSION
+ - MLC_STREAMLIT_VERSION
streamlit_option_menu:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: streamlit_option_menu
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: streamlit_option_menu
new_env_keys:
- - CM_STREAMLIT_OPTION_MENU_VERSION
+ - MLC_STREAMLIT_OPTION_MENU_VERSION
tensorboard:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: tensorboard
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: tensorboard
new_env_keys:
- - CM_TENSORBOARD_VERSION
+ - MLC_TENSORBOARD_VERSION
tensorflow:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: tensorflow
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: tensorflow
new_env_keys:
- - CM_TENSORFLOW_VERSION
+ - MLC_TENSORFLOW_VERSION
tensorflow,rocm:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: tensorflow-rocm
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: tensorflow-rocm
new_env_keys:
- - CM_TENSORFLOW_ROCM_VERSION
+ - MLC_TENSORFLOW_ROCM_VERSION
tensorrt:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: tensorrt
- CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/<<>>
- CM_TORCH_VERSION_EXTRA: CUDA
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: tensorrt
+ MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/<<>>
+ MLC_TORCH_VERSION_EXTRA: CUDA
new_env_keys:
- - CM_TENSORRT_VERSION
+ - MLC_TENSORRT_VERSION
tflite:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: tflite
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: tflite
new_env_keys:
- - CM_TFLITE_VERSION
+ - MLC_TFLITE_VERSION
tflite-runtime:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: tflite-runtime
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: tflite-runtime
new_env_keys:
- - CM_TFLITE_RUNTIME_VERSION
+ - MLC_TFLITE_RUNTIME_VERSION
tokenization:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: tokenization
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: tokenization
new_env_keys:
- - CM_TOKENIZATION_VERSION
+ - MLC_TOKENIZATION_VERSION
toml:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: toml
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: toml
new_env_keys:
- - CM_TOML_VERSION
+ - MLC_TOML_VERSION
torch:
deps:
- enable_if_env:
- CM_PYTHON_MINOR_VERSION:
+ MLC_PYTHON_MINOR_VERSION:
- '7'
- '8'
tags: get,generic-python-lib,_package.networkx
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: torch
- CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: torch
+ MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu
new_env_keys:
- - CM_TORCH_VERSION*
+ - MLC_TORCH_VERSION*
torch,cxx11-abi:
env:
- CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/nightly/cpu-cxx11-abi
+ MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/nightly/cpu-cxx11-abi
torch,pre:
default_env:
- CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torch
+ MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torch
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: torch
- CM_GENERIC_PYTHON_PIP_EXTRA: ' --pre'
- CM_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/nightly/cpu
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: torch
+ MLC_GENERIC_PYTHON_PIP_EXTRA: ' --pre'
+ MLC_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/nightly/cpu
new_env_keys:
- - CM_TORCH_VERSION*
+ - MLC_TORCH_VERSION*
torch,rocm:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: torch
- CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: ''
- CM_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/nightly/rocm6.2
- CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torch
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: torch
+ MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: ''
+ MLC_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/nightly/rocm6.2
+ MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torch
new_env_keys:
- - CM_TORCH_VERSION*
+ - MLC_TORCH_VERSION*
post_deps:
- tags: get,generic-python-lib,_torchvision,_rocm
- tags: get,generic-python-lib,_torchaudio,_rocm
@@ -584,144 +584,144 @@ variations:
- cuda
tags: get,cuda
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: torch
- CM_TORCH_VERSION_EXTRA: CUDA
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: torch
+ MLC_TORCH_VERSION_EXTRA: CUDA
new_env_keys:
- - CM_TORCH_VERSION*
+ - MLC_TORCH_VERSION*
torch_cuda,pre:
default_env:
- CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torch_cuda
+ MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torch_cuda
deps:
- names:
- cuda
tags: get,cuda
- tags: get,generic-python-lib,_numpy
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: torch
- CM_GENERIC_PYTHON_PIP_EXTRA: ' --pre'
- CM_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/<<>>
- CM_TORCH_VERSION_EXTRA: CUDA
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: torch
+ MLC_GENERIC_PYTHON_PIP_EXTRA: ' --pre'
+ MLC_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/<<>>
+ MLC_TORCH_VERSION_EXTRA: CUDA
new_env_keys:
- - CM_TORCH_VERSION*
+ - MLC_TORCH_VERSION*
torch_tensorrt:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: torch-tensorrt
- CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/<<>>
- CM_TORCH_VERSION_EXTRA: CUDA
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: torch-tensorrt
+ MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/<<>>
+ MLC_TORCH_VERSION_EXTRA: CUDA
new_env_keys:
- - CM_TORCH_TENSORRT_VERSION
+ - MLC_TORCH_TENSORRT_VERSION
torchaudio:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: torchaudio
- CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: torchaudio
+ MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu
new_env_keys:
- - CM_TORCHAUDIO_VERSION*
+ - MLC_TORCHAUDIO_VERSION*
torchaudio,rocm:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: torchaudio
- CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: ''
- CM_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/nightly/rocm6.2
- CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torchaudio
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: torchaudio
+ MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: ''
+ MLC_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/nightly/rocm6.2
+ MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torchaudio
new_env_keys:
- - CM_TORCHAUDIO_VERSION*
+ - MLC_TORCHAUDIO_VERSION*
torchaudio_cuda:
default_env:
- CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torchaudio
+ MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torchaudio
deps:
- names:
- cuda
tags: get,cuda
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: torchaudio
- CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL1: https://download.pytorch.org/whl/<<>>
- CM_TORCHAUDIO_VERSION_EXTRA: CUDA
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: torchaudio
+ MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL1: https://download.pytorch.org/whl/<<>>
+ MLC_TORCHAUDIO_VERSION_EXTRA: CUDA
new_env_keys:
- - CM_TORCHAUDIO_VERSION*
+ - MLC_TORCHAUDIO_VERSION*
torchvision:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: torchvision
- CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: torchvision
+ MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu
new_env_keys:
- - CM_TORCHVISION_VERSION*
+ - MLC_TORCHVISION_VERSION*
torchvision,rocm:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: torchvision
- CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: ''
- CM_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/nightly/rocm6.2
- CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torchvision
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: torchvision
+ MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: ''
+ MLC_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/nightly/rocm6.2
+ MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torchvision
new_env_keys:
- - CM_TORCHVISION_VERSION*
+ - MLC_TORCHVISION_VERSION*
torchvision_cuda:
default_env:
- CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS1: torchvision
+ MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS1: torchvision
deps:
- names:
- cuda
tags: get,cuda
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: torchvision
- CM_TORCHVISION_VERSION_EXTRA: CUDA
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: torchvision
+ MLC_TORCHVISION_VERSION_EXTRA: CUDA
new_env_keys:
- - CM_TORCHVISION_VERSION*
+ - MLC_TORCHVISION_VERSION*
tornado:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: tornado
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: tornado
new_env_keys:
- - CM_TORNADO_VERSION
+ - MLC_TORNADO_VERSION
tqdm:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: tqdm
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: tqdm
new_env_keys:
- - CM_TQDM_VERSION
+ - MLC_TQDM_VERSION
transformers:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: transformers
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: transformers
new_env_keys:
- - CM_TRANSFORMERS_VERSION
+ - MLC_TRANSFORMERS_VERSION
typing_extensions:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: typing_extensions
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: typing_extensions
new_env_keys:
- - CM_TYPING_EXTENSIONS_VERSION
+ - MLC_TYPING_EXTENSIONS_VERSION
ujson:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: ujson
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: ujson
new_env_keys:
- - CM_UJSON_VERSION
+ - MLC_UJSON_VERSION
unidecode:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: unidecode
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: unidecode
new_env_keys:
- - CM_UNIDECODE_VERSION
+ - MLC_UNIDECODE_VERSION
url.#:
env:
- CM_GENERIC_PYTHON_PIP_URL: '#'
- CM_TMP_PYTHON_PACKAGE_FORCE_INSTALL: 'yes'
+ MLC_GENERIC_PYTHON_PIP_URL: '#'
+ MLC_TMP_PYTHON_PACKAGE_FORCE_INSTALL: 'yes'
wandb:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: wandb
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: wandb
new_env_keys:
- - CM_WANDB_VERSION
+ - MLC_WANDB_VERSION
west:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: west
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: west
new_env_keys:
- - CM_WEST_VERSION
+ - MLC_WEST_VERSION
whl-url.#:
deps:
- env:
- CM_DOWNLOAD_FINAL_ENV_NAME: CM_GENERIC_PYTHON_PIP_URL
+ MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_GENERIC_PYTHON_PIP_URL
force_cache: 'yes'
tags: download,file,_url.#
env:
- CM_TMP_PYTHON_PACKAGE_FORCE_INSTALL: 'yes'
+ MLC_TMP_PYTHON_PACKAGE_FORCE_INSTALL: 'yes'
xgboost:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: xgboost
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: xgboost
new_env_keys:
- - CM_XGBOOST_VERSION
+ - MLC_XGBOOST_VERSION
xlsxwriter:
env:
- CM_GENERIC_PYTHON_PACKAGE_NAME: xlsxwriter
+ MLC_GENERIC_PYTHON_PACKAGE_NAME: xlsxwriter
new_env_keys:
- - CM_XLSXWRITER_VERSION
+ - MLC_XLSXWRITER_VERSION
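
Several variations above (package.#, url.#, index-url.#, whl-url.#) use a '#' wildcard: the text after the dot in the requested variation name is substituted into the env values. A sketch that mimics this behaviour for the cases shown — an approximation of, not the framework's internal, resolution logic:

    # Sketch: resolve a '#' wildcard variation against a concrete suffix.
    def resolve_variation(variation_env, suffix):
        return {k: v.replace('#', suffix) if isinstance(v, str) else v
                for k, v in variation_env.items()}

    package_hash = {'MLC_GENERIC_PYTHON_PACKAGE_NAME': '#',
                    'MLC_GENERIC_PYTHON_PIP_URL': ''}
    print(resolve_variation(package_hash, 'numpy'))
    # {'MLC_GENERIC_PYTHON_PACKAGE_NAME': 'numpy', 'MLC_GENERIC_PYTHON_PIP_URL': ''}
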
diff --git a/script/get-generic-python-lib/run.bat b/script/get-generic-python-lib/run.bat
index 2612377c8..17e27e030 100644
--- a/script/get-generic-python-lib/run.bat
+++ b/script/get-generic-python-lib/run.bat
@@ -1,4 +1,4 @@
-IF NOT DEFINED CM_TMP_CURRENT_SCRIPT_PATH SET CM_TMP_CURRENT_SCRIPT_PATH=%CD%
+IF NOT DEFINED MLC_TMP_CURRENT_SCRIPT_PATH SET MLC_TMP_CURRENT_SCRIPT_PATH=%CD%
-%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\detect-version.py
+%MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\detect-version.py
IF %ERRORLEVEL% NEQ 0 EXIT 1
diff --git a/script/get-generic-python-lib/run.sh b/script/get-generic-python-lib/run.sh
index b60ac0814..2df36823d 100644
--- a/script/get-generic-python-lib/run.sh
+++ b/script/get-generic-python-lib/run.sh
@@ -1,7 +1,7 @@
#!/bin/bash
-CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD}
+MLC_TMP_CURRENT_SCRIPT_PATH=${MLC_TMP_CURRENT_SCRIPT_PATH:-$PWD}
-${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/detect-version.py
+${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/detect-version.py
test $? -eq 0 || exit $?
exit 0
diff --git a/script/get-generic-python-lib/tensorflow/run-aarch64.sh b/script/get-generic-python-lib/tensorflow/run-aarch64.sh
index 6c11efb71..71fca3564 100644
--- a/script/get-generic-python-lib/tensorflow/run-aarch64.sh
+++ b/script/get-generic-python-lib/tensorflow/run-aarch64.sh
@@ -1,13 +1,13 @@
-CM_PYTHON_BIN=${CM_PYTHON_BIN_WITH_PATH:-python3}
+MLC_PYTHON_BIN=${MLC_PYTHON_BIN_WITH_PATH:-python3}
-${CM_PYTHON_BIN} -m pip install --upgrade pip ${CM_PYTHON_PIP_COMMON_EXTRA}
-${CM_PYTHON_BIN} -m pip install setuptools testresources wheel h5py --user --upgrade --ignore-installed ${CM_PYTHON_PIP_COMMON_EXTRA}
+${MLC_PYTHON_BIN} -m pip install --upgrade pip ${MLC_PYTHON_PIP_COMMON_EXTRA}
+${MLC_PYTHON_BIN} -m pip install setuptools testresources wheel h5py --user --upgrade --ignore-installed ${MLC_PYTHON_PIP_COMMON_EXTRA}
curl https://sh.rustup.rs -sSf -o tmp.sh
sh tmp.sh -y
export PATH=$PATH:$HOME/.cargo/bin
-${CM_PYTHON_BIN} -m pip install tensorflow-aarch64${CM_TMP_PIP_VERSION_STRING} --user ${CM_PYTHON_PIP_COMMON_EXTRA}
+${MLC_PYTHON_BIN} -m pip install tensorflow-aarch64${MLC_TMP_PIP_VERSION_STRING} --user ${MLC_PYTHON_PIP_COMMON_EXTRA}
test $? -eq 0 || exit 1
-echo "CM_GENERIC_PYTHON_PACKAGE_NAME=tensorflow-aarch64" >> $PWD/tmp-run-env.out
+echo "MLC_GENERIC_PYTHON_PACKAGE_NAME=tensorflow-aarch64" >> $PWD/tmp-run-env.out
diff --git a/script/get-generic-python-lib/tensorflow/run-macos.sh b/script/get-generic-python-lib/tensorflow/run-macos.sh
index 525b532eb..6b41b939f 100644
--- a/script/get-generic-python-lib/tensorflow/run-macos.sh
+++ b/script/get-generic-python-lib/tensorflow/run-macos.sh
@@ -1,7 +1,7 @@
#!/bin/bash
-CM_PYTHON_BIN=${CM_PYTHON_BIN:-python3}
+MLC_PYTHON_BIN=${MLC_PYTHON_BIN:-python3}
-${CM_PYTHON_BIN} -m pip install tensorflow-macos${CM_TMP_PIP_VERSION_STRING}
+${MLC_PYTHON_BIN} -m pip install tensorflow-macos${MLC_TMP_PIP_VERSION_STRING}
test $? -eq 0 || exit 1
-echo "CM_GENERIC_PYTHON_PACKAGE_NAME=tensorflow-macos" >> $PWD/tmp-run-env.out
+echo "MLC_GENERIC_PYTHON_PACKAGE_NAME=tensorflow-macos" >> $PWD/tmp-run-env.out
diff --git a/script/get-generic-python-lib/uninstall_deps.sh b/script/get-generic-python-lib/uninstall_deps.sh
index eeddf36d7..b288c967b 100644
--- a/script/get-generic-python-lib/uninstall_deps.sh
+++ b/script/get-generic-python-lib/uninstall_deps.sh
@@ -1,7 +1,7 @@
#!/bin/bash
-if [[ -n ${CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS} ]]; then
- cmd="${CM_PYTHON_BIN_WITH_PATH} -m pip uninstall ${CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS} -y ${CM_PYTHON_PIP_COMMON_EXTRA}"
+if [[ -n ${MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS} ]]; then
+ cmd="${MLC_PYTHON_BIN_WITH_PATH} -m pip uninstall ${MLC_GENERIC_PYTHON_PIP_UNINSTALL_DEPS} -y ${MLC_PYTHON_PIP_COMMON_EXTRA}"
echo "$cmd"
eval "$cmd"
test $? -eq 0 || exit $?
diff --git a/script/get-generic-python-lib/validate_cache.bat b/script/get-generic-python-lib/validate_cache.bat
index 2612377c8..17e27e030 100644
--- a/script/get-generic-python-lib/validate_cache.bat
+++ b/script/get-generic-python-lib/validate_cache.bat
@@ -1,4 +1,4 @@
-IF NOT DEFINED CM_TMP_CURRENT_SCRIPT_PATH SET CM_TMP_CURRENT_SCRIPT_PATH=%CD%
+IF NOT DEFINED MLC_TMP_CURRENT_SCRIPT_PATH SET MLC_TMP_CURRENT_SCRIPT_PATH=%CD%
-%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\detect-version.py
+%MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\detect-version.py
IF %ERRORLEVEL% NEQ 0 EXIT 1
diff --git a/script/get-generic-python-lib/validate_cache.sh b/script/get-generic-python-lib/validate_cache.sh
index b60ac0814..2df36823d 100644
--- a/script/get-generic-python-lib/validate_cache.sh
+++ b/script/get-generic-python-lib/validate_cache.sh
@@ -1,7 +1,7 @@
#!/bin/bash
-CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD}
+MLC_TMP_CURRENT_SCRIPT_PATH=${MLC_TMP_CURRENT_SCRIPT_PATH:-$PWD}
-${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/detect-version.py
+${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/detect-version.py
test $? -eq 0 || exit $?
exit 0
diff --git a/script/get-generic-sys-util/customize.py b/script/get-generic-sys-util/customize.py
index 81f2bf76b..74b8c75b6 100644
--- a/script/get-generic-sys-util/customize.py
+++ b/script/get-generic-sys-util/customize.py
@@ -12,37 +12,37 @@ def preprocess(i):
automation = i['automation']
    # Use VERSION_CMD as CHECK_CMD if no CHECK_CMD is set
- if env.get('CM_SYS_UTIL_VERSION_CMD', '') != '' and env.get(
- 'CM_SYS_UTIL_CHECK_CMD', '') == '':
- env['CM_SYS_UTIL_CHECK_CMD'] = env['CM_SYS_UTIL_VERSION_CMD']
+ if env.get('MLC_SYS_UTIL_VERSION_CMD', '') != '' and env.get(
+ 'MLC_SYS_UTIL_CHECK_CMD', '') == '':
+ env['MLC_SYS_UTIL_CHECK_CMD'] = env['MLC_SYS_UTIL_VERSION_CMD']
- if env.get('CM_GENERIC_SYS_UTIL_RUN_MODE', '') == "install":
- if env.get('CM_SYS_UTIL_INSTALL_WITH_RETRY', '') == "yes":
+ if env.get('MLC_GENERIC_SYS_UTIL_RUN_MODE', '') == "install":
+ if env.get('MLC_SYS_UTIL_INSTALL_WITH_RETRY', '') == "yes":
i['run_script_input']['script_name'] = "install-with-retry"
else:
i['run_script_input']['script_name'] = "install"
- if env.get('CM_GENERIC_SYS_UTIL_RUN_MODE', '') == "detect":
- if env.get('CM_SYS_UTIL_VERSION_CMD', '') != '' or env.get(
- 'CM_SYS_UTIL_VERSION_CMD_OVERRIDE', '') != '':
+ if env.get('MLC_GENERIC_SYS_UTIL_RUN_MODE', '') == "detect":
+ if env.get('MLC_SYS_UTIL_VERSION_CMD', '') != '' or env.get(
+ 'MLC_SYS_UTIL_VERSION_CMD_OVERRIDE', '') != '':
r = automation.run_native_script(
{'run_script_input': i['run_script_input'], 'env': env, 'script_name': 'detect'})
if r['return'] != 0: # detection failed, do install via prehook_deps
print("detection failed, going for installation")
- env['CM_GENERIC_SYS_UTIL_INSTALL_NEEDED'] = "yes"
+ env['MLC_GENERIC_SYS_UTIL_INSTALL_NEEDED'] = "yes"
return {'return': 0}
else: # detection is successful, no need to install
# print("detection success")
- env['CM_SYS_UTIL_INSTALL_CMD'] = ""
+ env['MLC_SYS_UTIL_INSTALL_CMD'] = ""
return {'return': 0}
        else:  # No detection command available, just install
# print("No detection possible, going for installation")
- env['CM_GENERIC_SYS_UTIL_INSTALL_NEEDED'] = "yes"
+ env['MLC_GENERIC_SYS_UTIL_INSTALL_NEEDED'] = "yes"
return {'return': 0}
# Only "install" mode reaches here
- pm = env.get('CM_HOST_OS_PACKAGE_MANAGER')
- util = env.get('CM_SYS_UTIL_NAME', '')
+ pm = env.get('MLC_HOST_OS_PACKAGE_MANAGER')
+ util = env.get('MLC_SYS_UTIL_NAME', '')
if util == '':
return {
'return': 1, 'error': 'Please select a variation specifying the sys util name'}
@@ -67,19 +67,19 @@ def preprocess(i):
'error': f'No package name specified for {util} in the meta'}
if not package_name:
- if str(env.get('CM_GENERIC_SYS_UTIL_IGNORE_MISSING_PACKAGE', '')
+ if str(env.get('MLC_GENERIC_SYS_UTIL_IGNORE_MISSING_PACKAGE', '')
).lower() in ["1", "true", "yes"]:
print(
f"WARNING: No package name specified for {pm} and util name {util}. Ignoring it...")
- env['CM_TMP_GENERIC_SYS_UTIL_PACKAGE_INSTALL_IGNORED'] = 'yes'
+ env['MLC_TMP_GENERIC_SYS_UTIL_PACKAGE_INSTALL_IGNORED'] = 'yes'
return {'return': 0}
else:
return {
'return': 1, 'error': f'No package name specified for {pm} and util name {util}'}
if util == "libffi":
- if env.get("CM_HOST_OS_FLAVOR", "") == "ubuntu":
- if env.get("CM_HOST_OS_VERSION", "") in [
+ if env.get("MLC_HOST_OS_FLAVOR", "") == "ubuntu":
+ if env.get("MLC_HOST_OS_VERSION", "") in [
"20.04", "20.10", "21.04", "21.10"]:
package_name = "libffi7"
else:
@@ -96,7 +96,7 @@ def preprocess(i):
package_name = package_name.replace(
"<<<" + tmp_value + ">>>", str(env[tmp_value]))
- install_cmd = env.get('CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD')
+ install_cmd = env.get('MLC_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD')
if not install_cmd:
return {
'return': 1, 'error': 'Package manager installation command not detected for the given OS'}
@@ -104,32 +104,32 @@ def preprocess(i):
if pm == "brew":
sudo = ''
else:
- sudo = env.get('CM_SUDO', '')
- env['CM_SYS_UTIL_INSTALL_CMD'] = sudo + \
+ sudo = env.get('MLC_SUDO', '')
+ env['MLC_SYS_UTIL_INSTALL_CMD'] = sudo + \
' ' + install_cmd + ' ' + package_name
env['+PATH'] = []
- if env.get('CM_HOST_OS_FLAVOR', '') == 'rhel':
- if env['CM_SYS_UTIL_NAME'] == "g++12":
+ if env.get('MLC_HOST_OS_FLAVOR', '') == 'rhel':
+ if env['MLC_SYS_UTIL_NAME'] == "g++12":
env['+PATH'] = ["/opt/rh/gcc-toolset-12/root/usr/bin"]
- if env['CM_SYS_UTIL_NAME'] == "numactl" and env['CM_HOST_OS_VERSION'] in [
+ if env['MLC_SYS_UTIL_NAME'] == "numactl" and env['MLC_HOST_OS_VERSION'] in [
"9.1", "9.2", "9.3"]:
- env['CM_SYS_UTIL_INSTALL_CMD'] = ''
+ env['MLC_SYS_UTIL_INSTALL_CMD'] = ''
- if env.get('CM_SYS_UTIL_CHECK_CMD',
- '') != '' and env['CM_SYS_UTIL_INSTALL_CMD'] != '':
- env['CM_SYS_UTIL_INSTALL_CMD'] = f"""{env['CM_SYS_UTIL_CHECK_CMD']} || {env['CM_SYS_UTIL_INSTALL_CMD']}"""
+ if env.get('MLC_SYS_UTIL_CHECK_CMD',
+ '') != '' and env['MLC_SYS_UTIL_INSTALL_CMD'] != '':
+ env['MLC_SYS_UTIL_INSTALL_CMD'] = f"""{env['MLC_SYS_UTIL_CHECK_CMD']} || {env['MLC_SYS_UTIL_INSTALL_CMD']}"""
return {'return': 0}
def detect_version(i):
env = i['env']
- version_env_key = f"CM_{env['CM_SYS_UTIL_NAME'].upper()}_VERSION"
- version_check_re = env.get('CM_SYS_UTIL_VERSION_RE', '')
- group_number = env.get('CM_TMP_VERSION_DETECT_GROUP_NUMBER', 1)
+ version_env_key = f"MLC_{env['MLC_SYS_UTIL_NAME'].upper()}_VERSION"
+ version_check_re = env.get('MLC_SYS_UTIL_VERSION_RE', '')
+ group_number = env.get('MLC_TMP_VERSION_DETECT_GROUP_NUMBER', 1)
# Confirm that the regex pattern and file are present
if version_check_re == '' or not os.path.exists("tmp-ver.out"):
@@ -154,17 +154,17 @@ def detect_version(i):
def postprocess(i):
env = i['env']
- version_env_key = f"CM_{env['CM_SYS_UTIL_NAME'].upper()}_VERSION"
+ version_env_key = f"MLC_{env['MLC_SYS_UTIL_NAME'].upper()}_VERSION"
- if (env.get('CM_SYS_UTIL_VERSION_CMD', '') != '' or env.get('CM_SYS_UTIL_VERSION_CMD_OVERRIDE', '') != '') and env.get(version_env_key, '') == '' and str(env.get(
- 'CM_TMP_GENERIC_SYS_UTIL_PACKAGE_INSTALL_IGNORED', '')).lower() not in ["yes", "1", "true"] and env.get('CM_GET_GENERIC_SYS_UTIL_INSTALL_FAILED', '') != 'yes':
+ if (env.get('MLC_SYS_UTIL_VERSION_CMD', '') != '' or env.get('MLC_SYS_UTIL_VERSION_CMD_OVERRIDE', '') != '') and env.get(version_env_key, '') == '' and str(env.get(
+ 'MLC_TMP_GENERIC_SYS_UTIL_PACKAGE_INSTALL_IGNORED', '')).lower() not in ["yes", "1", "true"] and env.get('MLC_GET_GENERIC_SYS_UTIL_INSTALL_FAILED', '') != 'yes':
automation = i['automation']
r = automation.run_native_script(
{'run_script_input': i['run_script_input'], 'env': env, 'script_name': 'detect'})
if r['return'] > 0 and str(env.get(
- 'CM_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE', '')).lower() not in ["1", "yes", "true"]:
- return {'return': 1, 'error': 'Version detection failed after installation. Please check the provided version command or use env.CM_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE=yes to ignore the error.'}
+ 'MLC_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE', '')).lower() not in ["1", "yes", "true"]:
+ return {'return': 1, 'error': 'Version detection failed after installation. Please check the provided version command or use env.MLC_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE=yes to ignore the error.'}
elif r['return'] == 0:
r = detect_version(i)
@@ -177,7 +177,7 @@ def postprocess(i):
env[version_env_key] = version
# Not used now
- env['CM_GENERIC_SYS_UTIL_' + env['CM_SYS_UTIL_NAME'].upper() +
+ env['MLC_GENERIC_SYS_UTIL_' + env['MLC_SYS_UTIL_NAME'].upper() +
'_CACHE_TAGS'] = 'version-' + version
if env.get(version_env_key, '') == '':
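
detect_version() above applies MLC_SYS_UTIL_VERSION_RE to the command output captured in tmp-ver.out, selecting the group given by MLC_TMP_VERSION_DETECT_GROUP_NUMBER. A standalone approximation of that match (not the automation's parse_version() helper), using the cmake variation from meta.yaml below as the example:

    import re

    # Sketch: regex-based version extraction as performed by detect_version().
    def match_version(output: str, pattern: str, group_number: int = 1):
        m = re.search(pattern, output)
        return m.group(group_number) if m else None

    print(match_version('cmake version 3.28.1', r'cmake version ([\d.]+)', 1))
    # 3.28.1  (group 0 would return the whole match, as dmidecode's meta uses)
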
diff --git a/script/get-generic-sys-util/detect.sh b/script/get-generic-sys-util/detect.sh
index 2c3583799..53d36fa2b 100755
--- a/script/get-generic-sys-util/detect.sh
+++ b/script/get-generic-sys-util/detect.sh
@@ -1,17 +1,17 @@
#!/bin/bash
-if [[ -n "${CM_SYS_UTIL_VERSION_CMD_OVERRIDE}" ]]; then
- cmd="${CM_SYS_UTIL_VERSION_CMD_OVERRIDE}"
+if [[ -n "${MLC_SYS_UTIL_VERSION_CMD_OVERRIDE}" ]]; then
+ cmd="${MLC_SYS_UTIL_VERSION_CMD_OVERRIDE}"
echo $cmd
eval $cmd
test $? -eq 0 || exit $?
else
- if [[ -n "${CM_SYS_UTIL_VERSION_CMD}" ]]; then
- if [[ "${CM_SYS_UTIL_VERSION_CMD_USE_ERROR_STREAM}" == "yes" ]]; then
+ if [[ -n "${MLC_SYS_UTIL_VERSION_CMD}" ]]; then
+ if [[ "${MLC_SYS_UTIL_VERSION_CMD_USE_ERROR_STREAM}" == "yes" ]]; then
# Redirect both stdout and stderr to tmp-ver.out
- cmd="${CM_SYS_UTIL_VERSION_CMD} > tmp-ver.out 2>&1"
+ cmd="${MLC_SYS_UTIL_VERSION_CMD} > tmp-ver.out 2>&1"
else
- cmd="${CM_SYS_UTIL_VERSION_CMD} > tmp-ver.out"
+ cmd="${MLC_SYS_UTIL_VERSION_CMD} > tmp-ver.out"
fi
echo $cmd
eval $cmd
diff --git a/script/get-generic-sys-util/install-with-retry.sh b/script/get-generic-sys-util/install-with-retry.sh
index 9abc55d08..43ee22556 100644
--- a/script/get-generic-sys-util/install-with-retry.sh
+++ b/script/get-generic-sys-util/install-with-retry.sh
@@ -1,6 +1,6 @@
#!/bin/bash
# Safe execution of a command stored in a variable
-cmd="${CM_SYS_UTIL_INSTALL_CMD}"
+cmd="${MLC_SYS_UTIL_INSTALL_CMD}"
echo "$cmd"
# set the max number of retries as well as the delay between the retries
@@ -9,7 +9,7 @@ delay_in_retry=3
for ((i=1; i<=max_retries; i++)); do
- echo "Attempting to install ${CM_SYS_UTIL_NAME} - $i of $max_retries..."
+ echo "Attempting to install ${MLC_SYS_UTIL_NAME} - $i of $max_retries..."
output=$(eval "$cmd" 2>&1)
echo "$output"
exit_status=$?
@@ -21,8 +21,8 @@ for ((i=1; i<=max_retries; i++)); do
sleep $delay_in_retry
else
# If it's a non-network error, handle based on fail-safe setting
- if [[ "${CM_TMP_FAIL_SAFE}" == 'yes' ]]; then
- echo "CM_GET_GENERIC_SYS_UTIL_INSTALL_FAILED=yes" > tmp-run-env.out
+ if [[ "${MLC_TMP_FAIL_SAFE}" == 'yes' ]]; then
+ echo "MLC_GET_GENERIC_SYS_UTIL_INSTALL_FAILED=yes" > tmp-run-env.out
echo "Fail-safe is enabled, exiting with status 0."
exit 0
else
@@ -32,14 +32,14 @@ for ((i=1; i<=max_retries; i++)); do
fi
else
# If the command succeeded
- echo "Successfully installed ${CM_SYS_UTIL_NAME}."
+ echo "Successfully installed ${MLC_SYS_UTIL_NAME}."
exit 0
fi
# If this was the last retry, print a final failure message
if [[ $i -eq $max_retries ]]; then
echo "Installation failed after $max_retries attempts due to persistent network issues."
- if [[ "${CM_TMP_FAIL_SAFE}" == 'yes' ]]; then
+ if [[ "${MLC_TMP_FAIL_SAFE}" == 'yes' ]]; then
exit 0
else
exit 1
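
The retry policy above — retry only on network errors, honour the fail-safe flag on hard failures — translates naturally to Python. A sketch of the same loop; the network-error marker string is an assumption standing in for whatever the shell script actually greps for:

    import os
    import subprocess
    import sys
    import time

    max_retries, delay = 3, 3
    cmd = os.environ['MLC_SYS_UTIL_INSTALL_CMD']
    for attempt in range(1, max_retries + 1):
        print(f"Attempting to install {os.environ.get('MLC_SYS_UTIL_NAME')} "
              f"- {attempt} of {max_retries}...")
        proc = subprocess.run(cmd, shell=True, capture_output=True, text=True)
        print(proc.stdout + proc.stderr)
        if proc.returncode == 0:
            break                                  # success, stop retrying
        if 'Temporary failure' in proc.stderr:     # assumed network-error marker
            time.sleep(delay)
            continue
        if os.environ.get('MLC_TMP_FAIL_SAFE') == 'yes':
            with open('tmp-run-env.out', 'w') as f:
                f.write('MLC_GET_GENERIC_SYS_UTIL_INSTALL_FAILED=yes\n')
            sys.exit(0)                            # fail-safe: report but succeed
        sys.exit(proc.returncode)
    else:
        # All retries exhausted on network errors.
        sys.exit(0 if os.environ.get('MLC_TMP_FAIL_SAFE') == 'yes' else 1)
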
diff --git a/script/get-generic-sys-util/install.sh b/script/get-generic-sys-util/install.sh
index c8f532c49..d264ba3e9 100644
--- a/script/get-generic-sys-util/install.sh
+++ b/script/get-generic-sys-util/install.sh
@@ -1,14 +1,14 @@
#!/bin/bash
# Safe execution of a command stored in a variable
-cmd="${CM_SYS_UTIL_INSTALL_CMD}"
+cmd="${MLC_SYS_UTIL_INSTALL_CMD}"
echo "$cmd"
# Execute the command and capture the exit status directly
if ! eval "$cmd"; then
echo "Command failed with status $?"
- if [[ "${CM_TMP_FAIL_SAFE}" == 'yes' ]]; then
+ if [[ "${MLC_TMP_FAIL_SAFE}" == 'yes' ]]; then
# Exit safely if fail-safe is enabled
- echo "CM_GET_GENERIC_SYS_UTIL_INSTALL_FAILED=yes" > tmp-run-env.out
+ echo "MLC_GET_GENERIC_SYS_UTIL_INSTALL_FAILED=yes" > tmp-run-env.out
echo "Fail-safe is enabled, exiting with status 0"
exit 0
else
diff --git a/script/get-generic-sys-util/meta.yaml b/script/get-generic-sys-util/meta.yaml
index b75e24bbc..0436ba72d 100644
--- a/script/get-generic-sys-util/meta.yaml
+++ b/script/get-generic-sys-util/meta.yaml
@@ -4,16 +4,16 @@ automation_uid: 5b4e0237da074764
cache: true
category: Detection or installation of tools and artifacts
default_env:
- CM_CLEAN_DIRS: bin
- CM_SUDO: sudo
+ MLC_CLEAN_DIRS: bin
+ MLC_SUDO: sudo
deps:
- tags: detect,os
env:
- CM_GENERIC_SYS_UTIL_INSTALL_NEEDED: 'no'
- CM_SYS_UTIL_VERSION_CMD: ''
+ MLC_GENERIC_SYS_UTIL_INSTALL_NEEDED: 'no'
+ MLC_SYS_UTIL_VERSION_CMD: ''
input_mapping:
- fail_safe: CM_TMP_FAIL_SAFE
- ignore_missing: CM_GENERIC_SYS_UTIL_IGNORE_MISSING_PACKAGE
+ fail_safe: MLC_TMP_FAIL_SAFE
+ ignore_missing: MLC_GENERIC_SYS_UTIL_IGNORE_MISSING_PACKAGE
new_env_keys:
- +PATH
tags:
@@ -27,7 +27,7 @@ tests:
docker_os: rhel
docker_os_version: '9'
env:
- CM_TMP_FAIL_SAFE: 'yes'
+ MLC_TMP_FAIL_SAFE: 'yes'
ignore_missing: 'yes'
test-all-variations: 'yes'
- docker: 'yes'
@@ -52,12 +52,12 @@ uid: bb0393afa8404a11
variations:
cmake:
env:
- CM_SYS_UTIL_NAME: cmake
- CM_SYS_UTIL_VERSION_CMD: cmake --version
- CM_SYS_UTIL_VERSION_RE: cmake version ([\d.]+)
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 1
+ MLC_SYS_UTIL_NAME: cmake
+ MLC_SYS_UTIL_VERSION_CMD: cmake --version
+ MLC_SYS_UTIL_VERSION_RE: cmake version ([\d.]+)
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 1
new_env_keys:
- - CM_CMAKE_VERSION
+ - MLC_CMAKE_VERSION
state:
cmake:
apt: cmake
@@ -67,14 +67,14 @@ variations:
detect:
default: true
env:
- CM_GENERIC_SYS_UTIL_RUN_MODE: detect
+ MLC_GENERIC_SYS_UTIL_RUN_MODE: detect
group: mode
prehook_deps:
- enable_if_env:
- CM_GENERIC_SYS_UTIL_INSTALL_NEEDED:
+ MLC_GENERIC_SYS_UTIL_INSTALL_NEEDED:
- 'yes'
force_env_keys:
- - CM_TMP_FAIL_SAFE
+ - MLC_TMP_FAIL_SAFE
inherit_variation_tags: true
names:
- install-sys-util
@@ -83,12 +83,12 @@ variations:
tags: get,generic-sys-util,_install
dmidecode:
env:
- CM_SYS_UTIL_NAME: dmidecode
- CM_SYS_UTIL_VERSION_CMD: dmidecode --version
- CM_SYS_UTIL_VERSION_RE: ([\d.]+)
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+ MLC_SYS_UTIL_NAME: dmidecode
+ MLC_SYS_UTIL_VERSION_CMD: dmidecode --version
+ MLC_SYS_UTIL_VERSION_RE: ([\d.]+)
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
new_env_keys:
- - CM_DMIDECODE_VERSION
+ - MLC_DMIDECODE_VERSION
state:
dmidecode:
apt: dmidecode
@@ -97,69 +97,69 @@ variations:
yum: dmidecode
g++-11:
env:
- CM_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE: 'yes'
- CM_SYS_UTIL_NAME: g++11
- CM_SYS_UTIL_VERSION_CMD: g++-11 --version
- CM_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+ MLC_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE: 'yes'
+ MLC_SYS_UTIL_NAME: g++11
+ MLC_SYS_UTIL_VERSION_CMD: g++-11 --version
+ MLC_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
new_env_keys:
- - CM_GPP11_VERSION
+ - MLC_GPP11_VERSION
state:
g++11:
apt: g++-11
dnf: gcc-toolset-11-gcc-c++
g++-12:
env:
- CM_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE: 'yes'
- CM_SYS_UTIL_NAME: g++12
- CM_SYS_UTIL_VERSION_CMD: g++-12 --version
- CM_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+ MLC_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE: 'yes'
+ MLC_SYS_UTIL_NAME: g++12
+ MLC_SYS_UTIL_VERSION_CMD: g++-12 --version
+ MLC_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
new_env_keys:
- - CM_GPP12_VERSION
+ - MLC_GPP12_VERSION
state:
g++12:
apt: g++-12
dnf: gcc-toolset-12-gcc-c++
g++-9:
env:
- CM_SYS_UTIL_NAME: g++9
- CM_SYS_UTIL_VERSION_CMD: g++-9 --version
- CM_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+ MLC_SYS_UTIL_NAME: g++9
+ MLC_SYS_UTIL_VERSION_CMD: g++-9 --version
+ MLC_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
new_env_keys:
- - CM_GPP9_VERSION
+ - MLC_GPP9_VERSION
state:
g++9:
apt: g++-9
dnf: gcc-toolset-9-gcc-c++
gcc-11:
env:
- CM_SYS_UTIL_NAME: gcc11
- CM_SYS_UTIL_VERSION_CMD: gcc-11 --version
- CM_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+ MLC_SYS_UTIL_NAME: gcc11
+ MLC_SYS_UTIL_VERSION_CMD: gcc-11 --version
+ MLC_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
new_env_keys:
- - CM_GCC11_VERSION
+ - MLC_GCC11_VERSION
state:
gcc11:
apt: gcc-11
gcc-9:
env:
- CM_SYS_UTIL_NAME: gcc9
- CM_SYS_UTIL_VERSION_CMD: gcc-9 --version
- CM_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+ MLC_SYS_UTIL_NAME: gcc9
+ MLC_SYS_UTIL_VERSION_CMD: gcc-9 --version
+ MLC_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
new_env_keys:
- - CM_GCC9_VERSION
+ - MLC_GCC9_VERSION
state:
gcc9:
apt: gcc-9
gflags-dev:
env:
- CM_SYS_UTIL_NAME: gflags-dev
+ MLC_SYS_UTIL_NAME: gflags-dev
new_env_keys:
- - CM_GFLAGS_DEV_VERSION
+ - MLC_GFLAGS_DEV_VERSION
state:
gflags-dev:
apt: libgflags-dev
@@ -168,12 +168,12 @@ variations:
yum: gflags-devel
git-lfs:
env:
- CM_SYS_UTIL_NAME: git-lfs
- CM_SYS_UTIL_VERSION_CMD: git-lfs --version
- CM_SYS_UTIL_VERSION_RE: git-lfs\/([\d.]+)
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+ MLC_SYS_UTIL_NAME: git-lfs
+ MLC_SYS_UTIL_VERSION_CMD: git-lfs --version
+ MLC_SYS_UTIL_VERSION_RE: git-lfs\/([\d.]+)
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
new_env_keys:
- - CM_GIT_LFS_VERSION
+ - MLC_GIT_LFS_VERSION
state:
git-lfs:
apt: git-lfs
@@ -182,9 +182,9 @@ variations:
yum: git-lfs
glog-dev:
env:
- CM_SYS_UTIL_NAME: glog-dev
+ MLC_SYS_UTIL_NAME: glog-dev
new_env_keys:
- - CM_GLOG_DEV_VERSION
+ - MLC_GLOG_DEV_VERSION
state:
glog-dev:
apt: libgoogle-glog-dev
@@ -193,19 +193,19 @@ variations:
yum: glog-devel
install:
env:
- CM_GENERIC_SYS_UTIL_RUN_MODE: install
+ MLC_GENERIC_SYS_UTIL_RUN_MODE: install
group: mode
new_env_keys:
- - CM_TMP_GENERIC_SYS_UTIL_PACKAGE_INSTALL_IGNORED
- - CM_GET_GENERIC_SYS_UTIL_INSTALL_FAILED
+ - MLC_TMP_GENERIC_SYS_UTIL_PACKAGE_INSTALL_IGNORED
+ - MLC_GET_GENERIC_SYS_UTIL_INSTALL_FAILED
libboost-all-dev:
env:
- CM_SYS_UTIL_NAME: libboost-all-dev
- CM_SYS_UTIL_VERSION_CMD: dpkg -s libboost-dev | grep 'Version'
- CM_SYS_UTIL_VERSION_RE: ([0-9]+(\.[0-9]+)+)
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 1
+ MLC_SYS_UTIL_NAME: libboost-all-dev
+ MLC_SYS_UTIL_VERSION_CMD: dpkg -s libboost-dev | grep 'Version'
+ MLC_SYS_UTIL_VERSION_RE: ([0-9]+(\.[0-9]+)+)
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 1
new_env_keys:
- - CM_LIBBOOST_ALL_DEV_VERSION
+ - MLC_LIBBOOST_ALL_DEV_VERSION
state:
libboost-all-dev:
apt: libboost-all-dev
@@ -214,12 +214,12 @@ variations:
yum: boost-devel
bzip2:
env:
- CM_SYS_UTIL_NAME: bzip2
- CM_SYS_UTIL_VERSION_CMD_OVERRIDE: bzcat --version 2>&1 | grep bzip > tmp-ver.out
- CM_SYS_UTIL_VERSION_RE: ([0-9]+(\.[0-9]+)+)
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 1
+ MLC_SYS_UTIL_NAME: bzip2
+ MLC_SYS_UTIL_VERSION_CMD_OVERRIDE: bzcat --version 2>&1 | grep bzip > tmp-ver.out
+ MLC_SYS_UTIL_VERSION_RE: ([0-9]+(\.[0-9]+)+)
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 1
new_env_keys:
- - CM_BZIP2_VERSION
+ - MLC_BZIP2_VERSION
state:
bzip2:
apt: bzip2
@@ -228,12 +228,12 @@ variations:
yum: bzip2
libbz2-dev:
env:
- CM_SYS_UTIL_NAME: libbz2_dev
- CM_SYS_UTIL_VERSION_CMD: dpkg -s libbz2-dev | grep 'Version'
- CM_SYS_UTIL_VERSION_RE: ([0-9]+(\.[0-9]+)+)
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+ MLC_SYS_UTIL_NAME: libbz2_dev
+ MLC_SYS_UTIL_VERSION_CMD: dpkg -s libbz2-dev | grep 'Version'
+ MLC_SYS_UTIL_VERSION_RE: ([0-9]+(\.[0-9]+)+)
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
new_env_keys:
- - CM_LIBBZ2_DEV_VERSION
+ - MLC_LIBBZ2_DEV_VERSION
state:
libbz2_dev:
apt: libbz2-dev
@@ -241,28 +241,28 @@ variations:
yum: libbzip2-devel
libev-dev:
env:
- CM_SYS_UTIL_NAME: libev_dev
- CM_SYS_UTIL_VERSION_CMD: dpkg -s libev-dev | grep 'Version'
- CM_SYS_UTIL_VERSION_RE: ([\d:]+\.[\d\.-]+)
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+ MLC_SYS_UTIL_NAME: libev_dev
+ MLC_SYS_UTIL_VERSION_CMD: dpkg -s libev-dev | grep 'Version'
+ MLC_SYS_UTIL_VERSION_RE: ([\d:]+\.[\d\.-]+)
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
new_env_keys:
- - CM_LIBEV_DEV_VERSION
+ - MLC_LIBEV_DEV_VERSION
state:
libev_dev:
apt: libev-dev
libffi:
env:
- CM_SYS_UTIL_NAME: libffi
+ MLC_SYS_UTIL_NAME: libffi
new_env_keys:
- - CM_LIBFFI_VERSION
+ - MLC_LIBFFI_VERSION
state:
libffi:
apt: libffi
libffi-dev:
env:
- CM_SYS_UTIL_NAME: libffi_dev
+ MLC_SYS_UTIL_NAME: libffi_dev
new_env_keys:
- - CM_LIBFFI_DEV_VERSION
+ - MLC_LIBFFI_DEV_VERSION
state:
libffi_dev:
apt: libffi-dev
@@ -271,40 +271,40 @@ variations:
yum: libffi-devel
libffi7:
env:
- CM_SYS_UTIL_NAME: libffi7
- CM_SYS_UTIL_VERSION_CMD: dpkg -l libffi7 2>/dev/null | grep '^ii' | awk '{print
+ MLC_SYS_UTIL_NAME: libffi7
+ MLC_SYS_UTIL_VERSION_CMD: dpkg -l libffi7 2>/dev/null | grep '^ii' | awk '{print
$3}' || rpm -q libffi7 2>/dev/null || pacman -Q libffi7 2>/dev/null
- CM_SYS_UTIL_VERSION_RE: \d\.\d-[0-9]+
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+ MLC_SYS_UTIL_VERSION_RE: \d\.\d-[0-9]+
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
new_env_keys:
- - CM_LIBFFI7_VERSION
+ - MLC_LIBFFI7_VERSION
state:
libffi7:
apt: libffi7
libffi8:
env:
- CM_SYS_UTIL_NAME: libffi8
+ MLC_SYS_UTIL_NAME: libffi8
new_env_keys:
- - CM_LIBFFI8_VERSION
+ - MLC_LIBFFI8_VERSION
state:
libffi8:
apt: libffi8
libgdbm-dev:
env:
- CM_SYS_UTIL_NAME: libgdbm_dev
- CM_SYS_UTIL_VERSION_CMD: dpkg -s libgdbm-dev | grep 'Version'
- CM_SYS_UTIL_VERSION_RE: ([\d]+\.[\d\.-]+)
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+ MLC_SYS_UTIL_NAME: libgdbm_dev
+ MLC_SYS_UTIL_VERSION_CMD: dpkg -s libgdbm-dev | grep 'Version'
+ MLC_SYS_UTIL_VERSION_RE: ([\d]+\.[\d\.-]+)
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
new_env_keys:
- - CM_LIBGDBM_DEV_VERSION
+ - MLC_LIBGDBM_DEV_VERSION
state:
libgdbm_dev:
apt: libgdbm-dev
libgmock-dev:
env:
- CM_SYS_UTIL_NAME: libgmock-dev
+ MLC_SYS_UTIL_NAME: libgmock-dev
new_env_keys:
- - CM_LIBGMOCK_DEV_VERSION
+ - MLC_LIBGMOCK_DEV_VERSION
state:
libgmock-dev:
apt: libgmock-dev
@@ -313,20 +313,20 @@ variations:
yum: gmock-devel
liblzma-dev:
env:
- CM_SYS_UTIL_NAME: liblzma_dev
- CM_SYS_UTIL_VERSION_CMD: xz --version
- CM_SYS_UTIL_VERSION_RE: (\d(\.\d)+)
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+ MLC_SYS_UTIL_NAME: liblzma_dev
+ MLC_SYS_UTIL_VERSION_CMD: xz --version
+ MLC_SYS_UTIL_VERSION_RE: (\d(\.\d)+)
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
new_env_keys:
- - CM_LIBLZMA_DEV_VERSION
+ - MLC_LIBLZMA_DEV_VERSION
state:
liblzma_dev:
apt: liblzma-dev
libmkl-dev:
env:
- CM_SYS_UTIL_NAME: libmkl-dev
+ MLC_SYS_UTIL_NAME: libmkl-dev
new_env_keys:
- - CM_LIBMKL_DEV_VERSION
+ - MLC_LIBMKL_DEV_VERSION
state:
libmkl-dev:
apt: libmkl-dev
@@ -335,9 +335,9 @@ variations:
yum: ''
libmpfr-dev:
env:
- CM_SYS_UTIL_NAME: libmpfr-dev
+ MLC_SYS_UTIL_NAME: libmpfr-dev
new_env_keys:
- - CM_LIBMPFR_DEV_VERSION
+ - MLC_LIBMPFR_DEV_VERSION
state:
libmpfr-dev:
apt: libmpfr-dev
@@ -347,13 +347,13 @@ variations:
zypper: mpfr-devel
libncurses-dev:
env:
- CM_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE: 'yes'
- CM_SYS_UTIL_NAME: libncurses_dev
- CM_SYS_UTIL_VERSION_CMD: ncurses5-config --version
- CM_SYS_UTIL_VERSION_RE: ([0-9]+(\.[0-9]+)+)
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 1
+ MLC_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE: 'yes'
+ MLC_SYS_UTIL_NAME: libncurses_dev
+ MLC_SYS_UTIL_VERSION_CMD: ncurses5-config --version
+ MLC_SYS_UTIL_VERSION_RE: ([0-9]+(\.[0-9]+)+)
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 1
new_env_keys:
- - CM_LIBNCURSES_DEV_VERSION
+ - MLC_LIBNCURSES_DEV_VERSION
state:
libncurses_dev:
apt: libncurses-dev
@@ -361,9 +361,9 @@ variations:
yum: libncurses-devel
libnuma-dev:
env:
- CM_SYS_UTIL_NAME: libnuma-dev
+ MLC_SYS_UTIL_NAME: libnuma-dev
new_env_keys:
- - CM_LIBNUMA_DEV_VERSION
+ - MLC_LIBNUMA_DEV_VERSION
state:
libnuma-dev:
apt: libnuma-dev
@@ -372,9 +372,9 @@ variations:
yum: numactl-libs
libpci-dev:
env:
- CM_SYS_UTIL_NAME: libpci-dev
+ MLC_SYS_UTIL_NAME: libpci-dev
new_env_keys:
- - CM_LIBPCI_DEV_VERSION
+ - MLC_LIBPCI_DEV_VERSION
state:
libpci-dev:
apt: libpci-dev
@@ -383,9 +383,9 @@ variations:
yum: pciutils-devel
libpng-dev:
env:
- CM_SYS_UTIL_NAME: libpng-dev
+ MLC_SYS_UTIL_NAME: libpng-dev
new_env_keys:
- - CM_LIBPNG_DEV_VERSION
+ - MLC_LIBPNG_DEV_VERSION
state:
libpng-dev:
apt: libpng-dev
@@ -394,9 +394,9 @@ variations:
yum: libpng-devel
libre2-dev:
env:
- CM_SYS_UTIL_NAME: libre2-dev
+ MLC_SYS_UTIL_NAME: libre2-dev
new_env_keys:
- - CM_LIBRE2_DEV_VERSION
+ - MLC_LIBRE2_DEV_VERSION
state:
libre2-dev:
apt: libre2-dev
@@ -405,9 +405,9 @@ variations:
yum: libre-devel
libreadline-dev:
env:
- CM_SYS_UTIL_NAME: libreadline_dev
+ MLC_SYS_UTIL_NAME: libreadline_dev
new_env_keys:
- - CM_LIBREADLINE_DEV_VERSION
+ - MLC_LIBREADLINE_DEV_VERSION
state:
libreadline_dev:
apt: libreadline-dev
@@ -415,20 +415,20 @@ variations:
yum: readline-devel
libsqlite3-dev:
env:
- CM_SYS_UTIL_NAME: libsqlite3_dev
+ MLC_SYS_UTIL_NAME: libsqlite3_dev
new_env_keys:
- - CM_LIBSQLITE3_DEV_VERSION
+ - MLC_LIBSQLITE3_DEV_VERSION
state:
libsqlite3_dev:
apt: libsqlite3-dev
libssl-dev:
env:
- CM_SYS_UTIL_NAME: libssl_dev
- CM_SYS_UTIL_VERSION_CMD: openssl version
- CM_SYS_UTIL_VERSION_RE: OpenSSL\s+([\d.]+)
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 1
+ MLC_SYS_UTIL_NAME: libssl_dev
+ MLC_SYS_UTIL_VERSION_CMD: openssl version
+ MLC_SYS_UTIL_VERSION_RE: OpenSSL\s+([\d.]+)
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 1
new_env_keys:
- - CM_LIBSSL_DEV_VERSION
+ - MLC_LIBSSL_DEV_VERSION
state:
libssl_dev:
apt: libssl-dev
@@ -437,9 +437,9 @@ variations:
yum: libssl-devel
libudev-dev:
env:
- CM_SYS_UTIL_NAME: libudev-dev
+ MLC_SYS_UTIL_NAME: libudev-dev
new_env_keys:
- - CM_LIBUDEV_DEV_VERSION
+ - MLC_LIBUDEV_DEV_VERSION
state:
libudev-dev:
apt: libudev-dev
@@ -450,32 +450,32 @@ variations:
deps:
- tags: detect,os
env:
- CM_SYS_UTIL_NAME: linux-tools
+ MLC_SYS_UTIL_NAME: linux-tools
new_env_keys:
- - CM_LINUX_TOOLS_VERSION
+ - MLC_LINUX_TOOLS_VERSION
state:
linux-tools:
- apt: linux-tools-<<<CM_HOST_OS_KERNEL_VERSION>>>
+ apt: linux-tools-<<<MLC_HOST_OS_KERNEL_VERSION>>>
md5sha1sum:
env:
- CM_SYS_UTIL_NAME: md5sha1sum
- CM_SYS_UTIL_VERSION_CMD: md5sum --version | grep sha1sum
- CM_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+ MLC_SYS_UTIL_NAME: md5sha1sum
+ MLC_SYS_UTIL_VERSION_CMD: md5sum --version | grep sha1sum
+ MLC_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
new_env_keys:
- - CM_MD5SHA1SUM_VERSION
+ - MLC_MD5SHA1SUM_VERSION
state:
md5sha1sum:
apt: ''
brew: md5sha1sum
ninja-build:
env:
- CM_SYS_UTIL_NAME: ninja-build
- CM_SYS_UTIL_VERSION_CMD: ninja --version
- CM_SYS_UTIL_VERSION_RE: ([\d.]+)
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+ MLC_SYS_UTIL_NAME: ninja-build
+ MLC_SYS_UTIL_VERSION_CMD: ninja --version
+ MLC_SYS_UTIL_VERSION_RE: ([\d.]+)
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
new_env_keys:
- - CM_NINJA_BUILD_VERSION
+ - MLC_NINJA_BUILD_VERSION
state:
ninja-build:
apt: ninja-build
@@ -485,18 +485,18 @@ variations:
zypper: ninja-build
nlohmann-json3-dev:
env:
- CM_SYS_UTIL_NAME: nlohmann_json3_dev
+ MLC_SYS_UTIL_NAME: nlohmann_json3_dev
new_env_keys:
- - CM_NLOHMANN_JSON3_DEV_VERSION
+ - MLC_NLOHMANN_JSON3_DEV_VERSION
state:
nlohmann_json3_dev:
apt: nlohmann-json3-dev
dnf: nlohmann-json-devel
ntpdate:
env:
- CM_SYS_UTIL_NAME: ntpdate
+ MLC_SYS_UTIL_NAME: ntpdate
new_env_keys:
- - CM_NTPDATE_VERSION
+ - MLC_NTPDATE_VERSION
state:
ntpdate:
apt: ntpdate
@@ -506,18 +506,18 @@ variations:
numactl:
deps:
- enable_if_env:
- CM_HOST_OS_FLAVOR:
+ MLC_HOST_OS_FLAVOR:
- rhel
- CM_HOST_OS_VERSION:
+ MLC_HOST_OS_VERSION:
- '9.1'
- '9.2'
- '9.3'
tags: install,numactl,from.src
env:
- CM_SYS_UTIL_NAME: numactl
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+ MLC_SYS_UTIL_NAME: numactl
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
new_env_keys:
- - CM_NUMACTL_VERSION
+ - MLC_NUMACTL_VERSION
state:
numactl:
apt: numactl
@@ -525,12 +525,12 @@ variations:
yum: numactl-devel
nvidia-cuda-toolkit:
env:
- CM_SYS_UTIL_NAME: nvidia-cuda-toolkit
- CM_SYS_UTIL_VERSION_CMD: nvcc --version
- CM_SYS_UTIL_VERSION_RE: release ([\d.]+)
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 1
+ MLC_SYS_UTIL_NAME: nvidia-cuda-toolkit
+ MLC_SYS_UTIL_VERSION_CMD: nvcc --version
+ MLC_SYS_UTIL_VERSION_RE: release ([\d.]+)
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 1
new_env_keys:
- - CM_NVIDIA_CUDA_TOOLKIT_VERSION
+ - MLC_NVIDIA_CUDA_TOOLKIT_VERSION
state:
nvidia-cuda-toolkit:
apt: nvidia-cuda-toolkit
@@ -539,9 +539,9 @@ variations:
yum: nvidia-cuda-toolkit
pkg-config:
env:
- CM_SYS_UTIL_NAME: pkg_config
- CM_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+ MLC_SYS_UTIL_NAME: pkg_config
+ MLC_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
state:
pkg_config:
apt: pkg-config
@@ -550,9 +550,9 @@ variations:
yum: pkg-config
psmisc:
env:
- CM_SYS_UTIL_NAME: psmisc
+ MLC_SYS_UTIL_NAME: psmisc
new_env_keys:
- - CM_PSMISC_VERSION
+ - MLC_PSMISC_VERSION
state:
psmisc:
apt: psmisc
@@ -561,9 +561,9 @@ variations:
yum: psmisc
rapidjson-dev:
env:
- CM_SYS_UTIL_NAME: rapidjson-dev
+ MLC_SYS_UTIL_NAME: rapidjson-dev
new_env_keys:
- - CM_RAPIDJSON_DEV_VERSION
+ - MLC_RAPIDJSON_DEV_VERSION
state:
rapidjson-dev:
apt: rapidjson-dev
@@ -572,12 +572,12 @@ variations:
yum: rapidjson-devel
rsync:
env:
- CM_SYS_UTIL_NAME: rsync
- CM_SYS_UTIL_VERSION_CMD: rsync --version
- CM_SYS_UTIL_VERSION_RE: rsync\s+version\s+([\d.]+)
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+ MLC_SYS_UTIL_NAME: rsync
+ MLC_SYS_UTIL_VERSION_CMD: rsync --version
+ MLC_SYS_UTIL_VERSION_RE: rsync\s+version\s+([\d.]+)
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
new_env_keys:
- - CM_RSYNC_VERSION
+ - MLC_RSYNC_VERSION
state:
rsync:
apt: rsync
@@ -587,12 +587,12 @@ variations:
zypper: rsync
screen:
env:
- CM_SYS_UTIL_NAME: screen
- CM_SYS_UTIL_VERSION_CMD: screen --version
- CM_SYS_UTIL_VERSION_RE: Screen version ([\d.]+)
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+ MLC_SYS_UTIL_NAME: screen
+ MLC_SYS_UTIL_VERSION_CMD: screen --version
+ MLC_SYS_UTIL_VERSION_RE: Screen version ([\d.]+)
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
new_env_keys:
- - CM_SCREEN_VERSION
+ - MLC_SCREEN_VERSION
state:
screen:
apt: screen
@@ -602,12 +602,12 @@ variations:
zypper: rsync
sox:
env:
- CM_SYS_UTIL_NAME: sox
- CM_SYS_UTIL_VERSION_CMD: sox --version
- CM_SYS_UTIL_VERSION_RE: sox:\s+SoX\s+v([\d.]+)
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+ MLC_SYS_UTIL_NAME: sox
+ MLC_SYS_UTIL_VERSION_CMD: sox --version
+ MLC_SYS_UTIL_VERSION_RE: sox:\s+SoX\s+v([\d.]+)
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
new_env_keys:
- - CM_SOX_VERSION
+ - MLC_SOX_VERSION
state:
sox:
apt: sox
@@ -615,12 +615,12 @@ variations:
dnf: sox
systemd:
env:
- CM_SYS_UTIL_NAME: systemd
- CM_SYS_UTIL_VERSION_CMD: systemctl --version
- CM_SYS_UTIL_VERSION_RE: systemd ([\d]+)
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+ MLC_SYS_UTIL_NAME: systemd
+ MLC_SYS_UTIL_VERSION_CMD: systemctl --version
+ MLC_SYS_UTIL_VERSION_RE: systemd ([\d]+)
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
new_env_keys:
- - CM_SYSTEMD_VERSION
+ - MLC_SYSTEMD_VERSION
state:
systemd:
apt: systemd
@@ -629,24 +629,24 @@ variations:
yum: systemd
tk-dev:
env:
- CM_SYS_UTIL_NAME: tk_dev
- CM_SYS_UTIL_VERSION_CMD: dpkg -s tk-dev | grep Version
- CM_SYS_UTIL_VERSION_RE: ([0-9]+(\.[0-9]+)+)
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 1
+ MLC_SYS_UTIL_NAME: tk_dev
+ MLC_SYS_UTIL_VERSION_CMD: dpkg -s tk-dev | grep Version
+ MLC_SYS_UTIL_VERSION_RE: ([0-9]+(\.[0-9]+)+)
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 1
new_env_keys:
- - CM_TK_DEV_VERSION
+ - MLC_TK_DEV_VERSION
state:
tk_dev:
apt: tk-dev
transmission:
env:
- CM_SYS_UTIL_NAME: transmission
- CM_SYS_UTIL_VERSION_CMD: transmission-daemon --version
- CM_SYS_UTIL_VERSION_CMD_USE_ERROR_STREAM: 'yes'
- CM_SYS_UTIL_VERSION_RE: transmission-daemon ([\d.]+)
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+ MLC_SYS_UTIL_NAME: transmission
+ MLC_SYS_UTIL_VERSION_CMD: transmission-daemon --version
+ MLC_SYS_UTIL_VERSION_CMD_USE_ERROR_STREAM: 'yes'
+ MLC_SYS_UTIL_VERSION_RE: transmission-daemon ([\d.]+)
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
new_env_keys:
- - CM_TRANSMISSION_VERSION
+ - MLC_TRANSMISSION_VERSION
state:
transmission:
apt: transmission-daemon
@@ -655,12 +655,12 @@ variations:
yum: transmission-daemon
vim-common:
env:
- CM_SYS_UTIL_NAME: vim_common
- CM_SYS_UTIL_VERSION_CMD: vim --version
- CM_SYS_UTIL_VERSION_RE: VIM - Vi IMproved ([\d.]+)
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+ MLC_SYS_UTIL_NAME: vim_common
+ MLC_SYS_UTIL_VERSION_CMD: vim --version
+ MLC_SYS_UTIL_VERSION_RE: VIM - Vi IMproved ([\d.]+)
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
new_env_keys:
- - CM_VIM_COMMON_VERSION
+ - MLC_VIM_COMMON_VERSION
state:
vim_common:
apt: vim-common
@@ -670,36 +670,36 @@ variations:
yum: vim-common
wget:
env:
- CM_SYS_UTIL_NAME: wget
- CM_SYS_UTIL_VERSION_CMD: wget --version
- CM_SYS_UTIL_VERSION_RE: Wget\s*([\d.]+)
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+ MLC_SYS_UTIL_NAME: wget
+ MLC_SYS_UTIL_VERSION_CMD: wget --version
+ MLC_SYS_UTIL_VERSION_RE: Wget\s*([\d.]+)
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
new_env_keys:
- - CM_WGET_VERSION
+ - MLC_WGET_VERSION
state:
wget:
apt: wget
brew: wget
wkhtmltopdf:
env:
- CM_SYS_UTIL_NAME: wkhtmltopdf
- CM_SYS_UTIL_VERSION_CMD: wkhtmltopdf --version
- CM_SYS_UTIL_VERSION_RE: wkhtmltopdf ([\d.]+)
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0
+ MLC_SYS_UTIL_NAME: wkhtmltopdf
+ MLC_SYS_UTIL_VERSION_CMD: wkhtmltopdf --version
+ MLC_SYS_UTIL_VERSION_RE: wkhtmltopdf ([\d.]+)
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0
new_env_keys:
- - CM_WKHTMLTOPDF_VERSION
+ - MLC_WKHTMLTOPDF_VERSION
state:
wkhtmltopdf:
apt: wkhtmltopdf
brew: wkhtmltopdf
xz:
env:
- CM_SYS_UTIL_NAME: xz
- CM_SYS_UTIL_VERSION_CMD: xz --version
- CM_SYS_UTIL_VERSION_RE: xz \(XZ Utils\) ([\d.]+)
- CM_TMP_VERSION_DETECT_GROUP_NUMBER: 1
+ MLC_SYS_UTIL_NAME: xz
+ MLC_SYS_UTIL_VERSION_CMD: xz --version
+ MLC_SYS_UTIL_VERSION_RE: xz \(XZ Utils\) ([\d.]+)
+ MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 1
new_env_keys:
- - CM_XZ_VERSION
+ - MLC_XZ_VERSION
state:
xz:
apt: xz-utils
@@ -709,18 +709,18 @@ variations:
yum: xz
zlib:
env:
- CM_SYS_UTIL_NAME: zlib
+ MLC_SYS_UTIL_NAME: zlib
new_env_keys:
- - CM_ZLIB_VERSION
+ - MLC_ZLIB_VERSION
state:
zlib:
apt: zlib1g
choco: zlib
zlib1g-dev:
env:
- CM_SYS_UTIL_NAME: zlib1g_dev
+ MLC_SYS_UTIL_NAME: zlib1g_dev
new_env_keys:
- - CM_ZLIB1G_DEV_VERSION
+ - MLC_ZLIB1G_DEV_VERSION
state:
zlib1g_dev:
apt: zlib1g-dev
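
Every variation above declares the same three-field version-detection contract: MLC_SYS_UTIL_VERSION_CMD produces output, MLC_SYS_UTIL_VERSION_RE extracts a version from it, and MLC_TMP_VERSION_DETECT_GROUP_NUMBER selects the capture group. A sketch of how such a contract can be evaluated (detect_version here is illustrative; the real logic lives in the script's customize.py, which is not shown here):

```python
import re
import subprocess

def detect_version(env):
    # Illustrative evaluation of the MLC_SYS_UTIL_VERSION_* contract.
    cmd = env.get('MLC_SYS_UTIL_VERSION_CMD', '')
    if not cmd:
        return None
    out = subprocess.run(cmd, shell=True, capture_output=True, text=True).stdout
    match = re.search(env['MLC_SYS_UTIL_VERSION_RE'], out)
    if match is None:
        return None
    return match.group(int(env.get('MLC_TMP_VERSION_DETECT_GROUP_NUMBER', 0)))

# e.g. the cmake variation: 'cmake version 3.28.3 ...' -> '3.28.3'
print(detect_version({
    'MLC_SYS_UTIL_VERSION_CMD': 'cmake --version',
    'MLC_SYS_UTIL_VERSION_RE': r'cmake version ([\d.]+)',
    'MLC_TMP_VERSION_DETECT_GROUP_NUMBER': 1,
}))
```
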
diff --git a/script/get-gh-actions-runner/customize.py b/script/get-gh-actions-runner/customize.py
index 5fa54e71f..564065fb4 100644
--- a/script/get-gh-actions-runner/customize.py
+++ b/script/get-gh-actions-runner/customize.py
@@ -13,17 +13,17 @@ def preprocess(i):
automation = i['automation']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
- cmd = env.get('CM_GH_ACTIONS_RUNNER_COMMAND', '')
+ cmd = env.get('MLC_GH_ACTIONS_RUNNER_COMMAND', '')
if cmd == "config":
- run_cmd = f"cd {env['CM_GH_ACTIONS_RUNNER_CODE_PATH']} && ./config.sh --url {env['CM_GH_ACTIONS_RUNNER_URL']} --token {env['CM_GH_ACTIONS_RUNNER_TOKEN']}"
+ run_cmd = f"cd {env['MLC_GH_ACTIONS_RUNNER_CODE_PATH']} && ./config.sh --url {env['MLC_GH_ACTIONS_RUNNER_URL']} --token {env['MLC_GH_ACTIONS_RUNNER_TOKEN']}"
elif cmd == "remove":
- run_cmd = f"cd {env['CM_GH_ACTIONS_RUNNER_CODE_PATH']} && ./config.sh remove --token {env['CM_GH_ACTIONS_RUNNER_TOKEN']}"
+ run_cmd = f"cd {env['MLC_GH_ACTIONS_RUNNER_CODE_PATH']} && ./config.sh remove --token {env['MLC_GH_ACTIONS_RUNNER_TOKEN']}"
elif cmd == "install":
- run_cmd = f"cd {env['CM_GH_ACTIONS_RUNNER_CODE_PATH']} && sudo ./svc.sh install"
+ run_cmd = f"cd {env['MLC_GH_ACTIONS_RUNNER_CODE_PATH']} && sudo ./svc.sh install"
elif cmd == "uninstall":
- run_cmd = f"cd {env['CM_GH_ACTIONS_RUNNER_CODE_PATH']} && sudo ./svc.sh uninstall"
+ run_cmd = f"cd {env['MLC_GH_ACTIONS_RUNNER_CODE_PATH']} && sudo ./svc.sh uninstall"
cache_rm_tags = "gh,runner,_install"
r = cm.access({'action': 'rm', 'automation': 'cache',
'tags': cache_rm_tags, 'f': True})
@@ -31,9 +31,9 @@ def preprocess(i):
if r['return'] != 0 and r['return'] != 16: # ignore missing ones
return r
elif cmd == "start":
- run_cmd = f"cd {env['CM_GH_ACTIONS_RUNNER_CODE_PATH']} && sudo ./svc.sh start"
+ run_cmd = f"cd {env['MLC_GH_ACTIONS_RUNNER_CODE_PATH']} && sudo ./svc.sh start"
- env['CM_RUN_CMD'] = run_cmd
+ env['MLC_RUN_CMD'] = run_cmd
return {'return': 0}
diff --git a/script/get-gh-actions-runner/meta.yaml b/script/get-gh-actions-runner/meta.yaml
index 287ee254a..67eabf7fb 100644
--- a/script/get-gh-actions-runner/meta.yaml
+++ b/script/get-gh-actions-runner/meta.yaml
@@ -14,11 +14,11 @@ tags:
- gh-actions-runner
uid: 5b005c5a76f242a7
input_mapping:
- token: CM_GH_ACTIONS_RUNNER_TOKEN
- url: CM_GH_ACTIONS_RUNNER_URL
+ token: MLC_GH_ACTIONS_RUNNER_TOKEN
+ url: MLC_GH_ACTIONS_RUNNER_URL
new_env_keys:
- - CM_GH_ACTIONS_RUNNER_CODE_PATH
+ - MLC_GH_ACTIONS_RUNNER_CODE_PATH
deps:
- tags: detect-os
@@ -26,33 +26,33 @@ deps:
force_cache: yes
extra_cache_tags: gh-actions-runner-code,gh-actions,code
env:
- CM_DAE_FINAL_ENV_NAME: CM_GH_ACTIONS_RUNNER_CODE_PATH
+ MLC_DAE_FINAL_ENV_NAME: MLC_GH_ACTIONS_RUNNER_CODE_PATH
variations:
config:
group: command
default: true
env:
- CM_GH_ACTIONS_RUNNER_COMMAND: config
+ MLC_GH_ACTIONS_RUNNER_COMMAND: config
remove:
group: command
env:
- CM_GH_ACTIONS_RUNNER_COMMAND: remove
+ MLC_GH_ACTIONS_RUNNER_COMMAND: remove
install:
group: command
deps:
- tags: get,gh,actions-runner,_config
force_cache: yes
env:
- CM_GH_ACTIONS_RUNNER_COMMAND: install
+ MLC_GH_ACTIONS_RUNNER_COMMAND: install
uninstall:
group: command
env:
- CM_GH_ACTIONS_RUNNER_COMMAND: uninstall
+ MLC_GH_ACTIONS_RUNNER_COMMAND: uninstall
start:
group: command
deps:
- tags: get,gh,actions-runner,_install
force_cache: yes
env:
- CM_GH_ACTIONS_RUNNER_COMMAND: start
+ MLC_GH_ACTIONS_RUNNER_COMMAND: start
diff --git a/script/get-gh-actions-runner/run.sh b/script/get-gh-actions-runner/run.sh
index 547395120..43988f060 100644
--- a/script/get-gh-actions-runner/run.sh
+++ b/script/get-gh-actions-runner/run.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-echo ${CM_RUN_CMD}
-eval ${CM_RUN_CMD}
+echo ${MLC_RUN_CMD}
+eval ${MLC_RUN_CMD}
test $? -eq 0 || exit $?
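
Taken together with customize.py above, the flow is: the chosen `_config`/`_remove`/`_install`/`_uninstall`/`_start` variation sets MLC_GH_ACTIONS_RUNNER_COMMAND, customize.py assembles the full shell command into MLC_RUN_CMD, and run.sh merely echoes and evals it. A condensed sketch of that dispatch (the dictionary form is illustrative; the actual code uses the if/elif chain shown above):

```python
def build_runner_cmd(env):
    # Maps the variation-selected command to the shell line run.sh will eval.
    code_path = env['MLC_GH_ACTIONS_RUNNER_CODE_PATH']
    token = env.get('MLC_GH_ACTIONS_RUNNER_TOKEN', '')
    commands = {
        'config': f"./config.sh --url {env.get('MLC_GH_ACTIONS_RUNNER_URL', '')} --token {token}",
        'remove': f"./config.sh remove --token {token}",
        'install': 'sudo ./svc.sh install',
        'uninstall': 'sudo ./svc.sh uninstall',
        'start': 'sudo ./svc.sh start',
    }
    cmd = commands[env['MLC_GH_ACTIONS_RUNNER_COMMAND']]
    env['MLC_RUN_CMD'] = f'cd {code_path} && {cmd}'
    return env['MLC_RUN_CMD']

print(build_runner_cmd({
    'MLC_GH_ACTIONS_RUNNER_CODE_PATH': '/opt/actions-runner',
    'MLC_GH_ACTIONS_RUNNER_COMMAND': 'start',
}))  # -> cd /opt/actions-runner && sudo ./svc.sh start
```
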
diff --git a/script/get-git-repo/README-extra.md b/script/get-git-repo/README-extra.md
index 83a368e5f..9ef54386b 100644
--- a/script/get-git-repo/README-extra.md
+++ b/script/get-git-repo/README-extra.md
@@ -13,7 +13,7 @@ where [VARIATION] is one of
* `no-recurse-submodules:` Only download the main repository
## Exported Variables
-* `CM_GIT_CHECKOUT_PATH`: Directory path of the cloned git repository
+* `MLC_GIT_CHECKOUT_PATH`: Directory path of the cloned git repository
## Supported and Tested OS
1. Ubuntu 18.04, 20.04, 22.04
diff --git a/script/get-git-repo/customize.py b/script/get-git-repo/customize.py
index 603340e47..8f9e3ea31 100644
--- a/script/get-git-repo/customize.py
+++ b/script/get-git-repo/customize.py
@@ -16,37 +16,37 @@ def preprocess(i):
env_key = get_env_key(env)
- cm_git_url = env['CM_GIT_URL']
+ cm_git_url = env['MLC_GIT_URL']
- if 'CM_GIT_REPO_NAME' not in env:
+ if 'MLC_GIT_REPO_NAME' not in env:
update_env(
env,
- 'CM_GIT_REPO{}_NAME',
+ 'MLC_GIT_REPO{}_NAME',
env_key,
os.path.basename(
- env['CM_GIT_URL']))
+ env['MLC_GIT_URL']))
- if 'CM_GIT_DEPTH' not in env:
- env['CM_GIT_DEPTH'] = ''
+ if 'MLC_GIT_DEPTH' not in env:
+ env['MLC_GIT_DEPTH'] = ''
- if 'CM_GIT_RECURSE_SUBMODULES' not in env:
- env['CM_GIT_RECURSE_SUBMODULES'] = ''
+ if 'MLC_GIT_RECURSE_SUBMODULES' not in env:
+ env['MLC_GIT_RECURSE_SUBMODULES'] = ''
- if env.get('CM_GIT_CHECKOUT', '') == '':
- env['CM_GIT_CHECKOUT'] = env.get(
- 'CM_GIT_SHA', env.get(
- 'CM_GIT_BRANCH', ''))
+ if env.get('MLC_GIT_CHECKOUT', '') == '':
+ env['MLC_GIT_CHECKOUT'] = env.get(
+ 'MLC_GIT_SHA', env.get(
+ 'MLC_GIT_BRANCH', ''))
- git_checkout_string = " -b " + env['CM_GIT_BRANCH'] if (
- "CM_GIT_BRANCH" in env and env.get('CM_GIT_SHA', '') == '') else ""
+ git_checkout_string = " -b " + env['MLC_GIT_BRANCH'] if (
+ "MLC_GIT_BRANCH" in env and env.get('MLC_GIT_SHA', '') == '') else ""
- git_clone_cmd = "git clone " + env['CM_GIT_RECURSE_SUBMODULES'] + git_checkout_string + " " + \
- env['CM_GIT_URL'] + " " + \
- env.get('CM_GIT_DEPTH', '') + ' ' + env['CM_GIT_CHECKOUT_FOLDER']
+ git_clone_cmd = "git clone " + env['MLC_GIT_RECURSE_SUBMODULES'] + git_checkout_string + " " + \
+ env['MLC_GIT_URL'] + " " + \
+ env.get('MLC_GIT_DEPTH', '') + ' ' + env['MLC_GIT_CHECKOUT_FOLDER']
- env['CM_GIT_CLONE_CMD'] = git_clone_cmd
- env['CM_TMP_GIT_PATH'] = os.path.join(
- os.getcwd(), env['CM_GIT_CHECKOUT_FOLDER'], ".gitdone")
+ env['MLC_GIT_CLONE_CMD'] = git_clone_cmd
+ env['MLC_TMP_GIT_PATH'] = os.path.join(
+ os.getcwd(), env['MLC_GIT_CHECKOUT_FOLDER'], ".gitdone")
return {'return': 0}
@@ -55,41 +55,41 @@ def postprocess(i):
env = i['env']
state = i['state']
- env['CM_GIT_CHECKOUT_PATH'] = os.path.join(
- os.getcwd(), env['CM_GIT_CHECKOUT_FOLDER'])
- git_checkout_path = env['CM_GIT_CHECKOUT_PATH']
+ env['MLC_GIT_CHECKOUT_PATH'] = os.path.join(
+ os.getcwd(), env['MLC_GIT_CHECKOUT_FOLDER'])
+ git_checkout_path = env['MLC_GIT_CHECKOUT_PATH']
env_key = get_env_key(env)
- # We remap CM_GIT variables with CM_GIT_REPO prefix so that they don't
+ # We remap MLC_GIT variables with MLC_GIT_REPO prefix so that they don't
# contaminate the env of the parent script
- update_env(env, 'CM_GIT_REPO{}_CHECKOUT_PATH',
- env_key, env['CM_GIT_CHECKOUT_PATH'])
- update_env(env, 'CM_GIT_REPO{}_URL', env_key, env['CM_GIT_URL'])
- update_env(env, 'CM_GIT_REPO{}_CHECKOUT', env_key, env['CM_GIT_CHECKOUT'])
- update_env(env, 'CM_GIT_REPO{}_DEPTH', env_key, env['CM_GIT_DEPTH'])
- update_env(env, 'CM_GIT_REPO{}_CHECKOUT_FOLDER',
- env_key, env['CM_GIT_CHECKOUT_FOLDER'])
- update_env(env, 'CM_GIT_REPO{}_PATCH', env_key, env['CM_GIT_PATCH'])
- update_env(env, 'CM_GIT_REPO{}_RECURSE_SUBMODULES',
- env_key, env['CM_GIT_RECURSE_SUBMODULES'])
-
- if (env.get('CM_GIT_CHECKOUT_PATH_ENV_NAME', '') != ''):
- env[env['CM_GIT_CHECKOUT_PATH_ENV_NAME']] = git_checkout_path
-
- env['CM_GET_DEPENDENT_CACHED_PATH'] = git_checkout_path
-
- if os.path.exists("tmp-cm-git-hash.out"):
- with open("tmp-cm-git-hash.out", "r") as f:
+ update_env(env, 'MLC_GIT_REPO{}_CHECKOUT_PATH',
+ env_key, env['MLC_GIT_CHECKOUT_PATH'])
+ update_env(env, 'MLC_GIT_REPO{}_URL', env_key, env['MLC_GIT_URL'])
+ update_env(env, 'MLC_GIT_REPO{}_CHECKOUT', env_key, env['MLC_GIT_CHECKOUT'])
+ update_env(env, 'MLC_GIT_REPO{}_DEPTH', env_key, env['MLC_GIT_DEPTH'])
+ update_env(env, 'MLC_GIT_REPO{}_CHECKOUT_FOLDER',
+ env_key, env['MLC_GIT_CHECKOUT_FOLDER'])
+ update_env(env, 'MLC_GIT_REPO{}_PATCH', env_key, env['MLC_GIT_PATCH'])
+ update_env(env, 'MLC_GIT_REPO{}_RECURSE_SUBMODULES',
+ env_key, env['MLC_GIT_RECURSE_SUBMODULES'])
+
+ if (env.get('MLC_GIT_CHECKOUT_PATH_ENV_NAME', '') != ''):
+ env[env['MLC_GIT_CHECKOUT_PATH_ENV_NAME']] = git_checkout_path
+
+ env['MLC_GET_DEPENDENT_CACHED_PATH'] = git_checkout_path
+
+ if os.path.exists("tmp-mlc-git-hash.out"):
+ with open("tmp-mlc-git-hash.out", "r") as f:
git_hash = f.readline().strip()
- env['CM_GIT_REPO_CURRENT_HASH'] = git_hash
+ env['MLC_GIT_REPO_CURRENT_HASH'] = git_hash
return {'return': 0}
def get_env_key(env):
- env_key = env.get('CM_GIT_ENV_KEY', '')
+ env_key = env.get('MLC_GIT_ENV_KEY', '')
if env_key != '' and not env_key.startswith('_'):
env_key = '_' + env_key
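
The namespacing trick in this file: get_env_key (above) turns an optional MLC_GIT_ENV_KEY into a `_SUFFIX`, and update_env writes values into templates like 'MLC_GIT_REPO{}_URL' so several cloned repos can coexist without clobbering each other's env. update_env itself is not in this diff; the sketch below is an assumed implementation, inferred only from its call sites:

```python
def update_env(env, key_template, env_key, value):
    # Assumed one-liner consistent with the call sites above:
    # 'MLC_GIT_REPO{}_URL' + env_key '_INFERENCE' -> 'MLC_GIT_REPO_INFERENCE_URL'
    env[key_template.format(env_key)] = value

env = {'MLC_GIT_ENV_KEY': 'INFERENCE'}
env_key = '_' + env['MLC_GIT_ENV_KEY']  # mirrors get_env_key()
update_env(env, 'MLC_GIT_REPO{}_URL', env_key,
           'https://github.com/mlcommons/inference')
print(env['MLC_GIT_REPO_INFERENCE_URL'])
```
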
diff --git a/script/get-git-repo/meta.yaml b/script/get-git-repo/meta.yaml
index eae2ac3e7..962b9281f 100644
--- a/script/get-git-repo/meta.yaml
+++ b/script/get-git-repo/meta.yaml
@@ -4,39 +4,39 @@ automation_uid: 5b4e0237da074764
cache: true
category: DevOps automation
default_env:
- CM_GIT_CHECKOUT_FOLDER: repo
- CM_GIT_DEPTH: --depth 4
- CM_GIT_PATCH: 'no'
- CM_GIT_RECURSE_SUBMODULES: ' --recurse-submodules'
- CM_GIT_URL: https://github.com/mlcommons/ck.git
+ MLC_GIT_CHECKOUT_FOLDER: repo
+ MLC_GIT_DEPTH: --depth 4
+ MLC_GIT_PATCH: 'no'
+ MLC_GIT_RECURSE_SUBMODULES: ' --recurse-submodules'
+ MLC_GIT_URL: https://github.com/mlcommons/ck.git
deps:
- tags: detect,os
input_mapping:
- branch: CM_GIT_CHECKOUT
- depth: CM_GIT_DEPTH
- env_key: CM_GIT_ENV_KEY
- folder: CM_GIT_CHECKOUT_FOLDER
- patch: CM_GIT_PATCH
- pull: CM_GIT_REPO_PULL
- submodules: CM_GIT_RECURSE_SUBMODULES
- update: CM_GIT_REPO_PULL
+ branch: MLC_GIT_CHECKOUT
+ depth: MLC_GIT_DEPTH
+ env_key: MLC_GIT_ENV_KEY
+ folder: MLC_GIT_CHECKOUT_FOLDER
+ patch: MLC_GIT_PATCH
+ pull: MLC_GIT_REPO_PULL
+ submodules: MLC_GIT_RECURSE_SUBMODULES
+ update: MLC_GIT_REPO_PULL
new_env_keys:
-- CM_GIT_CHECKOUT_PATH
-- CM_GIT_REPO_*
-- <<<CM_GIT_CHECKOUT_PATH_ENV_NAME>>>
+- MLC_GIT_CHECKOUT_PATH
+- MLC_GIT_REPO_*
+- <<<MLC_GIT_CHECKOUT_PATH_ENV_NAME>>>
post_deps:
- dynamic: true
enable_if_env:
- CM_GIT_REPO_PULL:
+ MLC_GIT_REPO_PULL:
- 'yes'
- 'True'
force_env_keys:
- - CM_GIT_CHECKOUT_PATH
+ - MLC_GIT_CHECKOUT_PATH
names:
- pull-git-repo
tags: pull,git,repo
print_env_at_the_end:
- CM_GIT_CHECKOUT_PATH: CM cache path to the Git repo
+ MLC_GIT_CHECKOUT_PATH: CM cache path to the Git repo
tags:
- get
- git
@@ -47,48 +47,48 @@ uid: ed603e7292974f10
variations:
branch.#:
env:
- CM_GIT_BRANCH: '#'
+ MLC_GIT_BRANCH: '#'
group: checkout
cherrypicks.#:
env:
- CM_GIT_CHERRYPICKS: '#'
+ MLC_GIT_CHERRYPICKS: '#'
full-history:
env:
- CM_GIT_DEPTH: ''
+ MLC_GIT_DEPTH: ''
group: git-history
lfs:
deps:
- tags: get,generic,sys-util,_git-lfs
env:
- CM_GIT_REPO_NEEDS_LFS: 'yes'
+ MLC_GIT_REPO_NEEDS_LFS: 'yes'
no-recurse-submodules:
env:
- CM_GIT_RECURSE_SUBMODULES: ''
+ MLC_GIT_RECURSE_SUBMODULES: ''
patch:
env:
- CM_GIT_PATCH: 'yes'
+ MLC_GIT_PATCH: 'yes'
pr-to-apply.#:
env:
- CM_GIT_PR_TO_APPLY: '#'
+ MLC_GIT_PR_TO_APPLY: '#'
repo.#:
env:
- CM_GIT_URL: '#'
+ MLC_GIT_URL: '#'
group: repo
sha.#:
default_variations:
git-history: full-history
env:
- CM_GIT_SHA: '#'
+ MLC_GIT_SHA: '#'
group: checkout
short-history:
default: true
env:
- CM_GIT_DEPTH: --depth 5
+ MLC_GIT_DEPTH: --depth 5
group: git-history
submodules.#:
env:
- CM_GIT_SUBMODULES: '#'
+ MLC_GIT_SUBMODULES: '#'
tag.#:
env:
- CM_GIT_CHECKOUT_TAG: '#'
+ MLC_GIT_CHECKOUT_TAG: '#'
group: checkout
diff --git a/script/get-git-repo/run.bat b/script/get-git-repo/run.bat
index d00f32b15..8a8003513 100644
--- a/script/get-git-repo/run.bat
+++ b/script/get-git-repo/run.bat
@@ -1,22 +1,22 @@
@echo off
rem echo ******************************************************
-rem echo Cloning MLCommons from %CM_GIT_URL% with branch %CM_GIT_CHECKOUT% %CM_GIT_DEPTH% %CM_GIT_RECURSE_SUBMODULES% ...
+rem echo Cloning MLCommons from %MLC_GIT_URL% with branch %MLC_GIT_CHECKOUT% %MLC_GIT_DEPTH% %MLC_GIT_RECURSE_SUBMODULES% ...
-rem git clone %CM_GIT_RECURSE_SUBMODULES% %CM_GIT_URL% %CM_GIT_DEPTH% inference
+rem git clone %MLC_GIT_RECURSE_SUBMODULES% %MLC_GIT_URL% %MLC_GIT_DEPTH% inference
rem cd inference
-rem git checkout -b "%CM_GIT_CHECKOUT%"
+rem git checkout -b "%MLC_GIT_CHECKOUT%"
rem
rem Next line allows ERRORLEVEL inside if statements!
setlocal enabledelayedexpansion
set CUR_DIR=%cd%
-set SCRIPT_DIR=%CM_TMP_CURRENT_SCRIPT_PATH%
+set SCRIPT_DIR=%MLC_TMP_CURRENT_SCRIPT_PATH%
-set folder=%CM_GIT_CHECKOUT_FOLDER%
+set folder=%MLC_GIT_CHECKOUT_FOLDER%
-if not exist "%CM_TMP_GIT_PATH%" (
+if not exist "%MLC_TMP_GIT_PATH%" (
if exist "%folder%" (
rmdir /S /Q "%folder%" rem Use rmdir instead of deltree
@@ -25,20 +25,20 @@ if not exist "%CM_TMP_GIT_PATH%" (
echo ******************************************************
echo Current directory: %CUR_DIR%
echo.
- echo Cloning %CM_GIT_REPO_NAME% from %CM_GIT_URL%
+ echo Cloning %MLC_GIT_REPO_NAME% from %MLC_GIT_URL%
echo.
- echo "%CM_GIT_CLONE_CMD%"
+ echo "%MLC_GIT_CLONE_CMD%"
echo.
- %CM_GIT_CLONE_CMD%
+ %MLC_GIT_CLONE_CMD%
IF !ERRORLEVEL! NEQ 0 EXIT !ERRORLEVEL!
cd "%folder%"
- if not "%CM_GIT_SHA%" == "" (
+ if not "%MLC_GIT_SHA%" == "" (
echo.
echo.
- git checkout "%CM_GIT_CHECKOUT%"
+ git checkout "%MLC_GIT_CHECKOUT%"
IF !ERRORLEVEL! NEQ 0 EXIT !ERRORLEVEL!
)
@@ -46,8 +46,8 @@ if not exist "%CM_TMP_GIT_PATH%" (
cd "%folder%"
)
-if not "%CM_GIT_SUBMODULES%" == "" (
- for /F %%s in ("%CM_GIT_SUBMODULES%") do (
+if not "%MLC_GIT_SUBMODULES%" == "" (
+ for /F %%s in ("%MLC_GIT_SUBMODULES%") do (
echo.
echo Initializing submodule %%s
git submodule update --init %%s
@@ -55,8 +55,8 @@ if not "%CM_GIT_SUBMODULES%" == "" (
)
)
-if "%CM_GIT_PATCH%" == "yes" (
- for %%x in (%CM_GIT_PATCH_FILEPATHS%) do (
+if "%MLC_GIT_PATCH%" == "yes" (
+ for %%x in (%MLC_GIT_PATCH_FILEPATHS%) do (
echo.
echo Applying patch %%x ...
git apply %%x
diff --git a/script/get-git-repo/run.sh b/script/get-git-repo/run.sh
index 2a7b0b51c..0e0c19324 100644
--- a/script/get-git-repo/run.sh
+++ b/script/get-git-repo/run.sh
@@ -2,53 +2,53 @@
CUR_DIR=$PWD
echo "$CUR_DIR"
-SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH}
+SCRIPT_DIR=${MLC_TMP_CURRENT_SCRIPT_PATH}
-folder=${CM_GIT_CHECKOUT_FOLDER}
-if [ ! -e "${CM_TMP_GIT_PATH}" ]; then
+folder=${MLC_GIT_CHECKOUT_FOLDER}
+if [ ! -e "${MLC_TMP_GIT_PATH}" ]; then
cmd="rm -rf ${folder}"
echo $cmd
eval $cmd
echo "******************************************************"
echo "Current directory: ${CUR_DIR}"
echo ""
- echo "Cloning ${CM_GIT_REPO_NAME} from ${CM_GIT_URL}"
+ echo "Cloning ${MLC_GIT_REPO_NAME} from ${MLC_GIT_URL}"
echo ""
- echo "${CM_GIT_CLONE_CMD}";
+ echo "${MLC_GIT_CLONE_CMD}";
echo ""
- ${CM_GIT_CLONE_CMD}
+ ${MLC_GIT_CLONE_CMD}
rcode=$?
if [ ! $rcode -eq 0 ]; then #try once more
rm -rf $folder
- ${CM_GIT_CLONE_CMD}
+ ${MLC_GIT_CLONE_CMD}
test $? -eq 0 || exit $?
fi
cd ${folder}
- if [ ! -z ${CM_GIT_SHA} ]; then
+ if [ ! -z ${MLC_GIT_SHA} ]; then
echo ""
- cmd="git checkout -b ${CM_GIT_SHA} ${CM_GIT_SHA}"
+ cmd="git checkout -b ${MLC_GIT_SHA} ${MLC_GIT_SHA}"
echo "$cmd"
eval "$cmd"
test $? -eq 0 || exit $?
- elif [ ! -z ${CM_GIT_CHECKOUT_TAG} ]; then
+ elif [ ! -z ${MLC_GIT_CHECKOUT_TAG} ]; then
echo ""
cmd="git fetch --all --tags"
echo "$cmd"
eval "$cmd"
- cmd="git checkout tags/${CM_GIT_CHECKOUT_TAG} -b ${CM_GIT_CHECKOUT_TAG}"
+ cmd="git checkout tags/${MLC_GIT_CHECKOUT_TAG} -b ${MLC_GIT_CHECKOUT_TAG}"
echo "$cmd"
eval "$cmd"
test $? -eq 0 || exit $?
else
- cmd="git rev-parse HEAD >> ../tmp-cm-git-hash.out"
+ cmd="git rev-parse HEAD >> ../tmp-mlc-git-hash.out"
echo "$cmd"
eval "$cmd"
test $? -eq 0 || exit $?
@@ -58,13 +58,13 @@ else
cd ${folder}
fi
-if [ ! -z ${CM_GIT_PR_TO_APPLY} ]; then
+if [ ! -z ${MLC_GIT_PR_TO_APPLY} ]; then
echo ""
- echo "Fetching from ${CM_GIT_PR_TO_APPLY}"
- git fetch origin ${CM_GIT_PR_TO_APPLY}:tmp-apply
+ echo "Fetching from ${MLC_GIT_PR_TO_APPLY}"
+ git fetch origin ${MLC_GIT_PR_TO_APPLY}:tmp-apply
fi
-IFS=',' read -r -a cherrypicks <<< "${CM_GIT_CHERRYPICKS}"
+IFS=',' read -r -a cherrypicks <<< "${MLC_GIT_CHERRYPICKS}"
for cherrypick in "${cherrypicks[@]}"
do
echo ""
@@ -73,7 +73,7 @@ do
test $? -eq 0 || exit $?
done
-IFS=',' read -r -a submodules <<< "${CM_GIT_SUBMODULES}"
+IFS=',' read -r -a submodules <<< "${MLC_GIT_SUBMODULES}"
for submodule in "${submodules[@]}"
do
@@ -83,8 +83,8 @@ do
test $? -eq 0 || exit $?
done
-if [ ${CM_GIT_PATCH} == "yes" ]; then
- IFS=', ' read -r -a patch_files <<< ${CM_GIT_PATCH_FILEPATHS}
+if [ ${MLC_GIT_PATCH} == "yes" ]; then
+ IFS=', ' read -r -a patch_files <<< ${MLC_GIT_PATCH_FILEPATHS}
for patch_file in "${patch_files[@]}"
do
echo ""
diff --git a/script/get-github-cli/customize.py b/script/get-github-cli/customize.py
index bf6a19089..33b2c3cac 100644
--- a/script/get-github-cli/customize.py
+++ b/script/get-github-cli/customize.py
@@ -12,18 +12,18 @@ def preprocess(i):
file_name = 'gh.exe' if os_info['platform'] == 'windows' else 'gh'
- # Will check env['CM_TMP_PATH'] if comes from installation script
+ # Will check env['MLC_TMP_PATH'] if comes from installation script
r = i['automation'].find_artifact({'file_name': file_name,
'env': env,
'os_info': os_info,
'default_path_env_key': 'PATH',
'detect_version': True,
- 'env_path_key': 'CM_GITHUBCLI_BIN_WITH_PATH',
+ 'env_path_key': 'MLC_GITHUBCLI_BIN_WITH_PATH',
'run_script_input': i['run_script_input'],
'recursion_spaces': recursion_spaces})
if r['return'] > 0:
if r['return'] == 16:
- if env.get('CM_TMP_FAIL_IF_NOT_FOUND', '').lower() == 'yes':
+ if env.get('MLC_TMP_FAIL_IF_NOT_FOUND', '').lower() == 'yes':
return r
print(recursion_spaces + ' # {}'.format(r['error']))
@@ -47,7 +47,7 @@ def postprocess(i):
r = i['automation'].parse_version({'match_text': r'gh\s*version\s*([\d.]+)',
'group_number': 1,
- 'env_key': 'CM_GITHUBCLI_VERSION',
+ 'env_key': 'MLC_GITHUBCLI_VERSION',
'which_env': i['env']})
if r['return'] > 0:
return r
diff --git a/script/get-go/README-extra.md b/script/get-go/README-extra.md
index d1c4f9caa..327cee0a9 100644
--- a/script/get-go/README-extra.md
+++ b/script/get-go/README-extra.md
@@ -2,7 +2,7 @@
This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed GO tool on the system.
## Exported Variables
-* `CM_GO_BIN_WITH_PATH`
+* `MLC_GO_BIN_WITH_PATH`
* `+PATH`
## Supported and Tested OS
diff --git a/script/get-go/customize.py b/script/get-go/customize.py
index b3ccee3cc..95ff8630a 100644
--- a/script/get-go/customize.py
+++ b/script/get-go/customize.py
@@ -12,18 +12,18 @@ def preprocess(i):
file_name = 'go.exe' if os_info['platform'] == 'windows' else 'go'
env['FILE_NAME'] = file_name
- if 'CM_GO_BIN_WITH_PATH' not in env:
+ if 'MLC_GO_BIN_WITH_PATH' not in env:
r = i['automation'].find_artifact({'file_name': file_name,
'env': env,
'os_info': os_info,
'default_path_env_key': 'PATH',
'detect_version': True,
- 'env_path_key': 'CM_GO_BIN_WITH_PATH',
+ 'env_path_key': 'MLC_GO_BIN_WITH_PATH',
'run_script_input': i['run_script_input'],
'recursion_spaces': recursion_spaces})
if r['return'] > 0:
if r['return'] == 16:
- env['CM_REQUIRE_INSTALL'] = "yes"
+ env['MLC_REQUIRE_INSTALL'] = "yes"
return {'return': 0}
else:
return r
@@ -34,7 +34,7 @@ def preprocess(i):
def detect_version(i):
r = i['automation'].parse_version({'match_text': r'\s+go([\d.]+)',
'group_number': 1,
- 'env_key': 'CM_GO_VERSION',
+ 'env_key': 'MLC_GO_VERSION',
'which_env': i['env']})
if r['return'] > 0:
return r
@@ -54,11 +54,11 @@ def postprocess(i):
return r
version = r['version']
- found_file_path = env['CM_GO_BIN_WITH_PATH']
+ found_file_path = env['MLC_GO_BIN_WITH_PATH']
found_path = os.path.dirname(found_file_path)
- env['CM_GO_INSTALLED_PATH'] = found_path
+ env['MLC_GO_INSTALLED_PATH'] = found_path
- env['CM_GO_CACHE_TAGS'] = 'version-' + version
+ env['MLC_GO_CACHE_TAGS'] = 'version-' + version
return {'return': 0, 'version': version}
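
detect_version above hands parse_version the regex r'\s+go([\d.]+)' with group 1; against typical `go version` output that isolates the bare version number. A quick standalone check (the sample output string is illustrative, not captured from a real run):

```python
import re

output = 'go version go1.22.1 linux/amd64'  # illustrative sample
match = re.search(r'\s+go([\d.]+)', output)
if match:
    print(match.group(1))  # -> 1.22.1
```
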
diff --git a/script/get-go/meta.yaml b/script/get-go/meta.yaml
index f7c5c89d2..8f73f7284 100644
--- a/script/get-go/meta.yaml
+++ b/script/get-go/meta.yaml
@@ -5,13 +5,13 @@ cache: true
category: Compiler automation
clean_files: []
env:
- CM_REQUIRE_INSTALL: 'no'
+ MLC_REQUIRE_INSTALL: 'no'
new_env_keys:
-- CM_GO_*
+- MLC_GO_*
- +PATH
prehook_deps:
- enable_if_env:
- CM_REQUIRE_INSTALL:
+ MLC_REQUIRE_INSTALL:
- 'yes'
reuse_version: true
tags: install,go
diff --git a/script/get-google-saxml/meta.yaml b/script/get-google-saxml/meta.yaml
index 2e2db0f88..015ddddcc 100644
--- a/script/get-google-saxml/meta.yaml
+++ b/script/get-google-saxml/meta.yaml
@@ -20,10 +20,10 @@ deps:
tags: get,python3
- env:
- CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_GOOGLE_SAXML_SRC
+ MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_GOOGLE_SAXML_SRC
extra_cache_tags: google,saxsml,src
force_env_keys:
- - CM_GIT_CHECKOUT
+ - MLC_GIT_CHECKOUT
names:
- google-saxml-git-src
tags: get,git,_repo.https://github.com/google/saxml
@@ -33,11 +33,11 @@ deps:
- bazel
extra_cache_tags_from_env:
- - env: CM_PYTHON_CACHE_TAGS
+ - env: MLC_PYTHON_CACHE_TAGS
prefix: python-
new_env_keys:
-- CM_GOOGLE_SAXML*
+- MLC_GOOGLE_SAXML*
tags:
- get
diff --git a/script/get-google-test/customize.py b/script/get-google-test/customize.py
index a22c59349..cb1be8fe5 100644
--- a/script/get-google-test/customize.py
+++ b/script/get-google-test/customize.py
@@ -12,8 +12,8 @@ def preprocess(i):
automation = i['automation']
- env['CM_GIT_CHECKOUT'] = "v" + env['CM_VERSION']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ env['MLC_GIT_CHECKOUT'] = "v" + env['MLC_VERSION']
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
return {'return': 0}
@@ -27,8 +27,8 @@ def postprocess(i):
env['+LD_LIBRARY_PATH'] = []
gtest_install_path = os.path.join(os.getcwd(), "install")
- env['CM_GOOGLE_TEST_SRC_PATH'] = env['CM_GIT_REPO_CHECKOUT_PATH']
- env['CM_GOOGLE_TEST_INSTALL_PATH'] = gtest_install_path
+ env['MLC_GOOGLE_TEST_SRC_PATH'] = env['MLC_GIT_REPO_CHECKOUT_PATH']
+ env['MLC_GOOGLE_TEST_INSTALL_PATH'] = gtest_install_path
env['+C_INCLUDE_PATH'].append(os.path.join(gtest_install_path, "include"))
env['+LD_LIBRARY_PATH'].append(os.path.join(gtest_install_path, "lib"))
diff --git a/script/get-google-test/meta.yaml b/script/get-google-test/meta.yaml
index 68de9dbfd..18af534a1 100644
--- a/script/get-google-test/meta.yaml
+++ b/script/get-google-test/meta.yaml
@@ -14,8 +14,8 @@ deps:
input_description: {}
input_mapping: {}
new_env_keys:
-- CM_GOOGLE_TEST_SRC_PATH
-- CM_GOOGLE_TEST_INSTALL_PATH
+- MLC_GOOGLE_TEST_SRC_PATH
+- MLC_GOOGLE_TEST_INSTALL_PATH
- +C_INCLUDE_PATH
- +LD_LIBRARY_PATH
new_state_keys: []
@@ -24,7 +24,7 @@ posthook_deps: []
prehook_deps:
- extra_cache_tags: google-test,gtest
force_env_keys:
- - CM_GIT_*
+ - MLC_GIT_*
tags: get,git,repo,_repo.https://github.com/google/googletest.git
tags:
- get
diff --git a/script/get-google-test/run.sh b/script/get-google-test/run.sh
index c8a9a4425..eaf2eb367 100644
--- a/script/get-google-test/run.sh
+++ b/script/get-google-test/run.sh
@@ -1,19 +1,19 @@
#!/bin/bash
function cmake() {
-${CM_CMAKE_BIN_WITH_PATH} $@
+${MLC_CMAKE_BIN_WITH_PATH} $@
}
-export CC=${CM_C_COMPILER_WITH_PATH}
-export CXX=${CM_CXX_COMPILER_WITH_PATH}
+export CC=${MLC_C_COMPILER_WITH_PATH}
+export CXX=${MLC_CXX_COMPILER_WITH_PATH}
CUR=$PWD
mkdir -p install
INSTALL_DIR=$CUR/install
-cd ${CM_GIT_REPO_CHECKOUT_PATH}
+cd ${MLC_GIT_REPO_CHECKOUT_PATH}
mkdir build
cd build
-export MAKEFLAGS=-j${CM_MAKE_CORES}
+export MAKEFLAGS=-j${MLC_MAKE_CORES}
cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_DIR} ..
test $? -eq 0 || exit $?
diff --git a/script/get-huggingface-cli/customize.py b/script/get-huggingface-cli/customize.py
index 56fcb0761..03fc5b753 100644
--- a/script/get-huggingface-cli/customize.py
+++ b/script/get-huggingface-cli/customize.py
@@ -4,11 +4,11 @@
def preprocess(i):
env = i['env']
- if env.get('CM_HF_TOKEN', '') != '':
- env['CM_HF_LOGIN_CMD'] = f"""git config --global credential.helper store && huggingface-cli login --token {env['CM_HF_TOKEN']} --add-to-git-credential
+ if env.get('MLC_HF_TOKEN', '') != '':
+ env['MLC_HF_LOGIN_CMD'] = f"""git config --global credential.helper store && huggingface-cli login --token {env['MLC_HF_TOKEN']} --add-to-git-credential
"""
- elif str(env.get('CM_HF_DO_LOGIN')).lower() in ["yes", "1", "true"]:
- env['CM_HF_LOGIN_CMD'] = f"""git config --global credential.helper store && huggingface-cli login
+ elif str(env.get('MLC_HF_DO_LOGIN')).lower() in ["yes", "1", "true"]:
+ env['MLC_HF_LOGIN_CMD'] = f"""git config --global credential.helper store && huggingface-cli login
"""
return {'return': 0}
@@ -18,7 +18,7 @@ def postprocess(i):
r = i['automation'].parse_version({'match_text': r'huggingface_hub\s*version:\s*([\d.]+)',
'group_number': 1,
- 'env_key': 'CM_GITHUBCLI_VERSION',
+ 'env_key': 'MLC_GITHUBCLI_VERSION',
'which_env': i['env']})
if r['return'] > 0:
return r
diff --git a/script/get-huggingface-cli/meta.yaml b/script/get-huggingface-cli/meta.yaml
index 6643eb222..aad5f7aa1 100644
--- a/script/get-huggingface-cli/meta.yaml
+++ b/script/get-huggingface-cli/meta.yaml
@@ -14,7 +14,7 @@ tags:
- huggingface-cli
- cli
input_mapping:
- token: CM_HF_TOKEN
+ token: MLC_HF_TOKEN
uid: e9488a272f1d4160
deps:
- tags: get,generic-python-lib,_package.huggingface_hub[cli]
@@ -23,4 +23,4 @@ variations:
cache: true
force_cache: true
env:
- CM_HF_DO_LOGIN: yes
+ MLC_HF_DO_LOGIN: yes
diff --git a/script/get-huggingface-cli/run.bat b/script/get-huggingface-cli/run.bat
index 464afe5c7..97c90f089 100644
--- a/script/get-huggingface-cli/run.bat
+++ b/script/get-huggingface-cli/run.bat
@@ -1,8 +1,8 @@
@echo off
-REM Check if the environment variable CM_HF_LOGIN_CMD is defined and not empty
-IF DEFINED CM_HF_LOGIN_CMD (
- echo %CM_HF_LOGIN_CMD%
- call %CM_HF_LOGIN_CMD%
+REM Check if the environment variable MLC_HF_LOGIN_CMD is defined and not empty
+IF DEFINED MLC_HF_LOGIN_CMD (
+ echo %MLC_HF_LOGIN_CMD%
+ call %MLC_HF_LOGIN_CMD%
IF ERRORLEVEL 1 (
echo Command failed with error code %ERRORLEVEL%
exit /b %ERRORLEVEL%
diff --git a/script/get-huggingface-cli/run.sh b/script/get-huggingface-cli/run.sh
index 43d20f367..cb1d022ee 100644
--- a/script/get-huggingface-cli/run.sh
+++ b/script/get-huggingface-cli/run.sh
@@ -1,7 +1,7 @@
#!/bin/bash
-if [[ -n ${CM_HF_LOGIN_CMD} ]]; then
- echo "${CM_HF_LOGIN_CMD}"
- eval ${CM_HF_LOGIN_CMD}
+if [[ -n ${MLC_HF_LOGIN_CMD} ]]; then
+ echo "${MLC_HF_LOGIN_CMD}"
+ eval ${MLC_HF_LOGIN_CMD}
test $? -eq 0 || exit $?
fi
huggingface-cli version > tmp-ver.out
diff --git a/script/get-ipol-src/customize.py b/script/get-ipol-src/customize.py
index 9291872cf..4de9d4f9c 100644
--- a/script/get-ipol-src/customize.py
+++ b/script/get-ipol-src/customize.py
@@ -16,15 +16,15 @@ def preprocess(i):
path = os.getcwd()
- url = env['CM_IPOL_SRC_URL']
+ url = env['MLC_IPOL_SRC_URL']
- year = env.get('CM_IPOL_YEAR', '')
- number = env.get('CM_IPOL_NUMBER', '')
+ year = env.get('MLC_IPOL_YEAR', '')
+ number = env.get('MLC_IPOL_NUMBER', '')
url = url.replace(
- '{{CM_IPOL_YEAR}}',
+ '{{MLC_IPOL_YEAR}}',
year).replace(
- '{{CM_IPOL_NUMBER}}',
+ '{{MLC_IPOL_NUMBER}}',
number)
print('Downloading from {}'.format(url))
@@ -54,7 +54,7 @@ def preprocess(i):
subdir = ff[0]
- env['CM_IPOL_PATH'] = os.path.join(path, subdir)
+ env['MLC_IPOL_PATH'] = os.path.join(path, subdir)
# Applying patch
cmd = 'patch -p0 < {}'.format(os.path.join(script_path,
diff --git a/script/get-ipol-src/meta.yaml b/script/get-ipol-src/meta.yaml
index dd6b6ca0d..ac64d8971 100644
--- a/script/get-ipol-src/meta.yaml
+++ b/script/get-ipol-src/meta.yaml
@@ -4,22 +4,22 @@ automation_uid: 5b4e0237da074764
cache: true
category: Reproducibility and artifact evaluation
env:
- CM_IPOL_NUMBER: '439'
- CM_IPOL_SRC_URL: http://www.ipol.im/pub/art/{{CM_IPOL_YEAR}}/{{CM_IPOL_NUMBER}}/{{CM_IPOL_NUMBER}}-master.zip
- CM_IPOL_YEAR: '2022'
+ MLC_IPOL_NUMBER: '439'
+ MLC_IPOL_SRC_URL: http://www.ipol.im/pub/art/{{MLC_IPOL_YEAR}}/{{MLC_IPOL_NUMBER}}/{{MLC_IPOL_NUMBER}}-master.zip
+ MLC_IPOL_YEAR: '2022'
extra_cache_tags_from_env:
-- env: CM_IPOL_NUMBER
+- env: MLC_IPOL_NUMBER
prefix: number-
-- env: CM_IPOL_YEAR
+- env: MLC_IPOL_YEAR
prefix: year-
input_description:
number: IPOL publication number
year: IPOL publication year
input_mapping:
- number: CM_IPOL_NUMBER
- year: CM_IPOL_YEAR
+ number: MLC_IPOL_NUMBER
+ year: MLC_IPOL_YEAR
new_env_keys:
-- CM_IPOL_*
+- MLC_IPOL_*
tags:
- get
- ipol
diff --git a/script/get-java/customize.py b/script/get-java/customize.py
index b82010d8d..199d1b23d 100644
--- a/script/get-java/customize.py
+++ b/script/get-java/customize.py
@@ -21,9 +21,9 @@ def preprocess(i):
meta = i['meta']
found = False
- install = env.get('CM_JAVA_PREBUILT_INSTALL', '') in ['on', 'True', True]
+ install = env.get('MLC_JAVA_PREBUILT_INSTALL', '') in ['on', 'True', True]
- env_path_key = 'CM_JAVA_BIN_WITH_PATH'
+ env_path_key = 'MLC_JAVA_BIN_WITH_PATH'
# If not force install, search for artifact
if not install:
@@ -45,27 +45,27 @@ def preprocess(i):
if not found or install:
if os_info['platform'] == 'windows':
- env['CM_JAVA_PREBUILT_HOST_OS'] = 'windows'
- env['CM_JAVA_PREBUILT_EXT'] = '.zip'
+ env['MLC_JAVA_PREBUILT_HOST_OS'] = 'windows'
+ env['MLC_JAVA_PREBUILT_EXT'] = '.zip'
else:
- env['CM_JAVA_PREBUILT_HOST_OS'] = 'linux'
- env['CM_JAVA_PREBUILT_EXT'] = '.tar.gz'
+ env['MLC_JAVA_PREBUILT_HOST_OS'] = 'linux'
+ env['MLC_JAVA_PREBUILT_EXT'] = '.tar.gz'
- url = env['CM_JAVA_PREBUILT_URL']
- filename = env['CM_JAVA_PREBUILT_FILENAME']
+ url = env['MLC_JAVA_PREBUILT_URL']
+ filename = env['MLC_JAVA_PREBUILT_FILENAME']
- java_prebuilt_version = env['CM_JAVA_PREBUILT_VERSION']
- java_prebuilt_build = env['CM_JAVA_PREBUILT_BUILD']
+ java_prebuilt_version = env['MLC_JAVA_PREBUILT_VERSION']
+ java_prebuilt_build = env['MLC_JAVA_PREBUILT_BUILD']
- for key in ['CM_JAVA_PREBUILT_VERSION',
- 'CM_JAVA_PREBUILT_BUILD',
- 'CM_JAVA_PREBUILT_HOST_OS',
- 'CM_JAVA_PREBUILT_EXT']:
+ for key in ['MLC_JAVA_PREBUILT_VERSION',
+ 'MLC_JAVA_PREBUILT_BUILD',
+ 'MLC_JAVA_PREBUILT_HOST_OS',
+ 'MLC_JAVA_PREBUILT_EXT']:
url = url.replace('${' + key + '}', env[key])
filename = filename.replace('${' + key + '}', env[key])
- env['CM_JAVA_PREBUILT_URL'] = url
- env['CM_JAVA_PREBUILT_FILENAME'] = filename
+ env['MLC_JAVA_PREBUILT_URL'] = url
+ env['MLC_JAVA_PREBUILT_FILENAME'] = filename
print('')
print(
@@ -118,7 +118,7 @@ def detect_version(i):
r = i['automation'].parse_version({'match_text': r'\s*"(.*?)"',
'group_number': 1,
- 'env_key': 'CM_JAVA_VERSION',
+ 'env_key': 'MLC_JAVA_VERSION',
'which_env': i['env'],
'debug': True})
if r['return'] > 0:
@@ -138,13 +138,13 @@ def postprocess(i):
if r['return'] > 0:
return r
- version = env['CM_JAVA_VERSION']
- env['CM_JAVA_CACHE_TAGS'] = 'version-' + version
+ version = env['MLC_JAVA_VERSION']
+ env['MLC_JAVA_CACHE_TAGS'] = 'version-' + version
- found_file_path = env['CM_JAVA_BIN_WITH_PATH']
+ found_file_path = env['MLC_JAVA_BIN_WITH_PATH']
file_name = os.path.basename(found_file_path)
- env['CM_JAVA_BIN'] = file_name
+ env['MLC_JAVA_BIN'] = file_name
found_path = os.path.dirname(found_file_path)
java_home_path = os.path.dirname(found_path)
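
The prebuilt path is plain string templating: meta.yaml's default_env (below) carries ${MLC_JAVA_PREBUILT_*} placeholders, and the loop above substitutes each one before the URL and filename reach install-prebuilt.sh. Expanding the defaults by hand (values taken from the meta.yaml diff that follows):

```python
env = {
    'MLC_JAVA_PREBUILT_VERSION': '19',
    'MLC_JAVA_PREBUILT_BUILD': '36',
    'MLC_JAVA_PREBUILT_HOST_OS': 'linux',
    'MLC_JAVA_PREBUILT_EXT': '.tar.gz',
}
filename = ('openjdk-${MLC_JAVA_PREBUILT_VERSION}+${MLC_JAVA_PREBUILT_BUILD}'
            '_${MLC_JAVA_PREBUILT_HOST_OS}-x64_bin')
url = 'https://download.java.net/openjdk/jdk${MLC_JAVA_PREBUILT_VERSION}/ri/'
for key, value in env.items():
    url = url.replace('${' + key + '}', value)
    filename = filename.replace('${' + key + '}', value)
print(url + filename + '.tar.gz')
# -> https://download.java.net/openjdk/jdk19/ri/openjdk-19+36_linux-x64_bin.tar.gz
```
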
diff --git a/script/get-java/install-prebuilt.bat b/script/get-java/install-prebuilt.bat
index 17b00e5ab..beada62d3 100644
--- a/script/get-java/install-prebuilt.bat
+++ b/script/get-java/install-prebuilt.bat
@@ -1,9 +1,9 @@
-del /Q %CM_JAVA_PREBUILT_FILENAME%.zip
+del /Q %MLC_JAVA_PREBUILT_FILENAME%.zip
-wget --no-check-certificate %CM_JAVA_PREBUILT_URL%%CM_JAVA_PREBUILT_FILENAME%.zip
+wget --no-check-certificate %MLC_JAVA_PREBUILT_URL%%MLC_JAVA_PREBUILT_FILENAME%.zip
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
-unzip %CM_JAVA_PREBUILT_FILENAME%.zip
+unzip %MLC_JAVA_PREBUILT_FILENAME%.zip
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
-del /Q %CM_JAVA_PREBUILT_FILENAME%.zip
+del /Q %MLC_JAVA_PREBUILT_FILENAME%.zip
diff --git a/script/get-java/install-prebuilt.sh b/script/get-java/install-prebuilt.sh
index 575d0467e..a037c60ef 100644
--- a/script/get-java/install-prebuilt.sh
+++ b/script/get-java/install-prebuilt.sh
@@ -1,15 +1,15 @@
#!/bin/bash
-rm -f ${CM_JAVA_PREBUILT_FILENAME}.tar.gz
-rm -f ${CM_JAVA_PREBUILT_FILENAME}.tar
+rm -f ${MLC_JAVA_PREBUILT_FILENAME}.tar.gz
+rm -f ${MLC_JAVA_PREBUILT_FILENAME}.tar
-wget --no-check-certificate ${CM_JAVA_PREBUILT_URL}${CM_JAVA_PREBUILT_FILENAME}.tar.gz
+wget --no-check-certificate ${MLC_JAVA_PREBUILT_URL}${MLC_JAVA_PREBUILT_FILENAME}.tar.gz
test $? -eq 0 || exit 1
-gzip -d ${CM_JAVA_PREBUILT_FILENAME}.tar.gz
+gzip -d ${MLC_JAVA_PREBUILT_FILENAME}.tar.gz
test $? -eq 0 || exit 1
-tar xvf ${CM_JAVA_PREBUILT_FILENAME}.tar
+tar xvf ${MLC_JAVA_PREBUILT_FILENAME}.tar
test $? -eq 0 || exit 1
-rm -f ${CM_JAVA_PREBUILT_FILENAME}.tar
+rm -f ${MLC_JAVA_PREBUILT_FILENAME}.tar
diff --git a/script/get-java/meta.yaml b/script/get-java/meta.yaml
index 07facec86..434d34a00 100644
--- a/script/get-java/meta.yaml
+++ b/script/get-java/meta.yaml
@@ -4,16 +4,16 @@ automation_uid: 5b4e0237da074764
cache: true
category: Detection or installation of tools and artifacts
default_env:
- CM_JAVA_PREBUILT_BUILD: '36'
- CM_JAVA_PREBUILT_FILENAME: openjdk-${CM_JAVA_PREBUILT_VERSION}+${CM_JAVA_PREBUILT_BUILD}_${CM_JAVA_PREBUILT_HOST_OS}-x64_bin
- CM_JAVA_PREBUILT_URL: https://download.java.net/openjdk/jdk${CM_JAVA_PREBUILT_VERSION}/ri/
- CM_JAVA_PREBUILT_VERSION: '19'
+ MLC_JAVA_PREBUILT_BUILD: '36'
+ MLC_JAVA_PREBUILT_FILENAME: openjdk-${MLC_JAVA_PREBUILT_VERSION}+${MLC_JAVA_PREBUILT_BUILD}_${MLC_JAVA_PREBUILT_HOST_OS}-x64_bin
+ MLC_JAVA_PREBUILT_URL: https://download.java.net/openjdk/jdk${MLC_JAVA_PREBUILT_VERSION}/ri/
+ MLC_JAVA_PREBUILT_VERSION: '19'
deps:
- tags: detect,os
input_mapping:
- install: CM_JAVA_PREBUILT_INSTALL
+ install: MLC_JAVA_PREBUILT_INSTALL
new_env_keys:
-- CM_JAVA_*
+- MLC_JAVA_*
- JAVA_HOME
- +PATH
tags:
@@ -23,4 +23,4 @@ uid: 9399d0e785704f8c
variations:
install:
env:
- CM_JAVA_PREBUILT_INSTALL: 'on'
+ MLC_JAVA_PREBUILT_INSTALL: 'on'
diff --git a/script/get-java/run.bat b/script/get-java/run.bat
index 0a80aa34c..807efafe4 100644
--- a/script/get-java/run.bat
+++ b/script/get-java/run.bat
@@ -1,3 +1,3 @@
-"%CM_JAVA_BIN_WITH_PATH%" -version > tmp-ver.out 2>&1
+"%MLC_JAVA_BIN_WITH_PATH%" -version > tmp-ver.out 2>&1
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/get-java/run.sh b/script/get-java/run.sh
index 566a2b569..ddb8cb04b 100644
--- a/script/get-java/run.sh
+++ b/script/get-java/run.sh
@@ -1,3 +1,3 @@
#!/bin/bash
-${CM_JAVA_BIN_WITH_PATH} -version &> tmp-ver.out
+${MLC_JAVA_BIN_WITH_PATH} -version &> tmp-ver.out
test $? -eq 0 || exit 1
diff --git a/script/get-javac/customize.py b/script/get-javac/customize.py
index 4a1aa03f9..b039c2483 100644
--- a/script/get-javac/customize.py
+++ b/script/get-javac/customize.py
@@ -21,9 +21,9 @@ def preprocess(i):
meta = i['meta']
found = False
- install = env.get('CM_JAVAC_PREBUILT_INSTALL', '') in ['on', 'True', True]
+ install = env.get('MLC_JAVAC_PREBUILT_INSTALL', '') in ['on', 'True', True]
- env_path_key = 'CM_JAVAC_BIN_WITH_PATH'
+ env_path_key = 'MLC_JAVAC_BIN_WITH_PATH'
# If not force install, search for artifact
if not install:
@@ -45,27 +45,27 @@ def preprocess(i):
if not found or install:
if os_info['platform'] == 'windows':
- env['CM_JAVAC_PREBUILT_HOST_OS'] = 'windows'
- env['CM_JAVAC_PREBUILT_EXT'] = '.zip'
+ env['MLC_JAVAC_PREBUILT_HOST_OS'] = 'windows'
+ env['MLC_JAVAC_PREBUILT_EXT'] = '.zip'
else:
- env['CM_JAVAC_PREBUILT_HOST_OS'] = 'linux'
- env['CM_JAVAC_PREBUILT_EXT'] = '.tar.gz'
+ env['MLC_JAVAC_PREBUILT_HOST_OS'] = 'linux'
+ env['MLC_JAVAC_PREBUILT_EXT'] = '.tar.gz'
- url = env['CM_JAVAC_PREBUILT_URL']
- filename = env['CM_JAVAC_PREBUILT_FILENAME']
+ url = env['MLC_JAVAC_PREBUILT_URL']
+ filename = env['MLC_JAVAC_PREBUILT_FILENAME']
- javac_prebuilt_version = env['CM_JAVAC_PREBUILT_VERSION']
- javac_prebuilt_build = env['CM_JAVAC_PREBUILT_BUILD']
+ javac_prebuilt_version = env['MLC_JAVAC_PREBUILT_VERSION']
+ javac_prebuilt_build = env['MLC_JAVAC_PREBUILT_BUILD']
- for key in ['CM_JAVAC_PREBUILT_VERSION',
- 'CM_JAVAC_PREBUILT_BUILD',
- 'CM_JAVAC_PREBUILT_HOST_OS',
- 'CM_JAVAC_PREBUILT_EXT']:
+ for key in ['MLC_JAVAC_PREBUILT_VERSION',
+ 'MLC_JAVAC_PREBUILT_BUILD',
+ 'MLC_JAVAC_PREBUILT_HOST_OS',
+ 'MLC_JAVAC_PREBUILT_EXT']:
url = url.replace('${' + key + '}', env[key])
filename = filename.replace('${' + key + '}', env[key])
- env['CM_JAVAC_PREBUILT_URL'] = url
- env['CM_JAVAC_PREBUILT_FILENAME'] = filename
+ env['MLC_JAVAC_PREBUILT_URL'] = url
+ env['MLC_JAVAC_PREBUILT_FILENAME'] = filename
print('')
print(
@@ -118,7 +118,7 @@ def detect_version(i):
r = i['automation'].parse_version({'match_text': r'javac\s*([\d.]+)',
'group_number': 1,
- 'env_key': 'CM_JAVAC_VERSION',
+ 'env_key': 'MLC_JAVAC_VERSION',
'which_env': i['env'],
'debug': True})
if r['return'] > 0:
@@ -140,21 +140,21 @@ def postprocess(i):
if r['return'] > 0:
return r
- version = env['CM_JAVAC_VERSION']
- env['CM_JAVAC_CACHE_TAGS'] = 'version-' + version
+ version = env['MLC_JAVAC_VERSION']
+ env['MLC_JAVAC_CACHE_TAGS'] = 'version-' + version
- found_file_path = env['CM_JAVAC_BIN_WITH_PATH']
+ found_file_path = env['MLC_JAVAC_BIN_WITH_PATH']
file_name = os.path.basename(found_file_path)
file_path = os.path.dirname(found_file_path)
- env['CM_JAVAC_BIN'] = file_name
+ env['MLC_JAVAC_BIN'] = file_name
if os_info['platform'] == 'windows':
- env['CM_JAVA_BIN'] = 'java.exe'
+ env['MLC_JAVA_BIN'] = 'java.exe'
else:
- env['CM_JAVA_BIN'] = 'java'
+ env['MLC_JAVA_BIN'] = 'java'
- env['CM_JAVA_BIN_WITH_PATH'] = os.path.join(file_path, env['CM_JAVA_BIN'])
+ env['MLC_JAVA_BIN_WITH_PATH'] = os.path.join(file_path, env['MLC_JAVA_BIN'])
found_path = os.path.dirname(found_file_path)
javac_home_path = os.path.dirname(found_path)
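The `${...}` expansion loop renamed in the hunk above is easy to check in isolation: with the defaults from this script's `default_env`, the filename template resolves as shown below (standalone sketch; values taken from the meta file in this patch).

```python
# Standalone check of the ${KEY} substitution performed in preprocess().
env = {
    'MLC_JAVAC_PREBUILT_VERSION': '19',
    'MLC_JAVAC_PREBUILT_BUILD': '36',
    'MLC_JAVAC_PREBUILT_HOST_OS': 'linux',
    'MLC_JAVAC_PREBUILT_EXT': '.tar.gz',
}
filename = ('openjdk-${MLC_JAVAC_PREBUILT_VERSION}+${MLC_JAVAC_PREBUILT_BUILD}'
            '_${MLC_JAVAC_PREBUILT_HOST_OS}-x64_bin')
for key in ['MLC_JAVAC_PREBUILT_VERSION', 'MLC_JAVAC_PREBUILT_BUILD',
            'MLC_JAVAC_PREBUILT_HOST_OS', 'MLC_JAVAC_PREBUILT_EXT']:
    filename = filename.replace('${' + key + '}', env[key])
print(filename)  # openjdk-19+36_linux-x64_bin
```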
diff --git a/script/get-javac/install-prebuilt.bat b/script/get-javac/install-prebuilt.bat
index 74b1c4812..fdec0a335 100644
--- a/script/get-javac/install-prebuilt.bat
+++ b/script/get-javac/install-prebuilt.bat
@@ -1,9 +1,9 @@
-del /Q %CM_JAVAC_PREBUILT_FILENAME%.zip
+del /Q %MLC_JAVAC_PREBUILT_FILENAME%.zip
-wget --no-check-certificate %CM_JAVAC_PREBUILT_URL%%CM_JAVAC_PREBUILT_FILENAME%.zip
+wget --no-check-certificate %MLC_JAVAC_PREBUILT_URL%%MLC_JAVAC_PREBUILT_FILENAME%.zip
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
-unzip %CM_JAVAC_PREBUILT_FILENAME%.zip
+unzip %MLC_JAVAC_PREBUILT_FILENAME%.zip
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
-del /Q %CM_JAVAC_PREBUILT_FILENAME%.zip
+del /Q %MLC_JAVAC_PREBUILT_FILENAME%.zip
diff --git a/script/get-javac/install-prebuilt.sh b/script/get-javac/install-prebuilt.sh
index eed1b8b01..96db5c87d 100644
--- a/script/get-javac/install-prebuilt.sh
+++ b/script/get-javac/install-prebuilt.sh
@@ -1,15 +1,15 @@
#!/bin/bash
-rm -f ${CM_JAVAC_PREBUILT_FILENAME}.tar.gz
-rm -f ${CM_JAVAC_PREBUILT_FILENAME}.tar
+rm -f ${MLC_JAVAC_PREBUILT_FILENAME}.tar.gz
+rm -f ${MLC_JAVAC_PREBUILT_FILENAME}.tar
-wget --no-check-certificate ${CM_JAVAC_PREBUILT_URL}${CM_JAVAC_PREBUILT_FILENAME}.tar.gz
+wget --no-check-certificate ${MLC_JAVAC_PREBUILT_URL}${MLC_JAVAC_PREBUILT_FILENAME}.tar.gz
test $? -eq 0 || exit 1
-gzip -d ${CM_JAVAC_PREBUILT_FILENAME}.tar.gz
+gzip -d ${MLC_JAVAC_PREBUILT_FILENAME}.tar.gz
test $? -eq 0 || exit 1
-tar xvf ${CM_JAVAC_PREBUILT_FILENAME}.tar
+tar xvf ${MLC_JAVAC_PREBUILT_FILENAME}.tar
test $? -eq 0 || exit 1
-rm -f ${CM_JAVAC_PREBUILT_FILENAME}.tar
+rm -f ${MLC_JAVAC_PREBUILT_FILENAME}.tar
diff --git a/script/get-javac/meta.yaml b/script/get-javac/meta.yaml
index 89ffaf779..30f20360d 100644
--- a/script/get-javac/meta.yaml
+++ b/script/get-javac/meta.yaml
@@ -4,17 +4,17 @@ automation_uid: 5b4e0237da074764
cache: true
category: Detection or installation of tools and artifacts
default_env:
- CM_JAVAC_PREBUILT_BUILD: '36'
- CM_JAVAC_PREBUILT_FILENAME: openjdk-${CM_JAVAC_PREBUILT_VERSION}+${CM_JAVAC_PREBUILT_BUILD}_${CM_JAVAC_PREBUILT_HOST_OS}-x64_bin
- CM_JAVAC_PREBUILT_URL: https://download.java.net/openjdk/jdk${CM_JAVAC_PREBUILT_VERSION}/ri/
- CM_JAVAC_PREBUILT_VERSION: '19'
+ MLC_JAVAC_PREBUILT_BUILD: '36'
+ MLC_JAVAC_PREBUILT_FILENAME: openjdk-${MLC_JAVAC_PREBUILT_VERSION}+${MLC_JAVAC_PREBUILT_BUILD}_${MLC_JAVAC_PREBUILT_HOST_OS}-x64_bin
+ MLC_JAVAC_PREBUILT_URL: https://download.java.net/openjdk/jdk${MLC_JAVAC_PREBUILT_VERSION}/ri/
+ MLC_JAVAC_PREBUILT_VERSION: '19'
deps:
- tags: detect,os
input_mapping:
- install: CM_JAVAC_PREBUILT_INSTALL
+ install: MLC_JAVAC_PREBUILT_INSTALL
new_env_keys:
-- CM_JAVAC_*
-- CM_JAVA_*
+- MLC_JAVAC_*
+- MLC_JAVA_*
- JAVA_HOME
- +PATH
tags:
@@ -24,4 +24,4 @@ uid: 509280c497b24226
variations:
install:
env:
- CM_JAVAC_PREBUILT_INSTALL: 'on'
+ MLC_JAVAC_PREBUILT_INSTALL: 'on'
diff --git a/script/get-javac/run.bat b/script/get-javac/run.bat
index 1919f559c..75b6bdcc8 100644
--- a/script/get-javac/run.bat
+++ b/script/get-javac/run.bat
@@ -1,3 +1,3 @@
-"%CM_JAVAC_BIN_WITH_PATH%" -version > tmp-ver.out 2>&1
+"%MLC_JAVAC_BIN_WITH_PATH%" -version > tmp-ver.out 2>&1
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/get-javac/run.sh b/script/get-javac/run.sh
index 40f97218d..418a3230f 100644
--- a/script/get-javac/run.sh
+++ b/script/get-javac/run.sh
@@ -1,3 +1,3 @@
#!/bin/bash
-${CM_JAVAC_BIN_WITH_PATH} -version &> tmp-ver.out
+${MLC_JAVAC_BIN_WITH_PATH} -version &> tmp-ver.out
test $? -eq 0 || exit 1
diff --git a/script/get-lib-armnn/customize.py b/script/get-lib-armnn/customize.py
index c365543c7..263850444 100644
--- a/script/get-lib-armnn/customize.py
+++ b/script/get-lib-armnn/customize.py
@@ -8,16 +8,16 @@ def preprocess(i):
return {'return': 1, 'error': 'Windows is not supported in this script yet'}
env = i['env']
- version = env['CM_LIB_ARMNN_VERSION']
- if env.get('CM_HOST_PLATFORM_FLAVOR', '') == 'x86_64':
+ version = env['MLC_LIB_ARMNN_VERSION']
+ if env.get('MLC_HOST_PLATFORM_FLAVOR', '') == 'x86_64':
url = f"https://github.com/ARM-software/armnn/releases/download/{version}/ArmNN-linux-x86_64.tar.gz"
- elif env.get('CM_HOST_PLATFORM_FLAVOR', '') == 'aarch64':
+ elif env.get('MLC_HOST_PLATFORM_FLAVOR', '') == 'aarch64':
url = f"https://github.com/ARM-software/armnn/releases/download/{version}/ArmNN-linux-aarch64.tar.gz"
- env['CM_LIB_ARMNN_PREBUILT_BINARY_URL'] = url
- env['CM_LIB_ARMNN_EXTRACT_FILENAME'] = os.path.basename(url)
+ env['MLC_LIB_ARMNN_PREBUILT_BINARY_URL'] = url
+ env['MLC_LIB_ARMNN_EXTRACT_FILENAME'] = os.path.basename(url)
- env['CM_GIT_CHECKOUT'] = env['CM_TMP_GIT_BRANCH_NAME']
+ env['MLC_GIT_CHECKOUT'] = env['MLC_TMP_GIT_BRANCH_NAME']
return {'return': 0}
@@ -37,7 +37,7 @@ def postprocess(i):
env[key] = []
include_paths = []
- armnn_src_path = env['CM_GIT_CHECKOUT_PATH']
+ armnn_src_path = env['MLC_GIT_CHECKOUT_PATH']
include_paths.append(os.path.join(os.getcwd(), 'include'))
include_paths.append(os.path.join(armnn_src_path, 'include'))
include_paths.append(os.path.join(armnn_src_path, 'profiling'))
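One detail worth noting in the `preprocess()` above: `url` is only assigned for the `x86_64` and `aarch64` flavors, so any other `MLC_HOST_PLATFORM_FLAVOR` would hit a `NameError`. A defensive sketch of the same selection (`armnn_url` is illustrative, not part of the script):

```python
def armnn_url(env):
    """Pick the prebuilt ArmNN tarball for the detected platform flavor."""
    version = env['MLC_LIB_ARMNN_VERSION']
    flavor = env.get('MLC_HOST_PLATFORM_FLAVOR', '')
    if flavor == 'x86_64':
        return f"https://github.com/ARM-software/armnn/releases/download/{version}/ArmNN-linux-x86_64.tar.gz"
    if flavor == 'aarch64':
        return f"https://github.com/ARM-software/armnn/releases/download/{version}/ArmNN-linux-aarch64.tar.gz"
    return None  # unsupported flavor; the caller should fail explicitly

assert armnn_url({'MLC_LIB_ARMNN_VERSION': 'v23.11',
                  'MLC_HOST_PLATFORM_FLAVOR': 'x86_64'}).endswith('x86_64.tar.gz')
```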
diff --git a/script/get-lib-armnn/meta.yaml b/script/get-lib-armnn/meta.yaml
index df9a42a4d..6c33e97ed 100644
--- a/script/get-lib-armnn/meta.yaml
+++ b/script/get-lib-armnn/meta.yaml
@@ -7,16 +7,16 @@ default_version: '23.11'
deps:
- tags: detect,os
env:
- CM_GIT_URL: https://github.com/ARM-software/armnn
+ MLC_GIT_URL: https://github.com/ARM-software/armnn
new_env_keys:
-- CM_LIB_ARMNN_VERSION
-- CM_LIB_DNNL_*
+- MLC_LIB_ARMNN_VERSION
+- MLC_LIB_DNNL_*
- +C_INCLUDE_PATH
- +CPLUS_INCLUDE_PATH
- +LD_LIBRARY_PATH
prehook_deps:
- force_env_keys:
- - CM_GIT_*
+ - MLC_GIT_*
tags: get,git,repo,_repo.https://github.com/ARM-software/armnn
tags:
- get
@@ -27,13 +27,13 @@ uid: 9603a2e90fd44587
versions:
'22.11':
env:
- CM_LIB_ARMNN_VERSION: v22.11
- CM_TMP_GIT_BRANCH_NAME: branches/armnn_22_11
+ MLC_LIB_ARMNN_VERSION: v22.11
+ MLC_TMP_GIT_BRANCH_NAME: branches/armnn_22_11
'23.05':
env:
- CM_LIB_ARMNN_VERSION: v23.05
- CM_TMP_GIT_BRANCH_NAME: branches/armnn_23_05
+ MLC_LIB_ARMNN_VERSION: v23.05
+ MLC_TMP_GIT_BRANCH_NAME: branches/armnn_23_05
'23.11':
env:
- CM_LIB_ARMNN_VERSION: v23.11
- CM_TMP_GIT_BRANCH_NAME: branches/armnn_23_11
+ MLC_LIB_ARMNN_VERSION: v23.11
+ MLC_TMP_GIT_BRANCH_NAME: branches/armnn_23_11
diff --git a/script/get-lib-armnn/run.sh b/script/get-lib-armnn/run.sh
index 4bb5d182a..1c4d20e9d 100644
--- a/script/get-lib-armnn/run.sh
+++ b/script/get-lib-armnn/run.sh
@@ -2,8 +2,8 @@
CUR_DIR=${PWD:-tmp}
-wget -nc ${CM_LIB_ARMNN_PREBUILT_BINARY_URL}
-tar -xvzf ${CM_LIB_ARMNN_EXTRACT_FILENAME}
+wget -nc ${MLC_LIB_ARMNN_PREBUILT_BINARY_URL}
+tar -xvzf ${MLC_LIB_ARMNN_EXTRACT_FILENAME}
echo "******************************************************"
echo "ArmNN prebuilt binary downloaded to ${CUR_DIR} ..."
diff --git a/script/get-lib-dnnl/customize.py b/script/get-lib-dnnl/customize.py
index 051595167..d02be486e 100644
--- a/script/get-lib-dnnl/customize.py
+++ b/script/get-lib-dnnl/customize.py
@@ -13,7 +13,7 @@ def preprocess(i):
def postprocess(i):
env = i['env']
- env['CM_LIB_DNNL_INSTALL_DIR'] = os.getcwd()
+ env['MLC_LIB_DNNL_INSTALL_DIR'] = os.getcwd()
for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH',
'+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']:
diff --git a/script/get-lib-dnnl/meta.yaml b/script/get-lib-dnnl/meta.yaml
index 2fdc8cb7c..c625565cd 100644
--- a/script/get-lib-dnnl/meta.yaml
+++ b/script/get-lib-dnnl/meta.yaml
@@ -8,13 +8,13 @@ deps:
- tags: detect,cpu
- tags: cmake,get-cmake
env:
- CM_DNNL_CLEAN_BUILD: 'yes'
- CM_GIT_URL: https://github.com/oneapi-src/oneDNN
+ MLC_DNNL_CLEAN_BUILD: 'yes'
+ MLC_GIT_URL: https://github.com/oneapi-src/oneDNN
DNNL_BUILD_EXAMPLES: 'OFF'
DNNL_BUILD_TESTS: 'OFF'
DNNL_CPU_RUNTIME: OMP
new_env_keys:
-- CM_LIB_DNNL_*
+- MLC_LIB_DNNL_*
- +C_INCLUDE_PATH
- +CPLUS_INCLUDE_PATH
- +LD_LIBRARY_PATH
@@ -27,7 +27,7 @@ uid: 1cd35a6a3b0b4530
versions:
2.2.4:
env:
- CM_GIT_CHECKOUT: v2.2.4
+ MLC_GIT_CHECKOUT: v2.2.4
dev:
env:
- CM_GIT_CHECKOUT: master
+ MLC_GIT_CHECKOUT: master
diff --git a/script/get-lib-dnnl/run.sh b/script/get-lib-dnnl/run.sh
index ca47ee3b9..eaf887199 100644
--- a/script/get-lib-dnnl/run.sh
+++ b/script/get-lib-dnnl/run.sh
@@ -2,7 +2,7 @@
CUR_DIR=${PWD:-tmp}
-git clone --recursive -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} src
+git clone --recursive -b "${MLC_GIT_CHECKOUT}" ${MLC_GIT_URL} src
test $? -eq 0 || exit 1
@@ -25,7 +25,7 @@ cmake .. \
if [ "${?}" != "0" ]; then exit 1; fi
echo "******************************************************"
-cmake --build . -j${CM_CPUINFO_CPUs}
+cmake --build . -j${MLC_CPUINFO_CPUs}
if [ "${?}" != "0" ]; then exit 1; fi
echo "******************************************************"
@@ -35,7 +35,7 @@ if [ "${?}" != "0" ]; then exit 1; fi
# Clean build directory (too large)
cd ${INSTALL_DIR}
-if [ "${CM_DNNL_CLEAN_BUILD}" != "no" ]; then
+if [ "${MLC_DNNL_CLEAN_BUILD}" != "no" ]; then
rm -rf build
fi
diff --git a/script/get-lib-protobuf/customize.py b/script/get-lib-protobuf/customize.py
index e8e6ea450..4219223dd 100644
--- a/script/get-lib-protobuf/customize.py
+++ b/script/get-lib-protobuf/customize.py
@@ -12,8 +12,8 @@ def preprocess(i):
automation = i['automation']
- env['CM_GIT_CHECKOUT'] = "v" + env['CM_VERSION']
- quiet = (env.get('CM_QUIET', False) == 'yes')
+ env['MLC_GIT_CHECKOUT'] = "v" + env['MLC_VERSION']
+ quiet = (env.get('MLC_QUIET', False) == 'yes')
return {'return': 0}
@@ -26,8 +26,8 @@ def postprocess(i):
env['+LD_LIBRARY_PATH'] = []
protobuf_install_path = os.path.join(os.getcwd(), "install")
- env['CM_GOOGLE_PROTOBUF_SRC_PATH'] = env['CM_GIT_REPO_CHECKOUT_PATH']
- env['CM_GOOGLE_PROTOBUF_INSTALL_PATH'] = protobuf_install_path
+ env['MLC_GOOGLE_PROTOBUF_SRC_PATH'] = env['MLC_GIT_REPO_CHECKOUT_PATH']
+ env['MLC_GOOGLE_PROTOBUF_INSTALL_PATH'] = protobuf_install_path
env['+C_INCLUDE_PATH'].append(
os.path.join(
protobuf_install_path,
diff --git a/script/get-lib-protobuf/meta.yaml b/script/get-lib-protobuf/meta.yaml
index 6f06409fd..551a31a0e 100644
--- a/script/get-lib-protobuf/meta.yaml
+++ b/script/get-lib-protobuf/meta.yaml
@@ -10,8 +10,8 @@ deps:
input_description: {}
input_mapping: {}
new_env_keys:
-- CM_GOOGLE_PROTOBUF_SRC_PATH
-- CM_GOOGLE_PROTOBUF_INSTALL_PATH
+- MLC_GOOGLE_PROTOBUF_SRC_PATH
+- MLC_GOOGLE_PROTOBUF_INSTALL_PATH
- +C_INCLUDE_PATH
- +CPLUS_INCLUDE_PATH
- +LD_LIBRARY_PATH
@@ -21,15 +21,15 @@ posthook_deps: []
prehook_deps:
- extra_cache_tags: lib,protobuf,src
force_env_keys:
- - CM_GIT_*
+ - MLC_GIT_*
tags: get,git,repo,_repo.https://github.com/google/protobuf.git
update_tags_from_env_with_prefix:
_branch.:
- - CM_TMP_GIT_CHECKOUT
+ - MLC_TMP_GIT_CHECKOUT
_repo.:
- - CM_TMP_GIT_URL
+ - MLC_TMP_GIT_URL
_tag.:
- - CM_GIT_CHECKOUT_TAG
+ - MLC_GIT_CHECKOUT_TAG
tags:
- get
- google-protobuf
@@ -41,8 +41,8 @@ uid: db45f1eb73934f91
variations:
branch.#:
env:
- CM_TMP_GIT_CHECKOUT: '#'
+ MLC_TMP_GIT_CHECKOUT: '#'
tag.#:
env:
- CM_GIT_CHECKOUT_TAG: '#'
+ MLC_GIT_CHECKOUT_TAG: '#'
versions: {}
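The `update_tags_from_env_with_prefix` block renamed above drives which variation tags the git-repo dependency receives: each prefix (`_branch.`, `_repo.`, `_tag.`) is combined with the value of any listed env key that is set. A rough sketch of that expansion, assuming simple string concatenation (`expand_tags` and the tag value are illustrative):

```python
def expand_tags(mapping, env):
    # For every prefix, append one tag per non-empty mapped env key.
    tags = []
    for prefix, env_keys in mapping.items():
        for key in env_keys:
            if env.get(key, '') != '':
                tags.append(prefix + env[key])
    return tags

# Hypothetical tag value, for illustration only.
print(expand_tags({'_tag.': ['MLC_GIT_CHECKOUT_TAG']},
                  {'MLC_GIT_CHECKOUT_TAG': 'v3.21.12'}))  # ['_tag.v3.21.12']
```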
diff --git a/script/get-lib-protobuf/run.sh b/script/get-lib-protobuf/run.sh
index 29c0267d1..a44b8f688 100644
--- a/script/get-lib-protobuf/run.sh
+++ b/script/get-lib-protobuf/run.sh
@@ -2,11 +2,11 @@
CUR=$PWD
mkdir -p install
INSTALL_DIR=$CUR/install
-cd ${CM_GIT_REPO_CHECKOUT_PATH}
+cd ${MLC_GIT_REPO_CHECKOUT_PATH}
rm -rf build
mkdir build
cd build
-export MAKEFLAGS=-j${CM_MAKE_CORES}
+export MAKEFLAGS=-j${MLC_MAKE_CORES}
cmake -Dprotobuf_BUILD_TESTS=OFF -DBUILD_SHARED_LIBS=ON -DCMAKE_CXX_STANDARD=14 -DCMAKE_INSTALL_PREFIX=${INSTALL_DIR} ../cmake
test $? -eq 0 || exit $?
CMD="make install"
diff --git a/script/get-lib-qaic-api/customize.py b/script/get-lib-qaic-api/customize.py
index 6c829ae86..d94c53325 100644
--- a/script/get-lib-qaic-api/customize.py
+++ b/script/get-lib-qaic-api/customize.py
@@ -9,7 +9,7 @@ def preprocess(i):
env = i['env']
- # env['CM_GIT_CHECKOUT'] = env['CM_TMP_GIT_BRANCH_NAME']
+ # env['MLC_GIT_CHECKOUT'] = env['MLC_TMP_GIT_BRANCH_NAME']
return {'return': 0}
@@ -28,16 +28,16 @@ def postprocess(i):
for key in paths:
env[key] = []
- include_paths = [env['CM_TMP_CURRENT_SCRIPT_PATH']]
+ include_paths = [env['MLC_TMP_CURRENT_SCRIPT_PATH']]
for inc_path in include_paths:
env['+C_INCLUDE_PATH'].append(inc_path)
env['+CPLUS_INCLUDE_PATH'].append(inc_path)
version = "master"
- env['CM_QAIC_API_SRC_FILE'] = os.path.join(
- env['CM_TMP_CURRENT_SCRIPT_PATH'], version, "QAicInfApi.cpp")
- env['CM_QAIC_API_INC_FILE'] = os.path.join(
- env['CM_TMP_CURRENT_SCRIPT_PATH'], version, "QAicInfApi.h")
+ env['MLC_QAIC_API_SRC_FILE'] = os.path.join(
+ env['MLC_TMP_CURRENT_SCRIPT_PATH'], version, "QAicInfApi.cpp")
+ env['MLC_QAIC_API_INC_FILE'] = os.path.join(
+ env['MLC_TMP_CURRENT_SCRIPT_PATH'], version, "QAicInfApi.h")
return {'return': 0}
diff --git a/script/get-lib-qaic-api/meta.yaml b/script/get-lib-qaic-api/meta.yaml
index aaf6688dd..65d8ebed3 100644
--- a/script/get-lib-qaic-api/meta.yaml
+++ b/script/get-lib-qaic-api/meta.yaml
@@ -8,8 +8,8 @@ deps:
- tags: detect,os
env: {}
new_env_keys:
-- CM_LIB_QAIC_*
-- CM_QAIC_API_*
+- MLC_LIB_QAIC_*
+- MLC_QAIC_API_*
- +C_INCLUDE_PATH
- +CPLUS_INCLUDE_PATH
- +LD_LIBRARY_PATH
@@ -24,4 +24,4 @@ uid: 1e253ae184e44f23
versions:
master:
env:
- CM_LIB_QAIC_VERSION: master
+ MLC_LIB_QAIC_VERSION: master
diff --git a/script/get-llvm/README-extra.md b/script/get-llvm/README-extra.md
index 8020e09ba..a57c16f5a 100644
--- a/script/get-llvm/README-extra.md
+++ b/script/get-llvm/README-extra.md
@@ -2,14 +2,14 @@
This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed llvm on the system and if not found calls the [install script for llvm](../script/install-llvm-prebuilt).
## Exported Variables
-* `CM_LLVM_CLANG_BIN`
-* `CM_LLVM_CLANG_BIN_WITH_PATH`
-* `CM_C_COMPILER_BIN`
-* `CM_C_COMPILER_WITH_PATH`
-* `CM_CXX_COMPILER_BIN`
-* `CM_CXX_COMPILER_WITH_PATH`
-* `CM_COMPILER_*`
-* `CM_LINKER_*`
+* `MLC_LLVM_CLANG_BIN`
+* `MLC_LLVM_CLANG_BIN_WITH_PATH`
+* `MLC_C_COMPILER_BIN`
+* `MLC_C_COMPILER_WITH_PATH`
+* `MLC_CXX_COMPILER_BIN`
+* `MLC_CXX_COMPILER_WITH_PATH`
+* `MLC_COMPILER_*`
+* `MLC_LINKER_*`
## Supported and Tested OS
1. Ubuntu 18.04, 20.04, 22.04
@@ -83,7 +83,7 @@ cm run script "app image corner-detection"
```bash
cm rm cache -f
-cm run script "get llvm" --version=13.0.0 --env.CM_LLVM_PACKAGE=clang+llvm-13.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz
+cm run script "get llvm" --version=13.0.0 --env.MLC_LLVM_PACKAGE=clang+llvm-13.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz
cm run script "app image corner-detection"
```
@@ -91,6 +91,6 @@ cm run script "app image corner-detection"
```bash
cm rm cache -f
-cm run script "get llvm" --version=12.0.0 --env.CM_LLVM_PACKAGE=clang+llvm-12.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz
+cm run script "get llvm" --version=12.0.0 --env.MLC_LLVM_PACKAGE=clang+llvm-12.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz
cm run script "app image corner-detection"
```
diff --git a/script/get-llvm/customize.py b/script/get-llvm/customize.py
index c8bb004d6..5e1eb0edf 100644
--- a/script/get-llvm/customize.py
+++ b/script/get-llvm/customize.py
@@ -14,18 +14,18 @@ def preprocess(i):
env['FILE_NAME_C'] = file_name_c
- if 'CM_LLVM_CLANG_BIN_WITH_PATH' not in env:
+ if 'MLC_LLVM_CLANG_BIN_WITH_PATH' not in env:
r = i['automation'].find_artifact({'file_name': file_name_c,
'env': env,
'os_info': os_info,
'default_path_env_key': 'PATH',
'detect_version': True,
- 'env_path_key': 'CM_LLVM_CLANG_BIN_WITH_PATH',
+ 'env_path_key': 'MLC_LLVM_CLANG_BIN_WITH_PATH',
'run_script_input': i['run_script_input'],
'recursion_spaces': recursion_spaces})
if r['return'] > 0:
if r['return'] == 16:
- env['CM_REQUIRE_INSTALL'] = "yes"
+ env['MLC_REQUIRE_INSTALL'] = "yes"
return {'return': 0}
else:
return r
@@ -37,7 +37,7 @@ def detect_version(i):
r = i['automation'].parse_version({'match_text': r'clang version\s*([\d.]+)',
'group_number': 1,
- 'env_key': 'CM_LLVM_CLANG_VERSION',
+ 'env_key': 'MLC_LLVM_CLANG_VERSION',
'which_env': i['env']})
if r['return'] > 0:
return r
@@ -56,42 +56,42 @@ def postprocess(i):
if r['return'] > 0:
return r
- version = env['CM_LLVM_CLANG_VERSION']
- env['CM_LLVM_CLANG_CACHE_TAGS'] = 'version-' + version
- env['CM_COMPILER_CACHE_TAGS'] = 'version-' + version + ',family-llvm'
- env['CM_COMPILER_FAMILY'] = 'LLVM'
- env['CM_COMPILER_VERSION'] = env['CM_LLVM_CLANG_VERSION']
+ version = env['MLC_LLVM_CLANG_VERSION']
+ env['MLC_LLVM_CLANG_CACHE_TAGS'] = 'version-' + version
+ env['MLC_COMPILER_CACHE_TAGS'] = 'version-' + version + ',family-llvm'
+ env['MLC_COMPILER_FAMILY'] = 'LLVM'
+ env['MLC_COMPILER_VERSION'] = env['MLC_LLVM_CLANG_VERSION']
- found_file_path = env['CM_LLVM_CLANG_BIN_WITH_PATH']
+ found_file_path = env['MLC_LLVM_CLANG_BIN_WITH_PATH']
found_path = os.path.dirname(found_file_path)
file_name_c = os.path.basename(found_file_path)
file_name_cpp = file_name_c.replace("clang", "clang++")
- env['CM_LLVM_CLANG_BIN'] = file_name_c
+ env['MLC_LLVM_CLANG_BIN'] = file_name_c
# General compiler for general program compilation
- env['CM_C_COMPILER_BIN'] = file_name_c
- env['CM_C_COMPILER_WITH_PATH'] = found_file_path
- env['CM_C_COMPILER_FLAG_OUTPUT'] = '-o '
- env['CM_C_COMPILER_FLAG_VERSION'] = '--version'
- env['CM_C_COMPILER_FLAG_INCLUDE'] = '-I'
-
- env['CM_CXX_COMPILER_BIN'] = file_name_cpp
- env['CM_CXX_COMPILER_WITH_PATH'] = os.path.join(found_path, file_name_cpp)
- env['CM_CXX_COMPILER_FLAG_OUTPUT'] = '-o '
- env['CM_CXX_COMPILER_FLAG_VERSION'] = '--version'
- env['CM_CXX_COMPILER_FLAG_INCLUDE'] = '-I'
-
- env['CM_COMPILER_FLAGS_FAST'] = "-O4"
+ env['MLC_C_COMPILER_BIN'] = file_name_c
+ env['MLC_C_COMPILER_WITH_PATH'] = found_file_path
+ env['MLC_C_COMPILER_FLAG_OUTPUT'] = '-o '
+ env['MLC_C_COMPILER_FLAG_VERSION'] = '--version'
+ env['MLC_C_COMPILER_FLAG_INCLUDE'] = '-I'
+
+ env['MLC_CXX_COMPILER_BIN'] = file_name_cpp
+ env['MLC_CXX_COMPILER_WITH_PATH'] = os.path.join(found_path, file_name_cpp)
+ env['MLC_CXX_COMPILER_FLAG_OUTPUT'] = '-o '
+ env['MLC_CXX_COMPILER_FLAG_VERSION'] = '--version'
+ env['MLC_CXX_COMPILER_FLAG_INCLUDE'] = '-I'
+
+ env['MLC_COMPILER_FLAGS_FAST'] = "-O4"
# "-flto" - this flag is not always available (requires LLVMgold.so)
- env['CM_LINKER_FLAGS_FAST'] = "-O4"
- env['CM_COMPILER_FLAGS_DEBUG'] = "-O0"
- env['CM_LINKER_FLAGS_DEBUG'] = "-O0"
- env['CM_COMPILER_FLAGS_DEFAULT'] = "-O2"
- env['CM_LINKER_FLAGS_DEFAULT'] = "-O2"
+ env['MLC_LINKER_FLAGS_FAST'] = "-O4"
+ env['MLC_COMPILER_FLAGS_DEBUG'] = "-O0"
+ env['MLC_LINKER_FLAGS_DEBUG'] = "-O0"
+ env['MLC_COMPILER_FLAGS_DEFAULT'] = "-O2"
+ env['MLC_LINKER_FLAGS_DEFAULT'] = "-O2"
- env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_LLVM_CLANG_BIN_WITH_PATH']
+ env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_LLVM_CLANG_BIN_WITH_PATH']
return {'return': 0, 'version': version}
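The `parse_version` call renamed near the top of this file boils down to a single regular-expression match over the compiler's `--version` output. A self-contained approximation (the sample output string is illustrative):

```python
import re

# Approximation of parse_version() with the pattern used above.
out = 'clang version 17.0.6 (https://github.com/llvm/llvm-project ...)'
m = re.search(r'clang version\s*([\d.]+)', out)
version = m.group(1) if m else None
assert version == '17.0.6'
# The automation would then store it under env['MLC_LLVM_CLANG_VERSION'].
```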
diff --git a/script/get-llvm/meta.yaml b/script/get-llvm/meta.yaml
index 49795a0ba..cd6242287 100644
--- a/script/get-llvm/meta.yaml
+++ b/script/get-llvm/meta.yaml
@@ -5,25 +5,25 @@ cache: true
category: Compiler automation
clean_files: []
env:
- CM_REQUIRE_INSTALL: 'no'
+ MLC_REQUIRE_INSTALL: 'no'
name: Detect or install LLVM compiler
new_env_keys:
-- CM_LLVM_*
-- CM_C_COMPILER_*
-- CM_CXX_COMPILER_*
-- CM_COMPILER_*
-- CM_LINKER_*
+- MLC_LLVM_*
+- MLC_C_COMPILER_*
+- MLC_CXX_COMPILER_*
+- MLC_COMPILER_*
+- MLC_LINKER_*
- + CFLAGS
- + CXXFLAGS
- + FFLAGS
- + LDFLAGS
-- +CM_HOST_OS_DEFAULT_INCLUDE_PATH
+- +MLC_HOST_OS_DEFAULT_INCLUDE_PATH
- +PATH
post_deps:
- tags: get,compiler-flags
prehook_deps:
- enable_if_env:
- CM_REQUIRE_INSTALL:
+ MLC_REQUIRE_INSTALL:
- 'yes'
names: llvm-install
reuse_version: true
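`new_env_keys` patterns such as `MLC_LLVM_*` act as an export filter: only matching keys leave the script's environment. A plausible sketch of that filtering using shell-style wildcards (the use of `fnmatch` is an assumption about the semantics, not the mlcflow implementation):

```python
import fnmatch

def filter_new_env(env, patterns):
    # Keep only keys matching at least one declared pattern.
    return {k: v for k, v in env.items()
            if any(fnmatch.fnmatch(k, p) for p in patterns)}

exported = filter_new_env(
    {'MLC_LLVM_CLANG_VERSION': '17.0.6', 'MLC_TMP_SCRATCH': 'x'},
    ['MLC_LLVM_*', 'MLC_COMPILER_*'])
assert exported == {'MLC_LLVM_CLANG_VERSION': '17.0.6'}
```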
diff --git a/script/get-llvm/run.bat b/script/get-llvm/run.bat
index 632b201da..829bfa2aa 100644
--- a/script/get-llvm/run.bat
+++ b/script/get-llvm/run.bat
@@ -1,3 +1,3 @@
-%CM_LLVM_CLANG_BIN_WITH_PATH% --version > tmp-ver.out
+%MLC_LLVM_CLANG_BIN_WITH_PATH% --version > tmp-ver.out
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/get-llvm/run.sh b/script/get-llvm/run.sh
index c24cbb1ad..cb1c45ece 100644
--- a/script/get-llvm/run.sh
+++ b/script/get-llvm/run.sh
@@ -1,4 +1,4 @@
#!/bin/bash
-clang_bin=${CM_LLVM_CLANG_BIN_WITH_PATH}
+clang_bin=${MLC_LLVM_CLANG_BIN_WITH_PATH}
${clang_bin} --version > tmp-ver.out
test $? -eq 0 || exit 1
diff --git a/script/get-microtvm/README-extra.md b/script/get-microtvm/README-extra.md
index 5e8876519..3a27d6e1a 100644
--- a/script/get-microtvm/README-extra.md
+++ b/script/get-microtvm/README-extra.md
@@ -2,4 +2,4 @@
This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) clones the git repository of [Microtvm](https://github.com/octoml/microtvm) and cache it in CM for reuse across other CM scripts.
## Exported Variables
-1. [CN_MICROTVM_SOURCE](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-microtvm/customize.py#L24): Location in CM cache where microtvm git repository is cloned.
+1. [MLC_MICROTVM_SOURCE](https://github.com/mlcommons/ck/blob/master/mlc-mlops/script/get-microtvm/customize.py#L24): Location in the CM cache where the microtvm git repository is cloned.
diff --git a/script/get-microtvm/customize.py b/script/get-microtvm/customize.py
index 8572322f4..fbfa55b2f 100644
--- a/script/get-microtvm/customize.py
+++ b/script/get-microtvm/customize.py
@@ -10,10 +10,10 @@ def preprocess(i):
if os_info['platform'] == 'windows':
return {'return': 1, 'error': 'Windows is not supported in this script yet'}
env = i['env']
- if 'CM_GIT_DEPTH' not in env:
- env['CM_GIT_DEPTH'] = ''
- if 'CM_GIT_RECURSE_SUBMODULES' not in env:
- env['CM_GIT_RECURSE_SUBMODULES'] = ''
+ if 'MLC_GIT_DEPTH' not in env:
+ env['MLC_GIT_DEPTH'] = ''
+ if 'MLC_GIT_RECURSE_SUBMODULES' not in env:
+ env['MLC_GIT_RECURSE_SUBMODULES'] = ''
return {'return': 0}
@@ -23,6 +23,6 @@ def postprocess(i):
env = i['env']
state = i['state']
- env['CM_MICROTVM_SOURCE'] = os.path.join(os.getcwd(), 'microtvm')
+ env['MLC_MICROTVM_SOURCE'] = os.path.join(os.getcwd(), 'microtvm')
return {'return': 0}
diff --git a/script/get-microtvm/meta.yaml b/script/get-microtvm/meta.yaml
index c47a88f31..9540bce1f 100644
--- a/script/get-microtvm/meta.yaml
+++ b/script/get-microtvm/meta.yaml
@@ -7,16 +7,16 @@ default_version: main
deps:
- tags: detect,os
env:
- CM_GIT_AUTH: 'yes'
- CM_GIT_DEPTH: ''
- CM_GIT_PATCH: 'no'
- CM_GIT_URL: https://github.com/mlcommons/tiny_results_v1.0
+ MLC_GIT_AUTH: 'yes'
+ MLC_GIT_DEPTH: ''
+ MLC_GIT_PATCH: 'no'
+ MLC_GIT_URL: https://github.com/mlcommons/tiny_results_v1.0
input_mapping:
- ssh: CM_GIT_SSH
+ ssh: MLC_GIT_SSH
local_env_keys:
-- CM_GIT_*
+- MLC_GIT_*
new_env_keys:
-- CM_MICROTVM_*
+- MLC_MICROTVM_*
tags:
- get
- src
@@ -27,12 +27,12 @@ uid: a9cad70972a140b9
variations:
full-history:
env:
- CM_GIT_DEPTH: --depth 10
+ MLC_GIT_DEPTH: ''
short-history:
env:
- CM_GIT_DEPTH: --depth 10
+ MLC_GIT_DEPTH: --depth 10
versions:
custom: {}
main:
env:
- CM_GIT_CHECKOUT: main
+ MLC_GIT_CHECKOUT: main
diff --git a/script/get-microtvm/run.sh b/script/get-microtvm/run.sh
index 2bffb48d8..4dae49467 100644
--- a/script/get-microtvm/run.sh
+++ b/script/get-microtvm/run.sh
@@ -1,12 +1,12 @@
#!/bin/bash
CUR_DIR=$PWD
-SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH}
+SCRIPT_DIR=${MLC_TMP_CURRENT_SCRIPT_PATH}
echo "******************************************************"
-echo "Cloning microtvm from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT} ${CM_GIT_DEPTH} ${CM_GIT_RECURSE_SUBMODULES}..."
+echo "Cloning microtvm from ${MLC_GIT_URL} with branch ${MLC_GIT_CHECKOUT} ${MLC_GIT_DEPTH} ${MLC_GIT_RECURSE_SUBMODULES}..."
if [ ! -d "microtvm" ]; then
- git clone ${CM_GIT_RECURSE_SUBMODULES} -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} ${CM_GIT_DEPTH} microtvm
+ git clone ${MLC_GIT_RECURSE_SUBMODULES} -b "${MLC_GIT_CHECKOUT}" ${MLC_GIT_URL} ${MLC_GIT_DEPTH} microtvm
if [ "${?}" != "0" ]; then exit 1; fi
fi
diff --git a/script/get-ml-model-3d-unet-kits19/customize.py b/script/get-ml-model-3d-unet-kits19/customize.py
index d24b386c9..df4c46806 100644
--- a/script/get-ml-model-3d-unet-kits19/customize.py
+++ b/script/get-ml-model-3d-unet-kits19/customize.py
@@ -12,14 +12,14 @@ def preprocess(i):
cm = automation.action_object
- path = os.path.dirname(env['CM_ML_MODEL_FILE_WITH_PATH'])
+ path = os.path.dirname(env['MLC_ML_MODEL_FILE_WITH_PATH'])
- if env.get("CM_DAE_EXTRACT_DOWNLOADED", " ") != " ":
- env['CM_ML_MODEL_PATH'] = os.path.join(path, env['CM_ML_MODEL_FILE'])
- env['CM_ML_MODEL_FILE_WITH_PATH'] = env['CM_ML_MODEL_PATH']
+ if env.get("MLC_DAE_EXTRACT_DOWNLOADED", " ") != " ":
+ env['MLC_ML_MODEL_PATH'] = os.path.join(path, env['MLC_ML_MODEL_FILE'])
+ env['MLC_ML_MODEL_FILE_WITH_PATH'] = env['MLC_ML_MODEL_PATH']
else:
- env['CM_ML_MODEL_PATH'] = path
+ env['MLC_ML_MODEL_PATH'] = path
- env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_PATH']
+ env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_ML_MODEL_PATH']
return {'return': 0}
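The branch renamed above distinguishes two layouts: if the download-and-extract step unpacked an archive (`MLC_DAE_EXTRACT_DOWNLOADED` set), the model lives in the extracted directory under `MLC_ML_MODEL_FILE`; otherwise the downloaded file's own directory is the model path. A condensed sketch (`resolve_model_path` is illustrative):

```python
import os

def resolve_model_path(env):
    path = os.path.dirname(env['MLC_ML_MODEL_FILE_WITH_PATH'])
    if env.get('MLC_DAE_EXTRACT_DOWNLOADED', ' ') != ' ':
        # Extracted archive: model path is dir + model file name.
        return os.path.join(path, env['MLC_ML_MODEL_FILE'])
    return path  # plain download: use the containing directory
```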
diff --git a/script/get-ml-model-3d-unet-kits19/meta.yaml b/script/get-ml-model-3d-unet-kits19/meta.yaml
index 658f306a7..532152570 100644
--- a/script/get-ml-model-3d-unet-kits19/meta.yaml
+++ b/script/get-ml-model-3d-unet-kits19/meta.yaml
@@ -4,14 +4,14 @@ automation_uid: 5b4e0237da074764
cache: true
category: AI/ML models
env:
- CM_ML_MODEL: 3d-unet-kits19
- CM_ML_MODEL_DATASET: kits19
- CM_ML_MODEL_RETRAINING: 'no'
- CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no'
+ MLC_ML_MODEL: 3d-unet-kits19
+ MLC_ML_MODEL_DATASET: kits19
+ MLC_ML_MODEL_RETRAINING: 'no'
+ MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no'
new_env_keys:
-- CM_ML_MODEL_*
+- MLC_ML_MODEL_*
print_env_at_the_end:
- CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model
+ MLC_ML_MODEL_FILE_WITH_PATH: Path to the ML model
tags:
- get
- ml-model
@@ -24,85 +24,85 @@ variations:
fp32:
default: true
env:
- CM_ML_MODEL_INPUT_DATA_TYPES: fp32
- CM_ML_MODEL_PRECISION: fp32
- CM_ML_MODEL_WEIGHT_DATA_TYPES: fp32
+ MLC_ML_MODEL_INPUT_DATA_TYPES: fp32
+ MLC_ML_MODEL_PRECISION: fp32
+ MLC_ML_MODEL_WEIGHT_DATA_TYPES: fp32
group: precision
onnx:
default: true
env:
- CM_ML_MODEL_FRAMEWORK: onnx
+ MLC_ML_MODEL_FRAMEWORK: onnx
group: framework
onnx,fp32:
deps:
- env:
- CM_DOWNLOAD_CHECKSUM: 82f0618fde78f9839e7c712274019b4a
- CM_DOWNLOAD_FILENAME: 3dunet_kits19_128x128x128_dynbatch.onnx
- CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH
- CM_DOWNLOAD_URL: https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128_dynbatch.onnx?download=1
+ MLC_DOWNLOAD_CHECKSUM: 82f0618fde78f9839e7c712274019b4a
+ MLC_DOWNLOAD_FILENAME: 3dunet_kits19_128x128x128_dynbatch.onnx
+ MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH
+ MLC_DOWNLOAD_URL: https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128_dynbatch.onnx?download=1
extra_cache_tags: 3d-unet,medical-imaging
force-cache: true
tags: download,file,download-file,_wget
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
env:
- CM_ML_MODEL_ACCURACY: '0.86170'
- CM_ML_MODEL_FILE: 3dunet_kits19_128x128x128_dynbatch.onnx
+ MLC_ML_MODEL_ACCURACY: '0.86170'
+ MLC_ML_MODEL_FILE: 3dunet_kits19_128x128x128_dynbatch.onnx
pytorch:
env:
- CM_ML_MODEL_FRAMEWORK: pytorch
+ MLC_ML_MODEL_FRAMEWORK: pytorch
group: framework
pytorch,fp32:
deps:
- env:
- CM_DOWNLOAD_CHECKSUM: 2251109371f408c9f10a4320ffdcaef8
- CM_DOWNLOAD_FILENAME: 3dunet_kits19_pytorch.ptc
- CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH
- CM_DOWNLOAD_URL: https://zenodo.org/record/5597155/files/3dunet_kits19_pytorch.ptc?download=1
+ MLC_DOWNLOAD_CHECKSUM: 2251109371f408c9f10a4320ffdcaef8
+ MLC_DOWNLOAD_FILENAME: 3dunet_kits19_pytorch.ptc
+ MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH
+ MLC_DOWNLOAD_URL: https://zenodo.org/record/5597155/files/3dunet_kits19_pytorch.ptc?download=1
extra_cache_tags: 3d-unet,medical-imaging
force-cache: true
tags: download,file,download-file,_wget
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
env:
- CM_ML_MODEL_ACCURACY: '0.86170'
- CM_ML_MODEL_FILE: 3dunet_kits19_pytorch.ptc
+ MLC_ML_MODEL_ACCURACY: '0.86170'
+ MLC_ML_MODEL_FILE: 3dunet_kits19_pytorch.ptc
pytorch,fp32,weights:
deps:
- env:
- CM_DAE_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH
- CM_DAE_URL: https://zenodo.org/record/5597155/files/3dunet_kits19_pytorch_checkpoint.pth?download=1
- CM_DOWNLOAD_CHECKSUM: 09c696e3ec13d83c628498bcd831eb5b
- CM_DOWNLOAD_FILENAME: 3dunet_kits19_pytorch_checkpoint.pth
+ MLC_DAE_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH
+ MLC_DAE_URL: https://zenodo.org/record/5597155/files/3dunet_kits19_pytorch_checkpoint.pth?download=1
+ MLC_DOWNLOAD_CHECKSUM: 09c696e3ec13d83c628498bcd831eb5b
+ MLC_DOWNLOAD_FILENAME: 3dunet_kits19_pytorch_checkpoint.pth
extra_cache_tags: 3d-unet,medical-imaging
force-cache: true
tags: download-and-extract,_wget,_extract
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
env:
- CM_ML_MODEL_ACCURACY: '0.86170'
- CM_ML_MODEL_FILE: 3dunet_kits19_pytorch_checkpoint.pth
+ MLC_ML_MODEL_ACCURACY: '0.86170'
+ MLC_ML_MODEL_FILE: 3dunet_kits19_pytorch_checkpoint.pth
tensorflow:
alias: tf
tf:
env:
- CM_ML_MODEL_FRAMEWORK: tensorflow
+ MLC_ML_MODEL_FRAMEWORK: tensorflow
group: framework
tf,fp32:
deps:
- env:
- CM_DAE_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH
- CM_DAE_URL: https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.tf.zip?download=1
- CM_DOWNLOAD_CHECKSUM: 9497108bd0504ae8f85a764a807b76a9
- CM_DOWNLOAD_FILENAME: 3dunet_kits19_128x128x128.tf.zip
+ MLC_DAE_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH
+ MLC_DAE_URL: https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.tf.zip?download=1
+ MLC_DOWNLOAD_CHECKSUM: 9497108bd0504ae8f85a764a807b76a9
+ MLC_DOWNLOAD_FILENAME: 3dunet_kits19_128x128x128.tf.zip
extra_cache_tags: 3d-unet,medical-imaging
force-cache: true
tags: download-and-extract,_wget,_extract
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
env:
- CM_ML_MODEL_ACCURACY: '0.86170'
- CM_ML_MODEL_FILE: 3dunet_kits19_128x128x128.tf
+ MLC_ML_MODEL_ACCURACY: '0.86170'
+ MLC_ML_MODEL_FILE: 3dunet_kits19_128x128x128.tf
weights:
env:
- CM_MODEL_WEIGHTS_FILE: 'yes'
+ MLC_MODEL_WEIGHTS_FILE: 'yes'
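Combined variation keys such as `onnx,fp32` or `tf,fp32` apply their `env` only when every listed variation is selected, which is how one meta file pins a distinct URL and checksum per framework/precision pair. A simplified model of that resolution (groups and defaults omitted; `active_env` is illustrative):

```python
def active_env(variations, selected):
    env = {}
    for name, block in variations.items():
        # "onnx,fp32" applies only when both onnx and fp32 are active.
        if set(name.split(',')) <= set(selected):
            env.update(block.get('env', {}))
    return env

env = active_env(
    {'onnx,fp32': {'env': {
        'MLC_DOWNLOAD_CHECKSUM': '82f0618fde78f9839e7c712274019b4a'}}},
    ['onnx', 'fp32'])
assert 'MLC_DOWNLOAD_CHECKSUM' in env
```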
diff --git a/script/get-ml-model-abtf-ssd-pytorch/customize.py b/script/get-ml-model-abtf-ssd-pytorch/customize.py
index 7d20952a2..1e3df4acf 100644
--- a/script/get-ml-model-abtf-ssd-pytorch/customize.py
+++ b/script/get-ml-model-abtf-ssd-pytorch/customize.py
@@ -8,15 +8,15 @@ def preprocess(i):
env = i['env']
- if env.get('CM_ML_MODEL_LOCAL', '') == 'yes':
- ml_model = env.get('CM_ML_MODEL_FILENAME', '')
+ if env.get('MLC_ML_MODEL_LOCAL', '') == 'yes':
+ ml_model = env.get('MLC_ML_MODEL_FILENAME', '')
if ml_model == '':
return {'return': 1, 'error': '_local.{model name.pth} is not specified'}
if not os.path.isabs(ml_model):
ml_model = os.path.join(
env.get(
- 'CM_TMP_CURRENT_PATH',
+ 'MLC_TMP_CURRENT_PATH',
''),
ml_model)
@@ -24,7 +24,7 @@ def preprocess(i):
return {'return': 1,
'error': 'ML model {} is not found'.format(ml_model)}
- env['CM_ML_MODEL_FILE_WITH_PATH'] = ml_model
+ env['MLC_ML_MODEL_FILE_WITH_PATH'] = ml_model
return {'return': 0}
@@ -33,17 +33,17 @@ def postprocess(i):
env = i['env']
- if env.get('CM_ML_MODEL_FILE_WITH_PATH', '') == '':
- env['CM_ML_MODEL_FILE_WITH_PATH'] = 'model-weights-skipped'
+ if env.get('MLC_ML_MODEL_FILE_WITH_PATH', '') == '':
+ env['MLC_ML_MODEL_FILE_WITH_PATH'] = 'model-weights-skipped'
- env['CM_ML_MODEL_FILE'] = os.path.basename(
- env['CM_ML_MODEL_FILE_WITH_PATH'])
+ env['MLC_ML_MODEL_FILE'] = os.path.basename(
+ env['MLC_ML_MODEL_FILE_WITH_PATH'])
- if env.get('CM_ABTF_SSD_PYTORCH', '') == '':
- env['CM_ABTF_SSD_PYTORCH'] = 'model-code-skipped'
+ if env.get('MLC_ABTF_SSD_PYTORCH', '') == '':
+ env['MLC_ABTF_SSD_PYTORCH'] = 'model-code-skipped'
- env['CM_ML_MODEL_CODE_WITH_PATH'] = env['CM_ABTF_SSD_PYTORCH']
+ env['MLC_ML_MODEL_CODE_WITH_PATH'] = env['MLC_ABTF_SSD_PYTORCH']
- env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH']
+ env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_ML_MODEL_FILE_WITH_PATH']
return {'return': 0}
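For the `_local.#` variation handled above, a relative model filename is resolved against the invocation directory before the existence check. In short (illustrative helper, not the script's API; the script returns an error dict instead of raising):

```python
import os

def resolve_local_model(env):
    ml_model = env.get('MLC_ML_MODEL_FILENAME', '')
    if ml_model == '':
        raise ValueError('_local.{model name.pth} is not specified')
    if not os.path.isabs(ml_model):
        # Relative names are taken from where the user invoked the tool.
        ml_model = os.path.join(env.get('MLC_TMP_CURRENT_PATH', ''), ml_model)
    if not os.path.exists(ml_model):
        raise FileNotFoundError(f'ML model {ml_model} is not found')
    return ml_model
```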
diff --git a/script/get-ml-model-abtf-ssd-pytorch/meta.yaml b/script/get-ml-model-abtf-ssd-pytorch/meta.yaml
index b346288d2..b9f70ebc3 100644
--- a/script/get-ml-model-abtf-ssd-pytorch/meta.yaml
+++ b/script/get-ml-model-abtf-ssd-pytorch/meta.yaml
@@ -21,13 +21,13 @@ tags:
input_mapping:
- model_code_git_url: CM_ABTF_MODEL_CODE_GIT_URL
- model_code_git_branch: CM_ABTF_MODEL_CODE_GIT_BRANCH
+ model_code_git_url: MLC_ABTF_MODEL_CODE_GIT_URL
+ model_code_git_branch: MLC_ABTF_MODEL_CODE_GIT_BRANCH
default_env:
- CM_ABTF_MODEL_CODE_GIT_URL: https://github.com/mlcommons/abtf-ssd-pytorch
- CM_ABTF_MODEL_CODE_GIT_BRANCH: cognata
+ MLC_ABTF_MODEL_CODE_GIT_URL: https://github.com/mlcommons/abtf-ssd-pytorch
+ MLC_ABTF_MODEL_CODE_GIT_BRANCH: cognata
deps:
@@ -39,122 +39,122 @@ deps:
- abtf-ssd-pytorch-git-repo
- abtf-ml-model-code-git-repo
skip_if_env:
- CM_SKIP_MODEL_CODE_DOWNLOAD:
+ MLC_SKIP_MODEL_CODE_DOWNLOAD:
- 'yes'
env:
- CM_GIT_AUTH: 'yes'
- CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_ABTF_SSD_PYTORCH
+ MLC_GIT_AUTH: 'yes'
+ MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_ABTF_SSD_PYTORCH
extra_cache_tags: abtf,ssd,pytorch,ml-model,cmc
update_tags_from_env_with_prefix:
_repo.:
- - CM_ABTF_MODEL_CODE_GIT_URL
+ - MLC_ABTF_MODEL_CODE_GIT_URL
_branch.:
- - CM_ABTF_MODEL_CODE_GIT_BRANCH
+ - MLC_ABTF_MODEL_CODE_GIT_BRANCH
- tags: download,file
env:
- CM_DOWNLOAD_CHECKSUM: <<<CM_ML_MODEL_CHECKSUM>>>
- CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH
- CM_DOWNLOAD_FILENAME: <<<CM_ML_MODEL_FILENAME>>>
- CM_VERIFY_SSL: 'no'
+ MLC_DOWNLOAD_CHECKSUM: <<<MLC_ML_MODEL_CHECKSUM>>>
+ MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH
+ MLC_DOWNLOAD_FILENAME: <<<MLC_ML_MODEL_FILENAME>>>
+ MLC_VERIFY_SSL: 'no'
force_cache: true
names:
- abtf-ml-model-weights
- abtf-ml-model-weights-download
skip_if_env:
- CM_SKIP_MODEL_WEIGHTS_DOWNLOAD:
+ MLC_SKIP_MODEL_WEIGHTS_DOWNLOAD:
- 'yes'
update_tags_from_env_with_prefix:
_url.:
- - CM_ML_MODEL_URL
+ - MLC_ML_MODEL_URL
new_env_keys:
-- CM_ML_MODEL_*
+- MLC_ML_MODEL_*
print_env_at_the_end:
- CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model weights
- CM_ML_MODEL_CODE_WITH_PATH: Path to the ML model code
+ MLC_ML_MODEL_FILE_WITH_PATH: Path to the ML model weights
+ MLC_ML_MODEL_CODE_WITH_PATH: Path to the ML model code
variations:
e01:
env:
- CM_ML_MODEL: abtf-ssd-pytorch
- CM_ML_MODEL_CHECKSUM: 31d177228308bbe43917c912b01c2d67
- CM_ML_MODEL_DATASET: coco
- CM_ML_MODEL_FILENAME: SSD_e1.pth
- CM_ML_MODEL_IMAGE_HEIGHT: '300'
- CM_ML_MODEL_IMAGE_WIDTH: '300'
- CM_ML_MODEL_URL: https://www.dropbox.com/scl/fi/7nqt5z8gplgeaveo933eo/SSD_e1.pth?rlkey=7lyb4qs2hzg491bfprwcuvx54&dl=0
+ MLC_ML_MODEL: abtf-ssd-pytorch
+ MLC_ML_MODEL_CHECKSUM: 31d177228308bbe43917c912b01c2d67
+ MLC_ML_MODEL_DATASET: coco
+ MLC_ML_MODEL_FILENAME: SSD_e1.pth
+ MLC_ML_MODEL_IMAGE_HEIGHT: '300'
+ MLC_ML_MODEL_IMAGE_WIDTH: '300'
+ MLC_ML_MODEL_URL: https://www.dropbox.com/scl/fi/7nqt5z8gplgeaveo933eo/SSD_e1.pth?rlkey=7lyb4qs2hzg491bfprwcuvx54&dl=0
group: model-weights
e65:
env:
- CM_ML_MODEL: abtf-ssd-pytorch
- CM_ML_MODEL_CHECKSUM: f769eb0321ac7fc1c16f982db6131d2f
- CM_ML_MODEL_DATASET: coco
- CM_ML_MODEL_FILENAME: SSD_e65.pth
- CM_ML_MODEL_IMAGE_HEIGHT: '300'
- CM_ML_MODEL_IMAGE_WIDTH: '300'
- CM_ML_MODEL_URL: https://www.dropbox.com/scl/fi/wkegl2qxvm8cefbqq00o3/SSD_e65.pth?rlkey=ez26jafjdcly665npl6pdqxl8&dl=0
+ MLC_ML_MODEL: abtf-ssd-pytorch
+ MLC_ML_MODEL_CHECKSUM: f769eb0321ac7fc1c16f982db6131d2f
+ MLC_ML_MODEL_DATASET: coco
+ MLC_ML_MODEL_FILENAME: SSD_e65.pth
+ MLC_ML_MODEL_IMAGE_HEIGHT: '300'
+ MLC_ML_MODEL_IMAGE_WIDTH: '300'
+ MLC_ML_MODEL_URL: https://www.dropbox.com/scl/fi/wkegl2qxvm8cefbqq00o3/SSD_e65.pth?rlkey=ez26jafjdcly665npl6pdqxl8&dl=0
group: model-weights
abtf-mvp:
env:
- CM_ML_MODEL: abtf-ssd-pytorch
- CM_ML_MODEL_CHECKSUM: 1ab66f523715f9564603626e94e59c8c
- CM_ML_MODEL_DATASET: cognata
- CM_ML_MODEL_FILENAME: baseline_8MP_ss_scales_all_ep60.pth
- CM_ML_MODEL_IMAGE_SIZE: '8M'
- CM_ML_MODEL_URL: https://www.dropbox.com/scl/fi/9un2i2169rgebui4xklnm/baseline_8MP_ss_scales_all_ep60.pth?rlkey=sez3dnjep4waa09s5uy4r3wmk&st=z859czgk&dl=0
+ MLC_ML_MODEL: abtf-ssd-pytorch
+ MLC_ML_MODEL_CHECKSUM: 1ab66f523715f9564603626e94e59c8c
+ MLC_ML_MODEL_DATASET: cognata
+ MLC_ML_MODEL_FILENAME: baseline_8MP_ss_scales_all_ep60.pth
+ MLC_ML_MODEL_IMAGE_SIZE: '8M'
+ MLC_ML_MODEL_URL: https://www.dropbox.com/scl/fi/9un2i2169rgebui4xklnm/baseline_8MP_ss_scales_all_ep60.pth?rlkey=sez3dnjep4waa09s5uy4r3wmk&st=z859czgk&dl=0
group: model-weights
abtf-poc:
default_variations:
download-tool: rclone
env:
- CM_ML_MODEL: abtf-ssd-pytorch
- CM_ML_MODEL_CHECKSUM: 26845c3b9573ce115ef29dca4ae5be14
- CM_ML_MODEL_DATASET: cognata
- CM_ML_MODEL_FILENAME: baseline_8MP_ss_scales_fm1_5x5_all_ep60.pth
- CM_ML_MODEL_IMAGE_SIZE: '8M'
+ MLC_ML_MODEL: abtf-ssd-pytorch
+ MLC_ML_MODEL_CHECKSUM: 26845c3b9573ce115ef29dca4ae5be14
+ MLC_ML_MODEL_DATASET: cognata
+ MLC_ML_MODEL_FILENAME: baseline_8MP_ss_scales_fm1_5x5_all_ep60.pth
+ MLC_ML_MODEL_IMAGE_SIZE: '8M'
group: model-weights
abtf-poc,gdrive:
env:
- CM_ML_MODEL_URL: https://drive.google.com/file/d/1kfJR_bs54KONprVd51kZu0PYmmh1wZZa/view
+ MLC_ML_MODEL_URL: https://drive.google.com/file/d/1kfJR_bs54KONprVd51kZu0PYmmh1wZZa/view
abtf-poc,rclone:
env:
- CM_RCLONE_COPY_USING: copyurl
- CM_ML_MODEL_URL: https://automotive.mlcommons-storage.org/SSD_ResNet50%2Fbaseline_8MP_ss_scales_fm1_5x5_all_ep60.pth
- CM_RCLONE_CONFIG_CMD: ''
+ MLC_RCLONE_COPY_USING: copyurl
+ MLC_ML_MODEL_URL: https://automotive.mlcommons-storage.org/SSD_ResNet50%2Fbaseline_8MP_ss_scales_fm1_5x5_all_ep60.pth
+ MLC_RCLONE_CONFIG_CMD: ''
local.#:
env:
- CM_ML_MODEL_FILENAME: '#'
- CM_ML_MODEL_LOCAL: 'yes'
- CM_SKIP_MODEL_WEIGHTS_DOWNLOAD: 'yes'
+ MLC_ML_MODEL_FILENAME: '#'
+ MLC_ML_MODEL_LOCAL: 'yes'
+ MLC_SKIP_MODEL_WEIGHTS_DOWNLOAD: 'yes'
group: model-weights
skip_weights:
default: true
env:
- CM_SKIP_MODEL_WEIGHTS_DOWNLOAD: 'yes'
+ MLC_SKIP_MODEL_WEIGHTS_DOWNLOAD: 'yes'
group: model-weights
skip_code:
env:
- CM_SKIP_MODEL_CODE_DOWNLOAD: 'yes'
+ MLC_SKIP_MODEL_CODE_DOWNLOAD: 'yes'
rclone:
group: download-tool
env:
- CM_RCLONE_COPY_USING: copyurl
+ MLC_RCLONE_COPY_USING: copyurl
adr:
abtf-ml-model-weights-download:
tags: _rclone
@@ -168,7 +168,7 @@ variations:
gdown:
group: download-tool
env:
- CM_DOWNLOAD_EXTRA_OPTIONS: " --fuzzy"
+ MLC_DOWNLOAD_EXTRA_OPTIONS: " --fuzzy"
adr:
abtf-ml-model-weights-download:
tags: _gdown
diff --git a/script/get-ml-model-bert-base-squad/meta.yaml b/script/get-ml-model-bert-base-squad/meta.yaml
index 477f5570a..11acdde02 100644
--- a/script/get-ml-model-bert-base-squad/meta.yaml
+++ b/script/get-ml-model-bert-base-squad/meta.yaml
@@ -4,29 +4,29 @@ automation_uid: 5b4e0237da074764
cache: true
category: AI/ML models
env:
- CM_ML_MODEL: BERT
- CM_ML_MODEL_DATASET: squad-1.1
- CM_ML_MODEL_MAX_SEQ_LENGTH: '384'
- CM_ML_MODEL_NAME: MLPERF BERT Base on SQuAD v1.1
- CM_TMP_ML_MODEL_REQUIRE_DOWNLOAD: 'no'
+ MLC_ML_MODEL: BERT
+ MLC_ML_MODEL_DATASET: squad-1.1
+ MLC_ML_MODEL_MAX_SEQ_LENGTH: '384'
+ MLC_ML_MODEL_NAME: MLPERF BERT Base on SQuAD v1.1
+ MLC_TMP_ML_MODEL_REQUIRE_DOWNLOAD: 'no'
new_env_keys:
-- CM_ML_MODEL*
+- MLC_ML_MODEL*
post_deps:
- tags: get,bert,squad,vocab
prehook_deps:
- enable_if_env:
- CM_TMP_ML_MODEL_REQUIRE_DOWNLOAD: 'yes'
+ MLC_TMP_ML_MODEL_REQUIRE_DOWNLOAD: 'yes'
env:
- CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH
- CM_EXTRACT_EXTRACTED_FILENAME: <<<CM_ML_MODEL_FILE>>>
- CM_EXTRACT_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH
+ MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH
+ MLC_EXTRACT_EXTRACTED_FILENAME: <<<MLC_ML_MODEL_FILE>>>
+ MLC_EXTRACT_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH
tags: download-and-extract
update_tags_from_env_with_prefix:
_url.:
- - CM_PACKAGE_URL
+ - MLC_PACKAGE_URL
print_env_at_the_end:
- CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH: Path to the BERT vocab file
- CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model
+ MLC_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH: Path to the BERT vocab file
+ MLC_ML_MODEL_FILE_WITH_PATH: Path to the ML model
tags:
- get
- ml-model
@@ -40,12 +40,12 @@ uid: b3b10b452ce24c5f
variations:
deepsparse:
env:
- CM_ML_MODEL_FRAMEWORK: deepsparse
- CM_ML_MODEL_INPUT_IDS_NAME: input_ids
- CM_ML_MODEL_INPUT_MASK_NAME: input_mask
- CM_ML_MODEL_INPUT_SEGMENTS_NAME: segment_ids
- CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: output_end_logits
- CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: output_start_logits
+ MLC_ML_MODEL_FRAMEWORK: deepsparse
+ MLC_ML_MODEL_INPUT_IDS_NAME: input_ids
+ MLC_ML_MODEL_INPUT_MASK_NAME: input_mask
+ MLC_ML_MODEL_INPUT_SEGMENTS_NAME: segment_ids
+ MLC_ML_MODEL_OUTPUT_END_LOGITS_NAME: output_end_logits
+ MLC_ML_MODEL_OUTPUT_START_LOGITS_NAME: output_start_logits
group: framework
deepsparse,int8:
deps:
@@ -53,16 +53,16 @@ variations:
- neural-magic-zoo-downloader
tags: get,ml-model,zoo,deepsparse,_bert-base-pruned95_obs_quant-none
env:
- CM_ML_MODEL_F1: '87.89'
- CM_ML_MODEL_FILE: model.onnx
- CM_PRUNING_PERCENTAGE: '95'
+ MLC_ML_MODEL_F1: '87.89'
+ MLC_ML_MODEL_FILE: model.onnx
+ MLC_PRUNING_PERCENTAGE: '95'
fp32:
default: true
env:
- CM_ML_MODEL_PRECISION: fp32
+ MLC_ML_MODEL_PRECISION: fp32
group: precision
int8:
env:
- CM_ML_MODEL_PRECISION: int8
- CM_ML_MODEL_QUANTIZED: 'yes'
+ MLC_ML_MODEL_PRECISION: int8
+ MLC_ML_MODEL_QUANTIZED: 'yes'
group: precision
diff --git a/script/get-ml-model-bert-large-squad/customize.py b/script/get-ml-model-bert-large-squad/customize.py
index 76eddaaed..3de4c51d5 100644
--- a/script/get-ml-model-bert-large-squad/customize.py
+++ b/script/get-ml-model-bert-large-squad/customize.py
@@ -7,14 +7,14 @@ def preprocess(i):
os_info = i['os_info']
env = i['env']
- if env.get('CM_ML_MODEL_BERT_PACKED', '') == 'yes':
+ if env.get('MLC_ML_MODEL_BERT_PACKED', '') == 'yes':
i['run_script_input']['script_name'] = "run-packed"
- env['CM_BERT_CONFIG_PATH'] = os.path.join(
- env['CM_MLPERF_INFERENCE_BERT_PATH'], "bert_config.json")
- env['CM_BERT_CHECKPOINT_DOWNLOAD_DIR'] = os.getcwd()
- env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join(
+ env['MLC_BERT_CONFIG_PATH'] = os.path.join(
+ env['MLC_MLPERF_INFERENCE_BERT_PATH'], "bert_config.json")
+ env['MLC_BERT_CHECKPOINT_DOWNLOAD_DIR'] = os.getcwd()
+ env['MLC_ML_MODEL_FILE_WITH_PATH'] = os.path.join(
os.getcwd(), "model.onnx")
- env['CM_ML_MODEL_BERT_PACKED_PATH'] = os.path.join(
+ env['MLC_ML_MODEL_BERT_PACKED_PATH'] = os.path.join(
os.getcwd(), "model.onnx")
return {'return': 0}
@@ -24,14 +24,14 @@ def postprocess(i):
env = i['env']
- env['CM_ML_MODEL_FILE'] = os.path.basename(
- env['CM_ML_MODEL_FILE_WITH_PATH'])
+ env['MLC_ML_MODEL_FILE'] = os.path.basename(
+ env['MLC_ML_MODEL_FILE_WITH_PATH'])
- if env.get('CM_ML_MODEL_PRECISION', '') == "fp32":
- env['CM_ML_MODEL_BERT_LARGE_FP32_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH']
- elif env.get('CM_ML_MODEL_PRECISION', '') == "int8":
- env['CM_ML_MODEL_BERT_LARGE_INT8_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH']
+ if env.get('MLC_ML_MODEL_PRECISION', '') == "fp32":
+ env['MLC_ML_MODEL_BERT_LARGE_FP32_PATH'] = env['MLC_ML_MODEL_FILE_WITH_PATH']
+ elif env.get('MLC_ML_MODEL_PRECISION', '') == "int8":
+ env['MLC_ML_MODEL_BERT_LARGE_INT8_PATH'] = env['MLC_ML_MODEL_FILE_WITH_PATH']
- env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH']
+ env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_ML_MODEL_FILE_WITH_PATH']
return {'return': 0}
diff --git a/script/get-ml-model-bert-large-squad/meta.yaml b/script/get-ml-model-bert-large-squad/meta.yaml
index e81819279..51bdd93d3 100644
--- a/script/get-ml-model-bert-large-squad/meta.yaml
+++ b/script/get-ml-model-bert-large-squad/meta.yaml
@@ -4,36 +4,36 @@ automation_uid: 5b4e0237da074764
cache: true
category: AI/ML models
env:
- CM_ML_MODEL: BERT
- CM_ML_MODEL_DATASET: squad-1.1
- CM_ML_MODEL_MAX_SEQ_LENGTH: '384'
- CM_ML_MODEL_NAME: MLPERF BERT Large on SQuAD v1.1
- CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: <<<CM_PACKAGE_URL>>>
+ MLC_ML_MODEL: BERT
+ MLC_ML_MODEL_DATASET: squad-1.1
+ MLC_ML_MODEL_MAX_SEQ_LENGTH: '384'
+ MLC_ML_MODEL_NAME: MLPERF BERT Large on SQuAD v1.1
+ MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: <<<MLC_PACKAGE_URL>>>
new_env_keys:
-- CM_ML_MODEL*
+- MLC_ML_MODEL*
post_deps:
- tags: get,dataset-aux,squad-vocab
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
prehook_deps:
- env:
- CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH
- CM_DOWNLOAD_URL1: <<<CM_PACKAGE_URL1>>>
- CM_EXTRACT_EXTRACTED_FILENAME: <<<CM_ML_MODEL_FILE>>>
- CM_EXTRACT_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH
+ MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH
+ MLC_DOWNLOAD_URL1: <<<MLC_PACKAGE_URL1>>>
+ MLC_EXTRACT_EXTRACTED_FILENAME: <<<MLC_ML_MODEL_FILE>>>
+ MLC_EXTRACT_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH
extra_cache_tags: bert-large,ml-model
force_cache: true
skip_if_env:
- CM_ML_MODEL_BERT_PACKED:
+ MLC_ML_MODEL_BERT_PACKED:
- 'yes'
tags: download-and-extract
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
update_tags_from_env_with_prefix:
_url.:
- - CM_PACKAGE_URL
+ - MLC_PACKAGE_URL
print_env_at_the_end:
- CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model
+ MLC_ML_MODEL_FILE_WITH_PATH: Path to the ML model
tags:
- get
- ml-model
@@ -51,73 +51,73 @@ variations:
group: download-source
custom-url.#:
env:
- CM_PACKAGE_URL: '#'
+ MLC_PACKAGE_URL: '#'
group: download-source
deepsparse:
default_variations:
download-source: github
env:
- CM_ML_MODEL_FRAMEWORK: deepsparse
- CM_ML_MODEL_INPUT_IDS_NAME: input_ids
- CM_ML_MODEL_INPUT_MASK_NAME: input_mask
- CM_ML_MODEL_INPUT_SEGMENTS_NAME: segment_ids
- CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: output_end_logits
- CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: output_start_logits
+ MLC_ML_MODEL_FRAMEWORK: deepsparse
+ MLC_ML_MODEL_INPUT_IDS_NAME: input_ids
+ MLC_ML_MODEL_INPUT_MASK_NAME: input_mask
+ MLC_ML_MODEL_INPUT_SEGMENTS_NAME: segment_ids
+ MLC_ML_MODEL_OUTPUT_END_LOGITS_NAME: output_end_logits
+ MLC_ML_MODEL_OUTPUT_START_LOGITS_NAME: output_start_logits
group: framework
deepsparse,int8:
env:
- CM_DAE_EXTRACT_DOWNLOADED: 'yes'
- CM_ML_MODEL_F1: '90.21282641816266'
- CM_ML_MODEL_FILE: oBERT-Large_95sparse_block4_qat.onnx
+ MLC_DAE_EXTRACT_DOWNLOADED: 'yes'
+ MLC_ML_MODEL_F1: '90.21282641816266'
+ MLC_ML_MODEL_FILE: oBERT-Large_95sparse_block4_qat.onnx
deepsparse,int8,github:
env:
- CM_PACKAGE_URL: https://github.com/mlcommons/inference_results_v2.1/raw/master/open/NeuralMagic/code/bert/deepsparse/models/oBERT-Large_95sparse_block4_qat.onnx.tar.xz
+ MLC_PACKAGE_URL: https://github.com/mlcommons/inference_results_v2.1/raw/master/open/NeuralMagic/code/bert/deepsparse/models/oBERT-Large_95sparse_block4_qat.onnx.tar.xz
fp32:
default: true
env:
- CM_ML_MODEL_PRECISION: fp32
+ MLC_ML_MODEL_PRECISION: fp32
group: precision
github:
group: download-source
int8:
env:
- CM_ML_MODEL_PRECISION: int8
- CM_ML_MODEL_QUANTIZED: 'yes'
+ MLC_ML_MODEL_PRECISION: int8
+ MLC_ML_MODEL_QUANTIZED: 'yes'
group: precision
onnx:
default: true
default_variations:
download-source: armi
env:
- CM_ML_MODEL_FRAMEWORK: onnx
- CM_ML_MODEL_INPUT_IDS_NAME: input_ids
- CM_ML_MODEL_INPUT_MASK_NAME: input_mask
- CM_ML_MODEL_INPUT_SEGMENTS_NAME: segment_ids
- CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: output_end_logits
- CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: output_start_logits
+ MLC_ML_MODEL_FRAMEWORK: onnx
+ MLC_ML_MODEL_INPUT_IDS_NAME: input_ids
+ MLC_ML_MODEL_INPUT_MASK_NAME: input_mask
+ MLC_ML_MODEL_INPUT_SEGMENTS_NAME: segment_ids
+ MLC_ML_MODEL_OUTPUT_END_LOGITS_NAME: output_end_logits
+ MLC_ML_MODEL_OUTPUT_START_LOGITS_NAME: output_start_logits
group: framework
onnx,fp32:
env:
- CM_DOWNLOAD_CHECKSUM: 819b25b19cd8e59080c10892689750ca
- CM_ML_MODEL_F1: '90.874'
+ MLC_DOWNLOAD_CHECKSUM: 819b25b19cd8e59080c10892689750ca
+ MLC_ML_MODEL_F1: '90.874'
onnx,fp32,armi:
env:
- CM_PACKAGE_URL: https://armi.in/files/model.onnx
- CM_PACKAGE_URL1: https://zenodo.org/record/3733910/files/model.onnx
+ MLC_PACKAGE_URL: https://armi.in/files/model.onnx
+ MLC_PACKAGE_URL1: https://zenodo.org/record/3733910/files/model.onnx
onnx,fp32,zenodo:
env:
- CM_PACKAGE_URL: https://zenodo.org/record/3733910/files/model.onnx
+ MLC_PACKAGE_URL: https://zenodo.org/record/3733910/files/model.onnx
onnx,int8:
env:
- CM_DOWNLOAD_CHECKSUM: 45f88ffb2915362242703c85c38ec2d4
- CM_ML_MODEL_F1: '90.067'
- CM_PACKAGE_URL: https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx
+ MLC_DOWNLOAD_CHECKSUM: 45f88ffb2915362242703c85c38ec2d4
+ MLC_ML_MODEL_F1: '90.067'
+ MLC_PACKAGE_URL: https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx
onnx,int8,amazon-s3:
env:
- CM_PACKAGE_URL: https://mlperf-public.s3.us-west-2.amazonaws.com/bert_large_v1_1_fake_quant.onnx
+ MLC_PACKAGE_URL: https://mlperf-public.s3.us-west-2.amazonaws.com/bert_large_v1_1_fake_quant.onnx
onnx,int8,zenodo:
env:
- CM_PACKAGE_URL: https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx
+ MLC_PACKAGE_URL: https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx
onnxruntime:
base:
- onnx
@@ -162,47 +162,47 @@ variations:
- inference-src
tags: get,mlperf,inference,src
env:
- CM_ML_MODEL_BERT_PACKED: 'yes'
+ MLC_ML_MODEL_BERT_PACKED: 'yes'
group: packing
new_env_keys:
- - CM_BERT_
+ - MLC_BERT_
prehook_deps:
- env:
- CM_DOWNLOAD_CHECKSUM: 3089b27c559906a868878741d992ade7
- CM_DOWNLOAD_FILENAME: model.ckpt-5474.data-00000-of-00001
- CM_DOWNLOAD_FINAL_ENV_NAME: CM_BERT_CHECKPOINT_DATA_PATH
- CM_DOWNLOAD_PATH: <<<CM_BERT_CHECKPOINT_DOWNLOAD_DIR>>>
+ MLC_DOWNLOAD_CHECKSUM: 3089b27c559906a868878741d992ade7
+ MLC_DOWNLOAD_FILENAME: model.ckpt-5474.data-00000-of-00001
+ MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_BERT_CHECKPOINT_DATA_PATH
+ MLC_DOWNLOAD_PATH: <<<MLC_BERT_CHECKPOINT_DOWNLOAD_DIR>>>
extra_cache_tags: bert,checkpoint,weights,bert-large
force_cache: true
tags: download,file,_wget,_url.https://zenodo.org/record/3733868/files/model.ckpt-5474.data-00000-of-00001
- env:
- CM_DOWNLOAD_CHECKSUM: d23d61572d9404da4dac3363b5bc735b
- CM_DOWNLOAD_FILENAME: model.ckpt-5474.index
- CM_DOWNLOAD_FINAL_ENV_NAME: CM_BERT_CHECKPOINT_INDEX_PATH
- CM_DOWNLOAD_PATH: <<<CM_BERT_CHECKPOINT_DOWNLOAD_DIR>>>
+ MLC_DOWNLOAD_CHECKSUM: d23d61572d9404da4dac3363b5bc735b
+ MLC_DOWNLOAD_FILENAME: model.ckpt-5474.index
+ MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_BERT_CHECKPOINT_INDEX_PATH
+ MLC_DOWNLOAD_PATH: <<<MLC_BERT_CHECKPOINT_DOWNLOAD_DIR>>>
extra_cache_tags: bert,checkpoint-index,bert-large
force_cache: true
tags: download,file,_wget,_url.https://zenodo.org/record/3733868/files/model.ckpt-5474.index
- env:
- CM_DOWNLOAD_CHECKSUM: 83e11e57eea14c9e9a246af74af40d66
- CM_DOWNLOAD_FILENAME: model.ckpt-5474.meta
- CM_DOWNLOAD_FINAL_ENV_NAME: CM_BERT_CHECKPOINT_META_PATH
- CM_DOWNLOAD_PATH: <<<CM_BERT_CHECKPOINT_DOWNLOAD_DIR>>>
+ MLC_DOWNLOAD_CHECKSUM: 83e11e57eea14c9e9a246af74af40d66
+ MLC_DOWNLOAD_FILENAME: model.ckpt-5474.meta
+ MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_BERT_CHECKPOINT_META_PATH
+ MLC_DOWNLOAD_PATH: <<<MLC_BERT_CHECKPOINT_DOWNLOAD_DIR>>>
extra_cache_tags: bert,checkpoint-meta,bert-large
force_cache: true
tags: download,file,_wget,_url.https://zenodo.org/record/3733868/files/model.ckpt-5474.meta
- env:
- CM_DOWNLOAD_CHECKSUM: 64800d5d8528ce344256daf115d4965e
- CM_DOWNLOAD_FILENAME: vocab.txt
- CM_DOWNLOAD_FINAL_ENV_NAME: CM_BERT_VOCAB_PATH
- CM_DOWNLOAD_PATH: <<<CM_BERT_CHECKPOINT_DOWNLOAD_DIR>>>
+ MLC_DOWNLOAD_CHECKSUM: 64800d5d8528ce344256daf115d4965e
+ MLC_DOWNLOAD_FILENAME: vocab.txt
+ MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_BERT_VOCAB_PATH
+ MLC_DOWNLOAD_PATH: <<<MLC_BERT_CHECKPOINT_DOWNLOAD_DIR>>>
extra_cache_tags: bert,vocab,bert-large
force_cache: true
tags: download,file,_wget,_url.https://zenodo.org/record/3733868/files/vocab.txt
- env:
- CM_DOWNLOAD_CHECKSUM: 94c91ce422e8f36f9d98b4926e2ad688
- CM_DOWNLOAD_FILENAME: convert_model.py
- CM_DOWNLOAD_FINAL_ENV_NAME: CM_BERT_CONVERTER_CODE_PATH
+ MLC_DOWNLOAD_CHECKSUM: 94c91ce422e8f36f9d98b4926e2ad688
+ MLC_DOWNLOAD_FILENAME: convert_model.py
+ MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_BERT_CONVERTER_CODE_PATH
extra_cache_tags: bert,checkpoint,converter,code,bert-large
force_cache: true
tags: download,file,_wget,_url.https://raw.githubusercontent.com/krai/axs2kilt/main/model_onnx_bert_large_packed_recipe/convert_model.py
@@ -210,35 +210,35 @@ variations:
default_variations:
download-source: armi
env:
- CM_ML_MODEL_FRAMEWORK: pytorch
- CM_ML_MODEL_INPUT_IDS_NAME: input_ids
- CM_ML_MODEL_INPUT_MASK_NAME: input_mask
- CM_ML_MODEL_INPUT_SEGMENTS_NAME: segment_ids
- CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: output_end_logits
- CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: output_start_logits
+ MLC_ML_MODEL_FRAMEWORK: pytorch
+ MLC_ML_MODEL_INPUT_IDS_NAME: input_ids
+ MLC_ML_MODEL_INPUT_MASK_NAME: input_mask
+ MLC_ML_MODEL_INPUT_SEGMENTS_NAME: segment_ids
+ MLC_ML_MODEL_OUTPUT_END_LOGITS_NAME: output_end_logits
+ MLC_ML_MODEL_OUTPUT_START_LOGITS_NAME: output_start_logits
group: framework
pytorch,fp32:
env:
- CM_DOWNLOAD_CHECKSUM: 00fbcbfaebfa20d87ac9885120a6e9b4
- CM_ML_MODEL_F1: '90.874'
+ MLC_DOWNLOAD_CHECKSUM: 00fbcbfaebfa20d87ac9885120a6e9b4
+ MLC_ML_MODEL_F1: '90.874'
pytorch,fp32,armi:
env:
- CM_PACKAGE_URL: https://armi.in/files/fp32/model.pytorch
- CM_PACKAGE_URL1: https://zenodo.org/record/3733896/files/model.pytorch
+ MLC_PACKAGE_URL: https://armi.in/files/fp32/model.pytorch
+ MLC_PACKAGE_URL1: https://zenodo.org/record/3733896/files/model.pytorch
pytorch,fp32,zenodo:
env:
- CM_PACKAGE_URL: https://zenodo.org/record/3733896/files/model.pytorch
+ MLC_PACKAGE_URL: https://zenodo.org/record/3733896/files/model.pytorch
pytorch,int8:
env:
- CM_DOWNLOAD_CHECKSUM: 0734c580cb53b4b56a3f400771ffcb7c
- CM_ML_MODEL_F1: '90.633'
+ MLC_DOWNLOAD_CHECKSUM: 0734c580cb53b4b56a3f400771ffcb7c
+ MLC_ML_MODEL_F1: '90.633'
pytorch,int8,armi:
env:
- CM_PACKAGE_URL: https://armi.in/files/int8/pytorch_model.bin
- CM_PACKAGE_URL1: https://zenodo.org/record/4792496/files/pytorch_model.bin
+ MLC_PACKAGE_URL: https://armi.in/files/int8/pytorch_model.bin
+ MLC_PACKAGE_URL1: https://zenodo.org/record/4792496/files/pytorch_model.bin
pytorch,int8,zenodo:
env:
- CM_PACKAGE_URL: https://zenodo.org/record/4792496/files/pytorch_model.bin
+ MLC_PACKAGE_URL: https://zenodo.org/record/4792496/files/pytorch_model.bin
tensorflow:
base:
- tf
@@ -246,24 +246,24 @@ variations:
default_variations:
download-source: zenodo
env:
- CM_ML_MODEL_FRAMEWORK: tf
- CM_ML_MODEL_INPUT_IDS_NAME: input_ids
- CM_ML_MODEL_INPUT_MASK_NAME: input_mask
- CM_ML_MODEL_INPUT_SEGMENTS_NAME: segment_ids
- CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: output_end_logits
- CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: output_start_logits
+ MLC_ML_MODEL_FRAMEWORK: tf
+ MLC_ML_MODEL_INPUT_IDS_NAME: input_ids
+ MLC_ML_MODEL_INPUT_MASK_NAME: input_mask
+ MLC_ML_MODEL_INPUT_SEGMENTS_NAME: segment_ids
+ MLC_ML_MODEL_OUTPUT_END_LOGITS_NAME: output_end_logits
+ MLC_ML_MODEL_OUTPUT_START_LOGITS_NAME: output_start_logits
group: framework
tf,fp32:
env:
- CM_DOWNLOAD_CHECKSUM: dd72de12e8226f25f0128a1a864b97ad
- CM_ML_MODEL_F1: '90.874'
+ MLC_DOWNLOAD_CHECKSUM: dd72de12e8226f25f0128a1a864b97ad
+ MLC_ML_MODEL_F1: '90.874'
tf,fp32,zenodo:
env:
- CM_PACKAGE_URL: https://zenodo.org/record/3939747/files/model.pb
+ MLC_PACKAGE_URL: https://zenodo.org/record/3939747/files/model.pb
unpacked:
default: true
env:
- CM_ML_MODEL_BERT_PACKED: 'no'
+ MLC_ML_MODEL_BERT_PACKED: 'no'
group: packing
zenodo:
group: download-source
diff --git a/script/get-ml-model-bert-large-squad/run-packed.sh b/script/get-ml-model-bert-large-squad/run-packed.sh
index 4c7b016c9..220d1fbc2 100644
--- a/script/get-ml-model-bert-large-squad/run-packed.sh
+++ b/script/get-ml-model-bert-large-squad/run-packed.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-cmd="${CM_PYTHON_BIN_WITH_PATH} ${CM_BERT_CONVERTER_CODE_PATH} --src '${CM_BERT_CHECKPOINT_INDEX_PATH}/../model.ckpt-5474' --dest '$PWD/' --config_path '${CM_BERT_CONFIG_PATH}'"
+cmd="${MLC_PYTHON_BIN_WITH_PATH} ${MLC_BERT_CONVERTER_CODE_PATH} --src '${MLC_BERT_CHECKPOINT_INDEX_PATH}/../model.ckpt-5474' --dest '$PWD/' --config_path '${MLC_BERT_CONFIG_PATH}'"
echo $cmd
eval $cmd
test $? -eq 0 || exit $?
diff --git a/script/get-ml-model-dlrm-terabyte/meta.yaml b/script/get-ml-model-dlrm-terabyte/meta.yaml
index 4d5c93f1b..fed99aaa4 100644
--- a/script/get-ml-model-dlrm-terabyte/meta.yaml
+++ b/script/get-ml-model-dlrm-terabyte/meta.yaml
@@ -4,34 +4,34 @@ automation_uid: 5b4e0237da074764
cache: true
category: AI/ML models
env:
- CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH
- CM_EXTRACT_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH
- CM_ML_MODEL: dlrm
- CM_ML_MODEL_DATASET: criteo-terabyte
- CM_ML_MODEL_RETRAINING: 'no'
- CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no'
+ MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH
+ MLC_EXTRACT_FINAL_ENV_NAME: MLC_ML_MODEL_FILE_WITH_PATH
+ MLC_ML_MODEL: dlrm
+ MLC_ML_MODEL_DATASET: criteo-terabyte
+ MLC_ML_MODEL_RETRAINING: 'no'
+ MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no'
input_mapping:
- dir: CM_DOWNLOAD_PATH
- download_path: CM_DOWNLOAD_PATH
- to: CM_DOWNLOAD_PATH
+ dir: MLC_DOWNLOAD_PATH
+ download_path: MLC_DOWNLOAD_PATH
+ to: MLC_DOWNLOAD_PATH
new_env_keys:
-- CM_ML_MODEL_*
+- MLC_ML_MODEL_*
prehook_deps:
- env:
- CM_DOWNLOAD_DOWNLOADED_FILENAME: <<>>
- CM_EXTRACT_EXTRACTED_FILENAME: <<>>
+ MLC_DOWNLOAD_DOWNLOADED_FILENAME: <<>>
+ MLC_EXTRACT_EXTRACTED_FILENAME: <<>>
extra_cache_tags: ml-model,dlrm,terabyte,raw,ml-model-dlrm
force_cache: true
names:
- dae
tags: download-and-extract
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
update_tags_from_env_with_prefix:
_url.:
- - CM_PACKAGE_URL
+ - MLC_PACKAGE_URL
print_env_at_the_end:
- CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model
+ MLC_ML_MODEL_FILE_WITH_PATH: Path to the ML model
tags:
- get
- ml-model
@@ -45,71 +45,71 @@ uid: 8fa7582c603a4db3
variations:
debug:
env:
- CM_ML_MODEL_DEBUG: 'yes'
+ MLC_ML_MODEL_DEBUG: 'yes'
fp32:
default: true
env:
- CM_ML_MODEL_INPUT_DATA_TYPES: fp32
- CM_ML_MODEL_PRECISION: fp32
- CM_ML_MODEL_WEIGHT_DATA_TYPES: fp32
+ MLC_ML_MODEL_INPUT_DATA_TYPES: fp32
+ MLC_ML_MODEL_PRECISION: fp32
+ MLC_ML_MODEL_WEIGHT_DATA_TYPES: fp32
group: precision
onnx:
env:
- CM_ML_MODEL_FRAMEWORK: onnx
+ MLC_ML_MODEL_FRAMEWORK: onnx
group: framework
onnx,fp32:
env:
- CM_DOWNLOAD_CHECKSUM: 763b964eaffe5f86e92cdcb60c5dc0de
- CM_ML_MODEL_ACCURACY: '0.8025'
- CM_ML_MODEL_DLRM_MAX_INDEX_RANGE: '40000000'
- CM_ML_MODEL_FILE: tb00_40M.onnx
- CM_PACKAGE_URL: https://dlrm.s3-us-west-1.amazonaws.com/models/tb00_40M.onnx.tar
- CM_UNTAR: 'yes'
+ MLC_DOWNLOAD_CHECKSUM: 763b964eaffe5f86e92cdcb60c5dc0de
+ MLC_ML_MODEL_ACCURACY: '0.8025'
+ MLC_ML_MODEL_DLRM_MAX_INDEX_RANGE: '40000000'
+ MLC_ML_MODEL_FILE: tb00_40M.onnx
+ MLC_PACKAGE_URL: https://dlrm.s3-us-west-1.amazonaws.com/models/tb00_40M.onnx.tar
+ MLC_UNTAR: 'yes'
onnx,fp32,debug:
env:
- CM_DOWNLOAD_CHECKSUM: d11255cd9926cda9181a347861e4d263
- CM_ML_MODEL_ACCURACY: '0.8107'
- CM_ML_MODEL_DLRM_MAX_INDEX_RANGE: '10000000'
- CM_ML_MODEL_FILE: tb0875_10M.onnx
- CM_PACKAGE_URL: https://dlrm.s3-us-west-1.amazonaws.com/models/tb0875_10M.onnx.tar
- CM_UNTAR: 'yes'
+ MLC_DOWNLOAD_CHECKSUM: d11255cd9926cda9181a347861e4d263
+ MLC_ML_MODEL_ACCURACY: '0.8107'
+ MLC_ML_MODEL_DLRM_MAX_INDEX_RANGE: '10000000'
+ MLC_ML_MODEL_FILE: tb0875_10M.onnx
+ MLC_PACKAGE_URL: https://dlrm.s3-us-west-1.amazonaws.com/models/tb0875_10M.onnx.tar
+ MLC_UNTAR: 'yes'
pytorch:
default: true
env:
- CM_ML_MODEL_FRAMEWORK: pytorch
- CM_TMP_MODEL_ADDITIONAL_NAME: dlrm_terabyte.pytorch
+ MLC_ML_MODEL_FRAMEWORK: pytorch
+ MLC_TMP_MODEL_ADDITIONAL_NAME: dlrm_terabyte.pytorch
group: framework
pytorch,fp32:
env:
- CM_DOWNLOAD_CHECKSUM: 2d49a5288cddb37c3c64860a06d79bb9
- CM_ML_MODEL_ACCURACY: '0.8025'
- CM_ML_MODEL_DLRM_MAX_INDEX_RANGE: '40000000'
- CM_PACKAGE_URL: https://dlrm.s3-us-west-1.amazonaws.com/models/tb00_40M.pt
+ MLC_DOWNLOAD_CHECKSUM: 2d49a5288cddb37c3c64860a06d79bb9
+ MLC_ML_MODEL_ACCURACY: '0.8025'
+ MLC_ML_MODEL_DLRM_MAX_INDEX_RANGE: '40000000'
+ MLC_PACKAGE_URL: https://dlrm.s3-us-west-1.amazonaws.com/models/tb00_40M.pt
pytorch,fp32,debug:
env:
- CM_DOWNLOAD_CHECKSUM: b7cacffcf75f767faa9cb2af397723aa
- CM_ML_MODEL_ACCURACY: '0.8107'
- CM_ML_MODEL_DLRM_MAX_INDEX_RANGE: '10000000'
- CM_PACKAGE_URL: https://dlrm.s3-us-west-1.amazonaws.com/models/tb0875_10M.pt
+ MLC_DOWNLOAD_CHECKSUM: b7cacffcf75f767faa9cb2af397723aa
+ MLC_ML_MODEL_ACCURACY: '0.8107'
+ MLC_ML_MODEL_DLRM_MAX_INDEX_RANGE: '10000000'
+ MLC_PACKAGE_URL: https://dlrm.s3-us-west-1.amazonaws.com/models/tb0875_10M.pt
pytorch,fp32,weight_sharded:
default_variations:
download-tool: rclone
env:
- CM_DOWNLOAD_CHECKSUM: ''
- CM_ML_MODEL_ACCURACY: '0.8025'
- CM_ML_MODEL_DLRM_MAX_INDEX_RANGE: '40000000'
- CM_ML_MODEL_FILE: model_weights
- CM_TMP_MODEL_ADDITIONAL_NAME: ''
+ MLC_DOWNLOAD_CHECKSUM: ''
+ MLC_ML_MODEL_ACCURACY: '0.8025'
+ MLC_ML_MODEL_DLRM_MAX_INDEX_RANGE: '40000000'
+ MLC_ML_MODEL_FILE: model_weights
+ MLC_TMP_MODEL_ADDITIONAL_NAME: ''
pytorch,fp32,weight_sharded,rclone:
env:
- CM_PACKAGE_URL: mlc-inference:mlcommons-inference-wg-public/model_weights
- CM_RCLONE_CONFIG_NAME: mlc-inference
+ MLC_PACKAGE_URL: mlc-inference:mlcommons-inference-wg-public/model_weights
+ MLC_RCLONE_CONFIG_NAME: mlc-inference
pytorch,fp32,weight_sharded,wget:
env:
- CM_DAE_EXTRACT_DOWNLOADED: 'yes'
- CM_DOWNLOAD_FILENAME: download
- CM_EXTRACT_UNZIP: 'yes'
- CM_PACKAGE_URL: https://cloud.mlcommons.org/index.php/s/XzfSeLgW8FYfR3S/download
+ MLC_DAE_EXTRACT_DOWNLOADED: 'yes'
+ MLC_DOWNLOAD_FILENAME: download
+ MLC_EXTRACT_UNZIP: 'yes'
+ MLC_PACKAGE_URL: https://cloud.mlcommons.org/index.php/s/XzfSeLgW8FYfR3S/download
rclone:
ad:
dae:
@@ -118,7 +118,7 @@ variations:
weight_sharded:
default: true
env:
- CM_DLRM_MULTIHOT_MODEL: 'yes'
+ MLC_DLRM_MULTIHOT_MODEL: 'yes'
group: type
wget:
ad:
diff --git a/script/get-ml-model-dlrm-terabyte/run.sh b/script/get-ml-model-dlrm-terabyte/run.sh
index d2595b32f..2da188061 100644
--- a/script/get-ml-model-dlrm-terabyte/run.sh
+++ b/script/get-ml-model-dlrm-terabyte/run.sh
@@ -1,4 +1,4 @@
#!/bin/bash
-if [[ ${CM_TMP_MODEL_ADDITIONAL_NAME} ]]; then
- ln -s ${CM_ML_MODEL_FILE} ${CM_TMP_MODEL_ADDITIONAL_NAME}
+if [[ ${MLC_TMP_MODEL_ADDITIONAL_NAME} ]]; then
+ ln -s ${MLC_ML_MODEL_FILE} ${MLC_TMP_MODEL_ADDITIONAL_NAME}
fi
diff --git a/script/get-ml-model-efficientnet-lite/customize.py b/script/get-ml-model-efficientnet-lite/customize.py
index 59f3c580e..6b8a8fd6b 100644
--- a/script/get-ml-model-efficientnet-lite/customize.py
+++ b/script/get-ml-model-efficientnet-lite/customize.py
@@ -14,8 +14,8 @@ def preprocess(i):
path = os.getcwd()
- url = env['CM_PACKAGE_URL']
- env['CM_ML_MODEL_STARTING_WEIGHTS_FILENAME'] = url
+ url = env['MLC_PACKAGE_URL']
+ env['MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME'] = url
print('Downloading from {}'.format(url))
@@ -27,30 +27,30 @@ def preprocess(i):
filename = r['filename']
- if env.get('CM_UNZIP') == "yes" or env.get('CM_UNTAR') == "yes":
- if env.get('CM_UNZIP') == "yes":
+ if env.get('MLC_UNZIP') == "yes" or env.get('MLC_UNTAR') == "yes":
+ if env.get('MLC_UNZIP') == "yes":
cmd = "unzip "
- elif env.get('CM_UNTAR') == "yes":
+ elif env.get('MLC_UNTAR') == "yes":
cmd = "tar -xvzf "
os.system(cmd + filename)
- filename = env['CM_ML_MODEL_FILE']
+ filename = env['MLC_ML_MODEL_FILE']
- extract_folder = env.get('CM_EXTRACT_FOLDER', '')
+ extract_folder = env.get('MLC_EXTRACT_FOLDER', '')
if extract_folder:
- env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join(
+ env['MLC_ML_MODEL_FILE_WITH_PATH'] = os.path.join(
path, extract_folder, filename)
else:
- env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join(path, filename)
+ env['MLC_ML_MODEL_FILE_WITH_PATH'] = os.path.join(path, filename)
else:
- env['CM_ML_MODEL_FILE'] = filename
- env['CM_ML_MODEL_FILE_WITH_PATH'] = r['path']
+ env['MLC_ML_MODEL_FILE'] = filename
+ env['MLC_ML_MODEL_FILE_WITH_PATH'] = r['path']
- env['CM_ML_MODEL_PATH'] = path
+ env['MLC_ML_MODEL_PATH'] = path
- if not os.path.exists(env['CM_ML_MODEL_FILE_WITH_PATH']):
+ if not os.path.exists(env['MLC_ML_MODEL_FILE_WITH_PATH']):
return {
- 'return': 1, 'error': f"Model file path {env['CM_ML_MODEL_FILE_WITH_PATH']} not existing. Probably the model name {env['CM_ML_MODEL_FILE']} in model meta is wrong"}
+ 'return': 1, 'error': f"Model file path {env['MLC_ML_MODEL_FILE_WITH_PATH']} not existing. Probably the model name {env['MLC_ML_MODEL_FILE']} in model meta is wrong"}
return {'return': 0}
diff --git a/script/get-ml-model-efficientnet-lite/meta.yaml b/script/get-ml-model-efficientnet-lite/meta.yaml
index e40dd196c..df1bbc519 100644
--- a/script/get-ml-model-efficientnet-lite/meta.yaml
+++ b/script/get-ml-model-efficientnet-lite/meta.yaml
@@ -4,34 +4,34 @@ automation_uid: 5b4e0237da074764
cache: true
category: AI/ML models
default_env:
- CM_ML_MODEL_INPUTS_DATA_TYPE: fp32
- CM_ML_MODEL_PRECISION: fp32
- CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32
+ MLC_ML_MODEL_INPUTS_DATA_TYPE: fp32
+ MLC_ML_MODEL_PRECISION: fp32
+ MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp32
env:
- CM_EXTRACT_FOLDER: efficientnet-<<>>
- CM_ML_MODEL: efficientnet-lite
- CM_ML_MODEL_DATASET: imagenet2012-val
- CM_ML_MODEL_DATA_LAYOUT: NHWC
- CM_ML_MODEL_FILE: efficientnet-<<>>-<<>>.tflite
- CM_ML_MODEL_FULL_NAME: efficientnet-<<>>-<<>>
- CM_ML_MODEL_GIVEN_CHANNEL_MEANS: ''
- CM_ML_MODEL_INPUT_LAYER_NAME: images
- CM_ML_MODEL_INPUT_SHAPES: '\"input\": (BATCH_SIZE, 224, 224, 3)'
- CM_ML_MODEL_MOBILENET_NAME_SUFFIX: ''
- CM_ML_MODEL_NORMALIZE_DATA: 'yes'
- CM_ML_MODEL_OUTPUT_LAYER_NAME: Softmax
- CM_ML_MODEL_RETRAINING: 'no'
- CM_ML_MODEL_SUBTRACT_MEANS: '0'
- CM_ML_MODEL_WEIGHTS_ARE_CHECKPOINTS: 'yes'
- CM_ML_MODEL_WEIGHTS_FILE: model.ckpt.data-00000-of-00001
- CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no'
- CM_PACKAGE_URL: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/lite/efficientnet-<<>>.tar.gz
- CM_UNTAR: 'yes'
+ MLC_EXTRACT_FOLDER: efficientnet-<<>>
+ MLC_ML_MODEL: efficientnet-lite
+ MLC_ML_MODEL_DATASET: imagenet2012-val
+ MLC_ML_MODEL_DATA_LAYOUT: NHWC
+ MLC_ML_MODEL_FILE: efficientnet-<<>>-<<>>.tflite
+ MLC_ML_MODEL_FULL_NAME: efficientnet-<<>>-<<>>
+ MLC_ML_MODEL_GIVEN_CHANNEL_MEANS: ''
+ MLC_ML_MODEL_INPUT_LAYER_NAME: images
+ MLC_ML_MODEL_INPUT_SHAPES: '\"input\": (BATCH_SIZE, 224, 224, 3)'
+ MLC_ML_MODEL_MOBILENET_NAME_SUFFIX: ''
+ MLC_ML_MODEL_NORMALIZE_DATA: 'yes'
+ MLC_ML_MODEL_OUTPUT_LAYER_NAME: Softmax
+ MLC_ML_MODEL_RETRAINING: 'no'
+ MLC_ML_MODEL_SUBTRACT_MEANS: '0'
+ MLC_ML_MODEL_WEIGHTS_ARE_CHECKPOINTS: 'yes'
+ MLC_ML_MODEL_WEIGHTS_FILE: model.ckpt.data-00000-of-00001
+ MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no'
+ MLC_PACKAGE_URL: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/lite/efficientnet-<<>>.tar.gz
+ MLC_UNTAR: 'yes'
new_env_keys:
-- CM_ML_MODEL_*
-- CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS
+- MLC_ML_MODEL_*
+- MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS
print_env_at_the_end:
- CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model
+ MLC_ML_MODEL_FILE_WITH_PATH: Path to the ML model
tags:
- get
- ml-model
@@ -58,10 +58,10 @@ variations:
fp32:
default: true
env:
- CM_ML_MODEL_EFFICIENTNET_LITE_PRECISION: fp32
- CM_ML_MODEL_INPUTS_DATA_TYPE: fp32
- CM_ML_MODEL_PRECISION: fp32
- CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32
+ MLC_ML_MODEL_EFFICIENTNET_LITE_PRECISION: fp32
+ MLC_ML_MODEL_INPUTS_DATA_TYPE: fp32
+ MLC_ML_MODEL_PRECISION: fp32
+ MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp32
group: precision
int8:
alias: uint8
@@ -70,73 +70,73 @@ variations:
- resolution-224
default: true
env:
- CM_ML_MODEL_EFFICIENTNET_LITE_KIND: lite0
+ MLC_ML_MODEL_EFFICIENTNET_LITE_KIND: lite0
group: kind
lite1:
base:
- resolution-240
env:
- CM_ML_MODEL_EFFICIENTNET_LITE_KIND: lite1
+ MLC_ML_MODEL_EFFICIENTNET_LITE_KIND: lite1
group: kind
lite2:
base:
- resolution-260
env:
- CM_ML_MODEL_EFFICIENTNET_LITE_KIND: lite2
+ MLC_ML_MODEL_EFFICIENTNET_LITE_KIND: lite2
group: kind
lite3:
base:
- resolution-280
env:
- CM_ML_MODEL_EFFICIENTNET_LITE_KIND: lite3
+ MLC_ML_MODEL_EFFICIENTNET_LITE_KIND: lite3
group: kind
lite4:
base:
- resolution-300
env:
- CM_ML_MODEL_EFFICIENTNET_LITE_KIND: lite4
+ MLC_ML_MODEL_EFFICIENTNET_LITE_KIND: lite4
group: kind
resolution-224:
default: true
env:
- CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.224
- CM_ML_MODEL_IMAGE_HEIGHT: '224'
- CM_ML_MODEL_IMAGE_WIDTH: '224'
- CM_ML_MODEL_MOBILENET_RESOLUTION: '224'
+ MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.224
+ MLC_ML_MODEL_IMAGE_HEIGHT: '224'
+ MLC_ML_MODEL_IMAGE_WIDTH: '224'
+ MLC_ML_MODEL_MOBILENET_RESOLUTION: '224'
group: resolution
resolution-240:
env:
- CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.240
- CM_ML_MODEL_IMAGE_HEIGHT: '240'
- CM_ML_MODEL_IMAGE_WIDTH: '240'
- CM_ML_MODEL_MOBILENET_RESOLUTION: '240'
+ MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.240
+ MLC_ML_MODEL_IMAGE_HEIGHT: '240'
+ MLC_ML_MODEL_IMAGE_WIDTH: '240'
+ MLC_ML_MODEL_MOBILENET_RESOLUTION: '240'
group: resolution
resolution-260:
env:
- CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.260
- CM_ML_MODEL_IMAGE_HEIGHT: '260'
- CM_ML_MODEL_IMAGE_WIDTH: '260'
- CM_ML_MODEL_MOBILENET_RESOLUTION: '260'
+ MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.260
+ MLC_ML_MODEL_IMAGE_HEIGHT: '260'
+ MLC_ML_MODEL_IMAGE_WIDTH: '260'
+ MLC_ML_MODEL_MOBILENET_RESOLUTION: '260'
group: resolution
resolution-280:
env:
- CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.280
- CM_ML_MODEL_IMAGE_HEIGHT: '280'
- CM_ML_MODEL_IMAGE_WIDTH: '280'
- CM_ML_MODEL_MOBILENET_RESOLUTION: '280'
+ MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.280
+ MLC_ML_MODEL_IMAGE_HEIGHT: '280'
+ MLC_ML_MODEL_IMAGE_WIDTH: '280'
+ MLC_ML_MODEL_MOBILENET_RESOLUTION: '280'
group: resolution
resolution-300:
env:
- CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.300
- CM_ML_MODEL_IMAGE_HEIGHT: '300'
- CM_ML_MODEL_IMAGE_WIDTH: '300'
- CM_ML_MODEL_MOBILENET_RESOLUTION: '300'
+ MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.300
+ MLC_ML_MODEL_IMAGE_HEIGHT: '300'
+ MLC_ML_MODEL_IMAGE_WIDTH: '300'
+ MLC_ML_MODEL_MOBILENET_RESOLUTION: '300'
group: resolution
tflite: {}
uint8:
env:
- CM_ML_MODEL_EFFICIENTNET_LITE_PRECISION: int8
- CM_ML_MODEL_INPUTS_DATA_TYPE: uint8
- CM_ML_MODEL_PRECISION: uint8
- CM_ML_MODEL_WEIGHTS_DATA_TYPE: uint8
+ MLC_ML_MODEL_EFFICIENTNET_LITE_PRECISION: int8
+ MLC_ML_MODEL_INPUTS_DATA_TYPE: uint8
+ MLC_ML_MODEL_PRECISION: uint8
+ MLC_ML_MODEL_WEIGHTS_DATA_TYPE: uint8
group: precision
diff --git a/script/get-ml-model-gptj/customize.py b/script/get-ml-model-gptj/customize.py
index d4fc749e5..b9c640faf 100644
--- a/script/get-ml-model-gptj/customize.py
+++ b/script/get-ml-model-gptj/customize.py
@@ -7,43 +7,43 @@ def preprocess(i):
os_info = i['os_info']
env = i['env']
- if env.get('CM_GPTJ_INTEL_MODEL', '') == 'yes':
+ if env.get('MLC_GPTJ_INTEL_MODEL', '') == 'yes':
i['run_script_input']['script_name'] = 'run-intel'
harness_root = os.path.join(
- env['CM_MLPERF_INFERENCE_RESULTS_PATH'],
+ env['MLC_MLPERF_INFERENCE_RESULTS_PATH'],
'closed',
'Intel',
'code',
'gptj-99',
'pytorch-cpu')
print(f"Harness Root: {harness_root}")
- env['CM_HARNESS_CODE_ROOT'] = harness_root
- env['CM_CALIBRATION_CODE_ROOT'] = os.path.join(
- env['CM_MLPERF_INFERENCE_RESULTS_PATH'], 'closed', 'Intel', 'calibration')
+ env['MLC_HARNESS_CODE_ROOT'] = harness_root
+ env['MLC_CALIBRATION_CODE_ROOT'] = os.path.join(
+ env['MLC_MLPERF_INFERENCE_RESULTS_PATH'], 'closed', 'Intel', 'calibration')
env['CHECKPOINT_DIR'] = env['GPTJ_CHECKPOINT_PATH']
env['QUANTIZED_MODEL_DIR'] = os.getcwd()
- if env['CM_ML_MODEL_WEIGHT_DATA_TYPES'] == "int8":
+ if env['MLC_ML_MODEL_WEIGHT_DATA_TYPES'] == "int8":
env['INT8_MODEL_DIR'] = os.getcwd()
else:
env['INT4_MODEL_DIR'] = os.getcwd()
- elif env.get('CM_TMP_ML_MODEL_PROVIDER', '') == 'nvidia':
+ elif env.get('MLC_TMP_ML_MODEL_PROVIDER', '') == 'nvidia':
i['run_script_input']['script_name'] = 'run-nvidia'
- if str(env.get('CM_DOCKER_DETACHED_MODE', '')
+ if str(env.get('MLC_DOCKER_DETACHED_MODE', '')
).lower() in ['yes', 'true', "1"]:
env['DOCKER_RUN_OPTS'] = "--rm --ipc=host --ulimit memlock=-1 --ulimit stack=67108864"
gpu_arch = int(
float(
- env['CM_CUDA_DEVICE_PROP_GPU_COMPUTE_CAPABILITY']) *
+ env['MLC_CUDA_DEVICE_PROP_GPU_COMPUTE_CAPABILITY']) *
10)
- env['CM_GPU_ARCH'] = gpu_arch
- env['CM_TMP_REQUIRE_DOWNLOAD'] = 'no'
+ env['MLC_GPU_ARCH'] = gpu_arch
+ env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'no'
else:
- is_saxml = env.get('CM_TMP_MODEL_SAXML', '')
+ is_saxml = env.get('MLC_TMP_MODEL_SAXML', '')
if is_saxml == "fp32":
i['run_script_input']['script_name'] = 'run-saxml'
elif is_saxml == "int8":
@@ -52,7 +52,7 @@ def preprocess(i):
path = env.get('GPTJ_CHECKPOINT_PATH', '').strip()
if path == '' or not os.path.exists(path):
- env['CM_TMP_REQUIRE_DOWNLOAD'] = 'yes'
+ env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'yes'
return {'return': 0}
@@ -66,12 +66,12 @@ def postprocess(i):
env['GPTJ_CHECKPOINT_PATH'] = os.path.join(
env['GPTJ_CHECKPOINT_PATH'], "checkpoint-final")
- is_saxml = env.get('CM_TMP_MODEL_SAXML', '')
+ is_saxml = env.get('MLC_TMP_MODEL_SAXML', '')
if is_saxml == "fp32":
if os.path.exists("pax_gptj_checkpoint"):
env['GPTJ_SAXML_CHECKPOINT_PATH'] = os.path.join(
os.getcwd(), "pax_gptj_checkpoint")
- env['CM_ML_MODEL_FILE_WITH_PATH'] = env['GPTJ_SAXML_CHECKPOINT_PATH']
+ env['MLC_ML_MODEL_FILE_WITH_PATH'] = env['GPTJ_SAXML_CHECKPOINT_PATH']
else:
return {'return': 1, 'error': 'pax_gptj_checkpoint generation failed'}
@@ -79,21 +79,21 @@ def postprocess(i):
if os.path.exists("int8_ckpt"):
env['GPTJ_SAXML_INT8_CHECKPOINT_PATH'] = os.path.join(
os.getcwd(), "int8_ckpt")
- env['CM_ML_MODEL_FILE_WITH_PATH'] = env['GPTJ_SAXML_INT8_CHECKPOINT_PATH']
+ env['MLC_ML_MODEL_FILE_WITH_PATH'] = env['GPTJ_SAXML_INT8_CHECKPOINT_PATH']
else:
return {'return': 1, 'error': 'pax_gptj_checkpoint generation failed'}
- elif env.get('CM_TMP_ML_MODEL_PROVIDER', '') == 'nvidia':
- env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join(
- env['CM_NVIDIA_MLPERF_SCRATCH_PATH'],
+ elif env.get('MLC_TMP_ML_MODEL_PROVIDER', '') == 'nvidia':
+ env['MLC_ML_MODEL_FILE_WITH_PATH'] = os.path.join(
+ env['MLC_NVIDIA_MLPERF_SCRATCH_PATH'],
'models',
'GPTJ-6B',
'fp8-quantized-ammo',
'GPTJ-FP8-quantized')
else:
- env['CM_ML_MODEL_FILE_WITH_PATH'] = env['GPTJ_CHECKPOINT_PATH']
+ env['MLC_ML_MODEL_FILE_WITH_PATH'] = env['GPTJ_CHECKPOINT_PATH']
- env['CM_ML_MODEL_FILE'] = os.path.basename(
- env['CM_ML_MODEL_FILE_WITH_PATH'])
- env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH']
+ env['MLC_ML_MODEL_FILE'] = os.path.basename(
+ env['MLC_ML_MODEL_FILE_WITH_PATH'])
+ env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_ML_MODEL_FILE_WITH_PATH']
return {'return': 0}
diff --git a/script/get-ml-model-gptj/meta.yaml b/script/get-ml-model-gptj/meta.yaml
index 25b2ef981..9ebaf1524 100644
--- a/script/get-ml-model-gptj/meta.yaml
+++ b/script/get-ml-model-gptj/meta.yaml
@@ -6,36 +6,36 @@ category: AI/ML models
docker:
run: false
env:
- CM_ML_MODEL: GPTJ
- CM_ML_MODEL_DATASET: cnndm
- CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no'
+ MLC_ML_MODEL: GPTJ
+ MLC_ML_MODEL_DATASET: cnndm
+ MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no'
input_mapping:
checkpoint: GPTJ_CHECKPOINT_PATH
- download_path: CM_DOWNLOAD_PATH
- to: CM_DOWNLOAD_PATH
+ download_path: MLC_DOWNLOAD_PATH
+ to: MLC_DOWNLOAD_PATH
new_env_keys:
-- CM_ML_MODEL_*
+- MLC_ML_MODEL_*
- GPTJ_CHECKPOINT_PATH
prehook_deps:
- enable_if_env:
- CM_TMP_REQUIRE_DOWNLOAD:
+ MLC_TMP_REQUIRE_DOWNLOAD:
- 'yes'
env:
- CM_DOWNLOAD_FINAL_ENV_NAME: GPTJ_CHECKPOINT_PATH
- CM_EXTRACT_FINAL_ENV_NAME: GPTJ_CHECKPOINT_PATH
- CM_EXTRACT_TO_FOLDER: gpt-j
+ MLC_DOWNLOAD_FINAL_ENV_NAME: GPTJ_CHECKPOINT_PATH
+ MLC_EXTRACT_FINAL_ENV_NAME: GPTJ_CHECKPOINT_PATH
+ MLC_EXTRACT_TO_FOLDER: gpt-j
extra_cache_tags: gptj,model
force_cache: true
names:
- dae
tags: download-and-extract
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
update_tags_from_env_with_prefix:
_url.:
- - CM_DOWNLOAD_URL
+ - MLC_DOWNLOAD_URL
print_env_at_the_end:
- CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model
+ MLC_ML_MODEL_FILE_WITH_PATH: Path to the ML model
tags:
- get
- raw
@@ -47,28 +47,28 @@ uid: a41166210f294fbf
variations:
batch_size.#:
env:
- CM_ML_MODEL_BATCH_SIZE: '#'
+ MLC_ML_MODEL_BATCH_SIZE: '#'
fp32:
env:
- CM_ML_MODEL_INPUT_DATA_TYPES: fp32
- CM_ML_MODEL_PRECISION: fp32
- CM_ML_MODEL_WEIGHT_DATA_TYPES: fp32
+ MLC_ML_MODEL_INPUT_DATA_TYPES: fp32
+ MLC_ML_MODEL_PRECISION: fp32
+ MLC_ML_MODEL_WEIGHT_DATA_TYPES: fp32
group: precision
fp8:
env:
- CM_ML_MODEL_INPUT_DATA_TYPES: fp8
- CM_ML_MODEL_WEIGHT_DATA_TYPES: fp8
+ MLC_ML_MODEL_INPUT_DATA_TYPES: fp8
+ MLC_ML_MODEL_WEIGHT_DATA_TYPES: fp8
group: precision
int4:
env:
- CM_ML_MODEL_INPUT_DATA_TYPES: int4
- CM_ML_MODEL_WEIGHT_DATA_TYPES: int4
+ MLC_ML_MODEL_INPUT_DATA_TYPES: int4
+ MLC_ML_MODEL_WEIGHT_DATA_TYPES: int4
group: precision
int8:
env:
- CM_ML_MODEL_INPUT_DATA_TYPES: int8
- CM_ML_MODEL_PRECISION: int8
- CM_ML_MODEL_WEIGHT_DATA_TYPES: int8
+ MLC_ML_MODEL_INPUT_DATA_TYPES: int8
+ MLC_ML_MODEL_PRECISION: int8
+ MLC_ML_MODEL_WEIGHT_DATA_TYPES: int8
group: precision
intel:
default_variations:
@@ -83,23 +83,23 @@ variations:
default_variations:
framework: pytorch
env:
- CM_TMP_ML_MODEL_PROVIDER: nvidia
+ MLC_TMP_ML_MODEL_PROVIDER: nvidia
group: model-provider
pytorch:
default: true
env:
- CM_ML_MODEL_DATA_LAYOUT: NCHW
- CM_ML_MODEL_FRAMEWORK: pytorch
- CM_ML_STARTING_WEIGHTS_FILENAME: <<>>
+ MLC_ML_MODEL_DATA_LAYOUT: NCHW
+ MLC_ML_MODEL_FRAMEWORK: pytorch
+ MLC_ML_STARTING_WEIGHTS_FILENAME: <<>>
group: framework
pytorch,fp32:
env:
- CM_DOWNLOAD_CHECKSUM_NOT_USED: e677e28aaf03da84584bb3073b7ee315
- CM_DOWNLOAD_EXTRA_OPTIONS: ' --output-document checkpoint.zip'
- CM_PACKAGE_URL: https://cloud.mlcommons.org/index.php/s/QAZ2oM94MkFtbQx/download
- CM_RCLONE_CONFIG_NAME: mlc-inference
- CM_RCLONE_URL: mlc-inference:mlcommons-inference-wg-public/gpt-j
- CM_UNZIP: 'yes'
+ MLC_DOWNLOAD_CHECKSUM_NOT_USED: e677e28aaf03da84584bb3073b7ee315
+ MLC_DOWNLOAD_EXTRA_OPTIONS: ' --output-document checkpoint.zip'
+ MLC_PACKAGE_URL: https://cloud.mlcommons.org/index.php/s/QAZ2oM94MkFtbQx/download
+ MLC_RCLONE_CONFIG_NAME: mlc-inference
+ MLC_RCLONE_URL: mlc-inference:mlcommons-inference-wg-public/gpt-j
+ MLC_UNZIP: 'yes'
required_disk_space: 22700
pytorch,fp32,wget:
add_deps_recursive:
@@ -117,7 +117,7 @@ variations:
- tags: get,mlperf,inference,results
version: v3.1
- env:
- CM_GPTJ_INTEL_MODEL: ''
+ MLC_GPTJ_INTEL_MODEL: ''
force_new_env_keys:
- GPTJ_CHECKPOINT_PATH
tags: get,ml-model,gpt-j,_fp32,_pytorch
@@ -135,13 +135,13 @@ variations:
- tags: install,ipex,from.src,_for-intel-mlperf-inference-v3.1-gptj
- tags: get,dataset,cnndm,_calibration
env:
- CM_GPTJ_INTEL_MODEL: 'yes'
+ MLC_GPTJ_INTEL_MODEL: 'yes'
pytorch,nvidia:
default_variations:
precision: fp8
deps:
- env:
- CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_TENSORRT_LLM_CHECKOUT_PATH
+ MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_TENSORRT_LLM_CHECKOUT_PATH
extra_cache_tags: tensorrt-llm
tags: get,git,repo,_lfs,_repo.https://github.com/NVIDIA/TensorRT-LLM.git,_sha.0ab9d17a59c284d2de36889832fe9fc7c8697604
- names:
@@ -171,8 +171,8 @@ variations:
tags: _rclone
default: true
env:
- CM_DOWNLOAD_FILENAME: checkpoint
- CM_DOWNLOAD_URL: <<>>
+ MLC_DOWNLOAD_FILENAME: checkpoint
+ MLC_DOWNLOAD_URL: <<>>
group: download-tool
saxml:
group: framework
@@ -189,7 +189,7 @@ variations:
- tags: get,generic-python-lib,_package.transformers
- tags: get,generic-python-lib,_package.accelerate
env:
- CM_TMP_MODEL_SAXML: fp32
+ MLC_TMP_MODEL_SAXML: fp32
new_env_keys:
- GPTJ_SAXML_CHECKPOINT_PATH
saxml,int8:
@@ -203,24 +203,24 @@ variations:
- tags: get,generic-python-lib,_package.praxis
- tags: get,generic-python-lib,_package.apache-beam
- env:
- CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_SAXML_REPO_PATH
+ MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_SAXML_REPO_PATH
extra_cache_tags: saxml
names:
- saxml
tags: get,git,repo,_repo.https://github.com/google/saxml
env:
- CM_TMP_MODEL_SAXML: int8
+ MLC_TMP_MODEL_SAXML: int8
uint8:
env:
- CM_ML_MODEL_INPUT_DATA_TYPES: uint8
- CM_ML_MODEL_PRECISION: uint8
- CM_ML_MODEL_WEIGHT_DATA_TYPES: uint8
+ MLC_ML_MODEL_INPUT_DATA_TYPES: uint8
+ MLC_ML_MODEL_PRECISION: uint8
+ MLC_ML_MODEL_WEIGHT_DATA_TYPES: uint8
group: precision
wget:
add_deps_recursive:
dae:
tags: _wget
env:
- CM_DOWNLOAD_FILENAME: checkpoint.zip
- CM_DOWNLOAD_URL: <<>>
+ MLC_DOWNLOAD_FILENAME: checkpoint.zip
+ MLC_DOWNLOAD_URL: <<>>
group: download-tool
diff --git a/script/get-ml-model-gptj/run-int4-calibration.sh b/script/get-ml-model-gptj/run-int4-calibration.sh
index 45c3669e5..112716bc4 100644
--- a/script/get-ml-model-gptj/run-int4-calibration.sh
+++ b/script/get-ml-model-gptj/run-int4-calibration.sh
@@ -1,9 +1,9 @@
#!/bin/bash
-export PATH=${CM_CONDA_BIN_PATH}:$PATH
+export PATH=${MLC_CONDA_BIN_PATH}:$PATH
-echo ${CM_CALIBRATION_CODE_ROOT}
-cd ${CM_CALIBRATION_CODE_ROOT}/gpt-j/pytorch-cpu/INT4
+echo ${MLC_CALIBRATION_CODE_ROOT}
+cd ${MLC_CALIBRATION_CODE_ROOT}/gpt-j/pytorch-cpu/INT4
pip install -r requirements.txt
bash run_calibration_int4.sh
diff --git a/script/get-ml-model-gptj/run-intel.sh b/script/get-ml-model-gptj/run-intel.sh
index f6cb2134d..a83d69bf1 100644
--- a/script/get-ml-model-gptj/run-intel.sh
+++ b/script/get-ml-model-gptj/run-intel.sh
@@ -1,17 +1,17 @@
#!/bin/bash
-export PATH=${CM_CONDA_BIN_PATH}:$PATH
+export PATH=${MLC_CONDA_BIN_PATH}:$PATH
-export CALIBRATION_DATA_JSON=${CM_CALIBRATION_DATASET_CNNDM_PATH}
+export CALIBRATION_DATA_JSON=${MLC_CALIBRATION_DATASET_CNNDM_PATH}
-if [[ ${CM_ML_MODEL_WEIGHT_DATA_TYPES} == "int4" ]]; then
+if [[ ${MLC_ML_MODEL_WEIGHT_DATA_TYPES} == "int4" ]]; then
export INT4_CALIBRATION_DIR=${PWD}/quantized-int4-model
- bash ${CM_TMP_CURRENT_SCRIPT_PATH}/run-int4-calibration.sh
- cd ${CM_HARNESS_CODE_ROOT}
+ bash ${MLC_TMP_CURRENT_SCRIPT_PATH}/run-int4-calibration.sh
+ cd ${MLC_HARNESS_CODE_ROOT}
bash run_quantization_int4.sh
else
- cd ${CM_HARNESS_CODE_ROOT}
+ cd ${MLC_HARNESS_CODE_ROOT}
bash run_quantization.sh
fi
diff --git a/script/get-ml-model-gptj/run-nvidia.sh b/script/get-ml-model-gptj/run-nvidia.sh
index b16ee45da..a81d52f7c 100644
--- a/script/get-ml-model-gptj/run-nvidia.sh
+++ b/script/get-ml-model-gptj/run-nvidia.sh
@@ -1,21 +1,21 @@
#!/bin/bash
-if [[ ! -e ${CM_NVIDIA_MLPERF_SCRATCH_PATH}/models/GPTJ-6B/checkpoint-final ]]; then
- mkdir -p ${CM_NVIDIA_MLPERF_SCRATCH_PATH}/models/GPTJ-6B/
- cp -r ${GPTJ_CHECKPOINT_PATH} ${CM_NVIDIA_MLPERF_SCRATCH_PATH}/models/GPTJ-6B/checkpoint-final
+if [[ ! -e ${MLC_NVIDIA_MLPERF_SCRATCH_PATH}/models/GPTJ-6B/checkpoint-final ]]; then
+ mkdir -p ${MLC_NVIDIA_MLPERF_SCRATCH_PATH}/models/GPTJ-6B/
+ cp -r ${GPTJ_CHECKPOINT_PATH} ${MLC_NVIDIA_MLPERF_SCRATCH_PATH}/models/GPTJ-6B/checkpoint-final
test $? -eq 0 || exit $?
fi
-echo "cd ${CM_TENSORRT_LLM_CHECKOUT_PATH}"
-cd ${CM_TENSORRT_LLM_CHECKOUT_PATH}
+echo "cd ${MLC_TENSORRT_LLM_CHECKOUT_PATH}"
+cd ${MLC_TENSORRT_LLM_CHECKOUT_PATH}
make -C docker build
test $? -eq 0 || exit $?
-export RUN_CMD="bash -c 'python3 scripts/build_wheel.py -a=${CM_GPU_ARCH} --clean --install --trt_root /usr/local/tensorrt/ && python examples/quantization/quantize.py --dtype=float16 --output_dir=/mnt/models/GPTJ-6B/fp8-quantized-ammo/GPTJ-FP8-quantized --model_dir=/mnt/models/GPTJ-6B/checkpoint-final --qformat=fp8 --kv_cache_dtype=fp8 '"
-export DOCKER_RUN_ARGS=" -v ${CM_NVIDIA_MLPERF_SCRATCH_PATH}:/mnt"
+export RUN_CMD="bash -c 'python3 scripts/build_wheel.py -a=${MLC_GPU_ARCH} --clean --install --trt_root /usr/local/tensorrt/ && python examples/quantization/quantize.py --dtype=float16 --output_dir=/mnt/models/GPTJ-6B/fp8-quantized-ammo/GPTJ-FP8-quantized --model_dir=/mnt/models/GPTJ-6B/checkpoint-final --qformat=fp8 --kv_cache_dtype=fp8 '"
+export DOCKER_RUN_ARGS=" -v ${MLC_NVIDIA_MLPERF_SCRATCH_PATH}:/mnt"
make -C docker run LOCAL_USER=1
test $? -eq 0 || exit $?
-PYTHONPATH='' ${CM_PYTHON_BIN_WITH_PATH} ${CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH}/code/gptj/tensorrt/onnx_tune.py --fp8-scalers-path=${CM_NVIDIA_MLPERF_SCRATCH_PATH}/models/GPTJ-6B/fp8-quantized-ammo/GPTJ-FP8-quantized/rank0.safetensors --scaler 1.005 --index 15
+PYTHONPATH='' ${MLC_PYTHON_BIN_WITH_PATH} ${MLC_MLPERF_INFERENCE_NVIDIA_CODE_PATH}/code/gptj/tensorrt/onnx_tune.py --fp8-scalers-path=${MLC_NVIDIA_MLPERF_SCRATCH_PATH}/models/GPTJ-6B/fp8-quantized-ammo/GPTJ-FP8-quantized/rank0.safetensors --scaler 1.005 --index 15
test $? -eq 0 || exit $?
diff --git a/script/get-ml-model-gptj/run-saxml-quantized.sh b/script/get-ml-model-gptj/run-saxml-quantized.sh
index e74862be0..019b455c5 100644
--- a/script/get-ml-model-gptj/run-saxml-quantized.sh
+++ b/script/get-ml-model-gptj/run-saxml-quantized.sh
@@ -1,6 +1,6 @@
#!/bin/bash
CUR=$PWD
-${CM_PYTHON_BIN_WITH_PATH} -m pip install jaxlib==0.4.24
-cd ${CM_TMP_CURRENT_SCRIPT_PATH}
-${CM_PYTHON_BIN_WITH_PATH} ${CM_SAXML_REPO_PATH}/saxml/tools/offline_quantize.py --input_dir ${CM_ML_MODEL_FILE_WITH_PATH}/checkpoint_00000000/state --output_dir ${CUR}/int8_ckpt/checkpoint_00000000/state --quantization_configs "gptj" > offline_quantize2.log
+${MLC_PYTHON_BIN_WITH_PATH} -m pip install jaxlib==0.4.24
+cd ${MLC_TMP_CURRENT_SCRIPT_PATH}
+${MLC_PYTHON_BIN_WITH_PATH} ${MLC_SAXML_REPO_PATH}/saxml/tools/offline_quantize.py --input_dir ${MLC_ML_MODEL_FILE_WITH_PATH}/checkpoint_00000000/state --output_dir ${CUR}/int8_ckpt/checkpoint_00000000/state --quantization_configs "gptj" > offline_quantize2.log
test $? -eq 0 || exit $?
diff --git a/script/get-ml-model-gptj/run-saxml.sh b/script/get-ml-model-gptj/run-saxml.sh
index 031d736c0..78ad4a92f 100644
--- a/script/get-ml-model-gptj/run-saxml.sh
+++ b/script/get-ml-model-gptj/run-saxml.sh
@@ -1,8 +1,8 @@
#!/bin/bash
CUR=$PWD
rm -rf pax_gptj_checkpoint
-cd ${CM_TMP_CURRENT_SCRIPT_PATH}
-${CM_PYTHON_BIN_WITH_PATH} -m convert_gptj_ckpt --base ${GPTJ_CHECKPOINT_PATH} --pax ${CUR}/pax_gptj_checkpoint
+cd ${MLC_TMP_CURRENT_SCRIPT_PATH}
+${MLC_PYTHON_BIN_WITH_PATH} -m convert_gptj_ckpt --base ${GPTJ_CHECKPOINT_PATH} --pax ${CUR}/pax_gptj_checkpoint
test $? -eq 0 || exit $?
cd "$CUR"
diff --git a/script/get-ml-model-huggingface-zoo/customize.py b/script/get-ml-model-huggingface-zoo/customize.py
index e117df4e3..4f57cfb8a 100644
--- a/script/get-ml-model-huggingface-zoo/customize.py
+++ b/script/get-ml-model-huggingface-zoo/customize.py
@@ -14,17 +14,17 @@ def preprocess(i):
script_path = i['run_script_input']['path']
- path = env.get('CM_DOWNLOAD_PATH', '')
+ path = env.get('MLC_DOWNLOAD_PATH', '')
if path == '':
path = os.getcwd()
- if env.get('CM_GIT_CLONE_REPO', '') != 'yes':
- run_cmd = env.get('CM_PYTHON_BIN_WITH_PATH') + " " + \
+ if env.get('MLC_GIT_CLONE_REPO', '') != 'yes':
+ run_cmd = env.get('MLC_PYTHON_BIN_WITH_PATH') + " " + \
os.path.join(script_path, 'download_model.py')
else:
run_cmd = ''
- env['CM_RUN_CMD'] = run_cmd
+ env['MLC_RUN_CMD'] = run_cmd
return {'return': 0}
@@ -33,21 +33,21 @@ def postprocess(i):
env = i['env']
- env_key = env.get('CM_MODEL_ZOO_ENV_KEY', '')
+ env_key = env.get('MLC_MODEL_ZOO_ENV_KEY', '')
- path_file = env.get('CM_ML_MODEL_FILE_WITH_PATH', '')
+ path_file = env.get('MLC_ML_MODEL_FILE_WITH_PATH', '')
if path_file != '':
path_dir = os.path.dirname(path_file)
- env['CM_ML_MODEL_PATH'] = path_dir
+ env['MLC_ML_MODEL_PATH'] = path_dir
if env_key != '':
- env['CM_ML_MODEL_' + env_key + '_PATH'] = path_dir
+ env['MLC_ML_MODEL_' + env_key + '_PATH'] = path_dir
else:
- path_dir = env['CM_ML_MODEL_PATH']
+ path_dir = env['MLC_ML_MODEL_PATH']
if env_key != '':
- env['CM_ML_MODEL_' + env_key + '_FILE_WITH_PATH'] = path_dir
+ env['MLC_ML_MODEL_' + env_key + '_FILE_WITH_PATH'] = path_dir
return {'return': 0}
diff --git a/script/get-ml-model-huggingface-zoo/download_model.py b/script/get-ml-model-huggingface-zoo/download_model.py
index 2f3584278..f993f7ab3 100644
--- a/script/get-ml-model-huggingface-zoo/download_model.py
+++ b/script/get-ml-model-huggingface-zoo/download_model.py
@@ -1,10 +1,10 @@
from huggingface_hub import hf_hub_download
import os
-model_stub = os.environ.get('CM_MODEL_ZOO_STUB', '')
-model_task = os.environ.get('CM_MODEL_TASK', '')
+model_stub = os.environ.get('MLC_MODEL_ZOO_STUB', '')
+model_task = os.environ.get('MLC_MODEL_TASK', '')
-revision = os.environ.get('CM_HF_REVISION', '')
+revision = os.environ.get('MLC_HF_REVISION', '')
if model_task == "prune":
print("Downloading model: " + model_stub)
@@ -16,13 +16,13 @@
cache_dir=os.getcwd())
with open('tmp-run-env.out', 'w') as f:
- f.write(f"CM_ML_MODEL_FILE_WITH_PATH={os.path.join(os.getcwd(),'')}")
+ f.write(f"MLC_ML_MODEL_FILE_WITH_PATH={os.path.join(os.getcwd(),'')}")
else:
- subfolder = os.environ.get('CM_HF_SUBFOLDER', '')
- full_subfolder = os.environ.get('CM_HF_FULL_SUBFOLDER', '')
+ subfolder = os.environ.get('MLC_HF_SUBFOLDER', '')
+ full_subfolder = os.environ.get('MLC_HF_FULL_SUBFOLDER', '')
- model_filename = os.environ.get('CM_MODEL_ZOO_FILENAME', '')
+ model_filename = os.environ.get('MLC_MODEL_ZOO_FILENAME', '')
if model_filename == '':
model_filename = 'model.onnx'
@@ -102,4 +102,4 @@ def list_hf_files(path):
print('')
with open('tmp-run-env.out', 'w') as f:
- f.write(f"CM_ML_MODEL_FILE_WITH_PATH={base_model_filepath}")
+ f.write(f"MLC_ML_MODEL_FILE_WITH_PATH={base_model_filepath}")
diff --git a/script/get-ml-model-huggingface-zoo/meta.yaml b/script/get-ml-model-huggingface-zoo/meta.yaml
index b8235a57d..aef4f541f 100644
--- a/script/get-ml-model-huggingface-zoo/meta.yaml
+++ b/script/get-ml-model-huggingface-zoo/meta.yaml
@@ -11,17 +11,17 @@ deps:
- tags: get,generic-python-lib,_huggingface_hub
env: {}
input_mapping:
- download_path: CM_DOWNLOAD_PATH
- env_key: CM_MODEL_ZOO_ENV_KEY
- full_subfolder: CM_HF_FULL_SUBFOLDER
- model_filename: CM_MODEL_ZOO_FILENAME
- revision: CM_HF_REVISION
- subfolder: CM_HF_SUBFOLDER
+ download_path: MLC_DOWNLOAD_PATH
+ env_key: MLC_MODEL_ZOO_ENV_KEY
+ full_subfolder: MLC_HF_FULL_SUBFOLDER
+ model_filename: MLC_MODEL_ZOO_FILENAME
+ revision: MLC_HF_REVISION
+ subfolder: MLC_HF_SUBFOLDER
new_env_keys:
-- CM_ML_MODEL*
-- CM_MODEL_ZOO_STUB
+- MLC_ML_MODEL*
+- MLC_MODEL_ZOO_STUB
print_env_at_the_end:
- CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model
+ MLC_ML_MODEL_FILE_WITH_PATH: Path to the ML model
tags:
- get
- ml-model
@@ -37,28 +37,28 @@ variations:
deps:
- tags: get,hf-cli,_with-login
enable_if_env:
- CM_HF_TOKEN:
+ MLC_HF_TOKEN:
- on
- env:
- CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_ML_MODEL_PATH
+ MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_ML_MODEL_PATH
tags: get,git,repo,_lfs
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
update_tags_from_env_with_prefix:
_repo.https://huggingface.co/:
- - CM_MODEL_ZOO_STUB
+ - MLC_MODEL_ZOO_STUB
env:
- CM_GIT_CLONE_REPO: 'yes'
+ MLC_GIT_CLONE_REPO: 'yes'
group: download-type
model-stub.#:
env:
- CM_MODEL_ZOO_STUB: '#'
+ MLC_MODEL_ZOO_STUB: '#'
onnx-subfolder:
env:
- CM_HF_SUBFOLDER: onnx
+ MLC_HF_SUBFOLDER: onnx
pierreguillou_bert_base_cased_squad_v1.1_portuguese:
env:
- CM_MODEL_ZOO_STUB: pierreguillou/bert-base-cased-squad-v1.1-portuguese
+ MLC_MODEL_ZOO_STUB: pierreguillou/bert-base-cased-squad-v1.1-portuguese
prune:
env:
- CM_MODEL_TASK: prune
+ MLC_MODEL_TASK: prune
diff --git a/script/get-ml-model-huggingface-zoo/run.bat b/script/get-ml-model-huggingface-zoo/run.bat
index 6a4faa929..edc748162 100644
--- a/script/get-ml-model-huggingface-zoo/run.bat
+++ b/script/get-ml-model-huggingface-zoo/run.bat
@@ -1,3 +1,3 @@
-echo %CM_RUN_CMD%
-call %CM_RUN_CMD%
+echo %MLC_RUN_CMD%
+call %MLC_RUN_CMD%
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/script/get-ml-model-huggingface-zoo/run.sh b/script/get-ml-model-huggingface-zoo/run.sh
index 111f4f2c8..ebffc6e22 100644
--- a/script/get-ml-model-huggingface-zoo/run.sh
+++ b/script/get-ml-model-huggingface-zoo/run.sh
@@ -1,4 +1,4 @@
#!/bin/bash
-echo ${CM_RUN_CMD}
-eval ${CM_RUN_CMD}
+echo ${MLC_RUN_CMD}
+eval ${MLC_RUN_CMD}
test $? -eq 0 || exit $?
diff --git a/script/get-ml-model-llama2/customize.py b/script/get-ml-model-llama2/customize.py
index 1c091c12b..7e8e0b4f1 100644
--- a/script/get-ml-model-llama2/customize.py
+++ b/script/get-ml-model-llama2/customize.py
@@ -7,28 +7,28 @@ def preprocess(i):
os_info = i['os_info']
env = i['env']
- if env.get('CM_TMP_ML_MODEL_PROVIDER', '') == 'nvidia':
+ if env.get('MLC_TMP_ML_MODEL_PROVIDER', '') == 'nvidia':
i['run_script_input']['script_name'] = 'run-nvidia'
gpu_arch = int(
float(
- env['CM_CUDA_DEVICE_PROP_GPU_COMPUTE_CAPABILITY']) *
+ env['MLC_CUDA_DEVICE_PROP_GPU_COMPUTE_CAPABILITY']) *
10)
- env['CM_GPU_ARCH'] = gpu_arch
- env['CM_TMP_REQUIRE_DOWNLOAD'] = 'no'
+ env['MLC_GPU_ARCH'] = gpu_arch
+ env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'no'
else:
path = env.get('LLAMA2_CHECKPOINT_PATH', '').strip()
- if env.get('CM_TMP_ML_MODEL_PROVIDER', '') == 'amd':
- env['CM_TMP_REQUIRE_DOWNLOAD'] = 'no'
+ if env.get('MLC_TMP_ML_MODEL_PROVIDER', '') == 'amd':
+ env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'no'
i['run_script_input']['script_name'] = 'run-amd'
env['AMD_CODE_DIR'] = os.path.join(
- env['CM_MLPERF_INFERENCE_RESULTS_PATH'], 'closed', 'AMD', 'code')
- env['CM_LLAMA2_FINAL_SAFE_TENSORS_ROOT'] = os.getcwd()
- env['CM_LLAMA2_FINAL_SAFE_TENSORS_PATH'] = os.path.join(
- env['CM_LLAMA2_FINAL_SAFE_TENSORS_ROOT'], "llama.safetensors")
+ env['MLC_MLPERF_INFERENCE_RESULTS_PATH'], 'closed', 'AMD', 'code')
+ env['MLC_LLAMA2_FINAL_SAFE_TENSORS_ROOT'] = os.getcwd()
+ env['MLC_LLAMA2_FINAL_SAFE_TENSORS_PATH'] = os.path.join(
+ env['MLC_LLAMA2_FINAL_SAFE_TENSORS_ROOT'], "llama.safetensors")
else:
if path == '' or not os.path.exists(path):
- env['CM_TMP_REQUIRE_DOWNLOAD'] = 'yes'
+ env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'yes'
return {'return': 0}
@@ -37,10 +37,10 @@ def postprocess(i):
env = i['env']
if env.get('LLAMA2_CHECKPOINT_PATH', '') == '':
- env['LLAMA2_CHECKPOINT_PATH'] = env['CM_ML_MODEL_PATH']
+ env['LLAMA2_CHECKPOINT_PATH'] = env['MLC_ML_MODEL_PATH']
else:
- env['CM_ML_MODEL_PATH'] = env['LLAMA2_CHECKPOINT_PATH']
- env['CM_ML_MODEL_LLAMA2_FILE_WITH_PATH'] = env['LLAMA2_CHECKPOINT_PATH']
- env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_PATH']
+ env['MLC_ML_MODEL_PATH'] = env['LLAMA2_CHECKPOINT_PATH']
+ env['MLC_ML_MODEL_LLAMA2_FILE_WITH_PATH'] = env['LLAMA2_CHECKPOINT_PATH']
+ env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_ML_MODEL_PATH']
return {'return': 0}
diff --git a/script/get-ml-model-llama2/meta.yaml b/script/get-ml-model-llama2/meta.yaml
index fe082718e..265b66925 100644
--- a/script/get-ml-model-llama2/meta.yaml
+++ b/script/get-ml-model-llama2/meta.yaml
@@ -6,28 +6,28 @@ category: AI/ML models
docker:
real_run: false
env:
- CM_ML_MODEL_DATASET: openorca
- CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no'
+ MLC_ML_MODEL_DATASET: openorca
+ MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no'
input_mapping:
checkpoint: LLAMA2_CHECKPOINT_PATH
new_env_keys:
-- CM_ML_MODEL_*
+- MLC_ML_MODEL_*
- LLAMA2_CHECKPOINT_PATH
-- CM_NVIDIA_TP_SIZE
-- CM_LLAMA2_FINAL_SAFE_TENSORS_PATH
+- MLC_NVIDIA_TP_SIZE
+- MLC_LLAMA2_FINAL_SAFE_TENSORS_PATH
prehook_deps:
- enable_if_env:
- CM_TMP_REQUIRE_DOWNLOAD:
+ MLC_TMP_REQUIRE_DOWNLOAD:
- 'yes'
env: {}
extra_cache_tags: llama2,llama-2
force_env_keys:
- - CM_GIT_CHECKOUT_FOLDER
+ - MLC_GIT_CHECKOUT_FOLDER
names:
- hf-zoo
tags: get,ml-model,huggingface,zoo,_clone-repo
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
print_env_at_the_end:
LLAMA2_CHECKPOINT_PATH: LLAMA2 checkpoint path
tags:
@@ -42,45 +42,45 @@ uid: 5db97be9f61244c6
variations:
L40s:
env:
- CM_NVIDIA_TP_SIZE: 4
+ MLC_NVIDIA_TP_SIZE: 4
group: gpu
amd:
default_env:
- CM_LLAMA2_QUANTIZATION_DEVICE: ''
+ MLC_LLAMA2_QUANTIZATION_DEVICE: ''
default_variations:
framework: pytorch
precision: fp8
env:
- CM_TMP_ML_MODEL_PROVIDER: amd
+ MLC_TMP_ML_MODEL_PROVIDER: amd
group: model-provider
new_env_keys:
- - CM_LLAMA2_FINAL_SAFE_TENSORS_ROOT
- - CM_LLAMA2_FINAL_SAFE_TENSORS_PATH
+ - MLC_LLAMA2_FINAL_SAFE_TENSORS_ROOT
+ - MLC_LLAMA2_FINAL_SAFE_TENSORS_PATH
batch_size.#:
env:
- CM_ML_MODEL_BATCH_SIZE: '#'
+ MLC_ML_MODEL_BATCH_SIZE: '#'
fp32:
default: true
env:
- CM_ML_MODEL_INPUT_DATA_TYPES: fp32
- CM_ML_MODEL_PRECISION: fp32
- CM_ML_MODEL_WEIGHT_DATA_TYPES: fp32
+ MLC_ML_MODEL_INPUT_DATA_TYPES: fp32
+ MLC_ML_MODEL_PRECISION: fp32
+ MLC_ML_MODEL_WEIGHT_DATA_TYPES: fp32
group: precision
fp8:
env:
- CM_ML_MODEL_INPUT_DATA_TYPES: fp8
- CM_ML_MODEL_PRECISION: fp8
- CM_ML_MODEL_WEIGHT_DATA_TYPES: fp8
+ MLC_ML_MODEL_INPUT_DATA_TYPES: fp8
+ MLC_ML_MODEL_PRECISION: fp8
+ MLC_ML_MODEL_WEIGHT_DATA_TYPES: fp8
group: precision
generic:
env:
- CM_NVIDIA_TP_SIZE: 2
+ MLC_NVIDIA_TP_SIZE: 2
group: gpu
int8:
env:
- CM_ML_MODEL_INPUT_DATA_TYPES: int8
- CM_ML_MODEL_PRECISION: int8
- CM_ML_MODEL_WEIGHT_DATA_TYPES: int8
+ MLC_ML_MODEL_INPUT_DATA_TYPES: int8
+ MLC_ML_MODEL_PRECISION: int8
+ MLC_ML_MODEL_WEIGHT_DATA_TYPES: int8
group: precision
meta-llama/Llama-2-70b-chat-hf:
adr:
@@ -88,27 +88,27 @@ variations:
tags: _model-stub.meta-llama/Llama-2-70b-chat-hf
default: true
env:
- CM_GIT_CHECKOUT_FOLDER: Llama-2-70b-chat-hf
- CM_MODEL_ZOO_ENV_KEY: LLAMA2
+ MLC_GIT_CHECKOUT_FOLDER: Llama-2-70b-chat-hf
+ MLC_MODEL_ZOO_ENV_KEY: LLAMA2
group: huggingface-stub
meta-llama/Llama-2-7b-chat-hf:
adr:
hf-zoo:
tags: _model-stub.meta-llama/Llama-2-7b-chat-hf
env:
- CM_GIT_CHECKOUT_FOLDER: Llama-2-7b-chat-hf
- CM_MODEL_ZOO_ENV_KEY: LLAMA2
+ MLC_GIT_CHECKOUT_FOLDER: Llama-2-7b-chat-hf
+ MLC_MODEL_ZOO_ENV_KEY: LLAMA2
group: huggingface-stub
nvidia:
default_variations:
framework: pytorch
env:
- CM_TMP_ML_MODEL_PROVIDER: nvidia
+ MLC_TMP_ML_MODEL_PROVIDER: nvidia
group: model-provider
pytorch:
default: true
env:
- CM_ML_MODEL_FRAMEWORK: pytorch
+ MLC_ML_MODEL_FRAMEWORK: pytorch
group: framework
pytorch,amd:
default_variations:
@@ -125,9 +125,9 @@ variations:
tags: get,ml-model,llama2-70b,_fp32,_pytorch
- tags: get,preprocessed,dataset,openorca,_calibration,_mlc
- env:
- CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_INFERENCE_RESULTS_PATH
+ MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_MLPERF_INFERENCE_RESULTS_PATH
extra_cache_tags: inference,results
- tags: get,git,repo,_repo.https://github.com/mlcommons/inference_results_v4.1,_branch.cm-code-only
+ tags: get,git,repo,_repo.https://github.com/mlcommons/inference_results_v4.1,_branch.mlc-code-only
- tags: get,generic-python-lib,_quark-amd
- tags: get,generic-python-lib,_package.nltk
- tags: get,generic-python-lib,_torch_cuda
@@ -140,7 +140,7 @@ variations:
precision: fp8
deps:
- env:
- CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_TENSORRT_LLM_CHECKOUT_PATH
+ MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_TENSORRT_LLM_CHECKOUT_PATH
extra_cache_tags: tensorrt-llm
tags: get,git,repo,_repo.https://github.com/NVIDIA/TensorRT-LLM.git,_sha.0ab9d17a59c284d2de36889832fe9fc7c8697604
- names:
@@ -164,15 +164,15 @@ variations:
hf-zoo:
tags: _model-stub.#
env:
- CM_MODEL_ZOO_ENV_KEY: LLAMA2
+ MLC_MODEL_ZOO_ENV_KEY: LLAMA2
group: huggingface-stub
tp-size.#:
env:
- CM_NVIDIA_TP_SIZE: '#'
+ MLC_NVIDIA_TP_SIZE: '#'
group: gpu
uint8:
env:
- CM_ML_MODEL_INPUT_DATA_TYPES: uint8
- CM_ML_MODEL_PRECISION: uint8
- CM_ML_MODEL_WEIGHT_DATA_TYPES: uint8
+ MLC_ML_MODEL_INPUT_DATA_TYPES: uint8
+ MLC_ML_MODEL_PRECISION: uint8
+ MLC_ML_MODEL_WEIGHT_DATA_TYPES: uint8
group: precision
diff --git a/script/get-ml-model-llama2/run-amd.sh b/script/get-ml-model-llama2/run-amd.sh
index 6f3ee48e9..308a5c294 100644
--- a/script/get-ml-model-llama2/run-amd.sh
+++ b/script/get-ml-model-llama2/run-amd.sh
@@ -3,19 +3,19 @@
code_dir=$AMD_CODE_DIR
model_dir=${LLAMA2_CHECKPOINT_PATH}
output_dir=$PWD
-calib_dataset=${CM_DATASET_OPENORCA_CALIBRATION_PATH}
+calib_dataset=${MLC_DATASET_OPENORCA_CALIBRATION_PATH}
cmd="cd $code_dir/llama2-70b-99.9/tools/quark-0.1.0+a9827f5-mlperf/examples/torch/language_modeling/"
echo $cmd
eval $cmd
test $? -eq 0 || exit $?
-if [[ "x$CM_LLAMA2_QUANTIZATION_DEVICE" == "x" ]]; then
+if [[ "x$MLC_LLAMA2_QUANTIZATION_DEVICE" == "x" ]]; then
device_str=""
else
- device_str="--device $CM_LLAMA2_QUANTIZATION_DEVICE"
+ device_str="--device $MLC_LLAMA2_QUANTIZATION_DEVICE"
fi
-cmd="${CM_PYTHON_BIN_WITH_PATH} quantize_quark.py --model_dir $model_dir \
+cmd="${MLC_PYTHON_BIN_WITH_PATH} quantize_quark.py --model_dir $model_dir \
--output_dir $output_dir \
--quant_scheme w_fp8_a_fp8_o_fp8 \
--dataset $calib_dataset \
diff --git a/script/get-ml-model-llama2/run-nvidia.sh b/script/get-ml-model-llama2/run-nvidia.sh
index 2e576280b..d38e911cb 100644
--- a/script/get-ml-model-llama2/run-nvidia.sh
+++ b/script/get-ml-model-llama2/run-nvidia.sh
@@ -1,29 +1,29 @@
#!/bin/bash
-echo "Set tp size is ${CM_NVIDIA_TP_SIZE}"
+echo "Set tp size is ${MLC_NVIDIA_TP_SIZE}"
-if [[ ! -e ${CM_NVIDIA_MLPERF_SCRATCH_PATH}/models/Llama2/Llama-2-70b-chat-hf ]]; then
- mkdir -p ${CM_NVIDIA_MLPERF_SCRATCH_PATH}/models/Llama2/Llama-2-70b-chat-hf
+if [[ ! -e ${MLC_NVIDIA_MLPERF_SCRATCH_PATH}/models/Llama2/Llama-2-70b-chat-hf ]]; then
+ mkdir -p ${MLC_NVIDIA_MLPERF_SCRATCH_PATH}/models/Llama2/Llama-2-70b-chat-hf
cd ${LLAMA2_CHECKPOINT_PATH}
- cp -r ${LLAMA2_CHECKPOINT_PATH}/* ${CM_NVIDIA_MLPERF_SCRATCH_PATH}/models/Llama2/Llama-2-70b-chat-hf
+ cp -r ${LLAMA2_CHECKPOINT_PATH}/* ${MLC_NVIDIA_MLPERF_SCRATCH_PATH}/models/Llama2/Llama-2-70b-chat-hf
test $? -eq 0 || exit $?
fi
-echo "cd ${CM_TENSORRT_LLM_CHECKOUT_PATH}"
-cd ${CM_TENSORRT_LLM_CHECKOUT_PATH}
+echo "cd ${MLC_TENSORRT_LLM_CHECKOUT_PATH}"
+cd ${MLC_TENSORRT_LLM_CHECKOUT_PATH}
make -C docker build
test $? -eq 0 || exit $?
-if [ "${CM_NVIDIA_TP_SIZE}" -eq 1 ]; then
- RUN_CMD="bash -c 'python3 scripts/build_wheel.py -a=${CM_GPU_ARCH} --clean --install --trt_root /usr/local/tensorrt/ && python examples/quantization/quantize.py --dtype=float16 --output_dir=/mnt/models/Llama2/fp8-quantized-ammo/llama2-70b-chat-hf-tp${CM_NVIDIA_TP_SIZE}pp1-fp8-02072024 --model_dir=/mnt/models/Llama2/Llama-2-70b-chat-hf --qformat=fp8 --kv_cache_dtype=fp8 --tp_size ${CM_NVIDIA_TP_SIZE}'"
+if [ "${MLC_NVIDIA_TP_SIZE}" -eq 1 ]; then
+ RUN_CMD="bash -c 'python3 scripts/build_wheel.py -a=${MLC_GPU_ARCH} --clean --install --trt_root /usr/local/tensorrt/ && python examples/quantization/quantize.py --dtype=float16 --output_dir=/mnt/models/Llama2/fp8-quantized-ammo/llama2-70b-chat-hf-tp${MLC_NVIDIA_TP_SIZE}pp1-fp8-02072024 --model_dir=/mnt/models/Llama2/Llama-2-70b-chat-hf --qformat=fp8 --kv_cache_dtype=fp8 --tp_size ${MLC_NVIDIA_TP_SIZE}'"
else
- RUN_CMD="bash -c 'python3 scripts/build_wheel.py -a=${CM_GPU_ARCH} --clean --install --trt_root /usr/local/tensorrt/ && python examples/quantization/quantize.py --dtype=float16 --output_dir=/mnt/models/Llama2/fp8-quantized-ammo/llama2-70b-chat-hf-tp${CM_NVIDIA_TP_SIZE}pp1-fp8 --model_dir=/mnt/models/Llama2/Llama-2-70b-chat-hf --qformat=fp8 --kv_cache_dtype=fp8 --tp_size ${CM_NVIDIA_TP_SIZE}'"
+ RUN_CMD="bash -c 'python3 scripts/build_wheel.py -a=${MLC_GPU_ARCH} --clean --install --trt_root /usr/local/tensorrt/ && python examples/quantization/quantize.py --dtype=float16 --output_dir=/mnt/models/Llama2/fp8-quantized-ammo/llama2-70b-chat-hf-tp${MLC_NVIDIA_TP_SIZE}pp1-fp8 --model_dir=/mnt/models/Llama2/Llama-2-70b-chat-hf --qformat=fp8 --kv_cache_dtype=fp8 --tp_size ${MLC_NVIDIA_TP_SIZE}'"
fi
-DOCKER_RUN_ARGS=" -v ${CM_NVIDIA_MLPERF_SCRATCH_PATH}:/mnt"
+DOCKER_RUN_ARGS=" -v ${MLC_NVIDIA_MLPERF_SCRATCH_PATH}:/mnt"
export DOCKER_RUN_ARGS="$DOCKER_RUN_ARGS"
export RUN_CMD="$RUN_CMD"
make -C docker run LOCAL_USER=1
test $? -eq 0 || exit $?
-echo "MLPerf Nvidia scratch path is:${CM_NVIDIA_MLPERF_SCRATCH_PATH}"
+echo "MLPerf Nvidia scratch path is:${MLC_NVIDIA_MLPERF_SCRATCH_PATH}"
diff --git a/script/get-ml-model-llama3/customize.py b/script/get-ml-model-llama3/customize.py
index 71d309b3a..2429a1e92 100644
--- a/script/get-ml-model-llama3/customize.py
+++ b/script/get-ml-model-llama3/customize.py
@@ -9,18 +9,18 @@ def preprocess(i):
# skip download and register in cache if the llama3 checkpoint path is
# already defined by the user
- if env.get('CM_ML_MODEL_LLAMA3_CHECKPOINT_PATH', '') != '':
- env['LLAMA3_CHECKPOINT_PATH'] = env['CM_ML_MODEL_LLAMA3_CHECKPOINT_PATH']
+ if env.get('MLC_ML_MODEL_LLAMA3_CHECKPOINT_PATH', '') != '':
+ env['LLAMA3_CHECKPOINT_PATH'] = env['MLC_ML_MODEL_LLAMA3_CHECKPOINT_PATH']
return {'return': 0}
- path = env.get('CM_OUTDIRNAME', '').strip()
+ path = env.get('MLC_OUTDIRNAME', '').strip()
if path != "":
os.makedirs(path, exist_ok=True)
- env['CM_GIT_CHECKOUT_FOLDER'] = os.path.join(
- path, env['CM_ML_MODEL_NAME'])
+ env['MLC_GIT_CHECKOUT_FOLDER'] = os.path.join(
+ path, env['MLC_ML_MODEL_NAME'])
- env['CM_TMP_REQUIRE_DOWNLOAD'] = 'yes'
+ env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'yes'
return {'return': 0}
@@ -29,7 +29,7 @@ def postprocess(i):
env = i['env']
- env['CM_ML_MODEL_LLAMA3_CHECKPOINT_PATH'] = env['LLAMA3_CHECKPOINT_PATH']
- env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_PATH']
+ env['MLC_ML_MODEL_LLAMA3_CHECKPOINT_PATH'] = env['LLAMA3_CHECKPOINT_PATH']
+ env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_ML_MODEL_PATH']
return {'return': 0}
diff --git a/script/get-ml-model-llama3/meta.yaml b/script/get-ml-model-llama3/meta.yaml
index 376553823..f5432f3ee 100644
--- a/script/get-ml-model-llama3/meta.yaml
+++ b/script/get-ml-model-llama3/meta.yaml
@@ -4,18 +4,18 @@ automation_uid: 5b4e0237da074764
cache: true
category: AI/ML models
input_mapping:
- outdirname: CM_OUTDIRNAME
+ outdirname: MLC_OUTDIRNAME
new_env_keys:
-- CM_ML_MODEL_*
+- MLC_ML_MODEL_*
- LLAMA3_CHECKPOINT_PATH
prehook_deps:
- enable_if_env:
- CM_TMP_REQUIRE_DOWNLOAD:
+ MLC_TMP_REQUIRE_DOWNLOAD:
- 'yes'
env: {}
extra_cache_tags: llama3,llama-3
force_env_keys:
- - CM_GIT_CHECKOUT_FOLDER
+ - MLC_GIT_CHECKOUT_FOLDER
names:
- hf-zoo
tags: get,ml-model,huggingface,zoo,_clone-repo
@@ -33,9 +33,9 @@ variations:
fp16:
default: true
env:
- CM_ML_MODEL_INPUT_DATA_TYPES: fp16
- CM_ML_MODEL_PRECISION: fp16
- CM_ML_MODEL_WEIGHT_DATA_TYPES: fp16
+ MLC_ML_MODEL_INPUT_DATA_TYPES: fp16
+ MLC_ML_MODEL_PRECISION: fp16
+ MLC_ML_MODEL_WEIGHT_DATA_TYPES: fp16
group: precision
meta-llama/Llama-3.1-405B-Instruct:
adr:
@@ -43,26 +43,26 @@ variations:
tags: _model-stub.meta-llama/Llama-3.1-405B-Instruct
default: true
env:
- CM_ML_MODEL_NAME: Llama-3-405b-instruct
- CM_MODEL_ZOO_ENV_KEY: LLAMA3
+ MLC_ML_MODEL_NAME: Llama-3-405b-instruct
+ MLC_MODEL_ZOO_ENV_KEY: LLAMA3
group: huggingface-stub
meta-llama/Llama-3.1-8B-Instruct:
adr:
hf-zoo:
tags: _model-stub.meta-llama/Llama-3.1-8B-Instruct
env:
- CM_ML_MODEL_NAME: Llama-3-8b-instruct
- CM_MODEL_ZOO_ENV_KEY: LLAMA3
+ MLC_ML_MODEL_NAME: Llama-3-8b-instruct
+ MLC_MODEL_ZOO_ENV_KEY: LLAMA3
group: huggingface-stub
vllm:
default: true
env:
- CM_ML_MODEL_FRAMEWORK: vllm
+ MLC_ML_MODEL_FRAMEWORK: vllm
group: framework
stub.#:
adr:
hf-zoo:
tags: _model-stub.#
env:
- CM_MODEL_ZOO_ENV_KEY: LLAMA3
+ MLC_MODEL_ZOO_ENV_KEY: LLAMA3
group: huggingface-stub
diff --git a/script/get-ml-model-mixtral/customize.py b/script/get-ml-model-mixtral/customize.py
index 15ca81033..d33656b33 100644
--- a/script/get-ml-model-mixtral/customize.py
+++ b/script/get-ml-model-mixtral/customize.py
@@ -10,7 +10,7 @@ def preprocess(i):
path = env.get('MIXTRAL_CHECKPOINT_PATH', '').strip()
if path == '' or not os.path.exists(path):
- env['CM_TMP_REQUIRE_DOWNLOAD'] = 'yes'
+ env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'yes'
return {'return': 0}
@@ -20,9 +20,9 @@ def postprocess(i):
env = i['env']
if env.get('MIXTRAL_CHECKPOINT_PATH', '') == '':
- env['MIXTRAL_CHECKPOINT_PATH'] = env['CM_ML_MODEL_PATH']
+ env['MIXTRAL_CHECKPOINT_PATH'] = env['MLC_ML_MODEL_PATH']
else:
- env['CM_ML_MODEL_PATH'] = env['MIXTRAL_CHECKPOINT_PATH']
- env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_PATH']
+ env['MLC_ML_MODEL_PATH'] = env['MIXTRAL_CHECKPOINT_PATH']
+ env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_ML_MODEL_PATH']
return {'return': 0}
diff --git a/script/get-ml-model-mixtral/meta.yaml b/script/get-ml-model-mixtral/meta.yaml
index 358d56318..48ded49db 100644
--- a/script/get-ml-model-mixtral/meta.yaml
+++ b/script/get-ml-model-mixtral/meta.yaml
@@ -4,28 +4,28 @@ automation_uid: 5b4e0237da074764
cache: true
category: AI/ML models
env:
- CM_ML_MODEL_DATASET: ''
- CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no'
+ MLC_ML_MODEL_DATASET: ''
+ MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no'
docker:
real_run: False
input_mapping:
checkpoint: MIXTRAL_CHECKPOINT_PATH
new_env_keys:
-- CM_ML_MODEL_*
+- MLC_ML_MODEL_*
- MIXTRAL_CHECKPOINT_PATH
prehook_deps:
- enable_if_env:
- CM_TMP_REQUIRE_DOWNLOAD:
+ MLC_TMP_REQUIRE_DOWNLOAD:
- 'yes'
env: {}
extra_cache_tags: mixtral
force_env_keys:
- - CM_GIT_CHECKOUT_FOLDER
+ - MLC_GIT_CHECKOUT_FOLDER
names:
- hf-zoo
tags: get,ml-model,huggingface,zoo,_clone-repo
force_env_keys:
- - CM_OUTDIRNAME
+ - MLC_OUTDIRNAME
print_env_at_the_end:
MIXTRAL_CHECKPOINT_PATH: MIXTRAL checkpoint path
tags:
@@ -39,13 +39,13 @@ uid: 0c14127677f34ea2
variations:
batch_size.#:
env:
- CM_ML_MODEL_BATCH_SIZE: '#'
+ MLC_ML_MODEL_BATCH_SIZE: '#'
fp32:
default: true
env:
- CM_ML_MODEL_INPUT_DATA_TYPES: fp32
- CM_ML_MODEL_PRECISION: fp32
- CM_ML_MODEL_WEIGHT_DATA_TYPES: fp32
+ MLC_ML_MODEL_INPUT_DATA_TYPES: fp32
+ MLC_ML_MODEL_PRECISION: fp32
+ MLC_ML_MODEL_WEIGHT_DATA_TYPES: fp32
group: precision
mistralai/Mixtral-8x7B-Instruct-v0.1:
adr:
@@ -53,19 +53,19 @@ variations:
tags: _model-stub.mistralai/Mixtral-8x7B-Instruct-v0.1
default: true
env:
- CM_GIT_CHECKOUT_FOLDER: Mixtral-8x7B-Instruct-v0.1
- CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1
- CM_MODEL_ZOO_ENV_KEY: MIXTRAL
+ MLC_GIT_CHECKOUT_FOLDER: Mixtral-8x7B-Instruct-v0.1
+ MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1
+ MLC_MODEL_ZOO_ENV_KEY: MIXTRAL
group: huggingface-stub
pytorch:
default: true
env:
- CM_ML_MODEL_FRAMEWORK: pytorch
+ MLC_ML_MODEL_FRAMEWORK: pytorch
group: framework
stub.#:
adr:
hf-zoo:
tags: _model-stub.#
env:
- CM_MODEL_ZOO_ENV_KEY: MIXTRAL
+ MLC_MODEL_ZOO_ENV_KEY: MIXTRAL
group: huggingface-stub
diff --git a/script/get-ml-model-mobilenet/README-extra.md b/script/get-ml-model-mobilenet/README-extra.md
index 63766e960..24bc0e34f 100644
--- a/script/get-ml-model-mobilenet/README-extra.md
+++ b/script/get-ml-model-mobilenet/README-extra.md
@@ -9,7 +9,7 @@ where,
* `[VARIATION]` is one of `tf-fp32`, `tf-int8`, `onnx-v1-opset-8`, `onnx-v1-opset-11`, `onnx-int8`.
## Exported Variables
-* `CM_ML_MODEL_FILE:` Model filename
-* `CM_ML_MODEL_FILE_WITH_PATH:` Full path to model file
-* `CM_ML_MODEL_PATH:` Path to folder containing the model file
+* `MLC_ML_MODEL_FILE:` Model filename
+* `MLC_ML_MODEL_FILE_WITH_PATH:` Full path to model file
+* `MLC_ML_MODEL_PATH:` Path to folder containing the model file
* Additional exported env variables are listed in the [cm.json file](_cm.json)
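
A minimal sketch of how a downstream step might consume these exported variables after the rename. This is a hypothetical consumer for illustration only, not part of this PR; it assumes the script above has already run and populated the environment:

```python
import os

# Hypothetical downstream check: read the variables exported by
# get-ml-model-mobilenet under their renamed MLC_ prefixes.
model_file = os.environ.get('MLC_ML_MODEL_FILE', '')
model_file_with_path = os.environ.get('MLC_ML_MODEL_FILE_WITH_PATH', '')
model_dir = os.environ.get('MLC_ML_MODEL_PATH', '')

# Fail early if the exporting script did not run or the path is stale.
if not model_file_with_path or not os.path.exists(model_file_with_path):
    raise SystemExit(
        f"Model file '{model_file}' not found under '{model_dir}'")

print(f"Using model: {model_file_with_path}")
```

Any script that previously read the `CM_`-prefixed names would be updated the same way as the files in this diff: the lookup key changes, the value and semantics do not.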
diff --git a/script/get-ml-model-mobilenet/customize.py b/script/get-ml-model-mobilenet/customize.py
index 59f3c580e..6b8a8fd6b 100644
--- a/script/get-ml-model-mobilenet/customize.py
+++ b/script/get-ml-model-mobilenet/customize.py
@@ -14,8 +14,8 @@ def preprocess(i):
path = os.getcwd()
- url = env['CM_PACKAGE_URL']
- env['CM_ML_MODEL_STARTING_WEIGHTS_FILENAME'] = url
+ url = env['MLC_PACKAGE_URL']
+ env['MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME'] = url
print('Downloading from {}'.format(url))
@@ -27,30 +27,30 @@ def preprocess(i):
filename = r['filename']
- if env.get('CM_UNZIP') == "yes" or env.get('CM_UNTAR') == "yes":
- if env.get('CM_UNZIP') == "yes":
+ if env.get('MLC_UNZIP') == "yes" or env.get('MLC_UNTAR') == "yes":
+ if env.get('MLC_UNZIP') == "yes":
cmd = "unzip "
- elif env.get('CM_UNTAR') == "yes":
+ elif env.get('MLC_UNTAR') == "yes":
cmd = "tar -xvzf "
os.system(cmd + filename)
- filename = env['CM_ML_MODEL_FILE']
+ filename = env['MLC_ML_MODEL_FILE']
- extract_folder = env.get('CM_EXTRACT_FOLDER', '')
+ extract_folder = env.get('MLC_EXTRACT_FOLDER', '')
if extract_folder:
- env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join(
+ env['MLC_ML_MODEL_FILE_WITH_PATH'] = os.path.join(
path, extract_folder, filename)
else:
- env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join(path, filename)
+ env['MLC_ML_MODEL_FILE_WITH_PATH'] = os.path.join(path, filename)
else:
- env['CM_ML_MODEL_FILE'] = filename
- env['CM_ML_MODEL_FILE_WITH_PATH'] = r['path']
+ env['MLC_ML_MODEL_FILE'] = filename
+ env['MLC_ML_MODEL_FILE_WITH_PATH'] = r['path']
- env['CM_ML_MODEL_PATH'] = path
+ env['MLC_ML_MODEL_PATH'] = path
- if not os.path.exists(env['CM_ML_MODEL_FILE_WITH_PATH']):
+ if not os.path.exists(env['MLC_ML_MODEL_FILE_WITH_PATH']):
return {
- 'return': 1, 'error': f"Model file path {env['CM_ML_MODEL_FILE_WITH_PATH']} not existing. Probably the model name {env['CM_ML_MODEL_FILE']} in model meta is wrong"}
+ 'return': 1, 'error': f"Model file path {env['MLC_ML_MODEL_FILE_WITH_PATH']} not existing. Probably the model name {env['MLC_ML_MODEL_FILE']} in model meta is wrong"}
return {'return': 0}
diff --git a/script/get-ml-model-mobilenet/meta.yaml b/script/get-ml-model-mobilenet/meta.yaml
index d690ffa69..297deb5d3 100644
--- a/script/get-ml-model-mobilenet/meta.yaml
+++ b/script/get-ml-model-mobilenet/meta.yaml
@@ -4,18 +4,18 @@ automation_uid: 5b4e0237da074764
cache: true
category: AI/ML models
default_env:
- CM_ML_MODEL: mobilenet
- CM_ML_MODEL_DATASET: imagenet2012-val
- CM_ML_MODEL_INPUTS_DATA_TYPE: fp32
- CM_ML_MODEL_MOBILENET_NAME_SUFFIX: ''
- CM_ML_MODEL_RETRAINING: 'no'
- CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32
- CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no'
+ MLC_ML_MODEL: mobilenet
+ MLC_ML_MODEL_DATASET: imagenet2012-val
+ MLC_ML_MODEL_INPUTS_DATA_TYPE: fp32
+ MLC_ML_MODEL_MOBILENET_NAME_SUFFIX: ''
+ MLC_ML_MODEL_RETRAINING: 'no'
+ MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp32
+ MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no'
new_env_keys:
-- CM_ML_MODEL_*
-- CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS
+- MLC_ML_MODEL_*
+- MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS
print_env_at_the_end:
- CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model
+ MLC_ML_MODEL_FILE_WITH_PATH: Path to the ML model
tags:
- get
- ml-model
@@ -28,217 +28,217 @@ variations:
fp32:
default: true
env:
- CM_ML_MODEL_INPUTS_DATA_TYPE: fp32
- CM_ML_MODEL_MOBILENET_PRECISION: float
- CM_ML_MODEL_PRECISION: fp32
- CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32
+ MLC_ML_MODEL_INPUTS_DATA_TYPE: fp32
+ MLC_ML_MODEL_MOBILENET_PRECISION: float
+ MLC_ML_MODEL_PRECISION: fp32
+ MLC_ML_MODEL_WEIGHTS_DATA_TYPE: fp32
group: precision
from.google:
env:
- CM_DOWNLOAD_SOURCE: google
+ MLC_DOWNLOAD_SOURCE: google
group: source
from.zenodo:
env:
- CM_DOWNLOAD_SOURCE: zenodo
+ MLC_DOWNLOAD_SOURCE: zenodo
group: source
int8:
base:
- quantized_
env:
- CM_ML_MODEL_INPUTS_DATA_TYPE: int8
- CM_ML_MODEL_MOBILENET_PRECISION: int8
- CM_ML_MODEL_PRECISION: int8
- CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8
+ MLC_ML_MODEL_INPUTS_DATA_TYPE: int8
+ MLC_ML_MODEL_MOBILENET_PRECISION: int8
+ MLC_ML_MODEL_PRECISION: int8
+ MLC_ML_MODEL_WEIGHTS_DATA_TYPE: int8
group: precision
large:
env:
- CM_ML_MODEL_MOBILENET_KIND: large
+ MLC_ML_MODEL_MOBILENET_KIND: large
group: kind
large-minimalistic:
env:
- CM_ML_MODEL_MOBILENET_KIND: large-minimalistic
+ MLC_ML_MODEL_MOBILENET_KIND: large-minimalistic
group: kind
multiplier-0.25:
env:
- CM_ML_MODEL_MOBILENET_MULTIPLIER: '0.25'
- CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: '25'
+ MLC_ML_MODEL_MOBILENET_MULTIPLIER: '0.25'
+ MLC_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: '25'
group: multiplier
multiplier-0.35:
env:
- CM_ML_MODEL_MOBILENET_MULTIPLIER: '0.35'
- CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: '35'
+ MLC_ML_MODEL_MOBILENET_MULTIPLIER: '0.35'
+ MLC_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: '35'
group: multiplier
multiplier-0.5:
env:
- CM_ML_MODEL_MOBILENET_MULTIPLIER: '0.5'
- CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: '50'
+ MLC_ML_MODEL_MOBILENET_MULTIPLIER: '0.5'
+ MLC_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: '50'
group: multiplier
multiplier-0.75:
env:
- CM_ML_MODEL_MOBILENET_MULTIPLIER: '0.75'
- CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: '75'
+ MLC_ML_MODEL_MOBILENET_MULTIPLIER: '0.75'
+ MLC_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: '75'
group: multiplier
multiplier-1.0:
env:
- CM_ML_MODEL_MOBILENET_MULTIPLIER: '1.0'
- CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: '100'
+ MLC_ML_MODEL_MOBILENET_MULTIPLIER: '1.0'
+ MLC_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: '100'
group: multiplier
onnx:
env:
- CM_ML_MODEL_DATA_LAYOUT: NCHW
- CM_ML_MODEL_FRAMEWORK: onnx
+ MLC_ML_MODEL_DATA_LAYOUT: NCHW
+ MLC_ML_MODEL_FRAMEWORK: onnx
group: framework
onnx,fp32,v1:
env:
- CM_ML_MODEL_INPUT_LAYER_NAME: input:0
- CM_ML_MODEL_NORMALIZE_DATA: 'yes'
- CM_ML_MODEL_OUTPUT_LAYER_NAME: MobilenetV1/Predictions/Reshape_1:0
- CM_ML_MODEL_SUBTRACT_MEANS: 'no'
- CM_ML_MODEL_VER: '1_1.0_224'
+ MLC_ML_MODEL_INPUT_LAYER_NAME: input:0
+ MLC_ML_MODEL_NORMALIZE_DATA: 'yes'
+ MLC_ML_MODEL_OUTPUT_LAYER_NAME: MobilenetV1/Predictions/Reshape_1:0
+ MLC_ML_MODEL_SUBTRACT_MEANS: 'no'
+ MLC_ML_MODEL_VER: '1_1.0_224'
onnx,int8,v1:
env:
- CM_ML_MODEL_FILE: mobilenet_sym_no_bn.onnx
- CM_ML_MODEL_GIVEN_CHANNEL_MEANS: 128.0 128.0 128.0
- CM_ML_MODEL_INPUT_LAYER_NAME: '0'
- CM_ML_MODEL_NORMALIZE_DATA: 'no'
- CM_ML_MODEL_OUTPUT_LAYER_NAME: '169'
- CM_ML_MODEL_SUBTRACT_MEANS: 'yes'
- CM_ML_MODEL_VER: 1_1.0_224_quant
- CM_PACKAGE_URL: https://zenodo.org/record/3353417/files/Quantized%20MobileNet.zip
- CM_UNZIP: 'yes'
+ MLC_ML_MODEL_FILE: mobilenet_sym_no_bn.onnx
+ MLC_ML_MODEL_GIVEN_CHANNEL_MEANS: 128.0 128.0 128.0
+ MLC_ML_MODEL_INPUT_LAYER_NAME: '0'
+ MLC_ML_MODEL_NORMALIZE_DATA: 'no'
+ MLC_ML_MODEL_OUTPUT_LAYER_NAME: '169'
+ MLC_ML_MODEL_SUBTRACT_MEANS: 'yes'
+ MLC_ML_MODEL_VER: 1_1.0_224_quant
+ MLC_PACKAGE_URL: https://zenodo.org/record/3353417/files/Quantized%20MobileNet.zip
+ MLC_UNZIP: 'yes'
onnx,opset-11,fp32,v1:
env:
- CM_PACKAGE_URL: https://zenodo.org/record/4735651/files/mobilenet_v1_1.0_224.onnx
+ MLC_PACKAGE_URL: https://zenodo.org/record/4735651/files/mobilenet_v1_1.0_224.onnx
onnx,opset-8,fp32,v1:
env:
- CM_PACKAGE_URL: https://zenodo.org/record/3157894/files/mobilenet_v1_1.0_224.onnx
+ MLC_PACKAGE_URL: https://zenodo.org/record/3157894/files/mobilenet_v1_1.0_224.onnx
opset-11:
env:
- CM_ML_MODEL_ONNX_OPSET: '11'
+ MLC_ML_MODEL_ONNX_OPSET: '11'
group: opset-version
opset-8:
env:
- CM_ML_MODEL_ONNX_OPSET: '8'
+ MLC_ML_MODEL_ONNX_OPSET: '8'
group: opset-version
quantized_:
env:
- CM_ML_MODEL_MOBILENET_NAME_SUFFIX: _quant
- CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'yes'
+ MLC_ML_MODEL_MOBILENET_NAME_SUFFIX: _quant
+ MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'yes'
resolution-128:
env:
- CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.128
- CM_ML_MODEL_IMAGE_HEIGHT: '128'
- CM_ML_MODEL_IMAGE_WIDTH: '128'
- CM_ML_MODEL_MOBILENET_RESOLUTION: '128'
+ MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.128
+ MLC_ML_MODEL_IMAGE_HEIGHT: '128'
+ MLC_ML_MODEL_IMAGE_WIDTH: '128'
+ MLC_ML_MODEL_MOBILENET_RESOLUTION: '128'
group: resolution
resolution-160:
env:
- CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.160
- CM_ML_MODEL_IMAGE_HEIGHT: '160'
- CM_ML_MODEL_IMAGE_WIDTH: '160'
- CM_ML_MODEL_MOBILENET_RESOLUTION: '160'
+ MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.160
+ MLC_ML_MODEL_IMAGE_HEIGHT: '160'
+ MLC_ML_MODEL_IMAGE_WIDTH: '160'
+ MLC_ML_MODEL_MOBILENET_RESOLUTION: '160'
group: resolution
resolution-192:
env:
- CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.192
- CM_ML_MODEL_IMAGE_HEIGHT: '192'
- CM_ML_MODEL_IMAGE_WIDTH: '192'
- CM_ML_MODEL_MOBILENET_RESOLUTION: '192'
+ MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.192
+ MLC_ML_MODEL_IMAGE_HEIGHT: '192'
+ MLC_ML_MODEL_IMAGE_WIDTH: '192'
+ MLC_ML_MODEL_MOBILENET_RESOLUTION: '192'
group: resolution
resolution-224:
env:
- CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.224
- CM_ML_MODEL_IMAGE_HEIGHT: '224'
- CM_ML_MODEL_IMAGE_WIDTH: '224'
- CM_ML_MODEL_MOBILENET_RESOLUTION: '224'
+ MLC_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.224
+ MLC_ML_MODEL_IMAGE_HEIGHT: '224'
+ MLC_ML_MODEL_IMAGE_WIDTH: '224'
+ MLC_ML_MODEL_MOBILENET_RESOLUTION: '224'
group: resolution
small:
env:
- CM_ML_MODEL_MOBILENET_KIND: small
+ MLC_ML_MODEL_MOBILENET_KIND: small
group: kind
small-minimalistic:
default_variations:
precision: fp32
env:
- CM_ML_MODEL_MOBILENET_KIND: small-minimalistic
+ MLC_ML_MODEL_MOBILENET_KIND: small-minimalistic
group: kind
tf:
default: true
default_variations:
source: from.google
env:
- CM_ML_MODEL_DATA_LAYOUT: NHWC
- CM_ML_MODEL_INPUT_LAYER_NAME: input
- CM_ML_MODEL_NORMALIZE_DATA: 'yes'
- CM_ML_MODEL_SUBTRACT_MEANS: 'no'
+ MLC_ML_MODEL_DATA_LAYOUT: NHWC
+ MLC_ML_MODEL_INPUT_LAYER_NAME: input
+ MLC_ML_MODEL_NORMALIZE_DATA: 'yes'
+ MLC_ML_MODEL_SUBTRACT_MEANS: 'no'
group: framework
tf,fp32,v1,resolution-224,multiplier-1.0:
env:
- CM_ML_MODEL_ACCURACY: '71.676'
+ MLC_ML_MODEL_ACCURACY: '71.676'
tf,from.google,v1:
env:
- CM_PACKAGE_URL: http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_<<<CM_ML_MODEL_MOBILENET_MULTIPLIER>>>_<<<CM_ML_MODEL_MOBILENET_RESOLUTION>>><<<CM_ML_MODEL_MOBILENET_NAME_SUFFIX>>>.tgz
- CM_UNTAR: 'yes'
+ MLC_PACKAGE_URL: http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_<<<MLC_ML_MODEL_MOBILENET_MULTIPLIER>>>_<<<MLC_ML_MODEL_MOBILENET_RESOLUTION>>><<<MLC_ML_MODEL_MOBILENET_NAME_SUFFIX>>>.tgz
+ MLC_UNTAR: 'yes'
tf,from.google,v2,fp32:
env:
- CM_ML_MODEL_FILE: mobilenet_v2_<<<CM_ML_MODEL_MOBILENET_MULTIPLIER>>>_<<<CM_ML_MODEL_MOBILENET_RESOLUTION>>>.tflite
- CM_ML_MODEL_WEIGHTS_FILE: mobilenet_v2_<<<CM_ML_MODEL_MOBILENET_MULTIPLIER>>>_<<<CM_ML_MODEL_MOBILENET_RESOLUTION>>>.ckpt.data-00000-of-00001
- CM_PACKAGE_URL: https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_<<<CM_ML_MODEL_MOBILENET_MULTIPLIER>>>_<<<CM_ML_MODEL_MOBILENET_RESOLUTION>>>.tgz
- CM_UNTAR: 'yes'
+ MLC_ML_MODEL_FILE: mobilenet_v2_<<<MLC_ML_MODEL_MOBILENET_MULTIPLIER>>>_<<<MLC_ML_MODEL_MOBILENET_RESOLUTION>>>.tflite
+ MLC_ML_MODEL_WEIGHTS_FILE: mobilenet_v2_<<<MLC_ML_MODEL_MOBILENET_MULTIPLIER>>>_<<<MLC_ML_MODEL_MOBILENET_RESOLUTION>>>.ckpt.data-00000-of-00001
+ MLC_PACKAGE_URL: https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_<<<MLC_ML_MODEL_MOBILENET_MULTIPLIER>>>_<<<MLC_ML_MODEL_MOBILENET_RESOLUTION>>>.tgz
+ MLC_UNTAR: 'yes'
tf,from.google,v2,quantized_:
env:
- CM_EXTRACT_FOLDER: v2_<<<CM_ML_MODEL_MOBILENET_MULTIPLIER>>>_<<<CM_ML_MODEL_MOBILENET_RESOLUTION>>>
- CM_ML_MODEL_FILE: model.tflite
- CM_ML_MODEL_WEIGHTS_FILE: <<