diff --git a/.github/workflows/build_wheel.yml b/.github/workflows/build_wheel.yml index ba82117cf..8de31237d 100644 --- a/.github/workflows/build_wheel.yml +++ b/.github/workflows/build_wheel.yml @@ -6,10 +6,11 @@ on: push: branches: - - dev + - dev_off paths: - VERSION + jobs: build_wheels: diff --git a/.github/workflows/test-amd-mlperf-inference-implementations.yml b/.github/workflows/test-amd-mlperf-inference-implementations.yml index 4c4b6f749..bc01ad20c 100644 --- a/.github/workflows/test-amd-mlperf-inference-implementations.yml +++ b/.github/workflows/test-amd-mlperf-inference-implementations.yml @@ -22,5 +22,5 @@ jobs: export MLC_REPOS=$HOME/GH_MLC pip install --upgrade mlc-scripts mlc pull repo - mlcr --tags=run-mlperf,inference,_all-scenarios,_full,_r4.1-dev --execution_mode=valid --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=IntelSPR.24c --implementation=amd --backend=pytorch --category=datacenter --division=open --scenario=Offline --docker_dt=yes --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=rocm --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean --docker --quiet --docker_skip_run_cmd=yes - # mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=dev --commit_message="Results from GH action on SPR.24c" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=IntelSPR.24c + mlcr run-mlperf,inference,_all-scenarios,_full,_r4.1-dev --execution_mode=valid --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=IntelSPR.24c --implementation=amd --backend=pytorch --category=datacenter --division=open --scenario=Offline --docker_dt=yes --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=rocm --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean --docker --quiet --docker_skip_run_cmd=yes + # mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=dev --commit_message="Results from GH action on SPR.24c" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=IntelSPR.24c diff --git a/.github/workflows/test-image-classification-onnx.yml b/.github/workflows/test-image-classification-onnx.yml index 4edbb61b6..121c111ef 100644 --- a/.github/workflows/test-image-classification-onnx.yml +++ b/.github/workflows/test-image-classification-onnx.yml @@ -38,4 +38,4 @@ jobs: mlc pull repo ${{ github.event.pull_request.head.repo.html_url }} --branch=${{ github.event.pull_request.head.ref }} - name: Test image classification with ONNX run: | - mlcr --tags=python,app,image-classification,onnx --quiet + mlcr python,app,image-classification,onnx --quiet diff --git a/.github/workflows/test-intel-mlperf-inference-implementations.yml b/.github/workflows/test-intel-mlperf-inference-implementations.yml index 0041f9762..9e4d03e26 100644 --- a/.github/workflows/test-intel-mlperf-inference-implementations.yml +++ b/.github/workflows/test-intel-mlperf-inference-implementations.yml @@ -22,5 +22,5 @@ jobs: export MLC_REPOS=$HOME/GH_MLC pip install --upgrade mlc-scripts pip install tabulate - mlcr 
--tags=run-mlperf,inference,_all-scenarios,_submission,_full,_r4.1-dev --preprocess_submission=yes --execution_mode=valid --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=IntelSPR.24c --implementation=intel --backend=pytorch --category=datacenter --division=open --scenario=Offline --docker_dt=yes --docker_it=no --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=cpu --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean --docker --quiet - mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from GH action on SPR.24c" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=IntelSPR.24c + mlcr run-mlperf,inference,_all-scenarios,_submission,_full,_r4.1-dev --preprocess_submission=yes --execution_mode=valid --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=IntelSPR.24c --implementation=intel --backend=pytorch --category=datacenter --division=open --scenario=Offline --docker_dt=yes --docker_it=no --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=cpu --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean --docker --quiet + mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from GH action on SPR.24c" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=IntelSPR.24c diff --git a/.github/workflows/test-mlc-script-features.yml b/.github/workflows/test-mlc-script-features.yml index bca961c1b..05d62e2ae 100644 --- a/.github/workflows/test-mlc-script-features.yml +++ b/.github/workflows/test-mlc-script-features.yml @@ -35,12 +35,12 @@ jobs: - name: Test Python venv run: | - mlcr --tags=install,python-venv --name=test --quiet + mlcr install,python-venv --name=test --quiet mlc search cache --tags=get,python,virtual,name-test --quiet - name: Test variations run: | - mlcr --tags=get,dataset,preprocessed,imagenet,_NHWC --quiet + mlcr get,dataset,preprocessed,imagenet,_NHWC --quiet mlc search cache --tags=get,dataset,preprocessed,imagenet,-_NCHW mlc search cache --tags=get,dataset,preprocessed,imagenet,-_NHWC @@ -48,17 +48,17 @@ jobs: continue-on-error: true if: runner.os == 'linux' run: | - mlcr --tags=get,generic-python-lib,_package.scipy --version=1.9.3 --quiet + mlcr get,generic-python-lib,_package.scipy --version=1.9.3 --quiet test $? -eq 0 || exit $? - mlcr --tags=get,generic-python-lib,_package.scipy --version=1.9.2 --quiet + mlcr get,generic-python-lib,_package.scipy --version=1.9.2 --quiet test $? -eq 0 || exit $? # Need to add find cache here - # mlcr --tags=get,generic-python-lib,_package.scipy --version=1.9.3 --quiet --only_execute_from_cache=True + # mlcr get,generic-python-lib,_package.scipy --version=1.9.3 --quiet --only_execute_from_cache=True # test $? 
-eq 0 || exit 0 - name: Test python install from src run: | - mlcr --tags=python,src,install,_shared --version=3.9.10 --quiet + mlcr python,src,install,_shared --version=3.9.10 --quiet mlc search cache --tags=python,src,install,_shared,version-3.9.10 test_docker: @@ -81,11 +81,11 @@ jobs: - name: Run docker container from dockerhub on linux run: | - mlcr --tags=run,docker,container --adr.compiler.tags=gcc --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --image_name=cm-script-app-image-classification-onnx-py --env.MLC_DOCKER_RUN_SCRIPT_TAGS=app,image-classification,onnx,python --env.MLC_DOCKER_IMAGE_BASE=ubuntu:22.04 --env.MLC_DOCKER_IMAGE_REPO=cknowledge --quiet + mlcr run,docker,container --adr.compiler.tags=gcc --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --image_name=cm-script-app-image-classification-onnx-py --env.MLC_DOCKER_RUN_SCRIPT_TAGS=app,image-classification,onnx,python --env.MLC_DOCKER_IMAGE_BASE=ubuntu:22.04 --env.MLC_DOCKER_IMAGE_REPO=cknowledge --quiet - name: Run docker container locally on linux run: | - mlcr --tags=run,docker,container --adr.compiler.tags=gcc --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --image_name=mlc-script-app-image-classification-onnx-py --env.MLC_DOCKER_RUN_SCRIPT_TAGS=app,image-classification,onnx,python --env.MLC_DOCKER_IMAGE_BASE=ubuntu:22.04 --env.MLC_DOCKER_IMAGE_REPO=local --quiet + mlcr run,docker,container --adr.compiler.tags=gcc --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --image_name=mlc-script-app-image-classification-onnx-py --env.MLC_DOCKER_RUN_SCRIPT_TAGS=app,image-classification,onnx,python --env.MLC_DOCKER_IMAGE_BASE=ubuntu:22.04 --env.MLC_DOCKER_IMAGE_REPO=local --quiet test_mlperf_retinanet_cpp_venv: runs-on: ubuntu-latest @@ -107,15 +107,15 @@ jobs: - name: Run MLPerf Inference Retinanet with native and virtual Python run: | - mlcr --tags=app,mlperf,inference,generic,_cpp,_retinanet,_onnxruntime,_cpu --adr.python.version_min=3.8 --adr.compiler.tags=gcc --adr.openimages-preprocessed.tags=_50 --scenario=Offline --mode=accuracy --test_query_count=10 --rerun --quiet + mlcr app,mlperf,inference,generic,_cpp,_retinanet,_onnxruntime,_cpu --adr.python.version_min=3.8 --adr.compiler.tags=gcc --adr.openimages-preprocessed.tags=_50 --scenario=Offline --mode=accuracy --test_query_count=10 --rerun --quiet - mlcr --tags=app,mlperf,inference,generic,_cpp,_retinanet,_onnxruntime,_cpu --adr.python.version_min=3.8 --adr.compiler.tags=gcc --adr.openimages-preprocessed.tags=_50 --scenario=Offline --mode=performance --test_query_count=10 --rerun --quiet + mlcr app,mlperf,inference,generic,_cpp,_retinanet,_onnxruntime,_cpu --adr.python.version_min=3.8 --adr.compiler.tags=gcc --adr.openimages-preprocessed.tags=_50 --scenario=Offline --mode=performance --test_query_count=10 --rerun --quiet - mlcr --tags=install,python-venv --version=3.10.8 --name=mlperf --quiet + mlcr install,python-venv --version=3.10.8 --name=mlperf --quiet export MLC_SCRIPT_EXTRA_CMD="--adr.python.name=mlperf" - mlcr --tags=run,mlperf,inference,_submission,_short --adr.python.version_min=3.8 --adr.compiler.tags=gcc --adr.openimages-preprocessed.tags=_50 --submitter=MLCommons --implementation=cpp --hw_name=default --model=retinanet --backend=onnxruntime --device=cpu --scenario=Offline --quiet + mlcr run,mlperf,inference,_submission,_short --adr.python.version_min=3.8 --adr.compiler.tags=gcc --adr.openimages-preprocessed.tags=_50 --submitter=MLCommons 
--implementation=cpp --hw_name=default --model=retinanet --backend=onnxruntime --device=cpu --scenario=Offline --quiet # Step for Linux/MacOS - name: Randomly Execute Step (Linux/MacOS) @@ -160,4 +160,4 @@ jobs: git config --global credential.https://github.com.helper "!gh auth git-credential" git config --global credential.https://gist.github.com.helper "" git config --global credential.https://gist.github.com.helper "!gh auth git-credential" - mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet + mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet diff --git a/.github/workflows/test-mlperf-inference-abtf-poc.yml b/.github/workflows/test-mlperf-inference-abtf-poc.yml index cc2ec9868..fadea97da 100644 --- a/.github/workflows/test-mlperf-inference-abtf-poc.yml +++ b/.github/workflows/test-mlperf-inference-abtf-poc.yml @@ -114,4 +114,4 @@ jobs: - name: Test MLPerf Inference ABTF POC using ${{ matrix.backend }} on ${{ matrix.os }} run: | - mlcr --tags=run-abtf,inference,_poc-demo --test_query_count=2 --adr.cocoeval.version_max=1.5.7 --adr.cocoeval.version_max_usable=1.5.7 --quiet ${{ matrix.extra-args }} ${{ matrix.docker }} -v + mlcr run-abtf,inference,_poc-demo --test_query_count=2 --adr.cocoeval.version_max=1.5.7 --adr.cocoeval.version_max_usable=1.5.7 --quiet ${{ matrix.extra-args }} ${{ matrix.docker }} -v diff --git a/.github/workflows/test-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml b/.github/workflows/test-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml index 2a89dbe6f..73f0d4adb 100644 --- a/.github/workflows/test-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml +++ b/.github/workflows/test-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml @@ -43,11 +43,11 @@ jobs: - name: Test MLPerf Inference Bert ${{ matrix.backend }} on ${{ matrix.os }} if: matrix.os == 'windows-latest' run: | - mlcr --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }} --model=bert-99 --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --precision=${{ matrix.precision }} --target_qps=1 -v --quiet + mlcr run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }} --model=bert-99 --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --precision=${{ matrix.precision }} --target_qps=1 -v --quiet - name: Test MLPerf Inference Bert ${{ matrix.backend }} on ${{ matrix.os }} if: matrix.os != 'windows-latest' run: | - mlcr --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=bert-99 --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --precision=${{ matrix.precision }} --target_qps=1 -v --quiet + mlcr run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=bert-99 --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --precision=${{ matrix.precision }} 
--target_qps=1 -v --quiet - name: Randomly Execute Step id: random-check run: | @@ -77,4 +77,4 @@ jobs: git config --global credential.https://github.com.helper "!gh auth git-credential" git config --global credential.https://gist.github.com.helper "" git config --global credential.https://gist.github.com.helper "!gh auth git-credential" - mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet + mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet diff --git a/.github/workflows/test-mlperf-inference-dlrm.yml b/.github/workflows/test-mlperf-inference-dlrm.yml index 13bf2dbcc..c1a48a4b3 100644 --- a/.github/workflows/test-mlperf-inference-dlrm.yml +++ b/.github/workflows/test-mlperf-inference-dlrm.yml @@ -24,7 +24,7 @@ jobs: source gh_action/bin/activate export MLC_REPOS=$HOME/GH_MLC python3 -m pip install mlperf - mlcr --tags=run-mlperf,inference,_performance-only --pull_changes=yes --pull_inference_changes=yes --submitter="MLCommons" --model=dlrm-v2-99 --implementation=reference --backend=pytorch --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=1 --docker_it=no --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --clean + mlcr run-mlperf,inference,_performance-only --pull_changes=yes --pull_inference_changes=yes --submitter="MLCommons" --model=dlrm-v2-99 --implementation=reference --backend=pytorch --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=1 --docker_it=no --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --clean build_intel: if: github.repository_owner == 'gateoverflow_off' @@ -44,4 +44,4 @@ jobs: export MLC_REPOS=$HOME/GH_MLC python3 -m pip install mlperf mlc pull repo - mlcr --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --model=dlrm-v2-99 --implementation=intel --batch_size=1 --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=1 --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean + mlcr run-mlperf,inference,_submission,_short --submitter="MLCommons" --model=dlrm-v2-99 --implementation=intel --batch_size=1 --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=1 --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean diff --git a/.github/workflows/test-mlperf-inference-gptj.yml b/.github/workflows/test-mlperf-inference-gptj.yml index 341e2e818..346947611 100644 --- a/.github/workflows/test-mlperf-inference-gptj.yml +++ b/.github/workflows/test-mlperf-inference-gptj.yml @@ -5,7 
+5,7 @@ name: MLPerf inference GPT-J on: schedule: - - cron: "15 19 * * *" + - cron: "15 19 1 * *" jobs: build: @@ -26,6 +26,6 @@ jobs: export MLC_REPOS=$HOME/GH_MLC python3 -m pip install --upgrade mlc-scripts mlc pull repo - mlcr --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --docker --pull_changes=yes --pull_inference_changes=yes --model=gptj-99 --backend=${{ matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --target_qps=1 --quiet --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --beam_size=1 --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --get_platform_details=yes --implementation=reference --clean - mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/gh_action_submissions + mlcr run-mlperf,inference,_submission,_short --submitter="MLCommons" --docker --pull_changes=yes --pull_inference_changes=yes --model=gptj-99 --backend=${{ matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --target_qps=1 --quiet --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --beam_size=1 --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --get_platform_details=yes --implementation=reference --clean + mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/gh_action_submissions diff --git a/.github/workflows/test-mlperf-inference-llama2.yml b/.github/workflows/test-mlperf-inference-llama2.yml index 70e4e4909..acf896b88 100644 --- a/.github/workflows/test-mlperf-inference-llama2.yml +++ b/.github/workflows/test-mlperf-inference-llama2.yml @@ -5,7 +5,7 @@ name: MLPerf inference LLAMA2-70B on: schedule: - - cron: "59 04 * * *" + - cron: "59 04 1 * *" jobs: build_reference: @@ -31,5 +31,5 @@ jobs: pip install "huggingface_hub[cli]" git config --global credential.helper store huggingface-cli login --token ${{ secrets.HF_TOKEN }} --add-to-git-credential - mlcr --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --model=llama2-70b-99 --implementation=reference --backend=${{ matrix.backend }} --precision=${{ matrix.precision }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=0.001 --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --env.MLC_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST=yes --clean - mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions" --quiet --submission_dir=$HOME/gh_action_submissions + mlcr 
run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --model=llama2-70b-99 --implementation=reference --backend=${{ matrix.backend }} --precision=${{ matrix.precision }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=0.001 --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --env.MLC_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST=yes --clean + mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions" --quiet --submission_dir=$HOME/gh_action_submissions diff --git a/.github/workflows/test-mlperf-inference-mixtral.yml b/.github/workflows/test-mlperf-inference-mixtral.yml index eb47f853f..e091d2fe7 100644 --- a/.github/workflows/test-mlperf-inference-mixtral.yml +++ b/.github/workflows/test-mlperf-inference-mixtral.yml @@ -2,7 +2,7 @@ name: MLPerf inference MIXTRAL-8x7B on: schedule: - - cron: "59 23 * * */5" # 30th minute and 20th hour => 20:30 UTC => 2 AM IST + - cron: "59 23 1 * */5" # 30th minute and 20th hour => 20:30 UTC => 2 AM IST jobs: build_reference: @@ -28,5 +28,5 @@ jobs: git config --global credential.helper store huggingface-cli login --token ${{ secrets.HF_TOKEN }} --add-to-git-credential mlc pull repo - mlcr --tags=run-mlperf,inference,_submission,_short --adr.inference-src.tags=_branch.dev --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --model=mixtral-8x7b --implementation=reference --batch_size=1 --precision=${{ matrix.precision }} --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --docker --quiet --test_query_count=3 --target_qps=0.001 --clean --env.MLC_MLPERF_MODEL_MIXTRAL_8X7B_DOWNLOAD_TO_HOST=yes --env.MLC_MLPERF_DATASET_MIXTRAL_8X7B_DOWNLOAD_TO_HOST=yes --adr.openorca-mbxp-gsm8k-combined-preprocessed.tags=_size.1 - mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions - GO-phoenix" --quiet --submission_dir=$HOME/gh_action_submissions + mlcr run-mlperf,inference,_submission,_short --adr.inference-src.tags=_branch.dev --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --model=mixtral-8x7b --implementation=reference --batch_size=1 --precision=${{ matrix.precision }} --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --docker --quiet --test_query_count=3 --target_qps=0.001 --clean --env.MLC_MLPERF_MODEL_MIXTRAL_8X7B_DOWNLOAD_TO_HOST=yes --env.MLC_MLPERF_DATASET_MIXTRAL_8X7B_DOWNLOAD_TO_HOST=yes --adr.openorca-mbxp-gsm8k-combined-preprocessed.tags=_size.1 + mlcr 
push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions - GO-phoenix" --quiet --submission_dir=$HOME/gh_action_submissions diff --git a/.github/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml b/.github/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml index f4ed3f079..737132572 100644 --- a/.github/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml +++ b/.github/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml @@ -41,11 +41,11 @@ jobs: - name: Test MLPerf Inference MLCommons C++ ResNet50 on ${{ matrix.os }} if: matrix.os == 'windows-latest' run: | - mlcr --tags=app,mlperf,inference,mlcommons,cpp --submitter="MLCommons" --hw_name=gh_${{ matrix.os }} --adr.loadgen.tags=_from-pip --pip_loadgen=yes -v --quiet + mlcr app,mlperf,inference,mlcommons,cpp --submitter="MLCommons" --hw_name=gh_${{ matrix.os }} --adr.loadgen.tags=_from-pip --pip_loadgen=yes -v --quiet - name: Test MLPerf Inference MLCommons C++ ResNet50 on ${{ matrix.os }} if: matrix.os != 'windows-latest' run: | - mlcr --tags=app,mlperf,inference,mlcommons,cpp --submitter="MLCommons" --hw_name=gh_${{ matrix.os }} -v --quiet + mlcr app,mlperf,inference,mlcommons,cpp --submitter="MLCommons" --hw_name=gh_${{ matrix.os }} -v --quiet - name: Randomly Execute Step id: random-check run: | @@ -77,4 +77,4 @@ jobs: git config --global credential.https://github.com.helper "!gh auth git-credential" git config --global credential.https://gist.github.com.helper "" git config --global credential.https://gist.github.com.helper "!gh auth git-credential" - mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=dev --commit_message="Results from MLCommons C++ ResNet50 GH action on ${{ matrix.os }}" --quiet + mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=dev --commit_message="Results from MLCommons C++ ResNet50 GH action on ${{ matrix.os }}" --quiet diff --git a/.github/workflows/test-mlperf-inference-resnet50.yml b/.github/workflows/test-mlperf-inference-resnet50.yml index 4bfbe06ad..9bc5db424 100644 --- a/.github/workflows/test-mlperf-inference-resnet50.yml +++ b/.github/workflows/test-mlperf-inference-resnet50.yml @@ -53,11 +53,11 @@ jobs: - name: Test MLPerf Inference ResNet50 (Windows) if: matrix.os == 'windows-latest' run: | - mlcr --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name="gh_${{ matrix.os }} x86" --model=resnet50 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet + mlcr run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name="gh_${{ matrix.os }} x86" --model=resnet50 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet - name: Test MLPerf Inference ResNet50 (Linux/macOS) if: matrix.os != 'windows-latest' run: | - mlcr --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes 
--pull_inference_changes=yes --hw_name="gh_${{ matrix.os }} x86" --model=resnet50 --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet + mlcr run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name="gh_${{ matrix.os }} x86" --model=resnet50 --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet # Step for Linux/MacOS - name: Randomly Execute Step (Linux/MacOS) if: runner.os != 'Windows' @@ -101,5 +101,5 @@ jobs: git config --global credential.https://github.com.helper "!gh auth git-credential" git config --global credential.https://gist.github.com.helper "" git config --global credential.https://gist.github.com.helper "!gh auth git-credential" - mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet + mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet diff --git a/.github/workflows/test-mlperf-inference-retinanet.yml b/.github/workflows/test-mlperf-inference-retinanet.yml index c1777beae..40e749831 100644 --- a/.github/workflows/test-mlperf-inference-retinanet.yml +++ b/.github/workflows/test-mlperf-inference-retinanet.yml @@ -47,11 +47,11 @@ jobs: - name: Test MLPerf Inference Retinanet using ${{ matrix.backend }} on ${{ matrix.os }} if: matrix.os == 'windows-latest' run: | - mlcr --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name=gh_${{ matrix.os }} --model=retinanet --adr.loadgen.tags=_from-pip --pip_loadgen=yes --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --quiet -v --target_qps=1 + mlcr run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name=gh_${{ matrix.os }} --model=retinanet --adr.loadgen.tags=_from-pip --pip_loadgen=yes --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --quiet -v --target_qps=1 - name: Test MLPerf Inference Retinanet using ${{ matrix.backend }} on ${{ matrix.os }} if: matrix.os != 'windows-latest' run: | - mlcr --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name=gh_${{ matrix.os }}_x86 --model=retinanet --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --quiet -v --target_qps=1 + mlcr run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name=gh_${{ matrix.os }}_x86 --model=retinanet --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --quiet -v --target_qps=1 # Step for Linux/MacOS - name: Randomly Execute Step (Linux/MacOS) @@ -96,4 +96,4 @@ jobs: git config --global 
credential.https://github.com.helper "!gh auth git-credential" git config --global credential.https://gist.github.com.helper "" git config --global credential.https://gist.github.com.helper "!gh auth git-credential" - mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet + mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet diff --git a/.github/workflows/test-mlperf-inference-rgat.yml b/.github/workflows/test-mlperf-inference-rgat.yml index 026c64886..f8b0e6116 100644 --- a/.github/workflows/test-mlperf-inference-rgat.yml +++ b/.github/workflows/test-mlperf-inference-rgat.yml @@ -35,7 +35,7 @@ jobs: mlc pull repo ${{ github.event.pull_request.head.repo.html_url }} --branch=${{ github.event.pull_request.head.ref }} - name: Test MLPerf Inference R-GAT using ${{ matrix.backend }} on ${{ matrix.os }} run: | - mlcr --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --adr.inference-src.tags=_branch.dev --pull_changes=yes --pull_inference_changes=yes --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=rgat --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --adr.compiler.tags=gcc --category=datacenter --quiet -v --target_qps=1 + mlcr run,mlperf,inference,generate-run-cmds,_submission,_short --adr.inference-src.tags=_branch.dev --pull_changes=yes --pull_inference_changes=yes --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=rgat --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --adr.compiler.tags=gcc --category=datacenter --quiet -v --target_qps=1 - name: Retrieve secrets from Keeper id: ksecrets @@ -55,4 +55,4 @@ jobs: git config --global credential.https://github.com.helper "!gh auth git-credential" git config --global credential.https://gist.github.com.helper "" git config --global credential.https://gist.github.com.helper "!gh auth git-credential" - mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet + mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet diff --git a/.github/workflows/test-mlperf-inference-rnnt.yml b/.github/workflows/test-mlperf-inference-rnnt.yml index 89ec6e4e2..3dfba6938 100644 --- a/.github/workflows/test-mlperf-inference-rnnt.yml +++ b/.github/workflows/test-mlperf-inference-rnnt.yml @@ -37,4 +37,4 @@ jobs: mlcr --quiet --tags=get,sys-utils-cm - name: Test MLPerf Inference RNNT run: | - mlcr --tags=run,mlperf,inference,generate-run-cmds,_performance-only --submitter="cTuning" --model=rnnt --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --precision=${{ matrix.precision }} --target_qps=5 -v --quiet + mlcr run,mlperf,inference,generate-run-cmds,_performance-only --submitter="cTuning" --model=rnnt --backend=${{ matrix.backend }} --device=cpu 
--scenario=Offline --test_query_count=5 --precision=${{ matrix.precision }} --target_qps=5 -v --quiet diff --git a/.github/workflows/test-mlperf-inference-sdxl.yaml b/.github/workflows/test-mlperf-inference-sdxl.yaml index 2e287a0be..b76abb2ee 100644 --- a/.github/workflows/test-mlperf-inference-sdxl.yaml +++ b/.github/workflows/test-mlperf-inference-sdxl.yaml @@ -1,7 +1,7 @@ name: MLPerf inference SDXL on: schedule: - - cron: "19 17 * * *" + - cron: "19 17 1 * *" jobs: build_reference: @@ -21,5 +21,5 @@ jobs: export MLC_REPOS=$HOME/GH_MLC python3 -m pip install mlc-scripts mlc pull repo - mlcr --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --docker --model=sdxl --backend=${{ matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --quiet --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean - mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/gh_action_submissions + mlcr run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --docker --model=sdxl --backend=${{ matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --quiet --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean + mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/gh_action_submissions diff --git a/.github/workflows/test-mlperf-inference-tvm-resnet50.yml b/.github/workflows/test-mlperf-inference-tvm-resnet50.yml index 616f67db2..b2ae35d04 100644 --- a/.github/workflows/test-mlperf-inference-tvm-resnet50.yml +++ b/.github/workflows/test-mlperf-inference-tvm-resnet50.yml @@ -35,7 +35,7 @@ jobs: mlcr --quiet --tags=get,sys-utils-cm - name: Test MLC Tutorial TVM run: | - mlcr --tags=run-mlperf,inference,_submission,_short --adr.python.name=mlperf --adr.python.version_min=3.8 --submitter=MLCommons --implementation=python --hw_name=gh_ubuntu-latest --model=resnet50 --backend=tvm-onnx --device=cpu --scenario=Offline --mode=accuracy --test_query_count=5 --clean --quiet ${{ matrix.extra-options }} + mlcr run-mlperf,inference,_submission,_short --adr.python.name=mlperf --adr.python.version_min=3.8 --submitter=MLCommons --implementation=python --hw_name=gh_ubuntu-latest --model=resnet50 --backend=tvm-onnx --device=cpu --scenario=Offline --mode=accuracy --test_query_count=5 --clean --quiet ${{ matrix.extra-options }} - name: Randomly Execute Step id: random-check run: | @@ -67,4 +67,4 @@ jobs: git config --global credential.https://github.com.helper "!gh auth git-credential" git config --global credential.https://gist.github.com.helper "" git config --global 
credential.https://gist.github.com.helper "!gh auth git-credential" - mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet + mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet diff --git a/.github/workflows/test-mlperf-loadgen-onnx-huggingface-bert-fp32-squad.yml b/.github/workflows/test-mlperf-loadgen-onnx-huggingface-bert-fp32-squad.yml index 7c9a68d85..cafb93a12 100644 --- a/.github/workflows/test-mlperf-loadgen-onnx-huggingface-bert-fp32-squad.yml +++ b/.github/workflows/test-mlperf-loadgen-onnx-huggingface-bert-fp32-squad.yml @@ -32,4 +32,4 @@ jobs: mlc pull repo ${{ github.event.pull_request.head.repo.html_url }} --branch=${{ github.event.pull_request.head.ref }} - name: Test MLPerf loadgen with HuggingFace bert onnx fp32 squad model run: | - mlcr --tags=python,app,loadgen-generic,_onnxruntime,_custom,_huggingface,_model-stub.ctuning/mlperf-inference-bert-onnx-fp32-squad-v1.1 --quiet + mlcr python,app,loadgen-generic,_onnxruntime,_custom,_huggingface,_model-stub.ctuning/mlperf-inference-bert-onnx-fp32-squad-v1.1 --quiet diff --git a/.github/workflows/test-nvidia-mlperf-inference-implementations.yml b/.github/workflows/test-nvidia-mlperf-inference-implementations.yml index 2fac2632c..9b75db22a 100644 --- a/.github/workflows/test-nvidia-mlperf-inference-implementations.yml +++ b/.github/workflows/test-nvidia-mlperf-inference-implementations.yml @@ -2,7 +2,7 @@ name: MLPerf Inference Nvidia implementations on: schedule: - - cron: "27 11 * * *" + - cron: "27 11 1 * *" jobs: run_nvidia: @@ -58,7 +58,7 @@ jobs: pip install --upgrade mlcflow mlc pull repo mlcommons@mlperf-automations --branch=dev - mlcr --tags=run-mlperf,inference,_all-scenarios,_submission,_full,_r5.0-dev --preprocess_submission=yes --pull_changes=yes --pull_inference_changes=yes --execution_mode=valid --gpu_name=$gpu_name --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="GATEOverflow" --hw_name=$hw_name --implementation=nvidia --backend=tensorrt --category=$category --division=closed --docker_dt --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=cuda --use_model_from_host=yes --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean $docker_string $submission_preprocessor_args --quiet - #mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from GH action on NVIDIA_$hw_name" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=$hw_name - mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/GATEOverflow/mlperf_inference_submissions_v5.0 --repo_branch=main --commit_message="Results from GH actions on NVIDIA_$hw_name" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=$hw_name + mlcr run-mlperf,inference,_all-scenarios,_submission,_full,_r5.0-dev --preprocess_submission=yes --pull_changes=yes --pull_inference_changes=yes --execution_mode=valid --gpu_name=$gpu_name --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} 
--submitter="GATEOverflow" --hw_name=$hw_name --implementation=nvidia --backend=tensorrt --category=$category --division=closed --docker_dt --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=cuda --use_model_from_host=yes --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean $docker_string $submission_preprocessor_args --quiet + #mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from GH action on NVIDIA_$hw_name" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=$hw_name + mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/GATEOverflow/mlperf_inference_submissions_v5.0 --repo_branch=main --commit_message="Results from GH actions on NVIDIA_$hw_name" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=$hw_name diff --git a/.github/workflows/test-qaic-compute-sdk-build.yml b/.github/workflows/test-qaic-compute-sdk-build.yml index 0dff27cd0..6b3e91356 100644 --- a/.github/workflows/test-qaic-compute-sdk-build.yml +++ b/.github/workflows/test-qaic-compute-sdk-build.yml @@ -27,8 +27,8 @@ jobs: - name: Install dependencies run: | pip install mlc-scripts - mlcr --tags=get,sys-utils-cm --quiet + mlcr get,sys-utils-cm --quiet - name: Test QAIC Compute SDK for compilation run: | - mlcr --tags=get,qaic,compute,sdk --adr.llvm.version=${{ matrix.llvm-version }} --quiet + mlcr get,qaic,compute,sdk --adr.llvm.version=${{ matrix.llvm-version }} --quiet diff --git a/.github/workflows/test-qaic-software-kit.yml b/.github/workflows/test-qaic-software-kit.yml index 5cbfc0add..64bba66b4 100644 --- a/.github/workflows/test-qaic-software-kit.yml +++ b/.github/workflows/test-qaic-software-kit.yml @@ -32,8 +32,8 @@ jobs: - name: Pull MLOps repository run: | pip install mlc-scripts - mlcr --tags=get,sys-utils-mlc --quiet + mlcr get,sys-utils-mlc --quiet - name: Test Software Kit for compilation on Ubuntu 20.04 run: | - mlcr --tags=get,qaic,software,kit --adr.compiler.tags=${{ matrix.compiler }} --adr.compiler.version=${{ matrix.llvm-version }} --quiet + mlcr get,qaic,software,kit --adr.compiler.tags=${{ matrix.compiler }} --adr.compiler.version=${{ matrix.llvm-version }} --quiet diff --git a/.github/workflows/test-scc24-sdxl.yaml b/.github/workflows/test-scc24-sdxl.yaml index becc3830d..3151c8183 100644 --- a/.github/workflows/test-scc24-sdxl.yaml +++ b/.github/workflows/test-scc24-sdxl.yaml @@ -1,8 +1,9 @@ name: MLPerf inference SDXL (SCC) + on: schedule: - - cron: "34 19 * * *" + - cron: "34 19 1 * *" jobs: build_reference: @@ -28,10 +29,10 @@ jobs: pip install --upgrade mlcflow pip install tabulate mlc pull repo - mlcr --tags=run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --pull_changes=yes --pull_inference_changes=yes --model=sdxl --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean - mlcr --tags=run-mlperf,inference,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=reference 
--backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean - mlcr --tags=generate,inference,submission --clean --run-checker --tar=yes --env.MLC_TAR_OUTFILE=submission.tar.gz --division=open --category=datacenter --run_style=test --adr.submission-checker.tags=_short-run --quiet --submitter=MLCommons --submission_dir=$HOME/scc_gh_action_submissions --results_dir=$HOME/scc_gh_action_results/test_results - mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/cm4mlperf-inference --repo_branch=mlperf-inference-results-scc24 --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/scc_gh_action_submissions + mlcr run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --pull_changes=yes --pull_inference_changes=yes --model=sdxl --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean + mlcr run-mlperf,inference,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean + mlcr generate,inference,submission --clean --run-checker --tar=yes --env.MLC_TAR_OUTFILE=submission.tar.gz --division=open --category=datacenter --run_style=test --adr.submission-checker.tags=_short-run --quiet --submitter=MLCommons --submission_dir=$HOME/scc_gh_action_submissions --results_dir=$HOME/scc_gh_action_results/test_results + # mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/cm4mlperf-inference --repo_branch=mlperf-inference-results-scc24 --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/scc_gh_action_submissions build_nvidia: if: github.repository_owner == 'gateoverflow' @@ -56,7 +57,7 @@ jobs: pip install --upgrade mlcflow pip install tabulate mlc pull repo - mlcr --tags=run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --pull_changes=yes --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --pull_changes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions 
--env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --hw_name=go-spr --custom_system_nvidia=yes --clean - mlcr --tags=run-mlperf,inference,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean - mlcr --tags=generate,inference,submission --clean --run-checker --tar=yes --env.MLC_TAR_OUTFILE=submission.tar.gz --division=open --category=datacenter --run_style=test --adr.submission-checker.tags=_short-run --quiet --submitter=MLCommons --submission_dir=$HOME/scc_gh_action_submissions --results_dir=$HOME/scc_gh_action_results/test_results - mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/cm4mlperf-inference --repo_branch=mlperf-inference-results-scc24 --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/scc_gh_action_submissions + mlcr run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --pull_changes=yes --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --pull_changes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --hw_name=go-spr --custom_system_nvidia=yes --clean + mlcr run-mlperf,inference,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean + mlcr generate,inference,submission --clean --run-checker --tar=yes --env.MLC_TAR_OUTFILE=submission.tar.gz --division=open --category=datacenter --run_style=test --adr.submission-checker.tags=_short-run --quiet --submitter=MLCommons --submission_dir=$HOME/scc_gh_action_submissions --results_dir=$HOME/scc_gh_action_results/test_results + # mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/cm4mlperf-inference --repo_branch=mlperf-inference-results-scc24 --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/scc_gh_action_submissions diff --git a/automation/script/docker.py b/automation/script/docker.py index bebee59e1..056192898 100644 --- a/automation/script/docker.py +++ b/automation/script/docker.py @@ -246,7 +246,7 @@ def docker_run(self_module, i): for t in i.get('tags', '').split(",") if t.startswith("_")] docker_cache = i.get('docker_cache', "yes") - if docker_cache.lower() in ["no", "false"]: + if is_false(docker_cache): env.setdefault('MLC_DOCKER_CACHE', docker_cache) image_repo = 
i.get('docker_image_repo', '') @@ -371,7 +371,7 @@ def docker_run(self_module, i): # Execute the Docker container mlc_docker_input = { - 'action': 'run', 'automation': 'script', 'tags': 'run,docker,container', + 'action': 'run', 'target': 'script', 'tags': 'run,docker,container', 'rebuild': rebuild_docker_image, 'env': env, 'mounts': mounts, 'script_tags': i.get('tags'), 'run_cmd': final_run_cmd, 'v': verbose, diff --git a/automation/script/docker_utils.py b/automation/script/docker_utils.py index 074c8312c..621db878a 100644 --- a/automation/script/docker_utils.py +++ b/automation/script/docker_utils.py @@ -377,7 +377,6 @@ def get_docker_default(key): "use_host_user_id": True, "use_host_group_id": True, "keep_detached": False, - "reuse_existing": True } if key in defaults: return defaults[key] diff --git a/automation/script/module.py b/automation/script/module.py index 4c3b7fbb1..acdacff8e 100644 --- a/automation/script/module.py +++ b/automation/script/module.py @@ -358,9 +358,9 @@ def _run(self, i): if fake_deps: env['MLC_TMP_FAKE_DEPS'] = 'yes' - if str(i.get('skip_sys_utils', '')).lower() in ['true', 'yes']: + if is_true(i.get('skip_sys_utils', '')): env['MLC_SKIP_SYS_UTILS'] = 'yes' - if str(i.get('skip_sudo', '')).lower() in ['true', 'yes']: + if is_true(i.get('skip_sudo', '')): env['MLC_TMP_SKIP_SUDO'] = 'yes' run_state = i.get('run_state', self.run_state) @@ -374,12 +374,10 @@ def _run(self, i): # Check verbose and silent verbose = False - silent = True if str(i.get('silent', '')).lower() in [ - 'true', 'yes', 'on'] else False + silent = True if is_true(i.get('silent', '')) else False if not silent: - silent = True if str(i.get('s', '')).lower() in [ - 'true', 'yes', 'on'] else False + silent = True if is_true(i.get('s', '')) else False if silent: if 'verbose' in i: @@ -1020,11 +1018,9 @@ def _run(self, i): if r['return'] > 0: return r - if str(env.get('MLC_RUN_STATE_DOCKER', False) - ).lower() in ['true', '1', 'yes']: + if is_true(env.get('MLC_RUN_STATE_DOCKER', False)): if state.get('docker'): - if str(state['docker'].get('run', True) - ).lower() in ['false', '0', 'no']: + if is_false(state['docker'].get('run', True)): logger.info( recursion_spaces + ' - Skipping script::{} run as we are inside docker'.format(found_script_item)) @@ -1047,7 +1043,7 @@ def _run(self, i): 'deps': []} return rr - elif str(state['docker'].get('real_run', True)).lower() in ['false', '0', 'no']: + elif is_false(state['docker'].get('real_run', True)): logger.info( recursion_spaces + ' - Doing fake run for script::{} as we are inside docker'.format(found_script_item)) @@ -1576,7 +1572,7 @@ def _run(self, i): } # Check and run predeps in customize.py - if str(meta.get('predeps', 'True')).lower() not in ["0", "false", "no"] and os.path.isfile( + if not is_false(meta.get('predeps', 'True')) and os.path.isfile( path_to_customize_py): # possible duplicate execution - needs fix r = utils.load_python_module( {'path': path, 'name': 'customize'}) @@ -1614,6 +1610,7 @@ def _run(self, i): ii['env'] = env ii['state'] = state ii['meta'] = meta + ii['automation'] = self # may need to detect versions in multiple paths ii['run_script_input'] = run_script_input @@ -1766,6 +1763,7 @@ def _run(self, i): ii['env'] = env ii['state'] = state ii['meta'] = meta + ii['automation'] = self # may need to detect versions in multiple paths ii['run_script_input'] = run_script_input @@ -2962,13 +2960,10 @@ def test(self, i): run_variations = [ f"_{v}" for v in variations if variations[v].get( 'group', - '') == '' and str( + '') == '' 
and not is_true( variations[v].get( 'exclude-in-test', - '')).lower() not in [ - "1", - "true", - "yes"]] + ''))] else: given_variations = run_input.get( 'variations_list', []) @@ -5029,7 +5024,7 @@ def enable_or_skip_script(meta, env): """ if not isinstance(meta, dict): - logger.info( + logger.warn( "The meta entry is not a dictionary for skip/enable if_env: %s", meta) @@ -5039,10 +5034,10 @@ def enable_or_skip_script(meta, env): value = str(env[key]).lower().strip() if set(meta_key) & set(["yes", "on", "true", "1"]): # Any set value other than false is taken as set - if value not in ["no", "off", "false", "0", ""]: + if not is_false(value) and value != '': continue elif set(meta_key) & set(["no", "off", "false", "0"]): - if value in ["no", "off", "false", "0", ""]: + if is_false(value) or value == "": continue elif value in meta_key: continue @@ -5072,10 +5067,10 @@ def any_enable_or_skip_script(meta, env): meta_key = [str(v).lower() for v in meta[key]] if set(meta_key) & set(["yes", "on", "true", "1"]): - if value not in ["no", "off", "false", "0", ""]: + if not is_false(value) and value != "": found = True elif set(meta_key) & set(["no", "off", "false", "0", ""]): - if value in ["no", "off", "false", "0", ""]: + if is_false(value) or value == "": found = True elif value in meta_key: found = True @@ -5525,6 +5520,7 @@ def run_detect_version(customize_code, customize_common_input, ii['env'] = env ii['state'] = state ii['meta'] = meta + ii['automation'] = customize_common_input['automation'] r = customize_code.detect_version(ii) return r @@ -5555,6 +5551,7 @@ def run_postprocess(customize_code, customize_common_input, recursion_spaces, ii['env'] = env ii['state'] = state ii['meta'] = meta + ii['automation'] = customize_common_input['automation'] if run_script_input is not None: ii['run_script_input'] = run_script_input diff --git a/docs/getting-started.md b/docs/getting-started.md index 2bf8ff5b2..248c604bc 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -1,5 +1,12 @@ # Getting Started with MLC Script Automation +## Install MLC Scripts +``` +pip install mlc-scripts +``` + +For more customized installation you can see [here](https://docs.mlcommons.org/mlcflow/install/) + ## Running MLC Scripts To execute a simple script in MLC that captures OS details, use the following command: @@ -12,7 +19,6 @@ mlcr detect,os -j This command gathers details about the system on which it's run, such as: ```json -$ mlcr detect,os -j [2025-02-03 04:57:23,449 main.py:694 INFO] - Repos path for Index: /home/arjun/MLC/repos [2025-02-03 04:57:24,167 main.py:837 INFO] - Shared index for script saved to /home/arjun/MLC/repos/index_script.json. [2025-02-03 04:57:24,167 main.py:837 INFO] - Shared index for cache saved to /home/arjun/MLC/repos/index_cache.json. diff --git a/script/app-mlperf-inference-ctuning-cpp-tflite/armnn/classification.cpp b/script/app-mlperf-inference-ctuning-cpp-tflite/armnn/classification.cpp index c7a07faa8..ff10fb074 100644 --- a/script/app-mlperf-inference-ctuning-cpp-tflite/armnn/classification.cpp +++ b/script/app-mlperf-inference-ctuning-cpp-tflite/armnn/classification.cpp @@ -422,12 +422,6 @@ void TestSingleStream(Program *prg) { ? 
mlperf::TestMode::FindPeakPerformance : mlperf::TestMode::SubmissionRun; - if (ts.FromConfig(mlperf_conf_path, model_name, scenario_string)) { - std::cout << "Issue with mlperf.conf file at " << mlperf_conf_path - << std::endl; - exit(1); - } - if (ts.FromConfig(user_conf_path, model_name, scenario_string)) { std::cout << "Issue with user.conf file at " << user_conf_path << std::endl; exit(1); diff --git a/script/app-mlperf-inference-mlcommons-python/customize.py b/script/app-mlperf-inference-mlcommons-python/customize.py index 8144df6e7..bbdff3f7c 100644 --- a/script/app-mlperf-inference-mlcommons-python/customize.py +++ b/script/app-mlperf-inference-mlcommons-python/customize.py @@ -219,6 +219,9 @@ def get_run_cmd(os_info, env, scenario_extra_options, def get_run_cmd_reference( os_info, env, scenario_extra_options, mode_extra_options, dataset_options): + device = env['MLC_MLPERF_DEVICE'] if env['MLC_MLPERF_DEVICE'] not in [ + "gpu", "rocm"] else "cuda" + if env['MLC_MODEL'] in ["gptj-99", "gptj-99.9"]: env['RUN_DIR'] = os.path.join( @@ -352,8 +355,6 @@ def get_run_cmd_reference( "fid")) backend = env['MLC_MLPERF_BACKEND'] - device = env['MLC_MLPERF_DEVICE'] if env['MLC_MLPERF_DEVICE'] not in [ - "gpu", "rocm"] else "cuda" max_batchsize = env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '1') cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " main.py " \ " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \ @@ -377,7 +378,6 @@ def get_run_cmd_reference( "language", "llama2-70b") backend = env['MLC_MLPERF_BACKEND'] - device = env['MLC_MLPERF_DEVICE'] if env['MLC_MLPERF_DEVICE'] != "gpu" else "cuda" cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " main.py " \ " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \ @@ -410,7 +410,6 @@ def get_run_cmd_reference( "language", "mixtral-8x7b") backend = env['MLC_MLPERF_BACKEND'] - device = env['MLC_MLPERF_DEVICE'] if env['MLC_MLPERF_DEVICE'] != "gpu" else "cuda" cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " main.py " \ " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \ " --dataset-path " + env['MLC_DATASET_MIXTRAL_PREPROCESSED_PATH'] + \ @@ -489,7 +488,6 @@ def get_run_cmd_reference( else: mode_extra_options += " --dataset igbh-dgl-tiny --profile debug-dgl " - device = env['MLC_MLPERF_DEVICE'] if env['MLC_MLPERF_DEVICE'] != "gpu" else "cuda" # have to add the condition for running in debug mode or real run mode cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " main.py " \ " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \ @@ -537,7 +535,7 @@ def get_run_cmd_reference( cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " main.py " \ " --dataset waymo" + \ " --dataset-path " + env['MLC_DATASET_WAYMO_PATH'] + \ - " --lidar-path " + env['MLC_ML_MODEL_POINT_PILLARS_PATH'] + \ + " --lidar-path " + env['MLC_ML_MODEL_POINT_PAINTING_PATH'] + \ " --segmentor-path " + env['MLC_ML_MODEL_DPLAB_RESNET50_PATH'] + \ " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \ " --output " + env['MLC_MLPERF_OUTPUT_DIR'] + \ diff --git a/script/app-mlperf-inference-mlcommons-python/meta.yaml b/script/app-mlperf-inference-mlcommons-python/meta.yaml index 2b6795b00..720d59514 100644 --- a/script/app-mlperf-inference-mlcommons-python/meta.yaml +++ b/script/app-mlperf-inference-mlcommons-python/meta.yaml @@ -66,7 +66,7 @@ input_mapping: multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY network: MLC_NETWORK_LOADGEN sut_servers: MLC_NETWORK_LOADGEN_SUT_SERVERS - pointpillars_checkpoint_path: MLC_ML_MODEL_POINT_PILLARS_PATH + pointpainting_checkpoint_path: 
MLC_ML_MODEL_POINT_PAINTING_PATH deeplab_resnet50_path: MLC_ML_MODEL_DPLAB_RESNET50_PATH waymo_path: MLC_DATASET_WAYMO_PATH @@ -505,7 +505,7 @@ deps: - "yes" ## pointpainting - - tags: get,ml-model,pointpillars + - tags: get,ml-model,pointpainting names: - pointpillars-model enable_if_env: @@ -514,21 +514,12 @@ deps: skip_if_env: MLC_RUN_STATE_DOCKER: - "yes" - - tags: get,ml-model,resnet50-deeplab - enable_if_env: - MLC_MODEL: - - pointpainting - skip_if_env: - MLC_RUN_STATE_DOCKER: - - "yes" - names: - - resnet50-deeplab-model ######################################################################## # Install datasets ## ImageNet (small for tests) - - tags: get,dataset,image-classification,imagenet,preprocessed + - tags: get,dataset,image-classification,imagenet,preprocessed,-_for.mobilenet names: - imagenet-preprocessed enable_if_env: @@ -894,6 +885,7 @@ variations: ENQUEUE_NUM_THREADS: 2 MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: 16 MLC_MLPERF_VISION_DATASET_OPTION: imagenet_pytorch + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: <<>> tvm-onnx: @@ -1288,6 +1280,7 @@ variations: - protobuf version_max: "4.23.4" version_max_usable: "4.23.4" + version_min: "3.20.3" enable_if_env: MLC_MLPERF_BACKEND: - tf diff --git a/script/app-mlperf-inference-nvidia/customize.py b/script/app-mlperf-inference-nvidia/customize.py index 550b273fe..70e5d3710 100644 --- a/script/app-mlperf-inference-nvidia/customize.py +++ b/script/app-mlperf-inference-nvidia/customize.py @@ -1,6 +1,7 @@ from mlc import utils import os import shutil +from utils import * def preprocess(i): @@ -590,8 +591,8 @@ def preprocess(i): run_infer_on_copy_streams = str( env.get('MLC_MLPERF_NVIDIA_HARNESS_RUN_INFER_ON_COPY_STREAMS', '')) - if run_infer_on_copy_streams and run_infer_on_copy_streams.lower() not in [ - "no", "false", "0", ""]: + if run_infer_on_copy_streams and not is_false( + run_infer_on_copy_streams): run_config += " --run_infer_on_copy_streams" start_from_device = str( diff --git a/script/app-mlperf-inference-qualcomm/README_aws_dl2q.24xlarge.md b/script/app-mlperf-inference-qualcomm/README_aws_dl2q.24xlarge.md index e27a6f3ec..cd19536a7 100644 --- a/script/app-mlperf-inference-qualcomm/README_aws_dl2q.24xlarge.md +++ b/script/app-mlperf-inference-qualcomm/README_aws_dl2q.24xlarge.md @@ -13,14 +13,14 @@ image from the Community AMIs is the recommended OS image as it comes with the Q sudo yum install -y python38-devel git python3.8 -m pip install cmind cm pull repo mlcommons@cm4mlops -mlcr --tags=get,python --version_min=3.8.1 +mlcr get,python --version_min=3.8.1 ``` ## Bert-99 ### Quick performance run ``` -mlcr --tags=generate-run-cmds,inference,_performance-only --device=qaic \ +mlcr generate-run-cmds,inference,_performance-only --device=qaic \ --backend=glow --scenario=Offline --implementation=kilt --model=bert-99 \ --test_query_count=40000 --precision=uint8 --rerun --quiet \ --adr.mlperf-inference-implementation.tags=_loadgen-batch-size.4096,_dl2q.24xlarge \ @@ -29,7 +29,7 @@ mlcr --tags=generate-run-cmds,inference,_performance-only --device=qaic \ ### Full valid run ``` -mlcr --tags=generate-run-cmds,inference,_submission --device=qaic \ +mlcr generate-run-cmds,inference,_submission --device=qaic \ --backend=glow --scenario=Offline --implementation=kilt --model=bert-99 --precision=uint8 \ --adr.mlperf-inference-implementation.tags=_loadgen-batch-size.4096,_dl2q.24xlarge \ --rerun --quiet --execution-mode=valid @@ -45,13 +45,13 @@ The expected accuracy is ~90 (Optional) If you have Imagenet 2012 validation dataset downloaded, you 
can register it in CM as follows. This step is optional and can avoid the download from the public URL which can be slow at times. ``` -mlcr --tags=get,dataset,imagenet,original,_full --env.IMAGENET_PATH=`pwd`/imagenet-2012-val +mlcr get,dataset,imagenet,original,_full --env.IMAGENET_PATH=`pwd`/imagenet-2012-val ``` ### Quick performance run ``` -mlcr --tags=generate-run-cmds,inference,_performance-only --device=qaic --backend=glow \ +mlcr generate-run-cmds,inference,_performance-only --device=qaic --backend=glow \ --scenario=Offline --implementation=kilt --model=resnet50 \ --test_query_count=400000 --precision=uint8 --rerun --adr.compiler.tags=gcc \ --adr.mlperf-inference-implementation.tags=_bs.8,_dl2q.24xlarge --execution-mode=test --quiet @@ -60,7 +60,7 @@ mlcr --tags=generate-run-cmds,inference,_performance-only --device=qaic --backen ### Full valid run ``` -mlcr --tags=generate-run-cmds,inference,_submission --device=qaic --backend=glow \ +mlcr generate-run-cmds,inference,_submission --device=qaic --backend=glow \ --scenario=Offline --implementation=kilt --model=resnet50 \ --precision=uint8 --rerun --adr.compiler.tags=gcc \ --adr.mlperf-inference-implementation.tags=_bs.8,_dl2q.24xlarge --execution-mode=valid --quiet @@ -76,7 +76,7 @@ Expected accuracy is 75.936% ### Quick performance run ``` -mlcr --tags=generate-run-cmds,inference,_performance-only --device=qaic --backend=glow \ +mlcr generate-run-cmds,inference,_performance-only --device=qaic --backend=glow \ --scenario=Offline --implementation=kilt --model=retinanet --test_query_count=40000 --precision=uint8 \ --rerun --quiet --adr.mlperf-inference-implementation.tags=_loadgen-batch-size.1,_dl2q.24xlarge,_bs.1 \ --adr.compiler.tags=gcc --execution-mode=test @@ -85,7 +85,7 @@ mlcr --tags=generate-run-cmds,inference,_performance-only --device=qaic --backen ### Full valid run ``` -mlcr --tags=generate-run-cmds,inference,_submission --device=qaic --backend=glow \ +mlcr generate-run-cmds,inference,_submission --device=qaic --backend=glow \ --scenario=Offline --implementation=kilt --model=retinanet \ --precision=uint8 --rerun --adr.compiler.tags=gcc --adr.dataset-preprocessed.tags=_custom-annotations \ --adr.mlperf-inference-implementation.tags=_bs.1,_dl2q.24xlarge --execution-mode=valid --quiet diff --git a/script/app-mlperf-inference/customize.py b/script/app-mlperf-inference/customize.py index 34bd3e046..5a215d27b 100644 --- a/script/app-mlperf-inference/customize.py +++ b/script/app-mlperf-inference/customize.py @@ -10,6 +10,7 @@ import mlperf_utils import re from datetime import datetime, timezone +from utils import * def preprocess(i): @@ -254,7 +255,7 @@ def postprocess(i): measurements['starting_weights_filename'] = env.get( 'MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME', env.get( 'MLC_ML_MODEL_FILE', measurements.get( - 'starting_weights_filename', ''))) + 'starting_weights_filename', 'TBD'))) measurements['retraining'] = env.get( 'MLC_ML_MODEL_RETRAINING', measurements.get( 'retraining', 'no')) @@ -286,8 +287,7 @@ def postprocess(i): state['app_mlperf_inference_log_summary'][y[0].strip().lower() ] = y[1].strip() - if env.get("MLC_MLPERF_PRINT_SUMMARY", "").lower() not in [ - "no", "0", "false"]: + if not is_false(env.get("MLC_MLPERF_PRINT_SUMMARY", "")): print("\n") print(mlperf_log_summary) diff --git a/script/app-mlperf-inference/meta.yaml b/script/app-mlperf-inference/meta.yaml index b1abbe02d..846936234 100644 --- a/script/app-mlperf-inference/meta.yaml +++ b/script/app-mlperf-inference/meta.yaml @@ -27,7 +27,7 @@ default_env: 
MLC_MLPERF_RUN_STYLE: test MLC_TEST_QUERY_COUNT: '10' MLC_MLPERF_QUANTIZATION: off - MLC_GET_PLATFORM_DETAILS: yes + MLC_GET_PLATFORM_DETAILS: no env: MLC_MLPERF_PRINT_SUMMARY: "no" @@ -121,7 +121,7 @@ deps: posthook_deps: - tags: get,mlperf,sut,description #populate system meta information like framework - tags: get,platform,details - enable_if_any_env: + enable_if_env: MLC_GET_PLATFORM_DETAILS: - yes skip_if_env: @@ -846,18 +846,12 @@ variations: - 'yes' names: - waymo-dataset - - tags: get,ml-model,pointpillars - enable_if_env: - MLC_USE_DATASET_FROM_HOST: - - 'yes' - names: - - pointpillars-model - - tags: get,ml-model,resnet50-deeplab + - tags: get,ml-model,pointpainting enable_if_env: MLC_USE_DATASET_FROM_HOST: - 'yes' names: - - resnet50-deeplab-model + - pointpainting-model posthook_deps: - enable_if_env: MLC_MLPERF_LOADGEN_MODE: @@ -1937,7 +1931,7 @@ docker: MLC_MLPERF_INFERENCE_SUBMISSION_DIR: [ on ] pre_run_cmds: - #- mlc pull repo && mlcr --tags=get,git,repo,_repo.https://github.com/GATEOverflow/inference_results_v4.0.git --update + #- mlc pull repo && mlcr get,git,repo,_repo.https://github.com/GATEOverflow/inference_results_v4.0.git --update - mlc pull repo mounts: - "${{ MLC_DATASET_IMAGENET_PATH }}:${{ MLC_DATASET_IMAGENET_PATH }}" @@ -1956,7 +1950,7 @@ docker: - "${{ MLC_DATASET_IGBH_PATH }}:${{ MLC_DATASET_IGBH_PATH }}" - "${{ MLC_ML_MODEL_RGAT_CHECKPOINT_PATH }}:${{ MLC_ML_MODEL_RGAT_CHECKPOINT_PATH }}" - "${{ MLC_DATASET_WAYMO_PATH }}:${{ MLC_DATASET_WAYMO_PATH }}" - - "${{ MLC_ML_MODEL_POINT_PILLARS_PATH }}:${{ MLC_ML_MODEL_POINT_PILLARS_PATH }}" + - "${{ MLC_ML_MODEL_POINT_PAINTING_PATH }}:${{ MLC_ML_MODEL_POINT_PAINTING_PATH }}" - "${{ MLC_ML_MODEL_DPLAB_RESNET50_PATH }}:${{ MLC_ML_MODEL_DPLAB_RESNET50_PATH }}" skip_run_cmd: 'no' shm_size: '32gb' diff --git a/script/benchmark-any-mlperf-inference-implementation/run-template.sh b/script/benchmark-any-mlperf-inference-implementation/run-template.sh index 0224c34dd..8e0cb42c0 100644 --- a/script/benchmark-any-mlperf-inference-implementation/run-template.sh +++ b/script/benchmark-any-mlperf-inference-implementation/run-template.sh @@ -43,47 +43,47 @@ function run_test() { results_dir=$HOME/results_dir #Add your run commands here... 
-find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun ${EXTRA_ARGS}' -find_ss_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_ss_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=SingleStream --quiet --test_query_count=$test_query_count $rerun ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet \ --skip_submission_generation=yes --execution-mode=valid ${POWER_STRING} ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' -submission_cmd_scenario='mlcr --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet \ --skip_submission_generation=yes --execution-mode=valid ${POWER_STRING} ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' -readme_cmd_single='mlcr --tags=generate-run-cmds,inference,_populate-readme --scenario=$scenario \ +readme_cmd_single='mlcr generate-run-cmds,inference,_populate-readme --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet \ --skip_submission_generation=yes --execution-mode=valid ${POWER_STRING} ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' -readme_cmd='mlcr --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +readme_cmd='mlcr generate-run-cmds,inference,_populate-readme,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet \ --skip_submission_generation=yes --execution-mode=valid ${POWER_STRING} ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' -tflite_accuracy_cmd='mlcr --tags=run,mobilenet-models,_tflite,_accuracy-only$extra_tags \ +tflite_accuracy_cmd='mlcr run,mobilenet-models,_tflite,_accuracy-only$extra_tags \ --adr.compiler.tags=gcc \ ${extra_option} \ ${EXTRA_ARGS}' -tflite_performance_cmd='mlcr --tags=run,mobilenet-models,_tflite,_performance-only$extra_tags \ +tflite_performance_cmd='mlcr run,mobilenet-models,_tflite,_performance-only$extra_tags \ ${POWER_STRING} \ --adr.compiler.tags=gcc \ ${extra_option} \ ${EXTRA_ARGS}' -tflite_readme_cmd='mlcr --tags=run,mobilenet-models,_tflite,_populate-readme$extra_tags \ +tflite_readme_cmd='mlcr run,mobilenet-models,_tflite,_populate-readme$extra_tags \ ${POWER_STRING} \ --adr.compiler.tags=gcc \ ${extra_option} \ diff --git a/script/benchmark-program-mlperf/customize.py b/script/benchmark-program-mlperf/customize.py index 3f92511eb..493505c48 100644 --- a/script/benchmark-program-mlperf/customize.py +++ b/script/benchmark-program-mlperf/customize.py @@ -33,9 +33,9 @@ def postprocess(i): echo \${MLC_MLPERF_RUN_COUNT} > \${MLC_RUN_DIR}/count.txt; if [ \${MLC_MLPERF_RUN_COUNT} -eq 
1 ]; then -export MLC_MLPERF_USER_CONF="${MLC_MLPERF_RANGING_USER_CONF}"; +export MLC_MLPERF_USER_CONF="\${MLC_MLPERF_RANGING_USER_CONF}"; else -export MLC_MLPERF_USER_CONF="${MLC_MLPERF_TESTING_USER_CONF}"; +export MLC_MLPERF_USER_CONF="\${MLC_MLPERF_TESTING_USER_CONF}"; fi ; diff --git a/script/benchmark-program/customize.py b/script/benchmark-program/customize.py index 35cf623ec..a355e8248 100644 --- a/script/benchmark-program/customize.py +++ b/script/benchmark-program/customize.py @@ -1,5 +1,6 @@ from mlc import utils import os +from utils import * def preprocess(i): @@ -20,7 +21,7 @@ def preprocess(i): env['MLC_RUN_CMD'] += ' ' + env['MLC_RUN_SUFFIX'] else: - if env['MLC_ENABLE_NUMACTL'].lower() in ["on", "1", "true", "yes"]: + if is_true(env['MLC_ENABLE_NUMACTL']): env['MLC_ENABLE_NUMACTL'] = "1" MLC_RUN_PREFIX = "numactl " + env['MLC_NUMACTL_MEMBIND'] + ' ' else: @@ -49,8 +50,8 @@ def preprocess(i): if x != '': env['MLC_RUN_CMD'] = x + ' ' + env.get('MLC_RUN_CMD', '') - if os_info['platform'] != 'windows' and str( - env.get('MLC_SAVE_CONSOLE_LOG', True)).lower() not in ["no", "false", "0"]: + if os_info['platform'] != 'windows' and not is_false( + env.get('MLC_SAVE_CONSOLE_LOG', True)): logs_dir = env.get('MLC_LOGS_DIR', env['MLC_RUN_DIR']) env['MLC_RUN_CMD'] += r" 2>&1 | tee " + q + os.path.join( logs_dir, "console.out") + q + r"; echo \${PIPESTATUS[0]} > exitstatus" @@ -84,7 +85,7 @@ def preprocess(i): pre_run_cmd += ' && ' # running the script as a process in background - pre_run_cmd = pre_run_cmd + 'mlcr --tags=runtime,system,utilisation' + \ + pre_run_cmd = pre_run_cmd + 'mlcr runtime,system,utilisation' + \ env['MLC_SYS_UTILISATION_SCRIPT_TAGS'] + ' --quiet & ' # obtain the command if of the background process pre_run_cmd += r" cmd_pid=\$! 
&& echo CMD_PID=\$cmd_pid" diff --git a/script/download-file/customize.py b/script/download-file/customize.py index f72034d5f..64066122f 100644 --- a/script/download-file/customize.py +++ b/script/download-file/customize.py @@ -85,8 +85,7 @@ def preprocess(i): extra_download_options = env.get('MLC_DOWNLOAD_EXTRA_OPTIONS', '') verify_ssl = env.get('MLC_VERIFY_SSL', "True") - if str(verify_ssl).lower() in [ - "no", "false"] or os_info['platform'] == 'windows': + if is_false(verify_ssl) or os_info['platform'] == 'windows': verify_ssl = False else: verify_ssl = True diff --git a/script/extract-file/customize.py b/script/extract-file/customize.py index bedfe41f5..fd6ab8235 100644 --- a/script/extract-file/customize.py +++ b/script/extract-file/customize.py @@ -125,7 +125,7 @@ def preprocess(i): q + extract_to_folder + q env['MLC_EXTRACT_EXTRACTED_FILENAME'] = extract_to_folder - x = '"' if ' ' in filename else '' + x = q if ' ' in filename else '' env['MLC_EXTRACT_CMD'] = env['MLC_EXTRACT_PRE_CMD'] + env['MLC_EXTRACT_TOOL'] + ' ' + \ env.get('MLC_EXTRACT_TOOL_EXTRA_OPTIONS', '') + \ ' ' + env.get('MLC_EXTRACT_TOOL_OPTIONS', '') + ' ' + x + filename + x diff --git a/script/generate-mlperf-inference-submission/customize.py b/script/generate-mlperf-inference-submission/customize.py index 277b52933..a10ce68a5 100644 --- a/script/generate-mlperf-inference-submission/customize.py +++ b/script/generate-mlperf-inference-submission/customize.py @@ -131,12 +131,13 @@ def generate_submission(env, state, inp, submission_division): system_meta_tmp['system_type'] = env['MLC_MLPERF_SUBMISSION_CATEGORY'].replace( "-", ",") - duplicate = ( + '''duplicate = ( env.get( 'MLC_MLPERF_DUPLICATE_SCENARIO_RESULTS', 'no') in [ "yes", "True"]) + ''' if division not in ['open', 'closed']: return {'return': 1, 'error': '"division" must be "open" or "closed"'} @@ -159,15 +160,11 @@ def generate_submission(env, state, inp, submission_division): print('* MLPerf inference submitter: {}'.format(submitter)) if env.get('MLC_MLPERF_SUT_SW_NOTES_EXTRA', '') != '': - sw_notes = f"""{ - system_meta_tmp['sw_notes']} { - env['MLC_MLPERF_SUT_SW_NOTES_EXTRA']}""" + sw_notes = f"""{system_meta_tmp.get('sw_notes','')} {env['MLC_MLPERF_SUT_SW_NOTES_EXTRA']}""" system_meta_tmp['sw_notes'] = sw_notes if env.get('MLC_MLPERF_SUT_HW_NOTES_EXTRA', '') != '': - hw_notes = f"""{ - system_meta_tmp['hw_notes']} { - env['MLC_MLPERF_SUT_HW_NOTES_EXTRA']}""" + hw_notes = f"""{system_meta_tmp.get('hw_notes', '')} {env['MLC_MLPERF_SUT_HW_NOTES_EXTRA']}""" system_meta_tmp['hw_notes'] = hw_notes path_submission = os.path.join(path_submission_division, submitter) @@ -361,6 +358,7 @@ def generate_submission(env, state, inp, submission_division): compliance_scenario_path = os.path.join( compliance_model_path, scenario) + ''' if duplicate and scenario == 'singlestream': if not os.path.exists(os.path.join( result_model_path, "offline")): @@ -378,6 +376,7 @@ def generate_submission(env, state, inp, submission_division): result_scenario_path, os.path.join( result_model_path, "multistream")) scenarios.append("multistream") + ''' modes = [ f for f in os.listdir(result_scenario_path) if not os.path.isfile( @@ -552,9 +551,9 @@ def generate_submission(env, state, inp, submission_division): target_measurement_json_path) / "model-info.json" shutil.copy(measurements_json_path, destination) - else: + elif mode == 'performance': print( - f"Warning: measurements.json file not present, creating a dummy measurements.json in path {measurements_json_path}") + f"Warning: 
measurements.json file not present from perf run, creating a dummy measurements.json in path {measurements_json_path}. Please update it later.") dummy_measurements_data = { "input_data_types": env['MLC_ML_MODEL_INPUTS_DATA_TYPE'] if env.get('MLC_ML_MODEL_INPUTS_DATA_TYPE') else "TBD", "retraining": env['MLC_ML_MODEL_RETRAINING'] if env.get('MLC_ML_MODEL_RETRAINING') else "TBD", @@ -563,7 +562,8 @@ def generate_submission(env, state, inp, submission_division): "weight_transformations": env['MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS'] if env.get('MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS') else "TBD" } with open(measurements_json_path, 'w') as json_file: - json.dump(data, json_file, indent=4) + json.dump( + dummy_measurements_data, json_file, indent=4) files = [] readme = False diff --git a/script/generate-mlperf-inference-submission/meta.yaml b/script/generate-mlperf-inference-submission/meta.yaml index c249d7243..9448db682 100644 --- a/script/generate-mlperf-inference-submission/meta.yaml +++ b/script/generate-mlperf-inference-submission/meta.yaml @@ -67,11 +67,11 @@ input_mapping: dashboard_wb_project: MLC_MLPERF_DASHBOARD_WANDB_PROJECT device: MLC_MLPERF_DEVICE division: MLC_MLPERF_SUBMISSION_DIVISION - duplicate: MLC_MLPERF_DUPLICATE_SCENARIO_RESULTS extra_checker_args: MLC_MLPERF_SUBMISSION_CHECKER_EXTRA_ARG hw_name: MLC_HW_NAME hw_notes_extra: MLC_MLPERF_SUT_HW_NOTES_EXTRA - infer_scenario_results: MLC_MLPERF_DUPLICATE_SCENARIO_RESULTS + noinfer_scenario_results: MLC_MLPERF_NOINFER_SCENARIO_RESULTS + noinfer_low_accuracy_results: MLC_MLPERF_NOINFER_LOW_ACCURACY_RESULTS power_settings_file: MLC_MLPERF_POWER_SETTINGS_FILE_PATH preprocess: MLC_RUN_MLPERF_SUBMISSION_PREPROCESSOR preprocess_submission: MLC_RUN_MLPERF_SUBMISSION_PREPROCESSOR diff --git a/script/generate-mlperf-inference-user-conf/customize.py b/script/generate-mlperf-inference-user-conf/customize.py index 4863aa494..a9f8e3eaa 100644 --- a/script/generate-mlperf-inference-user-conf/customize.py +++ b/script/generate-mlperf-inference-user-conf/customize.py @@ -4,6 +4,7 @@ import shutil import subprocess import sys +from utils import * def preprocess(i): @@ -112,8 +113,8 @@ def preprocess(i): env['MLC_MLPERF_USE_MAX_DURATION'] = 'no' elif scenario == "MultiStream" and (1000 / float(value) * 660 < 662): env['MLC_MLPERF_USE_MAX_DURATION'] = 'no' - if env.get('MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() not in ["yes", "1", "true"] and env.get( - 'MLC_MLPERF_USE_MAX_DURATION', "yes").lower() not in ["no", "false", "0"]: + if not is_true(env.get('MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no')) and not is_false(env.get( + 'MLC_MLPERF_USE_MAX_DURATION', "yes")): tolerance = 0.4 # much lower because we have max_duration else: tolerance = 0.9 @@ -355,13 +356,13 @@ def preprocess(i): max_duration_ranging_s * 1000) # in milliseconds if scenario == "MultiStream" or scenario == "SingleStream": - if env.get('MLC_MLPERF_USE_MAX_DURATION', 'yes').lower() not in ["no", "false", "0"] and env.get( - 'MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() not in ["yes", "1", "true"]: - user_conf += ml_model_name + "." + scenario + \ - f".max_duration = {max_duration_valid}" + "\n" - elif env.get('MLC_MLPERF_INFERENCE_MIN_DURATION', '') != '': + if env.get('MLC_MLPERF_INFERENCE_MIN_DURATION', '') != '': user_conf += ml_model_name + "." 
+ scenario + ".min_duration = " + \ env['MLC_MLPERF_INFERENCE_MIN_DURATION'] + " \n" + elif not is_false(env.get('MLC_MLPERF_USE_MAX_DURATION', 'yes')) and not is_true(env.get( + 'MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no')): + user_conf += ml_model_name + "." + scenario + \ + f".max_duration = {max_duration_valid}" + "\n" if scenario == "MultiStream": user_conf += ml_model_name + "." + scenario + ".min_query_count = " + \ env.get( diff --git a/script/get-aocc/customize.py b/script/get-aocc/customize.py new file mode 100644 index 000000000..383cac39f --- /dev/null +++ b/script/get-aocc/customize.py @@ -0,0 +1,112 @@ +from mlc import utils +import os + + +def predeps(i): + os_info = i['os_info'] + + env = i['env'] + if env.get('MLC_AOCC_TAR_FILE_PATH', '') != '': + env['MLC_AOCC_NEEDS_TAR'] = 'yes' + + return {'return': 0} + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + exe_c = 'clang.exe' if os_info['platform'] == 'windows' else 'clang' + + if env.get('MLC_AOCC_DIR_PATH', '') != '' and env.get( + 'MLC_AOCC_BIN_WITH_PATH', '') == '': + for f in os.listdir(env['MLC_AOCC_DIR_PATH']): + if os.path.exists(os.path.join( + env['MLC_AOCC_DIR_PATH'], f, "bin", exe_c)): + env['MLC_AOCC_BIN_WITH_PATH'] = os.path.join( + env['MLC_AOCC_DIR_PATH'], f, "bin", exe_c) + + if env.get('MLC_HOST_OS_FLAVOR', '') == 'rhel': + if "12" in env.get('MLC_VERSION', '') or "12" in env.get( + 'MLC_VERSION_MIN', ''): + if env.get('MLC_TMP_PATH', '') == '': + env['MLC_TMP_PATH'] = '' + env['MLC_TMP_PATH'] += "/opt/rh/aocc/root/usr/bin" + env['MLC_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' + + if 'MLC_AOCC_BIN_WITH_PATH' not in env: + r = i['automation'].find_artifact({'file_name': exe_c, + 'env': env, + 'os_info': os_info, + 'default_path_env_key': 'PATH', + 'detect_version': True, + 'env_path_key': 'MLC_AOCC_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': i['recursion_spaces']}) + if r['return'] > 0: + + return r + + return {'return': 0} + + +def detect_version(i): + r = i['automation'].parse_version({'match_text': r'AMD\s+clang\sversion\s([\d.]+)', + 'group_number': 1, + 'env_key': 'MLC_AOCC_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r + version = r['version'] + + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return': 0, 'version': version} + + +def postprocess(i): + + env = i['env'] + r = detect_version(i) + if r['return'] > 0: + return r + + env['MLC_COMPILER_FAMILY'] = 'AOCC' + version = r['version'] + env['MLC_COMPILER_VERSION'] = env['MLC_AOCC_VERSION'] + env['MLC_AOCC_CACHE_TAGS'] = 'version-' + version + env['MLC_COMPILER_CACHE_TAGS'] = 'version-' + version + ',family-aocc' + + found_file_path = env['MLC_AOCC_BIN_WITH_PATH'] + + found_path = os.path.dirname(found_file_path) + + env['MLC_AOCC_INSTALLED_PATH'] = found_path + + file_name_c = os.path.basename(found_file_path) + file_name_cpp = file_name_c.replace('clang', 'clang++') + env['FILE_NAME_CPP'] = file_name_cpp + + env['MLC_AOCC_BIN'] = file_name_c + + # General compiler for general program compilation + env['MLC_C_COMPILER_BIN'] = file_name_c + env['MLC_C_COMPILER_FLAG_OUTPUT'] = '-o ' + env['MLC_C_COMPILER_WITH_PATH'] = found_file_path + env['MLC_C_COMPILER_FLAG_VERSION'] = '--version' + + env['MLC_CXX_COMPILER_BIN'] = file_name_cpp + env['MLC_CXX_COMPILER_WITH_PATH'] = os.path.join(found_path, file_name_cpp) + env['MLC_CXX_COMPILER_FLAG_OUTPUT'] = '-o ' + env['MLC_CXX_COMPILER_FLAG_VERSION'] = '--version' + + 
env['MLC_COMPILER_FLAGS_FAST'] = "-O3" + env['MLC_LINKER_FLAGS_FAST'] = "-O3" + env['MLC_COMPILER_FLAGS_DEBUG'] = "-O0" + env['MLC_LINKER_FLAGS_DEBUG'] = "-O0" + env['MLC_COMPILER_FLAGS_DEFAULT'] = "-O2" + env['MLC_LINKER_FLAGS_DEFAULT'] = "-O2" + + return {'return': 0, 'version': version} diff --git a/script/get-aocc/meta.yaml b/script/get-aocc/meta.yaml new file mode 100644 index 000000000..80a21d311 --- /dev/null +++ b/script/get-aocc/meta.yaml @@ -0,0 +1,44 @@ +alias: get-aocc +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Compiler automation +clean_files: [] +deps: +- tags: detect,os +- tags: extract,file + update_tags_from_env_with_prefix: + _path.: + - MLC_AOCC_TAR_FILE_PATH + force_cache: true + env: + MLC_EXTRACT_FINAL_ENV_NAME: MLC_AOCC_DIR_PATH + MLC_EXTRACT_TO_FOLDER: aocc_install + enable_if_env: + MLC_AOCC_NEEDS_TAR: + - yes + +input_mapping: + tar_file_path: MLC_AOCC_TAR_FILE_PATH + +name: Detect or install AOCC compiler +new_env_keys: +- MLC_AOCC_* +- MLC_C_COMPILER_* +- MLC_CXX_COMPILER_* +- MLC_COMPILER_* +- MLC_LINKER_* +- + CFLAGS +- + CXXFLAGS +- + FFLAGS +- + LDFLAGS +- +MLC_HOST_OS_DEFAULT_INCLUDE_PATH +- +PATH +post_depsq: +- tags: get,compiler-flags +sort: 500 +tags: +- compiler +- get +- aocc +uid: 1ceb0656e99a44ec diff --git a/script/get-aocc/run.bat b/script/get-aocc/run.bat new file mode 100644 index 000000000..5a08d5b62 --- /dev/null +++ b/script/get-aocc/run.bat @@ -0,0 +1,3 @@ +%MLC_AOCC_BIN_WITH_PATH% --version > tmp-ver.out +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + diff --git a/script/get-aocc/run.sh b/script/get-aocc/run.sh new file mode 100644 index 000000000..fe19b6c8d --- /dev/null +++ b/script/get-aocc/run.sh @@ -0,0 +1,8 @@ +#!/bin/bash +aocc_bin=${MLC_AOCC_BIN_WITH_PATH} +echo "${aocc_bin} --version" + +${aocc_bin} --version > tmp-ver.out +test $? -eq 0 || exit $? + +cat tmp-ver.out diff --git a/script/get-cuda-devices/detect.sh b/script/get-cuda-devices/detect.sh index 9de8aa64b..95b170267 100644 --- a/script/get-cuda-devices/detect.sh +++ b/script/get-cuda-devices/detect.sh @@ -1,4 +1,4 @@ #!/bin/bash ${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/detect.py -test $? -eq 0 || exit $? +test $? 
-eq 0 || exit 11 diff --git a/script/get-dataset-igbh/meta.yaml b/script/get-dataset-igbh/meta.yaml index b48e7b42c..2f6dc96ad 100644 --- a/script/get-dataset-igbh/meta.yaml +++ b/script/get-dataset-igbh/meta.yaml @@ -24,9 +24,10 @@ deps: - tags: get,python names: - get-python - - tags: get,generic-python-lib,_package.igb,_url.git+https://github.com/gateoverflow/IGB-Datasets.git + - tags: get,generic-python-lib,_package.igb,_url.git+https://github.com/IllinoisGraphBenchmark/IGB-Datasets - tags: get,generic-python-lib,_package.colorama - tags: get,generic-python-lib,_package.tqdm + - tags: get,generic-python-lib,_package.torch prehook_deps: diff --git a/script/get-dataset-imagenet-aux/meta.yaml b/script/get-dataset-imagenet-aux/meta.yaml index 00036303f..0863b982d 100644 --- a/script/get-dataset-imagenet-aux/meta.yaml +++ b/script/get-dataset-imagenet-aux/meta.yaml @@ -30,10 +30,19 @@ variations: '2012': env: MLC_DATASET_AUX_VER: '2012' - from.berkeleyvision: + from.go: base: - '2012' default: true + env: + MLC_DOWNLOAD_CHECKSUM: ee346d67141e476df9c1a3f813552503 + MLC_DOWNLOAD_CHECKSUM1: ee346d67141e476df9c1a3f813552503 + MLC_PACKAGE_URL: https://armi.in/files/caffe_ilsvrc12.tar.gz + MLC_PACKAGE_URL1: https://www.dropbox.com/s/92n2fyej3lzy3s3/caffe_ilsvrc12.tar.gz + group: download-source + from.berkeleyvision: + base: + - '2012' env: MLC_DOWNLOAD_CHECKSUM: f963098ea0e785a968ca1eb634003a90 MLC_DOWNLOAD_CHECKSUM1: ee346d67141e476df9c1a3f813552503 diff --git a/script/get-ml-model-pointpillars/COPYRIGHT.md b/script/get-dataset-waymo-calibration/COPYRIGHT.md similarity index 100% rename from script/get-ml-model-pointpillars/COPYRIGHT.md rename to script/get-dataset-waymo-calibration/COPYRIGHT.md diff --git a/script/get-dataset-waymo-calibration/customize.py b/script/get-dataset-waymo-calibration/customize.py new file mode 100644 index 000000000..e404aa512 --- /dev/null +++ b/script/get-dataset-waymo-calibration/customize.py @@ -0,0 +1,35 @@ +from mlc import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if os_info['platform'] == "windows": + return {'return': 1, 'error': 'Script not supported in windows yet!'} + + if env.get('MLC_DATASET_WAYMO_CALIBRATION_PATH', '') != '': + if not os.path.exists(env['MLC_DATASET_WAYMO_CALIBRATION_PATH']): + return { + 'return': 1, 'error': f"Path {env['MLC_DATASET_WAYMO_CALIBRATION_PATH']} does not exists!"} + else: + env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" + if env['MLC_DOWNLOAD_SRC'] == "mlcommons": + i['run_script_input']['script_name'] = 'run-rclone' + if env.get('MLC_OUTDIRNAME', '') != '': + env['MLC_DATASET_WAYMO_CALIBRATION_PATH'] = env['MLC_OUTDIRNAME'] + else: + env['MLC_DATASET_WAYMO_CALIBRATION_PATH'] = os.path.join( + os.getcwd(), "kitti_format", "calibration") + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/script/get-dataset-waymo-calibration/meta.yaml b/script/get-dataset-waymo-calibration/meta.yaml new file mode 100644 index 000000000..dbc17d494 --- /dev/null +++ b/script/get-dataset-waymo-calibration/meta.yaml @@ -0,0 +1,35 @@ +alias: get-dataset-waymo-calibration +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +input_mapping: + waymo_calibration_path: MLC_DATASET_WAYMO_CALIBRATION_PATH +new_env_keys: +- MLC_DATASET_WAYMO_CALIBRATION_PATH +tags: +- get +- waymo +- dataset +- calibration +uid: 59d3a8d48d5e4767 +variations: + kitti_format: + default: true + env: + MLC_DATASET_WAYMO_FORMAT: kitti + group: dataset-format 
+ mlc: + default: true + env: + MLC_DOWNLOAD_SRC: mlcommons + group: download-src + prehook_deps: + - enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - true + tags: get,rclone + - enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - true + force_cache: true + tags: get,rclone-config,_waymo diff --git a/script/get-dataset-waymo-calibration/run-rclone.sh b/script/get-dataset-waymo-calibration/run-rclone.sh new file mode 100644 index 000000000..fd289eff4 --- /dev/null +++ b/script/get-dataset-waymo-calibration/run-rclone.sh @@ -0,0 +1,4 @@ +cmd="rclone sync mlc-waymo:waymo_preprocessed_dataset/kitti_format/testing ${MLC_DATASET_WAYMO_CALIBRATION_PATH} -P" +echo $cmd +eval $cmd +test $? -eq 0 || exit $? \ No newline at end of file diff --git a/script/get-dataset-waymo/customize.py b/script/get-dataset-waymo/customize.py index 273feef06..cb625f443 100644 --- a/script/get-dataset-waymo/customize.py +++ b/script/get-dataset-waymo/customize.py @@ -11,12 +11,19 @@ def preprocess(i): if os_info['platform'] == "windows": return {'return': 1, 'error': 'Script not supported in windows yet!'} - if env.get('MLC_DATASET_WAYMO_PATH', '') == '': - return {'return': 1, 'error': 'Please provide path to kitti dataset using tag \\`--waymo_path\\`as automatic download of this dataset is not supported yet.'} - - if not os.path.exists(env['MLC_DATASET_WAYMO_PATH']): - return { - 'return': 1, 'error': f"Path {env['MLC_DATASET_WAYMO_PATH']} does not exists!"} + if env.get('MLC_DATASET_WAYMO_PATH', '') != '': + if not os.path.exists(env['MLC_DATASET_WAYMO_PATH']): + return { + 'return': 1, 'error': f"Path {env['MLC_DATASET_WAYMO_PATH']} does not exists!"} + else: + env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" + if env['MLC_DOWNLOAD_SRC'] == "mlcommons": + i['run_script_input']['script_name'] = 'run-rclone' + if env.get('MLC_OUTDIRNAME', '') != '': + env['MLC_DATASET_WAYMO_PATH'] = env['MLC_OUTDIRNAME'] + else: + env['MLC_DATASET_WAYMO_PATH'] = os.path.join( + os.getcwd(), "kitti_format") return {'return': 0} diff --git a/script/get-dataset-waymo/meta.yaml b/script/get-dataset-waymo/meta.yaml index bfbba995f..63bbf2472 100644 --- a/script/get-dataset-waymo/meta.yaml +++ b/script/get-dataset-waymo/meta.yaml @@ -17,3 +17,18 @@ variations: group: dataset-format env: MLC_DATASET_WAYMO_FORMAT: kitti + mlc: + group: download-src + default: true + prehook_deps: + - tags: get,rclone + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes + - tags: get,rclone-config,_waymo + force_cache: true + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes + env: + MLC_DOWNLOAD_SRC: mlcommons diff --git a/script/get-dataset-waymo/run-rclone.sh b/script/get-dataset-waymo/run-rclone.sh new file mode 100644 index 000000000..12aac3377 --- /dev/null +++ b/script/get-dataset-waymo/run-rclone.sh @@ -0,0 +1,7 @@ +cmd="rclone sync mlc-waymo:waymo_preprocessed_dataset/kitti_format ${MLC_DATASET_WAYMO_PATH} -P" +echo $cmd +eval $cmd +test $? -eq 0 || exit $? 
+cd ${MLC_DATASET_WAYMO_PATH}/kitti_format/training +for f in *.tar.gz; do tar -xzvf "$f"; done +cd - diff --git a/script/get-lib-armnn/customize.py b/script/get-lib-armnn/customize.py index 263850444..b6aaae7a5 100644 --- a/script/get-lib-armnn/customize.py +++ b/script/get-lib-armnn/customize.py @@ -11,7 +11,7 @@ def preprocess(i): version = env['MLC_LIB_ARMNN_VERSION'] if env.get('MLC_HOST_PLATFORM_FLAVOR', '') == 'x86_64': url = f"https://github.com/ARM-software/armnn/releases/download/{version}/ArmNN-linux-x86_64.tar.gz" - elif env.get('MLC_HOST_PLATFORM_FLAVOR', '') == 'aarch64': + elif env.get('MLC_HOST_PLATFORM_FLAVOR', '') in ['arm64', 'aarch64']: url = f"https://github.com/ARM-software/armnn/releases/download/{version}/ArmNN-linux-aarch64.tar.gz" env['MLC_LIB_ARMNN_PREBUILT_BINARY_URL'] = url diff --git a/script/get-lib-armnn/meta.yaml b/script/get-lib-armnn/meta.yaml index 6c33e97ed..ff71bf95a 100644 --- a/script/get-lib-armnn/meta.yaml +++ b/script/get-lib-armnn/meta.yaml @@ -3,7 +3,7 @@ automation_alias: script automation_uid: 5b4e0237da074764 cache: true category: Detection or installation of tools and artifacts -default_version: '23.11' +default_version: '25.02' deps: - tags: detect,os env: diff --git a/script/get-ml-model-llama2/meta.yaml b/script/get-ml-model-llama2/meta.yaml index ed7477c04..d0e14cf77 100644 --- a/script/get-ml-model-llama2/meta.yaml +++ b/script/get-ml-model-llama2/meta.yaml @@ -90,8 +90,15 @@ variations: env: MLC_DOWNLOAD_SRC: mlcommons prehook_deps: + - tags: get,rclone + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes - tags: get,rclone-config,_mlperf-llama2 force_cache: yes + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes hf: group: download-source env: diff --git a/script/get-ml-model-llama3/meta.yaml b/script/get-ml-model-llama3/meta.yaml index 673f34c49..2ae6ba84e 100644 --- a/script/get-ml-model-llama3/meta.yaml +++ b/script/get-ml-model-llama3/meta.yaml @@ -52,8 +52,15 @@ variations: group: download-src default: true prehook_deps: + - tags: get,rclone + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes - tags: get,rclone-config,_mlperf-llama3-1 force_cache: true + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes env: MLC_DOWNLOAD_SRC: mlcommons hf: diff --git a/script/get-ml-model-resnet50-deeplab/COPYRIGHT.md b/script/get-ml-model-pointpainting/COPYRIGHT.md similarity index 100% rename from script/get-ml-model-resnet50-deeplab/COPYRIGHT.md rename to script/get-ml-model-pointpainting/COPYRIGHT.md diff --git a/script/get-ml-model-pointpainting/customize.py b/script/get-ml-model-pointpainting/customize.py new file mode 100644 index 000000000..07b18602d --- /dev/null +++ b/script/get-ml-model-pointpainting/customize.py @@ -0,0 +1,59 @@ +from mlc import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if os_info['platform'] == "windows": + return {'return': 1, 'error': 'Script not supported in windows yet!'} + + if env.get('MLC_ML_MODEL_POINT_PAINTING_PATH', '') != '': + if not os.path.exists(env['MLC_ML_MODEL_POINT_PAINTING']): + return { + 'return': 1, 'error': f"Provided model path {env['MLC_ML_MODEL_POINT_PAINTING']} does not exist."} + + if env.get('MLC_ML_MODEL_DPLAB_RESNET50_PATH', '') != '': + if not os.path.exists(env['MLC_ML_MODEL_DPLAB_RESNET50_PATH']): + return { + 'return': 1, 'error': f"Provided model path {env['MLC_ML_MODEL_DPLAB_RESNET50_PATH']} does not exist."} + + if env.get('MLC_ML_MODEL_POINT_PAINTING_PATH', '') == '' or env.get( + 'MLC_ML_MODEL_DPLAB_RESNET50_PATH', '') == '': 
+ env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" + if env['MLC_DOWNLOAD_SRC'] == "mlcommons": + i['run_script_input']['script_name'] = 'run-rclone' + if env.get('MLC_OUTDIRNAME', '') != '': + env['MLC_ML_MODEL_POINT_PAINTING_TMP_PATH'] = env['MLC_OUTDIRNAME'] + else: + env['MLC_ML_MODEL_POINT_PAINTING_TMP_PATH'] = os.path.join( + os.getcwd(), "model") + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + if env.get('MLC_ML_MODEL_POINT_PAINTING_PATH', '') == '': + if env['MLC_ML_MODEL_PP_FORMAT'] == "onnx": + env['MLC_ML_MODEL_POINT_PAINTING_PATH'] = os.path.join( + env['MLC_ML_MODEL_POINT_PAINTING_TMP_PATH'], "pp.onnx") + else: + env['MLC_ML_MODEL_POINT_PAINTING_PATH'] = os.path.join( + env['MLC_ML_MODEL_POINT_PAINTING_TMP_PATH'], "pp_ep36.pth") + + if env.get('MLC_ML_MODEL_DPLAB_RESNET50_PATH', '') == '': + if env['MLC_ML_MODEL_DPLAB_RESNET50_FORMAT'] == "onnx": + env['MLC_ML_MODEL_DPLAB_RESNET50_PATH'] = os.path.join( + env['MLC_ML_MODEL_POINT_PAINTING_TMP_PATH'], "deeplabv3+.onnx") + else: + env['MLC_ML_MODEL_DPLAB_RESNET50_PATH'] = os.path.join( + env['MLC_ML_MODEL_POINT_PAINTING_TMP_PATH'], + "best_deeplabv3plus_resnet50_waymo_os16.pth") + + return {'return': 0} diff --git a/script/get-ml-model-pointpainting/meta.yaml b/script/get-ml-model-pointpainting/meta.yaml new file mode 100644 index 000000000..b811d58d6 --- /dev/null +++ b/script/get-ml-model-pointpainting/meta.yaml @@ -0,0 +1,45 @@ +alias: get-ml-model-pointpillars +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +tags: +- get +- ml-model +- ml +- model +- pointpainting +uid: 3562621a8994411d +new_env_keys: + - MLC_ML_MODEL_POINT_PAINTING_PATH + - MLC_ML_MODEL_DPLAB_RESNET50_PATH +input_mapping: + pp_path: MLC_ML_MODEL_POINT_PAINTING_PATH + dp_resnet50_path: MLC_ML_MODEL_DPLAB_RESNET50_PATH +variations: + gpu: + default: true + group: device + env: + MLC_ML_MODEL_PP_FORMAT: pth + MLC_ML_MODEL_DPLAB_RESNET50_FORMAT: pth + cpu: + group: device + env: + MLC_ML_MODEL_PP_FORMAT: onnx + MLC_ML_MODEL_DPLAB_RESNET50_FORMAT: onnx + mlc: + group: download-src + default: true + prehook_deps: + - tags: get,rclone + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes + - tags: get,rclone-config,_waymo + force_cache: true + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes + env: + MLC_DOWNLOAD_SRC: mlcommons + diff --git a/script/get-ml-model-pointpainting/run-rclone.sh b/script/get-ml-model-pointpainting/run-rclone.sh new file mode 100644 index 000000000..9b76a1511 --- /dev/null +++ b/script/get-ml-model-pointpainting/run-rclone.sh @@ -0,0 +1,4 @@ +cmd="rclone sync mlc-waymo:waymo_preprocessed_dataset/model ${MLC_ML_MODEL_POINT_PAINTING_TMP_PATH} -P" +echo $cmd +eval $cmd +test $? -eq 0 || exit $? 
\ No newline at end of file diff --git a/script/get-ml-model-pointpillars/run.sh b/script/get-ml-model-pointpainting/run.sh similarity index 100% rename from script/get-ml-model-pointpillars/run.sh rename to script/get-ml-model-pointpainting/run.sh diff --git a/script/get-ml-model-pointpillars/customize.py b/script/get-ml-model-pointpillars/customize.py deleted file mode 100644 index b6685c889..000000000 --- a/script/get-ml-model-pointpillars/customize.py +++ /dev/null @@ -1,32 +0,0 @@ -from mlc import utils -import os - - -def preprocess(i): - - os_info = i['os_info'] - - env = i['env'] - - if os_info['platform'] == "windows": - return {'return': 1, 'error': 'Script not supported in windows yet!'} - - if env.get('MLC_ML_MODEL_POINT_PILLARS_PATH', '') == '': - return {'return': 1, 'error': 'Please provide path to pointpillars model using tag \\`--pp_path\\`as automatic download of this model is not supported yet.'} - - if os.path.isdir(env['MLC_ML_MODEL_POINT_PILLARS_PATH']): - if env['MLC_ML_MODEL_PP_FORMAT'] == "onnx": - env['MLC_ML_MODEL_POINT_PILLARS_PATH'] = os.path.join( - env['MLC_ML_MODEL_POINT_PILLARS_PATH'], "pp.onnx") - else: - env['MLC_ML_MODEL_POINT_PILLARS_PATH'] = os.path.join( - env['MLC_ML_MODEL_POINT_PILLARS_PATH'], "pp_ep36.pth") - - return {'return': 0} - - -def postprocess(i): - - env = i['env'] - - return {'return': 0} diff --git a/script/get-ml-model-pointpillars/meta.yaml b/script/get-ml-model-pointpillars/meta.yaml deleted file mode 100644 index 18470e4c0..000000000 --- a/script/get-ml-model-pointpillars/meta.yaml +++ /dev/null @@ -1,26 +0,0 @@ -alias: get-ml-model-pointpillars -automation_alias: script -automation_uid: 5b4e0237da074764 -cache: true -tags: -- get -- ml-model -- ml -- model -- pointpillars -uid: 3562621a8994411d -new_env_keys: - - MLC_ML_MODEL_POINT_PILLARS_PATH -input_mapping: - pp_path: MLC_ML_MODEL_POINT_PILLARS_PATH -variations: - gpu: - default: true - group: device - env: - MLC_ML_MODEL_PP_FORMAT: pth - cpu: - group: device - env: - MLC_ML_MODEL_PP_FORMAT: onnx - diff --git a/script/get-ml-model-resnet50-deeplab/customize.py b/script/get-ml-model-resnet50-deeplab/customize.py deleted file mode 100644 index 0df3b1c3f..000000000 --- a/script/get-ml-model-resnet50-deeplab/customize.py +++ /dev/null @@ -1,33 +0,0 @@ -from mlc import utils -import os - - -def preprocess(i): - - os_info = i['os_info'] - - env = i['env'] - - if os_info['platform'] == "windows": - return {'return': 1, 'error': 'Script not supported in windows yet!'} - - if env.get('MLC_ML_MODEL_DPLAB_RESNET50_PATH', '') == '': - return {'return': 1, 'error': 'Please provide path to deeplab resnet 50 model using tag \\`--dp_resnet50_path\\`as automatic download of this dataset is not supported yet.'} - - if os.path.isdir(env['MLC_ML_MODEL_DPLAB_RESNET50_PATH']): - if env['MLC_ML_MODEL_DPLAB_RESNET50_FORMAT'] == "onnx": - env['MLC_ML_MODEL_DPLAB_RESNET50_PATH'] = os.path.join( - env['MLC_ML_MODEL_DPLAB_RESNET50_PATH'], "deeplabv3+.onnx") - else: - env['MLC_ML_MODEL_DPLAB_RESNET50_PATH'] = os.path.join( - env['MLC_ML_MODEL_DPLAB_RESNET50_PATH'], - "best_deeplabv3plus_resnet50_waymo_os16.pth") - - return {'return': 0} - - -def postprocess(i): - - env = i['env'] - - return {'return': 0} diff --git a/script/get-ml-model-resnet50-deeplab/meta.yaml b/script/get-ml-model-resnet50-deeplab/meta.yaml deleted file mode 100644 index c8c8b84e1..000000000 --- a/script/get-ml-model-resnet50-deeplab/meta.yaml +++ /dev/null @@ -1,27 +0,0 @@ -alias: get-dataset-deeplab-resnet50 -automation_alias: script 
-automation_uid: 5b4e0237da074764 -cache: true -tags: -- get -- ml-model -- ml -- model -- resnet50-deeplab -- resnet50 -- deeplab -uid: 93097b691a6a4fce -new_env_keys: - - MLC_ML_MODEL_DPLAB_RESNET50_PATH -input_mapping: - dp_resnet50_path: MLC_ML_MODEL_DPLAB_RESNET50_PATH -variations: - gpu: - default: true - group: device - env: - MLC_ML_MODEL_DPLAB_RESNET50_FORMAT: pth - cpu: - group: device - env: - MLC_ML_MODEL_DPLAB_RESNET50_FORMAT: onnx diff --git a/script/get-ml-model-resnet50-deeplab/run.sh b/script/get-ml-model-resnet50-deeplab/run.sh deleted file mode 100644 index 3197bb8ad..000000000 --- a/script/get-ml-model-resnet50-deeplab/run.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} - -#To export any variable -#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out - -#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency diff --git a/script/get-onnxruntime-prebuilt/meta.yaml b/script/get-onnxruntime-prebuilt/meta.yaml index 57078077a..3a3a185a9 100644 --- a/script/get-onnxruntime-prebuilt/meta.yaml +++ b/script/get-onnxruntime-prebuilt/meta.yaml @@ -4,7 +4,7 @@ automation_uid: 5b4e0237da074764 cache: true category: AI/ML frameworks clean_files: [] -default_version: 1.16.3 +default_version: 1.20.1 deps: - tags: detect,os new_env_keys: diff --git a/script/get-platform-details/README-EXTRA.md b/script/get-platform-details/README-EXTRA.md index 45ac261f2..c5b6ebfc2 100644 --- a/script/get-platform-details/README-EXTRA.md +++ b/script/get-platform-details/README-EXTRA.md @@ -1,7 +1,7 @@ Please execute the following CM command to obtain the platform details of the System Under Test (SUT): ``` -mlcr --tags=get,platform-details --platform_details_dir= +mlcr get,platform-details --platform_details_dir= ``` diff --git a/script/get-platform-details/run.sh b/script/get-platform-details/run.sh index 843d1cba7..a2ebfd341 100644 --- a/script/get-platform-details/run.sh +++ b/script/get-platform-details/run.sh @@ -46,7 +46,7 @@ echo "8. numactl --hardware" >> "${OUTPUT_FILE}" if [[ ${MLC_SUDO_USER} == "yes" ]]; then echo "${MLC_SUDO} numactl --hardware" eval "${MLC_SUDO} numactl --hardware" >> "${OUTPUT_FILE}" - test $? -eq 0 || exit $? + #test $? -eq 0 || exit $? else echo "Requires SUDO permission" >> "${OUTPUT_FILE}" fi @@ -86,7 +86,7 @@ echo "15. sysctl" >> "${OUTPUT_FILE}" if [[ ${MLC_SUDO_USER} == "yes" ]]; then echo "${MLC_SUDO} sysctl -a" eval "${MLC_SUDO} sysctl -a" >> "${OUTPUT_FILE}" - test $? -eq 0 || exit $? + #test $? -eq 0 || exit $? else echo "Requires SUDO permission" >> "${OUTPUT_FILE}" fi @@ -94,12 +94,12 @@ echo "------------------------------------------------------------" >> "${OUTPUT echo "16. /sys/kernel/mm/transparent_hugepage" >> "${OUTPUT_FILE}" eval "cat /sys/kernel/mm/transparent_hugepage/enabled" >> "${OUTPUT_FILE}" -test $? -eq 0 || exit $? +#test $? -eq 0 || exit $? echo "------------------------------------------------------------" >> "${OUTPUT_FILE}" echo "17. /sys/kernel/mm/transparent_hugepage/khugepaged" >> "${OUTPUT_FILE}" eval "cat /sys/kernel/mm/transparent_hugepage/khugepaged/defrag" >> "${OUTPUT_FILE}" -test $? -eq 0 || exit $? +#test $? -eq 0 || exit $? echo "------------------------------------------------------------" >> "${OUTPUT_FILE}" echo "18. OS release" >> "${OUTPUT_FILE}" @@ -114,7 +114,7 @@ echo "------------------------------------------------------------" >> "${OUTPUT echo "20. 
/sys/devices/virtual/dmi/id" >> "${OUTPUT_FILE}" eval "ls /sys/devices/virtual/dmi/id" >> "${OUTPUT_FILE}" -test $? -eq 0 || exit $? +#test $? -eq 0 || exit $? echo "------------------------------------------------------------" >> "${OUTPUT_FILE}" echo "21. dmidecode" >> "${OUTPUT_FILE}" diff --git a/script/get-preprocessed-dataset-openimages/meta.yaml b/script/get-preprocessed-dataset-openimages/meta.yaml index 20583814d..12d907289 100644 --- a/script/get-preprocessed-dataset-openimages/meta.yaml +++ b/script/get-preprocessed-dataset-openimages/meta.yaml @@ -52,7 +52,6 @@ variations: ad: original-dataset: tags: _50 - default: true env: MLC_DATASET_SIZE: '50' group: dataset-count @@ -139,6 +138,7 @@ variations: group: dataset-precision full: group: dataset-count + default: true full,validation: ad: original-dataset: diff --git a/script/get-rclone-config/meta.yaml b/script/get-rclone-config/meta.yaml index 8cc949d25..f2f3332f0 100644 --- a/script/get-rclone-config/meta.yaml +++ b/script/get-rclone-config/meta.yaml @@ -19,3 +19,7 @@ variations: env: MLC_RCLONE_CONFIG_CMD: 'rclone config create mlc-llama3-1 drive config_is_local=false scope=drive.readonly root_folder_id=12K-2yvmr1ZSZ7SLrhidCbWc0BriN98am' MLC_RCLONE_CONNECT_CMD: 'rclone config reconnect mlc-llama3-1:' + waymo: + env: + MLC_RCLONE_CONFIG_CMD: 'rclone config create mlc-waymo drive config_is_local=false scope=drive.readonly root_folder_id=1xbfnaUurFeXliFFl1i1gj48eRU2NDiH5' + MLC_RCLONE_CONNECT_CMD: 'rclone config reconnect mlc-waymo:' diff --git a/script/get-rocm-devices/README.md b/script/get-rocm-devices/README.md index 294a147bb..722b01028 100644 --- a/script/get-rocm-devices/README.md +++ b/script/get-rocm-devices/README.md @@ -1,4 +1,4 @@ Run this script ``` -mlcr --tags=get,rocm-devices +mlcr get,rocm-devices ``` diff --git a/script/install-python-src/meta.yaml b/script/install-python-src/meta.yaml index 99ed1df15..f2e6e533f 100644 --- a/script/install-python-src/meta.yaml +++ b/script/install-python-src/meta.yaml @@ -20,6 +20,9 @@ deps: MLC_HOST_OS_FLAVOR: - ubuntu - tags: get,generic-sys-util,_libssl-dev + enable_if_env: + MLC_ENABLE_SSL: + - 'yes' - enable_if_env: MLC_HOST_OS_FLAVOR: - ubuntu @@ -84,3 +87,4 @@ variations: MLC_ENABLE_SSL: 'yes' MLC_PYTHON_INSTALL_CACHE_TAGS: with-ssl group: ssl + default: true diff --git a/script/install-tensorflow-from-src/meta.yaml b/script/install-tensorflow-from-src/meta.yaml index 30821bb38..c219d1179 100644 --- a/script/install-tensorflow-from-src/meta.yaml +++ b/script/install-tensorflow-from-src/meta.yaml @@ -344,3 +344,51 @@ versions: version: 5.0.0 env: MLC_GIT_CHECKOUT: v2.9.0 + v2.12.0: + deps: + - names: + - python + - python3 + tags: get,python + version_max: 3.11.999 + version_max_usable: 3.11.12 + version_min: 3.8.0 + - tags: get,gcc + version_max: '12.9' + version_min: '9' + - tags: get,bazel + version: 5.3.0 + env: + MLC_GIT_CHECKOUT: v2.12.0 + v2.15.0: + deps: + - names: + - python + - python3 + tags: get,python + version_max: 3.11.999 + version_max_usable: 3.11.12 + version_min: 3.9.0 + - tags: get,llvm + version_max: '16.9.999' + version_min: '16.0.0' + - tags: get,bazel + version: 6.1.0 + env: + MLC_GIT_CHECKOUT: v2.15.0 + v2.18.0: + deps: + - names: + - python + - python3 + tags: get,python + version_max: 3.12.999 + version_max_usable: 3.12.12 + version_min: 3.9.0 + - tags: get,llvm + version_max: '17.9.999' + version_min: '17.0.6' + - tags: get,bazel + version: 6.5.0 + env: + MLC_GIT_CHECKOUT: v2.18.0 diff --git 
a/script/preprocess-mlperf-inference-submission/customize.py b/script/preprocess-mlperf-inference-submission/customize.py index 58f38524b..a394b4446 100644 --- a/script/preprocess-mlperf-inference-submission/customize.py +++ b/script/preprocess-mlperf-inference-submission/customize.py @@ -35,6 +35,8 @@ def preprocess(i): extra_args = [] if is_true(env.get('MLC_MLPERF_NOINFER_LOW_ACCURACY_RESULTS')): extra_args.append("--noinfer-low-accuracy-results") + if is_true(env.get('MLC_MLPERF_NOINFER_SCENARIO_RESULTS')): + extra_args.append("--noinfer-scenario-results") if is_true(env.get('MLC_MLPERF_NODELETE_EMPTY_DIRS')): extra_args.append("--nodelete-empty-dirs") if is_true(env.get('MLC_MLPERF_NOMOVE_FAILED_TO_OPEN')): diff --git a/script/preprocess-mlperf-inference-submission/meta.yaml b/script/preprocess-mlperf-inference-submission/meta.yaml index 4e5d3023e..5381b36dd 100644 --- a/script/preprocess-mlperf-inference-submission/meta.yaml +++ b/script/preprocess-mlperf-inference-submission/meta.yaml @@ -12,7 +12,8 @@ deps: - names: - inference-src - submission-checker-src - tags: get,mlcommons,inference,src + tags: get,mlcommons,inference,src,_branch.dev + version: custom - names: - get-mlperf-submission-dir skip_if_env: @@ -25,6 +26,9 @@ input_mapping: version: MLC_MLPERF_SUBMISSION_CHECKER_VERSION submitter: MLC_MLPERF_SUBMITTER submission_preprocessor_args: MLC_MLPERF_PREPROCESS_SUBMISSION_EXTRA_ARGS +default_env: + MLC_MLPERF_NOINFER_LOW_ACCURACY_RESULTS: True + MLC_MLPERF_NOINFER_SCENARIO_RESULTS: True tags: - run - mlc diff --git a/script/push-mlperf-inference-results-to-github/run.sh b/script/push-mlperf-inference-results-to-github/run.sh index 53a297cf9..ffac61801 100644 --- a/script/push-mlperf-inference-results-to-github/run.sh +++ b/script/push-mlperf-inference-results-to-github/run.sh @@ -17,6 +17,7 @@ fi test $? -eq 0 || exit $? git commit -a -m "${MLC_MLPERF_RESULTS_REPO_COMMIT_MESSAGE}" +test $? -eq 0 || exit $? echo ${MLC_GIT_PUSH_CMD} ${MLC_GIT_PUSH_CMD} diff --git a/script/run-all-mlperf-models/run-bert-macos.sh b/script/run-all-mlperf-models/run-bert-macos.sh index e0275153c..bc7a75747 100644 --- a/script/run-all-mlperf-models/run-bert-macos.sh +++ b/script/run-all-mlperf-models/run-bert-macos.sh @@ -37,29 +37,29 @@ function run_test() { power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' power="" #Add your run commands here... 
-find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--skip_submission_generation=yes --execution_mode=valid $power' -submission_cmd_scenario='mlcr --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--skip_submission_generation=yes --execution_mode=valid $power' -readme_cmd_single='mlcr --tags=generate-run-cmds,inference,_populate-readme \ +readme_cmd_single='mlcr generate-run-cmds,inference,_populate-readme \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--skip_submission_generation=yes --execution_mode=valid $power' -readme_cmd='mlcr --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +readme_cmd='mlcr generate-run-cmds,inference,_populate-readme,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--skip_submission_generation=yes --execution_mode=valid $power' # run "$MLC_RUN_CMD" run_test "onnxruntime" "100" "reference" "cpu" "$find_performance_cmd" diff --git a/script/run-all-mlperf-models/run-bert.sh b/script/run-all-mlperf-models/run-bert.sh index 530c55e48..815e144ec 100644 --- a/script/run-all-mlperf-models/run-bert.sh +++ b/script/run-all-mlperf-models/run-bert.sh @@ -38,29 +38,20 @@ function run_test() { power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' #Add your run commands here... 
-find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ ---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--category=$category --division=$division --quiet \ +--skip_submission_generation=yes --execution_mode=valid $power' -submission_cmd_scenario='mlcr --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ ---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--category=$category --division=$division --quiet \ +--skip_submission_generation=yes --execution_mode=valid $power' -readme_cmd_single='mlcr --tags=generate-run-cmds,inference,_populate-readme \ ---model=$model --implementation=$implementation --device=$device --backend=$backend \ ---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' - -readme_cmd='mlcr --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ ---model=$model --implementation=$implementation --device=$device --backend=$backend \ ---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' # run "$MLC_RUN_CMD" run_test "onnxruntime" "20" "reference" "cpu" "$find_performance_cmd" diff --git a/script/run-all-mlperf-models/run-cpp-implementation.sh b/script/run-all-mlperf-models/run-cpp-implementation.sh index 7159cbcd8..3d4c7b93d 100644 --- a/script/run-all-mlperf-models/run-cpp-implementation.sh +++ b/script/run-all-mlperf-models/run-cpp-implementation.sh @@ -27,137 +27,137 @@ division="closed" # run "$MLC_RUN_CMD" POWER=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 " -POWER="" +POWER=" --env.MLC_GET_PLATFORM_DETAILS=no" -run "mlcr --tags=set,system,performance,mode" +#run "mlcr set,system,performance,mode" #cpp -run "mlcr --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr generate-run-cmds,inference,_find-performance \ --model=resnet50 --implementation=cpp --device=cpu --backend=onnxruntime \ --adr.compiler.tags=gcc \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=2000 " -run "mlcr --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr generate-run-cmds,inference,_find-performance \ --model=retinanet --implementation=cpp --device=cpu --backend=onnxruntime \ --adr.compiler.tags=gcc \ --category=edge --division=open --scenario=Offline --quiet" -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --model=resnet50 --implementation=cpp --device=cpu --backend=onnxruntime \ --scenario=Offline 
\ --category=edge --division=$division --quiet \ --adr.compiler.tags=gcc \ ---execution-mode=valid \ +--execution_mode=valid \ --skip_submission_generation=yes \ ${POWER} \ ---results_dir=$HOME/results_dir" +" -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --model=retinanet --implementation=cpp --device=cpu --backend=onnxruntime \ --scenario=Offline \ --category=edge --division=$division --quiet \ --adr.compiler.tags=gcc \ ---execution-mode=valid \ +--execution_mode=valid \ --skip_submission_generation=yes \ ${POWER} \ ---results_dir=$HOME/results_dir" +" -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --model=resnet50 --implementation=cpp --device=cpu --backend=onnxruntime \ --scenario=SingleStream \ --category=edge --division=$division --quiet \ --adr.compiler.tags=gcc \ ---execution-mode=valid \ +--execution_mode=valid \ --skip_submission_generation=yes \ ${POWER} \ ---results_dir=$HOME/results_dir" +" -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --model=retinanet --implementation=cpp --device=cpu --backend=onnxruntime \ --scenario=SingleStream \ --category=edge --division=$division --quiet \ --adr.compiler.tags=gcc \ ---execution-mode=valid \ +--execution_mode=valid \ --skip_submission_generation=yes \ ${POWER} \ ---results_dir=$HOME/results_dir" +" # GPU -run "mlcr --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr generate-run-cmds,inference,_find-performance \ --model=resnet50 --implementation=cpp --device=cuda --backend=onnxruntime \ --adr.compiler.tags=gcc \ --test_query_count=20000 \ --category=edge --division=open --scenario=Offline --quiet" -run "mlcr --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr generate-run-cmds,inference,_find-performance \ --model=retinanet --implementation=cpp --device=cuda --backend=onnxruntime \ --adr.compiler.tags=gcc \ --test_query_count=2000 \ --category=edge --division=open --scenario=Offline --quiet" -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --scenario=Offline \ --model=resnet50 --implementation=cpp --device=cuda --backend=onnxruntime \ --category=edge --division=$division --quiet \ --adr.compiler.tags=gcc \ ---execution-mode=valid \ +--execution_mode=valid \ --skip_submission_generation=yes \ ${POWER} \ ---results_dir=$HOME/results_dir" +" -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --model=retinanet --implementation=cpp --device=cuda --backend=onnxruntime \ --scenario=Offline \ --category=edge --division=$division --quiet \ --adr.compiler.tags=gcc \ ---execution-mode=valid \ +--execution_mode=valid \ --skip_submission_generation=yes \ ${POWER} \ ---results_dir=$HOME/results_dir" +" -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --scenario=Offline \ --model=resnet50 --implementation=cpp --device=cuda --backend=onnxruntime \ --scenario=SingleStream \ --category=edge --division=$division --quiet \ --adr.compiler.tags=gcc \ ---execution-mode=valid \ +--execution_mode=valid \ --skip_submission_generation=yes \ ${POWER} \ ---results_dir=$HOME/results_dir" +" -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --model=retinanet --implementation=cpp --device=cuda 
--backend=onnxruntime \ --scenario=SingleStream \ --category=edge --division=$division --quiet \ --adr.compiler.tags=gcc \ ---execution-mode=valid \ +--execution_mode=valid \ --skip_submission_generation=yes \ ${POWER} \ ---results_dir=$HOME/results_dir" +" #multistream -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --scenario=Offline \ --model=resnet50 --implementation=cpp --device=cuda --backend=onnxruntime \ --scenario=MultiStream \ --category=edge --division=$division --quiet \ --adr.compiler.tags=gcc \ ---execution-mode=valid \ +--execution_mode=valid \ --skip_submission_generation=yes \ ${POWER} \ ---results_dir=$HOME/results_dir" +" -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --model=retinanet --implementation=cpp --device=cuda --backend=onnxruntime \ --scenario=MultiStream \ --category=edge --division=$division --quiet \ --adr.compiler.tags=gcc \ ---execution-mode=valid \ +--execution_mode=valid \ --skip_submission_generation=yes \ ${POWER} \ ---results_dir=$HOME/results_dir" +" diff --git a/script/run-all-mlperf-models/run-mobilenet-models.sh b/script/run-all-mlperf-models/run-mobilenet-models.sh index 8fa760c8e..ad24ed87d 100644 --- a/script/run-all-mlperf-models/run-mobilenet-models.sh +++ b/script/run-all-mlperf-models/run-mobilenet-models.sh @@ -24,7 +24,8 @@ function run() { } POWER=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4940 " POWER="" -extra_option="" +#extra_option=" --minimize_disk_usage=yes" +extra_option=" --minimize_disk_usage=no" extra_tags="" #extra_option=" --adr.mlperf-inference-implementation.compressed_dataset=on" #extra_tags=",_only-fp32" @@ -32,36 +33,21 @@ extra_tags="" #Add your run commands here... 
# run "$MLC_RUN_CMD" -run "mlcr --tags=run,mobilenet-models,_tflite,_accuracy-only$extra_tags \ +run "mlcr run,mobilenet-models,_tflite$extra_tags \ --adr.compiler.tags=gcc \ -${extra_option} \ ---results_dir=$HOME/results_dir" +${extra_option} " -run "mlcr --tags=run,mobilenet-models,_tflite,_performance-only$extra_tags \ -${POWER} \ +run "mlcr run,mobilenet-models,_tflite,_armnn,_neon$extra_tags \ --adr.compiler.tags=gcc \ -${extra_option} \ ---results_dir=$HOME/results_dir" +${extra_option} " -run "mlcr --tags=run,mobilenet-models,_tflite,_populate-readme$extra_tags \ -${POWER} \ ---adr.compiler.tags=gcc \ -${extra_option} \ ---results_dir=$HOME/results_dir" - -run "mlcr --tags=run,mobilenet-models,_tflite,_armnn,_neon,_accuracy-only$extra_tags \ ---adr.compiler.tags=gcc \ -${extra_option} \ ---results_dir=$HOME/results_dir" -run "mlcr --tags=run,mobilenet-models,_tflite,_armnn,_neon,_performance-only$extra_tags \ -${POWER} \ -${extra_option} \ +extra_option=" $extra_option --adr.mlperf-inference-implementation.compressed_dataset=on" +extra_tag=",_only-fp32" +run "mlcr run,mobilenet-models,_tflite$extra_tags \ --adr.compiler.tags=gcc \ ---results_dir=$HOME/results_dir" +${extra_option} " -run "mlcr --tags=run,mobilenet-models,_tflite,_armnn,_neon,_populate-readme$extra_tags \ -${POWER} \ -${extra_option} \ +run "mlcr run,mobilenet-models,_tflite,_armnn,_neon$extra_tags \ --adr.compiler.tags=gcc \ ---results_dir=$HOME/results_dir" +${extra_option} " diff --git a/script/run-all-mlperf-models/run-nvidia-4090.sh b/script/run-all-mlperf-models/run-nvidia-4090.sh index bc4eb5ae5..135a270d0 100644 --- a/script/run-all-mlperf-models/run-nvidia-4090.sh +++ b/script/run-all-mlperf-models/run-nvidia-4090.sh @@ -38,7 +38,7 @@ power="" power=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15" #Add your run commands here... # run "$MLC_RUN_CMD" -find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count' @@ -49,8 +49,8 @@ find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance #run "3d-unet" "30" "${find_performance_cmd}" -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ ---model=$model --execution-mode=valid --implementation=$implementation --device=$device --backend=$backend --results_dir=$HOME/results_dir \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ +--model=$model --execution_mode=valid --implementation=$implementation --device=$device --backend=$backend --results_dir=$HOME/results_dir \ --category=$category --division=$division --skip_submission_generation=yes --quiet $power' #run_model "bert-99.9" "10" "${submission_cmd} --offline_target_qps=1680 --server_target_qps=1520" diff --git a/script/run-all-mlperf-models/run-nvidia-a100.sh b/script/run-all-mlperf-models/run-nvidia-a100.sh index 70069b9a7..a3489e7d2 100644 --- a/script/run-all-mlperf-models/run-nvidia-a100.sh +++ b/script/run-all-mlperf-models/run-nvidia-a100.sh @@ -37,7 +37,7 @@ connection_type="sxm" #Add your run commands here... 
# run "$MLC_RUN_CMD" -find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count' @@ -48,7 +48,7 @@ run "bert-99" "20000" "${find_performance_cmd}" run "3d-unet-99.9" "30" "${find_performance_cmd}" -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --adr.nvidia-harensss.tags=_${connection_type} $power' diff --git a/script/run-all-mlperf-models/run-nvidia-t4.sh b/script/run-all-mlperf-models/run-nvidia-t4.sh index facdb0a60..adde34344 100644 --- a/script/run-all-mlperf-models/run-nvidia-t4.sh +++ b/script/run-all-mlperf-models/run-nvidia-t4.sh @@ -35,7 +35,7 @@ category="edge,datacenter" #Add your run commands here... # run "$MLC_RUN_CMD" -find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count' @@ -47,7 +47,7 @@ run "bert-99.9" "5000" "${find_performance_cmd}" run "3d-unet" "10" "${find_performance_cmd}" -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet' diff --git a/script/run-all-mlperf-models/run-pruned-bert.sh b/script/run-all-mlperf-models/run-pruned-bert.sh index b7bc2beae..a0b7af75a 100644 --- a/script/run-all-mlperf-models/run-pruned-bert.sh +++ b/script/run-all-mlperf-models/run-pruned-bert.sh @@ -45,8 +45,8 @@ scenario="Offline" if [[ $scenario == "Offline" ]]; then for stub in ${zoo_stub_list[@]}; do -cmd="mlcr --tags=run,mlperf,inference,generate-run-cmds,_find-performance \ - --adr.python.version_min=3.8 \ +cmd="mlcr run,mlperf,inference,generate-run-cmds,_find-performance \ + --adr.python.version=3.9.12 \ --implementation=reference \ --model=bert-99 \ --precision=int8 \ @@ -64,8 +64,8 @@ done fi for stub in ${zoo_stub_list[@]}; do - cmd="mlcr --tags=run,mlperf,inference,generate-run-cmds \ - --adr.python.version_min=3.8 \ + cmd="mlcr run,mlperf,inference,generate-run-cmds \ + --adr.python.version=3.9.12 \ --adr.compiler.tags=gcc \ --implementation=reference \ --model=bert-99 \ diff --git a/script/run-all-mlperf-models/run-reference-models.sh b/script/run-all-mlperf-models/run-reference-models.sh index 84d7526fd..01766158a 100644 --- a/script/run-all-mlperf-models/run-reference-models.sh +++ b/script/run-all-mlperf-models/run-reference-models.sh @@ -25,43 +25,43 @@ function run() { division="closed" #Add your run commands here... 
# run "$MLC_RUN_CMD" -run "mlcr --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr generate-run-cmds,inference,_find-performance \ --model=resnet50 --implementation=reference --device=cpu --backend=onnxruntime \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=100" -run "mlcr --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr generate-run-cmds,inference,_find-performance \ --model=rnnt --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=open --scenario=Offline --quiet" -run "mlcr --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr generate-run-cmds,inference,_find-performance \ --model=retinanet --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=open --scenario=Offline --quiet" -run "mlcr --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr generate-run-cmds,inference,_find-performance \ --model=bert-99 --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=open --scenario=Offline --quiet" -run "mlcr --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr generate-run-cmds,inference,_find-performance \ --model=3d-unet-99 --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=open --scenario=Offline --quiet" -run "mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +run "mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=resnet50 --implementation=reference --device=cpu --backend=onnxruntime \ --category=edge --division=$division --quiet" -run "mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +run "mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=rnnt --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=$division --quiet" -run "mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +run "mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=retinanet --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=$division --quiet" -run "mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +run "mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=bert-99 --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=$division --quiet" -run "mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +run "mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=3d-unet-99 --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=$division --quiet" diff --git a/script/run-all-mlperf-models/run-resnet50-macos.sh b/script/run-all-mlperf-models/run-resnet50-macos.sh index ea2f91346..5b83b9a9b 100644 --- a/script/run-all-mlperf-models/run-resnet50-macos.sh +++ b/script/run-all-mlperf-models/run-resnet50-macos.sh @@ -37,34 +37,26 @@ function run_test() { power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' power="" #Add your run commands here... 
-find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ ---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--category=$category --division=$division --quiet \ +--skip_submission_generation=yes --execution_mode=valid $power' -submission_cmd_scenario='mlcr --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ ---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--category=$category --division=$division --quiet \ +--skip_submission_generation=yes --execution_mode=valid $power' -readme_cmd_single='mlcr --tags=generate-run-cmds,inference,_populate-readme \ ---model=$model --implementation=$implementation --device=$device --backend=$backend \ ---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' - -readme_cmd='mlcr --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ ---model=$model --implementation=$implementation --device=$device --backend=$backend \ ---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' # run "$MLC_RUN_CMD" -run_test "onnxruntime" "6000" "reference" "cpu" "$find_performance_cmd --rerun" -run_test "tf" "6000" "reference" "cpu" "$find_performance_cmd --rerun" +#run_test "onnxruntime" "6000" "reference" "cpu" "$find_performance_cmd --rerun" +#run_test "tf" "6000" "reference" "cpu" "$find_performance_cmd --rerun" run_test "onnxruntime" "100" "reference" "cpu" "$submission_cmd" + run_test "tf" "100" "reference" "cpu" "$submission_cmd" diff --git a/script/run-all-mlperf-models/run-resnet50.sh b/script/run-all-mlperf-models/run-resnet50.sh index d9945c745..a2144bf0a 100644 --- a/script/run-all-mlperf-models/run-resnet50.sh +++ b/script/run-all-mlperf-models/run-resnet50.sh @@ -35,53 +35,37 @@ function run_test() { run "$5" } power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' +power='' #Add your run commands here... 
-find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--skip_submission_generation=yes --execution_mode=valid $power' -submission_cmd_scenario='mlcr --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--skip_submission_generation=yes --execution_mode=valid $power' -readme_cmd_single='mlcr --tags=generate-run-cmds,inference,_populate-readme \ ---model=$model --implementation=$implementation --device=$device --backend=$backend \ ---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' - -readme_cmd='mlcr --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ ---model=$model --implementation=$implementation --device=$device --backend=$backend \ ---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' # run "$MLC_RUN_CMD" run_test "onnxruntime" "200" "reference" "cpu" "$find_performance_cmd" run_test "tf" "200" "reference" "cpu" "$find_performance_cmd" -run_test "onnxruntime" "10000" "reference" "cuda" "$find_performance_cmd" -run_test "tf" "20000" "reference" "cuda" "$find_performance_cmd" run_test "onnxruntime" "100" "reference" "cpu" "$submission_cmd" run_test "tf" "100" "reference" "cpu" "$submission_cmd" scenario="SingleStream" run_test "tflite" "100" "tflite-cpp" "cpu" "$submission_cmd_scenario --adr.compiler.tags=gcc" run_test "tflite" "100" "tflite-cpp" "cpu" "$submission_cmd_scenario --adr.compiler.tags=gcc --adr.mlperf-inference-implementation.compressed_dataset=on" + + +run_test "onnxruntime" "10000" "reference" "cuda" "$find_performance_cmd" +run_test "tf" "20000" "reference" "cuda" "$find_performance_cmd" run_test "onnxruntime" "100" "reference" "cuda" "$submission_cmd " -scenario="Offline" -run_test "tf" "100" "reference" "cuda" "$submission_cmd_scenario" -scenario="SingleStream" -run_test "tf" "100" "reference" "cuda" "$submission_cmd_scenario" - -run_test "onnxruntime" "100" "reference" "cpu" "$readme_cmd" -run_test "tf" "100" "reference" "cpu" "$readme_cmd" -run_test "tflite" "100" "tflite-cpp" "cpu" "$readme_cmd_single --adr.compiler.tags=gcc --scenario=SingleStream" -run_test "tflite" "100" "tflite-cpp" "cpu" "$readme_cmd_single --adr.compiler.tags=gcc --scenario=SingleStream --adr.mlperf-inference-implementation.compressed_dataset=on" -run_test "onnxruntime" "100" "reference" "cuda" "$readme_cmd --scenario=SingleStream" -run_test "tf" "100" "reference" 
"cuda" "$readme_cmd_single --scenario=SingleStream" -run_test "tf" "100" "reference" "cuda" "$readme_cmd_single --scenario=Offline" +run_test "tf" "100" "reference" "cuda" "$submission_cmd" + diff --git a/script/run-all-mlperf-models/run-retinanet-sh b/script/run-all-mlperf-models/run-retinanet-sh index c5ede6296..3f10a88ee 100644 --- a/script/run-all-mlperf-models/run-retinanet-sh +++ b/script/run-all-mlperf-models/run-retinanet-sh @@ -37,29 +37,20 @@ function run_test() { power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' #Add your run commands here... -find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ ---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--category=$category --division=$division --quiet \ +--skip_submission_generation=yes --execution_mode=valid $power' -submission_cmd_scenario='mlcr --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ ---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--category=$category --division=$division --quiet \ +--skip_submission_generation=yes --execution_mode=valid $power' -readme_cmd_single='mlcr --tags=generate-run-cmds,inference,_populate-readme \ ---model=$model --implementation=$implementation --device=$device --backend=$backend \ ---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' - -readme_cmd='mlcr --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ ---model=$model --implementation=$implementation --device=$device --backend=$backend \ ---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' # run "$MLC_RUN_CMD" run_test "onnxruntime" "50" "reference" "cpu" "$find_performance_cmd" diff --git a/script/run-all-mlperf-models/template.sh b/script/run-all-mlperf-models/template.sh index ff43cf2fe..9a5fb1893 100644 --- a/script/run-all-mlperf-models/template.sh +++ b/script/run-all-mlperf-models/template.sh @@ -40,27 +40,27 @@ function run_test() { power=${POWER_STRING} #Add your run commands here... 
-find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--skip_submission_generation=yes --execution_mode=valid $power' -submission_cmd_scenario='mlcr --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--skip_submission_generation=yes --execution_mode=valid $power' -readme_cmd_single='mlcr --tags=generate-run-cmds,inference,_populate-readme \ +readme_cmd_single='mlcr generate-run-cmds,inference,_populate-readme \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--skip_submission_generation=yes --execution_mode=valid $power' -readme_cmd='mlcr --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +readme_cmd='mlcr generate-run-cmds,inference,_populate-readme,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--skip_submission_generation=yes --execution_mode=valid $power' diff --git a/script/run-docker-container/customize.py b/script/run-docker-container/customize.py index 41660a001..02ef20fd0 100644 --- a/script/run-docker-container/customize.py +++ b/script/run-docker-container/customize.py @@ -23,7 +23,7 @@ def preprocess(i): env['MLC_DOCKER_RUN_SCRIPT_TAGS'] = "run,docker,container" MLC_RUN_CMD = "mlc version" else: - MLC_RUN_CMD = "mlcr --tags=" + \ + MLC_RUN_CMD = "mlcr " + \ env['MLC_DOCKER_RUN_SCRIPT_TAGS'] + ' --quiet' r = mlc.access({'action': 'search', @@ -88,6 +88,8 @@ def preprocess(i): if existing_container_id: print( f"""Not using existing container {existing_container_id} as env['MLC_DOCKER_REUSE_EXISTING_CONTAINER'] is not set""") + else: + print("No existing container") if env.get('MLC_DOCKER_CONTAINER_ID', '') != '': del (env['MLC_DOCKER_CONTAINER_ID']) # not valid ID @@ -120,7 +122,6 @@ def preprocess(i): # elif recreate_image == "yes": # env['MLC_DOCKER_IMAGE_RECREATE'] = "no" - return {'return': 0} @@ -184,8 +185,8 @@ def postprocess(i): if is_true(env.get('MLC_DOCKER_USE_GOOGLE_DNS', '')): run_opts += ' --dns 8.8.8.8 --dns 8.8.4.4 ' - if env.get('MLC_CONTAINER_TOOL', '') == 'podman' and env.get( - 'MLC_PODMAN_MAP_USER_ID', '').lower() not in ["no", "0", "false"]: + if env.get('MLC_CONTAINER_TOOL', '') == 'podman' and not is_false(env.get( + 
'MLC_PODMAN_MAP_USER_ID', '')): run_opts += " --userns=keep-id" if env.get('MLC_DOCKER_PORT_MAPS', []): diff --git a/script/run-mlperf-inference-app/customize.py b/script/run-mlperf-inference-app/customize.py index 885d1d8b3..3d1886e1a 100644 --- a/script/run-mlperf-inference-app/customize.py +++ b/script/run-mlperf-inference-app/customize.py @@ -281,8 +281,6 @@ def preprocess(i): mlc = i['automation'].action_object - # print(ii) - # return {'return': 1} r = mlc.access(ii) if r['return'] > 0: return r @@ -307,7 +305,7 @@ def preprocess(i): if state.get('docker', {}): del (state['docker']) - if env.get("MLC_MLPERF_LOADGEN_COMPLIANCE", "") == "yes": + if is_true(env.get("MLC_MLPERF_LOADGEN_COMPLIANCE", "")): for test in test_list: env['MLC_MLPERF_LOADGEN_COMPLIANCE_TEST'] = test env['MLC_MLPERF_LOADGEN_MODE'] = "compliance" diff --git a/script/run-mlperf-inference-app/meta.yaml b/script/run-mlperf-inference-app/meta.yaml index cc8404fbd..b19362ecc 100644 --- a/script/run-mlperf-inference-app/meta.yaml +++ b/script/run-mlperf-inference-app/meta.yaml @@ -67,6 +67,7 @@ input_mapping: imagenet_path: IMAGENET_PATH implementation: MLC_MLPERF_IMPLEMENTATION lang: MLC_MLPERF_IMPLEMENTATION + min_duration: MLC_MLPERF_INFERENCE_MIN_DURATION min_query_count: MLC_MLPERF_INFERENCE_MIN_QUERY_COUNT max_query_count: MLC_MLPERF_INFERENCE_MAX_QUERY_COUNT mode: MLC_MLPERF_LOADGEN_MODE @@ -119,7 +120,7 @@ input_mapping: use_dataset_from_host: MLC_USE_DATASET_FROM_HOST use_model_from_host: MLC_USE_MODEL_FROM_HOST rgat_checkpoint_path: RGAT_CHECKPOINT_PATH - pointpillars_checkpoint_path: MLC_ML_MODEL_POINT_PILLARS_PATH + pointpainting_checkpoint_path: MLC_ML_MODEL_POINT_PILLARS_PATH deeplab_resnet50_path: MLC_ML_MODEL_DPLAB_RESNET50_PATH waymo_path: MLC_DATASET_WAYMO_PATH nm_model_zoo_stub: MLC_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB @@ -383,6 +384,8 @@ variations: add_deps_recursive: submission-checker: tags: _short-run + openimages-preprocessed: + tags: _50 default: 'true' env: MLC_MLPERF_SUBMISSION_DIVISION: open diff --git a/script/run-mlperf-inference-mobilenet-models/customize.py b/script/run-mlperf-inference-mobilenet-models/customize.py index 171b6e17c..a7e3d5ee0 100644 --- a/script/run-mlperf-inference-mobilenet-models/customize.py +++ b/script/run-mlperf-inference-mobilenet-models/customize.py @@ -1,7 +1,9 @@ from mlc import utils import os -import mlc import sys +from utils import * +import mlc +import importlib def preprocess(i): @@ -17,6 +19,9 @@ def preprocess(i): adr = i['input'].get('adr') automation = i['automation'] + # mlc = i['automation'].action_object + # cache_action = i['automation'].cache_action + cache_action = mlc quiet = (env.get('MLC_QUIET', False) == 'yes') verbose = (env.get('MLC_VERBOSE', False) == 'yes') @@ -49,18 +54,18 @@ def preprocess(i): } models = {} - if env.get('MLC_MLPERF_RUN_MOBILENET_V1', '') == "yes": + if is_true(env.get('MLC_MLPERF_RUN_MOBILENET_V1', '')): models['mobilenet'] = {} models['mobilenet']['v1'] = models_all['mobilenet']['v1'] - elif env.get('MLC_MLPERF_RUN_MOBILENET_V2', '') == "yes": + elif is_true(env.get('MLC_MLPERF_RUN_MOBILENET_V2', '')): models['mobilenet'] = {} models['mobilenet']['v2'] = models_all['mobilenet']['v2'] - elif env.get('MLC_MLPERF_RUN_MOBILENET_V3', '') == "yes": + elif is_true(env.get('MLC_MLPERF_RUN_MOBILENET_V3', '')): models['mobilenet'] = {} models['mobilenet']['v3'] = models_all['mobilenet']['v3'] - elif env.get('MLC_MLPERF_RUN_MOBILENETS', '') == "yes": + elif is_true(env.get('MLC_MLPERF_RUN_MOBILENETS', '')): models['mobilenet'] = 
models_all['mobilenet'] - elif env.get('MLC_MLPERF_RUN_EFFICIENTNETS', '') == "yes": + if is_true(env.get('MLC_MLPERF_RUN_EFFICIENTNETS', '')): models['efficientnet'] = models_all['efficientnet'] variation_strings = {} @@ -89,16 +94,16 @@ def preprocess(i): variation_list.append("_" + k3) variation_strings[t1].append(",".join(variation_list)) - if env.get('MLC_MLPERF_SUBMISSION_MODE', '') == "yes": + if is_true(env.get('MLC_MLPERF_SUBMISSION_MODE', '')): var = "_submission" execution_mode = "valid" - elif env.get('MLC_MLPERF_ACCURACY_MODE', '') == "yes" and env.get('MLC_MLPERF_PERFORMANCE_MODE', '') == "yes": + elif is_true(env.get('MLC_MLPERF_ACCURACY_MODE', '')) and is_true(env.get('MLC_MLPERF_PERFORMANCE_MODE', '')): var = "_full,_performance-and-accuracy" execution_mode = "valid" - elif env.get('MLC_MLPERF_ACCURACY_MODE', '') == "yes": + elif is_true(env.get('MLC_MLPERF_ACCURACY_MODE', '')): var = "_full,_accuracy-only" execution_mode = "valid" - elif env.get('MLC_MLPERF_PERFORMANCE_MODE', '') == "yes": + elif is_true(env.get('MLC_MLPERF_PERFORMANCE_MODE', '')): var = "_full,_performance-only" execution_mode = "valid" else: @@ -106,25 +111,33 @@ def preprocess(i): execution_mode = "test" precisions = [] - if env.get('MLC_MLPERF_RUN_FP32', '') == "yes": + if is_true(env.get('MLC_MLPERF_RUN_FP32', '')): precisions.append("fp32") - if env.get('MLC_MLPERF_RUN_INT8', '') == "yes": + if is_true(env.get('MLC_MLPERF_RUN_INT8', '')): precisions.append("uint8") implementation_tags = [] - if env.get('MLC_MLPERF_USE_ARMNN_LIBRARY', '') == "yes": + if is_true(env.get('MLC_MLPERF_USE_ARMNN_LIBRARY', '')): implementation_tags.append("_armnn") - if env.get('MLC_MLPERF_TFLITE_ARMNN_NEON', '') == "yes": + if is_true(env.get('MLC_MLPERF_TFLITE_ARMNN_NEON', '')): implementation_tags.append("_use-neon") - if env.get('MLC_MLPERF_TFLITE_ARMNN_OPENCL', '') == "yes": + if is_true(env.get('MLC_MLPERF_TFLITE_ARMNN_OPENCL', '')): implementation_tags.append("_use-opencl") implementation_tags_string = ",".join(implementation_tags) inp = i['input'] + clean_input = { + 'action': 'rm', + 'target': 'cache', + 'tags': 'get,preprocessed,dataset,_for.mobilenet', + 'quiet': True, + 'v': verbose, + 'f': True + } - for model in variation_strings: - for v in variation_strings[model]: - for precision in precisions: + for precision in precisions: + for model in variation_strings: + for v in variation_strings[model]: if "small-minimalistic" in v and precision == "uint8": continue @@ -134,8 +147,8 @@ def preprocess(i): mlc_input = { 'action': 'run', - 'automation': 'script', - 'tags': f'generate-run-cmds,mlperf,inference,{var}', + 'target': 'script', + 'tags': f'run-mlperf,mlperf,inference,{var}', 'quiet': True, 'env': env, 'input': inp, @@ -169,42 +182,38 @@ def preprocess(i): if env.get('MLC_MLPERF_INFERENCE_SUBMISSION_DIR', '') != '': mlc_input['submission_dir'] = env['MLC_MLPERF_INFERENCE_SUBMISSION_DIR'] - if env.get('MLC_MLPERF_FIND_PERFORMANCE_MODE', '') == "yes" and env.get( - 'MLC_MLPERF_NO_RERUN', '') != 'yes': + if is_true(env.get('MLC_MLPERF_FIND_PERFORMANCE_MODE', '')) and not is_true(env.get( + 'MLC_MLPERF_NO_RERUN', '')): mlc_input['rerun'] = True - if env.get('MLC_MLPERF_POWER', '') == "yes": + if is_true(env.get('MLC_MLPERF_POWER', '')): mlc_input['power'] = 'yes' - if env.get('MLC_MLPERF_ACCURACY_MODE', '') == "yes": - mlc_input['mode'] = 'accuracy' - print(mlc_input) - r = mlc.access(mlc_input) - if r['return'] > 0: - return r - - if env.get('MLC_MLPERF_PERFORMANCE_MODE', '') == "yes": - mlc_input['mode'] = 
'performance' + print(mlc_input) + r = mlc.access(mlc_input) + if r['return'] > 0: + return r + importlib.reload(mlc.action) - print(mlc_input) - r = mlc.access(mlc_input) + if is_true(env.get('MLC_MINIMIZE_DISK_USAGE', '')): + r = cache_action.access(clean_input) if r['return'] > 0: - return r + print(r) + # return r + else: + importlib.reload(mlc.action) - if env.get('MLC_TEST_ONE_RUN', '') == "yes": + if is_true(env.get('MLC_TEST_ONE_RUN', '')): return {'return': 0} - clean_input = { - 'action': 'rm', - 'automation': 'cache', - 'tags': 'get,preprocessed,dataset,_for.mobilenet', - 'quiet': True, - 'v': verbose, - 'f': 'True' - } - r = mlc.access(clean_input) - # if r['return'] > 0: - # return r + ''' + r = cache_action.access(clean_input) + if r['return'] > 0: + print(r) + # return r + else: + importlib.reload(mlc.action) + ''' return {'return': 0} diff --git a/script/run-mlperf-inference-mobilenet-models/meta.yaml b/script/run-mlperf-inference-mobilenet-models/meta.yaml index f78f8b6e7..813b1ef93 100644 --- a/script/run-mlperf-inference-mobilenet-models/meta.yaml +++ b/script/run-mlperf-inference-mobilenet-models/meta.yaml @@ -8,22 +8,49 @@ default_env: MLC_MLPERF_RUN_FP32: 'yes' MLC_MLPERF_RUN_INT8: 'yes' MLC_MLPERF_RUN_MOBILENETS: 'no' + MLC_USE_DATASET_FROM_HOST: 'yes' + MLC_MINIMIZE_DISK_USAGE: 'no' deps: - tags: get,sys-utils-mlc docker: + deps: + - tags: get,mlperf,inference,results,dir,local + names: + - get-mlperf-inference-results-dir + skip_if_env: + OUTPUT_BASE_DIR: [ on ] + - tags: get,mlperf,inference,submission,dir,local + names: + - get-mlperf-inference-submission-dir + skip_if_env: + MLC_MLPERF_INFERENCE_SUBMISSION_DIR: [ on ] + - tags: get,dataset,imagenet,validation,original,_full + enable_if_env: + MLC_USE_DATASET_FROM_HOST: + - 'yes' + names: + - imagenet-original + - dataset-original input_mapping: - imagenet_path: IMAGENET_PATH - results_dir: RESULTS_DIR - submission_dir: SUBMISSION_DIR - docker_run_final_cmds: - - mlcr --tags=run,mlperf,inference,mobilenet-models,_find-performance --adr.mlperf-inference-implementation.fake_run=True - --adr.compiler.tags=gcc + imagenet_path: MLC_DATASET_IMAGENET_PATH + results_dir: MLC_MLPERF_INFERENCE_RESULTS_DIR + submission_dir: MLC_MLPERF_INFERENCE_SUBMISSION_DIR + minimize_disk_usage: MLC_MINIMIZE_DISK_USAGE fake_run_deps: false + pre_run_cmds: + - mlc pull repo mounts: - - ${{ IMAGENET_PATH }}:${{ IMAGENET_PATH }} - - ${{ RESULTS_DIR }}:${{ RESULTS_DIR }} - - ${{ SUBMISSION_DIR }}:${{ SUBMISSION_DIR }} + - "${{ MLC_DATASET_IMAGENET_PATH }}:${{ MLC_DATASET_IMAGENET_PATH }}" + - "${{ MLC_MLPERF_INFERENCE_RESULTS_DIR }}:${{ MLC_MLPERF_INFERENCE_RESULTS_DIR }}" + - "${{ OUTPUT_BASE_DIR }}:${{ OUTPUT_BASE_DIR }}" + - "${{ MLC_MLPERF_INFERENCE_SUBMISSION_DIR }}:${{ MLC_MLPERF_INFERENCE_SUBMISSION_DIR }}" run: true + interactive: true + user: mlcuser + mlc_repo: mlcommons@mlperf-automations + mlc_repo_branch: dev + real_run: False + os_version: '22.04' input_mapping: find-performance: MLC_MLPERF_FIND_PERFORMANCE_MODE imagenet_path: IMAGENET_PATH @@ -132,3 +159,13 @@ variations: group: optimization use-neon: alias: neon +tests: + run_inputs: + - env: + MLC_TEST_ONE_RUN: 'yes' + variations_list: + - tflite + - accuracy_only + adr: + compiler: + tags: gcc diff --git a/script/run-mlperf-inference-submission-checker/meta.yaml b/script/run-mlperf-inference-submission-checker/meta.yaml index d5971b146..6c5a8d417 100644 --- a/script/run-mlperf-inference-submission-checker/meta.yaml +++ 
b/script/run-mlperf-inference-submission-checker/meta.yaml @@ -80,6 +80,8 @@ post_deps: tags: run,tar env: MLC_TAR_INPUT_DIR: <<>> + MLC_TAR_SUB_FOLDERS_TO_INCLUDE: closed,open,network + COPYFILE_DISABLE: 1 - enable_if_env: MLC_MLPERF_SUBMITTER_ID: - 'yes' diff --git a/script/run-mlperf-power-server/power-server.conf b/script/run-mlperf-power-server/power-server.conf new file mode 100644 index 000000000..70797bd62 --- /dev/null +++ b/script/run-mlperf-power-server/power-server.conf @@ -0,0 +1,19 @@ +[server] +ntpserver = time.google.com +listen = 0.0.0.0 4950 + +[ptd] +ptd = C:\Users\arjun\CM\repos\local\cache\5a0a52d578724774\repo\PTD\binaries\ptd-windows-x86.exe +analyzerCount = 2 + +[analyzer1] +interfaceflag = -y +devicetype = 49 +deviceport = C3YD21068E +networkport = 8888 + +[analyzer2] +interfaceflag = -g +devicetype = 8 +deviceport = 20 +networkport = 8889 diff --git a/script/run-terraform/README-about.md b/script/run-terraform/README-about.md index 674ebee42..d0a7ba01f 100644 --- a/script/run-terraform/README-about.md +++ b/script/run-terraform/README-about.md @@ -7,6 +7,6 @@ gcloud auth application-default login The above two commands will install google-cloud-cli and authorizes the user to access it. Once done, you can start creating gcp instance using CM commands like below. To destroy an instance just repeat the same command with `--destroy` option. ``` -mlcr --tags=run,terraform,_gcp,_gcp_project.mlperf-inference-tests --cminit +mlcr run,terraform,_gcp,_gcp_project.mlperf-inference-tests --cminit ``` Here, `mlperf-inference-tests` is the name of the google project as created in [Google cloud console](https://console.cloud.google.com/apis/dashboard) diff --git a/script/submit-mlperf-results/customize.py b/script/submit-mlperf-results/customize.py index 0936ec560..ae5824af2 100644 --- a/script/submit-mlperf-results/customize.py +++ b/script/submit-mlperf-results/customize.py @@ -17,6 +17,14 @@ def preprocess(i): file_path = env['MLC_MLPERF_SUBMISSION_FILE'] submitter_name = env.get('MLC_MLPERF_SUBMITTER', '') + # check the file_path is absolute or relative + # if it is relative, convert to absolute + if not os.path.isabs(file_path): + file_path = os.path.abspath( + os.path.join( + env['MLC_TMP_CURRENT_PATH'], + file_path)) + r = get_signed_url( server, benchmark, diff --git a/script/tar-my-folder/customize.py b/script/tar-my-folder/customize.py index 9013a3431..a8407ff2c 100644 --- a/script/tar-my-folder/customize.py +++ b/script/tar-my-folder/customize.py @@ -21,10 +21,24 @@ def preprocess(i): env['MLC_TAR_OUTFILE'] = output_file from pathlib import Path input_path = Path(input_dir) - cd_dir = input_path.parent.absolute() - CMD = 'tar --directory ' + \ - str(cd_dir) + ' -czf ' + os.path.join(output_dir, - output_file) + ' ' + input_dirname + sub_folders_to_include = env.get('MLC_TAR_SUB_FOLDERS_TO_INCLUDE', '') + if sub_folders_to_include != '': + cd_dir = input_path.absolute() + r = sub_folders_to_include.split(",") + v_sub_folders = [] + for sub_folder in r: + f = sub_folder.strip() + if os.path.exists(os.path.join(input_path, f)): + v_sub_folders.append(f) + CMD = 'tar --directory ' + \ + str(cd_dir) + ' -czf ' + os.path.join(output_dir, + output_file) + ' ' + ' '.join(v_sub_folders) + else: + cd_dir = input_path.parent.absolute() + CMD = 'tar --directory ' + \ + str(cd_dir) + ' -czf ' + os.path.join(output_dir, + output_file) + ' ' + input_dirname + print(CMD) ret = os.system(CMD) print("Tar file " + os.path.join(output_dir, output_file) + " created") diff --git 
a/script/tar-my-folder/meta.yaml b/script/tar-my-folder/meta.yaml
index 900475310..0a07b105c 100644
--- a/script/tar-my-folder/meta.yaml
+++ b/script/tar-my-folder/meta.yaml
@@ -9,6 +9,7 @@ input_mapping:
   input_dir: MLC_TAR_INPUT_DIR
   outfile: MLC_TAR_OUTFILE
   output_dir: MLC_TAR_OUTPUT_DIR
+  sub_folders_to_include: MLC_TAR_SUB_FOLDERS_TO_INCLUDE
 new_env_keys:
 - MLC_TAR_OUTFILE
 tags: