diff --git a/.github/workflows/build_wheel.yml b/.github/workflows/build_wheel.yml index 867f93586..fe22fc33b 100644 --- a/.github/workflows/build_wheel.yml +++ b/.github/workflows/build_wheel.yml @@ -5,7 +5,6 @@ on: types: [published] push: branches: - - main - dev paths: - VERSION diff --git a/.github/workflows/test-amd-mlperf-inference-implementations.yml b/.github/workflows/test-amd-mlperf-inference-implementations.yml index 9ff0b4da4..4c4b6f749 100644 --- a/.github/workflows/test-amd-mlperf-inference-implementations.yml +++ b/.github/workflows/test-amd-mlperf-inference-implementations.yml @@ -20,7 +20,7 @@ jobs: python3 -m venv gh_action source gh_action/bin/activate export MLC_REPOS=$HOME/GH_MLC - pip install --upgrade cm4mlops - cm pull repo + pip install --upgrade mlc-scripts + mlc pull repo mlcr --tags=run-mlperf,inference,_all-scenarios,_full,_r4.1-dev --execution_mode=valid --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=IntelSPR.24c --implementation=amd --backend=pytorch --category=datacenter --division=open --scenario=Offline --docker_dt=yes --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=rocm --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean --docker --quiet --docker_skip_run_cmd=yes # mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=dev --commit_message="Results from GH action on SPR.24c" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=IntelSPR.24c diff --git a/.github/workflows/test-intel-mlperf-inference-implementations.yml b/.github/workflows/test-intel-mlperf-inference-implementations.yml index d56302111..0041f9762 100644 --- a/.github/workflows/test-intel-mlperf-inference-implementations.yml +++ 
b/.github/workflows/test-intel-mlperf-inference-implementations.yml @@ -20,7 +20,7 @@ jobs: python3 -m venv gh_action_conda source gh_action_conda/bin/activate export MLC_REPOS=$HOME/GH_MLC - pip install --upgrade cm4mlops + pip install --upgrade mlc-scripts pip install tabulate mlcr --tags=run-mlperf,inference,_all-scenarios,_submission,_full,_r4.1-dev --preprocess_submission=yes --execution_mode=valid --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=IntelSPR.24c --implementation=intel --backend=pytorch --category=datacenter --division=open --scenario=Offline --docker_dt=yes --docker_it=no --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=cpu --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean --docker --quiet mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from GH action on SPR.24c" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=IntelSPR.24c diff --git a/.github/workflows/test-mlc-script-features.yml b/.github/workflows/test-mlc-script-features.yml index 898512b7e..d2928e37f 100644 --- a/.github/workflows/test-mlc-script-features.yml +++ b/.github/workflows/test-mlc-script-features.yml @@ -1,7 +1,7 @@ name: MLC script automation features test on: - pull_request: + pull_request_target: branches: [ "main", "dev" ] paths: - '.github/workflows/test-mlc-script-features.yml' @@ -61,18 +61,51 @@ jobs: mlcr --tags=python,src,install,_shared --version=3.9.10 --quiet mlc search cache --tags=python,src,install,_shared,version-3.9.10 + test_docker: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: ["3.12", "3.8"] + + steps: + - uses: actions/checkout@v4 + - name: Set up Python ${{ matrix.python-version 
}} + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + - name: Pull MLOps repository + run: | + pip install mlcflow + mlc pull repo ${{ github.event.pull_request.head.repo.html_url }} --branch=${{ github.event.pull_request.head.ref }} + - name: Run docker container from dockerhub on linux - if: runner.os == 'linux' run: | mlcr --tags=run,docker,container --adr.compiler.tags=gcc --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --image_name=cm-script-app-image-classification-onnx-py --env.MLC_DOCKER_RUN_SCRIPT_TAGS=app,image-classification,onnx,python --env.MLC_DOCKER_IMAGE_BASE=ubuntu:22.04 --env.MLC_DOCKER_IMAGE_REPO=cknowledge --quiet - name: Run docker container locally on linux - if: runner.os == 'linux' run: | mlcr --tags=run,docker,container --adr.compiler.tags=gcc --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --image_name=mlc-script-app-image-classification-onnx-py --env.MLC_DOCKER_RUN_SCRIPT_TAGS=app,image-classification,onnx,python --env.MLC_DOCKER_IMAGE_BASE=ubuntu:22.04 --env.MLC_DOCKER_IMAGE_REPO=local --quiet + test_mlperf_retinanet_cpp_venv: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: ["3.12", "3.8"] + + steps: + - uses: actions/checkout@v4 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + - name: Pull MLOps repository + run: | + pip install mlcflow + mlc pull repo ${{ github.event.pull_request.head.repo.html_url }} --branch=${{ github.event.pull_request.head.ref }} + - name: Run MLPerf Inference Retinanet with native and virtual Python - if: runner.os == 'linux' run: | mlcr --tags=app,mlperf,inference,generic,_cpp,_retinanet,_onnxruntime,_cpu --adr.python.version_min=3.8 --adr.compiler.tags=gcc --adr.openimages-preprocessed.tags=_50 --scenario=Offline --mode=accuracy --test_query_count=10 --rerun --quiet @@ -80,4 +113,49 @@ jobs: 
mlcr --tags=install,python-venv --version=3.10.8 --name=mlperf --quiet - mlcr --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --adr.python.name=mlperf --adr.python.version_min=3.8 --adr.compiler.tags=gcc --adr.openimages-preprocessed.tags=_50 --submitter=Community --implementation=cpp --hw_name=default --model=retinanet --backend=onnxruntime --device=cpu --scenario=Offline --quiet + mlcr --tags=run,mlperf,inference,_submission,_short --adr.python.name=mlperf --adr.python.version_min=3.8 --adr.compiler.tags=gcc --adr.openimages-preprocessed.tags=_50 --submitter=MLCommons --implementation=cpp --hw_name=default --model=retinanet --backend=onnxruntime --device=cpu --scenario=Offline --quiet + + # Step for Linux/MacOS + - name: Randomly Execute Step (Linux/MacOS) + if: runner.os != 'Windows' + run: | + RANDOM_NUMBER=$((RANDOM % 10)) + echo "Random number is $RANDOM_NUMBER" + if [ "$RANDOM_NUMBER" -eq 0 ]; then + echo "run_step=true" >> $GITHUB_ENV + else + echo "run_step=false" >> $GITHUB_ENV + fi + + # Step for Windows + - name: Randomly Execute Step (Windows) + if: runner.os == 'Windows' + run: | + $RANDOM_NUMBER = Get-Random -Maximum 10 + Write-Host "Random number is $RANDOM_NUMBER" + if ($RANDOM_NUMBER -eq 0) { + Write-Host "run_step=true" | Out-File -FilePath $Env:GITHUB_ENV -Append + } else { + Write-Host "run_step=false" | Out-File -FilePath $Env:GITHUB_ENV -Append + } + + - name: Retrieve secrets from Keeper + if: github.repository_owner == 'mlcommons' && env.run_step == 'true' + id: ksecrets + uses: Keeper-Security/ksm-action@master + with: + keeper-secret-config: ${{ secrets.KSM_CONFIG }} + secrets: |- + ubwkjh-Ii8UJDpG2EoU6GQ/field/Access Token > env:PAT + - name: Push Results + env: + GITHUB_TOKEN: ${{ env.PAT }} + if: github.repository_owner == 'mlcommons' && env.run_step == 'true' + run: | + git config --global user.name "mlcommons-bot" + git config --global user.email "mlcommons-bot@users.noreply.github.com" + git config --global 
credential.https://github.com.helper "" + git config --global credential.https://github.com.helper "!gh auth git-credential" + git config --global credential.https://gist.github.com.helper "" + git config --global credential.https://gist.github.com.helper "!gh auth git-credential" + mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet diff --git a/.github/workflows/test-mlperf-wheel.yml b/.github/workflows/test-mlcscripts-wheel.yml similarity index 83% rename from .github/workflows/test-mlperf-wheel.yml rename to .github/workflows/test-mlcscripts-wheel.yml index 2ff1595fb..7abf40bbc 100644 --- a/.github/workflows/test-mlperf-wheel.yml +++ b/.github/workflows/test-mlcscripts-wheel.yml @@ -1,4 +1,4 @@ -name: Build Python Wheel +name: Build mlc-scripts Wheel on: pull_request: @@ -6,7 +6,7 @@ on: - main - dev paths: - - '.github/workflows/test-mlperf-wheel.yml' + - '.github/workflows/test-mlcscripts-wheel.yml' - 'setup.py' jobs: @@ -16,6 +16,9 @@ jobs: matrix: os: [macos-latest, ubuntu-latest, windows-latest] python-version: [ '3.8', '3.13'] + exclude: + - os: windows-latest + python-version: "3.8" runs-on: ${{ matrix.os }} diff --git a/.github/workflows/test-mlperf-inference-gptj.yml b/.github/workflows/test-mlperf-inference-gptj.yml index c99c503ff..341e2e818 100644 --- a/.github/workflows/test-mlperf-inference-gptj.yml +++ b/.github/workflows/test-mlperf-inference-gptj.yml @@ -24,8 +24,8 @@ jobs: source gh_action/bin/deactivate || python3 -m venv gh_action source gh_action/bin/activate export MLC_REPOS=$HOME/GH_MLC - python3 -m pip install cm4mlops - cm pull repo + python3 -m pip install --upgrade mlc-scripts + mlc pull repo mlcr --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --docker --pull_changes=yes --pull_inference_changes=yes --model=gptj-99 --backend=${{ 
matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --target_qps=1 --quiet --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --beam_size=1 --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --get_platform_details=yes --implementation=reference --clean mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/gh_action_submissions diff --git a/.github/workflows/test-mlperf-inference-llama2.yml b/.github/workflows/test-mlperf-inference-llama2.yml index 8de010505..70e4e4909 100644 --- a/.github/workflows/test-mlperf-inference-llama2.yml +++ b/.github/workflows/test-mlperf-inference-llama2.yml @@ -25,9 +25,9 @@ jobs: source gh_action/bin/deactivate || python3 -m venv gh_action source gh_action/bin/activate export MLC_REPOS=$HOME/GH_MLC - pip install cm4mlops + pip install mlc-scripts pip install tabulate - cm pull repo + mlc pull repo pip install "huggingface_hub[cli]" git config --global credential.helper store huggingface-cli login --token ${{ secrets.HF_TOKEN }} --add-to-git-credential diff --git a/.github/workflows/test-mlperf-inference-mixtral.yml b/.github/workflows/test-mlperf-inference-mixtral.yml index 26b369c09..e48cbb1e9 100644 --- a/.github/workflows/test-mlperf-inference-mixtral.yml +++ b/.github/workflows/test-mlperf-inference-mixtral.yml @@ -26,10 +26,10 @@ jobs: source gh_action/bin/deactivate || python3 -m venv gh_action source gh_action/bin/activate export MLC_REPOS=$HOME/GH_MLC - pip install cm4mlops + pip install --upgrade mlc-scripts pip install "huggingface_hub[cli]" git config --global credential.helper store huggingface-cli login --token ${{ 
secrets.HF_TOKEN }} --add-to-git-credential - cm pull repo + mlc pull repo mlcr --tags=run-mlperf,inference,_submission,_short --adr.inference-src.tags=_branch.dev --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --model=mixtral-8x7b --implementation=reference --batch_size=1 --precision=${{ matrix.precision }} --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --docker --quiet --test_query_count=3 --target_qps=0.001 --clean --env.MLC_MLPERF_MODEL_MIXTRAL_8X7B_DOWNLOAD_TO_HOST=yes --env.MLC_MLPERF_DATASET_MIXTRAL_8X7B_DOWNLOAD_TO_HOST=yes --adr.openorca-mbxp-gsm8k-combined-preprocessed.tags=_size.1 mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions - GO-phoenix" --quiet --submission_dir=$HOME/gh_action_submissions diff --git a/.github/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml b/.github/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml index 5bbec09b8..500a9dfdc 100644 --- a/.github/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml +++ b/.github/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml @@ -1,11 +1,8 @@ -# This workflow will install Python dependencies, run tests and lint with a variety of Python versions -# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions - name: MLPerf inference MLCommons C++ ResNet50 on: - pull_request: - branches: [ "main", "dev", "mlperf-inference" ] + pull_request_target: + branches: [ "main", "dev" ] paths: - 
'.github/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml' - '**' diff --git a/.github/workflows/test-mlperf-inference-resnet50.yml b/.github/workflows/test-mlperf-inference-resnet50.yml index 85bcd3cc2..75e9fe6f9 100644 --- a/.github/workflows/test-mlperf-inference-resnet50.yml +++ b/.github/workflows/test-mlperf-inference-resnet50.yml @@ -58,16 +58,30 @@ jobs: if: matrix.os != 'windows-latest' run: | mlcr --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name=gh_${{ matrix.os }}_x86 --model=resnet50 --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet - - name: Randomly Execute Step - id: random-check + # Step for Linux/MacOS + - name: Randomly Execute Step (Linux/MacOS) + if: runner.os != 'Windows' run: | - RANDOM_NUMBER=$((RANDOM % 10)) - echo "Random number is $RANDOM_NUMBER" - if [ "$RANDOM_NUMBER" -eq 0 ]; then - echo "run_step=true" >> $GITHUB_ENV - else - echo "run_step=false" >> $GITHUB_ENV - fi + RANDOM_NUMBER=$((RANDOM % 10)) + echo "Random number is $RANDOM_NUMBER" + if [ "$RANDOM_NUMBER" -eq 0 ]; then + echo "run_step=true" >> $GITHUB_ENV + else + echo "run_step=false" >> $GITHUB_ENV + fi + + # Step for Windows + - name: Randomly Execute Step (Windows) + if: runner.os == 'Windows' + run: | + $RANDOM_NUMBER = Get-Random -Maximum 10 + Write-Host "Random number is $RANDOM_NUMBER" + if ($RANDOM_NUMBER -eq 0) { + Write-Host "run_step=true" | Out-File -FilePath $Env:GITHUB_ENV -Append + } else { + Write-Host "run_step=false" | Out-File -FilePath $Env:GITHUB_ENV -Append + } + - name: Retrieve secrets from Keeper if: github.repository_owner == 'mlcommons' && env.run_step == 'true' id: ksecrets diff --git a/.github/workflows/test-mlperf-inference-retinanet.yml b/.github/workflows/test-mlperf-inference-retinanet.yml index 373eef9b4..c1777beae 100644 --- 
a/.github/workflows/test-mlperf-inference-retinanet.yml +++ b/.github/workflows/test-mlperf-inference-retinanet.yml @@ -52,16 +52,31 @@ jobs: if: matrix.os != 'windows-latest' run: | mlcr --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name=gh_${{ matrix.os }}_x86 --model=retinanet --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --quiet -v --target_qps=1 - - name: Randomly Execute Step - id: random-check + + # Step for Linux/MacOS + - name: Randomly Execute Step (Linux/MacOS) + if: runner.os != 'Windows' + run: | + RANDOM_NUMBER=$((RANDOM % 10)) + echo "Random number is $RANDOM_NUMBER" + if [ "$RANDOM_NUMBER" -eq 0 ]; then + echo "run_step=true" >> $GITHUB_ENV + else + echo "run_step=false" >> $GITHUB_ENV + fi + + # Step for Windows + - name: Randomly Execute Step (Windows) + if: runner.os == 'Windows' run: | - RANDOM_NUMBER=$((RANDOM % 10)) - echo "Random number is $RANDOM_NUMBER" - if [ "$RANDOM_NUMBER" -eq 0 ]; then - echo "run_step=true" >> $GITHUB_ENV - else - echo "run_step=false" >> $GITHUB_ENV - fi + $RANDOM_NUMBER = Get-Random -Maximum 10 + Write-Host "Random number is $RANDOM_NUMBER" + if ($RANDOM_NUMBER -eq 0) { + Write-Host "run_step=true" | Out-File -FilePath $Env:GITHUB_ENV -Append + } else { + Write-Host "run_step=false" | Out-File -FilePath $Env:GITHUB_ENV -Append + } + - name: Retrieve secrets from Keeper if: github.repository_owner == 'mlcommons' && env.run_step == 'true' id: ksecrets diff --git a/.github/workflows/test-mlperf-inference-rnnt.yml b/.github/workflows/test-mlperf-inference-rnnt.yml index 352272370..89ec6e4e2 100644 --- a/.github/workflows/test-mlperf-inference-rnnt.yml +++ b/.github/workflows/test-mlperf-inference-rnnt.yml @@ -30,10 +30,10 @@ jobs: python-version: ${{ matrix.python-version }} - name: Install dependencies on Unix Platforms run: | - 
MLC_PULL_DEFAULT_MLOPS_REPO=no pip install cm4mlops + pip install mlcflow - name: Pull MLOps repository run: | - cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }} + mlc pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }} mlcr --quiet --tags=get,sys-utils-cm - name: Test MLPerf Inference RNNT run: | diff --git a/.github/workflows/test-mlperf-inference-sdxl.yaml b/.github/workflows/test-mlperf-inference-sdxl.yaml index 7f7ce1fea..2e287a0be 100644 --- a/.github/workflows/test-mlperf-inference-sdxl.yaml +++ b/.github/workflows/test-mlperf-inference-sdxl.yaml @@ -19,7 +19,7 @@ jobs: source gh_action/bin/deactivate || python3 -m venv gh_action source gh_action/bin/activate export MLC_REPOS=$HOME/GH_MLC - python3 -m pip install cm4mlops - cm pull repo + python3 -m pip install mlc-scripts + mlc pull repo mlcr --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --docker --model=sdxl --backend=${{ matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --quiet --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/gh_action_submissions diff --git a/.github/workflows/test-nvidia-mlperf-inference-implementations.yml b/.github/workflows/test-nvidia-mlperf-inference-implementations.yml index 86f06873d..e4d780780 100644 --- 
a/.github/workflows/test-nvidia-mlperf-inference-implementations.yml +++ b/.github/workflows/test-nvidia-mlperf-inference-implementations.yml @@ -2,7 +2,7 @@ name: MLPerf Inference Nvidia implementations on: schedule: - - cron: "58 23 * * *" #to be adjusted + - cron: "35 01 * * *" jobs: run_nvidia: @@ -17,7 +17,7 @@ jobs: strategy: fail-fast: false matrix: - system: [ "GO-spr", "phoenix-Amd-Am5", "GO-i9"] + system: [ "GO-spr", "phoenix"] # system: [ "mlc-server" ] python-version: [ "3.12" ] model: [ "resnet50", "retinanet", "bert-99", "bert-99.9", "gptj-99.9", "3d-unet-99.9", "sdxl" ] @@ -48,9 +48,9 @@ jobs: python3 -m venv gh_action source gh_action/bin/activate export MLC_REPOS=$HOME/GH_MLC - MLC_PULL_DEFAULT_MLOPS_REPO=no pip install --upgrade cm4mlops - cm pull repo + pip install --upgrade mlcflow + mlc pull repo mlcommons@mlperf-automations --branch=dev - mlcr --tags=run-mlperf,inference,_all-scenarios,_submission,_full,_r4.1-dev --preprocess_submission=yes --pull_changes=yes --pull_inference_changes=yes --execution_mode=valid --gpu_name=$gpu_name --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=$hw_name --implementation=nvidia --backend=tensorrt --category=datacenter,edge --division=closed --docker_dt=yes --docker_it=no --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=cuda --use_model_from_host=yes --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean $docker_string --quiet + mlcr --tags=run-mlperf,inference,_all-scenarios,_submission,_full,_r5.0-dev --preprocess_submission=yes --pull_changes=yes --pull_inference_changes=yes --execution_mode=valid --gpu_name=$gpu_name --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=$hw_name --implementation=nvidia --backend=tensorrt --category=datacenter,edge --division=closed 
--docker_dt --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=cuda --use_model_from_host=yes --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean $docker_string --quiet mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from GH action on NVIDIA_$hw_name" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=$hw_name diff --git a/.github/workflows/test-qaic-compute-sdk-build.yml b/.github/workflows/test-qaic-compute-sdk-build.yml index 810da9e8e..0dff27cd0 100644 --- a/.github/workflows/test-qaic-compute-sdk-build.yml +++ b/.github/workflows/test-qaic-compute-sdk-build.yml @@ -26,7 +26,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | - MLC_PULL_DEFAULT_MLOPS_REPO=no pip install cm4mlops + pip install mlc-scripts mlcr --tags=get,sys-utils-cm --quiet - name: Test QAIC Compute SDK for compilation diff --git a/.github/workflows/test-qaic-software-kit.yml b/.github/workflows/test-qaic-software-kit.yml index 127de2323..5cbfc0add 100644 --- a/.github/workflows/test-qaic-software-kit.yml +++ b/.github/workflows/test-qaic-software-kit.yml @@ -31,7 +31,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Pull MLOps repository run: | - pip install mlperf + pip install mlc-scripts mlcr --tags=get,sys-utils-mlc --quiet - name: Test Software Kit for compilation on Ubuntu 20.04 diff --git a/VERSION b/VERSION index 8acdd82b7..c5d54ec32 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.0.1 +0.0.9 diff --git a/automation/cache/README-extra.md b/automation/cache/README-extra.md deleted file mode 100644 index 84d274179..000000000 --- a/automation/cache/README-extra.md +++ /dev/null @@ -1,71 +0,0 @@ -[ [Back to index](../../../docs/README.md) ] - -# CM "cache" 
automation - -*We suggest you to check [CM introduction](https://github.com/mlcommons/ck/blob/master/docs/introduction-cm.md) - and [CM CLI/API](https://github.com/mlcommons/ck/blob/master/docs/interface.md) to understand CM motivation and concepts.* - -## CM script CLI - -Whenever a [given CM script]() caches the output, you can find it - -Whenever a [CM script](https://access.cknowledge.org/playground/?action=scripts) -caches its output (such as downloaded model or pre-processed data set or built code), -you can find it using the CM "cache" automation as follows: - -```bash -cm show cache -``` - -You can prune cache entries by tags and variations: -```bash -cm show cache --tags=ml-model -cm show cache --tags=python -``` - -You can find a path to a given cache artifact as follows: -```bash -cm find cache --tags=ml-model,bert -``` - -You can delete one or more cache artifacts as follows: -```bash -cm rm cache --tags=ml-model -``` - -You can skip user prompt by adding `-f` flag as follows: -```bash -cm rm cache --tags=ml-model -f -``` - -You can clean the whole cache as follows: -```bash -cm rm cache -f -``` - -## CM python API - -You can access the same functionality via CM Python API as follows: - -```python - -import cmind - -output = cmind.access({'action':'show', - 'automation':'cache,541d6f712a6b464e'}) - -if output['return']>0: - cmind.error(output) - -artifacts = output['list'] - -for artifact in artifacts: - print ('') - print (artifact.path) - print (artifact.meta) - -``` - -## Related - -* [CM "script" automation](../script/README-extra.md) diff --git a/automation/cache/README.md b/automation/cache/README.md deleted file mode 100644 index 0a3114d3b..000000000 --- a/automation/cache/README.md +++ /dev/null @@ -1,87 +0,0 @@ -*This README is automatically generated - don't edit! 
See [extra README](README-extra.md) for extra notes!* - -### Automation actions - -#### test - - * CM CLI: ```cm test cache``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L15)) - * CM CLI with UID: ```cm test cache,541d6f712a6b464e``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L15)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'test' - 'automation':'cache,541d6f712a6b464e' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L15) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### show - - * CM CLI: ```cm show cache``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L54)) - * CM CLI with UID: ```cm show cache,541d6f712a6b464e``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L54)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'show' - 'automation':'cache,541d6f712a6b464e' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L54) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### search - - * CM CLI: ```cm search cache``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L153)) - * CM CLI with UID: ```cm search cache,541d6f712a6b464e``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L153)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'search' - 'automation':'cache,541d6f712a6b464e' - 'out':'con' - ``` - [add keys from this 
API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L153) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -#### copy_to_remote - - * CM CLI: ```cm copy_to_remote cache``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L186)) - * CM CLI with UID: ```cm copy_to_remote cache,541d6f712a6b464e``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L186)) - * CM Python API: - ```python - import cmind - - r=cm.access({ - 'action':'copy_to_remote' - 'automation':'cache,541d6f712a6b464e' - 'out':'con' - ``` - [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L186) - ```python - }) - if r['return']>0: - print(r['error']) - ``` - -### Maintainers - -* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce) \ No newline at end of file diff --git a/automation/cache/meta.json b/automation/cache/meta.json deleted file mode 100644 index ac383f937..000000000 --- a/automation/cache/meta.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "alias": "cache", - "automation_alias": "automation", - "automation_uid": "bbeb15d8f0a944a4", - "desc": "Caching cross-platform CM scripts", - "developers": "[Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Grigori Fursin](https://cKnowledge.org/gfursin)", - "sort": 900, - "tags": [ - "automation" - ], - "uid": "541d6f712a6b464e" -} diff --git a/automation/cache/module.py b/automation/cache/module.py deleted file mode 100644 index b205b539f..000000000 --- a/automation/cache/module.py +++ /dev/null @@ -1,236 +0,0 @@ -import os - -from cmind.automation import Automation -from cmind import utils - - -class CAutomation(Automation): - """ - Automation actions - """ - - ############################################################ - def __init__(self, cmind, 
automation_file): - super().__init__(cmind, __file__) - - ############################################################ - def test(self, i): - """ - Test automation - - Args: - (CM input dict): - - (out) (str): if 'con', output to console - - automation (str): automation as CM string object - - parsed_automation (list): prepared in CM CLI or CM access function - [ (automation alias, automation UID) ] or - [ (automation alias, automation UID), (automation repo alias, automation repo UID) ] - - (artifact) (str): artifact as CM string object - - (parsed_artifact) (list): prepared in CM CLI or CM access function - [ (artifact alias, artifact UID) ] or - [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] - - ... - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - - * Output from this automation action - - """ - - import json - print(json.dumps(i, indent=2)) - - return {'return': 0} - - ############################################################ - def show(self, i): - """ - Show cache - - Args: - (CM input dict): - - (out) (str): if 'con', output to console - - (env) (bool): if True, show env from cm-cached-state.json - ... - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - - * Output from this automation action - - """ - import json - - # Check parsed automation - if 'parsed_automation' not in i: - return {'return': 1, 'error': 'automation is not specified'} - - console = i.get('out') == 'con' - - show_env = i.get('env', False) - -# Moved to search function -# # Check simplified CMD: cm show cache "get python" -# # If artifact has spaces, treat them as tags! 
-# artifact = i.get('artifact','') -# tags = i.get('tags','').strip() -# if ' ' in artifact or ',' in artifact: -# del(i['artifact']) -# if 'parsed_artifact' in i: del(i['parsed_artifact']) -# -# new_tags = artifact.replace(' ',',') -# tags = new_tags if tags=='' else new_tags+','+tags -# -# i['tags'] = tags - - # Find CM artifact(s) - i['out'] = None - r = self.search(i) - - if r['return'] > 0: - return r - - lst = r['list'] - for artifact in sorted(lst, key=lambda x: sorted(x.meta['tags'])): - # for artifact in lst: - path = artifact.path - meta = artifact.meta - dependent_cached_path = meta.get( - 'dependent_cached_path', '') - if dependent_cached_path and not os.path.exists( - dependent_cached_path): - continue - - original_meta = artifact.original_meta - - alias = meta.get('alias', '') - uid = meta.get('uid', '') - - tags = meta.get('tags', []) - tags1 = sorted([x for x in tags if not x.startswith('_')]) - tags2 = sorted([x for x in tags if x.startswith('_')]) - tags = tags1 + tags2 - - version = meta.get('version', '') - - if console: - print('') -# print ('* UID: {}'.format(uid)) - print('* Tags: {}'.format(','.join(tags))) - print(' Path: {}'.format(path)) - if version != '': - print(' Version: {}'.format(version)) - - if show_env and console: - path_to_cached_state_file = os.path.join( - path, 'cm-cached-state.json') - - if os.path.isfile(path_to_cached_state_file): - r = utils.load_json(file_name=path_to_cached_state_file) - if r['return'] > 0: - return r - - # Update env and state from cache! 
- cached_state = r['meta'] - - new_env = cached_state.get('new_env', {}) - if len(new_env) > 0: - print(' New env:') - print( - json.dumps( - new_env, - indent=6, - sort_keys=True).replace( - '{', - '').replace( - '}', - '')) - - new_state = cached_state.get('new_state', {}) - if len(new_state) > 0: - print(' New state:') - print(json.dumps(new_env, indent=6, sort_keys=True)) - - return {'return': 0, 'list': lst} - - ############################################################ - def search(self, i): - """ - Overriding the automation search function to add support for a simplified CMD with tags with spaces - - TBD: add input/output description - """ - # Check simplified CMD: cm show cache "get python" - # If artifact has spaces, treat them as tags! - artifact = i.get('artifact', '') - tags = i.get('tags', '') - - # Tags may be a list (if comes internally from CM scripts) or string if - # comes from CMD - if not isinstance(tags, list): - tags = tags.strip() - - if ' ' in artifact: # or ',' in artifact: - del (i['artifact']) - if 'parsed_artifact' in i: - del (i['parsed_artifact']) - - new_tags = artifact.replace(' ', ',') - tags = new_tags if tags == '' else new_tags + ',' + tags - - i['tags'] = tags - - # Force automation when reruning access with processed input - i['automation'] = 'cache,541d6f712a6b464e' - i['action'] = 'search' - # Avoid recursion - use internal CM add function to add the script - # artifact - i['common'] = True - - # Find CM artifact(s) - return self.cmind.access(i) - - ############################################################ - - def copy_to_remote(self, i): - """ - Add CM automation. 
- - Args: - (CM input dict): - - (out) (str): if 'con', output to console - - parsed_artifact (list): prepared in CM CLI or CM access function - [ (artifact alias, artifact UID) ] or - [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] - - (repos) (str): list of repositories to search for automations (internal & mlcommons@ck by default) - - (output_dir) (str): output directory (./ by default) - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - - """ - - return utils.call_internal_module( - self, __file__, 'module_misc', 'copy_to_remote', i) diff --git a/automation/cache/module_misc.py b/automation/cache/module_misc.py deleted file mode 100644 index d5895edd4..000000000 --- a/automation/cache/module_misc.py +++ /dev/null @@ -1,109 +0,0 @@ -import os -from cmind import utils - - -############################################################ -def copy_to_remote(i): - """ - Add CM automation. 
- - Args: - (CM input dict): - - (out) (str): if 'con', output to console - - parsed_artifact (list): prepared in CM CLI or CM access function - [ (artifact alias, artifact UID) ] or - [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] - - (repos) (str): list of repositories to search for automations (internal & mlcommons@ck by default) - - (output_dir) (str): output directory (./ by default) - - Returns: - (CM return dict): - - * return (int): return code == 0 if no error and >0 if error - * (error) (str): error string if return>0 - - """ - - self_module = i['self_module'] - - remote_host = i.get('remote_host') - if not remote_host: - return {'return': 1, - 'error': 'Please input remote host_name/IP via --remote_host'} - remote_mlc_repos_location = i.get( - 'remote_mlc_repos_location', os.path.join( - "/home", os.getlogin(), "CM", "repos")) - remote_mlc_cache_location = os.path.join( - remote_mlc_repos_location, "local", "cache") - - remote_port = i.get('remote_port', '22') - remote_user = i.get('remote_user', os.getlogin()) - - tag_string = i['tags'] - tag_string += ",-tmp" - - mlc_input = {'action': 'show', - 'automation': 'cache', - 'tags': f'{tag_string}', - 'quiet': True - } - r = self_module.cmind.access(mlc_input) - if r['return'] > 0: - return r - - if len(r['list']) == 0: - pass # fixme - elif len(r['list']) > 1: - print("Multiple cache entries found: ") - for k in sorted(r['list'], key=lambda x: x.meta.get('alias', '')): - print(k.path) - x = input("Would you like to copy them all? 
Y/n: ") - if x.lower() == 'n': - return {'return': 0} - - import json - - for k in sorted(r['list'], key=lambda x: x.meta.get('alias', '')): - path = k.path - cacheid = os.path.basename(path) - - copy_cmd = f"rsync -avz --exclude cm-cached-state.json -e 'ssh -p {remote_port}' {path} {remote_user}@{remote_host}:{remote_mlc_cache_location}" - print(copy_cmd) - os.system(copy_cmd) - - mlc_cached_state_json_file = os.path.join(path, "cm-cached-state.json") - if not os.path.exists(mlc_cached_state_json_file): - return {'return': 1, - 'error': f'cm-cached-state.json file missing in {path}'} - - with open(mlc_cached_state_json_file, "r") as f: - mlc_cached_state = json.load(f) - - new_env = mlc_cached_state['new_env'] - new_state = mlc_cached_state['new_state'] # Todo fix new state - mlc_repos_path = os.environ.get( - 'MLC_REPOS', os.path.join( - os.path.expanduser("~"), "CM", "repos")) - mlc_cache_path = os.path.realpath( - os.path.join(mlc_repos_path, "local", "cache")) - - for key, val in new_env.items(): - - -if isinstance(val, if ) new_env[key] = val.replace( - mlc_cache_path, remote_mlc_cache_location) - - with open("tmp_remote_cached_state.json", "w") as f: - json.dump(mlc_cached_state, f, indent=2) - - remote_cached_state_file_location = os.path.join( - remote_mlc_cache_location, cacheid, "cm-cached-state.json") - copy_cmd = f"rsync -avz -e 'ssh -p {remote_port}' tmp_remote_cached_state.json {remote_user}@{remote_host}:{remote_cached_state_file_location}" - print(copy_cmd) - os.system(copy_cmd) - - return {'return': 0} diff --git a/automation/script/README.md b/automation/script/README.md index bbedf887d..397f19c35 100644 --- a/automation/script/README.md +++ b/automation/script/README.md @@ -1,28 +1,24 @@ -# CM "script" automation specification +# "Script" automation specification -Please check the [CM documentation](https://github.com/mlcommons/ck/tree/master/docs#collective-mind-language-cm) -for more details about the CM automation language. 
+## Getting started with script automation +* A script is identified by a set of tags and by unique ID. +* Further each script can have multiple variations and they are identified by variation tags which are treated in the same way as tags and identified by a `_` prefix. -## Getting started with CM scripts - -* A CM script is identified by a set of tags and by unique ID. -* Further each CM script can have multiple variations and they are identified by variation tags which are treated in the same way as tags and identified by a `_` prefix. - -### CM script execution flow -* When a CM script is invoked (either by tags or by unique ID), its `_cm.json` is processed first which will check for any `deps` script and if there are, then they are executed in order. +### MLC script execution flow +* When a script is invoked (either by tags or by unique ID), its `meta.yaml` is processed first which will check for any `deps` script and if there are, then they are executed in order. * Once all the `deps` scripts are executed, `customize.py` file is checked and if existing `preprocess` function inside it is executed if present. -* Then any `prehook_deps` CM scripts mentioned in `_cm.json` are executed similar to `deps` +* Then any `prehook_deps` scripts mentioned in `meta.yaml` are executed similar to `deps` * After this, keys in `env` dictionary is exported as `ENV` variables and `run` file if exists is executed. -* Once run file execution is done, any `posthook_deps` CM scripts mentioned in `_cm.json` are executed similar to `deps` +* Once run file execution is done, any `posthook_deps` scripts mentioned in `meta.yaml` are executed similar to `deps` * Then `postprocess` function inside customize.py is executed if present. -* After this stage any `post_deps` CM scripts mentioned in `_cm.json` is executed. +* After this stage any `post_deps` scripts mentioned in `meta.yaml` is executed. 
** If a script is already cached, then the `preprocess`, `run file` and `postprocess` executions won't happen and only the dependencies marked as `dynamic` will be executed from `deps`, `prehook_deps`, `posthook_deps` and `postdeps`. ### Input flags -When we run a CM script we can also pass inputs to it and any input added in `input_mapping` dictionary inside `_cm.json` gets converted to the corresponding `ENV` variable. +When we run an MLC script we can also pass inputs to it and any input added in `input_mapping` dictionary inside `meta.yaml` gets converted to the corresponding `ENV` variable. ### Conditional execution of any `deps`, `post_deps` We can use `skip_if_env` dictionary inside any `deps`, `prehook_deps`, `posthook_deps` or `post_deps` to make its execution conditional @@ -33,7 +29,7 @@ We can specify any specific version of a script using `version`. `version_max` a * When `version_max` is given, any version below this if present in the cache or detected in the system can be chosen. If nothing is detected `default_version` if present and if below `version_max` will be used for installation. Otherwise `version_max_usable` (additional needed input for `version_max`) will be used as `version`. ### Variations -* Variations are used to customize CM script and each unique combination of variations uses a unique cache entry. Each variation can turn on `env` keys also any other meta including dependencies specific to it. Variations are turned on like tags but with a `_` prefix. For example, if a script is having tags `"get,myscript"`, to call the variation `"test"` inside it, we have to use tags `"get,myscript,_test"`. +* Variations are used to customize scripts and each unique combination of variations uses a unique cache entry. Each variation can turn on `env` keys also any other meta including dependencies specific to it. Variations are turned on like tags but with a `_` prefix. 
For example, if a script is having tags `"get,myscript"`, to call the variation `"test"` inside it, we have to use tags `"get,myscript,_test"`. #### Variation groups `group` is a key to map variations into a group and at any time only one variation from a group can be used in the variation tags. For example, both `cpu` and `cuda` can be two variations under the `device` group, but user can at any time use either `cpu` or `cuda` as variation tags but not both. @@ -41,8 +37,7 @@ We can specify any specific version of a script using `version`. `version_max` a #### Dynamic variations Sometimes it is difficult to add all variations needed for a script like say `batch_size` which can take many different values. To handle this case, we support dynamic variations using '#' where '#' can be dynamically replaced by any string. For example, `"_batch_size.8"` can be used as a tag to turn on the dynamic variation `"_batch_size.#"`. -### ENV flow during CM script execution -* [TBD] Issue added [here](https://github.com/mlcommons/ck/issues/382) +### ENV flow during MLC script execution * During a given script execution incoming `env` dictionary is saved `(saved_env)` and all the updates happens on a copy of it. * Once a script execution is over (which includes all the dependent script executions as well), newly created keys and any updated keys are merged with the `saved_env` provided the keys are mentioned in `new_env_keys` * Same behaviour applies to `state` dictionary. 
diff --git a/automation/script/assets/scripts-workflow.png b/automation/script/assets/scripts-workflow.png deleted file mode 100644 index 60d0ef715..000000000 Binary files a/automation/script/assets/scripts-workflow.png and /dev/null differ diff --git a/automation/script/docker.py b/automation/script/docker.py index 9852937c1..bebee59e1 100644 --- a/automation/script/docker.py +++ b/automation/script/docker.py @@ -15,6 +15,8 @@ def dockerfile(self_module, input_params): if prune_result['return'] > 0: return prune_result + logger = self_module.logger + run_command_arc = prune_result['new_input'] current_directory = os.getcwd() is_quiet_mode = input_params.get('quiet', False) @@ -99,7 +101,7 @@ def dockerfile(self_module, input_params): if not docker_settings.get('run', True) and not input_params.get( 'docker_run_override', False): - logging.info("Docker 'run' is set to False in meta.json") + logger.info("Docker 'run' is set to False in meta.json") continue # Handle build dependencies @@ -186,7 +188,7 @@ def dockerfile(self_module, input_params): if dockerfile_result['return'] > 0: return dockerfile_result - logging.info(f"Dockerfile generated at {dockerfile_path}") + logger.info(f"Dockerfile generated at {dockerfile_path}") return {'return': 0} @@ -209,7 +211,7 @@ def docker_run(self_module, i): quiet = i.get('quiet', False) verbose = i.get('v', False) show_time = i.get('show_time', False) - + logger = self_module.logger env = i.get('env', {}) regenerate_docker_file = not i.get('docker_noregenerate', False) @@ -325,7 +327,7 @@ def docker_run(self_module, i): # Skip scripts marked as non-runnable if not docker_settings.get('run', True) and not i.get( 'docker_run_override', False): - logging.info("docker.run set to False in meta.yaml") + logger.info("docker.run set to False in meta.yaml") continue r = self_module._update_env_from_input(env, i) diff --git a/automation/script/docker_utils.py b/automation/script/docker_utils.py index 6379c515f..75c1f68a4 100644 --- 
a/automation/script/docker_utils.py +++ b/automation/script/docker_utils.py @@ -1,7 +1,6 @@ import os from mlc import utils from utils import * -import logging from pathlib import PureWindowsPath, PurePosixPath from script.docker_utils import * import copy @@ -60,7 +59,7 @@ def process_mounts(mounts, env, docker_settings, f_run_cmd): for placeholder in container_placeholders: if placeholder in env: new_container_mount, container_env_key = get_container_path( - env[placeholder]) + env[placeholder], docker_settings.get('user', 'mlcuser')) else: # Skip mount if variable is missing mounts[index] = None break @@ -77,7 +76,6 @@ def process_mounts(mounts, env, docker_settings, f_run_cmd): container_env_string += f" --env.{host_env_key}={container_env_key} " for key, value in docker_input_mapping.items(): if value == host_env_key: - i[key] = container_env_key f_run_cmd[key] = container_env_key # Remove invalid mounts and construct mount string @@ -103,7 +101,7 @@ def prepare_docker_inputs(input_params, docker_settings, keys = [ "mlc_repo", "mlc_repo_branch", "base_image", "os", "os_version", - "mlc_repos", "skip_mlc_sys_upgrade", "extra_sys_deps", + "mlc_repos", "skip_mlc_sys_upgrade", "extra_sys_deps", "image_name", "gh_token", "fake_run_deps", "run_final_cmds", "real_run", "copy_files", "path", "user" ] @@ -111,7 +109,7 @@ def prepare_docker_inputs(input_params, docker_settings, keys += [ "skip_run_cmd", "pre_run_cmds", "run_cmd_prefix", "all_gpus", "num_gpus", "device", "gh_token", "port_maps", "shm_size", "pass_user_id", "pass_user_group", "extra_run_args", "detached", "interactive", - "dt", "it", "use_host_group_id", "use_host_user_id" + "dt", "it", "use_host_group_id", "use_host_user_id", "keep_detached", "reuse_existing" ] # Collect Dockerfile inputs docker_inputs = { @@ -378,6 +376,8 @@ def get_docker_default(key): "port_maps": [], "use_host_user_id": True, "use_host_group_id": True, + "keep_detached": False, + "reuse_existing": True } if key in defaults: return 
defaults[key] @@ -399,19 +399,25 @@ def get_host_path(value): def get_container_path_script(i): + import getpass + cur_user = getpass.getuser() + if not cur_user or cur_user == '': + cur_user = os.environ.get('USER', 'mlcuser') + tmp_dep_cached_path = i['tmp_dep_cached_path'] - value_mnt, value_env = get_container_path(tmp_dep_cached_path) + value_mnt, value_env = get_container_path( + tmp_dep_cached_path, cur_user) return {'return': 0, 'value_mnt': value_mnt, 'value_env': value_env} -def get_container_path(value): +def get_container_path(value, username="mlcuser"): path_split = value.split(os.sep) if len(path_split) == 1: return value new_value = '' if "cache" in path_split and "local" in path_split: - new_path_split = ["", "home", "mlcuser", "MLC", "repos"] + new_path_split = ["", "home", username, "MLC", "repos"] repo_entry_index = path_split.index("local") if len(path_split) >= repo_entry_index + 3: new_path_split1 = new_path_split + \ diff --git a/automation/script/module.py b/automation/script/module.py index 868178f49..8e7556eb4 100644 --- a/automation/script/module.py +++ b/automation/script/module.py @@ -12,6 +12,7 @@ import logging from mlc.main import Automation +from mlc.main import CacheAction import mlc.utils as utils from utils import * @@ -26,7 +27,6 @@ class ScriptAutomation(Automation): ############################################################ def __init__(self, action_object, automation_file): super().__init__(action_object, "script", automation_file) - logging.basicConfig(level=logging.INFO) self.os_info = {} self.run_state = {} self.run_state['deps'] = [] @@ -35,7 +35,13 @@ def __init__(self, action_object, automation_file): self.run_state['version_info'] = [] self.run_state['cache'] = False self.file_with_cached_state = 'mlc-cached-state.json' + # self.logger = logging.getLogger() + # logging.basicConfig(level=logging.INFO) + self.logger = self.action_object.logger + self.logger.propagate = False + # Create CacheAction using the same parent 
as the Script + self.cache_action = CacheAction(self.action_object.parent) self.tmp_file_env = 'tmp-env' self.tmp_file_env_all = 'tmp-env-all' self.tmp_file_run = 'tmp-run' @@ -188,7 +194,7 @@ def run(self, i): (print_readme) (bool): if True, will print README with all MLC steps (deps) to run a given script - (script_call_prefix) (str): how to call script in logs and READMEs (mlc run script) + (script_call_prefix) (str): how to call script in logs and READMEs (mlcr) (skip_sys_utils) (bool): if True, set env['MLC_SKIP_SYS_UTILS']='yes' to skip MLC sys installation @@ -233,6 +239,8 @@ def _run(self, i): repro = i.get('repro', False) repro_prefix = '' + logger = self.logger + if repro: repro_prefix = i.get('repro_prefix', '') if repro_prefix == '': @@ -257,19 +265,6 @@ def _run(self, i): return { 'return': 1, 'error': 'Current directory "{}" is not writable - please change it'.format(os.getcwd())} - ''' - # Check if has default config - r = self.action_object.access({'action': 'load', - 'automation': 'cfg,88dce9c160324c5d', - 'artifact': 'default'}) - if r['return'] == 0: - config = r['config'] - - script_input = config.get('script', {}) - - if len(script_input) > 0: - utils.merge_dicts({'dict1': i, 'dict2': script_input}) - ''' recursion_int = int(i.get('recursion_int', 0)) + 1 start_time = time.time() @@ -359,7 +354,6 @@ def _run(self, i): skip_cache = i.get('skip_cache', False) force_cache = i.get('force_cache', False) - fake_run = i.get('fake_run', False) fake_run = i.get( 'fake_run', False) if 'fake_run' in i else i.get( @@ -417,7 +411,7 @@ def _run(self, i): if verbose: env['MLC_VERBOSE'] = 'yes' run_state['tmp_verbose'] = True - logging.getLogger().setLevel(logging.DEBUG) + logger.setLevel(logging.DEBUG) print_deps = i.get('print_deps', False) print_versions = i.get('print_versions', False) @@ -469,8 +463,6 @@ def _run(self, i): # manage OS environment if len(self.os_info) == 0: r = get_host_os_info() - # r = self.access({'action': 'get_host_os_info', - # 
'automation': 'utils,dc2743f8450541e3'}) if r['return'] > 0: return r @@ -552,15 +544,15 @@ def _run(self, i): mlc_script_info = i.get('script_call_prefix', '').strip() if mlc_script_info == '': - mlc_script_info = 'mlc run script' + mlc_script_info = 'mlcr ' if not mlc_script_info.endswith(' '): mlc_script_info += ' ' - x = '--tags=' + x = '' y = ',' if parsed_script_alias != '': mlc_script_info += parsed_script_alias - x = '--tags="' + x = '"' if len(script_tags) > 0 or len(variation_tags) > 0: mlc_script_info += x @@ -576,10 +568,10 @@ def _run(self, i): mlc_script_info += y.join(x_variation_tags) # if verbose: -# logging.info('') +# logger.info('') if not run_state.get('tmp_silent', False): - logging.info(recursion_spaces + '* ' + mlc_script_info) + logger.info(recursion_spaces + '* ' + mlc_script_info) ####################################################################### # Report if scripts were not found or there is an ambiguity with UIDs @@ -611,8 +603,8 @@ def _run(self, i): # Sort scripts for better determinism list_of_found_scripts = sorted(list_of_found_scripts, key=lambda a: (a.meta.get('sort', 0), a.path)) - logging.debug(recursion_spaces + - ' - Number of scripts found: {}'.format(len(list_of_found_scripts))) + logger.debug(recursion_spaces + + ' - Number of scripts found: {}'.format(len(list_of_found_scripts))) # Check if script selection is remembered if not skip_remembered_selections and len(list_of_found_scripts) > 1: @@ -621,7 +613,7 @@ def _run(self, i): selection['tags'].split(',')) == set(script_tags_string.split(',')): # Leave 1 entry in the found list list_of_found_scripts = [selection['cached_script']] - logging.debug( + logger.debug( recursion_spaces + ' - Found remembered selection with tags: {}'.format(script_tags_string)) break @@ -673,20 +665,20 @@ def _run(self, i): cache_tags_without_tmp_string = cache_tags_without_tmp_string.replace( ",_-", ",-_") - logging.debug( + logger.debug( recursion_spaces + ' - Searching for cached script 
outputs with the following tags: {}'.format(cache_tags_without_tmp_string)) - search_cache = {'action': 'find', - 'automation': self.meta['deps']['cache'], + search_cache = {'action': 'search', + 'target_name': 'cache', 'tags': cache_tags_without_tmp_string} - rc = self.action_object.access(search_cache) + rc = self.cache_action.access(search_cache) if rc['return'] > 0: return rc cache_list = rc['list'] - logging.debug( + logger.debug( recursion_spaces + ' - Number of cached script outputs found: {}'.format( len(cache_list))) @@ -708,21 +700,21 @@ def _run(self, i): for cache_entry in cache_list: # Find associated script and add to the # list_of_found_scripts - associated_script_artifact = cache_entry.meta['associated_script_artifact'] + associated_script_item = cache_entry.meta['associated_script_item'] - x = associated_script_artifact.find(',') + x = associated_script_item.find(',') if x < 0: return {'return': 1, 'error': 'MLC artifact format is wrong "{}" - no comma found'.format( - associated_script_artifact)} + associated_script_item)} - associated_script_artifact_uid = associated_script_artifact[x + 1:] + associated_script_item_uid = associated_script_item[x + 1:] - cache_entry.meta['associated_script_artifact_uid'] = associated_script_artifact_uid + cache_entry.meta['associated_script_item_uid'] = associated_script_item_uid for script in list_of_found_scripts: script_uid = script.meta['uid'] - if associated_script_artifact_uid == script_uid: + if associated_script_item_uid == script_uid: if script not in new_list_of_found_scripts: new_list_of_found_scripts.append(script) @@ -733,14 +725,15 @@ def _run(self, i): # Select scripts if len(list_of_found_scripts) > 1: - select_script = select_script_artifact( + select_script = select_script_item( list_of_found_scripts, 'script', recursion_spaces, False, script_tags_string, quiet, - verbose) + verbose, + logger) # Remember selection if not skip_remembered_selections: @@ -752,23 +745,23 @@ def _run(self, i): # Prune 
cache list with the selected script if len(list_of_found_scripts) > 0: - script_artifact_uid = list_of_found_scripts[select_script].meta['uid'] + script_item_uid = list_of_found_scripts[select_script].meta['uid'] new_cache_list = [] for cache_entry in cache_list: - if cache_entry.meta['associated_script_artifact_uid'] == script_artifact_uid: + if cache_entry.meta['associated_script_item_uid'] == script_item_uid: new_cache_list.append(cache_entry) cache_list = new_cache_list # Here a specific script is found and meta obtained # Set some useful local variables - script_artifact = list_of_found_scripts[select_script] + script_item = list_of_found_scripts[select_script] # print(list_of_found_scripts) - meta = script_artifact.meta + meta = script_item.meta # print(meta) - path = script_artifact.path + path = script_item.path # Check min MLC version requirement min_mlc_version = meta.get('min_mlc_version', '').strip() @@ -785,12 +778,12 @@ def _run(self, i): error = format(e) # Check path to repo - script_repo_path = script_artifact.repo.path + script_repo_path = script_item.repo.path - script_repo_path_with_prefix = script_artifact.repo.path - if script_artifact.repo.meta.get('prefix', '') != '': + script_repo_path_with_prefix = script_item.repo.path + if script_item.repo.meta.get('prefix', '') != '': script_repo_path_with_prefix = os.path.join( - script_repo_path, script_artifact.repo.meta['prefix']) + script_repo_path, script_item.repo.meta['prefix']) env['MLC_TMP_CURRENT_SCRIPT_REPO_PATH'] = script_repo_path env['MLC_TMP_CURRENT_SCRIPT_REPO_PATH_WITH_PREFIX'] = script_repo_path_with_prefix @@ -803,18 +796,18 @@ def _run(self, i): run_state['script_id'] = meta['alias'] + "," + meta['uid'] run_state['script_tags'] = script_tags run_state['script_variation_tags'] = variation_tags - run_state['script_repo_alias'] = script_artifact.repo.meta.get( + run_state['script_repo_alias'] = script_item.repo.meta.get( 'alias', '') - run_state['script_repo_git'] = 
script_artifact.repo.meta.get( + run_state['script_repo_git'] = script_item.repo.meta.get( 'git', False) run_state['cache'] = meta.get('cache', False) if not recursion: run_state['script_entry_repo_to_report_errors'] = meta.get( 'repo_to_report_errors', '') - run_state['script_entry_repo_alias'] = script_artifact.repo.meta.get( + run_state['script_entry_repo_alias'] = script_item.repo.meta.get( 'alias', '') - run_state['script_entry_repo_git'] = script_artifact.repo.meta.get( + run_state['script_entry_repo_git'] = script_item.repo.meta.get( 'git', False) deps = meta.get('deps', []) @@ -826,7 +819,7 @@ def _run(self, i): new_env_keys_from_meta = meta.get('new_env_keys', []) new_state_keys_from_meta = meta.get('new_state_keys', []) - found_script_artifact = utils.assemble_object( + found_script_item = utils.assemble_object( meta['alias'], meta['uid']) found_script_tags = meta.get('tags', []) @@ -834,10 +827,10 @@ def _run(self, i): if i.get('debug_script', False): debug_script_tags = ','.join(found_script_tags) - logging.debug(recursion_spaces + - ' - Found script::{} in {}'.format(found_script_artifact, path)) + logger.debug(recursion_spaces + + ' - Found script::{} in {}'.format(found_script_item, path)) - # STEP 500 output: script_artifact - unique selected script artifact + # STEP 500 output: script_item - unique selected script artifact # (cache_list) pruned for the unique script if cache is used # meta - script meta # path - script path @@ -848,21 +841,21 @@ def _run(self, i): # STEP 600: Continue updating env # Add default env from meta to new env if not empty # (env NO OVERWRITE) - script_artifact_default_env = meta.get('default_env', {}) - for key in script_artifact_default_env: - env.setdefault(key, script_artifact_default_env[key]) + script_item_default_env = meta.get('default_env', {}) + for key in script_item_default_env: + env.setdefault(key, script_item_default_env[key]) # Force env from meta['env'] as a CONST # (env OVERWRITE) - script_artifact_env = 
meta.get('env', {}) - # print(f"script meta env= {script_artifact_env}") + script_item_env = meta.get('env', {}) + # print(f"script meta env= {script_item_env}") - env.update(script_artifact_env) + env.update(script_item_env) # print(f"env = {env}") - script_artifact_state = meta.get('state', {}) + script_item_state = meta.get('state', {}) utils.merge_dicts({'dict1': state, - 'dict2': script_artifact_state, + 'dict2': script_item_state, 'append_lists': True, 'append_unique': True}) @@ -901,7 +894,7 @@ def _run(self, i): # VARIATIONS OVERWRITE current ENV but not input keys (they become # const) - variations = script_artifact.meta.get('variations', {}) + variations = script_item.meta.get('variations', {}) state['docker'] = meta.get('docker', {}) r = self._update_state_from_variations( @@ -988,7 +981,7 @@ def _run(self, i): # del(env[key]) if len(notes) > 0: - logging.debug( + logger.debug( recursion_spaces + ' - Requested version: ' + ' '.join(notes)) @@ -998,7 +991,7 @@ def _run(self, i): # STEP 1000: Update version only if in "versions" (not obligatory) # can be useful when handling complex Git revisions - versions = script_artifact.meta.get('versions', {}) + versions = script_item.meta.get('versions', {}) if version != '' and version in versions: versions_meta = versions[version] @@ -1046,9 +1039,9 @@ def _run(self, i): if state.get('docker'): if str(state['docker'].get('run', True) ).lower() in ['false', '0', 'no']: - logging.info( + logger.info( recursion_spaces + - ' - Skipping script::{} run as we are inside docker'.format(found_script_artifact)) + ' - Skipping script::{} run as we are inside docker'.format(found_script_item)) # restore env and state for k in list(env.keys()): @@ -1069,9 +1062,9 @@ def _run(self, i): return rr elif str(state['docker'].get('real_run', True)).lower() in ['false', '0', 'no']: - logging.info( + logger.info( recursion_spaces + - ' - Doing fake run for script::{} as we are inside docker'.format(found_script_artifact)) + ' - Doing 
fake run for script::{} as we are inside docker'.format(found_script_item)) fake_run = True env['MLC_TMP_FAKE_RUN'] = 'yes' @@ -1146,8 +1139,8 @@ def _run(self, i): customize_common_input = { 'input': i, 'automation': self, - 'artifact': script_artifact, - 'customize': script_artifact.meta.get('customize', {}), + 'artifact': script_item, + 'customize': script_item.meta.get('customize', {}), 'os_info': os_info, 'recursion_spaces': recursion_spaces, 'script_tags': script_tags, @@ -1216,14 +1209,15 @@ def _run(self, i): num_found_cached_scripts = 1 if num_found_cached_scripts > 1: - selection = select_script_artifact( + selection = select_script_item( found_cached_scripts, 'cached script output', recursion_spaces, True, script_tags_string, quiet, - verbose) + verbose, + logger) if selection >= 0: if not skip_remembered_selections: @@ -1235,7 +1229,7 @@ def _run(self, i): num_found_cached_scripts = 0 elif num_found_cached_scripts == 1: - logging.debug( + logger.debug( recursion_spaces + ' - Found cached script output: {}'.format( found_cached_scripts[0].path)) @@ -1245,7 +1239,7 @@ def _run(self, i): # Check chain of dynamic dependencies on other MLC scripts if len(deps) > 0: - logging.debug( + logger.debug( recursion_spaces + ' - Checking dynamic dependencies on other MLC scripts:') @@ -1255,7 +1249,7 @@ def _run(self, i): if r['return'] > 0: return r - logging.debug( + logger.debug( recursion_spaces + ' - Processing env after dependencies ...') @@ -1265,7 +1259,7 @@ def _run(self, i): # Check chain of prehook dependencies on other MLC scripts. 
# (No execution of customize.py for cached scripts) - logging.debug( + logger.debug( recursion_spaces + ' - Checking prehook dependencies on other MLC scripts:') @@ -1278,7 +1272,7 @@ def _run(self, i): # Continue with the selected cached script cached_script = found_cached_scripts[selection] - logging.debug( + logger.debug( recursion_spaces + ' - Loading state from cached entry ...') @@ -1291,7 +1285,7 @@ def _run(self, i): version = r['meta'].get('version') if not run_state.get('tmp_silent', False): - logging.info( + logger.info( recursion_spaces + ' ! load {}'.format(path_to_cached_state_file)) @@ -1326,7 +1320,7 @@ def _run(self, i): if not fake_run: # Check chain of posthook dependencies on other MLC scripts. We consider them same as postdeps when # script is in cache - logging.debug( + logger.debug( recursion_spaces + ' - Checking posthook dependencies on other MLC scripts:') @@ -1339,7 +1333,7 @@ def _run(self, i): if r['return'] > 0: return r - logging.debug( + logger.debug( recursion_spaces + ' - Checking post dependencies on other MLC scripts:') @@ -1353,7 +1347,7 @@ def _run(self, i): if renew or (not found_cached and num_found_cached_scripts == 0): # Add more tags to cached tags # based on meta information of the found script - x = 'script-artifact-' + meta['uid'] + x = 'script-item-' + meta['uid'] if x not in cached_tags: cached_tags.append(x) @@ -1386,24 +1380,23 @@ def _run(self, i): tmp_tags.append(x) # Use update to update the tmp one if already exists - logging.debug( + logger.debug( recursion_spaces + ' - Creating new "cache" script artifact in the MLC local repository ...') - logging.debug(recursion_spaces + - ' - Tags: {}'.format(','.join(tmp_tags))) - + logger.debug(recursion_spaces + + ' - Tags: {}'.format(','.join(tmp_tags))) if version != '': cached_meta['version'] = version ii = {'action': 'update', - 'automation': self.meta['deps']['cache'], + 'target': 'cache', 'search_tags': tmp_tags, 'script_alias': meta['alias'], 'tags': 
','.join(tmp_tags), 'meta': cached_meta, 'force': True} - r = self.action_object.access(ii) + r = self.cache_action.access(ii) if r['return'] > 0: return r @@ -1418,7 +1411,7 @@ def _run(self, i): # Changing path to MLC script artifact for cached output # to record data and files there - logging.debug( + logger.debug( recursion_spaces + ' - Changing to {}'.format(cached_path)) @@ -1433,7 +1426,7 @@ def _run(self, i): # Changing path to MLC script artifact for cached output # to record data and files there - logging.debug( + logger.debug( recursion_spaces + ' - Changing to {}'.format(cached_path)) @@ -1460,12 +1453,12 @@ def _run(self, i): ################################ if not found_cached: if len(warnings) > 0: - logging.warn( + logger.warn( '=================================================') - logging.warn('WARNINGS:') + logger.warn('WARNINGS:') for w in warnings: - logging.warn(' ' + w) - logging.warn( + logger.warn(' ' + w) + logger.warn( '=================================================') # Update default version meta if version is not set @@ -1495,7 +1488,7 @@ def _run(self, i): else: version = version_max - logging.debug( + logger.debug( recursion_spaces + ' - Version is not specified - use either default_version from meta or min/max/usable: {}'.format(version)) @@ -1503,8 +1496,9 @@ def _run(self, i): if r['return'] > 0: return r - if 'version-' + version not in cached_tags: - cached_tags.append('version-' + version) + r = get_version_tag_from_version(version, cached_tags) + if r['return'] > 0: + return r if default_version in versions: versions_meta = versions[default_version] @@ -1547,7 +1541,7 @@ def _run(self, i): if len(docker_deps) > 0: - logging.debug( + logger.debug( recursion_spaces + ' - Checking docker run dependencies on other MLC scripts:') @@ -1557,7 +1551,7 @@ def _run(self, i): if r['return'] > 0: return r - logging.debug( + logger.debug( recursion_spaces + ' - Processing env after docker run dependencies ...') @@ -1608,8 +1602,8 @@ def 
_run(self, i): customize_common_input = { 'input': i, 'automation': self, - 'artifact': script_artifact, - 'customize': script_artifact.meta.get('customize', {}), + 'artifact': script_item, + 'customize': script_item.meta.get('customize', {}), 'os_info': os_info, 'recursion_spaces': recursion_spaces, 'script_tags': script_tags, @@ -1624,7 +1618,7 @@ def _run(self, i): run_script_input['ignore_script_error'] = True if 'predeps' in dir(customize_code) and not fake_run: - logging.debug( + logger.debug( recursion_spaces + ' - Running preprocess ...') @@ -1645,8 +1639,8 @@ def _run(self, i): # print(f"before deps: ") # utils.print_env(env) if len(deps) > 0: - logging.debug(recursion_spaces + - ' - Checking dependencies on other MLC scripts:') + logger.debug(recursion_spaces + + ' - Checking dependencies on other MLC scripts:') r = self._call_run_deps(deps, self.local_env_keys, local_env_keys_from_meta, env, state, const, const_state, add_deps_recursive, recursion_spaces + extra_recursion_spaces, @@ -1654,8 +1648,8 @@ def _run(self, i): if r['return'] > 0: return r - logging.debug(recursion_spaces + - ' - Processing env after dependencies ...') + logger.debug(recursion_spaces + + ' - Processing env after dependencies ...') r = update_env_with_values(env) if r['return'] > 0: @@ -1710,8 +1704,8 @@ def _run(self, i): customize_common_input = { 'input': i, 'automation': self, - 'artifact': script_artifact, - 'customize': script_artifact.meta.get('customize', {}), + 'artifact': script_item, + 'customize': script_item.meta.get('customize', {}), 'os_info': os_info, 'recursion_spaces': recursion_spaces, 'script_tags': script_tags, @@ -1755,7 +1749,7 @@ def _run(self, i): return r if pip_version_string != '': - logging.debug( + logger.debug( recursion_spaces + ' # potential PIP version string (if needed): ' + pip_version_string) @@ -1776,7 +1770,7 @@ def _run(self, i): # Check if pre-process and detect if 'preprocess' in dir(customize_code) and not fake_run: - 
logging.debug(recursion_spaces + ' - Running preprocess ...') + logger.debug(recursion_spaces + ' - Running preprocess ...') # print(f"preprocess_env:") # utils.print_env(env) @@ -1797,7 +1791,7 @@ def _run(self, i): skip = r.get('skip', False) if skip: - logging.debug( + logger.debug( recursion_spaces + ' - this script is skipped!') @@ -1808,7 +1802,7 @@ def _run(self, i): if len(another_script) == 0: return {'return': 0, 'skipped': True} - logging.debug( + logger.debug( recursion_spaces + ' - another script is executed instead!') @@ -1836,10 +1830,13 @@ def _run(self, i): # If return version if cache: - if r.get('version', '') != '': + version = r.get('version', '') + if version != '': cached_tags = [ x for x in cached_tags if not x.startswith('version-')] - cached_tags.append('version-' + r['version']) + r = get_version_tag_from_version(version, cached_tags) + if r['return'] > 0: + return r if len(r.get('add_extra_cache_tags', [])) > 0: for t in r['add_extra_cache_tags']: @@ -1848,11 +1845,11 @@ def _run(self, i): if print_env: import json - logging.debug(json.dumps(env, indent=2, sort_keys=True)) + logger.debug(json.dumps(env, indent=2, sort_keys=True)) # Check chain of pre hook dependencies on other MLC scripts if len(prehook_deps) > 0: - logging.debug( + logger.debug( recursion_spaces + ' - Checking prehook dependencies on other MLC scripts:') @@ -1881,9 +1878,14 @@ def _run(self, i): if r.get('version', '') != '': version = r.get('version') if cache: - cached_tags = [ - x for x in cached_tags if not x.startswith('version-')] - cached_tags.append('version-' + r['version']) + version = r.get('version', '') + if version != '': + cached_tags = [ + x for x in cached_tags if not x.startswith('version-')] + r = get_version_tag_from_version( + version, cached_tags) + if r['return'] > 0: + return r if len(r.get('add_extra_cache_tags', [])) > 0 and cache: for t in r['add_extra_cache_tags']: @@ -2015,7 +2017,7 @@ def _run(self, i): return r # Remove tmp tag from the 
"cached" arifact to finalize caching - logging.debug( + logger.debug( recursion_spaces + ' - Removing tmp tag in the script cached output {} ...'.format(cached_uid)) @@ -2023,15 +2025,15 @@ def _run(self, i): if detected_version != '': cached_meta['version'] = detected_version - if found_script_artifact != '': - cached_meta['associated_script_artifact'] = found_script_artifact + if found_script_item != '': + cached_meta['associated_script_item'] = found_script_item - x = found_script_artifact.find(',') + x = found_script_item.find(',') if x < 0: return { - 'return': 1, 'error': 'MLC artifact format is wrong "{}" - no comma found'.format(found_script_artifact)} + 'return': 1, 'error': 'MLC artifact format is wrong "{}" - no comma found'.format(found_script_item)} - cached_meta['associated_script_artifact_uid'] = found_script_artifact[x + 1:] + cached_meta['associated_script_item_uid'] = found_script_item[x + 1:] # Check if the cached entry is dependent on any path if dependent_cached_path != '': @@ -2042,14 +2044,14 @@ def _run(self, i): cached_meta['dependent_cached_path'] = dependent_cached_path ii = {'action': 'update', - 'automation': self.meta['deps']['cache'], + 'target': 'cache', 'uid': cached_uid, 'meta': cached_meta, 'script_alias': meta['alias'], 'replace_lists': True, # To replace tags 'tags': ','.join(cached_tags)} - r = self.action_object.access(ii) + r = self.cache_action.access(ii) if r['return'] > 0: return r @@ -2124,11 +2126,11 @@ def _run(self, i): # to aggregate all resolved versions and dump them at the end # if requested (for better reproducibility/replicability) - script_uid = script_artifact.meta.get('uid') - script_alias = script_artifact.meta.get('alias') + script_uid = script_item.meta.get('uid') + script_alias = script_item.meta.get('alias') # we should use user-friendly tags here - # script_tags = script_artifact.meta.get('tags') + # script_tags = script_item.meta.get('tags') version_info_tags = ",".join(script_tags) @@ -2159,7 +2161,7 
@@ def _run(self, i): elapsed_time = time.time() - start_time if verbose and cached_uid != '': - logging.info( + logger.info( recursion_spaces + ' - cache UID: {}'.format(cached_uid)) @@ -2190,7 +2192,7 @@ def _run(self, i): # Print output as json to console if i.get('json', False) or i.get('j', False): import json - logging.info(json.dumps(rr, indent=2)) + logger.info(json.dumps(rr, indent=2)) # Check if save json to file if repro_prefix != '': @@ -2201,7 +2203,7 @@ def _run(self, i): dump_repro(repro_prefix, rr, run_state) if verbose or show_time: - logging.info( + logger.info( recursion_spaces + ' - running time of script "{}": {:.2f} sec.'.format( ','.join(found_script_tags), @@ -2214,7 +2216,7 @@ def _run(self, i): (start_disk_stats.free - stop_disk_stats.free) / (1024 * 1024)) if used_disk_space_in_mb > 0: - logging.info( + logger.info( recursion_spaces + ' - used disk space: {} MB'.format(used_disk_space_in_mb)) @@ -2229,7 +2231,7 @@ def _run(self, i): v = new_env.get(p, None) - logging.info('{}: {}'.format(t, str(v))) + logger.info('{}: {}'.format(t, str(v))) # Check if print nice versions if print_versions: @@ -2324,7 +2326,7 @@ def _dump_version_info_for_script( pass for f in ['mlc-run-script-versions.json', 'version_info.json']: if not quiet and not silent: - logging.info('Dumping versions to {}'.format(f)) + logger.info('Dumping versions to {}'.format(f)) r = utils.save_json(f, self.run_state.get('version_info', [])) if r['return'] > 0: return r @@ -2335,6 +2337,7 @@ def _dump_version_info_for_script( def _update_state_from_variations(self, i, meta, variation_tags, variations, env, state, const, const_state, deps, post_deps, prehook_deps, posthook_deps, new_env_keys_from_meta, new_state_keys_from_meta, add_deps_recursive, run_state, recursion_spaces, verbose): + logger = self.action_object.logger # Save current explicit variations import copy explicit_variation_tags = copy.deepcopy(variation_tags) @@ -2420,7 +2423,7 @@ def 
_update_state_from_variations(self, i, meta, variation_tags, variations, env x = '_' + t variation_tags_string += x - logging.debug( + logger.debug( recursion_spaces + ' Prepared variations: {}'.format(variation_tags_string)) @@ -2755,7 +2758,7 @@ def version(self, i): version = self.__version__ if console: - logging.info(version) + self.action_object.logger.info(version) return {'return': 0, 'version': version} @@ -2830,8 +2833,8 @@ def search(self, i): if found_scripts and len(variation_tags) > 0: filtered = [] - for script_artifact in lst: - meta = script_artifact.meta + for script_item in lst: + meta = script_item.meta variations = meta.get('variations', {}) matched = True @@ -2851,7 +2854,7 @@ def search(self, i): if not matched: continue - filtered.append(script_artifact) + filtered.append(script_item) if len(lst) > 0 and not filtered: warning = [""] @@ -2868,8 +2871,8 @@ def search(self, i): # Print filtered paths if console if console: for script in r['list']: - # This should not be logging since the output can be consumed by other external tools and scripts - # logging.info(script.path) + # This should not be logger since the output can be consumed by other external tools and scripts + # logger.info(script.path) print(script.path) # Finalize output @@ -2924,19 +2927,20 @@ def test(self, i): if r['return'] > 0: return r + logger = self.action_object.logger lst = r['list'] - for script_artifact in lst: - path = script_artifact.path - meta = script_artifact.meta - original_meta = script_artifact.original_meta + for script_item in lst: + path = script_item.path + meta = script_item.meta + original_meta = script_item.original_meta alias = meta.get('alias', '') uid = meta.get('uid', '') if console: - logging.info(path) + logger.info(path) test_config = meta.get('tests', '') if test_config: - logging.info(test_config) + logger.info(test_config) variations = meta.get("variations") tags_string = ",".join(meta.get("tags")) test_input_index = 
i.get('test_input_index') @@ -3035,7 +3039,7 @@ def test(self, i): if i_env: import copy ii['env'] = copy.deepcopy(i_env) - logging.info(ii) + logger.info(ii) r = self.action_object.access(ii) if r['return'] > 0: return r @@ -3160,6 +3164,7 @@ def add(self, i): import shutil console = i.get('out') == 'con' + logger = self.action_object.logger # Try to find script artifact by alias and/or tags # ii = utils.sub_input(i, self.cmind.cfg['artifact_keys']) @@ -3308,7 +3313,7 @@ def add(self, i): new_script_path = r_obj['path'] if console: - logging.info('Created script in {}'.format(new_script_path)) + logger.info('Created script in {}'.format(new_script_path)) # Copy files from template (only if exist) files = [ @@ -3354,7 +3359,7 @@ def add(self, i): f2 = os.path.join(new_script_path, f2) if console: - logging.info(' * Copying {} to {}'.format(f1, f2)) + logger.info(' * Copying {} to {}'.format(f1, f2)) shutil.copyfile(f1, f2) @@ -3682,7 +3687,7 @@ def _run_deps(self, deps, clean_env_keys_deps, env, state, const, const_state, a run_state['script_variation_tags']) + " )" # Run collective script via MLC API: - # Not very efficient but allows logging - can be optimized + # Not very efficient but allows logger - can be optimized # later # print(f"env about to call deps {d}= {env}") @@ -3808,7 +3813,7 @@ def _get_readme(self, cmd_parts, run_state): ```bash """ - cmd = "mlc run script " + cmd = "mlcr " for cmd_part in cmd_parts: x = '"' if ' ' in cmd_part and not cmd_part.startswith('-') else '' @@ -3831,7 +3836,7 @@ def _get_readme(self, cmd_parts, run_state): xversion = ' --version={}\n'.format(version) content += "```bash\n" - content += "mlc run script --tags=" + \ + content += "mlcr " + \ dep_tags + "{}\n".format(xversion) content += "```\n\n" @@ -3843,11 +3848,11 @@ def _print_versions(self, run_state): """ Print versions in the nice format """ - + logger = self.action_object.logger version_info = run_state.get('version_info', []) - 
logging.info('=========================') - logging.info('Versions of dependencies:') + logger.info('=========================') + logger.info('Versions of dependencies:') for v in version_info: k = list(v.keys())[0] version_info_dict = v[k] @@ -3855,9 +3860,9 @@ def _print_versions(self, run_state): version = version_info_dict.get('version', '') if version != '': - logging.info('* {}: {}'.format(k, version)) + logger.info('* {}: {}'.format(k, version)) - logging.info('=========================') + logger.info('=========================') return {} @@ -3876,11 +3881,12 @@ def _print_deps(self, deps): Prints the MLC run commands for the list of MLC script dependencies """ + logger = self.action_object.logger print_deps_data = [] run_cmds = self._get_deps_run_cmds(deps) for cmd in run_cmds: print_deps_data.append(cmd) - logging.info(cmd) + logger.info(cmd) return print_deps_data @@ -3894,7 +3900,7 @@ def _get_deps_run_cmds(self, deps): run_cmds = [] for dep_tags in deps: - run_cmds.append("mlc run script --tags=" + dep_tags) + run_cmds.append("mlcr " + dep_tags) return run_cmds @@ -3988,7 +3994,7 @@ def find_file_in_paths(self, i): select = i.get('select', False) select_default = i.get('select_default', False) recursion_spaces = i.get('recursion_spaces', '') - + logger = self.action_object.logger hook = i.get('hook', None) verbose = i.get('verbose', False) @@ -4105,14 +4111,14 @@ def find_file_in_paths(self, i): x += ' <= {}'.format(version_max) if x != '': - logging.info( + logger.info( recursion_spaces + ' - Searching for versions: {}'.format(x)) new_recursion_spaces = recursion_spaces + ' ' for path_to_file in found_files: - logging.info(recursion_spaces + ' * ' + path_to_file) + logger.info(recursion_spaces + ' * ' + path_to_file) run_script_input['env'] = env run_script_input['env'][env_path_key] = path_to_file @@ -4132,7 +4138,7 @@ def find_file_in_paths(self, i): if detected_version != '': if detected_version == -1: - logging.info( + logger.info( 
recursion_spaces + ' SKIPPED due to incompatibility ...') else: ry = check_version_constraints({'detected_version': detected_version, @@ -4147,7 +4153,7 @@ def find_file_in_paths(self, i): found_files_with_good_version.append( path_to_file) else: - logging.info( + logger.info( recursion_spaces + ' SKIPPED due to version constraints ...') found_files = found_files_with_good_version @@ -4158,13 +4164,13 @@ def find_file_in_paths(self, i): selection = 0 else: # Select 1 and proceed - logging.info( + logger.info( recursion_spaces + ' - More than 1 path found:') num = 0 for file in found_files: - logging.info( + logger.info( recursion_spaces + ' {}) {}'.format( num, @@ -4181,7 +4187,7 @@ def find_file_in_paths(self, i): if selection < 0 or selection >= num: selection = 0 - logging.info( + logger.info( recursion_spaces + ' Selected {}: {}'.format( selection, @@ -4219,6 +4225,7 @@ def detect_version_using_script(self, i): import copy detected = False + logger = self.action_object.logger env = i.get('env', {}) @@ -4238,7 +4245,7 @@ def detect_version_using_script(self, i): x += ' <= {}'.format(version_max) if x != '': - logging.info( + logger.info( recursion_spaces + ' - Searching for versions: {}'.format(x)) @@ -4313,11 +4320,11 @@ def find_artifact(self, i): file_name = i['file_name'] os_info = i['os_info'] - + logger = self.action_object.logger env = i['env'] env_path_key = i.get('env_path_key', '') - + logger = self.action_object.logger run_script_input = i.get('run_script_input', {}) extra_paths = i.get('extra_paths', {}) @@ -4362,7 +4369,7 @@ def find_artifact(self, i): if path == '': path_list_tmp = default_path_list else: - logging.info( + logger.info( recursion_spaces + ' # Requested paths: {}'.format(path)) path_list_tmp = path.split(os_info['env_separator']) @@ -4427,7 +4434,7 @@ def find_artifact(self, i): if extra_paths[extra_path] not in env: env[extra_paths[extra_path]] = [] env[extra_paths[extra_path]].append(epath) - logging.info( + logger.info( 
recursion_spaces + ' # Found artifact in {}'.format(file_path)) @@ -4464,6 +4471,8 @@ def find_file_deep(self, i): paths = i['paths'] file_name = i['file_name'] + logger = self.action_object.logger + restrict_paths = i.get('restrict_paths', []) found_paths = [] @@ -4758,7 +4767,20 @@ def clean_some_tmp_files(self, i): return {'return': 0} +def get_version_tag_from_version(version, cached_tags): + tags_to_add = [] + if version != '': + if 'version-' + version not in cached_tags: + cached_tags.append('version-' + version) + if '-git-' in version: + version_without_git_commit = version.split("-git-")[0] + if 'version-' + version_without_git_commit not in cached_tags: + cached_tags.append('version-' + version_without_git_commit) + return {'return': 0} + ############################################################################## + + def find_cached_script(i): """ Internal automation function: find cached script @@ -4808,10 +4830,11 @@ def find_cached_script(i): verbose = i.get('verbose', False) if not verbose: verbose = i.get('v', False) + logger = self_obj.action_object.logger found_cached_scripts = [] - logging.debug( + logger.debug( recursion_spaces + ' - Checking if script execution is already cached ...') @@ -4844,7 +4867,7 @@ def find_cached_script(i): if x not in explicit_cached_tags: explicit_cached_tags.append(x) - logging.debug( + logger.debug( recursion_spaces + ' - Prepared explicit variations: {}'.format(explicit_variation_tags_string)) @@ -4863,15 +4886,16 @@ def find_cached_script(i): if x not in cached_tags: cached_tags.append(x) - logging.debug( + logger.debug( recursion_spaces + ' - Prepared variations: {}'.format(variation_tags_string)) - # Add version - if version != '': - if 'version-' + version not in cached_tags: - cached_tags.append('version-' + version) - explicit_cached_tags.append('version-' + version) + r = get_version_tag_from_version(version, cached_tags) + if r['return'] > 0: + return r + get_version_tag_from_version(version, 
explicit_cached_tags) + if r['return'] > 0: + return r # Add extra cache tags (such as "virtual" for python) if len(extra_cache_tags) > 0: @@ -4901,13 +4925,13 @@ def find_cached_script(i): if len(cached_tags) > 0: search_tags += ',' + ','.join(explicit_cached_tags) - logging.debug( + logger.debug( recursion_spaces + ' - Searching for cached script outputs with the following tags: {}'.format(search_tags)) - r = self_obj.action_object.access({'action': 'find', - 'automation': self_obj.meta['deps']['cache'], - 'tags': search_tags}) + r = self_obj.cache_action.access({'action': 'search', + 'target_name': 'cache', + 'tags': search_tags}) if r['return'] > 0: return r @@ -4930,7 +4954,7 @@ def find_cached_script(i): tmp_version_in_cached_script)} else: found_cached_scripts = [selection['cached_script']] - logging.debug( + logger.debug( recursion_spaces + ' - Found remembered selection with tags "{}"!'.format(search_tags)) break @@ -4986,21 +5010,6 @@ def find_cached_script(i): if r['return'] > 0: return r - # Check if pre-process and detect - # if 'preprocess' in dir(customize_code): - - # logging.debug(recursion_spaces + ' - Running preprocess ...') - - # ii = copy.deepcopy(customize_common_input) - # ii['env'] = env - # ii['meta'] = meta - # # may need to detect versions in multiple paths - # ii['run_script_input'] = run_script_input - - # r = customize_code.preprocess(ii) - # if r['return'] > 0: - # return r - ii = { 'run_script_input': run_script_input, 'env': env, @@ -5040,7 +5049,7 @@ def enable_or_skip_script(meta, env): """ if not isinstance(meta, dict): - logging.info( + logger.info( "The meta entry is not a dictionary for skip/enable if_env: %s", meta) @@ -5242,6 +5251,7 @@ def prepare_and_run_script_with_postprocessing(i, postprocess="postprocess"): verbose = i.get('v', False) show_time = i.get('time', False) + logger = i['self'].action_object.logger recursion = i.get('recursion', False) found_script_tags = i.get('found_script_tags', []) @@ -5334,15 +5344,15 
@@ def prepare_and_run_script_with_postprocessing(i, postprocess="postprocess"): run_script = tmp_file_run + bat_ext run_script_without_cm = tmp_file_run + '-without-cm' + bat_ext - logging.debug( + logger.debug( recursion_spaces + ' - Running native script "{}" from temporal script "{}" in "{}" ...'.format( path_to_run_script, run_script, cur_dir)) if not run_state.get('tmp_silent', False): - logging.info(recursion_spaces + ' ! cd {}'.format(cur_dir)) - logging.info( + logger.info(recursion_spaces + ' ! cd {}'.format(cur_dir)) + logger.info( recursion_spaces + ' ! call {} from {}'.format( path_to_run_script, @@ -5394,11 +5404,11 @@ def prepare_and_run_script_with_postprocessing(i, postprocess="postprocess"): import shutil shutil.copy(run_script, run_script_without_cm) - logging.info( + logger.info( '================================================================================') - logging.info( + logger.info( 'Debug script to run without MLC was recorded: {}'.format(run_script_without_cm)) - logging.info( + logger.info( '================================================================================') # Run final command @@ -5415,12 +5425,12 @@ def prepare_and_run_script_with_postprocessing(i, postprocess="postprocess"): if os.path.isfile(pr): r = utils.load_txt(file_name=pr) if r['return'] == 0: - logging.info( + logger.info( "========================================================") - logging.info("Print file {}:".format(pr)) - logging.info("") - logging.info(r['string']) - logging.info("") + logger.info("Print file {}:".format(pr)) + logger.info("") + logger.info(r['string']) + logger.info("") # Check where to report errors and failures repo_to_report = run_state.get( @@ -5488,7 +5498,7 @@ def prepare_and_run_script_with_postprocessing(i, postprocess="postprocess"): if postprocess != '' and customize_code is not None and postprocess in dir( customize_code): if not run_state.get('tmp_silent', False): - logging.info( + logger.info( recursion_spaces + ' ! 
call "{}" from {}'.format( postprocess, @@ -5519,7 +5529,9 @@ def run_detect_version(customize_code, customize_common_input, if customize_code is not None and 'detect_version' in dir(customize_code): import copy - logging.debug(recursion_spaces + ' - Running detect_version ...') + if "self" in customize_common_input: + logger = customize_common_input["self"].action_object.logger + logger.debug(recursion_spaces + ' - Running detect_version ...') # Update env and state with const utils.merge_dicts({'dict1': env, 'dict2': const, @@ -5547,8 +5559,9 @@ def run_postprocess(customize_code, customize_common_input, recursion_spaces, if customize_code is not None and 'postprocess' in dir(customize_code): import copy - - logging.debug(recursion_spaces + ' - Running postprocess ...') + if run_script_input: + logger = run_script_input['self'].action_object.logger + logger.debug(recursion_spaces + ' - Running postprocess ...') # Update env and state with const utils.merge_dicts({'dict1': env, 'dict2': const, @@ -5705,8 +5718,8 @@ def clean_tmp_files(clean_files, recursion_spaces): Internal: clean tmp files """ -# logging.info('') -# logging.info(recursion_spaces+' - cleaning files {} ...'.format(clean_files)) +# logger.info('') +# logger.info(recursion_spaces+' - cleaning files {} ...'.format(clean_files)) for tmp_file in clean_files: if os.path.isfile(tmp_file): @@ -6153,8 +6166,8 @@ def detect_state_diff(env, saved_env, new_env_keys, ############################################################################## -def select_script_artifact(lst, text, recursion_spaces, - can_skip, script_tags_string, quiet, verbose): +def select_script_item(lst, text, recursion_spaces, + can_skip, script_tags_string, quiet, verbose, logger=None): """ Internal: select script """ @@ -6162,15 +6175,18 @@ def select_script_artifact(lst, text, recursion_spaces, string1 = recursion_spaces + \ ' - More than 1 {} found for "{}":'.format(text, script_tags_string) + if not logger: + return {'return': 1, 
'error': 'No logger provided'} + # If quiet, select 0 (can be sorted for determinism) if quiet: - logging.debug(string1) - logging.debug('Selected default due to "quiet" mode') + logger.debug(string1) + logger.debug('Selected default due to "quiet" mode') return 0 # Select 1 and proceed - logging.info(string1) + logger.info(string1) num = 0 for a in lst: @@ -6189,7 +6205,7 @@ def select_script_artifact(lst, text, recursion_spaces, if version != '': x += ' (Version {})'.format(version) - logging.info(x) + logger.info(x) num += 1 s = 'Make your selection or press Enter for 0' @@ -6207,11 +6223,11 @@ def select_script_artifact(lst, text, recursion_spaces, selection = 0 if selection < 0: - logging.info(recursion_spaces + ' Skipped') + logger.info(recursion_spaces + ' Skipped') else: if selection >= num: selection = 0 - logging.info( + logger.info( recursion_spaces + ' Selected {}: {}'.format( selection, @@ -6445,4 +6461,4 @@ def dump_repro(repro_prefix, rr, run_state): r = auto.test({'x': 'y'}) - logging.info(r) + auto.action_object.logger.info(r) diff --git a/automation/script/module_help.py b/automation/script/module_help.py deleted file mode 100644 index 820378180..000000000 --- a/automation/script/module_help.py +++ /dev/null @@ -1,106 +0,0 @@ -import os -from cmind import utils - -# Pring help about script - - -def print_help(i): - - meta = i.get('meta', '') - path = i.get('path', '') - - if len(meta) == 0 and path == '': - return {'return': 0} - - print('') - print( - 'Help for this CM script ({},{}):'.format( - meta.get( - 'alias', ''), meta.get( - 'uid', ''))) - - print('') - print('Path to this automation recipe: {}'.format(path)) - - variations = meta.get('variations', {}) - if len(variations) > 0: - print('') - print('Available variations:') - print('') - for v in sorted(variations): - print(' _' + v) - - input_mapping = meta.get('input_mapping', {}) - if len(input_mapping) > 0: - print('') - print('Available flags mapped to environment variables:') - 
print('') - for k in sorted(input_mapping): - v = input_mapping[k] - - print(' --{} -> --env.{}'.format(k, v)) - - input_description = meta.get('input_description', {}) - if len(input_description) > 0: - # Check if has important ones (sort) - sorted_keys = [] - all_keys = sorted(list(input_description.keys())) - - for k in sorted( - all_keys, key=lambda x: input_description[x].get('sort', 0)): - v = input_description[k] - if v.get('sort', 0) > 0: - sorted_keys.append(k) - - print('') - print('Available flags (Python API dict keys):') - print('') - for k in all_keys: - v = input_description[k] - n = v.get('desc', '') - - x = ' --' + k - if n != '': - x += ' ({})'.format(n) - - print(x) - - if len(sorted_keys) > 0: - print('') - print('Main flags:') - print('') - for k in sorted_keys: - v = input_description[k] - n = v.get('desc', '') - - x = ' --' + k - - d = None - if 'default' in v: - d = v.get('default', '') - - if d is not None: - x += '=' + d - - c = v.get('choices', []) - if len(c) > 0: - x += ' {' + ','.join(c) + '}' - - if n != '': - x += ' ({})'.format(n) - - print(x) - - print('') - x = input('Would you like to see a Python API with a list of common keys/flags for all scripts including this one (y/N)? 
') - - x = x.strip().lower() - - skip_delayed_help = False if x in ['y', 'yes'] else True - - r = {'return': 0} - - if skip_delayed_help: - r['skip_delayed_help'] = True - - return r diff --git a/automation/script/template-ae-python/README-extra.md b/automation/script/template-ae-python/README-extra.md deleted file mode 100644 index 05e53dc1a..000000000 --- a/automation/script/template-ae-python/README-extra.md +++ /dev/null @@ -1,2 +0,0 @@ -# CM script to run and reproduce experiments - diff --git a/automation/script/template-ae-python/_cm.yaml b/automation/script/template-ae-python/_cm.yaml deleted file mode 100644 index 261e4cf75..000000000 --- a/automation/script/template-ae-python/_cm.yaml +++ /dev/null @@ -1,38 +0,0 @@ -cache: false - -deps: - # Detect host OS features - - tags: detect,os - - # Detect/install python - - tags: get,python - names: - - python - - python3 - -script_name: run - -input_mapping: - experiment: MLC_EXPERIMENT - -default_env: - MLC_EXPERIMENT: '1' - -variations: - install_deps: - script_name: install_deps - - run: - script_name: run - - reproduce: - script_name: reproduce - - plot: - script_name: plot - - analyze: - script_name: analyze - - validate: - script_name: validate diff --git a/automation/script/template-ae-python/analyze.bat b/automation/script/template-ae-python/analyze.bat deleted file mode 100644 index 375cfaebf..000000000 --- a/automation/script/template-ae-python/analyze.bat +++ /dev/null @@ -1,12 +0,0 @@ -@echo off - -set CUR_DIR=%cd% - -echo. -echo Current execution path: %CUR_DIR% -echo Path to script: %MLC_TMP_CURRENT_SCRIPT_PATH% -echo ENV MLC_EXPERIMENT: %MLC_EXPERIMENT% - -rem echo. 
-rem %MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\main.py -rem IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/automation/script/template-ae-python/analyze.sh b/automation/script/template-ae-python/analyze.sh deleted file mode 100644 index 53c10c73c..000000000 --- a/automation/script/template-ae-python/analyze.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -CUR_DIR=${PWD} - -echo "" -echo "Current execution path: ${CUR_DIR}" -echo "Path to script: ${MLC_TMP_CURRENT_SCRIPT_PATH}" -echo "ENV MLC_EXPERIMENT: ${MLC_EXPERIMENT}" - -#echo "" -#${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/main.py -#test $? -eq 0 || exit 1 diff --git a/automation/script/template-ae-python/install_deps.bat b/automation/script/template-ae-python/install_deps.bat deleted file mode 100644 index 3419d9511..000000000 --- a/automation/script/template-ae-python/install_deps.bat +++ /dev/null @@ -1,18 +0,0 @@ -@echo off - -set CUR_DIR=%cd% - -echo. -echo Current execution path: %CUR_DIR% -echo Path to script: %MLC_TMP_CURRENT_SCRIPT_PATH% -echo ENV MLC_EXPERIMENT: %MLC_EXPERIMENT% - -if exist "%MLC_TMP_CURRENT_SCRIPT_PATH%\requirements.txt" ( - - echo. - echo Installing requirements.txt ... - echo. - - %MLC_PYTHON_BIN_WITH_PATH% -m pip install -r %MLC_TMP_CURRENT_SCRIPT_PATH%\requirements.txt - IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% -) diff --git a/automation/script/template-ae-python/install_deps.sh b/automation/script/template-ae-python/install_deps.sh deleted file mode 100644 index 5e8c50a20..000000000 --- a/automation/script/template-ae-python/install_deps.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -CUR_DIR=${PWD} - -echo "" -echo "Current execution path: ${CUR_DIR}" -echo "Path to script: ${MLC_TMP_CURRENT_SCRIPT_PATH}" -echo "ENV MLC_EXPERIMENT: ${MLC_EXPERIMENT}" - -if test -f "${MLC_TMP_CURRENT_SCRIPT_PATH}/requirements.txt"; then - echo "" - echo "Installing requirements.txt ..." 
- echo "" - - ${MLC_PYTHON_BIN_WITH_PATH} -m pip install -r ${MLC_TMP_CURRENT_SCRIPT_PATH}/requirements.txt - test $? -eq 0 || exit 1 -fi diff --git a/automation/script/template-ae-python/main.py b/automation/script/template-ae-python/main.py deleted file mode 100644 index 48b974b7f..000000000 --- a/automation/script/template-ae-python/main.py +++ /dev/null @@ -1,10 +0,0 @@ -import os - -if __name__ == "__main__": - - print('') - print('Main script:') - print('Experiment: {}'.format(os.environ.get('MLC_EXPERIMENT', ''))) - print('') - - exit(0) diff --git a/automation/script/template-ae-python/plot.bat b/automation/script/template-ae-python/plot.bat deleted file mode 100644 index 375cfaebf..000000000 --- a/automation/script/template-ae-python/plot.bat +++ /dev/null @@ -1,12 +0,0 @@ -@echo off - -set CUR_DIR=%cd% - -echo. -echo Current execution path: %CUR_DIR% -echo Path to script: %MLC_TMP_CURRENT_SCRIPT_PATH% -echo ENV MLC_EXPERIMENT: %MLC_EXPERIMENT% - -rem echo. -rem %MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\main.py -rem IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/automation/script/template-ae-python/plot.sh b/automation/script/template-ae-python/plot.sh deleted file mode 100644 index 53c10c73c..000000000 --- a/automation/script/template-ae-python/plot.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -CUR_DIR=${PWD} - -echo "" -echo "Current execution path: ${CUR_DIR}" -echo "Path to script: ${MLC_TMP_CURRENT_SCRIPT_PATH}" -echo "ENV MLC_EXPERIMENT: ${MLC_EXPERIMENT}" - -#echo "" -#${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/main.py -#test $? -eq 0 || exit 1 diff --git a/automation/script/template-ae-python/reproduce.bat b/automation/script/template-ae-python/reproduce.bat deleted file mode 100644 index 375cfaebf..000000000 --- a/automation/script/template-ae-python/reproduce.bat +++ /dev/null @@ -1,12 +0,0 @@ -@echo off - -set CUR_DIR=%cd% - -echo. 
-echo Current execution path: %CUR_DIR% -echo Path to script: %MLC_TMP_CURRENT_SCRIPT_PATH% -echo ENV MLC_EXPERIMENT: %MLC_EXPERIMENT% - -rem echo. -rem %MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\main.py -rem IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/automation/script/template-ae-python/reproduce.sh b/automation/script/template-ae-python/reproduce.sh deleted file mode 100644 index 53c10c73c..000000000 --- a/automation/script/template-ae-python/reproduce.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -CUR_DIR=${PWD} - -echo "" -echo "Current execution path: ${CUR_DIR}" -echo "Path to script: ${MLC_TMP_CURRENT_SCRIPT_PATH}" -echo "ENV MLC_EXPERIMENT: ${MLC_EXPERIMENT}" - -#echo "" -#${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/main.py -#test $? -eq 0 || exit 1 diff --git a/automation/script/template-ae-python/run.bat b/automation/script/template-ae-python/run.bat deleted file mode 100644 index f1b69d26d..000000000 --- a/automation/script/template-ae-python/run.bat +++ /dev/null @@ -1,12 +0,0 @@ -@echo off - -set CUR_DIR=%cd% - -echo. -echo Current execution path: %CUR_DIR% -echo Path to script: %MLC_TMP_CURRENT_SCRIPT_PATH% -echo ENV MLC_EXPERIMENT: %MLC_EXPERIMENT% - -echo. -%MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\main.py -IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/automation/script/template-ae-python/run.sh b/automation/script/template-ae-python/run.sh deleted file mode 100644 index a4b86e69a..000000000 --- a/automation/script/template-ae-python/run.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -CUR_DIR=${PWD} - -echo "" -echo "Current execution path: ${CUR_DIR}" -echo "Path to script: ${MLC_TMP_CURRENT_SCRIPT_PATH}" -echo "ENV MLC_EXPERIMENT: ${MLC_EXPERIMENT}" - -echo "" -${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/main.py -test $? 
-eq 0 || exit 1 diff --git a/automation/script/template-ae-python/validate.bat b/automation/script/template-ae-python/validate.bat deleted file mode 100644 index 375cfaebf..000000000 --- a/automation/script/template-ae-python/validate.bat +++ /dev/null @@ -1,12 +0,0 @@ -@echo off - -set CUR_DIR=%cd% - -echo. -echo Current execution path: %CUR_DIR% -echo Path to script: %MLC_TMP_CURRENT_SCRIPT_PATH% -echo ENV MLC_EXPERIMENT: %MLC_EXPERIMENT% - -rem echo. -rem %MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\main.py -rem IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/automation/script/template-ae-python/validate.sh b/automation/script/template-ae-python/validate.sh deleted file mode 100644 index 53c10c73c..000000000 --- a/automation/script/template-ae-python/validate.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -CUR_DIR=${PWD} - -echo "" -echo "Current execution path: ${CUR_DIR}" -echo "Path to script: ${MLC_TMP_CURRENT_SCRIPT_PATH}" -echo "ENV MLC_EXPERIMENT: ${MLC_EXPERIMENT}" - -#echo "" -#${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/main.py -#test $? 
-eq 0 || exit 1 diff --git a/automation/script/template-python/README-extra.md b/automation/script/template-python/README-extra.md deleted file mode 100644 index 582991f6d..000000000 --- a/automation/script/template-python/README-extra.md +++ /dev/null @@ -1 +0,0 @@ -# CM script diff --git a/automation/script/template-python/_cm.yaml b/automation/script/template-python/_cm.yaml deleted file mode 100644 index 11f646860..000000000 --- a/automation/script/template-python/_cm.yaml +++ /dev/null @@ -1,23 +0,0 @@ -cache: false - -deps: - # Detect host OS features - - tags: detect,os - - # Detect/install python - - tags: get,python - names: - - python - - python3 - -input_mapping: - var1: MLC_VAR1 - req: PIP_REQUIREMENTS - -default_env: - MLC_VAR1: 'something' - -variations: - req: - env: - PIP_REQUIREMENTS: True diff --git a/automation/script/template-python/customize.py b/automation/script/template-python/customize.py deleted file mode 100644 index 8961ab5ca..000000000 --- a/automation/script/template-python/customize.py +++ /dev/null @@ -1,32 +0,0 @@ -from cmind import utils -import os - - -def preprocess(i): - - print('') - print('Preprocessing ...') - - os_info = i['os_info'] - - env = i['env'] - - meta = i['meta'] - - automation = i['automation'] - - quiet = (env.get('MLC_QUIET', False) == 'yes') - - print(' ENV MLC_VAR1: {}'.format(env.get('MLC_VAR1', ''))) - - return {'return': 0} - - -def postprocess(i): - - print('') - print('Postprocessing ...') - - env = i['env'] - - return {'return': 0} diff --git a/automation/script/template-python/main.py b/automation/script/template-python/main.py deleted file mode 100644 index 68245e7bd..000000000 --- a/automation/script/template-python/main.py +++ /dev/null @@ -1,10 +0,0 @@ -import os - -if __name__ == "__main__": - - print('') - print('Main script:') - print('ENV MLC_VAR1: {}'.format(os.environ.get('MLC_VAR1', ''))) - print('') - - exit(0) diff --git a/automation/script/template-python/requirements.txt 
b/automation/script/template-python/requirements.txt deleted file mode 100644 index e69de29bb..000000000 diff --git a/automation/script/template-python/run.bat b/automation/script/template-python/run.bat deleted file mode 100644 index 11e897362..000000000 --- a/automation/script/template-python/run.bat +++ /dev/null @@ -1,25 +0,0 @@ -@echo off - -set CUR_DIR=%cd% - -echo. -echo Current execution path: %CUR_DIR% -echo Path to script: %MLC_TMP_CURRENT_SCRIPT_PATH% -echo ENV PIP_REQUIREMENTS: %PIP_REQUIREMENTS% -echo ENV MLC_VAR1: %MLC_VAR1% - -if "%PIP_REQUIREMENTS%" == "True" ( - if exist "%MLC_TMP_CURRENT_SCRIPT_PATH%\requirements.txt" ( - - echo. - echo Installing requirements.txt ... - echo. - - %MLC_PYTHON_BIN_WITH_PATH% -m pip install -r %MLC_TMP_CURRENT_SCRIPT_PATH%\requirements.txt - IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% - ) -) - -echo. -%MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\main.py -IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/automation/script/template-python/run.sh b/automation/script/template-python/run.sh deleted file mode 100644 index a3e2021b9..000000000 --- a/automation/script/template-python/run.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -CUR_DIR=${PWD} - -echo "" -echo "Current execution path: ${CUR_DIR}" -echo "Path to script: ${MLC_TMP_CURRENT_SCRIPT_PATH}" -echo "ENV PIP_REQUIREMENTS: ${PIP_REQUIREMENTS}" -echo "ENV MLC_VAR1: ${MLC_VAR1}" - -if [ "${PIP_REQUIREMENTS}" == "True" ]; then - if test -f "${MLC_TMP_CURRENT_SCRIPT_PATH}/requirements.txt"; then - echo "" - echo "Installing requirements.txt ..." - echo "" - - ${MLC_PYTHON_BIN_WITH_PATH} -m pip install -r ${MLC_TMP_CURRENT_SCRIPT_PATH}/requirements.txt - test $? -eq 0 || exit 1 - fi -fi - -echo "" -${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/main.py -test $? 
-eq 0 || exit 1 diff --git a/automation/script/template-pytorch/README-extra.md b/automation/script/template-pytorch/README-extra.md deleted file mode 100644 index 582991f6d..000000000 --- a/automation/script/template-pytorch/README-extra.md +++ /dev/null @@ -1 +0,0 @@ -# CM script diff --git a/automation/script/template-pytorch/_cm.yaml b/automation/script/template-pytorch/_cm.yaml deleted file mode 100644 index 22cd7a635..000000000 --- a/automation/script/template-pytorch/_cm.yaml +++ /dev/null @@ -1,42 +0,0 @@ -cache: false - -deps: - # Detect host OS features - - tags: detect,os - - # Detect/install python - - tags: get,python - names: - - python - - python3 - - - tags: get,generic-python-lib,_torch - skip_if_env: - USE_CUDA: - - yes - - - tags: get,generic-python-lib,_torch_cuda - enable_if_env: - USE_CUDA: - - yes - - - tags: get,generic-python-lib,_package.numpy - - -input_mapping: - var1: MLC_VAR1 - req: PIP_REQUIREMENTS - -default_env: - MLC_VAR1: 'something' - -variations: - req: - env: - PIP_REQUIREMENTS: True - - cuda: - env: - USE_CUDA: yes - deps: - - tags: get,cuda diff --git a/automation/script/template-pytorch/customize.py b/automation/script/template-pytorch/customize.py deleted file mode 100644 index 8961ab5ca..000000000 --- a/automation/script/template-pytorch/customize.py +++ /dev/null @@ -1,32 +0,0 @@ -from cmind import utils -import os - - -def preprocess(i): - - print('') - print('Preprocessing ...') - - os_info = i['os_info'] - - env = i['env'] - - meta = i['meta'] - - automation = i['automation'] - - quiet = (env.get('MLC_QUIET', False) == 'yes') - - print(' ENV MLC_VAR1: {}'.format(env.get('MLC_VAR1', ''))) - - return {'return': 0} - - -def postprocess(i): - - print('') - print('Postprocessing ...') - - env = i['env'] - - return {'return': 0} diff --git a/automation/script/template-pytorch/main.py b/automation/script/template-pytorch/main.py deleted file mode 100644 index 3bfcd7572..000000000 --- 
a/automation/script/template-pytorch/main.py +++ /dev/null @@ -1,15 +0,0 @@ -import os - -import torch - -if __name__ == "__main__": - - print('') - print('Main script:') - print('ENV MLC_VAR1: {}'.format(os.environ.get('MLC_VAR1', ''))) - print('ENV USE_CUDA: {}'.format(os.environ.get('USE_CUDA', ''))) - print('') - print('PyTorch version: {}'.format(torch.__version__)) - print('') - - exit(0) diff --git a/automation/script/template-pytorch/requirements.txt b/automation/script/template-pytorch/requirements.txt deleted file mode 100644 index e69de29bb..000000000 diff --git a/automation/script/template-pytorch/run.bat b/automation/script/template-pytorch/run.bat deleted file mode 100644 index 11e897362..000000000 --- a/automation/script/template-pytorch/run.bat +++ /dev/null @@ -1,25 +0,0 @@ -@echo off - -set CUR_DIR=%cd% - -echo. -echo Current execution path: %CUR_DIR% -echo Path to script: %MLC_TMP_CURRENT_SCRIPT_PATH% -echo ENV PIP_REQUIREMENTS: %PIP_REQUIREMENTS% -echo ENV MLC_VAR1: %MLC_VAR1% - -if "%PIP_REQUIREMENTS%" == "True" ( - if exist "%MLC_TMP_CURRENT_SCRIPT_PATH%\requirements.txt" ( - - echo. - echo Installing requirements.txt ... - echo. - - %MLC_PYTHON_BIN_WITH_PATH% -m pip install -r %MLC_TMP_CURRENT_SCRIPT_PATH%\requirements.txt - IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% - ) -) - -echo. 
-%MLC_PYTHON_BIN_WITH_PATH% %MLC_TMP_CURRENT_SCRIPT_PATH%\main.py -IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/automation/script/template-pytorch/run.sh b/automation/script/template-pytorch/run.sh deleted file mode 100644 index a3e2021b9..000000000 --- a/automation/script/template-pytorch/run.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -CUR_DIR=${PWD} - -echo "" -echo "Current execution path: ${CUR_DIR}" -echo "Path to script: ${MLC_TMP_CURRENT_SCRIPT_PATH}" -echo "ENV PIP_REQUIREMENTS: ${PIP_REQUIREMENTS}" -echo "ENV MLC_VAR1: ${MLC_VAR1}" - -if [ "${PIP_REQUIREMENTS}" == "True" ]; then - if test -f "${MLC_TMP_CURRENT_SCRIPT_PATH}/requirements.txt"; then - echo "" - echo "Installing requirements.txt ..." - echo "" - - ${MLC_PYTHON_BIN_WITH_PATH} -m pip install -r ${MLC_TMP_CURRENT_SCRIPT_PATH}/requirements.txt - test $? -eq 0 || exit 1 - fi -fi - -echo "" -${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/main.py -test $? -eq 0 || exit 1 diff --git a/automation/script/template/README-extra.md b/automation/script/template/README-extra.md deleted file mode 100644 index 582991f6d..000000000 --- a/automation/script/template/README-extra.md +++ /dev/null @@ -1 +0,0 @@ -# CM script diff --git a/automation/script/template/customize.py b/automation/script/template/customize.py deleted file mode 100644 index bd7c12dd3..000000000 --- a/automation/script/template/customize.py +++ /dev/null @@ -1,24 +0,0 @@ -from cmind import utils -import os - - -def preprocess(i): - - os_info = i['os_info'] - - env = i['env'] - - meta = i['meta'] - - automation = i['automation'] - - quiet = (env.get('MLC_QUIET', False) == 'yes') - - return {'return': 0} - - -def postprocess(i): - - env = i['env'] - - return {'return': 0} diff --git a/automation/script/template_list_of_scripts.md b/automation/script/template_list_of_scripts.md deleted file mode 100644 index 07fb95cb7..000000000 --- a/automation/script/template_list_of_scripts.md +++ /dev/null @@ -1,52 +0,0 @@ -[ [Back 
to index](README.md) ] - - - -This is an automatically generated list of portable and reusable automation recipes (CM scripts) -with a [human-friendly interface (CM)](https://github.com/mlcommons/ck) -to run a growing number of ad-hoc MLPerf, MLOps, and DevOps scripts -from [MLCommons projects](https://github.com/mlcommons/cm4mlops/tree/main/script) -and [research papers](https://www.youtube.com/watch?v=7zpeIVwICa4) -in a unified way on any operating system with any software and hardware -natively or inside containers. - -Click on any automation recipe below to learn how to run and reuse it -via CM command line, Python API or GUI. - -CM scripts can easily chained together into automation workflows using `deps` and `tags` keys -while automatically updating all environment variables and paths -for a given task and platform [using simple JSON or YAML](https://github.com/mlcommons/ck/blob/master/mlc-mlops/script/app-image-classification-onnx-py/_cm.yaml). - - -*Note that CM is a community project being developed and extended by [MLCommons members and individual contributors](../CONTRIBUTING.md) - - you can find source code of CM scripts maintained by MLCommons [here](../mlc-mlops/script). 
- Please join [Discord server](https://discord.gg/JjWNWXKxwT) to participate in collaborative developments or provide your feedback.* - - -# License - -[Apache 2.0](LICENSE.md) - - -# Copyright - -2022-2024 [MLCommons](https://mlcommons.org) - - - - - -# List of CM scripts by categories - -{{MLC_TOC_CATEGORIES}} - -{{MLC_TOC2}} - -# List of all sorted CM scripts - -{{MLC_TOC}} - - -{{MLC_MAIN}} diff --git a/automation/script/template-ae-python/customize.py b/automation/script/templates/default/customize.py similarity index 100% rename from automation/script/template-ae-python/customize.py rename to automation/script/templates/default/customize.py diff --git a/automation/script/template/run.bat b/automation/script/templates/default/run.bat similarity index 100% rename from automation/script/template/run.bat rename to automation/script/templates/default/run.bat diff --git a/automation/script/template/run.sh b/automation/script/templates/default/run.sh similarity index 100% rename from automation/script/template/run.sh rename to automation/script/templates/default/run.sh diff --git a/git_commit_hash.txt b/git_commit_hash.txt index 544b69b88..99d2a9a26 100644 --- a/git_commit_hash.txt +++ b/git_commit_hash.txt @@ -1 +1 @@ -04d9d38807dc640eef528b282a2100a95332e395 +3dacfdc14894006f456d3b14d1b174e2e9e6e19f diff --git a/pyproject.toml b/pyproject.toml index b089f0614..aeeba2ae3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "mlc-scripts" -version = "0.0.1" +dynamic = ["version"] description = "Automation scripts for running ML applications using MLC interface" authors = [ { name = "MLCommons", email = "systems@mlcommons.org" } @@ -36,5 +36,8 @@ Issues = "https://github.com/mlcommons/mlperf-automations/issues" packages = [] include-package-data = true +[tool.setuptools.dynamic] +version = {file = "VERSION"} + [tool.setuptools.package-data] "mlcr" = ["README.md", "VERSION", "git_commit_hash.txt"] 
diff --git a/script/app-mlperf-inference-mlcommons-python/customize.py b/script/app-mlperf-inference-mlcommons-python/customize.py index c621b74cf..b7c0a5efc 100644 --- a/script/app-mlperf-inference-mlcommons-python/customize.py +++ b/script/app-mlperf-inference-mlcommons-python/customize.py @@ -68,7 +68,7 @@ def preprocess(i): str(env['MLC_MLPERF_LOADGEN_BATCH_SIZE']) if env.get('MLC_MLPERF_LOADGEN_QUERY_COUNT', '') != '' and not env.get('MLC_TMP_IGNORE_MLPERF_QUERY_COUNT', False) and ( - env['MLC_MLPERF_LOADGEN_MODE'] == 'accuracy' or 'gptj' in env['MLC_MODEL'] or 'llama2' in env['MLC_MODEL'] or 'mixtral' in env['MLC_MODEL'] or 'llama3' in env['MLC_MODEL']) and env.get('MLC_MLPERF_RUN_STYLE', '') != "valid": + env['MLC_MLPERF_LOADGEN_MODE'] == 'accuracy' or 'gptj' in env['MLC_MODEL'] or 'llama2' in env['MLC_MODEL'] or 'mixtral' in env['MLC_MODEL'] or 'llama3' in env['MLC_MODEL'] or 'pointpainting' in env['MLC_MODEL']) and (env.get('MLC_MLPERF_RUN_STYLE', '') != "valid" or 'pointpainting' in env['MLC_MODEL']): env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --count " + \ env['MLC_MLPERF_LOADGEN_QUERY_COUNT'] @@ -524,6 +524,28 @@ def get_run_cmd_reference( cmd = cmd.replace("--count", "--total-sample-count") cmd = cmd.replace("--max-batchsize", "--batch-size") + elif "pointpainting" in env['MLC_MODEL']: + env['RUN_DIR'] = os.path.join( + env['MLC_MLPERF_INFERENCE_SOURCE'], + "automotive", + "3d-object-detection") + + cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " main.py " \ + " --dataset waymo" + \ + " --dataset-path " + env['MLC_DATASET_WAYMO_PATH'] + \ + " --lidar-path " + env['MLC_ML_MODEL_POINT_PILLARS_PATH'] + \ + " --segmentor-path " + env['MLC_ML_MODEL_DPLAB_RESNET50_PATH'] + \ + " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \ + " --output " + env['MLC_MLPERF_OUTPUT_DIR'] + \ + " --dtype " + env['MLC_MLPERF_MODEL_PRECISION'].replace("float", "fp") + \ + scenario_extra_options + \ + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + mode_extra_options + + if 
env.get('MLC_MLPERF_POINTPAINTING_TIME', '') != '': + cmd += f" --time {env['MLC_MLPERF_POINTPAINTING_TIME']}" + + print(cmd) + if env.get('MLC_NETWORK_LOADGEN', '') in ["lon", "sut"]: cmd = cmd + " " + "--network " + env['MLC_NETWORK_LOADGEN'] if env.get('MLC_NETWORK_LOADGEN_SUT_SERVERS', []): diff --git a/script/app-mlperf-inference-mlcommons-python/meta.yaml b/script/app-mlperf-inference-mlcommons-python/meta.yaml index a23acee4d..17907749f 100644 --- a/script/app-mlperf-inference-mlcommons-python/meta.yaml +++ b/script/app-mlperf-inference-mlcommons-python/meta.yaml @@ -66,6 +66,9 @@ input_mapping: multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY network: MLC_NETWORK_LOADGEN sut_servers: MLC_NETWORK_LOADGEN_SUT_SERVERS + pointpillars_checkpoint_path: MLC_ML_MODEL_POINT_PILLARS_PATH + deeplab_resnet50_path: MLC_ML_MODEL_DPLAB_RESNET50_PATH + waymo_path: MLC_DATASET_WAYMO_PATH # Duplicate CM environment variables to the ones used in native apps env_key_mappings: @@ -491,7 +494,6 @@ deps: - tags: get,ml-model,llama3 names: - llama3-405b-model - - llama3-402b-model enable_if_env: MLC_MODEL: - llama3_1-405b @@ -502,6 +504,26 @@ deps: MLC_RUN_STATE_DOCKER: - "yes" + ## pointpainting + - tags: get,ml-model,pointpillars + names: + - pointpillars-model + enable_if_env: + MLC_MODEL: + - pointpainting + skip_if_env: + MLC_RUN_STATE_DOCKER: + - "yes" + - tags: get,ml-model,resnet50-deeplab + enable_if_env: + MLC_MODEL: + - pointpainting + skip_if_env: + MLC_RUN_STATE_DOCKER: + - "yes" + names: + - resnet50-deeplab-model + ######################################################################## # Install datasets @@ -641,6 +663,17 @@ deps: MLC_USE_DATASET_FROM_HOST: - "yes" + ## waymo for pointpillats + - tags: get,dataset,waymo + enable_if_env: + MLC_MODEL: + - pointpainting + skip_if_env: + MLC_RUN_STATE_DOCKER: + - "yes" + names: + - waymo-dataset + ## llama3_1 dataset - tags: get,dataset,mlperf,inference,llama3,_validation names: @@ -1299,6 
+1332,45 @@ variations: MLC_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL: "https://data.pyg.org/whl/torch-<<>>+cpu.html" MLC_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL_DGL: "https://data.dgl.ai/wheels/torch-<<>>/repo.html" + pointpainting: + group: models + env: + MLC_MODEL: pointpainting + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://github.com/mlcommons/mlperf-automations/tree/dev/script/get-ml-model-pointpainting,https://github.com/mlcommons/mlperf-automations/tree/dev/script/get-ml-model-resnet50-deeplab" + adr: + pytorch: + version_max: "2.2.2" + torchvision: + version_max: "0.17.2" + deps: + - tags: get,generic-python-lib,_package.shapely + - tags: get,generic-python-lib,_package.numba + - tags: get,generic-python-lib,_package.open3d + - tags: get,generic-python-lib,_package.numpy + version_max: "1.26.4" + names: + - numpy + - tags: get,generic-python-lib,_package.numpy + version_max: "2.0.2" + names: + - numpy-upgrade + - tags: get,generic-python-lib,_package.numpy + version_max: "1.26.4" + names: + - numpy-downgrade + - tags: get,generic-python-lib,_package.tensorboard + - tags: get,generic-python-lib,_package.onnxruntime + - tags: get,generic-python-lib,_package.opencv-python + - tags: get,generic-python-lib,_package.scikit-image + - tags: get,generic-python-lib,_package.scipy + version_max: "1.11.2" + names: + - scipy + - tags: get,generic-python-lib,_package.ninja + - tags: get,generic-sys-util,_ffmpeg + - tags: get,generic-sys-util,_libsm6 + - tags: get,generic-sys-util,_libxext6 + llama3_1-405b: group: models env: diff --git a/script/app-mlperf-inference-nvidia/meta.yaml b/script/app-mlperf-inference-nvidia/meta.yaml index eac133277..472eb9383 100644 --- a/script/app-mlperf-inference-nvidia/meta.yaml +++ b/script/app-mlperf-inference-nvidia/meta.yaml @@ -481,6 +481,8 @@ variations: names: - nvtx - tags: get,generic-python-lib,_package.cuda-python + version_max: 12.6.2 + version_max_usable: 12.6.2 names: - cuda-python - tags: 
get,generic-python-lib,_package.ninja diff --git a/script/app-mlperf-inference/customize.py b/script/app-mlperf-inference/customize.py index fbe70dde4..f5e3619d0 100644 --- a/script/app-mlperf-inference/customize.py +++ b/script/app-mlperf-inference/customize.py @@ -5,7 +5,6 @@ import shutil import subprocess import copy -import mlc import platform import sys import mlperf_utils @@ -60,6 +59,7 @@ def postprocess(i): inp = i['input'] env['CMD'] = '' state = i['state'] + mlc = i['automation'].action_object # if env.get('MLC_MLPERF_USER_CONF', '') == '': # return {'return': 0} @@ -129,6 +129,13 @@ def postprocess(i): accuracy_log_file_option_name = " --mlperf-accuracy-file " datatype_option = " --dtype " + env['MLC_IMAGENET_ACCURACY_DTYPE'] + elif model == "pointpainting": + accuracy_filename = "accuracy-waymo.py" + accuracy_filepath = os.path.join( + env['MLC_MLPERF_INFERENCE_POINTPAINTING_PATH'], accuracy_filename) + accuracy_log_file_option_name = " --mlperf-accuracy-file " + datatype_option = "" + elif model == "retinanet": accuracy_filename = "accuracy-openimages.py" accuracy_filepath = os.path.join(env['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", @@ -354,8 +361,14 @@ def postprocess(i): "os_version": platform.platform(), "cpu_version": platform.processor(), "python_version": sys.version, - "mlc_version": mlc.__version__ } + try: + import importlib.metadata + mlc_version = importlib.metadata.version("mlc") + host_info["mlc_version"] = mlc_version + except Exception as e: + error = format(e) + mlc_version = "unknown" x = '' if env.get('MLC_HOST_OS_FLAVOR', '') != '': @@ -405,7 +418,7 @@ def postprocess(i): readme_init = "*Check [CM MLPerf docs](https://docs.mlcommons.org/inference) for more details.*\n\n" readme_body = "## Host platform\n\n* OS version: {}\n* CPU version: {}\n* Python version: {}\n* MLC version: {}\n\n".format(platform.platform(), - platform.processor(), sys.version, mlc.__version__) + platform.processor(), sys.version, 
mlc_version) x = repo_name if repo_hash != '': @@ -629,7 +642,7 @@ def postprocess(i): 'new_env', os.path.join( output_dir, - "os_info.json")) + "os_info.json"), mlc) dump_script_output( "detect,cpu", env, @@ -637,7 +650,7 @@ def postprocess(i): 'new_env', os.path.join( output_dir, - "cpu_info.json")) + "cpu_info.json"), mlc) env['MLC_DUMP_RAW_PIP_FREEZE_FILE_PATH'] = os.path.join( env['MLC_MLPERF_OUTPUT_DIR'], "pip_freeze.raw") dump_script_output( @@ -647,12 +660,12 @@ def postprocess(i): 'new_state', os.path.join( output_dir, - "pip_freeze.json")) + "pip_freeze.json"), mlc) return {'return': 0} -def dump_script_output(script_tags, env, state, output_key, dump_file): +def dump_script_output(script_tags, env, state, output_key, dump_file, mlc): mlc_input = {'action': 'run', 'automation': 'script', diff --git a/script/app-mlperf-inference/meta.yaml b/script/app-mlperf-inference/meta.yaml index a8381c323..9330c6d49 100644 --- a/script/app-mlperf-inference/meta.yaml +++ b/script/app-mlperf-inference/meta.yaml @@ -261,6 +261,10 @@ variations: reference,rgat: default_variations: backend: pytorch + + reference,pointpainting: + default_variations: + backend: pytorch reference,sdxl_: default_variations: @@ -823,6 +827,51 @@ variations: - igbh-original - igbh-dataset + pointpainting: + group: + model + add_deps_recursive: + mlperf-inference-implementation: + tags: _pointpainting + env: + MLC_MODEL: + pointpainting + docker: + deps: + - tags: get,dataset,waymo + enable_if_env: + MLC_USE_DATASET_FROM_HOST: + - 'yes' + names: + - waymo-dataset + - tags: get,ml-model,pointpillars + enable_if_env: + MLC_USE_DATASET_FROM_HOST: + - 'yes' + names: + - pointpillars-model + - tags: get,ml-model,resnet50-deeplab + enable_if_env: + MLC_USE_DATASET_FROM_HOST: + - 'yes' + names: + - resnet50-deeplab-model + posthook_deps: + - enable_if_env: + MLC_MLPERF_LOADGEN_MODE: + - accuracy + - all + MLC_MLPERF_ACCURACY_RESULTS_DIR: + - 'on' + skip_if_env: + MLC_MLPERF_IMPLEMENTATION: + - nvidia + 
names: + - mlperf-accuracy-script + - waymo-accuracy-script + tags: run,accuracy,mlperf,_waymo + + llama3_1-405b: group: model @@ -1904,6 +1953,9 @@ docker: - "${{ MLC_DATASET_KITS19_PREPROCESSED_PATH }}:${{ MLC_DATASET_KITS19_PREPROCESSED_PATH }}" - "${{ MLC_DATASET_IGBH_PATH }}:${{ MLC_DATASET_IGBH_PATH }}" - "${{ MLC_ML_MODEL_RGAT_CHECKPOINT_PATH }}:${{ MLC_ML_MODEL_RGAT_CHECKPOINT_PATH }}" + - "${{ MLC_DATASET_WAYMO_PATH }}:${{ MLC_DATASET_WAYMO_PATH }}" + - "${{ MLC_ML_MODEL_POINT_PILLARS_PATH }}:${{ MLC_ML_MODEL_POINT_PILLARS_PATH }}" + - "${{ MLC_ML_MODEL_DPLAB_RESNET50_PATH }}:${{ MLC_ML_MODEL_DPLAB_RESNET50_PATH }}" skip_run_cmd: 'no' shm_size: '32gb' interactive: True diff --git a/script/build-dockerfile/customize.py b/script/build-dockerfile/customize.py index 50a4c987c..8a01ef753 100644 --- a/script/build-dockerfile/customize.py +++ b/script/build-dockerfile/customize.py @@ -4,6 +4,7 @@ import json import re import shutil +from utils import * def preprocess(i): @@ -255,8 +256,7 @@ def preprocess(i): 'MLC_DOCKER_USE_DEFAULT_USER', '') == '': env['MLC_DOCKER_USE_DEFAULT_USER'] = 'yes' - if docker_user and str(env.get('MLC_DOCKER_USE_DEFAULT_USER', '')).lower() not in [ - "yes", "1", "true"]: + if docker_user and not is_true(env.get('MLC_DOCKER_USE_DEFAULT_USER', '')): f.write('RUN groupadd -g $GID -o ' + docker_group + EOL) @@ -283,16 +283,19 @@ def preprocess(i): dockerfile_env_input_string = dockerfile_env_input_string + " --env." 
+ \ docker_env_key + "=" + str(dockerfile_env[docker_env_key]) - workdir = get_value(env, config, 'WORKDIR', 'MLC_DOCKER_WORKDIR') - if workdir and (f"""/home/{docker_user}""" not in workdir or str(env.get('MLC_DOCKER_USE_DEFAULT_USER', '')).lower() not in [ - "yes", "1", "true"]): + workdir = env.get('WORKDIR', '') + if workdir == '': + workdir = f"""/home/{docker_user}""" + + if f"""/home/{docker_user}""" not in workdir or not is_true( + env.get('MLC_DOCKER_USE_DEFAULT_USER', '')): f.write('WORKDIR ' + workdir + EOL) f.write(EOL + '# Install python packages' + EOL) python = get_value(env, config, 'PYTHON', 'MLC_DOCKERFILE_PYTHON') docker_use_virtual_python = env.get('MLC_DOCKER_USE_VIRTUAL_PYTHON', "yes") - if str(docker_use_virtual_python).lower() not in ["no", "0", "false"]: + if not is_false(docker_use_virtual_python): f.write('RUN {} -m venv $HOME/venv/mlc'.format(python) + " " + EOL) f.write('ENV PATH="$HOME/venv/mlc/bin:$PATH"' + EOL) # f.write('RUN . /opt/venv/mlc/bin/activate' + EOL) @@ -342,8 +345,7 @@ def preprocess(i): for y in x.split(','): f.write('RUN ' + y + EOL) - if str(env.get('MLC_DOCKER_SKIP_MLC_SYS_UPGRADE', False) - ).lower() not in ["true", "1", "yes"]: + if not is_true(env.get('MLC_DOCKER_SKIP_MLC_SYS_UPGRADE', False)): f.write(EOL + '# Install all system dependencies' + EOL) f.write('RUN mlc run script --tags=get,sys-utils-mlc --quiet' + EOL) @@ -368,14 +370,12 @@ def preprocess(i): env['MLC_DOCKER_RUN_CMD'] += "mlc version" skip_extra = True else: - if str(env.get('MLC_DOCKER_NOT_PULL_UPDATE', 'False') - ).lower() not in ["yes", "1", "true"]: + if not is_true(env.get('MLC_DOCKER_NOT_PULL_UPDATE', 'False')): env['MLC_DOCKER_RUN_CMD'] += "mlc pull repo && " env['MLC_DOCKER_RUN_CMD'] += "mlc run script --tags=" + \ env['MLC_DOCKER_RUN_SCRIPT_TAGS'] + ' --quiet' else: - if str(env.get('MLC_DOCKER_NOT_PULL_UPDATE', 'False') - ).lower() not in ["yes", "1", "true"]: + if not is_true(env.get('MLC_DOCKER_NOT_PULL_UPDATE', 'False')): 
env['MLC_DOCKER_RUN_CMD'] = "mlc pull repo && " + \ env['MLC_DOCKER_RUN_CMD'] @@ -394,8 +394,8 @@ def preprocess(i): if run_cmd_extra != '': x += ' ' + run_cmd_extra - if env.get('MLC_DOCKER_RUN_SCRIPT_TAGS', '') != '' and str(env.get( - 'MLC_DOCKER_ADD_DEPENDENT_SCRIPTS_RUN_COMMANDS', '')).lower() in ["yes", "1", "true"]: + if env.get('MLC_DOCKER_RUN_SCRIPT_TAGS', '') != '' and is_true(env.get( + 'MLC_DOCKER_ADD_DEPENDENT_SCRIPTS_RUN_COMMANDS', '')): mlc_input = {'action': 'run', 'automation': 'script', 'tags': f"""{env['MLC_DOCKER_RUN_SCRIPT_TAGS']}""", @@ -417,8 +417,8 @@ def preprocess(i): f.write(x + EOL) # fake_run to install the dependent scripts and caching them - if not "run" in env['MLC_DOCKER_RUN_CMD'] and str( - env.get('MLC_REAL_RUN', False)).lower() in ["false", "0", "no"]: + if not "run" in env['MLC_DOCKER_RUN_CMD'] and is_false( + env.get('MLC_REAL_RUN', False)): fake_run = dockerfile_env_input_string x = 'RUN ' + env['MLC_DOCKER_RUN_CMD'] + fake_run + run_cmd_extra diff --git a/script/build-dockerfile/dockerinfo.json b/script/build-dockerfile/dockerinfo.json index cfb739e77..2b222c01c 100644 --- a/script/build-dockerfile/dockerinfo.json +++ b/script/build-dockerfile/dockerinfo.json @@ -22,11 +22,9 @@ "GID": "", "GROUP": "mlc", "SHELL": "[\"/bin/bash\", \"-c\"]", - "WORKDIR": "/home/mlcuser", "distros": { "ubuntu": { "USER": "ubuntu", - "WORKDIR": "/home/ubuntu", "package-manager-update-cmd": "apt-get update -y", "package-manager-get-cmd": "apt-get install -y", "packages": [ diff --git a/script/clean-nvidia-mlperf-inference-scratch-space/customize.py b/script/clean-nvidia-mlperf-inference-scratch-space/customize.py index 2e3d4bc64..d719c02a6 100644 --- a/script/clean-nvidia-mlperf-inference-scratch-space/customize.py +++ b/script/clean-nvidia-mlperf-inference-scratch-space/customize.py @@ -30,10 +30,11 @@ def preprocess(i): cache_rm_tags = "nvidia-harness,_download_model,_sdxl" cache_rm_tags = cache_rm_tags + extra_cache_rm_tags + mlc_cache = 
i['automation'].cache_action if cache_rm_tags: - r = mlc.access({'action': 'rm', 'automation': 'cache', - 'tags': cache_rm_tags, 'f': True}) + r = mlc_cache.access({'action': 'rm', 'target': 'cache', + 'tags': cache_rm_tags, 'f': True}) print(r) if r['return'] != 0 and r['return'] != 16: # ignore missing ones return r diff --git a/script/generate-mlperf-inference-submission/customize.py b/script/generate-mlperf-inference-submission/customize.py index 384b0c9b8..44ca20167 100644 --- a/script/generate-mlperf-inference-submission/customize.py +++ b/script/generate-mlperf-inference-submission/customize.py @@ -2,7 +2,6 @@ import os import json import shutil -import mlc import sys from tabulate import tabulate import mlperf_utils @@ -700,6 +699,7 @@ def generate_submission(env, state, inp, submission_division): 'env': {'MLC_PLATFORM_DETAILS_FILE_PATH': os.path.join(measurement_path, "system_info.txt")}, 'quiet': True } + mlc = i['automation'].action_object r = mlc.access(mlc_input) if r['return'] > 0: return r diff --git a/script/get-dataset-waymo/COPYRIGHT.md b/script/get-dataset-waymo/COPYRIGHT.md new file mode 100644 index 000000000..d2ceead84 --- /dev/null +++ b/script/get-dataset-waymo/COPYRIGHT.md @@ -0,0 +1,9 @@ +# Copyright Notice + +© 2025-2026 MLCommons. All Rights Reserved. + +This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at: + +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) + +Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. 
diff --git a/script/get-dataset-waymo/customize.py b/script/get-dataset-waymo/customize.py new file mode 100644 index 000000000..273feef06 --- /dev/null +++ b/script/get-dataset-waymo/customize.py @@ -0,0 +1,28 @@ +from mlc import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if os_info['platform'] == "windows": + return {'return': 1, 'error': 'Script not supported in windows yet!'} + + if env.get('MLC_DATASET_WAYMO_PATH', '') == '': + return {'return': 1, 'error': 'Please provide path to waymo dataset using tag \\`--waymo_path\\` as automatic download of this dataset is not supported yet.'} + + if not os.path.exists(env['MLC_DATASET_WAYMO_PATH']): + return { + 'return': 1, 'error': f"Path {env['MLC_DATASET_WAYMO_PATH']} does not exist!"} + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/script/get-dataset-waymo/meta.yaml b/script/get-dataset-waymo/meta.yaml new file mode 100644 index 000000000..bfbba995f --- /dev/null +++ b/script/get-dataset-waymo/meta.yaml @@ -0,0 +1,19 @@ +alias: get-dataset-waymo +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +tags: +- get +- dataset +- waymo +uid: 21b269c753b64437 +new_env_keys: + - MLC_DATASET_WAYMO_PATH +input_mapping: + waymo_path: MLC_DATASET_WAYMO_PATH +variations: + kitti_format: + default: true + group: dataset-format + env: + MLC_DATASET_WAYMO_FORMAT: kitti diff --git a/script/get-dataset-waymo/run.sh b/script/get-dataset-waymo/run.sh new file mode 100644 index 000000000..3197bb8ad --- /dev/null +++ b/script/get-dataset-waymo/run.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency diff --git a/script/get-generic-sys-util/meta.yaml b/script/get-generic-sys-util/meta.yaml
index 0436ba72d..d4ef86050 100644 --- a/script/get-generic-sys-util/meta.yaml +++ b/script/get-generic-sys-util/meta.yaml @@ -95,6 +95,43 @@ variations: brew: '' dnf: dmidecode yum: dmidecode + ffmpeg: + env: + MLC_SYS_UTIL_NAME: ffmpeg + MLC_SYS_UTIL_VERSION_CMD: ffmpeg -version # tbd: regular expression for version + MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + new_env_keys: + - MLC_FFMPEG_VERSION + state: + ffmpeg: # tbd: complete for other flavours of linux + apt: ffmpeg + brew: '' + dnf: '' + yum: '' + libsm6: + env: + MLC_SYS_UTIL_NAME: libsm6 # tbd: regular expression for version as well as whether its installed? + MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + new_env_keys: + - MLC_LIBSM6_VERSION + state: + libsm6: # tbd: complete for other flavours of linux + apt: libsm6 + brew: '' + dnf: '' + yum: '' + libxext6: + env: + MLC_SYS_UTIL_NAME: libxext6 # tbd: regular expression for version as well as whether its installed? + MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + new_env_keys: + - MLC_LIBEXT6_VERSION + state: + libxext6: # tbd: complete for other flavours of linux + apt: libxext6 + brew: '' + dnf: '' + yum: '' g++-11: env: MLC_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE: 'yes' diff --git a/script/get-gh-actions-runner/customize.py b/script/get-gh-actions-runner/customize.py index 564065fb4..360308e91 100644 --- a/script/get-gh-actions-runner/customize.py +++ b/script/get-gh-actions-runner/customize.py @@ -1,6 +1,5 @@ from mlc import utils import os -import cmind as cm def preprocess(i): @@ -12,6 +11,7 @@ def preprocess(i): meta = i['meta'] automation = i['automation'] + mlc = automation.action_object quiet = (env.get('MLC_QUIET', False) == 'yes') @@ -25,8 +25,8 @@ def preprocess(i): elif cmd == "uninstall": run_cmd = f"cd {env['MLC_GH_ACTIONS_RUNNER_CODE_PATH']} && sudo ./svc.sh uninstall" cache_rm_tags = "gh,runner,_install" - r = cm.access({'action': 'rm', 'automation': 'cache', - 'tags': cache_rm_tags, 'f': True}) + r = mlc.access({'action': 'rm', 
'automation': 'cache', + 'tags': cache_rm_tags, 'f': True}) print(r) if r['return'] != 0 and r['return'] != 16: # ignore missing ones return r diff --git a/script/get-gh-actions-runner/meta.yaml b/script/get-gh-actions-runner/meta.yaml index 67eabf7fb..67c512a45 100644 --- a/script/get-gh-actions-runner/meta.yaml +++ b/script/get-gh-actions-runner/meta.yaml @@ -22,7 +22,7 @@ new_env_keys: deps: - tags: detect-os - - tags: download-and-extract,_extract,_url.https://github.com/actions/runner/releases/download/v2.320.0/actions-runner-linux-x64-2.320.0.tar.gz + - tags: download-and-extract,_extract,_url.https://github.com/actions/runner/releases/download/v2.321.0/actions-runner-linux-x64-2.321.0.tar.gz force_cache: yes extra_cache_tags: gh-actions-runner-code,gh-actions,code env: @@ -31,7 +31,6 @@ deps: variations: config: group: command - default: true env: MLC_GH_ACTIONS_RUNNER_COMMAND: config remove: @@ -51,6 +50,7 @@ variations: MLC_GH_ACTIONS_RUNNER_COMMAND: uninstall start: group: command + default: true deps: - tags: get,gh,actions-runner,_install force_cache: yes diff --git a/script/get-ml-model-bert-large-squad/meta.yaml b/script/get-ml-model-bert-large-squad/meta.yaml index 51bdd93d3..9a11e4d50 100644 --- a/script/get-ml-model-bert-large-squad/meta.yaml +++ b/script/get-ml-model-bert-large-squad/meta.yaml @@ -111,7 +111,8 @@ variations: env: MLC_DOWNLOAD_CHECKSUM: 45f88ffb2915362242703c85c38ec2d4 MLC_ML_MODEL_F1: '90.067' - MLC_PACKAGE_URL: https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx + MLC_PACKAGE_URL: https://armi.in/files/bert_large_v1_1_fake_quant.onnx + MLC_PACKAGE_URL1: https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx onnx,int8,amazon-s3: env: MLC_PACKAGE_URL: https://mlperf-public.s3.us-west-2.amazonaws.com/bert_large_v1_1_fake_quant.onnx diff --git a/script/get-ml-model-pointpillars/COPYRIGHT.md b/script/get-ml-model-pointpillars/COPYRIGHT.md new file mode 100644 index 000000000..d2ceead84 --- 
/dev/null +++ b/script/get-ml-model-pointpillars/COPYRIGHT.md @@ -0,0 +1,9 @@ +# Copyright Notice + +© 2025-2026 MLCommons. All Rights Reserved. + +This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at: + +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) + +Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. diff --git a/script/get-ml-model-pointpillars/customize.py b/script/get-ml-model-pointpillars/customize.py new file mode 100644 index 000000000..b6685c889 --- /dev/null +++ b/script/get-ml-model-pointpillars/customize.py @@ -0,0 +1,32 @@ +from mlc import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if os_info['platform'] == "windows": + return {'return': 1, 'error': 'Script not supported in windows yet!'} + + if env.get('MLC_ML_MODEL_POINT_PILLARS_PATH', '') == '': + return {'return': 1, 'error': 'Please provide path to pointpillars model using tag \\`--pp_path\\`as automatic download of this model is not supported yet.'} + + if os.path.isdir(env['MLC_ML_MODEL_POINT_PILLARS_PATH']): + if env['MLC_ML_MODEL_PP_FORMAT'] == "onnx": + env['MLC_ML_MODEL_POINT_PILLARS_PATH'] = os.path.join( + env['MLC_ML_MODEL_POINT_PILLARS_PATH'], "pp.onnx") + else: + env['MLC_ML_MODEL_POINT_PILLARS_PATH'] = os.path.join( + env['MLC_ML_MODEL_POINT_PILLARS_PATH'], "pp_ep36.pth") + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/script/get-ml-model-pointpillars/meta.yaml b/script/get-ml-model-pointpillars/meta.yaml new file mode 100644 index 000000000..18470e4c0 --- /dev/null 
+++ b/script/get-ml-model-pointpillars/meta.yaml @@ -0,0 +1,26 @@ +alias: get-ml-model-pointpillars +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +tags: +- get +- ml-model +- ml +- model +- pointpillars +uid: 3562621a8994411d +new_env_keys: + - MLC_ML_MODEL_POINT_PILLARS_PATH +input_mapping: + pp_path: MLC_ML_MODEL_POINT_PILLARS_PATH +variations: + gpu: + default: true + group: device + env: + MLC_ML_MODEL_PP_FORMAT: pth + cpu: + group: device + env: + MLC_ML_MODEL_PP_FORMAT: onnx + diff --git a/script/get-ml-model-pointpillars/run.sh b/script/get-ml-model-pointpillars/run.sh new file mode 100644 index 000000000..3197bb8ad --- /dev/null +++ b/script/get-ml-model-pointpillars/run.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency diff --git a/script/get-ml-model-resnet50-deeplab/COPYRIGHT.md b/script/get-ml-model-resnet50-deeplab/COPYRIGHT.md new file mode 100644 index 000000000..d2ceead84 --- /dev/null +++ b/script/get-ml-model-resnet50-deeplab/COPYRIGHT.md @@ -0,0 +1,9 @@ +# Copyright Notice + +© 2025-2026 MLCommons. All Rights Reserved. + +This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at: + +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) + +Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. 
diff --git a/script/get-ml-model-resnet50-deeplab/customize.py b/script/get-ml-model-resnet50-deeplab/customize.py new file mode 100644 index 000000000..0df3b1c3f --- /dev/null +++ b/script/get-ml-model-resnet50-deeplab/customize.py @@ -0,0 +1,33 @@ +from mlc import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if os_info['platform'] == "windows": + return {'return': 1, 'error': 'Script not supported in windows yet!'} + + if env.get('MLC_ML_MODEL_DPLAB_RESNET50_PATH', '') == '': + return {'return': 1, 'error': 'Please provide path to deeplab resnet 50 model using tag \\`--dp_resnet50_path\\` as automatic download of this model is not supported yet.'} + + if os.path.isdir(env['MLC_ML_MODEL_DPLAB_RESNET50_PATH']): + if env['MLC_ML_MODEL_DPLAB_RESNET50_FORMAT'] == "onnx": + env['MLC_ML_MODEL_DPLAB_RESNET50_PATH'] = os.path.join( + env['MLC_ML_MODEL_DPLAB_RESNET50_PATH'], "deeplabv3+.onnx") + else: + env['MLC_ML_MODEL_DPLAB_RESNET50_PATH'] = os.path.join( + env['MLC_ML_MODEL_DPLAB_RESNET50_PATH'], + "best_deeplabv3plus_resnet50_waymo_os16.pth") + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/script/get-ml-model-resnet50-deeplab/meta.yaml b/script/get-ml-model-resnet50-deeplab/meta.yaml new file mode 100644 index 000000000..c8c8b84e1 --- /dev/null +++ b/script/get-ml-model-resnet50-deeplab/meta.yaml @@ -0,0 +1,27 @@ +alias: get-ml-model-resnet50-deeplab +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +tags: +- get +- ml-model +- ml +- model +- resnet50-deeplab +- resnet50 +- deeplab
+uid: 93097b691a6a4fce +new_env_keys: + - MLC_ML_MODEL_DPLAB_RESNET50_PATH +input_mapping: + dp_resnet50_path: MLC_ML_MODEL_DPLAB_RESNET50_PATH +variations: + gpu: + default: true + group: device + env: + MLC_ML_MODEL_DPLAB_RESNET50_FORMAT: pth + cpu: + group: device + env: + MLC_ML_MODEL_DPLAB_RESNET50_FORMAT: onnx diff --git
a/script/get-ml-model-resnet50-deeplab/run.sh b/script/get-ml-model-resnet50-deeplab/run.sh new file mode 100644 index 000000000..3197bb8ad --- /dev/null +++ b/script/get-ml-model-resnet50-deeplab/run.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency diff --git a/script/get-mlperf-inference-src/customize.py b/script/get-mlperf-inference-src/customize.py index d523f6abe..4fb2c0c31 100644 --- a/script/get-mlperf-inference-src/customize.py +++ b/script/get-mlperf-inference-src/customize.py @@ -114,6 +114,8 @@ def postprocess(i): inference_root, 'graph', 'R-GAT') env['MLC_MLPERF_INFERENCE_3DUNET_PATH'] = os.path.join( inference_root, 'vision', 'medical_imaging', '3d-unet-kits19') + env['MLC_MLPERF_INFERENCE_POINTPAINTING_PATH'] = os.path.join( + inference_root, 'automotive', '3d-object-detection') env['MLC_GET_DEPENDENT_CACHED_PATH'] = inference_root diff --git a/script/get-mlperf-inference-src/meta.yaml b/script/get-mlperf-inference-src/meta.yaml index 1d2db1989..919178125 100644 --- a/script/get-mlperf-inference-src/meta.yaml +++ b/script/get-mlperf-inference-src/meta.yaml @@ -30,6 +30,7 @@ new_env_keys: - MLC_MLPERF_INFERENCE_VERSION - MLC_MLPERF_INFERENCE_VISION_PATH - MLC_MLPERF_LAST_RELEASE +- MLC_MLPERF_INFERENCE_POINTPAINTING_PATH - +PYTHONPATH prehook_deps: - env: diff --git a/script/process-mlperf-accuracy/customize.py b/script/process-mlperf-accuracy/customize.py index c3cf7ac1b..967fde0b5 100644 --- a/script/process-mlperf-accuracy/customize.py +++ b/script/process-mlperf-accuracy/customize.py @@ -202,6 +202,10 @@ def preprocess(i): CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "language", "llama3.1-405b", "evaluate-accuracy.py") + "' --checkpoint-path '" + 
env['MLC_ML_MODEL_LLAMA3_CHECKPOINT_PATH'] + "' --mlperf-accuracy-file '" + os.path.join( result_dir, "mlperf_log_accuracy.json") + "' --dtype '" + env['MLC_ACCURACY_DTYPE'] + "' --dataset-file '" + env['MLC_DATASET_LLAMA3_PATH'] + "' > '" + out_file + "'" + elif dataset == "waymo": + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "automotive", "3d-object-detection", "accuracy_waymo.py") + "' --mlperf-accuracy-file '" + os.path.join( + result_dir, "mlperf_log_accuracy.json") + "' --waymo-dir '" + env['MLC_DATASET_WAYMO_PATH'] + "' > '" + out_file + "'" + else: return {'return': 1, 'error': 'Unsupported dataset'} diff --git a/script/process-mlperf-accuracy/meta.yaml b/script/process-mlperf-accuracy/meta.yaml index f45e3f485..cd14ef67a 100644 --- a/script/process-mlperf-accuracy/meta.yaml +++ b/script/process-mlperf-accuracy/meta.yaml @@ -269,3 +269,7 @@ variations: env: MLC_DATASET: dataset_llama3 group: dataset + waymo: + env: + MLC_DATASET: waymo + group: dataset diff --git a/script/run-docker-container/customize.py b/script/run-docker-container/customize.py index 97ff7b2aa..9277ab7f0 100644 --- a/script/run-docker-container/customize.py +++ b/script/run-docker-container/customize.py @@ -16,7 +16,7 @@ def preprocess(i): interactive = env.get('MLC_DOCKER_INTERACTIVE_MODE', '') - if str(interactive).lower() in ['yes', 'true', '1']: + if is_true(interactive): env['MLC_DOCKER_DETACHED_MODE'] = 'no' if 'MLC_DOCKER_RUN_SCRIPT_TAGS' not in env: @@ -54,7 +54,9 @@ def preprocess(i): print('') print('Checking existing Docker container:') print('') - CMD = f"""{env['MLC_CONTAINER_TOOL']} ps --format=json --filter "ancestor={DOCKER_CONTAINER}" """ + # CMD = f"""{env['MLC_CONTAINER_TOOL']} ps --format=json --filter "ancestor={DOCKER_CONTAINER}" """ + CMD = f"""{env['MLC_CONTAINER_TOOL']} ps --format """ + \ + "'{{ .ID }},'" + f""" --filter "ancestor={DOCKER_CONTAINER}" """ if os_info['platform'] == 'windows': CMD += " 2> nul" 
else: @@ -71,27 +73,21 @@ def preprocess(i): 'error': 'Unexpected error occurred with docker run:\n{}'.format(e) } - if len(out) > 0 and str(env.get('MLC_DOCKER_REUSE_EXISTING_CONTAINER', - '')).lower() in ["1", "true", "yes"]: # container exists - # print(out) - out_split = out.splitlines() + existing_container_id = None + if len(out) > 0: + out_split = out.split(",") if len(out_split) > 0: - try: - out_json = json.loads(out_split[0]) - # print("JSON successfully loaded:", out_json) - except json.JSONDecodeError as e: - print(f"Error: First line of 'out' is not valid JSON: {e}") - return { - 'return': 1, 'error': f"Error: First line of 'out' is not valid JSON: {e}"} - else: - out_json = [] + existing_container_id = out_split[0].strip() - if isinstance(out_json, list) and len(out_json) > 0: - existing_container_id = out_json[0]['Id'] + if existing_container_id and is_true( + env.get('MLC_DOCKER_REUSE_EXISTING_CONTAINER', '')): print(f"Reusing existing container {existing_container_id}") env['MLC_DOCKER_CONTAINER_ID'] = existing_container_id else: + if existing_container_id: + print( + f"""Not using existing container {existing_container_id} as env['MLC_DOCKER_REUSE_EXISTING_CONTAINER'] is not set""") if env.get('MLC_DOCKER_CONTAINER_ID', '') != '': del (env['MLC_DOCKER_CONTAINER_ID']) # not valid ID @@ -237,13 +233,8 @@ def postprocess(i): run_opts += port_map_cmd_string # Currently have problem running Docker in detached mode on Windows: - detached = str( - env.get( - 'MLC_DOCKER_DETACHED_MODE', - '')).lower() in [ - 'yes', - 'true', - "1"] + detached = is_true(env.get('MLC_DOCKER_DETACHED_MODE', '')) + # if detached and os_info['platform'] != 'windows': if detached: if os_info['platform'] == 'windows': @@ -257,8 +248,7 @@ def postprocess(i): CONTAINER = f"""{env['MLC_CONTAINER_TOOL']} run -dt {run_opts} --rm {docker_image_repo}/{docker_image_name}:{docker_image_tag} bash""" CMD = f"""ID=`{CONTAINER}` && {env['MLC_CONTAINER_TOOL']} exec $ID bash -c '{run_cmd}'""" 
- if False and str(env.get('MLC_KEEP_DETACHED_CONTAINER', '')).lower() not in [ - 'yes', "1", 'true']: + if is_true(env.get('MLC_KILL_DETACHED_CONTAINER', False)): CMD += f""" && {env['MLC_CONTAINER_TOOL']} kill $ID >/dev/null""" CMD += ' && echo "ID=$ID"' diff --git a/script/run-docker-container/meta.yaml b/script/run-docker-container/meta.yaml index f6f3d19f0..aeaac021f 100644 --- a/script/run-docker-container/meta.yaml +++ b/script/run-docker-container/meta.yaml @@ -14,7 +14,6 @@ cache: false category: Docker automation default_env: - MLC_DOCKER_DETACHED_MODE: 'yes' MLC_DOCKER_REUSE_EXISTING_CONTAINER: 'no' MLC_DOCKER_PRIVILEGED_MODE: 'no' MLC_PODMAN_MAP_USER_ID: 'no' @@ -31,6 +30,7 @@ input_mapping: docker_base_image: MLC_DOCKER_IMAGE_BASE base_image: MLC_DOCKER_IMAGE_BASE keep_detached: MLC_KEEP_DETACHED_CONTAINER + reuse_existing: MLC_DOCKER_REUSE_EXISTING_CONTAINER docker_os: MLC_DOCKER_OS docker_os_version: MLC_DOCKER_OS_VERSION os: MLC_DOCKER_OS diff --git a/script/run-mlperf-inference-app/customize.py b/script/run-mlperf-inference-app/customize.py index c9fc33d52..885d1d8b3 100644 --- a/script/run-mlperf-inference-app/customize.py +++ b/script/run-mlperf-inference-app/customize.py @@ -5,6 +5,7 @@ import subprocess import copy import mlperf_utils +from utils import * summary_ext = ['.csv', '.json', '.xlsx'] @@ -127,6 +128,9 @@ def preprocess(i): test_list.remove("TEST01") # test_list.remove("TEST05") + if "pointpainting" in env["MLC_MODEL"].lower(): + test_list.append("TEST04") + if "llama2" in env['MLC_MODEL'].lower( ) or "mixtral-8x7b" in env['MLC_MODEL']: test_list.append("TEST06") @@ -218,8 +222,7 @@ def preprocess(i): print('=========================================================') - if str(env.get('MLC_MLPERF_USE_DOCKER', '') - ).lower() in ["1", "true", "yes"]: + if is_true(env.get('MLC_MLPERF_USE_DOCKER', '')): action = "docker" # del(env['OUTPUT_BASE_DIR']) state = {} @@ -232,7 +235,7 @@ def preprocess(i): if k.startswith("docker_"): 
docker_extra_input[k] = inp[k] inp = {} - if str(docker_dt).lower() in ["yes", "true", "1"]: + if is_true(docker_dt): # turning it off for the first run and after that we turn it on if env.get('MLC_DOCKER_REUSE_EXISTING_CONTAINER', '') == '': env['MLC_DOCKER_REUSE_EXISTING_CONTAINER'] = 'no' @@ -292,7 +295,7 @@ def preprocess(i): env['OUTPUT_BASE_DIR'], f"{env['MLC_MLPERF_RUN_STYLE']}_results") if action == "docker": - if str(docker_dt).lower() not in ["yes", "true", "1"]: + if not is_true(docker_dt): print( f"\nStop Running loadgen scenario: {scenario} and mode: {mode}") # We run commands interactively inside the docker container @@ -320,8 +323,8 @@ def preprocess(i): if state.get('docker', {}): del (state['docker']) - if env.get('MLC_DOCKER_CONTAINER_ID', '') != '' and str(env.get( - 'MLC_DOCKER_CONTAINER_KEEP_ALIVE', '')).lower() not in ["yes", "1", "true"]: + if env.get('MLC_DOCKER_CONTAINER_ID', '') != '' and not is_true(env.get( + 'MLC_DOCKER_CONTAINER_KEEP_ALIVE', '')): container_id = env['MLC_DOCKER_CONTAINER_ID'] CMD = f"docker kill {container_id}" docker_out = subprocess.check_output(CMD, shell=True).decode("utf-8") diff --git a/script/run-mlperf-inference-app/meta.yaml b/script/run-mlperf-inference-app/meta.yaml index 9dc4408d6..71db7e876 100644 --- a/script/run-mlperf-inference-app/meta.yaml +++ b/script/run-mlperf-inference-app/meta.yaml @@ -119,6 +119,9 @@ input_mapping: use_dataset_from_host: MLC_USE_DATASET_FROM_HOST use_model_from_host: MLC_USE_MODEL_FROM_HOST rgat_checkpoint_path: RGAT_CHECKPOINT_PATH + pointpillars_checkpoint_path: MLC_ML_MODEL_POINT_PILLARS_PATH + deeplab_resnet50_path: MLC_ML_MODEL_DPLAB_RESNET50_PATH + waymo_path: MLC_DATASET_WAYMO_PATH new_state_keys: - app_mlperf_inference_* @@ -466,6 +469,7 @@ input_description: - efficientnet - rgat - llama3_1-405b + - pointpainting default: resnet50 desc: MLPerf model sort: 200 diff --git a/script/run-mlperf-inference-submission-checker/customize.py 
b/script/run-mlperf-inference-submission-checker/customize.py index 4d22d4867..818981d72 100644 --- a/script/run-mlperf-inference-submission-checker/customize.py +++ b/script/run-mlperf-inference-submission-checker/customize.py @@ -11,7 +11,7 @@ def preprocess(i): submission_dir = env.get("MLC_MLPERF_INFERENCE_SUBMISSION_DIR", "") - version = env.get('MLC_MLPERF_SUBMISSION_CHECKER_VERSION', '') + version = env.get('MLC_MLPERF_SUBMISSION_CHECKER_VERSION', 'v5.0') if submission_dir == "": return {'return': 1, diff --git a/script/run-terraform/customize.py b/script/run-terraform/customize.py index 5728ce687..15687e6a9 100644 --- a/script/run-terraform/customize.py +++ b/script/run-terraform/customize.py @@ -1,5 +1,4 @@ from mlc import utils -import mlc import os import shutil import json @@ -72,6 +71,7 @@ def postprocess(i): cmd = cmd.replace(":", "=") cmd = cmd.replace(";;", ",") run_input['run_cmds'].append(cmd) + mlc = i['automation'].action_object r = mlc.access(run_input) if r['return'] > 0: return r diff --git a/setup.py b/setup.py index 16ff5cc7a..0ba25f32d 100644 --- a/setup.py +++ b/setup.py @@ -69,13 +69,13 @@ def run(self): print("Running custom post-install command...") commit_hash = get_commit_hash() import mlc - branch = os.environ.get('MLC_REPO_BRANCH', 'mlc') + branch = os.environ.get('MLC_REPO_BRANCH', 'dev') res = mlc.access({'action': 'pull', - 'automation': 'repo', - 'url': 'mlcommons@mlperf-automations', + 'target': 'repo', + 'repo': 'mlcommons@mlperf-automations', 'branch': branch, - 'checkout': commit_hash + # 'checkout': commit_hash }) print(res) if res['return'] > 0: @@ -108,10 +108,13 @@ def get_commit_hash(): # Get project metadata from pyproject.toml project_meta = get_project_meta() +# Read version from the VERSION file +version = read_file("VERSION", default="0.0.1") + setup( name=project_meta.get("name", "mlperf"), - version=project_meta.get("version", "0.0.1"), + version=version, description=project_meta.get("description", "MLPerf 
Automations."), author=", ".join(a.get("name", "") for a in project_meta.get("authors", [])),