diff --git a/.github/workflows/test-mlperf-inference-rgat.yml b/.github/workflows/test-mlperf-inference-rgat.yml
new file mode 100644
index 000000000..de5b0fbb6
--- /dev/null
+++ b/.github/workflows/test-mlperf-inference-rgat.yml
@@ -0,0 +1,48 @@
+name: MLPerf inference rgat
+
+on:
+  pull_request_target:
+    branches: [ "main", "dev" ]
+    paths:
+      - '.github/workflows/test-mlperf-inference-rgat.yml'
+      - '**'
+      - '!**.md'
+
+jobs:
+  build:
+    runs-on: ${{ matrix.os }}
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ubuntu-latest]
+        python-version: [ "3.12" ]
+        backend: [ "pytorch" ]
+        implementation: [ "python" ]
+
+    steps:
+      - uses: actions/checkout@v3
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v3
+        with:
+          python-version: ${{ matrix.python-version }}
+      - name: Install dependencies
+        run: |
+          python3 -m pip install "cmind @ git+https://git@github.com/mlcommons/ck.git@mlperf-inference#subdirectory=cm"
+          cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }}
+      - name: Test MLPerf Inference R-GAT using ${{ matrix.backend }} on ${{ matrix.os }}
+        run: |
+          cm run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=rgat --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --adr.compiler.tags=gcc --quiet -v --target_qps=1
+      - name: Push Results
+        if: github.repository_owner == 'gateoverflow'
+        env:
+          USER: "GitHub Action"
+          EMAIL: "admin@gateoverflow.com"
+          GITHUB_TOKEN: ${{ secrets.TEST_RESULTS_GITHUB_TOKEN }}
+        run: |
+          git config --global user.name "${{ env.USER }}"
+          git config --global user.email "${{ env.EMAIL }}"
+          git config --global credential.https://github.com.helper ""
+          git config --global credential.https://github.com.helper "!gh auth git-credential"
+          git config --global credential.https://gist.github.com.helper ""
+          git config --global credential.https://gist.github.com.helper "!gh auth git-credential"
+          cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/mlperf_inference_test_submissions_v5.0 --repo_branch=main --commit_message="Results from R-GAT GH action on ${{ matrix.os }}" --quiet
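
For local debugging, the CI test step above can also be driven through CM's Python API rather than the CLI. A minimal sketch, assuming cmind is installed and the automation repo is already pulled; the dictionary keys are assumed to mirror the CLI flags the same way test_docker.py (further down in this diff) passes them, and are not verified here:

import cmind as cm

# Assumed key-for-flag mapping; compare with the cm run script command above.
r = cm.access({'action': 'run',
               'automation': 'script',
               'tags': 'run,mlperf,inference,generate-run-cmds,_submission,_short',
               'submitter': 'MLCommons',
               'model': 'rgat',
               'implementation': 'python',
               'backend': 'pytorch',
               'device': 'cpu',
               'scenario': 'Offline',
               'test_query_count': '500',
               'add_deps_recursive': {'compiler': {'tags': 'gcc'}},
               'target_qps': '1',
               'quiet': True})
if r['return'] > 0:
    cm.error(r)
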
diff --git a/automation/script/module_misc.py b/automation/script/module_misc.py
index 9cae72b07..5de7f0402 100644
--- a/automation/script/module_misc.py
+++ b/automation/script/module_misc.py
@@ -1634,12 +1634,12 @@ def dockerfile(i):
         'docker_cm_repo',
         docker_settings.get(
             'cm_repo',
-            'mlcommons@cm4mlops'))
+            'mlcommons@mlperf-automations'))
     cm_repo_branch = i.get(
         'docker_cm_repo_branch',
         docker_settings.get(
             'cm_repo_branch',
-            'mlperf-inference'))
+            'main'))
 
     cm_repo_flags = i.get(
         'docker_cm_repo_flags',
@@ -2295,7 +2295,7 @@ def docker(i):
         'docker_cm_repo',
         docker_settings.get(
             'cm_repo',
-            'mlcommons@cm4mlops'))
+            'mlcommons@mlperf-automations'))
 
     docker_path = i.get('docker_path', '').strip()
     if docker_path == '':
diff --git a/script/app-mlperf-inference-mlcommons-python/_cm.yaml b/script/app-mlperf-inference-mlcommons-python/_cm.yaml
index 8fa3df206..89646244b 100644
--- a/script/app-mlperf-inference-mlcommons-python/_cm.yaml
+++ b/script/app-mlperf-inference-mlcommons-python/_cm.yaml
@@ -222,6 +222,7 @@ deps:
       CM_MODEL:
       - dlrm-v2-99
      - dlrm-v2-99.9
+      - rgat
     enable_if_env:
       CM_MLPERF_BACKEND:
       - pytorch
@@ -234,6 +235,11 @@ deps:
     names:
     - ml-engine-torchvision
     - torchvision
+    skip_if_env:
+      CM_MODEL:
+      - dlrm-v2-99
+      - dlrm-v2-99.9
+      - rgat
     enable_if_env:
       CM_MLPERF_BACKEND:
       - pytorch
@@ -487,7 +493,7 @@ deps:
       CM_MODEL:
       - rgat
     skip_if_env:
-      CM_ML_MODEL_RGAT_CHECKPOINT_PATH:
+      RGAT_CHECKPOINT_PATH:
       - 'on'
 
 ########################################################################
@@ -619,9 +625,14 @@ deps:
     enable_if_env:
       CM_MODEL:
       - rgat
-    skip_if_env:
+    skip_if_any_env:
       CM_DATASET_IGBH_PATH:
       - "on"
+    skip_if_env:
+      CM_RUN_STATE_DOCKER:
+      - 'yes'
+      CM_USE_DATASET_FROM_HOST:
+      - 'yes'
 
 ########################################################################
 # Install MLPerf inference dependencies
@@ -1226,45 +1237,48 @@ variations:
     group: models
     env:
       CM_MODEL: rgat
-    adr:
+    add_deps_recursive:
       pytorch:
-        version: 2.1.0
+        version_max: "2.4.1"
+        version_max_usable: "2.4.1"
     deps:
       - tags: get,generic-python-lib,_package.colorama
       - tags: get,generic-python-lib,_package.tqdm
       - tags: get,generic-python-lib,_package.requests
       - tags: get,generic-python-lib,_package.torchdata
-        version: 0.7.0
-      - tags: get,generic-python-lib,_package.torchvision
-        version: 0.16.0
       - tags: get,generic-python-lib,_package.pybind11
       - tags: get,generic-python-lib,_package.PyYAML
       - tags: get,generic-python-lib,_package.numpy
-        version: 1.26.4
+        version_max: "1.26.4"
+        version_max_usable: "1.26.4"
       - tags: get,generic-python-lib,_package.pydantic
       - tags: get,generic-python-lib,_package.igb,_url.git+https://github.com/IllinoisGraphBenchmark/IGB-Datasets.git
-
-  rgat,cuda:
-    deps:
-      - tags: get,generic-python-lib,_package.dgl,_find_links_url.https://data.dgl.ai/wheels/torch-2.1/cu121/repo.html
+      - tags: get,generic-python-lib,_package.torch-geometric
+        update_tags_from_env_with_prefix:
+          _find_links_url.:
+          - CM_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL
       - tags: get,generic-python-lib,_package.torch-scatter
+        update_tags_from_env_with_prefix:
+          _find_links_url.:
+          - CM_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL
       - tags: get,generic-python-lib,_package.torch-sparse
-      - tags: get,generic-python-lib,_package.torch-geometric
-        env:
-          CM_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL: "https://data.pyg.org/whl/torch-<<<CM_TORCH_VERSION>>>.html"
+        update_tags_from_env_with_prefix:
+          _find_links_url.:
+          - CM_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL
+      - tags: get,generic-python-lib,_package.dgl
+        update_tags_from_env_with_prefix:
+          _find_links_url.:
+          - CM_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL_DGL
+
+  rgat,cuda:
+    env:
+      CM_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL: "https://data.pyg.org/whl/torch-<<<CM_TORCH_VERSION>>>.html"
+      CM_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL_DGL: "https://data.dgl.ai/wheels/torch-<<<CM_TORCH_VERSION_MAJOR_MINOR>>>/cu121/repo.html"
 
   rgat,cpu:
-    deps:
-      - tags: get,generic-python-lib,_package.torch-geometric
-        env:
-          CM_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL: "https://data.pyg.org/whl/torch-<<<CM_TORCH_VERSION>>>+cpu.html"
-      - tags: get,generic-python-lib,_package.torch-scatter
-        env:
-          CM_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL: "https://data.pyg.org/whl/torch-<<<CM_TORCH_VERSION>>>+cpu.html"
-      - tags: get,generic-python-lib,_package.torch-sparse
-        env:
-          CM_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL: "https://data.pyg.org/whl/torch-<<<CM_TORCH_VERSION>>>+cpu.html"
-      - tags: get,generic-python-lib,_package.dgl,_find_links_url.https://data.dgl.ai/wheels/torch-2.1/repo.html
+    env:
+      CM_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL: "https://data.pyg.org/whl/torch-<<<CM_TORCH_VERSION>>>+cpu.html"
+      CM_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL_DGL: "https://data.dgl.ai/wheels/torch-<<<CM_TORCH_VERSION_MAJOR_MINOR>>>/repo.html"
 
 # Target devices
   cpu:
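
The two CM_TMP_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL* values above are templates that CM expands against the detected torch version before appending them as _find_links_url. variation tags. A rough illustration of the expected expansion for torch 2.4.1 on CPU (plain Python; the actual substitution happens inside CM, and the _MAJOR_MINOR derivation is the get-generic-python-lib change later in this diff):

torch_version = "2.4.1"                               # e.g. CM_TORCH_VERSION
major_minor = ".".join(torch_version.split(".")[:2])  # "2.4"

pyg_url = f"https://data.pyg.org/whl/torch-{torch_version}+cpu.html"
dgl_url = f"https://data.dgl.ai/wheels/torch-{major_minor}/repo.html"

print(pyg_url)   # https://data.pyg.org/whl/torch-2.4.1+cpu.html
print(dgl_url)   # https://data.dgl.ai/wheels/torch-2.4/repo.html
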
diff --git a/script/app-mlperf-inference-mlcommons-python/customize.py b/script/app-mlperf-inference-mlcommons-python/customize.py
index dcffa5672..1a2d3b023 100644
--- a/script/app-mlperf-inference-mlcommons-python/customize.py
+++ b/script/app-mlperf-inference-mlcommons-python/customize.py
@@ -388,7 +388,9 @@ def get_run_cmd_reference(
             env['CM_VLLM_SERVER_MODEL_NAME'] = env.get(
                 "CM_VLLM_SERVER_MODEL_NAME") or "NousResearch/Meta-Llama-3-8B-Instruct"
             # env['CM_MLPERF_INFERENCE_API_SERVER'] = "http://localhost:8000"
-            cmd += f" --api-server {env['CM_MLPERF_INFERENCE_API_SERVER']} --model-path {env['CM_VLLM_SERVER_MODEL_NAME']} --api-model-name {env['CM_VLLM_SERVER_MODEL_NAME']} --vllm "
+            cmd += f""" --api-server {env['CM_MLPERF_INFERENCE_API_SERVER']} \
+                --model-path {env['CM_VLLM_SERVER_MODEL_NAME']} \
+                --api-model-name {env['CM_VLLM_SERVER_MODEL_NAME']} --vllm """
         else:
             cmd += f" --model-path {env['LLAMA2_CHECKPOINT_PATH']}"
@@ -493,7 +495,7 @@ def get_run_cmd_reference(
             scenario_extra_options + mode_extra_options + \
             " --output " + env['CM_MLPERF_OUTPUT_DIR'] + \
             ' --dtype ' + dtype_rgat + \
-            " --model-path " + env['CM_ML_MODEL_RGAT_CHECKPOINT_PATH']
+            " --model-path " + env['RGAT_CHECKPOINT_PATH']
 
         if env.get('CM_ACTIVATE_RGAT_IN_MEMORY', '') == "yes":
             cmd += " --in-memory "
diff --git a/script/app-mlperf-inference/_cm.yaml b/script/app-mlperf-inference/_cm.yaml
index 4c368346e..c57003c10 100644
--- a/script/app-mlperf-inference/_cm.yaml
+++ b/script/app-mlperf-inference/_cm.yaml
@@ -781,6 +781,15 @@ variations:
         - mlperf-accuracy-script
         - 3d-unet-accuracy-script
         tags: run,accuracy,mlperf,_igbh
+    docker:
+      deps:
+      - tags: get,dataset,igbh
+        enable_if_env:
+          CM_USE_DATASET_FROM_HOST:
+          - 'yes'
+        names:
+        - igbh-original
+        - igbh-dataset
 
   sdxl:
     group:
@@ -1808,8 +1817,8 @@ docker:
   interactive: True
   extra_run_args: ' --dns 8.8.8.8 --dns 8.8.4.4 --ulimit memlock=-1 --cap-add SYS_ADMIN --cap-add SYS_TIME --security-opt apparmor=unconfined --security-opt seccomp=unconfined'
   os: ubuntu
-  cm_repo: mlcommons@cm4mlops
-  cm_repo_branch: mlperf-inference
+  cm_repo: mlcommons@mlperf-automations
+  cm_repo_branch: dev
   real_run: False
   os_version: '22.04'
   docker_input_mapping:
diff --git a/script/app-mlperf-inference/customize.py b/script/app-mlperf-inference/customize.py
index 41fd8570b..f62ae947d 100644
--- a/script/app-mlperf-inference/customize.py
+++ b/script/app-mlperf-inference/customize.py
@@ -356,7 +356,7 @@ def postprocess(i):
         host_info['system_name'] = env['CM_HOST_SYSTEM_NAME']
 
     # Check CM automation repository
-    repo_name = 'mlcommons@cm4mlops'
+    repo_name = 'mlcommons@mlperf-automations'
     repo_hash = ''
     r = cm.access({'action': 'find',
                    'automation': 'repo',
                    'artifact': 'mlcommons@cm4mlops,9e97bb72b0474657'})
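
The igbh docker dependency added above relies on CM's env gating. As a simplified sketch of the assumed semantics (not CM's actual implementation): a dependency runs only if every enable_if_env key matches one of its listed values, and is dropped if any skip_if_env key matches:

def dep_active(env, enable_if_env=None, skip_if_env=None):
    # All enable_if_env conditions must hold...
    for key, allowed in (enable_if_env or {}).items():
        if env.get(key, '') not in allowed:
            return False
    # ...and no skip_if_env condition may hold.
    for key, blocked in (skip_if_env or {}).items():
        if env.get(key, '') in blocked:
            return False
    return True

# The dep above fires only when the dataset is taken from the host:
print(dep_active({'CM_USE_DATASET_FROM_HOST': 'yes'},
                 enable_if_env={'CM_USE_DATASET_FROM_HOST': ['yes']}))   # True
print(dep_active({}, enable_if_env={'CM_USE_DATASET_FROM_HOST': ['yes']}))  # False
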
diff --git a/script/build-dockerfile/_cm.yaml b/script/build-dockerfile/_cm.yaml
index 7535311ea..f54d3a216 100644
--- a/script/build-dockerfile/_cm.yaml
+++ b/script/build-dockerfile/_cm.yaml
@@ -19,7 +19,7 @@ default_env:
   '
   CM_DOCKER_OS: ubuntu
   CM_DOCKER_NOT_PULL_UPDATE: False
-  CM_MLOPS_REPO_BRANCH: mlperf-inference
+  CM_MLOPS_REPO_BRANCH: dev
 
 input_mapping:
   build: CM_BUILD_DOCKER_IMAGE
diff --git a/script/build-dockerfile/customize.py b/script/build-dockerfile/customize.py
index f5cd06204..9c6012aa4 100644
--- a/script/build-dockerfile/customize.py
+++ b/script/build-dockerfile/customize.py
@@ -131,7 +131,7 @@ def preprocess(i):
             print(
                 f"Converted repo format from {env['CM_MLOPS_REPO']} to {cm_mlops_repo}")
     else:
-        cm_mlops_repo = "mlcommons@cm4mlops"
+        cm_mlops_repo = "mlcommons@mlperf-automations"
 
     cm_mlops_repo_branch_string = f" --branch={env['CM_MLOPS_REPO_BRANCH']}"
 
@@ -299,7 +299,7 @@ def preprocess(i):
         f.write(EOL + '# Download CM repo for scripts' + EOL)
 
         if use_copy_repo:
-            docker_repo_dest = "/home/cmuser/CM/repos/mlcommons@cm4mlops"
+            docker_repo_dest = "/home/cmuser/CM/repos/mlcommons@mlperf-automations"
             f.write(
                 f'COPY --chown=cmuser:cm {relative_repo_path} {docker_repo_dest}' + EOL)
diff --git a/script/generate-mlperf-inference-submission/_cm.yaml b/script/generate-mlperf-inference-submission/_cm.yaml
index 32003a1b3..064cbc307 100644
--- a/script/generate-mlperf-inference-submission/_cm.yaml
+++ b/script/generate-mlperf-inference-submission/_cm.yaml
@@ -31,8 +31,8 @@ deps:
     - 'on'
   tags: get,mlperf,submission,dir
 docker:
-  cm_repo: mlcommons@cm4mlops
-  cm_repo_branch: mlperf-inference
+  cm_repo: mlcommons@mlperf-automations
+  cm_repo_branch: dev
   deps:
   - names: get-mlperf-inference-results-dir
     skip_if_env:
diff --git a/script/get-dataset-mlperf-inference-igbh/_cm.yaml b/script/get-dataset-mlperf-inference-igbh/_cm.yaml
index 5af7233d7..796d5674e 100644
--- a/script/get-dataset-mlperf-inference-igbh/_cm.yaml
+++ b/script/get-dataset-mlperf-inference-igbh/_cm.yaml
@@ -14,7 +14,9 @@ new_env_keys:
 - CM_DATASET_IGBH_PATH
 - CM_DATASET_IGBH_SIZE
 input_mapping:
-  out_path: CM_IGBH_DATASET_OUT_PATH
+  out_path: CM_DATASET_IGBH_OUT_PATH
+env:
+  SKIP_USER_PROMPT: yes
 deps:
   - tags: mlperf,inference,source
     names:
@@ -22,8 +24,9 @@ deps:
   - tags: get,python
     names:
     - get-python
-  - tags: get,generic-python-lib,_package.igb,_url.git+https://github.com/anandhu-eng/IGB-Datasets.git
+  - tags: get,generic-python-lib,_package.igb,_url.git+https://github.com/gateoverflow/IGB-Datasets.git
   - tags: get,generic-python-lib,_package.colorama
+  - tags: get,generic-python-lib,_package.tqdm
 
 prehook_deps:
   #paper
   - env:
       CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper/node_feat.npy
       CM_DOWNLOAD_CHECKSUM: 71058b9ac8011bafa1c5467504452d13
       CM_DOWNLOAD_FILENAME: node_feet.npy
-      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/paper/
+      CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/paper/
     extra_cache_tags: dataset,igbh,paper,node_feat
     force_cache: true
     enable_if_env:
-      CM_IGBH_DATASET_TYPE:
+      CM_DATASET_IGBH_TYPE:
       - 'full'
     names:
     - dae
@@ -48,11 +51,11 @@ prehook_deps:
       CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper/node_label_19.npy
       CM_DOWNLOAD_CHECKSUM: be6fda45566e679bdb05ebea98ad16d4
       CM_DOWNLOAD_FILENAME: node_label_19.npy
-      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/paper/
+      CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/paper/
     extra_cache_tags: dataset,igbh,paper,node_label_19
     force_cache: true
     enable_if_env:
-      CM_IGBH_DATASET_TYPE:
+      CM_DATASET_IGBH_TYPE:
       - 'full'
     names:
     - dae
@@ -65,11 +68,11 @@ prehook_deps:
       CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper/node_label_2K.npy
       CM_DOWNLOAD_CHECKSUM: 6eccab9a14f92f42be5b367c39002031
       CM_DOWNLOAD_FILENAME: node_label_2K.npy
-      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/paper/
+      CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/paper/
     extra_cache_tags: dataset,igbh,paper,node_label_2K
     force_cache: true
     enable_if_env:
-      CM_IGBH_DATASET_TYPE:
+      CM_DATASET_IGBH_TYPE:
       - 'full'
     names:
     - dae
@@ -82,11 +85,11 @@ prehook_deps:
       CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper/paper_id_index_mapping.npy
       CM_DOWNLOAD_CHECKSUM: f70dd642a4f7e41d926c91c8c054fc4c
       CM_DOWNLOAD_FILENAME: paper_id_index_mapping.npy
-      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/paper/
+      CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/paper/
     extra_cache_tags: dataset,igbh,paper,paper_id_index_mapping
     force_cache: true
     enable_if_env:
-      CM_IGBH_DATASET_TYPE:
+      CM_DATASET_IGBH_TYPE:
       - 'full'
     names:
     - dae
@@ -100,11 +103,11 @@ prehook_deps:
       CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__cites__paper/edge_index.npy
       CM_DOWNLOAD_CHECKSUM: f4897f53636c04a9c66f6063ec635c16
       CM_DOWNLOAD_FILENAME: edge_index.npy
-      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/paper__cites__paper/
+      CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/paper__cites__paper/
     extra_cache_tags: dataset,igbh,paper_cites_paper,edge_index
     force_cache: true
     enable_if_env:
-      CM_IGBH_DATASET_TYPE:
+      CM_DATASET_IGBH_TYPE:
       - 'full'
     names:
     - dae
@@ -118,11 +121,11 @@ prehook_deps:
       CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/author/author_id_index_mapping.npy
       CM_DOWNLOAD_CHECKSUM: 58c15aab7dae03bbd57e6a4ac5e61bd9
       CM_DOWNLOAD_FILENAME: author_id_index_mapping.npy
-      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/author/
+      CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/author/
     extra_cache_tags: dataset,igbh,author,author_id_index_mapping
     force_cache: true
     enable_if_env:
-      CM_IGBH_DATASET_TYPE:
+      CM_DATASET_IGBH_TYPE:
       - 'full'
     names:
     - dae
@@ -135,11 +138,11 @@ prehook_deps:
       CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/author/node_feat.npy
       CM_DOWNLOAD_CHECKSUM: 2ec2512b554088381c04ec013e893c8d
       CM_DOWNLOAD_FILENAME: node_feat.npy
-      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/author/
+      CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/author/
     extra_cache_tags: dataset,igbh,author,node_feat
     force_cache: true
     enable_if_env:
-      CM_IGBH_DATASET_TYPE:
+      CM_DATASET_IGBH_TYPE:
       - 'full'
     names:
     - dae
@@ -153,11 +156,11 @@ prehook_deps:
       CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/conference/conference_id_index_mapping.npy
       CM_DOWNLOAD_CHECKSUM: 0bf7c555d8c697b31b6af6c4cb6b6612
       CM_DOWNLOAD_FILENAME: conference_id_index_mapping.npy
-      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/conference/
+      CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/conference/
     extra_cache_tags: dataset,igbh,conference,conference_id_index_mapping
     force_cache: true
     enable_if_env:
-      CM_IGBH_DATASET_TYPE:
+      CM_DATASET_IGBH_TYPE:
       - 'full'
     names:
     - dae
@@ -170,11 +173,11 @@ prehook_deps:
       CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/conference/node_feat.npy
       CM_DOWNLOAD_CHECKSUM: 898ff529b8cf972261fedd50df6377f8
       CM_DOWNLOAD_FILENAME: node_feat.npy
-      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/conference/
+      CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/conference/
     extra_cache_tags: dataset,igbh,conference,node_feat
     force_cache: true
     enable_if_env:
-      CM_IGBH_DATASET_TYPE:
+      CM_DATASET_IGBH_TYPE:
       - 'full'
     names:
     - dae
@@ -188,11 +191,11 @@ prehook_deps:
       CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/institute/institute_id_index_mapping.npy
       CM_DOWNLOAD_CHECKSUM: 03fb45eafb7bd35875ef4c7cd2a299a9
       CM_DOWNLOAD_FILENAME: institute_id_index_mapping.npy
-      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/institute/
+      CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/institute/
     extra_cache_tags: dataset,igbh,institute,institute_id_index_mapping
     force_cache: true
     enable_if_env:
-      CM_IGBH_DATASET_TYPE:
+      CM_DATASET_IGBH_TYPE:
       - 'full'
     names:
     - dae
@@ -205,11 +208,11 @@ prehook_deps:
       CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/institute/node_feat.npy
       CM_DOWNLOAD_CHECKSUM: 12eaeced22d17b4e97d4b4742331c819
       CM_DOWNLOAD_FILENAME: node_feat.npy
-      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/institute/
+      CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/institute/
     extra_cache_tags: dataset,igbh,institute,node_feat
     force_cache: true
     enable_if_env:
-      CM_IGBH_DATASET_TYPE:
+      CM_DATASET_IGBH_TYPE:
       - 'full'
     names:
     - dae
@@ -223,11 +226,11 @@ prehook_deps:
       CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/journal/journal_id_index_mapping.npy
       CM_DOWNLOAD_CHECKSUM: b630c20852b76d17a5c9c37b39176f69
       CM_DOWNLOAD_FILENAME: journal_id_index_mapping.npy
-      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/journal/
+      CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/journal/
     extra_cache_tags: dataset,igbh,journal,journal_id_index_mapping
     force_cache: true
     enable_if_env:
-      CM_IGBH_DATASET_TYPE:
+      CM_DATASET_IGBH_TYPE:
       - 'full'
     names:
     - dae
@@ -240,11 +243,11 @@ prehook_deps:
       CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/journal/node_feat.npy
       CM_DOWNLOAD_CHECKSUM: 49d51b554b3004f10bee19d1c7f9b416
       CM_DOWNLOAD_FILENAME: node_feat.npy
-      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/journal/
+      CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/journal/
     extra_cache_tags: dataset,igbh,journal,node_feat
     force_cache: true
     enable_if_env:
-      CM_IGBH_DATASET_TYPE:
+      CM_DATASET_IGBH_TYPE:
       - 'full'
     names:
     - dae
@@ -258,11 +261,11 @@ prehook_deps:
       CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/fos/fos_id_index_mapping.npy
       CM_DOWNLOAD_CHECKSUM: 0f0cfde619361cde35d3be9f201d081a
       CM_DOWNLOAD_FILENAME: fos_id_index_mapping.npy
-      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/fos/
+      CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/fos/
     extra_cache_tags: dataset,igbh,fos,fos_id_index_mapping
     force_cache: true
     enable_if_env:
-      CM_IGBH_DATASET_TYPE:
+      CM_DATASET_IGBH_TYPE:
       - 'full'
     names:
     - dae
@@ -275,11 +278,11 @@ prehook_deps:
       CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/fos/node_feat.npy
       CM_DOWNLOAD_CHECKSUM: 3ef3df19e2475c387fec10bac82773df
       CM_DOWNLOAD_FILENAME: node_feat.npy
-      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/fos/
+      CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/fos/
     extra_cache_tags: dataset,igbh,fos,node_feat
     force_cache: true
     enable_if_env:
-      CM_IGBH_DATASET_TYPE:
+      CM_DATASET_IGBH_TYPE:
       - 'full'
     names:
     - dae
@@ -293,11 +296,11 @@ prehook_deps:
       CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/author__affiliated_to__institute/edge_index.npy
       CM_DOWNLOAD_CHECKSUM: e35dba208f81e0987207f78787c75711
       CM_DOWNLOAD_FILENAME: edge_index.npy
-      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/author__affiliated_to__institute/
+      CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/author__affiliated_to__institute/
     extra_cache_tags: dataset,igbh,author_affiliated_to_institute,edge_index
     force_cache: true
     enable_if_env:
-      CM_IGBH_DATASET_TYPE:
+      CM_DATASET_IGBH_TYPE:
       - 'full'
     names:
     - dae
@@ -311,11 +314,11 @@ prehook_deps:
       CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__published__journal/edge_index.npy
       CM_DOWNLOAD_CHECKSUM: 38505e83bde8e5cf94ae0a85afa60e13
       CM_DOWNLOAD_FILENAME: edge_index.npy
-      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/paper__published__journal/
+      CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/paper__published__journal/
     extra_cache_tags: dataset,igbh,paper_published_journal,edge_index
     force_cache: true
     enable_if_env:
-      CM_IGBH_DATASET_TYPE:
+      CM_DATASET_IGBH_TYPE:
       - 'full'
     names:
     - dae
@@ -329,11 +332,11 @@ prehook_deps:
       CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__topic__fos/edge_index.npy
       CM_DOWNLOAD_CHECKSUM: 427fb350a248ee6eaa8c21cde942fda4
       CM_DOWNLOAD_FILENAME: edge_index.npy
-      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/paper__topic__fos/
+      CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/paper__topic__fos/
     extra_cache_tags: dataset,igbh,paper_topic_fos,edge_index
     force_cache: true
     enable_if_env:
-      CM_IGBH_DATASET_TYPE:
+      CM_DATASET_IGBH_TYPE:
       - 'full'
     names:
     - dae
@@ -347,11 +350,11 @@ prehook_deps:
       CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__venue__conference/edge_index.npy
       CM_DOWNLOAD_CHECKSUM: 541b8d43cd93579305cfb71961e10a7d
       CM_DOWNLOAD_FILENAME: edge_index.npy
-      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/paper__venue__conference/
+      CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/paper__venue__conference/
     extra_cache_tags: dataset,igbh,paper_venue_conference,edge_index
     force_cache: true
     enable_if_env:
-      CM_IGBH_DATASET_TYPE:
+      CM_DATASET_IGBH_TYPE:
       - 'full'
     names:
     - dae
@@ -365,11 +368,11 @@ prehook_deps:
       CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__written_by__author/edge_index.npy
       CM_DOWNLOAD_CHECKSUM: df39fe44bbcec93a640400e6d81ffcb5
       CM_DOWNLOAD_FILENAME: edge_index.npy
-      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/paper__written_by__author/
+      CM_DOWNLOAD_PATH: <<<CM_DATASET_IGBH_DOWNLOAD_LOCATION>>>/full/processed/paper__written_by__author/
     extra_cache_tags: dataset,igbh,paper_written_by_author,edge_index
     force_cache: true
     enable_if_env:
-      CM_IGBH_DATASET_TYPE:
+      CM_DATASET_IGBH_TYPE:
       - 'full'
     names:
     - dae
@@ -383,13 +386,13 @@ variations:
     default: true
     group: dataset-type
     env:
-      CM_IGBH_DATASET_TYPE: debug
-      CM_IGBH_DATASET_SIZE: tiny
+      CM_DATASET_IGBH_TYPE: debug
+      CM_DATASET_IGBH_SIZE: tiny
   full:
     group: dataset-type
     env:
-      CM_IGBH_DATASET_TYPE: full
-      CM_IGBH_DATASET_SIZE: full
+      CM_DATASET_IGBH_TYPE: full
+      CM_DATASET_IGBH_SIZE: full
   glt:
     env:
       CM_IGBH_GRAPH_COMPRESS: yes
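
Each CM_DOWNLOAD_CHECKSUM above is a 32-hex-digit digest that CM's download helper verifies after fetching; the length suggests MD5, which is an assumption here. A standalone spot-check of a single artifact could look like this:

import hashlib

def md5sum(path, chunk_size=1 << 20):
    digest = hashlib.md5()
    with open(path, 'rb') as f:
        # Stream in 1 MiB chunks so multi-GB .npy files need not fit in memory.
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

# e.g. for the paper node_feat.npy entry near the top of this file:
# assert md5sum('full/processed/paper/node_feat.npy') == '71058b9ac8011bafa1c5467504452d13'
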
diff --git a/script/get-dataset-mlperf-inference-igbh/customize.py b/script/get-dataset-mlperf-inference-igbh/customize.py
index a0e6f24a6..8f789bcad 100644
--- a/script/get-dataset-mlperf-inference-igbh/customize.py
+++ b/script/get-dataset-mlperf-inference-igbh/customize.py
@@ -19,9 +19,9 @@ def preprocess(i):
     graph_folder = os.path.join(
         env['CM_MLPERF_INFERENCE_SOURCE'], 'graph', 'R-GAT')
 
-    download_loc = env.get('CM_IGBH_DATASET_OUT_PATH', os.getcwd())
+    download_loc = env.get('CM_DATASET_IGBH_OUT_PATH', os.getcwd())
 
-    env['CM_IGBH_DATASET_DOWNLOAD_LOCATION'] = download_loc
+    env['CM_DATASET_IGBH_DOWNLOAD_LOCATION'] = download_loc
 
     run_cmd += f"cd {graph_folder} "
     x_sep = " && "
@@ -33,7 +33,7 @@ def preprocess(i):
 
     # split seeds
     run_cmd += x_sep + \
-        f"{env['CM_PYTHON_BIN_WITH_PATH']} tools/split_seeds.py --path {download_loc} --dataset_size {env['CM_DATASET_IGBH_SIZE']}"
+        f"{env['CM_PYTHON_BIN_WITH_PATH']} tools/split_seeds.py --path {download_loc} --dataset_size {env['CM_DATASET_IGBH_SIZE']} "
 
     # compress graph(for glt implementation)
     if env.get('CM_IGBH_GRAPH_COMPRESS', '') == "yes":
@@ -50,7 +50,7 @@ def postprocess(i):
     env = i['env']
 
     env['CM_DATASET_IGBH_PATH'] = env.get(
-        'CM_IGBH_DATASET_OUT_PATH', os.getcwd())
+        'CM_DATASET_IGBH_OUT_PATH', os.getcwd())
 
     print(
         f"Path to the IGBH dataset: {os.path.join(env['CM_DATASET_IGBH_PATH'], env['CM_DATASET_IGBH_SIZE'])}")
diff --git a/script/get-generic-python-lib/customize.py b/script/get-generic-python-lib/customize.py
index 6421a22a1..57f25127f 100644
--- a/script/get-generic-python-lib/customize.py
+++ b/script/get-generic-python-lib/customize.py
@@ -189,4 +189,8 @@ def postprocess(i):
     if pip_version and len(pip_version) > 1 and int(pip_version[0]) >= 23:
         env['CM_PYTHON_PIP_COMMON_EXTRA'] = " --break-system-packages"
 
+    if version.count('.') > 1:
+        env[f"{env_version_key}_MAJOR_MINOR"] = ".".join(
+            version.split(".")[:2])
+
     return {'return': 0, 'version': version}
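
The new _MAJOR_MINOR key is only set for versions with at least three components; shorter versions leave it undefined. A quick demonstration of the exact expression added above:

for version in ["2.4.1", "2.4", "2"]:
    if version.count('.') > 1:
        print(version, "->", ".".join(version.split(".")[:2]))
    else:
        print(version, "-> (MAJOR_MINOR key not set)")
# 2.4.1 -> 2.4
# 2.4 -> (MAJOR_MINOR key not set)
# 2 -> (MAJOR_MINOR key not set)
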
diff --git a/script/get-ml-model-rgat/_cm.yaml b/script/get-ml-model-rgat/_cm.yaml
index 644bf688a..d7615acd2 100644
--- a/script/get-ml-model-rgat/_cm.yaml
+++ b/script/get-ml-model-rgat/_cm.yaml
@@ -13,6 +13,7 @@ input_mapping:
 new_env_keys:
 - CM_ML_MODEL_*
 - CM_ML_MODEL_RGAT_CHECKPOINT_PATH
+- RGAT_CHECKPOINT_PATH
 prehook_deps:
 - enable_if_env:
     CM_DOWNLOAD_TOOL:
diff --git a/script/get-ml-model-rgat/customize.py b/script/get-ml-model-rgat/customize.py
index ac8feaad7..df810a5ab 100644
--- a/script/get-ml-model-rgat/customize.py
+++ b/script/get-ml-model-rgat/customize.py
@@ -25,6 +25,7 @@ def postprocess(i):
     elif env.get('CM_ML_MODEL_PATH', '') == '':
         env['CM_ML_MODEL_PATH'] = env['CM_ML_MODEL_RGAT_CHECKPOINT_PATH']
 
+    env['RGAT_CHECKPOINT_PATH'] = env['CM_ML_MODEL_RGAT_CHECKPOINT_PATH']
     env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_RGAT_CHECKPOINT_PATH']
 
     return {'return': 0}
diff --git a/script/get-mlperf-inference-src/_cm.yaml b/script/get-mlperf-inference-src/_cm.yaml
index c5e195a88..c100e32e8 100644
--- a/script/get-mlperf-inference-src/_cm.yaml
+++ b/script/get-mlperf-inference-src/_cm.yaml
@@ -142,11 +142,11 @@ versions:
       CM_TMP_GIT_URL: https://github.com/neuralmagic/inference
   main:
     env:
-      CM_MLPERF_LAST_RELEASE: v4.1
+      CM_MLPERF_LAST_RELEASE: v5.0
       CM_TMP_GIT_CHECKOUT: main
   master:
     env:
-      CM_MLPERF_LAST_RELEASE: v4.1
+      CM_MLPERF_LAST_RELEASE: v5.0
       CM_TMP_GIT_CHECKOUT: master
   r2.1:
     env:
diff --git a/script/run-mlperf-inference-app/_cm.yaml b/script/run-mlperf-inference-app/_cm.yaml
index 05ae0d476..689aaabc3 100644
--- a/script/run-mlperf-inference-app/_cm.yaml
+++ b/script/run-mlperf-inference-app/_cm.yaml
@@ -8,9 +8,6 @@ category: Modular MLPerf inference benchmark pipeline
 
 developers: "[Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Grigori Fursin](https://cKnowledge.org/gfursin)"
 
-gui:
-  title: CM GUI to run MLPerf inference benchmarks and prepare submissions
-
 clean_output_files:
 - open.tar.gz
 - summary.csv
@@ -202,11 +199,6 @@ variations:
     env:
       CM_MLPERF_LOADGEN_COMPLIANCE: 'yes'
 
-  dashboard:
-    default_gui: false
-    env:
-      CM_MLPERF_DASHBOARD: 'on'
-
   find-performance:
     env:
       CM_MLPERF_FIND_PERFORMANCE_MODE: 'yes'
@@ -396,7 +388,6 @@ variations:
   submission:
     base:
     - all-modes
-    default_gui: true
    default_variations:
      submission-generation-style: full
    env:
diff --git a/script/run-mlperf-inference-app/customize.py b/script/run-mlperf-inference-app/customize.py
index 9b5f350d4..480beda5a 100644
--- a/script/run-mlperf-inference-app/customize.py
+++ b/script/run-mlperf-inference-app/customize.py
@@ -433,587 +433,3 @@ def get_url(url, path, path2, name, text):
         url_online = '[{}]({})'.format(text, urlx)
 
     return {'return': 0, 'url_online': url_online}
-
-##########################################################################
-
-
-def gui(i):
-
-    params = i['params']
-    st = i['st']
-
-    script_meta = i['meta']
-
-    misc = i['misc_module']
-
-    script_path = i['script_path']
-    script_url = i.get('script_url', '')
-    script_tags = i.get('script_tags', '')
-
-    compute_meta = i.get('compute_meta', {})
-    compute_tags = compute_meta.get('tags', [])
-    bench_meta = i.get('bench_meta', {})
-
-    compute_uid = compute_meta.get('uid', '')
-    bench_uid = bench_meta.get('uid', '')
-
-    st_inputs_custom = {}
-
-    bench_input = bench_meta.get('bench_input', {})
-
-    end_html = ''
-
-    extra = {}
-    add_to_st_inputs = {}
-
-    inp = script_meta['input_description']
-
-    # Here we can update params
-    v = compute_meta.get('mlperf_inference_device')
-    if v is not None and v != '':
-        inp['device']['force'] = v
-
-    if v in ['tpu', 'gaudi']:
-        st.markdown('----')
-        st.markdown(
-            '**WARNING: unified CM workflow support for this hardware is pending - please [feel free to help](https://discord.gg/JjWNWXKxwT)!**')
-        return {'return': 0, 'skip': True, 'end_html': end_html}
-
-    elif 'orin' in compute_tags:
-        st.markdown('----')
-        st.markdown(
-            '**WARNING: we need to encode CM knowledge from [this Orin setp](https://github.com/mlcommons/ck/blob/master/docs/mlperf/setup/setup-nvidia-jetson-orin.md) to this GUI!**')
-        return {'return': 0, 'skip': True, 'end_html': end_html}
-
-    st.markdown('---')
-    st.markdown('**How would you like to run the MLPerf inference benchmark?**')
-
-    r = misc.make_selector({'st': st,
-                            'st_inputs': st_inputs_custom,
-                            'params': params,
-                            'key': 'mlperf_inference_device',
-                            'desc': inp['device']})
-    device = r.get('value2')
-    inp['device']['force'] = device
-
-    if device == 'cpu':
-        inp['implementation']['choices'] = ['mlcommons-python',
-                                            'mlcommons-cpp', 'intel', 'ctuning-cpp-tflite']
-        if 'intel' in compute_tags:
-            inp['implementation']['default'] = 'intel'
-        else:
-            inp['implementation']['default'] = 'mlcommons-python'
-            inp['backend']['choices'] = [
-                'onnxruntime', 'deepsparse', 'pytorch', 'tf', 'tvm-onnx']
-            inp['backend']['default'] = 'onnxruntime'
-    elif device == 'rocm':
-        inp['implementation']['force'] = 'mlcommons-python'
-        inp['precision']['force'] = ''
-        inp['backend']['force'] = 'onnxruntime'
-        st.markdown(
-            '*WARNING: CM-MLPerf inference workflow was not tested thoroughly for AMD GPU - please feel free to test and improve!*')
-    elif device == 'qaic':
-        inp['implementation']['force'] = 'qualcomm'
-        inp['precision']['force'] = ''
-        inp['backend']['force'] = 'glow'
-
-    r = misc.make_selector({'st': st,
-                            'st_inputs': st_inputs_custom,
-                            'params': params,
-                            'key': 'mlperf_inference_division',
-                            'desc': inp['division']})
-    division = r.get('value2')
-    inp['division']['force'] = division
-
-    y = 'compliance'
-    if division == 'closed':
-        inp[y]['default'] = 'yes'
-        r = misc.make_selector({'st': st,
-                                'st_inputs': st_inputs_custom,
-                                'params': params,
-                                'key': 'mlperf_inference_compliance',
-                                'desc': inp[y]})
-        compliance = r.get('value2')
-        inp[y]['force'] = compliance
-
-        if compliance == 'yes':
-            st.markdown(
-                '*:red[See [online table with required compliance tests](https://github.com/mlcommons/policies/blob/master/submission_rules.adoc#5132-inference)].*')
-
-    else:
-        inp[y]['force'] = 'no'
-
-    r = misc.make_selector({'st': st,
-                            'st_inputs': st_inputs_custom,
-                            'params': params,
-                            'key': 'mlperf_inference_category',
-                            'desc': inp['category']})
-    category = r.get('value2')
-    inp['category']['force'] = category
-
-    ##########################################################################
-    # Implementation
-    v = bench_input.get('mlperf_inference_implementation')
-    if v is not None and v != '':
-        inp['implementation']['force'] = v
-    else:
-        if device == 'cuda':
-            inp['implementation']['choices'] = [
-                'nvidia', 'mlcommons-python', 'mlcommons-cpp']
-            inp['implementation']['default'] = 'nvidia'
-            inp['backend']['choices'] = ['tensorrt', 'onnxruntime', 'pytorch']
-            inp['backend']['default'] = 'tensorrt'
-
-    r = misc.make_selector({'st': st,
-                            'st_inputs': st_inputs_custom,
-                            'params': params,
-                            'key': 'mlperf_inference_implementation',
-                            'desc': inp['implementation']})
-    implementation = r.get('value2')
-    inp['implementation']['force'] = implementation
-
-    implementation_setup = ''
-    r = load_md(script_path, 'setup', 'i-' + implementation)
-    if r['return'] == 0:
-        implementation_setup = r['string']
-
-    url_faq_implementation = ''
-    r = get_url(script_url, script_path, 'faq', implementation, 'FAQ online')
-    if r['return'] == 0:
-        url_faq_implementation = r['url_online']
-
-    can_have_docker_flag = False
-
-    if implementation == 'mlcommons-cpp':
-        # inp['backend']['choices'] = ['onnxruntime']
-        inp['precision']['force'] = 'float32'
-        inp['backend']['force'] = 'onnxruntime'
-        inp['model']['choices'] = ['resnet50', 'retinanet']
-        st.markdown(
-            '*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-mlcommons-cpp)]*')
-    elif implementation == 'mlcommons-python':
-        inp['precision']['force'] = 'float32'
-        if device == 'cuda':
-            inp['backend']['choices'] = ['onnxruntime', 'pytorch', 'tf']
-            inp['backend']['default'] = 'onnxruntime'
-        st.markdown(
-            '*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-mlcommons-python)]*')
-    elif implementation == 'ctuning-cpp-tflite':
-        inp['precision']['force'] = 'float32'
-        inp['model']['force'] = 'resnet50'
-        st.markdown(
-            '*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-ctuning-cpp-tflite)]*')
-    elif implementation == 'nvidia':
-        inp['backend']['force'] = 'tensorrt'
-        extra['skip_script_docker_func'] = True
-        can_have_docker_flag = True
-        st.markdown(
-            '*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-nvidia)]*')
-    elif implementation == 'intel':
-        inp['model']['choices'] = ['bert-99', 'gptj-99']
-        inp['model']['default'] = 'bert-99'
-        inp['precision']['choices'] = ['int8', 'int4']
-        inp['precision']['default'] = 'int8'
-        inp['category']['force'] = 'datacenter'
-        inp['backend']['force'] = 'pytorch'
-        inp['sut']['default'] = 'sapphire-rapids.112c'
-        can_have_docker_flag = True
-        extra['skip_script_docker_func'] = True
-#        st.markdown('*:red[Note: Intel implementation require extra CM command to build and run Docker container - you will run CM commands to run MLPerf benchmarks there!]*')
-        st.markdown(
-            '*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-inference-intel)]*')
-    elif implementation == 'qualcomm':
-        inp['model']['choices'] = ['resnet50', 'retinanet', 'bert-99']
-        inp['model']['default'] = 'bert-99'
-        inp['precision']['default'] = 'float16'
-        extra['skip_script_docker_func'] = True
-        st.markdown(
-            '*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-inference-qualcomm)]*')
-
-    ##########################################################################
-    # Backend
-
-    r = misc.make_selector({'st': st,
-                            'st_inputs': st_inputs_custom,
-                            'params': params,
-                            'key': 'mlperf_inference_backend',
-                            'desc': inp['backend']})
-    backend = r.get('value2')
-    inp['backend']['force'] = backend
-
-    backend_setup = ''
-    r = load_md(script_path, 'setup', 'b-' + backend)
-    if r['return'] == 0:
-        backend_setup = r['string']
-
-    if backend == 'deepsparse':
-        inp['model']['choices'] = [
-            'resnet50', 'retinanet', 'bert-99', 'bert-99.9']
-        inp['model']['default'] = 'bert-99'
-        inp['precision']['choices'] = ['float32', 'int8']
-        inp['precision']['default'] = 'int8'
-        if 'force' in inp['precision']:
-            del (inp['precision']['force'])
-
-    ##########################################################################
-    # Model
-    r = misc.make_selector({'st': st,
-                            'st_inputs': st_inputs_custom,
-                            'params': params,
-                            'key': 'mlperf_inference_model',
-                            'desc': inp['model']})
-    model = r.get('value2')
-    inp['model']['force'] = model
-
-    github_doc_model = ''
-
-    if model == 'retinanet':
-        x = '50'
-        if implementation == 'mlcommons-python':
-            x = '200'
-        st.markdown(
-            ':red[This model requires ~{}GB of free disk space for preprocessed dataset in a full/submission run!]\n'.format(x))
-
-    elif model.startswith('bert-'):
-        github_doc_model = 'bert'
-
-    elif model.startswith('3d-unet-'):
-        github_doc_model = '3d-unet'
-
-    elif model == 'rnnt':
-        github_doc_model = 'rnnt'
-
-    elif model.startswith('dlrm-v2-'):
-        github_doc_model = 'dlrm_v2'
-
-    elif model.startswith('gptj-'):
-        github_doc_model = 'gpt-j'
-
-    elif model == 'sdxl':
-        github_doc_model = 'stable-diffusion-xl'
-
-    elif model.startswith('llama2-'):
-        github_doc_model = 'llama2-70b'
-
-    elif model.startswith('mixtral-'):
-        github_doc_model = 'mixtral-8x7b'
-
-    if github_doc_model == '':
-        github_doc_model = model
-
-    model_cm_url = 'https://github.com/mlcommons/ck/tree/master/docs/mlperf/inference/{}'.format(
-        github_doc_model)
-    extra_notes_online = '[Extra notes online]({})\n'.format(model_cm_url)
-
-    st.markdown(
-        '*[CM-MLPerf GitHub docs for this model]({})*'.format(model_cm_url))
-
-    ##########################################################################
-    # Precision
-    if implementation == 'intel':
-        if model == 'bert-99':
-            inp['precision']['force'] = 'int8'
-        elif model == 'gptj-99':
-            inp['precision']['force'] = 'int4'
-    elif implementation == 'qualcomm':
-        if model == 'resnet50':
-            inp['precision']['print'] = 'int8'
-        elif model == 'retinanet':
-            inp['precision']['print'] = 'int8'
-        elif model == 'bert-99':
-            inp['precision']['print'] = 'int8/float16'
-
-    if inp['precision'].get('force', '') == '':
-        x = inp['precision'].get('print', '')
-        if x != '':
-            st.markdown('**{}**: {}'.format(inp['precision']['desc'], x))
-        else:
-            r = misc.make_selector({'st': st,
-                                    'st_inputs': st_inputs_custom,
-                                    'params': params,
-                                    'key': 'mlperf_inference_precision',
-                                    'desc': inp['precision']})
-            precision = r.get('value2')
-            inp['precision']['force'] = precision
-
-    ##########################################################################
-    # Benchmark version
-
-    script_meta_variations = script_meta['variations']
-
-    choices = [''] + [
-        k for k in script_meta_variations if script_meta_variations[k].get(
-            'group', '') == 'benchmark-version']
-    desc = {
-        'choices': choices,
-        'default': choices[0],
-        'desc': 'Force specific benchmark version?'}
-    r = misc.make_selector({'st': st,
-                            'st_inputs': st_inputs_custom,
-                            'params': params,
-                            'key': 'mlperf_inference_version',
-                            'desc': desc})
-    benchmark_version = r.get('value2')
-
-    if benchmark_version != '':
-        params['~~benchmark-version'] = [benchmark_version]
-
-    ##########################################################################
-    # Run via Docker container
-    if can_have_docker_flag:
-
-        default_choice = 'yes - run in container'
-
-        choices = [default_choice, 'no - run natively']
-        desc = {
-            'choices': choices,
-            'default': choices[0],
-            'desc': 'Should CM script prepare and run Docker container in interactive mode to run MLPerf? You can then copy/paste CM commands generated by this GUI to benchmark different models.'}
-        r = misc.make_selector({'st': st,
-                                'st_inputs': st_inputs_custom,
-                                'params': params,
-                                'key': 'mlperf_inference_docker',
-                                'desc': desc})
-        benchmark_docker = r.get('value2')
-
-        if benchmark_docker == 'yes - run in container':
-            add_to_st_inputs['@docker'] = True
-            add_to_st_inputs['@docker_cache'] = 'no'
-
-    ##########################################################################
-    # Prepare submission
-    st.markdown('---')
-
-    submission = st.toggle(
-        'Would you like to prepare official submission?',
-        value=False)
-    if submission:
-        r = misc.make_selector({'st': st,
-                                'st_inputs': st_inputs_custom,
-                                'params': params,
-                                'key': 'mlperf_inference_hw_name',
-                                'desc': inp['hw_name']})
-        inp['hw_name']['force'] = r.get('value2')
-
-        r = misc.make_selector({'st': st,
-                                'st_inputs': st_inputs_custom,
-                                'params': params,
-                                'key': 'mlperf_inference_submitter',
-                                'desc': inp['submitter']})
-        submitter = r.get('value2')
-        inp['submitter']['force'] = submitter
-
-        params['~~submission-generation'] = ['submission']
-        params['~all-scenarios'] = ['true']
-        inp['scenario']['force'] = ''
-        inp['clean']['default'] = False
-        inp['repro']['force'] = True
-
-        x = '*:red[Use the following command to find local directory with the submission tree and results:]*\n```bash\ncm find cache --tags=submission,dir\n```\n'
-
-        x += '*:red[You will also find results in `mlperf-inference-submission.tar.gz` file that you can submit to MLPerf!]*\n\n'
-
-        x += '*:red[Note that if some results are INVALID due to too short run, you can rerun the same CM command and it should increase the length of the benchmark until you get valid result!]*\n'
-
-        st.markdown(x)
-
-        st.markdown('---')
-
-    else:
-        inp['submitter']['force'] = ''
-        inp['clean']['default'] = True
-        params['~submission'] = ['false']
-
-        choices = [
-            'Performance',
-            'Accuracy',
-            'Find Performance from a short run',
-            'Performance and Accuracy']
-        desc = {
-            'choices': choices,
-            'default': choices[0],
-            'desc': 'What to measure?'}
-        r = misc.make_selector({'st': st,
-                                'st_inputs': st_inputs_custom,
-                                'params': params,
-                                'key': 'mlperf_inference_measure',
-                                'desc': desc})
-        measure = r.get('value2')
-
-        x = ''
-        if measure == 'Performance':
-            x = 'performance-only'
-        elif measure == 'Accuracy':
-            x = 'accuracy-only'
-        elif measure == 'Find Performance from a short run':
-            x = 'find-performance'
-        elif measure == 'Performance and Accuracy':
-            x = 'submission'
-
-        params['~~submission-generation'] = [x]
-
-    #######################################################################
-    # Prepare scenario
-
-    xall = 'All applicable'
-    choices = ['Offline', 'Server', 'SingleStream', 'MultiStream', xall]
-    desc = {
-        'choices': choices,
-        'default': choices[0],
-        'desc': 'Which scenario(s)?'}
-    r = misc.make_selector({'st': st,
-                            'st_inputs': st_inputs_custom,
-                            'params': params,
-                            'key': 'mlperf_inference_scenario',
-                            'desc': desc})
-    scenario = r.get('value2')
-
-    if scenario == xall:
-        params['~all-scenarios'] = ['true']
-        inp['scenario']['force'] = ''
-    else:
-        inp['scenario']['force'] = scenario
-
-    ##########################################################################
-    # Short or full run
-
-    x = ['Full run', 'Short run']
-    if submission:
-        choices = [x[0], x[1]]
-    else:
-        choices = [x[1], x[0]]
-
-    desc = {
-        'choices': choices,
-        'default': choices[0],
-        'desc': 'Short (test) or full (valid) run?'}
-    r = misc.make_selector({'st': st,
-                            'st_inputs': st_inputs_custom,
-                            'params': params,
-                            'key': 'mlperf_inference_how',
-                            'desc': desc})
-    how = r.get('value2')
-
-    if how == x[0]:
-        params['~~submission-generation-style'] = ['full']
-        inp['execution_mode']['force'] = 'valid'
-    else:
-        params['~~submission-generation-style'] = ['short']
-        inp['execution_mode']['force'] = 'test'
-
-    ##########################################################################
-    # Power
-
-#    desc = {'boolean':True, 'default':False, 'desc':'Measure power?'}
-#    r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_power', 'desc':desc})
-#    power = r.get('value2', False)
-
-    power = st.toggle('Measure power consumption?', value=False)
-
-    if power:
-        inp['power']['force'] = 'yes'
-
-        y = 'adr.mlperf-power-client.power_server'
-        r = misc.make_selector({'st': st,
-                                'st_inputs': st_inputs_custom,
-                                'params': params,
-                                'key': 'mlperf_inference_power_server',
-                                'desc': inp[y]})
-        inp[y]['force'] = r.get('value2')
-
-        y = 'adr.mlperf-power-client.port'
-        r = misc.make_selector({'st': st,
-                                'st_inputs': st_inputs_custom,
-                                'params': params,
-                                'key': 'mlperf_inference_power_port',
-                                'desc': inp[y]})
-        inp[y]['force'] = r.get('value2')
-
-        st.markdown(
-            '*:red[See [online notes](https://github.com/mlcommons/ck/blob/master/docs/tutorials/mlperf-inference-power-measurement.md)] to setup power meter and server.*')
-
-    else:
-        inp['power']['force'] = 'no'
-        inp['adr.mlperf-power-client.power_server']['force'] = ''
-        inp['adr.mlperf-power-client.port']['force'] = ''
-
-    ##########################################################################
-    # Dashboard
-
-#    desc = {'boolean':True, 'default':False, 'desc':'Output results to W&B dashboard?'}
-#    r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_dashboard', 'desc':desc})
-#    dashboard = r.get('value2', False)
-
-    dashboard = st.toggle('Output results to W&B dashboard?', value=False)
-
-    if dashboard:
-        params['~dashboard'] = ['true']
-
-        y = 'dashboard_wb_project'
-        r = misc.make_selector({'st': st,
-                                'st_inputs': st_inputs_custom,
-                                'params': params,
-                                'key': 'mlperf_inference_power_wb_project',
-                                'desc': inp[y]})
-        inp[y]['force'] = r.get('value2')
-
-        y = 'dashboard_wb_user'
-        r = misc.make_selector({'st': st,
-                                'st_inputs': st_inputs_custom,
-                                'params': params,
-                                'key': 'mlperf_inference_power_wb_user',
-                                'desc': inp[y]})
-        inp[y]['force'] = r.get('value2')
-
-    else:
-        params['~dashboard'] = ['false']
-        inp['dashboard_wb_project']['force'] = ''
-        inp['dashboard_wb_user']['force'] = ''
-
-    # Hide customization by default
-    params['hide_script_customization'] = True
-
-    x = implementation_setup
-    if backend_setup != '':
-        if x != '':
-            x += '\n\n'
-        x += backend_setup
-
-    extra['extra_notes_online'] = extra_notes_online
-    extra['extra_faq_online'] = url_faq_implementation
-    extra['extra_setup'] = x
-
-    ##########################################################################
-    value_reproduce = inp.get('repro', {}).get('force', False)
-    reproduce = st.toggle(
-        'Record extra info for reproducibility?',
-        value=value_reproduce)
-
-    explore = st.toggle(
-        'Explore/tune benchmark (batch size, threads, etc)?',
-        value=False)
-
-    if reproduce or explore:
-        add_to_st_inputs.update({
-            "@repro_extra.run-mlperf-inference-app.bench_uid": bench_uid,
-            "@repro_extra.run-mlperf-inference-app.compute_uid": compute_uid,
-            '@results_dir': '{{CM_EXPERIMENT_PATH3}}',
-            '@submission_dir': '{{CM_EXPERIMENT_PATH3}}'
-        })
-
-        inp['repro']['force'] = True
-        extra['use_experiment'] = True
-
-    if explore:
-        add_to_st_inputs['@batch_size'] = '{{CM_EXPLORE_BATCH_SIZE{[1,2,4,8]}}}'
-
-    ##########################################################################
-    debug = st.toggle(
-        'Debug and run MLPerf benchmark natively from command line after CM auto-generates CMD?',
-        value=False)
-    if debug:
-        inp['debug']['force'] = True
-
-    extra['add_to_st_inputs'] = add_to_st_inputs
-
-    return {'return': 0, 'end_html': end_html, 'extra': extra}
diff --git a/script/test-cm-core/src/script/test_docker.py b/script/test-cm-core/src/script/test_docker.py
index ad867a2a1..1b63631c6 100644
--- a/script/test-cm-core/src/script/test_docker.py
+++ b/script/test-cm-core/src/script/test_docker.py
@@ -10,7 +10,7 @@
     'add_deps_recursive': {
         'compiler': {'tags': "gcc"}
     },
-    'docker_cm_repo': 'mlcommons@cm4mlops',
+    'docker_cm_repo': 'mlcommons@mlperf-automations',
     'image_name': 'cm-script-app-image-classification-onnx-py',
     'env': {
         'CM_DOCKER_RUN_SCRIPT_TAGS': 'app,image-classification,onnx,python',
@@ -27,7 +27,7 @@
     'add_deps_recursive': {
         'compiler': {'tags': "gcc"}
     },
-    'docker_cm_repo': 'mlcommons@cm4mlops',
+    'docker_cm_repo': 'mlcommons@mlperf-automations',
     'image_name': 'cm-script-app-image-classification-onnx-py',
     'env': {
         'CM_DOCKER_RUN_SCRIPT_TAGS': 'app,image-classification,onnx,python',