Skip to content

Commit e6d607c

Browse files
authored
Fix Nvidia implementation github action, support mlperf-inference-submission-preprocessor-args, mobilenet models
* Add quote for llvm path * cleanup * Added alternative URL for r50 onnx opset11 * Fix Nvidia implementation github action, support mlperf-inference-submission-preprocessor-args * Fix import for mobilenet downloads * Fix import for efficientnet downloads
1 parent fef1458 commit e6d607c

File tree

12 files changed

+33
-17
lines changed

12 files changed

+33
-17
lines changed

.github/workflows/build_wheel.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@ name: Build wheel and release into PYPI
33
on:
44
release:
55
types: [published]
6+
67
push:
78
branches:
89
- dev

.github/workflows/test-mlperf-inference-resnet50.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -53,11 +53,11 @@ jobs:
5353
- name: Test MLPerf Inference ResNet50 (Windows)
5454
if: matrix.os == 'windows-latest'
5555
run: |
56-
mlcr --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name=gh_${{ matrix.os }}_x86 --model=resnet50 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet
56+
mlcr --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name="gh_${{ matrix.os }} x86" --model=resnet50 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet
5757
- name: Test MLPerf Inference ResNet50 (Linux/macOS)
5858
if: matrix.os != 'windows-latest'
5959
run: |
60-
mlcr --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name=gh_${{ matrix.os }}_x86 --model=resnet50 --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet
60+
mlcr --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name="gh_${{ matrix.os }} x86" --model=resnet50 --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet
6161
# Step for Linux/MacOS
6262
- name: Randomly Execute Step (Linux/MacOS)
6363
if: runner.os != 'Windows'

.github/workflows/test-nvidia-mlperf-inference-implementations.yml

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,11 @@ jobs:
4545
gpu_name=rtx_4090
4646
docker_string=" --docker"
4747
fi
48+
if [ "${{ matrix.model }}" = "bert-99.9" ]; then
49+
submission_preprocessor_args=" --noinfer-low-accuracy-results"
50+
else
51+
submission_preprocessor_args=""
52+
fi
4853
category="datacenter,edge"
4954
if [ -f "gh_action/bin/deactivate" ]; then source gh_action/bin/deactivate; fi
5055
python3 -m venv gh_action
@@ -53,6 +58,6 @@ jobs:
5358
pip install --upgrade mlcflow
5459
mlc pull repo mlcommons@mlperf-automations --branch=dev
5560
56-
mlcr --tags=run-mlperf,inference,_all-scenarios,_submission,_full,_r5.0-dev --preprocess_submission=yes --pull_changes=yes --pull_inference_changes=yes --execution_mode=valid --gpu_name=$gpu_name --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=$hw_name --implementation=nvidia --backend=tensorrt --category=$category --division=closed --docker_dt --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=cuda --use_model_from_host=yes --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean $docker_string --quiet
61+
mlcr --tags=run-mlperf,inference,_all-scenarios,_submission,_full,_r5.0-dev --preprocess_submission=yes --pull_changes=yes --pull_inference_changes=yes --execution_mode=valid --gpu_name=$gpu_name --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=$hw_name --implementation=nvidia --backend=tensorrt --category=$category --division=closed --docker_dt --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=cuda --use_model_from_host=yes --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean $docker_string $submission_preprocessor_args --quiet
5762
5863
mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from GH action on NVIDIA_$hw_name" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=$hw_name

script/generate-mlperf-inference-submission/customize.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -535,12 +535,12 @@ def generate_submission(env, state, inp, submission_division):
535535
measurements_json = json.load(f)
536536
model_precision = measurements_json.get(
537537
"weight_data_types", "fp32")
538-
'''shutil.copy(
538+
shutil.copy(
539539
measurements_json_path,
540540
os.path.join(
541541
target_measurement_json_path,
542542
sub_res + '.json'))
543-
'''
543+
544544
shutil.copy(
545545
measurements_json_path,
546546
os.path.join(

script/generate-mlperf-inference-submission/meta.yaml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -85,6 +85,7 @@ input_mapping:
8585
sw_notes_extra: MLC_MLPERF_SUT_SW_NOTES_EXTRA
8686
tar: MLC_TAR_SUBMISSION_DIR
8787
get_platform_details: MLC_GET_PLATFORM_DETAILS
88+
submission_preprocessor_args: MLC_MLPERF_PREPROCESS_SUBMISSION_EXTRA_ARGS
8889
version: MLC_MLPERF_SUBMISSION_CHECKER_VERSION
8990
post_deps:
9091
- enable_if_env:

script/get-ml-model-efficientnet-lite/customize.py

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
1-
from mlc import utils
21
import os
2+
from utils import *
33

44

55
def preprocess(i):
@@ -19,9 +19,7 @@ def preprocess(i):
1919

2020
print('Downloading from {}'.format(url))
2121

22-
r = cm.access({'action': 'download_file',
23-
'automation': 'utils,dc2743f8450541e3',
24-
'url': url})
22+
r = download_file({'url': url})
2523
if r['return'] > 0:
2624
return r
2725

script/get-ml-model-mobilenet/customize.py

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
1-
from mlc import utils
21
import os
2+
from utils import *
33

44

55
def preprocess(i):
@@ -19,9 +19,7 @@ def preprocess(i):
1919

2020
print('Downloading from {}'.format(url))
2121

22-
r = cm.access({'action': 'download_file',
23-
'automation': 'utils,dc2743f8450541e3',
24-
'url': url})
22+
r = download_file({'url': url})
2523
if r['return'] > 0:
2624
return r
2725

script/get-mlperf-inference-sut-configs/configs/RTX4090x2/nvidia_original-implementation/gpu-device/tensorrt-framework/framework-version-default/default-config.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -39,4 +39,4 @@
3939
Offline:
4040
target_qps: 1.3
4141
Server:
42-
target_qps: 0.4
42+
target_qps: 0.25

script/preprocess-mlperf-inference-submission/customize.py

Lines changed: 14 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
import os
33
from os.path import exists
44
import shutil
5+
from utils import *
56

67

78
def preprocess(i):
@@ -31,8 +32,20 @@ def preprocess(i):
3132
version = env.get('MLC_MLPERF_SUBMISSION_CHECKER_VERSION', '')
3233
x_version = ' --version ' + version + ' ' if version != '' else ''
3334

35+
extra_args = []
36+
if is_true(env.get('MLC_MLPERF_NOINFER_LOW_ACCURACY_RESULTS')):
37+
extra_args.append("--noinfer-low-accuracy-results")
38+
if is_true(env.get('MLC_MLPERF_NODELETE_EMPTY_DIRS')):
39+
extra_args.append("--nodelete-empty-dirs")
40+
if is_true(env.get('MLC_MLPERF_NOMOVE_FAILED_TO_OPEN')):
41+
extra_args.append("--nomove-failed-to-open")
42+
if is_true(env.get('MLC_MLPERF_NODELETE_FAILED')):
43+
extra_args.append("--nodelete-failed")
44+
if env.get('MLC_MLPERF_PREPROCESS_SUBMISSION_EXTRA_ARGS', '') != '':
45+
extra_args.append(env['MLC_MLPERF_PREPROCESS_SUBMISSION_EXTRA_ARGS'])
46+
3447
CMD = env['MLC_PYTHON_BIN'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "tools", "submission",
35-
"preprocess_submission.py") + "' --input '" + submission_dir + "' --submitter '" + submitter + "' --output '" + submission_processed + "'" + x_version
48+
"preprocess_submission.py") + "' --input '" + submission_dir + "' --submitter '" + submitter + "' --output '" + submission_processed + "'" + x_version + " " + " ".join(extra_args)
3649
env['MLC_RUN_CMD'] = CMD
3750

3851
return {'return': 0}

script/preprocess-mlperf-inference-submission/meta.yaml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@ input_mapping:
2424
submission_dir: MLC_MLPERF_INFERENCE_SUBMISSION_DIR
2525
version: MLC_MLPERF_SUBMISSION_CHECKER_VERSION
2626
submitter: MLC_MLPERF_SUBMITTER
27+
submission_preprocessor_args: MLC_MLPERF_PREPROCESS_SUBMISSION_EXTRA_ARGS
2728
tags:
2829
- run
2930
- mlc

0 commit comments

Comments (0)