
Commit 275e911 (1 parent: f08c497)

Fix console outputs, R50 deepsparse, interactive sudo timeout

* Fix tags for mlcr
* Added a timeout for detect,sudo
* Fixes for R50 deepsparse

9 files changed: +53 −13 lines

.github/workflows/test-nvidia-mlperf-inference-implementations.yml

Lines changed: 4 additions & 3 deletions
@@ -2,7 +2,7 @@ name: MLPerf Inference Nvidia implementations
 
 on:
   schedule:
-    - cron: "35 01 * * *"
+    - cron: "05 01 * * *"
 
 jobs:
   run_nvidia:
@@ -23,7 +23,7 @@ jobs:
         model: [ "resnet50", "retinanet", "bert-99", "bert-99.9", "gptj-99.9", "3d-unet-99.9", "sdxl" ]
         exclude:
           - model: gptj-99.9
-          - system: phoenix
+          - system: phoenix1
           - system: GO-i9
 
     steps:
@@ -59,5 +59,6 @@ jobs:
           mlc pull repo mlcommons@mlperf-automations --branch=dev
 
           mlcr --tags=run-mlperf,inference,_all-scenarios,_submission,_full,_r5.0-dev --preprocess_submission=yes --pull_changes=yes --pull_inference_changes=yes --execution_mode=valid --gpu_name=$gpu_name --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=$hw_name --implementation=nvidia --backend=tensorrt --category=$category --division=closed --docker_dt --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=cuda --use_model_from_host=yes --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean $docker_string $submission_preprocessor_args --quiet
+          #mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from GH action on NVIDIA_$hw_name" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=$hw_name
+          mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/GATEOverflow/mlperf_inference_submissions_v5.0 --repo_branch=main --commit_message="Results from GH action on NVIDIA_$hw_name" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=$hw_name
 
-          mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from GH action on NVIDIA_$hw_name" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=$hw_name

script/app-mlperf-inference-mlcommons-python/customize.py

Lines changed: 8 additions & 4 deletions
@@ -3,6 +3,7 @@
 import json
 import shutil
 import subprocess
+from utils import *
 
 
 def preprocess(i):
@@ -51,12 +52,14 @@ def preprocess(i):
 
         env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += env['MLC_MLPERF_LOADGEN_QPS_OPT']
 
-    if 'MLC_NUM_THREADS' not in env:
-        if 'MLC_MINIMIZE_THREADS' in env:
+    if env.get('MLC_NUM_THREADS', '') == '':
+        if is_true(env.get('MLC_MINIMIZE_THREADS', '')) and env.get(
+                'MLC_HOST_CPU_TOTAL_CORES', '') != '':
             env['MLC_NUM_THREADS'] = str(int(env['MLC_HOST_CPU_TOTAL_CORES']) //
-                                         (int(env.get('MLC_HOST_CPU_SOCKETS', '1')) * int(env.get('MLC_HOST_CPU_TOTAL_CORES', '1'))))
+                                         (int(env.get('MLC_HOST_CPU_SOCKETS', '1'))))
         else:
             env['MLC_NUM_THREADS'] = env.get('MLC_HOST_CPU_TOTAL_CORES', '1')
+    env['CM_NUM_THREADS'] = env['MLC_NUM_THREADS']  # For inference code
 
     if env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '') != '' and str(env.get(
             'MLC_MLPERF_MODEL_SKIP_BATCHING', False)).lower() not in ["true", "1", "yes"]:
@@ -270,12 +273,13 @@ def get_run_cmd_reference(
             env['MODEL_FILE'] = env.get(
                 'MLC_MLPERF_CUSTOM_MODEL_PATH',
                 env.get('MLC_ML_MODEL_FILE_WITH_PATH'))
+
             if not env['MODEL_FILE']:
                 return {'return': 1, 'error': 'No valid model file found!'}
 
             env['LOG_PATH'] = env['MLC_MLPERF_OUTPUT_DIR']
 
-            extra_options = " --output " + env['MLC_MLPERF_OUTPUT_DIR'] + " --model-name resnet50 --dataset " + env['MLC_MLPERF_VISION_DATASET_OPTION'] + ' --max-batchsize ' + env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '1') + \
+            extra_options = " --output " + env['MLC_MLPERF_OUTPUT_DIR'] + " --model-name resnet50 --dataset " + env['MLC_MLPERF_VISION_DATASET_OPTION'] + f""" --max-batchsize {env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '1')}""" + \
                 " --dataset-path " + env['MLC_DATASET_PREPROCESSED_PATH'] + " --model " + env['MODEL_FILE'] + \
                 " --preprocessed_dir " + env['MLC_DATASET_PREPROCESSED_PATH']
 
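The key fix is in the second hunk: the old expression divided total cores by sockets times total cores, which collapses to 0 on any multi-core host. The guard also moves from membership tests to emptiness/truthiness checks (so an empty-string value no longer counts as set), and the result is mirrored into CM_NUM_THREADS for the inference code. A minimal sketch of the before/after arithmetic (the core and socket counts below are illustrative, not from the diff):

    # Illustrative host: 2 sockets, 32 cores total.
    total_cores, sockets = 32, 2

    # Old expression: cores // (sockets * cores) -> 0 whenever cores > 1,
    # so MLC_NUM_THREADS silently became "0".
    old_threads = total_cores // (sockets * total_cores)   # 32 // 64 == 0

    # New expression: cores per socket.
    new_threads = total_cores // sockets                   # 32 // 2 == 16

    print(old_threads, new_threads)  # 0 16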

script/app-mlperf-inference-mlcommons-python/meta.yaml

Lines changed: 8 additions & 0 deletions
@@ -888,6 +888,14 @@ variations:
       ml-model:
         tags: raw,_deepsparse
 
+  deepsparse,resnet50:
+    default_env:
+      DEEPSPARSE_NUM_STREAMS: 24
+      ENQUEUE_NUM_THREADS: 2
+      MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: 16
+      MLC_MLPERF_VISION_DATASET_OPTION: imagenet_pytorch
+
+
   tvm-onnx:
     group: framework
     env:
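These defaults tune the deepsparse ResNet50 run: stream and enqueue-thread counts, the LoadGen batch size, and the imagenet_pytorch dataset option. A hedged sketch of what a default_env block like this implies, assuming defaults yield to values the user has already set; the merge helper below is illustrative, not the automation's actual code:

    def apply_default_env(env, default_env):
        """Fill in defaults without clobbering values that are already set."""
        for key, value in default_env.items():
            if env.get(key, '') == '':
                env[key] = str(value)
        return env

    defaults = {
        'DEEPSPARSE_NUM_STREAMS': 24,
        'ENQUEUE_NUM_THREADS': 2,
        'MLC_MLPERF_LOADGEN_MAX_BATCHSIZE': 16,
        'MLC_MLPERF_VISION_DATASET_OPTION': 'imagenet_pytorch',
    }
    env = {'MLC_MLPERF_LOADGEN_MAX_BATCHSIZE': '64'}  # user override survives
    print(apply_default_env(env, defaults))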

script/detect-sudo/customize.py

Lines changed: 23 additions & 1 deletion
@@ -102,6 +102,24 @@ def is_user_in_sudo_group():
         return False
 
 
+def timeout_input(prompt, timeout=15, default=""):
+    """Prompt user for input with a timeout (cross-platform)."""
+    result = [default]  # Store the input result
+
+    def get_input():
+        try:
+            result[0] = getpass.getpass(prompt)
+        except EOFError:  # Handle Ctrl+D or unexpected EOF
+            result[0] = default
+
+    input_thread = threading.Thread(target=get_input)
+    input_thread.daemon = True  # Daemonize thread
+    input_thread.start()
+    input_thread.join(timeout)  # Wait for input with timeout
+
+    return result[0]  # Return user input or default
+
+
 def prompt_sudo():
     if os.geteuid() != 0 and not is_user_in_sudo_group():  # No sudo required for root user
 
@@ -112,7 +130,11 @@ def prompt_sudo():
             print("Skipping password prompt - non-interactive terminal detected!")
             password = None
         else:
-            password = getpass.getpass("Enter password (-1 to skip): ")
+            # password = getpass.getpass("Enter password (-1 to skip): ")
+            password = timeout_input(
+                "Enter password (-1 to skip): ",
+                timeout=15,
+                default=None)
 
         # Check if the input is -1
         if password == "-1":
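The new helper uses getpass (already imported by this module) and threading (presumably already imported, since the diff adds no import). A standalone version of the same pattern that can be tried in any console, with input() swapped in for getpass so the prompt echoes; otherwise the logic mirrors the diff:

    import threading

    def timeout_input(prompt, timeout=15, default=None):
        """Return the user's input, or `default` if nothing arrives in `timeout` seconds."""
        result = [default]                 # mutable cell the worker thread writes into

        def get_input():
            try:
                result[0] = input(prompt)
            except EOFError:               # Ctrl+D or closed stdin
                result[0] = default

        t = threading.Thread(target=get_input, daemon=True)  # daemon: never blocks exit
        t.start()
        t.join(timeout)                    # wait at most `timeout` seconds
        return result[0]

    if __name__ == "__main__":
        print("got:", timeout_input("Continue? [y/N] ", timeout=5))

One caveat of this pattern: join(timeout) abandons the worker thread rather than killing it, so a keystroke after the deadline is read but discarded; the daemon flag only keeps the stray thread from blocking interpreter shutdown. With default=None, a timed-out prompt lands in the same code path as the non-interactive branch above.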

script/get-cudnn/customize.py

Lines changed: 2 additions & 1 deletion
@@ -103,7 +103,8 @@ def preprocess(i):
         return {'return': 0}
 
     if env.get('MLC_CUDNN_TAR_FILE_PATH', '') == '':
-        return {'return': 1, 'error': 'Please envoke mlcr "get cudnn" --tar_file={full path to the cuDNN tar file}'}
+        return {
+            'return': 1, 'error': 'Please envoke mlcr get,cudnn --tar_file={full path to the cuDNN tar file}'}
 
     print('Untaring file - can take some time ...')
 

Lines changed: 4 additions & 1 deletion
@@ -1,2 +1,5 @@
 #!/bin/bash
-${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/download_sparse.py
+cmd="${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/download_sparse.py"
+echo "$cmd"
+eval "$cmd"
+test $? -eq 0 || exit $?
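The rewritten script echoes the command before running it (the "console outputs" fix from the commit title) and forwards the child's exit status to the caller. The same pattern sketched in Python, for consistency with the other examples here; the script path is illustrative:

    import subprocess
    import sys

    cmd = [sys.executable, "download_sparse.py"]  # illustrative stand-in for the real paths
    print(" ".join(cmd), flush=True)              # surface the exact command in the log
    ret = subprocess.run(cmd).returncode          # run it and capture the exit status
    sys.exit(ret)                                 # propagate it, like `test $? -eq 0 || exit $?`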

script/get-mlperf-inference-src/meta.yaml

Lines changed: 2 additions & 2 deletions
@@ -77,7 +77,7 @@ variations:
   deepsparse:
     base:
       - _branch.deepsparse
-      - _repo.https://github.com/neuralmagic/inference
+      - _repo.https://github.com/gateoverflow/nm-inference
   full-history:
     env:
       MLC_GIT_DEPTH: ''
@@ -139,7 +139,7 @@ versions:
     env:
       MLC_MLPERF_LAST_RELEASE: v5.0
       MLC_TMP_GIT_CHECKOUT: deepsparse
-      MLC_TMP_GIT_URL: https://github.com/neuralmagic/inference
+      MLC_TMP_GIT_URL: https://github.com/gateoverflow/nm-inference
   main:
     env:
       MLC_MLPERF_LAST_RELEASE: v5.0

script/get-tensorrt/customize.py

Lines changed: 1 addition & 1 deletion
@@ -92,7 +92,7 @@ def preprocess(i):
         if env.get('MLC_TENSORRT_REQUIRE_DEV', '') != 'yes':
             tags.append("_dev")
         return {'return': 1, 'error': 'Please envoke mlcr "' +
-                " ".join(tags) + '" --tar_file={full path to the TensorRT tar file}'}
+                ",".join(tags) + '" --tar_file={full path to the TensorRT tar file}'}
 
     print('Untaring file - can take some time ...')
 
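This mirrors the get-cudnn fix above and the commit's "Fix tags for mlcr" note: mlcr identifies a script by a single comma-separated tag string, so joining the tags with spaces produced a suggested command users could not paste back. A quick illustration (the tag list is hypothetical):

    tags = ["get", "tensorrt", "_dev"]  # hypothetical tag list

    print('mlcr "' + " ".join(tags) + '" --tar_file=...')  # old: mlcr "get tensorrt _dev" ...
    print('mlcr "' + ",".join(tags) + '" --tar_file=...')  # new: mlcr "get,tensorrt,_dev" ...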

script/run-mlperf-inference-app/meta.yaml

Lines changed: 1 addition & 0 deletions
@@ -122,6 +122,7 @@ input_mapping:
   pointpillars_checkpoint_path: MLC_ML_MODEL_POINT_PILLARS_PATH
   deeplab_resnet50_path: MLC_ML_MODEL_DPLAB_RESNET50_PATH
   waymo_path: MLC_DATASET_WAYMO_PATH
+  nm_model_zoo_stub: MLC_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB
 
 new_state_keys:
   - app_mlperf_inference_*
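input_mapping ties run-mlperf command-line options to MLC environment variables, so the new entry lets a --nm_model_zoo_stub flag flow through as MLC_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB, supporting the R50 deepsparse fixes. A minimal sketch of that translation; the helper function is hypothetical:

    input_mapping = {
        'waymo_path': 'MLC_DATASET_WAYMO_PATH',
        'nm_model_zoo_stub': 'MLC_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB',
    }

    def map_inputs(options, mapping):
        """Hypothetical helper: turn parsed --key=value options into env assignments."""
        return {mapping[k]: v for k, v in options.items() if k in mapping}

    print(map_inputs({'nm_model_zoo_stub': 'zoo:cv/classification/...'}, input_mapping))
    # {'MLC_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB': 'zoo:cv/classification/...'}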
