From dfdece716876118d2d95e1c9bfdd4c5fd4196fd4 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Wed, 12 Feb 2025 23:12:54 +0530
Subject: [PATCH 01/11] Fix tags for mlcr

---
 script/app-mlperf-inference-mlcommons-python/customize.py | 7 ++++---
 script/get-cudnn/customize.py | 2 +-
 script/get-tensorrt/customize.py | 2 +-
 3 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/script/app-mlperf-inference-mlcommons-python/customize.py b/script/app-mlperf-inference-mlcommons-python/customize.py
index df85eb7e1..45fc1071c 100644
--- a/script/app-mlperf-inference-mlcommons-python/customize.py
+++ b/script/app-mlperf-inference-mlcommons-python/customize.py
@@ -3,7 +3,7 @@
 import json
 import shutil
 import subprocess
-
+from utils import *
 
 def preprocess(i):
 
@@ -51,12 +51,13 @@ def preprocess(i):
 
         env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += env['MLC_MLPERF_LOADGEN_QPS_OPT']
 
-    if 'MLC_NUM_THREADS' not in env:
-        if 'MLC_MINIMIZE_THREADS' in env:
+    if env.get('MLC_NUM_THREADS', '') != '':
+        if not is_false(env.get('MLC_MINIMIZE_THREADS','')):
             env['MLC_NUM_THREADS'] = str(int(env['MLC_HOST_CPU_TOTAL_CORES']) //
                                          (int(env.get('MLC_HOST_CPU_SOCKETS', '1')) * int(env.get('MLC_HOST_CPU_TOTAL_CORES', '1'))))
         else:
             env['MLC_NUM_THREADS'] = env.get('MLC_HOST_CPU_TOTAL_CORES', '1')
+    env['CM_NUM_THREADS'] = env['MLC_NUM_THREADS'] #For inference code
 
     if env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '') != '' and str(env.get(
             'MLC_MLPERF_MODEL_SKIP_BATCHING', False)).lower() not in ["true", "1", "yes"]:
diff --git a/script/get-cudnn/customize.py b/script/get-cudnn/customize.py
index 1cedb4a1d..809cd60b2 100644
--- a/script/get-cudnn/customize.py
+++ b/script/get-cudnn/customize.py
@@ -103,7 +103,7 @@ def preprocess(i):
         return {'return': 0}
 
     if env.get('MLC_CUDNN_TAR_FILE_PATH', '') == '':
-        return {'return': 1, 'error': 'Please invoke mlcr "get cudnn" --tar_file={full path to the cuDNN tar file}'}
+        return {'return': 1, 'error': 'Please invoke mlcr get,cudnn --tar_file={full path to the cuDNN tar file}'}
 
     print('Untarring file - can take some time ...')
 
diff --git a/script/get-tensorrt/customize.py b/script/get-tensorrt/customize.py
index 5348edbe9..b84792744 100644
--- a/script/get-tensorrt/customize.py
+++ b/script/get-tensorrt/customize.py
@@ -92,7 +92,7 @@ def preprocess(i):
         if env.get('MLC_TENSORRT_REQUIRE_DEV', '') != 'yes':
             tags.append("_dev")
         return {'return': 1, 'error': 'Please invoke mlcr "' +
-                " ".join(tags) + '" --tar_file={full path to the TensorRT tar file}'}
+                ",".join(tags) + '" --tar_file={full path to the TensorRT tar file}'}
 
     print('Untarring file - can take some time ...')
 

From 7d6a663f09bbec6398ebb41a5ce3513b2270fbff Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
Date: Wed, 12 Feb 2025 17:43:13 +0000
Subject: [PATCH 02/11] [Automated Commit] Format Codebase [skip ci]

---
 script/app-mlperf-inference-mlcommons-python/customize.py | 5 +++--
 script/get-cudnn/customize.py | 3 ++-
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/script/app-mlperf-inference-mlcommons-python/customize.py b/script/app-mlperf-inference-mlcommons-python/customize.py
index 45fc1071c..69428b804 100644
--- a/script/app-mlperf-inference-mlcommons-python/customize.py
+++ b/script/app-mlperf-inference-mlcommons-python/customize.py
@@ -5,6 +5,7 @@
 import subprocess
 from utils import *
 
+
 def preprocess(i):
 
     os_info = i['os_info']
@@ -52,12 +53,12 @@ def preprocess(i):
         env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += env['MLC_MLPERF_LOADGEN_QPS_OPT']
 
     if env.get('MLC_NUM_THREADS', '') != '':
-        if not is_false(env.get('MLC_MINIMIZE_THREADS','')):
+        if not is_false(env.get('MLC_MINIMIZE_THREADS', '')):
             env['MLC_NUM_THREADS'] = str(int(env['MLC_HOST_CPU_TOTAL_CORES']) //
                                          (int(env.get('MLC_HOST_CPU_SOCKETS', '1')) * int(env.get('MLC_HOST_CPU_TOTAL_CORES', '1'))))
         else:
             env['MLC_NUM_THREADS'] = env.get('MLC_HOST_CPU_TOTAL_CORES', '1')
-    env['CM_NUM_THREADS'] = env['MLC_NUM_THREADS'] #For inference code
+    env['CM_NUM_THREADS'] = env['MLC_NUM_THREADS']  # For inference code
 
     if env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '') != '' and str(env.get(
             'MLC_MLPERF_MODEL_SKIP_BATCHING', False)).lower() not in ["true", "1", "yes"]:
diff --git a/script/get-cudnn/customize.py b/script/get-cudnn/customize.py
index 809cd60b2..1e47255e0 100644
--- a/script/get-cudnn/customize.py
+++ b/script/get-cudnn/customize.py
@@ -103,7 +103,8 @@ def preprocess(i):
         return {'return': 0}
 
     if env.get('MLC_CUDNN_TAR_FILE_PATH', '') == '':
-        return {'return': 1, 'error': 'Please invoke mlcr get,cudnn --tar_file={full path to the cuDNN tar file}'}
+        return {
+            'return': 1, 'error': 'Please invoke mlcr get,cudnn --tar_file={full path to the cuDNN tar file}'}
 
     print('Untarring file - can take some time ...')
 

From c969d2655a292492e9d2c57d02623934fab8b4a1 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Thu, 13 Feb 2025 01:20:18 +0530
Subject: [PATCH 03/11] Added a timeout for detect,sudo

---
 script/detect-sudo/customize.py | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/script/detect-sudo/customize.py b/script/detect-sudo/customize.py
index 844a2b328..547a39bc6 100644
--- a/script/detect-sudo/customize.py
+++ b/script/detect-sudo/customize.py
@@ -101,6 +101,23 @@ def is_user_in_sudo_group():
         print(f"Error checking sudo group: {str(e)}")
         return False
 
+def timeout_input(prompt, timeout=15, default=""):
+    """Prompt user for input with a timeout (cross-platform)."""
+    result = [default]  # Store the input result
+
+    def get_input():
+        try:
+            result[0] = getpass.getpass(prompt)
+        except EOFError:  # Handle Ctrl+D or unexpected EOF
+            result[0] = default
+
+    input_thread = threading.Thread(target=get_input)
+    input_thread.daemon = True  # Daemonize thread
+    input_thread.start()
+    input_thread.join(timeout)  # Wait for input with timeout
+
+    return result[0]  # Return user input or default
+
 def prompt_sudo():
     if os.geteuid() != 0 and not is_user_in_sudo_group():
         # No sudo required for root user
@@ -108,11 +125,13 @@ def prompt_sudo():
 
     # Prompt for the password
     import getpass
+
     if not os.isatty(sys.stdin.fileno()):
         print("Skipping password prompt - non-interactive terminal detected!")
         password = None
     else:
-        password = getpass.getpass("Enter password (-1 to skip): ")
+        #password = getpass.getpass("Enter password (-1 to skip): ")
+        password = timeout_input("Enter password (-1 to skip): ", timeout=15, default=None)
 
     # Check if the input is -1
     if password == "-1":

From b7cb39bea5da5d4cb3e3a9d5a60771ad46e57778 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
Date: Wed, 12 Feb 2025 19:50:42 +0000
Subject: [PATCH 04/11] [Automated Commit] Format Codebase [skip ci]

---
 script/detect-sudo/customize.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/script/detect-sudo/customize.py b/script/detect-sudo/customize.py
index 547a39bc6..dbfe5e5bb 100644
--- a/script/detect-sudo/customize.py
+++ b/script/detect-sudo/customize.py
@@ -101,6 +101,7 @@ def is_user_in_sudo_group():
         print(f"Error checking sudo group: {str(e)}")
         return False
 
+
 def timeout_input(prompt, timeout=15, default=""):
"""Prompt user for input with a timeout (cross-platform).""" result = [default] # Store the input result @@ -125,13 +126,15 @@ def prompt_sudo(): # Prompt for the password import getpass - if not os.isatty(sys.stdin.fileno()): print("Skipping password prompt - non-interactive terminal detected!") password = None else: - #password = getpass.getpass("Enter password (-1 to skip): ") - password = timeout_input("Enter password (-1 to skip): ", timeout=15, default=None) + # password = getpass.getpass("Enter password (-1 to skip): ") + password = timeout_input( + "Enter password (-1 to skip): ", + timeout=15, + default=None) # Check if the input is -1 if password == "-1": From d1f7afe96f091399da99ee177416ee6381bb9ca1 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Thu, 13 Feb 2025 01:37:32 +0530 Subject: [PATCH 05/11] Fix typo --- script/app-mlperf-inference-mlcommons-python/customize.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/app-mlperf-inference-mlcommons-python/customize.py b/script/app-mlperf-inference-mlcommons-python/customize.py index 69428b804..2c3c6329a 100644 --- a/script/app-mlperf-inference-mlcommons-python/customize.py +++ b/script/app-mlperf-inference-mlcommons-python/customize.py @@ -52,7 +52,7 @@ def preprocess(i): env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += env['MLC_MLPERF_LOADGEN_QPS_OPT'] - if env.get('MLC_NUM_THREADS', '') != '': + if env.get('MLC_NUM_THREADS', '') == '': if not is_false(env.get('MLC_MINIMIZE_THREADS', '')): env['MLC_NUM_THREADS'] = str(int(env['MLC_HOST_CPU_TOTAL_CORES']) // (int(env.get('MLC_HOST_CPU_SOCKETS', '1')) * int(env.get('MLC_HOST_CPU_TOTAL_CORES', '1')))) From b6943b6d540b5e1136079e467286c2b97bb11d13 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Thu, 13 Feb 2025 01:43:45 +0530 Subject: [PATCH 06/11] Fix typo --- script/app-mlperf-inference-mlcommons-python/customize.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/app-mlperf-inference-mlcommons-python/customize.py b/script/app-mlperf-inference-mlcommons-python/customize.py index 2c3c6329a..e42d3b793 100644 --- a/script/app-mlperf-inference-mlcommons-python/customize.py +++ b/script/app-mlperf-inference-mlcommons-python/customize.py @@ -53,7 +53,7 @@ def preprocess(i): env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += env['MLC_MLPERF_LOADGEN_QPS_OPT'] if env.get('MLC_NUM_THREADS', '') == '': - if not is_false(env.get('MLC_MINIMIZE_THREADS', '')): + if not is_false(env.get('MLC_MINIMIZE_THREADS', '')) and env.get('MLC_HOST_CPU_TOTAL_CORES', '') != '': env['MLC_NUM_THREADS'] = str(int(env['MLC_HOST_CPU_TOTAL_CORES']) // (int(env.get('MLC_HOST_CPU_SOCKETS', '1')) * int(env.get('MLC_HOST_CPU_TOTAL_CORES', '1')))) else: From c4614c2434bcb4b20a0ab4a9a56c37e3a4e7cbe9 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 12 Feb 2025 20:14:02 +0000 Subject: [PATCH 07/11] [Automated Commit] Format Codebase [skip ci] --- script/app-mlperf-inference-mlcommons-python/customize.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/script/app-mlperf-inference-mlcommons-python/customize.py b/script/app-mlperf-inference-mlcommons-python/customize.py index e42d3b793..e8c67de5a 100644 --- a/script/app-mlperf-inference-mlcommons-python/customize.py +++ b/script/app-mlperf-inference-mlcommons-python/customize.py @@ -53,7 +53,8 @@ def preprocess(i): env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += env['MLC_MLPERF_LOADGEN_QPS_OPT'] if env.get('MLC_NUM_THREADS', '') == '': - if not is_false(env.get('MLC_MINIMIZE_THREADS', '')) and 
env.get('MLC_HOST_CPU_TOTAL_CORES', '') != '': + if not is_false(env.get('MLC_MINIMIZE_THREADS', '')) and env.get( + 'MLC_HOST_CPU_TOTAL_CORES', '') != '': env['MLC_NUM_THREADS'] = str(int(env['MLC_HOST_CPU_TOTAL_CORES']) // (int(env.get('MLC_HOST_CPU_SOCKETS', '1')) * int(env.get('MLC_HOST_CPU_TOTAL_CORES', '1')))) else: From c1423da3ee5259b633c2c1c78cceddbe446dcbab Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Thu, 13 Feb 2025 02:17:19 +0530 Subject: [PATCH 08/11] Fix deepsparse --- script/get-mlperf-inference-src/meta.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/get-mlperf-inference-src/meta.yaml b/script/get-mlperf-inference-src/meta.yaml index 919178125..5e6b76f09 100644 --- a/script/get-mlperf-inference-src/meta.yaml +++ b/script/get-mlperf-inference-src/meta.yaml @@ -77,7 +77,7 @@ variations: deepsparse: base: - _branch.deepsparse - - _repo.https://github.com/neuralmagic/inference + - _repo.https://github.com/gateoverflow/nm-inference full-history: env: MLC_GIT_DEPTH: '' From 82719920401e5ab24a19b3ed56510c654949ce7d Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Thu, 13 Feb 2025 02:37:43 +0530 Subject: [PATCH 09/11] Cleanup for deepsparse --- script/app-mlperf-inference-mlcommons-python/customize.py | 1 + script/get-ml-model-neuralmagic-zoo/run.sh | 5 ++++- script/run-mlperf-inference-app/meta.yaml | 1 + 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/script/app-mlperf-inference-mlcommons-python/customize.py b/script/app-mlperf-inference-mlcommons-python/customize.py index e8c67de5a..7d62cfffd 100644 --- a/script/app-mlperf-inference-mlcommons-python/customize.py +++ b/script/app-mlperf-inference-mlcommons-python/customize.py @@ -273,6 +273,7 @@ def get_run_cmd_reference( env['MODEL_FILE'] = env.get( 'MLC_MLPERF_CUSTOM_MODEL_PATH', env.get('MLC_ML_MODEL_FILE_WITH_PATH')) + if not env['MODEL_FILE']: return {'return': 1, 'error': 'No valid model file found!'} diff --git a/script/get-ml-model-neuralmagic-zoo/run.sh b/script/get-ml-model-neuralmagic-zoo/run.sh index d6970d819..db9e7c4de 100644 --- a/script/get-ml-model-neuralmagic-zoo/run.sh +++ b/script/get-ml-model-neuralmagic-zoo/run.sh @@ -1,2 +1,5 @@ #!/bin/bash -${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/download_sparse.py +cmd="${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/download_sparse.py" +echo "$cmd" +eval "$cmd" +test $? -eq || exit $? 
diff --git a/script/run-mlperf-inference-app/meta.yaml b/script/run-mlperf-inference-app/meta.yaml
index 1d4a46f54..cc8404fbd 100644
--- a/script/run-mlperf-inference-app/meta.yaml
+++ b/script/run-mlperf-inference-app/meta.yaml
@@ -122,6 +122,7 @@ input_mapping:
   pointpillars_checkpoint_path: MLC_ML_MODEL_POINT_PILLARS_PATH
   deeplab_resnet50_path: MLC_ML_MODEL_DPLAB_RESNET50_PATH
   waymo_path: MLC_DATASET_WAYMO_PATH
+  nm_model_zoo_stub: MLC_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB
 
 new_state_keys:
 - app_mlperf_inference_*

From 9649a60b22c8c8e4f5055264f92c48d1d530e5a5 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Wed, 12 Feb 2025 21:26:34 +0000
Subject: [PATCH 10/11] Update test-nvidia-mlperf-inference-implementations.yml

---
 .../test-nvidia-mlperf-inference-implementations.yml | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/test-nvidia-mlperf-inference-implementations.yml b/.github/workflows/test-nvidia-mlperf-inference-implementations.yml
index 9f953b0b6..3ba2db04e 100644
--- a/.github/workflows/test-nvidia-mlperf-inference-implementations.yml
+++ b/.github/workflows/test-nvidia-mlperf-inference-implementations.yml
@@ -2,7 +2,7 @@ name: MLPerf Inference Nvidia implementations
 
 on:
   schedule:
-    - cron: "35 01 * * *"
+    - cron: "05 01 * * *"
 
 jobs:
   run_nvidia:
@@ -23,7 +23,7 @@ jobs:
       model: [ "resnet50", "retinanet", "bert-99", "bert-99.9", "gptj-99.9", "3d-unet-99.9", "sdxl" ]
       exclude:
         - model: gptj-99.9
-        - system: phoenix
+        - system: phoenix1
         - system: GO-i9
 
     steps:
@@ -59,5 +59,6 @@ jobs:
           mlc pull repo mlcommons@mlperf-automations --branch=dev
           mlcr --tags=run-mlperf,inference,_all-scenarios,_submission,_full,_r5.0-dev --preprocess_submission=yes --pull_changes=yes --pull_inference_changes=yes --execution_mode=valid --gpu_name=$gpu_name --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=$hw_name --implementation=nvidia --backend=tensorrt --category=$category --division=closed --docker_dt --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=cuda --use_model_from_host=yes --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean $docker_string $submission_preprocessor_args --quiet
+          #mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from GH action on NVIDIA_$hw_name" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=$hw_name
+          mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/GATEOverflow/mlperf_inference_submissions_v5.0 --repo_branch=main --commit_message="Results from GH action on NVIDIA_$hw_name" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=$hw_name
 
-          mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from GH action on NVIDIA_$hw_name" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=$hw_name

From ec66d2137d9f2c5474e4936d4e08aedb385c01e8 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Thu, 13 Feb 2025 03:20:30 +0530
Subject: [PATCH 11/11] Fixes for R50 deepsparse

---
 script/app-mlperf-inference-mlcommons-python/customize.py | 6 +++---
 script/app-mlperf-inference-mlcommons-python/meta.yaml | 8 ++++++++
 script/get-ml-model-neuralmagic-zoo/run.sh | 2 +-
 script/get-mlperf-inference-src/meta.yaml | 2 +-
 4 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/script/app-mlperf-inference-mlcommons-python/customize.py b/script/app-mlperf-inference-mlcommons-python/customize.py
index 7d62cfffd..8144df6e7 100644
--- a/script/app-mlperf-inference-mlcommons-python/customize.py
+++ b/script/app-mlperf-inference-mlcommons-python/customize.py
@@ -53,10 +53,10 @@ def preprocess(i):
         env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += env['MLC_MLPERF_LOADGEN_QPS_OPT']
 
     if env.get('MLC_NUM_THREADS', '') == '':
-        if not is_false(env.get('MLC_MINIMIZE_THREADS', '')) and env.get(
+        if is_true(env.get('MLC_MINIMIZE_THREADS', '')) and env.get(
                 'MLC_HOST_CPU_TOTAL_CORES', '') != '':
             env['MLC_NUM_THREADS'] = str(int(env['MLC_HOST_CPU_TOTAL_CORES']) //
-                                         (int(env.get('MLC_HOST_CPU_SOCKETS', '1')) * int(env.get('MLC_HOST_CPU_TOTAL_CORES', '1'))))
+                                         (int(env.get('MLC_HOST_CPU_SOCKETS', '1'))))
         else:
             env['MLC_NUM_THREADS'] = env.get('MLC_HOST_CPU_TOTAL_CORES', '1')
     env['CM_NUM_THREADS'] = env['MLC_NUM_THREADS']  # For inference code
@@ -279,7 +279,7 @@ def get_run_cmd_reference(
 
         env['LOG_PATH'] = env['MLC_MLPERF_OUTPUT_DIR']
 
-        extra_options = " --output " + env['MLC_MLPERF_OUTPUT_DIR'] + " --model-name resnet50 --dataset " + env['MLC_MLPERF_VISION_DATASET_OPTION'] + ' --max-batchsize ' + env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '1') + \
+        extra_options = " --output " + env['MLC_MLPERF_OUTPUT_DIR'] + " --model-name resnet50 --dataset " + env['MLC_MLPERF_VISION_DATASET_OPTION'] + f""" --max-batchsize {env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '1')}""" + \
             " --dataset-path " + env['MLC_DATASET_PREPROCESSED_PATH'] + " --model " + env['MODEL_FILE'] + \
             " --preprocessed_dir " + env['MLC_DATASET_PREPROCESSED_PATH']
diff --git a/script/app-mlperf-inference-mlcommons-python/meta.yaml b/script/app-mlperf-inference-mlcommons-python/meta.yaml
index ac4a2e445..2b6795b00 100644
--- a/script/app-mlperf-inference-mlcommons-python/meta.yaml
+++ b/script/app-mlperf-inference-mlcommons-python/meta.yaml
@@ -888,6 +888,14 @@ variations:
       ml-model:
         tags: raw,_deepsparse
 
+  deepsparse,resnet50:
+    default_env:
+      DEEPSPARSE_NUM_STREAMS: 24
+      ENQUEUE_NUM_THREADS: 2
+      MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: 16
+      MLC_MLPERF_VISION_DATASET_OPTION: imagenet_pytorch
+
+
   tvm-onnx:
     group: framework
     env:
diff --git a/script/get-ml-model-neuralmagic-zoo/run.sh b/script/get-ml-model-neuralmagic-zoo/run.sh
index db9e7c4de..ec0be49a3 100644
--- a/script/get-ml-model-neuralmagic-zoo/run.sh
+++ b/script/get-ml-model-neuralmagic-zoo/run.sh
@@ -2,4 +2,4 @@
 cmd="${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/download_sparse.py"
 echo "$cmd"
 eval "$cmd"
-test $? -eq || exit $?
+test $? -eq 0 || exit $?
diff --git a/script/get-mlperf-inference-src/meta.yaml b/script/get-mlperf-inference-src/meta.yaml
index 5e6b76f09..bd76d04da 100644
--- a/script/get-mlperf-inference-src/meta.yaml
+++ b/script/get-mlperf-inference-src/meta.yaml
@@ -139,7 +139,7 @@ versions:
     env:
       MLC_MLPERF_LAST_RELEASE: v5.0
      MLC_TMP_GIT_CHECKOUT: deepsparse
-      MLC_TMP_GIT_URL: https://github.com/neuralmagic/inference
+      MLC_TMP_GIT_URL: https://github.com/gateoverflow/nm-inference
   main:
     env:
      MLC_MLPERF_LAST_RELEASE: v5.0
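
Note on the MLC_NUM_THREADS logic (patches 01, 05-07 and 11): the series converges on deriving a thread count only when MLC_NUM_THREADS is unset, and patch 11 also corrects the divisor — the earlier expression divided total cores by sockets * total cores, which integer division collapses to 1 (or 0 on multi-socket hosts), whereas the intent was cores per socket. A minimal standalone sketch of the final behaviour; is_true below is a stand-in for the helper the script imports from the repo's utils module, with its truthy-string semantics assumed:

    def resolve_num_threads(env):
        """Sketch of the MLC_NUM_THREADS selection as of PATCH 11/11."""
        def is_true(val):  # assumed semantics of utils.is_true
            return str(val).strip().lower() in ("yes", "true", "1")

        if env.get('MLC_NUM_THREADS', '') != '':
            return env['MLC_NUM_THREADS']  # an explicit user setting wins
        total_cores = env.get('MLC_HOST_CPU_TOTAL_CORES', '')
        if is_true(env.get('MLC_MINIMIZE_THREADS', '')) and total_cores != '':
            sockets = int(env.get('MLC_HOST_CPU_SOCKETS', '1'))
            return str(int(total_cores) // sockets)  # cores per socket
        return env.get('MLC_HOST_CPU_TOTAL_CORES', '1')  # default: all cores

    # e.g. 32 cores across 2 sockets with minimization requested -> "16"
    print(resolve_num_threads({'MLC_HOST_CPU_TOTAL_CORES': '32',
                               'MLC_HOST_CPU_SOCKETS': '2',
                               'MLC_MINIMIZE_THREADS': 'yes'}))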
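
Note on the timeout_input() helper (patches 03/04): it bounds how long the sudo password prompt can block by running the read on a daemon thread and joining with a timeout. The same pattern reduced to a self-contained example, with plain input() standing in for getpass.getpass so it can be exercised in any terminal:

    import threading

    def timeout_input(prompt, timeout=15, default=""):
        """Return the user's input, or `default` if none arrives within `timeout` seconds."""
        result = [default]  # one-element list so the worker thread can publish its answer

        def get_input():
            try:
                result[0] = input(prompt)
            except EOFError:  # closed stdin / Ctrl+D
                result[0] = default

        worker = threading.Thread(target=get_input, daemon=True)
        worker.start()
        worker.join(timeout)  # block the caller for at most `timeout` seconds
        return result[0]

    if __name__ == '__main__':
        print("Got:", timeout_input("Enter password (-1 to skip): ", timeout=15, default=None))

After a timeout the worker thread stays parked on stdin until the process exits; marking it as a daemon thread is what keeps that from blocking interpreter shutdown, which is also why detect-sudo daemonizes its input thread.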