Fix for early stopping and command generation #445


Merged: 19 commits, merged on May 30, 2025
Changes from all commits
13 changes: 10 additions & 3 deletions script/app-mlperf-automotive-mlcommons-python/customize.py
@@ -198,6 +198,8 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options,

q = '"' if os_info['platform'] == 'windows' else "'"

device = env['MLC_MLPERF_DEVICE']

##########################################################################
# Grigori added for ABTF demo

@@ -235,6 +237,11 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options,

env['RUN_DIR'] = run_dir

if device == "gpu":
logger.warning(
"Bevformer reference implementation is not supported on GPU, defaulting to CPU")
device = "cpu"

env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR']

if env['MLC_MLPERF_BACKEND'] != "onnxruntime":
@@ -249,7 +256,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options,
"bevformer",
"bevformer_tiny.py")
print(env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'])
cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend onnx --dataset nuscenes --nuscenes-root {os.path.dirname(env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'].rstrip("/"))} --dataset-path {env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH']} --checkpoint {env['MLC_ML_MODEL_BEVFORMER_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}"""
cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend onnx --dataset nuscenes --device {"cuda" if device == "gpu" else "cpu"} --nuscenes-root {os.path.dirname(env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'].rstrip("/"))} --dataset-path {env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH']} --checkpoint {env['MLC_ML_MODEL_BEVFORMER_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}"""
print(cmd)
elif env['MLC_MODEL'] in ['ssd']:
run_dir = env['MLC_MLPERF_INFERENCE_SSD_RESNET50_PATH']
@@ -263,7 +270,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options,

config_path = "baseline_8MP_ss_scales_fm1_5x5_all"

cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {backend} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_SSD_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}"""
cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {backend} --cognata-root-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_SSD_PATH']} --config {config_path} --device {"cuda" if device == "gpu" else "cpu"} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}"""

elif env['MLC_MODEL'] in ['deeplabv3plus']:
run_dir = env['MLC_MLPERF_INFERENCE_DEEPLABV3PLUS_PATH']
@@ -275,7 +282,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options,
backend = "onnx" if env.get(
'MLC_MLPERF_BACKEND') == "onnxruntime" else env.get('MLC_MLPERF_BACKEND')

cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {backend} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH']} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}"""
cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {backend} --dataset cognata --device {"cuda" if device == "gpu" else "cpu"} --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH']} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}"""

##########################################################################

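
For orientation, here is a minimal, self-contained sketch of the pattern these hunks introduce: read the requested device from the environment, fall back to CPU for the BEVFormer reference implementation (which this flow does not support on GPU), and translate "gpu" into a --device cuda flag when the benchmark command is assembled. The env values, run_dir, and logger setup below are illustrative stand-ins, not the actual automation state.

import logging
import os

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Illustrative environment; real values come from the MLC automation state.
env = {
    "MLC_MLPERF_DEVICE": "gpu",
    "MLC_PYTHON_BIN_WITH_PATH": "python3",
    "OUTPUT_DIR": "/tmp/results",
    "MLC_MLPERF_LOADGEN_SCENARIO": "SingleStream",
}

device = env["MLC_MLPERF_DEVICE"]
if device == "gpu":
    # BEVFormer reference implementation runs on CPU only in this flow.
    logger.warning(
        "Bevformer reference implementation is not supported on GPU, defaulting to CPU")
    device = "cpu"

run_dir = "/path/to/bevformer"  # placeholder
cmd = (
    f"{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, 'main.py')} "
    f"--output {env['OUTPUT_DIR']} "
    f"--scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} "
    f"--device {'cuda' if device == 'gpu' else 'cpu'}"
)
print(cmd)
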
15 changes: 14 additions & 1 deletion script/app-mlperf-automotive-mlcommons-python/meta.yaml
@@ -60,6 +60,7 @@ input_mapping:
target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY
offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS
constantstream_target_qps: MLC_MLPERF_LOADGEN_CONSTANTSTREAM_TARGET_QPS
singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
output: MLC_MLPERF_OUTPUT_DIR
@@ -457,6 +458,9 @@ variations:

bevformer:
group: models
default_env:
MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE: "99.9"
MLC_MLPERF_DEFAULT_MAX_QUERY_COUNT: 6636
add_deps_recursive:
pytorch:
version_max: "2.5.1"
@@ -492,6 +496,9 @@ variations:

ssd:
group: models
default_env:
MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE: "99.9"
MLC_MLPERF_DEFAULT_MAX_QUERY_COUNT: 6636
add_deps_recursive:
pytorch:
version_max: "2.3.1"
@@ -515,7 +522,7 @@ variations:
- tags: get,generic-python-lib,_package.onnx
- tags: get,generic-python-lib,_package.onnxruntime
- tags: get,generic-python-lib,_package.tqdm
- tags: get,preprocessed,dataset,cognata,_mlc,_2d_object_det,_validation
- tags: get,preprocessed,dataset,cognata,_mlc,_2d_obj_det,_validation
skip_if_env:
MLC_RUN_STATE_DOCKER:
- "yes"
@@ -530,6 +537,9 @@ variations:

deeplabv3plus:
group: models
default_env:
MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE: "99.9"
MLC_MLPERF_DEFAULT_MAX_QUERY_COUNT: 6636
add_deps_recursive:
pytorch:
version_max: "2.3.1"
@@ -601,6 +611,9 @@ variations:
server:
env:
MLC_MLPERF_LOADGEN_SCENARIO: Server
constantstream:
env:
MLC_MLPERF_LOADGEN_SCENARIO: ConstantStream

mvp_demo:
env:
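
The new constantstream entries above follow the existing pattern of one target option and one scenario variation per LoadGen scenario. As a rough, hypothetical sketch of how such a per-scenario option could be resolved into its environment variable when a run is configured (only the variable and scenario names are taken from the meta.yaml; the mapping and function are illustrative):

SCENARIO_TARGET_ENV = {
    "Offline": "MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS",
    "Server": "MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS",
    "ConstantStream": "MLC_MLPERF_LOADGEN_CONSTANTSTREAM_TARGET_QPS",
    "SingleStream": "MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY",
    "MultiStream": "MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY",
}

def scenario_target(env, scenario):
    """Return (env_key, value) for the scenario's target setting, or (None, None)."""
    key = SCENARIO_TARGET_ENV.get(scenario)
    return (key, env.get(key)) if key else (None, None)

# Example: a ConstantStream run configured for 10 QPS.
print(scenario_target({"MLC_MLPERF_LOADGEN_CONSTANTSTREAM_TARGET_QPS": "10"}, "ConstantStream"))
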
2 changes: 1 addition & 1 deletion script/app-mlperf-automotive/customize.py
@@ -68,7 +68,7 @@ def postprocess(i):

mlperf_log = MLPerfLog(os.path.join(output_dir, "mlperf_log_detail.txt"))
if mode == "performance":
if scenario in ["Offline", "Server"]:
if scenario in ["Offline", "Server", "ConstantStream"]:
metric = "target_qps"
result = mlperf_log['result_mean_latency_ns'] / 1000000
elif scenario.endswith("Stream"):
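
A condensed sketch of the branch this change touches: ConstantStream now joins Offline and Server as a throughput-style scenario in postprocessing. The field names in the QPS branch are taken from the hunk; mlperf_log is approximated as a plain dict here, and the *Stream branch is left as a stub because its body is not shown above.

def performance_metric(scenario, mlperf_log):
    """Pick the reported metric name and value for a performance run (sketch)."""
    if scenario in ["Offline", "Server", "ConstantStream"]:
        metric = "target_qps"
        result = mlperf_log["result_mean_latency_ns"] / 1000000
    elif scenario.endswith("Stream"):
        metric = "target_latency"
        result = None  # latency extraction continues in the original code (not shown in this hunk)
    else:
        raise ValueError(f"unexpected scenario {scenario}")
    return metric, result

# Example with an illustrative parsed log entry.
print(performance_metric("ConstantStream", {"result_mean_latency_ns": 25_000_000}))
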
20 changes: 17 additions & 3 deletions script/app-mlperf-automotive/meta.yaml
@@ -59,6 +59,7 @@ input_mapping:
target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY
offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS
constantstream_target_qps: MLC_MLPERF_LOADGEN_CONSTANTSTREAM_TARGET_QPS
singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
output: MLC_MLPERF_OUTPUT_DIR
@@ -118,8 +119,8 @@ post_deps:


docker:
mlc_repo: anandhu-eng@mlperf-automations
mlc_repo_branch: automotive2
mlc_repo: mlcommons@mlperf-automations
mlc_repo_branch: dev
use_host_group_id: True
use_host_user_id: True
real_run: false
@@ -303,7 +304,7 @@ variations:
MLC_MODEL: ssd
docker:
deps:
- tags: get,preprocessed,dataset,cognata,_mlc,_2d_object_det,_validation
- tags: get,preprocessed,dataset,cognata,_mlc,_2d_obj_det,_validation
enable_if_env:
MLC_USE_DATASET_FROM_HOST:
- "yes"
@@ -381,6 +382,9 @@ variations:
abtf-inference-implementation:
tags: _cpu

gpu:
alias: cuda

cuda:
group: device
env:
@@ -413,12 +417,14 @@
add_deps_recursive:
abtf-inference-implementation:
tags: _offline

multistream:
env:
MLC_MLPERF_LOADGEN_SCENARIO: MultiStream
add_deps_recursive:
abtf-inference-implementation:
tags: _multistream

singlestream:
group: loadgen-scenario
default: true
@@ -427,6 +433,14 @@
add_deps_recursive:
abtf-inference-implementation:
tags: _singlestream

constantstream:
env:
MLC_MLPERF_LOADGEN_SCENARIO: ConstantStream
add_deps_recursive:
abtf-inference-implementation:
tags: _constantstream

server:
env:
MLC_MLPERF_LOADGEN_SCENARIO: Server
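
The gpu entry added above is a pure alias for the existing cuda device variation. A small hypothetical sketch of how alias resolution of this kind can work (the dict contents and function name are illustrative; only the variation names gpu, cuda, and cpu come from the meta.yaml):

# Illustrative variation table; real definitions live in meta.yaml.
VARIATIONS = {
    "gpu": {"alias": "cuda"},
    "cuda": {"group": "device", "env": {"MLC_MLPERF_DEVICE": "gpu"}},  # assumed env value
    "cpu": {"group": "device", "env": {"MLC_MLPERF_DEVICE": "cpu"}},
}

def resolve_variation(name):
    """Follow alias entries until a concrete variation definition is reached."""
    seen = set()
    while "alias" in VARIATIONS.get(name, {}):
        if name in seen:
            raise ValueError(f"alias cycle starting at {name!r}")
        seen.add(name)
        name = VARIATIONS[name]["alias"]
    return VARIATIONS[name]

print(resolve_variation("gpu"))  # resolves to the cuda variation's definition
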
4 changes: 2 additions & 2 deletions script/app-mlperf-inference-amd/meta.yaml
@@ -91,12 +91,12 @@ deps:
- inference-src

# Download MLPerf inference loadgen
- tags: get,mlcommons,inference,loadgen
- tags: get,mlcommons,inference,loadgen,_inference
names:
- inference-loadgen

# Creates user conf for given SUT
- tags: generate,user-conf,mlperf,inference
- tags: generate,user-conf,mlperf,inference,_inference
names:
- user-conf-generator

4 changes: 2 additions & 2 deletions script/app-mlperf-inference-ctuning-cpp-tflite/meta.yaml
@@ -28,7 +28,7 @@ deps:
tags: get,cuda
- names:
- loadgen
tags: get,loadgen
tags: get,loadgen,_inference
- names:
- inference-src
tags: get,mlcommons,inference,src
@@ -121,7 +121,7 @@ post_deps:
prehook_deps:
- names:
- user-conf-generator
tags: generate,user-conf,mlperf,inference
tags: generate,user-conf,mlperf,inference,_inference
- enable_if_env:
MLC_MLPERF_SKIP_RUN:
- 'no'
4 changes: 2 additions & 2 deletions script/app-mlperf-inference-dummy/meta.yaml
@@ -91,12 +91,12 @@ deps:
- inference-src

# Download MLPerf inference loadgen
- tags: get,mlcommons,inference,loadgen
- tags: get,mlcommons,inference,loadgen,_inference
names:
- inference-loadgen

# Creates user conf for given SUT
- tags: generate,user-conf,mlperf,inference
- tags: generate,user-conf,mlperf,inference,_inference
names:
- user-conf-generator

12 changes: 6 additions & 6 deletions script/app-mlperf-inference-intel/meta.yaml
@@ -392,7 +392,7 @@ variations:
- tags: get,mlcommons,inference,src
names:
- inference-src
- tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build
- tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build,_inference
names:
- inference-loadgen

@@ -510,7 +510,7 @@ variations:
- tags: get,mlcommons,inference,src
names:
- inference-src
- tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build
- tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build,_inference
names:
- inference-loadgen

@@ -627,7 +627,7 @@ variations:
- tags: get,mlcommons,inference,src
names:
- inference-src
- tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build
- tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build,_inference
names:
- inference-loadgen

@@ -699,7 +699,7 @@ variations:
- tags: get,mlcommons,inference,src
names:
- inference-src
- tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build
- tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build,_inference
names:
- inference-loadgen

@@ -924,7 +924,7 @@ variations:
- tags: get,mlcommons,inference,src
names:
- inference-src
- tags: get,mlcommons,inference,loadgen,_custom-python
- tags: get,mlcommons,inference,loadgen,_custom-python,_inference
names:
- inference-loadgen
- tags: get,ml-model,dlrm,_pytorch
@@ -1074,7 +1074,7 @@ variations:
- inference-src

# Creates user conf for given SUT
- tags: generate,user-conf,mlperf,inference
- tags: generate,user-conf,mlperf,inference,_inference
names:
- user-conf-generator
- tags: get,generic-sys-util,_rsync
4 changes: 2 additions & 2 deletions script/app-mlperf-inference-mlcommons-cpp/meta.yaml
@@ -71,7 +71,7 @@ deps:
# Install MLPerf inference dependencies

# Install MLPerf loadgen
- tags: get,loadgen
- tags: get,loadgen,_inference
names:
- loadgen

@@ -129,7 +129,7 @@ deps:
tags: get,ml-model,retinanet,_onnx,_fp32

# Creates user conf for given SUT
- tags: generate,user-conf,mlperf,inference
- tags: generate,user-conf,mlperf,inference,_inference
names:
- user-conf-generator

4 changes: 2 additions & 2 deletions script/app-mlperf-inference-mlcommons-python/meta.yaml
@@ -684,15 +684,15 @@ deps:
# Install MLPerf inference dependencies

# Creates user conf for given SUT
- tags: generate,user-conf,mlperf,inference
- tags: generate,user-conf,mlperf,inference,_inference
names:
- user-conf-generator
skip_if_env:
MLC_RUN_STATE_DOCKER:
- "yes"

# Install MLPerf loadgen
- tags: get,loadgen
- tags: get,loadgen,_inference
names:
- loadgen
- mlperf-inference-loadgen
4 changes: 2 additions & 2 deletions script/app-mlperf-inference-qualcomm/meta.yaml
@@ -111,12 +111,12 @@ deps:
- inference-src

# Download MLPerf inference loadgen
- tags: get,mlcommons,inference,loadgen
- tags: get,mlcommons,inference,loadgen,_inference
names:
- inference-loadgen

# Creates user conf for given SUT
- tags: generate,user-conf,mlperf,inference
- tags: generate,user-conf,mlperf,inference,_inference
names:
- user-conf-generator

4 changes: 2 additions & 2 deletions script/app-mlperf-inference-redhat/meta.yaml
@@ -91,12 +91,12 @@ deps:
- inference-src

# Download MLPerf inference loadgen
- tags: get,mlcommons,inference,loadgen
- tags: get,mlcommons,inference,loadgen,_inference
names:
- inference-loadgen

# Creates user conf for given SUT
- tags: generate,user-conf,mlperf,inference
- tags: generate,user-conf,mlperf,inference,_inference
names:
- user-conf-generator
