diff --git a/script/app-mlperf-automotive-mlcommons-python/customize.py b/script/app-mlperf-automotive-mlcommons-python/customize.py
index 688a52dc9..806ff2951 100644
--- a/script/app-mlperf-automotive-mlcommons-python/customize.py
+++ b/script/app-mlperf-automotive-mlcommons-python/customize.py
@@ -198,6 +198,8 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options,
     q = '"' if os_info['platform'] == 'windows' else "'"
 
+    device = env['MLC_MLPERF_DEVICE']
+
     ##########################################################################
     # Grigori added for ABTF demo
 
@@ -235,6 +237,11 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options,
 
         env['RUN_DIR'] = run_dir
 
+        if device == "gpu":
+            logger.warning(
+                "Bevformer reference implementation is not supported on GPU, defaulting to CPU")
+            device = "cpu"
+
         env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR']
 
         if env['MLC_MLPERF_BACKEND'] != "onnxruntime":
@@ -249,7 +256,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options,
             "bevformer", "bevformer_tiny.py")
         print(env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'])
 
-        cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend onnx --dataset nuscenes --nuscenes-root {os.path.dirname(env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'].rstrip("/"))} --dataset-path {env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH']} --checkpoint {env['MLC_ML_MODEL_BEVFORMER_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}"""
+        cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend onnx --dataset nuscenes --device {"cuda" if device == "gpu" else "cpu"} --nuscenes-root {os.path.dirname(env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'].rstrip("/"))} --dataset-path {env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH']} --checkpoint {env['MLC_ML_MODEL_BEVFORMER_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}"""
         print(cmd)
     elif env['MLC_MODEL'] in ['ssd']:
         run_dir = env['MLC_MLPERF_INFERENCE_SSD_RESNET50_PATH']
@@ -263,7 +270,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options,
 
         config_path = "baseline_8MP_ss_scales_fm1_5x5_all"
 
-        cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {backend} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_SSD_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}"""
+        cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {backend} --cognata-root-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_SSD_PATH']} --config {config_path} --device {"cuda" if device == "gpu" else "cpu"} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}"""
 
     elif env['MLC_MODEL'] in ['deeplabv3plus']:
         run_dir = env['MLC_MLPERF_INFERENCE_DEEPLABV3PLUS_PATH']
@@ -275,7 +282,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options,
         backend = "onnx" if env.get(
             'MLC_MLPERF_BACKEND') == "onnxruntime" else env.get('MLC_MLPERF_BACKEND')
 
-        cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {backend} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH']} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}"""
+        cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {backend} --dataset cognata --device {"cuda" if device == "gpu" else "cpu"} --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH']} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}"""
 
 ##########################################################################
diff --git a/script/app-mlperf-automotive-mlcommons-python/meta.yaml b/script/app-mlperf-automotive-mlcommons-python/meta.yaml
index cf95e283b..e7c863366 100644
--- a/script/app-mlperf-automotive-mlcommons-python/meta.yaml
+++ b/script/app-mlperf-automotive-mlcommons-python/meta.yaml
@@ -60,6 +60,7 @@ input_mapping:
   target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY
   offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
   server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS
+  constantstream_target_qps: MLC_MLPERF_LOADGEN_CONSTANTSTREAM_TARGET_QPS
   singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
   multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
   output: MLC_MLPERF_OUTPUT_DIR
@@ -457,6 +458,9 @@ variations:
 
   bevformer:
     group: models
+    default_env:
+      MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE: "99.9"
+      MLC_MLPERF_DEFAULT_MAX_QUERY_COUNT: 6636
     add_deps_recursive:
       pytorch:
         version_max: "2.5.1"
@@ -492,6 +496,9 @@ variations:
 
   ssd:
     group: models
+    default_env:
+      MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE: "99.9"
+      MLC_MLPERF_DEFAULT_MAX_QUERY_COUNT: 6636
     add_deps_recursive:
       pytorch:
         version_max: "2.3.1"
@@ -515,7 +522,7 @@ variations:
       - tags: get,generic-python-lib,_package.onnx
       - tags: get,generic-python-lib,_package.onnxruntime
       - tags: get,generic-python-lib,_package.tqdm
-      - tags: get,preprocessed,dataset,cognata,_mlc,_2d_object_det,_validation
+      - tags: get,preprocessed,dataset,cognata,_mlc,_2d_obj_det,_validation
         skip_if_env:
           MLC_RUN_STATE_DOCKER:
           - "yes"
@@ -530,6 +537,9 @@ variations:
 
   deeplabv3plus:
     group: models
+    default_env:
+      MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE: "99.9"
+      MLC_MLPERF_DEFAULT_MAX_QUERY_COUNT: 6636
     add_deps_recursive:
       pytorch:
         version_max: "2.3.1"
@@ -601,6 +611,9 @@ variations:
   server:
     env:
       MLC_MLPERF_LOADGEN_SCENARIO: Server
+  constantstream:
+    env:
+      MLC_MLPERF_LOADGEN_SCENARIO: ConstantStream
 
   mvp_demo:
     env:
diff --git a/script/app-mlperf-automotive/customize.py b/script/app-mlperf-automotive/customize.py
index b7d8598c5..f1c0f5958 100644
--- a/script/app-mlperf-automotive/customize.py
+++ b/script/app-mlperf-automotive/customize.py
@@ -68,7 +68,7 @@ def postprocess(i):
         mlperf_log = MLPerfLog(os.path.join(output_dir, "mlperf_log_detail.txt"))
         if mode == "performance":
-            if scenario in ["Offline", "Server"]:
+            if scenario in ["Offline", "Server", "ConstantStream"]:
                 metric = "target_qps"
                 result = mlperf_log['result_mean_latency_ns'] / 1000000
             elif scenario.endswith("Stream"):
diff --git a/script/app-mlperf-automotive/meta.yaml b/script/app-mlperf-automotive/meta.yaml
index db81b5313..ebe23d75d 100644
--- a/script/app-mlperf-automotive/meta.yaml
+++ b/script/app-mlperf-automotive/meta.yaml
@@ -59,6 +59,7 @@ input_mapping:
   target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY
   offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
   server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS
+  constantstream_target_qps: MLC_MLPERF_LOADGEN_CONSTANTSTREAM_TARGET_QPS
   singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
   multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
   output: MLC_MLPERF_OUTPUT_DIR
@@ -118,8 +119,8 @@ post_deps:
 
 docker:
-  mlc_repo: anandhu-eng@mlperf-automations
-  mlc_repo_branch: automotive2
+  mlc_repo: mlcommons@mlperf-automations
+  mlc_repo_branch: dev
   use_host_group_id: True
   use_host_user_id: True
   real_run: false
@@ -303,7 +304,7 @@ variations:
       MLC_MODEL: ssd
     docker:
       deps:
-        - tags: get,preprocessed,dataset,cognata,_mlc,_2d_object_det,_validation
+        - tags: get,preprocessed,dataset,cognata,_mlc,_2d_obj_det,_validation
           enable_if_env:
             MLC_USE_DATASET_FROM_HOST:
             - "yes"
@@ -381,6 +382,9 @@ variations:
       abtf-inference-implementation:
         tags: _cpu
 
+  gpu:
+    alias: cuda
+
   cuda:
     group: device
     env:
@@ -413,12 +417,14 @@ variations:
     add_deps_recursive:
       abtf-inference-implementation:
         tags: _offline
+
   multistream:
     env:
       MLC_MLPERF_LOADGEN_SCENARIO: MultiStream
     add_deps_recursive:
       abtf-inference-implementation:
         tags: _multistream
+
   singlestream:
     group: loadgen-scenario
     default: true
@@ -427,6 +433,14 @@ variations:
     add_deps_recursive:
       abtf-inference-implementation:
         tags: _singlestream
+
+  constantstream:
+    env:
+      MLC_MLPERF_LOADGEN_SCENARIO: ConstantStream
+    add_deps_recursive:
+      abtf-inference-implementation:
+        tags: _constantstream
+
   server:
     env:
       MLC_MLPERF_LOADGEN_SCENARIO: Server
diff --git a/script/app-mlperf-inference-amd/meta.yaml b/script/app-mlperf-inference-amd/meta.yaml
index 2c3b6d063..1481362be 100644
--- a/script/app-mlperf-inference-amd/meta.yaml
+++ b/script/app-mlperf-inference-amd/meta.yaml
@@ -91,12 +91,12 @@ deps:
     - inference-src
 
   # Download MLPerf inference loadgen
-  - tags: get,mlcommons,inference,loadgen
+  - tags: get,mlcommons,inference,loadgen,_inference
     names:
     - inference-loadgen
 
   # Creates user conf for given SUT
-  - tags: generate,user-conf,mlperf,inference
+  - tags: generate,user-conf,mlperf,inference,_inference
     names:
     - user-conf-generator
diff --git a/script/app-mlperf-inference-ctuning-cpp-tflite/meta.yaml b/script/app-mlperf-inference-ctuning-cpp-tflite/meta.yaml
index 815a2a152..0ffa1e9d2 100644
--- a/script/app-mlperf-inference-ctuning-cpp-tflite/meta.yaml
+++ b/script/app-mlperf-inference-ctuning-cpp-tflite/meta.yaml
@@ -28,7 +28,7 @@ deps:
     tags: get,cuda
 - names:
   - loadgen
-  tags: get,loadgen
+  tags: get,loadgen,_inference
 - names:
   - inference-src
   tags: get,mlcommons,inference,src
@@ -121,7 +121,7 @@ post_deps:
 prehook_deps:
 - names:
   - user-conf-generator
-  tags: generate,user-conf,mlperf,inference
+  tags: generate,user-conf,mlperf,inference,_inference
 - enable_if_env:
     MLC_MLPERF_SKIP_RUN:
     - 'no'
diff --git a/script/app-mlperf-inference-dummy/meta.yaml b/script/app-mlperf-inference-dummy/meta.yaml
index f8876eb81..e488d679e 100644
--- a/script/app-mlperf-inference-dummy/meta.yaml
+++ b/script/app-mlperf-inference-dummy/meta.yaml
@@ -91,12 +91,12 @@ deps:
     - inference-src
 
   # Download MLPerf inference loadgen
-  - tags: get,mlcommons,inference,loadgen
+  - tags: get,mlcommons,inference,loadgen,_inference
     names:
     - inference-loadgen
 
   # Creates user conf for given SUT
-  - tags: generate,user-conf,mlperf,inference
+  - tags: generate,user-conf,mlperf,inference,_inference
     names:
     - user-conf-generator
diff --git a/script/app-mlperf-inference-intel/meta.yaml b/script/app-mlperf-inference-intel/meta.yaml
index 86a2806eb..3470e6712 100644
--- a/script/app-mlperf-inference-intel/meta.yaml
+++ b/script/app-mlperf-inference-intel/meta.yaml
@@ -392,7 +392,7 @@ variations:
       - tags: get,mlcommons,inference,src
         names:
         - inference-src
-      - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build
+      - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build,_inference
         names:
         - inference-loadgen
@@ -510,7 +510,7 @@ variations:
       - tags: get,mlcommons,inference,src
         names:
         - inference-src
-      - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build
+      - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build,_inference
         names:
         - inference-loadgen
@@ -627,7 +627,7 @@ variations:
       - tags: get,mlcommons,inference,src
         names:
         - inference-src
-      - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build
+      - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build,_inference
         names:
         - inference-loadgen
@@ -699,7 +699,7 @@ variations:
       - tags: get,mlcommons,inference,src
         names:
         - inference-src
-      - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build
+      - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build,_inference
         names:
         - inference-loadgen
@@ -924,7 +924,7 @@ variations:
       - tags: get,mlcommons,inference,src
         names:
         - inference-src
-      - tags: get,mlcommons,inference,loadgen,_custom-python
+      - tags: get,mlcommons,inference,loadgen,_custom-python,_inference
         names:
         - inference-loadgen
       - tags: get,ml-model,dlrm,_pytorch
@@ -1074,7 +1074,7 @@ variations:
         - inference-src
 
       # Creates user conf for given SUT
-      - tags: generate,user-conf,mlperf,inference
+      - tags: generate,user-conf,mlperf,inference,_inference
         names:
        - user-conf-generator
       - tags: get,generic-sys-util,_rsync
diff --git a/script/app-mlperf-inference-mlcommons-cpp/meta.yaml b/script/app-mlperf-inference-mlcommons-cpp/meta.yaml
index dda32e172..06af3ddba 100644
--- a/script/app-mlperf-inference-mlcommons-cpp/meta.yaml
+++ b/script/app-mlperf-inference-mlcommons-cpp/meta.yaml
@@ -71,7 +71,7 @@ deps:
 
   # Install MLPerf inference dependencies
 
   # Install MLPerf loadgen
-  - tags: get,loadgen
+  - tags: get,loadgen,_inference
     names:
     - loadgen
@@ -129,7 +129,7 @@ deps:
     tags: get,ml-model,retinanet,_onnx,_fp32
 
   # Creates user conf for given SUT
-  - tags: generate,user-conf,mlperf,inference
+  - tags: generate,user-conf,mlperf,inference,_inference
     names:
     - user-conf-generator
diff --git a/script/app-mlperf-inference-mlcommons-python/meta.yaml b/script/app-mlperf-inference-mlcommons-python/meta.yaml
index a0380fec2..b01c7d989 100644
--- a/script/app-mlperf-inference-mlcommons-python/meta.yaml
+++ b/script/app-mlperf-inference-mlcommons-python/meta.yaml
@@ -684,7 +684,7 @@ deps:
 
   # Install MLPerf inference dependencies
 
   # Creates user conf for given SUT
-  - tags: generate,user-conf,mlperf,inference
+  - tags: generate,user-conf,mlperf,inference,_inference
     names:
     - user-conf-generator
     skip_if_env:
       - "yes"
@@ -692,7 +692,7 @@ deps:
 
   # Install MLPerf loadgen
-  - tags: get,loadgen
+  - tags: get,loadgen,_inference
     names:
     - loadgen
     - mlperf-inference-loadgen
diff --git a/script/app-mlperf-inference-qualcomm/meta.yaml b/script/app-mlperf-inference-qualcomm/meta.yaml
index 1e508e0e6..2e0193355 100644
--- a/script/app-mlperf-inference-qualcomm/meta.yaml
+++ b/script/app-mlperf-inference-qualcomm/meta.yaml
@@ -111,12 +111,12 @@ deps:
     - inference-src
 
   # Download MLPerf inference loadgen
-  - tags: get,mlcommons,inference,loadgen
+  - tags: get,mlcommons,inference,loadgen,_inference
     names:
     - inference-loadgen
 
   # Creates user conf for given SUT
-  - tags: generate,user-conf,mlperf,inference
+  - tags: generate,user-conf,mlperf,inference,_inference
     names:
     - user-conf-generator
diff --git a/script/app-mlperf-inference-redhat/meta.yaml b/script/app-mlperf-inference-redhat/meta.yaml
index 55af68d65..1d32af174 100644
--- a/script/app-mlperf-inference-redhat/meta.yaml
+++ b/script/app-mlperf-inference-redhat/meta.yaml
@@ -91,12 +91,12 @@ deps:
     - inference-src
 
   # Download MLPerf inference loadgen
-  - tags: get,mlcommons,inference,loadgen
+  - tags: get,mlcommons,inference,loadgen,_inference
     names:
     - inference-loadgen
 
   # Creates user conf for given SUT
-  - tags: generate,user-conf,mlperf,inference
+  - tags: generate,user-conf,mlperf,inference,_inference
     names:
     - user-conf-generator
diff --git a/script/generate-mlperf-inference-submission/meta.yaml b/script/generate-mlperf-inference-submission/meta.yaml
index c68bc5a37..480fbd008 100644
--- a/script/generate-mlperf-inference-submission/meta.yaml
+++ b/script/generate-mlperf-inference-submission/meta.yaml
@@ -13,12 +13,8 @@ deps:
   - python
   - python3
   tags: get,python3
-- names:
-  - inference-src
-  tags: mlcommons,inference,src
 - tags: get,sut,system-description
 - tags: install,pip-package,for-mlc-python,_package.tabulate
-- tags: get,mlperf,inference,utils
 - names:
   - get-mlperf-results-dir
   skip_if_env:
@@ -97,18 +93,51 @@ post_deps:
   skip_if_env:
     MLC_SKIP_TRUNCATE_ACCURACY:
     - 'yes'
+  names:
+  - truncate-mlperf-accuracy-log
   tags: accuracy,truncate,mlc
 - enable_if_env:
     MLC_RUN_MLPERF_SUBMISSION_PREPROCESSOR:
     - 'yes'
+  names:
+  - preprocess-mlperf-submission
   tags: preprocess,mlperf,submission
 - skip_if_env:
     MLC_RUN_SUBMISSION_CHECKER:
     - 'no'
   names:
   - mlperf-inference-submission-checker
+  - mlperf-submission-checker
   - submission-checker
   tags: submission,inference,checker,mlc
+variations:
+  inference:
+    default: true
+    add_deps_recursive:
+      truncate-mlperf-accuracy-log:
+        tags: _inference
+      preprocess-mlperf-submission:
+        tags: _inference
+      mlperf-inference-submission-checker:
+        tags: _inference
+    deps:
+      - names:
+        - inference-src
+        tags: mlcommons,inference,src
+      - tags: get,mlperf,inference,utils
+  automotive:
+    add_deps_recursive:
+      truncate-mlperf-accuracy-log:
+        tags: _automotive
+      preprocess-mlperf-submission:
+        tags: _automotive
+      mlperf-inference-submission-checker:
+        tags: _automotive
+    deps:
+      - names:
+        - automotive-src
+        tags: mlcommons,automotive,src
+      - tags: get,mlperf,automotive,utils
 tags:
 - generate
 - submission
diff --git a/script/generate-mlperf-inference-user-conf/customize.py b/script/generate-mlperf-inference-user-conf/customize.py
index b08f0b0fb..2c93af586 100644
--- a/script/generate-mlperf-inference-user-conf/customize.py
+++ b/script/generate-mlperf-inference-user-conf/customize.py
@@ -100,7 +100,7 @@ def preprocess(i):
 
     query_count = None
     value = None
-    if scenario in ['Offline', 'Server']:
+    if scenario in ['Offline', 'Server', 'ConstantStream']:
         metric = "target_qps"
         tolerance = 1.01
         # value = env.get('MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS') if scenario == "Server" else env.get('MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS')
@@ -343,7 +343,7 @@ def preprocess(i):
             ".sample_concatenate_permutation = 0" + "\n"
     max_duration_fast_s = int(env.get('MLC_MLPERF_MAX_DURATION_FAST', 120))
     max_duration_fast = str(max_duration_fast_s * 1000)  # in milliseconds
-    if scenario == "Server":
+    if scenario == "Server" or scenario == "ConstantStream":
         user_conf += ml_model_name + "." + scenario + \
             f".max_duration = {max_duration_fast}" + "\n"
         target_qps = conf['target_qps']
@@ -368,9 +368,13 @@ def preprocess(i):
         user_conf += ml_model_name + "." + scenario + ".min_duration = " + \
             env['MLC_MLPERF_INFERENCE_MIN_DURATION'] + " \n"
     elif not is_false(env.get('MLC_MLPERF_USE_MAX_DURATION', 'yes')) and not is_true(env.get(
-            'MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no')):
+            'MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no')) and env.get('MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE') != '99.9':
         user_conf += ml_model_name + "." + scenario + \
             f".max_duration = {max_duration_valid}" + "\n"
+    if scenario in ["SingleStream", "ConstantStream"] and env.get('MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE') == '99.9' and env.get(
+            'MLC_MLPERF_DEFAULT_MAX_QUERY_COUNT', '') != '' and env.get('MLC_MLPERF_TARGET_LATENCY', '') == '':
+        user_conf += ml_model_name + "." + scenario + \
+            f".max_query_count = {env.get('MLC_MLPERF_DEFAULT_MAX_QUERY_COUNT')}" + "\n"
     if scenario == "MultiStream":
         user_conf += ml_model_name + "." + scenario + ".min_query_count = " + \
             env.get(
diff --git a/script/generate-mlperf-inference-user-conf/meta.yaml b/script/generate-mlperf-inference-user-conf/meta.yaml
index 4288bd443..18fbec442 100644
--- a/script/generate-mlperf-inference-user-conf/meta.yaml
+++ b/script/generate-mlperf-inference-user-conf/meta.yaml
@@ -47,6 +47,7 @@ input_mapping:
   target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY
   offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
   server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS
+  constantstream_target_qps: MLC_MLPERF_LOADGEN_CONSTANTSTREAM_TARGET_QPS
   singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
   multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
   performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
@@ -91,12 +92,16 @@ variations:
   inference:
     default: true
     group: benchmark_wg
+    env:
+      MLC_BENCHMARK_GROUP: inference
     deps:
       - tags: get,mlcommons,inference,src
         names:
         - inference-src
   automotive:
     group: benchmark_wg
+    env:
+      MLC_BENCHMARK_GROUP: automotive
    deps:
       - tags: get,mlcommons,automotive,src
         names:
diff --git a/script/get-mlperf-inference-loadgen/meta.yaml b/script/get-mlperf-inference-loadgen/meta.yaml
index ed75db192..e841b3293 100644
--- a/script/get-mlperf-inference-loadgen/meta.yaml
+++ b/script/get-mlperf-inference-loadgen/meta.yaml
@@ -169,6 +169,10 @@ variations:
   automotive:
     env:
       MLC_INFERENCE_AUTOMOTIVE_REPO: 'YES'
+  inference:
+    default: true
+    env:
+      MLC_INFERENCE_AUTOMOTIVE_REPO: 'NO'
 versions:
   custom:
     add_deps:
diff --git a/script/get-preprocessed-dataset-cognata/meta.yaml b/script/get-preprocessed-dataset-cognata/meta.yaml
index c8bac0417..524633f11 100644
--- a/script/get-preprocessed-dataset-cognata/meta.yaml
+++ b/script/get-preprocessed-dataset-cognata/meta.yaml
@@ -36,7 +36,7 @@ variations:
       MLC_DATASET_COGNATA_TASK: segmentation
   validation,2d_obj_det:
     env:
-      MLC_DATASET_COGNATA_EXTRACTED_FOLDER_NAMEE: val_2d
+      MLC_DATASET_COGNATA_EXTRACTED_FOLDER_NAME: val_2d
       MLC_DATASET_COGNATA_TAR_FILENAME: val_2d.tar.gz
       MLC_DOWNLOAD_URL: mlc-cognata:mlc_cognata_dataset/preprocessed_2d/<<>>
   calibration,2d_obj_det:
diff --git a/script/preprocess-mlperf-inference-submission/meta.yaml b/script/preprocess-mlperf-inference-submission/meta.yaml
index 5381b36dd..0ddc10d51 100644
--- a/script/preprocess-mlperf-inference-submission/meta.yaml
+++ b/script/preprocess-mlperf-inference-submission/meta.yaml
@@ -9,17 +9,26 @@ deps:
   - python
   - python3
   tags: get,python3
-- names:
-  - inference-src
-  - submission-checker-src
-  tags: get,mlcommons,inference,src,_branch.dev
-  version: custom
 - names:
   - get-mlperf-submission-dir
   skip_if_env:
     MLC_MLPERF_INFERENCE_SUBMISSION_DIR:
     - 'on'
   tags: get,mlperf,submission,dir
+variations:
+  inference:
+    default: true
+    deps:
+      - names:
+        - inference-src
+        - submission-checker-src
+        tags: mlcommons,inference,src,_branch.dev
+        version: custom
+  automotive:
+    deps:
+      - names:
+        - automotive-src
+        tags: mlcommons,automotive,src
 input_mapping:
   input: MLC_MLPERF_INFERENCE_SUBMISSION_DIR
   submission_dir: MLC_MLPERF_INFERENCE_SUBMISSION_DIR
diff --git a/script/process-mlperf-accuracy/customize.py b/script/process-mlperf-accuracy/customize.py
index afb359ce7..8a2168956 100644
--- a/script/process-mlperf-accuracy/customize.py
+++ b/script/process-mlperf-accuracy/customize.py
@@ -214,7 +214,7 @@ def preprocess(i):
     elif dataset == "cognata_ssd":
         CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SSD_RESNET50_PATH'],
                                                                     "accuracy_cognata.py") + "' --mlperf-accuracy-file '" + os.path.join(
-            result_dir, "mlperf_log_accuracy.json") + "' --dataset-path '" + env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'] + "' --dataset cognata --config '" + "baseline_8MP_ss_scales_fm1_5x5_all" + "' > '" + out_file + "'"
+            result_dir, "mlperf_log_accuracy.json") + "' --dataset-path '" + env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'] + "' --config '" + "baseline_8MP_ss_scales_fm1_5x5_all" + "' > '" + out_file + "'"
     elif dataset == "cognata_deeplab":
         CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_DEEPLABV3PLUS_PATH'],
                                                                     "accuracy_cognata.py") + "' --mlperf-accuracy-file '" + os.path.join(
diff --git a/script/run-mlperf-automotive-app/customize.py b/script/run-mlperf-automotive-app/customize.py
index 10983fc24..5e26568db 100644
--- a/script/run-mlperf-automotive-app/customize.py
+++ b/script/run-mlperf-automotive-app/customize.py
@@ -229,6 +229,9 @@ def preprocess(i):
     elif scenario == "Server":
         if env.get('MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS'):
             env['MLC_MLPERF_LOADGEN_TARGET_QPS'] = env['MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS']
+    elif scenario == "ConstantStream":
+        if env.get('MLC_MLPERF_LOADGEN_CONSTANTSTREAM_TARGET_QPS'):
+            env['MLC_MLPERF_LOADGEN_TARGET_QPS'] = env['MLC_MLPERF_LOADGEN_CONSTANTSTREAM_TARGET_QPS']
     elif scenario == "SingleStream":
         if env.get('MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY'):
             env['MLC_MLPERF_LOADGEN_TARGET_LATENCY'] = env['MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY']
diff --git a/script/run-mlperf-automotive-app/meta.yaml b/script/run-mlperf-automotive-app/meta.yaml
index 74b9e34c9..9a2fb7401 100644
--- a/script/run-mlperf-automotive-app/meta.yaml
+++ b/script/run-mlperf-automotive-app/meta.yaml
@@ -73,6 +73,7 @@ input_mapping:
   run_style: MLC_MLPERF_RUN_STYLE
   scenario: MLC_MLPERF_LOADGEN_SCENARIO
   server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS
+  constantstream_target_qps: MLC_MLPERF_LOADGEN_CONSTANTSTREAM_TARGET_QPS
   singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
   skip_submission_generation: MLC_MLPERF_SKIP_SUBMISSION_GENERATION
   skip_truncation: MLC_SKIP_TRUNCATE_ACCURACY
@@ -251,4 +252,4 @@ variations:
       - 'false'
       - 'False'
       - '0'
-      tags: generate,mlperf,inference,submission
+      tags: generate,mlperf,inference,submission,_automotive
diff --git a/script/run-mlperf-inference-app/meta.yaml b/script/run-mlperf-inference-app/meta.yaml
index 5c13dfa16..9cc3064c6 100644
--- a/script/run-mlperf-inference-app/meta.yaml
+++ b/script/run-mlperf-inference-app/meta.yaml
@@ -433,7 +433,7 @@ variations:
       skip_if_env:
         MLC_MLPERF_SKIP_SUBMISSION_GENERATION:
         - 'yes'
-      tags: generate,mlperf,inference,submission
+      tags: generate,mlperf,inference,submission,_inference
 
 versions:
   master: {}
diff --git a/script/run-mlperf-inference-submission-checker/meta.yaml b/script/run-mlperf-inference-submission-checker/meta.yaml
index 8d9667ae6..658e8f943 100644
--- a/script/run-mlperf-inference-submission-checker/meta.yaml
+++ b/script/run-mlperf-inference-submission-checker/meta.yaml
@@ -16,10 +16,6 @@ deps:
   - python
   - python3
   tags: get,python3
-- names:
-  - inference-src
-  - submission-checker-src
-  tags: get,mlcommons,inference,src
 - tags: pull,git,repo
   env:
     MLC_GIT_CHECKOUT_PATH: '<<>>'
@@ -43,6 +39,8 @@ deps:
 - enable_if_env:
     MLC_TMP_MLPERF_INFERENCE_PREPROCESS_SUBMISSION:
     - 'on'
+  names:
+  - preprocess-mlperf-submission
   tags: preprocess,mlperf,inference,submission
 docker:
   mlc_repo: mlcommons@mlperf-automations
@@ -137,6 +135,23 @@ variations:
   short-run:
     env:
       MLC_MLPERF_SHORT_RUN: 'yes'
+  inference:
+    default: true
+    add_deps_recursive:
+      preprocess-mlperf-submission:
+        tags: _inference
+    deps:
+      - names:
+        - inference-src
+        tags: mlcommons,inference,src
+  automotive:
+    add_deps_recursive:
+      preprocess-mlperf-submission:
+        tags: _automotive
+    deps:
+      - names:
+        - automotive-src
+        tags: mlcommons,automotive,src
 versions:
   master:
     adr:
diff --git a/script/truncate-mlperf-inference-accuracy-log/meta.yaml b/script/truncate-mlperf-inference-accuracy-log/meta.yaml
index c0f02f6d3..2acbf3b85 100644
--- a/script/truncate-mlperf-inference-accuracy-log/meta.yaml
+++ b/script/truncate-mlperf-inference-accuracy-log/meta.yaml
@@ -9,15 +9,24 @@ deps:
   - python
   - python3
   tags: get,python3
-- names:
-  - inference-src
-  tags: get,mlcommons,inference,src
 - names:
   - get-mlperf-submission-dir
   skip_if_env:
     MLC_MLPERF_INFERENCE_SUBMISSION_DIR:
     - 'on'
   tags: get,mlperf,submission,dir
+variations:
+  inference:
+    default: true
+    deps:
+      - names:
+        - inference-src
+        tags: mlcommons,inference,src
+  automotive:
+    deps:
+      - names:
+        - automotive-src
+        tags: mlcommons,automotive,src
 input_mapping:
   input: MLC_MLPERF_INFERENCE_SUBMISSION_DIR
   submission_dir: MLC_MLPERF_INFERENCE_SUBMISSION_DIR
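
Reviewer note (not part of the patch): a minimal sketch of how the new knobs interact, assuming hypothetical helper names (`resolve_device`, `constantstream_user_conf_lines`); the actual logic lives in the customize.py hunks above, and exact key names or defaults there take precedence.

# sketch_constantstream_device.py -- illustrative only
def resolve_device(env, model):
    # Map MLC_MLPERF_DEVICE to the reference implementation's --device value.
    # The BEVFormer reference implementation is CPU-only, so "gpu" falls back
    # to "cpu" (mirrors the warning added in
    # app-mlperf-automotive-mlcommons-python/customize.py).
    device = env.get('MLC_MLPERF_DEVICE', 'cpu')
    if model == 'bevformer' and device == 'gpu':
        device = 'cpu'
    return 'cuda' if device == 'gpu' else 'cpu'


def constantstream_user_conf_lines(env, ml_model_name, conf):
    # Approximate the user.conf lines emitted for the new ConstantStream
    # scenario: it is QPS-based like Offline/Server, and a default
    # max_query_count applies when the 99.9th-percentile latency target is in
    # force and no explicit target latency was supplied.
    scenario = 'ConstantStream'
    lines = [f"{ml_model_name}.{scenario}.target_qps = {conf['target_qps']}"]
    if (env.get('MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE') == '99.9'
            and env.get('MLC_MLPERF_DEFAULT_MAX_QUERY_COUNT', '') != ''
            and env.get('MLC_MLPERF_TARGET_LATENCY', '') == ''):
        lines.append(
            f"{ml_model_name}.{scenario}.max_query_count = "
            f"{env['MLC_MLPERF_DEFAULT_MAX_QUERY_COUNT']}")
    return lines


if __name__ == '__main__':
    env = {
        'MLC_MLPERF_DEVICE': 'gpu',
        'MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE': '99.9',
        'MLC_MLPERF_DEFAULT_MAX_QUERY_COUNT': '6636',
    }
    print(resolve_device(env, 'bevformer'))   # -> cpu (forced fallback)
    print(resolve_device(env, 'ssd'))         # -> cuda
    print(constantstream_user_conf_lines(env, 'ssd', {'target_qps': 10}))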