From d545f2da810a322b1bff95572a3e7c6666342905 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Sun, 11 May 2025 00:54:27 +0100 Subject: [PATCH 01/43] Improvements for install-gcc-src (#405) * Improvements for gcc install from src --- .../customize.py | 7 +--- script/get-generic-sys-util/meta.yaml | 14 +++++++ script/get-git-repo/meta.yaml | 2 +- script/install-gcc-src/customize.py | 10 ----- script/install-gcc-src/meta.yaml | 39 ++++++++++++++++++- script/install-gcc-src/run.sh | 15 ++++--- 6 files changed, 62 insertions(+), 25 deletions(-) diff --git a/script/get-dataset-cognata-mlcommons/customize.py b/script/get-dataset-cognata-mlcommons/customize.py index 1a070e0ab..be725599d 100644 --- a/script/get-dataset-cognata-mlcommons/customize.py +++ b/script/get-dataset-cognata-mlcommons/customize.py @@ -28,11 +28,6 @@ def preprocess(i): env['MLC_DATASET_COGNATA_POC_TEXT_MD5_FILE_PATH'] = os.path.join( i['run_script_input']['path'], 'checksums', 'cognata_poc.txt') - # Check if user requests path not in CM cache - # - # --path (env MLC_TMP_PATH) shows where to store Cognata data set instead of CM cahe - # --import tells CM to import existing Cognata from a given path and skip further download/processing - # import_path = env.get( 'MLC_DATASET_MLCOMMONS_COGNATA_IMPORT_PATH', '').strip() @@ -75,7 +70,7 @@ def postprocess(i): if not os.path.isdir(mlc_cache_dataset_path): return { - 'return': 1, 'error': 'Dataset corrupted - CM cache path not found: {}'.format(mlc_cache_dataset_path)} + 'return': 1, 'error': 'Dataset corrupted - MLC cache path not found: {}'.format(mlc_cache_dataset_path)} if env.get('MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES', '') == '': env['MLC_DATASET_MLCOMMONS_COGNATA_PATH'] = os.path.dirname( diff --git a/script/get-generic-sys-util/meta.yaml b/script/get-generic-sys-util/meta.yaml index 8f75171de..5f366d775 100644 --- a/script/get-generic-sys-util/meta.yaml +++ b/script/get-generic-sys-util/meta.yaml @@ -97,6 +97,20 @@ variations: brew: '' dnf: dmidecode yum: dmidecode + flex: + env: + MLC_SYS_UTIL_NAME: flex + MLC_SYS_UTIL_VERSION_CMD: flex --version # tbd: regular expression for version + MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + new_env_keys: + - MLC_FLEX_VERSION + state: + flex: # tbd: complete for other flavours of linux + apt: flex + brew: flex + dnf: flex + yum: flex + ffmpeg: env: MLC_SYS_UTIL_NAME: ffmpeg diff --git a/script/get-git-repo/meta.yaml b/script/get-git-repo/meta.yaml index 962b9281f..efdf3bf63 100644 --- a/script/get-git-repo/meta.yaml +++ b/script/get-git-repo/meta.yaml @@ -36,7 +36,7 @@ post_deps: - pull-git-repo tags: pull,git,repo print_env_at_the_end: - MLC_GIT_CHECKOUT_PATH: CM cache path to the Git repo + MLC_GIT_CHECKOUT_PATH: MLC cache path to the Git repo tags: - get - git diff --git a/script/install-gcc-src/customize.py b/script/install-gcc-src/customize.py index dba1f0d56..7edf5103b 100644 --- a/script/install-gcc-src/customize.py +++ b/script/install-gcc-src/customize.py @@ -17,16 +17,6 @@ def preprocess(i): recursion_spaces = i['recursion_spaces'] - need_version = env.get('MLC_VERSION', '') - if need_version == '': - return {'return': 1, - 'error': 'internal problem - MLC_VERSION is not defined in env'} - - logger.info(f"{recursion_spaces} # Requested version: {need_version}") - - if 'MLC_GIT_CHECKOUT' not in env: - env['MLC_GIT_CHECKOUT'] = 'releases/gcc-' + need_version - env['MLC_GCC_INSTALLED_PATH'] = os.path.join(os.getcwd(), 'install', 'bin') return {'return': 0} diff --git a/script/install-gcc-src/meta.yaml 
b/script/install-gcc-src/meta.yaml index c463860ed..741ab39b6 100644 --- a/script/install-gcc-src/meta.yaml +++ b/script/install-gcc-src/meta.yaml @@ -3,9 +3,27 @@ automation_alias: script automation_uid: 5b4e0237da074764 cache: true category: Compiler automation -default_version: '12' deps: - tags: detect,os +- tags: detect,cpu +- tags: get,generic-sys-util,_flex +- env: + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_GCC_SRC_REPO_PATH + extra_cache_tags: gcc,src,gcc-src,gcc-src-repo + force_env_keys: + - MLC_GIT_* + names: + - gcc-src-repo + tags: get,git,repo + update_tags_from_env_with_prefix: + _branch.: + - MLC_GIT_CHECKOUT + _repo.: + - MLC_GIT_URL + _sha.: + - MLC_GIT_CHECKOUT_SHA + _tag.: + - MLC_GIT_CHECKOUT_TAG env: MLC_GIT_URL: git://gcc.gnu.org/git/gcc.git post_deps: @@ -19,7 +37,24 @@ tags: - gcc - src-gcc uid: faae0ebd6e1242db -versions: +tests: + run_inputs: + - {} +variations: master: + group: version + default: true env: MLC_GIT_CHECKOUT: master + version.#: + group: version + env: + MLC_GIT_CHECKOUT: releases/gcc-# + branch.#: + group: version + env: + MLC_GIT_CHECKOUT: '#' + lang.#: + env: + +MLC_GCC_ENABLED_LANGUAGES: + - '#' diff --git a/script/install-gcc-src/run.sh b/script/install-gcc-src/run.sh index c3631255d..c8b1ca291 100644 --- a/script/install-gcc-src/run.sh +++ b/script/install-gcc-src/run.sh @@ -4,12 +4,14 @@ CUR_DIR=$PWD echo "******************************************************" +echo ${MLC_GCC_SRC_REPO_PATH} + if [ ! -d "src" ]; then - echo "Cloning GCC from ${MLC_GIT_URL} with branch ${MLC_GIT_CHECKOUT}..." - git clone -b "${MLC_GIT_CHECKOUT}" ${MLC_GIT_URL} src - if [ "${?}" != "0" ]; then exit 1; fi + cp -r ${MLC_GCC_SRC_REPO_PATH} src + test $? -eq 0 || exit $? fi + mkdir -p install mkdir -p build @@ -22,16 +24,17 @@ cd ../build ../src/configure --prefix="${INSTALL_DIR}" --with-gcc-major-version-only --disable-multilib -if [ "${?}" != "0" ]; then exit 1; fi +test $? -eq 0 || exit $? echo "******************************************************" MLC_MAKE_CORES=${MLC_MAKE_CORES:-${MLC_HOST_CPU_TOTAL_CORES}} MLC_MAKE_CORES=${MLC_MAKE_CORES:-2} make -j${MLC_MAKE_CORES} -if [ "${?}" != "0" ]; then exit 1; fi +test $? -eq 0 || exit $? + make install -if [ "${?}" != "0" ]; then exit 1; fi +test $? -eq 0 || exit $? 
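# A note on the `test $? -eq 0 || exit $?` idiom adopted above: by the time
# `exit $?` runs, `$?` already holds the exit status of `test` itself (1 on
# any failure), so the script still exits with status 1 rather than with the
# failing command's own code -- the same net behaviour as the `exit 1` it
# replaces, just shorter. A minimal sketch of a variant that would propagate
# the original status, assuming plain POSIX shell semantics:
#
#   make -j${MLC_MAKE_CORES}
#   rc=$?                            # capture before anything else resets $?
#   [ "$rc" -eq 0 ] || exit "$rc"    # exit with make's own status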
# Clean build directory (too large) cd ${CUR_DIR} From ccf78f4711b92dbaa4f240775471edd367080074 Mon Sep 17 00:00:00 2001 From: Hussain Lohawala <73359258+H9660@users.noreply.github.com> Date: Wed, 14 May 2025 02:40:38 +0530 Subject: [PATCH 02/43] Contest 2025 gemini call (#403) * Added gemini script for api calls * Changed encoding to support more characters --- script/gemini_call/customize.py | 63 ++++++++++++++++++++ script/gemini_call/gemini_call.py | 95 +++++++++++++++++++++++++++++++ script/gemini_call/meta.yaml | 36 ++++++++++++ script/gemini_call/run.bat | 1 + script/gemini_call/run.sh | 17 ++++++ 5 files changed, 212 insertions(+) create mode 100644 script/gemini_call/customize.py create mode 100644 script/gemini_call/gemini_call.py create mode 100644 script/gemini_call/meta.yaml create mode 100644 script/gemini_call/run.bat create mode 100644 script/gemini_call/run.sh diff --git a/script/gemini_call/customize.py b/script/gemini_call/customize.py new file mode 100644 index 000000000..37e1bb064 --- /dev/null +++ b/script/gemini_call/customize.py @@ -0,0 +1,63 @@ +from mlc import utils +import os +import json +import yaml + + +def write_gemini_yaml(model, system_prompt, user_prompt, + filename='gemini-prompt.yaml'): + data = { + 'model': model, + 'contents': [ + { + 'role': 'user', + 'parts': [ + {'text': f"{system_prompt}\n\n{user_prompt}"} + ] + } + ], + 'generationConfig': { + 'temperature': 0.7, + 'maxOutputTokens': 200 + } + } + + with open(filename, 'w', encoding='utf-8') as f: + yaml.dump(data, f, sort_keys=False, allow_unicode=True) + + +def preprocess(i): + env = i['env'] + state = i['state'] + + if 'MLC_GEMINI_CONFIG_PATH' not in env or not os.path.exists( + env['MLC_GEMINI_CONFIG_PATH']): + if 'user_prompt' in state: + model = env.get('MLC_GEMINI_MODEL', 'gemini-2.0-flash') + user_prompt = state['user_prompt'] + system_prompt = state.get( + 'system_prompt', + 'You are an AI agent expected to answer questions correctly') + write_gemini_yaml( + model, + system_prompt, + user_prompt, + 'tmp-gemini-prompt.yaml') + env['MLC_GEMINI_CONFIG_PATH'] = 'tmp-gemini-prompt.yaml' + + env['MLC_RUN_CMD'] = f'{env["MLC_PYTHON_BIN_WITH_PATH"]} "{os.path.join(env["MLC_TMP_CURRENT_SCRIPT_PATH"], "gemini_call.py")}"' + + return {'return': 0} + + +def postprocess(i): + env = i['env'] + state = i['state'] + + filename = 'tmp-gemini-results.json' + with open(filename, 'r', encoding='utf-8') as f: + data = json.load(f) + + state['MLC_GEMINI_RESPONSE'] = data['content'] + os_info = i['os_info'] + return {'return': 0} diff --git a/script/gemini_call/gemini_call.py b/script/gemini_call/gemini_call.py new file mode 100644 index 000000000..1f4791b97 --- /dev/null +++ b/script/gemini_call/gemini_call.py @@ -0,0 +1,95 @@ +import requests +import os +import json +import yaml + +import yaml + + +def extract_prompts(yaml_path): + with open(yaml_path, 'r', encoding='utf-8') as f: + data = yaml.safe_load(f) + + full_text = data['contents'][0]['parts'][0]['text'] + + # Split at "Question Text:" + if "Question Text:" not in full_text: + raise ValueError("Expected 'Question Text:' marker not found.") + + system_prompt, question_part = full_text.split("Question Text:", 1) + + # Trim whitespace + system_prompt = system_prompt.strip() + user_prompt = question_part.strip() + + return system_prompt, user_prompt + + +def gemini_call(message=None): + try: + api_key = os.environ['MLC_GEMINI_API_KEY'] + url = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key={api_key}" + 
config_path = os.environ.get('MLC_GEMINI_CONFIG_PATH') + # Load config if it exists + if config_path and os.path.exists(config_path): + try: + with open(config_path, 'r', encoding="utf-8") as file: + data = yaml.safe_load(file) + except Exception as e: + return {"error": f"Error reading config file: {str(e)}"} + + if os.environ.get('MLC_GEMINI_CONFIG_MODIFY', '') == 'yes': + try: + data['messages'][1]['content'] = data['messages'][1]['content'].replace( + "{{ MESSAGE }}", message or "") + except Exception as e: + return {"error": f"Config format issue: {str(e)}"} + # Load prompts + system_prompt, user_prompt = extract_prompts(config_path) + # Combine both in first message + full_prompt = f"{system_prompt}\n\n{user_prompt}" + + data = { + "contents": [ + { + "role": "user", + "parts": [ + {"text": full_prompt} + ] + } + ] + } + + headers = { + 'Content-Type': 'application/json' + } + + response = requests.post(url, json=data, headers=headers) + response.raise_for_status() + result = response.json() + + content = result['candidates'][0]['content']['parts'][0]['text'] + + with open('tmp-gemini-results.json', 'w', encoding='utf-8') as f: + json.dump({'content': content}, f, ensure_ascii=False, indent=2) + + return {"content": content} + + except requests.exceptions.RequestException as e: + return {"error": f"Request error: {str(e)}"} + + except KeyError as e: + return {"error": f"Missing key in response: {str(e)}"} + + except Exception as e: + return {"error": f"Unexpected error: {str(e)}"} + + +def main(): + result = gemini_call() + if 'error' in result: + raise Exception(result['error']) + + +if __name__ == '__main__': + main() diff --git a/script/gemini_call/meta.yaml b/script/gemini_call/meta.yaml new file mode 100644 index 000000000..e663a65c0 --- /dev/null +++ b/script/gemini_call/meta.yaml @@ -0,0 +1,36 @@ +alias: gemini_call +automation_alias: script +automation_uid: 5b4e0237da074764 +category: MLC Script Template +deps: + - tags: get,python3 + names: + - python + - python3 +new_env_keys: [] +new_state_keys: +- MLC_GEMINI_RESPONSE +post_deps: [] +posthook_deps: [] +prehook_deps: [] +tags: +- gemini-call +- query +input_mapping: + api_key: MLC_GEMINI_API_KEY + config_path: MLC_GEMINI_CONFIG_PATH + system_prompt: MLC_GEMINI_SYSTEM_PROMPT + user_prompt: MLC_GEMINI_USER_PROMPT + model: MLC_GEMINI_MODEL +tests: + run_inputs: [] +uid: 1cfe3d0658364a2b +variations: + gemini: + group: api_provider + default: true + env: + MLC_GEMINI_API_URL: 'https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=' + + + diff --git a/script/gemini_call/run.bat b/script/gemini_call/run.bat new file mode 100644 index 000000000..4509961ce --- /dev/null +++ b/script/gemini_call/run.bat @@ -0,0 +1 @@ +%MLC_RUN_CMD% \ No newline at end of file diff --git a/script/gemini_call/run.sh b/script/gemini_call/run.sh new file mode 100644 index 000000000..fa21de70c --- /dev/null +++ b/script/gemini_call/run.sh @@ -0,0 +1,17 @@ +#!/bin/bash +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... 
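# For reference: the command stored in MLC_RUN_CMD invokes gemini_call.py,
# which issues a single POST to the public generateContent endpoint. A rough
# curl equivalent of that request (a sketch only -- the real script builds
# the prompt from the YAML config and writes tmp-gemini-results.json):
#
#   curl -s -X POST \
#     -H 'Content-Type: application/json' \
#     -d '{"contents":[{"role":"user","parts":[{"text":"Hello"}]}]}' \
#     "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=${MLC_GEMINI_API_KEY}"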
+# run "$MLC_RUN_CMD" From 9ea38ea54b470d472963a8c308d9760a4c947472 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Tue, 13 May 2025 21:10:51 +0000 Subject: [PATCH 03/43] [Automated Commit] Format Codebase [skip ci] --- script/gemini_call/customize.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/script/gemini_call/customize.py b/script/gemini_call/customize.py index 37e1bb064..592ff9fa0 100644 --- a/script/gemini_call/customize.py +++ b/script/gemini_call/customize.py @@ -13,7 +13,7 @@ def write_gemini_yaml(model, system_prompt, user_prompt, 'role': 'user', 'parts': [ {'text': f"{system_prompt}\n\n{user_prompt}"} - ] + ] } ], 'generationConfig': { @@ -44,7 +44,7 @@ def preprocess(i): user_prompt, 'tmp-gemini-prompt.yaml') env['MLC_GEMINI_CONFIG_PATH'] = 'tmp-gemini-prompt.yaml' - + env['MLC_RUN_CMD'] = f'{env["MLC_PYTHON_BIN_WITH_PATH"]} "{os.path.join(env["MLC_TMP_CURRENT_SCRIPT_PATH"], "gemini_call.py")}"' return {'return': 0} @@ -57,7 +57,7 @@ def postprocess(i): filename = 'tmp-gemini-results.json' with open(filename, 'r', encoding='utf-8') as f: data = json.load(f) - + state['MLC_GEMINI_RESPONSE'] = data['content'] os_info = i['os_info'] return {'return': 0} From 7ebc222a512ea610e72d5735b21b90d4ab123742 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Wed, 14 May 2025 16:58:15 +0100 Subject: [PATCH 04/43] Fixes for oneapi (#408) * Fixes for oneapi installation --- script/download-and-extract/meta.yaml | 12 ++++- script/extract-file/meta.yaml | 4 +- .../{get-one-api => get-oneapi}/COPYRIGHT.md | 0 .../{get-one-api => get-oneapi}/customize.py | 44 ++++++++++++------- script/get-oneapi/install.sh | 9 ++++ script/{get-one-api => get-oneapi}/meta.yaml | 12 +++++ script/{get-one-api => get-oneapi}/run.bat | 0 script/{get-one-api => get-oneapi}/run.sh | 0 8 files changed, 62 insertions(+), 19 deletions(-) rename script/{get-one-api => get-oneapi}/COPYRIGHT.md (100%) rename script/{get-one-api => get-oneapi}/customize.py (68%) create mode 100644 script/get-oneapi/install.sh rename script/{get-one-api => get-oneapi}/meta.yaml (57%) rename script/{get-one-api => get-oneapi}/run.bat (100%) rename script/{get-one-api => get-oneapi}/run.sh (100%) diff --git a/script/download-and-extract/meta.yaml b/script/download-and-extract/meta.yaml index d2137d71c..96ff8c6a8 100644 --- a/script/download-and-extract/meta.yaml +++ b/script/download-and-extract/meta.yaml @@ -85,11 +85,19 @@ variations: default: 'true' env: MLC_EXTRACT_REMOVE_EXTRACTED: 'no' + adr: + extract-script: + tags: + _keep group: keep - no-remove-extracted: + remove-extracted: env: - MLC_EXTRACT_REMOVE_EXTRACTED: 'no' + MLC_EXTRACT_REMOVE_EXTRACTED: 'yes' group: keep + adr: + extract-script: + tags: + _remove-extracted rclone: add_deps: download-script: diff --git a/script/extract-file/meta.yaml b/script/extract-file/meta.yaml index 79f1538c3..b4cbc9131 100644 --- a/script/extract-file/meta.yaml +++ b/script/extract-file/meta.yaml @@ -41,10 +41,10 @@ variations: default: true env: MLC_EXTRACT_REMOVE_EXTRACTED: 'no' - no-remove-extracted: + remove-extracted: group: keep env: - MLC_EXTRACT_REMOVE_EXTRACTED: 'no' + MLC_EXTRACT_REMOVE_EXTRACTED: 'yes' path.#: env: MLC_EXTRACT_FILEPATH: '#' diff --git a/script/get-one-api/COPYRIGHT.md b/script/get-oneapi/COPYRIGHT.md similarity index 100% rename from script/get-one-api/COPYRIGHT.md rename to script/get-oneapi/COPYRIGHT.md diff --git a/script/get-one-api/customize.py b/script/get-oneapi/customize.py similarity index 68% rename from 
script/get-one-api/customize.py rename to script/get-oneapi/customize.py index 0f6e78b10..9ad62e0b8 100644 --- a/script/get-one-api/customize.py +++ b/script/get-oneapi/customize.py @@ -28,26 +28,30 @@ def preprocess(i): 'run_script_input': i['run_script_input'], 'recursion_spaces': recursion_spaces}) if r['return'] > 0: - # Uncomment when MLC script for installing oneapi compiler is integrated - # Initial finding suggests that oneapi could be installed without - # hastle in linux using apt, but is tricky in windows - - # if r['return'] == 16: - # if env.get('MLC_TMP_FAIL_IF_NOT_FOUND','').lower() == 'yes': - # return r - # - # print (recursion_spaces+' # {}'.format(r['error'])) - # - # # Attempt to run installer - # r = {'return':0, 'skip':True, 'script':{'tags':'install,gcc,src'}} - - return r + r = i['automation'].run_native_script( + {'run_script_input': i['run_script_input'], 'env': env, 'script_name': 'install'}) + if r['return'] > 0: + return r + version_prefix = env['MLC_ONEAPI_INSTALL_VERSION_PREFIX'] + env['MLC_TMP_PATH'] = os.path.join( + os.getcwd(), "install", version_prefix, "bin") + + r = i['automation'].find_artifact({'file_name': file_name_c, + 'env': env, + 'os_info': os_info, + 'default_path_env_key': 'PATH', + 'detect_version': True, + 'env_path_key': 'MLC_ICX_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: + return r return {'return': 0} def detect_version(i): - r = i['automation'].parse_version({'match_text': r'oneAPI\s+.*\(([\d.]+)\)', + r = i['automation'].parse_version({'match_text': r'oneAPI\s+.* Compiler\s+([\d+.]+)', 'group_number': 1, 'env_key': 'MLC_ONEAPI_VERSION', 'which_env': i['env']}) @@ -90,6 +94,16 @@ def postprocess(i): env['MLC_ONEAPI_COMPILER_WITH_PATH'] = found_file_path env['MLC_ONEAPI_COMPILER_FLAG_VERSION'] = 'version' + env['MLC_DEPENDENT_CACHED_PATH'] = found_file_path + + list_keys = ['+LD_LIBRARY_PATH'] + for key in list_keys: + if not env.get(key): + env[key] = [] + + env['+LD_LIBRARY_PATH'].append(os.path.join( + env['MLC_ONEAPI_INSTALLED_PATH'], "lib")) + # env['MLC_COMPILER_FLAGS_FAST'] = "-O3" # env['MLC_LINKER_FLAGS_FAST'] = "-O3" # env['MLC_COMPILER_FLAGS_DEBUG'] = "-O0" diff --git a/script/get-oneapi/install.sh b/script/get-oneapi/install.sh new file mode 100644 index 000000000..ef2039e43 --- /dev/null +++ b/script/get-oneapi/install.sh @@ -0,0 +1,9 @@ +#!/bin/bash +set -e +wget -nc ${MLC_ONEAPI_INSTALL_URL_BASE}/${MLC_ONEAPI_INSTALL_FILENAME} +rm -rf install +mkdir install +cmd="bash ./${MLC_ONEAPI_INSTALL_FILENAME} -a --silent --cli --eula accept --install-dir ${PWD}/install" + +echo $cmd +eval $cmd diff --git a/script/get-one-api/meta.yaml b/script/get-oneapi/meta.yaml similarity index 57% rename from script/get-one-api/meta.yaml rename to script/get-oneapi/meta.yaml index 47cf218b0..653418412 100644 --- a/script/get-one-api/meta.yaml +++ b/script/get-oneapi/meta.yaml @@ -4,6 +4,7 @@ automation_uid: 5b4e0237da074764 cache: true category: Compiler automation clean_files: [] +default_version: 2025.1.1 deps: - tags: detect,os name: Detect or install OneAPI compiler @@ -12,6 +13,8 @@ new_env_keys: - MLC_ONEAPI_COMPILER_* - MLC_COMPILER_* - MLC_LINKER_* +- +PATH +- +LD_LIBRARY_PATH input_mapping: oneapi_dir: MLC_ONEAPI_DIR_PATH @@ -25,6 +28,15 @@ tags: - compiler - get-oneapi uid: 1af872e81ef54742 + +versions: + 2025.1.1: + env: + MLC_ONEAPI_INSTALL_URL_BASE: https://registrationcenter-download.intel.com/akdlm/IRC_NAS/6bfca885-4156-491e-849b-1cd7da9cc760 + 
MLC_ONEAPI_INSTALL_FILENAME: intel-oneapi-base-toolkit-2025.1.1.36_offline.sh + MLC_ONEAPI_INSTALL_VERSION_PREFIX: '2025.1' + MLC_VERSION: '2025.1.1' + variations: path.#: env: diff --git a/script/get-one-api/run.bat b/script/get-oneapi/run.bat similarity index 100% rename from script/get-one-api/run.bat rename to script/get-oneapi/run.bat diff --git a/script/get-one-api/run.sh b/script/get-oneapi/run.sh similarity index 100% rename from script/get-one-api/run.sh rename to script/get-oneapi/run.sh From ade0cb0f7e6bd3f2ad365b5003e9dd50dad31fbe Mon Sep 17 00:00:00 2001 From: Arjun Date: Wed, 14 May 2025 23:27:46 +0000 Subject: [PATCH 05/43] Support ifort install in oneapi --- script/get-oneapi/install.sh | 7 +++++++ script/get-oneapi/meta.yaml | 3 +++ 2 files changed, 10 insertions(+) diff --git a/script/get-oneapi/install.sh b/script/get-oneapi/install.sh index ef2039e43..59d523b6a 100644 --- a/script/get-oneapi/install.sh +++ b/script/get-oneapi/install.sh @@ -7,3 +7,10 @@ cmd="bash ./${MLC_ONEAPI_INSTALL_FILENAME} -a --silent --cli --eula accept --in echo $cmd eval $cmd + +if [[ ${MLC_ONEAPI_FORTRAN} == 'yes' ]]; then + wget -nc https://registrationcenter-download.intel.com/akdlm/IRC_NAS/2238465b-cfc7-4bf8-ad04-e55cb6577cba/intel-fortran-essentials-2025.1.1.8_offline.sh + cmd="bash ./intel-fortran-essentials-2025.1.1.8_offline.sh -a --silent --cli --eula accept --install-dir ${PWD}/install" + echo $cmd + eval $cmd +fi diff --git a/script/get-oneapi/meta.yaml b/script/get-oneapi/meta.yaml index 653418412..14394cf2a 100644 --- a/script/get-oneapi/meta.yaml +++ b/script/get-oneapi/meta.yaml @@ -41,3 +41,6 @@ variations: path.#: env: MLC_ONEAPI_DIR_PATH: "#" + fortran: + env: + MLC_ONEAPI_FORTRAN: 'yes' From 670759e64fb5da835a85fe68e7290d17f6c97f42 Mon Sep 17 00:00:00 2001 From: Arjun Date: Thu, 15 May 2025 19:23:34 +0530 Subject: [PATCH 06/43] Support default_env in update_meta_if_env, add update_tags_if_env --- automation/script/module.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/automation/script/module.py b/automation/script/module.py index bb1f73440..fba785c01 100644 --- a/automation/script/module.py +++ b/automation/script/module.py @@ -3401,6 +3401,12 @@ def _run_deps(self, deps, clean_env_keys_deps, env, state, const, const_state, a for t in update_tags_from_env: if env.get(t, '').strip() != '': d['tags'] += "," + env[t] + + update_tags_if_env = d.get("update_tags_if_env", []) + for t in update_tags_if_env: + if not is_dep_tobe_skipped(update_tags_if_env[t], env): + d['tags'] += "," + t + inherit_variation_tags = d.get("inherit_variation_tags", False) skip_inherit_variation_groups = d.get( "skip_inherit_variation_groups", []) @@ -5772,10 +5778,16 @@ def update_state_from_meta(meta, env, state, const, const_state, deps, post_deps for c_meta in run_state['update_meta_if_env']: if is_dep_tobe_skipped(c_meta, env): continue + utils.merge_dicts({'dict1': default_env, 'dict2': c_meta.get( + 'default_env', {}), 'append_lists': True, 'append_unique': True}) utils.merge_dicts({'dict1': env, 'dict2': c_meta.get( 'env', {}), 'append_lists': True, 'append_unique': True}) + utils.merge_dicts({'dict1': const, 'dict2': c_meta.get( + 'const', {}), 'append_lists': True, 'append_unique': True}) utils.merge_dicts({'dict1': state, 'dict2': c_meta.get( 'state', {}), 'append_lists': True, 'append_unique': True}) + utils.merge_dicts({'dict1': const_state, 'dict2': c_meta.get( + 'const_state', {}), 'append_lists': True, 'append_unique': True}) if c_meta.get('docker', {}): if not state.get('docker', {}): state['docker']
= {} @@ -5784,6 +5796,11 @@ def update_state_from_meta(meta, env, state, const, const_state, deps, post_deps 'append_lists': True, 'append_unique': True}) + + #Updating again in case update_meta_if_env happened + for key in default_env: + env.setdefault(key, default_env[key]) + update_const = meta.get('const', {}) if update_const: const.update(update_const) From 39c025c8747822b56dc3ed956bb36d2ca62a1f2a Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 15 May 2025 13:54:14 +0000 Subject: [PATCH 07/43] [Automated Commit] Format Codebase [skip ci] --- automation/script/module.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/automation/script/module.py b/automation/script/module.py index fba785c01..5cac406ae 100644 --- a/automation/script/module.py +++ b/automation/script/module.py @@ -3401,13 +3401,12 @@ def _run_deps(self, deps, clean_env_keys_deps, env, state, const, const_state, a for t in update_tags_from_env: if env.get(t, '').strip() != '': d['tags'] += "," + env[t] - + update_tags_if_env = d.get("update_tags_if_env", []) for t in update_tags_if_env: if not is_dep_tobe_skipped(update_tags_if_env[t], env): d['tags'] += "," + t - inherit_variation_tags = d.get("inherit_variation_tags", False) skip_inherit_variation_groups = d.get( "skip_inherit_variation_groups", []) @@ -5796,8 +5795,7 @@ def update_state_from_meta(meta, env, state, const, const_state, deps, post_deps 'append_lists': True, 'append_unique': True}) - - #Updating again in case update_meta_if_env happened + # Updating again in case update_meta_if_env happened for key in default_env: env.setdefault(key, default_env[key]) From c155c272cd269e60cc606b675ceb75e18cf4ee0d Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Sat, 17 May 2025 01:19:46 +0100 Subject: [PATCH 08/43] Support mlc experiment script (#411) * Support mlc experiment script --- .../workflows/test-mlc-script-features.yml | 35 ++++- automation/script/experiment.py | 121 ++++++++++++++++++ automation/script/module.py | 6 +- 3 files changed, 153 insertions(+), 9 deletions(-) create mode 100644 automation/script/experiment.py diff --git a/.github/workflows/test-mlc-script-features.yml b/.github/workflows/test-mlc-script-features.yml index 8280cdf53..a511faa89 100644 --- a/.github/workflows/test-mlc-script-features.yml +++ b/.github/workflows/test-mlc-script-features.yml @@ -50,18 +50,15 @@ jobs: mlcr get,wkhtmltopdf --quiet - name: Test versions - continue-on-error: true if: runner.os == 'linux' run: | mlcr get,generic-python-lib,_package.scipy --version=1.9.3 --quiet test $? -eq 0 || exit $? mlcr get,generic-python-lib,_package.scipy --version=1.9.2 --quiet test $? -eq 0 || exit $? - mlc find cache --tags=get,generic-python-lib,_package.scipy,version-1.9.3 + mlc find cache --tags=get,generic-python-lib,_package.scipy,version-1.9.2 test $? -eq 0 || exit $? - # Need to add find cache here - # mlcr get,generic-python-lib,_package.scipy --version=1.9.3 --quiet --only_execute_from_cache=True - # test $? 
-eq 0 || exit 0 + - name: Test python install from src run: | @@ -94,6 +91,34 @@ jobs: run: | mlcr run,docker,container --adr.compiler.tags=gcc --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --image_name=mlc-script-app-image-classification-onnx-py --env.MLC_DOCKER_RUN_SCRIPT_TAGS=app,image-classification,onnx,python --env.MLC_DOCKER_IMAGE_BASE=ubuntu:22.04 --env.MLC_DOCKER_IMAGE_REPO=local --quiet + test_experiment: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + python-version: ["3.12", "3.8"] + os: ["ubuntu-latest", "windows-latest", "macos-latest"] + + steps: + - uses: actions/checkout@v4 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + - name: Configure git longpaths (Windows) + if: matrix.os == 'windows-latest' + run: | + git config --system core.longpaths true + - name: Pull MLOps repository + run: | + pip install mlcflow + mlc pull repo ${{ github.event.pull_request.head.repo.html_url }} --branch=${{ github.event.pull_request.head.ref }} + + - name: Test mlc experiment script + run: | + mlc experiment script --tags=detect,os --quiet --exp.repeat,=1,2,3 + mlc experiment script --tags=detect,cpu --quiet --exp.explore=2:10:2 + test_mlperf_retinanet_cpp_venv: runs-on: ubuntu-latest strategy: diff --git a/automation/script/experiment.py b/automation/script/experiment.py new file mode 100644 index 000000000..5195ce056 --- /dev/null +++ b/automation/script/experiment.py @@ -0,0 +1,121 @@ +from collections import defaultdict +import os +from mlc import utils +from utils import * +import logging +from pathlib import PureWindowsPath, PurePosixPath +import copy + + +def experiment_run(self_module, i): + """ + Automates the exploration runs of MLC scripts. + + Args: + self_module: Reference to the current module for internal calls. + i: Dictionary containing input parameters for the experiment execution. + + Returns: + Dictionary with the result of the operation. Keys: + - 'return': 0 on success, >0 on error. + - 'error': Error message (if any). 
+ """ + + # Extract and handle basic inputs + quiet = i.get('quiet', False) + show_time = i.get('show_time', False) + logger = self_module.logger + env = i.get('env', {}) + prune_result = prune_input( + {'input': i, 'extra_keys_starts_with': ['exp.']}) + if prune_result['return'] > 0: + return prune_result + + run_input = prune_result['new_input'] + if run_input.get('exp'): + del (run_input['exp']) + + r = convert_input(i) + if r.get('exp'): + exp = r['exp'] + else: + exp = {} + + cur_dir = os.getcwd() + r = self_module.search(i.copy()) + if r['return'] > 0: + return r + + lst = r['list'] + if not lst: + return {'return': 1, 'error': 'No scripts were found'} + + # Process each artifact + for artifact in sorted(lst, key=lambda x: x.meta.get('alias', '')): + meta, script_path = artifact.meta, artifact.path + tags, script_alias, script_uid = meta.get( + "tags", []), meta.get( + 'alias', ''), meta.get( + 'uid', '') + + # Execute the experiment script + mlc_script_input = { + 'action': 'run', 'target': 'script' + } + if exp: + for key in exp: + ii = {**mlc_script_input, **run_input} + if isinstance(exp[key], list): + for val in exp[key]: + ii[key] = val + r = self_module.action_object.access(ii) + if r['return'] > 0: + return r + elif isinstance(exp[key], dict): + return { + 'return': 1, 'error': 'Dictionary inputs are not supported for mlc experiment script'} + else: + ii[key] = exp[key] + r = self_module.action_object.access(ii) + if r['return'] > 0: + return r + + return {'return': 0} + + +def parse_value(val): + if isinstance(val, list): + return [parse_value(v) for v in val] + + val = str(val) + + # Handle range inputs like 2:10 or 2:10:2 + if ':' in val: + parts = val.split(':') + try: + parts = list(map(int, parts)) + if len(parts) == 2: + return list(range(parts[0], parts[1] + 1)) + elif len(parts) == 3: + return list(range(parts[0], parts[1] + 1, parts[2])) + except ValueError: + pass # Not a valid range, fall through + + # Convert to int if possible + if val.isdigit(): + return int(val) + + return val + + +def convert_input(input_dict): + output = defaultdict(dict) + + for key, value in input_dict.items(): + if '.' in key: + main_key, sub_key = key.split('.', 1) + output[main_key][sub_key] = parse_value(value) + elif isinstance(value, dict): + output[key].update({k: parse_value(v) for k, v in value.items()}) + + return dict(output) diff --git a/automation/script/module.py b/automation/script/module.py index 5cac406ae..dcd709271 100644 --- a/automation/script/module.py +++ b/automation/script/module.py @@ -4475,11 +4475,9 @@ def docker(self, i): return docker_run(self, i) ############################################################ - # portion for experiment action. - # as of now, the experiment action directly calls the run action. - # in the future, we will add more functionality to the experiment action. 
def experiment(self, i): - return self.run(i) + from script.experiment import experiment_run + return experiment_run(self, i) ########################################################################## From fdfbbe8b9a012a459eab9e0d83d66638e3a35d99 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Sat, 17 May 2025 01:31:34 +0100 Subject: [PATCH 09/43] Update test-mlc-script-features.yml --- .../workflows/test-mlc-script-features.yml | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/.github/workflows/test-mlc-script-features.yml b/.github/workflows/test-mlc-script-features.yml index a511faa89..35c783a25 100644 --- a/.github/workflows/test-mlc-script-features.yml +++ b/.github/workflows/test-mlc-script-features.yml @@ -93,31 +93,31 @@ jobs: test_experiment: runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - python-version: ["3.12", "3.8"] - os: ["ubuntu-latest", "windows-latest", "macos-latest"] - - steps: - - uses: actions/checkout@v4 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v3 - with: - python-version: ${{ matrix.python-version }} - - name: Configure git longpaths (Windows) - if: matrix.os == 'windows-latest' - run: | - git config --system core.longpaths true - - name: Pull MLOps repository - run: | - pip install mlcflow - mlc pull repo ${{ github.event.pull_request.head.repo.html_url }} --branch=${{ github.event.pull_request.head.ref }} - - - name: Test mlc experiment script - run: | - mlc experiment script --tags=detect,os --quiet --exp.repeat,=1,2,3 - mlc experiment script --tags=detect,cpu --quiet --exp.explore=2:10:2 + strategy: + fail-fast: false + matrix: + python-version: ["3.12", "3.8"] + os: ["ubuntu-latest", "windows-latest", "macos-latest"] + + steps: + - uses: actions/checkout@v4 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + - name: Configure git longpaths (Windows) + if: matrix.os == 'windows-latest' + run: | + git config --system core.longpaths true + - name: Pull MLOps repository + run: | + pip install mlcflow + mlc pull repo ${{ github.event.pull_request.head.repo.html_url }} --branch=${{ github.event.pull_request.head.ref }} + + - name: Test mlc experiment script + run: | + mlc experiment script --tags=detect,os --quiet --exp.repeat,=1,2,3 + mlc experiment script --tags=detect,cpu --quiet --exp.explore=2:10:2 test_mlperf_retinanet_cpp_venv: runs-on: ubuntu-latest From 6f659cbab4decb97cd1bac100b5777c7cbfd9f8e Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Sat, 17 May 2025 15:56:54 +0100 Subject: [PATCH 10/43] Update test-mlc-script-features.yml --- .github/workflows/test-mlc-script-features.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-mlc-script-features.yml b/.github/workflows/test-mlc-script-features.yml index 35c783a25..21e85bb02 100644 --- a/.github/workflows/test-mlc-script-features.yml +++ b/.github/workflows/test-mlc-script-features.yml @@ -50,7 +50,7 @@ jobs: mlcr get,wkhtmltopdf --quiet - name: Test versions - if: runner.os == 'linux' + if: runner.os == 'linux' && matrix.python-version == "3.8" run: | mlcr get,generic-python-lib,_package.scipy --version=1.9.3 --quiet test $? -eq 0 || exit $? 
From 2f136ac1281bc9537dd13f865fc5a093c6c519ed Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Sat, 17 May 2025 16:05:20 +0100 Subject: [PATCH 11/43] Update test-mlc-script-features.yml --- .github/workflows/test-mlc-script-features.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-mlc-script-features.yml b/.github/workflows/test-mlc-script-features.yml index 21e85bb02..4570d5bdf 100644 --- a/.github/workflows/test-mlc-script-features.yml +++ b/.github/workflows/test-mlc-script-features.yml @@ -50,7 +50,7 @@ jobs: mlcr get,wkhtmltopdf --quiet - name: Test versions - if: runner.os == 'linux' && matrix.python-version == "3.8" + if: runner.os == 'linux' && matrix.python-version == '3.8' run: | mlcr get,generic-python-lib,_package.scipy --version=1.9.3 --quiet test $? -eq 0 || exit $? From aa4875621eeb405a0adbce8dcba60fdbb17600bb Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Sat, 17 May 2025 16:15:56 +0100 Subject: [PATCH 12/43] Support mlc experiment entries (#412) * Support mlc experiment entries --- .github/workflows/build_wheels.yml | 1 + automation/script/experiment.py | 16 ++++ script/save-machine-state/capture.py | 106 +++++++++++++++++++++++++ script/save-machine-state/customize.py | 47 +++++++++++ script/save-machine-state/meta.yaml | 67 ++++++++++++++++ script/save-machine-state/run.sh | 4 + 6 files changed, 241 insertions(+) create mode 100644 script/save-machine-state/capture.py create mode 100644 script/save-machine-state/customize.py create mode 100644 script/save-machine-state/meta.yaml create mode 100644 script/save-machine-state/run.sh diff --git a/.github/workflows/build_wheels.yml b/.github/workflows/build_wheels.yml index 86b93ac06..55e79c0ae 100644 --- a/.github/workflows/build_wheels.yml +++ b/.github/workflows/build_wheels.yml @@ -5,6 +5,7 @@ on: types: [published] workflow_dispatch: {} + jobs: build_wheels: if: github.repository_owner == 'mlcommons' diff --git a/automation/script/experiment.py b/automation/script/experiment.py index 5195ce056..63a29f005 100644 --- a/automation/script/experiment.py +++ b/automation/script/experiment.py @@ -1,5 +1,7 @@ from collections import defaultdict import os +from mlc.main import ExperimentAction +import mlc.utils as utils from mlc import utils from utils import * import logging @@ -26,6 +28,8 @@ def experiment_run(self_module, i): show_time = i.get('show_time', False) logger = self_module.logger env = i.get('env', {}) + experiment_action = ExperimentAction(self_module.action_object.parent) + prune_result = prune_input( {'input': i, 'extra_keys_starts_with': ['exp.']}) if prune_result['return'] > 0: @@ -80,6 +84,18 @@ def experiment_run(self_module, i): if r['return'] > 0: return r + experiment_meta = {} + exp_tags = tags + ii = {'action': 'update', + 'target': 'experiment', + 'script_alias': meta['alias'], + 'tags': ','.join(exp_tags), + 'meta': experiment_meta, + 'force': True} + r = experiment_action.access(ii) + if r['return'] > 0: + return r + return {'return': 0} diff --git a/script/save-machine-state/capture.py b/script/save-machine-state/capture.py new file mode 100644 index 000000000..02963eb81 --- /dev/null +++ b/script/save-machine-state/capture.py @@ -0,0 +1,106 @@ +import os +import json +import psutil +import platform +import subprocess +from datetime import datetime + + +def read_file_safe(path): + try: + with open(path, 'r') as f: + return f.read().strip() + except Exception: + return None + + +def run_command_safe(command, require_sudo=False): + if require_sudo and 
os.geteuid() != 0: + return "Skipped (requires sudo)" + try: + output = subprocess.check_output(command, shell=True, text=True) + return output.strip() + except subprocess.CalledProcessError: + return "Error running command" + + +def detect_container_context(): + context = { + "docker_env": os.path.exists('/.dockerenv'), + "cgroup_indicators": [] + } + cgroup = read_file_safe('/proc/1/cgroup') + if cgroup: + for line in cgroup.splitlines(): + if any(x in line for x in ['docker', 'kubepods', 'containerd']): + context["cgroup_indicators"].append(line) + return context + + +def get_mounted_file_systems(): + try: + with open("/proc/mounts", "r") as f: + return [line.strip() for line in f.readlines()] + except BaseException: + return [] + + +def capture_machine_state(): + state = { + "timestamp": datetime.now().isoformat(), + "platform": { + "system": platform.system(), + "node": platform.node(), + "release": platform.release(), + "version": platform.version(), + "machine": platform.machine(), + "processor": platform.processor() + }, + "cpu": { + "logical_cores": psutil.cpu_count(logical=True), + "physical_cores": psutil.cpu_count(logical=False), + "load_avg": psutil.getloadavg(), + "cpu_percent": psutil.cpu_percent(interval=1) + }, + "memory": { + "virtual_memory": dict(psutil.virtual_memory()._asdict()), + "swap_memory": dict(psutil.swap_memory()._asdict()) + }, + "disk": { + "disk_usage": dict(psutil.disk_usage('/')._asdict()), + "partitions": [dict(p._asdict()) for p in psutil.disk_partitions()] + }, + "bios": { + "vendor": run_command_safe("dmidecode -s bios-vendor", require_sudo=True), + "version": run_command_safe("dmidecode -s bios-version", require_sudo=True), + "release_date": run_command_safe("dmidecode -s bios-release-date", require_sudo=True) + }, + "thp_settings": { + "enabled": read_file_safe("/sys/kernel/mm/transparent_hugepage/enabled") or "Skipped (requires sudo or permission)", + "defrag": read_file_safe("/sys/kernel/mm/transparent_hugepage/defrag") or "Skipped (requires sudo or permission)" + }, + "kernel": { + "cmdline": read_file_safe("/proc/cmdline") + }, + "uptime": read_file_safe("/proc/uptime"), + "process_count": len(psutil.pids()), + "users_sessions": [dict(u._asdict()) for u in psutil.users()], + "container_context": detect_container_context(), + "mounted_filesystems": get_mounted_file_systems() + } + return state + + +def save_state_to_file(state, filename): + with open(filename, "w") as f: + json.dump(state, f, indent=4) + + +# Example usage +if __name__ == "__main__": + + state = capture_machine_state() + save_file = os.environ.get( + 'MLC_SYSTEM_STATE_SAVE_FILENAME', + 'machine_state.json') + save_state_to_file(state, save_file) diff --git a/script/save-machine-state/customize.py b/script/save-machine-state/customize.py new file mode 100644 index 000000000..a7dbcc4f2 --- /dev/null +++ b/script/save-machine-state/customize.py @@ -0,0 +1,47 @@ +from mlc import utils +import os +import subprocess + + +def check_installation(command, os_info): + if os_info['platform'] == "windows": + return subprocess.call( + [command, '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) == 0 + elif os_info['platform'] == "linux": + return subprocess.call(['which', command], stdout=subprocess.PIPE, + stderr=subprocess.PIPE) == 0 # 0 means the package is there + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + + if not check_installation("numactl", os_info): + env['MLC_INSTALL_NUMACTL'] = 'True' + + # if not 
check_installation("cpupower",os_info): + env['MLC_INSTALL_CPUPOWER'] = 'True' + + if env.get('MLC_PLATFORM_DETAILS_FILE_PATH', '') == '': + if env.get('MLC_PLATFORM_DETAILS_DIR_PATH', '') == '': + env['MLC_PLATFORM_DETAILS_DIR_PATH'] = os.getcwd() + if env.get('MLC_PLATFORM_DETAILS_FILE_NAME', '') == '': + env['MLC_PLATFORM_DETAILS_FILE_NAME'] = "system-info.txt" + env['MLC_PLATFORM_DETAILS_FILE_PATH'] = os.path.join( + env['MLC_PLATFORM_DETAILS_DIR_PATH'], env['MLC_PLATFORM_DETAILS_FILE_NAME']) + + return {'return': 0} + + +def postprocess(i): + + state = i['state'] + + env = i['env'] + + os_info = i['os_info'] + + automation = i['automation'] + + return {'return': 0} diff --git a/script/save-machine-state/meta.yaml b/script/save-machine-state/meta.yaml new file mode 100644 index 000000000..7d9a07cd1 --- /dev/null +++ b/script/save-machine-state/meta.yaml @@ -0,0 +1,67 @@ +alias: save-machine-state +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +category: Platform information +deps: +- tags: detect,os +- skip_if_env: + MLC_HOST_OS_TYPE: + - windows + tags: detect,sudo +- names: + - python + - python3 + tags: get,python +- skip_if_any_env: + MLC_HOST_OS_TYPE: + - windows + skip_if_env: + MLC_SUDO_USER: + - 'no' + tags: get,sys-util,generic,_psmisc +- enable_if_env: + MLC_HOST_OS_TYPE: + - linux + skip_if_env: + MLC_SUDO_USER: + - 'no' + tags: get,sys-util,generic,_systemd +- enable_if_env: + MLC_HOST_OS_TYPE: + - linux + skip_if_env: + MLC_SUDO_USER: + - 'no' + tags: get,sys-util,generic,_dmidecode +- tags: get,generic-python-lib,_package.psutil +input_mapping: + outfile: MLC_SYSTEM_STATE_SAVE_FILENAME +prehook_deps: +- enable_if_env: + MLC_HOST_OS_TYPE: + - linux + MLC_INSTALL_NUMACTL: + - 'True' + skip_if_env: + MLC_SUDO_USER: + - 'no' + tags: get,sys-util,generic,_numactl +- enable_if_env: + MLC_HOST_OS_TYPE: + - linux + MLC_INSTALL_CPUPOWER: + - 'True' + env: + MLC_TMP_FAIL_SAFE: 'yes' + ignore_missing: true + skip_if_env: + MLC_SUDO_USER: + - 'no' + tags: get,sys-util,generic,_linux-tools +tags: +- machine-state +- save +- machine +- state +uid: 2f62820ed7294659 diff --git a/script/save-machine-state/run.sh b/script/save-machine-state/run.sh new file mode 100644 index 000000000..28e1867f6 --- /dev/null +++ b/script/save-machine-state/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash +set -e + +${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/capture.py From 796cafca6960bdc0be1d8c57353e1778c35c3da9 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Sat, 17 May 2025 18:02:01 +0100 Subject: [PATCH 13/43] Support state info for experiment run (#413) * Support state details for experiment run --- automation/script/experiment.py | 117 ++++++++++++++++++++++++---- script/save-machine-state/meta.yaml | 2 + 2 files changed, 105 insertions(+), 14 deletions(-) diff --git a/automation/script/experiment.py b/automation/script/experiment.py index 63a29f005..6040c5efe 100644 --- a/automation/script/experiment.py +++ b/automation/script/experiment.py @@ -6,6 +6,7 @@ from utils import * import logging from pathlib import PureWindowsPath, PurePosixPath +import time import copy @@ -29,6 +30,7 @@ def experiment_run(self_module, i): logger = self_module.logger env = i.get('env', {}) experiment_action = ExperimentAction(self_module.action_object.parent) + skip_state_save = i.get('exp_skip_state_save', False) prune_result = prune_input( {'input': i, 'extra_keys_starts_with': ['exp.']}) @@ -72,7 +74,14 @@ def experiment_run(self_module, i): if isinstance(exp[key], list): for val in 
exp[key]: ii[key] = val - r = self_module.action_object.access(ii) + r = run_script_and_tag_experiment( + ii, + self_module.action_object, + experiment_action, + tags, + meta, + skip_state_save, + logger) if r['return'] > 0: return r elif isinstance(exp[key], dict): @@ -80,25 +89,105 @@ def experiment_run(self_module, i): 'return': 1, 'error': 'Dictionary inputs are not supported for mlc experiment script'} else: ii[key] = exp[key] - r = self_module.action_object.access(ii) + r = run_script_and_tag_experiment( + ii, + self_module.action_object, + experiment_action, + tags, + meta, + skip_state_save, + logger) if r['return'] > 0: return r - experiment_meta = {} - exp_tags = tags - ii = {'action': 'update', - 'target': 'experiment', - 'script_alias': meta['alias'], - 'tags': ','.join(exp_tags), - 'meta': experiment_meta, - 'force': True} - r = experiment_action.access(ii) - if r['return'] > 0: - return r - return {'return': 0} +def run_script_and_tag_experiment( + ii, script_action, experiment_action, tags, meta, skip_state_save, logger): + + current_path = os.path.abspath(os.getcwd()) + experiment_meta = {} + recursion_spaces = '' + tmp_tags = tags + tmp_tags.append("tmp") + ii = {'action': 'update', + 'target': 'experiment', + 'script_alias': meta['alias'], + 'tags': ','.join(tmp_tags), + 'meta': experiment_meta, + 'force': True} + + r = experiment_action.access(ii) + if r['return'] > 0: + return r + + experiment = r['list'][0] + + logger.debug( + recursion_spaces + + ' - Changing to {}'.format(experiment.path)) + + os.chdir(experiment.path) + + if not skip_state_save: + ssi = {'action': 'run', + 'target': 'script', + 'tags': 'save,system,state', + 'outfile': 'system_state_before.json', + 'quiet': True + } + r = script_action.access(ssi) + if r['return'] > 0: + return r + + start_time = time.time() + r = script_action.access(ii) + if r['return'] > 0: + return r + + end_time = time.time() + elapsed = end_time - start_time + time_taken_string = format_elapsed(elapsed) + logger.info(f"Time taken: {time_taken_string}") + + if not skip_state_save: + ssi['outfile'] = 'system_state_after.json' + r = script_action.access(ssi) + if r['return'] > 0: + return r + + exp_tags = tags + ii = {'action': 'update', + 'target': 'experiment', + 'uid': experiment.meta['uid'], + 'meta': experiment.meta, + 'script_alias': meta['alias'], + 'replace_lists': True, # To replace tags + 'tags': ','.join(exp_tags)} + + r = experiment_action.access(ii) + if r['return'] > 0: + return r + + os.chdir(current_path) + logger.info(f"Experiment entry saved at: {experiment.path}") + + return {'return': 0, 'experiment': experiment} + + +def format_elapsed(seconds): + if seconds < 60: + return f"{seconds:.3f} seconds" + elif seconds < 3600: + mins, secs = divmod(seconds, 60) + return f"{int(mins)} minutes {secs:.1f} seconds" + else: + hours, remainder = divmod(seconds, 3600) + mins, secs = divmod(remainder, 60) + return f"{int(hours)} hours {int(mins)} minutes {secs:.1f} seconds" + + def parse_value(val): if isinstance(val, list): return [parse_value(v) for v in val] diff --git a/script/save-machine-state/meta.yaml b/script/save-machine-state/meta.yaml index 7d9a07cd1..0e22093f0 100644 --- a/script/save-machine-state/meta.yaml +++ b/script/save-machine-state/meta.yaml @@ -63,5 +63,7 @@ tags: - machine-state - save - machine +- system +- system-state - state uid: 2f62820ed7294659 From b2eb6a4e16e12357e31c6299b72f4614449ac10a Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Sat, 17 May 2025 21:50:04 +0100 Subject: [PATCH 14/43] 
Support exp_tags for experiments (#414) * Support exp_tags --- automation/script/experiment.py | 32 ++++++++++++++----- automation/script/module.py | 1 + .../meta.yaml | 3 ++ 3 files changed, 28 insertions(+), 8 deletions(-) diff --git a/automation/script/experiment.py b/automation/script/experiment.py index 6040c5efe..8f4215933 100644 --- a/automation/script/experiment.py +++ b/automation/script/experiment.py @@ -8,6 +8,7 @@ from pathlib import PureWindowsPath, PurePosixPath import time import copy +from datetime import datetime def experiment_run(self_module, i): @@ -31,6 +32,7 @@ def experiment_run(self_module, i): env = i.get('env', {}) experiment_action = ExperimentAction(self_module.action_object.parent) skip_state_save = i.get('exp_skip_state_save', False) + extra_exp_tags = i.get('exp_tags', '').split(",") prune_result = prune_input( {'input': i, 'extra_keys_starts_with': ['exp.']}) @@ -79,6 +81,7 @@ def experiment_run(self_module, i): self_module.action_object, experiment_action, tags, + extra_exp_tags, meta, skip_state_save, logger) @@ -94,6 +97,7 @@ def experiment_run(self_module, i): self_module.action_object, experiment_action, tags, + extra_exp_tags, meta, skip_state_save, logger) @@ -104,17 +108,18 @@ def experiment_run(self_module, i): def run_script_and_tag_experiment( - ii, script_action, experiment_action, tags, meta, skip_state_save, logger): + ii, script_action, experiment_action, tags, extra_exp_tags, script_meta, skip_state_save, logger): current_path = os.path.abspath(os.getcwd()) experiment_meta = {} recursion_spaces = '' - tmp_tags = tags - tmp_tags.append("tmp") + exp_tags = tags + extra_exp_tags ii = {'action': 'update', 'target': 'experiment', - 'script_alias': meta['alias'], - 'tags': ','.join(tmp_tags), + 'script_alias': script_meta['alias'], + 'script_uid': script_meta['uid'], + 'tags': ','.join(exp_tags), + 'extra_tags': ",".join(extra_exp_tags), 'meta': experiment_meta, 'force': True} @@ -129,6 +134,15 @@ def run_script_and_tag_experiment( ' - Changing to {}'.format(experiment.path)) os.chdir(experiment.path) + # Get current datetime in YYYY-MM-DD_HH-MM-SS format + timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + + # Create a folder name using the timestamp + folder_name = f"run_{timestamp}" + + # Create the directory + os.makedirs(folder_name, exist_ok=True) + os.chdir(folder_name) if not skip_state_save: ssi = {'action': 'run', @@ -157,6 +171,7 @@ def run_script_and_tag_experiment( if r['return'] > 0: return r + ''' exp_tags = tags ii = {'action': 'update', 'target': 'experiment', @@ -169,11 +184,12 @@ def run_script_and_tag_experiment( r = experiment_action.access(ii) if r['return'] > 0: return r - + ''' os.chdir(current_path) - logger.info(f"Experiment entry saved at: {experiment.path}") + logger.info( + f"Experiment entry saved at: {os.path.join(experiment.path, folder_name)}") - return {'return': 0, 'experiment': experiment} + return {'return': 0, 'experiment': experiment, 'folder_name': folder_name} def format_elapsed(seconds): diff --git a/automation/script/module.py b/automation/script/module.py index dcd709271..a2ba275c7 100644 --- a/automation/script/module.py +++ b/automation/script/module.py @@ -1388,6 +1388,7 @@ def _run(self, i): 'target': 'cache', 'search_tags': tmp_tags, 'script_alias': meta['alias'], + 'extra_tags': ",".join(extra_cache_tags), 'tags': ','.join(tmp_tags), 'meta': cached_meta, 'force': True} diff --git a/script/build-mlperf-inference-server-nvidia/meta.yaml b/script/build-mlperf-inference-server-nvidia/meta.yaml 
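(Note on the enable_if_env hunk below: YAML 1.1 loaders such as PyYAML parse the unquoted scalar `on` as boolean True rather than the string 'on', so a string comparison against the env value may never match; quoting it as 'on', as done for 'yes' elsewhere in these metas, would avoid the ambiguity. The hunk is superseded by patch 15 in any case.)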
index e8e49748b..84a96be1a 100644 --- a/script/build-mlperf-inference-server-nvidia/meta.yaml +++ b/script/build-mlperf-inference-server-nvidia/meta.yaml @@ -123,6 +123,9 @@ deps: # Detect nvidia-dali - tags: get,generic-python-lib,_nvidia-dali + enable_if_env: + MLC_NVIDIA_MLPERF_NEEDS_DALI: + - on # Get Nvidia scratch space where data and models get downloaded - tags: get,mlperf,inference,nvidia,scratch,space From 6a1a97a5b6deaf73aa829cc935312ce7aecd15e6 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Sat, 17 May 2025 22:22:21 +0100 Subject: [PATCH 15/43] Fix nvidia-dali version for python3.8, fixes #410 (#415) --- script/build-mlperf-inference-server-nvidia/meta.yaml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/script/build-mlperf-inference-server-nvidia/meta.yaml b/script/build-mlperf-inference-server-nvidia/meta.yaml index 84a96be1a..69ea53bd1 100644 --- a/script/build-mlperf-inference-server-nvidia/meta.yaml +++ b/script/build-mlperf-inference-server-nvidia/meta.yaml @@ -123,9 +123,8 @@ deps: # Detect nvidia-dali - tags: get,generic-python-lib,_nvidia-dali - enable_if_env: - MLC_NVIDIA_MLPERF_NEEDS_DALI: - - on + version_max: 1.48 + version_max_usable: 1.48 # Get Nvidia scratch space where data and models get downloaded - tags: get,mlperf,inference,nvidia,scratch,space From 9e9d39adb3cc7c57677a035d70c0c32eb23d725b Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Sat, 17 May 2025 22:33:28 +0100 Subject: [PATCH 16/43] Force str for version in script module (#416) --- automation/script/module.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/automation/script/module.py b/automation/script/module.py index a2ba275c7..6cc851fc0 100644 --- a/automation/script/module.py +++ b/automation/script/module.py @@ -937,32 +937,32 @@ def _run(self, i): # VERSIONS SHOULD NOT BE USED INSIDE VARIATIONS (in meta)! 
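# Context for the str() coercion introduced below: a version that reaches the
# script through parsed YAML/JSON input (for example `version: 5.0`) arrives
# as a float, and float has no .strip(), so the old code raised an
# AttributeError. A minimal sketch of the failure mode, assuming a plain
# dict input:
#
#   i = {'version': 5.0}
#   i.get('version', '').strip()        # AttributeError: 'float' object has no attribute 'strip'
#   str(i.get('version', '')).strip()   # '5.0'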
# First, take version from input - version = i.get('version', '').strip() - version_min = i.get('version_min', '').strip() - version_max = i.get('version_max', '').strip() - version_max_usable = i.get('version_max_usable', '').strip() + version = str(i.get('version', '')).strip() + version_min = str(i.get('version_min', '')).strip() + version_max = str(i.get('version_max', '')).strip() + version_max_usable = str(i.get('version_max_usable', '')).strip() # Second, take from env if version == '': - version = env.get('MLC_VERSION', '') + version = str(env.get('MLC_VERSION', '')) if version_min == '': - version_min = env.get('MLC_VERSION_MIN', '') + version_min = str(env.get('MLC_VERSION_MIN', '')) if version_max == '': - version_max = env.get('MLC_VERSION_MAX', '') + version_max = str(env.get('MLC_VERSION_MAX', '')) if version_max_usable == '': - version_max_usable = env.get( - 'MLC_VERSION_MAX_USABLE', '') + version_max_usable = str(env.get( + 'MLC_VERSION_MAX_USABLE', '')) # Third, take from meta if version == '': - version = meta.get('version', '') + version = str(meta.get('version', '')) if version_min == '': - version_min = meta.get('version_min', '') + version_min = str(meta.get('version_min', '')) if version_max == '': - version_max = meta.get('version_max', '') + version_max = str(meta.get('version_max', '')) if version_max_usable == '': - version_max_usable = meta.get( - 'version_max_usable', '') + version_max_usable = str(meta.get( + 'version_max_usable', '')) # Update env with resolved versions notes = [] From d95ea41aa5d4272795bde3d0557f0855d3d8de0b Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Sun, 18 May 2025 19:11:36 +0530 Subject: [PATCH 17/43] Code changes for integrating nvidia v5.0 (#417) * code changes for integrating nvidia v5.0 * code clean for pycuda --- automation/utils.py | 4 +- script/add-custom-nvidia-system/meta.yaml | 10 ++ script/app-mlperf-inference-nvidia/meta.yaml | 114 ++++++++++++++---- script/app-mlperf-inference/meta.yaml | 55 +++++++++ script/build-dockerfile/customize.py | 4 + .../meta.yaml | 8 +- .../run.sh | 8 ++ .../meta.yaml | 5 + script/get-mlperf-inference-results/meta.yaml | 4 + script/get-nvidia-mitten/customize.py | 7 +- script/get-nvidia-mitten/meta.yaml | 2 +- .../patch/numpy-mitten-v5.0.patch | 13 ++ script/get-nvidia-mitten/run.sh | 3 + script/run-mlperf-inference-app/meta.yaml | 14 +++ 14 files changed, 221 insertions(+), 30 deletions(-) create mode 100644 script/get-nvidia-mitten/patch/numpy-mitten-v5.0.patch diff --git a/automation/utils.py b/automation/utils.py index 986888e87..98a3a3718 100644 --- a/automation/utils.py +++ b/automation/utils.py @@ -339,8 +339,8 @@ def compare_versions(i): # 3.9.6 vs 3.9 # 3.9 vs 3.9.6 - i_version1 = [int(v) if v.isdigit() else v for v in l_version1] - i_version2 = [int(v) if v.isdigit() else v for v in l_version2] + i_version1 = [int(v) for v in l_version1 if v.isdigit()] + i_version2 = [int(v) for v in l_version2 if v.isdigit()] comparison = 0 diff --git a/script/add-custom-nvidia-system/meta.yaml b/script/add-custom-nvidia-system/meta.yaml index 6dce8414d..b07ec8d5e 100644 --- a/script/add-custom-nvidia-system/meta.yaml +++ b/script/add-custom-nvidia-system/meta.yaml @@ -74,6 +74,11 @@ deps: # Detect pycuda - tags: get,generic-python-lib,_pycuda + - tags: get,generic-python-lib,_package.typeguard + enable_if_env: + MLC_MLPERF_INFERENCE_VERSION: + - "5.0" + variations: nvidia-only: group: code @@ -124,3 +129,8 @@ versions: add_deps_recursive: 
From d95ea41aa5d4272795bde3d0557f0855d3d8de0b Mon Sep 17 00:00:00 2001
From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com>
Date: Sun, 18 May 2025 19:11:36 +0530
Subject: [PATCH 17/43] Code changes for integrating nvidia v5.0 (#417)

* code changes for integrating nvidia v5.0

* code clean for pycuda

---
 automation/utils.py                           |   4 +-
 script/add-custom-nvidia-system/meta.yaml     |  10 ++
 script/app-mlperf-inference-nvidia/meta.yaml  | 114 ++++++++++++++----
 script/app-mlperf-inference/meta.yaml         |  55 +++++++++
 script/build-dockerfile/customize.py          |   4 +
 .../meta.yaml                                 |   8 +-
 .../run.sh                                    |   8 ++
 .../meta.yaml                                 |   5 +
 script/get-mlperf-inference-results/meta.yaml |   4 +
 script/get-nvidia-mitten/customize.py         |   7 +-
 script/get-nvidia-mitten/meta.yaml            |   2 +-
 .../patch/numpy-mitten-v5.0.patch             |  13 ++
 script/get-nvidia-mitten/run.sh               |   3 +
 script/run-mlperf-inference-app/meta.yaml     |  14 +++
 14 files changed, 221 insertions(+), 30 deletions(-)
 create mode 100644 script/get-nvidia-mitten/patch/numpy-mitten-v5.0.patch

diff --git a/automation/utils.py b/automation/utils.py
index 986888e87..98a3a3718 100644
--- a/automation/utils.py
+++ b/automation/utils.py
@@ -339,8 +339,8 @@ def compare_versions(i):
     # 3.9.6 vs 3.9
     # 3.9 vs 3.9.6

-    i_version1 = [int(v) if v.isdigit() else v for v in l_version1]
-    i_version2 = [int(v) if v.isdigit() else v for v in l_version2]
+    i_version1 = [int(v) for v in l_version1 if v.isdigit()]
+    i_version2 = [int(v) for v in l_version2 if v.isdigit()]

     comparison = 0

diff --git a/script/add-custom-nvidia-system/meta.yaml b/script/add-custom-nvidia-system/meta.yaml
index 6dce8414d..b07ec8d5e 100644
--- a/script/add-custom-nvidia-system/meta.yaml
+++ b/script/add-custom-nvidia-system/meta.yaml
@@ -74,6 +74,11 @@ deps:
   # Detect pycuda
   - tags: get,generic-python-lib,_pycuda

+  - tags: get,generic-python-lib,_package.typeguard
+    enable_if_env:
+      MLC_MLPERF_INFERENCE_VERSION:
+        - "5.0"
+
 variations:
   nvidia-only:
     group: code
@@ -124,3 +129,8 @@ versions:
     add_deps_recursive:
       nvidia-inference-common-code:
         version: r4.0
+
+  r5.0:
+    add_deps_recursive:
+      nvidia-inference-common-code:
+        version: r5.0

diff --git a/script/app-mlperf-inference-nvidia/meta.yaml b/script/app-mlperf-inference-nvidia/meta.yaml
index 472eb9383..110b792bd 100644
--- a/script/app-mlperf-inference-nvidia/meta.yaml
+++ b/script/app-mlperf-inference-nvidia/meta.yaml
@@ -272,6 +272,8 @@ deps:
       - run_harness

   - tags: get,generic-python-lib,_package.pycuda
+    names:
+      - pycuda
     version: "2022.2.2"

   - tags: get,generic-python-lib,_package.nvmitten
@@ -281,11 +283,10 @@ deps:
     enable_if_env:
       MLC_RUN_STATE_DOCKER:
         - 'yes'
+      MLC_ENV_NVMITTEN_DOCKER_WHEEL_PATH:
+        - 'yes'

   - tags: get,nvidia,mitten
-    skip_if_env:
-      MLC_RUN_STATE_DOCKER:
-        - 'yes'
     enable_if_env:
       MLC_NVIDIA_MITTEN_FROM_SRC:
         - 'yes'
@@ -351,6 +352,18 @@ post_deps:
 # Variations to customize dependencies
 variations:
   # MLPerf inference version
+  v5.0:
+    group: version
+    env:
+      MLC_MLPERF_INFERENCE_CODE_VERSION: "v5.0"
+      MLC_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX: GPTJ-FP8-quantized
+      MLC_NVIDIA_MITTEN_FROM_SRC: "yes"
+      MLC_GIT_CHECKOUT: "98bb85df8e936219ec7acd10ce1d702147fb1e21"
+    adr:
+      pytorch:
+        tags: _for-nvidia-mlperf-inference-v5.0
+      pycuda:
+        version_min: "2024.1"
   v4.1:
     group: version
     env:
@@ -435,9 +448,20 @@ variations:
       - tags: get,generic-python-lib,_numpy
       - tags: get,generic-python-lib,_pycocotools
      - tags: get,generic-python-lib,_onnx-graphsurgeon
+      - tags: get,generic,sys-util,_cmake
+      - tags: get,generic-python-lib,_package.cmake
+      - tags: get,generic-python-lib,_package.sympy
+
+  retinanet,v5.0:
+    deps:
+      - tags: get,generic-python-lib,_package.onnx
+        version: 1.17.0
+
+  retinanet,v4.0:
+    deps:
       - tags: get,generic-python-lib,_package.onnx
         version: 1.14.1
-      - tags: get,generic-python-lib,_package.sympy
+

   sdxl:
     new_env_keys:
@@ -481,8 +505,8 @@ variations:
         names:
           - nvtx
       - tags: get,generic-python-lib,_package.cuda-python
-        version_max: 12.6.2
-        version_max_usable: 12.6.2
+        version_max: "12.6.2"
+        version_max_usable: "12.6.2"
         names:
           - cuda-python
       - tags: get,generic-python-lib,_package.ninja
@@ -494,38 +518,78 @@ variations:
       - tags: get,generic-python-lib,_package.colored
         names:
           - colored
-      - tags: get,generic-python-lib,_package.nvidia-ammo
-        names:
-          - nvidia-ammo
-        version: 0.7.4
-        env:
-          MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: "https://pypi.nvidia.com"
-          MLC_GENERIC_PYTHON_PIP_EXTRA: "--no-cache-dir"
       - tags: get,generic-python-lib,_package.optimum
         names:
           - optimum
-      - tags: get,generic-python-lib,_package.onnx
-        names:
-          - onnx
-        version: 1.14.0
       - tags: get,generic-python-lib,_package.scipy
         names:
           - scipy
-        version: 1.10.1
-      - tags: get,generic-python-lib,_package.numpy
-        names:
-          - numpy
-        version_max: 1.22.99
-        version_max_usable: "1.22"

+  sdxl,v4.0:
+    deps:
+      - tags: get,generic-python-lib,_package.onnx
+        names:
+          - onnx
+        version: "1.14.0"
+      - tags: get,generic-python-lib,_package.numpy
+        names:
+          - numpy
+        version_max: "1.22.99"
+        version_max_usable: "1.22"
+      - tags: get,generic-python-lib,_package.nvidia-ammo
+        names:
+          - nvidia-ammo
+        version: "0.7.4"
+        env:
+          MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: "https://pypi.nvidia.com"
+          MLC_GENERIC_PYTHON_PIP_EXTRA: "--no-cache-dir"
+
   sdxl,v4.1:
     deps:
       - tags: get,generic-python-lib,_package.torchrec
-        version: 0.4.0
+        version: "0.4.0"
       - tags: get,generic-python-lib,_package.torchmetrics
-        version: 1.0.3
+        version: "1.0.3"
       - tags: get,generic-python-lib,_package.typeguard
+      - tags: get,generic-python-lib,_package.onnx
+        names:
+          - onnx
+        version: "1.14.0"
+      - tags: get,generic-python-lib,_package.numpy
+        names:
+          - numpy
+        version_max: "1.22.99"
+        version_max_usable: "1.22"
+      - tags: get,generic-python-lib,_package.nvidia-ammo
+        names:
+          - nvidia-ammo
+        version: "0.7.4"
+        env:
+          MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: "https://pypi.nvidia.com"
+          MLC_GENERIC_PYTHON_PIP_EXTRA: "--no-cache-dir"
+      - tags: get,generic-python-lib,_package.scipy
+        names:
+          - scipy
+        version: "1.10.1"
+
+  sdxl,v5.0:
+    # nvidia-ammo is decommissioned and model-opt is being used which is built with TRTLLM
+    deps:
+      - tags: get,generic-python-lib,_package.torchrec
+        version: "0.6.0"
+      - tags: get,generic-python-lib,_package.torchmetrics
+        version: "1.0.3"
+      - tags: get,generic-python-lib,_package.typeguard
+      - tags: get,generic-python-lib,_package.onnx
+        names:
+          - onnx
+        version: "1.17.0"
+      - tags: get,generic-python-lib,_package.numpy
+        names:
+          - numpy
+        version_max: "1.26.99"
+        version_max_usable: "1.26.4"
+
   bert_:
     deps:
       - tags: get,generic-python-lib,_transformers

diff --git a/script/app-mlperf-inference/meta.yaml b/script/app-mlperf-inference/meta.yaml
index 6a4b6cf3b..c1c177d61 100644
--- a/script/app-mlperf-inference/meta.yaml
+++ b/script/app-mlperf-inference/meta.yaml
@@ -382,6 +382,24 @@ variations:
       env:
         MLC_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3b0-cp310-cp310-linux_aarch64.whl'

+  nvidia-original,r5.0_default:
+    env:
+      MLC_NVIDIA_MITTEN_FROM_SRC: 'yes'
+    docker:
+      build_deps:
+        - tags: detect,os
+      image_name: mlperf-inference-nvidia-v5.0-common
+    update_meta_if_env:
+      - enable_if_env:
+          MLC_HOST_PLATFORM_FLAVOR:
+            - x86_64
+        docker:
+          base_image: nvcr.io/nvidia/mlperf/mlperf-inference:mlpinf-v5.0-cuda12.8-pytorch25.01-ubuntu24.04-x86_64-release
+      - skip_if_env:
+          MLC_HOST_PLATFORM_FLAVOR:
+            - x86_64
+        docker:
+          base_image: nvcr.io/nvidia/mlperf/mlperf-inference:mlpinf-v5.0-cuda12.8-pytorch25.01-ubuntu24.04-aarch64-Grace-release

   nvidia-original,gptj_:
     env:
@@ -424,6 +442,14 @@ variations:
         update_tags_from_env_with_prefix:
           _tp-size.:
             - MLC_NVIDIA_TP_SIZE
+
+  nvidia-original,r5.0_default,gptj_:
+    docker:
+      deps:
+        - tags: get,ml-model,gptj,_nvidia,_fp8
+          update_tags_from_env_with_prefix:
+            _tp-size.:
+              - MLC_NVIDIA_TP_SIZE

   nvidia-original,r4.1-dev_default,llama2-70b_:
@@ -446,6 +472,14 @@ variations:
             - MLC_NVIDIA_TP_SIZE
     env:
       BUILD_TRTLLM: 1
+
+  nvidia-original,r5.0_default,llama2-70b_:
+    docker:
+      deps:
+        - tags: get,ml-model,llama2-70b,_nvidia,_fp8
+          update_tags_from_env_with_prefix:
+            _tp-size.:
+              - MLC_NVIDIA_TP_SIZE

   nvidia-original:
     docker:
@@ -1813,6 +1847,27 @@ variations:
       MLC_REGENERATE_MEASURE_FILES: 'yes'
     env:
       MLC_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3b0-cp38-cp38-linux_x86_64.whl'
+
+  r5.0_default:
+    group:
+      reproducibility
+    add_deps_recursive:
+      nvidia-inference-common-code:
+        version: r5.0
+        tags: _mlcommons
+      nvidia-inference-server:
+        version: r5.0
+        tags: _mlcommons
+      intel-harness:
+        tags: _v4.1
+      inference-src:
+        version: r5.0
+      nvidia-scratch-space:
+        tags: _version.5.0
+    default_env:
+      MLC_SKIP_SYS_UTILS: 'yes'
+      MLC_REGENERATE_MEASURE_FILES: 'yes'
+      MLC_MLPERF_INFERENCE_VERSION: '5.0'

 invalid_variation_combinations:

diff --git a/script/build-dockerfile/customize.py b/script/build-dockerfile/customize.py
index a3d32b247..56bbf26b9 100644
--- a/script/build-dockerfile/customize.py
+++ b/script/build-dockerfile/customize.py
@@ -249,6 +249,10 @@ def preprocess(i):
         for cmd in config['RUN_CMDS']:
             f.write('RUN ' + cmd + EOL)

+        if env.get('MLC_MLPERF_IMPLEMENTATION', '') == "nvidia" and env.get(
+                'MLC_MLPERF_INFERENCE_VERSION', '') == "5.0":
+            f.write('ENV ' + 'ENV' + "=\"" + 'release' + "\"" + EOL)
+
         f.write(EOL + '# Setup docker user' + EOL)
         docker_user = get_value(env, config, 'USER', 'MLC_DOCKER_USER')
         docker_group = get_value(env, config, 'GROUP', 'MLC_DOCKER_GROUP')

diff --git a/script/build-mlperf-inference-server-nvidia/meta.yaml b/script/build-mlperf-inference-server-nvidia/meta.yaml
index 69ea53bd1..c47c9d819 100644
--- a/script/build-mlperf-inference-server-nvidia/meta.yaml
+++ b/script/build-mlperf-inference-server-nvidia/meta.yaml
@@ -111,6 +111,8 @@ deps:

   # Detect pycuda
   - tags: get,generic-python-lib,_pycuda
+    names:
+      - pycuda
     version: "2022.2.2"
     skip_if_env:
       MLC_RUN_STATE_DOCKER:
@@ -335,7 +337,11 @@ versions:
   r4.1:
     default_env:
       BUILD_TRTLLM: 1
-
+
+  r5.0:
+    add_deps_recursive:
+      pycuda:
+        version: "2024.1"

 docker:
   skip_run_cmd: 'no'
   all_gpus: 'yes'

diff --git a/script/build-mlperf-inference-server-nvidia/run.sh b/script/build-mlperf-inference-server-nvidia/run.sh
index ac990aa62..c5f4e1a8a 100644
--- a/script/build-mlperf-inference-server-nvidia/run.sh
+++ b/script/build-mlperf-inference-server-nvidia/run.sh
@@ -8,9 +8,17 @@ if [[ ${MLC_MAKE_CLEAN} == "yes" ]]; then
 fi

 if [[ ${MLC_MLPERF_DEVICE} == "inferentia" ]]; then
+  echo "inferentia"
   make prebuild
 fi

+# Perform sed replacement only if version is 5.0
+if [[ "${MLC_MLPERF_INFERENCE_VERSION}" == "5.0" ]]; then
+  echo "Replacing /work/ with ${MLC_MLPERF_INFERENCE_NVIDIA_CODE_PATH} in all files..."
+  find . -type f -exec sed -i "s|/work/|${MLC_MLPERF_INFERENCE_NVIDIA_CODE_PATH}/|g" {} +
+fi
+
+echo ${MLC_MAKE_BUILD_COMMAND}
 SKIP_DRIVER_CHECK=1 make ${MLC_MAKE_BUILD_COMMAND}
 test $? -eq 0 || exit $?

diff --git a/script/get-mlperf-inference-nvidia-common-code/meta.yaml b/script/get-mlperf-inference-nvidia-common-code/meta.yaml
index 3c0657dda..8d0be3d3a 100644
--- a/script/get-mlperf-inference-nvidia-common-code/meta.yaml
+++ b/script/get-mlperf-inference-nvidia-common-code/meta.yaml
@@ -63,3 +63,8 @@ versions:
       mlperf-inference-results:
         version: v4.0
         tags: _code-only-for-v5.0
+  r5.0:
+    add_deps_recursive:
+      mlperf-inference-results:
+        version: v5.0
+        tags: _code-only

diff --git a/script/get-mlperf-inference-results/meta.yaml b/script/get-mlperf-inference-results/meta.yaml
index 8b1da1310..7b8397c0a 100644
--- a/script/get-mlperf-inference-results/meta.yaml
+++ b/script/get-mlperf-inference-results/meta.yaml
@@ -86,3 +86,7 @@ versions:
     env:
       MLC_GIT_URL: https://github.com/<<>>/inference_results_v4.1.git
       MLC_MLPERF_INFERENCE_RESULTS_VERSION_NAME: v4.1
+  v5.0:
+    env:
+      MLC_GIT_URL: https://github.com/<<>>/inference_results_v5.0.git
+      MLC_MLPERF_INFERENCE_RESULTS_VERSION_NAME: v5.0

diff --git a/script/get-nvidia-mitten/customize.py b/script/get-nvidia-mitten/customize.py
index a2acfde43..77e4bac46 100644
--- a/script/get-nvidia-mitten/customize.py
+++ b/script/get-nvidia-mitten/customize.py
@@ -5,8 +5,13 @@ def preprocess(i):

     os_info = i['os_info']
+    env = i['env']
+    script_path = i['artifact'].path

-    # TBD
+    if env.get('MLC_MLPERF_INFERENCE_VERSION', '') == "5.0":
+        extra_run_cmd = 'patch -p1 < {}'.format(os.path.join(
+            script_path, 'patch', 'numpy-mitten-v5.0.patch'))
+        env['EXTRA_RUN_CMD'] = extra_run_cmd

     return {'return': 0}

diff --git a/script/get-nvidia-mitten/meta.yaml b/script/get-nvidia-mitten/meta.yaml
index 3073438bb..09bac19b9 100644
--- a/script/get-nvidia-mitten/meta.yaml
+++ b/script/get-nvidia-mitten/meta.yaml
@@ -11,7 +11,7 @@ deps:
     - python
   tags: get,python3
 - tags: get,generic-python-lib,_pycuda
-  version: 2022.2.2
+  version: "2023.1"
 - env:
     MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_NVIDIA_MITTEN_SRC
   extra_cache_tags: nvidia,mitten,src

diff --git a/script/get-nvidia-mitten/patch/numpy-mitten-v5.0.patch b/script/get-nvidia-mitten/patch/numpy-mitten-v5.0.patch
new file mode 100644
index 000000000..b89abe2ab
--- /dev/null
+++ b/script/get-nvidia-mitten/patch/numpy-mitten-v5.0.patch
@@ -0,0 +1,13 @@
+diff --git a/setup.cfg b/setup.cfg
+index 4976354..798175e 100644
+--- a/setup.cfg
++++ b/setup.cfg
+@@ -21,7 +21,7 @@ install_requires =
+     graphlib_backport >=1.0.3;python_version<'3.9'
+     requests >=2.28.1
+     tqdm >=4.65.0
+-    numpy >=1.22.0, <1.24.0
++    numpy >=1.26.4
+     GitPython >=3.1.31
+     pandas
+     opencv-python

diff --git a/script/get-nvidia-mitten/run.sh b/script/get-nvidia-mitten/run.sh
index ac0dc16b2..81f38f829 100644
--- a/script/get-nvidia-mitten/run.sh
+++ b/script/get-nvidia-mitten/run.sh
@@ -1,4 +1,7 @@
 #!/bin/bash
 cd ${MLC_NVIDIA_MITTEN_SRC}
+echo "EXTRA_RUN_CMD = ${EXTRA_RUN_CMD}"
+eval "${EXTRA_RUN_CMD}"
+test $? -eq 0 || exit $?
 ${MLC_PYTHON_BIN_WITH_PATH} -m pip install .
 test $? -eq 0 || exit $?

diff --git a/script/run-mlperf-inference-app/meta.yaml b/script/run-mlperf-inference-app/meta.yaml
index b19362ecc..5c13dfa16 100644
--- a/script/run-mlperf-inference-app/meta.yaml
+++ b/script/run-mlperf-inference-app/meta.yaml
@@ -380,6 +380,20 @@ variations:
       mlperf-inference-nvidia-scratch-space:
         tags: _version.r5.0-dev

+  r5.0:
+    env:
+      MLC_MLPERF_INFERENCE_VERSION: '5.0'
+      MLC_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r5.0_default
+      MLC_MLPERF_SUBMISSION_CHECKER_VERSION: v5.0
+    group: benchmark-version
+    adr:
+      get-mlperf-inference-results-dir:
+        tags: _version.r5.0
+      get-mlperf-inference-submission-dir:
+        tags: _version.r5.0
+      mlperf-inference-nvidia-scratch-space:
+        tags: _version.r5.0
+
   short:
     add_deps_recursive:
       submission-checker:
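Note: the compare_versions change at the top of the patch above keeps only the numeric parts of each version before comparing, so "3.9.6" and "3.9" reduce to comparable integer lists. A hedged sketch of the comparison this enables (the zero-padding step is an assumption about the surrounding code, which the diff does not show):

    # Illustrative only - not the exact MLC utils implementation.
    def compare(v1: str, v2: str) -> int:
        p1 = [int(p) for p in v1.split('.') if p.isdigit()]
        p2 = [int(p) for p in v2.split('.') if p.isdigit()]
        n = max(len(p1), len(p2))
        p1 += [0] * (n - len(p1))   # pad so "3.9" compares as "3.9.0"
        p2 += [0] * (n - len(p2))
        return (p1 > p2) - (p1 < p2)  # -1, 0 or 1

    assert compare("3.9", "3.9.6") == -1
    assert compare("12.6.2", "12.6.2") == 0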
"2023.1" - env: MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_NVIDIA_MITTEN_SRC extra_cache_tags: nvidia,mitten,src diff --git a/script/get-nvidia-mitten/patch/numpy-mitten-v5.0.patch b/script/get-nvidia-mitten/patch/numpy-mitten-v5.0.patch new file mode 100644 index 000000000..b89abe2ab --- /dev/null +++ b/script/get-nvidia-mitten/patch/numpy-mitten-v5.0.patch @@ -0,0 +1,13 @@ +diff --git a/setup.cfg b/setup.cfg +index 4976354..798175e 100644 +--- a/setup.cfg ++++ b/setup.cfg +@@ -21,7 +21,7 @@ install_requires = + graphlib_backport >=1.0.3;python_version<'3.9' + requests >=2.28.1 + tqdm >=4.65.0 +- numpy >=1.22.0, <1.24.0 ++ numpy >=1.26.4 + GitPython >=3.1.31 + pandas + opencv-python diff --git a/script/get-nvidia-mitten/run.sh b/script/get-nvidia-mitten/run.sh index ac0dc16b2..81f38f829 100644 --- a/script/get-nvidia-mitten/run.sh +++ b/script/get-nvidia-mitten/run.sh @@ -1,4 +1,7 @@ #!/bin/bash cd ${MLC_NVIDIA_MITTEN_SRC} +echo "EXTRA_RUN_CMD = ${EXTRA_RUN_CMD}" +eval "${EXTRA_RUN_CMD}" +test $? -eq 0 || exit $? ${MLC_PYTHON_BIN_WITH_PATH} -m pip install . test $? -eq 0 || exit $? diff --git a/script/run-mlperf-inference-app/meta.yaml b/script/run-mlperf-inference-app/meta.yaml index b19362ecc..5c13dfa16 100644 --- a/script/run-mlperf-inference-app/meta.yaml +++ b/script/run-mlperf-inference-app/meta.yaml @@ -380,6 +380,20 @@ variations: mlperf-inference-nvidia-scratch-space: tags: _version.r5.0-dev + r5.0: + env: + MLC_MLPERF_INFERENCE_VERSION: '5.0' + MLC_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r5.0_default + MLC_MLPERF_SUBMISSION_CHECKER_VERSION: v5.0 + group: benchmark-version + adr: + get-mlperf-inference-results-dir: + tags: _version.r5.0 + get-mlperf-inference-submission-dir: + tags: _version.r5.0 + mlperf-inference-nvidia-scratch-space: + tags: _version.r5.0 + short: add_deps_recursive: submission-checker: From 46865b09eef8865f771ebdcc0802f6f57365a429 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Sun, 18 May 2025 19:40:04 +0530 Subject: [PATCH 18/43] pycuda version fix (#418) --- script/app-mlperf-inference-nvidia/meta.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/script/app-mlperf-inference-nvidia/meta.yaml b/script/app-mlperf-inference-nvidia/meta.yaml index 110b792bd..c1273e286 100644 --- a/script/app-mlperf-inference-nvidia/meta.yaml +++ b/script/app-mlperf-inference-nvidia/meta.yaml @@ -364,6 +364,7 @@ variations: tags: _for-nvidia-mlperf-inference-v5.0 pycuda: version_min: "2024.1" + version: "" v4.1: group: version env: From 339f3117131b506e54d1d96b889895b0008e860e Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Sun, 18 May 2025 16:26:16 +0100 Subject: [PATCH 19/43] Fix for pycuda in nvidia-impl (#420) --- script/app-mlperf-inference-nvidia/meta.yaml | 3 +++ .../meta.yaml | 15 ++++++++++++--- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/script/app-mlperf-inference-nvidia/meta.yaml b/script/app-mlperf-inference-nvidia/meta.yaml index c1273e286..a5a4e3e53 100644 --- a/script/app-mlperf-inference-nvidia/meta.yaml +++ b/script/app-mlperf-inference-nvidia/meta.yaml @@ -365,6 +365,9 @@ variations: pycuda: version_min: "2024.1" version: "" + nvidia-inference-server: + version: r5.0 + tags: _mlcommons v4.1: group: version env: diff --git a/script/build-mlperf-inference-server-nvidia/meta.yaml b/script/build-mlperf-inference-server-nvidia/meta.yaml index c47c9d819..6ee592a81 100644 --- a/script/build-mlperf-inference-server-nvidia/meta.yaml +++ b/script/build-mlperf-inference-server-nvidia/meta.yaml @@ -113,12 +113,9 
@@ deps: - tags: get,generic-python-lib,_pycuda names: - pycuda - version: "2022.2.2" skip_if_env: MLC_RUN_STATE_DOCKER: - 'yes' - - True - - 'True' # Detect opencv-python - tags: get,generic-python-lib,_opencv-python @@ -233,6 +230,9 @@ versions: r2.1: {} r3.0: {} r3.1: + add_deps_recursive: + pycuda: + version: "2022.2.2" deps: - tags: install,pytorch,from.src,_for-nvidia-mlperf-inference-v3.1 names: @@ -247,6 +247,9 @@ versions: r4.0: default_env: BUILD_TRTLLM: 1 + add_deps_recursive: + pycuda: + version: "2022.2.2" deps: - tags: get,generic,sys-util,_nlohmann-json3-dev - tags: get,generic,sys-util,_git-lfs @@ -292,6 +295,9 @@ versions: r4.1-dev: default_env: BUILD_TRTLLM: 1 + add_deps_recursive: + pycuda: + version: "2022.2.2" deps: - tags: get,generic,sys-util,_nlohmann-json3-dev - tags: get,generic,sys-util,_git-lfs @@ -337,6 +343,9 @@ versions: r4.1: default_env: BUILD_TRTLLM: 1 + add_deps_recursive: + pycuda: + version: "2022.2.2" r5.0: add_deps_recursive: From 87f164510439a2c02f2a916b94674c2e5209478c Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Sun, 18 May 2025 21:50:11 +0530 Subject: [PATCH 20/43] Deprecated version in scripts (#421) * pycuda version fix * Deprecated version in scripts --- script/app-mlperf-inference-nvidia/meta.yaml | 3 +-- script/app-mlperf-inference/meta.yaml | 3 +-- .../meta.yaml | 22 +++++-------------- .../meta.yaml | 9 ++++---- script/get-mlperf-inference-results/meta.yaml | 9 ++++---- 5 files changed, 17 insertions(+), 29 deletions(-) diff --git a/script/app-mlperf-inference-nvidia/meta.yaml b/script/app-mlperf-inference-nvidia/meta.yaml index a5a4e3e53..86d067c89 100644 --- a/script/app-mlperf-inference-nvidia/meta.yaml +++ b/script/app-mlperf-inference-nvidia/meta.yaml @@ -366,8 +366,7 @@ variations: version_min: "2024.1" version: "" nvidia-inference-server: - version: r5.0 - tags: _mlcommons + tags: _mlcommons,_r5.0 v4.1: group: version env: diff --git a/script/app-mlperf-inference/meta.yaml b/script/app-mlperf-inference/meta.yaml index c1c177d61..c81aa2748 100644 --- a/script/app-mlperf-inference/meta.yaml +++ b/script/app-mlperf-inference/meta.yaml @@ -1853,8 +1853,7 @@ variations: reproducibility add_deps_recursive: nvidia-inference-common-code: - version: r5.0 - tags: _mlcommons + tags: _mlcommons,_r5.0 nvidia-inference-server: version: r5.0 tags: _mlcommons diff --git a/script/build-mlperf-inference-server-nvidia/meta.yaml b/script/build-mlperf-inference-server-nvidia/meta.yaml index 6ee592a81..fb345bc1f 100644 --- a/script/build-mlperf-inference-server-nvidia/meta.yaml +++ b/script/build-mlperf-inference-server-nvidia/meta.yaml @@ -113,6 +113,7 @@ deps: - tags: get,generic-python-lib,_pycuda names: - pycuda + version: "2022.2.2" skip_if_env: MLC_RUN_STATE_DOCKER: - 'yes' @@ -225,14 +226,16 @@ variations: - x86_64 MLC_PYTHON_MINOR_VERSION: - 8 + + r5.0: + add_deps_recursive: + pycuda: + version: "2024.1" versions: r2.1: {} r3.0: {} r3.1: - add_deps_recursive: - pycuda: - version: "2022.2.2" deps: - tags: install,pytorch,from.src,_for-nvidia-mlperf-inference-v3.1 names: @@ -247,9 +250,6 @@ versions: r4.0: default_env: BUILD_TRTLLM: 1 - add_deps_recursive: - pycuda: - version: "2022.2.2" deps: - tags: get,generic,sys-util,_nlohmann-json3-dev - tags: get,generic,sys-util,_git-lfs @@ -295,9 +295,6 @@ versions: r4.1-dev: default_env: BUILD_TRTLLM: 1 - add_deps_recursive: - pycuda: - version: "2022.2.2" deps: - tags: get,generic,sys-util,_nlohmann-json3-dev - tags: get,generic,sys-util,_git-lfs @@ 
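Note: the enable_if_env / skip_if_env keys used throughout these patches gate a dependency on environment values. A hedged sketch of the typical semantics (not the actual MLC engine): a dependency runs only if every enable condition matches one of the allowed values, and is dropped if any skip condition matches.

    def dep_enabled(dep: dict, env: dict) -> bool:
        # Illustrative evaluation of enable_if_env / skip_if_env blocks.
        for key, allowed in dep.get('enable_if_env', {}).items():
            if str(env.get(key, '')) not in [str(v) for v in allowed]:
                return False
        for key, blocked in dep.get('skip_if_env', {}).items():
            if str(env.get(key, '')) in [str(v) for v in blocked]:
                return False
        return True

    dep = {'skip_if_env': {'MLC_RUN_STATE_DOCKER': ['yes']}}
    print(dep_enabled(dep, {'MLC_RUN_STATE_DOCKER': 'yes'}))  # False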
From 87f164510439a2c02f2a916b94674c2e5209478c Mon Sep 17 00:00:00 2001
From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com>
Date: Sun, 18 May 2025 21:50:11 +0530
Subject: [PATCH 20/43] Deprecated version in scripts (#421)

* pycuda version fix

* Deprecated version in scripts

---
 script/app-mlperf-inference-nvidia/meta.yaml  |  3 +--
 script/app-mlperf-inference/meta.yaml         |  3 +--
 .../meta.yaml                                 | 22 +++++--------------
 .../meta.yaml                                 |  9 ++++----
 script/get-mlperf-inference-results/meta.yaml |  9 ++++----
 5 files changed, 17 insertions(+), 29 deletions(-)

diff --git a/script/app-mlperf-inference-nvidia/meta.yaml b/script/app-mlperf-inference-nvidia/meta.yaml
index a5a4e3e53..86d067c89 100644
--- a/script/app-mlperf-inference-nvidia/meta.yaml
+++ b/script/app-mlperf-inference-nvidia/meta.yaml
@@ -366,8 +366,7 @@ variations:
         version_min: "2024.1"
         version: ""
       nvidia-inference-server:
-        version: r5.0
-        tags: _mlcommons
+        tags: _mlcommons,_r5.0
   v4.1:
     group: version
     env:

diff --git a/script/app-mlperf-inference/meta.yaml b/script/app-mlperf-inference/meta.yaml
index c1c177d61..c81aa2748 100644
--- a/script/app-mlperf-inference/meta.yaml
+++ b/script/app-mlperf-inference/meta.yaml
@@ -1853,8 +1853,7 @@ variations:
       reproducibility
     add_deps_recursive:
       nvidia-inference-common-code:
-        version: r5.0
-        tags: _mlcommons
+        tags: _mlcommons,_r5.0
       nvidia-inference-server:
         version: r5.0
         tags: _mlcommons

diff --git a/script/build-mlperf-inference-server-nvidia/meta.yaml b/script/build-mlperf-inference-server-nvidia/meta.yaml
index 6ee592a81..fb345bc1f 100644
--- a/script/build-mlperf-inference-server-nvidia/meta.yaml
+++ b/script/build-mlperf-inference-server-nvidia/meta.yaml
@@ -113,6 +113,7 @@ deps:
   - tags: get,generic-python-lib,_pycuda
     names:
       - pycuda
+    version: "2022.2.2"
     skip_if_env:
       MLC_RUN_STATE_DOCKER:
         - 'yes'
@@ -225,14 +226,16 @@ variations:
             - x86_64
           MLC_PYTHON_MINOR_VERSION:
             - 8
+
+  r5.0:
+    add_deps_recursive:
+      pycuda:
+        version: "2024.1"

 versions:
   r2.1: {}
   r3.0: {}
   r3.1:
-    add_deps_recursive:
-      pycuda:
-        version: "2022.2.2"
     deps:
     - tags: install,pytorch,from.src,_for-nvidia-mlperf-inference-v3.1
       names:
@@ -247,9 +250,6 @@ versions:
   r4.0:
     default_env:
       BUILD_TRTLLM: 1
-    add_deps_recursive:
-      pycuda:
-        version: "2022.2.2"
     deps:
     - tags: get,generic,sys-util,_nlohmann-json3-dev
     - tags: get,generic,sys-util,_git-lfs
@@ -295,9 +295,6 @@ versions:
   r4.1-dev:
     default_env:
       BUILD_TRTLLM: 1
-    add_deps_recursive:
-      pycuda:
-        version: "2022.2.2"
     deps:
     - tags: get,generic,sys-util,_nlohmann-json3-dev
     - tags: get,generic,sys-util,_git-lfs
@@ -343,14 +340,7 @@ versions:
   r4.1:
     default_env:
       BUILD_TRTLLM: 1
-    add_deps_recursive:
-      pycuda:
-        version: "2022.2.2"

-  r5.0:
-    add_deps_recursive:
-      pycuda:
-        version: "2024.1"

 docker:
   skip_run_cmd: 'no'
   all_gpus: 'yes'

diff --git a/script/get-mlperf-inference-nvidia-common-code/meta.yaml b/script/get-mlperf-inference-nvidia-common-code/meta.yaml
index 8d0be3d3a..27dffb57a 100644
--- a/script/get-mlperf-inference-nvidia-common-code/meta.yaml
+++ b/script/get-mlperf-inference-nvidia-common-code/meta.yaml
@@ -32,6 +32,10 @@ variations:
     group: repo-owner
   nvidia-only:
     group: repo-owner
+  r5.0:
+    add_deps_recursive:
+      mlperf-inference-results:
+        tags: _code-only,_v5.0
 versions:
   r2.1:
     add_deps_recursive:
@@ -63,8 +67,3 @@ versions:
       mlperf-inference-results:
         version: v4.0
         tags: _code-only-for-v5.0
-  r5.0:
-    add_deps_recursive:
-      mlperf-inference-results:
-        version: v5.0
-        tags: _code-only

diff --git a/script/get-mlperf-inference-results/meta.yaml b/script/get-mlperf-inference-results/meta.yaml
index 7b8397c0a..a9d90b65b 100644
--- a/script/get-mlperf-inference-results/meta.yaml
+++ b/script/get-mlperf-inference-results/meta.yaml
@@ -65,6 +65,10 @@ variations:
       GITHUB_REPO_OWNER: GATEOverflow
       NVIDIA_ONLY: 'yes'
     group: source-repo
+  v5.0:
+    env:
+      MLC_GIT_URL: https://github.com/<<>>/inference_results_v5.0.git
+      MLC_MLPERF_INFERENCE_RESULTS_VERSION_NAME: v5.0
 versions:
   v2.1:
     env:
@@ -86,7 +90,4 @@ versions:
     env:
       MLC_GIT_URL: https://github.com/<<>>/inference_results_v4.1.git
       MLC_MLPERF_INFERENCE_RESULTS_VERSION_NAME: v4.1
-  v5.0:
-    env:
-      MLC_GIT_URL: https://github.com/<<>>/inference_results_v5.0.git
-      MLC_MLPERF_INFERENCE_RESULTS_VERSION_NAME: v5.0
+

From 9ea40c2c87a35fc5757aaa9cf011493b797b47bb Mon Sep 17 00:00:00 2001
From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com>
Date: Mon, 19 May 2025 00:10:21 +0530
Subject: [PATCH 21/43] Fixes for pycuda and versions (#422)

* fix tags for inheritance

* fix variation tag

* fix pycuda version

* provide version for git checkout

* pycuda version fix for get cuda devices

* pycuda fix for mitten library

* fix variation

---
 script/add-custom-nvidia-system/meta.yaml                | 5 ++++-
 script/app-mlperf-inference-nvidia/meta.yaml             | 5 ++---
 script/app-mlperf-inference/meta.yaml                    | 4 +++-
 script/get-mlperf-inference-nvidia-common-code/meta.yaml | 2 +-
 script/get-mlperf-inference-results/meta.yaml            | 2 +-
 script/get-nvidia-mitten/meta.yaml                       | 3 ++-
 6 files changed, 13 insertions(+), 8 deletions(-)

diff --git a/script/add-custom-nvidia-system/meta.yaml b/script/add-custom-nvidia-system/meta.yaml
index b07ec8d5e..ec7202857 100644
--- a/script/add-custom-nvidia-system/meta.yaml
+++ b/script/add-custom-nvidia-system/meta.yaml
@@ -105,7 +105,10 @@ variations:
     add_deps_recursive:
       nvidia-inference-common-code:
         tags: _go
-
+  v5.0:
+    add_deps_recursive:
+      nvidia-inference-common-code:
+        tags: _v5.0

diff --git a/script/app-mlperf-inference-nvidia/meta.yaml b/script/app-mlperf-inference-nvidia/meta.yaml
index 86d067c89..1fbba99d6 100644
--- a/script/app-mlperf-inference-nvidia/meta.yaml
+++ b/script/app-mlperf-inference-nvidia/meta.yaml
@@ -363,10 +363,9 @@ variations:
       pytorch:
         tags: _for-nvidia-mlperf-inference-v5.0
       pycuda:
-        version_min: "2024.1"
-        version: ""
+        version: "2024.1"
       nvidia-inference-server:
-        tags: _mlcommons,_r5.0
+        tags: _mlcommons,_v5.0
   v4.1:
     group: version
     env:

diff --git a/script/app-mlperf-inference/meta.yaml b/script/app-mlperf-inference/meta.yaml
index c81aa2748..6d796f7ce 100644
--- a/script/app-mlperf-inference/meta.yaml
+++ b/script/app-mlperf-inference/meta.yaml
@@ -1853,7 +1853,7 @@ variations:
       reproducibility
     add_deps_recursive:
       nvidia-inference-common-code:
-        tags: _mlcommons,_r5.0
+        tags: _mlcommons,_v5.0
       nvidia-inference-server:
         version: r5.0
         tags: _mlcommons
@@ -1863,6 +1863,8 @@ variations:
         version: r5.0
       nvidia-scratch-space:
         tags: _version.5.0
+      pycuda:
+        version: "2024.1"
     default_env:
       MLC_SKIP_SYS_UTILS: 'yes'
       MLC_REGENERATE_MEASURE_FILES: 'yes'

diff --git a/script/get-mlperf-inference-nvidia-common-code/meta.yaml b/script/get-mlperf-inference-nvidia-common-code/meta.yaml
index 27dffb57a..5fda7a3dd 100644
--- a/script/get-mlperf-inference-nvidia-common-code/meta.yaml
+++ b/script/get-mlperf-inference-nvidia-common-code/meta.yaml
@@ -32,7 +32,7 @@ variations:
     group: repo-owner
   nvidia-only:
     group: repo-owner
-  r5.0:
+  v5.0:
     add_deps_recursive:
       mlperf-inference-results:
         tags: _code-only,_v5.0

diff --git a/script/get-mlperf-inference-results/meta.yaml b/script/get-mlperf-inference-results/meta.yaml
index a9d90b65b..9b42fdf2a 100644
--- a/script/get-mlperf-inference-results/meta.yaml
+++ b/script/get-mlperf-inference-results/meta.yaml
@@ -7,7 +7,6 @@ default_env:
   MLC_GIT_CHECKOUT: master
   MLC_GIT_DEPTH: --depth 1
   MLC_GIT_PATCH: 'no'
-default_version: v4.0
 deps: []
 new_env_keys:
 - MLC_MLPERF_INFERENCE_RESULTS_*
@@ -69,6 +68,7 @@ variations:
     env:
       MLC_GIT_URL: https://github.com/<<>>/inference_results_v5.0.git
       MLC_MLPERF_INFERENCE_RESULTS_VERSION_NAME: v5.0
+      MLC_VERSION: "v5.0"
 versions:
   v2.1:
     env:

diff --git a/script/get-nvidia-mitten/meta.yaml b/script/get-nvidia-mitten/meta.yaml
index 09bac19b9..c5452c480 100644
--- a/script/get-nvidia-mitten/meta.yaml
+++ b/script/get-nvidia-mitten/meta.yaml
@@ -11,7 +11,8 @@ deps:
     - python
   tags: get,python3
 - tags: get,generic-python-lib,_pycuda
-  version: "2023.1"
+  names:
+    - pycuda
 - env:
     MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_NVIDIA_MITTEN_SRC
   extra_cache_tags: nvidia,mitten,src

From 86a1c8946584c9d7a0358012762799fc7a56c325 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Sun, 18 May 2025 22:47:03 +0100
Subject: [PATCH 22/43] Removed pycuda version fix (#423)

* Removed pycuda version fix

* Upgrade numpy for nvmitten

* Fix nvidia-mitten install

---
 script/app-mlperf-inference-nvidia/meta.yaml |  1 -
 script/detect-sudo/customize.py              |  2 +-
 script/get-nvidia-mitten/customize.py        |  6 ------
 script/get-nvidia-mitten/meta.yaml           | 14 +++++++++++++-
 script/get-nvidia-mitten/run.sh              |  5 ++---
 5 files changed, 16 insertions(+), 12 deletions(-)

diff --git a/script/app-mlperf-inference-nvidia/meta.yaml b/script/app-mlperf-inference-nvidia/meta.yaml
index 1fbba99d6..15e66aa40 100644
--- a/script/app-mlperf-inference-nvidia/meta.yaml
+++ b/script/app-mlperf-inference-nvidia/meta.yaml
@@ -274,7 +274,6 @@ deps:
   - tags: get,generic-python-lib,_package.pycuda
     names:
       - pycuda
-    version: "2022.2.2"

   - tags: get,generic-python-lib,_package.nvmitten
     update_tags_from_env_with_prefix:

diff --git a/script/detect-sudo/customize.py b/script/detect-sudo/customize.py
index a749acd2a..2a56f6d33 100644
--- a/script/detect-sudo/customize.py
+++ b/script/detect-sudo/customize.py
@@ -174,7 +174,7 @@ def prompt_sudo(logger):
             if not prompt_retry():  # If the user chooses not to retry or times out
                 return -1
     except subprocess.CalledProcessError as e:
-        logger.error(f"Command failed: {e.output.decode('utf-8')}")
+        logger.error(f"Command failed: {e.output}")
         reset_terminal()  # Reset terminal in case of failure
         return -1
     except Exception as e:

diff --git a/script/get-nvidia-mitten/customize.py b/script/get-nvidia-mitten/customize.py
index 77e4bac46..8d8693ca0 100644
--- a/script/get-nvidia-mitten/customize.py
+++ b/script/get-nvidia-mitten/customize.py
@@ -6,12 +6,6 @@ def preprocess(i):

     os_info = i['os_info']
     env = i['env']
-    script_path = i['artifact'].path
-
-    if env.get('MLC_MLPERF_INFERENCE_VERSION', '') == "5.0":
-        extra_run_cmd = 'patch -p1 < {}'.format(os.path.join(
-            script_path, 'patch', 'numpy-mitten-v5.0.patch'))
-        env['EXTRA_RUN_CMD'] = extra_run_cmd

     return {'return': 0}

diff --git a/script/get-nvidia-mitten/meta.yaml b/script/get-nvidia-mitten/meta.yaml
index c5452c480..80e414a32 100644
--- a/script/get-nvidia-mitten/meta.yaml
+++ b/script/get-nvidia-mitten/meta.yaml
@@ -3,7 +3,7 @@ automation_alias: script
 automation_uid: 5b4e0237da074764
 cache: true
 category: MLPerf benchmark support
-default_version: master
+default_version: main
 deps:
 - tags: detect,os
 - names:
@@ -18,6 +18,7 @@ deps:
   extra_cache_tags: nvidia,mitten,src
   force_env_keys:
   - MLC_GIT_CHECKOUT
+  - MLC_GIT_PATCH_FILEPATHS
   names:
   - nvidia-mitten-git-src
   tags: get,git,_repo.https://github.com/NVIDIA/mitten
@@ -32,3 +33,14 @@ tags:
 - mitten
 - nvidia-mitten
 uid: 1c045f2902374de9
+variations:
+  patched:
+    group: numpy-patch
+    default: true
+    adr:
+      nvidia-mitten-git-src:
+        tags: _patch
+    env:
+      MLC_GIT_PATCH_FILEPATHS: <<>>/patch/numpy-mitten-v5.0.patch
+  default:
+    group: numpy-patch

diff --git a/script/get-nvidia-mitten/run.sh b/script/get-nvidia-mitten/run.sh
index 81f38f829..33f3f5b90 100644
--- a/script/get-nvidia-mitten/run.sh
+++ b/script/get-nvidia-mitten/run.sh
@@ -1,7 +1,6 @@
 #!/bin/bash
 cd ${MLC_NVIDIA_MITTEN_SRC}
-echo "EXTRA_RUN_CMD = ${EXTRA_RUN_CMD}"
-eval "${EXTRA_RUN_CMD}"
 test $? -eq 0 || exit $?
-${MLC_PYTHON_BIN_WITH_PATH} -m pip install .
+PIP_EXTRA=`python3 -c "import importlib.metadata; print(' --break-system-packages ' if int(importlib.metadata.version('pip').split('.')[0]) >= 23 else '')"`
+${MLC_PYTHON_BIN_WITH_PATH} -m pip install . ${PIP_EXTRA}
 test $? -eq 0 || exit $?
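Note: the PIP_EXTRA one-liner added to run.sh above works around PEP 668 "externally managed environment" errors, which recent pip releases raise for system-wide installs on Debian/Ubuntu. The same check, expanded for readability (the pip-major-version heuristic mirrors the shell snippet; whether 23 is the exact cutoff on every distribution is an assumption):

    import importlib.metadata

    # pip >= 23 on PEP 668 distros refuses system-wide installs unless
    # --break-system-packages is passed; older pips need no flag.
    pip_major = int(importlib.metadata.version('pip').split('.')[0])
    extra = ' --break-system-packages ' if pip_major >= 23 else ''
    print(f"pip install .{extra}")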
From 9b934c1db097de5491ccd8d313692eb82deff064 Mon Sep 17 00:00:00 2001
From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com>
Date: Mon, 19 May 2025 04:02:25 +0530
Subject: [PATCH 23/43] Fix onnx version (#425)

* Fix onnx version

* Update meta.yaml

---
 script/app-mlperf-inference-nvidia/meta.yaml | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/script/app-mlperf-inference-nvidia/meta.yaml b/script/app-mlperf-inference-nvidia/meta.yaml
index 15e66aa40..00aa41969 100644
--- a/script/app-mlperf-inference-nvidia/meta.yaml
+++ b/script/app-mlperf-inference-nvidia/meta.yaml
@@ -432,9 +432,19 @@ variations:
     deps:
       - tags: get,generic-python-lib,_onnx-graphsurgeon
         version: 0.3.27
+
+  resnet50,v4.0:
+      - tags: get,generic-python-lib,_package.onnx
+        version: 1.13.1
+
+  resnet50,v4.1:
       - tags: get,generic-python-lib,_package.onnx
         version: 1.13.1

+  resnet50,v5.0:
+      - tags: get,generic-python-lib,_package.onnx
+        version: 1.17.0
+
   retinanet:
     group: model
     env:

From 15b8870307ba770cd6209627a0c8143b57498056 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Mon, 19 May 2025 01:07:11 +0100
Subject: [PATCH 24/43] Fixes to docker mounts/user (#426)

* Explicitly add mlcuser to sudo user group

* Cleanup docker user permissions

* Fix --docker_user=root

---
 automation/script/docker.py          |  4 ++++
 automation/script/docker_utils.py    |  5 ++++-
 script/build-dockerfile/customize.py | 14 ++++++++++----
 3 files changed, 18 insertions(+), 5 deletions(-)

diff --git a/automation/script/docker.py b/automation/script/docker.py
index c2f65ee5c..035b34440 100644
--- a/automation/script/docker.py
+++ b/automation/script/docker.py
@@ -377,6 +377,9 @@ def docker_run(self_module, i):
     env.update({docker_input_mapping[key]: i[key]
                for key in docker_input_mapping if key in i})

+    if docker_inputs.get('user'):
+        docker_settings['user'] = docker_inputs['user']
+
     # Handle environment variable-based mounts
     res = process_mounts(
         mounts,
@@ -412,6 +415,7 @@ def docker_run(self_module, i):
         'quiet': True, 'real_run': True,
         'add_deps_recursive': {'build-docker-image': {'dockerfile': dockerfile_path}},
         **docker_inputs
     }
+
     r = self_module.action_object.access(mlc_docker_input)
     if r['return'] > 0:
         return r

diff --git a/automation/script/docker_utils.py b/automation/script/docker_utils.py
index a94e1a010..e2e1b6fee 100644
--- a/automation/script/docker_utils.py
+++ b/automation/script/docker_utils.py
@@ -452,7 +452,10 @@ def get_container_path(value, username="mlcuser", extract_parent_folder=False):
     new_value = ''

     if "cache" in path_split and "local" in path_split:
-        new_path_split = ["", "home", username, "MLC", "repos"]
+        if username == "root":
+            new_path_split = ["", "root", "MLC", "repos"]
+        else:
+            new_path_split = ["", "home", username, "MLC", "repos"]
         repo_entry_index = path_split.index("local")
         if len(path_split) >= repo_entry_index + 3:
             new_path_split1 = new_path_split + \

diff --git a/script/build-dockerfile/customize.py b/script/build-dockerfile/customize.py
index 56bbf26b9..439fd4eb2 100644
--- a/script/build-dockerfile/customize.py
+++ b/script/build-dockerfile/customize.py
@@ -271,11 +271,17 @@ def preprocess(i):
             user_shell = json.loads(shell)
             f.write(f"""RUN (id -u {docker_user} > /dev/null 2>&1 && usermod -u $UID {docker_user}) || useradd """ + DOCKER_USER_ID + DOCKER_GROUP +
                     ' --create-home --shell ' + user_shell[0] + ' ' + docker_user + EOL)
+            f.write(f'RUN usermod -aG sudo {docker_user}' + EOL)
+
             f.write(
-                'RUN echo "' +
-                docker_user +
-                ' ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers' +
-                EOL)
+                # create the file with both lines and a trailing newline
+                f"RUN printf '{docker_user} ALL=(ALL) NOPASSWD:ALL\\n"
+                f"Defaults:{docker_user} !requiretty\\n' "
+                f"> /etc/sudoers.d/{docker_user} \\\n"
+                # lock down permissions
+                f"    && chmod 0440 /etc/sudoers.d/{docker_user}{EOL}"
+            )
+
             f.write('USER ' + docker_user + ":" + docker_group + EOL)
             f.write(f"""ENV HOME=/home/{docker_user}""" + EOL)

From 901282b1cf9015b67f9f0c4885420efd5dc79e4a Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Mon, 19 May 2025 01:24:34 +0100
Subject: [PATCH 25/43] Update customize.py (#427)

---
 script/build-dockerfile/customize.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/script/build-dockerfile/customize.py b/script/build-dockerfile/customize.py
index 439fd4eb2..3e897ac5b 100644
--- a/script/build-dockerfile/customize.py
+++ b/script/build-dockerfile/customize.py
@@ -275,7 +275,7 @@ def preprocess(i):
             f.write(
                 # create the file with both lines and a trailing newline
-                f"RUN printf '{docker_user} ALL=(ALL) NOPASSWD:ALL\\n"
+                f"RUN printf '{docker_user} ALL=(ALL) NOPASSWD: ALL\\n"
                 f"Defaults:{docker_user} !requiretty\\n' "
                 f"> /etc/sudoers.d/{docker_user} \\\n"
                 # lock down permissions
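Note: patches 24 and 25 move the passwordless-sudo grant from an append to /etc/sudoers into a dedicated drop-in file. A small sketch of the Dockerfile text the generator emits for a hypothetical user (mirroring the customize.py code above; the username is illustrative):

    # Reproduce the generated RUN line; printf writes both sudoers lines
    # with a trailing newline, which sudo requires to parse the file.
    docker_user = "mlcuser"
    run_line = (
        f"RUN printf '{docker_user} ALL=(ALL) NOPASSWD: ALL\\n"
        f"Defaults:{docker_user} !requiretty\\n' "
        f"> /etc/sudoers.d/{docker_user} "
        f"&& chmod 0440 /etc/sudoers.d/{docker_user}\n"
    )
    print(run_line)

A drop-in under /etc/sudoers.d is safer than editing /etc/sudoers directly because a malformed file there cannot corrupt the main policy, and chmod 0440 matches the permissions sudo expects.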
From 462c20d7845faed85143e856dd30b0fbc14a67b1 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Mon, 19 May 2025 11:34:34 +0100
Subject: [PATCH 26/43] Support docker_build_env, fixes #424 (#428)

---
 automation/script/docker.py           |  9 +++++----
 automation/script/docker_utils.py     |  2 +-
 script/app-mlperf-inference/meta.yaml |  7 ++++++-
 script/build-dockerfile/customize.py  | 19 ++++++++++---------
 script/build-dockerfile/meta.yaml     |  1 +
 5 files changed, 23 insertions(+), 15 deletions(-)

diff --git a/automation/script/docker.py b/automation/script/docker.py
index 035b34440..37fa84ab0 100644
--- a/automation/script/docker.py
+++ b/automation/script/docker.py
@@ -36,7 +36,6 @@ def dockerfile(self_module, input_params):
     state_data = input_params.get('state', {})
     constant_vars = input_params.get('const', {})
     constant_state = input_params.get('const_state', {})
-    dockerfile_env = input_params.get('dockerfile_env', {})
     tag_values = input_params.get('tags', '').split(",")
     variation_tags = [tag[1:] for tag in tag_values if tag.startswith("_")]

@@ -56,7 +55,6 @@ def dockerfile(self_module, input_params):
         'script_variation_tags': variation_tags
     }
     docker_settings = metadata.get('docker', {})
-    docker_settings['dockerfile_env'] = dockerfile_env
     state_data['docker'] = docker_settings

     add_deps_recursive = input_params.get('add_deps_recursive', {})
@@ -94,8 +92,6 @@ def dockerfile(self_module, input_params):

         # Set Docker-specific configurations
         docker_settings = state_data['docker']
-        docker_settings['dockerfile_env'] = dockerfile_env
-        dockerfile_env['MLC_RUN_STATE_DOCKER'] = True

         if not docker_settings.get('run', True) and not input_params.get(
                 'docker_run_override', False):
@@ -171,6 +167,10 @@ def dockerfile(self_module, input_params):
         if input_params.get('docker_push_image') in [True, 'True', 'yes']:
             env['MLC_DOCKER_PUSH_IMAGE'] = 'yes'

+        dockerfile_env = docker_inputs.get('env', {})
+        dockerfile_build_env = docker_inputs.get('build_env', {})
+
+        dockerfile_env['MLC_RUN_STATE_DOCKER'] = True
         # Generate Dockerfile
         mlc_docker_input = {
             'action': 'run', 'automation': 'script', 'tags': 'build,dockerfile',
@@ -178,6 +178,7 @@ def dockerfile(self_module, input_params):
             'comments': comments, 'run_cmd': f"{run_command_string} --quiet",
             'script_tags': input_params.get('tags'), 'env': env,
             'dockerfile_env': dockerfile_env,
+            'dockerfile_build_env': dockerfile_build_env,
             'quiet': True, 'real_run': True
         }

diff --git a/automation/script/docker_utils.py b/automation/script/docker_utils.py
index e2e1b6fee..cb733f93a 100644
--- a/automation/script/docker_utils.py
+++ b/automation/script/docker_utils.py
@@ -134,7 +134,7 @@ def prepare_docker_inputs(input_params, docker_settings,
     keys = [
         "mlc_repo", "mlc_repo_branch", "base_image", "os", "os_version",
         "mlc_repos", "skip_mlc_sys_upgrade", "extra_sys_deps", "image_name",
-        "gh_token", "fake_run_deps", "run_final_cmds", "real_run", "copy_files", "path", "user"
+        "gh_token", "fake_run_deps", "run_final_cmds", "real_run", "copy_files", "path", "user", "env", "build_env"
     ]

     if run_stage:

diff --git a/script/app-mlperf-inference/meta.yaml b/script/app-mlperf-inference/meta.yaml
index 6d796f7ce..abf480eff 100644
--- a/script/app-mlperf-inference/meta.yaml
+++ b/script/app-mlperf-inference/meta.yaml
@@ -386,9 +386,14 @@ variations:
     env:
       MLC_NVIDIA_MITTEN_FROM_SRC: 'yes'
     docker:
+      os_version: "24.04"
+      user: 'ubuntu'
       build_deps:
         - tags: detect,os
       image_name: mlperf-inference-nvidia-v5.0-common
+      build_env:
+        ENV: release
+
     update_meta_if_env:
       - enable_if_env:
           MLC_HOST_PLATFORM_FLAVOR:
@@ -1309,7 +1314,7 @@ variations:
       mounts:
         - "${{ MLC_ML_MODEL_FILE_WITH_PATH }}:${{ MLC_ML_MODEL_FILE_WITH_PATH }}"
         - "${{ DLRM_DATA_PATH }}:${{ DLRM_DATA_PATH }}"
-      dockerfile_env:
+      env:
         MLC_ML_MODEL_FILE_WITH_PATH: "on"

diff --git a/script/build-dockerfile/customize.py b/script/build-dockerfile/customize.py
index 3e897ac5b..9f3776c2d 100644
--- a/script/build-dockerfile/customize.py
+++ b/script/build-dockerfile/customize.py
@@ -246,13 +246,15 @@ def preprocess(i):
         for key, value in config['ENV'].items():
             f.write('ENV ' + key + "=\"" + value + "\"" + EOL)
+
+        dockerfile_build_env = env.get('MLC_DOCKERFILE_BUILD_ENV', {})
+        for key in dockerfile_build_env:
+            value = dockerfile_build_env[key]
+            f.write('ENV ' + key + "=\"" + value + "\"" + EOL)
+
         for cmd in config['RUN_CMDS']:
             f.write('RUN ' + cmd + EOL)

-        if env.get('MLC_MLPERF_IMPLEMENTATION', '') == "nvidia" and env.get(
-                'MLC_MLPERF_INFERENCE_VERSION', '') == "5.0":
-            f.write('ENV ' + 'ENV' + "=\"" + 'release' + "\"" + EOL)
-
         f.write(EOL + '# Setup docker user' + EOL)
         docker_user = get_value(env, config, 'USER', 'MLC_DOCKER_USER')
         docker_group = get_value(env, config, 'GROUP', 'MLC_DOCKER_GROUP')
@@ -261,7 +263,8 @@ def preprocess(i):
                 'MLC_DOCKER_USE_DEFAULT_USER', '') == '':
             env['MLC_DOCKER_USE_DEFAULT_USER'] = 'yes'

-    if docker_user and not is_true(env.get('MLC_DOCKER_USE_DEFAULT_USER', '')):
+    if docker_user and not is_true(
+            env.get('MLC_DOCKER_USE_DEFAULT_USER', '')) and docker_user != 'root':

         f.write('RUN groupadd -g $GID -o ' +
                 docker_group + EOL)
@@ -271,15 +274,13 @@ def preprocess(i):
             user_shell = json.loads(shell)
             f.write(f"""RUN (id -u {docker_user} > /dev/null 2>&1 && usermod -u $UID {docker_user}) || useradd """ + DOCKER_USER_ID + DOCKER_GROUP +
                     ' --create-home --shell ' + user_shell[0] + ' ' + docker_user + EOL)
-            f.write(f'RUN usermod -aG sudo {docker_user}' + EOL)
+            # f.write(f'RUN usermod -aG sudo {docker_user}' + EOL)

             f.write(
                 # create the file with both lines and a trailing newline
                 f"RUN printf '{docker_user} ALL=(ALL) NOPASSWD: ALL\\n"
                 f"Defaults:{docker_user} !requiretty\\n' "
-                f"> /etc/sudoers.d/{docker_user} \\\n"
-                # lock down permissions
-                f"    && chmod 0440 /etc/sudoers.d/{docker_user}{EOL}"
+                f">> /etc/sudoers " + EOL
             )

             f.write('USER ' + docker_user + ":" + docker_group + EOL)

diff --git a/script/build-dockerfile/meta.yaml b/script/build-dockerfile/meta.yaml
index 68ea20689..6c91a29dd 100644
--- a/script/build-dockerfile/meta.yaml
+++ b/script/build-dockerfile/meta.yaml
@@ -37,6 +37,7 @@ input_mapping:
   docker_os: MLC_DOCKER_OS
   docker_os_version: MLC_DOCKER_OS_VERSION
   dockerfile_env: MLC_DOCKERFILE_ENV
+  dockerfile_build_env: MLC_DOCKERFILE_BUILD_ENV
   extra_sys_deps: MLC_DOCKER_EXTRA_SYS_DEPS
   fake_docker_deps: MLC_DOCKER_FAKE_DEPS
   fake_run_option: MLC_DOCKER_FAKE_RUN_OPTION

From 11ced8aefd272abbdc55c90d8caa33c4266a83b3 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Mon, 19 May 2025 12:25:41 +0100
Subject: [PATCH 27/43] Fix env export for get-mlperf-inference-src (#430)

---
 script/get-mlperf-inference-src/meta.yaml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/script/get-mlperf-inference-src/meta.yaml b/script/get-mlperf-inference-src/meta.yaml
index bd76d04da..852c85d68 100644
--- a/script/get-mlperf-inference-src/meta.yaml
+++ b/script/get-mlperf-inference-src/meta.yaml
@@ -27,7 +27,6 @@ new_env_keys:
 - MLC_MLPERF_INFERENCE_RGAT_PATH
 - MLC_MLPERF_INFERENCE_SOURCE
 - MLC_MLPERF_INFERENCE_SOURCE_VERSION
-- MLC_MLPERF_INFERENCE_VERSION
 - MLC_MLPERF_INFERENCE_VISION_PATH
 - MLC_MLPERF_LAST_RELEASE
 - MLC_MLPERF_INFERENCE_POINTPAINTING_PATH

From ccc4dcaa7e931f9073890a5e6a8d3ac6bd8fb1b5 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Mon, 19 May 2025 18:50:51 +0100
Subject: [PATCH 28/43] Fix ucx LD_LIBRARY_PATH for app-mlperf-inference-nvidia
 (#432)

---
 script/app-mlperf-inference-nvidia/customize.py | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/script/app-mlperf-inference-nvidia/customize.py b/script/app-mlperf-inference-nvidia/customize.py
index cc11ce229..5d56c7f96 100644
--- a/script/app-mlperf-inference-nvidia/customize.py
+++ b/script/app-mlperf-inference-nvidia/customize.py
@@ -705,7 +705,16 @@ def preprocess(i):
     env['MLC_RUN_CMD'] = run_cmd
     env['MLC_RUN_DIR'] = env['MLC_MLPERF_INFERENCE_NVIDIA_CODE_PATH']

-#    print(env)
+    if '+LD_LIBRARY_PATH' not in env:
+        env['+LD_LIBRARY_PATH'] = []
+
+    if os.path.exists("/opt/hpcx/ucx/lib"):
+        env['+LD_LIBRARY_PATH'].append("/opt/hpcx/ucx/lib")
+
+    if os.path.exists("/opt/hpcx/ucc/lib"):
+        env['+LD_LIBRARY_PATH'].append("/opt/hpcx/ucc/lib")
+
+    # print(env)

     return {'return': 0}
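Note: the LD_LIBRARY_PATH fix above appends the HPC-X UCX/UCC library directories only when they exist on the host. A generalised form of the same pattern, with deduplication added (an assumption; the committed code appends unconditionally once the directory exists):

    import os

    def append_lib_paths(env: dict, candidates: list) -> None:
        # Append each existing directory to +LD_LIBRARY_PATH exactly once.
        env.setdefault('+LD_LIBRARY_PATH', [])
        for path in candidates:
            if os.path.exists(path) and path not in env['+LD_LIBRARY_PATH']:
                env['+LD_LIBRARY_PATH'].append(path)

    env = {}
    append_lib_paths(env, ["/opt/hpcx/ucx/lib", "/opt/hpcx/ucc/lib"])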
From a57cb5d0c896f3e99bb8e997d3495423eded0ff1 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Wed, 21 May 2025 10:46:50 +0100
Subject: [PATCH 29/43] Added set-cpu-freq script (#435)

---
 script/set-cpu-frequency/customize.py | 60 ++++++++++++++++++++++++++
 script/set-cpu-frequency/meta.yaml    | 26 ++++++++++++
 script/set-cpu-frequency/run.sh       | 60 +++++++++++++++++++++++++++
 3 files changed, 146 insertions(+)
 create mode 100644 script/set-cpu-frequency/customize.py
 create mode 100644 script/set-cpu-frequency/meta.yaml
 create mode 100644 script/set-cpu-frequency/run.sh

diff --git a/script/set-cpu-frequency/customize.py b/script/set-cpu-frequency/customize.py
new file mode 100644
index 000000000..b4f878d3d
--- /dev/null
+++ b/script/set-cpu-frequency/customize.py
@@ -0,0 +1,60 @@
+from mlc import utils
+import os
+import re
+import subprocess
+
+
+def preprocess(i):
+
+    env = i['env']
+    state = i['state']
+
+    freq = env.get('MLC_TARGET_FREQ', '').strip()
+    if freq != '':
+        try:
+            freq = parse_target_freq(freq)
+        except ValueError as e:
+            return {'return': 1, 'error': str(e)}
+
+    os_info = i['os_info']
+
+    return {'return': 0}
+
+
+def parse_target_freq(raw: str) -> int | None:
+    """
+    Parse a freq string like '2300KHz', '2.3GHz', '2500M' or a plain integer
+    into an integer number of kHz. Returns None if the env var is empty.
+    """
+    if not raw:
+        return None
+
+    # match number[.number][unit], unit = k/M/G (case-insensitive),
+    # optional "Hz"
+    m = re.fullmatch(r"([0-9]+(?:\.[0-9]+)?)([KMGkmg])(?:[Hh][Zz])?", raw)
+    if m:
+        val, unit = m.group(1), m.group(2).lower()
+        val = float(val)
+        if unit == "g":
+            khz = int(val * 1_000_000)
+        elif unit == "m":
+            khz = int(val * 1_000)
+        else:  # "k"
+            khz = int(val)
+        return khz
+
+    # plain integer? treat as kHz
+    if raw.isdigit():
+        return int(raw)
+
+    raise ValueError(f"Invalid frequency format: '{raw}'")
+
+
+def postprocess(i):
+
+    env = i['env']
+    state = i['state']
+
+    os_info = i['os_info']
+
+    return {'return': 0}

diff --git a/script/set-cpu-frequency/meta.yaml b/script/set-cpu-frequency/meta.yaml
new file mode 100644
index 000000000..85e6960dc
--- /dev/null
+++ b/script/set-cpu-frequency/meta.yaml
@@ -0,0 +1,26 @@
+alias: set-cpu-frequency
+automation_alias: script
+automation_uid: 5b4e0237da074764
+category: MLC Sys Utils
+deps:
+  - tags: detect,os
+  - tags: detect,sudo
+new_env_keys: []
+new_state_keys: []
+post_deps: []
+posthook_deps: []
+prehook_deps: []
+input_mapping:
+  freq: MLC_TARGET_FREQ
+  frequency: MLC_TARGET_FREQ
+tags:
+- set
+- cpu
+- target
+- freq
+- frequency
+tests:
+  run_inputs:
+  - freq: 1GHz
+  - freq: ''
+uid: 1fe0500d7a2e4c6a

diff --git a/script/set-cpu-frequency/run.sh b/script/set-cpu-frequency/run.sh
new file mode 100644
index 000000000..57b22872a
--- /dev/null
+++ b/script/set-cpu-frequency/run.sh
@@ -0,0 +1,60 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+TARGET_FREQ="${MLC_TARGET_FREQ:-}"
+DRIVER_FILE="/sys/devices/system/cpu/cpu0/cpufreq/scaling_driver"
+
+if [[ ! -r $DRIVER_FILE ]]; then
+  echo "Error: cannot read $DRIVER_FILE. Is cpufreq enabled?" >&2
+  exit 2
+fi
+DRIVER=$(<"$DRIVER_FILE")
+echo "Detected cpufreq driver: $DRIVER"
+
+# Normalize AMD pstate variants
+if [[ $DRIVER == amd-pstate* ]]; then
+  DRIVER_KEY="amd-pstate"
+else
+  DRIVER_KEY="$DRIVER"
+fi
+
+
+case "$DRIVER_KEY" in
+  intel_pstate)
+    echo "→ intel_pstate: disabling turbo, setting performance governor"
+    echo 0 | ${MLC_SUDO} tee /sys/devices/system/cpu/intel_pstate/no_turbo >/dev/null
+    ${MLC_SUDO} cpupower frequency-set -g performance
+    ;;
+
+  amd-pstate)
+    echo "→ amd_pstate: enabling boost, setting performance governor"
+    # boost file is global under cpufreq
+    if [[ -w /sys/devices/system/cpu/cpufreq/boost ]]; then
+      echo 1 | ${MLC_SUDO} tee /sys/devices/system/cpu/cpufreq/boost >/dev/null
+    fi
+    ${MLC_SUDO} cpupower frequency-set -g performance
+    echo ""
+    echo "Note: amd-pstate does _not_ support a userspace/fixed frequency mode."
+    echo "If you need a precise kHz, switch back to acpi-cpufreq in your kernel cmdline."
+    ;;
+
+  acpi-cpufreq)
+    echo "→ acpi-cpufreq: switching to userspace governor + fixed freq"
+    if [[ -z "$TARGET_FREQ" ]]; then
+      TARGET_FREQ=$(< /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq)
+      echo "  No target given; defaulting to min freq = ${TARGET_FREQ} kHz"
+    fi
+    ${MLC_SUDO} cpupower frequency-set -g userspace
+    ${MLC_SUDO} cpupower frequency-set -f "${TARGET_FREQ}"
+    ;;
+
+  *)
+    echo "Unsupported driver: $DRIVER" >&2
+    exit 3
+    ;;
+esac
+
+echo ""
+echo "Resulting settings for CPU0:"
+cpupower frequency-info | sed -n '1,5p'
+
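Note: quick sanity checks for the parse_target_freq helper above (all results are in kHz; values with exact binary representations are chosen because the function truncates via int() rather than rounding):

    assert parse_target_freq("2300KHz") == 2300
    assert parse_target_freq("2.5GHz") == 2_500_000
    assert parse_target_freq("2500M") == 2_500_000
    assert parse_target_freq("1800000") == 1_800_000   # plain integer -> kHz
    assert parse_target_freq("") is None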
From 800639535ceef97f4b9abc718ee4bfce255407db Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Thu, 22 May 2025 18:09:22 +0100
Subject: [PATCH 30/43] Update run-tests-on-modified-meta.yml

---
 .github/workflows/run-tests-on-modified-meta.yml | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/.github/workflows/run-tests-on-modified-meta.yml b/.github/workflows/run-tests-on-modified-meta.yml
index 244b18a6f..8c7416359 100644
--- a/.github/workflows/run-tests-on-modified-meta.yml
+++ b/.github/workflows/run-tests-on-modified-meta.yml
@@ -35,6 +35,11 @@ jobs:
           git fetch upstream
           changed_files=$(git diff upstream/${{ github.event.pull_request.base.ref }} --name-only)
           echo "$changed_files" | python3 .github/scripts/list_modified_files.py
+
+      - name: Debug Show processed_files
+        run: |
+          echo "Processed files: '${{ steps.filter-modified-files.outputs.processed_files }}'"
+

   process_modified_files:
     runs-on: ubuntu-latest

From 5b24393fedd64416b030ebe8364bb1125de9ba93 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Thu, 22 May 2025 22:12:23 +0100
Subject: [PATCH 31/43] Added get-lib-jemalloc (#437), fix automatic test on
 modified meta

* Added get-lib-jemalloc

* Update run-tests-on-modified-meta.yml

---
 .github/scripts/list_modified_files.py           |  4 +-
 .github/workflows/run-tests-on-modified-meta.yml | 68 +++++++++++--------
 script/get-lib-jemalloc/customize.py             | 29 ++++++++
 script/get-lib-jemalloc/meta.yaml                | 45 ++++++++++++
 script/get-lib-jemalloc/run.sh                   | 13 ++++
 5 files changed, 128 insertions(+), 31 deletions(-)
 create mode 100644 script/get-lib-jemalloc/customize.py
 create mode 100644 script/get-lib-jemalloc/meta.yaml
 create mode 100644 script/get-lib-jemalloc/run.sh

diff --git a/.github/scripts/list_modified_files.py b/.github/scripts/list_modified_files.py
index c3c2260a7..65fdacbd1 100644
--- a/.github/scripts/list_modified_files.py
+++ b/.github/scripts/list_modified_files.py
@@ -17,7 +17,7 @@ def get_file_info(filepath):


 def process_files(files):
-    filenames = files.split()
+    filenames = files.split(",")
     return [
         {
             "file": file,
@@ -34,4 +34,4 @@ def process_files(files):
     changed_files = sys.stdin.read().strip()
     processed_files = process_files(changed_files)
     json_processed_files = json.dumps(processed_files)
-    print(f"::set-output name=processed_files::{json_processed_files}")
+    print(json_processed_files)

diff --git a/.github/workflows/run-tests-on-modified-meta.yml b/.github/workflows/run-tests-on-modified-meta.yml
index 8c7416359..86932d896 100644
--- a/.github/workflows/run-tests-on-modified-meta.yml
+++ b/.github/workflows/run-tests-on-modified-meta.yml
@@ -1,4 +1,3 @@
-# This workflow will run configured tests for any updated MLC script
 name: Test script on modified meta

 on:
@@ -11,40 +10,52 @@ jobs:
   get_modified_files:
     runs-on: ubuntu-latest
     outputs:
-      processed_files: ${{ steps.modified-files.outputs.processed_files }}
+      processed_files: ${{ steps.filter-modified-files.outputs.processed_files }}

     steps:
-      - name: 'Checkout'
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 2
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - name: Setup Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.x'
+
+      - name: Install dependencies
+        run: pip install pyyaml

-      - name: Setup Python
-        uses: actions/setup-python@v2
-        with:
-          python-version: '3.x'
+      - name: Fetch base branch
+        run: |
+          git fetch origin +refs/heads/${{ github.event.pull_request.base.ref }}:refs/remotes/origin/${{ github.event.pull_request.base.ref }}

-      - name: Install dependencies
-        run: |
-          pip install pyyaml
+      - name: Get list of changed files
+        id: modified-files
+        run: |
+          git diff --name-only origin/${{ github.event.pull_request.base.ref }}...HEAD > changed_files.txt
+          files=$(paste -sd, changed_files.txt)
+          echo "files=$files" >> $GITHUB_OUTPUT

-      - name: Get changed files
-        id: modified-files
-        run: |
-          git remote add upstream ${{ github.event.pull_request.base.repo.clone_url }}
-          git fetch upstream
-          changed_files=$(git diff upstream/${{ github.event.pull_request.base.ref }} --name-only)
-          echo "$changed_files" | python3 .github/scripts/list_modified_files.py
-
-      - name: Debug Show processed_files
-        run: |
-          echo "Processed files: '${{ steps.filter-modified-files.outputs.processed_files }}'"
+      - name: Filter changed files
+        id: filter-modified-files
+        env:
+          FILES: ${{ steps.modified-files.outputs.files }}
+        run: |
+          processed=$(echo "$FILES" | python3 .github/scripts/list_modified_files.py)
+          echo "processed_files<<EOF" >> $GITHUB_OUTPUT
+          echo "$processed" >> $GITHUB_OUTPUT
+          echo "EOF" >> $GITHUB_OUTPUT

+      - name: Debug processed_files output
+        run: |
+          echo "Processed files output:"
+          echo "${{ steps.filter-modified-files.outputs.processed_files }}"

   process_modified_files:
-    runs-on: ubuntu-latest
     needs: get_modified_files
-    if: needs.determine_modified_files.outputs.processed_files != '[]' && needs.determine_modified_files.outputs.processed_files != ''
+    runs-on: ubuntu-latest
+    if: needs.get_modified_files.outputs.processed_files != '[]'
     strategy:
       fail-fast: false
       matrix:
@@ -58,8 +69,7 @@ jobs:

       - name: Process meta.yaml file
         run: |
-          echo "Processing ${{ matrix.file_info.file }} with run number ${{ matrix.file_info.num_run }}"
-
+          echo "Processing ${{ matrix.file_info.file }} (run #${{ matrix.file_info.num_run }})"
           pip install mlcflow
           mlc pull repo ${{ github.event.pull_request.head.repo.html_url }} --branch=${{ github.event.pull_request.head.ref }}
-          mlc test script ${{ matrix.file_info.uid}} --test_input_index=${{ matrix.file_info.num_run }} --docker_mlc_repo=${{ github.event.pull_request.head.repo.html_url }} --docker_mlc_repo_branch=${{ github.event.pull_request.head.ref }} --quiet
+          mlc test script ${{ matrix.file_info.uid }} --test_input_index=${{ matrix.file_info.num_run }} --docker_mlc_repo=${{ github.event.pull_request.head.repo.html_url }} --docker_mlc_repo_branch=${{ github.event.pull_request.head.ref }} --quiet

diff --git a/script/get-lib-jemalloc/customize.py b/script/get-lib-jemalloc/customize.py
new file mode 100644
index 000000000..a82d6a829
--- /dev/null
+++ b/script/get-lib-jemalloc/customize.py
@@ -0,0 +1,29 @@
+from mlc import utils
+import os
+import subprocess
+
+
+def preprocess(i):
+
+    env = i['env']
+    state = i['state']
+
+    os_info = i['os_info']
+
+    return {'return': 0}
+
+
+def postprocess(i):
+
+    env = i['env']
+    state = i['state']
+
+    os_info = i['os_info']
+
+    lib_path = os.path.join(os.getcwd(), "obj", "lib")
+
+    env['+LD_LIBRARY_PATH'] = lib_path
+    env['MLC_JEMALLOC_LIB_PATH'] = lib_path
+    env['MLC_DEPENDENT_CACHED_PATH'] = os.path.join(lib_path, "libjemalloc.so")
+
+    return {'return': 0}

diff --git a/script/get-lib-jemalloc/meta.yaml b/script/get-lib-jemalloc/meta.yaml
new file mode 100644
index 000000000..929152152
--- /dev/null
+++ b/script/get-lib-jemalloc/meta.yaml
@@ -0,0 +1,45 @@
+alias: get-lib-jemalloc
+automation_alias: script
+automation_uid: 5b4e0237da074764
+category: Detection or installation of tools and artifacts
+cache: true
+deps:
+  - tags: get,git,repo
+    env:
+      MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_JEMALLOC_SRC_PATH
+    force_env_keys:
+      - MLC_GIT_*
+    update_tags_from_env_with_prefix:
+      _branch.:
+        - MLC_GIT_CHECKOUT
+      _repo.:
+        - MLC_GIT_URL
+      _sha.:
+        - MLC_GIT_SHA
+      _submodules.:
+        - MLC_GIT_SUBMODULES
+    names:
+      - jemalloc-repo
+    extra_cache_tags: jemalloc,repo,jemalloc-repo
+new_env_keys:
+  - MLC_JEMALLOC_LIB_PATH
+  - +LD_LIBRARY_PATH
+new_state_keys: []
+post_deps: []
+posthook_deps: []
+prehook_deps: []
+tags:
+- get
+- lib
+- lib-jemalloc
+- jemalloc
+tests:
+  run_inputs:
+  - quiet: true
+uid: 406439a446e04fb7
+variations:
+  version.official:
+    group: version
+    default: true
+    env:
+      MLC_GIT_URL: https://github.com/jemalloc/jemalloc.git

diff --git a/script/get-lib-jemalloc/run.sh b/script/get-lib-jemalloc/run.sh
new file mode 100644
index 000000000..2d156cf42
--- /dev/null
+++ b/script/get-lib-jemalloc/run.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+set -e
+
+#Add your run commands here...
+# run "$MLC_RUN_CMD"
+cd ${MLC_JEMALLOC_SRC_PATH}
+autoconf
+cd -
+mkdir -p obj
+cd obj
+${MLC_JEMALLOC_SRC_PATH}/configure --enable-autogen
+make
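Note: after the script above exports MLC_JEMALLOC_LIB_PATH, a quick way to confirm the freshly built library actually loads is to open it with ctypes and probe a known jemalloc entry point. A hedged sketch (the fallback path "obj/lib" is an assumption about the build layout):

    import ctypes
    import os

    lib_dir = os.environ.get("MLC_JEMALLOC_LIB_PATH", "obj/lib")
    jemalloc = ctypes.CDLL(os.path.join(lib_dir, "libjemalloc.so"))
    # mallctl is jemalloc's control interface; a healthy build exposes it.
    print(hasattr(jemalloc, "mallctl"))  # True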
filenames if os.path.basename(file) == 'meta.yaml' + for uid, num_tests in [get_file_info(file)] + for i in range(1, num_tests + 1) + ] + + +if __name__ == "__main__": + changed_files = sys.stdin.read().strip() + processed_files = process_files(changed_files) + json_processed_files = json.dumps(processed_files) + print(json_processed_files) diff --git a/.github/workflows/run-tests-on-modified-meta-with-secret.yml b/.github/workflows/run-tests-on-modified-meta-with-secret.yml new file mode 100644 index 000000000..d6a8b2c1f --- /dev/null +++ b/.github/workflows/run-tests-on-modified-meta-with-secret.yml @@ -0,0 +1,75 @@ +name: Test script on modified meta (with secret) + +on: + pull_request_target: + branches: [ "main", "dev" ] + paths: + - 'script/**meta.yaml' + +jobs: + get_modified_files: + runs-on: ubuntu-latest + outputs: + processed_files: ${{ steps.filter-modified-files.outputs.processed_files }} + + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Setup Python + uses: actions/setup-python@v4 + with: + python-version: '3.x' + + - name: Install dependencies + run: pip install pyyaml + + - name: Fetch base branch + run: | + git fetch origin +refs/heads/${{ github.event.pull_request.base.ref }}:refs/remotes/origin/${{ github.event.pull_request.base.ref }} + + - name: Get list of changed files + id: modified-files + run: | + git diff --name-only origin/${{ github.event.pull_request.base.ref }}...HEAD > changed_files.txt + files=$(paste -sd, changed_files.txt) + echo "files=$files" >> $GITHUB_OUTPUT + + - name: Filter changed files + id: filter-modified-files + env: + FILES: ${{ steps.modified-files.outputs.files }} + run: | + processed=$(echo "$FILES" | python3 .github/scripts/list_modified_scripts_with_secrets.py) + echo "processed_files<> $GITHUB_OUTPUT + echo "$processed" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + + - name: Debug processed_files output + run: | + echo "Processed files output:" + echo "${{ steps.filter-modified-files.outputs.processed_files }}" + + process_modified_files: + needs: get_modified_files + runs-on: ubuntu-latest + if: needs.get_modified_files.outputs.processed_files != '[]' + strategy: + fail-fast: false + matrix: + file_info: ${{ fromJSON(needs.get_modified_files.outputs.processed_files) }} + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 2 + + - name: Process meta.yaml file + run: | + echo "Processing ${{ matrix.file_info.file }} (run #${{ matrix.file_info.num_run }})" + pip install mlcflow + mlc pull repo ${{ github.event.pull_request.head.repo.html_url }} --branch=${{ github.event.pull_request.head.ref }} + mlc test script ${{ matrix.file_info.uid }} --test_input_index=${{ matrix.file_info.num_run }} --docker_mlc_repo=${{ github.event.pull_request.head.repo.html_url }} --docker_mlc_repo_branch=${{ github.event.pull_request.head.ref }} --quiet From 44cd55ea0df260e2ccf62fdd6fad87425d16b546 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Fri, 23 May 2025 00:31:40 +0100 Subject: [PATCH 33/43] Delete .github/workflows/run-tests-on-modified-meta-with-secret.yml --- ...run-tests-on-modified-meta-with-secret.yml | 75 ------------------- 1 file changed, 75 deletions(-) delete mode 100644 .github/workflows/run-tests-on-modified-meta-with-secret.yml diff --git a/.github/workflows/run-tests-on-modified-meta-with-secret.yml b/.github/workflows/run-tests-on-modified-meta-with-secret.yml deleted file mode 100644 index d6a8b2c1f..000000000 --- 
From 44cd55ea0df260e2ccf62fdd6fad87425d16b546 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Fri, 23 May 2025 00:31:40 +0100
Subject: [PATCH 33/43] Delete .github/workflows/run-tests-on-modified-meta-with-secret.yml

---
 ...run-tests-on-modified-meta-with-secret.yml | 75 ------------------
 1 file changed, 75 deletions(-)
 delete mode 100644 .github/workflows/run-tests-on-modified-meta-with-secret.yml

diff --git a/.github/workflows/run-tests-on-modified-meta-with-secret.yml b/.github/workflows/run-tests-on-modified-meta-with-secret.yml
deleted file mode 100644
index d6a8b2c1f..000000000
--- a/.github/workflows/run-tests-on-modified-meta-with-secret.yml
+++ /dev/null
@@ -1,75 +0,0 @@
-name: Test script on modified meta (with secret)
-
-on:
-  pull_request_target:
-    branches: [ "main", "dev" ]
-    paths:
-      - 'script/**meta.yaml'
-
-jobs:
-  get_modified_files:
-    runs-on: ubuntu-latest
-    outputs:
-      processed_files: ${{ steps.filter-modified-files.outputs.processed_files }}
-
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-
-      - name: Setup Python
-        uses: actions/setup-python@v4
-        with:
-          python-version: '3.x'
-
-      - name: Install dependencies
-        run: pip install pyyaml
-
-      - name: Fetch base branch
-        run: |
-          git fetch origin +refs/heads/${{ github.event.pull_request.base.ref }}:refs/remotes/origin/${{ github.event.pull_request.base.ref }}
-
-      - name: Get list of changed files
-        id: modified-files
-        run: |
-          git diff --name-only origin/${{ github.event.pull_request.base.ref }}...HEAD > changed_files.txt
-          files=$(paste -sd, changed_files.txt)
-          echo "files=$files" >> $GITHUB_OUTPUT
-
-      - name: Filter changed files
-        id: filter-modified-files
-        env:
-          FILES: ${{ steps.modified-files.outputs.files }}
-        run: |
-          processed=$(echo "$FILES" | python3 .github/scripts/list_modified_scripts_with_secrets.py)
-          echo "processed_files<<EOF" >> $GITHUB_OUTPUT
-          echo "$processed" >> $GITHUB_OUTPUT
-          echo "EOF" >> $GITHUB_OUTPUT
-
-      - name: Debug processed_files output
-        run: |
-          echo "Processed files output:"
-          echo "${{ steps.filter-modified-files.outputs.processed_files }}"
-
-  process_modified_files:
-    needs: get_modified_files
-    runs-on: ubuntu-latest
-    if: needs.get_modified_files.outputs.processed_files != '[]'
-    strategy:
-      fail-fast: false
-      matrix:
-        file_info: ${{ fromJSON(needs.get_modified_files.outputs.processed_files) }}
-
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 2
-
-      - name: Process meta.yaml file
-        run: |
-          echo "Processing ${{ matrix.file_info.file }} (run #${{ matrix.file_info.num_run }})"
-          pip install mlcflow
-          mlc pull repo ${{ github.event.pull_request.head.repo.html_url }} --branch=${{ github.event.pull_request.head.ref }}
-          mlc test script ${{ matrix.file_info.uid }} --test_input_index=${{ matrix.file_info.num_run }} --docker_mlc_repo=${{ github.event.pull_request.head.repo.html_url }} --docker_mlc_repo_branch=${{ github.event.pull_request.head.ref }} --quiet

From 3390e64870105ca07c8e75ae708897b0455c1a44 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Fri, 23 May 2025 00:31:57 +0100
Subject: [PATCH 34/43] Delete .github/scripts/list_modified_scripts_with_secrets.py

---
 .../list_modified_scripts_with_secrets.py | 38 -------------------
 1 file changed, 38 deletions(-)
 delete mode 100644 .github/scripts/list_modified_scripts_with_secrets.py

diff --git a/.github/scripts/list_modified_scripts_with_secrets.py b/.github/scripts/list_modified_scripts_with_secrets.py
deleted file mode 100644
index f3a8df980..000000000
--- a/.github/scripts/list_modified_scripts_with_secrets.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import yaml
-import sys
-import json
-import os
-
-
-def get_file_info(filepath):
-    with open(filepath, 'r') as file:
-        content = yaml.safe_load(file)
-    tests = content.get('tests', [])
-    needs_pat = content.get('needs_pat', False)
-    if tests and needs_pat:
-        num_tests = len(tests.get('run_inputs', []))
-    else:
-        num_tests = 0
-    uid = content['uid']
-    return uid, num_tests
-
-
-def process_files(files):
-    filenames = files.split(",")
-    return [
-        {
-            "file": file,
-            "uid": uid,
-            "num_run": i
-        }
-        for file in filenames if os.path.basename(file) == 'meta.yaml'
-        for uid, num_tests in [get_file_info(file)]
-        for i in range(1, num_tests + 1)
-    ]
-
-
-if __name__ == "__main__":
-    changed_files = sys.stdin.read().strip()
-    processed_files = process_files(changed_files)
-    json_processed_files = json.dumps(processed_files)
-    print(json_processed_files)

From 9078b60a07260ed86575f48a9e88fd74c794622b Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Fri, 23 May 2025 00:39:52 +0100
Subject: [PATCH 35/43] Update main.py | Removed mlperf.conf from automotive
 reference implementation

---
 .../ref/python/main.py | 11 -----------
 1 file changed, 11 deletions(-)

diff --git a/script/app-mlperf-automotive-mlcommons-python/ref/python/main.py b/script/app-mlperf-automotive-mlcommons-python/ref/python/main.py
index 255554f82..5f509f72f 100644
--- a/script/app-mlperf-automotive-mlcommons-python/ref/python/main.py
+++ b/script/app-mlperf-automotive-mlcommons-python/ref/python/main.py
@@ -148,11 +148,6 @@ def get_args():
         action="store_true",
         help="debug, turn traces on")
 
-    # file to use mlperf rules compliant parameters
-    parser.add_argument(
-        "--mlperf_conf",
-        default="../../mlperf.conf",
-        help="mlperf rules config")
     # file for user LoadGen settings such as target QPS
     parser.add_argument(
         "--user_conf",
@@ -466,11 +461,6 @@ def main():
         "cmdline": str(args),
     }
 
-    mlperf_conf = os.path.abspath(args.mlperf_conf)
-    if not os.path.exists(mlperf_conf):
-        log.error("{} not found".format(mlperf_conf))
-        sys.exit(1)
-
     user_conf = os.path.abspath(args.user_conf)
     if not os.path.exists(user_conf):
         log.error("{} not found".format(user_conf))
@@ -526,7 +516,6 @@ def flush_queries():
     log_settings.log_output = log_output_settings
 
     settings = lg.TestSettings()
-    settings.FromConfig(mlperf_conf, args.model_name, args.scenario)
     settings.FromConfig(user_conf, args.model_name, args.scenario)
     settings.scenario = scenario
     settings.mode = lg.TestMode.PerformanceOnly

From 7db8919dffa717b9e817a39dffde89a961c45b80 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Fri, 23 May 2025 15:16:16 +0100
Subject: [PATCH 36/43] Added JEMALLOC_PATH for lib,jemalloc (#439)

* Added JEMALLOC_PATH for lib,jemalloc

* Added autoconf dependency for lib-jemalloc
---
 .../customize.py | 6 ------
 script/get-generic-sys-util/meta.yaml | 14 ++++++++++++++
 script/get-lib-jemalloc/customize.py | 1 +
 script/get-lib-jemalloc/meta.yaml | 2 ++
 4 files changed, 17 insertions(+), 6 deletions(-)

diff --git a/script/app-mlperf-automotive-mlcommons-python/customize.py b/script/app-mlperf-automotive-mlcommons-python/customize.py
index 6ea96b377..16805cc35 100644
--- a/script/app-mlperf-automotive-mlcommons-python/customize.py
+++ b/script/app-mlperf-automotive-mlcommons-python/customize.py
@@ -81,12 +81,6 @@ def preprocess(i):
             env['MLC_MLPERF_INFERENCE_SOURCE'], "mlperf.conf")
 
     x = "" if os_info['platform'] == 'windows' else "'"
-    if "llama2-70b" in env['MLC_MODEL']:
-        env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf-conf " + \
-            x + env['MLC_MLPERF_CONF'] + x
-    else:
-        env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf_conf " + \
-            x + env['MLC_MLPERF_CONF'] + x
 
     env['MODEL_DIR'] = env.get('MLC_ML_MODEL_PATH')
     if not env['MODEL_DIR']:
diff --git a/script/get-generic-sys-util/meta.yaml b/script/get-generic-sys-util/meta.yaml
index 5f366d775..eac6ecdb1 100644
--- a/script/get-generic-sys-util/meta.yaml
+++ b/script/get-generic-sys-util/meta.yaml
@@ -52,6 +52,20 @@ tests:
       docker_prune: 'yes'
 uid: bb0393afa8404a11
 variations:
+  autoconf:
+    env:
+      MLC_SYS_UTIL_NAME: 
autoconf + MLC_SYS_UTIL_VERSION_CMD: autoconf --version + MLC_SYS_UTIL_VERSION_RE: autoconf \(.*\) ([\d.]+) + MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 1 + new_env_keys: + - MLC_AUTOCONF_VERSION + state: + autoconf: + apt: autoconf + brew: autoconf + dnf: autoconf + yum: autoconf cmake: env: MLC_SYS_UTIL_NAME: cmake diff --git a/script/get-lib-jemalloc/customize.py b/script/get-lib-jemalloc/customize.py index a82d6a829..7cb73c6c9 100644 --- a/script/get-lib-jemalloc/customize.py +++ b/script/get-lib-jemalloc/customize.py @@ -23,6 +23,7 @@ def postprocess(i): lib_path = os.path.join(os.getcwd(), "obj", "lib") env['+LD_LIBRARY_PATH'] = lib_path + env['MLC_JEMALLOC_PATH'] = os.path.dirname(lib_path) env['MLC_JEMALLOC_LIB_PATH'] = lib_path env['MLC_DEPENDENT_CACHED_PATH'] = os.path.join(lib_path, "libjemalloc.so") diff --git a/script/get-lib-jemalloc/meta.yaml b/script/get-lib-jemalloc/meta.yaml index 929152152..8a42e9117 100644 --- a/script/get-lib-jemalloc/meta.yaml +++ b/script/get-lib-jemalloc/meta.yaml @@ -21,7 +21,9 @@ deps: names: - jemalloc-repo extra_cache_tags: jemalloc,repo,jemalloc-repo + - tags: get,generic-sys-util,_autoconf new_env_keys: + - MLC_JEMALLOC_PATH - MLC_JEMALLOC_LIB_PATH - +LD_LIBRARY_PATH new_state_keys: [] From 9553d0a1e197d3b67a76c6a86da5a718eba24d25 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Mon, 26 May 2025 13:52:30 +0530 Subject: [PATCH 37/43] Add automotive reference implementation workflow (#407) --- .../workflows/run-tests-on-modified-meta.yml | 71 +-- .github/workflows/test-mlperf-automotive.yml | 43 ++ .../customize.py | 541 +++++++++-------- .../meta.yaml | 170 +++++- script/app-mlperf-automotive/customize.py | 524 +++++++++++++---- script/app-mlperf-automotive/meta.yaml | 191 +++++- script/build-dockerfile/customize.py | 2 +- .../meta.yaml | 23 +- .../customize.py | 7 + .../get-dataset-cognata-mlcommons/meta.yaml | 80 ++- script/get-dataset-cognata-mlcommons/run.sh | 16 + script/get-dataset-nuscenes/COPYRIGHT.md | 9 + script/get-dataset-nuscenes/customize.py | 23 + script/get-dataset-nuscenes/meta.yaml | 57 ++ script/get-dataset-nuscenes/run.sh | 20 + script/get-generic-sys-util/meta.yaml | 14 + script/get-git-repo/meta.yaml | 2 +- .../customize.py | 11 +- .../get-ml-model-abtf-ssd-pytorch/meta.yaml | 89 ++- script/get-ml-model-bevformer/COPYRIGHT.md | 9 + script/get-ml-model-bevformer/customize.py | 31 + script/get-ml-model-bevformer/meta.yaml | 81 +++ script/get-ml-model-bevformer/run.sh | 1 + .../get-ml-model-deeplabv3_plus/COPYRIGHT.md | 9 + .../get-ml-model-deeplabv3_plus/customize.py | 30 + script/get-ml-model-deeplabv3_plus/meta.yaml | 91 +++ script/get-ml-model-deeplabv3_plus/run.sh | 2 + script/get-mlperf-automotive-src/COPYRIGHT.md | 9 + script/get-mlperf-automotive-src/customize.py | 154 +++++ script/get-mlperf-automotive-src/meta.yaml | 103 ++++ .../get-mlperf-automotive-utils/COPYRIGHT.md | 9 + .../get-mlperf-automotive-utils/customize.py | 36 ++ script/get-mlperf-automotive-utils/meta.yaml | 25 + .../mlperf_utils.py | 353 +++++++++++ script/get-mlperf-inference-loadgen/meta.yaml | 19 +- .../COPYRIGHT.md | 9 + .../README.md | 1 + .../customize.py | 24 + .../meta.yaml | 114 ++++ .../get-preprocessed-dataset-cognata/run.sh | 9 + .../COPYRIGHT.md | 9 + .../README.md | 1 + .../customize.py | 36 ++ .../meta.yaml | 132 +++++ .../get-preprocessed-dataset-nuscenes/run.sh | 13 + script/process-mlperf-accuracy/customize.py | 554 +++++++++--------- script/process-mlperf-accuracy/meta.yaml | 51 ++ 
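For a concrete sense of how downstream scripts can consume the new jemalloc outputs, a minimal sketch follows (the benchmark binary and env handling are illustrative, not part of the patch):

```python
# Hypothetical consumer: preload the jemalloc built above for a child
# process, using the MLC_JEMALLOC_LIB_PATH exported by get-lib-jemalloc.
import os
import subprocess

env = dict(os.environ)
env["LD_PRELOAD"] = os.path.join(env["MLC_JEMALLOC_LIB_PATH"], "libjemalloc.so")
env["LD_LIBRARY_PATH"] = env["MLC_JEMALLOC_LIB_PATH"] + ":" + env.get("LD_LIBRARY_PATH", "")
subprocess.run(["./my_benchmark"], env=env, check=True)  # ./my_benchmark is a placeholder
```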
script/run-mlperf-automotive-app/customize.py | 45 +- script/run-mlperf-automotive-app/meta.yaml | 12 +- 49 files changed, 3126 insertions(+), 739 deletions(-) create mode 100644 .github/workflows/test-mlperf-automotive.yml create mode 100644 script/get-dataset-cognata-mlcommons/run.sh create mode 100644 script/get-dataset-nuscenes/COPYRIGHT.md create mode 100644 script/get-dataset-nuscenes/customize.py create mode 100644 script/get-dataset-nuscenes/meta.yaml create mode 100644 script/get-dataset-nuscenes/run.sh create mode 100644 script/get-ml-model-bevformer/COPYRIGHT.md create mode 100644 script/get-ml-model-bevformer/customize.py create mode 100644 script/get-ml-model-bevformer/meta.yaml create mode 100644 script/get-ml-model-bevformer/run.sh create mode 100644 script/get-ml-model-deeplabv3_plus/COPYRIGHT.md create mode 100644 script/get-ml-model-deeplabv3_plus/customize.py create mode 100644 script/get-ml-model-deeplabv3_plus/meta.yaml create mode 100644 script/get-ml-model-deeplabv3_plus/run.sh create mode 100644 script/get-mlperf-automotive-src/COPYRIGHT.md create mode 100644 script/get-mlperf-automotive-src/customize.py create mode 100644 script/get-mlperf-automotive-src/meta.yaml create mode 100644 script/get-mlperf-automotive-utils/COPYRIGHT.md create mode 100644 script/get-mlperf-automotive-utils/customize.py create mode 100644 script/get-mlperf-automotive-utils/meta.yaml create mode 100644 script/get-mlperf-automotive-utils/mlperf_utils.py create mode 100644 script/get-preprocessed-dataset-cognata/COPYRIGHT.md create mode 100644 script/get-preprocessed-dataset-cognata/README.md create mode 100644 script/get-preprocessed-dataset-cognata/customize.py create mode 100644 script/get-preprocessed-dataset-cognata/meta.yaml create mode 100644 script/get-preprocessed-dataset-cognata/run.sh create mode 100644 script/get-preprocessed-dataset-nuscenes/COPYRIGHT.md create mode 100644 script/get-preprocessed-dataset-nuscenes/README.md create mode 100644 script/get-preprocessed-dataset-nuscenes/customize.py create mode 100644 script/get-preprocessed-dataset-nuscenes/meta.yaml create mode 100644 script/get-preprocessed-dataset-nuscenes/run.sh diff --git a/.github/workflows/run-tests-on-modified-meta.yml b/.github/workflows/run-tests-on-modified-meta.yml index 86932d896..a2f3a5fbe 100644 --- a/.github/workflows/run-tests-on-modified-meta.yml +++ b/.github/workflows/run-tests-on-modified-meta.yml @@ -1,3 +1,4 @@ +# This workflow will run configured tests for any updated MLC script name: Test script on modified meta on: @@ -10,52 +11,35 @@ jobs: get_modified_files: runs-on: ubuntu-latest outputs: - processed_files: ${{ steps.filter-modified-files.outputs.processed_files }} + processed_files: ${{ steps.modified-files.outputs.processed_files }} steps: - - name: Checkout - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Setup Python - uses: actions/setup-python@v4 - with: - python-version: '3.x' - - - name: Install dependencies - run: pip install pyyaml - - - name: Fetch base branch - run: | - git fetch origin +refs/heads/${{ github.event.pull_request.base.ref }}:refs/remotes/origin/${{ github.event.pull_request.base.ref }} - - - name: Get list of changed files - id: modified-files - run: | - git diff --name-only origin/${{ github.event.pull_request.base.ref }}...HEAD > changed_files.txt - files=$(paste -sd, changed_files.txt) - echo "files=$files" >> $GITHUB_OUTPUT - - - name: Filter changed files - id: filter-modified-files - env: - FILES: ${{ steps.modified-files.outputs.files }} - 
run: | - processed=$(echo "$FILES" | python3 .github/scripts/list_modified_files.py) - echo "processed_files<> $GITHUB_OUTPUT - echo "$processed" >> $GITHUB_OUTPUT - echo "EOF" >> $GITHUB_OUTPUT - - - name: Debug processed_files output - run: | - echo "Processed files output:" - echo "${{ steps.filter-modified-files.outputs.processed_files }}" + - name: 'Checkout' + uses: actions/checkout@v4 + with: + fetch-depth: 2 + + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: '3.x' + + - name: Install dependencies + run: | + pip install pyyaml + + - name: Get changed files + id: modified-files + run: | + git remote add upstream ${{ github.event.pull_request.base.repo.clone_url }} + git fetch upstream + changed_files=$(git diff upstream/${{ github.event.pull_request.base.ref }} --name-only | paste -sd, -) + echo "$changed_files" | python3 .github/scripts/list_modified_files.py process_modified_files: - needs: get_modified_files runs-on: ubuntu-latest - if: needs.get_modified_files.outputs.processed_files != '[]' + needs: get_modified_files + if: needs.determine_modified_files.outputs.processed_files != '[]' && needs.determine_modified_files.outputs.processed_files != '' strategy: fail-fast: false matrix: @@ -69,7 +53,8 @@ jobs: - name: Process meta.yaml file run: | - echo "Processing ${{ matrix.file_info.file }} (run #${{ matrix.file_info.num_run }})" + echo "Processing ${{ matrix.file_info.file }} with run number ${{ matrix.file_info.num_run }}" + pip install mlcflow mlc pull repo ${{ github.event.pull_request.head.repo.html_url }} --branch=${{ github.event.pull_request.head.ref }} - mlc test script ${{ matrix.file_info.uid }} --test_input_index=${{ matrix.file_info.num_run }} --docker_mlc_repo=${{ github.event.pull_request.head.repo.html_url }} --docker_mlc_repo_branch=${{ github.event.pull_request.head.ref }} --quiet + mlc test script ${{ matrix.file_info.uid}} --test_input_index=${{ matrix.file_info.num_run }} --docker_mlc_repo=${{ github.event.pull_request.head.repo.html_url }} --docker_mlc_repo_branch=${{ github.event.pull_request.head.ref }} --quiet diff --git a/.github/workflows/test-mlperf-automotive.yml b/.github/workflows/test-mlperf-automotive.yml new file mode 100644 index 000000000..a03560dc1 --- /dev/null +++ b/.github/workflows/test-mlperf-automotive.yml @@ -0,0 +1,43 @@ +name: Test MLPerf automotive + +on: + pull_request_target: + branches: [ "main", "dev" ] + paths: + - 'script/**meta.yaml' + +jobs: + fetch-secret: + runs-on: ubuntu-latest + outputs: + gdrive_secret: ${{ steps.op-load-secret.outputs.GDRIVE_SERVICE_ACCOUNT_KEY }} + steps: + - name: Load secret from 1Password + id: op-load-secret + uses: 1password/load-secrets-action@v2 + with: + export-env: false + env: + OP_SERVICE_ACCOUNT_TOKEN: ${{ secrets.OP_SERVICE_ACCOUNT_TOKEN }} + GDRIVE_SERVICE_ACCOUNT_KEY: op://7basd2jirojjckncf6qnq3azai/bzbaco3uxoqs2rcyu42rvuccga/credential + + run-mlperf: + runs-on: ubuntu-latest + needs: + - fetch-secret + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 2 + - name: Set RCLONE Service account env var from secret + shell: bash + run: | + echo "::add-mask::${{ needs.fetch-secret.outputs.gdrive_secret }}" + echo "RCLONE_CONFIG_MLC_COGNATA_SERVICE_ACCOUNT_CREDENTIALS=${{ needs.fetch-secret.outputs.gdrive_secret }}" >> $GITHUB_ENV + echo "RCLONE_CONFIG_MLC_NUSCENES_SERVICE_ACCOUNT_CREDENTIALS=${{ needs.fetch-secret.outputs.gdrive_secret }}" >> $GITHUB_ENV + - name: Run MLPerf + run: | + pip install mlcflow + mlc pull repo 
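To pin down the data contract in the reworked run-tests-on-modified-meta.yml above: the "Get changed files" step hands list_modified_files.py one comma-separated line of changed paths on stdin, and only entries whose basename is meta.yaml survive the filter. A minimal sketch (paths are illustrative):

```python
# Sketch of what list_modified_files.py receives and keeps (made-up paths).
import io
import os

stdin = io.StringIO("script/foo/meta.yaml,script/foo/customize.py\n")
filenames = stdin.read().strip().split(",")
metas = [f for f in filenames if os.path.basename(f) == "meta.yaml"]
print(metas)  # -> ['script/foo/meta.yaml']
```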
${{ github.event.pull_request.head.repo.html_url }} --branch=${{ github.event.pull_request.head.ref }} + mlc test script app,mlperf,automotive diff --git a/script/app-mlperf-automotive-mlcommons-python/customize.py b/script/app-mlperf-automotive-mlcommons-python/customize.py index 16805cc35..4ea2ce0f1 100644 --- a/script/app-mlperf-automotive-mlcommons-python/customize.py +++ b/script/app-mlperf-automotive-mlcommons-python/customize.py @@ -1,248 +1,293 @@ -from mlc import utils -from utils import is_true -import os -import json -import shutil -import subprocess - - -def preprocess(i): - - os_info = i['os_info'] - env = i['env'] - state = i['state'] - script_path = i['run_script_input']['path'] - - if is_true(env.get('MLC_MLPERF_SKIP_RUN', '')): - return {'return': 0} - - if is_true(env.get('MLC_RUN_DOCKER_CONTAINER', '')): - return {'return': 0} - - if is_true(env.get('MLC_MLPERF_POWER', '')): - power = "yes" - else: - power = "no" - - rerun = True if env.get("MLC_RERUN", "") != '' else False - - if 'MLC_MLPERF_LOADGEN_SCENARIO' not in env: - env['MLC_MLPERF_LOADGEN_SCENARIO'] = "Offline" - - if 'MLC_MLPERF_LOADGEN_MODE' not in env: - env['MLC_MLPERF_LOADGEN_MODE'] = "accuracy" - - if 'MLC_MODEL' not in env: - return { - 'return': 1, 'error': "Please select a variation specifying the model to run"} - - # if env['MLC_MODEL'] == "resnet50": - # cmd = "cp " + os.path.join(env['MLC_DATASET_AUX_PATH'], "val.txt") + " " + os.path.join(env['MLC_DATASET_PATH'], - # "val_map.txt") - # ret = os.system(cmd) - - env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] = " " + \ - env.get('MLC_MLPERF_LOADGEN_EXTRA_OPTIONS', '') + " " - - if 'MLC_MLPERF_LOADGEN_QPS' not in env: - env['MLC_MLPERF_LOADGEN_QPS_OPT'] = "" - else: - env['MLC_MLPERF_LOADGEN_QPS_OPT'] = " --qps " + \ - env['MLC_MLPERF_LOADGEN_QPS'] - - env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += env['MLC_MLPERF_LOADGEN_QPS_OPT'] - - if 'MLC_NUM_THREADS' not in env: - if 'MLC_MINIMIZE_THREADS' in env: - env['MLC_NUM_THREADS'] = str(int(env['MLC_HOST_CPU_TOTAL_CORES']) // - (int(env.get('MLC_HOST_CPU_SOCKETS', '1')) * int(env.get('MLC_HOST_CPU_TOTAL_CORES', '1')))) - else: - env['MLC_NUM_THREADS'] = env.get('MLC_HOST_CPU_TOTAL_CORES', '1') - - if env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '') != '' and not env.get( - 'MLC_MLPERF_MODEL_SKIP_BATCHING', False): - env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --max-batchsize " + \ - str(env['MLC_MLPERF_LOADGEN_MAX_BATCHSIZE']) - - if env.get('MLC_MLPERF_LOADGEN_BATCH_SIZE', '') != '': - env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --batch-size " + \ - str(env['MLC_MLPERF_LOADGEN_BATCH_SIZE']) - - if env.get('MLC_MLPERF_LOADGEN_QUERY_COUNT', '') != '' and not env.get( - 'MLC_TMP_IGNORE_MLPERF_QUERY_COUNT', False) and env.get('MLC_MLPERF_RUN_STYLE', '') != "valid": - env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --count " + \ - env['MLC_MLPERF_LOADGEN_QUERY_COUNT'] - - print("Using MLCommons Inference source from '" + - env['MLC_MLPERF_INFERENCE_SOURCE'] + "'") - - if 'MLC_MLPERF_CONF' not in env: - env['MLC_MLPERF_CONF'] = os.path.join( - env['MLC_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") - - x = "" if os_info['platform'] == 'windows' else "'" - - env['MODEL_DIR'] = env.get('MLC_ML_MODEL_PATH') - if not env['MODEL_DIR']: - env['MODEL_DIR'] = os.path.dirname( - env.get( - 'MLC_MLPERF_CUSTOM_MODEL_PATH', - env.get('MLC_ML_MODEL_FILE_WITH_PATH'))) - - RUN_CMD = "" - - scenario = env['MLC_MLPERF_LOADGEN_SCENARIO'] - scenario_extra_options = '' - - NUM_THREADS = env['MLC_NUM_THREADS'] - if int(NUM_THREADS) > 2 and 
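The two `RCLONE_CONFIG_*_SERVICE_ACCOUNT_CREDENTIALS` variables exported in test-mlperf-automotive.yml above follow rclone's `RCLONE_CONFIG_<REMOTE>_<OPTION>` environment convention, so later steps can address `mlc_cognata`/`mlc_nuscenes` remotes without writing a config file. A rough sketch of such a consumer (the remote type and paths are assumptions for illustration, not from the patch):

```python
# Rough sketch: a download step consuming the masked service-account secret.
import os
import subprocess

env = dict(os.environ)  # carries RCLONE_CONFIG_MLC_NUSCENES_SERVICE_ACCOUNT_CREDENTIALS
env["RCLONE_CONFIG_MLC_NUSCENES_TYPE"] = "drive"  # assumed: a Google Drive remote
subprocess.run(
    ["rclone", "copy", "mlc_nuscenes:datasets/nuscenes", "./nuscenes", "-P"],
    env=env, check=True,
)
```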
env['MLC_MLPERF_DEVICE'] == "gpu": - NUM_THREADS = "2" # Don't use more than 2 threads when run on GPU - - if env['MLC_MODEL'] in ['resnet50', 'retinanet', 'stable-diffusion-xl']: - scenario_extra_options += " --threads " + NUM_THREADS - - ml_model_name = env['MLC_MODEL'] - if 'MLC_MLPERF_USER_CONF' in env: - user_conf_path = env['MLC_MLPERF_USER_CONF'] - x = "" if os_info['platform'] == 'windows' else "'" - scenario_extra_options += " --user_conf " + x + user_conf_path + x - - mode = env['MLC_MLPERF_LOADGEN_MODE'] - mode_extra_options = "" - - # Grigori blocked for ABTF to preprocess data set on the fly for now - # we can later move it to a separate script to preprocess data set - -# if 'MLC_DATASET_PREPROCESSED_PATH' in env and env['MLC_MODEL'] in [ 'resnet50', 'retinanet' ]: -# #dataset_options = " --use_preprocessed_dataset --preprocessed_dir "+env['MLC_DATASET_PREPROCESSED_PATH'] -# if env.get('MLC_MLPERF_LAST_RELEASE') not in [ "v2.0", "v2.1" ]: -# dataset_options = " --use_preprocessed_dataset --cache_dir "+env['MLC_DATASET_PREPROCESSED_PATH'] -# else: -# dataset_options = "" -# if env['MLC_MODEL'] == "retinanet": -# dataset_options += " --dataset-list "+ env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] -# elif env['MLC_MODEL'] == "resnet50": -# dataset_options += " --dataset-list "+ os.path.join(env['MLC_DATASET_AUX_PATH'], "val.txt") -# env['DATA_DIR'] = env.get('MLC_DATASET_PREPROCESSED_PATH') -# else: -# if 'MLC_DATASET_PREPROCESSED_PATH' in env: -# env['DATA_DIR'] = env.get('MLC_DATASET_PREPROCESSED_PATH') -# else: -# env['DATA_DIR'] = env.get('MLC_DATASET_PATH') -# dataset_options = '' - - # Grigori added for ABTF -# dataset_path = env.get('MLC_DATASET_PATH') -# env['DATA_DIR'] = dataset_path - -# dataset_options = " --dataset-list " + env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] -# dataset_options += " --cache_dir " + os.path.join(script_path, 'preprocessed-dataset') - - dataset_options = '' - - if env.get('MLC_MLPERF_EXTRA_DATASET_ARGS', '') != '': - dataset_options += " " + env['MLC_MLPERF_EXTRA_DATASET_ARGS'] - - if mode == "accuracy": - mode_extra_options += " --accuracy" - env['MLC_OUTPUT_PREDICTIONS_PATH'] = os.path.join( - env['MLC_DATASET_MLCOMMONS_COGNATA_PATH'], - env['MLC_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS'], - 'Cognata_Camera_01_8M_png', - 'output') - - elif mode == "performance": - pass - - elif mode == "compliance": - - audit_full_path = env['MLC_MLPERF_INFERENCE_AUDIT_PATH'] - mode_extra_options = " --audit '" + audit_full_path + "'" - - if env.get('MLC_MLPERF_OUTPUT_DIR', '') == '': - env['MLC_MLPERF_OUTPUT_DIR'] = os.getcwd() - - mlperf_implementation = env.get('MLC_MLPERF_IMPLEMENTATION', 'reference') - - # Generate CMD - - # Grigori updated for ABTF demo -# cmd, run_dir = get_run_cmd(os_info, env, scenario_extra_options, mode_extra_options, dataset_options, mlperf_implementation) - cmd, run_dir = get_run_cmd_reference( - os_info, env, scenario_extra_options, mode_extra_options, dataset_options, script_path) - - if env.get('MLC_NETWORK_LOADGEN', '') == "lon": - - run_cmd = i['state']['mlperf_inference_run_cmd'] - env['MLC_SSH_RUN_COMMANDS'] = [] - env['MLC_SSH_RUN_COMMANDS'].append( - run_cmd.replace( - "--network=lon", - "--network=sut") + " &") - - env['MLC_MLPERF_RUN_CMD'] = cmd - env['MLC_RUN_DIR'] = run_dir - env['MLC_RUN_CMD'] = cmd - env['CK_PROGRAM_TMP_DIR'] = env.get('MLC_ML_MODEL_PATH') # for tvm - - if env.get('MLC_HOST_PLATFORM_FLAVOR', '') == "arm64": - env['MLC_HOST_PLATFORM_FLAVOR'] = "aarch64" - - if not env.get('MLC_COGNATA_ACCURACY_DUMP_FILE'): - 
env['MLC_COGNATA_ACCURACY_DUMP_FILE'] = os.path.join( - env['OUTPUT_DIR'], "accuracy.txt") - - return {'return': 0} - - -def get_run_cmd_reference(os_info, env, scenario_extra_options, - mode_extra_options, dataset_options, script_path=None): - - q = '"' if os_info['platform'] == 'windows' else "'" - - ########################################################################## - # Grigori added for ABTF demo - - if env['MLC_MODEL'] in ['retinanet']: - - run_dir = os.path.join(script_path, 'ref') - - env['RUN_DIR'] = run_dir - - env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR'] - - cognata_dataset_path = env['MLC_DATASET_MLCOMMONS_COGNATA_PATH'] -# cognata_dataset_path = env['MLC_DATASET_PATH'] # Using open images -# dataset for some tests - - path_to_model = env.get( - 'MLC_MLPERF_CUSTOM_MODEL_PATH', - env.get( - 'MLC_ML_MODEL_FILE_WITH_PATH', - env.get('MLC_ML_MODEL_CODE_WITH_PATH'))) - env['MODEL_FILE'] = path_to_model - - cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " " + os.path.join(run_dir, "python", "main.py") + " --profile " + env['MLC_MODEL'] + "-" + env['MLC_MLPERF_BACKEND'] + \ - " --model=" + q + path_to_model + q + \ - " --dataset=" + env["MLC_MLPERF_VISION_DATASET_OPTION"] + \ - " --dataset-path=" + q + cognata_dataset_path + q + \ - " --cache_dir=" + q + os.path.join(script_path, 'tmp-preprocessed-dataset') + q + \ - " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + " " + \ - " --output " + q + env['OUTPUT_DIR'] + q + " " + \ - env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ - scenario_extra_options + mode_extra_options + dataset_options - - ########################################################################## - - return cmd, run_dir - - -def postprocess(i): - - env = i['env'] - - state = i['state'] - - inp = i['input'] - - return {'return': 0} +from mlc import utils +from utils import is_true +import os +import json +import shutil +import subprocess + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + state = i['state'] + script_path = i['run_script_input']['path'] + + logger = i['automation'].logger + + if is_true(env.get('MLC_MLPERF_SKIP_RUN', '')): + return {'return': 0} + + if is_true(env.get('MLC_RUN_DOCKER_CONTAINER', '')): + return {'return': 0} + + if is_true(env.get('MLC_MLPERF_POWER', '')): + power = "yes" + else: + power = "no" + + rerun = True if env.get("MLC_RERUN", "") != '' else False + + if 'MLC_MLPERF_LOADGEN_SCENARIO' not in env: + env['MLC_MLPERF_LOADGEN_SCENARIO'] = "Offline" + + if 'MLC_MLPERF_LOADGEN_MODE' not in env: + env['MLC_MLPERF_LOADGEN_MODE'] = "accuracy" + + if 'MLC_MODEL' not in env: + return { + 'return': 1, 'error': "Please select a variation specifying the model to run"} + + # if env['MLC_MODEL'] == "resnet50": + # cmd = "cp " + os.path.join(env['MLC_DATASET_AUX_PATH'], "val.txt") + " " + os.path.join(env['MLC_DATASET_PATH'], + # "val_map.txt") + # ret = os.system(cmd) + + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] = " " + \ + env.get('MLC_MLPERF_LOADGEN_EXTRA_OPTIONS', '') + " " + + if 'MLC_MLPERF_LOADGEN_QPS' not in env: + env['MLC_MLPERF_LOADGEN_QPS_OPT'] = "" + else: + env['MLC_MLPERF_LOADGEN_QPS_OPT'] = " --qps " + \ + env['MLC_MLPERF_LOADGEN_QPS'] + + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += env['MLC_MLPERF_LOADGEN_QPS_OPT'] + + if 'MLC_NUM_THREADS' not in env: + if 'MLC_MINIMIZE_THREADS' in env: + env['MLC_NUM_THREADS'] = str(int(env['MLC_HOST_CPU_TOTAL_CORES']) // + (int(env.get('MLC_HOST_CPU_SOCKETS', '1')) * int(env.get('MLC_HOST_CPU_TOTAL_CORES', '1')))) + else: + env['MLC_NUM_THREADS'] = 
env.get('MLC_HOST_CPU_TOTAL_CORES', '1') + + if env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '') != '' and not env.get( + 'MLC_MLPERF_MODEL_SKIP_BATCHING', False): + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --max-batchsize " + \ + str(env['MLC_MLPERF_LOADGEN_MAX_BATCHSIZE']) + + if env.get('MLC_MLPERF_LOADGEN_BATCH_SIZE', '') != '': + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --batch-size " + \ + str(env['MLC_MLPERF_LOADGEN_BATCH_SIZE']) + + if env.get('MLC_MLPERF_LOADGEN_QUERY_COUNT', '') != '' and not env.get( + 'MLC_TMP_IGNORE_MLPERF_QUERY_COUNT', False) and env.get('MLC_MLPERF_RUN_STYLE', '') != "valid": + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --count " + \ + env['MLC_MLPERF_LOADGEN_QUERY_COUNT'] + + print("Using MLCommons Inference source from '" + + env['MLC_MLPERF_INFERENCE_SOURCE'] + "'") + + env['MODEL_DIR'] = env.get('MLC_ML_MODEL_PATH') + if not env['MODEL_DIR']: + env['MODEL_DIR'] = os.path.dirname( + env.get( + 'MLC_MLPERF_CUSTOM_MODEL_PATH', + env.get('MLC_ML_MODEL_FILE_WITH_PATH'))) + + RUN_CMD = "" + + scenario = env['MLC_MLPERF_LOADGEN_SCENARIO'] + scenario_extra_options = '' + + NUM_THREADS = env['MLC_NUM_THREADS'] + if int(NUM_THREADS) > 2 and env['MLC_MLPERF_DEVICE'] == "gpu": + NUM_THREADS = "2" # Don't use more than 2 threads when run on GPU + + if env['MLC_MODEL'] in ['resnet50', 'retinanet', 'stable-diffusion-xl']: + scenario_extra_options += " --threads " + NUM_THREADS + + ml_model_name = env['MLC_MODEL'] + if 'MLC_MLPERF_USER_CONF' in env: + user_conf_path = env['MLC_MLPERF_USER_CONF'] + x = "" if os_info['platform'] == 'windows' else "'" + scenario_extra_options += " --user_conf " + x + user_conf_path + x + + mode = env['MLC_MLPERF_LOADGEN_MODE'] + mode_extra_options = "" + + # Grigori blocked for ABTF to preprocess data set on the fly for now + # we can later move it to a separate script to preprocess data set + +# if 'MLC_DATASET_PREPROCESSED_PATH' in env and env['MLC_MODEL'] in [ 'resnet50', 'retinanet' ]: +# #dataset_options = " --use_preprocessed_dataset --preprocessed_dir "+env['MLC_DATASET_PREPROCESSED_PATH'] +# if env.get('MLC_MLPERF_LAST_RELEASE') not in [ "v2.0", "v2.1" ]: +# dataset_options = " --use_preprocessed_dataset --cache_dir "+env['MLC_DATASET_PREPROCESSED_PATH'] +# else: +# dataset_options = "" +# if env['MLC_MODEL'] == "retinanet": +# dataset_options += " --dataset-list "+ env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] +# elif env['MLC_MODEL'] == "resnet50": +# dataset_options += " --dataset-list "+ os.path.join(env['MLC_DATASET_AUX_PATH'], "val.txt") +# env['DATA_DIR'] = env.get('MLC_DATASET_PREPROCESSED_PATH') +# else: +# if 'MLC_DATASET_PREPROCESSED_PATH' in env: +# env['DATA_DIR'] = env.get('MLC_DATASET_PREPROCESSED_PATH') +# else: +# env['DATA_DIR'] = env.get('MLC_DATASET_PATH') +# dataset_options = '' + + # Grigori added for ABTF +# dataset_path = env.get('MLC_DATASET_PATH') +# env['DATA_DIR'] = dataset_path + +# dataset_options = " --dataset-list " + env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] +# dataset_options += " --cache_dir " + os.path.join(script_path, 'preprocessed-dataset') + + dataset_options = '' + + if env.get('MLC_MLPERF_EXTRA_DATASET_ARGS', '') != '': + dataset_options += " " + env['MLC_MLPERF_EXTRA_DATASET_ARGS'] + + if mode == "accuracy": + mode_extra_options += " --accuracy" + if env.get('MLC_MODEL', '') == "retinanet": + env['MLC_OUTPUT_PREDICTIONS_PATH'] = os.path.join( + env['MLC_DATASET_MLCOMMONS_COGNATA_PATH'], + env['MLC_DATASET_MLCOMMONS_COGNATA_SERIAL_NUMBERS'], + 'Cognata_Camera_01_8M_png', + 'output') + + 
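As a worked example of the flag assembly in the rewritten preprocess() (values are hypothetical): with a target QPS of 2 and a query count of 100 in a non-valid run, the LoadGen option string comes out as follows.

```python
# Worked example of MLC_MLPERF_LOADGEN_EXTRA_OPTIONS assembly (hypothetical env).
env = {
    "MLC_MLPERF_LOADGEN_EXTRA_OPTIONS": "",
    "MLC_MLPERF_LOADGEN_QPS": "2",
    "MLC_MLPERF_LOADGEN_QUERY_COUNT": "100",
}
opts = " " + env["MLC_MLPERF_LOADGEN_EXTRA_OPTIONS"] + " "
opts += " --qps " + env["MLC_MLPERF_LOADGEN_QPS"]
opts += " --count " + env["MLC_MLPERF_LOADGEN_QUERY_COUNT"]
print(opts)  # '   --qps 2 --count 100'
```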
elif mode == "performance": + pass + + elif mode == "compliance": + + audit_full_path = env['MLC_MLPERF_INFERENCE_AUDIT_PATH'] + mode_extra_options = " --audit '" + audit_full_path + "'" + + if env.get('MLC_MLPERF_OUTPUT_DIR', '') == '': + env['MLC_MLPERF_OUTPUT_DIR'] = os.getcwd() + + mlperf_implementation = env.get('MLC_MLPERF_IMPLEMENTATION', 'reference') + + # Generate CMD + + # Grigori updated for ABTF demo +# cmd, run_dir = get_run_cmd(os_info, env, scenario_extra_options, mode_extra_options, dataset_options, mlperf_implementation) + cmd, run_dir = get_run_cmd_reference( + os_info, env, scenario_extra_options, mode_extra_options, dataset_options, logger, script_path) + + if env.get('MLC_NETWORK_LOADGEN', '') == "lon": + + run_cmd = i['state']['mlperf_inference_run_cmd'] + env['MLC_SSH_RUN_COMMANDS'] = [] + env['MLC_SSH_RUN_COMMANDS'].append( + run_cmd.replace( + "--network=lon", + "--network=sut") + " &") + + env['MLC_MLPERF_RUN_CMD'] = cmd + env['MLC_RUN_DIR'] = run_dir + env['MLC_RUN_CMD'] = cmd + env['CK_PROGRAM_TMP_DIR'] = env.get('MLC_ML_MODEL_PATH') # for tvm + + if env.get('MLC_HOST_PLATFORM_FLAVOR', '') == "arm64": + env['MLC_HOST_PLATFORM_FLAVOR'] = "aarch64" + + if env.get('MLC_MODEL', '') == "retinanet": + if not env.get('MLC_COGNATA_ACCURACY_DUMP_FILE'): + env['MLC_COGNATA_ACCURACY_DUMP_FILE'] = os.path.join( + env['OUTPUT_DIR'], "accuracy.txt") + + return {'return': 0} + + +def get_run_cmd_reference(os_info, env, scenario_extra_options, + mode_extra_options, dataset_options, logger, script_path=None): + + q = '"' if os_info['platform'] == 'windows' else "'" + + ########################################################################## + # Grigori added for ABTF demo + + if env['MLC_MODEL'] in ['retinanet']: + + run_dir = os.path.join(script_path, 'ref') + + env['RUN_DIR'] = run_dir + + env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR'] + + cognata_dataset_path = env['MLC_DATASET_MLCOMMONS_COGNATA_PATH'] +# cognata_dataset_path = env['MLC_DATASET_PATH'] # Using open images +# dataset for some tests + + path_to_model = env.get( + 'MLC_MLPERF_CUSTOM_MODEL_PATH', + env.get( + 'MLC_ML_MODEL_FILE_WITH_PATH', + env.get('MLC_ML_MODEL_CODE_WITH_PATH'))) + env['MODEL_FILE'] = path_to_model + + cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " " + os.path.join(run_dir, "python", "main.py") + " --profile " + env['MLC_MODEL'] + "-" + env['MLC_MLPERF_BACKEND'] + \ + " --model=" + q + path_to_model + q + \ + " --dataset=" + env["MLC_MLPERF_VISION_DATASET_OPTION"] + \ + " --dataset-path=" + q + cognata_dataset_path + q + \ + " --cache_dir=" + q + os.path.join(script_path, 'tmp-preprocessed-dataset') + q + \ + " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + " " + \ + " --output " + q + env['OUTPUT_DIR'] + q + " " + \ + env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + scenario_extra_options + mode_extra_options + dataset_options + + elif env['MLC_MODEL'] in ['bevformer']: + run_dir = env['MLC_MLPERF_INFERENCE_BEVFORMER_PATH'] + + env['RUN_DIR'] = run_dir + + env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR'] + + if env['MLC_MLPERF_BACKEND'] != "onnxruntime": + logger.warning( + "Unsupported backend {MLC_MLPERF_BACKEND}, defaulting to onnx") + env['MLC_MLPERF_BACKEND'] = "onnx" + + config_path = os.path.join( + run_dir, + "projects", + "configs", + "bevformer", + "bevformer_tiny.py") + print(env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']) + cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend 
onnx --dataset nuscenes --nuscenes-root {os.path.dirname(env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'].rstrip("/"))} --dataset-path {env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH']} --checkpoint {env['MLC_ML_MODEL_BEVFORMER_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}""" + print(cmd) + elif env['MLC_MODEL'] in ['ssd-resnet50']: + run_dir = env['MLC_MLPERF_INFERENCE_SSD_RESNET50_PATH'] + + env['RUN_DIR'] = run_dir + + env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR'] + + backend = "onnx" if env.get( + 'MLC_MLPERF_BACKEND') == "onnxruntime" else env.get('MLC_MLPERF_BACKEND') + + config_path = "baseline_8MP_ss_scales_fm1_5x5_all" + + cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {backend} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_SSD_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}""" + + elif env['MLC_MODEL'] in ['deeplab_v3+']: + run_dir = env['MLC_MLPERF_INFERENCE_DEEPLABV3PLUS_PATH'] + + env['RUN_DIR'] = run_dir + + env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR'] + + backend = "onnx" if env.get( + 'MLC_MLPERF_BACKEND') == "onnxruntime" else env.get('MLC_MLPERF_BACKEND') + + cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {backend} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}""" + + ########################################################################## + + return cmd, run_dir + + +def postprocess(i): + + env = i['env'] + + state = i['state'] + + inp = i['input'] + + return {'return': 0} diff --git a/script/app-mlperf-automotive-mlcommons-python/meta.yaml b/script/app-mlperf-automotive-mlcommons-python/meta.yaml index e5567ac27..dd288d46e 100644 --- a/script/app-mlperf-automotive-mlcommons-python/meta.yaml +++ b/script/app-mlperf-automotive-mlcommons-python/meta.yaml @@ -9,10 +9,13 @@ category: "Modular MLPerf inference benchmark pipeline for ABTF model" # User-friendly tags to find this CM script tags: -- demo +- automotive +- mlcommons +- reference - run-mlperf-inference - object-detection - abtf-model +- demo # Default environment @@ -174,26 +177,19 @@ deps: names: - ml-engine-pytorch - pytorch - enable_if_env: - MLC_MLPERF_BACKEND: - - pytorch - - tvm-pytorch + skip_if_env: MLC_MLPERF_DEVICE: - - cpu - - rocm + - gpu ## Pytorch (CUDA) - tags: get,generic-python-lib,_torch_cuda names: - ml-engine-pytorch - pytorch - enable_if_env: - MLC_MLPERF_BACKEND: - - pytorch - - tvm-pytorch - - ray + skip_if_env: MLC_MLPERF_DEVICE: - - gpu + - cpu + - rocm ## Torchvision (CPU) - tags: get,generic-python-lib,_torchvision @@ -259,7 +255,7 @@ deps: # Creates user conf for given SUT - - tags: generate,user-conf,mlperf,inference + - tags: generate,user-conf,mlperf,inference,_automotive names: - user-conf-generator @@ -273,7 +269,7 @@ deps: - loadgen - mlperf-inference-loadgen - - tags: get,loadgen + - tags: get,loadgen,_automotive enable_if_any_env: MLC_MLPERF_LOADGEN_BUILD_FROM_SRC: - "on" @@ -340,6 +336,9 
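To visualize what the bevformer branch of get_run_cmd_reference produces, here is the shape of the final command with hypothetical paths substituted for the env values (a SingleStream accuracy run; real paths come from the model and preprocessed-dataset dependencies):

```python
# Shape of the command assembled above (all paths are illustrative).
cmd = (
    "/usr/bin/python3 /path/to/harness/main.py"
    " --output /tmp/results --scenario SingleStream"
    " --backend onnx --dataset nuscenes"
    " --nuscenes-root /data --dataset-path /data/nuscenes_preprocessed"
    " --checkpoint /models/bevformer_tiny.onnx"
    " --config /path/to/harness/projects/configs/bevformer/bevformer_tiny.py"
    " --user_conf '/tmp/user.conf' --accuracy"
)
print(cmd)
```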
@@ variations: group: framework env: MLC_MLPERF_BACKEND: onnxruntime + add_deps_recursive: + ml-model-bevformer: + tags: _onnx onnxruntime,cpu: env: @@ -361,10 +360,27 @@ variations: tags: _NCHW ml-model: tags: raw,_pytorch + ml-model-bevformer: + tags: _pytorch + ml-model-ssd-resnet50: + tags: _pytorch + ml-model-deeplabv3-plus: + tags: _pytorch env: MLC_MLPERF_BACKEND: pytorch MLC_MLPERF_BACKEND_VERSION: <<>> - + + pytorch,cpu: + add_deps_recursive: + pytorch: + env: + MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu/torch_stable.html + torchvision: + env: + MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu/torch_stable.html + torchaudio: + env: + MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu/torch_stable.html @@ -383,6 +399,9 @@ variations: abtf-demo-model: group: models + add_deps_recursive: + automotive-src: + tags: _sha.ee526dc63d9ca2636000343c5d2d16132145719e deps: - tags: get,generic-python-lib,_opencv-python - tags: get,generic-python-lib,_numpy @@ -399,13 +418,15 @@ variations: - tags: get,ml-model,abtf-ssd-pytorch,_abtf-mvp names: - ml-model-abtf - env: MLC_MODEL: retinanet abtf-poc-model: group: models default: true + add_deps_recursive: + automotive-src: + tags: _sha.ee526dc63d9ca2636000343c5d2d16132145719e deps: - tags: get,generic-python-lib,_opencv-python - tags: get,generic-python-lib,_numpy @@ -427,10 +448,115 @@ variations: - tags: get,ml-model,abtf-ssd-pytorch,_abtf-poc names: - ml-model-abtf - env: MLC_MODEL: retinanet + bevformer: + group: models + add_deps_recursive: + pytorch: + version_max: "2.5.1" + version_max_usable: "2.5.1" + torchvision: + version_max: "0.20.1" + version_max_usable": "0.20.1" + torchaudio: + version_max: "2.5.1" + version_max_usable": "2.5.1" + deps: + - tags: get,generic-python-lib,_package.opencv-python + - tags: get,generic-python-lib,_package.numpy + version_max: "1.26.4" + version_max_usable: "1.26.4" + - tags: get,generic-python-lib,_package.onnx + - tags: get,generic-python-lib,_package.pillow + - tags: get,generic-python-lib,_package.pyquaternion + - tags: get,generic-python-lib,_package.tqdm + - tags: get,generic-python-lib,_package.nuscenes-devkit + - tags: get,preprocessed,dataset,nuscenes + skip_if_env: + MLC_RUN_STATE_DOCKER: + - "yes" + names: + - preprocessed-dataset-mlcommons-nuscenes + - tags: get,ml-model,bevformer + skip_if_env: + MLC_RUN_STATE_DOCKER: + - "yes" + names: + - ml-model-bevformer + + ssd-resnet50: + group: models + add_deps_recursive: + pytorch: + version_max: "2.3.1" + version_max_usable: "2.3.1" + torchvision: + version_max: "0.18.1" + version_max_usable": "0.18.1" + torchaudio: + version_max: "2.3.1" + version_max_usable": "2.3.1" + deps: + - tags: get,generic-python-lib,_package.Cython + - tags: get,generic-python-lib,_package.scikit-image + - tags: get,generic-python-lib,_package.faster-coco-eval + - tags: get,generic-python-lib,_package.torchinfo + - tags: get,generic-python-lib,_package.torchmetrics + - tags: get,generic-sys-util,_libgl1-mesa-glx + - tags: get,generic-python-lib,_package.onnx + - tags: get,generic-python-lib,_package.onnxruntime + - tags: get,generic-python-lib,_package.tqdm + - tags: get,preprocessed,dataset,cognata,_mlc + skip_if_env: + MLC_RUN_STATE_DOCKER: + - "yes" + names: + - preprocessed-dataset-mlcommons-cognata-ssd-resnet50 + - tags: get,ml-model,ssd,resnet50,_mlc,_rclone + skip_if_any_env: + MLC_RUN_STATE_DOCKER: + - "yes" + names: + - ml-model-ssd-resnet50 + + deeplab-v3+: + group: models + 
add_deps_recursive: + pytorch: + version_max: "2.3.1" + version_max_usable: "2.3.1" + torchvision: + version_max: "0.18.1" + version_max_usable": "0.18.1" + torchaudio: + version_max: "2.3.1" + version_max_usable": "2.3.1" + deps: + - tags: get,generic-python-lib,_package.Cython + - tags: get,generic-python-lib,_package.scikit-image + - tags: get,generic-python-lib,_package.scikit-learn + - tags: get,generic-python-lib,_package.torchinfo + - tags: get,generic-python-lib,_package.torchmetrics + - tags: get,generic-sys-util,_libgl1-mesa-glx + - tags: get,generic-python-lib,_package.onnx + - tags: get,generic-python-lib,_package.onnxruntime + - tags: get,generic-python-lib,_package.tqdm + - tags: get,generic-python-lib,_package.ijson + - tags: get,preprocessed,dataset,cognata,_mlc,_segmentation + skip_if_env: + MLC_RUN_STATE_DOCKER: + - "yes" + names: + - preprocessed-dataset-mlcommons-cognata-deeplabv3-plus + - tags: get,ml-model,deeplabv3-plus + skip_if_env: + MLC_RUN_STATE_DOCKER: + - "yes" + names: + - ml-model-deeplabv3-plus + # Target devices cpu: group: device @@ -460,10 +586,16 @@ variations: singlestream: env: MLC_MLPERF_LOADGEN_SCENARIO: SingleStream - MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: 1 + default_variations: + batch-size: batch_size.1 server: env: MLC_MLPERF_LOADGEN_SCENARIO: Server mvp_demo: env: + + batch_size.#: + group: batch-size + env: + MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: "#" \ No newline at end of file diff --git a/script/app-mlperf-automotive/customize.py b/script/app-mlperf-automotive/customize.py index 1333e0719..b7d8598c5 100644 --- a/script/app-mlperf-automotive/customize.py +++ b/script/app-mlperf-automotive/customize.py @@ -1,103 +1,421 @@ -from mlc import utils -import os -import json -import shutil -import subprocess -import mlperf_utils -from log_parser import MLPerfLog - - -def preprocess(i): - - os_info = i['os_info'] - env = i['env'] - state = i['state'] - script_path = i['run_script_input']['path'] - - if 'cmd' in i['input']: - state['mlperf_inference_run_cmd'] = "mlcr " + \ - " ".join(i['input']['cmd']) - - state['mlperf-inference-implementation'] = {} - - run_state = i['run_script_input']['run_state'] - state['mlperf-inference-implementation']['script_id'] = run_state['script_id'] + \ - ":" + ",".join(run_state['script_variation_tags']) - - return {'return': 0} - - -def postprocess(i): - - env = i['env'] - state = i['state'] - - inp = i['input'] - os_info = i['os_info'] - - xsep = '^' if os_info['platform'] == 'windows' else '\\' - - env['CMD'] = '' - - # if env.get('MLC_MLPERF_USER_CONF', '') == '': - # return {'return': 0} - - output_dir = env['MLC_MLPERF_OUTPUT_DIR'] - mode = env['MLC_MLPERF_LOADGEN_MODE'] - - model = env['MLC_MODEL'] - model_full_name = env.get('MLC_ML_MODEL_FULL_NAME', model) - - scenario = env['MLC_MLPERF_LOADGEN_SCENARIO'] - - if not os.path.exists(output_dir) or not os.path.exists( - os.path.join(output_dir, "mlperf_log_summary.txt")): - # No output, fake_run? 
- return {'return': 0} - - mlperf_log = MLPerfLog(os.path.join(output_dir, "mlperf_log_detail.txt")) - if mode == "performance": - result = mlperf_log['result_mean_latency_ns'] / 1000000 - elif mode == "accuracy": - if not env.get( - 'MLC_COGNATA_ACCURACY_DUMP_FILE'): # can happen while reusing old runs - env['MLC_COGNATA_ACCURACY_DUMP_FILE'] = os.path.join( - output_dir, "accuracy.txt") - acc = "" - if os.path.exists(env['MLC_COGNATA_ACCURACY_DUMP_FILE']): - with open(env['MLC_COGNATA_ACCURACY_DUMP_FILE'], "r") as f: - acc = f.readline() - result = acc - else: - return {'return': 1, 'error': f"Unknown mode {mode}"} - - valid = {'performance': True, 'accuracy': True} # its POC - power_result = None # No power measurement in POC - - # result, valid, power_result = mlperf_utils.get_result_from_log(env['MLC_MLPERF_LAST_RELEASE'], model, scenario, output_dir, mode) - - if not state.get('mlc-mlperf-inference-results'): - state['mlc-mlperf-inference-results'] = {} - if not state.get('mlc-mlperf-inference-results-last'): - state['mlc-mlperf-inference-results-last'] = {} - if not state['mlc-mlperf-inference-results'].get( - state['MLC_SUT_CONFIG_NAME']): - state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']] = {} - if not state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] - ].get(model): - state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']][model] = {} - if not state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] - ][model].get(scenario): - state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] - ][model][scenario] = {} - - state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] - ][model][scenario][mode] = result - state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] - ][model][scenario][mode + '_valid'] = valid.get(mode, False) - - state['mlc-mlperf-inference-results-last'][mode] = result - state['mlc-mlperf-inference-results-last'][mode + - '_valid'] = valid.get(mode, False) - - return {'return': 0} +from mlc import utils +import os +import json +import shutil +import subprocess +import mlperf_utils +from log_parser import MLPerfLog +from utils import * +import copy +import platform +import sys + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + state = i['state'] + script_path = i['run_script_input']['path'] + + if 'cmd' in i['input']: + state['mlperf_inference_run_cmd'] = "mlcr " + \ + " ".join(i['input']['cmd']) + + state['abtf-inference-implementation'] = {} + + run_state = i['run_script_input']['run_state'] + state['abtf-inference-implementation']['script_id'] = run_state['script_id'] + \ + ":" + ",".join(run_state['script_variation_tags']) + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + inp = i['input'] + os_info = i['os_info'] + + xsep = '^' if os_info['platform'] == 'windows' else '\\' + q = '"' if os_info['platform'] == 'windows' else "'" + + logger = i['automation'].logger + + env['CMD'] = '' + + # if env.get('MLC_MLPERF_USER_CONF', '') == '': + # return {'return': 0} + + output_dir = env['MLC_MLPERF_OUTPUT_DIR'] + mode = env['MLC_MLPERF_LOADGEN_MODE'] + + mlc = i['automation'].action_object + + result_sut_folder_path = env['MLC_MLPERF_INFERENCE_RESULTS_SUT_PATH'] + + model = env['MLC_MODEL'] + model_full_name = env.get('MLC_ML_MODEL_FULL_NAME', model) + + scenario = env['MLC_MLPERF_LOADGEN_SCENARIO'] + + if not os.path.exists(output_dir) or not os.path.exists( + os.path.join(output_dir, "mlperf_log_summary.txt")): + # No 
output, fake_run? + return {'return': 0} + + mlperf_log = MLPerfLog(os.path.join(output_dir, "mlperf_log_detail.txt")) + if mode == "performance": + if scenario in ["Offline", "Server"]: + metric = "target_qps" + result = mlperf_log['result_mean_latency_ns'] / 1000000 + elif scenario.endswith("Stream"): + metric = "target_latency" + result = mlperf_log['result_mean_latency_ns'] + else: + return {'return': 1, + 'error': 'Unsupported scenario: {}'.format(scenario)} + import yaml + sut_name = state['MLC_SUT_CONFIG_NAME'] + sut_config = state['MLC_SUT_CONFIG'][sut_name] + sut_config_path = state['MLC_SUT_CONFIG_PATH'][sut_name] + if scenario not in sut_config[model_full_name]: + sut_config[model_full_name][scenario] = {} + sut_config[model_full_name][scenario][metric] = result + + print( + f"SUT: {sut_name}, model: {model_full_name}, scenario: {scenario}, {metric} (mean value) updated as {result}") + with open(sut_config_path, "w") as f: + yaml.dump(sut_config, f) + logger.info(f"New config stored in {sut_config_path}") + elif mode == "accuracy": + acc = "" + if env.get('MLC_MLPERF_INFERENCE_VERSION', '') == "mvp-demo" and env.get( + 'MLC_MLPERF_INFERENCE_VERSION') == "poc-demo": + if not env.get( + 'MLC_COGNATA_ACCURACY_DUMP_FILE'): # can happen while reusing old runs + env['MLC_COGNATA_ACCURACY_DUMP_FILE'] = os.path.join( + output_dir, "accuracy.txt") + if os.path.exists(env['MLC_COGNATA_ACCURACY_DUMP_FILE']): + with open(env['MLC_COGNATA_ACCURACY_DUMP_FILE'], "r") as f: + acc = f.readline() + result = acc + else: + return {'return': 1, 'error': f"Unknown mode {mode}"} + + valid = {'performance': True, 'accuracy': True} # its POC + power_result = None # No power measurement in POC + + # result, valid, power_result = mlperf_utils.get_result_from_log(env['MLC_MLPERF_LAST_RELEASE'], model, scenario, output_dir, mode) + + if not state.get('mlc-mlperf-inference-results'): + state['mlc-mlperf-inference-results'] = {} + if not state.get('mlc-mlperf-inference-results-last'): + state['mlc-mlperf-inference-results-last'] = {} + if not state['mlc-mlperf-inference-results'].get( + state['MLC_SUT_CONFIG_NAME']): + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']] = {} + if not state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] + ].get(model): + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']][model] = {} + if not state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] + ][model].get(scenario): + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] + ][model][scenario] = {} + + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] + ][model][scenario][mode] = result + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] + ][model][scenario][mode + '_valid'] = valid.get(mode, False) + + state['mlc-mlperf-inference-results-last'][mode] = result + state['mlc-mlperf-inference-results-last'][mode + + '_valid'] = valid.get(mode, False) + + if mode in ["performance", "accuracy"] and env.get( + 'MLC_MLPERF_INFERENCE_VERSION', '') not in ["", "mvp-demo", "poc-demo"]: + # if measurements file exist read it + if os.path.exists("measurements.json"): + with open("measurements.json", "r") as file: + measurements = json.load(file) # Load JSON data from the file + else: + measurements = {} + measurements['starting_weights_filename'] = env.get( + 'MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME', env.get( + 'MLC_ML_MODEL_FILE', measurements.get( + 'starting_weights_filename', 'TBD'))) + measurements['retraining'] = env.get( + 
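For reference, the detail-log lookup used in the new postprocess() reduces to the following pattern (MLPerfLog is the loadgen log parser imported at the top of the file; the log path is relative to the run's output directory):

```python
# Sketch of the loadgen detail-log access in postprocess().
from log_parser import MLPerfLog

mlperf_log = MLPerfLog("mlperf_log_detail.txt")
mean_latency_ms = mlperf_log["result_mean_latency_ns"] / 1e6  # Offline/Server path
print(mean_latency_ms)
```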
'MLC_ML_MODEL_RETRAINING', measurements.get( + 'retraining', 'no')) + measurements['input_data_types'] = env.get( + 'MLC_ML_MODEL_INPUTS_DATA_TYPE', measurements.get( + 'input_data_types', 'fp32')) + measurements['weight_data_types'] = env.get( + 'MLC_ML_MODEL_WEIGHTS_DATA_TYPE', measurements.get( + 'weight_data_types', 'fp32')) + measurements['weight_transformations'] = env.get( + 'MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS', measurements.get( + 'weight_transformations', 'none')) + + os.chdir(output_dir) + + if not os.path.exists("mlperf_log_summary.txt"): + return {'return': 0} + + mlperf_log_summary = '' + if os.path.isfile("mlperf_log_summary.txt"): + with open("mlperf_log_summary.txt", "r") as fp: + mlperf_log_summary = fp.read() + + if mlperf_log_summary != '': + state['app_mlperf_inference_log_summary'] = {} + for x in mlperf_log_summary.split('\n'): + y = x.split(': ') + if len(y) == 2: + state['app_mlperf_inference_log_summary'][y[0].strip().lower() + ] = y[1].strip() + + if not is_false(env.get("MLC_MLPERF_PRINT_SUMMARY", "")): + logger.info("\n") + logger.info(mlperf_log_summary) + + with open("measurements.json", "w") as fp: + json.dump(measurements, fp, indent=2) + + mlc_sut_info = {} + mlc_sut_info['system_name'] = state['MLC_SUT_META']['system_name'] + mlc_sut_info['implementation'] = env['MLC_MLPERF_IMPLEMENTATION'] + mlc_sut_info['device'] = env['MLC_MLPERF_DEVICE'] + mlc_sut_info['framework'] = state['MLC_SUT_META']['framework'] + mlc_sut_info['run_config'] = env['MLC_MLPERF_INFERENCE_SUT_RUN_CONFIG'] + with open(os.path.join(result_sut_folder_path, "mlc-sut-info.json"), "w") as fp: + json.dump(mlc_sut_info, fp, indent=2) + + system_meta = state['MLC_SUT_META'] + with open("system_meta.json", "w") as fp: + json.dump(system_meta, fp, indent=2) + + # map the custom model for inference result to the official model + # if custom model name is not set, the official model name will be + # mapped to itself + official_model_name = model + if "efficientnet" in official_model_name or "mobilenet" in official_model_name: + official_model_name = "resnet" + model_mapping = {model_full_name: official_model_name} + with open("model_mapping.json", "w") as fp: + json.dump(model_mapping, fp, indent=2) + + # Add to the state + state['app_mlperf_inference_measurements'] = copy.deepcopy( + measurements) + + if os.path.exists(env['MLC_MLPERF_CONF']): + shutil.copy(env['MLC_MLPERF_CONF'], 'mlperf.conf') + + if os.path.exists(env['MLC_MLPERF_USER_CONF']): + shutil.copy(env['MLC_MLPERF_USER_CONF'], 'user.conf') + + result, valid, power_result = mlperf_utils.get_result_from_log( + env['MLC_MLPERF_LAST_RELEASE'], model, scenario, output_dir, mode, env.get('MLC_MLPERF_INFERENCE_SOURCE_VERSION')) + + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] + ][model][scenario][mode] = result + state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'] + ][model][scenario][mode + '_valid'] = valid.get(mode, False) + + state['mlc-mlperf-inference-results-last'][mode] = result + state['mlc-mlperf-inference-results-last'][mode + + '_valid'] = valid.get(mode, False) + + # Power not included in v0.5, code should be added in future + + # Record basic host info + host_info = { + "os_version": platform.platform(), + "cpu_version": platform.processor(), + "python_version": sys.version, + } + try: + import importlib.metadata + mlc_version = importlib.metadata.version("mlc") + host_info["mlc_version"] = mlc_version + except Exception as e: + error = format(e) + mlc_version = "unknown" + + x = '' + if 
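Concretely, a run that keeps every fallback above would serialize a measurements.json along these lines (a sketch of the default fp32 case):

```python
# Illustrative measurements.json content when no overriding env is set.
measurements = {
    "starting_weights_filename": "TBD",
    "retraining": "no",
    "input_data_types": "fp32",
    "weight_data_types": "fp32",
    "weight_transformations": "none",
}
```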
env.get('MLC_HOST_OS_FLAVOR', '') != '': + x += env['MLC_HOST_OS_FLAVOR'] + if env.get('MLC_HOST_OS_VERSION', '') != '': + x += ' ' + env['MLC_HOST_OS_VERSION'] + if x != '': + host_info['os_version_sys'] = x + + if env.get('MLC_HOST_SYSTEM_NAME', '') != '': + host_info['system_name'] = env['MLC_HOST_SYSTEM_NAME'] + + # Check CM automation repository + repo_name = 'mlcommons@mlperf-automations' + repo_hash = '' + r = mlc.access({'action': 'find', 'automation': 'repo', + 'item': 'mlcommons@mlperf-automations,9e97bb72b0474657'}) + if r['return'] == 0 and len(r['list']) == 1: + repo_path = r['list'][0].path + if os.path.isdir(repo_path): + repo_name = os.path.basename(repo_path) + + # Check dev + # if repo_name == 'cm4mlops': repo_name = 'mlcommons@cm4mlops' + + r = utils.run_system_cmd({ + 'path': repo_path, + 'cmd': 'git rev-parse HEAD'}) + if r['return'] == 0: + repo_hash = r['output'] + + host_info['mlc_repo_name'] = repo_name + host_info['mlc_repo_git_hash'] = repo_hash + + with open("mlc-host-info.json", "w") as fp: + fp.write(json.dumps(host_info, indent=2) + '\n') + + # Prepare README + if "cmd" in inp: + cmd = "mlc run script \\\n\t" + " \\\n\t".join(inp['cmd']) + xcmd = "mlc run script " + xsep + "\n\t" + \ + (" " + xsep + "\n\t").join(inp['cmd']) + else: + cmd = "" + xcmd = "" + + readme_init = "*Check [MLC MLPerf docs](https://docs.mlcommons.org/automotive) for more details.*\n\n" + + readme_body = "## Host platform\n\n* OS version: {}\n* CPU version: {}\n* Python version: {}\n* MLC version: {}\n\n".format(platform.platform(), + platform.processor(), sys.version, mlc_version) + + x = repo_name + if repo_hash != '': + x += ' --checkout=' + str(repo_hash) + + readme_body += "## MLC Run Command\n\nSee [MLC installation guide](https://docs.mlcommons.org/mlcflow/install/).\n\n" + \ + "```bash\npip install -U mlcflow\n\nmlc rm cache -f\n\nmlc pull repo {}\n\n{}\n```".format( + x, xcmd) + + readme_body += "\n*Note that if you want to use the [latest automation recipes](https://docs.mlcommons.org/inference) for MLPerf,\n" + \ + " you should simply reload {} without checkout and clean MLC cache as follows:*\n\n".format(repo_name) + \ + "```bash\nmlc rm repo {}\nmlc pull repo {}\nmlc rm cache -f\n\n```".format( + repo_name, repo_name) + + extra_readme_init = '' + extra_readme_body = '' + if env.get('MLC_MLPERF_README', '') == "yes": + extra_readme_body += "\n## Dependent MLPerf Automation scripts\n\n" + + script_tags = inp['tags'] + script_adr = inp.get('adr', {}) + + mlc_input = {'action': 'run', + 'automation': 'script', + 'tags': script_tags, + 'adr': script_adr, + 'print_deps': True, + 'env': env, + 'quiet': True, + 'silent': True, + 'fake_run': True + } + r = mlc.access(mlc_input) + if r['return'] > 0: + return r + + print_deps = r['new_state']['print_deps'] + count = 1 + for dep in print_deps: + extra_readme_body += "\n\n" + str(count) + ". `" + dep + "`\n" + count = count + 1 + + if state.get( + 'abtfabtf-inference-implementation') and state['abtfabtf-inference-implementation'].get('print_deps'): + + extra_readme_body += "\n## Dependent automation scripts for the MLPerf Automotive Implementation\n" + + print_deps = state['abtfabtf-inference-implementation']['print_deps'] + count = 1 + for dep in print_deps: + extra_readme_body += "\n\n" + \ + str(count) + ". 
`" + dep + "`\n" + count = count + 1 + + readme = readme_init + readme_body + extra_readme = extra_readme_init + extra_readme_body + + with open("README.md", "w") as fp: + fp.write(readme) + if extra_readme: + with open("README-extra.md", "w") as fp: + fp.write(extra_readme) + + if state.get( + 'abtf-inference-implementation') and state['abtf-inference-implementation'].get('version_info'): + env['MLC_MLPERF_RUN_JSON_VERSION_INFO_FILE'] = os.path.join( + output_dir, "mlc-version-info.json") + env['MLC_MLPERF_RUN_DEPS_GRAPH'] = os.path.join( + output_dir, "mlc-deps.png") + env['MLC_MLPERF_RUN_DEPS_MERMAID'] = os.path.join( + output_dir, "mlc-deps.mmd") + with open(os.path.join(output_dir, "mlc-version-info.json"), "w") as f: + f.write( + json.dumps( + state['abtf-inference-implementation']['version_info'], + indent=2)) + + if env.get('MLC_DUMP_SYSTEM_INFO', True): + dump_script_output( + "detect,os", + env, + state, + 'new_env', + os.path.join( + output_dir, + "os_info.json"), mlc) + dump_script_output( + "detect,cpu", + env, + state, + 'new_env', + os.path.join( + output_dir, + "cpu_info.json"), mlc) + env['MLC_DUMP_RAW_PIP_FREEZE_FILE_PATH'] = os.path.join( + env['MLC_MLPERF_OUTPUT_DIR'], "pip_freeze.raw") + dump_script_output( + "dump,pip,freeze", + env, + state, + 'new_state', + os.path.join( + output_dir, + "pip_freeze.json"), mlc) + + return {'return': 0} + + +def dump_script_output(script_tags, env, state, output_key, dump_file, mlc): + + mlc_input = {'action': 'run', + 'automation': 'script', + 'tags': script_tags, + 'env': env, + 'state': state, + 'quiet': True, + 'silent': True, + } + r = mlc.access(mlc_input) + if r['return'] > 0: + return r + with open(dump_file, "w") as f: + f.write(json.dumps(r[output_key], indent=2)) + + return {'return': 0} diff --git a/script/app-mlperf-automotive/meta.yaml b/script/app-mlperf-automotive/meta.yaml index 896008c39..101a7b851 100644 --- a/script/app-mlperf-automotive/meta.yaml +++ b/script/app-mlperf-automotive/meta.yaml @@ -4,14 +4,16 @@ uid: f7488ce376484fd2 automation_alias: script automation_uid: 5b4e0237da074764 -category: "Modular MLPerf inference benchmark pipeline for ABTF model" +category: "Modular MLPerf automotive benchmark pipeline for ABTF models" # User-friendly tags to find this CM script tags: - app - app-mlperf-inference +- app-mlperf-inference-automotive - mlperf-inference +- mlperf-inference-automotive - abtf-inference predeps: no @@ -90,26 +92,53 @@ deps: # Use mlc inside scripts #- tags: get,generic-python-lib,_package.mlcflow - - tags: get,mlperf,inference,utils + - tags: get,mlperf,automotive,utils + +posthook_deps: + - tags: get,mlperf,sut,description #populate system meta information like framework + - tags: get,platform,details + enable_if_env: + MLC_GET_PLATFORM_DETAILS: + - yes + skip_if_env: + MLC_MLPERF_LOADGEN_MODE: + - accuracy + env: + MLC_PLATFORM_DETAILS_FILE_PATH: '<<>>/system_info.txt' + +post_deps: + - tags: draw,graph,from-json + enable_if_env: + MLC_MLPERF_RUN_JSON_VERSION_INFO_FILE: + - on + env: + MLC_JSON_INPUT_FILE: <<>> + MLC_OUTPUT_IMAGE_PATH: <<>> + MLC_OUTPUT_MERMAID_PATH: <<>> docker: - mlc_repo: gateoverflow@mlperf-automations - mlc_repo_branch: dev + mlc_repo: anandhu-eng@mlperf-automations + mlc_repo_branch: automotive use_host_group_id: True use_host_user_id: True real_run: false + user: mlcuser interactive: True mlc_repos_off: 'mlc pull repo mlcommons@cm4abtf --branch=poc' deps: - tags: get,abtf,scratch,space mounts: - "${{ MLC_ABTF_SCRATCH_PATH_DATASETS }}:${{ 
MLC_ABTF_SCRATCH_PATH_DATASETS }}" + - "${{ MLC_ML_MODEL_FILE_WITH_PATH }}:${{ MLC_ML_MODEL_FILE_WITH_PATH }}" # Variations to customize dependencies variations: + reference: + alias: mlcommons-python + # Implementation mlcommons-python: group: implementation @@ -121,7 +150,7 @@ variations: - names: - python-reference-abtf-inference - abtf-inference-implementation - tags: run-mlperf-inference,demo,abtf-model + tags: run-mlperf-inference,abtf-model skip_if_env: MLC_SKIP_RUN: - yes @@ -157,7 +186,19 @@ variations: add_deps_recursive: abtf-inference-implementation: tags: _onnxruntime - + ml-model-bevformer: + tags: _onnx + ml-model-ssd-resnet50: + tags: _onnx + ml-model-deeplab-v3+: + tags: _onnx + + onnx_dynamic: + base: + - onnxruntime + add_deps_recursive: + ml-model-deeplab-v3+: + tags: _onnx_dynamic onnxruntime,cpu: env: @@ -178,6 +219,12 @@ variations: add_deps_recursive: abtf-inference-implementation: tags: _pytorch + ml-model-bevformer: + tags: _pytorch + ml-model-ssd-resnet50: + tags: _pytorch + ml-model-deeplab-v3+: + tags: _pytorch abtf-demo-model: @@ -204,10 +251,120 @@ variations: enable_if_env: MLC_DATASET_MLCOMMONS_COGNATA_DOWNLOAD_IN_HOST: - yes - mounts: - "${{ MLC_DATASET_MLCOMMONS_COGNATA_PATH }}:${{ MLC_DATASET_MLCOMMONS_COGNATA_PATH }}" - + + bevformer: + group: + models + default_env: + MLC_USE_DATASET_FROM_HOST: yes + env: + MLC_MODEL: bevformer + docker: + deps: + - tags: get,preprocessed,dataset,nuscenes + enable_if_env: + MLC_USE_DATASET_FROM_HOST: + - "yes" + - tags: get,ml-model,bevformer + enable_if_env: + MLC_USE_MODEL_FROM_HOST: + - "yes" + names: + - ml-model-bevformer + mounts: + - "${{ MLC_PREPROCESSED_DATASET_NUSCENES_PATH }}:${{ MLC_PREPROCESSED_DATASET_NUSCENES_PATH }}" + - "${{ MLC_ML_MODEL_BEVFORMER_PATH }}:${{ MLC_ML_MODEL_BEVFORMER_PATH }}" + - "${{ MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH }}:${{ MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH }}" + add_deps_recursive: + abtf-inference-implementation: + tags: _bevformer + posthook_deps: + - enable_if_env: + MLC_MLPERF_LOADGEN_MODE: + - accuracy + - all + MLC_MLPERF_ACCURACY_RESULTS_DIR: + - 'on' + names: + - mlperf-accuracy-script + - nuscenes-accuracy-script + tags: run,accuracy,mlperf,_nuscenes + + ssd-resnet50: + group: + models + default_env: + MLC_USE_DATASET_FROM_HOST: yes + env: + MLC_MODEL: ssd-resnet50 + docker: + deps: + - tags: get,preprocessed,dataset,cognata + enable_if_env: + MLC_USE_DATASET_FROM_HOST: + - "yes" + - tags: get,ml-model,ssd,resnet50,_mlc,_rclone + enable_if_env: + MLC_USE_MODEL_FROM_HOST: + - "yes" + names: + - ml-model-ssd-resnet50 + mounts: + - "${{ MLC_PREPROCESSED_DATASET_COGNATA_PATH }}:${{ MLC_PREPROCESSED_DATASET_COGNATA_PATH }}" + - "${{ MLC_ML_MODEL_SSD_PATH }}:${{ MLC_ML_MODEL_SSD_PATH }}" + add_deps_recursive: + abtf-inference-implementation: + tags: _ssd-resnet50 + posthook_deps: + - enable_if_env: + MLC_MLPERF_LOADGEN_MODE: + - accuracy + - all + MLC_MLPERF_ACCURACY_RESULTS_DIR: + - 'on' + names: + - mlperf-accuracy-script + - cognata-ssd-accuracy-script + tags: run,accuracy,mlperf,_cognata_ssd + + deeplab-v3+: + group: + models + default_env: + MLC_USE_DATASET_FROM_HOST: yes + env: + MLC_MODEL: deeplab-v3+ + docker: + deps: + - tags: get,preprocessed,dataset,cognata,_segmentation + enable_if_env: + MLC_USE_DATASET_FROM_HOST: + - "yes" + - tags: get,ml-model,ssd,resnet50,_mlc,_rclone + enable_if_env: + MLC_USE_MODEL_FROM_HOST: + - "yes" + names: + - ml-model-deeplab-v3+ + mounts: + - "${{ MLC_PREPROCESSED_DATASET_COGNATA_PATH 
}}:${{ MLC_PREPROCESSED_DATASET_COGNATA_PATH }}" + - "${{ MLC_ML_MODEL_DEEPLABV3_PLUS_PATH }}:${{ MLC_ML_MODEL_DEEPLABV3_PLUS_PATH }}" + add_deps_recursive: + abtf-inference-implementation: + tags: _deeplab-v3+ + posthook_deps: + - enable_if_env: + MLC_MLPERF_LOADGEN_MODE: + - accuracy + - all + MLC_MLPERF_ACCURACY_RESULTS_DIR: + - 'on' + names: + - mlperf-accuracy-script + - cognata-deeplab-accuracy-script + tags: run,accuracy,mlperf,_cognata_deeplab # Target devices cpu: @@ -234,8 +391,18 @@ variations: docker: all_gpus: 'yes' base_image: nvcr.io/nvidia/pytorch:24.03-py3 + os_version: 22.04 + + v0.5: {} + + mvp-demo: {} + poc-demo: {} + v0.5,mlcommons-python,cpu: + docker: + base_image: ubuntu:22.04 + os_version: 22.04 # Loadgen scenarios offline: @@ -286,3 +453,11 @@ variations: MLC_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES: Cognata_Camera_01_8M MLC_ABTF_ML_MODEL_TRAINING_FORCE_COGNATA_LABELS: 'yes' MLC_ABTF_ML_MODEL_SKIP_WARMUP: 'yes' + + batch_size.#: + group: batch_size + env: + MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: '#' + add_deps_recursive: + abtf-inference-implementation: + tags: _batch_size.# \ No newline at end of file diff --git a/script/build-dockerfile/customize.py b/script/build-dockerfile/customize.py index 9f3776c2d..b240e4164 100644 --- a/script/build-dockerfile/customize.py +++ b/script/build-dockerfile/customize.py @@ -228,7 +228,7 @@ def preprocess(i): f.write('RUN ' + env['MLC_DOCKER_EXTRA_SYS_DEPS'] + EOL) if env['MLC_DOCKER_OS'] == "ubuntu": - if int(env['MLC_DOCKER_OS_VERSION'].split('.')[0]) >= 23: + if int(str(env['MLC_DOCKER_OS_VERSION']).split('.')[0]) >= 23: if "--break-system-packages" not in env.get( 'MLC_DOCKER_PIP_INSTALL_EXTRA_FLAGS', ''): env['MLC_DOCKER_PIP_INSTALL_EXTRA_FLAGS'] = " --break-system-packages" diff --git a/script/generate-mlperf-inference-user-conf/meta.yaml b/script/generate-mlperf-inference-user-conf/meta.yaml index fbba97b33..4288bd443 100644 --- a/script/generate-mlperf-inference-user-conf/meta.yaml +++ b/script/generate-mlperf-inference-user-conf/meta.yaml @@ -84,13 +84,20 @@ deps: OUTPUT_BASE_DIR: - "on" - ######################################################################## - # Install MLPerf inference dependencies - - # Download MLPerf inference source - - tags: get,mlcommons,inference,src - names: - - inference-src - # Get SUT configs (System Under Test) - tags: get,sut,configs + +variations: + inference: + default: true + group: benchmark_wg + deps: + - tags: get,mlcommons,inference,src + names: + - inference-src + automotive: + group: benchmark_wg + deps: + - tags: get,mlcommons,automotive,src + names: + - automotive-src \ No newline at end of file diff --git a/script/get-dataset-cognata-mlcommons/customize.py b/script/get-dataset-cognata-mlcommons/customize.py index be725599d..64406d44e 100644 --- a/script/get-dataset-cognata-mlcommons/customize.py +++ b/script/get-dataset-cognata-mlcommons/customize.py @@ -8,6 +8,10 @@ def preprocess(i): env = i['env'] + if env.get('MLC_COGNATA_DATASET_TYPE', '') == "release": + env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" + return {'return': 0} + mlc_cache_dataset_path = env.get( 'MLC_CUSTOM_CACHE_ENTRY_DATASET_MLCOMMONS_COGNATA_PATH', '').strip() @@ -61,6 +65,9 @@ def postprocess(i): logger = automation.logger + if env.get('MLC_COGNATA_DATASET_TYPE', '') == "release": + return {'return': 0} + cur_dir = os.getcwd() quiet = is_true(env.get('MLC_QUIET', False)) diff --git a/script/get-dataset-cognata-mlcommons/meta.yaml b/script/get-dataset-cognata-mlcommons/meta.yaml index 309b6ba90..ecb10799a 100644 --- 
a/script/get-dataset-cognata-mlcommons/meta.yaml +++ b/script/get-dataset-cognata-mlcommons/meta.yaml @@ -48,9 +48,11 @@ deps: - custom-cache-entry-mlcommons-cognata-dataset tags: create,custom,cache,entry extra_cache_tags: dataset,cognata,mlcommons-cognata - skip_if_env: + skip_if_any_env: MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: - 'off' + MLC_DOWNLOAD_SRC: + - mlcommons env_key: DATASET_MLCOMMONS_COGNATA # this script will prepare env MLC_CUSTOM_CACHE_ENTRY_{env_key}_PATH @@ -58,9 +60,11 @@ deps: prehook_deps: - names: - gdrive-downloader-cognata - skip_if_env: + skip_if_any_env: MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: - 'on' + MLC_DOWNLOAD_SRC: + - mlcommons enable_if_env: MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED: - 'no' @@ -76,9 +80,11 @@ prehook_deps: - names: - rclone-downloader-cognata - skip_if_env: + skip_if_any_env: MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: - 'on' + MLC_DOWNLOAD_SRC: + - mlcommons enable_if_env: MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED: - 'no' @@ -101,27 +107,33 @@ prehook_deps: - python - python3 tags: get,python3 - skip_if_env: + skip_if_any_env: MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED: - 'yes' + MLC_DOWNLOAD_SRC: + - mlcommons enable_if_env: MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: - 'on' # Python package to read/write Excel files - tags: get,generic-python-lib,_package.openpyxl - skip_if_env: + skip_if_any_env: MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED: - 'yes' + MLC_DOWNLOAD_SRC: + - mlcommons enable_if_env: MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: - 'on' # Tool to download large files - tags: get,aria2 - skip_if_env: + skip_if_any_env: MLC_DATASET_MLCOMMONS_COGNATA_IMPORTED: - 'yes' + MLC_DOWNLOAD_SRC: + - mlcommons enable_if_env: MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: - 'on' @@ -143,6 +155,11 @@ variations: MLC_DATASET_MLCOMMONS_COGNATA_GROUP_NAMES: "Cognata_Camera_01_8M" MLC_DATASET_MLCOMMONS_COGNATA_FILE_NAMES: "" + release: + group: dataset-type + env: + MLC_COGNATA_DATASET_TYPE: "release" + rclone: group: download-tool default: true @@ -153,6 +170,57 @@ variations: group: download-tool env: MLC_DATASET_MLCOMMONS_COGNATA_DOWNLOAD_TOOL: gdrive + + mlc: + group: download-src + prehook_deps: + - tags: get,rclone + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes + - tags: get,rclone-config,_config-name.cognata + force_cache: true + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes + env: + MLC_RCLONE_DRIVE_FOLDER_ID: 1u5FDoeXHVtDrd4zClE47Gmyr7iLFidz1 + - enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - 'yes' + env: + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_DATASET_MLCOMMONS_COGNATA_PATH + MLC_EXTRACT_FINAL_ENV_NAME: MLC_DATASET_MLCOMMONS_COGNATA_PATH + MLC_DOWNLOAD_URL: 'mlc-cognata:mlc_cognata_dataset/unprocessed' + extra_cache_tags: dataset,cognata,release + force_cache: true + names: + - dae + tags: download-and-extract + force_env_keys: + - MLC_OUTDIRNAME + update_tags_from_env_with_prefix: + _url.: + - MLC_DOWNLOAD_URL + env: + MLC_DOWNLOAD_SRC: mlcommons + + rclone,mlc: + add_deps_recursive: + dae: + tags: _rclone + env: + MLC_RCLONE_COPY_USING: sync + + dry-run: + group: run-mode + env: + MLC_DOWNLOAD_MODE: dry + + dry-run,rclone: + env: + MLC_DOWNLOAD_EXTRA_OPTIONS: --dry-run + new_env_keys: - MLC_DATASET_MLCOMMONS_COGNATA* diff --git a/script/get-dataset-cognata-mlcommons/run.sh b/script/get-dataset-cognata-mlcommons/run.sh new file mode 100644 index 000000000..5563a95da --- /dev/null +++ b/script/get-dataset-cognata-mlcommons/run.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo 
"VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + +if [[ "$MLC_DOWNLOAD_MODE" != "dry" && "$MLC_TMP_REQUIRE_DOWNLOAD" == "true" && "$MLC_COGNATA_DATASET_TYPE" == "release" ]]; then + cd "${MLC_DATASET_MLCOMMONS_COGNATA_PATH}" || exit + for f in *.tar.gz; do + tar -xzvf "$f" || { echo "Failed to extract $f"; exit 1; } + done + cd - || exit +fi \ No newline at end of file diff --git a/script/get-dataset-nuscenes/COPYRIGHT.md b/script/get-dataset-nuscenes/COPYRIGHT.md new file mode 100644 index 000000000..d2ceead84 --- /dev/null +++ b/script/get-dataset-nuscenes/COPYRIGHT.md @@ -0,0 +1,9 @@ +# Copyright Notice + +© 2025-2026 MLCommons. All Rights Reserved. + +This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at: + +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) + +Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. diff --git a/script/get-dataset-nuscenes/customize.py b/script/get-dataset-nuscenes/customize.py new file mode 100644 index 000000000..d4abad774 --- /dev/null +++ b/script/get-dataset-nuscenes/customize.py @@ -0,0 +1,23 @@ +from mlc import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if os_info['platform'] == "windows": + return {'return': 1, 'error': 'Script not supported in windows yet!'} + + env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/script/get-dataset-nuscenes/meta.yaml b/script/get-dataset-nuscenes/meta.yaml new file mode 100644 index 000000000..e12787bdf --- /dev/null +++ b/script/get-dataset-nuscenes/meta.yaml @@ -0,0 +1,57 @@ +alias: get-dataset-nuscenes +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +tags: +- get +- dataset +- nuscenes +uid: ec2a0842c9a644f5 +new_env_keys: + - MLC_DATASET_NUSCENES_PATH +variations: + mlc: + group: download-src + default: true + prehook_deps: + - tags: get,rclone + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes + - tags: get,rclone-config,_waymo + force_cache: true + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes + - enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - 'yes' + env: + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_DATASET_NUSCENES_PATH + MLC_EXTRACT_FINAL_ENV_NAME: MLC_DATASET_NUSCENES_PATH + MLC_DOWNLOAD_URL: mlc-nuscenes:nuscenes_dataset + extra_cache_tags: nuscenes,dataset + force_cache: true + names: + - dae + tags: download-and-extract + force_env_keys: + - MLC_OUTDIRNAME + update_tags_from_env_with_prefix: + _url.: + - MLC_DOWNLOAD_URL + env: + MLC_DOWNLOAD_SRC: mlcommons + rclone: + group: download-tool + add_deps_recursive: + dae: + tags: _rclone + default: true + dry-run: + group: run-mode + env: + MLC_DOWNLOAD_MODE: dry + dry-run,rclone: + env: + MLC_DOWNLOAD_EXTRA_OPTIONS: --dry-run diff --git a/script/get-dataset-nuscenes/run.sh b/script/get-dataset-nuscenes/run.sh new file mode 100644 index 000000000..abe5a17c7 --- /dev/null +++ b/script/get-dataset-nuscenes/run.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +#CM Script location: 
${MLC_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + +if [[ "$MLC_DOWNLOAD_MODE" != "dry" && "$MLC_TMP_REQUIRE_DOWNLOAD" = "true" ]]; then + cd "${MLC_DATASET_NUSCENES_PATH}" || exit + for f in *.tar.gz; do + tar -xzvf "$f" || { echo "Failed to extract $f"; exit 1; } + done + cd "${MLC_DATASET_NUSCENES_PATH}/nuscenes" || exit + for f in *.tar.gz; do + tar -xzvf "$f" || { echo "Failed to extract $f"; exit 1; } + done + cd - || exit +fi \ No newline at end of file diff --git a/script/get-generic-sys-util/meta.yaml b/script/get-generic-sys-util/meta.yaml index eac6ecdb1..f058b3127 100644 --- a/script/get-generic-sys-util/meta.yaml +++ b/script/get-generic-sys-util/meta.yaml @@ -153,6 +153,20 @@ variations: brew: '' dnf: mesa-libGL yum: mesa-libGL + libgl1-mesa-glx: + env: + MLC_SYS_UTIL_NAME: libgl1-mesa-glx # tbd: regular expression for version as well as whether its installed? + MLC_SYS_UTIL_CHECK_CMD: 'ldconfig -p | grep -i libGLX_mesa.so.*' + default_env: + MLC_GENERIC_SYS_UTIL_IGNORE_MISSING_PACKAGE: yes + new_env_keys: + - MLC_LIBGLX_VERSION + state: + libgl: # tbd: complete for other flavours of linux + apt: libgl1-mesa-glx + brew: '' + dnf: '' + yum: '' libsm6: env: MLC_SYS_UTIL_NAME: libsm6 # tbd: regular expression for version as well as whether its installed? diff --git a/script/get-git-repo/meta.yaml b/script/get-git-repo/meta.yaml index efdf3bf63..468468bb5 100644 --- a/script/get-git-repo/meta.yaml +++ b/script/get-git-repo/meta.yaml @@ -79,7 +79,7 @@ variations: git-history: full-history env: MLC_GIT_SHA: '#' - group: checkout + group: post-checkout short-history: default: true env: diff --git a/script/get-ml-model-abtf-ssd-pytorch/customize.py b/script/get-ml-model-abtf-ssd-pytorch/customize.py index a4fa7f16c..e63e92fff 100644 --- a/script/get-ml-model-abtf-ssd-pytorch/customize.py +++ b/script/get-ml-model-abtf-ssd-pytorch/customize.py @@ -26,6 +26,9 @@ def preprocess(i): 'error': 'ML model {} is not found'.format(ml_model)} env['MLC_ML_MODEL_FILE_WITH_PATH'] = ml_model + # handles download from mlcommons gdrive + elif env.get('MLC_DOWNLOAD_SRC', '') == "mlcommons" and env.get('MLC_ML_MODEL_SSD_PATH', '') == '': + env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'yes' return {'return': 0} @@ -35,7 +38,13 @@ def postprocess(i): env = i['env'] if env.get('MLC_ML_MODEL_FILE_WITH_PATH', '') == '': - env['MLC_ML_MODEL_FILE_WITH_PATH'] = 'model-weights-skipped' + if env.get('MLC_ML_MODEL_SSD_PATH', '') == '': + env['MLC_ML_MODEL_FILE_WITH_PATH'] = 'model-weights-skipped' + else: + if is_true(env.get('MLC_TMP_REQUIRE_DOWNLOAD', '')): + env['MLC_ML_MODEL_SSD_PATH'] = os.path.join( + env['MLC_ML_MODEL_SSD_PATH'], env['MLC_ML_MODEL_FILENAME']) + env['MLC_ML_MODEL_FILE_WITH_PATH'] = env['MLC_ML_MODEL_SSD_PATH'] env['MLC_ML_MODEL_FILE'] = os.path.basename( env['MLC_ML_MODEL_FILE_WITH_PATH']) diff --git a/script/get-ml-model-abtf-ssd-pytorch/meta.yaml b/script/get-ml-model-abtf-ssd-pytorch/meta.yaml index b9f70ebc3..017acec37 100644 --- a/script/get-ml-model-abtf-ssd-pytorch/meta.yaml +++ b/script/get-ml-model-abtf-ssd-pytorch/meta.yaml @@ -17,6 +17,8 @@ tags: - get - ml-model - abtf-ssd-pytorch +- ssd +- resnet50 - cmc @@ -38,9 +40,11 @@ deps: names: - abtf-ssd-pytorch-git-repo - abtf-ml-model-code-git-repo - skip_if_env: + skip_if_any_env: MLC_SKIP_MODEL_CODE_DOWNLOAD: - 'yes' + MLC_DOWNLOAD_SRC: + - 'mlcommons' env: MLC_GIT_AUTH: 
'yes'
       MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_ABTF_SSD_PYTORCH
@@ -62,9 +66,11 @@
   names:
   - abtf-ml-model-weights
   - abtf-ml-model-weights-download
-  skip_if_env:
+  skip_if_any_env:
     MLC_SKIP_MODEL_WEIGHTS_DOWNLOAD:
     - 'yes'
+    MLC_DOWNLOAD_SRC:
+    - 'mlcommons'
   update_tags_from_env_with_prefix:
     _url.:
     - MLC_ML_MODEL_URL
@@ -77,6 +83,7 @@ new_env_keys:
 print_env_at_the_end:
   MLC_ML_MODEL_FILE_WITH_PATH: Path to the ML model weights
   MLC_ML_MODEL_CODE_WITH_PATH: Path to the ML model code
+  MLC_ML_MODEL_SSD_PATH: Path to the SSD ResNet50 model
 
 
 variations:
@@ -172,3 +179,81 @@ variations:
     adr:
       abtf-ml-model-weights-download:
         tags: _gdown
+
+  onnx:
+    group: model-format
+    default: true
+    env:
+      MLC_MODEL_FORMAT: onnx
+
+  onnx,mlc:
+    env:
+      MLC_MODEL_RCLONE_FILEPATH: model_checkpoint_ssd/ssd_resnet50.onnx
+      MLC_ML_MODEL_FILENAME: ssd_resnet50.onnx
+
+  pytorch:
+    group: model-format
+    env:
+      MLC_MODEL_FORMAT: pth
+
+  pytorch,mlc:
+    env:
+      MLC_MODEL_RCLONE_FILEPATH: model_checkpoint_ssd/baseline_8MP_ss_scales_fm1_5x5_all_ep60.pth
+      MLC_ML_MODEL_FILENAME: baseline_8MP_ss_scales_fm1_5x5_all_ep60.pth
+
+  rclone,mlc:
+    group: download-tool
+    add_deps_recursive:
+      dae:
+        tags: _rclone
+    env:
+      MLC_RCLONE_COPY_USING: sync
+
+  dry-run:
+    group: run-mode
+    env:
+      MLC_DOWNLOAD_MODE: dry
+
+  dry-run,rclone:
+    env:
+      MLC_DOWNLOAD_EXTRA_OPTIONS: --dry-run
+
+  mlc:
+    group: download-src
+    prehook_deps:
+    - tags: get,rclone
+      enable_if_env:
+        MLC_TMP_REQUIRE_DOWNLOAD:
+        - yes
+    - tags: get,rclone-config,_config-name.cognata
+      force_cache: true
+      enable_if_env:
+        MLC_TMP_REQUIRE_DOWNLOAD:
+        - yes
+      env:
+        MLC_RCLONE_DRIVE_FOLDER_ID: 1u5FDoeXHVtDrd4zClE47Gmyr7iLFidz1
+    - enable_if_env:
+        MLC_TMP_REQUIRE_DOWNLOAD:
+        - 'yes'
+      env:
+        MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_ML_MODEL_SSD_PATH
+        MLC_EXTRACT_FINAL_ENV_NAME: MLC_ML_MODEL_SSD_PATH
+        MLC_DOWNLOAD_URL: 'mlc-cognata:mlc_cognata_dataset/<<>>'
+        MLC_DOWNLOAD_EXTRA_OPTIONS: ' --include '
+      extra_cache_tags: ml,model,ssd,resnet50
+      force_cache: true
+      names:
+      - dae
+      tags: download-and-extract
+      force_env_keys:
+      - MLC_OUTDIRNAME
+      update_tags_from_env_with_prefix:
+        _url.:
+        - MLC_DOWNLOAD_URL
+    env:
+      MLC_DOWNLOAD_SRC: mlcommons
+tests:
+  run_inputs:
+  - variations_list:
+    - onnx,rclone,mlc,dry-run
+    - pytorch,rclone,mlc,dry-run
\ No newline at end of file
diff --git a/script/get-ml-model-bevformer/COPYRIGHT.md b/script/get-ml-model-bevformer/COPYRIGHT.md
new file mode 100644
index 000000000..d2ceead84
--- /dev/null
+++ b/script/get-ml-model-bevformer/COPYRIGHT.md
@@ -0,0 +1,9 @@
+# Copyright Notice
+
+© 2025-2026 MLCommons. All Rights Reserved.
+
+This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at:
+
+[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)
+
+Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License.
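(Editorial note: the three get-ml-model-* scripts added in this patch share one two-phase contract: `preprocess` requests a download when no local path is supplied, and `postprocess` turns the directory left behind by the download-and-extract dependency into a full checkpoint path. A minimal, self-contained sketch of that contract follows; the `__main__` driver and the `cache/ssd` path are illustrative only and not part of the patch.)

```python
# Sketch of the preprocess/postprocess contract used by the get-ml-model-*
# scripts in this patch. The download itself is performed by the `dae`
# dependency declared in meta.yaml, simulated here by hand.
import os


def preprocess(i):
    env = i['env']
    # No model path supplied -> ask the automation to download one
    # (the prehook deps fire on MLC_TMP_REQUIRE_DOWNLOAD).
    if env.get('MLC_ML_MODEL_SSD_PATH', '') == '':
        env['MLC_TMP_REQUIRE_DOWNLOAD'] = 'yes'
    return {'return': 0}


def postprocess(i):
    env = i['env']
    # After a download, MLC_ML_MODEL_SSD_PATH holds the *directory*;
    # append the file name so MLC_ML_MODEL_FILE_WITH_PATH is the checkpoint.
    if env.get('MLC_TMP_REQUIRE_DOWNLOAD', '') == 'yes':
        env['MLC_ML_MODEL_SSD_PATH'] = os.path.join(
            env['MLC_ML_MODEL_SSD_PATH'], env['MLC_ML_MODEL_FILENAME'])
    env['MLC_ML_MODEL_FILE_WITH_PATH'] = env['MLC_ML_MODEL_SSD_PATH']
    return {'return': 0}


if __name__ == '__main__':
    # Simulate a run where the dae dependency downloaded into ./cache/ssd
    i = {'env': {'MLC_ML_MODEL_FILENAME': 'ssd_resnet50.onnx'}}
    preprocess(i)
    i['env']['MLC_ML_MODEL_SSD_PATH'] = os.path.join('cache', 'ssd')
    postprocess(i)
    print(i['env']['MLC_ML_MODEL_FILE_WITH_PATH'])  # cache/ssd/ssd_resnet50.onnx
```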
diff --git a/script/get-ml-model-bevformer/customize.py b/script/get-ml-model-bevformer/customize.py
new file mode 100644
index 000000000..9afc37740
--- /dev/null
+++ b/script/get-ml-model-bevformer/customize.py
@@ -0,0 +1,31 @@
+from mlc import utils
+from utils import is_true
+import os
+
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+
+    if os_info['platform'] == "windows":
+        return {'return': 1, 'error': 'Script not supported in windows yet!'}
+
+    if env.get('MLC_ML_MODEL_BEVFORMER_PATH', '') == '':
+        env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes"
+
+    return {'return': 0}
+
+
+def postprocess(i):
+
+    env = i['env']
+
+    if is_true(env.get('MLC_TMP_REQUIRE_DOWNLOAD', '')):
+        env['MLC_ML_MODEL_BEVFORMER_PATH'] = os.path.join(
+            env['MLC_ML_MODEL_BEVFORMER_PATH'], env['MLC_ML_MODEL_FILENAME'])
+
+    env['MLC_ML_MODEL_FILE_WITH_PATH'] = env['MLC_ML_MODEL_BEVFORMER_PATH']
+
+    return {'return': 0}
diff --git a/script/get-ml-model-bevformer/meta.yaml b/script/get-ml-model-bevformer/meta.yaml
new file mode 100644
index 000000000..e4c156030
--- /dev/null
+++ b/script/get-ml-model-bevformer/meta.yaml
@@ -0,0 +1,81 @@
+alias: get-ml-model-bevformer
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: true
+tags:
+- get
+- ml-model
+- bevformer
+uid: 438a053f666443bd
+new_env_keys:
+  - MLC_ML_MODEL_BEVFORMER_PATH
+  - MLC_ML_MODEL_FILE_WITH_PATH
+print_env_at_the_end:
+  MLC_ML_MODEL_BEVFORMER_PATH: BevFormer checkpoint path
+variations:
+  onnx:
+    group: model-format
+    default: true
+    env:
+      MLC_MODEL_FORMAT: onnx
+      MLC_MODEL_RCLONE_FILEPATH: model_checkpoint_bevformer/bevformer_tiny.onnx
+      MLC_ML_MODEL_FILENAME: bevformer_tiny.onnx
+  pytorch:
+    group: model-format
+    env:
+      MLC_MODEL_FORMAT: pth
+      MLC_MODEL_RCLONE_FILEPATH: model_checkpoint_bevformer/bevformer_tiny_epoch_24.pth
+      MLC_ML_MODEL_FILENAME: bevformer_tiny_epoch_24.pth
+  mlc:
+    group: download-src
+    default: true
+    prehook_deps:
+    - tags: get,rclone
+      enable_if_env:
+        MLC_TMP_REQUIRE_DOWNLOAD:
+        - yes
+    - tags: get,rclone-config,_config-name.mlc-nuscenes
+      force_cache: true
+      enable_if_env:
+        MLC_TMP_REQUIRE_DOWNLOAD:
+        - yes
+      env:
+        MLC_RCLONE_DRIVE_FOLDER_ID: 17CpM5eU8tjrxh_LpH_BTNTeT37PhzcnC
+    - enable_if_env:
+        MLC_TMP_REQUIRE_DOWNLOAD:
+        - 'yes'
+      env:
+        MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_ML_MODEL_BEVFORMER_PATH
+        MLC_EXTRACT_FINAL_ENV_NAME: MLC_ML_MODEL_BEVFORMER_PATH
+        MLC_DOWNLOAD_URL: 'mlc-nuscenes:<<>>'
+        MLC_DOWNLOAD_EXTRA_OPTIONS: ' --include '
+      extra_cache_tags: ml,model,bevformer
+      force_cache: true
+      names:
+      - dae
+      tags: download-and-extract
+      force_env_keys:
+      - MLC_OUTDIRNAME
+      update_tags_from_env_with_prefix:
+        _url.:
+        - MLC_DOWNLOAD_URL
+    env:
+      MLC_DOWNLOAD_SRC: mlcommons
+  rclone:
+    group: download-tool
+    add_deps_recursive:
+      dae:
+        tags: _rclone
+    default: true
+  dry-run:
+    group: run-mode
+    env:
+      MLC_DOWNLOAD_MODE: dry
+  dry-run,rclone:
+    env:
+      MLC_DOWNLOAD_EXTRA_OPTIONS: --dry-run
+tests:
+  run_inputs:
+  - variations_list:
+    - onnx,rclone,mlc,dry-run
+    - pytorch,rclone,mlc,dry-run
\ No newline at end of file
diff --git a/script/get-ml-model-bevformer/run.sh b/script/get-ml-model-bevformer/run.sh
new file mode 100644
index 000000000..a9bf588e2
--- /dev/null
+++ b/script/get-ml-model-bevformer/run.sh
@@ -0,0 +1 @@
+#!/bin/bash
diff --git a/script/get-ml-model-deeplabv3_plus/COPYRIGHT.md b/script/get-ml-model-deeplabv3_plus/COPYRIGHT.md
new file mode 100644
index 000000000..d2ceead84
--- /dev/null
+++ b/script/get-ml-model-deeplabv3_plus/COPYRIGHT.md
@@ -0,0 +1,9 @@
+# Copyright 
Notice + +© 2025-2026 MLCommons. All Rights Reserved. + +This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at: + +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) + +Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. diff --git a/script/get-ml-model-deeplabv3_plus/customize.py b/script/get-ml-model-deeplabv3_plus/customize.py new file mode 100644 index 000000000..2fff179b1 --- /dev/null +++ b/script/get-ml-model-deeplabv3_plus/customize.py @@ -0,0 +1,30 @@ +from mlc import utils +import os +from utils import is_true + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if os_info['platform'] == "windows": + return {'return': 1, 'error': 'Script not supported in windows yet!'} + + if env.get('MLC_ML_MODEL_DEEPLABV3_PLUS_PATH', '') == '': + env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes" + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + if is_true(env.get('MLC_TMP_REQUIRE_DOWNLOAD', '')): + env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH'] = os.path.join( + env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH'], env['MLC_ML_MODEL_FILENAME']) + env['MLC_ML_MODEL_FILE_WITH_PATH'] = env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH'] + + return {'return': 0} diff --git a/script/get-ml-model-deeplabv3_plus/meta.yaml b/script/get-ml-model-deeplabv3_plus/meta.yaml new file mode 100644 index 000000000..66215eb09 --- /dev/null +++ b/script/get-ml-model-deeplabv3_plus/meta.yaml @@ -0,0 +1,91 @@ +alias: get-ml-model-deeplabv3-plus +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +tags: +- get +- ml-model +- deeplab +- v3-plus +- deeplabv3-plus +uid: cfb2d53b9dbc4dc0 +new_env_keys: + - MLC_ML_MODEL_DEEPLABV3_PLUS_PATH + - MLC_ML_MODEL_FILE_WITH_PATH +print_env_at_the_end: + MLC_ML_MODEL_DEEPLABV3_PLUS_PATH: DeepLabV3+ checkpoint path +variations: + onnx: + group: model-format + default: true + env: + MLC_MODEL_FORMAT: onnx + MLC_MODEL_RCLONE_FILEPATH: model_checkpoint_deeplab/deeplabv3+_8mp.onnx + MLC_ML_MODEL_FILENAME: deeplabv3+_8mp.onnx + dynamic: {} + onnx,dynamic: + group: model-format + env: + MLC_MODEL_FORMAT: onnx + MLC_MODEL_RCLONE_FILEPATH: model_checkpoint_deeplab/deeplabv3+_dynamic.onnx + MLC_ML_MODEL_FILENAME: deeplabv3+_dynamic.onnx + pytorch: + group: model-format + env: + MLC_MODEL_FORMAT: pth + MLC_MODEL_RCLONE_FILEPATH: model_checkpoint_deeplab/latest_deeplabv3plus_resnet50_cognata_os16_it100000.pth + MLC_ML_MODEL_FILENAME: latest_deeplabv3plus_resnet50_cognata_os16_it100000.pth + mlc: + group: download-src + default: true + prehook_deps: + - tags: get,rclone + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes + - tags: get,rclone-config,_config-name.cognata + force_cache: true + enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - yes + env: + MLC_RCLONE_DRIVE_FOLDER_ID: 1u5FDoeXHVtDrd4zClE47Gmyr7iLFidz1 + - enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - 'yes' + env: + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_ML_MODEL_DEEPLABV3_PLUS_PATH + MLC_EXTRACT_FINAL_ENV_NAME: MLC_ML_MODEL_DEEPLABV3_PLUS_PATH + MLC_DOWNLOAD_URL: 'mlc-cognata:mlc_cognata_dataset/<<>>' + MLC_DOWNLOAD_EXTRA_OPTIONS: ' --include ' + extra_cache_tags: ml,model,deeplabv3,plus + force_cache: true 
+ names: + - dae + tags: download-and-extract + force_env_keys: + - MLC_OUTDIRNAME + update_tags_from_env_with_prefix: + _url.: + - MLC_DOWNLOAD_URL + env: + MLC_DOWNLOAD_SRC: mlcommons + rclone: + group: download-tool + add_deps_recursive: + dae: + tags: _rclone + default: true + dry-run: + group: run-mode + env: + MLC_DOWNLOAD_MODE: dry + dry-run,rclone: + env: + MLC_DOWNLOAD_EXTRA_OPTIONS: --dry-run +tests: + run_inputs: + - variations_list: + - onnx,rclone,mlc,dry-run + - onnx_dynamic,rclone,mlc,dry-run + - pytorch,rclone,mlc,dry-run \ No newline at end of file diff --git a/script/get-ml-model-deeplabv3_plus/run.sh b/script/get-ml-model-deeplabv3_plus/run.sh new file mode 100644 index 000000000..05a7907cf --- /dev/null +++ b/script/get-ml-model-deeplabv3_plus/run.sh @@ -0,0 +1,2 @@ +#!/bin/bash + diff --git a/script/get-mlperf-automotive-src/COPYRIGHT.md b/script/get-mlperf-automotive-src/COPYRIGHT.md new file mode 100644 index 000000000..d2ceead84 --- /dev/null +++ b/script/get-mlperf-automotive-src/COPYRIGHT.md @@ -0,0 +1,9 @@ +# Copyright Notice + +© 2025-2026 MLCommons. All Rights Reserved. + +This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at: + +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) + +Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. diff --git a/script/get-mlperf-automotive-src/customize.py b/script/get-mlperf-automotive-src/customize.py new file mode 100644 index 000000000..69b336134 --- /dev/null +++ b/script/get-mlperf-automotive-src/customize.py @@ -0,0 +1,154 @@ +from mlc import utils +from utils import is_true +import os +import shutil + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + meta = i['meta'] + + script_path = i['run_script_input']['path'] + + if env.get('MLC_GIT_CHECKOUT', '') == '' and env.get( + 'MLC_GIT_URL', '') == '' and env.get('MLC_VERSION', '') == '': + # if custom checkout and url parameters are not set and MLC_VERSION is + # not specified + env['MLC_VERSION'] = "master" + env["MLC_GIT_CHECKOUT"] = "master" + env["MLC_GIT_URL"] = "https://github.com/mlcommons/mlperf_automotive" + elif env.get('MLC_GIT_CHECKOUT', '') != '' and env.get('MLC_TMP_GIT_CHECKOUT', '') != '' and env.get('MLC_GIT_CHECKOUT', '') != env.get('MLC_TMP_GIT_CHECKOUT', ''): + # if checkout branch is assigned inside version and custom branch is + # also specified + return { + "return": 1, "error": "Conflicting branches between version assigned and user specified."} + elif env.get('MLC_GIT_URL', '') != '' and env.get('MLC_TMP_GIT_URL', '') != '' and env.get('MLC_GIT_URL', '') != env.get('MLC_TMP_GIT_URL', ''): + # if GIT URL is assigned inside version and custom branch is also + # specified + return { + "return": 1, "error": "Conflicting URL's between version assigned and user specified."} + + if env.get('MLC_VERSION', '') == '': + env['MLC_VERSION'] = "custom" + + # check whether branch and url is specified, + # if not try to assign the values specified in version parameters, + # if version parameters does not have the value to a parameter, set the + # default one + if env.get('MLC_GIT_CHECKOUT', '') == '' and env.get( + 
'MLC_GIT_CHECKOUT_TAG', '') == '': + if env.get('MLC_TMP_GIT_CHECKOUT', '') != '': + env["MLC_GIT_CHECKOUT"] = env["MLC_TMP_GIT_CHECKOUT"] + else: + env["MLC_GIT_CHECKOUT"] = "master" + + if env.get('MLC_GIT_URL', '') == '': + if env.get('MLC_TMP_GIT_URL', '') != '': + env["MLC_GIT_URL"] = env["MLC_TMP_GIT_URL"] + else: + env["MLC_GIT_URL"] = "https://github.com/mlcommons/mlperf_automotive" + + if env.get("MLC_MLPERF_LAST_RELEASE", '') == '': + env["MLC_MLPERF_LAST_RELEASE"] = "v0.5" + + if 'MLC_GIT_DEPTH' not in env: + env['MLC_GIT_DEPTH'] = '' + + if 'MLC_GIT_RECURSE_SUBMODULES' not in env: + env['MLC_GIT_RECURSE_SUBMODULES'] = '' + submodules = [] + possible_submodules = { + "pybind": "third_party/pybind", + } + for submodule in possible_submodules: + env_name = submodule.upper().replace("-", "_") + if is_true(env.get("MLC_SUBMODULE_" + env_name)): + submodules.append(possible_submodules[submodule]) + + env['MLC_GIT_SUBMODULES'] = ",".join(submodules) + + if env.get('MLC_GIT_PATCH_FILENAME', '') != '': + patch_file_name = env['MLC_GIT_PATCH_FILENAME'] + env['MLC_GIT_PATCH_FILEPATHS'] = os.path.join( + script_path, 'patch', patch_file_name) + + need_version = env.get('MLC_VERSION', '') + versions = meta['versions'] + + if need_version != '' and not need_version in versions: + env['MLC_GIT_CHECKOUT'] = need_version + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + automotive_root = env['MLC_MLPERF_INFERENCE_SOURCE'] + env['MLC_MLPERF_INFERENCE_BEVFORMER_PATH'] = os.path.join( + automotive_root, 'automotive', 'camera-3d-detection') + env['MLC_MLPERF_INFERENCE_SSD_RESNET50_PATH'] = os.path.join( + automotive_root, 'automotive', '2d-object-detection') + env['MLC_MLPERF_INFERENCE_DEEPLABV3PLUS_PATH'] = os.path.join( + automotive_root, 'automotive', 'semantic-segmentation') + + env['MLC_GET_DEPENDENT_CACHED_PATH'] = automotive_root + +# 20221024: we save and restore env in the main script and can clean env here for determinism +# if '+PYTHONPATH' not in env: env['+PYTHONPATH'] = [] + env['+PYTHONPATH'] = [] + + if os.path.exists(os.path.join(automotive_root, "loadgen", "VERSION.txt")): + with open(os.path.join(automotive_root, "loadgen", "VERSION.txt")) as f: + version_info = f.read().strip() + env['MLC_MLPERF_INFERENCE_SOURCE_VERSION'] = version_info + + if is_true(env.get('MLC_GET_MLPERF_IMPLEMENTATION_ONLY', '')): + return {'return': 0} + + env['MLC_MLPERF_INFERENCE_CONF_PATH'] = os.path.join( + automotive_root, 'mlperf.conf') + env['+PYTHONPATH'].append( + os.path.join( + env['MLC_MLPERF_INFERENCE_SOURCE'], + 'tools', + 'submission')) + + # To be uncommented after Pablo's PR is merged: https://github.com/mlcommons/mlperf_automotive/pull/14 + # valid_models = get_valid_models( + # env['MLC_MLPERF_LAST_RELEASE'], + # env['MLC_MLPERF_INFERENCE_SOURCE']) + + # state['MLC_MLPERF_AUTOMOTIVE_MODELS'] = valid_models + + if env.get('MLC_GIT_REPO_CURRENT_HASH', '') != '': + env['MLC_VERSION'] += "-git-" + env['MLC_GIT_REPO_CURRENT_HASH'] + + return {'return': 0, 'version': env['MLC_VERSION']} + + +def get_valid_models(mlperf_version, mlperf_path): + + import sys + + submission_checker_dir = os.path.join(mlperf_path, "tools", "submission") + + sys.path.append(submission_checker_dir) + + if not os.path.exists(os.path.join( + submission_checker_dir, "submission_checker.py")): + shutil.copy(os.path.join(submission_checker_dir, "submission-checker.py"), os.path.join(submission_checker_dir, + "submission_checker.py")) + + import submission_checker as checker + + 
config = checker.MODEL_CONFIG + + valid_models = config[mlperf_version]["models"] + + return valid_models diff --git a/script/get-mlperf-automotive-src/meta.yaml b/script/get-mlperf-automotive-src/meta.yaml new file mode 100644 index 000000000..9fa26d5ca --- /dev/null +++ b/script/get-mlperf-automotive-src/meta.yaml @@ -0,0 +1,103 @@ +alias: get-mlperf-automotive-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: MLPerf benchmark support +default_env: + MLC_GIT_CHECKOUT_FOLDER: automotive + MLC_GIT_DEPTH: --depth 4 + MLC_GIT_PATCH: 'no' + MLC_GIT_RECURSE_SUBMODULES: '' +default_version: master +deps: +- tags: detect,os +- names: + - python + - python3 + tags: get,python3 +new_env_keys: +- MLC_MLPERF_INFERENCE_BEVFORMER_PATH +- MLC_MLPERF_INFERENCE_SSD_RESNET50_PATH +- MLC_MLPERF_INFERENCE_DEEPLABV3PLUS_PATH +- MLC_MLPERF_LAST_RELEASE +- MLC_MLPERF_INFERENCE_SOURCE +- MLC_MLPERF_INFERENCE_SOURCE_VERSION +- +PYTHONPATH +prehook_deps: +- env: + MLC_GIT_CHECKOUT_PATH_ENV_NAME: MLC_MLPERF_INFERENCE_SOURCE + extra_cache_tags: automotive,src + force_env_keys: + - MLC_GIT_* + names: + - automotive-git-repo + tags: get,git,repo + update_tags_from_env_with_prefix: + _branch.: + - MLC_GIT_CHECKOUT + _repo.: + - MLC_GIT_URL + _sha.: + - MLC_GIT_SHA + _submodules.: + - MLC_GIT_SUBMODULES +print_env_at_the_end: + MLC_MLPERF_INFERENCE_SOURCE: Path to MLPerf automotive benchmark source +tags: +- get +- src +- source +- automotive +- automotive-src +- automotive-source +- mlperf +- mlcommons +uid: c3842e6e35d947ef +variations: + branch.#: + default_version: custom + env: + MLC_GIT_CHECKOUT: '#' + group: checkout + full-history: + env: + MLC_GIT_DEPTH: '' + group: git-history + no-recurse-submodules: + env: + MLC_GIT_RECURSE_SUBMODULES: '' + patch: + ad: + automotive-git-repo: + tags: _patch + env: + MLC_GIT_PATCH: 'yes' + pybind: + env: + MLC_SUBMODULE_PYBIND: 'yes' + recurse-submodules: + env: + MLC_GIT_RECURSE_SUBMODULES: ' --recurse-submodules' + repo.#: + env: + MLC_GIT_URL: '#' + sha.#: + env: + MLC_GIT_SHA: '#' + group: checkout + short-history: + default: true + env: + MLC_GIT_DEPTH: --depth 10 + group: git-history + submodules.#: + env: + MLC_GIT_SUBMODULES: '#' +versions: + custom: + env: + MLC_MLPERF_LAST_RELEASE: v0.5 + master: + env: + MLC_MLPERF_LAST_RELEASE: v0.5 + MLC_TMP_GIT_CHECKOUT: master diff --git a/script/get-mlperf-automotive-utils/COPYRIGHT.md b/script/get-mlperf-automotive-utils/COPYRIGHT.md new file mode 100644 index 000000000..d2ceead84 --- /dev/null +++ b/script/get-mlperf-automotive-utils/COPYRIGHT.md @@ -0,0 +1,9 @@ +# Copyright Notice + +© 2025-2026 MLCommons. All Rights Reserved. + +This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at: + +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) + +Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. 
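(Editorial note: the branch/URL resolution in get-mlperf-automotive-src/customize.py above is easiest to follow as a small decision table. Below is a simplified, runnable restatement; `resolve()` and its `ValueError` are stand-ins for the script's `{'return': 1, 'error': ...}` convention and are not part of the patch.)

```python
# Condensed restatement of the checkout-resolution rules in
# get-mlperf-automotive-src/customize.py (sketch, not the patch itself).
DEFAULT_URL = "https://github.com/mlcommons/mlperf_automotive"


def resolve(env):
    if not env.get('MLC_GIT_CHECKOUT') and not env.get('MLC_GIT_URL') \
            and not env.get('MLC_VERSION'):
        # Nothing requested -> track master of the default repo.
        env.update({'MLC_VERSION': 'master',
                    'MLC_GIT_CHECKOUT': 'master',
                    'MLC_GIT_URL': DEFAULT_URL})
    elif env.get('MLC_GIT_CHECKOUT') and env.get('MLC_TMP_GIT_CHECKOUT') \
            and env['MLC_GIT_CHECKOUT'] != env['MLC_TMP_GIT_CHECKOUT']:
        # A version pinned one branch while the user asked for another.
        raise ValueError('conflicting branches between version and user input')
    # Any explicit checkout/URL without a version is treated as "custom".
    env.setdefault('MLC_VERSION', 'custom')
    return env


print(resolve({}))                                   # defaults to master
print(resolve({'MLC_GIT_CHECKOUT': 'v0.5-branch'}))  # custom version
```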
diff --git a/script/get-mlperf-automotive-utils/customize.py b/script/get-mlperf-automotive-utils/customize.py new file mode 100644 index 000000000..7ae5d02bd --- /dev/null +++ b/script/get-mlperf-automotive-utils/customize.py @@ -0,0 +1,36 @@ +from mlc import utils +from utils import is_true +import os +import sys + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = is_true(env.get('MLC_QUIET', False)) + + utils_path = env['MLC_TMP_CURRENT_SCRIPT_PATH'] + + env['+PYTHONPATH'] = [utils_path] + + submission_checker_dir = os.path.join( + env['MLC_MLPERF_INFERENCE_SOURCE'], "tools", "submission") + + sys.path.append(submission_checker_dir) + sys.path.append(utils_path) + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/script/get-mlperf-automotive-utils/meta.yaml b/script/get-mlperf-automotive-utils/meta.yaml new file mode 100644 index 000000000..bdd5c667b --- /dev/null +++ b/script/get-mlperf-automotive-utils/meta.yaml @@ -0,0 +1,25 @@ +alias: get-mlperf-automotive-utils +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +tags: +- get +- mlperf +- automotive +- util +- utils +- functions +uid: c20cfade1c184f83 +deps: + - tags: get,mlperf,automotive,src + names: + - automotive-src +new_env_keys: + - '+PYTHONPATH' + - MLC_MLPERF_INFERENCE_BEVFORMER_PATH + - MLC_MLPERF_INFERENCE_SSD_RESNET50_PATH + - MLC_MLPERF_INFERENCE_DEEPLABV3PLUS_PATH + - MLC_MLPERF_LAST_RELEASE + - MLC_MLPERF_INFERENCE_SOURCE + - MLC_MLPERF_INFERENCE_VERSION + - MLC_MLPERF_INFERENCE_SOURCE_VERSION \ No newline at end of file diff --git a/script/get-mlperf-automotive-utils/mlperf_utils.py b/script/get-mlperf-automotive-utils/mlperf_utils.py new file mode 100644 index 000000000..f7441cedd --- /dev/null +++ b/script/get-mlperf-automotive-utils/mlperf_utils.py @@ -0,0 +1,353 @@ +import sys +import os +import submission_checker as checker +from log_parser import MLPerfLog + + +def get_result_from_log(version, model, scenario, + result_path, mode, automotive_src_version=None): + + config = checker.Config( + version, + None, + ignore_uncommited=False, + skip_power_check=False, + ) + mlperf_model = config.get_mlperf_model(model) + # scenario = checker.SCENARIO_MAPPING[scenario] + + result = '' + power_result = None + valid = {} + if mode == "performance": + # has_power = os.path.exists(os.path.join(result_path, "..", "power")) + version_tuple = None + if automotive_src_version: + version_tuple = tuple(map(int, automotive_src_version.split('.'))) + + if version_tuple and version_tuple >= (4, 1, 22): + result_ = checker.get_performance_metric( + config, mlperf_model, result_path, scenario) + else: + result_ = checker.get_performance_metric( + config, mlperf_model, result_path, scenario, None, None, has_power) + mlperf_log = MLPerfLog( + os.path.join( + result_path, + "mlperf_log_detail.txt")) + if ( + "result_validity" not in mlperf_log.get_keys() + or mlperf_log["result_validity"] != "VALID" + ): + valid['performance'] = False + else: + valid['performance'] = True + + if "stream" in scenario.lower(): + result = result_ / 1000000 # convert to milliseconds + else: + result = result_ + result = str(round(result, 3)) + + # if has_power: + # power_valid, power_metric, scenario, avg_power_efficiency = checker.get_power_metric( + # config, scenario, result_path, True, result_) + # power_result = f"{round(power_metric,3)},{round(avg_power_efficiency,3)}" + # valid['power'] = 
power_valid + + elif mode == "accuracy" and os.path.exists(os.path.join(result_path, 'accuracy.txt')): + + acc_valid, acc_results, acc_targets, acc_limits = get_accuracy_metric( + config, mlperf_model, result_path) + valid['accuracy'] = acc_valid + + if len(acc_results) == 1: + for acc in acc_results: + result = str(round(float(acc_results[acc]), 5)) + else: + result = '(' + result_list = [] + for i, acc in enumerate(acc_results): + result_list.append(str(round(float(acc_results[acc]), 5))) + result += ", ".join(result_list) + ")" + + return result, valid, power_result + + +def get_accuracy_metric(config, model, path): + + import re + is_valid = False + all_accuracy_valid = True + acc = None + result_acc = None + target = config.get_accuracy_target(model) + acc_upper_limit = config.get_accuracy_upper_limit(model) + patterns = [] + acc_targets = [] + acc_limits = [None] * (len(target) // 2) + up_patterns = [None] * (len(target) // 2) + acc_types = [] + + if acc_upper_limit is not None: + acc_limit_check = True + + for ii in range(0, len(target), 2): + acc_type1, tmp = target[ii:ii + 2] + for i in range(0, len(acc_upper_limit), 2): + acc_type, acc_target = acc_upper_limit[i:i + 2] + if acc_type != acc_type1: + continue + acc_limits[ii // 2] = acc_target + up_patterns[ii // 2] = checker.ACC_PATTERN[acc_type] + + for i in range(0, len(target), 2): + acc_type, acc_target = target[i:i + 2] + acc_types.append(acc_type) + patterns.append(checker.ACC_PATTERN[acc_type]) + acc_targets.append(acc_target) + + acc_seen = [False for _ in acc_targets] + acc_results = {} + with open(os.path.join(path, "accuracy.txt"), "r", encoding="utf-8") as f: + for line in f: + for i, (pattern, acc_target, acc_type) in enumerate( + zip(patterns, acc_targets, acc_types)): + m = re.match(pattern, line) + if m: + acc = m.group(1) + + acc_results[acc_type] = acc + + if acc is not None and float(acc) >= acc_target: + all_accuracy_valid &= True + acc_seen[i] = True + elif acc is not None: + all_accuracy_valid = False + # log.warning("%s accuracy not met: expected=%f, found=%s", path, acc_target, acc) + if i == 0 and acc: + result_acc = acc + acc = None + if acc_upper_limit is not None: + for i, (pattern, acc_limit) in enumerate( + zip(up_patterns, acc_limits)): + if not pattern: + continue + m = re.match(pattern, line) + if m: + acc = m.group(1) + if acc is not None and acc_upper_limit is not None and float( + acc) > acc_limit: + acc_limit_check = False + # log.warning("%s accuracy not met: upper limit=%f, found=%s", path, acc_limit, acc) + acc = None + if all(acc_seen): + break + is_valid = all_accuracy_valid & all(acc_seen) + if acc_upper_limit is not None: + is_valid &= acc_limit_check + + return is_valid, acc_results, acc_targets, acc_limits + + +def get_result_string(version, model, scenario, result_path, has_power, sub_res, + division="open", system_json=None, model_precision="fp32", automotive_src_version=None): + + config = checker.Config( + version, + None, + ignore_uncommited=False, + skip_power_check=False, + ) + mlperf_model = config.get_mlperf_model(model) + performance_path = os.path.join(result_path, "performance", "run_1") + accuracy_path = os.path.join(result_path, "accuracy") + scenario = checker.SCENARIO_MAPPING[scenario.lower()] + + fname = os.path.join(performance_path, "mlperf_log_detail.txt") + mlperf_log = MLPerfLog(fname) + effective_scenario = mlperf_log["effective_scenario"] + inferred = False + result = {} + + version_tuple = None + if automotive_src_version: + version_tuple = tuple(map(int, 
automotive_src_version.split('.'))) + + if version_tuple and version_tuple >= (4, 1, 22): + performance_result = checker.get_performance_metric( + config, mlperf_model, performance_path, scenario) + else: + performance_result = checker.get_performance_metric( + config, mlperf_model, performance_path, scenario, None, None) + if "stream" in scenario.lower(): + performance_result_ = performance_result / 1000000 # convert to milliseconds + else: + performance_result_ = performance_result + result['performance'] = round(performance_result_, 3) + + if scenario != effective_scenario: + inferred, inferred_result = checker.get_inferred_result( + scenario, effective_scenario, performance_result, mlperf_log, config, False) + + # if has_power: + # is_valid, power_metric, scenario, avg_power_efficiency = checker.get_power_metric( + # config, scenario, performance_path, True, performance_result) + # if "stream" in scenario.lower(): + # power_metric_unit = "milliJoules" + # else: + # power_metric_unit = "Watts" + # power_result_string = f"`Power consumed`: `{round(power_metric, 3)} {power_metric_unit}`, `Power efficiency`: `{round(avg_power_efficiency * 1000, 3)} samples per Joule`" + + # power_result = round(power_metric, 3) + # power_efficiency_result = round(avg_power_efficiency, 3) + # result['power'] = power_result + # result['power_efficiency'] = power_efficiency_result + + # compliance_list = ["TEST01", "TEST04", "TEST06"] + # if division == "closed": + # for test in compliance_list: + # test_path = os.path.join(result_path, test) + # if os.path.exists( + # test_path): # We dont consider missing test folders now - submission checker will do that + # # test_pass = checker.check_compliance_dir(test_path, mlperf_model, scenario, config, "closed", system_json, sub_res) + # test_pass = checker.check_compliance_perf_dir( + # test_path) if test != "TEST06" else True + # if test_pass and test in ["TEST01", "TEST06"]: + # # test_pass = checker.check_compliance_acc_dir(test_path, mlperf_model, config) + # pass # accuracy truncation script is done after submission generation. 
We assume here that it'll pass + # if test_pass: + # result[test] = "passed" + # else: + # result[test] = "failed" + + acc_valid, acc_results, acc_targets, acc_limits = get_accuracy_metric( + config, mlperf_model, accuracy_path) + + result_field = checker.RESULT_FIELD[effective_scenario] + + performance_result_string = f"`{result_field}`: `{performance_result}`\n" + if inferred: + inferred_result_field = checker.RESULT_FIELD[scenario] + performance_result_string += f"Inferred result: `{inferred_result_field}`: `{inferred_result}` \n" + + accuracy_result_string = '' + accuracy_results = [] + for i, acc in enumerate(acc_results): + accuracy_results.append(str(round(float(acc_results[acc]), 5))) + accuracy_result_string += f"`{acc}`: `{round(float(acc_results[acc]), 5)}`" + if not acc_limits or not acc_limits[i]: + accuracy_result_string += f", Required accuracy for closed division `>= {round(acc_targets[i], 5)}`" + else: + accuracy_result_string += f", Required accuracy for closed division `>= {round(acc_targets[i], 5)}` and `<= {round(acc_limits[i], 5)}`" + accuracy_result_string += "\n" + + if len(accuracy_results) == 1: + accuracy_result = accuracy_results[0] + else: + accuracy_result = "(" + ", ".join(accuracy_results) + ")" + result['accuracy'] = accuracy_result + + result_string = f"\n\n## Results\n" + result_string += f"\nPlatform: {sub_res}\n" + result_string += f"\nModel Precision: {model_precision}\n" + result_string += "\n### Accuracy Results \n" + accuracy_result_string + result_string += "\n### Performance Results \n" + performance_result_string + # if has_power: + # result_string += "\n### Power Results \n" + power_result_string + + return result_string, result + + +def get_result_table(results): + + headers = [ + "Model", + "Scenario", + "Accuracy", + "Throughput", + "Latency (in ms)", + "Power Efficiency (in samples/J)", + "TEST01", + "TEST04"] + table = [] + for model in results: + for scenario in results[model]: + row = [] + row.append(model) + row.append(scenario) + if results[model][scenario].get('accuracy'): + val = str(results[model][scenario]['accuracy']) + if not results[model][scenario].get('accuracy_valid', True): + val = "X " + val + row.append(val) + else: + row.append("-") + + if results[model][scenario].get('performance'): + + if "stream" in scenario.lower(): + if float(results[model][scenario]['performance']) == 0: + row.append("-") + elif scenario.lower() == "singlestream": + val_qps = str( + round( + 1000 / + float( + results[model][scenario]['performance']), + 3)) + if not results[model][scenario].get( + 'performance_valid', True): # we explicitly mark invalid results + val_qps = "X " + val_qps + row.append(val_qps) + elif scenario.lower() == "multistream": + val_qps = str( + round( + 8000 / + float( + results[model][scenario]['performance']), + 3)) + if not results[model][scenario].get( + 'performance_valid', True): + val_qps = "X " + val_qps + row.append(val_qps) + val = str(results[model][scenario]['performance']) + if not results[model][scenario].get( + 'performance_valid', True): + val = "X " + val + row.append(val) + else: + val = str(results[model][scenario]['performance']) + if not results[model][scenario].get( + 'performance_valid', True): + val = "X " + val + row.append(val) + row.append("-") + + # val1 = results[model][scenario].get('TEST01') + # val2 = results[model][scenario].get('TEST05') + # val3 = results[model][scenario].get('TEST04') + + # if results[model][scenario].get('power','') != '': + # row.append(results[model][scenario]['power']) + 
# if results[model][scenario].get('power_efficiency', '') != '': + # val = str(results[model][scenario]['power_efficiency']) + # if not results[model][scenario].get('power_valid', True): + # val = "X " + val + # row.append(val) + # elif val1 or val3: # Don't output unless there are any further column data + # row.append(None) + + # if val1: + # row.append(val1) + # if val3: + # row.append(val3) + + # else: + # if val3: + # row.append("missing") + # row.append(val3) + + table.append(row) + + return table, headers diff --git a/script/get-mlperf-inference-loadgen/meta.yaml b/script/get-mlperf-inference-loadgen/meta.yaml index ad59163fd..ed75db192 100644 --- a/script/get-mlperf-inference-loadgen/meta.yaml +++ b/script/get-mlperf-inference-loadgen/meta.yaml @@ -24,10 +24,21 @@ deps: - MLC_GIT_CHECKOUT names: - inference-src-loadgen - skip_if_env: + skip_if_any_env: MLC_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: - 'YES' + MLC_INFERENCE_AUTOMOTIVE_REPO: + - 'YES' tags: get,mlcommons,inference,src +- force_env_keys: + - MLC_GIT_URL + - MLC_GIT_CHECKOUT + names: + - automotive-src + enable_if_env: + MLC_INFERENCE_AUTOMOTIVE_REPO: + - 'YES' + tags: get,mlcommons,automotive,src - enable_if_env: MLC_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: - 'YES' @@ -49,7 +60,7 @@ deps: - enable_if_env: MLC_HOST_OS_TYPE: - windows - skip_if_env: + skip_if_any_env: MLC_TMP_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP: - 'yes' names: @@ -155,7 +166,9 @@ variations: '+ CXXFLAGS': - '-Werror' - '-Wno-unused-parameter' - + automotive: + env: + MLC_INFERENCE_AUTOMOTIVE_REPO: 'YES' versions: custom: add_deps: diff --git a/script/get-preprocessed-dataset-cognata/COPYRIGHT.md b/script/get-preprocessed-dataset-cognata/COPYRIGHT.md new file mode 100644 index 000000000..2d6a2775e --- /dev/null +++ b/script/get-preprocessed-dataset-cognata/COPYRIGHT.md @@ -0,0 +1,9 @@ +# Copyright Notice + +© 2023-2025 MLCommons. All Rights Reserved. + +This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at: + +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) + +Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. diff --git a/script/get-preprocessed-dataset-cognata/README.md b/script/get-preprocessed-dataset-cognata/README.md new file mode 100644 index 000000000..fee3d0ae4 --- /dev/null +++ b/script/get-preprocessed-dataset-cognata/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-preprocessed-dataset-kits19](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-preprocessed-dataset-kits19) for the documentation of this CM script. 
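(Editorial note: `get_result_table()` above converts the per-query latencies that the *stream scenarios report in milliseconds into throughput before printing. The following self-contained restatement of that arithmetic makes the conversion explicit, including the 8-samples-per-query MultiStream assumption encoded in the `8000` constant; the helper name is illustrative, not part of the patch.)

```python
# Latency-to-throughput conversion as performed in get_result_table().
def latency_ms_to_throughput(scenario: str, latency_ms: float) -> float:
    if scenario.lower() == "singlestream":
        return round(1000 / latency_ms, 3)   # queries/second, 1 sample per query
    if scenario.lower() == "multistream":
        return round(8000 / latency_ms, 3)   # samples/second, 8 samples per query
    return latency_ms                        # Offline etc. already report QPS


print(latency_ms_to_throughput("SingleStream", 2.5))  # 400.0
print(latency_ms_to_throughput("MultiStream", 2.5))   # 3200.0
```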
diff --git a/script/get-preprocessed-dataset-cognata/customize.py b/script/get-preprocessed-dataset-cognata/customize.py
new file mode 100644
index 000000000..1e4f0beba
--- /dev/null
+++ b/script/get-preprocessed-dataset-cognata/customize.py
@@ -0,0 +1,24 @@
+from mlc import utils
+import os
+import shutil
+from utils import is_true
+
+
+def preprocess(i):
+
+    env = i['env']
+
+    if env.get('MLC_NUSCENES_DATASET_TYPE', '') == "prebuilt":
+        env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes"
+
+    return {'return': 0}
+
+
+def postprocess(i):
+    env = i['env']
+    if is_true(env.get('MLC_TMP_REQUIRE_DOWNLOAD', '')):
+        env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'] = os.path.join(
+            env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'],
+            env['MLC_DATASET_COGNATA_EXTRACTED_FOLDER_NAME'])
+
+    return {'return': 0}
diff --git a/script/get-preprocessed-dataset-cognata/meta.yaml b/script/get-preprocessed-dataset-cognata/meta.yaml
new file mode 100644
index 000000000..c8bac0417
--- /dev/null
+++ b/script/get-preprocessed-dataset-cognata/meta.yaml
@@ -0,0 +1,114 @@
+alias: get-preprocessed-dataset-cognata
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: true
+category: AI/ML datasets
+default_env:
+  MLC_DATASET: cognata
+new_env_keys:
+- MLC_PREPROCESSED_DATASET_*
+tags:
+- get
+- dataset
+- cognata
+- preprocessed
+uid: 29b3a984ff444de9
+print_env_at_the_end:
+  MLC_PREPROCESSED_DATASET_COGNATA_PATH: Preprocessed Cognata dataset path
+variations:
+  validation:
+    default: true
+    group: dataset-type
+    env:
+      MLC_DATASET_COGNATA_TYPE: validation
+  calibration:
+    group: dataset-type
+    env:
+      MLC_DATASET_COGNATA_TYPE: calibration
+  2d_obj_det:
+    default: true
+    group: task
+    env:
+      MLC_DATASET_COGNATA_TASK: 2d_object_detection
+  segmentation:
+    group: task
+    env:
+      MLC_DATASET_COGNATA_TASK: segmentation
+  validation,2d_obj_det:
+    env:
+      MLC_DATASET_COGNATA_EXTRACTED_FOLDER_NAME: val_2d
+      MLC_DATASET_COGNATA_TAR_FILENAME: val_2d.tar.gz
+      MLC_DOWNLOAD_URL: mlc-cognata:mlc_cognata_dataset/preprocessed_2d/<<<MLC_DATASET_COGNATA_TAR_FILENAME>>>
+  calibration,2d_obj_det:
+    env:
+      MLC_DATASET_COGNATA_EXTRACTED_FOLDER_NAME: calib_2d
+      MLC_DATASET_COGNATA_TAR_FILENAME: calib_2d.tar.gz
+      MLC_DOWNLOAD_URL: mlc-cognata:mlc_cognata_dataset/preprocessed_2d/<<<MLC_DATASET_COGNATA_TAR_FILENAME>>>
+  validation,segmentation:
+    env:
+      MLC_DATASET_COGNATA_EXTRACTED_FOLDER_NAME: val_seg
+      MLC_DATASET_COGNATA_TAR_FILENAME: val_seg.tar.gz
+      MLC_DOWNLOAD_URL: mlc-cognata:mlc_cognata_dataset/preprocessed_seg/<<<MLC_DATASET_COGNATA_TAR_FILENAME>>>
+  calibration,segmentation:
+    env:
+      MLC_DATASET_COGNATA_EXTRACTED_FOLDER_NAME: calib_seg
+      MLC_DATASET_COGNATA_TAR_FILENAME: calib_seg.tar.gz
+      MLC_DOWNLOAD_URL: mlc-cognata:mlc_cognata_dataset/preprocessed_seg/<<<MLC_DATASET_COGNATA_TAR_FILENAME>>>
+  prebuilt:
+    default: true
+    group: dataset-src
+    env:
+      MLC_NUSCENES_DATASET_TYPE: prebuilt
+      MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_COGNATA_PATH
+      MLC_EXTRACT_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_COGNATA_PATH
+      MLC_DOWNLOAD_EXTRA_OPTIONS: ' --include '
+  mlc:
+    group: download-src
+    default: true
+    prehook_deps:
+    - tags: get,rclone
+      enable_if_env:
+        MLC_TMP_REQUIRE_DOWNLOAD:
+        - yes
+    - tags: get,rclone-config,_config-name.mlc-cognata
+      force_cache: true
+      enable_if_env:
+        MLC_TMP_REQUIRE_DOWNLOAD:
+        - yes
+      env:
+        MLC_RCLONE_DRIVE_FOLDER_ID: 1u5FDoeXHVtDrd4zClE47Gmyr7iLFidz1
+    - enable_if_env:
+        MLC_TMP_REQUIRE_DOWNLOAD:
+        - 'yes'
+      extra_cache_tags: cognata,dataset
+      force_cache: true
+      names:
+      - dae
+      tags: download-and-extract
+      force_env_keys:
+      - MLC_OUTDIRNAME
+      update_tags_from_env_with_prefix:
+        _url.:
+        - MLC_DOWNLOAD_URL
+    env:
+      MLC_DOWNLOAD_SRC: mlcommons
+  rclone:
+    group: download-tool
+    add_deps_recursive:
+      dae:
+        tags: _rclone
+    default: true
+  dry-run:
+    group: run-mode
+    env:
+      MLC_DOWNLOAD_MODE: dry
+  dry-run,rclone:
+    env:
+      MLC_DOWNLOAD_EXTRA_OPTIONS: --dry-run
+tests:
+  run_inputs:
+  - variations_list:
+    - validation,prebuilt,2d_obj_det,rclone,mlc,dry-run
+    - calibration,prebuilt,2d_obj_det,rclone,mlc,dry-run
+    - validation,prebuilt,segmentation,rclone,mlc,dry-run
+    - calibration,prebuilt,segmentation,rclone,mlc,dry-run
\ No newline at end of file
diff --git a/script/get-preprocessed-dataset-cognata/run.sh b/script/get-preprocessed-dataset-cognata/run.sh
new file mode 100644
index 000000000..0c141b8e6
--- /dev/null
+++ b/script/get-preprocessed-dataset-cognata/run.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+if [[ "$MLC_DOWNLOAD_MODE" != "dry" && "$MLC_TMP_REQUIRE_DOWNLOAD" = "yes" ]]; then
+  cd "${MLC_PREPROCESSED_DATASET_COGNATA_PATH}" || exit
+  for f in *.tar.gz; do
+    tar -xzvf "$f" || { echo "Failed to extract $f"; exit 1; }
+  done
+  cd - || exit
+fi
\ No newline at end of file
diff --git a/script/get-preprocessed-dataset-nuscenes/COPYRIGHT.md b/script/get-preprocessed-dataset-nuscenes/COPYRIGHT.md
new file mode 100644
index 000000000..2d6a2775e
--- /dev/null
+++ b/script/get-preprocessed-dataset-nuscenes/COPYRIGHT.md
@@ -0,0 +1,9 @@
+# Copyright Notice
+
+© 2023-2025 MLCommons. All Rights Reserved.
+
+This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at:
+
+[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)
+
+Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License.
diff --git a/script/get-preprocessed-dataset-nuscenes/README.md b/script/get-preprocessed-dataset-nuscenes/README.md
new file mode 100644
index 000000000..fee3d0ae4
--- /dev/null
+++ b/script/get-preprocessed-dataset-nuscenes/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-preprocessed-dataset-nuscenes](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-preprocessed-dataset-nuscenes) for the documentation of this MLC script.
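The run.sh scripts added above unpack every *.tar.gz under the dataset path unless the run is a dry run or no download was required. A rough Python equivalent of that guard and extraction loop, assuming the same MLC_* environment variables; the extract_archives helper is a hypothetical name used only for illustration:

    import os
    import tarfile

    def extract_archives(dataset_path: str) -> None:
        # Mirrors the shell guard: skip on dry runs or when nothing was downloaded.
        if os.environ.get('MLC_DOWNLOAD_MODE') == 'dry':
            return
        if os.environ.get('MLC_TMP_REQUIRE_DOWNLOAD') != 'yes':
            return
        for name in sorted(os.listdir(dataset_path)):
            if name.endswith('.tar.gz'):
                # Equivalent of: tar -xzvf "$f"
                with tarfile.open(os.path.join(dataset_path, name), 'r:gz') as tar:
                    tar.extractall(path=dataset_path)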
diff --git a/script/get-preprocessed-dataset-nuscenes/customize.py b/script/get-preprocessed-dataset-nuscenes/customize.py
new file mode 100644
index 000000000..5236b5c78
--- /dev/null
+++ b/script/get-preprocessed-dataset-nuscenes/customize.py
@@ -0,0 +1,36 @@
+from mlc import utils
+import os
+import shutil
+from utils import is_true
+
+
+def preprocess(i):
+
+    env = i['env']
+
+    if env.get('MLC_NUSCENES_DATASET_TYPE', '') == "prebuilt" and env.get(
+            'MLC_PREPROCESSED_DATASET_NUSCENES_PATH', '') == '':
+        env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes"
+
+    return {'return': 0}
+
+
+def postprocess(i):
+    env = i['env']
+
+    if is_true(env.get('MLC_TMP_REQUIRE_DOWNLOAD', '')):
+        env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'] = os.path.join(
+            env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'],
+            env['MLC_DATASET_NUSCENES_EXTRACTED_FOLDER_NAME'])
+        if env.get(
+                'MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH', '') != '':
+            shutil.copy(
+                os.path.join(
+                    env['MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH'],
+                    env['MLC_DATASET_NUSCENES_SCENE_PICKLE_FILENAME']),
+                os.path.join(
+                    os.path.dirname(
+                        env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'].rstrip("/")),
+                    env['MLC_DATASET_NUSCENES_SCENE_PICKLE_FILENAME']))
+
+    return {'return': 0}
diff --git a/script/get-preprocessed-dataset-nuscenes/meta.yaml b/script/get-preprocessed-dataset-nuscenes/meta.yaml
new file mode 100644
index 000000000..6436574a6
--- /dev/null
+++ b/script/get-preprocessed-dataset-nuscenes/meta.yaml
@@ -0,0 +1,132 @@
+alias: get-preprocessed-dataset-nuscenes
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: true
+category: AI/ML datasets
+default_env:
+  MLC_DATASET: nuscenes
+new_env_keys:
+- MLC_PREPROCESSED_DATASET_*
+tags:
+- get
+- dataset
+- nuscenes
+- preprocessed
+uid: 0e403a2861984a4e
+print_env_at_the_end:
+  MLC_PREPROCESSED_DATASET_NUSCENES_PATH: Preprocessed Nuscenes dataset path
+  MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH: Path containing minimum files for accuracy checker
+variations:
+  validation:
+    default: true
+    group: dataset-type
+    env:
+      MLC_DATASET_NUSCENES_EXTRACTED_FOLDER_NAME: val_3d
+      MLC_DATASET_NUSCENES_TAR_FILENAME: val_3d.tar.gz
+      MLC_DOWNLOAD_URL: mlc-nuscenes:nuscenes_dataset/preprocessed/<<<MLC_DATASET_NUSCENES_TAR_FILENAME>>>
+  calibration:
+    group: dataset-type
+    env:
+      MLC_DATASET_NUSCENES_EXTRACTED_FOLDER_NAME: calib_3d
+      MLC_DATASET_NUSCENES_TAR_FILENAME: calib_3d.tar.gz
+      MLC_DOWNLOAD_URL: mlc-nuscenes:nuscenes_dataset/preprocessed/<<<MLC_DATASET_NUSCENES_TAR_FILENAME>>>
+  prebuilt:
+    default: true
+    group: dataset-src
+    env:
+      MLC_NUSCENES_DATASET_TYPE: prebuilt
+      MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_NUSCENES_PATH
+      MLC_EXTRACT_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_NUSCENES_PATH
+      MLC_DOWNLOAD_EXTRA_OPTIONS: ' --include '
+  mlc:
+    group: download-src
+    default: true
+    prehook_deps:
+    - tags: get,rclone
+      enable_if_env:
+        MLC_TMP_REQUIRE_DOWNLOAD:
+        - yes
+    - tags: get,rclone-config,_config-name.mlc-nuscenes
+      force_cache: true
+      enable_if_env:
+        MLC_TMP_REQUIRE_DOWNLOAD:
+        - yes
+      env:
+        MLC_RCLONE_DRIVE_FOLDER_ID: 17CpM5eU8tjrxh_LpH_BTNTeT37PhzcnC
+    - enable_if_env:
+        MLC_TMP_REQUIRE_DOWNLOAD:
+        - 'yes'
+      extra_cache_tags: nuscenes,dataset
+      force_cache: true
+      names:
+      - dae
+      tags: download-and-extract
+      force_env_keys:
+      - MLC_OUTDIRNAME
+      update_tags_from_env_with_prefix:
+        _url.:
+        - MLC_DOWNLOAD_URL
+    env:
+      MLC_DOWNLOAD_SRC: mlcommons
+  mlc,validation:
+    env:
+      MLC_DATASET_NUSCENES_SCENE_PICKLE_FILENAME: scene_lengths.pkl
+      MLC_DATASET_NUSCENES_ACC_CHECKER_DEP_FILES_TAR_NAME:
nuscenes_min.tar.gz + MLC_DATASET_NUSCENES_ACC_REQ_FILE_EXTRACTED_FOLDER_NAME: nuscenes + prehook_deps: + - enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - 'yes' + extra_cache_tags: nuscenes,dataset,scene_lengths + force_cache: true + names: + - dae_sl + tags: download-and-extract + force_env_keys: + - MLC_OUTDIRNAME + env: + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH + MLC_EXTRACT_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_NUSCENES_SCENE_LENGTHS_PATH + MLC_DOWNLOAD_URL: mlc-nuscenes:nuscenes_dataset/scene_lengths.pkl + update_tags_from_env_with_prefix: + _url.: + - MLC_DOWNLOAD_URL + - enable_if_env: + MLC_TMP_REQUIRE_DOWNLOAD: + - 'yes' + extra_cache_tags: nuscenes,dataset,accuracy_checker + force_cache: true + names: + - dae_ac + tags: download-and-extract + force_env_keys: + - MLC_OUTDIRNAME + env: + MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH + MLC_EXTRACT_FINAL_ENV_NAME: MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH + MLC_DOWNLOAD_URL: mlc-nuscenes:nuscenes_dataset/nuscenes_min.tar.gz + update_tags_from_env_with_prefix: + _url.: + - MLC_DOWNLOAD_URL + rclone: + group: download-tool + add_deps_recursive: + dae: + tags: _rclone + dae_sl: + tags: _rclone + dae_ac: + tags: _rclone + default: true + dry-run: + group: run-mode + env: + MLC_DOWNLOAD_MODE: dry + dry-run,rclone: + env: + MLC_DOWNLOAD_EXTRA_OPTIONS: --dry-run +tests: + run_inputs: + - variations_list: + - validation,prebuilt,rclone,mlc,dry-run + - calibration,prebuilt,rclone,mlc,dry-run \ No newline at end of file diff --git a/script/get-preprocessed-dataset-nuscenes/run.sh b/script/get-preprocessed-dataset-nuscenes/run.sh new file mode 100644 index 000000000..16337c44d --- /dev/null +++ b/script/get-preprocessed-dataset-nuscenes/run.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +if [[ "$MLC_DOWNLOAD_MODE" != "dry" && "$MLC_TMP_REQUIRE_DOWNLOAD" = "yes" ]]; then + cd "${MLC_PREPROCESSED_DATASET_NUSCENES_PATH}" || exit + for f in *.tar.gz; do + tar -xzvf "$f" || { echo "Failed to extract $f"; exit 1; } + done + cd "${MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH}" || exit + for f in *.tar.gz; do + tar -xzvf "$f" || { echo "Failed to extract $f"; exit 1; } + done + cd - || exit +fi \ No newline at end of file diff --git a/script/process-mlperf-accuracy/customize.py b/script/process-mlperf-accuracy/customize.py index 4ef3fbd43..afb359ce7 100644 --- a/script/process-mlperf-accuracy/customize.py +++ b/script/process-mlperf-accuracy/customize.py @@ -1,271 +1,283 @@ -from mlc import utils -import os - - -def preprocess(i): - - os_info = i['os_info'] - - xsep = ';' if os_info['platform'] == 'windows' else ':' - - env = i['env'] - logger = i['automation'].logger - - results_dir = env.get("MLC_MLPERF_ACCURACY_RESULTS_DIR", "") - - if results_dir == "": - logger.error("Please set MLC_MLPERF_ACCURACY_RESULTS_DIR") - return {'return': -1} - - # In fact, we expect only 1 command line here - run_cmds = [] - - if env.get('MLC_MAX_EXAMPLES', '') != '' and env.get( - 'MLC_MLPERF_RUN_STYLE', '') != 'valid': - max_examples_string = " --max_examples " + env['MLC_MAX_EXAMPLES'] - else: - max_examples_string = "" - - results_dir_split = results_dir.split(xsep) - dataset = env['MLC_DATASET'] - regenerate_accuracy_file = env.get( - 'MLC_MLPERF_REGENERATE_ACCURACY_FILE', env.get( - 'MLC_RERUN', False)) - - for result_dir in results_dir_split: - - out_file = os.path.join(result_dir, 'accuracy.txt') - - if os.path.exists(out_file) and ( - 
os.stat(out_file).st_size != 0) and not regenerate_accuracy_file: - continue - - if dataset == "openimages": - if env.get('MLC_DATASET_PATH_ROOT', '') != '': - dataset_dir = env['MLC_DATASET_PATH_ROOT'] - if 'DATASET_ANNOTATIONS_FILE_PATH' in env: - del (env['DATASET_ANNOTATIONS_FILE_PATH']) - else: - env['DATASET_ANNOTATIONS_FILE_PATH'] = env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] - dataset_dir = os.getcwd() # not used, just to keep the script happy - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " " + "'" + os.path.join(env['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", - "accuracy-openimages.py") + "'" + " --mlperf-accuracy-file " + "'" + os.path.join(result_dir, - "mlperf_log_accuracy.json") + "'" + " --openimages-dir " + "'" + dataset_dir + "'" + " --verbose > " + "'" + \ - out_file + "'" - - elif dataset == "imagenet": - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", - "accuracy-imagenet.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir, - "mlperf_log_accuracy.json") + "' --imagenet-val-file '" + os.path.join(env['MLC_DATASET_AUX_PATH'], - "val.txt") + "' --dtype " + env.get('MLC_ACCURACY_DTYPE', "float32") + " > '" + out_file + "'" - - elif dataset == "squad": - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_BERT_PATH'], - "accuracy-squad.py") + "' --val_data '" + env['MLC_DATASET_SQUAD_VAL_PATH'] + \ - "' --log_file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ - "' --vocab_file '" + env['MLC_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH'] + \ - "' --out_file '" + os.path.join(result_dir, 'predictions.json') + \ - "' --features_cache_file '" + os.path.join(env['MLC_MLPERF_INFERENCE_BERT_PATH'], 'eval_features.pickle') + \ - "' --output_dtype " + env['MLC_ACCURACY_DTYPE'] + env.get( - 'MLC_OUTPUT_TRANSPOSED', '') + max_examples_string + " > '" + out_file + "'" - - elif dataset == "cnndm": - if env.get('MLC_MLPERF_IMPLEMENTATION', '') == 'intel': - accuracy_checker_file = env['MLC_MLPERF_INFERENCE_INTEL_GPTJ_ACCURACY_FILE_WITH_PATH'] - env['+PYTHONPATH'] = [os.path.dirname(env['MLC_MLPERF_INFERENCE_INTEL_GPTJ_DATASET_FILE_WITH_PATH'])] + [ - os.path.dirname(env['MLC_MLPERF_INFERENCE_INTEL_GPTJ_DATASET_ITEM_FILE_WITH_PATH'])] + env['+PYTHONPATH'] - suffix_string = " --model-name-or-path '" + \ - env['GPTJ_CHECKPOINT_PATH'] + "'" - else: - accuracy_checker_file = os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "language", "gpt-j", - "evaluation.py") - suffix_string = " --dtype " + \ - env.get('MLC_ACCURACY_DTYPE', "float32") - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + accuracy_checker_file + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ - "' --dataset-file '" + \ - env['MLC_DATASET_EVAL_PATH'] + "'" + \ - suffix_string + " > '" + out_file + "'" - - elif dataset == "openorca": - accuracy_checker_file = os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "language", "llama2-70b", - "evaluate-accuracy.py") - if env.get('MLC_VLLM_SERVER_MODEL_NAME', '') == '': - checkpoint_path = env['MLC_ML_MODEL_LLAMA2_FILE_WITH_PATH'] - else: - checkpoint_path = env['MLC_VLLM_SERVER_MODEL_NAME'] - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + accuracy_checker_file + "' --checkpoint-path '" + checkpoint_path + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ - "' --dataset-file '" + env['MLC_DATASET_PREPROCESSED_PATH'] + "'" + " --dtype " + env.get( - 
'MLC_ACCURACY_DTYPE', "int32") + " > '" + out_file + "'" - - elif dataset == "openorca-gsm8k-mbxp-combined": - accuracy_checker_file = os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "language", "mixtral-8x7b", - "evaluate-accuracy.py") - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + accuracy_checker_file + "' --checkpoint-path '" + env['MIXTRAL_CHECKPOINT_PATH'] + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ - "' --dataset-file '" + env['MLC_DATASET_MIXTRAL_PREPROCESSED_PATH'] + "'" + \ - " --dtype " + env.get('MLC_ACCURACY_DTYPE', - "float32") + " > '" + out_file + "'" - - elif dataset == "coco2014": - env['+PYTHONPATH'] = [ - os.path.join( - env['MLC_MLPERF_INFERENCE_SOURCE'], - "text_to_image", - "tools"), - os.path.join( - env['MLC_MLPERF_INFERENCE_SOURCE'], - "text_to_image", - "tools", - "fid")] - extra_options = "" - - if env.get('MLC_SDXL_STATISTICS_FILE_PATH', '') != '': - extra_options += ( - f""" --statistics-path '{ - env['MLC_SDXL_STATISTICS_FILE_PATH']}'""" - ) - - if env.get('MLC_SDXL_COMPLIANCE_IMAGES_PATH', '') != '': - extra_options += ( - f""" --compliance-images-path '{ - env['MLC_SDXL_COMPLIANCE_IMAGES_PATH']}' """ - ) - else: - extra_options += f""" --compliance-images-path '{ - os.path.join( - result_dir, "images")}' """ - - if env.get('MLC_COCO2014_SAMPLE_ID_PATH', '') != '': - extra_options += ( - f" --ids-path '{env['MLC_COCO2014_SAMPLE_ID_PATH']}' " - ) - - if env.get('MLC_SDXL_ACCURACY_RUN_DEVICE', '') != '': - extra_options += ( - f" --device '{env['MLC_SDXL_ACCURACY_RUN_DEVICE']}' " - ) - - # env['DATASET_ANNOTATIONS_FILE_PATH'] = env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "text_to_image", "tools", - "accuracy_coco.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ - "' --caption-path '" + os.path.join( - env['MLC_MLPERF_INFERENCE_SOURCE'], - "text_to_image", - "coco2014", - "captions", - "captions_source.tsv") + "'" + extra_options + " > '" + out_file + "'" - - elif dataset == "kits19": - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_3DUNET_PATH'], - "accuracy_kits.py") + \ - "' --preprocessed_data_dir '" + env['MLC_DATASET_PREPROCESSED_PATH'] +\ - "' --postprocessed_data_dir '" + result_dir +\ - "' --log_file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ - "' --output_dtype " + \ - env['MLC_ACCURACY_DTYPE'] + " > '" + out_file + "'" - - elif dataset == "librispeech": - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_RNNT_PATH'], - "accuracy_eval.py") + \ - "' --dataset_dir '" + os.path.join(env['MLC_DATASET_PREPROCESSED_PATH'], "..") +\ - "' --manifest '" + env['MLC_DATASET_PREPROCESSED_JSON'] +\ - "' --log_dir '" + result_dir + \ - "' --output_dtype " + \ - env['MLC_ACCURACY_DTYPE'] + " > '" + out_file + "'" - - elif dataset == "terabyte": - extra_options = "" - if env.get('MLC_DLRM_V2_AGGREGATION_TRACE_FILE_PATH', '') != '': - extra_options += ( - f""" --aggregation-trace-file '{ - env['MLC_DLRM_V2_AGGREGATION_TRACE_FILE_PATH']}' """ - ) - if env.get('MLC_DLRM_V2_DAY23_FILE_PATH', '') != '': - extra_options += ( - f""" --day-23-file '{ - env['MLC_DLRM_V2_DAY23_FILE_PATH']}' """ - ) - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_DLRM_V2_PATH'], "pytorch", "tools", - "accuracy-dlrm.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir, 
- "mlperf_log_accuracy.json") + "'" + extra_options + \ - " --dtype " + env.get('MLC_ACCURACY_DTYPE', - "float32") + " > '" + out_file + "'" - - elif dataset == "igbh": - if env.get('MLC_DATASET_IGBH_SIZE', '') == '': - if env.get('MLC_MLPERF_SUBMISSION_GENERATION_STYLE', - '') == "full": - env['MLC_DATASET_IGBH_SIZE'] = "full" - else: - env['MLC_DATASET_IGBH_SIZE'] = "tiny" - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "graph", "R-GAT", "tools", "accuracy_igbh.py") + "' --mlperf-accuracy-file '" + os.path.join( - result_dir, "mlperf_log_accuracy.json") + "' --dataset-path '" + env['MLC_DATASET_IGBH_PATH'] + "' --dataset-size '" + env['MLC_DATASET_IGBH_SIZE'] + "' --output-file '" + out_file + "'" - - elif dataset == "dataset_llama3": - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "language", "llama3.1-405b", "evaluate-accuracy.py") + "' --checkpoint-path '" + env['MLC_ML_MODEL_LLAMA3_CHECKPOINT_PATH'] + "' --mlperf-accuracy-file '" + os.path.join( - result_dir, "mlperf_log_accuracy.json") + "' --dtype '" + env['MLC_ACCURACY_DTYPE'] + "' --dataset-file '" + env['MLC_DATASET_LLAMA3_PATH'] + "' > '" + out_file + "'" - - elif dataset == "waymo": - CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "automotive", "3d-object-detection", "accuracy_waymo.py") + "' --mlperf-accuracy-file '" + os.path.join( - result_dir, "mlperf_log_accuracy.json") + "' --waymo-dir '" + env['MLC_DATASET_WAYMO_PATH'] + "' > '" + out_file + "'" - - else: - return {'return': 1, 'error': 'Unsupported dataset'} - - run_cmds.append(CMD) - - if os_info['platform'] == 'windows': - env['MLC_RUN_CMDS'] = ( - '\n'.join(run_cmds)).replace( - "'", - '"').replace( - '>', - '^>') - else: - env['MLC_RUN_CMDS'] = "??".join(run_cmds) - - return {'return': 0} - - -def postprocess(i): - - os_info = i['os_info'] - env = i['env'] - state = i['state'] - logger = i['automation'].logger - xsep = ';' if os_info['platform'] == 'windows' else ':' - - results_dir = env.get("MLC_MLPERF_ACCURACY_RESULTS_DIR", "") - - results_dir_split = results_dir.split(xsep) - - for result_dir in results_dir_split: - accuracy_file = os.path.join(result_dir, "accuracy.txt") - - if os.path.exists(accuracy_file): - logger.info('') - logger.info('Accuracy file: {}'.format(accuracy_file)) - logger.info('') - - x = '' - with open(accuracy_file, "r") as fp: - x = fp.read() - - if x != '': - logger.info(f"{x}") - - # Trying to extract accuracy dict - for y in x.split('\n'): - if y.startswith('{') and y.endswith('}'): - - import json - - try: - z = json.loads(y) - state['app_mlperf_inference_accuracy'] = z - - break - except ValueError as e: - pass - - logger.info('') - return {'return': 0} +from mlc import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + xsep = ';' if os_info['platform'] == 'windows' else ':' + + env = i['env'] + logger = i['automation'].logger + + results_dir = env.get("MLC_MLPERF_ACCURACY_RESULTS_DIR", "") + + if results_dir == "": + logger.error("Please set MLC_MLPERF_ACCURACY_RESULTS_DIR") + return {'return': -1} + + # In fact, we expect only 1 command line here + run_cmds = [] + + if env.get('MLC_MAX_EXAMPLES', '') != '' and env.get( + 'MLC_MLPERF_RUN_STYLE', '') != 'valid': + max_examples_string = " --max_examples " + env['MLC_MAX_EXAMPLES'] + else: + max_examples_string = "" + + results_dir_split = results_dir.split(xsep) + dataset = env['MLC_DATASET'] + 
regenerate_accuracy_file = env.get( + 'MLC_MLPERF_REGENERATE_ACCURACY_FILE', env.get( + 'MLC_RERUN', False)) + + for result_dir in results_dir_split: + + out_file = os.path.join(result_dir, 'accuracy.txt') + + if os.path.exists(out_file) and ( + os.stat(out_file).st_size != 0) and not regenerate_accuracy_file: + continue + + if dataset == "openimages": + if env.get('MLC_DATASET_PATH_ROOT', '') != '': + dataset_dir = env['MLC_DATASET_PATH_ROOT'] + if 'DATASET_ANNOTATIONS_FILE_PATH' in env: + del (env['DATASET_ANNOTATIONS_FILE_PATH']) + else: + env['DATASET_ANNOTATIONS_FILE_PATH'] = env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] + dataset_dir = os.getcwd() # not used, just to keep the script happy + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " " + "'" + os.path.join(env['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", + "accuracy-openimages.py") + "'" + " --mlperf-accuracy-file " + "'" + os.path.join(result_dir, + "mlperf_log_accuracy.json") + "'" + " --openimages-dir " + "'" + dataset_dir + "'" + " --verbose > " + "'" + \ + out_file + "'" + + elif dataset == "imagenet": + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", + "accuracy-imagenet.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir, + "mlperf_log_accuracy.json") + "' --imagenet-val-file '" + os.path.join(env['MLC_DATASET_AUX_PATH'], + "val.txt") + "' --dtype " + env.get('MLC_ACCURACY_DTYPE', "float32") + " > '" + out_file + "'" + + elif dataset == "squad": + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_BERT_PATH'], + "accuracy-squad.py") + "' --val_data '" + env['MLC_DATASET_SQUAD_VAL_PATH'] + \ + "' --log_file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ + "' --vocab_file '" + env['MLC_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH'] + \ + "' --out_file '" + os.path.join(result_dir, 'predictions.json') + \ + "' --features_cache_file '" + os.path.join(env['MLC_MLPERF_INFERENCE_BERT_PATH'], 'eval_features.pickle') + \ + "' --output_dtype " + env['MLC_ACCURACY_DTYPE'] + env.get( + 'MLC_OUTPUT_TRANSPOSED', '') + max_examples_string + " > '" + out_file + "'" + + elif dataset == "cnndm": + if env.get('MLC_MLPERF_IMPLEMENTATION', '') == 'intel': + accuracy_checker_file = env['MLC_MLPERF_INFERENCE_INTEL_GPTJ_ACCURACY_FILE_WITH_PATH'] + env['+PYTHONPATH'] = [os.path.dirname(env['MLC_MLPERF_INFERENCE_INTEL_GPTJ_DATASET_FILE_WITH_PATH'])] + [ + os.path.dirname(env['MLC_MLPERF_INFERENCE_INTEL_GPTJ_DATASET_ITEM_FILE_WITH_PATH'])] + env['+PYTHONPATH'] + suffix_string = " --model-name-or-path '" + \ + env['GPTJ_CHECKPOINT_PATH'] + "'" + else: + accuracy_checker_file = os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "language", "gpt-j", + "evaluation.py") + suffix_string = " --dtype " + \ + env.get('MLC_ACCURACY_DTYPE', "float32") + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + accuracy_checker_file + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ + "' --dataset-file '" + \ + env['MLC_DATASET_EVAL_PATH'] + "'" + \ + suffix_string + " > '" + out_file + "'" + + elif dataset == "openorca": + accuracy_checker_file = os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "language", "llama2-70b", + "evaluate-accuracy.py") + if env.get('MLC_VLLM_SERVER_MODEL_NAME', '') == '': + checkpoint_path = env['MLC_ML_MODEL_LLAMA2_FILE_WITH_PATH'] + else: + checkpoint_path = env['MLC_VLLM_SERVER_MODEL_NAME'] + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + accuracy_checker_file + 
"' --checkpoint-path '" + checkpoint_path + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ + "' --dataset-file '" + env['MLC_DATASET_PREPROCESSED_PATH'] + "'" + " --dtype " + env.get( + 'MLC_ACCURACY_DTYPE', "int32") + " > '" + out_file + "'" + + elif dataset == "openorca-gsm8k-mbxp-combined": + accuracy_checker_file = os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "language", "mixtral-8x7b", + "evaluate-accuracy.py") + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + accuracy_checker_file + "' --checkpoint-path '" + env['MIXTRAL_CHECKPOINT_PATH'] + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ + "' --dataset-file '" + env['MLC_DATASET_MIXTRAL_PREPROCESSED_PATH'] + "'" + \ + " --dtype " + env.get('MLC_ACCURACY_DTYPE', + "float32") + " > '" + out_file + "'" + + elif dataset == "coco2014": + env['+PYTHONPATH'] = [ + os.path.join( + env['MLC_MLPERF_INFERENCE_SOURCE'], + "text_to_image", + "tools"), + os.path.join( + env['MLC_MLPERF_INFERENCE_SOURCE'], + "text_to_image", + "tools", + "fid")] + extra_options = "" + + if env.get('MLC_SDXL_STATISTICS_FILE_PATH', '') != '': + extra_options += ( + f""" --statistics-path '{ + env['MLC_SDXL_STATISTICS_FILE_PATH']}'""" + ) + + if env.get('MLC_SDXL_COMPLIANCE_IMAGES_PATH', '') != '': + extra_options += ( + f""" --compliance-images-path '{ + env['MLC_SDXL_COMPLIANCE_IMAGES_PATH']}' """ + ) + else: + extra_options += f""" --compliance-images-path '{ + os.path.join( + result_dir, "images")}' """ + + if env.get('MLC_COCO2014_SAMPLE_ID_PATH', '') != '': + extra_options += ( + f" --ids-path '{env['MLC_COCO2014_SAMPLE_ID_PATH']}' " + ) + + if env.get('MLC_SDXL_ACCURACY_RUN_DEVICE', '') != '': + extra_options += ( + f" --device '{env['MLC_SDXL_ACCURACY_RUN_DEVICE']}' " + ) + + # env['DATASET_ANNOTATIONS_FILE_PATH'] = env['MLC_DATASET_ANNOTATIONS_FILE_PATH'] + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "text_to_image", "tools", + "accuracy_coco.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ + "' --caption-path '" + os.path.join( + env['MLC_MLPERF_INFERENCE_SOURCE'], + "text_to_image", + "coco2014", + "captions", + "captions_source.tsv") + "'" + extra_options + " > '" + out_file + "'" + + elif dataset == "kits19": + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_3DUNET_PATH'], + "accuracy_kits.py") + \ + "' --preprocessed_data_dir '" + env['MLC_DATASET_PREPROCESSED_PATH'] +\ + "' --postprocessed_data_dir '" + result_dir +\ + "' --log_file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ + "' --output_dtype " + \ + env['MLC_ACCURACY_DTYPE'] + " > '" + out_file + "'" + + elif dataset == "librispeech": + CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_RNNT_PATH'], + "accuracy_eval.py") + \ + "' --dataset_dir '" + os.path.join(env['MLC_DATASET_PREPROCESSED_PATH'], "..") +\ + "' --manifest '" + env['MLC_DATASET_PREPROCESSED_JSON'] +\ + "' --log_dir '" + result_dir + \ + "' --output_dtype " + \ + env['MLC_ACCURACY_DTYPE'] + " > '" + out_file + "'" + + elif dataset == "terabyte": + extra_options = "" + if env.get('MLC_DLRM_V2_AGGREGATION_TRACE_FILE_PATH', '') != '': + extra_options += ( + f""" --aggregation-trace-file '{ + env['MLC_DLRM_V2_AGGREGATION_TRACE_FILE_PATH']}' """ + ) + if env.get('MLC_DLRM_V2_DAY23_FILE_PATH', '') != '': + extra_options += ( + f""" --day-23-file '{ + 
env['MLC_DLRM_V2_DAY23_FILE_PATH']}' """
+                )
+            CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_DLRM_V2_PATH'], "pytorch", "tools",
+                                                                        "accuracy-dlrm.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir,
+                                                                                                                                          "mlperf_log_accuracy.json") + "'" + extra_options + \
+                " --dtype " + env.get('MLC_ACCURACY_DTYPE',
+                                      "float32") + " > '" + out_file + "'"
+
+        elif dataset == "igbh":
+            if env.get('MLC_DATASET_IGBH_SIZE', '') == '':
+                if env.get('MLC_MLPERF_SUBMISSION_GENERATION_STYLE',
+                           '') == "full":
+                    env['MLC_DATASET_IGBH_SIZE'] = "full"
+                else:
+                    env['MLC_DATASET_IGBH_SIZE'] = "tiny"
+            CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "graph", "R-GAT", "tools", "accuracy_igbh.py") + "' --mlperf-accuracy-file '" + os.path.join(
+                result_dir, "mlperf_log_accuracy.json") + "' --dataset-path '" + env['MLC_DATASET_IGBH_PATH'] + "' --dataset-size '" + env['MLC_DATASET_IGBH_SIZE'] + "' --output-file '" + out_file + "'"
+
+        elif dataset == "dataset_llama3":
+            CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "language", "llama3.1-405b", "evaluate-accuracy.py") + "' --checkpoint-path '" + env['MLC_ML_MODEL_LLAMA3_CHECKPOINT_PATH'] + "' --mlperf-accuracy-file '" + os.path.join(
+                result_dir, "mlperf_log_accuracy.json") + "' --dtype '" + env['MLC_ACCURACY_DTYPE'] + "' --dataset-file '" + env['MLC_DATASET_LLAMA3_PATH'] + "' > '" + out_file + "'"
+
+        elif dataset == "waymo":
+            CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "automotive", "3d-object-detection", "accuracy_waymo.py") + "' --mlperf-accuracy-file '" + os.path.join(
+                result_dir, "mlperf_log_accuracy.json") + "' --waymo-dir '" + env['MLC_DATASET_WAYMO_PATH'] + "' > '" + out_file + "'"
+
+        elif dataset == "nuscenes":
+            CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_BEVFORMER_PATH'], "accuracy_nuscenes_cpu.py") + "' --mlperf-accuracy-file '" + os.path.join(
+                result_dir, "mlperf_log_accuracy.json") + "' --nuscenes-dir '" + env['MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH'] + "' --config '" + os.path.join(env['MLC_MLPERF_INFERENCE_BEVFORMER_PATH'], "projects", "configs", "bevformer", "bevformer_tiny.py") + "' > '" + out_file + "'"
+
+        elif dataset == "cognata_ssd":
+            CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SSD_RESNET50_PATH'], "accuracy_cognata.py") + "' --mlperf-accuracy-file '" + os.path.join(
+                result_dir, "mlperf_log_accuracy.json") + "' --dataset-path '" + env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'] + "' --dataset cognata --config '" + "baseline_8MP_ss_scales_fm1_5x5_all" + "' > '" + out_file + "'"
+
+        elif dataset == "cognata_deeplab":
+            CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_DEEPLABV3PLUS_PATH'], "accuracy_cognata.py") + "' --mlperf-accuracy-file '" + os.path.join(
+                result_dir, "mlperf_log_accuracy.json") + "' --dataset-path '" + env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'] + "' > '" + out_file + "'"
+
+        else:
+            return {'return': 1, 'error': 'Unsupported dataset'}
+
+        run_cmds.append(CMD)
+
+    if os_info['platform'] == 'windows':
+        env['MLC_RUN_CMDS'] = (
+            '\n'.join(run_cmds)).replace(
+            "'",
+            '"').replace(
+            '>',
+            '^>')
+    else:
+        env['MLC_RUN_CMDS'] = "??".join(run_cmds)
+
+    return {'return': 0}
+
+
+def postprocess(i):
+
+    os_info = i['os_info']
+    env = i['env']
+    state = i['state']
+    logger = 
i['automation'].logger + xsep = ';' if os_info['platform'] == 'windows' else ':' + + results_dir = env.get("MLC_MLPERF_ACCURACY_RESULTS_DIR", "") + + results_dir_split = results_dir.split(xsep) + + for result_dir in results_dir_split: + accuracy_file = os.path.join(result_dir, "accuracy.txt") + + if os.path.exists(accuracy_file): + logger.info('') + logger.info('Accuracy file: {}'.format(accuracy_file)) + logger.info('') + + x = '' + with open(accuracy_file, "r") as fp: + x = fp.read() + + if x != '': + logger.info(f"{x}") + + # Trying to extract accuracy dict + for y in x.split('\n'): + if y.startswith('{') and y.endswith('}'): + + import json + + try: + z = json.loads(y) + state['app_mlperf_inference_accuracy'] = z + + break + except ValueError as e: + pass + + logger.info('') + return {'return': 0} diff --git a/script/process-mlperf-accuracy/meta.yaml b/script/process-mlperf-accuracy/meta.yaml index cd14ef67a..458f740fa 100644 --- a/script/process-mlperf-accuracy/meta.yaml +++ b/script/process-mlperf-accuracy/meta.yaml @@ -273,3 +273,54 @@ variations: env: MLC_DATASET: waymo group: dataset + nuscenes: + deps: + - tags: get,preprocessed,dataset,nuscenes + skip_if_env: + MLC_RUN_STATE_DOCKER: + - "yes" + names: + - preprocessed-dataset-mlcommons-nuscenes + - tags: get,ml-model,bevformer + skip_if_env: + MLC_RUN_STATE_DOCKER: + - "yes" + names: + - ml-model-bevformer + env: + MLC_DATASET: nuscenes + group: dataset + cognata_ssd: + deps: + - tags: get,preprocessed,dataset,cognata,_mlc + skip_if_env: + MLC_RUN_STATE_DOCKER: + - "yes" + names: + - preprocessed-dataset-mlcommons-cognata-ssd-resnet50 + - tags: get,ml-model,ssd,resnet50,_mlc,_rclone + skip_if_any_env: + MLC_RUN_STATE_DOCKER: + - "yes" + names: + - ml-model-ssd-resnet50 + env: + MLC_DATASET: cognata_ssd + group: dataset + cognata_deeplab: + deps: + - tags: get,preprocessed,dataset,cognata,_mlc,_segmentation + skip_if_env: + MLC_RUN_STATE_DOCKER: + - "yes" + names: + - preprocessed-dataset-mlcommons-cognata-deeplabv3-plus + - tags: get,ml-model,deeplabv3-plus + skip_if_env: + MLC_RUN_STATE_DOCKER: + - "yes" + names: + - ml-model-deeplabv3-plus + env: + MLC_DATASET: cognata_deeplab + group: dataset diff --git a/script/run-mlperf-automotive-app/customize.py b/script/run-mlperf-automotive-app/customize.py index c3dda9c7a..10983fc24 100644 --- a/script/run-mlperf-automotive-app/customize.py +++ b/script/run-mlperf-automotive-app/customize.py @@ -116,7 +116,8 @@ def preprocess(i): test_list = [] - variation_implementation = "_" + \ + variation_benchmark_version = "_" + env["MLC_MLPERF_INFERENCE_VERSION"] + variation_implementation = ",_" + \ env.get("MLC_MLPERF_IMPLEMENTATION", "reference") variation_model = ",_" + env["MLC_MLPERF_MODEL"] variation_backend = ",_" + \ @@ -135,7 +136,7 @@ def preprocess(i): else: variation_quantization_string = "" - tags = "app,abtf-inference," + variation_implementation + variation_model + variation_backend + variation_device + \ + tags = "app,abtf-inference," + variation_benchmark_version + variation_implementation + variation_model + variation_backend + variation_device + \ variation_run_style + variation_reproducibility + \ variation_quantization_string + power_variation verbose = inp.get('v', False) @@ -151,37 +152,37 @@ def preprocess(i): add_deps_recursive[key] = adr_from_meta[key] if env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '') != '': - if not add_deps_recursive.get('mlperf-inference-implementation', {}): - add_deps_recursive['mlperf-inference-implementation'] = {} - if 
add_deps_recursive['mlperf-inference-implementation'].get( + if not add_deps_recursive.get('abtf-inference-implementation', {}): + add_deps_recursive['abtf-inference-implementation'] = {} + if add_deps_recursive['abtf-inference-implementation'].get( 'tags', '') == '': - add_deps_recursive['mlperf-inference-implementation']['tags'] = '' + add_deps_recursive['abtf-inference-implementation']['tags'] = '' else: - add_deps_recursive['mlperf-inference-implementation']['tags'] += ',' - add_deps_recursive['mlperf-inference-implementation']['tags'] += "_batch_size." + \ + add_deps_recursive['abtf-inference-implementation']['tags'] += ',' + add_deps_recursive['abtf-inference-implementation']['tags'] += "_batch_size." + \ env['MLC_MLPERF_LOADGEN_MAX_BATCHSIZE'] if env.get('MLC_MLPERF_INFERENCE_SUT_VARIATION', '') != '': - if not add_deps_recursive.get('mlperf-inference-implementation', {}): - add_deps_recursive['mlperf-inference-implementation'] = {} - if add_deps_recursive['mlperf-inference-implementation'].get( + if not add_deps_recursive.get('abtf-inference-implementation', {}): + add_deps_recursive['abtf-inference-implementation'] = {} + if add_deps_recursive['abtf-inference-implementation'].get( 'tags', '') == '': - add_deps_recursive['mlperf-inference-implementation']['tags'] = '' + add_deps_recursive['abtf-inference-implementation']['tags'] = '' else: - add_deps_recursive['mlperf-inference-implementation']['tags'] += ',' - add_deps_recursive['mlperf-inference-implementation']['tags'] += "_" + \ + add_deps_recursive['abtf-inference-implementation']['tags'] += ',' + add_deps_recursive['abtf-inference-implementation']['tags'] += "_" + \ env['MLC_MLPERF_INFERENCE_SUT_VARIATION'] if env.get('MLC_NETWORK_LOADGEN', '') != '': - if not add_deps_recursive.get('mlperf-inference-implementation', {}): - add_deps_recursive['mlperf-inference-implementation'] = {} + if not add_deps_recursive.get('abtf-inference-implementation', {}): + add_deps_recursive['abtf-inference-implementation'] = {} network_variation_tag = f"_network-{env['MLC_NETWORK_LOADGEN']}" - if add_deps_recursive['mlperf-inference-implementation'].get( + if add_deps_recursive['abtf-inference-implementation'].get( 'tags', '') == '': - add_deps_recursive['mlperf-inference-implementation']['tags'] = '' + add_deps_recursive['abtf-inference-implementation']['tags'] = '' else: - add_deps_recursive['mlperf-inference-implementation']['tags'] += ',' - add_deps_recursive['mlperf-inference-implementation']['tags'] += network_variation_tag + add_deps_recursive['abtf-inference-implementation']['tags'] += ',' + add_deps_recursive['abtf-inference-implementation']['tags'] += network_variation_tag if env.get('MLC_OUTPUT_FOLDER_NAME', '') == '': env['MLC_OUTPUT_FOLDER_NAME'] = env['MLC_MLPERF_RUN_STYLE'] + "_results" @@ -290,9 +291,9 @@ def preprocess(i): if state.get("mlc-mlperf-inference-results"): # print(state["mlc-mlperf-inference-results"]) for sut in state["mlc-mlperf-inference-results"]: # only one sut will be there - # Better to do this in a stand alone CM script with proper deps but + # Better to do this in a stand alone MLC script with proper deps but # currently we manage this by modifying the sys path of the python - # executing CM + # executing MLC import mlperf_utils # noqa logger.info(f"{sut}") diff --git a/script/run-mlperf-automotive-app/meta.yaml b/script/run-mlperf-automotive-app/meta.yaml index c70ac9e97..74b9e34c9 100644 --- a/script/run-mlperf-automotive-app/meta.yaml +++ b/script/run-mlperf-automotive-app/meta.yaml @@ -43,6 +43,7 @@ 
input_mapping: save_console_log: MLC_SAVE_CONSOLE_LOG execution_mode: MLC_MLPERF_RUN_STYLE find_performance: MLC_MLPERF_FIND_PERFORMANCE_MODE + framework: MLC_MLPERF_BACKEND gh_token: MLC_GH_TOKEN gpu_name: MLC_NVIDIA_GPU_NAME hw_name: MLC_HW_NAME @@ -105,8 +106,8 @@ deps: skip_if_env: MLC_MLPERF_USE_DOCKER: [ on ] - names: - - inference-src - tags: get,mlcommons,inference,src + - automotive-src + tags: get,mlcommons,automotive,src skip_if_env: MLC_MLPERF_USE_DOCKER: [ on ] - tags: get,sut,description @@ -121,7 +122,7 @@ deps: skip_if_env: OUTPUT_BASE_DIR: [ on ] - tags: install,pip-package,for-mlc-python,_package.tabulate -- tags: get,mlperf,inference,utils +- tags: get,mlperf,automotive,utils skip_if_env: MLC_MLPERF_USE_DOCKER: [ on ] @@ -214,6 +215,11 @@ variations: compiler: tags: gcc group: benchmark-version + + v0.5: + group: benchmark-version + env: + MLC_MLPERF_INFERENCE_VERSION: v0.5 performance-and-accuracy: default: true From 8839d870099bc03b777627c16ef49c10e93935d4 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Wed, 28 May 2025 09:30:49 +0100 Subject: [PATCH 38/43] Fix ipmitool version detect (#443) * Fix ipmitool version detect --- script/get-generic-sys-util/meta.yaml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/script/get-generic-sys-util/meta.yaml b/script/get-generic-sys-util/meta.yaml index f058b3127..b8f248337 100644 --- a/script/get-generic-sys-util/meta.yaml +++ b/script/get-generic-sys-util/meta.yaml @@ -139,6 +139,21 @@ variations: dnf: '' yum: '' + ipmitool: + env: + MLC_SYS_UTIL_NAME: ipmitool + MLC_SYS_UTIL_VERSION_CMD: ipmitool -V + MLC_SYS_UTIL_VERSION_RE: 'ipmitool version ([\d.]+)' + MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + new_env_keys: + - MLC_IPMITOOL_VERSION + state: + ipmitool: # tbd: complete for other flavours of linux + apt: ipmitool + brew: '' + dnf: '' + yum: '' + libgl: env: MLC_SYS_UTIL_NAME: libgl # tbd: regular expression for version as well as whether its installed? 
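The new ipmitool variation above detects the installed version by running `ipmitool -V` and matching the output against the `MLC_SYS_UTIL_VERSION_RE` pattern. A quick, self-contained check of that regular expression; the sample output string below is hypothetical, not taken from the patch:

    import re

    sample = "ipmitool version 1.8.19"  # hypothetical 'ipmitool -V' output
    match = re.search(r'ipmitool version ([\d.]+)', sample)
    assert match and match.group(1) == "1.8.19"  # capture group holds the version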
From 5796f15a219dc71f9057e97e02bc23adfd6cdef9 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Wed, 28 May 2025 16:05:59 +0100 Subject: [PATCH 39/43] Fix detect-sudo (#444) --- script/detect-sudo/customize.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/script/detect-sudo/customize.py b/script/detect-sudo/customize.py index 2a56f6d33..3e1b65059 100644 --- a/script/detect-sudo/customize.py +++ b/script/detect-sudo/customize.py @@ -129,8 +129,7 @@ def get_input(): def prompt_sudo(logger): - if os.geteuid() != 0 and not is_user_in_sudo_group( - logger): # No sudo required for root user + if os.geteuid() != 0: # No sudo required for root user # Prompt for the password @@ -171,7 +170,8 @@ def prompt_sudo(logger): except subprocess.TimeoutExpired: logger.info("Timedout") reset_terminal() # Reset terminal to sane state - if not prompt_retry(): # If the user chooses not to retry or times out + if not prompt_retry( + logger): # If the user chooses not to retry or times out return -1 except subprocess.CalledProcessError as e: logger.error(f"Command failed: {e.output}") From dc07eb4305f54a41dd501c4a2e35dac0bef1f351 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 28 May 2025 20:44:37 +0530 Subject: [PATCH 40/43] Changes for automotive 0.5 (#442) * fix package name * download only if cognata dataset path is not present * change the model name and variations to official ones * enable torchvision for deeplabv3plus * fix torchvision dependency name * fix tag for cognata download * fix dependencies for accuracy checker --- .../customize.py | 6 +-- .../meta.yaml | 40 +++++++++++------- script/app-mlperf-automotive/meta.yaml | 42 ++++++++++--------- script/get-generic-sys-util/meta.yaml | 2 +- .../customize.py | 3 +- script/process-mlperf-accuracy/meta.yaml | 18 +++++++- 6 files changed, 69 insertions(+), 42 deletions(-) diff --git a/script/app-mlperf-automotive-mlcommons-python/customize.py b/script/app-mlperf-automotive-mlcommons-python/customize.py index 4ea2ce0f1..688a52dc9 100644 --- a/script/app-mlperf-automotive-mlcommons-python/customize.py +++ b/script/app-mlperf-automotive-mlcommons-python/customize.py @@ -251,7 +251,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, print(env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']) cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend onnx --dataset nuscenes --nuscenes-root {os.path.dirname(env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'].rstrip("/"))} --dataset-path {env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH']} --checkpoint {env['MLC_ML_MODEL_BEVFORMER_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}""" print(cmd) - elif env['MLC_MODEL'] in ['ssd-resnet50']: + elif env['MLC_MODEL'] in ['ssd']: run_dir = env['MLC_MLPERF_INFERENCE_SSD_RESNET50_PATH'] env['RUN_DIR'] = run_dir @@ -265,7 +265,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {backend} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_SSD_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} 
{dataset_options}""" - elif env['MLC_MODEL'] in ['deeplab_v3+']: + elif env['MLC_MODEL'] in ['deeplabv3plus']: run_dir = env['MLC_MLPERF_INFERENCE_DEEPLABV3PLUS_PATH'] env['RUN_DIR'] = run_dir @@ -275,7 +275,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, backend = "onnx" if env.get( 'MLC_MLPERF_BACKEND') == "onnxruntime" else env.get('MLC_MLPERF_BACKEND') - cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {backend} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}""" + cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {backend} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH']} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}""" ########################################################################## diff --git a/script/app-mlperf-automotive-mlcommons-python/meta.yaml b/script/app-mlperf-automotive-mlcommons-python/meta.yaml index dd288d46e..cf95e283b 100644 --- a/script/app-mlperf-automotive-mlcommons-python/meta.yaml +++ b/script/app-mlperf-automotive-mlcommons-python/meta.yaml @@ -339,6 +339,10 @@ variations: add_deps_recursive: ml-model-bevformer: tags: _onnx + ml-model-ssd: + tags: _onnx + ml-model-deeplabv3-plus: + tags: _onnx onnxruntime,cpu: env: @@ -362,7 +366,7 @@ variations: tags: raw,_pytorch ml-model-bevformer: tags: _pytorch - ml-model-ssd-resnet50: + ml-model-ssd: tags: _pytorch ml-model-deeplabv3-plus: tags: _pytorch @@ -375,7 +379,7 @@ variations: pytorch: env: MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu/torch_stable.html - torchvision: + ml-engine-torchvision: env: MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu/torch_stable.html torchaudio: @@ -457,7 +461,7 @@ variations: pytorch: version_max: "2.5.1" version_max_usable: "2.5.1" - torchvision: + ml-engine-torchvision: version_max: "0.20.1" version_max_usable": "0.20.1" torchaudio: @@ -473,26 +477,26 @@ variations: - tags: get,generic-python-lib,_package.pyquaternion - tags: get,generic-python-lib,_package.tqdm - tags: get,generic-python-lib,_package.nuscenes-devkit - - tags: get,preprocessed,dataset,nuscenes + - tags: get,preprocessed,dataset,nuscenes,_mlc,_validation skip_if_env: MLC_RUN_STATE_DOCKER: - "yes" names: - preprocessed-dataset-mlcommons-nuscenes - - tags: get,ml-model,bevformer + - tags: get,ml-model,bevformer,_mlc,_rclone skip_if_env: MLC_RUN_STATE_DOCKER: - "yes" names: - ml-model-bevformer - ssd-resnet50: + ssd: group: models add_deps_recursive: pytorch: version_max: "2.3.1" version_max_usable: "2.3.1" - torchvision: + ml-engine-torchvision: version_max: "0.18.1" version_max_usable": "0.18.1" torchaudio: @@ -502,32 +506,35 @@ variations: - tags: get,generic-python-lib,_package.Cython - tags: get,generic-python-lib,_package.scikit-image - tags: get,generic-python-lib,_package.faster-coco-eval + - tags: get,generic-python-lib,_torchvision + names: + - ml-engine-torchvision - tags: get,generic-python-lib,_package.torchinfo - tags: 
get,generic-python-lib,_package.torchmetrics
     - tags: get,generic-sys-util,_libgl1-mesa-glx
     - tags: get,generic-python-lib,_package.onnx
     - tags: get,generic-python-lib,_package.onnxruntime
     - tags: get,generic-python-lib,_package.tqdm
-    - tags: get,preprocessed,dataset,cognata,_mlc
+    - tags: get,preprocessed,dataset,cognata,_mlc,_2d_object_det,_validation
       skip_if_env:
         MLC_RUN_STATE_DOCKER:
         - "yes"
       names:
-      - preprocessed-dataset-mlcommons-cognata-ssd-resnet50
+      - preprocessed-dataset-mlcommons-cognata-ssd
     - tags: get,ml-model,ssd,resnet50,_mlc,_rclone
       skip_if_any_env:
         MLC_RUN_STATE_DOCKER:
         - "yes"
       names:
-      - ml-model-ssd-resnet50
+      - ml-model-ssd
 
-  deeplab-v3+:
+  deeplabv3plus:
     group: models
     add_deps_recursive:
       pytorch:
         version_max: "2.3.1"
         version_max_usable: "2.3.1"
-      torchvision:
+      ml-engine-torchvision:
         version_max: "0.18.1"
         version_max_usable: "0.18.1"
       torchaudio:
@@ -537,6 +544,9 @@ variations:
     - tags: get,generic-python-lib,_package.Cython
     - tags: get,generic-python-lib,_package.scikit-image
     - tags: get,generic-python-lib,_package.scikit-learn
+    - tags: get,generic-python-lib,_torchvision
+      names:
+      - ml-engine-torchvision
     - tags: get,generic-python-lib,_package.torchinfo
     - tags: get,generic-python-lib,_package.torchmetrics
     - tags: get,generic-sys-util,_libgl1-mesa-glx
@@ -544,13 +554,13 @@ variations:
     - tags: get,generic-python-lib,_package.onnxruntime
     - tags: get,generic-python-lib,_package.tqdm
     - tags: get,generic-python-lib,_package.ijson
-    - tags: get,preprocessed,dataset,cognata,_mlc,_segmentation
+    - tags: get,preprocessed,dataset,cognata,_mlc,_segmentation,_validation
       skip_if_env:
         MLC_RUN_STATE_DOCKER:
         - "yes"
       names:
       - preprocessed-dataset-mlcommons-cognata-deeplabv3-plus
-    - tags: get,ml-model,deeplabv3-plus
+    - tags: get,ml-model,deeplabv3-plus,_mlc,_rclone
      skip_if_env:
        MLC_RUN_STATE_DOCKER:
        - "yes"
@@ -598,4 +608,4 @@ variations:
   batch_size.#:
     group: batch-size
     env:
-      MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: "#"
\ No newline at end of file
+      MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: "#"
diff --git a/script/app-mlperf-automotive/meta.yaml b/script/app-mlperf-automotive/meta.yaml
index 101a7b851..db81b5313 100644
--- a/script/app-mlperf-automotive/meta.yaml
+++ b/script/app-mlperf-automotive/meta.yaml
@@ -119,13 +119,15 @@ post_deps:
 
 docker:
   mlc_repo: anandhu-eng@mlperf-automations
-  mlc_repo_branch: automotive
+  mlc_repo_branch: automotive2
   use_host_group_id: True
   use_host_user_id: True
   real_run: false
   user: mlcuser
   interactive: True
   mlc_repos_off: 'mlc pull repo mlcommons@cm4abtf --branch=poc'
+  pre_run_cmds:
+    - mlc pull repo
   deps:
     - tags: get,abtf,scratch,space
   mounts:
@@ -188,16 +190,16 @@ variations:
         tags: _onnxruntime
       ml-model-bevformer:
         tags: _onnx
-      ml-model-ssd-resnet50:
+      ml-model-ssd:
         tags: _onnx
-      ml-model-deeplab-v3+:
+      ml-model-deeplabv3plus:
         tags: _onnx
 
   onnx_dynamic:
     base:
     - onnxruntime
     add_deps_recursive:
-      ml-model-deeplab-v3+:
+      ml-model-deeplabv3plus:
         tags: _onnx_dynamic
 
   onnxruntime,cpu:
@@ -221,9 +223,9 @@ variations:
         tags: _pytorch
       ml-model-bevformer:
         tags: _pytorch
-      ml-model-ssd-resnet50:
+      ml-model-ssd:
         tags: _pytorch
-      ml-model-deeplab-v3+:
+      ml-model-deeplabv3plus:
         tags: _pytorch
 
 
@@ -263,11 +265,11 @@ variations:
       MLC_MODEL: bevformer
     docker:
       deps:
-      - tags: get,preprocessed,dataset,nuscenes
+      - tags: get,preprocessed,dataset,nuscenes,_mlc,_validation
        enable_if_env:
          MLC_USE_DATASET_FROM_HOST:
          - "yes"
-      - tags: get,ml-model,bevformer
+      - tags: get,ml-model,bevformer,_mlc,_rclone
        enable_if_env:
          MLC_USE_MODEL_FROM_HOST:
          - "yes"
@@ -292,16 +294,16 @@ variations:
        - nuscenes-accuracy-script
       tags: run,accuracy,mlperf,_nuscenes
 
-  ssd-resnet50:
+  ssd:
     group: models
     default_env:
       MLC_USE_DATASET_FROM_HOST: yes
     env:
-      MLC_MODEL: ssd-resnet50
+      MLC_MODEL: ssd
     docker:
       deps:
-      - tags: get,preprocessed,dataset,cognata
+      - tags: get,preprocessed,dataset,cognata,_mlc,_2d_object_det,_validation
        enable_if_env:
          MLC_USE_DATASET_FROM_HOST:
          - "yes"
@@ -310,13 +312,13 @@ variations:
          MLC_USE_MODEL_FROM_HOST:
          - "yes"
        names:
-        - ml-model-ssd-resnet50
+        - ml-model-ssd
       mounts:
       - "${{ MLC_PREPROCESSED_DATASET_COGNATA_PATH }}:${{ MLC_PREPROCESSED_DATASET_COGNATA_PATH }}"
       - "${{ MLC_ML_MODEL_SSD_PATH }}:${{ MLC_ML_MODEL_SSD_PATH }}"
     add_deps_recursive:
       abtf-inference-implementation:
-        tags: _ssd-resnet50
+        tags: _ssd
     posthook_deps:
     - enable_if_env:
         MLC_MLPERF_LOADGEN_MODE:
@@ -329,31 +331,31 @@ variations:
        - cognata-ssd-accuracy-script
       tags: run,accuracy,mlperf,_cognata_ssd
 
-  deeplab-v3+:
+  deeplabv3plus:
     group: models
     default_env:
       MLC_USE_DATASET_FROM_HOST: yes
     env:
-      MLC_MODEL: deeplab-v3+
+      MLC_MODEL: deeplabv3plus
     docker:
       deps:
-      - tags: get,preprocessed,dataset,cognata,_segmentation
+      - tags: get,preprocessed,dataset,cognata,_mlc,_segmentation,_validation
        enable_if_env:
          MLC_USE_DATASET_FROM_HOST:
          - "yes"
-      - tags: get,ml-model,ssd,resnet50,_mlc,_rclone
+      - tags: get,ml-model,deeplabv3-plus,_mlc,_rclone
        enable_if_env:
          MLC_USE_MODEL_FROM_HOST:
          - "yes"
        names:
-        - ml-model-deeplab-v3+
+        - ml-model-deeplabv3plus
       mounts:
       - "${{ MLC_PREPROCESSED_DATASET_COGNATA_PATH }}:${{ MLC_PREPROCESSED_DATASET_COGNATA_PATH }}"
       - "${{ MLC_ML_MODEL_DEEPLABV3_PLUS_PATH }}:${{ MLC_ML_MODEL_DEEPLABV3_PLUS_PATH }}"
     add_deps_recursive:
       abtf-inference-implementation:
-        tags: _deeplab-v3+
+        tags: _deeplabv3plus
     posthook_deps:
     - enable_if_env:
         MLC_MLPERF_LOADGEN_MODE:
@@ -460,4 +462,4 @@ variations:
       MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: '#'
     add_deps_recursive:
       abtf-inference-implementation:
-        tags: _batch_size.#
\ No newline at end of file
+        tags: _batch_size.#
diff --git a/script/get-generic-sys-util/meta.yaml b/script/get-generic-sys-util/meta.yaml
index b8f248337..e0e5ed1d3 100644
--- a/script/get-generic-sys-util/meta.yaml
+++ b/script/get-generic-sys-util/meta.yaml
@@ -177,7 +177,7 @@ variations:
     new_env_keys:
     - MLC_LIBGLX_VERSION
     state:
-      libgl: # tbd: complete for other flavours of linux
+      libgl1-mesa-glx: # tbd: complete for other flavours of linux
        apt: libgl1-mesa-glx
        brew: ''
        dnf: ''
diff --git a/script/get-preprocessed-dataset-cognata/customize.py b/script/get-preprocessed-dataset-cognata/customize.py
index 1e4f0beba..c77e3bb17 100644
--- a/script/get-preprocessed-dataset-cognata/customize.py
+++ b/script/get-preprocessed-dataset-cognata/customize.py
@@ -8,7 +8,8 @@ def preprocess(i):
 
     env = i['env']
 
-    if env.get('MLC_NUSCENES_DATASET_TYPE', '') == "prebuilt":
+    if env.get('MLC_NUSCENES_DATASET_TYPE', '') == "prebuilt" and env.get(
+            'MLC_PREPROCESSED_DATASET_COGNATA_PATH', '') == '':
         env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes"
 
     return {'return': 0}
diff --git a/script/process-mlperf-accuracy/meta.yaml b/script/process-mlperf-accuracy/meta.yaml
index 458f740fa..b2d628611 100644
--- a/script/process-mlperf-accuracy/meta.yaml
+++ b/script/process-mlperf-accuracy/meta.yaml
@@ -13,6 +13,20 @@ deps:
   - inference-src
   - accuracy-check-src
   tags: get,mlcommons,inference,src
+  skip_if_env:
+    MLC_DATASET:
+    - nuscenes
+    - cognata_deeplab
+    - cognata_ssd
+- names:
+  - automotive-src
+  - accuracy-check-src
+  tags: get,mlcommons,automotive,src
+  enable_if_env:
+    MLC_DATASET:
+    - nuscenes
+    - cognata_deeplab
+    - cognata_ssd
 input_mapping:
   rerun: MLC_RERUN
   result_dir: MLC_MLPERF_ACCURACY_RESULTS_DIR
@@ -297,13 +311,13 @@ variations:
           MLC_RUN_STATE_DOCKER:
           - "yes"
         names:
-        - preprocessed-dataset-mlcommons-cognata-ssd-resnet50
+        - preprocessed-dataset-mlcommons-cognata-ssd
       - tags: get,ml-model,ssd,resnet50,_mlc,_rclone
         skip_if_any_env:
           MLC_RUN_STATE_DOCKER:
           - "yes"
         names:
-        - ml-model-ssd-resnet50
+        - ml-model-ssd
       env:
         MLC_DATASET: cognata_ssd
       group: dataset
 
From cc1d43d1d5eeebee7efa08a1aa0f1cc62fcb1560 Mon Sep 17 00:00:00 2001
From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com>
Date: Fri, 30 May 2025 22:29:34 +0530
Subject: [PATCH 41/43] Fix for early stopping and command generation (#445)

* fix early stopping issue + cmd generation for ssd
* add device flag
* changes for constantstream
* fix command generation for accuracy check
* docker repo defaulted to mlcommons
* submission generation changes for automotive
---
 .../customize.py                              | 13 +++++--
 .../meta.yaml                                 | 15 +++++++-
 script/app-mlperf-automotive/customize.py     |  2 +-
 script/app-mlperf-automotive/meta.yaml        | 20 ++++++++--
 script/app-mlperf-inference-amd/meta.yaml     |  4 +-
 .../meta.yaml                                 |  4 +-
 script/app-mlperf-inference-dummy/meta.yaml   |  4 +-
 script/app-mlperf-inference-intel/meta.yaml   | 12 +++---
 .../meta.yaml                                 |  4 +-
 .../meta.yaml                                 |  4 +-
 .../app-mlperf-inference-qualcomm/meta.yaml   |  4 +-
 script/app-mlperf-inference-redhat/meta.yaml  |  4 +-
 .../meta.yaml                                 | 37 +++++++++++++++++--
 .../customize.py                              | 10 +++--
 .../meta.yaml                                 |  5 +++
 script/get-mlperf-inference-loadgen/meta.yaml |  4 ++
 .../meta.yaml                                 |  2 +-
 .../meta.yaml                                 | 19 +++++++---
 script/process-mlperf-accuracy/customize.py   |  2 +-
 script/run-mlperf-automotive-app/customize.py |  3 ++
 script/run-mlperf-automotive-app/meta.yaml    |  3 +-
 script/run-mlperf-inference-app/meta.yaml     |  2 +-
 .../meta.yaml                                 | 23 ++++++++++--
 .../meta.yaml                                 | 15 ++++++--
 24 files changed, 164 insertions(+), 51 deletions(-)

diff --git a/script/app-mlperf-automotive-mlcommons-python/customize.py b/script/app-mlperf-automotive-mlcommons-python/customize.py
index 688a52dc9..806ff2951 100644
--- a/script/app-mlperf-automotive-mlcommons-python/customize.py
+++ b/script/app-mlperf-automotive-mlcommons-python/customize.py
@@ -198,6 +198,8 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options,
 
     q = '"' if os_info['platform'] == 'windows' else "'"
 
+    device = env['MLC_MLPERF_DEVICE']
+
     ##########################################################################
     # Grigori added for ABTF demo
@@ -235,6 +237,11 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options,
 
         env['RUN_DIR'] = run_dir
 
+        if device == "gpu":
+            logger.warning(
+                "Bevformer reference implementation is not supported on GPU, defaulting to CPU")
+            device = "cpu"
+
         env['OUTPUT_DIR'] = env['MLC_MLPERF_OUTPUT_DIR']
 
         if env['MLC_MLPERF_BACKEND'] != "onnxruntime":
@@ -249,7 +256,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options,
                                    "bevformer", "bevformer_tiny.py")
 
         print(env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'])
-        cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend onnx --dataset nuscenes --nuscenes-root {os.path.dirname(env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'].rstrip("/"))} --dataset-path {env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH']} --checkpoint {env['MLC_ML_MODEL_BEVFORMER_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}"""
+        cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend onnx --dataset nuscenes --device {"cuda" if device == "gpu" else "cpu"} --nuscenes-root {os.path.dirname(env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH'].rstrip("/"))} --dataset-path {env['MLC_PREPROCESSED_DATASET_NUSCENES_PATH']} --checkpoint {env['MLC_ML_MODEL_BEVFORMER_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}"""
 
         print(cmd)
     elif env['MLC_MODEL'] in ['ssd']:
         run_dir = env['MLC_MLPERF_INFERENCE_SSD_RESNET50_PATH']
@@ -263,7 +270,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options,
 
         config_path = "baseline_8MP_ss_scales_fm1_5x5_all"
 
-        cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {backend} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_SSD_PATH']} --config {config_path} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}"""
+        cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {backend} --cognata-root-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_SSD_PATH']} --config {config_path} --device {"cuda" if device == "gpu" else "cpu"} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}"""
 
     elif env['MLC_MODEL'] in ['deeplabv3plus']:
         run_dir = env['MLC_MLPERF_INFERENCE_DEEPLABV3PLUS_PATH']
@@ -275,7 +282,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options,
         backend = "onnx" if env.get(
             'MLC_MLPERF_BACKEND') == "onnxruntime" else env.get('MLC_MLPERF_BACKEND')
 
-        cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {backend} --dataset cognata --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH']} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}"""
+        cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(run_dir, "main.py")} --output {env['OUTPUT_DIR']} --scenario {env['MLC_MLPERF_LOADGEN_SCENARIO']} --backend {backend} --dataset cognata --device {"cuda" if device == "gpu" else "cpu"} --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --checkpoint {env['MLC_ML_MODEL_DEEPLABV3_PLUS_PATH']} {env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS']} {scenario_extra_options} {mode_extra_options} {dataset_options}"""
 
     ##########################################################################
diff --git a/script/app-mlperf-automotive-mlcommons-python/meta.yaml b/script/app-mlperf-automotive-mlcommons-python/meta.yaml
index cf95e283b..e7c863366 100644
--- a/script/app-mlperf-automotive-mlcommons-python/meta.yaml
+++ b/script/app-mlperf-automotive-mlcommons-python/meta.yaml
@@ -60,6 +60,7 @@ input_mapping:
   target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY
   offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
   server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS
+  constantstream_target_qps: MLC_MLPERF_LOADGEN_CONSTANTSTREAM_TARGET_QPS
   singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
   multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
   output: MLC_MLPERF_OUTPUT_DIR
@@ -457,6 +458,9 @@ variations:
 
   bevformer:
     group: models
+    default_env:
+      MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE: "99.9"
+      MLC_MLPERF_DEFAULT_MAX_QUERY_COUNT: 6636
     add_deps_recursive:
       pytorch:
         version_max: "2.5.1"
@@ -492,6 +496,9 @@ variations:
 
   ssd:
     group: models
+    default_env:
+      MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE: "99.9"
+      MLC_MLPERF_DEFAULT_MAX_QUERY_COUNT: 6636
     add_deps_recursive:
       pytorch:
         version_max: "2.3.1"
@@ -515,7 +522,7 @@ variations:
     - tags: get,generic-python-lib,_package.onnx
     - tags: get,generic-python-lib,_package.onnxruntime
     - tags: get,generic-python-lib,_package.tqdm
-    - tags: get,preprocessed,dataset,cognata,_mlc,_2d_object_det,_validation
+    - tags: get,preprocessed,dataset,cognata,_mlc,_2d_obj_det,_validation
       skip_if_env:
         MLC_RUN_STATE_DOCKER:
        - "yes"
@@ -530,6 +537,9 @@ variations:
 
   deeplabv3plus:
     group: models
+    default_env:
+      MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE: "99.9"
+      MLC_MLPERF_DEFAULT_MAX_QUERY_COUNT: 6636
     add_deps_recursive:
       pytorch:
         version_max: "2.3.1"
@@ -601,6 +611,9 @@ variations:
   server:
     env:
       MLC_MLPERF_LOADGEN_SCENARIO: Server
+  constantstream:
+    env:
+      MLC_MLPERF_LOADGEN_SCENARIO: ConstantStream
 
   mvp_demo:
     env:
diff --git a/script/app-mlperf-automotive/customize.py b/script/app-mlperf-automotive/customize.py
index b7d8598c5..f1c0f5958 100644
--- a/script/app-mlperf-automotive/customize.py
+++ b/script/app-mlperf-automotive/customize.py
@@ -68,7 +68,7 @@ def postprocess(i):
     mlperf_log = MLPerfLog(os.path.join(output_dir, "mlperf_log_detail.txt"))
     if mode == "performance":
-        if scenario in ["Offline", "Server"]:
+        if scenario in ["Offline", "Server", "ConstantStream"]:
             metric = "target_qps"
             result = mlperf_log['result_mean_latency_ns'] / 1000000
         elif scenario.endswith("Stream"):
diff --git a/script/app-mlperf-automotive/meta.yaml b/script/app-mlperf-automotive/meta.yaml
index db81b5313..ebe23d75d 100644
--- a/script/app-mlperf-automotive/meta.yaml
+++ b/script/app-mlperf-automotive/meta.yaml
@@ -59,6 +59,7 @@ input_mapping:
   target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY
   offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
   server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS
+  constantstream_target_qps: MLC_MLPERF_LOADGEN_CONSTANTSTREAM_TARGET_QPS
   singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
   multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
   output: MLC_MLPERF_OUTPUT_DIR
@@ -118,8 +119,8 @@ post_deps:
 
 docker:
-  mlc_repo: anandhu-eng@mlperf-automations
-  mlc_repo_branch: automotive2
+  mlc_repo: mlcommons@mlperf-automations
+  mlc_repo_branch: dev
   use_host_group_id: True
   use_host_user_id: True
   real_run: false
@@ -303,7 +304,7 @@ variations:
       MLC_MODEL: ssd
     docker:
       deps:
-      - tags: get,preprocessed,dataset,cognata,_mlc,_2d_object_det,_validation
+      - tags: get,preprocessed,dataset,cognata,_mlc,_2d_obj_det,_validation
        enable_if_env:
          MLC_USE_DATASET_FROM_HOST:
          - "yes"
@@ -381,6 +382,9 @@ variations:
       abtf-inference-implementation:
         tags: _cpu
 
+  gpu:
+    alias: cuda
+
   cuda:
     group: device
     env:
@@ -413,12 +417,14 @@ variations:
     add_deps_recursive:
       abtf-inference-implementation:
         tags: _offline
+
   multistream:
     env:
       MLC_MLPERF_LOADGEN_SCENARIO: MultiStream
     add_deps_recursive:
       abtf-inference-implementation:
         tags: _multistream
+
   singlestream:
     group: loadgen-scenario
     default: true
@@ -427,6 +433,14 @@ variations:
       MLC_MLPERF_LOADGEN_SCENARIO: SingleStream
     add_deps_recursive:
       abtf-inference-implementation:
         tags: _singlestream
+
+  constantstream:
+    env:
+      MLC_MLPERF_LOADGEN_SCENARIO: ConstantStream
+    add_deps_recursive:
+      abtf-inference-implementation:
+        tags: _constantstream
+
   server:
     env:
       MLC_MLPERF_LOADGEN_SCENARIO: Server
diff --git a/script/app-mlperf-inference-amd/meta.yaml b/script/app-mlperf-inference-amd/meta.yaml
index 2c3b6d063..1481362be 100644
--- a/script/app-mlperf-inference-amd/meta.yaml
+++ b/script/app-mlperf-inference-amd/meta.yaml
@@ -91,12 +91,12 @@ deps:
     - inference-src
 
   # Download MLPerf inference loadgen
-  - tags: get,mlcommons,inference,loadgen
+  - tags: get,mlcommons,inference,loadgen,_inference
     names:
     - inference-loadgen
 
   # Creates user conf for given SUT
-  - tags: generate,user-conf,mlperf,inference
+  - tags: generate,user-conf,mlperf,inference,_inference
     names:
     - user-conf-generator
diff --git a/script/app-mlperf-inference-ctuning-cpp-tflite/meta.yaml b/script/app-mlperf-inference-ctuning-cpp-tflite/meta.yaml
index 815a2a152..0ffa1e9d2 100644
--- a/script/app-mlperf-inference-ctuning-cpp-tflite/meta.yaml
+++ b/script/app-mlperf-inference-ctuning-cpp-tflite/meta.yaml
@@ -28,7 +28,7 @@ deps:
   tags: get,cuda
 - names:
   - loadgen
-  tags: get,loadgen
+  tags: get,loadgen,_inference
 - names:
   - inference-src
   tags: get,mlcommons,inference,src
@@ -121,7 +121,7 @@ post_deps:
 prehook_deps:
 - names:
   - user-conf-generator
-  tags: generate,user-conf,mlperf,inference
+  tags: generate,user-conf,mlperf,inference,_inference
 - enable_if_env:
     MLC_MLPERF_SKIP_RUN:
     - 'no'
diff --git a/script/app-mlperf-inference-dummy/meta.yaml b/script/app-mlperf-inference-dummy/meta.yaml
index f8876eb81..e488d679e 100644
--- a/script/app-mlperf-inference-dummy/meta.yaml
+++ b/script/app-mlperf-inference-dummy/meta.yaml
@@ -91,12 +91,12 @@ deps:
     - inference-src
 
   # Download MLPerf inference loadgen
-  - tags: get,mlcommons,inference,loadgen
+  - tags: get,mlcommons,inference,loadgen,_inference
     names:
     - inference-loadgen
 
   # Creates user conf for given SUT
-  - tags: generate,user-conf,mlperf,inference
+  - tags: generate,user-conf,mlperf,inference,_inference
     names:
     - user-conf-generator
diff --git a/script/app-mlperf-inference-intel/meta.yaml b/script/app-mlperf-inference-intel/meta.yaml
index 86a2806eb..3470e6712 100644
--- a/script/app-mlperf-inference-intel/meta.yaml
+++ b/script/app-mlperf-inference-intel/meta.yaml
@@ -392,7 +392,7 @@ variations:
       - tags: get,mlcommons,inference,src
         names:
         - inference-src
-      - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build
+      - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build,_inference
        names:
        - inference-loadgen
@@ -510,7 +510,7 @@ variations:
      - tags: get,mlcommons,inference,src
        names:
        - inference-src
-      - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build
+      - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build,_inference
        names:
        - inference-loadgen
@@ -627,7 +627,7 @@ variations:
      - tags: get,mlcommons,inference,src
        names:
        - inference-src
-      - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build
+      - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build,_inference
        names:
        - inference-loadgen
@@ -699,7 +699,7 @@ variations:
      - tags: get,mlcommons,inference,src
        names:
        - inference-src
-      - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build
+      - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build,_inference
        names:
        - inference-loadgen
@@ -924,7 +924,7 @@ variations:
      - tags: get,mlcommons,inference,src
        names:
        - inference-src
-      - tags: get,mlcommons,inference,loadgen,_custom-python
+      - tags: get,mlcommons,inference,loadgen,_custom-python,_inference
        names:
        - inference-loadgen
      - tags: get,ml-model,dlrm,_pytorch
@@ -1074,7 +1074,7 @@ variations:
        - inference-src
 
   # Creates user conf for given SUT
-  - tags: generate,user-conf,mlperf,inference
+  - tags: generate,user-conf,mlperf,inference,_inference
    names:
    - user-conf-generator
  - tags: get,generic-sys-util,_rsync
diff --git a/script/app-mlperf-inference-mlcommons-cpp/meta.yaml b/script/app-mlperf-inference-mlcommons-cpp/meta.yaml
index dda32e172..06af3ddba 100644
--- a/script/app-mlperf-inference-mlcommons-cpp/meta.yaml
+++ b/script/app-mlperf-inference-mlcommons-cpp/meta.yaml
@@ -71,7 +71,7 @@ deps:
 
   # Install MLPerf inference dependencies
 
   # Install MLPerf loadgen
-  - tags: get,loadgen
+  - tags: get,loadgen,_inference
     names:
     - loadgen
@@ -129,7 +129,7 @@ deps:
     tags: get,ml-model,retinanet,_onnx,_fp32
 
   # Creates user conf for given SUT
-  - tags: generate,user-conf,mlperf,inference
+  - tags: generate,user-conf,mlperf,inference,_inference
    names:
    - user-conf-generator
diff --git a/script/app-mlperf-inference-mlcommons-python/meta.yaml b/script/app-mlperf-inference-mlcommons-python/meta.yaml
index a0380fec2..b01c7d989 100644
--- a/script/app-mlperf-inference-mlcommons-python/meta.yaml
+++ b/script/app-mlperf-inference-mlcommons-python/meta.yaml
@@ -684,7 +684,7 @@ deps:
 
   # Install MLPerf inference dependencies
 
   # Creates user conf for given SUT
-  - tags: generate,user-conf,mlperf,inference
+  - tags: generate,user-conf,mlperf,inference,_inference
     names:
     - user-conf-generator
     skip_if_env:
       MLC_RUN_STATE_DOCKER:
       - "yes"
 
   # Install MLPerf loadgen
-  - tags: get,loadgen
+  - tags: get,loadgen,_inference
     names:
     - loadgen
     - mlperf-inference-loadgen
diff --git a/script/app-mlperf-inference-qualcomm/meta.yaml b/script/app-mlperf-inference-qualcomm/meta.yaml
index 1e508e0e6..2e0193355 100644
--- a/script/app-mlperf-inference-qualcomm/meta.yaml
+++ b/script/app-mlperf-inference-qualcomm/meta.yaml
@@ -111,12 +111,12 @@ deps:
     - inference-src
 
   # Download MLPerf inference loadgen
-  - tags: get,mlcommons,inference,loadgen
+  - tags: get,mlcommons,inference,loadgen,_inference
     names:
     - inference-loadgen
 
   # Creates user conf for given SUT
-  - tags: generate,user-conf,mlperf,inference
+  - tags: generate,user-conf,mlperf,inference,_inference
     names:
     - user-conf-generator
diff --git a/script/app-mlperf-inference-redhat/meta.yaml b/script/app-mlperf-inference-redhat/meta.yaml
index 55af68d65..1d32af174 100644
--- a/script/app-mlperf-inference-redhat/meta.yaml
+++ b/script/app-mlperf-inference-redhat/meta.yaml
@@ -91,12 +91,12 @@ deps:
     - inference-src
 
   # Download MLPerf inference loadgen
-  - tags: get,mlcommons,inference,loadgen
+  - tags: get,mlcommons,inference,loadgen,_inference
     names:
     - inference-loadgen
 
   # Creates user conf for given SUT
-  - tags: generate,user-conf,mlperf,inference
+  - tags: generate,user-conf,mlperf,inference,_inference
     names:
     - user-conf-generator
diff --git a/script/generate-mlperf-inference-submission/meta.yaml b/script/generate-mlperf-inference-submission/meta.yaml
index c68bc5a37..480fbd008 100644
--- a/script/generate-mlperf-inference-submission/meta.yaml
+++ b/script/generate-mlperf-inference-submission/meta.yaml
@@ -13,12 +13,8 @@ deps:
   - python
   - python3
   tags: get,python3
-- names:
-  - inference-src
-  tags: mlcommons,inference,src
 - tags: get,sut,system-description
 - tags: install,pip-package,for-mlc-python,_package.tabulate
-- tags: get,mlperf,inference,utils
 - names:
   - get-mlperf-results-dir
   skip_if_env:
@@ -97,18 +93,51 @@ post_deps:
   skip_if_env:
     MLC_SKIP_TRUNCATE_ACCURACY:
     - 'yes'
+  names:
+  - truncate-mlperf-accuracy-log
   tags: accuracy,truncate,mlc
 - enable_if_env:
     MLC_RUN_MLPERF_SUBMISSION_PREPROCESSOR:
     - 'yes'
+  names:
+  - preprocess-mlperf-submission
   tags: preprocess,mlperf,submission
 - skip_if_env:
     MLC_RUN_SUBMISSION_CHECKER:
     - 'no'
   names:
   - mlperf-inference-submission-checker
+  - mlperf-submission-checker
   - submission-checker
   tags: submission,inference,checker,mlc
+variations:
+  inference:
+    default: true
+    add_deps_recursive:
+      truncate-mlperf-accuracy-log:
+        tags: _inference
+      preprocess-mlperf-submission:
+        tags: _inference
+      mlperf-inference-submission-checker:
+        tags: _inference
+    deps:
+    - names:
+      - inference-src
+      tags: mlcommons,inference,src
+    - tags: get,mlperf,inference,utils
+  automotive:
+    add_deps_recursive:
+      truncate-mlperf-accuracy-log:
+        tags: _automotive
+      preprocess-mlperf-submission:
+        tags: _automotive
+      mlperf-inference-submission-checker:
+        tags: _automotive
+    deps:
+    - names:
+      - automotive-src
+      tags: mlcommons,automotive,src
+    - tags: get,mlperf,automotive,utils
 tags:
 - generate
 - submission
diff --git a/script/generate-mlperf-inference-user-conf/customize.py b/script/generate-mlperf-inference-user-conf/customize.py
index b08f0b0fb..2c93af586 100644
--- a/script/generate-mlperf-inference-user-conf/customize.py
+++ b/script/generate-mlperf-inference-user-conf/customize.py
@@ -100,7 +100,7 @@ def preprocess(i):
     query_count = None
     value = None
-    if scenario in ['Offline', 'Server']:
+    if scenario in ['Offline', 'Server', 'ConstantStream']:
         metric = "target_qps"
         tolerance = 1.01
         # value = env.get('MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS') if scenario == "Server" else env.get('MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS')
@@ -343,7 +343,7 @@ def preprocess(i):
             ".sample_concatenate_permutation = 0" + "\n"
     max_duration_fast_s = int(env.get('MLC_MLPERF_MAX_DURATION_FAST', 120))
     max_duration_fast = str(max_duration_fast_s * 1000)  # in milliseconds
-    if scenario == "Server":
+    if scenario == "Server" or scenario == "ConstantStream":
         user_conf += ml_model_name + "." + scenario + \
             f".max_duration = {max_duration_fast}" + "\n"
         target_qps = conf['target_qps']
@@ -368,9 +368,13 @@ def preprocess(i):
             user_conf += ml_model_name + "." + scenario + ".min_duration = " + \
                 env['MLC_MLPERF_INFERENCE_MIN_DURATION'] + " \n"
         elif not is_false(env.get('MLC_MLPERF_USE_MAX_DURATION', 'yes')) and not is_true(env.get(
-                'MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no')):
+                'MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no')) and env.get('MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE') != '99.9':
             user_conf += ml_model_name + "." + scenario + \
                 f".max_duration = {max_duration_valid}" + "\n"
+        if scenario in ["SingleStream", "ConstantStream"] and env.get('MLC_MLPERF_SINGLESTREAM_TARGET_LATENCY_PERCENTILE') == '99.9' and env.get(
+                'MLC_MLPERF_DEFAULT_MAX_QUERY_COUNT', '') != '' and env.get('MLC_MLPERF_TARGET_LATENCY', '') == '':
+            user_conf += ml_model_name + "." + scenario + \
+                f".max_query_count = {env.get('MLC_MLPERF_DEFAULT_MAX_QUERY_COUNT')}" + "\n"
     if scenario == "MultiStream":
         user_conf += ml_model_name + "." + scenario + ".min_query_count = " + \
             env.get(
diff --git a/script/generate-mlperf-inference-user-conf/meta.yaml b/script/generate-mlperf-inference-user-conf/meta.yaml
index 4288bd443..18fbec442 100644
--- a/script/generate-mlperf-inference-user-conf/meta.yaml
+++ b/script/generate-mlperf-inference-user-conf/meta.yaml
@@ -47,6 +47,7 @@ input_mapping:
   target_latency: MLC_MLPERF_LOADGEN_TARGET_LATENCY
   offline_target_qps: MLC_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
   server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS
+  constantstream_target_qps: MLC_MLPERF_LOADGEN_CONSTANTSTREAM_TARGET_QPS
   singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
   multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
   performance_sample_count: MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
@@ -91,12 +92,16 @@ variations:
   inference:
     default: true
     group: benchmark_wg
+    env:
+      MLC_BENCHMARK_GROUP: inference
     deps:
     - tags: get,mlcommons,inference,src
       names:
       - inference-src
   automotive:
     group: benchmark_wg
+    env:
+      MLC_BENCHMARK_GROUP: automotive
     deps:
     - tags: get,mlcommons,automotive,src
       names:
diff --git a/script/get-mlperf-inference-loadgen/meta.yaml b/script/get-mlperf-inference-loadgen/meta.yaml
index ed75db192..e841b3293 100644
--- a/script/get-mlperf-inference-loadgen/meta.yaml
+++ b/script/get-mlperf-inference-loadgen/meta.yaml
@@ -169,6 +169,10 @@ variations:
   automotive:
     env:
       MLC_INFERENCE_AUTOMOTIVE_REPO: 'YES'
+  inference:
+    default: true
+    env:
+      MLC_INFERENCE_AUTOMOTIVE_REPO: 'NO'
 versions:
   custom:
     add_deps:
diff --git a/script/get-preprocessed-dataset-cognata/meta.yaml b/script/get-preprocessed-dataset-cognata/meta.yaml
index c8bac0417..524633f11 100644
--- a/script/get-preprocessed-dataset-cognata/meta.yaml
+++ b/script/get-preprocessed-dataset-cognata/meta.yaml
@@ -36,7 +36,7 @@ variations:
       MLC_DATASET_COGNATA_TASK: segmentation
   validation,2d_obj_det:
     env:
-      MLC_DATASET_COGNATA_EXTRACTED_FOLDER_NAMEE: val_2d
+      MLC_DATASET_COGNATA_EXTRACTED_FOLDER_NAME: val_2d
       MLC_DATASET_COGNATA_TAR_FILENAME: val_2d.tar.gz
       MLC_DOWNLOAD_URL: mlc-cognata:mlc_cognata_dataset/preprocessed_2d/<<>>
   calibration,2d_obj_det:
diff --git a/script/preprocess-mlperf-inference-submission/meta.yaml b/script/preprocess-mlperf-inference-submission/meta.yaml
index 5381b36dd..0ddc10d51 100644
--- a/script/preprocess-mlperf-inference-submission/meta.yaml
+++ b/script/preprocess-mlperf-inference-submission/meta.yaml
@@ -9,17 +9,26 @@ deps:
   - python
   - python3
   tags: get,python3
-- names:
-  - inference-src
-  - submission-checker-src
-  tags: get,mlcommons,inference,src,_branch.dev
-  version: custom
 - names:
   - get-mlperf-submission-dir
   skip_if_env:
     MLC_MLPERF_INFERENCE_SUBMISSION_DIR:
     - 'on'
   tags: get,mlperf,submission,dir
+variations:
+  inference:
+    default: true
+    deps:
+    - names:
+      - inference-src
+      - submission-checker-src
+      tags: mlcommons,inference,src,_branch.dev
+      version: custom
+  automotive:
+    deps:
+    - names:
+      - automotive-src
+      tags: mlcommons,automotive,src
 input_mapping:
   input: MLC_MLPERF_INFERENCE_SUBMISSION_DIR
   submission_dir: MLC_MLPERF_INFERENCE_SUBMISSION_DIR
diff --git a/script/process-mlperf-accuracy/customize.py b/script/process-mlperf-accuracy/customize.py
index afb359ce7..8a2168956 100644
--- a/script/process-mlperf-accuracy/customize.py
+++ b/script/process-mlperf-accuracy/customize.py
@@ -214,7 +214,7 @@ def preprocess(i):
     elif dataset == "cognata_ssd":
         CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_SSD_RESNET50_PATH'], "accuracy_cognata.py") + "' --mlperf-accuracy-file '" + os.path.join(
-            result_dir, "mlperf_log_accuracy.json") + "' --dataset-path '" + env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'] + "' --dataset cognata --config '" + "baseline_8MP_ss_scales_fm1_5x5_all" + "' > '" + out_file + "'"
+            result_dir, "mlperf_log_accuracy.json") + "' --dataset-path '" + env['MLC_PREPROCESSED_DATASET_COGNATA_PATH'] + "' --config '" + "baseline_8MP_ss_scales_fm1_5x5_all" + "' > '" + out_file + "'"
 
     elif dataset == "cognata_deeplab":
         CMD = env['MLC_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['MLC_MLPERF_INFERENCE_DEEPLABV3PLUS_PATH'], "accuracy_cognata.py") + "' --mlperf-accuracy-file '" + os.path.join(
diff --git a/script/run-mlperf-automotive-app/customize.py b/script/run-mlperf-automotive-app/customize.py
index 10983fc24..5e26568db 100644
--- a/script/run-mlperf-automotive-app/customize.py
+++ b/script/run-mlperf-automotive-app/customize.py
@@ -229,6 +229,9 @@ def preprocess(i):
     elif scenario == "Server":
         if env.get('MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS'):
             env['MLC_MLPERF_LOADGEN_TARGET_QPS'] = env['MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS']
+    elif scenario == "ConstantStream":
+        if env.get('MLC_MLPERF_LOADGEN_CONSTANTSTREAM_TARGET_QPS'):
+            env['MLC_MLPERF_LOADGEN_TARGET_QPS'] = env['MLC_MLPERF_LOADGEN_CONSTANTSTREAM_TARGET_QPS']
     elif scenario == "SingleStream":
         if env.get('MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY'):
             env['MLC_MLPERF_LOADGEN_TARGET_LATENCY'] = env['MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY']
diff --git a/script/run-mlperf-automotive-app/meta.yaml b/script/run-mlperf-automotive-app/meta.yaml
index 74b9e34c9..9a2fb7401 100644
--- a/script/run-mlperf-automotive-app/meta.yaml
+++ b/script/run-mlperf-automotive-app/meta.yaml
@@ -73,6 +73,7 @@ input_mapping:
   run_style: MLC_MLPERF_RUN_STYLE
   scenario: MLC_MLPERF_LOADGEN_SCENARIO
   server_target_qps: MLC_MLPERF_LOADGEN_SERVER_TARGET_QPS
+  constantstream_target_qps: MLC_MLPERF_LOADGEN_CONSTANTSTREAM_TARGET_QPS
   singlestream_target_latency: MLC_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
   skip_submission_generation: MLC_MLPERF_SKIP_SUBMISSION_GENERATION
   skip_truncation: MLC_SKIP_TRUNCATE_ACCURACY
@@ -251,4 +252,4 @@ variations:
         - 'false'
         - 'False'
         - '0'
-      tags: generate,mlperf,inference,submission
+      tags: generate,mlperf,inference,submission,_automotive
diff --git a/script/run-mlperf-inference-app/meta.yaml b/script/run-mlperf-inference-app/meta.yaml
index 5c13dfa16..9cc3064c6 100644
--- a/script/run-mlperf-inference-app/meta.yaml
+++ b/script/run-mlperf-inference-app/meta.yaml
@@ -433,7 +433,7 @@ variations:
       skip_if_env:
         MLC_MLPERF_SKIP_SUBMISSION_GENERATION:
         - 'yes'
-      tags: generate,mlperf,inference,submission
+      tags: generate,mlperf,inference,submission,_inference
 
 versions:
   master: {}
diff --git a/script/run-mlperf-inference-submission-checker/meta.yaml b/script/run-mlperf-inference-submission-checker/meta.yaml
index 8d9667ae6..658e8f943 100644
--- a/script/run-mlperf-inference-submission-checker/meta.yaml
+++ b/script/run-mlperf-inference-submission-checker/meta.yaml
@@ -16,10 +16,6 @@ deps:
   - python
   - python3
   tags: get,python3
-- names:
-  - inference-src
-  - submission-checker-src
-  tags: get,mlcommons,inference,src
 - tags: pull,git,repo
   env:
     MLC_GIT_CHECKOUT_PATH: '<<>>'
@@ -43,6 +39,8 @@ deps:
 - enable_if_env:
     MLC_TMP_MLPERF_INFERENCE_PREPROCESS_SUBMISSION:
     - 'on'
+  names:
+  - preprocess-mlperf-submission
   tags: preprocess,mlperf,inference,submission
 docker:
   mlc_repo: mlcommons@mlperf-automations
@@ -137,6 +135,23 @@ variations:
   short-run:
     env:
       MLC_MLPERF_SHORT_RUN: 'yes'
+  inference:
+    default: true
+    add_deps_recursive:
+      preprocess-mlperf-submission:
+        tags: _inference
+    deps:
+    - names:
+      - inference-src
+      tags: mlcommons,inference,src
+  automotive:
+    add_deps_recursive:
+      preprocess-mlperf-submission:
+        tags: _automotive
+    deps:
+    - names:
+      - automotive-src
+      tags: mlcommons,automotive,src
 versions:
   master:
     adr:
diff --git a/script/truncate-mlperf-inference-accuracy-log/meta.yaml b/script/truncate-mlperf-inference-accuracy-log/meta.yaml
index c0f02f6d3..2acbf3b85 100644
--- a/script/truncate-mlperf-inference-accuracy-log/meta.yaml
+++ b/script/truncate-mlperf-inference-accuracy-log/meta.yaml
@@ -9,15 +9,24 @@ deps:
   - python
   - python3
   tags: get,python3
-- names:
-  - inference-src
-  tags: get,mlcommons,inference,src
 - names:
   - get-mlperf-submission-dir
   skip_if_env:
     MLC_MLPERF_INFERENCE_SUBMISSION_DIR:
     - 'on'
   tags: get,mlperf,submission,dir
+variations:
+  inference:
+    default: true
+    deps:
+    - names:
+      - inference-src
+      tags: mlcommons,inference,src
+  automotive:
+    deps:
+    - names:
+      - automotive-src
+      tags: mlcommons,automotive,src
 input_mapping:
   input: MLC_MLPERF_INFERENCE_SUBMISSION_DIR
   submission_dir: MLC_MLPERF_INFERENCE_SUBMISSION_DIR

From 3509e2ca54f593a58ec37e9e26d0e310343d2145 Mon Sep 17 00:00:00 2001
From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com>
Date: Mon, 2 Jun 2025 16:47:39 +0530
Subject: [PATCH 42/43] Updated tags for cognata dataset dependency (#447)

---
 script/get-preprocessed-dataset-cognata/meta.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/script/get-preprocessed-dataset-cognata/meta.yaml b/script/get-preprocessed-dataset-cognata/meta.yaml
index 524633f11..940351310 100644
--- a/script/get-preprocessed-dataset-cognata/meta.yaml
+++ b/script/get-preprocessed-dataset-cognata/meta.yaml
@@ -80,7 +80,7 @@ variations:
     - enable_if_env:
         MLC_TMP_REQUIRE_DOWNLOAD:
         - 'yes'
-      extra_cache_tags: nuscenes,dataset
+      extra_cache_tags: cognata,dataset
       force_cache: true
       names:
       - dae

From 8b19450442735df9aa838ad2a3b59b928ed7db39 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Mon, 2 Jun 2025 14:29:18 +0100
Subject: [PATCH 43/43] Merge from GO (#448)

* Added option to skip detect-sudo, support postfix detection
* Support installation variations in get,llvm
* Fix tags for nvidia-harness
---
 script/app-mlperf-inference/meta.yaml |  6 ++++++
 script/detect-sudo/customize.py       |  4 ++--
 script/get-generic-sys-util/meta.yaml |  3 +++
 script/get-llvm/meta.yaml             | 14 ++++++++++++++
 4 files changed, 25 insertions(+), 2 deletions(-)

diff --git a/script/app-mlperf-inference/meta.yaml b/script/app-mlperf-inference/meta.yaml
index abf480eff..dcfea1431 100644
--- a/script/app-mlperf-inference/meta.yaml
+++ b/script/app-mlperf-inference/meta.yaml
@@ -1820,6 +1820,8 @@ variations:
       nvidia-inference-server:
         version: r4.1
         tags: _go
+      nvidia-harness:
+        tags: _v4.0
       intel-harness:
         tags: _v4.1
       nvidia-scratch-space:
@@ -1841,6 +1843,8 @@ variations:
       nvidia-inference-server:
         version: r4.0
         tags: _mlcommons
+      nvidia-harness:
+        tags: _v4.0
       intel-harness:
         tags: _v4.1
       inference-src:
@@ -1862,6 +1866,8 @@ variations:
       nvidia-inference-server:
         version: r5.0
         tags: _mlcommons
+      nvidia-harness:
+        tags: _v5.0
       intel-harness:
         tags: _v4.1
       inference-src:
diff --git a/script/detect-sudo/customize.py b/script/detect-sudo/customize.py
index 3e1b65059..104d8b781 100644
--- a/script/detect-sudo/customize.py
+++ b/script/detect-sudo/customize.py
@@ -27,8 +27,8 @@ def preprocess(i):
         env['MLC_SUDO'] = ''  # root user does not need sudo
         env['MLC_SUDO_USER'] = "yes"
     else:
-        if can_execute_sudo_without_password(
-                logger) or prompt_sudo(logger) == 0:
+        if not is_true(env.get('MLC_SKIP_SUDO')) and (can_execute_sudo_without_password(
+                logger) or prompt_sudo(logger) == 0):
             env['MLC_SUDO_USER'] = "yes"
             env['MLC_SUDO'] = 'sudo'
diff --git a/script/get-generic-sys-util/meta.yaml b/script/get-generic-sys-util/meta.yaml
index e0e5ed1d3..01ffa7b9f 100644
--- a/script/get-generic-sys-util/meta.yaml
+++ b/script/get-generic-sys-util/meta.yaml
@@ -662,6 +662,9 @@ variations:
   postfix:
     env:
       MLC_SYS_UTIL_NAME: postfix
+      MLC_SYS_UTIL_VERSION_CMD: postconf mail_version
+      MLC_SYS_UTIL_VERSION_RE: mail_version = \b(\d+\.\d+(?:\.\d+)?)\b
+      MLC_TMP_VERSION_DETECT_GROUP_NUMBER: 1
     new_env_keys:
     - MLC_POSTFIX_VERSION
     state:
diff --git a/script/get-llvm/meta.yaml b/script/get-llvm/meta.yaml
index 8193227b7..abcebd854 100644
--- a/script/get-llvm/meta.yaml
+++ b/script/get-llvm/meta.yaml
@@ -27,6 +27,9 @@ prehook_deps:
     - 'yes'
   names: llvm-install
   reuse_version: true
+  inherit_variation_tags: true
+  skip_inherit_variation_groups:
+  - install-src
   tags: install,llvm
   sort: 100
 tags:
@@ -39,13 +42,24 @@ tags:
 uid: 99832a103ed04eb8
 variations:
   from-prebuilt:
+    group: install-src
+    default: true
     ad:
       llvm-install:
         tags: prebuilt
   from-src:
+    group: install-src
     ad:
       llvm-install:
         tags: src,_clang
   path.#:
     env:
       MLC_LLVM_DIR_PATH: '#'
+  release: {}
+  version.#: {}
+  branch.#: {}
+  runtimes.#: {}
+  flang: {}
+  sha.#: {}
+  tag.#: {}
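
A minimal sketch of the sudo-detection gating added to script/detect-sudo/customize.py in the last patch, shown in isolation under stated assumptions: `is_true` mimics the repository's truthy-string helper, and `probe_passwordless` / `prompt_sudo` are hypothetical stand-ins for can_execute_sudo_without_password() and prompt_sudo().

    import os

    def is_true(value):
        # Accept the truthy spellings commonly used for MLC_* env flags
        # (assumed behaviour of the repository's is_true utility).
        return str(value).strip().lower() in ('yes', 'true', 'on', '1')

    def detect_sudo(env, probe_passwordless, prompt_sudo):
        # Root needs no sudo prefix at all.
        if os.geteuid() == 0:
            env['MLC_SUDO'] = ''  # root user does not need sudo
            env['MLC_SUDO_USER'] = 'yes'
        else:
            # MLC_SKIP_SUDO=yes short-circuits both the passwordless probe
            # and the interactive password prompt, leaving MLC_SUDO unset.
            if not is_true(env.get('MLC_SKIP_SUDO')) and (
                    probe_passwordless() or prompt_sudo() == 0):
                env['MLC_SUDO_USER'] = 'yes'
                env['MLC_SUDO'] = 'sudo'
        return env

With MLC_SKIP_SUDO set, neither helper runs, so automation flows that would otherwise block on a sudo password prompt can proceed unprivileged.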