Replace print with MLC Logger #396

Merged
merged 6 commits on Apr 24, 2025
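The change follows one pattern across every script in this PR: instead of calling print(), each customize.py pulls the logger exposed by the MLC automation object and routes output through it. A minimal sketch of that pattern, assuming i['automation'] carries a standard Python logging.Logger on its .logger attribute (as the diffs below suggest); the env keys and messages are only illustrative:

```python
# Minimal sketch of the print -> logger pattern applied throughout this PR.
# Assumes i['automation'] exposes a logging.Logger as .logger, as the diffs
# below suggest; the env keys here are illustrative.

def preprocess(i):
    env = i['env']
    logger = i['automation'].logger   # previously: bare print() calls

    # Status and progress messages go through logger.info ...
    logger.info('Run Directory: {}'.format(env.get('MLC_RUN_DIR', '')))

    # ... and recoverable problems through logger.warning.
    if 'MLC_ML_MODEL_FILE_WITH_PATH' not in env:
        logger.warning('No model file selected')

    return {'return': 0}
```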
2 changes: 1 addition & 1 deletion script/app-image-classification-onnx-py/customize.py
@@ -19,7 +19,6 @@ def postprocess(i):
os_info = i['os_info']
env = i['env']
state = i['state']

automation = i['automation']
logger = automation.action_object.logger

@@ -48,6 +47,7 @@ def postprocess(i):
json.dump(data, f, ensure_ascii=False, indent=4)
except Exception as e:
logger.warning('CM warning: {}'.format(e))
logger.warning('CM warning: {}'.format(e))

try:
import yaml
3 changes: 2 additions & 1 deletion script/app-image-corner-detection/customize.py
@@ -38,6 +38,7 @@ def preprocess(i):
def postprocess(i):

env = i['env']
print(env['MLC_OUTPUT'] + " generated in " + env['MLC_RUN_DIR'])
logger = i['automation'].logger
logger.info(env['MLC_OUTPUT'] + " generated in " + env['MLC_RUN_DIR'])

return {'return': 0}
8 changes: 5 additions & 3 deletions script/app-loadgen-generic-python/customize.py
@@ -11,6 +11,8 @@ def preprocess(i):

env = i['env']

logger = i['automation'].logger

if 'MLC_ML_MODEL_FILE_WITH_PATH' not in env:
return {
'return': 1, 'error': 'Please select a variation specifying the model to run'}
@@ -87,9 +89,9 @@ def preprocess(i):

env['MLC_RUN_OPTS'] = run_opts

print('')
print('Assembled flags: {}'.format(run_opts))
print('')
logger.info('')
logger.info('Assembled flags: {}'.format(run_opts))
logger.info('')

return {'return': 0}

6 changes: 4 additions & 2 deletions script/app-mlperf-inference-dummy/customize.py
@@ -11,6 +11,8 @@ def preprocess(i):
return {'return': 1, 'error': 'Windows is not supported in this script yet'}
env = i['env']

logger = i['automation'].logger

if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes":
return {'return': 0}

@@ -29,8 +31,8 @@ def preprocess(i):
return r
run_cmd = r['run_cmd']
run_dir = r['run_dir']
print(run_cmd)
print(run_dir)
logger.info(run_cmd)
logger.info(run_dir)
return {'return': 1, 'error': 'Run command needs to be tested!'}


6 changes: 4 additions & 2 deletions script/app-mlperf-inference-intel/customize.py
@@ -11,6 +11,8 @@ def preprocess(i):
return {'return': 1, 'error': 'Windows is not supported in this script yet'}
env = i['env']

logger = i['automation'].logger

if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes":
return {'return': 0}

@@ -104,7 +106,7 @@ def preprocess(i):
os.path.dirname(env['MLC_ML_MODEL_FILE_WITH_PATH']), 'retinanet-int8-model.pth')

elif env['MLC_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "build_harness":
print(f"Harness Root: {harness_root}")
logger.info(f"Harness Root: {harness_root}")
if "bert" in env['MLC_MODEL']:
i['run_script_input']['script_name'] = "build_bert_harness"
env['MLC_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join(
@@ -162,7 +164,7 @@ def preprocess(i):
env[model_dir_name])

elif env['MLC_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "run_harness":
print(f"Harness Root: {harness_root}")
logger.info(f"Harness Root: {harness_root}")
if env.get('MLC_MLPERF_LOADGEN_MODE', '') == "compliance":
audit_path = env['MLC_MLPERF_INFERENCE_AUDIT_PATH']
shutil.copy(audit_path, env['MLC_RUN_DIR'])
11 changes: 8 additions & 3 deletions script/app-mlperf-inference-mlcommons-cpp/customize.py
@@ -11,10 +11,15 @@ def preprocess(i):

meta = i['meta']

logger = i['automation'].logger

if os_info['platform'] == 'windows':
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('WARNING: this script was not thoroughly tested on Windows and compilation may fail - please help us test and improve it!')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
logger.info(
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
logger.warning(
'This script was not thoroughly tested on Windows and compilation may fail - please help us test and improve it!')
logger.info(
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
# # Currently support only LLVM on Windows
# print ('# Forcing LLVM on Windows')
# r = automation.update_deps({'deps':meta['post_deps'], 'update_deps':{'compile-program': {'adr':{'compiler':{'tags':'llvm'}}}}})
12 changes: 7 additions & 5 deletions script/app-mlperf-inference-mlcommons-python/customize.py
@@ -13,6 +13,8 @@ def preprocess(i):
state = i['state']
script_path = i['run_script_input']['path']

logger = i['automation'].logger

if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes":
return {'return': 0}

@@ -179,7 +181,7 @@ def preprocess(i):

mlperf_implementation = env.get('MLC_MLPERF_IMPLEMENTATION', 'reference')
cmd, run_dir = get_run_cmd(os_info, env, scenario_extra_options,
mode_extra_options, dataset_options, mlperf_implementation)
mode_extra_options, dataset_options, logger, mlperf_implementation)

if env.get('MLC_NETWORK_LOADGEN', '') == "lon":

@@ -202,18 +204,18 @@


def get_run_cmd(os_info, env, scenario_extra_options,
mode_extra_options, dataset_options, implementation="reference"):
mode_extra_options, dataset_options, logger, implementation="reference"):
if implementation == "reference":
return get_run_cmd_reference(
os_info, env, scenario_extra_options, mode_extra_options, dataset_options)
os_info, env, scenario_extra_options, mode_extra_options, dataset_options, logger)
if implementation == "nvidia":
return get_run_cmd_nvidia(
os_info, env, scenario_extra_options, mode_extra_options, dataset_options)
return "", os.getcwd()


def get_run_cmd_reference(
os_info, env, scenario_extra_options, mode_extra_options, dataset_options):
os_info, env, scenario_extra_options, mode_extra_options, dataset_options, logger):

device = env['MLC_MLPERF_DEVICE'] if env['MLC_MLPERF_DEVICE'] not in [
"gpu", "rocm"] else "cuda"
@@ -533,7 +535,7 @@ def get_run_cmd_reference(
if env.get('MLC_MLPERF_POINTPAINTING_TIME', '') != '':
cmd += f" --time {env['MLC_MLPERF_POINTPAINTING_TIME']}"

print(cmd)
logger.info(cmd)

if env.get('MLC_NETWORK_LOADGEN', '') in ["lon", "sut"]:
cmd = cmd + " " + "--network " + env['MLC_NETWORK_LOADGEN']
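In this script the run command is assembled in helper functions rather than in preprocess(), so the logger has to be threaded through as an extra parameter, as the signature changes above show. A simplified sketch of that threading; the real helpers take several more arguments (os_info, scenario and dataset options), and the command built here is illustrative:

```python
import os

# Simplified sketch of passing the logger down to the helpers that build the
# run command; the real functions take several more arguments.

def preprocess(i):
    logger = i['automation'].logger
    cmd, run_dir = get_run_cmd(i['env'], logger)
    return {'return': 0}

def get_run_cmd(env, logger, implementation="reference"):
    if implementation == "reference":
        return get_run_cmd_reference(env, logger)
    return "", os.getcwd()

def get_run_cmd_reference(env, logger):
    cmd = env.get('MLC_PYTHON_BIN_WITH_PATH', 'python') + " main.py"
    logger.info(cmd)   # previously: print(cmd)
    return cmd, os.getcwd()
```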
6 changes: 4 additions & 2 deletions script/app-mlperf-inference-qualcomm/customize.py
@@ -8,6 +8,8 @@ def preprocess(i):

os_info = i['os_info']

logger = i['automation'].logger

if os_info['platform'] == 'windows':
return {'return': 1, 'error': 'Windows is not supported in this script yet'}
env = i['env']
@@ -27,7 +29,7 @@ def preprocess(i):

kilt_root = env['MLC_KILT_CHECKOUT_PATH']

print(f"Harness Root: {kilt_root}")
logger.info(f"Harness Root: {kilt_root}")

source_files = []
env['MLC_SOURCE_FOLDER_PATH'] = env['MLC_KILT_CHECKOUT_PATH']
@@ -161,7 +163,7 @@ def preprocess(i):
"master",
"QAicInfApi.cpp"))

print(f"Compiling the source files: {source_files}")
logger.info(f"Compiling the source files: {source_files}")
env['MLC_CXX_SOURCE_FILES'] = ";".join(source_files)

env['+ CXXFLAGS'].append("-std=c++17")
6 changes: 4 additions & 2 deletions script/app-mlperf-inference-redhat/customize.py
@@ -12,6 +12,8 @@ def preprocess(i):
return {'return': 1, 'error': 'Windows is not supported in this script yet'}
env = i['env']

logger = i['automation'].logger

if is_true(env.get('MLC_MLPERF_SKIP_RUN', '')):
return {'return': 0}

@@ -30,8 +32,8 @@ def preprocess(i):
return r
run_cmd = r['run_cmd']
run_dir = r['run_dir']
print(run_cmd)
print(run_dir)
logger.info(run_cmd)
logger.info(run_dir)
env['MLC_MLPERF_RUN_CMD'] = run_cmd
env['MLC_RUN_DIR'] = run_dir
env['MLC_RUN_CMD'] = run_cmd
29 changes: 16 additions & 13 deletions script/app-mlperf-inference/customize.py
@@ -63,6 +63,8 @@ def postprocess(i):
state = i['state']
mlc = i['automation'].action_object

logger = i['automation'].logger

# if env.get('MLC_MLPERF_USER_CONF', '') == '':
# return {'return': 0}

@@ -218,7 +220,7 @@ def postprocess(i):
pattern["Offline"] = "Samples per second: (.*)\n"
pattern["SingleStream"] = "Mean latency \\(ns\\)\\s*:(.*)"
pattern["MultiStream"] = "Mean latency \\(ns\\)\\s*:(.*)"
print("\n")
logger.info("\n")
with open("mlperf_log_summary.txt", "r") as fp:
summary = fp.read()

@@ -241,7 +243,7 @@ def postprocess(i):

print(
f"SUT: {sut_name}, model: {model_full_name}, scenario: {scenario}, {metric} (mean value) updated as {value}")
print(f"New config stored in {sut_config_path}")
logger.info(f"New config stored in {sut_config_path}")
with open(sut_config_path, "w") as f:
yaml.dump(sut_config, f)

@@ -288,8 +290,8 @@ def postprocess(i):
] = y[1].strip()

if not is_false(env.get("MLC_MLPERF_PRINT_SUMMARY", "")):
print("\n")
print(mlperf_log_summary)
logger.info("\n")
logger.info(mlperf_log_summary)

with open("measurements.json", "w") as fp:
json.dump(measurements, fp, indent=2)
@@ -503,7 +505,7 @@ def postprocess(i):
else:
cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {q}{SCRIPT_PATH}{q} -r {q}{RESULT_DIR}{q} -c {q}{COMPLIANCE_DIR}{q} -o {q}{OUTPUT_DIR}{q}"""

print(cmd)
logger.info(cmd)
os.system(cmd)

if test == "TEST01":
@@ -520,13 +522,13 @@

ACCURACY_DIR = os.path.join(RESULT_DIR, "accuracy")
if not os.path.exists(ACCURACY_DIR):
print("Accuracy run not yet completed")
logger.warning("Accuracy run not yet completed")
return {
'return': 1, 'error': 'TEST01 needs accuracy run to be completed first'}

cmd = f"""cd {q}{TEST01_DIR}{q} && bash {q}{SCRIPT_PATH}{q} {q}{os.path.join(ACCURACY_DIR, "mlperf_log_accuracy.json")}{q} {q}{os.path.join(COMPLIANCE_DIR, "mlperf_log_accuracy.json")}{q} """
env['CMD'] = cmd
print(cmd)
logger.info(cmd)
r = automation.run_native_script(
{'run_script_input': run_script_input, 'env': env, 'script_name': 'verify_accuracy'})
if r['return'] > 0:
@@ -538,7 +540,8 @@ def postprocess(i):
data = file.read().replace('\n', '\t')

if 'TEST PASS' not in data:
print("\nDeterministic TEST01 failed... Trying with non-determinism.\n")
logger.warning(
"\nDeterministic TEST01 failed... Trying with non-determinism.\n")
# #Normal test failed, trying the check with non-determinism

baseline_accuracy_file = os.path.join(
@@ -603,8 +606,8 @@ def postprocess(i):
sys_utilisation_log['timestamp'])
'''
for i in range(len(sys_utilisation_log['timestamp'])):
print(f"{sys_utilisation_log['timestamp'][i]} {power_begin_time}")
print(sys_utilisation_log['timestamp'][i]>=power_begin_time)
logger.info(f"{sys_utilisation_log['timestamp'][i]} {power_begin_time}")
logger.info(sys_utilisation_log['timestamp'][i]>=power_begin_time)
'''
# print(f"{sys_utilisation_log['timestamp'][0]} {power_begin_time}")
# print(sys_utilisation_log['timestamp'][0]>=power_begin_time)
@@ -616,9 +619,9 @@ def postprocess(i):
)
system_utilisation_info_dump["avg_used_memory_gb"] = filtered_log['used_memory_gb'].mean(
)
print("\nSystem utilisation info for the current run:")
print(system_utilisation_info_dump)
print("\n")
logger.info("\nSystem utilisation info for the current run:")
logger.info(system_utilisation_info_dump)
logger.info("\n")

if state.get(
'mlperf-inference-implementation') and state['mlperf-inference-implementation'].get('version_info'):
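Beyond the mechanical replacement, this file starts distinguishing log levels: commands, summaries, and utilisation stats go to logger.info, while conditions such as a missing accuracy run or a failed deterministic TEST01 check are raised to logger.warning. A small sketch of that convention; the directory layout and messages here are illustrative, not taken from the repo:

```python
import os

def check_accuracy_dir(logger, result_dir):
    # Status output stays at info level.
    logger.info('Checking compliance prerequisites in {}'.format(result_dir))

    accuracy_dir = os.path.join(result_dir, "accuracy")
    if not os.path.exists(accuracy_dir):
        # A recoverable problem is flagged at warning level before returning.
        logger.warning("Accuracy run not yet completed")
        return {'return': 1, 'error': 'accuracy run needs to be completed first'}

    return {'return': 0}
```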
@@ -14,6 +14,8 @@ def preprocess(i):

automation = i['automation']

logger = automation.logger

quiet = is_true(env.get('MLC_QUIET', False))

models = env['MODELS'].split(",")
@@ -165,7 +167,7 @@ def preprocess(i):

with open(os.path.join(script_path, run_file_name + ".sh"), 'w') as f:
f.write(run_script_content)
print(run_script_content)
logger.info(run_script_content)

run_script_input = i['run_script_input']
r = automation.run_native_script(
17 changes: 9 additions & 8 deletions script/benchmark-program/customize.py
@@ -6,7 +6,7 @@
def preprocess(i):
os_info = i['os_info']
env = i['env']

logger = i['automation'].logger
q = '"' if os_info['platform'] == 'windows' else "'"

if env.get('MLC_RUN_CMD', '') == '':
@@ -104,15 +104,16 @@ def preprocess(i):
env['MLC_POST_RUN_CMD'] = post_run_cmd

# Print info
print('***************************************************************************')
print('CM script::benchmark-program/run.sh')
print('')
print('Run Directory: {}'.format(env.get('MLC_RUN_DIR', '')))
logger.info(
'***************************************************************************')
logger.info('CM script::benchmark-program/run.sh')
logger.info('')
logger.info('Run Directory: {}'.format(env.get('MLC_RUN_DIR', '')))

print('')
print('CMD: {}'.format(env.get('MLC_RUN_CMD', '')))
logger.info('')
logger.info('CMD: {}'.format(env.get('MLC_RUN_CMD', '')))

print('')
logger.info('')

return {'return': 0}

11 changes: 6 additions & 5 deletions script/build-docker-image/customize.py
@@ -8,7 +8,7 @@ def preprocess(i):

os_info = i['os_info']
env = i['env']

logger = i['automation'].logger
dockerfile_path = env.get('MLC_DOCKERFILE_WITH_PATH', '')
if dockerfile_path != '' and os.path.exists(dockerfile_path):
build_dockerfile = False
@@ -83,7 +83,7 @@ def preprocess(i):

CMD = ''.join(XCMD)

print(CMD)
logger.info(CMD)

env['MLC_DOCKER_BUILD_CMD'] = CMD

@@ -102,6 +102,7 @@ def get_image_name(env):
def postprocess(i):

env = i['env']
logger = i['automation'].logger

# Check if need to push docker image to the Docker Hub
if is_true(env.get('MLC_DOCKER_PUSH_IMAGE', '')):
@@ -118,12 +119,12 @@ def postprocess(i):
with open(dockerfile_path + '.build.bat', 'w') as f:
f.write(PCMD + '\n')

print(PCMD)
logger.info(PCMD)

print('')
logger.info('')

r = os.system(PCMD)
print('')
logger.info('')

if r > 0:
return {'return': 1, 'error': 'pushing to Docker Hub failed'}