Replace boolean usage #357

Merged
merged 12 commits on Apr 15, 2025
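This PR replaces ad-hoc string comparisons such as `env.get('MLC_QUIET', False) == 'yes'` with the shared `is_true()` / `is_false()` helpers imported from the `utils` module, so spellings like `1`, `True`, or `YES` are treated consistently across scripts. The helpers themselves are not part of this diff; the sketch below is an assumption about their behavior, inferred only from the string lists the old checks used (`yes/true/1` and `no/false/0`), and the real implementations in mlcflow's `utils` module may differ.

```python
# Assumed sketch of the helpers the call sites below rely on; not taken from this PR.

def is_true(value) -> bool:
    """Boolean True, or a string such as 'yes', 'true', or '1' (any case)."""
    if isinstance(value, bool):
        return value
    return str(value).strip().lower() in ("yes", "true", "1")


def is_false(value) -> bool:
    """Boolean False, or a string such as 'no', 'false', or '0' (any case)."""
    if isinstance(value, bool):
        return not value
    return str(value).strip().lower() in ("no", "false", "0")
```

With helpers like these, `env.get('MLC_MLPERF_SKIP_RUN', '') == "yes"` becomes `is_true(env.get('MLC_MLPERF_SKIP_RUN', ''))`, which also accepts values set programmatically as booleans or as `'1'`/`'True'`.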
4 changes: 2 additions & 2 deletions .github/workflows/test-mlperf-inference-abtf-poc.yml
@@ -15,14 +15,14 @@ jobs:
strategy:
fail-fast: false
matrix:
os: [ubuntu-22.04, ubuntu-20.04, ubuntu-24.04, macos-latest, macos-13, windows-latest]
os: [ubuntu-22.04, ubuntu-latest, macos-latest, macos-13, windows-latest]
python-version: [ "3.8", "3.12" ]
backend: [ "pytorch" ]
implementation: [ "python" ]
docker: [ "", " --docker --docker_mlc_repo=${{ github.event.pull_request.head.repo.html_url }} --docker_mlc_repo_branch=${{ github.event.pull_request.head.ref }} --docker_dt" ]
extra-args: [ "--adr.compiler.tags=gcc", "--env.MLC_MLPERF_LOADGEN_BUILD_FROM_SRC=off" ]
exclude:
- os: ubuntu-24.04
- os: ubuntu-latest
python-version: "3.8"
- os: windows-latest
python-version: "3.8"
3 changes: 2 additions & 1 deletion script/activate-python-venv/customize.py
@@ -1,4 +1,5 @@
from mlc import utils
from utils import is_true

import os

@@ -13,7 +14,7 @@ def preprocess(i):

automation = i['automation']

quiet = (env.get('MLC_QUIET', False) == 'yes')
quiet = is_true(env.get('MLC_QUIET', False))

name = env.get('MLC_NAME', '')
if name != '':
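The `quiet` pattern above recurs in most of the scripts touched by this PR. A small before/after illustration (the value `'True'` is hypothetical, not taken from the diff), using the assumed helper semantics sketched above:

```python
def is_true(value) -> bool:  # same assumed semantics as the sketch above
    return str(value).strip().lower() in ("yes", "true", "1")

env = {'MLC_QUIET': 'True'}  # hypothetical: a caller sets a capitalized value

old_quiet = (env.get('MLC_QUIET', False) == 'yes')  # False: only the literal 'yes' matched
new_quiet = is_true(env.get('MLC_QUIET', False))    # True: common truthy spellings match
print(old_quiet, new_quiet)                         # False True
```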
7 changes: 4 additions & 3 deletions script/app-mlperf-automotive-mlcommons-python/customize.py
@@ -1,4 +1,5 @@
from mlc import utils
from utils import is_true
import os
import json
import shutil
@@ -12,13 +13,13 @@ def preprocess(i):
state = i['state']
script_path = i['run_script_input']['path']

if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes":
if is_true(env.get('MLC_MLPERF_SKIP_RUN', '')):
return {'return': 0}

if env.get('MLC_RUN_DOCKER_CONTAINER', '') == "yes":
if is_true(env.get('MLC_RUN_DOCKER_CONTAINER', '')):
return {'return': 0}

if env.get('MLC_MLPERF_POWER', '') == "yes":
if is_true(env.get('MLC_MLPERF_POWER', '')):
power = "yes"
else:
power = "no"
3 changes: 2 additions & 1 deletion script/app-mlperf-inference-amd/customize.py
@@ -1,4 +1,5 @@
from mlc import utils
from utils import is_true
import os
import shutil

@@ -11,7 +12,7 @@ def preprocess(i):
return {'return': 1, 'error': 'Windows is not supported in this script yet'}
env = i['env']

if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes":
if is_true(env.get('MLC_MLPERF_SKIP_RUN', '')):
return {'return': 0}

env['MLC_MLPERF_AMD_SCRIPT_PATH'] = env['MLC_TMP_CURRENT_SCRIPT_PATH']
Changes to another customize.py (file path not shown in this capture)
@@ -1,4 +1,5 @@
from mlc import utils
from utils import is_true
import os
import shutil

@@ -11,7 +12,7 @@ def preprocess(i):
return {'return': 1, 'error': 'Windows is not supported in this script yet'}
env = i['env']

if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes":
if is_true(env.get('MLC_MLPERF_SKIP_RUN', '')):
return {'return': 0}

if 'MLC_MODEL' not in env:
14 changes: 6 additions & 8 deletions script/app-mlperf-inference-nvidia/customize.py
@@ -12,8 +12,7 @@ def preprocess(i):
return {'return': 1, 'error': 'Windows is not supported in this script yet'}
env = i['env']

if str(env.get('MLC_RUN_STATE_DOCKER', '')
).lower() in ['1', 'true', 'yes']:
if is_true(env.get('MLC_RUN_STATE_DOCKER', '')):
return {'return': 0}

if env.get('MLC_MODEL', '') == '':
@@ -26,8 +25,8 @@ def preprocess(i):
return {
'return': 1, 'error': 'Please select a variation specifying the device to run on'}

if env.get('MLC_MLPERF_SKIP_RUN',
'') == "yes" and make_command == "run_harness":
if is_true(env.get('MLC_MLPERF_SKIP_RUN', '')
) and make_command == "run_harness":
return {'return': 0}

env['MLPERF_SCRATCH_PATH'] = env['MLC_NVIDIA_MLPERF_SCRATCH_PATH']
@@ -641,15 +640,15 @@ def preprocess(i):
run_config += f" --audio_buffer_num_lines={audio_buffer_num_lines}"

use_fp8 = str(env.get('MLC_MLPERF_NVIDIA_HARNESS_USE_FP8', ''))
if use_fp8 and use_fp8.lower() not in ["no", "false", "0", ""]:
if use_fp8 and not is_false(use_fp8):
run_config += f" --use_fp8"

if "llama2" in env["MLC_MODEL"]:
run_config += f" --fp8_quant_model_path={fp8_model_path}"
run_config += f" --tensor_parallelism={tmp_tp_size}"

enable_sort = env.get('MLC_MLPERF_NVIDIA_HARNESS_ENABLE_SORT')
if enable_sort and enable_sort.lower() not in ["no", "false", "0"]:
if enable_sort and not is_false(enable_sort):
run_config += f" --enable_sort"

sdxl_server_batcher_time_limit = env.get(
@@ -675,8 +674,7 @@ def preprocess(i):
env.get(
'MLC_MLPERF_NVIDIA_HARNESS_SKIP_POSTPROCESS',
''))
if skip_postprocess and skip_postprocess.lower() not in [
"no", "false", "0", ""]:
if skip_postprocess and not is_false(skip_postprocess):
run_config += f" --skip_postprocess"

if test_mode:
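The NVIDIA harness flags above use a slightly different idiom: `use_fp8 and not is_false(use_fp8)` keeps an unset or empty variable disabled while honoring an explicit `no`/`false`/`0`, matching the old `not in ["no", "false", "0", ""]` checks. A minimal sketch of that behavior (the function name `flag_enabled` and the sample values are illustrative, not from the diff):

```python
def is_false(value) -> bool:  # same assumed semantics as the sketch near the top
    return str(value).strip().lower() in ("no", "false", "0")

def flag_enabled(raw: str) -> bool:
    # Empty/unset -> disabled; explicit falsy string -> disabled; anything else -> enabled.
    return bool(raw) and not is_false(raw)

assert flag_enabled('') is False        # unset or empty stays off
assert flag_enabled('0') is False       # explicit off
assert flag_enabled('no') is False      # explicit off
assert flag_enabled('1') is True        # explicit on
assert flag_enabled('enabled') is True  # any other value counts as on
```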
5 changes: 3 additions & 2 deletions script/app-mlperf-inference-qualcomm/customize.py
@@ -1,4 +1,5 @@
from mlc import utils
from utils import is_true
import os
import shutil

@@ -11,7 +12,7 @@ def preprocess(i):
return {'return': 1, 'error': 'Windows is not supported in this script yet'}
env = i['env']

if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes":
if is_true(env.get('MLC_MLPERF_SKIP_RUN', '')):
return {'return': 0}

if 'MLC_MODEL' not in env:
@@ -78,7 +79,7 @@ def preprocess(i):

keys = ['LOC_OFFSET', 'LOC_SCALE', 'CONF_OFFSET', 'CONF_SCALE']

if env.get('MLC_RETINANET_USE_MULTIPLE_SCALES_OFFSETS', '') == 'yes':
if is_true(env.get('MLC_RETINANET_USE_MULTIPLE_SCALES_OFFSETS', '')):
env['+ CXXFLAGS'].append("-DUSE_MULTIPLE_SCALES_OFFSETS=1")
for j in range(0, 4):
keys.append(f'LOC_OFFSET{j}')
3 changes: 2 additions & 1 deletion script/app-mlperf-inference-redhat/customize.py
@@ -1,4 +1,5 @@
from mlc import utils
from utils import is_true
import os
import shutil

@@ -11,7 +12,7 @@ def preprocess(i):
return {'return': 1, 'error': 'Windows is not supported in this script yet'}
env = i['env']

if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes":
if is_true(env.get('MLC_MLPERF_SKIP_RUN', '')):
return {'return': 0}

if 'MLC_MODEL' not in env:
2 changes: 1 addition & 1 deletion script/app-mlperf-inference/customize.py
@@ -77,7 +77,7 @@ def postprocess(i):
return {'return': 0}

# in power mode copy the log files from tmp_power directory
if env.get('MLC_MLPERF_POWER', '') == "yes" and mode == "performance":
if is_true(env.get('MLC_MLPERF_POWER', '')) and mode == "performance":
mlperf_power_logs_dir = os.path.join(
env['MLC_MLPERF_OUTPUT_DIR'], "..", "power")
mlperf_ranging_logs_dir = os.path.join(
7 changes: 4 additions & 3 deletions script/app-mlperf-training-nvidia/customize.py
@@ -1,4 +1,5 @@
from mlc import utils
from utils import is_true
import os
import json
import shutil
@@ -12,13 +13,13 @@ def preprocess(i):
state = i['state']
script_path = i['run_script_input']['path']

if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes":
if is_true(env.get('MLC_MLPERF_SKIP_RUN', '')):
return {'return': 0}

if env.get('MLC_RUN_DOCKER_CONTAINER', '') == "yes":
if is_true(env.get('MLC_RUN_DOCKER_CONTAINER', '')):
return {'return': 0}

if env.get('MLC_MLPERF_POWER', '') == "yes":
if is_true(env.get('MLC_MLPERF_POWER', '')):
power = "yes"
else:
power = "no"
7 changes: 4 additions & 3 deletions script/app-mlperf-training-reference/customize.py
@@ -1,4 +1,5 @@
from mlc import utils
from utils import is_true
import os
import json
import shutil
@@ -12,13 +13,13 @@ def preprocess(i):
state = i['state']
script_path = i['run_script_input']['path']

if env.get('MLC_MLPERF_SKIP_RUN', '') == "yes":
if is_true(env.get('MLC_MLPERF_SKIP_RUN', '')):
return {'return': 0}

if env.get('MLC_RUN_DOCKER_CONTAINER', '') == "yes":
if is_true(env.get('MLC_RUN_DOCKER_CONTAINER', '')):
return {'return': 0}

if env.get('MLC_MLPERF_POWER', '') == "yes":
if is_true(env.get('MLC_MLPERF_POWER', '')):
power = "yes"
else:
power = "no"
3 changes: 2 additions & 1 deletion script/authenticate-github-cli/customize.py
@@ -1,5 +1,6 @@
from mlc import utils
import os
from utils import is_true


def preprocess(i):
@@ -22,7 +23,7 @@ def preprocess(i):
cmd = f" echo {env['MLC_GH_AUTH_TOKEN']} | {cmd} --with-token"

env['MLC_RUN_CMD'] = cmd
quiet = (env.get('MLC_QUIET', False) == 'yes')
quiet = is_true(env.get('MLC_QUIET', False))

return {'return': 0}

Changes to another customize.py (file path not shown in this capture)
@@ -1,4 +1,5 @@
from mlc import utils
from utils import *
import os


@@ -13,7 +14,7 @@ def preprocess(i):

automation = i['automation']

quiet = (env.get('MLC_QUIET', False) == 'yes')
quiet = is_true(env.get('MLC_QUIET', False))

models = env['MODELS'].split(",")

@@ -29,7 +30,7 @@ def preprocess(i):

power = env.get('POWER', '')

if str(power).lower() in ["yes", "true"]:
if is_true(str(power)):
POWER_STRING = " --power=yes --adr.mlperf-power-client.power_server=" + env.get(
'POWER_SERVER',
'192.168.0.15') + " --adr.mlperf-power-client.port=" + str(
5 changes: 3 additions & 2 deletions script/benchmark-program-mlperf/customize.py
@@ -1,4 +1,5 @@
from mlc import utils
from utils import is_true, is_false
import os


@@ -16,9 +17,9 @@ def postprocess(i):

env['MLC_MLPERF_RUN_CMD'] = env.get('MLC_RUN_CMD')

if env.get('MLC_MLPERF_POWER', '') == "yes":
if is_true(env.get('MLC_MLPERF_POWER', '')):

if env.get('MLC_MLPERF_SHORT_RANGING_RUN', '') != 'no':
if not is_false(env.get('MLC_MLPERF_SHORT_RANGING_RUN', '')):
# Write '0' to the count.txt file in MLC_RUN_DIR
count_file = os.path.join(env.get('MLC_RUN_DIR', ''), 'count.txt')
with open(count_file, 'w') as f:
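Note the asymmetry in the hunk above: `MLC_MLPERF_POWER` is opt-in and must be explicitly truthy, while the short ranging run is on by default and only disabled by an explicit falsy value, hence `not is_false(...)` rather than `is_true(...)`. A condensed sketch of the two defaults, under the assumed helper semantics:

```python
def is_true(v) -> bool:   # assumed semantics, as sketched near the top of this page
    return str(v).strip().lower() in ("yes", "true", "1")

def is_false(v) -> bool:
    return str(v).strip().lower() in ("no", "false", "0")

def power_defaults(env: dict) -> tuple:
    power = is_true(env.get('MLC_MLPERF_POWER', ''))                           # opt-in: off unless enabled
    short_ranging = not is_false(env.get('MLC_MLPERF_SHORT_RANGING_RUN', ''))  # opt-out: on unless disabled
    return power, short_ranging

assert power_defaults({}) == (False, True)                                     # nothing set
assert power_defaults({'MLC_MLPERF_SHORT_RANGING_RUN': 'no'}) == (False, False)
```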
7 changes: 4 additions & 3 deletions script/calibrate-model-for.qaic/customize.py
@@ -1,4 +1,5 @@
from mlc import utils
from utils import is_true
import os
import sys
import yaml
@@ -14,9 +15,9 @@ def preprocess(i):

automation = i['automation']

quiet = (env.get('MLC_QUIET', False) == 'yes')
quiet = is_true(env.get('MLC_QUIET', False))

if env.get('MLC_CREATE_INPUT_BATCH', '') == 'yes':
if is_true(env.get('MLC_CREATE_INPUT_BATCH', '')):
r = create_batched_inputs(env)
if r['return'] > 0:
return r
@@ -74,7 +75,7 @@ def construct_calibration_cmd(env):
compiler_params = env['MLC_QAIC_COMPILER_PARAMS']
batchsize = env.get('MLC_QAIC_MODEL_BATCH_SIZE', "1")
cmd = env['MLC_QAIC_EXEC_PATH'] + " "
if env.get('MLC_CREATE_INPUT_BATCH', '') == 'yes':
if is_true(env.get('MLC_CREATE_INPUT_BATCH', '')):
cmd += " -input-list-file=batched_input_files -batchsize=" + batchsize + " "
cmd += compiler_params + " -dump-profile=profile.yaml -model=" + \
env['MLC_ML_MODEL_FILE_WITH_PATH']
Changes to another customize.py (file path not shown in this capture)
@@ -1,5 +1,6 @@
from mlc import utils
import os
from utils import is_true


def preprocess(i):
@@ -12,7 +13,7 @@ def preprocess(i):

automation = i['automation']

quiet = (env.get('MLC_QUIET', False) == 'yes')
quiet = is_true(env.get('MLC_QUIET', False))

clean_cmd = ''
cache_rm_tags = ''
5 changes: 3 additions & 2 deletions script/compile-model-for.qaic/customize.py
@@ -1,4 +1,5 @@
from mlc import utils
from utils import is_true
import os


@@ -12,7 +13,7 @@ def preprocess(i):

automation = i['automation']

quiet = (env.get('MLC_QUIET', False) == 'yes')
quiet = is_true(env.get('MLC_QUIET', False))

if env.get('MLC_REGISTER_CACHE', '') == '':

@@ -49,7 +50,7 @@ def construct_compilation_cmd(env):
' ' + env.get('MLC_QAIC_MODEL_COMPILER_ARGS_SUT', '')
batchsize = env.get('MLC_QAIC_MODEL_BATCH_SIZE')

if env.get('MLC_QAIC_MODEL_QUANTIZATION', '') == 'yes':
if is_true(env.get('MLC_QAIC_MODEL_QUANTIZATION', '')):
profile_string = " -load-profile=" + \
env['MLC_QAIC_MODEL_PROFILE_WITH_PATH']
else:
3 changes: 2 additions & 1 deletion script/convert-csv-to-md/customize.py
@@ -1,4 +1,5 @@
from mlc import utils
from utils import is_true
import os


@@ -12,7 +13,7 @@ def preprocess(i):

automation = i['automation']

quiet = (env.get('MLC_QUIET', False) == 'yes')
quiet = is_true(env.get('MLC_QUIET', False))

csv_file = env.get('MLC_CSV_FILE', '')
md_file = env.get('MLC_MD_FILE', '')
3 changes: 2 additions & 1 deletion script/create-fpgaconvnet-app-tinyml/customize.py
@@ -1,4 +1,5 @@
from mlc import utils
from utils import is_true
import os


@@ -12,7 +13,7 @@ def preprocess(i):

automation = i['automation']

quiet = (env.get('MLC_QUIET', False) == 'yes')
quiet = is_true(env.get('MLC_QUIET', False))

network_env_name = env['MLC_TINY_FPGACONVNET_NETWORK_ENV_NAME']
run_dir = env['MLC_TINY_FPGACONVNET_' + network_env_name + '_RUN_DIR']
3 changes: 2 additions & 1 deletion script/create-fpgaconvnet-config-tinyml/customize.py
@@ -1,4 +1,5 @@
from mlc import utils
from utils import is_true
import os


@@ -12,7 +13,7 @@ def preprocess(i):

automation = i['automation']

quiet = (env.get('MLC_QUIET', False) == 'yes')
quiet = is_true(env.get('MLC_QUIET', False))

code_path = os.path.join(
env['MLC_GIT_REPO_CHECKOUT_PATH'],