diff --git a/.github/workflows/test-nvidia-mlperf-inference-implementations.yml b/.github/workflows/test-nvidia-mlperf-inference-implementations.yml
index c1f58e20a..a8656b77f 100644
--- a/.github/workflows/test-nvidia-mlperf-inference-implementations.yml
+++ b/.github/workflows/test-nvidia-mlperf-inference-implementations.yml
@@ -17,12 +17,14 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        system: [ "GO-spr", "phoenix"]
+        system: [ "GO-spr", "phoenix", "GO-i9" ]
         # system: [ "mlc-server" ]
         python-version: [ "3.12" ]
         model: [ "resnet50", "retinanet", "bert-99", "bert-99.9", "gptj-99.9", "3d-unet-99.9", "sdxl" ]
         exclude:
          - model: gptj-99.9
+         - system: GO-spr
+         - system: GO-i9
 
     steps:
       - name: Test MLPerf Inference NVIDIA ${{ matrix.model }}
@@ -43,12 +45,7 @@
             gpu_name=rtx_4090
             docker_string=" --docker"
           fi
-          if [ "${{ matrix.model }}" = "bert-99" ] || [ "${{ matrix.model }}" = "bert-99.9" ]; then
-            category="edge"
-          else
-            category="datacenter,edge"
-          fi
-
+          category="datacenter,edge"
           if [ -f "gh_action/bin/deactivate" ]; then source gh_action/bin/deactivate; fi
           python3 -m venv gh_action
           source gh_action/bin/activate
diff --git a/script/run-docker-container/customize.py b/script/run-docker-container/customize.py
index 9277ab7f0..932f813cb 100644
--- a/script/run-docker-container/customize.py
+++ b/script/run-docker-container/customize.py
@@ -56,7 +56,7 @@ def preprocess(i):
     print('')
     # CMD = f"""{env['MLC_CONTAINER_TOOL']} ps --format=json --filter "ancestor={DOCKER_CONTAINER}" """
     CMD = f"""{env['MLC_CONTAINER_TOOL']} ps --format """ + \
-        "'{{ .ID }},'" + f""" --filter "ancestor={DOCKER_CONTAINER}" """
+        '"{{ .ID }},"' + f""" --filter "ancestor={DOCKER_CONTAINER}" """
     if os_info['platform'] == 'windows':
         CMD += " 2> nul"
     else: