Commit 9ec820b

Build: Updating to allow passing DOCKER_GPU_ARGS at model generation (#7566)
1 parent 4ef45da commit 9ec820b

3 files changed (+8, -7 lines)

qa/common/gen_jetson_trt_models

Lines changed: 1 addition & 3 deletions
@@ -48,9 +48,7 @@ HOST_MODEL_DIR=${HOST_MODEL_DIR:="${HOST_BUILD_DIR}/${TRITON_VERSION}"}
 HOST_SOURCE_DIR=$HOST_BUILD_DIR/gen_srcdir
 
 # Set CI specific parameters
-DOCKER_GPU_ARGS="${DOCKER_GPU_ARGS:="--gpus device=$CUDA_DEVICE"}"
-[[ $RUNNER_GPUS =~ ^[0-9] ]] && DOCKER_GPU_ARGS=$(eval $NV_DOCKER_ARGS)
-
+DOCKER_GPU_ARGS=${DOCKER_GPU_ARGS:-$([[ $RUNNER_GPUS =~ ^[0-9] ]] && eval $NV_DOCKER_ARGS || echo "--gpus device=$CUDA_DEVICE" )}
 
 # Set model output directories
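
The replacement line relies on bash's ${VAR:-default} expansion: a DOCKER_GPU_ARGS value already exported by the caller is kept, and the command substitution (the RUNNER_GPUS check used on CI runners, with --gpus device=$CUDA_DEVICE as the fallback) only runs when the variable is unset or empty. A minimal sketch of that precedence, using placeholder values for RUNNER_GPUS and NV_DOCKER_ARGS rather than the real CI configuration:

# Sketch only: placeholder values, not the actual CI environment.
CUDA_DEVICE=0
RUNNER_GPUS=""                                       # non-numeric, so the regex test fails
NV_DOCKER_ARGS='echo "--gpus device=$RUNNER_GPUS"'   # hypothetical CI-provided command

DOCKER_GPU_ARGS=${DOCKER_GPU_ARGS:-$([[ $RUNNER_GPUS =~ ^[0-9] ]] && eval $NV_DOCKER_ARGS || echo "--gpus device=$CUDA_DEVICE" )}
echo "$DOCKER_GPU_ARGS"
# DOCKER_GPU_ARGS unset beforehand  -> --gpus device=0
# RUNNER_GPUS=1 beforehand          -> output of eval $NV_DOCKER_ARGS
# DOCKER_GPU_ARGS preset beforehand -> the preset value, untouched

Note that :- treats an empty DOCKER_GPU_ARGS the same as an unset one, so exporting an empty string still yields the computed default.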

qa/common/gen_qa_custom_ops

Lines changed: 1 addition & 1 deletion
@@ -44,7 +44,7 @@ PYTORCH_IMAGE=${PYTORCH_IMAGE:=nvcr.io/nvidia/pytorch:$NVIDIA_UPSTREAM_VERSION-p
 
 CUDA_DEVICE=${NV_GPU:=0}
 
-[[ $RUNNER_GPUS =~ ^[0-9] ]] && DOCKER_GPU_ARGS=$(eval $NV_DOCKER_ARGS) || DOCKER_GPU_ARGS="--gpus device=$CUDA_DEVICE"
+DOCKER_GPU_ARGS=${DOCKER_GPU_ARGS:-$([[ $RUNNER_GPUS =~ ^[0-9] ]] && eval $NV_DOCKER_ARGS || echo "--gpus device=$CUDA_DEVICE" )}
 
 ###
 HOST_BUILD_DIR=${HOST_BUILD_DIR:=/tmp}
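
In gen_qa_custom_ops and gen_qa_model_repository the old one-liner always recomputed DOCKER_GPU_ARGS, so a value exported by the caller was silently discarded; with the new form the caller's value wins. A hypothetical invocation under that assumption (the device index and working directory are illustrative):

# Override the GPU arguments for a single run (illustrative value).
DOCKER_GPU_ARGS="--gpus device=1" bash qa/common/gen_qa_custom_ops

# Leave the variable unset to get the previous behaviour: CI args on a
# runner with numeric RUNNER_GPUS, otherwise --gpus device=$CUDA_DEVICE.
bash qa/common/gen_qa_custom_ops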

qa/common/gen_qa_model_repository

Lines changed: 6 additions & 3 deletions
@@ -63,7 +63,8 @@ TENSORFLOW_IMAGE=${TENSORFLOW_IMAGE:=nvcr.io/nvidia/tensorflow:$TRITON_VERSION-t
 TENSORRT_IMAGE=${TENSORRT_IMAGE:=nvcr.io/nvidia/tensorrt:$TRITON_VERSION-py3}
 CUDA_DEVICE=${NV_GPU:=0}
 
-[[ $RUNNER_GPUS =~ ^[0-9] ]] && DOCKER_GPU_ARGS=$(eval $NV_DOCKER_ARGS) || DOCKER_GPU_ARGS="--gpus device=$CUDA_DEVICE"
+DOCKER_GPU_ARGS=${DOCKER_GPU_ARGS:-$([[ $RUNNER_GPUS =~ ^[0-9] ]] && eval $NV_DOCKER_ARGS || echo "--gpus device=$CUDA_DEVICE" )}
+MODEL_TYPE=${MODEL_TYPE:-""}
 
 ###
 HOST_BUILD_DIR=${HOST_BUILD_DIR:=/tmp}

@@ -360,8 +361,10 @@ python3 $SRCDIR/gen_qa_implicit_models.py --libtorch --variable --models_dir=$VA
 chmod -R 777 $VARIMPLICITSEQDESTDIR
 python3 $SRCDIR/gen_qa_dyna_sequence_models.py --libtorch --models_dir=$DYNASEQDESTDIR
 chmod -R 777 $DYNASEQDESTDIR
-python3 $SRCDIR/gen_qa_torchtrt_models.py --models_dir=$TORCHTRTDESTDIR
-chmod -R 777 $TORCHTRTDESTDIR
+if [ -z "$MODEL_TYPE" ] || [ "$MODEL_TYPE" != "igpu" ]; then
+    python3 $SRCDIR/gen_qa_torchtrt_models.py --models_dir=$TORCHTRTDESTDIR
+    chmod -R 777 $TORCHTRTDESTDIR
+fi
 python3 $SRCDIR/gen_qa_ragged_models.py --libtorch --models_dir=$RAGGEDDESTDIR
 chmod -R 777 $RAGGEDDESTDIR
 # Export torchvision image models to ONNX
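
The second hunk, together with the new MODEL_TYPE variable, skips TorchTRT model generation when MODEL_TYPE is set to "igpu" and keeps the previous behaviour otherwise. A hypothetical invocation, assuming an iGPU (for example Jetson) environment where the TorchTRT QA models are not wanted (values illustrative):

# Skip TorchTRT model generation for an iGPU build (illustrative).
MODEL_TYPE=igpu bash qa/common/gen_qa_model_repository

# Default: MODEL_TYPE is empty, so the TorchTRT models are generated.
bash qa/common/gen_qa_model_repository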
