diff --git a/src/test/input_byte_size_test.cc b/src/test/input_byte_size_test.cc
index 1774fe7b4..0fe2cdeee 100644
--- a/src/test/input_byte_size_test.cc
+++ b/src/test/input_byte_size_test.cc
@@ -258,7 +258,7 @@ char InputByteSizeTest::input_data_string_
 
 TEST_F(InputByteSizeTest, ValidInputByteSize)
 {
-  const char* model_name = "savedmodel_zero_1_float32";
+  const char* model_name = "onnx_zero_1_float32";
   // Create an inference request
   FAIL_TEST_IF_ERR(
       TRITONSERVER_InferenceRequestNew(
@@ -313,7 +313,7 @@ TEST_F(InputByteSizeTest, ValidInputByteSize)
 
 TEST_F(InputByteSizeTest, InputByteSizeMismatch)
 {
-  const char* model_name = "savedmodel_zero_1_float32";
+  const char* model_name = "onnx_zero_1_float32";
   // Create an inference request
   FAIL_TEST_IF_ERR(
       TRITONSERVER_InferenceRequestNew(
@@ -366,7 +366,7 @@ TEST_F(InputByteSizeTest, InputByteSizeMismatch)
 
 TEST_F(InputByteSizeTest, InputByteSizeLarge)
 {
-  const char* model_name = "savedmodel_zero_1_float32";
+  const char* model_name = "onnx_zero_1_float32";
   // Create an inference request
   FAIL_TEST_IF_ERR(
       TRITONSERVER_InferenceRequestNew(
@@ -422,7 +422,7 @@ TEST_F(InputByteSizeTest, InputByteSizeLarge)
 
 TEST_F(InputByteSizeTest, ValidStringInputByteSize)
 {
-  const char* model_name = "savedmodel_zero_1_object";
+  const char* model_name = "onnx_zero_1_object";
   // Create an inference request
   FAIL_TEST_IF_ERR(
       TRITONSERVER_InferenceRequestNew(
@@ -483,7 +483,7 @@ TEST_F(InputByteSizeTest, ValidStringInputByteSize)
 
 TEST_F(InputByteSizeTest, StringCountMismatch)
 {
-  const char* model_name = "savedmodel_zero_1_object";
+  const char* model_name = "onnx_zero_1_object";
   // Create an inference request
   FAIL_TEST_IF_ERR(
       TRITONSERVER_InferenceRequestNew(
@@ -528,8 +528,7 @@ TEST_F(InputByteSizeTest, StringCountMismatch)
   // Create an inference request
   FAIL_TEST_IF_ERR(
       TRITONSERVER_InferenceRequestNew(
-          &irequest_, server_, "savedmodel_zero_1_object",
-          -1 /* model_version */),
+          &irequest_, server_, "onnx_zero_1_object", -1 /* model_version */),
       "creating inference request");
   FAIL_TEST_IF_ERR(
       TRITONSERVER_InferenceRequestSetReleaseCallback(
@@ -569,7 +568,7 @@ TEST_F(InputByteSizeTest, StringCountMismatch)
 
 TEST_F(InputByteSizeTest, StringSizeMisalign)
 {
-  const char* model_name = "savedmodel_zero_1_object";
+  const char* model_name = "onnx_zero_1_object";
   // Create an inference request
   FAIL_TEST_IF_ERR(
       TRITONSERVER_InferenceRequestNew(
@@ -642,8 +641,7 @@ TEST_F(InputByteSizeTest, StringCountMismatchGPU)
   // Create an inference request
   FAIL_TEST_IF_ERR(
       TRITONSERVER_InferenceRequestNew(
-          &irequest_, server_, "savedmodel_zero_1_object",
-          -1 /* model_version */),
+          &irequest_, server_, "onnx_zero_1_object", -1 /* model_version */),
       "creating inference request");
   FAIL_TEST_IF_ERR(
       TRITONSERVER_InferenceRequestSetReleaseCallback(
@@ -699,8 +697,7 @@ TEST_F(InputByteSizeTest, StringCountMismatchGPU)
   // Create an inference request
   FAIL_TEST_IF_ERR(
       TRITONSERVER_InferenceRequestNew(
-          &irequest_, server_, "savedmodel_zero_1_object",
-          -1 /* model_version */),
+          &irequest_, server_, "onnx_zero_1_object", -1 /* model_version */),
       "creating inference request");
   FAIL_TEST_IF_ERR(
       TRITONSERVER_InferenceRequestSetReleaseCallback(