Skip to content

Commit 05edcc6

Browse files
authored
rm:TensorFlow - fix to 'L0_server_status' (#8100)
1 parent 4db2c00 commit 05edcc6

File tree

2 files changed

+15
-15
lines changed

2 files changed

+15
-15
lines changed

qa/L0_server_status/server_status_test.py

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
#!/usr/bin/env python3
22

3-
# Copyright 2018-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3+
# Copyright 2018-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
44
#
55
# Redistribution and use in source and binary forms, with or without
66
# modification, are permitted provided that the following conditions
@@ -45,7 +45,7 @@ class ServerMetadataTest(tu.TestResultCollector):
4545
def test_basic(self):
4646
try:
4747
for pair in [("localhost:8000", "http"), ("localhost:8001", "grpc")]:
48-
model_name = "graphdef_int32_int8_int8"
48+
model_name = "libtorch_int32_int8_int8"
4949
extensions = [
5050
"classification",
5151
"sequence",
@@ -129,7 +129,7 @@ def test_unknown_model(self):
129129
def test_unknown_model_version(self):
130130
try:
131131
for pair in [("localhost:8000", "http"), ("localhost:8001", "grpc")]:
132-
model_name = "graphdef_int32_int8_int8"
132+
model_name = "onnx_int32_int8_int8"
133133
if pair[1] == "http":
134134
triton_client = httpclient.InferenceServerClient(
135135
url=pair[0], verbose=True
@@ -149,18 +149,18 @@ def test_unknown_model_version(self):
149149
except InferenceServerException as ex:
150150
self.assertTrue(
151151
ex.message().startswith(
152-
"Request for unknown model: 'graphdef_int32_int8_int8' version 99 is not found"
152+
"Request for unknown model: 'onnx_int32_int8_int8' version 99 is not found"
153153
)
154154
)
155155

156156
def test_model_latest_infer(self):
157157
input_size = 16
158158
tensor_shape = (1, input_size)
159-
platform_name = {"graphdef": "tensorflow_graphdef", "onnx": "onnxruntime_onnx"}
159+
platform_name = {"plan": "tensorrt_plan", "onnx": "onnxruntime_onnx"}
160160

161161
# There are 3 versions of *_int32_int32_int32 and all
162162
# should be available.
163-
for platform in ("graphdef", "onnx"):
163+
for platform in ("plan", "onnx"):
164164
model_name = platform + "_int32_int32_int32"
165165

166166
# Initially there should be no version stats..
@@ -316,7 +316,7 @@ def test_model_specific_infer(self):
316316

317317
# There are 3 versions of *_float32_float32_float32 but only
318318
# versions 1 and 3 should be available.
319-
for platform in ("graphdef", "onnx", "plan"):
319+
for platform in ("libtorch", "onnx", "plan"):
320320
tensor_shape = (1, input_size)
321321
model_name = platform + "_float32_float32_float32"
322322

@@ -439,7 +439,7 @@ def test_model_versions_deleted(self):
439439
# version 3 was executed once. Version 2 and 3 models were
440440
# deleted from the model repository so now only expect version 1 to
441441
# be ready and show stats.
442-
for platform in ("graphdef", "onnx"):
442+
for platform in ("libtorch", "onnx"):
443443
model_name = platform + "_int32_int32_int32"
444444

445445
try:
@@ -513,7 +513,7 @@ def test_model_versions_added(self):
513513
# Originally There was version 1 of *_float16_float32_float32.
514514
# Version 7 was added so now expect just version 7 to be ready
515515
# and provide infer stats.
516-
for platform in ("graphdef",):
516+
for platform in ("plan",):
517517
model_name = platform + "_float16_float32_float32"
518518

519519
try:
@@ -615,7 +615,7 @@ def test_infer_stats_no_model_version(self):
615615
# version 3 was executed once. Version 2 and 3 models were
616616
# deleted from the model repository so now only expect version 1 to
617617
# be ready and show infer stats.
618-
for platform in ("graphdef", "onnx"):
618+
for platform in ("libtorch", "onnx"):
619619
model_name = platform + "_int32_int32_int32"
620620

621621
try:
@@ -723,8 +723,8 @@ def test_infer_stats_no_model(self):
723723
stats = infer_stats.model_stats
724724
self.assertEqual(
725725
len(stats),
726-
221,
727-
"expected 221 infer stats for all ready versions of all model",
726+
125,
727+
"expected 125 infer stats for all ready versions of all model",
728728
)
729729

730730
except InferenceServerException as ex:

qa/L0_server_status/test.sh

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
#!/bin/bash
2-
# Copyright (c) 2018-2024, NVIDIA CORPORATION. All rights reserved.
2+
# Copyright (c) 2018-2025, NVIDIA CORPORATION. All rights reserved.
33
#
44
# Redistribution and use in source and binary forms, with or without
55
# modification, are permitted provided that the following conditions
@@ -84,9 +84,9 @@ fi
8484

8585
set -e
8686

87-
rm -fr models/graphdef_int32_int32_int32/2 models/graphdef_int32_int32_int32/3
87+
rm -fr models/libtorch_int32_int32_int32/2 models/libtorch_int32_int32_int32/3
8888
rm -fr models/onnx_int32_int32_int32/2 models/onnx_int32_int32_int32/3
89-
cp -r models/graphdef_float16_float32_float32/1 models/graphdef_float16_float32_float32/7
89+
cp -r models/plan_float16_float32_float32/1 models/plan_float16_float32_float32/7
9090
sleep 3
9191

9292
# Dumping the contents of the models that are currently loaded for debugging purposes

0 commit comments

Comments (0)