@@ -285,13 +285,14 @@ def test_performance(self):
         dataset = Datasets("tensorflow")["dummy"]((100, 256, 256, 1), label=True)

         from neural_compressor.experimental import Quantization, common
-        from neural_compressor.utils.utility import get_size
+        from neural_compressor.model import tensorflow_model

         quantizer = Quantization("fake_yaml.yaml")
         quantizer.calib_dataloader = common.DataLoader(dataset)
         quantizer.eval_dataloader = common.DataLoader(dataset)
         quantizer.model = self.constant_graph
         q_model = quantizer.fit()
+        self.assertTrue(isinstance(q_model, tensorflow_model.TensorflowBaseModel))

         from neural_compressor.experimental import Benchmark, common
@@ -345,6 +346,7 @@ def eval(model):
         from neural_compressor.conf.config import conf
         from neural_compressor.experimental import Quantization
+        from neural_compressor.model import onnx_model

         conf.model.framework = "onnxrt_integerops"
         conf.quantization.approach = "post_training_dynamic_quant"
@@ -357,6 +359,8 @@ def eval(model):
         quantize.model = model
         quantize.eval_func = eval
         q_model = quantize()
+        self.assertTrue(isinstance(q_model, onnx_model.ONNXModel))
+        self.assertTrue("quantize" in str(q_model.model.producer_name))

     def test_tune_data(self):
         from neural_compressor.objective import MultiObjective