@@ -3823,3 +3823,29 @@ func.func @test_qlinear_sigmoid(%arg0: !torch.vtensor<[?,?],ui8>, %arg1: !torch.
3823
3823
// CHECK: return %[[OUT]]
3824
3824
return %0 : !torch.vtensor <[?,?],ui8 >
3825
3825
}
3826

// -----

// CHECK-LABEL: @test_qlinearAveragePool(
// CHECK-SAME: %[[X:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: !torch.vtensor<[1,128,56,56],ui8>,
// CHECK-SAME: %[[X_SCALE:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: !torch.vtensor<[],f32>,
// CHECK-SAME: %[[X_ZERO_POINT:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: !torch.vtensor<[],ui8>,
// CHECK-SAME: %[[Y_SCALE:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: !torch.vtensor<[],f32>,
// CHECK-SAME: %[[Y_ZERO_POINT:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: !torch.vtensor<[],ui8>) -> !torch.vtensor<[1,128,28,28],ui8>
// Lowering of onnx.QLinearAveragePool: dequantize the ui8 input using
// (x_scale, x_zero_point), run a float aten.avg_pool2d, then re-quantize the
// result with (y_scale, y_zero_point) and strip the quant wrapper via int_repr.
func.func @test_qlinearAveragePool(%arg0: !torch.vtensor<[1,128,56,56],ui8>, %arg1: !torch.vtensor<[],f32>, %arg2: !torch.vtensor<[],ui8>, %arg3: !torch.vtensor<[],f32>, %arg4: !torch.vtensor<[],ui8>) -> !torch.vtensor<[1,128,28,28],ui8> attributes {torch.onnx_meta.ir_version = 5 : si64, torch.onnx_meta.opset_version = 10 : si64} {
  %0 = torch.operator "onnx.QLinearAveragePool"(%arg0, %arg1, %arg2, %arg3, %arg4) {torch.onnx.auto_pad = "NOTSET", torch.onnx.ceil_mode = 0 : si64, torch.onnx.count_include_pad = 0 : si64, torch.onnx.kernel_shape = [2 : si64, 2 : si64], torch.onnx.pads = [0 : si64, 0 : si64, 0 : si64, 0 : si64], torch.onnx.strides = [2 : si64, 2 : si64]} : (!torch.vtensor<[1,128,56,56],ui8>, !torch.vtensor<[],f32>, !torch.vtensor<[],ui8>, !torch.vtensor<[],f32>, !torch.vtensor<[],ui8>) -> !torch.vtensor<[1,128,28,28],ui8>
  // CHECK-DAG: %[[EMPTY:.+]] = torch.prim.ListConstruct : () -> !torch.list<int>
  // CHECK-DAG: %[[XSCALE:.+]] = torch.aten.item %[[X_SCALE]] : !torch.vtensor<[],f32> -> !torch.float
  // CHECK-DAG: %[[XZP:.+]] = torch.aten.item %[[X_ZERO_POINT]] : !torch.vtensor<[],ui8> -> !torch.int
  // CHECK-DAG: %[[EMPTY_0:.+]] = torch.prim.ListConstruct : () -> !torch.list<int>
  // CHECK-DAG: %[[YSCALE:.+]] = torch.aten.item %[[Y_SCALE]] : !torch.vtensor<[],f32> -> !torch.float
  // CHECK-DAG: %[[YZP:.+]] = torch.aten.item %[[Y_ZERO_POINT]] : !torch.vtensor<[],ui8> -> !torch.int
  // CHECK-DAG: %[[X_QUANT:.+]] = torch.aten._make_per_tensor_quantized_tensor %[[X]], %[[XSCALE]], %[[XZP]] : !torch.vtensor<[1,128,56,56],ui8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,56,56],!torch.quint8>
  // CHECK: %[[X_F32:.+]] = torch.aten.dequantize.self %[[X_QUANT]] : !torch.vtensor<[1,128,56,56],!torch.quint8> -> !torch.vtensor<[1,128,56,56],f32>
  // CHECK: %[[AVGPOOL:.*]] = torch.aten.avg_pool2d %[[X_F32]], %{{.+}}, %{{.+}}, %{{.+}}, %{{.+}}, %{{.+}}, %{{.+}} : !torch.vtensor<[1,128,56,56],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,128,28,28],f32>
  // CHECK: %[[DTY:.+]] = torch.constant.int 13
  // CHECK: %[[QO:.+]] = torch.aten.quantize_per_tensor %[[AVGPOOL]], %[[YSCALE]], %[[YZP]], %[[DTY]] : !torch.vtensor<[1,128,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.quint8>
  // CHECK: %[[OUT:.+]] = torch.aten.int_repr %[[QO]] : !torch.vtensor<[1,128,28,28],!torch.quint8> -> !torch.vtensor<[1,128,28,28],ui8>
  // CHECK: return %[[OUT]]
  return %0 : !torch.vtensor<[1,128,28,28],ui8>
}