@@ -3797,3 +3797,29 @@ func.func @test_qlinearglobalavgpool(%arg0: !torch.vtensor<[1,1000,13,13],ui8>,
// CHECK: return %[[OUT]]
return %0 : !torch.vtensor<[1,1000,1,1],ui8>
}
+
+ // -----
+
+ // CHECK-LABEL: @test_qlinear_sigmoid(
+ // CHECK-SAME: %[[X:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: !torch.vtensor<[?,?],ui8>,
+ // CHECK-SAME: %[[X_SCALE:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: !torch.vtensor<[],f32>,
+ // CHECK-SAME: %[[X_ZERO_POINT:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: !torch.vtensor<[],ui8>,
+ // CHECK-SAME: %[[Y_SCALE:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: !torch.vtensor<[],f32>,
+ // CHECK-SAME: %[[Y_ZERO_POINT:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: !torch.vtensor<[],ui8>) -> !torch.vtensor<[?,?],ui8>
+ func.func @test_qlinear_sigmoid(%arg0: !torch.vtensor<[?,?],ui8>, %arg1: !torch.vtensor<[],f32>, %arg2: !torch.vtensor<[],ui8>, %arg3: !torch.vtensor<[],f32>, %arg4: !torch.vtensor<[],ui8>) -> !torch.vtensor<[?,?],ui8> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64} {
+ %0 = torch.operator "onnx.QLinearSigmoid"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!torch.vtensor<[?,?],ui8>, !torch.vtensor<[],f32>, !torch.vtensor<[],ui8>, !torch.vtensor<[],f32>, !torch.vtensor<[],ui8>) -> !torch.vtensor<[?,?],ui8>
+ // CHECK-DAG: %[[EMPTY:.+]] = torch.prim.ListConstruct : () -> !torch.list<int>
+ // CHECK-DAG: %[[XSCALE:.+]] = torch.aten.item %[[X_SCALE]] : !torch.vtensor<[],f32> -> !torch.float
+ // CHECK-DAG: %[[XZP:.+]] = torch.aten.item %[[X_ZERO_POINT]] : !torch.vtensor<[],ui8> -> !torch.int
+ // CHECK-DAG: %[[EMPTY_0:.+]] = torch.prim.ListConstruct : () -> !torch.list<int>
+ // CHECK-DAG: %[[YSCALE:.+]] = torch.aten.item %[[Y_SCALE]] : !torch.vtensor<[],f32> -> !torch.float
+ // CHECK-DAG: %[[YZP:.+]] = torch.aten.item %[[Y_ZERO_POINT]] : !torch.vtensor<[],ui8> -> !torch.int
+ // CHECK-DAG: %[[X_QUANT:.+]] = torch.aten._make_per_tensor_quantized_tensor %[[X]], %[[XSCALE]], %[[XZP]] : !torch.vtensor<[?,?],ui8>, !torch.float, !torch.int -> !torch.vtensor<[?,?],!torch.quint8>
+ // CHECK: %[[X_F32:.+]] = torch.aten.dequantize.self %[[X_QUANT]] : !torch.vtensor<[?,?],!torch.quint8> -> !torch.vtensor<[?,?],f32>
+ // CHECK: %[[SIGMOID:.*]] = torch.aten.sigmoid %[[X_F32]] : !torch.vtensor<[?,?],f32> -> !torch.vtensor<[?,?],f32>
+ // CHECK: %[[DTY:.+]] = torch.constant.int 13
+ // CHECK: %[[QO:.+]] = torch.aten.quantize_per_tensor %[[SIGMOID]], %[[YSCALE]], %[[YZP]], %[[DTY]] : !torch.vtensor<[?,?],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[?,?],!torch.quint8>
+ // CHECK: %[[OUT:.+]] = torch.aten.int_repr %[[QO]] : !torch.vtensor<[?,?],!torch.quint8> -> !torch.vtensor<[?,?],ui8>
+ // CHECK: return %[[OUT]]
+ return %0 : !torch.vtensor<[?,?],ui8>
+ }