Skip to content

Commit 6ee8775

Browse files
authored
[RISCV][IR] Implement verifier check for llvm.experimental.vp.splice immediate. (#147458)
This applies the same check as llvm.vector.splice, which verifies that the immediate is in the range [-VL, VL-1], where VL is the minimum vector length. If the vscale_range attribute is available, its lower bound is used to increase the known minimum vector length for this check. This ensures the immediate is in range for any possible value of vscale that satisfies the vscale_range.
1 parent d3d77f7 commit 6ee8775

File tree

4 files changed

+210
-149
lines changed

4 files changed

+210
-149
lines changed

llvm/lib/IR/Verifier.cpp

Lines changed: 27 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6939,20 +6939,44 @@ void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
69396939
break;
69406940
}
69416941
}
6942-
if (VPI.getIntrinsicID() == Intrinsic::vp_fcmp) {
6942+
6943+
switch (VPI.getIntrinsicID()) {
6944+
case Intrinsic::vp_fcmp: {
69436945
auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
69446946
Check(CmpInst::isFPPredicate(Pred),
69456947
"invalid predicate for VP FP comparison intrinsic", &VPI);
6948+
break;
69466949
}
6947-
if (VPI.getIntrinsicID() == Intrinsic::vp_icmp) {
6950+
case Intrinsic::vp_icmp: {
69486951
auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
69496952
Check(CmpInst::isIntPredicate(Pred),
69506953
"invalid predicate for VP integer comparison intrinsic", &VPI);
6954+
break;
69516955
}
6952-
if (VPI.getIntrinsicID() == Intrinsic::vp_is_fpclass) {
6956+
case Intrinsic::vp_is_fpclass: {
69536957
auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
69546958
Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
69556959
"unsupported bits for llvm.vp.is.fpclass test mask");
6960+
break;
6961+
}
6962+
case Intrinsic::experimental_vp_splice: {
6963+
VectorType *VecTy = cast<VectorType>(VPI.getType());
6964+
int64_t Idx = cast<ConstantInt>(VPI.getArgOperand(2))->getSExtValue();
6965+
int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
6966+
if (VPI.getParent() && VPI.getParent()->getParent()) {
6967+
AttributeList Attrs = VPI.getParent()->getParent()->getAttributes();
6968+
if (Attrs.hasFnAttr(Attribute::VScaleRange))
6969+
KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
6970+
}
6971+
Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
6972+
(Idx >= 0 && Idx < KnownMinNumElements),
6973+
"The splice index exceeds the range [-VL, VL-1] where VL is the "
6974+
"known minimum number of elements in the vector. For scalable "
6975+
"vectors the minimum number of elements is determined from "
6976+
"vscale_range.",
6977+
&VPI);
6978+
break;
6979+
}
69566980
}
69576981
}
69586982

llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll

Lines changed: 43 additions & 41 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ declare <vscale x 16 x i1> @llvm.experimental.vp.splice.nxv16i1(<vscale x 16 x i
1010
declare <vscale x 32 x i1> @llvm.experimental.vp.splice.nxv32i1(<vscale x 32 x i1>, <vscale x 32 x i1>, i32, <vscale x 32 x i1>, i32, i32)
1111
declare <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i1>, <vscale x 64 x i1>, i32, <vscale x 64 x i1>, i32, i32)
1212

13-
define <vscale x 1 x i1> @test_vp_splice_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
13+
define <vscale x 1 x i1> @test_vp_splice_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
1414
; CHECK-LABEL: test_vp_splice_nxv1i1:
1515
; CHECK: # %bb.0:
1616
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -22,19 +22,19 @@ define <vscale x 1 x i1> @test_vp_splice_nxv1i1(<vscale x 1 x i1> %va, <vscale x
2222
; CHECK-NEXT: vmv.v.i v10, 0
2323
; CHECK-NEXT: vmv1r.v v0, v9
2424
; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
25-
; CHECK-NEXT: addi a0, a0, -5
25+
; CHECK-NEXT: addi a0, a0, -1
2626
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
27-
; CHECK-NEXT: vslidedown.vi v9, v9, 5
27+
; CHECK-NEXT: vslidedown.vi v9, v9, 1
2828
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
2929
; CHECK-NEXT: vslideup.vx v9, v8, a0
3030
; CHECK-NEXT: vmsne.vi v0, v9, 0
3131
; CHECK-NEXT: ret
3232

33-
%v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 5, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
33+
%v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 1, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
3434
ret <vscale x 1 x i1> %v
3535
}
3636

37-
define <vscale x 1 x i1> @test_vp_splice_nxv1i1_negative_offset(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
37+
define <vscale x 1 x i1> @test_vp_splice_nxv1i1_negative_offset(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
3838
; CHECK-LABEL: test_vp_splice_nxv1i1_negative_offset:
3939
; CHECK: # %bb.0:
4040
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -46,19 +46,19 @@ define <vscale x 1 x i1> @test_vp_splice_nxv1i1_negative_offset(<vscale x 1 x i1
4646
; CHECK-NEXT: vmv.v.i v10, 0
4747
; CHECK-NEXT: vmv1r.v v0, v9
4848
; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
49-
; CHECK-NEXT: addi a0, a0, -5
50-
; CHECK-NEXT: vsetivli zero, 5, e8, mf8, ta, ma
49+
; CHECK-NEXT: addi a0, a0, -2
50+
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
5151
; CHECK-NEXT: vslidedown.vx v9, v9, a0
5252
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
53-
; CHECK-NEXT: vslideup.vi v9, v8, 5
53+
; CHECK-NEXT: vslideup.vi v9, v8, 2
5454
; CHECK-NEXT: vmsne.vi v0, v9, 0
5555
; CHECK-NEXT: ret
5656

57-
%v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 -5, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
57+
%v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 -2, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
5858
ret <vscale x 1 x i1> %v
5959
}
6060

61-
define <vscale x 1 x i1> @test_vp_splice_nxv1i1_masked(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, <vscale x 1 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
61+
define <vscale x 1 x i1> @test_vp_splice_nxv1i1_masked(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, <vscale x 1 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
6262
; CHECK-LABEL: test_vp_splice_nxv1i1_masked:
6363
; CHECK: # %bb.0:
6464
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -70,20 +70,20 @@ define <vscale x 1 x i1> @test_vp_splice_nxv1i1_masked(<vscale x 1 x i1> %va, <v
7070
; CHECK-NEXT: vmv.v.i v11, 0
7171
; CHECK-NEXT: vmv1r.v v0, v10
7272
; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
73-
; CHECK-NEXT: addi a0, a0, -5
73+
; CHECK-NEXT: addi a0, a0, -1
7474
; CHECK-NEXT: vmv1r.v v0, v9
7575
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
76-
; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
76+
; CHECK-NEXT: vslidedown.vi v10, v10, 1, v0.t
7777
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
7878
; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
7979
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
8080
; CHECK-NEXT: vmsne.vi v0, v10, 0, v0.t
8181
; CHECK-NEXT: ret
82-
%v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 5, <vscale x 1 x i1> %mask, i32 %evla, i32 %evlb)
82+
%v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 1, <vscale x 1 x i1> %mask, i32 %evla, i32 %evlb)
8383
ret <vscale x 1 x i1> %v
8484
}
8585

86-
define <vscale x 2 x i1> @test_vp_splice_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
86+
define <vscale x 2 x i1> @test_vp_splice_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
8787
; CHECK-LABEL: test_vp_splice_nxv2i1:
8888
; CHECK: # %bb.0:
8989
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -95,19 +95,19 @@ define <vscale x 2 x i1> @test_vp_splice_nxv2i1(<vscale x 2 x i1> %va, <vscale x
9595
; CHECK-NEXT: vmv.v.i v10, 0
9696
; CHECK-NEXT: vmv1r.v v0, v9
9797
; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
98-
; CHECK-NEXT: addi a0, a0, -5
98+
; CHECK-NEXT: addi a0, a0, -3
9999
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
100-
; CHECK-NEXT: vslidedown.vi v9, v9, 5
100+
; CHECK-NEXT: vslidedown.vi v9, v9, 3
101101
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
102102
; CHECK-NEXT: vslideup.vx v9, v8, a0
103103
; CHECK-NEXT: vmsne.vi v0, v9, 0
104104
; CHECK-NEXT: ret
105105

106-
%v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
106+
%v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 3, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
107107
ret <vscale x 2 x i1> %v
108108
}
109109

110-
define <vscale x 2 x i1> @test_vp_splice_nxv2i1_negative_offset(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
110+
define <vscale x 2 x i1> @test_vp_splice_nxv2i1_negative_offset(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
111111
; CHECK-LABEL: test_vp_splice_nxv2i1_negative_offset:
112112
; CHECK: # %bb.0:
113113
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -119,19 +119,19 @@ define <vscale x 2 x i1> @test_vp_splice_nxv2i1_negative_offset(<vscale x 2 x i1
119119
; CHECK-NEXT: vmv.v.i v10, 0
120120
; CHECK-NEXT: vmv1r.v v0, v9
121121
; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
122-
; CHECK-NEXT: addi a0, a0, -5
123-
; CHECK-NEXT: vsetivli zero, 5, e8, mf4, ta, ma
122+
; CHECK-NEXT: addi a0, a0, -4
123+
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
124124
; CHECK-NEXT: vslidedown.vx v9, v9, a0
125125
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
126-
; CHECK-NEXT: vslideup.vi v9, v8, 5
126+
; CHECK-NEXT: vslideup.vi v9, v8, 4
127127
; CHECK-NEXT: vmsne.vi v0, v9, 0
128128
; CHECK-NEXT: ret
129129

130-
%v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 -5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
130+
%v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 -4, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
131131
ret <vscale x 2 x i1> %v
132132
}
133133

134-
define <vscale x 2 x i1> @test_vp_splice_nxv2i1_masked(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
134+
define <vscale x 2 x i1> @test_vp_splice_nxv2i1_masked(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
135135
; CHECK-LABEL: test_vp_splice_nxv2i1_masked:
136136
; CHECK: # %bb.0:
137137
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -143,20 +143,20 @@ define <vscale x 2 x i1> @test_vp_splice_nxv2i1_masked(<vscale x 2 x i1> %va, <v
143143
; CHECK-NEXT: vmv.v.i v11, 0
144144
; CHECK-NEXT: vmv1r.v v0, v10
145145
; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
146-
; CHECK-NEXT: addi a0, a0, -5
146+
; CHECK-NEXT: addi a0, a0, -3
147147
; CHECK-NEXT: vmv1r.v v0, v9
148148
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
149-
; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
149+
; CHECK-NEXT: vslidedown.vi v10, v10, 3, v0.t
150150
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
151151
; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
152152
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
153153
; CHECK-NEXT: vmsne.vi v0, v10, 0, v0.t
154154
; CHECK-NEXT: ret
155-
%v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
155+
%v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 3, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
156156
ret <vscale x 2 x i1> %v
157157
}
158158

159-
define <vscale x 4 x i1> @test_vp_splice_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
159+
define <vscale x 4 x i1> @test_vp_splice_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
160160
; CHECK-LABEL: test_vp_splice_nxv4i1:
161161
; CHECK: # %bb.0:
162162
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -180,7 +180,7 @@ define <vscale x 4 x i1> @test_vp_splice_nxv4i1(<vscale x 4 x i1> %va, <vscale x
180180
ret <vscale x 4 x i1> %v
181181
}
182182

183-
define <vscale x 4 x i1> @test_vp_splice_nxv4i1_negative_offset(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
183+
define <vscale x 4 x i1> @test_vp_splice_nxv4i1_negative_offset(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
184184
; CHECK-LABEL: test_vp_splice_nxv4i1_negative_offset:
185185
; CHECK: # %bb.0:
186186
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -204,7 +204,7 @@ define <vscale x 4 x i1> @test_vp_splice_nxv4i1_negative_offset(<vscale x 4 x i1
204204
ret <vscale x 4 x i1> %v
205205
}
206206

207-
define <vscale x 4 x i1> @test_vp_splice_nxv4i1_masked(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, <vscale x 4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
207+
define <vscale x 4 x i1> @test_vp_splice_nxv4i1_masked(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, <vscale x 4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
208208
; CHECK-LABEL: test_vp_splice_nxv4i1_masked:
209209
; CHECK: # %bb.0:
210210
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -229,7 +229,7 @@ define <vscale x 4 x i1> @test_vp_splice_nxv4i1_masked(<vscale x 4 x i1> %va, <v
229229
ret <vscale x 4 x i1> %v
230230
}
231231

232-
define <vscale x 8 x i1> @test_vp_splice_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
232+
define <vscale x 8 x i1> @test_vp_splice_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
233233
; CHECK-LABEL: test_vp_splice_nxv8i1:
234234
; CHECK: # %bb.0:
235235
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -253,7 +253,7 @@ define <vscale x 8 x i1> @test_vp_splice_nxv8i1(<vscale x 8 x i1> %va, <vscale x
253253
ret <vscale x 8 x i1> %v
254254
}
255255

256-
define <vscale x 8 x i1> @test_vp_splice_nxv8i1_negative_offset(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
256+
define <vscale x 8 x i1> @test_vp_splice_nxv8i1_negative_offset(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
257257
; CHECK-LABEL: test_vp_splice_nxv8i1_negative_offset:
258258
; CHECK: # %bb.0:
259259
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -277,7 +277,7 @@ define <vscale x 8 x i1> @test_vp_splice_nxv8i1_negative_offset(<vscale x 8 x i1
277277
ret <vscale x 8 x i1> %v
278278
}
279279

280-
define <vscale x 8 x i1> @test_vp_splice_nxv8i1_masked(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, <vscale x 8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
280+
define <vscale x 8 x i1> @test_vp_splice_nxv8i1_masked(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, <vscale x 8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
281281
; CHECK-LABEL: test_vp_splice_nxv8i1_masked:
282282
; CHECK: # %bb.0:
283283
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -302,7 +302,7 @@ define <vscale x 8 x i1> @test_vp_splice_nxv8i1_masked(<vscale x 8 x i1> %va, <v
302302
ret <vscale x 8 x i1> %v
303303
}
304304

305-
define <vscale x 16 x i1> @test_vp_splice_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
305+
define <vscale x 16 x i1> @test_vp_splice_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
306306
; CHECK-LABEL: test_vp_splice_nxv16i1:
307307
; CHECK: # %bb.0:
308308
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -326,7 +326,7 @@ define <vscale x 16 x i1> @test_vp_splice_nxv16i1(<vscale x 16 x i1> %va, <vscal
326326
ret <vscale x 16 x i1> %v
327327
}
328328

329-
define <vscale x 16 x i1> @test_vp_splice_nxv16i1_negative_offset(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
329+
define <vscale x 16 x i1> @test_vp_splice_nxv16i1_negative_offset(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
330330
; CHECK-LABEL: test_vp_splice_nxv16i1_negative_offset:
331331
; CHECK: # %bb.0:
332332
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -350,7 +350,7 @@ define <vscale x 16 x i1> @test_vp_splice_nxv16i1_negative_offset(<vscale x 16 x
350350
ret <vscale x 16 x i1> %v
351351
}
352352

353-
define <vscale x 16 x i1> @test_vp_splice_nxv16i1_masked(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, <vscale x 16 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
353+
define <vscale x 16 x i1> @test_vp_splice_nxv16i1_masked(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, <vscale x 16 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
354354
; CHECK-LABEL: test_vp_splice_nxv16i1_masked:
355355
; CHECK: # %bb.0:
356356
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -376,7 +376,7 @@ define <vscale x 16 x i1> @test_vp_splice_nxv16i1_masked(<vscale x 16 x i1> %va,
376376
ret <vscale x 16 x i1> %v
377377
}
378378

379-
define <vscale x 32 x i1> @test_vp_splice_nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
379+
define <vscale x 32 x i1> @test_vp_splice_nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
380380
; CHECK-LABEL: test_vp_splice_nxv32i1:
381381
; CHECK: # %bb.0:
382382
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -400,7 +400,7 @@ define <vscale x 32 x i1> @test_vp_splice_nxv32i1(<vscale x 32 x i1> %va, <vscal
400400
ret <vscale x 32 x i1> %v
401401
}
402402

403-
define <vscale x 32 x i1> @test_vp_splice_nxv32i1_negative_offset(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
403+
define <vscale x 32 x i1> @test_vp_splice_nxv32i1_negative_offset(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
404404
; CHECK-LABEL: test_vp_splice_nxv32i1_negative_offset:
405405
; CHECK: # %bb.0:
406406
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -424,7 +424,7 @@ define <vscale x 32 x i1> @test_vp_splice_nxv32i1_negative_offset(<vscale x 32 x
424424
ret <vscale x 32 x i1> %v
425425
}
426426

427-
define <vscale x 32 x i1> @test_vp_splice_nxv32i1_masked(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, <vscale x 32 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
427+
define <vscale x 32 x i1> @test_vp_splice_nxv32i1_masked(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, <vscale x 32 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
428428
; CHECK-LABEL: test_vp_splice_nxv32i1_masked:
429429
; CHECK: # %bb.0:
430430
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -450,7 +450,7 @@ define <vscale x 32 x i1> @test_vp_splice_nxv32i1_masked(<vscale x 32 x i1> %va,
450450
ret <vscale x 32 x i1> %v
451451
}
452452

453-
define <vscale x 64 x i1> @test_vp_splice_nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
453+
define <vscale x 64 x i1> @test_vp_splice_nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
454454
; CHECK-LABEL: test_vp_splice_nxv64i1:
455455
; CHECK: # %bb.0:
456456
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -474,7 +474,7 @@ define <vscale x 64 x i1> @test_vp_splice_nxv64i1(<vscale x 64 x i1> %va, <vscal
474474
ret <vscale x 64 x i1> %v
475475
}
476476

477-
define <vscale x 64 x i1> @test_vp_splice_nxv64i1_negative_offset(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
477+
define <vscale x 64 x i1> @test_vp_splice_nxv64i1_negative_offset(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
478478
; CHECK-LABEL: test_vp_splice_nxv64i1_negative_offset:
479479
; CHECK: # %bb.0:
480480
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -498,7 +498,7 @@ define <vscale x 64 x i1> @test_vp_splice_nxv64i1_negative_offset(<vscale x 64 x
498498
ret <vscale x 64 x i1> %v
499499
}
500500

501-
define <vscale x 64 x i1> @test_vp_splice_nxv64i1_masked(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, <vscale x 64 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
501+
define <vscale x 64 x i1> @test_vp_splice_nxv64i1_masked(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, <vscale x 64 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
502502
; CHECK-LABEL: test_vp_splice_nxv64i1_masked:
503503
; CHECK: # %bb.0:
504504
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -523,3 +523,5 @@ define <vscale x 64 x i1> @test_vp_splice_nxv64i1_masked(<vscale x 64 x i1> %va,
523523
%v = call <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 5, <vscale x 64 x i1> %mask, i32 %evla, i32 %evlb)
524524
ret <vscale x 64 x i1> %v
525525
}
526+
527+
attributes #0 = { vscale_range(2,0) }

0 commit comments

Comments
 (0)