Commit 4c7cfe3

[RISCV] Remove intrinsic declares from costmodel tests. NFC
Declaring an intrinsic before use is no longer required, and in intrinsic tests we end up with a large number of declarations because of the various type overloads.
1 parent 85a11bc · commit 4c7cfe3

24 files changed: +0 −1916 lines
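For context, a minimal sketch of what such a cost-model test body looks like without the declarations (the function name below is hypothetical and not part of this commit): the overloaded intrinsic is simply called, and the IR parser materializes the matching declaration automatically, so the declare blocks removed below carried no information.

; No "declare <2 x i64> @llvm.abs.v2i64(<2 x i64>, i1)" line is needed any more;
; the declaration is created automatically when the module is parsed.
define void @abs_sketch() {
  %a = call <2 x i64> @llvm.abs.v2i64(<2 x i64> undef, i1 false)
  ret void
}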

llvm/test/Analysis/CostModel/RISCV/abs.ll

Lines changed: 0 additions & 38 deletions
@@ -3,44 +3,6 @@
 ; Check that we don't crash querying costs when vectors are not enabled.
 ; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64
 
-declare <2 x i64> @llvm.abs.v2i64(<2 x i64>, i1)
-declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1)
-declare <8 x i64> @llvm.abs.v8i64(<8 x i64>, i1)
-declare <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64>, i1)
-declare <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64>, i1)
-declare <vscale x 8 x i64> @llvm.abs.nxv8i64(<vscale x 8 x i64>, i1)
-
-declare <2 x i32> @llvm.abs.v2i32(<2 x i32>, i1)
-declare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1)
-declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1)
-declare <16 x i32> @llvm.abs.v16i32(<16 x i32>, i1)
-declare <vscale x 2 x i32> @llvm.abs.nxv2i32(<vscale x 2 x i32>, i1)
-declare <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32>, i1)
-declare <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32>, i1)
-declare <vscale x 16 x i32> @llvm.abs.nxv16i32(<vscale x 16 x i32>, i1)
-
-declare <2 x i16> @llvm.abs.v2i16(<2 x i16>, i1)
-declare <4 x i16> @llvm.abs.v4i16(<4 x i16>, i1)
-declare <8 x i16> @llvm.abs.v8i16(<8 x i16>, i1)
-declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1)
-declare <32 x i16> @llvm.abs.v32i16(<32 x i16>, i1)
-declare <vscale x 2 x i16> @llvm.abs.nxv2i16(<vscale x 2 x i16>, i1)
-declare <vscale x 4 x i16> @llvm.abs.nxv4i16(<vscale x 4 x i16>, i1)
-declare <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16>, i1)
-declare <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16>, i1)
-declare <vscale x 32 x i16> @llvm.abs.nxv32i16(<vscale x 32 x i16>, i1)
-
-declare <2 x i8> @llvm.abs.v2i8(<2 x i8>, i1)
-declare <4 x i8> @llvm.abs.v4i8(<4 x i8>, i1)
-declare <8 x i8> @llvm.abs.v8i8(<8 x i8>, i1)
-declare <16 x i8> @llvm.abs.v16i8(<16 x i8>, i1)
-declare <32 x i8> @llvm.abs.v32i8(<32 x i8>, i1)
-declare <64 x i8> @llvm.abs.v64i8(<64 x i8>, i1)
-declare <vscale x 8 x i8> @llvm.abs.nxv8i8(<vscale x 8 x i8>, i1)
-declare <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8>, i1)
-declare <vscale x 32 x i8> @llvm.abs.nxv32i8(<vscale x 32 x i8>, i1)
-declare <vscale x 64 x i8> @llvm.abs.nxv64i8(<vscale x 64 x i8>, i1)
-
 define i32 @abs(i32 %arg) {
 ; CHECK-LABEL: 'abs'
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = call <2 x i64> @llvm.abs.v2i64(<2 x i64> undef, i1 false)

llvm/test/Analysis/CostModel/RISCV/active_lane_mask.ll

Lines changed: 0 additions & 23 deletions
@@ -57,26 +57,3 @@ define void @get_lane_mask() {
 
   ret void
 }
-
-declare <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64, i64)
-declare <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64, i64)
-declare <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64, i64)
-declare <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64, i64)
-declare <vscale x 1 x i1> @llvm.get.active.lane.mask.nxv1i1.i64(i64, i64)
-declare <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32, i32)
-declare <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i32(i32, i32)
-declare <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32, i32)
-declare <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i32(i32, i32)
-declare <vscale x 1 x i1> @llvm.get.active.lane.mask.nxv1i1.i32(i32, i32)
-declare <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i64(i64, i64)
-declare <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i16(i16, i16)
-declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i64(i64, i64)
-declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i64(i64, i64)
-declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64, i64)
-declare <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64, i64)
-declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32, i32)
-declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32)
-declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
-declare <2 x i1> @llvm.get.active.lane.mask.v2i1.i32(i32, i32)
-declare <32 x i1> @llvm.get.active.lane.mask.v32i1.i64(i64, i64)
-declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i16(i16, i16)

llvm/test/Analysis/CostModel/RISCV/arith-fp.ll

Lines changed: 0 additions & 29 deletions
@@ -1577,32 +1577,3 @@ define void @fmuladd_f16() {
   call <vscale x 16 x half> @llvm.fmuladd.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef)
   ret void
 }
-
-declare <1 x bfloat> @llvm.vp.frem.v1f16(<1 x bfloat>, <1 x bfloat>, <1 x i1>, i32)
-declare <2 x bfloat> @llvm.vp.frem.v2f16(<2 x bfloat>, <2 x bfloat>, <2 x i1>, i32)
-declare <4 x bfloat> @llvm.vp.frem.v4f16(<4 x bfloat>, <4 x bfloat>, <4 x i1>, i32)
-declare <8 x bfloat> @llvm.vp.frem.v8f16(<8 x bfloat>, <8 x bfloat>, <8 x i1>, i32)
-declare <16 x bfloat> @llvm.vp.frem.v16f16(<16 x bfloat>, <16 x bfloat>, <16 x i1>, i32)
-declare <1 x float> @llvm.vp.frem.v1f32(<1 x float>, <1 x float>, <1 x i1>, i32)
-declare <2 x float> @llvm.vp.frem.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32)
-declare <4 x float> @llvm.vp.frem.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)
-declare <8 x float> @llvm.vp.frem.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32)
-declare <16 x float> @llvm.vp.frem.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32)
-declare <1 x double> @llvm.vp.frem.v1f64(<1 x double>, <1 x double>, <1 x i1>, i32)
-declare <2 x double> @llvm.vp.frem.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32)
-declare <4 x double> @llvm.vp.frem.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32)
-declare <8 x double> @llvm.vp.frem.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32)
-declare <vscale x 1 x bfloat> @llvm.vp.frem.nxv1f16(<vscale x 1 x bfloat>, <vscale x 1 x bfloat>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x bfloat> @llvm.vp.frem.nxv2f16(<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x bfloat> @llvm.vp.frem.nxv4f16(<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x bfloat> @llvm.vp.frem.nxv8f16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x bfloat> @llvm.vp.frem.nxv16f16(<vscale x 16 x bfloat>, <vscale x 16 x bfloat>, <vscale x 16 x i1>, i32)
-declare <vscale x 1 x float> @llvm.vp.frem.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x float> @llvm.vp.frem.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x float> @llvm.vp.frem.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x float> @llvm.vp.frem.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x float> @llvm.vp.frem.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x i1>, i32)
-declare <vscale x 1 x double> @llvm.vp.frem.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x double> @llvm.vp.frem.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.frem.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x double> @llvm.vp.frem.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x i1>, i32)

llvm/test/Analysis/CostModel/RISCV/cttz_elts.ll

Lines changed: 0 additions & 16 deletions
@@ -131,19 +131,3 @@ define void @foo_vscale_range_2_16() vscale_range(2,16) {
 
   ret void
 }
-
-declare i64 @llvm.experimental.cttz.elts.i64.nxv2i1(<vscale x 2 x i1>, i1)
-declare i64 @llvm.experimental.cttz.elts.i64.nxv4i1(<vscale x 4 x i1>, i1)
-declare i64 @llvm.experimental.cttz.elts.i64.nxv8i1(<vscale x 8 x i1>, i1)
-declare i64 @llvm.experimental.cttz.elts.i64.nxv16i1(<vscale x 16 x i1>, i1)
-declare i64 @llvm.experimental.cttz.elts.i64.nxv32i1(<vscale x 32 x i1>, i1)
-declare i64 @llvm.experimental.cttz.elts.i64.nxv64i1(<vscale x 64 x i1>, i1)
-declare i64 @llvm.experimental.cttz.elts.i64.nxv128i1(<vscale x 128 x i1>, i1)
-
-declare i32 @llvm.experimental.cttz.elts.i32.nxv2i1(<vscale x 2 x i1>, i1)
-declare i32 @llvm.experimental.cttz.elts.i32.nxv4i1(<vscale x 4 x i1>, i1)
-declare i32 @llvm.experimental.cttz.elts.i32.nxv8i1(<vscale x 8 x i1>, i1)
-declare i32 @llvm.experimental.cttz.elts.i32.nxv16i1(<vscale x 16 x i1>, i1)
-declare i32 @llvm.experimental.cttz.elts.i32.nxv32i1(<vscale x 32 x i1>, i1)
-declare i32 @llvm.experimental.cttz.elts.i32.nxv64i1(<vscale x 64 x i1>, i1)
-declare i32 @llvm.experimental.cttz.elts.i32.nxv128i1(<vscale x 128 x i1>, i1)

llvm/test/Analysis/CostModel/RISCV/fixed-vector-gather.ll

Lines changed: 0 additions & 43 deletions
@@ -185,46 +185,3 @@ define i32 @masked_gather() {
   ret i32 0
 }
 
-declare <8 x double> @llvm.masked.gather.v8f64.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x double>)
-declare <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x double>)
-declare <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x double>)
-declare <1 x double> @llvm.masked.gather.v1f64.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x double>)
-
-declare <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x float>)
-declare <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x float>)
-declare <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x float>)
-declare <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x float>)
-declare <1 x float> @llvm.masked.gather.v1f32.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x float>)
-
-declare <32 x half> @llvm.masked.gather.v32f16.v32p0(<32 x ptr>, i32, <32 x i1>, <32 x half>)
-declare <16 x half> @llvm.masked.gather.v16f16.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x half>)
-declare <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x half>)
-declare <4 x half> @llvm.masked.gather.v4f16.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x half>)
-declare <2 x half> @llvm.masked.gather.v2f16.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x half>)
-declare <1 x half> @llvm.masked.gather.v1f16.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x half>)
-
-declare <8 x i64> @llvm.masked.gather.v8i64.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i64>)
-declare <4 x i64> @llvm.masked.gather.v4i64.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i64>)
-declare <2 x i64> @llvm.masked.gather.v2i64.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i64>)
-declare <1 x i64> @llvm.masked.gather.v1i64.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x i64>)
-
-declare <16 x i32> @llvm.masked.gather.v16i32.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x i32>)
-declare <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i32>)
-declare <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i32>)
-declare <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i32>)
-declare <1 x i32> @llvm.masked.gather.v1i32.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x i32>)
-
-declare <32 x i16> @llvm.masked.gather.v32i16.v32p0(<32 x ptr>, i32, <32 x i1>, <32 x i16>)
-declare <16 x i16> @llvm.masked.gather.v16i16.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x i16>)
-declare <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i16>)
-declare <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i16>)
-declare <2 x i16> @llvm.masked.gather.v2i16.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i16>)
-declare <1 x i16> @llvm.masked.gather.v1i16.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x i16>)
-
-declare <64 x i8> @llvm.masked.gather.v64i8.v64p0(<64 x ptr>, i32, <64 x i1>, <64 x i8>)
-declare <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr>, i32, <32 x i1>, <32 x i8>)
-declare <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x i8>)
-declare <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i8>)
-declare <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i8>)
-declare <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i8>)
-declare <1 x i8> @llvm.masked.gather.v1i8.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x i8>)

llvm/test/Analysis/CostModel/RISCV/fixed-vector-scatter.ll

Lines changed: 0 additions & 43 deletions
@@ -185,46 +185,3 @@ define i32 @masked_scatter() {
   ret i32 0
 }
 
-declare void @llvm.masked.scatter.v8f64.v8p0(<8 x double>, <8 x ptr>, i32, <8 x i1>)
-declare void @llvm.masked.scatter.v4f64.v4p0(<4 x double>, <4 x ptr>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v2f64.v2p0(<2 x double>, <2 x ptr>, i32, <2 x i1>)
-declare void @llvm.masked.scatter.v1f64.v1p0(<1 x double>, <1 x ptr>, i32, <1 x i1>)
-
-declare void @llvm.masked.scatter.v16f32.v16p0(<16 x float>, <16 x ptr>, i32, <16 x i1>)
-declare void @llvm.masked.scatter.v8f32.v8p0(<8 x float>, <8 x ptr>, i32, <8 x i1>)
-declare void @llvm.masked.scatter.v4f32.v4p0(<4 x float>, <4 x ptr>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v2f32.v2p0(<2 x float>, <2 x ptr>, i32, <2 x i1>)
-declare void @llvm.masked.scatter.v1f32.v1p0(<1 x float>, <1 x ptr>, i32, <1 x i1>)
-
-declare void @llvm.masked.scatter.v32f16.v32p0(<32 x half>, <32 x ptr>, i32, <32 x i1>)
-declare void @llvm.masked.scatter.v16f16.v16p0(<16 x half>, <16 x ptr>, i32, <16 x i1>)
-declare void @llvm.masked.scatter.v8f16.v8p0(<8 x half>, <8 x ptr>, i32, <8 x i1>)
-declare void @llvm.masked.scatter.v4f16.v4p0(<4 x half>, <4 x ptr>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v2f16.v2p0(<2 x half>, <2 x ptr>, i32, <2 x i1>)
-declare void @llvm.masked.scatter.v1f16.v1p0(<1 x half>, <1 x ptr>, i32, <1 x i1>)
-
-declare void @llvm.masked.scatter.v8i64.v8p0(<8 x i64>, <8 x ptr>, i32, <8 x i1>)
-declare void @llvm.masked.scatter.v4i64.v4p0(<4 x i64>, <4 x ptr>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v2i64.v2p0(<2 x i64>, <2 x ptr>, i32, <2 x i1>)
-declare void @llvm.masked.scatter.v1i64.v1p0(<1 x i64>, <1 x ptr>, i32, <1 x i1>)
-
-declare void @llvm.masked.scatter.v16i32.v16p0(<16 x i32>, <16 x ptr>, i32, <16 x i1>)
-declare void @llvm.masked.scatter.v8i32.v8p0(<8 x i32>, <8 x ptr>, i32, <8 x i1>)
-declare void @llvm.masked.scatter.v4i32.v4p0(<4 x i32>, <4 x ptr>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v2i32.v2p0(<2 x i32>, <2 x ptr>, i32, <2 x i1>)
-declare void @llvm.masked.scatter.v1i32.v1p0(<1 x i32>, <1 x ptr>, i32, <1 x i1>)
-
-declare void @llvm.masked.scatter.v32i16.v32p0(<32 x i16>, <32 x ptr>, i32, <32 x i1>)
-declare void @llvm.masked.scatter.v16i16.v16p0(<16 x i16>, <16 x ptr>, i32, <16 x i1>)
-declare void @llvm.masked.scatter.v8i16.v8p0(<8 x i16>, <8 x ptr>, i32, <8 x i1>)
-declare void @llvm.masked.scatter.v4i16.v4p0(<4 x i16>, <4 x ptr>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v2i16.v2p0(<2 x i16>, <2 x ptr>, i32, <2 x i1>)
-declare void @llvm.masked.scatter.v1i16.v1p0(<1 x i16>, <1 x ptr>, i32, <1 x i1>)
-
-declare void @llvm.masked.scatter.v64i8.v64p0(<64 x i8>, <64 x ptr>, i32, <64 x i1>)
-declare void @llvm.masked.scatter.v32i8.v32p0(<32 x i8>, <32 x ptr>, i32, <32 x i1>)
-declare void @llvm.masked.scatter.v16i8.v16p0(<16 x i8>, <16 x ptr>, i32, <16 x i1>)
-declare void @llvm.masked.scatter.v8i8.v8p0(<8 x i8>, <8 x ptr>, i32, <8 x i1>)
-declare void @llvm.masked.scatter.v4i8.v4p0(<4 x i8>, <4 x ptr>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v2i8.v2p0(<2 x i8>, <2 x ptr>, i32, <2 x i1>)
-declare void @llvm.masked.scatter.v1i8.v1p0(<1 x i8>, <1 x ptr>, i32, <1 x i1>)

llvm/test/Analysis/CostModel/RISCV/gep.ll

Lines changed: 0 additions & 11 deletions
@@ -396,14 +396,3 @@ define void @foldable_vector_uses(ptr %base, <2 x ptr> %base.vec) {
   ret void
 }
 
-declare <2 x i8> @llvm.masked.load.v2i8.p0(ptr, i32, <2 x i1>, <2 x i8>)
-declare <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i8>)
-declare <2 x i8> @llvm.masked.expandload.v2i8(ptr, <2 x i1>, <2 x i8>)
-declare <2 x i8> @llvm.vp.load.v2i8.p0(ptr, <2 x i1>, i32)
-declare <2 x i8> @llvm.experimental.vp.strided.load.v2i8.i64(ptr, i64, <2 x i1>, i32)
-
-declare void @llvm.masked.store.v2i8.p0(<2 x i8>, ptr, i32, <2 x i1>)
-declare void @llvm.masked.scatter.v2i8.v2p0(<2 x i8>, <2 x ptr>, i32, <2 x i1>)
-declare void @llvm.masked.compressstore.v2i8(<2 x i8>, ptr, <2 x i1>)
-declare void @llvm.vp.store.v2i8.p0(<2 x i8>, ptr, <2 x i1>, i32)
-declare void @llvm.experimental.vp.strided.store.v2i8.i64(<2 x i8>, ptr, i64, <2 x i1>, i32)

0 commit comments
