
Commit fced87d

[AMDGPU] Fix regression with vectorization limiting
D67148 removed TTI::getNumberOfRegisters(bool Vector), and LoopVectorize now calls TTI::getNumberOfRegisters(unsigned ClassID) instead. On AMDGPU this resulted in unrestricted vectorization that blew up register pressure.

Differential Revision: https://reviews.llvm.org/D122850
1 parent 833882b commit fced87d
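For context on the mechanism: LoopVectorize sizes its vectorization and interleaving decisions from the register budget that TTI reports, so reporting the raw hardware VGPR count lets it keep far too many values live at once. The snippet below is only a rough illustrative sketch of that relationship; the function name and formula are simplified assumptions, not the actual LoopVectorize code.

// Rough sketch: a larger reported register budget yields a larger
// interleave count and therefore more values live per vector iteration.
// Simplified assumption, not LLVM's real heuristic.
#include <algorithm>

unsigned sketchInterleaveCount(unsigned TargetNumRegisters,  // as reported by TTI::getNumberOfRegisters
                               unsigned LoopRegisterUsage) { // live values needed per iteration
  // Dividing a raw VGPR count (e.g. 256) by a small per-iteration usage gives
  // a huge interleave count; a small budget such as 4 keeps it modest.
  unsigned IC = TargetNumRegisters / std::max(1u, LoopRegisterUsage);
  return std::max(1u, IC);
}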

File tree: 4 files changed, +53 −116 lines changed


llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp

Lines changed: 7 additions & 19 deletions
@@ -288,33 +288,21 @@ GCNTTIImpl::GCNTTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
     : BaseT(TM, F.getParent()->getDataLayout()),
       ST(static_cast<const GCNSubtarget *>(TM->getSubtargetImpl(F))),
       TLI(ST->getTargetLowering()), CommonTTI(TM, F),
-      IsGraphics(AMDGPU::isGraphics(F.getCallingConv())),
-      MaxVGPRs(ST->getMaxNumVGPRs(
-          std::max(ST->getWavesPerEU(F).first,
-                   ST->getWavesPerEUForWorkGroup(
-                       ST->getFlatWorkGroupSizes(F).second)))) {
+      IsGraphics(AMDGPU::isGraphics(F.getCallingConv())) {
   AMDGPU::SIModeRegisterDefaults Mode(F);
   HasFP32Denormals = Mode.allFP32Denormals();
   HasFP64FP16Denormals = Mode.allFP64FP16Denormals();
 }
 
-unsigned GCNTTIImpl::getHardwareNumberOfRegisters(bool Vec) const {
-  // The concept of vector registers doesn't really exist. Some packed vector
-  // operations operate on the normal 32-bit registers.
-  return MaxVGPRs;
-}
+unsigned GCNTTIImpl::getNumberOfRegisters(unsigned RCID) const {
+  // NB: RCID is not an RCID. In fact it is 0 or 1 for scalar or vector
+  // registers. See getRegisterClassForType for the implementation.
+  // In this case vector registers are not vector in terms of
+  // VGPRs, but those which can hold multiple values.
 
-unsigned GCNTTIImpl::getNumberOfRegisters(bool Vec) const {
   // This is really the number of registers to fill when vectorizing /
   // interleaving loops, so we lie to avoid trying to use all registers.
-  return getHardwareNumberOfRegisters(Vec) >> 3;
-}
-
-unsigned GCNTTIImpl::getNumberOfRegisters(unsigned RCID) const {
-  const SIRegisterInfo *TRI = ST->getRegisterInfo();
-  const TargetRegisterClass *RC = TRI->getRegClass(RCID);
-  unsigned NumVGPRs = (TRI->getRegSizeInBits(*RC) + 31) / 32;
-  return getHardwareNumberOfRegisters(false) / NumVGPRs;
+  return 4;
 }
 
 TypeSize
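As the new comment explains, the ClassID argument is effectively a 0/1 scalar-vs-vector discriminator handled by getRegisterClassForType, and both classes now report a deliberately small budget. A toy sketch of how a caller ends up with the value 4, using hypothetical stand-in functions rather than the real GCNTTIImpl members:

// Toy sketch under the 0/1 class-ID assumption stated in the comment above;
// hypothetical stand-ins, not LLVM's API.
unsigned sketchRegisterClassForType(bool IsVector) {
  return IsVector ? 1 : 0; // 0 = scalar class, 1 = vector class
}

unsigned sketchNumberOfRegisters(unsigned /*RCID*/) {
  return 4; // the deliberately small budget this commit introduces
}

unsigned sketchVectorRegisterBudget() {
  return sketchNumberOfRegisters(sketchRegisterClassForType(/*IsVector=*/true));
}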

llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h

Lines changed: 0 additions & 3 deletions
@@ -68,7 +68,6 @@ class GCNTTIImpl final : public BasicTTIImplBase<GCNTTIImpl> {
   bool IsGraphics;
   bool HasFP32Denormals;
   bool HasFP64FP16Denormals;
-  unsigned MaxVGPRs;
 
   static const FeatureBitset InlineFeatureIgnoreList;
 
@@ -113,8 +112,6 @@ class GCNTTIImpl final : public BasicTTIImplBase<GCNTTIImpl> {
     return TTI::PSK_FastHardware;
   }
 
-  unsigned getHardwareNumberOfRegisters(bool Vector) const;
-  unsigned getNumberOfRegisters(bool Vector) const;
   unsigned getNumberOfRegisters(unsigned RCID) const;
   TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind Vector) const;
   unsigned getMinVectorRegisterBitWidth() const;
New test file

Lines changed: 24 additions & 0 deletions
@@ -0,0 +1,24 @@
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a < %s -loop-vectorize -S | FileCheck -check-prefix=GFX90A %s
+
+; GFX90A-LABEL: @vectorize_v2f32_loop(
+; GFX90A-COUNT-2: load <2 x float>
+; GFX90A-COUNT-2: fadd fast <2 x float>
+
+define float @vectorize_v2f32_loop(float addrspace(1)* noalias %s) {
+entry:
+  br label %for.body
+
+for.body:
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %q.04 = phi float [ 0.0, %entry ], [ %add, %for.body ]
+  %arrayidx = getelementptr inbounds float, float addrspace(1)* %s, i64 %indvars.iv
+  %load = load float, float addrspace(1)* %arrayidx, align 4
+  %add = fadd fast float %q.04, %load
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond = icmp eq i64 %indvars.iv.next, 256
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+  %add.lcssa = phi float [ %add, %for.body ]
+  ret float %add.lcssa
+}

llvm/test/Transforms/LoopVectorize/AMDGPU/packed-math.ll

Lines changed: 22 additions & 94 deletions
@@ -11,65 +11,29 @@ define half @vectorize_v2f16_loop(half addrspace(1)* noalias %s) {
 ; GFX9-NEXT: br label [[VECTOR_BODY:%.*]]
 ; GFX9: vector.body:
 ; GFX9-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; GFX9-NEXT: [[VEC_PHI:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
-; GFX9-NEXT: [[VEC_PHI1:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
-; GFX9-NEXT: [[VEC_PHI2:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ]
-; GFX9-NEXT: [[VEC_PHI3:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ]
-; GFX9-NEXT: [[VEC_PHI4:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP20:%.*]], [[VECTOR_BODY]] ]
-; GFX9-NEXT: [[VEC_PHI5:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP21:%.*]], [[VECTOR_BODY]] ]
-; GFX9-NEXT: [[VEC_PHI6:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ]
-; GFX9-NEXT: [[VEC_PHI7:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP23:%.*]], [[VECTOR_BODY]] ]
+; GFX9-NEXT: [[VEC_PHI:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
+; GFX9-NEXT: [[VEC_PHI1:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
 ; GFX9-NEXT: [[TMP0:%.*]] = getelementptr inbounds half, half addrspace(1)* [[S:%.*]], i64 [[INDEX]]
 ; GFX9-NEXT: [[TMP1:%.*]] = bitcast half addrspace(1)* [[TMP0]] to <2 x half> addrspace(1)*
 ; GFX9-NEXT: [[WIDE_LOAD:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP1]], align 2
 ; GFX9-NEXT: [[TMP2:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 2
 ; GFX9-NEXT: [[TMP3:%.*]] = bitcast half addrspace(1)* [[TMP2]] to <2 x half> addrspace(1)*
-; GFX9-NEXT: [[WIDE_LOAD8:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP3]], align 2
-; GFX9-NEXT: [[TMP4:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 4
-; GFX9-NEXT: [[TMP5:%.*]] = bitcast half addrspace(1)* [[TMP4]] to <2 x half> addrspace(1)*
-; GFX9-NEXT: [[WIDE_LOAD9:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP5]], align 2
-; GFX9-NEXT: [[TMP6:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 6
-; GFX9-NEXT: [[TMP7:%.*]] = bitcast half addrspace(1)* [[TMP6]] to <2 x half> addrspace(1)*
-; GFX9-NEXT: [[WIDE_LOAD10:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP7]], align 2
-; GFX9-NEXT: [[TMP8:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 8
-; GFX9-NEXT: [[TMP9:%.*]] = bitcast half addrspace(1)* [[TMP8]] to <2 x half> addrspace(1)*
-; GFX9-NEXT: [[WIDE_LOAD11:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP9]], align 2
-; GFX9-NEXT: [[TMP10:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 10
-; GFX9-NEXT: [[TMP11:%.*]] = bitcast half addrspace(1)* [[TMP10]] to <2 x half> addrspace(1)*
-; GFX9-NEXT: [[WIDE_LOAD12:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP11]], align 2
-; GFX9-NEXT: [[TMP12:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 12
-; GFX9-NEXT: [[TMP13:%.*]] = bitcast half addrspace(1)* [[TMP12]] to <2 x half> addrspace(1)*
-; GFX9-NEXT: [[WIDE_LOAD13:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP13]], align 2
-; GFX9-NEXT: [[TMP14:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 14
-; GFX9-NEXT: [[TMP15:%.*]] = bitcast half addrspace(1)* [[TMP14]] to <2 x half> addrspace(1)*
-; GFX9-NEXT: [[WIDE_LOAD14:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP15]], align 2
-; GFX9-NEXT: [[TMP16]] = fadd fast <2 x half> [[VEC_PHI]], [[WIDE_LOAD]]
-; GFX9-NEXT: [[TMP17]] = fadd fast <2 x half> [[VEC_PHI1]], [[WIDE_LOAD8]]
-; GFX9-NEXT: [[TMP18]] = fadd fast <2 x half> [[VEC_PHI2]], [[WIDE_LOAD9]]
-; GFX9-NEXT: [[TMP19]] = fadd fast <2 x half> [[VEC_PHI3]], [[WIDE_LOAD10]]
-; GFX9-NEXT: [[TMP20]] = fadd fast <2 x half> [[VEC_PHI4]], [[WIDE_LOAD11]]
-; GFX9-NEXT: [[TMP21]] = fadd fast <2 x half> [[VEC_PHI5]], [[WIDE_LOAD12]]
-; GFX9-NEXT: [[TMP22]] = fadd fast <2 x half> [[VEC_PHI6]], [[WIDE_LOAD13]]
-; GFX9-NEXT: [[TMP23]] = fadd fast <2 x half> [[VEC_PHI7]], [[WIDE_LOAD14]]
-; GFX9-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
-; GFX9-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
-; GFX9-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; GFX9-NEXT: [[WIDE_LOAD2:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP3]], align 2
+; GFX9-NEXT: [[TMP4]] = fadd fast <2 x half> [[VEC_PHI]], [[WIDE_LOAD]]
+; GFX9-NEXT: [[TMP5]] = fadd fast <2 x half> [[VEC_PHI1]], [[WIDE_LOAD2]]
+; GFX9-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; GFX9-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
+; GFX9-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; GFX9: middle.block:
-; GFX9-NEXT: [[BIN_RDX:%.*]] = fadd fast <2 x half> [[TMP17]], [[TMP16]]
-; GFX9-NEXT: [[BIN_RDX15:%.*]] = fadd fast <2 x half> [[TMP18]], [[BIN_RDX]]
-; GFX9-NEXT: [[BIN_RDX16:%.*]] = fadd fast <2 x half> [[TMP19]], [[BIN_RDX15]]
-; GFX9-NEXT: [[BIN_RDX17:%.*]] = fadd fast <2 x half> [[TMP20]], [[BIN_RDX16]]
-; GFX9-NEXT: [[BIN_RDX18:%.*]] = fadd fast <2 x half> [[TMP21]], [[BIN_RDX17]]
-; GFX9-NEXT: [[BIN_RDX19:%.*]] = fadd fast <2 x half> [[TMP22]], [[BIN_RDX18]]
-; GFX9-NEXT: [[BIN_RDX20:%.*]] = fadd fast <2 x half> [[TMP23]], [[BIN_RDX19]]
-; GFX9-NEXT: [[TMP25:%.*]] = call fast half @llvm.vector.reduce.fadd.v2f16(half 0xH8000, <2 x half> [[BIN_RDX20]])
+; GFX9-NEXT: [[BIN_RDX:%.*]] = fadd fast <2 x half> [[TMP5]], [[TMP4]]
+; GFX9-NEXT: [[TMP7:%.*]] = call fast half @llvm.vector.reduce.fadd.v2f16(half 0xH8000, <2 x half> [[BIN_RDX]])
 ; GFX9-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
 ; GFX9: scalar.ph:
 ; GFX9-NEXT: br label [[FOR_BODY:%.*]]
 ; GFX9: for.body:
 ; GFX9-NEXT: br i1 undef, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
 ; GFX9: for.end:
-; GFX9-NEXT: [[ADD_LCSSA:%.*]] = phi half [ undef, [[FOR_BODY]] ], [ [[TMP25]], [[MIDDLE_BLOCK]] ]
+; GFX9-NEXT: [[ADD_LCSSA:%.*]] = phi half [ undef, [[FOR_BODY]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
 ; GFX9-NEXT: ret half [[ADD_LCSSA]]
 ;
 ; VI-LABEL: @vectorize_v2f16_loop(
@@ -79,65 +43,29 @@ define half @vectorize_v2f16_loop(half addrspace(1)* noalias %s) {
 ; VI-NEXT: br label [[VECTOR_BODY:%.*]]
 ; VI: vector.body:
 ; VI-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; VI-NEXT: [[VEC_PHI:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
-; VI-NEXT: [[VEC_PHI1:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
-; VI-NEXT: [[VEC_PHI2:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ]
-; VI-NEXT: [[VEC_PHI3:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ]
-; VI-NEXT: [[VEC_PHI4:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP20:%.*]], [[VECTOR_BODY]] ]
-; VI-NEXT: [[VEC_PHI5:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP21:%.*]], [[VECTOR_BODY]] ]
-; VI-NEXT: [[VEC_PHI6:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ]
-; VI-NEXT: [[VEC_PHI7:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP23:%.*]], [[VECTOR_BODY]] ]
+; VI-NEXT: [[VEC_PHI:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
+; VI-NEXT: [[VEC_PHI1:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
 ; VI-NEXT: [[TMP0:%.*]] = getelementptr inbounds half, half addrspace(1)* [[S:%.*]], i64 [[INDEX]]
 ; VI-NEXT: [[TMP1:%.*]] = bitcast half addrspace(1)* [[TMP0]] to <2 x half> addrspace(1)*
 ; VI-NEXT: [[WIDE_LOAD:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP1]], align 2
 ; VI-NEXT: [[TMP2:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 2
 ; VI-NEXT: [[TMP3:%.*]] = bitcast half addrspace(1)* [[TMP2]] to <2 x half> addrspace(1)*
-; VI-NEXT: [[WIDE_LOAD8:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP3]], align 2
-; VI-NEXT: [[TMP4:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 4
-; VI-NEXT: [[TMP5:%.*]] = bitcast half addrspace(1)* [[TMP4]] to <2 x half> addrspace(1)*
-; VI-NEXT: [[WIDE_LOAD9:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP5]], align 2
-; VI-NEXT: [[TMP6:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 6
-; VI-NEXT: [[TMP7:%.*]] = bitcast half addrspace(1)* [[TMP6]] to <2 x half> addrspace(1)*
-; VI-NEXT: [[WIDE_LOAD10:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP7]], align 2
-; VI-NEXT: [[TMP8:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 8
-; VI-NEXT: [[TMP9:%.*]] = bitcast half addrspace(1)* [[TMP8]] to <2 x half> addrspace(1)*
-; VI-NEXT: [[WIDE_LOAD11:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP9]], align 2
-; VI-NEXT: [[TMP10:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 10
-; VI-NEXT: [[TMP11:%.*]] = bitcast half addrspace(1)* [[TMP10]] to <2 x half> addrspace(1)*
-; VI-NEXT: [[WIDE_LOAD12:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP11]], align 2
-; VI-NEXT: [[TMP12:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 12
-; VI-NEXT: [[TMP13:%.*]] = bitcast half addrspace(1)* [[TMP12]] to <2 x half> addrspace(1)*
-; VI-NEXT: [[WIDE_LOAD13:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP13]], align 2
-; VI-NEXT: [[TMP14:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 14
-; VI-NEXT: [[TMP15:%.*]] = bitcast half addrspace(1)* [[TMP14]] to <2 x half> addrspace(1)*
-; VI-NEXT: [[WIDE_LOAD14:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP15]], align 2
-; VI-NEXT: [[TMP16]] = fadd fast <2 x half> [[VEC_PHI]], [[WIDE_LOAD]]
-; VI-NEXT: [[TMP17]] = fadd fast <2 x half> [[VEC_PHI1]], [[WIDE_LOAD8]]
-; VI-NEXT: [[TMP18]] = fadd fast <2 x half> [[VEC_PHI2]], [[WIDE_LOAD9]]
-; VI-NEXT: [[TMP19]] = fadd fast <2 x half> [[VEC_PHI3]], [[WIDE_LOAD10]]
-; VI-NEXT: [[TMP20]] = fadd fast <2 x half> [[VEC_PHI4]], [[WIDE_LOAD11]]
-; VI-NEXT: [[TMP21]] = fadd fast <2 x half> [[VEC_PHI5]], [[WIDE_LOAD12]]
-; VI-NEXT: [[TMP22]] = fadd fast <2 x half> [[VEC_PHI6]], [[WIDE_LOAD13]]
-; VI-NEXT: [[TMP23]] = fadd fast <2 x half> [[VEC_PHI7]], [[WIDE_LOAD14]]
-; VI-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
-; VI-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
-; VI-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; VI-NEXT: [[WIDE_LOAD2:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP3]], align 2
+; VI-NEXT: [[TMP4]] = fadd fast <2 x half> [[VEC_PHI]], [[WIDE_LOAD]]
+; VI-NEXT: [[TMP5]] = fadd fast <2 x half> [[VEC_PHI1]], [[WIDE_LOAD2]]
+; VI-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; VI-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
+; VI-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; VI: middle.block:
-; VI-NEXT: [[BIN_RDX:%.*]] = fadd fast <2 x half> [[TMP17]], [[TMP16]]
-; VI-NEXT: [[BIN_RDX15:%.*]] = fadd fast <2 x half> [[TMP18]], [[BIN_RDX]]
-; VI-NEXT: [[BIN_RDX16:%.*]] = fadd fast <2 x half> [[TMP19]], [[BIN_RDX15]]
-; VI-NEXT: [[BIN_RDX17:%.*]] = fadd fast <2 x half> [[TMP20]], [[BIN_RDX16]]
-; VI-NEXT: [[BIN_RDX18:%.*]] = fadd fast <2 x half> [[TMP21]], [[BIN_RDX17]]
-; VI-NEXT: [[BIN_RDX19:%.*]] = fadd fast <2 x half> [[TMP22]], [[BIN_RDX18]]
-; VI-NEXT: [[BIN_RDX20:%.*]] = fadd fast <2 x half> [[TMP23]], [[BIN_RDX19]]
-; VI-NEXT: [[TMP25:%.*]] = call fast half @llvm.vector.reduce.fadd.v2f16(half 0xH8000, <2 x half> [[BIN_RDX20]])
+; VI-NEXT: [[BIN_RDX:%.*]] = fadd fast <2 x half> [[TMP5]], [[TMP4]]
+; VI-NEXT: [[TMP7:%.*]] = call fast half @llvm.vector.reduce.fadd.v2f16(half 0xH8000, <2 x half> [[BIN_RDX]])
 ; VI-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
 ; VI: scalar.ph:
 ; VI-NEXT: br label [[FOR_BODY:%.*]]
 ; VI: for.body:
 ; VI-NEXT: br i1 undef, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
 ; VI: for.end:
-; VI-NEXT: [[ADD_LCSSA:%.*]] = phi half [ undef, [[FOR_BODY]] ], [ [[TMP25]], [[MIDDLE_BLOCK]] ]
+; VI-NEXT: [[ADD_LCSSA:%.*]] = phi half [ undef, [[FOR_BODY]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
 ; VI-NEXT: ret half [[ADD_LCSSA]]
 ;
; CI-LABEL: @vectorize_v2f16_loop(
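The effect of the smaller budget shows up directly in the updated CHECK lines above: the vector body now advances the induction variable by 4 elements per iteration instead of 16, i.e. the same <2 x half> vectorization factor with two interleaved copies rather than eight. A tiny arithmetic sketch of those counts (variable names are for illustration only):

#include <cstdio>

int main() {
  unsigned VF = 2;               // lanes per <2 x half> operation
  unsigned OldIC = 8, NewIC = 2; // interleave count before / after the fix
  // elements advanced per vector iteration = VF * interleave count
  std::printf("before: %u elements, after: %u elements\n", VF * OldIC, VF * NewIC);
  return 0;
}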
