Skip to content

Commit 287267c

Browse files
committed
[AArch64] Add SLP test for abs (NFC)
Differential Revision: https://reviews.llvm.org/D144376
1 parent 5d560b6 commit 287267c

File tree

1 file changed

+151
-0
lines changed
  • llvm/test/Transforms/SLPVectorizer/AArch64

1 file changed

+151
-0
lines changed
Lines changed: 151 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,151 @@
1+
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -S -mtriple=aarch64 -passes=slp-vectorizer | FileCheck %s

; Addressable global storage; each test below loads/stores consecutive
; elements (i64/i32/i16/i8) starting at @a's own address via constant GEPs.
@a = common global ptr null, align 8

; Scalar abs intrinsics used by the tests; the SLP vectorizer is expected to
; widen groups of these calls into the corresponding vector @llvm.abs.* forms.
; The i1 operand is the is_int_min_poison flag (false everywhere below, so
; abs(INT_MIN) is well-defined per the LangRef).
declare i64 @llvm.abs.i64(i64, i1)
declare i32 @llvm.abs.i32(i32, i1)
declare i16 @llvm.abs.i16(i16, i1)
declare i8 @llvm.abs.i8 (i8, i1)
; Two adjacent scalar i64 abs calls on @a should SLP-vectorize into a single
; <2 x i64> load / @llvm.abs.v2i64 / store sequence.
define void @abs_v2i64() {
; CHECK-LABEL: @abs_v2i64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr @a, align 8
; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i64> @llvm.abs.v2i64(<2 x i64> [[TMP0]], i1 false)
; CHECK-NEXT: store <2 x i64> [[TMP1]], ptr @a, align 8
; CHECK-NEXT: ret void
;
entry:
  ; Load lanes 0..1 from consecutive i64 slots of @a.
  %a0 = load i64, i64* getelementptr inbounds (i64, ptr @a, i64 0), align 8
  %a1 = load i64, i64* getelementptr inbounds (i64, ptr @a, i64 1), align 8
  ; Scalar abs per lane (i1 false: INT_MIN input is not poison).
  %r0 = call i64 @llvm.abs.i64(i64 %a0, i1 false)
  %r1 = call i64 @llvm.abs.i64(i64 %a1, i1 false)
  ; Store results back to the same slots.
  store i64 %r0, i64* getelementptr inbounds (i64, ptr @a, i64 0), align 8
  store i64 %r1, i64* getelementptr inbounds (i64, ptr @a, i64 1), align 8
  ret void
}
29+
30+
; Four adjacent scalar i32 abs calls on @a should SLP-vectorize into a single
; <4 x i32> load / @llvm.abs.v4i32 / store sequence.
define void @abs_v4i32() {
; CHECK-LABEL: @abs_v4i32(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr @a, align 8
; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i32> @llvm.abs.v4i32(<4 x i32> [[TMP0]], i1 false)
; CHECK-NEXT: store <4 x i32> [[TMP1]], ptr @a, align 8
; CHECK-NEXT: ret void
;
entry:
  ; Load lanes 0..3 from consecutive i32 slots of @a.
  %a0 = load i32, i32* getelementptr inbounds (i32, ptr @a, i64 0), align 8
  %a1 = load i32, i32* getelementptr inbounds (i32, ptr @a, i64 1), align 8
  %a2 = load i32, i32* getelementptr inbounds (i32, ptr @a, i64 2), align 8
  %a3 = load i32, i32* getelementptr inbounds (i32, ptr @a, i64 3), align 8
  ; Scalar abs per lane (i1 false: INT_MIN input is not poison).
  %r0 = call i32 @llvm.abs.i32(i32 %a0, i1 false)
  %r1 = call i32 @llvm.abs.i32(i32 %a1, i1 false)
  %r2 = call i32 @llvm.abs.i32(i32 %a2, i1 false)
  %r3 = call i32 @llvm.abs.i32(i32 %a3, i1 false)
  ; Store results back to the same slots.
  store i32 %r0, i32* getelementptr inbounds (i32, ptr @a, i64 0), align 8
  store i32 %r1, i32* getelementptr inbounds (i32, ptr @a, i64 1), align 8
  store i32 %r2, i32* getelementptr inbounds (i32, ptr @a, i64 2), align 8
  store i32 %r3, i32* getelementptr inbounds (i32, ptr @a, i64 3), align 8
  ret void
}
54+
55+
; Eight adjacent scalar i16 abs calls on @a should SLP-vectorize into a single
; <8 x i16> load / @llvm.abs.v8i16 / store sequence.
define void @abs_v8i16() {
; CHECK-LABEL: @abs_v8i16(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr @a, align 8
; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.abs.v8i16(<8 x i16> [[TMP0]], i1 false)
; CHECK-NEXT: store <8 x i16> [[TMP1]], ptr @a, align 8
; CHECK-NEXT: ret void
;
entry:
  ; Load lanes 0..7 from consecutive i16 slots of @a.
  %a0 = load i16, i16* getelementptr inbounds (i16, ptr @a, i64 0), align 8
  %a1 = load i16, i16* getelementptr inbounds (i16, ptr @a, i64 1), align 8
  %a2 = load i16, i16* getelementptr inbounds (i16, ptr @a, i64 2), align 8
  %a3 = load i16, i16* getelementptr inbounds (i16, ptr @a, i64 3), align 8
  %a4 = load i16, i16* getelementptr inbounds (i16, ptr @a, i64 4), align 8
  %a5 = load i16, i16* getelementptr inbounds (i16, ptr @a, i64 5), align 8
  %a6 = load i16, i16* getelementptr inbounds (i16, ptr @a, i64 6), align 8
  %a7 = load i16, i16* getelementptr inbounds (i16, ptr @a, i64 7), align 8
  ; Scalar abs per lane (i1 false: INT_MIN input is not poison).
  %r0 = call i16 @llvm.abs.i16(i16 %a0, i1 false)
  %r1 = call i16 @llvm.abs.i16(i16 %a1, i1 false)
  %r2 = call i16 @llvm.abs.i16(i16 %a2, i1 false)
  %r3 = call i16 @llvm.abs.i16(i16 %a3, i1 false)
  %r4 = call i16 @llvm.abs.i16(i16 %a4, i1 false)
  %r5 = call i16 @llvm.abs.i16(i16 %a5, i1 false)
  %r6 = call i16 @llvm.abs.i16(i16 %a6, i1 false)
  %r7 = call i16 @llvm.abs.i16(i16 %a7, i1 false)
  ; Store results back to the same slots.
  store i16 %r0, i16* getelementptr inbounds (i16, ptr @a, i64 0), align 8
  store i16 %r1, i16* getelementptr inbounds (i16, ptr @a, i64 1), align 8
  store i16 %r2, i16* getelementptr inbounds (i16, ptr @a, i64 2), align 8
  store i16 %r3, i16* getelementptr inbounds (i16, ptr @a, i64 3), align 8
  store i16 %r4, i16* getelementptr inbounds (i16, ptr @a, i64 4), align 8
  store i16 %r5, i16* getelementptr inbounds (i16, ptr @a, i64 5), align 8
  store i16 %r6, i16* getelementptr inbounds (i16, ptr @a, i64 6), align 8
  store i16 %r7, i16* getelementptr inbounds (i16, ptr @a, i64 7), align 8
  ret void
}
91+
92+
; Sixteen adjacent scalar i8 abs calls on @a should SLP-vectorize into a single
; <16 x i8> load / @llvm.abs.v16i8 / store sequence.
define void @abs_v16i8() {
; CHECK-LABEL: @abs_v16i8(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr @a, align 8
; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i8> @llvm.abs.v16i8(<16 x i8> [[TMP0]], i1 false)
; CHECK-NEXT: store <16 x i8> [[TMP1]], ptr @a, align 8
; CHECK-NEXT: ret void
;
entry:
  ; Load lanes 0..15 from consecutive i8 slots of @a.
  %a0 = load i8, i8* getelementptr inbounds (i8, ptr @a, i64 0), align 8
  %a1 = load i8, i8* getelementptr inbounds (i8, ptr @a, i64 1), align 8
  %a2 = load i8, i8* getelementptr inbounds (i8, ptr @a, i64 2), align 8
  %a3 = load i8, i8* getelementptr inbounds (i8, ptr @a, i64 3), align 8
  %a4 = load i8, i8* getelementptr inbounds (i8, ptr @a, i64 4), align 8
  %a5 = load i8, i8* getelementptr inbounds (i8, ptr @a, i64 5), align 8
  %a6 = load i8, i8* getelementptr inbounds (i8, ptr @a, i64 6), align 8
  %a7 = load i8, i8* getelementptr inbounds (i8, ptr @a, i64 7), align 8
  %a8 = load i8, i8* getelementptr inbounds (i8, ptr @a, i64 8), align 8
  %a9 = load i8, i8* getelementptr inbounds (i8, ptr @a, i64 9), align 8
  %a10 = load i8, i8* getelementptr inbounds (i8, ptr @a, i64 10), align 8
  %a11 = load i8, i8* getelementptr inbounds (i8, ptr @a, i64 11), align 8
  %a12 = load i8, i8* getelementptr inbounds (i8, ptr @a, i64 12), align 8
  %a13 = load i8, i8* getelementptr inbounds (i8, ptr @a, i64 13), align 8
  %a14 = load i8, i8* getelementptr inbounds (i8, ptr @a, i64 14), align 8
  %a15 = load i8, i8* getelementptr inbounds (i8, ptr @a, i64 15), align 8
  ; Scalar abs per lane (i1 false: INT_MIN input is not poison).
  %r0 = call i8 @llvm.abs.i8(i8 %a0, i1 false)
  %r1 = call i8 @llvm.abs.i8(i8 %a1, i1 false)
  %r2 = call i8 @llvm.abs.i8(i8 %a2, i1 false)
  %r3 = call i8 @llvm.abs.i8(i8 %a3, i1 false)
  %r4 = call i8 @llvm.abs.i8(i8 %a4, i1 false)
  %r5 = call i8 @llvm.abs.i8(i8 %a5, i1 false)
  %r6 = call i8 @llvm.abs.i8(i8 %a6, i1 false)
  %r7 = call i8 @llvm.abs.i8(i8 %a7, i1 false)
  %r8 = call i8 @llvm.abs.i8(i8 %a8, i1 false)
  %r9 = call i8 @llvm.abs.i8(i8 %a9, i1 false)
  %r10 = call i8 @llvm.abs.i8(i8 %a10, i1 false)
  %r11 = call i8 @llvm.abs.i8(i8 %a11, i1 false)
  %r12 = call i8 @llvm.abs.i8(i8 %a12, i1 false)
  %r13 = call i8 @llvm.abs.i8(i8 %a13, i1 false)
  %r14 = call i8 @llvm.abs.i8(i8 %a14, i1 false)
  %r15 = call i8 @llvm.abs.i8(i8 %a15, i1 false)
  ; Store results back to the same slots.
  store i8 %r0, i8* getelementptr inbounds (i8, ptr @a, i64 0), align 8
  store i8 %r1, i8* getelementptr inbounds (i8, ptr @a, i64 1), align 8
  store i8 %r2, i8* getelementptr inbounds (i8, ptr @a, i64 2), align 8
  store i8 %r3, i8* getelementptr inbounds (i8, ptr @a, i64 3), align 8
  store i8 %r4, i8* getelementptr inbounds (i8, ptr @a, i64 4), align 8
  store i8 %r5, i8* getelementptr inbounds (i8, ptr @a, i64 5), align 8
  store i8 %r6, i8* getelementptr inbounds (i8, ptr @a, i64 6), align 8
  store i8 %r7, i8* getelementptr inbounds (i8, ptr @a, i64 7), align 8
  store i8 %r8, i8* getelementptr inbounds (i8, ptr @a, i64 8), align 8
  store i8 %r9, i8* getelementptr inbounds (i8, ptr @a, i64 9), align 8
  store i8 %r10, i8* getelementptr inbounds (i8, ptr @a, i64 10), align 8
  store i8 %r11, i8* getelementptr inbounds (i8, ptr @a, i64 11), align 8
  store i8 %r12, i8* getelementptr inbounds (i8, ptr @a, i64 12), align 8
  store i8 %r13, i8* getelementptr inbounds (i8, ptr @a, i64 13), align 8
  store i8 %r14, i8* getelementptr inbounds (i8, ptr @a, i64 14), align 8
  store i8 %r15, i8* getelementptr inbounds (i8, ptr @a, i64 15), align 8
  ret void
}

0 commit comments

Comments
 (0)