// RUN: %clang_cc1 -fms-extensions -triple x86_64-windows-msvc %s -emit-llvm -o - | FileCheck %s --check-prefix=X64
// RUN: %clang_cc1 -fms-extensions -triple thumbv7-windows-msvc %s -emit-llvm -o - | FileCheck %s --check-prefix=ARM
- // RUN: %clang_cc1 -fms-extensions -triple aarch64-windows-msvc %s -emit-llvm -o - | FileCheck %s --check-prefix=ARM
+ // RUN: %clang_cc1 -fms-extensions -triple aarch64-windows-msvc %s -emit-llvm -o - | FileCheck %s --check-prefix=ARM64 --check-prefix=ARM

volatile unsigned char sink = 0;
void test32(long *base, long idx) {
@@ -10,7 +10,6 @@ void test32(long *base, long idx) {
sink = _bittestandset(base, idx);
sink = _interlockedbittestandreset(base, idx);
sink = _interlockedbittestandset(base, idx);
- sink = _interlockedbittestandset(base, idx);
}

void test64(__int64 *base, __int64 idx) {
@@ -33,6 +32,17 @@ void test_arm(long *base, long idx) {
}
#endif

+ #if defined(_M_ARM64)
+ void test_arm64(__int64 *base, __int64 idx) {
+ sink = _interlockedbittestandreset64_acq(base, idx);
+ sink = _interlockedbittestandreset64_rel(base, idx);
+ sink = _interlockedbittestandreset64_nf(base, idx);
+ sink = _interlockedbittestandset64_acq(base, idx);
+ sink = _interlockedbittestandset64_rel(base, idx);
+ sink = _interlockedbittestandset64_nf(base, idx);
+ }
+ #endif
+
// X64-LABEL: define dso_local void @test32(ptr noundef %base, i32 noundef %idx)
// X64: call i8 asm sideeffect "btl $2, ($1)", "={@ccc},r,r,~{cc},~{memory},~{dirflag},~{fpsr},~{flags}"(ptr %{{.*}}, i32 {{.*}})
// X64: call i8 asm sideeffect "btcl $2, ($1)", "={@ccc},r,r,~{cc},~{memory},~{dirflag},~{fpsr},~{flags}"(ptr %{{.*}}, i32 {{.*}})
@@ -117,13 +127,122 @@ void test_arm(long *base, long idx) {
// ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1
// ARM: store volatile i8 %[[RES]], ptr @sink, align 1

+ // ARM-LABEL: define dso_local {{.*}}void @test64(ptr noundef %base, i64 noundef %idx)
+ // ARM: %[[IDXHI:[^ ]*]] = ashr i64 %{{.*}}, 3
+ // ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, ptr %{{.*}}, i64 %[[IDXHI]]
+ // ARM: %[[IDX8:[^ ]*]] = trunc i64 %{{.*}} to i8
+ // ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7
+ // ARM: %[[BYTE:[^ ]*]] = load i8, ptr %[[BYTEADDR]], align 1
+ // ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]]
+ // ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1
+ // ARM: store volatile i8 %[[RES]], ptr @sink, align 1

- // Just look for the atomicrmw instructions.
+ // ARM: %[[IDXHI:[^ ]*]] = ashr i64 %{{.*}}, 3
+ // ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, ptr %{{.*}}, i64 %[[IDXHI]]
+ // ARM: %[[IDX8:[^ ]*]] = trunc i64 %{{.*}} to i8
+ // ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7
+ // ARM: %[[MASK:[^ ]*]] = shl i8 1, %[[IDXLO]]
+ // ARM: %[[BYTE:[^ ]*]] = load i8, ptr %[[BYTEADDR]], align 1
+ // ARM: %[[NEWBYTE:[^ ]*]] = xor i8 %[[BYTE]], %[[MASK]]
+ // ARM: store i8 %[[NEWBYTE]], ptr %[[BYTEADDR]], align 1
+ // ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]]
+ // ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1
+ // ARM: store volatile i8 %[[RES]], ptr @sink, align 1
+
+ // ARM: %[[IDXHI:[^ ]*]] = ashr i64 %{{.*}}, 3
+ // ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, ptr %{{.*}}, i64 %[[IDXHI]]
+ // ARM: %[[IDX8:[^ ]*]] = trunc i64 %{{.*}} to i8
+ // ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7
+ // ARM: %[[MASK:[^ ]*]] = shl i8 1, %[[IDXLO]]
+ // ARM: %[[BYTE:[^ ]*]] = load i8, ptr %[[BYTEADDR]], align 1
+ // ARM: %[[NOTMASK:[^ ]*]] = xor i8 %[[MASK]], -1
+ // ARM: %[[NEWBYTE:[^ ]*]] = and i8 %[[BYTE]], %[[NOTMASK]]
+ // ARM: store i8 %[[NEWBYTE]], ptr %[[BYTEADDR]], align 1
+ // ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]]
+ // ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1
+ // ARM: store volatile i8 %[[RES]], ptr @sink, align 1
+
+ // ARM: %[[IDXHI:[^ ]*]] = ashr i64 %{{.*}}, 3
+ // ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, ptr %{{.*}}, i64 %[[IDXHI]]
+ // ARM: %[[IDX8:[^ ]*]] = trunc i64 %{{.*}} to i8
+ // ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7
+ // ARM: %[[MASK:[^ ]*]] = shl i8 1, %[[IDXLO]]
+ // ARM: %[[BYTE:[^ ]*]] = load i8, ptr %[[BYTEADDR]], align 1
+ // ARM: %[[NEWBYTE:[^ ]*]] = or i8 %[[BYTE]], %[[MASK]]
+ // ARM: store i8 %[[NEWBYTE]], ptr %[[BYTEADDR]], align 1
+ // ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]]
+ // ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1
+ // ARM: store volatile i8 %[[RES]], ptr @sink, align 1
+
+ // ARM: %[[IDXHI:[^ ]*]] = ashr i64 %{{.*}}, 3
+ // ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, ptr %{{.*}}, i64 %[[IDXHI]]
+ // ARM: %[[IDX8:[^ ]*]] = trunc i64 %{{.*}} to i8
+ // ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7
+ // ARM: %[[MASK:[^ ]*]] = shl i8 1, %[[IDXLO]]
+ // ARM: %[[NOTMASK:[^ ]*]] = xor i8 %[[MASK]], -1
+ // ARM: %[[BYTE:[^ ]*]] = atomicrmw and ptr %[[BYTEADDR]], i8 %[[NOTMASK]] seq_cst, align 1
+ // ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]]
+ // ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1
+ // ARM: store volatile i8 %[[RES]], ptr @sink, align 1
+
+ // ARM: %[[IDXHI:[^ ]*]] = ashr i64 %{{.*}}, 3
+ // ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, ptr %{{.*}}, i64 %[[IDXHI]]
+ // ARM: %[[IDX8:[^ ]*]] = trunc i64 %{{.*}} to i8
+ // ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7
+ // ARM: %[[MASK:[^ ]*]] = shl i8 1, %[[IDXLO]]
+ // ARM: %[[BYTE:[^ ]*]] = atomicrmw or ptr %[[BYTEADDR]], i8 %[[MASK]] seq_cst, align 1
+ // ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]]
+ // ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1
+ // ARM: store volatile i8 %[[RES]], ptr @sink, align 1

// ARM-LABEL: define dso_local {{.*}}void @test_arm(ptr noundef %base, i32 noundef %idx)
- // ARM: atomicrmw and ptr %{{.*}}, i8 {{.*}} acquire, align 1
+ // ARM: %[[IDXHI:[^ ]*]] = ashr i32 %{{.*}}, 3
+ // ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, ptr %{{.*}}, i32 %[[IDXHI]]
+ // ARM: %[[IDX8:[^ ]*]] = trunc i32 %{{.*}} to i8
+ // ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7
+ // ARM: %[[MASK:[^ ]*]] = shl i8 1, %[[IDXLO]]
+ // ARM: %[[NOTMASK:[^ ]*]] = xor i8 %[[MASK]], -1
+ // ARM: %[[BYTE:[^ ]*]] = atomicrmw and ptr %[[BYTEADDR]], i8 %[[NOTMASK]] acquire, align 1
+ // ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]]
+ // ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1
+ // ARM: store volatile i8 %[[RES]], ptr @sink, align 1
+ // Just look for the atomicrmw instructions.
// ARM: atomicrmw and ptr %{{.*}}, i8 {{.*}} release, align 1
// ARM: atomicrmw and ptr %{{.*}}, i8 {{.*}} monotonic, align 1
- // ARM: atomicrmw or ptr %{{.*}}, i8 {{.*}} acquire, align 1
+ // ARM: %[[IDXHI:[^ ]*]] = ashr i32 %{{.*}}, 3
+ // ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, ptr %{{.*}}, i32 %[[IDXHI]]
+ // ARM: %[[IDX8:[^ ]*]] = trunc i32 %{{.*}} to i8
+ // ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7
+ // ARM: %[[MASK:[^ ]*]] = shl i8 1, %[[IDXLO]]
+ // ARM: %[[BYTE:[^ ]*]] = atomicrmw or ptr %[[BYTEADDR]], i8 %[[MASK]] acquire, align 1
+ // ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]]
+ // ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1
+ // ARM: store volatile i8 %[[RES]], ptr @sink, align 1
+ // Just look for the atomicrmw instructions.
// ARM: atomicrmw or ptr %{{.*}}, i8 {{.*}} release, align 1
// ARM: atomicrmw or ptr %{{.*}}, i8 {{.*}} monotonic, align 1
+
+ // ARM64-LABEL: define dso_local void @test_arm64(ptr noundef %base, i64 noundef %idx)
+ // ARM64: %[[IDXHI:[^ ]*]] = ashr i64 %{{.*}}, 3
+ // ARM64: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, ptr %{{.*}}, i64 %[[IDXHI]]
+ // ARM64: %[[IDX8:[^ ]*]] = trunc i64 %{{.*}} to i8
+ // ARM64: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7
+ // ARM64: %[[MASK:[^ ]*]] = shl i8 1, %[[IDXLO]]
+ // ARM64: %[[NOTMASK:[^ ]*]] = xor i8 %[[MASK]], -1
+ // ARM64: %[[BYTE:[^ ]*]] = atomicrmw and ptr %[[BYTEADDR]], i8 %[[NOTMASK]] acquire, align 1
+ // ARM64: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]]
+ // ARM64: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1
+ // ARM64: store volatile i8 %[[RES]], ptr @sink, align 1
+ // ARM64: atomicrmw and ptr %{{.*}}, i8 {{.*}} release, align 1
+ // ARM64: atomicrmw and ptr %{{.*}}, i8 {{.*}} monotonic, align 1
+ // ARM64: %[[IDXHI:[^ ]*]] = ashr i64 %{{.*}}, 3
+ // ARM64: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, ptr %{{.*}}, i64 %[[IDXHI]]
+ // ARM64: %[[IDX8:[^ ]*]] = trunc i64 %{{.*}} to i8
+ // ARM64: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7
+ // ARM64: %[[MASK:[^ ]*]] = shl i8 1, %[[IDXLO]]
+ // ARM64: %[[BYTE:[^ ]*]] = atomicrmw or ptr %[[BYTEADDR]], i8 %[[MASK]] acquire, align 1
+ // ARM64: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]]
+ // ARM64: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1
+ // ARM64: store volatile i8 %[[RES]], ptr @sink, align 1
+ // ARM64: atomicrmw or ptr %{{.*}}, i8 {{.*}} release, align 1
+ // ARM64: atomicrmw or ptr %{{.*}}, i8 {{.*}} monotonic, align 1
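
For reference, the lowering these ARM64 CHECK lines assert can be modeled in plain C. This is a hand-written sketch, not part of the test and not MSVC's actual implementation: the helper name is invented, and the Clang/GCC __atomic_fetch_or builtin stands in for the atomicrmw that the compiler emits for the intrinsic.

#include <stdint.h>

/* Hypothetical model of _interlockedbittestandset64_acq(base, idx):
 * address the byte that holds the bit, atomically OR in the bit's mask
 * with acquire ordering, and return the bit's previous value. Each step
 * mirrors one CHECK line above. */
static unsigned char model_bittestandset64_acq(int64_t *base, int64_t idx) {
  unsigned char *byteaddr = (unsigned char *)base + (idx >> 3); /* ashr + getelementptr */
  unsigned char bitpos = (unsigned char)idx & 7;                /* trunc + and i8 ..., 7 */
  unsigned char mask = (unsigned char)(1u << bitpos);           /* shl i8 1, ... */
  unsigned char old =
      __atomic_fetch_or(byteaddr, mask, __ATOMIC_ACQUIRE);      /* atomicrmw or ... acquire */
  return (old >> bitpos) & 1;                                   /* lshr + and i8 ..., 1 */
}

The _rel and _nf suffixes change only the memory order (release and relaxed, matching the release and monotonic atomicrmw lines checked above), and the _interlockedbittestandreset64_* variants atomically AND with the inverted mask instead of ORing, as in the NOTMASK sequence.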