Commit e249281

Update tests to be more amenable to testing SME.
Add a macro to allow testing of streaming mode. Split the sve2p1 load/store tests to separate the struct variants, which are available to sme2p1, from the gather/scatter variants, which are not.
1 parent: 2cde41d

17 files changed: 3536 additions, 3386 deletions
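
The recurring change across these files is a small preprocessor guard that selects a streaming attribute from the feature macros the compiler predefines, so one test source serves SVE-only, SME-only, and combined builds. The sketch below is illustrative rather than part of the commit: the wrapper name sketch_test and the compile-line comment are assumptions, while the guard itself is copied verbatim from the diffs.

#include <arm_sve.h>

// Copied from the diffs below: pick the attribute from the target features.
#if defined(__ARM_FEATURE_SME) && defined(__ARM_FEATURE_SVE)
#define ATTR __arm_streaming_compatible // SVE+SME: valid in and out of streaming mode
#elif defined(__ARM_FEATURE_SME)
#define ATTR __arm_streaming            // SME only: must execute in streaming mode
#else
#define ATTR                            // SVE only: no attribute required
#endif

// Illustrative wrapper (hypothetical, not from the commit). When the test is
// built with SME enabled (e.g. a RUN line passing -target-feature +sme, an
// assumption about the build line, which this page does not show),
// __ARM_FEATURE_SME is predefined, ATTR expands to __arm_streaming, and the
// same body is type-checked as streaming code.
svint64_t sketch_test(svint64_t op1, svint64_t op2) ATTR {
  return svrax1_s64(op1, op2); // same intrinsic as acle_sve2_rax1.c below
}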

clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_rax1.c

Lines changed: 10 additions & 2 deletions
@@ -15,6 +15,14 @@
 #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
 #endif
 
+#if defined(__ARM_FEATURE_SME) && defined(__ARM_FEATURE_SVE)
+#define ATTR __arm_streaming_compatible
+#elif defined(__ARM_FEATURE_SME)
+#define ATTR __arm_streaming
+#else
+#define ATTR
+#endif
+
 // CHECK-LABEL: @test_svrax1_s64(
 // CHECK-NEXT: entry:
 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.rax1(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
@@ -25,7 +33,7 @@
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.rax1(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
 // CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
 //
-svint64_t test_svrax1_s64(svint64_t op1, svint64_t op2)
+svint64_t test_svrax1_s64(svint64_t op1, svint64_t op2) ATTR
 {
   return SVE_ACLE_FUNC(svrax1,_s64,,)(op1, op2);
 }
@@ -40,7 +48,7 @@ svint64_t test_svrax1_s64(svint64_t op1, svint64_t op2)
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.rax1(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
 // CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
 //
-svuint64_t test_svrax1_u64(svuint64_t op1, svuint64_t op2)
+svuint64_t test_svrax1_u64(svuint64_t op1, svuint64_t op2) ATTR
 {
   return SVE_ACLE_FUNC(svrax1,_u64,,)(op1, op2);
 }

clang/test/CodeGen/AArch64/sve2p1-intrinsics/acle_sve2p1_dupq.c

Lines changed: 21 additions & 13 deletions
@@ -20,6 +20,14 @@
 #define SVE_ACLE_FUNC(A1, A2) A1##A2
 #endif
 
+#if defined(__ARM_FEATURE_SME) && defined(__ARM_FEATURE_SVE)
+#define ATTR __arm_streaming_compatible
+#elif defined(__ARM_FEATURE_SME)
+#define ATTR __arm_streaming
+#else
+#define ATTR
+#endif
+
 // CHECK-LABEL: define dso_local <vscale x 16 x i8> @test_svdup_laneq_s8
 // CHECK-SAME: (<vscale x 16 x i8> [[ZN:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-NEXT: entry:
@@ -32,7 +40,7 @@
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.laneq.nxv16i8(<vscale x 16 x i8> [[ZN]], i32 0)
 // CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
 //
-svint8_t test_svdup_laneq_s8(svint8_t zn) {
+svint8_t test_svdup_laneq_s8(svint8_t zn) ATTR {
   return SVE_ACLE_FUNC(svdup_laneq, _s8)(zn, 0);
 }
 
@@ -48,7 +56,7 @@ svint8_t test_svdup_laneq_s8(svint8_t zn) {
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.laneq.nxv16i8(<vscale x 16 x i8> [[ZN]], i32 15)
 // CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
 //
-svuint8_t test_svdup_laneq_u8(svuint8_t zn) {
+svuint8_t test_svdup_laneq_u8(svuint8_t zn) ATTR {
   return SVE_ACLE_FUNC(svdup_laneq, _u8)(zn, 15);
 }
 
@@ -64,7 +72,7 @@ svuint8_t test_svdup_laneq_u8(svuint8_t zn) {
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.laneq.nxv8i16(<vscale x 8 x i16> [[ZN]], i32 1)
 // CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
 //
-svint16_t test_svdup_laneq_s16(svint16_t zn) {
+svint16_t test_svdup_laneq_s16(svint16_t zn) ATTR {
   return SVE_ACLE_FUNC(svdup_laneq, _s16)(zn, 1);
 }
 
@@ -80,7 +88,7 @@ svint16_t test_svdup_laneq_s16(svint16_t zn) {
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.laneq.nxv8i16(<vscale x 8 x i16> [[ZN]], i32 7)
 // CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
 //
-svuint16_t test_svdup_laneq_u16(svuint16_t zn) {
+svuint16_t test_svdup_laneq_u16(svuint16_t zn) ATTR {
   return SVE_ACLE_FUNC(svdup_laneq, _u16)(zn, 7);
 }
 
@@ -96,7 +104,7 @@ svuint16_t test_svdup_laneq_u16(svuint16_t zn) {
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.laneq.nxv4i32(<vscale x 4 x i32> [[ZN]], i32 2)
 // CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
 //
-svint32_t test_svdup_laneq_s32(svint32_t zn) {
+svint32_t test_svdup_laneq_s32(svint32_t zn) ATTR {
   return SVE_ACLE_FUNC(svdup_laneq, _s32)(zn, 2);
 }
 
@@ -112,7 +120,7 @@ svint32_t test_svdup_laneq_s32(svint32_t zn) {
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.laneq.nxv4i32(<vscale x 4 x i32> [[ZN]], i32 3)
 // CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
 //
-svuint32_t test_svdup_laneq_u32(svuint32_t zn) {
+svuint32_t test_svdup_laneq_u32(svuint32_t zn) ATTR {
   return SVE_ACLE_FUNC(svdup_laneq, _u32)(zn, 3);
 }
 
@@ -128,7 +136,7 @@ svuint32_t test_svdup_laneq_u32(svuint32_t zn) {
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.laneq.nxv2i64(<vscale x 2 x i64> [[ZN]], i32 0)
 // CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
 //
-svint64_t test_svdup_laneq_s64(svint64_t zn) {
+svint64_t test_svdup_laneq_s64(svint64_t zn) ATTR {
   return SVE_ACLE_FUNC(svdup_laneq, _s64)(zn, 0);
 }
 
@@ -144,7 +152,7 @@ svint64_t test_svdup_laneq_s64(svint64_t zn) {
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.laneq.nxv2i64(<vscale x 2 x i64> [[ZN]], i32 1)
 // CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
 //
-svuint64_t test_svdup_laneq_u64(svuint64_t zn) {
+svuint64_t test_svdup_laneq_u64(svuint64_t zn) ATTR {
   return SVE_ACLE_FUNC(svdup_laneq, _u64)(zn, 1);
 }
 
@@ -160,7 +168,7 @@ svuint64_t test_svdup_laneq_u64(svuint64_t zn) {
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dup.laneq.nxv8f16(<vscale x 8 x half> [[ZN]], i32 4)
 // CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP0]]
 //
-svfloat16_t test_svdup_laneq_f16(svfloat16_t zn) {
+svfloat16_t test_svdup_laneq_f16(svfloat16_t zn) ATTR {
   return SVE_ACLE_FUNC(svdup_laneq, _f16)(zn, 4);
 }
 
@@ -176,7 +184,7 @@ svfloat16_t test_svdup_laneq_f16(svfloat16_t zn) {
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.dup.laneq.nxv4f32(<vscale x 4 x float> [[ZN]], i32 1)
 // CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP0]]
 //
-svfloat32_t test_svdup_laneq_f32(svfloat32_t zn) {
+svfloat32_t test_svdup_laneq_f32(svfloat32_t zn) ATTR {
   return SVE_ACLE_FUNC(svdup_laneq, _f32)(zn, 1);
 }
 
@@ -192,7 +200,7 @@ svfloat32_t test_svdup_laneq_f32(svfloat32_t zn) {
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.dup.laneq.nxv2f64(<vscale x 2 x double> [[ZN]], i32 1)
 // CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP0]]
 //
-svfloat64_t test_svdup_laneq_f64(svfloat64_t zn) {
+svfloat64_t test_svdup_laneq_f64(svfloat64_t zn) ATTR {
   return SVE_ACLE_FUNC(svdup_laneq, _f64)(zn, 1);
 }
 
@@ -208,7 +216,7 @@ svfloat64_t test_svdup_laneq_f64(svfloat64_t zn) {
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> @llvm.aarch64.sve.dup.laneq.nxv8bf16(<vscale x 8 x bfloat> [[ZN]], i32 3)
 // CPP-CHECK-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
 //
-svbfloat16_t test_svdup_laneq_bf16(svbfloat16_t zn) {
+svbfloat16_t test_svdup_laneq_bf16(svbfloat16_t zn) ATTR {
   return SVE_ACLE_FUNC(svdup_laneq, _bf16)(zn, 3);
 }
 
@@ -224,6 +232,6 @@ svbfloat16_t test_svdup_laneq_bf16(svbfloat16_t zn) {
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.laneq.nxv16i8(<vscale x 16 x i8> [[ZN]], i32 1)
 // CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
 //
-svmfloat8_t test_svdup_laneq_mf8(svmfloat8_t zn) {
+svmfloat8_t test_svdup_laneq_mf8(svmfloat8_t zn) ATTR {
   return SVE_ACLE_FUNC(svdup_laneq, _mf8)(zn, 1);
 }

clang/test/CodeGen/AArch64/sve2p1-intrinsics/acle_sve2p1_extq.c

Lines changed: 21 additions & 13 deletions
@@ -20,6 +20,14 @@
 #define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4
 #endif
 
+#if defined(__ARM_FEATURE_SME) && defined(__ARM_FEATURE_SVE)
+#define ATTR __arm_streaming_compatible
+#elif defined(__ARM_FEATURE_SME)
+#define ATTR __arm_streaming
+#else
+#define ATTR
+#endif
+
 // CHECK-LABEL: define dso_local <vscale x 16 x i8> @test_svextq_u8
 // CHECK-SAME: (<vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-NEXT: entry:
@@ -32,7 +40,7 @@
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.extq.nxv16i8(<vscale x 16 x i8> [[ZN]], <vscale x 16 x i8> [[ZM]], i32 0)
 // CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
 //
-svuint8_t test_svextq_u8(svuint8_t zn, svuint8_t zm) {
+svuint8_t test_svextq_u8(svuint8_t zn, svuint8_t zm) ATTR {
   return SVE_ACLE_FUNC(svextq, _u8,,)(zn, zm, 0);
 }
 
@@ -48,7 +56,7 @@ svuint8_t test_svextq_u8(svuint8_t zn, svuint8_t zm) {
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.extq.nxv16i8(<vscale x 16 x i8> [[ZN]], <vscale x 16 x i8> [[ZM]], i32 4)
 // CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
 //
-svint8_t test_svextq_s8(svint8_t zn, svint8_t zm) {
+svint8_t test_svextq_s8(svint8_t zn, svint8_t zm) ATTR {
   return SVE_ACLE_FUNC(svextq, _s8,,)(zn, zm, 4);
 }
 
@@ -64,7 +72,7 @@ svint8_t test_svextq_s8(svint8_t zn, svint8_t zm) {
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.extq.nxv8i16(<vscale x 8 x i16> [[ZN]], <vscale x 8 x i16> [[ZM]], i32 1)
 // CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
 //
-svuint16_t test_svextq_u16(svuint16_t zn, svuint16_t zm) {
+svuint16_t test_svextq_u16(svuint16_t zn, svuint16_t zm) ATTR {
   return SVE_ACLE_FUNC(svextq, _u16,,)(zn, zm, 1);
 }
 
@@ -80,7 +88,7 @@ svuint16_t test_svextq_u16(svuint16_t zn, svuint16_t zm) {
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.extq.nxv8i16(<vscale x 8 x i16> [[ZN]], <vscale x 8 x i16> [[ZM]], i32 5)
 // CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
 //
-svint16_t test_svextq_s16(svint16_t zn, svint16_t zm) {
+svint16_t test_svextq_s16(svint16_t zn, svint16_t zm) ATTR {
   return SVE_ACLE_FUNC(svextq, _s16,,)(zn, zm, 5);
 }
 
@@ -96,7 +104,7 @@ svint16_t test_svextq_s16(svint16_t zn, svint16_t zm) {
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.extq.nxv4i32(<vscale x 4 x i32> [[ZN]], <vscale x 4 x i32> [[ZM]], i32 2)
 // CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
 //
-svuint32_t test_svextq_u32(svuint32_t zn, svuint32_t zm) {
+svuint32_t test_svextq_u32(svuint32_t zn, svuint32_t zm) ATTR {
   return SVE_ACLE_FUNC(svextq, _u32,,)(zn, zm, 2);
 }
 
@@ -112,7 +120,7 @@ svuint32_t test_svextq_u32(svuint32_t zn, svuint32_t zm) {
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.extq.nxv4i32(<vscale x 4 x i32> [[ZN]], <vscale x 4 x i32> [[ZM]], i32 3)
 // CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
 //
-svint32_t test_svextq_s32(svint32_t zn, svint32_t zm) {
+svint32_t test_svextq_s32(svint32_t zn, svint32_t zm) ATTR {
   return SVE_ACLE_FUNC(svextq, _s32,,)(zn, zm, 3);
 }
 
@@ -128,7 +136,7 @@ svint32_t test_svextq_s32(svint32_t zn, svint32_t zm) {
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.extq.nxv2i64(<vscale x 2 x i64> [[ZN]], <vscale x 2 x i64> [[ZM]], i32 1)
 // CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
 //
-svuint64_t test_svextq_u64(svuint64_t zn, svuint64_t zm) {
+svuint64_t test_svextq_u64(svuint64_t zn, svuint64_t zm) ATTR {
   return SVE_ACLE_FUNC(svextq, _u64,,)(zn, zm, 1);
 }
 
@@ -144,7 +152,7 @@ svuint64_t test_svextq_u64(svuint64_t zn, svuint64_t zm) {
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.extq.nxv2i64(<vscale x 2 x i64> [[ZN]], <vscale x 2 x i64> [[ZM]], i32 0)
 // CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
 //
-svint64_t test_svextq_s64(svint64_t zn, svint64_t zm) {
+svint64_t test_svextq_s64(svint64_t zn, svint64_t zm) ATTR {
   return SVE_ACLE_FUNC(svextq, _s64,,)(zn, zm, 0);
 }
 
@@ -160,7 +168,7 @@ svint64_t test_svextq_s64(svint64_t zn, svint64_t zm) {
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.extq.nxv8f16(<vscale x 8 x half> [[ZN]], <vscale x 8 x half> [[ZM]], i32 7)
 // CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP0]]
 //
-svfloat16_t test_svextq_f16(svfloat16_t zn, svfloat16_t zm) {
+svfloat16_t test_svextq_f16(svfloat16_t zn, svfloat16_t zm) ATTR {
   return SVE_ACLE_FUNC(svextq, _f16,,)(zn, zm, 7);
 }
 
@@ -176,7 +184,7 @@ svfloat16_t test_svextq_f16(svfloat16_t zn, svfloat16_t zm) {
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.extq.nxv4f32(<vscale x 4 x float> [[ZN]], <vscale x 4 x float> [[ZM]], i32 2)
 // CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP0]]
 //
-svfloat32_t test_svextq_f32(svfloat32_t zn, svfloat32_t zm) {
+svfloat32_t test_svextq_f32(svfloat32_t zn, svfloat32_t zm) ATTR {
   return SVE_ACLE_FUNC(svextq, _f32,,)(zn, zm, 2);
 }
 
@@ -192,7 +200,7 @@ svfloat32_t test_svextq_f32(svfloat32_t zn, svfloat32_t zm) {
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.extq.nxv2f64(<vscale x 2 x double> [[ZN]], <vscale x 2 x double> [[ZM]], i32 0)
 // CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP0]]
 //
-svfloat64_t test_svextq_f64(svfloat64_t zn, svfloat64_t zm) {
+svfloat64_t test_svextq_f64(svfloat64_t zn, svfloat64_t zm) ATTR {
   return SVE_ACLE_FUNC(svextq, _f64,,)(zn, zm, 0);
 }
 
@@ -208,7 +216,7 @@ svfloat64_t test_svextq_f64(svfloat64_t zn, svfloat64_t zm) {
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> @llvm.aarch64.sve.extq.nxv8bf16(<vscale x 8 x bfloat> [[ZN]], <vscale x 8 x bfloat> [[ZM]], i32 6)
 // CPP-CHECK-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
 //
-svbfloat16_t test_svextq_bf16(svbfloat16_t zn, svbfloat16_t zm) {
+svbfloat16_t test_svextq_bf16(svbfloat16_t zn, svbfloat16_t zm) ATTR {
   return SVE_ACLE_FUNC(svextq, _bf16,,)(zn, zm, 6);
 }
 
@@ -224,6 +232,6 @@ svbfloat16_t test_svextq_bf16(svbfloat16_t zn, svbfloat16_t zm) {
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.extq.nxv16i8(<vscale x 16 x i8> [[ZN]], <vscale x 16 x i8> [[ZM]], i32 6)
 // CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
 //
-svmfloat8_t test_svextq_mf8(svmfloat8_t zn, svmfloat8_t zm) {
+svmfloat8_t test_svextq_mf8(svmfloat8_t zn, svmfloat8_t zm) ATTR {
   return SVE_ACLE_FUNC(svextq, _mf8,,)(zn, zm, 6);
 }
