@@ -324,50 +324,6 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
         if (useSVEForFixedLengthVectorVT(VT))
           addRegisterClass(VT, &AArch64::ZPRRegClass);
     }
-
-    for (auto VT : { MVT::nxv16i8, MVT::nxv8i16, MVT::nxv4i32, MVT::nxv2i64 }) {
-      setOperationAction(ISD::SADDSAT, VT, Legal);
-      setOperationAction(ISD::UADDSAT, VT, Legal);
-      setOperationAction(ISD::SSUBSAT, VT, Legal);
-      setOperationAction(ISD::USUBSAT, VT, Legal);
-      setOperationAction(ISD::UREM, VT, Expand);
-      setOperationAction(ISD::SREM, VT, Expand);
-      setOperationAction(ISD::SDIVREM, VT, Expand);
-      setOperationAction(ISD::UDIVREM, VT, Expand);
-    }
-
-    for (auto VT :
-         { MVT::nxv2i8, MVT::nxv2i16, MVT::nxv2i32, MVT::nxv2i64, MVT::nxv4i8,
-           MVT::nxv4i16, MVT::nxv4i32, MVT::nxv8i8, MVT::nxv8i16 })
-      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Legal);
-
-    for (auto VT :
-         { MVT::nxv2f16, MVT::nxv4f16, MVT::nxv8f16, MVT::nxv2f32, MVT::nxv4f32,
-           MVT::nxv2f64 }) {
-      setCondCodeAction(ISD::SETO, VT, Expand);
-      setCondCodeAction(ISD::SETOLT, VT, Expand);
-      setCondCodeAction(ISD::SETLT, VT, Expand);
-      setCondCodeAction(ISD::SETOLE, VT, Expand);
-      setCondCodeAction(ISD::SETLE, VT, Expand);
-      setCondCodeAction(ISD::SETULT, VT, Expand);
-      setCondCodeAction(ISD::SETULE, VT, Expand);
-      setCondCodeAction(ISD::SETUGE, VT, Expand);
-      setCondCodeAction(ISD::SETUGT, VT, Expand);
-      setCondCodeAction(ISD::SETUEQ, VT, Expand);
-      setCondCodeAction(ISD::SETUNE, VT, Expand);
-
-      setOperationAction(ISD::FREM, VT, Expand);
-      setOperationAction(ISD::FPOW, VT, Expand);
-      setOperationAction(ISD::FPOWI, VT, Expand);
-      setOperationAction(ISD::FCOS, VT, Expand);
-      setOperationAction(ISD::FSIN, VT, Expand);
-      setOperationAction(ISD::FSINCOS, VT, Expand);
-      setOperationAction(ISD::FEXP, VT, Expand);
-      setOperationAction(ISD::FEXP2, VT, Expand);
-      setOperationAction(ISD::FLOG, VT, Expand);
-      setOperationAction(ISD::FLOG2, VT, Expand);
-      setOperationAction(ISD::FLOG10, VT, Expand);
-    }
   }
 
   // Compute derived properties from the register classes
@@ -1163,9 +1119,6 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
     }
   }
 
-  if (Subtarget->hasSVE())
-    setOperationAction(ISD::VSCALE, MVT::i32, Custom);
-
   setTruncStoreAction(MVT::v4i16, MVT::v4i8, Custom);
 
   setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, MVT::v4i8, Custom);
@@ -1224,6 +1177,15 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::SELECT_CC, VT, Expand);
       setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);
+
+      setOperationAction(ISD::SADDSAT, VT, Legal);
+      setOperationAction(ISD::UADDSAT, VT, Legal);
+      setOperationAction(ISD::SSUBSAT, VT, Legal);
+      setOperationAction(ISD::USUBSAT, VT, Legal);
+      setOperationAction(ISD::UREM, VT, Expand);
+      setOperationAction(ISD::SREM, VT, Expand);
+      setOperationAction(ISD::SDIVREM, VT, Expand);
+      setOperationAction(ISD::UDIVREM, VT, Expand);
     }
 
     // Illegal unpacked integer vector types.
@@ -1237,6 +1199,11 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
                     MVT::nxv2f16, MVT::nxv4f16, MVT::nxv2f32})
       setOperationAction(ISD::BITCAST, VT, Custom);
 
+    for (auto VT :
+         { MVT::nxv2i8, MVT::nxv2i16, MVT::nxv2i32, MVT::nxv2i64, MVT::nxv4i8,
+           MVT::nxv4i16, MVT::nxv4i32, MVT::nxv8i8, MVT::nxv8i16 })
+      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Legal);
+
     for (auto VT : {MVT::nxv16i1, MVT::nxv8i1, MVT::nxv4i1, MVT::nxv2i1}) {
       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
       setOperationAction(ISD::SELECT, VT, Custom);
@@ -1326,6 +1293,29 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
 
       setOperationAction(ISD::SELECT_CC, VT, Expand);
+      setOperationAction(ISD::FREM, VT, Expand);
+      setOperationAction(ISD::FPOW, VT, Expand);
+      setOperationAction(ISD::FPOWI, VT, Expand);
+      setOperationAction(ISD::FCOS, VT, Expand);
+      setOperationAction(ISD::FSIN, VT, Expand);
+      setOperationAction(ISD::FSINCOS, VT, Expand);
+      setOperationAction(ISD::FEXP, VT, Expand);
+      setOperationAction(ISD::FEXP2, VT, Expand);
+      setOperationAction(ISD::FLOG, VT, Expand);
+      setOperationAction(ISD::FLOG2, VT, Expand);
+      setOperationAction(ISD::FLOG10, VT, Expand);
+
+      setCondCodeAction(ISD::SETO, VT, Expand);
+      setCondCodeAction(ISD::SETOLT, VT, Expand);
+      setCondCodeAction(ISD::SETLT, VT, Expand);
+      setCondCodeAction(ISD::SETOLE, VT, Expand);
+      setCondCodeAction(ISD::SETLE, VT, Expand);
+      setCondCodeAction(ISD::SETULT, VT, Expand);
+      setCondCodeAction(ISD::SETULE, VT, Expand);
+      setCondCodeAction(ISD::SETUGE, VT, Expand);
+      setCondCodeAction(ISD::SETUGT, VT, Expand);
+      setCondCodeAction(ISD::SETUEQ, VT, Expand);
+      setCondCodeAction(ISD::SETUNE, VT, Expand);
     }
 
     for (auto VT : {MVT::nxv2bf16, MVT::nxv4bf16, MVT::nxv8bf16}) {
@@ -1426,6 +1416,8 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
     setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv4i1, MVT::nxv4i32);
     setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv8i1, MVT::nxv8i16);
     setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv16i1, MVT::nxv16i8);
+
+    setOperationAction(ISD::VSCALE, MVT::i32, Custom);
   }
 
   if (Subtarget->hasMOPS() && Subtarget->hasMTE()) {
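For orientation only, not part of the patch: the hunks above relocate per-type legalization rules within the AArch64TargetLowering constructor, and the SelectionDAG legalizer consults these tables later. A minimal sketch of the resulting grouping follows, assuming the enclosing block is the constructor's if (Subtarget->hasSVE()) section, which the hunk context suggests but does not show; the types and calls are taken from the added lines, and the comments give the usual meaning of each LegalizeAction.

  // Sketch, not the verbatim final file.
  if (Subtarget->hasSVE()) {
    for (auto VT : {MVT::nxv16i8, MVT::nxv8i16, MVT::nxv4i32, MVT::nxv2i64}) {
      setOperationAction(ISD::SADDSAT, VT, Legal); // Legal: selected as-is, no legalization step.
      setOperationAction(ISD::UREM, VT, Expand);   // Expand: rewritten in terms of other nodes.
    }
    // Expand: unsupported floating-point condition codes are rewritten by the legalizer.
    setCondCodeAction(ISD::SETUEQ, MVT::nxv4f32, Expand);
    // Custom: lowered by the target's LowerOperation hook.
    setOperationAction(ISD::VSCALE, MVT::i32, Custom);
  }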