diff --git a/llvm/include/llvm/CodeGen/MIRYamlMapping.h b/llvm/include/llvm/CodeGen/MIRYamlMapping.h
index 119786f045ed9..73737de73c552 100644
--- a/llvm/include/llvm/CodeGen/MIRYamlMapping.h
+++ b/llvm/include/llvm/CodeGen/MIRYamlMapping.h
@@ -378,6 +378,8 @@ struct ScalarEnumerationTraits {
     IO.enumCase(ID, "default", TargetStackID::Default);
     IO.enumCase(ID, "sgpr-spill", TargetStackID::SGPRSpill);
     IO.enumCase(ID, "scalable-vector", TargetStackID::ScalableVector);
+    IO.enumCase(ID, "scalable-predicate-vector",
+                TargetStackID::ScalablePredicateVector);
     IO.enumCase(ID, "wasm-local", TargetStackID::WasmLocal);
     IO.enumCase(ID, "noalloc", TargetStackID::NoAlloc);
   }
diff --git a/llvm/include/llvm/CodeGen/MachineFrameInfo.h b/llvm/include/llvm/CodeGen/MachineFrameInfo.h
index 403e5eda949f1..6137e66f881af 100644
--- a/llvm/include/llvm/CodeGen/MachineFrameInfo.h
+++ b/llvm/include/llvm/CodeGen/MachineFrameInfo.h
@@ -494,7 +494,14 @@ class MachineFrameInfo {
   /// Should this stack ID be considered in MaxAlignment.
   bool contributesToMaxAlignment(uint8_t StackID) {
     return StackID == TargetStackID::Default ||
-           StackID == TargetStackID::ScalableVector;
+           StackID == TargetStackID::ScalableVector ||
+           StackID == TargetStackID::ScalablePredicateVector;
+  }
+
+  bool isScalableStackID(int ObjectIdx) const {
+    uint8_t StackID = getStackID(ObjectIdx);
+    return StackID == TargetStackID::ScalableVector ||
+           StackID == TargetStackID::ScalablePredicateVector;
   }
 
   /// setObjectAlignment - Change the alignment of the specified stack object.
diff --git a/llvm/include/llvm/CodeGen/TargetFrameLowering.h b/llvm/include/llvm/CodeGen/TargetFrameLowering.h
index 0e29e45752a9f..75696faf114cc 100644
--- a/llvm/include/llvm/CodeGen/TargetFrameLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetFrameLowering.h
@@ -32,6 +32,7 @@ enum Value {
   SGPRSpill = 1,
   ScalableVector = 2,
   WasmLocal = 3,
+  ScalablePredicateVector = 4,
   NoAlloc = 255
 };
 }
diff --git a/llvm/lib/CodeGen/StackFrameLayoutAnalysisPass.cpp b/llvm/lib/CodeGen/StackFrameLayoutAnalysisPass.cpp
index 096a33c17cb4b..ec75dc3e3871c 100644
--- a/llvm/lib/CodeGen/StackFrameLayoutAnalysisPass.cpp
+++ b/llvm/lib/CodeGen/StackFrameLayoutAnalysisPass.cpp
@@ -72,7 +72,7 @@ struct StackFrameLayoutAnalysis {
         : Slot(Idx), Size(MFI.getObjectSize(Idx)),
           Align(MFI.getObjectAlign(Idx).value()), Offset(Offset),
           SlotTy(Invalid), Scalable(false) {
-      Scalable = MFI.getStackID(Idx) == TargetStackID::ScalableVector;
+      Scalable = MFI.isScalableStackID(Idx);
       if (MFI.isSpillSlotObjectIndex(Idx))
         SlotTy = SlotType::Spill;
       else if (MFI.isFixedObjectIndex(Idx))
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 2f3cb71e4767f..409a21a37810f 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -335,7 +335,7 @@ static Register findScratchNonCalleeSaveRegister(MachineBasicBlock *MBB,
                                                  bool HasCall = false);
 static bool requiresSaveVG(const MachineFunction &MF);
 
-// Conservatively, returns true if the function is likely to have an SVE vectors
+// Conservatively, returns true if the function is likely to have SVE vectors
 // on the stack. This function is safe to be called before callee-saves or
 // object offsets have been determined.
 static bool isLikelyToHaveSVEStack(MachineFunction &MF) {
@@ -348,7 +348,7 @@ static bool isLikelyToHaveSVEStack(MachineFunction &MF) {
 
   const MachineFrameInfo &MFI = MF.getFrameInfo();
   for (int FI = MFI.getObjectIndexBegin(); FI < MFI.getObjectIndexEnd(); FI++) {
-    if (MFI.getStackID(FI) == TargetStackID::ScalableVector)
+    if (MFI.isScalableStackID(FI))
       return true;
   }
 
@@ -675,7 +675,7 @@ void AArch64FrameLowering::emitCalleeSavedGPRLocations(
   CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);
   for (const auto &Info : CSI) {
     unsigned FrameIdx = Info.getFrameIdx();
-    if (MFI.getStackID(FrameIdx) == TargetStackID::ScalableVector)
+    if (MFI.isScalableStackID(FrameIdx))
       continue;
 
     assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
@@ -708,7 +708,7 @@ void AArch64FrameLowering::emitCalleeSavedSVELocations(
   CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);
 
   for (const auto &Info : CSI) {
-    if (!(MFI.getStackID(Info.getFrameIdx()) == TargetStackID::ScalableVector))
+    if (!MFI.isScalableStackID(Info.getFrameIdx()))
       continue;
 
     // Not all unwinders may know about SVE registers, so assume the lowest
@@ -775,8 +775,7 @@ static void emitCalleeSavedRestores(MachineBasicBlock &MBB,
   CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameDestroy);
 
   for (const auto &Info : CSI) {
-    if (SVE !=
-        (MFI.getStackID(Info.getFrameIdx()) == TargetStackID::ScalableVector))
+    if (SVE != MFI.isScalableStackID(Info.getFrameIdx()))
       continue;
 
     MCRegister Reg = Info.getReg();
@@ -2812,7 +2811,7 @@ AArch64FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
   const auto *AFI = MF.getInfo<AArch64FunctionInfo>();
   bool FPAfterSVECalleeSaves =
       isTargetWindows(MF) && AFI->getSVECalleeSavedStackSize();
-  if (MFI.getStackID(FI) == TargetStackID::ScalableVector) {
+  if (MFI.isScalableStackID(FI)) {
     if (FPAfterSVECalleeSaves &&
         -ObjectOffset <= (int64_t)AFI->getSVECalleeSavedStackSize())
       return StackOffset::getScalable(ObjectOffset);
@@ -2878,7 +2877,7 @@ StackOffset AArch64FrameLowering::resolveFrameIndexReference(
   const auto &MFI = MF.getFrameInfo();
   int64_t ObjectOffset = MFI.getObjectOffset(FI);
   bool isFixed = MFI.isFixedObjectIndex(FI);
-  bool isSVE = MFI.getStackID(FI) == TargetStackID::ScalableVector;
+  bool isSVE = MFI.isScalableStackID(FI);
   return resolveFrameOffsetReference(MF, ObjectOffset, isFixed, isSVE, FrameReg,
                                      PreferFP, ForSimm);
 }
@@ -3614,10 +3613,14 @@ bool AArch64FrameLowering::spillCalleeSavedRegisters(
     }
     // Update the StackIDs of the SVE stack slots.
     MachineFrameInfo &MFI = MF.getFrameInfo();
-    if (RPI.Type == RegPairInfo::ZPR || RPI.Type == RegPairInfo::PPR) {
+    if (RPI.Type == RegPairInfo::ZPR) {
       MFI.setStackID(FrameIdxReg1, TargetStackID::ScalableVector);
       if (RPI.isPaired())
         MFI.setStackID(FrameIdxReg2, TargetStackID::ScalableVector);
+    } else if (RPI.Type == RegPairInfo::PPR) {
+      MFI.setStackID(FrameIdxReg1, TargetStackID::ScalablePredicateVector);
+      if (RPI.isPaired())
+        MFI.setStackID(FrameIdxReg2, TargetStackID::ScalablePredicateVector);
     }
 
     if (X0Scratch != AArch64::NoRegister)
@@ -3832,8 +3835,7 @@ void AArch64FrameLowering::determineStackHazardSlot(
     for (auto &MI : MBB) {
       std::optional<int> FI = getLdStFrameID(MI, MFI);
       if (FI && *FI >= 0 && *FI < (int)FrameObjects.size()) {
-        if (MFI.getStackID(*FI) == TargetStackID::ScalableVector ||
-            AArch64InstrInfo::isFpOrNEON(MI))
+        if (MFI.isScalableStackID(*FI) || AArch64InstrInfo::isFpOrNEON(MI))
          FrameObjects[*FI] |= 2;
        else
          FrameObjects[*FI] |= 1;
@@ -4301,7 +4303,7 @@ static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI,
 #ifndef NDEBUG
   // First process all fixed stack objects.
   for (int I = MFI.getObjectIndexBegin(); I != 0; ++I)
-    assert(MFI.getStackID(I) != TargetStackID::ScalableVector &&
+    assert(!MFI.isScalableStackID(I) &&
           "SVE vectors should never be passed on the stack by value, only by "
           "reference.");
 #endif
@@ -4335,12 +4337,11 @@ static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI,
   int StackProtectorFI = -1;
   if (MFI.hasStackProtectorIndex()) {
     StackProtectorFI = MFI.getStackProtectorIndex();
-    if (MFI.getStackID(StackProtectorFI) == TargetStackID::ScalableVector)
+    if (MFI.isScalableStackID(StackProtectorFI))
       ObjectsToAllocate.push_back(StackProtectorFI);
   }
   for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
-    unsigned StackID = MFI.getStackID(I);
-    if (StackID != TargetStackID::ScalableVector)
+    if (!MFI.isScalableStackID(I))
       continue;
     if (I == StackProtectorFI)
       continue;
@@ -5372,8 +5373,7 @@ void AArch64FrameLowering::orderFrameObjects(
       if (AFI.hasStackHazardSlotIndex()) {
         std::optional<int> FI = getLdStFrameID(MI, MFI);
         if (FI && *FI >= 0 && *FI < (int)FrameObjects.size()) {
-          if (MFI.getStackID(*FI) == TargetStackID::ScalableVector ||
-              AArch64InstrInfo::isFpOrNEON(MI))
+          if (MFI.isScalableStackID(*FI) || AArch64InstrInfo::isFpOrNEON(MI))
             FrameObjects[*FI].Accesses |= FrameObject::AccessFPR;
           else
             FrameObjects[*FI].Accesses |= FrameObject::AccessGPR;
@@ -5731,7 +5731,7 @@ void AArch64FrameLowering::emitRemarks(
       }
 
       unsigned RegTy = StackAccess::AccessType::GPR;
-      if (MFI.getStackID(FrameIdx) == TargetStackID::ScalableVector) {
+      if (MFI.isScalableStackID(FrameIdx)) {
        // SPILL_PPR_TO_ZPR_SLOT_PSEUDO and FILL_PPR_FROM_ZPR_SLOT_PSEUDO
        // spill/fill the predicate as a data vector (so are an FPR access).
        if (MI.getOpcode() != AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO &&
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.h b/llvm/lib/Target/AArch64/AArch64FrameLowering.h
index ced69c9cd3699..6892be4d97b26 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.h
@@ -111,6 +111,7 @@ class AArch64FrameLowering : public TargetFrameLowering {
       return false;
     case TargetStackID::Default:
     case TargetStackID::ScalableVector:
+    case TargetStackID::ScalablePredicateVector:
     case TargetStackID::NoAlloc:
       return true;
     }
@@ -119,7 +120,8 @@ class AArch64FrameLowering : public TargetFrameLowering {
   bool isStackIdSafeForLocalArea(unsigned StackId) const override {
     // We don't support putting SVE objects into the pre-allocated local
     // frame block at the moment.
-    return StackId != TargetStackID::ScalableVector;
+    return (StackId != TargetStackID::ScalableVector &&
+            StackId != TargetStackID::ScalablePredicateVector);
   }
 
   void
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index da617b7e19266..7beacbfd1903e 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -7487,7 +7487,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexedSVE(SDNode *Root, SDValue N,
     int FI = cast<FrameIndexSDNode>(N)->getIndex();
     // We can only encode VL scaled offsets, so only fold in frame indexes
     // referencing SVE objects.
-    if (MFI.getStackID(FI) == TargetStackID::ScalableVector) {
+    if (MFI.isScalableStackID(FI)) {
      Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
      OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i64);
      return true;
@@ -7533,7 +7533,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexedSVE(SDNode *Root, SDValue N,
      int FI = cast<FrameIndexSDNode>(Base)->getIndex();
      // We can only encode VL scaled offsets, so only fold in frame indexes
      // referencing SVE objects.
-      if (MFI.getStackID(FI) == TargetStackID::ScalableVector)
+      if (MFI.isScalableStackID(FI))
        Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
    }
 
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 01be10be433fd..75d42129d2775 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -8714,8 +8714,7 @@ void AArch64TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
      (MI.getOpcode() == AArch64::ADDXri || MI.getOpcode() == AArch64::SUBXri)) {
    const MachineOperand &MO = MI.getOperand(1);
-    if (MO.isFI() && MF.getFrameInfo().getStackID(MO.getIndex()) ==
-                         TargetStackID::ScalableVector)
+    if (MO.isFI() && MF.getFrameInfo().isScalableStackID(MO.getIndex()))
      MI.addOperand(MachineOperand::CreateReg(AArch64::VG, /*IsDef=*/false,
                                              /*IsImplicit=*/true));
  }
 
@@ -9151,8 +9150,12 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
        Align Alignment = DAG.getDataLayout().getPrefTypeAlign(Ty);
        MachineFrameInfo &MFI = MF.getFrameInfo();
        int FI = MFI.CreateStackObject(StoreSize, Alignment, false);
-        if (isScalable)
-          MFI.setStackID(FI, TargetStackID::ScalableVector);
+        if (isScalable) {
+          bool IsPred = VA.getValVT() == MVT::aarch64svcount ||
+                        VA.getValVT().getVectorElementType() == MVT::i1;
+          MFI.setStackID(FI, IsPred ? TargetStackID::ScalablePredicateVector
+                                    : TargetStackID::ScalableVector);
+        }
 
        MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
        SDValue Ptr = DAG.getFrameIndex(
@@ -28554,7 +28557,7 @@ void AArch64TargetLowering::finalizeLowering(MachineFunction &MF) const {
  // than doing it here in finalizeLowering.
  if (MFI.hasStackProtectorIndex()) {
    for (unsigned int i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
-      if (MFI.getStackID(i) == TargetStackID::ScalableVector &&
+      if (MFI.isScalableStackID(i) &&
          MFI.getObjectSSPLayout(i) != MachineFrameInfo::SSPLK_None) {
        MFI.setStackID(MFI.getStackProtectorIndex(),
                       TargetStackID::ScalableVector);
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 8847c62690714..96b2d03c25b1c 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -5485,7 +5485,7 @@ void AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
      assert(Subtarget.isSVEorStreamingSVEAvailable() &&
             "Unexpected register store without SVE store instructions");
      Opc = AArch64::STR_PXI;
-      StackID = TargetStackID::ScalableVector;
+      StackID = TargetStackID::ScalablePredicateVector;
    }
    break;
  }
@@ -5500,7 +5500,7 @@ void AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
      Opc = AArch64::STRSui;
    else if (AArch64::PPR2RegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STR_PPXI;
-      StackID = TargetStackID::ScalableVector;
+      StackID = TargetStackID::ScalablePredicateVector;
    }
    break;
  case 8:
@@ -5662,7 +5662,7 @@ void AArch64InstrInfo::loadRegFromStackSlot(
      if (IsPNR)
        PNRReg = DestReg;
      Opc = AArch64::LDR_PXI;
-      StackID = TargetStackID::ScalableVector;
+      StackID = TargetStackID::ScalablePredicateVector;
    }
    break;
  }
@@ -5677,7 +5677,7 @@ void AArch64InstrInfo::loadRegFromStackSlot(
      Opc = AArch64::LDRSui;
    else if (AArch64::PPR2RegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDR_PPXI;
-      StackID = TargetStackID::ScalableVector;
+      StackID = TargetStackID::ScalablePredicateVector;
    }
    break;
  case 8:
diff --git a/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir b/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir
index aca2816225e3e..7fd0cee068fd1 100644
--- a/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir
+++ b/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir
@@ -164,10 +164,10 @@ stack:
   - { id: 1, name: z1.addr, size: 16, alignment: 16, stack-id: scalable-vector,
       debug-info-variable: '!31', debug-info-expression: '!DIExpression()',
       debug-info-location: '!32' }
-  - { id: 2, name: p0.addr, size: 2, alignment: 2, stack-id: scalable-vector,
+  - { id: 2, name: p0.addr, size: 2, alignment: 2, stack-id: scalable-predicate-vector,
       debug-info-variable: '!33', debug-info-expression: '!DIExpression()',
       debug-info-location: '!34' }
-  - { id: 3, name: p1.addr, size: 2, alignment: 2, stack-id: scalable-vector,
+  - { id: 3, name: p1.addr, size: 2, alignment: 2, stack-id: scalable-predicate-vector,
       debug-info-variable: '!35', debug-info-expression: '!DIExpression()',
       debug-info-location: '!36' }
   - { id: 4, name: w0.addr, size: 4, alignment: 4, local-offset: -4, debug-info-variable: '!37',
@@ -181,10 +181,10 @@ stack:
   - { id: 7, name: localv1, size: 16, alignment: 16, stack-id: scalable-vector,
       debug-info-variable: '!45', debug-info-expression: '!DIExpression()',
       debug-info-location: '!46' }
-  - { id: 8, name: localp0, size: 2, alignment: 2, stack-id: scalable-vector,
+  - { id: 8, name: localp0, size: 2, alignment: 2, stack-id: scalable-predicate-vector,
       debug-info-variable: '!48', debug-info-expression: '!DIExpression()',
       debug-info-location: '!49' }
-  - { id: 9, name: localp1, size: 2, alignment: 2, stack-id: scalable-vector,
+  - { id: 9, name: localp1, size: 2, alignment: 2, stack-id: scalable-predicate-vector,
       debug-info-variable: '!51', debug-info-expression: '!DIExpression()',
       debug-info-location: '!52' }
 machineFunctionInfo: {}
diff --git a/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-value.mir b/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-value.mir
index 0ea180b20730f..41ba5542150ab 100644
--- a/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-value.mir
+++ b/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-value.mir
@@ -96,8 +96,8 @@ stack:
   - { id: 1, size: 8, alignment: 8 }
   - { id: 2, size: 16, alignment: 16, stack-id: scalable-vector }
   - { id: 3, size: 16, alignment: 16, stack-id: scalable-vector }
-  - { id: 4, size: 2, alignment: 2, stack-id: scalable-vector }
-  - { id: 5, size: 2, alignment: 2, stack-id: scalable-vector }
+  - { id: 4, size: 2, alignment: 2, stack-id: scalable-predicate-vector }
+  - { id: 5, size: 2, alignment: 2, stack-id: scalable-predicate-vector }
 machineFunctionInfo: {}
 body: |
   bb.0.entry:
diff --git a/llvm/test/CodeGen/AArch64/framelayout-sve.mir b/llvm/test/CodeGen/AArch64/framelayout-sve.mir
index 17b1ad2197c46..9ea042e234699 100644
--- a/llvm/test/CodeGen/AArch64/framelayout-sve.mir
+++ b/llvm/test/CodeGen/AArch64/framelayout-sve.mir
@@ -88,7 +88,7 @@
 #
 # UNWINDINFO: DW_CFA_def_cfa_offset: +16
 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
-# UNWINDINFO: DW_CFA_def_cfa_offset: +32 
+# UNWINDINFO: DW_CFA_def_cfa_offset: +32
 # UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO: DW_CFA_def_cfa: reg31 +32
 # UNWINDINFO: DW_CFA_def_cfa_offset: +16
@@ -166,7 +166,7 @@
 # UNWINDINFO: DW_CFA_offset: reg20 -8
 # UNWINDINFO-NEXT: DW_CFA_offset: reg21 -16
 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -32
-# UNWINDINFO: DW_CFA_def_cfa_offset: +48 
+# UNWINDINFO: DW_CFA_def_cfa_offset: +48
 # UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +48, DW_OP_plus, DW_OP_consts +16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 #
 # UNWINDINFO: DW_CFA_def_cfa: reg31 +48
@@ -918,7 +918,7 @@ body: |
 # ASM-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 32 - 48 * VG
 # ASM-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 32 - 56 * VG
 # ASM-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 32 - 64 * VG
-# ASM: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 64 + 144 * VG 
+# ASM: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 64 + 144 * VG
 # ASM: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 64 + 152 * VG
 #
 # ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 152 * VG
@@ -1165,19 +1165,19 @@
 # CHECK: - { id: 2, name: '', type: default, offset: -112, size: 16, alignment: 16,
 # CHECK-NEXT: stack-id: scalable-vector,
 # CHECK: - { id: 3, name: '', type: default, offset: -114, size: 2, alignment: 2,
-# CHECK-NEXT: stack-id: scalable-vector,
+# CHECK-NEXT: stack-id: scalable-predicate-vector,
 # CHECK: - { id: 4, name: '', type: spill-slot, offset: -144, size: 16, alignment: 16,
 # CHECK-NEXT: stack-id: scalable-vector,
 # CHECK: - { id: 5, name: '', type: spill-slot, offset: -146, size: 2, alignment: 2,
-# CHECK-NEXT: stack-id: scalable-vector,
+# CHECK-NEXT: stack-id: scalable-predicate-vector,
 # CHECK: - { id: 6, name: '', type: spill-slot, offset: -16, size: 16, alignment: 16,
 # CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: '$z8',
 # CHECK: - { id: 7, name: '', type: spill-slot, offset: -32, size: 16, alignment: 16,
 # CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: '$z23',
 # CHECK: - { id: 8, name: '', type: spill-slot, offset: -34, size: 2, alignment: 2,
-# CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: '$p4',
+# CHECK-NEXT: stack-id: scalable-predicate-vector, callee-saved-register: '$p4',
 # CHECK: - { id: 9, name: '', type: spill-slot, offset: -36, size: 2, alignment: 2,
-# CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: '$p15',
+# CHECK-NEXT: stack-id: scalable-predicate-vector, callee-saved-register: '$p15',
 # CHECK: - { id: 10, name: '', type: spill-slot, offset: -16, size: 8, alignment: 16,
 # CHECK-NEXT: stack-id: default, callee-saved-register: '$fp',
 #
@@ -1241,9 +1241,9 @@
 stack:
   - { id: 0, type: default, size: 32, alignment: 16, stack-id: scalable-vector }
   - { id: 1, type: default, size: 4, alignment: 2, stack-id: scalable-vector }
   - { id: 2, type: default, size: 16, alignment: 16, stack-id: scalable-vector }
-  - { id: 3, type: default, size: 2, alignment: 2, stack-id: scalable-vector }
+  - { id: 3, type: default, size: 2, alignment: 2, stack-id: scalable-predicate-vector }
   - { id: 4, type: spill-slot, size: 16, alignment: 16, stack-id: scalable-vector }
-  - { id: 5, type: spill-slot, size: 2, alignment: 2, stack-id: scalable-vector }
+  - { id: 5, type: spill-slot, size: 2, alignment: 2, stack-id: scalable-predicate-vector }
 body: |
   bb.0.entry:
diff --git a/llvm/test/CodeGen/AArch64/spillfill-sve.mir b/llvm/test/CodeGen/AArch64/spillfill-sve.mir
index 83c9b73c57570..cfa753b84e23d 100644
--- a/llvm/test/CodeGen/AArch64/spillfill-sve.mir
+++ b/llvm/test/CodeGen/AArch64/spillfill-sve.mir
@@ -38,7 +38,7 @@ body: |
     ; CHECK-LABEL: name: spills_fills_stack_id_ppr
     ; CHECK: stack:
     ; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 2, alignment: 2
-    ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: ''
+    ; CHECK-NEXT: stack-id: scalable-predicate-vector, callee-saved-register: ''
 
     ; EXPAND-LABEL: name: spills_fills_stack_id_ppr
     ; EXPAND: STR_PXI $p0, $sp, 7
@@ -81,7 +81,7 @@ body: |
     ; CHECK-LABEL: name: spills_fills_stack_id_ppr2
     ; CHECK: stack:
     ; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 4, alignment: 2
-    ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: ''
+    ; CHECK-NEXT: stack-id: scalable-predicate-vector, callee-saved-register: ''
 
     ; EXPAND-LABEL: name: spills_fills_stack_id_ppr2
     ; EXPAND: STR_PXI $p0, $sp, 6
@@ -126,7 +126,7 @@ body: |
     ; CHECK-LABEL: name: spills_fills_stack_id_ppr2
     ; CHECK: stack:
     ; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 4, alignment: 2
-    ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: ''
+    ; CHECK-NEXT: stack-id: scalable-predicate-vector, callee-saved-register: ''
 
     ; EXPAND-LABEL: name: spills_fills_stack_id_ppr2mul2
     ; EXPAND: STR_PXI $p0, $sp, 6
@@ -171,7 +171,7 @@ body: |
     ; CHECK-LABEL: name: spills_fills_stack_id_pnr
     ; CHECK: stack:
     ; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 2, alignment: 2
-    ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: ''
+    ; CHECK-NEXT: stack-id: scalable-predicate-vector, callee-saved-register: ''
 
     ; EXPAND-LABEL: name: spills_fills_stack_id_pnr
    ; EXPAND: STR_PXI $pn0, $sp, 7
@@ -210,7 +210,7 @@ body: |
    ; CHECK-LABEL: name: spills_fills_stack_id_virtreg_pnr
    ; CHECK: stack:
    ; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 2, alignment: 2
-    ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: ''
+    ; CHECK-NEXT: stack-id: scalable-predicate-vector, callee-saved-register: ''
 
    ; EXPAND-LABEL: name: spills_fills_stack_id_virtreg_pnr
    ; EXPAND: renamable $pn8 = WHILEGE_CXX_B
diff --git a/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll b/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
index 7bddd1d70aaa8..cc63c7ffc0c1e 100644
--- a/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
+++ b/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
@@ -56,9 +56,9 @@ define aarch64_sve_vector_pcs @caller_with_many_svepred_arg(<
 ; CHECK: name: caller_with_many_svepred_arg
 ; CHECK: stack:
 ; CHECK: - { id: 0, name: '', type: default, offset: 0, size: 2, alignment: 2,
-; CHECK-NEXT: stack-id: scalable-vector
+; CHECK-NEXT: stack-id: scalable-predicate-vector
 ; CHECK: - { id: 1, name: '', type: default, offset: 0, size: 2, alignment: 2,
-; CHECK-NEXT: stack-id: scalable-vector
+; CHECK-NEXT: stack-id: scalable-predicate-vector
 ; CHECK-DAG: STR_PXI %{{[0-9]+}}, %stack.0, 0
 ; CHECK-DAG: STR_PXI %{{[0-9]+}}, %stack.1, 0
 ; CHECK-DAG: [[BASE1:%[0-9]+]]:gpr64sp = ADDXri %stack.0, 0
@@ -90,7 +90,7 @@ define aarch64_sve_vector_pcs @caller_with_svepred_arg_1xv16i
 ; CHECK: name: caller_with_svepred_arg_1xv16i1_4xv16i1
 ; CHECK: stack:
 ; CHECK: - { id: 0, name: '', type: default, offset: 0, size: 2, alignment: 2,
-; CHECK-NEXT: stack-id: scalable-vector,
+; CHECK-NEXT: stack-id: scalable-predicate-vector,
 ; CHECK: [[PRED0:%[0-9]+]]:ppr = COPY $p0
 ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
 ; CHECK: STR_PXI [[PRED0]], %stack.0, 0 :: (store (<vscale x 16 x i1>) into %stack.0)
@@ -139,7 +139,7 @@ define [4 x ] @caller_with_svepred_arg_4xv16i1_4xv16i1([4 x ] @caller_with_svepred_arg_2xv32i1_1xv16i1([2 x
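
Illustrative sketch (not taken from the diff above; ids, sizes and names are assumed for the example): once the new enumeration entry is in place, a MIR stack object for an SVE predicate spill is expected to be written with the new stack ID, while data-vector slots keep the existing one:

  stack:
    - { id: 0, type: spill-slot, size: 16, alignment: 16, stack-id: scalable-vector }
    - { id: 1, type: spill-slot, size: 2, alignment: 2, stack-id: scalable-predicate-vector }

Both IDs report true from the new MachineFrameInfo::isScalableStackID(), so frame-layout code that previously compared a slot's stack ID against TargetStackID::ScalableVector continues to treat predicate slots as scalable.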