From ec74dddedc75c14310bd441d1a659ace973f784d Mon Sep 17 00:00:00 2001
From: Liao Chunyu
Date: Mon, 16 Jun 2025 21:40:22 -0400
Subject: [PATCH] [RISCV] Combine VP_SELECT constant false to use
 vmerge.vxm/vmerge.vim

Currently, when the false operand of a vp_select is a splat vector, it is
lowered to a vmv_v_x/vmv_v_i. MachineLICM hoists the vmv out of the loop,
but a whole-register copy is left behind in the loop body. By inverting the
mask register and swapping the true and false operands of the vp_select, we
can eliminate some instructions inside the loop.

current: https://godbolt.org/z/EnGMn3xeM
expected (similar form): https://godbolt.org/z/nWhGM6Ej5
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   | 30 +++++++++++++------
 .../test/CodeGen/RISCV/rvv/masked-load-int.ll |  5 ++--
 llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll     |  6 ++--
 3 files changed, 27 insertions(+), 14 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index b8ef221742a26..182488517a5d1 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1628,15 +1628,15 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                        ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT});
   if (Subtarget.hasVInstructions())
     setTargetDAGCombine(
-        {ISD::FCOPYSIGN, ISD::MGATHER, ISD::MSCATTER,
-         ISD::VP_GATHER, ISD::VP_SCATTER, ISD::SRA,
-         ISD::SRL, ISD::SHL, ISD::STORE,
-         ISD::SPLAT_VECTOR, ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS,
-         ISD::VP_STORE, ISD::VP_TRUNCATE, ISD::EXPERIMENTAL_VP_REVERSE,
-         ISD::MUL, ISD::SDIV, ISD::UDIV,
-         ISD::SREM, ISD::UREM, ISD::INSERT_VECTOR_ELT,
-         ISD::ABS, ISD::CTPOP, ISD::VECTOR_SHUFFLE,
-         ISD::VSELECT, ISD::VECREDUCE_ADD});
+        {ISD::FCOPYSIGN, ISD::MGATHER, ISD::MSCATTER,
+         ISD::VP_GATHER, ISD::VP_SCATTER, ISD::SRA,
+         ISD::SRL, ISD::SHL, ISD::STORE,
+         ISD::SPLAT_VECTOR, ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS,
+         ISD::VP_STORE, ISD::VP_TRUNCATE, ISD::EXPERIMENTAL_VP_REVERSE,
+         ISD::MUL, ISD::SDIV, ISD::UDIV,
+         ISD::SREM, ISD::UREM, ISD::INSERT_VECTOR_ELT,
+         ISD::ABS, ISD::CTPOP, ISD::VECTOR_SHUFFLE,
+         ISD::VSELECT, ISD::VECREDUCE_ADD, ISD::VP_SELECT});

   if (Subtarget.hasVendorXTHeadMemPair())
     setTargetDAGCombine({ISD::LOAD, ISD::STORE});
@@ -19732,6 +19732,18 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
     return performSELECTCombine(N, DAG, Subtarget);
   case ISD::VSELECT:
     return performVSELECTCombine(N, DAG);
+  case ISD::VP_SELECT: {
+    if (SDValue Op2 = N->getOperand(2);
+        Op2.hasOneUse() && (Op2.getOpcode() == ISD::SPLAT_VECTOR ||
+                            Op2.getOpcode() == ISD::SPLAT_VECTOR_PARTS)) {
+      SDLoc DL(N);
+      SDValue Op0 = N->getOperand(0);
+      SDValue Val = DAG.getLogicalNOT(DL, Op0, Op0.getValueType());
+      return DAG.getNode(ISD::VP_SELECT, DL, N->getValueType(0), Val,
+                         N->getOperand(2), N->getOperand(1), N->getOperand(3));
+    }
+    return SDValue();
+  }
   case RISCVISD::CZERO_EQZ:
   case RISCVISD::CZERO_NEZ: {
     SDValue Val = N->getOperand(0);
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll b/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll
index 372b07e0137b4..a9ed70b94c90f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll
@@ -34,9 +34,10 @@ define <vscale x 1 x i8> @masked_load_passthru_nxv1i8(ptr %a, <vscale x 1 x i1>
 ; ZVE32:       # %bb.0:
 ; ZVE32-NEXT:    csrr a1, vlenb
 ; ZVE32-NEXT:    srli a1, a1, 3
-; ZVE32-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; ZVE32-NEXT:    vmv.v.i v8, 0
+; ZVE32-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; ZVE32-NEXT:    vle8.v v8, (a0), v0.t
+; ZVE32-NEXT:    vmnot.m v0, v0
+; ZVE32-NEXT:    vmerge.vim v8, v8, 0, v0
 ; ZVE32-NEXT:    ret
   %load = call <vscale x 1 x i8> @llvm.masked.load.nxv1i8(ptr %a, i32 1, <vscale x 1 x i1> %mask, <vscale x 1 x i8> zeroinitializer)
   ret <vscale x 1 x i8> %load
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
index 522c83fd9fa99..3918a8009fde8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
@@ -483,10 +483,10 @@ define <vscale x 2 x i64> @select_nxv2i64_constant_true(<vscale x 2 x i1> %a, <
 define <vscale x 2 x i64> @select_nxv2i64_constant_false(<vscale x 2 x i1> %a, <vscale x 2 x i64> %b, i32 zeroext %evl) {
 ; CHECK-LABEL: select_nxv2i64_constant_false:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a1, 100
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmv.v.x v10, a1
-; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
+; CHECK-NEXT:    vmnot.m v0, v0
+; CHECK-NEXT:    li a0, 100
+; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 2 x i64> @llvm.vp.select.nxv2i64(<vscale x 2 x i1> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> splat (i64 100), i32 %evl)
   ret <vscale x 2 x i64> %v
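
Note: a minimal IR-level sketch of the rewrite this combine performs
(hypothetical functions, not taken from the patch's tests; the patch itself
does this on the SelectionDAG, negating the mask via DAG.getLogicalNOT rather
than an explicit xor). The two selects below are equivalent; the second form
is what the combine produces, so the splat becomes the scalar operand of a
single vmerge.vxm under the inverted mask instead of needing a vmv.v.x to
materialize it first.

declare <vscale x 2 x i64> @llvm.vp.select.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)

; Splat false operand: previously lowered through vmv.v.x + vmerge.vvm.
define <vscale x 2 x i64> @sketch_before(<vscale x 2 x i1> %m, <vscale x 2 x i64> %a, i32 zeroext %evl) {
  %v = call <vscale x 2 x i64> @llvm.vp.select.nxv2i64(<vscale x 2 x i1> %m, <vscale x 2 x i64> %a, <vscale x 2 x i64> splat (i64 100), i32 %evl)
  ret <vscale x 2 x i64> %v
}

; Equivalent form after the combine: invert the mask and swap the operands,
; so the splat is now the true operand and folds into vmerge.vxm directly.
define <vscale x 2 x i64> @sketch_after(<vscale x 2 x i1> %m, <vscale x 2 x i64> %a, i32 zeroext %evl) {
  %not = xor <vscale x 2 x i1> %m, splat (i1 true)
  %v = call <vscale x 2 x i64> @llvm.vp.select.nxv2i64(<vscale x 2 x i1> %not, <vscale x 2 x i64> splat (i64 100), <vscale x 2 x i64> %a, i32 %evl)
  ret <vscale x 2 x i64> %v
}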