diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
index a60855cc4f2d6..41d24c8797426 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
@@ -671,6 +671,9 @@ RegBankLegalizeRules::RegBankLegalizeRules(const GCNSubtarget &_ST,
       .Any({{{UniB256, UniP1}, isAlign4 && isUL}, {{SgprB256}, {SgprP1}}})
       .Any({{{UniB512, UniP1}, isAlign4 && isUL}, {{SgprB512}, {SgprP1}}})
       .Any({{{UniB32, UniP1}, !isAlign4 || !isUL}, {{UniInVgprB32}, {SgprP1}}})
+      .Any({{{UniB64, UniP1}, !isAlign4 || !isUL}, {{UniInVgprB64}, {SgprP1}}})
+      .Any({{{UniB96, UniP1}, !isAlign4 || !isUL}, {{UniInVgprB96}, {SgprP1}}})
+      .Any({{{UniB128, UniP1}, !isAlign4 || !isUL}, {{UniInVgprB128}, {SgprP1}}})
       .Any({{{UniB256, UniP1}, !isAlign4 || !isUL},
             {{UniInVgprB256}, {VgprP1}, SplitLoad}})
       .Any({{{UniB512, UniP1}, !isAlign4 || !isUL},
             {{UniInVgprB512}, {VgprP1}, SplitLoad}})
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/load-uniform-in-vgpr.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/load-uniform-in-vgpr.ll
new file mode 100644
index 0000000000000..92e532b6cf340
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/load-uniform-in-vgpr.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -mattr=+unaligned-access-mode < %s | FileCheck %s
+
+define amdgpu_ps void @uniform_load_i32(ptr addrspace(1) inreg %ptr0, ptr addrspace(1) inreg %ptr1, ptr addrspace(1) inreg %ptr2) {
+; CHECK-LABEL: uniform_load_i32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    global_load_dword v1, v0, s[0:1] glc dlc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    global_load_dword v2, v0, s[2:3]
+; CHECK-NEXT:    v_readfirstlane_b32 s0, v1
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_readfirstlane_b32 s1, v2
+; CHECK-NEXT:    s_add_i32 s0, s0, s1
+; CHECK-NEXT:    v_mov_b32_e32 v1, s0
+; CHECK-NEXT:    global_store_dword v0, v1, s[4:5]
+; CHECK-NEXT:    s_endpgm
+  %load0 = load volatile i32, ptr addrspace(1) %ptr0
+  %load1 = load i32, ptr addrspace(1) %ptr1, align 1
+  %sum = add i32 %load0, %load1
+  store i32 %sum, ptr addrspace(1) %ptr2
+  ret void
+}
+
+define amdgpu_ps void @uniform_load_v2i32(ptr addrspace(1) inreg %ptr0, ptr addrspace(1) inreg %ptr1) {
+; CHECK-LABEL: uniform_load_v2i32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    global_load_dwordx2 v[0:1], v2, s[0:1] glc dlc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_readfirstlane_b32 s0, v0
+; CHECK-NEXT:    v_readfirstlane_b32 s1, v1
+; CHECK-NEXT:    s_add_i32 s0, s0, s1
+; CHECK-NEXT:    v_mov_b32_e32 v0, s0
+; CHECK-NEXT:    global_store_dword v2, v0, s[2:3]
+; CHECK-NEXT:    s_endpgm
+  %load = load volatile <2 x i32>, ptr addrspace(1) %ptr0
+  %elt0 = extractelement <2 x i32> %load, i32 0
+  %elt1 = extractelement <2 x i32> %load, i32 1
+  %sum = add i32 %elt0, %elt1
+  store i32 %sum, ptr addrspace(1) %ptr1
+  ret void
+}
+
+define amdgpu_ps void @uniform_load_v3i32(ptr addrspace(1) inreg %ptr0, ptr addrspace(1) inreg %ptr1) {
+; CHECK-LABEL: uniform_load_v3i32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    v_mov_b32_e32 v3, 0
+; CHECK-NEXT:    global_load_dwordx3 v[0:2], v3, s[0:1]
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_readfirstlane_b32 s0, v0
+; CHECK-NEXT:    v_readfirstlane_b32 s1, v1
+; CHECK-NEXT:    v_readfirstlane_b32 s4, v2
+; CHECK-NEXT:    s_add_i32 s0, s0, s1
+; CHECK-NEXT:    s_add_i32 s0, s0, s4
+; CHECK-NEXT:    v_mov_b32_e32 v0, s0
+; CHECK-NEXT:    global_store_dword v3, v0, s[2:3]
+; CHECK-NEXT:    s_endpgm
+  %load = load <3 x i32>, ptr addrspace(1) %ptr0, align 2
+  %elt0 = extractelement <3 x i32> %load, i32 0
+  %elt1 = extractelement <3 x i32> %load, i32 1
+  %elt2 = extractelement <3 x i32> %load, i32 2
+  %sum0 = add i32 %elt0, %elt1
+  %sum = add i32 %sum0, %elt2
+  store i32 %sum, ptr addrspace(1) %ptr1
+  ret void
+}
+
+define amdgpu_ps void @uniform_load_v4i32(ptr addrspace(1) inreg %ptr0, ptr addrspace(1) inreg %ptr1) {
+; CHECK-LABEL: uniform_load_v4i32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    v_mov_b32_e32 v4, 0
+; CHECK-NEXT:    global_load_dwordx4 v[0:3], v4, s[0:1] glc dlc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_readfirstlane_b32 s0, v0
+; CHECK-NEXT:    v_readfirstlane_b32 s1, v1
+; CHECK-NEXT:    v_readfirstlane_b32 s4, v2
+; CHECK-NEXT:    v_readfirstlane_b32 s5, v3
+; CHECK-NEXT:    s_add_i32 s0, s0, s1
+; CHECK-NEXT:    s_add_i32 s0, s0, s4
+; CHECK-NEXT:    s_add_i32 s0, s0, s5
+; CHECK-NEXT:    v_mov_b32_e32 v0, s0
+; CHECK-NEXT:    global_store_dword v4, v0, s[2:3]
+; CHECK-NEXT:    s_endpgm
+  %load = load volatile <4 x i32>, ptr addrspace(1) %ptr0
+  %elt0 = extractelement <4 x i32> %load, i32 0
+  %elt1 = extractelement <4 x i32> %load, i32 1
+  %elt2 = extractelement <4 x i32> %load, i32 2
+  %elt3 = extractelement <4 x i32> %load, i32 3
+  %sum0 = add i32 %elt0, %elt1
+  %sum1 = add i32 %sum0, %elt2
+  %sum = add i32 %sum1, %elt3
+  store i32 %sum, ptr addrspace(1) %ptr1
+  ret void
+}
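
The test above exercises the new rules through i32 and <N x i32> loads, which all map to the B32/B64/B96/B128 register-bank sizes. For reference, a minimal IR sketch (hypothetical, not part of the patch; the function name @uniform_load_i64 is invented for illustration) of a scalar case the new UniB64 rule would also cover: a uniform 64-bit global load that is not known to be 4-byte aligned fails the isAlign4 predicate, so instead of an SMEM load it takes the {{UniInVgprB64}, {SgprP1}} mapping, i.e. it is loaded in VGPRs and the uniform result is copied back to SGPRs via v_readfirstlane_b32.

define amdgpu_ps void @uniform_load_i64(ptr addrspace(1) inreg %ptr0, ptr addrspace(1) inreg %ptr1) {
  ; align 1 fails isAlign4, so this uniform B64 load matches the new
  ; .Any({{{UniB64, UniP1}, !isAlign4 || !isUL}, {{UniInVgprB64}, {SgprP1}}}) rule
  %load = load i64, ptr addrspace(1) %ptr0, align 1
  %sum = add i64 %load, 1
  store i64 %sum, ptr addrspace(1) %ptr1
  ret void
}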