AMDGPU/GlobalISel: Add regbanklegalize rules for uniform global loads #145909

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Status: Open. Wants to merge 1 commit into base branch main.
3 changes: 3 additions & 0 deletions llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
@@ -671,6 +671,9 @@ RegBankLegalizeRules::RegBankLegalizeRules(const GCNSubtarget &_ST,
.Any({{{UniB256, UniP1}, isAlign4 && isUL}, {{SgprB256}, {SgprP1}}})
.Any({{{UniB512, UniP1}, isAlign4 && isUL}, {{SgprB512}, {SgprP1}}})
.Any({{{UniB32, UniP1}, !isAlign4 || !isUL}, {{UniInVgprB32}, {SgprP1}}})
.Any({{{UniB64, UniP1}, !isAlign4 || !isUL}, {{UniInVgprB64}, {SgprP1}}})
.Any({{{UniB96, UniP1}, !isAlign4 || !isUL}, {{UniInVgprB96}, {SgprP1}}})
.Any({{{UniB128, UniP1}, !isAlign4 || !isUL}, {{UniInVgprB128}, {SgprP1}}})
.Any({{{UniB256, UniP1}, !isAlign4 || !isUL}, {{UniInVgprB256}, {VgprP1}, SplitLoad}})
.Any({{{UniB512, UniP1}, !isAlign4 || !isUL}, {{UniInVgprB512}, {VgprP1}, SplitLoad}})
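The three added rules cover uniform global loads that fail the align-4 or uniform-load-legality (isUL) check and so are not safe to select as scalar (SMEM) loads: the B64/B96/B128 cases are instead issued as VGPR loads whose uniform result is copied back to SGPRs (the v_readfirstlane sequences in the tests below), while the existing B256/B512 fallbacks additionally split the load. A minimal IR sketch of a case that would take the new UniB64 rule (hypothetical, not one of the tests added in this patch):

; Hypothetical example: a uniform but under-aligned i64 load; it fails the
; isAlign4 check and so hits the new UniB64 -> UniInVgprB64 rule.
define amdgpu_ps void @uniform_load_i64_align1(ptr addrspace(1) inreg %ptr0, ptr addrspace(1) inreg %ptr1) {
  %val = load i64, ptr addrspace(1) %ptr0, align 1
  store i64 %val, ptr addrspace(1) %ptr1
  ret void
}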

95 changes: 95 additions & 0 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/load-uniform-in-vgpr.ll
@@ -0,0 +1,95 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -mattr=+unaligned-access-mode < %s | FileCheck %s

define amdgpu_ps void @uniform_load_i32(ptr addrspace(1) inreg %ptr0, ptr addrspace(1) inreg %ptr1, ptr addrspace(1) inreg %ptr2) {
; CHECK-LABEL: uniform_load_i32:
; CHECK: ; %bb.0:
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: global_load_dword v1, v0, s[0:1] glc dlc
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: global_load_dword v2, v0, s[2:3]
; CHECK-NEXT: v_readfirstlane_b32 s0, v1
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_readfirstlane_b32 s1, v2
; CHECK-NEXT: s_add_i32 s0, s0, s1
; CHECK-NEXT: v_mov_b32_e32 v1, s0
; CHECK-NEXT: global_store_dword v0, v1, s[4:5]
; CHECK-NEXT: s_endpgm
%load0 = load volatile i32, ptr addrspace(1) %ptr0
%load1 = load i32, ptr addrspace(1) %ptr1, align 1
Collaborator Author commented:
updated with cases that hit align check

%sum = add i32 %load0, %load1
store i32 %sum, ptr addrspace(1) %ptr2
ret void
}

define amdgpu_ps void @uniform_load_v2i32(ptr addrspace(1) inreg %ptr0, ptr addrspace(1) inreg %ptr1) {
; CHECK-LABEL: uniform_load_v2i32:
; CHECK: ; %bb.0:
; CHECK-NEXT: v_mov_b32_e32 v2, 0
; CHECK-NEXT: global_load_dwordx2 v[0:1], v2, s[0:1] glc dlc
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: v_readfirstlane_b32 s1, v1
; CHECK-NEXT: s_add_i32 s0, s0, s1
; CHECK-NEXT: v_mov_b32_e32 v0, s0
; CHECK-NEXT: global_store_dword v2, v0, s[2:3]
; CHECK-NEXT: s_endpgm
%load = load volatile <2 x i32>, ptr addrspace(1) %ptr0
%elt0 = extractelement <2 x i32> %load, i32 0
%elt1 = extractelement <2 x i32> %load, i32 1
%sum = add i32 %elt0, %elt1
store i32 %sum, ptr addrspace(1) %ptr1
ret void
}

define amdgpu_ps void @uniform_load_v3i32(ptr addrspace(1) inreg %ptr0, ptr addrspace(1) inreg %ptr1) {
; CHECK-LABEL: uniform_load_v3i32:
; CHECK: ; %bb.0:
; CHECK-NEXT: v_mov_b32_e32 v3, 0
; CHECK-NEXT: global_load_dwordx3 v[0:2], v3, s[0:1]
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: v_readfirstlane_b32 s1, v1
; CHECK-NEXT: v_readfirstlane_b32 s4, v2
; CHECK-NEXT: s_add_i32 s0, s0, s1
; CHECK-NEXT: s_add_i32 s0, s0, s4
; CHECK-NEXT: v_mov_b32_e32 v0, s0
; CHECK-NEXT: global_store_dword v3, v0, s[2:3]
; CHECK-NEXT: s_endpgm
%load = load <3 x i32>, ptr addrspace(1) %ptr0, align 2
%elt0 = extractelement <3 x i32> %load, i32 0
%elt1 = extractelement <3 x i32> %load, i32 1
%elt2 = extractelement <3 x i32> %load, i32 2
%sum0 = add i32 %elt0, %elt1
%sum = add i32 %sum0, %elt2
store i32 %sum, ptr addrspace(1) %ptr1
ret void
}

define amdgpu_ps void @uniform_load_v4i32(ptr addrspace(1) inreg %ptr0, ptr addrspace(1) inreg %ptr1) {
; CHECK-LABEL: uniform_load_v4i32:
; CHECK: ; %bb.0:
; CHECK-NEXT: v_mov_b32_e32 v4, 0
; CHECK-NEXT: global_load_dwordx4 v[0:3], v4, s[0:1] glc dlc
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: v_readfirstlane_b32 s1, v1
; CHECK-NEXT: v_readfirstlane_b32 s4, v2
; CHECK-NEXT: v_readfirstlane_b32 s5, v3
; CHECK-NEXT: s_add_i32 s0, s0, s1
; CHECK-NEXT: s_add_i32 s0, s0, s4
; CHECK-NEXT: s_add_i32 s0, s0, s5
; CHECK-NEXT: v_mov_b32_e32 v0, s0
; CHECK-NEXT: global_store_dword v4, v0, s[2:3]
; CHECK-NEXT: s_endpgm
%load = load volatile <4 x i32>, ptr addrspace(1) %ptr0
%elt0 = extractelement <4 x i32> %load, i32 0
%elt1 = extractelement <4 x i32> %load, i32 1
%elt2 = extractelement <4 x i32> %load, i32 2
%elt3 = extractelement <4 x i32> %load, i32 3
%sum0 = add i32 %elt0, %elt1
%sum1 = add i32 %sum0, %elt2
%sum = add i32 %sum1, %elt3
store i32 %sum, ptr addrspace(1) %ptr1
ret void
}
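The B256 and B512 fallback rules route the pointer to VGPRs and use SplitLoad rather than a single load; those paths are not exercised by the tests above. A hypothetical case (not among this patch's tests) that would take the UniB256 rule:

; Hypothetical example: a uniform, under-aligned 256-bit load; per the rule it
; is split into smaller VGPR loads before the result is made uniform again.
define amdgpu_ps void @uniform_load_v8i32_align1(ptr addrspace(1) inreg %ptr0, ptr addrspace(1) inreg %ptr1) {
  %load = load <8 x i32>, ptr addrspace(1) %ptr0, align 1
  store <8 x i32> %load, ptr addrspace(1) %ptr1
  ret void
}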