@@ -196,7 +196,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
 			return -EINVAL;
 
 		vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id);
-		if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+		if (adev->flags & AMD_IS_APU) {
 			system_mem_needed = size;
 			ttm_mem_needed = size;
 		}
@@ -233,7 +233,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
 	if (adev && xcp_id >= 0) {
 		adev->kfd.vram_used[xcp_id] += vram_needed;
 		adev->kfd.vram_used_aligned[xcp_id] +=
-			(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) ?
+			(adev->flags & AMD_IS_APU) ?
 			vram_needed :
 			ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
 	}
@@ -261,7 +261,7 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
 
 	if (adev) {
 		adev->kfd.vram_used[xcp_id] -= size;
-		if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+		if (adev->flags & AMD_IS_APU) {
 			adev->kfd.vram_used_aligned[xcp_id] -= size;
 			kfd_mem_limit.system_mem_used -= size;
 			kfd_mem_limit.ttm_mem_used -= size;
@@ -890,7 +890,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
 	 * if peer device has large BAR. In contrast, access over xGMI is
 	 * allowed for both small and large BAR configurations of peer device
 	 */
-	if ((adev != bo_adev && !(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU)) &&
+	if ((adev != bo_adev && !(adev->flags & AMD_IS_APU)) &&
 	    ((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) ||
 	     (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) ||
 	     (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
@@ -1658,7 +1658,7 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
 		- atomic64_read(&adev->vram_pin_size)
 		- reserved_for_pt;
 
-	if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		system_mem_available = no_system_mem_limit ?
 					kfd_mem_limit.max_system_mem_limit :
 					kfd_mem_limit.max_system_mem_limit -
@@ -1706,7 +1706,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
 
-		if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+		if (adev->flags & AMD_IS_APU) {
 			domain = AMDGPU_GEM_DOMAIN_GTT;
 			alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
 			alloc_flags = 0;
@@ -1953,7 +1953,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 	if (size) {
 		if (!is_imported &&
 		   (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM ||
-		   ((adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) &&
+		   ((adev->flags & AMD_IS_APU) &&
 		    mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT)))
 			*size = bo_size;
 		else
@@ -2376,7 +2376,7 @@ static int import_obj_create(struct amdgpu_device *adev,
 	(*mem)->bo = bo;
 	(*mem)->va = va;
 	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) &&
-			 !(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) ?
+			 !(adev->flags & AMD_IS_APU) ?
 			 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
 
 	(*mem)->mapped_to_gpu_memory = 0;
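
Every hunk above makes the same substitution: the compound test adev->gmc.is_app_apu || adev->flags & AMD_IS_APU collapses to adev->flags & AMD_IS_APU. That is only safe if every device that sets gmc.is_app_apu also carries AMD_IS_APU in its flags, which makes the first operand redundant. Below is a minimal user-space sketch of that equivalence; the struct, the flag value, and both helper functions are hypothetical stand-ins for the driver's types (the real AMD_IS_APU enum lives in amd_shared.h), not the driver code itself.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver's AMD_IS_APU chip flag. */
#define AMD_IS_APU (1UL << 17)

/* Hypothetical stand-in for struct amdgpu_device, keeping only the
 * two fields the old predicate touched.
 */
struct fake_adev {
	unsigned long flags;	/* mirrors adev->flags */
	bool is_app_apu;	/* mirrors adev->gmc.is_app_apu */
};

/* Old predicate, as on the left-hand side of the diff. */
static bool old_check(const struct fake_adev *adev)
{
	return adev->is_app_apu || (adev->flags & AMD_IS_APU);
}

/* New predicate: equivalent under the patch's premise that any
 * device with is_app_apu set also has AMD_IS_APU in its flags.
 */
static bool new_check(const struct fake_adev *adev)
{
	return adev->flags & AMD_IS_APU;
}

int main(void)
{
	struct fake_adev app_apu  = { .flags = AMD_IS_APU, .is_app_apu = true };
	struct fake_adev small_apu = { .flags = AMD_IS_APU, .is_app_apu = false };
	struct fake_adev dgpu     = { .flags = 0, .is_app_apu = false };

	/* Both predicates agree on all three device shapes. */
	printf("app APU:   old=%d new=%d\n", old_check(&app_apu), new_check(&app_apu));
	printf("small APU: old=%d new=%d\n", old_check(&small_apu), new_check(&small_apu));
	printf("dGPU:      old=%d new=%d\n", old_check(&dgpu), new_check(&dgpu));
	return 0;
}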