Commit 68786b7

suijingfeng authored and lynxeye-dev committed
drm/etnaviv: Map and unmap GPUVA range with respect to the GPUVA size
Etnaviv assumes that the GPU page size is 4KiB. However, GPUVA ranges collide when using softpin-capable GPUs on a non-4KiB CPU page size configuration. The root cause is that a kernel-side BO occupies a larger address space than userspace expects: the backing memory of a GEM buffer object has to be aligned to the CPU PAGE_SIZE, so a GPUVA range allocated by userspace can fail to be inserted exactly into the specified hole.

To solve this problem, first record the GPU-visible size of a BO, then map and unmap the SG entries strictly with respect to the total GPUVA size.

Signed-off-by: Sui Jingfeng <sui.jingfeng@linux.dev>
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
1 parent: b5f1eed
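To make the commit message's arithmetic concrete, below is a minimal, self-contained sketch, assuming a hypothetical kernel configuration with 16KiB CPU pages; the constants and the align_up() helper are illustrative and are not part of the driver:

#include <stdio.h>

#define CPU_PAGE_SIZE 16384u	/* assumed: kernel built with 16KiB pages */
#define GPU_PAGE_SIZE  4096u	/* page size the etnaviv GPU assumes */

/* Illustrative power-of-two round-up, same idea as the kernel's ALIGN(). */
static unsigned int align_up(unsigned int x, unsigned int a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	unsigned int bo_size = 4096;	/* GPU-visible size userspace asked for */
	unsigned int backing = align_up(bo_size, CPU_PAGE_SIZE);

	/*
	 * Before the fix, the whole PAGE_SIZE-aligned backing store was
	 * mapped, so this BO consumed 16KiB of GPUVA space even though
	 * softpin userspace reserved only a 4KiB hole for it; the next
	 * pinned BO then collides with the tail of the mapping.
	 */
	printf("GPU-visible: %u, mapped before: %u, mapped after: %u\n",
	       bo_size, backing, align_up(bo_size, GPU_PAGE_SIZE));
	return 0;
}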

File tree

1 file changed: +13 -25 lines changed


drivers/gpu/drm/etnaviv/etnaviv_mmu.c

Lines changed: 13 additions & 25 deletions
@@ -69,9 +69,11 @@ static int etnaviv_context_map(struct etnaviv_iommu_context *context,
 	return ret;
 }
 
-static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
+static int etnaviv_iommu_map(struct etnaviv_iommu_context *context,
+			     u32 iova, unsigned int va_len,
 			     struct sg_table *sgt, int prot)
-{	struct scatterlist *sg;
+{
+	struct scatterlist *sg;
 	unsigned int da = iova;
 	unsigned int i;
 	int ret;
@@ -81,14 +83,16 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
 
 	for_each_sgtable_dma_sg(sgt, sg, i) {
 		phys_addr_t pa = sg_dma_address(sg) - sg->offset;
-		size_t bytes = sg_dma_len(sg) + sg->offset;
+		unsigned int da_len = sg_dma_len(sg) + sg->offset;
+		unsigned int bytes = min_t(unsigned int, da_len, va_len);
 
-		VERB("map[%d]: %08x %pap(%zx)", i, iova, &pa, bytes);
+		VERB("map[%d]: %08x %pap(%x)", i, iova, &pa, bytes);
 
 		ret = etnaviv_context_map(context, da, pa, bytes, prot);
 		if (ret)
 			goto fail;
 
+		va_len -= bytes;
 		da += bytes;
 	}
 
@@ -104,21 +108,7 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
 static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
 				struct sg_table *sgt, unsigned len)
 {
-	struct scatterlist *sg;
-	unsigned int da = iova;
-	int i;
-
-	for_each_sgtable_dma_sg(sgt, sg, i) {
-		size_t bytes = sg_dma_len(sg) + sg->offset;
-
-		etnaviv_context_unmap(context, da, bytes);
-
-		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
-
-		BUG_ON(!PAGE_ALIGNED(bytes));
-
-		da += bytes;
-	}
+	etnaviv_context_unmap(context, iova, len);
 
 	context->flush_seq++;
 }
@@ -131,7 +121,7 @@ static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
 	lockdep_assert_held(&context->lock);
 
 	etnaviv_iommu_unmap(context, mapping->vram_node.start,
-			    etnaviv_obj->sgt, etnaviv_obj->base.size);
+			    etnaviv_obj->sgt, etnaviv_obj->size);
 	drm_mm_remove_node(&mapping->vram_node);
 }
 
@@ -305,16 +295,14 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
 	node = &mapping->vram_node;
 
 	if (va)
-		ret = etnaviv_iommu_insert_exact(context, node,
-						 etnaviv_obj->base.size, va);
+		ret = etnaviv_iommu_insert_exact(context, node, etnaviv_obj->size, va);
 	else
-		ret = etnaviv_iommu_find_iova(context, node,
-					      etnaviv_obj->base.size);
+		ret = etnaviv_iommu_find_iova(context, node, etnaviv_obj->size);
 	if (ret < 0)
 		goto unlock;
 
 	mapping->iova = node->start;
-	ret = etnaviv_iommu_map(context, node->start, sgt,
+	ret = etnaviv_iommu_map(context, node->start, etnaviv_obj->size, sgt,
 				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
 
 	if (ret < 0) {
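As a rough illustration of the clamping the new mapping loop performs, here is a standalone sketch; the SG table, the sizes, and the start address are made up, and min_u() merely stands in for the kernel's min_t():

#include <stdio.h>

/* Hypothetical stand-in for the single SG entry of a BO whose backing
 * store is one 16KiB CPU page while only 4KiB of it is GPU-visible. */
struct fake_sg { unsigned int dma_len; };

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	struct fake_sg sgt[] = { { 16384 } };
	unsigned int va_len = 4096;		/* GPU-visible (GPUVA) size */
	unsigned int da = 0x10000000;		/* assumed start of the hole */

	for (unsigned int i = 0; i < sizeof(sgt) / sizeof(sgt[0]); i++) {
		/* Clamp each entry to what is left of the GPUVA range,
		 * as the patched etnaviv_iommu_map() does with min_t(). */
		unsigned int bytes = min_u(sgt[i].dma_len, va_len);

		printf("map[%u]: %08x (+%u bytes)\n", i, da, bytes);
		va_len -= bytes;
		da += bytes;
	}
	return 0;
}

On the unmap side no per-entry walk is needed anymore: the GPUVA range is contiguous, so the simplified etnaviv_iommu_unmap() tears the whole range down with a single etnaviv_context_unmap() call of the recorded length.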
